2021-01-09 19:46:50 +01:00
|
|
|
#include "barriers.h"
|
|
|
|
|
#include "buffer_object.h"
|
|
|
|
|
#include "commands.h"
|
|
|
|
|
#include "device.h"
|
|
|
|
|
#include "sync.h"
|
|
|
|
|
#include "shared.h"
|
|
|
|
|
|
2021-03-23 20:32:50 +01:00
|
|
|
#include "Emu/Cell/timers.hpp"
|
|
|
|
|
|
2021-03-03 22:23:52 +01:00
|
|
|
#include "util/sysinfo.hpp"
|
2021-03-08 20:57:28 +01:00
|
|
|
#include "util/asm.hpp"
|
2021-03-03 22:23:52 +01:00
|
|
|
|
2023-05-16 01:49:44 +02:00
|
|
|
// FIXME: namespace pollution
|
|
|
|
|
#include "../VKResourceManager.h"
|
|
|
|
|
|
2021-01-09 19:46:50 +01:00
|
|
|
namespace vk
|
|
|
|
|
{
|
2023-06-21 23:19:03 +02:00
|
|
|
// Util
|
|
|
|
|
namespace v1_utils
|
|
|
|
|
{
|
|
|
|
|
VkPipelineStageFlags gather_src_stages(const VkDependencyInfoKHR& dependency)
{
	// OR together the srcStageMask of every barrier (global, buffer and image)
	// contained in 'dependency', producing the single source stage mask the
	// legacy (v1) synchronization API expects.
	VkPipelineStageFlags result = VK_PIPELINE_STAGE_NONE;
	for (u32 idx = 0; idx < dependency.memoryBarrierCount; ++idx)
	{
		result |= dependency.pMemoryBarriers[idx].srcStageMask;
	}
	for (u32 idx = 0; idx < dependency.bufferMemoryBarrierCount; ++idx)
	{
		result |= dependency.pBufferMemoryBarriers[idx].srcStageMask;
	}
	for (u32 idx = 0; idx < dependency.imageMemoryBarrierCount; ++idx)
	{
		result |= dependency.pImageMemoryBarriers[idx].srcStageMask;
	}
	return result;
}
|
|
|
|
|
|
|
|
|
|
VkPipelineStageFlags gather_dst_stages(const VkDependencyInfoKHR& dependency)
{
	// OR together the dstStageMask of every barrier (global, buffer and image)
	// contained in 'dependency', producing the single destination stage mask
	// the legacy (v1) synchronization API expects.
	VkPipelineStageFlags result = VK_PIPELINE_STAGE_NONE;
	for (u32 idx = 0; idx < dependency.memoryBarrierCount; ++idx)
	{
		result |= dependency.pMemoryBarriers[idx].dstStageMask;
	}
	for (u32 idx = 0; idx < dependency.bufferMemoryBarrierCount; ++idx)
	{
		result |= dependency.pBufferMemoryBarriers[idx].dstStageMask;
	}
	for (u32 idx = 0; idx < dependency.imageMemoryBarrierCount; ++idx)
	{
		result |= dependency.pImageMemoryBarriers[idx].dstStageMask;
	}
	return result;
}
|
|
|
|
|
|
|
|
|
|
auto get_memory_barriers(const VkDependencyInfoKHR& dependency)
|
|
|
|
|
{
|
|
|
|
|
std::vector<VkMemoryBarrier> result;
|
|
|
|
|
for (u32 i = 0; i < dependency.memoryBarrierCount; ++i)
|
|
|
|
|
{
|
2023-06-22 20:33:47 +02:00
|
|
|
result.push_back
|
2023-06-22 19:55:35 +02:00
|
|
|
({
|
2023-06-21 23:19:03 +02:00
|
|
|
VK_STRUCTURE_TYPE_MEMORY_BARRIER,
|
|
|
|
|
nullptr,
|
|
|
|
|
static_cast<VkAccessFlags>(dependency.pMemoryBarriers[i].srcAccessMask),
|
|
|
|
|
static_cast<VkAccessFlags>(dependency.pMemoryBarriers[i].dstAccessMask)
|
2023-06-22 19:55:35 +02:00
|
|
|
});
|
2023-06-21 23:19:03 +02:00
|
|
|
}
|
|
|
|
|
return result;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
auto get_image_memory_barriers(const VkDependencyInfoKHR& dependency)
|
|
|
|
|
{
|
|
|
|
|
std::vector<VkImageMemoryBarrier> result;
|
|
|
|
|
for (u32 i = 0; i < dependency.imageMemoryBarrierCount; ++i)
|
|
|
|
|
{
|
2023-06-22 20:33:47 +02:00
|
|
|
result.push_back
|
2023-06-22 19:55:35 +02:00
|
|
|
({
|
2023-06-21 23:19:03 +02:00
|
|
|
VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
|
|
|
|
|
nullptr,
|
|
|
|
|
static_cast<VkAccessFlags>(dependency.pImageMemoryBarriers[i].srcAccessMask),
|
|
|
|
|
static_cast<VkAccessFlags>(dependency.pImageMemoryBarriers[i].dstAccessMask),
|
|
|
|
|
dependency.pImageMemoryBarriers[i].oldLayout,
|
|
|
|
|
dependency.pImageMemoryBarriers[i].newLayout,
|
|
|
|
|
dependency.pImageMemoryBarriers[i].srcQueueFamilyIndex,
|
|
|
|
|
dependency.pImageMemoryBarriers[i].dstQueueFamilyIndex,
|
|
|
|
|
dependency.pImageMemoryBarriers[i].image,
|
|
|
|
|
dependency.pImageMemoryBarriers[i].subresourceRange
|
2023-06-22 19:55:35 +02:00
|
|
|
});
|
2023-06-21 23:19:03 +02:00
|
|
|
}
|
|
|
|
|
return result;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
auto get_buffer_memory_barriers(const VkDependencyInfoKHR& dependency)
|
|
|
|
|
{
|
|
|
|
|
std::vector<VkBufferMemoryBarrier> result;
|
|
|
|
|
for (u32 i = 0; i < dependency.bufferMemoryBarrierCount; ++i)
|
|
|
|
|
{
|
2023-06-22 20:33:47 +02:00
|
|
|
result.push_back
|
2023-06-22 19:55:35 +02:00
|
|
|
({
|
2023-06-21 23:19:03 +02:00
|
|
|
VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER,
|
|
|
|
|
nullptr,
|
|
|
|
|
static_cast<VkAccessFlags>(dependency.pBufferMemoryBarriers[i].srcAccessMask),
|
|
|
|
|
static_cast<VkAccessFlags>(dependency.pBufferMemoryBarriers[i].dstAccessMask),
|
|
|
|
|
dependency.pBufferMemoryBarriers[i].srcQueueFamilyIndex,
|
|
|
|
|
dependency.pBufferMemoryBarriers[i].dstQueueFamilyIndex,
|
|
|
|
|
dependency.pBufferMemoryBarriers[i].buffer,
|
|
|
|
|
dependency.pBufferMemoryBarriers[i].offset,
|
|
|
|
|
dependency.pBufferMemoryBarriers[i].size
|
2023-06-22 19:55:35 +02:00
|
|
|
});
|
2023-06-21 23:19:03 +02:00
|
|
|
}
|
|
|
|
|
return result;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Objects
|
2021-01-09 19:46:50 +01:00
|
|
|
fence::fence(VkDevice dev)
{
	// Create an unsignaled VkFence owned by 'dev'; the raw device handle is
	// retained so the destructor can destroy the fence later.
	owner = dev;

	VkFenceCreateInfo info
	{
		.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO
	};
	CHECK_RESULT(vkCreateFence(dev, &info, nullptr, &handle));
}
|
|
|
|
|
|
|
|
|
|
fence::~fence()
{
	// Nothing to do if creation failed or the handle was already released.
	if (!handle)
	{
		return;
	}

	vkDestroyFence(owner, handle, nullptr);
	handle = VK_NULL_HANDLE; // Defensive: make double-destroy a no-op
}
|
|
|
|
|
|
|
|
|
|
void fence::reset()
{
	// Return the VkFence to the unsignaled state and clear the CPU-side
	// 'flushed' flag so wait_flush() blocks until the next signal_flushed().
	vkResetFences(owner, 1, &handle);
	flushed.release(false);
}
|
|
|
|
|
|
|
|
|
|
void fence::signal_flushed()
{
	// Mark the fence's submission as flushed to the queue; wakes any thread
	// spinning in wait_flush().
	flushed.release(true);
}
|
|
|
|
|
|
|
|
|
|
void fence::wait_flush()
|
|
|
|
|
{
|
|
|
|
|
while (!flushed)
|
|
|
|
|
{
|
2021-12-30 17:39:18 +01:00
|
|
|
utils::pause();
|
2021-01-09 19:46:50 +01:00
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
fence::operator bool() const
{
	// True when the underlying VkFence was successfully created and has not
	// been destroyed yet.
	return (handle != VK_NULL_HANDLE);
}
|
|
|
|
|
|
2022-01-30 12:56:22 +01:00
|
|
|
semaphore::semaphore(const render_device& dev)
	: m_device(dev)
{
	// Plain binary semaphore; no pNext extensions or flags required.
	VkSemaphoreCreateInfo info
	{
		.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO
	};
	CHECK_RESULT(vkCreateSemaphore(m_device, &info, nullptr, &m_handle));
}
|
|
|
|
|
|
|
|
|
|
semaphore::~semaphore()
{
	// No null check needed: the constructor either succeeds or aborts via
	// CHECK_RESULT, so m_handle is always valid here.
	vkDestroySemaphore(m_device, m_handle, nullptr);
}
|
|
|
|
|
|
|
|
|
|
semaphore::operator VkSemaphore() const
{
	// Implicit conversion so the wrapper can be passed straight to Vulkan APIs.
	return m_handle;
}
|
|
|
|
|
|
2023-06-26 18:43:06 +02:00
|
|
|
event::event(const render_device& dev, sync_domain /*domain*/)
	: m_device(&dev), v2(dev.get_synchronization2_support())
{
	// The sync domain is currently unused for the VkEvent path; the event is
	// created the same way regardless. 'v2' selects the synchronization2
	// entry points at signal/wait time.
	VkEventCreateInfo info{};
	info.sType = VK_STRUCTURE_TYPE_EVENT_CREATE_INFO;
	CHECK_RESULT(vkCreateEvent(dev, &info, nullptr, &m_vk_event));
}
|
|
|
|
|
|
|
|
|
|
event::~event()
{
	// Skip destruction only in the rare case the handle was never created.
	if (!m_vk_event) [[unlikely]]
	{
		return;
	}

	vkDestroyEvent(*m_device, m_vk_event, nullptr);
}
|
|
|
|
|
|
2023-06-21 23:19:03 +02:00
|
|
|
void event::signal(const command_buffer& cmd, const VkDependencyInfoKHR& dependency)
|
2021-01-09 19:46:50 +01:00
|
|
|
{
|
2023-06-21 23:19:03 +02:00
|
|
|
if (v2) [[ likely ]]
|
2021-01-09 19:46:50 +01:00
|
|
|
{
|
2023-06-21 23:19:03 +02:00
|
|
|
m_device->_vkCmdSetEvent2KHR(cmd, m_vk_event, &dependency);
|
2021-01-09 19:46:50 +01:00
|
|
|
}
|
|
|
|
|
else
|
|
|
|
|
{
|
2023-06-21 23:19:03 +02:00
|
|
|
// Legacy fallback. Should be practically unused with the exception of in-development drivers.
|
|
|
|
|
const auto stages = v1_utils::gather_src_stages(dependency);
|
|
|
|
|
vkCmdSetEvent(cmd, m_vk_event, stages);
|
2021-01-09 19:46:50 +01:00
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2021-02-28 19:11:33 +01:00
|
|
|
void event::host_signal() const
{
	// Set the event from the CPU (vkSetEvent), as opposed to signal() which
	// records the set operation into a command buffer.
	ensure(m_vk_event);
	vkSetEvent(*m_device, m_vk_event);
}
|
|
|
|
|
|
2023-06-21 23:19:03 +02:00
|
|
|
void event::gpu_wait(const command_buffer& cmd, const VkDependencyInfoKHR& dependency) const
|
2021-02-28 20:00:46 +01:00
|
|
|
{
|
|
|
|
|
ensure(m_vk_event);
|
|
|
|
|
|
2023-06-21 23:19:03 +02:00
|
|
|
if (v2) [[ likely ]]
|
2021-02-28 19:11:33 +01:00
|
|
|
{
|
2023-06-21 23:19:03 +02:00
|
|
|
m_device->_vkCmdWaitEvents2KHR(cmd, 1, &m_vk_event, &dependency);
|
2021-02-28 19:11:33 +01:00
|
|
|
}
|
|
|
|
|
else
|
|
|
|
|
{
|
2023-06-21 23:19:03 +02:00
|
|
|
const auto src_stages = v1_utils::gather_src_stages(dependency);
|
|
|
|
|
const auto dst_stages = v1_utils::gather_dst_stages(dependency);
|
|
|
|
|
const auto memory_barriers = v1_utils::get_memory_barriers(dependency);
|
|
|
|
|
const auto image_memory_barriers = v1_utils::get_image_memory_barriers(dependency);
|
|
|
|
|
const auto buffer_memory_barriers = v1_utils::get_buffer_memory_barriers(dependency);
|
|
|
|
|
|
|
|
|
|
vkCmdWaitEvents(cmd,
|
|
|
|
|
1, &m_vk_event,
|
|
|
|
|
src_stages, dst_stages,
|
|
|
|
|
::size32(memory_barriers), memory_barriers.data(),
|
|
|
|
|
::size32(buffer_memory_barriers), buffer_memory_barriers.data(),
|
|
|
|
|
::size32(image_memory_barriers), image_memory_barriers.data());
|
2021-02-28 19:11:33 +01:00
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2023-06-21 23:19:03 +02:00
|
|
|
void event::reset() const
{
	// Host-side reset of the event back to the unsignaled state.
	vkResetEvent(*m_device, m_vk_event);
}
|
|
|
|
|
|
2021-01-09 19:46:50 +01:00
|
|
|
VkResult event::status() const
{
	// Returns VK_EVENT_SET / VK_EVENT_RESET, or an error code from the driver.
	return vkGetEventStatus(*m_device, m_vk_event);
}
|
|
|
|
|
|
2023-05-17 18:58:30 +02:00
|
|
|
// Pool of 'count' 4-byte marker slots. The backing buffer is created lazily on
// the first allocate() call, not here.
gpu_debug_marker_pool::gpu_debug_marker_pool(const vk::render_device& dev, u32 count)
	: m_count(count), pdev(&dev)
{}
|
|
|
|
|
|
2023-05-17 18:58:30 +02:00
|
|
|
std::tuple<VkBuffer, u64, volatile u32*> gpu_debug_marker_pool::allocate()
|
2023-05-16 01:49:44 +02:00
|
|
|
{
|
|
|
|
|
if (!m_buffer || m_offset >= m_count)
|
|
|
|
|
{
|
|
|
|
|
create_impl();
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
const auto out_offset = m_offset;
|
|
|
|
|
m_offset ++;
|
|
|
|
|
return { m_buffer->value, out_offset * 4, m_mapped + out_offset };
|
|
|
|
|
}
|
|
|
|
|
|
2023-05-17 18:58:30 +02:00
|
|
|
void gpu_debug_marker_pool::create_impl()
|
2023-05-16 01:49:44 +02:00
|
|
|
{
|
|
|
|
|
if (m_buffer)
|
|
|
|
|
{
|
|
|
|
|
m_buffer->unmap();
|
|
|
|
|
vk::get_resource_manager()->dispose(m_buffer);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
m_buffer = std::make_unique<buffer>
|
|
|
|
|
(
|
|
|
|
|
*pdev,
|
|
|
|
|
m_count * 4,
|
|
|
|
|
pdev->get_memory_mapping().host_visible_coherent,
|
|
|
|
|
VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT,
|
|
|
|
|
VK_BUFFER_USAGE_TRANSFER_DST_BIT,
|
|
|
|
|
0,
|
|
|
|
|
VMM_ALLOCATION_POOL_SYSTEM
|
|
|
|
|
);
|
|
|
|
|
|
|
|
|
|
m_mapped = reinterpret_cast<volatile u32*>(m_buffer->map(0, VK_WHOLE_SIZE));
|
|
|
|
|
m_offset = 0;
|
|
|
|
|
}
|
|
|
|
|
|
2023-05-17 18:58:30 +02:00
|
|
|
gpu_debug_marker::gpu_debug_marker(gpu_debug_marker_pool& pool, std::string message)
	: m_message(std::move(message)), m_device(*pool.pdev)
{
	// Grab a slot from the pool and seed it with the "not reached" sentinel.
	// signal() later makes the GPU overwrite it, proving the marker executed.
	auto [vk_buffer, byte_offset, mapped_ptr] = pool.allocate();
	m_buffer = vk_buffer;
	m_buffer_offset = byte_offset;
	m_value = mapped_ptr;
	*m_value = 0xCAFEBABE;
}
|
|
|
|
|
|
2023-05-17 17:41:36 +02:00
|
|
|
gpu_debug_marker::~gpu_debug_marker()
{
	// Flush the report (if not already printed) while the mapped slot behind
	// m_value is still readable.
	if (!m_printed)
	{
		dump();
	}

	m_value = nullptr;
}
|
|
|
|
|
|
2023-05-17 17:41:36 +02:00
|
|
|
void gpu_debug_marker::signal(const command_buffer& cmd, VkPipelineStageFlags stages, VkAccessFlags access)
{
	// Make prior work at 'stages'/'access' visible to the transfer write, then
	// overwrite the marker slot on the GPU timeline. Observing 0xDEADBEEF on
	// the CPU proves command-stream execution reached this point.
	insert_global_memory_barrier(cmd, stages, VK_PIPELINE_STAGE_TRANSFER_BIT, access, VK_ACCESS_TRANSFER_WRITE_BIT);
	vkCmdFillBuffer(cmd, m_buffer, m_buffer_offset, 4, 0xDEADBEEF);
}
|
|
|
|
|
|
2023-05-17 17:41:36 +02:00
|
|
|
void gpu_debug_marker::dump()
{
	// Report only markers the GPU never overwrote (sentinel still intact),
	// then latch m_printed so the destructor does not report a second time.
	if (*m_value == 0xCAFEBABE)
	{
		rsx_log.error("DEBUG MARKER NOT REACHED: %s", m_message);
	}

	m_printed = true;
}
|
|
|
|
|
|
2023-05-17 17:41:36 +02:00
|
|
|
void gpu_debug_marker::dump() const
{
	// Const variant: always logs the marker state (reached or not) but cannot
	// latch m_printed, so repeated calls will log again.
	if (*m_value == 0xCAFEBABE)
	{
		rsx_log.error("DEBUG MARKER NOT REACHED: %s", m_message);
	}
	else
	{
		rsx_log.error("DEBUG MARKER: %s", m_message);
	}
}
|
|
|
|
|
|
|
|
|
|
// FIXME
|
2023-05-17 18:58:30 +02:00
|
|
|
// Lazily-created pool shared by all gpu_debug_marker instances; created and
// accessed through get_shared_marker_pool().
static std::unique_ptr<gpu_debug_marker_pool> g_gpu_debug_marker_pool;
|
2023-05-16 01:49:44 +02:00
|
|
|
|
2023-05-17 18:58:30 +02:00
|
|
|
gpu_debug_marker_pool& get_shared_marker_pool(const vk::render_device& dev)
{
	// Return the process-wide marker pool, creating it on first use.
	// 65536 slots x 4 bytes = 256 KiB of host-visible backing storage.
	// NOTE(review): no synchronization here — presumably only called from the
	// render thread; confirm before using from elsewhere.
	if (auto& pool = g_gpu_debug_marker_pool)
	{
		return *pool;
	}

	g_gpu_debug_marker_pool = std::make_unique<gpu_debug_marker_pool>(dev, 65536);
	return *g_gpu_debug_marker_pool;
}
|
|
|
|
|
|
2023-05-17 17:41:36 +02:00
|
|
|
void gpu_debug_marker::insert(
|
2023-05-16 01:49:44 +02:00
|
|
|
const vk::render_device& dev,
|
|
|
|
|
const vk::command_buffer& cmd,
|
|
|
|
|
std::string message,
|
|
|
|
|
VkPipelineStageFlags stages,
|
|
|
|
|
VkAccessFlags access)
|
|
|
|
|
{
|
2023-05-17 17:41:36 +02:00
|
|
|
auto result = std::make_unique<gpu_debug_marker>(get_shared_marker_pool(dev), message);
|
2023-05-16 01:49:44 +02:00
|
|
|
result->signal(cmd, stages, access);
|
|
|
|
|
vk::get_resource_manager()->dispose(result);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
debug_marker_scope::debug_marker_scope(const vk::command_buffer& cmd, const std::string& message)
	: m_device(&cmd.get_command_pool().get_owner()), m_cb(&cmd), m_message(message), m_tag(rsx::get_shared_tag())
{
	// RAII pair: the constructor emits an "Enter" marker and the destructor a
	// matching "Exit" marker, both carrying the same shared tag so the two
	// can be correlated in the log.
	vk::gpu_debug_marker::insert(
		*m_device,
		*m_cb,
		fmt::format("0x%llx: Enter %s", m_tag, m_message)
	);
}
|
|
|
|
|
|
|
|
|
|
debug_marker_scope::~debug_marker_scope()
{
	// The command buffer must still be recording, otherwise the exit marker
	// would land in the wrong (or no) submission.
	ensure(m_cb && m_cb->is_recording());

	// Use the same 64-bit tag format as the constructor's "Enter" message
	// (was "0x%x", which truncates the tag under printf conventions and makes
	// enter/exit pairs print differently).
	vk::gpu_debug_marker::insert(
		*m_device,
		*m_cb,
		fmt::format("0x%llx: Exit %s", m_tag, m_message)
	);
}
|
|
|
|
|
|
2021-01-09 19:46:50 +01:00
|
|
|
VkResult wait_for_fence(fence* pFence, u64 timeout)
|
|
|
|
|
{
|
|
|
|
|
pFence->wait_flush();
|
|
|
|
|
|
|
|
|
|
if (timeout)
|
|
|
|
|
{
|
|
|
|
|
return vkWaitForFences(*g_render_device, 1, &pFence->handle, VK_FALSE, timeout * 1000ull);
|
|
|
|
|
}
|
|
|
|
|
else
|
|
|
|
|
{
|
|
|
|
|
while (auto status = vkGetFenceStatus(*g_render_device, pFence->handle))
|
|
|
|
|
{
|
|
|
|
|
switch (status)
|
|
|
|
|
{
|
|
|
|
|
case VK_NOT_READY:
|
2022-03-22 19:37:49 +01:00
|
|
|
utils::pause();
|
2021-01-09 19:46:50 +01:00
|
|
|
continue;
|
|
|
|
|
default:
|
|
|
|
|
die_with_error(status);
|
|
|
|
|
return status;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
return VK_SUCCESS;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
VkResult wait_for_event(event* pEvent, u64 timeout)
|
|
|
|
|
{
|
2021-03-03 22:23:52 +01:00
|
|
|
// Convert timeout to TSC cycles. Timeout accuracy isn't super-important, only fast response when event is signaled (within 10us if possible)
|
2022-10-08 19:09:01 +02:00
|
|
|
const u64 freq = utils::get_tsc_freq();
|
|
|
|
|
|
|
|
|
|
if (freq)
|
|
|
|
|
{
|
|
|
|
|
timeout *= (freq / 1'000'000);
|
|
|
|
|
}
|
|
|
|
|
|
2021-03-03 22:23:52 +01:00
|
|
|
u64 start = 0;
|
|
|
|
|
|
2021-01-09 19:46:50 +01:00
|
|
|
while (true)
|
|
|
|
|
{
|
|
|
|
|
switch (const auto status = pEvent->status())
|
|
|
|
|
{
|
|
|
|
|
case VK_EVENT_SET:
|
|
|
|
|
return VK_SUCCESS;
|
|
|
|
|
case VK_EVENT_RESET:
|
|
|
|
|
break;
|
|
|
|
|
default:
|
|
|
|
|
die_with_error(status);
|
|
|
|
|
return status;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (timeout)
|
|
|
|
|
{
|
2022-10-08 19:09:01 +02:00
|
|
|
const auto now = freq ? utils::get_tsc() : get_system_time();
|
|
|
|
|
|
2021-03-03 22:23:52 +01:00
|
|
|
if (!start)
|
2021-01-09 19:46:50 +01:00
|
|
|
{
|
2022-10-08 19:09:01 +02:00
|
|
|
start = now;
|
2021-01-09 19:46:50 +01:00
|
|
|
continue;
|
|
|
|
|
}
|
|
|
|
|
|
2022-10-08 19:09:01 +02:00
|
|
|
if ((now > start) &&
|
2021-03-03 22:23:52 +01:00
|
|
|
(now - start) > timeout)
|
2021-01-09 19:46:50 +01:00
|
|
|
{
|
|
|
|
|
rsx_log.error("[vulkan] vk::wait_for_event has timed out!");
|
|
|
|
|
return VK_TIMEOUT;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2021-12-30 17:39:18 +01:00
|
|
|
utils::pause();
|
2021-01-09 19:46:50 +01:00
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|