rsx: fetch vulkan api from implementation

add vulkan driver workarounds to config
This commit is contained in:
DH 2025-03-18 18:19:05 +03:00
parent 4667a47e9b
commit ac0a803c9e
48 changed files with 487 additions and 361 deletions

View file

@ -41,7 +41,7 @@ namespace vk
.pSignalSemaphores = submit_info.signal_semaphores.data()
};
vkQueueSubmit(submit_info.queue, 1, &info, submit_info.pfence->handle);
VK_GET_SYMBOL(vkQueueSubmit)(submit_info.queue, 1, &info, submit_info.pfence->handle);
release_global_submit_lock();
// Signal fence

View file

@ -154,7 +154,7 @@ namespace vk
layout_info.pPushConstantRanges = push_constants.data();
VkPipelineLayout result;
CHECK_RESULT(vkCreatePipelineLayout(dev, &layout_info, nullptr, &result));
CHECK_RESULT(VK_GET_SYMBOL(vkCreatePipelineLayout)(dev, &layout_info, nullptr, &result));
return std::make_tuple(result, set_layout);
}
}

View file

@ -57,7 +57,7 @@ namespace vk
layout_info.pPushConstantRanges = &push_constants;
}
CHECK_RESULT(vkCreatePipelineLayout(*g_render_device, &layout_info, nullptr, &m_pipeline_layout));
CHECK_RESULT(VK_GET_SYMBOL(vkCreatePipelineLayout)(*g_render_device, &layout_info, nullptr, &m_pipeline_layout));
}
void compute_task::create()
@ -121,8 +121,8 @@ namespace vk
m_program.reset();
m_param_buffer.reset();
vkDestroyDescriptorSetLayout(*g_render_device, m_descriptor_layout, nullptr);
vkDestroyPipelineLayout(*g_render_device, m_pipeline_layout, nullptr);
VK_GET_SYMBOL(vkDestroyDescriptorSetLayout)(*g_render_device, m_descriptor_layout, nullptr);
VK_GET_SYMBOL(vkDestroyPipelineLayout)(*g_render_device, m_pipeline_layout, nullptr);
m_descriptor_pool.destroy();
initialized = false;
@ -160,7 +160,7 @@ namespace vk
bind_resources();
vkCmdBindPipeline(cmd, VK_PIPELINE_BIND_POINT_COMPUTE, m_program->pipeline);
VK_GET_SYMBOL(vkCmdBindPipeline)(cmd, VK_PIPELINE_BIND_POINT_COMPUTE, m_program->pipeline);
m_descriptor_set.bind(cmd, VK_PIPELINE_BIND_POINT_COMPUTE, m_pipeline_layout);
}
@ -173,7 +173,7 @@ namespace vk
}
load_program(cmd);
vkCmdDispatch(cmd, invocations_x, invocations_y, invocations_z);
VK_GET_SYMBOL(vkCmdDispatch)(cmd, invocations_x, invocations_y, invocations_z);
}
void compute_task::run(const vk::command_buffer& cmd, u32 num_invocations)
@ -279,7 +279,7 @@ namespace vk
void cs_shuffle_base::set_parameters(const vk::command_buffer& cmd, const u32* params, u8 count)
{
ensure(use_push_constants);
vkCmdPushConstants(cmd, m_pipeline_layout, VK_SHADER_STAGE_COMPUTE_BIT, 0, count * 4, params);
VK_GET_SYMBOL(vkCmdPushConstants)(cmd, m_pipeline_layout, VK_SHADER_STAGE_COMPUTE_BIT, 0, count * 4, params);
}
void cs_shuffle_base::run(const vk::command_buffer& cmd, const vk::buffer* data, u32 data_length, u32 data_offset)

View file

@ -461,7 +461,7 @@ namespace vk
void set_parameters(const vk::command_buffer& cmd)
{
vkCmdPushConstants(cmd, m_pipeline_layout, VK_SHADER_STAGE_COMPUTE_BIT, 0, push_constants_size, params.data);
VK_GET_SYMBOL(vkCmdPushConstants)(cmd, m_pipeline_layout, VK_SHADER_STAGE_COMPUTE_BIT, 0, push_constants_size, params.data);
}
void run(const vk::command_buffer& cmd, const vk::buffer* dst, u32 out_offset, const vk::buffer* src, u32 in_offset, u32 data_length, u32 width, u32 height, u32 depth, u32 mipmaps) override
@ -590,7 +590,7 @@ namespace vk
void set_parameters(const vk::command_buffer& cmd)
{
vkCmdPushConstants(cmd, m_pipeline_layout, VK_SHADER_STAGE_COMPUTE_BIT, 0, push_constants_size, &params);
VK_GET_SYMBOL(vkCmdPushConstants)(cmd, m_pipeline_layout, VK_SHADER_STAGE_COMPUTE_BIT, 0, push_constants_size, &params);
}
void run(const vk::command_buffer& cmd, const RSX_detiler_config& config)

View file

@ -135,14 +135,14 @@ void VKGSRender::update_draw_state()
{
const float actual_line_width =
m_device->get_wide_lines_support() ? rsx::method_registers.line_width() * rsx::get_resolution_scale() : 1.f;
vkCmdSetLineWidth(*m_current_command_buffer, actual_line_width);
VK_GET_SYMBOL(vkCmdSetLineWidth)(*m_current_command_buffer, actual_line_width);
}
if (rsx::method_registers.blend_enabled())
{
// Update blend constants
auto blend_colors = rsx::get_constant_blend_colors();
vkCmdSetBlendConstants(*m_current_command_buffer, blend_colors.data());
VK_GET_SYMBOL(vkCmdSetBlendConstants)(*m_current_command_buffer, blend_colors.data());
}
if (rsx::method_registers.stencil_test_enabled())
@ -150,15 +150,15 @@ void VKGSRender::update_draw_state()
const bool two_sided_stencil = rsx::method_registers.two_sided_stencil_test_enabled();
VkStencilFaceFlags face_flag = (two_sided_stencil) ? VK_STENCIL_FACE_FRONT_BIT : VK_STENCIL_FRONT_AND_BACK;
vkCmdSetStencilWriteMask(*m_current_command_buffer, face_flag, rsx::method_registers.stencil_mask());
vkCmdSetStencilCompareMask(*m_current_command_buffer, face_flag, rsx::method_registers.stencil_func_mask());
vkCmdSetStencilReference(*m_current_command_buffer, face_flag, rsx::method_registers.stencil_func_ref());
VK_GET_SYMBOL(vkCmdSetStencilWriteMask)(*m_current_command_buffer, face_flag, rsx::method_registers.stencil_mask());
VK_GET_SYMBOL(vkCmdSetStencilCompareMask)(*m_current_command_buffer, face_flag, rsx::method_registers.stencil_func_mask());
VK_GET_SYMBOL(vkCmdSetStencilReference)(*m_current_command_buffer, face_flag, rsx::method_registers.stencil_func_ref());
if (two_sided_stencil)
{
vkCmdSetStencilWriteMask(*m_current_command_buffer, VK_STENCIL_FACE_BACK_BIT, rsx::method_registers.back_stencil_mask());
vkCmdSetStencilCompareMask(*m_current_command_buffer, VK_STENCIL_FACE_BACK_BIT, rsx::method_registers.back_stencil_func_mask());
vkCmdSetStencilReference(*m_current_command_buffer, VK_STENCIL_FACE_BACK_BIT, rsx::method_registers.back_stencil_func_ref());
VK_GET_SYMBOL(vkCmdSetStencilWriteMask)(*m_current_command_buffer, VK_STENCIL_FACE_BACK_BIT, rsx::method_registers.back_stencil_mask());
VK_GET_SYMBOL(vkCmdSetStencilCompareMask)(*m_current_command_buffer, VK_STENCIL_FACE_BACK_BIT, rsx::method_registers.back_stencil_func_mask());
VK_GET_SYMBOL(vkCmdSetStencilReference)(*m_current_command_buffer, VK_STENCIL_FACE_BACK_BIT, rsx::method_registers.back_stencil_func_ref());
}
}
@ -189,12 +189,12 @@ void VKGSRender::update_draw_state()
polygon_offset_bias *= 0.5f;
}
vkCmdSetDepthBias(*m_current_command_buffer, polygon_offset_bias, 0.f, polygon_offset_scale);
VK_GET_SYMBOL(vkCmdSetDepthBias)(*m_current_command_buffer, polygon_offset_bias, 0.f, polygon_offset_scale);
}
else
{
// Zero bias value - disables depth bias
vkCmdSetDepthBias(*m_current_command_buffer, 0.f, 0.f, 0.f);
VK_GET_SYMBOL(vkCmdSetDepthBias)(*m_current_command_buffer, 0.f, 0.f, 0.f);
}
if (m_device->get_depth_bounds_support())
@ -219,7 +219,7 @@ void VKGSRender::update_draw_state()
bounds_max = std::clamp(bounds_max, 0.f, 1.f);
}
vkCmdSetDepthBounds(*m_current_command_buffer, bounds_min, bounds_max);
VK_GET_SYMBOL(vkCmdSetDepthBounds)(*m_current_command_buffer, bounds_min, bounds_max);
}
bind_viewport();
@ -905,7 +905,7 @@ void VKGSRender::emit_geometry(u32 sub_index)
if (reload_state)
{
vkCmdBindPipeline(*m_current_command_buffer, VK_PIPELINE_BIND_POINT_GRAPHICS, m_program->pipeline);
VK_GET_SYMBOL(vkCmdBindPipeline)(*m_current_command_buffer, VK_PIPELINE_BIND_POINT_GRAPHICS, m_program->pipeline);
update_draw_state();
begin_render_pass();
@ -931,11 +931,11 @@ void VKGSRender::emit_geometry(u32 sub_index)
{
if (draw_call.is_trivial_instanced_draw)
{
vkCmdDraw(*m_current_command_buffer, upload_info.vertex_draw_count, draw_call.pass_count(), 0, 0);
VK_GET_SYMBOL(vkCmdDraw)(*m_current_command_buffer, upload_info.vertex_draw_count, draw_call.pass_count(), 0, 0);
}
else if (draw_call.is_single_draw())
{
vkCmdDraw(*m_current_command_buffer, upload_info.vertex_draw_count, 1, 0, 0);
VK_GET_SYMBOL(vkCmdDraw)(*m_current_command_buffer, upload_info.vertex_draw_count, 1, 0, 0);
}
else
{
@ -943,7 +943,7 @@ void VKGSRender::emit_geometry(u32 sub_index)
const auto subranges = draw_call.get_subranges();
for (const auto &range : subranges)
{
vkCmdDraw(*m_current_command_buffer, range.count, 1, vertex_offset, 0);
VK_GET_SYMBOL(vkCmdDraw)(*m_current_command_buffer, range.count, 1, vertex_offset, 0);
vertex_offset += range.count;
}
}
@ -953,15 +953,15 @@ void VKGSRender::emit_geometry(u32 sub_index)
const VkIndexType index_type = std::get<1>(*upload_info.index_info);
const VkDeviceSize offset = std::get<0>(*upload_info.index_info);
vkCmdBindIndexBuffer(*m_current_command_buffer, m_index_buffer_ring_info.heap->value, offset, index_type);
VK_GET_SYMBOL(vkCmdBindIndexBuffer)(*m_current_command_buffer, m_index_buffer_ring_info.heap->value, offset, index_type);
if (draw_call.is_trivial_instanced_draw)
{
vkCmdDrawIndexed(*m_current_command_buffer, upload_info.vertex_draw_count, draw_call.pass_count(), 0, 0, 0);
VK_GET_SYMBOL(vkCmdDrawIndexed)(*m_current_command_buffer, upload_info.vertex_draw_count, draw_call.pass_count(), 0, 0, 0);
}
else if (rsx::method_registers.current_draw_clause.is_single_draw())
{
vkCmdDrawIndexed(*m_current_command_buffer, upload_info.vertex_draw_count, 1, 0, 0, 0);
VK_GET_SYMBOL(vkCmdDrawIndexed)(*m_current_command_buffer, upload_info.vertex_draw_count, 1, 0, 0, 0);
}
else
{
@ -970,7 +970,7 @@ void VKGSRender::emit_geometry(u32 sub_index)
for (const auto &range : subranges)
{
const auto count = get_index_count(draw_call.primitive, range.count);
vkCmdDrawIndexed(*m_current_command_buffer, count, 1, vertex_offset, 0, 0);
VK_GET_SYMBOL(vkCmdDrawIndexed)(*m_current_command_buffer, count, 1, vertex_offset, 0, 0);
vertex_offset += count;
}
}

View file

@ -563,8 +563,8 @@ VKGSRender::VKGSRender(utils::serial* ar) noexcept : GSRender(ar)
for (auto &ctx : frame_context_storage)
{
vkCreateSemaphore((*m_device), &semaphore_info, nullptr, &ctx.present_wait_semaphore);
vkCreateSemaphore((*m_device), &semaphore_info, nullptr, &ctx.acquire_signal_semaphore);
VK_GET_SYMBOL(vkCreateSemaphore)((*m_device), &semaphore_info, nullptr, &ctx.present_wait_semaphore);
VK_GET_SYMBOL(vkCreateSemaphore)((*m_device), &semaphore_info, nullptr, &ctx.acquire_signal_semaphore);
}
const auto& memory_map = m_device->get_memory_mapping();
@ -598,7 +598,7 @@ VKGSRender::VKGSRender(utils::serial* ar) noexcept : GSRender(ar)
VkImageSubresourceRange range = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1 };
vk::change_image_layout(*m_current_command_buffer, target_image, VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, range);
vkCmdClearColorImage(*m_current_command_buffer, target_image, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, &clear_color, 1, &range);
VK_GET_SYMBOL(vkCmdClearColorImage)(*m_current_command_buffer, target_image, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, &clear_color, 1, &range);
vk::change_image_layout(*m_current_command_buffer, target_image, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, target_layout, range);
}
@ -778,7 +778,7 @@ VKGSRender::~VKGSRender()
}
//Wait for device to finish up with resources
vkDeviceWaitIdle(*m_device);
VK_GET_SYMBOL(vkDeviceWaitIdle)(*m_device);
// Globals. TODO: Refactor lifetime management
if (auto async_scheduler = g_fxo->try_get<vk::AsyncTaskScheduler>())
@ -844,8 +844,8 @@ VKGSRender::~VKGSRender()
// NOTE: aux_context uses descriptor pools borrowed from the main queues and any allocations will be automatically freed when pool is destroyed
for (auto &ctx : frame_context_storage)
{
vkDestroySemaphore((*m_device), ctx.present_wait_semaphore, nullptr);
vkDestroySemaphore((*m_device), ctx.acquire_signal_semaphore, nullptr);
VK_GET_SYMBOL(vkDestroySemaphore)((*m_device), ctx.present_wait_semaphore, nullptr);
VK_GET_SYMBOL(vkDestroySemaphore)((*m_device), ctx.acquire_signal_semaphore, nullptr);
ctx.buffer_views_to_clean.clear();
}
@ -859,8 +859,8 @@ VKGSRender::~VKGSRender()
// Pipeline descriptors
m_descriptor_pool.destroy();
vkDestroyPipelineLayout(*m_device, m_pipeline_layout, nullptr);
vkDestroyDescriptorSetLayout(*m_device, m_descriptor_layouts, nullptr);
VK_GET_SYMBOL(vkDestroyPipelineLayout)(*m_device, m_pipeline_layout, nullptr);
VK_GET_SYMBOL(vkDestroyDescriptorSetLayout)(*m_device, m_descriptor_layouts, nullptr);
// Queries
m_occlusion_query_manager.reset();
@ -1337,8 +1337,8 @@ void VKGSRender::bind_viewport()
m_graphics_state.clear(rsx::pipeline_state::zclip_config_state_dirty);
}
vkCmdSetViewport(*m_current_command_buffer, 0, 1, &m_viewport);
vkCmdSetScissor(*m_current_command_buffer, 0, 1, &m_scissor);
VK_GET_SYMBOL(vkCmdSetViewport)(*m_current_command_buffer, 0, 1, &m_viewport);
VK_GET_SYMBOL(vkCmdSetScissor)(*m_current_command_buffer, 0, 1, &m_scissor);
}
void VKGSRender::on_init_thread()
@ -1619,7 +1619,7 @@ void VKGSRender::clear_surface(u32 mask)
if (!clear_descriptors.empty())
{
begin_render_pass();
vkCmdClearAttachments(*m_current_command_buffer, ::size32(clear_descriptors), clear_descriptors.data(), 1, &region);
VK_GET_SYMBOL(vkCmdClearAttachments)(*m_current_command_buffer, ::size32(clear_descriptors), clear_descriptors.data(), 1, &region);
}
}
@ -1711,15 +1711,15 @@ bool VKGSRender::release_GCM_label(u32 address, u32 args)
vk::end_renderpass(*m_current_command_buffer);
}
vkCmdUpdateBuffer(*m_current_command_buffer, mapping.second->value, mapping.first, 4, &write_data);
VK_GET_SYMBOL(vkCmdUpdateBuffer)(*m_current_command_buffer, mapping.second->value, mapping.first, 4, &write_data);
flush_command_queue();
}
else
{
auto cmd = m_secondary_cb_list.next();
cmd->begin();
vkCmdUpdateBuffer(*cmd, mapping.second->value, mapping.first, 4, &write_data);
vkCmdUpdateBuffer(*cmd, m_host_object_data->value, ::offset32(&vk::host_data_t::commands_complete_event), 8, &release_event_id);
VK_GET_SYMBOL(vkCmdUpdateBuffer)(*cmd, mapping.second->value, mapping.first, 4, &write_data);
VK_GET_SYMBOL(vkCmdUpdateBuffer)(*cmd, m_host_object_data->value, ::offset32(&vk::host_data_t::commands_complete_event), 8, &release_event_id);
cmd->end();
vk::queue_submit_t submit_info = { m_device->get_graphics_queue(), nullptr };
@ -1741,7 +1741,7 @@ void VKGSRender::on_guest_texture_read(const vk::command_buffer& cmd)
// Queue a sync update on the CB doing the load
auto host_ctx = ensure(m_host_dma_ctrl->host_ctx());
const auto event_id = host_ctx->on_texture_load_acquire();
vkCmdUpdateBuffer(cmd, m_host_object_data->value, ::offset32(&vk::host_data_t::texture_load_complete_event), sizeof(u64), &event_id);
VK_GET_SYMBOL(vkCmdUpdateBuffer)(cmd, m_host_object_data->value, ::offset32(&vk::host_data_t::texture_load_complete_event), sizeof(u64), &event_id);
}
void VKGSRender::sync_hint(rsx::FIFO::interrupt_hint hint, rsx::reports::sync_hint_payload_t payload)
@ -2334,7 +2334,7 @@ void VKGSRender::update_vertex_env(u32 id, const vk::vertex_upload_info& vertex_
data_size = 20;
}
vkCmdPushConstants(*m_current_command_buffer, m_pipeline_layout, VK_SHADER_STAGE_VERTEX_BIT, 0, data_size, draw_info);
VK_GET_SYMBOL(vkCmdPushConstants)(*m_current_command_buffer, m_pipeline_layout, VK_SHADER_STAGE_VERTEX_BIT, 0, data_size, draw_info);
const usz data_offset = (id * 128) + m_vertex_layout_stream_info.offset;
auto dst = m_vertex_layout_ring_info.map(data_offset, 128);
@ -2428,7 +2428,7 @@ void VKGSRender::patch_transform_constants(rsx::context* ctx, u32 index, u32 cou
preserve_renderpass);
// FIXME: This is illegal during a renderpass
vkCmdUpdateBuffer(
VK_GET_SYMBOL(vkCmdUpdateBuffer)(
*m_current_command_buffer,
m_vertex_constants_buffer_info.buffer,
data_range.first,
@ -2524,7 +2524,7 @@ void VKGSRender::close_and_submit_command_buffer(vk::fence* pFence, VkSemaphore
if (m_host_dma_ctrl && m_host_dma_ctrl->host_ctx()->needs_label_release())
{
vkCmdUpdateBuffer(*m_current_command_buffer,
VK_GET_SYMBOL(vkCmdUpdateBuffer)(*m_current_command_buffer,
m_host_object_data->value,
::offset32(&vk::host_data_t::commands_complete_event),
sizeof(u64),
@ -3132,7 +3132,7 @@ void VKGSRender::begin_conditional_rendering(const std::vector<rsx::reports::occ
ensure(dst_offset > 4);
// Clear result to zero
vkCmdFillBuffer(*m_current_command_buffer, m_cond_render_buffer->value, 0, 4, 0);
VK_GET_SYMBOL(vkCmdFillBuffer)(*m_current_command_buffer, m_cond_render_buffer->value, 0, 4, 0);
vk::insert_buffer_memory_barrier(*m_current_command_buffer, m_cond_render_buffer->value, 0, 4,
VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,

View file

@ -73,7 +73,7 @@ namespace vk
}
++reset_id;
CHECK_RESULT(vkResetCommandBuffer(commands, 0));
CHECK_RESULT(VK_GET_SYMBOL(vkResetCommandBuffer)(commands, 0));
}
bool poke()
@ -90,7 +90,7 @@ namespace vk
return false;
}
if (vkGetFenceStatus(pool->get_owner(), m_submit_fence->handle) == VK_SUCCESS)
if (VK_GET_SYMBOL(vkGetFenceStatus)(pool->get_owner(), m_submit_fence->handle) == VK_SUCCESS)
{
lock.upgrade();

View file

@ -152,6 +152,14 @@ namespace vk
rsx_log.warning("Unsupported device: %s", gpu_name);
}
if (g_cfg.video.vk.workarounds.no_primitive_restart) g_drv_no_primitive_restart = true;
if (g_cfg.video.vk.workarounds.sanitize_fp_values) g_drv_sanitize_fp_values = true;
if (g_cfg.video.vk.workarounds.disable_fence_reset) g_drv_disable_fence_reset = true;
if (g_cfg.video.vk.workarounds.emulate_cond_render) g_drv_emulate_cond_render = true;
if (g_cfg.video.vk.workarounds.strict_query_scopes) g_drv_strict_query_scopes = true;
if (g_cfg.video.vk.workarounds.force_reuse_query_pools) g_drv_force_reuse_query_pools = true;
rsx_log.notice("Vulkan: Renderer initialized on device '%s'", gpu_name);
{
@ -180,15 +188,15 @@ namespace vk
for (const auto &usage : types)
{
info.usage = usage;
CHECK_RESULT(vkCreateBuffer(*g_render_device, &info, nullptr, &tmp));
CHECK_RESULT(VK_GET_SYMBOL(vkCreateBuffer)(*g_render_device, &info, nullptr, &tmp));
vkGetBufferMemoryRequirements(*g_render_device, tmp, &memory_reqs);
VK_GET_SYMBOL(vkGetBufferMemoryRequirements)(*g_render_device, tmp, &memory_reqs);
if (g_render_device->get_compatible_memory_type(memory_reqs.memoryTypeBits, memory_flags, nullptr))
{
g_heap_compatible_buffer_types |= usage;
}
vkDestroyBuffer(*g_render_device, tmp, nullptr);
VK_GET_SYMBOL(vkDestroyBuffer)(*g_render_device, tmp, nullptr);
}
}

View file

@ -115,7 +115,7 @@ namespace vk
layout_info.pPushConstantRanges = push_constants.data();
}
CHECK_RESULT(vkCreatePipelineLayout(*m_device, &layout_info, nullptr, &m_pipeline_layout));
CHECK_RESULT(VK_GET_SYMBOL(vkCreatePipelineLayout)(*m_device, &layout_info, nullptr, &m_pipeline_layout));
}
std::vector<vk::glsl::program_input> overlay_pass::get_vertex_inputs()
@ -254,12 +254,12 @@ namespace vk
program->bind_uniform(info, "fs" + std::to_string(n), VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, m_descriptor_set);
}
vkCmdBindPipeline(cmd, VK_PIPELINE_BIND_POINT_GRAPHICS, program->pipeline);
VK_GET_SYMBOL(vkCmdBindPipeline)(cmd, VK_PIPELINE_BIND_POINT_GRAPHICS, program->pipeline);
m_descriptor_set.bind(cmd, VK_PIPELINE_BIND_POINT_GRAPHICS, m_pipeline_layout);
VkBuffer buffers = m_vao.heap->value;
VkDeviceSize offsets = m_vao_offset;
vkCmdBindVertexBuffers(cmd, 0, 1, &buffers, &offsets);
VK_GET_SYMBOL(vkCmdBindVertexBuffers)(cmd, 0, 1, &buffers, &offsets);
}
void overlay_pass::create(const vk::render_device& dev)
@ -282,8 +282,8 @@ namespace vk
m_program_cache.clear();
m_sampler.reset();
vkDestroyDescriptorSetLayout(*m_device, m_descriptor_layout, nullptr);
vkDestroyPipelineLayout(*m_device, m_pipeline_layout, nullptr);
VK_GET_SYMBOL(vkDestroyDescriptorSetLayout)(*m_device, m_descriptor_layout, nullptr);
VK_GET_SYMBOL(vkDestroyPipelineLayout)(*m_device, m_pipeline_layout, nullptr);
m_descriptor_pool.destroy();
initialized = false;
@ -305,7 +305,7 @@ namespace vk
void overlay_pass::emit_geometry(vk::command_buffer& cmd)
{
vkCmdDraw(cmd, num_drawable_elements, 1, first_vertex, 0);
VK_GET_SYMBOL(vkCmdDraw)(cmd, num_drawable_elements, 1, first_vertex, 0);
}
void overlay_pass::set_up_viewport(vk::command_buffer& cmd, u32 x, u32 y, u32 w, u32 h)
@ -317,10 +317,10 @@ namespace vk
vp.height = static_cast<f32>(h);
vp.minDepth = 0.f;
vp.maxDepth = 1.f;
vkCmdSetViewport(cmd, 0, 1, &vp);
VK_GET_SYMBOL(vkCmdSetViewport)(cmd, 0, 1, &vp);
VkRect2D vs = { { static_cast<s32>(x), static_cast<s32>(y) }, { w, h } };
vkCmdSetScissor(cmd, 0, 1, &vs);
VK_GET_SYMBOL(vkCmdSetScissor)(cmd, 0, 1, &vs);
}
void overlay_pass::run(vk::command_buffer& cmd, const areau& viewport, vk::framebuffer* fbo, const std::vector<vk::image_view*>& src, VkRenderPass render_pass)
@ -412,7 +412,7 @@ namespace vk
.imageExtent = { static_cast<u32>(w), static_cast<u32>(h), 1u }
};
change_image_layout(cmd, tex, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, range);
vkCmdCopyBufferToImage(cmd, upload_heap.heap->value, tex->value, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &region);
VK_GET_SYMBOL(vkCmdCopyBufferToImage)(cmd, upload_heap.heap->value, tex->value, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &region);
change_image_layout(cmd, tex, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, range);
}
@ -600,7 +600,7 @@ namespace vk
.get();
push_buf[16] = std::bit_cast<f32>(vert_config);
vkCmdPushConstants(cmd, m_pipeline_layout, VK_SHADER_STAGE_VERTEX_BIT, 0, 68, push_buf);
VK_GET_SYMBOL(vkCmdPushConstants)(cmd, m_pipeline_layout, VK_SHADER_STAGE_VERTEX_BIT, 0, 68, push_buf);
// 2. Fragment stuff
rsx::overlays::fragment_options frag_opts;
@ -614,7 +614,7 @@ namespace vk
push_buf[1] = m_time;
push_buf[2] = m_blur_strength;
vkCmdPushConstants(cmd, m_pipeline_layout, VK_SHADER_STAGE_FRAGMENT_BIT, 68, 12, push_buf);
VK_GET_SYMBOL(vkCmdPushConstants)(cmd, m_pipeline_layout, VK_SHADER_STAGE_FRAGMENT_BIT, 68, 12, push_buf);
}
void ui_overlay_renderer::set_primitive_type(rsx::overlays::primitive_type type)
@ -651,7 +651,7 @@ namespace vk
for (u32 n = 0; n < num_quads; ++n)
{
vkCmdDraw(cmd, 4, 1, first, 0);
VK_GET_SYMBOL(vkCmdDraw)(cmd, 4, 1, first, 0);
first += 4;
}
}
@ -786,7 +786,7 @@ namespace vk
data[6] = colormask.b;
data[7] = colormask.a;
vkCmdPushConstants(cmd, m_pipeline_layout, VK_SHADER_STAGE_VERTEX_BIT, 0, 32, data);
VK_GET_SYMBOL(vkCmdPushConstants)(cmd, m_pipeline_layout, VK_SHADER_STAGE_VERTEX_BIT, 0, 32, data);
}
void attachment_clear_pass::set_up_viewport(vk::command_buffer& cmd, u32 x, u32 y, u32 w, u32 h)
@ -798,9 +798,9 @@ namespace vk
vp.height = static_cast<f32>(h);
vp.minDepth = 0.f;
vp.maxDepth = 1.f;
vkCmdSetViewport(cmd, 0, 1, &vp);
VK_GET_SYMBOL(vkCmdSetViewport)(cmd, 0, 1, &vp);
vkCmdSetScissor(cmd, 0, 1, &region);
VK_GET_SYMBOL(vkCmdSetScissor)(cmd, 0, 1, &region);
}
void attachment_clear_pass::run(vk::command_buffer& cmd, vk::framebuffer* target, VkRect2D rect, u32 clearmask, color4f color, VkRenderPass render_pass)
@ -861,9 +861,9 @@ namespace vk
vp.height = static_cast<f32>(h);
vp.minDepth = 0.f;
vp.maxDepth = 1.f;
vkCmdSetViewport(cmd, 0, 1, &vp);
VK_GET_SYMBOL(vkCmdSetViewport)(cmd, 0, 1, &vp);
vkCmdSetScissor(cmd, 0, 1, &region);
VK_GET_SYMBOL(vkCmdSetScissor)(cmd, 0, 1, &region);
}
void stencil_clear_pass::run(vk::command_buffer& cmd, vk::render_target* target, VkRect2D rect, u32 stencil_clear, u32 stencil_write_mask, VkRenderPass render_pass)
@ -922,7 +922,7 @@ namespace vk
void video_out_calibration_pass::update_uniforms(vk::command_buffer& cmd, vk::glsl::program* /*program*/)
{
vkCmdPushConstants(cmd, m_pipeline_layout, VK_SHADER_STAGE_FRAGMENT_BIT, 0, 16, config.data);
VK_GET_SYMBOL(vkCmdPushConstants)(cmd, m_pipeline_layout, VK_SHADER_STAGE_FRAGMENT_BIT, 0, 16, config.data);
}
void video_out_calibration_pass::run(vk::command_buffer& cmd, const areau& viewport, vk::framebuffer* target,

View file

@ -53,7 +53,7 @@ namespace vk
std::unique_ptr<glsl::program> pipe_compiler::int_compile_compute_pipe(const VkComputePipelineCreateInfo& create_info, VkPipelineLayout pipe_layout)
{
VkPipeline pipeline;
vkCreateComputePipelines(*g_render_device, nullptr, 1, &create_info, nullptr, &pipeline);
VK_GET_SYMBOL(vkCreateComputePipelines)(*g_render_device, nullptr, 1, &create_info, nullptr, &pipeline);
return std::make_unique<vk::glsl::program>(*m_device, pipeline, pipe_layout);
}
@ -61,7 +61,7 @@ namespace vk
const std::vector<glsl::program_input>& vs_inputs, const std::vector<glsl::program_input>& fs_inputs)
{
VkPipeline pipeline;
CHECK_RESULT(vkCreateGraphicsPipelines(*m_device, VK_NULL_HANDLE, 1, &create_info, nullptr, &pipeline));
CHECK_RESULT(VK_GET_SYMBOL(vkCreateGraphicsPipelines)(*m_device, VK_NULL_HANDLE, 1, &create_info, nullptr, &pipeline));
auto result = std::make_unique<vk::glsl::program>(*m_device, pipeline, pipe_layout, vs_inputs, fs_inputs);
result->link();
return result;

View file

@ -40,7 +40,7 @@ void VKGSRender::reinitialize_swapchain()
surface_lost = false;
#ifdef ANDROID
vkDeviceWaitIdle(*m_device);
VK_GET_SYMBOL(vkDeviceWaitIdle)(*m_device);
auto handle = m_frame->handle();
m_swapchain->create(handle);
@ -77,7 +77,7 @@ void VKGSRender::reinitialize_swapchain()
m_upscaler.reset();
// Drain all the queues
vkDeviceWaitIdle(*m_device);
VK_GET_SYMBOL(vkDeviceWaitIdle)(*m_device);
// Rebuild swapchain. Old swapchain destruction is handled by the init_swapchain call
if (!m_swapchain->init(m_swapchain_dims.width, m_swapchain_dims.height))
@ -96,7 +96,7 @@ void VKGSRender::reinitialize_swapchain()
VkImageSubresourceRange range = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1};
vk::change_image_layout(*m_current_command_buffer, target_image, VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, range);
vkCmdClearColorImage(*m_current_command_buffer, target_image, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, &clear_color, 1, &range);
VK_GET_SYMBOL(vkCmdClearColorImage)(*m_current_command_buffer, target_image, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, &clear_color, 1, &range);
vk::change_image_layout(*m_current_command_buffer, target_image, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, target_layout, range);
}
@ -657,7 +657,7 @@ void VKGSRender::flip(const rsx::display_flip_info_t& info)
// Clear the window background to black
VkClearColorValue clear_black {};
vk::change_image_layout(*m_current_command_buffer, target_image, present_layout, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, subresource_range);
vkCmdClearColorImage(*m_current_command_buffer, target_image, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, &clear_black, 1, &subresource_range);
VK_GET_SYMBOL(vkCmdClearColorImage)(*m_current_command_buffer, target_image, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, &clear_black, 1, &subresource_range);
target_layout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
}
@ -807,7 +807,7 @@ void VKGSRender::flip(const rsx::display_flip_info_t& info)
barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
barrier.subresourceRange = subresource_range;
vkCmdPipelineBarrier(*m_current_command_buffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_DEPENDENCY_BY_REGION_BIT, 0, nullptr, 0, nullptr, 1, &barrier);
VK_GET_SYMBOL(vkCmdPipelineBarrier)(*m_current_command_buffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_DEPENDENCY_BY_REGION_BIT, 0, nullptr, 0, nullptr, 1, &barrier);
target_layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
}

View file

@ -37,7 +37,7 @@ namespace vk
vs_info.pCode = m_compiled.data();
vs_info.flags = 0;
vkCreateShaderModule(*g_render_device, &vs_info, nullptr, &m_handle);
VK_GET_SYMBOL(vkCreateShaderModule)(*g_render_device, &vs_info, nullptr, &m_handle);
return m_handle;
}
@ -49,7 +49,7 @@ namespace vk
if (m_handle)
{
vkDestroyShaderModule(*g_render_device, m_handle, nullptr);
VK_GET_SYMBOL(vkDestroyShaderModule)(*g_render_device, m_handle, nullptr);
m_handle = nullptr;
}
}
@ -96,7 +96,7 @@ namespace vk
program::~program()
{
vkDestroyPipeline(m_device, pipeline, nullptr);
VK_GET_SYMBOL(vkDestroyPipeline)(m_device, pipeline, nullptr);
}
program& program::load_uniforms(const std::vector<program_input>& inputs)

View file

@ -16,7 +16,7 @@ namespace vk
// 2. The backend has fully processed the query and found no hits
u32 result[2] = { 0, 0 };
switch (const auto error = vkGetQueryPoolResults(*owner, *query.pool, index, 1, 8, result, 8, flags | VK_QUERY_RESULT_WITH_AVAILABILITY_BIT))
switch (const auto error = VK_GET_SYMBOL(vkGetQueryPoolResults)(*owner, *query.pool, index, 1, 8, result, 8, flags | VK_QUERY_RESULT_WITH_AVAILABILITY_BIT))
{
case VK_SUCCESS:
{
@ -89,7 +89,7 @@ namespace vk
}
// From spec: "After query pool creation, each query must be reset before it is used."
vkCmdResetQueryPool(cmd, *m_current_query_pool.get(), 0, m_current_query_pool->size());
VK_GET_SYMBOL(vkCmdResetQueryPool)(cmd, *m_current_query_pool.get(), 0, m_current_query_pool->size());
m_pool_lifetime_counter = m_current_query_pool->size();
}
@ -148,12 +148,12 @@ namespace vk
query_info.pool = m_current_query_pool.get();
query_info.active = true;
vkCmdBeginQuery(cmd, *query_info.pool, index, control_flags);
VK_GET_SYMBOL(vkCmdBeginQuery)(cmd, *query_info.pool, index, control_flags);
}
void query_pool_manager::end_query(vk::command_buffer& cmd, u32 index)
{
vkCmdEndQuery(cmd, *query_slot_status[index].pool, index);
VK_GET_SYMBOL(vkCmdEndQuery)(cmd, *query_slot_status[index].pool, index);
}
bool query_pool_manager::check_query_status(u32 index)
@ -184,7 +184,7 @@ namespace vk
{
// We're technically supposed to stop any active renderpasses before streaming the results out, but that doesn't matter on IMR hw
// On TBDR setups like the apple M series, the stop is required (results are all 0 if you don't flush the RP), but this introduces a very heavy performance loss.
vkCmdCopyQueryPoolResults(cmd, *query_slot_status[index].pool, index, count, dst, dst_offset, 4, VK_QUERY_RESULT_WAIT_BIT);
VK_GET_SYMBOL(vkCmdCopyQueryPoolResults)(cmd, *query_slot_status[index].pool, index, count, dst, dst_offset, 4, VK_QUERY_RESULT_WAIT_BIT);
}
void query_pool_manager::free_query(vk::command_buffer&/*cmd*/, u32 index)

View file

@ -319,7 +319,7 @@ namespace vk
rp_info.pSubpasses = &subpass;
VkRenderPass result;
CHECK_RESULT(vkCreateRenderPass(dev, &rp_info, NULL, &result));
CHECK_RESULT(VK_GET_SYMBOL(vkCreateRenderPass)(dev, &rp_info, NULL, &result));
g_renderpass_cache[renderpass_key] = result;
return result;
@ -335,7 +335,7 @@ namespace vk
// Destroy cache
for (const auto &renderpass : g_renderpass_cache)
{
vkDestroyRenderPass(dev, renderpass.second, nullptr);
VK_GET_SYMBOL(vkDestroyRenderPass)(dev, renderpass.second, nullptr);
}
g_renderpass_cache.clear();
@ -362,7 +362,7 @@ namespace vk
rp_begin.renderArea.extent.width = framebuffer_region.width;
rp_begin.renderArea.extent.height = framebuffer_region.height;
vkCmdBeginRenderPass(cmd, &rp_begin, VK_SUBPASS_CONTENTS_INLINE);
VK_GET_SYMBOL(vkCmdBeginRenderPass)(cmd, &rp_begin, VK_SUBPASS_CONTENTS_INLINE);
renderpass_info = { pass, target };
}
@ -379,7 +379,7 @@ namespace vk
void end_renderpass(const vk::command_buffer& cmd)
{
vkCmdEndRenderPass(cmd);
VK_GET_SYMBOL(vkCmdEndRenderPass)(cmd);
g_current_renderpass[cmd] = {};
}

View file

@ -485,12 +485,12 @@ namespace vk
if (surface->aspect() & VK_IMAGE_ASPECT_COLOR_BIT)
{
VkClearColorValue color = { {0.f, 0.f, 0.f, 1.f} };
vkCmdClearColorImage(cmd, surface->value, surface->current_layout, &color, 1, &range);
VK_GET_SYMBOL(vkCmdClearColorImage)(cmd, surface->value, surface->current_layout, &color, 1, &range);
}
else
{
VkClearDepthStencilValue clear{ 1.f, 255 };
vkCmdClearDepthStencilImage(cmd, surface->value, surface->current_layout, &clear, 1, &range);
VK_GET_SYMBOL(vkCmdClearDepthStencilImage)(cmd, surface->value, surface->current_layout, &clear, 1, &range);
}
surface->pop_layout(cmd);
@ -619,7 +619,7 @@ namespace vk
const auto regions = build_spill_transfer_descriptors(src);
src->change_layout(cmd, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL);
vkCmdCopyImageToBuffer(cmd, src->value, src->current_layout, m_spilled_mem->value, ::size32(regions), regions.data());
VK_GET_SYMBOL(vkCmdCopyImageToBuffer)(cmd, src->value, src->current_layout, m_spilled_mem->value, ::size32(regions), regions.data());
// Destroy this object through a cloned object
auto obj = std::unique_ptr<viewable_image>(clone());
@ -656,7 +656,7 @@ namespace vk
const auto regions = build_spill_transfer_descriptors(dst);
dst->change_layout(cmd, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);
vkCmdCopyBufferToImage(cmd, m_spilled_mem->value, dst->value, dst->current_layout, ::size32(regions), regions.data());
VK_GET_SYMBOL(vkCmdCopyBufferToImage)(cmd, m_spilled_mem->value, dst->value, dst->current_layout, ::size32(regions), regions.data());
if (samples() > 1)
{

View file

@ -613,7 +613,7 @@ namespace vk
if (dest != bo)
{
VkBufferCopy copy = { src_offset_in_buffer, dst_offset_in_buffer, max_copy_length };
vkCmdCopyBuffer(cmd, dest->value, bo->value, 1, &copy);
VK_GET_SYMBOL(vkCmdCopyBuffer)(cmd, dest->value, bo->value, 1, &copy);
vk::insert_buffer_memory_barrier(cmd,
bo->value, dst_offset_in_buffer, max_copy_length,
@ -653,7 +653,7 @@ namespace vk
VkBufferCopy copy = { 0, offset, ::size32(*bo) };
offset += ::size32(*bo);
vkCmdCopyBuffer(cmd, bo->value, dst->value, 1, &copy);
VK_GET_SYMBOL(vkCmdCopyBuffer)(cmd, bo->value, dst->value, 1, &copy);
// Cleanup
vk::surface_cache_utils::dispose(bo);

View file

@ -110,7 +110,7 @@ namespace vk
VkImageSubresourceRange range{ VK_IMAGE_ASPECT_STENCIL_BIT, 0, 1, 0, 1 };
dst->push_layout(cmd, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);
vkCmdClearDepthStencilImage(cmd, dst->value, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, &clear, 1, &range);
VK_GET_SYMBOL(vkCmdClearDepthStencilImage)(cmd, dst->value, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, &clear, 1, &range);
dst->pop_layout(cmd);
}
}
@ -179,7 +179,7 @@ namespace vk
VkImageSubresourceRange range{ VK_IMAGE_ASPECT_STENCIL_BIT, 0, 1, 0, 1 };
dst->push_layout(cmd, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);
vkCmdClearDepthStencilImage(cmd, dst->value, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, &clear, 1, &range);
VK_GET_SYMBOL(vkCmdClearDepthStencilImage)(cmd, dst->value, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, &clear, 1, &range);
dst->pop_layout(cmd);
}
}

View file

@ -128,7 +128,7 @@ namespace vk
void update_uniforms(vk::command_buffer& cmd, vk::glsl::program* /*program*/) override
{
vkCmdPushConstants(cmd, m_pipeline_layout, VK_SHADER_STAGE_FRAGMENT_BIT, 0, static_parameters_width * 4, static_parameters);
VK_GET_SYMBOL(vkCmdPushConstants)(cmd, m_pipeline_layout, VK_SHADER_STAGE_FRAGMENT_BIT, 0, static_parameters_width * 4, static_parameters);
}
void update_sample_configuration(vk::image* msaa_image)
@ -228,12 +228,12 @@ namespace vk
void emit_geometry(vk::command_buffer& cmd) override
{
vkCmdClearAttachments(cmd, 1, &clear_info, 1, &region);
VK_GET_SYMBOL(vkCmdClearAttachments)(cmd, 1, &clear_info, 1, &region);
for (s32 write_mask = 0x1; write_mask <= 0x80; write_mask <<= 1)
{
vkCmdSetStencilWriteMask(cmd, VK_STENCIL_FRONT_AND_BACK, write_mask);
vkCmdPushConstants(cmd, m_pipeline_layout, VK_SHADER_STAGE_FRAGMENT_BIT, 8, 4, &write_mask);
VK_GET_SYMBOL(vkCmdSetStencilWriteMask)(cmd, VK_STENCIL_FRONT_AND_BACK, write_mask);
VK_GET_SYMBOL(vkCmdPushConstants)(cmd, m_pipeline_layout, VK_SHADER_STAGE_FRAGMENT_BIT, 8, 4, &write_mask);
overlay_pass::emit_geometry(cmd);
}
@ -287,12 +287,12 @@ namespace vk
void emit_geometry(vk::command_buffer& cmd) override
{
vkCmdClearAttachments(cmd, 1, &clear_info, 1, &clear_region);
VK_GET_SYMBOL(vkCmdClearAttachments)(cmd, 1, &clear_info, 1, &clear_region);
for (s32 write_mask = 0x1; write_mask <= 0x80; write_mask <<= 1)
{
vkCmdSetStencilWriteMask(cmd, VK_STENCIL_FRONT_AND_BACK, write_mask);
vkCmdPushConstants(cmd, m_pipeline_layout, VK_SHADER_STAGE_FRAGMENT_BIT, 8, 4, &write_mask);
VK_GET_SYMBOL(vkCmdSetStencilWriteMask)(cmd, VK_STENCIL_FRONT_AND_BACK, write_mask);
VK_GET_SYMBOL(vkCmdPushConstants)(cmd, m_pipeline_layout, VK_SHADER_STAGE_FRAGMENT_BIT, 8, 4, &write_mask);
overlay_pass::emit_geometry(cmd);
}

View file

@ -367,7 +367,7 @@ namespace vk
layout_info.pPushConstantRanges = push_constants.data();
VkPipelineLayout result;
CHECK_RESULT(vkCreatePipelineLayout(dev, &layout_info, nullptr, &result));
CHECK_RESULT(VK_GET_SYMBOL(vkCreatePipelineLayout)(dev, &layout_info, nullptr, &result));
return { set_layout, result };
}
@ -399,13 +399,13 @@ namespace vk
if (m_shared_pipeline_layout)
{
vkDestroyPipelineLayout(m_device, m_shared_pipeline_layout, nullptr);
VK_GET_SYMBOL(vkDestroyPipelineLayout)(m_device, m_shared_pipeline_layout, nullptr);
m_shared_pipeline_layout = VK_NULL_HANDLE;
}
if (m_shared_descriptor_layout)
{
vkDestroyDescriptorSetLayout(m_device, m_shared_descriptor_layout, nullptr);
VK_GET_SYMBOL(vkDestroyDescriptorSetLayout)(m_device, m_shared_descriptor_layout, nullptr);
m_shared_descriptor_layout = VK_NULL_HANDLE;
}
}

View file

@ -68,7 +68,7 @@ namespace vk
default:
{
ensure(!options.swap_bytes); // "Implicit byteswap option not supported for specified format"
vkCmdCopyImageToBuffer(cmd, src->value, src->current_layout, dst->value, 1, &region);
VK_GET_SYMBOL(vkCmdCopyImageToBuffer)(cmd, src->value, src->current_layout, dst->value, 1, &region);
if (options.sync_region)
{
@ -100,7 +100,7 @@ namespace vk
VkBufferImageCopy region2;
region2 = region;
region2.bufferOffset = z32_offset;
vkCmdCopyImageToBuffer(cmd, src->value, src->current_layout, dst->value, 1, &region2);
VK_GET_SYMBOL(vkCmdCopyImageToBuffer)(cmd, src->value, src->current_layout, dst->value, 1, &region2);
// 2. Pre-compute barrier
vk::insert_buffer_memory_barrier(cmd, dst->value, z32_offset, packed32_length,
@ -158,7 +158,7 @@ namespace vk
sub_regions[0].imageSubresource.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT;
sub_regions[1].bufferOffset = s_offset;
sub_regions[1].imageSubresource.aspectMask = VK_IMAGE_ASPECT_STENCIL_BIT;
vkCmdCopyImageToBuffer(cmd, src->value, src->current_layout, dst->value, 2, sub_regions);
VK_GET_SYMBOL(vkCmdCopyImageToBuffer)(cmd, src->value, src->current_layout, dst->value, 2, sub_regions);
// 2. Interleave the separated data blocks with a compute job
vk::cs_interleave_task *job;
@ -229,7 +229,7 @@ namespace vk
{
default:
{
vkCmdCopyBufferToImage(cmd, src->value, dst->value, dst->current_layout, 1, &region);
VK_GET_SYMBOL(vkCmdCopyBufferToImage)(cmd, src->value, dst->value, dst->current_layout, 1, &region);
break;
}
case VK_FORMAT_D32_SFLOAT:
@ -265,7 +265,7 @@ namespace vk
// 5. Copy the depth data to image
VkBufferImageCopy region2 = region;
region2.bufferOffset = z32_offset;
vkCmdCopyBufferToImage(cmd, src->value, dst->value, dst->current_layout, 1, &region2);
VK_GET_SYMBOL(vkCmdCopyBufferToImage)(cmd, src->value, dst->value, dst->current_layout, 1, &region2);
break;
}
case VK_FORMAT_D24_UNORM_S8_UINT:
@ -285,7 +285,7 @@ namespace vk
const auto s_offset = utils::align<u32>(z_offset + in_depth_size, 256);
// Zero out the stencil block
vkCmdFillBuffer(cmd, src->value, s_offset, utils::align(in_stencil_size, 4), 0);
VK_GET_SYMBOL(vkCmdFillBuffer)(cmd, src->value, s_offset, utils::align(in_stencil_size, 4), 0);
vk::insert_buffer_memory_barrier(cmd, src->value, s_offset, in_stencil_size,
VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
@ -319,7 +319,7 @@ namespace vk
sub_regions[0].imageSubresource.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT;
sub_regions[1].bufferOffset = s_offset;
sub_regions[1].imageSubresource.aspectMask = VK_IMAGE_ASPECT_STENCIL_BIT;
vkCmdCopyBufferToImage(cmd, src->value, dst->value, dst->current_layout, 2, sub_regions);
VK_GET_SYMBOL(vkCmdCopyBufferToImage)(cmd, src->value, dst->value, dst->current_layout, 2, sub_regions);
break;
}
}
@ -506,7 +506,7 @@ namespace vk
for (u32 mip_level = 0; mip_level < mipmaps; ++mip_level)
{
vkCmdCopyImage(cmd, src->value, src->current_layout, dst->value, dst->current_layout, 1, &rgn);
VK_GET_SYMBOL(vkCmdCopyImage)(cmd, src->value, src->current_layout, dst->value, dst->current_layout, 1, &rgn);
rgn.srcSubresource.mipLevel++;
rgn.dstSubresource.mipLevel++;
@ -555,7 +555,7 @@ namespace vk
copy_rgn.srcSubresource = { src->aspect(), 0, 0, 1 };
copy_rgn.extent = { static_cast<u32>(src_rect.width()), static_cast<u32>(src_rect.height()), 1 };
vkCmdCopyImage(cmd, src->value, src->current_layout, dst->value, dst->current_layout, 1, &copy_rgn);
VK_GET_SYMBOL(vkCmdCopyImage)(cmd, src->value, src->current_layout, dst->value, dst->current_layout, 1, &copy_rgn);
}
else if ((src->aspect() & (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT)) != 0)
{
@ -609,12 +609,12 @@ namespace vk
info.imageExtent = { static_cast<u32>(src_w), static_cast<u32>(src_h), 1 };
info.imageSubresource = { aspect & transfer_flags, 0, 0, 1 };
vkCmdCopyImageToBuffer(cmd, src->value, src->current_layout, scratch_buf->value, 1, &info);
VK_GET_SYMBOL(vkCmdCopyImageToBuffer)(cmd, src->value, src->current_layout, scratch_buf->value, 1, &info);
insert_buffer_memory_barrier(cmd, scratch_buf->value, 0, VK_WHOLE_SIZE, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_ACCESS_TRANSFER_WRITE_BIT, VK_ACCESS_TRANSFER_READ_BIT);
info.imageOffset = {};
info.imageSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
vkCmdCopyBufferToImage(cmd, scratch_buf->value, typeless->value, VK_IMAGE_LAYOUT_GENERAL, 1, &info);
VK_GET_SYMBOL(vkCmdCopyBufferToImage)(cmd, scratch_buf->value, typeless->value, VK_IMAGE_LAYOUT_GENERAL, 1, &info);
//2. Blit typeless surface to self and apply transform if necessary
areai src_rect2 = { 0, 0, src_w, src_h };
@ -637,12 +637,12 @@ namespace vk
info.imageExtent = { static_cast<u32>(dst_w), static_cast<u32>(dst_h), 1 };
info.imageOffset = { 0, src_h, 0 };
vkCmdCopyImageToBuffer(cmd, typeless->value, VK_IMAGE_LAYOUT_GENERAL, scratch_buf->value, 1, &info);
VK_GET_SYMBOL(vkCmdCopyImageToBuffer)(cmd, typeless->value, VK_IMAGE_LAYOUT_GENERAL, scratch_buf->value, 1, &info);
insert_buffer_memory_barrier(cmd, scratch_buf->value, 0, VK_WHOLE_SIZE, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_ACCESS_TRANSFER_WRITE_BIT, VK_ACCESS_TRANSFER_READ_BIT);
info.imageOffset = { dst_rect.x1, dst_rect.y1, 0 };
info.imageSubresource = { aspect & transfer_flags, 0, 0, 1 };
vkCmdCopyBufferToImage(cmd, scratch_buf->value, dst->value, dst->current_layout, 1, &info);
VK_GET_SYMBOL(vkCmdCopyBufferToImage)(cmd, scratch_buf->value, dst->value, dst->current_layout, 1, &info);
};
const u32 typeless_w = std::max(dst_rect.width(), src_rect.width());
@ -743,7 +743,7 @@ namespace vk
for (u32 mip_level = 0; mip_level < mipmaps; ++mip_level)
{
vkCmdBlitImage(cmd, src->value, src->current_layout, dst->value, dst->current_layout, 1, &rgn, filter);
VK_GET_SYMBOL(vkCmdBlitImage)(cmd, src->value, src->current_layout, dst->value, dst->current_layout, 1, &rgn, filter);
rgn.srcSubresource.mipLevel++;
rgn.dstSubresource.mipLevel++;
@ -1178,13 +1178,13 @@ namespace vk
auto range_ptr = buffer_copies.data();
for (const auto& op : upload_commands)
{
vkCmdCopyBuffer(cmd2, op.first, scratch_buf->value, op.second, range_ptr);
VK_GET_SYMBOL(vkCmdCopyBuffer)(cmd2, op.first, scratch_buf->value, op.second, range_ptr);
range_ptr += op.second;
}
}
else if (!buffer_copies.empty())
{
vkCmdCopyBuffer(cmd2, upload_buffer->value, scratch_buf->value, static_cast<u32>(buffer_copies.size()), buffer_copies.data());
VK_GET_SYMBOL(vkCmdCopyBuffer)(cmd2, upload_buffer->value, scratch_buf->value, static_cast<u32>(buffer_copies.size()), buffer_copies.data());
}
insert_buffer_memory_barrier(cmd2, scratch_buf->value, 0, scratch_offset, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
@ -1218,20 +1218,20 @@ namespace vk
insert_buffer_memory_barrier(cmd2, scratch_buf->value, block_start, scratch_offset, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT,
VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT, VK_ACCESS_TRANSFER_READ_BIT);
vkCmdCopyBufferToImage(cmd2, scratch_buf->value, dst_image->value, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, static_cast<u32>(copy_regions.size()), copy_regions.data());
VK_GET_SYMBOL(vkCmdCopyBufferToImage)(cmd2, scratch_buf->value, dst_image->value, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, static_cast<u32>(copy_regions.size()), copy_regions.data());
}
else if (upload_commands.size() > 1)
{
auto region_ptr = copy_regions.data();
for (const auto& op : upload_commands)
{
vkCmdCopyBufferToImage(cmd2, op.first, dst_image->value, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, op.second, region_ptr);
VK_GET_SYMBOL(vkCmdCopyBufferToImage)(cmd2, op.first, dst_image->value, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, op.second, region_ptr);
region_ptr += op.second;
}
}
else
{
vkCmdCopyBufferToImage(cmd2, upload_buffer->value, dst_image->value, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, static_cast<u32>(copy_regions.size()), copy_regions.data());
VK_GET_SYMBOL(vkCmdCopyBufferToImage)(cmd2, upload_buffer->value, dst_image->value, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, static_cast<u32>(copy_regions.size()), copy_regions.data());
}
if (cmd2.get_queue_family() != cmd.get_queue_family())
@ -1291,7 +1291,7 @@ namespace vk
.dstOffset = tiled_data_scratch_offset,
.size = section_length
};
vkCmdCopyBuffer(cmd, dma_mapping.second->value, scratch_buf->value, 1, &copy_rgn);
VK_GET_SYMBOL(vkCmdCopyBuffer)(cmd, dma_mapping.second->value, scratch_buf->value, 1, &copy_rgn);
// Barrier
vk::insert_buffer_memory_barrier(

View file

@ -215,7 +215,7 @@ namespace vk
mem_load.srcOffset = dma_mapping.first;
mem_load.dstOffset = dst_offset;
mem_load.size = dma_sync_region.length();
vkCmdCopyBuffer(cmd, dma_mapping.second->value, working_buffer->value, 1, &mem_load);
VK_GET_SYMBOL(vkCmdCopyBuffer)(cmd, dma_mapping.second->value, working_buffer->value, 1, &mem_load);
// Transfer -> Compute barrier
vk::insert_buffer_memory_barrier(cmd, working_buffer->value, dst_offset, dma_sync_region.length(),
@ -296,7 +296,7 @@ namespace vk
copy.srcOffset = result_offset;
copy.dstOffset = dma_mapping.first;
copy.size = dma_sync_region.length();
vkCmdCopyBuffer(cmd, working_buffer->value, dma_mapping.second->value, 1, &copy);
VK_GET_SYMBOL(vkCmdCopyBuffer)(cmd, working_buffer->value, dma_mapping.second->value, 1, &copy);
}
else
{
@ -315,7 +315,7 @@ namespace vk
dst_offset += rsx_pitch;
}
vkCmdCopyBuffer(cmd, working_buffer->value, dma_mapping.second->value, transfer_height, copy.data());
VK_GET_SYMBOL(vkCmdCopyBuffer)(cmd, working_buffer->value, dma_mapping.second->value, transfer_height, copy.data());
}
}
else
@ -329,7 +329,7 @@ namespace vk
region.imageExtent = { transfer_width, transfer_height, 1 };
region.bufferOffset = dma_mapping.first;
vkCmdCopyImageToBuffer(cmd, src->value, src->current_layout, dma_mapping.second->value, 1, &region);
VK_GET_SYMBOL(vkCmdCopyImageToBuffer)(cmd, src->value, src->current_layout, dma_mapping.second->value, 1, &region);
}
src->pop_layout(cmd);
@ -503,7 +503,7 @@ namespace vk
if (src_w == section.dst_w && src_h == section.dst_h) [[likely]]
{
const auto copy_rgn = get_output_region(src_x, src_y, src_w, src_h, src_image);
vkCmdCopyImage(cmd, src_image->value, src_image->current_layout, dst->value, dst->current_layout, 1, &copy_rgn);
VK_GET_SYMBOL(vkCmdCopyImage)(cmd, src_image->value, src_image->current_layout, dst->value, dst->current_layout, 1, &copy_rgn);
}
else
{
@ -538,7 +538,7 @@ namespace vk
// Casting comes after the scaling!
const auto copy_rgn = get_output_region(dst_x, dst_y, section.dst_w, section.dst_h, _dst);
_dst->change_layout(cmd, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL);
vkCmdCopyImage(cmd, _dst->value, _dst->current_layout, dst->value, dst->current_layout, 1, &copy_rgn);
VK_GET_SYMBOL(vkCmdCopyImage)(cmd, _dst->value, _dst->current_layout, dst->value, dst->current_layout, 1, &copy_rgn);
}
}
@ -770,12 +770,12 @@ namespace vk
if (!(dst_aspect & VK_IMAGE_ASPECT_DEPTH_BIT))
{
VkClearColorValue clear = {};
vkCmdClearColorImage(cmd, image->value, image->current_layout, &clear, 1, &dst_range);
VK_GET_SYMBOL(vkCmdClearColorImage)(cmd, image->value, image->current_layout, &clear, 1, &dst_range);
}
else
{
VkClearDepthStencilValue clear = { 1.f, 0 };
vkCmdClearDepthStencilImage(cmd, image->value, image->current_layout, &clear, 1, &dst_range);
VK_GET_SYMBOL(vkCmdClearDepthStencilImage)(cmd, image->value, image->current_layout, &clear, 1, &dst_range);
}
copy_transfer_regions_impl(cmd, image, sections_to_copy);
@ -805,12 +805,12 @@ namespace vk
if (!(dst_aspect & VK_IMAGE_ASPECT_DEPTH_BIT))
{
VkClearColorValue clear = {};
vkCmdClearColorImage(cmd, image->value, image->current_layout, &clear, 1, &dst_range);
VK_GET_SYMBOL(vkCmdClearColorImage)(cmd, image->value, image->current_layout, &clear, 1, &dst_range);
}
else
{
VkClearDepthStencilValue clear = { 1.f, 0 };
vkCmdClearDepthStencilImage(cmd, image->value, image->current_layout, &clear, 1, &dst_range);
VK_GET_SYMBOL(vkCmdClearDepthStencilImage)(cmd, image->value, image->current_layout, &clear, 1, &dst_range);
}
copy_transfer_regions_impl(cmd, image, sections_to_copy);
@ -842,12 +842,12 @@ namespace vk
if (!(dst_aspect & VK_IMAGE_ASPECT_DEPTH_BIT))
{
VkClearColorValue clear = {};
vkCmdClearColorImage(cmd, image->value, image->current_layout, &clear, 1, &dst_range);
VK_GET_SYMBOL(vkCmdClearColorImage)(cmd, image->value, image->current_layout, &clear, 1, &dst_range);
}
else
{
VkClearDepthStencilValue clear = { 1.f, 0 };
vkCmdClearDepthStencilImage(cmd, image->value, image->current_layout, &clear, 1, &dst_range);
VK_GET_SYMBOL(vkCmdClearDepthStencilImage)(cmd, image->value, image->current_layout, &clear, 1, &dst_range);
}
}
@ -879,12 +879,12 @@ namespace vk
if (!(dst_aspect & VK_IMAGE_ASPECT_DEPTH_BIT))
{
VkClearColorValue clear = {};
vkCmdClearColorImage(cmd, image->value, image->current_layout, &clear, 1, &dst_range);
VK_GET_SYMBOL(vkCmdClearColorImage)(cmd, image->value, image->current_layout, &clear, 1, &dst_range);
}
else
{
VkClearDepthStencilValue clear = { 1.f, 0 };
vkCmdClearDepthStencilImage(cmd, image->value, image->current_layout, &clear, 1, &dst_range);
VK_GET_SYMBOL(vkCmdClearDepthStencilImage)(cmd, image->value, image->current_layout, &clear, 1, &dst_range);
}
copy_transfer_regions_impl(cmd, image, sections_to_copy);
@ -1000,12 +1000,12 @@ namespace vk
if (image->aspect() & VK_IMAGE_ASPECT_COLOR_BIT)
{
VkClearColorValue color = { {0.f, 0.f, 0.f, 1.f} };
vkCmdClearColorImage(cmd, image->value, image->current_layout, &color, 1, &range);
VK_GET_SYMBOL(vkCmdClearColorImage)(cmd, image->value, image->current_layout, &color, 1, &range);
}
else
{
VkClearDepthStencilValue clear{ 1.f, 255 };
vkCmdClearDepthStencilImage(cmd, image->value, image->current_layout, &clear, 1, &range);
VK_GET_SYMBOL(vkCmdClearDepthStencilImage)(cmd, image->value, image->current_layout, &clear, 1, &range);
}
}
}
@ -1339,7 +1339,7 @@ namespace vk
cmd.submit(submit_info, VK_TRUE);
vk::wait_for_fence(&submit_fence, GENERAL_WAIT_TIMEOUT);
CHECK_RESULT(vkResetCommandBuffer(cmd, 0));
CHECK_RESULT(VK_GET_SYMBOL(vkResetCommandBuffer)(cmd, 0));
cmd.begin();
}
else
@ -1516,7 +1516,7 @@ namespace vk
subresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
VkSubresourceLayout layout{};
vkGetImageSubresourceLayout(*m_device, image->value, &subresource, &layout);
VK_GET_SYMBOL(vkGetImageSubresourceLayout)(*m_device, image->value, &subresource, &layout);
void* mem = image->memory->map(0, layout.rowPitch * height);

View file

@ -6,13 +6,14 @@
#define VK_USE_PLATFORM_MACOS_MVK
#elif defined(ANDROID)
#define VK_USE_PLATFORM_ANDROID_KHR
#define VK_NO_PROTOTYPES
#elif HAVE_X11
#define VK_USE_PLATFORM_XLIB_KHR
#endif
#ifdef _MSC_VER
#pragma warning( push )
#pragma warning( disable : 4005 )
#pragma warning(push)
#pragma warning(disable : 4005)
#endif
#include <vulkan/vulkan.h>
@ -26,3 +27,59 @@
#if VK_HEADER_VERSION < 287
constexpr VkDriverId VK_DRIVER_ID_MESA_HONEYKRISP = static_cast<VkDriverId>(26);
#endif
#ifdef ANDROID
// On Android the Vulkan loader is used with VK_NO_PROTOTYPES (see the platform
// block above), so no vkXxx symbols are linked statically. Instead, every API
// entry point used through VK_GET_SYMBOL() gets a lazily-resolved function
// pointer slot, collected in a global registry and filled in by
// symbol_cache::initialize() (defined elsewhere — presumably after the loader
// library is opened; TODO confirm call site).
//
// NOTE(review): this header relies on <vector>, <string>, <utility> and
// <cstddef> being transitively available at this point — confirm the include
// chain, or add the includes explicitly.
namespace vk {
// Structural wrapper that lets a string literal be used as a C++20 non-type
// template parameter (keys the per-symbol-name cached_symbols instantiation).
template <std::size_t N>
struct string_literal
{
char data[N];
// consteval: the literal (including its NUL terminator) is copied at
// compile time; a string_literal can never be built at runtime.
consteval string_literal(const char (&str)[N])
{
for (std::size_t i = 0; i < N; ++i)
{
data[i] = str[i];
}
}
};
// Process-wide registry of (symbol name -> pointer slot) pairs. Slots are
// registered during static initialization by symbol_cache_id's constructor
// and are expected to be populated later by initialize(); until then every
// slot holds nullptr, so VK_GET_SYMBOL() must not be called before that.
class symbol_cache
{
std::vector<std::pair<std::string, void **>> registered_symbols;
public:
// Resolve all registered symbols (implementation not visible here —
// presumably via dlsym/vkGetInstanceProcAddr; TODO confirm).
void initialize();
void clear();
// Record a slot to be filled with the address of `name`. The pointed-to
// slot must outlive the cache (the global cached_symbols instances do).
void register_symbol(const char* name, void **ptr);
// Meyers singleton: safe to call from any static initializer that needs
// to register a symbol, regardless of TU initialization order.
static symbol_cache& cache_instance()
{
static symbol_cache result;
return result;
}
};
// One instance per distinct symbol name V; self-registers its slot with the
// cache at static-initialization time.
template <auto V>
class symbol_cache_id
{
void *ptr = nullptr;
public:
symbol_cache_id()
{
// V is a string_literal NTTP object (static storage), so V.data is a
// valid NUL-terminated name for the lifetime of the program.
symbol_cache::cache_instance().register_symbol(V.data, &ptr);
}
// Returns the resolved address, or nullptr before initialize() has run.
void* get() { return ptr; }
};
// Variable template: the per-symbol global slot keyed by the function name.
template <auto V>
symbol_cache_id<V> cached_symbols;
}
// Android: fetch the cached pointer and cast it to the matching PFN_vkXxx
// type; the #x stringization is what keys the cached_symbols instantiation.
#define VK_GET_SYMBOL(x) reinterpret_cast<PFN_##x>(::vk::cached_symbols<::vk::string_literal{#x}>.get())
#else
// Other platforms link the Vulkan prototypes directly; the macro is a no-op
// so call sites read identically everywhere.
#define VK_GET_SYMBOL(x) x
#endif

View file

@ -20,7 +20,7 @@ namespace vk
ensure(present_surface);
src->push_layout(cmd, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL);
vkCmdBlitImage(cmd, src->value, src->current_layout, present_surface, present_surface_layout, 1, &request, VK_FILTER_LINEAR);
VK_GET_SYMBOL(vkCmdBlitImage)(cmd, src->value, src->current_layout, present_surface, present_surface_layout, 1, &request, VK_FILTER_LINEAR);
src->pop_layout(cmd);
return nullptr;
}

View file

@ -158,7 +158,7 @@ namespace vk
static_cast<f32>(src_image->width()), static_cast<f32>(src_image->height()), // Size of the raw image to upscale (in case viewport does not cover it all)
static_cast<f32>(m_output_size.width), static_cast<f32>(m_output_size.height)); // Size of output viewport (target size)
vkCmdPushConstants(cmd, m_pipeline_layout, VK_SHADER_STAGE_COMPUTE_BIT, 0, push_constants_size, m_constants_buf);
VK_GET_SYMBOL(vkCmdPushConstants)(cmd, m_pipeline_layout, VK_SHADER_STAGE_COMPUTE_BIT, 0, push_constants_size, m_constants_buf);
}
rcas_pass::rcas_pass()
@ -177,7 +177,7 @@ namespace vk
auto cas_attenuation = 2.f - (g_cfg.video.vk.rcas_sharpening_intensity / 50.f);
FsrRcasCon(&m_constants_buf[0], cas_attenuation);
vkCmdPushConstants(cmd, m_pipeline_layout, VK_SHADER_STAGE_COMPUTE_BIT, 0, push_constants_size, m_constants_buf);
VK_GET_SYMBOL(vkCmdPushConstants)(cmd, m_pipeline_layout, VK_SHADER_STAGE_COMPUTE_BIT, 0, push_constants_size, m_constants_buf);
}
} // Namespace FidelityFX
@ -395,7 +395,7 @@ namespace vk
if (mode & UPSCALE_AND_COMMIT)
{
src_image->push_layout(cmd, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL);
vkCmdBlitImage(cmd, src_image->value, src_image->current_layout, target_image, target_image_layout, 1, &output_request, VK_FILTER_LINEAR);
VK_GET_SYMBOL(vkCmdBlitImage)(cmd, src_image->value, src_image->current_layout, target_image, target_image_layout, 1, &output_request, VK_FILTER_LINEAR);
src_image->pop_layout(cmd);
return nullptr;
}

View file

@ -20,7 +20,7 @@ namespace vk
ensure(present_surface);
src->push_layout(cmd, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL);
vkCmdBlitImage(cmd, src->value, src->current_layout, present_surface, present_surface_layout, 1, &request, VK_FILTER_NEAREST);
VK_GET_SYMBOL(vkCmdBlitImage)(cmd, src->value, src->current_layout, present_surface, present_surface_layout, 1, &request, VK_FILTER_NEAREST);
src->pop_layout(cmd);
return nullptr;
}

View file

@ -31,7 +31,7 @@ namespace vk
barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
barrier.subresourceRange = range;
vkCmdPipelineBarrier(cmd, src_stage, dst_stage, 0, 0, nullptr, 0, nullptr, 1, &barrier);
VK_GET_SYMBOL(vkCmdPipelineBarrier)(cmd, src_stage, dst_stage, 0, 0, nullptr, 0, nullptr, 1, &barrier);
}
void insert_buffer_memory_barrier(
@ -57,7 +57,7 @@ namespace vk
barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
vkCmdPipelineBarrier(cmd, src_stage, dst_stage, 0, 0, nullptr, 1, &barrier, 0, nullptr);
VK_GET_SYMBOL(vkCmdPipelineBarrier)(cmd, src_stage, dst_stage, 0, 0, nullptr, 1, &barrier, 0, nullptr);
}
void insert_global_memory_barrier(
@ -75,7 +75,7 @@ namespace vk
barrier.sType = VK_STRUCTURE_TYPE_MEMORY_BARRIER;
barrier.srcAccessMask = src_access;
barrier.dstAccessMask = dst_access;
vkCmdPipelineBarrier(cmd, src_stage, dst_stage, 0, 1, &barrier, 0, nullptr, 0, nullptr);
VK_GET_SYMBOL(vkCmdPipelineBarrier)(cmd, src_stage, dst_stage, 0, 1, &barrier, 0, nullptr, 0, nullptr);
}
void insert_texture_barrier(
@ -118,7 +118,7 @@ namespace vk
barrier.srcAccessMask = src_access;
barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT;
vkCmdPipelineBarrier(cmd, src_stage, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, 0, 0, nullptr, 0, nullptr, 1, &barrier);
VK_GET_SYMBOL(vkCmdPipelineBarrier)(cmd, src_stage, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, 0, 0, nullptr, 0, nullptr, 1, &barrier);
}
void insert_texture_barrier(const vk::command_buffer& cmd, vk::image* image, VkImageLayout new_layout, bool preserve_renderpass)

View file

@ -12,12 +12,12 @@ namespace vk
info.offset = offset;
info.range = size;
info.sType = VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO;
CHECK_RESULT(vkCreateBufferView(m_device, &info, nullptr, &value));
CHECK_RESULT(VK_GET_SYMBOL(vkCreateBufferView)(m_device, &info, nullptr, &value));
}
buffer_view::~buffer_view()
{
vkDestroyBufferView(m_device, value, nullptr);
VK_GET_SYMBOL(vkDestroyBufferView)(m_device, value, nullptr);
}
bool buffer_view::in_range(u32 address, u32 size, u32& offset) const
@ -48,11 +48,11 @@ namespace vk
info.usage = usage;
info.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
CHECK_RESULT(vkCreateBuffer(m_device, &info, nullptr, &value));
CHECK_RESULT(VK_GET_SYMBOL(vkCreateBuffer)(m_device, &info, nullptr, &value));
// Allocate vram for this buffer
VkMemoryRequirements memory_reqs;
vkGetBufferMemoryRequirements(m_device, value, &memory_reqs);
VK_GET_SYMBOL(vkGetBufferMemoryRequirements)(m_device, value, &memory_reqs);
memory_type_info allocation_type_info = memory_type.get(dev, access_flags, memory_reqs.memoryTypeBits);
if (!allocation_type_info)
@ -61,7 +61,7 @@ namespace vk
}
memory = std::make_unique<memory_block>(m_device, memory_reqs.size, memory_reqs.alignment, allocation_type_info, allocation_pool);
vkBindBufferMemory(dev, value, memory->get_vk_device_memory(), memory->get_vk_device_memory_offset());
VK_GET_SYMBOL(vkBindBufferMemory)(dev, value, memory->get_vk_device_memory(), memory->get_vk_device_memory_offset());
}
buffer::buffer(const vk::render_device& dev, VkBufferUsageFlags usage, void* host_pointer, u64 size)
@ -79,7 +79,7 @@ namespace vk
ex_info.pNext = nullptr;
info.pNext = &ex_info;
CHECK_RESULT(vkCreateBuffer(m_device, &info, nullptr, &value));
CHECK_RESULT(VK_GET_SYMBOL(vkCreateBuffer)(m_device, &info, nullptr, &value));
auto& memory_map = dev.get_memory_mapping();
ensure(memory_map._vkGetMemoryHostPointerPropertiesEXT);
@ -89,7 +89,7 @@ namespace vk
CHECK_RESULT(memory_map._vkGetMemoryHostPointerPropertiesEXT(dev, VK_EXTERNAL_MEMORY_HANDLE_TYPE_HOST_ALLOCATION_BIT_EXT, host_pointer, &memory_properties));
VkMemoryRequirements memory_reqs;
vkGetBufferMemoryRequirements(m_device, value, &memory_reqs);
VK_GET_SYMBOL(vkGetBufferMemoryRequirements)(m_device, value, &memory_reqs);
auto required_memory_type_bits = memory_reqs.memoryTypeBits & memory_properties.memoryTypeBits;
if (!required_memory_type_bits)
@ -109,12 +109,12 @@ namespace vk
}
memory = std::make_unique<memory_block_host>(m_device, host_pointer, size, allocation_type_info);
CHECK_RESULT(vkBindBufferMemory(dev, value, memory->get_vk_device_memory(), memory->get_vk_device_memory_offset()));
CHECK_RESULT(VK_GET_SYMBOL(vkBindBufferMemory)(dev, value, memory->get_vk_device_memory(), memory->get_vk_device_memory_offset()));
}
buffer::~buffer()
{
vkDestroyBuffer(m_device, value, nullptr);
VK_GET_SYMBOL(vkDestroyBuffer)(m_device, value, nullptr);
}
void* buffer::map(u64 offset, u64 size)

View file

@ -18,7 +18,7 @@ namespace vk
infos.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO;
infos.queueFamilyIndex = queue_family;
CHECK_RESULT(vkCreateCommandPool(dev, &infos, nullptr, &pool));
CHECK_RESULT(VK_GET_SYMBOL(vkCreateCommandPool)(dev, &infos, nullptr, &pool));
}
void command_pool::destroy()
@ -26,7 +26,7 @@ namespace vk
if (!pool)
return;
vkDestroyCommandPool((*owner), pool, nullptr);
VK_GET_SYMBOL(vkDestroyCommandPool)((*owner), pool, nullptr);
pool = nullptr;
}
@ -52,7 +52,7 @@ namespace vk
infos.commandBufferCount = 1;
infos.commandPool = +cmd_pool;
infos.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
CHECK_RESULT(vkAllocateCommandBuffers(cmd_pool.get_owner(), &infos, &commands));
CHECK_RESULT(VK_GET_SYMBOL(vkAllocateCommandBuffers)(cmd_pool.get_owner(), &infos, &commands));
m_submit_fence = new fence(cmd_pool.get_owner());
pool = &cmd_pool;
@ -60,7 +60,7 @@ namespace vk
void command_buffer::destroy()
{
vkFreeCommandBuffers(pool->get_owner(), (*pool), 1, &commands);
VK_GET_SYMBOL(vkFreeCommandBuffers)(pool->get_owner(), (*pool), 1, &commands);
if (m_submit_fence)
{
@ -79,7 +79,7 @@ namespace vk
//CHECK_RESULT(vkResetFences(pool->get_owner(), 1, &m_submit_fence));
m_submit_fence->reset();
CHECK_RESULT(vkResetCommandBuffer(commands, 0));
CHECK_RESULT(VK_GET_SYMBOL(vkResetCommandBuffer)(commands, 0));
}
if (is_open)
@ -92,7 +92,7 @@ namespace vk
begin_infos.pInheritanceInfo = &inheritance_info;
begin_infos.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
begin_infos.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT;
CHECK_RESULT(vkBeginCommandBuffer(commands, &begin_infos));
CHECK_RESULT(VK_GET_SYMBOL(vkBeginCommandBuffer)(commands, &begin_infos));
is_open = true;
}
@ -104,7 +104,7 @@ namespace vk
return;
}
CHECK_RESULT(vkEndCommandBuffer(commands));
CHECK_RESULT(VK_GET_SYMBOL(vkEndCommandBuffer)(commands));
is_open = false;
}

View file

@ -140,7 +140,7 @@ namespace vk
{
ensure(shadow);
ensure(heap);
vkCmdCopyBuffer(cmd, shadow->value, heap->value, ::size32(dirty_ranges), dirty_ranges.data());
VK_GET_SYMBOL(vkCmdCopyBuffer)(cmd, shadow->value, heap->value, ::size32(dirty_ranges), dirty_ranges.data());
dirty_ranges.clear();
insert_buffer_memory_barrier(cmd, heap->value, 0, heap->size(),

View file

@ -104,7 +104,7 @@ namespace vk
}
VkDescriptorSetLayout result;
CHECK_RESULT(vkCreateDescriptorSetLayout(*g_render_device, &infos, nullptr, &result));
CHECK_RESULT(VK_GET_SYMBOL(vkCreateDescriptorSetLayout)(*g_render_device, &infos, nullptr, &result));
return result;
}
}
@ -137,7 +137,7 @@ namespace vk
for (auto& pool : m_device_subpools)
{
vkDestroyDescriptorPool((*m_owner), pool.handle, nullptr);
VK_GET_SYMBOL(vkDestroyDescriptorPool)((*m_owner), pool.handle, nullptr);
pool.handle = VK_NULL_HANDLE;
}
@ -148,7 +148,7 @@ namespace vk
{
std::lock_guard lock(m_subpool_lock);
CHECK_RESULT(vkResetDescriptorPool(*m_owner, m_device_subpools[subpool_id].handle, flags));
CHECK_RESULT(VK_GET_SYMBOL(vkResetDescriptorPool)(*m_owner, m_device_subpools[subpool_id].handle, flags));
m_device_subpools[subpool_id].busy = VK_FALSE;
}
@ -197,7 +197,7 @@ namespace vk
alloc_info.pSetLayouts = m_allocation_request_cache.data();
m_descriptor_set_cache.resize(alloc_size);
CHECK_RESULT(vkAllocateDescriptorSets(*m_owner, &alloc_info, m_descriptor_set_cache.data()));
CHECK_RESULT(VK_GET_SYMBOL(vkAllocateDescriptorSets)(*m_owner, &alloc_info, m_descriptor_set_cache.data()));
m_current_subpool_offset += alloc_size;
new_descriptor_set = m_descriptor_set_cache.pop_back();
@ -205,7 +205,7 @@ namespace vk
else
{
m_current_subpool_offset++;
CHECK_RESULT(vkAllocateDescriptorSets(*m_owner, &alloc_info, &new_descriptor_set));
CHECK_RESULT(VK_GET_SYMBOL(vkAllocateDescriptorSets)(*m_owner, &alloc_info, &new_descriptor_set));
}
return new_descriptor_set;
@ -243,7 +243,7 @@ namespace vk
}
VkDescriptorPool subpool = VK_NULL_HANDLE;
if (VkResult result = vkCreateDescriptorPool(*m_owner, &m_create_info, nullptr, &subpool))
if (VkResult result = VK_GET_SYMBOL(vkCreateDescriptorPool)(*m_owner, &m_create_info, nullptr, &subpool))
{
if (retries-- && (result == VK_ERROR_FRAGMENTATION_EXT))
{
@ -410,7 +410,7 @@ namespace vk
nullptr, // pBufferInfo
nullptr // pTexelBufferView
};
vkUpdateDescriptorSets(*g_render_device, 1, &writer, 0, nullptr);
VK_GET_SYMBOL(vkUpdateDescriptorSets)(*g_render_device, 1, &writer, 0, nullptr);
}
void descriptor_set::push(rsx::simple_array<VkCopyDescriptorSet>& copy_cmd, u32 type_mask)
@ -437,7 +437,7 @@ namespace vk
flush();
}
vkCmdBindDescriptorSets(cmd, bind_point, layout, 0, 1, &m_handle, 0, nullptr);
VK_GET_SYMBOL(vkCmdBindDescriptorSets)(cmd, bind_point, layout, 0, 1, &m_handle, 0, nullptr);
}
void descriptor_set::flush()
@ -449,7 +449,7 @@ namespace vk
const auto num_writes = ::size32(m_pending_writes);
const auto num_copies = ::size32(m_pending_copies);
vkUpdateDescriptorSets(*g_render_device, num_writes, m_pending_writes.data(), num_copies, m_pending_copies.data());
VK_GET_SYMBOL(vkUpdateDescriptorSets)(*g_render_device, num_writes, m_pending_writes.data(), num_copies, m_pending_copies.data());
m_push_type_mask = 0;
m_pending_writes.clear();

View file

@ -12,7 +12,7 @@ namespace vk
{
if (!allow_extensions)
{
vkGetPhysicalDeviceFeatures(dev, &features);
VK_GET_SYMBOL(vkGetPhysicalDeviceFeatures)(dev, &features);
return;
}
@ -21,7 +21,7 @@ namespace vk
if (!instance_extensions.is_supported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME))
{
vkGetPhysicalDeviceFeatures(dev, &features);
VK_GET_SYMBOL(vkGetPhysicalDeviceFeatures)(dev, &features);
}
else
{
@ -86,7 +86,7 @@ namespace vk
features2.pNext = &device_fault_info;
}
auto _vkGetPhysicalDeviceFeatures2KHR = reinterpret_cast<PFN_vkGetPhysicalDeviceFeatures2KHR>(vkGetInstanceProcAddr(parent, "vkGetPhysicalDeviceFeatures2KHR"));
auto _vkGetPhysicalDeviceFeatures2KHR = reinterpret_cast<PFN_vkGetPhysicalDeviceFeatures2KHR>(VK_GET_SYMBOL(vkGetInstanceProcAddr)(parent, "vkGetPhysicalDeviceFeatures2KHR"));
ensure(_vkGetPhysicalDeviceFeatures2KHR); // "vkGetInstanceProcAddress failed to find entry point!"
_vkGetPhysicalDeviceFeatures2KHR(dev, &features2);
@ -142,11 +142,11 @@ namespace vk
void physical_device::get_physical_device_properties(bool allow_extensions)
{
vkGetPhysicalDeviceMemoryProperties(dev, &memory_properties);
VK_GET_SYMBOL(vkGetPhysicalDeviceMemoryProperties)(dev, &memory_properties);
if (!allow_extensions)
{
vkGetPhysicalDeviceProperties(dev, &props);
VK_GET_SYMBOL(vkGetPhysicalDeviceProperties)(dev, &props);
return;
}
@ -155,7 +155,7 @@ namespace vk
if (!instance_extensions.is_supported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME))
{
vkGetPhysicalDeviceProperties(dev, &props);
VK_GET_SYMBOL(vkGetPhysicalDeviceProperties)(dev, &props);
}
else
{
@ -179,7 +179,7 @@ namespace vk
properties2.pNext = &driver_properties;
}
auto _vkGetPhysicalDeviceProperties2KHR = reinterpret_cast<PFN_vkGetPhysicalDeviceProperties2KHR>(vkGetInstanceProcAddr(parent, "vkGetPhysicalDeviceProperties2KHR"));
auto _vkGetPhysicalDeviceProperties2KHR = reinterpret_cast<PFN_vkGetPhysicalDeviceProperties2KHR>(VK_GET_SYMBOL(vkGetInstanceProcAddr)(parent, "vkGetPhysicalDeviceProperties2KHR"));
ensure(_vkGetPhysicalDeviceProperties2KHR);
_vkGetPhysicalDeviceProperties2KHR(dev, &properties2);
@ -397,7 +397,7 @@ namespace vk
return ::size32(queue_props);
u32 count = 0;
vkGetPhysicalDeviceQueueFamilyProperties(dev, &count, nullptr);
VK_GET_SYMBOL(vkGetPhysicalDeviceQueueFamilyProperties)(dev, &count, nullptr);
return count;
}
@ -407,10 +407,10 @@ namespace vk
if (queue_props.empty())
{
u32 count = 0;
vkGetPhysicalDeviceQueueFamilyProperties(dev, &count, nullptr);
VK_GET_SYMBOL(vkGetPhysicalDeviceQueueFamilyProperties)(dev, &count, nullptr);
queue_props.resize(count);
vkGetPhysicalDeviceQueueFamilyProperties(dev, &count, queue_props.data());
VK_GET_SYMBOL(vkGetPhysicalDeviceQueueFamilyProperties)(dev, &count, queue_props.data());
}
if (queue >= queue_props.size())
@ -556,11 +556,12 @@ namespace vk
requested_extensions.push_back(VK_EXT_DEVICE_FAULT_EXTENSION_NAME);
}
enabled_features.robustBufferAccess = VK_TRUE;
enabled_features.robustBufferAccess = ensure(pgpu->features.robustBufferAccess, "robustBufferAccess is unsupported");
enabled_features.fullDrawIndexUint32 = VK_TRUE;
enabled_features.independentBlend = VK_TRUE;
enabled_features.independentBlend = ensure(pgpu->features.independentBlend, "independentBlend is unsupported");
enabled_features.logicOp = VK_TRUE;
enabled_features.depthClamp = VK_TRUE;
enabled_features.depthClamp = ensure(pgpu->features.depthClamp, "depthClamp is unsupported");
enabled_features.depthBounds = VK_TRUE;
enabled_features.wideLines = VK_TRUE;
enabled_features.largePoints = VK_TRUE;
@ -780,7 +781,7 @@ namespace vk
device.pNext = &shader_barycentric_info;
}
if (auto error = vkCreateDevice(*pgpu, &device, nullptr, &dev))
if (auto error = VK_GET_SYMBOL(vkCreateDevice)(*pgpu, &device, nullptr, &dev))
{
dump_debug_info(requested_extensions, enabled_features);
vk::die_with_error(error);
@ -794,38 +795,38 @@ namespace vk
}
// Initialize queues
vkGetDeviceQueue(dev, graphics_queue_idx, 0, &m_graphics_queue);
vkGetDeviceQueue(dev, transfer_queue_idx, transfer_queue_sub_index, &m_transfer_queue);
VK_GET_SYMBOL(vkGetDeviceQueue)(dev, graphics_queue_idx, 0, &m_graphics_queue);
VK_GET_SYMBOL(vkGetDeviceQueue)(dev, transfer_queue_idx, transfer_queue_sub_index, &m_transfer_queue);
if (present_queue_idx != umax)
{
vkGetDeviceQueue(dev, present_queue_idx, 0, &m_present_queue);
VK_GET_SYMBOL(vkGetDeviceQueue)(dev, present_queue_idx, 0, &m_present_queue);
}
// Import optional function endpoints
if (pgpu->optional_features_support.conditional_rendering)
{
_vkCmdBeginConditionalRenderingEXT = reinterpret_cast<PFN_vkCmdBeginConditionalRenderingEXT>(vkGetDeviceProcAddr(dev, "vkCmdBeginConditionalRenderingEXT"));
_vkCmdEndConditionalRenderingEXT = reinterpret_cast<PFN_vkCmdEndConditionalRenderingEXT>(vkGetDeviceProcAddr(dev, "vkCmdEndConditionalRenderingEXT"));
_vkCmdBeginConditionalRenderingEXT = reinterpret_cast<PFN_vkCmdBeginConditionalRenderingEXT>(VK_GET_SYMBOL(vkGetDeviceProcAddr)(dev, "vkCmdBeginConditionalRenderingEXT"));
_vkCmdEndConditionalRenderingEXT = reinterpret_cast<PFN_vkCmdEndConditionalRenderingEXT>(VK_GET_SYMBOL(vkGetDeviceProcAddr)(dev, "vkCmdEndConditionalRenderingEXT"));
}
if (pgpu->optional_features_support.debug_utils)
{
_vkSetDebugUtilsObjectNameEXT = reinterpret_cast<PFN_vkSetDebugUtilsObjectNameEXT>(vkGetDeviceProcAddr(dev, "vkSetDebugUtilsObjectNameEXT"));
_vkQueueInsertDebugUtilsLabelEXT = reinterpret_cast<PFN_vkQueueInsertDebugUtilsLabelEXT>(vkGetDeviceProcAddr(dev, "vkQueueInsertDebugUtilsLabelEXT"));
_vkCmdInsertDebugUtilsLabelEXT = reinterpret_cast<PFN_vkCmdInsertDebugUtilsLabelEXT>(vkGetDeviceProcAddr(dev, "vkCmdInsertDebugUtilsLabelEXT"));
_vkSetDebugUtilsObjectNameEXT = reinterpret_cast<PFN_vkSetDebugUtilsObjectNameEXT>(VK_GET_SYMBOL(vkGetDeviceProcAddr)(dev, "vkSetDebugUtilsObjectNameEXT"));
_vkQueueInsertDebugUtilsLabelEXT = reinterpret_cast<PFN_vkQueueInsertDebugUtilsLabelEXT>(VK_GET_SYMBOL(vkGetDeviceProcAddr)(dev, "vkQueueInsertDebugUtilsLabelEXT"));
_vkCmdInsertDebugUtilsLabelEXT = reinterpret_cast<PFN_vkCmdInsertDebugUtilsLabelEXT>(VK_GET_SYMBOL(vkGetDeviceProcAddr)(dev, "vkCmdInsertDebugUtilsLabelEXT"));
}
if (pgpu->optional_features_support.synchronization_2)
{
_vkCmdSetEvent2KHR = reinterpret_cast<PFN_vkCmdSetEvent2KHR>(vkGetDeviceProcAddr(dev, "vkCmdSetEvent2KHR"));
_vkCmdWaitEvents2KHR = reinterpret_cast<PFN_vkCmdWaitEvents2KHR>(vkGetDeviceProcAddr(dev, "vkCmdWaitEvents2KHR"));
_vkCmdPipelineBarrier2KHR = reinterpret_cast<PFN_vkCmdPipelineBarrier2KHR>(vkGetDeviceProcAddr(dev, "vkCmdPipelineBarrier2KHR"));
_vkCmdSetEvent2KHR = reinterpret_cast<PFN_vkCmdSetEvent2KHR>(VK_GET_SYMBOL(vkGetDeviceProcAddr)(dev, "vkCmdSetEvent2KHR"));
_vkCmdWaitEvents2KHR = reinterpret_cast<PFN_vkCmdWaitEvents2KHR>(VK_GET_SYMBOL(vkGetDeviceProcAddr)(dev, "vkCmdWaitEvents2KHR"));
_vkCmdPipelineBarrier2KHR = reinterpret_cast<PFN_vkCmdPipelineBarrier2KHR>(VK_GET_SYMBOL(vkGetDeviceProcAddr)(dev, "vkCmdPipelineBarrier2KHR"));
}
if (pgpu->optional_features_support.extended_device_fault)
{
_vkGetDeviceFaultInfoEXT = reinterpret_cast<PFN_vkGetDeviceFaultInfoEXT>(vkGetDeviceProcAddr(dev, "vkGetDeviceFaultInfoEXT"));
_vkGetDeviceFaultInfoEXT = reinterpret_cast<PFN_vkGetDeviceFaultInfoEXT>(VK_GET_SYMBOL(vkGetDeviceProcAddr)(dev, "vkGetDeviceFaultInfoEXT"));
}
memory_map = vk::get_memory_mapping(pdev);
@ -834,7 +835,7 @@ namespace vk
if (pgpu->optional_features_support.external_memory_host)
{
memory_map._vkGetMemoryHostPointerPropertiesEXT = reinterpret_cast<PFN_vkGetMemoryHostPointerPropertiesEXT>(vkGetDeviceProcAddr(dev, "vkGetMemoryHostPointerPropertiesEXT"));
memory_map._vkGetMemoryHostPointerPropertiesEXT = reinterpret_cast<PFN_vkGetMemoryHostPointerPropertiesEXT>(VK_GET_SYMBOL(vkGetDeviceProcAddr)(dev, "vkGetMemoryHostPointerPropertiesEXT"));
}
if (g_cfg.video.disable_vulkan_mem_allocator)
@ -866,7 +867,7 @@ namespace vk
m_allocator.reset();
}
vkDestroyDevice(dev, nullptr);
VK_GET_SYMBOL(vkDestroyDevice)(dev, nullptr);
dev = nullptr;
memory_map = {};
m_formats_support = {};
@ -882,7 +883,7 @@ namespace vk
}
auto& props = pgpu->format_properties[format];
vkGetPhysicalDeviceFormatProperties(*pgpu, format, &props);
VK_GET_SYMBOL(vkGetPhysicalDeviceFormatProperties)(*pgpu, format, &props);
return props;
}
@ -1003,7 +1004,7 @@ namespace vk
{
VkPhysicalDevice pdev = dev;
VkPhysicalDeviceMemoryProperties memory_properties;
vkGetPhysicalDeviceMemoryProperties(pdev, &memory_properties);
VK_GET_SYMBOL(vkGetPhysicalDeviceMemoryProperties)(pdev, &memory_properties);
memory_type_mapping result;
result.device_local_total_bytes = 0;
@ -1135,7 +1136,7 @@ namespace vk
const auto test_format_features = [&dev](VkFormat format, VkFlags required_features, VkBool32 linear_features) -> bool
{
VkFormatProperties props;
vkGetPhysicalDeviceFormatProperties(dev, format, &props);
VK_GET_SYMBOL(vkGetPhysicalDeviceFormatProperties)(dev, format, &props);
const auto supported_features_mask = (linear_features) ? props.linearTilingFeatures : props.optimalTilingFeatures;
return (supported_features_mask & required_features) == required_features;

View file

@ -39,12 +39,12 @@ namespace vk
m_width = width;
m_height = height;
CHECK_RESULT(vkCreateFramebuffer(dev, &info, nullptr, &value));
CHECK_RESULT(VK_GET_SYMBOL(vkCreateFramebuffer)(dev, &info, nullptr, &value));
}
~framebuffer()
{
vkDestroyFramebuffer(m_device, value, nullptr);
VK_GET_SYMBOL(vkDestroyFramebuffer)(m_device, value, nullptr);
}
u32 width()

View file

@ -105,7 +105,7 @@ namespace vk
image::~image()
{
vkDestroyImage(m_device, value, nullptr);
VK_GET_SYMBOL(vkDestroyImage)(m_device, value, nullptr);
}
void image::create_impl(const vk::render_device& dev, u32 access_flags, const memory_type_info& memory_type, vmm_allocation_pool allocation_pool)
@ -116,10 +116,10 @@ namespace vk
const bool nullable = !!(info.flags & VK_IMAGE_CREATE_ALLOW_NULL_RPCS3);
info.flags &= ~VK_IMAGE_CREATE_SPECIAL_FLAGS_RPCS3;
CHECK_RESULT(vkCreateImage(m_device, &info, nullptr, &value));
CHECK_RESULT(VK_GET_SYMBOL(vkCreateImage)(m_device, &info, nullptr, &value));
VkMemoryRequirements memory_req;
vkGetImageMemoryRequirements(m_device, value, &memory_req);
VK_GET_SYMBOL(vkGetImageMemoryRequirements)(m_device, value, &memory_req);
const auto allocation_type_info = memory_type.get(dev, access_flags, memory_req.memoryTypeBits);
if (!allocation_type_info)
@ -131,13 +131,13 @@ namespace vk
if (auto device_mem = memory->get_vk_device_memory();
device_mem != VK_NULL_HANDLE) [[likely]]
{
CHECK_RESULT(vkBindImageMemory(m_device, value, device_mem, memory->get_vk_device_memory_offset()));
CHECK_RESULT(VK_GET_SYMBOL(vkBindImageMemory)(m_device, value, device_mem, memory->get_vk_device_memory_offset()));
current_layout = info.initialLayout;
}
else
{
ensure(nullable);
vkDestroyImage(m_device, value, nullptr);
VK_GET_SYMBOL(vkDestroyImage)(m_device, value, nullptr);
value = VK_NULL_HANDLE;
}
}
@ -359,7 +359,7 @@ namespace vk
image_view::~image_view()
{
vkDestroyImageView(m_device, value, nullptr);
VK_GET_SYMBOL(vkDestroyImageView)(m_device, value, nullptr);
}
u32 image_view::encoded_component_map() const
@ -389,7 +389,7 @@ namespace vk
info.components = { VK_COMPONENT_SWIZZLE_IDENTITY, VK_COMPONENT_SWIZZLE_IDENTITY, VK_COMPONENT_SWIZZLE_IDENTITY, VK_COMPONENT_SWIZZLE_IDENTITY };
#endif
CHECK_RESULT(vkCreateImageView(m_device, &info, nullptr, &value));
CHECK_RESULT(VK_GET_SYMBOL(vkCreateImageView)(m_device, &info, nullptr, &value));
#if (VK_DISABLE_COMPONENT_SWIZZLE)
// Restore requested mapping

View file

@ -184,7 +184,7 @@ namespace vk
if (!barrier.srcAccessMask) src_stage = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
if (!barrier.dstAccessMask) dst_stage = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
vkCmdPipelineBarrier(cmd, src_stage, dst_stage, 0, 0, nullptr, 0, nullptr, 1, &barrier);
VK_GET_SYMBOL(vkCmdPipelineBarrier)(cmd, src_stage, dst_stage, 0, 0, nullptr, 0, nullptr, 1, &barrier);
}
void change_image_layout(const vk::command_buffer& cmd, vk::image* image, VkImageLayout new_layout, const VkImageSubresourceRange& range)

View file

@ -1,3 +1,4 @@
#include "Emu/RSX/VK/VulkanAPI.h"
#include "stdafx.h"
#include "instance.h"
@ -9,49 +10,45 @@
#include <nlohmann/json.hpp>
#endif
#ifdef ANDROID
#define GET_VK_PFN(lib, name) (lib == nullptr ? &name : reinterpret_cast<PFN_##name>(dlsym(lib, #name)))
#else
#define GET_VK_PFN(lib, name) (&name)
#endif
namespace vk
{
static PFN_vkEnumerateInstanceExtensionProperties _vkEnumerateInstanceExtensionProperties;
static PFN_vkEnumerateDeviceExtensionProperties _vkEnumerateDeviceExtensionProperties;
// Supported extensions
supported_extensions::supported_extensions(enumeration_class _class, const char* layer_name, VkPhysicalDevice pdev)
{
	// Enumerate the extension properties for either the instance or a specific
	// physical device. First query only the count, then size the cache and
	// fetch the actual property structs. All entry points are resolved through
	// VK_GET_SYMBOL so the active (possibly custom) vulkan loader is used.
	u32 count;
	if (_class == enumeration_class::instance)
	{
		// Count query failed: leave m_vk_exts empty so is_supported() reports nothing.
		if (VK_GET_SYMBOL(vkEnumerateInstanceExtensionProperties)(layer_name, &count, nullptr) != VK_SUCCESS)
			return;
	}
	else
	{
		// Device enumeration requires a valid physical device handle.
		ensure(pdev);
		if (VK_GET_SYMBOL(vkEnumerateDeviceExtensionProperties)(pdev, layer_name, &count, nullptr) != VK_SUCCESS)
			return;
	}

	m_vk_exts.resize(count);
	if (_class == enumeration_class::instance)
	{
		VK_GET_SYMBOL(vkEnumerateInstanceExtensionProperties)(layer_name, &count, m_vk_exts.data());
	}
	else
	{
		VK_GET_SYMBOL(vkEnumerateDeviceExtensionProperties)(pdev, layer_name, &count, m_vk_exts.data());
	}
}
bool supported_extensions::is_supported(std::string_view ext) const
{
	// Linear scan over the cached extension list.
	// VkExtensionProperties::extensionName is a fixed char array; comparing it
	// against the string_view uses string_view's heterogeneous operator==.
	return std::any_of(m_vk_exts.cbegin(), m_vk_exts.cend(), [&](const VkExtensionProperties& p)
	{
		return p.extensionName == ext;
	});
}
void* instance::g_vk_loader = nullptr;
// Instance
instance::~instance()
{
@ -63,7 +60,8 @@ namespace vk
void instance::destroy()
{
if (!m_instance) return;
if (!m_instance)
return;
if (m_debugger)
{
@ -73,31 +71,33 @@ namespace vk
if (m_surface)
{
GET_VK_PFN(m_vk_loader, vkDestroySurfaceKHR)(m_instance, m_surface, nullptr);
VK_GET_SYMBOL(vkDestroySurfaceKHR)(m_instance, m_surface, nullptr);
m_surface = VK_NULL_HANDLE;
}
GET_VK_PFN(m_vk_loader, vkDestroyInstance)(m_instance, nullptr);
VK_GET_SYMBOL(vkDestroyInstance)(m_instance, nullptr);
m_instance = VK_NULL_HANDLE;
#ifdef ANDROID
if (m_vk_loader != nullptr)
if (owns_loader && g_vk_loader != nullptr)
{
::dlclose(m_vk_loader);
::dlclose(g_vk_loader);
g_vk_loader = nullptr;
symbol_cache::cache_instance().clear();
}
#endif
}
void instance::enable_debugging()
{
if (!g_cfg.video.debug_output) return;
if (!g_cfg.video.debug_output)
return;
PFN_vkDebugReportCallbackEXT callback = vk::dbgFunc;
auto _vkGetInstanceProcAddr = GET_VK_PFN(m_vk_loader, vkGetInstanceProcAddr);
_vkCreateDebugReportCallback = reinterpret_cast<PFN_vkCreateDebugReportCallbackEXT>(_vkGetInstanceProcAddr(m_instance, "vkCreateDebugReportCallbackEXT"));
_vkDestroyDebugReportCallback = reinterpret_cast<PFN_vkDestroyDebugReportCallbackEXT>(_vkGetInstanceProcAddr(m_instance, "vkDestroyDebugReportCallbackEXT"));
_vkCreateDebugReportCallback = reinterpret_cast<PFN_vkCreateDebugReportCallbackEXT>(VK_GET_SYMBOL(vkGetInstanceProcAddr)(m_instance, "vkCreateDebugReportCallbackEXT"));
_vkDestroyDebugReportCallback = reinterpret_cast<PFN_vkDestroyDebugReportCallbackEXT>(VK_GET_SYMBOL(vkGetInstanceProcAddr)(m_instance, "vkDestroyDebugReportCallbackEXT"));
VkDebugReportCallbackCreateInfoEXT dbgCreateInfo = {};
dbgCreateInfo.sType = VK_STRUCTURE_TYPE_DEBUG_REPORT_CREATE_INFO_EXT;
@ -114,47 +114,59 @@ namespace vk
bool instance::create(const char* app_name, bool fast)
{
#ifdef ANDROID
auto custom_driver_path = g_cfg.video.vk.custom_driver_path.to_string();
if (!custom_driver_path.empty())
{
rsx_log.warning("Loading custom driver %s", custom_driver_path);
auto meta = nlohmann::json::parse(std::ifstream(custom_driver_path + "/meta.json"));
if (meta.contains("libraryName"))
if (g_vk_loader == nullptr)
{
auto library_name = meta["libraryName"].get<std::string>();
rsx_log.warning("Custom driver: library name %s", library_name);
auto hook_dir = g_cfg.video.vk.custom_driver_hook_dir.to_string();
rsx_log.warning("Custom driver: hook dir %s", hook_dir);
::dlerror();
m_vk_loader = adrenotools_open_libvulkan(
RTLD_NOW, ADRENOTOOLS_DRIVER_CUSTOM,
nullptr, (hook_dir + "/").c_str(),
(custom_driver_path + "/").c_str(), library_name.c_str(),
nullptr, nullptr);
if (m_vk_loader == nullptr)
auto custom_driver_path = g_cfg.video.vk.custom_driver_path.to_string();
if (!custom_driver_path.empty())
{
rsx_log.error("Failed to load custom driver at '%s': %s", custom_driver_path, ::dlerror());
rsx_log.warning("Loading custom driver %s", custom_driver_path);
auto meta = nlohmann::json::parse(std::ifstream(custom_driver_path + "/meta.json"));
if (meta.contains("libraryName"))
{
auto library_name = meta["libraryName"].get<std::string>();
rsx_log.warning("Custom driver: library name %s", library_name);
auto hook_dir = g_cfg.video.vk.custom_driver_hook_dir.to_string();
rsx_log.warning("Custom driver: hook dir %s", hook_dir);
::dlerror();
g_vk_loader = adrenotools_open_libvulkan(
RTLD_NOW, ADRENOTOOLS_DRIVER_CUSTOM,
nullptr, (hook_dir + "/").c_str(),
(custom_driver_path + "/").c_str(), library_name.c_str(),
nullptr, nullptr);
if (g_vk_loader == nullptr)
{
rsx_log.error("Failed to load custom driver at '%s': %s", custom_driver_path, ::dlerror());
}
else
{
rsx_log.success("Custom driver at '%s' successfully loaded", custom_driver_path);
}
}
else
{
rsx_log.error("Custom driver load error: Invalid meta.json at %s", custom_driver_path);
}
}
else
if (g_vk_loader == nullptr)
{
rsx_log.error("Custom driver at '%s' successfully loaded", custom_driver_path);
g_vk_loader = dlopen("libvulkan.so.1", RTLD_NOW | RTLD_LOCAL);
if (g_vk_loader == nullptr)
{
g_vk_loader = dlopen("libvulkan.so", RTLD_NOW | RTLD_LOCAL);
}
}
symbol_cache::cache_instance().initialize();
owns_loader = true;
}
else
{
rsx_log.error("Custom driver load error: Invalid meta.json at %s", custom_driver_path);
}
}
#endif
_vkEnumerateInstanceExtensionProperties = GET_VK_PFN(m_vk_loader, vkEnumerateInstanceExtensionProperties);
_vkEnumerateDeviceExtensionProperties = GET_VK_PFN(m_vk_loader, vkEnumerateDeviceExtensionProperties);
// Initialize a vulkan instance
VkApplicationInfo app = {};
@ -202,8 +214,8 @@ namespace vk
extensions.push_back(VK_EXT_LAYER_SETTINGS_EXTENSION_NAME);
layers.push_back(kMVKMoltenVKDriverLayerName);
mvk_settings.push_back(VkLayerSettingEXT{ kMVKMoltenVKDriverLayerName, "MVK_CONFIG_RESUME_LOST_DEVICE", VK_LAYER_SETTING_TYPE_BOOL32_EXT, 1, &setting_true });
mvk_settings.push_back(VkLayerSettingEXT{ kMVKMoltenVKDriverLayerName, "MVK_CONFIG_FAST_MATH_ENABLED", VK_LAYER_SETTING_TYPE_INT32_EXT, 1, &setting_fast_math });
mvk_settings.push_back(VkLayerSettingEXT{kMVKMoltenVKDriverLayerName, "MVK_CONFIG_RESUME_LOST_DEVICE", VK_LAYER_SETTING_TYPE_BOOL32_EXT, 1, &setting_true});
mvk_settings.push_back(VkLayerSettingEXT{kMVKMoltenVKDriverLayerName, "MVK_CONFIG_FAST_MATH_ENABLED", VK_LAYER_SETTING_TYPE_INT32_EXT, 1, &setting_fast_math});
mvk_layer_settings_create_info.sType = VK_STRUCTURE_TYPE_LAYER_SETTINGS_CREATE_INFO_EXT;
mvk_layer_settings_create_info.pNext = next_info;
@ -275,7 +287,7 @@ namespace vk
instance_info.ppEnabledExtensionNames = fast ? nullptr : extensions.data();
instance_info.pNext = next_info;
if (VkResult result = GET_VK_PFN(m_vk_loader, vkCreateInstance)(&instance_info, nullptr, &m_instance); result != VK_SUCCESS)
if (VkResult result = VK_GET_SYMBOL(vkCreateInstance)(&instance_info, nullptr, &m_instance); result != VK_SUCCESS)
{
if (result == VK_ERROR_LAYER_NOT_PRESENT)
{
@ -306,7 +318,7 @@ namespace vk
{
u32 num_gpus;
// This may fail on unsupported drivers, so just assume no devices
if (GET_VK_PFN(m_vk_loader, vkEnumeratePhysicalDevices)(m_instance, &num_gpus, nullptr) != VK_SUCCESS)
if (VK_GET_SYMBOL(vkEnumeratePhysicalDevices)(m_instance, &num_gpus, nullptr) != VK_SUCCESS)
return gpus;
if (gpus.size() != num_gpus)
@ -314,7 +326,7 @@ namespace vk
std::vector<VkPhysicalDevice> pdevs(num_gpus);
gpus.resize(num_gpus);
CHECK_RESULT(GET_VK_PFN(m_vk_loader, vkEnumeratePhysicalDevices)(m_instance, &num_gpus, pdevs.data()));
CHECK_RESULT(VK_GET_SYMBOL(vkEnumeratePhysicalDevices)(m_instance, &num_gpus, pdevs.data()));
for (u32 i = 0; i < num_gpus; ++i)
gpus[i].create(m_instance, pdevs[i], extensions_loaded);
@ -325,9 +337,8 @@ namespace vk
swapchain_base* instance::create_swapchain(display_handle_t window_handle, vk::physical_device& dev)
{
WSI_config surface_config
{
.supports_automatic_wm_reports = true
WSI_config surface_config{
.supports_automatic_wm_reports = true,
};
m_surface = make_WSI_surface(m_instance, window_handle, &surface_config);
@ -337,7 +348,7 @@ namespace vk
for (u32 index = 0; index < device_queues; index++)
{
GET_VK_PFN(m_vk_loader, vkGetPhysicalDeviceSurfaceSupportKHR)(dev, index, m_surface, &supports_present[index]);
VK_GET_SYMBOL(vkGetPhysicalDeviceSurfaceSupportKHR)(dev, index, m_surface, &supports_present[index]);
}
u32 graphics_queue_idx = -1;
@ -345,15 +356,15 @@ namespace vk
u32 transfer_queue_idx = -1;
auto test_queue_family = [&](u32 index, u32 desired_flags)
{
if (const auto flags = dev.get_queue_properties(index).queueFlags;
(flags & desired_flags) == desired_flags)
{
if (const auto flags = dev.get_queue_properties(index).queueFlags;
(flags & desired_flags) == desired_flags)
{
return true;
}
return true;
}
return false;
};
return false;
};
for (u32 i = 0; i < device_queues; ++i)
{
@ -396,7 +407,7 @@ namespace vk
if (!present_possible)
{
//Native(sw) swapchain
// Native(sw) swapchain
rsx_log.error("It is not possible for the currently selected GPU to present to the window (Likely caused by NVIDIA driver running the current display)");
rsx_log.warning("Falling back to software present support (native windowing API)");
auto swapchain = new swapchain_NATIVE(dev, -1, graphics_queue_idx, transfer_queue_idx);
@ -406,10 +417,10 @@ namespace vk
// Get the list of VkFormat's that are supported:
u32 formatCount;
CHECK_RESULT(GET_VK_PFN(m_vk_loader,vkGetPhysicalDeviceSurfaceFormatsKHR)(dev, m_surface, &formatCount, nullptr));
CHECK_RESULT(VK_GET_SYMBOL(vkGetPhysicalDeviceSurfaceFormatsKHR)(dev, m_surface, &formatCount, nullptr));
std::vector<VkSurfaceFormatKHR> surfFormats(formatCount);
CHECK_RESULT(GET_VK_PFN(m_vk_loader,vkGetPhysicalDeviceSurfaceFormatsKHR)(dev, m_surface, &formatCount, surfFormats.data()));
CHECK_RESULT(VK_GET_SYMBOL(vkGetPhysicalDeviceSurfaceFormatsKHR)(dev, m_surface, &formatCount, surfFormats.data()));
VkFormat format;
VkColorSpaceKHR color_space;
@ -420,10 +431,11 @@ namespace vk
}
else
{
if (!formatCount) fmt::throw_exception("Format count is zero!");
if (!formatCount)
fmt::throw_exception("Format count is zero!");
format = surfFormats[0].format;
//Prefer BGRA8_UNORM to avoid sRGB compression (RADV)
// Prefer BGRA8_UNORM to avoid sRGB compression (RADV)
for (auto& surface_format : surfFormats)
{
if (surface_format.format == VK_FORMAT_B8G8R8A8_UNORM)
@ -438,4 +450,34 @@ namespace vk
return new swapchain_WSI(dev, present_queue_idx, graphics_queue_idx, transfer_queue_idx, format, m_surface, color_space, !surface_config.supports_automatic_wm_reports);
}
}
#ifdef ANDROID
void symbol_cache::initialize()
{
for (auto& symbol : registered_symbols)
{
auto sym = dlsym(instance::get_vk_loader(), symbol.first.c_str());
if (sym == nullptr)
{
rsx_log.error("vk: Failed to find instance of '%s'", symbol.first);
}
*symbol.second = sym;
}
}
void symbol_cache::clear()
{
	// Null out every cached pointer so stale entry points cannot be invoked
	// after the loader library has been closed.
	for (auto& [name, slot] : registered_symbols)
	{
		*slot = nullptr;
	}
}
void symbol_cache::register_symbol(const char* name, void **ptr)
{
	// Queue the (symbol name, destination slot) pair for later resolution
	// by initialize().
	registered_symbols.push_back({ name, ptr });
}
#endif
} // namespace vk

View file

@ -3,7 +3,6 @@
#include "../VulkanAPI.h"
#include "swapchain.h"
#include <algorithm>
#include <vector>
#ifdef __APPLE__
@ -22,7 +21,7 @@ namespace vk
enum enumeration_class
{
instance = 0,
device = 1
device = 1
};
supported_extensions(enumeration_class _class, const char* layer_name = nullptr, VkPhysicalDevice pdev = VK_NULL_HANDLE);
@ -36,18 +35,25 @@ namespace vk
std::vector<physical_device> gpus;
VkInstance m_instance = VK_NULL_HANDLE;
VkSurfaceKHR m_surface = VK_NULL_HANDLE;
#ifdef ANDROID
void *m_vk_loader = nullptr;
#endif
PFN_vkDestroyDebugReportCallbackEXT _vkDestroyDebugReportCallback = nullptr;
PFN_vkCreateDebugReportCallbackEXT _vkCreateDebugReportCallback = nullptr;
VkDebugReportCallbackEXT m_debugger = nullptr;
#ifdef ANDROID
static void* g_vk_loader;
bool owns_loader = false;
#endif
bool extensions_loaded = false;
public:
#ifdef ANDROID
static void* get_vk_loader()
{
return g_vk_loader;
}
#endif
instance() = default;
~instance();
@ -64,4 +70,4 @@ namespace vk
swapchain_base* create_swapchain(display_handle_t window_handle, vk::physical_device& dev);
};
}
} // namespace vk

View file

@ -172,7 +172,7 @@ namespace vk
if (vram_allocation_limit < dev.get_memory_mapping().device_local_total_bytes)
{
VkPhysicalDeviceMemoryProperties memory_properties;
vkGetPhysicalDeviceMemoryProperties(pdev, &memory_properties);
VK_GET_SYMBOL(vkGetPhysicalDeviceMemoryProperties)(pdev, &memory_properties);
for (u32 i = 0; i < memory_properties.memoryHeapCount; ++i)
{
const u64 max_sz = (memory_properties.memoryHeaps[i].flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT)
@ -340,7 +340,7 @@ namespace vk
for (const auto& memory_type_index : memory_type)
{
info.memoryTypeIndex = memory_type_index;
error_code = vkAllocateMemory(m_device, &info, nullptr, &memory);
error_code = VK_GET_SYMBOL(vkAllocateMemory)(m_device, &info, nullptr, &memory);
if (error_code == VK_SUCCESS)
{
return { error_code, memory_type_index };
@ -385,19 +385,19 @@ namespace vk
void mem_allocator_vk::free(mem_handle_t mem_handle)
{
vmm_notify_memory_freed(mem_handle);
vkFreeMemory(m_device, static_cast<VkDeviceMemory>(mem_handle), nullptr);
VK_GET_SYMBOL(vkFreeMemory)(m_device, static_cast<VkDeviceMemory>(mem_handle), nullptr);
}
void* mem_allocator_vk::map(mem_handle_t mem_handle, u64 offset, u64 size)
{
void* data = nullptr;
CHECK_RESULT(vkMapMemory(m_device, static_cast<VkDeviceMemory>(mem_handle), offset, std::max<u64>(size, 1u), 0, &data));
CHECK_RESULT(VK_GET_SYMBOL(vkMapMemory)(m_device, static_cast<VkDeviceMemory>(mem_handle), offset, std::max<u64>(size, 1u), 0, &data));
return data;
}
void mem_allocator_vk::unmap(mem_handle_t mem_handle)
{
vkUnmapMemory(m_device, static_cast<VkDeviceMemory>(mem_handle));
VK_GET_SYMBOL(vkUnmapMemory)(m_device, static_cast<VkDeviceMemory>(mem_handle));
}
VkDeviceMemory mem_allocator_vk::get_vk_device_memory(mem_handle_t mem_handle)
@ -450,12 +450,12 @@ namespace vk
import_info.handleType = VK_EXTERNAL_MEMORY_HANDLE_TYPE_HOST_ALLOCATION_BIT_EXT;
import_info.pHostPointer = host_pointer;
CHECK_RESULT(vkAllocateMemory(m_device, &alloc_info, nullptr, &m_mem_handle));
CHECK_RESULT(VK_GET_SYMBOL(vkAllocateMemory)(m_device, &alloc_info, nullptr, &m_mem_handle));
}
memory_block_host::~memory_block_host()
{
vkFreeMemory(m_device, m_mem_handle, nullptr);
VK_GET_SYMBOL(vkFreeMemory)(m_device, m_mem_handle, nullptr);
}
VkDeviceMemory memory_block_host::get_vk_device_memory()

View file

@ -22,7 +22,7 @@ namespace vk
info.sType = VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO;
info.queryType = type;
info.queryCount = size;
CHECK_RESULT(vkCreateQueryPool(dev, &info, nullptr, &m_query_pool));
CHECK_RESULT(VK_GET_SYMBOL(vkCreateQueryPool)(dev, &info, nullptr, &m_query_pool));
// Take 'size' references on this object
ref_count.release(static_cast<s32>(size));
@ -30,7 +30,7 @@ namespace vk
~query_pool()
{
vkDestroyQueryPool(m_device, m_query_pool, nullptr);
VK_GET_SYMBOL(vkDestroyQueryPool)(m_device, m_query_pool, nullptr);
}
operator VkQueryPool()

View file

@ -1,3 +1,4 @@
#include "Emu/RSX/VK/vkutils/instance.h"
#include "memory.h"
#include "sampler.h"
#include "../../color_utils.h"
@ -97,13 +98,13 @@ namespace vk
info.pNext = &custom_color_info;
}
CHECK_RESULT(vkCreateSampler(m_device, &info, nullptr, &value));
CHECK_RESULT(VK_GET_SYMBOL(vkCreateSampler)(m_device, &info, nullptr, &value));
vmm_notify_object_allocated(VMM_ALLOCATION_POOL_SAMPLER);
}
sampler::~sampler()
{
vkDestroySampler(m_device, value, nullptr);
VK_GET_SYMBOL(vkDestroySampler)(m_device, value, nullptr);
vmm_notify_object_freed(VMM_ALLOCATION_POOL_SAMPLER);
}

View file

@ -50,7 +50,7 @@ namespace vk
sampler_info.compareOp = VK_COMPARE_OP_NEVER;
sampler_info.borderColor = VK_BORDER_COLOR_FLOAT_OPAQUE_WHITE;
vkCreateSampler(*g_render_device, &sampler_info, nullptr, &g_null_sampler);
VK_GET_SYMBOL(vkCreateSampler)(*g_render_device, &sampler_info, nullptr, &g_null_sampler);
return g_null_sampler;
}
@ -110,7 +110,7 @@ namespace vk
VkClearColorValue clear_color = {};
VkImageSubresourceRange range = { VK_IMAGE_ASPECT_COLOR_BIT, 0, tex->mipmaps(), 0, tex->layers() };
vkCmdClearColorImage(cmd, tex->value, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, &clear_color, 1, &range);
VK_GET_SYMBOL(vkCmdClearColorImage)(cmd, tex->value, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, &clear_color, 1, &range);
// Prep for shader access
tex->change_layout(cmd, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL);
@ -185,7 +185,7 @@ namespace vk
{
// Zero-initialize the allocated VRAM
const u64 zero_length = init_mem ? buf->size() : utils::align(min_required_size, 4);
vkCmdFillBuffer(cmd, buf->value, 0, zero_length, 0);
VK_GET_SYMBOL(vkCmdFillBuffer)(cmd, buf->value, 0, zero_length, 0);
insert_buffer_memory_barrier(cmd, buf->value, 0, zero_length,
VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT | VK_PIPELINE_STAGE_TRANSFER_BIT,
@ -204,7 +204,7 @@ namespace vk
if (g_null_sampler)
{
vkDestroySampler(*g_render_device, g_null_sampler, nullptr);
VK_GET_SYMBOL(vkDestroySampler)(*g_render_device, g_null_sampler, nullptr);
g_null_sampler = nullptr;
}
}

View file

@ -28,7 +28,7 @@ namespace vk
copyRegion.imageExtent = { m_width, m_height, 1 };
change_layout(cmd, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL);
vkCmdCopyImageToBuffer(cmd, value, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, m_dma_buffer->value, 1, &copyRegion);
VK_GET_SYMBOL(vkCmdCopyImageToBuffer)(cmd, value, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, m_dma_buffer->value, 1, &copyRegion);
change_layout(cmd, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);
}
@ -105,11 +105,11 @@ namespace vk
swapchain_WSI::swapchain_WSI(vk::physical_device& gpu, u32 present_queue, u32 graphics_queue, u32 transfer_queue, VkFormat format, VkSurfaceKHR surface, VkColorSpaceKHR color_space, bool force_wm_reporting_off)
: WSI_swapchain_base(gpu, present_queue, graphics_queue, transfer_queue, format)
{
_vkCreateSwapchainKHR = reinterpret_cast<PFN_vkCreateSwapchainKHR>(vkGetDeviceProcAddr(dev, "vkCreateSwapchainKHR"));
_vkDestroySwapchainKHR = reinterpret_cast<PFN_vkDestroySwapchainKHR>(vkGetDeviceProcAddr(dev, "vkDestroySwapchainKHR"));
_vkGetSwapchainImagesKHR = reinterpret_cast<PFN_vkGetSwapchainImagesKHR>(vkGetDeviceProcAddr(dev, "vkGetSwapchainImagesKHR"));
_vkAcquireNextImageKHR = reinterpret_cast<PFN_vkAcquireNextImageKHR>(vkGetDeviceProcAddr(dev, "vkAcquireNextImageKHR"));
_vkQueuePresentKHR = reinterpret_cast<PFN_vkQueuePresentKHR>(vkGetDeviceProcAddr(dev, "vkQueuePresentKHR"));
_vkCreateSwapchainKHR = reinterpret_cast<PFN_vkCreateSwapchainKHR>(VK_GET_SYMBOL(vkGetDeviceProcAddr)(dev, "vkCreateSwapchainKHR"));
_vkDestroySwapchainKHR = reinterpret_cast<PFN_vkDestroySwapchainKHR>(VK_GET_SYMBOL(vkGetDeviceProcAddr)(dev, "vkDestroySwapchainKHR"));
_vkGetSwapchainImagesKHR = reinterpret_cast<PFN_vkGetSwapchainImagesKHR>(VK_GET_SYMBOL(vkGetDeviceProcAddr)(dev, "vkGetSwapchainImagesKHR"));
_vkAcquireNextImageKHR = reinterpret_cast<PFN_vkAcquireNextImageKHR>(VK_GET_SYMBOL(vkGetDeviceProcAddr)(dev, "vkAcquireNextImageKHR"));
_vkQueuePresentKHR = reinterpret_cast<PFN_vkQueuePresentKHR>(VK_GET_SYMBOL(vkGetDeviceProcAddr)(dev, "vkQueuePresentKHR"));
m_surface = surface;
m_color_space = color_space;
@ -194,10 +194,10 @@ namespace vk
pSurfaceInfo.pNext = &full_screen_exclusive_win32_info;
auto getPhysicalDeviceSurfaceCapabilities2KHR = reinterpret_cast<PFN_vkGetPhysicalDeviceSurfaceCapabilities2KHR>(
vkGetInstanceProcAddr(dev.gpu(), "vkGetPhysicalDeviceSurfaceCapabilities2KHR")
VK_GET_SYMBOL(vkGetInstanceProcAddr)(dev.gpu(), "vkGetPhysicalDeviceSurfaceCapabilities2KHR")
);
ensure(getPhysicalDeviceSurfaceCapabilities2KHR);
CHECK_RESULT(getPhysicalDeviceSurfaceCapabilities2KHR(dev.gpu(), &pSurfaceInfo, &pSurfaceCapabilities));
CHECK_RESULT(getPhysicalDeviceSurfaceCapabilities2KHR(dev.gpu(), &pSurfaceInfo, &pSurfaceCapabilities));
return { pSurfaceCapabilities.surfaceCapabilities, !!full_screen_exclusive_capabilities.fullScreenExclusiveSupported };
}
@ -208,7 +208,7 @@ namespace vk
}
#endif
VkSurfaceCapabilitiesKHR surface_descriptors = {};
CHECK_RESULT(vkGetPhysicalDeviceSurfaceCapabilitiesKHR(dev.gpu(), m_surface, &surface_descriptors));
CHECK_RESULT(VK_GET_SYMBOL(vkGetPhysicalDeviceSurfaceCapabilitiesKHR)(dev.gpu(), m_surface, &surface_descriptors));
return { surface_descriptors, false };
}
@ -247,10 +247,10 @@ namespace vk
}
u32 nb_available_modes = 0;
CHECK_RESULT(vkGetPhysicalDeviceSurfacePresentModesKHR(gpu, m_surface, &nb_available_modes, nullptr));
CHECK_RESULT(VK_GET_SYMBOL(vkGetPhysicalDeviceSurfacePresentModesKHR)(gpu, m_surface, &nb_available_modes, nullptr));
std::vector<VkPresentModeKHR> present_modes(nb_available_modes);
CHECK_RESULT(vkGetPhysicalDeviceSurfacePresentModesKHR(gpu, m_surface, &nb_available_modes, present_modes.data()));
CHECK_RESULT(VK_GET_SYMBOL(vkGetPhysicalDeviceSurfacePresentModesKHR)(gpu, m_surface, &nb_available_modes, present_modes.data()));
VkPresentModeKHR swapchain_present_mode = VK_PRESENT_MODE_FIFO_KHR;
std::vector<VkPresentModeKHR> preferred_modes;

View file

@ -17,7 +17,7 @@ namespace vk
createInfo.sType = VK_STRUCTURE_TYPE_ANDROID_SURFACE_CREATE_INFO_KHR;
createInfo.window = std::get<ANativeWindow *>(window_handle);
CHECK_RESULT(vkCreateAndroidSurfaceKHR(vk_instance, &createInfo, nullptr, &result));
CHECK_RESULT(VK_GET_SYMBOL(vkCreateAndroidSurfaceKHR)(vk_instance, &createInfo, nullptr, &result));
return result;
}
#endif

View file

@ -200,7 +200,7 @@ namespace vk
VkResult acquire_next_swapchain_image(VkSemaphore semaphore, u64 timeout, u32* result) override
{
return vkAcquireNextImageKHR(dev, m_vk_swapchain, timeout, semaphore, VK_NULL_HANDLE, result);
return VK_GET_SYMBOL(vkAcquireNextImageKHR)(dev, m_vk_swapchain, timeout, semaphore, VK_NULL_HANDLE, result);
}
void end_frame(command_buffer& /*cmd*/, u32 /*index*/) override

View file

@ -16,7 +16,7 @@ namespace vk
createInfo.sType = VK_STRUCTURE_TYPE_MACOS_SURFACE_CREATE_INFO_MVK;
createInfo.pView = window_handle;
CHECK_RESULT(vkCreateMacOSSurfaceMVK(vk_instance, &createInfo, NULL, &result));
CHECK_RESULT(VK_GET_SYMBOL(vkCreateMacOSSurfaceMVK)(vk_instance, &createInfo, NULL, &result));
return result;
}
#endif

View file

@ -146,7 +146,7 @@ namespace vk
createInfo.sType = VK_STRUCTURE_TYPE_XLIB_SURFACE_CREATE_INFO_KHR;
createInfo.dpy = p.first;
createInfo.window = p.second;
CHECK_RESULT(vkCreateXlibSurfaceKHR(vk_instance, &createInfo, nullptr, &result));
CHECK_RESULT(VK_GET_SYMBOL(vkCreateXlibSurfaceKHR)(vk_instance, &createInfo, nullptr, &result));
}
else
#endif
@ -157,7 +157,7 @@ namespace vk
createInfo.sType = VK_STRUCTURE_TYPE_WAYLAND_SURFACE_CREATE_INFO_KHR;
createInfo.display = p.first;
createInfo.surface = p.second;
CHECK_RESULT(vkCreateWaylandSurfaceKHR(vk_instance, &createInfo, nullptr, &result));
CHECK_RESULT(VK_GET_SYMBOL(vkCreateWaylandSurfaceKHR)(vk_instance, &createInfo, nullptr, &result));
config->supports_automatic_wm_reports = false;
}
else

View file

@ -103,7 +103,7 @@ namespace vk
createInfo.sType = VK_STRUCTURE_TYPE_WIN32_SURFACE_CREATE_INFO_KHR;
createInfo.hinstance = hInstance;
createInfo.hwnd = window_handle;
CHECK_RESULT(vkCreateWin32SurfaceKHR(vk_instance, &createInfo, NULL, &result));
CHECK_RESULT(VK_GET_SYMBOL(vkCreateWin32SurfaceKHR)(vk_instance, &createInfo, NULL, &result));
return result;
}
#endif

View file

@ -152,21 +152,21 @@ namespace vk
owner = dev;
VkFenceCreateInfo info = {};
info.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
CHECK_RESULT(vkCreateFence(dev, &info, nullptr, &handle));
CHECK_RESULT(VK_GET_SYMBOL(vkCreateFence)(dev, &info, nullptr, &handle));
}
fence::~fence()
{
if (handle)
{
vkDestroyFence(owner, handle, nullptr);
VK_GET_SYMBOL(vkDestroyFence)(owner, handle, nullptr);
handle = VK_NULL_HANDLE;
}
}
void fence::reset()
{
vkResetFences(owner, 1, &handle);
VK_GET_SYMBOL(vkResetFences)(owner, 1, &handle);
flushed.release(false);
}
@ -193,12 +193,12 @@ namespace vk
{
VkSemaphoreCreateInfo info{};
info.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
CHECK_RESULT(vkCreateSemaphore(m_device, &info, nullptr, &m_handle));
CHECK_RESULT(VK_GET_SYMBOL(vkCreateSemaphore)(m_device, &info, nullptr, &m_handle));
}
semaphore::~semaphore()
{
vkDestroySemaphore(m_device, m_handle, nullptr);
VK_GET_SYMBOL(vkDestroySemaphore)(m_device, m_handle, nullptr);
}
semaphore::operator VkSemaphore() const
@ -236,14 +236,14 @@ namespace vk
info.flags = VK_EVENT_CREATE_DEVICE_ONLY_BIT_KHR;
}
CHECK_RESULT(vkCreateEvent(dev, &info, nullptr, &m_vk_event));
CHECK_RESULT(VK_GET_SYMBOL(vkCreateEvent)(dev, &info, nullptr, &m_vk_event));
}
event::~event()
{
if (m_vk_event) [[likely]]
{
vkDestroyEvent(*m_device, m_vk_event, nullptr);
VK_GET_SYMBOL(vkDestroyEvent)(*m_device, m_vk_event, nullptr);
}
}
@ -263,7 +263,7 @@ namespace vk
const auto image_memory_barriers = v1_utils::get_image_memory_barriers(dependency);
const auto buffer_memory_barriers = v1_utils::get_buffer_memory_barriers(dependency);
vkCmdPipelineBarrier(cmd, src_stages, dst_stages, dependency.dependencyFlags,
VK_GET_SYMBOL(vkCmdPipelineBarrier)(cmd, src_stages, dst_stages, dependency.dependencyFlags,
::size32(memory_barriers), memory_barriers.data(),
::size32(buffer_memory_barriers), buffer_memory_barriers.data(),
::size32(image_memory_barriers), image_memory_barriers.data());
@ -289,7 +289,7 @@ namespace vk
else
{
const auto dst_stages = v1_utils::gather_dst_stages(dependency);
vkCmdSetEvent(cmd, m_vk_event, dst_stages);
VK_GET_SYMBOL(vkCmdSetEvent)(cmd, m_vk_event, dst_stages);
}
return;
@ -302,7 +302,7 @@ namespace vk
// 2. Signalling won't wait. The caller is responsible for setting up the dependencies correctly.
if (m_backend != sync_backend::events_v2)
{
vkCmdSetEvent(cmd, m_vk_event, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT);
VK_GET_SYMBOL(vkCmdSetEvent)(cmd, m_vk_event, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT);
return;
}
@ -329,7 +329,7 @@ namespace vk
{
if (m_backend != sync_backend::gpu_label) [[ likely ]]
{
vkSetEvent(*m_device, m_vk_event);
VK_GET_SYMBOL(vkSetEvent)(*m_device, m_vk_event);
return;
}
@ -352,7 +352,7 @@ namespace vk
const auto image_memory_barriers = v1_utils::get_image_memory_barriers(dependency);
const auto buffer_memory_barriers = v1_utils::get_buffer_memory_barriers(dependency);
vkCmdWaitEvents(cmd,
VK_GET_SYMBOL(vkCmdWaitEvents)(cmd,
1, &m_vk_event,
src_stages, dst_stages,
::size32(memory_barriers), memory_barriers.data(),
@ -364,7 +364,7 @@ namespace vk
{
if (m_backend != sync_backend::gpu_label) [[ likely ]]
{
vkResetEvent(*m_device, m_vk_event);
VK_GET_SYMBOL(vkResetEvent)(*m_device, m_vk_event);
return;
}
@ -375,7 +375,7 @@ namespace vk
{
if (m_backend != sync_backend::gpu_label) [[ likely ]]
{
return vkGetEventStatus(*m_device, m_vk_event);
return VK_GET_SYMBOL(vkGetEventStatus)(*m_device, m_vk_event);
}
return m_label->signaled() ? VK_EVENT_SET : VK_EVENT_RESET;
@ -468,12 +468,12 @@ namespace vk
barrier.dstAccessMask |= VK_ACCESS_TRANSFER_WRITE_BIT;
}
vkCmdPipelineBarrier(cmd, src_stages, dst_stages, dependency.dependencyFlags,
VK_GET_SYMBOL(vkCmdPipelineBarrier)(cmd, src_stages, dst_stages, dependency.dependencyFlags,
::size32(memory_barriers), memory_barriers.data(),
::size32(buffer_memory_barriers), buffer_memory_barriers.data(),
::size32(image_memory_barriers), image_memory_barriers.data());
vkCmdFillBuffer(cmd, m_buffer_handle, m_buffer_offset, 4, label_constants::set_);
VK_GET_SYMBOL(vkCmdFillBuffer)(cmd, m_buffer_handle, m_buffer_offset, 4, label_constants::set_);
}
gpu_debug_marker::gpu_debug_marker(gpu_debug_marker_pool& pool, std::string message)
@ -565,11 +565,11 @@ namespace vk
if (timeout)
{
return vkWaitForFences(*g_render_device, 1, &pFence->handle, VK_FALSE, timeout * 1000ull);
return VK_GET_SYMBOL(vkWaitForFences)(*g_render_device, 1, &pFence->handle, VK_FALSE, timeout * 1000ull);
}
else
{
while (auto status = vkGetFenceStatus(*g_render_device, pFence->handle))
while (auto status = VK_GET_SYMBOL(vkGetFenceStatus)(*g_render_device, pFence->handle))
{
switch (status)
{

View file

@ -197,6 +197,17 @@ struct cfg_root : cfg::node
cfg::string custom_driver_path{ this, "Custom Driver Path", "", false };
cfg::string custom_driver_internal_data_dir{ this, "Custom Driver Internal Data Directory", "", false };
cfg::string custom_driver_hook_dir{ this, "Custom Driver Hook Directory", "", false };
struct node_workarounds : cfg::node
{
node_workarounds(cfg::node* _this) : cfg::node(_this, "Workarounds") {}
cfg::_bool no_primitive_restart{ this, "No Primitive Restart" };
cfg::_bool sanitize_fp_values{ this, "Sanitize FP Values" };
cfg::_bool disable_fence_reset{ this, "Disable Fence Reset" };
cfg::_bool emulate_cond_render{ this, "Emulate Cond Render" };
cfg::_bool strict_query_scopes{ this, "Strict Query Scopes" };
cfg::_bool force_reuse_query_pools{ this, "Force Reuse Query Pools" };
} workarounds{ this };
#endif
} vk{ this };