diff --git a/src/xenia/app/xenia_main.cc b/src/xenia/app/xenia_main.cc index 15f01a892..83fe45351 100644 --- a/src/xenia/app/xenia_main.cc +++ b/src/xenia/app/xenia_main.cc @@ -336,13 +336,42 @@ std::unique_ptr EmulatorApp::CreateGraphicsSystem() { // For maintainability, as much implementation code as possible should be // placed in `xe::gpu` and shared between the backends rather than duplicated // between them. + const std::string gpu_implementation_name = cvars::gpu; + if (gpu_implementation_name == "null") { + return std::make_unique(); + } Factory factory; #if XE_PLATFORM_WIN32 factory.Add("d3d12"); #endif // XE_PLATFORM_WIN32 factory.Add("vulkan"); - factory.Add("null"); - return factory.Create(cvars::gpu); + std::unique_ptr gpu_implementation = + factory.Create(gpu_implementation_name); + if (!gpu_implementation) { + xe::FatalError( + "Unable to initialize the graphics subsystem.\n" + "\n" +#if XE_PLATFORM_ANDROID + "The GPU must support at least Vulkan 1.0 with the 'independentBlend' " + "feature.\n" + "\n" +#else +#if XE_PLATFORM_WIN32 + "For Direct3D 12, at least Windows 10 is required, and the GPU must be " + "compatible with Direct3D 12 feature level 11_0.\n" + "\n" +#endif // XE_PLATFORM_WIN32 + "For Vulkan, the Vulkan runtime must be installed, and the GPU must " + "support at least Vulkan 1.0. 
The Vulkan runtime can be downloaded at " + "https://vulkan.lunarg.com/sdk/home.\n" + "\n" + "Also, ensure that you have the latest driver installed for your GPU.\n" + "\n" +#endif // XE_PLATFORM_ANDROID + "See https://xenia.jp/faq/ for more information and the system " + "requirements."); + } + return gpu_implementation; } std::vector> EmulatorApp::CreateInputDrivers( diff --git a/src/xenia/base/logging.cc b/src/xenia/base/logging.cc index ed636e428..727547374 100644 --- a/src/xenia/base/logging.cc +++ b/src/xenia/base/logging.cc @@ -467,7 +467,7 @@ void ShutdownLogging() { memory::AlignedFree(logger); } -bool logging::internal::ShouldLog(LogLevel log_level) { +bool logging::ShouldLog(LogLevel log_level) { return logger_ != nullptr && static_cast(log_level) <= cvars::log_level; } @@ -487,7 +487,7 @@ void logging::internal::AppendLogLine(LogLevel log_level, void logging::AppendLogLine(LogLevel log_level, const char prefix_char, const std::string_view str) { - if (!internal::ShouldLog(log_level) || !str.size()) { + if (!ShouldLog(log_level) || !str.size()) { return; } logger_->AppendLine(xe::threading::current_thread_id(), prefix_char, diff --git a/src/xenia/base/logging.h b/src/xenia/base/logging.h index 208abd3e3..0ac0886b9 100644 --- a/src/xenia/base/logging.h +++ b/src/xenia/base/logging.h @@ -70,9 +70,16 @@ void InitializeLogging(const std::string_view app_name); void ShutdownLogging(); namespace logging { -namespace internal { + +constexpr char kPrefixCharError = '!'; +constexpr char kPrefixCharWarning = 'w'; +constexpr char kPrefixCharInfo = 'i'; +constexpr char kPrefixCharDebug = 'd'; bool ShouldLog(LogLevel log_level); + +namespace internal { + std::pair GetThreadBuffer(); void AppendLogLine(LogLevel log_level, const char prefix_char, size_t written); @@ -83,7 +90,7 @@ void AppendLogLine(LogLevel log_level, const char prefix_char, size_t written); template void AppendLogLineFormat(LogLevel log_level, const char prefix_char, const char* format, const 
Args&... args) { - if (!internal::ShouldLog(log_level)) { + if (!ShouldLog(log_level)) { return; } auto target = internal::GetThreadBuffer(); @@ -106,22 +113,26 @@ void FatalError(const std::string_view str); template void XELOGE(const char* format, const Args&... args) { - xe::logging::AppendLogLineFormat(xe::LogLevel::Error, '!', format, args...); + xe::logging::AppendLogLineFormat( + xe::LogLevel::Error, xe::logging::kPrefixCharError, format, args...); } template void XELOGW(const char* format, const Args&... args) { - xe::logging::AppendLogLineFormat(xe::LogLevel::Warning, 'w', format, args...); + xe::logging::AppendLogLineFormat( + xe::LogLevel::Warning, xe::logging::kPrefixCharWarning, format, args...); } template void XELOGI(const char* format, const Args&... args) { - xe::logging::AppendLogLineFormat(xe::LogLevel::Info, 'i', format, args...); + xe::logging::AppendLogLineFormat( + xe::LogLevel::Info, xe::logging::kPrefixCharInfo, format, args...); } template void XELOGD(const char* format, const Args&... 
args) { - xe::logging::AppendLogLineFormat(xe::LogLevel::Debug, 'd', format, args...); + xe::logging::AppendLogLineFormat( + xe::LogLevel::Debug, xe::logging::kPrefixCharDebug, format, args...); } template diff --git a/src/xenia/gpu/d3d12/d3d12_command_processor.cc b/src/xenia/gpu/d3d12/d3d12_command_processor.cc index 27a3141ec..a2c48e3bd 100644 --- a/src/xenia/gpu/d3d12/d3d12_command_processor.cc +++ b/src/xenia/gpu/d3d12/d3d12_command_processor.cc @@ -2099,9 +2099,9 @@ bool D3D12CommandProcessor::IssueDraw(xenos::PrimitiveType primitive_type, uint32_t index_count, IndexBufferInfo* index_buffer_info, bool major_mode_explicit) { -#if XE_UI_D3D12_FINE_GRAINED_DRAW_SCOPES +#if XE_GPU_FINE_GRAINED_DRAW_SCOPES SCOPE_profile_cpu_f("gpu"); -#endif // XE_UI_D3D12_FINE_GRAINED_DRAW_SCOPES +#endif // XE_GPU_FINE_GRAINED_DRAW_SCOPES ID3D12Device* device = GetD3D12Provider().GetDevice(); const RegisterFile& regs = *register_file_; @@ -2594,9 +2594,9 @@ void D3D12CommandProcessor::InitializeTrace() { } bool D3D12CommandProcessor::IssueCopy() { -#if XE_UI_D3D12_FINE_GRAINED_DRAW_SCOPES +#if XE_GPU_FINE_GRAINED_DRAW_SCOPES SCOPE_profile_cpu_f("gpu"); -#endif // XE_UI_D3D12_FINE_GRAINED_DRAW_SCOPES +#endif // XE_GPU_FINE_GRAINED_DRAW_SCOPES if (!BeginSubmission(true)) { return false; } @@ -2733,9 +2733,9 @@ void D3D12CommandProcessor::CheckSubmissionFence(uint64_t await_submission) { } bool D3D12CommandProcessor::BeginSubmission(bool is_guest_command) { -#if XE_UI_D3D12_FINE_GRAINED_DRAW_SCOPES +#if XE_GPU_FINE_GRAINED_DRAW_SCOPES SCOPE_profile_cpu_f("gpu"); -#endif // XE_UI_D3D12_FINE_GRAINED_DRAW_SCOPES +#endif // XE_GPU_FINE_GRAINED_DRAW_SCOPES if (device_removed_) { return false; @@ -3023,9 +3023,9 @@ void D3D12CommandProcessor::UpdateFixedFunctionState( const draw_util::ViewportInfo& viewport_info, const draw_util::Scissor& scissor, bool primitive_polygonal, reg::RB_DEPTHCONTROL normalized_depth_control) { -#if XE_UI_D3D12_FINE_GRAINED_DRAW_SCOPES +#if 
XE_GPU_FINE_GRAINED_DRAW_SCOPES SCOPE_profile_cpu_f("gpu"); -#endif // XE_UI_D3D12_FINE_GRAINED_DRAW_SCOPES +#endif // XE_GPU_FINE_GRAINED_DRAW_SCOPES // Viewport. D3D12_VIEWPORT viewport; @@ -3093,9 +3093,9 @@ void D3D12CommandProcessor::UpdateSystemConstantValues( const draw_util::ViewportInfo& viewport_info, uint32_t used_texture_mask, reg::RB_DEPTHCONTROL normalized_depth_control, uint32_t normalized_color_mask) { -#if XE_UI_D3D12_FINE_GRAINED_DRAW_SCOPES +#if XE_GPU_FINE_GRAINED_DRAW_SCOPES SCOPE_profile_cpu_f("gpu"); -#endif // XE_UI_D3D12_FINE_GRAINED_DRAW_SCOPES +#endif // XE_GPU_FINE_GRAINED_DRAW_SCOPES const RegisterFile& regs = *register_file_; auto pa_cl_clip_cntl = regs.Get(); @@ -3595,9 +3595,9 @@ bool D3D12CommandProcessor::UpdateBindings(const D3D12Shader* vertex_shader, ID3D12Device* device = provider.GetDevice(); const RegisterFile& regs = *register_file_; -#if XE_UI_D3D12_FINE_GRAINED_DRAW_SCOPES +#if XE_GPU_FINE_GRAINED_DRAW_SCOPES SCOPE_profile_cpu_f("gpu"); -#endif // XE_UI_D3D12_FINE_GRAINED_DRAW_SCOPES +#endif // XE_GPU_FINE_GRAINED_DRAW_SCOPES // Set the new root signature. 
if (current_graphics_root_signature_ != root_signature) { diff --git a/src/xenia/gpu/d3d12/d3d12_graphics_system.cc b/src/xenia/gpu/d3d12/d3d12_graphics_system.cc index b4e0d025d..c0f1c34f3 100644 --- a/src/xenia/gpu/d3d12/d3d12_graphics_system.cc +++ b/src/xenia/gpu/d3d12/d3d12_graphics_system.cc @@ -42,10 +42,10 @@ std::string D3D12GraphicsSystem::name() const { X_STATUS D3D12GraphicsSystem::Setup(cpu::Processor* processor, kernel::KernelState* kernel_state, ui::WindowedAppContext* app_context, - bool is_surface_required) { + bool with_presentation) { provider_ = xe::ui::d3d12::D3D12Provider::Create(); return GraphicsSystem::Setup(processor, kernel_state, app_context, - is_surface_required); + with_presentation); } std::unique_ptr diff --git a/src/xenia/gpu/d3d12/d3d12_graphics_system.h b/src/xenia/gpu/d3d12/d3d12_graphics_system.h index 249cc3ffa..40f690699 100644 --- a/src/xenia/gpu/d3d12/d3d12_graphics_system.h +++ b/src/xenia/gpu/d3d12/d3d12_graphics_system.h @@ -31,7 +31,7 @@ class D3D12GraphicsSystem : public GraphicsSystem { X_STATUS Setup(cpu::Processor* processor, kernel::KernelState* kernel_state, ui::WindowedAppContext* app_context, - bool is_surface_required) override; + bool with_presentation) override; protected: std::unique_ptr CreateCommandProcessor() override; diff --git a/src/xenia/gpu/d3d12/d3d12_texture_cache.cc b/src/xenia/gpu/d3d12/d3d12_texture_cache.cc index 9e3b794d1..984016cb8 100644 --- a/src/xenia/gpu/d3d12/d3d12_texture_cache.cc +++ b/src/xenia/gpu/d3d12/d3d12_texture_cache.cc @@ -22,6 +22,7 @@ #include "xenia/base/profiling.h" #include "xenia/gpu/d3d12/d3d12_command_processor.h" #include "xenia/gpu/d3d12/d3d12_shared_memory.h" +#include "xenia/gpu/gpu_flags.h" #include "xenia/gpu/texture_info.h" #include "xenia/gpu/texture_util.h" #include "xenia/gpu/xenos.h" @@ -766,9 +767,9 @@ void D3D12TextureCache::EndFrame() { } void D3D12TextureCache::RequestTextures(uint32_t used_texture_mask) { -#if XE_UI_D3D12_FINE_GRAINED_DRAW_SCOPES +#if 
XE_GPU_FINE_GRAINED_DRAW_SCOPES SCOPE_profile_cpu_f("gpu"); -#endif // XE_UI_D3D12_FINE_GRAINED_DRAW_SCOPES +#endif // XE_GPU_FINE_GRAINED_DRAW_SCOPES TextureCache::RequestTextures(used_texture_mask); @@ -911,12 +912,12 @@ void D3D12TextureCache::WriteActiveTextureBindfulSRV( } auto device = provider.GetDevice(); { -#if XE_UI_D3D12_FINE_GRAINED_DRAW_SCOPES +#if XE_GPU_FINE_GRAINED_DRAW_SCOPES SCOPE_profile_cpu_i( "gpu", "xe::gpu::d3d12::D3D12TextureCache::WriteActiveTextureBindfulSRV->" "CopyDescriptorsSimple"); -#endif // XE_UI_D3D12_FINE_GRAINED_DRAW_SCOPES +#endif // XE_GPU_FINE_GRAINED_DRAW_SCOPES device->CopyDescriptorsSimple(1, handle, source_handle, D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV); } diff --git a/src/xenia/gpu/d3d12/deferred_command_list.cc b/src/xenia/gpu/d3d12/deferred_command_list.cc index 581d1b71a..08c86ef2a 100644 --- a/src/xenia/gpu/d3d12/deferred_command_list.cc +++ b/src/xenia/gpu/d3d12/deferred_command_list.cc @@ -13,6 +13,7 @@ #include "xenia/base/math.h" #include "xenia/base/profiling.h" #include "xenia/gpu/d3d12/d3d12_command_processor.h" +#include "xenia/gpu/gpu_flags.h" namespace xe { namespace gpu { @@ -28,9 +29,9 @@ void DeferredCommandList::Reset() { command_stream_.clear(); } void DeferredCommandList::Execute(ID3D12GraphicsCommandList* command_list, ID3D12GraphicsCommandList1* command_list_1) { -#if XE_UI_D3D12_FINE_GRAINED_DRAW_SCOPES +#if XE_GPU_FINE_GRAINED_DRAW_SCOPES SCOPE_profile_cpu_f("gpu"); -#endif // XE_UI_D3D12_FINE_GRAINED_DRAW_SCOPES +#endif // XE_GPU_FINE_GRAINED_DRAW_SCOPES const uintmax_t* stream = command_stream_.data(); size_t stream_remaining = command_stream_.size(); ID3D12PipelineState* current_pipeline_state = nullptr; diff --git a/src/xenia/gpu/d3d12/pipeline_cache.cc b/src/xenia/gpu/d3d12/pipeline_cache.cc index 83b195c1e..ec7c0b249 100644 --- a/src/xenia/gpu/d3d12/pipeline_cache.cc +++ b/src/xenia/gpu/d3d12/pipeline_cache.cc @@ -965,9 +965,9 @@ bool PipelineCache::ConfigurePipeline( uint32_t 
bound_depth_and_color_render_target_bits, const uint32_t* bound_depth_and_color_render_target_formats, void** pipeline_handle_out, ID3D12RootSignature** root_signature_out) { -#if XE_UI_D3D12_FINE_GRAINED_DRAW_SCOPES +#if XE_GPU_FINE_GRAINED_DRAW_SCOPES SCOPE_profile_cpu_f("gpu"); -#endif // XE_UI_D3D12_FINE_GRAINED_DRAW_SCOPES +#endif // XE_GPU_FINE_GRAINED_DRAW_SCOPES assert_not_null(pipeline_handle_out); assert_not_null(root_signature_out); diff --git a/src/xenia/gpu/gpu_flags.h b/src/xenia/gpu/gpu_flags.h index ecdc73ca7..e057faaa6 100644 --- a/src/xenia/gpu/gpu_flags.h +++ b/src/xenia/gpu/gpu_flags.h @@ -26,4 +26,6 @@ DECLARE_bool(half_pixel_offset); DECLARE_int32(query_occlusion_fake_sample_count); +#define XE_GPU_FINE_GRAINED_DRAW_SCOPES 1 + #endif // XENIA_GPU_GPU_FLAGS_H_ diff --git a/src/xenia/gpu/graphics_system.cc b/src/xenia/gpu/graphics_system.cc index 3c04e0fff..aa96166ef 100644 --- a/src/xenia/gpu/graphics_system.cc +++ b/src/xenia/gpu/graphics_system.cc @@ -55,13 +55,13 @@ GraphicsSystem::~GraphicsSystem() = default; X_STATUS GraphicsSystem::Setup(cpu::Processor* processor, kernel::KernelState* kernel_state, ui::WindowedAppContext* app_context, - [[maybe_unused]] bool is_surface_required) { + bool with_presentation) { memory_ = processor->memory(); processor_ = processor; kernel_state_ = kernel_state; app_context_ = app_context; - if (provider_) { + if (with_presentation && provider_) { // Safe if either the UI thread call or the presenter creation fails. 
if (app_context_) { app_context_->CallInUIThreadSynchronous([this]() { diff --git a/src/xenia/gpu/graphics_system.h b/src/xenia/gpu/graphics_system.h index 0434a5619..8994b20bc 100644 --- a/src/xenia/gpu/graphics_system.h +++ b/src/xenia/gpu/graphics_system.h @@ -51,7 +51,7 @@ class GraphicsSystem { virtual X_STATUS Setup(cpu::Processor* processor, kernel::KernelState* kernel_state, ui::WindowedAppContext* app_context, - bool is_surface_required); + bool with_presentation); virtual void Shutdown(); // May be called from any thread any number of times, even during recovery diff --git a/src/xenia/gpu/null/null_graphics_system.cc b/src/xenia/gpu/null/null_graphics_system.cc index 57d5e958f..503afb150 100644 --- a/src/xenia/gpu/null/null_graphics_system.cc +++ b/src/xenia/gpu/null/null_graphics_system.cc @@ -24,12 +24,12 @@ NullGraphicsSystem::~NullGraphicsSystem() {} X_STATUS NullGraphicsSystem::Setup(cpu::Processor* processor, kernel::KernelState* kernel_state, ui::WindowedAppContext* app_context, - bool is_surface_required) { + bool with_presentation) { // This is a null graphics system, but we still setup vulkan because UI needs // it through us :| - provider_ = xe::ui::vulkan::VulkanProvider::Create(is_surface_required); + provider_ = xe::ui::vulkan::VulkanProvider::Create(false, with_presentation); return GraphicsSystem::Setup(processor, kernel_state, app_context, - is_surface_required); + with_presentation); } std::unique_ptr NullGraphicsSystem::CreateCommandProcessor() { diff --git a/src/xenia/gpu/null/null_graphics_system.h b/src/xenia/gpu/null/null_graphics_system.h index d5b8d32b9..5bc36ebd0 100644 --- a/src/xenia/gpu/null/null_graphics_system.h +++ b/src/xenia/gpu/null/null_graphics_system.h @@ -30,7 +30,7 @@ class NullGraphicsSystem : public GraphicsSystem { X_STATUS Setup(cpu::Processor* processor, kernel::KernelState* kernel_state, ui::WindowedAppContext* app_context, - bool is_surface_required) override; + bool with_presentation) override; private: 
std::unique_ptr CreateCommandProcessor() override; diff --git a/src/xenia/gpu/spirv_shader_translator.cc b/src/xenia/gpu/spirv_shader_translator.cc index 399b7079f..a6c1a7f07 100644 --- a/src/xenia/gpu/spirv_shader_translator.cc +++ b/src/xenia/gpu/spirv_shader_translator.cc @@ -43,27 +43,34 @@ SpirvShaderTranslator::Features::Features(bool all) demote_to_helper_invocation(all) {} SpirvShaderTranslator::Features::Features( - const ui::vulkan::VulkanProvider::DeviceInfo& device_info) - : max_storage_buffer_range(device_info.maxStorageBufferRange), - full_draw_index_uint32(device_info.fullDrawIndexUint32), + const ui::vulkan::VulkanDevice* const vulkan_device) + : max_storage_buffer_range( + vulkan_device->properties().maxStorageBufferRange), + full_draw_index_uint32(vulkan_device->properties().fullDrawIndexUint32), vertex_pipeline_stores_and_atomics( - device_info.vertexPipelineStoresAndAtomics), - fragment_stores_and_atomics(device_info.fragmentStoresAndAtomics), - clip_distance(device_info.shaderClipDistance), - cull_distance(device_info.shaderCullDistance), - image_view_format_swizzle(device_info.imageViewFormatSwizzle), + vulkan_device->properties().vertexPipelineStoresAndAtomics), + fragment_stores_and_atomics( + vulkan_device->properties().fragmentStoresAndAtomics), + clip_distance(vulkan_device->properties().shaderClipDistance), + cull_distance(vulkan_device->properties().shaderCullDistance), + image_view_format_swizzle( + vulkan_device->properties().imageViewFormatSwizzle), signed_zero_inf_nan_preserve_float32( - device_info.shaderSignedZeroInfNanPreserveFloat32), - denorm_flush_to_zero_float32(device_info.shaderDenormFlushToZeroFloat32), - rounding_mode_rte_float32(device_info.shaderRoundingModeRTEFloat32), + vulkan_device->properties().shaderSignedZeroInfNanPreserveFloat32), + denorm_flush_to_zero_float32( + vulkan_device->properties().shaderDenormFlushToZeroFloat32), + rounding_mode_rte_float32( + vulkan_device->properties().shaderRoundingModeRTEFloat32), 
fragment_shader_sample_interlock( - device_info.fragmentShaderSampleInterlock), - demote_to_helper_invocation(device_info.shaderDemoteToHelperInvocation) { - if (device_info.apiVersion >= VK_MAKE_API_VERSION(0, 1, 2, 0)) { + vulkan_device->properties().fragmentShaderSampleInterlock), + demote_to_helper_invocation( + vulkan_device->properties().shaderDemoteToHelperInvocation) { + const uint32_t vulkan_api_version = vulkan_device->properties().apiVersion; + if (vulkan_api_version >= VK_MAKE_API_VERSION(0, 1, 2, 0)) { spirv_version = spv::Spv_1_5; - } else if (device_info.ext_1_2_VK_KHR_spirv_1_4) { + } else if (vulkan_device->extensions().ext_1_2_KHR_spirv_1_4) { spirv_version = spv::Spv_1_4; - } else if (device_info.apiVersion >= VK_MAKE_API_VERSION(0, 1, 1, 0)) { + } else if (vulkan_api_version >= VK_MAKE_API_VERSION(0, 1, 1, 0)) { spirv_version = spv::Spv_1_3; } else { spirv_version = spv::Spv_1_0; diff --git a/src/xenia/gpu/spirv_shader_translator.h b/src/xenia/gpu/spirv_shader_translator.h index aefb00bf6..a201686e0 100644 --- a/src/xenia/gpu/spirv_shader_translator.h +++ b/src/xenia/gpu/spirv_shader_translator.h @@ -20,7 +20,7 @@ #include "xenia/gpu/shader_translator.h" #include "xenia/gpu/spirv_builder.h" #include "xenia/gpu/xenos.h" -#include "xenia/ui/vulkan/vulkan_provider.h" +#include "xenia/ui/vulkan/vulkan_device.h" namespace xe { namespace gpu { @@ -320,8 +320,7 @@ class SpirvShaderTranslator : public ShaderTranslator { static constexpr uint32_t kSpirvMagicToolId = 26; struct Features { - explicit Features( - const ui::vulkan::VulkanProvider::DeviceInfo& device_info); + explicit Features(const ui::vulkan::VulkanDevice* vulkan_device); explicit Features(bool all = false); unsigned int spirv_version; diff --git a/src/xenia/gpu/vulkan/deferred_command_buffer.cc b/src/xenia/gpu/vulkan/deferred_command_buffer.cc index 65c80cf23..00f398363 100644 --- a/src/xenia/gpu/vulkan/deferred_command_buffer.cc +++ b/src/xenia/gpu/vulkan/deferred_command_buffer.cc @@ 
-16,6 +16,7 @@ #include "xenia/base/assert.h" #include "xenia/base/math.h" #include "xenia/base/profiling.h" +#include "xenia/gpu/gpu_flags.h" #include "xenia/gpu/vulkan/vulkan_command_processor.h" namespace xe { @@ -31,12 +32,12 @@ DeferredCommandBuffer::DeferredCommandBuffer( void DeferredCommandBuffer::Reset() { command_stream_.clear(); } void DeferredCommandBuffer::Execute(VkCommandBuffer command_buffer) { -#if XE_UI_VULKAN_FINE_GRAINED_DRAW_SCOPES +#if XE_GPU_FINE_GRAINED_DRAW_SCOPES SCOPE_profile_cpu_f("gpu"); -#endif // XE_UI_VULKAN_FINE_GRAINED_DRAW_SCOPES +#endif // XE_GPU_FINE_GRAINED_DRAW_SCOPES - const ui::vulkan::VulkanProvider::DeviceFunctions& dfn = - command_processor_.GetVulkanProvider().dfn(); + const ui::vulkan::VulkanDevice::Functions& dfn = + command_processor_.GetVulkanDevice()->functions(); const uintmax_t* stream = command_stream_.data(); size_t stream_remaining = command_stream_.size(); while (stream_remaining) { diff --git a/src/xenia/gpu/vulkan/deferred_command_buffer.h b/src/xenia/gpu/vulkan/deferred_command_buffer.h index 186639c86..d0d68c3bc 100644 --- a/src/xenia/gpu/vulkan/deferred_command_buffer.h +++ b/src/xenia/gpu/vulkan/deferred_command_buffer.h @@ -13,10 +13,11 @@ #include #include #include +#include #include "xenia/base/assert.h" #include "xenia/base/math.h" -#include "xenia/ui/vulkan/vulkan_provider.h" +#include "xenia/ui/vulkan/vulkan_api.h" namespace xe { namespace gpu { diff --git a/src/xenia/gpu/vulkan/vulkan_command_processor.cc b/src/xenia/gpu/vulkan/vulkan_command_processor.cc index ee1461d5e..48fe399d3 100644 --- a/src/xenia/gpu/vulkan/vulkan_command_processor.cc +++ b/src/xenia/gpu/vulkan/vulkan_command_processor.cc @@ -33,7 +33,6 @@ #include "xenia/gpu/vulkan/vulkan_shared_memory.h" #include "xenia/gpu/xenos.h" #include "xenia/ui/vulkan/vulkan_presenter.h" -#include "xenia/ui/vulkan/vulkan_provider.h" #include "xenia/ui/vulkan/vulkan_util.h" namespace xe { @@ -72,18 +71,21 @@ 
VulkanCommandProcessor::VulkanCommandProcessor( : CommandProcessor(graphics_system, kernel_state), deferred_command_buffer_(*this), transient_descriptor_allocator_uniform_buffer_( - *static_cast( - graphics_system->provider()), + static_cast( + graphics_system->provider()) + ->vulkan_device(), &kDescriptorPoolSizeUniformBuffer, 1, kLinkedTypeDescriptorPoolSetCount), transient_descriptor_allocator_storage_buffer_( - *static_cast( - graphics_system->provider()), + static_cast( + graphics_system->provider()) + ->vulkan_device(), &kDescriptorPoolSizeStorageBuffer, 1, kLinkedTypeDescriptorPoolSetCount), transient_descriptor_allocator_textures_( - *static_cast( - graphics_system->provider()), + static_cast( + graphics_system->provider()) + ->vulkan_device(), kDescriptorPoolSizeTextures, uint32_t(xe::countof(kDescriptorPoolSizeTextures)), kLinkedTypeDescriptorPoolSetCount) {} @@ -135,11 +137,11 @@ bool VulkanCommandProcessor::SetupContext() { return false; } - const ui::vulkan::VulkanProvider& provider = GetVulkanProvider(); - const ui::vulkan::VulkanProvider::DeviceFunctions& dfn = provider.dfn(); - VkDevice device = provider.device(); - const ui::vulkan::VulkanProvider::DeviceInfo& device_info = - provider.device_info(); + const ui::vulkan::VulkanDevice* const vulkan_device = GetVulkanDevice(); + const ui::vulkan::VulkanDevice::Functions& dfn = vulkan_device->functions(); + const VkDevice device = vulkan_device->device(); + const ui::vulkan::VulkanDevice::Properties& device_properties = + vulkan_device->properties(); // The unconditional inclusion of the vertex shader stage also covers the case // of manual index / factor buffer fetch (the system constants and the shared @@ -148,12 +150,12 @@ bool VulkanCommandProcessor::SetupContext() { guest_shader_pipeline_stages_ = VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT; guest_shader_vertex_stages_ = VK_SHADER_STAGE_VERTEX_BIT; - if (device_info.tessellationShader) { + if 
(device_properties.tessellationShader) { guest_shader_pipeline_stages_ |= VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT; guest_shader_vertex_stages_ |= VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT; } - if (!device_info.vertexPipelineStoresAndAtomics) { + if (!device_properties.vertexPipelineStoresAndAtomics) { // For memory export from vertex shaders converted to compute shaders. guest_shader_pipeline_stages_ |= VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT; guest_shader_vertex_stages_ |= VK_SHADER_STAGE_COMPUTE_BIT; @@ -162,10 +164,10 @@ bool VulkanCommandProcessor::SetupContext() { // 16384 is bigger than any single uniform buffer that Xenia needs, but is the // minimum maxUniformBufferRange, thus the safe minimum amount. uniform_buffer_pool_ = std::make_unique( - provider, VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT, + vulkan_device, VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT, xe::align(std::max(ui::GraphicsUploadBufferPool::kDefaultPageSize, size_t(16384)), - size_t(device_info.minUniformBufferOffsetAlignment))); + size_t(device_properties.minUniformBufferOffsetAlignment))); // Descriptor set layouts that don't depend on the setup of other subsystems. VkShaderStageFlags guest_shader_stages = @@ -199,9 +201,10 @@ bool VulkanCommandProcessor::SetupContext() { [SpirvShaderTranslator::kConstantBufferSystem] .stageFlags = guest_shader_stages | - (device_info.tessellationShader ? VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT - : 0) | - (device_info.geometryShader ? VK_SHADER_STAGE_GEOMETRY_BIT : 0); + (device_properties.tessellationShader + ? VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT + : 0) | + (device_properties.geometryShader ? 
VK_SHADER_STAGE_GEOMETRY_BIT : 0); descriptor_set_layout_bindings_constants [SpirvShaderTranslator::kConstantBufferFloatVertex] .stageFlags = guest_shader_vertex_stages_; @@ -280,7 +283,7 @@ bool VulkanCommandProcessor::SetupContext() { uint32_t shared_memory_binding_count_log2 = SpirvShaderTranslator::GetSharedMemoryStorageBufferCountLog2( - device_info.maxStorageBufferRange); + device_properties.maxStorageBufferRange); uint32_t shared_memory_binding_count = UINT32_C(1) << shared_memory_binding_count_log2; @@ -484,14 +487,14 @@ bool VulkanCommandProcessor::SetupContext() { &gamma_ramp_host_visible_buffer_memory_requirements); uint32_t gamma_ramp_host_visible_buffer_memory_types = gamma_ramp_host_visible_buffer_memory_requirements.memoryTypeBits & - (device_info.memory_types_device_local & - device_info.memory_types_host_visible); + (vulkan_device->memory_types().device_local & + vulkan_device->memory_types().host_visible); VkMemoryAllocateInfo gamma_ramp_host_visible_buffer_memory_allocate_info; // Prefer a host-uncached (because it's write-only) memory type, but try a // host-cached host-visible device-local one as well. 
if (xe::bit_scan_forward( gamma_ramp_host_visible_buffer_memory_types & - ~device_info.memory_types_host_cached, + ~vulkan_device->memory_types().host_cached, &(gamma_ramp_host_visible_buffer_memory_allocate_info .memoryTypeIndex)) || xe::bit_scan_forward( @@ -508,7 +511,7 @@ bool VulkanCommandProcessor::SetupContext() { gamma_ramp_host_visible_buffer_memory_requirements.size; VkMemoryDedicatedAllocateInfo gamma_ramp_host_visible_buffer_memory_dedicated_allocate_info; - if (device_info.ext_1_1_VK_KHR_dedicated_allocation) { + if (vulkan_device->extensions().ext_1_1_KHR_dedicated_allocation) { gamma_ramp_host_visible_buffer_memory_allocate_info_last->pNext = &gamma_ramp_host_visible_buffer_memory_dedicated_allocate_info; gamma_ramp_host_visible_buffer_memory_allocate_info_last = @@ -555,7 +558,7 @@ bool VulkanCommandProcessor::SetupContext() { if (gamma_ramp_buffer_ == VK_NULL_HANDLE) { // Create separate buffers for the shader and uploading. if (!ui::vulkan::util::CreateDedicatedAllocationBuffer( - provider, kGammaRampSize, + vulkan_device, kGammaRampSize, VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT, ui::vulkan::util::MemoryPurpose::kDeviceLocal, gamma_ramp_buffer_, @@ -564,7 +567,7 @@ bool VulkanCommandProcessor::SetupContext() { return false; } if (!ui::vulkan::util::CreateDedicatedAllocationBuffer( - provider, kGammaRampSize * kMaxFramesInFlight, + vulkan_device, kGammaRampSize * kMaxFramesInFlight, VK_BUFFER_USAGE_TRANSFER_SRC_BIT, ui::vulkan::util::MemoryPurpose::kUpload, gamma_ramp_upload_buffer_, gamma_ramp_upload_buffer_memory_, &gamma_ramp_upload_memory_type_, @@ -854,11 +857,11 @@ bool VulkanCommandProcessor::SetupContext() { bool swap_apply_gamma_pixel_shaders_created = (swap_apply_gamma_pixel_shaders[kSwapApplyGammaPixelShader256EntryTable] = ui::vulkan::util::CreateShaderModule( - provider, shaders::apply_gamma_table_ps, + vulkan_device, shaders::apply_gamma_table_ps, sizeof(shaders::apply_gamma_table_ps))) != 
VK_NULL_HANDLE && (swap_apply_gamma_pixel_shaders[kSwapApplyGammaPixelShaderPWL] = ui::vulkan::util::CreateShaderModule( - provider, shaders::apply_gamma_pwl_ps, + vulkan_device, shaders::apply_gamma_pwl_ps, sizeof(shaders::apply_gamma_pwl_ps))) != VK_NULL_HANDLE; if (!swap_apply_gamma_pixel_shaders_created) { XELOGE("Failed to create the gamma ramp application pixel shader modules"); @@ -879,7 +882,8 @@ bool VulkanCommandProcessor::SetupContext() { swap_apply_gamma_pipeline_stages[0].flags = 0; swap_apply_gamma_pipeline_stages[0].stage = VK_SHADER_STAGE_VERTEX_BIT; swap_apply_gamma_pipeline_stages[0].module = - ui::vulkan::util::CreateShaderModule(provider, shaders::fullscreen_cw_vs, + ui::vulkan::util::CreateShaderModule(vulkan_device, + shaders::fullscreen_cw_vs, sizeof(shaders::fullscreen_cw_vs)); if (swap_apply_gamma_pipeline_stages[0].module == VK_NULL_HANDLE) { XELOGE("Failed to create the gamma ramp application vertex shader module"); @@ -1037,9 +1041,9 @@ bool VulkanCommandProcessor::SetupContext() { void VulkanCommandProcessor::ShutdownContext() { AwaitAllQueueOperationsCompletion(); - const ui::vulkan::VulkanProvider& provider = GetVulkanProvider(); - const ui::vulkan::VulkanProvider::DeviceFunctions& dfn = provider.dfn(); - VkDevice device = provider.device(); + const ui::vulkan::VulkanDevice* const vulkan_device = GetVulkanDevice(); + const ui::vulkan::VulkanDevice::Functions& dfn = vulkan_device->functions(); + const VkDevice device = vulkan_device->device(); DestroyScratchBuffer(); @@ -1289,9 +1293,10 @@ void VulkanCommandProcessor::IssueSwap(uint32_t frontbuffer_ptr, context); uint64_t guest_output_image_version = vulkan_context.image_version(); - const ui::vulkan::VulkanProvider& provider = GetVulkanProvider(); - const ui::vulkan::VulkanProvider::DeviceFunctions& dfn = provider.dfn(); - VkDevice device = provider.device(); + const ui::vulkan::VulkanDevice* const vulkan_device = GetVulkanDevice(); + const ui::vulkan::VulkanDevice::Functions& dfn = + 
vulkan_device->functions(); + const VkDevice device = vulkan_device->device(); uint32_t swap_frame_index = uint32_t(frame_current_ % kMaxFramesInFlight); @@ -1358,7 +1363,7 @@ void VulkanCommandProcessor::IssueSwap(uint32_t frontbuffer_ptr, bool gamma_ramp_has_upload_buffer = gamma_ramp_upload_buffer_memory_ != VK_NULL_HANDLE; ui::vulkan::util::FlushMappedMemoryRange( - provider, + vulkan_device, gamma_ramp_has_upload_buffer ? gamma_ramp_upload_buffer_memory_ : gamma_ramp_buffer_memory_, gamma_ramp_upload_memory_type_, gamma_ramp_upload_offset, @@ -1816,9 +1821,9 @@ VkDescriptorSet VulkanCommandProcessor::AllocateSingleTransientDescriptor( descriptor_set = transient_descriptors_free.back(); transient_descriptors_free.pop_back(); } else { - const ui::vulkan::VulkanProvider& provider = GetVulkanProvider(); - const ui::vulkan::VulkanProvider::DeviceFunctions& dfn = provider.dfn(); - VkDevice device = provider.device(); + const ui::vulkan::VulkanDevice* const vulkan_device = GetVulkanDevice(); + const ui::vulkan::VulkanDevice::Functions& dfn = vulkan_device->functions(); + const VkDevice device = vulkan_device->device(); bool is_storage_buffer = transient_descriptor_layout == SingleTransientDescriptorLayout::kStorageBufferCompute; @@ -1863,9 +1868,9 @@ VkDescriptorSetLayout VulkanCommandProcessor::GetTextureDescriptorSetLayout( return it_existing->second; } - const ui::vulkan::VulkanProvider& provider = GetVulkanProvider(); - const ui::vulkan::VulkanProvider::DeviceFunctions& dfn = provider.dfn(); - VkDevice device = provider.device(); + const ui::vulkan::VulkanDevice* const vulkan_device = GetVulkanDevice(); + const ui::vulkan::VulkanDevice::Functions& dfn = vulkan_device->functions(); + const VkDevice device = vulkan_device->device(); descriptor_set_layout_bindings_.clear(); descriptor_set_layout_bindings_.reserve(binding_count); @@ -1959,9 +1964,9 @@ VulkanCommandProcessor::GetPipelineLayout(size_t texture_count_pixel, 
descriptor_set_layouts[SpirvShaderTranslator::kDescriptorSetTexturesPixel] = descriptor_set_layout_textures_pixel; - const ui::vulkan::VulkanProvider& provider = GetVulkanProvider(); - const ui::vulkan::VulkanProvider::DeviceFunctions& dfn = provider.dfn(); - VkDevice device = provider.device(); + const ui::vulkan::VulkanDevice* const vulkan_device = GetVulkanDevice(); + const ui::vulkan::VulkanDevice::Functions& dfn = vulkan_device->functions(); + const VkDevice device = vulkan_device->device(); VkPipelineLayoutCreateInfo pipeline_layout_create_info; pipeline_layout_create_info.sType = @@ -2019,14 +2024,14 @@ VulkanCommandProcessor::AcquireScratchGpuBuffer( size = xe::align(size, kScratchBufferSizeIncrement); - const ui::vulkan::VulkanProvider& provider = GetVulkanProvider(); + const ui::vulkan::VulkanDevice* const vulkan_device = GetVulkanDevice(); VkDeviceMemory new_scratch_buffer_memory; VkBuffer new_scratch_buffer; // VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_STORAGE_BUFFER_BIT for // texture loading. 
if (!ui::vulkan::util::CreateDedicatedAllocationBuffer( - provider, size, + vulkan_device, size, VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_STORAGE_BUFFER_BIT, ui::vulkan::util::MemoryPurpose::kDeviceLocal, new_scratch_buffer, new_scratch_buffer_memory)) { @@ -2037,8 +2042,8 @@ VulkanCommandProcessor::AcquireScratchGpuBuffer( } if (submission_completed_ >= scratch_buffer_last_usage_submission_) { - const ui::vulkan::VulkanProvider::DeviceFunctions& dfn = provider.dfn(); - VkDevice device = provider.device(); + const ui::vulkan::VulkanDevice::Functions& dfn = vulkan_device->functions(); + const VkDevice device = vulkan_device->device(); if (scratch_buffer_ != VK_NULL_HANDLE) { dfn.vkDestroyBuffer(device, scratch_buffer_, nullptr); } @@ -2153,9 +2158,9 @@ bool VulkanCommandProcessor::IssueDraw(xenos::PrimitiveType prim_type, uint32_t index_count, IndexBufferInfo* index_buffer_info, bool major_mode_explicit) { -#if XE_UI_VULKAN_FINE_GRAINED_DRAW_SCOPES +#if XE_GPU_FINE_GRAINED_DRAW_SCOPES SCOPE_profile_cpu_f("gpu"); -#endif // XE_UI_VULKAN_FINE_GRAINED_DRAW_SCOPES +#endif // XE_GPU_FINE_GRAINED_DRAW_SCOPES const RegisterFile& regs = *register_file_; @@ -2165,8 +2170,8 @@ bool VulkanCommandProcessor::IssueDraw(xenos::PrimitiveType prim_type, return IssueCopy(); } - const ui::vulkan::VulkanProvider::DeviceInfo& device_info = - GetVulkanProvider().device_info(); + const ui::vulkan::VulkanDevice::Properties& device_properties = + GetVulkanDevice()->properties(); memexport_ranges_.clear(); @@ -2182,7 +2187,7 @@ bool VulkanCommandProcessor::IssueDraw(xenos::PrimitiveType prim_type, // to a compute shader and dispatch it after the draw if the draw doesn't use // tessellation. 
if (vertex_shader->memexport_eM_written() != 0 && - device_info.vertexPipelineStoresAndAtomics) { + device_properties.vertexPipelineStoresAndAtomics) { draw_util::AddMemExportRanges(regs, *vertex_shader, memexport_ranges_); } @@ -2213,7 +2218,7 @@ bool VulkanCommandProcessor::IssueDraw(xenos::PrimitiveType prim_type, } } if (pixel_shader && pixel_shader->memexport_eM_written() != 0 && - device_info.fragmentStoresAndAtomics) { + device_properties.fragmentStoresAndAtomics) { draw_util::AddMemExportRanges(regs, *pixel_shader, memexport_ranges_); } @@ -2453,9 +2458,9 @@ bool VulkanCommandProcessor::IssueDraw(xenos::PrimitiveType prim_type, // interlocks case completely - apply the viewport and the scissor offset // directly to pixel address and to things like ps_param_gen. draw_util::GetHostViewportInfo( - regs, 1, 1, false, device_info.maxViewportDimensions[0], - device_info.maxViewportDimensions[1], true, normalized_depth_control, - false, host_render_targets_used, + regs, 1, 1, false, device_properties.maxViewportDimensions[0], + device_properties.maxViewportDimensions[1], true, + normalized_depth_control, false, host_render_targets_used, pixel_shader && pixel_shader->writes_depth(), viewport_info); // Update dynamic graphics pipeline state. @@ -2468,7 +2473,7 @@ bool VulkanCommandProcessor::IssueDraw(xenos::PrimitiveType prim_type, // indirectly in the vertex shader if full 32-bit indices are not supported by // the host. 
bool shader_32bit_index_dma = - !device_info.fullDrawIndexUint32 && + !device_properties.fullDrawIndexUint32 && primitive_processing_result.index_buffer_type == PrimitiveProcessor::ProcessedIndexBufferType::kGuestDMA && vgt_draw_initiator.index_size == xenos::IndexFormat::kInt32 && @@ -2619,9 +2624,9 @@ bool VulkanCommandProcessor::IssueDraw(xenos::PrimitiveType prim_type, } bool VulkanCommandProcessor::IssueCopy() { -#if XE_UI_VULKAN_FINE_GRAINED_DRAW_SCOPES +#if XE_GPU_FINE_GRAINED_DRAW_SCOPES SCOPE_profile_cpu_f("gpu"); -#endif // XE_UI_VULKAN_FINE_GRAINED_DRAW_SCOPES +#endif // XE_GPU_FINE_GRAINED_DRAW_SCOPES if (!BeginSubmission(true)) { return false; @@ -2672,9 +2677,9 @@ void VulkanCommandProcessor::CheckSubmissionFenceAndDeviceLoss( await_submission = GetCurrentSubmission() - 1; } - const ui::vulkan::VulkanProvider& provider = GetVulkanProvider(); - const ui::vulkan::VulkanProvider::DeviceFunctions& dfn = provider.dfn(); - VkDevice device = provider.device(); + const ui::vulkan::VulkanDevice* const vulkan_device = GetVulkanDevice(); + const ui::vulkan::VulkanDevice::Functions& dfn = vulkan_device->functions(); + const VkDevice device = vulkan_device->device(); size_t fences_total = submissions_in_flight_fences_.size(); size_t fences_awaited = 0; @@ -2787,9 +2792,9 @@ void VulkanCommandProcessor::CheckSubmissionFenceAndDeviceLoss( } bool VulkanCommandProcessor::BeginSubmission(bool is_guest_command) { -#if XE_UI_VULKAN_FINE_GRAINED_DRAW_SCOPES +#if XE_GPU_FINE_GRAINED_DRAW_SCOPES SCOPE_profile_cpu_f("gpu"); -#endif // XE_UI_VULKAN_FINE_GRAINED_DRAW_SCOPES +#endif // XE_GPU_FINE_GRAINED_DRAW_SCOPES if (device_lost_) { return false; @@ -2935,9 +2940,9 @@ bool VulkanCommandProcessor::BeginSubmission(bool is_guest_command) { } bool VulkanCommandProcessor::EndSubmission(bool is_swap) { - ui::vulkan::VulkanProvider& provider = GetVulkanProvider(); - const ui::vulkan::VulkanProvider::DeviceFunctions& dfn = provider.dfn(); - VkDevice device = provider.device(); + 
const ui::vulkan::VulkanDevice* const vulkan_device = GetVulkanDevice(); + const ui::vulkan::VulkanDevice::Functions& dfn = vulkan_device->functions(); + const VkDevice device = vulkan_device->device(); // Make sure everything needed for submitting exist. if (submission_open_) { @@ -2977,7 +2982,7 @@ bool VulkanCommandProcessor::EndSubmission(bool is_swap) { command_pool_create_info.pNext = nullptr; command_pool_create_info.flags = VK_COMMAND_POOL_CREATE_TRANSIENT_BIT; command_pool_create_info.queueFamilyIndex = - provider.queue_family_graphics_compute(); + vulkan_device->queue_family_graphics_compute(); if (dfn.vkCreateCommandPool(device, &command_pool_create_info, nullptr, &command_buffer.pool) != VK_SUCCESS) { XELOGE("Failed to create a Vulkan command pool"); @@ -3053,10 +3058,11 @@ bool VulkanCommandProcessor::EndSubmission(bool is_swap) { bind_sparse_info.pSignalSemaphores = &bind_sparse_semaphore; VkResult bind_sparse_result; { - ui::vulkan::VulkanProvider::QueueAcquisition queue_acquisition( - provider.AcquireQueue(provider.queue_family_sparse_binding(), 0)); + ui::vulkan::VulkanDevice::Queue::Acquisition queue_acquisition = + vulkan_device->AcquireQueue( + vulkan_device->queue_family_sparse_binding(), 0); bind_sparse_result = dfn.vkQueueBindSparse( - queue_acquisition.queue, 1, &bind_sparse_info, VK_NULL_HANDLE); + queue_acquisition.queue(), 1, &bind_sparse_info, VK_NULL_HANDLE); } if (bind_sparse_result != VK_SUCCESS) { XELOGE("Failed to submit Vulkan sparse binds"); @@ -3123,10 +3129,11 @@ bool VulkanCommandProcessor::EndSubmission(bool is_swap) { } VkResult submit_result; { - ui::vulkan::VulkanProvider::QueueAcquisition queue_acquisition( - provider.AcquireQueue(provider.queue_family_graphics_compute(), 0)); + ui::vulkan::VulkanDevice::Queue::Acquisition queue_acquisition = + vulkan_device->AcquireQueue( + vulkan_device->queue_family_graphics_compute(), 0); submit_result = - dfn.vkQueueSubmit(queue_acquisition.queue, 1, &submit_info, fence); + 
dfn.vkQueueSubmit(queue_acquisition.queue(), 1, &submit_info, fence); } if (submit_result != VK_SUCCESS) { XELOGE("Failed to submit a Vulkan command buffer"); @@ -3235,9 +3242,9 @@ void VulkanCommandProcessor::SplitPendingBarrier() { void VulkanCommandProcessor::DestroyScratchBuffer() { assert_false(scratch_buffer_used_); - const ui::vulkan::VulkanProvider& provider = GetVulkanProvider(); - const ui::vulkan::VulkanProvider::DeviceFunctions& dfn = provider.dfn(); - VkDevice device = provider.device(); + const ui::vulkan::VulkanDevice* const vulkan_device = GetVulkanDevice(); + const ui::vulkan::VulkanDevice::Functions& dfn = vulkan_device->functions(); + const VkDevice device = vulkan_device->device(); scratch_buffer_last_usage_submission_ = 0; scratch_buffer_last_access_mask_ = 0; @@ -3252,9 +3259,9 @@ void VulkanCommandProcessor::DestroyScratchBuffer() { void VulkanCommandProcessor::UpdateDynamicState( const draw_util::ViewportInfo& viewport_info, bool primitive_polygonal, reg::RB_DEPTHCONTROL normalized_depth_control) { -#if XE_UI_VULKAN_FINE_GRAINED_DRAW_SCOPES +#if XE_GPU_FINE_GRAINED_DRAW_SCOPES SCOPE_profile_cpu_f("gpu"); -#endif // XE_UI_VULKAN_FINE_GRAINED_DRAW_SCOPES +#endif // XE_GPU_FINE_GRAINED_DRAW_SCOPES const RegisterFile& regs = *register_file_; @@ -3358,7 +3365,7 @@ void VulkanCommandProcessor::UpdateDynamicState( if (normalized_depth_control.stencil_enable) { Register stencil_ref_mask_front_reg, stencil_ref_mask_back_reg; if (primitive_polygonal && normalized_depth_control.backface_enable) { - if (GetVulkanProvider().device_info().separateStencilMaskRef) { + if (GetVulkanDevice()->properties().separateStencilMaskRef) { stencil_ref_mask_front_reg = XE_GPU_REG_RB_STENCILREFMASK; stencil_ref_mask_back_reg = XE_GPU_REG_RB_STENCILREFMASK_BF; } else { @@ -3475,9 +3482,9 @@ void VulkanCommandProcessor::UpdateSystemConstantValues( bool shader_32bit_index_dma, const draw_util::ViewportInfo& viewport_info, uint32_t used_texture_mask, reg::RB_DEPTHCONTROL 
normalized_depth_control, uint32_t normalized_color_mask) { -#if XE_UI_VULKAN_FINE_GRAINED_DRAW_SCOPES +#if XE_GPU_FINE_GRAINED_DRAW_SCOPES SCOPE_profile_cpu_f("gpu"); -#endif // XE_UI_VULKAN_FINE_GRAINED_DRAW_SCOPES +#endif // XE_GPU_FINE_GRAINED_DRAW_SCOPES const RegisterFile& regs = *register_file_; auto pa_cl_vte_cntl = regs.Get(); @@ -3719,7 +3726,7 @@ void VulkanCommandProcessor::UpdateSystemConstantValues( } // Texture host swizzle in the shader. - if (!GetVulkanProvider().device_info().imageViewFormatSwizzle) { + if (!GetVulkanDevice()->properties().imageViewFormatSwizzle) { uint32_t textures_remaining = used_texture_mask; uint32_t texture_index; while (xe::bit_scan_forward(textures_remaining, &texture_index)) { @@ -3935,15 +3942,15 @@ void VulkanCommandProcessor::UpdateSystemConstantValues( bool VulkanCommandProcessor::UpdateBindings(const VulkanShader* vertex_shader, const VulkanShader* pixel_shader) { -#if XE_UI_VULKAN_FINE_GRAINED_DRAW_SCOPES +#if XE_GPU_FINE_GRAINED_DRAW_SCOPES SCOPE_profile_cpu_f("gpu"); -#endif // XE_UI_VULKAN_FINE_GRAINED_DRAW_SCOPES +#endif // XE_GPU_FINE_GRAINED_DRAW_SCOPES const RegisterFile& regs = *register_file_; - const ui::vulkan::VulkanProvider& provider = GetVulkanProvider(); - const ui::vulkan::VulkanProvider::DeviceFunctions& dfn = provider.dfn(); - VkDevice device = provider.device(); + const ui::vulkan::VulkanDevice* const vulkan_device = GetVulkanDevice(); + const ui::vulkan::VulkanDevice::Functions& dfn = vulkan_device->functions(); + const VkDevice device = vulkan_device->device(); // Invalidate constant buffers and descriptors for changed data. 
@@ -4002,7 +4009,7 @@ bool VulkanCommandProcessor::UpdateBindings(const VulkanShader* vertex_shader, current_graphics_descriptor_set_values_up_to_date_ &= ~(UINT32_C(1) << SpirvShaderTranslator::kDescriptorSetConstants); size_t uniform_buffer_alignment = - size_t(provider.device_info().minUniformBufferOffsetAlignment); + size_t(vulkan_device->properties().minUniformBufferOffsetAlignment); // System constants. if (!(current_constant_buffers_up_to_date_ & (UINT32_C(1) << SpirvShaderTranslator::kConstantBufferSystem))) { @@ -4376,10 +4383,9 @@ uint8_t* VulkanCommandProcessor::WriteTransientUniformBufferBinding( if (descriptor_set == VK_NULL_HANDLE) { return nullptr; } - const ui::vulkan::VulkanProvider& provider = GetVulkanProvider(); uint8_t* mapping = uniform_buffer_pool_->Request( frame_current_, size, - size_t(provider.device_info().minUniformBufferOffsetAlignment), + size_t(GetVulkanDevice()->properties().minUniformBufferOffsetAlignment), descriptor_buffer_info_out.buffer, descriptor_buffer_info_out.offset); if (!mapping) { return nullptr; @@ -4409,9 +4415,9 @@ uint8_t* VulkanCommandProcessor::WriteTransientUniformBufferBinding( if (!mapping) { return nullptr; } - const ui::vulkan::VulkanProvider& provider = GetVulkanProvider(); - const ui::vulkan::VulkanProvider::DeviceFunctions& dfn = provider.dfn(); - VkDevice device = provider.device(); + const ui::vulkan::VulkanDevice* const vulkan_device = GetVulkanDevice(); + const ui::vulkan::VulkanDevice::Functions& dfn = vulkan_device->functions(); + const VkDevice device = vulkan_device->device(); dfn.vkUpdateDescriptorSets(device, 1, &write_descriptor_set, 0, nullptr); descriptor_set_out = write_descriptor_set.dstSet; return mapping; diff --git a/src/xenia/gpu/vulkan/vulkan_command_processor.h b/src/xenia/gpu/vulkan/vulkan_command_processor.h index 022fb37b2..8ea287bf9 100644 --- a/src/xenia/gpu/vulkan/vulkan_command_processor.h +++ b/src/xenia/gpu/vulkan/vulkan_command_processor.h @@ -143,9 +143,10 @@ class 
VulkanCommandProcessor : public CommandProcessor { void RestoreEdramSnapshot(const void* snapshot) override; - ui::vulkan::VulkanProvider& GetVulkanProvider() const { - return *static_cast( - graphics_system_->provider()); + ui::vulkan::VulkanDevice* GetVulkanDevice() const { + return static_cast( + graphics_system_->provider()) + ->vulkan_device(); } // Returns the deferred drawing command list for the currently open diff --git a/src/xenia/gpu/vulkan/vulkan_graphics_system.cc b/src/xenia/gpu/vulkan/vulkan_graphics_system.cc index 300bc9c3e..b7c9ef92e 100644 --- a/src/xenia/gpu/vulkan/vulkan_graphics_system.cc +++ b/src/xenia/gpu/vulkan/vulkan_graphics_system.cc @@ -33,10 +33,10 @@ std::string VulkanGraphicsSystem::name() const { X_STATUS VulkanGraphicsSystem::Setup(cpu::Processor* processor, kernel::KernelState* kernel_state, ui::WindowedAppContext* app_context, - bool is_surface_required) { - provider_ = xe::ui::vulkan::VulkanProvider::Create(is_surface_required); + bool with_presentation) { + provider_ = xe::ui::vulkan::VulkanProvider::Create(true, with_presentation); return GraphicsSystem::Setup(processor, kernel_state, app_context, - is_surface_required); + with_presentation); } std::unique_ptr diff --git a/src/xenia/gpu/vulkan/vulkan_graphics_system.h b/src/xenia/gpu/vulkan/vulkan_graphics_system.h index e06892aa1..d3e7469df 100644 --- a/src/xenia/gpu/vulkan/vulkan_graphics_system.h +++ b/src/xenia/gpu/vulkan/vulkan_graphics_system.h @@ -30,7 +30,7 @@ class VulkanGraphicsSystem : public GraphicsSystem { X_STATUS Setup(cpu::Processor* processor, kernel::KernelState* kernel_state, ui::WindowedAppContext* app_context, - bool is_surface_required) override; + bool with_presentation) override; private: std::unique_ptr CreateCommandProcessor() override; diff --git a/src/xenia/gpu/vulkan/vulkan_pipeline_cache.cc b/src/xenia/gpu/vulkan/vulkan_pipeline_cache.cc index eb2ee9b21..99a8932c8 100644 --- a/src/xenia/gpu/vulkan/vulkan_pipeline_cache.cc +++ 
b/src/xenia/gpu/vulkan/vulkan_pipeline_cache.cc @@ -51,15 +51,15 @@ VulkanPipelineCache::VulkanPipelineCache( VulkanPipelineCache::~VulkanPipelineCache() { Shutdown(); } bool VulkanPipelineCache::Initialize() { - const ui::vulkan::VulkanProvider& provider = - command_processor_.GetVulkanProvider(); + const ui::vulkan::VulkanDevice* const vulkan_device = + command_processor_.GetVulkanDevice(); bool edram_fragment_shader_interlock = render_target_cache_.GetPath() == RenderTargetCache::Path::kPixelShaderInterlock; shader_translator_ = std::make_unique( - SpirvShaderTranslator::Features(provider.device_info()), + SpirvShaderTranslator::Features(vulkan_device), render_target_cache_.msaa_2x_attachments_supported(), render_target_cache_.msaa_2x_no_attachments_supported(), edram_fragment_shader_interlock); @@ -68,7 +68,7 @@ bool VulkanPipelineCache::Initialize() { std::vector depth_only_fragment_shader_code = shader_translator_->CreateDepthOnlyFragmentShader(); depth_only_fragment_shader_ = ui::vulkan::util::CreateShaderModule( - provider, + vulkan_device, reinterpret_cast( depth_only_fragment_shader_code.data()), depth_only_fragment_shader_code.size()); @@ -85,10 +85,10 @@ bool VulkanPipelineCache::Initialize() { } void VulkanPipelineCache::Shutdown() { - const ui::vulkan::VulkanProvider& provider = - command_processor_.GetVulkanProvider(); - const ui::vulkan::VulkanProvider::DeviceFunctions& dfn = provider.dfn(); - VkDevice device = provider.device(); + const ui::vulkan::VulkanDevice* const vulkan_device = + command_processor_.GetVulkanDevice(); + const ui::vulkan::VulkanDevice::Functions& dfn = vulkan_device->functions(); + const VkDevice device = vulkan_device->device(); // Destroy all pipelines. last_pipeline_ = nullptr; @@ -136,7 +136,7 @@ VulkanShader* VulkanPipelineCache::LoadShader(xenos::ShaderType shader_type, // We need to track it even if it fails translation so we know not to try // again. 
VulkanShader* shader = - new VulkanShader(command_processor_.GetVulkanProvider(), shader_type, + new VulkanShader(command_processor_.GetVulkanDevice(), shader_type, data_hash, host_address, dword_count); shaders_.emplace(data_hash, shader); return shader; @@ -268,9 +268,9 @@ bool VulkanPipelineCache::ConfigurePipeline( VulkanRenderTargetCache::RenderPassKey render_pass_key, VkPipeline& pipeline_out, const PipelineLayoutProvider*& pipeline_layout_out) { -#if XE_UI_VULKAN_FINE_GRAINED_DRAW_SCOPES +#if XE_GPU_FINE_GRAINED_DRAW_SCOPES SCOPE_profile_cpu_f("gpu"); -#endif // XE_UI_VULKAN_FINE_GRAINED_DRAW_SCOPES +#endif // XE_GPU_FINE_GRAINED_DRAW_SCOPES // Ensure shaders are translated - needed now for GetCurrentStateDescription. if (!EnsureShadersTranslated(vertex_shader, pixel_shader)) { @@ -471,8 +471,8 @@ void VulkanPipelineCache::WritePipelineRenderTargetDescription( render_target_out.dst_alpha_blend_factor = kBlendFactorMap[uint32_t(blend_control.alpha_destblend)]; render_target_out.alpha_blend_op = blend_control.alpha_comb_fcn; - if (!command_processor_.GetVulkanProvider() - .device_info() + if (!command_processor_.GetVulkanDevice() + ->properties() .constantAlphaColorBlendFactors) { if (blend_control.color_srcblend == xenos::BlendFactor::kConstantAlpha) { render_target_out.src_color_blend_factor = @@ -512,8 +512,8 @@ bool VulkanPipelineCache::GetCurrentStateDescription( PipelineDescription& description_out) const { description_out.Reset(); - const ui::vulkan::VulkanProvider::DeviceInfo& device_info = - command_processor_.GetVulkanProvider().device_info(); + const ui::vulkan::VulkanDevice::Properties& device_properties = + command_processor_.GetVulkanDevice()->properties(); const RegisterFile& regs = register_file_; auto pa_su_sc_mode_cntl = regs.Get(); @@ -548,7 +548,7 @@ bool VulkanPipelineCache::GetCurrentStateDescription( break; case xenos::PrimitiveType::kTriangleFan: // The check should be performed at primitive processing time. 
- assert_true(device_info.triangleFans); + assert_true(device_properties.triangleFans); primitive_topology = PipelinePrimitiveTopology::kTriangleFan; break; case xenos::PrimitiveType::kTriangleStrip: @@ -572,7 +572,8 @@ bool VulkanPipelineCache::GetCurrentStateDescription( primitive_processing_result.host_primitive_reset_enabled; description_out.depth_clamp_enable = - device_info.depthClamp && regs.Get().clip_disable; + device_properties.depthClamp && + regs.Get().clip_disable; // TODO(Triang3l): Tessellation. bool primitive_polygonal = draw_util::IsPrimitivePolygonal(regs); @@ -587,7 +588,7 @@ bool VulkanPipelineCache::GetCurrentStateDescription( bool cull_back = pa_su_sc_mode_cntl.cull_back; description_out.cull_front = cull_front; description_out.cull_back = cull_back; - if (device_info.fillModeNonSolid) { + if (device_properties.fillModeNonSolid) { xenos::PolygonType polygon_type = xenos::PolygonType::kTriangles; if (!cull_front) { polygon_type = @@ -604,7 +605,7 @@ bool VulkanPipelineCache::GetCurrentStateDescription( case xenos::PolygonType::kPoints: // When points are not supported, use lines instead, preserving // debug-like purpose. - description_out.polygon_mode = device_info.pointPolygons + description_out.polygon_mode = device_properties.pointPolygons ? PipelinePolygonMode::kPoint : PipelinePolygonMode::kLine; break; @@ -671,83 +672,17 @@ bool VulkanPipelineCache::GetCurrentStateDescription( // Color blending and write masks (filled only for the attachments present // in the render pass object). 
uint32_t render_pass_color_rts = render_pass_key.depth_and_color_used >> 1; - if (device_info.independentBlend) { - uint32_t render_pass_color_rts_remaining = render_pass_color_rts; - uint32_t color_rt_index; - while (xe::bit_scan_forward(render_pass_color_rts_remaining, - &color_rt_index)) { - render_pass_color_rts_remaining &= ~(uint32_t(1) << color_rt_index); - WritePipelineRenderTargetDescription( - regs.Get( - reg::RB_BLENDCONTROL::rt_register_indices[color_rt_index]), - (normalized_color_mask >> (color_rt_index * 4)) & 0b1111, - description_out.render_targets[color_rt_index]); - } - } else { - // Take the blend control for the first render target that the guest wants - // to write to (consider it the most important) and use it for all render - // targets, if any. - // TODO(Triang3l): Implement an option for independent blending via - // replaying the render pass for each set of render targets with unique - // blending parameters, with depth / stencil saved before the first and - // restored before each of the rest maybe? Though independent blending - // support is pretty wide, with a quite prominent exception of Adreno 4xx - // apparently. 
- uint32_t render_pass_color_rts_remaining = render_pass_color_rts; - uint32_t render_pass_first_color_rt_index; - if (xe::bit_scan_forward(render_pass_color_rts_remaining, - &render_pass_first_color_rt_index)) { - render_pass_color_rts_remaining &= - ~(uint32_t(1) << render_pass_first_color_rt_index); - PipelineRenderTarget& render_pass_first_color_rt = - description_out.render_targets[render_pass_first_color_rt_index]; - uint32_t common_blend_rt_index; - if (xe::bit_scan_forward(normalized_color_mask, - &common_blend_rt_index)) { - common_blend_rt_index >>= 2; - // If a common write mask will be used for multiple render targets, - // use the original RB_COLOR_MASK instead of the normalized color mask - // as the normalized color mask has non-existent components forced to - // written (don't need reading to be preserved), while the number of - // components may vary between render targets. The attachments in the - // pass that must not be written to at all will be excluded via a - // shader modification. - WritePipelineRenderTargetDescription( - regs.Get( - reg::RB_BLENDCONTROL::rt_register_indices - [common_blend_rt_index]), - (((normalized_color_mask & - ~(uint32_t(0b1111) << (4 * common_blend_rt_index))) - ? regs[XE_GPU_REG_RB_COLOR_MASK] - : normalized_color_mask) >> - (4 * common_blend_rt_index)) & - 0b1111, - render_pass_first_color_rt); - } else { - // No render targets are written to, though the render pass still may - // contain color attachments - set them to not written and not - // blending. 
- render_pass_first_color_rt.src_color_blend_factor = - PipelineBlendFactor::kOne; - render_pass_first_color_rt.dst_color_blend_factor = - PipelineBlendFactor::kZero; - render_pass_first_color_rt.color_blend_op = xenos::BlendOp::kAdd; - render_pass_first_color_rt.src_alpha_blend_factor = - PipelineBlendFactor::kOne; - render_pass_first_color_rt.dst_alpha_blend_factor = - PipelineBlendFactor::kZero; - render_pass_first_color_rt.alpha_blend_op = xenos::BlendOp::kAdd; - } - // Reuse the same blending settings for all render targets in the pass, - // for description consistency. - uint32_t color_rt_index; - while (xe::bit_scan_forward(render_pass_color_rts_remaining, - &color_rt_index)) { - render_pass_color_rts_remaining &= ~(uint32_t(1) << color_rt_index); - description_out.render_targets[color_rt_index] = - render_pass_first_color_rt; - } - } + assert_true(device_properties.independentBlend); + uint32_t render_pass_color_rts_remaining = render_pass_color_rts; + uint32_t color_rt_index; + while (xe::bit_scan_forward(render_pass_color_rts_remaining, + &color_rt_index)) { + render_pass_color_rts_remaining &= ~(uint32_t(1) << color_rt_index); + WritePipelineRenderTargetDescription( + regs.Get( + reg::RB_BLENDCONTROL::rt_register_indices[color_rt_index]), + (normalized_color_mask >> (color_rt_index * 4)) & 0b1111, + description_out.render_targets[color_rt_index]); } } @@ -767,65 +702,37 @@ bool VulkanPipelineCache::ArePipelineRequirementsMet( return false; } - const ui::vulkan::VulkanProvider::DeviceInfo& device_info = - command_processor_.GetVulkanProvider().device_info(); + const ui::vulkan::VulkanDevice::Properties& device_properties = + command_processor_.GetVulkanDevice()->properties(); - if (!device_info.geometryShader && + if (!device_properties.geometryShader && description.geometry_shader != PipelineGeometryShader::kNone) { return false; } - if (!device_info.triangleFans && + if (!device_properties.triangleFans && description.primitive_topology == 
PipelinePrimitiveTopology::kTriangleFan) { return false; } - if (!device_info.depthClamp && description.depth_clamp_enable) { + if (!device_properties.depthClamp && description.depth_clamp_enable) { return false; } - if (!device_info.pointPolygons && + if (!device_properties.pointPolygons && description.polygon_mode == PipelinePolygonMode::kPoint) { return false; } - if (!device_info.fillModeNonSolid && + if (!device_properties.fillModeNonSolid && description.polygon_mode != PipelinePolygonMode::kFill) { return false; } - if (!device_info.independentBlend) { - uint32_t color_rts_remaining = - description.render_pass_key.depth_and_color_used >> 1; - uint32_t first_color_rt_index; - if (xe::bit_scan_forward(color_rts_remaining, &first_color_rt_index)) { - color_rts_remaining &= ~(uint32_t(1) << first_color_rt_index); - const PipelineRenderTarget& first_color_rt = - description.render_targets[first_color_rt_index]; - uint32_t color_rt_index; - while (xe::bit_scan_forward(color_rts_remaining, &color_rt_index)) { - color_rts_remaining &= ~(uint32_t(1) << color_rt_index); - const PipelineRenderTarget& color_rt = - description.render_targets[color_rt_index]; - if (color_rt.src_color_blend_factor != - first_color_rt.src_color_blend_factor || - color_rt.dst_color_blend_factor != - first_color_rt.dst_color_blend_factor || - color_rt.color_blend_op != first_color_rt.color_blend_op || - color_rt.src_alpha_blend_factor != - first_color_rt.src_alpha_blend_factor || - color_rt.dst_alpha_blend_factor != - first_color_rt.dst_alpha_blend_factor || - color_rt.alpha_blend_op != first_color_rt.alpha_blend_op || - color_rt.color_write_mask != first_color_rt.color_write_mask) { - return false; - } - } - } - } + assert_true(device_properties.independentBlend); - if (!device_info.constantAlphaColorBlendFactors) { + if (!device_properties.constantAlphaColorBlendFactors) { uint32_t color_rts_remaining = description.render_pass_key.depth_and_color_used >> 1; uint32_t color_rt_index; @@ 
-1849,10 +1756,9 @@ VkShaderModule VulkanPipelineCache::GetGeometryShader(GeometryShaderKey key) { // Create the shader module, and store the handle even if creation fails not // to try to create it again later. - const ui::vulkan::VulkanProvider& provider = - command_processor_.GetVulkanProvider(); VkShaderModule shader_module = ui::vulkan::util::CreateShaderModule( - provider, reinterpret_cast(shader_code.data()), + command_processor_.GetVulkanDevice(), + reinterpret_cast(shader_code.data()), sizeof(uint32_t) * shader_code.size()); if (shader_module == VK_NULL_HANDLE) { XELOGE( @@ -1892,10 +1798,8 @@ bool VulkanPipelineCache::EnsurePipelineCreated( return false; } - const ui::vulkan::VulkanProvider& provider = - command_processor_.GetVulkanProvider(); - const ui::vulkan::VulkanProvider::DeviceInfo& device_info = - provider.device_info(); + const ui::vulkan::VulkanDevice* const vulkan_device = + command_processor_.GetVulkanDevice(); bool edram_fragment_shader_interlock = render_target_cache_.GetPath() == @@ -2174,6 +2078,7 @@ bool VulkanPipelineCache::EnsurePipelineCreated( VK_BLEND_OP_ADD, VK_BLEND_OP_ADD, VK_BLEND_OP_ADD}; + assert_true(vulkan_device->properties().independentBlend); uint32_t color_rts_remaining = color_rts_used; uint32_t color_rt_index; while (xe::bit_scan_forward(color_rts_remaining, &color_rt_index)) { @@ -2204,28 +2109,10 @@ bool VulkanPipelineCache::EnsurePipelineCreated( } color_blend_attachment.colorWriteMask = VkColorComponentFlags(color_rt.color_write_mask); - if (!device_info.independentBlend) { - // For non-independent blend, the pAttachments element for the first - // actually used color will be replicated into all. - break; - } } } color_blend_state.attachmentCount = 32 - xe::lzcnt(color_rts_used); color_blend_state.pAttachments = color_blend_attachments; - if (color_rts_used && !device_info.independentBlend) { - // "If the independent blending feature is not enabled, all elements of - // pAttachments must be identical." 
- uint32_t first_color_rt_index; - xe::bit_scan_forward(color_rts_used, &first_color_rt_index); - for (uint32_t i = 0; i < color_blend_state.attachmentCount; ++i) { - if (i == first_color_rt_index) { - continue; - } - color_blend_attachments[i] = - color_blend_attachments[first_color_rt_index]; - } - } } std::array dynamic_states; @@ -2276,8 +2163,8 @@ bool VulkanPipelineCache::EnsurePipelineCreated( pipeline_create_info.basePipelineHandle = VK_NULL_HANDLE; pipeline_create_info.basePipelineIndex = -1; - const ui::vulkan::VulkanProvider::DeviceFunctions& dfn = provider.dfn(); - VkDevice device = provider.device(); + const ui::vulkan::VulkanDevice::Functions& dfn = vulkan_device->functions(); + const VkDevice device = vulkan_device->device(); VkPipeline pipeline; if (dfn.vkCreateGraphicsPipelines(device, VK_NULL_HANDLE, 1, &pipeline_create_info, nullptr, diff --git a/src/xenia/gpu/vulkan/vulkan_pipeline_cache.h b/src/xenia/gpu/vulkan/vulkan_pipeline_cache.h index 09a26caa4..8b97327ec 100644 --- a/src/xenia/gpu/vulkan/vulkan_pipeline_cache.h +++ b/src/xenia/gpu/vulkan/vulkan_pipeline_cache.h @@ -27,7 +27,7 @@ #include "xenia/gpu/vulkan/vulkan_render_target_cache.h" #include "xenia/gpu/vulkan/vulkan_shader.h" #include "xenia/gpu/xenos.h" -#include "xenia/ui/vulkan/vulkan_provider.h" +#include "xenia/ui/vulkan/vulkan_api.h" namespace xe { namespace gpu { diff --git a/src/xenia/gpu/vulkan/vulkan_primitive_processor.cc b/src/xenia/gpu/vulkan/vulkan_primitive_processor.cc index f4898acd8..8e1d0bcb1 100644 --- a/src/xenia/gpu/vulkan/vulkan_primitive_processor.cc +++ b/src/xenia/gpu/vulkan/vulkan_primitive_processor.cc @@ -17,7 +17,6 @@ #include "xenia/base/logging.h" #include "xenia/gpu/vulkan/deferred_command_buffer.h" #include "xenia/gpu/vulkan/vulkan_command_processor.h" -#include "xenia/ui/vulkan/vulkan_provider.h" #include "xenia/ui/vulkan/vulkan_util.h" namespace xe { @@ -27,29 +26,30 @@ namespace vulkan { VulkanPrimitiveProcessor::~VulkanPrimitiveProcessor() { 
Shutdown(true); } bool VulkanPrimitiveProcessor::Initialize() { - const ui::vulkan::VulkanProvider::DeviceInfo& device_info = - command_processor_.GetVulkanProvider().device_info(); - if (!InitializeCommon(device_info.fullDrawIndexUint32, - device_info.triangleFans, false, - device_info.geometryShader, device_info.geometryShader, - device_info.geometryShader)) { + const ui::vulkan::VulkanDevice* const vulkan_device = + command_processor_.GetVulkanDevice(); + const ui::vulkan::VulkanDevice::Properties& device_properties = + vulkan_device->properties(); + if (!InitializeCommon( + device_properties.fullDrawIndexUint32, device_properties.triangleFans, + false, device_properties.geometryShader, + device_properties.geometryShader, device_properties.geometryShader)) { Shutdown(); return false; } frame_index_buffer_pool_ = std::make_unique( - command_processor_.GetVulkanProvider(), - VK_BUFFER_USAGE_INDEX_BUFFER_BIT, + vulkan_device, VK_BUFFER_USAGE_INDEX_BUFFER_BIT, std::max(size_t(kMinRequiredConvertedIndexBufferSize), ui::GraphicsUploadBufferPool::kDefaultPageSize)); return true; } void VulkanPrimitiveProcessor::Shutdown(bool from_destructor) { - const ui::vulkan::VulkanProvider& provider = - command_processor_.GetVulkanProvider(); - const ui::vulkan::VulkanProvider::DeviceFunctions& dfn = provider.dfn(); - VkDevice device = provider.device(); + const ui::vulkan::VulkanDevice* const vulkan_device = + command_processor_.GetVulkanDevice(); + const ui::vulkan::VulkanDevice::Functions& dfn = vulkan_device->functions(); + const VkDevice device = vulkan_device->device(); frame_index_buffers_.clear(); frame_index_buffer_pool_.reset(); @@ -71,10 +71,10 @@ void VulkanPrimitiveProcessor::CompletedSubmissionUpdated() { if (builtin_index_buffer_upload_ != VK_NULL_HANDLE && command_processor_.GetCompletedSubmission() >= builtin_index_buffer_upload_submission_) { - const ui::vulkan::VulkanProvider& provider = - command_processor_.GetVulkanProvider(); - const 
ui::vulkan::VulkanProvider::DeviceFunctions& dfn = provider.dfn(); - VkDevice device = provider.device(); + const ui::vulkan::VulkanDevice* const vulkan_device = + command_processor_.GetVulkanDevice(); + const ui::vulkan::VulkanDevice::Functions& dfn = vulkan_device->functions(); + const VkDevice device = vulkan_device->device(); ui::vulkan::util::DestroyAndNullHandle(dfn.vkDestroyBuffer, device, builtin_index_buffer_upload_); ui::vulkan::util::DestroyAndNullHandle(dfn.vkFreeMemory, device, @@ -131,14 +131,14 @@ bool VulkanPrimitiveProcessor::InitializeBuiltinIndexBuffer( assert_true(builtin_index_buffer_upload_ == VK_NULL_HANDLE); assert_true(builtin_index_buffer_upload_memory_ == VK_NULL_HANDLE); - const ui::vulkan::VulkanProvider& provider = - command_processor_.GetVulkanProvider(); - const ui::vulkan::VulkanProvider::DeviceFunctions& dfn = provider.dfn(); - VkDevice device = provider.device(); + const ui::vulkan::VulkanDevice* const vulkan_device = + command_processor_.GetVulkanDevice(); + const ui::vulkan::VulkanDevice::Functions& dfn = vulkan_device->functions(); + const VkDevice device = vulkan_device->device(); builtin_index_buffer_size_ = VkDeviceSize(size_bytes); if (!ui::vulkan::util::CreateDedicatedAllocationBuffer( - provider, builtin_index_buffer_size_, + vulkan_device, builtin_index_buffer_size_, VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_INDEX_BUFFER_BIT, ui::vulkan::util::MemoryPurpose::kDeviceLocal, builtin_index_buffer_, builtin_index_buffer_memory_)) { @@ -150,7 +150,7 @@ bool VulkanPrimitiveProcessor::InitializeBuiltinIndexBuffer( } uint32_t upload_memory_type; if (!ui::vulkan::util::CreateDedicatedAllocationBuffer( - provider, builtin_index_buffer_size_, + vulkan_device, builtin_index_buffer_size_, VK_BUFFER_USAGE_TRANSFER_SRC_BIT, ui::vulkan::util::MemoryPurpose::kUpload, builtin_index_buffer_upload_, builtin_index_buffer_upload_memory_, @@ -185,7 +185,7 @@ bool VulkanPrimitiveProcessor::InitializeBuiltinIndexBuffer( } 
fill_callback(mapping); ui::vulkan::util::FlushMappedMemoryRange( - provider, builtin_index_buffer_memory_, upload_memory_type); + vulkan_device, builtin_index_buffer_memory_, upload_memory_type); dfn.vkUnmapMemory(device, builtin_index_buffer_upload_memory_); // Schedule uploading in the first submission. diff --git a/src/xenia/gpu/vulkan/vulkan_primitive_processor.h b/src/xenia/gpu/vulkan/vulkan_primitive_processor.h index ea8ed4fed..aceab3933 100644 --- a/src/xenia/gpu/vulkan/vulkan_primitive_processor.h +++ b/src/xenia/gpu/vulkan/vulkan_primitive_processor.h @@ -15,7 +15,6 @@ #include "xenia/base/assert.h" #include "xenia/gpu/primitive_processor.h" -#include "xenia/ui/vulkan/vulkan_provider.h" #include "xenia/ui/vulkan/vulkan_upload_buffer_pool.h" namespace xe { diff --git a/src/xenia/gpu/vulkan/vulkan_render_target_cache.cc b/src/xenia/gpu/vulkan/vulkan_render_target_cache.cc index 8f7887b4e..47422876a 100644 --- a/src/xenia/gpu/vulkan/vulkan_render_target_cache.cc +++ b/src/xenia/gpu/vulkan/vulkan_render_target_cache.cc @@ -207,14 +207,15 @@ VulkanRenderTargetCache::VulkanRenderTargetCache( VulkanRenderTargetCache::~VulkanRenderTargetCache() { Shutdown(true); } bool VulkanRenderTargetCache::Initialize(uint32_t shared_memory_binding_count) { - const ui::vulkan::VulkanProvider& provider = - command_processor_.GetVulkanProvider(); - const ui::vulkan::VulkanProvider::InstanceFunctions& ifn = provider.ifn(); - VkPhysicalDevice physical_device = provider.physical_device(); - const ui::vulkan::VulkanProvider::DeviceFunctions& dfn = provider.dfn(); - VkDevice device = provider.device(); - const ui::vulkan::VulkanProvider::DeviceInfo& device_info = - provider.device_info(); + const ui::vulkan::VulkanDevice* const vulkan_device = + command_processor_.GetVulkanDevice(); + const ui::vulkan::VulkanInstance::Functions& ifn = + vulkan_device->vulkan_instance()->functions(); + const VkPhysicalDevice physical_device = vulkan_device->physical_device(); + const 
ui::vulkan::VulkanDevice::Functions& dfn = vulkan_device->functions(); + const VkDevice device = vulkan_device->device(); + const ui::vulkan::VulkanDevice::Properties& device_properties = + vulkan_device->properties(); if (cvars::render_target_path_vulkan == "fsi") { path_ = Path::kPixelShaderInterlock; @@ -243,13 +244,13 @@ bool VulkanRenderTargetCache::Initialize(uint32_t shared_memory_binding_count) { // between, for instance, the ability to vfetch and memexport in fragment // shaders, and the usage of fragment shader interlock, prefer the former // for simplicity. - if (!(device_info.fragmentShaderSampleInterlock || - device_info.fragmentShaderPixelInterlock) || - !device_info.fragmentStoresAndAtomics || - !device_info.sampleRateShading || - !device_info.standardSampleLocations || + if (!(device_properties.fragmentShaderSampleInterlock || + device_properties.fragmentShaderPixelInterlock) || + !device_properties.fragmentStoresAndAtomics || + !device_properties.sampleRateShading || + !device_properties.standardSampleLocations || shared_memory_binding_count >= - device_info.maxPerStageDescriptorStorageBuffers) { + device_properties.maxPerStageDescriptorStorageBuffers) { path_ = Path::kHostRenderTargets; } } @@ -271,17 +272,18 @@ bool VulkanRenderTargetCache::Initialize(uint32_t shared_memory_binding_count) { if (cvars::native_2x_msaa) { // Multisampled integer sampled images are optional in Vulkan and in Xenia. 
msaa_2x_attachments_supported_ = - (device_info.framebufferColorSampleCounts & - device_info.framebufferDepthSampleCounts & - device_info.framebufferStencilSampleCounts & - device_info.sampledImageColorSampleCounts & - device_info.sampledImageDepthSampleCounts & - device_info.sampledImageStencilSampleCounts & VK_SAMPLE_COUNT_2_BIT) && - (device_info.sampledImageIntegerSampleCounts & + (device_properties.framebufferColorSampleCounts & + device_properties.framebufferDepthSampleCounts & + device_properties.framebufferStencilSampleCounts & + device_properties.sampledImageColorSampleCounts & + device_properties.sampledImageDepthSampleCounts & + device_properties.sampledImageStencilSampleCounts & + VK_SAMPLE_COUNT_2_BIT) && + (device_properties.sampledImageIntegerSampleCounts & (VK_SAMPLE_COUNT_2_BIT | VK_SAMPLE_COUNT_4_BIT)) != VK_SAMPLE_COUNT_4_BIT; msaa_2x_no_attachments_supported_ = - (device_info.framebufferNoAttachmentsSampleCounts & + (device_properties.framebufferNoAttachmentsSampleCounts & VK_SAMPLE_COUNT_2_BIT) != 0; } else { msaa_2x_attachments_supported_ = false; @@ -349,19 +351,19 @@ bool VulkanRenderTargetCache::Initialize(uint32_t shared_memory_binding_count) { descriptor_set_layout_size.descriptorCount = 1; descriptor_set_pool_sampled_image_ = std::make_unique( - provider, 256, 1, &descriptor_set_layout_size, + vulkan_device, 256, 1, &descriptor_set_layout_size, descriptor_set_layout_sampled_image_); descriptor_set_layout_size.descriptorCount = 2; descriptor_set_pool_sampled_image_x2_ = std::make_unique( - provider, 256, 1, &descriptor_set_layout_size, + vulkan_device, 256, 1, &descriptor_set_layout_size, descriptor_set_layout_sampled_image_x2_); // EDRAM contents reinterpretation buffer. // 90 MB with 9x resolution scaling - within the minimum // maxStorageBufferRange. 
if (!ui::vulkan::util::CreateDedicatedAllocationBuffer( - provider, + vulkan_device, VkDeviceSize(xenos::kEdramSizeBytes * (draw_resolution_scale_x() * draw_resolution_scale_y())), VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT | @@ -498,7 +500,7 @@ bool VulkanRenderTargetCache::Initialize(uint32_t shared_memory_binding_count) { resolve_copy_shader_code.scaled && resolve_copy_shader_code.scaled_size_bytes); VkPipeline resolve_copy_pipeline = ui::vulkan::util::CreateComputePipeline( - provider, resolve_copy_pipeline_layout_, + vulkan_device, resolve_copy_pipeline_layout_, draw_resolution_scaled ? resolve_copy_shader_code.scaled : resolve_copy_shader_code.unscaled, draw_resolution_scaled ? resolve_copy_shader_code.scaled_size_bytes @@ -511,7 +513,7 @@ bool VulkanRenderTargetCache::Initialize(uint32_t shared_memory_binding_count) { Shutdown(); return false; } - provider.SetDeviceObjectName(VK_OBJECT_TYPE_PIPELINE, resolve_copy_pipeline, + vulkan_device->SetObjectName(VK_OBJECT_TYPE_PIPELINE, resolve_copy_pipeline, resolve_copy_shader_info.debug_name); resolve_copy_pipelines_[i] = resolve_copy_pipeline; } @@ -569,7 +571,7 @@ bool VulkanRenderTargetCache::Initialize(uint32_t shared_memory_binding_count) { host_depth_store_shaders[i]; VkPipeline host_depth_store_pipeline = ui::vulkan::util::CreateComputePipeline( - provider, host_depth_store_pipeline_layout_, + vulkan_device, host_depth_store_pipeline_layout_, host_depth_store_shader.first, host_depth_store_shader.second); if (host_depth_store_pipeline == VK_NULL_HANDLE) { XELOGE( @@ -585,7 +587,7 @@ bool VulkanRenderTargetCache::Initialize(uint32_t shared_memory_binding_count) { // Transfer and clear vertex buffer, for quads of up to tile granularity. 
transfer_vertex_buffer_pool_ = std::make_unique( - provider, VK_BUFFER_USAGE_VERTEX_BUFFER_BIT, + vulkan_device, VK_BUFFER_USAGE_VERTEX_BUFFER_BIT, std::max(ui::vulkan::VulkanUploadBufferPool::kDefaultPageSize, sizeof(float) * 2 * 6 * Transfer::kMaxCutoutBorderRectangles * @@ -593,7 +595,7 @@ bool VulkanRenderTargetCache::Initialize(uint32_t shared_memory_binding_count) { // Transfer vertex shader. transfer_passthrough_vertex_shader_ = ui::vulkan::util::CreateShaderModule( - provider, shaders::passthrough_position_xy_vs, + vulkan_device, shaders::passthrough_position_xy_vs, sizeof(shaders::passthrough_position_xy_vs)); if (transfer_passthrough_vertex_shader_ == VK_NULL_HANDLE) { XELOGE( @@ -757,7 +759,7 @@ bool VulkanRenderTargetCache::Initialize(uint32_t shared_memory_binding_count) { return false; } resolve_fsi_clear_32bpp_pipeline_ = ui::vulkan::util::CreateComputePipeline( - provider, resolve_fsi_clear_pipeline_layout_, + vulkan_device, resolve_fsi_clear_pipeline_layout_, draw_resolution_scaled ? shaders::resolve_clear_32bpp_scaled_cs : shaders::resolve_clear_32bpp_cs, draw_resolution_scaled ? sizeof(shaders::resolve_clear_32bpp_scaled_cs) @@ -770,7 +772,7 @@ bool VulkanRenderTargetCache::Initialize(uint32_t shared_memory_binding_count) { return false; } resolve_fsi_clear_64bpp_pipeline_ = ui::vulkan::util::CreateComputePipeline( - provider, resolve_fsi_clear_pipeline_layout_, + vulkan_device, resolve_fsi_clear_pipeline_layout_, draw_resolution_scaled ? shaders::resolve_clear_64bpp_scaled_cs : shaders::resolve_clear_64bpp_cs, draw_resolution_scaled ? 
sizeof(shaders::resolve_clear_64bpp_scaled_cs) @@ -838,10 +840,10 @@ bool VulkanRenderTargetCache::Initialize(uint32_t shared_memory_binding_count) { fsi_framebuffer_create_info.pAttachments = nullptr; fsi_framebuffer_create_info.width = std::min( xenos::kTexture2DCubeMaxWidthHeight * draw_resolution_scale_x(), - device_info.maxFramebufferWidth); + device_properties.maxFramebufferWidth); fsi_framebuffer_create_info.height = std::min( xenos::kTexture2DCubeMaxWidthHeight * draw_resolution_scale_y(), - device_info.maxFramebufferHeight); + device_properties.maxFramebufferHeight); fsi_framebuffer_create_info.layers = 1; if (dfn.vkCreateFramebuffer(device, &fsi_framebuffer_create_info, nullptr, &fsi_framebuffer_.framebuffer) != VK_SUCCESS) { @@ -873,10 +875,10 @@ bool VulkanRenderTargetCache::Initialize(uint32_t shared_memory_binding_count) { } void VulkanRenderTargetCache::Shutdown(bool from_destructor) { - const ui::vulkan::VulkanProvider& provider = - command_processor_.GetVulkanProvider(); - const ui::vulkan::VulkanProvider::DeviceFunctions& dfn = provider.dfn(); - VkDevice device = provider.device(); + const ui::vulkan::VulkanDevice* const vulkan_device = + command_processor_.GetVulkanDevice(); + const ui::vulkan::VulkanDevice::Functions& dfn = vulkan_device->functions(); + const VkDevice device = vulkan_device->device(); // Destroy all render targets before the descriptor set pool is destroyed - // may happen if shutting down the VulkanRenderTargetCache by destroying it, @@ -985,10 +987,10 @@ void VulkanRenderTargetCache::Shutdown(bool from_destructor) { } void VulkanRenderTargetCache::ClearCache() { - const ui::vulkan::VulkanProvider& provider = - command_processor_.GetVulkanProvider(); - const ui::vulkan::VulkanProvider::DeviceFunctions& dfn = provider.dfn(); - VkDevice device = provider.device(); + const ui::vulkan::VulkanDevice* const vulkan_device = + command_processor_.GetVulkanDevice(); + const ui::vulkan::VulkanDevice::Functions& dfn = 
vulkan_device->functions(); + const VkDevice device = vulkan_device->device(); // Framebuffer objects must be destroyed because they reference views of // attachment images, which may be removed by the common ClearCache. @@ -1044,10 +1046,10 @@ bool VulkanRenderTargetCache::Resolve(const Memory& memory, return true; } - const ui::vulkan::VulkanProvider& provider = - command_processor_.GetVulkanProvider(); - const ui::vulkan::VulkanProvider::DeviceFunctions& dfn = provider.dfn(); - VkDevice device = provider.device(); + const ui::vulkan::VulkanDevice* const vulkan_device = + command_processor_.GetVulkanDevice(); + const ui::vulkan::VulkanDevice::Functions& dfn = vulkan_device->functions(); + const VkDevice device = vulkan_device->device(); DeferredCommandBuffer& command_buffer = command_processor_.deferred_command_buffer(); @@ -1554,10 +1556,10 @@ VkRenderPass VulkanRenderTargetCache::GetHostRenderTargetsRenderPass( : 0; render_pass_create_info.pDependencies = subpass_dependencies; - const ui::vulkan::VulkanProvider& provider = - command_processor_.GetVulkanProvider(); - const ui::vulkan::VulkanProvider::DeviceFunctions& dfn = provider.dfn(); - VkDevice device = provider.device(); + const ui::vulkan::VulkanDevice* const vulkan_device = + command_processor_.GetVulkanDevice(); + const ui::vulkan::VulkanDevice::Functions& dfn = vulkan_device->functions(); + const VkDevice device = vulkan_device->device(); VkRenderPass render_pass; if (dfn.vkCreateRenderPass(device, &render_pass_create_info, nullptr, &render_pass) != VK_SUCCESS) { @@ -1644,10 +1646,10 @@ VkFormat VulkanRenderTargetCache::GetColorOwnershipTransferVulkanFormat( } VulkanRenderTargetCache::VulkanRenderTarget::~VulkanRenderTarget() { - const ui::vulkan::VulkanProvider& provider = - render_target_cache_.command_processor_.GetVulkanProvider(); - const ui::vulkan::VulkanProvider::DeviceFunctions& dfn = provider.dfn(); - VkDevice device = provider.device(); + const ui::vulkan::VulkanDevice* const vulkan_device = 
+ render_target_cache_.command_processor_.GetVulkanDevice(); + const ui::vulkan::VulkanDevice::Functions& dfn = vulkan_device->functions(); + const VkDevice device = vulkan_device->device(); ui::vulkan::SingleLayoutDescriptorSetPool& descriptor_set_pool = key().is_depth ? *render_target_cache_.descriptor_set_pool_sampled_image_x2_ @@ -1671,25 +1673,25 @@ VulkanRenderTargetCache::VulkanRenderTarget::~VulkanRenderTarget() { } uint32_t VulkanRenderTargetCache::GetMaxRenderTargetWidth() const { - const ui::vulkan::VulkanProvider::DeviceInfo& device_info = - command_processor_.GetVulkanProvider().device_info(); - return std::min(device_info.maxFramebufferWidth, - device_info.maxImageDimension2D); + const ui::vulkan::VulkanDevice::Properties& device_properties = + command_processor_.GetVulkanDevice()->properties(); + return std::min(device_properties.maxFramebufferWidth, + device_properties.maxImageDimension2D); } uint32_t VulkanRenderTargetCache::GetMaxRenderTargetHeight() const { - const ui::vulkan::VulkanProvider::DeviceInfo& device_info = - command_processor_.GetVulkanProvider().device_info(); - return std::min(device_info.maxFramebufferHeight, - device_info.maxImageDimension2D); + const ui::vulkan::VulkanDevice::Properties& device_properties = + command_processor_.GetVulkanDevice()->properties(); + return std::min(device_properties.maxFramebufferHeight, + device_properties.maxImageDimension2D); } RenderTargetCache::RenderTarget* VulkanRenderTargetCache::CreateRenderTarget( RenderTargetKey key) { - const ui::vulkan::VulkanProvider& provider = - command_processor_.GetVulkanProvider(); - const ui::vulkan::VulkanProvider::DeviceFunctions& dfn = provider.dfn(); - VkDevice device = provider.device(); + const ui::vulkan::VulkanDevice* const vulkan_device = + command_processor_.GetVulkanDevice(); + const ui::vulkan::VulkanDevice::Functions& dfn = vulkan_device->functions(); + const VkDevice device = vulkan_device->device(); // Create the image. 
@@ -1745,7 +1747,7 @@ RenderTargetCache::RenderTarget* VulkanRenderTargetCache::CreateRenderTarget( VkImage image; VkDeviceMemory memory; if (!ui::vulkan::util::CreateDedicatedAllocationImage( - provider, image_create_info, + vulkan_device, image_create_info, ui::vulkan::util::MemoryPurpose::kDeviceLocal, image, memory)) { XELOGE( "VulkanRenderTarget: Failed to create a {}x{} {}xMSAA {} render target " @@ -2071,12 +2073,12 @@ VulkanRenderTargetCache::GetHostRenderTargetsFramebuffer( return &it->second; } - const ui::vulkan::VulkanProvider& provider = - command_processor_.GetVulkanProvider(); - const ui::vulkan::VulkanProvider::DeviceFunctions& dfn = provider.dfn(); - VkDevice device = provider.device(); - const ui::vulkan::VulkanProvider::DeviceInfo& device_info = - provider.device_info(); + const ui::vulkan::VulkanDevice* const vulkan_device = + command_processor_.GetVulkanDevice(); + const ui::vulkan::VulkanDevice::Functions& dfn = vulkan_device->functions(); + const VkDevice device = vulkan_device->device(); + const ui::vulkan::VulkanDevice::Properties& device_properties = + vulkan_device->properties(); VkRenderPass render_pass = GetHostRenderTargetsRenderPass(render_pass_key); if (render_pass == VK_NULL_HANDLE) { @@ -2125,9 +2127,9 @@ VulkanRenderTargetCache::GetHostRenderTargetsFramebuffer( // there's no limit imposed by the sizes of the attachments that have been // created successfully. 
host_extent.width = std::min(host_extent.width * draw_resolution_scale_x(), - device_info.maxFramebufferWidth); + device_properties.maxFramebufferWidth); host_extent.height = std::min(host_extent.height * draw_resolution_scale_y(), - device_info.maxFramebufferHeight); + device_properties.maxFramebufferHeight); framebuffer_create_info.width = host_extent.width; framebuffer_create_info.height = host_extent.height; framebuffer_create_info.layers = 1; @@ -2150,10 +2152,10 @@ VkShaderModule VulkanRenderTargetCache::GetTransferShader( return shader_it->second; } - const ui::vulkan::VulkanProvider& provider = - command_processor_.GetVulkanProvider(); - const ui::vulkan::VulkanProvider::DeviceInfo& device_info = - provider.device_info(); + const ui::vulkan::VulkanDevice* const vulkan_device = + command_processor_.GetVulkanDevice(); + const ui::vulkan::VulkanDevice::Properties& device_properties = + vulkan_device->properties(); std::vector id_vector_temp; std::vector uint_vector_temp; @@ -2241,7 +2243,7 @@ VkShaderModule VulkanRenderTargetCache::GetTransferShader( // Outputs. bool shader_uses_stencil_reference_output = mode.output == TransferOutput::kDepth && - provider.device_info().ext_VK_EXT_shader_stencil_export; + vulkan_device->extensions().ext_EXT_shader_stencil_export; bool dest_color_is_uint = false; uint32_t dest_color_component_count = 0; spv::Id type_fragment_data_component = spv::NoResult; @@ -2477,7 +2479,7 @@ VkShaderModule VulkanRenderTargetCache::GetTransferShader( spv::Id input_sample_id = spv::NoResult; spv::Id spec_const_sample_id = spv::NoResult; if (key.dest_msaa_samples != xenos::MsaaSamples::k1X) { - if (device_info.sampleRateShading) { + if (device_properties.sampleRateShading) { // One draw for all samples. builder.addCapability(spv::CapabilitySampleRateShading); input_sample_id = builder.createVariable( @@ -2571,7 +2573,7 @@ VkShaderModule VulkanRenderTargetCache::GetTransferShader( // Load the destination sample index. 
spv::Id dest_sample_id = spv::NoResult; if (key.dest_msaa_samples != xenos::MsaaSamples::k1X) { - if (device_info.sampleRateShading) { + if (device_properties.sampleRateShading) { assert_true(input_sample_id != spv::NoResult); dest_sample_id = builder.createUnaryOp( spv::OpBitcast, type_uint, @@ -4194,7 +4196,7 @@ VkShaderModule VulkanRenderTargetCache::GetTransferShader( // Create the shader module, and store the handle even if creation fails not // to try to create it again later. VkShaderModule shader_module = ui::vulkan::util::CreateShaderModule( - provider, reinterpret_cast(shader_code.data()), + vulkan_device, reinterpret_cast(shader_code.data()), sizeof(uint32_t) * shader_code.size()); if (shader_module == VK_NULL_HANDLE) { XELOGE( @@ -4225,17 +4227,17 @@ VkPipeline const* VulkanRenderTargetCache::GetTransferPipelines( const TransferModeInfo& mode = kTransferModes[size_t(key.shader_key.mode)]; - const ui::vulkan::VulkanProvider& provider = - command_processor_.GetVulkanProvider(); - const ui::vulkan::VulkanProvider::DeviceFunctions& dfn = provider.dfn(); - VkDevice device = provider.device(); - const ui::vulkan::VulkanProvider::DeviceInfo& device_info = - provider.device_info(); + const ui::vulkan::VulkanDevice* const vulkan_device = + command_processor_.GetVulkanDevice(); + const ui::vulkan::VulkanDevice::Functions& dfn = vulkan_device->functions(); + const VkDevice device = vulkan_device->device(); + const ui::vulkan::VulkanDevice::Properties& device_properties = + vulkan_device->properties(); uint32_t dest_sample_count = uint32_t(1) << uint32_t(key.shader_key.dest_msaa_samples); bool dest_is_masked_sample = - dest_sample_count > 1 && !device_info.sampleRateShading; + dest_sample_count > 1 && !device_properties.sampleRateShading; VkPipelineShaderStageCreateInfo shader_stages[2]; shader_stages[0].sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO; @@ -4327,7 +4329,7 @@ VkPipeline const* VulkanRenderTargetCache::GetTransferPipelines( ? 
VK_SAMPLE_COUNT_4_BIT : VkSampleCountFlagBits(dest_sample_count); if (dest_sample_count > 1) { - if (device_info.sampleRateShading) { + if (device_properties.sampleRateShading) { multisample_state.sampleShadingEnable = VK_TRUE; multisample_state.minSampleShading = 1.0f; if (dest_sample_count == 2 && !msaa_2x_attachments_supported_) { @@ -4358,7 +4360,7 @@ VkPipeline const* VulkanRenderTargetCache::GetTransferPipelines( : VK_COMPARE_OP_ALWAYS; } if ((mode.output == TransferOutput::kDepth && - provider.device_info().ext_VK_EXT_shader_stencil_export) || + vulkan_device->extensions().ext_EXT_shader_stencil_export) || mode.output == TransferOutput::kStencilBit) { depth_stencil_state.stencilTestEnable = VK_TRUE; depth_stencil_state.front.failOp = VK_STENCIL_OP_KEEP; @@ -4386,21 +4388,10 @@ VkPipeline const* VulkanRenderTargetCache::GetTransferPipelines( 32 - xe::lzcnt(key.render_pass_key.depth_and_color_used >> 1); color_blend_state.pAttachments = color_blend_attachments; if (mode.output == TransferOutput::kColor) { - if (device_info.independentBlend) { - // State the intention more explicitly. - color_blend_attachments[key.shader_key.dest_color_rt_index] - .colorWriteMask = VK_COLOR_COMPONENT_R_BIT | - VK_COLOR_COMPONENT_G_BIT | - VK_COLOR_COMPONENT_B_BIT | VK_COLOR_COMPONENT_A_BIT; - } else { - // The blend state for all attachments must be identical, but other render - // targets are not written to by the shader. 
- for (uint32_t i = 0; i < color_blend_state.attachmentCount; ++i) { - color_blend_attachments[i].colorWriteMask = - VK_COLOR_COMPONENT_R_BIT | VK_COLOR_COMPONENT_G_BIT | - VK_COLOR_COMPONENT_B_BIT | VK_COLOR_COMPONENT_A_BIT; - } - } + assert_true(device_properties.independentBlend); + color_blend_attachments[key.shader_key.dest_color_rt_index].colorWriteMask = + VK_COLOR_COMPONENT_R_BIT | VK_COLOR_COMPONENT_G_BIT | + VK_COLOR_COMPONENT_B_BIT | VK_COLOR_COMPONENT_A_BIT; } std::array dynamic_states; @@ -4493,8 +4484,8 @@ void VulkanRenderTargetCache::PerformTransfersAndResolveClears( const Transfer::Rectangle* resolve_clear_rectangle) { assert_true(GetPath() == Path::kHostRenderTargets); - const ui::vulkan::VulkanProvider::DeviceInfo& device_info = - command_processor_.GetVulkanProvider().device_info(); + const ui::vulkan::VulkanDevice* const vulkan_device = + command_processor_.GetVulkanDevice(); uint64_t current_submission = command_processor_.GetCurrentSubmission(); DeferredCommandBuffer& command_buffer = command_processor_.deferred_command_buffer(); @@ -4809,7 +4800,8 @@ void VulkanRenderTargetCache::PerformTransfersAndResolveClears( // Gather shader keys and sort to reduce pipeline state and binding // switches. Also gather stencil rectangles to clear if needed. 
bool need_stencil_bit_draws = - dest_rt_key.is_depth && !device_info.ext_VK_EXT_shader_stencil_export; + dest_rt_key.is_depth && + !vulkan_device->extensions().ext_EXT_shader_stencil_export; current_transfer_invocations_.clear(); current_transfer_invocations_.reserve( current_transfers.size() << uint32_t(need_stencil_bit_draws)); @@ -5001,10 +4993,10 @@ void VulkanRenderTargetCache::PerformTransfersAndResolveClears( transfer_viewport.y = 0.0f; transfer_viewport.width = float(std::min(xe::next_pow2(transfer_framebuffer->host_extent.width), - device_info.maxViewportDimensions[0])); + vulkan_device->properties().maxViewportDimensions[0])); transfer_viewport.height = float( std::min(xe::next_pow2(transfer_framebuffer->host_extent.height), - device_info.maxViewportDimensions[1])); + vulkan_device->properties().maxViewportDimensions[1])); transfer_viewport.minDepth = 0.0f; transfer_viewport.maxDepth = 1.0f; command_processor_.SetViewport(transfer_viewport); @@ -5055,7 +5047,7 @@ void VulkanRenderTargetCache::PerformTransfersAndResolveClears( kTransferPipelineLayoutInfos[size_t( transfer_pipeline_layout_index)]; uint32_t transfer_sample_pipeline_count = - device_info.sampleRateShading + vulkan_device->properties().sampleRateShading ? 1 : uint32_t(1) << uint32_t(dest_rt_key.msaa_samples); bool transfer_is_stencil_bit = @@ -5922,7 +5914,7 @@ VkPipeline VulkanRenderTargetCache::GetDumpPipeline(DumpPipelineKey key) { // Create the pipeline, and store the handle even if creation fails not to try // to create it again later. VkPipeline pipeline = ui::vulkan::util::CreateComputePipeline( - command_processor_.GetVulkanProvider(), + command_processor_.GetVulkanDevice(), key.is_depth ? 
dump_pipeline_layout_depth_ : dump_pipeline_layout_color_, reinterpret_cast(shader_code.data()), sizeof(uint32_t) * shader_code.size()); diff --git a/src/xenia/gpu/vulkan/vulkan_render_target_cache.h b/src/xenia/gpu/vulkan/vulkan_render_target_cache.h index b151d622b..aa7ed5c7d 100644 --- a/src/xenia/gpu/vulkan/vulkan_render_target_cache.h +++ b/src/xenia/gpu/vulkan/vulkan_render_target_cache.h @@ -24,7 +24,6 @@ #include "xenia/gpu/vulkan/vulkan_texture_cache.h" #include "xenia/gpu/xenos.h" #include "xenia/ui/vulkan/single_layout_descriptor_set_pool.h" -#include "xenia/ui/vulkan/vulkan_provider.h" #include "xenia/ui/vulkan/vulkan_upload_buffer_pool.h" namespace xe { diff --git a/src/xenia/gpu/vulkan/vulkan_shader.cc b/src/xenia/gpu/vulkan/vulkan_shader.cc index 1ff7734ff..b79dd2812 100644 --- a/src/xenia/gpu/vulkan/vulkan_shader.cc +++ b/src/xenia/gpu/vulkan/vulkan_shader.cc @@ -11,6 +11,7 @@ #include +#include "xenia/base/assert.h" #include "xenia/base/logging.h" #include "xenia/ui/vulkan/vulkan_provider.h" @@ -20,10 +21,10 @@ namespace vulkan { VulkanShader::VulkanTranslation::~VulkanTranslation() { if (shader_module_) { - const ui::vulkan::VulkanProvider& provider = - static_cast(shader()).provider_; - provider.dfn().vkDestroyShaderModule(provider.device(), shader_module_, - nullptr); + const ui::vulkan::VulkanDevice* const vulkan_device = + static_cast(shader()).vulkan_device_; + vulkan_device->functions().vkDestroyShaderModule(vulkan_device->device(), + shader_module_, nullptr); } } @@ -34,8 +35,8 @@ VkShaderModule VulkanShader::VulkanTranslation::GetOrCreateShaderModule() { if (shader_module_ != VK_NULL_HANDLE) { return shader_module_; } - const ui::vulkan::VulkanProvider& provider = - static_cast(shader()).provider_; + const ui::vulkan::VulkanDevice* const vulkan_device = + static_cast(shader()).vulkan_device_; VkShaderModuleCreateInfo shader_module_create_info; shader_module_create_info.sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO; 
shader_module_create_info.pNext = nullptr; @@ -43,9 +44,9 @@ VkShaderModule VulkanShader::VulkanTranslation::GetOrCreateShaderModule() { shader_module_create_info.codeSize = translated_binary().size(); shader_module_create_info.pCode = reinterpret_cast(translated_binary().data()); - if (provider.dfn().vkCreateShaderModule(provider.device(), - &shader_module_create_info, nullptr, - &shader_module_) != VK_SUCCESS) { + if (vulkan_device->functions().vkCreateShaderModule( + vulkan_device->device(), &shader_module_create_info, nullptr, + &shader_module_) != VK_SUCCESS) { XELOGE( "VulkanShader::VulkanTranslation: Failed to create a Vulkan shader " "module for shader {:016X} modification {:016X}", @@ -56,15 +57,17 @@ VkShaderModule VulkanShader::VulkanTranslation::GetOrCreateShaderModule() { return shader_module_; } -VulkanShader::VulkanShader(const ui::vulkan::VulkanProvider& provider, - xenos::ShaderType shader_type, - uint64_t ucode_data_hash, - const uint32_t* ucode_dwords, - size_t ucode_dword_count, - std::endian ucode_source_endian) +VulkanShader::VulkanShader(const ui::vulkan::VulkanDevice* const vulkan_device, + const xenos::ShaderType shader_type, + const uint64_t ucode_data_hash, + const uint32_t* const ucode_dwords, + const size_t ucode_dword_count, + const std::endian ucode_source_endian) : SpirvShader(shader_type, ucode_data_hash, ucode_dwords, ucode_dword_count, ucode_source_endian), - provider_(provider) {} + vulkan_device_(vulkan_device) { + assert_not_null(vulkan_device); +} Shader::Translation* VulkanShader::CreateTranslationInstance( uint64_t modification) { diff --git a/src/xenia/gpu/vulkan/vulkan_shader.h b/src/xenia/gpu/vulkan/vulkan_shader.h index 7e78ac3b6..a70cc7333 100644 --- a/src/xenia/gpu/vulkan/vulkan_shader.h +++ b/src/xenia/gpu/vulkan/vulkan_shader.h @@ -14,7 +14,7 @@ #include "xenia/gpu/spirv_shader.h" #include "xenia/gpu/xenos.h" -#include "xenia/ui/vulkan/vulkan_provider.h" +#include "xenia/ui/vulkan/vulkan_device.h" namespace xe { 
namespace gpu { @@ -35,7 +35,7 @@ class VulkanShader : public SpirvShader { VkShaderModule shader_module_ = VK_NULL_HANDLE; }; - explicit VulkanShader(const ui::vulkan::VulkanProvider& provider, + explicit VulkanShader(const ui::vulkan::VulkanDevice* vulkan_device, xenos::ShaderType shader_type, uint64_t ucode_data_hash, const uint32_t* ucode_dwords, size_t ucode_dword_count, std::endian ucode_source_endian = std::endian::big); @@ -67,7 +67,7 @@ class VulkanShader : public SpirvShader { Translation* CreateTranslationInstance(uint64_t modification) override; private: - const ui::vulkan::VulkanProvider& provider_; + const ui::vulkan::VulkanDevice* vulkan_device_; std::atomic_flag binding_layout_user_uids_set_up_ = ATOMIC_FLAG_INIT; size_t texture_binding_layout_user_uid_ = 0; diff --git a/src/xenia/gpu/vulkan/vulkan_shared_memory.cc b/src/xenia/gpu/vulkan/vulkan_shared_memory.cc index 4501adb5c..3a195ff64 100644 --- a/src/xenia/gpu/vulkan/vulkan_shared_memory.cc +++ b/src/xenia/gpu/vulkan/vulkan_shared_memory.cc @@ -47,12 +47,10 @@ VulkanSharedMemory::~VulkanSharedMemory() { Shutdown(true); } bool VulkanSharedMemory::Initialize() { InitializeCommon(); - const ui::vulkan::VulkanProvider& provider = - command_processor_.GetVulkanProvider(); - const ui::vulkan::VulkanProvider::DeviceFunctions& dfn = provider.dfn(); - VkDevice device = provider.device(); - const ui::vulkan::VulkanProvider::DeviceInfo& device_info = - provider.device_info(); + const ui::vulkan::VulkanDevice* const vulkan_device = + command_processor_.GetVulkanDevice(); + const ui::vulkan::VulkanDevice::Functions& dfn = vulkan_device->functions(); + const VkDevice device = vulkan_device->device(); const VkBufferCreateFlags sparse_flags = VK_BUFFER_CREATE_SPARSE_BINDING_BIT | @@ -70,14 +68,15 @@ bool VulkanSharedMemory::Initialize() { buffer_create_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE; buffer_create_info.queueFamilyIndexCount = 0; buffer_create_info.pQueueFamilyIndices = nullptr; - if 
(cvars::vulkan_sparse_shared_memory && device_info.sparseResidencyBuffer) { + if (cvars::vulkan_sparse_shared_memory && + vulkan_device->properties().sparseResidencyBuffer) { if (dfn.vkCreateBuffer(device, &buffer_create_info, nullptr, &buffer_) == VK_SUCCESS) { VkMemoryRequirements buffer_memory_requirements; dfn.vkGetBufferMemoryRequirements(device, buffer_, &buffer_memory_requirements); if (xe::bit_scan_forward(buffer_memory_requirements.memoryTypeBits & - device_info.memory_types_device_local, + vulkan_device->memory_types().device_local, &buffer_memory_type_)) { uint32_t allocation_size_log2; xe::bit_scan_forward( @@ -130,7 +129,7 @@ bool VulkanSharedMemory::Initialize() { dfn.vkGetBufferMemoryRequirements(device, buffer_, &buffer_memory_requirements); if (!xe::bit_scan_forward(buffer_memory_requirements.memoryTypeBits & - device_info.memory_types_device_local, + vulkan_device->memory_types().device_local, &buffer_memory_type_)) { XELOGE( "Shared memory: Failed to get a device-local Vulkan memory type for " @@ -147,7 +146,7 @@ bool VulkanSharedMemory::Initialize() { buffer_memory_requirements.size; buffer_memory_allocate_info.memoryTypeIndex = buffer_memory_type_; VkMemoryDedicatedAllocateInfo buffer_memory_dedicated_allocate_info; - if (provider.device_info().ext_1_1_VK_KHR_dedicated_allocation) { + if (vulkan_device->extensions().ext_1_1_KHR_dedicated_allocation) { buffer_memory_allocate_info_last->pNext = &buffer_memory_dedicated_allocate_info; buffer_memory_allocate_info_last = @@ -183,7 +182,7 @@ bool VulkanSharedMemory::Initialize() { last_written_range_ = std::make_pair(0, 0); upload_buffer_pool_ = std::make_unique( - provider, VK_BUFFER_USAGE_TRANSFER_SRC_BIT, + vulkan_device, VK_BUFFER_USAGE_TRANSFER_SRC_BIT, xe::align(ui::vulkan::VulkanUploadBufferPool::kDefaultPageSize, size_t(1) << page_size_log2())); @@ -195,10 +194,10 @@ void VulkanSharedMemory::Shutdown(bool from_destructor) { upload_buffer_pool_.reset(); - const ui::vulkan::VulkanProvider& 
provider = - command_processor_.GetVulkanProvider(); - const ui::vulkan::VulkanProvider::DeviceFunctions& dfn = provider.dfn(); - VkDevice device = provider.device(); + const ui::vulkan::VulkanDevice* const vulkan_device = + command_processor_.GetVulkanDevice(); + const ui::vulkan::VulkanDevice::Functions& dfn = vulkan_device->functions(); + const VkDevice device = vulkan_device->device(); ui::vulkan::util::DestroyAndNullHandle(dfn.vkDestroyBuffer, device, buffer_); for (VkDeviceMemory memory : buffer_memory_) { @@ -260,10 +259,9 @@ bool VulkanSharedMemory::InitializeTraceSubmitDownloads() { return false; } - const ui::vulkan::VulkanProvider& provider = - command_processor_.GetVulkanProvider(); if (!ui::vulkan::util::CreateDedicatedAllocationBuffer( - provider, download_page_count << page_size_log2(), + command_processor_.GetVulkanDevice(), + download_page_count << page_size_log2(), VK_BUFFER_USAGE_TRANSFER_DST_BIT, ui::vulkan::util::MemoryPurpose::kReadback, trace_download_buffer_, trace_download_buffer_memory_)) { @@ -306,10 +304,10 @@ void VulkanSharedMemory::InitializeTraceCompleteDownloads() { if (!trace_download_buffer_memory_) { return; } - const ui::vulkan::VulkanProvider& provider = - command_processor_.GetVulkanProvider(); - const ui::vulkan::VulkanProvider::DeviceFunctions& dfn = provider.dfn(); - VkDevice device = provider.device(); + const ui::vulkan::VulkanDevice* const vulkan_device = + command_processor_.GetVulkanDevice(); + const ui::vulkan::VulkanDevice::Functions& dfn = vulkan_device->functions(); + const VkDevice device = vulkan_device->device(); void* download_mapping; if (dfn.vkMapMemory(device, trace_download_buffer_memory_, 0, VK_WHOLE_SIZE, 0, &download_mapping) == VK_SUCCESS) { @@ -335,10 +333,10 @@ bool VulkanSharedMemory::AllocateSparseHostGpuMemoryRange( return true; } - const ui::vulkan::VulkanProvider& provider = - command_processor_.GetVulkanProvider(); - const ui::vulkan::VulkanProvider::DeviceFunctions& dfn = provider.dfn(); - 
VkDevice device = provider.device(); + const ui::vulkan::VulkanDevice* const vulkan_device = + command_processor_.GetVulkanDevice(); + const ui::vulkan::VulkanDevice::Functions& dfn = vulkan_device->functions(); + const VkDevice device = vulkan_device->device(); VkMemoryAllocateInfo memory_allocate_info; memory_allocate_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; @@ -365,7 +363,7 @@ bool VulkanSharedMemory::AllocateSparseHostGpuMemoryRange( VK_PIPELINE_STAGE_VERTEX_INPUT_BIT | VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT | VK_PIPELINE_STAGE_TRANSFER_BIT; - if (provider.device_info().tessellationShader) { + if (vulkan_device->properties().tessellationShader) { bind_wait_stage_mask |= VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT; } @@ -482,10 +480,10 @@ void VulkanSharedMemory::GetUsageMasks(Usage usage, } void VulkanSharedMemory::ResetTraceDownload() { - const ui::vulkan::VulkanProvider& provider = - command_processor_.GetVulkanProvider(); - const ui::vulkan::VulkanProvider::DeviceFunctions& dfn = provider.dfn(); - VkDevice device = provider.device(); + const ui::vulkan::VulkanDevice* const vulkan_device = + command_processor_.GetVulkanDevice(); + const ui::vulkan::VulkanDevice::Functions& dfn = vulkan_device->functions(); + const VkDevice device = vulkan_device->device(); ui::vulkan::util::DestroyAndNullHandle(dfn.vkDestroyBuffer, device, trace_download_buffer_); ui::vulkan::util::DestroyAndNullHandle(dfn.vkFreeMemory, device, diff --git a/src/xenia/gpu/vulkan/vulkan_shared_memory.h b/src/xenia/gpu/vulkan/vulkan_shared_memory.h index 14214a5d0..b77fe24fe 100644 --- a/src/xenia/gpu/vulkan/vulkan_shared_memory.h +++ b/src/xenia/gpu/vulkan/vulkan_shared_memory.h @@ -18,7 +18,6 @@ #include "xenia/gpu/shared_memory.h" #include "xenia/gpu/trace_writer.h" #include "xenia/memory.h" -#include "xenia/ui/vulkan/vulkan_provider.h" #include "xenia/ui/vulkan/vulkan_upload_buffer_pool.h" 
namespace xe { diff --git a/src/xenia/gpu/vulkan/vulkan_texture_cache.cc b/src/xenia/gpu/vulkan/vulkan_texture_cache.cc index 1f3ccaf24..137ae5fb3 100644 --- a/src/xenia/gpu/vulkan/vulkan_texture_cache.cc +++ b/src/xenia/gpu/vulkan/vulkan_texture_cache.cc @@ -22,6 +22,7 @@ #include "xenia/gpu/texture_util.h" #include "xenia/gpu/vulkan/deferred_command_buffer.h" #include "xenia/gpu/vulkan/vulkan_command_processor.h" +#include "xenia/ui/vulkan/ui_samplers.h" #include "xenia/ui/vulkan/vulkan_mem_alloc.h" #include "xenia/ui/vulkan/vulkan_util.h" @@ -423,10 +424,10 @@ const VulkanTextureCache::HostFormatPair true}; VulkanTextureCache::~VulkanTextureCache() { - const ui::vulkan::VulkanProvider& provider = - command_processor_.GetVulkanProvider(); - const ui::vulkan::VulkanProvider::DeviceFunctions& dfn = provider.dfn(); - VkDevice device = provider.device(); + const ui::vulkan::VulkanDevice* const vulkan_device = + command_processor_.GetVulkanDevice(); + const ui::vulkan::VulkanDevice::Functions& dfn = vulkan_device->functions(); + const VkDevice device = vulkan_device->device(); for (const std::pair& sampler_pair : samplers_) { @@ -523,9 +524,9 @@ void VulkanTextureCache::BeginSubmission(uint64_t new_submission_index) { } void VulkanTextureCache::RequestTextures(uint32_t used_texture_mask) { -#if XE_UI_VULKAN_FINE_GRAINED_DRAW_SCOPES +#if XE_GPU_FINE_GRAINED_DRAW_SCOPES SCOPE_profile_cpu_f("gpu"); -#endif // XE_UI_VULKAN_FINE_GRAINED_DRAW_SCOPES +#endif // XE_GPU_FINE_GRAINED_DRAW_SCOPES TextureCache::RequestTextures(used_texture_mask); @@ -718,10 +719,10 @@ VkSampler VulkanTextureCache::UseSampler(SamplerParameters parameters, return sampler.second.sampler; } - const ui::vulkan::VulkanProvider& provider = - command_processor_.GetVulkanProvider(); - const ui::vulkan::VulkanProvider::DeviceFunctions& dfn = provider.dfn(); - VkDevice device = provider.device(); + const ui::vulkan::VulkanDevice* const vulkan_device = + command_processor_.GetVulkanDevice(); + const 
ui::vulkan::VulkanDevice::Functions& dfn = vulkan_device->functions(); + const VkDevice device = vulkan_device->device(); // See if an existing sampler can be destroyed to create space for the new // one. @@ -761,7 +762,7 @@ VkSampler VulkanTextureCache::UseSampler(SamplerParameters parameters, // GetSamplerParameters. VkSamplerCreateInfo sampler_create_info = {}; sampler_create_info.sType = VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO; - if (provider.device_info().nonSeamlessCubeMap && + if (vulkan_device->properties().nonSeamlessCubeMap && cvars::non_seamless_cube_map) { sampler_create_info.flags |= VK_SAMPLER_CREATE_NON_SEAMLESS_CUBE_MAP_BIT_EXT; @@ -940,17 +941,17 @@ uint32_t VulkanTextureCache::GetHostFormatSwizzle(TextureKey key) const { uint32_t VulkanTextureCache::GetMaxHostTextureWidthHeight( xenos::DataDimension dimension) const { - const ui::vulkan::VulkanProvider::DeviceInfo& device_info = - command_processor_.GetVulkanProvider().device_info(); + const ui::vulkan::VulkanDevice::Properties& device_properties = + command_processor_.GetVulkanDevice()->properties(); switch (dimension) { case xenos::DataDimension::k1D: case xenos::DataDimension::k2DOrStacked: // 1D and 2D are emulated as 2D arrays. 
- return device_info.maxImageDimension2D; + return device_properties.maxImageDimension2D; case xenos::DataDimension::k3D: - return device_info.maxImageDimension3D; + return device_properties.maxImageDimension3D; case xenos::DataDimension::kCube: - return device_info.maxImageDimensionCube; + return device_properties.maxImageDimensionCube; default: assert_unhandled_case(dimension); return 0; @@ -959,15 +960,15 @@ uint32_t VulkanTextureCache::GetMaxHostTextureWidthHeight( uint32_t VulkanTextureCache::GetMaxHostTextureDepthOrArraySize( xenos::DataDimension dimension) const { - const ui::vulkan::VulkanProvider::DeviceInfo& device_info = - command_processor_.GetVulkanProvider().device_info(); + const ui::vulkan::VulkanDevice::Properties& device_properties = + command_processor_.GetVulkanDevice()->properties(); switch (dimension) { case xenos::DataDimension::k1D: case xenos::DataDimension::k2DOrStacked: // 1D and 2D are emulated as 2D arrays. - return device_info.maxImageArrayLayers; + return device_properties.maxImageArrayLayers; case xenos::DataDimension::k3D: - return device_info.maxImageDimension3D; + return device_properties.maxImageDimension3D; case xenos::DataDimension::kCube: // Not requesting the imageCubeArray feature, and the Xenos doesn't // support cube map arrays. 
@@ -1009,10 +1010,10 @@ std::unique_ptr VulkanTextureCache::CreateTexture( return nullptr; } - const ui::vulkan::VulkanProvider& provider = - command_processor_.GetVulkanProvider(); - const ui::vulkan::VulkanProvider::DeviceFunctions& dfn = provider.dfn(); - VkDevice device = provider.device(); + const ui::vulkan::VulkanDevice* const vulkan_device = + command_processor_.GetVulkanDevice(); + const ui::vulkan::VulkanDevice::Functions& dfn = vulkan_device->functions(); + const VkDevice device = vulkan_device->device(); bool is_3d = key.dimension == xenos::DataDimension::k3D; uint32_t depth_or_array_size = key.GetDepthOrArraySize(); @@ -1049,7 +1050,7 @@ std::unique_ptr VulkanTextureCache::CreateTexture( image_create_info.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; VkImageFormatListCreateInfo image_format_list_create_info; if (formats[1] != VK_FORMAT_UNDEFINED && - provider.device_info().ext_1_2_VK_KHR_image_format_list) { + vulkan_device->extensions().ext_1_2_KHR_image_format_list) { image_create_info_last->pNext = &image_format_list_create_info; image_create_info_last = reinterpret_cast(&image_format_list_create_info); @@ -1224,10 +1225,10 @@ bool VulkanTextureCache::LoadTextureDataFromResidentMemoryImpl(Texture& texture, // Begin loading. // TODO(Triang3l): Going from one descriptor to another on per-array-layer // or even per-8-depth-slices level to stay within maxStorageBufferRange. 
- const ui::vulkan::VulkanProvider& provider = - command_processor_.GetVulkanProvider(); - const ui::vulkan::VulkanProvider::DeviceFunctions& dfn = provider.dfn(); - VkDevice device = provider.device(); + const ui::vulkan::VulkanDevice* const vulkan_device = + command_processor_.GetVulkanDevice(); + const ui::vulkan::VulkanDevice::Functions& dfn = vulkan_device->functions(); + const VkDevice device = vulkan_device->device(); VulkanSharedMemory& vulkan_shared_memory = static_cast(shared_memory()); std::array write_descriptor_sets; @@ -1590,10 +1591,10 @@ VulkanTextureCache::VulkanTexture::VulkanTexture( VulkanTextureCache::VulkanTexture::~VulkanTexture() { const VulkanTextureCache& vulkan_texture_cache = static_cast(texture_cache()); - const ui::vulkan::VulkanProvider& provider = - vulkan_texture_cache.command_processor_.GetVulkanProvider(); - const ui::vulkan::VulkanProvider::DeviceFunctions& dfn = provider.dfn(); - VkDevice device = provider.device(); + const ui::vulkan::VulkanDevice* const vulkan_device = + vulkan_texture_cache.command_processor_.GetVulkanDevice(); + const ui::vulkan::VulkanDevice::Functions& dfn = vulkan_device->functions(); + const VkDevice device = vulkan_device->device(); for (const auto& view_pair : views_) { dfn.vkDestroyImageView(device, view_pair.second, nullptr); } @@ -1631,9 +1632,10 @@ VkImageView VulkanTextureCache::VulkanTexture::GetView(bool is_signed, is_signed && (host_format_pair.format_signed.format != host_format_pair.format_unsigned.format); - const ui::vulkan::VulkanProvider& provider = - vulkan_texture_cache.command_processor_.GetVulkanProvider(); - if (!provider.device_info().imageViewFormatSwizzle) { + const ui::vulkan::VulkanDevice* const vulkan_device = + vulkan_texture_cache.command_processor_.GetVulkanDevice(); + + if (!vulkan_device->properties().imageViewFormatSwizzle) { host_swizzle = xenos::XE_GPU_TEXTURE_SWIZZLE_RGBA; } view_key.host_swizzle = host_swizzle; @@ -1647,8 +1649,8 @@ VkImageView 
VulkanTextureCache::VulkanTexture::GetView(bool is_signed, } // Create a new view. - const ui::vulkan::VulkanProvider::DeviceFunctions& dfn = provider.dfn(); - VkDevice device = provider.device(); + const ui::vulkan::VulkanDevice::Functions& dfn = vulkan_device->functions(); + const VkDevice device = vulkan_device->device(); VkImageViewCreateInfo view_create_info; view_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO; view_create_info.pNext = nullptr; @@ -1704,18 +1706,19 @@ VulkanTextureCache::VulkanTextureCache( } bool VulkanTextureCache::Initialize() { - const ui::vulkan::VulkanProvider& provider = - command_processor_.GetVulkanProvider(); - const ui::vulkan::VulkanProvider::InstanceFunctions& ifn = provider.ifn(); - VkPhysicalDevice physical_device = provider.physical_device(); - const ui::vulkan::VulkanProvider::DeviceFunctions& dfn = provider.dfn(); - VkDevice device = provider.device(); - const ui::vulkan::VulkanProvider::DeviceInfo& device_info = - provider.device_info(); + const ui::vulkan::VulkanDevice* const vulkan_device = + command_processor_.GetVulkanDevice(); + const ui::vulkan::VulkanInstance::Functions& ifn = + vulkan_device->vulkan_instance()->functions(); + const VkPhysicalDevice physical_device = vulkan_device->physical_device(); + const ui::vulkan::VulkanDevice::Functions& dfn = vulkan_device->functions(); + const VkDevice device = vulkan_device->device(); + const ui::vulkan::VulkanDevice::Properties& device_properties = + vulkan_device->properties(); // Vulkan Memory Allocator. 
- vma_allocator_ = ui::vulkan::CreateVmaAllocator(provider, true); + vma_allocator_ = ui::vulkan::CreateVmaAllocator(vulkan_device, true); if (vma_allocator_ == VK_NULL_HANDLE) { return false; } @@ -2323,7 +2326,7 @@ bool VulkanTextureCache::Initialize() { load_shader_code[i]; assert_not_null(current_load_shader_code.first); load_pipelines_[i] = ui::vulkan::util::CreateComputePipeline( - provider, load_pipeline_layout_, current_load_shader_code.first, + vulkan_device, load_pipeline_layout_, current_load_shader_code.first, current_load_shader_code.second); if (load_pipelines_[i] == VK_NULL_HANDLE) { XELOGE( @@ -2337,7 +2340,7 @@ bool VulkanTextureCache::Initialize() { current_load_shader_code_scaled = load_shader_code_scaled[i]; if (current_load_shader_code_scaled.first) { load_pipelines_scaled_[i] = ui::vulkan::util::CreateComputePipeline( - provider, load_pipeline_layout_, + vulkan_device, load_pipeline_layout_, current_load_shader_code_scaled.first, current_load_shader_code_scaled.second); if (load_pipelines_scaled_[i] == VK_NULL_HANDLE) { @@ -2402,7 +2405,7 @@ bool VulkanTextureCache::Initialize() { dfn.vkGetImageMemoryRequirements(device, null_image_3d_, &null_image_memory_requirements_3d_); uint32_t null_image_memory_type_common = ui::vulkan::util::ChooseMemoryType( - provider, + vulkan_device->memory_types(), null_image_memory_requirements_2d_array_cube_.memoryTypeBits & null_image_memory_requirements_3d_.memoryTypeBits, ui::vulkan::util::MemoryPurpose::kDeviceLocal); @@ -2443,16 +2446,18 @@ bool VulkanTextureCache::Initialize() { } } else { // Place each null image in separate allocations. 
- uint32_t null_image_memory_type_2d_array_cube_ = + const uint32_t null_image_memory_type_2d_array_cube = ui::vulkan::util::ChooseMemoryType( - provider, + vulkan_device->memory_types(), null_image_memory_requirements_2d_array_cube_.memoryTypeBits, ui::vulkan::util::MemoryPurpose::kDeviceLocal); - uint32_t null_image_memory_type_3d_ = ui::vulkan::util::ChooseMemoryType( - provider, null_image_memory_requirements_3d_.memoryTypeBits, - ui::vulkan::util::MemoryPurpose::kDeviceLocal); - if (null_image_memory_type_2d_array_cube_ == UINT32_MAX || - null_image_memory_type_3d_ == UINT32_MAX) { + const uint32_t null_image_memory_type_3d = + ui::vulkan::util::ChooseMemoryType( + vulkan_device->memory_types(), + null_image_memory_requirements_3d_.memoryTypeBits, + ui::vulkan::util::MemoryPurpose::kDeviceLocal); + if (null_image_memory_type_2d_array_cube == UINT32_MAX || + null_image_memory_type_3d == UINT32_MAX) { XELOGE( "VulkanTextureCache: Failed to get the memory types for the null " "images"); @@ -2468,9 +2473,9 @@ bool VulkanTextureCache::Initialize() { null_image_memory_allocate_info.allocationSize = null_image_memory_requirements_2d_array_cube_.size; null_image_memory_allocate_info.memoryTypeIndex = - null_image_memory_type_2d_array_cube_; + null_image_memory_type_2d_array_cube; VkMemoryDedicatedAllocateInfo null_image_memory_dedicated_allocate_info; - if (device_info.ext_1_1_VK_KHR_dedicated_allocation) { + if (vulkan_device->extensions().ext_1_1_KHR_dedicated_allocation) { null_image_memory_allocate_info_last->pNext = &null_image_memory_dedicated_allocate_info; null_image_memory_allocate_info_last = @@ -2500,8 +2505,7 @@ bool VulkanTextureCache::Initialize() { null_image_memory_allocate_info.allocationSize = null_image_memory_requirements_3d_.size; - null_image_memory_allocate_info.memoryTypeIndex = - null_image_memory_type_3d_; + null_image_memory_allocate_info.memoryTypeIndex = null_image_memory_type_3d; null_image_memory_dedicated_allocate_info.image = 
null_image_3d_; if (dfn.vkAllocateMemory(device, &null_image_memory_allocate_info, nullptr, &null_images_memory_[1]) != VK_SUCCESS) { @@ -2531,8 +2535,8 @@ bool VulkanTextureCache::Initialize() { // constant components instead of the real texels. The image will be cleared // to (0, 0, 0, 0) anyway. VkComponentSwizzle null_image_view_swizzle = - device_info.imageViewFormatSwizzle ? VK_COMPONENT_SWIZZLE_ZERO - : VK_COMPONENT_SWIZZLE_IDENTITY; + device_properties.imageViewFormatSwizzle ? VK_COMPONENT_SWIZZLE_ZERO + : VK_COMPONENT_SWIZZLE_IDENTITY; null_image_view_create_info.components.r = null_image_view_swizzle; null_image_view_create_info.components.g = null_image_view_swizzle; null_image_view_create_info.components.b = null_image_view_swizzle; @@ -2571,15 +2575,15 @@ bool VulkanTextureCache::Initialize() { // VkDevice (true in a regular emulation scenario), so taking over all the // allocation slots exclusively. // Also leaving a few slots for use by things like overlay applications. - sampler_max_count_ = - device_info.maxSamplerAllocationCount - - uint32_t(ui::vulkan::VulkanProvider::HostSampler::kCount) - 16; + sampler_max_count_ = device_properties.maxSamplerAllocationCount - + ui::vulkan::UISamplers::kSamplerCount - 16; - if (device_info.samplerAnisotropy) { + if (device_properties.samplerAnisotropy) { max_anisotropy_ = xenos::AnisoFilter( uint32_t(xenos::AnisoFilter::kMax_1_1) + - (31 - xe::lzcnt(uint32_t(std::min( - 16.0f, std::max(1.0f, device_info.maxSamplerAnisotropy)))))); + (31 - + xe::lzcnt(uint32_t(std::min( + 16.0f, std::max(1.0f, device_properties.maxSamplerAnisotropy)))))); } else { max_anisotropy_ = xenos::AnisoFilter::kDisabled; } @@ -2643,8 +2647,8 @@ xenos::ClampMode VulkanTextureCache::NormalizeClampMode( clamp_mode == xenos::ClampMode::kMirrorClampToHalfway || clamp_mode == xenos::ClampMode::kMirrorClampToBorder) { // No equivalents for anything other than kMirrorClampToEdge in Vulkan. 
- return command_processor_.GetVulkanProvider() - .device_info() + return command_processor_.GetVulkanDevice() + ->properties() .samplerMirrorClampToEdge ? xenos::ClampMode::kMirrorClampToEdge : xenos::ClampMode::kMirroredRepeat; diff --git a/src/xenia/gpu/vulkan/vulkan_texture_cache.h b/src/xenia/gpu/vulkan/vulkan_texture_cache.h index 448e74d03..4b22c2695 100644 --- a/src/xenia/gpu/vulkan/vulkan_texture_cache.h +++ b/src/xenia/gpu/vulkan/vulkan_texture_cache.h @@ -20,7 +20,6 @@ #include "xenia/gpu/vulkan/vulkan_shader.h" #include "xenia/gpu/vulkan/vulkan_shared_memory.h" #include "xenia/ui/vulkan/vulkan_mem_alloc.h" -#include "xenia/ui/vulkan/vulkan_provider.h" namespace xe { namespace gpu { diff --git a/src/xenia/gpu/vulkan/vulkan_trace_dump_main.cc b/src/xenia/gpu/vulkan/vulkan_trace_dump_main.cc index 4483129b8..dc3d1025a 100644 --- a/src/xenia/gpu/vulkan/vulkan_trace_dump_main.cc +++ b/src/xenia/gpu/vulkan/vulkan_trace_dump_main.cc @@ -27,24 +27,24 @@ class VulkanTraceDump : public TraceDump { } void BeginHostCapture() override { - const RENDERDOC_API_1_0_0* renderdoc_api = + const ui::RenderDocAPI* const renderdoc_api = static_cast( graphics_system_->provider()) - ->renderdoc_api() - .api_1_0_0(); - if (renderdoc_api && !renderdoc_api->IsFrameCapturing()) { - renderdoc_api->StartFrameCapture(nullptr, nullptr); + ->vulkan_instance() + ->renderdoc_api(); + if (renderdoc_api && !renderdoc_api->api_1_0_0()->IsFrameCapturing()) { + renderdoc_api->api_1_0_0()->StartFrameCapture(nullptr, nullptr); } } void EndHostCapture() override { - const RENDERDOC_API_1_0_0* renderdoc_api = + const ui::RenderDocAPI* const renderdoc_api = static_cast( graphics_system_->provider()) - ->renderdoc_api() - .api_1_0_0(); - if (renderdoc_api && renderdoc_api->IsFrameCapturing()) { - renderdoc_api->EndFrameCapture(nullptr, nullptr); + ->vulkan_instance() + ->renderdoc_api(); + if (renderdoc_api && renderdoc_api->api_1_0_0()->IsFrameCapturing()) { + 
renderdoc_api->api_1_0_0()->EndFrameCapture(nullptr, nullptr); } } }; diff --git a/src/xenia/hid/hid_demo.cc b/src/xenia/hid/hid_demo.cc index 46111fc84..1487081e4 100644 --- a/src/xenia/hid/hid_demo.cc +++ b/src/xenia/hid/hid_demo.cc @@ -171,7 +171,7 @@ std::vector> HidDemoApp::CreateInputDrivers( bool HidDemoApp::OnInitialize() { // Create the graphics provider that provides the presenter for the window. - graphics_provider_ = xe::ui::vulkan::VulkanProvider::Create(true); + graphics_provider_ = xe::ui::vulkan::VulkanProvider::Create(false, true); if (!graphics_provider_) { XELOGE("Failed to initialize the graphics provider"); return false; diff --git a/src/xenia/ui/d3d12/d3d12_provider.h b/src/xenia/ui/d3d12/d3d12_provider.h index 36164eaba..079886344 100644 --- a/src/xenia/ui/d3d12/d3d12_provider.h +++ b/src/xenia/ui/d3d12/d3d12_provider.h @@ -15,8 +15,6 @@ #include "xenia/ui/d3d12/d3d12_api.h" #include "xenia/ui/graphics_provider.h" -#define XE_UI_D3D12_FINE_GRAINED_DRAW_SCOPES 1 - namespace xe { namespace ui { namespace d3d12 { diff --git a/src/xenia/ui/graphics_provider.h b/src/xenia/ui/graphics_provider.h index 08c78cd1f..10480a722 100644 --- a/src/xenia/ui/graphics_provider.h +++ b/src/xenia/ui/graphics_provider.h @@ -36,6 +36,11 @@ class GraphicsProvider { kQualcomm = 0x5143, }; + GraphicsProvider(const GraphicsProvider&) = delete; + GraphicsProvider& operator=(const GraphicsProvider&) = delete; + GraphicsProvider(GraphicsProvider&&) = delete; + GraphicsProvider& operator=(GraphicsProvider&&) = delete; + virtual ~GraphicsProvider() = default; // It's safe to reinitialize the presenter in the host GPU loss callback if it diff --git a/src/xenia/ui/renderdoc_api.cc b/src/xenia/ui/renderdoc_api.cc index 7c5f7ef99..af1a56f46 100644 --- a/src/xenia/ui/renderdoc_api.cc +++ b/src/xenia/ui/renderdoc_api.cc @@ -9,7 +9,6 @@ #include "xenia/ui/renderdoc_api.h" -#include "xenia/base/assert.h" #include "xenia/base/logging.h" #include "xenia/base/platform.h" @@ -22,54 
+21,57 @@ namespace xe { namespace ui { -bool RenderdocApi::Initialize() { - Shutdown(); +std::unique_ptr RenderDocAPI::CreateIfConnected() { + std::unique_ptr renderdoc_api(new RenderDocAPI()); + pRENDERDOC_GetAPI get_api = nullptr; - // The RenderDoc library should be already loaded into the process if + + // The RenderDoc library should already be loaded into the process if // RenderDoc is attached - this is why RTLD_NOLOAD or GetModuleHandle instead // of LoadLibrary. #if XE_PLATFORM_LINUX #if XE_PLATFORM_ANDROID - const char* librenderdoc_name = "libVkLayer_GLES_RenderDoc.so"; + const char* const library_name = "libVkLayer_GLES_RenderDoc.so"; #else - const char* librenderdoc_name = "librenderdoc.so"; + const char* const library_name = "librenderdoc.so"; #endif - library_ = dlopen(librenderdoc_name, RTLD_NOW | RTLD_NOLOAD); - if (library_) { - get_api = pRENDERDOC_GetAPI(dlsym(library_, "RENDERDOC_GetAPI")); + renderdoc_api->library_ = dlopen(library_name, RTLD_NOW | RTLD_NOLOAD); + if (!renderdoc_api->library_) { + return nullptr; } + get_api = + pRENDERDOC_GetAPI(dlsym(renderdoc_api->library_, "RENDERDOC_GetAPI")); #elif XE_PLATFORM_WIN32 - library_ = GetModuleHandleA("renderdoc.dll"); - if (library_) { - get_api = pRENDERDOC_GetAPI( - GetProcAddress(HMODULE(library_), "RENDERDOC_GetAPI")); + renderdoc_api->library_ = GetModuleHandleW(L"renderdoc.dll"); + if (!renderdoc_api->library_) { + return nullptr; } + get_api = pRENDERDOC_GetAPI( + GetProcAddress(renderdoc_api->library_, "RENDERDOC_GetAPI")); #endif - if (!get_api) { - Shutdown(); - return false; - } - // get_api will be null if RenderDoc is not attached, or the API isn't + + // get_api will be null if RenderDoc is not connected, or the API isn't // available on this platform, or there was an error. 
- if (!get_api || !get_api(eRENDERDOC_API_Version_1_0_0, (void**)&api_1_0_0_) || - !api_1_0_0_) { - Shutdown(); - return false; + if (!get_api || + !get_api(eRENDERDOC_API_Version_1_0_0, + (void**)&renderdoc_api->api_1_0_0_) || + !renderdoc_api->api_1_0_0_) { + return nullptr; } + XELOGI("RenderDoc API initialized"); - return true; + + return renderdoc_api; } -void RenderdocApi::Shutdown() { - api_1_0_0_ = nullptr; - if (library_) { +RenderDocAPI::~RenderDocAPI() { #if XE_PLATFORM_LINUX + if (library_) { dlclose(library_); -#endif - // Not calling FreeLibrary on Windows as GetModuleHandle doesn't increment - // the reference count. - library_ = nullptr; } +#endif + // Not calling FreeLibrary on Windows as GetModuleHandle doesn't increment + // the reference count. } } // namespace ui diff --git a/src/xenia/ui/renderdoc_api.h b/src/xenia/ui/renderdoc_api.h index 1a07fe116..a183b6f81 100644 --- a/src/xenia/ui/renderdoc_api.h +++ b/src/xenia/ui/renderdoc_api.h @@ -10,26 +10,39 @@ #ifndef XENIA_UI_RENDERDOC_API_H_ #define XENIA_UI_RENDERDOC_API_H_ +#include + #include "third_party/renderdoc/renderdoc_app.h" +#include "xenia/base/platform.h" + +#if XE_PLATFORM_WIN32 +#include "xenia/base/platform_win.h" +#endif namespace xe { namespace ui { -class RenderdocApi { +class RenderDocAPI { public: - RenderdocApi() = default; - RenderdocApi(const RenderdocApi& renderdoc_api) = delete; - RenderdocApi& operator=(const RenderdocApi& renderdoc_api) = delete; - ~RenderdocApi() { Shutdown(); } + static std::unique_ptr CreateIfConnected(); - bool Initialize(); - void Shutdown(); + RenderDocAPI(const RenderDocAPI&) = delete; + RenderDocAPI& operator=(const RenderDocAPI&) = delete; - // nullptr if not attached. + ~RenderDocAPI(); + + // Always present if this object exists. 
const RENDERDOC_API_1_0_0* api_1_0_0() const { return api_1_0_0_; } private: + explicit RenderDocAPI() = default; + +#if XE_PLATFORM_LINUX void* library_ = nullptr; +#elif XE_PLATFORM_WIN32 + HMODULE library_ = nullptr; +#endif + const RENDERDOC_API_1_0_0* api_1_0_0_ = nullptr; }; diff --git a/src/xenia/ui/vulkan/functions/device_khr_bind_memory2.inc b/src/xenia/ui/vulkan/functions/device_1_1_khr_bind_memory2.inc similarity index 100% rename from src/xenia/ui/vulkan/functions/device_khr_bind_memory2.inc rename to src/xenia/ui/vulkan/functions/device_1_1_khr_bind_memory2.inc diff --git a/src/xenia/ui/vulkan/functions/device_khr_get_memory_requirements2.inc b/src/xenia/ui/vulkan/functions/device_1_1_khr_get_memory_requirements2.inc similarity index 100% rename from src/xenia/ui/vulkan/functions/device_khr_get_memory_requirements2.inc rename to src/xenia/ui/vulkan/functions/device_1_1_khr_get_memory_requirements2.inc diff --git a/src/xenia/ui/vulkan/functions/device_khr_maintenance4.inc b/src/xenia/ui/vulkan/functions/device_1_3_khr_maintenance4.inc similarity index 100% rename from src/xenia/ui/vulkan/functions/device_khr_maintenance4.inc rename to src/xenia/ui/vulkan/functions/device_1_3_khr_maintenance4.inc diff --git a/src/xenia/ui/vulkan/functions/instance_khr_get_physical_device_properties2.inc b/src/xenia/ui/vulkan/functions/instance_1_1_khr_get_physical_device_properties2.inc similarity index 100% rename from src/xenia/ui/vulkan/functions/instance_khr_get_physical_device_properties2.inc rename to src/xenia/ui/vulkan/functions/instance_1_1_khr_get_physical_device_properties2.inc diff --git a/src/xenia/ui/vulkan/linked_type_descriptor_set_allocator.cc b/src/xenia/ui/vulkan/linked_type_descriptor_set_allocator.cc index ff129f336..c9b4eba3f 100644 --- a/src/xenia/ui/vulkan/linked_type_descriptor_set_allocator.cc +++ b/src/xenia/ui/vulkan/linked_type_descriptor_set_allocator.cc @@ -22,8 +22,8 @@ namespace ui { namespace vulkan { void 
LinkedTypeDescriptorSetAllocator::Reset() { - const ui::vulkan::VulkanProvider::DeviceFunctions& dfn = provider_.dfn(); - VkDevice device = provider_.device(); + const VulkanDevice::Functions& dfn = vulkan_device_->functions(); + const VkDevice device = vulkan_device_->device(); ui::vulkan::util::DestroyAndNullHandle(dfn.vkDestroyDescriptorPool, device, page_usable_latest_.pool); page_usable_latest_.descriptors_remaining.reset(); @@ -53,8 +53,8 @@ VkDescriptorSet LinkedTypeDescriptorSetAllocator::Allocate( } #endif - const ui::vulkan::VulkanProvider::DeviceFunctions& dfn = provider_.dfn(); - VkDevice device = provider_.device(); + const VulkanDevice::Functions& dfn = vulkan_device_->functions(); + const VkDevice device = vulkan_device_->device(); VkDescriptorSetAllocateInfo descriptor_set_allocate_info; descriptor_set_allocate_info.sType = diff --git a/src/xenia/ui/vulkan/linked_type_descriptor_set_allocator.h b/src/xenia/ui/vulkan/linked_type_descriptor_set_allocator.h index 999c616b1..b0a200abd 100644 --- a/src/xenia/ui/vulkan/linked_type_descriptor_set_allocator.h +++ b/src/xenia/ui/vulkan/linked_type_descriptor_set_allocator.h @@ -18,7 +18,7 @@ #include #include "xenia/base/assert.h" -#include "xenia/ui/vulkan/vulkan_provider.h" +#include "xenia/ui/vulkan/vulkan_device.h" namespace xe { namespace ui { @@ -54,13 +54,15 @@ class LinkedTypeDescriptorSetAllocator { // Multiple descriptor sizes for the same descriptor type, and zero sizes, are // not allowed. 
explicit LinkedTypeDescriptorSetAllocator( - const ui::vulkan::VulkanProvider& provider, - const VkDescriptorPoolSize* descriptor_sizes, - uint32_t descriptor_size_count, uint32_t descriptor_sets_per_page) - : provider_(provider), + const VulkanDevice* const vulkan_device, + const VkDescriptorPoolSize* const descriptor_sizes, + const uint32_t descriptor_size_count, + const uint32_t descriptor_sets_per_page) + : vulkan_device_(vulkan_device), descriptor_pool_sizes_(new VkDescriptorPoolSize[descriptor_size_count]), descriptor_pool_size_count_(descriptor_size_count), descriptor_sets_per_page_(descriptor_sets_per_page) { + assert_not_null(vulkan_device); assert_not_zero(descriptor_size_count); assert_not_zero(descriptor_sets_per_page_); #ifndef NDEBUG @@ -94,7 +96,7 @@ class LinkedTypeDescriptorSetAllocator { uint32_t descriptor_sets_remaining; }; - const ui::vulkan::VulkanProvider& provider_; + const VulkanDevice* vulkan_device_; std::unique_ptr descriptor_pool_sizes_; uint32_t descriptor_pool_size_count_; diff --git a/src/xenia/ui/vulkan/single_layout_descriptor_set_pool.cc b/src/xenia/ui/vulkan/single_layout_descriptor_set_pool.cc index 5b07c0673..477e51cc0 100644 --- a/src/xenia/ui/vulkan/single_layout_descriptor_set_pool.cc +++ b/src/xenia/ui/vulkan/single_layout_descriptor_set_pool.cc @@ -17,13 +17,14 @@ namespace ui { namespace vulkan { SingleLayoutDescriptorSetPool::SingleLayoutDescriptorSetPool( - const VulkanProvider& provider, uint32_t pool_set_count, - uint32_t set_layout_descriptor_counts_count, - const VkDescriptorPoolSize* set_layout_descriptor_counts, - VkDescriptorSetLayout set_layout) - : provider_(provider), + const VulkanDevice* const vulkan_device, const uint32_t pool_set_count, + const uint32_t set_layout_descriptor_counts_count, + const VkDescriptorPoolSize* const set_layout_descriptor_counts, + const VkDescriptorSetLayout set_layout) + : vulkan_device_(vulkan_device), pool_set_count_(pool_set_count), set_layout_(set_layout) { + 
assert_not_null(vulkan_device); assert_not_zero(pool_set_count); pool_descriptor_counts_.resize(set_layout_descriptor_counts_count); for (uint32_t i = 0; i < set_layout_descriptor_counts_count; ++i) { @@ -38,8 +39,8 @@ SingleLayoutDescriptorSetPool::SingleLayoutDescriptorSetPool( } SingleLayoutDescriptorSetPool::~SingleLayoutDescriptorSetPool() { - const ui::vulkan::VulkanProvider::DeviceFunctions& dfn = provider_.dfn(); - VkDevice device = provider_.device(); + const VulkanDevice::Functions& dfn = vulkan_device_->functions(); + const VkDevice device = vulkan_device_->device(); if (current_pool_ != VK_NULL_HANDLE) { dfn.vkDestroyDescriptorPool(device, current_pool_, nullptr); } @@ -55,8 +56,8 @@ size_t SingleLayoutDescriptorSetPool::Allocate() { return free_index; } - const ui::vulkan::VulkanProvider::DeviceFunctions& dfn = provider_.dfn(); - VkDevice device = provider_.device(); + const VulkanDevice::Functions& dfn = vulkan_device_->functions(); + const VkDevice device = vulkan_device_->device(); // Two iterations so if vkAllocateDescriptorSets fails even with a non-zero // current_pool_sets_remaining_, another attempt will be made in a new pool. diff --git a/src/xenia/ui/vulkan/single_layout_descriptor_set_pool.h b/src/xenia/ui/vulkan/single_layout_descriptor_set_pool.h index c3f3eb080..ba5b8230a 100644 --- a/src/xenia/ui/vulkan/single_layout_descriptor_set_pool.h +++ b/src/xenia/ui/vulkan/single_layout_descriptor_set_pool.h @@ -15,7 +15,7 @@ #include #include "xenia/base/assert.h" -#include "xenia/ui/vulkan/vulkan_provider.h" +#include "xenia/ui/vulkan/vulkan_device.h" namespace xe { namespace ui { @@ -28,7 +28,7 @@ class SingleLayoutDescriptorSetPool { // set count will be done internally). The descriptor set layout must not be // destroyed until this object is also destroyed. 
SingleLayoutDescriptorSetPool( - const VulkanProvider& provider, uint32_t pool_set_count, + const VulkanDevice* vulkan_device, uint32_t pool_set_count, uint32_t set_layout_descriptor_counts_count, const VkDescriptorPoolSize* set_layout_descriptor_counts, VkDescriptorSetLayout set_layout); @@ -43,7 +43,7 @@ class SingleLayoutDescriptorSetPool { VkDescriptorSet Get(size_t index) const { return descriptor_sets_[index]; } private: - const VulkanProvider& provider_; + const VulkanDevice* vulkan_device_; uint32_t pool_set_count_; std::vector pool_descriptor_counts_; VkDescriptorSetLayout set_layout_; diff --git a/src/xenia/ui/vulkan/ui_samplers.cc b/src/xenia/ui/vulkan/ui_samplers.cc new file mode 100644 index 000000000..eacba6ee3 --- /dev/null +++ b/src/xenia/ui/vulkan/ui_samplers.cc @@ -0,0 +1,88 @@ +/** + ****************************************************************************** + * Xenia : Xbox 360 Emulator Research Project * + ****************************************************************************** + * Copyright 2025 Ben Vanik. All rights reserved. * + * Released under the BSD license - see LICENSE in the root for more details. 
* + ****************************************************************************** + */ + +#include "xenia/ui/vulkan/ui_samplers.h" + +#include "xenia/base/assert.h" +#include "xenia/base/logging.h" + +namespace xe { +namespace ui { +namespace vulkan { + +std::unique_ptr UISamplers::Create( + const VulkanDevice* const vulkan_device) { + assert_not_null(vulkan_device); + + std::unique_ptr ui_samplers(new UISamplers(vulkan_device)); + + const VulkanDevice::Functions& dfn = vulkan_device->functions(); + const VkDevice device = vulkan_device->device(); + + VkSamplerCreateInfo sampler_create_info = { + VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO}; + + for (int sampler_index = 0; sampler_index < kSamplerCount; ++sampler_index) { + if (sampler_index == kSamplerIndexLinearRepeat || + sampler_index == kSamplerIndexLinearClampToEdge) { + sampler_create_info.magFilter = VK_FILTER_LINEAR; + sampler_create_info.mipmapMode = VK_SAMPLER_MIPMAP_MODE_LINEAR; + } else { + sampler_create_info.magFilter = VK_FILTER_NEAREST; + sampler_create_info.mipmapMode = VK_SAMPLER_MIPMAP_MODE_NEAREST; + } + sampler_create_info.minFilter = sampler_create_info.magFilter; + + if (sampler_index == kSamplerIndexNearestClampToEdge || + sampler_index == kSamplerIndexLinearClampToEdge) { + sampler_create_info.addressModeU = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE; + } else { + sampler_create_info.addressModeU = VK_SAMPLER_ADDRESS_MODE_REPEAT; + } + sampler_create_info.addressModeV = sampler_create_info.addressModeU; + sampler_create_info.addressModeW = sampler_create_info.addressModeU; + + const VkResult sampler_create_result = + dfn.vkCreateSampler(device, &sampler_create_info, nullptr, + &ui_samplers->samplers_[sampler_index]); + if (sampler_create_result != VK_SUCCESS) { + XELOGE( + "Failed to create the Vulkan UI sampler with filter {}, addressing " + "mode {}: {}", + vk::to_string(vk::Filter(sampler_create_info.magFilter)), + vk::to_string( + vk::SamplerAddressMode(sampler_create_info.addressModeU)), + 
vk::to_string(vk::Result(sampler_create_result))); + return nullptr; + } + } + + return ui_samplers; +} + +UISamplers::~UISamplers() { + for (const VkSampler sampler : samplers_) { + if (sampler == VK_NULL_HANDLE) { + continue; + } + vulkan_device_->functions().vkDestroySampler(vulkan_device_->device(), + sampler, nullptr); + } +} + +UISamplers::UISamplers(const VulkanDevice* vulkan_device) + : vulkan_device_(vulkan_device) { + assert_not_null(vulkan_device); + + samplers_.fill(VK_NULL_HANDLE); +} + +} // namespace vulkan +} // namespace ui +} // namespace xe diff --git a/src/xenia/ui/vulkan/ui_samplers.h b/src/xenia/ui/vulkan/ui_samplers.h new file mode 100644 index 000000000..146fed3ce --- /dev/null +++ b/src/xenia/ui/vulkan/ui_samplers.h @@ -0,0 +1,63 @@ +/** + ****************************************************************************** + * Xenia : Xbox 360 Emulator Research Project * + ****************************************************************************** + * Copyright 2025 Ben Vanik. All rights reserved. * + * Released under the BSD license - see LICENSE in the root for more details. * + ****************************************************************************** + */ + +#ifndef XENIA_UI_VULKAN_VULKAN_UI_SAMPLERS_H_ +#define XENIA_UI_VULKAN_VULKAN_UI_SAMPLERS_H_ + +#include +#include + +#include "xenia/ui/vulkan/vulkan_device.h" + +namespace xe { +namespace ui { +namespace vulkan { + +/// Samplers that can be used for presentation and UI drawing. +/// Because maxSamplerAllocationCount can be as low as 4000 (on Nvidia and Intel +/// GPUs primarily), no other samplers must be created for UI purposes. +/// The rest of the sampler allocation space on the device must be available to +/// GPU emulation. 
+class UISamplers { + public: + static std::unique_ptr Create(const VulkanDevice* vulkan_device); + + UISamplers(const UISamplers&) = delete; + UISamplers& operator=(const UISamplers&) = delete; + UISamplers(UISamplers&&) = delete; + UISamplers& operator=(UISamplers&&) = delete; + + ~UISamplers(); + + enum SamplerIndex { + kSamplerIndexNearestRepeat, + kSamplerIndexNearestClampToEdge, + kSamplerIndexLinearRepeat, + kSamplerIndexLinearClampToEdge, + + kSamplerCount, + }; + + const std::array& samplers() const { + return samplers_; + } + + private: + explicit UISamplers(const VulkanDevice* vulkan_device); + + const VulkanDevice* vulkan_device_; + + std::array samplers_; +}; + +} // namespace vulkan +} // namespace ui +} // namespace xe + +#endif // XENIA_UI_VULKAN_VULKAN_UI_SAMPLERS_H_ diff --git a/src/xenia/ui/vulkan/vulkan_api.h b/src/xenia/ui/vulkan/vulkan_api.h new file mode 100644 index 000000000..27f99fdf0 --- /dev/null +++ b/src/xenia/ui/vulkan/vulkan_api.h @@ -0,0 +1,50 @@ +/** + ****************************************************************************** + * Xenia : Xbox 360 Emulator Research Project * + ****************************************************************************** + * Copyright 2025 Ben Vanik. All rights reserved. * + * Released under the BSD license - see LICENSE in the root for more details. * + ****************************************************************************** + */ + +#ifndef XENIA_UI_VULKAN_VULKAN_API_H_ +#define XENIA_UI_VULKAN_VULKAN_API_H_ + +#include "xenia/base/assert.h" // For Vulkan-Hpp. 
+#include "xenia/base/platform.h" + +#ifndef VK_NO_PROTOTYPES +#define VK_NO_PROTOTYPES +#endif + +#ifndef VK_ENABLE_BETA_EXTENSIONS +#define VK_ENABLE_BETA_EXTENSIONS +#endif + +#if XE_PLATFORM_ANDROID +#ifndef VK_USE_PLATFORM_ANDROID_KHR +#define VK_USE_PLATFORM_ANDROID_KHR +#endif +#endif + +#if XE_PLATFORM_GNU_LINUX +#ifndef VK_USE_PLATFORM_XCB_KHR +#define VK_USE_PLATFORM_XCB_KHR +#endif +#endif + +#if XE_PLATFORM_WIN32 +// Must be included before including vulkan.h with VK_USE_PLATFORM_WIN32_KHR +// because it includes Windows.h too. +#include "xenia/base/platform_win.h" +#ifndef VK_USE_PLATFORM_WIN32_KHR +#define VK_USE_PLATFORM_WIN32_KHR +#endif +#endif + +#include "third_party/Vulkan-Headers/include/vulkan/vulkan.h" + +#include "third_party/Vulkan-Headers/include/vulkan/vulkan_hpp_macros.hpp" +#include "third_party/Vulkan-Headers/include/vulkan/vulkan_to_string.hpp" + +#endif // XENIA_UI_VULKAN_VULKAN_API_H_ diff --git a/src/xenia/ui/vulkan/vulkan_device.cc b/src/xenia/ui/vulkan/vulkan_device.cc new file mode 100644 index 000000000..0dc4ba8e5 --- /dev/null +++ b/src/xenia/ui/vulkan/vulkan_device.cc @@ -0,0 +1,828 @@ +/** + ****************************************************************************** + * Xenia : Xbox 360 Emulator Research Project * + ****************************************************************************** + * Copyright 2025 Ben Vanik. All rights reserved. * + * Released under the BSD license - see LICENSE in the root for more details. 
* + ****************************************************************************** + */ + +#include "xenia/ui/vulkan/vulkan_device.h" + +#include "xenia/base/assert.h" +#include "xenia/base/logging.h" +#include "xenia/base/platform.h" + +#include +#include +#include +#include +#include + +namespace xe { +namespace ui { +namespace vulkan { + +template +struct VulkanFeatures { + Structure supported = {StructureType}; + Structure enabled = {StructureType}; + + void Link(VkPhysicalDeviceFeatures2& supported_features_2, + VkDeviceCreateInfo& device_create_info) { + supported.pNext = supported_features_2.pNext; + supported_features_2.pNext = &supported; + enabled.pNext = const_cast(device_create_info.pNext); + device_create_info.pNext = &enabled; + } +}; + +std::unique_ptr VulkanDevice::CreateIfSupported( + const VulkanInstance* const vulkan_instance, + const VkPhysicalDevice physical_device, const bool with_gpu_emulation, + const bool with_swapchain) { + assert_not_null(vulkan_instance); + assert_not_null(physical_device); + + const VulkanInstance::Functions& ifn = vulkan_instance->functions(); + + // Get supported Vulkan 1.0 properties and features. + + VkPhysicalDeviceProperties properties = {}; + ifn.vkGetPhysicalDeviceProperties(physical_device, &properties); + + const uint32_t unclamped_api_version = properties.apiVersion; + if (vulkan_instance->api_version() < VK_MAKE_API_VERSION(0, 1, 1, 0)) { + // From the VkApplicationInfo specification: + // + // "The Khronos validation layers will treat apiVersion as the highest API + // version the application targets, and will validate API usage against the + // minimum of that version and the implementation version (instance or + // device, depending on context). If an application tries to use + // functionality from a greater version than this, a validation error will + // be triggered." + // + // "Vulkan 1.0 implementations were required to return + // VK_ERROR_INCOMPATIBLE_DRIVER if apiVersion was larger than 1.0." 
+ properties.apiVersion = VK_MAKE_API_VERSION( + 0, 1, 0, VK_API_VERSION_PATCH(properties.apiVersion)); + } + + VkPhysicalDeviceFeatures supported_features = {}; + ifn.vkGetPhysicalDeviceFeatures(physical_device, &supported_features); + + if (with_gpu_emulation) { + if (!supported_features.independentBlend) { + // Not trivial to work around: + // - Affects not only the blend equation, but also the color write mask. + // - Can't reuse the blend state of the first attachment for all because + // some attachments may have a format that doesn't support blending. + // - Not possible to split the draw into per-attachment draws because of + // depth / stencil. + // Not supported only on the proprietary driver for the Qualcomm + // Adreno 4xx, where the driver is largely experimental and doesn't expose + // a lot of the functionality available in the hardware. + XELOGW( + "Vulkan device '{}' doesn't support the independentBlend feature " + "required for GPU emulation", + properties.deviceName); + return nullptr; + } + } + + // Enable needed extensions. + + std::unique_ptr device( + new VulkanDevice(vulkan_instance, physical_device)); + + const bool get_physical_device_properties2_supported = + vulkan_instance->extensions().ext_1_1_KHR_get_physical_device_properties2; + + // Name pointers from `requested_extensions` will be used in the enabled + // extensions vector. 
+ std::unordered_map requested_extensions; + + const auto request_promoted_extension = + [&](const char* const name, uint32_t const major, uint32_t const minor, + bool* const supported_ptr) { + assert_not_null(supported_ptr); + if (properties.apiVersion >= VK_MAKE_API_VERSION(0, major, minor, 0)) { + *supported_ptr = true; + } else { + requested_extensions.emplace(name, supported_ptr); + } + }; + +#define XE_UI_VULKAN_STRUCT_EXTENSION(name) \ + requested_extensions.emplace("VK_" #name, &device->extensions_.ext_##name); +#define XE_UI_VULKAN_LOCAL_EXTENSION(name) \ + requested_extensions.emplace("VK_" #name, &ext_##name); +#define XE_UI_VULKAN_STRUCT_PROMOTED_EXTENSION(name, major, minor) \ + request_promoted_extension( \ + "VK_" #name, major, minor, \ + &device->extensions_.ext_##major##_##minor##_##name); +#define XE_UI_VULKAN_LOCAL_PROMOTED_EXTENSION(name, major, minor) \ + request_promoted_extension("VK_" #name, major, minor, \ + &ext_##major##_##minor##_##name); + + bool ext_KHR_portability_subset = false; + bool ext_1_2_KHR_driver_properties = false; + if (get_physical_device_properties2_supported) { + // #164. Must be enabled according to the specification if the physical + // device is a portability subset one. + XE_UI_VULKAN_LOCAL_EXTENSION(KHR_portability_subset) + // #197 + XE_UI_VULKAN_LOCAL_PROMOTED_EXTENSION(KHR_driver_properties, 1, 2) + } + + // Used by the Vulkan Memory Allocator and potentially by Xenia. + // #128. + XE_UI_VULKAN_STRUCT_PROMOTED_EXTENSION(KHR_dedicated_allocation, 1, 1) + // #147. Also must be enabled for VK_KHR_dedicated_allocation and + // VK_KHR_sampler_ycbcr_conversion. + XE_UI_VULKAN_STRUCT_PROMOTED_EXTENSION(KHR_get_memory_requirements2, 1, 1) + // #158. Also must be enabled for VK_KHR_sampler_ycbcr_conversion. + XE_UI_VULKAN_STRUCT_PROMOTED_EXTENSION(KHR_bind_memory2, 1, 1) + if (get_physical_device_properties2_supported) { + // #238. 
+ XE_UI_VULKAN_STRUCT_EXTENSION(EXT_memory_budget) + } + if (properties.apiVersion >= VK_MAKE_API_VERSION(0, 1, 1, 0)) { + // #414. + XE_UI_VULKAN_STRUCT_PROMOTED_EXTENSION(KHR_maintenance4, 1, 3) + } + + if (with_swapchain) { + // #2. + XE_UI_VULKAN_STRUCT_EXTENSION(KHR_swapchain) + } + + bool ext_1_2_KHR_sampler_mirror_clamp_to_edge = false; + bool ext_1_1_KHR_maintenance1 = false; + bool ext_1_2_KHR_shader_float_controls = false; + bool ext_EXT_fragment_shader_interlock = false; + bool ext_1_3_EXT_shader_demote_to_helper_invocation = false; + bool ext_EXT_non_seamless_cube_map = false; + if (with_gpu_emulation) { + // #15. + XE_UI_VULKAN_LOCAL_PROMOTED_EXTENSION(KHR_sampler_mirror_clamp_to_edge, 1, + 2) + // #70. Must be enabled for VK_KHR_sampler_ycbcr_conversion. + XE_UI_VULKAN_LOCAL_PROMOTED_EXTENSION(KHR_maintenance1, 1, 1) + // #141. + XE_UI_VULKAN_STRUCT_EXTENSION(EXT_shader_stencil_export) + // #148. + XE_UI_VULKAN_STRUCT_PROMOTED_EXTENSION(KHR_image_format_list, 1, 2) + if (get_physical_device_properties2_supported) { + // #157. + XE_UI_VULKAN_STRUCT_PROMOTED_EXTENSION(KHR_sampler_ycbcr_conversion, 1, 1) + // #198. Also must be enabled for VK_KHR_spirv_1_4. + XE_UI_VULKAN_LOCAL_PROMOTED_EXTENSION(KHR_shader_float_controls, 1, 2) + // #252. + XE_UI_VULKAN_LOCAL_EXTENSION(EXT_fragment_shader_interlock) + // #277. + XE_UI_VULKAN_LOCAL_PROMOTED_EXTENSION( + EXT_shader_demote_to_helper_invocation, 1, 3) + // #423. + XE_UI_VULKAN_LOCAL_EXTENSION(EXT_non_seamless_cube_map) + } + if (properties.apiVersion >= VK_MAKE_API_VERSION(0, 1, 1, 0)) { + // #237. 
+ XE_UI_VULKAN_STRUCT_PROMOTED_EXTENSION(KHR_spirv_1_4, 1, 2) + } + } + +#undef XE_UI_VULKAN_STRUCT_EXTENSION +#undef XE_UI_VULKAN_LOCAL_EXTENSION +#undef XE_UI_VULKAN_STRUCT_PROMOTED_EXTENSION +#undef XE_UI_VULKAN_LOCAL_PROMOTED_EXTENSION + + std::vector enabled_extensions; + { + uint32_t supported_extension_count = 0; + const VkResult get_supported_extension_count_result = + ifn.vkEnumerateDeviceExtensionProperties( + physical_device, nullptr, &supported_extension_count, nullptr); + if (get_supported_extension_count_result != VK_SUCCESS && + get_supported_extension_count_result != VK_INCOMPLETE) { + XELOGW("Failed to get the Vulkan device '{}' extension count", + properties.deviceName); + return nullptr; + } + if (supported_extension_count) { + std::vector supported_extensions( + supported_extension_count); + if (ifn.vkEnumerateDeviceExtensionProperties( + physical_device, nullptr, &supported_extension_count, + supported_extensions.data()) != VK_SUCCESS) { + XELOGW("Failed to get the Vulkan device '{}' extensions", + properties.deviceName); + return nullptr; + } + assert_true(supported_extension_count == supported_extensions.size()); + for (const VkExtensionProperties& supported_extension : + supported_extensions) { + const auto requested_extension_it = + requested_extensions.find(supported_extension.extensionName); + if (requested_extension_it == requested_extensions.cend()) { + continue; + } + assert_not_null(requested_extension_it->second); + if (!*requested_extension_it->second) { + enabled_extensions.emplace_back( + requested_extension_it->first.c_str()); + *requested_extension_it->second = true; + } + } + } + } + + if (with_swapchain && !device->extensions_.ext_KHR_swapchain) { + XELOGW("Vulkan device '{}' doesn't support swapchains", + properties.deviceName); + return nullptr; + } + + VkDeviceCreateInfo device_create_info = { + VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO}; + + device_create_info.enabledExtensionCount = + uint32_t(enabled_extensions.size()); + 
device_create_info.ppEnabledExtensionNames = enabled_extensions.data(); + + // Get supported Vulkan 1.1+ and extension properties and features. + // + // The property and feature structures are initialized to zero or to the + // minimum / maximum requirements for the simplicity of handling unavailable + // VK_KHR_get_physical_device_properties2. + + VkPhysicalDeviceProperties2 properties_2 = { + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2}; + + VkPhysicalDeviceFeatures2 supported_features_2 = { + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2}; + + VulkanFeatures + features_1_2; + VulkanFeatures + features_1_3; + VulkanFeatures< + VkPhysicalDevicePortabilitySubsetFeaturesKHR, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PORTABILITY_SUBSET_FEATURES_KHR> + features_KHR_portability_subset; + VkPhysicalDeviceDriverPropertiesKHR properties_1_2_KHR_driver_properties = { + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DRIVER_PROPERTIES}; + VkPhysicalDeviceFloatControlsProperties + properties_1_2_KHR_shader_float_controls = { + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FLOAT_CONTROLS_PROPERTIES}; + VulkanFeatures< + VkPhysicalDeviceFragmentShaderInterlockFeaturesEXT, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADER_INTERLOCK_FEATURES_EXT> + features_EXT_fragment_shader_interlock; + VulkanFeatures< + VkPhysicalDeviceShaderDemoteToHelperInvocationFeaturesEXT, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DEMOTE_TO_HELPER_INVOCATION_FEATURES_EXT> + features_1_3_EXT_shader_demote_to_helper_invocation; + VulkanFeatures< + VkPhysicalDeviceNonSeamlessCubeMapFeaturesEXT, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_NON_SEAMLESS_CUBE_MAP_FEATURES_EXT> + features_EXT_non_seamless_cube_map; + + if (get_physical_device_properties2_supported) { + if (properties.apiVersion >= VK_MAKE_API_VERSION(0, 1, 2, 0)) { + features_1_2.Link(supported_features_2, device_create_info); + } + if (properties.apiVersion >= VK_MAKE_API_VERSION(0, 1, 3, 0)) { + features_1_3.Link(supported_features_2, device_create_info); + } else { + if 
(ext_1_3_EXT_shader_demote_to_helper_invocation) { + features_1_3_EXT_shader_demote_to_helper_invocation.Link( + supported_features_2, device_create_info); + } + } + if (ext_KHR_portability_subset) { + features_KHR_portability_subset.Link(supported_features_2, + device_create_info); + } + if (ext_1_2_KHR_driver_properties) { + properties_1_2_KHR_driver_properties.pNext = properties_2.pNext; + properties_2.pNext = &properties_1_2_KHR_driver_properties; + } + if (ext_1_2_KHR_shader_float_controls) { + properties_1_2_KHR_shader_float_controls.pNext = properties_2.pNext; + properties_2.pNext = &properties_1_2_KHR_shader_float_controls; + } + if (ext_EXT_fragment_shader_interlock) { + features_EXT_fragment_shader_interlock.Link(supported_features_2, + device_create_info); + } + if (ext_EXT_non_seamless_cube_map) { + features_EXT_non_seamless_cube_map.Link(supported_features_2, + device_create_info); + } + ifn.vkGetPhysicalDeviceProperties2(physical_device, &properties_2); + ifn.vkGetPhysicalDeviceFeatures2(physical_device, &supported_features_2); + } + + uint32_t queue_family_count = 0; + ifn.vkGetPhysicalDeviceQueueFamilyProperties(physical_device, + &queue_family_count, nullptr); + std::vector queue_families(queue_family_count); + ifn.vkGetPhysicalDeviceQueueFamilyProperties( + physical_device, &queue_family_count, queue_families.data()); + + device->queue_families_.resize(queue_family_count); + + uint32_t first_queue_family_graphics_compute_sparse_binding = UINT32_MAX; + uint32_t first_queue_family_graphics_compute = UINT32_MAX; + uint32_t first_queue_family_sparse_binding = UINT32_MAX; + bool has_presentation_queue_family = false; + + for (uint32_t queue_family_index = 0; queue_family_index < queue_family_count; + ++queue_family_index) { + QueueFamily& queue_family = device->queue_families_[queue_family_index]; + const VkQueueFamilyProperties& queue_family_properties = + queue_families[queue_family_index]; + + const VkQueueFlags queue_unsupported_flags = + 
~queue_family_properties.queueFlags; + + if (!(queue_unsupported_flags & + (VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT))) { + first_queue_family_graphics_compute = + std::min(queue_family_index, first_queue_family_graphics_compute); + } + + if (with_gpu_emulation && supported_features.sparseBinding && + !(queue_unsupported_flags & VK_QUEUE_SPARSE_BINDING_BIT)) { + first_queue_family_sparse_binding = + std::min(queue_family_index, first_queue_family_sparse_binding); + if (!(queue_unsupported_flags & + (VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT))) { + first_queue_family_graphics_compute_sparse_binding = + std::min(queue_family_index, + first_queue_family_graphics_compute_sparse_binding); + } + } + + if (with_swapchain) { +#if XE_PLATFORM_WIN32 + queue_family.may_support_presentation = + vulkan_instance->extensions().ext_KHR_win32_surface && + ifn.vkGetPhysicalDeviceWin32PresentationSupportKHR( + physical_device, queue_family_index); +#else + queue_family.may_support_presentation = true; +#endif + if (queue_family.may_support_presentation) { + queue_family.queues.resize( + std::max(size_t(1), queue_family.queues.size())); + has_presentation_queue_family = true; + } + } + } + + if (first_queue_family_graphics_compute == UINT32_MAX) { + // Not valid according to the Vulkan specification, but for safety. + XELOGW( + "Vulkan device '{}' doesn't provide a graphics and compute queue " + "family", + properties.deviceName); + return nullptr; + } + + if (with_swapchain && !has_presentation_queue_family) { + XELOGW( + "Vulkan device '{}' doesn't provide a queue family that supports " + "presentation", + properties.deviceName); + return nullptr; + } + + // Get the queues to create. + + if (first_queue_family_sparse_binding == UINT32_MAX) { + // Not valid not to provide a sparse binding queue if the sparseBinding + // feature is supported according to the Vulkan specification, but for + // safety and simplicity. 
+ supported_features.sparseBinding = VK_FALSE; + } + if (!supported_features.sparseBinding) { + supported_features.sparseResidencyBuffer = VK_FALSE; + supported_features.sparseResidencyImage2D = VK_FALSE; + supported_features.sparseResidencyImage3D = VK_FALSE; + supported_features.sparseResidency2Samples = VK_FALSE; + supported_features.sparseResidency4Samples = VK_FALSE; + supported_features.sparseResidency8Samples = VK_FALSE; + supported_features.sparseResidency16Samples = VK_FALSE; + supported_features.sparseResidencyAliased = VK_FALSE; + } + + // Prefer using one queue for everything whenever possible for simplicity. + // TODO(Triang3l): Research if separate queues for purposes like composition, + // swapchain image presentation, and sparse binding, may be beneficial. + + if (first_queue_family_graphics_compute_sparse_binding != UINT32_MAX) { + device->queue_family_graphics_compute_ = + first_queue_family_graphics_compute_sparse_binding; + device->queue_family_sparse_binding_ = + first_queue_family_graphics_compute_sparse_binding; + } else { + device->queue_family_graphics_compute_ = + first_queue_family_graphics_compute; + device->queue_family_sparse_binding_ = first_queue_family_sparse_binding; + } + + device->queue_families_[device->queue_family_graphics_compute_].queues.resize( + std::max(size_t(1), + device->queue_families_[device->queue_family_graphics_compute_] + .queues.size())); + if (device->queue_family_sparse_binding_ != UINT32_MAX) { + device->queue_families_[device->queue_family_sparse_binding_].queues.resize( + std::max(size_t(1), + device->queue_families_[device->queue_family_sparse_binding_] + .queues.size())); + } + + size_t max_enabled_queues_per_family = 0; + for (const QueueFamily& queue_family : device->queue_families_) { + max_enabled_queues_per_family = + std::max(queue_family.queues.size(), max_enabled_queues_per_family); + } + const std::vector queue_priorities(max_enabled_queues_per_family, + 1.0f); + std::vector queue_create_infos; + 
for (size_t queue_family_index = 0; + queue_family_index < device->queue_families_.size(); + ++queue_family_index) { + const QueueFamily& queue_family = + device->queue_families_[queue_family_index]; + if (queue_family.queues.empty()) { + continue; + } + VkDeviceQueueCreateInfo& queue_create_info = + queue_create_infos.emplace_back(); + queue_create_info.sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO; + queue_create_info.pNext = nullptr; + queue_create_info.flags = 0; + queue_create_info.queueFamilyIndex = uint32_t(queue_family_index); + queue_create_info.queueCount = uint32_t(queue_family.queues.size()); + queue_create_info.pQueuePriorities = queue_priorities.data(); + } + device_create_info.queueCreateInfoCount = uint32_t(queue_create_infos.size()); + device_create_info.pQueueCreateInfos = queue_create_infos.data(); + + // Enable needed features and copy the properties. + // + // Enabling only actually used features because drivers may take more optimal + // paths when certain features are disabled. Also, in VK_EXT_shader_object, + // the state that the application must set for the draw depends on which + // features are enabled. 
+ + device->properties_.apiVersion = properties.apiVersion; + device->properties_.driverVersion = properties.driverVersion; + device->properties_.vendorID = properties.vendorID; + device->properties_.deviceID = properties.deviceID; + std::strcpy(device->properties_.deviceName, properties.deviceName); + + XELOGI( + "Vulkan device '{}': API {}.{}.{}, vendor 0x{:04X}, device 0x{:04X}, " + "driver version 0x{:X}", + properties.deviceName, VK_VERSION_MAJOR(properties.apiVersion), + VK_VERSION_MINOR(properties.apiVersion), + VK_VERSION_PATCH(properties.apiVersion), properties.vendorID, + properties.deviceID, properties.driverVersion); + if (unclamped_api_version != properties.apiVersion) { + XELOGI( + "Device supports Vulkan API {}.{}.{}, but the used version is limited " + "by the instance", + VK_VERSION_MAJOR(unclamped_api_version), + VK_VERSION_MINOR(unclamped_api_version), + VK_VERSION_PATCH(unclamped_api_version)); + } + + XELOGI("Enabled Vulkan device extensions:"); + for (uint32_t enabled_extension_index = 0; + enabled_extension_index < device_create_info.enabledExtensionCount; + ++enabled_extension_index) { + XELOGI("* {}", + device_create_info.ppEnabledExtensionNames[enabled_extension_index]); + } + + XELOGI("Vulkan device properties and enabled features:"); + + VkPhysicalDeviceFeatures enabled_features = {}; + device_create_info.pEnabledFeatures = &enabled_features; + +#define XE_UI_VULKAN_LIMIT(name) \ + device->properties_.name = properties.limits.name; \ + XELOGI("* " #name ": {}", properties.limits.name); +#define XE_UI_VULKAN_ENUM_LIMIT(name, type) \ + device->properties_.name = properties.limits.name; \ + XELOGI("* " #name ": {}", vk::to_string(vk::type(properties.limits.name))); +#define XE_UI_VULKAN_FEATURE(name) \ + enabled_features.name = supported_features.name; \ + device->properties_.name = supported_features.name; \ + if (supported_features.name) { \ + XELOGI("* " #name); \ + } +#define XE_UI_VULKAN_PROPERTY_2(structure, name) \ + 
device->properties_.name = structure.name; \ + XELOGI("* " #name ": {}", structure.name); +#define XE_UI_VULKAN_ENUM_PROPERTY_2(structure, name, type) \ + device->properties_.name = structure.name; \ + XELOGI("* " #name ": {}", vk::to_string(vk::type(structure.name))); +#define XE_UI_VULKAN_FEATURE_2(structure, name) \ + structure.enabled.name = structure.supported.name; \ + device->properties_.name = structure.supported.name; \ + if (structure.supported.name) { \ + XELOGI("* " #name); \ + } +#define XE_UI_VULKAN_FEATURE_IMPLIED(name) \ + device->properties_.name = true; \ + XELOGI("* " #name); + + if (ext_1_2_KHR_driver_properties) { + XE_UI_VULKAN_ENUM_PROPERTY_2(properties_1_2_KHR_driver_properties, driverID, + DriverId); + XELOGI("* driverName: {}", properties_1_2_KHR_driver_properties.driverName); + if (properties_1_2_KHR_driver_properties.driverInfo[0]) { + XELOGI("* driverInfo: {}", + properties_1_2_KHR_driver_properties.driverInfo); + } + XELOGI("* conformanceVersion: {}.{}.{}.{}", + properties_1_2_KHR_driver_properties.conformanceVersion.major, + properties_1_2_KHR_driver_properties.conformanceVersion.minor, + properties_1_2_KHR_driver_properties.conformanceVersion.subminor, + properties_1_2_KHR_driver_properties.conformanceVersion.patch); + } + + XE_UI_VULKAN_LIMIT(maxImageDimension2D) + XE_UI_VULKAN_LIMIT(maxImageDimension3D) + XE_UI_VULKAN_LIMIT(maxImageDimensionCube) + XE_UI_VULKAN_LIMIT(maxImageArrayLayers) + XE_UI_VULKAN_LIMIT(maxStorageBufferRange) + XE_UI_VULKAN_LIMIT(maxSamplerAllocationCount) + XE_UI_VULKAN_LIMIT(maxPerStageDescriptorSamplers) + XE_UI_VULKAN_LIMIT(maxPerStageDescriptorStorageBuffers) + XE_UI_VULKAN_LIMIT(maxPerStageDescriptorSampledImages) + XE_UI_VULKAN_LIMIT(maxPerStageResources) + XE_UI_VULKAN_LIMIT(maxVertexOutputComponents) + XE_UI_VULKAN_LIMIT(maxTessellationEvaluationOutputComponents) + XE_UI_VULKAN_LIMIT(maxGeometryInputComponents) + XE_UI_VULKAN_LIMIT(maxGeometryOutputComponents) + 
XE_UI_VULKAN_LIMIT(maxFragmentInputComponents) + XE_UI_VULKAN_LIMIT(maxFragmentCombinedOutputResources) + XE_UI_VULKAN_LIMIT(maxSamplerAnisotropy) + XE_UI_VULKAN_LIMIT(maxViewportDimensions[0]) + XE_UI_VULKAN_LIMIT(maxViewportDimensions[1]) + XE_UI_VULKAN_LIMIT(minUniformBufferOffsetAlignment) + XE_UI_VULKAN_LIMIT(minStorageBufferOffsetAlignment) + XE_UI_VULKAN_LIMIT(maxFramebufferWidth) + XE_UI_VULKAN_LIMIT(maxFramebufferHeight) + XE_UI_VULKAN_ENUM_LIMIT(framebufferColorSampleCounts, SampleCountFlags) + XE_UI_VULKAN_ENUM_LIMIT(framebufferDepthSampleCounts, SampleCountFlags) + XE_UI_VULKAN_ENUM_LIMIT(framebufferStencilSampleCounts, SampleCountFlags) + XE_UI_VULKAN_ENUM_LIMIT(framebufferNoAttachmentsSampleCounts, + SampleCountFlags) + XE_UI_VULKAN_ENUM_LIMIT(sampledImageColorSampleCounts, SampleCountFlags) + XE_UI_VULKAN_ENUM_LIMIT(sampledImageIntegerSampleCounts, SampleCountFlags) + XE_UI_VULKAN_ENUM_LIMIT(sampledImageDepthSampleCounts, SampleCountFlags) + XE_UI_VULKAN_ENUM_LIMIT(sampledImageStencilSampleCounts, SampleCountFlags) + XE_UI_VULKAN_LIMIT(standardSampleLocations) + XE_UI_VULKAN_LIMIT(optimalBufferCopyOffsetAlignment) + XE_UI_VULKAN_LIMIT(optimalBufferCopyRowPitchAlignment) + XE_UI_VULKAN_LIMIT(nonCoherentAtomSize) + + if (with_gpu_emulation) { + XE_UI_VULKAN_FEATURE(robustBufferAccess) + XE_UI_VULKAN_FEATURE(fullDrawIndexUint32) + XE_UI_VULKAN_FEATURE(independentBlend) + XE_UI_VULKAN_FEATURE(geometryShader) + XE_UI_VULKAN_FEATURE(tessellationShader) + XE_UI_VULKAN_FEATURE(sampleRateShading) + XE_UI_VULKAN_FEATURE(depthClamp) + XE_UI_VULKAN_FEATURE(fillModeNonSolid) + XE_UI_VULKAN_FEATURE(samplerAnisotropy) + XE_UI_VULKAN_FEATURE(occlusionQueryPrecise) + XE_UI_VULKAN_FEATURE(vertexPipelineStoresAndAtomics) + XE_UI_VULKAN_FEATURE(fragmentStoresAndAtomics) + XE_UI_VULKAN_FEATURE(shaderClipDistance) + XE_UI_VULKAN_FEATURE(shaderCullDistance) + XE_UI_VULKAN_FEATURE(sparseBinding) + XE_UI_VULKAN_FEATURE(sparseResidencyBuffer) + } + + if (properties.apiVersion 
>= VK_MAKE_API_VERSION(0, 1, 2, 0)) { + if (with_gpu_emulation) { + XE_UI_VULKAN_FEATURE_2(features_1_2, samplerMirrorClampToEdge); + } + } else { + if (ext_1_2_KHR_sampler_mirror_clamp_to_edge) { + XE_UI_VULKAN_FEATURE_IMPLIED(samplerMirrorClampToEdge) + } + } + + if (properties.apiVersion >= VK_MAKE_API_VERSION(0, 1, 3, 0)) { + if (with_gpu_emulation) { + XE_UI_VULKAN_FEATURE_2(features_1_3, shaderDemoteToHelperInvocation); + } + } else { + if (ext_1_3_EXT_shader_demote_to_helper_invocation) { + if (with_gpu_emulation) { + XE_UI_VULKAN_FEATURE_2( + features_1_3_EXT_shader_demote_to_helper_invocation, + shaderDemoteToHelperInvocation); + } + } + } + + if (ext_KHR_portability_subset) { + if (with_gpu_emulation) { + XE_UI_VULKAN_FEATURE_2(features_KHR_portability_subset, + constantAlphaColorBlendFactors) + XE_UI_VULKAN_FEATURE_2(features_KHR_portability_subset, + imageViewFormatReinterpretation) + XE_UI_VULKAN_FEATURE_2(features_KHR_portability_subset, + imageViewFormatSwizzle) + XE_UI_VULKAN_FEATURE_2(features_KHR_portability_subset, pointPolygons) + XE_UI_VULKAN_FEATURE_2(features_KHR_portability_subset, + separateStencilMaskRef) + XE_UI_VULKAN_FEATURE_2(features_KHR_portability_subset, + shaderSampleRateInterpolationFunctions) + XE_UI_VULKAN_FEATURE_2(features_KHR_portability_subset, triangleFans) + } + } else { + // Not a portability subset device. 
+ XE_UI_VULKAN_FEATURE_IMPLIED(constantAlphaColorBlendFactors) + XE_UI_VULKAN_FEATURE_IMPLIED(imageViewFormatReinterpretation) + XE_UI_VULKAN_FEATURE_IMPLIED(imageViewFormatSwizzle) + XE_UI_VULKAN_FEATURE_IMPLIED(pointPolygons) + XE_UI_VULKAN_FEATURE_IMPLIED(separateStencilMaskRef) + XE_UI_VULKAN_FEATURE_IMPLIED(shaderSampleRateInterpolationFunctions) + XE_UI_VULKAN_FEATURE_IMPLIED(triangleFans) + } + + if (ext_1_2_KHR_shader_float_controls) { + XE_UI_VULKAN_PROPERTY_2(properties_1_2_KHR_shader_float_controls, + shaderSignedZeroInfNanPreserveFloat32); + XE_UI_VULKAN_PROPERTY_2(properties_1_2_KHR_shader_float_controls, + shaderDenormFlushToZeroFloat32); + XE_UI_VULKAN_PROPERTY_2(properties_1_2_KHR_shader_float_controls, + shaderRoundingModeRTEFloat32); + } + + if (ext_EXT_fragment_shader_interlock) { + if (with_gpu_emulation) { + XE_UI_VULKAN_FEATURE_2(features_EXT_fragment_shader_interlock, + fragmentShaderSampleInterlock) + XE_UI_VULKAN_FEATURE_2(features_EXT_fragment_shader_interlock, + fragmentShaderPixelInterlock) + } + } + + if (ext_EXT_non_seamless_cube_map) { + if (with_gpu_emulation) { + XE_UI_VULKAN_FEATURE_2(features_EXT_non_seamless_cube_map, + nonSeamlessCubeMap) + } + } + +#undef XE_UI_VULKAN_LIMIT +#undef XE_UI_VULKAN_ENUM_LIMIT +#undef XE_UI_VULKAN_FEATURE +#undef XE_UI_VULKAN_PROPERTY_2 +#undef XE_UI_VULKAN_ENUM_PROPERTY_2 +#undef XE_UI_VULKAN_FEATURE_2 + + // Create the device. + + const VkResult device_create_result = ifn.vkCreateDevice( + physical_device, &device_create_info, nullptr, &device->device_); + if (device_create_result != VK_SUCCESS) { + XELOGE( + "Failed to create a Vulkan logical device from the physical device " + "'{}': {}", + properties.deviceName, vk::to_string(vk::Result(device_create_result))); + return nullptr; + } + + // Load device functions. 
+ + bool functions_loaded = true; + + Functions& dfn = device->functions_; + +#define XE_UI_VULKAN_FUNCTION(name) \ + functions_loaded &= (dfn.name = PFN_##name(ifn.vkGetDeviceProcAddr( \ + device->device_, #name))) != nullptr; + + // Vulkan 1.0. +#include "xenia/ui/vulkan/functions/device_1_0.inc" + + // Extensions promoted to a Vulkan version supported by the device. +#define XE_UI_VULKAN_FUNCTION_PROMOTED(extension_name, core_name) \ + functions_loaded &= \ + (dfn.core_name = PFN_##core_name( \ + ifn.vkGetDeviceProcAddr(device->device_, #core_name))) != nullptr; + if (properties.apiVersion >= VK_MAKE_API_VERSION(0, 1, 1, 0)) { +#include "xenia/ui/vulkan/functions/device_1_1_khr_bind_memory2.inc" +#include "xenia/ui/vulkan/functions/device_1_1_khr_get_memory_requirements2.inc" + } + if (properties.apiVersion >= VK_MAKE_API_VERSION(0, 1, 3, 0)) { +#include "xenia/ui/vulkan/functions/device_1_3_khr_maintenance4.inc" + } +#undef XE_UI_VULKAN_FUNCTION_PROMOTED + + // Non-promoted extensions, and extensions promoted to a Vulkan version not + // supported by the device. 
+#define XE_UI_VULKAN_FUNCTION_PROMOTED(extension_name, core_name) \ + functions_loaded &= \ + (dfn.core_name = PFN_##core_name(ifn.vkGetDeviceProcAddr( \ + device->device_, #extension_name))) != nullptr; + if (properties.apiVersion < VK_MAKE_API_VERSION(0, 1, 1, 0)) { + if (device->extensions_.ext_1_1_KHR_get_memory_requirements2) { +#include "xenia/ui/vulkan/functions/device_1_1_khr_get_memory_requirements2.inc" + } + if (device->extensions_.ext_1_1_KHR_bind_memory2) { +#include "xenia/ui/vulkan/functions/device_1_1_khr_bind_memory2.inc" + } + } + if (properties.apiVersion < VK_MAKE_API_VERSION(0, 1, 3, 0)) { + if (device->extensions_.ext_1_3_KHR_maintenance4) { +#include "xenia/ui/vulkan/functions/device_1_3_khr_maintenance4.inc" + } + } + if (device->extensions_.ext_KHR_swapchain) { +#include "xenia/ui/vulkan/functions/device_khr_swapchain.inc" + } +#undef XE_UI_VULKAN_FUNCTION_PROMOTED + +#undef XE_UI_VULKAN_FUNCTION + + if (!functions_loaded) { + XELOGE("Failed to get all Vulkan device function pointers for '{}'", + properties.deviceName); + return nullptr; + } + + // Get the queues. + + for (size_t queue_family_index = 0; + queue_family_index < device->queue_families_.size(); + ++queue_family_index) { + QueueFamily& queue_family = device->queue_families_[queue_family_index]; + for (size_t queue_index = 0; queue_index < queue_family.queues.size(); + ++queue_index) { + VkQueue queue; + dfn.vkGetDeviceQueue(device->device_, uint32_t(queue_family_index), + uint32_t(queue_index), &queue); + queue_family.queues[queue_index] = std::make_unique(queue); + } + } + + // Get the memory types. 
+ + VkPhysicalDeviceMemoryProperties memory_properties; + ifn.vkGetPhysicalDeviceMemoryProperties(physical_device, &memory_properties); + for (uint32_t memory_type_index = 0; + memory_type_index < memory_properties.memoryTypeCount; + ++memory_type_index) { + const uint32_t memory_type_bit = uint32_t(1) << memory_type_index; + const VkMemoryPropertyFlags memory_type_flags = + memory_properties.memoryTypes[memory_type_index].propertyFlags; + if (memory_type_flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) { + device->memory_types_.device_local |= memory_type_bit; + } + if (memory_type_flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) { + device->memory_types_.host_visible |= memory_type_bit; + } + if (memory_type_flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) { + device->memory_types_.host_coherent |= memory_type_bit; + } + if (memory_type_flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) { + device->memory_types_.host_cached |= memory_type_bit; + } + } + + return device; +} + +VulkanDevice::~VulkanDevice() { + if (device_) { + vulkan_instance_->functions().vkDestroyDevice(device_, nullptr); + } +} + +VulkanDevice::VulkanDevice(const VulkanInstance* const vulkan_instance, + const VkPhysicalDevice physical_device) + : vulkan_instance_(vulkan_instance), physical_device_(physical_device) { + assert_not_null(vulkan_instance); + assert_not_null(physical_device); +} + +} // namespace vulkan +} // namespace ui +} // namespace xe diff --git a/src/xenia/ui/vulkan/vulkan_device.h b/src/xenia/ui/vulkan/vulkan_device.h new file mode 100644 index 000000000..255c75464 --- /dev/null +++ b/src/xenia/ui/vulkan/vulkan_device.h @@ -0,0 +1,297 @@ +/** + ****************************************************************************** + * Xenia : Xbox 360 Emulator Research Project * + ****************************************************************************** + * Copyright 2025 Ben Vanik. All rights reserved. * + * Released under the BSD license - see LICENSE in the root for more details. 
* + ****************************************************************************** + */ + +#ifndef XENIA_UI_VULKAN_VULKAN_DEVICE_H_ +#define XENIA_UI_VULKAN_VULKAN_DEVICE_H_ + +#include +#include +#include + +#include "xenia/ui/vulkan/vulkan_instance.h" + +namespace xe { +namespace ui { +namespace vulkan { + +class VulkanDevice { + public: + static std::unique_ptr CreateIfSupported( + const VulkanInstance* vulkan_instance, VkPhysicalDevice physical_device, + bool with_gpu_emulation, bool with_swapchain); + + VulkanDevice(const VulkanDevice&) = delete; + VulkanDevice& operator=(const VulkanDevice&) = delete; + VulkanDevice(VulkanDevice&&) = delete; + VulkanDevice& operator=(VulkanDevice&&) = delete; + + ~VulkanDevice(); + + const VulkanInstance* vulkan_instance() const { return vulkan_instance_; } + + VkPhysicalDevice physical_device() const { return physical_device_; } + + // If functionality from higher API versions is used, increase this. + // This is for VkApplicationInfo. + // "apiVersion must be the highest version of Vulkan that the application is + // designed to use" + // "The patch version number specified in apiVersion is ignored when creating + // an instance object" + static constexpr uint32_t kHighestUsedApiMinorVersion = + VK_MAKE_API_VERSION(0, 1, 3, 0); + + struct Properties { + // Vulkan 1.0 + uint32_t apiVersion = VK_MAKE_API_VERSION(0, 1, 0, 0); + uint32_t driverVersion = 0; + uint32_t vendorID = 0; + uint32_t deviceID = 0; + char deviceName[VK_MAX_PHYSICAL_DEVICE_NAME_SIZE] = {}; + + uint32_t maxImageDimension2D = 4096; + uint32_t maxImageDimension3D = 256; + uint32_t maxImageDimensionCube = 4096; + uint32_t maxImageArrayLayers = 256; + uint32_t maxStorageBufferRange = uint32_t(1) << 27; + uint32_t maxSamplerAllocationCount = 4000; + uint32_t maxPerStageDescriptorSamplers = 16; + uint32_t maxPerStageDescriptorStorageBuffers = 4; + uint32_t maxPerStageDescriptorSampledImages = 16; + uint32_t maxPerStageResources = 128; + uint32_t 
maxVertexOutputComponents = 64; + uint32_t maxTessellationEvaluationOutputComponents = 64; + uint32_t maxGeometryInputComponents = 64; + uint32_t maxGeometryOutputComponents = 64; + uint32_t maxFragmentInputComponents = 64; + uint32_t maxFragmentCombinedOutputResources = 4; + float maxSamplerAnisotropy = 1.0f; + uint32_t maxViewportDimensions[2] = {4096, 4096}; + VkDeviceSize minUniformBufferOffsetAlignment = 256; + VkDeviceSize minStorageBufferOffsetAlignment = 256; + uint32_t maxFramebufferWidth = 4096; + uint32_t maxFramebufferHeight = 4096; + VkSampleCountFlags framebufferColorSampleCounts = + VK_SAMPLE_COUNT_1_BIT | VK_SAMPLE_COUNT_4_BIT; + VkSampleCountFlags framebufferDepthSampleCounts = + VK_SAMPLE_COUNT_1_BIT | VK_SAMPLE_COUNT_4_BIT; + VkSampleCountFlags framebufferStencilSampleCounts = + VK_SAMPLE_COUNT_1_BIT | VK_SAMPLE_COUNT_4_BIT; + VkSampleCountFlags framebufferNoAttachmentsSampleCounts = + VK_SAMPLE_COUNT_1_BIT | VK_SAMPLE_COUNT_4_BIT; + VkSampleCountFlags sampledImageColorSampleCounts = + VK_SAMPLE_COUNT_1_BIT | VK_SAMPLE_COUNT_4_BIT; + VkSampleCountFlags sampledImageIntegerSampleCounts = VK_SAMPLE_COUNT_1_BIT; + VkSampleCountFlags sampledImageDepthSampleCounts = + VK_SAMPLE_COUNT_1_BIT | VK_SAMPLE_COUNT_4_BIT; + VkSampleCountFlags sampledImageStencilSampleCounts = + VK_SAMPLE_COUNT_1_BIT | VK_SAMPLE_COUNT_4_BIT; + bool standardSampleLocations = false; + VkDeviceSize optimalBufferCopyOffsetAlignment = 1; + VkDeviceSize optimalBufferCopyRowPitchAlignment = 1; + VkDeviceSize nonCoherentAtomSize = 256; + + bool robustBufferAccess = false; + bool fullDrawIndexUint32 = false; + bool independentBlend = false; + bool geometryShader = false; + bool tessellationShader = false; + bool sampleRateShading = false; + bool depthClamp = false; + bool fillModeNonSolid = false; + bool samplerAnisotropy = false; + bool occlusionQueryPrecise = false; + bool vertexPipelineStoresAndAtomics = false; + bool fragmentStoresAndAtomics = false; + bool shaderClipDistance = 
false; + bool shaderCullDistance = false; + bool sparseBinding = false; + bool sparseResidencyBuffer = false; + + // VK_KHR_sampler_mirror_clamp_to_edge (#15, promoted to 1.2) + + bool samplerMirrorClampToEdge = false; + + // VK_KHR_portability_subset (#164) + + bool constantAlphaColorBlendFactors = false; + bool imageViewFormatReinterpretation = false; + bool imageViewFormatSwizzle = false; + bool pointPolygons = false; + bool separateStencilMaskRef = false; + bool shaderSampleRateInterpolationFunctions = false; + bool triangleFans = false; + + // VK_KHR_driver_properties (#197, promoted to 1.2) + + VkDriverId driverID = VkDriverId(0); + + // VK_KHR_shader_float_controls (#198, promoted to 1.2) + + bool shaderSignedZeroInfNanPreserveFloat32 = false; + bool shaderDenormFlushToZeroFloat32 = false; + bool shaderRoundingModeRTEFloat32 = false; + + // VK_EXT_fragment_shader_interlock (#252) + + bool fragmentShaderSampleInterlock = false; + bool fragmentShaderPixelInterlock = false; + + // VK_EXT_shader_demote_to_helper_invocation (#277, promoted to 1.3) + + bool shaderDemoteToHelperInvocation = false; + + // VK_EXT_non_seamless_cube_map (#423) + + bool nonSeamlessCubeMap = false; + }; + + // Properties of the core API and enabled extensions, and enabled features. + // Some supported functionality is enabled conditionally based on the + // `with_swapchain` and `with_gpu_emulation` options. + const Properties& properties() const { return properties_; } + + // Enabled extensions not fully covered by the device properties and optional + // feature flags in the `Properties` structure (primarily those adding API + // functionality rather than GPU features). Also set to true if the version of + // the Vulkan API they were promoted to is supported (with the + // `ext_major_minor_` prefix rather than `ext_`).
+ struct Extensions { + bool ext_KHR_swapchain = false; // #2 + bool ext_1_1_KHR_dedicated_allocation = false; // #128 + bool ext_EXT_shader_stencil_export = false; // #141 + bool ext_1_1_KHR_get_memory_requirements2 = false; // #147 + bool ext_1_2_KHR_image_format_list = false; // #148 + // Has optional features not implied by this being true. + bool ext_1_1_KHR_sampler_ycbcr_conversion = false; // #157 + bool ext_1_1_KHR_bind_memory2 = false; // #158 + bool ext_1_2_KHR_spirv_1_4 = false; // #237 + bool ext_EXT_memory_budget = false; // #238 + // Has optional features not implied by this being true. + bool ext_1_3_KHR_maintenance4 = false; // #414 + }; + + const Extensions& extensions() const { return extensions_; } + + VkDevice device() const { return device_; } + + struct Functions { +#define XE_UI_VULKAN_FUNCTION(name) PFN_##name name = nullptr; +#define XE_UI_VULKAN_FUNCTION_PROMOTED(extension_name, core_name) \ + PFN_##core_name core_name = nullptr; +#include "xenia/ui/vulkan/functions/device_1_0.inc" + // VK_KHR_swapchain (#2) +#include "xenia/ui/vulkan/functions/device_khr_swapchain.inc" + // VK_KHR_get_memory_requirements2 (#147, promoted to 1.1) +#include "xenia/ui/vulkan/functions/device_1_1_khr_get_memory_requirements2.inc" + // VK_KHR_bind_memory2 (#158, promoted to 1.1) +#include "xenia/ui/vulkan/functions/device_1_1_khr_bind_memory2.inc" + // VK_KHR_maintenance4 (#414, promoted to 1.3) +#include "xenia/ui/vulkan/functions/device_1_3_khr_maintenance4.inc" +#undef XE_UI_VULKAN_FUNCTION_PROMOTED +#undef XE_UI_VULKAN_FUNCTION + }; + + const Functions& functions() const { return functions_; } + + template + void SetObjectName(const VkObjectType object_type, const Object object_handle, + const char* const object_name) const { + if (!vulkan_instance()->extensions().ext_EXT_debug_utils) { + return; + } + VkDebugUtilsObjectNameInfoEXT object_name_info; + object_name_info.sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_OBJECT_NAME_INFO_EXT; + object_name_info.pNext = 
nullptr; + object_name_info.objectType = object_type; + object_name_info.objectHandle = (uint64_t)object_handle; + object_name_info.pObjectName = object_name; + vulkan_instance()->functions().vkSetDebugUtilsObjectNameEXT( + device(), &object_name_info); + } + + struct Queue { + // Host access to queues must be externally synchronized in Vulkan. + std::recursive_mutex mutex; + VkQueue queue = nullptr; + + explicit Queue(const VkQueue queue) : queue(queue) {} + + class Acquisition { + public: + explicit Acquisition(Queue& queue) + : lock_(queue.mutex), queue_(queue.queue) {} + + VkQueue queue() const { return queue_; } + + private: + std::unique_lock lock_; + VkQueue queue_; + }; + + Acquisition Acquire() { return Acquisition(*this); } + }; + + struct QueueFamily { + VkQueueFlags queue_flags = 0; + bool may_support_presentation = false; + std::vector> queues; + }; + + const std::vector& queue_families() const { + return queue_families_; + } + uint32_t queue_family_graphics_compute() const { + return queue_family_graphics_compute_; + } + // UINT32_MAX if not supported or not enabled. + // May be the same as queue_family_graphics_compute(). 
+ uint32_t queue_family_sparse_binding() const { + return queue_family_sparse_binding_; + } + + Queue::Acquisition AcquireQueue(const uint32_t queue_family_index, + const uint32_t queue_index) const { + return queue_families()[queue_family_index].queues[queue_index]->Acquire(); + } + + struct MemoryTypes { + uint32_t device_local = 0b0; + uint32_t host_visible = 0b0; + uint32_t host_coherent = 0b0; + uint32_t host_cached = 0b0; + }; + + const MemoryTypes& memory_types() const { return memory_types_; } + + private: + explicit VulkanDevice(const VulkanInstance* vulkan_instance, + VkPhysicalDevice physical_device); + + const VulkanInstance* vulkan_instance_ = nullptr; + VkPhysicalDevice physical_device_ = nullptr; + + Properties properties_; + Extensions extensions_; + + VkDevice device_ = nullptr; + + Functions functions_; + + std::vector queue_families_; + uint32_t queue_family_graphics_compute_ = UINT32_MAX; + uint32_t queue_family_sparse_binding_ = UINT32_MAX; + + MemoryTypes memory_types_; +}; + +} // namespace vulkan +} // namespace ui +} // namespace xe + +#endif // XENIA_UI_VULKAN_VULKAN_DEVICE_H_ diff --git a/src/xenia/ui/vulkan/vulkan_immediate_drawer.cc b/src/xenia/ui/vulkan/vulkan_immediate_drawer.cc index 0a7f2ae8e..dc7788d54 100644 --- a/src/xenia/ui/vulkan/vulkan_immediate_drawer.cc +++ b/src/xenia/ui/vulkan/vulkan_immediate_drawer.cc @@ -30,6 +30,20 @@ namespace shaders { #include "xenia/ui/shaders/bytecode/vulkan_spirv/immediate_vs.h" } // namespace shaders +std::unique_ptr VulkanImmediateDrawer::Create( + const VulkanDevice* const vulkan_device, + const UISamplers* const ui_samplers) { + assert_not_null(vulkan_device); + assert_not_null(ui_samplers); + + auto immediate_drawer = std::unique_ptr( + new VulkanImmediateDrawer(vulkan_device, ui_samplers)); + if (!immediate_drawer->Initialize()) { + return nullptr; + } + return immediate_drawer; +} + VulkanImmediateDrawer::VulkanImmediateTexture::~VulkanImmediateTexture() { if (immediate_drawer_) { 
immediate_drawer_->OnImmediateTextureDestroyed(*this); @@ -45,8 +59,8 @@ VulkanImmediateDrawer::~VulkanImmediateDrawer() { last_paint_submission_index_); } - const VulkanProvider::DeviceFunctions& dfn = provider_.dfn(); - VkDevice device = provider_.device(); + const VulkanDevice::Functions& dfn = vulkan_device_->functions(); + const VkDevice device = vulkan_device_->device(); util::DestroyAndNullHandle(dfn.vkDestroyPipeline, device, pipeline_line_); util::DestroyAndNullHandle(dfn.vkDestroyPipeline, device, pipeline_triangle_); @@ -88,9 +102,17 @@ VulkanImmediateDrawer::~VulkanImmediateDrawer() { texture_descriptor_set_layout_); } +VulkanImmediateDrawer::VulkanImmediateDrawer( + const VulkanDevice* const vulkan_device, + const UISamplers* const ui_samplers) + : vulkan_device_(vulkan_device), ui_samplers_(ui_samplers) { + assert_not_null(vulkan_device); + assert_not_null(ui_samplers); +} + bool VulkanImmediateDrawer::Initialize() { - const VulkanProvider::DeviceFunctions& dfn = provider_.dfn(); - VkDevice device = provider_.device(); + const VulkanDevice::Functions& dfn = vulkan_device_->functions(); + const VkDevice device = vulkan_device_->device(); VkDescriptorSetLayoutBinding texture_descriptor_set_layout_binding; texture_descriptor_set_layout_binding.binding = 0; @@ -128,7 +150,7 @@ bool VulkanImmediateDrawer::Initialize() { } vertex_buffer_pool_ = std::make_unique( - provider_, + vulkan_device_, VK_BUFFER_USAGE_INDEX_BUFFER_BIT | VK_BUFFER_USAGE_VERTEX_BUFFER_BIT); VkPushConstantRange push_constant_ranges[1]; @@ -194,8 +216,8 @@ void VulkanImmediateDrawer::Begin(UIDrawContext& ui_draw_context, last_completed_submission_index_ = vulkan_ui_draw_context.submission_index_completed(); - const VulkanProvider::DeviceFunctions& dfn = provider_.dfn(); - VkDevice device = provider_.device(); + const VulkanDevice::Functions& dfn = vulkan_device_->functions(); + const VkDevice device = vulkan_device_->device(); // Destroy deleted textures. 
for (auto it = textures_deleted_.begin(); it != textures_deleted_.end();) { @@ -272,7 +294,7 @@ void VulkanImmediateDrawer::BeginDrawBatch(const ImmediateDrawBatch& batch) { VkCommandBuffer draw_command_buffer = vulkan_ui_draw_context.draw_command_buffer(); - const VulkanProvider::DeviceFunctions& dfn = provider_.dfn(); + const VulkanDevice::Functions& dfn = vulkan_device_->functions(); // Bind the vertices. size_t vertex_buffer_size = sizeof(ImmediateVertex) * batch.vertex_count; @@ -319,7 +341,7 @@ void VulkanImmediateDrawer::Draw(const ImmediateDraw& draw) { return; } - const VulkanProvider::DeviceFunctions& dfn = provider_.dfn(); + const VulkanDevice::Functions& dfn = vulkan_device_->functions(); const VulkanUIDrawContext& vulkan_ui_draw_context = *static_cast(ui_draw_context()); VkCommandBuffer draw_command_buffer = @@ -410,7 +432,7 @@ void VulkanImmediateDrawer::End() { vulkan_presenter.AcquireUISetupCommandBufferFromUIThread(); if (setup_command_buffer != VK_NULL_HANDLE) { size_t texture_uploads_pending_count = texture_uploads_pending_.size(); - const VulkanProvider::DeviceFunctions& dfn = provider_.dfn(); + const VulkanDevice::Functions& dfn = vulkan_device_->functions(); const VulkanUIDrawContext& vulkan_ui_draw_context = *static_cast(ui_draw_context()); @@ -528,8 +550,8 @@ void VulkanImmediateDrawer::OnLeavePresenter() { texture->last_usage_submission_ = 0; } - const VulkanProvider::DeviceFunctions& dfn = provider_.dfn(); - VkDevice device = provider_.device(); + const VulkanDevice::Functions& dfn = vulkan_device_->functions(); + const VkDevice device = vulkan_device_->device(); for (SubmittedTextureUploadBuffer& submitted_texture_upload_buffer : texture_upload_buffers_submitted_) { @@ -566,8 +588,8 @@ bool VulkanImmediateDrawer::EnsurePipelinesCreatedForCurrentRenderPass() { last_paint_submission_index_); } - const VulkanProvider::DeviceFunctions& dfn = provider_.dfn(); - VkDevice device = provider_.device(); + const VulkanDevice::Functions& dfn = 
vulkan_device_->functions(); + const VkDevice device = vulkan_device_->device(); // Safe to destroy the pipelines now - if the render pass was recreated, // completion of its usage has already been awaited. @@ -581,8 +603,8 @@ bool VulkanImmediateDrawer::EnsurePipelinesCreatedForCurrentRenderPass() { VkPipelineShaderStageCreateInfo stages[2] = {}; stages[0].sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO; stages[0].stage = VK_SHADER_STAGE_VERTEX_BIT; - stages[0].module = util::CreateShaderModule(provider_, shaders::immediate_vs, - sizeof(shaders::immediate_vs)); + stages[0].module = util::CreateShaderModule( + vulkan_device_, shaders::immediate_vs, sizeof(shaders::immediate_vs)); if (stages[0].module == VK_NULL_HANDLE) { XELOGE("VulkanImmediateDrawer: Failed to create the vertex shader module"); return false; @@ -590,8 +612,8 @@ bool VulkanImmediateDrawer::EnsurePipelinesCreatedForCurrentRenderPass() { stages[0].pName = "main"; stages[1].sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO; stages[1].stage = VK_SHADER_STAGE_FRAGMENT_BIT; - stages[1].module = util::CreateShaderModule(provider_, shaders::immediate_ps, - sizeof(shaders::immediate_ps)); + stages[1].module = util::CreateShaderModule( + vulkan_device_, shaders::immediate_ps, sizeof(shaders::immediate_ps)); if (stages[1].module == VK_NULL_HANDLE) { XELOGE( "VulkanImmediateDrawer: Failed to create the fragment shader module"); @@ -753,8 +775,8 @@ uint32_t VulkanImmediateDrawer::AllocateTextureDescriptor() { return (pool->index << 6) | local_index; } - const VulkanProvider::DeviceFunctions& dfn = provider_.dfn(); - VkDevice device = provider_.device(); + const VulkanDevice::Functions& dfn = vulkan_device_->functions(); + const VkDevice device = vulkan_device_->device(); VkDescriptorSetAllocateInfo allocate_info; allocate_info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO; @@ -864,8 +886,8 @@ bool VulkanImmediateDrawer::CreateTextureResource( bool is_repeated, const uint8_t* data, 
VulkanImmediateTexture::Resource& resource_out, size_t& pending_upload_index_out) { - const VulkanProvider::DeviceFunctions& dfn = provider_.dfn(); - VkDevice device = provider_.device(); + const VulkanDevice::Functions& dfn = vulkan_device_->functions(); + const VkDevice device = vulkan_device_->device(); // Create the image and the descriptor. @@ -890,7 +912,7 @@ bool VulkanImmediateDrawer::CreateTextureResource( image_create_info.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; VkImage image; VkDeviceMemory image_memory; - if (!util::CreateDedicatedAllocationImage(provider_, image_create_info, + if (!util::CreateDedicatedAllocationImage(vulkan_device_, image_create_info, util::MemoryPurpose::kDeviceLocal, image, image_memory)) { XELOGE( @@ -910,7 +932,7 @@ bool VulkanImmediateDrawer::CreateTextureResource( // data == nullptr is a special case for (1, 1, 1, 1), though the image will // be cleared to (1, 1, 1, 1) anyway, just a micro-optimization. VkComponentSwizzle swizzle = - (data || !provider_.device_info().imageViewFormatSwizzle) + (data || !vulkan_device_->properties().imageViewFormatSwizzle) ? VK_COMPONENT_SWIZZLE_IDENTITY : VK_COMPONENT_SWIZZLE_ONE; image_view_create_info.components.r = swizzle; @@ -940,15 +962,16 @@ bool VulkanImmediateDrawer::CreateTextureResource( return false; } VkDescriptorImageInfo descriptor_image_info; - VulkanProvider::HostSampler host_sampler; + UISamplers::SamplerIndex ui_sampler_index; if (filter == ImmediateTextureFilter::kLinear) { - host_sampler = is_repeated ? VulkanProvider::HostSampler::kLinearRepeat - : VulkanProvider::HostSampler::kLinearClamp; + ui_sampler_index = is_repeated ? UISamplers::kSamplerIndexLinearRepeat + : UISamplers::kSamplerIndexLinearClampToEdge; } else { - host_sampler = is_repeated ? VulkanProvider::HostSampler::kNearestRepeat - : VulkanProvider::HostSampler::kNearestClamp; + ui_sampler_index = is_repeated + ? 
UISamplers::kSamplerIndexNearestRepeat + : UISamplers::kSamplerIndexNearestClampToEdge; } - descriptor_image_info.sampler = provider_.GetHostSampler(host_sampler); + descriptor_image_info.sampler = ui_samplers_->samplers()[ui_sampler_index]; descriptor_image_info.imageView = image_view; descriptor_image_info.imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL; VkWriteDescriptorSet descriptor_write; @@ -974,7 +997,7 @@ bool VulkanImmediateDrawer::CreateTextureResource( size_t data_size = sizeof(uint32_t) * width * height; uint32_t upload_buffer_memory_type; if (!util::CreateDedicatedAllocationBuffer( - provider_, VkDeviceSize(data_size), + vulkan_device_, VkDeviceSize(data_size), VK_BUFFER_USAGE_TRANSFER_SRC_BIT, util::MemoryPurpose::kUpload, upload_buffer, upload_buffer_memory, &upload_buffer_memory_type)) { XELOGE( @@ -1003,7 +1026,7 @@ bool VulkanImmediateDrawer::CreateTextureResource( return false; } std::memcpy(upload_buffer_mapping, data, data_size); - util::FlushMappedMemoryRange(provider_, upload_buffer_memory, + util::FlushMappedMemoryRange(vulkan_device_, upload_buffer_memory, upload_buffer_memory_type); dfn.vkUnmapMemory(device, upload_buffer_memory); } @@ -1030,8 +1053,8 @@ bool VulkanImmediateDrawer::CreateTextureResource( void VulkanImmediateDrawer::DestroyTextureResource( VulkanImmediateTexture::Resource& resource) { FreeTextureDescriptor(resource.descriptor_index); - const VulkanProvider::DeviceFunctions& dfn = provider_.dfn(); - VkDevice device = provider_.device(); + const VulkanDevice::Functions& dfn = vulkan_device_->functions(); + const VkDevice device = vulkan_device_->device(); dfn.vkDestroyImageView(device, resource.image_view, nullptr); dfn.vkDestroyImage(device, resource.image, nullptr); dfn.vkFreeMemory(device, resource.memory, nullptr); diff --git a/src/xenia/ui/vulkan/vulkan_immediate_drawer.h b/src/xenia/ui/vulkan/vulkan_immediate_drawer.h index ca087e1cb..ded33b348 100644 --- a/src/xenia/ui/vulkan/vulkan_immediate_drawer.h +++ 
b/src/xenia/ui/vulkan/vulkan_immediate_drawer.h @@ -17,6 +17,7 @@ #include #include "xenia/ui/immediate_drawer.h" +#include "xenia/ui/vulkan/ui_samplers.h" #include "xenia/ui/vulkan/vulkan_upload_buffer_pool.h" namespace xe { @@ -26,14 +27,7 @@ namespace vulkan { class VulkanImmediateDrawer : public ImmediateDrawer { public: static std::unique_ptr Create( - const VulkanProvider& provider) { - auto immediate_drawer = std::unique_ptr( - new VulkanImmediateDrawer(provider)); - if (!immediate_drawer->Initialize()) { - return nullptr; - } - return std::move(immediate_drawer); - } + const VulkanDevice* vulkan_device, const UISamplers* ui_samplers); ~VulkanImmediateDrawer(); @@ -96,7 +90,8 @@ class VulkanImmediateDrawer : public ImmediateDrawer { TextureDescriptorPool* recycled_next; }; - VulkanImmediateDrawer(const VulkanProvider& provider) : provider_(provider) {} + explicit VulkanImmediateDrawer(const VulkanDevice* vulkan_device, + const UISamplers* ui_samplers); bool Initialize(); bool EnsurePipelinesCreatedForCurrentRenderPass(); @@ -117,7 +112,8 @@ class VulkanImmediateDrawer : public ImmediateDrawer { void DestroyTextureResource(VulkanImmediateTexture::Resource& resource); void OnImmediateTextureDestroyed(VulkanImmediateTexture& texture); - const VulkanProvider& provider_; + const VulkanDevice* vulkan_device_; + const UISamplers* ui_samplers_; // Combined image sampler pools for textures. VkDescriptorSetLayout texture_descriptor_set_layout_; diff --git a/src/xenia/ui/vulkan/vulkan_instance.cc b/src/xenia/ui/vulkan/vulkan_instance.cc new file mode 100644 index 000000000..5bf57ae2b --- /dev/null +++ b/src/xenia/ui/vulkan/vulkan_instance.cc @@ -0,0 +1,667 @@ +/** + ****************************************************************************** + * Xenia : Xbox 360 Emulator Research Project * + ****************************************************************************** + * Copyright 2025 Ben Vanik. All rights reserved. 
* + * Released under the BSD license - see LICENSE in the root for more details. * + ****************************************************************************** + */ + +#include "xenia/ui/vulkan/vulkan_instance.h" + +#include +#include +#include +#include +#include + +#include "xenia/base/cvar.h" +#include "xenia/base/logging.h" +#include "xenia/base/platform.h" +#include "xenia/ui/vulkan/vulkan_presenter.h" + +#if XE_PLATFORM_LINUX +#include +#elif XE_PLATFORM_WIN32 +#include "xenia/base/platform_win.h" +#endif + +DEFINE_bool( + vulkan_log_debug_messages, true, + "Write Vulkan VK_EXT_debug_utils messages to the Xenia log, as opposed to " + "the OS debug output.", + "Vulkan"); + +namespace xe { +namespace ui { +namespace vulkan { + +std::unique_ptr VulkanInstance::Create( + const bool with_surface, const bool try_enable_validation) { + std::unique_ptr vulkan_instance(new VulkanInstance()); + + // Load the RenderDoc API if connected. + + vulkan_instance->renderdoc_api_ = RenderDocAPI::CreateIfConnected(); + + // Load the loader library. 
+ + Functions& ifn = vulkan_instance->functions_; + + bool functions_loaded = true; +#if XE_PLATFORM_LINUX +#if XE_PLATFORM_ANDROID + const char* const loader_library_name = "libvulkan.so"; +#else + const char* const loader_library_name = "libvulkan.so.1"; +#endif + // http://developer.download.nvidia.com/mobile/shield/assets/Vulkan/UsingtheVulkanAPI.pdf + vulkan_instance->loader_ = dlopen(loader_library_name, RTLD_NOW | RTLD_LOCAL); + if (!vulkan_instance->loader_) { + XELOGE("Failed to load {}", loader_library_name); + return nullptr; + } +#define XE_VULKAN_LOAD_LOADER_FUNCTION(name) \ + functions_loaded &= \ + (ifn.name = PFN_##name(dlsym(vulkan_instance->loader_, #name))) != \ + nullptr; +#elif XE_PLATFORM_WIN32 + vulkan_instance->loader_ = LoadLibraryW(L"vulkan-1.dll"); + if (!vulkan_instance->loader_) { + XELOGE("Failed to load vulkan-1.dll"); + return nullptr; + } +#define XE_VULKAN_LOAD_LOADER_FUNCTION(name) \ + functions_loaded &= (ifn.name = PFN_##name(GetProcAddress( \ + vulkan_instance->loader_, #name))) != nullptr; +#else +#error No Vulkan loader library loading provided for the target platform. +#endif + XE_VULKAN_LOAD_LOADER_FUNCTION(vkGetInstanceProcAddr); + XE_VULKAN_LOAD_LOADER_FUNCTION(vkDestroyInstance); +#undef XE_VULKAN_LOAD_LOADER_FUNCTION + if (!functions_loaded) { + XELOGE("Failed to get Vulkan loader function pointers"); + return nullptr; + } + + // Load global functions. 
+ + functions_loaded &= + (ifn.vkCreateInstance = PFN_vkCreateInstance( + ifn.vkGetInstanceProcAddr(nullptr, "vkCreateInstance"))) != nullptr; + functions_loaded &= + (ifn.vkEnumerateInstanceExtensionProperties = + PFN_vkEnumerateInstanceExtensionProperties(ifn.vkGetInstanceProcAddr( + nullptr, "vkEnumerateInstanceExtensionProperties"))) != nullptr; + functions_loaded &= + (ifn.vkEnumerateInstanceLayerProperties = + PFN_vkEnumerateInstanceLayerProperties(ifn.vkGetInstanceProcAddr( + nullptr, "vkEnumerateInstanceLayerProperties"))) != nullptr; + if (!functions_loaded) { + XELOGE( + "Failed to get Vulkan global function pointers via " + "vkGetInstanceProcAddr"); + return nullptr; + } + // Available since Vulkan 1.1. If this is nullptr, it's a Vulkan 1.0 instance. + ifn.vkEnumerateInstanceVersion = PFN_vkEnumerateInstanceVersion( + ifn.vkGetInstanceProcAddr(nullptr, "vkEnumerateInstanceVersion")); + + // Get the API version. + + if (ifn.vkEnumerateInstanceVersion) { + ifn.vkEnumerateInstanceVersion(&vulkan_instance->api_version_); + } + + // Enable extensions and layers. + + // Name pointers from `requested_extensions` will be used in the enabled + // extensions vector. + std::unordered_map requested_extensions; + if (vulkan_instance->api_version_ >= VK_MAKE_API_VERSION(0, 1, 1, 0)) { + vulkan_instance->extensions_.ext_1_1_KHR_get_physical_device_properties2 = + true; + } else { + // #60. + requested_extensions.emplace( + "VK_KHR_get_physical_device_properties2", + &vulkan_instance->extensions_ + .ext_1_1_KHR_get_physical_device_properties2); + } + // #129. + requested_extensions.emplace( + "VK_EXT_debug_utils", &vulkan_instance->extensions_.ext_EXT_debug_utils); + // #395. + requested_extensions.emplace( + "VK_KHR_portability_enumeration", + &vulkan_instance->extensions_.ext_KHR_portability_enumeration); + if (with_surface) { + // #1. 
+ requested_extensions.emplace("VK_KHR_surface", + &vulkan_instance->extensions_.ext_KHR_surface); +#ifdef VK_USE_PLATFORM_XCB_KHR + // #6. + requested_extensions.emplace( + "VK_KHR_xcb_surface", + &vulkan_instance->extensions_.ext_KHR_xcb_surface); +#endif +#ifdef VK_USE_PLATFORM_ANDROID_KHR + // #9. + requested_extensions.emplace( + "VK_KHR_android_surface", + &vulkan_instance->extensions_.ext_KHR_android_surface); +#endif +#ifdef VK_USE_PLATFORM_WIN32_KHR + // #10. + requested_extensions.emplace( + "VK_KHR_win32_surface", + &vulkan_instance->extensions_.ext_KHR_win32_surface); +#endif + } + + std::vector enabled_extensions; + + std::vector supported_implementation_extensions; + while (true) { + uint32_t supported_implementation_extension_count = 0; + const VkResult get_supported_implementation_extension_count_result = + ifn.vkEnumerateInstanceExtensionProperties( + nullptr, &supported_implementation_extension_count, nullptr); + if (get_supported_implementation_extension_count_result != VK_SUCCESS && + get_supported_implementation_extension_count_result != VK_INCOMPLETE) { + XELOGW("Failed to get the Vulkan instance extension count"); + return nullptr; + } + if (supported_implementation_extension_count) { + supported_implementation_extensions.resize( + supported_implementation_extension_count); + const VkResult get_supported_implementation_extensions_result = + ifn.vkEnumerateInstanceExtensionProperties( + nullptr, &supported_implementation_extension_count, + supported_implementation_extensions.data()); + if (get_supported_implementation_extensions_result == VK_INCOMPLETE) { + continue; + } + if (get_supported_implementation_extensions_result != VK_SUCCESS) { + XELOGW("Failed to get the Vulkan instance extensions"); + return nullptr; + } + } + supported_implementation_extensions.resize( + supported_implementation_extension_count); + break; + } + + for (const VkExtensionProperties& supported_extension : + supported_implementation_extensions) { + const auto 
requested_extension_it = + requested_extensions.find(supported_extension.extensionName); + if (requested_extension_it == requested_extensions.cend()) { + continue; + } + assert_not_null(requested_extension_it->second); + if (!*requested_extension_it->second) { + enabled_extensions.emplace_back(requested_extension_it->first.c_str()); + *requested_extension_it->second = true; + } + } + + // If enabled layers are not present, will disable all extensions provided by + // the layers by truncating the enabled extension vector to this size. + const size_t enabled_implementation_extension_count = + enabled_extensions.size(); + std::vector enabled_layer_extension_enablement_bools; + + // Name pointers from `requested_layers` will be used in the enabled layer + // vector. + std::unordered_map requested_layers; + bool layer_khronos_validation = false; + if (try_enable_validation) { + requested_layers.emplace("VK_LAYER_KHRONOS_validation", + &layer_khronos_validation); + } + + std::vector enabled_layers; + + if (!requested_layers.empty()) { + std::vector available_layers; + // "The list of available layers may change at any time due to actions + // outside of the Vulkan implementation" + while (true) { + available_layers.clear(); + uint32_t available_layer_count = 0; + const VkResult get_available_layer_count_result = + ifn.vkEnumerateInstanceLayerProperties(&available_layer_count, + nullptr); + if (get_available_layer_count_result != VK_SUCCESS && + get_available_layer_count_result != VK_INCOMPLETE) { + break; + } + if (available_layer_count) { + available_layers.resize(available_layer_count); + const VkResult get_available_layers_result = + ifn.vkEnumerateInstanceLayerProperties(&available_layer_count, + available_layers.data()); + if (get_available_layers_result == VK_INCOMPLETE) { + // New layers were added. + continue; + } + if (get_available_layers_result != VK_SUCCESS) { + available_layers.clear(); + break; + } + // In case the second enumeration returned fewer layers. 
+ available_layers.resize(available_layer_count); + } + break; + } + + if (!available_layers.empty()) { + std::vector supported_layer_extensions; + + for (const VkLayerProperties& available_layer : available_layers) { + auto requested_layer_it = + requested_layers.find(available_layer.layerName); + if (requested_layer_it == requested_layers.cend()) { + continue; + } + + bool got_layer_extensions = true; + // "Because the list of available layers may change externally between + // calls to vkEnumerateInstanceExtensionProperties, two calls may + // retrieve different results if a pLayerName is available in one call + // but not in another." + while (true) { + uint32_t supported_layer_extension_count = 0; + const VkResult get_supported_layer_extension_count_result = + ifn.vkEnumerateInstanceExtensionProperties( + nullptr, &supported_layer_extension_count, nullptr); + if (get_supported_layer_extension_count_result != VK_SUCCESS && + get_supported_layer_extension_count_result != VK_INCOMPLETE) { + got_layer_extensions = false; + break; + } + if (supported_layer_extension_count) { + supported_layer_extensions.resize(supported_layer_extension_count); + const VkResult get_supported_layer_extensions_result = + ifn.vkEnumerateInstanceExtensionProperties( + available_layer.layerName, &supported_layer_extension_count, + supported_layer_extensions.data()); + if (get_supported_layer_extensions_result == VK_INCOMPLETE) { + continue; + } + if (get_supported_layer_extensions_result != VK_SUCCESS) { + got_layer_extensions = false; + break; + } + } + supported_layer_extensions.resize(supported_layer_extension_count); + break; + } + if (!got_layer_extensions) { + // The layer was possibly removed. 
+ continue; + } + + for (const VkExtensionProperties& supported_extension : + supported_layer_extensions) { + const auto requested_extension_it = + requested_extensions.find(supported_extension.extensionName); + if (requested_extension_it == requested_extensions.cend()) { + continue; + } + assert_not_null(requested_extension_it->second); + // Don't add the extension to the enabled vector multiple times if + // provided by the implementation itself or by another layer. + if (!*requested_extension_it->second) { + enabled_extensions.emplace_back( + requested_extension_it->first.c_str()); + enabled_layer_extension_enablement_bools.push_back( + requested_layer_it->second); + *requested_extension_it->second = true; + } + } + + assert_not_null(requested_layer_it->second); + if (!*requested_layer_it->second) { + enabled_layers.emplace_back(requested_layer_it->first.c_str()); + *requested_layer_it->second = true; + } + } + } + } + + // Create the instance. + + VkApplicationInfo application_info; + application_info.sType = VK_STRUCTURE_TYPE_APPLICATION_INFO; + application_info.pNext = nullptr; + application_info.pApplicationName = "Xenia"; + application_info.applicationVersion = 1; + application_info.pEngineName = nullptr; + application_info.engineVersion = 0; + // "The patch version number specified in apiVersion is ignored when creating + // an instance object." + // "Vulkan 1.0 implementations were required to return + // VK_ERROR_INCOMPATIBLE_DRIVER if apiVersion was larger than 1.0." + application_info.apiVersion = + vulkan_instance->api_version_ >= VK_MAKE_API_VERSION(0, 1, 1, 0) + ? VulkanDevice::kHighestUsedApiMinorVersion + : VK_MAKE_API_VERSION(0, 1, 0, 0); + + VkInstanceCreateInfo instance_create_info; + instance_create_info.sType = VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO; + instance_create_info.pNext = nullptr; + instance_create_info.flags = 0; + // VK_KHR_get_physical_device_properties2 is needed to get the portability + // subset features. 
+ if (vulkan_instance->extensions_.ext_KHR_portability_enumeration && + vulkan_instance->extensions_ + .ext_1_1_KHR_get_physical_device_properties2) { + instance_create_info.flags |= + VK_INSTANCE_CREATE_ENUMERATE_PORTABILITY_BIT_KHR; + } + instance_create_info.pApplicationInfo = &application_info; + instance_create_info.enabledLayerCount = uint32_t(enabled_layers.size()); + instance_create_info.ppEnabledLayerNames = enabled_layers.data(); + instance_create_info.enabledExtensionCount = + uint32_t(enabled_extensions.size()); + instance_create_info.ppEnabledExtensionNames = enabled_extensions.data(); + VkResult instance_create_result = ifn.vkCreateInstance( + &instance_create_info, nullptr, &vulkan_instance->instance_); + + if (instance_create_result == VK_ERROR_LAYER_NOT_PRESENT || + instance_create_result == VK_ERROR_EXTENSION_NOT_PRESENT) { + // A layer was possibly removed. Try without layers. + for (bool* const extension_enablement : + enabled_layer_extension_enablement_bools) { + *extension_enablement = false; + } + for (const std::pair& requested_layer : + requested_layers) { + *requested_layer.second = false; + } + instance_create_info.enabledLayerCount = 0; + instance_create_info.enabledExtensionCount = + uint32_t(enabled_implementation_extension_count); + instance_create_result = ifn.vkCreateInstance( + &instance_create_info, nullptr, &vulkan_instance->instance_); + } + + if (instance_create_result != VK_SUCCESS) { + XELOGE("Failed to create a Vulkan instance: {}", + vk::to_string(vk::Result(instance_create_result))); + return nullptr; + } + + // Load instance functions. + +#define XE_UI_VULKAN_FUNCTION(name) \ + functions_loaded &= (ifn.name = PFN_##name(ifn.vkGetInstanceProcAddr( \ + vulkan_instance->instance_, #name))) != nullptr; + + // Vulkan 1.0. +#include "xenia/ui/vulkan/functions/instance_1_0.inc" + + // Extensions promoted to a Vulkan version supported by the instance. 
+#define XE_UI_VULKAN_FUNCTION_PROMOTED(extension_name, core_name) \ + functions_loaded &= \ + (ifn.core_name = PFN_##core_name(ifn.vkGetInstanceProcAddr( \ + vulkan_instance->instance_, #core_name))) != nullptr; + if (vulkan_instance->api_version_ >= VK_MAKE_API_VERSION(0, 1, 1, 0)) { +#include "xenia/ui/vulkan/functions/instance_1_1_khr_get_physical_device_properties2.inc" + } +#undef XE_UI_VULKAN_FUNCTION_PROMOTED + + // Non-promoted extensions, and extensions promoted to a Vulkan version not + // supported by the instance. +#define XE_UI_VULKAN_FUNCTION_PROMOTED(extension_name, core_name) \ + functions_loaded &= \ + (ifn.core_name = PFN_##core_name(ifn.vkGetInstanceProcAddr( \ + vulkan_instance->instance_, #extension_name))) != nullptr; + if (vulkan_instance->api_version_ < VK_MAKE_API_VERSION(0, 1, 1, 0)) { + if (vulkan_instance->extensions_ + .ext_1_1_KHR_get_physical_device_properties2) { +#include "xenia/ui/vulkan/functions/instance_1_1_khr_get_physical_device_properties2.inc" + } + } +#ifdef VK_USE_PLATFORM_XCB_KHR + if (vulkan_instance->extensions_.ext_KHR_xcb_surface) { +#include "xenia/ui/vulkan/functions/instance_khr_xcb_surface.inc" + } +#endif +#ifdef VK_USE_PLATFORM_ANDROID_KHR + if (vulkan_instance->extensions_.ext_KHR_android_surface) { +#include "xenia/ui/vulkan/functions/instance_khr_android_surface.inc" + } +#endif +#ifdef VK_USE_PLATFORM_WIN32_KHR + if (vulkan_instance->extensions_.ext_KHR_win32_surface) { +#include "xenia/ui/vulkan/functions/instance_khr_win32_surface.inc" + } +#endif + if (vulkan_instance->extensions_.ext_KHR_surface) { +#include "xenia/ui/vulkan/functions/instance_khr_surface.inc" + } + if (vulkan_instance->extensions_.ext_EXT_debug_utils) { +#include "xenia/ui/vulkan/functions/instance_ext_debug_utils.inc" + } +#undef XE_UI_VULKAN_FUNCTION_PROMOTED + +#undef XE_UI_VULKAN_FUNCTION + + if (!functions_loaded) { + XELOGE("Failed to get all Vulkan instance function pointers"); + return nullptr; + } + + // Check whether a 
surface can be created. + + if (with_surface && !VulkanPresenter::GetSurfaceTypesSupportedByInstance( + vulkan_instance->extensions_)) { + XELOGE("The Vulkan instance doesn't support surface types used by Xenia"); + return nullptr; + } + + // Log instance properties. + + XELOGI("Vulkan instance API version {}.{}.{}. Enabled layers and extensions:", + VK_VERSION_MAJOR(vulkan_instance->api_version_), + VK_VERSION_MINOR(vulkan_instance->api_version_), + VK_VERSION_PATCH(vulkan_instance->api_version_)); + for (uint32_t enabled_layer_index = 0; + enabled_layer_index < instance_create_info.enabledLayerCount; + ++enabled_layer_index) { + XELOGI("* {}", + instance_create_info.ppEnabledLayerNames[enabled_layer_index]); + } + for (uint32_t enabled_extension_index = 0; + enabled_extension_index < instance_create_info.enabledExtensionCount; + ++enabled_extension_index) { + XELOGI( + "* {}", + instance_create_info.ppEnabledExtensionNames[enabled_extension_index]); + } + + // Create the debug messenger if requested and available. 
+ + if (vulkan_instance->extensions_.ext_EXT_debug_utils && + cvars::vulkan_log_debug_messages) { + VkDebugUtilsMessengerCreateInfoEXT debug_utils_messenger_create_info = { + VK_STRUCTURE_TYPE_DEBUG_UTILS_MESSENGER_CREATE_INFO_EXT}; + if (xe::logging::ShouldLog(xe::LogLevel::Debug)) { + debug_utils_messenger_create_info.messageSeverity |= + VK_DEBUG_UTILS_MESSAGE_SEVERITY_VERBOSE_BIT_EXT; + } + if (xe::logging::ShouldLog(xe::LogLevel::Info)) { + debug_utils_messenger_create_info.messageSeverity |= + VK_DEBUG_UTILS_MESSAGE_SEVERITY_INFO_BIT_EXT; + } + if (xe::logging::ShouldLog(xe::LogLevel::Warning)) { + debug_utils_messenger_create_info.messageSeverity |= + VK_DEBUG_UTILS_MESSAGE_SEVERITY_WARNING_BIT_EXT; + } + if (xe::logging::ShouldLog(xe::LogLevel::Error)) { + debug_utils_messenger_create_info.messageSeverity |= + VK_DEBUG_UTILS_MESSAGE_SEVERITY_ERROR_BIT_EXT; + } + // VUID-VkDebugUtilsMessengerCreateInfoEXT-messageSeverity-requiredbitmask: + // "messageSeverity must not be 0" + if (debug_utils_messenger_create_info.messageSeverity) { + debug_utils_messenger_create_info.messageType = + VK_DEBUG_UTILS_MESSAGE_TYPE_GENERAL_BIT_EXT | + VK_DEBUG_UTILS_MESSAGE_TYPE_VALIDATION_BIT_EXT | + VK_DEBUG_UTILS_MESSAGE_TYPE_PERFORMANCE_BIT_EXT; + debug_utils_messenger_create_info.pfnUserCallback = + DebugUtilsMessengerCallback; + debug_utils_messenger_create_info.pUserData = vulkan_instance.get(); + const VkResult debug_utils_messenger_create_result = + ifn.vkCreateDebugUtilsMessengerEXT( + vulkan_instance->instance_, &debug_utils_messenger_create_info, + nullptr, &vulkan_instance->debug_utils_messenger_); + if (debug_utils_messenger_create_result != VK_SUCCESS) { + XELOGW("Failed to create the Vulkan debug utils messenger: {}", + vk::to_string(vk::Result(debug_utils_messenger_create_result))); + } + } + } + + return vulkan_instance; +} + +VulkanInstance::~VulkanInstance() { + if (instance_) { + if (debug_utils_messenger_ != VK_NULL_HANDLE) { + 
functions_.vkDestroyDebugUtilsMessengerEXT( + instance_, debug_utils_messenger_, nullptr); + } + + functions_.vkDestroyInstance(instance_, nullptr); + } + +#if XE_PLATFORM_LINUX + if (loader_) { + dlclose(loader_); + } +#elif XE_PLATFORM_WIN32 + if (loader_) { + FreeLibrary(loader_); + } +#endif +} + +void VulkanInstance::EnumeratePhysicalDevices( + std::vector& physical_devices_out) const { + physical_devices_out.clear(); + while (true) { + uint32_t physical_device_count = 0; + const VkResult get_physical_device_count_result = + functions_.vkEnumeratePhysicalDevices(instance_, &physical_device_count, + nullptr); + if ((get_physical_device_count_result != VK_SUCCESS && + get_physical_device_count_result != VK_INCOMPLETE) || + !physical_device_count) { + return; + } + physical_devices_out.resize(physical_device_count); + const VkResult get_physical_devices_result = + functions_.vkEnumeratePhysicalDevices(instance_, &physical_device_count, + physical_devices_out.data()); + if (get_physical_devices_result == VK_INCOMPLETE) { + continue; + } + physical_devices_out.resize( + get_physical_devices_result == VK_SUCCESS ? 
physical_device_count : 0); + return; + } +} + +VkBool32 VulkanInstance::DebugUtilsMessengerCallback( + VkDebugUtilsMessageSeverityFlagBitsEXT message_severity, + VkDebugUtilsMessageTypeFlagsEXT message_types, + const VkDebugUtilsMessengerCallbackDataEXT* callback_data, + [[maybe_unused]] void* user_data) { + xe::LogLevel log_level; + char log_prefix_char; + if (message_severity >= VK_DEBUG_UTILS_MESSAGE_SEVERITY_ERROR_BIT_EXT) { + log_level = xe::LogLevel::Error; + log_prefix_char = xe::logging::kPrefixCharError; + } else if (message_severity >= + VK_DEBUG_UTILS_MESSAGE_SEVERITY_WARNING_BIT_EXT) { + log_level = xe::LogLevel::Warning; + log_prefix_char = xe::logging::kPrefixCharWarning; + } else if (message_severity >= VK_DEBUG_UTILS_MESSAGE_SEVERITY_INFO_BIT_EXT) { + log_level = xe::LogLevel::Info; + log_prefix_char = xe::logging::kPrefixCharInfo; + } else { + log_level = xe::LogLevel::Debug; + log_prefix_char = xe::logging::kPrefixCharDebug; + } + + std::ostringstream log_str; + + log_str << "Vulkan " + << vk::to_string( + vk::DebugUtilsMessageSeverityFlagBitsEXT(message_severity)) + << " (" + << vk::to_string(vk::DebugUtilsMessageTypeFlagsEXT(message_types)) + << ", ID " << callback_data->messageIdNumber; + if (callback_data->pMessageIdName) { + log_str << ": " << callback_data->pMessageIdName; + } + log_str << ')'; + + if (callback_data->pMessage) { + log_str << ": " << callback_data->pMessage; + } + + bool annotations_begun = false; + const auto begin_annotation = [&log_str, &annotations_begun]() { + log_str << (annotations_begun ? 
", " : " ("); + annotations_begun = true; + }; + + for (uint32_t queue_label_index = 0; + queue_label_index < callback_data->queueLabelCount; + ++queue_label_index) { + begin_annotation(); + log_str << "queue label " << queue_label_index << ": " + << callback_data->pQueueLabels[queue_label_index].pLabelName; + } + + for (uint32_t cmd_buf_label_index = 0; + cmd_buf_label_index < callback_data->cmdBufLabelCount; + ++cmd_buf_label_index) { + begin_annotation(); + log_str << "command buffer label " << cmd_buf_label_index << ": " + << callback_data->pCmdBufLabels[cmd_buf_label_index].pLabelName; + } + + for (uint32_t object_index = 0; object_index < callback_data->objectCount; + ++object_index) { + begin_annotation(); + const VkDebugUtilsObjectNameInfoEXT& object_info = + callback_data->pObjects[object_index]; + // Lowercase hexadecimal digits in the handle to match the default Vulkan + // debug utils messenger. + log_str << "object " << object_index << ": " + << vk::to_string(vk::ObjectType(object_info.objectType)) << " 0x" + << std::hex << object_info.objectHandle << std::dec; + if (object_info.pObjectName) { + log_str << " '" << object_info.pObjectName << '\''; + } + } + + if (annotations_begun) { + log_str << ')'; + } + + xe::logging::AppendLogLine(log_level, log_prefix_char, log_str.str()); + + return VK_FALSE; +} + +} // namespace vulkan +} // namespace ui +} // namespace xe diff --git a/src/xenia/ui/vulkan/vulkan_instance.h b/src/xenia/ui/vulkan/vulkan_instance.h new file mode 100644 index 000000000..1468495f5 --- /dev/null +++ b/src/xenia/ui/vulkan/vulkan_instance.h @@ -0,0 +1,145 @@ +/** + ****************************************************************************** + * Xenia : Xbox 360 Emulator Research Project * + ****************************************************************************** + * Copyright 2025 Ben Vanik. All rights reserved. * + * Released under the BSD license - see LICENSE in the root for more details. 
* + ****************************************************************************** + */ + +#ifndef XENIA_UI_VULKAN_VULKAN_INSTANCE_H_ +#define XENIA_UI_VULKAN_VULKAN_INSTANCE_H_ + +#include +#include + +#include "xenia/base/platform.h" +#include "xenia/ui/renderdoc_api.h" +#include "xenia/ui/vulkan/vulkan_api.h" + +#if XE_PLATFORM_WIN32 +#include "xenia/base/platform_win.h" +#endif + +namespace xe { +namespace ui { +namespace vulkan { + +class VulkanInstance { + public: + static std::unique_ptr Create(bool with_surface, + bool try_enable_validation); + + VulkanInstance(const VulkanInstance&) = delete; + VulkanInstance& operator=(const VulkanInstance&) = delete; + VulkanInstance(VulkanInstance&&) = delete; + VulkanInstance& operator=(VulkanInstance&&) = delete; + + ~VulkanInstance(); + + // nullptr if RenderDoc is not connected. + RenderDocAPI* renderdoc_api() const { return renderdoc_api_.get(); } + + struct Functions { + // From the loader module. + PFN_vkGetInstanceProcAddr vkGetInstanceProcAddr = nullptr; + PFN_vkDestroyInstance vkDestroyInstance = nullptr; + + // From vkGetInstanceProcAddr for nullptr. + PFN_vkCreateInstance vkCreateInstance = nullptr; + PFN_vkEnumerateInstanceExtensionProperties + vkEnumerateInstanceExtensionProperties = nullptr; + PFN_vkEnumerateInstanceLayerProperties vkEnumerateInstanceLayerProperties = + nullptr; + // Vulkan 1.1. + PFN_vkEnumerateInstanceVersion vkEnumerateInstanceVersion = nullptr; + + // From vkGetInstanceProcAddr for the instance. 
+#define XE_UI_VULKAN_FUNCTION(name) PFN_##name name = nullptr; +#define XE_UI_VULKAN_FUNCTION_PROMOTED(extension_name, core_name) \ + PFN_##core_name core_name = nullptr; +#include "xenia/ui/vulkan/functions/instance_1_0.inc" + // VK_KHR_surface (#1) +#include "xenia/ui/vulkan/functions/instance_khr_surface.inc" + // VK_KHR_xcb_surface (#6) +#ifdef VK_USE_PLATFORM_XCB_KHR +#include "xenia/ui/vulkan/functions/instance_khr_xcb_surface.inc" +#endif + // VK_KHR_android_surface (#9) +#ifdef VK_USE_PLATFORM_ANDROID_KHR +#include "xenia/ui/vulkan/functions/instance_khr_android_surface.inc" +#endif + // VK_KHR_win32_surface (#10) +#ifdef VK_USE_PLATFORM_WIN32_KHR +#include "xenia/ui/vulkan/functions/instance_khr_win32_surface.inc" +#endif + // VK_KHR_get_physical_device_properties2 (#60, promoted to 1.1) +#include "xenia/ui/vulkan/functions/instance_1_1_khr_get_physical_device_properties2.inc" + // VK_EXT_debug_utils (#129) +#include "xenia/ui/vulkan/functions/instance_ext_debug_utils.inc" +#undef XE_UI_VULKAN_FUNCTION_PROMOTED +#undef XE_UI_VULKAN_FUNCTION + }; + + const Functions& functions() const { return functions_; } + + uint32_t api_version() const { return api_version_; } + + // Also set to true if the version of the Vulkan API they were promoted to is + supported (with the `ext_major_minor_` prefix rather than `ext_`). 
+ struct Extensions { + bool ext_KHR_surface = false; // #1 +#ifdef VK_USE_PLATFORM_XCB_KHR + bool ext_KHR_xcb_surface = false; // #6 +#endif +#ifdef VK_USE_PLATFORM_ANDROID_KHR + bool ext_KHR_android_surface = false; // #9 +#endif +#ifdef VK_USE_PLATFORM_WIN32_KHR + bool ext_KHR_win32_surface = false; // #10 +#endif + bool ext_1_1_KHR_get_physical_device_properties2 = false; // #60 + bool ext_EXT_debug_utils = false; // #129 + bool ext_KHR_portability_enumeration = false; // #395 + }; + + const Extensions& extensions() const { return extensions_; } + + VkInstance instance() const { return instance_; } + + void EnumeratePhysicalDevices( + std::vector& physical_devices_out) const; + + private: + explicit VulkanInstance() = default; + + std::unique_ptr renderdoc_api_; + +#if XE_PLATFORM_LINUX + void* loader_ = nullptr; +#elif XE_PLATFORM_WIN32 + HMODULE loader_ = nullptr; +#endif + + Functions functions_; + + uint32_t api_version_ = VK_MAKE_API_VERSION(0, 1, 0, 0); + + Extensions extensions_; + + VkInstance instance_ = nullptr; + + static VkBool32 DebugUtilsMessengerCallback( + VkDebugUtilsMessageSeverityFlagBitsEXT message_severity, + VkDebugUtilsMessageTypeFlagsEXT message_types, + const VkDebugUtilsMessengerCallbackDataEXT* callback_data, + void* user_data); + + VkDebugUtilsMessengerEXT debug_utils_messenger_ = VK_NULL_HANDLE; +}; + +} // namespace vulkan +} // namespace ui +} // namespace xe + +#endif // XENIA_UI_VULKAN_VULKAN_INSTANCE_H_ diff --git a/src/xenia/ui/vulkan/vulkan_mem_alloc.cc b/src/xenia/ui/vulkan/vulkan_mem_alloc.cc index 688b2ff7c..e892a271b 100644 --- a/src/xenia/ui/vulkan/vulkan_mem_alloc.cc +++ b/src/xenia/ui/vulkan/vulkan_mem_alloc.cc @@ -14,25 +14,24 @@ #include #include "xenia/base/logging.h" -#include "xenia/ui/vulkan/vulkan_provider.h" namespace xe { namespace ui { namespace vulkan { -VmaAllocator CreateVmaAllocator(const VulkanProvider& provider, - bool externally_synchronized) { - const VulkanProvider::LibraryFunctions& lfn = 
provider.lfn(); - const VulkanProvider::InstanceFunctions& ifn = provider.ifn(); - const VulkanProvider::DeviceFunctions& dfn = provider.dfn(); - const VulkanProvider::InstanceExtensions& instance_extensions = - provider.instance_extensions(); - const VulkanProvider::DeviceInfo& device_info = provider.device_info(); +VmaAllocator CreateVmaAllocator(const VulkanDevice* const vulkan_device, + const bool externally_synchronized) { + assert_not_null(vulkan_device); + + const VulkanInstance* const vulkan_instance = + vulkan_device->vulkan_instance(); + const VulkanInstance::Functions& ifn = vulkan_instance->functions(); + const VulkanDevice::Functions& dfn = vulkan_device->functions(); VmaVulkanFunctions vma_vulkan_functions = {}; VmaAllocatorCreateInfo allocator_create_info = {}; - vma_vulkan_functions.vkGetInstanceProcAddr = lfn.vkGetInstanceProcAddr; + vma_vulkan_functions.vkGetInstanceProcAddr = ifn.vkGetInstanceProcAddr; vma_vulkan_functions.vkGetDeviceProcAddr = ifn.vkGetDeviceProcAddr; vma_vulkan_functions.vkGetPhysicalDeviceProperties = ifn.vkGetPhysicalDeviceProperties; @@ -57,29 +56,30 @@ VmaAllocator CreateVmaAllocator(const VulkanProvider& provider, vma_vulkan_functions.vkCreateImage = dfn.vkCreateImage; vma_vulkan_functions.vkDestroyImage = dfn.vkDestroyImage; vma_vulkan_functions.vkCmdCopyBuffer = dfn.vkCmdCopyBuffer; - if (device_info.ext_1_1_VK_KHR_get_memory_requirements2) { + if (vulkan_device->extensions().ext_1_1_KHR_get_memory_requirements2) { vma_vulkan_functions.vkGetBufferMemoryRequirements2KHR = dfn.vkGetBufferMemoryRequirements2; vma_vulkan_functions.vkGetImageMemoryRequirements2KHR = dfn.vkGetImageMemoryRequirements2; - if (device_info.ext_1_1_VK_KHR_dedicated_allocation) { + if (vulkan_device->extensions().ext_1_1_KHR_dedicated_allocation) { allocator_create_info.flags |= VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT; } } - if (device_info.ext_1_1_VK_KHR_bind_memory2) { + if (vulkan_device->extensions().ext_1_1_KHR_bind_memory2) { 
vma_vulkan_functions.vkBindBufferMemory2KHR = dfn.vkBindBufferMemory2; vma_vulkan_functions.vkBindImageMemory2KHR = dfn.vkBindImageMemory2; allocator_create_info.flags |= VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT; } - if (instance_extensions.khr_get_physical_device_properties2) { + if (vulkan_instance->extensions() + .ext_1_1_KHR_get_physical_device_properties2) { vma_vulkan_functions.vkGetPhysicalDeviceMemoryProperties2KHR = ifn.vkGetPhysicalDeviceMemoryProperties2; - if (device_info.ext_VK_EXT_memory_budget) { + if (vulkan_device->extensions().ext_EXT_memory_budget) { allocator_create_info.flags |= VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT; } } - if (device_info.ext_1_3_VK_KHR_maintenance4) { + if (vulkan_device->extensions().ext_1_3_KHR_maintenance4) { vma_vulkan_functions.vkGetDeviceBufferMemoryRequirements = dfn.vkGetDeviceBufferMemoryRequirements; vma_vulkan_functions.vkGetDeviceImageMemoryRequirements = @@ -90,11 +90,12 @@ VmaAllocator CreateVmaAllocator(const VulkanProvider& provider, allocator_create_info.flags |= VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT; } - allocator_create_info.physicalDevice = provider.physical_device(); - allocator_create_info.device = provider.device(); + allocator_create_info.physicalDevice = vulkan_device->physical_device(); + allocator_create_info.device = vulkan_device->device(); allocator_create_info.pVulkanFunctions = &vma_vulkan_functions; - allocator_create_info.instance = provider.instance(); - allocator_create_info.vulkanApiVersion = device_info.apiVersion; + allocator_create_info.instance = vulkan_instance->instance(); + allocator_create_info.vulkanApiVersion = + vulkan_device->properties().apiVersion; VmaAllocator allocator; if (vmaCreateAllocator(&allocator_create_info, &allocator) != VK_SUCCESS) { XELOGE("Failed to create a Vulkan Memory Allocator instance"); diff --git a/src/xenia/ui/vulkan/vulkan_mem_alloc.h b/src/xenia/ui/vulkan/vulkan_mem_alloc.h index 9ae9db16e..b26c34cba 100644 --- 
a/src/xenia/ui/vulkan/vulkan_mem_alloc.h +++ b/src/xenia/ui/vulkan/vulkan_mem_alloc.h @@ -13,7 +13,7 @@ // Make sure vulkan.h is included from third_party (rather than from the system // include directory) before vk_mem_alloc.h. -#include "xenia/ui/vulkan/vulkan_provider.h" +#include "xenia/ui/vulkan/vulkan_device.h" #define VMA_STATIC_VULKAN_FUNCTIONS 0 // Work around the pointer nullability completeness warnings on Clang. @@ -29,7 +29,7 @@ namespace xe { namespace ui { namespace vulkan { -VmaAllocator CreateVmaAllocator(const VulkanProvider& provider, +VmaAllocator CreateVmaAllocator(const VulkanDevice* vulkan_device, bool externally_synchronized); } // namespace vulkan diff --git a/src/xenia/ui/vulkan/vulkan_presenter.cc b/src/xenia/ui/vulkan/vulkan_presenter.cc index c158415c4..8846937ca 100644 --- a/src/xenia/ui/vulkan/vulkan_presenter.cc +++ b/src/xenia/ui/vulkan/vulkan_presenter.cc @@ -73,8 +73,8 @@ namespace shaders { } // namespace shaders VulkanPresenter::PaintContext::Submission::~Submission() { - const VulkanProvider::DeviceFunctions& dfn = provider_.dfn(); - VkDevice device = provider_.device(); + const VulkanDevice::Functions& dfn = vulkan_device_->functions(); + const VkDevice device = vulkan_device_->device(); if (draw_command_pool_ != VK_NULL_HANDLE) { dfn.vkDestroyCommandPool(device, draw_command_pool_, nullptr); @@ -89,8 +89,8 @@ VulkanPresenter::PaintContext::Submission::~Submission() { } bool VulkanPresenter::PaintContext::Submission::Initialize() { - const VulkanProvider::DeviceFunctions& dfn = provider_.dfn(); - VkDevice device = provider_.device(); + const VulkanDevice::Functions& dfn = vulkan_device_->functions(); + const VkDevice device = vulkan_device_->device(); VkSemaphoreCreateInfo semaphore_create_info; semaphore_create_info.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO; @@ -116,7 +116,7 @@ bool VulkanPresenter::PaintContext::Submission::Initialize() { command_pool_create_info.pNext = nullptr; command_pool_create_info.flags = 
VK_COMMAND_POOL_CREATE_TRANSIENT_BIT; command_pool_create_info.queueFamilyIndex = - provider_.queue_family_graphics_compute(); + vulkan_device_->queue_family_graphics_compute(); if (dfn.vkCreateCommandPool(device, &command_pool_create_info, nullptr, &draw_command_pool_) != VK_SUCCESS) { XELOGE( @@ -161,8 +161,8 @@ VulkanPresenter::~VulkanPresenter() { ui_submission_tracker_.Shutdown(); guest_output_image_refresher_submission_tracker_.Shutdown(); - const VulkanProvider::DeviceFunctions& dfn = provider_.dfn(); - VkDevice device = provider_.device(); + const VulkanDevice::Functions& dfn = vulkan_device_->functions(); + const VkDevice device = vulkan_device_->device(); if (paint_context_.swapchain_render_pass != VK_NULL_HANDLE) { dfn.vkDestroyRenderPass(device, paint_context_.swapchain_render_pass, @@ -207,11 +207,36 @@ VulkanPresenter::~VulkanPresenter() { guest_output_paint_image_descriptor_set_layout_); } -Surface::TypeFlags VulkanPresenter::GetSupportedSurfaceTypes() const { - if (!provider_.device_info().ext_VK_KHR_swapchain) { +Surface::TypeFlags VulkanPresenter::GetSurfaceTypesSupportedByInstance( + const VulkanInstance::Extensions& instance_extensions) { + if (!instance_extensions.ext_KHR_surface) { return 0; } - return GetSurfaceTypesSupportedByInstance(provider_.instance_extensions()); + Surface::TypeFlags type_flags = 0; +#if XE_PLATFORM_ANDROID + if (instance_extensions.ext_KHR_android_surface) { + type_flags |= Surface::kTypeFlag_AndroidNativeWindow; + } +#endif +#if XE_PLATFORM_GNU_LINUX + if (instance_extensions.ext_KHR_xcb_surface) { + type_flags |= Surface::kTypeFlag_XcbWindow; + } +#endif +#if XE_PLATFORM_WIN32 + if (instance_extensions.ext_KHR_win32_surface) { + type_flags |= Surface::kTypeFlag_Win32Hwnd; + } +#endif + return type_flags; +} + +Surface::TypeFlags VulkanPresenter::GetSupportedSurfaceTypes() const { + if (!vulkan_device_->extensions().ext_KHR_swapchain) { + return 0; + } + return GetSurfaceTypesSupportedByInstance( + 
vulkan_device_->vulkan_instance()->extensions()); } bool VulkanPresenter::CaptureGuestOutput(RawImage& image_out) { @@ -239,14 +264,14 @@ bool VulkanPresenter::CaptureGuestOutput(RawImage& image_out) { VkBuffer buffer; VkDeviceMemory buffer_memory; if (!util::CreateDedicatedAllocationBuffer( - provider_, buffer_size, VK_BUFFER_USAGE_TRANSFER_DST_BIT, + vulkan_device_, buffer_size, VK_BUFFER_USAGE_TRANSFER_DST_BIT, util::MemoryPurpose::kReadback, buffer, buffer_memory)) { XELOGE("VulkanPresenter: Failed to create the guest output capture buffer"); return false; } - const VulkanProvider::DeviceFunctions& dfn = provider_.dfn(); - VkDevice device = provider_.device(); + const VulkanDevice::Functions& dfn = vulkan_device_->functions(); + const VkDevice device = vulkan_device_->device(); { VkCommandPoolCreateInfo command_pool_create_info; @@ -254,7 +279,7 @@ bool VulkanPresenter::CaptureGuestOutput(RawImage& image_out) { command_pool_create_info.pNext = nullptr; command_pool_create_info.flags = VK_COMMAND_POOL_CREATE_TRANSIENT_BIT; command_pool_create_info.queueFamilyIndex = - provider_.queue_family_graphics_compute(); + vulkan_device_->queue_family_graphics_compute(); VkCommandPool command_pool; if (dfn.vkCreateCommandPool(device, &command_pool_create_info, nullptr, &command_pool) != VK_SUCCESS) { @@ -361,7 +386,7 @@ bool VulkanPresenter::CaptureGuestOutput(RawImage& image_out) { submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info.commandBufferCount = 1; submit_info.pCommandBuffers = &command_buffer; - VulkanSubmissionTracker submission_tracker(provider_); + VulkanSubmissionTracker submission_tracker(vulkan_device_); { VulkanSubmissionTracker::FenceAcquisition fence_acqusition( submission_tracker.AcquireFenceToAdvanceSubmission()); @@ -377,11 +402,12 @@ bool VulkanPresenter::CaptureGuestOutput(RawImage& image_out) { } VkResult submit_result; { - VulkanProvider::QueueAcquisition queue_acquisition( - 
provider_.AcquireQueue(provider_.queue_family_graphics_compute(), - 0)); - submit_result = dfn.vkQueueSubmit( - queue_acquisition.queue, 1, &submit_info, fence_acqusition.fence()); + const VulkanDevice::Queue::Acquisition queue_acquisition = + vulkan_device_->AcquireQueue( + vulkan_device_->queue_family_graphics_compute(), 0); + submit_result = + dfn.vkQueueSubmit(queue_acquisition.queue(), 1, &submit_info, + fence_acqusition.fence()); } if (submit_result != VK_SUCCESS) { XELOGE( @@ -442,8 +468,8 @@ VkCommandBuffer VulkanPresenter::AcquireUISetupCommandBufferFromUIThread() { .command_buffer; } - const VulkanProvider::DeviceFunctions& dfn = provider_.dfn(); - VkDevice device = provider_.device(); + const VulkanDevice::Functions& dfn = vulkan_device_->functions(); + const VkDevice device = vulkan_device_->device(); VkCommandBufferBeginInfo command_buffer_begin_info; command_buffer_begin_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO; @@ -488,7 +514,7 @@ VkCommandBuffer VulkanPresenter::AcquireUISetupCommandBufferFromUIThread() { command_pool_create_info.pNext = nullptr; command_pool_create_info.flags = VK_COMMAND_POOL_CREATE_TRANSIENT_BIT; command_pool_create_info.queueFamilyIndex = - provider_.queue_family_graphics_compute(); + vulkan_device_->queue_family_graphics_compute(); VkCommandPool new_command_pool; if (dfn.vkCreateCommandPool(device, &command_pool_create_info, nullptr, &new_command_pool) != VK_SUCCESS) { @@ -529,11 +555,12 @@ VulkanPresenter::ConnectOrReconnectPaintingToSurfaceFromUIThread( Surface& new_surface, uint32_t new_surface_width, uint32_t new_surface_height, bool was_paintable, bool& is_vsync_implicit_out) { - const VulkanProvider::InstanceFunctions& ifn = provider_.ifn(); - VkInstance instance = provider_.instance(); - VkPhysicalDevice physical_device = provider_.physical_device(); - const VulkanProvider::DeviceFunctions& dfn = provider_.dfn(); - VkDevice device = provider_.device(); + const VulkanInstance* const vulkan_instance = + 
vulkan_device_->vulkan_instance(); + const VulkanInstance::Functions& ifn = vulkan_instance->functions(); + const VkInstance instance = vulkan_instance->instance(); + const VulkanDevice::Functions& dfn = vulkan_device_->functions(); + const VkDevice device = vulkan_device_->device(); VkFormat new_swapchain_format; @@ -550,7 +577,7 @@ VulkanPresenter::ConnectOrReconnectPaintingToSurfaceFromUIThread( paint_context_.PrepareForSwapchainRetirement(); bool surface_unusable; paint_context_.swapchain = PaintContext::CreateSwapchainForVulkanSurface( - provider_, paint_context_.vulkan_surface, new_surface_width, + vulkan_device_, paint_context_.vulkan_surface, new_surface_width, new_surface_height, old_swapchain, paint_context_.present_queue_family, new_swapchain_format, paint_context_.swapchain_extent, paint_context_.swapchain_is_fifo, surface_unusable); @@ -641,7 +668,7 @@ VulkanPresenter::ConnectOrReconnectPaintingToSurfaceFromUIThread( } bool surface_unusable; paint_context_.swapchain = PaintContext::CreateSwapchainForVulkanSurface( - provider_, paint_context_.vulkan_surface, new_surface_width, + vulkan_device_, paint_context_.vulkan_surface, new_surface_width, new_surface_height, VK_NULL_HANDLE, paint_context_.present_queue_family, new_swapchain_format, paint_context_.swapchain_extent, paint_context_.swapchain_is_fifo, surface_unusable); @@ -837,7 +864,7 @@ bool VulkanPresenter::RefreshGuestOutputImpl( assert_not_zero(frontbuffer_width); assert_not_zero(frontbuffer_height); VkExtent2D max_framebuffer_extent = - util::GetMax2DFramebufferExtent(provider_); + util::GetMax2DFramebufferExtent(vulkan_device_->properties()); if (frontbuffer_width > max_framebuffer_extent.width || frontbuffer_height > max_framebuffer_extent.height) { // Writing the guest output isn't supposed to rescale, and a guest texture @@ -856,7 +883,7 @@ bool VulkanPresenter::RefreshGuestOutputImpl( } if (!image_instance.image) { std::unique_ptr new_image = GuestOutputImage::Create( - provider_, 
frontbuffer_width, frontbuffer_height); + vulkan_device_, frontbuffer_width, frontbuffer_height); if (!new_image) { return false; } @@ -883,14 +910,15 @@ bool VulkanPresenter::RefreshGuestOutputImpl( // "Fence signal operations that are defined by vkQueueSubmit additionally // include in the first synchronization scope all commands that occur earlier // in submission order." - const VulkanProvider::DeviceFunctions& dfn = provider_.dfn(); + const VulkanDevice::Functions& dfn = vulkan_device_->functions(); { VulkanSubmissionTracker::FenceAcquisition fence_acqusition( guest_output_image_refresher_submission_tracker_ .AcquireFenceToAdvanceSubmission()); - VulkanProvider::QueueAcquisition queue_acquisition( - provider_.AcquireQueue(provider_.queue_family_graphics_compute(), 0)); - if (dfn.vkQueueSubmit(queue_acquisition.queue, 0, nullptr, + const VulkanDevice::Queue::Acquisition queue_acquisition = + vulkan_device_->AcquireQueue( + vulkan_device_->queue_family_graphics_compute(), 0); + if (dfn.vkQueueSubmit(queue_acquisition.queue(), 0, nullptr, fence_acqusition.fence()) != VK_SUCCESS) { fence_acqusition.SubmissionSucceededSignalFailed(); } @@ -900,18 +928,18 @@ bool VulkanPresenter::RefreshGuestOutputImpl( } VkSwapchainKHR VulkanPresenter::PaintContext::CreateSwapchainForVulkanSurface( - const VulkanProvider& provider, VkSurfaceKHR surface, uint32_t width, + const VulkanDevice* vulkan_device, VkSurfaceKHR surface, uint32_t width, uint32_t height, VkSwapchainKHR old_swapchain, uint32_t& present_queue_family_out, VkFormat& image_format_out, VkExtent2D& image_extent_out, bool& is_fifo_out, bool& ui_surface_unusable_out) { ui_surface_unusable_out = false; - const VulkanProvider::InstanceFunctions& ifn = provider.ifn(); - VkInstance instance = provider.instance(); - VkPhysicalDevice physical_device = provider.physical_device(); - const VulkanProvider::DeviceFunctions& dfn = provider.dfn(); - VkDevice device = provider.device(); + const VulkanInstance::Functions& ifn = + 
vulkan_device->vulkan_instance()->functions(); + const VkPhysicalDevice physical_device = vulkan_device->physical_device(); + const VulkanDevice::Functions& dfn = vulkan_device->functions(); + const VkDevice device = vulkan_device->device(); // Get surface capabilities. VkSurfaceCapabilitiesKHR surface_capabilities; @@ -934,7 +962,8 @@ VkSwapchainKHR VulkanPresenter::PaintContext::CreateSwapchainForVulkanSurface( // requirements - the maximum 2D framebuffer size on the specific physical // device, and the minimum swap chain size on the whole instance - fail to // create until the surface becomes smaller). - VkExtent2D max_framebuffer_extent = util::GetMax2DFramebufferExtent(provider); + VkExtent2D max_framebuffer_extent = + util::GetMax2DFramebufferExtent(vulkan_device->properties()); VkExtent2D image_extent; image_extent.width = std::min(std::max(std::min(width, max_framebuffer_extent.width), @@ -952,17 +981,17 @@ VkSwapchainKHR VulkanPresenter::PaintContext::CreateSwapchainForVulkanSurface( // Get the queue family for presentation. uint32_t queue_family_index_present = UINT32_MAX; - const std::vector& queue_families = - provider.queue_families(); + const std::vector& queue_families = + vulkan_device->queue_families(); VkBool32 queue_family_present_supported; // First try the graphics and compute queue, prefer it to avoid the concurrent // image sharing mode. 
uint32_t queue_family_index_graphics_compute = - provider.queue_family_graphics_compute(); - const VulkanProvider::QueueFamily& queue_family_graphics_compute = + vulkan_device->queue_family_graphics_compute(); + const VulkanDevice::QueueFamily& queue_family_graphics_compute = queue_families[queue_family_index_graphics_compute]; - if (queue_family_graphics_compute.potentially_supports_present && - queue_family_graphics_compute.queue_count && + if (queue_family_graphics_compute.may_support_presentation && + !queue_family_graphics_compute.queues.empty() && ifn.vkGetPhysicalDeviceSurfaceSupportKHR( physical_device, queue_family_index_graphics_compute, surface, &queue_family_present_supported) == VK_SUCCESS && @@ -970,9 +999,9 @@ VkSwapchainKHR VulkanPresenter::PaintContext::CreateSwapchainForVulkanSurface( queue_family_index_present = queue_family_index_graphics_compute; } else { for (uint32_t i = 0; i < uint32_t(queue_families.size()); ++i) { - const VulkanProvider::QueueFamily& queue_family = queue_families[i]; - if (queue_family.potentially_supports_present && - queue_family.queue_count && + const VulkanDevice::QueueFamily& queue_family = queue_families[i]; + if (!queue_family.queues.empty() && + queue_family.may_support_presentation && ifn.vkGetPhysicalDeviceSurfaceSupportKHR( physical_device, i, surface, &queue_family_present_supported) == VK_SUCCESS && @@ -1228,7 +1257,7 @@ VkSwapchainKHR VulkanPresenter::PaintContext::CreateSwapchainForVulkanSurface( XELOGE("VulkanPresenter: Failed to create a swapchain"); return VK_NULL_HANDLE; } - XELOGVK( + XELOGI( "VulkanPresenter: Created {}x{} swapchain with format {}, color space " "{}, presentation mode {}", swapchain_create_info.imageExtent.width, @@ -1250,8 +1279,8 @@ VkSwapchainKHR VulkanPresenter::PaintContext::PrepareForSwapchainRetirement() { if (swapchain != VK_NULL_HANDLE) { submission_tracker.AwaitAllSubmissionsCompletion(); } - const VulkanProvider::DeviceFunctions& dfn = provider.dfn(); - VkDevice device = 
provider.device(); + const VulkanDevice::Functions& dfn = vulkan_device->functions(); + const VkDevice device = vulkan_device->device(); for (const SwapchainFramebuffer& framebuffer : swapchain_framebuffers) { dfn.vkDestroyFramebuffer(device, framebuffer.framebuffer, nullptr); dfn.vkDestroyImageView(device, framebuffer.image_view, nullptr); @@ -1269,22 +1298,21 @@ VkSwapchainKHR VulkanPresenter::PaintContext::PrepareForSwapchainRetirement() { void VulkanPresenter::PaintContext::DestroySwapchainAndVulkanSurface() { VkSwapchainKHR old_swapchain = PrepareForSwapchainRetirement(); if (old_swapchain != VK_NULL_HANDLE) { - const VulkanProvider::DeviceFunctions& dfn = provider.dfn(); - VkDevice device = provider.device(); - dfn.vkDestroySwapchainKHR(device, old_swapchain, nullptr); + vulkan_device->functions().vkDestroySwapchainKHR(vulkan_device->device(), + old_swapchain, nullptr); } present_queue_family = UINT32_MAX; if (vulkan_surface != VK_NULL_HANDLE) { - const VulkanProvider::InstanceFunctions& ifn = provider.ifn(); - VkInstance instance = provider.instance(); - ifn.vkDestroySurfaceKHR(instance, vulkan_surface, nullptr); + const VulkanInstance* vulkan_instance = vulkan_device->vulkan_instance(); + vulkan_instance->functions().vkDestroySurfaceKHR( + vulkan_instance->instance(), vulkan_surface, nullptr); vulkan_surface = VK_NULL_HANDLE; } } VulkanPresenter::GuestOutputImage::~GuestOutputImage() { - const VulkanProvider::DeviceFunctions& dfn = provider_.dfn(); - VkDevice device = provider_.device(); + const VulkanDevice::Functions& dfn = vulkan_device_->functions(); + const VkDevice device = vulkan_device_->device(); if (view_ != VK_NULL_HANDLE) { dfn.vkDestroyImageView(device, view_, nullptr); } @@ -1318,14 +1346,14 @@ bool VulkanPresenter::GuestOutputImage::Initialize() { image_create_info.pQueueFamilyIndices = nullptr; image_create_info.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; if (!ui::vulkan::util::CreateDedicatedAllocationImage( - provider_, image_create_info, + 
vulkan_device_, image_create_info, ui::vulkan::util::MemoryPurpose::kDeviceLocal, image_, memory_)) { XELOGE("VulkanPresenter: Failed to create a guest output image"); return false; } - const VulkanProvider::DeviceFunctions& dfn = provider_.dfn(); - VkDevice device = provider_.device(); + const VulkanDevice::Functions& dfn = vulkan_device_->functions(); + const VkDevice device = vulkan_device_->device(); VkImageViewCreateInfo image_view_create_info; image_view_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO; @@ -1368,8 +1396,8 @@ Presenter::PaintResult VulkanPresenter::PaintAndPresentImpl( *paint_context_.submissions[current_paint_submission_index % paint_submission_count]; - const VulkanProvider::DeviceFunctions& dfn = provider_.dfn(); - VkDevice device = provider_.device(); + const VulkanDevice::Functions& dfn = vulkan_device_->functions(); + const VkDevice device = vulkan_device_->device(); VkCommandPool draw_command_pool = paint_submission.draw_command_pool(); if (dfn.vkResetCommandPool(device, draw_command_pool, 0) != VK_SUCCESS) { @@ -1414,7 +1442,7 @@ Presenter::PaintResult VulkanPresenter::PaintAndPresentImpl( case VK_ERROR_FULL_SCREEN_EXCLUSIVE_MODE_LOST_EXT: // Not an error, reporting just as info (may normally occur while resizing // on some platforms). 
- XELOGVK( + XELOGI( "VulkanPresenter: Presentation to the swapchain image has been " "dropped as the swapchain or the surface has become outdated"); return PaintResult::kNotPresentedConnectionOutdated; @@ -1482,7 +1510,7 @@ Presenter::PaintResult VulkanPresenter::PaintAndPresentImpl( if (guest_output_image) { VkExtent2D max_framebuffer_extent = - util::GetMax2DFramebufferExtent(provider_); + util::GetMax2DFramebufferExtent(vulkan_device_->properties()); GuestOutputPaintFlow guest_output_flow = GetGuestOutputPaintFlow( guest_output_properties, paint_context_.swapchain_extent.width, paint_context_.swapchain_extent.height, max_framebuffer_extent.width, @@ -1599,7 +1627,7 @@ Presenter::PaintResult VulkanPresenter::PaintAndPresentImpl( } // Image. intermediate_image_ptr_ref = GuestOutputImage::Create( - provider_, intermediate_needed_size.first, + vulkan_device_, intermediate_needed_size.first, intermediate_needed_size.second); if (!intermediate_image_ptr_ref) { // Don't display the guest output, and don't try to create more @@ -2029,13 +2057,14 @@ Presenter::PaintResult VulkanPresenter::PaintAndPresentImpl( } VkResult submit_result; { - VulkanProvider::QueueAcquisition queue_acquisition( - provider_.AcquireQueue(provider_.queue_family_graphics_compute(), 0)); - submit_result = dfn.vkQueueSubmit(queue_acquisition.queue, 1, + const VulkanDevice::Queue::Acquisition queue_acquisition = + vulkan_device_->AcquireQueue( + vulkan_device_->queue_family_graphics_compute(), 0); + submit_result = dfn.vkQueueSubmit(queue_acquisition.queue(), 1, &submit_info, fence_acqusition.fence()); if (ui_fence_acquisition.fence() != VK_NULL_HANDLE && submit_result == VK_SUCCESS) { - if (dfn.vkQueueSubmit(queue_acquisition.queue, 0, nullptr, + if (dfn.vkQueueSubmit(queue_acquisition.queue(), 0, nullptr, ui_fence_acquisition.fence()) != VK_SUCCESS) { ui_fence_acquisition.SubmissionSucceededSignalFailed(); } @@ -2072,10 +2101,10 @@ Presenter::PaintResult VulkanPresenter::PaintAndPresentImpl( 
present_info.pResults = nullptr; VkResult present_result; { - VulkanProvider::QueueAcquisition queue_acquisition( - provider_.AcquireQueue(paint_context_.present_queue_family, 0)); + const VulkanDevice::Queue::Acquisition queue_acquisition = + vulkan_device_->AcquireQueue(paint_context_.present_queue_family, 0); present_result = - dfn.vkQueuePresentKHR(queue_acquisition.queue, &present_info); + dfn.vkQueuePresentKHR(queue_acquisition.queue(), &present_info); } switch (present_result) { case VK_SUCCESS: @@ -2092,7 +2121,7 @@ Presenter::PaintResult VulkanPresenter::PaintAndPresentImpl( case VK_ERROR_FULL_SCREEN_EXCLUSIVE_MODE_LOST_EXT: // Not an error, reporting just as info (may normally occur while resizing // on some platforms). - XELOGVK( + XELOGI( "VulkanPresenter: Presentation to the swapchain image has been " "dropped as the swapchain or the surface has become outdated"); // Note that the semaphore wait (followed by reset) has been enqueued, @@ -2108,8 +2137,8 @@ Presenter::PaintResult VulkanPresenter::PaintAndPresentImpl( } bool VulkanPresenter::InitializeSurfaceIndependent() { - const VulkanProvider::DeviceFunctions& dfn = provider_.dfn(); - VkDevice device = provider_.device(); + const VulkanDevice::Functions& dfn = vulkan_device_->functions(); + const VkDevice device = vulkan_device_->device(); VkDescriptorSetLayoutBinding guest_output_image_sampler_bindings[2]; guest_output_image_sampler_bindings[0].binding = 0; @@ -2119,8 +2148,8 @@ bool VulkanPresenter::InitializeSurfaceIndependent() { guest_output_image_sampler_bindings[0].stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT; guest_output_image_sampler_bindings[0].pImmutableSamplers = nullptr; - VkSampler sampler_linear_clamp = - provider_.GetHostSampler(VulkanProvider::HostSampler::kLinearClamp); + const VkSampler sampler_linear_clamp = + ui_samplers_->samplers()[UISamplers::kSamplerIndexLinearClampToEdge]; guest_output_image_sampler_bindings[1].binding = 1; guest_output_image_sampler_bindings[1].descriptorType 
= VK_DESCRIPTOR_TYPE_SAMPLER; @@ -2371,7 +2400,8 @@ bool VulkanPresenter::InitializeSurfaceIndependent() { // Initialize connection-independent parts of the painting context. for (size_t i = 0; i < paint_context_.submissions.size(); ++i) { - paint_context_.submissions[i] = PaintContext::Submission::Create(provider_); + paint_context_.submissions[i] = + PaintContext::Submission::Create(vulkan_device_); if (!paint_context_.submissions[i]) { return false; } @@ -2536,8 +2566,8 @@ VkPipeline VulkanPresenter::CreateGuestOutputPaintPipeline( pipeline_create_info.basePipelineHandle = VK_NULL_HANDLE; pipeline_create_info.basePipelineIndex = -1; - const VulkanProvider::DeviceFunctions& dfn = provider_.dfn(); - VkDevice device = provider_.device(); + const VulkanDevice::Functions& dfn = vulkan_device_->functions(); + const VkDevice device = vulkan_device_->device(); VkPipeline pipeline; if (dfn.vkCreateGraphicsPipelines(device, VK_NULL_HANDLE, 1, diff --git a/src/xenia/ui/vulkan/vulkan_presenter.h b/src/xenia/ui/vulkan/vulkan_presenter.h index 91e055c6d..5339b4551 100644 --- a/src/xenia/ui/vulkan/vulkan_presenter.h +++ b/src/xenia/ui/vulkan/vulkan_presenter.h @@ -19,10 +19,11 @@ #include #include "xenia/base/assert.h" -#include "xenia/base/platform.h" #include "xenia/ui/presenter.h" #include "xenia/ui/surface.h" -#include "xenia/ui/vulkan/vulkan_provider.h" +#include "xenia/ui/vulkan/ui_samplers.h" +#include "xenia/ui/vulkan/vulkan_device.h" +#include "xenia/ui/vulkan/vulkan_instance.h" #include "xenia/ui/vulkan/vulkan_submission_tracker.h" namespace xe { @@ -90,7 +91,7 @@ class VulkanPresenter final : public Presenter { static constexpr VkImageLayout kGuestOutputInternalLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL; - // The callback must use the graphics and compute queue 0 of the provider. + // The callback must use the graphics and compute queue 0 of the device. 
class VulkanGuestOutputRefreshContext final : public GuestOutputRefreshContext { public: @@ -123,9 +124,10 @@ class VulkanPresenter final : public Presenter { }; static std::unique_ptr Create( - HostGpuLossCallback host_gpu_loss_callback, VulkanProvider& provider) { - auto presenter = std::unique_ptr( - new VulkanPresenter(host_gpu_loss_callback, provider)); + HostGpuLossCallback host_gpu_loss_callback, + const VulkanDevice* vulkan_device, const UISamplers* ui_samplers) { + auto presenter = std::unique_ptr(new VulkanPresenter( + host_gpu_loss_callback, vulkan_device, ui_samplers)); if (!presenter->InitializeSurfaceIndependent()) { return nullptr; } @@ -134,29 +136,10 @@ class VulkanPresenter final : public Presenter { ~VulkanPresenter(); - VulkanProvider& provider() const { return provider_; } + const VulkanDevice* vulkan_device() const { return vulkan_device_; } static Surface::TypeFlags GetSurfaceTypesSupportedByInstance( - const VulkanProvider::InstanceExtensions& instance_extensions) { - if (!instance_extensions.khr_surface) { - return 0; - } - Surface::TypeFlags type_flags = 0; -#if XE_PLATFORM_ANDROID - if (instance_extensions.khr_android_surface) { - type_flags |= Surface::kTypeFlag_AndroidNativeWindow; - } -#elif XE_PLATFORM_GNU_LINUX - if (instance_extensions.khr_xcb_surface) { - type_flags |= Surface::kTypeFlag_XcbWindow; - } -#elif XE_PLATFORM_WIN32 - if (instance_extensions.khr_win32_surface) { - type_flags |= Surface::kTypeFlag_Win32Hwnd; - } -#endif - return type_flags; - } + const VulkanInstance::Extensions& instance_extensions); Surface::TypeFlags GetSupportedSurfaceTypes() const override; bool CaptureGuestOutput(RawImage& image_out) override; @@ -186,11 +169,12 @@ class VulkanPresenter final : public Presenter { class GuestOutputImage { public: static std::unique_ptr Create( - const VulkanProvider& provider, uint32_t width, uint32_t height) { + const VulkanDevice* const vulkan_device, const uint32_t width, + const uint32_t height) { 
assert_not_zero(width); assert_not_zero(height); auto image = std::unique_ptr( - new GuestOutputImage(provider, width, height)); + new GuestOutputImage(vulkan_device, width, height)); if (!image->Initialize()) { return nullptr; } @@ -208,16 +192,16 @@ class VulkanPresenter final : public Presenter { VkImageView view() const { return view_; } private: - GuestOutputImage(const VulkanProvider& provider, uint32_t width, - uint32_t height) - : provider_(provider) { + GuestOutputImage(const VulkanDevice* const vulkan_device, + const uint32_t width, const uint32_t height) + : vulkan_device_(vulkan_device) { extent_.width = width; extent_.height = height; } bool Initialize(); - const VulkanProvider& provider_; + const VulkanDevice* vulkan_device_; VkExtent2D extent_; VkImage image_ = VK_NULL_HANDLE; @@ -299,8 +283,9 @@ class VulkanPresenter final : public Presenter { class Submission { public: static std::unique_ptr Create( - const VulkanProvider& provider) { - auto submission = std::unique_ptr(new Submission(provider)); + const VulkanDevice* const vulkan_device) { + auto submission = + std::unique_ptr(new Submission(vulkan_device)); if (!submission->Initialize()) { return nullptr; } @@ -319,11 +304,11 @@ class VulkanPresenter final : public Presenter { } private: - explicit Submission(const VulkanProvider& provider) - : provider_(provider) {} + explicit Submission(const VulkanDevice* const vulkan_device) + : vulkan_device_(vulkan_device) {} bool Initialize(); - const VulkanProvider& provider_; + const VulkanDevice* vulkan_device_; VkSemaphore acquire_semaphore_ = VK_NULL_HANDLE; VkSemaphore present_semaphore_ = VK_NULL_HANDLE; VkCommandPool draw_command_pool_ = VK_NULL_HANDLE; @@ -376,8 +361,8 @@ class VulkanPresenter final : public Presenter { VkFramebuffer framebuffer; }; - explicit PaintContext(VulkanProvider& provider) - : provider(provider), submission_tracker(provider) {} + explicit PaintContext(const VulkanDevice* const vulkan_device) + : 
vulkan_device(vulkan_device), submission_tracker(vulkan_device) {} PaintContext(const PaintContext& paint_context) = delete; PaintContext& operator=(const PaintContext& paint_context) = delete; @@ -386,7 +371,7 @@ class VulkanPresenter final : public Presenter { // technically retire it, so it will be in an undefined state), and needs to // be destroyed externally no matter what the result is. static VkSwapchainKHR CreateSwapchainForVulkanSurface( - const VulkanProvider& provider, VkSurfaceKHR surface, uint32_t width, + const VulkanDevice* vulkan_device, VkSurfaceKHR surface, uint32_t width, uint32_t height, VkSwapchainKHR old_swapchain, uint32_t& present_queue_family_out, VkFormat& image_format_out, VkExtent2D& image_extent_out, bool& is_fifo_out, @@ -401,7 +386,7 @@ class VulkanPresenter final : public Presenter { // Connection-indepedent. - const VulkanProvider& provider; + const VulkanDevice* vulkan_device; std::array, kSubmissionCount> submissions; @@ -460,19 +445,25 @@ class VulkanPresenter final : public Presenter { }; explicit VulkanPresenter(HostGpuLossCallback host_gpu_loss_callback, - VulkanProvider& provider) + const VulkanDevice* vulkan_device, + const UISamplers* ui_samplers) : Presenter(host_gpu_loss_callback), - provider_(provider), - guest_output_image_refresher_submission_tracker_(provider), - ui_submission_tracker_(provider), - paint_context_(provider) {} + vulkan_device_(vulkan_device), + ui_samplers_(ui_samplers), + guest_output_image_refresher_submission_tracker_(vulkan_device), + ui_submission_tracker_(vulkan_device), + paint_context_(vulkan_device) { + assert_not_null(vulkan_device); + assert_not_null(ui_samplers); + } bool InitializeSurfaceIndependent(); [[nodiscard]] VkPipeline CreateGuestOutputPaintPipeline( GuestOutputPaintEffect effect, VkRenderPass render_pass); - VulkanProvider& provider_; + const VulkanDevice* vulkan_device_; + const UISamplers* ui_samplers_; // Static objects for guest output presentation, used only when painting 
the // main target (can be destroyed only after awaiting main target usage diff --git a/src/xenia/ui/vulkan/vulkan_provider.cc b/src/xenia/ui/vulkan/vulkan_provider.cc index 96265b223..41fa12fb8 100644 --- a/src/xenia/ui/vulkan/vulkan_provider.cc +++ b/src/xenia/ui/vulkan/vulkan_provider.cc @@ -2,1404 +2,115 @@ ****************************************************************************** * Xenia : Xbox 360 Emulator Research Project * ****************************************************************************** - * Copyright 2022 Ben Vanik. All rights reserved. * + * Copyright 2025 Ben Vanik. All rights reserved. * * Released under the BSD license - see LICENSE in the root for more details. * ****************************************************************************** */ #include "xenia/ui/vulkan/vulkan_provider.h" -#include -#include -#include -#include -#include #include -#include "xenia/base/assert.h" #include "xenia/base/cvar.h" #include "xenia/base/logging.h" -#include "xenia/base/math.h" -#include "xenia/base/platform.h" #include "xenia/ui/vulkan/vulkan_immediate_drawer.h" #include "xenia/ui/vulkan/vulkan_presenter.h" -#if XE_PLATFORM_LINUX -#include -#elif XE_PLATFORM_WIN32 -#include "xenia/base/platform_win.h" -#endif - DEFINE_bool( vulkan_validation, false, - "Enable Vulkan validation (VK_LAYER_KHRONOS_validation). 
Messages will be " - "written to the OS debug log without vulkan_debug_messenger or to the " - "Xenia log with it.", - "Vulkan"); -DEFINE_bool( - vulkan_debug_utils_messenger, false, - "Enable writing Vulkan debug messages via VK_EXT_debug_utils to the Xenia " - "log.", - "Vulkan"); -DEFINE_uint32( - vulkan_debug_utils_messenger_severity, 2, - "Maximum severity of messages to log via the Vulkan debug messenger: 0 - " - "error, 1 - warning, 2 - info, 3 - verbose.", - "Vulkan"); -DEFINE_bool(vulkan_debug_utils_names, false, - "Enable naming Vulkan objects via VK_EXT_debug_utils.", "Vulkan"); -DEFINE_int32( - vulkan_device, -1, - "Index of the physical device to use, or -1 for any compatible device.", + "Enable the Vulkan validation layer (VK_LAYER_KHRONOS_validation). " + "Messages will be written to the Xenia log if 'vulkan_log_debug_messages' " + "is enabled, or to the OS debug output otherwise.", "Vulkan"); +DEFINE_int32(vulkan_device, -1, + "Index of the preferred Vulkan physical device, or -1 to use any " + "compatible device.", + "Vulkan"); + namespace xe { namespace ui { namespace vulkan { std::unique_ptr VulkanProvider::Create( - bool is_surface_required) { - std::unique_ptr provider( - new VulkanProvider(is_surface_required)); - if (!provider->Initialize()) { - xe::FatalError( - "Unable to initialize Vulkan graphics subsystem.\n" - "\n" - "Ensure that you have the latest drivers for your GPU and it supports " - "Vulkan, and that you have the latest Vulkan runtime installed, which " - "can be downloaded at https://vulkan.lunarg.com/sdk/home.\n" - "\n" - "See https://xenia.jp/faq/ for more information and a list of " - "supported GPUs."); + const bool with_gpu_emulation, const bool with_presentation) { + std::unique_ptr provider(new VulkanProvider()); + + provider->vulkan_instance_ = + VulkanInstance::Create(with_presentation, cvars::vulkan_validation); + if (!provider->vulkan_instance_) { return nullptr; } - return provider; -} 
-VulkanProvider::~VulkanProvider() { - for (size_t i = 0; i < size_t(HostSampler::kCount); ++i) { - if (host_samplers_[i] != VK_NULL_HANDLE) { - dfn_.vkDestroySampler(device_, host_samplers_[i], nullptr); - } - } - - if (device_ != VK_NULL_HANDLE) { - ifn_.vkDestroyDevice(device_, nullptr); - } - if (instance_ != VK_NULL_HANDLE) { - if (debug_messenger_ != VK_NULL_HANDLE) { - ifn_.vkDestroyDebugUtilsMessengerEXT(instance_, debug_messenger_, - nullptr); - } - lfn_.vkDestroyInstance(instance_, nullptr); - } - -#if XE_PLATFORM_LINUX - if (library_) { - dlclose(library_); - } -#elif XE_PLATFORM_WIN32 - if (library_) { - FreeLibrary(library_); - } -#endif -} - -bool VulkanProvider::Initialize() { - renderdoc_api_.Initialize(); - - // Load the library. - bool library_functions_loaded = true; -#if XE_PLATFORM_LINUX -#if XE_PLATFORM_ANDROID - const char* libvulkan_name = "libvulkan.so"; -#else - const char* libvulkan_name = "libvulkan.so.1"; -#endif - // http://developer.download.nvidia.com/mobile/shield/assets/Vulkan/UsingtheVulkanAPI.pdf - library_ = dlopen(libvulkan_name, RTLD_NOW | RTLD_LOCAL); - if (!library_) { - XELOGE("Failed to load {}", libvulkan_name); - return false; - } -#define XE_VULKAN_LOAD_MODULE_LFN(name) \ - library_functions_loaded &= \ - (lfn_.name = PFN_##name(dlsym(library_, #name))) != nullptr; -#elif XE_PLATFORM_WIN32 - library_ = LoadLibraryA("vulkan-1.dll"); - if (!library_) { - XELOGE("Failed to load vulkan-1.dll"); - return false; - } -#define XE_VULKAN_LOAD_MODULE_LFN(name) \ - library_functions_loaded &= \ - (lfn_.name = PFN_##name(GetProcAddress(library_, #name))) != nullptr; -#else -#error No Vulkan library loading provided for the target platform. 
-#endif - XE_VULKAN_LOAD_MODULE_LFN(vkGetInstanceProcAddr); - XE_VULKAN_LOAD_MODULE_LFN(vkDestroyInstance); -#undef XE_VULKAN_LOAD_MODULE_LFN - if (!library_functions_loaded) { - XELOGE("Failed to get Vulkan library function pointers"); - return false; - } - library_functions_loaded &= - (lfn_.vkCreateInstance = PFN_vkCreateInstance(lfn_.vkGetInstanceProcAddr( - VK_NULL_HANDLE, "vkCreateInstance"))) != nullptr; - library_functions_loaded &= - (lfn_.vkEnumerateInstanceExtensionProperties = - PFN_vkEnumerateInstanceExtensionProperties( - lfn_.vkGetInstanceProcAddr( - VK_NULL_HANDLE, - "vkEnumerateInstanceExtensionProperties"))) != nullptr; - library_functions_loaded &= - (lfn_.vkEnumerateInstanceLayerProperties = - PFN_vkEnumerateInstanceLayerProperties(lfn_.vkGetInstanceProcAddr( - VK_NULL_HANDLE, "vkEnumerateInstanceLayerProperties"))) != - nullptr; - if (!library_functions_loaded) { - XELOGE( - "Failed to get Vulkan library function pointers via " - "vkGetInstanceProcAddr"); - return false; - } - lfn_.v_1_1.vkEnumerateInstanceVersion = PFN_vkEnumerateInstanceVersion( - lfn_.vkGetInstanceProcAddr(VK_NULL_HANDLE, "vkEnumerateInstanceVersion")); - - // Get the API version. - uint32_t instance_api_version; - if (!lfn_.v_1_1.vkEnumerateInstanceVersion || - lfn_.v_1_1.vkEnumerateInstanceVersion(&instance_api_version) != - VK_SUCCESS) { - instance_api_version = VK_API_VERSION_1_0; - } - XELOGVK("Vulkan instance version: {}.{}.{}", - VK_VERSION_MAJOR(instance_api_version), - VK_VERSION_MINOR(instance_api_version), - VK_VERSION_PATCH(instance_api_version)); - - // Get the instance extensions without layers, as well as extensions promoted - // to the core. 
- bool debug_utils_messenger_requested = cvars::vulkan_debug_utils_messenger; - bool debug_utils_names_requested = cvars::vulkan_debug_utils_names; - bool debug_utils_requested = - debug_utils_messenger_requested || debug_utils_names_requested; - std::memset(&instance_extensions_, 0, sizeof(instance_extensions_)); - if (instance_api_version >= VK_MAKE_API_VERSION(0, 1, 1, 0)) { - instance_extensions_.khr_get_physical_device_properties2 = true; - } - std::vector instance_extensions_enabled; - std::vector instance_or_layer_extension_properties; - VkResult instance_extensions_enumerate_result; - for (;;) { - uint32_t instance_extension_count = - uint32_t(instance_or_layer_extension_properties.size()); - bool instance_extensions_were_empty = !instance_extension_count; - instance_extensions_enumerate_result = - lfn_.vkEnumerateInstanceExtensionProperties( - nullptr, &instance_extension_count, - instance_extensions_were_empty - ? nullptr - : instance_or_layer_extension_properties.data()); - // If the original extension count was 0 (first call), SUCCESS is returned, - // not INCOMPLETE. - if (instance_extensions_enumerate_result == VK_SUCCESS || - instance_extensions_enumerate_result == VK_INCOMPLETE) { - instance_or_layer_extension_properties.resize(instance_extension_count); - if (instance_extensions_enumerate_result == VK_SUCCESS && - (!instance_extensions_were_empty || !instance_extension_count)) { - break; - } - } else { - break; - } - } - if (instance_extensions_enumerate_result == VK_SUCCESS) { - AccumulateInstanceExtensions(instance_or_layer_extension_properties.size(), - instance_or_layer_extension_properties.data(), - debug_utils_requested, instance_extensions_, - instance_extensions_enabled); - } - size_t instance_extensions_enabled_count_without_layers = - instance_extensions_enabled.size(); - InstanceExtensions instance_extensions_without_layers = instance_extensions_; - - // Get the instance layers and their extensions. 
- std::vector layer_properties; - VkResult layers_enumerate_result; - for (;;) { - uint32_t layer_count = uint32_t(layer_properties.size()); - bool layers_were_empty = !layer_count; - layers_enumerate_result = lfn_.vkEnumerateInstanceLayerProperties( - &layer_count, layers_were_empty ? nullptr : layer_properties.data()); - // If the original layer count was 0 (first call), SUCCESS is returned, not - // INCOMPLETE. - if (layers_enumerate_result == VK_SUCCESS || - layers_enumerate_result == VK_INCOMPLETE) { - layer_properties.resize(layer_count); - if (layers_enumerate_result == VK_SUCCESS && - (!layers_were_empty || !layer_count)) { - break; - } - } else { - break; - } - } - if (layers_enumerate_result != VK_SUCCESS) { - layer_properties.clear(); - } - struct { - bool khronos_validation; - } layer_enabled_flags = {}; - std::vector layers_enabled; - for (const VkLayerProperties& layer : layer_properties) { - // Check if the layer is needed. - // Checking if already enabled as an optimization to do fewer and fewer - // string comparisons. Adding literals to layers_enabled for the most C - // string lifetime safety. - if (!layer_enabled_flags.khronos_validation && cvars::vulkan_validation && - !std::strcmp(layer.layerName, "VK_LAYER_KHRONOS_validation")) { - layers_enabled.push_back("VK_LAYER_KHRONOS_validation"); - layer_enabled_flags.khronos_validation = true; - } else { - // Not enabling this layer, so don't need the extensions from it as well. - continue; - } - // Load extensions from the layer. - instance_or_layer_extension_properties.clear(); - for (;;) { - uint32_t instance_extension_count = - uint32_t(instance_or_layer_extension_properties.size()); - bool instance_extensions_were_empty = !instance_extension_count; - instance_extensions_enumerate_result = - lfn_.vkEnumerateInstanceExtensionProperties( - layer.layerName, &instance_extension_count, - instance_extensions_were_empty - ? 
nullptr - : instance_or_layer_extension_properties.data()); - // If the original extension count was 0 (first call), SUCCESS is - // returned, not INCOMPLETE. - if (instance_extensions_enumerate_result == VK_SUCCESS || - instance_extensions_enumerate_result == VK_INCOMPLETE) { - instance_or_layer_extension_properties.resize(instance_extension_count); - if (instance_extensions_enumerate_result == VK_SUCCESS && - (!instance_extensions_were_empty || !instance_extension_count)) { - break; - } - } else { - break; - } - } - if (instance_extensions_enumerate_result == VK_SUCCESS) { - AccumulateInstanceExtensions( - instance_or_layer_extension_properties.size(), - instance_or_layer_extension_properties.data(), debug_utils_requested, - instance_extensions_, instance_extensions_enabled); - } - } - - // Create the instance. - VkApplicationInfo application_info; - application_info.sType = VK_STRUCTURE_TYPE_APPLICATION_INFO; - application_info.pNext = nullptr; - application_info.pApplicationName = "Xenia"; - application_info.applicationVersion = 1; - application_info.pEngineName = nullptr; - application_info.engineVersion = 0; - // "apiVersion must be the highest version of Vulkan that the application is - // designed to use" - // "Vulkan 1.0 implementations were required to return - // VK_ERROR_INCOMPATIBLE_DRIVER if apiVersion was larger than 1.0" - application_info.apiVersion = - instance_api_version >= VK_MAKE_API_VERSION(0, 1, 1, 0) - ? VK_HEADER_VERSION_COMPLETE - : instance_api_version; - VkInstanceCreateInfo instance_create_info; - instance_create_info.sType = VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO; - instance_create_info.pNext = nullptr; - // TODO(Triang3l): Enumerate portability subset devices using - // VK_KHR_portability_enumeration when ready. 
- instance_create_info.flags = 0; - instance_create_info.pApplicationInfo = &application_info; - instance_create_info.enabledLayerCount = uint32_t(layers_enabled.size()); - instance_create_info.ppEnabledLayerNames = layers_enabled.data(); - instance_create_info.enabledExtensionCount = - uint32_t(instance_extensions_enabled.size()); - instance_create_info.ppEnabledExtensionNames = - instance_extensions_enabled.data(); - VkResult instance_create_result = - lfn_.vkCreateInstance(&instance_create_info, nullptr, &instance_); - if (instance_create_result != VK_SUCCESS) { - if ((instance_create_result == VK_ERROR_LAYER_NOT_PRESENT || - instance_create_result == VK_ERROR_EXTENSION_NOT_PRESENT) && - !layers_enabled.empty()) { - XELOGE("Failed to enable Vulkan layers"); - // Try to create without layers and their extensions. - std::memset(&layer_enabled_flags, 0, sizeof(layer_enabled_flags)); - instance_create_info.enabledLayerCount = 0; - instance_create_info.ppEnabledLayerNames = nullptr; - instance_create_info.enabledExtensionCount = - uint32_t(instance_extensions_enabled_count_without_layers); - instance_extensions_ = instance_extensions_without_layers; - instance_create_result = - lfn_.vkCreateInstance(&instance_create_info, nullptr, &instance_); - } - if (instance_create_result != VK_SUCCESS) { - XELOGE("Failed to create a Vulkan instance"); - return false; - } - } - - // Get instance functions. 
- std::memset(&ifn_, 0, sizeof(ifn_)); -#define XE_UI_VULKAN_FUNCTION(name) \ - functions_loaded &= (ifn_.name = PFN_##name(lfn_.vkGetInstanceProcAddr( \ - instance_, #name))) != nullptr; -#define XE_UI_VULKAN_FUNCTION_DONT_PROMOTE(extension_name, core_name) \ - functions_loaded &= \ - (ifn_.core_name = PFN_##core_name(lfn_.vkGetInstanceProcAddr( \ - instance_, #extension_name))) != nullptr; -#define XE_UI_VULKAN_FUNCTION_PROMOTE(extension_name, core_name) \ - functions_loaded &= \ - (ifn_.core_name = PFN_##core_name( \ - lfn_.vkGetInstanceProcAddr(instance_, #core_name))) != nullptr; - // Core - require unconditionally. - { - bool functions_loaded = true; -#include "xenia/ui/vulkan/functions/instance_1_0.inc" - if (!functions_loaded) { - XELOGE("Failed to get Vulkan instance function pointers"); - return false; - } - } - // Extensions - disable the specific extension if failed to get its functions. - if (instance_extensions_.ext_debug_utils) { - bool functions_loaded = true; -#include "xenia/ui/vulkan/functions/instance_ext_debug_utils.inc" - instance_extensions_.ext_debug_utils = functions_loaded; - } - if (instance_extensions_.khr_get_physical_device_properties2) { - bool functions_loaded = true; - if (instance_api_version >= VK_MAKE_API_VERSION(0, 1, 1, 0)) { -#define XE_UI_VULKAN_FUNCTION_PROMOTED XE_UI_VULKAN_FUNCTION_PROMOTE -#include "xenia/ui/vulkan/functions/instance_khr_get_physical_device_properties2.inc" -#undef XE_UI_VULKAN_FUNCTION_PROMOTED - } else { -#define XE_UI_VULKAN_FUNCTION_PROMOTED XE_UI_VULKAN_FUNCTION_DONT_PROMOTE -#include "xenia/ui/vulkan/functions/instance_khr_get_physical_device_properties2.inc" -#undef XE_UI_VULKAN_FUNCTION_PROMOTED - } - instance_extensions_.khr_get_physical_device_properties2 = functions_loaded; - } - if (instance_extensions_.khr_surface) { - bool functions_loaded = true; -#include "xenia/ui/vulkan/functions/instance_khr_surface.inc" - instance_extensions_.khr_surface = functions_loaded; - } -#if XE_PLATFORM_ANDROID 
- if (instance_extensions_.khr_android_surface) { - bool functions_loaded = true; -#include "xenia/ui/vulkan/functions/instance_khr_android_surface.inc" - instance_extensions_.khr_android_surface = functions_loaded; - } -#elif XE_PLATFORM_GNU_LINUX - if (instance_extensions_.khr_xcb_surface) { - bool functions_loaded = true; -#include "xenia/ui/vulkan/functions/instance_khr_xcb_surface.inc" - instance_extensions_.khr_xcb_surface = functions_loaded; - } -#elif XE_PLATFORM_WIN32 - if (instance_extensions_.khr_win32_surface) { - bool functions_loaded = true; -#include "xenia/ui/vulkan/functions/instance_khr_win32_surface.inc" - instance_extensions_.khr_win32_surface = functions_loaded; - } -#endif // XE_PLATFORM -#undef XE_UI_VULKAN_FUNCTION_PROMOTE -#undef XE_UI_VULKAN_FUNCTION_DONT_PROMOTE -#undef XE_UI_VULKAN_FUNCTION - - // Check if surface is supported after verifying that surface extension - // function pointers could be obtained. - if (is_surface_required_ && - !VulkanPresenter::GetSurfaceTypesSupportedByInstance( - instance_extensions_)) { - XELOGE( - "The Vulkan instance doesn't support the required surface extension " - "for the platform"); - return false; - } - - // Report instance information after verifying that extension function - // pointers could be obtained. - XELOGVK("Vulkan layers enabled by Xenia:"); - XELOGVK("* VK_LAYER_KHRONOS_validation: {}", - layer_enabled_flags.khronos_validation ? "yes" : "no"); - XELOGVK("Vulkan instance extensions:"); - XELOGVK("* VK_EXT_debug_utils: {}", - instance_extensions_.ext_debug_utils - ? "yes" - : (debug_utils_requested ? "no" : "not requested")); - XELOGVK( - "* VK_KHR_get_physical_device_properties2: {}", - instance_extensions_.khr_get_physical_device_properties2 ? "yes" : "no"); - XELOGVK("* VK_KHR_surface: {}", - instance_extensions_.khr_surface ? "yes" : "no"); -#if XE_PLATFORM_ANDROID - XELOGVK(" * VK_KHR_android_surface: {}", - instance_extensions_.khr_android_surface ? 
"yes" : "no"); -#elif XE_PLATFORM_GNU_LINUX - XELOGVK(" * VK_KHR_xcb_surface: {}", - instance_extensions_.khr_xcb_surface ? "yes" : "no"); -#elif XE_PLATFORM_WIN32 - XELOGVK(" * VK_KHR_win32_surface: {}", - instance_extensions_.khr_win32_surface ? "yes" : "no"); -#endif - - // Enable the debug messenger. - if (debug_utils_messenger_requested) { - if (instance_extensions_.ext_debug_utils) { - VkDebugUtilsMessengerCreateInfoEXT debug_messenger_create_info; - debug_messenger_create_info.sType = - VK_STRUCTURE_TYPE_DEBUG_UTILS_MESSENGER_CREATE_INFO_EXT; - debug_messenger_create_info.pNext = nullptr; - debug_messenger_create_info.flags = 0; - debug_messenger_create_info.messageSeverity = - VK_DEBUG_UTILS_MESSAGE_SEVERITY_ERROR_BIT_EXT; - if (cvars::vulkan_debug_utils_messenger_severity >= 1) { - debug_messenger_create_info.messageSeverity |= - VK_DEBUG_UTILS_MESSAGE_SEVERITY_WARNING_BIT_EXT; - if (cvars::vulkan_debug_utils_messenger_severity >= 2) { - debug_messenger_create_info.messageSeverity |= - VK_DEBUG_UTILS_MESSAGE_SEVERITY_INFO_BIT_EXT; - if (cvars::vulkan_debug_utils_messenger_severity >= 3) { - debug_messenger_create_info.messageSeverity |= - VK_DEBUG_UTILS_MESSAGE_SEVERITY_VERBOSE_BIT_EXT; - } - } - } - debug_messenger_create_info.messageType = - VK_DEBUG_UTILS_MESSAGE_TYPE_GENERAL_BIT_EXT | - VK_DEBUG_UTILS_MESSAGE_TYPE_VALIDATION_BIT_EXT | - VK_DEBUG_UTILS_MESSAGE_TYPE_PERFORMANCE_BIT_EXT; - debug_messenger_create_info.pfnUserCallback = DebugMessengerCallback; - debug_messenger_create_info.pUserData = this; - ifn_.vkCreateDebugUtilsMessengerEXT( - instance_, &debug_messenger_create_info, nullptr, &debug_messenger_); - } - if (debug_messenger_ != VK_NULL_HANDLE) { - XELOGVK("Vulkan debug messenger enabled"); - } else { - XELOGE("Failed to enable the Vulkan debug messenger"); - } - } - debug_names_used_ = - debug_utils_names_requested && instance_extensions_.ext_debug_utils; - - // Get the compatible physical device. 
std::vector physical_devices; - for (;;) { - uint32_t physical_device_count = uint32_t(physical_devices.size()); - bool physical_devices_were_empty = !physical_device_count; - VkResult physical_device_enumerate_result = ifn_.vkEnumeratePhysicalDevices( - instance_, &physical_device_count, - physical_devices_were_empty ? nullptr : physical_devices.data()); - // If the original device count was 0 (first call), SUCCESS is returned, not - // INCOMPLETE. - if (physical_device_enumerate_result == VK_SUCCESS || - physical_device_enumerate_result == VK_INCOMPLETE) { - physical_devices.resize(physical_device_count); - if (physical_device_enumerate_result == VK_SUCCESS && - (!physical_devices_were_empty || !physical_device_count)) { + provider->vulkan_instance_->EnumeratePhysicalDevices(physical_devices); + + if (physical_devices.empty()) { + XELOGW("No Vulkan physical devices available"); + return nullptr; + } + + const VulkanInstance::Functions& ifn = + provider->vulkan_instance_->functions(); + + XELOGW( + "Available Vulkan physical devices (use the 'vulkan_device' " + "configuration variable to force a specific device):"); + for (size_t physical_device_index = 0; + physical_device_index < physical_devices.size(); + ++physical_device_index) { + VkPhysicalDeviceProperties physical_device_properties; + ifn.vkGetPhysicalDeviceProperties(physical_devices[physical_device_index], + &physical_device_properties); + XELOGW("* {}: {}", physical_device_index, + physical_device_properties.deviceName); + } + + const int32_t preferred_physical_device_index = cvars::vulkan_device; + if (preferred_physical_device_index >= 0 && + uint32_t(preferred_physical_device_index) < physical_devices.size()) { + provider->vulkan_device_ = VulkanDevice::CreateIfSupported( + provider->vulkan_instance_.get(), + physical_devices[preferred_physical_device_index], with_gpu_emulation, + with_presentation); + } + + if (!provider->vulkan_device_) { + for (const VkPhysicalDevice physical_device : 
physical_devices) { + provider->vulkan_device_ = VulkanDevice::CreateIfSupported( + provider->vulkan_instance_.get(), physical_device, with_gpu_emulation, + with_presentation); + if (provider->vulkan_device_) { break; } - } else { - XELOGE("Failed to enumerate Vulkan physical devices"); - return false; } - } - if (physical_devices.empty()) { - XELOGE("No Vulkan physical devices are available"); - return false; - } - size_t physical_device_index_first, physical_device_index_last; - if (cvars::vulkan_device >= 0) { - physical_device_index_first = uint32_t(cvars::vulkan_device); - physical_device_index_last = physical_device_index_first; - if (physical_device_index_first >= physical_devices.size()) { - XELOGE( - "vulkan_device config variable is out of range, {} devices are " - "available", - physical_devices.size()); - return false; + + if (!provider->vulkan_device_) { + XELOGW( + "Couldn't choose a compatible Vulkan physical device or initialize a " + "Vulkan logical device"); + return nullptr; } - } else { - physical_device_index_first = 0; - physical_device_index_last = physical_devices.size() - 1; - } - for (size_t i = physical_device_index_first; i <= physical_device_index_last; - ++i) { - physical_device_ = physical_devices[i]; - TryCreateDevice(); - if (device_ != VK_NULL_HANDLE) { - break; - } - } - if (device_ == VK_NULL_HANDLE) { - XELOGE("Failed to select a compatible Vulkan physical device"); - physical_device_ = VK_NULL_HANDLE; - return false; } - // Create host-side samplers. 
- VkSamplerCreateInfo sampler_create_info = {}; - sampler_create_info.sType = VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO; - sampler_create_info.magFilter = VK_FILTER_NEAREST; - sampler_create_info.minFilter = VK_FILTER_NEAREST; - sampler_create_info.mipmapMode = VK_SAMPLER_MIPMAP_MODE_NEAREST; - sampler_create_info.addressModeU = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE; - sampler_create_info.addressModeV = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE; - sampler_create_info.addressModeW = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE; - sampler_create_info.maxLod = FLT_MAX; - if (dfn_.vkCreateSampler( - device_, &sampler_create_info, nullptr, - &host_samplers_[size_t(HostSampler::kNearestClamp)]) != VK_SUCCESS) { - XELOGE("Failed to create the nearest-neighbor clamping Vulkan sampler"); - return false; - } - sampler_create_info.magFilter = VK_FILTER_LINEAR; - sampler_create_info.minFilter = VK_FILTER_LINEAR; - sampler_create_info.mipmapMode = VK_SAMPLER_MIPMAP_MODE_LINEAR; - if (dfn_.vkCreateSampler( - device_, &sampler_create_info, nullptr, - &host_samplers_[size_t(HostSampler::kLinearClamp)]) != VK_SUCCESS) { - XELOGE("Failed to create the bilinear-filtering clamping Vulkan sampler"); - return false; - } - sampler_create_info.addressModeU = VK_SAMPLER_ADDRESS_MODE_REPEAT; - sampler_create_info.addressModeV = VK_SAMPLER_ADDRESS_MODE_REPEAT; - sampler_create_info.addressModeW = VK_SAMPLER_ADDRESS_MODE_REPEAT; - if (dfn_.vkCreateSampler( - device_, &sampler_create_info, nullptr, - &host_samplers_[size_t(HostSampler::kLinearRepeat)]) != VK_SUCCESS) { - XELOGE("Failed to create the bilinear-filtering repeating Vulkan sampler"); - return false; - } - sampler_create_info.magFilter = VK_FILTER_NEAREST; - sampler_create_info.minFilter = VK_FILTER_NEAREST; - sampler_create_info.mipmapMode = VK_SAMPLER_MIPMAP_MODE_NEAREST; - if (dfn_.vkCreateSampler( - device_, &sampler_create_info, nullptr, - &host_samplers_[size_t(HostSampler::kNearestRepeat)]) != VK_SUCCESS) { - XELOGE("Failed to create the 
nearest-neighbor repeating Vulkan sampler"); - return false; + if (with_presentation) { + provider->ui_samplers_ = UISamplers::Create(provider->vulkan_device_.get()); + if (!provider->ui_samplers_) { + return nullptr; + } } - return true; + return provider; } std::unique_ptr VulkanProvider::CreatePresenter( Presenter::HostGpuLossCallback host_gpu_loss_callback) { - return VulkanPresenter::Create(host_gpu_loss_callback, *this); + return VulkanPresenter::Create(host_gpu_loss_callback, vulkan_device(), + ui_samplers()); } std::unique_ptr VulkanProvider::CreateImmediateDrawer() { - return VulkanImmediateDrawer::Create(*this); -} - -void VulkanProvider::AccumulateInstanceExtensions( - size_t properties_count, const VkExtensionProperties* properties, - bool request_debug_utils, InstanceExtensions& instance_extensions, - std::vector& instance_extensions_enabled) { - for (size_t i = 0; i < properties_count; ++i) { - const char* instance_extension_name = properties[i].extensionName; - // Checking if already enabled as an optimization to do fewer and fewer - // string comparisons, as well as to skip adding extensions promoted to the - // core to instance_extensions_enabled. Adding literals to - // instance_extensions_enabled for the most C string lifetime safety. - if (request_debug_utils && !instance_extensions.ext_debug_utils && - !std::strcmp(instance_extension_name, "VK_EXT_debug_utils")) { - // Debug utilities are only enabled when needed. Overhead in Xenia not - // profiled, but better to avoid unless enabled by the user. 
- instance_extensions_enabled.push_back("VK_EXT_debug_utils"); - instance_extensions.ext_debug_utils = true; - } else if (!instance_extensions.khr_get_physical_device_properties2 && - !std::strcmp(instance_extension_name, - "VK_KHR_get_physical_device_properties2")) { - instance_extensions_enabled.push_back( - "VK_KHR_get_physical_device_properties2"); - instance_extensions.khr_get_physical_device_properties2 = true; - } else if (!instance_extensions.khr_surface && - !std::strcmp(instance_extension_name, "VK_KHR_surface")) { - instance_extensions_enabled.push_back("VK_KHR_surface"); - instance_extensions.khr_surface = true; - } else { -#if XE_PLATFORM_ANDROID - if (!instance_extensions.khr_android_surface && - !std::strcmp(instance_extension_name, "VK_KHR_android_surface")) { - instance_extensions_enabled.push_back("VK_KHR_android_surface"); - instance_extensions.khr_android_surface = true; - } -#elif XE_PLATFORM_GNU_LINUX - if (!instance_extensions.khr_xcb_surface && - !std::strcmp(instance_extension_name, "VK_KHR_xcb_surface")) { - instance_extensions_enabled.push_back("VK_KHR_xcb_surface"); - instance_extensions.khr_xcb_surface = true; - } -#elif XE_PLATFORM_WIN32 - if (!instance_extensions.khr_win32_surface && - !std::strcmp(instance_extension_name, "VK_KHR_win32_surface")) { - instance_extensions_enabled.push_back("VK_KHR_win32_surface"); - instance_extensions.khr_win32_surface = true; - } -#endif - } - } -} - -VkBool32 VKAPI_CALL VulkanProvider::DebugMessengerCallback( - VkDebugUtilsMessageSeverityFlagBitsEXT message_severity, - VkDebugUtilsMessageTypeFlagsEXT message_types, - const VkDebugUtilsMessengerCallbackDataEXT* callback_data, - void* user_data) { - const char* severity_string; - switch (message_severity) { - case VK_DEBUG_UTILS_MESSAGE_SEVERITY_VERBOSE_BIT_EXT: - severity_string = "verbose output"; - break; - case VK_DEBUG_UTILS_MESSAGE_SEVERITY_INFO_BIT_EXT: - severity_string = "info"; - break; - case VK_DEBUG_UTILS_MESSAGE_SEVERITY_WARNING_BIT_EXT: 
- severity_string = "warning"; - break; - case VK_DEBUG_UTILS_MESSAGE_SEVERITY_ERROR_BIT_EXT: - severity_string = "error"; - break; - default: - switch (xe::bit_count(uint32_t(message_severity))) { - case 0: - severity_string = "no-severity"; - break; - case 1: - severity_string = "unknown-severity"; - break; - default: - severity_string = "multi-severity"; - } - } - const char* type_string; - switch (message_types) { - case VK_DEBUG_UTILS_MESSAGE_TYPE_GENERAL_BIT_EXT: - type_string = "general"; - break; - case VK_DEBUG_UTILS_MESSAGE_TYPE_VALIDATION_BIT_EXT: - type_string = "validation"; - break; - case VK_DEBUG_UTILS_MESSAGE_TYPE_PERFORMANCE_BIT_EXT: - type_string = "performance"; - break; - default: - switch (xe::bit_count(uint32_t(message_types))) { - case 0: - type_string = "no-type"; - break; - case 1: - type_string = "unknown-type"; - break; - default: - type_string = "multi-type"; - } - } - XELOGVK("Vulkan {} {}: {}", type_string, severity_string, - callback_data->pMessage); - return VK_FALSE; -} - -void VulkanProvider::TryCreateDevice() { - assert_true(physical_device_ != VK_NULL_HANDLE); - assert_true(device_ == VK_NULL_HANDLE); - - static_assert(std::is_trivially_copyable_v, - "DeviceInfo must be safe to clear using memset"); - std::memset(&device_info_, 0, sizeof(device_info_)); - - VkDeviceCreateInfo device_create_info = { - VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO}; - - // Needed device extensions, properties and features. - - VkPhysicalDeviceProperties properties; - ifn_.vkGetPhysicalDeviceProperties(physical_device_, &properties); - - XELOGVK("Trying Vulkan device '{}'", properties.deviceName); - - if (!instance_extensions_.khr_get_physical_device_properties2) { - // Many extensions promoted to Vulkan 1.1 and newer require the instance - // extension VK_KHR_get_physical_device_properties2, which is itself in the - // core 1.0, although there's one instance for all physical devices. 
- properties.apiVersion = VK_MAKE_API_VERSION( - 0, 1, 0, VK_API_VERSION_PATCH(properties.apiVersion)); - } - - device_info_.apiVersion = properties.apiVersion; - - XELOGVK("Device Vulkan API version: {}.{}.{}", - VK_API_VERSION_MAJOR(properties.apiVersion), - VK_API_VERSION_MINOR(properties.apiVersion), - VK_API_VERSION_PATCH(properties.apiVersion)); - - std::vector extension_properties; - VkResult extensions_enumerate_result; - for (;;) { - uint32_t extension_count = uint32_t(extension_properties.size()); - bool extensions_were_empty = !extension_count; - extensions_enumerate_result = ifn_.vkEnumerateDeviceExtensionProperties( - physical_device_, nullptr, &extension_count, - extensions_were_empty ? nullptr : extension_properties.data()); - // If the original extension count was 0 (first call), SUCCESS is - // returned, not INCOMPLETE. - if (extensions_enumerate_result == VK_SUCCESS || - extensions_enumerate_result == VK_INCOMPLETE) { - extension_properties.resize(extension_count); - if (extensions_enumerate_result == VK_SUCCESS && - (!extensions_were_empty || !extension_count)) { - break; - } - } else { - break; - } - } - if (extensions_enumerate_result != VK_SUCCESS) { - XELOGE("Failed to query Vulkan device '{}' extensions", - properties.deviceName); - return; - } - - XELOGVK("Requested Vulkan device extensions:"); - - std::vector enabled_extensions; - - if (properties.apiVersion >= VK_MAKE_API_VERSION(0, 1, 1, 0)) { - device_info_.ext_1_1_VK_KHR_dedicated_allocation = true; - device_info_.ext_1_1_VK_KHR_get_memory_requirements2 = true; - device_info_.ext_1_1_VK_KHR_sampler_ycbcr_conversion = true; - device_info_.ext_1_1_VK_KHR_bind_memory2 = true; - } - if (properties.apiVersion >= VK_MAKE_API_VERSION(0, 1, 2, 0)) { - device_info_.ext_1_2_VK_KHR_sampler_mirror_clamp_to_edge = true; - device_info_.ext_1_2_VK_KHR_image_format_list = true; - device_info_.ext_1_2_VK_KHR_shader_float_controls = true; - device_info_.ext_1_2_VK_KHR_spirv_1_4 = true; - } - if 
(properties.apiVersion >= VK_MAKE_API_VERSION(0, 1, 3, 0)) { - device_info_.ext_1_3_VK_EXT_shader_demote_to_helper_invocation = true; - device_info_.ext_1_3_VK_KHR_maintenance4 = true; - } - - for (const VkExtensionProperties& extension : extension_properties) { - // Checking if already enabled as an optimization to do fewer and fewer - // string comparisons. -#define EXTENSION(name) \ - if (!device_info_.ext_##name && \ - !std::strcmp(extension.extensionName, #name)) { \ - enabled_extensions.push_back(#name); \ - device_info_.ext_##name = true; \ - XELOGVK("* " #name); \ - } -#define EXTENSION_PROMOTED(name, minor_version) \ - if (!device_info_.ext_1_##minor_version##_##name && \ - !std::strcmp(extension.extensionName, #name)) { \ - enabled_extensions.push_back(#name); \ - device_info_.ext_1_##minor_version##_##name = true; \ - XELOGVK("* " #name); \ - } - EXTENSION(VK_KHR_swapchain) - EXTENSION(VK_EXT_shader_stencil_export) - if (instance_extensions_.khr_get_physical_device_properties2) { - EXTENSION(VK_KHR_portability_subset) - EXTENSION(VK_EXT_memory_budget) - EXTENSION(VK_EXT_fragment_shader_interlock) - EXTENSION(VK_EXT_non_seamless_cube_map) - } else { - if (!std::strcmp(extension.extensionName, "VK_KHR_portability_subset")) { - XELOGW( - "Vulkan device '{}' is a portability subset device, but its " - "portability subset features can't be queried as the instance " - "doesn't support VK_KHR_get_physical_device_properties2", - properties.deviceName); - return; - } - } - if (properties.apiVersion < VK_MAKE_API_VERSION(0, 1, 1, 0)) { - EXTENSION_PROMOTED(VK_KHR_dedicated_allocation, 1) - EXTENSION_PROMOTED(VK_KHR_get_memory_requirements2, 1) - EXTENSION_PROMOTED(VK_KHR_bind_memory2, 1) - if (instance_extensions_.khr_get_physical_device_properties2) { - EXTENSION_PROMOTED(VK_KHR_sampler_ycbcr_conversion, 1) - } - } - if (properties.apiVersion < VK_MAKE_API_VERSION(0, 1, 2, 0)) { - EXTENSION_PROMOTED(VK_KHR_sampler_mirror_clamp_to_edge, 2) - 
EXTENSION_PROMOTED(VK_KHR_image_format_list, 2) - if (instance_extensions_.khr_get_physical_device_properties2) { - EXTENSION_PROMOTED(VK_KHR_shader_float_controls, 2) - } - if (properties.apiVersion >= VK_MAKE_API_VERSION(0, 1, 1, 0)) { - EXTENSION_PROMOTED(VK_KHR_spirv_1_4, 2) - } - } - if (properties.apiVersion < VK_MAKE_API_VERSION(0, 1, 3, 0)) { - if (instance_extensions_.khr_get_physical_device_properties2) { - EXTENSION_PROMOTED(VK_EXT_shader_demote_to_helper_invocation, 3) - } - if (properties.apiVersion >= VK_MAKE_API_VERSION(0, 1, 1, 0)) { - EXTENSION_PROMOTED(VK_KHR_maintenance4, 3) - } - } -#undef EXTENSION_PROMOTED -#undef EXTENSION - } - - if (is_surface_required_ && !device_info_.ext_VK_KHR_swapchain) { - XELOGVK("Vulkan device '{}' doesn't support presentation", - properties.deviceName); - return; - } - - XELOGVK("Requested properties and features of the Vulkan device:"); - - XELOGVK("* driverVersion: 0x{:X}", properties.driverVersion); - XELOGVK("* vendorID: 0x{:04X}", properties.vendorID); - XELOGVK("* deviceID: 0x{:04X}", properties.deviceID); - -#define LIMIT(name) \ - device_info_.name = properties.limits.name; \ - XELOGVK("* " #name ": {}", properties.limits.name); -#define LIMIT_SAMPLE_COUNTS(name) \ - device_info_.name = properties.limits.name; \ - XELOGVK("* " #name ": 0b{:b}", static_cast(properties.limits.name)); - LIMIT(maxImageDimension2D) - LIMIT(maxImageDimension3D) - LIMIT(maxImageDimensionCube) - LIMIT(maxImageArrayLayers) - LIMIT(maxStorageBufferRange) - LIMIT(maxSamplerAllocationCount) - LIMIT(maxPerStageDescriptorSamplers) - LIMIT(maxPerStageDescriptorStorageBuffers) - LIMIT(maxPerStageDescriptorSampledImages) - LIMIT(maxPerStageResources) - LIMIT(maxVertexOutputComponents) - LIMIT(maxTessellationEvaluationOutputComponents) - LIMIT(maxGeometryInputComponents) - LIMIT(maxGeometryOutputComponents) - LIMIT(maxGeometryTotalOutputComponents) - LIMIT(maxFragmentInputComponents) - LIMIT(maxFragmentCombinedOutputResources) - 
LIMIT(maxSamplerAnisotropy) - std::memcpy(device_info_.maxViewportDimensions, - properties.limits.maxViewportDimensions, - sizeof(device_info_.maxViewportDimensions)); - XELOGVK("* maxViewportDimensions: {} x {}", - properties.limits.maxViewportDimensions[0], - properties.limits.maxViewportDimensions[1]); - std::memcpy(device_info_.viewportBoundsRange, - properties.limits.viewportBoundsRange, - sizeof(device_info_.viewportBoundsRange)); - XELOGVK("* viewportBoundsRange: [{}, {}]", - properties.limits.viewportBoundsRange[0], - properties.limits.viewportBoundsRange[1]); - LIMIT(minUniformBufferOffsetAlignment) - LIMIT(minStorageBufferOffsetAlignment) - LIMIT(maxFramebufferWidth) - LIMIT(maxFramebufferHeight) - LIMIT_SAMPLE_COUNTS(framebufferColorSampleCounts) - LIMIT_SAMPLE_COUNTS(framebufferDepthSampleCounts) - LIMIT_SAMPLE_COUNTS(framebufferStencilSampleCounts) - LIMIT_SAMPLE_COUNTS(framebufferNoAttachmentsSampleCounts) - LIMIT_SAMPLE_COUNTS(sampledImageColorSampleCounts) - LIMIT_SAMPLE_COUNTS(sampledImageIntegerSampleCounts) - LIMIT_SAMPLE_COUNTS(sampledImageDepthSampleCounts) - LIMIT_SAMPLE_COUNTS(sampledImageStencilSampleCounts) - LIMIT(standardSampleLocations) - LIMIT(optimalBufferCopyOffsetAlignment) - LIMIT(optimalBufferCopyRowPitchAlignment) - LIMIT(nonCoherentAtomSize) -#undef LIMIT_SAMPLE_COUNTS -#undef LIMIT - - VkPhysicalDeviceFeatures supported_features; - ifn_.vkGetPhysicalDeviceFeatures(physical_device_, &supported_features); - // Enabling only needed features because drivers may take more optimal paths - // when certain features are disabled. Also, in VK_EXT_shader_object, which - // features are enabled effects the pipeline state must be set before drawing. 
- VkPhysicalDeviceFeatures enabled_features = {}; - -#define FEATURE(name) \ - if (supported_features.name) { \ - device_info_.name = true; \ - enabled_features.name = VK_TRUE; \ - XELOGVK("* " #name); \ - } - FEATURE(fullDrawIndexUint32) - FEATURE(independentBlend) - FEATURE(geometryShader) - FEATURE(tessellationShader) - FEATURE(sampleRateShading) - FEATURE(depthClamp) - FEATURE(fillModeNonSolid) - FEATURE(samplerAnisotropy) - FEATURE(vertexPipelineStoresAndAtomics) - FEATURE(fragmentStoresAndAtomics) - FEATURE(shaderClipDistance) - FEATURE(shaderCullDistance) - FEATURE(sparseBinding) - FEATURE(sparseResidencyBuffer) -#undef FEATURE - - VkPhysicalDeviceProperties2 properties2 = { - VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2}; -#define PROPERTIES2_DECLARE(type_suffix, structure_type_suffix) \ - VkPhysicalDevice##type_suffix supported_##type_suffix = { \ - VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_##structure_type_suffix}; -#define PROPERTIES2_ADD(type_suffix) \ - (supported_##type_suffix).pNext = properties2.pNext; \ - properties2.pNext = &(supported_##type_suffix); - - VkPhysicalDeviceFeatures2 features2 = { - VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2}; -#define FEATURES2_DECLARE(type_suffix, structure_type_suffix) \ - VkPhysicalDevice##type_suffix supported_##type_suffix = { \ - VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_##structure_type_suffix}; \ - VkPhysicalDevice##type_suffix enabled_##type_suffix = { \ - VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_##structure_type_suffix}; -#define FEATURES2_ADD(type_suffix) \ - (supported_##type_suffix).pNext = features2.pNext; \ - features2.pNext = &(supported_##type_suffix); \ - (enabled_##type_suffix).pNext = const_cast(device_create_info.pNext); \ - device_create_info.pNext = &(enabled_##type_suffix); - // VUID-VkDeviceCreateInfo-pNext: "If the pNext chain includes a - // VkPhysicalDeviceVulkan1XFeatures structure, then it must not include..." - // Enabling the features in Vulkan1XFeatures instead. 
-#define FEATURES2_ADD_PROMOTED(type_suffix, minor_version) \ - (supported_##type_suffix).pNext = features2.pNext; \ - features2.pNext = &(supported_##type_suffix); \ - if (properties.apiVersion < VK_MAKE_API_VERSION(0, 1, minor_version, 0)) { \ - (enabled_##type_suffix).pNext = \ - const_cast(device_create_info.pNext); \ - device_create_info.pNext = &(enabled_##type_suffix); \ - } - - FEATURES2_DECLARE(Vulkan11Features, VULKAN_1_1_FEATURES) - if (properties.apiVersion >= VK_MAKE_API_VERSION(0, 1, 1, 0)) { - FEATURES2_ADD(Vulkan11Features) - } - FEATURES2_DECLARE(Vulkan12Features, VULKAN_1_2_FEATURES) - if (properties.apiVersion >= VK_MAKE_API_VERSION(0, 1, 2, 0)) { - FEATURES2_ADD(Vulkan12Features) - } - FEATURES2_DECLARE(Vulkan13Features, VULKAN_1_3_FEATURES) - if (properties.apiVersion >= VK_MAKE_API_VERSION(0, 1, 3, 0)) { - FEATURES2_ADD(Vulkan13Features) - } - - FEATURES2_DECLARE(PortabilitySubsetFeaturesKHR, - PORTABILITY_SUBSET_FEATURES_KHR) - if (device_info_.ext_VK_KHR_portability_subset) { - FEATURES2_ADD(PortabilitySubsetFeaturesKHR) - } - PROPERTIES2_DECLARE(FloatControlsProperties, FLOAT_CONTROLS_PROPERTIES) - if (device_info_.ext_1_2_VK_KHR_shader_float_controls) { - PROPERTIES2_ADD(FloatControlsProperties) - } - FEATURES2_DECLARE(FragmentShaderInterlockFeaturesEXT, - FRAGMENT_SHADER_INTERLOCK_FEATURES_EXT) - if (device_info_.ext_VK_EXT_fragment_shader_interlock) { - FEATURES2_ADD(FragmentShaderInterlockFeaturesEXT) - } - FEATURES2_DECLARE(ShaderDemoteToHelperInvocationFeatures, - SHADER_DEMOTE_TO_HELPER_INVOCATION_FEATURES) - if (device_info_.ext_1_3_VK_EXT_shader_demote_to_helper_invocation) { - FEATURES2_ADD_PROMOTED(ShaderDemoteToHelperInvocationFeatures, 3) - } - FEATURES2_DECLARE(NonSeamlessCubeMapFeaturesEXT, - NON_SEAMLESS_CUBE_MAP_FEATURES_EXT) - if (device_info_.ext_VK_EXT_non_seamless_cube_map) { - FEATURES2_ADD(NonSeamlessCubeMapFeaturesEXT) - } - - if (instance_extensions_.khr_get_physical_device_properties2) { - 
ifn_.vkGetPhysicalDeviceProperties2(physical_device_, &properties2); - ifn_.vkGetPhysicalDeviceFeatures2(physical_device_, &features2); - } - -#undef FEATURES2_ADD_PROMOTED -#undef FEATURES2_ADD -#undef FEATURES2_DECLARE -#undef PROPERTIES2_ADD -#undef PROPERTIES2_DECLARE - - // VK_KHR_portability_subset removes functionality rather than adding it, so - // if the extension is not present, all its features are true by default. -#define PORTABILITY_SUBSET_FEATURE(name) \ - if (device_info_.ext_VK_KHR_portability_subset) { \ - if (supported_PortabilitySubsetFeaturesKHR.name) { \ - device_info_.name = true; \ - enabled_PortabilitySubsetFeaturesKHR.name = VK_TRUE; \ - XELOGVK("* " #name); \ - } \ - } else { \ - device_info_.name = true; \ - } - PORTABILITY_SUBSET_FEATURE(constantAlphaColorBlendFactors) - PORTABILITY_SUBSET_FEATURE(imageViewFormatReinterpretation) - PORTABILITY_SUBSET_FEATURE(imageViewFormatSwizzle) - PORTABILITY_SUBSET_FEATURE(pointPolygons) - PORTABILITY_SUBSET_FEATURE(separateStencilMaskRef) - PORTABILITY_SUBSET_FEATURE(shaderSampleRateInterpolationFunctions) - PORTABILITY_SUBSET_FEATURE(triangleFans) -#undef PORTABILITY_SUBSET_FEATURE - -#define EXTENSION_PROPERTY(type_suffix, name) \ - device_info_.name = supported_##type_suffix.name; \ - XELOGVK("* " #name ": {}", supported_##type_suffix.name); -#define EXTENSION_FEATURE(type_suffix, name) \ - if (supported_##type_suffix.name) { \ - device_info_.name = true; \ - enabled_##type_suffix.name = VK_TRUE; \ - XELOGVK("* " #name); \ - } -#define EXTENSION_FEATURE_PROMOTED(type_suffix, name, minor_version) \ - if (supported_##type_suffix.name) { \ - device_info_.name = true; \ - enabled_##type_suffix.name = VK_TRUE; \ - enabled_Vulkan1##minor_version##Features.name = VK_TRUE; \ - XELOGVK("* " #name); \ - } -#define EXTENSION_FEATURE_PROMOTED_AS_OPTIONAL(name, minor_version) \ - if (supported_Vulkan1##minor_version##Features.name) { \ - device_info_.name = true; \ - 
enabled_Vulkan1##minor_version##Features.name = VK_TRUE; \ - XELOGVK("* " #name); \ - } - - if (device_info_.ext_1_2_VK_KHR_sampler_mirror_clamp_to_edge) { - if (properties.apiVersion >= VK_MAKE_API_VERSION(0, 1, 2, 0)) { - // Promoted - the feature is optional, and must be enabled if the - // extension is also enabled - // (VUID-VkDeviceCreateInfo-ppEnabledExtensionNames-02832). - EXTENSION_FEATURE_PROMOTED_AS_OPTIONAL(samplerMirrorClampToEdge, 2) - } else { - // Extension - the feature is implied. - device_info_.samplerMirrorClampToEdge = true; - XELOGVK("* samplerMirrorClampToEdge"); - } - } - - if (device_info_.ext_1_2_VK_KHR_shader_float_controls) { - EXTENSION_PROPERTY(FloatControlsProperties, - shaderSignedZeroInfNanPreserveFloat32) - EXTENSION_PROPERTY(FloatControlsProperties, shaderDenormFlushToZeroFloat32) - EXTENSION_PROPERTY(FloatControlsProperties, shaderRoundingModeRTEFloat32) - } - - if (device_info_.ext_VK_EXT_fragment_shader_interlock) { - EXTENSION_FEATURE(FragmentShaderInterlockFeaturesEXT, - fragmentShaderSampleInterlock) - // fragmentShaderPixelInterlock is not needed by Xenia if - // fragmentShaderSampleInterlock is available as it accesses only per-sample - // data. - if (!device_info_.fragmentShaderSampleInterlock) { - EXTENSION_FEATURE(FragmentShaderInterlockFeaturesEXT, - fragmentShaderPixelInterlock) - } - } - - if (device_info_.ext_1_3_VK_EXT_shader_demote_to_helper_invocation) { - EXTENSION_FEATURE_PROMOTED(ShaderDemoteToHelperInvocationFeatures, - shaderDemoteToHelperInvocation, 3) - } - - if (device_info_.ext_VK_EXT_non_seamless_cube_map) { - EXTENSION_FEATURE(NonSeamlessCubeMapFeaturesEXT, nonSeamlessCubeMap) - } - -#undef EXTENSION_FEATURE_PROMOTED_AS_OPTIONAL -#undef EXTENSION_FEATURE_PROMOTED -#undef EXTENSION_FEATURE -#undef EXTENSION_PROPERTY - - // Memory types. 
- - VkPhysicalDeviceMemoryProperties memory_properties; - ifn_.vkGetPhysicalDeviceMemoryProperties(physical_device_, - &memory_properties); - for (uint32_t memory_type_index = 0; - memory_type_index < memory_properties.memoryTypeCount; - ++memory_type_index) { - VkMemoryPropertyFlags memory_property_flags = - memory_properties.memoryTypes[memory_type_index].propertyFlags; - uint32_t memory_type_bit = uint32_t(1) << memory_type_index; - if (memory_property_flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) { - device_info_.memory_types_device_local |= memory_type_bit; - } - if (memory_property_flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) { - device_info_.memory_types_host_visible |= memory_type_bit; - } - if (memory_property_flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) { - device_info_.memory_types_host_coherent |= memory_type_bit; - } - if (memory_property_flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) { - device_info_.memory_types_host_cached |= memory_type_bit; - } - } - - // Queue families. - - uint32_t queue_family_count; - ifn_.vkGetPhysicalDeviceQueueFamilyProperties(physical_device_, - &queue_family_count, nullptr); - std::vector queue_families_properties( - queue_family_count); - ifn_.vkGetPhysicalDeviceQueueFamilyProperties( - physical_device_, &queue_family_count, queue_families_properties.data()); - queue_families_properties.resize(queue_family_count); - - queue_families_.clear(); - queue_families_.resize(queue_family_count); - - queue_family_graphics_compute_ = UINT32_MAX; - queue_family_sparse_binding_ = UINT32_MAX; - if (device_info_.sparseBinding) { - // Prefer a queue family that supports both graphics/compute and sparse - // binding because in Xenia sparse binding is done serially with graphics - // work. 
- for (uint32_t queue_family_index = 0; - queue_family_index < queue_family_count; ++queue_family_index) { - VkQueueFlags queue_flags = - queue_families_properties[queue_family_index].queueFlags; - bool is_graphics_compute = - (queue_flags & (VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT)) == - (VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT); - bool is_sparse_binding = (queue_flags & VK_QUEUE_SPARSE_BINDING_BIT) == - VK_QUEUE_SPARSE_BINDING_BIT; - if (is_graphics_compute && is_sparse_binding) { - queue_family_graphics_compute_ = queue_family_index; - queue_family_sparse_binding_ = queue_family_index; - break; - } - // If can't do both, prefer the queue family that can do either with the - // lowest index. - if (is_graphics_compute && queue_family_graphics_compute_ == UINT32_MAX) { - queue_family_graphics_compute_ = queue_family_index; - } - if (is_sparse_binding && queue_family_sparse_binding_ == UINT32_MAX) { - queue_family_sparse_binding_ = queue_family_index; - } - } - } else { - for (uint32_t queue_family_index = 0; - queue_family_index < queue_family_count; ++queue_family_index) { - if ((queue_families_properties[queue_family_index].queueFlags & - (VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT)) == - (VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT)) { - queue_family_graphics_compute_ = queue_family_index; - break; - } - } - } - - if (queue_family_graphics_compute_ == UINT32_MAX) { - XELOGVK("Vulkan device '{}' doesn't have a graphics and compute queue", - properties.deviceName); - return; - } - queue_families_[queue_family_graphics_compute_].queue_count = std::max( - uint32_t(1), queue_families_[queue_family_graphics_compute_].queue_count); - - if (device_info_.sparseBinding && - queue_family_sparse_binding_ == UINT32_MAX) { - XELOGVK( - "Vulkan device '{}' reports that it supports sparse binding, but " - "doesn't have a queue that can perform sparse binding operations, " - "disabling sparse binding", - properties.deviceName); - device_info_.sparseBinding = false; 
- enabled_features.sparseBinding = false; - } - if (!enabled_features.sparseBinding) { - device_info_.sparseResidencyBuffer = false; - enabled_features.sparseResidencyBuffer = false; - } - if (queue_family_sparse_binding_ != UINT32_MAX) { - queue_families_[queue_family_sparse_binding_].queue_count = std::max( - uint32_t(1), queue_families_[queue_family_sparse_binding_].queue_count); - } - - // Request queues of all families potentially supporting presentation as which - // ones will actually be used depends on the surface object. - bool any_queue_potentially_supports_present = false; - if (instance_extensions_.khr_surface) { - for (uint32_t queue_family_index = 0; - queue_family_index < queue_family_count; ++queue_family_index) { -#if XE_PLATFORM_WIN32 - if (instance_extensions_.khr_win32_surface && - !ifn_.vkGetPhysicalDeviceWin32PresentationSupportKHR( - physical_device_, queue_family_index)) { - continue; - } -#endif - QueueFamily& queue_family = queue_families_[queue_family_index]; - // Requesting an additional queue in each family where possible so - // asynchronous presentation can potentially be done within a single queue - // family too. 
- queue_family.queue_count = - std::min(queue_families_properties[queue_family_index].queueCount, - queue_family.queue_count + uint32_t(1)); - queue_family.potentially_supports_present = true; - any_queue_potentially_supports_present = true; - } - } - if (!any_queue_potentially_supports_present && is_surface_required_) { - XELOGVK( - "Vulkan device '{}' doesn't have any queues supporting presentation", - properties.deviceName); - return; - } - - std::vector queue_create_infos; - queue_create_infos.reserve(queue_families_.size()); - uint32_t used_queue_count = 0; - uint32_t max_queue_count_per_family = 0; - for (uint32_t queue_family_index = 0; queue_family_index < queue_family_count; - ++queue_family_index) { - QueueFamily& queue_family = queue_families_[queue_family_index]; - queue_family.queue_first_index = used_queue_count; - if (!queue_family.queue_count) { - continue; - } - VkDeviceQueueCreateInfo& queue_create_info = - queue_create_infos.emplace_back(); - queue_create_info.sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO; - queue_create_info.pNext = nullptr; - queue_create_info.flags = 0; - queue_create_info.queueFamilyIndex = queue_family_index; - queue_create_info.queueCount = queue_family.queue_count; - // pQueuePriorities will be set later based on max_queue_count_per_family. - max_queue_count_per_family = - std::max(max_queue_count_per_family, queue_family.queue_count); - used_queue_count += queue_family.queue_count; - } - std::vector queue_priorities(max_queue_count_per_family, 1.0f); - for (VkDeviceQueueCreateInfo& queue_create_info : queue_create_infos) { - queue_create_info.pQueuePriorities = queue_priorities.data(); - } - - // Create the device. 
- - device_create_info.queueCreateInfoCount = - static_cast(queue_create_infos.size()); - device_create_info.pQueueCreateInfos = queue_create_infos.data(); - device_create_info.enabledExtensionCount = - static_cast(enabled_extensions.size()); - device_create_info.ppEnabledExtensionNames = enabled_extensions.data(); - device_create_info.pEnabledFeatures = &enabled_features; - VkResult create_device_result = ifn_.vkCreateDevice( - physical_device_, &device_create_info, nullptr, &device_); - if (create_device_result != VK_SUCCESS) { - XELOGE( - "Failed to create a Vulkan device for physical device '{}', result {}", - properties.deviceName, static_cast(create_device_result)); - device_ = VK_NULL_HANDLE; - return; - } - - // Device function pointers. - - std::memset(&dfn_, 0, sizeof(dfn_)); - - bool functions_loaded = true; - -#define XE_UI_VULKAN_FUNCTION(name) \ - functions_loaded &= \ - (dfn_.name = PFN_##name(ifn_.vkGetDeviceProcAddr(device_, #name))) != \ - nullptr; - - // Vulkan 1.0. -#include "xenia/ui/vulkan/functions/device_1_0.inc" - - // Promoted extensions when the API version they're promoted to is supported. -#define XE_UI_VULKAN_FUNCTION_PROMOTED(extension_name, core_name) \ - functions_loaded &= \ - (dfn_.core_name = PFN_##core_name( \ - ifn_.vkGetDeviceProcAddr(device_, #core_name))) != nullptr; - if (properties.apiVersion >= VK_MAKE_API_VERSION(0, 1, 1, 0)) { -#include "xenia/ui/vulkan/functions/device_khr_bind_memory2.inc" -#include "xenia/ui/vulkan/functions/device_khr_get_memory_requirements2.inc" - } - if (properties.apiVersion >= VK_MAKE_API_VERSION(0, 1, 3, 0)) { -#include "xenia/ui/vulkan/functions/device_khr_maintenance4.inc" - } -#undef XE_UI_VULKAN_FUNCTION_PROMOTED - - // Non-promoted extensions, and promoted extensions on API versions lower than - // the ones they were promoted to. 
-#define XE_UI_VULKAN_FUNCTION_PROMOTED(extension_name, core_name) \ - functions_loaded &= \ - (dfn_.core_name = PFN_##core_name( \ - ifn_.vkGetDeviceProcAddr(device_, #extension_name))) != nullptr; - if (device_info_.ext_VK_KHR_swapchain) { -#include "xenia/ui/vulkan/functions/device_khr_swapchain.inc" - } - if (properties.apiVersion < VK_MAKE_API_VERSION(0, 1, 1, 0)) { - if (device_info_.ext_1_1_VK_KHR_get_memory_requirements2) { -#include "xenia/ui/vulkan/functions/device_khr_get_memory_requirements2.inc" - } - if (device_info_.ext_1_1_VK_KHR_bind_memory2) { -#include "xenia/ui/vulkan/functions/device_khr_bind_memory2.inc" - } - } - if (properties.apiVersion < VK_MAKE_API_VERSION(0, 1, 3, 0)) { - if (device_info_.ext_1_3_VK_KHR_maintenance4) { -#include "xenia/ui/vulkan/functions/device_khr_maintenance4.inc" - } - } -#undef XE_UI_VULKAN_FUNCTION_PROMOTED - -#undef XE_UI_VULKAN_FUNCTION - - if (!functions_loaded) { - XELOGE("Failed to get all Vulkan device function pointers for '{}'", - properties.deviceName); - ifn_.vkDestroyDevice(device_, nullptr); - device_ = VK_NULL_HANDLE; - return; - } - - // Queues. 
- - queues_.reset(); - queues_ = std::make_unique(used_queue_count); - uint32_t queue_index = 0; - for (uint32_t queue_family_index = 0; queue_family_index < queue_family_count; - ++queue_family_index) { - const QueueFamily& queue_family = queue_families_[queue_family_index]; - if (!queue_family.queue_count) { - continue; - } - assert_true(queue_index == queue_family.queue_first_index); - for (uint32_t family_queue_index = 0; - family_queue_index < queue_family.queue_count; ++family_queue_index) { - VkQueue queue; - dfn_.vkGetDeviceQueue(device_, queue_family_index, family_queue_index, - &queue); - queues_[queue_index++].queue = queue; - } - } - - XELOGVK("Created a Vulkan device for physical device '{}'", - properties.deviceName); + return VulkanImmediateDrawer::Create(vulkan_device(), ui_samplers()); } } // namespace vulkan diff --git a/src/xenia/ui/vulkan/vulkan_provider.h b/src/xenia/ui/vulkan/vulkan_provider.h index 12d1710b1..d9ed8df7d 100644 --- a/src/xenia/ui/vulkan/vulkan_provider.h +++ b/src/xenia/ui/vulkan/vulkan_provider.h @@ -2,7 +2,7 @@ ****************************************************************************** * Xenia : Xbox 360 Emulator Research Project * ****************************************************************************** - * Copyright 2022 Ben Vanik. All rights reserved. * + * Copyright 2025 Ben Vanik. All rights reserved. * * Released under the BSD license - see LICENSE in the root for more details. 
* ****************************************************************************** */ @@ -10,46 +10,12 @@ #ifndef XENIA_UI_VULKAN_VULKAN_PROVIDER_H_ #define XENIA_UI_VULKAN_VULKAN_PROVIDER_H_ -#include -#include #include -#include -#include -#include -#include "xenia/base/assert.h" -#include "xenia/base/platform.h" #include "xenia/ui/graphics_provider.h" -#include "xenia/ui/renderdoc_api.h" - -#if XE_PLATFORM_ANDROID -#ifndef VK_USE_PLATFORM_ANDROID_KHR -#define VK_USE_PLATFORM_ANDROID_KHR 1 -#endif -#elif XE_PLATFORM_GNU_LINUX -#ifndef VK_USE_PLATFORM_XCB_KHR -#define VK_USE_PLATFORM_XCB_KHR 1 -#endif -#elif XE_PLATFORM_WIN32 -// Must be included before vulkan.h with VK_USE_PLATFORM_WIN32_KHR because it -// includes Windows.h too. -#include "xenia/base/platform_win.h" -#ifndef VK_USE_PLATFORM_WIN32_KHR -#define VK_USE_PLATFORM_WIN32_KHR 1 -#endif -#endif - -#ifndef VK_ENABLE_BETA_EXTENSIONS -#define VK_ENABLE_BETA_EXTENSIONS 1 -#endif -#ifndef VK_NO_PROTOTYPES -#define VK_NO_PROTOTYPES 1 -#endif -#include "third_party/Vulkan-Headers/include/vulkan/vulkan.h" - -#define XELOGVK XELOGI - -#define XE_UI_VULKAN_FINE_GRAINED_DRAW_SCOPES 1 +#include "xenia/ui/vulkan/ui_samplers.h" +#include "xenia/ui/vulkan/vulkan_device.h" +#include "xenia/ui/vulkan/vulkan_instance.h" namespace xe { namespace ui { @@ -57,163 +23,15 @@ namespace vulkan { class VulkanProvider : public GraphicsProvider { public: - struct DeviceInfo { - // "ext_1_X"-prefixed extension fields are set to true not only if the - // extension itself is actually exposed, but also if it was promoted to the - // device's API version. Therefore, merely the field being set to true - // doesn't imply that all the required features in the extension are - // supported - actual properties and features must be checked rather than - // the extension itself where they matter. + static std::unique_ptr Create(bool with_gpu_emulation, + bool with_presentation); - // Vulkan 1.0. 
+ VulkanInstance* vulkan_instance() const { return vulkan_instance_.get(); } - uint32_t memory_types_device_local; - uint32_t memory_types_host_visible; - uint32_t memory_types_host_coherent; - uint32_t memory_types_host_cached; + VulkanDevice* vulkan_device() const { return vulkan_device_.get(); } - uint32_t apiVersion; - uint32_t maxImageDimension2D; - uint32_t maxImageDimension3D; - uint32_t maxImageDimensionCube; - uint32_t maxImageArrayLayers; - uint32_t maxStorageBufferRange; - uint32_t maxSamplerAllocationCount; - uint32_t maxPerStageDescriptorSamplers; - uint32_t maxPerStageDescriptorStorageBuffers; - uint32_t maxPerStageDescriptorSampledImages; - uint32_t maxPerStageResources; - uint32_t maxVertexOutputComponents; - uint32_t maxTessellationEvaluationOutputComponents; - uint32_t maxGeometryInputComponents; - uint32_t maxGeometryOutputComponents; - uint32_t maxGeometryTotalOutputComponents; - uint32_t maxFragmentInputComponents; - uint32_t maxFragmentCombinedOutputResources; - float maxSamplerAnisotropy; - uint32_t maxViewportDimensions[2]; - float viewportBoundsRange[2]; - VkDeviceSize minUniformBufferOffsetAlignment; - VkDeviceSize minStorageBufferOffsetAlignment; - uint32_t maxFramebufferWidth; - uint32_t maxFramebufferHeight; - VkSampleCountFlags framebufferColorSampleCounts; - VkSampleCountFlags framebufferDepthSampleCounts; - VkSampleCountFlags framebufferStencilSampleCounts; - VkSampleCountFlags framebufferNoAttachmentsSampleCounts; - VkSampleCountFlags sampledImageColorSampleCounts; - VkSampleCountFlags sampledImageIntegerSampleCounts; - VkSampleCountFlags sampledImageDepthSampleCounts; - VkSampleCountFlags sampledImageStencilSampleCounts; - VkSampleCountFlags standardSampleLocations; - VkDeviceSize optimalBufferCopyOffsetAlignment; - VkDeviceSize optimalBufferCopyRowPitchAlignment; - VkDeviceSize nonCoherentAtomSize; - - bool fullDrawIndexUint32; - bool independentBlend; - bool geometryShader; - bool tessellationShader; - bool sampleRateShading; - 
bool depthClamp; - bool fillModeNonSolid; - bool samplerAnisotropy; - bool vertexPipelineStoresAndAtomics; - bool fragmentStoresAndAtomics; - bool shaderClipDistance; - bool shaderCullDistance; - bool sparseBinding; - bool sparseResidencyBuffer; - - // VK_KHR_swapchain (#2). - - bool ext_VK_KHR_swapchain; - - // VK_KHR_sampler_mirror_clamp_to_edge (#15, Vulkan 1.2). - - bool ext_1_2_VK_KHR_sampler_mirror_clamp_to_edge; - - bool samplerMirrorClampToEdge; - - // VK_KHR_dedicated_allocation (#128, Vulkan 1.1). - - bool ext_1_1_VK_KHR_dedicated_allocation; - - // VK_EXT_shader_stencil_export (#141). - - bool ext_VK_EXT_shader_stencil_export; - - // VK_KHR_get_memory_requirements2 (#147, Vulkan 1.1). - - bool ext_1_1_VK_KHR_get_memory_requirements2; - - // VK_KHR_image_format_list (#148, Vulkan 1.2). - - bool ext_1_2_VK_KHR_image_format_list; - - // VK_KHR_sampler_ycbcr_conversion (#157, Vulkan 1.1). - - bool ext_1_1_VK_KHR_sampler_ycbcr_conversion; - - // VK_KHR_bind_memory2 (#158, Vulkan 1.1). - - bool ext_1_1_VK_KHR_bind_memory2; - - // VK_KHR_portability_subset (#164). - - bool ext_VK_KHR_portability_subset; - - bool constantAlphaColorBlendFactors; - bool imageViewFormatReinterpretation; - bool imageViewFormatSwizzle; - bool pointPolygons; - bool separateStencilMaskRef; - bool shaderSampleRateInterpolationFunctions; - bool triangleFans; - - // VK_KHR_shader_float_controls (#198, Vulkan 1.2). - - bool ext_1_2_VK_KHR_shader_float_controls; - - bool shaderSignedZeroInfNanPreserveFloat32; - bool shaderDenormFlushToZeroFloat32; - bool shaderRoundingModeRTEFloat32; - - // VK_KHR_spirv_1_4 (#237, Vulkan 1.2). - - bool ext_1_2_VK_KHR_spirv_1_4; - - // VK_EXT_memory_budget (#238). - - bool ext_VK_EXT_memory_budget; - - // VK_EXT_fragment_shader_interlock (#252). - - bool ext_VK_EXT_fragment_shader_interlock; - - bool fragmentShaderSampleInterlock; - bool fragmentShaderPixelInterlock; - - // VK_EXT_shader_demote_to_helper_invocation (#277, Vulkan 1.3). 
- - bool ext_1_3_VK_EXT_shader_demote_to_helper_invocation; - - bool shaderDemoteToHelperInvocation; - - // VK_KHR_maintenance4 (#414, Vulkan 1.3). - - bool ext_1_3_VK_KHR_maintenance4; - - // VK_EXT_non_seamless_cube_map (#423). - - bool ext_VK_EXT_non_seamless_cube_map; - - bool nonSeamlessCubeMap; - }; - - ~VulkanProvider(); - - static std::unique_ptr Create(bool is_surface_required); + // nullptr if created without presentation support. + const UISamplers* ui_samplers() const { return ui_samplers_.get(); } std::unique_ptr CreatePresenter( Presenter::HostGpuLossCallback host_gpu_loss_callback = @@ -221,209 +39,16 @@ class VulkanProvider : public GraphicsProvider { std::unique_ptr CreateImmediateDrawer() override; - const RenderdocApi& renderdoc_api() const { return renderdoc_api_; } - - struct LibraryFunctions { - // From the module. - PFN_vkGetInstanceProcAddr vkGetInstanceProcAddr; - PFN_vkDestroyInstance vkDestroyInstance; - // From vkGetInstanceProcAddr. - PFN_vkCreateInstance vkCreateInstance; - PFN_vkEnumerateInstanceExtensionProperties - vkEnumerateInstanceExtensionProperties; - PFN_vkEnumerateInstanceLayerProperties vkEnumerateInstanceLayerProperties; - struct { - PFN_vkEnumerateInstanceVersion vkEnumerateInstanceVersion; - } v_1_1; - }; - const LibraryFunctions& lfn() const { return lfn_; } - - struct InstanceExtensions { - bool ext_debug_utils; - // Core since 1.1.0. - bool khr_get_physical_device_properties2; - - // Surface extensions. 
- bool khr_surface; -#if XE_PLATFORM_ANDROID - bool khr_android_surface; -#elif XE_PLATFORM_GNU_LINUX - bool khr_xcb_surface; -#elif XE_PLATFORM_WIN32 - bool khr_win32_surface; -#endif - }; - const InstanceExtensions& instance_extensions() const { - return instance_extensions_; - } - VkInstance instance() const { return instance_; } - struct InstanceFunctions { -#define XE_UI_VULKAN_FUNCTION(name) PFN_##name name; -#define XE_UI_VULKAN_FUNCTION_PROMOTED(extension_name, core_name) \ - PFN_##core_name core_name; -#include "xenia/ui/vulkan/functions/instance_1_0.inc" -#include "xenia/ui/vulkan/functions/instance_ext_debug_utils.inc" -#include "xenia/ui/vulkan/functions/instance_khr_get_physical_device_properties2.inc" -#include "xenia/ui/vulkan/functions/instance_khr_surface.inc" -#if XE_PLATFORM_ANDROID -#include "xenia/ui/vulkan/functions/instance_khr_android_surface.inc" -#elif XE_PLATFORM_GNU_LINUX -#include "xenia/ui/vulkan/functions/instance_khr_xcb_surface.inc" -#elif XE_PLATFORM_WIN32 -#include "xenia/ui/vulkan/functions/instance_khr_win32_surface.inc" -#endif -#undef XE_UI_VULKAN_FUNCTION_PROMOTED -#undef XE_UI_VULKAN_FUNCTION - }; - const InstanceFunctions& ifn() const { return ifn_; } - - VkPhysicalDevice physical_device() const { return physical_device_; } - - const DeviceInfo& device_info() const { return device_info_; } - - struct QueueFamily { - uint32_t queue_first_index = 0; - uint32_t queue_count = 0; - bool potentially_supports_present = false; - }; - const std::vector& queue_families() const { - return queue_families_; - } - // Required. - uint32_t queue_family_graphics_compute() const { - return queue_family_graphics_compute_; - } - // Optional, if sparse binding is supported (UINT32_MAX otherwise). May be the - // same as queue_family_graphics_compute_. 
- uint32_t queue_family_sparse_binding() const { - return queue_family_sparse_binding_; - } - - struct Queue { - VkQueue queue = VK_NULL_HANDLE; - std::recursive_mutex mutex; - }; - struct QueueAcquisition { - QueueAcquisition(std::unique_lock&& lock, - VkQueue queue) - : lock(std::move(lock)), queue(queue) {} - std::unique_lock lock; - VkQueue queue; - }; - QueueAcquisition AcquireQueue(uint32_t index) { - Queue& queue = queues_[index]; - return QueueAcquisition(std::unique_lock(queue.mutex), - queue.queue); - } - QueueAcquisition AcquireQueue(uint32_t family_index, uint32_t index) { - assert_true(family_index != UINT32_MAX); - return AcquireQueue(queue_families_[family_index].queue_first_index + - index); - } - - VkDevice device() const { return device_; } - struct DeviceFunctions { -#define XE_UI_VULKAN_FUNCTION(name) PFN_##name name; -#define XE_UI_VULKAN_FUNCTION_PROMOTED(extension_name, core_name) \ - PFN_##core_name core_name; -#include "xenia/ui/vulkan/functions/device_1_0.inc" -#include "xenia/ui/vulkan/functions/device_khr_bind_memory2.inc" -#include "xenia/ui/vulkan/functions/device_khr_get_memory_requirements2.inc" -#include "xenia/ui/vulkan/functions/device_khr_maintenance4.inc" -#include "xenia/ui/vulkan/functions/device_khr_swapchain.inc" -#undef XE_UI_VULKAN_FUNCTION_PROMOTED -#undef XE_UI_VULKAN_FUNCTION - }; - const DeviceFunctions& dfn() const { return dfn_; } - - template - void SetDeviceObjectName(VkObjectType type, T handle, - const char* name) const { - if (!debug_names_used_) { - return; - } - VkDebugUtilsObjectNameInfoEXT name_info; - name_info.sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_OBJECT_NAME_INFO_EXT; - name_info.pNext = nullptr; - name_info.objectType = type; - name_info.objectHandle = uint64_t(handle); - name_info.pObjectName = name; - ifn_.vkSetDebugUtilsObjectNameEXT(device_, &name_info); - } - - // Samplers that may be useful for host needs. 
Only these samplers should be - // used in host, non-emulation contexts, because the total number of samplers - // is heavily limited (4000) on Nvidia GPUs - the rest of samplers are - // allocated for emulation. - enum class HostSampler { - kNearestClamp, - kLinearClamp, - kNearestRepeat, - kLinearRepeat, - - kCount, - }; - VkSampler GetHostSampler(HostSampler sampler) const { - return host_samplers_[size_t(sampler)]; - } - private: - explicit VulkanProvider(bool is_surface_required) - : is_surface_required_(is_surface_required) {} + explicit VulkanProvider() = default; - bool Initialize(); + std::unique_ptr vulkan_instance_; - static void AccumulateInstanceExtensions( - size_t properties_count, const VkExtensionProperties* properties, - bool request_debug_utils, InstanceExtensions& instance_extensions, - std::vector& instance_extensions_enabled); + // Depends on the instance. + std::unique_ptr vulkan_device_; - static VkBool32 VKAPI_CALL DebugMessengerCallback( - VkDebugUtilsMessageSeverityFlagBitsEXT message_severity, - VkDebugUtilsMessageTypeFlagsEXT message_types, - const VkDebugUtilsMessengerCallbackDataEXT* callback_data, - void* user_data); - - // For the current `physical_device_`, sets up the members obtained from the - // physical device info, and tries to create a device and get the needed - // queues. - // The call is successful if `device_` is not VK_NULL_HANDLE as a result. 
- void TryCreateDevice(); - - bool is_surface_required_; - - RenderdocApi renderdoc_api_; - -#if XE_PLATFORM_LINUX - void* library_ = nullptr; -#elif XE_PLATFORM_WIN32 - HMODULE library_ = nullptr; -#endif - - LibraryFunctions lfn_ = {}; - - InstanceExtensions instance_extensions_; - VkInstance instance_ = VK_NULL_HANDLE; - InstanceFunctions ifn_; - - VkDebugUtilsMessengerEXT debug_messenger_ = VK_NULL_HANDLE; - bool debug_names_used_ = false; - - VkPhysicalDevice physical_device_ = VK_NULL_HANDLE; - - DeviceInfo device_info_ = {}; - - std::vector queue_families_; - uint32_t queue_family_graphics_compute_; - uint32_t queue_family_sparse_binding_; - - VkDevice device_ = VK_NULL_HANDLE; - DeviceFunctions dfn_ = {}; - - // Queues contain a mutex, can't use std::vector. - std::unique_ptr queues_; - - VkSampler host_samplers_[size_t(HostSampler::kCount)] = {}; + // Depends on the device. + std::unique_ptr ui_samplers_; }; } // namespace vulkan diff --git a/src/xenia/ui/vulkan/vulkan_submission_tracker.cc b/src/xenia/ui/vulkan/vulkan_submission_tracker.cc index f6ebe60bb..8fe4e412a 100644 --- a/src/xenia/ui/vulkan/vulkan_submission_tracker.cc +++ b/src/xenia/ui/vulkan/vulkan_submission_tracker.cc @@ -40,8 +40,8 @@ VulkanSubmissionTracker::FenceAcquisition::~FenceAcquisition() { void VulkanSubmissionTracker::Shutdown() { AwaitAllSubmissionsCompletion(); - const VulkanProvider::DeviceFunctions& dfn = provider_.dfn(); - VkDevice device = provider_.device(); + const VulkanDevice::Functions& dfn = vulkan_device_->functions(); + const VkDevice device = vulkan_device_->device(); for (VkFence fence : fences_reclaimed_) { dfn.vkDestroyFence(device, fence, nullptr); } @@ -71,8 +71,8 @@ void VulkanSubmissionTracker::FenceAcquisition::SubmissionFailedOrDropped() { uint64_t VulkanSubmissionTracker::UpdateAndGetCompletedSubmission() { if (!fences_pending_.empty()) { - const VulkanProvider::DeviceFunctions& dfn = provider_.dfn(); - VkDevice device = provider_.device(); + const 
VulkanDevice::Functions& dfn = vulkan_device_->functions(); + const VkDevice device = vulkan_device_->device(); while (!fences_pending_.empty()) { const std::pair& pending_pair = fences_pending_.front(); @@ -113,8 +113,8 @@ bool VulkanSubmissionTracker::AwaitSubmissionCompletion( // in submission order." size_t reclaim_end = fences_pending_.size(); if (reclaim_end) { - const VulkanProvider::DeviceFunctions& dfn = provider_.dfn(); - VkDevice device = provider_.device(); + const VulkanDevice::Functions& dfn = vulkan_device_->functions(); + const VkDevice device = vulkan_device_->device(); while (reclaim_end) { const std::pair& pending_pair = fences_pending_[reclaim_end - 1]; @@ -149,8 +149,8 @@ VulkanSubmissionTracker::AcquireFenceToAdvanceSubmission() { // Reclaim fences if the client only gets the completed submission index or // awaits in special cases such as shutdown. UpdateAndGetCompletedSubmission(); - const VulkanProvider::DeviceFunctions& dfn = provider_.dfn(); - VkDevice device = provider_.device(); + const VulkanDevice::Functions& dfn = vulkan_device_->functions(); + const VkDevice device = vulkan_device_->device(); if (!fences_reclaimed_.empty()) { VkFence reclaimed_fence = fences_reclaimed_.back(); if (dfn.vkResetFences(device, 1, &reclaimed_fence) == VK_SUCCESS) { diff --git a/src/xenia/ui/vulkan/vulkan_submission_tracker.h b/src/xenia/ui/vulkan/vulkan_submission_tracker.h index c1f055c87..c44d438e8 100644 --- a/src/xenia/ui/vulkan/vulkan_submission_tracker.h +++ b/src/xenia/ui/vulkan/vulkan_submission_tracker.h @@ -15,7 +15,8 @@ #include #include -#include "xenia/ui/vulkan/vulkan_provider.h" +#include "xenia/base/assert.h" +#include "xenia/ui/vulkan/vulkan_device.h" namespace xe { namespace ui { @@ -87,11 +88,16 @@ class VulkanSubmissionTracker { bool signal_failed_ = false; }; - VulkanSubmissionTracker(VulkanProvider& provider) : provider_(provider) {} + VulkanSubmissionTracker(const VulkanDevice* vulkan_device) + : vulkan_device_(vulkan_device) { + 
assert_not_null(vulkan_device); + } + VulkanSubmissionTracker(const VulkanSubmissionTracker& submission_tracker) = delete; VulkanSubmissionTracker& operator=( const VulkanSubmissionTracker& submission_tracker) = delete; + ~VulkanSubmissionTracker() { Shutdown(); } void Shutdown(); @@ -112,7 +118,7 @@ class VulkanSubmissionTracker { [[nodiscard]] FenceAcquisition AcquireFenceToAdvanceSubmission(); private: - VulkanProvider& provider_; + const VulkanDevice* vulkan_device_; uint64_t submission_current_ = 1; // Last submission with a successful fence signal as well as a successful // fence wait / query. diff --git a/src/xenia/ui/vulkan/vulkan_upload_buffer_pool.cc b/src/xenia/ui/vulkan/vulkan_upload_buffer_pool.cc index 3f4c139f4..26797ddad 100644 --- a/src/xenia/ui/vulkan/vulkan_upload_buffer_pool.cc +++ b/src/xenia/ui/vulkan/vulkan_upload_buffer_pool.cc @@ -24,12 +24,13 @@ namespace vulkan { // or flush range must be clamped to the actual allocation size as a special // case, but it's still unlikely that the allocation won't be aligned to it), so // try not to waste that padding. 
-VulkanUploadBufferPool::VulkanUploadBufferPool(const VulkanProvider& provider, - VkBufferUsageFlags usage, - size_t page_size) +VulkanUploadBufferPool::VulkanUploadBufferPool( + const VulkanDevice* const vulkan_device, const VkBufferUsageFlags usage, + const size_t page_size) : GraphicsUploadBufferPool(size_t( - util::GetMappableMemorySize(provider, VkDeviceSize(page_size)))), - provider_(provider), + xe::round_up(VkDeviceSize(page_size), + vulkan_device->properties().nonCoherentAtomSize))), + vulkan_device_(vulkan_device), usage_(usage) {} uint8_t* VulkanUploadBufferPool::Request(uint64_t submission_index, size_t size, @@ -72,8 +73,8 @@ VulkanUploadBufferPool::CreatePageImplementation() { return nullptr; } - const VulkanProvider::DeviceFunctions& dfn = provider_.dfn(); - VkDevice device = provider_.device(); + const VulkanDevice::Functions& dfn = vulkan_device_->functions(); + const VkDevice device = vulkan_device_->device(); VkBufferCreateInfo buffer_create_info; buffer_create_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO; @@ -94,8 +95,9 @@ VulkanUploadBufferPool::CreatePageImplementation() { if (memory_type_ == kMemoryTypeUnknown) { VkMemoryRequirements memory_requirements; dfn.vkGetBufferMemoryRequirements(device, buffer, &memory_requirements); - memory_type_ = util::ChooseHostMemoryType( - provider_, memory_requirements.memoryTypeBits, false); + memory_type_ = + util::ChooseHostMemoryType(vulkan_device_->memory_types(), + memory_requirements.memoryTypeBits, false); if (memory_type_ == UINT32_MAX) { XELOGE( "No host-visible memory types can store an Vulkan upload buffer with " @@ -117,7 +119,8 @@ VulkanUploadBufferPool::CreatePageImplementation() { dfn.vkGetBufferMemoryRequirements(device, buffer_expanded, &memory_requirements_expanded); uint32_t memory_type_expanded = util::ChooseHostMemoryType( - provider_, memory_requirements.memoryTypeBits, false); + vulkan_device_->memory_types(), memory_requirements.memoryTypeBits, + false); if 
(memory_requirements_expanded.size <= allocation_size_ && memory_type_expanded != UINT32_MAX) { page_size_ = size_t(allocation_size_); @@ -139,7 +142,7 @@ VulkanUploadBufferPool::CreatePageImplementation() { memory_allocate_info.allocationSize = allocation_size_; memory_allocate_info.memoryTypeIndex = memory_type_; VkMemoryDedicatedAllocateInfo memory_dedicated_allocate_info; - if (provider_.device_info().ext_1_1_VK_KHR_dedicated_allocation) { + if (vulkan_device_->extensions().ext_1_1_KHR_dedicated_allocation) { memory_allocate_info_last->pNext = &memory_dedicated_allocate_info; memory_allocate_info_last = reinterpret_cast( &memory_dedicated_allocate_info); @@ -176,19 +179,19 @@ VulkanUploadBufferPool::CreatePageImplementation() { return nullptr; } - return new VulkanPage(provider_, buffer, memory, mapping); + return new VulkanPage(vulkan_device_, buffer, memory, mapping); } void VulkanUploadBufferPool::FlushPageWrites(Page* page, size_t offset, size_t size) { util::FlushMappedMemoryRange( - provider_, static_cast(page)->memory_, memory_type_, - VkDeviceSize(offset), allocation_size_, VkDeviceSize(size)); + vulkan_device_, static_cast(page)->memory_, + memory_type_, VkDeviceSize(offset), allocation_size_, VkDeviceSize(size)); } VulkanUploadBufferPool::VulkanPage::~VulkanPage() { - const VulkanProvider::DeviceFunctions& dfn = provider_.dfn(); - VkDevice device = provider_.device(); + const VulkanDevice::Functions& dfn = vulkan_device_->functions(); + const VkDevice device = vulkan_device_->device(); dfn.vkDestroyBuffer(device, buffer_, nullptr); // Unmapping is done implicitly when the memory is freed. 
dfn.vkFreeMemory(device, memory_, nullptr); diff --git a/src/xenia/ui/vulkan/vulkan_upload_buffer_pool.h b/src/xenia/ui/vulkan/vulkan_upload_buffer_pool.h index 309c44ff1..41c0a7b8e 100644 --- a/src/xenia/ui/vulkan/vulkan_upload_buffer_pool.h +++ b/src/xenia/ui/vulkan/vulkan_upload_buffer_pool.h @@ -11,7 +11,7 @@ #define XENIA_UI_VULKAN_VULKAN_UPLOAD_BUFFER_POOL_H_ #include "xenia/ui/graphics_upload_buffer_pool.h" -#include "xenia/ui/vulkan/vulkan_provider.h" +#include "xenia/ui/vulkan/vulkan_device.h" namespace xe { namespace ui { @@ -19,7 +19,7 @@ namespace vulkan { class VulkanUploadBufferPool : public GraphicsUploadBufferPool { public: - VulkanUploadBufferPool(const VulkanProvider& provider, + VulkanUploadBufferPool(const VulkanDevice* vulkan_device, VkBufferUsageFlags usage, size_t page_size = kDefaultPageSize); @@ -37,20 +37,20 @@ class VulkanUploadBufferPool : public GraphicsUploadBufferPool { private: struct VulkanPage : public Page { // Takes ownership of the buffer and its memory and mapping. 
- VulkanPage(const VulkanProvider& provider, VkBuffer buffer, + VulkanPage(const VulkanDevice* vulkan_device, VkBuffer buffer, VkDeviceMemory memory, void* mapping) - : provider_(provider), + : vulkan_device_(vulkan_device), buffer_(buffer), memory_(memory), mapping_(mapping) {} ~VulkanPage() override; - const VulkanProvider& provider_; + const VulkanDevice* vulkan_device_; VkBuffer buffer_; VkDeviceMemory memory_; void* mapping_; }; - const VulkanProvider& provider_; + const VulkanDevice* vulkan_device_; VkDeviceSize allocation_size_; static constexpr uint32_t kMemoryTypeUnknown = UINT32_MAX; diff --git a/src/xenia/ui/vulkan/vulkan_util.cc b/src/xenia/ui/vulkan/vulkan_util.cc index d51edb2d6..f2b09e6e7 100644 --- a/src/xenia/ui/vulkan/vulkan_util.cc +++ b/src/xenia/ui/vulkan/vulkan_util.cc @@ -13,21 +13,23 @@ #include "xenia/base/assert.h" #include "xenia/base/math.h" -#include "xenia/ui/vulkan/vulkan_provider.h" +#include "xenia/ui/vulkan/vulkan_device.h" namespace xe { namespace ui { namespace vulkan { namespace util { -void FlushMappedMemoryRange(const VulkanProvider& provider, - VkDeviceMemory memory, uint32_t memory_type, - VkDeviceSize offset, VkDeviceSize memory_size, - VkDeviceSize size) { +void FlushMappedMemoryRange(const VulkanDevice* const vulkan_device, + const VkDeviceMemory memory, + const uint32_t memory_type, + const VkDeviceSize offset, + const VkDeviceSize memory_size, + const VkDeviceSize size) { assert_false(size != VK_WHOLE_SIZE && memory_size == VK_WHOLE_SIZE); assert_true(memory_size == VK_WHOLE_SIZE || offset <= memory_size); assert_true(memory_size == VK_WHOLE_SIZE || size <= memory_size - offset); - if (!size || (provider.device_info().memory_types_host_coherent & + if (!size || (vulkan_device->memory_types().host_coherent & (uint32_t(1) << memory_type))) { return; } @@ -37,27 +39,25 @@ void FlushMappedMemoryRange(const VulkanProvider& provider, range.memory = memory; range.offset = offset; range.size = size; - VkDeviceSize 
non_coherent_atom_size = - provider.device_info().nonCoherentAtomSize; - // On some Android implementations, nonCoherentAtomSize is 0, not 1. - if (non_coherent_atom_size > 1) { - range.offset = offset / non_coherent_atom_size * non_coherent_atom_size; - if (size != VK_WHOLE_SIZE) { - range.size = std::min(xe::round_up(offset + size, non_coherent_atom_size), - memory_size) - - range.offset; - } + const VkDeviceSize non_coherent_atom_size = + vulkan_device->properties().nonCoherentAtomSize; + range.offset = offset / non_coherent_atom_size * non_coherent_atom_size; + if (size != VK_WHOLE_SIZE) { + range.size = std::min(xe::round_up(offset + size, non_coherent_atom_size), + memory_size) - + range.offset; } - provider.dfn().vkFlushMappedMemoryRanges(provider.device(), 1, &range); + vulkan_device->functions().vkFlushMappedMemoryRanges(vulkan_device->device(), + 1, &range); } bool CreateDedicatedAllocationBuffer( - const VulkanProvider& provider, VkDeviceSize size, VkBufferUsageFlags usage, - MemoryPurpose memory_purpose, VkBuffer& buffer_out, - VkDeviceMemory& memory_out, uint32_t* memory_type_out, - VkDeviceSize* memory_size_out) { - const ui::vulkan::VulkanProvider::DeviceFunctions& dfn = provider.dfn(); - VkDevice device = provider.device(); + const VulkanDevice* const vulkan_device, const VkDeviceSize size, + const VkBufferUsageFlags usage, const MemoryPurpose memory_purpose, + VkBuffer& buffer_out, VkDeviceMemory& memory_out, + uint32_t* const memory_type_out, VkDeviceSize* const memory_size_out) { + const VulkanDevice::Functions& dfn = vulkan_device->functions(); + const VkDevice device = vulkan_device->device(); VkBufferCreateInfo buffer_create_info; buffer_create_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO; @@ -76,24 +76,23 @@ bool CreateDedicatedAllocationBuffer( VkMemoryRequirements memory_requirements; dfn.vkGetBufferMemoryRequirements(device, buffer, &memory_requirements); - uint32_t memory_type = ChooseMemoryType( - provider, 
memory_requirements.memoryTypeBits, memory_purpose); + uint32_t memory_type = + ChooseMemoryType(vulkan_device->memory_types(), + memory_requirements.memoryTypeBits, memory_purpose); if (memory_type == UINT32_MAX) { dfn.vkDestroyBuffer(device, buffer, nullptr); return false; } VkMemoryAllocateInfo memory_allocate_info; - VkMemoryAllocateInfo* memory_allocate_info_last = &memory_allocate_info; memory_allocate_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; memory_allocate_info.pNext = nullptr; memory_allocate_info.allocationSize = memory_requirements.size; memory_allocate_info.memoryTypeIndex = memory_type; VkMemoryDedicatedAllocateInfo memory_dedicated_allocate_info; - if (provider.device_info().ext_1_1_VK_KHR_dedicated_allocation) { - memory_allocate_info_last->pNext = &memory_dedicated_allocate_info; - memory_allocate_info_last = reinterpret_cast( - &memory_dedicated_allocate_info); + if (vulkan_device->extensions().ext_1_1_KHR_dedicated_allocation) { + memory_dedicated_allocate_info.pNext = memory_allocate_info.pNext; + memory_allocate_info.pNext = &memory_dedicated_allocate_info; memory_dedicated_allocate_info.sType = VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO; memory_dedicated_allocate_info.pNext = nullptr; @@ -124,15 +123,15 @@ bool CreateDedicatedAllocationBuffer( return true; } -bool CreateDedicatedAllocationImage(const VulkanProvider& provider, +bool CreateDedicatedAllocationImage(const VulkanDevice* const vulkan_device, const VkImageCreateInfo& create_info, - MemoryPurpose memory_purpose, + const MemoryPurpose memory_purpose, VkImage& image_out, VkDeviceMemory& memory_out, - uint32_t* memory_type_out, - VkDeviceSize* memory_size_out) { - const ui::vulkan::VulkanProvider::DeviceFunctions& dfn = provider.dfn(); - VkDevice device = provider.device(); + uint32_t* const memory_type_out, + VkDeviceSize* const memory_size_out) { + const VulkanDevice::Functions& dfn = vulkan_device->functions(); + const VkDevice device = vulkan_device->device(); 
VkImage image; if (dfn.vkCreateImage(device, &create_info, nullptr, &image) != VK_SUCCESS) { @@ -141,24 +140,23 @@ bool CreateDedicatedAllocationImage(const VulkanProvider& provider, VkMemoryRequirements memory_requirements; dfn.vkGetImageMemoryRequirements(device, image, &memory_requirements); - uint32_t memory_type = ChooseMemoryType( - provider, memory_requirements.memoryTypeBits, memory_purpose); + uint32_t memory_type = + ChooseMemoryType(vulkan_device->memory_types(), + memory_requirements.memoryTypeBits, memory_purpose); if (memory_type == UINT32_MAX) { dfn.vkDestroyImage(device, image, nullptr); return false; } VkMemoryAllocateInfo memory_allocate_info; - VkMemoryAllocateInfo* memory_allocate_info_last = &memory_allocate_info; memory_allocate_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; memory_allocate_info.pNext = nullptr; memory_allocate_info.allocationSize = memory_requirements.size; memory_allocate_info.memoryTypeIndex = memory_type; VkMemoryDedicatedAllocateInfo memory_dedicated_allocate_info; - if (provider.device_info().ext_1_1_VK_KHR_dedicated_allocation) { - memory_allocate_info_last->pNext = &memory_dedicated_allocate_info; - memory_allocate_info_last = reinterpret_cast( - &memory_dedicated_allocate_info); + if (vulkan_device->extensions().ext_1_1_KHR_dedicated_allocation) { + memory_dedicated_allocate_info.pNext = memory_allocate_info.pNext; + memory_allocate_info.pNext = &memory_dedicated_allocate_info; memory_dedicated_allocate_info.sType = VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO; memory_dedicated_allocate_info.pNext = nullptr; @@ -190,11 +188,10 @@ bool CreateDedicatedAllocationImage(const VulkanProvider& provider, } VkPipeline CreateComputePipeline( - const VulkanProvider& provider, VkPipelineLayout layout, - VkShaderModule shader, const VkSpecializationInfo* specialization_info, - const char* entry_point) { - const ui::vulkan::VulkanProvider::DeviceFunctions& dfn = provider.dfn(); - VkDevice device = provider.device(); + 
const VulkanDevice* const vulkan_device, const VkPipelineLayout layout, + const VkShaderModule shader, + const VkSpecializationInfo* const specialization_info, + const char* const entry_point) { VkComputePipelineCreateInfo pipeline_create_info; pipeline_create_info.sType = VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO; pipeline_create_info.pNext = nullptr; @@ -211,28 +208,27 @@ VkPipeline CreateComputePipeline( pipeline_create_info.basePipelineHandle = VK_NULL_HANDLE; pipeline_create_info.basePipelineIndex = -1; VkPipeline pipeline; - if (dfn.vkCreateComputePipelines(device, VK_NULL_HANDLE, 1, - &pipeline_create_info, nullptr, - &pipeline) != VK_SUCCESS) { + if (vulkan_device->functions().vkCreateComputePipelines( + vulkan_device->device(), VK_NULL_HANDLE, 1, &pipeline_create_info, + nullptr, &pipeline) != VK_SUCCESS) { return VK_NULL_HANDLE; } return pipeline; } VkPipeline CreateComputePipeline( - const VulkanProvider& provider, VkPipelineLayout layout, + const VulkanDevice* const vulkan_device, VkPipelineLayout layout, const uint32_t* shader_code, size_t shader_code_size_bytes, const VkSpecializationInfo* specialization_info, const char* entry_point) { - VkShaderModule shader = - CreateShaderModule(provider, shader_code, shader_code_size_bytes); + const VkShaderModule shader = + CreateShaderModule(vulkan_device, shader_code, shader_code_size_bytes); if (shader == VK_NULL_HANDLE) { return VK_NULL_HANDLE; } - const ui::vulkan::VulkanProvider::DeviceFunctions& dfn = provider.dfn(); - VkDevice device = provider.device(); - VkPipeline pipeline = CreateComputePipeline(provider, layout, shader, - specialization_info, entry_point); - dfn.vkDestroyShaderModule(device, shader, nullptr); + const VkPipeline pipeline = CreateComputePipeline( + vulkan_device, layout, shader, specialization_info, entry_point); + vulkan_device->functions().vkDestroyShaderModule(vulkan_device->device(), + shader, nullptr); return pipeline; } diff --git a/src/xenia/ui/vulkan/vulkan_util.h 
b/src/xenia/ui/vulkan/vulkan_util.h index 8415a5ffd..178180f3b 100644 --- a/src/xenia/ui/vulkan/vulkan_util.h +++ b/src/xenia/ui/vulkan/vulkan_util.h @@ -14,15 +14,16 @@ #include #include "xenia/base/math.h" -#include "xenia/ui/vulkan/vulkan_provider.h" +#include "xenia/ui/vulkan/vulkan_device.h" namespace xe { namespace ui { namespace vulkan { namespace util { -template -inline bool DestroyAndNullHandle(F* destroy_function, T& handle) { +template +inline bool DestroyAndNullHandle(DestroyFunction* const destroy_function, + Object& handle) { if (handle != VK_NULL_HANDLE) { destroy_function(handle, nullptr); handle = VK_NULL_HANDLE; @@ -31,8 +32,9 @@ inline bool DestroyAndNullHandle(F* destroy_function, T& handle) { return false; } -template -inline bool DestroyAndNullHandle(F* destroy_function, P parent, T& handle) { +template +inline bool DestroyAndNullHandle(DestroyFunction* const destroy_function, + const Parent parent, Object& handle) { if (handle != VK_NULL_HANDLE) { destroy_function(parent, handle, nullptr); handle = VK_NULL_HANDLE; @@ -47,28 +49,17 @@ enum class MemoryPurpose { kReadback, }; -inline VkDeviceSize GetMappableMemorySize(const VulkanProvider& provider, - VkDeviceSize size) { - VkDeviceSize non_coherent_atom_size = - provider.device_info().nonCoherentAtomSize; - // On some Android implementations, nonCoherentAtomSize is 0, not 1. 
- if (non_coherent_atom_size > 1) { - size = xe::round_up(size, non_coherent_atom_size, false); - } - return size; -} - -inline uint32_t ChooseHostMemoryType(const VulkanProvider& provider, - uint32_t supported_types, - bool is_readback) { - supported_types &= provider.device_info().memory_types_host_visible; - uint32_t host_cached = provider.device_info().memory_types_host_cached; +inline uint32_t ChooseHostMemoryType( + const VulkanDevice::MemoryTypes& memory_types, uint32_t supported_types, + const bool is_readback) { + supported_types &= memory_types.host_visible; uint32_t memory_type; // For upload, uncached is preferred so writes do not pollute the CPU cache. // For readback, cached is preferred so multiple CPU reads are fast. // If the preferred caching behavior is not available, pick any host-visible. if (xe::bit_scan_forward( - supported_types & (is_readback ? host_cached : ~host_cached), + supported_types & (is_readback ? memory_types.host_cached + : ~memory_types.host_cached), &memory_type) || xe::bit_scan_forward(supported_types, &memory_type)) { return memory_type; @@ -76,18 +67,24 @@ inline uint32_t ChooseHostMemoryType(const VulkanProvider& provider, return UINT32_MAX; } -inline uint32_t ChooseMemoryType(const VulkanProvider& provider, - uint32_t supported_types, - MemoryPurpose purpose) { +inline uint32_t ChooseMemoryType(const VulkanDevice::MemoryTypes& memory_types, + const uint32_t supported_types, + const MemoryPurpose purpose) { switch (purpose) { case MemoryPurpose::kDeviceLocal: { uint32_t memory_type; - return xe::bit_scan_forward(supported_types, &memory_type) ? 
memory_type - : UINT32_MAX; + if (xe::bit_scan_forward(supported_types & memory_types.device_local, + &memory_type)) { + return memory_type; + } + if (xe::bit_scan_forward(supported_types, &memory_type)) { + return memory_type; + } + return UINT32_MAX; } break; case MemoryPurpose::kUpload: case MemoryPurpose::kReadback: - return ChooseHostMemoryType(provider, supported_types, + return ChooseHostMemoryType(memory_types, supported_types, purpose == MemoryPurpose::kReadback); default: assert_unhandled_case(purpose); @@ -100,27 +97,28 @@ inline uint32_t ChooseMemoryType(const VulkanProvider& provider, // size (offset + size passed to vkFlushMappedMemoryRanges inside this function // must be either a multiple of nonCoherentAtomSize (but not exceeding the // memory size) or equal to the memory size). -void FlushMappedMemoryRange(const VulkanProvider& provider, +void FlushMappedMemoryRange(const VulkanDevice* vulkan_device, VkDeviceMemory memory, uint32_t memory_type, VkDeviceSize offset = 0, VkDeviceSize memory_size = VK_WHOLE_SIZE, VkDeviceSize size = VK_WHOLE_SIZE); -inline VkExtent2D GetMax2DFramebufferExtent(const VulkanProvider& provider) { - const VulkanProvider::DeviceInfo& device_info = provider.device_info(); +inline VkExtent2D GetMax2DFramebufferExtent( + const VulkanDevice::Properties& device_properties) { VkExtent2D max_extent; - max_extent.width = std::min(device_info.maxFramebufferWidth, - device_info.maxImageDimension2D); - max_extent.height = std::min(device_info.maxFramebufferHeight, - device_info.maxImageDimension2D); + max_extent.width = std::min(device_properties.maxFramebufferWidth, + device_properties.maxImageDimension2D); + max_extent.height = std::min(device_properties.maxFramebufferHeight, + device_properties.maxImageDimension2D); return max_extent; } inline VkImageSubresourceRange InitializeSubresourceRange( - VkImageAspectFlags aspect_mask = VK_IMAGE_ASPECT_COLOR_BIT, - uint32_t base_mip_level = 0, uint32_t level_count = 
VK_REMAINING_MIP_LEVELS, - uint32_t base_array_layer = 0, - uint32_t layer_count = VK_REMAINING_ARRAY_LAYERS) { + const VkImageAspectFlags aspect_mask = VK_IMAGE_ASPECT_COLOR_BIT, + const uint32_t base_mip_level = 0, + const uint32_t level_count = VK_REMAINING_MIP_LEVELS, + const uint32_t base_array_layer = 0, + const uint32_t layer_count = VK_REMAINING_ARRAY_LAYERS) { VkImageSubresourceRange range; range.aspectMask = aspect_mask; range.baseMipLevel = base_mip_level; @@ -133,13 +131,16 @@ inline VkImageSubresourceRange InitializeSubresourceRange( // Creates a buffer backed by a dedicated allocation. The allocation size will // NOT be aligned to nonCoherentAtomSize - if mapping or flushing not the whole // size, memory_size_out must be used for clamping the range. -bool CreateDedicatedAllocationBuffer( - const VulkanProvider& provider, VkDeviceSize size, VkBufferUsageFlags usage, - MemoryPurpose memory_purpose, VkBuffer& buffer_out, - VkDeviceMemory& memory_out, uint32_t* memory_type_out = nullptr, - VkDeviceSize* memory_size_out = nullptr); +bool CreateDedicatedAllocationBuffer(const VulkanDevice* vulkan_device, + VkDeviceSize size, + VkBufferUsageFlags usage, + MemoryPurpose memory_purpose, + VkBuffer& buffer_out, + VkDeviceMemory& memory_out, + uint32_t* memory_type_out = nullptr, + VkDeviceSize* memory_size_out = nullptr); -bool CreateDedicatedAllocationImage(const VulkanProvider& provider, +bool CreateDedicatedAllocationImage(const VulkanDevice* vulkan_device, const VkImageCreateInfo& create_info, MemoryPurpose memory_purpose, VkImage& image_out, @@ -149,9 +150,9 @@ bool CreateDedicatedAllocationImage(const VulkanProvider& provider, // Explicitly accepting const uint32_t* to make sure attention is paid to the // alignment where this is called for safety on different host architectures. 
-inline VkShaderModule CreateShaderModule(const VulkanProvider& provider, - const uint32_t* code, - size_t code_size_bytes) { +inline VkShaderModule CreateShaderModule( + const VulkanDevice* const vulkan_device, const uint32_t* const code, + const size_t code_size_bytes) { VkShaderModuleCreateInfo shader_module_create_info; shader_module_create_info.sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO; shader_module_create_info.pNext = nullptr; @@ -159,20 +160,20 @@ inline VkShaderModule CreateShaderModule(const VulkanProvider& provider, shader_module_create_info.codeSize = code_size_bytes; shader_module_create_info.pCode = code; VkShaderModule shader_module; - return provider.dfn().vkCreateShaderModule( - provider.device(), &shader_module_create_info, nullptr, + return vulkan_device->functions().vkCreateShaderModule( + vulkan_device->device(), &shader_module_create_info, nullptr, &shader_module) == VK_SUCCESS ? shader_module : VK_NULL_HANDLE; } VkPipeline CreateComputePipeline( - const VulkanProvider& provider, VkPipelineLayout layout, + const VulkanDevice* vulkan_device, VkPipelineLayout layout, VkShaderModule shader, const VkSpecializationInfo* specialization_info = nullptr, const char* entry_point = "main"); VkPipeline CreateComputePipeline( - const VulkanProvider& provider, VkPipelineLayout layout, + const VulkanDevice* vulkan_device, VkPipelineLayout layout, const uint32_t* shader_code, size_t shader_code_size_bytes, const VkSpecializationInfo* specialization_info = nullptr, const char* entry_point = "main"); diff --git a/src/xenia/ui/vulkan/vulkan_window_demo.cc b/src/xenia/ui/vulkan/vulkan_window_demo.cc index f4ba6a669..796ef25aa 100644 --- a/src/xenia/ui/vulkan/vulkan_window_demo.cc +++ b/src/xenia/ui/vulkan/vulkan_window_demo.cc @@ -34,7 +34,7 @@ class VulkanWindowDemoApp final : public WindowDemoApp { std::unique_ptr VulkanWindowDemoApp::CreateGraphicsProvider() const { - return VulkanProvider::Create(true); + return VulkanProvider::Create(false, true); 
} } // namespace vulkan