vk: Fix stale pointer reference when back-to-back partial updates are interrupted by a shader swap

This commit is contained in:
kd-11 2025-08-05 01:19:56 +03:00 committed by kd-11
parent 052ebe5acf
commit d68e4acb50
4 changed files with 67 additions and 46 deletions

View file

@@ -472,20 +472,8 @@ namespace vk
return m_descriptor_pool->allocate(m_descriptor_set_layout); return m_descriptor_pool->allocate(m_descriptor_set_layout);
} }
VkDescriptorSet descriptor_table_t::commit() void descriptor_table_t::create_descriptor_template()
{ {
if (!m_descriptor_set)
{
m_any_descriptors_dirty = true;
std::fill(m_descriptors_dirty.begin(), m_descriptors_dirty.end(), false);
}
// Check if we need to actually open a new set
if (!m_any_descriptors_dirty)
{
return m_descriptor_set.value();
}
auto push_descriptor_slot = [this](unsigned idx) auto push_descriptor_slot = [this](unsigned idx)
{ {
const auto& slot = m_descriptor_slots[idx]; const auto& slot = m_descriptor_slots[idx];
@@ -519,6 +507,28 @@ namespace vk
fmt::throw_exception("Unexpected descriptor structure at index %u", idx); fmt::throw_exception("Unexpected descriptor structure at index %u", idx);
}; };
m_descriptor_template_typemask = 0u;
for (unsigned i = 0; i < m_descriptor_slots.size(); ++i)
{
m_descriptor_template_typemask |= (1u << static_cast<u32>(m_descriptor_types[i]));
if (m_descriptors_dirty[i])
{
// Push
push_descriptor_slot(i);
m_descriptors_dirty[i] = false;
continue;
}
push_descriptor_slot(i);
}
m_descriptor_template = m_descriptor_set.peek();
m_descriptor_template_cache_id = m_descriptor_set.cache_id();
}
void descriptor_table_t::update_descriptor_template()
{
auto update_descriptor_slot = [this](unsigned idx) auto update_descriptor_slot = [this](unsigned idx)
{ {
const auto& slot = m_descriptor_slots[idx]; const auto& slot = m_descriptor_slots[idx];
@@ -553,45 +563,50 @@ namespace vk
fmt::throw_exception("Unexpected descriptor structure at index %u", idx); fmt::throw_exception("Unexpected descriptor structure at index %u", idx);
}; };
const bool cache_is_valid = m_descriptor_template_cache_id == m_descriptor_set.cache_id();
for (unsigned i = 0; i < m_descriptor_slots.size(); ++i)
{
m_descriptor_template[i].dstSet = m_descriptor_set.value();
if (!m_descriptors_dirty[i] && cache_is_valid)
{
continue;
}
// Update
update_descriptor_slot(i);
m_descriptors_dirty[i] = false;
}
// Push
m_descriptor_set.push(m_descriptor_template, m_descriptor_template_typemask);
m_descriptor_template_cache_id = m_descriptor_set.cache_id();
}
VkDescriptorSet descriptor_table_t::commit()
{
if (!m_descriptor_set)
{
m_any_descriptors_dirty = true;
std::fill(m_descriptors_dirty.begin(), m_descriptors_dirty.end(), false);
}
// Check if we need to actually open a new set
if (!m_any_descriptors_dirty)
{
return m_descriptor_set.value();
}
m_descriptor_set = allocate_descriptor_set(); m_descriptor_set = allocate_descriptor_set();
if (!m_descriptor_template.empty()) [[ likely ]] if (!m_descriptor_template.empty()) [[ likely ]]
{ {
for (unsigned i = 0; i < m_descriptor_slots.size(); ++i) // Run pointer updates. Optimized for cached back-to-back updates which are quite frequent.
{ update_descriptor_template();
m_descriptor_template[i].dstSet = m_descriptor_set.value();
if (!m_descriptors_dirty[i])
{
continue;
}
// Update
update_descriptor_slot(i);
m_descriptors_dirty[i] = false;
}
// Push
m_descriptor_set.push(m_descriptor_template, m_descriptor_template_typemask);
} }
else else
{ {
m_descriptor_template_typemask = 0u; // Creating the template also seeds initial values
create_descriptor_template();
for (unsigned i = 0; i < m_descriptor_slots.size(); ++i)
{
m_descriptor_template_typemask |= (1u << static_cast<u32>(m_descriptor_types[i]));
if (m_descriptors_dirty[i])
{
// Push
push_descriptor_slot(i);
m_descriptors_dirty[i] = false;
continue;
}
push_descriptor_slot(i);
}
m_descriptor_template = m_descriptor_set.peek();
} }
m_descriptor_set.on_bind(); m_descriptor_set.on_bind();

View file

@@ -134,6 +134,7 @@ namespace vk
u32 m_descriptor_template_typemask = 0u; u32 m_descriptor_template_typemask = 0u;
rsx::simple_array<VkWriteDescriptorSet> m_descriptor_template; rsx::simple_array<VkWriteDescriptorSet> m_descriptor_template;
u64 m_descriptor_template_cache_id = umax;
std::vector<descriptor_slot_t> m_descriptor_slots; std::vector<descriptor_slot_t> m_descriptor_slots;
std::vector<bool> m_descriptors_dirty; std::vector<bool> m_descriptors_dirty;
@@ -148,6 +149,8 @@ namespace vk
void create_descriptor_set_layout(); void create_descriptor_set_layout();
void create_descriptor_pool(); void create_descriptor_pool();
void create_descriptor_template();
void update_descriptor_template();
VkDescriptorSet allocate_descriptor_set(); VkDescriptorSet allocate_descriptor_set();
VkDescriptorSet commit(); VkDescriptorSet commit();

View file

@@ -468,6 +468,7 @@ namespace vk
const auto num_copies = ::size32(m_pending_copies); const auto num_copies = ::size32(m_pending_copies);
vkUpdateDescriptorSets(*g_render_device, num_writes, m_pending_writes.data(), num_copies, m_pending_copies.data()); vkUpdateDescriptorSets(*g_render_device, num_writes, m_pending_writes.data(), num_copies, m_pending_copies.data());
m_storage_cache_id++;
m_push_type_mask = 0; m_push_type_mask = 0;
m_pending_writes.clear(); m_pending_writes.clear();
m_pending_copies.clear(); m_pending_copies.clear();

View file

@@ -164,13 +164,15 @@ namespace vk
return &m_image_info_pool.back(); return &m_image_info_pool.back();
} }
// Temporary storage accessor // Temporary storage accessors
const rsx::simple_array<WriteDescriptorSetT> peek() const { return m_pending_writes; } const rsx::simple_array<WriteDescriptorSetT> peek() const { return m_pending_writes; }
u64 cache_id() const { return m_storage_cache_id; }
private: private:
VkDescriptorSet m_handle = VK_NULL_HANDLE; VkDescriptorSet m_handle = VK_NULL_HANDLE;
u64 m_update_after_bind_mask = 0; u64 m_update_after_bind_mask = 0;
u64 m_push_type_mask = 0; u64 m_push_type_mask = 0;
u64 m_storage_cache_id = 0;
bool m_in_use = false; bool m_in_use = false;
rsx::simple_array<VkBufferView> m_buffer_view_pool; rsx::simple_array<VkBufferView> m_buffer_view_pool;