vk: Implement descriptor write buffering

This commit is contained in:
kd-11 2025-08-03 16:52:17 +03:00 committed by kd-11
parent 97620c4e17
commit 335f3dbe2f
3 changed files with 133 additions and 48 deletions

View File

@@ -519,21 +519,79 @@ namespace vk
fmt::throw_exception("Unexpected descriptor structure at index %u", idx);
};
m_descriptor_set = allocate_descriptor_set();
for (unsigned i = 0; i < m_descriptor_slots.size(); ++i)
auto update_descriptor_slot = [this](unsigned idx)
{
if (m_descriptors_dirty[i])
const auto& slot = m_descriptor_slots[idx];
const VkDescriptorType type = m_descriptor_types[idx];
if (auto ptr = std::get_if<VkDescriptorImageInfo>(&slot))
{
// Push
push_descriptor_slot(i);
m_descriptors_dirty[i] = false;
continue;
m_descriptor_template[idx].pImageInfo = m_descriptor_set.store(*ptr);
return;
}
// We should copy here if possible.
// Without descriptor_buffer, the most efficient option is to just use the normal bind logic due to the pointer-based nature of the descriptor inputs and no stride.
push_descriptor_slot(i);
if (auto ptr = std::get_if<VkDescriptorBufferInfo>(&slot))
{
m_descriptor_template[idx].pBufferInfo = m_descriptor_set.store(*ptr);
return;
}
if (auto ptr = std::get_if<VkBufferView>(&slot))
{
m_descriptor_template[idx].pTexelBufferView = m_descriptor_set.store(*ptr);
return;
}
// FIXME: This sucks even if only used by interpreter. Do better.
if (auto ptr = std::get_if<descriptor_array_ref_t>(&slot))
{
ensure(type == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER);
ensure((ptr->first + ptr->count) <= m_scratch_images_array.size());
m_descriptor_set.push(m_scratch_images_array.data() + ptr->first, ptr->count, type, idx);
return;
}
fmt::throw_exception("Unexpected descriptor structure at index %u", idx);
};
m_descriptor_set = allocate_descriptor_set();
if (!m_descriptor_template.empty()) [[ likely ]]
{
for (unsigned i = 0; i < m_descriptor_slots.size(); ++i)
{
m_descriptor_template[i].dstSet = m_descriptor_set.value();
if (!m_descriptors_dirty[i])
{
continue;
}
// Update
update_descriptor_slot(i);
m_descriptors_dirty[i] = false;
}
// Push
m_descriptor_set.push(m_descriptor_template, m_descriptor_template_typemask);
}
else
{
m_descriptor_template_typemask = 0u;
for (unsigned i = 0; i < m_descriptor_slots.size(); ++i)
{
m_descriptor_template_typemask |= (1u << static_cast<u32>(m_descriptor_types[i]));
if (m_descriptors_dirty[i])
{
// Push
push_descriptor_slot(i);
m_descriptors_dirty[i] = false;
continue;
}
push_descriptor_slot(i);
}
m_descriptor_template = m_descriptor_set.peek();
}
m_descriptor_set.on_bind();

View File

@@ -130,6 +130,9 @@ namespace vk
rsx::simple_array<VkDescriptorPoolSize> m_descriptor_pool_sizes;
rsx::simple_array<VkDescriptorType> m_descriptor_types;
u32 m_descriptor_template_typemask = 0u;
rsx::simple_array<VkWriteDescriptorSet> m_descriptor_template;
std::vector<descriptor_slot_t> m_descriptor_slots;
std::vector<bool> m_descriptors_dirty;
bool m_any_descriptors_dirty = false;

View File

@@ -85,43 +85,6 @@ namespace vk
void init(VkDescriptorSet new_set);
public:
descriptor_set(VkDescriptorSet set);
descriptor_set() = default;
~descriptor_set();
descriptor_set(const descriptor_set&) = delete;
void swap(descriptor_set& other);
descriptor_set& operator = (VkDescriptorSet set);
VkDescriptorSet value() const { return m_handle; }
operator bool() const { return m_handle != VK_NULL_HANDLE; }
VkDescriptorSet* ptr();
void push(const VkBufferView& buffer_view, VkDescriptorType type, u32 binding);
void push(const VkDescriptorBufferInfo& buffer_info, VkDescriptorType type, u32 binding);
void push(const VkDescriptorImageInfo& image_info, VkDescriptorType type, u32 binding);
void push(const VkDescriptorImageInfo* image_info, u32 count, VkDescriptorType type, u32 binding);
void push(const rsx::simple_array<VkCopyDescriptorSet>& copy_cmd, u32 type_mask = umax);
void push(const rsx::simple_array<VkWriteDescriptorSet>& write_cmds, u32 type_mask = umax);
void push(const descriptor_set_dynamic_offset_t& offset);
void on_bind();
void bind(const vk::command_buffer& cmd, VkPipelineBindPoint bind_point, VkPipelineLayout layout);
void flush();
private:
VkDescriptorSet m_handle = VK_NULL_HANDLE;
u64 m_update_after_bind_mask = 0;
u64 m_push_type_mask = 0;
bool m_in_use = false;
rsx::simple_array<VkBufferView> m_buffer_view_pool;
rsx::simple_array<VkDescriptorBufferInfo> m_buffer_info_pool;
rsx::simple_array<VkDescriptorImageInfo> m_image_info_pool;
rsx::simple_array<u32> m_dynamic_offsets;
#if defined(__clang__) && (__clang_major__ < 16)
// Clang (pre 16.x) does not support LWG 2089, std::construct_at for POD types
struct WriteDescriptorSetT : public VkWriteDescriptorSet
@@ -154,6 +117,67 @@ namespace vk
using WriteDescriptorSetT = VkWriteDescriptorSet;
#endif
public:
descriptor_set(VkDescriptorSet set);
descriptor_set() = default;
~descriptor_set();
descriptor_set(const descriptor_set&) = delete;
void swap(descriptor_set& other);
descriptor_set& operator = (VkDescriptorSet set);
VkDescriptorSet value() const { return m_handle; }
operator bool() const { return m_handle != VK_NULL_HANDLE; }
VkDescriptorSet* ptr();
void push(const VkBufferView& buffer_view, VkDescriptorType type, u32 binding);
void push(const VkDescriptorBufferInfo& buffer_info, VkDescriptorType type, u32 binding);
void push(const VkDescriptorImageInfo& image_info, VkDescriptorType type, u32 binding);
void push(const VkDescriptorImageInfo* image_info, u32 count, VkDescriptorType type, u32 binding);
void push(const rsx::simple_array<VkCopyDescriptorSet>& copy_cmd, u32 type_mask = umax);
void push(const rsx::simple_array<VkWriteDescriptorSet>& write_cmds, u32 type_mask = umax);
void push(const descriptor_set_dynamic_offset_t& offset);
// Event handlers
void on_bind();
void bind(const vk::command_buffer& cmd, VkPipelineBindPoint bind_point, VkPipelineLayout layout);
void flush();
// Typed temporary storage access. Should be inline, the overhead is significant
FORCE_INLINE VkBufferView* store(const VkBufferView& buffer_view)
{
m_buffer_view_pool.push_back(buffer_view);
return &m_buffer_view_pool.back();
}
FORCE_INLINE VkDescriptorBufferInfo* store(const VkDescriptorBufferInfo& buffer_info)
{
m_buffer_info_pool.push_back(buffer_info);
return &m_buffer_info_pool.back();
}
FORCE_INLINE VkDescriptorImageInfo* store(const VkDescriptorImageInfo& image_info)
{
m_image_info_pool.push_back(image_info);
return &m_image_info_pool.back();
}
// Temporary storage accessor
const rsx::simple_array<WriteDescriptorSetT> peek() const { return m_pending_writes; }
private:
VkDescriptorSet m_handle = VK_NULL_HANDLE;
u64 m_update_after_bind_mask = 0;
u64 m_push_type_mask = 0;
bool m_in_use = false;
rsx::simple_array<VkBufferView> m_buffer_view_pool;
rsx::simple_array<VkDescriptorBufferInfo> m_buffer_info_pool;
rsx::simple_array<VkDescriptorImageInfo> m_image_info_pool;
rsx::simple_array<u32> m_dynamic_offsets;
rsx::simple_array<WriteDescriptorSetT> m_pending_writes;
rsx::simple_array<VkCopyDescriptorSet> m_pending_copies;
};