#include <algorithm>
#include <cstdint>
#include <memory>
#include <string>

#include "vulkan/vulkan_core.h"
#include "vulkan/vulkan_enums.hpp"
#include "vulkan/vulkan_structs.hpp"
18 const vk::Image& image,
19 vk::AccessFlags src_access_mask,
20 vk::AccessFlags dst_access_mask,
21 vk::ImageLayout old_layout,
22 vk::ImageLayout new_layout,
23 vk::PipelineStageFlags src_stage,
24 vk::PipelineStageFlags dst_stage,
25 uint32_t base_mip_level,
26 uint32_t mip_level_count = 1u) {
27 if (old_layout == new_layout) {
31 vk::ImageMemoryBarrier barrier;
32 barrier.srcAccessMask = src_access_mask;
33 barrier.dstAccessMask = dst_access_mask;
34 barrier.oldLayout = old_layout;
35 barrier.newLayout = new_layout;
36 barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
37 barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
38 barrier.image = image;
39 barrier.subresourceRange.aspectMask = vk::ImageAspectFlagBits::eColor;
40 barrier.subresourceRange.baseMipLevel = base_mip_level;
41 barrier.subresourceRange.levelCount = mip_level_count;
42 barrier.subresourceRange.baseArrayLayer = 0u;
43 barrier.subresourceRange.layerCount = 1u;
45 cmd.pipelineBarrier(src_stage, dst_stage, {},
nullptr,
nullptr, barrier);
48 BlitPassVK::BlitPassVK(std::shared_ptr<CommandBufferVK> command_buffer)
49 : command_buffer_(
std::move(command_buffer)) {}
51 BlitPassVK::~BlitPassVK() =
default;
53 void BlitPassVK::OnSetLabel(std::string label) {
57 label_ = std::move(label);
61 bool BlitPassVK::IsValid()
const {
66 bool BlitPassVK::EncodeCommands(
67 const std::shared_ptr<Allocator>& transients_allocator)
const {
72 bool BlitPassVK::OnCopyTextureToTextureCommand(
73 std::shared_ptr<Texture> source,
74 std::shared_ptr<Texture> destination,
78 auto& encoder = *command_buffer_->GetEncoder();
79 const auto& cmd_buffer = encoder.GetCommandBuffer();
81 const auto& src = TextureVK::Cast(*source);
82 const auto& dst = TextureVK::Cast(*destination);
84 if (!encoder.Track(source) || !encoder.Track(destination)) {
88 BarrierVK src_barrier;
89 src_barrier.cmd_buffer = cmd_buffer;
90 src_barrier.new_layout = vk::ImageLayout::eTransferSrcOptimal;
91 src_barrier.src_access = vk::AccessFlagBits::eTransferWrite |
92 vk::AccessFlagBits::eShaderWrite |
93 vk::AccessFlagBits::eColorAttachmentWrite;
94 src_barrier.src_stage = vk::PipelineStageFlagBits::eTransfer |
95 vk::PipelineStageFlagBits::eFragmentShader |
96 vk::PipelineStageFlagBits::eColorAttachmentOutput;
97 src_barrier.dst_access = vk::AccessFlagBits::eTransferRead;
98 src_barrier.dst_stage = vk::PipelineStageFlagBits::eTransfer;
100 BarrierVK dst_barrier;
101 dst_barrier.cmd_buffer = cmd_buffer;
102 dst_barrier.new_layout = vk::ImageLayout::eTransferDstOptimal;
103 dst_barrier.src_access = {};
104 dst_barrier.src_stage = vk::PipelineStageFlagBits::eTopOfPipe;
105 dst_barrier.dst_access =
106 vk::AccessFlagBits::eShaderRead | vk::AccessFlagBits::eTransferWrite;
107 dst_barrier.dst_stage = vk::PipelineStageFlagBits::eFragmentShader |
108 vk::PipelineStageFlagBits::eTransfer;
110 if (!src.SetLayout(src_barrier) || !dst.SetLayout(dst_barrier)) {
115 vk::ImageCopy image_copy;
117 image_copy.setSrcSubresource(
118 vk::ImageSubresourceLayers(vk::ImageAspectFlagBits::eColor, 0, 0, 1));
119 image_copy.setDstSubresource(
120 vk::ImageSubresourceLayers(vk::ImageAspectFlagBits::eColor, 0, 0, 1));
122 image_copy.srcOffset =
123 vk::Offset3D(source_region.GetX(), source_region.GetY(), 0);
124 image_copy.dstOffset =
125 vk::Offset3D(destination_origin.x, destination_origin.y, 0);
127 vk::Extent3D(source_region.GetWidth(), source_region.GetHeight(), 1);
131 cmd_buffer.copyImage(src.GetImage(),
132 src_barrier.new_layout,
134 dst_barrier.new_layout,
140 if (dst.IsSwapchainImage()) {
145 barrier.cmd_buffer = cmd_buffer;
146 barrier.new_layout = vk::ImageLayout::eShaderReadOnlyOptimal;
147 barrier.src_access = {};
148 barrier.src_stage = vk::PipelineStageFlagBits::eTopOfPipe;
149 barrier.dst_access = vk::AccessFlagBits::eShaderRead;
150 barrier.dst_stage = vk::PipelineStageFlagBits::eFragmentShader;
152 return dst.SetLayout(barrier);
156 bool BlitPassVK::OnCopyTextureToBufferCommand(
157 std::shared_ptr<Texture> source,
158 std::shared_ptr<DeviceBuffer> destination,
160 size_t destination_offset,
162 auto& encoder = *command_buffer_->GetEncoder();
163 const auto& cmd_buffer = encoder.GetCommandBuffer();
166 const auto& src = TextureVK::Cast(*source);
168 if (!encoder.Track(source) || !encoder.Track(destination)) {
173 barrier.cmd_buffer = cmd_buffer;
174 barrier.new_layout = vk::ImageLayout::eTransferSrcOptimal;
175 barrier.src_access = vk::AccessFlagBits::eShaderWrite |
176 vk::AccessFlagBits::eTransferWrite |
177 vk::AccessFlagBits::eColorAttachmentWrite;
178 barrier.src_stage = vk::PipelineStageFlagBits::eFragmentShader |
179 vk::PipelineStageFlagBits::eTransfer |
180 vk::PipelineStageFlagBits::eColorAttachmentOutput;
181 barrier.dst_access = vk::AccessFlagBits::eShaderRead;
182 barrier.dst_stage = vk::PipelineStageFlagBits::eVertexShader |
183 vk::PipelineStageFlagBits::eFragmentShader;
185 const auto& dst = DeviceBufferVK::Cast(*destination);
187 vk::BufferImageCopy image_copy;
188 image_copy.setBufferOffset(destination_offset);
189 image_copy.setBufferRowLength(0);
190 image_copy.setBufferImageHeight(0);
191 image_copy.setImageSubresource(
192 vk::ImageSubresourceLayers(vk::ImageAspectFlagBits::eColor, 0, 0, 1));
193 image_copy.setImageOffset(
194 vk::Offset3D(source_region.GetX(), source_region.GetY(), 0));
195 image_copy.setImageExtent(
196 vk::Extent3D(source_region.GetWidth(), source_region.GetHeight(), 1));
198 if (!src.SetLayout(barrier)) {
203 cmd_buffer.copyImageToBuffer(src.GetImage(),
211 if (destination->GetDeviceBufferDescriptor().readback) {
212 vk::MemoryBarrier barrier;
213 barrier.srcAccessMask = vk::AccessFlagBits::eTransferWrite;
214 barrier.dstAccessMask = vk::AccessFlagBits::eHostRead;
216 cmd_buffer.pipelineBarrier(vk::PipelineStageFlagBits::eTransfer,
217 vk::PipelineStageFlagBits::eHost, {}, 1,
218 &barrier, 0, {}, 0, {});
224 bool BlitPassVK::ConvertTextureToShaderRead(
225 const std::shared_ptr<Texture>& texture) {
226 auto& encoder = *command_buffer_->GetEncoder();
227 const auto& cmd_buffer = encoder.GetCommandBuffer();
230 barrier.cmd_buffer = cmd_buffer;
231 barrier.src_access = vk::AccessFlagBits::eTransferWrite;
232 barrier.src_stage = vk::PipelineStageFlagBits::eTransfer;
233 barrier.dst_access = vk::AccessFlagBits::eShaderRead;
234 barrier.dst_stage = vk::PipelineStageFlagBits::eFragmentShader;
236 barrier.new_layout = vk::ImageLayout::eShaderReadOnlyOptimal;
238 const auto& texture_vk = TextureVK::Cast(*texture);
240 if (!encoder.Track(texture)) {
244 return texture_vk.SetLayout(barrier);
248 bool BlitPassVK::OnCopyBufferToTextureCommand(
250 std::shared_ptr<Texture> destination,
251 IRect destination_region,
254 bool convert_to_read) {
255 auto& encoder = *command_buffer_->GetEncoder();
256 const auto& cmd_buffer = encoder.GetCommandBuffer();
259 const auto& dst = TextureVK::Cast(*destination);
260 const auto& src = DeviceBufferVK::Cast(*source.buffer);
262 if (!encoder.Track(source.buffer) || !encoder.Track(destination)) {
266 BarrierVK dst_barrier;
267 dst_barrier.cmd_buffer = cmd_buffer;
268 dst_barrier.new_layout = vk::ImageLayout::eTransferDstOptimal;
269 dst_barrier.src_access = {};
270 dst_barrier.src_stage = vk::PipelineStageFlagBits::eTopOfPipe;
271 dst_barrier.dst_access =
272 vk::AccessFlagBits::eShaderRead | vk::AccessFlagBits::eTransferWrite;
273 dst_barrier.dst_stage = vk::PipelineStageFlagBits::eFragmentShader |
274 vk::PipelineStageFlagBits::eTransfer;
276 vk::BufferImageCopy image_copy;
277 image_copy.setBufferOffset(source.range.offset);
278 image_copy.setBufferRowLength(0);
279 image_copy.setBufferImageHeight(0);
280 image_copy.setImageSubresource(
281 vk::ImageSubresourceLayers(vk::ImageAspectFlagBits::eColor, 0, 0, 1));
282 image_copy.imageOffset.x = destination_region.GetX();
283 image_copy.imageOffset.y = destination_region.GetY();
284 image_copy.imageOffset.z = 0u;
285 image_copy.imageExtent.width = destination_region.GetWidth();
286 image_copy.imageExtent.height = destination_region.GetHeight();
287 image_copy.imageExtent.depth = 1u;
292 if (!dst.SetLayout(dst_barrier)) {
297 cmd_buffer.copyBufferToImage(src.GetBuffer(),
299 dst_barrier.new_layout,
304 if (convert_to_read) {
306 barrier.cmd_buffer = cmd_buffer;
307 barrier.src_access = vk::AccessFlagBits::eTransferWrite;
308 barrier.src_stage = vk::PipelineStageFlagBits::eTransfer;
309 barrier.dst_access = vk::AccessFlagBits::eShaderRead;
310 barrier.dst_stage = vk::PipelineStageFlagBits::eFragmentShader;
312 barrier.new_layout = vk::ImageLayout::eShaderReadOnlyOptimal;
314 if (!dst.SetLayout(barrier)) {
323 bool BlitPassVK::OnGenerateMipmapCommand(std::shared_ptr<Texture> texture,
325 auto& encoder = *command_buffer_->GetEncoder();
326 auto& src = TextureVK::Cast(*texture);
328 const auto size = src.GetTextureDescriptor().size;
329 uint32_t mip_count = src.GetTextureDescriptor().mip_count;
331 if (mip_count < 2u) {
335 const auto& image = src.GetImage();
336 const auto& cmd = encoder.GetCommandBuffer();
338 if (!encoder.Track(texture)) {
350 vk::AccessFlagBits::eTransferWrite |
351 vk::AccessFlagBits::eColorAttachmentWrite,
352 vk::AccessFlagBits::eTransferRead,
354 vk::ImageLayout::eTransferDstOptimal,
355 vk::PipelineStageFlagBits::eTransfer |
356 vk::PipelineStageFlagBits::eColorAttachmentOutput,
357 vk::PipelineStageFlagBits::eTransfer,
361 vk::ImageMemoryBarrier barrier;
362 barrier.image = image;
363 barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
364 barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
365 barrier.subresourceRange.aspectMask = vk::ImageAspectFlagBits::eColor;
366 barrier.subresourceRange.baseArrayLayer = 0;
367 barrier.subresourceRange.layerCount = 1;
368 barrier.subresourceRange.levelCount = 1;
371 size_t width = size.width;
372 size_t height = size.height;
373 for (
size_t mip_level = 1u; mip_level < mip_count; mip_level++) {
374 barrier.subresourceRange.baseMipLevel = mip_level - 1;
375 barrier.oldLayout = vk::ImageLayout::eTransferDstOptimal;
376 barrier.newLayout = vk::ImageLayout::eTransferSrcOptimal;
377 barrier.srcAccessMask = vk::AccessFlagBits::eTransferWrite;
378 barrier.dstAccessMask = vk::AccessFlagBits::eTransferRead;
384 cmd.pipelineBarrier(vk::PipelineStageFlagBits::eTransfer,
385 vk::PipelineStageFlagBits::eTransfer, {}, {}, {},
389 blit.srcSubresource.aspectMask = vk::ImageAspectFlagBits::eColor;
390 blit.srcSubresource.baseArrayLayer = 0u;
391 blit.srcSubresource.layerCount = 1u;
392 blit.srcSubresource.mipLevel = mip_level - 1;
394 blit.dstSubresource.aspectMask = vk::ImageAspectFlagBits::eColor;
395 blit.dstSubresource.baseArrayLayer = 0u;
396 blit.dstSubresource.layerCount = 1u;
397 blit.dstSubresource.mipLevel = mip_level;
400 blit.srcOffsets[1].x = std::max<int32_t>(width, 1u);
401 blit.srcOffsets[1].y = std::max<int32_t>(height, 1u);
402 blit.srcOffsets[1].z = 1u;
408 blit.dstOffsets[1].x = std::max<int32_t>(width, 1u);
409 blit.dstOffsets[1].y = std::max<int32_t>(height, 1u);
410 blit.dstOffsets[1].z = 1u;
413 vk::ImageLayout::eTransferSrcOptimal,
415 vk::ImageLayout::eTransferDstOptimal,
421 barrier.oldLayout = vk::ImageLayout::eTransferSrcOptimal;
422 barrier.newLayout = vk::ImageLayout::eShaderReadOnlyOptimal;
423 barrier.srcAccessMask = vk::AccessFlagBits::eTransferRead;
424 barrier.dstAccessMask = vk::AccessFlagBits::eShaderRead;
429 cmd.pipelineBarrier(vk::PipelineStageFlagBits::eTransfer,
430 vk::PipelineStageFlagBits::eFragmentShader, {}, {}, {},
434 barrier.subresourceRange.baseMipLevel = mip_count - 1;
435 barrier.oldLayout = vk::ImageLayout::eTransferDstOptimal;
436 barrier.newLayout = vk::ImageLayout::eShaderReadOnlyOptimal;
437 barrier.srcAccessMask = vk::AccessFlagBits::eTransferWrite;
438 barrier.dstAccessMask = vk::AccessFlagBits::eShaderRead;
440 cmd.pipelineBarrier(vk::PipelineStageFlagBits::eTransfer,
441 vk::PipelineStageFlagBits::eFragmentShader, {}, {}, {},
446 src.SetLayoutWithoutEncoding(vk::ImageLayout::eShaderReadOnlyOptimal);
447 src.SetMipMapGenerated();