Flutter Impeller
compute_pass_vk.cc
Go to the documentation of this file.
1 // Copyright 2013 The Flutter Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
6 
7 #include "flutter/fml/trace_event.h"
12 
13 namespace impeller {
14 
// Creates a compute pass that records into |command_buffer|. Both the
// context and the command buffer are held weakly; encoding later fails
// gracefully if either has died. A newly constructed pass is always valid.
ComputePassVK::ComputePassVK(std::weak_ptr<const Context> context,
                             std::weak_ptr<CommandBufferVK> command_buffer)
    : ComputePass(std::move(context)),
      command_buffer_(std::move(command_buffer)) {
  is_valid_ = true;
}
21 
23 
// Returns whether this pass can encode commands. Set to true at
// construction; checked by OnEncodeCommands before doing any work.
bool ComputePassVK::IsValid() const {
  return is_valid_;
}
27 
28 void ComputePassVK::OnSetLabel(const std::string& label) {
29  if (label.empty()) {
30  return;
31  }
32  label_ = label;
33 }
34 
35 static bool UpdateBindingLayouts(const Bindings& bindings,
36  const vk::CommandBuffer& buffer) {
37  BarrierVK barrier;
38  barrier.cmd_buffer = buffer;
39  barrier.src_access = vk::AccessFlagBits::eTransferWrite;
40  barrier.src_stage = vk::PipelineStageFlagBits::eTransfer;
41  barrier.dst_access = vk::AccessFlagBits::eShaderRead;
42  barrier.dst_stage = vk::PipelineStageFlagBits::eComputeShader;
43 
44  barrier.new_layout = vk::ImageLayout::eShaderReadOnlyOptimal;
45 
46  for (const auto& [_, data] : bindings.sampled_images) {
47  if (!TextureVK::Cast(*data.texture.resource).SetLayout(barrier)) {
48  return false;
49  }
50  }
51  return true;
52 }
53 
// Transitions the layouts of all images bound by |command| so they are
// ready to be sampled. Thin forwarder to the Bindings overload.
static bool UpdateBindingLayouts(const ComputeCommand& command,
                                 const vk::CommandBuffer& buffer) {
  return UpdateBindingLayouts(command.bindings, buffer);
}
58 
59 static bool UpdateBindingLayouts(const std::vector<ComputeCommand>& commands,
60  const vk::CommandBuffer& buffer) {
61  for (const auto& command : commands) {
62  if (!UpdateBindingLayouts(command, buffer)) {
63  return false;
64  }
65  }
66  return true;
67 }
68 
69 static bool AllocateAndBindDescriptorSets(const ContextVK& context,
70  const ComputeCommand& command,
71  CommandEncoderVK& encoder,
72  const ComputePipelineVK& pipeline,
73  size_t command_count) {
74  auto desc_set = pipeline.GetDescriptor().GetDescriptorSetLayouts();
75  auto vk_desc_set = encoder.AllocateDescriptorSet(
76  pipeline.GetDescriptorSetLayout(), command_count);
77  if (!vk_desc_set) {
78  return false;
79  }
80 
81  auto& allocator = *context.GetResourceAllocator();
82 
83  std::unordered_map<uint32_t, vk::DescriptorBufferInfo> buffers;
84  std::unordered_map<uint32_t, vk::DescriptorImageInfo> images;
85  std::vector<vk::WriteDescriptorSet> writes;
86  auto bind_images = [&encoder, //
87  &images, //
88  &writes, //
89  &vk_desc_set //
90  ](const Bindings& bindings) -> bool {
91  for (const auto& [index, data] : bindings.sampled_images) {
92  auto texture = data.texture.resource;
93  const auto& texture_vk = TextureVK::Cast(*texture);
94  const SamplerVK& sampler = SamplerVK::Cast(*data.sampler.resource);
95 
96  if (!encoder.Track(texture) ||
97  !encoder.Track(sampler.GetSharedSampler())) {
98  return false;
99  }
100 
101  const SampledImageSlot& slot = data.slot;
102 
103  vk::DescriptorImageInfo image_info;
104  image_info.imageLayout = vk::ImageLayout::eShaderReadOnlyOptimal;
105  image_info.sampler = sampler.GetSampler();
106  image_info.imageView = texture_vk.GetImageView();
107 
108  vk::WriteDescriptorSet write_set;
109  write_set.dstSet = vk_desc_set.value();
110  write_set.dstBinding = slot.binding;
111  write_set.descriptorCount = 1u;
112  write_set.descriptorType = vk::DescriptorType::eCombinedImageSampler;
113  write_set.pImageInfo = &(images[slot.binding] = image_info);
114 
115  writes.push_back(write_set);
116  }
117 
118  return true;
119  };
120 
121  auto bind_buffers = [&allocator, //
122  &encoder, //
123  &buffers, //
124  &writes, //
125  &desc_set, //
126  &vk_desc_set //
127  ](const Bindings& bindings) -> bool {
128  for (const auto& [buffer_index, data] : bindings.buffers) {
129  const auto& buffer_view = data.view.resource.buffer;
130 
131  auto device_buffer = buffer_view->GetDeviceBuffer(allocator);
132  if (!device_buffer) {
133  VALIDATION_LOG << "Failed to get device buffer for vertex binding";
134  return false;
135  }
136 
137  auto buffer = DeviceBufferVK::Cast(*device_buffer).GetBuffer();
138  if (!buffer) {
139  return false;
140  }
141 
142  if (!encoder.Track(device_buffer)) {
143  return false;
144  }
145 
146  uint32_t offset = data.view.resource.range.offset;
147 
148  vk::DescriptorBufferInfo buffer_info;
149  buffer_info.buffer = buffer;
150  buffer_info.offset = offset;
151  buffer_info.range = data.view.resource.range.length;
152 
153  const ShaderUniformSlot& uniform = data.slot;
154  auto layout_it = std::find_if(desc_set.begin(), desc_set.end(),
155  [&uniform](DescriptorSetLayout& layout) {
156  return layout.binding == uniform.binding;
157  });
158  if (layout_it == desc_set.end()) {
159  VALIDATION_LOG << "Failed to get descriptor set layout for binding "
160  << uniform.binding;
161  return false;
162  }
163  auto layout = *layout_it;
164 
165  vk::WriteDescriptorSet write_set;
166  write_set.dstSet = vk_desc_set.value();
167  write_set.dstBinding = uniform.binding;
168  write_set.descriptorCount = 1u;
169  write_set.descriptorType = ToVKDescriptorType(layout.descriptor_type);
170  write_set.pBufferInfo = &(buffers[uniform.binding] = buffer_info);
171 
172  writes.push_back(write_set);
173  }
174  return true;
175  };
176 
177  if (!bind_buffers(command.bindings) || !bind_images(command.bindings)) {
178  return false;
179  }
180 
181  context.GetDevice().updateDescriptorSets(writes, {});
182 
183  encoder.GetCommandBuffer().bindDescriptorSets(
184  vk::PipelineBindPoint::eCompute, // bind point
185  pipeline.GetPipelineLayout(), // layout
186  0, // first set
187  {vk::DescriptorSet{*vk_desc_set}}, // sets
188  nullptr // offsets
189  );
190  return true;
191 }
192 
193 bool ComputePassVK::OnEncodeCommands(const Context& context,
194  const ISize& grid_size,
195  const ISize& thread_group_size) const {
196  TRACE_EVENT0("impeller", "ComputePassVK::EncodeCommands");
197  if (!IsValid()) {
198  return false;
199  }
200 
201  FML_DCHECK(!grid_size.IsEmpty() && !thread_group_size.IsEmpty());
202 
203  const auto& vk_context = ContextVK::Cast(context);
204  auto command_buffer = command_buffer_.lock();
205  if (!command_buffer) {
206  VALIDATION_LOG << "Command buffer died before commands could be encoded.";
207  return false;
208  }
209  auto encoder = command_buffer->GetEncoder();
210  if (!encoder) {
211  return false;
212  }
213 
214  fml::ScopedCleanupClosure pop_marker(
215  [&encoder]() { encoder->PopDebugGroup(); });
216  if (!label_.empty()) {
217  encoder->PushDebugGroup(label_.c_str());
218  } else {
219  pop_marker.Release();
220  }
221  auto cmd_buffer = encoder->GetCommandBuffer();
222 
223  if (!UpdateBindingLayouts(commands_, cmd_buffer)) {
224  VALIDATION_LOG << "Could not update binding layouts for compute pass.";
225  return false;
226  }
227 
228  {
229  TRACE_EVENT0("impeller", "EncodeComputePassCommands");
230 
231  for (const auto& command : commands_) {
232  if (!command.pipeline) {
233  continue;
234  }
235 
236  const auto& pipeline_vk = ComputePipelineVK::Cast(*command.pipeline);
237 
238  cmd_buffer.bindPipeline(vk::PipelineBindPoint::eCompute,
239  pipeline_vk.GetPipeline());
240  if (!AllocateAndBindDescriptorSets(vk_context, //
241  command, //
242  *encoder, //
243  pipeline_vk, //
244  commands_.size() //
245  )) {
246  return false;
247  }
248 
249  // TOOD(dnfield): This should be moved to caps. But for now keeping this
250  // in parallel with Metal.
251  auto device_properties = vk_context.GetPhysicalDevice().getProperties();
252 
253  auto max_wg_size = device_properties.limits.maxComputeWorkGroupSize;
254 
255  int64_t width = grid_size.width;
256  int64_t height = grid_size.height;
257 
258  // Special case for linear processing.
259  if (height == 1) {
260  int64_t minimum = 1;
261  int64_t threadGroups = std::max(
262  static_cast<int64_t>(std::ceil(width * 1.0 / max_wg_size[0] * 1.0)),
263  minimum);
264  cmd_buffer.dispatch(threadGroups, 1, 1);
265  } else {
266  while (width > max_wg_size[0]) {
267  width = std::max(static_cast<int64_t>(1), width / 2);
268  }
269  while (height > max_wg_size[1]) {
270  height = std::max(static_cast<int64_t>(1), height / 2);
271  }
272  cmd_buffer.dispatch(width, height, 1);
273  }
274  }
275  }
276 
277  return true;
278 }
279 
280 } // namespace impeller
impeller::DeviceBufferVK::GetBuffer
vk::Buffer GetBuffer() const
Definition: device_buffer_vk.cc:71
impeller::ComputePipelineVK::GetPipelineLayout
const vk::PipelineLayout & GetPipelineLayout() const
Definition: compute_pipeline_vk.cc:45
impeller::BarrierVK::dst_access
vk::AccessFlags dst_access
Definition: barrier_vk.h:42
impeller::DescriptorSetLayout
Definition: shader_types.h:149
impeller::AllocateAndBindDescriptorSets
static bool AllocateAndBindDescriptorSets(const ContextVK &context, const ComputeCommand &command, CommandEncoderVK &encoder, const ComputePipelineVK &pipeline, size_t command_count)
Definition: compute_pass_vk.cc:69
impeller::ShaderUniformSlot
Definition: shader_types.h:81
compute_pass_vk.h
impeller::ComputePipelineDescriptor::GetDescriptorSetLayouts
const std::vector< DescriptorSetLayout > & GetDescriptorSetLayouts() const
Definition: compute_pipeline_descriptor.cc:77
impeller::ToVKDescriptorType
constexpr vk::DescriptorType ToVKDescriptorType(DescriptorType type)
Definition: formats_vk.h:273
impeller::BarrierVK::new_layout
vk::ImageLayout new_layout
Definition: barrier_vk.h:22
impeller::ContextVK::GetResourceAllocator
std::shared_ptr< Allocator > GetResourceAllocator() const override
Returns the allocator used to create textures and buffers on the device.
Definition: context_vk.cc:453
impeller::BarrierVK::cmd_buffer
vk::CommandBuffer cmd_buffer
Definition: barrier_vk.h:21
impeller::ComputePassVK::~ComputePassVK
~ComputePassVK() override
impeller::ComputePass::commands_
std::vector< ComputeCommand > commands_
Definition: compute_pass.h:62
impeller::UpdateBindingLayouts
static bool UpdateBindingLayouts(const Bindings &bindings, const vk::CommandBuffer &buffer)
Definition: compute_pass_vk.cc:35
impeller::SamplerVK
Definition: sampler_vk.h:17
impeller::TextureVK::SetLayout
bool SetLayout(const BarrierVK &barrier) const
Definition: texture_vk.cc:140
impeller::CommandEncoderVK::AllocateDescriptorSet
std::optional< vk::DescriptorSet > AllocateDescriptorSet(const vk::DescriptorSetLayout &layout, size_t command_count)
Definition: command_encoder_vk.cc:284
impeller::ShaderUniformSlot::binding
size_t binding
Definition: shader_types.h:85
command_buffer_vk.h
impeller::BarrierVK
Defines an operations and memory access barrier on a resource.
Definition: barrier_vk.h:20
impeller::CommandEncoderVK::Track
bool Track(std::shared_ptr< SharedObjectVK > object)
Definition: command_encoder_vk.cc:232
compute_pipeline_vk.h
impeller::ComputePipelineVK::GetDescriptorSetLayout
const vk::DescriptorSetLayout & GetDescriptorSetLayout() const
Definition: compute_pipeline_vk.cc:49
impeller::SampledImageSlot
Definition: shader_types.h:129
impeller::ComputeCommand::bindings
Bindings bindings
Definition: compute_command.h:52
impeller::Bindings::sampled_images
std::map< size_t, TextureAndSampler > sampled_images
Definition: command.h:78
impeller::BarrierVK::src_access
vk::AccessFlags src_access
Definition: barrier_vk.h:32
impeller::SamplerVK::GetSampler
vk::Sampler GetSampler() const
Definition: sampler_vk.cc:17
impeller::Pipeline::GetDescriptor
const T & GetDescriptor() const
Get the descriptor that was responsible for creating this pipeline. It may be copied and modified to ...
Definition: pipeline.cc:49
texture_vk.h
impeller::ContextVK
Definition: context_vk.h:36
impeller::ISize
TSize< int64_t > ISize
Definition: size.h:136
impeller::BarrierVK::dst_stage
vk::PipelineStageFlags dst_stage
Definition: barrier_vk.h:37
VALIDATION_LOG
#define VALIDATION_LOG
Definition: validation.h:60
impeller::ComputeCommand
An object used to specify compute work to the GPU along with references to resources the GPU will use...
Definition: compute_command.h:43
std
Definition: comparable.h:98
impeller::ContextVK::GetDevice
const vk::Device & GetDevice() const
Definition: context_vk.cc:480
impeller::BackendCast< TextureVK, Texture >::Cast
static TextureVK & Cast(Texture &base)
Definition: backend_cast.h:14
sampler_vk.h
impeller::SampledImageSlot::binding
size_t binding
Definition: shader_types.h:133
impeller::DescriptorSetLayout::descriptor_type
DescriptorType descriptor_type
Definition: shader_types.h:151
impeller::ComputePipelineVK
Definition: compute_pipeline_vk.h:17
impeller::CommandEncoderVK
Definition: command_encoder_vk.h:45
impeller
Definition: aiks_context.cc:10
impeller::SamplerVK::GetSharedSampler
const std::shared_ptr< SharedObjectVKT< vk::Sampler > > & GetSharedSampler() const
Definition: sampler_vk.cc:22
impeller::BarrierVK::src_stage
vk::PipelineStageFlags src_stage
Definition: barrier_vk.h:27
impeller::Bindings
Definition: command.h:77