Flutter Impeller
context_vk.cc
Go to the documentation of this file.
1 // Copyright 2013 The Flutter Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
6 #include <thread>
7 #include <unordered_map>
8 
9 #include "fml/concurrent_message_loop.h"
10 #include "impeller/core/formats.h"
17 
18 #ifdef FML_OS_ANDROID
19 #include <pthread.h>
20 #include <sys/resource.h>
21 #include <sys/time.h>
22 #endif // FML_OS_ANDROID
23 
24 #include <map>
25 #include <memory>
26 #include <optional>
27 #include <string>
28 #include <vector>
29 
30 #include "flutter/fml/cpu_affinity.h"
31 #include "flutter/fml/trace_event.h"
46 
47 VULKAN_HPP_DEFAULT_DISPATCH_LOADER_DYNAMIC_STORAGE
48 
49 namespace impeller {
50 
51 static bool gHasValidationLayers = false;
52 
54  return gHasValidationLayers;
55 }
56 
57 static std::optional<vk::PhysicalDevice> PickPhysicalDevice(
58  const CapabilitiesVK& caps,
59  const vk::Instance& instance) {
60  for (const auto& device : instance.enumeratePhysicalDevices().value) {
61  if (caps.GetEnabledDeviceFeatures(device).has_value()) {
62  return device;
63  }
64  }
65  return std::nullopt;
66 }
67 
68 static std::vector<vk::DeviceQueueCreateInfo> GetQueueCreateInfos(
69  std::initializer_list<QueueIndexVK> queues) {
70  std::map<size_t /* family */, size_t /* index */> family_index_map;
71  for (const auto& queue : queues) {
72  family_index_map[queue.family] = 0;
73  }
74  for (const auto& queue : queues) {
75  auto value = family_index_map[queue.family];
76  family_index_map[queue.family] = std::max(value, queue.index);
77  }
78 
79  static float kQueuePriority = 1.0f;
80  std::vector<vk::DeviceQueueCreateInfo> infos;
81  for (const auto& item : family_index_map) {
82  vk::DeviceQueueCreateInfo info;
83  info.setQueueFamilyIndex(item.first);
84  info.setQueueCount(item.second + 1);
85  info.setQueuePriorities(kQueuePriority);
86  infos.push_back(info);
87  }
88  return infos;
89 }
90 
91 static std::optional<QueueIndexVK> PickQueue(const vk::PhysicalDevice& device,
92  vk::QueueFlagBits flags) {
93  // This can be modified to ensure that dedicated queues are returned for each
94  // queue type depending on support.
95  const auto families = device.getQueueFamilyProperties();
96  for (size_t i = 0u; i < families.size(); i++) {
97  if (!(families[i].queueFlags & flags)) {
98  continue;
99  }
100  return QueueIndexVK{.family = i, .index = 0};
101  }
102  return std::nullopt;
103 }
104 
105 std::shared_ptr<ContextVK> ContextVK::Create(Settings settings) {
106  auto context = std::shared_ptr<ContextVK>(new ContextVK());
107  context->Setup(std::move(settings));
108  if (!context->IsValid()) {
109  return nullptr;
110  }
111  return context;
112 }
113 
114 // static
115 size_t ContextVK::ChooseThreadCountForWorkers(size_t hardware_concurrency) {
116  // Never create more than 4 worker threads. Attempt to use up to
117  // half of the available concurrency.
118  return std::clamp(hardware_concurrency / 2ull, /*lo=*/1ull, /*hi=*/4ull);
119 }
120 
namespace {
// Per-thread count of contexts created; seeds each ContextVK with an
// identifier unique per (thread, creation ordinal).
thread_local uint64_t tls_context_count = 0;

// Returns a unique identifier for a newly created context on this thread.
// The pointer is deliberately unused: a monotonic counter cannot collide
// even when the allocator reuses an address for a new context.
uint64_t CalculateHash([[maybe_unused]] void* ptr) {
  // You could make a context once per nanosecond for 584 years on one thread
  // before this overflows.
  return ++tls_context_count;
}
}  // namespace
129 
130 ContextVK::ContextVK() : hash_(CalculateHash(this)) {}
131 
133  if (device_holder_ && device_holder_->device) {
134  [[maybe_unused]] auto result = device_holder_->device->waitIdle();
135  }
137 }
138 
141 }
142 
// Performs all one-time Vulkan bootstrap for this context: worker loop,
// instance, debug report, physical/logical device, queues, allocator,
// shader/pipeline/sampler libraries, fence waiter, recyclers, and the GPU
// tracer. Any failure returns early, leaving is_valid_ false so Create()
// discards the context. When settings.embedder_data is present, the
// embedder-provided instance/device/queue are adopted instead of created.
void ContextVK::Setup(Settings settings) {
  TRACE_EVENT0("impeller", "ContextVK::Setup");

  // A proc-address callback is mandatory: without it the dynamic dispatcher
  // cannot resolve any Vulkan entry points.
  if (!settings.proc_address_callback) {
    return;
  }

  raster_message_loop_ = fml::ConcurrentMessageLoop::Create(
      ChooseThreadCountForWorkers(std::thread::hardware_concurrency()));
  raster_message_loop_->PostTaskToAllWorkers([]() {
    // Currently we only use the worker task pool for small parts of a frame
    // workload, if this changes this setting may need to be adjusted.
    fml::RequestAffinity(fml::CpuAffinity::kNotPerformance);
#ifdef FML_OS_ANDROID
    if (::setpriority(PRIO_PROCESS, gettid(), -5) != 0) {
      FML_LOG(ERROR) << "Failed to set Workers task runner priority";
    }
#endif  // FML_OS_ANDROID
  });

  auto& dispatcher = VULKAN_HPP_DEFAULT_DISPATCHER;
  dispatcher.init(settings.proc_address_callback);

  // When running inside an embedder, the embedder dictates which instance
  // and device extensions are enabled.
  std::vector<std::string> embedder_instance_extensions;
  std::vector<std::string> embedder_device_extensions;
  if (settings.embedder_data.has_value()) {
    embedder_instance_extensions = settings.embedder_data->instance_extensions;
    embedder_device_extensions = settings.embedder_data->device_extensions;
  }
  auto caps = std::shared_ptr<CapabilitiesVK>(new CapabilitiesVK(
      settings.enable_validation,                                      //
      settings.fatal_missing_validations,                              //
      /*use_embedder_extensions=*/settings.embedder_data.has_value(),  //
      embedder_instance_extensions,                                    //
      embedder_device_extensions                                       //
      ));

  if (!caps->IsValid()) {
    VALIDATION_LOG << "Could not determine device capabilities.";
    return;
  }

  gHasValidationLayers = caps->AreValidationsEnabled();

  auto enabled_layers = caps->GetEnabledLayers();
  auto enabled_extensions = caps->GetEnabledInstanceExtensions();

  if (!enabled_layers.has_value() || !enabled_extensions.has_value()) {
    VALIDATION_LOG << "Device has insufficient capabilities.";
    return;
  }

  vk::InstanceCreateFlags instance_flags = {};

  // Portability (e.g. MoltenVK-style) drivers only expose their devices when
  // this flag accompanies the VK_KHR_portability_enumeration extension.
  if (std::find(enabled_extensions.value().begin(),
                enabled_extensions.value().end(),
                "VK_KHR_portability_enumeration") !=
      enabled_extensions.value().end()) {
    instance_flags |= vk::InstanceCreateFlagBits::eEnumeratePortabilityKHR;
  }

  // The c_str() pointers below stay valid because enabled_layers and
  // enabled_extensions outlive instance creation.
  std::vector<const char*> enabled_layers_c;
  std::vector<const char*> enabled_extensions_c;

  for (const auto& layer : enabled_layers.value()) {
    enabled_layers_c.push_back(layer.c_str());
  }

  for (const auto& ext : enabled_extensions.value()) {
    enabled_extensions_c.push_back(ext.c_str());
  }

  vk::ApplicationInfo application_info;
  application_info.setApplicationVersion(VK_API_VERSION_1_0);
  application_info.setApiVersion(VK_API_VERSION_1_1);
  application_info.setEngineVersion(VK_API_VERSION_1_0);
  application_info.setPEngineName("Impeller");
  application_info.setPApplicationName("Impeller");

  vk::StructureChain<vk::InstanceCreateInfo, vk::ValidationFeaturesEXT>
      instance_chain;

  if (!caps->AreValidationsEnabled()) {
    instance_chain.unlink<vk::ValidationFeaturesEXT>();
  }

  std::vector<vk::ValidationFeatureEnableEXT> enabled_validations = {
      vk::ValidationFeatureEnableEXT::eSynchronizationValidation,
  };

  // NOTE(review): `auto` here deduces a copy of the chain element
  // (StructureChain::get returns a reference). If the validation features
  // are meant to reach instance creation through the chain's pNext, these
  // look like they ought to be `auto&` — confirm intended behavior.
  auto validation = instance_chain.get<vk::ValidationFeaturesEXT>();
  validation.setEnabledValidationFeatures(enabled_validations);

  auto instance_info = instance_chain.get<vk::InstanceCreateInfo>();
  instance_info.setPEnabledLayerNames(enabled_layers_c);
  instance_info.setPEnabledExtensionNames(enabled_extensions_c);
  instance_info.setPApplicationInfo(&application_info);
  instance_info.setFlags(instance_flags);

  auto device_holder = std::make_shared<DeviceHolderImpl>();
  if (!settings.embedder_data.has_value()) {
    auto instance = vk::createInstanceUnique(instance_info);
    if (instance.result != vk::Result::eSuccess) {
      VALIDATION_LOG << "Could not create Vulkan instance: "
                     << vk::to_string(instance.result);
      return;
    }
    device_holder->instance = std::move(instance.value);
  } else {
    // Adopt — but do not own — the embedder-provided instance.
    device_holder->instance.reset(settings.embedder_data->instance);
    device_holder->owned = false;
  }
  dispatcher.init(device_holder->instance.get());

  //----------------------------------------------------------------------------
  /// Setup the debug report.
  ///
  /// Do this as early as possible since we could use the debug report from
  /// initialization issues.
  ///
  auto debug_report =
      std::make_unique<DebugReportVK>(*caps, device_holder->instance.get());

  if (!debug_report->IsValid()) {
    VALIDATION_LOG << "Could not set up debug report.";
    return;
  }

  //----------------------------------------------------------------------------
  /// Pick the physical device.
  ///
  if (!settings.embedder_data.has_value()) {
    auto physical_device =
        PickPhysicalDevice(*caps, device_holder->instance.get());
    if (!physical_device.has_value()) {
      VALIDATION_LOG << "No valid Vulkan device found.";
      return;
    }
    device_holder->physical_device = physical_device.value();
  } else {
    device_holder->physical_device = settings.embedder_data->physical_device;
  }

  //----------------------------------------------------------------------------
  /// Pick device queues.
  ///
  auto graphics_queue =
      PickQueue(device_holder->physical_device, vk::QueueFlagBits::eGraphics);
  auto transfer_queue =
      PickQueue(device_holder->physical_device, vk::QueueFlagBits::eTransfer);
  auto compute_queue =
      PickQueue(device_holder->physical_device, vk::QueueFlagBits::eCompute);

  if (!graphics_queue.has_value()) {
    VALIDATION_LOG << "Could not pick graphics queue.";
    return;
  }
  // A dedicated transfer queue is optional; fall back to the graphics queue,
  // which supports transfer operations.
  if (!transfer_queue.has_value()) {
    transfer_queue = graphics_queue.value();
  }
  if (!compute_queue.has_value()) {
    VALIDATION_LOG << "Could not pick compute queue.";
    return;
  }

  //----------------------------------------------------------------------------
  /// Create the logical device.
  ///
  auto enabled_device_extensions =
      caps->GetEnabledDeviceExtensions(device_holder->physical_device);
  if (!enabled_device_extensions.has_value()) {
    // This shouldn't happen since we already did device selection. But
    // doesn't hurt to check again.
    return;
  }

  std::vector<const char*> enabled_device_extensions_c;
  for (const auto& ext : enabled_device_extensions.value()) {
    enabled_device_extensions_c.push_back(ext.c_str());
  }

  const auto queue_create_infos = GetQueueCreateInfos(
      {graphics_queue.value(), compute_queue.value(), transfer_queue.value()});

  const auto enabled_features =
      caps->GetEnabledDeviceFeatures(device_holder->physical_device);
  if (!enabled_features.has_value()) {
    // This shouldn't happen since the device can't be picked if this was not
    // true. But doesn't hurt to check.
    return;
  }

  vk::DeviceCreateInfo device_info;

  // The feature structure chain is attached via pNext; it must outlive
  // device creation (it does: enabled_features is a local const auto).
  device_info.setPNext(&enabled_features.value().get());
  device_info.setQueueCreateInfos(queue_create_infos);
  device_info.setPEnabledExtensionNames(enabled_device_extensions_c);
  // Device layers are deprecated and ignored.

  if (!settings.embedder_data.has_value()) {
    auto device_result =
        device_holder->physical_device.createDeviceUnique(device_info);
    if (device_result.result != vk::Result::eSuccess) {
      VALIDATION_LOG << "Could not create logical device.";
      return;
    }
    device_holder->device = std::move(device_result.value);
  } else {
    // Adopt the embedder-provided device; `owned` was cleared above.
    device_holder->device.reset(settings.embedder_data->device);
  }

  if (!caps->SetPhysicalDevice(device_holder->physical_device,
                               *enabled_features)) {
    VALIDATION_LOG << "Capabilities could not be updated.";
    return;
  }

  //----------------------------------------------------------------------------
  /// Create the allocator.
  ///
  auto allocator = std::shared_ptr<AllocatorVK>(new AllocatorVK(
      weak_from_this(),                //
      application_info.apiVersion,     //
      device_holder->physical_device,  //
      device_holder,                   //
      device_holder->instance.get(),   //
      *caps                            //
      ));

  if (!allocator->IsValid()) {
    VALIDATION_LOG << "Could not create memory allocator.";
    return;
  }

  //----------------------------------------------------------------------------
  /// Setup the pipeline library.
  ///
  auto pipeline_library = std::shared_ptr<PipelineLibraryVK>(
      new PipelineLibraryVK(device_holder,                         //
                            caps,                                  //
                            std::move(settings.cache_directory),   //
                            raster_message_loop_->GetTaskRunner()  //
                            ));

  if (!pipeline_library->IsValid()) {
    VALIDATION_LOG << "Could not create pipeline library.";
    return;
  }

  auto sampler_library =
      std::shared_ptr<SamplerLibraryVK>(new SamplerLibraryVK(device_holder));

  auto shader_library = std::shared_ptr<ShaderLibraryVK>(
      new ShaderLibraryVK(device_holder,                   //
                          settings.shader_libraries_data)  //
  );

  if (!shader_library->IsValid()) {
    VALIDATION_LOG << "Could not create shader library.";
    return;
  }

  //----------------------------------------------------------------------------
  /// Create the fence waiter.
  ///
  auto fence_waiter =
      std::shared_ptr<FenceWaiterVK>(new FenceWaiterVK(device_holder));

  //----------------------------------------------------------------------------
  /// Create the resource manager and command pool recycler.
  ///
  auto resource_manager = ResourceManagerVK::Create();
  if (!resource_manager) {
    VALIDATION_LOG << "Could not create resource manager.";
    return;
  }

  auto command_pool_recycler =
      std::make_shared<CommandPoolRecyclerVK>(weak_from_this());
  if (!command_pool_recycler) {
    VALIDATION_LOG << "Could not create command pool recycler.";
    return;
  }

  auto descriptor_pool_recycler =
      std::make_shared<DescriptorPoolRecyclerVK>(weak_from_this());
  if (!descriptor_pool_recycler) {
    VALIDATION_LOG << "Could not create descriptor pool recycler.";
    return;
  }

  //----------------------------------------------------------------------------
  /// Fetch the queues.
  ///
  QueuesVK queues;
  if (!settings.embedder_data.has_value()) {
    queues = QueuesVK::FromQueueIndices(device_holder->device.get(),  //
                                        graphics_queue.value(),       //
                                        compute_queue.value(),        //
                                        transfer_queue.value()        //
    );
  } else {
    queues =
        QueuesVK::FromEmbedderQueue(settings.embedder_data->queue,
                                    settings.embedder_data->queue_family_index);
  }
  if (!queues.IsValid()) {
    VALIDATION_LOG << "Could not fetch device queues.";
    return;
  }

  VkPhysicalDeviceProperties physical_device_properties;
  dispatcher.vkGetPhysicalDeviceProperties(device_holder->physical_device,
                                           &physical_device_properties);

  //----------------------------------------------------------------------------
  /// All done!
  ///

  // Apply workarounds for broken drivers.
  auto driver_info =
      std::make_unique<DriverInfoVK>(device_holder->physical_device);
  workarounds_ = GetWorkaroundsFromDriverInfo(*driver_info);
  caps->ApplyWorkarounds(workarounds_);
  sampler_library->ApplyWorkarounds(workarounds_);

  // Commit everything to members only now that all fallible steps succeeded.
  device_holder_ = std::move(device_holder);
  idle_waiter_vk_ = std::make_shared<IdleWaiterVK>(device_holder_);
  driver_info_ = std::move(driver_info);
  debug_report_ = std::move(debug_report);
  allocator_ = std::move(allocator);
  shader_library_ = std::move(shader_library);
  sampler_library_ = std::move(sampler_library);
  pipeline_library_ = std::move(pipeline_library);
  yuv_conversion_library_ = std::shared_ptr<YUVConversionLibraryVK>(
      new YUVConversionLibraryVK(device_holder_));
  queues_ = std::move(queues);
  device_capabilities_ = std::move(caps);
  fence_waiter_ = std::move(fence_waiter);
  resource_manager_ = std::move(resource_manager);
  command_pool_recycler_ = std::move(command_pool_recycler);
  descriptor_pool_recycler_ = std::move(descriptor_pool_recycler);
  device_name_ = std::string(physical_device_properties.deviceName);
  command_queue_vk_ = std::make_shared<CommandQueueVK>(weak_from_this());
  should_enable_surface_control_ = settings.enable_surface_control;
  should_batch_cmd_buffers_ = !workarounds_.batch_submit_command_buffer_timeout;
  is_valid_ = true;

  // Create the GPU Tracer later because it depends on state from
  // the ContextVK.
  gpu_tracer_ = std::make_shared<GPUTracerVK>(weak_from_this(),
                                              settings.enable_gpu_tracing);
  gpu_tracer_->InitializeQueryPool(*this);

  //----------------------------------------------------------------------------
  /// Label all the relevant objects. This happens after setup so that the
  /// debug messengers have had a chance to be set up.
  ///
  SetDebugName(GetDevice(), device_holder_->device.get(), "ImpellerDevice");
}
503 
505  CapabilitiesVK::Cast(*device_capabilities_).SetOffscreenFormat(pixel_format);
506 }
507 
// |Context|
std::string ContextVK::DescribeGpuModel() const {
  // Cached copy of VkPhysicalDeviceProperties::deviceName captured in Setup().
  return device_name_;
}
512 
bool ContextVK::IsValid() const {
  // True only after Setup() completed every initialization step.
  return is_valid_;
}
516 
std::shared_ptr<Allocator> ContextVK::GetResourceAllocator() const {
  // The AllocatorVK created in Setup(); used to create textures and buffers.
  return allocator_;
}
520 
std::shared_ptr<ShaderLibrary> ContextVK::GetShaderLibrary() const {
  // ShaderLibraryVK built from settings.shader_libraries_data in Setup().
  return shader_library_;
}
524 
std::shared_ptr<SamplerLibrary> ContextVK::GetSamplerLibrary() const {
  // SamplerLibraryVK created in Setup(), with driver workarounds applied.
  return sampler_library_;
}
528 
std::shared_ptr<PipelineLibrary> ContextVK::GetPipelineLibrary() const {
  // PipelineLibraryVK created in Setup(), backed by the worker task runner
  // and the cache directory supplied via Settings.
  return pipeline_library_;
}
532 
// Allocates and begins a one-time-submit command buffer backed by the calling
// thread's recycled command pool, paired with a per-thread descriptor pool and
// a GPU trace probe. Returns nullptr when any backing object cannot be
// obtained or when recording cannot begin.
std::shared_ptr<CommandBuffer> ContextVK::CreateCommandBuffer() const {
  const auto& recycler = GetCommandPoolRecycler();
  // The recycler hands out a pool tied to the calling thread (hence
  // `tls_pool`), so pool usage needs no cross-thread synchronization.
  auto tls_pool = recycler->Get();
  if (!tls_pool) {
    return nullptr;
  }

  // look up a cached descriptor pool for the current frame and reuse it
  // if it exists, otherwise create a new pool.
  std::shared_ptr<DescriptorPoolVK> descriptor_pool;
  {
    // The cache map itself is shared across threads, keyed by thread id.
    Lock lock(desc_pool_mutex_);
    DescriptorPoolMap::iterator current_pool =
        cached_descriptor_pool_.find(std::this_thread::get_id());
    if (current_pool == cached_descriptor_pool_.end()) {
      descriptor_pool =
          (cached_descriptor_pool_[std::this_thread::get_id()] =
               std::make_shared<DescriptorPoolVK>(weak_from_this()));
    } else {
      descriptor_pool = current_pool->second;
    }
  }

  // Tracked objects keep the pools and the probe alive for as long as the
  // command buffer is in flight.
  auto tracked_objects = std::make_shared<TrackedObjectsVK>(
      weak_from_this(), std::move(tls_pool), std::move(descriptor_pool),
      GetGPUTracer()->CreateGPUProbe());
  auto queue = GetGraphicsQueue();

  if (!tracked_objects || !tracked_objects->IsValid() || !queue) {
    return nullptr;
  }

  vk::CommandBufferBeginInfo begin_info;
  begin_info.flags = vk::CommandBufferUsageFlagBits::eOneTimeSubmit;
  if (tracked_objects->GetCommandBuffer().begin(begin_info) !=
      vk::Result::eSuccess) {
    VALIDATION_LOG << "Could not begin command buffer.";
    return nullptr;
  }

  // Record the start-of-buffer marker for GPU tracing.
  tracked_objects->GetGPUProbe().RecordCmdBufferStart(
      tracked_objects->GetCommandBuffer());

  return std::shared_ptr<CommandBufferVK>(new CommandBufferVK(
      shared_from_this(),         //
      GetDeviceHolder(),          //
      std::move(tracked_objects)  //
      ));
}
582 
vk::Instance ContextVK::GetInstance() const {
  // The instance may be owned by this context or adopted from an embedder;
  // either way the holder dereferences to the underlying handle.
  return *device_holder_->instance;
}
586 
const vk::Device& ContextVK::GetDevice() const {
  // Reference into the device holder; valid while this context is alive.
  return device_holder_->device.get();
}
590 
591 const std::shared_ptr<fml::ConcurrentTaskRunner>
593  return raster_message_loop_->GetTaskRunner();
594 }
595 
597  // There are multiple objects, for example |CommandPoolVK|, that in their
598  // destructors make a strong reference to |ContextVK|. Resetting these shared
599  // pointers ensures that cleanup happens in a correct order.
600  //
601  // tl;dr: Without it, we get thread::join failures on shutdown.
602  fence_waiter_.reset();
603  resource_manager_.reset();
604 
605  raster_message_loop_->Terminate();
606 }
607 
std::shared_ptr<SurfaceContextVK> ContextVK::CreateSurfaceContext() {
  // Each surface context shares ownership of this context.
  return std::make_shared<SurfaceContextVK>(shared_from_this());
}
611 
const std::shared_ptr<const Capabilities>& ContextVK::GetCapabilities() const {
  // CapabilitiesVK finalized (and workaround-adjusted) during Setup().
  return device_capabilities_;
}
615 
const std::shared_ptr<QueueVK>& ContextVK::GetGraphicsQueue() const {
  // Graphics queue picked (or adopted from the embedder) during Setup().
  return queues_.graphics_queue;
}
619 
vk::PhysicalDevice ContextVK::GetPhysicalDevice() const {
  // Physical device selected (or supplied by the embedder) during Setup().
  return device_holder_->physical_device;
}
623 
std::shared_ptr<FenceWaiterVK> ContextVK::GetFenceWaiter() const {
  // Fence waiter created during Setup(); reset explicitly in Shutdown().
  return fence_waiter_;
}
627 
std::shared_ptr<ResourceManagerVK> ContextVK::GetResourceManager() const {
  // Resource manager created during Setup(); reset explicitly in Shutdown().
  return resource_manager_;
}
631 
std::shared_ptr<CommandPoolRecyclerVK> ContextVK::GetCommandPoolRecycler()
    const {
  // Hands out per-thread command pools; see CreateCommandBuffer().
  return command_pool_recycler_;
}
636 
std::shared_ptr<GPUTracerVK> ContextVK::GetGPUTracer() const {
  // GPU tracer created at the end of Setup(); supplies probes for command
  // buffer timing.
  return gpu_tracer_;
}
640 
std::shared_ptr<DescriptorPoolRecyclerVK> ContextVK::GetDescriptorPoolRecycler()
    const {
  // Recycler backing the per-thread descriptor pools cached in
  // cached_descriptor_pool_.
  return descriptor_pool_recycler_;
}
645 
std::shared_ptr<CommandQueue> ContextVK::GetCommandQueue() const {
  // The queue used for submitting command buffers (see EnqueueCommandBuffer).
  return command_queue_vk_;
}
649 
651  std::shared_ptr<CommandBuffer> command_buffer) {
652  if (should_batch_cmd_buffers_) {
653  pending_command_buffers_.push_back(std::move(command_buffer));
654  return true;
655  } else {
656  return GetCommandQueue()->Submit({command_buffer}).ok();
657  }
658 }
659 
661  if (pending_command_buffers_.empty()) {
662  return true;
663  }
664 
665  if (should_batch_cmd_buffers_) {
666  bool result = GetCommandQueue()->Submit(pending_command_buffers_).ok();
667  pending_command_buffers_.clear();
668  return result;
669  } else {
670  return true;
671  }
672 }
673 
674 // Creating a render pass is observed to take an additional 6ms on a Pixel 7
675 // device as the driver will lazily bootstrap and compile shaders to do so.
676 // The render pass does not need to be begun or executed.
679  RenderTarget render_target =
680  rt_allocator.CreateOffscreenMSAA(*this, {1, 1}, 1);
681 
682  RenderPassBuilderVK builder;
683 
684  render_target.IterateAllColorAttachments(
685  [&builder](size_t index, const ColorAttachment& attachment) -> bool {
686  builder.SetColorAttachment(
687  index, //
688  attachment.texture->GetTextureDescriptor().format, //
689  attachment.texture->GetTextureDescriptor().sample_count, //
690  attachment.load_action, //
691  attachment.store_action //
692  );
693  return true;
694  });
695 
696  if (auto depth = render_target.GetDepthAttachment(); depth.has_value()) {
698  depth->texture->GetTextureDescriptor().format, //
699  depth->texture->GetTextureDescriptor().sample_count, //
700  depth->load_action, //
701  depth->store_action //
702  );
703  } else if (auto stencil = render_target.GetStencilAttachment();
704  stencil.has_value()) {
705  builder.SetStencilAttachment(
706  stencil->texture->GetTextureDescriptor().format, //
707  stencil->texture->GetTextureDescriptor().sample_count, //
708  stencil->load_action, //
709  stencil->store_action //
710  );
711  }
712 
713  auto pass = builder.Build(GetDevice());
714 }
715 
717  {
718  Lock lock(desc_pool_mutex_);
719  cached_descriptor_pool_.erase(std::this_thread::get_id());
720  }
721  command_pool_recycler_->Dispose();
722 }
723 
724 const std::shared_ptr<YUVConversionLibraryVK>&
726  return yuv_conversion_library_;
727 }
728 
const std::unique_ptr<DriverInfoVK>& ContextVK::GetDriverInfo() const {
  // Driver identification gathered in Setup(); used to derive workarounds_.
  return driver_info_;
}
732 
734  return should_enable_surface_control_;
735 }
736 
739 }
740 
bool ContextVK::SubmitOnscreen(std::shared_ptr<CommandBuffer> cmd_buffer) {
  // Onscreen submissions follow the same batching policy as any other
  // command buffer (see EnqueueCommandBuffer).
  return EnqueueCommandBuffer(std::move(cmd_buffer));
}
744 
746  return workarounds_;
747 }
748 
749 } // namespace impeller
static CapabilitiesVK & Cast(Capabilities &base)
Definition: backend_cast.h:13
The Vulkan layers and extensions wrangler.
void SetOffscreenFormat(PixelFormat pixel_format) const
std::optional< PhysicalDeviceFeatures > GetEnabledDeviceFeatures(const vk::PhysicalDevice &physical_device) const
static void DestroyThreadLocalPools(const ContextVK *context)
Clean up resources held by all per-thread command pools associated with the given context.
void SetOffscreenFormat(PixelFormat pixel_format)
Definition: context_vk.cc:504
std::shared_ptr< Allocator > GetResourceAllocator() const override
Returns the allocator used to create textures and buffers on the device.
Definition: context_vk.cc:517
std::shared_ptr< ResourceManagerVK > GetResourceManager() const
Definition: context_vk.cc:628
vk::PhysicalDevice GetPhysicalDevice() const
Definition: context_vk.cc:620
const std::shared_ptr< YUVConversionLibraryVK > & GetYUVConversionLibrary() const
Definition: context_vk.cc:725
bool SetDebugName(T handle, std::string_view label) const
Definition: context_vk.h:150
bool EnqueueCommandBuffer(std::shared_ptr< CommandBuffer > command_buffer) override
Enqueue command_buffer for submission by the end of the frame.
Definition: context_vk.cc:650
const vk::Device & GetDevice() const
Definition: context_vk.cc:587
bool FlushCommandBuffers() override
Flush all pending command buffers.
Definition: context_vk.cc:660
bool IsValid() const override
Determines if a context is valid. If the caller ever receives an invalid context, they must discard it and create a new one.
Definition: context_vk.cc:513
const std::unique_ptr< DriverInfoVK > & GetDriverInfo() const
Definition: context_vk.cc:729
void DisposeThreadLocalCachedResources() override
Definition: context_vk.cc:716
std::shared_ptr< CommandBuffer > CreateCommandBuffer() const override
Create a new command buffer. Command buffers can be used to encode graphics, blit,...
Definition: context_vk.cc:533
virtual bool SubmitOnscreen(std::shared_ptr< CommandBuffer > cmd_buffer) override
Submit the command buffer that renders to the onscreen surface.
Definition: context_vk.cc:741
std::shared_ptr< SamplerLibrary > GetSamplerLibrary() const override
Returns the library of combined image samplers used in shaders.
Definition: context_vk.cc:525
static std::shared_ptr< ContextVK > Create(Settings settings)
Definition: context_vk.cc:105
std::shared_ptr< PipelineLibrary > GetPipelineLibrary() const override
Returns the library of pipelines used by render or compute commands.
Definition: context_vk.cc:529
const std::shared_ptr< QueueVK > & GetGraphicsQueue() const
Definition: context_vk.cc:616
const std::shared_ptr< const Capabilities > & GetCapabilities() const override
Get the capabilities of Impeller context. All optionally supported feature of the platform,...
Definition: context_vk.cc:612
RuntimeStageBackend GetRuntimeStageBackend() const override
Retrieve the runtime stage for this context type.
Definition: context_vk.cc:737
std::shared_ptr< CommandPoolRecyclerVK > GetCommandPoolRecycler() const
Definition: context_vk.cc:632
std::shared_ptr< CommandQueue > GetCommandQueue() const override
Return the graphics queue for submitting command buffers.
Definition: context_vk.cc:646
void InitializeCommonlyUsedShadersIfNeeded() const override
Definition: context_vk.cc:677
std::shared_ptr< FenceWaiterVK > GetFenceWaiter() const
Definition: context_vk.cc:624
bool GetShouldEnableSurfaceControlSwapchain() const
Whether the Android Surface control based swapchain should be enabled.
Definition: context_vk.cc:733
std::shared_ptr< GPUTracerVK > GetGPUTracer() const
Definition: context_vk.cc:637
BackendType GetBackendType() const override
Get the graphics backend of an Impeller context.
Definition: context_vk.cc:139
~ContextVK() override
Definition: context_vk.cc:132
std::string DescribeGpuModel() const override
Definition: context_vk.cc:509
const WorkaroundsVK & GetWorkarounds() const
Definition: context_vk.cc:745
const std::shared_ptr< fml::ConcurrentTaskRunner > GetConcurrentWorkerTaskRunner() const
Definition: context_vk.cc:592
static size_t ChooseThreadCountForWorkers(size_t hardware_concurrency)
Definition: context_vk.cc:115
std::shared_ptr< ShaderLibrary > GetShaderLibrary() const override
Returns the library of shaders used to specify the programmable stages of a pipeline.
Definition: context_vk.cc:521
vk::Instance GetInstance() const
Definition: context_vk.cc:583
std::shared_ptr< DeviceHolderVK > GetDeviceHolder() const
Definition: context_vk.h:190
void Shutdown() override
Force all pending asynchronous work to finish. This is achieved by deleting all owned concurrent message loops.
Definition: context_vk.cc:596
std::shared_ptr< DescriptorPoolRecyclerVK > GetDescriptorPoolRecycler() const
Definition: context_vk.cc:641
std::shared_ptr< SurfaceContextVK > CreateSurfaceContext()
Definition: context_vk.cc:608
RenderPassBuilderVK & SetDepthStencilAttachment(PixelFormat format, SampleCount sample_count, LoadAction load_action, StoreAction store_action)
RenderPassBuilderVK & SetStencilAttachment(PixelFormat format, SampleCount sample_count, LoadAction load_action, StoreAction store_action)
RenderPassBuilderVK & SetColorAttachment(size_t index, PixelFormat format, SampleCount sample_count, LoadAction load_action, StoreAction store_action, vk::ImageLayout current_layout=vk::ImageLayout::eUndefined)
vk::UniqueRenderPass Build(const vk::Device &device) const
a wrapper around the impeller [Allocator] instance that can be used to provide caching of allocated resources.
virtual RenderTarget CreateOffscreenMSAA(const Context &context, ISize size, int mip_count, std::string_view label="Offscreen MSAA", RenderTarget::AttachmentConfigMSAA color_attachment_config=RenderTarget::kDefaultColorAttachmentConfigMSAA, std::optional< RenderTarget::AttachmentConfig > stencil_attachment_config=RenderTarget::kDefaultStencilAttachmentConfig, const std::shared_ptr< Texture > &existing_color_msaa_texture=nullptr, const std::shared_ptr< Texture > &existing_color_resolve_texture=nullptr, const std::shared_ptr< Texture > &existing_depth_stencil_texture=nullptr)
bool IterateAllColorAttachments(const std::function< bool(size_t index, const ColorAttachment &attachment)> &iterator) const
const std::optional< DepthAttachment > & GetDepthAttachment() const
const std::optional< StencilAttachment > & GetStencilAttachment() const
static std::shared_ptr< ResourceManagerVK > Create()
Creates a shared resource manager (a dedicated thread).
int32_t value
ScopedObject< Object > Create(CtorArgs &&... args)
Definition: object.h:160
bool HasValidationLayers()
Definition: context_vk.cc:53
static std::optional< QueueIndexVK > PickQueue(const vk::PhysicalDevice &device, vk::QueueFlagBits flags)
Definition: context_vk.cc:91
static std::optional< vk::PhysicalDevice > PickPhysicalDevice(const CapabilitiesVK &caps, const vk::Instance &instance)
Definition: context_vk.cc:57
PixelFormat
The Pixel formats supported by Impeller. The naming convention denotes the usage of the component,...
Definition: formats.h:99
static bool gHasValidationLayers
Definition: context_vk.cc:51
WorkaroundsVK GetWorkaroundsFromDriverInfo(DriverInfoVK &driver_info)
static std::vector< vk::DeviceQueueCreateInfo > GetQueueCreateInfos(std::initializer_list< QueueIndexVK > queues)
Definition: context_vk.cc:68
LoadAction load_action
Definition: formats.h:659
std::shared_ptr< Texture > texture
Definition: formats.h:657
StoreAction store_action
Definition: formats.h:660
static QueuesVK FromEmbedderQueue(vk::Queue queue, uint32_t queue_family_index)
Definition: queue_vk.cc:58
static QueuesVK FromQueueIndices(const vk::Device &device, QueueIndexVK graphics, QueueIndexVK compute, QueueIndexVK transfer)
Definition: queue_vk.cc:67
std::shared_ptr< QueueVK > graphics_queue
Definition: queue_vk.h:64
A non-exhaustive set of driver specific workarounds.
#define VALIDATION_LOG
Definition: validation.h:91