Flutter Impeller
context_vk.cc
Go to the documentation of this file.
1 // Copyright 2013 The Flutter Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
6 
7 #include "fml/concurrent_message_loop.h"
11 
12 #ifdef FML_OS_ANDROID
13 #include <pthread.h>
14 #include <sys/resource.h>
15 #include <sys/time.h>
16 #endif // FML_OS_ANDROID
17 
18 #include <map>
19 #include <memory>
20 #include <optional>
21 #include <string>
22 #include <vector>
23 
24 #include "flutter/fml/cpu_affinity.h"
25 #include "flutter/fml/trace_event.h"
40 
41 VULKAN_HPP_DEFAULT_DISPATCH_LOADER_DYNAMIC_STORAGE
42 
43 namespace impeller {
44 
45 // TODO(csg): Fix this after caps are reworked.
46 static bool gHasValidationLayers = false;
47 
49  return gHasValidationLayers;
50 }
51 
52 static std::optional<vk::PhysicalDevice> PickPhysicalDevice(
53  const CapabilitiesVK& caps,
54  const vk::Instance& instance) {
55  for (const auto& device : instance.enumeratePhysicalDevices().value) {
56  if (caps.GetEnabledDeviceFeatures(device).has_value()) {
57  return device;
58  }
59  }
60  return std::nullopt;
61 }
62 
63 static std::vector<vk::DeviceQueueCreateInfo> GetQueueCreateInfos(
64  std::initializer_list<QueueIndexVK> queues) {
65  std::map<size_t /* family */, size_t /* index */> family_index_map;
66  for (const auto& queue : queues) {
67  family_index_map[queue.family] = 0;
68  }
69  for (const auto& queue : queues) {
70  auto value = family_index_map[queue.family];
71  family_index_map[queue.family] = std::max(value, queue.index);
72  }
73 
74  static float kQueuePriority = 1.0f;
75  std::vector<vk::DeviceQueueCreateInfo> infos;
76  for (const auto& item : family_index_map) {
77  vk::DeviceQueueCreateInfo info;
78  info.setQueueFamilyIndex(item.first);
79  info.setQueueCount(item.second + 1);
80  info.setQueuePriorities(kQueuePriority);
81  infos.push_back(info);
82  }
83  return infos;
84 }
85 
86 static std::optional<QueueIndexVK> PickQueue(const vk::PhysicalDevice& device,
87  vk::QueueFlagBits flags) {
88  // This can be modified to ensure that dedicated queues are returned for each
89  // queue type depending on support.
90  const auto families = device.getQueueFamilyProperties();
91  for (size_t i = 0u; i < families.size(); i++) {
92  if (!(families[i].queueFlags & flags)) {
93  continue;
94  }
95  return QueueIndexVK{.family = i, .index = 0};
96  }
97  return std::nullopt;
98 }
99 
100 std::shared_ptr<ContextVK> ContextVK::Create(Settings settings) {
101  auto context = std::shared_ptr<ContextVK>(new ContextVK());
102  context->Setup(std::move(settings));
103  if (!context->IsValid()) {
104  return nullptr;
105  }
106  return context;
107 }
108 
109 // static
110 size_t ContextVK::ChooseThreadCountForWorkers(size_t hardware_concurrency) {
111  // Never create more than 4 worker threads. Attempt to use up to
112  // half of the available concurrency.
113  return std::clamp(hardware_concurrency / 2ull, /*lo=*/1ull, /*hi=*/4ull);
114 }
115 
namespace {
// Per-thread monotonic counter backing CalculateHash().
thread_local uint64_t tls_context_count = 0;

// Produces a value unique to this thread for every call; the pointer argument
// is intentionally unused.
//
// You could make a context once per nanosecond for 584 years on one thread
// before this overflows.
uint64_t CalculateHash(void* ptr) {
  tls_context_count += 1;
  return tls_context_count;
}
}  // namespace
124 
125 ContextVK::ContextVK() : hash_(CalculateHash(this)) {}
126 
128  if (device_holder_ && device_holder_->device) {
129  [[maybe_unused]] auto result = device_holder_->device->waitIdle();
130  }
132 }
133 
136 }
137 
138 void ContextVK::Setup(Settings settings) {
139  TRACE_EVENT0("impeller", "ContextVK::Setup");
140 
141  if (!settings.proc_address_callback) {
142  return;
143  }
144 
145  raster_message_loop_ = fml::ConcurrentMessageLoop::Create(
146  ChooseThreadCountForWorkers(std::thread::hardware_concurrency()));
147  raster_message_loop_->PostTaskToAllWorkers([]() {
148  // Currently we only use the worker task pool for small parts of a frame
149  // workload, if this changes this setting may need to be adjusted.
150  fml::RequestAffinity(fml::CpuAffinity::kNotPerformance);
151 #ifdef FML_OS_ANDROID
152  if (::setpriority(PRIO_PROCESS, gettid(), -5) != 0) {
153  FML_LOG(ERROR) << "Failed to set Workers task runner priority";
154  }
155 #endif // FML_OS_ANDROID
156  });
157 
158  auto& dispatcher = VULKAN_HPP_DEFAULT_DISPATCHER;
159  dispatcher.init(settings.proc_address_callback);
160 
161  // Enable Vulkan validation if either:
162  // 1. The user has explicitly enabled it.
163  // 2. We are in a combination of debug mode, and running on Android.
164  // (It's possible 2 is overly conservative and we can simplify this)
165  auto enable_validation = settings.enable_validation;
166 
167 #if defined(FML_OS_ANDROID) && !defined(NDEBUG)
168  enable_validation = true;
169 #endif
170 
171  auto caps = std::shared_ptr<CapabilitiesVK>(new CapabilitiesVK(
172  enable_validation, settings.fatal_missing_validations));
173 
174  if (!caps->IsValid()) {
175  VALIDATION_LOG << "Could not determine device capabilities.";
176  return;
177  }
178 
179  gHasValidationLayers = caps->AreValidationsEnabled();
180 
181  auto enabled_layers = caps->GetEnabledLayers();
182  auto enabled_extensions = caps->GetEnabledInstanceExtensions();
183 
184  if (!enabled_layers.has_value() || !enabled_extensions.has_value()) {
185  VALIDATION_LOG << "Device has insufficient capabilities.";
186  return;
187  }
188 
189  vk::InstanceCreateFlags instance_flags = {};
190 
191  if (std::find(enabled_extensions.value().begin(),
192  enabled_extensions.value().end(),
193  "VK_KHR_portability_enumeration") !=
194  enabled_extensions.value().end()) {
195  instance_flags |= vk::InstanceCreateFlagBits::eEnumeratePortabilityKHR;
196  }
197 
198  std::vector<const char*> enabled_layers_c;
199  std::vector<const char*> enabled_extensions_c;
200 
201  for (const auto& layer : enabled_layers.value()) {
202  enabled_layers_c.push_back(layer.c_str());
203  }
204 
205  for (const auto& ext : enabled_extensions.value()) {
206  enabled_extensions_c.push_back(ext.c_str());
207  }
208 
209  vk::ApplicationInfo application_info;
210  application_info.setApplicationVersion(VK_API_VERSION_1_0);
211  application_info.setApiVersion(VK_API_VERSION_1_1);
212  application_info.setEngineVersion(VK_API_VERSION_1_0);
213  application_info.setPEngineName("Impeller");
214  application_info.setPApplicationName("Impeller");
215 
216  vk::StructureChain<vk::InstanceCreateInfo, vk::ValidationFeaturesEXT>
217  instance_chain;
218 
219  if (!caps->AreValidationsEnabled()) {
220  instance_chain.unlink<vk::ValidationFeaturesEXT>();
221  }
222 
223  std::vector<vk::ValidationFeatureEnableEXT> enabled_validations = {
224  vk::ValidationFeatureEnableEXT::eSynchronizationValidation,
225  };
226 
227  auto validation = instance_chain.get<vk::ValidationFeaturesEXT>();
228  validation.setEnabledValidationFeatures(enabled_validations);
229 
230  auto instance_info = instance_chain.get<vk::InstanceCreateInfo>();
231  instance_info.setPEnabledLayerNames(enabled_layers_c);
232  instance_info.setPEnabledExtensionNames(enabled_extensions_c);
233  instance_info.setPApplicationInfo(&application_info);
234  instance_info.setFlags(instance_flags);
235 
236  auto device_holder = std::make_shared<DeviceHolderImpl>();
237  {
238  auto instance = vk::createInstanceUnique(instance_info);
239  if (instance.result != vk::Result::eSuccess) {
240  VALIDATION_LOG << "Could not create Vulkan instance: "
241  << vk::to_string(instance.result);
242  return;
243  }
244  device_holder->instance = std::move(instance.value);
245  }
246  dispatcher.init(device_holder->instance.get());
247 
248  //----------------------------------------------------------------------------
249  /// Setup the debug report.
250  ///
251  /// Do this as early as possible since we could use the debug report from
252  /// initialization issues.
253  ///
254  auto debug_report =
255  std::make_unique<DebugReportVK>(*caps, device_holder->instance.get());
256 
257  if (!debug_report->IsValid()) {
258  VALIDATION_LOG << "Could not set up debug report.";
259  return;
260  }
261 
262  //----------------------------------------------------------------------------
263  /// Pick the physical device.
264  ///
265  {
266  auto physical_device =
267  PickPhysicalDevice(*caps, device_holder->instance.get());
268  if (!physical_device.has_value()) {
269  VALIDATION_LOG << "No valid Vulkan device found.";
270  return;
271  }
272  device_holder->physical_device = physical_device.value();
273  }
274 
275  //----------------------------------------------------------------------------
276  /// Pick device queues.
277  ///
278  auto graphics_queue =
279  PickQueue(device_holder->physical_device, vk::QueueFlagBits::eGraphics);
280  auto transfer_queue =
281  PickQueue(device_holder->physical_device, vk::QueueFlagBits::eTransfer);
282  auto compute_queue =
283  PickQueue(device_holder->physical_device, vk::QueueFlagBits::eCompute);
284 
285  if (!graphics_queue.has_value()) {
286  VALIDATION_LOG << "Could not pick graphics queue.";
287  return;
288  }
289  if (!transfer_queue.has_value()) {
290  transfer_queue = graphics_queue.value();
291  }
292  if (!compute_queue.has_value()) {
293  VALIDATION_LOG << "Could not pick compute queue.";
294  return;
295  }
296 
297  //----------------------------------------------------------------------------
298  /// Create the logical device.
299  ///
300  auto enabled_device_extensions =
301  caps->GetEnabledDeviceExtensions(device_holder->physical_device);
302  if (!enabled_device_extensions.has_value()) {
303  // This shouldn't happen since we already did device selection. But
304  // doesn't hurt to check again.
305  return;
306  }
307 
308  std::vector<const char*> enabled_device_extensions_c;
309  for (const auto& ext : enabled_device_extensions.value()) {
310  enabled_device_extensions_c.push_back(ext.c_str());
311  }
312 
313  const auto queue_create_infos = GetQueueCreateInfos(
314  {graphics_queue.value(), compute_queue.value(), transfer_queue.value()});
315 
316  const auto enabled_features =
317  caps->GetEnabledDeviceFeatures(device_holder->physical_device);
318  if (!enabled_features.has_value()) {
319  // This shouldn't happen since the device can't be picked if this was not
320  // true. But doesn't hurt to check.
321  return;
322  }
323 
324  vk::DeviceCreateInfo device_info;
325 
326  device_info.setPNext(&enabled_features.value().get());
327  device_info.setQueueCreateInfos(queue_create_infos);
328  device_info.setPEnabledExtensionNames(enabled_device_extensions_c);
329  // Device layers are deprecated and ignored.
330 
331  {
332  auto device_result =
333  device_holder->physical_device.createDeviceUnique(device_info);
334  if (device_result.result != vk::Result::eSuccess) {
335  VALIDATION_LOG << "Could not create logical device.";
336  return;
337  }
338  device_holder->device = std::move(device_result.value);
339  }
340 
341  if (!caps->SetPhysicalDevice(device_holder->physical_device)) {
342  VALIDATION_LOG << "Capabilities could not be updated.";
343  return;
344  }
345 
346  //----------------------------------------------------------------------------
347  /// Create the allocator.
348  ///
349  auto allocator = std::shared_ptr<AllocatorVK>(new AllocatorVK(
350  weak_from_this(), //
351  application_info.apiVersion, //
352  device_holder->physical_device, //
353  device_holder, //
354  device_holder->instance.get(), //
355  *caps //
356  ));
357 
358  if (!allocator->IsValid()) {
359  VALIDATION_LOG << "Could not create memory allocator.";
360  return;
361  }
362 
363  //----------------------------------------------------------------------------
364  /// Setup the pipeline library.
365  ///
366  auto pipeline_library = std::shared_ptr<PipelineLibraryVK>(
367  new PipelineLibraryVK(device_holder, //
368  caps, //
369  std::move(settings.cache_directory), //
370  raster_message_loop_->GetTaskRunner() //
371  ));
372 
373  if (!pipeline_library->IsValid()) {
374  VALIDATION_LOG << "Could not create pipeline library.";
375  return;
376  }
377 
378  auto sampler_library =
379  std::shared_ptr<SamplerLibraryVK>(new SamplerLibraryVK(device_holder));
380 
381  auto shader_library = std::shared_ptr<ShaderLibraryVK>(
382  new ShaderLibraryVK(device_holder, //
383  settings.shader_libraries_data) //
384  );
385 
386  if (!shader_library->IsValid()) {
387  VALIDATION_LOG << "Could not create shader library.";
388  return;
389  }
390 
391  //----------------------------------------------------------------------------
392  /// Create the fence waiter.
393  ///
394  auto fence_waiter =
395  std::shared_ptr<FenceWaiterVK>(new FenceWaiterVK(device_holder));
396 
397  //----------------------------------------------------------------------------
398  /// Create the resource manager and command pool recycler.
399  ///
400  auto resource_manager = ResourceManagerVK::Create();
401  if (!resource_manager) {
402  VALIDATION_LOG << "Could not create resource manager.";
403  return;
404  }
405 
406  auto command_pool_recycler =
407  std::make_shared<CommandPoolRecyclerVK>(weak_from_this());
408  if (!command_pool_recycler) {
409  VALIDATION_LOG << "Could not create command pool recycler.";
410  return;
411  }
412 
413  auto descriptor_pool_recycler =
414  std::make_shared<DescriptorPoolRecyclerVK>(weak_from_this());
415  if (!descriptor_pool_recycler) {
416  VALIDATION_LOG << "Could not create descriptor pool recycler.";
417  return;
418  }
419 
420  //----------------------------------------------------------------------------
421  /// Fetch the queues.
422  ///
423  QueuesVK queues(device_holder->device.get(), //
424  graphics_queue.value(), //
425  compute_queue.value(), //
426  transfer_queue.value() //
427  );
428  if (!queues.IsValid()) {
429  VALIDATION_LOG << "Could not fetch device queues.";
430  return;
431  }
432 
433  VkPhysicalDeviceProperties physical_device_properties;
434  dispatcher.vkGetPhysicalDeviceProperties(device_holder->physical_device,
435  &physical_device_properties);
436 
437  //----------------------------------------------------------------------------
438  /// All done!
439  ///
440  device_holder_ = std::move(device_holder);
441  driver_info_ =
442  std::make_unique<DriverInfoVK>(device_holder_->physical_device);
443  debug_report_ = std::move(debug_report);
444  allocator_ = std::move(allocator);
445  shader_library_ = std::move(shader_library);
446  sampler_library_ = std::move(sampler_library);
447  pipeline_library_ = std::move(pipeline_library);
448  yuv_conversion_library_ = std::shared_ptr<YUVConversionLibraryVK>(
449  new YUVConversionLibraryVK(device_holder_));
450  queues_ = std::move(queues);
451  device_capabilities_ = std::move(caps);
452  fence_waiter_ = std::move(fence_waiter);
453  resource_manager_ = std::move(resource_manager);
454  command_pool_recycler_ = std::move(command_pool_recycler);
455  descriptor_pool_recycler_ = std::move(descriptor_pool_recycler);
456  device_name_ = std::string(physical_device_properties.deviceName);
457  command_queue_vk_ = std::make_shared<CommandQueueVK>(weak_from_this());
458  is_valid_ = true;
459 
460  // Create the GPU Tracer later because it depends on state from
461  // the ContextVK.
462  gpu_tracer_ = std::make_shared<GPUTracerVK>(weak_from_this(),
463  settings.enable_gpu_tracing);
464  gpu_tracer_->InitializeQueryPool(*this);
465 
466  //----------------------------------------------------------------------------
467  /// Label all the relevant objects. This happens after setup so that the
468  /// debug messengers have had a chance to be set up.
469  ///
470  SetDebugName(GetDevice(), device_holder_->device.get(), "ImpellerDevice");
471 }
472 
474  CapabilitiesVK::Cast(*device_capabilities_).SetOffscreenFormat(pixel_format);
475 }
476 
// |Context|
std::string ContextVK::DescribeGpuModel() const {
  // Cached from VkPhysicalDeviceProperties::deviceName during Setup().
  return device_name_;
}
481 
bool ContextVK::IsValid() const {
  // Only set to true at the very end of a fully successful Setup().
  return is_valid_;
}
485 
// Returns the device allocator created during Setup().
std::shared_ptr<Allocator> ContextVK::GetResourceAllocator() const {
  return allocator_;
}
489 
// Returns the shader library populated from settings.shader_libraries_data.
std::shared_ptr<ShaderLibrary> ContextVK::GetShaderLibrary() const {
  return shader_library_;
}
493 
// Returns the sampler library created during Setup().
std::shared_ptr<SamplerLibrary> ContextVK::GetSamplerLibrary() const {
  return sampler_library_;
}
497 
// Returns the pipeline library created during Setup().
std::shared_ptr<PipelineLibrary> ContextVK::GetPipelineLibrary() const {
  return pipeline_library_;
}
501 
// Creates a fresh command buffer backed by this context, each with its own
// encoder factory.
// NOTE(review): raw `new` rather than std::make_shared — presumably because
// the CommandBufferVK constructor is not publicly accessible; confirm before
// changing.
std::shared_ptr<CommandBuffer> ContextVK::CreateCommandBuffer() const {
  return std::shared_ptr<CommandBufferVK>(
      new CommandBufferVK(shared_from_this(),  //
                          CreateGraphicsCommandEncoderFactory())  //
  );
}
508 
// Returns a non-owning handle; the unique instance stays owned by the device
// holder.
vk::Instance ContextVK::GetInstance() const {
  return *device_holder_->instance;
}
512 
// Returns the logical device created during Setup().
const vk::Device& ContextVK::GetDevice() const {
  return device_holder_->device.get();
}
516 
517 const std::shared_ptr<fml::ConcurrentTaskRunner>
519  return raster_message_loop_->GetTaskRunner();
520 }
521 
523  // There are multiple objects, for example |CommandPoolVK|, that in their
524  // destructors make a strong reference to |ContextVK|. Resetting these shared
525  // pointers ensures that cleanup happens in a correct order.
526  //
527  // tl;dr: Without it, we get thread::join failures on shutdown.
528  fence_waiter_.reset();
529  resource_manager_.reset();
530 
531  raster_message_loop_->Terminate();
532 }
533 
// Creates a surface context that shares ownership of this context.
std::shared_ptr<SurfaceContextVK> ContextVK::CreateSurfaceContext() {
  return std::make_shared<SurfaceContextVK>(shared_from_this());
}
537 
// Returns the capabilities wrangler established during Setup().
const std::shared_ptr<const Capabilities>& ContextVK::GetCapabilities() const {
  return device_capabilities_;
}
541 
// Returns the graphics queue fetched from the logical device during Setup().
const std::shared_ptr<QueueVK>& ContextVK::GetGraphicsQueue() const {
  return queues_.graphics_queue;
}
545 
// Returns the physical device selected during Setup() (non-owning handle).
vk::PhysicalDevice ContextVK::GetPhysicalDevice() const {
  return device_holder_->physical_device;
}
549 
// Returns the fence waiter created during Setup().
std::shared_ptr<FenceWaiterVK> ContextVK::GetFenceWaiter() const {
  return fence_waiter_;
}
553 
// Returns the resource manager created during Setup(); reset in Shutdown().
std::shared_ptr<ResourceManagerVK> ContextVK::GetResourceManager() const {
  return resource_manager_;
}
557 
// Returns the command pool recycler created during Setup().
std::shared_ptr<CommandPoolRecyclerVK> ContextVK::GetCommandPoolRecycler()
    const {
  return command_pool_recycler_;
}
562 
// Produces a new encoder factory holding only a weak reference to this
// context.
std::unique_ptr<CommandEncoderFactoryVK>
ContextVK::CreateGraphicsCommandEncoderFactory() const {
  return std::make_unique<CommandEncoderFactoryVK>(weak_from_this());
}
567 
// Returns the GPU tracer; created at the very end of Setup() since it depends
// on context state.
std::shared_ptr<GPUTracerVK> ContextVK::GetGPUTracer() const {
  return gpu_tracer_;
}
571 
// Returns the descriptor pool recycler created during Setup().
std::shared_ptr<DescriptorPoolRecyclerVK> ContextVK::GetDescriptorPoolRecycler()
    const {
  return descriptor_pool_recycler_;
}
576 
// Returns the command queue abstraction used to submit command buffers.
std::shared_ptr<CommandQueue> ContextVK::GetCommandQueue() const {
  return command_queue_vk_;
}
580 
581 // Creating a render pass is observed to take an additional 6ms on a Pixel 7
582 // device as the driver will lazily bootstrap and compile shaders to do so.
583 // The render pass does not need to be begun or executed.
586  RenderTarget render_target =
587  rt_allocator.CreateOffscreenMSAA(*this, {1, 1}, 1);
588 
589  RenderPassBuilderVK builder;
590  for (const auto& [bind_point, color] : render_target.GetColorAttachments()) {
591  builder.SetColorAttachment(
592  bind_point, //
593  color.texture->GetTextureDescriptor().format, //
594  color.texture->GetTextureDescriptor().sample_count, //
595  color.load_action, //
596  color.store_action //
597  );
598  }
599 
600  if (auto depth = render_target.GetDepthAttachment(); depth.has_value()) {
602  depth->texture->GetTextureDescriptor().format, //
603  depth->texture->GetTextureDescriptor().sample_count, //
604  depth->load_action, //
605  depth->store_action //
606  );
607  } else if (auto stencil = render_target.GetStencilAttachment();
608  stencil.has_value()) {
609  builder.SetStencilAttachment(
610  stencil->texture->GetTextureDescriptor().format, //
611  stencil->texture->GetTextureDescriptor().sample_count, //
612  stencil->load_action, //
613  stencil->store_action //
614  );
615  }
616 
617  auto pass = builder.Build(GetDevice());
618 }
619 
620 const std::shared_ptr<YUVConversionLibraryVK>&
622  return yuv_conversion_library_;
623 }
624 
// Returns driver information derived from the physical device at the end of
// Setup().
const std::unique_ptr<DriverInfoVK>& ContextVK::GetDriverInfo() const {
  return driver_info_;
}
628 
629 } // namespace impeller
impeller::ContextVK::GetCapabilities
const std::shared_ptr< const Capabilities > & GetCapabilities() const override
Get the capabilities of Impeller context. All optionally supported feature of the platform,...
Definition: context_vk.cc:538
impeller::ContextVK::GetConcurrentWorkerTaskRunner
const std::shared_ptr< fml::ConcurrentTaskRunner > GetConcurrentWorkerTaskRunner() const
Definition: context_vk.cc:518
fence_waiter_vk.h
impeller::CapabilitiesVK::SetOffscreenFormat
void SetOffscreenFormat(PixelFormat pixel_format) const
Definition: capabilities_vk.cc:433
gpu_tracer_vk.h
impeller::ResourceManagerVK::Create
static std::shared_ptr< ResourceManagerVK > Create()
Creates a shared resource manager (a dedicated thread).
Definition: resource_manager_vk.cc:14
impeller::QueueIndexVK
Definition: queue_vk.h:15
allocator_vk.h
impeller::gHasValidationLayers
static bool gHasValidationLayers
Definition: context_vk.cc:46
impeller::CommandPoolRecyclerVK::DestroyThreadLocalPools
static void DestroyThreadLocalPools(const ContextVK *context)
Clean up resources held by all per-thread command pools associated with the given context.
Definition: command_pool_vk.cc:285
impeller::ContextVK::GetCommandQueue
std::shared_ptr< CommandQueue > GetCommandQueue() const override
Return the graphics queue for submitting command buffers.
Definition: context_vk.cc:577
impeller::Context::BackendType
BackendType
Definition: context.h:47
impeller::ContextVK::IsValid
bool IsValid() const override
Determines if a context is valid. If the caller ever receives an invalid context, they must discard i...
Definition: context_vk.cc:482
impeller::ContextVK::GetInstance
vk::Instance GetInstance() const
Definition: context_vk.cc:509
impeller::PickPhysicalDevice
static std::optional< vk::PhysicalDevice > PickPhysicalDevice(const CapabilitiesVK &caps, const vk::Instance &instance)
Definition: context_vk.cc:52
impeller::RenderPassBuilderVK::SetStencilAttachment
RenderPassBuilderVK & SetStencilAttachment(PixelFormat format, SampleCount sample_count, LoadAction load_action, StoreAction store_action)
Definition: render_pass_builder_vk.cc:74
command_encoder_vk.h
impeller::ContextVK::GetPhysicalDevice
vk::PhysicalDevice GetPhysicalDevice() const
Definition: context_vk.cc:546
impeller::ContextVK::GetResourceAllocator
std::shared_ptr< Allocator > GetResourceAllocator() const override
Returns the allocator used to create textures and buffers on the device.
Definition: context_vk.cc:486
impeller::ContextVK::GetBackendType
BackendType GetBackendType() const override
Get the graphics backend of an Impeller context.
Definition: context_vk.cc:134
impeller::ContextVK::CreateCommandBuffer
std::shared_ptr< CommandBuffer > CreateCommandBuffer() const override
Create a new command buffer. Command buffers can be used to encode graphics, blit,...
Definition: context_vk.cc:502
yuv_conversion_library_vk.h
impeller::RenderPassBuilderVK
Definition: render_pass_builder_vk.h:17
surface_context_vk.h
impeller::RenderTarget::GetColorAttachments
const std::map< size_t, ColorAttachment > & GetColorAttachments() const
Definition: render_target.cc:198
validation.h
impeller::CapabilitiesVK::GetEnabledDeviceFeatures
std::optional< PhysicalDeviceFeatures > GetEnabledDeviceFeatures(const vk::PhysicalDevice &physical_device) const
Definition: capabilities_vk.cc:350
impeller::PixelFormat
PixelFormat
The Pixel formats supported by Impeller. The naming convention denotes the usage of the component,...
Definition: formats.h:99
impeller::GetQueueCreateInfos
static std::vector< vk::DeviceQueueCreateInfo > GetQueueCreateInfos(std::initializer_list< QueueIndexVK > queues)
Definition: context_vk.cc:63
capabilities_vk.h
impeller::RenderPassBuilderVK::SetDepthStencilAttachment
RenderPassBuilderVK & SetDepthStencilAttachment(PixelFormat format, SampleCount sample_count, LoadAction load_action, StoreAction store_action)
Definition: render_pass_builder_vk.cc:56
command_pool_vk.h
impeller::ContextVK::Settings
Definition: context_vk.h:46
command_buffer_vk.h
impeller::RenderTarget::GetDepthAttachment
const std::optional< DepthAttachment > & GetDepthAttachment() const
Definition: render_target.cc:203
impeller::ContextVK::CreateSurfaceContext
std::shared_ptr< SurfaceContextVK > CreateSurfaceContext()
Definition: context_vk.cc:534
debug_report_vk.h
impeller::ContextVK::GetShaderLibrary
std::shared_ptr< ShaderLibrary > GetShaderLibrary() const override
Returns the library of shaders used to specify the programmable stages of a pipeline.
Definition: context_vk.cc:490
impeller::ContextVK::GetGraphicsQueue
const std::shared_ptr< QueueVK > & GetGraphicsQueue() const
Definition: context_vk.cc:542
impeller::ContextVK::GetDescriptorPoolRecycler
std::shared_ptr< DescriptorPoolRecyclerVK > GetDescriptorPoolRecycler() const
Definition: context_vk.cc:572
impeller::QueuesVK::graphics_queue
std::shared_ptr< QueueVK > graphics_queue
Definition: queue_vk.h:64
impeller::RenderPassBuilderVK::SetColorAttachment
RenderPassBuilderVK & SetColorAttachment(size_t index, PixelFormat format, SampleCount sample_count, LoadAction load_action, StoreAction store_action)
Definition: render_pass_builder_vk.cc:29
render_pass_builder_vk.h
capabilities.h
impeller::RenderTarget
Definition: render_target.h:38
impeller::ContextVK::Create
static std::shared_ptr< ContextVK > Create(Settings settings)
Definition: context_vk.cc:100
impeller::CapabilitiesVK
The Vulkan layers and extensions wrangler.
Definition: capabilities_vk.h:140
impeller::CommandBufferVK
Definition: command_buffer_vk.h:18
impeller::ContextVK::SetDebugName
bool SetDebugName(T handle, std::string_view label) const
Definition: context_vk.h:108
impeller::ContextVK::GetCommandPoolRecycler
std::shared_ptr< CommandPoolRecyclerVK > GetCommandPoolRecycler() const
Definition: context_vk.cc:558
impeller::RenderTargetAllocator::CreateOffscreenMSAA
virtual RenderTarget CreateOffscreenMSAA(const Context &context, ISize size, int mip_count, const std::string &label="Offscreen MSAA", RenderTarget::AttachmentConfigMSAA color_attachment_config=RenderTarget::kDefaultColorAttachmentConfigMSAA, std::optional< RenderTarget::AttachmentConfig > stencil_attachment_config=RenderTarget::kDefaultStencilAttachmentConfig, const std::shared_ptr< Texture > &existing_color_msaa_texture=nullptr, const std::shared_ptr< Texture > &existing_color_resolve_texture=nullptr, const std::shared_ptr< Texture > &existing_depth_stencil_texture=nullptr)
Definition: render_target.cc:313
impeller::RenderTargetAllocator
a wrapper around the impeller [Allocator] instance that can be used to provide caching of allocated r...
Definition: render_target.h:142
impeller::ContextVK::GetGPUTracer
std::shared_ptr< GPUTracerVK > GetGPUTracer() const
Definition: context_vk.cc:568
impeller::ContextVK
Definition: context_vk.h:42
VALIDATION_LOG
#define VALIDATION_LOG
Definition: validation.h:73
impeller::ContextVK::InitializeCommonlyUsedShadersIfNeeded
void InitializeCommonlyUsedShadersIfNeeded() const override
Definition: context_vk.cc:584
impeller::Context::BackendType::kVulkan
@ kVulkan
resource_manager_vk.h
impeller::ContextVK::~ContextVK
~ContextVK() override
Definition: context_vk.cc:127
impeller::ContextVK::GetSamplerLibrary
std::shared_ptr< SamplerLibrary > GetSamplerLibrary() const override
Returns the library of combined image samplers used in shaders.
Definition: context_vk.cc:494
impeller::ContextVK::GetDevice
const vk::Device & GetDevice() const
Definition: context_vk.cc:513
impeller::ContextVK::ChooseThreadCountForWorkers
static size_t ChooseThreadCountForWorkers(size_t hardware_concurrency)
Definition: context_vk.cc:110
impeller::BackendCast< CapabilitiesVK, Capabilities >::Cast
static CapabilitiesVK & Cast(Capabilities &base)
Definition: backend_cast.h:13
impeller::ContextVK::GetResourceManager
std::shared_ptr< ResourceManagerVK > GetResourceManager() const
Definition: context_vk.cc:554
command_queue_vk.h
impeller::ContextVK::GetDriverInfo
const std::unique_ptr< DriverInfoVK > & GetDriverInfo() const
Definition: context_vk.cc:625
impeller::PickQueue
static std::optional< QueueIndexVK > PickQueue(const vk::PhysicalDevice &device, vk::QueueFlagBits flags)
Definition: context_vk.cc:86
color
DlColor color
Definition: dl_golden_blur_unittests.cc:23
impeller::ContextVK::GetYUVConversionLibrary
const std::shared_ptr< YUVConversionLibraryVK > & GetYUVConversionLibrary() const
Definition: context_vk.cc:621
impeller::ContextVK::GetPipelineLibrary
std::shared_ptr< PipelineLibrary > GetPipelineLibrary() const override
Returns the library of pipelines used by render or compute commands.
Definition: context_vk.cc:498
impeller::ContextVK::DescribeGpuModel
std::string DescribeGpuModel() const override
Definition: context_vk.cc:478
impeller::ContextVK::GetFenceWaiter
std::shared_ptr< FenceWaiterVK > GetFenceWaiter() const
Definition: context_vk.cc:550
impeller::RenderPassBuilderVK::Build
vk::UniqueRenderPass Build(const vk::Device &device) const
Definition: render_pass_builder_vk.cc:92
render_target.h
impeller::QueueIndexVK::family
size_t family
Definition: queue_vk.h:16
impeller::ContextVK::SetOffscreenFormat
void SetOffscreenFormat(PixelFormat pixel_format)
Definition: context_vk.cc:473
impeller::RenderTarget::GetStencilAttachment
const std::optional< StencilAttachment > & GetStencilAttachment() const
Definition: render_target.cc:207
context_vk.h
impeller::HasValidationLayers
bool HasValidationLayers()
Definition: context_vk.cc:48
impeller::ContextVK::Shutdown
void Shutdown() override
Force all pending asynchronous work to finish. This is achieved by deleting all owned concurrent mess...
Definition: context_vk.cc:522
impeller
Definition: aiks_blend_unittests.cc:18