Flutter Impeller
context_vk.cc
Go to the documentation of this file.
1 // Copyright 2013 The Flutter Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
6 
7 #include "fml/concurrent_message_loop.h"
8 
9 #ifdef FML_OS_ANDROID
10 #include <pthread.h>
11 #include <sys/resource.h>
12 #include <sys/time.h>
13 #endif // FML_OS_ANDROID
14 
15 #include <map>
16 #include <memory>
17 #include <optional>
18 #include <string>
19 #include <vector>
20 
21 #include "flutter/fml/cpu_affinity.h"
22 #include "flutter/fml/trace_event.h"
35 
36 VULKAN_HPP_DEFAULT_DISPATCH_LOADER_DYNAMIC_STORAGE
37 
38 namespace impeller {
39 
40 // TODO(csg): Fix this after caps are reworked.
41 static bool gHasValidationLayers = false;
42 
44  return gHasValidationLayers;
45 }
46 
47 static std::optional<vk::PhysicalDevice> PickPhysicalDevice(
48  const CapabilitiesVK& caps,
49  const vk::Instance& instance) {
50  for (const auto& device : instance.enumeratePhysicalDevices().value) {
51  if (caps.GetEnabledDeviceFeatures(device).has_value()) {
52  return device;
53  }
54  }
55  return std::nullopt;
56 }
57 
58 static std::vector<vk::DeviceQueueCreateInfo> GetQueueCreateInfos(
59  std::initializer_list<QueueIndexVK> queues) {
60  std::map<size_t /* family */, size_t /* index */> family_index_map;
61  for (const auto& queue : queues) {
62  family_index_map[queue.family] = 0;
63  }
64  for (const auto& queue : queues) {
65  auto value = family_index_map[queue.family];
66  family_index_map[queue.family] = std::max(value, queue.index);
67  }
68 
69  static float kQueuePriority = 1.0f;
70  std::vector<vk::DeviceQueueCreateInfo> infos;
71  for (const auto& item : family_index_map) {
72  vk::DeviceQueueCreateInfo info;
73  info.setQueueFamilyIndex(item.first);
74  info.setQueueCount(item.second + 1);
75  info.setQueuePriorities(kQueuePriority);
76  infos.push_back(info);
77  }
78  return infos;
79 }
80 
81 static std::optional<QueueIndexVK> PickQueue(const vk::PhysicalDevice& device,
82  vk::QueueFlagBits flags) {
83  // This can be modified to ensure that dedicated queues are returned for each
84  // queue type depending on support.
85  const auto families = device.getQueueFamilyProperties();
86  for (size_t i = 0u; i < families.size(); i++) {
87  if (!(families[i].queueFlags & flags)) {
88  continue;
89  }
90  return QueueIndexVK{.family = i, .index = 0};
91  }
92  return std::nullopt;
93 }
94 
95 std::shared_ptr<ContextVK> ContextVK::Create(Settings settings) {
96  auto context = std::shared_ptr<ContextVK>(new ContextVK());
97  context->Setup(std::move(settings));
98  if (!context->IsValid()) {
99  return nullptr;
100  }
101  return context;
102 }
103 
namespace {
// Per-thread monotonically increasing counter; each context created on a
// thread gets the next value.
thread_local uint64_t tls_context_count = 0;

/// Produces an identifier for a newly created context that is unique within
/// the creating thread. The pointer argument is intentionally unused (left
/// unnamed to avoid -Wunused-parameter); uniqueness comes solely from the
/// thread-local counter.
uint64_t CalculateHash(void* /*ptr*/) {
  // You could make a context once per nanosecond for 584 years on one thread
  // before this overflows.
  return ++tls_context_count;
}
}  // namespace
112 
// Seeds hash_ with a per-thread unique id; see CalculateHash above.
ContextVK::ContextVK() : hash_(CalculateHash(this)) {}
114 
116  if (device_holder_ && device_holder_->device) {
117  [[maybe_unused]] auto result = device_holder_->device->waitIdle();
118  }
120 }
121 
124 }
125 
126 void ContextVK::Setup(Settings settings) {
127  TRACE_EVENT0("impeller", "ContextVK::Setup");
128 
129  if (!settings.proc_address_callback) {
130  return;
131  }
132 
133  queue_submit_thread_ = std::make_unique<fml::Thread>("QueueSubmitThread");
134  queue_submit_thread_->GetTaskRunner()->PostTask([]() {
135  // submitKHR is extremely cheap and mostly blocks on an internal fence.
136  fml::RequestAffinity(fml::CpuAffinity::kEfficiency);
137  });
138 
139  raster_message_loop_ = fml::ConcurrentMessageLoop::Create(
140  std::min(4u, std::thread::hardware_concurrency()));
141  raster_message_loop_->PostTaskToAllWorkers([]() {
142  // Currently we only use the worker task pool for small parts of a frame
143  // workload, if this changes this setting may need to be adjusted.
144  fml::RequestAffinity(fml::CpuAffinity::kNotPerformance);
145 #ifdef FML_OS_ANDROID
146  if (::setpriority(PRIO_PROCESS, gettid(), -5) != 0) {
147  FML_LOG(ERROR) << "Failed to set Workers task runner priority";
148  }
149 #endif // FML_OS_ANDROID
150  });
151 
152  auto& dispatcher = VULKAN_HPP_DEFAULT_DISPATCHER;
153  dispatcher.init(settings.proc_address_callback);
154 
155  // Enable Vulkan validation if either:
156  // 1. The user has explicitly enabled it.
157  // 2. We are in a combination of debug mode, and running on Android.
158  // (It's possible 2 is overly conservative and we can simplify this)
159  auto enable_validation = settings.enable_validation;
160 
161 #if defined(FML_OS_ANDROID) && !defined(NDEBUG)
162  enable_validation = true;
163 #endif
164 
165  auto caps =
166  std::shared_ptr<CapabilitiesVK>(new CapabilitiesVK(enable_validation));
167 
168  if (!caps->IsValid()) {
169  VALIDATION_LOG << "Could not determine device capabilities.";
170  return;
171  }
172 
173  gHasValidationLayers = caps->AreValidationsEnabled();
174 
175  auto enabled_layers = caps->GetEnabledLayers();
176  auto enabled_extensions = caps->GetEnabledInstanceExtensions();
177 
178  if (!enabled_layers.has_value() || !enabled_extensions.has_value()) {
179  VALIDATION_LOG << "Device has insufficient capabilities.";
180  return;
181  }
182 
183  vk::InstanceCreateFlags instance_flags = {};
184 
185  if (std::find(enabled_extensions.value().begin(),
186  enabled_extensions.value().end(),
187  "VK_KHR_portability_enumeration") !=
188  enabled_extensions.value().end()) {
189  instance_flags |= vk::InstanceCreateFlagBits::eEnumeratePortabilityKHR;
190  }
191 
192  std::vector<const char*> enabled_layers_c;
193  std::vector<const char*> enabled_extensions_c;
194 
195  for (const auto& layer : enabled_layers.value()) {
196  enabled_layers_c.push_back(layer.c_str());
197  }
198 
199  for (const auto& ext : enabled_extensions.value()) {
200  enabled_extensions_c.push_back(ext.c_str());
201  }
202 
203  vk::ApplicationInfo application_info;
204  application_info.setApplicationVersion(VK_API_VERSION_1_0);
205  application_info.setApiVersion(VK_API_VERSION_1_1);
206  application_info.setEngineVersion(VK_API_VERSION_1_0);
207  application_info.setPEngineName("Impeller");
208  application_info.setPApplicationName("Impeller");
209 
210  vk::StructureChain<vk::InstanceCreateInfo, vk::ValidationFeaturesEXT>
211  instance_chain;
212 
213  if (!caps->AreValidationsEnabled()) {
214  instance_chain.unlink<vk::ValidationFeaturesEXT>();
215  }
216 
217  std::vector<vk::ValidationFeatureEnableEXT> enabled_validations = {
218  vk::ValidationFeatureEnableEXT::eSynchronizationValidation,
219  };
220 
221  auto validation = instance_chain.get<vk::ValidationFeaturesEXT>();
222  validation.setEnabledValidationFeatures(enabled_validations);
223 
224  auto instance_info = instance_chain.get<vk::InstanceCreateInfo>();
225  instance_info.setPEnabledLayerNames(enabled_layers_c);
226  instance_info.setPEnabledExtensionNames(enabled_extensions_c);
227  instance_info.setPApplicationInfo(&application_info);
228  instance_info.setFlags(instance_flags);
229 
230  auto device_holder = std::make_shared<DeviceHolderImpl>();
231  {
232  auto instance = vk::createInstanceUnique(instance_info);
233  if (instance.result != vk::Result::eSuccess) {
234  VALIDATION_LOG << "Could not create Vulkan instance: "
235  << vk::to_string(instance.result);
236  return;
237  }
238  device_holder->instance = std::move(instance.value);
239  }
240  dispatcher.init(device_holder->instance.get());
241 
242  //----------------------------------------------------------------------------
243  /// Setup the debug report.
244  ///
245  /// Do this as early as possible since we could use the debug report from
246  /// initialization issues.
247  ///
248  auto debug_report =
249  std::make_unique<DebugReportVK>(*caps, device_holder->instance.get());
250 
251  if (!debug_report->IsValid()) {
252  VALIDATION_LOG << "Could not set up debug report.";
253  return;
254  }
255 
256  //----------------------------------------------------------------------------
257  /// Pick the physical device.
258  ///
259  {
260  auto physical_device =
261  PickPhysicalDevice(*caps, device_holder->instance.get());
262  if (!physical_device.has_value()) {
263  VALIDATION_LOG << "No valid Vulkan device found.";
264  return;
265  }
266  device_holder->physical_device = physical_device.value();
267  }
268 
269  //----------------------------------------------------------------------------
270  /// Pick device queues.
271  ///
272  auto graphics_queue =
273  PickQueue(device_holder->physical_device, vk::QueueFlagBits::eGraphics);
274  auto transfer_queue =
275  PickQueue(device_holder->physical_device, vk::QueueFlagBits::eTransfer);
276  auto compute_queue =
277  PickQueue(device_holder->physical_device, vk::QueueFlagBits::eCompute);
278 
279  if (!graphics_queue.has_value()) {
280  VALIDATION_LOG << "Could not pick graphics queue.";
281  return;
282  }
283  if (!transfer_queue.has_value()) {
284  FML_LOG(INFO) << "Dedicated transfer queue not avialable.";
285  transfer_queue = graphics_queue.value();
286  }
287  if (!compute_queue.has_value()) {
288  VALIDATION_LOG << "Could not pick compute queue.";
289  return;
290  }
291 
292  //----------------------------------------------------------------------------
293  /// Create the logical device.
294  ///
295  auto enabled_device_extensions =
296  caps->GetEnabledDeviceExtensions(device_holder->physical_device);
297  if (!enabled_device_extensions.has_value()) {
298  // This shouldn't happen since we already did device selection. But
299  // doesn't hurt to check again.
300  return;
301  }
302 
303  std::vector<const char*> enabled_device_extensions_c;
304  for (const auto& ext : enabled_device_extensions.value()) {
305  enabled_device_extensions_c.push_back(ext.c_str());
306  }
307 
308  const auto queue_create_infos = GetQueueCreateInfos(
309  {graphics_queue.value(), compute_queue.value(), transfer_queue.value()});
310 
311  const auto enabled_features =
312  caps->GetEnabledDeviceFeatures(device_holder->physical_device);
313  if (!enabled_features.has_value()) {
314  // This shouldn't happen since the device can't be picked if this was not
315  // true. But doesn't hurt to check.
316  return;
317  }
318 
319  vk::DeviceCreateInfo device_info;
320 
321  device_info.setQueueCreateInfos(queue_create_infos);
322  device_info.setPEnabledExtensionNames(enabled_device_extensions_c);
323  device_info.setPEnabledFeatures(&enabled_features.value());
324  // Device layers are deprecated and ignored.
325 
326  {
327  auto device_result =
328  device_holder->physical_device.createDeviceUnique(device_info);
329  if (device_result.result != vk::Result::eSuccess) {
330  VALIDATION_LOG << "Could not create logical device.";
331  return;
332  }
333  device_holder->device = std::move(device_result.value);
334  }
335 
336  if (!caps->SetPhysicalDevice(device_holder->physical_device)) {
337  VALIDATION_LOG << "Capabilities could not be updated.";
338  return;
339  }
340 
341  //----------------------------------------------------------------------------
342  /// Create the allocator.
343  ///
344  auto allocator = std::shared_ptr<AllocatorVK>(new AllocatorVK(
345  weak_from_this(), //
346  application_info.apiVersion, //
347  device_holder->physical_device, //
348  device_holder, //
349  device_holder->instance.get(), //
350  *caps //
351  ));
352 
353  if (!allocator->IsValid()) {
354  VALIDATION_LOG << "Could not create memory allocator.";
355  return;
356  }
357 
358  //----------------------------------------------------------------------------
359  /// Setup the pipeline library.
360  ///
361  auto pipeline_library = std::shared_ptr<PipelineLibraryVK>(
362  new PipelineLibraryVK(device_holder, //
363  caps, //
364  std::move(settings.cache_directory), //
365  raster_message_loop_->GetTaskRunner() //
366  ));
367 
368  if (!pipeline_library->IsValid()) {
369  VALIDATION_LOG << "Could not create pipeline library.";
370  return;
371  }
372 
373  auto sampler_library =
374  std::shared_ptr<SamplerLibraryVK>(new SamplerLibraryVK(device_holder));
375 
376  auto shader_library = std::shared_ptr<ShaderLibraryVK>(
377  new ShaderLibraryVK(device_holder, //
378  settings.shader_libraries_data) //
379  );
380 
381  if (!shader_library->IsValid()) {
382  VALIDATION_LOG << "Could not create shader library.";
383  return;
384  }
385 
386  //----------------------------------------------------------------------------
387  /// Create the fence waiter.
388  ///
389  auto fence_waiter =
390  std::shared_ptr<FenceWaiterVK>(new FenceWaiterVK(device_holder));
391 
392  //----------------------------------------------------------------------------
393  /// Create the resource manager and command pool recycler.
394  ///
395  auto resource_manager = ResourceManagerVK::Create();
396  if (!resource_manager) {
397  VALIDATION_LOG << "Could not create resource manager.";
398  return;
399  }
400 
401  auto command_pool_recycler =
402  std::make_shared<CommandPoolRecyclerVK>(weak_from_this());
403  if (!command_pool_recycler) {
404  VALIDATION_LOG << "Could not create command pool recycler.";
405  return;
406  }
407 
408  auto descriptor_pool_recycler =
409  std::make_shared<DescriptorPoolRecyclerVK>(weak_from_this());
410  if (!descriptor_pool_recycler) {
411  VALIDATION_LOG << "Could not create descriptor pool recycler.";
412  return;
413  }
414 
415  //----------------------------------------------------------------------------
416  /// Fetch the queues.
417  ///
418  QueuesVK queues(device_holder->device.get(), //
419  graphics_queue.value(), //
420  compute_queue.value(), //
421  transfer_queue.value() //
422  );
423  if (!queues.IsValid()) {
424  VALIDATION_LOG << "Could not fetch device queues.";
425  return;
426  }
427 
428  VkPhysicalDeviceProperties physical_device_properties;
429  dispatcher.vkGetPhysicalDeviceProperties(device_holder->physical_device,
430  &physical_device_properties);
431 
432  //----------------------------------------------------------------------------
433  /// All done!
434  ///
435  device_holder_ = std::move(device_holder);
436  debug_report_ = std::move(debug_report);
437  allocator_ = std::move(allocator);
438  shader_library_ = std::move(shader_library);
439  sampler_library_ = std::move(sampler_library);
440  pipeline_library_ = std::move(pipeline_library);
441  queues_ = std::move(queues);
442  device_capabilities_ = std::move(caps);
443  fence_waiter_ = std::move(fence_waiter);
444  resource_manager_ = std::move(resource_manager);
445  command_pool_recycler_ = std::move(command_pool_recycler);
446  descriptor_pool_recycler_ = std::move(descriptor_pool_recycler);
447  device_name_ = std::string(physical_device_properties.deviceName);
448  is_valid_ = true;
449 
450  // Create the GPU Tracer later because it depends on state from
451  // the ContextVK.
452  gpu_tracer_ = std::make_shared<GPUTracerVK>(GetDeviceHolder());
453 
454  //----------------------------------------------------------------------------
455  /// Label all the relevant objects. This happens after setup so that the
456  /// debug messengers have had a chance to be set up.
457  ///
458  SetDebugName(GetDevice(), device_holder_->device.get(), "ImpellerDevice");
459 }
460 
462  CapabilitiesVK::Cast(*device_capabilities_).SetOffscreenFormat(pixel_format);
463 }
464 
// |Context|
std::string ContextVK::DescribeGpuModel() const {
  // Cached copy of VkPhysicalDeviceProperties::deviceName taken in Setup().
  return device_name_;
}

// |Context|
bool ContextVK::IsValid() const {
  // Set to true only at the very end of a fully successful Setup().
  return is_valid_;
}

// |Context|
std::shared_ptr<Allocator> ContextVK::GetResourceAllocator() const {
  return allocator_;
}

// |Context|
std::shared_ptr<ShaderLibrary> ContextVK::GetShaderLibrary() const {
  return shader_library_;
}

// |Context|
std::shared_ptr<SamplerLibrary> ContextVK::GetSamplerLibrary() const {
  return sampler_library_;
}

// |Context|
std::shared_ptr<PipelineLibrary> ContextVK::GetPipelineLibrary() const {
  return pipeline_library_;
}

// |Context|
std::shared_ptr<CommandBuffer> ContextVK::CreateCommandBuffer() const {
  // Each command buffer gets its own encoder factory, which holds only a weak
  // reference back to this context.
  return std::shared_ptr<CommandBufferVK>(
      new CommandBufferVK(shared_from_this(),                     //
                          CreateGraphicsCommandEncoderFactory())  //
  );
}

// Returns the Vulkan instance handle (ownership stays with device_holder_).
vk::Instance ContextVK::GetInstance() const {
  return *device_holder_->instance;
}

// Returns a reference to the logical device owned by device_holder_.
const vk::Device& ContextVK::GetDevice() const {
  return device_holder_->device.get();
}

// Single-threaded runner dedicated to queue submission; Setup() requests
// efficiency-core affinity for its thread.
const fml::RefPtr<fml::TaskRunner> ContextVK::GetQueueSubmitRunner() const {
  return queue_submit_thread_->GetTaskRunner();
}
508 
509 const std::shared_ptr<fml::ConcurrentTaskRunner>
511  return raster_message_loop_->GetTaskRunner();
512 }
513 
515  // There are multiple objects, for example |CommandPoolVK|, that in their
516  // destructors make a strong reference to |ContextVK|. Resetting these shared
517  // pointers ensures that cleanup happens in a correct order.
518  //
519  // tl;dr: Without it, we get thread::join failures on shutdown.
520  fence_waiter_.reset();
521  resource_manager_.reset();
522 
523  queue_submit_thread_->Join();
524  raster_message_loop_->Terminate();
525 }
526 
// Creates a new surface context that shares ownership of this context.
std::shared_ptr<SurfaceContextVK> ContextVK::CreateSurfaceContext() {
  return std::make_shared<SurfaceContextVK>(shared_from_this());
}

// |Context|
const std::shared_ptr<const Capabilities>& ContextVK::GetCapabilities() const {
  // Populated from CapabilitiesVK during Setup().
  return device_capabilities_;
}

const std::shared_ptr<QueueVK>& ContextVK::GetGraphicsQueue() const {
  return queues_.graphics_queue;
}

vk::PhysicalDevice ContextVK::GetPhysicalDevice() const {
  return device_holder_->physical_device;
}

std::shared_ptr<FenceWaiterVK> ContextVK::GetFenceWaiter() const {
  return fence_waiter_;
}

std::shared_ptr<ResourceManagerVK> ContextVK::GetResourceManager() const {
  return resource_manager_;
}

std::shared_ptr<CommandPoolRecyclerVK> ContextVK::GetCommandPoolRecycler()
    const {
  return command_pool_recycler_;
}

// The factory holds only a weak reference so it cannot extend the context's
// lifetime.
std::unique_ptr<CommandEncoderFactoryVK>
ContextVK::CreateGraphicsCommandEncoderFactory() const {
  return std::make_unique<CommandEncoderFactoryVK>(weak_from_this());
}

std::shared_ptr<GPUTracerVK> ContextVK::GetGPUTracer() const {
  return gpu_tracer_;
}
564 
565 } // namespace impeller
impeller::ContextVK::GetCapabilities
const std::shared_ptr< const Capabilities > & GetCapabilities() const override
Get the capabilities of the Impeller context. All optionally supported features of the platform,...
Definition: context_vk.cc:531
impeller::ContextVK::GetConcurrentWorkerTaskRunner
const std::shared_ptr< fml::ConcurrentTaskRunner > GetConcurrentWorkerTaskRunner() const
Definition: context_vk.cc:510
fence_waiter_vk.h
impeller::CapabilitiesVK::SetOffscreenFormat
void SetOffscreenFormat(PixelFormat pixel_format) const
Definition: capabilities_vk.cc:342
gpu_tracer_vk.h
impeller::ResourceManagerVK::Create
static std::shared_ptr< ResourceManagerVK > Create()
Creates a shared resource manager (a dedicated thread).
Definition: resource_manager_vk.cc:14
impeller::QueueIndexVK
Definition: queue_vk.h:16
allocator_vk.h
impeller::gHasValidationLayers
static bool gHasValidationLayers
Definition: context_vk.cc:41
impeller::CommandPoolRecyclerVK::DestroyThreadLocalPools
static void DestroyThreadLocalPools(const ContextVK *context)
Clean up resources held by all per-thread command pools associated with the given context.
Definition: command_pool_vk.cc:240
impeller::Context::BackendType
BackendType
Definition: context.h:49
impeller::ContextVK::IsValid
bool IsValid() const override
Determines if a context is valid. If the caller ever receives an invalid context, they must discard it...
Definition: context_vk.cc:470
impeller::ContextVK::GetInstance
vk::Instance GetInstance() const
Definition: context_vk.cc:497
impeller::PickPhysicalDevice
static std::optional< vk::PhysicalDevice > PickPhysicalDevice(const CapabilitiesVK &caps, const vk::Instance &instance)
Definition: context_vk.cc:47
command_encoder_vk.h
impeller::ContextVK::GetPhysicalDevice
vk::PhysicalDevice GetPhysicalDevice() const
Definition: context_vk.cc:539
impeller::ContextVK::GetResourceAllocator
std::shared_ptr< Allocator > GetResourceAllocator() const override
Returns the allocator used to create textures and buffers on the device.
Definition: context_vk.cc:474
impeller::ContextVK::GetBackendType
BackendType GetBackendType() const override
Get the graphics backend of an Impeller context.
Definition: context_vk.cc:122
impeller::ContextVK::CreateCommandBuffer
std::shared_ptr< CommandBuffer > CreateCommandBuffer() const override
Create a new command buffer. Command buffers can be used to encode graphics, blit,...
Definition: context_vk.cc:490
surface_context_vk.h
validation.h
impeller::PixelFormat
PixelFormat
The Pixel formats supported by Impeller. The naming convention denotes the usage of the component,...
Definition: formats.h:94
impeller::GetQueueCreateInfos
static std::vector< vk::DeviceQueueCreateInfo > GetQueueCreateInfos(std::initializer_list< QueueIndexVK > queues)
Definition: context_vk.cc:58
capabilities_vk.h
command_pool_vk.h
impeller::ContextVK::Settings
Definition: context_vk.h:44
command_buffer_vk.h
impeller::ContextVK::CreateSurfaceContext
std::shared_ptr< SurfaceContextVK > CreateSurfaceContext()
Definition: context_vk.cc:527
debug_report_vk.h
impeller::ContextVK::GetShaderLibrary
std::shared_ptr< ShaderLibrary > GetShaderLibrary() const override
Returns the library of shaders used to specify the programmable stages of a pipeline.
Definition: context_vk.cc:478
impeller::ContextVK::GetGraphicsQueue
const std::shared_ptr< QueueVK > & GetGraphicsQueue() const
Definition: context_vk.cc:535
impeller::QueuesVK::graphics_queue
std::shared_ptr< QueueVK > graphics_queue
Definition: queue_vk.h:61
impeller::ContextVK::GetDeviceHolder
std::shared_ptr< DeviceHolder > GetDeviceHolder() const
Definition: context_vk.h:128
impeller::CapabilitiesVK::GetEnabledDeviceFeatures
std::optional< vk::PhysicalDeviceFeatures > GetEnabledDeviceFeatures(const vk::PhysicalDevice &physical_device) const
Definition: capabilities_vk.cc:291
capabilities.h
impeller::ContextVK::Create
static std::shared_ptr< ContextVK > Create(Settings settings)
Definition: context_vk.cc:95
impeller::CapabilitiesVK
The Vulkan layers and extensions wrangler.
Definition: capabilities_vk.h:33
impeller::CommandBufferVK
Definition: command_buffer_vk.h:19
impeller::ContextVK::SetDebugName
bool SetDebugName(T handle, std::string_view label) const
Definition: context_vk.h:100
impeller::ContextVK::GetCommandPoolRecycler
std::shared_ptr< CommandPoolRecyclerVK > GetCommandPoolRecycler() const
Definition: context_vk.cc:551
impeller::ContextVK::GetGPUTracer
std::shared_ptr< GPUTracerVK > GetGPUTracer() const
Definition: context_vk.cc:561
impeller::ContextVK
Definition: context_vk.h:40
VALIDATION_LOG
#define VALIDATION_LOG
Definition: validation.h:67
impeller::Context::BackendType::kVulkan
@ kVulkan
resource_manager_vk.h
impeller::ContextVK::~ContextVK
~ContextVK() override
Definition: context_vk.cc:115
impeller::ContextVK::GetSamplerLibrary
std::shared_ptr< SamplerLibrary > GetSamplerLibrary() const override
Returns the library of combined image samplers used in shaders.
Definition: context_vk.cc:482
impeller::ContextVK::GetDevice
const vk::Device & GetDevice() const
Definition: context_vk.cc:501
impeller::BackendCast< CapabilitiesVK, Capabilities >::Cast
static CapabilitiesVK & Cast(Capabilities &base)
Definition: backend_cast.h:15
impeller::ContextVK::GetResourceManager
std::shared_ptr< ResourceManagerVK > GetResourceManager() const
Definition: context_vk.cc:547
impeller::PickQueue
static std::optional< QueueIndexVK > PickQueue(const vk::PhysicalDevice &device, vk::QueueFlagBits flags)
Definition: context_vk.cc:81
impeller::ContextVK::GetPipelineLibrary
std::shared_ptr< PipelineLibrary > GetPipelineLibrary() const override
Returns the library of pipelines used by render or compute commands.
Definition: context_vk.cc:486
impeller::ContextVK::DescribeGpuModel
std::string DescribeGpuModel() const override
Definition: context_vk.cc:466
impeller::ContextVK::GetFenceWaiter
std::shared_ptr< FenceWaiterVK > GetFenceWaiter() const
Definition: context_vk.cc:543
impeller::QueueIndexVK::family
size_t family
Definition: queue_vk.h:17
impeller::ContextVK::SetOffscreenFormat
void SetOffscreenFormat(PixelFormat pixel_format)
Definition: context_vk.cc:461
context_vk.h
impeller::ContextVK::GetQueueSubmitRunner
const fml::RefPtr< fml::TaskRunner > GetQueueSubmitRunner() const
A single-threaded task runner that should only be used for submitKHR.
Definition: context_vk.cc:505
impeller::HasValidationLayers
bool HasValidationLayers()
Definition: context_vk.cc:43
impeller::ContextVK::Shutdown
void Shutdown() override
Force all pending asynchronous work to finish. This is achieved by deleting all owned concurrent mess...
Definition: context_vk.cc:514
impeller
Definition: aiks_context.cc:10