7 #include <Foundation/Foundation.h>
10 #include "flutter/fml/concurrent_message_loop.h"
11 #include "flutter/fml/file.h"
12 #include "flutter/fml/logging.h"
13 #include "flutter/fml/paths.h"
14 #include "flutter/fml/synchronization/sync_switch.h"
// Checks whether the device's GPU is at least Apple-family-2 capable.
// NOTE(review): the enclosing function signature and the simulator-branch
// body are elided in this excerpt — confirm against the full file.
25 #if FML_OS_IOS_SIMULATOR
27 #else // FML_OS_IOS_SIMULATOR
// On OSes new enough for -supportsFamily:, query the GPU family directly.
29 if (@available(macOS 10.15, iOS 13, tvOS 13, *)) {
30 return [device supportsFamily:MTLGPUFamilyApple2];
// Older-OS fallback: the iOS_GPUFamily2_v1 feature set is the pre-13/10.15
// equivalent of the Apple2 family query above.
36 return [device supportsFeatureSet:MTLFeatureSet_iOS_GPUFamily2_v1];
40 #endif // FML_OS_IOS_SIMULATOR
// Determines SIMD-group ("subgroup") support: true only for Apple7 (A14-class)
// or Mac2 family GPUs; defaults to false on OSes that predate -supportsFamily:.
44 bool supports_subgroups =
false;
47 if (@available(ios 13.0, tvos 13.0, macos 10.15, *)) {
48 supports_subgroups = [device supportsFamily:MTLGPUFamilyApple7] ||
49 [device supportsFamily:MTLGPUFamilyMac2];
51 return supports_subgroups;
// Constructor: stores the command queue and GPU-disabled sync switch,
// registers a switch observer, spins up the raster worker pool, and builds
// the shader/pipeline/sampler libraries and the resource allocator.
// NOTE(review): several initializer-list entries, validity checks, and
// closing braces are elided in this excerpt.
74 ContextMTL::ContextMTL(
76 id<MTLCommandQueue> command_queue,
77 NSArray<id<MTLLibrary>>* shader_libraries,
78 std::shared_ptr<const fml::SyncSwitch> is_gpu_disabled_sync_switch)
80 command_queue_(command_queue),
81 is_gpu_disabled_sync_switch_(
std::move(is_gpu_disabled_sync_switch)) {
// Observe the switch so tasks deferred while the GPU is disabled can be
// flushed when it is re-enabled (see SyncSwitchObserver::OnSyncSwitchUpdate).
88 sync_switch_observer_.reset(
new SyncSwitchObserver(*
this));
89 is_gpu_disabled_sync_switch_->AddObserver(sync_switch_observer_.get());
// Raster worker pool, capped at 4 threads (fewer on small machines).
93 raster_message_loop_ = fml::ConcurrentMessageLoop::Create(
94 std::min(4u, std::thread::hardware_concurrency()));
// Raise the priority of every worker thread.
95 raster_message_loop_->PostTaskToAllWorkers([]() {
98 [[NSThread currentThread] setThreadPriority:1.0];
101 pthread_t thread = pthread_self();
// BUGFIX: the address-of expressions below had been mangled into the
// mojibake "¶m" (HTML-entity corruption of "&param"); restored to the
// `&param` that pthread_getschedparam/pthread_setschedparam require.
// pthread_getschedparam returns 0 on success, hence the negation.
102 if (!pthread_getschedparam(thread, &policy, &param)) {
103 param.sched_priority = 50;
104 pthread_setschedparam(thread, policy, &param);
// A nil library array means shader-library creation upstream failed.
111 if (shader_libraries == nil) {
117 auto library = std::shared_ptr<ShaderLibraryMTL>(
118 new ShaderLibraryMTL(shader_libraries));
119 if (!library->IsValid()) {
123 shader_library_ = std::move(library);
129 std::shared_ptr<PipelineLibraryMTL>(
new PipelineLibraryMTL(device_));
135 std::shared_ptr<SamplerLibraryMTL>(
new SamplerLibraryMTL(device_));
140 resource_allocator_ = std::shared_ptr<AllocatorMTL>(
141 new AllocatorMTL(device_,
"Impeller Permanents Allocator"));
142 if (!resource_allocator_) {
148 device_capabilities_ =
// Debug builds also get a GPU tracer.
150 #ifdef IMPELLER_DEBUG
151 gpu_tracer_ = std::make_shared<GPUTracerMTL>();
152 #endif // IMPELLER_DEBUG
// Builds an NSArray of MTLLibrary objects from shader-library files on disk.
// NOTE(review): the function's name/return-type line and the error-path
// control flow (presumably early returns) are elided in this excerpt —
// confirm against the full file.
157 id<MTLDevice> device,
158 const std::vector<std::string>& libraries_paths) {
159 NSMutableArray<id<MTLLibrary>>* found_libraries = [NSMutableArray array];
160 for (
const auto& library_path : libraries_paths) {
// Verify the path is an actual file before handing it to Metal.
161 if (!fml::IsFile(library_path)) {
163 << library_path <<
"'";
166 NSError* shader_library_error = nil;
167 auto library = [device newLibraryWithFile:@(library_path.c_str())
168 error:&shader_library_error];
// Log Metal's own diagnostic when library creation fails.
170 FML_LOG(ERROR) <<
"Could not create shader library: "
171 << shader_library_error.localizedDescription.UTF8String;
174 [found_libraries addObject:library];
176 return found_libraries;
// Builds an NSArray of MTLLibrary objects from in-memory shader blobs.
// NOTE(review): the function's name/return-type line, the dispatch_data
// destructor block, and the error-path control flow are elided in this
// excerpt — confirm against the full file.
180 id<MTLDevice> device,
181 const std::vector<std::shared_ptr<fml::Mapping>>& libraries_data,
182 const std::string& label) {
183 NSMutableArray<id<MTLLibrary>>* found_libraries = [NSMutableArray array];
184 for (
const auto& library_data : libraries_data) {
185 if (library_data ==
nullptr) {
186 FML_LOG(ERROR) <<
"Shader library data was null.";
// NOTE(review): `data` is presumably captured by the (elided) destructor
// block of dispatch_data_create to keep the backing mapping alive until the
// dispatch data is released — confirm against the full file.
190 __block
auto data = library_data;
193 ::dispatch_data_create(library_data->GetMapping(),
194 library_data->GetSize(),
195 dispatch_get_main_queue(),
201 if (!dispatch_data) {
202 FML_LOG(ERROR) <<
"Could not wrap shader data in dispatch data.";
206 NSError* shader_library_error = nil;
207 auto library = [device newLibraryWithData:dispatch_data
208 error:&shader_library_error];
210 FML_LOG(ERROR) <<
"Could not create shader library: "
211 << shader_library_error.localizedDescription.UTF8String;
// Optionally label the library (useful in GPU frame captures).
214 if (!label.empty()) {
215 library.label = @(label.c_str());
217 [found_libraries addObject:library];
219 return found_libraries;
// Returns the system-default Metal device. NOTE(review): the enclosing
// function's signature is elided in this excerpt.
223 return ::MTLCreateSystemDefaultDevice();
// Creates and labels the context's MTLCommandQueue. NOTE(review): the
// enclosing function signature and the failure-branch body are elided.
227 auto command_queue = device.newCommandQueue;
// A nil queue means Metal could not provide a command queue for this device.
228 if (!command_queue) {
232 command_queue.label =
@"Impeller Command Queue";
233 return command_queue;
// Factory: creates a ContextMTL from shader-library file paths.
// NOTE(review): device/queue acquisition and the shader-library loading call
// between these lines are elided in this excerpt.
236 std::shared_ptr<ContextMTL> ContextMTL::Create(
237 const std::vector<std::string>& shader_library_paths,
238 std::shared_ptr<const fml::SyncSwitch> is_gpu_disabled_sync_switch) {
241 if (!command_queue) {
// shared_ptr(new ...) rather than make_shared — presumably because the
// constructor is non-public; confirm against the header.
244 auto context = std::shared_ptr<ContextMTL>(
new ContextMTL(
245 device, command_queue,
247 std::move(is_gpu_disabled_sync_switch)));
248 if (!context->IsValid()) {
249 FML_LOG(ERROR) <<
"Could not create Metal context.";
// Factory: creates a ContextMTL from in-memory shader-library blobs.
// NOTE(review): device/queue acquisition and the constructor's leading
// arguments are elided in this excerpt.
255 std::shared_ptr<ContextMTL> ContextMTL::Create(
256 const std::vector<std::shared_ptr<fml::Mapping>>& shader_libraries_data,
257 std::shared_ptr<const fml::SyncSwitch> is_gpu_disabled_sync_switch,
258 const std::string& library_label) {
261 if (!command_queue) {
264 auto context = std::shared_ptr<ContextMTL>(
268 std::move(is_gpu_disabled_sync_switch)));
269 if (!context->IsValid()) {
270 FML_LOG(ERROR) <<
"Could not create Metal context.";
// Factory: creates a ContextMTL from a caller-supplied device and command
// queue plus in-memory shader blobs. NOTE(review): the constructor's leading
// arguments are elided in this excerpt.
276 std::shared_ptr<ContextMTL> ContextMTL::Create(
277 id<MTLDevice> device,
278 id<MTLCommandQueue> command_queue,
279 const std::vector<std::shared_ptr<fml::Mapping>>& shader_libraries_data,
280 std::shared_ptr<const fml::SyncSwitch> is_gpu_disabled_sync_switch,
281 const std::string& library_label) {
282 auto context = std::shared_ptr<ContextMTL>(
286 std::move(is_gpu_disabled_sync_switch)));
287 if (!context->IsValid()) {
288 FML_LOG(ERROR) <<
"Could not create Metal context.";
// Destructor: unregisters the sync-switch observer installed by the
// constructor so the switch cannot call back into a destroyed context.
294 ContextMTL::~ContextMTL() {
295 is_gpu_disabled_sync_switch_->RemoveObserver(sync_switch_observer_.get());
// Identifies this context as the Metal backend. NOTE(review): the enclosing
// getter's signature is elided in this excerpt.
299 return Context::BackendType::kMetal;
// Returns the Metal device's name as a UTF-8 std::string (for diagnostics).
303 std::string ContextMTL::DescribeGpuModel()
const {
304 return std::string([[device_ name] UTF8String]);
// Whether construction fully succeeded. NOTE(review): the body is elided in
// this excerpt — presumably returns a flag set by the constructor; confirm.
308 bool ContextMTL::IsValid()
const {
// Accessor for the shader library built in the constructor.
313 std::shared_ptr<ShaderLibrary> ContextMTL::GetShaderLibrary()
const {
314 return shader_library_;
// Accessor for the pipeline library built in the constructor.
318 std::shared_ptr<PipelineLibrary> ContextMTL::GetPipelineLibrary()
const {
319 return pipeline_library_;
// Accessor for the sampler library built in the constructor.
323 std::shared_ptr<SamplerLibrary> ContextMTL::GetSamplerLibrary()
const {
324 return sampler_library_;
// Delegates to CreateCommandBufferInQueue using the context's own queue.
// NOTE(review): the enclosing method signature is elided in this excerpt.
329 return CreateCommandBufferInQueue(command_queue_);
// Tears down the raster worker message loop by releasing it.
333 void ContextMTL::Shutdown() {
334 raster_message_loop_.reset();
// Debug-only accessor for the GPU tracer created in the constructor.
// NOTE(review): the return statement is elided in this excerpt.
337 #ifdef IMPELLER_DEBUG
338 std::shared_ptr<GPUTracerMTL> ContextMTL::GetGPUTracer()
const {
341 #endif // IMPELLER_DEBUG
// Exposes the task runner of the raster worker pool created in the
// constructor.
343 const std::shared_ptr<fml::ConcurrentTaskRunner>
344 ContextMTL::GetWorkerTaskRunner()
const {
345 return raster_message_loop_->GetTaskRunner();
// Accessor for the GPU-disabled sync switch stored by the constructor.
348 std::shared_ptr<const fml::SyncSwitch> ContextMTL::GetIsGpuDisabledSyncSwitch()
350 return is_gpu_disabled_sync_switch_;
// Wraps a new CommandBufferMTL around the given queue, tied to this context
// via weak_from_this(). NOTE(review): the invalid-buffer fallout and the
// final return are elided in this excerpt.
353 std::shared_ptr<CommandBuffer> ContextMTL::CreateCommandBufferInQueue(
354 id<MTLCommandQueue> queue)
const {
359 auto buffer = std::shared_ptr<CommandBufferMTL>(
360 new CommandBufferMTL(weak_from_this(), queue));
361 if (!buffer->IsValid()) {
// Accessor for the permanent resource allocator built in the constructor.
367 std::shared_ptr<Allocator> ContextMTL::GetResourceAllocator()
const {
368 return resource_allocator_;
// Accessor for the underlying Metal device. NOTE(review): the body is elided
// in this excerpt — presumably returns device_; confirm.
371 id<MTLDevice> ContextMTL::GetMTLDevice()
const {
// Accessor for the device capabilities computed in the constructor.
375 const std::shared_ptr<const Capabilities>& ContextMTL::GetCapabilities()
const {
376 return device_capabilities_;
// Replaces the capabilities object wholesale (e.g. when callers need to
// override what the constructor computed).
379 void ContextMTL::SetCapabilities(
380 const std::shared_ptr<const Capabilities>& capabilities) {
381 device_capabilities_ = capabilities;
// NOTE(review): body elided in this excerpt — cannot tell from here whether
// the format update is applied or rejected; confirm against the full file.
385 bool ContextMTL::UpdateOffscreenLayerPixelFormat(
PixelFormat format) {
// Vends a raw MTLCommandBuffer from the context's queue, optionally labeled
// for GPU frame captures. NOTE(review): the final return is elided.
390 id<MTLCommandBuffer> ContextMTL::CreateMTLCommandBuffer(
391 const std::string& label)
const {
392 auto buffer = [command_queue_ commandBuffer];
393 if (!label.empty()) {
394 [buffer setLabel:@(label.data())];
// Queues a task to run once the GPU becomes available again (flushed by
// FlushTasksAwaitingGPU). The queue is bounded by kMaxTasksAwaitingGPU:
// when over capacity, the OLDEST tasks are executed immediately rather than
// dropped, to make room.
399 void ContextMTL::StoreTaskForGPU(
const std::function<
void()>& task) {
400 tasks_awaiting_gpu_.emplace_back(task);
401 while (tasks_awaiting_gpu_.size() > kMaxTasksAwaitingGPU) {
402 tasks_awaiting_gpu_.front()();
403 tasks_awaiting_gpu_.pop_front();
// Runs every deferred GPU task and clears the queue. Invoked when the
// GPU-disabled switch flips to enabled (see OnSyncSwitchUpdate).
// NOTE(review): the task-invocation line inside the loop is elided here.
407 void ContextMTL::FlushTasksAwaitingGPU() {
408 for (
const auto& task : tasks_awaiting_gpu_) {
411 tasks_awaiting_gpu_.clear();
// Binds the observer to its owning context. NOTE(review): the member
// initializer list (presumably parent_(parent)) is elided in this excerpt.
414 ContextMTL::SyncSwitchObserver::SyncSwitchObserver(ContextMTL& parent)
// Sync-switch callback: when the GPU is no longer disabled, flush any tasks
// that were deferred via StoreTaskForGPU while it was off.
417 void ContextMTL::SyncSwitchObserver::OnSyncSwitchUpdate(
bool new_is_disabled) {
418 if (!new_is_disabled) {
419 parent_.FlushTasksAwaitingGPU();