Flutter Impeller
reflector.cc
Go to the documentation of this file.
1 // Copyright 2013 The Flutter Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 // FLUTTER_NOLINT: https://github.com/flutter/flutter/issues/105732
6 
8 
9 #include <atomic>
10 #include <optional>
11 #include <set>
12 #include <sstream>
13 
14 #include "flutter/fml/logging.h"
15 #include "fml/backtrace.h"
16 #include "impeller/base/strings.h"
24 #include "impeller/geometry/half.h"
28 #include "spirv_common.hpp"
29 
30 namespace impeller {
31 namespace compiler {
32 
33 static std::string ExecutionModelToString(spv::ExecutionModel model) {
34  switch (model) {
35  case spv::ExecutionModel::ExecutionModelVertex:
36  return "vertex";
37  case spv::ExecutionModel::ExecutionModelFragment:
38  return "fragment";
39  case spv::ExecutionModel::ExecutionModelGLCompute:
40  return "compute";
41  default:
42  return "unsupported";
43  }
44 }
45 
/// Translates a lowercase stage name (as produced by
/// ExecutionModelToString) into the fully qualified ShaderStage enumerator
/// spelled out for the generated code. Unrecognized names map to
/// "ShaderStage::kUnknown".
static std::string StringToShaderStage(const std::string& str) {
  // Table of (stage name, fully qualified enumerator) pairs.
  static constexpr struct {
    const char* name;
    const char* stage;
  } kKnownStages[] = {
      {"vertex", "ShaderStage::kVertex"},
      {"fragment", "ShaderStage::kFragment"},
      {"compute", "ShaderStage::kCompute"},
  };
  for (const auto& entry : kKnownStages) {
    if (str == entry.name) {
      return entry.stage;
    }
  }
  return "ShaderStage::kUnknown";
}
61 
63  const std::shared_ptr<const spirv_cross::ParsedIR>& ir,
64  const std::shared_ptr<fml::Mapping>& shader_data,
65  const CompilerBackend& compiler)
66  : options_(std::move(options)),
67  ir_(ir),
68  shader_data_(shader_data),
69  compiler_(compiler) {
70  if (!ir_ || !compiler_) {
71  return;
72  }
73 
74  if (auto template_arguments = GenerateTemplateArguments();
75  template_arguments.has_value()) {
76  template_arguments_ =
77  std::make_unique<nlohmann::json>(std::move(template_arguments.value()));
78  } else {
79  return;
80  }
81 
82  reflection_header_ = GenerateReflectionHeader();
83  if (!reflection_header_) {
84  return;
85  }
86 
87  reflection_cc_ = GenerateReflectionCC();
88  if (!reflection_cc_) {
89  return;
90  }
91 
92  runtime_stage_shader_ = GenerateRuntimeStageData();
93 
94  shader_bundle_data_ = GenerateShaderBundleData();
95  if (!shader_bundle_data_) {
96  return;
97  }
98 
99  is_valid_ = true;
100 }
101 
// Defaulted out-of-line destructor.
Reflector::~Reflector() = default;
103 
// Returns whether construction produced all required reflection artifacts.
bool Reflector::IsValid() const {
  return is_valid_;
}
107 
108 std::shared_ptr<fml::Mapping> Reflector::GetReflectionJSON() const {
109  if (!is_valid_) {
110  return nullptr;
111  }
112 
113  auto json_string =
114  std::make_shared<std::string>(template_arguments_->dump(2u));
115 
116  return std::make_shared<fml::NonOwnedMapping>(
117  reinterpret_cast<const uint8_t*>(json_string->data()),
118  json_string->size(), [json_string](auto, auto) {});
119 }
120 
// Returns the generated reflection header, or nullptr if generation failed.
std::shared_ptr<fml::Mapping> Reflector::GetReflectionHeader() const {
  return reflection_header_;
}
124 
// Returns the generated reflection translation unit, or nullptr if
// generation failed.
std::shared_ptr<fml::Mapping> Reflector::GetReflectionCC() const {
  return reflection_cc_;
}
128 
// Returns the runtime stage shader payload; may be nullptr when the target
// platform has no runtime stage backend.
std::shared_ptr<RuntimeStageData::Shader> Reflector::GetRuntimeStageShaderData()
    const {
  return runtime_stage_shader_;
}
133 
// Returns the Flutter GPU shader bundle payload, or nullptr if generation
// failed.
std::shared_ptr<ShaderBundleData> Reflector::GetShaderBundleData() const {
  return shader_bundle_data_;
}
137 
/// Builds the JSON object handed to the inja templates that generate the
/// reflection header and translation unit. Returns std::nullopt if the
/// shader does not have exactly one entrypoint or if any resource class
/// fails to reflect.
std::optional<nlohmann::json> Reflector::GenerateTemplateArguments() const {
  nlohmann::json root;

  const auto& entrypoints = compiler_->get_entry_points_and_stages();
  if (entrypoints.size() != 1) {
    VALIDATION_LOG << "Incorrect number of entrypoints in the shader. Found "
                   << entrypoints.size() << " but expected 1.";
    return std::nullopt;
  }

  auto execution_model = entrypoints.front().execution_model;
  // Top-level metadata consumed directly by the templates.
  {
    root["entrypoint"] = options_.entry_point_name;
    root["shader_name"] = options_.shader_name;
    root["shader_stage"] = ExecutionModelToString(execution_model);
    root["header_file_name"] = options_.header_file_name;
  }

  const auto shader_resources = compiler_->get_shader_resources();

  // Subpass Inputs.
  {
    auto& subpass_inputs = root["subpass_inputs"] = nlohmann::json::array_t{};
    if (auto subpass_inputs_json =
            ReflectResources(shader_resources.subpass_inputs);
        subpass_inputs_json.has_value()) {
      for (auto subpass_input : subpass_inputs_json.value()) {
        subpass_input["descriptor_type"] = "DescriptorType::kInputAttachment";
        subpass_inputs.emplace_back(std::move(subpass_input));
      }
    } else {
      return std::nullopt;
    }
  }

  // Uniform and storage buffers. Both land in the single "buffers" array,
  // distinguished only by their "descriptor_type" value.
  {
    auto& buffers = root["buffers"] = nlohmann::json::array_t{};
    if (auto uniform_buffers_json =
            ReflectResources(shader_resources.uniform_buffers);
        uniform_buffers_json.has_value()) {
      for (auto uniform_buffer : uniform_buffers_json.value()) {
        uniform_buffer["descriptor_type"] = "DescriptorType::kUniformBuffer";
        buffers.emplace_back(std::move(uniform_buffer));
      }
    } else {
      return std::nullopt;
    }
    if (auto storage_buffers_json =
            ReflectResources(shader_resources.storage_buffers);
        storage_buffers_json.has_value()) {
      for (auto uniform_buffer : storage_buffers_json.value()) {
        uniform_buffer["descriptor_type"] = "DescriptorType::kStorageBuffer";
        buffers.emplace_back(std::move(uniform_buffer));
      }
    } else {
      return std::nullopt;
    }
  }

  // Stage inputs. Byte offsets are computed only for the vertex stage,
  // where inputs are vertex attributes with a packed layout.
  {
    auto& stage_inputs = root["stage_inputs"] = nlohmann::json::array_t{};
    if (auto stage_inputs_json = ReflectResources(
            shader_resources.stage_inputs,
            /*compute_offsets=*/execution_model == spv::ExecutionModelVertex);
        stage_inputs_json.has_value()) {
      stage_inputs = std::move(stage_inputs_json.value());
    } else {
      return std::nullopt;
    }
  }

  // Combined image-samplers, separate images, and separate samplers are all
  // collected into the one "sampled_images" array, distinguished by their
  // "descriptor_type".
  {
    auto combined_sampled_images =
        ReflectResources(shader_resources.sampled_images);
    auto images = ReflectResources(shader_resources.separate_images);
    auto samplers = ReflectResources(shader_resources.separate_samplers);
    if (!combined_sampled_images.has_value() || !images.has_value() ||
        !samplers.has_value()) {
      return std::nullopt;
    }
    auto& sampled_images = root["sampled_images"] = nlohmann::json::array_t{};
    for (auto value : combined_sampled_images.value()) {
      value["descriptor_type"] = "DescriptorType::kSampledImage";
      sampled_images.emplace_back(std::move(value));
    }
    for (auto value : images.value()) {
      value["descriptor_type"] = "DescriptorType::kImage";
      sampled_images.emplace_back(std::move(value));
    }
    for (auto value : samplers.value()) {
      value["descriptor_type"] = "DescriptorType::kSampledSampler";
      sampled_images.emplace_back(std::move(value));
    }
  }

  if (auto stage_outputs = ReflectResources(shader_resources.stage_outputs);
      stage_outputs.has_value()) {
    root["stage_outputs"] = std::move(stage_outputs.value());
  } else {
    return std::nullopt;
  }

  // Struct definitions: the synthesized per-vertex struct (vertex stages
  // only) plus every interface struct found in the IR.
  {
    auto& struct_definitions = root["struct_definitions"] =
        nlohmann::json::array_t{};
    if (entrypoints.front().execution_model ==
            spv::ExecutionModel::ExecutionModelVertex &&
        !shader_resources.stage_inputs.empty()) {
      if (auto struc =
              ReflectPerVertexStructDefinition(shader_resources.stage_inputs);
          struc.has_value()) {
        struct_definitions.emplace_back(EmitStructDefinition(struc.value()));
      } else {
        // If there are stage inputs, it is an error to not generate a per
        // vertex data struct for a vertex like shader stage.
        return std::nullopt;
      }
    }

    std::set<spirv_cross::ID> known_structs;
    ir_->for_each_typed_id<spirv_cross::SPIRType>(
        [&](uint32_t, const spirv_cross::SPIRType& type) {
          if (type.basetype != spirv_cross::SPIRType::BaseType::Struct) {
            return;
          }
          // Skip structs that do not have layout offset decorations.
          // These structs are used internally within the shader and are not
          // part of the shader's interface.
          for (size_t i = 0; i < type.member_types.size(); i++) {
            if (!compiler_->has_member_decoration(type.self, i,
                                                  spv::DecorationOffset)) {
              return;
            }
          }
          if (known_structs.find(type.self) != known_structs.end()) {
            // Iterating over types this way leads to duplicates which may cause
            // duplicate struct definitions.
            return;
          }
          known_structs.insert(type.self);
          if (auto struc = ReflectStructDefinition(type.self);
              struc.has_value()) {
            struct_definitions.emplace_back(
                EmitStructDefinition(struc.value()));
          }
        });
  }

  root["bind_prototypes"] =
      EmitBindPrototypes(shader_resources, execution_model);

  return root;
}
292 
// Renders the reflection header template against the template arguments.
std::shared_ptr<fml::Mapping> Reflector::GenerateReflectionHeader() const {
  return InflateTemplate(kReflectionHeaderTemplate);
}
296 
// Renders the reflection translation-unit template against the template
// arguments.
std::shared_ptr<fml::Mapping> Reflector::GenerateReflectionCC() const {
  return InflateTemplate(kReflectionCCTemplate);
}
300 
// Maps a target platform to the runtime stage backend used when packaging
// runtime effects. Platforms without a runtime stage representation yield
// std::nullopt.
static std::optional<RuntimeStageBackend> GetRuntimeStageBackend(
    TargetPlatform target_platform) {
  switch (target_platform) {
    // NOTE(review): the TargetPlatform case labels (and their returns) are
    // missing from this listing — restore them from upstream before relying
    // on this switch.
    return std::nullopt;
  }
  FML_UNREACHABLE();
}
322 
/// Assembles the runtime stage payload: entrypoint metadata, the compiled
/// shader bytes, loose uniform descriptions (in declaration order), the
/// Vulkan UBO layout when applicable, and vertex input descriptions.
/// Returns nullptr for platforms without a runtime stage backend or when
/// validation fails.
std::shared_ptr<RuntimeStageData::Shader> Reflector::GenerateRuntimeStageData()
    const {
  auto backend = GetRuntimeStageBackend(options_.target_platform);
  if (!backend.has_value()) {
    // Not an error: the target platform simply has no runtime stage form.
    return nullptr;
  }

  const auto& entrypoints = compiler_->get_entry_points_and_stages();
  if (entrypoints.size() != 1u) {
    VALIDATION_LOG << "Single entrypoint not found.";
    return nullptr;
  }
  auto data = std::make_unique<RuntimeStageData::Shader>();
  data->entrypoint = options_.entry_point_name;
  data->stage = entrypoints.front().execution_model;
  data->shader = shader_data_;
  data->backend = backend.value();

  // Sort the IR so that the uniforms are in declaration order.
  std::vector<spirv_cross::ID> uniforms =
      SortUniforms(ir_.get(), compiler_.GetCompiler());
  for (auto& sorted_id : uniforms) {
    auto var = ir_->ids[sorted_id].get<spirv_cross::SPIRVariable>();
    const auto spir_type = compiler_->get_type(var.basetype);
    UniformDescription uniform_description;
    uniform_description.name = compiler_->get_name(var.self);
    uniform_description.location = compiler_->get_decoration(
        var.self, spv::Decoration::DecorationLocation);
    uniform_description.binding =
        compiler_->get_decoration(var.self, spv::Decoration::DecorationBinding);
    uniform_description.type = spir_type.basetype;
    uniform_description.rows = spir_type.vecsize;
    uniform_description.columns = spir_type.columns;
    uniform_description.bit_width = spir_type.width;
    uniform_description.array_elements = GetArrayElements(spir_type);
    // On Vulkan, non-sampler uniforms must live in the UBO handled below,
    // so any loose uniform here may only be a sampled image.
    FML_CHECK(data->backend != RuntimeStageBackend::kVulkan ||
              spir_type.basetype ==
                  spirv_cross::SPIRType::BaseType::SampledImage)
        << "Vulkan runtime effect had unexpected uniforms outside of the "
           "uniform buffer object.";
    data->uniforms.emplace_back(std::move(uniform_description));
  }

  const auto ubos = compiler_->get_shader_resources().uniform_buffers;
  if (data->backend == RuntimeStageBackend::kVulkan && !ubos.empty()) {
    // NOTE(review): with `&&`, a single UBO with the wrong name — or
    // multiple UBOs whose first has the right name — passes validation.
    // `||` looks like the intended operator; confirm against upstream.
    if (ubos.size() != 1 && ubos[0].name != RuntimeStage::kVulkanUBOName) {
      VALIDATION_LOG << "Expected a single UBO resource named "
                        "'"
                     // NOTE(review): the expected-name operand is missing
                     // from this listing (presumably
                     // RuntimeStage::kVulkanUBOName); restore from upstream.
                     << "' "
                        "for Vulkan runtime stage backend.";
      return nullptr;
    }

    const auto& ubo = ubos[0];

    size_t binding =
        compiler_->get_decoration(ubo.id, spv::Decoration::DecorationBinding);
    auto members = ReadStructMembers(ubo.type_id);
    // Encode the UBO layout one byte per float-sized slot: 0 = padding,
    // 1 = live float data.
    std::vector<uint8_t> struct_layout;
    size_t float_count = 0;

    for (size_t i = 0; i < members.size(); i += 1) {
      const auto& member = members[i];
      std::vector<int> bytes;
      switch (member.underlying_type) {
        // NOTE(review): the case labels of this switch are missing from this
        // listing; the branches below appear to handle padding members,
        // float members, and unsupported types respectively. Restore the
        // labels from upstream.
        size_t padding_count =
            (member.size + sizeof(float) - 1) / sizeof(float);
        while (padding_count > 0) {
          struct_layout.push_back(0);
          padding_count--;
        }
        break;
      }
        size_t member_float_count = member.byte_length / sizeof(float);
        float_count += member_float_count;
        while (member_float_count > 0) {
          struct_layout.push_back(1);
          member_float_count--;
        }
        break;
      }
        VALIDATION_LOG << "Non-floating-type struct member " << member.name
                       << " is not supported.";
        return nullptr;
      }
    }
    data->uniforms.emplace_back(UniformDescription{
        .name = ubo.name,
        .location = binding,
        .binding = binding,
        .type = spirv_cross::SPIRType::Struct,
        .struct_layout = std::move(struct_layout),
        .struct_float_count = float_count,
    });
  }

  // We only need to worry about storing vertex attributes.
  if (entrypoints.front().execution_model == spv::ExecutionModelVertex) {
    const auto inputs = compiler_->get_shader_resources().stage_inputs;
    auto input_offsets = ComputeOffsets(inputs);
    for (const auto& input : inputs) {
      std::optional<size_t> offset = GetOffset(input.id, input_offsets);

      const auto type = compiler_->get_type(input.type_id);

      InputDescription input_description;
      input_description.name = input.name;
      input_description.location = compiler_->get_decoration(
          input.id, spv::Decoration::DecorationLocation);
      input_description.set = compiler_->get_decoration(
          input.id, spv::Decoration::DecorationDescriptorSet);
      input_description.binding = compiler_->get_decoration(
          input.id, spv::Decoration::DecorationBinding);
      input_description.type = type.basetype;
      input_description.bit_width = type.width;
      input_description.vec_size = type.vecsize;
      input_description.columns = type.columns;
      input_description.offset = offset.value_or(0u);
      data->inputs.emplace_back(std::move(input_description));
    }
  }

  return data;
}
451 
/// Assembles the Flutter GPU shader bundle payload: the shader bytes,
/// uniform struct layouts, sampled-image bindings, and (for vertex shaders)
/// vertex input descriptions. Returns nullptr on validation failure.
std::shared_ptr<ShaderBundleData> Reflector::GenerateShaderBundleData() const {
  const auto& entrypoints = compiler_->get_entry_points_and_stages();
  if (entrypoints.size() != 1u) {
    VALIDATION_LOG << "Single entrypoint not found.";
    return nullptr;
  }
  auto data = std::make_shared<ShaderBundleData>(
      options_.entry_point_name,            //
      entrypoints.front().execution_model,  //
      options_.target_platform              //
  );
  data->SetShaderData(shader_data_);

  const auto uniforms = compiler_->get_shader_resources().uniform_buffers;
  for (const auto& uniform : uniforms) {
    ShaderBundleData::ShaderUniformStruct uniform_struct;
    uniform_struct.name = uniform.name;
    uniform_struct.ext_res_0 = compiler_.GetExtendedMSLResourceBinding(
        // NOTE(review): the arguments to this call are missing from this
        // listing; restore them from upstream.
    uniform_struct.set = compiler_->get_decoration(
        uniform.id, spv::Decoration::DecorationDescriptorSet);
    uniform_struct.binding = compiler_->get_decoration(
        uniform.id, spv::Decoration::DecorationBinding);

    const auto type = compiler_->get_type(uniform.type_id);
    if (type.basetype != spirv_cross::SPIRType::BaseType::Struct) {
      std::cerr << "Error: Uniform \"" << uniform.name
                << "\" is not a struct. All Flutter GPU shader uniforms must "
                   "be structs."
                << std::endl;
      return nullptr;
    }

    // Total size includes padding members; padding members themselves are
    // not exported as fields.
    size_t size_in_bytes = 0;
    for (const auto& struct_member : ReadStructMembers(uniform.type_id)) {
      size_in_bytes += struct_member.byte_length;
      if (StringStartsWith(struct_member.name, "_PADDING_")) {
        continue;
      }
      ShaderBundleData::ShaderUniformStructField uniform_struct_field;
      uniform_struct_field.name = struct_member.name;
      uniform_struct_field.type = struct_member.base_type;
      uniform_struct_field.offset_in_bytes = struct_member.offset;
      uniform_struct_field.element_size_in_bytes = struct_member.size;
      uniform_struct_field.total_size_in_bytes = struct_member.byte_length;
      uniform_struct_field.array_elements = struct_member.array_elements;
      uniform_struct.fields.push_back(uniform_struct_field);
    }
    uniform_struct.size_in_bytes = size_in_bytes;

    data->AddUniformStruct(uniform_struct);
  }

  const auto sampled_images = compiler_->get_shader_resources().sampled_images;
  for (const auto& image : sampled_images) {
    ShaderBundleData::ShaderUniformTexture uniform_texture;
    uniform_texture.name = image.name;
    uniform_texture.ext_res_0 = compiler_.GetExtendedMSLResourceBinding(
        // NOTE(review): the arguments to this call are missing from this
        // listing; restore them from upstream.
    uniform_texture.set = compiler_->get_decoration(
        image.id, spv::Decoration::DecorationDescriptorSet);
    uniform_texture.binding =
        compiler_->get_decoration(image.id, spv::Decoration::DecorationBinding);
    data->AddUniformTexture(uniform_texture);
  }

  // We only need to worry about storing vertex attributes.
  if (entrypoints.front().execution_model == spv::ExecutionModelVertex) {
    const auto inputs = compiler_->get_shader_resources().stage_inputs;
    auto input_offsets = ComputeOffsets(inputs);
    for (const auto& input : inputs) {
      std::optional<size_t> offset = GetOffset(input.id, input_offsets);

      const auto type = compiler_->get_type(input.type_id);

      InputDescription input_description;
      input_description.name = input.name;
      input_description.location = compiler_->get_decoration(
          input.id, spv::Decoration::DecorationLocation);
      input_description.set = compiler_->get_decoration(
          input.id, spv::Decoration::DecorationDescriptorSet);
      input_description.binding = compiler_->get_decoration(
          input.id, spv::Decoration::DecorationBinding);
      input_description.type = type.basetype;
      input_description.bit_width = type.width;
      input_description.vec_size = type.vecsize;
      input_description.columns = type.columns;
      input_description.offset = offset.value_or(0u);
      data->AddInputDescription(std::move(input_description));
    }
  }

  return data;
}
546 
547 std::optional<uint32_t> Reflector::GetArrayElements(
548  const spirv_cross::SPIRType& type) const {
549  if (type.array.empty()) {
550  return std::nullopt;
551  }
552  FML_CHECK(type.array.size() == 1)
553  << "Multi-dimensional arrays are not supported.";
554  FML_CHECK(type.array_size_literal.front())
555  << "Must use a literal for array sizes.";
556  return type.array.front();
557 }
558 
// Human-readable name of the target shading language, surfaced to templates
// via the "get_generator_name" callback.
static std::string ToString(CompilerBackend::Type type) {
  switch (type) {
    // NOTE(review): the CompilerBackend::Type case labels are missing from
    // this listing; restore them from upstream before relying on this
    // switch.
    return "Metal Shading Language";
    return "OpenGL Shading Language";
    return "OpenGL Shading Language (Relaxed Vulkan Semantics)";
    return "SkSL Shading Language";
  }
  FML_UNREACHABLE();
}
572 
573 std::shared_ptr<fml::Mapping> Reflector::InflateTemplate(
574  std::string_view tmpl) const {
575  inja::Environment env;
576  env.set_trim_blocks(true);
577  env.set_lstrip_blocks(true);
578 
579  env.add_callback("camel_case", 1u, [](inja::Arguments& args) {
580  return ToCamelCase(args.at(0u)->get<std::string>());
581  });
582 
583  env.add_callback("to_shader_stage", 1u, [](inja::Arguments& args) {
584  return StringToShaderStage(args.at(0u)->get<std::string>());
585  });
586 
587  env.add_callback("get_generator_name", 0u,
588  [type = compiler_.GetType()](inja::Arguments& args) {
589  return ToString(type);
590  });
591 
592  auto inflated_template =
593  std::make_shared<std::string>(env.render(tmpl, *template_arguments_));
594 
595  return std::make_shared<fml::NonOwnedMapping>(
596  reinterpret_cast<const uint8_t*>(inflated_template->data()),
597  inflated_template->size(), [inflated_template](auto, auto) {});
598 }
599 
600 std::vector<size_t> Reflector::ComputeOffsets(
601  const spirv_cross::SmallVector<spirv_cross::Resource>& resources) const {
602  std::vector<size_t> offsets(resources.size(), 0);
603  if (resources.size() == 0) {
604  return offsets;
605  }
606  for (const auto& resource : resources) {
607  const auto type = compiler_->get_type(resource.type_id);
608  auto location = compiler_->get_decoration(
609  resource.id, spv::Decoration::DecorationLocation);
610  // Malformed shader, will be caught later on.
611  if (location >= resources.size() || location < 0) {
612  location = 0;
613  }
614  offsets[location] = (type.width * type.vecsize) / 8;
615  }
616  for (size_t i = 1; i < resources.size(); i++) {
617  offsets[i] += offsets[i - 1];
618  }
619  for (size_t i = resources.size() - 1; i > 0; i--) {
620  offsets[i] = offsets[i - 1];
621  }
622  offsets[0] = 0;
623 
624  return offsets;
625 }
626 
627 std::optional<size_t> Reflector::GetOffset(
628  spirv_cross::ID id,
629  const std::vector<size_t>& offsets) const {
630  uint32_t location =
631  compiler_->get_decoration(id, spv::Decoration::DecorationLocation);
632  if (location >= offsets.size()) {
633  return std::nullopt;
634  }
635  return offsets[location];
636 }
637 
/// Reflects a single shader resource into a JSON object with its name,
/// binding decorations, optional byte offset, and reflected type. Returns
/// std::nullopt if the resource's type fails to reflect.
std::optional<nlohmann::json::object_t> Reflector::ReflectResource(
    const spirv_cross::Resource& resource,
    std::optional<size_t> offset) const {
  nlohmann::json::object_t result;

  result["name"] = resource.name;
  result["descriptor_set"] = compiler_->get_decoration(
      resource.id, spv::Decoration::DecorationDescriptorSet);
  result["binding"] = compiler_->get_decoration(
      resource.id, spv::Decoration::DecorationBinding);
  // "set" duplicates "descriptor_set"; both keys are populated for the
  // templates.
  result["set"] = compiler_->get_decoration(
      resource.id, spv::Decoration::DecorationDescriptorSet);
  result["location"] = compiler_->get_decoration(
      resource.id, spv::Decoration::DecorationLocation);
  result["index"] =
      compiler_->get_decoration(resource.id, spv::Decoration::DecorationIndex);
  result["ext_res_0"] = compiler_.GetExtendedMSLResourceBinding(
      // NOTE(review): the arguments to this call are missing from this
      // listing; restore them from upstream.
  result["ext_res_1"] = compiler_.GetExtendedMSLResourceBinding(
      // NOTE(review): the arguments to this call are missing from this
      // listing as well.
  result["relaxed_precision"] =
      compiler_->get_decoration(
          resource.id, spv::Decoration::DecorationRelaxedPrecision) == 1;
  result["offset"] = offset.value_or(0u);
  auto type = ReflectType(resource.type_id);
  if (!type.has_value()) {
    return std::nullopt;
  }
  result["type"] = std::move(type.value());
  return result;
}
669 
670 std::optional<nlohmann::json::object_t> Reflector::ReflectType(
671  const spirv_cross::TypeID& type_id) const {
672  nlohmann::json::object_t result;
673 
674  const auto type = compiler_->get_type(type_id);
675 
676  result["type_name"] = StructMember::BaseTypeToString(type.basetype);
677  result["bit_width"] = type.width;
678  result["vec_size"] = type.vecsize;
679  result["columns"] = type.columns;
680  auto& members = result["members"] = nlohmann::json::array_t{};
681  if (type.basetype == spirv_cross::SPIRType::BaseType::Struct) {
682  for (const auto& struct_member : ReadStructMembers(type_id)) {
683  auto member = nlohmann::json::object_t{};
684  member["name"] = struct_member.name;
685  member["type"] = struct_member.type;
686  member["base_type"] =
687  StructMember::BaseTypeToString(struct_member.base_type);
688  member["offset"] = struct_member.offset;
689  member["size"] = struct_member.size;
690  member["byte_length"] = struct_member.byte_length;
691  if (struct_member.array_elements.has_value()) {
692  member["array_elements"] = struct_member.array_elements.value();
693  } else {
694  member["array_elements"] = "std::nullopt";
695  }
696  members.emplace_back(std::move(member));
697  }
698  }
699 
700  return result;
701 }
702 
703 std::optional<nlohmann::json::array_t> Reflector::ReflectResources(
704  const spirv_cross::SmallVector<spirv_cross::Resource>& resources,
705  bool compute_offsets) const {
706  nlohmann::json::array_t result;
707  result.reserve(resources.size());
708  std::vector<size_t> offsets;
709  if (compute_offsets) {
710  offsets = ComputeOffsets(resources);
711  }
712  for (const auto& resource : resources) {
713  std::optional<size_t> maybe_offset = std::nullopt;
714  if (compute_offsets) {
715  maybe_offset = GetOffset(resource.id, offsets);
716  }
717  if (auto reflected = ReflectResource(resource, maybe_offset);
718  reflected.has_value()) {
719  result.emplace_back(std::move(reflected.value()));
720  } else {
721  return std::nullopt;
722  }
723  }
724  return result;
725 }
726 
/// Builds the reflected type name for a padding member of |size| bytes,
/// e.g. "Padding<4>".
static std::string TypeNameWithPaddingOfSize(size_t size) {
  // Direct concatenation; a stringstream is unnecessary for a single
  // integer insertion.
  return "Padding<" + std::to_string(size) + ">";
}
732 
// A scalar type whose native (host-side) type name and byte size are known
// to the reflection code generator.
struct KnownType {
  std::string name;      // Native type name emitted into generated code.
  size_t byte_size = 0;  // Size of the native type in bytes.
};
737 
738 static std::optional<KnownType> ReadKnownScalarType(
739  spirv_cross::SPIRType::BaseType type) {
740  switch (type) {
741  case spirv_cross::SPIRType::BaseType::Boolean:
742  return KnownType{
743  .name = "bool",
744  .byte_size = sizeof(bool),
745  };
746  case spirv_cross::SPIRType::BaseType::Float:
747  return KnownType{
748  .name = "Scalar",
749  .byte_size = sizeof(Scalar),
750  };
751  case spirv_cross::SPIRType::BaseType::Half:
752  return KnownType{
753  .name = "Half",
754  .byte_size = sizeof(Half),
755  };
756  case spirv_cross::SPIRType::BaseType::UInt:
757  return KnownType{
758  .name = "uint32_t",
759  .byte_size = sizeof(uint32_t),
760  };
761  case spirv_cross::SPIRType::BaseType::Int:
762  return KnownType{
763  .name = "int32_t",
764  .byte_size = sizeof(int32_t),
765  };
766  default:
767  break;
768  }
769  return std::nullopt;
770 }
771 
772 //------------------------------------------------------------------------------
773 /// @brief Get the reflected struct size. In the vast majority of the
774 /// cases, this is the same as the declared struct size as given by
775 /// the compiler. But, additional padding may need to be introduced
776 /// after the end of the struct to keep in line with the alignment
777 /// requirement of the individual struct members. This method
778 /// figures out the actual size of the reflected struct that can be
779 /// referenced in native code.
780 ///
781 /// @param[in] members The members
782 ///
783 /// @return The reflected structure size.
784 ///
785 static size_t GetReflectedStructSize(const std::vector<StructMember>& members) {
786  auto struct_size = 0u;
787  for (const auto& member : members) {
788  struct_size += member.byte_length;
789  }
790  return struct_size;
791 }
792 
793 std::vector<StructMember> Reflector::ReadStructMembers(
794  const spirv_cross::TypeID& type_id) const {
795  const auto& struct_type = compiler_->get_type(type_id);
796  FML_CHECK(struct_type.basetype == spirv_cross::SPIRType::BaseType::Struct);
797 
798  std::vector<StructMember> result;
799 
800  size_t current_byte_offset = 0;
801  size_t max_member_alignment = 0;
802 
803  for (size_t i = 0; i < struct_type.member_types.size(); i++) {
804  const auto& member = compiler_->get_type(struct_type.member_types[i]);
805  const auto struct_member_offset =
806  compiler_->type_struct_member_offset(struct_type, i);
807  auto array_elements = GetArrayElements(member);
808 
809  if (struct_member_offset > current_byte_offset) {
810  const auto alignment_pad = struct_member_offset - current_byte_offset;
811  result.emplace_back(StructMember{
812  TypeNameWithPaddingOfSize(alignment_pad), // type
813  spirv_cross::SPIRType::BaseType::Void, // basetype
814  SPrintF("_PADDING_%s_",
815  GetMemberNameAtIndex(struct_type, i).c_str()), // name
816  current_byte_offset, // offset
817  alignment_pad, // size
818  alignment_pad, // byte_length
819  std::nullopt, // array_elements
820  0, // element_padding
821  });
822  current_byte_offset += alignment_pad;
823  }
824 
825  max_member_alignment =
826  std::max<size_t>(max_member_alignment,
827  (member.width / 8) * member.columns * member.vecsize);
828 
829  FML_CHECK(current_byte_offset == struct_member_offset);
830 
831  // A user defined struct.
832  if (member.basetype == spirv_cross::SPIRType::BaseType::Struct) {
833  const size_t size =
834  GetReflectedStructSize(ReadStructMembers(member.self));
835  uint32_t stride = GetArrayStride<0>(struct_type, member, i);
836  if (stride == 0) {
837  stride = size;
838  }
839  uint32_t element_padding = stride - size;
840  result.emplace_back(StructMember{
841  compiler_->get_name(member.self), // type
842  member.basetype, // basetype
843  GetMemberNameAtIndex(struct_type, i), // name
844  struct_member_offset, // offset
845  size, // size
846  stride * array_elements.value_or(1), // byte_length
847  array_elements, // array_elements
848  element_padding, // element_padding
849  });
850  current_byte_offset += stride * array_elements.value_or(1);
851  continue;
852  }
853 
854  // Tightly packed 4x4 Matrix is special cased as we know how to work with
855  // those.
856  if (member.basetype == spirv_cross::SPIRType::BaseType::Float && //
857  member.width == sizeof(Scalar) * 8 && //
858  member.columns == 4 && //
859  member.vecsize == 4 //
860  ) {
861  uint32_t stride = GetArrayStride<sizeof(Matrix)>(struct_type, member, i);
862  uint32_t element_padding = stride - sizeof(Matrix);
863  result.emplace_back(StructMember{
864  "Matrix", // type
865  member.basetype, // basetype
866  GetMemberNameAtIndex(struct_type, i), // name
867  struct_member_offset, // offset
868  sizeof(Matrix), // size
869  stride * array_elements.value_or(1), // byte_length
870  array_elements, // array_elements
871  element_padding, // element_padding
872  });
873  current_byte_offset += stride * array_elements.value_or(1);
874  continue;
875  }
876 
877  // Tightly packed UintPoint32 (uvec2)
878  if (member.basetype == spirv_cross::SPIRType::BaseType::UInt && //
879  member.width == sizeof(uint32_t) * 8 && //
880  member.columns == 1 && //
881  member.vecsize == 2 //
882  ) {
883  uint32_t stride =
884  GetArrayStride<sizeof(UintPoint32)>(struct_type, member, i);
885  uint32_t element_padding = stride - sizeof(UintPoint32);
886  result.emplace_back(StructMember{
887  "UintPoint32", // type
888  member.basetype, // basetype
889  GetMemberNameAtIndex(struct_type, i), // name
890  struct_member_offset, // offset
891  sizeof(UintPoint32), // size
892  stride * array_elements.value_or(1), // byte_length
893  array_elements, // array_elements
894  element_padding, // element_padding
895  });
896  current_byte_offset += stride * array_elements.value_or(1);
897  continue;
898  }
899 
900  // Tightly packed UintPoint32 (ivec2)
901  if (member.basetype == spirv_cross::SPIRType::BaseType::Int && //
902  member.width == sizeof(int32_t) * 8 && //
903  member.columns == 1 && //
904  member.vecsize == 2 //
905  ) {
906  uint32_t stride =
907  GetArrayStride<sizeof(IPoint32)>(struct_type, member, i);
908  uint32_t element_padding = stride - sizeof(IPoint32);
909  result.emplace_back(StructMember{
910  "IPoint32", // type
911  member.basetype, // basetype
912  GetMemberNameAtIndex(struct_type, i), // name
913  struct_member_offset, // offset
914  sizeof(IPoint32), // size
915  stride * array_elements.value_or(1), // byte_length
916  array_elements, // array_elements
917  element_padding, // element_padding
918  });
919  current_byte_offset += stride * array_elements.value_or(1);
920  continue;
921  }
922 
923  // Tightly packed Point (vec2).
924  if (member.basetype == spirv_cross::SPIRType::BaseType::Float && //
925  member.width == sizeof(float) * 8 && //
926  member.columns == 1 && //
927  member.vecsize == 2 //
928  ) {
929  uint32_t stride = GetArrayStride<sizeof(Point)>(struct_type, member, i);
930  uint32_t element_padding = stride - sizeof(Point);
931  result.emplace_back(StructMember{
932  "Point", // type
933  member.basetype, // basetype
934  GetMemberNameAtIndex(struct_type, i), // name
935  struct_member_offset, // offset
936  sizeof(Point), // size
937  stride * array_elements.value_or(1), // byte_length
938  array_elements, // array_elements
939  element_padding, // element_padding
940  });
941  current_byte_offset += stride * array_elements.value_or(1);
942  continue;
943  }
944 
945  // Tightly packed Vector3.
946  if (member.basetype == spirv_cross::SPIRType::BaseType::Float && //
947  member.width == sizeof(float) * 8 && //
948  member.columns == 1 && //
949  member.vecsize == 3 //
950  ) {
951  uint32_t stride = GetArrayStride<sizeof(Vector3)>(struct_type, member, i);
952  uint32_t element_padding = stride - sizeof(Vector3);
953  result.emplace_back(StructMember{
954  "Vector3", // type
955  member.basetype, // basetype
956  GetMemberNameAtIndex(struct_type, i), // name
957  struct_member_offset, // offset
958  sizeof(Vector3), // size
959  stride * array_elements.value_or(1), // byte_length
960  array_elements, // array_elements
961  element_padding, // element_padding
962  });
963  current_byte_offset += stride * array_elements.value_or(1);
964  continue;
965  }
966 
967  // Tightly packed Vector4.
968  if (member.basetype == spirv_cross::SPIRType::BaseType::Float && //
969  member.width == sizeof(float) * 8 && //
970  member.columns == 1 && //
971  member.vecsize == 4 //
972  ) {
973  uint32_t stride = GetArrayStride<sizeof(Vector4)>(struct_type, member, i);
974  uint32_t element_padding = stride - sizeof(Vector4);
975  result.emplace_back(StructMember{
976  "Vector4", // type
977  member.basetype, // basetype
978  GetMemberNameAtIndex(struct_type, i), // name
979  struct_member_offset, // offset
980  sizeof(Vector4), // size
981  stride * array_elements.value_or(1), // byte_length
982  array_elements, // array_elements
983  element_padding, // element_padding
984  });
985  current_byte_offset += stride * array_elements.value_or(1);
986  continue;
987  }
988 
989  // Tightly packed half Point (vec2).
990  if (member.basetype == spirv_cross::SPIRType::BaseType::Half && //
991  member.width == sizeof(Half) * 8 && //
992  member.columns == 1 && //
993  member.vecsize == 2 //
994  ) {
995  uint32_t stride =
996  GetArrayStride<sizeof(HalfVector2)>(struct_type, member, i);
997  uint32_t element_padding = stride - sizeof(HalfVector2);
998  result.emplace_back(StructMember{
999  "HalfVector2", // type
1000  member.basetype, // basetype
1001  GetMemberNameAtIndex(struct_type, i), // name
1002  struct_member_offset, // offset
1003  sizeof(HalfVector2), // size
1004  stride * array_elements.value_or(1), // byte_length
1005  array_elements, // array_elements
1006  element_padding, // element_padding
1007  });
1008  current_byte_offset += stride * array_elements.value_or(1);
1009  continue;
1010  }
1011 
1012  // Tightly packed Half Float Vector3.
1013  if (member.basetype == spirv_cross::SPIRType::BaseType::Half && //
1014  member.width == sizeof(Half) * 8 && //
1015  member.columns == 1 && //
1016  member.vecsize == 3 //
1017  ) {
1018  uint32_t stride =
1019  GetArrayStride<sizeof(HalfVector3)>(struct_type, member, i);
1020  uint32_t element_padding = stride - sizeof(HalfVector3);
1021  result.emplace_back(StructMember{
1022  "HalfVector3", // type
1023  member.basetype, // basetype
1024  GetMemberNameAtIndex(struct_type, i), // name
1025  struct_member_offset, // offset
1026  sizeof(HalfVector3), // size
1027  stride * array_elements.value_or(1), // byte_length
1028  array_elements, // array_elements
1029  element_padding, // element_padding
1030  });
1031  current_byte_offset += stride * array_elements.value_or(1);
1032  continue;
1033  }
1034 
1035  // Tightly packed Half Float Vector4.
1036  if (member.basetype == spirv_cross::SPIRType::BaseType::Half && //
1037  member.width == sizeof(Half) * 8 && //
1038  member.columns == 1 && //
1039  member.vecsize == 4 //
1040  ) {
1041  uint32_t stride =
1042  GetArrayStride<sizeof(HalfVector4)>(struct_type, member, i);
1043  uint32_t element_padding = stride - sizeof(HalfVector4);
1044  result.emplace_back(StructMember{
1045  "HalfVector4", // type
1046  member.basetype, // basetype
1047  GetMemberNameAtIndex(struct_type, i), // name
1048  struct_member_offset, // offset
1049  sizeof(HalfVector4), // size
1050  stride * array_elements.value_or(1), // byte_length
1051  array_elements, // array_elements
1052  element_padding, // element_padding
1053  });
1054  current_byte_offset += stride * array_elements.value_or(1);
1055  continue;
1056  }
1057 
1058  // Other isolated scalars (like bool, int, float/Scalar, etc..).
1059  {
1060  auto maybe_known_type = ReadKnownScalarType(member.basetype);
1061  if (maybe_known_type.has_value() && //
1062  member.columns == 1 && //
1063  member.vecsize == 1 //
1064  ) {
1065  uint32_t stride = GetArrayStride<0>(struct_type, member, i);
1066  if (stride == 0) {
1067  stride = maybe_known_type.value().byte_size;
1068  }
1069  uint32_t element_padding = stride - maybe_known_type.value().byte_size;
1070  // Add the type directly.
1071  result.emplace_back(StructMember{
1072  maybe_known_type.value().name, // type
1073  member.basetype, // basetype
1074  GetMemberNameAtIndex(struct_type, i), // name
1075  struct_member_offset, // offset
1076  maybe_known_type.value().byte_size, // size
1077  stride * array_elements.value_or(1), // byte_length
1078  array_elements, // array_elements
1079  element_padding, // element_padding
1080  });
1081  current_byte_offset += stride * array_elements.value_or(1);
1082  continue;
1083  }
1084  }
1085 
1086  // Catch all for unknown types. Just add the necessary padding to the struct
1087  // and move on.
1088  {
1089  const size_t size = (member.width * member.columns * member.vecsize) / 8u;
1090  uint32_t stride = GetArrayStride<0>(struct_type, member, i);
1091  if (stride == 0) {
1092  stride = size;
1093  }
1094  auto element_padding = stride - size;
1095  result.emplace_back(StructMember{
1096  TypeNameWithPaddingOfSize(size), // type
1097  member.basetype, // basetype
1098  GetMemberNameAtIndex(struct_type, i), // name
1099  struct_member_offset, // offset
1100  size, // size
1101  stride * array_elements.value_or(1), // byte_length
1102  array_elements, // array_elements
1103  element_padding, // element_padding
1104  });
1105  current_byte_offset += stride * array_elements.value_or(1);
1106  continue;
1107  }
1108  }
1109 
1110  if (max_member_alignment > 0u) {
1111  const auto struct_length = current_byte_offset;
1112  {
1113  const auto excess = struct_length % max_member_alignment;
1114  if (excess != 0) {
1115  const auto padding = max_member_alignment - excess;
1116  result.emplace_back(StructMember{
1118  spirv_cross::SPIRType::BaseType::Void, // basetype
1119  "_PADDING_", // name
1120  current_byte_offset, // offset
1121  padding, // size
1122  padding, // byte_length
1123  std::nullopt, // array_elements
1124  0, // element_padding
1125  });
1126  }
1127  }
1128  }
1129 
1130  return result;
1131 }
1132 
1133 std::optional<Reflector::StructDefinition> Reflector::ReflectStructDefinition(
1134  const spirv_cross::TypeID& type_id) const {
1135  const auto& type = compiler_->get_type(type_id);
1136  if (type.basetype != spirv_cross::SPIRType::BaseType::Struct) {
1137  return std::nullopt;
1138  }
1139 
1140  const auto struct_name = compiler_->get_name(type_id);
1141  if (struct_name.find("_RESERVED_IDENTIFIER_") != std::string::npos) {
1142  return std::nullopt;
1143  }
1144 
1145  auto struct_members = ReadStructMembers(type_id);
1146  auto reflected_struct_size = GetReflectedStructSize(struct_members);
1147 
1148  StructDefinition struc;
1149  struc.name = struct_name;
1150  struc.byte_length = reflected_struct_size;
1151  struc.members = std::move(struct_members);
1152  return struc;
1153 }
1154 
1155 nlohmann::json::object_t Reflector::EmitStructDefinition(
1156  std::optional<Reflector::StructDefinition> struc) const {
1157  nlohmann::json::object_t result;
1158  result["name"] = struc->name;
1159  result["byte_length"] = struc->byte_length;
1160  auto& members = result["members"] = nlohmann::json::array_t{};
1161  for (const auto& struct_member : struc->members) {
1162  auto& member = members.emplace_back(nlohmann::json::object_t{});
1163  member["name"] = struct_member.name;
1164  member["type"] = struct_member.type;
1165  member["base_type"] =
1166  StructMember::BaseTypeToString(struct_member.base_type);
1167  member["offset"] = struct_member.offset;
1168  member["byte_length"] = struct_member.byte_length;
1169  if (struct_member.array_elements.has_value()) {
1170  member["array_elements"] = struct_member.array_elements.value();
1171  } else {
1172  member["array_elements"] = "std::nullopt";
1173  }
1174  member["element_padding"] = struct_member.element_padding;
1175  }
1176  return result;
1177 }
1178 
// Reflected description of a single vertex stage input: the Impeller/C++
// type it maps to in generated code, plus its SPIR-V base type and size.
struct VertexType {
  std::string type_name;  // Emitted C++ type name (e.g. "Point", "Vector4").
  spirv_cross::SPIRType::BaseType base_type;  // Underlying SPIR-V scalar type.
  std::string variable_name;  // The shader resource (variable) name.
  size_t byte_length = 0u;    // Total size of the input in bytes.
};
1185 
    const spirv_cross::Compiler& compiler,
    const spirv_cross::Resource* resource) {
  VertexType result;
  result.variable_name = resource->name;
  const auto& type = compiler.get_type(resource->type_id);
  result.base_type = type.basetype;
  // SPIRType::width is in bits; total size in bytes covers all columns and
  // vector components.
  const auto total_size = type.columns * type.vecsize * type.width / 8u;
  result.byte_length = total_size;

  // Map exact SPIR-V shapes onto known Impeller geometry types; anything
  // unrecognized falls through to an opaque padded type of the right size.
  if (type.basetype == spirv_cross::SPIRType::BaseType::Float &&
      type.columns == 1u && type.vecsize == 2u &&
      type.width == sizeof(float) * 8u) {
    result.type_name = "Point";
  } else if (type.basetype == spirv_cross::SPIRType::BaseType::Float &&
             type.columns == 1u && type.vecsize == 4u &&
             type.width == sizeof(float) * 8u) {
    result.type_name = "Vector4";
  } else if (type.basetype == spirv_cross::SPIRType::BaseType::Float &&
             type.columns == 1u && type.vecsize == 3u &&
             type.width == sizeof(float) * 8u) {
    result.type_name = "Vector3";
  } else if (type.basetype == spirv_cross::SPIRType::BaseType::Float &&
             type.columns == 1u && type.vecsize == 1u &&
             type.width == sizeof(float) * 8u) {
    result.type_name = "Scalar";
  } else if (type.basetype == spirv_cross::SPIRType::BaseType::Int &&
             type.columns == 1u && type.vecsize == 1u &&
             type.width == sizeof(int32_t) * 8u) {
    result.type_name = "int32_t";
  } else {
    // Catch all unknown padding.
    result.type_name = TypeNameWithPaddingOfSize(total_size);
  }

  return result;
}
1223 
1224 std::optional<Reflector::StructDefinition>
1225 Reflector::ReflectPerVertexStructDefinition(
1226  const spirv_cross::SmallVector<spirv_cross::Resource>& stage_inputs) const {
1227  // Avoid emitting a zero sized structure. The code gen templates assume a
1228  // non-zero size.
1229  if (stage_inputs.empty()) {
1230  return std::nullopt;
1231  }
1232 
1233  // Validate locations are contiguous and there are no duplicates.
1234  std::set<uint32_t> locations;
1235  for (const auto& input : stage_inputs) {
1236  auto location = compiler_->get_decoration(
1237  input.id, spv::Decoration::DecorationLocation);
1238  if (locations.count(location) != 0) {
1239  // Duplicate location. Bail.
1240  return std::nullopt;
1241  }
1242  locations.insert(location);
1243  }
1244 
1245  for (size_t i = 0; i < locations.size(); i++) {
1246  if (locations.count(i) != 1) {
1247  // Locations are not contiguous. This usually happens when a single stage
1248  // input takes multiple input slots. No reflection information can be
1249  // generated for such cases anyway. So bail! It is up to the shader author
1250  // to make sure one stage input maps to a single input slot.
1251  return std::nullopt;
1252  }
1253  }
1254 
1255  auto input_for_location =
1256  [&](uint32_t queried_location) -> const spirv_cross::Resource* {
1257  for (const auto& input : stage_inputs) {
1258  auto location = compiler_->get_decoration(
1259  input.id, spv::Decoration::DecorationLocation);
1260  if (location == queried_location) {
1261  return &input;
1262  }
1263  }
1264  // This really cannot happen with all the validation above.
1265  FML_UNREACHABLE();
1266  return nullptr;
1267  };
1268 
1269  StructDefinition struc;
1270  struc.name = "PerVertexData";
1271  struc.byte_length = 0u;
1272  for (size_t i = 0; i < locations.size(); i++) {
1273  auto resource = input_for_location(i);
1274  if (resource == nullptr) {
1275  return std::nullopt;
1276  }
1277  const auto vertex_type =
1278  VertexTypeFromInputResource(*compiler_.GetCompiler(), resource);
1279 
1280  auto member = StructMember{
1281  vertex_type.type_name, // type
1282  vertex_type.base_type, // base type
1283  vertex_type.variable_name, // name
1284  struc.byte_length, // offset
1285  vertex_type.byte_length, // size
1286  vertex_type.byte_length, // byte_length
1287  std::nullopt, // array_elements
1288  0, // element_padding
1289  };
1290  struc.byte_length += vertex_type.byte_length;
1291  struc.members.emplace_back(std::move(member));
1292  }
1293  return struc;
1294 }
1295 
1296 std::optional<std::string> Reflector::GetMemberNameAtIndexIfExists(
1297  const spirv_cross::SPIRType& parent_type,
1298  size_t index) const {
1299  if (parent_type.type_alias != 0) {
1300  return GetMemberNameAtIndexIfExists(
1301  compiler_->get_type(parent_type.type_alias), index);
1302  }
1303 
1304  if (auto found = ir_->meta.find(parent_type.self); found != ir_->meta.end()) {
1305  const auto& members = found->second.members;
1306  if (index < members.size() && !members[index].alias.empty()) {
1307  return members[index].alias;
1308  }
1309  }
1310  return std::nullopt;
1311 }
1312 
1313 std::string Reflector::GetMemberNameAtIndex(
1314  const spirv_cross::SPIRType& parent_type,
1315  size_t index,
1316  std::string suffix) const {
1317  if (auto name = GetMemberNameAtIndexIfExists(parent_type, index);
1318  name.has_value()) {
1319  return name.value();
1320  }
1321  static std::atomic_size_t sUnnamedMembersID;
1322  std::stringstream stream;
1323  stream << "unnamed_" << sUnnamedMembersID++ << suffix;
1324  return stream.str();
1325 }
1326 
1327 std::vector<Reflector::BindPrototype> Reflector::ReflectBindPrototypes(
1328  const spirv_cross::ShaderResources& resources,
1329  spv::ExecutionModel execution_model) const {
1330  std::vector<BindPrototype> prototypes;
1331  for (const auto& uniform_buffer : resources.uniform_buffers) {
1332  auto& proto = prototypes.emplace_back(BindPrototype{});
1333  proto.return_type = "bool";
1334  proto.name = ToCamelCase(uniform_buffer.name);
1335  proto.descriptor_type = "DescriptorType::kUniformBuffer";
1336  {
1337  std::stringstream stream;
1338  stream << "Bind uniform buffer for resource named " << uniform_buffer.name
1339  << ".";
1340  proto.docstring = stream.str();
1341  }
1342  proto.args.push_back(BindPrototypeArgument{
1343  .type_name = "ResourceBinder&",
1344  .argument_name = "command",
1345  });
1346  proto.args.push_back(BindPrototypeArgument{
1347  .type_name = "BufferView",
1348  .argument_name = "view",
1349  });
1350  }
1351  for (const auto& storage_buffer : resources.storage_buffers) {
1352  auto& proto = prototypes.emplace_back(BindPrototype{});
1353  proto.return_type = "bool";
1354  proto.name = ToCamelCase(storage_buffer.name);
1355  proto.descriptor_type = "DescriptorType::kStorageBuffer";
1356  {
1357  std::stringstream stream;
1358  stream << "Bind storage buffer for resource named " << storage_buffer.name
1359  << ".";
1360  proto.docstring = stream.str();
1361  }
1362  proto.args.push_back(BindPrototypeArgument{
1363  .type_name = "ResourceBinder&",
1364  .argument_name = "command",
1365  });
1366  proto.args.push_back(BindPrototypeArgument{
1367  .type_name = "BufferView",
1368  .argument_name = "view",
1369  });
1370  }
1371  for (const auto& sampled_image : resources.sampled_images) {
1372  auto& proto = prototypes.emplace_back(BindPrototype{});
1373  proto.return_type = "bool";
1374  proto.name = ToCamelCase(sampled_image.name);
1375  proto.descriptor_type = "DescriptorType::kSampledImage";
1376  {
1377  std::stringstream stream;
1378  stream << "Bind combined image sampler for resource named "
1379  << sampled_image.name << ".";
1380  proto.docstring = stream.str();
1381  }
1382  proto.args.push_back(BindPrototypeArgument{
1383  .type_name = "ResourceBinder&",
1384  .argument_name = "command",
1385  });
1386  proto.args.push_back(BindPrototypeArgument{
1387  .type_name = "std::shared_ptr<const Texture>",
1388  .argument_name = "texture",
1389  });
1390  proto.args.push_back(BindPrototypeArgument{
1391  .type_name = "const std::unique_ptr<const Sampler>&",
1392  .argument_name = "sampler",
1393  });
1394  }
1395  for (const auto& separate_image : resources.separate_images) {
1396  auto& proto = prototypes.emplace_back(BindPrototype{});
1397  proto.return_type = "bool";
1398  proto.name = ToCamelCase(separate_image.name);
1399  proto.descriptor_type = "DescriptorType::kImage";
1400  {
1401  std::stringstream stream;
1402  stream << "Bind separate image for resource named " << separate_image.name
1403  << ".";
1404  proto.docstring = stream.str();
1405  }
1406  proto.args.push_back(BindPrototypeArgument{
1407  .type_name = "Command&",
1408  .argument_name = "command",
1409  });
1410  proto.args.push_back(BindPrototypeArgument{
1411  .type_name = "std::shared_ptr<const Texture>",
1412  .argument_name = "texture",
1413  });
1414  }
1415  for (const auto& separate_sampler : resources.separate_samplers) {
1416  auto& proto = prototypes.emplace_back(BindPrototype{});
1417  proto.return_type = "bool";
1418  proto.name = ToCamelCase(separate_sampler.name);
1419  proto.descriptor_type = "DescriptorType::kSampler";
1420  {
1421  std::stringstream stream;
1422  stream << "Bind separate sampler for resource named "
1423  << separate_sampler.name << ".";
1424  proto.docstring = stream.str();
1425  }
1426  proto.args.push_back(BindPrototypeArgument{
1427  .type_name = "Command&",
1428  .argument_name = "command",
1429  });
1430  proto.args.push_back(BindPrototypeArgument{
1431  .type_name = "std::shared_ptr<const Sampler>",
1432  .argument_name = "sampler",
1433  });
1434  }
1435  return prototypes;
1436 }
1437 
1438 nlohmann::json::array_t Reflector::EmitBindPrototypes(
1439  const spirv_cross::ShaderResources& resources,
1440  spv::ExecutionModel execution_model) const {
1441  const auto prototypes = ReflectBindPrototypes(resources, execution_model);
1442  nlohmann::json::array_t result;
1443  for (const auto& res : prototypes) {
1444  auto& item = result.emplace_back(nlohmann::json::object_t{});
1445  item["return_type"] = res.return_type;
1446  item["name"] = res.name;
1447  item["docstring"] = res.docstring;
1448  item["descriptor_type"] = res.descriptor_type;
1449  auto& args = item["args"] = nlohmann::json::array_t{};
1450  for (const auto& arg : res.args) {
1451  auto& json_arg = args.emplace_back(nlohmann::json::object_t{});
1452  json_arg["type_name"] = arg.type_name;
1453  json_arg["argument_name"] = arg.argument_name;
1454  }
1455  }
1456  return result;
1457 }
1458 
1459 } // namespace compiler
1460 } // namespace impeller
impeller::compiler::ToCamelCase
std::string ToCamelCase(std::string_view string)
Definition: utilities.cc:39
impeller::compiler::GetRuntimeStageBackend
static std::optional< RuntimeStageBackend > GetRuntimeStageBackend(TargetPlatform target_platform)
Definition: reflector.cc:301
uniform_sorter.h
impeller::Scalar
float Scalar
Definition: scalar.h:18
impeller::RuntimeStageBackend::kVulkan
@ kVulkan
impeller::compiler::VertexType::byte_length
size_t byte_length
Definition: reflector.cc:1183
impeller::compiler::CompilerBackend
Definition: compiler_backend.h:19
impeller::compiler::VertexType::variable_name
std::string variable_name
Definition: reflector.cc:1182
data
std::shared_ptr< const fml::Mapping > data
Definition: texture_gles.cc:63
impeller::RuntimeStage::kVulkanUBOName
static const char * kVulkanUBOName
Definition: runtime_stage.h:22
impeller::compiler::TargetPlatform::kMetalDesktop
@ kMetalDesktop
impeller::compiler::VertexType::base_type
spirv_cross::SPIRType::BaseType base_type
Definition: reflector.cc:1181
impeller::compiler::StructMember::UnderlyingType::kFloat
@ kFloat
impeller::UintPoint32
TPoint< uint32_t > UintPoint32
Definition: point.h:330
padding
Vector2 padding
The halo padding in source space.
Definition: gaussian_blur_filter_contents.cc:85
impeller::compiler::StructMember::UnderlyingType::kOther
@ kOther
impeller::compiler::TargetPlatform::kMetalIOS
@ kMetalIOS
impeller::compiler::KnownType
Definition: reflector.cc:733
impeller::compiler::CompilerBackend::Type::kGLSLVulkan
@ kGLSLVulkan
impeller::compiler::Reflector::GetReflectionCC
std::shared_ptr< fml::Mapping > GetReflectionCC() const
Definition: reflector.cc:125
impeller::compiler::Reflector::Options::header_file_name
std::string header_file_name
Definition: reflector.h:150
validation.h
impeller::compiler::TargetPlatform
TargetPlatform
Definition: types.h:28
offset
SeparatedVector2 offset
Definition: stroke_path_geometry.cc:304
impeller::compiler::VertexTypeFromInputResource
static VertexType VertexTypeFromInputResource(const spirv_cross::Compiler &compiler, const spirv_cross::Resource *resource)
Definition: reflector.cc:1186
impeller::compiler::Reflector::GetReflectionHeader
std::shared_ptr< fml::Mapping > GetReflectionHeader() const
Definition: reflector.cc:121
runtime_types.h
impeller::RuntimeStageBackend::kOpenGLES
@ kOpenGLES
impeller::compiler::Reflector::Options::target_platform
TargetPlatform target_platform
Definition: reflector.h:147
code_gen_template.h
reflector.h
impeller::compiler::Reflector::GetRuntimeStageShaderData
std::shared_ptr< RuntimeStageData::Shader > GetRuntimeStageShaderData() const
Definition: reflector.cc:129
matrix.h
shader_bundle_data.h
impeller::compiler::KnownType::byte_size
size_t byte_size
Definition: reflector.cc:735
impeller::compiler::StructMember::UnderlyingType::kPadding
@ kPadding
impeller::Point
TPoint< Scalar > Point
Definition: point.h:327
runtime_stage.h
impeller::Half
A storage only class for half precision floating point.
Definition: half.h:41
impeller::compiler::CompilerBackend::Type
Type
Definition: compiler_backend.h:25
impeller::compiler::KnownType::name
std::string name
Definition: reflector.cc:734
impeller::SortUniforms
std::vector< spirv_cross::ID > SortUniforms(const spirv_cross::ParsedIR *ir, const spirv_cross::Compiler *compiler, std::optional< spirv_cross::SPIRType::BaseType > type_filter, bool include)
Sorts uniform declarations in an IR according to decoration order.
Definition: uniform_sorter.cc:11
impeller::compiler::TargetPlatform::kVulkan
@ kVulkan
impeller::SPrintF
std::string SPrintF(const char *format,...)
Definition: strings.cc:12
impeller::compiler::Reflector::Options::shader_name
std::string shader_name
Definition: reflector.h:149
impeller::compiler::TargetPlatform::kRuntimeStageVulkan
@ kRuntimeStageVulkan
impeller::compiler::kReflectionCCTemplate
constexpr std::string_view kReflectionCCTemplate
Definition: code_gen_template.h:202
impeller::compiler::CompilerBackend::ExtendedResourceIndex::kSecondary
@ kSecondary
impeller::compiler::ExecutionModelToString
static std::string ExecutionModelToString(spv::ExecutionModel model)
Definition: reflector.cc:33
impeller::compiler::CompilerBackend::GetExtendedMSLResourceBinding
uint32_t GetExtendedMSLResourceBinding(ExtendedResourceIndex index, spirv_cross::ID id) const
Definition: compiler_backend.cc:35
impeller::IPoint32
TPoint< int32_t > IPoint32
Definition: point.h:329
impeller::compiler::CompilerBackend::Type::kGLSL
@ kGLSL
impeller::compiler::CompilerBackend::GetType
Type GetType() const
Definition: compiler_backend.cc:108
impeller::compiler::VertexType::type_name
std::string type_name
Definition: reflector.cc:1180
impeller::compiler::ReadKnownScalarType
static std::optional< KnownType > ReadKnownScalarType(spirv_cross::SPIRType::BaseType type)
Definition: reflector.cc:738
impeller::compiler::Reflector::~Reflector
~Reflector()
type
GLenum type
Definition: blit_command_gles.cc:127
impeller::compiler::Reflector::Options
Definition: reflector.h:146
impeller::compiler::kReflectionHeaderTemplate
constexpr std::string_view kReflectionHeaderTemplate
Definition: code_gen_template.h:10
impeller::compiler::StructMember::BaseTypeToString
static std::string BaseTypeToString(spirv_cross::SPIRType::BaseType type)
Definition: reflector.h:44
impeller::compiler::ToString
static std::string ToString(CompilerBackend::Type type)
Definition: reflector.cc:559
utilities.h
strings.h
impeller::compiler::CompilerBackend::GetCompiler
spirv_cross::Compiler * GetCompiler()
Definition: compiler_backend.cc:54
impeller::compiler::StringToShaderStage
static std::string StringToShaderStage(const std::string &str)
Definition: reflector.cc:46
scalar.h
VALIDATION_LOG
#define VALIDATION_LOG
Definition: validation.h:91
impeller::compiler::CompilerBackend::Type::kSkSL
@ kSkSL
half.h
impeller::RuntimeStageBackend::kSkSL
@ kSkSL
std
Definition: comparable.h:95
impeller::compiler::TargetPlatform::kOpenGLDesktop
@ kOpenGLDesktop
impeller::compiler::TargetPlatform::kUnknown
@ kUnknown
impeller::compiler::TargetPlatform::kOpenGLES
@ kOpenGLES
impeller::compiler::CompilerBackend::Type::kMSL
@ kMSL
impeller::compiler::Reflector::GetShaderBundleData
std::shared_ptr< ShaderBundleData > GetShaderBundleData() const
Definition: reflector.cc:134
impeller::compiler::TargetPlatform::kRuntimeStageMetal
@ kRuntimeStageMetal
impeller::compiler::Reflector::GetReflectionJSON
std::shared_ptr< fml::Mapping > GetReflectionJSON() const
Definition: reflector.cc:108
impeller::compiler::CompilerBackend::ExtendedResourceIndex::kPrimary
@ kPrimary
impeller::compiler::Reflector::Options::entry_point_name
std::string entry_point_name
Definition: reflector.h:148
impeller::compiler::Reflector::IsValid
bool IsValid() const
Definition: reflector.cc:104
impeller::compiler::GetReflectedStructSize
static size_t GetReflectedStructSize(const std::vector< StructMember > &members)
Get the reflected struct size. In the vast majority of the cases, this is the same as the declared st...
Definition: reflector.cc:785
impeller::RuntimeStageBackend::kMetal
@ kMetal
impeller::compiler::StringStartsWith
bool StringStartsWith(const std::string &target, const std::string &prefix)
Definition: utilities.cc:87
impeller
Definition: allocation.cc:12
impeller::compiler::TypeNameWithPaddingOfSize
static std::string TypeNameWithPaddingOfSize(size_t size)
Definition: reflector.cc:727
impeller::compiler::Reflector::Reflector
Reflector(Options options, const std::shared_ptr< const spirv_cross::ParsedIR > &ir, const std::shared_ptr< fml::Mapping > &shader_data, const CompilerBackend &compiler)
Definition: reflector.cc:62
types.h
impeller::compiler::VertexType
Definition: reflector.cc:1179
impeller::compiler::TargetPlatform::kSkSL
@ kSkSL
impeller::compiler::TargetPlatform::kRuntimeStageGLES
@ kRuntimeStageGLES