Flutter Impeller
reflector.cc
Go to the documentation of this file.
1 // Copyright 2013 The Flutter Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 // FLUTTER_NOLINT: https://github.com/flutter/flutter/issues/105732
6 
8 
9 #include <atomic>
10 #include <optional>
11 #include <set>
12 #include <sstream>
13 
14 #include "flutter/fml/logging.h"
15 #include "impeller/base/strings.h"
22 #include "impeller/geometry/half.h"
25 
26 namespace impeller {
27 namespace compiler {
28 
29 static std::string BaseTypeToString(spirv_cross::SPIRType::BaseType type) {
30  using Type = spirv_cross::SPIRType::BaseType;
31  switch (type) {
32  case Type::Void:
33  return "ShaderType::kVoid";
34  case Type::Boolean:
35  return "ShaderType::kBoolean";
36  case Type::SByte:
37  return "ShaderType::kSignedByte";
38  case Type::UByte:
39  return "ShaderType::kUnsignedByte";
40  case Type::Short:
41  return "ShaderType::kSignedShort";
42  case Type::UShort:
43  return "ShaderType::kUnsignedShort";
44  case Type::Int:
45  return "ShaderType::kSignedInt";
46  case Type::UInt:
47  return "ShaderType::kUnsignedInt";
48  case Type::Int64:
49  return "ShaderType::kSignedInt64";
50  case Type::UInt64:
51  return "ShaderType::kUnsignedInt64";
52  case Type::AtomicCounter:
53  return "ShaderType::kAtomicCounter";
54  case Type::Half:
55  return "ShaderType::kHalfFloat";
56  case Type::Float:
57  return "ShaderType::kFloat";
58  case Type::Double:
59  return "ShaderType::kDouble";
60  case Type::Struct:
61  return "ShaderType::kStruct";
62  case Type::Image:
63  return "ShaderType::kImage";
64  case Type::SampledImage:
65  return "ShaderType::kSampledImage";
66  case Type::Sampler:
67  return "ShaderType::kSampler";
68  default:
69  return "ShaderType::kUnknown";
70  }
71 }
72 
73 static std::string ExecutionModelToString(spv::ExecutionModel model) {
74  switch (model) {
75  case spv::ExecutionModel::ExecutionModelVertex:
76  return "vertex";
77  case spv::ExecutionModel::ExecutionModelFragment:
78  return "fragment";
79  case spv::ExecutionModel::ExecutionModelGLCompute:
80  return "compute";
81  default:
82  return "unsupported";
83  }
84 }
85 
//------------------------------------------------------------------------------
/// @brief      Map a stage name (as produced by ExecutionModelToString) to the
///             string spelling of the matching `ShaderStage` enum constant.
///             Unrecognized names map to "ShaderStage::kUnknown".
///
static std::string StringToShaderStage(const std::string& str) {
  if (str == "vertex") {
    return "ShaderStage::kVertex";
  } else if (str == "fragment") {
    return "ShaderStage::kFragment";
  } else if (str == "compute") {
    return "ShaderStage::kCompute";
  }
  return "ShaderStage::kUnknown";
}
101 
103  const std::shared_ptr<const spirv_cross::ParsedIR>& ir,
104  const std::shared_ptr<fml::Mapping>& shader_data,
105  const CompilerBackend& compiler)
106  : options_(std::move(options)),
107  ir_(ir),
108  shader_data_(shader_data),
109  compiler_(compiler) {
110  if (!ir_ || !compiler_) {
111  return;
112  }
113 
114  if (auto template_arguments = GenerateTemplateArguments();
115  template_arguments.has_value()) {
116  template_arguments_ =
117  std::make_unique<nlohmann::json>(std::move(template_arguments.value()));
118  } else {
119  return;
120  }
121 
122  reflection_header_ = GenerateReflectionHeader();
123  if (!reflection_header_) {
124  return;
125  }
126 
127  reflection_cc_ = GenerateReflectionCC();
128  if (!reflection_cc_) {
129  return;
130  }
131 
132  runtime_stage_shader_ = GenerateRuntimeStageData();
133 
134  is_valid_ = true;
135 }
136 
// Defaulted out-of-line; members release their resources via RAII.
Reflector::~Reflector() = default;
138 
// Whether construction fully succeeded and the generated artifacts are safe
// to query.
bool Reflector::IsValid() const {
  return is_valid_;
}
142 
143 std::shared_ptr<fml::Mapping> Reflector::GetReflectionJSON() const {
144  if (!is_valid_) {
145  return nullptr;
146  }
147 
148  auto json_string =
149  std::make_shared<std::string>(template_arguments_->dump(2u));
150 
151  return std::make_shared<fml::NonOwnedMapping>(
152  reinterpret_cast<const uint8_t*>(json_string->data()),
153  json_string->size(), [json_string](auto, auto) {});
154 }
155 
// The generated reflection header contents (null if construction failed).
std::shared_ptr<fml::Mapping> Reflector::GetReflectionHeader() const {
  return reflection_header_;
}
159 
// The generated reflection translation-unit contents (null if construction
// failed).
std::shared_ptr<fml::Mapping> Reflector::GetReflectionCC() const {
  return reflection_cc_;
}
163 
// The runtime stage payload, if one was generated for the target platform
// (may be null even when the reflector is valid).
std::shared_ptr<RuntimeStageData::Shader> Reflector::GetRuntimeStageShaderData()
    const {
  return runtime_stage_shader_;
}
168 
//------------------------------------------------------------------------------
/// @brief      Build the inja template arguments (a JSON tree) that drive the
///             reflection header and CC templates.
///
/// @return     The JSON tree, or std::nullopt if the shader is malformed
///             (e.g., it does not have exactly one entrypoint) or any
///             resource fails to reflect.
///
std::optional<nlohmann::json> Reflector::GenerateTemplateArguments() const {
  nlohmann::json root;

  // Exactly one entrypoint per shader is supported.
  const auto& entrypoints = compiler_->get_entry_points_and_stages();
  if (entrypoints.size() != 1) {
    VALIDATION_LOG << "Incorrect number of entrypoints in the shader. Found "
                   << entrypoints.size() << " but expected 1.";
    return std::nullopt;
  }

  auto execution_model = entrypoints.front().execution_model;
  {
    root["entrypoint"] = options_.entry_point_name;
    root["shader_name"] = options_.shader_name;
    root["shader_stage"] = ExecutionModelToString(execution_model);
    root["header_file_name"] = options_.header_file_name;
  }

  const auto shader_resources = compiler_->get_shader_resources();

  // Subpass Inputs.
  {
    auto& subpass_inputs = root["subpass_inputs"] = nlohmann::json::array_t{};
    if (auto subpass_inputs_json =
            ReflectResources(shader_resources.subpass_inputs);
        subpass_inputs_json.has_value()) {
      for (auto subpass_input : subpass_inputs_json.value()) {
        subpass_input["descriptor_type"] = "DescriptorType::kInputAttachment";
        subpass_inputs.emplace_back(std::move(subpass_input));
      }
    } else {
      return std::nullopt;
    }
  }

  // Uniform and storage buffers. Both land in the same "buffers" array,
  // distinguished by "descriptor_type".
  {
    auto& buffers = root["buffers"] = nlohmann::json::array_t{};
    if (auto uniform_buffers_json =
            ReflectResources(shader_resources.uniform_buffers);
        uniform_buffers_json.has_value()) {
      for (auto uniform_buffer : uniform_buffers_json.value()) {
        uniform_buffer["descriptor_type"] = "DescriptorType::kUniformBuffer";
        buffers.emplace_back(std::move(uniform_buffer));
      }
    } else {
      return std::nullopt;
    }
    if (auto storage_buffers_json =
            ReflectResources(shader_resources.storage_buffers);
        storage_buffers_json.has_value()) {
      for (auto uniform_buffer : storage_buffers_json.value()) {
        uniform_buffer["descriptor_type"] = "DescriptorType::kStorageBuffer";
        buffers.emplace_back(std::move(uniform_buffer));
      }
    } else {
      return std::nullopt;
    }
  }

  // Stage inputs. Vertex attribute offsets are only computed for the vertex
  // stage, where inputs are laid out in a vertex buffer.
  {
    auto& stage_inputs = root["stage_inputs"] = nlohmann::json::array_t{};
    if (auto stage_inputs_json = ReflectResources(
            shader_resources.stage_inputs,
            /*compute_offsets=*/execution_model == spv::ExecutionModelVertex);
        stage_inputs_json.has_value()) {
      stage_inputs = std::move(stage_inputs_json.value());
    } else {
      return std::nullopt;
    }
  }

  // Combined image-samplers, separate images, and separate samplers all land
  // in the "sampled_images" array, tagged with their descriptor type.
  {
    auto combined_sampled_images =
        ReflectResources(shader_resources.sampled_images);
    auto images = ReflectResources(shader_resources.separate_images);
    auto samplers = ReflectResources(shader_resources.separate_samplers);
    if (!combined_sampled_images.has_value() || !images.has_value() ||
        !samplers.has_value()) {
      return std::nullopt;
    }
    auto& sampled_images = root["sampled_images"] = nlohmann::json::array_t{};
    for (auto value : combined_sampled_images.value()) {
      value["descriptor_type"] = "DescriptorType::kSampledImage";
      sampled_images.emplace_back(std::move(value));
    }
    for (auto value : images.value()) {
      value["descriptor_type"] = "DescriptorType::kImage";
      sampled_images.emplace_back(std::move(value));
    }
    for (auto value : samplers.value()) {
      value["descriptor_type"] = "DescriptorType::kSampledSampler";
      sampled_images.emplace_back(std::move(value));
    }
  }

  if (auto stage_outputs = ReflectResources(shader_resources.stage_outputs);
      stage_outputs.has_value()) {
    root["stage_outputs"] = std::move(stage_outputs.value());
  } else {
    return std::nullopt;
  }

  // Struct definitions: an optional per-vertex struct for vertex stages,
  // followed by every (deduplicated) struct type found in the IR.
  {
    auto& struct_definitions = root["struct_definitions"] =
        nlohmann::json::array_t{};
    if (entrypoints.front().execution_model ==
            spv::ExecutionModel::ExecutionModelVertex &&
        !shader_resources.stage_inputs.empty()) {
      if (auto struc =
              ReflectPerVertexStructDefinition(shader_resources.stage_inputs);
          struc.has_value()) {
        struct_definitions.emplace_back(EmitStructDefinition(struc.value()));
      } else {
        // If there are stage inputs, it is an error to not generate a per
        // vertex data struct for a vertex like shader stage.
        return std::nullopt;
      }
    }

    std::set<spirv_cross::ID> known_structs;
    ir_->for_each_typed_id<spirv_cross::SPIRType>(
        [&](uint32_t, const spirv_cross::SPIRType& type) {
          if (known_structs.find(type.self) != known_structs.end()) {
            // Iterating over types this way leads to duplicates which may cause
            // duplicate struct definitions.
            return;
          }
          known_structs.insert(type.self);
          if (auto struc = ReflectStructDefinition(type.self);
              struc.has_value()) {
            struct_definitions.emplace_back(
                EmitStructDefinition(struc.value()));
          }
        });
  }

  root["bind_prototypes"] =
      EmitBindPrototypes(shader_resources, execution_model);

  return root;
}
311 
// Render the header template with the previously-generated template arguments.
std::shared_ptr<fml::Mapping> Reflector::GenerateReflectionHeader() const {
  return InflateTemplate(kReflectionHeaderTemplate);
}
315 
// Render the CC template with the previously-generated template arguments.
std::shared_ptr<fml::Mapping> Reflector::GenerateReflectionCC() const {
  return InflateTemplate(kReflectionCCTemplate);
}
319 
// Map a target platform to its runtime stage backend; std::nullopt for
// platforms with no runtime stage support.
//
// NOTE(review): the case labels of this switch appear to have been lost in
// this extracted listing (only the std::nullopt return survives) — confirm
// the full switch body against the upstream source before relying on it.
static std::optional<RuntimeStageBackend> GetRuntimeStageBackend(
    TargetPlatform target_platform) {
  switch (target_platform) {
      return std::nullopt;
  }
  FML_UNREACHABLE();
}
341 
//------------------------------------------------------------------------------
/// @brief      Package the compiled shader plus its reflected uniforms and
///             (for vertex stages) vertex inputs into a runtime stage payload.
///
/// @return     The shader payload, or nullptr if the target platform has no
///             runtime stage backend or the shader does not have exactly one
///             entrypoint.
///
std::shared_ptr<RuntimeStageData::Shader> Reflector::GenerateRuntimeStageData()
    const {
  auto backend = GetRuntimeStageBackend(options_.target_platform);
  if (!backend.has_value()) {
    return nullptr;
  }

  const auto& entrypoints = compiler_->get_entry_points_and_stages();
  if (entrypoints.size() != 1u) {
    VALIDATION_LOG << "Single entrypoint not found.";
    return nullptr;
  }
  auto data = std::make_unique<RuntimeStageData::Shader>();
  data->entrypoint = options_.entry_point_name;
  data->stage = entrypoints.front().execution_model;
  data->shader = shader_data_;
  data->backend = backend.value();

  // Sort the IR so that the uniforms are in declaration order.
  std::vector<spirv_cross::ID> uniforms =
      SortUniforms(ir_.get(), compiler_.GetCompiler());

  for (auto& sorted_id : uniforms) {
    auto var = ir_->ids[sorted_id].get<spirv_cross::SPIRVariable>();
    const auto spir_type = compiler_->get_type(var.basetype);
    UniformDescription uniform_description;
    uniform_description.name = compiler_->get_name(var.self);
    uniform_description.location = compiler_->get_decoration(
        var.self, spv::Decoration::DecorationLocation);
    uniform_description.type = spir_type.basetype;
    uniform_description.rows = spir_type.vecsize;
    uniform_description.columns = spir_type.columns;
    uniform_description.bit_width = spir_type.width;
    uniform_description.array_elements = GetArrayElements(spir_type);
    data->uniforms.emplace_back(std::move(uniform_description));
  }

  // We only need to worry about storing vertex attributes.
  if (entrypoints.front().execution_model == spv::ExecutionModelVertex) {
    const auto inputs = compiler_->get_shader_resources().stage_inputs;
    auto input_offsets = ComputeOffsets(inputs);
    for (const auto& input : inputs) {
      auto location = compiler_->get_decoration(
          input.id, spv::Decoration::DecorationLocation);
      // Byte offset of this attribute in the interleaved vertex layout.
      std::optional<size_t> offset = input_offsets[location];

      const auto type = compiler_->get_type(input.type_id);

      InputDescription input_description;
      input_description.name = input.name;
      input_description.location = compiler_->get_decoration(
          input.id, spv::Decoration::DecorationLocation);
      input_description.set = compiler_->get_decoration(
          input.id, spv::Decoration::DecorationDescriptorSet);
      input_description.binding = compiler_->get_decoration(
          input.id, spv::Decoration::DecorationBinding);
      input_description.type = type.basetype;
      input_description.bit_width = type.width;
      input_description.vec_size = type.vecsize;
      input_description.columns = type.columns;
      input_description.offset = offset.value_or(0u);
      data->inputs.emplace_back(std::move(input_description));
    }
  }

  return data;
}
409 
410 std::optional<uint32_t> Reflector::GetArrayElements(
411  const spirv_cross::SPIRType& type) const {
412  if (type.array.empty()) {
413  return std::nullopt;
414  }
415  FML_CHECK(type.array.size() == 1)
416  << "Multi-dimensional arrays are not supported.";
417  FML_CHECK(type.array_size_literal.front())
418  << "Must use a literal for array sizes.";
419  return type.array.front();
420 }
421 
// Human-readable name of the compiler backend, used by the
// "get_generator_name" template callback.
//
// NOTE(review): the case labels of this switch appear to have been lost in
// this extracted listing (only the return statements survive) — confirm the
// full switch body against the upstream source before relying on it.
static std::string ToString(CompilerBackend::Type type) {
  switch (type) {
      return "Metal Shading Language";
      return "OpenGL Shading Language";
      return "SkSL Shading Language";
  }
  FML_UNREACHABLE();
}
433 
434 std::shared_ptr<fml::Mapping> Reflector::InflateTemplate(
435  std::string_view tmpl) const {
436  inja::Environment env;
437  env.set_trim_blocks(true);
438  env.set_lstrip_blocks(true);
439 
440  env.add_callback("camel_case", 1u, [](inja::Arguments& args) {
441  return ToCamelCase(args.at(0u)->get<std::string>());
442  });
443 
444  env.add_callback("to_shader_stage", 1u, [](inja::Arguments& args) {
445  return StringToShaderStage(args.at(0u)->get<std::string>());
446  });
447 
448  env.add_callback("get_generator_name", 0u,
449  [type = compiler_.GetType()](inja::Arguments& args) {
450  return ToString(type);
451  });
452 
453  auto inflated_template =
454  std::make_shared<std::string>(env.render(tmpl, *template_arguments_));
455 
456  return std::make_shared<fml::NonOwnedMapping>(
457  reinterpret_cast<const uint8_t*>(inflated_template->data()),
458  inflated_template->size(), [inflated_template](auto, auto) {});
459 }
460 
461 std::vector<size_t> Reflector::ComputeOffsets(
462  const spirv_cross::SmallVector<spirv_cross::Resource>& resources) const {
463  std::vector<size_t> offsets(resources.size(), 0);
464  if (resources.size() == 0) {
465  return offsets;
466  }
467  for (const auto& resource : resources) {
468  const auto type = compiler_->get_type(resource.type_id);
469  auto location = compiler_->get_decoration(
470  resource.id, spv::Decoration::DecorationLocation);
471  // Malformed shader, will be caught later on.
472  if (location >= resources.size() || location < 0) {
473  location = 0;
474  }
475  offsets[location] = (type.width * type.vecsize) / 8;
476  }
477  for (size_t i = 1; i < resources.size(); i++) {
478  offsets[i] += offsets[i - 1];
479  }
480  for (size_t i = resources.size() - 1; i > 0; i--) {
481  offsets[i] = offsets[i - 1];
482  }
483  offsets[0] = 0;
484 
485  return offsets;
486 }
487 
488 std::optional<nlohmann::json::object_t> Reflector::ReflectResource(
489  const spirv_cross::Resource& resource,
490  std::optional<size_t> offset) const {
491  nlohmann::json::object_t result;
492 
493  result["name"] = resource.name;
494  result["descriptor_set"] = compiler_->get_decoration(
495  resource.id, spv::Decoration::DecorationDescriptorSet);
496  result["binding"] = compiler_->get_decoration(
497  resource.id, spv::Decoration::DecorationBinding);
498  result["set"] = compiler_->get_decoration(
499  resource.id, spv::Decoration::DecorationDescriptorSet);
500  result["location"] = compiler_->get_decoration(
501  resource.id, spv::Decoration::DecorationLocation);
502  result["index"] =
503  compiler_->get_decoration(resource.id, spv::Decoration::DecorationIndex);
504  result["ext_res_0"] = compiler_.GetExtendedMSLResourceBinding(
506  result["ext_res_1"] = compiler_.GetExtendedMSLResourceBinding(
508  auto type = ReflectType(resource.type_id);
509  if (!type.has_value()) {
510  return std::nullopt;
511  }
512  result["type"] = std::move(type.value());
513  result["offset"] = offset.value_or(0u);
514  return result;
515 }
516 
517 std::optional<nlohmann::json::object_t> Reflector::ReflectType(
518  const spirv_cross::TypeID& type_id) const {
519  nlohmann::json::object_t result;
520 
521  const auto type = compiler_->get_type(type_id);
522 
523  result["type_name"] = BaseTypeToString(type.basetype);
524  result["bit_width"] = type.width;
525  result["vec_size"] = type.vecsize;
526  result["columns"] = type.columns;
527  auto& members = result["members"] = nlohmann::json::array_t{};
528  if (type.basetype == spirv_cross::SPIRType::BaseType::Struct) {
529  for (const auto& struct_member : ReadStructMembers(type_id)) {
530  auto member = nlohmann::json::object_t{};
531  member["name"] = struct_member.name;
532  member["type"] = struct_member.type;
533  member["base_type"] = struct_member.base_type;
534  member["offset"] = struct_member.offset;
535  member["size"] = struct_member.size;
536  member["byte_length"] = struct_member.byte_length;
537  if (struct_member.array_elements.has_value()) {
538  member["array_elements"] = struct_member.array_elements.value();
539  } else {
540  member["array_elements"] = "std::nullopt";
541  }
542  members.emplace_back(std::move(member));
543  }
544  }
545 
546  return result;
547 }
548 
549 std::optional<nlohmann::json::array_t> Reflector::ReflectResources(
550  const spirv_cross::SmallVector<spirv_cross::Resource>& resources,
551  bool compute_offsets) const {
552  nlohmann::json::array_t result;
553  result.reserve(resources.size());
554  std::vector<size_t> offsets;
555  if (compute_offsets) {
556  offsets = ComputeOffsets(resources);
557  }
558  for (const auto& resource : resources) {
559  std::optional<size_t> maybe_offset = std::nullopt;
560  if (compute_offsets) {
561  auto location = compiler_->get_decoration(
562  resource.id, spv::Decoration::DecorationLocation);
563  maybe_offset = offsets[location];
564  }
565  if (auto reflected = ReflectResource(resource, maybe_offset);
566  reflected.has_value()) {
567  result.emplace_back(std::move(reflected.value()));
568  } else {
569  return std::nullopt;
570  }
571  }
572  return result;
573 }
574 
// Spell the name of the generated `Padding<N>` type used to fill alignment
// gaps in reflected structs.
static std::string TypeNameWithPaddingOfSize(size_t size) {
  return "Padding<" + std::to_string(size) + ">";
}
580 
// A scalar type the reflector can emit directly into generated code: the
// native type's spelling and its byte size.
struct KnownType {
  std::string name;
  size_t byte_size = 0;
};
585 
586 static std::optional<KnownType> ReadKnownScalarType(
587  spirv_cross::SPIRType::BaseType type) {
588  switch (type) {
589  case spirv_cross::SPIRType::BaseType::Boolean:
590  return KnownType{
591  .name = "bool",
592  .byte_size = sizeof(bool),
593  };
594  case spirv_cross::SPIRType::BaseType::Float:
595  return KnownType{
596  .name = "Scalar",
597  .byte_size = sizeof(Scalar),
598  };
599  case spirv_cross::SPIRType::BaseType::Half:
600  return KnownType{
601  .name = "Half",
602  .byte_size = sizeof(Half),
603  };
604  case spirv_cross::SPIRType::BaseType::UInt:
605  return KnownType{
606  .name = "uint32_t",
607  .byte_size = sizeof(uint32_t),
608  };
609  case spirv_cross::SPIRType::BaseType::Int:
610  return KnownType{
611  .name = "int32_t",
612  .byte_size = sizeof(int32_t),
613  };
614  default:
615  break;
616  }
617  return std::nullopt;
618 }
619 
620 //------------------------------------------------------------------------------
621 /// @brief Get the reflected struct size. In the vast majority of the
622 /// cases, this is the same as the declared struct size as given by
623 /// the compiler. But, additional padding may need to be introduced
624 /// after the end of the struct to keep in line with the alignment
625 /// requirement of the individual struct members. This method
626 /// figures out the actual size of the reflected struct that can be
627 /// referenced in native code.
628 ///
629 /// @param[in] members The members
630 ///
631 /// @return The reflected structure size.
632 ///
633 static size_t GetReflectedStructSize(const std::vector<StructMember>& members) {
634  auto struct_size = 0u;
635  for (const auto& member : members) {
636  struct_size += member.byte_length;
637  }
638  return struct_size;
639 }
640 
641 std::vector<StructMember> Reflector::ReadStructMembers(
642  const spirv_cross::TypeID& type_id) const {
643  const auto& struct_type = compiler_->get_type(type_id);
644  FML_CHECK(struct_type.basetype == spirv_cross::SPIRType::BaseType::Struct);
645 
646  std::vector<StructMember> result;
647 
648  size_t current_byte_offset = 0;
649  size_t max_member_alignment = 0;
650 
651  for (size_t i = 0; i < struct_type.member_types.size(); i++) {
652  const auto& member = compiler_->get_type(struct_type.member_types[i]);
653  const auto struct_member_offset =
654  compiler_->type_struct_member_offset(struct_type, i);
655  auto array_elements = GetArrayElements(member);
656 
657  if (struct_member_offset > current_byte_offset) {
658  const auto alignment_pad = struct_member_offset - current_byte_offset;
659  result.emplace_back(StructMember{
660  TypeNameWithPaddingOfSize(alignment_pad), // type
661  BaseTypeToString(spirv_cross::SPIRType::BaseType::Void), // basetype
662  SPrintF("_PADDING_%s_",
663  GetMemberNameAtIndex(struct_type, i).c_str()), // name
664  current_byte_offset, // offset
665  alignment_pad, // size
666  alignment_pad, // byte_length
667  std::nullopt, // array_elements
668  0, // element_padding
669  });
670  current_byte_offset += alignment_pad;
671  }
672 
673  max_member_alignment =
674  std::max<size_t>(max_member_alignment,
675  (member.width / 8) * member.columns * member.vecsize);
676 
677  FML_CHECK(current_byte_offset == struct_member_offset);
678 
679  // A user defined struct.
680  if (member.basetype == spirv_cross::SPIRType::BaseType::Struct) {
681  const size_t size =
682  GetReflectedStructSize(ReadStructMembers(member.self));
683  uint32_t stride = GetArrayStride<0>(struct_type, member, i);
684  if (stride == 0) {
685  stride = size;
686  }
687  uint32_t element_padding = stride - size;
688  result.emplace_back(StructMember{
689  compiler_->get_name(member.self), // type
690  BaseTypeToString(member.basetype), // basetype
691  GetMemberNameAtIndex(struct_type, i), // name
692  struct_member_offset, // offset
693  size, // size
694  stride * array_elements.value_or(1), // byte_length
695  array_elements, // array_elements
696  element_padding, // element_padding
697  });
698  current_byte_offset += stride * array_elements.value_or(1);
699  continue;
700  }
701 
702  // Tightly packed 4x4 Matrix is special cased as we know how to work with
703  // those.
704  if (member.basetype == spirv_cross::SPIRType::BaseType::Float && //
705  member.width == sizeof(Scalar) * 8 && //
706  member.columns == 4 && //
707  member.vecsize == 4 //
708  ) {
709  uint32_t stride = GetArrayStride<sizeof(Matrix)>(struct_type, member, i);
710  uint32_t element_padding = stride - sizeof(Matrix);
711  result.emplace_back(StructMember{
712  "Matrix", // type
713  BaseTypeToString(member.basetype), // basetype
714  GetMemberNameAtIndex(struct_type, i), // name
715  struct_member_offset, // offset
716  sizeof(Matrix), // size
717  stride * array_elements.value_or(1), // byte_length
718  array_elements, // array_elements
719  element_padding, // element_padding
720  });
721  current_byte_offset += stride * array_elements.value_or(1);
722  continue;
723  }
724 
725  // Tightly packed UintPoint32 (uvec2)
726  if (member.basetype == spirv_cross::SPIRType::BaseType::UInt && //
727  member.width == sizeof(uint32_t) * 8 && //
728  member.columns == 1 && //
729  member.vecsize == 2 //
730  ) {
731  uint32_t stride =
732  GetArrayStride<sizeof(UintPoint32)>(struct_type, member, i);
733  uint32_t element_padding = stride - sizeof(UintPoint32);
734  result.emplace_back(StructMember{
735  "UintPoint32", // type
736  BaseTypeToString(member.basetype), // basetype
737  GetMemberNameAtIndex(struct_type, i), // name
738  struct_member_offset, // offset
739  sizeof(UintPoint32), // size
740  stride * array_elements.value_or(1), // byte_length
741  array_elements, // array_elements
742  element_padding, // element_padding
743  });
744  current_byte_offset += stride * array_elements.value_or(1);
745  continue;
746  }
747 
748  // Tightly packed UintPoint32 (ivec2)
749  if (member.basetype == spirv_cross::SPIRType::BaseType::Int && //
750  member.width == sizeof(int32_t) * 8 && //
751  member.columns == 1 && //
752  member.vecsize == 2 //
753  ) {
754  uint32_t stride =
755  GetArrayStride<sizeof(IPoint32)>(struct_type, member, i);
756  uint32_t element_padding = stride - sizeof(IPoint32);
757  result.emplace_back(StructMember{
758  "IPoint32", // type
759  BaseTypeToString(member.basetype), // basetype
760  GetMemberNameAtIndex(struct_type, i), // name
761  struct_member_offset, // offset
762  sizeof(IPoint32), // size
763  stride * array_elements.value_or(1), // byte_length
764  array_elements, // array_elements
765  element_padding, // element_padding
766  });
767  current_byte_offset += stride * array_elements.value_or(1);
768  continue;
769  }
770 
771  // Tightly packed Point (vec2).
772  if (member.basetype == spirv_cross::SPIRType::BaseType::Float && //
773  member.width == sizeof(float) * 8 && //
774  member.columns == 1 && //
775  member.vecsize == 2 //
776  ) {
777  uint32_t stride = GetArrayStride<sizeof(Point)>(struct_type, member, i);
778  uint32_t element_padding = stride - sizeof(Point);
779  result.emplace_back(StructMember{
780  "Point", // type
781  BaseTypeToString(member.basetype), // basetype
782  GetMemberNameAtIndex(struct_type, i), // name
783  struct_member_offset, // offset
784  sizeof(Point), // size
785  stride * array_elements.value_or(1), // byte_length
786  array_elements, // array_elements
787  element_padding, // element_padding
788  });
789  current_byte_offset += stride * array_elements.value_or(1);
790  continue;
791  }
792 
793  // Tightly packed Vector3.
794  if (member.basetype == spirv_cross::SPIRType::BaseType::Float && //
795  member.width == sizeof(float) * 8 && //
796  member.columns == 1 && //
797  member.vecsize == 3 //
798  ) {
799  uint32_t stride = GetArrayStride<sizeof(Vector3)>(struct_type, member, i);
800  uint32_t element_padding = stride - sizeof(Vector3);
801  result.emplace_back(StructMember{
802  "Vector3", // type
803  BaseTypeToString(member.basetype), // basetype
804  GetMemberNameAtIndex(struct_type, i), // name
805  struct_member_offset, // offset
806  sizeof(Vector3), // size
807  stride * array_elements.value_or(1), // byte_length
808  array_elements, // array_elements
809  element_padding, // element_padding
810  });
811  current_byte_offset += stride * array_elements.value_or(1);
812  continue;
813  }
814 
815  // Tightly packed Vector4.
816  if (member.basetype == spirv_cross::SPIRType::BaseType::Float && //
817  member.width == sizeof(float) * 8 && //
818  member.columns == 1 && //
819  member.vecsize == 4 //
820  ) {
821  uint32_t stride = GetArrayStride<sizeof(Vector4)>(struct_type, member, i);
822  uint32_t element_padding = stride - sizeof(Vector4);
823  result.emplace_back(StructMember{
824  "Vector4", // type
825  BaseTypeToString(member.basetype), // basetype
826  GetMemberNameAtIndex(struct_type, i), // name
827  struct_member_offset, // offset
828  sizeof(Vector4), // size
829  stride * array_elements.value_or(1), // byte_length
830  array_elements, // array_elements
831  element_padding, // element_padding
832  });
833  current_byte_offset += stride * array_elements.value_or(1);
834  continue;
835  }
836 
837  // Tightly packed half Point (vec2).
838  if (member.basetype == spirv_cross::SPIRType::BaseType::Half && //
839  member.width == sizeof(Half) * 8 && //
840  member.columns == 1 && //
841  member.vecsize == 2 //
842  ) {
843  uint32_t stride =
844  GetArrayStride<sizeof(HalfVector2)>(struct_type, member, i);
845  uint32_t element_padding = stride - sizeof(HalfVector2);
846  result.emplace_back(StructMember{
847  "HalfVector2", // type
848  BaseTypeToString(member.basetype), // basetype
849  GetMemberNameAtIndex(struct_type, i), // name
850  struct_member_offset, // offset
851  sizeof(HalfVector2), // size
852  stride * array_elements.value_or(1), // byte_length
853  array_elements, // array_elements
854  element_padding, // element_padding
855  });
856  current_byte_offset += stride * array_elements.value_or(1);
857  continue;
858  }
859 
860  // Tightly packed Half Float Vector3.
861  if (member.basetype == spirv_cross::SPIRType::BaseType::Half && //
862  member.width == sizeof(Half) * 8 && //
863  member.columns == 1 && //
864  member.vecsize == 3 //
865  ) {
866  uint32_t stride =
867  GetArrayStride<sizeof(HalfVector3)>(struct_type, member, i);
868  uint32_t element_padding = stride - sizeof(HalfVector3);
869  result.emplace_back(StructMember{
870  "HalfVector3", // type
871  BaseTypeToString(member.basetype), // basetype
872  GetMemberNameAtIndex(struct_type, i), // name
873  struct_member_offset, // offset
874  sizeof(HalfVector3), // size
875  stride * array_elements.value_or(1), // byte_length
876  array_elements, // array_elements
877  element_padding, // element_padding
878  });
879  current_byte_offset += stride * array_elements.value_or(1);
880  continue;
881  }
882 
883  // Tightly packed Half Float Vector4.
884  if (member.basetype == spirv_cross::SPIRType::BaseType::Half && //
885  member.width == sizeof(Half) * 8 && //
886  member.columns == 1 && //
887  member.vecsize == 4 //
888  ) {
889  uint32_t stride =
890  GetArrayStride<sizeof(HalfVector4)>(struct_type, member, i);
891  uint32_t element_padding = stride - sizeof(HalfVector4);
892  result.emplace_back(StructMember{
893  "HalfVector4", // type
894  BaseTypeToString(member.basetype), // basetype
895  GetMemberNameAtIndex(struct_type, i), // name
896  struct_member_offset, // offset
897  sizeof(HalfVector4), // size
898  stride * array_elements.value_or(1), // byte_length
899  array_elements, // array_elements
900  element_padding, // element_padding
901  });
902  current_byte_offset += stride * array_elements.value_or(1);
903  continue;
904  }
905 
906  // Other isolated scalars (like bool, int, float/Scalar, etc..).
907  {
908  auto maybe_known_type = ReadKnownScalarType(member.basetype);
909  if (maybe_known_type.has_value() && //
910  member.columns == 1 && //
911  member.vecsize == 1 //
912  ) {
913  uint32_t stride = GetArrayStride<0>(struct_type, member, i);
914  if (stride == 0) {
915  stride = maybe_known_type.value().byte_size;
916  }
917  uint32_t element_padding = stride - maybe_known_type.value().byte_size;
918  // Add the type directly.
919  result.emplace_back(StructMember{
920  maybe_known_type.value().name, // type
921  BaseTypeToString(member.basetype), // basetype
922  GetMemberNameAtIndex(struct_type, i), // name
923  struct_member_offset, // offset
924  maybe_known_type.value().byte_size, // size
925  stride * array_elements.value_or(1), // byte_length
926  array_elements, // array_elements
927  element_padding, // element_padding
928  });
929  current_byte_offset += stride * array_elements.value_or(1);
930  continue;
931  }
932  }
933 
934  // Catch all for unknown types. Just add the necessary padding to the struct
935  // and move on.
936  {
937  const size_t size = (member.width * member.columns * member.vecsize) / 8u;
938  uint32_t stride = GetArrayStride<0>(struct_type, member, i);
939  if (stride == 0) {
940  stride = size;
941  }
942  auto element_padding = stride - size;
943  result.emplace_back(StructMember{
944  TypeNameWithPaddingOfSize(size), // type
945  BaseTypeToString(member.basetype), // basetype
946  GetMemberNameAtIndex(struct_type, i), // name
947  struct_member_offset, // offset
948  size, // size
949  stride * array_elements.value_or(1), // byte_length
950  array_elements, // array_elements
951  element_padding, // element_padding
952  });
953  current_byte_offset += stride * array_elements.value_or(1);
954  continue;
955  }
956  }
957 
958  if (max_member_alignment > 0u) {
959  const auto struct_length = current_byte_offset;
960  {
961  const auto excess = struct_length % max_member_alignment;
962  if (excess != 0) {
963  const auto padding = max_member_alignment - excess;
964  result.emplace_back(StructMember{
965  TypeNameWithPaddingOfSize(padding), // type
967  spirv_cross::SPIRType::BaseType::Void), // basetype
968  "_PADDING_", // name
969  current_byte_offset, // offset
970  padding, // size
971  padding, // byte_length
972  std::nullopt, // array_elements
973  0, // element_padding
974  });
975  }
976  }
977  }
978 
979  return result;
980 }
981 
982 std::optional<Reflector::StructDefinition> Reflector::ReflectStructDefinition(
983  const spirv_cross::TypeID& type_id) const {
984  const auto& type = compiler_->get_type(type_id);
985  if (type.basetype != spirv_cross::SPIRType::BaseType::Struct) {
986  return std::nullopt;
987  }
988 
989  const auto struct_name = compiler_->get_name(type_id);
990  if (struct_name.find("_RESERVED_IDENTIFIER_") != std::string::npos) {
991  return std::nullopt;
992  }
993 
994  auto struct_members = ReadStructMembers(type_id);
995  auto reflected_struct_size = GetReflectedStructSize(struct_members);
996 
997  StructDefinition struc;
998  struc.name = struct_name;
999  struc.byte_length = reflected_struct_size;
1000  struc.members = std::move(struct_members);
1001  return struc;
1002 }
1003 
1004 nlohmann::json::object_t Reflector::EmitStructDefinition(
1005  std::optional<Reflector::StructDefinition> struc) const {
1006  nlohmann::json::object_t result;
1007  result["name"] = struc->name;
1008  result["byte_length"] = struc->byte_length;
1009  auto& members = result["members"] = nlohmann::json::array_t{};
1010  for (const auto& struct_member : struc->members) {
1011  auto& member = members.emplace_back(nlohmann::json::object_t{});
1012  member["name"] = struct_member.name;
1013  member["type"] = struct_member.type;
1014  member["base_type"] = struct_member.base_type;
1015  member["offset"] = struct_member.offset;
1016  member["byte_length"] = struct_member.byte_length;
1017  if (struct_member.array_elements.has_value()) {
1018  member["array_elements"] = struct_member.array_elements.value();
1019  } else {
1020  member["array_elements"] = "std::nullopt";
1021  }
1022  member["element_padding"] = struct_member.element_padding;
1023  }
1024  return result;
1025 }
1026 
// A vertex stage input attribute resolved to a concrete reflected type.
struct VertexType {
  std::string type_name;       // e.g. "Point", "Vector4", or a padding type.
  std::string base_type_name;  // Stringified ShaderType enum value.
  std::string variable_name;   // Name of the stage input resource.
  size_t byte_length = 0u;     // Total attribute size in bytes.
};
1033 
1035  const spirv_cross::Compiler& compiler,
1036  const spirv_cross::Resource* resource) {
1037  VertexType result;
1038  result.variable_name = resource->name;
1039  const auto type = compiler.get_type(resource->type_id);
1040  result.base_type_name = BaseTypeToString(type.basetype);
1041  const auto total_size = type.columns * type.vecsize * type.width / 8u;
1042  result.byte_length = total_size;
1043 
1044  if (type.basetype == spirv_cross::SPIRType::BaseType::Float &&
1045  type.columns == 1u && type.vecsize == 2u &&
1046  type.width == sizeof(float) * 8u) {
1047  result.type_name = "Point";
1048  } else if (type.basetype == spirv_cross::SPIRType::BaseType::Float &&
1049  type.columns == 1u && type.vecsize == 4u &&
1050  type.width == sizeof(float) * 8u) {
1051  result.type_name = "Vector4";
1052  } else if (type.basetype == spirv_cross::SPIRType::BaseType::Float &&
1053  type.columns == 1u && type.vecsize == 3u &&
1054  type.width == sizeof(float) * 8u) {
1055  result.type_name = "Vector3";
1056  } else if (type.basetype == spirv_cross::SPIRType::BaseType::Float &&
1057  type.columns == 1u && type.vecsize == 1u &&
1058  type.width == sizeof(float) * 8u) {
1059  result.type_name = "Scalar";
1060  } else if (type.basetype == spirv_cross::SPIRType::BaseType::Int &&
1061  type.columns == 1u && type.vecsize == 1u &&
1062  type.width == sizeof(int32_t) * 8u) {
1063  result.type_name = "int32_t";
1064  } else {
1065  // Catch all unknown padding.
1066  result.type_name = TypeNameWithPaddingOfSize(total_size);
1067  }
1068 
1069  return result;
1070 }
1071 
1072 std::optional<Reflector::StructDefinition>
1073 Reflector::ReflectPerVertexStructDefinition(
1074  const spirv_cross::SmallVector<spirv_cross::Resource>& stage_inputs) const {
1075  // Avoid emitting a zero sized structure. The code gen templates assume a
1076  // non-zero size.
1077  if (stage_inputs.empty()) {
1078  return std::nullopt;
1079  }
1080 
1081  // Validate locations are contiguous and there are no duplicates.
1082  std::set<uint32_t> locations;
1083  for (const auto& input : stage_inputs) {
1084  auto location = compiler_->get_decoration(
1085  input.id, spv::Decoration::DecorationLocation);
1086  if (locations.count(location) != 0) {
1087  // Duplicate location. Bail.
1088  return std::nullopt;
1089  }
1090  locations.insert(location);
1091  }
1092 
1093  for (size_t i = 0; i < locations.size(); i++) {
1094  if (locations.count(i) != 1) {
1095  // Locations are not contiguous. This usually happens when a single stage
1096  // input takes multiple input slots. No reflection information can be
1097  // generated for such cases anyway. So bail! It is up to the shader author
1098  // to make sure one stage input maps to a single input slot.
1099  return std::nullopt;
1100  }
1101  }
1102 
1103  auto input_for_location =
1104  [&](uint32_t queried_location) -> const spirv_cross::Resource* {
1105  for (const auto& input : stage_inputs) {
1106  auto location = compiler_->get_decoration(
1107  input.id, spv::Decoration::DecorationLocation);
1108  if (location == queried_location) {
1109  return &input;
1110  }
1111  }
1112  // This really cannot happen with all the validation above.
1113  FML_UNREACHABLE();
1114  return nullptr;
1115  };
1116 
1117  StructDefinition struc;
1118  struc.name = "PerVertexData";
1119  struc.byte_length = 0u;
1120  for (size_t i = 0; i < locations.size(); i++) {
1121  auto resource = input_for_location(i);
1122  if (resource == nullptr) {
1123  return std::nullopt;
1124  }
1125  const auto vertex_type =
1126  VertexTypeFromInputResource(*compiler_.GetCompiler(), resource);
1127 
1128  auto member = StructMember{
1129  vertex_type.type_name, // type
1130  vertex_type.base_type_name, // base type
1131  vertex_type.variable_name, // name
1132  struc.byte_length, // offset
1133  vertex_type.byte_length, // size
1134  vertex_type.byte_length, // byte_length
1135  std::nullopt, // array_elements
1136  0, // element_padding
1137  };
1138  struc.byte_length += vertex_type.byte_length;
1139  struc.members.emplace_back(std::move(member));
1140  }
1141  return struc;
1142 }
1143 
1144 std::optional<std::string> Reflector::GetMemberNameAtIndexIfExists(
1145  const spirv_cross::SPIRType& parent_type,
1146  size_t index) const {
1147  if (parent_type.type_alias != 0) {
1148  return GetMemberNameAtIndexIfExists(
1149  compiler_->get_type(parent_type.type_alias), index);
1150  }
1151 
1152  if (auto found = ir_->meta.find(parent_type.self); found != ir_->meta.end()) {
1153  const auto& members = found->second.members;
1154  if (index < members.size() && !members[index].alias.empty()) {
1155  return members[index].alias;
1156  }
1157  }
1158  return std::nullopt;
1159 }
1160 
1161 std::string Reflector::GetMemberNameAtIndex(
1162  const spirv_cross::SPIRType& parent_type,
1163  size_t index,
1164  std::string suffix) const {
1165  if (auto name = GetMemberNameAtIndexIfExists(parent_type, index);
1166  name.has_value()) {
1167  return name.value();
1168  }
1169  static std::atomic_size_t sUnnamedMembersID;
1170  std::stringstream stream;
1171  stream << "unnamed_" << sUnnamedMembersID++ << suffix;
1172  return stream.str();
1173 }
1174 
1175 std::vector<Reflector::BindPrototype> Reflector::ReflectBindPrototypes(
1176  const spirv_cross::ShaderResources& resources,
1177  spv::ExecutionModel execution_model) const {
1178  std::vector<BindPrototype> prototypes;
1179  for (const auto& uniform_buffer : resources.uniform_buffers) {
1180  auto& proto = prototypes.emplace_back(BindPrototype{});
1181  proto.return_type = "bool";
1182  proto.name = ToCamelCase(uniform_buffer.name);
1183  {
1184  std::stringstream stream;
1185  stream << "Bind uniform buffer for resource named " << uniform_buffer.name
1186  << ".";
1187  proto.docstring = stream.str();
1188  }
1189  proto.args.push_back(BindPrototypeArgument{
1190  .type_name = "ResourceBinder&",
1191  .argument_name = "command",
1192  });
1193  proto.args.push_back(BindPrototypeArgument{
1194  .type_name = "BufferView",
1195  .argument_name = "view",
1196  });
1197  }
1198  for (const auto& storage_buffer : resources.storage_buffers) {
1199  auto& proto = prototypes.emplace_back(BindPrototype{});
1200  proto.return_type = "bool";
1201  proto.name = ToCamelCase(storage_buffer.name);
1202  {
1203  std::stringstream stream;
1204  stream << "Bind storage buffer for resource named " << storage_buffer.name
1205  << ".";
1206  proto.docstring = stream.str();
1207  }
1208  proto.args.push_back(BindPrototypeArgument{
1209  .type_name = "ResourceBinder&",
1210  .argument_name = "command",
1211  });
1212  proto.args.push_back(BindPrototypeArgument{
1213  .type_name = "BufferView",
1214  .argument_name = "view",
1215  });
1216  }
1217  for (const auto& sampled_image : resources.sampled_images) {
1218  auto& proto = prototypes.emplace_back(BindPrototype{});
1219  proto.return_type = "bool";
1220  proto.name = ToCamelCase(sampled_image.name);
1221  {
1222  std::stringstream stream;
1223  stream << "Bind combined image sampler for resource named "
1224  << sampled_image.name << ".";
1225  proto.docstring = stream.str();
1226  }
1227  proto.args.push_back(BindPrototypeArgument{
1228  .type_name = "ResourceBinder&",
1229  .argument_name = "command",
1230  });
1231  proto.args.push_back(BindPrototypeArgument{
1232  .type_name = "std::shared_ptr<const Texture>",
1233  .argument_name = "texture",
1234  });
1235  proto.args.push_back(BindPrototypeArgument{
1236  .type_name = "std::shared_ptr<const Sampler>",
1237  .argument_name = "sampler",
1238  });
1239  }
1240  for (const auto& separate_image : resources.separate_images) {
1241  auto& proto = prototypes.emplace_back(BindPrototype{});
1242  proto.return_type = "bool";
1243  proto.name = ToCamelCase(separate_image.name);
1244  {
1245  std::stringstream stream;
1246  stream << "Bind separate image for resource named " << separate_image.name
1247  << ".";
1248  proto.docstring = stream.str();
1249  }
1250  proto.args.push_back(BindPrototypeArgument{
1251  .type_name = "Command&",
1252  .argument_name = "command",
1253  });
1254  proto.args.push_back(BindPrototypeArgument{
1255  .type_name = "std::shared_ptr<const Texture>",
1256  .argument_name = "texture",
1257  });
1258  }
1259  for (const auto& separate_sampler : resources.separate_samplers) {
1260  auto& proto = prototypes.emplace_back(BindPrototype{});
1261  proto.return_type = "bool";
1262  proto.name = ToCamelCase(separate_sampler.name);
1263  {
1264  std::stringstream stream;
1265  stream << "Bind separate sampler for resource named "
1266  << separate_sampler.name << ".";
1267  proto.docstring = stream.str();
1268  }
1269  proto.args.push_back(BindPrototypeArgument{
1270  .type_name = "Command&",
1271  .argument_name = "command",
1272  });
1273  proto.args.push_back(BindPrototypeArgument{
1274  .type_name = "std::shared_ptr<const Sampler>",
1275  .argument_name = "sampler",
1276  });
1277  }
1278  return prototypes;
1279 }
1280 
1281 nlohmann::json::array_t Reflector::EmitBindPrototypes(
1282  const spirv_cross::ShaderResources& resources,
1283  spv::ExecutionModel execution_model) const {
1284  const auto prototypes = ReflectBindPrototypes(resources, execution_model);
1285  nlohmann::json::array_t result;
1286  for (const auto& res : prototypes) {
1287  auto& item = result.emplace_back(nlohmann::json::object_t{});
1288  item["return_type"] = res.return_type;
1289  item["name"] = res.name;
1290  item["docstring"] = res.docstring;
1291  auto& args = item["args"] = nlohmann::json::array_t{};
1292  for (const auto& arg : res.args) {
1293  auto& json_arg = args.emplace_back(nlohmann::json::object_t{});
1294  json_arg["type_name"] = arg.type_name;
1295  json_arg["argument_name"] = arg.argument_name;
1296  }
1297  }
1298  return result;
1299 }
1300 
1301 } // namespace compiler
1302 } // namespace impeller
impeller::compiler::ToCamelCase
std::string ToCamelCase(std::string_view string)
Definition: utilities.cc:39
impeller::compiler::GetRuntimeStageBackend
static std::optional< RuntimeStageBackend > GetRuntimeStageBackend(TargetPlatform target_platform)
Definition: reflector.cc:320
uniform_sorter.h
impeller::Scalar
float Scalar
Definition: scalar.h:18
impeller::RuntimeStageBackend::kVulkan
@ kVulkan
impeller::compiler::VertexType::byte_length
size_t byte_length
Definition: reflector.cc:1031
impeller::compiler::CompilerBackend
Definition: compiler_backend.h:21
impeller::compiler::VertexType::variable_name
std::string variable_name
Definition: reflector.cc:1030
impeller::compiler::TargetPlatform::kMetalDesktop
@ kMetalDesktop
impeller::UintPoint32
TPoint< uint32_t > UintPoint32
Definition: point.h:311
impeller::compiler::TargetPlatform::kMetalIOS
@ kMetalIOS
impeller::compiler::KnownType
Definition: reflector.cc:581
impeller::compiler::Reflector::GetReflectionCC
std::shared_ptr< fml::Mapping > GetReflectionCC() const
Definition: reflector.cc:160
impeller::compiler::Reflector::Options::header_file_name
std::string header_file_name
Definition: reflector.h:57
validation.h
impeller::compiler::TargetPlatform
TargetPlatform
Definition: types.h:28
impeller::compiler::VertexTypeFromInputResource
static VertexType VertexTypeFromInputResource(const spirv_cross::Compiler &compiler, const spirv_cross::Resource *resource)
Definition: reflector.cc:1034
impeller::compiler::Reflector::GetReflectionHeader
std::shared_ptr< fml::Mapping > GetReflectionHeader() const
Definition: reflector.cc:156
runtime_types.h
impeller::RuntimeStageBackend::kOpenGLES
@ kOpenGLES
impeller::compiler::Reflector::Options::target_platform
TargetPlatform target_platform
Definition: reflector.h:54
code_gen_template.h
reflector.h
impeller::compiler::Reflector::GetRuntimeStageShaderData
std::shared_ptr< RuntimeStageData::Shader > GetRuntimeStageShaderData() const
Definition: reflector.cc:164
matrix.h
impeller::compiler::KnownType::byte_size
size_t byte_size
Definition: reflector.cc:583
impeller::Point
TPoint< Scalar > Point
Definition: point.h:308
impeller::Half
A storage only class for half precision floating point.
Definition: half.h:40
impeller::compiler::CompilerBackend::Type
Type
Definition: compiler_backend.h:27
impeller::compiler::KnownType::name
std::string name
Definition: reflector.cc:582
impeller::SortUniforms
std::vector< spirv_cross::ID > SortUniforms(const spirv_cross::ParsedIR *ir, const spirv_cross::Compiler *compiler, std::optional< spirv_cross::SPIRType::BaseType > type_filter, bool include)
Sorts uniform declarations in an IR according to decoration order.
Definition: uniform_sorter.cc:11
impeller::compiler::TargetPlatform::kVulkan
@ kVulkan
impeller::SPrintF
std::string SPrintF(const char *format,...)
Definition: strings.cc:12
impeller::compiler::Reflector::Options::shader_name
std::string shader_name
Definition: reflector.h:56
impeller::compiler::TargetPlatform::kRuntimeStageVulkan
@ kRuntimeStageVulkan
impeller::compiler::kReflectionCCTemplate
constexpr std::string_view kReflectionCCTemplate
Definition: code_gen_template.h:204
impeller::compiler::CompilerBackend::ExtendedResourceIndex::kSecondary
@ kSecondary
impeller::compiler::ExecutionModelToString
static std::string ExecutionModelToString(spv::ExecutionModel model)
Definition: reflector.cc:73
impeller::compiler::CompilerBackend::GetExtendedMSLResourceBinding
uint32_t GetExtendedMSLResourceBinding(ExtendedResourceIndex index, spirv_cross::ID id) const
Definition: compiler_backend.cc:32
impeller::IPoint32
TPoint< int32_t > IPoint32
Definition: point.h:310
impeller::compiler::CompilerBackend::Type::kGLSL
@ kGLSL
impeller::compiler::CompilerBackend::GetType
Type GetType() const
Definition: compiler_backend.cc:105
impeller::compiler::VertexType::type_name
std::string type_name
Definition: reflector.cc:1028
impeller::compiler::ReadKnownScalarType
static std::optional< KnownType > ReadKnownScalarType(spirv_cross::SPIRType::BaseType type)
Definition: reflector.cc:586
impeller::compiler::Reflector::~Reflector
~Reflector()
impeller::compiler::Reflector::Options
Definition: reflector.h:53
impeller::compiler::kReflectionHeaderTemplate
constexpr std::string_view kReflectionHeaderTemplate
Definition: code_gen_template.h:10
impeller::compiler::ToString
static std::string ToString(CompilerBackend::Type type)
Definition: reflector.cc:422
utilities.h
strings.h
impeller::compiler::CompilerBackend::GetCompiler
spirv_cross::Compiler * GetCompiler()
Definition: compiler_backend.cc:51
impeller::compiler::StringToShaderStage
static std::string StringToShaderStage(const std::string &str)
Definition: reflector.cc:86
impeller::compiler::VertexType::base_type_name
std::string base_type_name
Definition: reflector.cc:1029
scalar.h
VALIDATION_LOG
#define VALIDATION_LOG
Definition: validation.h:67
impeller::compiler::CompilerBackend::Type::kSkSL
@ kSkSL
half.h
impeller::RuntimeStageBackend::kSkSL
@ kSkSL
std
Definition: comparable.h:95
impeller::compiler::TargetPlatform::kOpenGLDesktop
@ kOpenGLDesktop
impeller::compiler::TargetPlatform::kUnknown
@ kUnknown
impeller::compiler::TargetPlatform::kOpenGLES
@ kOpenGLES
impeller::compiler::CompilerBackend::Type::kMSL
@ kMSL
impeller::compiler::TargetPlatform::kRuntimeStageMetal
@ kRuntimeStageMetal
impeller::compiler::BaseTypeToString
static std::string BaseTypeToString(spirv_cross::SPIRType::BaseType type)
Definition: reflector.cc:29
impeller::compiler::Reflector::GetReflectionJSON
std::shared_ptr< fml::Mapping > GetReflectionJSON() const
Definition: reflector.cc:143
impeller::compiler::CompilerBackend::ExtendedResourceIndex::kPrimary
@ kPrimary
impeller::compiler::Reflector::Options::entry_point_name
std::string entry_point_name
Definition: reflector.h:55
impeller::compiler::Reflector::IsValid
bool IsValid() const
Definition: reflector.cc:139
impeller::compiler::GetReflectedStructSize
static size_t GetReflectedStructSize(const std::vector< StructMember > &members)
Get the reflected struct size. In the vast majority of the cases, this is the same as the declared st...
Definition: reflector.cc:633
impeller::RuntimeStageBackend::kMetal
@ kMetal
impeller
Definition: aiks_context.cc:10
impeller::compiler::TypeNameWithPaddingOfSize
static std::string TypeNameWithPaddingOfSize(size_t size)
Definition: reflector.cc:575
impeller::compiler::Reflector::Reflector
Reflector(Options options, const std::shared_ptr< const spirv_cross::ParsedIR > &ir, const std::shared_ptr< fml::Mapping > &shader_data, const CompilerBackend &compiler)
Definition: reflector.cc:102
types.h
impeller::compiler::VertexType
Definition: reflector.cc:1027
impeller::compiler::TargetPlatform::kSkSL
@ kSkSL
impeller::compiler::TargetPlatform::kRuntimeStageGLES
@ kRuntimeStageGLES