Flutter Impeller
reflector.cc
Go to the documentation of this file.
1 // Copyright 2013 The Flutter Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 // FLUTTER_NOLINT: https://github.com/flutter/flutter/issues/105732
6 
8 
9 #include <atomic>
10 #include <optional>
11 #include <set>
12 #include <sstream>
13 
14 #include "flutter/fml/closure.h"
15 #include "flutter/fml/logging.h"
16 #include "impeller/base/strings.h"
22 #include "impeller/geometry/half.h"
25 
26 namespace impeller {
27 namespace compiler {
28 
29 static std::string BaseTypeToString(spirv_cross::SPIRType::BaseType type) {
30  using Type = spirv_cross::SPIRType::BaseType;
31  switch (type) {
32  case Type::Void:
33  return "ShaderType::kVoid";
34  case Type::Boolean:
35  return "ShaderType::kBoolean";
36  case Type::SByte:
37  return "ShaderType::kSignedByte";
38  case Type::UByte:
39  return "ShaderType::kUnsignedByte";
40  case Type::Short:
41  return "ShaderType::kSignedShort";
42  case Type::UShort:
43  return "ShaderType::kUnsignedShort";
44  case Type::Int:
45  return "ShaderType::kSignedInt";
46  case Type::UInt:
47  return "ShaderType::kUnsignedInt";
48  case Type::Int64:
49  return "ShaderType::kSignedInt64";
50  case Type::UInt64:
51  return "ShaderType::kUnsignedInt64";
52  case Type::AtomicCounter:
53  return "ShaderType::kAtomicCounter";
54  case Type::Half:
55  return "ShaderType::kHalfFloat";
56  case Type::Float:
57  return "ShaderType::kFloat";
58  case Type::Double:
59  return "ShaderType::kDouble";
60  case Type::Struct:
61  return "ShaderType::kStruct";
62  case Type::Image:
63  return "ShaderType::kImage";
64  case Type::SampledImage:
65  return "ShaderType::kSampledImage";
66  case Type::Sampler:
67  return "ShaderType::kSampler";
68  default:
69  return "ShaderType::kUnknown";
70  }
71 }
72 
73 static std::string ExecutionModelToString(spv::ExecutionModel model) {
74  switch (model) {
75  case spv::ExecutionModel::ExecutionModelVertex:
76  return "vertex";
77  case spv::ExecutionModel::ExecutionModelFragment:
78  return "fragment";
79  case spv::ExecutionModel::ExecutionModelTessellationControl:
80  return "tessellation_control";
81  case spv::ExecutionModel::ExecutionModelTessellationEvaluation:
82  return "tessellation_evaluation";
83  case spv::ExecutionModel::ExecutionModelGLCompute:
84  return "compute";
85  default:
86  return "unsupported";
87  }
88 }
89 
// Maps a stage name (as produced by ExecutionModelToString) to the name of
// the matching Impeller ShaderStage constant for emission into generated
// code.
//
// @param[in] str  The lowercase stage name, e.g. "vertex" or "fragment".
//
// @return The ShaderStage constant name; "ShaderStage::kUnknown" for any
//         unrecognized input.
//
// Takes the string by const reference: the argument is only compared
// against literals, so the previous by-value parameter forced a needless
// copy per call.
static std::string StringToShaderStage(const std::string& str) {
  if (str == "vertex") {
    return "ShaderStage::kVertex";
  }

  if (str == "fragment") {
    return "ShaderStage::kFragment";
  }

  if (str == "tessellation_control") {
    return "ShaderStage::kTessellationControl";
  }

  if (str == "tessellation_evaluation") {
    return "ShaderStage::kTessellationEvaluation";
  }

  if (str == "compute") {
    return "ShaderStage::kCompute";
  }

  return "ShaderStage::kUnknown";
}
113 
    // NOTE(review): the first line of this constructor's signature falls
    // outside the visible range of this listing; these are its remaining
    // parameters.
    std::shared_ptr<const spirv_cross::ParsedIR> ir,
    std::shared_ptr<fml::Mapping> shader_data,
    CompilerBackend compiler)
    : options_(std::move(options)),
      ir_(std::move(ir)),
      shader_data_(std::move(shader_data)),
      compiler_(std::move(compiler)) {
  // All reflection artifacts are generated eagerly here. Every step must
  // succeed for is_valid_ to become true; an early return leaves the
  // reflector inert (IsValid() == false) with later artifacts null.
  if (!ir_ || !compiler_) {
    return;
  }

  if (auto template_arguments = GenerateTemplateArguments();
      template_arguments.has_value()) {
    template_arguments_ =
        std::make_unique<nlohmann::json>(std::move(template_arguments.value()));
  } else {
    return;
  }

  // The header and CC templates are both rendered from template_arguments_.
  reflection_header_ = GenerateReflectionHeader();
  if (!reflection_header_) {
    return;
  }

  reflection_cc_ = GenerateReflectionCC();
  if (!reflection_cc_) {
    return;
  }

  runtime_stage_data_ = GenerateRuntimeStageData();
  if (!runtime_stage_data_) {
    return;
  }

  is_valid_ = true;
}
151 
// All members are smart pointers; the defaulted destructor suffices.
Reflector::~Reflector() = default;
153 
// Returns true only if every generation step in the constructor succeeded.
bool Reflector::IsValid() const {
  return is_valid_;
}
157 
158 std::shared_ptr<fml::Mapping> Reflector::GetReflectionJSON() const {
159  if (!is_valid_) {
160  return nullptr;
161  }
162 
163  auto json_string =
164  std::make_shared<std::string>(template_arguments_->dump(2u));
165 
166  return std::make_shared<fml::NonOwnedMapping>(
167  reinterpret_cast<const uint8_t*>(json_string->data()),
168  json_string->size(), [json_string](auto, auto) {});
169 }
170 
// Returns the rendered reflection header, or nullptr if generation failed.
std::shared_ptr<fml::Mapping> Reflector::GetReflectionHeader() const {
  return reflection_header_;
}
174 
// Returns the rendered reflection CC file, or nullptr if generation failed.
std::shared_ptr<fml::Mapping> Reflector::GetReflectionCC() const {
  return reflection_cc_;
}
178 
// Returns the runtime stage payload, or nullptr if generation failed.
std::shared_ptr<RuntimeStageData> Reflector::GetRuntimeStageData() const {
  return runtime_stage_data_;
}
182 
// Builds the JSON object handed to the inja templates that render the
// reflection header and CC file. Returns std::nullopt if the shader is
// malformed (entrypoint count != 1) or any resource fails to reflect.
std::optional<nlohmann::json> Reflector::GenerateTemplateArguments() const {
  nlohmann::json root;

  const auto& entrypoints = compiler_->get_entry_points_and_stages();
  if (entrypoints.size() != 1) {
    VALIDATION_LOG << "Incorrect number of entrypoints in the shader. Found "
                   << entrypoints.size() << " but expected 1.";
    return std::nullopt;
  }

  auto execution_model = entrypoints.front().execution_model;
  // Top-level metadata consumed directly by the templates.
  {
    root["entrypoint"] = options_.entry_point_name;
    root["shader_name"] = options_.shader_name;
    root["shader_stage"] = ExecutionModelToString(execution_model);
    root["header_file_name"] = options_.header_file_name;
  }

  const auto shader_resources = compiler_->get_shader_resources();

  // Uniform and storage buffers.
  {
    auto& buffers = root["buffers"] = nlohmann::json::array_t{};
    if (auto uniform_buffers_json =
            ReflectResources(shader_resources.uniform_buffers);
        uniform_buffers_json.has_value()) {
      for (auto uniform_buffer : uniform_buffers_json.value()) {
        uniform_buffer["descriptor_type"] = "DescriptorType::kUniformBuffer";
        buffers.emplace_back(std::move(uniform_buffer));
      }
    } else {
      return std::nullopt;
    }
    if (auto storage_buffers_json =
            ReflectResources(shader_resources.storage_buffers);
        storage_buffers_json.has_value()) {
      for (auto uniform_buffer : storage_buffers_json.value()) {
        uniform_buffer["descriptor_type"] = "DescriptorType::kStorageBuffer";
        buffers.emplace_back(std::move(uniform_buffer));
      }
    } else {
      return std::nullopt;
    }
  }

  // Stage inputs. Offsets are only computed for vertex-like stages, where
  // the inputs describe a vertex buffer layout.
  {
    auto& stage_inputs = root["stage_inputs"] = nlohmann::json::array_t{};
    if (auto stage_inputs_json = ReflectResources(
            shader_resources.stage_inputs,
            /*compute_offsets=*/execution_model == spv::ExecutionModelVertex);
        stage_inputs_json.has_value()) {
      stage_inputs = std::move(stage_inputs_json.value());
    } else {
      return std::nullopt;
    }
  }

  // Combined image samplers, separate images, and separate samplers are all
  // emitted into a single "sampled_images" array, distinguished by their
  // "descriptor_type" entry.
  {
    auto combined_sampled_images =
        ReflectResources(shader_resources.sampled_images);
    auto images = ReflectResources(shader_resources.separate_images);
    auto samplers = ReflectResources(shader_resources.separate_samplers);
    if (!combined_sampled_images.has_value() || !images.has_value() ||
        !samplers.has_value()) {
      return std::nullopt;
    }
    auto& sampled_images = root["sampled_images"] = nlohmann::json::array_t{};
    for (auto value : combined_sampled_images.value()) {
      value["descriptor_type"] = "DescriptorType::kSampledImage";
      sampled_images.emplace_back(std::move(value));
    }
    for (auto value : images.value()) {
      value["descriptor_type"] = "DescriptorType::kImage";
      sampled_images.emplace_back(std::move(value));
    }
    for (auto value : samplers.value()) {
      value["descriptor_type"] = "DescriptorType::kSampledSampler";
      sampled_images.emplace_back(std::move(value));
    }
  }

  if (auto stage_outputs = ReflectResources(shader_resources.stage_outputs);
      stage_outputs.has_value()) {
    root["stage_outputs"] = std::move(stage_outputs.value());
  } else {
    return std::nullopt;
  }

  // Struct definitions: the synthesized per-vertex struct (vertex stages
  // only) followed by every struct type present in the IR.
  {
    auto& struct_definitions = root["struct_definitions"] =
        nlohmann::json::array_t{};
    if (entrypoints.front().execution_model ==
            spv::ExecutionModel::ExecutionModelVertex &&
        !shader_resources.stage_inputs.empty()) {
      if (auto struc =
              ReflectPerVertexStructDefinition(shader_resources.stage_inputs);
          struc.has_value()) {
        struct_definitions.emplace_back(EmitStructDefinition(struc.value()));
      } else {
        // If there are stage inputs, it is an error to not generate a per
        // vertex data struct for a vertex like shader stage.
        return std::nullopt;
      }
    }

    std::set<spirv_cross::ID> known_structs;
    ir_->for_each_typed_id<spirv_cross::SPIRType>(
        [&](uint32_t, const spirv_cross::SPIRType& type) {
          if (known_structs.find(type.self) != known_structs.end()) {
            // Iterating over types this way leads to duplicates which may cause
            // duplicate struct definitions.
            return;
          }
          known_structs.insert(type.self);
          if (auto struc = ReflectStructDefinition(type.self);
              struc.has_value()) {
            struct_definitions.emplace_back(
                EmitStructDefinition(struc.value()));
          }
        });
  }

  root["bind_prototypes"] =
      EmitBindPrototypes(shader_resources, execution_model);

  return root;
}
310 
// Renders the reflection header from the shared template arguments.
std::shared_ptr<fml::Mapping> Reflector::GenerateReflectionHeader() const {
  return InflateTemplate(kReflectionHeaderTemplate);
}
314 
// Renders the reflection CC file from the shared template arguments.
std::shared_ptr<fml::Mapping> Reflector::GenerateReflectionCC() const {
  return InflateTemplate(kReflectionCCTemplate);
}
318 
319 std::shared_ptr<RuntimeStageData> Reflector::GenerateRuntimeStageData() const {
320  const auto& entrypoints = compiler_->get_entry_points_and_stages();
321  if (entrypoints.size() != 1u) {
322  VALIDATION_LOG << "Single entrypoint not found.";
323  return nullptr;
324  }
325  auto data = std::make_shared<RuntimeStageData>(
326  options_.entry_point_name, //
327  entrypoints.front().execution_model, //
328  options_.target_platform //
329  );
330  data->SetShaderData(shader_data_);
331  if (sksl_data_) {
332  data->SetSkSLData(sksl_data_);
333  }
334 
335  // Sort the IR so that the uniforms are in declaration order.
336  std::vector<spirv_cross::ID> uniforms =
337  SortUniforms(ir_.get(), compiler_.GetCompiler());
338 
339  for (auto& sorted_id : uniforms) {
340  auto var = ir_->ids[sorted_id].get<spirv_cross::SPIRVariable>();
341  const auto spir_type = compiler_->get_type(var.basetype);
342  UniformDescription uniform_description;
343  uniform_description.name = compiler_->get_name(var.self);
344  uniform_description.location = compiler_->get_decoration(
345  var.self, spv::Decoration::DecorationLocation);
346  uniform_description.type = spir_type.basetype;
347  uniform_description.rows = spir_type.vecsize;
348  uniform_description.columns = spir_type.columns;
349  uniform_description.bit_width = spir_type.width;
350  uniform_description.array_elements = GetArrayElements(spir_type);
351  data->AddUniformDescription(std::move(uniform_description));
352  }
353  return data;
354 }
355 
356 std::optional<uint32_t> Reflector::GetArrayElements(
357  const spirv_cross::SPIRType& type) const {
358  if (type.array.empty()) {
359  return std::nullopt;
360  }
361  FML_CHECK(type.array.size() == 1)
362  << "Multi-dimensional arrays are not supported.";
363  FML_CHECK(type.array_size_literal.front())
364  << "Must use a literal for array sizes.";
365  return type.array.front();
366 }
367 
// Returns a human-readable name for the code-generator backend; exposed to
// the templates via the "get_generator_name" callback in InflateTemplate.
//
// NOTE(review): the case labels of this switch fall outside the visible
// range of this listing; the three returns presumably correspond to the
// MSL, GLSL, and SkSL backends — confirm against the full source.
static std::string ToString(CompilerBackend::Type type) {
  switch (type) {
      return "Metal Shading Language";
      return "OpenGL Shading Language";
      return "SkSL Shading Language";
  }
  FML_UNREACHABLE();
}
379 
380 std::shared_ptr<fml::Mapping> Reflector::InflateTemplate(
381  std::string_view tmpl) const {
382  inja::Environment env;
383  env.set_trim_blocks(true);
384  env.set_lstrip_blocks(true);
385 
386  env.add_callback("camel_case", 1u, [](inja::Arguments& args) {
387  return ConvertToCamelCase(args.at(0u)->get<std::string>());
388  });
389 
390  env.add_callback("to_shader_stage", 1u, [](inja::Arguments& args) {
391  return StringToShaderStage(args.at(0u)->get<std::string>());
392  });
393 
394  env.add_callback("get_generator_name", 0u,
395  [type = compiler_.GetType()](inja::Arguments& args) {
396  return ToString(type);
397  });
398 
399  auto inflated_template =
400  std::make_shared<std::string>(env.render(tmpl, *template_arguments_));
401 
402  return std::make_shared<fml::NonOwnedMapping>(
403  reinterpret_cast<const uint8_t*>(inflated_template->data()),
404  inflated_template->size(), [inflated_template](auto, auto) {});
405 }
406 
407 std::vector<size_t> Reflector::ComputeOffsets(
408  const spirv_cross::SmallVector<spirv_cross::Resource>& resources) const {
409  std::vector<size_t> offsets(resources.size(), 0);
410  if (resources.size() == 0) {
411  return offsets;
412  }
413  for (const auto& resource : resources) {
414  const auto type = compiler_->get_type(resource.type_id);
415  auto location = compiler_->get_decoration(
416  resource.id, spv::Decoration::DecorationLocation);
417  // Malformed shader, will be caught later on.
418  if (location >= resources.size() || location < 0) {
419  location = 0;
420  }
421  offsets[location] = (type.width * type.vecsize) / 8;
422  }
423  for (size_t i = 1; i < resources.size(); i++) {
424  offsets[i] += offsets[i - 1];
425  }
426  for (size_t i = resources.size() - 1; i > 0; i--) {
427  offsets[i] = offsets[i - 1];
428  }
429  offsets[0] = 0;
430 
431  return offsets;
432 }
433 
// Reflects a single shader resource (buffer, image, sampler, or stage IO)
// into the JSON shape the templates consume. Returns std::nullopt if the
// resource's type cannot be reflected.
std::optional<nlohmann::json::object_t> Reflector::ReflectResource(
    const spirv_cross::Resource& resource,
    std::optional<size_t> offset) const {
  nlohmann::json::object_t result;

  result["name"] = resource.name;
  // "descriptor_set" and "set" both carry the DescriptorSet decoration;
  // the duplicate key is for the templates' convenience.
  result["descriptor_set"] = compiler_->get_decoration(
      resource.id, spv::Decoration::DecorationDescriptorSet);
  result["binding"] = compiler_->get_decoration(
      resource.id, spv::Decoration::DecorationBinding);
  result["set"] = compiler_->get_decoration(
      resource.id, spv::Decoration::DecorationDescriptorSet);
  result["location"] = compiler_->get_decoration(
      resource.id, spv::Decoration::DecorationLocation);
  result["index"] =
      compiler_->get_decoration(resource.id, spv::Decoration::DecorationIndex);
  // NOTE(review): the argument lists of the two calls below fall outside the
  // visible range of this listing.
  result["ext_res_0"] = compiler_.GetExtendedMSLResourceBinding(
  result["ext_res_1"] = compiler_.GetExtendedMSLResourceBinding(
  auto type = ReflectType(resource.type_id);
  if (!type.has_value()) {
    return std::nullopt;
  }
  result["type"] = std::move(type.value());
  result["offset"] = offset.value_or(0u);
  return result;
}
462 
463 std::optional<nlohmann::json::object_t> Reflector::ReflectType(
464  const spirv_cross::TypeID& type_id) const {
465  nlohmann::json::object_t result;
466 
467  const auto type = compiler_->get_type(type_id);
468 
469  result["type_name"] = BaseTypeToString(type.basetype);
470  result["bit_width"] = type.width;
471  result["vec_size"] = type.vecsize;
472  result["columns"] = type.columns;
473  auto& members = result["members"] = nlohmann::json::array_t{};
474  if (type.basetype == spirv_cross::SPIRType::BaseType::Struct) {
475  for (const auto& struct_member : ReadStructMembers(type_id)) {
476  auto member = nlohmann::json::object_t{};
477  member["name"] = struct_member.name;
478  member["type"] = struct_member.type;
479  member["base_type"] = struct_member.base_type;
480  member["offset"] = struct_member.offset;
481  member["size"] = struct_member.size;
482  member["byte_length"] = struct_member.byte_length;
483  if (struct_member.array_elements.has_value()) {
484  member["array_elements"] = struct_member.array_elements.value();
485  } else {
486  member["array_elements"] = "std::nullopt";
487  }
488  members.emplace_back(std::move(member));
489  }
490  }
491 
492  return result;
493 }
494 
495 std::optional<nlohmann::json::array_t> Reflector::ReflectResources(
496  const spirv_cross::SmallVector<spirv_cross::Resource>& resources,
497  bool compute_offsets) const {
498  nlohmann::json::array_t result;
499  result.reserve(resources.size());
500  std::vector<size_t> offsets;
501  if (compute_offsets) {
502  offsets = ComputeOffsets(resources);
503  }
504  for (const auto& resource : resources) {
505  std::optional<size_t> maybe_offset = std::nullopt;
506  if (compute_offsets) {
507  auto location = compiler_->get_decoration(
508  resource.id, spv::Decoration::DecorationLocation);
509  maybe_offset = offsets[location];
510  }
511  if (auto reflected = ReflectResource(resource, maybe_offset);
512  reflected.has_value()) {
513  result.emplace_back(std::move(reflected.value()));
514  } else {
515  return std::nullopt;
516  }
517  }
518  return result;
519 }
520 
// Returns the name of the generated padding type of the given byte size,
// e.g. "Padding<4>".
static std::string TypeNameWithPaddingOfSize(size_t size) {
  return "Padding<" + std::to_string(size) + ">";
}
526 
// A scalar base type for which the host-side (C++) type name and byte size
// are known, allowing struct members to be emitted with a concrete type
// instead of opaque padding.
struct KnownType {
  std::string name;      // Host-side type name, e.g. "Scalar" or "uint32_t".
  size_t byte_size = 0;  // sizeof the host-side type.
};
531 
532 static std::optional<KnownType> ReadKnownScalarType(
533  spirv_cross::SPIRType::BaseType type) {
534  switch (type) {
535  case spirv_cross::SPIRType::BaseType::Boolean:
536  return KnownType{
537  .name = "bool",
538  .byte_size = sizeof(bool),
539  };
540  case spirv_cross::SPIRType::BaseType::Float:
541  return KnownType{
542  .name = "Scalar",
543  .byte_size = sizeof(Scalar),
544  };
545  case spirv_cross::SPIRType::BaseType::Half:
546  return KnownType{
547  .name = "Half",
548  .byte_size = sizeof(Half),
549  };
550  case spirv_cross::SPIRType::BaseType::UInt:
551  return KnownType{
552  .name = "uint32_t",
553  .byte_size = sizeof(uint32_t),
554  };
555  case spirv_cross::SPIRType::BaseType::Int:
556  return KnownType{
557  .name = "int32_t",
558  .byte_size = sizeof(int32_t),
559  };
560  default:
561  break;
562  }
563  return std::nullopt;
564 }
565 
566 //------------------------------------------------------------------------------
567 /// @brief Get the reflected struct size. In the vast majority of the
568 /// cases, this is the same as the declared struct size as given by
569 /// the compiler. But, additional padding may need to be introduced
570 /// after the end of the struct to keep in line with the alignment
571 /// requirement of the individual struct members. This method
572 /// figures out the actual size of the reflected struct that can be
573 /// referenced in native code.
574 ///
575 /// @param[in] members The members
576 ///
577 /// @return The reflected structure size.
578 ///
579 static size_t GetReflectedStructSize(const std::vector<StructMember>& members) {
580  auto struct_size = 0u;
581  for (const auto& member : members) {
582  struct_size += member.byte_length;
583  }
584  return struct_size;
585 }
586 
// Flattens a struct type into an ordered list of StructMembers whose offsets
// and sizes match the shader's layout. Gaps between the compiler-reported
// member offsets are filled with synthesized Padding<> members, and trailing
// padding is appended so the reflected size honors the largest member
// alignment. Each recognized member shape (matrix, vectors, half vectors,
// scalars, nested structs) is special-cased to a concrete host type; anything
// unrecognized becomes opaque padding of the right size.
std::vector<StructMember> Reflector::ReadStructMembers(
    const spirv_cross::TypeID& type_id) const {
  const auto& struct_type = compiler_->get_type(type_id);
  FML_CHECK(struct_type.basetype == spirv_cross::SPIRType::BaseType::Struct);

  std::vector<StructMember> result;

  // Byte cursor tracking where the next member must begin; checked against
  // the compiler-reported offset after any padding is inserted.
  size_t current_byte_offset = 0;
  size_t max_member_alignment = 0;

  for (size_t i = 0; i < struct_type.member_types.size(); i++) {
    const auto& member = compiler_->get_type(struct_type.member_types[i]);
    const auto struct_member_offset =
        compiler_->type_struct_member_offset(struct_type, i);
    auto array_elements = GetArrayElements(member);

    // Fill any gap before this member with an explicit padding member.
    if (struct_member_offset > current_byte_offset) {
      const auto alignment_pad = struct_member_offset - current_byte_offset;
      result.emplace_back(StructMember{
          TypeNameWithPaddingOfSize(alignment_pad),                 // type
          BaseTypeToString(spirv_cross::SPIRType::BaseType::Void),  // basetype
          SPrintF("_PADDING_%s_",
                  GetMemberNameAtIndex(struct_type, i).c_str()),  // name
          current_byte_offset,                                    // offset
          alignment_pad,                                          // size
          alignment_pad,  // byte_length
          std::nullopt,   // array_elements
          0,              // element_padding
      });
      current_byte_offset += alignment_pad;
    }

    max_member_alignment =
        std::max<size_t>(max_member_alignment,
                         (member.width / 8) * member.columns * member.vecsize);

    FML_CHECK(current_byte_offset == struct_member_offset);

    // A user defined struct.
    if (member.basetype == spirv_cross::SPIRType::BaseType::Struct) {
      const size_t size =
          GetReflectedStructSize(ReadStructMembers(member.self));
      uint32_t stride = GetArrayStride<0>(struct_type, member, i);
      if (stride == 0) {
        stride = size;
      }
      uint32_t element_padding = stride - size;
      result.emplace_back(StructMember{
          compiler_->get_name(member.self),      // type
          BaseTypeToString(member.basetype),     // basetype
          GetMemberNameAtIndex(struct_type, i),  // name
          struct_member_offset,                  // offset
          size,                                  // size
          stride * array_elements.value_or(1),   // byte_length
          array_elements,                        // array_elements
          element_padding,                       // element_padding
      });
      current_byte_offset += stride * array_elements.value_or(1);
      continue;
    }

    // Tightly packed 4x4 Matrix is special cased as we know how to work with
    // those.
    if (member.basetype == spirv_cross::SPIRType::BaseType::Float &&  //
        member.width == sizeof(Scalar) * 8 &&                         //
        member.columns == 4 &&                                        //
        member.vecsize == 4                                           //
    ) {
      uint32_t stride = GetArrayStride<sizeof(Matrix)>(struct_type, member, i);
      uint32_t element_padding = stride - sizeof(Matrix);
      result.emplace_back(StructMember{
          "Matrix",                              // type
          BaseTypeToString(member.basetype),     // basetype
          GetMemberNameAtIndex(struct_type, i),  // name
          struct_member_offset,                  // offset
          sizeof(Matrix),                        // size
          stride * array_elements.value_or(1),   // byte_length
          array_elements,                        // array_elements
          element_padding,                       // element_padding
      });
      current_byte_offset += stride * array_elements.value_or(1);
      continue;
    }

    // Tightly packed UintPoint32 (uvec2)
    if (member.basetype == spirv_cross::SPIRType::BaseType::UInt &&  //
        member.width == sizeof(uint32_t) * 8 &&                      //
        member.columns == 1 &&                                       //
        member.vecsize == 2                                          //
    ) {
      uint32_t stride =
          GetArrayStride<sizeof(UintPoint32)>(struct_type, member, i);
      uint32_t element_padding = stride - sizeof(UintPoint32);
      result.emplace_back(StructMember{
          "UintPoint32",                         // type
          BaseTypeToString(member.basetype),     // basetype
          GetMemberNameAtIndex(struct_type, i),  // name
          struct_member_offset,                  // offset
          sizeof(UintPoint32),                   // size
          stride * array_elements.value_or(1),   // byte_length
          array_elements,                        // array_elements
          element_padding,                       // element_padding
      });
      current_byte_offset += stride * array_elements.value_or(1);
      continue;
    }

    // Tightly packed UintPoint32 (ivec2)
    if (member.basetype == spirv_cross::SPIRType::BaseType::Int &&  //
        member.width == sizeof(int32_t) * 8 &&                      //
        member.columns == 1 &&                                      //
        member.vecsize == 2                                         //
    ) {
      uint32_t stride =
          GetArrayStride<sizeof(IPoint32)>(struct_type, member, i);
      uint32_t element_padding = stride - sizeof(IPoint32);
      result.emplace_back(StructMember{
          "IPoint32",                            // type
          BaseTypeToString(member.basetype),     // basetype
          GetMemberNameAtIndex(struct_type, i),  // name
          struct_member_offset,                  // offset
          sizeof(IPoint32),                      // size
          stride * array_elements.value_or(1),   // byte_length
          array_elements,                        // array_elements
          element_padding,                       // element_padding
      });
      current_byte_offset += stride * array_elements.value_or(1);
      continue;
    }

    // Tightly packed Point (vec2).
    if (member.basetype == spirv_cross::SPIRType::BaseType::Float &&  //
        member.width == sizeof(float) * 8 &&                          //
        member.columns == 1 &&                                        //
        member.vecsize == 2                                           //
    ) {
      uint32_t stride = GetArrayStride<sizeof(Point)>(struct_type, member, i);
      uint32_t element_padding = stride - sizeof(Point);
      result.emplace_back(StructMember{
          "Point",                               // type
          BaseTypeToString(member.basetype),     // basetype
          GetMemberNameAtIndex(struct_type, i),  // name
          struct_member_offset,                  // offset
          sizeof(Point),                         // size
          stride * array_elements.value_or(1),   // byte_length
          array_elements,                        // array_elements
          element_padding,                       // element_padding
      });
      current_byte_offset += stride * array_elements.value_or(1);
      continue;
    }

    // Tightly packed Vector3.
    if (member.basetype == spirv_cross::SPIRType::BaseType::Float &&  //
        member.width == sizeof(float) * 8 &&                          //
        member.columns == 1 &&                                        //
        member.vecsize == 3                                           //
    ) {
      uint32_t stride = GetArrayStride<sizeof(Vector3)>(struct_type, member, i);
      uint32_t element_padding = stride - sizeof(Vector3);
      result.emplace_back(StructMember{
          "Vector3",                             // type
          BaseTypeToString(member.basetype),     // basetype
          GetMemberNameAtIndex(struct_type, i),  // name
          struct_member_offset,                  // offset
          sizeof(Vector3),                       // size
          stride * array_elements.value_or(1),   // byte_length
          array_elements,                        // array_elements
          element_padding,                       // element_padding
      });
      current_byte_offset += stride * array_elements.value_or(1);
      continue;
    }

    // Tightly packed Vector4.
    if (member.basetype == spirv_cross::SPIRType::BaseType::Float &&  //
        member.width == sizeof(float) * 8 &&                          //
        member.columns == 1 &&                                        //
        member.vecsize == 4                                           //
    ) {
      uint32_t stride = GetArrayStride<sizeof(Vector4)>(struct_type, member, i);
      uint32_t element_padding = stride - sizeof(Vector4);
      result.emplace_back(StructMember{
          "Vector4",                             // type
          BaseTypeToString(member.basetype),     // basetype
          GetMemberNameAtIndex(struct_type, i),  // name
          struct_member_offset,                  // offset
          sizeof(Vector4),                       // size
          stride * array_elements.value_or(1),   // byte_length
          array_elements,                        // array_elements
          element_padding,                       // element_padding
      });
      current_byte_offset += stride * array_elements.value_or(1);
      continue;
    }

    // Tightly packed half Point (vec2).
    if (member.basetype == spirv_cross::SPIRType::BaseType::Half &&  //
        member.width == sizeof(Half) * 8 &&                          //
        member.columns == 1 &&                                       //
        member.vecsize == 2                                          //
    ) {
      uint32_t stride =
          GetArrayStride<sizeof(HalfVector2)>(struct_type, member, i);
      uint32_t element_padding = stride - sizeof(HalfVector2);
      result.emplace_back(StructMember{
          "HalfVector2",                         // type
          BaseTypeToString(member.basetype),     // basetype
          GetMemberNameAtIndex(struct_type, i),  // name
          struct_member_offset,                  // offset
          sizeof(HalfVector2),                   // size
          stride * array_elements.value_or(1),   // byte_length
          array_elements,                        // array_elements
          element_padding,                       // element_padding
      });
      current_byte_offset += stride * array_elements.value_or(1);
      continue;
    }

    // Tightly packed Half Float Vector3.
    if (member.basetype == spirv_cross::SPIRType::BaseType::Half &&  //
        member.width == sizeof(Half) * 8 &&                          //
        member.columns == 1 &&                                       //
        member.vecsize == 3                                          //
    ) {
      uint32_t stride =
          GetArrayStride<sizeof(HalfVector3)>(struct_type, member, i);
      uint32_t element_padding = stride - sizeof(HalfVector3);
      result.emplace_back(StructMember{
          "HalfVector3",                         // type
          BaseTypeToString(member.basetype),     // basetype
          GetMemberNameAtIndex(struct_type, i),  // name
          struct_member_offset,                  // offset
          sizeof(HalfVector3),                   // size
          stride * array_elements.value_or(1),   // byte_length
          array_elements,                        // array_elements
          element_padding,                       // element_padding
      });
      current_byte_offset += stride * array_elements.value_or(1);
      continue;
    }

    // Tightly packed Half Float Vector4.
    if (member.basetype == spirv_cross::SPIRType::BaseType::Half &&  //
        member.width == sizeof(Half) * 8 &&                          //
        member.columns == 1 &&                                       //
        member.vecsize == 4                                          //
    ) {
      uint32_t stride =
          GetArrayStride<sizeof(HalfVector4)>(struct_type, member, i);
      uint32_t element_padding = stride - sizeof(HalfVector4);
      result.emplace_back(StructMember{
          "HalfVector4",                         // type
          BaseTypeToString(member.basetype),     // basetype
          GetMemberNameAtIndex(struct_type, i),  // name
          struct_member_offset,                  // offset
          sizeof(HalfVector4),                   // size
          stride * array_elements.value_or(1),   // byte_length
          array_elements,                        // array_elements
          element_padding,                       // element_padding
      });
      current_byte_offset += stride * array_elements.value_or(1);
      continue;
    }

    // Other isolated scalars (like bool, int, float/Scalar, etc..).
    {
      auto maybe_known_type = ReadKnownScalarType(member.basetype);
      if (maybe_known_type.has_value() &&  //
          member.columns == 1 &&           //
          member.vecsize == 1              //
      ) {
        uint32_t stride = GetArrayStride<0>(struct_type, member, i);
        if (stride == 0) {
          stride = maybe_known_type.value().byte_size;
        }
        uint32_t element_padding = stride - maybe_known_type.value().byte_size;
        // Add the type directly.
        result.emplace_back(StructMember{
            maybe_known_type.value().name,         // type
            BaseTypeToString(member.basetype),     // basetype
            GetMemberNameAtIndex(struct_type, i),  // name
            struct_member_offset,                  // offset
            maybe_known_type.value().byte_size,    // size
            stride * array_elements.value_or(1),   // byte_length
            array_elements,                        // array_elements
            element_padding,                       // element_padding
        });
        current_byte_offset += stride * array_elements.value_or(1);
        continue;
      }
    }

    // Catch all for unknown types. Just add the necessary padding to the struct
    // and move on.
    {
      const size_t size = (member.width * member.columns * member.vecsize) / 8u;
      uint32_t stride = GetArrayStride<0>(struct_type, member, i);
      if (stride == 0) {
        stride = size;
      }
      auto element_padding = stride - size;
      result.emplace_back(StructMember{
          TypeNameWithPaddingOfSize(size),       // type
          BaseTypeToString(member.basetype),     // basetype
          GetMemberNameAtIndex(struct_type, i),  // name
          struct_member_offset,                  // offset
          size,                                  // size
          stride * array_elements.value_or(1),   // byte_length
          array_elements,                        // array_elements
          element_padding,                       // element_padding
      });
      current_byte_offset += stride * array_elements.value_or(1);
      continue;
    }
  }

  // Tail padding: pad the struct out to a multiple of the largest member
  // alignment so native code can embed it in arrays.
  if (max_member_alignment > 0u) {
    const auto struct_length = current_byte_offset;
    {
      const auto excess = struct_length % max_member_alignment;
      if (excess != 0) {
        const auto padding = max_member_alignment - excess;
        result.emplace_back(StructMember{
            TypeNameWithPaddingOfSize(padding),  // type
            // NOTE(review): a wrapping call on the next argument falls
            // outside the visible range of this listing.
                spirv_cross::SPIRType::BaseType::Void),  // basetype
            "_PADDING_",          // name
            current_byte_offset,  // offset
            padding,              // size
            padding,              // byte_length
            std::nullopt,         // array_elements
            0,                    // element_padding
        });
      }
    }
  }

  return result;
}
927 
928 std::optional<Reflector::StructDefinition> Reflector::ReflectStructDefinition(
929  const spirv_cross::TypeID& type_id) const {
930  const auto& type = compiler_->get_type(type_id);
931  if (type.basetype != spirv_cross::SPIRType::BaseType::Struct) {
932  return std::nullopt;
933  }
934 
935  const auto struct_name = compiler_->get_name(type_id);
936  if (struct_name.find("_RESERVED_IDENTIFIER_") != std::string::npos) {
937  return std::nullopt;
938  }
939 
940  auto struct_members = ReadStructMembers(type_id);
941  auto reflected_struct_size = GetReflectedStructSize(struct_members);
942 
943  StructDefinition struc;
944  struc.name = struct_name;
945  struc.byte_length = reflected_struct_size;
946  struc.members = std::move(struct_members);
947  return struc;
948 }
949 
950 nlohmann::json::object_t Reflector::EmitStructDefinition(
951  std::optional<Reflector::StructDefinition> struc) const {
952  nlohmann::json::object_t result;
953  result["name"] = struc->name;
954  result["byte_length"] = struc->byte_length;
955  auto& members = result["members"] = nlohmann::json::array_t{};
956  for (const auto& struct_member : struc->members) {
957  auto& member = members.emplace_back(nlohmann::json::object_t{});
958  member["name"] = struct_member.name;
959  member["type"] = struct_member.type;
960  member["base_type"] = struct_member.base_type;
961  member["offset"] = struct_member.offset;
962  member["byte_length"] = struct_member.byte_length;
963  if (struct_member.array_elements.has_value()) {
964  member["array_elements"] = struct_member.array_elements.value();
965  } else {
966  member["array_elements"] = "std::nullopt";
967  }
968  member["element_padding"] = struct_member.element_padding;
969  }
970  return result;
971 }
972 
// Intermediate description of a single vertex stage input, produced by
// VertexTypeFromInputResource and consumed when building the reflected
// PerVertexData struct.
struct VertexType {
  std::string type_name;       // Engine-side type, e.g. "Point", "Vector3", "Scalar".
  std::string base_type_name;  // "ShaderType::k..." string from BaseTypeToString.
  std::string variable_name;   // The stage input's variable name in the shader.
  size_t byte_length = 0u;     // Total size in bytes (columns * vecsize * width / 8).
};
979 
981  const spirv_cross::Compiler& compiler,
982  const spirv_cross::Resource* resource) {
983  VertexType result;
984  result.variable_name = resource->name;
985  const auto type = compiler.get_type(resource->type_id);
986  result.base_type_name = BaseTypeToString(type.basetype);
987  const auto total_size = type.columns * type.vecsize * type.width / 8u;
988  result.byte_length = total_size;
989 
990  if (type.basetype == spirv_cross::SPIRType::BaseType::Float &&
991  type.columns == 1u && type.vecsize == 2u &&
992  type.width == sizeof(float) * 8u) {
993  result.type_name = "Point";
994  } else if (type.basetype == spirv_cross::SPIRType::BaseType::Float &&
995  type.columns == 1u && type.vecsize == 4u &&
996  type.width == sizeof(float) * 8u) {
997  result.type_name = "Vector4";
998  } else if (type.basetype == spirv_cross::SPIRType::BaseType::Float &&
999  type.columns == 1u && type.vecsize == 3u &&
1000  type.width == sizeof(float) * 8u) {
1001  result.type_name = "Vector3";
1002  } else if (type.basetype == spirv_cross::SPIRType::BaseType::Float &&
1003  type.columns == 1u && type.vecsize == 1u &&
1004  type.width == sizeof(float) * 8u) {
1005  result.type_name = "Scalar";
1006  } else if (type.basetype == spirv_cross::SPIRType::BaseType::Int &&
1007  type.columns == 1u && type.vecsize == 1u &&
1008  type.width == sizeof(int32_t) * 8u) {
1009  result.type_name = "int32_t";
1010  } else {
1011  // Catch all unknown padding.
1012  result.type_name = TypeNameWithPaddingOfSize(total_size);
1013  }
1014 
1015  return result;
1016 }
1017 
1018 std::optional<Reflector::StructDefinition>
1019 Reflector::ReflectPerVertexStructDefinition(
1020  const spirv_cross::SmallVector<spirv_cross::Resource>& stage_inputs) const {
1021  // Avoid emitting a zero sized structure. The code gen templates assume a
1022  // non-zero size.
1023  if (stage_inputs.empty()) {
1024  return std::nullopt;
1025  }
1026 
1027  // Validate locations are contiguous and there are no duplicates.
1028  std::set<uint32_t> locations;
1029  for (const auto& input : stage_inputs) {
1030  auto location = compiler_->get_decoration(
1031  input.id, spv::Decoration::DecorationLocation);
1032  if (locations.count(location) != 0) {
1033  // Duplicate location. Bail.
1034  return std::nullopt;
1035  }
1036  locations.insert(location);
1037  }
1038 
1039  for (size_t i = 0; i < locations.size(); i++) {
1040  if (locations.count(i) != 1) {
1041  // Locations are not contiguous. This usually happens when a single stage
1042  // input takes multiple input slots. No reflection information can be
1043  // generated for such cases anyway. So bail! It is up to the shader author
1044  // to make sure one stage input maps to a single input slot.
1045  return std::nullopt;
1046  }
1047  }
1048 
1049  auto input_for_location =
1050  [&](uint32_t queried_location) -> const spirv_cross::Resource* {
1051  for (const auto& input : stage_inputs) {
1052  auto location = compiler_->get_decoration(
1053  input.id, spv::Decoration::DecorationLocation);
1054  if (location == queried_location) {
1055  return &input;
1056  }
1057  }
1058  // This really cannot happen with all the validation above.
1059  FML_UNREACHABLE();
1060  return nullptr;
1061  };
1062 
1063  StructDefinition struc;
1064  struc.name = "PerVertexData";
1065  struc.byte_length = 0u;
1066  for (size_t i = 0; i < locations.size(); i++) {
1067  auto resource = input_for_location(i);
1068  if (resource == nullptr) {
1069  return std::nullopt;
1070  }
1071  const auto vertex_type =
1072  VertexTypeFromInputResource(*compiler_.GetCompiler(), resource);
1073 
1074  auto member = StructMember{
1075  vertex_type.type_name, // type
1076  vertex_type.base_type_name, // base type
1077  vertex_type.variable_name, // name
1078  struc.byte_length, // offset
1079  vertex_type.byte_length, // size
1080  vertex_type.byte_length, // byte_length
1081  std::nullopt, // array_elements
1082  0, // element_padding
1083  };
1084  struc.byte_length += vertex_type.byte_length;
1085  struc.members.emplace_back(std::move(member));
1086  }
1087  return struc;
1088 }
1089 
1090 std::optional<std::string> Reflector::GetMemberNameAtIndexIfExists(
1091  const spirv_cross::SPIRType& parent_type,
1092  size_t index) const {
1093  if (parent_type.type_alias != 0) {
1094  return GetMemberNameAtIndexIfExists(
1095  compiler_->get_type(parent_type.type_alias), index);
1096  }
1097 
1098  if (auto found = ir_->meta.find(parent_type.self); found != ir_->meta.end()) {
1099  const auto& members = found->second.members;
1100  if (index < members.size() && !members[index].alias.empty()) {
1101  return members[index].alias;
1102  }
1103  }
1104  return std::nullopt;
1105 }
1106 
1107 std::string Reflector::GetMemberNameAtIndex(
1108  const spirv_cross::SPIRType& parent_type,
1109  size_t index,
1110  std::string suffix) const {
1111  if (auto name = GetMemberNameAtIndexIfExists(parent_type, index);
1112  name.has_value()) {
1113  return name.value();
1114  }
1115  static std::atomic_size_t sUnnamedMembersID;
1116  std::stringstream stream;
1117  stream << "unnamed_" << sUnnamedMembersID++ << suffix;
1118  return stream.str();
1119 }
1120 
1121 std::vector<Reflector::BindPrototype> Reflector::ReflectBindPrototypes(
1122  const spirv_cross::ShaderResources& resources,
1123  spv::ExecutionModel execution_model) const {
1124  std::vector<BindPrototype> prototypes;
1125  for (const auto& uniform_buffer : resources.uniform_buffers) {
1126  auto& proto = prototypes.emplace_back(BindPrototype{});
1127  proto.return_type = "bool";
1128  proto.name = ConvertToCamelCase(uniform_buffer.name);
1129  {
1130  std::stringstream stream;
1131  stream << "Bind uniform buffer for resource named " << uniform_buffer.name
1132  << ".";
1133  proto.docstring = stream.str();
1134  }
1135  proto.args.push_back(BindPrototypeArgument{
1136  .type_name = "ResourceBinder&",
1137  .argument_name = "command",
1138  });
1139  proto.args.push_back(BindPrototypeArgument{
1140  .type_name = "BufferView",
1141  .argument_name = "view",
1142  });
1143  }
1144  for (const auto& storage_buffer : resources.storage_buffers) {
1145  auto& proto = prototypes.emplace_back(BindPrototype{});
1146  proto.return_type = "bool";
1147  proto.name = ConvertToCamelCase(storage_buffer.name);
1148  {
1149  std::stringstream stream;
1150  stream << "Bind storage buffer for resource named " << storage_buffer.name
1151  << ".";
1152  proto.docstring = stream.str();
1153  }
1154  proto.args.push_back(BindPrototypeArgument{
1155  .type_name = "ResourceBinder&",
1156  .argument_name = "command",
1157  });
1158  proto.args.push_back(BindPrototypeArgument{
1159  .type_name = "BufferView",
1160  .argument_name = "view",
1161  });
1162  }
1163  for (const auto& sampled_image : resources.sampled_images) {
1164  auto& proto = prototypes.emplace_back(BindPrototype{});
1165  proto.return_type = "bool";
1166  proto.name = ConvertToCamelCase(sampled_image.name);
1167  {
1168  std::stringstream stream;
1169  stream << "Bind combined image sampler for resource named "
1170  << sampled_image.name << ".";
1171  proto.docstring = stream.str();
1172  }
1173  proto.args.push_back(BindPrototypeArgument{
1174  .type_name = "ResourceBinder&",
1175  .argument_name = "command",
1176  });
1177  proto.args.push_back(BindPrototypeArgument{
1178  .type_name = "std::shared_ptr<const Texture>",
1179  .argument_name = "texture",
1180  });
1181  proto.args.push_back(BindPrototypeArgument{
1182  .type_name = "std::shared_ptr<const Sampler>",
1183  .argument_name = "sampler",
1184  });
1185  }
1186  for (const auto& separate_image : resources.separate_images) {
1187  auto& proto = prototypes.emplace_back(BindPrototype{});
1188  proto.return_type = "bool";
1189  proto.name = ConvertToCamelCase(separate_image.name);
1190  {
1191  std::stringstream stream;
1192  stream << "Bind separate image for resource named " << separate_image.name
1193  << ".";
1194  proto.docstring = stream.str();
1195  }
1196  proto.args.push_back(BindPrototypeArgument{
1197  .type_name = "Command&",
1198  .argument_name = "command",
1199  });
1200  proto.args.push_back(BindPrototypeArgument{
1201  .type_name = "std::shared_ptr<const Texture>",
1202  .argument_name = "texture",
1203  });
1204  }
1205  for (const auto& separate_sampler : resources.separate_samplers) {
1206  auto& proto = prototypes.emplace_back(BindPrototype{});
1207  proto.return_type = "bool";
1208  proto.name = ConvertToCamelCase(separate_sampler.name);
1209  {
1210  std::stringstream stream;
1211  stream << "Bind separate sampler for resource named "
1212  << separate_sampler.name << ".";
1213  proto.docstring = stream.str();
1214  }
1215  proto.args.push_back(BindPrototypeArgument{
1216  .type_name = "Command&",
1217  .argument_name = "command",
1218  });
1219  proto.args.push_back(BindPrototypeArgument{
1220  .type_name = "std::shared_ptr<const Sampler>",
1221  .argument_name = "sampler",
1222  });
1223  }
1224  return prototypes;
1225 }
1226 
1227 nlohmann::json::array_t Reflector::EmitBindPrototypes(
1228  const spirv_cross::ShaderResources& resources,
1229  spv::ExecutionModel execution_model) const {
1230  const auto prototypes = ReflectBindPrototypes(resources, execution_model);
1231  nlohmann::json::array_t result;
1232  for (const auto& res : prototypes) {
1233  auto& item = result.emplace_back(nlohmann::json::object_t{});
1234  item["return_type"] = res.return_type;
1235  item["name"] = res.name;
1236  item["docstring"] = res.docstring;
1237  auto& args = item["args"] = nlohmann::json::array_t{};
1238  for (const auto& arg : res.args) {
1239  auto& json_arg = args.emplace_back(nlohmann::json::object_t{});
1240  json_arg["type_name"] = arg.type_name;
1241  json_arg["argument_name"] = arg.argument_name;
1242  }
1243  }
1244  return result;
1245 }
1246 
1247 } // namespace compiler
1248 } // namespace impeller
uniform_sorter.h
impeller::Scalar
float Scalar
Definition: scalar.h:15
impeller::compiler::VertexType::byte_length
size_t byte_length
Definition: reflector.cc:977
impeller::compiler::CompilerBackend
Definition: compiler_backend.h:19
impeller::compiler::VertexType::variable_name
std::string variable_name
Definition: reflector.cc:976
impeller::compiler::Reflector::Reflector
Reflector(Options options, std::shared_ptr< const spirv_cross::ParsedIR > ir, std::shared_ptr< fml::Mapping > shader_data, CompilerBackend compiler)
Definition: reflector.cc:114
impeller::UintPoint32
TPoint< uint32_t > UintPoint32
Definition: point.h:309
impeller::compiler::KnownType
Definition: reflector.cc:527
impeller::compiler::ConvertToCamelCase
std::string ConvertToCamelCase(std::string_view string)
Definition: utilities.cc:23
impeller::compiler::Reflector::GetReflectionCC
std::shared_ptr< fml::Mapping > GetReflectionCC() const
Definition: reflector.cc:175
impeller::compiler::Reflector::Options::header_file_name
std::string header_file_name
Definition: reflector.h:55
validation.h
impeller::compiler::VertexTypeFromInputResource
static VertexType VertexTypeFromInputResource(const spirv_cross::Compiler &compiler, const spirv_cross::Resource *resource)
Definition: reflector.cc:980
impeller::compiler::Reflector::GetReflectionHeader
std::shared_ptr< fml::Mapping > GetReflectionHeader() const
Definition: reflector.cc:171
impeller::compiler::Reflector::Options::target_platform
TargetPlatform target_platform
Definition: reflector.h:52
code_gen_template.h
reflector.h
matrix.h
impeller::compiler::KnownType::byte_size
size_t byte_size
Definition: reflector.cc:529
impeller::Point
TPoint< Scalar > Point
Definition: point.h:306
impeller::Half
A storage only class for half precision floating point.
Definition: half.h:37
impeller::compiler::CompilerBackend::Type
Type
Definition: compiler_backend.h:25
impeller::compiler::KnownType::name
std::string name
Definition: reflector.cc:528
impeller::SortUniforms
std::vector< spirv_cross::ID > SortUniforms(const spirv_cross::ParsedIR *ir, const spirv_cross::Compiler *compiler, std::optional< spirv_cross::SPIRType::BaseType > type_filter, bool include)
Sorts uniform declarations in an IR according to decoration order.
Definition: uniform_sorter.cc:9
impeller::compiler::StringToShaderStage
static std::string StringToShaderStage(std::string str)
Definition: reflector.cc:90
impeller::SPrintF
std::string SPrintF(const char *format,...)
Definition: strings.cc:12
impeller::compiler::Reflector::Options::shader_name
std::string shader_name
Definition: reflector.h:54
impeller::compiler::kReflectionCCTemplate
constexpr std::string_view kReflectionCCTemplate
Definition: code_gen_template.h:198
impeller::compiler::CompilerBackend::ExtendedResourceIndex::kSecondary
@ kSecondary
impeller::compiler::ExecutionModelToString
static std::string ExecutionModelToString(spv::ExecutionModel model)
Definition: reflector.cc:73
impeller::compiler::CompilerBackend::GetExtendedMSLResourceBinding
uint32_t GetExtendedMSLResourceBinding(ExtendedResourceIndex index, spirv_cross::ID id) const
Definition: compiler_backend.cc:32
impeller::IPoint32
TPoint< int32_t > IPoint32
Definition: point.h:308
impeller::compiler::CompilerBackend::Type::kGLSL
@ kGLSL
impeller::compiler::CompilerBackend::GetType
Type GetType() const
Definition: compiler_backend.cc:105
impeller::compiler::VertexType::type_name
std::string type_name
Definition: reflector.cc:974
impeller::compiler::ReadKnownScalarType
static std::optional< KnownType > ReadKnownScalarType(spirv_cross::SPIRType::BaseType type)
Definition: reflector.cc:532
impeller::compiler::Reflector::~Reflector
~Reflector()
impeller::compiler::Reflector::Options
Definition: reflector.h:51
impeller::compiler::kReflectionHeaderTemplate
constexpr std::string_view kReflectionHeaderTemplate
Definition: code_gen_template.h:10
impeller::compiler::ToString
static std::string ToString(CompilerBackend::Type type)
Definition: reflector.cc:368
impeller::compiler::Reflector::GetRuntimeStageData
std::shared_ptr< RuntimeStageData > GetRuntimeStageData() const
Definition: reflector.cc:179
utilities.h
strings.h
impeller::compiler::CompilerBackend::GetCompiler
spirv_cross::Compiler * GetCompiler()
Definition: compiler_backend.cc:51
impeller::compiler::VertexType::base_type_name
std::string base_type_name
Definition: reflector.cc:975
scalar.h
VALIDATION_LOG
#define VALIDATION_LOG
Definition: validation.h:60
impeller::compiler::CompilerBackend::Type::kSkSL
@ kSkSL
half.h
std
Definition: comparable.h:98
impeller::compiler::CompilerBackend::Type::kMSL
@ kMSL
impeller::compiler::BaseTypeToString
static std::string BaseTypeToString(spirv_cross::SPIRType::BaseType type)
Definition: reflector.cc:29
impeller::compiler::Reflector::GetReflectionJSON
std::shared_ptr< fml::Mapping > GetReflectionJSON() const
Definition: reflector.cc:158
impeller::compiler::CompilerBackend::ExtendedResourceIndex::kPrimary
@ kPrimary
impeller::compiler::Reflector::Options::entry_point_name
std::string entry_point_name
Definition: reflector.h:53
impeller::compiler::Reflector::IsValid
bool IsValid() const
Definition: reflector.cc:154
impeller::compiler::GetReflectedStructSize
static size_t GetReflectedStructSize(const std::vector< StructMember > &members)
Get the reflected struct size. In the vast majority of the cases, this is the same as the declared st...
Definition: reflector.cc:579
impeller
Definition: aiks_context.cc:10
impeller::compiler::TypeNameWithPaddingOfSize
static std::string TypeNameWithPaddingOfSize(size_t size)
Definition: reflector.cc:521
types.h
impeller::compiler::VertexType
Definition: reflector.cc:973