Flutter Impeller
reflector.cc
Go to the documentation of this file.
1 // Copyright 2013 The Flutter Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 // FLUTTER_NOLINT: https://github.com/flutter/flutter/issues/105732
6 
8 
9 #include <atomic>
10 #include <format>
11 #include <optional>
12 #include <set>
13 #include <sstream>
14 
15 #include "flutter/fml/logging.h"
16 #include "fml/backtrace.h"
17 #include "impeller/base/strings.h"
25 #include "impeller/geometry/half.h"
29 #include "runtime_stage_types_flatbuffers.h"
30 #include "spirv_common.hpp"
31 
32 namespace impeller {
33 namespace compiler {
34 
35 static std::string ExecutionModelToString(spv::ExecutionModel model) {
36  switch (model) {
37  case spv::ExecutionModel::ExecutionModelVertex:
38  return "vertex";
39  case spv::ExecutionModel::ExecutionModelFragment:
40  return "fragment";
41  case spv::ExecutionModel::ExecutionModelGLCompute:
42  return "compute";
43  default:
44  return "unsupported";
45  }
46 }
47 
// Translates a stage name (as produced by ExecutionModelToString) into the
// spelling of the matching ShaderStage enumerator pasted into generated
// reflection code. Unrecognized names map to "ShaderStage::kUnknown".
static std::string StringToShaderStage(const std::string& str) {
  return str == "vertex"     ? "ShaderStage::kVertex"
         : str == "fragment" ? "ShaderStage::kFragment"
         : str == "compute"  ? "ShaderStage::kCompute"
                             : "ShaderStage::kUnknown";
}
63 
// Reflector constructor.
//
// NOTE(review): the opening line of this signature (carrying
// "Reflector::Reflector(... options," for the options_ member initialized
// below) was dropped by the capture this file was recovered from; the
// remaining parameter list and body are intact.
//
// Eagerly generates every reflection artifact (template arguments, the
// reflection header/CC, runtime stage data, and shader bundle data). Any
// failure returns early and leaves is_valid_ false.
    const std::shared_ptr<const spirv_cross::ParsedIR>& ir,
    const std::shared_ptr<fml::Mapping>& shader_data,
    const CompilerBackend& compiler)
    : options_(std::move(options)),
      ir_(ir),
      shader_data_(shader_data),
      compiler_(compiler) {
  // Reflection is impossible without parsed IR and a backend compiler.
  if (!ir_ || !compiler_) {
    return;
  }

  if (auto template_arguments = GenerateTemplateArguments();
      template_arguments.has_value()) {
    template_arguments_ =
        std::make_unique<nlohmann::json>(std::move(template_arguments.value()));
  } else {
    return;
  }

  reflection_header_ = GenerateReflectionHeader();
  if (!reflection_header_) {
    return;
  }

  reflection_cc_ = GenerateReflectionCC();
  if (!reflection_cc_) {
    return;
  }

  // Unlike the artifacts above, a null runtime stage shader is not checked
  // here — presumably because not every target platform has a runtime stage
  // backend (see GetRuntimeStageBackend). TODO confirm.
  runtime_stage_shader_ = GenerateRuntimeStageData();

  shader_bundle_data_ = GenerateShaderBundleData();
  if (!shader_bundle_data_) {
    return;
  }

  is_valid_ = true;
}
103 
// Out-of-line defaulted destructor; keeps destruction of the unique_ptr
// member (template_arguments_) in this translation unit.
Reflector::~Reflector() = default;

// True only when the constructor successfully produced every reflection
// artifact.
bool Reflector::IsValid() const {
  return is_valid_;
}
109 
110 std::shared_ptr<fml::Mapping> Reflector::GetReflectionJSON() const {
111  if (!is_valid_) {
112  return nullptr;
113  }
114 
115  auto json_string =
116  std::make_shared<std::string>(template_arguments_->dump(2u));
117 
118  return std::make_shared<fml::NonOwnedMapping>(
119  reinterpret_cast<const uint8_t*>(json_string->data()),
120  json_string->size(), [json_string](auto, auto) {});
121 }
122 
// Accessor for the generated reflection header contents; may be null if
// construction failed before header generation.
std::shared_ptr<fml::Mapping> Reflector::GetReflectionHeader() const {
  return reflection_header_;
}

// Accessor for the generated reflection translation-unit contents.
std::shared_ptr<fml::Mapping> Reflector::GetReflectionCC() const {
  return reflection_cc_;
}

// Accessor for the runtime stage payload; null when no runtime stage
// backend exists for the target platform.
std::shared_ptr<RuntimeStageData::Shader> Reflector::GetRuntimeStageShaderData()
    const {
  return runtime_stage_shader_;
}

// Accessor for the Flutter GPU shader bundle payload.
std::shared_ptr<ShaderBundleData> Reflector::GetShaderBundleData() const {
  return shader_bundle_data_;
}
139 
// Builds the JSON document handed to the inja templates that emit the
// reflection header and translation unit. Returns std::nullopt whenever
// the shader interface cannot be completely reflected.
std::optional<nlohmann::json> Reflector::GenerateTemplateArguments() const {
  nlohmann::json root;

  // Exactly one entrypoint per compiled shader is supported.
  const auto& entrypoints = compiler_->get_entry_points_and_stages();
  if (entrypoints.size() != 1) {
    VALIDATION_LOG << "Incorrect number of entrypoints in the shader. Found "
                   << entrypoints.size() << " but expected 1.";
    return std::nullopt;
  }

  auto execution_model = entrypoints.front().execution_model;
  {
    root["entrypoint"] = options_.entry_point_name;
    root["shader_name"] = options_.shader_name;
    root["shader_stage"] = ExecutionModelToString(execution_model);
    root["header_file_name"] = options_.header_file_name;
  }

  const auto shader_resources = compiler_->get_shader_resources();

  // Subpass Inputs.
  {
    auto& subpass_inputs = root["subpass_inputs"] = nlohmann::json::array_t{};
    if (auto subpass_inputs_json =
            ReflectResources(shader_resources.subpass_inputs);
        subpass_inputs_json.has_value()) {
      // Iterate by value on purpose: each element is tagged with its
      // descriptor type before being moved into the output array.
      for (auto subpass_input : subpass_inputs_json.value()) {
        subpass_input["descriptor_type"] = "DescriptorType::kInputAttachment";
        subpass_inputs.emplace_back(std::move(subpass_input));
      }
    } else {
      return std::nullopt;
    }
  }

  // Uniform and storage buffers (both land in "buffers", distinguished by
  // descriptor type).
  {
    auto& buffers = root["buffers"] = nlohmann::json::array_t{};
    if (auto uniform_buffers_json =
            ReflectResources(shader_resources.uniform_buffers);
        uniform_buffers_json.has_value()) {
      for (auto uniform_buffer : uniform_buffers_json.value()) {
        uniform_buffer["descriptor_type"] = "DescriptorType::kUniformBuffer";
        buffers.emplace_back(std::move(uniform_buffer));
      }
    } else {
      return std::nullopt;
    }
    if (auto storage_buffers_json =
            ReflectResources(shader_resources.storage_buffers);
        storage_buffers_json.has_value()) {
      for (auto uniform_buffer : storage_buffers_json.value()) {
        uniform_buffer["descriptor_type"] = "DescriptorType::kStorageBuffer";
        buffers.emplace_back(std::move(uniform_buffer));
      }
    } else {
      return std::nullopt;
    }
  }

  // Plain (non-block) GL uniforms.
  {
    auto& uniforms = root["uniforms"] = nlohmann::json::array_t{};
    if (auto uniforms_json =
            ReflectResources(shader_resources.gl_plain_uniforms);
        uniforms_json.has_value()) {
      for (auto uniform : uniforms_json.value()) {
        uniform["descriptor_type"] = "DescriptorType::kUniform";
        uniforms.emplace_back(std::move(uniform));
      }
    } else {
      return std::nullopt;
    }
  }

  // Stage inputs. Byte offsets are only computed for vertex stages, where
  // inputs are fetched from interleaved vertex data.
  {
    auto& stage_inputs = root["stage_inputs"] = nlohmann::json::array_t{};
    if (auto stage_inputs_json = ReflectResources(
            shader_resources.stage_inputs,
            /*compute_offsets=*/execution_model == spv::ExecutionModelVertex);
        stage_inputs_json.has_value()) {
      stage_inputs = std::move(stage_inputs_json.value());
    } else {
      return std::nullopt;
    }
  }

  // Combined image-samplers, separate images, and separate samplers all
  // land in one "sampled_images" array, distinguished by descriptor type.
  {
    auto combined_sampled_images =
        ReflectResources(shader_resources.sampled_images);
    auto images = ReflectResources(shader_resources.separate_images);
    auto samplers = ReflectResources(shader_resources.separate_samplers);
    if (!combined_sampled_images.has_value() || !images.has_value() ||
        !samplers.has_value()) {
      return std::nullopt;
    }
    auto& sampled_images = root["sampled_images"] = nlohmann::json::array_t{};
    for (auto value : combined_sampled_images.value()) {
      value["descriptor_type"] = "DescriptorType::kSampledImage";
      sampled_images.emplace_back(std::move(value));
    }
    for (auto value : images.value()) {
      value["descriptor_type"] = "DescriptorType::kImage";
      sampled_images.emplace_back(std::move(value));
    }
    for (auto value : samplers.value()) {
      value["descriptor_type"] = "DescriptorType::kSampledSampler";
      sampled_images.emplace_back(std::move(value));
    }
  }

  if (auto stage_outputs = ReflectResources(shader_resources.stage_outputs);
      stage_outputs.has_value()) {
    root["stage_outputs"] = std::move(stage_outputs.value());
  } else {
    return std::nullopt;
  }

  // Struct definitions: a per-vertex struct (vertex stages only) plus every
  // interface struct carrying layout offset decorations.
  {
    auto& struct_definitions = root["struct_definitions"] =
        nlohmann::json::array_t{};
    if (entrypoints.front().execution_model ==
            spv::ExecutionModel::ExecutionModelVertex &&
        !shader_resources.stage_inputs.empty()) {
      if (auto struc =
              ReflectPerVertexStructDefinition(shader_resources.stage_inputs);
          struc.has_value()) {
        struct_definitions.emplace_back(EmitStructDefinition(struc.value()));
      } else {
        // If there are stage inputs, it is an error to not generate a per
        // vertex data struct for a vertex like shader stage.
        return std::nullopt;
      }
    }

    std::set<spirv_cross::ID> known_structs;
    ir_->for_each_typed_id<spirv_cross::SPIRType>(
        [&](uint32_t, const spirv_cross::SPIRType& type) {
          if (type.basetype != spirv_cross::SPIRType::BaseType::Struct) {
            return;
          }
          // Skip structs that do not have layout offset decorations.
          // These structs are used internally within the shader and are not
          // part of the shader's interface.
          for (size_t i = 0; i < type.member_types.size(); i++) {
            if (!compiler_->has_member_decoration(type.self, i,
                                                  spv::DecorationOffset)) {
              return;
            }
          }
          if (known_structs.find(type.self) != known_structs.end()) {
            // Iterating over types this way leads to duplicates which may
            // cause duplicate struct definitions.
            return;
          }
          known_structs.insert(type.self);
          if (auto struc = ReflectStructDefinition(type.self);
              struc.has_value()) {
            struct_definitions.emplace_back(
                EmitStructDefinition(struc.value()));
          }
        });
  }

  root["bind_prototypes"] =
      EmitBindPrototypes(shader_resources, execution_model);

  return root;
}
308 
// Renders the reflection header from its inja template.
std::shared_ptr<fml::Mapping> Reflector::GenerateReflectionHeader() const {
  return InflateTemplate(kReflectionHeaderTemplate);
}

// Renders the reflection translation unit from its inja template.
std::shared_ptr<fml::Mapping> Reflector::GenerateReflectionCC() const {
  return InflateTemplate(kReflectionCCTemplate);
}
316 
// Maps a target platform to the runtime-stage flatbuffer backend, or
// std::nullopt for platforms with no runtime stage support.
//
// NOTE(review): all case labels of this switch (embedded source lines
// 320-325 and 327-336) were lost in the capture this file was recovered
// from; only one std::nullopt arm and the trailing FML_UNREACHABLE remain
// visible. Restore from upstream before relying on this body.
static std::optional<RuntimeStageBackend> GetRuntimeStageBackend(
    TargetPlatform target_platform) {
  switch (target_platform) {
      return std::nullopt;
  }
  FML_UNREACHABLE();
}
340 
// Assembles the runtime-stage payload for runtime effects: per-uniform
// descriptions (with Metal float3/float3x3 padding layouts), the Vulkan
// UBO float/padding layout, and vertex input descriptions. Returns nullptr
// when the target platform has no runtime stage backend or the shader is
// malformed.
std::shared_ptr<RuntimeStageData::Shader> Reflector::GenerateRuntimeStageData()
    const {
  auto backend = GetRuntimeStageBackend(options_.target_platform);
  if (!backend.has_value()) {
    return nullptr;
  }

  const auto& entrypoints = compiler_->get_entry_points_and_stages();
  if (entrypoints.size() != 1u) {
    VALIDATION_LOG << "Single entrypoint not found.";
    return nullptr;
  }
  auto data = std::make_unique<RuntimeStageData::Shader>();
  data->entrypoint = options_.entry_point_name;
  data->stage = entrypoints.front().execution_model;
  data->shader = shader_data_;
  data->backend = backend.value();

  // Sort the IR so that the uniforms are in declaration order.
  std::vector<spirv_cross::ID> uniforms =
      SortUniforms(ir_.get(), compiler_.GetCompiler());
  for (auto& sorted_id : uniforms) {
    auto var = ir_->ids[sorted_id].get<spirv_cross::SPIRVariable>();
    const auto spir_type = compiler_->get_type(var.basetype);
    UniformDescription uniform_description;
    uniform_description.name = compiler_->get_name(var.self);
    uniform_description.location = compiler_->get_decoration(
        var.self, spv::Decoration::DecorationLocation);
    uniform_description.binding =
        compiler_->get_decoration(var.self, spv::Decoration::DecorationBinding);
    uniform_description.type = spir_type.basetype;
    uniform_description.rows = spir_type.vecsize;
    uniform_description.columns = spir_type.columns;
    uniform_description.bit_width = spir_type.width;
    uniform_description.array_elements = GetArrayElements(spir_type);

    if (TargetPlatformIsMetal(options_.target_platform) &&
        uniform_description.type == spirv_cross::SPIRType::BaseType::Float) {
      // Metal aligns float3 to 16 bytes.
      // Metal aligns float3x3 COLUMNS to 16 bytes.
      // For float3: Size 12. Padding 4. Stride 16.
      // For float3x3: Size 36. Padding 12 (4 per col). Stride 48.

      if (spir_type.vecsize == 3 &&
          (spir_type.columns == 1 || spir_type.columns == 3)) {
        for (size_t c = 0; c < spir_type.columns; c++) {
          for (size_t v = 0; v < 3; v++) {
            // NOTE(review): the argument of this push_back (presumably
            // fb::PaddingType::kFloat) was lost in this capture.
            uniform_description.padding_layout.push_back(
          }
          // NOTE(review): the argument of this push_back (presumably
          // fb::PaddingType::kPadding) was lost in this capture.
          uniform_description.padding_layout.push_back(
        }
      }
    }

    FML_CHECK(data->backend != RuntimeStageBackend::kVulkan ||
              spir_type.basetype ==
                  spirv_cross::SPIRType::BaseType::SampledImage)
        << "Vulkan runtime effect had unexpected uniforms outside of the "
           "uniform buffer object.";
    data->uniforms.emplace_back(std::move(uniform_description));
  }

  const auto ubos = compiler_->get_shader_resources().uniform_buffers;
  if (data->backend == RuntimeStageBackend::kVulkan && !ubos.empty()) {
    // NOTE(review): the `&&` below looks suspicious — validating "exactly
    // one UBO with the expected name" would normally use `||`; as written,
    // a shader with multiple UBOs whose first is correctly named passes
    // validation. Confirm against upstream before changing.
    if (ubos.size() != 1 && ubos[0].name != RuntimeStage::kVulkanUBOName) {
      VALIDATION_LOG << "Expected a single UBO resource named "
                        "'"
                     // NOTE(review): the streamed expression (presumably
                     // RuntimeStage::kVulkanUBOName) was lost in this
                     // capture.
                     << "' "
                        "for Vulkan runtime stage backend.";
      return nullptr;
    }

    const auto& ubo = ubos[0];

    size_t binding =
        compiler_->get_decoration(ubo.id, spv::Decoration::DecorationBinding);
    auto members = ReadStructMembers(ubo.type_id);
    std::vector<fb::PaddingType> padding_layout;
    std::vector<StructField> struct_fields;
    struct_fields.reserve(members.size());
    size_t float_count = 0;

    // Flatten UBO members into a kFloat/kPadding layout plus field list.
    for (size_t i = 0; i < members.size(); i += 1) {
      const auto& member = members[i];
      std::vector<int> bytes;
      // NOTE(review): the case labels of this switch were lost in this
      // capture. By the arm bodies they appear to be, in order: a padding
      // arm, a float arm, and an unsupported-type arm — confirm against
      // upstream.
      switch (member.underlying_type) {
          size_t padding_count =
              (member.size + sizeof(float) - 1) / sizeof(float);
          while (padding_count > 0) {
            padding_layout.push_back(fb::PaddingType::kPadding);
            padding_count--;
          }
          break;
        }
          StructField field_desc;
          field_desc.name = member.name;
          field_desc.byte_size =
              member.size * member.array_elements.value_or(1);
          struct_fields.push_back(field_desc);
          if (member.array_elements > 1) {
            // For each array element member, insert 1 layout property per
            // byte and 0 layout property per byte of padding
            for (auto i = 0; i < member.array_elements; i++) {
              for (auto j = 0u; j < member.size / sizeof(float); j++) {
                padding_layout.push_back(fb::PaddingType::kFloat);
              }
              for (auto j = 0u; j < member.element_padding / sizeof(float);
                   j++) {
                padding_layout.push_back(fb::PaddingType::kPadding);
              }
            }
          } else {
            size_t member_float_count = member.byte_length / sizeof(float);
            float_count += member_float_count;
            while (member_float_count > 0) {
              padding_layout.push_back(fb::PaddingType::kFloat);
              member_float_count--;
            }
          }
          break;
        }
          VALIDATION_LOG << "Non-floating-type struct member " << member.name
                         << " is not supported.";
          return nullptr;
      }
    }
    data->uniforms.emplace_back(UniformDescription{
        .name = ubo.name,
        .location = binding,
        .binding = binding,
        .type = spirv_cross::SPIRType::Struct,
        .padding_layout = std::move(padding_layout),
        .struct_fields = std::move(struct_fields),
        .struct_float_count = float_count,
    });
  }

  // We only need to worry about storing vertex attributes.
  if (entrypoints.front().execution_model == spv::ExecutionModelVertex) {
    const auto inputs = compiler_->get_shader_resources().stage_inputs;
    auto input_offsets = ComputeOffsets(inputs);
    for (const auto& input : inputs) {
      std::optional<size_t> offset = GetOffset(input.id, input_offsets);

      const auto type = compiler_->get_type(input.type_id);

      InputDescription input_description;
      input_description.name = input.name;
      input_description.location = compiler_->get_decoration(
          input.id, spv::Decoration::DecorationLocation);
      input_description.set = compiler_->get_decoration(
          input.id, spv::Decoration::DecorationDescriptorSet);
      input_description.binding = compiler_->get_decoration(
          input.id, spv::Decoration::DecorationBinding);
      input_description.type = type.basetype;
      input_description.bit_width = type.width;
      input_description.vec_size = type.vecsize;
      input_description.columns = type.columns;
      input_description.offset = offset.value_or(0u);
      data->inputs.emplace_back(std::move(input_description));
    }
  }

  return data;
}
512 
// Assembles the Flutter GPU shader bundle payload: uniform structs (with
// padding members filtered out), sampled-image bindings, and — for vertex
// stages — input descriptions. Returns nullptr on a malformed shader.
std::shared_ptr<ShaderBundleData> Reflector::GenerateShaderBundleData() const {
  const auto& entrypoints = compiler_->get_entry_points_and_stages();
  if (entrypoints.size() != 1u) {
    VALIDATION_LOG << "Single entrypoint not found.";
    return nullptr;
  }
  auto data = std::make_shared<ShaderBundleData>(
      options_.entry_point_name,            //
      entrypoints.front().execution_model,  //
      options_.target_platform              //
  );
  data->SetShaderData(shader_data_);

  const auto uniforms = compiler_->get_shader_resources().uniform_buffers;
  for (const auto& uniform : uniforms) {
    ShaderBundleData::ShaderUniformStruct uniform_struct;
    uniform_struct.name = uniform.name;
    // NOTE(review): the argument list of this call (embedded source line
    // 531) was lost in this capture.
    uniform_struct.ext_res_0 = compiler_.GetExtendedMSLResourceBinding(
    uniform_struct.set = compiler_->get_decoration(
        uniform.id, spv::Decoration::DecorationDescriptorSet);
    uniform_struct.binding = compiler_->get_decoration(
        uniform.id, spv::Decoration::DecorationBinding);

    const auto type = compiler_->get_type(uniform.type_id);
    if (type.basetype != spirv_cross::SPIRType::BaseType::Struct) {
      std::cerr << "Error: Uniform \"" << uniform.name
                << "\" is not a struct. All Flutter GPU shader uniforms must "
                   "be structs."
                << std::endl;
      return nullptr;
    }

    size_t size_in_bytes = 0;
    for (const auto& struct_member : ReadStructMembers(uniform.type_id)) {
      // Padding contributes to the struct size but is not exposed as a
      // field.
      size_in_bytes += struct_member.byte_length;
      if (StringStartsWith(struct_member.name, "_PADDING_")) {
        continue;
      }
      ShaderBundleData::ShaderUniformStructField uniform_struct_field;
      uniform_struct_field.name = struct_member.name;
      uniform_struct_field.type = struct_member.base_type;
      uniform_struct_field.offset_in_bytes = struct_member.offset;
      uniform_struct_field.element_size_in_bytes = struct_member.size;
      uniform_struct_field.total_size_in_bytes = struct_member.byte_length;
      uniform_struct_field.array_elements = struct_member.array_elements;
      uniform_struct.fields.push_back(uniform_struct_field);
    }
    uniform_struct.size_in_bytes = size_in_bytes;

    data->AddUniformStruct(uniform_struct);
  }

  const auto sampled_images = compiler_->get_shader_resources().sampled_images;
  for (const auto& image : sampled_images) {
    ShaderBundleData::ShaderUniformTexture uniform_texture;
    uniform_texture.name = image.name;
    // NOTE(review): the argument list of this call (embedded source line
    // 571) was lost in this capture.
    uniform_texture.ext_res_0 = compiler_.GetExtendedMSLResourceBinding(
    uniform_texture.set = compiler_->get_decoration(
        image.id, spv::Decoration::DecorationDescriptorSet);
    uniform_texture.binding =
        compiler_->get_decoration(image.id, spv::Decoration::DecorationBinding);
    data->AddUniformTexture(uniform_texture);
  }

  // We only need to worry about storing vertex attributes.
  if (entrypoints.front().execution_model == spv::ExecutionModelVertex) {
    const auto inputs = compiler_->get_shader_resources().stage_inputs;
    auto input_offsets = ComputeOffsets(inputs);
    for (const auto& input : inputs) {
      std::optional<size_t> offset = GetOffset(input.id, input_offsets);

      const auto type = compiler_->get_type(input.type_id);

      InputDescription input_description;
      input_description.name = input.name;
      input_description.location = compiler_->get_decoration(
          input.id, spv::Decoration::DecorationLocation);
      input_description.set = compiler_->get_decoration(
          input.id, spv::Decoration::DecorationDescriptorSet);
      input_description.binding = compiler_->get_decoration(
          input.id, spv::Decoration::DecorationBinding);
      input_description.type = type.basetype;
      input_description.bit_width = type.width;
      input_description.vec_size = type.vecsize;
      input_description.columns = type.columns;
      input_description.offset = offset.value_or(0u);
      data->AddInputDescription(std::move(input_description));
    }
  }

  return data;
}
607 
608 std::optional<uint32_t> Reflector::GetArrayElements(
609  const spirv_cross::SPIRType& type) const {
610  if (type.array.empty()) {
611  return std::nullopt;
612  }
613  FML_CHECK(type.array.size() == 1)
614  << "Multi-dimensional arrays are not supported.";
615  FML_CHECK(type.array_size_literal.front())
616  << "Must use a literal for array sizes.";
617  return type.array.front();
618 }
619 
// Human-readable name of the compiler backend, surfaced to templates via
// the get_generator_name callback in InflateTemplate.
//
// NOTE(review): the case labels of this switch (embedded source lines 622,
// 624, 626, and 628 — presumably the CompilerBackend::Type enumerators)
// were lost in the capture this file was recovered from; only the returned
// strings remain visible.
static std::string ToString(CompilerBackend::Type type) {
  switch (type) {
      return "Metal Shading Language";
      return "OpenGL Shading Language";
      return "OpenGL Shading Language (Relaxed Vulkan Semantics)";
      return "SkSL Shading Language";
  }
  FML_UNREACHABLE();
}
633 
// Renders an inja template against the generated template arguments. The
// environment exposes three callbacks to templates: camel_case,
// to_shader_stage, and get_generator_name. The rendered string is kept
// alive by the returned mapping's release callback.
std::shared_ptr<fml::Mapping> Reflector::InflateTemplate(
    std::string_view tmpl) const {
  inja::Environment env;
  // Trim block-tag whitespace so the generated code stays readable.
  env.set_trim_blocks(true);
  env.set_lstrip_blocks(true);

  env.add_callback("camel_case", 1u, [](inja::Arguments& args) {
    return ToCamelCase(args.at(0u)->get<std::string>());
  });

  env.add_callback("to_shader_stage", 1u, [](inja::Arguments& args) {
    return StringToShaderStage(args.at(0u)->get<std::string>());
  });

  // The backend type is captured by value so the lambda does not depend on
  // this Reflector's lifetime.
  env.add_callback("get_generator_name", 0u,
                   [type = compiler_.GetType()](inja::Arguments& args) {
                     return ToString(type);
                   });

  auto inflated_template =
      std::make_shared<std::string>(env.render(tmpl, *template_arguments_));

  return std::make_shared<fml::NonOwnedMapping>(
      reinterpret_cast<const uint8_t*>(inflated_template->data()),
      inflated_template->size(), [inflated_template](auto, auto) {});
}
660 
661 std::vector<size_t> Reflector::ComputeOffsets(
662  const spirv_cross::SmallVector<spirv_cross::Resource>& resources) const {
663  std::vector<size_t> offsets(resources.size(), 0);
664  if (resources.size() == 0) {
665  return offsets;
666  }
667  for (const auto& resource : resources) {
668  const auto type = compiler_->get_type(resource.type_id);
669  auto location = compiler_->get_decoration(
670  resource.id, spv::Decoration::DecorationLocation);
671  // Malformed shader, will be caught later on.
672  if (location >= resources.size() || location < 0) {
673  location = 0;
674  }
675  offsets[location] = (type.width * type.vecsize) / 8;
676  }
677  for (size_t i = 1; i < resources.size(); i++) {
678  offsets[i] += offsets[i - 1];
679  }
680  for (size_t i = resources.size() - 1; i > 0; i--) {
681  offsets[i] = offsets[i - 1];
682  }
683  offsets[0] = 0;
684 
685  return offsets;
686 }
687 
688 std::optional<size_t> Reflector::GetOffset(
689  spirv_cross::ID id,
690  const std::vector<size_t>& offsets) const {
691  uint32_t location =
692  compiler_->get_decoration(id, spv::Decoration::DecorationLocation);
693  if (location >= offsets.size()) {
694  return std::nullopt;
695  }
696  return offsets[location];
697 }
698 
// Reflects a single shader resource (decorations, bindings, and its type)
// into the JSON object consumed by the reflection templates; std::nullopt
// when the resource's type fails to reflect.
std::optional<nlohmann::json::object_t> Reflector::ReflectResource(
    const spirv_cross::Resource& resource,
    std::optional<size_t> offset) const {
  nlohmann::json::object_t result;

  result["name"] = resource.name;
  result["descriptor_set"] = compiler_->get_decoration(
      resource.id, spv::Decoration::DecorationDescriptorSet);
  result["binding"] = compiler_->get_decoration(
      resource.id, spv::Decoration::DecorationBinding);
  result["set"] = compiler_->get_decoration(
      resource.id, spv::Decoration::DecorationDescriptorSet);
  result["location"] = compiler_->get_decoration(
      resource.id, spv::Decoration::DecorationLocation);
  result["index"] =
      compiler_->get_decoration(resource.id, spv::Decoration::DecorationIndex);
  // NOTE(review): the argument lists of the two
  // GetExtendedMSLResourceBinding calls below (embedded source lines 716
  // and 718) were lost in the capture this file was recovered from.
  result["ext_res_0"] = compiler_.GetExtendedMSLResourceBinding(
  result["ext_res_1"] = compiler_.GetExtendedMSLResourceBinding(
  result["relaxed_precision"] =
      compiler_->get_decoration(
          resource.id, spv::Decoration::DecorationRelaxedPrecision) == 1;
  result["offset"] = offset.value_or(0u);
  auto type = ReflectType(resource.type_id);
  if (!type.has_value()) {
    return std::nullopt;
  }
  result["type"] = std::move(type.value());
  return result;
}
730 
// Reflects a SPIR-V type into JSON: its scalar/vector/matrix shape plus,
// for struct types, a per-member breakdown (including any synthesized
// padding members produced by ReadStructMembers).
std::optional<nlohmann::json::object_t> Reflector::ReflectType(
    const spirv_cross::TypeID& type_id) const {
  nlohmann::json::object_t result;

  const auto type = compiler_->get_type(type_id);

  result["type_name"] = StructMember::BaseTypeToString(type.basetype);
  result["bit_width"] = type.width;
  result["vec_size"] = type.vecsize;
  result["columns"] = type.columns;
  auto& members = result["members"] = nlohmann::json::array_t{};
  if (type.basetype == spirv_cross::SPIRType::BaseType::Struct) {
    for (const auto& struct_member : ReadStructMembers(type_id)) {
      auto member = nlohmann::json::object_t{};
      member["name"] = struct_member.name;
      member["type"] = struct_member.type;
      member["base_type"] =
          StructMember::BaseTypeToString(struct_member.base_type);
      member["offset"] = struct_member.offset;
      member["size"] = struct_member.size;
      member["byte_length"] = struct_member.byte_length;
      if (struct_member.array_elements.has_value()) {
        member["array_elements"] = struct_member.array_elements.value();
      } else {
        // Non-arrays carry the literal spelling so templates can paste it
        // straight into generated C++.
        member["array_elements"] = "std::nullopt";
      }
      members.emplace_back(std::move(member));
    }
  }

  return result;
}
763 
764 std::optional<nlohmann::json::array_t> Reflector::ReflectResources(
765  const spirv_cross::SmallVector<spirv_cross::Resource>& resources,
766  bool compute_offsets) const {
767  nlohmann::json::array_t result;
768  result.reserve(resources.size());
769  std::vector<size_t> offsets;
770  if (compute_offsets) {
771  offsets = ComputeOffsets(resources);
772  }
773  for (const auto& resource : resources) {
774  std::optional<size_t> maybe_offset = std::nullopt;
775  if (compute_offsets) {
776  maybe_offset = GetOffset(resource.id, offsets);
777  }
778  if (auto reflected = ReflectResource(resource, maybe_offset);
779  reflected.has_value()) {
780  result.emplace_back(std::move(reflected.value()));
781  } else {
782  return std::nullopt;
783  }
784  }
785  return result;
786 }
787 
// Spells the name of the generated Padding<N> helper type for N bytes of
// alignment padding.
static std::string TypeNameWithPaddingOfSize(size_t size) {
  return "Padding<" + std::to_string(size) + ">";
}
793 
// A scalar shader type with a known host-side (native C++) counterpart.
struct KnownType {
  // Native type name spelled into generated reflection code.
  std::string name;
  // Host-side size of that type in bytes.
  size_t byte_size = 0;
};
798 
799 static std::optional<KnownType> ReadKnownScalarType(
800  spirv_cross::SPIRType::BaseType type) {
801  switch (type) {
802  case spirv_cross::SPIRType::BaseType::Boolean:
803  return KnownType{
804  .name = "bool",
805  .byte_size = sizeof(bool),
806  };
807  case spirv_cross::SPIRType::BaseType::Float:
808  return KnownType{
809  .name = "Scalar",
810  .byte_size = sizeof(Scalar),
811  };
812  case spirv_cross::SPIRType::BaseType::Half:
813  return KnownType{
814  .name = "Half",
815  .byte_size = sizeof(Half),
816  };
817  case spirv_cross::SPIRType::BaseType::UInt:
818  return KnownType{
819  .name = "uint32_t",
820  .byte_size = sizeof(uint32_t),
821  };
822  case spirv_cross::SPIRType::BaseType::Int:
823  return KnownType{
824  .name = "int32_t",
825  .byte_size = sizeof(int32_t),
826  };
827  default:
828  break;
829  }
830  return std::nullopt;
831 }
832 
833 //------------------------------------------------------------------------------
834 /// @brief Get the reflected struct size. In the vast majority of the
835 /// cases, this is the same as the declared struct size as given by
836 /// the compiler. But, additional padding may need to be introduced
837 /// after the end of the struct to keep in line with the alignment
838 /// requirement of the individual struct members. This method
839 /// figures out the actual size of the reflected struct that can be
840 /// referenced in native code.
841 ///
842 /// @param[in] members The members
843 ///
844 /// @return The reflected structure size.
845 ///
846 static size_t GetReflectedStructSize(const std::vector<StructMember>& members) {
847  auto struct_size = 0u;
848  for (const auto& member : members) {
849  struct_size += member.byte_length;
850  }
851  return struct_size;
852 }
853 
854 std::vector<StructMember> Reflector::ReadStructMembers(
855  const spirv_cross::TypeID& type_id) const {
856  const auto& struct_type = compiler_->get_type(type_id);
857  FML_CHECK(struct_type.basetype == spirv_cross::SPIRType::BaseType::Struct);
858 
859  std::vector<StructMember> result;
860 
861  size_t current_byte_offset = 0;
862  size_t max_member_alignment = 0;
863 
864  for (size_t i = 0; i < struct_type.member_types.size(); i++) {
865  const spirv_cross::SPIRType& member =
866  compiler_->get_type(struct_type.member_types[i]);
867  const uint32_t struct_member_offset =
868  compiler_->type_struct_member_offset(struct_type, i);
869  std::optional<uint32_t> array_elements = GetArrayElements(member);
870 
871  if (struct_member_offset > current_byte_offset) {
872  const size_t alignment_pad = struct_member_offset - current_byte_offset;
873  result.emplace_back(StructMember{
874  /*p_type=*/TypeNameWithPaddingOfSize(alignment_pad),
875  /*p_base_type=*/spirv_cross::SPIRType::BaseType::Void,
876  /*p_name=*/
877  std::format("_PADDING_{}_", GetMemberNameAtIndex(struct_type, i)),
878  /*p_offset=*/current_byte_offset,
879  /*p_size=*/alignment_pad,
880  /*p_byte_length=*/alignment_pad,
881  /*p_array_elements=*/std::nullopt,
882  /*p_element_padding=*/0,
883  });
884  current_byte_offset += alignment_pad;
885  }
886 
887  max_member_alignment =
888  std::max<size_t>(max_member_alignment,
889  (member.width / 8) * member.columns * member.vecsize);
890 
891  FML_CHECK(current_byte_offset == struct_member_offset);
892 
893  // A user defined struct.
894  if (member.basetype == spirv_cross::SPIRType::BaseType::Struct) {
895  const size_t size =
896  GetReflectedStructSize(ReadStructMembers(member.self));
897  uint32_t stride = GetArrayStride<0>(struct_type, member, i);
898  if (stride == 0) {
899  stride = size;
900  }
901  uint32_t element_padding = stride - size;
902  result.emplace_back(StructMember{
903  /*p_type=*/compiler_->get_name(member.self),
904  /*p_base_type=*/member.basetype,
905  /*p_name=*/GetMemberNameAtIndex(struct_type, i),
906  /*p_offset=*/struct_member_offset,
907  /*p_size=*/size,
908  /*p_byte_length=*/stride * array_elements.value_or(1),
909  /*p_array_elements=*/array_elements,
910  /*p_element_padding=*/element_padding,
911  });
912  current_byte_offset += stride * array_elements.value_or(1);
913  continue;
914  }
915 
916  // Mat2
917  if (member.basetype == spirv_cross::SPIRType::BaseType::Float &&
918  member.width == 32 && member.columns == 2 && member.vecsize == 2) {
919  // Mat2's are packaged like 2 vec2's, ie
920  // {val, val, padding, padding, val, val, padding, padding}.
921  uint32_t count = array_elements.value_or(1) * 2;
922  uint32_t stride = 16;
923  uint32_t total_length = stride * count;
924 
925  result.emplace_back(StructMember{
926  /*p_type=*/"Mat2",
927  /*p_base_type=*/spirv_cross::SPIRType::BaseType::Float,
928  /*p_name=*/GetMemberNameAtIndex(struct_type, i),
929  /*p_offset=*/struct_member_offset,
930  /*p_size=*/sizeof(Point),
931  /*p_byte_length=*/total_length,
932  /*p_array_elements=*/count,
933  /*p_element_padding=*/8,
934  });
935  current_byte_offset += total_length;
936  continue;
937  }
938 
939  // Tightly packed 4x4 Matrix is special cased as we know how to work with
940  // those.
941  if (member.basetype == spirv_cross::SPIRType::BaseType::Float && //
942  member.width == sizeof(Scalar) * 8 && //
943  member.columns == 4 && //
944  member.vecsize == 4 //
945  ) {
946  uint32_t stride = GetArrayStride<sizeof(Matrix)>(struct_type, member, i);
947  uint32_t element_padding = stride - sizeof(Matrix);
948  result.emplace_back(StructMember{
949  /*p_type=*/"Matrix",
950  /*p_base_type=*/member.basetype,
951  /*p_name=*/GetMemberNameAtIndex(struct_type, i),
952  /*p_offset=*/struct_member_offset,
953  /*p_size=*/sizeof(Matrix),
954  /*p_byte_length=*/stride * array_elements.value_or(1),
955  /*p_array_elements=*/array_elements,
956  /*p_element_padding=*/element_padding,
957  });
958  current_byte_offset += stride * array_elements.value_or(1);
959  continue;
960  }
961 
962  // Tightly packed UintPoint32 (uvec2)
963  if (member.basetype == spirv_cross::SPIRType::BaseType::UInt && //
964  member.width == sizeof(uint32_t) * 8 && //
965  member.columns == 1 && //
966  member.vecsize == 2 //
967  ) {
968  uint32_t stride =
969  GetArrayStride<sizeof(UintPoint32)>(struct_type, member, i);
970  uint32_t element_padding = stride - sizeof(UintPoint32);
971  result.emplace_back(StructMember{
972  /*p_type=*/"UintPoint32",
973  /*p_base_type=*/member.basetype,
974  /*p_name=*/GetMemberNameAtIndex(struct_type, i),
975  /*p_offset=*/struct_member_offset,
976  /*p_size=*/sizeof(UintPoint32),
977  /*p_byte_length=*/stride * array_elements.value_or(1),
978  /*p_array_elements=*/array_elements,
979  /*p_element_padding=*/element_padding,
980  });
981  current_byte_offset += stride * array_elements.value_or(1);
982  continue;
983  }
984 
985  // Tightly packed UintPoint32 (ivec2)
986  if (member.basetype == spirv_cross::SPIRType::BaseType::Int && //
987  member.width == sizeof(int32_t) * 8 && //
988  member.columns == 1 && //
989  member.vecsize == 2 //
990  ) {
991  uint32_t stride =
992  GetArrayStride<sizeof(IPoint32)>(struct_type, member, i);
993  uint32_t element_padding = stride - sizeof(IPoint32);
994  result.emplace_back(StructMember{
995  /*p_type=*/"IPoint32",
996  /*p_base_type=*/member.basetype,
997  /*p_name=*/GetMemberNameAtIndex(struct_type, i),
998  /*p_offset=*/struct_member_offset,
999  /*p_size=*/sizeof(IPoint32),
1000  /*p_byte_length=*/stride * array_elements.value_or(1),
1001  /*p_array_elements=*/array_elements,
1002  /*p_element_padding=*/element_padding,
1003  });
1004  current_byte_offset += stride * array_elements.value_or(1);
1005  continue;
1006  }
1007 
1008  // Tightly packed Point (vec2).
1009  if (member.basetype == spirv_cross::SPIRType::BaseType::Float && //
1010  member.width == sizeof(float) * 8 && //
1011  member.columns == 1 && //
1012  member.vecsize == 2 //
1013  ) {
1014  uint32_t stride = GetArrayStride<sizeof(Point)>(struct_type, member, i);
1015  uint32_t element_padding = stride - sizeof(Point);
1016  result.emplace_back(StructMember{
1017  /*p_type=*/"Point",
1018  /*p_base_type=*/member.basetype,
1019  /*p_name=*/GetMemberNameAtIndex(struct_type, i),
1020  /*p_offset=*/struct_member_offset,
1021  /*p_size=*/sizeof(Point),
1022  /*p_byte_length=*/stride * array_elements.value_or(1),
1023  /*p_array_elements=*/array_elements,
1024  /*p_element_padding=*/element_padding,
1025  });
1026  current_byte_offset += stride * array_elements.value_or(1);
1027  continue;
1028  }
1029 
1030  // Tightly packed Vector3.
1031  if (member.basetype == spirv_cross::SPIRType::BaseType::Float && //
1032  member.width == sizeof(float) * 8 && //
1033  member.columns == 1 && //
1034  member.vecsize == 3 //
1035  ) {
1036  uint32_t stride = GetArrayStride<sizeof(Vector3)>(struct_type, member, i);
1037  uint32_t element_padding = stride - sizeof(Vector3);
1038  result.emplace_back(StructMember{
1039  /*p_type=*/"Vector3",
1040  /*p_base_type=*/member.basetype,
1041  /*p_name=*/GetMemberNameAtIndex(struct_type, i),
1042  /*p_offset=*/struct_member_offset,
1043  /*p_size=*/sizeof(Vector3),
1044  /*p_byte_length=*/stride * array_elements.value_or(1),
1045  /*p_array_elements=*/array_elements,
1046  /*p_element_padding=*/element_padding,
1047  });
1048  current_byte_offset += stride * array_elements.value_or(1);
1049  continue;
1050  }
1051 
1052  // Tightly packed Vector4.
1053  if (member.basetype == spirv_cross::SPIRType::BaseType::Float && //
1054  member.width == sizeof(float) * 8 && //
1055  member.columns == 1 && //
1056  member.vecsize == 4 //
1057  ) {
1058  uint32_t stride = GetArrayStride<sizeof(Vector4)>(struct_type, member, i);
1059  uint32_t element_padding = stride - sizeof(Vector4);
1060  result.emplace_back(StructMember{
1061  /*p_type=*/"Vector4",
1062  /*p_base_type=*/member.basetype,
1063  /*p_name=*/GetMemberNameAtIndex(struct_type, i),
1064  /*p_offset=*/struct_member_offset,
1065  /*p_size=*/sizeof(Vector4),
1066  /*p_byte_length=*/stride * array_elements.value_or(1),
1067  /*p_array_elements=*/array_elements,
1068  /*p_element_padding=*/element_padding,
1069  });
1070  current_byte_offset += stride * array_elements.value_or(1);
1071  continue;
1072  }
1073 
1074  // Tightly packed half Point (vec2).
1075  if (member.basetype == spirv_cross::SPIRType::BaseType::Half && //
1076  member.width == sizeof(Half) * 8 && //
1077  member.columns == 1 && //
1078  member.vecsize == 2 //
1079  ) {
1080  uint32_t stride =
1081  GetArrayStride<sizeof(HalfVector2)>(struct_type, member, i);
1082  uint32_t element_padding = stride - sizeof(HalfVector2);
1083  result.emplace_back(StructMember{
1084  /*p_type=*/"HalfVector2",
1085  /*p_base_type=*/member.basetype,
1086  /*p_name=*/GetMemberNameAtIndex(struct_type, i),
1087  /*p_offset=*/struct_member_offset,
1088  /*p_size=*/sizeof(HalfVector2),
1089  /*p_byte_length=*/stride * array_elements.value_or(1),
1090  /*p_array_elements=*/array_elements,
1091  /*p_element_padding=*/element_padding,
1092  });
1093  current_byte_offset += stride * array_elements.value_or(1);
1094  continue;
1095  }
1096 
1097  // Tightly packed Half Float Vector3.
1098  if (member.basetype == spirv_cross::SPIRType::BaseType::Half && //
1099  member.width == sizeof(Half) * 8 && //
1100  member.columns == 1 && //
1101  member.vecsize == 3 //
1102  ) {
1103  uint32_t stride =
1104  GetArrayStride<sizeof(HalfVector3)>(struct_type, member, i);
1105  uint32_t element_padding = stride - sizeof(HalfVector3);
1106  result.emplace_back(StructMember{
1107  /*p_type=*/"HalfVector3",
1108  /*p_base_type=*/member.basetype,
1109  /*p_name=*/GetMemberNameAtIndex(struct_type, i),
1110  /*p_offset=*/struct_member_offset,
1111  /*p_size=*/sizeof(HalfVector3),
1112  /*p_byte_length=*/stride * array_elements.value_or(1),
1113  /*p_array_elements=*/array_elements,
1114  /*p_element_padding=*/element_padding,
1115  });
1116  current_byte_offset += stride * array_elements.value_or(1);
1117  continue;
1118  }
1119 
1120  // Tightly packed Half Float Vector4.
1121  if (member.basetype == spirv_cross::SPIRType::BaseType::Half && //
1122  member.width == sizeof(Half) * 8 && //
1123  member.columns == 1 && //
1124  member.vecsize == 4 //
1125  ) {
1126  uint32_t stride =
1127  GetArrayStride<sizeof(HalfVector4)>(struct_type, member, i);
1128  uint32_t element_padding = stride - sizeof(HalfVector4);
1129  result.emplace_back(StructMember{
1130  /*p_type=*/"HalfVector4",
1131  /*p_base_type=*/member.basetype,
1132  /*p_name=*/GetMemberNameAtIndex(struct_type, i),
1133  /*p_offset=*/struct_member_offset,
1134  /*p_size=*/sizeof(HalfVector4),
1135  /*p_byte_length=*/stride * array_elements.value_or(1),
1136  /*p_array_elements=*/array_elements,
1137  /*p_element_padding=*/element_padding,
1138  });
1139  current_byte_offset += stride * array_elements.value_or(1);
1140  continue;
1141  }
1142 
1143  // Other isolated scalars (like bool, int, float/Scalar, etc..).
1144  {
1145  auto maybe_known_type = ReadKnownScalarType(member.basetype);
1146  if (maybe_known_type.has_value() && //
1147  member.columns == 1 && //
1148  member.vecsize == 1 //
1149  ) {
1150  uint32_t stride = GetArrayStride<0>(struct_type, member, i);
1151  if (stride == 0) {
1152  stride = maybe_known_type.value().byte_size;
1153  }
1154  uint32_t element_padding = stride - maybe_known_type.value().byte_size;
1155  // Add the type directly.
1156  result.emplace_back(StructMember{
1157  /*p_type=*/maybe_known_type.value().name,
1158  /*p_base_type=*/member.basetype,
1159  /*p_name=*/GetMemberNameAtIndex(struct_type, i),
1160  /*p_offset=*/struct_member_offset,
1161  /*p_size=*/maybe_known_type.value().byte_size,
1162  /*p_byte_length=*/stride * array_elements.value_or(1),
1163  /*p_array_elements=*/array_elements,
1164  /*p_element_padding=*/element_padding,
1165  });
1166  current_byte_offset += stride * array_elements.value_or(1);
1167  continue;
1168  }
1169  }
1170 
1171  // Catch all for unknown types. Just add the necessary padding to the struct
1172  // and move on.
1173  {
1174  const size_t size = (member.width * member.columns * member.vecsize) / 8u;
1175  uint32_t stride = GetArrayStride<0>(struct_type, member, i);
1176  if (stride == 0) {
1177  stride = size;
1178  }
1179  size_t element_padding = stride - size;
1180  result.emplace_back(StructMember{
1181  /*p_type=*/TypeNameWithPaddingOfSize(size),
1182  /*p_base_type=*/member.basetype,
1183  /*p_name=*/GetMemberNameAtIndex(struct_type, i),
1184  /*p_offset=*/struct_member_offset,
1185  /*p_size=*/size,
1186  /*p_byte_length=*/stride * array_elements.value_or(1),
1187  /*p_array_elements=*/array_elements,
1188  /*p_element_padding=*/element_padding,
1189  });
1190  current_byte_offset += stride * array_elements.value_or(1);
1191  continue;
1192  }
1193  }
1194 
1195  if (max_member_alignment > 0u) {
1196  const size_t struct_length = current_byte_offset;
1197  {
1198  const size_t excess = struct_length % max_member_alignment;
1199  if (excess != 0) {
1200  const auto padding = max_member_alignment - excess;
1201  result.emplace_back(StructMember{
1202  /*p_type=*/TypeNameWithPaddingOfSize(padding),
1203  /*p_base_type=*/spirv_cross::SPIRType::BaseType::Void,
1204  /*p_name=*/"_PADDING_",
1205  /*p_offset=*/current_byte_offset,
1206  /*p_size=*/padding,
1207  /*p_byte_length=*/padding,
1208  /*p_array_elements=*/std::nullopt,
1209  /*p_element_padding=*/0,
1210  });
1211  }
1212  }
1213  }
1214 
1215  return result;
1216 }
1217 
1218 std::optional<Reflector::StructDefinition> Reflector::ReflectStructDefinition(
1219  const spirv_cross::TypeID& type_id) const {
1220  const auto& type = compiler_->get_type(type_id);
1221  if (type.basetype != spirv_cross::SPIRType::BaseType::Struct) {
1222  return std::nullopt;
1223  }
1224 
1225  const auto struct_name = compiler_->get_name(type_id);
1226  if (struct_name.find("_RESERVED_IDENTIFIER_") != std::string::npos) {
1227  return std::nullopt;
1228  }
1229 
1230  auto struct_members = ReadStructMembers(type_id);
1231  auto reflected_struct_size = GetReflectedStructSize(struct_members);
1232 
1233  StructDefinition struc;
1234  struc.name = struct_name;
1235  struc.byte_length = reflected_struct_size;
1236  struc.members = std::move(struct_members);
1237  return struc;
1238 }
1239 
1240 nlohmann::json::object_t Reflector::EmitStructDefinition(
1241  std::optional<Reflector::StructDefinition> struc) const {
1242  nlohmann::json::object_t result;
1243  result["name"] = struc->name;
1244  result["byte_length"] = struc->byte_length;
1245  auto& members = result["members"] = nlohmann::json::array_t{};
1246  for (const auto& struct_member : struc->members) {
1247  auto& member = members.emplace_back(nlohmann::json::object_t{});
1248  member["name"] = struct_member.name;
1249  member["type"] = struct_member.type;
1250  member["base_type"] =
1251  StructMember::BaseTypeToString(struct_member.base_type);
1252  member["offset"] = struct_member.offset;
1253  member["byte_length"] = struct_member.byte_length;
1254  if (struct_member.array_elements.has_value()) {
1255  member["array_elements"] = struct_member.array_elements.value();
1256  } else {
1257  member["array_elements"] = "std::nullopt";
1258  }
1259  member["element_padding"] = struct_member.element_padding;
1260  }
1261  return result;
1262 }
1263 
// Describes one vertex stage input as it will appear in the generated
// per-vertex data struct. Populated by VertexTypeFromInputResource.
struct VertexType {
  std::string type_name;                      // Emitted type, e.g. "Point".
  spirv_cross::SPIRType::BaseType base_type;  // Underlying SPIR-V base type.
  std::string variable_name;                  // Stage input variable name.
  size_t byte_length = 0u;  // Tightly packed size of the attribute in bytes.
};
1270 
1272  const spirv_cross::Compiler& compiler,
1273  const spirv_cross::Resource* resource) {
1274  VertexType result;
1275  result.variable_name = resource->name;
1276  const auto& type = compiler.get_type(resource->type_id);
1277  result.base_type = type.basetype;
1278  const auto total_size = type.columns * type.vecsize * type.width / 8u;
1279  result.byte_length = total_size;
1280 
1281  if (type.basetype == spirv_cross::SPIRType::BaseType::Float &&
1282  type.columns == 1u && type.vecsize == 2u &&
1283  type.width == sizeof(float) * 8u) {
1284  result.type_name = "Point";
1285  } else if (type.basetype == spirv_cross::SPIRType::BaseType::Float &&
1286  type.columns == 1u && type.vecsize == 4u &&
1287  type.width == sizeof(float) * 8u) {
1288  result.type_name = "Vector4";
1289  } else if (type.basetype == spirv_cross::SPIRType::BaseType::Float &&
1290  type.columns == 1u && type.vecsize == 3u &&
1291  type.width == sizeof(float) * 8u) {
1292  result.type_name = "Vector3";
1293  } else if (type.basetype == spirv_cross::SPIRType::BaseType::Float &&
1294  type.columns == 1u && type.vecsize == 1u &&
1295  type.width == sizeof(float) * 8u) {
1296  result.type_name = "Scalar";
1297  } else if (type.basetype == spirv_cross::SPIRType::BaseType::Int &&
1298  type.columns == 1u && type.vecsize == 1u &&
1299  type.width == sizeof(int32_t) * 8u) {
1300  result.type_name = "int32_t";
1301  } else {
1302  // Catch all unknown padding.
1303  result.type_name = TypeNameWithPaddingOfSize(total_size);
1304  }
1305 
1306  return result;
1307 }
1308 
// Builds the reflected "PerVertexData" struct definition from the vertex
// stage inputs. Returns std::nullopt when the inputs are empty, when input
// locations are duplicated or non-contiguous, or when a location cannot be
// resolved to a resource.
std::optional<Reflector::StructDefinition>
Reflector::ReflectPerVertexStructDefinition(
    const spirv_cross::SmallVector<spirv_cross::Resource>& stage_inputs) const {
  // Avoid emitting a zero sized structure. The code gen templates assume a
  // non-zero size.
  if (stage_inputs.empty()) {
    return std::nullopt;
  }

  // Validate locations are contiguous and there are no duplicates.
  std::set<uint32_t> locations;
  for (const auto& input : stage_inputs) {
    auto location = compiler_->get_decoration(
        input.id, spv::Decoration::DecorationLocation);
    if (locations.count(location) != 0) {
      // Duplicate location. Bail.
      return std::nullopt;
    }
    locations.insert(location);
  }

  // The set is deduplicated, so locations are contiguous exactly when every
  // index in [0, size) is present.
  for (size_t i = 0; i < locations.size(); i++) {
    if (locations.count(i) != 1) {
      // Locations are not contiguous. This usually happens when a single stage
      // input takes multiple input slots. No reflection information can be
      // generated for such cases anyway. So bail! It is up to the shader author
      // to make sure one stage input maps to a single input slot.
      return std::nullopt;
    }
  }

  // Linear scan for the stage input decorated with the queried location.
  auto input_for_location =
      [&](uint32_t queried_location) -> const spirv_cross::Resource* {
    for (const auto& input : stage_inputs) {
      auto location = compiler_->get_decoration(
          input.id, spv::Decoration::DecorationLocation);
      if (location == queried_location) {
        return &input;
      }
    }
    // This really cannot happen with all the validation above.
    FML_UNREACHABLE();
    return nullptr;
  };

  // Lay members out back to back in location order; each member's offset is
  // the running byte length (tightly packed, no inter-attribute padding).
  StructDefinition struc;
  struc.name = "PerVertexData";
  struc.byte_length = 0u;
  for (size_t i = 0; i < locations.size(); i++) {
    auto resource = input_for_location(i);
    if (resource == nullptr) {
      return std::nullopt;
    }
    const auto vertex_type =
        VertexTypeFromInputResource(*compiler_.GetCompiler(), resource);

    auto member = StructMember{
        /*p_type=*/vertex_type.type_name,
        /*p_base_type=*/vertex_type.base_type,
        /*p_name=*/vertex_type.variable_name,
        /*p_offset=*/struc.byte_length,
        /*p_size=*/vertex_type.byte_length,
        /*p_byte_length=*/vertex_type.byte_length,
        /*p_array_elements=*/std::nullopt,
        /*p_element_padding=*/0,
    };
    struc.byte_length += vertex_type.byte_length;
    struc.members.emplace_back(std::move(member));
  }
  return struc;
}
1380 
1381 std::optional<std::string> Reflector::GetMemberNameAtIndexIfExists(
1382  const spirv_cross::SPIRType& parent_type,
1383  size_t index) const {
1384  if (parent_type.type_alias != 0) {
1385  return GetMemberNameAtIndexIfExists(
1386  compiler_->get_type(parent_type.type_alias), index);
1387  }
1388 
1389  if (auto found = ir_->meta.find(parent_type.self); found != ir_->meta.end()) {
1390  const auto& members = found->second.members;
1391  if (index < members.size() && !members[index].alias.empty()) {
1392  return members[index].alias;
1393  }
1394  }
1395  return std::nullopt;
1396 }
1397 
1398 std::string Reflector::GetMemberNameAtIndex(
1399  const spirv_cross::SPIRType& parent_type,
1400  size_t index,
1401  std::string suffix) const {
1402  if (auto name = GetMemberNameAtIndexIfExists(parent_type, index);
1403  name.has_value()) {
1404  return name.value();
1405  }
1406  static std::atomic_size_t sUnnamedMembersID;
1407  std::stringstream stream;
1408  stream << "unnamed_" << sUnnamedMembersID++ << suffix;
1409  return stream.str();
1410 }
1411 
1412 std::vector<Reflector::BindPrototype> Reflector::ReflectBindPrototypes(
1413  const spirv_cross::ShaderResources& resources,
1414  spv::ExecutionModel execution_model) const {
1415  std::vector<BindPrototype> prototypes;
1416  for (const auto& uniform_buffer : resources.uniform_buffers) {
1417  auto& proto = prototypes.emplace_back(BindPrototype{});
1418  proto.return_type = "bool";
1419  proto.name = ToCamelCase(uniform_buffer.name);
1420  proto.descriptor_type = "DescriptorType::kUniformBuffer";
1421  {
1422  std::stringstream stream;
1423  stream << "Bind uniform buffer for resource named " << uniform_buffer.name
1424  << ".";
1425  proto.docstring = stream.str();
1426  }
1427  proto.args.push_back(BindPrototypeArgument{
1428  .type_name = "ResourceBinder&",
1429  .argument_name = "command",
1430  });
1431  proto.args.push_back(BindPrototypeArgument{
1432  .type_name = "BufferView",
1433  .argument_name = "view",
1434  });
1435  }
1436  for (const auto& storage_buffer : resources.storage_buffers) {
1437  auto& proto = prototypes.emplace_back(BindPrototype{});
1438  proto.return_type = "bool";
1439  proto.name = ToCamelCase(storage_buffer.name);
1440  proto.descriptor_type = "DescriptorType::kStorageBuffer";
1441  {
1442  std::stringstream stream;
1443  stream << "Bind storage buffer for resource named " << storage_buffer.name
1444  << ".";
1445  proto.docstring = stream.str();
1446  }
1447  proto.args.push_back(BindPrototypeArgument{
1448  .type_name = "ResourceBinder&",
1449  .argument_name = "command",
1450  });
1451  proto.args.push_back(BindPrototypeArgument{
1452  .type_name = "BufferView",
1453  .argument_name = "view",
1454  });
1455  }
1456  for (const auto& sampled_image : resources.sampled_images) {
1457  auto& proto = prototypes.emplace_back(BindPrototype{});
1458  proto.return_type = "bool";
1459  proto.name = ToCamelCase(sampled_image.name);
1460  proto.descriptor_type = "DescriptorType::kSampledImage";
1461  {
1462  std::stringstream stream;
1463  stream << "Bind combined image sampler for resource named "
1464  << sampled_image.name << ".";
1465  proto.docstring = stream.str();
1466  }
1467  proto.args.push_back(BindPrototypeArgument{
1468  .type_name = "ResourceBinder&",
1469  .argument_name = "command",
1470  });
1471  proto.args.push_back(BindPrototypeArgument{
1472  .type_name = "std::shared_ptr<const Texture>",
1473  .argument_name = "texture",
1474  });
1475  proto.args.push_back(BindPrototypeArgument{
1476  .type_name = "raw_ptr<const Sampler>",
1477  .argument_name = "sampler",
1478  });
1479  }
1480  for (const auto& separate_image : resources.separate_images) {
1481  auto& proto = prototypes.emplace_back(BindPrototype{});
1482  proto.return_type = "bool";
1483  proto.name = ToCamelCase(separate_image.name);
1484  proto.descriptor_type = "DescriptorType::kImage";
1485  {
1486  std::stringstream stream;
1487  stream << "Bind separate image for resource named " << separate_image.name
1488  << ".";
1489  proto.docstring = stream.str();
1490  }
1491  proto.args.push_back(BindPrototypeArgument{
1492  .type_name = "Command&",
1493  .argument_name = "command",
1494  });
1495  proto.args.push_back(BindPrototypeArgument{
1496  .type_name = "std::shared_ptr<const Texture>",
1497  .argument_name = "texture",
1498  });
1499  }
1500  for (const auto& separate_sampler : resources.separate_samplers) {
1501  auto& proto = prototypes.emplace_back(BindPrototype{});
1502  proto.return_type = "bool";
1503  proto.name = ToCamelCase(separate_sampler.name);
1504  proto.descriptor_type = "DescriptorType::kSampler";
1505  {
1506  std::stringstream stream;
1507  stream << "Bind separate sampler for resource named "
1508  << separate_sampler.name << ".";
1509  proto.docstring = stream.str();
1510  }
1511  proto.args.push_back(BindPrototypeArgument{
1512  .type_name = "Command&",
1513  .argument_name = "command",
1514  });
1515  proto.args.push_back(BindPrototypeArgument{
1516  .type_name = "std::shared_ptr<const Sampler>",
1517  .argument_name = "sampler",
1518  });
1519  }
1520  return prototypes;
1521 }
1522 
1523 nlohmann::json::array_t Reflector::EmitBindPrototypes(
1524  const spirv_cross::ShaderResources& resources,
1525  spv::ExecutionModel execution_model) const {
1526  const auto prototypes = ReflectBindPrototypes(resources, execution_model);
1527  nlohmann::json::array_t result;
1528  for (const auto& res : prototypes) {
1529  auto& item = result.emplace_back(nlohmann::json::object_t{});
1530  item["return_type"] = res.return_type;
1531  item["name"] = res.name;
1532  item["docstring"] = res.docstring;
1533  item["descriptor_type"] = res.descriptor_type;
1534  auto& args = item["args"] = nlohmann::json::array_t{};
1535  for (const auto& arg : res.args) {
1536  auto& json_arg = args.emplace_back(nlohmann::json::object_t{});
1537  json_arg["type_name"] = arg.type_name;
1538  json_arg["argument_name"] = arg.argument_name;
1539  }
1540  }
1541  return result;
1542 }
1543 
1544 } // namespace compiler
1545 } // namespace impeller
GLenum type
static const char * kVulkanUBOName
Definition: runtime_stage.h:23
Reflector(Options options, const std::shared_ptr< const spirv_cross::ParsedIR > &ir, const std::shared_ptr< fml::Mapping > &shader_data, const CompilerBackend &compiler)
Definition: reflector.cc:64
std::shared_ptr< fml::Mapping > GetReflectionJSON() const
Definition: reflector.cc:110
std::shared_ptr< fml::Mapping > GetReflectionCC() const
Definition: reflector.cc:127
std::shared_ptr< RuntimeStageData::Shader > GetRuntimeStageShaderData() const
Definition: reflector.cc:131
std::shared_ptr< ShaderBundleData > GetShaderBundleData() const
Definition: reflector.cc:136
std::shared_ptr< fml::Mapping > GetReflectionHeader() const
Definition: reflector.cc:123
uint32_t location
int32_t value
Vector2 padding
The halo padding in source space.
static std::optional< KnownType > ReadKnownScalarType(spirv_cross::SPIRType::BaseType type)
Definition: reflector.cc:799
static std::string TypeNameWithPaddingOfSize(size_t size)
Definition: reflector.cc:788
static VertexType VertexTypeFromInputResource(const spirv_cross::Compiler &compiler, const spirv_cross::Resource *resource)
Definition: reflector.cc:1271
static std::string ToString(CompilerBackend::Type type)
Definition: reflector.cc:620
static size_t GetReflectedStructSize(const std::vector< StructMember > &members)
Get the reflected struct size. In the vast majority of the cases, this is the same as the declared st...
Definition: reflector.cc:846
static std::optional< RuntimeStageBackend > GetRuntimeStageBackend(TargetPlatform target_platform)
Definition: reflector.cc:317
static std::string ExecutionModelToString(spv::ExecutionModel model)
Definition: reflector.cc:35
static std::string StringToShaderStage(const std::string &str)
Definition: reflector.cc:48
bool TargetPlatformIsMetal(TargetPlatform platform)
Definition: types.cc:277
constexpr std::string_view kReflectionHeaderTemplate
std::string ToCamelCase(std::string_view string)
Definition: utilities.cc:38
constexpr std::string_view kReflectionCCTemplate
bool StringStartsWith(const std::string &target, const std::string &prefix)
Definition: utilities.cc:86
std::vector< spirv_cross::ID > SortUniforms(const spirv_cross::ParsedIR *ir, const spirv_cross::Compiler *compiler, std::optional< spirv_cross::SPIRType::BaseType > type_filter, bool include)
Sorts uniform declarations in an IR according to decoration order.
float Scalar
Definition: scalar.h:19
TPoint< Scalar > Point
Definition: point.h:425
TPoint< int32_t > IPoint32
Definition: point.h:427
TPoint< uint32_t > UintPoint32
Definition: point.h:428
constexpr auto kPadding
Definition: comparable.h:95
A storage only class for half precision floating point.
Definition: half.h:41
spirv_cross::Compiler * GetCompiler()
uint32_t GetExtendedMSLResourceBinding(ExtendedResourceIndex index, spirv_cross::ID id) const
static std::string BaseTypeToString(spirv_cross::SPIRType::BaseType type)
Definition: reflector.h:44
spirv_cross::SPIRType::BaseType base_type
Definition: reflector.cc:1266
std::shared_ptr< const fml::Mapping > data
Definition: texture_gles.cc:69
#define VALIDATION_LOG
Definition: validation.h:91