// Flutter Impeller — blit_pass_vk.cc (Vulkan blit pass implementation).
1 // Copyright 2013 The Flutter Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
#include "impeller/renderer/backend/vulkan/blit_pass_vk.h"

#include "impeller/base/validation.h"
#include "impeller/renderer/backend/vulkan/barrier_vk.h"
#include "impeller/renderer/backend/vulkan/command_buffer_vk.h"
#include "impeller/renderer/backend/vulkan/device_buffer_vk.h"
#include "impeller/renderer/backend/vulkan/texture_vk.h"
#include "vulkan/vulkan_core.h"
#include "vulkan/vulkan_enums.hpp"
#include "vulkan/vulkan_structs.hpp"
13 
14 namespace impeller {
15 
16 static void InsertImageMemoryBarrier(const vk::CommandBuffer& cmd,
17  const vk::Image& image,
18  vk::AccessFlags src_access_mask,
19  vk::AccessFlags dst_access_mask,
20  vk::ImageLayout old_layout,
21  vk::ImageLayout new_layout,
22  vk::PipelineStageFlags src_stage,
23  vk::PipelineStageFlags dst_stage,
24  uint32_t base_mip_level,
25  uint32_t mip_level_count = 1u) {
26  if (old_layout == new_layout) {
27  return;
28  }
29 
30  vk::ImageMemoryBarrier barrier;
31  barrier.srcAccessMask = src_access_mask;
32  barrier.dstAccessMask = dst_access_mask;
33  barrier.oldLayout = old_layout;
34  barrier.newLayout = new_layout;
35  barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
36  barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
37  barrier.image = image;
38  barrier.subresourceRange.aspectMask = vk::ImageAspectFlagBits::eColor;
39  barrier.subresourceRange.baseMipLevel = base_mip_level;
40  barrier.subresourceRange.levelCount = mip_level_count;
41  barrier.subresourceRange.baseArrayLayer = 0u;
42  barrier.subresourceRange.layerCount = 1u;
43 
44  cmd.pipelineBarrier(src_stage, dst_stage, {}, nullptr, nullptr, barrier);
45 }
46 
//------------------------------------------------------------------------------
/// @brief  Creates a blit pass that records its commands into the given
///         command buffer. Ownership of the command buffer wrapper is shared.
BlitPassVK::BlitPassVK(std::shared_ptr<CommandBufferVK> command_buffer)
    : command_buffer_(std::move(command_buffer)) {}
49 
// No owned resources beyond the shared command buffer; default teardown.
BlitPassVK::~BlitPassVK() = default;
51 
52 void BlitPassVK::OnSetLabel(std::string label) {
53  if (label.empty()) {
54  return;
55  }
56  label_ = std::move(label);
57 }
58 
// |BlitPass|
bool BlitPassVK::IsValid() const {
  // A Vulkan blit pass has no fallible construction steps in this
  // implementation, so it always reports as valid.
  return true;
}
63 
// |BlitPass|
bool BlitPassVK::EncodeCommands(
    const std::shared_ptr<Allocator>& transients_allocator) const {
  // Intentionally a no-op: each OnCopy.../OnGenerateMipmapCommand method in
  // this file records directly into the Vulkan command buffer at call time,
  // so there is nothing deferred left to encode here.
  return true;
}
69 
// |BlitPass|
//
// Copies `source_region` of `source` into `destination` at
// `destination_origin` via vkCmdCopyImage. Both textures are transitioned to
// the appropriate transfer layouts first; the destination is transitioned
// back to shader-read afterwards unless it is a swapchain image.
bool BlitPassVK::OnCopyTextureToTextureCommand(
    std::shared_ptr<Texture> source,
    std::shared_ptr<Texture> destination,
    IRect source_region,
    IPoint destination_origin,
    std::string label) {
  const auto& cmd_buffer = command_buffer_->GetCommandBuffer();

  const auto& src = TextureVK::Cast(*source);
  const auto& dst = TextureVK::Cast(*destination);

  // Keep both textures alive until the command buffer finishes executing.
  if (!command_buffer_->Track(source) || !command_buffer_->Track(destination)) {
    return false;
  }

  // Make any prior transfer, shader, or color-attachment writes to the source
  // visible before it is read as a transfer source.
  BarrierVK src_barrier;
  src_barrier.cmd_buffer = cmd_buffer;
  src_barrier.new_layout = vk::ImageLayout::eTransferSrcOptimal;
  src_barrier.src_access = vk::AccessFlagBits::eTransferWrite |
                           vk::AccessFlagBits::eShaderWrite |
                           vk::AccessFlagBits::eColorAttachmentWrite;
  src_barrier.src_stage = vk::PipelineStageFlagBits::eTransfer |
                          vk::PipelineStageFlagBits::eFragmentShader |
                          vk::PipelineStageFlagBits::eColorAttachmentOutput;
  src_barrier.dst_access = vk::AccessFlagBits::eTransferRead;
  src_barrier.dst_stage = vk::PipelineStageFlagBits::eTransfer;

  // Move the destination into the transfer-dst layout so it can be written.
  BarrierVK dst_barrier;
  dst_barrier.cmd_buffer = cmd_buffer;
  dst_barrier.new_layout = vk::ImageLayout::eTransferDstOptimal;
  dst_barrier.src_access = {};
  dst_barrier.src_stage = vk::PipelineStageFlagBits::eTopOfPipe;
  dst_barrier.dst_access =
      vk::AccessFlagBits::eShaderRead | vk::AccessFlagBits::eTransferWrite;
  dst_barrier.dst_stage = vk::PipelineStageFlagBits::eFragmentShader |
                          vk::PipelineStageFlagBits::eTransfer;

  if (!src.SetLayout(src_barrier) || !dst.SetLayout(dst_barrier)) {
    VALIDATION_LOG << "Could not complete layout transitions.";
    return false;
  }

  vk::ImageCopy image_copy;

  // Mip level 0, base array layer 0, one layer, color aspect on both sides.
  image_copy.setSrcSubresource(
      vk::ImageSubresourceLayers(vk::ImageAspectFlagBits::eColor, 0, 0, 1));
  image_copy.setDstSubresource(
      vk::ImageSubresourceLayers(vk::ImageAspectFlagBits::eColor, 0, 0, 1));

  image_copy.srcOffset =
      vk::Offset3D(source_region.GetX(), source_region.GetY(), 0);
  image_copy.dstOffset =
      vk::Offset3D(destination_origin.x, destination_origin.y, 0);
  image_copy.extent =
      vk::Extent3D(source_region.GetWidth(), source_region.GetHeight(), 1);

  // Issue the copy command now that the images are already in the right
  // layouts.
  cmd_buffer.copyImage(src.GetImage(),          //
                       src_barrier.new_layout,  //
                       dst.GetImage(),          //
                       dst_barrier.new_layout,  //
                       image_copy               //
  );

  // If this is an onscreen texture, do not transition the layout
  // back to shader read.
  if (dst.IsSwapchainImage()) {
    return true;
  }

  // NOTE(review): this barrier synchronizes from eTopOfPipe with no source
  // access; presumably SetLayout/the layout tracker accounts for the transfer
  // write above — confirm against BarrierVK's semantics.
  BarrierVK barrier;
  barrier.cmd_buffer = cmd_buffer;
  barrier.new_layout = vk::ImageLayout::eShaderReadOnlyOptimal;
  barrier.src_access = {};
  barrier.src_stage = vk::PipelineStageFlagBits::eTopOfPipe;
  barrier.dst_access = vk::AccessFlagBits::eShaderRead;
  barrier.dst_stage = vk::PipelineStageFlagBits::eFragmentShader;

  return dst.SetLayout(barrier);
}
152 
153 // |BlitPass|
154 bool BlitPassVK::OnCopyTextureToBufferCommand(
155  std::shared_ptr<Texture> source,
156  std::shared_ptr<DeviceBuffer> destination,
157  IRect source_region,
158  size_t destination_offset,
159  std::string label) {
160  const auto& cmd_buffer = command_buffer_->GetCommandBuffer();
161 
162  // cast source and destination to TextureVK
163  const auto& src = TextureVK::Cast(*source);
164 
165  if (!command_buffer_->Track(source) || !command_buffer_->Track(destination)) {
166  return false;
167  }
168 
169  BarrierVK barrier;
170  barrier.cmd_buffer = cmd_buffer;
171  barrier.new_layout = vk::ImageLayout::eTransferSrcOptimal;
172  barrier.src_access = vk::AccessFlagBits::eShaderWrite |
173  vk::AccessFlagBits::eTransferWrite |
174  vk::AccessFlagBits::eColorAttachmentWrite;
175  barrier.src_stage = vk::PipelineStageFlagBits::eFragmentShader |
176  vk::PipelineStageFlagBits::eTransfer |
177  vk::PipelineStageFlagBits::eColorAttachmentOutput;
178  barrier.dst_access = vk::AccessFlagBits::eShaderRead;
179  barrier.dst_stage = vk::PipelineStageFlagBits::eVertexShader |
180  vk::PipelineStageFlagBits::eFragmentShader;
181 
182  const auto& dst = DeviceBufferVK::Cast(*destination);
183 
184  vk::BufferImageCopy image_copy;
185  image_copy.setBufferOffset(destination_offset);
186  image_copy.setBufferRowLength(0);
187  image_copy.setBufferImageHeight(0);
188  image_copy.setImageSubresource(
189  vk::ImageSubresourceLayers(vk::ImageAspectFlagBits::eColor, 0, 0, 1));
190  image_copy.setImageOffset(
191  vk::Offset3D(source_region.GetX(), source_region.GetY(), 0));
192  image_copy.setImageExtent(
193  vk::Extent3D(source_region.GetWidth(), source_region.GetHeight(), 1));
194 
195  if (!src.SetLayout(barrier)) {
196  VALIDATION_LOG << "Could not encode layout transition.";
197  return false;
198  }
199 
200  cmd_buffer.copyImageToBuffer(src.GetImage(), //
201  barrier.new_layout, //
202  dst.GetBuffer(), //
203  image_copy //
204  );
205 
206  // If the buffer is used for readback, then apply a transfer -> host memory
207  // barrier.
208  if (destination->GetDeviceBufferDescriptor().readback) {
209  vk::MemoryBarrier barrier;
210  barrier.srcAccessMask = vk::AccessFlagBits::eTransferWrite;
211  barrier.dstAccessMask = vk::AccessFlagBits::eHostRead;
212 
213  cmd_buffer.pipelineBarrier(vk::PipelineStageFlagBits::eTransfer,
214  vk::PipelineStageFlagBits::eHost, {}, 1,
215  &barrier, 0, {}, 0, {});
216  }
217 
218  return true;
219 }
220 
221 bool BlitPassVK::ConvertTextureToShaderRead(
222  const std::shared_ptr<Texture>& texture) {
223  const auto& cmd_buffer = command_buffer_->GetCommandBuffer();
224 
225  BarrierVK barrier;
226  barrier.cmd_buffer = cmd_buffer;
227  barrier.src_access = vk::AccessFlagBits::eTransferWrite;
228  barrier.src_stage = vk::PipelineStageFlagBits::eTransfer;
229  barrier.dst_access = vk::AccessFlagBits::eShaderRead;
230  barrier.dst_stage = vk::PipelineStageFlagBits::eFragmentShader;
231 
232  barrier.new_layout = vk::ImageLayout::eShaderReadOnlyOptimal;
233 
234  const auto& texture_vk = TextureVK::Cast(*texture);
235 
236  if (!command_buffer_->Track(texture)) {
237  return false;
238  }
239 
240  return texture_vk.SetLayout(barrier);
241 }
242 
// |BlitPass|
//
// Uploads `source` buffer contents into `destination_region` of the
// destination texture via vkCmdCopyBufferToImage, optionally transitioning
// the texture to shader-read afterwards.
bool BlitPassVK::OnCopyBufferToTextureCommand(
    BufferView source,
    std::shared_ptr<Texture> destination,
    IRect destination_region,
    std::string label,
    uint32_t slice,
    bool convert_to_read) {
  const auto& cmd_buffer = command_buffer_->GetCommandBuffer();

  // cast destination to TextureVK
  const auto& dst = TextureVK::Cast(*destination);
  const auto& src = DeviceBufferVK::Cast(*source.buffer);

  // Keep both resources alive until the command buffer finishes executing.
  if (!command_buffer_->Track(source.buffer) ||
      !command_buffer_->Track(destination)) {
    return false;
  }

  // Move the destination into the transfer-dst layout so it can be written.
  BarrierVK dst_barrier;
  dst_barrier.cmd_buffer = cmd_buffer;
  dst_barrier.new_layout = vk::ImageLayout::eTransferDstOptimal;
  dst_barrier.src_access = {};
  dst_barrier.src_stage = vk::PipelineStageFlagBits::eTopOfPipe;
  dst_barrier.dst_access =
      vk::AccessFlagBits::eShaderRead | vk::AccessFlagBits::eTransferWrite;
  dst_barrier.dst_stage = vk::PipelineStageFlagBits::eFragmentShader |
                          vk::PipelineStageFlagBits::eTransfer;

  vk::BufferImageCopy image_copy;
  image_copy.setBufferOffset(source.range.offset);
  // Zero row length/image height mean the buffer data is tightly packed.
  image_copy.setBufferRowLength(0);
  image_copy.setBufferImageHeight(0);
  // NOTE(review): `slice` is accepted but not applied to the subresource
  // layers here — confirm whether array slices should be plumbed through.
  image_copy.setImageSubresource(
      vk::ImageSubresourceLayers(vk::ImageAspectFlagBits::eColor, 0, 0, 1));
  image_copy.imageOffset.x = destination_region.GetX();
  image_copy.imageOffset.y = destination_region.GetY();
  image_copy.imageOffset.z = 0u;
  image_copy.imageExtent.width = destination_region.GetWidth();
  image_copy.imageExtent.height = destination_region.GetHeight();
  image_copy.imageExtent.depth = 1u;

  // Note: this barrier should do nothing if we're already in the transfer dst
  // optimal state. This is important for performance of repeated blit pass
  // encoding.
  if (!dst.SetLayout(dst_barrier)) {
    VALIDATION_LOG << "Could not encode layout transition.";
    return false;
  }

  cmd_buffer.copyBufferToImage(src.GetBuffer(),         //
                               dst.GetImage(),          //
                               dst_barrier.new_layout,  //
                               image_copy               //
  );

  // Transition to shader-read.
  if (convert_to_read) {
    BarrierVK barrier;
    barrier.cmd_buffer = cmd_buffer;
    barrier.src_access = vk::AccessFlagBits::eTransferWrite;
    barrier.src_stage = vk::PipelineStageFlagBits::eTransfer;
    barrier.dst_access = vk::AccessFlagBits::eShaderRead;
    barrier.dst_stage = vk::PipelineStageFlagBits::eFragmentShader;

    barrier.new_layout = vk::ImageLayout::eShaderReadOnlyOptimal;

    if (!dst.SetLayout(barrier)) {
      return false;
    }
  }

  return true;
}
317 
// |BlitPass|
//
// Scales the full extent of `source` onto the full extent of `destination`
// via vkCmdBlitImage with linear filtering, then moves the destination to
// the shader-read layout.
bool BlitPassVK::ResizeTexture(const std::shared_ptr<Texture>& source,
                               const std::shared_ptr<Texture>& destination) {
  const auto& cmd_buffer = command_buffer_->GetCommandBuffer();

  const auto& src = TextureVK::Cast(*source);
  const auto& dst = TextureVK::Cast(*destination);

  // Keep both textures alive until the command buffer finishes executing.
  if (!command_buffer_->Track(source) || !command_buffer_->Track(destination)) {
    return false;
  }

  // Make prior writes to the source visible and move it to transfer-src.
  BarrierVK src_barrier;
  src_barrier.cmd_buffer = cmd_buffer;
  src_barrier.new_layout = vk::ImageLayout::eTransferSrcOptimal;
  src_barrier.src_access = vk::AccessFlagBits::eTransferWrite |
                           vk::AccessFlagBits::eShaderWrite |
                           vk::AccessFlagBits::eColorAttachmentWrite;
  src_barrier.src_stage = vk::PipelineStageFlagBits::eTransfer |
                          vk::PipelineStageFlagBits::eFragmentShader |
                          vk::PipelineStageFlagBits::eColorAttachmentOutput;
  src_barrier.dst_access = vk::AccessFlagBits::eTransferRead;
  src_barrier.dst_stage = vk::PipelineStageFlagBits::eTransfer;

  // Move the destination into the transfer-dst layout for the blit write.
  BarrierVK dst_barrier;
  dst_barrier.cmd_buffer = cmd_buffer;
  dst_barrier.new_layout = vk::ImageLayout::eTransferDstOptimal;
  dst_barrier.src_access = {};
  dst_barrier.src_stage = vk::PipelineStageFlagBits::eTopOfPipe;
  dst_barrier.dst_access =
      vk::AccessFlagBits::eShaderRead | vk::AccessFlagBits::eTransferWrite;
  dst_barrier.dst_stage = vk::PipelineStageFlagBits::eFragmentShader |
                          vk::PipelineStageFlagBits::eTransfer;

  if (!src.SetLayout(src_barrier) || !dst.SetLayout(dst_barrier)) {
    VALIDATION_LOG << "Could not complete layout transitions.";
    return false;
  }

  // Mip level 0, one color layer on both sides of the blit.
  vk::ImageBlit blit;
  blit.srcSubresource.aspectMask = vk::ImageAspectFlagBits::eColor;
  blit.srcSubresource.baseArrayLayer = 0u;
  blit.srcSubresource.layerCount = 1u;
  blit.srcSubresource.mipLevel = 0;

  blit.dstSubresource.aspectMask = vk::ImageAspectFlagBits::eColor;
  blit.dstSubresource.baseArrayLayer = 0u;
  blit.dstSubresource.layerCount = 1u;
  blit.dstSubresource.mipLevel = 0;

  // offsets[0] is origin; offsets[1] is the exclusive max corner. Clamp to at
  // least 1 so a degenerate size never produces a zero extent.
  blit.srcOffsets[1].x = std::max<int32_t>(source->GetSize().width, 1u);
  blit.srcOffsets[1].y = std::max<int32_t>(source->GetSize().height, 1u);
  blit.srcOffsets[1].z = 1u;

  // offsets[0] is origin.
  blit.dstOffsets[1].x = std::max<int32_t>(destination->GetSize().width, 1u);
  blit.dstOffsets[1].y = std::max<int32_t>(destination->GetSize().height, 1u);
  blit.dstOffsets[1].z = 1u;

  cmd_buffer.blitImage(src.GetImage(),          //
                       src_barrier.new_layout,  //
                       dst.GetImage(),          //
                       dst_barrier.new_layout,  //
                       1,                       //
                       &blit,                   //
                       vk::Filter::eLinear

  );

  // Convert back to shader read

  BarrierVK barrier;
  barrier.cmd_buffer = cmd_buffer;
  barrier.new_layout = vk::ImageLayout::eShaderReadOnlyOptimal;
  barrier.src_access = {};
  barrier.src_stage = vk::PipelineStageFlagBits::eTopOfPipe;
  barrier.dst_access = vk::AccessFlagBits::eShaderRead;
  barrier.dst_stage = vk::PipelineStageFlagBits::eFragmentShader;

  return dst.SetLayout(barrier);
}
400 
401 // |BlitPass|
402 bool BlitPassVK::OnGenerateMipmapCommand(std::shared_ptr<Texture> texture,
403  std::string label) {
404  auto& src = TextureVK::Cast(*texture);
405 
406  const auto size = src.GetTextureDescriptor().size;
407  uint32_t mip_count = src.GetTextureDescriptor().mip_count;
408 
409  if (mip_count < 2u) {
410  return true;
411  }
412 
413  const auto& image = src.GetImage();
414  const auto& cmd = command_buffer_->GetCommandBuffer();
415 
416  if (!command_buffer_->Track(texture)) {
417  return false;
418  }
419 
420  // Initialize all mip levels to be in TransferDst mode. Later, in a loop,
421  // after writing to that mip level, we'll first switch its layout to
422  // TransferSrc to prepare the mip level after it, use the image as the source
423  // of the blit, before finally switching it to ShaderReadOnly so its available
424  // for sampling in a shader.
426  /*cmd=*/cmd,
427  /*image=*/image,
428  /*src_access_mask=*/vk::AccessFlagBits::eTransferWrite |
429  vk::AccessFlagBits::eColorAttachmentWrite,
430  /*dst_access_mask=*/vk::AccessFlagBits::eTransferRead,
431  /*old_layout=*/src.GetLayout(),
432  /*new_layout=*/vk::ImageLayout::eTransferDstOptimal,
433  /*src_stage=*/vk::PipelineStageFlagBits::eTransfer |
434  vk::PipelineStageFlagBits::eColorAttachmentOutput,
435  /*dst_stage=*/vk::PipelineStageFlagBits::eTransfer,
436  /*base_mip_level=*/0u,
437  /*mip_level_count=*/mip_count);
438 
439  vk::ImageMemoryBarrier barrier;
440  barrier.image = image;
441  barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
442  barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
443  barrier.subresourceRange.aspectMask = vk::ImageAspectFlagBits::eColor;
444  barrier.subresourceRange.baseArrayLayer = 0;
445  barrier.subresourceRange.layerCount = 1;
446  barrier.subresourceRange.levelCount = 1;
447 
448  // Blit from the mip level N - 1 to mip level N.
449  size_t width = size.width;
450  size_t height = size.height;
451  for (size_t mip_level = 1u; mip_level < mip_count; mip_level++) {
452  barrier.subresourceRange.baseMipLevel = mip_level - 1;
453  barrier.oldLayout = vk::ImageLayout::eTransferDstOptimal;
454  barrier.newLayout = vk::ImageLayout::eTransferSrcOptimal;
455  barrier.srcAccessMask = vk::AccessFlagBits::eTransferWrite;
456  barrier.dstAccessMask = vk::AccessFlagBits::eTransferRead;
457 
458  // We just finished writing to the previous (N-1) mip level or it was the
459  // base mip level. These were initialized to TransferDst earler. We are now
460  // going to read from it to write to the current level (N) . So it must be
461  // converted to TransferSrc.
462  cmd.pipelineBarrier(vk::PipelineStageFlagBits::eTransfer,
463  vk::PipelineStageFlagBits::eTransfer, {}, {}, {},
464  {barrier});
465 
466  vk::ImageBlit blit;
467  blit.srcSubresource.aspectMask = vk::ImageAspectFlagBits::eColor;
468  blit.srcSubresource.baseArrayLayer = 0u;
469  blit.srcSubresource.layerCount = 1u;
470  blit.srcSubresource.mipLevel = mip_level - 1;
471 
472  blit.dstSubresource.aspectMask = vk::ImageAspectFlagBits::eColor;
473  blit.dstSubresource.baseArrayLayer = 0u;
474  blit.dstSubresource.layerCount = 1u;
475  blit.dstSubresource.mipLevel = mip_level;
476 
477  // offsets[0] is origin.
478  blit.srcOffsets[1].x = std::max<int32_t>(width, 1u);
479  blit.srcOffsets[1].y = std::max<int32_t>(height, 1u);
480  blit.srcOffsets[1].z = 1u;
481 
482  width = width / 2;
483  height = height / 2;
484 
485  // offsets[0] is origin.
486  blit.dstOffsets[1].x = std::max<int32_t>(width, 1u);
487  blit.dstOffsets[1].y = std::max<int32_t>(height, 1u);
488  blit.dstOffsets[1].z = 1u;
489 
490  cmd.blitImage(image, // src image
491  vk::ImageLayout::eTransferSrcOptimal, // src layout
492  image, // dst image
493  vk::ImageLayout::eTransferDstOptimal, // dst layout
494  1u, // region count
495  &blit, // regions
496  vk::Filter::eLinear // filter
497  );
498 
499  barrier.oldLayout = vk::ImageLayout::eTransferSrcOptimal;
500  barrier.newLayout = vk::ImageLayout::eShaderReadOnlyOptimal;
501  barrier.srcAccessMask = vk::AccessFlagBits::eTransferRead;
502  barrier.dstAccessMask = vk::AccessFlagBits::eShaderRead;
503 
504  // Now that the blit is done, the image at the previous level (N-1)
505  // is done reading from (TransferSrc)/ Now we must prepare it to be read
506  // from a shader (ShaderReadOnly).
507  cmd.pipelineBarrier(vk::PipelineStageFlagBits::eTransfer,
508  vk::PipelineStageFlagBits::eFragmentShader, {}, {}, {},
509  {barrier});
510  }
511 
512  barrier.subresourceRange.baseMipLevel = mip_count - 1;
513  barrier.oldLayout = vk::ImageLayout::eTransferDstOptimal;
514  barrier.newLayout = vk::ImageLayout::eShaderReadOnlyOptimal;
515  barrier.srcAccessMask = vk::AccessFlagBits::eTransferWrite;
516  barrier.dstAccessMask = vk::AccessFlagBits::eShaderRead;
517 
518  cmd.pipelineBarrier(vk::PipelineStageFlagBits::eTransfer,
519  vk::PipelineStageFlagBits::eFragmentShader, {}, {}, {},
520  {barrier});
521 
522  // We modified the layouts of this image from underneath it. Tell it its new
523  // state so it doesn't try to perform redundant transitions under the hood.
524  src.SetLayoutWithoutEncoding(vk::ImageLayout::eShaderReadOnlyOptimal);
525  src.SetMipMapGenerated();
526 
527  return true;
528 }
529 
530 } // namespace impeller
// --- Doxygen cross-reference index (documentation-site extraction artifact;
// --- preserved as comments so the translation unit remains parseable) ---
// barrier_vk.h
// command_buffer_vk.h
// impeller::InsertImageMemoryBarrier
//   static void InsertImageMemoryBarrier(const vk::CommandBuffer& cmd,
//       const vk::Image& image, vk::AccessFlags src_access_mask,
//       vk::AccessFlags dst_access_mask, vk::ImageLayout old_layout,
//       vk::ImageLayout new_layout, vk::PipelineStageFlags src_stage,
//       vk::PipelineStageFlags dst_stage, uint32_t base_mip_level,
//       uint32_t mip_level_count = 1u)
//   Definition: blit_pass_vk.cc:16
// blit_pass_vk.h
// texture_vk.h
// VALIDATION_LOG — #define VALIDATION_LOG — Definition: validation.h:91
// std — Definition: comparable.h:95
// impeller::IPoint — TPoint<int64_t> IPoint — Definition: point.h:328
// impeller::IRect — IRect64 IRect — Definition: rect.h:779
// impeller — Definition: allocation.cc:12