naga/back/glsl/mod.rs
/*!
Backend for [GLSL][glsl] (OpenGL Shading Language).

The main structure is [`Writer`]; it maintains internal state that is used
to output a [`Module`](crate::Module) as GLSL.

# Supported versions
### Core
- 330
- 400
- 410
- 420
- 430
- 450

### ES
- 300
- 310

[glsl]: https://www.khronos.org/registry/OpenGL/index_gl.php
*/
22
// GLSL is mostly a superset of C, but it also removes some parts of it. This is a list of the
// aspects relevant to this backend.
//
// The most notable change is the introduction of the version preprocessor directive, which must
// always be the first line of a glsl file and is written as
// `#version number profile`
// `number` is the version itself (e.g. 300) and `profile` is the shader profile. We only support
// "core" and "es"; the former is used in desktop applications and the latter in embedded
// contexts, mobile devices and browsers. Each one has its own versions (at the time of writing
// the latest version for "core" is 460 and for "es" is 320).
//
// Another important preprocessor addition is the extension directive, which is written as
// `#extension name: behaviour`
// Extensions provide additional features in a plugin fashion, but they aren't required to be
// supported (hence the name). `behaviour` specifies whether the extension is strictly required
// or should only be enabled if needed; whenever we use extensions we set behaviour to `require`.
//
// The only removal from glsl that makes a difference for this backend is pointers.
//
// Additions that are relevant for the backend are the `discard` keyword, the introduction of
// vectors, matrices, samplers and image types, and functions that provide common shader
// operations.
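//
// As an illustration only (the exact preamble depends on the module and options), a shader
// targeting ES 3.10 that needs an extension would start with something like:
//
//   #version 310 es
//   #extension GL_EXT_texture_shadow_lod : require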
45
46pub use features::Features;
47
48use crate::{
49 back,
50 proc::{self, NameKey},
51 valid, Handle, ShaderStage, TypeInner,
52};
53use features::FeaturesManager;
54use std::{
55 cmp::Ordering,
56 fmt,
57 fmt::{Error as FmtError, Write},
58 mem,
59};
60use thiserror::Error;
61
/// Contains the feature-related code and the feature querying method
mod features;
/// Contains a constant, `RESERVED_KEYWORDS`, with a slice of all the reserved keywords
mod keywords;
66
67/// List of supported `core` GLSL versions.
68pub const SUPPORTED_CORE_VERSIONS: &[u16] = &[140, 150, 330, 400, 410, 420, 430, 440, 450, 460];
69/// List of supported `es` GLSL versions.
70pub const SUPPORTED_ES_VERSIONS: &[u16] = &[300, 310, 320];
71
72/// The suffix of the variable that will hold the calculated clamped level
73/// of detail for bounds checking in `ImageLoad`
74const CLAMPED_LOD_SUFFIX: &str = "_clamped_lod";
75
76pub(crate) const MODF_FUNCTION: &str = "naga_modf";
77pub(crate) const FREXP_FUNCTION: &str = "naga_frexp";
78
79// Must match code in glsl_built_in
80pub const FIRST_INSTANCE_BINDING: &str = "naga_vs_first_instance";
81
82/// Mapping between resources and bindings.
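///
/// A minimal sketch of populating one; the `group`/`binding` fields come from
/// [`crate::ResourceBinding`] and the value is the GL binding slot to assign:
///
/// ```ignore
/// let mut binding_map = BindingMap::new();
/// binding_map.insert(crate::ResourceBinding { group: 0, binding: 0 }, 0u8);
/// ```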
83pub type BindingMap = std::collections::BTreeMap<crate::ResourceBinding, u8>;
84
85impl crate::AtomicFunction {
86 const fn to_glsl(self) -> &'static str {
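        // These suffixes are combined with the `atomic` prefix when the call is
        // written out, e.g. `Add` ends up as `atomicAdd`.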
87 match self {
88 Self::Add | Self::Subtract => "Add",
89 Self::And => "And",
90 Self::InclusiveOr => "Or",
91 Self::ExclusiveOr => "Xor",
92 Self::Min => "Min",
93 Self::Max => "Max",
94 Self::Exchange { compare: None } => "Exchange",
95 Self::Exchange { compare: Some(_) } => "", //TODO
96 }
97 }
98}
99
100impl crate::AddressSpace {
101 const fn is_buffer(&self) -> bool {
102 match *self {
103 crate::AddressSpace::Uniform | crate::AddressSpace::Storage { .. } => true,
104 _ => false,
105 }
106 }
107
108 /// Whether a variable with this address space can be initialized
109 const fn initializable(&self) -> bool {
110 match *self {
111 crate::AddressSpace::Function | crate::AddressSpace::Private => true,
112 crate::AddressSpace::WorkGroup
113 | crate::AddressSpace::Uniform
114 | crate::AddressSpace::Storage { .. }
115 | crate::AddressSpace::Handle
116 | crate::AddressSpace::PushConstant => false,
117 }
118 }
119}
120
121/// A GLSL version.
122#[derive(Debug, Copy, Clone, PartialEq)]
123#[cfg_attr(feature = "serialize", derive(serde::Serialize))]
124#[cfg_attr(feature = "deserialize", derive(serde::Deserialize))]
125pub enum Version {
126 /// `core` GLSL.
127 Desktop(u16),
128 /// `es` GLSL.
129 Embedded { version: u16, is_webgl: bool },
130}
131
132impl Version {
133 /// Create a new gles version
134 pub const fn new_gles(version: u16) -> Self {
135 Self::Embedded {
136 version,
137 is_webgl: false,
138 }
139 }
140
    /// Returns true if `self` is `Version::Embedded` (i.e. is an ES version)
142 const fn is_es(&self) -> bool {
143 match *self {
144 Version::Desktop(_) => false,
145 Version::Embedded { .. } => true,
146 }
147 }
148
149 /// Returns true if targeting WebGL
150 const fn is_webgl(&self) -> bool {
151 match *self {
152 Version::Desktop(_) => false,
153 Version::Embedded { is_webgl, .. } => is_webgl,
154 }
155 }
156
    /// Checks the list of currently supported versions and returns true if it contains the
    /// specified version
    ///
    /// # Notes
    /// Since an invalid version number will never be added to the supported version list,
    /// this also checks for version validity
163 fn is_supported(&self) -> bool {
164 match *self {
165 Version::Desktop(v) => SUPPORTED_CORE_VERSIONS.contains(&v),
166 Version::Embedded { version: v, .. } => SUPPORTED_ES_VERSIONS.contains(&v),
167 }
168 }
169
170 fn supports_io_locations(&self) -> bool {
171 *self >= Version::Desktop(330) || *self >= Version::new_gles(300)
172 }
173
174 /// Checks if the version supports all of the explicit layouts:
175 /// - `location=` qualifiers for bindings
176 /// - `binding=` qualifiers for resources
177 ///
178 /// Note: `location=` for vertex inputs and fragment outputs is supported
179 /// unconditionally for GLES 300.
180 fn supports_explicit_locations(&self) -> bool {
181 *self >= Version::Desktop(420) || *self >= Version::new_gles(310)
182 }
183
184 fn supports_early_depth_test(&self) -> bool {
185 *self >= Version::Desktop(130) || *self >= Version::new_gles(310)
186 }
187
188 fn supports_std430_layout(&self) -> bool {
189 *self >= Version::Desktop(430) || *self >= Version::new_gles(310)
190 }
191
192 fn supports_fma_function(&self) -> bool {
193 *self >= Version::Desktop(400) || *self >= Version::new_gles(320)
194 }
195
196 fn supports_integer_functions(&self) -> bool {
197 *self >= Version::Desktop(400) || *self >= Version::new_gles(310)
198 }
199
200 fn supports_frexp_function(&self) -> bool {
201 *self >= Version::Desktop(400) || *self >= Version::new_gles(310)
202 }
203
204 fn supports_derivative_control(&self) -> bool {
205 *self >= Version::Desktop(450)
206 }
207}
208
209impl PartialOrd for Version {
210 fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
211 match (*self, *other) {
212 (Version::Desktop(x), Version::Desktop(y)) => Some(x.cmp(&y)),
213 (Version::Embedded { version: x, .. }, Version::Embedded { version: y, .. }) => {
214 Some(x.cmp(&y))
215 }
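            // A `core` version can't be meaningfully ordered against an `es`
            // version, so cross-profile comparisons are unordered.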
216 _ => None,
217 }
218 }
219}
220
221impl fmt::Display for Version {
222 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
223 match *self {
224 Version::Desktop(v) => write!(f, "{v} core"),
225 Version::Embedded { version: v, .. } => write!(f, "{v} es"),
226 }
227 }
228}
229
230bitflags::bitflags! {
231 /// Configuration flags for the [`Writer`].
232 #[cfg_attr(feature = "serialize", derive(serde::Serialize))]
233 #[cfg_attr(feature = "deserialize", derive(serde::Deserialize))]
234 #[derive(Clone, Copy, Debug, Eq, PartialEq)]
235 pub struct WriterFlags: u32 {
236 /// Flip output Y and extend Z from (0, 1) to (-1, 1).
237 const ADJUST_COORDINATE_SPACE = 0x1;
238 /// Supports GL_EXT_texture_shadow_lod on the host, which provides
239 /// additional functions on shadows and arrays of shadows.
240 const TEXTURE_SHADOW_LOD = 0x2;
241 /// Supports ARB_shader_draw_parameters on the host, which provides
242 /// support for `gl_BaseInstanceARB`, `gl_BaseVertexARB`, and `gl_DrawIDARB`.
243 const DRAW_PARAMETERS = 0x4;
        /// Include unused global variables, constants and functions. By default the output will exclude
        /// global variables that are not used in the specified entry point (including indirect use),
        /// all constant declarations, and functions that use excluded global variables.
247 const INCLUDE_UNUSED_ITEMS = 0x10;
248 /// Emit `PointSize` output builtin to vertex shaders, which is
249 /// required for drawing with `PointList` topology.
250 ///
251 /// https://registry.khronos.org/OpenGL/specs/es/3.2/GLSL_ES_Specification_3.20.html#built-in-language-variables
252 /// The variable gl_PointSize is intended for a shader to write the size of the point to be rasterized. It is measured in pixels.
253 /// If gl_PointSize is not written to, its value is undefined in subsequent pipe stages.
254 const FORCE_POINT_SIZE = 0x20;
255 }
256}
257
258/// Configuration used in the [`Writer`].
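///
/// A typical configuration for an ES 3.10 target could look like the sketch below;
/// every field shown exists on [`Options`]:
///
/// ```ignore
/// let options = Options {
///     version: Version::new_gles(310),
///     writer_flags: WriterFlags::ADJUST_COORDINATE_SPACE,
///     binding_map: BindingMap::default(),
///     zero_initialize_workgroup_memory: true,
/// };
/// ```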
259#[derive(Debug, Clone)]
260#[cfg_attr(feature = "serialize", derive(serde::Serialize))]
261#[cfg_attr(feature = "deserialize", derive(serde::Deserialize))]
262pub struct Options {
263 /// The GLSL version to be used.
264 pub version: Version,
265 /// Configuration flags for the [`Writer`].
266 pub writer_flags: WriterFlags,
    /// Map of resource associations to binding locations.
268 pub binding_map: BindingMap,
269 /// Should workgroup variables be zero initialized (by polyfilling)?
270 pub zero_initialize_workgroup_memory: bool,
271}
272
273impl Default for Options {
274 fn default() -> Self {
275 Options {
276 version: Version::new_gles(310),
277 writer_flags: WriterFlags::ADJUST_COORDINATE_SPACE,
278 binding_map: BindingMap::default(),
279 zero_initialize_workgroup_memory: true,
280 }
281 }
282}
283
284/// A subset of options meant to be changed per pipeline.
285#[derive(Debug, Clone)]
286#[cfg_attr(feature = "serialize", derive(serde::Serialize))]
287#[cfg_attr(feature = "deserialize", derive(serde::Deserialize))]
288pub struct PipelineOptions {
289 /// The stage of the entry point.
290 pub shader_stage: ShaderStage,
    /// The name of the entry point.
    ///
    /// If no matching entry point is found while creating a [`Writer`], an error will be returned.
294 pub entry_point: String,
295 /// How many views to render to, if doing multiview rendering.
296 pub multiview: Option<std::num::NonZeroU32>,
297}
298
299#[derive(Debug)]
300pub struct VaryingLocation {
301 /// The location of the global.
302 /// This corresponds to `layout(location = ..)` in GLSL.
303 pub location: u32,
304 /// The index which can be used for dual source blending.
305 /// This corresponds to `layout(index = ..)` in GLSL.
306 pub index: u32,
307}
308
309/// Reflection info for texture mappings and uniforms.
310#[derive(Debug)]
311pub struct ReflectionInfo {
312 /// Mapping between texture names and variables/samplers.
313 pub texture_mapping: crate::FastHashMap<String, TextureMapping>,
314 /// Mapping between uniform variables and names.
315 pub uniforms: crate::FastHashMap<Handle<crate::GlobalVariable>, String>,
316 /// Mapping between names and attribute locations.
317 pub varying: crate::FastHashMap<String, VaryingLocation>,
318 /// List of push constant items in the shader.
319 pub push_constant_items: Vec<PushConstantItem>,
320}
321
322/// Mapping between a texture and its sampler, if it exists.
323///
324/// GLSL pre-Vulkan has no concept of separate textures and samplers. Instead, everything is a
325/// `gsamplerN` where `g` is the scalar type and `N` is the dimension. But naga uses separate textures
326/// and samplers in the IR, so the backend produces a [`FastHashMap`](crate::FastHashMap) with the texture name
327/// as a key and a [`TextureMapping`] as a value. This way, the user knows where to bind.
328///
329/// [`Storage`](crate::ImageClass::Storage) images produce `gimageN` and don't have an associated sampler,
330/// so the [`sampler`](Self::sampler) field will be [`None`].
331#[derive(Debug, Clone)]
332pub struct TextureMapping {
333 /// Handle to the image global variable.
334 pub texture: Handle<crate::GlobalVariable>,
335 /// Handle to the associated sampler global variable, if it exists.
336 pub sampler: Option<Handle<crate::GlobalVariable>>,
337}
338
339/// All information to bind a single uniform value to the shader.
340///
341/// Push constants are emulated using traditional uniforms in OpenGL.
342///
343/// These are composed of a set of primitives (scalar, vector, matrix) that
344/// are given names. Because they are not backed by the concept of a buffer,
345/// we must do the work of calculating the offset of each primitive in the
346/// push constant block.
347#[derive(Debug, Clone)]
348pub struct PushConstantItem {
349 /// GL uniform name for the item. This name is the same as if you were
350 /// to access it directly from a GLSL shader.
351 ///
    /// With the following example, the following names will be generated,
    /// one name per GLSL uniform:
    ///
    /// ```glsl
    /// struct InnerStruct {
    ///     float value;
    /// };
    ///
    /// struct PushConstant {
    ///     InnerStruct inner;
    ///     vec4 array[2];
    /// };
    ///
    /// uniform PushConstant _push_constant_binding_cs;
    /// ```
367 ///
368 /// ```text
369 /// - _push_constant_binding_cs.inner.value
370 /// - _push_constant_binding_cs.array[0]
371 /// - _push_constant_binding_cs.array[1]
372 /// ```
373 ///
374 pub access_path: String,
375 /// Type of the uniform. This will only ever be a scalar, vector, or matrix.
376 pub ty: Handle<crate::Type>,
377 /// The offset in the push constant memory block this uniform maps to.
378 ///
379 /// The size of the uniform can be derived from the type.
380 pub offset: u32,
381}
382
/// Helper structure that generates unique identifiers (simple increasing numbers)
384#[derive(Default)]
385struct IdGenerator(u32);
386
387impl IdGenerator {
388 /// Generates a number that's guaranteed to be unique for this `IdGenerator`
389 fn generate(&mut self) -> u32 {
390 // It's just an increasing number but it does the job
391 let ret = self.0;
392 self.0 += 1;
393 ret
394 }
395}
396
397/// Assorted options needed for generating varyings.
398#[derive(Clone, Copy)]
399struct VaryingOptions {
400 output: bool,
401 targeting_webgl: bool,
402 draw_parameters: bool,
403}
404
405impl VaryingOptions {
406 const fn from_writer_options(options: &Options, output: bool) -> Self {
407 Self {
408 output,
409 targeting_webgl: options.version.is_webgl(),
410 draw_parameters: options.writer_flags.contains(WriterFlags::DRAW_PARAMETERS),
411 }
412 }
413}
414
/// Helper wrapper used to get a name for a varying
///
/// Varyings have different naming schemes depending on their binding:
/// - Varyings with builtin bindings get their name from [`glsl_built_in`].
/// - Varyings with location bindings are named `_S_locationX` where `S` is a
///   prefix identifying which pipeline stages the varying connects, and `X` is
///   the location.
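///
/// For example, a location `0` output of a vertex entry point becomes
/// `_vs2fs_location0`, and the matching fragment-stage input resolves to the
/// same name, which is how the two stages are tied together.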
422struct VaryingName<'a> {
423 binding: &'a crate::Binding,
424 stage: ShaderStage,
425 options: VaryingOptions,
426}
427impl fmt::Display for VaryingName<'_> {
428 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
429 match *self.binding {
430 crate::Binding::Location {
431 second_blend_source: true,
432 ..
433 } => {
434 write!(f, "_fs2p_location1",)
435 }
436 crate::Binding::Location { location, .. } => {
437 let prefix = match (self.stage, self.options.output) {
438 (ShaderStage::Compute, _) => unreachable!(),
439 // pipeline to vertex
440 (ShaderStage::Vertex, false) => "p2vs",
441 // vertex to fragment
442 (ShaderStage::Vertex, true) | (ShaderStage::Fragment, false) => "vs2fs",
443 // fragment to pipeline
444 (ShaderStage::Fragment, true) => "fs2p",
445 };
446 write!(f, "_{prefix}_location{location}",)
447 }
448 crate::Binding::BuiltIn(built_in) => {
449 write!(f, "{}", glsl_built_in(built_in, self.options))
450 }
451 }
452 }
453}
454
455impl ShaderStage {
456 const fn to_str(self) -> &'static str {
457 match self {
458 ShaderStage::Compute => "cs",
459 ShaderStage::Fragment => "fs",
460 ShaderStage::Vertex => "vs",
461 }
462 }
463}
464
465/// Shorthand result used internally by the backend
466type BackendResult<T = ()> = Result<T, Error>;
467
468/// A GLSL compilation error.
469#[derive(Debug, Error)]
470pub enum Error {
    /// An error occurred while writing to the output.
472 #[error("Format error")]
473 FmtError(#[from] FmtError),
474 /// The specified [`Version`] doesn't have all required [`Features`].
475 ///
476 /// Contains the missing [`Features`].
477 #[error("The selected version doesn't support {0:?}")]
478 MissingFeatures(Features),
479 /// [`AddressSpace::PushConstant`](crate::AddressSpace::PushConstant) was used more than
480 /// once in the entry point, which isn't supported.
481 #[error("Multiple push constants aren't supported")]
482 MultiplePushConstants,
483 /// The specified [`Version`] isn't supported.
484 #[error("The specified version isn't supported")]
485 VersionNotSupported,
486 /// The entry point couldn't be found.
487 #[error("The requested entry point couldn't be found")]
488 EntryPointNotFound,
489 /// A call was made to an unsupported external.
490 #[error("A call was made to an unsupported external: {0}")]
491 UnsupportedExternal(String),
492 /// A scalar with an unsupported width was requested.
493 #[error("A scalar with an unsupported width was requested: {0:?}")]
494 UnsupportedScalar(crate::Scalar),
    /// An image was used with multiple samplers, which isn't supported.
    #[error("An image was used with multiple samplers")]
497 ImageMultipleSamplers,
498 #[error("{0}")]
499 Custom(String),
500 #[error("overrides should not be present at this stage")]
501 Override,
502}
503
504/// Binary operation with a different logic on the GLSL side.
505enum BinaryOperation {
506 /// Vector comparison should use the function like `greaterThan()`, etc.
507 VectorCompare,
    /// Vector component-wise operation; used to polyfill unsupported ops like `|` and `&` for `bvecN`s
509 VectorComponentWise,
510 /// GLSL `%` is SPIR-V `OpUMod/OpSMod` and `mod()` is `OpFMod`, but [`BinaryOperator::Modulo`](crate::BinaryOperator::Modulo) is `OpFRem`.
511 Modulo,
512 /// Any plain operation. No additional logic required.
513 Other,
514}
515
516/// Writer responsible for all code generation.
517pub struct Writer<'a, W> {
518 // Inputs
519 /// The module being written.
520 module: &'a crate::Module,
521 /// The module analysis.
522 info: &'a valid::ModuleInfo,
523 /// The output writer.
524 out: W,
525 /// User defined configuration to be used.
526 options: &'a Options,
    /// The bounds-checking policies to be used
528 policies: proc::BoundsCheckPolicies,
529
530 // Internal State
531 /// Features manager used to store all the needed features and write them.
532 features: FeaturesManager,
533 namer: proc::Namer,
534 /// A map with all the names needed for writing the module
535 /// (generated by a [`Namer`](crate::proc::Namer)).
536 names: crate::FastHashMap<NameKey, String>,
537 /// A map with the names of global variables needed for reflections.
538 reflection_names_globals: crate::FastHashMap<Handle<crate::GlobalVariable>, String>,
539 /// The selected entry point.
540 entry_point: &'a crate::EntryPoint,
541 /// The index of the selected entry point.
542 entry_point_idx: proc::EntryPointIndex,
543 /// A generator for unique block numbers.
544 block_id: IdGenerator,
545 /// Set of expressions that have associated temporary variables.
546 named_expressions: crate::NamedExpressions,
547 /// Set of expressions that need to be baked to avoid unnecessary repetition in output
548 need_bake_expressions: back::NeedBakeExpressions,
549 /// How many views to render to, if doing multiview rendering.
550 multiview: Option<std::num::NonZeroU32>,
551 /// Mapping of varying variables to their location. Needed for reflections.
552 varying: crate::FastHashMap<String, VaryingLocation>,
553}
554
555impl<'a, W: Write> Writer<'a, W> {
556 /// Creates a new [`Writer`] instance.
557 ///
    /// # Errors
    /// - If the version specified is invalid or not supported.
    /// - If the entry point couldn't be found in the module.
    /// - If the version specified doesn't support some used features.
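    ///
    /// # Example
    ///
    /// A minimal sketch of driving the writer; the `module`, `info` and entry point
    /// name are assumed to come from the caller:
    ///
    /// ```ignore
    /// let mut buffer = String::new();
    /// let options = Options::default();
    /// let pipeline_options = PipelineOptions {
    ///     shader_stage: ShaderStage::Fragment,
    ///     entry_point: "main".to_string(),
    ///     multiview: None,
    /// };
    /// let mut writer = Writer::new(
    ///     &mut buffer,
    ///     &module,
    ///     &info,
    ///     &options,
    ///     &pipeline_options,
    ///     proc::BoundsCheckPolicies::default(),
    /// )?;
    /// let reflection_info = writer.write()?;
    /// ```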
562 pub fn new(
563 out: W,
564 module: &'a crate::Module,
565 info: &'a valid::ModuleInfo,
566 options: &'a Options,
567 pipeline_options: &'a PipelineOptions,
568 policies: proc::BoundsCheckPolicies,
569 ) -> Result<Self, Error> {
570 if !module.overrides.is_empty() {
571 return Err(Error::Override);
572 }
573
574 // Check if the requested version is supported
575 if !options.version.is_supported() {
576 log::error!("Version {}", options.version);
577 return Err(Error::VersionNotSupported);
578 }
579
580 // Try to find the entry point and corresponding index
581 let ep_idx = module
582 .entry_points
583 .iter()
584 .position(|ep| {
585 pipeline_options.shader_stage == ep.stage && pipeline_options.entry_point == ep.name
586 })
587 .ok_or(Error::EntryPointNotFound)?;
588
589 // Generate a map with names required to write the module
590 let mut names = crate::FastHashMap::default();
591 let mut namer = proc::Namer::default();
592 namer.reset(
593 module,
594 keywords::RESERVED_KEYWORDS,
595 &[],
596 &[],
597 &[
598 "gl_", // all GL built-in variables
599 "_group", // all normal bindings
600 "_push_constant_binding_", // all push constant bindings
601 ],
602 &mut names,
603 );
604
605 // Build the instance
606 let mut this = Self {
607 module,
608 info,
609 out,
610 options,
611 policies,
612
613 namer,
614 features: FeaturesManager::new(),
615 names,
616 reflection_names_globals: crate::FastHashMap::default(),
617 entry_point: &module.entry_points[ep_idx],
618 entry_point_idx: ep_idx as u16,
619 multiview: pipeline_options.multiview,
620 block_id: IdGenerator::default(),
621 named_expressions: Default::default(),
622 need_bake_expressions: Default::default(),
623 varying: Default::default(),
624 };
625
626 // Find all features required to print this module
627 this.collect_required_features()?;
628
629 Ok(this)
630 }
631
    /// Writes the [`Module`](crate::Module) as GLSL to the output
    ///
    /// # Notes
    /// If an error occurs while writing, the output might have been partially written
    ///
    /// # Panics
    /// Might panic if the module is invalid
639 pub fn write(&mut self) -> Result<ReflectionInfo, Error> {
640 // We use `writeln!(self.out)` throughout the write to add newlines
641 // to make the output more readable
642
643 let es = self.options.version.is_es();
644
645 // Write the version (It must be the first thing or it isn't a valid glsl output)
646 writeln!(self.out, "#version {}", self.options.version)?;
        // Write all the needed extensions
        //
        // This used to be the last thing written, as that allowed searching for features while
        // writing the module and saved some loops, but some older versions (420 or less) require
        // the extensions to appear before being used, even though extensions are part of the
        // preprocessor, not the processor ¯\_(ツ)_/¯
653 self.features.write(self.options, &mut self.out)?;
654
655 // glsl es requires a precision to be specified for floats and ints
656 // TODO: Should this be user configurable?
657 if es {
658 writeln!(self.out)?;
659 writeln!(self.out, "precision highp float;")?;
660 writeln!(self.out, "precision highp int;")?;
661 writeln!(self.out)?;
662 }
663
664 if self.entry_point.stage == ShaderStage::Compute {
665 let workgroup_size = self.entry_point.workgroup_size;
666 writeln!(
667 self.out,
668 "layout(local_size_x = {}, local_size_y = {}, local_size_z = {}) in;",
669 workgroup_size[0], workgroup_size[1], workgroup_size[2]
670 )?;
671 writeln!(self.out)?;
672 }
673
674 if self.entry_point.stage == ShaderStage::Vertex
675 && !self
676 .options
677 .writer_flags
678 .contains(WriterFlags::DRAW_PARAMETERS)
679 && self.features.contains(Features::INSTANCE_INDEX)
680 {
681 writeln!(self.out, "uniform uint {FIRST_INSTANCE_BINDING};")?;
682 writeln!(self.out)?;
683 }
684
685 // Enable early depth tests if needed
686 if let Some(depth_test) = self.entry_point.early_depth_test {
687 // If early depth test is supported for this version of GLSL
688 if self.options.version.supports_early_depth_test() {
689 writeln!(self.out, "layout(early_fragment_tests) in;")?;
690
691 if let Some(conservative) = depth_test.conservative {
692 use crate::ConservativeDepth as Cd;
693
694 let depth = match conservative {
695 Cd::GreaterEqual => "greater",
696 Cd::LessEqual => "less",
697 Cd::Unchanged => "unchanged",
698 };
699 writeln!(self.out, "layout (depth_{depth}) out float gl_FragDepth;")?;
700 }
701 writeln!(self.out)?;
702 } else {
703 log::warn!(
704 "Early depth testing is not supported for this version of GLSL: {}",
705 self.options.version
706 );
707 }
708 }
709
710 if self.entry_point.stage == ShaderStage::Vertex && self.options.version.is_webgl() {
711 if let Some(multiview) = self.multiview.as_ref() {
712 writeln!(self.out, "layout(num_views = {multiview}) in;")?;
713 writeln!(self.out)?;
714 }
715 }
716
        // Write struct types.
        //
        // These are always ordered because the IR is structured in a way that
        // you can't make a struct without adding all of its members first.
721 for (handle, ty) in self.module.types.iter() {
722 if let TypeInner::Struct { ref members, .. } = ty.inner {
723 // Structures ending with runtime-sized arrays can only be
724 // rendered as shader storage blocks in GLSL, not stand-alone
725 // struct types.
726 if !self.module.types[members.last().unwrap().ty]
727 .inner
728 .is_dynamically_sized(&self.module.types)
729 {
730 let name = &self.names[&NameKey::Type(handle)];
731 write!(self.out, "struct {name} ")?;
732 self.write_struct_body(handle, members)?;
733 writeln!(self.out, ";")?;
734 }
735 }
736 }
737
738 // Write functions to create special types.
739 for (type_key, struct_ty) in self.module.special_types.predeclared_types.iter() {
740 match type_key {
741 &crate::PredeclaredType::ModfResult { size, width }
742 | &crate::PredeclaredType::FrexpResult { size, width } => {
743 let arg_type_name_owner;
744 let arg_type_name = if let Some(size) = size {
745 arg_type_name_owner =
746 format!("{}vec{}", if width == 8 { "d" } else { "" }, size as u8);
747 &arg_type_name_owner
748 } else if width == 8 {
749 "double"
750 } else {
751 "float"
752 };
753
754 let other_type_name_owner;
755 let (defined_func_name, called_func_name, other_type_name) =
756 if matches!(type_key, &crate::PredeclaredType::ModfResult { .. }) {
757 (MODF_FUNCTION, "modf", arg_type_name)
758 } else {
759 let other_type_name = if let Some(size) = size {
760 other_type_name_owner = format!("ivec{}", size as u8);
761 &other_type_name_owner
762 } else {
763 "int"
764 };
765 (FREXP_FUNCTION, "frexp", other_type_name)
766 };
767
768 let struct_name = &self.names[&NameKey::Type(*struct_ty)];
769
770 writeln!(self.out)?;
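                    // When the version lacks a native `frexp`, a polyfill is emitted: the
                    // exponent is computed as `other = (arg == 0) ? 0 : 1 + log2(arg)` (cast
                    // to the integer type) and the fraction as `arg * exp2(-other)`, matching
                    // the GLSL written below.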
771 if !self.options.version.supports_frexp_function()
772 && matches!(type_key, &crate::PredeclaredType::FrexpResult { .. })
773 {
774 writeln!(
775 self.out,
776 "{struct_name} {defined_func_name}({arg_type_name} arg) {{
777 {other_type_name} other = arg == {arg_type_name}(0) ? {other_type_name}(0) : {other_type_name}({arg_type_name}(1) + log2(arg));
778 {arg_type_name} fract = arg * exp2({arg_type_name}(-other));
779 return {struct_name}(fract, other);
780}}",
781 )?;
782 } else {
783 writeln!(
784 self.out,
785 "{struct_name} {defined_func_name}({arg_type_name} arg) {{
786 {other_type_name} other;
787 {arg_type_name} fract = {called_func_name}(arg, other);
788 return {struct_name}(fract, other);
789}}",
790 )?;
791 }
792 }
793 &crate::PredeclaredType::AtomicCompareExchangeWeakResult { .. } => {}
794 }
795 }
796
797 // Write all named constants
798 let mut constants = self
799 .module
800 .constants
801 .iter()
802 .filter(|&(_, c)| c.name.is_some())
803 .peekable();
804 while let Some((handle, _)) = constants.next() {
805 self.write_global_constant(handle)?;
806 // Add extra newline for readability on last iteration
807 if constants.peek().is_none() {
808 writeln!(self.out)?;
809 }
810 }
811
812 let ep_info = self.info.get_entry_point(self.entry_point_idx as usize);
813
        // Write the globals
        //
        // Unless explicitly requested with WriterFlags::INCLUDE_UNUSED_ITEMS,
        // we filter out all globals that aren't used by the selected entry point, as they might
        // interfere with each other (i.e. two globals with the same location but
        // different classes)
820 let include_unused = self
821 .options
822 .writer_flags
823 .contains(WriterFlags::INCLUDE_UNUSED_ITEMS);
824 for (handle, global) in self.module.global_variables.iter() {
825 let is_unused = ep_info[handle].is_empty();
826 if !include_unused && is_unused {
827 continue;
828 }
829
830 match self.module.types[global.ty].inner {
831 // We treat images separately because they might require
832 // writing the storage format
833 TypeInner::Image {
834 mut dim,
835 arrayed,
836 class,
837 } => {
838 // Gather the storage format if needed
839 let storage_format_access = match self.module.types[global.ty].inner {
840 TypeInner::Image {
841 class: crate::ImageClass::Storage { format, access },
842 ..
843 } => Some((format, access)),
844 _ => None,
845 };
846
847 if dim == crate::ImageDimension::D1 && es {
848 dim = crate::ImageDimension::D2
849 }
850
                    // Gather the location if needed
852 let layout_binding = if self.options.version.supports_explicit_locations() {
853 let br = global.binding.as_ref().unwrap();
854 self.options.binding_map.get(br).cloned()
855 } else {
856 None
857 };
858
859 // Write all the layout qualifiers
860 if layout_binding.is_some() || storage_format_access.is_some() {
861 write!(self.out, "layout(")?;
862 if let Some(binding) = layout_binding {
863 write!(self.out, "binding = {binding}")?;
864 }
865 if let Some((format, _)) = storage_format_access {
866 let format_str = glsl_storage_format(format)?;
867 let separator = match layout_binding {
868 Some(_) => ",",
869 None => "",
870 };
871 write!(self.out, "{separator}{format_str}")?;
872 }
873 write!(self.out, ") ")?;
874 }
875
876 if let Some((_, access)) = storage_format_access {
877 self.write_storage_access(access)?;
878 }
879
880 // All images in glsl are `uniform`
881 // The trailing space is important
882 write!(self.out, "uniform ")?;
883
884 // write the type
885 //
                    // This is why we need the leading space: `write_image_type` doesn't add
                    // any spaces at the beginning or end
888 self.write_image_type(dim, arrayed, class)?;
889
890 // Finally write the name and end the global with a `;`
891 // The leading space is important
892 let global_name = self.get_global_name(handle, global);
893 writeln!(self.out, " {global_name};")?;
894 writeln!(self.out)?;
895
896 self.reflection_names_globals.insert(handle, global_name);
897 }
                // glsl has no concept of separate samplers, so we just ignore them
899 TypeInner::Sampler { .. } => continue,
900 // All other globals are written by `write_global`
901 _ => {
902 self.write_global(handle, global)?;
903 // Add a newline (only for readability)
904 writeln!(self.out)?;
905 }
906 }
907 }
908
909 for arg in self.entry_point.function.arguments.iter() {
910 self.write_varying(arg.binding.as_ref(), arg.ty, false)?;
911 }
912 if let Some(ref result) = self.entry_point.function.result {
913 self.write_varying(result.binding.as_ref(), result.ty, true)?;
914 }
915 writeln!(self.out)?;
916
917 // Write all regular functions
918 for (handle, function) in self.module.functions.iter() {
919 // Check that the function doesn't use globals that aren't supported
920 // by the current entry point
921 if !include_unused && !ep_info.dominates_global_use(&self.info[handle]) {
922 continue;
923 }
924
925 let fun_info = &self.info[handle];
926
            // Skip functions that are not compatible with this entry point's stage.
928 //
929 // When validation is enabled, it rejects modules whose entry points try to call
930 // incompatible functions, so if we got this far, then any functions incompatible
931 // with our selected entry point must not be used.
932 //
933 // When validation is disabled, `fun_info.available_stages` is always just
934 // `ShaderStages::all()`, so this will write all functions in the module, and
935 // the downstream GLSL compiler will catch any problems.
936 if !fun_info.available_stages.contains(ep_info.available_stages) {
937 continue;
938 }
939
940 // Write the function
941 self.write_function(back::FunctionType::Function(handle), function, fun_info)?;
942
943 writeln!(self.out)?;
944 }
945
946 self.write_function(
947 back::FunctionType::EntryPoint(self.entry_point_idx),
948 &self.entry_point.function,
949 ep_info,
950 )?;
951
952 // Add newline at the end of file
953 writeln!(self.out)?;
954
955 // Collect all reflection info and return it to the user
956 self.collect_reflection_info()
957 }
958
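    /// Writes the bracketed size(s) of a (possibly nested) array type.
    ///
    /// For example, an array of 4 elements whose element type is itself an
    /// array of 2 elements is written as `[4][2]`; a dynamically sized array
    /// is written as `[]`.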
959 fn write_array_size(
960 &mut self,
961 base: Handle<crate::Type>,
962 size: crate::ArraySize,
963 ) -> BackendResult {
964 write!(self.out, "[")?;
965
966 // Write the array size
967 // Writes nothing if `ArraySize::Dynamic`
968 match size {
969 crate::ArraySize::Constant(size) => {
970 write!(self.out, "{size}")?;
971 }
972 crate::ArraySize::Dynamic => (),
973 }
974
975 write!(self.out, "]")?;
976
977 if let TypeInner::Array {
978 base: next_base,
979 size: next_size,
980 ..
981 } = self.module.types[base].inner
982 {
983 self.write_array_size(next_base, next_size)?;
984 }
985
986 Ok(())
987 }
988
989 /// Helper method used to write value types
990 ///
991 /// # Notes
992 /// Adds no trailing or leading whitespace
993 fn write_value_type(&mut self, inner: &TypeInner) -> BackendResult {
994 match *inner {
            // Scalars are simple; we just get the full name from `glsl_scalar`
996 TypeInner::Scalar(scalar)
997 | TypeInner::Atomic(scalar)
998 | TypeInner::ValuePointer {
999 size: None,
1000 scalar,
1001 space: _,
1002 } => write!(self.out, "{}", glsl_scalar(scalar)?.full)?,
1003 // Vectors are just `gvecN` where `g` is the scalar prefix and `N` is the vector size
1004 TypeInner::Vector { size, scalar }
1005 | TypeInner::ValuePointer {
1006 size: Some(size),
1007 scalar,
1008 space: _,
1009 } => write!(self.out, "{}vec{}", glsl_scalar(scalar)?.prefix, size as u8)?,
1010 // Matrices are written with `gmatMxN` where `g` is the scalar prefix (only floats and
1011 // doubles are allowed), `M` is the columns count and `N` is the rows count
1012 //
1013 // glsl supports a matrix shorthand `gmatN` where `N` = `M` but it doesn't justify the
1014 // extra branch to write matrices this way
1015 TypeInner::Matrix {
1016 columns,
1017 rows,
1018 scalar,
1019 } => write!(
1020 self.out,
1021 "{}mat{}x{}",
1022 glsl_scalar(scalar)?.prefix,
1023 columns as u8,
1024 rows as u8
1025 )?,
1026 // GLSL arrays are written as `type name[size]`
1027 // Here we only write the size of the array i.e. `[size]`
1028 // Base `type` and `name` should be written outside
1029 TypeInner::Array { base, size, .. } => self.write_array_size(base, size)?,
            // Write all variants instead of `_` so that if new variants are added a
            // non-exhaustiveness error is thrown
1032 TypeInner::Pointer { .. }
1033 | TypeInner::Struct { .. }
1034 | TypeInner::Image { .. }
1035 | TypeInner::Sampler { .. }
1036 | TypeInner::AccelerationStructure
1037 | TypeInner::RayQuery
1038 | TypeInner::BindingArray { .. } => {
1039 return Err(Error::Custom(format!("Unable to write type {inner:?}")))
1040 }
1041 }
1042
1043 Ok(())
1044 }
1045
    /// Helper method used to write non-image/sampler types
1047 ///
1048 /// # Notes
1049 /// Adds no trailing or leading whitespace
1050 fn write_type(&mut self, ty: Handle<crate::Type>) -> BackendResult {
1051 match self.module.types[ty].inner {
1052 // glsl has no pointer types so just write types as normal and loads are skipped
1053 TypeInner::Pointer { base, .. } => self.write_type(base),
1054 // glsl structs are written as just the struct name
1055 TypeInner::Struct { .. } => {
1056 // Get the struct name
1057 let name = &self.names[&NameKey::Type(ty)];
1058 write!(self.out, "{name}")?;
1059 Ok(())
1060 }
1061 // glsl array has the size separated from the base type
1062 TypeInner::Array { base, .. } => self.write_type(base),
1063 ref other => self.write_value_type(other),
1064 }
1065 }
1066
    /// Helper method to write an image type
1068 ///
1069 /// # Notes
1070 /// Adds no leading or trailing whitespace
1071 fn write_image_type(
1072 &mut self,
1073 dim: crate::ImageDimension,
1074 arrayed: bool,
1075 class: crate::ImageClass,
1076 ) -> BackendResult {
        // glsl images consist of four parts: the scalar prefix, the image "type", the dimensions
        // and the modifiers
        //
        // There exist two image types
        // - sampler - for sampled images
        // - image - for storage images
        //
        // There are three possible modifiers that can be used together and must be written in
        // this order to be valid
        // - MS - used if it's a multisampled image
        // - Array - used if it's an image array
        // - Shadow - used if it's a depth image
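        //
        // For example, a multisampled array of sampled float images comes out as
        // `sampler2DMSArray`, while an `rgba8` storage image becomes `image2D`
        // (its format is written separately as a layout qualifier).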
1089 use crate::ImageClass as Ic;
1090
1091 let (base, kind, ms, comparison) = match class {
1092 Ic::Sampled { kind, multi: true } => ("sampler", kind, "MS", ""),
1093 Ic::Sampled { kind, multi: false } => ("sampler", kind, "", ""),
1094 Ic::Depth { multi: true } => ("sampler", crate::ScalarKind::Float, "MS", ""),
1095 Ic::Depth { multi: false } => ("sampler", crate::ScalarKind::Float, "", "Shadow"),
1096 Ic::Storage { format, .. } => ("image", format.into(), "", ""),
1097 };
1098
1099 let precision = if self.options.version.is_es() {
1100 "highp "
1101 } else {
1102 ""
1103 };
1104
1105 write!(
1106 self.out,
1107 "{}{}{}{}{}{}{}",
1108 precision,
1109 glsl_scalar(crate::Scalar { kind, width: 4 })?.prefix,
1110 base,
1111 glsl_dimension(dim),
1112 ms,
1113 if arrayed { "Array" } else { "" },
1114 comparison
1115 )?;
1116
1117 Ok(())
1118 }
1119
    /// Helper method used to write non-image/sampler globals
1121 ///
1122 /// # Notes
1123 /// Adds a newline
1124 ///
1125 /// # Panics
1126 /// If the global has type sampler
1127 fn write_global(
1128 &mut self,
1129 handle: Handle<crate::GlobalVariable>,
1130 global: &crate::GlobalVariable,
1131 ) -> BackendResult {
1132 if self.options.version.supports_explicit_locations() {
1133 if let Some(ref br) = global.binding {
1134 match self.options.binding_map.get(br) {
1135 Some(binding) => {
1136 let layout = match global.space {
1137 crate::AddressSpace::Storage { .. } => {
1138 if self.options.version.supports_std430_layout() {
1139 "std430, "
1140 } else {
1141 "std140, "
1142 }
1143 }
1144 crate::AddressSpace::Uniform => "std140, ",
1145 _ => "",
1146 };
1147 write!(self.out, "layout({layout}binding = {binding}) ")?
1148 }
1149 None => {
1150 log::debug!("unassigned binding for {:?}", global.name);
1151 if let crate::AddressSpace::Storage { .. } = global.space {
1152 if self.options.version.supports_std430_layout() {
1153 write!(self.out, "layout(std430) ")?
1154 }
1155 }
1156 }
1157 }
1158 }
1159 }
1160
1161 if let crate::AddressSpace::Storage { access } = global.space {
1162 self.write_storage_access(access)?;
1163 }
1164
1165 if let Some(storage_qualifier) = glsl_storage_qualifier(global.space) {
1166 write!(self.out, "{storage_qualifier} ")?;
1167 }
1168
1169 match global.space {
1170 crate::AddressSpace::Private => {
1171 self.write_simple_global(handle, global)?;
1172 }
1173 crate::AddressSpace::WorkGroup => {
1174 self.write_simple_global(handle, global)?;
1175 }
1176 crate::AddressSpace::PushConstant => {
1177 self.write_simple_global(handle, global)?;
1178 }
1179 crate::AddressSpace::Uniform => {
1180 self.write_interface_block(handle, global)?;
1181 }
1182 crate::AddressSpace::Storage { .. } => {
1183 self.write_interface_block(handle, global)?;
1184 }
1185 // A global variable in the `Function` address space is a
1186 // contradiction in terms.
1187 crate::AddressSpace::Function => unreachable!(),
1188 // Textures and samplers are handled directly in `Writer::write`.
1189 crate::AddressSpace::Handle => unreachable!(),
1190 }
1191
1192 Ok(())
1193 }
1194
1195 fn write_simple_global(
1196 &mut self,
1197 handle: Handle<crate::GlobalVariable>,
1198 global: &crate::GlobalVariable,
1199 ) -> BackendResult {
1200 self.write_type(global.ty)?;
1201 write!(self.out, " ")?;
1202 self.write_global_name(handle, global)?;
1203
1204 if let TypeInner::Array { base, size, .. } = self.module.types[global.ty].inner {
1205 self.write_array_size(base, size)?;
1206 }
1207
1208 if global.space.initializable() && is_value_init_supported(self.module, global.ty) {
1209 write!(self.out, " = ")?;
1210 if let Some(init) = global.init {
1211 self.write_const_expr(init)?;
1212 } else {
1213 self.write_zero_init_value(global.ty)?;
1214 }
1215 }
1216
1217 writeln!(self.out, ";")?;
1218
1219 if let crate::AddressSpace::PushConstant = global.space {
1220 let global_name = self.get_global_name(handle, global);
1221 self.reflection_names_globals.insert(handle, global_name);
1222 }
1223
1224 Ok(())
1225 }
1226
1227 /// Write an interface block for a single Naga global.
1228 ///
    /// Write `block_name { members }`. Since `block_name` must be unique
    /// between blocks and structs, we add `_block_ID` where `ID` is an
    /// `IdGenerator`-generated number. Write `members` in the same way we write
    /// a struct's members.
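    ///
    /// As an illustration, a uniform struct `Globals` used from a fragment entry
    /// point may produce something along the lines of
    /// `uniform Globals_block_0Fragment { ... } _group_0_binding_0_fs;`
    /// (the exact id and member layout depend on the module).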
1233 fn write_interface_block(
1234 &mut self,
1235 handle: Handle<crate::GlobalVariable>,
1236 global: &crate::GlobalVariable,
1237 ) -> BackendResult {
        // Write the block name; it's just the struct name with `_block_ID` appended
1239 let ty_name = &self.names[&NameKey::Type(global.ty)];
1240 let block_name = format!(
1241 "{}_block_{}{:?}",
1242 // avoid double underscores as they are reserved in GLSL
1243 ty_name.trim_end_matches('_'),
1244 self.block_id.generate(),
1245 self.entry_point.stage,
1246 );
1247 write!(self.out, "{block_name} ")?;
1248 self.reflection_names_globals.insert(handle, block_name);
1249
1250 match self.module.types[global.ty].inner {
1251 crate::TypeInner::Struct { ref members, .. }
1252 if self.module.types[members.last().unwrap().ty]
1253 .inner
1254 .is_dynamically_sized(&self.module.types) =>
1255 {
1256 // Structs with dynamically sized arrays must have their
1257 // members lifted up as members of the interface block. GLSL
1258 // can't write such struct types anyway.
1259 self.write_struct_body(global.ty, members)?;
1260 write!(self.out, " ")?;
1261 self.write_global_name(handle, global)?;
1262 }
1263 _ => {
1264 // A global of any other type is written as the sole member
1265 // of the interface block. Since the interface block is
1266 // anonymous, this becomes visible in the global scope.
1267 write!(self.out, "{{ ")?;
1268 self.write_type(global.ty)?;
1269 write!(self.out, " ")?;
1270 self.write_global_name(handle, global)?;
1271 if let TypeInner::Array { base, size, .. } = self.module.types[global.ty].inner {
1272 self.write_array_size(base, size)?;
1273 }
1274 write!(self.out, "; }}")?;
1275 }
1276 }
1277
1278 writeln!(self.out, ";")?;
1279
1280 Ok(())
1281 }
1282
    /// Helper method used to find which expressions of a given function require baking
    ///
    /// # Notes
    /// Clears the `need_bake_expressions` set before adding to it
1287 fn update_expressions_to_bake(&mut self, func: &crate::Function, info: &valid::FunctionInfo) {
1288 use crate::Expression;
1289 self.need_bake_expressions.clear();
1290 for (fun_handle, expr) in func.expressions.iter() {
1291 let expr_info = &info[fun_handle];
1292 let min_ref_count = func.expressions[fun_handle].bake_ref_count();
1293 if min_ref_count <= expr_info.ref_count {
1294 self.need_bake_expressions.insert(fun_handle);
1295 }
1296
1297 let inner = expr_info.ty.inner_with(&self.module.types);
1298
1299 if let Expression::Math {
1300 fun,
1301 arg,
1302 arg1,
1303 arg2,
1304 ..
1305 } = *expr
1306 {
1307 match fun {
1308 crate::MathFunction::Dot => {
                        // if the expression is a Dot product with integer arguments,
                        // then the arguments need baking as well
1311 if let TypeInner::Scalar(crate::Scalar { kind, .. }) = *inner {
1312 match kind {
1313 crate::ScalarKind::Sint | crate::ScalarKind::Uint => {
1314 self.need_bake_expressions.insert(arg);
1315 self.need_bake_expressions.insert(arg1.unwrap());
1316 }
1317 _ => {}
1318 }
1319 }
1320 }
1321 crate::MathFunction::ExtractBits => {
1322 // Only argument 1 is re-used.
1323 self.need_bake_expressions.insert(arg1.unwrap());
1324 }
1325 crate::MathFunction::InsertBits => {
1326 // Only argument 2 is re-used.
1327 self.need_bake_expressions.insert(arg2.unwrap());
1328 }
1329 crate::MathFunction::CountLeadingZeros => {
1330 if let Some(crate::ScalarKind::Sint) = inner.scalar_kind() {
1331 self.need_bake_expressions.insert(arg);
1332 }
1333 }
1334 _ => {}
1335 }
1336 }
1337 }
1338 }
1339
1340 /// Helper method used to get a name for a global
1341 ///
    /// Globals have different naming schemes depending on their binding:
    /// - Globals without bindings use the name from the [`Namer`](crate::proc::Namer)
    /// - Globals with a resource binding are named `_group_X_binding_Y_S` where `X`
    ///   is the group, `Y` is the binding and `S` is the entry point stage suffix
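    /// - e.g. a global with group `1`, binding `2` used from a vertex entry point
    ///   is named `_group_1_binding_2_vs`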
1346 fn get_global_name(
1347 &self,
1348 handle: Handle<crate::GlobalVariable>,
1349 global: &crate::GlobalVariable,
1350 ) -> String {
1351 match (&global.binding, global.space) {
1352 (&Some(ref br), _) => {
1353 format!(
1354 "_group_{}_binding_{}_{}",
1355 br.group,
1356 br.binding,
1357 self.entry_point.stage.to_str()
1358 )
1359 }
1360 (&None, crate::AddressSpace::PushConstant) => {
1361 format!("_push_constant_binding_{}", self.entry_point.stage.to_str())
1362 }
1363 (&None, _) => self.names[&NameKey::GlobalVariable(handle)].clone(),
1364 }
1365 }
1366
1367 /// Helper method used to write a name for a global without additional heap allocation
1368 fn write_global_name(
1369 &mut self,
1370 handle: Handle<crate::GlobalVariable>,
1371 global: &crate::GlobalVariable,
1372 ) -> BackendResult {
1373 match (&global.binding, global.space) {
1374 (&Some(ref br), _) => write!(
1375 self.out,
1376 "_group_{}_binding_{}_{}",
1377 br.group,
1378 br.binding,
1379 self.entry_point.stage.to_str()
1380 )?,
1381 (&None, crate::AddressSpace::PushConstant) => write!(
1382 self.out,
1383 "_push_constant_binding_{}",
1384 self.entry_point.stage.to_str()
1385 )?,
1386 (&None, _) => write!(
1387 self.out,
1388 "{}",
1389 &self.names[&NameKey::GlobalVariable(handle)]
1390 )?,
1391 }
1392
1393 Ok(())
1394 }
1395
1396 /// Write a GLSL global that will carry a Naga entry point's argument or return value.
1397 ///
1398 /// A Naga entry point's arguments and return value are rendered in GLSL as
1399 /// variables at global scope with the `in` and `out` storage qualifiers.
1400 /// The code we generate for `main` loads from all the `in` globals into
1401 /// appropriately named locals. Before it returns, `main` assigns the
1402 /// components of its return value into all the `out` globals.
1403 ///
1404 /// This function writes a declaration for one such GLSL global,
1405 /// representing a value passed into or returned from [`self.entry_point`]
1406 /// that has a [`Location`] binding. The global's name is generated based on
1407 /// the location index and the shader stages being connected; see
1408 /// [`VaryingName`]. This means we don't need to know the names of
1409 /// arguments, just their types and bindings.
1410 ///
1411 /// Emit nothing for entry point arguments or return values with [`BuiltIn`]
1412 /// bindings; `main` will read from or assign to the appropriate GLSL
1413 /// special variable; these are pre-declared. As an exception, we do declare
1414 /// `gl_Position` or `gl_FragCoord` with the `invariant` qualifier if
1415 /// needed.
1416 ///
1417 /// Use `output` together with [`self.entry_point.stage`] to determine which
1418 /// shader stages are being connected, and choose the `in` or `out` storage
1419 /// qualifier.
1420 ///
1421 /// [`self.entry_point`]: Writer::entry_point
1422 /// [`self.entry_point.stage`]: crate::EntryPoint::stage
1423 /// [`Location`]: crate::Binding::Location
1424 /// [`BuiltIn`]: crate::Binding::BuiltIn
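    ///
    /// For example, a perspective-interpolated, location `0` input of a fragment
    /// entry point might be emitted as something like
    /// `layout(location = 0) smooth in vec4 _vs2fs_location0;` (the exact output
    /// depends on the version and writer options).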
1425 fn write_varying(
1426 &mut self,
1427 binding: Option<&crate::Binding>,
1428 ty: Handle<crate::Type>,
1429 output: bool,
1430 ) -> Result<(), Error> {
1431 // For a struct, emit a separate global for each member with a binding.
1432 if let crate::TypeInner::Struct { ref members, .. } = self.module.types[ty].inner {
1433 for member in members {
1434 self.write_varying(member.binding.as_ref(), member.ty, output)?;
1435 }
1436 return Ok(());
1437 }
1438
1439 let binding = match binding {
1440 None => return Ok(()),
1441 Some(binding) => binding,
1442 };
1443
1444 let (location, interpolation, sampling, second_blend_source) = match *binding {
1445 crate::Binding::Location {
1446 location,
1447 interpolation,
1448 sampling,
1449 second_blend_source,
1450 } => (location, interpolation, sampling, second_blend_source),
1451 crate::Binding::BuiltIn(built_in) => {
1452 if let crate::BuiltIn::Position { invariant: true } = built_in {
1453 match (self.options.version, self.entry_point.stage) {
1454 (
1455 Version::Embedded {
1456 version: 300,
1457 is_webgl: true,
1458 },
1459 ShaderStage::Fragment,
1460 ) => {
1461 // `invariant gl_FragCoord` is not allowed in WebGL2 and possibly
1462 // OpenGL ES in general (waiting on confirmation).
1463 //
1464 // See https://github.com/KhronosGroup/WebGL/issues/3518
1465 }
1466 _ => {
1467 writeln!(
1468 self.out,
1469 "invariant {};",
1470 glsl_built_in(
1471 built_in,
1472 VaryingOptions::from_writer_options(self.options, output)
1473 )
1474 )?;
1475 }
1476 }
1477 }
1478 return Ok(());
1479 }
1480 };
1481
1482 // Write the interpolation modifier if needed
1483 //
1484 // We ignore all interpolation and auxiliary modifiers that aren't used in fragment
1485 // shaders' input globals or vertex shaders' output globals.
1486 let emit_interpolation_and_auxiliary = match self.entry_point.stage {
1487 ShaderStage::Vertex => output,
1488 ShaderStage::Fragment => !output,
1489 ShaderStage::Compute => false,
1490 };
1491
1492 // Write the I/O locations, if allowed
1493 let io_location = if self.options.version.supports_explicit_locations()
1494 || !emit_interpolation_and_auxiliary
1495 {
1496 if self.options.version.supports_io_locations() {
1497 if second_blend_source {
1498 write!(self.out, "layout(location = {location}, index = 1) ")?;
1499 } else {
1500 write!(self.out, "layout(location = {location}) ")?;
1501 }
1502 None
1503 } else {
1504 Some(VaryingLocation {
1505 location,
1506 index: second_blend_source as u32,
1507 })
1508 }
1509 } else {
1510 None
1511 };
1512
1513 // Write the interpolation qualifier.
1514 if let Some(interp) = interpolation {
1515 if emit_interpolation_and_auxiliary {
1516 write!(self.out, "{} ", glsl_interpolation(interp))?;
1517 }
1518 }
1519
1520 // Write the sampling auxiliary qualifier.
1521 //
1522 // Before GLSL 4.2, the `centroid` and `sample` qualifiers were required to appear
1523 // immediately before the `in` / `out` qualifier, so we'll just follow that rule
1524 // here, regardless of the version.
1525 if let Some(sampling) = sampling {
1526 if emit_interpolation_and_auxiliary {
1527 if let Some(qualifier) = glsl_sampling(sampling) {
1528 write!(self.out, "{qualifier} ")?;
1529 }
1530 }
1531 }
1532
1533 // Write the input/output qualifier.
1534 write!(self.out, "{} ", if output { "out" } else { "in" })?;
1535
1536 // Write the type
1537 // `write_type` adds no leading or trailing spaces
1538 self.write_type(ty)?;
1539
1540 // Finally write the global name and end the global with a `;` and a newline
1541 // Leading space is important
1542 let vname = VaryingName {
1543 binding: &crate::Binding::Location {
1544 location,
1545 interpolation: None,
1546 sampling: None,
1547 second_blend_source,
1548 },
1549 stage: self.entry_point.stage,
1550 options: VaryingOptions::from_writer_options(self.options, output),
1551 };
1552 writeln!(self.out, " {vname};")?;
1553
1554 if let Some(location) = io_location {
1555 self.varying.insert(vname.to_string(), location);
1556 }
1557
1558 Ok(())
1559 }
1560
1561 /// Helper method used to write functions (both entry points and regular functions)
1562 ///
1563 /// # Notes
1564 /// Adds a newline
1565 fn write_function(
1566 &mut self,
1567 ty: back::FunctionType,
1568 func: &crate::Function,
1569 info: &valid::FunctionInfo,
1570 ) -> BackendResult {
1571 // Create a function context for the function being written
1572 let ctx = back::FunctionCtx {
1573 ty,
1574 info,
1575 expressions: &func.expressions,
1576 named_expressions: &func.named_expressions,
1577 };
1578
1579 self.named_expressions.clear();
1580 self.update_expressions_to_bake(func, info);
1581
1582 // Write the function header
1583 //
1584 // glsl headers are the same as in c:
1585 // `ret_type name(args)`
1586 // `ret_type` is the return type
1587 // `name` is the function name
1588 // `args` is a comma separated list of `type name`
1589 // | - `type` is the argument type
1590 // | - `name` is the argument name
1591
1592 // Start by writing the return type if any otherwise write void
1593 // This is the only place where `void` is a valid type
1594 // (though it's more a keyword than a type)
1595 if let back::FunctionType::EntryPoint(_) = ctx.ty {
1596 write!(self.out, "void")?;
1597 } else if let Some(ref result) = func.result {
1598 self.write_type(result.ty)?;
1599 if let TypeInner::Array { base, size, .. } = self.module.types[result.ty].inner {
1600 self.write_array_size(base, size)?
1601 }
1602 } else {
1603 write!(self.out, "void")?;
1604 }
1605
1606 // Write the function name and open parentheses for the argument list
1607 let function_name = match ctx.ty {
1608 back::FunctionType::Function(handle) => &self.names[&NameKey::Function(handle)],
1609 back::FunctionType::EntryPoint(_) => "main",
1610 };
1611 write!(self.out, " {function_name}(")?;
1612
1613 // Write the comma separated argument list
1614 //
1615 // We need access to `Self` here so we use the reference passed to the closure as an
1616 // argument instead of capturing as that would cause a borrow checker error
1617 let arguments = match ctx.ty {
1618 back::FunctionType::EntryPoint(_) => &[][..],
1619 back::FunctionType::Function(_) => &func.arguments,
1620 };
1621 let arguments: Vec<_> = arguments
1622 .iter()
1623 .enumerate()
1624 .filter(|&(_, arg)| match self.module.types[arg.ty].inner {
1625 TypeInner::Sampler { .. } => false,
1626 _ => true,
1627 })
1628 .collect();
1629 self.write_slice(&arguments, |this, _, &(i, arg)| {
1630 // Write the argument type
1631 match this.module.types[arg.ty].inner {
1632 // We treat images separately because they might require
1633 // writing the storage format
1634 TypeInner::Image {
1635 dim,
1636 arrayed,
1637 class,
1638 } => {
1639 // Write the storage format if needed
1640 if let TypeInner::Image {
1641 class: crate::ImageClass::Storage { format, .. },
1642 ..
1643 } = this.module.types[arg.ty].inner
1644 {
1645 write!(this.out, "layout({}) ", glsl_storage_format(format)?)?;
1646 }
1647
1648 // write the type
1649 //
                    // This is why we need the leading space: `write_image_type` doesn't add
                    // any spaces at the beginning or end
1652 this.write_image_type(dim, arrayed, class)?;
1653 }
1654 TypeInner::Pointer { base, .. } => {
1655 // write parameter qualifiers
1656 write!(this.out, "inout ")?;
1657 this.write_type(base)?;
1658 }
1659 // All other types are written by `write_type`
1660 _ => {
1661 this.write_type(arg.ty)?;
1662 }
1663 }
1664
1665 // Write the argument name
1666 // The leading space is important
1667 write!(this.out, " {}", &this.names[&ctx.argument_key(i as u32)])?;
1668
1669 // Write array size
1670 match this.module.types[arg.ty].inner {
1671 TypeInner::Array { base, size, .. } => {
1672 this.write_array_size(base, size)?;
1673 }
1674 TypeInner::Pointer { base, .. } => {
1675 if let TypeInner::Array { base, size, .. } = this.module.types[base].inner {
1676 this.write_array_size(base, size)?;
1677 }
1678 }
1679 _ => {}
1680 }
1681
1682 Ok(())
1683 })?;
1684
1685 // Close the parentheses and open braces to start the function body
1686 writeln!(self.out, ") {{")?;
1687
1688 if self.options.zero_initialize_workgroup_memory
1689 && ctx.ty.is_compute_entry_point(self.module)
1690 {
1691 self.write_workgroup_variables_initialization(&ctx)?;
1692 }
1693
1694 // Compose the function arguments from globals, in case of an entry point.
1695 if let back::FunctionType::EntryPoint(ep_index) = ctx.ty {
1696 let stage = self.module.entry_points[ep_index as usize].stage;
1697 for (index, arg) in func.arguments.iter().enumerate() {
1698 write!(self.out, "{}", back::INDENT)?;
1699 self.write_type(arg.ty)?;
1700 let name = &self.names[&NameKey::EntryPointArgument(ep_index, index as u32)];
1701 write!(self.out, " {name}")?;
1702 write!(self.out, " = ")?;
1703 match self.module.types[arg.ty].inner {
1704 crate::TypeInner::Struct { ref members, .. } => {
1705 self.write_type(arg.ty)?;
1706 write!(self.out, "(")?;
1707 for (index, member) in members.iter().enumerate() {
1708 let varying_name = VaryingName {
1709 binding: member.binding.as_ref().unwrap(),
1710 stage,
1711 options: VaryingOptions::from_writer_options(self.options, false),
1712 };
1713 if index != 0 {
1714 write!(self.out, ", ")?;
1715 }
1716 write!(self.out, "{varying_name}")?;
1717 }
1718 writeln!(self.out, ");")?;
1719 }
1720 _ => {
1721 let varying_name = VaryingName {
1722 binding: arg.binding.as_ref().unwrap(),
1723 stage,
1724 options: VaryingOptions::from_writer_options(self.options, false),
1725 };
1726 writeln!(self.out, "{varying_name};")?;
1727 }
1728 }
1729 }
1730 }
1731
1732 // Write all function locals
// Locals are `type name (= init)?;` where the init part (including the `=`) is optional
1734 //
1735 // Always adds a newline
1736 for (handle, local) in func.local_variables.iter() {
1737 // Write indentation (only for readability) and the type
1738 // `write_type` adds no trailing space
1739 write!(self.out, "{}", back::INDENT)?;
1740 self.write_type(local.ty)?;
1741
1742 // Write the local name
1743 // The leading space is important
1744 write!(self.out, " {}", self.names[&ctx.name_key(handle)])?;
1745 // Write size for array type
1746 if let TypeInner::Array { base, size, .. } = self.module.types[local.ty].inner {
1747 self.write_array_size(base, size)?;
1748 }
1749 // Write the local initializer if needed
1750 if let Some(init) = local.init {
// Put the equals sign only if there's an initializer
1752 // The leading and trailing spaces aren't needed but help with readability
1753 write!(self.out, " = ")?;
1754
// Write the initializer expression
// `write_expr` adds no trailing or leading space/newline
1757 self.write_expr(init, &ctx)?;
1758 } else if is_value_init_supported(self.module, local.ty) {
1759 write!(self.out, " = ")?;
1760 self.write_zero_init_value(local.ty)?;
1761 }
1762
1763 // Finish the local with `;` and add a newline (only for readability)
1764 writeln!(self.out, ";")?
1765 }
1766
1767 // Write the function body (statement list)
1768 for sta in func.body.iter() {
1769 // Write a statement, the indentation should always be 1 when writing the function body
1770 // `write_stmt` adds a newline
1771 self.write_stmt(sta, &ctx, back::Level(1))?;
1772 }
1773
1774 // Close braces and add a newline
1775 writeln!(self.out, "}}")?;
1776
1777 Ok(())
1778 }
1779
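/// Write the zero initialization of all `workgroup` globals used by the entry point.
///
/// As a sketch, the emitted GLSL guards the stores so only the first invocation in the
/// workgroup writes, and then issues a workgroup barrier:
///
/// ```text
/// if (gl_LocalInvocationID == uvec3(0u)) {
///     shared_data = <zero value>;
/// }
/// <workgroup barrier>
/// ```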
1780 fn write_workgroup_variables_initialization(
1781 &mut self,
1782 ctx: &back::FunctionCtx,
1783 ) -> BackendResult {
1784 let mut vars = self
1785 .module
1786 .global_variables
1787 .iter()
1788 .filter(|&(handle, var)| {
1789 !ctx.info[handle].is_empty() && var.space == crate::AddressSpace::WorkGroup
1790 })
1791 .peekable();
1792
1793 if vars.peek().is_some() {
1794 let level = back::Level(1);
1795
1796 writeln!(self.out, "{level}if (gl_LocalInvocationID == uvec3(0u)) {{")?;
1797
1798 for (handle, var) in vars {
1799 let name = &self.names[&NameKey::GlobalVariable(handle)];
1800 write!(self.out, "{}{} = ", level.next(), name)?;
1801 self.write_zero_init_value(var.ty)?;
1802 writeln!(self.out, ";")?;
1803 }
1804
1805 writeln!(self.out, "{level}}}")?;
1806 self.write_barrier(crate::Barrier::WORK_GROUP, level)?;
1807 }
1808
1809 Ok(())
1810 }
1811
1812 /// Write a list of comma separated `T` values using a writer function `F`.
1813 ///
/// The writer function `F` receives a mutable reference to `self` as its first argument, so
/// it can write without borrow checker issues (capturing `self` in a closure would cause
/// them); the second argument is the 0-based index of the element in the list, and the last
/// argument is a reference to the element `T` being written
1818 ///
1819 /// # Notes
1820 /// - Adds no newlines or leading/trailing whitespace
1821 /// - The last element won't have a trailing `,`
1822 fn write_slice<T, F: FnMut(&mut Self, u32, &T) -> BackendResult>(
1823 &mut self,
1824 data: &[T],
1825 mut f: F,
1826 ) -> BackendResult {
1827 // Loop through `data` invoking `f` for each element
1828 for (index, item) in data.iter().enumerate() {
1829 if index != 0 {
1830 write!(self.out, ", ")?;
1831 }
1832 f(self, index as u32, item)?;
1833 }
1834
1835 Ok(())
1836 }
1837
1838 /// Helper method used to write global constants
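///
/// As a sketch, the output has the shape `const <type> <name>[<size>] = <init>;`
/// (the array size only appears for array constants).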
1839 fn write_global_constant(&mut self, handle: Handle<crate::Constant>) -> BackendResult {
1840 write!(self.out, "const ")?;
1841 let constant = &self.module.constants[handle];
1842 self.write_type(constant.ty)?;
1843 let name = &self.names[&NameKey::Constant(handle)];
1844 write!(self.out, " {name}")?;
1845 if let TypeInner::Array { base, size, .. } = self.module.types[constant.ty].inner {
1846 self.write_array_size(base, size)?;
1847 }
1848 write!(self.out, " = ")?;
1849 self.write_const_expr(constant.init)?;
1850 writeln!(self.out, ";")?;
1851 Ok(())
1852 }
1853
1854 /// Helper method used to output a dot product as an arithmetic expression
1855 ///
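/// As a sketch, for two `vec3` arguments the emitted expression looks roughly like
/// `( + a.x * b.x + a.y * b.y + a.z * b.z)`; the leading `+` is harmless in GLSL.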
1856 fn write_dot_product(
1857 &mut self,
1858 arg: Handle<crate::Expression>,
1859 arg1: Handle<crate::Expression>,
1860 size: usize,
1861 ctx: &back::FunctionCtx,
1862 ) -> BackendResult {
1863 // Write parentheses around the dot product expression to prevent operators
1864 // with different precedences from applying earlier.
1865 write!(self.out, "(")?;
1866
// Cycle through all the components of the vector
1868 for index in 0..size {
1869 let component = back::COMPONENTS[index];
1870 // Write the addition to the previous product
1871 // This will print an extra '+' at the beginning but that is fine in glsl
1872 write!(self.out, " + ")?;
1873 // Write the first vector expression, this expression is marked to be
1874 // cached so unless it can't be cached (for example, it's a Constant)
1875 // it shouldn't produce large expressions.
1876 self.write_expr(arg, ctx)?;
1877 // Access the current component on the first vector
1878 write!(self.out, ".{component} * ")?;
1879 // Write the second vector expression, this expression is marked to be
1880 // cached so unless it can't be cached (for example, it's a Constant)
1881 // it shouldn't produce large expressions.
1882 self.write_expr(arg1, ctx)?;
1883 // Access the current component on the second vector
1884 write!(self.out, ".{component}")?;
1885 }
1886
1887 write!(self.out, ")")?;
1888 Ok(())
1889 }
1890
1891 /// Helper method used to write structs
1892 ///
1893 /// # Notes
/// Does not add a trailing newline
1895 fn write_struct_body(
1896 &mut self,
1897 handle: Handle<crate::Type>,
1898 members: &[crate::StructMember],
1899 ) -> BackendResult {
1900 // glsl structs are written as in C
// `struct name { members };`
1902 // | `struct` is a keyword
1903 // | `name` is the struct name
1904 // | `members` is a semicolon separated list of `type name`
1905 // | `type` is the member type
1906 // | `name` is the member name
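//
// As a sketch, the body written here looks roughly like
// ```
// {
//     float member_a;
//     vec4 member_b[2];
// }
// ```
// (member names are illustrative; the `struct name` prefix is expected from the caller)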
1907 writeln!(self.out, "{{")?;
1908
1909 for (idx, member) in members.iter().enumerate() {
1910 // The indentation is only for readability
1911 write!(self.out, "{}", back::INDENT)?;
1912
1913 match self.module.types[member.ty].inner {
1914 TypeInner::Array {
1915 base,
1916 size,
1917 stride: _,
1918 } => {
1919 self.write_type(base)?;
1920 write!(
1921 self.out,
1922 " {}",
1923 &self.names[&NameKey::StructMember(handle, idx as u32)]
1924 )?;
1925 // Write [size]
1926 self.write_array_size(base, size)?;
1927 // Newline is important
1928 writeln!(self.out, ";")?;
1929 }
1930 _ => {
1931 // Write the member type
1932 // Adds no trailing space
1933 self.write_type(member.ty)?;
1934
1935 // Write the member name and put a semicolon
1936 // The leading space is important
1937 // All members must have a semicolon even the last one
1938 writeln!(
1939 self.out,
1940 " {};",
1941 &self.names[&NameKey::StructMember(handle, idx as u32)]
1942 )?;
1943 }
1944 }
1945 }
1946
1947 write!(self.out, "}}")?;
1948 Ok(())
1949 }
1950
1951 /// Helper method used to write statements
1952 ///
1953 /// # Notes
1954 /// Always adds a newline
1955 fn write_stmt(
1956 &mut self,
1957 sta: &crate::Statement,
1958 ctx: &back::FunctionCtx,
1959 level: back::Level,
1960 ) -> BackendResult {
1961 use crate::Statement;
1962
1963 match *sta {
1964 // This is where we can generate intermediate constants for some expression types.
1965 Statement::Emit(ref range) => {
1966 for handle in range.clone() {
1967 let ptr_class = ctx.resolve_type(handle, &self.module.types).pointer_space();
1968 let expr_name = if ptr_class.is_some() {
1969 // GLSL can't save a pointer-valued expression in a variable,
1970 // but we shouldn't ever need to: they should never be named expressions,
1971 // and none of the expression types flagged by bake_ref_count can be pointer-valued.
1972 None
1973 } else if let Some(name) = ctx.named_expressions.get(&handle) {
// The frontend provides names for all named expressions before writing begins,
// but we emit them step by step, so we need to re-register each name here.
// Otherwise, we could accidentally write a variable name instead of the full expression.
// We also use sanitized names, which keeps the backend from generating variables
// named after reserved keywords.
1978 Some(self.namer.call(name))
1979 } else if self.need_bake_expressions.contains(&handle) {
1980 Some(format!("{}{}", back::BAKE_PREFIX, handle.index()))
1981 } else {
1982 None
1983 };
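// Sketch: when a name was chosen above, the expression is later emitted by
// `write_named_expr` as a plain local declaration along the lines of
// `float _e12 = <expr>;` (assuming the usual `back::BAKE_PREFIX` of `_e`).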
1984
// If we are going to write an `ImageLoad` next, the target image is sampled, and we
// are using the `Restrict` policy for bounds checking images, then we need to write a
// local holding the clamped lod.
1988 if let crate::Expression::ImageLoad {
1989 image,
1990 level: Some(level_expr),
1991 ..
1992 } = ctx.expressions[handle]
1993 {
1994 if let TypeInner::Image {
1995 class: crate::ImageClass::Sampled { .. },
1996 ..
1997 } = *ctx.resolve_type(image, &self.module.types)
1998 {
1999 if let proc::BoundsCheckPolicy::Restrict = self.policies.image_load {
2000 write!(self.out, "{level}")?;
2001 self.write_clamped_lod(ctx, handle, image, level_expr)?
2002 }
2003 }
2004 }
2005
2006 if let Some(name) = expr_name {
2007 write!(self.out, "{level}")?;
2008 self.write_named_expr(handle, name, handle, ctx)?;
2009 }
2010 }
2011 }
// Blocks are simple: we just need to write the block statements between braces
2013 // We could also just print the statements but this is more readable and maps more
2014 // closely to the IR
2015 Statement::Block(ref block) => {
2016 write!(self.out, "{level}")?;
2017 writeln!(self.out, "{{")?;
2018 for sta in block.iter() {
2019 // Increase the indentation to help with readability
2020 self.write_stmt(sta, ctx, level.next())?
2021 }
2022 writeln!(self.out, "{level}}}")?
2023 }
2024 // Ifs are written as in C:
2025 // ```
2026 // if(condition) {
2027 // accept
2028 // } else {
2029 // reject
2030 // }
2031 // ```
2032 Statement::If {
2033 condition,
2034 ref accept,
2035 ref reject,
2036 } => {
2037 write!(self.out, "{level}")?;
2038 write!(self.out, "if (")?;
2039 self.write_expr(condition, ctx)?;
2040 writeln!(self.out, ") {{")?;
2041
2042 for sta in accept {
2043 // Increase indentation to help with readability
2044 self.write_stmt(sta, ctx, level.next())?;
2045 }
2046
2047 // If there are no statements in the reject block we skip writing it
2048 // This is only for readability
2049 if !reject.is_empty() {
2050 writeln!(self.out, "{level}}} else {{")?;
2051
2052 for sta in reject {
2053 // Increase indentation to help with readability
2054 self.write_stmt(sta, ctx, level.next())?;
2055 }
2056 }
2057
2058 writeln!(self.out, "{level}}}")?
2059 }
// Switch statements are written as in C:
2061 // ```
2062 // switch (selector) {
2063 // // Fallthrough
2064 // case label:
2065 // block
2066 // // Non fallthrough
2067 // case label:
2068 // block
2069 // break;
2070 // default:
2071 // block
2072 // }
2073 // ```
2074 // Where the `default` case happens isn't important but we put it last
2075 // so that we don't need to print a `break` for it
2076 Statement::Switch {
2077 selector,
2078 ref cases,
2079 } => {
2080 // Start the switch
2081 write!(self.out, "{level}")?;
2082 write!(self.out, "switch(")?;
2083 self.write_expr(selector, ctx)?;
2084 writeln!(self.out, ") {{")?;
2085
2086 // Write all cases
2087 let l2 = level.next();
2088 for case in cases {
2089 match case.value {
2090 crate::SwitchValue::I32(value) => write!(self.out, "{l2}case {value}:")?,
2091 crate::SwitchValue::U32(value) => write!(self.out, "{l2}case {value}u:")?,
2092 crate::SwitchValue::Default => write!(self.out, "{l2}default:")?,
2093 }
2094
2095 let write_block_braces = !(case.fall_through && case.body.is_empty());
2096 if write_block_braces {
2097 writeln!(self.out, " {{")?;
2098 } else {
2099 writeln!(self.out)?;
2100 }
2101
2102 for sta in case.body.iter() {
2103 self.write_stmt(sta, ctx, l2.next())?;
2104 }
2105
2106 if !case.fall_through && case.body.last().map_or(true, |s| !s.is_terminator()) {
2107 writeln!(self.out, "{}break;", l2.next())?;
2108 }
2109
2110 if write_block_braces {
2111 writeln!(self.out, "{l2}}}")?;
2112 }
2113 }
2114
2115 writeln!(self.out, "{level}}}")?
2116 }
// Loops in naga IR are based on wgsl loops; glsl can emulate the behaviour by using a
// `while(true)` loop with the continuing block written at the start of the body, guarded
// by a flag so it's skipped on the first iteration, resulting in:
2119 // ```
2120 // bool loop_init = true;
2121 // while(true) {
2122 // if (!loop_init) { <continuing> }
2123 // loop_init = false;
2124 // <body>
2125 // }
2126 // ```
2127 Statement::Loop {
2128 ref body,
2129 ref continuing,
2130 break_if,
2131 } => {
2132 if !continuing.is_empty() || break_if.is_some() {
2133 let gate_name = self.namer.call("loop_init");
2134 writeln!(self.out, "{level}bool {gate_name} = true;")?;
2135 writeln!(self.out, "{level}while(true) {{")?;
2136 let l2 = level.next();
2137 let l3 = l2.next();
2138 writeln!(self.out, "{l2}if (!{gate_name}) {{")?;
2139 for sta in continuing {
2140 self.write_stmt(sta, ctx, l3)?;
2141 }
2142 if let Some(condition) = break_if {
2143 write!(self.out, "{l3}if (")?;
2144 self.write_expr(condition, ctx)?;
2145 writeln!(self.out, ") {{")?;
2146 writeln!(self.out, "{}break;", l3.next())?;
2147 writeln!(self.out, "{l3}}}")?;
2148 }
2149 writeln!(self.out, "{l2}}}")?;
2150 writeln!(self.out, "{}{} = false;", level.next(), gate_name)?;
2151 } else {
2152 writeln!(self.out, "{level}while(true) {{")?;
2153 }
2154 for sta in body {
2155 self.write_stmt(sta, ctx, level.next())?;
2156 }
2157 writeln!(self.out, "{level}}}")?
2158 }
// Break, continue and return are written as in C
2160 // `break;`
2161 Statement::Break => {
2162 write!(self.out, "{level}")?;
2163 writeln!(self.out, "break;")?
2164 }
2165 // `continue;`
2166 Statement::Continue => {
2167 write!(self.out, "{level}")?;
2168 writeln!(self.out, "continue;")?
2169 }
2170 // `return expr;`, `expr` is optional
2171 Statement::Return { value } => {
2172 write!(self.out, "{level}")?;
2173 match ctx.ty {
2174 back::FunctionType::Function(_) => {
2175 write!(self.out, "return")?;
2176 // Write the expression to be returned if needed
2177 if let Some(expr) = value {
2178 write!(self.out, " ")?;
2179 self.write_expr(expr, ctx)?;
2180 }
2181 writeln!(self.out, ";")?;
2182 }
2183 back::FunctionType::EntryPoint(ep_index) => {
2184 let mut has_point_size = false;
2185 let ep = &self.module.entry_points[ep_index as usize];
2186 if let Some(ref result) = ep.function.result {
2187 let value = value.unwrap();
2188 match self.module.types[result.ty].inner {
2189 crate::TypeInner::Struct { ref members, .. } => {
2190 let temp_struct_name = match ctx.expressions[value] {
2191 crate::Expression::Compose { .. } => {
2192 let return_struct = "_tmp_return";
2193 write!(
2194 self.out,
2195 "{} {} = ",
2196 &self.names[&NameKey::Type(result.ty)],
2197 return_struct
2198 )?;
2199 self.write_expr(value, ctx)?;
2200 writeln!(self.out, ";")?;
2201 write!(self.out, "{level}")?;
2202 Some(return_struct)
2203 }
2204 _ => None,
2205 };
2206
2207 for (index, member) in members.iter().enumerate() {
2208 if let Some(crate::Binding::BuiltIn(
2209 crate::BuiltIn::PointSize,
2210 )) = member.binding
2211 {
2212 has_point_size = true;
2213 }
2214
2215 let varying_name = VaryingName {
2216 binding: member.binding.as_ref().unwrap(),
2217 stage: ep.stage,
2218 options: VaryingOptions::from_writer_options(
2219 self.options,
2220 true,
2221 ),
2222 };
2223 write!(self.out, "{varying_name} = ")?;
2224
2225 if let Some(struct_name) = temp_struct_name {
2226 write!(self.out, "{struct_name}")?;
2227 } else {
2228 self.write_expr(value, ctx)?;
2229 }
2230
2231 // Write field name
2232 writeln!(
2233 self.out,
2234 ".{};",
2235 &self.names
2236 [&NameKey::StructMember(result.ty, index as u32)]
2237 )?;
2238 write!(self.out, "{level}")?;
2239 }
2240 }
2241 _ => {
2242 let name = VaryingName {
2243 binding: result.binding.as_ref().unwrap(),
2244 stage: ep.stage,
2245 options: VaryingOptions::from_writer_options(
2246 self.options,
2247 true,
2248 ),
2249 };
2250 write!(self.out, "{name} = ")?;
2251 self.write_expr(value, ctx)?;
2252 writeln!(self.out, ";")?;
2253 write!(self.out, "{level}")?;
2254 }
2255 }
2256 }
2257
2258 let is_vertex_stage = self.module.entry_points[ep_index as usize].stage
2259 == ShaderStage::Vertex;
2260 if is_vertex_stage
2261 && self
2262 .options
2263 .writer_flags
2264 .contains(WriterFlags::ADJUST_COORDINATE_SPACE)
2265 {
2266 writeln!(
2267 self.out,
2268 "gl_Position.yz = vec2(-gl_Position.y, gl_Position.z * 2.0 - gl_Position.w);",
2269 )?;
2270 write!(self.out, "{level}")?;
2271 }
2272
2273 if is_vertex_stage
2274 && self
2275 .options
2276 .writer_flags
2277 .contains(WriterFlags::FORCE_POINT_SIZE)
2278 && !has_point_size
2279 {
2280 writeln!(self.out, "gl_PointSize = 1.0;")?;
2281 write!(self.out, "{level}")?;
2282 }
2283 writeln!(self.out, "return;")?;
2284 }
2285 }
2286 }
// This is one of the places where glsl adds to the syntax of C: the discard
// keyword, which ceases all further processing in a fragment shader. It's called OpKill
// in spir-v, which is why the statement is named `Statement::Kill`
2290 Statement::Kill => writeln!(self.out, "{level}discard;")?,
2291 Statement::Barrier(flags) => {
2292 self.write_barrier(flags, level)?;
2293 }
2294 // Stores in glsl are just variable assignments written as `pointer = value;`
2295 Statement::Store { pointer, value } => {
2296 write!(self.out, "{level}")?;
2297 self.write_expr(pointer, ctx)?;
2298 write!(self.out, " = ")?;
2299 self.write_expr(value, ctx)?;
2300 writeln!(self.out, ";")?
2301 }
2302 Statement::WorkGroupUniformLoad { pointer, result } => {
2303 // GLSL doesn't have pointers, which means that this backend needs to ensure that
2304 // the actual "loading" is happening between the two barriers.
2305 // This is done in `Emit` by never emitting a variable name for pointer variables
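// Sketch of the emitted code (the exact barrier calls come from `write_barrier`):
// ```
// <workgroup barrier>
// <type> _e4 = <pointer expression>;
// <workgroup barrier>
// ```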
2306 self.write_barrier(crate::Barrier::WORK_GROUP, level)?;
2307
2308 let result_name = format!("{}{}", back::BAKE_PREFIX, result.index());
2309 write!(self.out, "{level}")?;
2310 // Expressions cannot have side effects, so just writing the expression here is fine.
2311 self.write_named_expr(pointer, result_name, result, ctx)?;
2312
2313 self.write_barrier(crate::Barrier::WORK_GROUP, level)?;
2314 }
2315 // Stores a value into an image.
2316 Statement::ImageStore {
2317 image,
2318 coordinate,
2319 array_index,
2320 value,
2321 } => {
2322 write!(self.out, "{level}")?;
2323 self.write_image_store(ctx, image, coordinate, array_index, value)?
2324 }
// A `Call` is written `name(arguments)` where `arguments` is a comma separated list of expressions
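// Sketch: with a result this comes out roughly as `<type> _e5 = fn_name(a, b);`,
// otherwise just `fn_name(a, b);`; sampler arguments are filtered out.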
2326 Statement::Call {
2327 function,
2328 ref arguments,
2329 result,
2330 } => {
2331 write!(self.out, "{level}")?;
2332 if let Some(expr) = result {
2333 let name = format!("{}{}", back::BAKE_PREFIX, expr.index());
2334 let result = self.module.functions[function].result.as_ref().unwrap();
2335 self.write_type(result.ty)?;
2336 write!(self.out, " {name}")?;
2337 if let TypeInner::Array { base, size, .. } = self.module.types[result.ty].inner
2338 {
2339 self.write_array_size(base, size)?
2340 }
2341 write!(self.out, " = ")?;
2342 self.named_expressions.insert(expr, name);
2343 }
2344 write!(self.out, "{}(", &self.names[&NameKey::Function(function)])?;
2345 let arguments: Vec<_> = arguments
2346 .iter()
2347 .enumerate()
2348 .filter_map(|(i, arg)| {
2349 let arg_ty = self.module.functions[function].arguments[i].ty;
2350 match self.module.types[arg_ty].inner {
2351 TypeInner::Sampler { .. } => None,
2352 _ => Some(*arg),
2353 }
2354 })
2355 .collect();
2356 self.write_slice(&arguments, |this, _, arg| this.write_expr(*arg, ctx))?;
2357 writeln!(self.out, ");")?
2358 }
2359 Statement::Atomic {
2360 pointer,
2361 ref fun,
2362 value,
2363 result,
2364 } => {
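// Sketch of the emitted statement: `int _e7 = atomicAdd(<pointer>, <value>);`
// (Subtract is lowered to `atomicAdd` with a negated operand).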
2365 write!(self.out, "{level}")?;
2366 let res_name = format!("{}{}", back::BAKE_PREFIX, result.index());
2367 let res_ty = ctx.resolve_type(result, &self.module.types);
2368 self.write_value_type(res_ty)?;
2369 write!(self.out, " {res_name} = ")?;
2370 self.named_expressions.insert(result, res_name);
2371
2372 let fun_str = fun.to_glsl();
2373 write!(self.out, "atomic{fun_str}(")?;
2374 self.write_expr(pointer, ctx)?;
2375 write!(self.out, ", ")?;
2376 // handle the special cases
2377 match *fun {
2378 crate::AtomicFunction::Subtract => {
// we just wrote `atomicAdd`, so negate the argument
2380 write!(self.out, "-")?;
2381 }
2382 crate::AtomicFunction::Exchange { compare: Some(_) } => {
2383 return Err(Error::Custom(
2384 "atomic CompareExchange is not implemented".to_string(),
2385 ));
2386 }
2387 _ => {}
2388 }
2389 self.write_expr(value, ctx)?;
2390 writeln!(self.out, ");")?;
2391 }
2392 Statement::RayQuery { .. } => unreachable!(),
2393 Statement::SubgroupBallot { result, predicate } => {
2394 write!(self.out, "{level}")?;
2395 let res_name = format!("{}{}", back::BAKE_PREFIX, result.index());
2396 let res_ty = ctx.info[result].ty.inner_with(&self.module.types);
2397 self.write_value_type(res_ty)?;
2398 write!(self.out, " {res_name} = ")?;
2399 self.named_expressions.insert(result, res_name);
2400
2401 write!(self.out, "subgroupBallot(")?;
2402 match predicate {
2403 Some(predicate) => self.write_expr(predicate, ctx)?,
2404 None => write!(self.out, "true")?,
2405 }
2406 writeln!(self.out, ");")?;
2407 }
2408 Statement::SubgroupCollectiveOperation {
2409 op,
2410 collective_op,
2411 argument,
2412 result,
2413 } => {
2414 write!(self.out, "{level}")?;
2415 let res_name = format!("{}{}", back::BAKE_PREFIX, result.index());
2416 let res_ty = ctx.info[result].ty.inner_with(&self.module.types);
2417 self.write_value_type(res_ty)?;
2418 write!(self.out, " {res_name} = ")?;
2419 self.named_expressions.insert(result, res_name);
2420
2421 match (collective_op, op) {
2422 (crate::CollectiveOperation::Reduce, crate::SubgroupOperation::All) => {
2423 write!(self.out, "subgroupAll(")?
2424 }
2425 (crate::CollectiveOperation::Reduce, crate::SubgroupOperation::Any) => {
2426 write!(self.out, "subgroupAny(")?
2427 }
2428 (crate::CollectiveOperation::Reduce, crate::SubgroupOperation::Add) => {
2429 write!(self.out, "subgroupAdd(")?
2430 }
2431 (crate::CollectiveOperation::Reduce, crate::SubgroupOperation::Mul) => {
2432 write!(self.out, "subgroupMul(")?
2433 }
2434 (crate::CollectiveOperation::Reduce, crate::SubgroupOperation::Max) => {
2435 write!(self.out, "subgroupMax(")?
2436 }
2437 (crate::CollectiveOperation::Reduce, crate::SubgroupOperation::Min) => {
2438 write!(self.out, "subgroupMin(")?
2439 }
2440 (crate::CollectiveOperation::Reduce, crate::SubgroupOperation::And) => {
2441 write!(self.out, "subgroupAnd(")?
2442 }
2443 (crate::CollectiveOperation::Reduce, crate::SubgroupOperation::Or) => {
2444 write!(self.out, "subgroupOr(")?
2445 }
2446 (crate::CollectiveOperation::Reduce, crate::SubgroupOperation::Xor) => {
2447 write!(self.out, "subgroupXor(")?
2448 }
2449 (crate::CollectiveOperation::ExclusiveScan, crate::SubgroupOperation::Add) => {
2450 write!(self.out, "subgroupExclusiveAdd(")?
2451 }
2452 (crate::CollectiveOperation::ExclusiveScan, crate::SubgroupOperation::Mul) => {
2453 write!(self.out, "subgroupExclusiveMul(")?
2454 }
2455 (crate::CollectiveOperation::InclusiveScan, crate::SubgroupOperation::Add) => {
2456 write!(self.out, "subgroupInclusiveAdd(")?
2457 }
2458 (crate::CollectiveOperation::InclusiveScan, crate::SubgroupOperation::Mul) => {
2459 write!(self.out, "subgroupInclusiveMul(")?
2460 }
2461 _ => unimplemented!(),
2462 }
2463 self.write_expr(argument, ctx)?;
2464 writeln!(self.out, ");")?;
2465 }
2466 Statement::SubgroupGather {
2467 mode,
2468 argument,
2469 result,
2470 } => {
2471 write!(self.out, "{level}")?;
2472 let res_name = format!("{}{}", back::BAKE_PREFIX, result.index());
2473 let res_ty = ctx.info[result].ty.inner_with(&self.module.types);
2474 self.write_value_type(res_ty)?;
2475 write!(self.out, " {res_name} = ")?;
2476 self.named_expressions.insert(result, res_name);
2477
2478 match mode {
2479 crate::GatherMode::BroadcastFirst => {
2480 write!(self.out, "subgroupBroadcastFirst(")?;
2481 }
2482 crate::GatherMode::Broadcast(_) => {
2483 write!(self.out, "subgroupBroadcast(")?;
2484 }
2485 crate::GatherMode::Shuffle(_) => {
2486 write!(self.out, "subgroupShuffle(")?;
2487 }
2488 crate::GatherMode::ShuffleDown(_) => {
2489 write!(self.out, "subgroupShuffleDown(")?;
2490 }
2491 crate::GatherMode::ShuffleUp(_) => {
2492 write!(self.out, "subgroupShuffleUp(")?;
2493 }
2494 crate::GatherMode::ShuffleXor(_) => {
2495 write!(self.out, "subgroupShuffleXor(")?;
2496 }
2497 }
2498 self.write_expr(argument, ctx)?;
2499 match mode {
2500 crate::GatherMode::BroadcastFirst => {}
2501 crate::GatherMode::Broadcast(index)
2502 | crate::GatherMode::Shuffle(index)
2503 | crate::GatherMode::ShuffleDown(index)
2504 | crate::GatherMode::ShuffleUp(index)
2505 | crate::GatherMode::ShuffleXor(index) => {
2506 write!(self.out, ", ")?;
2507 self.write_expr(index, ctx)?;
2508 }
2509 }
2510 writeln!(self.out, ");")?;
2511 }
2512 }
2513
2514 Ok(())
2515 }
2516
2517 /// Write a const expression.
2518 ///
2519 /// Write `expr`, a handle to an [`Expression`] in the current [`Module`]'s
/// constant expression arena, as a GLSL expression.
2521 ///
2522 /// # Notes
2523 /// Adds no newlines or leading/trailing whitespace
2524 ///
2525 /// [`Expression`]: crate::Expression
2526 /// [`Module`]: crate::Module
2527 fn write_const_expr(&mut self, expr: Handle<crate::Expression>) -> BackendResult {
2528 self.write_possibly_const_expr(
2529 expr,
2530 &self.module.global_expressions,
2531 |expr| &self.info[expr],
2532 |writer, expr| writer.write_const_expr(expr),
2533 )
2534 }
2535
2536 /// Write [`Expression`] variants that can occur in both runtime and const expressions.
2537 ///
2538 /// Write `expr`, a handle to an [`Expression`] in the arena `expressions`,
/// as a GLSL expression. This must be one of the [`Expression`] variants
2540 /// that is allowed to occur in constant expressions.
2541 ///
2542 /// Use `write_expression` to write subexpressions.
2543 ///
2544 /// This is the common code for `write_expr`, which handles arbitrary
2545 /// runtime expressions, and `write_const_expr`, which only handles
2546 /// const-expressions. Each of those callers passes itself (essentially) as
2547 /// the `write_expression` callback, so that subexpressions are restricted
2548 /// to the appropriate variants.
2549 ///
2550 /// # Notes
2551 /// Adds no newlines or leading/trailing whitespace
2552 ///
2553 /// [`Expression`]: crate::Expression
2554 fn write_possibly_const_expr<'w, I, E>(
2555 &'w mut self,
2556 expr: Handle<crate::Expression>,
2557 expressions: &crate::Arena<crate::Expression>,
2558 info: I,
2559 write_expression: E,
2560 ) -> BackendResult
2561 where
2562 I: Fn(Handle<crate::Expression>) -> &'w proc::TypeResolution,
2563 E: Fn(&mut Self, Handle<crate::Expression>) -> BackendResult,
2564 {
2565 use crate::Expression;
2566
2567 match expressions[expr] {
2568 Expression::Literal(literal) => {
2569 match literal {
// Floats are written using `Debug` instead of `Display` because it always appends the
// decimal part, even when it's zero, which is needed for a valid glsl float constant
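// (e.g. `1f32` is printed as `1.0` rather than `1`)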
2572 crate::Literal::F64(value) => write!(self.out, "{:?}LF", value)?,
2573 crate::Literal::F32(value) => write!(self.out, "{:?}", value)?,
2574 // Unsigned integers need a `u` at the end
2575 //
// While `core` doesn't necessarily need it, it's allowed, and since `es` needs it we
// always write it; the extra branch wouldn't have any benefit in readability
2578 crate::Literal::U32(value) => write!(self.out, "{}u", value)?,
2579 crate::Literal::I32(value) => write!(self.out, "{}", value)?,
2580 crate::Literal::Bool(value) => write!(self.out, "{}", value)?,
2581 crate::Literal::I64(_) => {
2582 return Err(Error::Custom("GLSL has no 64-bit integer type".into()));
2583 }
2584 crate::Literal::U64(_) => {
2585 return Err(Error::Custom("GLSL has no 64-bit integer type".into()));
2586 }
2587 crate::Literal::AbstractInt(_) | crate::Literal::AbstractFloat(_) => {
2588 return Err(Error::Custom(
2589 "Abstract types should not appear in IR presented to backends".into(),
2590 ));
2591 }
2592 }
2593 }
2594 Expression::Constant(handle) => {
2595 let constant = &self.module.constants[handle];
2596 if constant.name.is_some() {
2597 write!(self.out, "{}", self.names[&NameKey::Constant(handle)])?;
2598 } else {
2599 self.write_const_expr(constant.init)?;
2600 }
2601 }
2602 Expression::ZeroValue(ty) => {
2603 self.write_zero_init_value(ty)?;
2604 }
2605 Expression::Compose { ty, ref components } => {
2606 self.write_type(ty)?;
2607
2608 if let TypeInner::Array { base, size, .. } = self.module.types[ty].inner {
2609 self.write_array_size(base, size)?;
2610 }
2611
2612 write!(self.out, "(")?;
2613 for (index, component) in components.iter().enumerate() {
2614 if index != 0 {
2615 write!(self.out, ", ")?;
2616 }
2617 write_expression(self, *component)?;
2618 }
2619 write!(self.out, ")")?
2620 }
2621 // `Splat` needs to actually write down a vector, it's not always inferred in GLSL.
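// For example, a splat of `x` to a `vec3` is written as `vec3(x)`.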
2622 Expression::Splat { size: _, value } => {
2623 let resolved = info(expr).inner_with(&self.module.types);
2624 self.write_value_type(resolved)?;
2625 write!(self.out, "(")?;
2626 write_expression(self, value)?;
2627 write!(self.out, ")")?
2628 }
2629 _ => unreachable!(),
2630 }
2631
2632 Ok(())
2633 }
2634
2635 /// Helper method to write expressions
2636 ///
2637 /// # Notes
2638 /// Doesn't add any newlines or leading/trailing spaces
2639 fn write_expr(
2640 &mut self,
2641 expr: Handle<crate::Expression>,
2642 ctx: &back::FunctionCtx,
2643 ) -> BackendResult {
2644 use crate::Expression;
2645
2646 if let Some(name) = self.named_expressions.get(&expr) {
2647 write!(self.out, "{name}")?;
2648 return Ok(());
2649 }
2650
2651 match ctx.expressions[expr] {
2652 Expression::Literal(_)
2653 | Expression::Constant(_)
2654 | Expression::ZeroValue(_)
2655 | Expression::Compose { .. }
2656 | Expression::Splat { .. } => {
2657 self.write_possibly_const_expr(
2658 expr,
2659 ctx.expressions,
2660 |expr| &ctx.info[expr].ty,
2661 |writer, expr| writer.write_expr(expr, ctx),
2662 )?;
2663 }
2664 Expression::Override(_) => return Err(Error::Override),
2665 // `Access` is applied to arrays, vectors and matrices and is written as indexing
2666 Expression::Access { base, index } => {
2667 self.write_expr(base, ctx)?;
2668 write!(self.out, "[")?;
2669 self.write_expr(index, ctx)?;
2670 write!(self.out, "]")?
2671 }
// `AccessIndex` is the same as `Access` except that the index is a constant and it can
// be applied to structs; in this case we need to find the name of the field at that
// index and write `base.field_name`
2675 Expression::AccessIndex { base, index } => {
2676 self.write_expr(base, ctx)?;
2677
2678 let base_ty_res = &ctx.info[base].ty;
2679 let mut resolved = base_ty_res.inner_with(&self.module.types);
2680 let base_ty_handle = match *resolved {
2681 TypeInner::Pointer { base, space: _ } => {
2682 resolved = &self.module.types[base].inner;
2683 Some(base)
2684 }
2685 _ => base_ty_res.handle(),
2686 };
2687
2688 match *resolved {
2689 TypeInner::Vector { .. } => {
2690 // Write vector access as a swizzle
2691 write!(self.out, ".{}", back::COMPONENTS[index as usize])?
2692 }
2693 TypeInner::Matrix { .. }
2694 | TypeInner::Array { .. }
2695 | TypeInner::ValuePointer { .. } => write!(self.out, "[{index}]")?,
2696 TypeInner::Struct { .. } => {
// This will never panic when the type is a `Struct`; that is not true
// for other types, so we can only unwrap while inside this match arm
2699 let ty = base_ty_handle.unwrap();
2700
2701 write!(
2702 self.out,
2703 ".{}",
2704 &self.names[&NameKey::StructMember(ty, index)]
2705 )?
2706 }
2707 ref other => return Err(Error::Custom(format!("Cannot index {other:?}"))),
2708 }
2709 }
2710 // `Swizzle` adds a few letters behind the dot.
2711 Expression::Swizzle {
2712 size,
2713 vector,
2714 pattern,
2715 } => {
2716 self.write_expr(vector, ctx)?;
2717 write!(self.out, ".")?;
2718 for &sc in pattern[..size as usize].iter() {
2719 self.out.write_char(back::COMPONENTS[sc as usize])?;
2720 }
2721 }
2722 // Function arguments are written as the argument name
2723 Expression::FunctionArgument(pos) => {
2724 write!(self.out, "{}", &self.names[&ctx.argument_key(pos)])?
2725 }
2726 // Global variables need some special work for their name but
2727 // `get_global_name` does the work for us
2728 Expression::GlobalVariable(handle) => {
2729 let global = &self.module.global_variables[handle];
2730 self.write_global_name(handle, global)?
2731 }
// A local is written as its name
2733 Expression::LocalVariable(handle) => {
2734 write!(self.out, "{}", self.names[&ctx.name_key(handle)])?
2735 }
2736 // glsl has no pointers so there's no load operation, just write the pointer expression
2737 Expression::Load { pointer } => self.write_expr(pointer, ctx)?,
2738 // `ImageSample` is a bit complicated compared to the rest of the IR.
2739 //
// First there are three variations depending on whether the sample level is explicitly set,
// automatic, or a bias:
2742 // `texture(image, coordinate)` - Automatic sample level
2743 // `texture(image, coordinate, bias)` - Bias sample level
2744 // `textureLod(image, coordinate, level)` - Zero or Exact sample level
2745 //
2746 // Furthermore if `depth_ref` is some we need to append it to the coordinate vector
2747 Expression::ImageSample {
2748 image,
2749 sampler: _, //TODO?
2750 gather,
2751 coordinate,
2752 array_index,
2753 offset,
2754 level,
2755 depth_ref,
2756 } => {
2757 let (dim, class, arrayed) = match *ctx.resolve_type(image, &self.module.types) {
2758 TypeInner::Image {
2759 dim,
2760 class,
2761 arrayed,
2762 ..
2763 } => (dim, class, arrayed),
2764 _ => unreachable!(),
2765 };
2766 let mut err = None;
2767 if dim == crate::ImageDimension::Cube {
2768 if offset.is_some() {
2769 err = Some("gsamplerCube[Array][Shadow] doesn't support texture sampling with offsets");
2770 }
2771 if arrayed
2772 && matches!(class, crate::ImageClass::Depth { .. })
2773 && matches!(level, crate::SampleLevel::Gradient { .. })
2774 {
err = Some("samplerCubeArrayShadow doesn't support textureGrad");
2776 }
2777 }
2778 if gather.is_some() && level != crate::SampleLevel::Zero {
2779 err = Some("textureGather doesn't support LOD parameters");
2780 }
2781 if let Some(err) = err {
2782 return Err(Error::Custom(String::from(err)));
2783 }
2784
2785 // `textureLod[Offset]` on `sampler2DArrayShadow` and `samplerCubeShadow` does not exist in GLSL,
2786 // unless `GL_EXT_texture_shadow_lod` is present.
2787 // But if the target LOD is zero, we can emulate that by using `textureGrad[Offset]` with a constant gradient of 0.
2788 let workaround_lod_with_grad = ((dim == crate::ImageDimension::Cube && !arrayed)
2789 || (dim == crate::ImageDimension::D2 && arrayed))
2790 && level == crate::SampleLevel::Zero
2791 && matches!(class, crate::ImageClass::Depth { .. })
2792 && !self.features.contains(Features::TEXTURE_SHADOW_LOD);
2793
2794 // Write the function to be used depending on the sample level
2795 let fun_name = match level {
2796 crate::SampleLevel::Zero if gather.is_some() => "textureGather",
2797 crate::SampleLevel::Zero if workaround_lod_with_grad => "textureGrad",
2798 crate::SampleLevel::Auto | crate::SampleLevel::Bias(_) => "texture",
2799 crate::SampleLevel::Zero | crate::SampleLevel::Exact(_) => "textureLod",
2800 crate::SampleLevel::Gradient { .. } => "textureGrad",
2801 };
2802 let offset_name = match offset {
2803 Some(_) => "Offset",
2804 None => "",
2805 };
2806
2807 write!(self.out, "{fun_name}{offset_name}(")?;
2808
2809 // Write the image that will be used
2810 self.write_expr(image, ctx)?;
2811 // The space here isn't required but it helps with readability
2812 write!(self.out, ", ")?;
2813
// We need the coordinate vector's size so we can later build a vector that's `size + 1`
// if `depth_ref` is some; if the type is neither a vector nor a scalar we panic as that's not a valid expression
2816 let mut coord_dim = match *ctx.resolve_type(coordinate, &self.module.types) {
2817 TypeInner::Vector { size, .. } => size as u8,
2818 TypeInner::Scalar { .. } => 1,
2819 _ => unreachable!(),
2820 };
2821
2822 if array_index.is_some() {
2823 coord_dim += 1;
2824 }
2825 let merge_depth_ref = depth_ref.is_some() && gather.is_none() && coord_dim < 4;
2826 if merge_depth_ref {
2827 coord_dim += 1;
2828 }
2829
2830 let tex_1d_hack = dim == crate::ImageDimension::D1 && self.options.version.is_es();
2831 let is_vec = tex_1d_hack || coord_dim != 1;
2832 // Compose a new texture coordinates vector
2833 if is_vec {
2834 write!(self.out, "vec{}(", coord_dim + tex_1d_hack as u8)?;
2835 }
2836 self.write_expr(coordinate, ctx)?;
2837 if tex_1d_hack {
2838 write!(self.out, ", 0.0")?;
2839 }
2840 if let Some(expr) = array_index {
2841 write!(self.out, ", ")?;
2842 self.write_expr(expr, ctx)?;
2843 }
2844 if merge_depth_ref {
2845 write!(self.out, ", ")?;
2846 self.write_expr(depth_ref.unwrap(), ctx)?;
2847 }
2848 if is_vec {
2849 write!(self.out, ")")?;
2850 }
2851
2852 if let (Some(expr), false) = (depth_ref, merge_depth_ref) {
2853 write!(self.out, ", ")?;
2854 self.write_expr(expr, ctx)?;
2855 }
2856
2857 match level {
2858 // Auto needs no more arguments
2859 crate::SampleLevel::Auto => (),
2860 // Zero needs level set to 0
2861 crate::SampleLevel::Zero => {
2862 if workaround_lod_with_grad {
2863 let vec_dim = match dim {
2864 crate::ImageDimension::Cube => 3,
2865 _ => 2,
2866 };
2867 write!(self.out, ", vec{vec_dim}(0.0), vec{vec_dim}(0.0)")?;
2868 } else if gather.is_none() {
2869 write!(self.out, ", 0.0")?;
2870 }
2871 }
2872 // Exact and bias require another argument
2873 crate::SampleLevel::Exact(expr) => {
2874 write!(self.out, ", ")?;
2875 self.write_expr(expr, ctx)?;
2876 }
2877 crate::SampleLevel::Bias(_) => {
2878 // This needs to be done after the offset writing
2879 }
2880 crate::SampleLevel::Gradient { x, y } => {
2881 // If we are using sampler2D to replace sampler1D, we also
2882 // need to make sure to use vec2 gradients
2883 if tex_1d_hack {
2884 write!(self.out, ", vec2(")?;
2885 self.write_expr(x, ctx)?;
2886 write!(self.out, ", 0.0)")?;
2887 write!(self.out, ", vec2(")?;
2888 self.write_expr(y, ctx)?;
2889 write!(self.out, ", 0.0)")?;
2890 } else {
2891 write!(self.out, ", ")?;
2892 self.write_expr(x, ctx)?;
2893 write!(self.out, ", ")?;
2894 self.write_expr(y, ctx)?;
2895 }
2896 }
2897 }
2898
2899 if let Some(constant) = offset {
2900 write!(self.out, ", ")?;
2901 if tex_1d_hack {
2902 write!(self.out, "ivec2(")?;
2903 }
2904 self.write_const_expr(constant)?;
2905 if tex_1d_hack {
2906 write!(self.out, ", 0)")?;
2907 }
2908 }
2909
2910 // Bias is always the last argument
2911 if let crate::SampleLevel::Bias(expr) = level {
2912 write!(self.out, ", ")?;
2913 self.write_expr(expr, ctx)?;
2914 }
2915
2916 if let (Some(component), None) = (gather, depth_ref) {
2917 write!(self.out, ", {}", component as usize)?;
2918 }
2919
2920 // End the function
2921 write!(self.out, ")")?
2922 }
2923 Expression::ImageLoad {
2924 image,
2925 coordinate,
2926 array_index,
2927 sample,
2928 level,
2929 } => self.write_image_load(expr, ctx, image, coordinate, array_index, sample, level)?,
2930 // Query translates into one of the:
2931 // - textureSize/imageSize
2932 // - textureQueryLevels
2933 // - textureSamples/imageSamples
2934 Expression::ImageQuery { image, query } => {
2935 use crate::ImageClass;
2936
2937 // This will only panic if the module is invalid
2938 let (dim, class) = match *ctx.resolve_type(image, &self.module.types) {
2939 TypeInner::Image {
2940 dim,
2941 arrayed: _,
2942 class,
2943 } => (dim, class),
2944 _ => unreachable!(),
2945 };
2946 let components = match dim {
2947 crate::ImageDimension::D1 => 1,
2948 crate::ImageDimension::D2 => 2,
2949 crate::ImageDimension::D3 => 3,
2950 crate::ImageDimension::Cube => 2,
2951 };
2952
2953 if let crate::ImageQuery::Size { .. } = query {
2954 match components {
2955 1 => write!(self.out, "uint(")?,
2956 _ => write!(self.out, "uvec{components}(")?,
2957 }
2958 } else {
2959 write!(self.out, "uint(")?;
2960 }
2961
2962 match query {
2963 crate::ImageQuery::Size { level } => {
2964 match class {
2965 ImageClass::Sampled { multi, .. } | ImageClass::Depth { multi } => {
2966 write!(self.out, "textureSize(")?;
2967 self.write_expr(image, ctx)?;
2968 if let Some(expr) = level {
2969 let cast_to_int = matches!(
2970 *ctx.resolve_type(expr, &self.module.types),
2971 crate::TypeInner::Scalar(crate::Scalar {
2972 kind: crate::ScalarKind::Uint,
2973 ..
2974 })
2975 );
2976
2977 write!(self.out, ", ")?;
2978
2979 if cast_to_int {
2980 write!(self.out, "int(")?;
2981 }
2982
2983 self.write_expr(expr, ctx)?;
2984
2985 if cast_to_int {
2986 write!(self.out, ")")?;
2987 }
2988 } else if !multi {
// All textureSize calls require an lod argument
// except for multisampled samplers
2991 write!(self.out, ", 0")?;
2992 }
2993 }
2994 ImageClass::Storage { .. } => {
2995 write!(self.out, "imageSize(")?;
2996 self.write_expr(image, ctx)?;
2997 }
2998 }
2999 write!(self.out, ")")?;
3000 if components != 1 || self.options.version.is_es() {
3001 write!(self.out, ".{}", &"xyz"[..components])?;
3002 }
3003 }
3004 crate::ImageQuery::NumLevels => {
3005 write!(self.out, "textureQueryLevels(",)?;
3006 self.write_expr(image, ctx)?;
3007 write!(self.out, ")",)?;
3008 }
3009 crate::ImageQuery::NumLayers => {
3010 let fun_name = match class {
3011 ImageClass::Sampled { .. } | ImageClass::Depth { .. } => "textureSize",
3012 ImageClass::Storage { .. } => "imageSize",
3013 };
3014 write!(self.out, "{fun_name}(")?;
3015 self.write_expr(image, ctx)?;
// All textureSize calls require an lod argument
// except for multisampled samplers
3018 if class.is_multisampled() {
3019 write!(self.out, ", 0")?;
3020 }
3021 write!(self.out, ")")?;
3022 if components != 1 || self.options.version.is_es() {
3023 write!(self.out, ".{}", back::COMPONENTS[components])?;
3024 }
3025 }
3026 crate::ImageQuery::NumSamples => {
3027 let fun_name = match class {
3028 ImageClass::Sampled { .. } | ImageClass::Depth { .. } => {
3029 "textureSamples"
3030 }
3031 ImageClass::Storage { .. } => "imageSamples",
3032 };
3033 write!(self.out, "{fun_name}(")?;
3034 self.write_expr(image, ctx)?;
3035 write!(self.out, ")",)?;
3036 }
3037 }
3038
3039 write!(self.out, ")")?;
3040 }
3041 Expression::Unary { op, expr } => {
3042 let operator_or_fn = match op {
3043 crate::UnaryOperator::Negate => "-",
3044 crate::UnaryOperator::LogicalNot => {
3045 match *ctx.resolve_type(expr, &self.module.types) {
3046 TypeInner::Vector { .. } => "not",
3047 _ => "!",
3048 }
3049 }
3050 crate::UnaryOperator::BitwiseNot => "~",
3051 };
3052 write!(self.out, "{operator_or_fn}(")?;
3053
3054 self.write_expr(expr, ctx)?;
3055
3056 write!(self.out, ")")?
3057 }
3058 // `Binary` we just write `left op right`, except when dealing with
3059 // comparison operations on vectors as they are implemented with
3060 // builtin functions.
3061 // Once again we wrap everything in parentheses to avoid precedence issues
3062 Expression::Binary {
3063 mut op,
3064 left,
3065 right,
3066 } => {
3067 // Holds `Some(function_name)` if the binary operation is
3068 // implemented as a function call
3069 use crate::{BinaryOperator as Bo, ScalarKind as Sk, TypeInner as Ti};
3070
3071 let left_inner = ctx.resolve_type(left, &self.module.types);
3072 let right_inner = ctx.resolve_type(right, &self.module.types);
3073
3074 let function = match (left_inner, right_inner) {
3075 (&Ti::Vector { scalar, .. }, &Ti::Vector { .. }) => match op {
3076 Bo::Less
3077 | Bo::LessEqual
3078 | Bo::Greater
3079 | Bo::GreaterEqual
3080 | Bo::Equal
3081 | Bo::NotEqual => BinaryOperation::VectorCompare,
3082 Bo::Modulo if scalar.kind == Sk::Float => BinaryOperation::Modulo,
3083 Bo::And if scalar.kind == Sk::Bool => {
3084 op = crate::BinaryOperator::LogicalAnd;
3085 BinaryOperation::VectorComponentWise
3086 }
3087 Bo::InclusiveOr if scalar.kind == Sk::Bool => {
3088 op = crate::BinaryOperator::LogicalOr;
3089 BinaryOperation::VectorComponentWise
3090 }
3091 _ => BinaryOperation::Other,
3092 },
3093 _ => match (left_inner.scalar_kind(), right_inner.scalar_kind()) {
3094 (Some(Sk::Float), _) | (_, Some(Sk::Float)) => match op {
3095 Bo::Modulo => BinaryOperation::Modulo,
3096 _ => BinaryOperation::Other,
3097 },
3098 (Some(Sk::Bool), Some(Sk::Bool)) => match op {
3099 Bo::InclusiveOr => {
3100 op = crate::BinaryOperator::LogicalOr;
3101 BinaryOperation::Other
3102 }
3103 Bo::And => {
3104 op = crate::BinaryOperator::LogicalAnd;
3105 BinaryOperation::Other
3106 }
3107 _ => BinaryOperation::Other,
3108 },
3109 _ => BinaryOperation::Other,
3110 },
3111 };
3112
3113 match function {
3114 BinaryOperation::VectorCompare => {
3115 let op_str = match op {
3116 Bo::Less => "lessThan(",
3117 Bo::LessEqual => "lessThanEqual(",
3118 Bo::Greater => "greaterThan(",
3119 Bo::GreaterEqual => "greaterThanEqual(",
3120 Bo::Equal => "equal(",
3121 Bo::NotEqual => "notEqual(",
3122 _ => unreachable!(),
3123 };
3124 write!(self.out, "{op_str}")?;
3125 self.write_expr(left, ctx)?;
3126 write!(self.out, ", ")?;
3127 self.write_expr(right, ctx)?;
3128 write!(self.out, ")")?;
3129 }
3130 BinaryOperation::VectorComponentWise => {
3131 self.write_value_type(left_inner)?;
3132 write!(self.out, "(")?;
3133
3134 let size = match *left_inner {
3135 Ti::Vector { size, .. } => size,
3136 _ => unreachable!(),
3137 };
3138
3139 for i in 0..size as usize {
3140 if i != 0 {
3141 write!(self.out, ", ")?;
3142 }
3143
3144 self.write_expr(left, ctx)?;
3145 write!(self.out, ".{}", back::COMPONENTS[i])?;
3146
3147 write!(self.out, " {} ", back::binary_operation_str(op))?;
3148
3149 self.write_expr(right, ctx)?;
3150 write!(self.out, ".{}", back::COMPONENTS[i])?;
3151 }
3152
3153 write!(self.out, ")")?;
3154 }
3155 // TODO: handle undefined behavior of BinaryOperator::Modulo
3156 //
3157 // sint:
3158 // if right == 0 return 0
3159 // if left == min(type_of(left)) && right == -1 return 0
3160 // if sign(left) == -1 || sign(right) == -1 return result as defined by WGSL
3161 //
3162 // uint:
3163 // if right == 0 return 0
3164 //
3165 // float:
3166 // if right == 0 return ? see https://github.com/gpuweb/gpuweb/issues/2798
3167 BinaryOperation::Modulo => {
3168 write!(self.out, "(")?;
3169
3170 // write `e1 - e2 * trunc(e1 / e2)`
3171 self.write_expr(left, ctx)?;
3172 write!(self.out, " - ")?;
3173 self.write_expr(right, ctx)?;
3174 write!(self.out, " * ")?;
3175 write!(self.out, "trunc(")?;
3176 self.write_expr(left, ctx)?;
3177 write!(self.out, " / ")?;
3178 self.write_expr(right, ctx)?;
3179 write!(self.out, ")")?;
3180
3181 write!(self.out, ")")?;
3182 }
3183 BinaryOperation::Other => {
3184 write!(self.out, "(")?;
3185
3186 self.write_expr(left, ctx)?;
3187 write!(self.out, " {} ", back::binary_operation_str(op))?;
3188 self.write_expr(right, ctx)?;
3189
3190 write!(self.out, ")")?;
3191 }
3192 }
3193 }
3194 // `Select` is written as `condition ? accept : reject`
3195 // We wrap everything in parentheses to avoid precedence issues
3196 Expression::Select {
3197 condition,
3198 accept,
3199 reject,
3200 } => {
3201 let cond_ty = ctx.resolve_type(condition, &self.module.types);
let vec_select = matches!(*cond_ty, TypeInner::Vector { .. });
3207
// TODO: Boolean mix on desktop requires GL_EXT_shader_integer_mix
3209 if vec_select {
// Glsl defines that for mix, when the condition is a boolean, the first element
// is picked if the condition is false and the second if it is true
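// so we emit `mix(<reject>, <accept>, <condition>)` rather than a ternary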
3212 write!(self.out, "mix(")?;
3213 self.write_expr(reject, ctx)?;
3214 write!(self.out, ", ")?;
3215 self.write_expr(accept, ctx)?;
3216 write!(self.out, ", ")?;
3217 self.write_expr(condition, ctx)?;
3218 } else {
3219 write!(self.out, "(")?;
3220 self.write_expr(condition, ctx)?;
3221 write!(self.out, " ? ")?;
3222 self.write_expr(accept, ctx)?;
3223 write!(self.out, " : ")?;
3224 self.write_expr(reject, ctx)?;
3225 }
3226
3227 write!(self.out, ")")?
3228 }
3229 // `Derivative` is a function call to a glsl provided function
3230 Expression::Derivative { axis, ctrl, expr } => {
3231 use crate::{DerivativeAxis as Axis, DerivativeControl as Ctrl};
3232 let fun_name = if self.options.version.supports_derivative_control() {
3233 match (axis, ctrl) {
3234 (Axis::X, Ctrl::Coarse) => "dFdxCoarse",
3235 (Axis::X, Ctrl::Fine) => "dFdxFine",
3236 (Axis::X, Ctrl::None) => "dFdx",
3237 (Axis::Y, Ctrl::Coarse) => "dFdyCoarse",
3238 (Axis::Y, Ctrl::Fine) => "dFdyFine",
3239 (Axis::Y, Ctrl::None) => "dFdy",
3240 (Axis::Width, Ctrl::Coarse) => "fwidthCoarse",
3241 (Axis::Width, Ctrl::Fine) => "fwidthFine",
3242 (Axis::Width, Ctrl::None) => "fwidth",
3243 }
3244 } else {
3245 match axis {
3246 Axis::X => "dFdx",
3247 Axis::Y => "dFdy",
3248 Axis::Width => "fwidth",
3249 }
3250 };
3251 write!(self.out, "{fun_name}(")?;
3252 self.write_expr(expr, ctx)?;
3253 write!(self.out, ")")?
3254 }
3255 // `Relational` is a normal function call to some glsl provided functions
3256 Expression::Relational { fun, argument } => {
3257 use crate::RelationalFunction as Rf;
3258
3259 let fun_name = match fun {
3260 Rf::IsInf => "isinf",
3261 Rf::IsNan => "isnan",
3262 Rf::All => "all",
3263 Rf::Any => "any",
3264 };
3265 write!(self.out, "{fun_name}(")?;
3266
3267 self.write_expr(argument, ctx)?;
3268
3269 write!(self.out, ")")?
3270 }
3271 Expression::Math {
3272 fun,
3273 arg,
3274 arg1,
3275 arg2,
3276 arg3,
3277 } => {
3278 use crate::MathFunction as Mf;
3279
3280 let fun_name = match fun {
3281 // comparison
3282 Mf::Abs => "abs",
3283 Mf::Min => "min",
3284 Mf::Max => "max",
3285 Mf::Clamp => {
3286 let scalar_kind = ctx
3287 .resolve_type(arg, &self.module.types)
3288 .scalar_kind()
3289 .unwrap();
3290 match scalar_kind {
3291 crate::ScalarKind::Float => "clamp",
3292 // Clamp is undefined if min > max. In practice this means it can use a median-of-three
3293 // instruction to determine the value. This is fine according to the WGSL spec for float
3294 // clamp, but integer clamp _must_ use min-max. As such we write out min/max.
3295 _ => {
3296 write!(self.out, "min(max(")?;
3297 self.write_expr(arg, ctx)?;
3298 write!(self.out, ", ")?;
3299 self.write_expr(arg1.unwrap(), ctx)?;
3300 write!(self.out, "), ")?;
3301 self.write_expr(arg2.unwrap(), ctx)?;
3302 write!(self.out, ")")?;
3303
3304 return Ok(());
3305 }
3306 }
3307 }
3308 Mf::Saturate => {
3309 write!(self.out, "clamp(")?;
3310
3311 self.write_expr(arg, ctx)?;
3312
3313 match *ctx.resolve_type(arg, &self.module.types) {
3314 crate::TypeInner::Vector { size, .. } => write!(
3315 self.out,
3316 ", vec{}(0.0), vec{0}(1.0)",
3317 back::vector_size_str(size)
3318 )?,
3319 _ => write!(self.out, ", 0.0, 1.0")?,
3320 }
3321
3322 write!(self.out, ")")?;
3323
3324 return Ok(());
3325 }
3326 // trigonometry
3327 Mf::Cos => "cos",
3328 Mf::Cosh => "cosh",
3329 Mf::Sin => "sin",
3330 Mf::Sinh => "sinh",
3331 Mf::Tan => "tan",
3332 Mf::Tanh => "tanh",
3333 Mf::Acos => "acos",
3334 Mf::Asin => "asin",
3335 Mf::Atan => "atan",
3336 Mf::Asinh => "asinh",
3337 Mf::Acosh => "acosh",
3338 Mf::Atanh => "atanh",
3339 Mf::Radians => "radians",
3340 Mf::Degrees => "degrees",
// glsl doesn't have an atan2 function,
// so we use the two-argument variation of the atan function
3343 Mf::Atan2 => "atan",
3344 // decomposition
3345 Mf::Ceil => "ceil",
3346 Mf::Floor => "floor",
3347 Mf::Round => "roundEven",
3348 Mf::Fract => "fract",
3349 Mf::Trunc => "trunc",
3350 Mf::Modf => MODF_FUNCTION,
3351 Mf::Frexp => FREXP_FUNCTION,
3352 Mf::Ldexp => "ldexp",
3353 // exponent
3354 Mf::Exp => "exp",
3355 Mf::Exp2 => "exp2",
3356 Mf::Log => "log",
3357 Mf::Log2 => "log2",
3358 Mf::Pow => "pow",
3359 // geometry
3360 Mf::Dot => match *ctx.resolve_type(arg, &self.module.types) {
3361 crate::TypeInner::Vector {
3362 scalar:
3363 crate::Scalar {
3364 kind: crate::ScalarKind::Float,
3365 ..
3366 },
3367 ..
3368 } => "dot",
3369 crate::TypeInner::Vector { size, .. } => {
3370 return self.write_dot_product(arg, arg1.unwrap(), size as usize, ctx)
3371 }
3372 _ => unreachable!(
3373 "Correct TypeInner for dot product should be already validated"
3374 ),
3375 },
3376 Mf::Outer => "outerProduct",
3377 Mf::Cross => "cross",
3378 Mf::Distance => "distance",
3379 Mf::Length => "length",
3380 Mf::Normalize => "normalize",
3381 Mf::FaceForward => "faceforward",
3382 Mf::Reflect => "reflect",
3383 Mf::Refract => "refract",
3384 // computational
3385 Mf::Sign => "sign",
3386 Mf::Fma => {
3387 if self.options.version.supports_fma_function() {
3388 // Use the fma function when available
3389 "fma"
3390 } else {
3391 // No fma support. Transform the function call into an arithmetic expression
3392 write!(self.out, "(")?;
3393
3394 self.write_expr(arg, ctx)?;
3395 write!(self.out, " * ")?;
3396
3397 let arg1 =
3398 arg1.ok_or_else(|| Error::Custom("Missing fma arg1".to_owned()))?;
3399 self.write_expr(arg1, ctx)?;
3400 write!(self.out, " + ")?;
3401
3402 let arg2 =
3403 arg2.ok_or_else(|| Error::Custom("Missing fma arg2".to_owned()))?;
3404 self.write_expr(arg2, ctx)?;
3405 write!(self.out, ")")?;
3406
3407 return Ok(());
3408 }
3409 }
3410 Mf::Mix => "mix",
3411 Mf::Step => "step",
3412 Mf::SmoothStep => "smoothstep",
3413 Mf::Sqrt => "sqrt",
3414 Mf::InverseSqrt => "inversesqrt",
3415 Mf::Inverse => "inverse",
3416 Mf::Transpose => "transpose",
3417 Mf::Determinant => "determinant",
3418 // bits
3419 Mf::CountTrailingZeros => {
3420 match *ctx.resolve_type(arg, &self.module.types) {
3421 crate::TypeInner::Vector { size, scalar, .. } => {
3422 let s = back::vector_size_str(size);
3423 if let crate::ScalarKind::Uint = scalar.kind {
3424 write!(self.out, "min(uvec{s}(findLSB(")?;
3425 self.write_expr(arg, ctx)?;
3426 write!(self.out, ")), uvec{s}(32u))")?;
3427 } else {
3428 write!(self.out, "ivec{s}(min(uvec{s}(findLSB(")?;
3429 self.write_expr(arg, ctx)?;
3430 write!(self.out, ")), uvec{s}(32u)))")?;
3431 }
3432 }
3433 crate::TypeInner::Scalar(scalar) => {
3434 if let crate::ScalarKind::Uint = scalar.kind {
3435 write!(self.out, "min(uint(findLSB(")?;
3436 self.write_expr(arg, ctx)?;
3437 write!(self.out, ")), 32u)")?;
3438 } else {
3439 write!(self.out, "int(min(uint(findLSB(")?;
3440 self.write_expr(arg, ctx)?;
3441 write!(self.out, ")), 32u))")?;
3442 }
3443 }
3444 _ => unreachable!(),
3445 };
3446 return Ok(());
3447 }
3448 Mf::CountLeadingZeros => {
3449 if self.options.version.supports_integer_functions() {
3450 match *ctx.resolve_type(arg, &self.module.types) {
3451 crate::TypeInner::Vector { size, scalar } => {
3452 let s = back::vector_size_str(size);
3453
3454 if let crate::ScalarKind::Uint = scalar.kind {
3455 write!(self.out, "uvec{s}(ivec{s}(31) - findMSB(")?;
3456 self.write_expr(arg, ctx)?;
3457 write!(self.out, "))")?;
3458 } else {
3459 write!(self.out, "mix(ivec{s}(31) - findMSB(")?;
3460 self.write_expr(arg, ctx)?;
3461 write!(self.out, "), ivec{s}(0), lessThan(")?;
3462 self.write_expr(arg, ctx)?;
3463 write!(self.out, ", ivec{s}(0)))")?;
3464 }
3465 }
3466 crate::TypeInner::Scalar(scalar) => {
3467 if let crate::ScalarKind::Uint = scalar.kind {
3468 write!(self.out, "uint(31 - findMSB(")?;
3469 } else {
3470 write!(self.out, "(")?;
3471 self.write_expr(arg, ctx)?;
3472 write!(self.out, " < 0 ? 0 : 31 - findMSB(")?;
3473 }
3474
3475 self.write_expr(arg, ctx)?;
3476 write!(self.out, "))")?;
3477 }
3478 _ => unreachable!(),
3479 };
3480 } else {
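                            // Without integer functions we fall back to floating point:
                            // for a positive x, `31 - floor(log2(x + 0.5))` counts the leading
                            // zeros (the `+ 0.5` also makes x = 0 come out as 32), and negative
                            // signed inputs are mapped to 0.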
3481 match *ctx.resolve_type(arg, &self.module.types) {
3482 crate::TypeInner::Vector { size, scalar } => {
3483 let s = back::vector_size_str(size);
3484
3485 if let crate::ScalarKind::Uint = scalar.kind {
3486 write!(self.out, "uvec{s}(")?;
3487 write!(self.out, "vec{s}(31.0) - floor(log2(vec{s}(")?;
3488 self.write_expr(arg, ctx)?;
3489 write!(self.out, ") + 0.5)))")?;
3490 } else {
3491 write!(self.out, "ivec{s}(")?;
3492 write!(self.out, "mix(vec{s}(31.0) - floor(log2(vec{s}(")?;
3493 self.write_expr(arg, ctx)?;
3494 write!(self.out, ") + 0.5)), ")?;
3495 write!(self.out, "vec{s}(0.0), lessThan(")?;
3496 self.write_expr(arg, ctx)?;
3497 write!(self.out, ", ivec{s}(0u))))")?;
3498 }
3499 }
3500 crate::TypeInner::Scalar(scalar) => {
3501 if let crate::ScalarKind::Uint = scalar.kind {
3502 write!(self.out, "uint(31.0 - floor(log2(float(")?;
3503 self.write_expr(arg, ctx)?;
3504 write!(self.out, ") + 0.5)))")?;
3505 } else {
3506 write!(self.out, "(")?;
3507 self.write_expr(arg, ctx)?;
3508 write!(self.out, " < 0 ? 0 : int(")?;
3509 write!(self.out, "31.0 - floor(log2(float(")?;
3510 self.write_expr(arg, ctx)?;
3511 write!(self.out, ") + 0.5))))")?;
3512 }
3513 }
3514 _ => unreachable!(),
3515 };
3516 }
3517
3518 return Ok(());
3519 }
3520 Mf::CountOneBits => "bitCount",
3521 Mf::ReverseBits => "bitfieldReverse",
3522 Mf::ExtractBits => {
3523 // The behavior of ExtractBits is undefined when offset + count > bit_width. We need
3524                        // to sanitize the offset and count first. If we don't do this, AMD and Intel chips
3525 // will return out-of-spec values if the extracted range is not within the bit width.
3526 //
3527 // This encodes the exact formula specified by the wgsl spec, without temporary values:
3528 // https://gpuweb.github.io/gpuweb/wgsl/#extractBits-unsigned-builtin
3529 //
3530 // w = sizeof(x) * 8
3531 // o = min(offset, w)
3532 // c = min(count, w - o)
3533 //
3534 // bitfieldExtract(x, o, c)
3535 //
3536                        // extract_bits(e, min(offset, w), min(count, w - min(offset, w)))
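                        //
                        // With a 32-bit scalar argument this comes out roughly as
                        // (illustrative names):
                        //   bitfieldExtract(x, int(min(offset, 32u)), int(min(count, 32u - min(offset, 32u))))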
3537 let scalar_bits = ctx
3538 .resolve_type(arg, &self.module.types)
3539 .scalar_width()
3540 .unwrap()
3541 * 8;
3542
3543 write!(self.out, "bitfieldExtract(")?;
3544 self.write_expr(arg, ctx)?;
3545 write!(self.out, ", int(min(")?;
3546 self.write_expr(arg1.unwrap(), ctx)?;
3547 write!(self.out, ", {scalar_bits}u)), int(min(",)?;
3548 self.write_expr(arg2.unwrap(), ctx)?;
3549 write!(self.out, ", {scalar_bits}u - min(")?;
3550 self.write_expr(arg1.unwrap(), ctx)?;
3551 write!(self.out, ", {scalar_bits}u))))")?;
3552
3553 return Ok(());
3554 }
3555 Mf::InsertBits => {
3556 // InsertBits has the same considerations as ExtractBits above
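                        // e.g. for 32-bit operands (illustrative names) this produces roughly:
                        //   bitfieldInsert(x, newbits, int(min(offset, 32u)), int(min(count, 32u - min(offset, 32u))))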
3557 let scalar_bits = ctx
3558 .resolve_type(arg, &self.module.types)
3559 .scalar_width()
3560 .unwrap()
3561 * 8;
3562
3563 write!(self.out, "bitfieldInsert(")?;
3564 self.write_expr(arg, ctx)?;
3565 write!(self.out, ", ")?;
3566 self.write_expr(arg1.unwrap(), ctx)?;
3567 write!(self.out, ", int(min(")?;
3568 self.write_expr(arg2.unwrap(), ctx)?;
3569 write!(self.out, ", {scalar_bits}u)), int(min(",)?;
3570 self.write_expr(arg3.unwrap(), ctx)?;
3571 write!(self.out, ", {scalar_bits}u - min(")?;
3572 self.write_expr(arg2.unwrap(), ctx)?;
3573 write!(self.out, ", {scalar_bits}u))))")?;
3574
3575 return Ok(());
3576 }
3577 Mf::FindLsb => "findLSB",
3578 Mf::FindMsb => "findMSB",
3579 // data packing
3580 Mf::Pack4x8snorm => "packSnorm4x8",
3581 Mf::Pack4x8unorm => "packUnorm4x8",
3582 Mf::Pack2x16snorm => "packSnorm2x16",
3583 Mf::Pack2x16unorm => "packUnorm2x16",
3584 Mf::Pack2x16float => "packHalf2x16",
3585 // data unpacking
3586 Mf::Unpack4x8snorm => "unpackSnorm4x8",
3587 Mf::Unpack4x8unorm => "unpackUnorm4x8",
3588 Mf::Unpack2x16snorm => "unpackSnorm2x16",
3589 Mf::Unpack2x16unorm => "unpackUnorm2x16",
3590 Mf::Unpack2x16float => "unpackHalf2x16",
3591 };
3592
3593 let extract_bits = fun == Mf::ExtractBits;
3594 let insert_bits = fun == Mf::InsertBits;
3595
3596 // Some GLSL functions always return signed integers (like findMSB),
3597                // so they need to be cast to uint if the argument is also a uint.
3598 let ret_might_need_int_to_uint =
3599 matches!(fun, Mf::FindLsb | Mf::FindMsb | Mf::CountOneBits | Mf::Abs);
3600
3601 // Some GLSL functions only accept signed integers (like abs),
3602 // so they need their argument cast from uint to int.
3603 let arg_might_need_uint_to_int = matches!(fun, Mf::Abs);
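                // Taken together these casts mean that, for example, `abs` of a `uint` x
                // is emitted as `uint(abs(int(x)))` and `findMSB` of a `uint` x as
                // `uint(findMSB(x))` (vectors use the `uvec*`/`ivec*` constructors instead).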
3604
3605 // Check if the argument is an unsigned integer and return the vector size
3606 // in case it's a vector
3607 let maybe_uint_size = match *ctx.resolve_type(arg, &self.module.types) {
3608 crate::TypeInner::Scalar(crate::Scalar {
3609 kind: crate::ScalarKind::Uint,
3610 ..
3611 }) => Some(None),
3612 crate::TypeInner::Vector {
3613 scalar:
3614 crate::Scalar {
3615 kind: crate::ScalarKind::Uint,
3616 ..
3617 },
3618 size,
3619 } => Some(Some(size)),
3620 _ => None,
3621 };
3622
3623 // Cast to uint if the function needs it
3624 if ret_might_need_int_to_uint {
3625 if let Some(maybe_size) = maybe_uint_size {
3626 match maybe_size {
3627 Some(size) => write!(self.out, "uvec{}(", size as u8)?,
3628 None => write!(self.out, "uint(")?,
3629 }
3630 }
3631 }
3632
3633 write!(self.out, "{fun_name}(")?;
3634
3635 // Cast to int if the function needs it
3636 if arg_might_need_uint_to_int {
3637 if let Some(maybe_size) = maybe_uint_size {
3638 match maybe_size {
3639 Some(size) => write!(self.out, "ivec{}(", size as u8)?,
3640 None => write!(self.out, "int(")?,
3641 }
3642 }
3643 }
3644
3645 self.write_expr(arg, ctx)?;
3646
3647 // Close the cast from uint to int
3648 if arg_might_need_uint_to_int && maybe_uint_size.is_some() {
3649 write!(self.out, ")")?
3650 }
3651
3652 if let Some(arg) = arg1 {
3653 write!(self.out, ", ")?;
3654 if extract_bits {
3655 write!(self.out, "int(")?;
3656 self.write_expr(arg, ctx)?;
3657 write!(self.out, ")")?;
3658 } else {
3659 self.write_expr(arg, ctx)?;
3660 }
3661 }
3662 if let Some(arg) = arg2 {
3663 write!(self.out, ", ")?;
3664 if extract_bits || insert_bits {
3665 write!(self.out, "int(")?;
3666 self.write_expr(arg, ctx)?;
3667 write!(self.out, ")")?;
3668 } else {
3669 self.write_expr(arg, ctx)?;
3670 }
3671 }
3672 if let Some(arg) = arg3 {
3673 write!(self.out, ", ")?;
3674 if insert_bits {
3675 write!(self.out, "int(")?;
3676 self.write_expr(arg, ctx)?;
3677 write!(self.out, ")")?;
3678 } else {
3679 self.write_expr(arg, ctx)?;
3680 }
3681 }
3682 write!(self.out, ")")?;
3683
3684 // Close the cast from int to uint
3685 if ret_might_need_int_to_uint && maybe_uint_size.is_some() {
3686 write!(self.out, ")")?
3687 }
3688 }
3689 // `As` is always a call.
3690            // If `convert` is true the function name is the target type;
3691            // else the function name is one of the glsl-provided bitcast functions
3692 Expression::As {
3693 expr,
3694 kind: target_kind,
3695 convert,
3696 } => {
3697 let inner = ctx.resolve_type(expr, &self.module.types);
3698 match convert {
3699 Some(width) => {
3700 // this is similar to `write_type`, but with the target kind
3701 let scalar = glsl_scalar(crate::Scalar {
3702 kind: target_kind,
3703 width,
3704 })?;
3705 match *inner {
3706 TypeInner::Matrix { columns, rows, .. } => write!(
3707 self.out,
3708 "{}mat{}x{}",
3709 scalar.prefix, columns as u8, rows as u8
3710 )?,
3711 TypeInner::Vector { size, .. } => {
3712 write!(self.out, "{}vec{}", scalar.prefix, size as u8)?
3713 }
3714 _ => write!(self.out, "{}", scalar.full)?,
3715 }
3716
3717 write!(self.out, "(")?;
3718 self.write_expr(expr, ctx)?;
3719 write!(self.out, ")")?
3720 }
3721 None => {
3722 use crate::ScalarKind as Sk;
3723
3724 let target_vector_type = match *inner {
3725 TypeInner::Vector { size, scalar } => Some(TypeInner::Vector {
3726 size,
3727 scalar: crate::Scalar {
3728 kind: target_kind,
3729 width: scalar.width,
3730 },
3731 }),
3732 _ => None,
3733 };
3734
3735 let source_kind = inner.scalar_kind().unwrap();
3736
3737 match (source_kind, target_kind, target_vector_type) {
3738 // No conversion needed
3739 (Sk::Sint, Sk::Sint, _)
3740 | (Sk::Uint, Sk::Uint, _)
3741 | (Sk::Float, Sk::Float, _)
3742 | (Sk::Bool, Sk::Bool, _) => {
3743 self.write_expr(expr, ctx)?;
3744 return Ok(());
3745 }
3746
3747 // Cast to/from floats
3748 (Sk::Float, Sk::Sint, _) => write!(self.out, "floatBitsToInt")?,
3749 (Sk::Float, Sk::Uint, _) => write!(self.out, "floatBitsToUint")?,
3750 (Sk::Sint, Sk::Float, _) => write!(self.out, "intBitsToFloat")?,
3751 (Sk::Uint, Sk::Float, _) => write!(self.out, "uintBitsToFloat")?,
3752
3753 // Cast between vector types
3754 (_, _, Some(vector)) => {
3755 self.write_value_type(&vector)?;
3756 }
3757
3758 // There is no way to bitcast between Uint/Sint in glsl. Use constructor conversion
3759 (Sk::Uint | Sk::Bool, Sk::Sint, None) => write!(self.out, "int")?,
3760 (Sk::Sint | Sk::Bool, Sk::Uint, None) => write!(self.out, "uint")?,
3761 (Sk::Bool, Sk::Float, None) => write!(self.out, "float")?,
3762 (Sk::Sint | Sk::Uint | Sk::Float, Sk::Bool, None) => {
3763 write!(self.out, "bool")?
3764 }
3765
3766 (Sk::AbstractInt | Sk::AbstractFloat, _, _)
3767 | (_, Sk::AbstractInt | Sk::AbstractFloat, _) => unreachable!(),
3768 };
3769
3770 write!(self.out, "(")?;
3771 self.write_expr(expr, ctx)?;
3772 write!(self.out, ")")?;
3773 }
3774 }
3775 }
3776 // These expressions never show up in `Emit`.
3777 Expression::CallResult(_)
3778 | Expression::AtomicResult { .. }
3779 | Expression::RayQueryProceedResult
3780 | Expression::WorkGroupUniformLoadResult { .. }
3781 | Expression::SubgroupOperationResult { .. }
3782 | Expression::SubgroupBallotResult => unreachable!(),
3783 // `ArrayLength` is written as `expr.length()` and we convert it to a uint
3784 Expression::ArrayLength(expr) => {
3785 write!(self.out, "uint(")?;
3786 self.write_expr(expr, ctx)?;
3787 write!(self.out, ".length())")?
3788 }
3789 // not supported yet
3790 Expression::RayQueryGetIntersection { .. } => unreachable!(),
3791 }
3792
3793 Ok(())
3794 }
3795
3796 /// Helper function to write the local holding the clamped lod
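    ///
    /// Emits a declaration roughly of the form (names illustrative):
    /// `int _e12_clamped_lod = clamp(lod, 0, textureQueryLevels(image) - 1);`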
3797 fn write_clamped_lod(
3798 &mut self,
3799 ctx: &back::FunctionCtx,
3800 expr: Handle<crate::Expression>,
3801 image: Handle<crate::Expression>,
3802 level_expr: Handle<crate::Expression>,
3803 ) -> Result<(), Error> {
3804 // Define our local and start a call to `clamp`
3805 write!(
3806 self.out,
3807 "int {}{}{} = clamp(",
3808 back::BAKE_PREFIX,
3809 expr.index(),
3810 CLAMPED_LOD_SUFFIX
3811 )?;
3812 // Write the lod that will be clamped
3813 self.write_expr(level_expr, ctx)?;
3814 // Set the min value to 0 and start a call to `textureQueryLevels` to get
3815 // the maximum value
3816 write!(self.out, ", 0, textureQueryLevels(")?;
3817 // Write the target image as an argument to `textureQueryLevels`
3818 self.write_expr(image, ctx)?;
3819        // Close the call to `textureQueryLevels`, subtract 1 from it since
3820 // the lod argument is 0 based, close the `clamp` call and end the
3821 // local declaration statement.
3822 writeln!(self.out, ") - 1);")?;
3823
3824 Ok(())
3825 }
3826
3827 // Helper method used to retrieve how many elements a coordinate vector
3828    // for the image operations needs.
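    // (For example, a 2D arrayed image needs 3 components, and a 1D image on ES,
    // which gets the 2D workaround, needs 2.)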
3829 fn get_coordinate_vector_size(&self, dim: crate::ImageDimension, arrayed: bool) -> u8 {
3830        // openGL es doesn't have 1D images so we need to work around it
3831 let tex_1d_hack = dim == crate::ImageDimension::D1 && self.options.version.is_es();
3832 // Get how many components the coordinate vector needs for the dimensions only
3833 let tex_coord_size = match dim {
3834 crate::ImageDimension::D1 => 1,
3835 crate::ImageDimension::D2 => 2,
3836 crate::ImageDimension::D3 => 3,
3837 crate::ImageDimension::Cube => 2,
3838 };
3839 // Calculate the true size of the coordinate vector by adding 1 for arrayed images
3840 // and another 1 if we need to workaround 1D images by making them 2D
3841 tex_coord_size + tex_1d_hack as u8 + arrayed as u8
3842 }
3843
3844 /// Helper method to write the coordinate vector for image operations
3845 fn write_texture_coord(
3846 &mut self,
3847 ctx: &back::FunctionCtx,
3848 vector_size: u8,
3849 coordinate: Handle<crate::Expression>,
3850 array_index: Option<Handle<crate::Expression>>,
3851 // Emulate 1D images as 2D for profiles that don't support it (glsl es)
3852 tex_1d_hack: bool,
3853 ) -> Result<(), Error> {
3854 match array_index {
3855            // If the image needs an array index we need to add it to the end of our
3856            // coordinate vector; to do so we will use the `ivec(ivec, scalar)`
3857            // constructor notation (NOTE: the inner `ivec` can also be a scalar; this
3858 // is important for 1D arrayed images).
3859 Some(layer_expr) => {
3860 write!(self.out, "ivec{vector_size}(")?;
3861 self.write_expr(coordinate, ctx)?;
3862 write!(self.out, ", ")?;
3863 // If we are replacing sampler1D with sampler2D we also need
3864 // to add another zero to the coordinates vector for the y component
3865 if tex_1d_hack {
3866 write!(self.out, "0, ")?;
3867 }
3868 self.write_expr(layer_expr, ctx)?;
3869 write!(self.out, ")")?;
3870 }
3871 // Otherwise write just the expression (and the 1D hack if needed)
3872 None => {
3873 let uvec_size = match *ctx.resolve_type(coordinate, &self.module.types) {
3874 TypeInner::Scalar(crate::Scalar {
3875 kind: crate::ScalarKind::Uint,
3876 ..
3877 }) => Some(None),
3878 TypeInner::Vector {
3879 size,
3880 scalar:
3881 crate::Scalar {
3882 kind: crate::ScalarKind::Uint,
3883 ..
3884 },
3885 } => Some(Some(size as u32)),
3886 _ => None,
3887 };
3888 if tex_1d_hack {
3889 write!(self.out, "ivec2(")?;
3890 } else if uvec_size.is_some() {
3891 match uvec_size {
3892 Some(None) => write!(self.out, "int(")?,
3893 Some(Some(size)) => write!(self.out, "ivec{size}(")?,
3894 _ => {}
3895 }
3896 }
3897 self.write_expr(coordinate, ctx)?;
3898 if tex_1d_hack {
3899 write!(self.out, ", 0)")?;
3900 } else if uvec_size.is_some() {
3901 write!(self.out, ")")?;
3902 }
3903 }
3904 }
3905
3906 Ok(())
3907 }
3908
3909 /// Helper method to write the `ImageStore` statement
3910 fn write_image_store(
3911 &mut self,
3912 ctx: &back::FunctionCtx,
3913 image: Handle<crate::Expression>,
3914 coordinate: Handle<crate::Expression>,
3915 array_index: Option<Handle<crate::Expression>>,
3916 value: Handle<crate::Expression>,
3917 ) -> Result<(), Error> {
3918 use crate::ImageDimension as IDim;
3919
3920        // NOTE: openGL requires that `imageStore`s have no effect when the texel is invalid
3921 // so we don't need to generate bounds checks (OpenGL 4.2 Core §3.9.20)
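        //
        // The emitted statement therefore has the plain form, e.g. for a 2D image
        // (illustrative names): `imageStore(image, coord, value);`, with the
        // coordinate cast to a signed vector when needed.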
3922
3923 // This will only panic if the module is invalid
3924 let dim = match *ctx.resolve_type(image, &self.module.types) {
3925 TypeInner::Image { dim, .. } => dim,
3926 _ => unreachable!(),
3927 };
3928
3929 // Begin our call to `imageStore`
3930 write!(self.out, "imageStore(")?;
3931 self.write_expr(image, ctx)?;
3932 // Separate the image argument from the coordinates
3933 write!(self.out, ", ")?;
3934
3935        // openGL es doesn't have 1D images so we need to work around it
3936 let tex_1d_hack = dim == IDim::D1 && self.options.version.is_es();
3937 // Write the coordinate vector
3938 self.write_texture_coord(
3939 ctx,
3940 // Get the size of the coordinate vector
3941 self.get_coordinate_vector_size(dim, array_index.is_some()),
3942 coordinate,
3943 array_index,
3944 tex_1d_hack,
3945 )?;
3946
3947 // Separate the coordinate from the value to write and write the expression
3948 // of the value to write.
3949 write!(self.out, ", ")?;
3950 self.write_expr(value, ctx)?;
3951 // End the call to `imageStore` and the statement.
3952 writeln!(self.out, ");")?;
3953
3954 Ok(())
3955 }
3956
3957 /// Helper method for writing an `ImageLoad` expression.
3958 #[allow(clippy::too_many_arguments)]
3959 fn write_image_load(
3960 &mut self,
3961 handle: Handle<crate::Expression>,
3962 ctx: &back::FunctionCtx,
3963 image: Handle<crate::Expression>,
3964 coordinate: Handle<crate::Expression>,
3965 array_index: Option<Handle<crate::Expression>>,
3966 sample: Option<Handle<crate::Expression>>,
3967 level: Option<Handle<crate::Expression>>,
3968 ) -> Result<(), Error> {
3969 use crate::ImageDimension as IDim;
3970
3971 // `ImageLoad` is a bit complicated.
3972        // There are two functions: one for sampled
3973        // images and another for storage images; the former uses `texelFetch` and the
3974 // latter uses `imageLoad`.
3975 //
3976 // Furthermore we have `level` which is always `Some` for sampled images
3977 // and `None` for storage images, so we end up with two functions:
3978 // - `texelFetch(image, coordinate, level)` for sampled images
3979 // - `imageLoad(image, coordinate)` for storage images
3980 //
3981 // Finally we also have to consider bounds checking, for storage images
3982 // this is easy since openGL requires that invalid texels always return
3983        // 0; for sampled images we need to either verify that all arguments are
3984 // in bounds (`ReadZeroSkipWrite`) or make them a valid texel (`Restrict`).
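        //
        // Under `ReadZeroSkipWrite` the result is a parenthesized ternary, roughly
        // (illustrative names, sampled 2D float image):
        //   (lod < textureQueryLevels(image) && all(lessThan(coord, textureSize(image, lod)))
        //       ? texelFetch(image, coord, lod) : vec4(0.0))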
3985
3986 // This will only panic if the module is invalid
3987 let (dim, class) = match *ctx.resolve_type(image, &self.module.types) {
3988 TypeInner::Image {
3989 dim,
3990 arrayed: _,
3991 class,
3992 } => (dim, class),
3993 _ => unreachable!(),
3994 };
3995
3996 // Get the name of the function to be used for the load operation
3997 // and the policy to be used with it.
3998 let (fun_name, policy) = match class {
3999 // Sampled images inherit the policy from the user passed policies
4000 crate::ImageClass::Sampled { .. } => ("texelFetch", self.policies.image_load),
4001 crate::ImageClass::Storage { .. } => {
4002 // OpenGL ES 3.1 mentions in Chapter "8.22 Texture Image Loads and Stores" that:
4003 // "Invalid image loads will return a vector where the value of R, G, and B components
4004 // is 0 and the value of the A component is undefined."
4005 //
4006 // OpenGL 4.2 Core mentions in Chapter "3.9.20 Texture Image Loads and Stores" that:
4007 // "Invalid image loads will return zero."
4008 //
4009 // So, we only inject bounds checks for ES
4010 let policy = if self.options.version.is_es() {
4011 self.policies.image_load
4012 } else {
4013 proc::BoundsCheckPolicy::Unchecked
4014 };
4015 ("imageLoad", policy)
4016 }
4017 // TODO: Is there even a function for this?
4018 crate::ImageClass::Depth { multi: _ } => {
4019 return Err(Error::Custom(
4020 "WGSL `textureLoad` from depth textures is not supported in GLSL".to_string(),
4021 ))
4022 }
4023 };
4024
4025        // openGL es doesn't have 1D images so we need to work around it
4026 let tex_1d_hack = dim == IDim::D1 && self.options.version.is_es();
4027 // Get the size of the coordinate vector
4028 let vector_size = self.get_coordinate_vector_size(dim, array_index.is_some());
4029
4030 if let proc::BoundsCheckPolicy::ReadZeroSkipWrite = policy {
4031 // To write the bounds checks for `ReadZeroSkipWrite` we will use a
4032 // ternary operator since we are in the middle of an expression and
4033 // need to return a value.
4034 //
4035            // NOTE: glsl short-circuits when evaluating logical
4036            // expressions, so by the time a later check runs we know that
4037            // all the earlier conditions already held
4038
4039 // Write parentheses around the ternary operator to prevent problems with
4040 // expressions emitted before or after it having more precedence
4041 write!(self.out, "(",)?;
4042
4043 // The lod check needs to precede the size check since we need
4044 // to use the lod to get the size of the image at that level.
4045 if let Some(level_expr) = level {
4046 self.write_expr(level_expr, ctx)?;
4047 write!(self.out, " < textureQueryLevels(",)?;
4048 self.write_expr(image, ctx)?;
4049 // Chain the next check
4050 write!(self.out, ") && ")?;
4051 }
4052
4053            // Check that the sample argument doesn't exceed the number of samples
4054 if let Some(sample_expr) = sample {
4055 self.write_expr(sample_expr, ctx)?;
4056 write!(self.out, " < textureSamples(",)?;
4057 self.write_expr(image, ctx)?;
4058 // Chain the next check
4059 write!(self.out, ") && ")?;
4060 }
4061
4062            // We now need to write the size checks for the coordinates and array index.
4063            // First we pick the comparison: if the image is 1D non-arrayed
4064            // (and no 1D to 2D hack was needed) we are comparing scalars, so the less-than
4065            // operator will suffice. Otherwise we'll be comparing two vectors, so we'll
4066            // need to use the `lessThan` function, but it returns a vector of booleans (one
4067            // for each comparison), so we need to fold it all into one scalar boolean. Since
4068            // we want all comparisons to pass, we use the `all` function, which will only
4069            // return `true` if all the elements of the boolean vector are also `true`.
4070 //
4071            // So we'll end up with one of the following forms
4072 // - `coord < textureSize(image, lod)` for 1D images
4073 // - `all(lessThan(coord, textureSize(image, lod)))` for normal images
4074 // - `all(lessThan(ivec(coord, array_index), textureSize(image, lod)))`
4075 // for arrayed images
4076 // - `all(lessThan(coord, textureSize(image)))` for multi sampled images
4077
4078 if vector_size != 1 {
4079 write!(self.out, "all(lessThan(")?;
4080 }
4081
4082 // Write the coordinate vector
4083 self.write_texture_coord(ctx, vector_size, coordinate, array_index, tex_1d_hack)?;
4084
4085 if vector_size != 1 {
4086 // If we used the `lessThan` function we need to separate the
4087 // coordinates from the image size.
4088 write!(self.out, ", ")?;
4089 } else {
4090                // If we didn't use it (i.e. 1D images) we perform the comparison
4091 // using the less than operator.
4092 write!(self.out, " < ")?;
4093 }
4094
4095 // Call `textureSize` to get our image size
4096 write!(self.out, "textureSize(")?;
4097 self.write_expr(image, ctx)?;
4098 // `textureSize` uses the lod as a second argument for mipmapped images
4099 if let Some(level_expr) = level {
4100 // Separate the image from the lod
4101 write!(self.out, ", ")?;
4102 self.write_expr(level_expr, ctx)?;
4103 }
4104 // Close the `textureSize` call
4105 write!(self.out, ")")?;
4106
4107 if vector_size != 1 {
4108 // Close the `all` and `lessThan` calls
4109 write!(self.out, "))")?;
4110 }
4111
4112 // Finally end the condition part of the ternary operator
4113 write!(self.out, " ? ")?;
4114 }
4115
4116 // Begin the call to the function used to load the texel
4117 write!(self.out, "{fun_name}(")?;
4118 self.write_expr(image, ctx)?;
4119 write!(self.out, ", ")?;
4120
4121 // If we are using `Restrict` bounds checking we need to pass valid texel
4122        // coordinates. To do so we use the `clamp` function to get a value between
4123        // 0 and the image size - 1 (indexing begins at 0).
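        // e.g. for a 2D image this produces roughly (illustrative names, where `lod`
        // stands for the previously written clamped lod local):
        //   clamp(coord, ivec2(0), textureSize(image, lod) - ivec2(1))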
4124 if let proc::BoundsCheckPolicy::Restrict = policy {
4125 write!(self.out, "clamp(")?;
4126 }
4127
4128 // Write the coordinate vector
4129 self.write_texture_coord(ctx, vector_size, coordinate, array_index, tex_1d_hack)?;
4130
4131 // If we are using `Restrict` bounds checking we need to write the rest of the
4132 // clamp we initiated before writing the coordinates.
4133 if let proc::BoundsCheckPolicy::Restrict = policy {
4134 // Write the min value 0
4135 if vector_size == 1 {
4136 write!(self.out, ", 0")?;
4137 } else {
4138 write!(self.out, ", ivec{vector_size}(0)")?;
4139 }
4140 // Start the `textureSize` call to use as the max value.
4141 write!(self.out, ", textureSize(")?;
4142 self.write_expr(image, ctx)?;
4143 // If the image is mipmapped we need to add the lod argument to the
4144            // `textureSize` call, but this needs to be the clamped lod, which should
4145 // have been generated earlier and put in a local.
4146 if class.is_mipmapped() {
4147 write!(
4148 self.out,
4149 ", {}{}{}",
4150 back::BAKE_PREFIX,
4151 handle.index(),
4152 CLAMPED_LOD_SUFFIX
4153 )?;
4154 }
4155 // Close the `textureSize` call
4156 write!(self.out, ")")?;
4157
4158 // Subtract 1 from the `textureSize` call since the coordinates are zero based.
4159 if vector_size == 1 {
4160 write!(self.out, " - 1")?;
4161 } else {
4162 write!(self.out, " - ivec{vector_size}(1)")?;
4163 }
4164
4165 // Close the `clamp` call
4166 write!(self.out, ")")?;
4167
4168 // Add the clamped lod (if present) as the second argument to the
4169 // image load function.
4170 if level.is_some() {
4171 write!(
4172 self.out,
4173 ", {}{}{}",
4174 back::BAKE_PREFIX,
4175 handle.index(),
4176 CLAMPED_LOD_SUFFIX
4177 )?;
4178 }
4179
4180 // If a sample argument is needed we need to clamp it between 0 and
4181 // the number of samples the image has.
4182 if let Some(sample_expr) = sample {
4183 write!(self.out, ", clamp(")?;
4184 self.write_expr(sample_expr, ctx)?;
4185 // Set the min value to 0 and start the call to `textureSamples`
4186 write!(self.out, ", 0, textureSamples(")?;
4187 self.write_expr(image, ctx)?;
4188 // Close the `textureSamples` call, subtract 1 from it since the sample
4189 // argument is zero based, and close the `clamp` call
4190 writeln!(self.out, ") - 1)")?;
4191 }
4192 } else if let Some(sample_or_level) = sample.or(level) {
4193            // If no bounds checking is needed, just add the sample or level argument
4194 // after the coordinates
4195 write!(self.out, ", ")?;
4196 self.write_expr(sample_or_level, ctx)?;
4197 }
4198
4199 // Close the image load function.
4200 write!(self.out, ")")?;
4201
4202 // If we were using the `ReadZeroSkipWrite` policy we need to end the first branch
4203 // (which is taken if the condition is `true`) with a colon (`:`) and write the
4204 // second branch which is just a 0 value.
4205 if let proc::BoundsCheckPolicy::ReadZeroSkipWrite = policy {
4206 // Get the kind of the output value.
4207 let kind = match class {
4208 // Only sampled images can reach here since storage images
4209 // don't need bounds checks and depth images aren't implemented
4210 crate::ImageClass::Sampled { kind, .. } => kind,
4211 _ => unreachable!(),
4212 };
4213
4214 // End the first branch
4215 write!(self.out, " : ")?;
4216 // Write the 0 value
4217 write!(
4218 self.out,
4219 "{}vec4(",
4220 glsl_scalar(crate::Scalar { kind, width: 4 })?.prefix,
4221 )?;
4222 self.write_zero_init_scalar(kind)?;
4223 // Close the zero value constructor
4224 write!(self.out, ")")?;
4225 // Close the parentheses surrounding our ternary
4226 write!(self.out, ")")?;
4227 }
4228
4229 Ok(())
4230 }
4231
4232 fn write_named_expr(
4233 &mut self,
4234 handle: Handle<crate::Expression>,
4235 name: String,
4236 // The expression which is being named.
4237 // Generally, this is the same as handle, except in WorkGroupUniformLoad
4238 named: Handle<crate::Expression>,
4239 ctx: &back::FunctionCtx,
4240 ) -> BackendResult {
4241 match ctx.info[named].ty {
4242 proc::TypeResolution::Handle(ty_handle) => match self.module.types[ty_handle].inner {
4243 TypeInner::Struct { .. } => {
4244 let ty_name = &self.names[&NameKey::Type(ty_handle)];
4245 write!(self.out, "{ty_name}")?;
4246 }
4247 _ => {
4248 self.write_type(ty_handle)?;
4249 }
4250 },
4251 proc::TypeResolution::Value(ref inner) => {
4252 self.write_value_type(inner)?;
4253 }
4254 }
4255
4256 let resolved = ctx.resolve_type(named, &self.module.types);
4257
4258 write!(self.out, " {name}")?;
4259 if let TypeInner::Array { base, size, .. } = *resolved {
4260 self.write_array_size(base, size)?;
4261 }
4262 write!(self.out, " = ")?;
4263 self.write_expr(handle, ctx)?;
4264 writeln!(self.out, ";")?;
4265 self.named_expressions.insert(named, name);
4266
4267 Ok(())
4268 }
4269
4270    /// Helper function that writes a string with the default zero initialization for supported types
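    ///
    /// e.g. a vector of floats zero-initializes as `vec3(0.0)` and a fixed-size
    /// array of 4 floats as `float[4](0.0, 0.0, 0.0, 0.0)`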
4271 fn write_zero_init_value(&mut self, ty: Handle<crate::Type>) -> BackendResult {
4272 let inner = &self.module.types[ty].inner;
4273 match *inner {
4274 TypeInner::Scalar(scalar) | TypeInner::Atomic(scalar) => {
4275 self.write_zero_init_scalar(scalar.kind)?;
4276 }
4277 TypeInner::Vector { scalar, .. } => {
4278 self.write_value_type(inner)?;
4279 write!(self.out, "(")?;
4280 self.write_zero_init_scalar(scalar.kind)?;
4281 write!(self.out, ")")?;
4282 }
4283 TypeInner::Matrix { .. } => {
4284 self.write_value_type(inner)?;
4285 write!(self.out, "(")?;
4286 self.write_zero_init_scalar(crate::ScalarKind::Float)?;
4287 write!(self.out, ")")?;
4288 }
4289 TypeInner::Array { base, size, .. } => {
4290 let count = match size
4291 .to_indexable_length(self.module)
4292 .expect("Bad array size")
4293 {
4294 proc::IndexableLength::Known(count) => count,
4295 proc::IndexableLength::Dynamic => return Ok(()),
4296 };
4297 self.write_type(base)?;
4298 self.write_array_size(base, size)?;
4299 write!(self.out, "(")?;
4300 for _ in 1..count {
4301 self.write_zero_init_value(base)?;
4302 write!(self.out, ", ")?;
4303 }
4304 // write last parameter without comma and space
4305 self.write_zero_init_value(base)?;
4306 write!(self.out, ")")?;
4307 }
4308 TypeInner::Struct { ref members, .. } => {
4309 let name = &self.names[&NameKey::Type(ty)];
4310 write!(self.out, "{name}(")?;
4311 for (index, member) in members.iter().enumerate() {
4312 if index != 0 {
4313 write!(self.out, ", ")?;
4314 }
4315 self.write_zero_init_value(member.ty)?;
4316 }
4317 write!(self.out, ")")?;
4318 }
4319 _ => unreachable!(),
4320 }
4321
4322 Ok(())
4323 }
4324
4325    /// Helper function that writes the zero-initialization string for a scalar
4326 fn write_zero_init_scalar(&mut self, kind: crate::ScalarKind) -> BackendResult {
4327 match kind {
4328 crate::ScalarKind::Bool => write!(self.out, "false")?,
4329 crate::ScalarKind::Uint => write!(self.out, "0u")?,
4330 crate::ScalarKind::Float => write!(self.out, "0.0")?,
4331 crate::ScalarKind::Sint => write!(self.out, "0")?,
4332 crate::ScalarKind::AbstractInt | crate::ScalarKind::AbstractFloat => {
4333 return Err(Error::Custom(
4334 "Abstract types should not appear in IR presented to backends".to_string(),
4335 ))
4336 }
4337 }
4338
4339 Ok(())
4340 }
4341
4342 /// Issue a memory barrier. Please note that to ensure visibility,
4343    /// OpenGL always requires a call to the `barrier()` function after a `memoryBarrier*()` call
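    ///
    /// e.g. a `Barrier::WORK_GROUP` barrier is written as
    /// `memoryBarrierShared();` followed by `barrier();`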
4344 fn write_barrier(&mut self, flags: crate::Barrier, level: back::Level) -> BackendResult {
4345 if flags.contains(crate::Barrier::STORAGE) {
4346 writeln!(self.out, "{level}memoryBarrierBuffer();")?;
4347 }
4348 if flags.contains(crate::Barrier::WORK_GROUP) {
4349 writeln!(self.out, "{level}memoryBarrierShared();")?;
4350 }
4351 if flags.contains(crate::Barrier::SUB_GROUP) {
4352 writeln!(self.out, "{level}subgroupMemoryBarrier();")?;
4353 }
4354 writeln!(self.out, "{level}barrier();")?;
4355 Ok(())
4356 }
4357
4358    /// Helper function that returns the glsl storage access string of [`StorageAccess`](crate::StorageAccess)
4359    ///
4360    /// glsl allows adding both `readonly` and `writeonly`, but that combination means the
4361    /// resource can only be used to query information about itself, which isn't what
4362    /// we want here, so when storage access is both `LOAD` and `STORE` we add no modifiers
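    ///
    /// e.g. `LOAD`-only access is written as `readonly `, `STORE`-only access as
    /// `writeonly `, and `LOAD | STORE` access as nothing at all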
4363 fn write_storage_access(&mut self, storage_access: crate::StorageAccess) -> BackendResult {
4364 if !storage_access.contains(crate::StorageAccess::STORE) {
4365 write!(self.out, "readonly ")?;
4366 }
4367 if !storage_access.contains(crate::StorageAccess::LOAD) {
4368 write!(self.out, "writeonly ")?;
4369 }
4370 Ok(())
4371 }
4372
4373 /// Helper method used to produce the reflection info that's returned to the user
4374 fn collect_reflection_info(&mut self) -> Result<ReflectionInfo, Error> {
4375 use std::collections::hash_map::Entry;
4376 let info = self.info.get_entry_point(self.entry_point_idx as usize);
4377 let mut texture_mapping = crate::FastHashMap::default();
4378 let mut uniforms = crate::FastHashMap::default();
4379
4380 for sampling in info.sampling_set.iter() {
4381 let tex_name = self.reflection_names_globals[&sampling.image].clone();
4382
4383 match texture_mapping.entry(tex_name) {
4384 Entry::Vacant(v) => {
4385 v.insert(TextureMapping {
4386 texture: sampling.image,
4387 sampler: Some(sampling.sampler),
4388 });
4389 }
4390 Entry::Occupied(e) => {
4391 if e.get().sampler != Some(sampling.sampler) {
4392 log::error!("Conflicting samplers for {}", e.key());
4393 return Err(Error::ImageMultipleSamplers);
4394 }
4395 }
4396 }
4397 }
4398
4399 let mut push_constant_info = None;
4400 for (handle, var) in self.module.global_variables.iter() {
4401 if info[handle].is_empty() {
4402 continue;
4403 }
4404 match self.module.types[var.ty].inner {
4405 crate::TypeInner::Image { .. } => {
4406 let tex_name = self.reflection_names_globals[&handle].clone();
4407 match texture_mapping.entry(tex_name) {
4408 Entry::Vacant(v) => {
4409 v.insert(TextureMapping {
4410 texture: handle,
4411 sampler: None,
4412 });
4413 }
4414 Entry::Occupied(_) => {
4415 // already used with a sampler, do nothing
4416 }
4417 }
4418 }
4419 _ => match var.space {
4420 crate::AddressSpace::Uniform | crate::AddressSpace::Storage { .. } => {
4421 let name = self.reflection_names_globals[&handle].clone();
4422 uniforms.insert(handle, name);
4423 }
4424 crate::AddressSpace::PushConstant => {
4425 let name = self.reflection_names_globals[&handle].clone();
4426 push_constant_info = Some((name, var.ty));
4427 }
4428 _ => (),
4429 },
4430 }
4431 }
4432
4433 let mut push_constant_segments = Vec::new();
4434 let mut push_constant_items = vec![];
4435
4436 if let Some((name, ty)) = push_constant_info {
4437 // We don't have a layouter available to us, so we need to create one.
4438 //
4439 // This is potentially a bit wasteful, but the set of types in the program
4440 // shouldn't be too large.
4441 let mut layouter = crate::proc::Layouter::default();
4442 layouter.update(self.module.to_ctx()).unwrap();
4443
4444 // We start with the name of the binding itself.
4445 push_constant_segments.push(name);
4446
4447 // We then recursively collect all the uniform fields of the push constant.
4448 self.collect_push_constant_items(
4449 ty,
4450 &mut push_constant_segments,
4451 &layouter,
4452 &mut 0,
4453 &mut push_constant_items,
4454 );
4455 }
4456
4457 Ok(ReflectionInfo {
4458 texture_mapping,
4459 uniforms,
4460 varying: mem::take(&mut self.varying),
4461 push_constant_items,
4462 })
4463 }
4464
4465 fn collect_push_constant_items(
4466 &mut self,
4467 ty: Handle<crate::Type>,
4468 segments: &mut Vec<String>,
4469 layouter: &crate::proc::Layouter,
4470 offset: &mut u32,
4471 items: &mut Vec<PushConstantItem>,
4472 ) {
4473 // At this point in the recursion, `segments` contains the path
4474 // needed to access `ty` from the root.
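        // For a hypothetical push constant binding `pc` with a struct member `lights`
        // that is an array of structs containing a `color` field, `segments` could hold
        // `["pc", ".lights", "[3]", ".color"]`, which joins to `pc.lights[3].color`.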
4475
4476 let layout = &layouter[ty];
4477 *offset = layout.alignment.round_up(*offset);
4478 match self.module.types[ty].inner {
4479 // All these types map directly to GL uniforms.
4480 TypeInner::Scalar { .. } | TypeInner::Vector { .. } | TypeInner::Matrix { .. } => {
4481 // Build the full name, by combining all current segments.
4482 let name: String = segments.iter().map(String::as_str).collect();
4483 items.push(PushConstantItem {
4484 access_path: name,
4485 offset: *offset,
4486 ty,
4487 });
4488 *offset += layout.size;
4489 }
4490 // Arrays are recursed into.
4491 TypeInner::Array { base, size, .. } => {
4492 let crate::ArraySize::Constant(count) = size else {
4493 unreachable!("Cannot have dynamic arrays in push constants");
4494 };
4495
4496 for i in 0..count.get() {
4497 // Add the array accessor and recurse.
4498 segments.push(format!("[{}]", i));
4499 self.collect_push_constant_items(base, segments, layouter, offset, items);
4500 segments.pop();
4501 }
4502
4503 // Ensure the stride is kept by rounding up to the alignment.
4504 *offset = layout.alignment.round_up(*offset)
4505 }
4506 TypeInner::Struct { ref members, .. } => {
4507 for (index, member) in members.iter().enumerate() {
4508 // Add struct accessor and recurse.
4509 segments.push(format!(
4510 ".{}",
4511 self.names[&NameKey::StructMember(ty, index as u32)]
4512 ));
4513 self.collect_push_constant_items(member.ty, segments, layouter, offset, items);
4514 segments.pop();
4515 }
4516
4517 // Ensure ending padding is kept by rounding up to the alignment.
4518 *offset = layout.alignment.round_up(*offset)
4519 }
4520 _ => unreachable!(),
4521 }
4522 }
4523}
4524
4525/// Structure returned by [`glsl_scalar`]
4526///
4527/// It contains both a prefix used in other types and the full type name
4528struct ScalarString<'a> {
4529 /// The prefix used to compose other types
4530 prefix: &'a str,
4531 /// The name of the scalar type
4532 full: &'a str,
4533}
4534
4535/// Helper function that returns scalar related strings
4536///
4537/// Check [`ScalarString`] for the information provided
4538///
4539/// # Errors
4540/// If a [`Float`](crate::ScalarKind::Float) with a width that isn't 4 or 8 is passed
4541const fn glsl_scalar(scalar: crate::Scalar) -> Result<ScalarString<'static>, Error> {
4542 use crate::ScalarKind as Sk;
4543
4544 Ok(match scalar.kind {
4545 Sk::Sint => ScalarString {
4546 prefix: "i",
4547 full: "int",
4548 },
4549 Sk::Uint => ScalarString {
4550 prefix: "u",
4551 full: "uint",
4552 },
4553 Sk::Float => match scalar.width {
4554 4 => ScalarString {
4555 prefix: "",
4556 full: "float",
4557 },
4558 8 => ScalarString {
4559 prefix: "d",
4560 full: "double",
4561 },
4562 _ => return Err(Error::UnsupportedScalar(scalar)),
4563 },
4564 Sk::Bool => ScalarString {
4565 prefix: "b",
4566 full: "bool",
4567 },
4568 Sk::AbstractInt | Sk::AbstractFloat => {
4569 return Err(Error::UnsupportedScalar(scalar));
4570 }
4571 })
4572}
4573
4574/// Helper function that returns the glsl variable name for a builtin
4575const fn glsl_built_in(built_in: crate::BuiltIn, options: VaryingOptions) -> &'static str {
4576 use crate::BuiltIn as Bi;
4577
4578 match built_in {
4579 Bi::Position { .. } => {
4580 if options.output {
4581 "gl_Position"
4582 } else {
4583 "gl_FragCoord"
4584 }
4585 }
4586 Bi::ViewIndex if options.targeting_webgl => "int(gl_ViewID_OVR)",
4587 Bi::ViewIndex => "gl_ViewIndex",
4588 // vertex
4589 Bi::BaseInstance => "uint(gl_BaseInstance)",
4590 Bi::BaseVertex => "uint(gl_BaseVertex)",
4591 Bi::ClipDistance => "gl_ClipDistance",
4592 Bi::CullDistance => "gl_CullDistance",
4593 Bi::InstanceIndex => {
4594 if options.draw_parameters {
4595 "(uint(gl_InstanceID) + uint(gl_BaseInstanceARB))"
4596 } else {
4597 // Must match FIRST_INSTANCE_BINDING
4598 "(uint(gl_InstanceID) + naga_vs_first_instance)"
4599 }
4600 }
4601 Bi::PointSize => "gl_PointSize",
4602 Bi::VertexIndex => "uint(gl_VertexID)",
4603 // fragment
4604 Bi::FragDepth => "gl_FragDepth",
4605 Bi::PointCoord => "gl_PointCoord",
4606 Bi::FrontFacing => "gl_FrontFacing",
4607 Bi::PrimitiveIndex => "uint(gl_PrimitiveID)",
4608 Bi::SampleIndex => "gl_SampleID",
4609 Bi::SampleMask => {
4610 if options.output {
4611 "gl_SampleMask"
4612 } else {
4613 "gl_SampleMaskIn"
4614 }
4615 }
4616 // compute
4617 Bi::GlobalInvocationId => "gl_GlobalInvocationID",
4618 Bi::LocalInvocationId => "gl_LocalInvocationID",
4619 Bi::LocalInvocationIndex => "gl_LocalInvocationIndex",
4620 Bi::WorkGroupId => "gl_WorkGroupID",
4621 Bi::WorkGroupSize => "gl_WorkGroupSize",
4622 Bi::NumWorkGroups => "gl_NumWorkGroups",
4623 // subgroup
4624 Bi::NumSubgroups => "gl_NumSubgroups",
4625 Bi::SubgroupId => "gl_SubgroupID",
4626 Bi::SubgroupSize => "gl_SubgroupSize",
4627 Bi::SubgroupInvocationId => "gl_SubgroupInvocationID",
4628 }
4629}
4630
4631/// Helper function that returns the string corresponding to the address space
4632const fn glsl_storage_qualifier(space: crate::AddressSpace) -> Option<&'static str> {
4633 use crate::AddressSpace as As;
4634
4635 match space {
4636 As::Function => None,
4637 As::Private => None,
4638 As::Storage { .. } => Some("buffer"),
4639 As::Uniform => Some("uniform"),
4640 As::Handle => Some("uniform"),
4641 As::WorkGroup => Some("shared"),
4642 As::PushConstant => Some("uniform"),
4643 }
4644}
4645
4646/// Helper function that returns the string corresponding to the glsl interpolation qualifier
4647const fn glsl_interpolation(interpolation: crate::Interpolation) -> &'static str {
4648 use crate::Interpolation as I;
4649
4650 match interpolation {
4651 I::Perspective => "smooth",
4652 I::Linear => "noperspective",
4653 I::Flat => "flat",
4654 }
4655}
4656
4657/// Return the GLSL auxiliary qualifier for the given sampling value.
4658const fn glsl_sampling(sampling: crate::Sampling) -> Option<&'static str> {
4659 use crate::Sampling as S;
4660
4661 match sampling {
4662 S::Center => None,
4663 S::Centroid => Some("centroid"),
4664 S::Sample => Some("sample"),
4665 }
4666}
4667
4668/// Helper function that returns the glsl dimension string of [`ImageDimension`](crate::ImageDimension)
4669const fn glsl_dimension(dim: crate::ImageDimension) -> &'static str {
4670 use crate::ImageDimension as IDim;
4671
4672 match dim {
4673 IDim::D1 => "1D",
4674 IDim::D2 => "2D",
4675 IDim::D3 => "3D",
4676 IDim::Cube => "Cube",
4677 }
4678}
4679
4680/// Helper function that returns the glsl storage format string of [`StorageFormat`](crate::StorageFormat)
4681fn glsl_storage_format(format: crate::StorageFormat) -> Result<&'static str, Error> {
4682 use crate::StorageFormat as Sf;
4683
4684 Ok(match format {
4685 Sf::R8Unorm => "r8",
4686 Sf::R8Snorm => "r8_snorm",
4687 Sf::R8Uint => "r8ui",
4688 Sf::R8Sint => "r8i",
4689 Sf::R16Uint => "r16ui",
4690 Sf::R16Sint => "r16i",
4691 Sf::R16Float => "r16f",
4692 Sf::Rg8Unorm => "rg8",
4693 Sf::Rg8Snorm => "rg8_snorm",
4694 Sf::Rg8Uint => "rg8ui",
4695 Sf::Rg8Sint => "rg8i",
4696 Sf::R32Uint => "r32ui",
4697 Sf::R32Sint => "r32i",
4698 Sf::R32Float => "r32f",
4699 Sf::Rg16Uint => "rg16ui",
4700 Sf::Rg16Sint => "rg16i",
4701 Sf::Rg16Float => "rg16f",
4702 Sf::Rgba8Unorm => "rgba8",
4703 Sf::Rgba8Snorm => "rgba8_snorm",
4704 Sf::Rgba8Uint => "rgba8ui",
4705 Sf::Rgba8Sint => "rgba8i",
4706 Sf::Rgb10a2Uint => "rgb10_a2ui",
4707 Sf::Rgb10a2Unorm => "rgb10_a2",
4708 Sf::Rg11b10Float => "r11f_g11f_b10f",
4709 Sf::Rg32Uint => "rg32ui",
4710 Sf::Rg32Sint => "rg32i",
4711 Sf::Rg32Float => "rg32f",
4712 Sf::Rgba16Uint => "rgba16ui",
4713 Sf::Rgba16Sint => "rgba16i",
4714 Sf::Rgba16Float => "rgba16f",
4715 Sf::Rgba32Uint => "rgba32ui",
4716 Sf::Rgba32Sint => "rgba32i",
4717 Sf::Rgba32Float => "rgba32f",
4718 Sf::R16Unorm => "r16",
4719 Sf::R16Snorm => "r16_snorm",
4720 Sf::Rg16Unorm => "rg16",
4721 Sf::Rg16Snorm => "rg16_snorm",
4722 Sf::Rgba16Unorm => "rgba16",
4723 Sf::Rgba16Snorm => "rgba16_snorm",
4724
4725 Sf::Bgra8Unorm => {
4726 return Err(Error::Custom(
4727 "Support format BGRA8 is not implemented".into(),
4728 ))
4729 }
4730 })
4731}
4732
4733fn is_value_init_supported(module: &crate::Module, ty: Handle<crate::Type>) -> bool {
4734 match module.types[ty].inner {
4735 TypeInner::Scalar { .. } | TypeInner::Vector { .. } | TypeInner::Matrix { .. } => true,
4736 TypeInner::Array { base, size, .. } => {
4737 size != crate::ArraySize::Dynamic && is_value_init_supported(module, base)
4738 }
4739 TypeInner::Struct { ref members, .. } => members
4740 .iter()
4741 .all(|member| is_value_init_supported(module, member.ty)),
4742 _ => false,
4743 }
4744}