wgpu_hal/gles/device.rs

use super::{conv, PrivateCapabilities};
use crate::auxil::map_naga_stage;
use glow::HasContext;
use std::{
    cmp::max,
    convert::TryInto,
    ptr,
    sync::{Arc, Mutex},
};

use arrayvec::ArrayVec;
#[cfg(native)]
use std::mem;
use std::sync::atomic::Ordering;

type ShaderStage<'a> = (
    naga::ShaderStage,
    &'a crate::ProgrammableStage<'a, super::Api>,
);
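/// Maps the GLSL name of a reflected resource to its binding register and linear slot.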
type NameBindingMap = rustc_hash::FxHashMap<String, (super::BindingRegister, u8)>;

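/// Per-stage state threaded through shader translation, collecting reflection
/// results into the pipeline-wide name, sampler, and push-constant maps.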
struct CompilationContext<'a> {
    layout: &'a super::PipelineLayout,
    sampler_map: &'a mut super::SamplerBindMap,
    name_binding_map: &'a mut NameBindingMap,
    push_constant_items: &'a mut Vec<naga::back::glsl::PushConstantItem>,
    multiview: Option<std::num::NonZeroU32>,
}

impl CompilationContext<'_> {
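    /// Record the GLSL backend's reflection output: name-to-slot entries for
    /// buffers and textures, texture/sampler pairings, and attribute/output
    /// locations bound on the program.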
    fn consume_reflection(
        self,
        gl: &glow::Context,
        module: &naga::Module,
        ep_info: &naga::valid::FunctionInfo,
        reflection_info: naga::back::glsl::ReflectionInfo,
        naga_stage: naga::ShaderStage,
        program: glow::Program,
    ) {
        for (handle, var) in module.global_variables.iter() {
            if ep_info[handle].is_empty() {
                continue;
            }
            let register = match var.space {
                naga::AddressSpace::Uniform => super::BindingRegister::UniformBuffers,
                naga::AddressSpace::Storage { .. } => super::BindingRegister::StorageBuffers,
                _ => continue,
            };

            let br = var.binding.as_ref().unwrap();
            let slot = self.layout.get_slot(br);

            let name = match reflection_info.uniforms.get(&handle) {
                Some(name) => name.clone(),
                None => continue,
            };
            log::trace!(
                "Rebind buffer: {:?} -> {}, register={:?}, slot={}",
                var.name.as_ref(),
                &name,
                register,
                slot
            );
            self.name_binding_map.insert(name, (register, slot));
        }

        for (name, mapping) in reflection_info.texture_mapping {
            let var = &module.global_variables[mapping.texture];
            let register = match module.types[var.ty].inner {
                naga::TypeInner::Image {
                    class: naga::ImageClass::Storage { .. },
                    ..
                } => super::BindingRegister::Images,
                _ => super::BindingRegister::Textures,
            };

            let tex_br = var.binding.as_ref().unwrap();
            let texture_linear_index = self.layout.get_slot(tex_br);

            self.name_binding_map
                .insert(name, (register, texture_linear_index));
            if let Some(sampler_handle) = mapping.sampler {
                let sam_br = module.global_variables[sampler_handle]
                    .binding
                    .as_ref()
                    .unwrap();
                let sampler_linear_index = self.layout.get_slot(sam_br);
                self.sampler_map[texture_linear_index as usize] = Some(sampler_linear_index);
            }
        }

        for (name, location) in reflection_info.varying {
            match naga_stage {
                naga::ShaderStage::Vertex => {
                    assert_eq!(location.index, 0);
                    unsafe { gl.bind_attrib_location(program, location.location, &name) }
                }
                naga::ShaderStage::Fragment => {
                    assert_eq!(location.index, 0);
                    unsafe { gl.bind_frag_data_location(program, location.location, &name) }
                }
                naga::ShaderStage::Compute => {}
            }
        }

        *self.push_constant_items = reflection_info.push_constant_items;
    }
}

impl super::Device {
    /// # Safety
    ///
    /// - `name` must be created respecting `desc`
    /// - `name` must be a texture
    /// - If `drop_guard` is [`None`], wgpu-hal will take ownership of the texture. If `drop_guard` is
    ///   [`Some`], the texture must be valid until the drop implementation
    ///   of the drop guard is called.
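    ///
    /// A minimal usage sketch; `raw_name` and `texture_desc` are hypothetical
    /// values produced by the embedder's own GL code:
    ///
    /// ```ignore
    /// let texture = unsafe { device.texture_from_raw(raw_name, &texture_desc, None) };
    /// ```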
    #[cfg(any(native, Emscripten))]
    pub unsafe fn texture_from_raw(
        &self,
        name: std::num::NonZeroU32,
        desc: &crate::TextureDescriptor,
        drop_guard: Option<crate::DropGuard>,
    ) -> super::Texture {
        super::Texture {
            inner: super::TextureInner::Texture {
                raw: glow::NativeTexture(name),
                target: super::Texture::get_info_from_desc(desc),
            },
            drop_guard,
            mip_level_count: desc.mip_level_count,
            array_layer_count: desc.array_layer_count(),
            format: desc.format,
            format_desc: self.shared.describe_texture_format(desc.format),
            copy_size: desc.copy_extent(),
        }
    }

    /// # Safety
    ///
    /// - `name` must be created respecting `desc`
    /// - `name` must be a renderbuffer
    /// - If `drop_guard` is [`None`], wgpu-hal will take ownership of the renderbuffer. If `drop_guard` is
    ///   [`Some`], the renderbuffer must be valid until the drop implementation
    ///   of the drop guard is called.
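    ///
    /// Usage mirrors [`texture_from_raw`](Self::texture_from_raw), passing a raw
    /// renderbuffer name instead of a texture name.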
    #[cfg(any(native, Emscripten))]
    pub unsafe fn texture_from_raw_renderbuffer(
        &self,
        name: std::num::NonZeroU32,
        desc: &crate::TextureDescriptor,
        drop_guard: Option<crate::DropGuard>,
    ) -> super::Texture {
        super::Texture {
            inner: super::TextureInner::Renderbuffer {
                raw: glow::NativeRenderbuffer(name),
            },
            drop_guard,
            mip_level_count: desc.mip_level_count,
            array_layer_count: desc.array_layer_count(),
            format: desc.format,
            format_desc: self.shared.describe_texture_format(desc.format),
            copy_size: desc.copy_extent(),
        }
    }

    unsafe fn compile_shader(
        gl: &glow::Context,
        shader: &str,
        naga_stage: naga::ShaderStage,
        #[cfg_attr(target_arch = "wasm32", allow(unused))] label: Option<&str>,
    ) -> Result<glow::Shader, crate::PipelineError> {
        let target = match naga_stage {
            naga::ShaderStage::Vertex => glow::VERTEX_SHADER,
            naga::ShaderStage::Fragment => glow::FRAGMENT_SHADER,
            naga::ShaderStage::Compute => glow::COMPUTE_SHADER,
        };

        let raw = unsafe { gl.create_shader(target) }.unwrap();
        #[cfg(native)]
        if gl.supports_debug() {
            //TODO: remove all transmutes from `object_label`
            // https://github.com/grovesNL/glow/issues/186
            let name = unsafe { mem::transmute(raw) };
            unsafe { gl.object_label(glow::SHADER, name, label) };
        }

        unsafe { gl.shader_source(raw, shader) };
        unsafe { gl.compile_shader(raw) };

        log::debug!("\tCompiled shader {:?}", raw);

        let compiled_ok = unsafe { gl.get_shader_compile_status(raw) };
        let msg = unsafe { gl.get_shader_info_log(raw) };
        if compiled_ok {
            if !msg.is_empty() {
                log::warn!("\tCompile: {}", msg);
            }
            Ok(raw)
        } else {
            log::error!("\tShader compilation failed: {}", msg);
            unsafe { gl.delete_shader(raw) };
            Err(crate::PipelineError::Linkage(
                map_naga_stage(naga_stage),
                msg,
            ))
        }
    }

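    /// Translate one naga stage to GLSL (applying pipeline-constant overrides and
    /// bounds-check policies), record its reflection info through `context`, and
    /// compile the generated source with [`Self::compile_shader`].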
    fn create_shader(
        gl: &glow::Context,
        naga_stage: naga::ShaderStage,
        stage: &crate::ProgrammableStage<super::Api>,
        context: CompilationContext,
        program: glow::Program,
    ) -> Result<glow::Shader, crate::PipelineError> {
        use naga::back::glsl;
        let pipeline_options = glsl::PipelineOptions {
            shader_stage: naga_stage,
            entry_point: stage.entry_point.to_string(),
            multiview: context.multiview,
        };

        let (module, info) = naga::back::pipeline_constants::process_overrides(
            &stage.module.naga.module,
            &stage.module.naga.info,
            stage.constants,
        )
        .map_err(|e| {
            let msg = format!("{e}");
            crate::PipelineError::Linkage(map_naga_stage(naga_stage), msg)
        })?;

        let entry_point_index = module
            .entry_points
            .iter()
            .position(|ep| ep.name.as_str() == stage.entry_point)
            .ok_or(crate::PipelineError::EntryPoint(naga_stage))?;

        use naga::proc::BoundsCheckPolicy;
        // The image bounds checks require the TEXTURE_LEVELS feature available in GL core 4.3+.
        let version = gl.version();
        let image_check = if !version.is_embedded && (version.major, version.minor) >= (4, 3) {
            BoundsCheckPolicy::ReadZeroSkipWrite
        } else {
            BoundsCheckPolicy::Unchecked
        };

        // Other bounds checks are either provided by glsl or not implemented yet.
        let policies = naga::proc::BoundsCheckPolicies {
            index: BoundsCheckPolicy::Unchecked,
            buffer: BoundsCheckPolicy::Unchecked,
            image_load: image_check,
            image_store: BoundsCheckPolicy::Unchecked,
            binding_array: BoundsCheckPolicy::Unchecked,
        };

        let mut output = String::new();
        let needs_temp_options = stage.zero_initialize_workgroup_memory
            != context.layout.naga_options.zero_initialize_workgroup_memory;
        let mut temp_options;
        let naga_options = if needs_temp_options {
            // Cloning `naga_options` could be expensive, so only do it when we
            // actually need to override a field.
            temp_options = context.layout.naga_options.clone();
            temp_options.zero_initialize_workgroup_memory = stage.zero_initialize_workgroup_memory;
            &temp_options
        } else {
            &context.layout.naga_options
        };
        let mut writer = glsl::Writer::new(
            &mut output,
            &module,
            &info,
            naga_options,
            &pipeline_options,
            policies,
        )
        .map_err(|e| {
            let msg = format!("{e}");
            crate::PipelineError::Linkage(map_naga_stage(naga_stage), msg)
        })?;

        let reflection_info = writer.write().map_err(|e| {
            let msg = format!("{e}");
            crate::PipelineError::Linkage(map_naga_stage(naga_stage), msg)
        })?;

        log::debug!("Naga generated shader:\n{}", output);

        context.consume_reflection(
            gl,
            &module,
            info.get_entry_point(entry_point_index),
            reflection_info,
            naga_stage,
            program,
        );

        unsafe { Self::compile_shader(gl, &output, naga_stage, stage.module.label.as_deref()) }
    }

    unsafe fn create_pipeline<'a>(
        &self,
        gl: &glow::Context,
        shaders: ArrayVec<ShaderStage<'a>, { crate::MAX_CONCURRENT_SHADER_STAGES }>,
        layout: &super::PipelineLayout,
        #[cfg_attr(target_arch = "wasm32", allow(unused))] label: Option<&str>,
        multiview: Option<std::num::NonZeroU32>,
    ) -> Result<Arc<super::PipelineInner>, crate::PipelineError> {
        let mut program_stages = ArrayVec::new();
        let mut group_to_binding_to_slot = Vec::with_capacity(layout.group_infos.len());
        for group in &*layout.group_infos {
            group_to_binding_to_slot.push(group.binding_to_slot.clone());
        }
        for &(naga_stage, stage) in &shaders {
            program_stages.push(super::ProgramStage {
                naga_stage: naga_stage.to_owned(),
                shader_id: stage.module.id,
                entry_point: stage.entry_point.to_owned(),
                zero_initialize_workgroup_memory: stage.zero_initialize_workgroup_memory,
            });
        }
        let mut guard = self
            .shared
            .program_cache
            .try_lock()
            .expect("Couldn't acquire program_cache lock");
        // This guard ensures that we can't accidentally destroy a program whilst we're about to reuse it:
        // the only place that destroys a pipeline also locks `program_cache`.
        let program = guard
            .entry(super::ProgramCacheKey {
                stages: program_stages,
                group_to_binding_to_slot: group_to_binding_to_slot.into_boxed_slice(),
            })
            .or_insert_with(|| unsafe {
                Self::create_program(
                    gl,
                    shaders,
                    layout,
                    label,
                    multiview,
                    self.shared.shading_language_version,
                    self.shared.private_caps,
                )
            })
            .to_owned()?;
        drop(guard);

        Ok(program)
    }

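    /// Compile and link all stages into a new GL program, then resolve the
    /// name-to-slot bindings and push-constant uniform locations for it.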
    unsafe fn create_program<'a>(
        gl: &glow::Context,
        shaders: ArrayVec<ShaderStage<'a>, { crate::MAX_CONCURRENT_SHADER_STAGES }>,
        layout: &super::PipelineLayout,
        #[cfg_attr(target_arch = "wasm32", allow(unused))] label: Option<&str>,
        multiview: Option<std::num::NonZeroU32>,
        glsl_version: naga::back::glsl::Version,
        private_caps: PrivateCapabilities,
    ) -> Result<Arc<super::PipelineInner>, crate::PipelineError> {
        let glsl_version = match glsl_version {
            naga::back::glsl::Version::Embedded { version, .. } => format!("{version} es"),
            naga::back::glsl::Version::Desktop(version) => format!("{version}"),
        };
        let program = unsafe { gl.create_program() }.unwrap();
        #[cfg(native)]
        if let Some(label) = label {
            if private_caps.contains(PrivateCapabilities::DEBUG_FNS) {
                let name = unsafe { mem::transmute(program) };
                unsafe { gl.object_label(glow::PROGRAM, name, Some(label)) };
            }
        }

        let mut name_binding_map = NameBindingMap::default();
        let mut push_constant_items = ArrayVec::<_, { crate::MAX_CONCURRENT_SHADER_STAGES }>::new();
        let mut sampler_map = [None; super::MAX_TEXTURE_SLOTS];
        let mut has_stages = wgt::ShaderStages::empty();
        let mut shaders_to_delete = ArrayVec::<_, { crate::MAX_CONCURRENT_SHADER_STAGES }>::new();

        for &(naga_stage, stage) in &shaders {
            has_stages |= map_naga_stage(naga_stage);
            let pc_item = {
                push_constant_items.push(Vec::new());
                push_constant_items.last_mut().unwrap()
            };
            let context = CompilationContext {
                layout,
                sampler_map: &mut sampler_map,
                name_binding_map: &mut name_binding_map,
                push_constant_items: pc_item,
                multiview,
            };

            let shader = Self::create_shader(gl, naga_stage, stage, context, program)?;
            shaders_to_delete.push(shader);
        }

        // Create empty fragment shader if only vertex shader is present
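        // With the version header computed above this expands to e.g.
        // "#version 300 es\n void main(void) {}" on GLES 3.0.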
        if has_stages == wgt::ShaderStages::VERTEX {
            let shader_src = format!("#version {glsl_version}\n void main(void) {{}}");
            log::info!("Only vertex shader is present. Creating an empty fragment shader");
            let shader = unsafe {
                Self::compile_shader(
                    gl,
                    &shader_src,
                    naga::ShaderStage::Fragment,
                    Some("(wgpu internal) dummy fragment shader"),
                )
            }?;
            shaders_to_delete.push(shader);
        }

        for &shader in shaders_to_delete.iter() {
            unsafe { gl.attach_shader(program, shader) };
        }
        unsafe { gl.link_program(program) };

        for shader in shaders_to_delete {
            unsafe { gl.delete_shader(shader) };
        }

        log::debug!("\tLinked program {:?}", program);

        let linked_ok = unsafe { gl.get_program_link_status(program) };
        let msg = unsafe { gl.get_program_info_log(program) };
        if !linked_ok {
            return Err(crate::PipelineError::Linkage(has_stages, msg));
        }
        if !msg.is_empty() {
            log::warn!("\tLink: {}", msg);
        }

        if !private_caps.contains(super::PrivateCapabilities::SHADER_BINDING_LAYOUT) {
            // This remapping is only needed if we aren't able to put the binding layout
            // in the shader. We can't remap storage buffers this way.
            unsafe { gl.use_program(Some(program)) };
            for (ref name, (register, slot)) in name_binding_map {
                log::trace!("Get binding {:?} from program {:?}", name, program);
                match register {
                    super::BindingRegister::UniformBuffers => {
                        let index = unsafe { gl.get_uniform_block_index(program, name) }.unwrap();
                        log::trace!("\tBinding slot {slot} to block index {index}");
                        unsafe { gl.uniform_block_binding(program, index, slot as _) };
                    }
                    super::BindingRegister::StorageBuffers => {
                        let index =
                            unsafe { gl.get_shader_storage_block_index(program, name) }.unwrap();
                        log::error!(
                            "Unable to re-map shader storage block {} to {}",
                            name,
                            index
                        );
                        return Err(crate::DeviceError::Lost.into());
                    }
                    super::BindingRegister::Textures | super::BindingRegister::Images => {
                        let location = unsafe { gl.get_uniform_location(program, name) };
                        unsafe { gl.uniform_1_i32(location.as_ref(), slot as _) };
                    }
                }
            }
        }

        let mut uniforms = ArrayVec::new();

        for (stage_idx, stage_items) in push_constant_items.into_iter().enumerate() {
            for item in stage_items {
                let naga_module = &shaders[stage_idx].1.module.naga.module;
                let type_inner = &naga_module.types[item.ty].inner;

                let location = unsafe { gl.get_uniform_location(program, &item.access_path) };

                log::trace!(
                    "push constant item: name={}, ty={:?}, offset={}, location={:?}",
                    item.access_path,
                    type_inner,
                    item.offset,
                    location,
                );

                if let Some(location) = location {
                    uniforms.push(super::PushConstantDesc {
                        location,
                        offset: item.offset,
                        size_bytes: type_inner.size(naga_module.to_ctx()),
                        ty: type_inner.clone(),
                    });
                }
            }
        }

        let first_instance_location = if has_stages.contains(wgt::ShaderStages::VERTEX) {
            // If this returns `None` (the uniform isn't active), that's fine; we just won't set it.
            unsafe { gl.get_uniform_location(program, naga::back::glsl::FIRST_INSTANCE_BINDING) }
        } else {
            None
        };

        Ok(Arc::new(super::PipelineInner {
            program,
            sampler_map,
            first_instance_location,
            push_constant_descs: uniforms,
        }))
    }
}

impl crate::Device for super::Device {
    type A = super::Api;

    unsafe fn exit(self, queue: super::Queue) {
        let gl = &self.shared.context.lock();
        unsafe { gl.delete_vertex_array(self.main_vao) };
        unsafe { gl.delete_framebuffer(queue.draw_fbo) };
        unsafe { gl.delete_framebuffer(queue.copy_fbo) };
        unsafe { gl.delete_buffer(queue.zero_buffer) };
    }

    unsafe fn create_buffer(
        &self,
        desc: &crate::BufferDescriptor,
    ) -> Result<super::Buffer, crate::DeviceError> {
        let target = if desc.usage.contains(crate::BufferUses::INDEX) {
            glow::ELEMENT_ARRAY_BUFFER
        } else {
            glow::ARRAY_BUFFER
        };

        let emulate_map = self
            .shared
            .workarounds
            .contains(super::Workarounds::EMULATE_BUFFER_MAP)
            || !self
                .shared
                .private_caps
                .contains(super::PrivateCapabilities::BUFFER_ALLOCATION);

        if emulate_map && desc.usage.intersects(crate::BufferUses::MAP_WRITE) {
            return Ok(super::Buffer {
                raw: None,
                target,
                size: desc.size,
                map_flags: 0,
                data: Some(Arc::new(Mutex::new(vec![0; desc.size as usize]))),
            });
        }

        let gl = &self.shared.context.lock();
        let is_host_visible = desc
            .usage
            .intersects(crate::BufferUses::MAP_READ | crate::BufferUses::MAP_WRITE);
        let is_coherent = desc
            .memory_flags
            .contains(crate::MemoryFlags::PREFER_COHERENT);

        let mut map_flags = 0;
        if desc.usage.contains(crate::BufferUses::MAP_READ) {
            map_flags |= glow::MAP_READ_BIT;
        }
        if desc.usage.contains(crate::BufferUses::MAP_WRITE) {
            map_flags |= glow::MAP_WRITE_BIT;
        }

        let raw = Some(unsafe { gl.create_buffer() }.map_err(|_| crate::DeviceError::OutOfMemory)?);
        unsafe { gl.bind_buffer(target, raw) };
        let raw_size = desc
            .size
            .try_into()
            .map_err(|_| crate::DeviceError::OutOfMemory)?;

        if self
            .shared
            .private_caps
            .contains(super::PrivateCapabilities::BUFFER_ALLOCATION)
        {
            if is_host_visible {
                map_flags |= glow::MAP_PERSISTENT_BIT;
                if is_coherent {
                    map_flags |= glow::MAP_COHERENT_BIT;
                }
            }
            // TODO: may also be required for other calls involving `buffer_sub_data_u8_slice` (e.g. copy buffer to buffer and clear buffer)
            if desc.usage.intersects(crate::BufferUses::QUERY_RESOLVE) {
                map_flags |= glow::DYNAMIC_STORAGE_BIT;
            }
            unsafe { gl.buffer_storage(target, raw_size, None, map_flags) };
        } else {
            assert!(!is_coherent);
            let usage = if is_host_visible {
                if desc.usage.contains(crate::BufferUses::MAP_READ) {
                    glow::STREAM_READ
                } else {
                    glow::DYNAMIC_DRAW
                }
            } else {
                // Even if the usage doesn't contain SRC_READ, we update it internally at least once
                // Some vendors take usage very literally and STATIC_DRAW will freeze us with an empty buffer
                // https://github.com/gfx-rs/wgpu/issues/3371
                glow::DYNAMIC_DRAW
            };
            unsafe { gl.buffer_data_size(target, raw_size, usage) };
        }

        unsafe { gl.bind_buffer(target, None) };

        if !is_coherent && desc.usage.contains(crate::BufferUses::MAP_WRITE) {
            map_flags |= glow::MAP_FLUSH_EXPLICIT_BIT;
        }
        //TODO: do we need `glow::MAP_UNSYNCHRONIZED_BIT`?

        #[cfg(native)]
        if let Some(label) = desc.label {
            if self
                .shared
                .private_caps
                .contains(PrivateCapabilities::DEBUG_FNS)
            {
                let name = unsafe { mem::transmute(raw) };
                unsafe { gl.object_label(glow::BUFFER, name, Some(label)) };
            }
        }

        let data = if emulate_map && desc.usage.contains(crate::BufferUses::MAP_READ) {
            Some(Arc::new(Mutex::new(vec![0; desc.size as usize])))
        } else {
            None
        };

        Ok(super::Buffer {
            raw,
            target,
            size: desc.size,
            map_flags,
            data,
        })
    }
    unsafe fn destroy_buffer(&self, buffer: super::Buffer) {
        if let Some(raw) = buffer.raw {
            let gl = &self.shared.context.lock();
            unsafe { gl.delete_buffer(raw) };
        }
    }

    unsafe fn map_buffer(
        &self,
        buffer: &super::Buffer,
        range: crate::MemoryRange,
    ) -> Result<crate::BufferMapping, crate::DeviceError> {
        let is_coherent = buffer.map_flags & glow::MAP_COHERENT_BIT != 0;
        let ptr = match buffer.raw {
            None => {
                let mut vec = buffer.data.as_ref().unwrap().lock().unwrap();
                let slice = &mut vec.as_mut_slice()[range.start as usize..range.end as usize];
                slice.as_mut_ptr()
            }
            Some(raw) => {
                let gl = &self.shared.context.lock();
                unsafe { gl.bind_buffer(buffer.target, Some(raw)) };
                let ptr = if let Some(ref map_read_allocation) = buffer.data {
                    let mut guard = map_read_allocation.lock().unwrap();
                    let slice = guard.as_mut_slice();
                    unsafe { self.shared.get_buffer_sub_data(gl, buffer.target, 0, slice) };
                    slice.as_mut_ptr()
                } else {
                    unsafe {
                        gl.map_buffer_range(
                            buffer.target,
                            range.start as i32,
                            (range.end - range.start) as i32,
                            buffer.map_flags,
                        )
                    }
                };
                unsafe { gl.bind_buffer(buffer.target, None) };
                ptr
            }
        };
        Ok(crate::BufferMapping {
            ptr: ptr::NonNull::new(ptr).ok_or(crate::DeviceError::Lost)?,
            is_coherent,
        })
    }
    unsafe fn unmap_buffer(&self, buffer: &super::Buffer) -> Result<(), crate::DeviceError> {
        if let Some(raw) = buffer.raw {
            if buffer.data.is_none() {
                let gl = &self.shared.context.lock();
                unsafe { gl.bind_buffer(buffer.target, Some(raw)) };
                unsafe { gl.unmap_buffer(buffer.target) };
                unsafe { gl.bind_buffer(buffer.target, None) };
            }
        }
        Ok(())
    }
    unsafe fn flush_mapped_ranges<I>(&self, buffer: &super::Buffer, ranges: I)
    where
        I: Iterator<Item = crate::MemoryRange>,
    {
        if let Some(raw) = buffer.raw {
            let gl = &self.shared.context.lock();
            unsafe { gl.bind_buffer(buffer.target, Some(raw)) };
            for range in ranges {
                unsafe {
                    gl.flush_mapped_buffer_range(
                        buffer.target,
                        range.start as i32,
                        (range.end - range.start) as i32,
                    )
                };
            }
        }
    }
    unsafe fn invalidate_mapped_ranges<I>(&self, _buffer: &super::Buffer, _ranges: I) {
        //TODO: do we need to do anything?
    }

    unsafe fn create_texture(
        &self,
        desc: &crate::TextureDescriptor,
    ) -> Result<super::Texture, crate::DeviceError> {
        let gl = &self.shared.context.lock();

        let render_usage = crate::TextureUses::COLOR_TARGET
            | crate::TextureUses::DEPTH_STENCIL_WRITE
            | crate::TextureUses::DEPTH_STENCIL_READ;
        let format_desc = self.shared.describe_texture_format(desc.format);

        let inner = if render_usage.contains(desc.usage)
            && desc.dimension == wgt::TextureDimension::D2
            && desc.size.depth_or_array_layers == 1
        {
            let raw = unsafe { gl.create_renderbuffer().unwrap() };
            unsafe { gl.bind_renderbuffer(glow::RENDERBUFFER, Some(raw)) };
            if desc.sample_count > 1 {
                unsafe {
                    gl.renderbuffer_storage_multisample(
                        glow::RENDERBUFFER,
                        desc.sample_count as i32,
                        format_desc.internal,
                        desc.size.width as i32,
                        desc.size.height as i32,
                    )
                };
            } else {
                unsafe {
                    gl.renderbuffer_storage(
                        glow::RENDERBUFFER,
                        format_desc.internal,
                        desc.size.width as i32,
                        desc.size.height as i32,
                    )
                };
            }

            #[cfg(native)]
            if let Some(label) = desc.label {
                if self
                    .shared
                    .private_caps
                    .contains(PrivateCapabilities::DEBUG_FNS)
                {
                    let name = unsafe { mem::transmute(raw) };
                    unsafe { gl.object_label(glow::RENDERBUFFER, name, Some(label)) };
                }
            }

            unsafe { gl.bind_renderbuffer(glow::RENDERBUFFER, None) };
            super::TextureInner::Renderbuffer { raw }
        } else {
            let raw = unsafe { gl.create_texture().unwrap() };
            let target = super::Texture::get_info_from_desc(desc);

            unsafe { gl.bind_texture(target, Some(raw)) };
            //Note: this has to be done before defining the storage!
            match desc.format.sample_type(None, Some(self.shared.features)) {
                Some(
                    wgt::TextureSampleType::Float { filterable: false }
                    | wgt::TextureSampleType::Uint
                    | wgt::TextureSampleType::Sint,
                ) => {
                    // reset default filtering mode
                    unsafe {
                        gl.tex_parameter_i32(target, glow::TEXTURE_MIN_FILTER, glow::NEAREST as i32)
                    };
                    unsafe {
                        gl.tex_parameter_i32(target, glow::TEXTURE_MAG_FILTER, glow::NEAREST as i32)
                    };
                }
                _ => {}
            }

            if conv::is_layered_target(target) {
                unsafe {
                    if self
                        .shared
                        .private_caps
                        .contains(PrivateCapabilities::TEXTURE_STORAGE)
                    {
                        gl.tex_storage_3d(
                            target,
                            desc.mip_level_count as i32,
                            format_desc.internal,
                            desc.size.width as i32,
                            desc.size.height as i32,
                            desc.size.depth_or_array_layers as i32,
                        )
                    } else if target == glow::TEXTURE_3D {
                        let mut width = desc.size.width;
                        let mut height = desc.size.height;
                        let mut depth = desc.size.depth_or_array_layers;
                        for i in 0..desc.mip_level_count {
                            gl.tex_image_3d(
                                target,
                                i as i32,
                                format_desc.internal as i32,
                                width as i32,
                                height as i32,
                                depth as i32,
                                0,
                                format_desc.external,
                                format_desc.data_type,
                                None,
                            );
                            width = max(1, width / 2);
                            height = max(1, height / 2);
                            depth = max(1, depth / 2);
                        }
                    } else {
                        let mut width = desc.size.width;
                        let mut height = desc.size.height;
                        for i in 0..desc.mip_level_count {
                            gl.tex_image_3d(
                                target,
                                i as i32,
                                format_desc.internal as i32,
                                width as i32,
                                height as i32,
                                desc.size.depth_or_array_layers as i32,
                                0,
                                format_desc.external,
                                format_desc.data_type,
                                None,
                            );
                            width = max(1, width / 2);
                            height = max(1, height / 2);
                        }
                    }
                };
            } else if desc.sample_count > 1 {
                unsafe {
                    gl.tex_storage_2d_multisample(
                        target,
                        desc.sample_count as i32,
                        format_desc.internal,
                        desc.size.width as i32,
                        desc.size.height as i32,
                        true,
                    )
                };
            } else {
                unsafe {
                    if self
                        .shared
                        .private_caps
                        .contains(PrivateCapabilities::TEXTURE_STORAGE)
                    {
                        gl.tex_storage_2d(
                            target,
                            desc.mip_level_count as i32,
                            format_desc.internal,
                            desc.size.width as i32,
                            desc.size.height as i32,
                        )
                    } else if target == glow::TEXTURE_CUBE_MAP {
                        let mut width = desc.size.width;
                        let mut height = desc.size.height;
                        for i in 0..desc.mip_level_count {
                            for face in [
                                glow::TEXTURE_CUBE_MAP_POSITIVE_X,
                                glow::TEXTURE_CUBE_MAP_NEGATIVE_X,
                                glow::TEXTURE_CUBE_MAP_POSITIVE_Y,
                                glow::TEXTURE_CUBE_MAP_NEGATIVE_Y,
                                glow::TEXTURE_CUBE_MAP_POSITIVE_Z,
                                glow::TEXTURE_CUBE_MAP_NEGATIVE_Z,
                            ] {
                                gl.tex_image_2d(
                                    face,
                                    i as i32,
                                    format_desc.internal as i32,
                                    width as i32,
                                    height as i32,
                                    0,
                                    format_desc.external,
                                    format_desc.data_type,
                                    None,
                                );
                            }
                            width = max(1, width / 2);
                            height = max(1, height / 2);
                        }
                    } else {
                        let mut width = desc.size.width;
                        let mut height = desc.size.height;
                        for i in 0..desc.mip_level_count {
                            gl.tex_image_2d(
                                target,
                                i as i32,
                                format_desc.internal as i32,
                                width as i32,
                                height as i32,
                                0,
                                format_desc.external,
                                format_desc.data_type,
                                None,
                            );
                            width = max(1, width / 2);
                            height = max(1, height / 2);
                        }
                    }
                };
            }

            #[cfg(native)]
            if let Some(label) = desc.label {
                if self
                    .shared
                    .private_caps
                    .contains(PrivateCapabilities::DEBUG_FNS)
                {
                    let name = unsafe { mem::transmute(raw) };
                    unsafe { gl.object_label(glow::TEXTURE, name, Some(label)) };
                }
            }

            unsafe { gl.bind_texture(target, None) };
            super::TextureInner::Texture { raw, target }
        };

        Ok(super::Texture {
            inner,
            drop_guard: None,
            mip_level_count: desc.mip_level_count,
            array_layer_count: desc.array_layer_count(),
            format: desc.format,
            format_desc,
            copy_size: desc.copy_extent(),
        })
    }
    unsafe fn destroy_texture(&self, texture: super::Texture) {
        if texture.drop_guard.is_none() {
            let gl = &self.shared.context.lock();
            match texture.inner {
                super::TextureInner::Renderbuffer { raw, .. } => {
                    unsafe { gl.delete_renderbuffer(raw) };
                }
                super::TextureInner::DefaultRenderbuffer => {}
                super::TextureInner::Texture { raw, .. } => {
                    unsafe { gl.delete_texture(raw) };
                }
                #[cfg(webgl)]
                super::TextureInner::ExternalFramebuffer { .. } => {}
            }
        }

        // For clarity, we explicitly drop the drop guard. This has no real semantic effect,
        // since the guard would be dropped at the end of the scope anyway (this function
        // takes ownership of the texture).
        drop(texture.drop_guard);
    }

    unsafe fn create_texture_view(
        &self,
        texture: &super::Texture,
        desc: &crate::TextureViewDescriptor,
    ) -> Result<super::TextureView, crate::DeviceError> {
        Ok(super::TextureView {
            //TODO: use `conv::map_view_dimension(desc.dimension)`?
            inner: texture.inner.clone(),
            aspects: crate::FormatAspects::new(texture.format, desc.range.aspect),
            mip_levels: desc.range.mip_range(texture.mip_level_count),
            array_layers: desc.range.layer_range(texture.array_layer_count),
            format: texture.format,
        })
    }
    unsafe fn destroy_texture_view(&self, _view: super::TextureView) {}

    unsafe fn create_sampler(
        &self,
        desc: &crate::SamplerDescriptor,
    ) -> Result<super::Sampler, crate::DeviceError> {
        let gl = &self.shared.context.lock();

        let raw = unsafe { gl.create_sampler().unwrap() };

        let (min, mag) =
            conv::map_filter_modes(desc.min_filter, desc.mag_filter, desc.mipmap_filter);

        unsafe { gl.sampler_parameter_i32(raw, glow::TEXTURE_MIN_FILTER, min as i32) };
        unsafe { gl.sampler_parameter_i32(raw, glow::TEXTURE_MAG_FILTER, mag as i32) };

        unsafe {
            gl.sampler_parameter_i32(
                raw,
                glow::TEXTURE_WRAP_S,
                conv::map_address_mode(desc.address_modes[0]) as i32,
            )
        };
        unsafe {
            gl.sampler_parameter_i32(
                raw,
                glow::TEXTURE_WRAP_T,
                conv::map_address_mode(desc.address_modes[1]) as i32,
            )
        };
        unsafe {
            gl.sampler_parameter_i32(
                raw,
                glow::TEXTURE_WRAP_R,
                conv::map_address_mode(desc.address_modes[2]) as i32,
            )
        };

        if let Some(border_color) = desc.border_color {
            let border = match border_color {
                wgt::SamplerBorderColor::TransparentBlack | wgt::SamplerBorderColor::Zero => {
                    [0.0; 4]
                }
                wgt::SamplerBorderColor::OpaqueBlack => [0.0, 0.0, 0.0, 1.0],
                wgt::SamplerBorderColor::OpaqueWhite => [1.0; 4],
            };
            unsafe { gl.sampler_parameter_f32_slice(raw, glow::TEXTURE_BORDER_COLOR, &border) };
        }

        unsafe { gl.sampler_parameter_f32(raw, glow::TEXTURE_MIN_LOD, desc.lod_clamp.start) };
        unsafe { gl.sampler_parameter_f32(raw, glow::TEXTURE_MAX_LOD, desc.lod_clamp.end) };

        // If clamp is not 1, we know anisotropy is supported up to 16x
        if desc.anisotropy_clamp != 1 {
            unsafe {
                gl.sampler_parameter_i32(
                    raw,
                    glow::TEXTURE_MAX_ANISOTROPY,
                    desc.anisotropy_clamp as i32,
                )
            };
        }

        //set_param_float(glow::TEXTURE_LOD_BIAS, info.lod_bias.0);

        if let Some(compare) = desc.compare {
            unsafe {
                gl.sampler_parameter_i32(
                    raw,
                    glow::TEXTURE_COMPARE_MODE,
                    glow::COMPARE_REF_TO_TEXTURE as i32,
                )
            };
            unsafe {
                gl.sampler_parameter_i32(
                    raw,
                    glow::TEXTURE_COMPARE_FUNC,
                    conv::map_compare_func(compare) as i32,
                )
            };
        }

        #[cfg(native)]
        if let Some(label) = desc.label {
            if self
                .shared
                .private_caps
                .contains(PrivateCapabilities::DEBUG_FNS)
            {
                let name = unsafe { mem::transmute(raw) };
                unsafe { gl.object_label(glow::SAMPLER, name, Some(label)) };
            }
        }

        Ok(super::Sampler { raw })
    }
    unsafe fn destroy_sampler(&self, sampler: super::Sampler) {
        let gl = &self.shared.context.lock();
        unsafe { gl.delete_sampler(sampler.raw) };
    }

    unsafe fn create_command_encoder(
        &self,
        _desc: &crate::CommandEncoderDescriptor<super::Api>,
    ) -> Result<super::CommandEncoder, crate::DeviceError> {
        Ok(super::CommandEncoder {
            cmd_buffer: super::CommandBuffer::default(),
            state: Default::default(),
            private_caps: self.shared.private_caps,
        })
    }
    unsafe fn destroy_command_encoder(&self, _encoder: super::CommandEncoder) {}

    unsafe fn create_bind_group_layout(
        &self,
        desc: &crate::BindGroupLayoutDescriptor,
    ) -> Result<super::BindGroupLayout, crate::DeviceError> {
        Ok(super::BindGroupLayout {
            entries: Arc::from(desc.entries),
        })
    }
    unsafe fn destroy_bind_group_layout(&self, _bg_layout: super::BindGroupLayout) {}

    unsafe fn create_pipeline_layout(
        &self,
        desc: &crate::PipelineLayoutDescriptor<super::Api>,
    ) -> Result<super::PipelineLayout, crate::DeviceError> {
        use naga::back::glsl;

        let mut group_infos = Vec::with_capacity(desc.bind_group_layouts.len());
        let mut num_samplers = 0u8;
        let mut num_textures = 0u8;
        let mut num_images = 0u8;
        let mut num_uniform_buffers = 0u8;
        let mut num_storage_buffers = 0u8;

        let mut writer_flags = glsl::WriterFlags::ADJUST_COORDINATE_SPACE;
        writer_flags.set(
            glsl::WriterFlags::TEXTURE_SHADOW_LOD,
            self.shared
                .private_caps
                .contains(super::PrivateCapabilities::SHADER_TEXTURE_SHADOW_LOD),
        );
        writer_flags.set(
            glsl::WriterFlags::DRAW_PARAMETERS,
            self.shared
                .private_caps
                .contains(super::PrivateCapabilities::FULLY_FEATURED_INSTANCING),
        );
        // We always force point size to be written and it will be ignored by the driver if it's not a point list primitive.
        // https://github.com/gfx-rs/wgpu/pull/3440/files#r1095726950
        writer_flags.set(glsl::WriterFlags::FORCE_POINT_SIZE, true);
        let mut binding_map = glsl::BindingMap::default();

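        // Bindings are assigned in declaration order into a single linear slot
        // space per register type (samplers, textures, images, uniform and storage
        // buffers). For example, if group 0 declares two textures, a texture at
        // (group 1, binding 0) lands in texture slot 2.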
        for (group_index, bg_layout) in desc.bind_group_layouts.iter().enumerate() {
            // Create a vector big enough to hold all the bindings, filled with `!0`.
            let mut binding_to_slot = vec![
                !0;
                bg_layout
                    .entries
                    .iter()
                    .map(|b| b.binding)
                    .max()
                    .map_or(0, |idx| idx as usize + 1)
            ]
            .into_boxed_slice();

            for entry in bg_layout.entries.iter() {
                let counter = match entry.ty {
                    wgt::BindingType::Sampler { .. } => &mut num_samplers,
                    wgt::BindingType::Texture { .. } => &mut num_textures,
                    wgt::BindingType::StorageTexture { .. } => &mut num_images,
                    wgt::BindingType::Buffer {
                        ty: wgt::BufferBindingType::Uniform,
                        ..
                    } => &mut num_uniform_buffers,
                    wgt::BindingType::Buffer {
                        ty: wgt::BufferBindingType::Storage { .. },
                        ..
                    } => &mut num_storage_buffers,
                    wgt::BindingType::AccelerationStructure => unimplemented!(),
                };

                binding_to_slot[entry.binding as usize] = *counter;
                let br = naga::ResourceBinding {
                    group: group_index as u32,
                    binding: entry.binding,
                };
                binding_map.insert(br, *counter);
                *counter += entry.count.map_or(1, |c| c.get() as u8);
            }

            group_infos.push(super::BindGroupLayoutInfo {
                entries: Arc::clone(&bg_layout.entries),
                binding_to_slot,
            });
        }

        Ok(super::PipelineLayout {
            group_infos: group_infos.into_boxed_slice(),
            naga_options: glsl::Options {
                version: self.shared.shading_language_version,
                writer_flags,
                binding_map,
                zero_initialize_workgroup_memory: true,
            },
        })
    }
    unsafe fn destroy_pipeline_layout(&self, _pipeline_layout: super::PipelineLayout) {}

    unsafe fn create_bind_group(
        &self,
        desc: &crate::BindGroupDescriptor<super::Api>,
    ) -> Result<super::BindGroup, crate::DeviceError> {
        let mut contents = Vec::new();

        let layout_and_entry_iter = desc.entries.iter().map(|entry| {
            let layout = desc
                .layout
                .entries
                .iter()
                .find(|layout_entry| layout_entry.binding == entry.binding)
                .expect("internal error: no layout entry found with binding slot");
            (entry, layout)
        });
        for (entry, layout) in layout_and_entry_iter {
            let binding = match layout.ty {
                wgt::BindingType::Buffer { .. } => {
                    let bb = &desc.buffers[entry.resource_index as usize];
                    super::RawBinding::Buffer {
                        raw: bb.buffer.raw.unwrap(),
                        offset: bb.offset as i32,
                        size: match bb.size {
                            Some(s) => s.get() as i32,
                            None => (bb.buffer.size - bb.offset) as i32,
                        },
                    }
                }
                wgt::BindingType::Sampler { .. } => {
                    let sampler = desc.samplers[entry.resource_index as usize];
                    super::RawBinding::Sampler(sampler.raw)
                }
                wgt::BindingType::Texture { view_dimension, .. } => {
                    let view = desc.textures[entry.resource_index as usize].view;
                    if view.array_layers.start != 0 {
                        log::error!("Unable to create a sampled texture binding for non-zero array layer.\n{}",
                            "This is an implementation problem of wgpu-hal/gles backend.")
                    }
                    let (raw, target) = view.inner.as_native();

                    super::Texture::log_failing_target_heuristics(view_dimension, target);

                    super::RawBinding::Texture {
                        raw,
                        target,
                        aspects: view.aspects,
                        mip_levels: view.mip_levels.clone(),
                    }
                }
                wgt::BindingType::StorageTexture {
                    access,
                    format,
                    view_dimension,
                } => {
                    let view = desc.textures[entry.resource_index as usize].view;
                    let format_desc = self.shared.describe_texture_format(format);
                    let (raw, _target) = view.inner.as_native();
                    super::RawBinding::Image(super::ImageBinding {
                        raw,
                        mip_level: view.mip_levels.start,
                        array_layer: match view_dimension {
                            wgt::TextureViewDimension::D2Array
                            | wgt::TextureViewDimension::CubeArray => None,
                            _ => Some(view.array_layers.start),
                        },
                        access: conv::map_storage_access(access),
                        format: format_desc.internal,
                    })
                }
                wgt::BindingType::AccelerationStructure => unimplemented!(),
            };
            contents.push(binding);
        }

        Ok(super::BindGroup {
            contents: contents.into_boxed_slice(),
        })
    }
    unsafe fn destroy_bind_group(&self, _group: super::BindGroup) {}
1278
1279    unsafe fn create_shader_module(
1280        &self,
1281        desc: &crate::ShaderModuleDescriptor,
1282        shader: crate::ShaderInput,
1283    ) -> Result<super::ShaderModule, crate::ShaderError> {
1284        Ok(super::ShaderModule {
1285            naga: match shader {
1286                crate::ShaderInput::SpirV(_) => {
1287                    panic!("`Features::SPIRV_SHADER_PASSTHROUGH` is not enabled")
1288                }
1289                crate::ShaderInput::Naga(naga) => naga,
1290            },
1291            label: desc.label.map(|str| str.to_string()),
1292            id: self.shared.next_shader_id.fetch_add(1, Ordering::Relaxed),
1293        })
1294    }
1295    unsafe fn destroy_shader_module(&self, _module: super::ShaderModule) {}
1296
1297    unsafe fn create_render_pipeline(
1298        &self,
1299        desc: &crate::RenderPipelineDescriptor<super::Api>,
1300    ) -> Result<super::RenderPipeline, crate::PipelineError> {
1301        let gl = &self.shared.context.lock();
1302        let mut shaders = ArrayVec::new();
1303        shaders.push((naga::ShaderStage::Vertex, &desc.vertex_stage));
1304        if let Some(ref fs) = desc.fragment_stage {
1305            shaders.push((naga::ShaderStage::Fragment, fs));
1306        }
1307        let inner =
1308            unsafe { self.create_pipeline(gl, shaders, desc.layout, desc.label, desc.multiview) }?;
1309
1310        let (vertex_buffers, vertex_attributes) = {
1311            let mut buffers = Vec::new();
1312            let mut attributes = Vec::new();
1313            for (index, vb_layout) in desc.vertex_buffers.iter().enumerate() {
1314                buffers.push(super::VertexBufferDesc {
1315                    step: vb_layout.step_mode,
1316                    stride: vb_layout.array_stride as u32,
1317                });
1318                for vat in vb_layout.attributes.iter() {
1319                    let format_desc = conv::describe_vertex_format(vat.format);
1320                    attributes.push(super::AttributeDesc {
1321                        location: vat.shader_location,
1322                        offset: vat.offset as u32,
1323                        buffer_index: index as u32,
1324                        format_desc,
1325                    });
1326                }
1327            }
1328            (buffers.into_boxed_slice(), attributes.into_boxed_slice())
1329        };

        let color_targets = {
            let mut targets = Vec::new();
            for ct in desc.color_targets.iter().filter_map(|at| at.as_ref()) {
                targets.push(super::ColorTargetDesc {
                    mask: ct.write_mask,
                    blend: ct.blend.as_ref().map(conv::map_blend),
                });
            }
            // Note: if the targets carry differing blend or mask states and the
            // `INDEPENDENT_BLEND` private capability is not exposed, this
            // pipeline will not bind correctly: GL would apply a single blend
            // state to every draw buffer.
            targets.into_boxed_slice()
        };

        Ok(super::RenderPipeline {
            inner,
            primitive: desc.primitive,
            vertex_buffers,
            vertex_attributes,
            color_targets,
            depth: desc.depth_stencil.as_ref().map(|ds| super::DepthState {
                function: conv::map_compare_func(ds.depth_compare),
                mask: ds.depth_write_enabled,
            }),
            depth_bias: desc
                .depth_stencil
                .as_ref()
                .map(|ds| ds.bias)
                .unwrap_or_default(),
            stencil: desc
                .depth_stencil
                .as_ref()
                .map(|ds| conv::map_stencil(&ds.stencil)),
            alpha_to_coverage_enabled: desc.multisample.alpha_to_coverage_enabled,
        })
    }
    unsafe fn destroy_render_pipeline(&self, pipeline: super::RenderPipeline) {
        let mut program_cache = self.shared.program_cache.lock();
        // If the pipeline only has 2 strong references remaining, they're `pipeline` and `program_cache`.
        // This is safe to assume as long as:
        // - `RenderPipeline` can't be cloned
        // - The only place that we can get a new reference is during `program_cache.lock()`
        if Arc::strong_count(&pipeline.inner) == 2 {
            program_cache.retain(|_, v| match *v {
                Ok(ref p) => p.program != pipeline.inner.program,
                Err(_) => false,
            });
            let gl = &self.shared.context.lock();
            unsafe { gl.delete_program(pipeline.inner.program) };
        }
    }

    unsafe fn create_compute_pipeline(
        &self,
        desc: &crate::ComputePipelineDescriptor<super::Api>,
    ) -> Result<super::ComputePipeline, crate::PipelineError> {
        let gl = &self.shared.context.lock();
        let mut shaders = ArrayVec::new();
        shaders.push((naga::ShaderStage::Compute, &desc.stage));
        let inner = unsafe { self.create_pipeline(gl, shaders, desc.layout, desc.label, None) }?;

        Ok(super::ComputePipeline { inner })
    }
    unsafe fn destroy_compute_pipeline(&self, pipeline: super::ComputePipeline) {
        let mut program_cache = self.shared.program_cache.lock();
        // If the pipeline only has 2 strong references remaining, they're `pipeline` and `program_cache`.
        // This is safe to assume as long as:
        // - `ComputePipeline` can't be cloned
        // - The only place that we can get a new reference is during `program_cache.lock()`
        if Arc::strong_count(&pipeline.inner) == 2 {
            program_cache.retain(|_, v| match *v {
                Ok(ref p) => p.program != pipeline.inner.program,
                Err(_) => false,
            });
            let gl = &self.shared.context.lock();
            unsafe { gl.delete_program(pipeline.inner.program) };
        }
    }

    #[cfg_attr(target_arch = "wasm32", allow(unused))]
    unsafe fn create_query_set(
        &self,
        desc: &wgt::QuerySetDescriptor<crate::Label>,
    ) -> Result<super::QuerySet, crate::DeviceError> {
        let gl = &self.shared.context.lock();

        let mut queries = Vec::with_capacity(desc.count as usize);
        for _ in 0..desc.count {
            let query =
                unsafe { gl.create_query() }.map_err(|_| crate::DeviceError::OutOfMemory)?;

            // We aren't really able to, in general, label queries.
            //
            // We could take a timestamp here to "initialize" the query,
            // but that's a bit of a hack, and we don't want to insert
            // random timestamps into the command stream if we don't have to.

            queries.push(query);
        }
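        // Occlusion queries map to `ANY_SAMPLES_PASSED_CONSERVATIVE`, which GL
        // allows to over-report passing samples; presumably acceptable here,
        // since only a boolean "did any samples pass" answer is needed.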

        Ok(super::QuerySet {
            queries: queries.into_boxed_slice(),
            target: match desc.ty {
                wgt::QueryType::Occlusion => glow::ANY_SAMPLES_PASSED_CONSERVATIVE,
                wgt::QueryType::Timestamp => glow::TIMESTAMP,
                _ => unimplemented!(),
            },
        })
    }
    unsafe fn destroy_query_set(&self, set: super::QuerySet) {
        let gl = &self.shared.context.lock();
        for &query in set.queries.iter() {
            unsafe { gl.delete_query(query) };
        }
    }
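    // A fence is modeled as a monotonically increasing value plus a list of
    // pending GL sync objects, one per signaled value; polling a fence walks
    // the pending syncs to find the latest value that has completed.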
    unsafe fn create_fence(&self) -> Result<super::Fence, crate::DeviceError> {
        Ok(super::Fence {
            last_completed: 0,
            pending: Vec::new(),
        })
    }
    unsafe fn destroy_fence(&self, fence: super::Fence) {
        let gl = &self.shared.context.lock();
        for (_, sync) in fence.pending {
            unsafe { gl.delete_sync(sync) };
        }
    }
    unsafe fn get_fence_value(
        &self,
        fence: &super::Fence,
    ) -> Result<crate::FenceValue, crate::DeviceError> {
        #[cfg_attr(target_arch = "wasm32", allow(clippy::needless_borrow))]
        Ok(fence.get_latest(&self.shared.context.lock()))
    }
    unsafe fn wait(
        &self,
        fence: &super::Fence,
        wait_value: crate::FenceValue,
        timeout_ms: u32,
    ) -> Result<bool, crate::DeviceError> {
        if fence.last_completed < wait_value {
            let gl = &self.shared.context.lock();
            let timeout_ns = if cfg!(any(webgl, Emscripten)) {
                0
            } else {
                (timeout_ms as u64 * 1_000_000).min(!0u32 as u64)
            };
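            // On WebGL the timeout is forced to zero: `clientWaitSync` there
            // cannot block the main thread, and the spec caps the timeout at
            // MAX_CLIENT_WAIT_TIMEOUT_WEBGL (commonly 0), so the wait degrades
            // to a poll. Elsewhere, the millisecond timeout is converted to
            // nanoseconds and clamped to fit the 32-bit timeout argument.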
            if let Some(&(_, sync)) = fence
                .pending
                .iter()
                .find(|&&(value, _)| value >= wait_value)
            {
                return match unsafe {
                    gl.client_wait_sync(sync, glow::SYNC_FLUSH_COMMANDS_BIT, timeout_ns as i32)
                } {
                    // Firefox has been observed to return WAIT_FAILED here for
                    // reasons that are still unclear; treat it as "not yet
                    // signaled" rather than a device loss.
                    #[cfg(any(webgl, Emscripten))]
                    glow::WAIT_FAILED => {
                        log::warn!("wait failed!");
                        Ok(false)
                    }
                    glow::TIMEOUT_EXPIRED => Ok(false),
                    glow::CONDITION_SATISFIED | glow::ALREADY_SIGNALED => Ok(true),
                    _ => Err(crate::DeviceError::Lost),
                };
            }
        }
        Ok(true)
    }

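    // Frame captures delegate to the RenderDoc in-application API when built
    // natively with the `renderdoc` feature; on all other targets these are
    // no-ops that report that no capture was started.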
    unsafe fn start_capture(&self) -> bool {
        #[cfg(all(native, feature = "renderdoc"))]
        return unsafe {
            self.render_doc
                .start_frame_capture(self.shared.context.raw_context(), ptr::null_mut())
        };
        #[allow(unreachable_code)]
        false
    }
    unsafe fn stop_capture(&self) {
        #[cfg(all(native, feature = "renderdoc"))]
        unsafe {
            self.render_doc
                .end_frame_capture(ptr::null_mut(), ptr::null_mut())
        }
    }
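    // Ray-tracing acceleration structures have no GLES counterpart, so the
    // remaining acceleration-structure entry points are left unimplemented.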
    unsafe fn create_acceleration_structure(
        &self,
        _desc: &crate::AccelerationStructureDescriptor,
    ) -> Result<(), crate::DeviceError> {
        unimplemented!()
    }
    unsafe fn get_acceleration_structure_build_sizes<'a>(
        &self,
        _desc: &crate::GetAccelerationStructureBuildSizesDescriptor<'a, super::Api>,
    ) -> crate::AccelerationStructureBuildSizes {
        unimplemented!()
    }
    unsafe fn get_acceleration_structure_device_address(
        &self,
        _acceleration_structure: &(),
    ) -> wgt::BufferAddress {
        unimplemented!()
    }
    unsafe fn destroy_acceleration_structure(&self, _acceleration_structure: ()) {}
}

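// Safety: a hedged rationale rather than a proof. Every GL call above goes
// through `self.shared.context.lock()`, so access to the raw context is
// serialized; the `send_sync` cfg is assumed to be enabled only on targets
// where the context itself may be used from other threads.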
#[cfg(send_sync)]
unsafe impl Sync for super::Device {}
#[cfg(send_sync)]
unsafe impl Send for super::Device {}