wgpu/lib.rs
1//! A cross-platform graphics and compute library based on [WebGPU](https://gpuweb.github.io/gpuweb/).
2//!
3//! To start using the API, create an [`Instance`].
4//!
5//! ## Feature flags
6#![doc = document_features::document_features!()]
7//!
8//! ### Feature Aliases
9//!
10//! These features aren't actually features on the crate itself, but a convenient shorthand for
11//! complicated cases.
12//!
//! - **`wgpu_core`** --- Enabled when any non-WebGPU backend is enabled on the platform.
//! - **`naga`** --- Enabled when any non-WGSL shader input is enabled.
15//!
16
17#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]
18#![doc(html_logo_url = "https://raw.githubusercontent.com/gfx-rs/wgpu/trunk/logo.png")]
19#![warn(missing_docs, rust_2018_idioms, unsafe_op_in_unsafe_fn)]
20
21mod backend;
22mod context;
23pub mod util;
24#[macro_use]
25mod macros;
26
27use std::{
28 any::Any,
29 borrow::Cow,
30 cmp::Ordering,
31 collections::HashMap,
32 error, fmt,
33 future::Future,
34 marker::PhantomData,
35 num::{NonZeroU32, NonZeroU64},
36 ops::{Bound, Deref, DerefMut, Range, RangeBounds},
37 sync::Arc,
38 thread,
39};
40
41#[allow(unused_imports)] // Unused if all backends are disabled.
42use context::Context;
43
44use context::{DeviceRequest, DynContext, ObjectId};
45use parking_lot::Mutex;
46
47use raw_window_handle::{HasDisplayHandle, HasWindowHandle};
48pub use wgt::{
49 AdapterInfo, AddressMode, AstcBlock, AstcChannel, Backend, Backends, BindGroupLayoutEntry,
50 BindingType, BlendComponent, BlendFactor, BlendOperation, BlendState, BufferAddress,
51 BufferBindingType, BufferSize, BufferUsages, Color, ColorTargetState, ColorWrites,
52 CommandBufferDescriptor, CompareFunction, CompositeAlphaMode, DepthBiasState,
53 DepthStencilState, DeviceLostReason, DeviceType, DownlevelCapabilities, DownlevelFlags,
54 Dx12Compiler, DynamicOffset, Extent3d, Face, Features, FilterMode, FrontFace,
55 Gles3MinorVersion, ImageDataLayout, ImageSubresourceRange, IndexFormat, InstanceDescriptor,
56 InstanceFlags, Limits, MaintainResult, MultisampleState, Origin2d, Origin3d,
57 PipelineStatisticsTypes, PolygonMode, PowerPreference, PredefinedColorSpace, PresentMode,
58 PresentationTimestamp, PrimitiveState, PrimitiveTopology, PushConstantRange, QueryType,
59 RenderBundleDepthStencil, SamplerBindingType, SamplerBorderColor, ShaderLocation, ShaderModel,
60 ShaderStages, StencilFaceState, StencilOperation, StencilState, StorageTextureAccess,
61 SurfaceCapabilities, SurfaceStatus, TextureAspect, TextureDimension, TextureFormat,
62 TextureFormatFeatureFlags, TextureFormatFeatures, TextureSampleType, TextureUsages,
63 TextureViewDimension, VertexAttribute, VertexFormat, VertexStepMode, WasmNotSend,
64 WasmNotSendSync, WasmNotSync, COPY_BUFFER_ALIGNMENT, COPY_BYTES_PER_ROW_ALIGNMENT,
65 MAP_ALIGNMENT, PUSH_CONSTANT_ALIGNMENT, QUERY_RESOLVE_BUFFER_ALIGNMENT, QUERY_SET_MAX_QUERIES,
66 QUERY_SIZE, VERTEX_STRIDE_ALIGNMENT,
67};
68
69/// Re-export of our `wgpu-core` dependency.
70///
71#[cfg(wgpu_core)]
72pub use ::wgc as core;
73
74/// Re-export of our `wgpu-hal` dependency.
75///
77#[cfg(wgpu_core)]
78pub use ::hal;
79
80/// Re-export of our `naga` dependency.
81///
82#[cfg(wgpu_core)]
83#[cfg_attr(docsrs, doc(cfg(any(wgpu_core, naga))))]
84// We re-export wgpu-core's re-export of naga, as we may not have direct access to it.
85pub use ::wgc::naga;
86/// Re-export of our `naga` dependency.
87///
88#[cfg(all(not(wgpu_core), naga))]
89#[cfg_attr(docsrs, doc(cfg(any(wgpu_core, naga))))]
90// If that's not available, we re-export our own.
91pub use naga;
92
93/// Re-export of our `raw-window-handle` dependency.
94///
95pub use raw_window_handle as rwh;
96
97/// Re-export of our `web-sys` dependency.
98///
99#[cfg(any(webgl, webgpu))]
100pub use web_sys;
101
// Wasm-only types. We try to keep as many types as possible platform-independent,
// but these need to depend on web-sys.
104#[cfg(any(webgpu, webgl))]
105pub use wgt::{ExternalImageSource, ImageCopyExternalImage};
106
107/// Filter for error scopes.
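///
/// Used with [`Device::push_error_scope`] and [`Device::pop_error_scope`].
/// A minimal sketch of how an error scope might be used (assuming an async
/// context and an existing [`Device`]; the buffer parameters are placeholders):
///
/// ```no_run
/// # async fn example(device: &wgpu::Device) {
/// // Begin catching validation errors produced by the calls below.
/// device.push_error_scope(wgpu::ErrorFilter::Validation);
///
/// let buffer = device.create_buffer(&wgpu::BufferDescriptor {
///     label: Some("scoped buffer"),
///     size: 1024,
///     usage: wgpu::BufferUsages::COPY_DST,
///     mapped_at_creation: false,
/// });
///
/// // End the scope; `None` means no validation error was raised.
/// if let Some(error) = device.pop_error_scope().await {
///     eprintln!("validation error: {error}");
/// }
/// # }
/// ```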
108#[derive(Clone, Copy, Debug, Eq, PartialEq, PartialOrd)]
109pub enum ErrorFilter {
110 /// Catch only out-of-memory errors.
111 OutOfMemory,
112 /// Catch only validation errors.
113 Validation,
114 /// Catch only internal errors.
115 Internal,
116}
117static_assertions::assert_impl_all!(ErrorFilter: Send, Sync);
118
119type C = dyn DynContext;
120#[cfg(send_sync)]
121type Data = dyn Any + Send + Sync;
122#[cfg(not(send_sync))]
123type Data = dyn Any;
124
125/// Context for all other wgpu objects. Instance of wgpu.
126///
127/// This is the first thing you create when using wgpu.
128/// Its primary use is to create [`Adapter`]s and [`Surface`]s.
129///
130/// Does not have to be kept alive.
131///
132/// Corresponds to [WebGPU `GPU`](https://gpuweb.github.io/gpuweb/#gpu-interface).
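///
/// ## Example
///
/// A minimal sketch of bringing up the API (assuming an async context; default
/// options are used throughout and the missing-adapter case is simply unwrapped):
///
/// ```no_run
/// # async fn example() {
/// let instance = wgpu::Instance::default();
/// let adapter = instance
///     .request_adapter(&wgpu::RequestAdapterOptions::default())
///     .await
///     .expect("no suitable adapter found");
/// # }
/// ```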
133#[derive(Debug)]
134pub struct Instance {
135 context: Arc<C>,
136}
137#[cfg(send_sync)]
138static_assertions::assert_impl_all!(Instance: Send, Sync);
139
140/// Handle to a physical graphics and/or compute device.
141///
142/// Adapters can be used to open a connection to the corresponding [`Device`]
143/// on the host system by using [`Adapter::request_device`].
144///
145/// Does not have to be kept alive.
146///
147/// Corresponds to [WebGPU `GPUAdapter`](https://gpuweb.github.io/gpuweb/#gpu-adapter).
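///
/// ## Example
///
/// A minimal sketch of opening a [`Device`] and [`Queue`] from an adapter
/// (assuming an async context; the label, feature, and limit choices below are
/// placeholders, and errors are simply unwrapped):
///
/// ```no_run
/// # async fn example(adapter: &wgpu::Adapter) {
/// let (device, queue) = adapter
///     .request_device(
///         &wgpu::DeviceDescriptor {
///             label: Some("example device"),
///             required_features: wgpu::Features::empty(),
///             required_limits: wgpu::Limits::default(),
///         },
///         None, // optional API call tracing path
///     )
///     .await
///     .expect("failed to open device");
/// # }
/// ```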
148#[derive(Debug)]
149pub struct Adapter {
150 context: Arc<C>,
151 id: ObjectId,
152 data: Box<Data>,
153}
154#[cfg(send_sync)]
155static_assertions::assert_impl_all!(Adapter: Send, Sync);
156
157impl Drop for Adapter {
158 fn drop(&mut self) {
159 if !thread::panicking() {
160 self.context.adapter_drop(&self.id, self.data.as_ref())
161 }
162 }
163}
164
165/// Open connection to a graphics and/or compute device.
166///
167/// Responsible for the creation of most rendering and compute resources.
168/// These are then used in commands, which are submitted to a [`Queue`].
169///
170/// A device may be requested from an adapter with [`Adapter::request_device`].
171///
172/// Corresponds to [WebGPU `GPUDevice`](https://gpuweb.github.io/gpuweb/#gpu-device).
173#[derive(Debug)]
174pub struct Device {
175 context: Arc<C>,
176 id: ObjectId,
177 data: Box<Data>,
178}
179#[cfg(send_sync)]
180static_assertions::assert_impl_all!(Device: Send, Sync);
181
182/// Identifier for a particular call to [`Queue::submit`]. Can be used
183/// as part of an argument to [`Device::poll`] to block for a particular
184/// submission to finish.
185///
186/// This type is unique to the Rust API of `wgpu`.
187/// There is no analogue in the WebGPU specification.
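///
/// A minimal sketch of waiting for a particular submission to complete
/// (assuming an existing [`Device`], [`Queue`], and a finished [`CommandBuffer`]):
///
/// ```no_run
/// # fn example(device: &wgpu::Device, queue: &wgpu::Queue, command_buffer: wgpu::CommandBuffer) {
/// let submission_index = queue.submit([command_buffer]);
/// // Block until the GPU has finished executing that particular submission.
/// device.poll(wgpu::Maintain::WaitForSubmissionIndex(submission_index));
/// # }
/// ```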
188#[derive(Debug, Clone)]
189pub struct SubmissionIndex(ObjectId, Arc<crate::Data>);
190#[cfg(send_sync)]
191static_assertions::assert_impl_all!(SubmissionIndex: Send, Sync);
192
193/// The mapped portion of a buffer, if any, and its outstanding views.
194///
195/// This ensures that views fall within the mapped range and don't overlap, and
196/// also takes care of turning `Option<BufferSize>` sizes into actual buffer
197/// offsets.
198#[derive(Debug)]
199struct MapContext {
200 /// The overall size of the buffer.
201 ///
202 /// This is just a convenient copy of [`Buffer::size`].
203 total_size: BufferAddress,
204
205 /// The range of the buffer that is mapped.
206 ///
207 /// This is `0..0` if the buffer is not mapped. This becomes non-empty when
208 /// the buffer is mapped at creation time, and when you call `map_async` on
209 /// some [`BufferSlice`] (so technically, it indicates the portion that is
210 /// *or has been requested to be* mapped.)
211 ///
212 /// All [`BufferView`]s and [`BufferViewMut`]s must fall within this range.
213 initial_range: Range<BufferAddress>,
214
215 /// The ranges covered by all outstanding [`BufferView`]s and
216 /// [`BufferViewMut`]s. These are non-overlapping, and are all contained
217 /// within `initial_range`.
218 sub_ranges: Vec<Range<BufferAddress>>,
219}
220
221impl MapContext {
222 fn new(total_size: BufferAddress) -> Self {
223 Self {
224 total_size,
225 initial_range: 0..0,
226 sub_ranges: Vec::new(),
227 }
228 }
229
230 /// Record that the buffer is no longer mapped.
231 fn reset(&mut self) {
232 self.initial_range = 0..0;
233
234 assert!(
235 self.sub_ranges.is_empty(),
236 "You cannot unmap a buffer that still has accessible mapped views"
237 );
238 }
239
240 /// Record that the `size` bytes of the buffer at `offset` are now viewed.
241 ///
242 /// Return the byte offset within the buffer of the end of the viewed range.
243 ///
244 /// # Panics
245 ///
246 /// This panics if the given range overlaps with any existing range.
247 fn add(&mut self, offset: BufferAddress, size: Option<BufferSize>) -> BufferAddress {
248 let end = match size {
249 Some(s) => offset + s.get(),
250 None => self.initial_range.end,
251 };
252 assert!(self.initial_range.start <= offset && end <= self.initial_range.end);
253 // This check is essential for avoiding undefined behavior: it is the
254 // only thing that ensures that `&mut` references to the buffer's
255 // contents don't alias anything else.
256 for sub in self.sub_ranges.iter() {
257 assert!(
258 end <= sub.start || offset >= sub.end,
259 "Intersecting map range with {sub:?}"
260 );
261 }
262 self.sub_ranges.push(offset..end);
263 end
264 }
265
266 /// Record that the `size` bytes of the buffer at `offset` are no longer viewed.
267 ///
268 /// # Panics
269 ///
270 /// This panics if the given range does not exactly match one previously
271 /// passed to [`add`].
272 ///
    /// [`add`]: MapContext::add
274 fn remove(&mut self, offset: BufferAddress, size: Option<BufferSize>) {
275 let end = match size {
276 Some(s) => offset + s.get(),
277 None => self.initial_range.end,
278 };
279
280 let index = self
281 .sub_ranges
282 .iter()
283 .position(|r| *r == (offset..end))
284 .expect("unable to remove range from map context");
285 self.sub_ranges.swap_remove(index);
286 }
287}
288
289/// Handle to a GPU-accessible buffer.
290///
291/// Created with [`Device::create_buffer`] or
292/// [`DeviceExt::create_buffer_init`](util::DeviceExt::create_buffer_init).
293///
294/// Corresponds to [WebGPU `GPUBuffer`](https://gpuweb.github.io/gpuweb/#buffer-interface).
295///
296/// # Mapping buffers
297///
298/// If a `Buffer` is created with the appropriate [`usage`], it can be *mapped*:
299/// you can make its contents accessible to the CPU as an ordinary `&[u8]` or
300/// `&mut [u8]` slice of bytes. Buffers created with the
301/// [`mapped_at_creation`][mac] flag set are also mapped initially.
302///
303/// Depending on the hardware, the buffer could be memory shared between CPU and
304/// GPU, so that the CPU has direct access to the same bytes the GPU will
305/// consult; or it may be ordinary CPU memory, whose contents the system must
306/// copy to/from the GPU as needed. This crate's API is designed to work the
307/// same way in either case: at any given time, a buffer is either mapped and
308/// available to the CPU, or unmapped and ready for use by the GPU, but never
309/// both. This makes it impossible for either side to observe changes by the
310/// other immediately, and any necessary transfers can be carried out when the
311/// buffer transitions from one state to the other.
312///
313/// There are two ways to map a buffer:
314///
315/// - If [`BufferDescriptor::mapped_at_creation`] is `true`, then the entire
316/// buffer is mapped when it is created. This is the easiest way to initialize
317/// a new buffer. You can set `mapped_at_creation` on any kind of buffer,
318/// regardless of its [`usage`] flags.
319///
320/// - If the buffer's [`usage`] includes the [`MAP_READ`] or [`MAP_WRITE`]
321/// flags, then you can call `buffer.slice(range).map_async(mode, callback)`
322/// to map the portion of `buffer` given by `range`. This waits for the GPU to
323/// finish using the buffer, and invokes `callback` as soon as the buffer is
324/// safe for the CPU to access.
325///
326/// Once a buffer is mapped:
327///
328/// - You can call `buffer.slice(range).get_mapped_range()` to obtain a
329/// [`BufferView`], which dereferences to a `&[u8]` that you can use to read
330/// the buffer's contents.
331///
332/// - Or, you can call `buffer.slice(range).get_mapped_range_mut()` to obtain a
333/// [`BufferViewMut`], which dereferences to a `&mut [u8]` that you can use to
334/// read and write the buffer's contents.
335///
336/// The given `range` must fall within the mapped portion of the buffer. If you
337/// attempt to access overlapping ranges, even for shared access only, these
338/// methods panic.
339///
340/// For example:
341///
342/// ```no_run
343/// # let buffer: wgpu::Buffer = todo!();
344/// let slice = buffer.slice(10..20);
345/// slice.map_async(wgpu::MapMode::Read, |result| {
346/// match result {
347/// Ok(()) => {
348/// let view = slice.get_mapped_range();
349/// // read data from `view`, which dereferences to `&[u8]`
350/// }
351/// Err(e) => {
352/// // handle mapping error
353/// }
354/// }
355/// });
356/// ```
357///
358/// This example calls `Buffer::slice` to obtain a [`BufferSlice`] referring to
359/// the second ten bytes of `buffer`. (To obtain access to the entire buffer,
360/// you could call `buffer.slice(..)`.) The code then calls `map_async` to wait
361/// for the buffer to be available, and finally calls `get_mapped_range` on the
362/// slice to actually get at the bytes.
363///
364/// If using `map_async` directly is awkward, you may find it more convenient to
365/// use [`Queue::write_buffer`] and [`util::DownloadBuffer::read_buffer`].
366/// However, those each have their own tradeoffs; the asynchronous nature of GPU
367/// execution makes it hard to avoid friction altogether.
368///
369/// While a buffer is mapped, you must not submit any commands to the GPU that
370/// access it. You may record command buffers that use the buffer, but you must
371/// not submit such command buffers.
372///
373/// When you are done using the buffer on the CPU, you must call
374/// [`Buffer::unmap`] to make it available for use by the GPU again. All
375/// [`BufferView`] and [`BufferViewMut`] views referring to the buffer must be
376/// dropped before you unmap it; otherwise, [`Buffer::unmap`] will panic.
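///
/// As a concrete sketch of the `mapped_at_creation` path (the buffer size and
/// usage below are arbitrary placeholders):
///
/// ```no_run
/// # fn example(device: &wgpu::Device) {
/// let buffer = device.create_buffer(&wgpu::BufferDescriptor {
///     label: Some("init data"),
///     size: 256,
///     usage: wgpu::BufferUsages::VERTEX,
///     mapped_at_creation: true,
/// });
/// // The whole buffer is mapped: fill it through a mutable view ...
/// buffer.slice(..).get_mapped_range_mut().fill(0);
/// // ... then unmap it so the GPU may use it. All views must be dropped first.
/// buffer.unmap();
/// # }
/// ```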
377///
378/// ## Mapping buffers on the web
379///
380/// When compiled to WebAssembly and running in a browser content process,
381/// `wgpu` implements its API in terms of the browser's WebGPU implementation.
382/// In this context, `wgpu` is further isolated from the GPU:
383///
384/// - Depending on the browser's WebGPU implementation, mapping and unmapping
385/// buffers probably entails copies between WebAssembly linear memory and the
386/// graphics driver's buffers.
387///
388/// - All modern web browsers isolate web content in its own sandboxed process,
389/// which can only interact with the GPU via interprocess communication (IPC).
390/// Although most browsers' IPC systems use shared memory for large data
391/// transfers, there will still probably need to be copies into and out of the
392/// shared memory buffers.
393///
394/// All of these copies contribute to the cost of buffer mapping in this
395/// configuration.
396///
397/// [`usage`]: BufferDescriptor::usage
398/// [mac]: BufferDescriptor::mapped_at_creation
399/// [`MAP_READ`]: BufferUsages::MAP_READ
400/// [`MAP_WRITE`]: BufferUsages::MAP_WRITE
401#[derive(Debug)]
402pub struct Buffer {
403 context: Arc<C>,
404 id: ObjectId,
405 data: Box<Data>,
406 map_context: Mutex<MapContext>,
407 size: wgt::BufferAddress,
408 usage: BufferUsages,
409 // Todo: missing map_state https://www.w3.org/TR/webgpu/#dom-gpubuffer-mapstate
410}
411#[cfg(send_sync)]
412static_assertions::assert_impl_all!(Buffer: Send, Sync);
413
414/// A slice of a [`Buffer`], to be mapped, used for vertex or index data, or the like.
415///
416/// You can create a `BufferSlice` by calling [`Buffer::slice`]:
417///
418/// ```no_run
419/// # let buffer: wgpu::Buffer = todo!();
420/// let slice = buffer.slice(10..20);
421/// ```
422///
423/// This returns a slice referring to the second ten bytes of `buffer`. To get a
424/// slice of the entire `Buffer`:
425///
426/// ```no_run
427/// # let buffer: wgpu::Buffer = todo!();
428/// let whole_buffer_slice = buffer.slice(..);
429/// ```
430///
431/// A [`BufferSlice`] is nothing more than a reference to the `Buffer` and a
432/// starting and ending position. To access the slice's contents on the CPU, you
433/// must first [map] the buffer, and then call [`BufferSlice::get_mapped_range`]
434/// or [`BufferSlice::get_mapped_range_mut`] to obtain a view of the slice's
435/// contents, which dereferences to a `&[u8]` or `&mut [u8]`.
436///
437/// You can also pass buffer slices to methods like
438/// [`RenderPass::set_vertex_buffer`] and [`RenderPass::set_index_buffer`] to
439/// indicate which data a draw call should consume.
440///
441/// The `BufferSlice` type is unique to the Rust API of `wgpu`. In the WebGPU
442/// specification, an offset and size are specified as arguments to each call
443/// working with the [`Buffer`], instead.
444///
445/// [map]: Buffer#mapping-buffers
446#[derive(Copy, Clone, Debug)]
447pub struct BufferSlice<'a> {
448 buffer: &'a Buffer,
449 offset: BufferAddress,
450 size: Option<BufferSize>,
451}
452#[cfg(send_sync)]
453static_assertions::assert_impl_all!(BufferSlice<'_>: Send, Sync);
454
455/// Handle to a texture on the GPU.
456///
457/// It can be created with [`Device::create_texture`].
458///
459/// Corresponds to [WebGPU `GPUTexture`](https://gpuweb.github.io/gpuweb/#texture-interface).
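///
/// ## Example
///
/// A minimal sketch of creating a 2D texture and a default view of it (the
/// size, format, and usage below are arbitrary placeholders):
///
/// ```no_run
/// # fn example(device: &wgpu::Device) {
/// let texture = device.create_texture(&wgpu::TextureDescriptor {
///     label: Some("example texture"),
///     size: wgpu::Extent3d { width: 256, height: 256, depth_or_array_layers: 1 },
///     mip_level_count: 1,
///     sample_count: 1,
///     dimension: wgpu::TextureDimension::D2,
///     format: wgpu::TextureFormat::Rgba8UnormSrgb,
///     usage: wgpu::TextureUsages::TEXTURE_BINDING | wgpu::TextureUsages::COPY_DST,
///     view_formats: &[],
/// });
/// let view = texture.create_view(&wgpu::TextureViewDescriptor::default());
/// # }
/// ```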
460#[derive(Debug)]
461pub struct Texture {
462 context: Arc<C>,
463 id: ObjectId,
464 data: Box<Data>,
465 owned: bool,
466 descriptor: TextureDescriptor<'static>,
467}
468#[cfg(send_sync)]
469static_assertions::assert_impl_all!(Texture: Send, Sync);
470
471/// Handle to a texture view.
472///
473/// A `TextureView` object describes a texture and associated metadata needed by a
474/// [`RenderPipeline`] or [`BindGroup`].
475///
476/// Corresponds to [WebGPU `GPUTextureView`](https://gpuweb.github.io/gpuweb/#gputextureview).
477#[derive(Debug)]
478pub struct TextureView {
479 context: Arc<C>,
480 id: ObjectId,
481 data: Box<Data>,
482}
483#[cfg(send_sync)]
484static_assertions::assert_impl_all!(TextureView: Send, Sync);
485
486/// Handle to a sampler.
487///
488/// A `Sampler` object defines how a pipeline will sample from a [`TextureView`]. Samplers define
489/// image filters (including anisotropy) and address (wrapping) modes, among other things. See
490/// the documentation for [`SamplerDescriptor`] for more information.
491///
492/// It can be created with [`Device::create_sampler`].
493///
494/// Corresponds to [WebGPU `GPUSampler`](https://gpuweb.github.io/gpuweb/#sampler-interface).
495#[derive(Debug)]
496pub struct Sampler {
497 context: Arc<C>,
498 id: ObjectId,
499 data: Box<Data>,
500}
501#[cfg(send_sync)]
502static_assertions::assert_impl_all!(Sampler: Send, Sync);
503
504impl Drop for Sampler {
505 fn drop(&mut self) {
506 if !thread::panicking() {
507 self.context.sampler_drop(&self.id, self.data.as_ref());
508 }
509 }
510}
511
512/// Describes a [`Surface`].
513///
514/// For use with [`Surface::configure`].
515///
516/// Corresponds to [WebGPU `GPUCanvasConfiguration`](
517/// https://gpuweb.github.io/gpuweb/#canvas-configuration).
518pub type SurfaceConfiguration = wgt::SurfaceConfiguration<Vec<TextureFormat>>;
519static_assertions::assert_impl_all!(SurfaceConfiguration: Send, Sync);
520
521/// Handle to a presentable surface.
522///
523/// A `Surface` represents a platform-specific surface (e.g. a window) onto which rendered images may
524/// be presented. A `Surface` may be created with the function [`Instance::create_surface`].
525///
526/// This type is unique to the Rust API of `wgpu`. In the WebGPU specification,
527/// [`GPUCanvasContext`](https://gpuweb.github.io/gpuweb/#canvas-context)
528/// serves a similar role.
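///
/// A rough sketch of creating and configuring a surface (assuming a `window`
/// value that implements [`WindowHandle`], for example an `Arc`-wrapped window,
/// and an [`Adapter`] and [`Device`] that are already available; the fallback
/// dimensions are placeholders):
///
/// ```no_run
/// # fn example<W: wgpu::WindowHandle + 'static>(
/// #     instance: &wgpu::Instance,
/// #     adapter: &wgpu::Adapter,
/// #     device: &wgpu::Device,
/// #     window: W,
/// # ) {
/// let surface = instance.create_surface(window).unwrap();
/// let config = surface
///     .get_default_config(adapter, 800, 600)
///     .expect("surface not supported by this adapter");
/// surface.configure(device, &config);
/// # }
/// ```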
529pub struct Surface<'window> {
530 context: Arc<C>,
531
532 /// Optionally, keep the source of the handle used for the surface alive.
533 ///
534 /// This is useful for platforms where the surface is created from a window and the surface
535 /// would become invalid when the window is dropped.
536 _handle_source: Option<Box<dyn WindowHandle + 'window>>,
537
538 /// Wgpu-core surface id.
539 id: ObjectId,
540
541 /// Additional surface data returned by [`DynContext::instance_create_surface`].
542 surface_data: Box<Data>,
543
544 // Stores the latest `SurfaceConfiguration` that was set using `Surface::configure`.
545 // It is required to set the attributes of the `SurfaceTexture` in the
546 // `Surface::get_current_texture` method.
    // Because the `Surface::configure` method operates on an immutable reference, this field has
    // to be wrapped in a mutex, and since the configuration is only supplied after the surface
    // has been created, it is additionally wrapped in an option.
550 config: Mutex<Option<SurfaceConfiguration>>,
551}
552
// This custom implementation is required because [`Surface::_handle_source`] doesn't
// implement [`Debug`](fmt::Debug), which we should not require from the user's window type.
555impl<'window> fmt::Debug for Surface<'window> {
556 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
557 f.debug_struct("Surface")
558 .field("context", &self.context)
559 .field(
560 "_handle_source",
561 &if self._handle_source.is_some() {
562 "Some"
563 } else {
564 "None"
565 },
566 )
567 .field("id", &self.id)
568 .field("data", &self.surface_data)
569 .field("config", &self.config)
570 .finish()
571 }
572}
573
574#[cfg(send_sync)]
575static_assertions::assert_impl_all!(Surface<'_>: Send, Sync);
576
577impl Drop for Surface<'_> {
578 fn drop(&mut self) {
579 if !thread::panicking() {
580 self.context
581 .surface_drop(&self.id, self.surface_data.as_ref())
582 }
583 }
584}
585
586/// Super trait for window handles as used in [`SurfaceTarget`].
587pub trait WindowHandle: HasWindowHandle + HasDisplayHandle + WasmNotSendSync {}
588
589impl<T> WindowHandle for T where T: HasWindowHandle + HasDisplayHandle + WasmNotSendSync {}
590
591/// The window/canvas/surface/swap-chain/etc. a surface is attached to, for use with safe surface creation.
592///
593/// This is either a window or an actual web canvas depending on the platform and
594/// enabled features.
595/// Refer to the individual variants for more information.
596///
597/// See also [`SurfaceTargetUnsafe`] for unsafe variants.
598#[non_exhaustive]
599pub enum SurfaceTarget<'window> {
600 /// Window handle producer.
601 ///
602 /// If the specified display and window handle are not supported by any of the backends, then the surface
603 /// will not be supported by any adapters.
604 ///
605 /// # Errors
606 ///
607 /// - On WebGL2: surface creation returns an error if the browser does not support WebGL2,
608 /// or declines to provide GPU access (such as due to a resource shortage).
609 ///
610 /// # Panics
611 ///
612 /// - On macOS/Metal: will panic if not called on the main thread.
613 /// - On web: will panic if the `raw_window_handle` does not properly refer to a
614 /// canvas element.
615 Window(Box<dyn WindowHandle + 'window>),
616
617 /// Surface from a `web_sys::HtmlCanvasElement`.
618 ///
619 /// The `canvas` argument must be a valid `<canvas>` element to
620 /// create a surface upon.
621 ///
622 /// # Errors
623 ///
624 /// - On WebGL2: surface creation will return an error if the browser does not support WebGL2,
625 /// or declines to provide GPU access (such as due to a resource shortage).
626 #[cfg(any(webgpu, webgl))]
627 Canvas(web_sys::HtmlCanvasElement),
628
629 /// Surface from a `web_sys::OffscreenCanvas`.
630 ///
631 /// The `canvas` argument must be a valid `OffscreenCanvas` object
632 /// to create a surface upon.
633 ///
634 /// # Errors
635 ///
636 /// - On WebGL2: surface creation will return an error if the browser does not support WebGL2,
637 /// or declines to provide GPU access (such as due to a resource shortage).
638 #[cfg(any(webgpu, webgl))]
639 OffscreenCanvas(web_sys::OffscreenCanvas),
640}
641
642impl<'a, T> From<T> for SurfaceTarget<'a>
643where
644 T: WindowHandle + 'a,
645{
646 fn from(window: T) -> Self {
647 Self::Window(Box::new(window))
648 }
649}
650
651/// The window/canvas/surface/swap-chain/etc. a surface is attached to, for use with unsafe surface creation.
652///
653/// This is either a window or an actual web canvas depending on the platform and
654/// enabled features.
655/// Refer to the individual variants for more information.
656///
657/// See also [`SurfaceTarget`] for safe variants.
658#[non_exhaustive]
659pub enum SurfaceTargetUnsafe {
660 /// Raw window & display handle.
661 ///
662 /// If the specified display and window handle are not supported by any of the backends, then the surface
663 /// will not be supported by any adapters.
664 ///
665 /// # Safety
666 ///
667 /// - `raw_window_handle` & `raw_display_handle` must be valid objects to create a surface upon.
668 /// - `raw_window_handle` & `raw_display_handle` must remain valid until after the returned
669 /// [`Surface`] is dropped.
670 RawHandle {
671 /// Raw display handle, underlying display must outlive the surface created from this.
672 raw_display_handle: raw_window_handle::RawDisplayHandle,
673
674 /// Raw display handle, underlying window must outlive the surface created from this.
675 raw_window_handle: raw_window_handle::RawWindowHandle,
676 },
677
678 /// Surface from `CoreAnimationLayer`.
679 ///
680 /// # Safety
681 ///
682 /// - layer must be a valid object to create a surface upon.
683 #[cfg(metal)]
684 CoreAnimationLayer(*mut std::ffi::c_void),
685
686 /// Surface from `IDCompositionVisual`.
687 ///
688 /// # Safety
689 ///
690 /// - visual must be a valid IDCompositionVisual to create a surface upon.
691 #[cfg(dx12)]
692 CompositionVisual(*mut std::ffi::c_void),
693
694 /// Surface from DX12 `SurfaceHandle`.
695 ///
696 /// # Safety
697 ///
698 /// - surface_handle must be a valid SurfaceHandle to create a surface upon.
699 #[cfg(dx12)]
700 SurfaceHandle(*mut std::ffi::c_void),
701
702 /// Surface from DX12 `SwapChainPanel`.
703 ///
704 /// # Safety
705 ///
706 /// - visual must be a valid SwapChainPanel to create a surface upon.
707 #[cfg(dx12)]
708 SwapChainPanel(*mut std::ffi::c_void),
709}
710
711impl SurfaceTargetUnsafe {
712 /// Creates a [`SurfaceTargetUnsafe::RawHandle`] from a window.
713 ///
714 /// # Safety
715 ///
716 /// - `window` must outlive the resulting surface target
717 /// (and subsequently the surface created for this target).
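    ///
    /// A sketch of how this might be combined with [`Instance::create_surface_unsafe`]
    /// (the caller is responsible for keeping `window` alive for as long as the
    /// surface exists; errors are simply unwrapped):
    ///
    /// ```no_run
    /// # fn example<W>(instance: &wgpu::Instance, window: &W)
    /// # where
    /// #     W: wgpu::rwh::HasWindowHandle + wgpu::rwh::HasDisplayHandle,
    /// # {
    /// // SAFETY: `window` must outlive both the target and the surface created from it.
    /// let target = unsafe { wgpu::SurfaceTargetUnsafe::from_window(window) }.unwrap();
    /// let surface = unsafe { instance.create_surface_unsafe(target) }.unwrap();
    /// # }
    /// ```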
718 pub unsafe fn from_window<T>(window: &T) -> Result<Self, raw_window_handle::HandleError>
719 where
720 T: HasDisplayHandle + HasWindowHandle,
721 {
722 Ok(Self::RawHandle {
723 raw_display_handle: window.display_handle()?.as_raw(),
724 raw_window_handle: window.window_handle()?.as_raw(),
725 })
726 }
727}
728
729/// Handle to a binding group layout.
730///
731/// A `BindGroupLayout` is a handle to the GPU-side layout of a binding group. It can be used to
732/// create a [`BindGroupDescriptor`] object, which in turn can be used to create a [`BindGroup`]
733/// object with [`Device::create_bind_group`]. A series of `BindGroupLayout`s can also be used to
734/// create a [`PipelineLayoutDescriptor`], which can be used to create a [`PipelineLayout`].
735///
736/// It can be created with [`Device::create_bind_group_layout`].
737///
738/// Corresponds to [WebGPU `GPUBindGroupLayout`](
739/// https://gpuweb.github.io/gpuweb/#gpubindgrouplayout).
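///
/// ## Example
///
/// A minimal sketch of a layout with a single uniform-buffer binding visible to
/// the vertex stage (the label, binding index, and visibility are placeholders):
///
/// ```no_run
/// # fn example(device: &wgpu::Device) {
/// let layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
///     label: Some("example layout"),
///     entries: &[wgpu::BindGroupLayoutEntry {
///         binding: 0,
///         visibility: wgpu::ShaderStages::VERTEX,
///         ty: wgpu::BindingType::Buffer {
///             ty: wgpu::BufferBindingType::Uniform,
///             has_dynamic_offset: false,
///             min_binding_size: None,
///         },
///         count: None,
///     }],
/// });
/// # }
/// ```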
740#[derive(Debug)]
741pub struct BindGroupLayout {
742 context: Arc<C>,
743 id: ObjectId,
744 data: Box<Data>,
745}
746#[cfg(send_sync)]
747static_assertions::assert_impl_all!(BindGroupLayout: Send, Sync);
748
749impl Drop for BindGroupLayout {
750 fn drop(&mut self) {
751 if !thread::panicking() {
752 self.context
753 .bind_group_layout_drop(&self.id, self.data.as_ref());
754 }
755 }
756}
757
758/// Handle to a binding group.
759///
760/// A `BindGroup` represents the set of resources bound to the bindings described by a
761/// [`BindGroupLayout`]. It can be created with [`Device::create_bind_group`]. A `BindGroup` can
762/// be bound to a particular [`RenderPass`] with [`RenderPass::set_bind_group`], or to a
763/// [`ComputePass`] with [`ComputePass::set_bind_group`].
764///
765/// Corresponds to [WebGPU `GPUBindGroup`](https://gpuweb.github.io/gpuweb/#gpubindgroup).
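///
/// ## Example
///
/// A minimal sketch of binding a whole buffer to binding 0 of an existing
/// [`BindGroupLayout`] (the label and resources are placeholders):
///
/// ```no_run
/// # fn example(device: &wgpu::Device, layout: &wgpu::BindGroupLayout, buffer: &wgpu::Buffer) {
/// let bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
///     label: Some("example bind group"),
///     layout,
///     entries: &[wgpu::BindGroupEntry {
///         binding: 0,
///         resource: buffer.as_entire_binding(),
///     }],
/// });
/// # }
/// ```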
766#[derive(Debug)]
767pub struct BindGroup {
768 context: Arc<C>,
769 id: ObjectId,
770 data: Box<Data>,
771}
772#[cfg(send_sync)]
773static_assertions::assert_impl_all!(BindGroup: Send, Sync);
774
775impl Drop for BindGroup {
776 fn drop(&mut self) {
777 if !thread::panicking() {
778 self.context.bind_group_drop(&self.id, self.data.as_ref());
779 }
780 }
781}
782
783/// Handle to a compiled shader module.
784///
785/// A `ShaderModule` represents a compiled shader module on the GPU. It can be created by passing
786/// source code to [`Device::create_shader_module`] or valid SPIR-V binary to
787/// [`Device::create_shader_module_spirv`]. Shader modules are used to define programmable stages
788/// of a pipeline.
789///
790/// Corresponds to [WebGPU `GPUShaderModule`](https://gpuweb.github.io/gpuweb/#shader-module).
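///
/// ## Example
///
/// A minimal sketch of compiling a WGSL module from an inline string (requires
/// the `wgsl` feature, which is enabled by default; the shader itself is a
/// do-nothing placeholder):
///
/// ```no_run
/// # fn example(device: &wgpu::Device) {
/// let module = device.create_shader_module(wgpu::ShaderModuleDescriptor {
///     label: Some("example shader"),
///     source: wgpu::ShaderSource::Wgsl("@compute @workgroup_size(1) fn main() {}".into()),
/// });
/// # }
/// ```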
791#[derive(Debug)]
792pub struct ShaderModule {
793 context: Arc<C>,
794 id: ObjectId,
795 data: Box<Data>,
796}
797#[cfg(send_sync)]
798static_assertions::assert_impl_all!(ShaderModule: Send, Sync);
799
800impl Drop for ShaderModule {
801 fn drop(&mut self) {
802 if !thread::panicking() {
803 self.context
804 .shader_module_drop(&self.id, self.data.as_ref());
805 }
806 }
807}
808
809/// Source of a shader module.
810///
811/// The source will be parsed and validated.
812///
813/// Any necessary shader translation (e.g. from WGSL to SPIR-V or vice versa)
814/// will be done internally by wgpu.
815///
816/// This type is unique to the Rust API of `wgpu`. In the WebGPU specification,
817/// only WGSL source code strings are accepted.
818#[cfg_attr(feature = "naga-ir", allow(clippy::large_enum_variant))]
819#[derive(Clone, Debug)]
820#[non_exhaustive]
821pub enum ShaderSource<'a> {
822 /// SPIR-V module represented as a slice of words.
823 ///
824 /// See also: [`util::make_spirv`], [`include_spirv`]
825 #[cfg(feature = "spirv")]
826 SpirV(Cow<'a, [u32]>),
827 /// GLSL module as a string slice.
828 ///
    /// Note: GLSL is not yet fully supported, and the target `naga::ShaderStage` must be specified.
830 #[cfg(feature = "glsl")]
831 Glsl {
832 /// The source code of the shader.
833 shader: Cow<'a, str>,
834 /// The shader stage that the shader targets. For example, `naga::ShaderStage::Vertex`
835 stage: naga::ShaderStage,
836 /// Defines to unlock configured shader features.
837 defines: naga::FastHashMap<String, String>,
838 },
839 /// WGSL module as a string slice.
840 #[cfg(feature = "wgsl")]
841 Wgsl(Cow<'a, str>),
842 /// Naga module.
843 #[cfg(feature = "naga-ir")]
844 Naga(Cow<'static, naga::Module>),
845 /// Dummy variant because `Naga` doesn't have a lifetime and without enough active features it
846 /// could be the last one active.
847 #[doc(hidden)]
848 Dummy(PhantomData<&'a ()>),
849}
850static_assertions::assert_impl_all!(ShaderSource<'_>: Send, Sync);
851
852/// Descriptor for use with [`Device::create_shader_module`].
853///
854/// Corresponds to [WebGPU `GPUShaderModuleDescriptor`](
855/// https://gpuweb.github.io/gpuweb/#dictdef-gpushadermoduledescriptor).
856#[derive(Clone, Debug)]
857pub struct ShaderModuleDescriptor<'a> {
858 /// Debug label of the shader module. This will show up in graphics debuggers for easy identification.
859 pub label: Label<'a>,
860 /// Source code for the shader.
861 pub source: ShaderSource<'a>,
862}
863static_assertions::assert_impl_all!(ShaderModuleDescriptor<'_>: Send, Sync);
864
865/// Descriptor for a shader module given by SPIR-V binary, for use with
866/// [`Device::create_shader_module_spirv`].
867///
868/// This type is unique to the Rust API of `wgpu`. In the WebGPU specification,
869/// only WGSL source code strings are accepted.
870#[derive(Debug)]
871pub struct ShaderModuleDescriptorSpirV<'a> {
872 /// Debug label of the shader module. This will show up in graphics debuggers for easy identification.
873 pub label: Label<'a>,
874 /// Binary SPIR-V data, in 4-byte words.
875 pub source: Cow<'a, [u32]>,
876}
877static_assertions::assert_impl_all!(ShaderModuleDescriptorSpirV<'_>: Send, Sync);
878
879/// Handle to a pipeline layout.
880///
881/// A `PipelineLayout` object describes the available binding groups of a pipeline.
882/// It can be created with [`Device::create_pipeline_layout`].
883///
884/// Corresponds to [WebGPU `GPUPipelineLayout`](https://gpuweb.github.io/gpuweb/#gpupipelinelayout).
885#[derive(Debug)]
886pub struct PipelineLayout {
887 context: Arc<C>,
888 id: ObjectId,
889 data: Box<Data>,
890}
891#[cfg(send_sync)]
892static_assertions::assert_impl_all!(PipelineLayout: Send, Sync);
893
894impl Drop for PipelineLayout {
895 fn drop(&mut self) {
896 if !thread::panicking() {
897 self.context
898 .pipeline_layout_drop(&self.id, self.data.as_ref());
899 }
900 }
901}
902
903/// Handle to a rendering (graphics) pipeline.
904///
905/// A `RenderPipeline` object represents a graphics pipeline and its stages, bindings, vertex
906/// buffers and targets. It can be created with [`Device::create_render_pipeline`].
907///
908/// Corresponds to [WebGPU `GPURenderPipeline`](https://gpuweb.github.io/gpuweb/#render-pipeline).
909#[derive(Debug)]
910pub struct RenderPipeline {
911 context: Arc<C>,
912 id: ObjectId,
913 data: Box<Data>,
914}
915#[cfg(send_sync)]
916static_assertions::assert_impl_all!(RenderPipeline: Send, Sync);
917
918impl Drop for RenderPipeline {
919 fn drop(&mut self) {
920 if !thread::panicking() {
921 self.context
922 .render_pipeline_drop(&self.id, self.data.as_ref());
923 }
924 }
925}
926
927impl RenderPipeline {
928 /// Get an object representing the bind group layout at a given index.
929 pub fn get_bind_group_layout(&self, index: u32) -> BindGroupLayout {
930 let context = Arc::clone(&self.context);
931 let (id, data) =
932 self.context
933 .render_pipeline_get_bind_group_layout(&self.id, self.data.as_ref(), index);
934 BindGroupLayout { context, id, data }
935 }
936}
937
938/// Handle to a compute pipeline.
939///
940/// A `ComputePipeline` object represents a compute pipeline and its single shader stage.
941/// It can be created with [`Device::create_compute_pipeline`].
942///
943/// Corresponds to [WebGPU `GPUComputePipeline`](https://gpuweb.github.io/gpuweb/#compute-pipeline).
944#[derive(Debug)]
945pub struct ComputePipeline {
946 context: Arc<C>,
947 id: ObjectId,
948 data: Box<Data>,
949}
950#[cfg(send_sync)]
951static_assertions::assert_impl_all!(ComputePipeline: Send, Sync);
952
953impl Drop for ComputePipeline {
954 fn drop(&mut self) {
955 if !thread::panicking() {
956 self.context
957 .compute_pipeline_drop(&self.id, self.data.as_ref());
958 }
959 }
960}
961
962impl ComputePipeline {
963 /// Get an object representing the bind group layout at a given index.
964 pub fn get_bind_group_layout(&self, index: u32) -> BindGroupLayout {
965 let context = Arc::clone(&self.context);
966 let (id, data) = self.context.compute_pipeline_get_bind_group_layout(
967 &self.id,
968 self.data.as_ref(),
969 index,
970 );
971 BindGroupLayout { context, id, data }
972 }
973}
974
975/// Handle to a command buffer on the GPU.
976///
977/// A `CommandBuffer` represents a complete sequence of commands that may be submitted to a command
978/// queue with [`Queue::submit`]. A `CommandBuffer` is obtained by recording a series of commands to
979/// a [`CommandEncoder`] and then calling [`CommandEncoder::finish`].
980///
981/// Corresponds to [WebGPU `GPUCommandBuffer`](https://gpuweb.github.io/gpuweb/#command-buffer).
982#[derive(Debug)]
983pub struct CommandBuffer {
984 context: Arc<C>,
985 id: Option<ObjectId>,
986 data: Option<Box<Data>>,
987}
988#[cfg(send_sync)]
989static_assertions::assert_impl_all!(CommandBuffer: Send, Sync);
990
991impl Drop for CommandBuffer {
992 fn drop(&mut self) {
993 if !thread::panicking() {
994 if let Some(id) = self.id.take() {
995 self.context
996 .command_buffer_drop(&id, self.data.take().unwrap().as_ref());
997 }
998 }
999 }
1000}
1001
1002/// Encodes a series of GPU operations.
1003///
1004/// A command encoder can record [`RenderPass`]es, [`ComputePass`]es,
1005/// and transfer operations between driver-managed resources like [`Buffer`]s and [`Texture`]s.
1006///
1007/// When finished recording, call [`CommandEncoder::finish`] to obtain a [`CommandBuffer`] which may
1008/// be submitted for execution.
1009///
1010/// Corresponds to [WebGPU `GPUCommandEncoder`](https://gpuweb.github.io/gpuweb/#command-encoder).
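///
/// ## Example
///
/// A minimal sketch of recording a copy between two existing buffers and
/// submitting it (the label and byte count are placeholders):
///
/// ```no_run
/// # fn example(device: &wgpu::Device, queue: &wgpu::Queue, src: &wgpu::Buffer, dst: &wgpu::Buffer) {
/// let mut encoder =
///     device.create_command_encoder(&wgpu::CommandEncoderDescriptor { label: Some("copy") });
/// // `src` needs `BufferUsages::COPY_SRC` and `dst` needs `BufferUsages::COPY_DST`.
/// encoder.copy_buffer_to_buffer(src, 0, dst, 0, 256);
/// queue.submit([encoder.finish()]);
/// # }
/// ```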
1011#[derive(Debug)]
1012pub struct CommandEncoder {
1013 context: Arc<C>,
1014 id: Option<ObjectId>,
1015 data: Box<Data>,
1016}
1017#[cfg(send_sync)]
1018static_assertions::assert_impl_all!(CommandEncoder: Send, Sync);
1019
1020impl Drop for CommandEncoder {
1021 fn drop(&mut self) {
1022 if !thread::panicking() {
1023 if let Some(id) = self.id.take() {
1024 self.context.command_encoder_drop(&id, self.data.as_ref());
1025 }
1026 }
1027 }
1028}
1029
1030/// In-progress recording of a render pass: a list of render commands in a [`CommandEncoder`].
1031///
1032/// It can be created with [`CommandEncoder::begin_render_pass()`], whose [`RenderPassDescriptor`]
1033/// specifies the attachments (textures) that will be rendered to.
1034///
1035/// Most of the methods on `RenderPass` serve one of two purposes, identifiable by their names:
1036///
1037/// * `draw_*()`: Drawing (that is, encoding a render command, which, when executed by the GPU, will
1038/// rasterize something and execute shaders).
1039/// * `set_*()`: Setting part of the [render state](https://gpuweb.github.io/gpuweb/#renderstate)
1040/// for future drawing commands.
1041///
1042/// A render pass may contain any number of drawing commands, and before/between each command the
1043/// render state may be updated however you wish; each drawing command will be executed using the
1044/// render state that has been set when the `draw_*()` function is called.
1045///
1046/// Corresponds to [WebGPU `GPURenderPassEncoder`](
1047/// https://gpuweb.github.io/gpuweb/#render-pass-encoder).
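///
/// ## Example
///
/// A rough sketch of the set-then-draw pattern described above (assuming an
/// existing [`CommandEncoder`], pipeline, bind group, and vertex buffer; the
/// clear color and vertex count are placeholders):
///
/// ```no_run
/// # fn example(
/// #     encoder: &mut wgpu::CommandEncoder,
/// #     view: &wgpu::TextureView,
/// #     pipeline: &wgpu::RenderPipeline,
/// #     bind_group: &wgpu::BindGroup,
/// #     vertex_buffer: &wgpu::Buffer,
/// # ) {
/// let mut pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
///     label: Some("example pass"),
///     color_attachments: &[Some(wgpu::RenderPassColorAttachment {
///         view,
///         resolve_target: None,
///         ops: wgpu::Operations {
///             load: wgpu::LoadOp::Clear(wgpu::Color::BLACK),
///             store: wgpu::StoreOp::Store,
///         },
///     })],
///     depth_stencil_attachment: None,
///     timestamp_writes: None,
///     occlusion_query_set: None,
/// });
/// // Set the render state, then draw with it.
/// pass.set_pipeline(pipeline);
/// pass.set_bind_group(0, bind_group, &[]);
/// pass.set_vertex_buffer(0, vertex_buffer.slice(..));
/// pass.draw(0..3, 0..1);
/// # }
/// ```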
1048#[derive(Debug)]
1049pub struct RenderPass<'a> {
1050 id: ObjectId,
1051 data: Box<Data>,
1052 parent: &'a mut CommandEncoder,
1053}
1054
1055/// In-progress recording of a compute pass.
1056///
1057/// It can be created with [`CommandEncoder::begin_compute_pass`].
1058///
1059/// Corresponds to [WebGPU `GPUComputePassEncoder`](
1060/// https://gpuweb.github.io/gpuweb/#compute-pass-encoder).
1061#[derive(Debug)]
1062pub struct ComputePass<'a> {
1063 id: ObjectId,
1064 data: Box<Data>,
1065 parent: &'a mut CommandEncoder,
1066}
1067
1068/// Encodes a series of GPU operations into a reusable "render bundle".
1069///
1070/// It only supports a handful of render commands, but it makes them reusable.
1071/// It can be created with [`Device::create_render_bundle_encoder`].
1072/// It can be executed onto a [`CommandEncoder`] using [`RenderPass::execute_bundles`].
1073///
1074/// Executing a [`RenderBundle`] is often more efficient than issuing the underlying commands
1075/// manually.
1076///
1077/// Corresponds to [WebGPU `GPURenderBundleEncoder`](
1078/// https://gpuweb.github.io/gpuweb/#gpurenderbundleencoder).
1079#[derive(Debug)]
1080pub struct RenderBundleEncoder<'a> {
1081 context: Arc<C>,
1082 id: ObjectId,
1083 data: Box<Data>,
1084 parent: &'a Device,
1085 /// This type should be !Send !Sync, because it represents an allocation on this thread's
1086 /// command buffer.
1087 _p: PhantomData<*const u8>,
1088}
1089static_assertions::assert_not_impl_any!(RenderBundleEncoder<'_>: Send, Sync);
1090
1091/// Pre-prepared reusable bundle of GPU operations.
1092///
1093/// It only supports a handful of render commands, but it makes them reusable. Executing a
1094/// [`RenderBundle`] is often more efficient than issuing the underlying commands manually.
1095///
1096/// It can be created by use of a [`RenderBundleEncoder`], and executed onto a [`CommandEncoder`]
1097/// using [`RenderPass::execute_bundles`].
1098///
1099/// Corresponds to [WebGPU `GPURenderBundle`](https://gpuweb.github.io/gpuweb/#render-bundle).
1100#[derive(Debug)]
1101pub struct RenderBundle {
1102 context: Arc<C>,
1103 id: ObjectId,
1104 data: Box<Data>,
1105}
1106#[cfg(send_sync)]
1107static_assertions::assert_impl_all!(RenderBundle: Send, Sync);
1108
1109impl Drop for RenderBundle {
1110 fn drop(&mut self) {
1111 if !thread::panicking() {
1112 self.context
1113 .render_bundle_drop(&self.id, self.data.as_ref());
1114 }
1115 }
1116}
1117
1118/// Handle to a query set.
1119///
1120/// It can be created with [`Device::create_query_set`].
1121///
1122/// Corresponds to [WebGPU `GPUQuerySet`](https://gpuweb.github.io/gpuweb/#queryset).
1123#[derive(Debug)]
1124pub struct QuerySet {
1125 context: Arc<C>,
1126 id: ObjectId,
1127 data: Box<Data>,
1128}
#[cfg(send_sync)]
1131static_assertions::assert_impl_all!(QuerySet: Send, Sync);
1132
1133impl Drop for QuerySet {
1134 fn drop(&mut self) {
1135 if !thread::panicking() {
1136 self.context.query_set_drop(&self.id, self.data.as_ref());
1137 }
1138 }
1139}
1140
1141/// Handle to a command queue on a device.
1142///
1143/// A `Queue` executes recorded [`CommandBuffer`] objects and provides convenience methods
1144/// for writing to [buffers](Queue::write_buffer) and [textures](Queue::write_texture).
1145/// It can be created along with a [`Device`] by calling [`Adapter::request_device`].
1146///
1147/// Corresponds to [WebGPU `GPUQueue`](https://gpuweb.github.io/gpuweb/#gpu-queue).
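///
/// ## Example
///
/// A minimal sketch of updating a buffer's contents without mapping it (the
/// offset and data are placeholders; `buffer` needs [`BufferUsages::COPY_DST`]):
///
/// ```no_run
/// # fn example(queue: &wgpu::Queue, buffer: &wgpu::Buffer) {
/// // The write is staged and flushed at the next call to `Queue::submit`.
/// queue.write_buffer(buffer, 0, &[0u8; 64]);
/// # }
/// ```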
1148#[derive(Debug)]
1149pub struct Queue {
1150 context: Arc<C>,
1151 id: ObjectId,
1152 data: Box<Data>,
1153}
1154#[cfg(send_sync)]
1155static_assertions::assert_impl_all!(Queue: Send, Sync);
1156
1157impl Drop for Queue {
1158 fn drop(&mut self) {
1159 if !thread::panicking() {
1160 self.context.queue_drop(&self.id, self.data.as_ref());
1161 }
1162 }
1163}
1164
1165/// Resource that can be bound to a pipeline.
1166///
1167/// Corresponds to [WebGPU `GPUBindingResource`](
1168/// https://gpuweb.github.io/gpuweb/#typedefdef-gpubindingresource).
1169#[non_exhaustive]
1170#[derive(Clone, Debug)]
1171pub enum BindingResource<'a> {
1172 /// Binding is backed by a buffer.
1173 ///
1174 /// Corresponds to [`wgt::BufferBindingType::Uniform`] and [`wgt::BufferBindingType::Storage`]
1175 /// with [`BindGroupLayoutEntry::count`] set to None.
1176 Buffer(BufferBinding<'a>),
1177 /// Binding is backed by an array of buffers.
1178 ///
1179 /// [`Features::BUFFER_BINDING_ARRAY`] must be supported to use this feature.
1180 ///
1181 /// Corresponds to [`wgt::BufferBindingType::Uniform`] and [`wgt::BufferBindingType::Storage`]
1182 /// with [`BindGroupLayoutEntry::count`] set to Some.
1183 BufferArray(&'a [BufferBinding<'a>]),
1184 /// Binding is a sampler.
1185 ///
1186 /// Corresponds to [`wgt::BindingType::Sampler`] with [`BindGroupLayoutEntry::count`] set to None.
1187 Sampler(&'a Sampler),
1188 /// Binding is backed by an array of samplers.
1189 ///
1190 /// [`Features::TEXTURE_BINDING_ARRAY`] must be supported to use this feature.
1191 ///
1192 /// Corresponds to [`wgt::BindingType::Sampler`] with [`BindGroupLayoutEntry::count`] set
1193 /// to Some.
1194 SamplerArray(&'a [&'a Sampler]),
1195 /// Binding is backed by a texture.
1196 ///
1197 /// Corresponds to [`wgt::BindingType::Texture`] and [`wgt::BindingType::StorageTexture`] with
1198 /// [`BindGroupLayoutEntry::count`] set to None.
1199 TextureView(&'a TextureView),
1200 /// Binding is backed by an array of textures.
1201 ///
1202 /// [`Features::TEXTURE_BINDING_ARRAY`] must be supported to use this feature.
1203 ///
1204 /// Corresponds to [`wgt::BindingType::Texture`] and [`wgt::BindingType::StorageTexture`] with
1205 /// [`BindGroupLayoutEntry::count`] set to Some.
1206 TextureViewArray(&'a [&'a TextureView]),
1207}
1208#[cfg(send_sync)]
1209static_assertions::assert_impl_all!(BindingResource<'_>: Send, Sync);
1210
1211/// Describes the segment of a buffer to bind.
1212///
1213/// Corresponds to [WebGPU `GPUBufferBinding`](
1214/// https://gpuweb.github.io/gpuweb/#dictdef-gpubufferbinding).
1215#[derive(Clone, Debug)]
1216pub struct BufferBinding<'a> {
1217 /// The buffer to bind.
1218 pub buffer: &'a Buffer,
1219
1220 /// Base offset of the buffer, in bytes.
1221 ///
1222 /// If the [`has_dynamic_offset`] field of this buffer's layout entry is
1223 /// `true`, the offset here will be added to the dynamic offset passed to
1224 /// [`RenderPass::set_bind_group`] or [`ComputePass::set_bind_group`].
1225 ///
1226 /// If the buffer was created with [`BufferUsages::UNIFORM`], then this
1227 /// offset must be a multiple of
1228 /// [`Limits::min_uniform_buffer_offset_alignment`].
1229 ///
1230 /// If the buffer was created with [`BufferUsages::STORAGE`], then this
1231 /// offset must be a multiple of
1232 /// [`Limits::min_storage_buffer_offset_alignment`].
1233 ///
1234 /// [`has_dynamic_offset`]: BindingType::Buffer::has_dynamic_offset
1235 pub offset: BufferAddress,
1236
1237 /// Size of the binding in bytes, or `None` for using the rest of the buffer.
1238 pub size: Option<BufferSize>,
1239}
1240#[cfg(send_sync)]
1241static_assertions::assert_impl_all!(BufferBinding<'_>: Send, Sync);
1242
1243/// Operation to perform to the output attachment at the start of a render pass.
1244///
1245/// Corresponds to [WebGPU `GPULoadOp`](https://gpuweb.github.io/gpuweb/#enumdef-gpuloadop),
1246/// plus the corresponding clearValue.
1247#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq)]
1248#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
1249pub enum LoadOp<V> {
1250 /// Loads the specified value for this attachment into the render pass.
1251 ///
1252 /// On some GPU hardware (primarily mobile), "clear" is significantly cheaper
1253 /// because it avoids loading data from main memory into tile-local memory.
1254 ///
1255 /// On other GPU hardware, there isn’t a significant difference.
1256 ///
1257 /// As a result, it is recommended to use "clear" rather than "load" in cases
1258 /// where the initial value doesn’t matter
1259 /// (e.g. the render target will be cleared using a skybox).
1260 Clear(V),
1261 /// Loads the existing value for this attachment into the render pass.
1262 Load,
1263}
1264
1265impl<V: Default> Default for LoadOp<V> {
1266 fn default() -> Self {
1267 Self::Clear(Default::default())
1268 }
1269}
1270
1271/// Operation to perform to the output attachment at the end of a render pass.
1272///
1273/// Corresponds to [WebGPU `GPUStoreOp`](https://gpuweb.github.io/gpuweb/#enumdef-gpustoreop).
1274#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq, Default)]
1275#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
1276pub enum StoreOp {
1277 /// Stores the resulting value of the render pass for this attachment.
1278 #[default]
1279 Store,
1280 /// Discards the resulting value of the render pass for this attachment.
1281 ///
1282 /// The attachment will be treated as uninitialized afterwards.
    /// (If only one of the depth and stencil texture aspects is set to `Discard`,
    /// the other aspect will be preserved.)
1285 ///
1286 /// This can be significantly faster on tile-based render hardware.
1287 ///
1288 /// Prefer this if the attachment is not read by subsequent passes.
1289 Discard,
1290}
1291
1292/// Pair of load and store operations for an attachment aspect.
1293///
1294/// This type is unique to the Rust API of `wgpu`. In the WebGPU specification,
1295/// separate `loadOp` and `storeOp` fields are used instead.
1296#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq)]
1297#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
1298pub struct Operations<V> {
1299 /// How data should be read through this attachment.
1300 pub load: LoadOp<V>,
1301 /// Whether data will be written to through this attachment.
1302 ///
1303 /// Note that resolve textures (if specified) are always written to,
1304 /// regardless of this setting.
1305 pub store: StoreOp,
1306}
1307
1308impl<V: Default> Default for Operations<V> {
1309 #[inline]
1310 fn default() -> Self {
1311 Self {
1312 load: LoadOp::<V>::default(),
1313 store: StoreOp::default(),
1314 }
1315 }
1316}
1317
1318/// Describes the timestamp writes of a render pass.
1319///
1320/// For use with [`RenderPassDescriptor`].
1321/// At least one of `beginning_of_pass_write_index` and `end_of_pass_write_index` must be `Some`.
1322///
/// Corresponds to [WebGPU `GPURenderPassTimestampWrites`](
1324/// https://gpuweb.github.io/gpuweb/#dictdef-gpurenderpasstimestampwrites).
1325#[derive(Clone, Debug)]
1326pub struct RenderPassTimestampWrites<'a> {
1327 /// The query set to write to.
1328 pub query_set: &'a QuerySet,
1329 /// The index of the query set at which a start timestamp of this pass is written, if any.
1330 pub beginning_of_pass_write_index: Option<u32>,
1331 /// The index of the query set at which an end timestamp of this pass is written, if any.
1332 pub end_of_pass_write_index: Option<u32>,
1333}
1334#[cfg(send_sync)]
1335static_assertions::assert_impl_all!(RenderPassTimestampWrites<'_>: Send, Sync);
1336
1337/// Describes a color attachment to a [`RenderPass`].
1338///
1339/// For use with [`RenderPassDescriptor`].
1340///
1341/// Corresponds to [WebGPU `GPURenderPassColorAttachment`](
1342/// https://gpuweb.github.io/gpuweb/#color-attachments).
1343#[derive(Clone, Debug)]
1344pub struct RenderPassColorAttachment<'tex> {
1345 /// The view to use as an attachment.
1346 pub view: &'tex TextureView,
1347 /// The view that will receive the resolved output if multisampling is used.
1348 ///
1349 /// If set, it is always written to, regardless of how [`Self::ops`] is configured.
1350 pub resolve_target: Option<&'tex TextureView>,
1351 /// What operations will be performed on this color attachment.
1352 pub ops: Operations<Color>,
1353}
1354#[cfg(send_sync)]
1355static_assertions::assert_impl_all!(RenderPassColorAttachment<'_>: Send, Sync);
1356
1357/// Describes a depth/stencil attachment to a [`RenderPass`].
1358///
1359/// For use with [`RenderPassDescriptor`].
1360///
1361/// Corresponds to [WebGPU `GPURenderPassDepthStencilAttachment`](
1362/// https://gpuweb.github.io/gpuweb/#depth-stencil-attachments).
1363#[derive(Clone, Debug)]
1364pub struct RenderPassDepthStencilAttachment<'tex> {
1365 /// The view to use as an attachment.
1366 pub view: &'tex TextureView,
1367 /// What operations will be performed on the depth part of the attachment.
1368 pub depth_ops: Option<Operations<f32>>,
1369 /// What operations will be performed on the stencil part of the attachment.
1370 pub stencil_ops: Option<Operations<u32>>,
1371}
1372#[cfg(send_sync)]
1373static_assertions::assert_impl_all!(RenderPassDepthStencilAttachment<'_>: Send, Sync);
1374
1375// The underlying types are also exported so that documentation shows up for them
1376
1377/// Object debugging label.
1378pub type Label<'a> = Option<&'a str>;
1379pub use wgt::RequestAdapterOptions as RequestAdapterOptionsBase;
1380/// Additional information required when requesting an adapter.
1381///
1382/// For use with [`Instance::request_adapter`].
1383///
1384/// Corresponds to [WebGPU `GPURequestAdapterOptions`](
1385/// https://gpuweb.github.io/gpuweb/#dictdef-gpurequestadapteroptions).
1386pub type RequestAdapterOptions<'a, 'b> = RequestAdapterOptionsBase<&'a Surface<'b>>;
1387#[cfg(send_sync)]
1388static_assertions::assert_impl_all!(RequestAdapterOptions<'_, '_>: Send, Sync);
1389/// Describes a [`Device`].
1390///
1391/// For use with [`Adapter::request_device`].
1392///
1393/// Corresponds to [WebGPU `GPUDeviceDescriptor`](
1394/// https://gpuweb.github.io/gpuweb/#dictdef-gpudevicedescriptor).
1395pub type DeviceDescriptor<'a> = wgt::DeviceDescriptor<Label<'a>>;
1396static_assertions::assert_impl_all!(DeviceDescriptor<'_>: Send, Sync);
1397/// Describes a [`Buffer`].
1398///
1399/// For use with [`Device::create_buffer`].
1400///
1401/// Corresponds to [WebGPU `GPUBufferDescriptor`](
1402/// https://gpuweb.github.io/gpuweb/#dictdef-gpubufferdescriptor).
1403pub type BufferDescriptor<'a> = wgt::BufferDescriptor<Label<'a>>;
1404static_assertions::assert_impl_all!(BufferDescriptor<'_>: Send, Sync);
1405/// Describes a [`CommandEncoder`].
1406///
1407/// For use with [`Device::create_command_encoder`].
1408///
1409/// Corresponds to [WebGPU `GPUCommandEncoderDescriptor`](
1410/// https://gpuweb.github.io/gpuweb/#dictdef-gpucommandencoderdescriptor).
1411pub type CommandEncoderDescriptor<'a> = wgt::CommandEncoderDescriptor<Label<'a>>;
1412static_assertions::assert_impl_all!(CommandEncoderDescriptor<'_>: Send, Sync);
1413/// Describes a [`RenderBundle`].
1414///
1415/// For use with [`RenderBundleEncoder::finish`].
1416///
1417/// Corresponds to [WebGPU `GPURenderBundleDescriptor`](
1418/// https://gpuweb.github.io/gpuweb/#dictdef-gpurenderbundledescriptor).
1419pub type RenderBundleDescriptor<'a> = wgt::RenderBundleDescriptor<Label<'a>>;
1420static_assertions::assert_impl_all!(RenderBundleDescriptor<'_>: Send, Sync);
1421/// Describes a [`Texture`].
1422///
1423/// For use with [`Device::create_texture`].
1424///
1425/// Corresponds to [WebGPU `GPUTextureDescriptor`](
1426/// https://gpuweb.github.io/gpuweb/#dictdef-gputexturedescriptor).
1427pub type TextureDescriptor<'a> = wgt::TextureDescriptor<Label<'a>, &'a [TextureFormat]>;
1428static_assertions::assert_impl_all!(TextureDescriptor<'_>: Send, Sync);
1429/// Describes a [`QuerySet`].
1430///
1431/// For use with [`Device::create_query_set`].
1432///
1433/// Corresponds to [WebGPU `GPUQuerySetDescriptor`](
1434/// https://gpuweb.github.io/gpuweb/#dictdef-gpuquerysetdescriptor).
1435pub type QuerySetDescriptor<'a> = wgt::QuerySetDescriptor<Label<'a>>;
1436static_assertions::assert_impl_all!(QuerySetDescriptor<'_>: Send, Sync);
1437pub use wgt::Maintain as MaintainBase;
1438/// Passed to [`Device::poll`] to control how and if it should block.
1439pub type Maintain = wgt::Maintain<SubmissionIndex>;
1440#[cfg(send_sync)]
1441static_assertions::assert_impl_all!(Maintain: Send, Sync);
1442
1443/// Describes a [`TextureView`].
1444///
1445/// For use with [`Texture::create_view`].
1446///
1447/// Corresponds to [WebGPU `GPUTextureViewDescriptor`](
1448/// https://gpuweb.github.io/gpuweb/#dictdef-gputextureviewdescriptor).
1449#[derive(Clone, Debug, Default, Eq, PartialEq)]
1450pub struct TextureViewDescriptor<'a> {
1451 /// Debug label of the texture view. This will show up in graphics debuggers for easy identification.
1452 pub label: Label<'a>,
    /// Format of the texture view. If `Some`, it must either be the same as the texture's format
    /// or be listed in the texture descriptor's `view_formats`.
1455 pub format: Option<TextureFormat>,
1456 /// The dimension of the texture view. For 1D textures, this must be `D1`. For 2D textures it must be one of
1457 /// `D2`, `D2Array`, `Cube`, and `CubeArray`. For 3D textures it must be `D3`
1458 pub dimension: Option<TextureViewDimension>,
1459 /// Aspect of the texture. Color textures must be [`TextureAspect::All`].
1460 pub aspect: TextureAspect,
1461 /// Base mip level.
1462 pub base_mip_level: u32,
1463 /// Mip level count.
    /// If `Some(count)`, `base_mip_level + count` must be less than or equal to the underlying texture's mip level count.
    /// If `None`, considered to include the rest of the mipmap levels, but at least 1 in total.
1466 pub mip_level_count: Option<u32>,
1467 /// Base array layer.
1468 pub base_array_layer: u32,
1469 /// Layer count.
    /// If `Some(count)`, `base_array_layer + count` must be less than or equal to the underlying array count.
    /// If `None`, considered to include the rest of the array layers, but at least 1 in total.
1472 pub array_layer_count: Option<u32>,
1473}
1474static_assertions::assert_impl_all!(TextureViewDescriptor<'_>: Send, Sync);
1475
1476/// Describes a [`PipelineLayout`].
1477///
1478/// For use with [`Device::create_pipeline_layout`].
1479///
1480/// Corresponds to [WebGPU `GPUPipelineLayoutDescriptor`](
1481/// https://gpuweb.github.io/gpuweb/#dictdef-gpupipelinelayoutdescriptor).
1482#[derive(Clone, Debug, Default)]
1483pub struct PipelineLayoutDescriptor<'a> {
1484 /// Debug label of the pipeline layout. This will show up in graphics debuggers for easy identification.
1485 pub label: Label<'a>,
    /// Bind groups that this pipeline uses. The first entry will provide all the bindings for
    /// "set = 0", the second entry will provide all the bindings for "set = 1", etc.
1488 pub bind_group_layouts: &'a [&'a BindGroupLayout],
1489 /// Set of push constant ranges this pipeline uses. Each shader stage that uses push constants
1490 /// must define the range in push constant memory that corresponds to its single `layout(push_constant)`
1491 /// uniform block.
1492 ///
    /// If this array is non-empty, the [`Features::PUSH_CONSTANTS`] feature must be enabled.
1494 pub push_constant_ranges: &'a [PushConstantRange],
1495}
1496#[cfg(send_sync)]
1497static_assertions::assert_impl_all!(PipelineLayoutDescriptor<'_>: Send, Sync);
1498
1499/// Describes a [`Sampler`].
1500///
1501/// For use with [`Device::create_sampler`].
1502///
1503/// Corresponds to [WebGPU `GPUSamplerDescriptor`](
1504/// https://gpuweb.github.io/gpuweb/#dictdef-gpusamplerdescriptor).
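///
/// A minimal usage sketch of a linearly filtered sampler, leaving the remaining fields at their
/// defaults (assumes a `device` created elsewhere):
///
/// ```no_run
/// # let device: wgpu::Device = panic!();
/// let sampler = device.create_sampler(&wgpu::SamplerDescriptor {
///     mag_filter: wgpu::FilterMode::Linear,
///     min_filter: wgpu::FilterMode::Linear,
///     mipmap_filter: wgpu::FilterMode::Linear,
///     ..Default::default()
/// });
/// ```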
1505#[derive(Clone, Debug, PartialEq)]
1506pub struct SamplerDescriptor<'a> {
1507 /// Debug label of the sampler. This will show up in graphics debuggers for easy identification.
1508 pub label: Label<'a>,
1509 /// How to deal with out of bounds accesses in the u (i.e. x) direction
1510 pub address_mode_u: AddressMode,
1511 /// How to deal with out of bounds accesses in the v (i.e. y) direction
1512 pub address_mode_v: AddressMode,
1513 /// How to deal with out of bounds accesses in the w (i.e. z) direction
1514 pub address_mode_w: AddressMode,
1515 /// How to filter the texture when it needs to be magnified (made larger)
1516 pub mag_filter: FilterMode,
1517 /// How to filter the texture when it needs to be minified (made smaller)
1518 pub min_filter: FilterMode,
1519 /// How to filter between mip map levels
1520 pub mipmap_filter: FilterMode,
1521 /// Minimum level of detail (i.e. mip level) to use
1522 pub lod_min_clamp: f32,
1523 /// Maximum level of detail (i.e. mip level) to use
1524 pub lod_max_clamp: f32,
1525 /// If this is enabled, this is a comparison sampler using the given comparison function.
1526 pub compare: Option<CompareFunction>,
1527 /// Must be at least 1. If this is not 1, all filter modes must be linear.
1528 pub anisotropy_clamp: u16,
1529 /// Border color to use when address_mode is [`AddressMode::ClampToBorder`]
1530 pub border_color: Option<SamplerBorderColor>,
1531}
1532static_assertions::assert_impl_all!(SamplerDescriptor<'_>: Send, Sync);
1533
1534impl Default for SamplerDescriptor<'_> {
1535 fn default() -> Self {
1536 Self {
1537 label: None,
1538 address_mode_u: Default::default(),
1539 address_mode_v: Default::default(),
1540 address_mode_w: Default::default(),
1541 mag_filter: Default::default(),
1542 min_filter: Default::default(),
1543 mipmap_filter: Default::default(),
1544 lod_min_clamp: 0.0,
1545 lod_max_clamp: 32.0,
1546 compare: None,
1547 anisotropy_clamp: 1,
1548 border_color: None,
1549 }
1550 }
1551}
1552
1553/// An element of a [`BindGroupDescriptor`], consisting of a bindable resource
1554/// and the slot to bind it to.
1555///
1556/// Corresponds to [WebGPU `GPUBindGroupEntry`](
1557/// https://gpuweb.github.io/gpuweb/#dictdef-gpubindgroupentry).
1558#[derive(Clone, Debug)]
1559pub struct BindGroupEntry<'a> {
    /// The slot for which this binding provides a resource. Corresponds to an entry of the same
    /// binding index in the [`BindGroupLayoutDescriptor`].
1562 pub binding: u32,
1563 /// Resource to attach to the binding
1564 pub resource: BindingResource<'a>,
1565}
1566#[cfg(send_sync)]
1567static_assertions::assert_impl_all!(BindGroupEntry<'_>: Send, Sync);
1568
1569/// Describes a group of bindings and the resources to be bound.
1570///
1571/// For use with [`Device::create_bind_group`].
1572///
1573/// Corresponds to [WebGPU `GPUBindGroupDescriptor`](
1574/// https://gpuweb.github.io/gpuweb/#dictdef-gpubindgroupdescriptor).
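///
/// A minimal usage sketch that binds an entire buffer at binding 0, assuming a `device`, a
/// matching `layout`, and a `buffer` created elsewhere:
///
/// ```no_run
/// # let device: wgpu::Device = panic!();
/// # let layout: wgpu::BindGroupLayout = panic!();
/// # let buffer: wgpu::Buffer = panic!();
/// let bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
///     label: Some("example bind group"),
///     layout: &layout,
///     entries: &[wgpu::BindGroupEntry {
///         binding: 0,
///         resource: buffer.as_entire_binding(),
///     }],
/// });
/// ```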
1575#[derive(Clone, Debug)]
1576pub struct BindGroupDescriptor<'a> {
1577 /// Debug label of the bind group. This will show up in graphics debuggers for easy identification.
1578 pub label: Label<'a>,
1579 /// The [`BindGroupLayout`] that corresponds to this bind group.
1580 pub layout: &'a BindGroupLayout,
1581 /// The resources to bind to this bind group.
1582 pub entries: &'a [BindGroupEntry<'a>],
1583}
1584#[cfg(send_sync)]
1585static_assertions::assert_impl_all!(BindGroupDescriptor<'_>: Send, Sync);
1586
1587/// Describes the attachments of a render pass.
1588///
1589/// For use with [`CommandEncoder::begin_render_pass`].
1590///
1591/// Note: separate lifetimes are needed because the texture views (`'tex`)
1592/// have to live as long as the pass is recorded, while everything else (`'desc`) doesn't.
1593///
1594/// Corresponds to [WebGPU `GPURenderPassDescriptor`](
1595/// https://gpuweb.github.io/gpuweb/#dictdef-gpurenderpassdescriptor).
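///
/// A minimal usage sketch of a pass that clears a single color attachment, assuming an `encoder`
/// and a texture `view` created elsewhere:
///
/// ```no_run
/// # let mut encoder: wgpu::CommandEncoder = panic!();
/// # let view: wgpu::TextureView = panic!();
/// let _pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
///     label: Some("example render pass"),
///     color_attachments: &[Some(wgpu::RenderPassColorAttachment {
///         view: &view,
///         resolve_target: None,
///         ops: wgpu::Operations {
///             load: wgpu::LoadOp::Clear(wgpu::Color::BLACK),
///             store: wgpu::StoreOp::Store,
///         },
///     })],
///     depth_stencil_attachment: None,
///     timestamp_writes: None,
///     occlusion_query_set: None,
/// });
/// ```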
1596#[derive(Clone, Debug, Default)]
1597pub struct RenderPassDescriptor<'tex, 'desc> {
1598 /// Debug label of the render pass. This will show up in graphics debuggers for easy identification.
1599 pub label: Label<'desc>,
1600 /// The color attachments of the render pass.
1601 pub color_attachments: &'desc [Option<RenderPassColorAttachment<'tex>>],
1602 /// The depth and stencil attachment of the render pass, if any.
1603 pub depth_stencil_attachment: Option<RenderPassDepthStencilAttachment<'tex>>,
1604 /// Defines which timestamp values will be written for this pass, and where to write them to.
1605 ///
1606 /// Requires [`Features::TIMESTAMP_QUERY`] to be enabled.
1607 pub timestamp_writes: Option<RenderPassTimestampWrites<'desc>>,
1608 /// Defines where the occlusion query results will be stored for this pass.
1609 pub occlusion_query_set: Option<&'tex QuerySet>,
1610}
1611#[cfg(send_sync)]
1612static_assertions::assert_impl_all!(RenderPassDescriptor<'_, '_>: Send, Sync);
1613
1614/// Describes how the vertex buffer is interpreted.
1615///
1616/// For use in [`VertexState`].
1617///
1618/// Corresponds to [WebGPU `GPUVertexBufferLayout`](
1619/// https://gpuweb.github.io/gpuweb/#dictdef-gpuvertexbufferlayout).
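///
/// A minimal usage sketch describing tightly packed `[f32; 3]` positions stepped per vertex; the
/// shader location `0` and the `Float32x3` format are illustrative:
///
/// ```no_run
/// let layout = wgpu::VertexBufferLayout {
///     array_stride: std::mem::size_of::<[f32; 3]>() as wgpu::BufferAddress,
///     step_mode: wgpu::VertexStepMode::Vertex,
///     attributes: &wgpu::vertex_attr_array![0 => Float32x3],
/// };
/// ```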
1620#[derive(Clone, Debug, Hash, Eq, PartialEq)]
1621pub struct VertexBufferLayout<'a> {
1622 /// The stride, in bytes, between elements of this buffer.
1623 pub array_stride: BufferAddress,
1624 /// How often this vertex buffer is "stepped" forward.
1625 pub step_mode: VertexStepMode,
1626 /// The list of attributes which comprise a single vertex.
1627 pub attributes: &'a [VertexAttribute],
1628}
1629static_assertions::assert_impl_all!(VertexBufferLayout<'_>: Send, Sync);
1630
1631/// Describes the vertex processing in a render pipeline.
1632///
1633/// For use in [`RenderPipelineDescriptor`].
1634///
1635/// Corresponds to [WebGPU `GPUVertexState`](
1636/// https://gpuweb.github.io/gpuweb/#dictdef-gpuvertexstate).
1637#[derive(Clone, Debug)]
1638pub struct VertexState<'a> {
1639 /// The compiled shader module for this stage.
1640 pub module: &'a ShaderModule,
1641 /// The name of the entry point in the compiled shader. There must be a function with this name
1642 /// in the shader.
1643 pub entry_point: &'a str,
1644 /// Advanced options for when this pipeline is compiled
1645 ///
1646 /// This implements `Default`, and for most users can be set to `Default::default()`
1647 pub compilation_options: PipelineCompilationOptions<'a>,
1648 /// The format of any vertex buffers used with this pipeline.
1649 pub buffers: &'a [VertexBufferLayout<'a>],
1650}
1651#[cfg(send_sync)]
1652static_assertions::assert_impl_all!(VertexState<'_>: Send, Sync);
1653
1654/// Describes the fragment processing in a render pipeline.
1655///
1656/// For use in [`RenderPipelineDescriptor`].
1657///
1658/// Corresponds to [WebGPU `GPUFragmentState`](
1659/// https://gpuweb.github.io/gpuweb/#dictdef-gpufragmentstate).
1660#[derive(Clone, Debug)]
1661pub struct FragmentState<'a> {
1662 /// The compiled shader module for this stage.
1663 pub module: &'a ShaderModule,
1664 /// The name of the entry point in the compiled shader. There must be a function with this name
1665 /// in the shader.
1666 pub entry_point: &'a str,
1667 /// Advanced options for when this pipeline is compiled
1668 ///
1669 /// This implements `Default`, and for most users can be set to `Default::default()`
1670 pub compilation_options: PipelineCompilationOptions<'a>,
1671 /// The color state of the render targets.
1672 pub targets: &'a [Option<ColorTargetState>],
1673}
1674#[cfg(send_sync)]
1675static_assertions::assert_impl_all!(FragmentState<'_>: Send, Sync);
1676
1677/// Describes a render (graphics) pipeline.
1678///
1679/// For use with [`Device::create_render_pipeline`].
1680///
1681/// Corresponds to [WebGPU `GPURenderPipelineDescriptor`](
1682/// https://gpuweb.github.io/gpuweb/#dictdef-gpurenderpipelinedescriptor).
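///
/// A minimal usage sketch of a pipeline drawing to a single color target, assuming a `device`, a
/// `shader` module whose `vs_main`/`fs_main` entry points are illustrative, and a
/// `surface_format` chosen elsewhere:
///
/// ```no_run
/// # let device: wgpu::Device = panic!();
/// # let shader: wgpu::ShaderModule = panic!();
/// # let surface_format: wgpu::TextureFormat = panic!();
/// let pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
///     label: Some("example render pipeline"),
///     layout: None, // derive the layout from the shader
///     vertex: wgpu::VertexState {
///         module: &shader,
///         entry_point: "vs_main",
///         compilation_options: Default::default(),
///         buffers: &[],
///     },
///     primitive: wgpu::PrimitiveState::default(),
///     depth_stencil: None,
///     multisample: wgpu::MultisampleState::default(),
///     fragment: Some(wgpu::FragmentState {
///         module: &shader,
///         entry_point: "fs_main",
///         compilation_options: Default::default(),
///         targets: &[Some(surface_format.into())],
///     }),
///     multiview: None,
/// });
/// ```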
1683#[derive(Clone, Debug)]
1684pub struct RenderPipelineDescriptor<'a> {
1685 /// Debug label of the pipeline. This will show up in graphics debuggers for easy identification.
1686 pub label: Label<'a>,
1687 /// The layout of bind groups for this pipeline.
1688 pub layout: Option<&'a PipelineLayout>,
1689 /// The compiled vertex stage, its entry point, and the input buffers layout.
1690 pub vertex: VertexState<'a>,
1691 /// The properties of the pipeline at the primitive assembly and rasterization level.
1692 pub primitive: PrimitiveState,
1693 /// The effect of draw calls on the depth and stencil aspects of the output target, if any.
1694 pub depth_stencil: Option<DepthStencilState>,
1695 /// The multi-sampling properties of the pipeline.
1696 pub multisample: MultisampleState,
1697 /// The compiled fragment stage, its entry point, and the color targets.
1698 pub fragment: Option<FragmentState<'a>>,
1699 /// If the pipeline will be used with a multiview render pass, this indicates how many array
1700 /// layers the attachments will have.
1701 pub multiview: Option<NonZeroU32>,
1702}
1703#[cfg(send_sync)]
1704static_assertions::assert_impl_all!(RenderPipelineDescriptor<'_>: Send, Sync);
1705
1706/// Describes the timestamp writes of a compute pass.
1707///
1708/// For use with [`ComputePassDescriptor`].
1709/// At least one of `beginning_of_pass_write_index` and `end_of_pass_write_index` must be `Some`.
1710///
1711/// Corresponds to [WebGPU `GPUComputePassTimestampWrites`](
1712/// https://gpuweb.github.io/gpuweb/#dictdef-gpucomputepasstimestampwrites).
1713#[derive(Clone, Debug)]
1714pub struct ComputePassTimestampWrites<'a> {
1715 /// The query set to write to.
1716 pub query_set: &'a QuerySet,
1717 /// The index of the query set at which a start timestamp of this pass is written, if any.
1718 pub beginning_of_pass_write_index: Option<u32>,
1719 /// The index of the query set at which an end timestamp of this pass is written, if any.
1720 pub end_of_pass_write_index: Option<u32>,
1721}
1722#[cfg(send_sync)]
1723static_assertions::assert_impl_all!(ComputePassTimestampWrites<'_>: Send, Sync);
1724
1725/// Describes the attachments of a compute pass.
1726///
1727/// For use with [`CommandEncoder::begin_compute_pass`].
1728///
1729/// Corresponds to [WebGPU `GPUComputePassDescriptor`](
1730/// https://gpuweb.github.io/gpuweb/#dictdef-gpucomputepassdescriptor).
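///
/// A minimal usage sketch of a labeled pass without timestamp queries, assuming an `encoder`
/// created elsewhere:
///
/// ```no_run
/// # let mut encoder: wgpu::CommandEncoder = panic!();
/// let _pass = encoder.begin_compute_pass(&wgpu::ComputePassDescriptor {
///     label: Some("example compute pass"),
///     timestamp_writes: None,
/// });
/// ```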
1731#[derive(Clone, Default, Debug)]
1732pub struct ComputePassDescriptor<'a> {
1733 /// Debug label of the compute pass. This will show up in graphics debuggers for easy identification.
1734 pub label: Label<'a>,
1735 /// Defines which timestamp values will be written for this pass, and where to write them to.
1736 ///
1737 /// Requires [`Features::TIMESTAMP_QUERY`] to be enabled.
1738 pub timestamp_writes: Option<ComputePassTimestampWrites<'a>>,
1739}
1740#[cfg(send_sync)]
1741static_assertions::assert_impl_all!(ComputePassDescriptor<'_>: Send, Sync);
1742
1743#[derive(Clone, Debug)]
1744/// Advanced options for use when a pipeline is compiled
1745///
1746/// This implements `Default`, and for most users can be set to `Default::default()`
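///
/// A minimal usage sketch that overrides a pipeline constant; the constant name `scale` is
/// illustrative and must match an override declared in the shader:
///
/// ```no_run
/// use std::collections::HashMap;
///
/// let constants = HashMap::from([(String::from("scale"), 2.0_f64)]);
/// let options = wgpu::PipelineCompilationOptions {
///     constants: &constants,
///     zero_initialize_workgroup_memory: true,
/// };
/// ```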
1747pub struct PipelineCompilationOptions<'a> {
1748 /// Specifies the values of pipeline-overridable constants in the shader module.
1749 ///
1750 /// If an `@id` attribute was specified on the declaration,
1751 /// the key must be the pipeline constant ID as a decimal ASCII number; if not,
1752 /// the key must be the constant's identifier name.
1753 ///
1754 /// The value may represent any of WGSL's concrete scalar types.
1755 pub constants: &'a HashMap<String, f64>,
1756 /// Whether workgroup scoped memory will be initialized with zero values for this stage.
1757 ///
1758 /// This is required by the WebGPU spec, but may have overhead which can be avoided
1759 /// for cross-platform applications
1760 pub zero_initialize_workgroup_memory: bool,
1761}
1762
1763impl<'a> Default for PipelineCompilationOptions<'a> {
1764 fn default() -> Self {
1765 // HashMap doesn't have a const constructor, due to the use of RandomState
1766 // This does introduce some synchronisation costs, but these should be minor,
1767 // and might be cheaper than the alternative of getting new random state
1768 static DEFAULT_CONSTANTS: std::sync::OnceLock<HashMap<String, f64>> =
1769 std::sync::OnceLock::new();
1770 let constants = DEFAULT_CONSTANTS.get_or_init(Default::default);
1771 Self {
1772 constants,
1773 zero_initialize_workgroup_memory: true,
1774 }
1775 }
1776}
1777
1778/// Describes a compute pipeline.
1779///
1780/// For use with [`Device::create_compute_pipeline`].
1781///
1782/// Corresponds to [WebGPU `GPUComputePipelineDescriptor`](
1783/// https://gpuweb.github.io/gpuweb/#dictdef-gpucomputepipelinedescriptor).
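///
/// A minimal usage sketch, assuming a `device` and a compute `shader` module whose `main` entry
/// point is illustrative:
///
/// ```no_run
/// # let device: wgpu::Device = panic!();
/// # let shader: wgpu::ShaderModule = panic!();
/// let pipeline = device.create_compute_pipeline(&wgpu::ComputePipelineDescriptor {
///     label: Some("example compute pipeline"),
///     layout: None,
///     module: &shader,
///     entry_point: "main",
///     compilation_options: Default::default(),
/// });
/// ```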
1784#[derive(Clone, Debug)]
1785pub struct ComputePipelineDescriptor<'a> {
1786 /// Debug label of the pipeline. This will show up in graphics debuggers for easy identification.
1787 pub label: Label<'a>,
1788 /// The layout of bind groups for this pipeline.
1789 pub layout: Option<&'a PipelineLayout>,
1790 /// The compiled shader module for this stage.
1791 pub module: &'a ShaderModule,
1792 /// The name of the entry point in the compiled shader. There must be a function with this name
1793 /// and no return value in the shader.
1794 pub entry_point: &'a str,
1795 /// Advanced options for when this pipeline is compiled
1796 ///
1797 /// This implements `Default`, and for most users can be set to `Default::default()`
1798 pub compilation_options: PipelineCompilationOptions<'a>,
1799}
1800#[cfg(send_sync)]
1801static_assertions::assert_impl_all!(ComputePipelineDescriptor<'_>: Send, Sync);
1802
1803pub use wgt::ImageCopyBuffer as ImageCopyBufferBase;
1804/// View of a buffer which can be used to copy to/from a texture.
1805///
1806/// Corresponds to [WebGPU `GPUImageCopyBuffer`](
1807/// https://gpuweb.github.io/gpuweb/#dictdef-gpuimagecopybuffer).
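///
/// A minimal usage sketch describing a buffer holding tightly packed rows of a 256 pixel wide
/// RGBA image (the numbers are illustrative), assuming a `buffer` created elsewhere:
///
/// ```no_run
/// # let buffer: wgpu::Buffer = panic!();
/// let copy_view = wgpu::ImageCopyBuffer {
///     buffer: &buffer,
///     layout: wgpu::ImageDataLayout {
///         offset: 0,
///         bytes_per_row: Some(256 * 4),
///         rows_per_image: None,
///     },
/// };
/// ```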
1808pub type ImageCopyBuffer<'a> = ImageCopyBufferBase<&'a Buffer>;
1809#[cfg(send_sync)]
1810static_assertions::assert_impl_all!(ImageCopyBuffer<'_>: Send, Sync);
1811
1812pub use wgt::ImageCopyTexture as ImageCopyTextureBase;
1813/// View of a texture which can be used to copy to/from a buffer/texture.
1814///
1815/// Corresponds to [WebGPU `GPUImageCopyTexture`](
1816/// https://gpuweb.github.io/gpuweb/#dictdef-gpuimagecopytexture).
1817pub type ImageCopyTexture<'a> = ImageCopyTextureBase<&'a Texture>;
1818#[cfg(send_sync)]
1819static_assertions::assert_impl_all!(ImageCopyTexture<'_>: Send, Sync);
1820
1821pub use wgt::ImageCopyTextureTagged as ImageCopyTextureTaggedBase;
1822/// View of a texture which can be used to copy to a texture, including
1823/// color space and alpha premultiplication information.
1824///
1825/// Corresponds to [WebGPU `GPUImageCopyTextureTagged`](
1826/// https://gpuweb.github.io/gpuweb/#dictdef-gpuimagecopytexturetagged).
1827pub type ImageCopyTextureTagged<'a> = ImageCopyTextureTaggedBase<&'a Texture>;
1828#[cfg(send_sync)]
static_assertions::assert_impl_all!(ImageCopyTextureTagged<'_>: Send, Sync);
1830
1831/// Describes a [`BindGroupLayout`].
1832///
1833/// For use with [`Device::create_bind_group_layout`].
1834///
1835/// Corresponds to [WebGPU `GPUBindGroupLayoutDescriptor`](
1836/// https://gpuweb.github.io/gpuweb/#dictdef-gpubindgrouplayoutdescriptor).
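///
/// A minimal usage sketch declaring a single uniform buffer visible to the fragment stage,
/// assuming a `device` created elsewhere:
///
/// ```no_run
/// # let device: wgpu::Device = panic!();
/// let layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
///     label: Some("example bind group layout"),
///     entries: &[wgpu::BindGroupLayoutEntry {
///         binding: 0,
///         visibility: wgpu::ShaderStages::FRAGMENT,
///         ty: wgpu::BindingType::Buffer {
///             ty: wgpu::BufferBindingType::Uniform,
///             has_dynamic_offset: false,
///             min_binding_size: None,
///         },
///         count: None,
///     }],
/// });
/// ```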
1837#[derive(Clone, Debug)]
1838pub struct BindGroupLayoutDescriptor<'a> {
1839 /// Debug label of the bind group layout. This will show up in graphics debuggers for easy identification.
1840 pub label: Label<'a>,
1841
1842 /// Array of entries in this BindGroupLayout
1843 pub entries: &'a [BindGroupLayoutEntry],
1844}
1845static_assertions::assert_impl_all!(BindGroupLayoutDescriptor<'_>: Send, Sync);
1846
1847/// Describes a [`RenderBundleEncoder`].
1848///
1849/// For use with [`Device::create_render_bundle_encoder`].
1850///
1851/// Corresponds to [WebGPU `GPURenderBundleEncoderDescriptor`](
1852/// https://gpuweb.github.io/gpuweb/#dictdef-gpurenderbundleencoderdescriptor).
1853#[derive(Clone, Debug, Default, PartialEq, Eq, Hash)]
1854pub struct RenderBundleEncoderDescriptor<'a> {
1855 /// Debug label of the render bundle encoder. This will show up in graphics debuggers for easy identification.
1856 pub label: Label<'a>,
    /// The formats of the color attachments that this render bundle is capable of rendering to. This
    /// must match the formats of the color attachments in the render pass this render bundle is executed in.
1859 pub color_formats: &'a [Option<TextureFormat>],
    /// Information about the depth attachment that this render bundle is capable of rendering to. This
    /// must match the format of the depth attachments in the render pass this render bundle is executed in.
1862 pub depth_stencil: Option<RenderBundleDepthStencil>,
1863 /// Sample count this render bundle is capable of rendering to. This must match the pipelines and
1864 /// the render passes it is used in.
1865 pub sample_count: u32,
    /// If this render bundle will render to multiple array layers in the attachments at the same time.
1867 pub multiview: Option<NonZeroU32>,
1868}
1869static_assertions::assert_impl_all!(RenderBundleEncoderDescriptor<'_>: Send, Sync);
1870
1871/// Surface texture that can be rendered to.
1872/// Result of a successful call to [`Surface::get_current_texture`].
1873///
1874/// This type is unique to the Rust API of `wgpu`. In the WebGPU specification,
1875/// the [`GPUCanvasContext`](https://gpuweb.github.io/gpuweb/#canvas-context) provides
1876/// a texture without any additional information.
1877#[derive(Debug)]
1878pub struct SurfaceTexture {
1879 /// Accessible view of the frame.
1880 pub texture: Texture,
1881 /// `true` if the acquired buffer can still be used for rendering,
1882 /// but should be recreated for maximum performance.
1883 pub suboptimal: bool,
1884 presented: bool,
1885 detail: Box<dyn AnyWasmNotSendSync>,
1886}
1887#[cfg(send_sync)]
1888static_assertions::assert_impl_all!(SurfaceTexture: Send, Sync);
1889
1890/// Result of an unsuccessful call to [`Surface::get_current_texture`].
1891#[derive(Clone, PartialEq, Eq, Debug)]
1892pub enum SurfaceError {
1893 /// A timeout was encountered while trying to acquire the next frame.
1894 Timeout,
1895 /// The underlying surface has changed, and therefore the swap chain must be updated.
1896 Outdated,
1897 /// The swap chain has been lost and needs to be recreated.
1898 Lost,
1899 /// There is no more memory left to allocate a new frame.
1900 OutOfMemory,
1901}
1902static_assertions::assert_impl_all!(SurfaceError: Send, Sync);
1903
1904impl fmt::Display for SurfaceError {
1905 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1906 write!(f, "{}", match self {
1907 Self::Timeout => "A timeout was encountered while trying to acquire the next frame",
1908 Self::Outdated => "The underlying surface has changed, and therefore the swap chain must be updated",
1909 Self::Lost => "The swap chain has been lost and needs to be recreated",
1910 Self::OutOfMemory => "There is no more memory left to allocate a new frame",
1911 })
1912 }
1913}
1914
1915impl error::Error for SurfaceError {}
1916
1917impl Default for Instance {
1918 /// Creates a new instance of wgpu with default options.
1919 ///
1920 /// Backends are set to `Backends::all()`, and FXC is chosen as the `dx12_shader_compiler`.
1921 ///
1922 /// # Panics
1923 ///
1924 /// If no backend feature for the active target platform is enabled,
1925 /// this method will panic, see [`Instance::enabled_backend_features()`].
1926 fn default() -> Self {
1927 Self::new(InstanceDescriptor::default())
1928 }
1929}
1930
1931impl Instance {
1932 /// Returns which backends can be picked for the current build configuration.
1933 ///
1934 /// The returned set depends on a combination of target platform and enabled features.
1935 /// This does *not* do any runtime checks and is exclusively based on compile time information.
1936 ///
1937 /// `InstanceDescriptor::backends` does not need to be a subset of this,
    /// but any backend that is not in this set will not be picked.
1939 ///
    /// TODO: Right now it's not yet possible to opt out of all backends on some platforms.
1941 /// See <https://github.com/gfx-rs/wgpu/issues/3514>
1942 /// * Windows/Linux/Android: always enables Vulkan and GLES with no way to opt out
1943 pub const fn enabled_backend_features() -> Backends {
1944 let mut backends = Backends::empty();
1945
1946 if cfg!(native) {
1947 if cfg!(metal) {
1948 backends = backends.union(Backends::METAL);
1949 }
1950 if cfg!(dx12) {
1951 backends = backends.union(Backends::DX12);
1952 }
1953
1954 // Windows, Android, Linux currently always enable Vulkan and OpenGL.
1955 // See <https://github.com/gfx-rs/wgpu/issues/3514>
1956 if cfg!(target_os = "windows") || cfg!(unix) {
1957 backends = backends.union(Backends::VULKAN).union(Backends::GL);
1958 }
1959
1960 // Vulkan on Mac/iOS is only available through vulkan-portability.
1961 if (cfg!(target_os = "ios") || cfg!(target_os = "macos"))
1962 && cfg!(feature = "vulkan-portability")
1963 {
1964 backends = backends.union(Backends::VULKAN);
1965 }
1966
1967 // GL on Mac is only available through angle.
1968 if cfg!(target_os = "macos") && cfg!(feature = "angle") {
1969 backends = backends.union(Backends::GL);
1970 }
1971 } else {
1972 if cfg!(webgpu) {
1973 backends = backends.union(Backends::BROWSER_WEBGPU);
1974 }
1975 if cfg!(webgl) {
1976 backends = backends.union(Backends::GL);
1977 }
1978 }
1979
1980 backends
1981 }
1982
    /// Create a new instance of wgpu.
1984 ///
1985 /// # Arguments
1986 ///
1987 /// - `instance_desc` - Has fields for which [backends][Backends] wgpu will choose
1988 /// during instantiation, and which [DX12 shader compiler][Dx12Compiler] wgpu will use.
1989 ///
1990 /// [`Backends::BROWSER_WEBGPU`] takes a special role:
1991 /// If it is set and WebGPU support is detected, this instance will *only* be able to create
1992 /// WebGPU adapters. If you instead want to force use of WebGL, either
    /// disable the `webgpu` compile-time feature or don't add the [`Backends::BROWSER_WEBGPU`]
    /// flag to the `instance_desc`'s `backends` field.
    /// If it is set and WebGPU support is *not* detected, the instance will use wgpu-core
    /// to create adapters, meaning that if the `webgl` feature is enabled, it is able to create
1997 /// a WebGL adapter.
1998 ///
1999 /// # Panics
2000 ///
2001 /// If no backend feature for the active target platform is enabled,
2002 /// this method will panic, see [`Instance::enabled_backend_features()`].
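    ///
    /// A minimal usage sketch with the default descriptor (equivalent to `Instance::default()`):
    ///
    /// ```no_run
    /// let instance = wgpu::Instance::new(wgpu::InstanceDescriptor::default());
    /// ```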
2003 #[allow(unreachable_code)]
2004 pub fn new(_instance_desc: InstanceDescriptor) -> Self {
2005 if Self::enabled_backend_features().is_empty() {
2006 panic!(
2007 "No wgpu backend feature that is implemented for the target platform was enabled. \
2008 See `wgpu::Instance::enabled_backend_features()` for more information."
2009 );
2010 }
2011
2012 #[cfg(webgpu)]
2013 {
2014 let is_only_available_backend = !cfg!(wgpu_core);
2015 let requested_webgpu = _instance_desc.backends.contains(Backends::BROWSER_WEBGPU);
2016 let support_webgpu =
2017 crate::backend::get_browser_gpu_property().map_or(false, |gpu| !gpu.is_undefined());
2018
2019 if is_only_available_backend || (requested_webgpu && support_webgpu) {
2020 return Self {
2021 context: Arc::from(crate::backend::ContextWebGpu::init(_instance_desc)),
2022 };
2023 }
2024 }
2025
2026 #[cfg(wgpu_core)]
2027 {
2028 return Self {
2029 context: Arc::from(crate::backend::ContextWgpuCore::init(_instance_desc)),
2030 };
2031 }
2032
2033 unreachable!(
2034 "Earlier check of `enabled_backend_features` should have prevented getting here!"
2035 );
2036 }
2037
    /// Create a new instance of wgpu from a wgpu-hal instance.
2039 ///
2040 /// # Arguments
2041 ///
2042 /// - `hal_instance` - wgpu-hal instance.
2043 ///
2044 /// # Safety
2045 ///
2046 /// Refer to the creation of wgpu-hal Instance for every backend.
2047 #[cfg(wgpu_core)]
2048 pub unsafe fn from_hal<A: wgc::hal_api::HalApi>(hal_instance: A::Instance) -> Self {
2049 Self {
2050 context: Arc::new(unsafe {
2051 crate::backend::ContextWgpuCore::from_hal_instance::<A>(hal_instance)
2052 }),
2053 }
2054 }
2055
2056 /// Return a reference to a specific backend instance, if available.
2057 ///
2058 /// If this `Instance` has a wgpu-hal [`Instance`] for backend
2059 /// `A`, return a reference to it. Otherwise, return `None`.
2060 ///
2061 /// # Safety
2062 ///
2063 /// - The raw instance handle returned must not be manually destroyed.
2064 ///
2065 /// [`Instance`]: hal::Api::Instance
2066 #[cfg(wgpu_core)]
2067 pub unsafe fn as_hal<A: wgc::hal_api::HalApi>(&self) -> Option<&A::Instance> {
2068 self.context
2069 .as_any()
2070 // If we don't have a wgpu-core instance, we don't have a hal instance either.
2071 .downcast_ref::<crate::backend::ContextWgpuCore>()
2072 .and_then(|ctx| unsafe { ctx.instance_as_hal::<A>() })
2073 }
2074
    /// Create a new instance of wgpu from a wgpu-core instance.
2076 ///
2077 /// # Arguments
2078 ///
2079 /// - `core_instance` - wgpu-core instance.
2080 ///
2081 /// # Safety
2082 ///
2083 /// Refer to the creation of wgpu-core Instance.
2084 #[cfg(wgpu_core)]
2085 pub unsafe fn from_core(core_instance: wgc::instance::Instance) -> Self {
2086 Self {
2087 context: Arc::new(unsafe {
2088 crate::backend::ContextWgpuCore::from_core_instance(core_instance)
2089 }),
2090 }
2091 }
2092
2093 /// Retrieves all available [`Adapter`]s that match the given [`Backends`].
2094 ///
2095 /// Always returns an empty vector if the instance decided upon creation to
    /// target WebGPU, since adapter creation is always async on WebGPU.
2097 ///
2098 /// # Arguments
2099 ///
2100 /// - `backends` - Backends from which to enumerate adapters.
2101 #[cfg(wgpu_core)]
2102 pub fn enumerate_adapters(&self, backends: Backends) -> Vec<Adapter> {
2103 let context = Arc::clone(&self.context);
2104 self.context
2105 .as_any()
2106 .downcast_ref::<crate::backend::ContextWgpuCore>()
2107 .map(|ctx| {
2108 ctx.enumerate_adapters(backends)
2109 .into_iter()
2110 .map(move |id| crate::Adapter {
2111 context: Arc::clone(&context),
2112 id: ObjectId::from(id),
2113 data: Box::new(()),
2114 })
2115 .collect()
2116 })
2117 .unwrap_or_default()
2118 }
2119
2120 /// Retrieves an [`Adapter`] which matches the given [`RequestAdapterOptions`].
2121 ///
    /// Some options are "soft", and are treated as non-mandatory. Others are "hard".
    ///
    /// If no adapter is found that satisfies all the "hard" options, `None` is returned.
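    ///
    /// A minimal usage sketch; the request is asynchronous, so it is wrapped in an `async`
    /// function here and must be driven by whatever executor the application uses:
    ///
    /// ```no_run
    /// # async fn example(instance: wgpu::Instance) {
    /// let adapter = instance
    ///     .request_adapter(&wgpu::RequestAdapterOptions {
    ///         power_preference: wgpu::PowerPreference::HighPerformance,
    ///         force_fallback_adapter: false,
    ///         compatible_surface: None,
    ///     })
    ///     .await
    ///     .expect("no suitable adapter found");
    /// # }
    /// ```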
2125 pub fn request_adapter(
2126 &self,
2127 options: &RequestAdapterOptions<'_, '_>,
2128 ) -> impl Future<Output = Option<Adapter>> + WasmNotSend {
2129 let context = Arc::clone(&self.context);
2130 let adapter = self.context.instance_request_adapter(options);
2131 async move {
2132 adapter
2133 .await
2134 .map(|(id, data)| Adapter { context, id, data })
2135 }
2136 }
2137
2138 /// Converts a wgpu-hal `ExposedAdapter` to a wgpu [`Adapter`].
2139 ///
2140 /// # Safety
2141 ///
    /// `hal_adapter` must be created from this instance's internal handle.
2143 #[cfg(wgpu_core)]
2144 pub unsafe fn create_adapter_from_hal<A: wgc::hal_api::HalApi>(
2145 &self,
2146 hal_adapter: hal::ExposedAdapter<A>,
2147 ) -> Adapter {
2148 let context = Arc::clone(&self.context);
2149 let id = unsafe {
2150 context
2151 .as_any()
2152 .downcast_ref::<crate::backend::ContextWgpuCore>()
2153 .unwrap()
2154 .create_adapter_from_hal(hal_adapter)
2155 .into()
2156 };
2157 Adapter {
2158 context,
2159 id,
2160 data: Box::new(()),
2161 }
2162 }
2163
    /// Creates a new surface targeting a given window/canvas/surface/etc.
2165 ///
2166 /// Internally, this creates surfaces for all backends that are enabled for this instance.
2167 ///
2168 /// See [`SurfaceTarget`] for what targets are supported.
2169 /// See [`Instance::create_surface_unsafe`] for surface creation with unsafe target variants.
2170 ///
    /// Most commonly used are window handles (or providers of window handles)
    /// which can be passed directly as they're automatically converted to [`SurfaceTarget`].
2173 pub fn create_surface<'window>(
2174 &self,
2175 target: impl Into<SurfaceTarget<'window>>,
2176 ) -> Result<Surface<'window>, CreateSurfaceError> {
        // Handle source (i.e. the window) that we optionally take ownership of, so that the surface can outlast the window.
2178 let handle_source;
2179
2180 let target = target.into();
2181 let mut surface = match target {
2182 SurfaceTarget::Window(window) => unsafe {
2183 let surface = self.create_surface_unsafe(
2184 SurfaceTargetUnsafe::from_window(&window).map_err(|e| CreateSurfaceError {
2185 inner: CreateSurfaceErrorKind::RawHandle(e),
2186 })?,
2187 );
2188 handle_source = Some(window);
2189
2190 surface
2191 }?,
2192
2193 #[cfg(any(webgpu, webgl))]
2194 SurfaceTarget::Canvas(canvas) => {
2195 handle_source = None;
2196
2197 let value: &wasm_bindgen::JsValue = &canvas;
2198 let obj = std::ptr::NonNull::from(value).cast();
2199 let raw_window_handle = raw_window_handle::WebCanvasWindowHandle::new(obj).into();
2200 let raw_display_handle = raw_window_handle::WebDisplayHandle::new().into();
2201
2202 // Note that we need to call this while we still have `value` around.
                // This is safe without storing the canvas in `handle_source` since the surface will create a copy internally.
2204 unsafe {
2205 self.create_surface_unsafe(SurfaceTargetUnsafe::RawHandle {
2206 raw_display_handle,
2207 raw_window_handle,
2208 })
2209 }?
2210 }
2211
2212 #[cfg(any(webgpu, webgl))]
2213 SurfaceTarget::OffscreenCanvas(canvas) => {
2214 handle_source = None;
2215
2216 let value: &wasm_bindgen::JsValue = &canvas;
2217 let obj = std::ptr::NonNull::from(value).cast();
2218 let raw_window_handle =
2219 raw_window_handle::WebOffscreenCanvasWindowHandle::new(obj).into();
2220 let raw_display_handle = raw_window_handle::WebDisplayHandle::new().into();
2221
2222 // Note that we need to call this while we still have `value` around.
                // This is safe without storing the canvas in `handle_source` since the surface will create a copy internally.
2224 unsafe {
2225 self.create_surface_unsafe(SurfaceTargetUnsafe::RawHandle {
2226 raw_display_handle,
2227 raw_window_handle,
2228 })
2229 }?
2230 }
2231 };
2232
2233 surface._handle_source = handle_source;
2234
2235 Ok(surface)
2236 }
2237
2238 /// Creates a new surface targeting a given window/canvas/surface/etc. using an unsafe target.
2239 ///
2240 /// Internally, this creates surfaces for all backends that are enabled for this instance.
2241 ///
2242 /// See [`SurfaceTargetUnsafe`] for what targets are supported.
2243 /// See [`Instance::create_surface`] for surface creation with safe target variants.
2244 ///
2245 /// # Safety
2246 ///
2247 /// - See respective [`SurfaceTargetUnsafe`] variants for safety requirements.
2248 pub unsafe fn create_surface_unsafe<'window>(
2249 &self,
2250 target: SurfaceTargetUnsafe,
2251 ) -> Result<Surface<'window>, CreateSurfaceError> {
2252 let (id, data) = unsafe { self.context.instance_create_surface(target) }?;
2253
2254 Ok(Surface {
2255 context: Arc::clone(&self.context),
2256 _handle_source: None,
2257 id,
2258 surface_data: data,
2259 config: Mutex::new(None),
2260 })
2261 }
2262
2263 /// Polls all devices.
2264 ///
2265 /// If `force_wait` is true and this is not running on the web, then this
2266 /// function will block until all in-flight buffers have been mapped and
2267 /// all submitted commands have finished execution.
2268 ///
2269 /// Return `true` if all devices' queues are empty, or `false` if there are
2270 /// queue submissions still in flight. (Note that, unless access to all
2271 /// [`Queue`s] associated with this [`Instance`] is coordinated somehow,
2272 /// this information could be out of date by the time the caller receives
2273 /// it. `Queue`s can be shared between threads, and other threads could
2274 /// submit new work at any time.)
2275 ///
2276 /// On the web, this is a no-op. `Device`s are automatically polled.
2277 ///
2278 /// [`Queue`s]: Queue
2279 pub fn poll_all(&self, force_wait: bool) -> bool {
2280 self.context.instance_poll_all_devices(force_wait)
2281 }
2282
2283 /// Generates memory report.
2284 ///
    /// Returns `None` if the feature is not supported by the backend,
    /// which happens only when WebGPU was pre-selected at instance creation.
2287 #[cfg(wgpu_core)]
2288 pub fn generate_report(&self) -> Option<wgc::global::GlobalReport> {
2289 self.context
2290 .as_any()
2291 .downcast_ref::<crate::backend::ContextWgpuCore>()
2292 .map(|ctx| ctx.generate_report())
2293 }
2294}
2295
2296impl Adapter {
2297 /// Requests a connection to a physical device, creating a logical device.
2298 ///
2299 /// Returns the [`Device`] together with a [`Queue`] that executes command buffers.
2300 ///
2301 /// # Arguments
2302 ///
2303 /// - `desc` - Description of the features and limits requested from the given device.
2304 /// - `trace_path` - Can be used for API call tracing, if that feature is
2305 /// enabled in `wgpu-core`.
2306 ///
2307 /// # Panics
2308 ///
2309 /// - Features specified by `desc` are not supported by this adapter.
2310 /// - Unsafe features were requested but not enabled when requesting the adapter.
2311 /// - Limits requested exceed the values provided by the adapter.
2312 /// - Adapter does not support all features wgpu requires to safely operate.
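    ///
    /// A minimal usage sketch requesting a device with default features and limits; the request
    /// is asynchronous, so it is wrapped in an `async` function here:
    ///
    /// ```no_run
    /// # async fn example(adapter: wgpu::Adapter) {
    /// let (device, queue) = adapter
    ///     .request_device(&wgpu::DeviceDescriptor::default(), None)
    ///     .await
    ///     .expect("failed to create device");
    /// # }
    /// ```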
2313 pub fn request_device(
2314 &self,
2315 desc: &DeviceDescriptor<'_>,
2316 trace_path: Option<&std::path::Path>,
2317 ) -> impl Future<Output = Result<(Device, Queue), RequestDeviceError>> + WasmNotSend {
2318 let context = Arc::clone(&self.context);
2319 let device = DynContext::adapter_request_device(
2320 &*self.context,
2321 &self.id,
2322 self.data.as_ref(),
2323 desc,
2324 trace_path,
2325 );
2326 async move {
2327 device.await.map(
2328 |DeviceRequest {
2329 device_id,
2330 device_data,
2331 queue_id,
2332 queue_data,
2333 }| {
2334 (
2335 Device {
2336 context: Arc::clone(&context),
2337 id: device_id,
2338 data: device_data,
2339 },
2340 Queue {
2341 context,
2342 id: queue_id,
2343 data: queue_data,
2344 },
2345 )
2346 },
2347 )
2348 }
2349 }
2350
2351 /// Create a wgpu [`Device`] and [`Queue`] from a wgpu-hal `OpenDevice`
2352 ///
2353 /// # Safety
2354 ///
    /// - `hal_device` must be created from this adapter's internal handle.
2356 /// - `desc.features` must be a subset of `hal_device` features.
2357 #[cfg(wgpu_core)]
2358 pub unsafe fn create_device_from_hal<A: wgc::hal_api::HalApi>(
2359 &self,
2360 hal_device: hal::OpenDevice<A>,
2361 desc: &DeviceDescriptor<'_>,
2362 trace_path: Option<&std::path::Path>,
2363 ) -> Result<(Device, Queue), RequestDeviceError> {
2364 let context = Arc::clone(&self.context);
2365 unsafe {
2366 self.context
2367 .as_any()
2368 .downcast_ref::<crate::backend::ContextWgpuCore>()
2369 // Part of the safety requirements is that the device was generated from the same adapter.
2370 // Therefore, unwrap is fine here since only WgpuCoreContext based adapters have the ability to create hal devices.
2371 .unwrap()
2372 .create_device_from_hal(&self.id.into(), hal_device, desc, trace_path)
2373 }
2374 .map(|(device, queue)| {
2375 (
2376 Device {
2377 context: Arc::clone(&context),
2378 id: device.id().into(),
2379 data: Box::new(device),
2380 },
2381 Queue {
2382 context,
2383 id: queue.id().into(),
2384 data: Box::new(queue),
2385 },
2386 )
2387 })
2388 }
2389
2390 /// Apply a callback to this `Adapter`'s underlying backend adapter.
2391 ///
2392 /// If this `Adapter` is implemented by the backend API given by `A` (Vulkan,
2393 /// Dx12, etc.), then apply `hal_adapter_callback` to `Some(&adapter)`, where
2394 /// `adapter` is the underlying backend adapter type, [`A::Adapter`].
2395 ///
2396 /// If this `Adapter` uses a different backend, apply `hal_adapter_callback`
2397 /// to `None`.
2398 ///
2399 /// The adapter is locked for reading while `hal_adapter_callback` runs. If
2400 /// the callback attempts to perform any `wgpu` operations that require
2401 /// write access to the adapter, deadlock will occur. The locks are
2402 /// automatically released when the callback returns.
2403 ///
2404 /// # Safety
2405 ///
2406 /// - The raw handle passed to the callback must not be manually destroyed.
2407 ///
2408 /// [`A::Adapter`]: hal::Api::Adapter
2409 #[cfg(wgpu_core)]
2410 pub unsafe fn as_hal<A: wgc::hal_api::HalApi, F: FnOnce(Option<&A::Adapter>) -> R, R>(
2411 &self,
2412 hal_adapter_callback: F,
2413 ) -> R {
2414 if let Some(ctx) = self
2415 .context
2416 .as_any()
2417 .downcast_ref::<crate::backend::ContextWgpuCore>()
2418 {
2419 unsafe { ctx.adapter_as_hal::<A, F, R>(self.id.into(), hal_adapter_callback) }
2420 } else {
2421 hal_adapter_callback(None)
2422 }
2423 }
2424
2425 /// Returns whether this adapter may present to the passed surface.
2426 pub fn is_surface_supported(&self, surface: &Surface<'_>) -> bool {
2427 DynContext::adapter_is_surface_supported(
2428 &*self.context,
2429 &self.id,
2430 self.data.as_ref(),
2431 &surface.id,
2432 surface.surface_data.as_ref(),
2433 )
2434 }
2435
2436 /// The features which can be used to create devices on this adapter.
2437 pub fn features(&self) -> Features {
2438 DynContext::adapter_features(&*self.context, &self.id, self.data.as_ref())
2439 }
2440
2441 /// The best limits which can be used to create devices on this adapter.
2442 pub fn limits(&self) -> Limits {
2443 DynContext::adapter_limits(&*self.context, &self.id, self.data.as_ref())
2444 }
2445
2446 /// Get info about the adapter itself.
2447 pub fn get_info(&self) -> AdapterInfo {
2448 DynContext::adapter_get_info(&*self.context, &self.id, self.data.as_ref())
2449 }
2450
    /// Get the downlevel capabilities of this adapter.
2452 pub fn get_downlevel_capabilities(&self) -> DownlevelCapabilities {
2453 DynContext::adapter_downlevel_capabilities(&*self.context, &self.id, self.data.as_ref())
2454 }
2455
2456 /// Returns the features supported for a given texture format by this adapter.
2457 ///
2458 /// Note that the WebGPU spec further restricts the available usages/features.
2459 /// To disable these restrictions on a device, request the [`Features::TEXTURE_ADAPTER_SPECIFIC_FORMAT_FEATURES`] feature.
2460 pub fn get_texture_format_features(&self, format: TextureFormat) -> TextureFormatFeatures {
2461 DynContext::adapter_get_texture_format_features(
2462 &*self.context,
2463 &self.id,
2464 self.data.as_ref(),
2465 format,
2466 )
2467 }
2468
2469 /// Generates a timestamp using the clock used by the presentation engine.
2470 ///
2471 /// When comparing completely opaque timestamp systems, we need a way of generating timestamps that signal
2472 /// the exact same time. You can do this by calling your own timestamp function immediately after a call to
2473 /// this function. This should result in timestamps that are 0.5 to 5 microseconds apart. There are locks
    /// that must be taken during the call, so don't call your timestamp function before this one returns.
2475 ///
2476 /// ```no_run
2477 /// # let adapter: wgpu::Adapter = panic!();
2478 /// # let some_code = || wgpu::PresentationTimestamp::INVALID_TIMESTAMP;
2479 /// use std::time::{Duration, Instant};
2480 /// let presentation = adapter.get_presentation_timestamp();
2481 /// let instant = Instant::now();
2482 ///
2483 /// // We can now turn a new presentation timestamp into an Instant.
2484 /// let some_pres_timestamp = some_code();
2485 /// let duration = Duration::from_nanos((some_pres_timestamp.0 - presentation.0) as u64);
2486 /// let new_instant: Instant = instant + duration;
2487 /// ```
    ///
2489 /// [Instant]: std::time::Instant
2490 pub fn get_presentation_timestamp(&self) -> PresentationTimestamp {
2491 DynContext::adapter_get_presentation_timestamp(&*self.context, &self.id, self.data.as_ref())
2492 }
2493}
2494
2495impl Device {
2496 /// Check for resource cleanups and mapping callbacks. Will block if [`Maintain::Wait`] is passed.
2497 ///
2498 /// Return `true` if the queue is empty, or `false` if there are more queue
2499 /// submissions still in flight. (Note that, unless access to the [`Queue`] is
2500 /// coordinated somehow, this information could be out of date by the time
2501 /// the caller receives it. `Queue`s can be shared between threads, so
2502 /// other threads could submit new work at any time.)
2503 ///
2504 /// When running on WebGPU, this is a no-op. `Device`s are automatically polled.
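    ///
    /// A minimal usage sketch that blocks until all submitted work is done (assumes a `device`):
    ///
    /// ```no_run
    /// # let device: wgpu::Device = panic!();
    /// let _ = device.poll(wgpu::Maintain::Wait);
    /// ```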
2505 pub fn poll(&self, maintain: Maintain) -> MaintainResult {
2506 DynContext::device_poll(&*self.context, &self.id, self.data.as_ref(), maintain)
2507 }
2508
2509 /// The features which can be used on this device.
2510 ///
2511 /// No additional features can be used, even if the underlying adapter can support them.
2512 pub fn features(&self) -> Features {
2513 DynContext::device_features(&*self.context, &self.id, self.data.as_ref())
2514 }
2515
2516 /// The limits which can be used on this device.
2517 ///
2518 /// No better limits can be used, even if the underlying adapter can support them.
2519 pub fn limits(&self) -> Limits {
2520 DynContext::device_limits(&*self.context, &self.id, self.data.as_ref())
2521 }
2522
2523 /// Creates a shader module from either SPIR-V or WGSL source code.
2524 ///
2525 /// <div class="warning">
2526 // NOTE: Keep this in sync with `naga::front::wgsl::parse_str`!
2527 // NOTE: Keep this in sync with `wgpu_core::Global::device_create_shader_module`!
2528 ///
2529 /// This function may consume a lot of stack space. Compiler-enforced limits for parsing
2530 /// recursion exist; if shader compilation runs into them, it will return an error gracefully.
2531 /// However, on some build profiles and platforms, the default stack size for a thread may be
2532 /// exceeded before this limit is reached during parsing. Callers should ensure that there is
2533 /// enough stack space for this, particularly if calls to this method are exposed to user
2534 /// input.
2535 ///
2536 /// </div>
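    ///
    /// A minimal usage sketch compiling a trivial WGSL module from an inline string, assuming a
    /// `device` and that the default `wgsl` feature is enabled:
    ///
    /// ```no_run
    /// # let device: wgpu::Device = panic!();
    /// let module = device.create_shader_module(wgpu::ShaderModuleDescriptor {
    ///     label: Some("example shader"),
    ///     source: wgpu::ShaderSource::Wgsl("@compute @workgroup_size(1) fn main() {}".into()),
    /// });
    /// ```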
2537 pub fn create_shader_module(&self, desc: ShaderModuleDescriptor<'_>) -> ShaderModule {
2538 let (id, data) = DynContext::device_create_shader_module(
2539 &*self.context,
2540 &self.id,
2541 self.data.as_ref(),
2542 desc,
2543 wgt::ShaderBoundChecks::new(),
2544 );
2545 ShaderModule {
2546 context: Arc::clone(&self.context),
2547 id,
2548 data,
2549 }
2550 }
2551
2552 /// Creates a shader module from either SPIR-V or WGSL source code without runtime checks.
2553 ///
2554 /// # Safety
    /// In contrast with [`create_shader_module`](Self::create_shader_module) this function
    /// creates a shader module without runtime checks, which allows shaders to perform
    /// operations which can lead to undefined behavior like indexing out of bounds. Thus it is
    /// the caller's responsibility to pass a shader which doesn't perform any of these
    /// operations.
2560 ///
2561 /// This has no effect on web.
2562 pub unsafe fn create_shader_module_unchecked(
2563 &self,
2564 desc: ShaderModuleDescriptor<'_>,
2565 ) -> ShaderModule {
2566 let (id, data) = DynContext::device_create_shader_module(
2567 &*self.context,
2568 &self.id,
2569 self.data.as_ref(),
2570 desc,
2571 unsafe { wgt::ShaderBoundChecks::unchecked() },
2572 );
2573 ShaderModule {
2574 context: Arc::clone(&self.context),
2575 id,
2576 data,
2577 }
2578 }
2579
2580 /// Creates a shader module from SPIR-V binary directly.
2581 ///
2582 /// # Safety
2583 ///
2584 /// This function passes binary data to the backend as-is and can potentially result in a
2585 /// driver crash or bogus behaviour. No attempt is made to ensure that data is valid SPIR-V.
2586 ///
2587 /// See also [`include_spirv_raw!`] and [`util::make_spirv_raw`].
2588 pub unsafe fn create_shader_module_spirv(
2589 &self,
2590 desc: &ShaderModuleDescriptorSpirV<'_>,
2591 ) -> ShaderModule {
2592 let (id, data) = unsafe {
2593 DynContext::device_create_shader_module_spirv(
2594 &*self.context,
2595 &self.id,
2596 self.data.as_ref(),
2597 desc,
2598 )
2599 };
2600 ShaderModule {
2601 context: Arc::clone(&self.context),
2602 id,
2603 data,
2604 }
2605 }
2606
2607 /// Creates an empty [`CommandEncoder`].
2608 pub fn create_command_encoder(&self, desc: &CommandEncoderDescriptor<'_>) -> CommandEncoder {
2609 let (id, data) = DynContext::device_create_command_encoder(
2610 &*self.context,
2611 &self.id,
2612 self.data.as_ref(),
2613 desc,
2614 );
2615 CommandEncoder {
2616 context: Arc::clone(&self.context),
2617 id: Some(id),
2618 data,
2619 }
2620 }
2621
2622 /// Creates an empty [`RenderBundleEncoder`].
2623 pub fn create_render_bundle_encoder(
2624 &self,
2625 desc: &RenderBundleEncoderDescriptor<'_>,
2626 ) -> RenderBundleEncoder<'_> {
2627 let (id, data) = DynContext::device_create_render_bundle_encoder(
2628 &*self.context,
2629 &self.id,
2630 self.data.as_ref(),
2631 desc,
2632 );
2633 RenderBundleEncoder {
2634 context: Arc::clone(&self.context),
2635 id,
2636 data,
2637 parent: self,
2638 _p: Default::default(),
2639 }
2640 }
2641
2642 /// Creates a new [`BindGroup`].
2643 pub fn create_bind_group(&self, desc: &BindGroupDescriptor<'_>) -> BindGroup {
2644 let (id, data) = DynContext::device_create_bind_group(
2645 &*self.context,
2646 &self.id,
2647 self.data.as_ref(),
2648 desc,
2649 );
2650 BindGroup {
2651 context: Arc::clone(&self.context),
2652 id,
2653 data,
2654 }
2655 }
2656
2657 /// Creates a [`BindGroupLayout`].
2658 pub fn create_bind_group_layout(
2659 &self,
2660 desc: &BindGroupLayoutDescriptor<'_>,
2661 ) -> BindGroupLayout {
2662 let (id, data) = DynContext::device_create_bind_group_layout(
2663 &*self.context,
2664 &self.id,
2665 self.data.as_ref(),
2666 desc,
2667 );
2668 BindGroupLayout {
2669 context: Arc::clone(&self.context),
2670 id,
2671 data,
2672 }
2673 }
2674
2675 /// Creates a [`PipelineLayout`].
2676 pub fn create_pipeline_layout(&self, desc: &PipelineLayoutDescriptor<'_>) -> PipelineLayout {
2677 let (id, data) = DynContext::device_create_pipeline_layout(
2678 &*self.context,
2679 &self.id,
2680 self.data.as_ref(),
2681 desc,
2682 );
2683 PipelineLayout {
2684 context: Arc::clone(&self.context),
2685 id,
2686 data,
2687 }
2688 }
2689
2690 /// Creates a [`RenderPipeline`].
2691 pub fn create_render_pipeline(&self, desc: &RenderPipelineDescriptor<'_>) -> RenderPipeline {
2692 let (id, data) = DynContext::device_create_render_pipeline(
2693 &*self.context,
2694 &self.id,
2695 self.data.as_ref(),
2696 desc,
2697 );
2698 RenderPipeline {
2699 context: Arc::clone(&self.context),
2700 id,
2701 data,
2702 }
2703 }
2704
2705 /// Creates a [`ComputePipeline`].
2706 pub fn create_compute_pipeline(&self, desc: &ComputePipelineDescriptor<'_>) -> ComputePipeline {
2707 let (id, data) = DynContext::device_create_compute_pipeline(
2708 &*self.context,
2709 &self.id,
2710 self.data.as_ref(),
2711 desc,
2712 );
2713 ComputePipeline {
2714 context: Arc::clone(&self.context),
2715 id,
2716 data,
2717 }
2718 }
2719
2720 /// Creates a [`Buffer`].
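    ///
    /// A minimal usage sketch of a 1 KiB vertex buffer that can also be copied into (the size and
    /// usages are illustrative):
    ///
    /// ```no_run
    /// # let device: wgpu::Device = panic!();
    /// let buffer = device.create_buffer(&wgpu::BufferDescriptor {
    ///     label: Some("example buffer"),
    ///     size: 1024,
    ///     usage: wgpu::BufferUsages::VERTEX | wgpu::BufferUsages::COPY_DST,
    ///     mapped_at_creation: false,
    /// });
    /// ```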
2721 pub fn create_buffer(&self, desc: &BufferDescriptor<'_>) -> Buffer {
2722 let mut map_context = MapContext::new(desc.size);
2723 if desc.mapped_at_creation {
2724 map_context.initial_range = 0..desc.size;
2725 }
2726
2727 let (id, data) =
2728 DynContext::device_create_buffer(&*self.context, &self.id, self.data.as_ref(), desc);
2729
2730 Buffer {
2731 context: Arc::clone(&self.context),
2732 id,
2733 data,
2734 map_context: Mutex::new(map_context),
2735 size: desc.size,
2736 usage: desc.usage,
2737 }
2738 }
2739
2740 /// Creates a new [`Texture`].
2741 ///
2742 /// `desc` specifies the general format of the texture.
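    ///
    /// A minimal usage sketch of a 256x256 RGBA texture that can be sampled and copied into (the
    /// size, format, and usages are illustrative):
    ///
    /// ```no_run
    /// # let device: wgpu::Device = panic!();
    /// let texture = device.create_texture(&wgpu::TextureDescriptor {
    ///     label: Some("example texture"),
    ///     size: wgpu::Extent3d {
    ///         width: 256,
    ///         height: 256,
    ///         depth_or_array_layers: 1,
    ///     },
    ///     mip_level_count: 1,
    ///     sample_count: 1,
    ///     dimension: wgpu::TextureDimension::D2,
    ///     format: wgpu::TextureFormat::Rgba8UnormSrgb,
    ///     usage: wgpu::TextureUsages::TEXTURE_BINDING | wgpu::TextureUsages::COPY_DST,
    ///     view_formats: &[],
    /// });
    /// ```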
2743 pub fn create_texture(&self, desc: &TextureDescriptor<'_>) -> Texture {
2744 let (id, data) =
2745 DynContext::device_create_texture(&*self.context, &self.id, self.data.as_ref(), desc);
2746 Texture {
2747 context: Arc::clone(&self.context),
2748 id,
2749 data,
2750 owned: true,
2751 descriptor: TextureDescriptor {
2752 label: None,
2753 view_formats: &[],
2754 ..desc.clone()
2755 },
2756 }
2757 }
2758
2759 /// Creates a [`Texture`] from a wgpu-hal Texture.
2760 ///
2761 /// # Safety
2762 ///
    /// - `hal_texture` must be created from this device's internal handle
2764 /// - `hal_texture` must be created respecting `desc`
2765 /// - `hal_texture` must be initialized
2766 #[cfg(wgpu_core)]
2767 pub unsafe fn create_texture_from_hal<A: wgc::hal_api::HalApi>(
2768 &self,
2769 hal_texture: A::Texture,
2770 desc: &TextureDescriptor<'_>,
2771 ) -> Texture {
2772 let texture = unsafe {
2773 self.context
2774 .as_any()
2775 .downcast_ref::<crate::backend::ContextWgpuCore>()
2776 // Part of the safety requirements is that the texture was generated from the same hal device.
2777 // Therefore, unwrap is fine here since only WgpuCoreContext has the ability to create hal textures.
2778 .unwrap()
2779 .create_texture_from_hal::<A>(
2780 hal_texture,
2781 self.data.as_ref().downcast_ref().unwrap(),
2782 desc,
2783 )
2784 };
2785 Texture {
2786 context: Arc::clone(&self.context),
2787 id: ObjectId::from(texture.id()),
2788 data: Box::new(texture),
2789 owned: true,
2790 descriptor: TextureDescriptor {
2791 label: None,
2792 view_formats: &[],
2793 ..desc.clone()
2794 },
2795 }
2796 }
2797
2798 /// Creates a [`Buffer`] from a wgpu-hal Buffer.
2799 ///
2800 /// # Safety
2801 ///
    /// - `hal_buffer` must be created from this device's internal handle
2803 /// - `hal_buffer` must be created respecting `desc`
2804 /// - `hal_buffer` must be initialized
2805 #[cfg(wgpu_core)]
2806 pub unsafe fn create_buffer_from_hal<A: wgc::hal_api::HalApi>(
2807 &self,
2808 hal_buffer: A::Buffer,
2809 desc: &BufferDescriptor<'_>,
2810 ) -> Buffer {
2811 let mut map_context = MapContext::new(desc.size);
2812 if desc.mapped_at_creation {
2813 map_context.initial_range = 0..desc.size;
2814 }
2815
2816 let (id, buffer) = unsafe {
2817 self.context
2818 .as_any()
2819 .downcast_ref::<crate::backend::ContextWgpuCore>()
2820 // Part of the safety requirements is that the buffer was generated from the same hal device.
2821 // Therefore, unwrap is fine here since only WgpuCoreContext has the ability to create hal buffers.
2822 .unwrap()
2823 .create_buffer_from_hal::<A>(
2824 hal_buffer,
2825 self.data.as_ref().downcast_ref().unwrap(),
2826 desc,
2827 )
2828 };
2829
2830 Buffer {
2831 context: Arc::clone(&self.context),
2832 id: ObjectId::from(id),
2833 data: Box::new(buffer),
2834 map_context: Mutex::new(map_context),
2835 size: desc.size,
2836 usage: desc.usage,
2837 }
2838 }
2839
2840 /// Creates a new [`Sampler`].
2841 ///
2842 /// `desc` specifies the behavior of the sampler.
2843 pub fn create_sampler(&self, desc: &SamplerDescriptor<'_>) -> Sampler {
2844 let (id, data) =
2845 DynContext::device_create_sampler(&*self.context, &self.id, self.data.as_ref(), desc);
2846 Sampler {
2847 context: Arc::clone(&self.context),
2848 id,
2849 data,
2850 }
2851 }
2852
2853 /// Creates a new [`QuerySet`].
2854 pub fn create_query_set(&self, desc: &QuerySetDescriptor<'_>) -> QuerySet {
2855 let (id, data) =
2856 DynContext::device_create_query_set(&*self.context, &self.id, self.data.as_ref(), desc);
2857 QuerySet {
2858 context: Arc::clone(&self.context),
2859 id,
2860 data,
2861 }
2862 }
2863
2864 /// Set a callback for errors that are not handled in error scopes.
2865 pub fn on_uncaptured_error(&self, handler: Box<dyn UncapturedErrorHandler>) {
2866 self.context
2867 .device_on_uncaptured_error(&self.id, self.data.as_ref(), handler);
2868 }
2869
2870 /// Push an error scope.
2871 pub fn push_error_scope(&self, filter: ErrorFilter) {
2872 self.context
2873 .device_push_error_scope(&self.id, self.data.as_ref(), filter);
2874 }
2875
2876 /// Pop an error scope.
2877 pub fn pop_error_scope(&self) -> impl Future<Output = Option<Error>> + WasmNotSend {
2878 self.context
2879 .device_pop_error_scope(&self.id, self.data.as_ref())
2880 }
2881
2882 /// Starts frame capture.
2883 pub fn start_capture(&self) {
2884 DynContext::device_start_capture(&*self.context, &self.id, self.data.as_ref())
2885 }
2886
2887 /// Stops frame capture.
2888 pub fn stop_capture(&self) {
2889 DynContext::device_stop_capture(&*self.context, &self.id, self.data.as_ref())
2890 }
2891
2892 /// Apply a callback to this `Device`'s underlying backend device.
2893 ///
2894 /// If this `Device` is implemented by the backend API given by `A` (Vulkan,
2895 /// Dx12, etc.), then apply `hal_device_callback` to `Some(&device)`, where
2896 /// `device` is the underlying backend device type, [`A::Device`].
2897 ///
2898 /// If this `Device` uses a different backend, apply `hal_device_callback`
2899 /// to `None`.
2900 ///
2901 /// The device is locked for reading while `hal_device_callback` runs. If
2902 /// the callback attempts to perform any `wgpu` operations that require
2903 /// write access to the device (destroying a buffer, say), deadlock will
2904 /// occur. The locks are automatically released when the callback returns.
2905 ///
2906 /// # Safety
2907 ///
2908 /// - The raw handle passed to the callback must not be manually destroyed.
2909 ///
2910 /// [`A::Device`]: hal::Api::Device
2911 #[cfg(wgpu_core)]
2912 pub unsafe fn as_hal<A: wgc::hal_api::HalApi, F: FnOnce(Option<&A::Device>) -> R, R>(
2913 &self,
2914 hal_device_callback: F,
2915 ) -> Option<R> {
2916 self.context
2917 .as_any()
2918 .downcast_ref::<crate::backend::ContextWgpuCore>()
2919 .map(|ctx| unsafe {
2920 ctx.device_as_hal::<A, F, R>(
2921 self.data.as_ref().downcast_ref().unwrap(),
2922 hal_device_callback,
2923 )
2924 })
2925 }
2926
2927 /// Destroy this device.
2928 pub fn destroy(&self) {
2929 DynContext::device_destroy(&*self.context, &self.id, self.data.as_ref())
2930 }
2931
2932 /// Set a DeviceLostCallback on this device.
2933 pub fn set_device_lost_callback(
2934 &self,
2935 callback: impl Fn(DeviceLostReason, String) + Send + 'static,
2936 ) {
2937 DynContext::device_set_device_lost_callback(
2938 &*self.context,
2939 &self.id,
2940 self.data.as_ref(),
2941 Box::new(callback),
2942 )
2943 }
2944
2945 /// Test-only function to make this device invalid.
2946 #[doc(hidden)]
2947 pub fn make_invalid(&self) {
2948 DynContext::device_make_invalid(&*self.context, &self.id, self.data.as_ref())
2949 }
2950}
2951
2952impl Drop for Device {
2953 fn drop(&mut self) {
2954 if !thread::panicking() {
2955 self.context.device_drop(&self.id, self.data.as_ref());
2956 }
2957 }
2958}
2959
2960/// Requesting a device from an [`Adapter`] failed.
2961#[derive(Clone, Debug)]
2962pub struct RequestDeviceError {
2963 inner: RequestDeviceErrorKind,
2964}
2965#[derive(Clone, Debug)]
2966enum RequestDeviceErrorKind {
2967 /// Error from [`wgpu_core`].
2968 // must match dependency cfg
2969 #[cfg(wgpu_core)]
2970 Core(wgc::instance::RequestDeviceError),
2971
2972 /// Error from web API that was called by `wgpu` to request a device.
2973 ///
2974 /// (This is currently never used by the webgl backend, but it could be.)
2975 #[cfg(webgpu)]
2976 WebGpu(wasm_bindgen::JsValue),
2977}
2978
2979#[cfg(send_sync)]
2980unsafe impl Send for RequestDeviceErrorKind {}
2981#[cfg(send_sync)]
2982unsafe impl Sync for RequestDeviceErrorKind {}
2983
2984#[cfg(send_sync)]
2985static_assertions::assert_impl_all!(RequestDeviceError: Send, Sync);
2986
2987impl fmt::Display for RequestDeviceError {
2988 fn fmt(&self, _f: &mut fmt::Formatter<'_>) -> fmt::Result {
2989 match &self.inner {
2990 #[cfg(wgpu_core)]
2991 RequestDeviceErrorKind::Core(error) => error.fmt(_f),
2992 #[cfg(webgpu)]
2993 RequestDeviceErrorKind::WebGpu(error_js_value) => {
2994 // wasm-bindgen provides a reasonable error stringification via `Debug` impl
2995 write!(_f, "{error_js_value:?}")
2996 }
2997 #[cfg(not(any(webgpu, wgpu_core)))]
2998 _ => unimplemented!("unknown `RequestDeviceErrorKind`"),
2999 }
3000 }
3001}
3002
3003impl error::Error for RequestDeviceError {
3004 fn source(&self) -> Option<&(dyn error::Error + 'static)> {
3005 match &self.inner {
3006 #[cfg(wgpu_core)]
3007 RequestDeviceErrorKind::Core(error) => error.source(),
3008 #[cfg(webgpu)]
3009 RequestDeviceErrorKind::WebGpu(_) => None,
3010 #[cfg(not(any(webgpu, wgpu_core)))]
3011 _ => unimplemented!("unknown `RequestDeviceErrorKind`"),
3012 }
3013 }
3014}
3015
3016#[cfg(wgpu_core)]
3017impl From<wgc::instance::RequestDeviceError> for RequestDeviceError {
3018 fn from(error: wgc::instance::RequestDeviceError) -> Self {
3019 Self {
3020 inner: RequestDeviceErrorKind::Core(error),
3021 }
3022 }
3023}
3024
3025/// [`Instance::create_surface()`] or a related function failed.
3026#[derive(Clone, Debug)]
3027#[non_exhaustive]
3028pub struct CreateSurfaceError {
3029 inner: CreateSurfaceErrorKind,
3030}
3031#[derive(Clone, Debug)]
3032enum CreateSurfaceErrorKind {
3033 /// Error from [`wgpu_hal`].
3034 #[cfg(wgpu_core)]
3035 Hal(wgc::instance::CreateSurfaceError),
3036
3037 /// Error from WebGPU surface creation.
3038 #[allow(dead_code)] // may be unused depending on target and features
3039 Web(String),
3040
3041 /// Error when trying to get a [`DisplayHandle`] or a [`WindowHandle`] from
3042 /// `raw_window_handle`.
3043 RawHandle(raw_window_handle::HandleError),
3044}
3045static_assertions::assert_impl_all!(CreateSurfaceError: Send, Sync);
3046
3047impl fmt::Display for CreateSurfaceError {
3048 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
3049 match &self.inner {
3050 #[cfg(wgpu_core)]
3051 CreateSurfaceErrorKind::Hal(e) => e.fmt(f),
3052 CreateSurfaceErrorKind::Web(e) => e.fmt(f),
3053 CreateSurfaceErrorKind::RawHandle(e) => e.fmt(f),
3054 }
3055 }
3056}
3057
3058impl error::Error for CreateSurfaceError {
3059 fn source(&self) -> Option<&(dyn error::Error + 'static)> {
3060 match &self.inner {
3061 #[cfg(wgpu_core)]
3062 CreateSurfaceErrorKind::Hal(e) => e.source(),
3063 CreateSurfaceErrorKind::Web(_) => None,
3064 CreateSurfaceErrorKind::RawHandle(e) => e.source(),
3065 }
3066 }
3067}
3068
3069#[cfg(wgpu_core)]
3070impl From<wgc::instance::CreateSurfaceError> for CreateSurfaceError {
3071 fn from(e: wgc::instance::CreateSurfaceError) -> Self {
3072 Self {
3073 inner: CreateSurfaceErrorKind::Hal(e),
3074 }
3075 }
3076}
3077
/// Error occurred while trying to asynchronously map a buffer.
3079#[derive(Clone, PartialEq, Eq, Debug)]
3080pub struct BufferAsyncError;
3081static_assertions::assert_impl_all!(BufferAsyncError: Send, Sync);
3082
3083impl fmt::Display for BufferAsyncError {
3084 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
3085 write!(f, "Error occurred when trying to async map a buffer")
3086 }
3087}
3088
3089impl error::Error for BufferAsyncError {}
3090
3091/// Type of buffer mapping.
3092#[derive(Debug, Clone, Copy, Eq, PartialEq)]
3093pub enum MapMode {
3094 /// Map only for reading
3095 Read,
3096 /// Map only for writing
3097 Write,
3098}
3099static_assertions::assert_impl_all!(MapMode: Send, Sync);
3100
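/// Converts a `RangeBounds` over buffer addresses into an `(offset, size)` pair,
/// where a `size` of `None` means "from `offset` to the end of the buffer".
/// Panics if the bounds resolve to a zero-size range.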
3101fn range_to_offset_size<S: RangeBounds<BufferAddress>>(
3102 bounds: S,
3103) -> (BufferAddress, Option<BufferSize>) {
3104 let offset = match bounds.start_bound() {
3105 Bound::Included(&bound) => bound,
3106 Bound::Excluded(&bound) => bound + 1,
3107 Bound::Unbounded => 0,
3108 };
3109 let size = match bounds.end_bound() {
3110 Bound::Included(&bound) => Some(bound + 1 - offset),
3111 Bound::Excluded(&bound) => Some(bound - offset),
3112 Bound::Unbounded => None,
3113 }
3114 .map(|size| BufferSize::new(size).expect("Buffer slices can not be empty"));
3115
3116 (offset, size)
3117}
3118
/// A read-only view into a mapped buffer.
3120///
3121/// To get a `BufferView`, first [map] the buffer, and then
3122/// call `buffer.slice(range).get_mapped_range()`.
3123///
3124/// `BufferView` dereferences to `&[u8]`, so you can use all the usual Rust
3125/// slice methods to access the buffer's contents. It also implements
3126/// `AsRef<[u8]>`, if that's more convenient.
3127///
3128/// If you try to create overlapping views of a buffer, mutable or
3129/// otherwise, `get_mapped_range` will panic.
3130///
3131/// [map]: Buffer#mapping-buffers
3132#[derive(Debug)]
3133pub struct BufferView<'a> {
3134 slice: BufferSlice<'a>,
3135 data: Box<dyn crate::context::BufferMappedRange>,
3136}
3137
/// A write-only view into a mapped buffer.
3139///
3140/// To get a `BufferViewMut`, first [map] the buffer, and then
3141/// call `buffer.slice(range).get_mapped_range_mut()`.
3142///
3143/// `BufferViewMut` dereferences to `&mut [u8]`, so you can use all the usual
3144/// Rust slice methods to access the buffer's contents. It also implements
3145/// `AsMut<[u8]>`, if that's more convenient.
3146///
3147/// It is possible to read the buffer using this view, but doing so is not
3148/// recommended, as it is likely to be slow.
3149///
3150/// If you try to create overlapping views of a buffer, mutable or
3151/// otherwise, `get_mapped_range_mut` will panic.
3152///
3153/// [map]: Buffer#mapping-buffers
3154#[derive(Debug)]
3155pub struct BufferViewMut<'a> {
3156 slice: BufferSlice<'a>,
3157 data: Box<dyn crate::context::BufferMappedRange>,
3158 readable: bool,
3159}
3160
3161impl std::ops::Deref for BufferView<'_> {
3162 type Target = [u8];
3163
3164 #[inline]
3165 fn deref(&self) -> &[u8] {
3166 self.data.slice()
3167 }
3168}
3169
3170impl AsRef<[u8]> for BufferView<'_> {
3171 #[inline]
3172 fn as_ref(&self) -> &[u8] {
3173 self.data.slice()
3174 }
3175}
3176
3177impl AsMut<[u8]> for BufferViewMut<'_> {
3178 #[inline]
3179 fn as_mut(&mut self) -> &mut [u8] {
3180 self.data.slice_mut()
3181 }
3182}
3183
3184impl Deref for BufferViewMut<'_> {
3185 type Target = [u8];
3186
3187 fn deref(&self) -> &Self::Target {
3188 if !self.readable {
3189 log::warn!("Reading from a BufferViewMut is slow and not recommended.");
3190 }
3191
3192 self.data.slice()
3193 }
3194}
3195
3196impl DerefMut for BufferViewMut<'_> {
3197 fn deref_mut(&mut self) -> &mut Self::Target {
3198 self.data.slice_mut()
3199 }
3200}
3201
3202impl Drop for BufferView<'_> {
3203 fn drop(&mut self) {
3204 self.slice
3205 .buffer
3206 .map_context
3207 .lock()
3208 .remove(self.slice.offset, self.slice.size);
3209 }
3210}
3211
3212impl Drop for BufferViewMut<'_> {
3213 fn drop(&mut self) {
3214 self.slice
3215 .buffer
3216 .map_context
3217 .lock()
3218 .remove(self.slice.offset, self.slice.size);
3219 }
3220}
3221
3222impl Buffer {
3223 /// Return the binding view of the entire buffer.
3224 pub fn as_entire_binding(&self) -> BindingResource<'_> {
3225 BindingResource::Buffer(self.as_entire_buffer_binding())
3226 }
3227
3228 /// Return the binding view of the entire buffer.
3229 pub fn as_entire_buffer_binding(&self) -> BufferBinding<'_> {
3230 BufferBinding {
3231 buffer: self,
3232 offset: 0,
3233 size: None,
3234 }
3235 }
3236
3237 /// Use only a portion of this Buffer for a given operation. Choosing a range with no end
3238 /// will use the rest of the buffer. Using a totally unbounded range will use the entire buffer.
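    ///
    /// A minimal sketch (assuming `buffer` is an existing [`Buffer`]); all offsets are in bytes:
    ///
    /// ```rust ignore
    /// let whole = buffer.slice(..);         // the entire buffer
    /// let tail = buffer.slice(256..);       // everything from byte 256 to the end
    /// let window = buffer.slice(64..128);   // bytes 64..128
    /// ```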
3239 pub fn slice<S: RangeBounds<BufferAddress>>(&self, bounds: S) -> BufferSlice<'_> {
3240 let (offset, size) = range_to_offset_size(bounds);
3241 BufferSlice {
3242 buffer: self,
3243 offset,
3244 size,
3245 }
3246 }
3247
3248 /// Flushes any pending write operations and unmaps the buffer from host memory.
3249 pub fn unmap(&self) {
3250 self.map_context.lock().reset();
3251 DynContext::buffer_unmap(&*self.context, &self.id, self.data.as_ref());
3252 }
3253
3254 /// Destroy the associated native resources as soon as possible.
3255 pub fn destroy(&self) {
3256 DynContext::buffer_destroy(&*self.context, &self.id, self.data.as_ref());
3257 }
3258
3259 /// Returns the length of the buffer allocation in bytes.
3260 ///
3261 /// This is always equal to the `size` that was specified when creating the buffer.
3262 pub fn size(&self) -> BufferAddress {
3263 self.size
3264 }
3265
3266 /// Returns the allowed usages for this `Buffer`.
3267 ///
3268 /// This is always equal to the `usage` that was specified when creating the buffer.
3269 pub fn usage(&self) -> BufferUsages {
3270 self.usage
3271 }
3272}
3273
3274impl<'a> BufferSlice<'a> {
    /// Map the buffer. The buffer is mapped and ready to be read or written once the callback is invoked.
    ///
    /// For the callback to fire, either `queue.submit(..)`, `instance.poll_all(..)`, or `device.poll(..)`
    /// must be called elsewhere in the runtime, possibly integrated into an event loop or run on a separate thread.
    ///
    /// The callback is invoked on the thread that first calls one of the above functions after the GPU work
    /// has completed. There are no restrictions on the code you can run in the callback; however, on native the
    /// polling call will not return until the callback does, so prefer keeping callbacks short: set a flag,
    /// send a message, etc.
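    ///
    /// A minimal sketch of the read-back flow, assuming `buffer` was created with
    /// `BufferUsages::MAP_READ` and `device` is the owning [`Device`]:
    ///
    /// ```rust ignore
    /// let slice = buffer.slice(..);
    /// slice.map_async(wgpu::MapMode::Read, |result| {
    ///     // Keep this short: set a flag, send a message, etc.
    ///     result.expect("failed to map buffer");
    /// });
    /// // The callback only runs once the device is polled or more work is submitted.
    /// device.poll(wgpu::Maintain::Wait);
    /// let view = slice.get_mapped_range();
    /// let bytes: &[u8] = &view;
    /// ```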
3284 pub fn map_async(
3285 &self,
3286 mode: MapMode,
3287 callback: impl FnOnce(Result<(), BufferAsyncError>) + WasmNotSend + 'static,
3288 ) {
3289 let mut mc = self.buffer.map_context.lock();
3290 assert_eq!(
3291 mc.initial_range,
3292 0..0,
3293 "Buffer {:?} is already mapped",
3294 self.buffer.id
3295 );
3296 let end = match self.size {
3297 Some(s) => self.offset + s.get(),
3298 None => mc.total_size,
3299 };
3300 mc.initial_range = self.offset..end;
3301
3302 DynContext::buffer_map_async(
3303 &*self.buffer.context,
3304 &self.buffer.id,
3305 self.buffer.data.as_ref(),
3306 mode,
3307 self.offset..end,
3308 Box::new(callback),
3309 )
3310 }
3311
    /// Synchronously and immediately get read access to the buffer's mapped memory. If the buffer is not
    /// already mapped, either via [`BufferDescriptor::mapped_at_creation`] or a completed
    /// [`BufferSlice::map_async`], this will panic.
3314 pub fn get_mapped_range(&self) -> BufferView<'a> {
3315 let end = self.buffer.map_context.lock().add(self.offset, self.size);
3316 let data = DynContext::buffer_get_mapped_range(
3317 &*self.buffer.context,
3318 &self.buffer.id,
3319 self.buffer.data.as_ref(),
3320 self.offset..end,
3321 );
3322 BufferView { slice: *self, data }
3323 }
3324
    /// Synchronously and immediately get read access to the buffer's mapped memory as a JavaScript
    /// `ArrayBuffer`. If the buffer is not already mapped, either via
    /// [`BufferDescriptor::mapped_at_creation`] or a completed [`BufferSlice::map_async`], this will fail.
    ///
    /// This is useful when targeting WebGPU and you want to pass mapped data directly to JavaScript.
    /// Unlike `get_mapped_range`, which unconditionally copies mapped data into the wasm heap, this
    /// function directly hands you the `ArrayBuffer` that the data was mapped into on the JavaScript side.
    ///
    /// This is only available on the WebGPU backend; on any other backend it returns `None`.
3333 #[cfg(webgpu)]
3334 pub fn get_mapped_range_as_array_buffer(&self) -> Option<js_sys::ArrayBuffer> {
3335 self.buffer
3336 .context
3337 .as_any()
3338 .downcast_ref::<crate::backend::ContextWebGpu>()
3339 .map(|ctx| {
3340 let buffer_data = crate::context::downcast_ref(self.buffer.data.as_ref());
3341 let end = self.buffer.map_context.lock().add(self.offset, self.size);
3342 ctx.buffer_get_mapped_range_as_array_buffer(buffer_data, self.offset..end)
3343 })
3344 }
3345
    /// Synchronously and immediately get write access to the buffer's mapped memory. If the buffer is not
    /// already mapped, either via [`BufferDescriptor::mapped_at_creation`] or a completed
    /// [`BufferSlice::map_async`], this will panic.
3348 pub fn get_mapped_range_mut(&self) -> BufferViewMut<'a> {
3349 let end = self.buffer.map_context.lock().add(self.offset, self.size);
3350 let data = DynContext::buffer_get_mapped_range(
3351 &*self.buffer.context,
3352 &self.buffer.id,
3353 self.buffer.data.as_ref(),
3354 self.offset..end,
3355 );
3356 BufferViewMut {
3357 slice: *self,
3358 data,
3359 readable: self.buffer.usage.contains(BufferUsages::MAP_READ),
3360 }
3361 }
3362}
3363
3364impl Drop for Buffer {
3365 fn drop(&mut self) {
3366 if !thread::panicking() {
3367 self.context.buffer_drop(&self.id, self.data.as_ref());
3368 }
3369 }
3370}
3371
3372impl Texture {
    /// Returns the inner hal Texture using a callback. The hal texture will be `None` if the
    /// backend type argument does not match this wgpu Texture.
3375 ///
3376 /// # Safety
3377 ///
3378 /// - The raw handle obtained from the hal Texture must not be manually destroyed
3379 #[cfg(wgpu_core)]
3380 pub unsafe fn as_hal<A: wgc::hal_api::HalApi, F: FnOnce(Option<&A::Texture>) -> R, R>(
3381 &self,
3382 hal_texture_callback: F,
3383 ) -> R {
3384 let texture = self.data.as_ref().downcast_ref().unwrap();
3385
3386 if let Some(ctx) = self
3387 .context
3388 .as_any()
3389 .downcast_ref::<crate::backend::ContextWgpuCore>()
3390 {
3391 unsafe { ctx.texture_as_hal::<A, F, R>(texture, hal_texture_callback) }
3392 } else {
3393 hal_texture_callback(None)
3394 }
3395 }
3396
3397 /// Creates a view of this texture.
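    ///
    /// A common case is a view of the whole texture with default settings:
    ///
    /// ```rust ignore
    /// let view = texture.create_view(&wgpu::TextureViewDescriptor::default());
    /// ```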
3398 pub fn create_view(&self, desc: &TextureViewDescriptor<'_>) -> TextureView {
3399 let (id, data) =
3400 DynContext::texture_create_view(&*self.context, &self.id, self.data.as_ref(), desc);
3401 TextureView {
3402 context: Arc::clone(&self.context),
3403 id,
3404 data,
3405 }
3406 }
3407
3408 /// Destroy the associated native resources as soon as possible.
3409 pub fn destroy(&self) {
3410 DynContext::texture_destroy(&*self.context, &self.id, self.data.as_ref());
3411 }
3412
3413 /// Make an `ImageCopyTexture` representing the whole texture.
3414 pub fn as_image_copy(&self) -> ImageCopyTexture<'_> {
3415 ImageCopyTexture {
3416 texture: self,
3417 mip_level: 0,
3418 origin: Origin3d::ZERO,
3419 aspect: TextureAspect::All,
3420 }
3421 }
3422
3423 /// Returns the size of this `Texture`.
3424 ///
3425 /// This is always equal to the `size` that was specified when creating the texture.
3426 pub fn size(&self) -> Extent3d {
3427 self.descriptor.size
3428 }
3429
3430 /// Returns the width of this `Texture`.
3431 ///
3432 /// This is always equal to the `size.width` that was specified when creating the texture.
3433 pub fn width(&self) -> u32 {
3434 self.descriptor.size.width
3435 }
3436
3437 /// Returns the height of this `Texture`.
3438 ///
3439 /// This is always equal to the `size.height` that was specified when creating the texture.
3440 pub fn height(&self) -> u32 {
3441 self.descriptor.size.height
3442 }
3443
3444 /// Returns the depth or layer count of this `Texture`.
3445 ///
3446 /// This is always equal to the `size.depth_or_array_layers` that was specified when creating the texture.
3447 pub fn depth_or_array_layers(&self) -> u32 {
3448 self.descriptor.size.depth_or_array_layers
3449 }
3450
3451 /// Returns the mip_level_count of this `Texture`.
3452 ///
3453 /// This is always equal to the `mip_level_count` that was specified when creating the texture.
3454 pub fn mip_level_count(&self) -> u32 {
3455 self.descriptor.mip_level_count
3456 }
3457
3458 /// Returns the sample_count of this `Texture`.
3459 ///
3460 /// This is always equal to the `sample_count` that was specified when creating the texture.
3461 pub fn sample_count(&self) -> u32 {
3462 self.descriptor.sample_count
3463 }
3464
3465 /// Returns the dimension of this `Texture`.
3466 ///
3467 /// This is always equal to the `dimension` that was specified when creating the texture.
3468 pub fn dimension(&self) -> TextureDimension {
3469 self.descriptor.dimension
3470 }
3471
3472 /// Returns the format of this `Texture`.
3473 ///
3474 /// This is always equal to the `format` that was specified when creating the texture.
3475 pub fn format(&self) -> TextureFormat {
3476 self.descriptor.format
3477 }
3478
3479 /// Returns the allowed usages of this `Texture`.
3480 ///
3481 /// This is always equal to the `usage` that was specified when creating the texture.
3482 pub fn usage(&self) -> TextureUsages {
3483 self.descriptor.usage
3484 }
3485}
3486
3487impl Drop for Texture {
3488 fn drop(&mut self) {
3489 if self.owned && !thread::panicking() {
3490 self.context.texture_drop(&self.id, self.data.as_ref());
3491 }
3492 }
3493}
3494
3495impl Drop for TextureView {
3496 fn drop(&mut self) {
3497 if !thread::panicking() {
3498 self.context.texture_view_drop(&self.id, self.data.as_ref());
3499 }
3500 }
3501}
3502
3503impl CommandEncoder {
3504 /// Finishes recording and returns a [`CommandBuffer`] that can be submitted for execution.
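    ///
    /// A minimal sketch, assuming `device` and `queue` are an existing [`Device`] and [`Queue`]:
    ///
    /// ```rust ignore
    /// let mut encoder =
    ///     device.create_command_encoder(&wgpu::CommandEncoderDescriptor { label: None });
    /// // ... record passes and copies here ...
    /// let command_buffer = encoder.finish();
    /// queue.submit([command_buffer]);
    /// ```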
3505 pub fn finish(mut self) -> CommandBuffer {
3506 let (id, data) = DynContext::command_encoder_finish(
3507 &*self.context,
3508 self.id.take().unwrap(),
3509 self.data.as_mut(),
3510 );
3511 CommandBuffer {
3512 context: Arc::clone(&self.context),
3513 id: Some(id),
3514 data: Some(data),
3515 }
3516 }
3517
3518 /// Begins recording of a render pass.
3519 ///
3520 /// This function returns a [`RenderPass`] object which records a single render pass.
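    ///
    /// A minimal sketch that clears an assumed [`TextureView`] named `view` to black:
    ///
    /// ```rust ignore
    /// let mut pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
    ///     label: Some("example render pass"),
    ///     color_attachments: &[Some(wgpu::RenderPassColorAttachment {
    ///         view: &view,
    ///         resolve_target: None,
    ///         ops: wgpu::Operations {
    ///             load: wgpu::LoadOp::Clear(wgpu::Color::BLACK),
    ///             store: wgpu::StoreOp::Store,
    ///         },
    ///     })],
    ///     depth_stencil_attachment: None,
    ///     timestamp_writes: None,
    ///     occlusion_query_set: None,
    /// });
    /// // ... set a pipeline, bind groups, and buffers, then draw ...
    /// drop(pass); // the pass is ended when it is dropped
    /// ```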
3521 pub fn begin_render_pass<'pass>(
3522 &'pass mut self,
3523 desc: &RenderPassDescriptor<'pass, '_>,
3524 ) -> RenderPass<'pass> {
3525 let id = self.id.as_ref().unwrap();
3526 let (id, data) = DynContext::command_encoder_begin_render_pass(
3527 &*self.context,
3528 id,
3529 self.data.as_ref(),
3530 desc,
3531 );
3532 RenderPass {
3533 id,
3534 data,
3535 parent: self,
3536 }
3537 }
3538
3539 /// Begins recording of a compute pass.
3540 ///
3541 /// This function returns a [`ComputePass`] object which records a single compute pass.
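    ///
    /// A minimal sketch, assuming `pipeline` and `bind_group` already exist:
    ///
    /// ```rust ignore
    /// let mut pass = encoder.begin_compute_pass(&wgpu::ComputePassDescriptor {
    ///     label: Some("example compute pass"),
    ///     timestamp_writes: None,
    /// });
    /// pass.set_pipeline(&pipeline);
    /// pass.set_bind_group(0, &bind_group, &[]);
    /// pass.dispatch_workgroups(64, 1, 1);
    /// ```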
3542 pub fn begin_compute_pass(&mut self, desc: &ComputePassDescriptor<'_>) -> ComputePass<'_> {
3543 let id = self.id.as_ref().unwrap();
3544 let (id, data) = DynContext::command_encoder_begin_compute_pass(
3545 &*self.context,
3546 id,
3547 self.data.as_ref(),
3548 desc,
3549 );
3550 ComputePass {
3551 id,
3552 data,
3553 parent: self,
3554 }
3555 }
3556
3557 /// Copy data from one buffer to another.
3558 ///
3559 /// # Panics
3560 ///
3561 /// - Buffer offsets or copy size not a multiple of [`COPY_BUFFER_ALIGNMENT`].
3562 /// - Copy would overrun buffer.
3563 /// - Copy within the same buffer.
3564 pub fn copy_buffer_to_buffer(
3565 &mut self,
3566 source: &Buffer,
3567 source_offset: BufferAddress,
3568 destination: &Buffer,
3569 destination_offset: BufferAddress,
3570 copy_size: BufferAddress,
3571 ) {
3572 DynContext::command_encoder_copy_buffer_to_buffer(
3573 &*self.context,
3574 self.id.as_ref().unwrap(),
3575 self.data.as_ref(),
3576 &source.id,
3577 source.data.as_ref(),
3578 source_offset,
3579 &destination.id,
3580 destination.data.as_ref(),
3581 destination_offset,
3582 copy_size,
3583 );
3584 }
3585
3586 /// Copy data from a buffer to a texture.
3587 pub fn copy_buffer_to_texture(
3588 &mut self,
3589 source: ImageCopyBuffer<'_>,
3590 destination: ImageCopyTexture<'_>,
3591 copy_size: Extent3d,
3592 ) {
3593 DynContext::command_encoder_copy_buffer_to_texture(
3594 &*self.context,
3595 self.id.as_ref().unwrap(),
3596 self.data.as_ref(),
3597 source,
3598 destination,
3599 copy_size,
3600 );
3601 }
3602
3603 /// Copy data from a texture to a buffer.
3604 pub fn copy_texture_to_buffer(
3605 &mut self,
3606 source: ImageCopyTexture<'_>,
3607 destination: ImageCopyBuffer<'_>,
3608 copy_size: Extent3d,
3609 ) {
3610 DynContext::command_encoder_copy_texture_to_buffer(
3611 &*self.context,
3612 self.id.as_ref().unwrap(),
3613 self.data.as_ref(),
3614 source,
3615 destination,
3616 copy_size,
3617 );
3618 }
3619
3620 /// Copy data from one texture to another.
3621 ///
3622 /// # Panics
3623 ///
3624 /// - Textures are not the same type
3625 /// - If a depth texture, or a multisampled texture, the entire texture must be copied
3626 /// - Copy would overrun either texture
3627 pub fn copy_texture_to_texture(
3628 &mut self,
3629 source: ImageCopyTexture<'_>,
3630 destination: ImageCopyTexture<'_>,
3631 copy_size: Extent3d,
3632 ) {
3633 DynContext::command_encoder_copy_texture_to_texture(
3634 &*self.context,
3635 self.id.as_ref().unwrap(),
3636 self.data.as_ref(),
3637 source,
3638 destination,
3639 copy_size,
3640 );
3641 }
3642
3643 /// Clears texture to zero.
3644 ///
3645 /// Note that unlike with clear_buffer, `COPY_DST` usage is not required.
3646 ///
3647 /// # Implementation notes
3648 ///
3649 /// - implemented either via buffer copies and render/depth target clear, path depends on texture usages
3650 /// - behaves like texture zero init, but is performed immediately (clearing is *not* delayed via marking it as uninitialized)
3651 ///
3652 /// # Panics
3653 ///
3654 /// - `CLEAR_TEXTURE` extension not enabled
3655 /// - Range is out of bounds
3656 pub fn clear_texture(&mut self, texture: &Texture, subresource_range: &ImageSubresourceRange) {
3657 DynContext::command_encoder_clear_texture(
3658 &*self.context,
3659 self.id.as_ref().unwrap(),
3660 self.data.as_ref(),
3661 texture,
3662 subresource_range,
3663 );
3664 }
3665
3666 /// Clears buffer to zero.
3667 ///
3668 /// # Panics
3669 ///
3670 /// - Buffer does not have `COPY_DST` usage.
3671 /// - Range is out of bounds
3672 pub fn clear_buffer(
3673 &mut self,
3674 buffer: &Buffer,
3675 offset: BufferAddress,
3676 size: Option<BufferAddress>,
3677 ) {
3678 DynContext::command_encoder_clear_buffer(
3679 &*self.context,
3680 self.id.as_ref().unwrap(),
3681 self.data.as_ref(),
3682 buffer,
3683 offset,
3684 size,
3685 );
3686 }
3687
3688 /// Inserts debug marker.
3689 pub fn insert_debug_marker(&mut self, label: &str) {
3690 let id = self.id.as_ref().unwrap();
3691 DynContext::command_encoder_insert_debug_marker(
3692 &*self.context,
3693 id,
3694 self.data.as_ref(),
3695 label,
3696 );
3697 }
3698
    /// Begins a new debug marker group; subsequent commands are grouped under it until the matching `pop_debug_group()` call.
3700 pub fn push_debug_group(&mut self, label: &str) {
3701 let id = self.id.as_ref().unwrap();
3702 DynContext::command_encoder_push_debug_group(&*self.context, id, self.data.as_ref(), label);
3703 }
3704
    /// Ends the most recently pushed debug marker group.
3706 pub fn pop_debug_group(&mut self) {
3707 let id = self.id.as_ref().unwrap();
3708 DynContext::command_encoder_pop_debug_group(&*self.context, id, self.data.as_ref());
3709 }
3710
3711 /// Resolves a query set, writing the results into the supplied destination buffer.
3712 ///
3713 /// Occlusion and timestamp queries are 8 bytes each (see [`crate::QUERY_SIZE`]). For pipeline statistics queries,
3714 /// see [`PipelineStatisticsTypes`] for more information.
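    ///
    /// A minimal sketch of resolving two timestamp queries, assuming `query_set` holds at
    /// least two written queries and `resolve_buffer` was created with
    /// `BufferUsages::QUERY_RESOLVE`:
    ///
    /// ```rust ignore
    /// // Each resolved query occupies QUERY_SIZE (8) bytes in `resolve_buffer`.
    /// encoder.resolve_query_set(&query_set, 0..2, &resolve_buffer, 0);
    /// ```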
3715 pub fn resolve_query_set(
3716 &mut self,
3717 query_set: &QuerySet,
3718 query_range: Range<u32>,
3719 destination: &Buffer,
3720 destination_offset: BufferAddress,
3721 ) {
3722 DynContext::command_encoder_resolve_query_set(
3723 &*self.context,
3724 self.id.as_ref().unwrap(),
3725 self.data.as_ref(),
3726 &query_set.id,
3727 query_set.data.as_ref(),
3728 query_range.start,
3729 query_range.end - query_range.start,
3730 &destination.id,
3731 destination.data.as_ref(),
3732 destination_offset,
3733 )
3734 }
3735
    /// Returns the inner hal CommandEncoder using a callback. The hal command encoder will be `None` if the
    /// backend type argument does not match this wgpu CommandEncoder.
3738 ///
3739 /// This method will start the wgpu_core level command recording.
3740 ///
3741 /// # Safety
3742 ///
3743 /// - The raw handle obtained from the hal CommandEncoder must not be manually destroyed
3744 #[cfg(wgpu_core)]
3745 pub unsafe fn as_hal_mut<
3746 A: wgc::hal_api::HalApi,
3747 F: FnOnce(Option<&mut A::CommandEncoder>) -> R,
3748 R,
3749 >(
3750 &mut self,
3751 hal_command_encoder_callback: F,
3752 ) -> Option<R> {
3753 use core::id::CommandEncoderId;
3754
3755 self.context
3756 .as_any()
3757 .downcast_ref::<crate::backend::ContextWgpuCore>()
3758 .map(|ctx| unsafe {
3759 ctx.command_encoder_as_hal_mut::<A, F, R>(
3760 CommandEncoderId::from(self.id.unwrap()),
3761 hal_command_encoder_callback,
3762 )
3763 })
3764 }
3765}
3766
3767/// [`Features::TIMESTAMP_QUERY_INSIDE_ENCODERS`] must be enabled on the device in order to call these functions.
3768impl CommandEncoder {
3769 /// Issue a timestamp command at this point in the queue.
3770 /// The timestamp will be written to the specified query set, at the specified index.
3771 ///
    /// The timestamp value must be multiplied by [`Queue::get_timestamp_period`] to get
    /// the value in nanoseconds. Absolute values have no meaning,
    /// but timestamps can be subtracted to get the time it takes
    /// for a string of operations to complete.
    ///
    /// Attention: Since commands within a command encoder may be reordered,
    /// there is no strict guarantee that a timestamp is taken after all commands
    /// recorded so far and before all commands recorded afterwards.
    /// This may depend on both the backend and the driver.
3781 pub fn write_timestamp(&mut self, query_set: &QuerySet, query_index: u32) {
3782 DynContext::command_encoder_write_timestamp(
3783 &*self.context,
3784 self.id.as_ref().unwrap(),
3785 self.data.as_mut(),
3786 &query_set.id,
3787 query_set.data.as_ref(),
3788 query_index,
3789 )
3790 }
3791}
3792
3793impl<'a> RenderPass<'a> {
3794 /// Sets the active bind group for a given bind group index. The bind group layout
3795 /// in the active pipeline when any `draw_*()` method is called must match the layout of
3796 /// this bind group.
3797 ///
    /// If the bind group has dynamic offsets, provide them in binding order.
3799 /// These offsets have to be aligned to [`Limits::min_uniform_buffer_offset_alignment`]
3800 /// or [`Limits::min_storage_buffer_offset_alignment`] appropriately.
3801 ///
3802 /// Subsequent draw calls’ shader executions will be able to access data in these bind groups.
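    ///
    /// A minimal sketch, assuming `bind_group` was created from a layout whose single
    /// buffer binding has `has_dynamic_offset: true`:
    ///
    /// ```rust ignore
    /// // Bind the group and point its dynamic binding at byte offset 256. The offset
    /// // must respect `min_uniform_buffer_offset_alignment` (or the storage equivalent).
    /// render_pass.set_bind_group(0, &bind_group, &[256]);
    /// ```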
3803 pub fn set_bind_group(
3804 &mut self,
3805 index: u32,
3806 bind_group: &'a BindGroup,
3807 offsets: &[DynamicOffset],
3808 ) {
3809 DynContext::render_pass_set_bind_group(
3810 &*self.parent.context,
3811 &mut self.id,
3812 self.data.as_mut(),
3813 index,
3814 &bind_group.id,
3815 bind_group.data.as_ref(),
3816 offsets,
3817 )
3818 }
3819
3820 /// Sets the active render pipeline.
3821 ///
3822 /// Subsequent draw calls will exhibit the behavior defined by `pipeline`.
3823 pub fn set_pipeline(&mut self, pipeline: &'a RenderPipeline) {
3824 DynContext::render_pass_set_pipeline(
3825 &*self.parent.context,
3826 &mut self.id,
3827 self.data.as_mut(),
3828 &pipeline.id,
3829 pipeline.data.as_ref(),
3830 )
3831 }
3832
    /// Sets the blend constant color used by some of the blending modes.
3834 ///
3835 /// Subsequent blending tests will test against this value.
3836 /// If this method has not been called, the blend constant defaults to [`Color::TRANSPARENT`]
3837 /// (all components zero).
3838 pub fn set_blend_constant(&mut self, color: Color) {
3839 DynContext::render_pass_set_blend_constant(
3840 &*self.parent.context,
3841 &mut self.id,
3842 self.data.as_mut(),
3843 color,
3844 )
3845 }
3846
3847 /// Sets the active index buffer.
3848 ///
3849 /// Subsequent calls to [`draw_indexed`](RenderPass::draw_indexed) on this [`RenderPass`] will
3850 /// use `buffer` as the source index buffer.
3851 pub fn set_index_buffer(&mut self, buffer_slice: BufferSlice<'a>, index_format: IndexFormat) {
3852 DynContext::render_pass_set_index_buffer(
3853 &*self.parent.context,
3854 &mut self.id,
3855 self.data.as_mut(),
3856 &buffer_slice.buffer.id,
3857 buffer_slice.buffer.data.as_ref(),
3858 index_format,
3859 buffer_slice.offset,
3860 buffer_slice.size,
3861 )
3862 }
3863
3864 /// Assign a vertex buffer to a slot.
3865 ///
3866 /// Subsequent calls to [`draw`] and [`draw_indexed`] on this
3867 /// [`RenderPass`] will use `buffer` as one of the source vertex buffers.
3868 ///
3869 /// The `slot` refers to the index of the matching descriptor in
3870 /// [`VertexState::buffers`].
3871 ///
3872 /// [`draw`]: RenderPass::draw
3873 /// [`draw_indexed`]: RenderPass::draw_indexed
3874 pub fn set_vertex_buffer(&mut self, slot: u32, buffer_slice: BufferSlice<'a>) {
3875 DynContext::render_pass_set_vertex_buffer(
3876 &*self.parent.context,
3877 &mut self.id,
3878 self.data.as_mut(),
3879 slot,
3880 &buffer_slice.buffer.id,
3881 buffer_slice.buffer.data.as_ref(),
3882 buffer_slice.offset,
3883 buffer_slice.size,
3884 )
3885 }
3886
    /// Sets the scissor rectangle used during the rasterization stage; the scissor test is
    /// applied after transformation into [viewport coordinates](https://www.w3.org/TR/webgpu/#viewport-coordinates).
3889 ///
3890 /// Subsequent draw calls will discard any fragments which fall outside the scissor rectangle.
3891 /// If this method has not been called, the scissor rectangle defaults to the entire bounds of
3892 /// the render targets.
3893 ///
3894 /// The function of the scissor rectangle resembles [`set_viewport()`](Self::set_viewport),
3895 /// but it does not affect the coordinate system, only which fragments are discarded.
3896 pub fn set_scissor_rect(&mut self, x: u32, y: u32, width: u32, height: u32) {
3897 DynContext::render_pass_set_scissor_rect(
3898 &*self.parent.context,
3899 &mut self.id,
3900 self.data.as_mut(),
3901 x,
3902 y,
3903 width,
3904 height,
3905 );
3906 }
3907
3908 /// Sets the viewport used during the rasterization stage to linearly map
3909 /// from [normalized device coordinates](https://www.w3.org/TR/webgpu/#ndc) to [viewport coordinates](https://www.w3.org/TR/webgpu/#viewport-coordinates).
3910 ///
3911 /// Subsequent draw calls will only draw within this region.
3912 /// If this method has not been called, the viewport defaults to the entire bounds of the render
3913 /// targets.
3914 pub fn set_viewport(&mut self, x: f32, y: f32, w: f32, h: f32, min_depth: f32, max_depth: f32) {
3915 DynContext::render_pass_set_viewport(
3916 &*self.parent.context,
3917 &mut self.id,
3918 self.data.as_mut(),
3919 x,
3920 y,
3921 w,
3922 h,
3923 min_depth,
3924 max_depth,
3925 );
3926 }
3927
3928 /// Sets the stencil reference.
3929 ///
3930 /// Subsequent stencil tests will test against this value.
3931 /// If this method has not been called, the stencil reference value defaults to `0`.
3932 pub fn set_stencil_reference(&mut self, reference: u32) {
3933 DynContext::render_pass_set_stencil_reference(
3934 &*self.parent.context,
3935 &mut self.id,
3936 self.data.as_mut(),
3937 reference,
3938 );
3939 }
3940
3941 /// Inserts debug marker.
3942 pub fn insert_debug_marker(&mut self, label: &str) {
3943 DynContext::render_pass_insert_debug_marker(
3944 &*self.parent.context,
3945 &mut self.id,
3946 self.data.as_mut(),
3947 label,
3948 );
3949 }
3950
    /// Begins a new debug marker group; subsequent commands are grouped under it until the matching `pop_debug_group()` call.
3952 pub fn push_debug_group(&mut self, label: &str) {
3953 DynContext::render_pass_push_debug_group(
3954 &*self.parent.context,
3955 &mut self.id,
3956 self.data.as_mut(),
3957 label,
3958 );
3959 }
3960
    /// Ends the most recently pushed debug marker group.
3962 pub fn pop_debug_group(&mut self) {
3963 DynContext::render_pass_pop_debug_group(
3964 &*self.parent.context,
3965 &mut self.id,
3966 self.data.as_mut(),
3967 );
3968 }
3969
3970 /// Draws primitives from the active vertex buffer(s).
3971 ///
3972 /// The active vertex buffer(s) can be set with [`RenderPass::set_vertex_buffer`].
    /// Does not use an index buffer. If you need one, see [`RenderPass::draw_indexed`].
    ///
    /// Panics if the `vertices` range exceeds the bounds of any currently set vertex buffer.
    ///
    /// `vertices`: the range of vertices to draw.
    /// `instances`: the range of instances to draw. Use `0..1` if instance buffers are not used.
    /// Roughly equivalent to the following, as performed internally:
3980 /// ```rust ignore
3981 /// for instance_id in instance_range {
3982 /// for vertex_id in vertex_range {
3983 /// let vertex = vertex[vertex_id];
3984 /// vertex_shader(vertex, vertex_id, instance_id);
3985 /// }
3986 /// }
3987 /// ```
3988 ///
3989 /// This drawing command uses the current render state, as set by preceding `set_*()` methods.
3990 /// It is not affected by changes to the state that are performed after it is called.
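    ///
    /// A typical call, as a sketch that assumes a pipeline and a vertex buffer holding
    /// `vertex_count` vertices are available:
    ///
    /// ```rust ignore
    /// render_pass.set_pipeline(&render_pipeline);
    /// render_pass.set_vertex_buffer(0, vertex_buffer.slice(..));
    /// render_pass.draw(0..vertex_count, 0..1); // a single instance
    /// ```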
3991 pub fn draw(&mut self, vertices: Range<u32>, instances: Range<u32>) {
3992 DynContext::render_pass_draw(
3993 &*self.parent.context,
3994 &mut self.id,
3995 self.data.as_mut(),
3996 vertices,
3997 instances,
3998 )
3999 }
4000
4001 /// Draws indexed primitives using the active index buffer and the active vertex buffers.
4002 ///
4003 /// The active index buffer can be set with [`RenderPass::set_index_buffer`]
4004 /// The active vertex buffers can be set with [`RenderPass::set_vertex_buffer`].
4005 ///
    /// Panics if the `indices` range exceeds the bounds of the currently set index buffer.
    ///
    /// `indices`: the range of indices to draw.
    /// `base_vertex`: value added to each index value before indexing into the vertex buffers.
    /// `instances`: the range of instances to draw. Use `0..1` if instance buffers are not used.
    /// Roughly equivalent to the following, as performed internally:
4012 /// ```rust ignore
4013 /// for instance_id in instance_range {
4014 /// for index_index in index_range {
4015 /// let vertex_id = index_buffer[index_index];
4016 /// let adjusted_vertex_id = vertex_id + base_vertex;
4017 /// let vertex = vertex[adjusted_vertex_id];
4018 /// vertex_shader(vertex, adjusted_vertex_id, instance_id);
4019 /// }
4020 /// }
4021 /// ```
4022 ///
4023 /// This drawing command uses the current render state, as set by preceding `set_*()` methods.
4024 /// It is not affected by changes to the state that are performed after it is called.
4025 pub fn draw_indexed(&mut self, indices: Range<u32>, base_vertex: i32, instances: Range<u32>) {
4026 DynContext::render_pass_draw_indexed(
4027 &*self.parent.context,
4028 &mut self.id,
4029 self.data.as_mut(),
4030 indices,
4031 base_vertex,
4032 instances,
4033 );
4034 }
4035
4036 /// Draws primitives from the active vertex buffer(s) based on the contents of the `indirect_buffer`.
4037 ///
4038 /// This is like calling [`RenderPass::draw`] but the contents of the call are specified in the `indirect_buffer`.
4039 /// The structure expected in `indirect_buffer` must conform to [`DrawIndirectArgs`](crate::util::DrawIndirectArgs).
4040 ///
4041 /// Indirect drawing has some caveats depending on the features available. We are not currently able to validate
4042 /// these and issue an error.
4043 /// - If [`Features::INDIRECT_FIRST_INSTANCE`] is not present on the adapter,
    /// [`DrawIndirectArgs::first_instance`](crate::util::DrawIndirectArgs::first_instance) will be ignored.
4045 /// - If [`DownlevelFlags::VERTEX_AND_INSTANCE_INDEX_RESPECTS_RESPECTIVE_FIRST_VALUE_IN_INDIRECT_DRAW`] is not present on the adapter,
4046 /// any use of `@builtin(vertex_index)` or `@builtin(instance_index)` in the vertex shader will have different values.
4047 ///
4048 /// See details on the individual flags for more information.
4049 pub fn draw_indirect(&mut self, indirect_buffer: &'a Buffer, indirect_offset: BufferAddress) {
4050 DynContext::render_pass_draw_indirect(
4051 &*self.parent.context,
4052 &mut self.id,
4053 self.data.as_mut(),
4054 &indirect_buffer.id,
4055 indirect_buffer.data.as_ref(),
4056 indirect_offset,
4057 );
4058 }
4059
4060 /// Draws indexed primitives using the active index buffer and the active vertex buffers,
4061 /// based on the contents of the `indirect_buffer`.
4062 ///
4063 /// This is like calling [`RenderPass::draw_indexed`] but the contents of the call are specified in the `indirect_buffer`.
4064 /// The structure expected in `indirect_buffer` must conform to [`DrawIndexedIndirectArgs`](crate::util::DrawIndexedIndirectArgs).
4065 ///
4066 /// Indirect drawing has some caveats depending on the features available. We are not currently able to validate
4067 /// these and issue an error.
4068 /// - If [`Features::INDIRECT_FIRST_INSTANCE`] is not present on the adapter,
    /// [`DrawIndexedIndirectArgs::first_instance`](crate::util::DrawIndexedIndirectArgs::first_instance) will be ignored.
4070 /// - If [`DownlevelFlags::VERTEX_AND_INSTANCE_INDEX_RESPECTS_RESPECTIVE_FIRST_VALUE_IN_INDIRECT_DRAW`] is not present on the adapter,
4071 /// any use of `@builtin(vertex_index)` or `@builtin(instance_index)` in the vertex shader will have different values.
4072 ///
4073 /// See details on the individual flags for more information.
4074 pub fn draw_indexed_indirect(
4075 &mut self,
4076 indirect_buffer: &'a Buffer,
4077 indirect_offset: BufferAddress,
4078 ) {
4079 DynContext::render_pass_draw_indexed_indirect(
4080 &*self.parent.context,
4081 &mut self.id,
4082 self.data.as_mut(),
4083 &indirect_buffer.id,
4084 indirect_buffer.data.as_ref(),
4085 indirect_offset,
4086 );
4087 }
4088
4089 /// Execute a [render bundle][RenderBundle], which is a set of pre-recorded commands
4090 /// that can be run together.
4091 ///
4092 /// Commands in the bundle do not inherit this render pass's current render state, and after the
4093 /// bundle has executed, the state is **cleared** (reset to defaults, not the previous state).
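    ///
    /// A sketch, assuming `bundle` is a pre-recorded [`RenderBundle`]:
    ///
    /// ```rust ignore
    /// render_pass.execute_bundles([&bundle]);
    /// // Any render state set before this call must be set again afterwards.
    /// ```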
4094 pub fn execute_bundles<I: IntoIterator<Item = &'a RenderBundle>>(&mut self, render_bundles: I) {
4095 let mut render_bundles = render_bundles
4096 .into_iter()
4097 .map(|rb| (&rb.id, rb.data.as_ref()));
4098
4099 DynContext::render_pass_execute_bundles(
4100 &*self.parent.context,
4101 &mut self.id,
4102 self.data.as_mut(),
4103 &mut render_bundles,
4104 )
4105 }
4106}
4107
4108/// [`Features::MULTI_DRAW_INDIRECT`] must be enabled on the device in order to call these functions.
4109impl<'a> RenderPass<'a> {
4110 /// Dispatches multiple draw calls from the active vertex buffer(s) based on the contents of the `indirect_buffer`.
4111 /// `count` draw calls are issued.
4112 ///
4113 /// The active vertex buffers can be set with [`RenderPass::set_vertex_buffer`].
4114 ///
4115 /// The structure expected in `indirect_buffer` must conform to [`DrawIndirectArgs`](crate::util::DrawIndirectArgs).
4116 /// These draw structures are expected to be tightly packed.
4117 ///
4118 /// This drawing command uses the current render state, as set by preceding `set_*()` methods.
4119 /// It is not affected by changes to the state that are performed after it is called.
4120 pub fn multi_draw_indirect(
4121 &mut self,
4122 indirect_buffer: &'a Buffer,
4123 indirect_offset: BufferAddress,
4124 count: u32,
4125 ) {
4126 DynContext::render_pass_multi_draw_indirect(
4127 &*self.parent.context,
4128 &mut self.id,
4129 self.data.as_mut(),
4130 &indirect_buffer.id,
4131 indirect_buffer.data.as_ref(),
4132 indirect_offset,
4133 count,
4134 );
4135 }
4136
4137 /// Dispatches multiple draw calls from the active index buffer and the active vertex buffers,
4138 /// based on the contents of the `indirect_buffer`. `count` draw calls are issued.
4139 ///
4140 /// The active index buffer can be set with [`RenderPass::set_index_buffer`], while the active
4141 /// vertex buffers can be set with [`RenderPass::set_vertex_buffer`].
4142 ///
4143 /// The structure expected in `indirect_buffer` must conform to [`DrawIndexedIndirectArgs`](crate::util::DrawIndexedIndirectArgs).
4144 /// These draw structures are expected to be tightly packed.
4145 ///
4146 /// This drawing command uses the current render state, as set by preceding `set_*()` methods.
4147 /// It is not affected by changes to the state that are performed after it is called.
4148 pub fn multi_draw_indexed_indirect(
4149 &mut self,
4150 indirect_buffer: &'a Buffer,
4151 indirect_offset: BufferAddress,
4152 count: u32,
4153 ) {
4154 DynContext::render_pass_multi_draw_indexed_indirect(
4155 &*self.parent.context,
4156 &mut self.id,
4157 self.data.as_mut(),
4158 &indirect_buffer.id,
4159 indirect_buffer.data.as_ref(),
4160 indirect_offset,
4161 count,
4162 );
4163 }
4164}
4165
4166/// [`Features::MULTI_DRAW_INDIRECT_COUNT`] must be enabled on the device in order to call these functions.
4167impl<'a> RenderPass<'a> {
4168 /// Dispatches multiple draw calls from the active vertex buffer(s) based on the contents of the `indirect_buffer`.
4169 /// The count buffer is read to determine how many draws to issue.
4170 ///
4171 /// The indirect buffer must be long enough to account for `max_count` draws, however only `count`
4172 /// draws will be read. If `count` is greater than `max_count`, `max_count` will be used.
4173 ///
4174 /// The active vertex buffers can be set with [`RenderPass::set_vertex_buffer`].
4175 ///
4176 /// The structure expected in `indirect_buffer` must conform to [`DrawIndirectArgs`](crate::util::DrawIndirectArgs).
4177 /// These draw structures are expected to be tightly packed.
4178 ///
4179 /// The structure expected in `count_buffer` is the following:
4180 ///
4181 /// ```rust
4182 /// #[repr(C)]
4183 /// struct DrawIndirectCount {
4184 /// count: u32, // Number of draw calls to issue.
4185 /// }
4186 /// ```
4187 ///
4188 /// This drawing command uses the current render state, as set by preceding `set_*()` methods.
4189 /// It is not affected by changes to the state that are performed after it is called.
4190 pub fn multi_draw_indirect_count(
4191 &mut self,
4192 indirect_buffer: &'a Buffer,
4193 indirect_offset: BufferAddress,
4194 count_buffer: &'a Buffer,
4195 count_offset: BufferAddress,
4196 max_count: u32,
4197 ) {
4198 DynContext::render_pass_multi_draw_indirect_count(
4199 &*self.parent.context,
4200 &mut self.id,
4201 self.data.as_mut(),
4202 &indirect_buffer.id,
4203 indirect_buffer.data.as_ref(),
4204 indirect_offset,
4205 &count_buffer.id,
4206 count_buffer.data.as_ref(),
4207 count_offset,
4208 max_count,
4209 );
4210 }
4211
4212 /// Dispatches multiple draw calls from the active index buffer and the active vertex buffers,
4213 /// based on the contents of the `indirect_buffer`. The count buffer is read to determine how many draws to issue.
4214 ///
4215 /// The indirect buffer must be long enough to account for `max_count` draws, however only `count`
4216 /// draws will be read. If `count` is greater than `max_count`, `max_count` will be used.
4217 ///
4218 /// The active index buffer can be set with [`RenderPass::set_index_buffer`], while the active
4219 /// vertex buffers can be set with [`RenderPass::set_vertex_buffer`].
4220 ///
4222 /// The structure expected in `indirect_buffer` must conform to [`DrawIndexedIndirectArgs`](crate::util::DrawIndexedIndirectArgs).
4223 ///
4224 /// These draw structures are expected to be tightly packed.
4225 ///
4226 /// The structure expected in `count_buffer` is the following:
4227 ///
4228 /// ```rust
4229 /// #[repr(C)]
4230 /// struct DrawIndexedIndirectCount {
4231 /// count: u32, // Number of draw calls to issue.
4232 /// }
4233 /// ```
4234 ///
4235 /// This drawing command uses the current render state, as set by preceding `set_*()` methods.
4236 /// It is not affected by changes to the state that are performed after it is called.
4237 pub fn multi_draw_indexed_indirect_count(
4238 &mut self,
4239 indirect_buffer: &'a Buffer,
4240 indirect_offset: BufferAddress,
4241 count_buffer: &'a Buffer,
4242 count_offset: BufferAddress,
4243 max_count: u32,
4244 ) {
4245 DynContext::render_pass_multi_draw_indexed_indirect_count(
4246 &*self.parent.context,
4247 &mut self.id,
4248 self.data.as_mut(),
4249 &indirect_buffer.id,
4250 indirect_buffer.data.as_ref(),
4251 indirect_offset,
4252 &count_buffer.id,
4253 count_buffer.data.as_ref(),
4254 count_offset,
4255 max_count,
4256 );
4257 }
4258}
4259
4260/// [`Features::PUSH_CONSTANTS`] must be enabled on the device in order to call these functions.
4261impl<'a> RenderPass<'a> {
4262 /// Set push constant data for subsequent draw calls.
4263 ///
4264 /// Write the bytes in `data` at offset `offset` within push constant
4265 /// storage, all of which are accessible by all the pipeline stages in
4266 /// `stages`, and no others. Both `offset` and the length of `data` must be
4267 /// multiples of [`PUSH_CONSTANT_ALIGNMENT`], which is always 4.
4268 ///
4269 /// For example, if `offset` is `4` and `data` is eight bytes long, this
4270 /// call will write `data` to bytes `4..12` of push constant storage.
4271 ///
4272 /// # Stage matching
4273 ///
4274 /// Every byte in the affected range of push constant storage must be
4275 /// accessible to exactly the same set of pipeline stages, which must match
4276 /// `stages`. If there are two bytes of storage that are accessible by
4277 /// different sets of pipeline stages - say, one is accessible by fragment
4278 /// shaders, and the other is accessible by both fragment shaders and vertex
4279 /// shaders - then no single `set_push_constants` call may affect both of
4280 /// them; to write both, you must make multiple calls, each with the
4281 /// appropriate `stages` value.
4282 ///
4283 /// Which pipeline stages may access a given byte is determined by the
4284 /// pipeline's [`PushConstant`] global variable and (if it is a struct) its
4285 /// members' offsets.
4286 ///
4287 /// For example, suppose you have twelve bytes of push constant storage,
4288 /// where bytes `0..8` are accessed by the vertex shader, and bytes `4..12`
4289 /// are accessed by the fragment shader. This means there are three byte
4290 /// ranges each accessed by a different set of stages:
4291 ///
    /// - Bytes `0..4` are accessed only by the vertex shader.
4293 ///
4294 /// - Bytes `4..8` are accessed by both the fragment shader and the vertex shader.
4295 ///
    /// - Bytes `8..12` are accessed only by the fragment shader.
4297 ///
4298 /// To write all twelve bytes requires three `set_push_constants` calls, one
4299 /// for each range, each passing the matching `stages` mask.
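    ///
    /// Continuing that example, writing all twelve bytes could look like the following
    /// sketch, where `a`, `b`, and `c` are hypothetical 4-byte slices:
    ///
    /// ```rust ignore
    /// render_pass.set_push_constants(wgpu::ShaderStages::VERTEX, 0, a);
    /// render_pass.set_push_constants(
    ///     wgpu::ShaderStages::VERTEX | wgpu::ShaderStages::FRAGMENT,
    ///     4,
    ///     b,
    /// );
    /// render_pass.set_push_constants(wgpu::ShaderStages::FRAGMENT, 8, c);
    /// ```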
4300 ///
4301 /// [`PushConstant`]: https://docs.rs/naga/latest/naga/enum.StorageClass.html#variant.PushConstant
4302 pub fn set_push_constants(&mut self, stages: ShaderStages, offset: u32, data: &[u8]) {
4303 DynContext::render_pass_set_push_constants(
4304 &*self.parent.context,
4305 &mut self.id,
4306 self.data.as_mut(),
4307 stages,
4308 offset,
4309 data,
4310 );
4311 }
4312}
4313
4314/// [`Features::TIMESTAMP_QUERY_INSIDE_PASSES`] must be enabled on the device in order to call these functions.
4315impl<'a> RenderPass<'a> {
4316 /// Issue a timestamp command at this point in the queue. The
4317 /// timestamp will be written to the specified query set, at the specified index.
4318 ///
4319 /// Must be multiplied by [`Queue::get_timestamp_period`] to get
4320 /// the value in nanoseconds. Absolute values have no meaning,
4321 /// but timestamps can be subtracted to get the time it takes
4322 /// for a string of operations to complete.
4323 pub fn write_timestamp(&mut self, query_set: &QuerySet, query_index: u32) {
4324 DynContext::render_pass_write_timestamp(
4325 &*self.parent.context,
4326 &mut self.id,
4327 self.data.as_mut(),
4328 &query_set.id,
4329 query_set.data.as_ref(),
4330 query_index,
4331 )
4332 }
4333}
4334
4335impl<'a> RenderPass<'a> {
    /// Start an occlusion query on this render pass. It can be ended with
4337 /// `end_occlusion_query`. Occlusion queries may not be nested.
4338 pub fn begin_occlusion_query(&mut self, query_index: u32) {
4339 DynContext::render_pass_begin_occlusion_query(
4340 &*self.parent.context,
4341 &mut self.id,
4342 self.data.as_mut(),
4343 query_index,
4344 );
4345 }
4346
4347 /// End the occlusion query on this render pass. It can be started with
4348 /// `begin_occlusion_query`. Occlusion queries may not be nested.
4349 pub fn end_occlusion_query(&mut self) {
4350 DynContext::render_pass_end_occlusion_query(
4351 &*self.parent.context,
4352 &mut self.id,
4353 self.data.as_mut(),
4354 );
4355 }
4356}
4357
4358/// [`Features::PIPELINE_STATISTICS_QUERY`] must be enabled on the device in order to call these functions.
4359impl<'a> RenderPass<'a> {
4360 /// Start a pipeline statistics query on this render pass. It can be ended with
4361 /// `end_pipeline_statistics_query`. Pipeline statistics queries may not be nested.
4362 pub fn begin_pipeline_statistics_query(&mut self, query_set: &QuerySet, query_index: u32) {
4363 DynContext::render_pass_begin_pipeline_statistics_query(
4364 &*self.parent.context,
4365 &mut self.id,
4366 self.data.as_mut(),
4367 &query_set.id,
4368 query_set.data.as_ref(),
4369 query_index,
4370 );
4371 }
4372
4373 /// End the pipeline statistics query on this render pass. It can be started with
4374 /// `begin_pipeline_statistics_query`. Pipeline statistics queries may not be nested.
4375 pub fn end_pipeline_statistics_query(&mut self) {
4376 DynContext::render_pass_end_pipeline_statistics_query(
4377 &*self.parent.context,
4378 &mut self.id,
4379 self.data.as_mut(),
4380 );
4381 }
4382}
4383
4384impl<'a> Drop for RenderPass<'a> {
4385 fn drop(&mut self) {
4386 if !thread::panicking() {
4387 let parent_id = self.parent.id.as_ref().unwrap();
4388 self.parent.context.command_encoder_end_render_pass(
4389 parent_id,
4390 self.parent.data.as_ref(),
4391 &mut self.id,
4392 self.data.as_mut(),
4393 );
4394 }
4395 }
4396}
4397
4398impl<'a> ComputePass<'a> {
    /// Sets the active bind group for a given bind group index. The bind group layout
    /// in the active pipeline when any `dispatch_workgroups*()` method is called must match the layout of this bind group.
    ///
    /// If the bind group has dynamic offsets, provide them in binding order.
4403 /// These offsets have to be aligned to [`Limits::min_uniform_buffer_offset_alignment`]
4404 /// or [`Limits::min_storage_buffer_offset_alignment`] appropriately.
4405 pub fn set_bind_group(
4406 &mut self,
4407 index: u32,
4408 bind_group: &'a BindGroup,
4409 offsets: &[DynamicOffset],
4410 ) {
4411 DynContext::compute_pass_set_bind_group(
4412 &*self.parent.context,
4413 &mut self.id,
4414 self.data.as_mut(),
4415 index,
4416 &bind_group.id,
4417 bind_group.data.as_ref(),
4418 offsets,
4419 );
4420 }
4421
4422 /// Sets the active compute pipeline.
4423 pub fn set_pipeline(&mut self, pipeline: &'a ComputePipeline) {
4424 DynContext::compute_pass_set_pipeline(
4425 &*self.parent.context,
4426 &mut self.id,
4427 self.data.as_mut(),
4428 &pipeline.id,
4429 pipeline.data.as_ref(),
4430 );
4431 }
4432
4433 /// Inserts debug marker.
4434 pub fn insert_debug_marker(&mut self, label: &str) {
4435 DynContext::compute_pass_insert_debug_marker(
4436 &*self.parent.context,
4437 &mut self.id,
4438 self.data.as_mut(),
4439 label,
4440 );
4441 }
4442
    /// Begins a new debug marker group; subsequent commands are grouped under it until the matching `pop_debug_group()` call.
4444 pub fn push_debug_group(&mut self, label: &str) {
4445 DynContext::compute_pass_push_debug_group(
4446 &*self.parent.context,
4447 &mut self.id,
4448 self.data.as_mut(),
4449 label,
4450 );
4451 }
4452
    /// Ends the most recently pushed debug marker group.
4454 pub fn pop_debug_group(&mut self) {
4455 DynContext::compute_pass_pop_debug_group(
4456 &*self.parent.context,
4457 &mut self.id,
4458 self.data.as_mut(),
4459 );
4460 }
4461
4462 /// Dispatches compute work operations.
4463 ///
4464 /// `x`, `y` and `z` denote the number of work groups to dispatch in each dimension.
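    ///
    /// For example, to cover a hypothetical `width` x `height` image with a shader whose
    /// `@workgroup_size` is `8, 8` (a sketch; the divisions round up):
    ///
    /// ```rust ignore
    /// compute_pass.dispatch_workgroups(width.div_ceil(8), height.div_ceil(8), 1);
    /// ```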
4465 pub fn dispatch_workgroups(&mut self, x: u32, y: u32, z: u32) {
4466 DynContext::compute_pass_dispatch_workgroups(
4467 &*self.parent.context,
4468 &mut self.id,
4469 self.data.as_mut(),
4470 x,
4471 y,
4472 z,
4473 );
4474 }
4475
4476 /// Dispatches compute work operations, based on the contents of the `indirect_buffer`.
4477 ///
4478 /// The structure expected in `indirect_buffer` must conform to [`DispatchIndirectArgs`](crate::util::DispatchIndirectArgs).
4479 pub fn dispatch_workgroups_indirect(
4480 &mut self,
4481 indirect_buffer: &'a Buffer,
4482 indirect_offset: BufferAddress,
4483 ) {
4484 DynContext::compute_pass_dispatch_workgroups_indirect(
4485 &*self.parent.context,
4486 &mut self.id,
4487 self.data.as_mut(),
4488 &indirect_buffer.id,
4489 indirect_buffer.data.as_ref(),
4490 indirect_offset,
4491 );
4492 }
4493}
4494
4495/// [`Features::PUSH_CONSTANTS`] must be enabled on the device in order to call these functions.
4496impl<'a> ComputePass<'a> {
4497 /// Set push constant data for subsequent dispatch calls.
4498 ///
4499 /// Write the bytes in `data` at offset `offset` within push constant
4500 /// storage. Both `offset` and the length of `data` must be
4501 /// multiples of [`PUSH_CONSTANT_ALIGNMENT`], which is always 4.
4502 ///
4503 /// For example, if `offset` is `4` and `data` is eight bytes long, this
4504 /// call will write `data` to bytes `4..12` of push constant storage.
4505 pub fn set_push_constants(&mut self, offset: u32, data: &[u8]) {
4506 DynContext::compute_pass_set_push_constants(
4507 &*self.parent.context,
4508 &mut self.id,
4509 self.data.as_mut(),
4510 offset,
4511 data,
4512 );
4513 }
4514}
4515
4516/// [`Features::TIMESTAMP_QUERY_INSIDE_PASSES`] must be enabled on the device in order to call these functions.
4517impl<'a> ComputePass<'a> {
4518 /// Issue a timestamp command at this point in the queue. The timestamp will be written to the specified query set, at the specified index.
4519 ///
4520 /// Must be multiplied by [`Queue::get_timestamp_period`] to get
4521 /// the value in nanoseconds. Absolute values have no meaning,
4522 /// but timestamps can be subtracted to get the time it takes
4523 /// for a string of operations to complete.
4524 pub fn write_timestamp(&mut self, query_set: &QuerySet, query_index: u32) {
4525 DynContext::compute_pass_write_timestamp(
4526 &*self.parent.context,
4527 &mut self.id,
4528 self.data.as_mut(),
4529 &query_set.id,
4530 query_set.data.as_ref(),
4531 query_index,
4532 )
4533 }
4534}
4535
4536/// [`Features::PIPELINE_STATISTICS_QUERY`] must be enabled on the device in order to call these functions.
4537impl<'a> ComputePass<'a> {
4538 /// Start a pipeline statistics query on this compute pass. It can be ended with
4539 /// `end_pipeline_statistics_query`. Pipeline statistics queries may not be nested.
4540 pub fn begin_pipeline_statistics_query(&mut self, query_set: &QuerySet, query_index: u32) {
4541 DynContext::compute_pass_begin_pipeline_statistics_query(
4542 &*self.parent.context,
4543 &mut self.id,
4544 self.data.as_mut(),
4545 &query_set.id,
4546 query_set.data.as_ref(),
4547 query_index,
4548 );
4549 }
4550
4551 /// End the pipeline statistics query on this compute pass. It can be started with
4552 /// `begin_pipeline_statistics_query`. Pipeline statistics queries may not be nested.
4553 pub fn end_pipeline_statistics_query(&mut self) {
4554 DynContext::compute_pass_end_pipeline_statistics_query(
4555 &*self.parent.context,
4556 &mut self.id,
4557 self.data.as_mut(),
4558 );
4559 }
4560}
4561
4562impl<'a> Drop for ComputePass<'a> {
4563 fn drop(&mut self) {
4564 if !thread::panicking() {
4565 let parent_id = self.parent.id.as_ref().unwrap();
4566 self.parent.context.command_encoder_end_compute_pass(
4567 parent_id,
4568 self.parent.data.as_ref(),
4569 &mut self.id,
4570 self.data.as_mut(),
4571 );
4572 }
4573 }
4574}
4575
4576impl<'a> RenderBundleEncoder<'a> {
4577 /// Finishes recording and returns a [`RenderBundle`] that can be executed in other render passes.
4578 pub fn finish(self, desc: &RenderBundleDescriptor<'_>) -> RenderBundle {
4579 let (id, data) =
4580 DynContext::render_bundle_encoder_finish(&*self.context, self.id, self.data, desc);
4581 RenderBundle {
4582 context: Arc::clone(&self.context),
4583 id,
4584 data,
4585 }
4586 }
4587
4588 /// Sets the active bind group for a given bind group index. The bind group layout
4589 /// in the active pipeline when any `draw()` function is called must match the layout of this bind group.
4590 ///
4591 /// If the bind group have dynamic offsets, provide them in the binding order.
4592 pub fn set_bind_group(
4593 &mut self,
4594 index: u32,
4595 bind_group: &'a BindGroup,
4596 offsets: &[DynamicOffset],
4597 ) {
4598 DynContext::render_bundle_encoder_set_bind_group(
4599 &*self.parent.context,
4600 &mut self.id,
4601 self.data.as_mut(),
4602 index,
4603 &bind_group.id,
4604 bind_group.data.as_ref(),
4605 offsets,
4606 )
4607 }
4608
4609 /// Sets the active render pipeline.
4610 ///
4611 /// Subsequent draw calls will exhibit the behavior defined by `pipeline`.
4612 pub fn set_pipeline(&mut self, pipeline: &'a RenderPipeline) {
4613 DynContext::render_bundle_encoder_set_pipeline(
4614 &*self.parent.context,
4615 &mut self.id,
4616 self.data.as_mut(),
4617 &pipeline.id,
4618 pipeline.data.as_ref(),
4619 )
4620 }
4621
4622 /// Sets the active index buffer.
4623 ///
4624 /// Subsequent calls to [`draw_indexed`](RenderBundleEncoder::draw_indexed) on this [`RenderBundleEncoder`] will
4625 /// use `buffer` as the source index buffer.
4626 pub fn set_index_buffer(&mut self, buffer_slice: BufferSlice<'a>, index_format: IndexFormat) {
4627 DynContext::render_bundle_encoder_set_index_buffer(
4628 &*self.parent.context,
4629 &mut self.id,
4630 self.data.as_mut(),
4631 &buffer_slice.buffer.id,
4632 buffer_slice.buffer.data.as_ref(),
4633 index_format,
4634 buffer_slice.offset,
4635 buffer_slice.size,
4636 )
4637 }
4638
4639 /// Assign a vertex buffer to a slot.
4640 ///
4641 /// Subsequent calls to [`draw`] and [`draw_indexed`] on this
4642 /// [`RenderBundleEncoder`] will use `buffer` as one of the source vertex buffers.
4643 ///
4644 /// The `slot` refers to the index of the matching descriptor in
4645 /// [`VertexState::buffers`].
4646 ///
4647 /// [`draw`]: RenderBundleEncoder::draw
4648 /// [`draw_indexed`]: RenderBundleEncoder::draw_indexed
4649 pub fn set_vertex_buffer(&mut self, slot: u32, buffer_slice: BufferSlice<'a>) {
4650 DynContext::render_bundle_encoder_set_vertex_buffer(
4651 &*self.parent.context,
4652 &mut self.id,
4653 self.data.as_mut(),
4654 slot,
4655 &buffer_slice.buffer.id,
4656 buffer_slice.buffer.data.as_ref(),
4657 buffer_slice.offset,
4658 buffer_slice.size,
4659 )
4660 }
4661
4662 /// Draws primitives from the active vertex buffer(s).
4663 ///
4664 /// The active vertex buffers can be set with [`RenderBundleEncoder::set_vertex_buffer`].
4665 /// This call does not use an index buffer. If you need indexed drawing, see [`RenderBundleEncoder::draw_indexed`].
4666 ///
4667 /// Panics if the `vertices` range exceeds the range of vertices in any bound vertex buffer.
4668 ///
4669 /// `vertices`: the range of vertices to draw.
4670 /// `instances`: the range of instances to draw. Use `0..1` if instance buffers are not used.
4671 /// Conceptually, the draw is executed like this internally:
4672 /// ```rust ignore
4673 /// for instance_id in instance_range {
4674 /// for vertex_id in vertex_range {
4675 /// let vertex = vertex[vertex_id];
4676 /// vertex_shader(vertex, vertex_id, instance_id);
4677 /// }
4678 /// }
4679 /// ```
4680 pub fn draw(&mut self, vertices: Range<u32>, instances: Range<u32>) {
4681 DynContext::render_bundle_encoder_draw(
4682 &*self.parent.context,
4683 &mut self.id,
4684 self.data.as_mut(),
4685 vertices,
4686 instances,
4687 )
4688 }
4689
4690 /// Draws indexed primitives using the active index buffer and the active vertex buffer(s).
4691 ///
4692 /// The active index buffer can be set with [`RenderBundleEncoder::set_index_buffer`].
4693 /// The active vertex buffer(s) can be set with [`RenderBundleEncoder::set_vertex_buffer`].
4694 ///
4695 /// Panics if the `indices` range exceeds the range of indices in the bound index buffer.
4696 ///
4697 /// `indices`: the range of indices to draw.
4698 /// `base_vertex`: value added to each index before indexing into the vertex buffers.
4699 /// `instances`: the range of instances to draw. Use `0..1` if instance buffers are not used.
4700 /// Conceptually, the draw is executed like this internally:
4701 /// ```rust ignore
4702 /// for instance_id in instance_range {
4703 /// for index_index in index_range {
4704 /// let vertex_id = index_buffer[index_index];
4705 /// let adjusted_vertex_id = vertex_id + base_vertex;
4706 /// let vertex = vertex[adjusted_vertex_id];
4707 /// vertex_shader(vertex, adjusted_vertex_id, instance_id);
4708 /// }
4709 /// }
4710 /// ```
4711 pub fn draw_indexed(&mut self, indices: Range<u32>, base_vertex: i32, instances: Range<u32>) {
4712 DynContext::render_bundle_encoder_draw_indexed(
4713 &*self.parent.context,
4714 &mut self.id,
4715 self.data.as_mut(),
4716 indices,
4717 base_vertex,
4718 instances,
4719 );
4720 }
4721
4722 /// Draws primitives from the active vertex buffer(s) based on the contents of the `indirect_buffer`.
4723 ///
4724 /// The active vertex buffers can be set with [`RenderBundleEncoder::set_vertex_buffer`].
4725 ///
4726 /// The structure expected in `indirect_buffer` must conform to [`DrawIndirectArgs`](crate::util::DrawIndirectArgs).
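///
/// A sketch of filling such a buffer by hand. The argument layout is four `u32`s in the
/// field order of [`DrawIndirectArgs`](crate::util::DrawIndirectArgs); `queue`,
/// `indirect_buffer`, and the chosen counts are assumptions for illustration.
///
/// ```rust ignore
/// // vertex_count, instance_count, first_vertex, first_instance
/// let args = [3u32, 1, 0, 0];
/// let bytes: Vec<u8> = args.iter().flat_map(|v| v.to_ne_bytes()).collect();
/// // `indirect_buffer` is assumed to be created with BufferUsages::INDIRECT | BufferUsages::COPY_DST.
/// queue.write_buffer(&indirect_buffer, 0, &bytes);
/// encoder.draw_indirect(&indirect_buffer, 0);
/// ```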
4727 pub fn draw_indirect(&mut self, indirect_buffer: &'a Buffer, indirect_offset: BufferAddress) {
4728 DynContext::render_bundle_encoder_draw_indirect(
4729 &*self.parent.context,
4730 &mut self.id,
4731 self.data.as_mut(),
4732 &indirect_buffer.id,
4733 indirect_buffer.data.as_ref(),
4734 indirect_offset,
4735 );
4736 }
4737
4738 /// Draws indexed primitives using the active index buffer and the active vertex buffers,
4739 /// based on the contents of the `indirect_buffer`.
4740 ///
4741 /// The active index buffer can be set with [`RenderBundleEncoder::set_index_buffer`], while the active
4742 /// vertex buffers can be set with [`RenderBundleEncoder::set_vertex_buffer`].
4743 ///
4744 /// The structure expected in `indirect_buffer` must conform to [`DrawIndexedIndirectArgs`](crate::util::DrawIndexedIndirectArgs).
4745 pub fn draw_indexed_indirect(
4746 &mut self,
4747 indirect_buffer: &'a Buffer,
4748 indirect_offset: BufferAddress,
4749 ) {
4750 DynContext::render_bundle_encoder_draw_indexed_indirect(
4751 &*self.parent.context,
4752 &mut self.id,
4753 self.data.as_mut(),
4754 &indirect_buffer.id,
4755 indirect_buffer.data.as_ref(),
4756 indirect_offset,
4757 );
4758 }
4759}
4760
4761/// [`Features::PUSH_CONSTANTS`] must be enabled on the device in order to call these functions.
4762impl<'a> RenderBundleEncoder<'a> {
4763 /// Set push constant data.
4764 ///
4765 /// Offset is measured in bytes and must be a multiple of [`PUSH_CONSTANT_ALIGNMENT`].
4766 ///
4767 /// Data size must be a multiple of 4 and must have an alignment of 4.
4768 /// For example, with an offset of 4 and an array of `[u8; 8]`, the bytes 4..12 of push
4769 /// constant storage will be written.
4770 ///
4771 /// For each byte in the range of push constant data written, the union of the stages of all push constant
4772 /// ranges that cover that byte must be exactly `stages`. There's no good way of explaining this simply,
4773 /// so here are some examples:
4774 ///
4775 /// ```text
4776 /// For the given ranges:
4777 /// - 0..4 Vertex
4778 /// - 4..8 Fragment
4779 /// ```
4780 ///
4781 /// You would need to upload this in two `set_push_constants` calls: first for the `Vertex` range, second for the `Fragment` range.
4782 ///
4783 /// ```text
4784 /// For the given ranges:
4785 /// - 0..8 Vertex
4786 /// - 4..12 Fragment
4787 /// ```
4788 ///
4789 /// You would need to upload this in three `set_push_constants` calls: first for the `Vertex`-only range 0..4, second
4790 /// for the `Vertex | Fragment` range 4..8, third for the `Fragment` range 8..12.
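///
/// A sketch of the second layout above, assuming a pipeline layout that declares those two
/// ranges and a 12-byte `data` slice prepared elsewhere:
///
/// ```rust ignore
/// encoder.set_push_constants(wgpu::ShaderStages::VERTEX, 0, &data[0..4]);
/// encoder.set_push_constants(
///     wgpu::ShaderStages::VERTEX | wgpu::ShaderStages::FRAGMENT,
///     4,
///     &data[4..8],
/// );
/// encoder.set_push_constants(wgpu::ShaderStages::FRAGMENT, 8, &data[8..12]);
/// ```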
4791 pub fn set_push_constants(&mut self, stages: ShaderStages, offset: u32, data: &[u8]) {
4792 DynContext::render_bundle_encoder_set_push_constants(
4793 &*self.parent.context,
4794 &mut self.id,
4795 self.data.as_mut(),
4796 stages,
4797 offset,
4798 data,
4799 );
4800 }
4801}
4802
4803/// A write-only view into a staging buffer.
4804///
4805 /// Reading from this view won't yield the contents of the buffer from the
4806/// GPU and is likely to be slow. Because of this, although [`AsMut`] is
4807/// implemented for this type, [`AsRef`] is not.
4808pub struct QueueWriteBufferView<'a> {
4809 queue: &'a Queue,
4810 buffer: &'a Buffer,
4811 offset: BufferAddress,
4812 inner: Box<dyn context::QueueWriteBuffer>,
4813}
4814#[cfg(send_sync)]
4815static_assertions::assert_impl_all!(QueueWriteBufferView<'_>: Send, Sync);
4816
4817impl Deref for QueueWriteBufferView<'_> {
4818 type Target = [u8];
4819
4820 fn deref(&self) -> &Self::Target {
4821 log::warn!("Reading from a QueueWriteBufferView won't yield the contents of the buffer and may be slow.");
4822 self.inner.slice()
4823 }
4824}
4825
4826impl DerefMut for QueueWriteBufferView<'_> {
4827 fn deref_mut(&mut self) -> &mut Self::Target {
4828 self.inner.slice_mut()
4829 }
4830}
4831
4832impl<'a> AsMut<[u8]> for QueueWriteBufferView<'a> {
4833 fn as_mut(&mut self) -> &mut [u8] {
4834 self.inner.slice_mut()
4835 }
4836}
4837
4838impl<'a> Drop for QueueWriteBufferView<'a> {
4839 fn drop(&mut self) {
4840 DynContext::queue_write_staging_buffer(
4841 &*self.queue.context,
4842 &self.queue.id,
4843 self.queue.data.as_ref(),
4844 &self.buffer.id,
4845 self.buffer.data.as_ref(),
4846 self.offset,
4847 &*self.inner,
4848 );
4849 }
4850}
4851
4852impl Queue {
4853 /// Schedule a data write into `buffer` starting at `offset`.
4854 ///
4855 /// This method is intended to have low performance costs.
4856 /// As such, the write is not immediately submitted, and instead enqueued
4857 /// internally to happen at the start of the next `submit()` call.
4858 ///
4859 /// This method fails if `data` overruns the size of `buffer` starting at `offset`.
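///
/// A minimal sketch, assuming `buffer` was created with [`BufferUsages::COPY_DST`]
/// (the offset and data length must be multiples of [`COPY_BUFFER_ALIGNMENT`]):
///
/// ```rust ignore
/// // Stage a 4-byte write at offset 0; it is submitted with the next `queue.submit(..)`.
/// queue.write_buffer(&buffer, 0, &[1u8, 2, 3, 4]);
/// ```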
4860 pub fn write_buffer(&self, buffer: &Buffer, offset: BufferAddress, data: &[u8]) {
4861 DynContext::queue_write_buffer(
4862 &*self.context,
4863 &self.id,
4864 self.data.as_ref(),
4865 &buffer.id,
4866 buffer.data.as_ref(),
4867 offset,
4868 data,
4869 )
4870 }
4871
4872 /// Schedule a data write into `buffer` starting at `offset` via the returned
4873 /// [`QueueWriteBufferView`].
4874 ///
4875 /// Reading from this buffer is slow and will not yield the actual contents of the buffer.
4876 ///
4877 /// This method is intended to have low performance costs.
4878 /// As such, the write is not immediately submitted, and instead enqueued
4879 /// internally to happen at the start of the next `submit()` call.
4880 ///
4881 /// This method fails if `size` is greater than the size of `buffer` starting at `offset`.
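///
/// A minimal sketch, assuming `buffer` was created with [`BufferUsages::COPY_DST`]:
///
/// ```rust ignore
/// if let Some(mut view) = queue.write_buffer_with(&buffer, 0, wgpu::BufferSize::new(8).unwrap()) {
///     view.copy_from_slice(&[0u8; 8]); // the data is uploaded when `view` is dropped
/// }
/// ```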
4882 #[must_use]
4883 pub fn write_buffer_with<'a>(
4884 &'a self,
4885 buffer: &'a Buffer,
4886 offset: BufferAddress,
4887 size: BufferSize,
4888 ) -> Option<QueueWriteBufferView<'a>> {
4889 profiling::scope!("Queue::write_buffer_with");
4890 DynContext::queue_validate_write_buffer(
4891 &*self.context,
4892 &self.id,
4893 self.data.as_ref(),
4894 &buffer.id,
4895 buffer.data.as_ref(),
4896 offset,
4897 size,
4898 )?;
4899 let staging_buffer = DynContext::queue_create_staging_buffer(
4900 &*self.context,
4901 &self.id,
4902 self.data.as_ref(),
4903 size,
4904 )?;
4905 Some(QueueWriteBufferView {
4906 queue: self,
4907 buffer,
4908 offset,
4909 inner: staging_buffer,
4910 })
4911 }
4912
4913 /// Schedule a write of some data into a texture.
4914 ///
4915 /// * `data` contains the texels to be written, which must be in
4916 /// [the same format as the texture](TextureFormat).
4917 /// * `data_layout` describes the memory layout of `data`, whose rows do not have to be
4918 /// tightly packed.
4919 /// * `texture` specifies the texture to write into, and the location within the
4920 /// texture (coordinate offset, mip level) that will be overwritten.
4921 /// * `size` is the size, in texels, of the region to be written.
4922 ///
4923 /// This method is intended to have low performance costs.
4924 /// As such, the write is not immediately submitted, and instead enqueued
4925 /// internally to happen at the start of the next `submit()` call.
4926 /// However, `data` will be immediately copied into staging memory; so the caller may
4927 /// discard it any time after this call completes.
4928 ///
4929 /// This method fails if `size` overruns the size of `texture`, or if `data` is too short.
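///
/// A sketch of uploading a tightly packed 256x256 RGBA8 image. The `texture` and `pixels`
/// names are assumptions; `pixels` must hold at least 256 * 256 * 4 bytes.
///
/// ```rust ignore
/// queue.write_texture(
///     wgpu::ImageCopyTexture {
///         texture: &texture,
///         mip_level: 0,
///         origin: wgpu::Origin3d::ZERO,
///         aspect: wgpu::TextureAspect::All,
///     },
///     &pixels,
///     wgpu::ImageDataLayout {
///         offset: 0,
///         bytes_per_row: Some(256 * 4),
///         rows_per_image: Some(256),
///     },
///     wgpu::Extent3d {
///         width: 256,
///         height: 256,
///         depth_or_array_layers: 1,
///     },
/// );
/// ```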
4930 pub fn write_texture(
4931 &self,
4932 texture: ImageCopyTexture<'_>,
4933 data: &[u8],
4934 data_layout: ImageDataLayout,
4935 size: Extent3d,
4936 ) {
4937 DynContext::queue_write_texture(
4938 &*self.context,
4939 &self.id,
4940 self.data.as_ref(),
4941 texture,
4942 data,
4943 data_layout,
4944 size,
4945 )
4946 }
4947
4948 /// Schedule a copy of data from the external image `source` into the texture `dest`.
4949 #[cfg(any(webgpu, webgl))]
4950 pub fn copy_external_image_to_texture(
4951 &self,
4952 source: &wgt::ImageCopyExternalImage,
4953 dest: ImageCopyTextureTagged<'_>,
4954 size: Extent3d,
4955 ) {
4956 DynContext::queue_copy_external_image_to_texture(
4957 &*self.context,
4958 &self.id,
4959 self.data.as_ref(),
4960 source,
4961 dest,
4962 size,
4963 )
4964 }
4965
4966 /// Submits a series of finished command buffers for execution.
4967 pub fn submit<I: IntoIterator<Item = CommandBuffer>>(
4968 &self,
4969 command_buffers: I,
4970 ) -> SubmissionIndex {
4971 let mut command_buffers = command_buffers
4972 .into_iter()
4973 .map(|mut comb| (comb.id.take().unwrap(), comb.data.take().unwrap()));
4974
4975 let (raw, data) = DynContext::queue_submit(
4976 &*self.context,
4977 &self.id,
4978 self.data.as_ref(),
4979 &mut command_buffers,
4980 );
4981
4982 SubmissionIndex(raw, data)
4983 }
4984
4985 /// Returns the number of nanoseconds each tick of a timestamp query represents.
4986 ///
4987 /// Returns zero if timestamp queries are unsupported.
4988 ///
4989 /// Timestamps are expressed in nanoseconds on WebGPU (see <https://gpuweb.github.io/gpuweb/#timestamp>),
4990 /// so this is always 1.0 on the web; on wgpu-core a manual conversion is required.
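///
/// A sketch of converting resolved timestamp ticks into a duration, assuming `start_ticks`
/// and `end_ticks` were read back from a resolved timestamp [`QuerySet`]:
///
/// ```rust ignore
/// let period_ns = queue.get_timestamp_period() as f64; // nanoseconds per tick
/// let elapsed_ns = (end_ticks - start_ticks) as f64 * period_ns;
/// println!("pass took {:.3} ms", elapsed_ns / 1_000_000.0);
/// ```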
4991 pub fn get_timestamp_period(&self) -> f32 {
4992 DynContext::queue_get_timestamp_period(&*self.context, &self.id, self.data.as_ref())
4993 }
4994
4995 /// Registers a callback to be invoked once the previous call to `submit` finishes running on the GPU.
4996 /// When the callback fires, all mapped-buffer callbacks that were registered before this call are
4997 /// guaranteed to have been called.
4998 ///
4999 /// For the callback to complete, either `queue.submit(..)`, `instance.poll_all(..)`, or `device.poll(..)`
5000 /// must be called elsewhere in the runtime, possibly integrated into an event loop or run on a separate thread.
5001 ///
5002 /// The callback will be called on the thread that first calls one of the above functions after the GPU work
5003 /// has completed. There are no restrictions on the code you can run in the callback; however, on native the
5004 /// polling call will not return until the callback finishes, so prefer to keep callbacks short and use them
5005 /// to set flags, send messages, and the like.
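///
/// A minimal sketch that blocks until the previous submission has finished on the GPU, using a
/// standard channel (`command_buffer`, `device`, and `queue` are assumed to exist elsewhere):
///
/// ```rust ignore
/// queue.submit(Some(command_buffer));
/// let (sender, receiver) = std::sync::mpsc::channel();
/// queue.on_submitted_work_done(move || {
///     let _ = sender.send(());
/// });
/// device.poll(wgpu::Maintain::Wait); // something must drive the callback on native
/// receiver.recv().unwrap();
/// ```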
5006 pub fn on_submitted_work_done(&self, callback: impl FnOnce() + Send + 'static) {
5007 DynContext::queue_on_submitted_work_done(
5008 &*self.context,
5009 &self.id,
5010 self.data.as_ref(),
5011 Box::new(callback),
5012 )
5013 }
5014}
5015
5016impl SurfaceTexture {
5017 /// Schedule this texture to be presented on the owning surface.
5018 ///
5019 /// Needs to be called after any work on the texture is scheduled via [`Queue::submit`].
5020 ///
5021 /// # Platform-dependent behavior
5022 ///
5023 /// On Wayland, `present` will attach a `wl_buffer` to the underlying `wl_surface` and commit the new surface
5024 /// state. If it is desired to do things such as request a frame callback, scale the surface using the viewporter
5025 /// or synchronize other double buffered state, then these operations should be done before the call to `present`.
5026 pub fn present(mut self) {
5027 self.presented = true;
5028 DynContext::surface_present(
5029 &*self.texture.context,
5030 &self.texture.id,
5031 // This call to as_ref is essential because we want the DynContext implementation to see the inner
5032 // value of the Box (T::SurfaceOutputDetail), not the Box itself.
5033 self.detail.as_ref(),
5034 );
5035 }
5036}
5037
5038impl Drop for SurfaceTexture {
5039 fn drop(&mut self) {
5040 if !self.presented && !thread::panicking() {
5041 DynContext::surface_texture_discard(
5042 &*self.texture.context,
5043 &self.texture.id,
5044 // This call to as_ref is essential because we want the DynContext implementation to see the inner
5045 // value of the Box (T::SurfaceOutputDetail), not the Box itself.
5046 self.detail.as_ref(),
5047 );
5048 }
5049 }
5050}
5051
5052impl Surface<'_> {
5053 /// Returns the capabilities of the surface when used with the given adapter.
5054 ///
5055 /// Returns specified values (see [`SurfaceCapabilities`]) if the surface is incompatible with the adapter.
5056 pub fn get_capabilities(&self, adapter: &Adapter) -> SurfaceCapabilities {
5057 DynContext::surface_get_capabilities(
5058 &*self.context,
5059 &self.id,
5060 self.surface_data.as_ref(),
5061 &adapter.id,
5062 adapter.data.as_ref(),
5063 )
5064 }
5065
5066 /// Returns a default `SurfaceConfiguration` for the given width and height, to use for the [`Surface`] with this adapter.
5067 ///
5068 /// Returns `None` if the surface isn't supported by this adapter.
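///
/// A minimal sketch, assuming `surface`, `adapter`, `device`, and a window size obtained
/// elsewhere:
///
/// ```rust ignore
/// let mut config = surface
///     .get_default_config(&adapter, width, height)
///     .expect("surface not supported by this adapter");
/// config.present_mode = wgpu::PresentMode::Fifo; // optional: tweak before configuring
/// surface.configure(&device, &config);
/// ```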
5069 pub fn get_default_config(
5070 &self,
5071 adapter: &Adapter,
5072 width: u32,
5073 height: u32,
5074 ) -> Option<SurfaceConfiguration> {
5075 let caps = self.get_capabilities(adapter);
5076 Some(SurfaceConfiguration {
5077 usage: wgt::TextureUsages::RENDER_ATTACHMENT,
5078 format: *caps.formats.first()?,
5079 width,
5080 height,
5081 desired_maximum_frame_latency: 2,
5082 present_mode: *caps.present_modes.first()?,
5083 alpha_mode: wgt::CompositeAlphaMode::Auto,
5084 view_formats: vec![],
5085 })
5086 }
5087
5088 /// Initializes [`Surface`] for presentation.
5089 ///
5090 /// # Panics
5091 ///
5092 /// - An old [`SurfaceTexture`] is still alive referencing an old surface.
5093 /// - The requested texture format is unsupported by the surface.
5094 /// - `config.width` or `config.height` is zero.
5095 pub fn configure(&self, device: &Device, config: &SurfaceConfiguration) {
5096 DynContext::surface_configure(
5097 &*self.context,
5098 &self.id,
5099 self.surface_data.as_ref(),
5100 &device.id,
5101 device.data.as_ref(),
5102 config,
5103 );
5104
5105 let mut conf = self.config.lock();
5106 *conf = Some(config.clone());
5107 }
5108
5109 /// Returns the next texture to be presented by the swapchain for drawing.
5110 ///
5111 /// In order to present the [`SurfaceTexture`] returned by this method,
5112 /// first submit some work rendering to this texture via [`Queue::submit`],
5113 /// then call [`SurfaceTexture::present`].
5114 ///
5115 /// If a [`SurfaceTexture`] referencing this surface is still alive when the swapchain is recreated,
5116 /// recreating the swapchain will panic.
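///
/// A sketch of a typical frame, assuming the surface has already been configured and that
/// `device` and `queue` exist; error handling is elided:
///
/// ```rust ignore
/// let frame = surface.get_current_texture().expect("failed to acquire surface texture");
/// let view = frame.texture.create_view(&wgpu::TextureViewDescriptor::default());
/// let mut encoder = device.create_command_encoder(&wgpu::CommandEncoderDescriptor::default());
/// {
///     let _render_pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
///         label: None,
///         color_attachments: &[Some(wgpu::RenderPassColorAttachment {
///             view: &view,
///             resolve_target: None,
///             ops: wgpu::Operations {
///                 load: wgpu::LoadOp::Clear(wgpu::Color::BLACK),
///                 store: wgpu::StoreOp::Store,
///             },
///         })],
///         depth_stencil_attachment: None,
///         timestamp_writes: None,
///         occlusion_query_set: None,
///     });
///     // record draws here
/// }
/// queue.submit(Some(encoder.finish()));
/// frame.present();
/// ```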
5117 pub fn get_current_texture(&self) -> Result<SurfaceTexture, SurfaceError> {
5118 let (texture_id, texture_data, status, detail) = DynContext::surface_get_current_texture(
5119 &*self.context,
5120 &self.id,
5121 self.surface_data.as_ref(),
5122 );
5123
5124 let suboptimal = match status {
5125 SurfaceStatus::Good => false,
5126 SurfaceStatus::Suboptimal => true,
5127 SurfaceStatus::Timeout => return Err(SurfaceError::Timeout),
5128 SurfaceStatus::Outdated => return Err(SurfaceError::Outdated),
5129 SurfaceStatus::Lost => return Err(SurfaceError::Lost),
5130 };
5131
5132 let guard = self.config.lock();
5133 let config = guard
5134 .as_ref()
5135 .expect("This surface has not been configured yet.");
5136
5137 let descriptor = TextureDescriptor {
5138 label: None,
5139 size: Extent3d {
5140 width: config.width,
5141 height: config.height,
5142 depth_or_array_layers: 1,
5143 },
5144 format: config.format,
5145 usage: config.usage,
5146 mip_level_count: 1,
5147 sample_count: 1,
5148 dimension: TextureDimension::D2,
5149 view_formats: &[],
5150 };
5151
5152 texture_id
5153 .zip(texture_data)
5154 .map(|(id, data)| SurfaceTexture {
5155 texture: Texture {
5156 context: Arc::clone(&self.context),
5157 id,
5158 data,
5159 owned: false,
5160 descriptor,
5161 },
5162 suboptimal,
5163 presented: false,
5164 detail,
5165 })
5166 .ok_or(SurfaceError::Lost)
5167 }
5168
5169 /// Returns the inner hal Surface using a callback. The hal surface will be `None` if the
5170 /// backend type argument does not match this wgpu Surface.
5171 ///
5172 /// # Safety
5173 ///
5174 /// - The raw handle obtained from the hal Surface must not be manually destroyed
5175 #[cfg(wgpu_core)]
5176 pub unsafe fn as_hal<A: wgc::hal_api::HalApi, F: FnOnce(Option<&A::Surface>) -> R, R>(
5177 &mut self,
5178 hal_surface_callback: F,
5179 ) -> Option<R> {
5180 self.context
5181 .as_any()
5182 .downcast_ref::<crate::backend::ContextWgpuCore>()
5183 .map(|ctx| unsafe {
5184 ctx.surface_as_hal::<A, F, R>(
5185 self.surface_data.downcast_ref().unwrap(),
5186 hal_surface_callback,
5187 )
5188 })
5189 }
5190}
5191
5192/// Opaque globally-unique identifier
5193#[repr(transparent)]
5194pub struct Id<T>(NonZeroU64, PhantomData<*mut T>);
5195
5196impl<T> Id<T> {
5197 /// For testing use only. We provide no guarantees about the actual value of the ids.
5198 #[doc(hidden)]
5199 pub fn inner(&self) -> u64 {
5200 self.0.get()
5201 }
5202}
5203
5204 // SAFETY: `Id` is a bare `NonZeroU64`; the type parameter is a marker purely to avoid confusing Ids
5205 // returned for different types, so `Id` can safely implement Send and Sync.
5206unsafe impl<T> Send for Id<T> {}
5207
5208// SAFETY: See the implementation for `Send`.
5209unsafe impl<T> Sync for Id<T> {}
5210
5211impl<T> Clone for Id<T> {
5212 fn clone(&self) -> Self {
5213 *self
5214 }
5215}
5216
5217impl<T> Copy for Id<T> {}
5218
5219impl<T> fmt::Debug for Id<T> {
5220 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
5221 f.debug_tuple("Id").field(&self.0).finish()
5222 }
5223}
5224
5225impl<T> PartialEq for Id<T> {
5226 fn eq(&self, other: &Id<T>) -> bool {
5227 self.0 == other.0
5228 }
5229}
5230
5231impl<T> Eq for Id<T> {}
5232
5233impl<T> PartialOrd for Id<T> {
5234 fn partial_cmp(&self, other: &Id<T>) -> Option<Ordering> {
5235 Some(self.cmp(other))
5236 }
5237}
5238
5239impl<T> Ord for Id<T> {
5240 fn cmp(&self, other: &Id<T>) -> Ordering {
5241 self.0.cmp(&other.0)
5242 }
5243}
5244
5245impl<T> std::hash::Hash for Id<T> {
5246 fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
5247 self.0.hash(state)
5248 }
5249}
5250
5251impl Adapter {
5252 /// Returns a globally-unique identifier for this `Adapter`.
5253 ///
5254 /// Calling this method multiple times on the same object will always return the same value.
5255 /// The returned value is guaranteed to be different for all resources created from the same `Instance`.
5256 pub fn global_id(&self) -> Id<Self> {
5257 Id(self.id.global_id(), PhantomData)
5258 }
5259}
5260
5261impl Device {
5262 /// Returns a globally-unique identifier for this `Device`.
5263 ///
5264 /// Calling this method multiple times on the same object will always return the same value.
5265 /// The returned value is guaranteed to be different for all resources created from the same `Instance`.
5266 pub fn global_id(&self) -> Id<Self> {
5267 Id(self.id.global_id(), PhantomData)
5268 }
5269}
5270
5271impl Queue {
5272 /// Returns a globally-unique identifier for this `Queue`.
5273 ///
5274 /// Calling this method multiple times on the same object will always return the same value.
5275 /// The returned value is guaranteed to be different for all resources created from the same `Instance`.
5276 pub fn global_id(&self) -> Id<Self> {
5277 Id(self.id.global_id(), PhantomData)
5278 }
5279}
5280
5281impl ShaderModule {
5282 /// Returns a globally-unique identifier for this `ShaderModule`.
5283 ///
5284 /// Calling this method multiple times on the same object will always return the same value.
5285 /// The returned value is guaranteed to be different for all resources created from the same `Instance`.
5286 pub fn global_id(&self) -> Id<Self> {
5287 Id(self.id.global_id(), PhantomData)
5288 }
5289}
5290
5291impl BindGroupLayout {
5292 /// Returns a globally-unique identifier for this `BindGroupLayout`.
5293 ///
5294 /// Calling this method multiple times on the same object will always return the same value.
5295 /// The returned value is guaranteed to be different for all resources created from the same `Instance`.
5296 pub fn global_id(&self) -> Id<Self> {
5297 Id(self.id.global_id(), PhantomData)
5298 }
5299}
5300
5301impl BindGroup {
5302 /// Returns a globally-unique identifier for this `BindGroup`.
5303 ///
5304 /// Calling this method multiple times on the same object will always return the same value.
5305 /// The returned value is guaranteed to be different for all resources created from the same `Instance`.
5306 pub fn global_id(&self) -> Id<Self> {
5307 Id(self.id.global_id(), PhantomData)
5308 }
5309}
5310
5311impl TextureView {
5312 /// Returns a globally-unique identifier for this `TextureView`.
5313 ///
5314 /// Calling this method multiple times on the same object will always return the same value.
5315 /// The returned value is guaranteed to be different for all resources created from the same `Instance`.
5316 pub fn global_id(&self) -> Id<Self> {
5317 Id(self.id.global_id(), PhantomData)
5318 }
5319
5320 /// Returns the inner hal TextureView using a callback. The hal texture view will be `None` if the
5321 /// backend type argument does not match this wgpu TextureView.
5322 ///
5323 /// # Safety
5324 ///
5325 /// - The raw handle obtained from the hal TextureView must not be manually destroyed
5326 #[cfg(wgpu_core)]
5327 pub unsafe fn as_hal<A: wgc::hal_api::HalApi, F: FnOnce(Option<&A::TextureView>) -> R, R>(
5328 &self,
5329 hal_texture_view_callback: F,
5330 ) -> R {
5331 use core::id::TextureViewId;
5332
5333 let texture_view_id = TextureViewId::from(self.id);
5334
5335 if let Some(ctx) = self
5336 .context
5337 .as_any()
5338 .downcast_ref::<crate::backend::ContextWgpuCore>()
5339 {
5340 unsafe {
5341 ctx.texture_view_as_hal::<A, F, R>(texture_view_id, hal_texture_view_callback)
5342 }
5343 } else {
5344 hal_texture_view_callback(None)
5345 }
5346 }
5347}
5348
5349impl Sampler {
5350 /// Returns a globally-unique identifier for this `Sampler`.
5351 ///
5352 /// Calling this method multiple times on the same object will always return the same value.
5353 /// The returned value is guaranteed to be different for all resources created from the same `Instance`.
5354 pub fn global_id(&self) -> Id<Self> {
5355 Id(self.id.global_id(), PhantomData)
5356 }
5357}
5358
5359impl Buffer {
5360 /// Returns a globally-unique identifier for this `Buffer`.
5361 ///
5362 /// Calling this method multiple times on the same object will always return the same value.
5363 /// The returned value is guaranteed to be different for all resources created from the same `Instance`.
5364 pub fn global_id(&self) -> Id<Self> {
5365 Id(self.id.global_id(), PhantomData)
5366 }
5367}
5368
5369impl Texture {
5370 /// Returns a globally-unique identifier for this `Texture`.
5371 ///
5372 /// Calling this method multiple times on the same object will always return the same value.
5373 /// The returned value is guaranteed to be different for all resources created from the same `Instance`.
5374 pub fn global_id(&self) -> Id<Self> {
5375 Id(self.id.global_id(), PhantomData)
5376 }
5377}
5378
5379impl QuerySet {
5380 /// Returns a globally-unique identifier for this `QuerySet`.
5381 ///
5382 /// Calling this method multiple times on the same object will always return the same value.
5383 /// The returned value is guaranteed to be different for all resources created from the same `Instance`.
5384 pub fn global_id(&self) -> Id<Self> {
5385 Id(self.id.global_id(), PhantomData)
5386 }
5387}
5388
5389impl PipelineLayout {
5390 /// Returns a globally-unique identifier for this `PipelineLayout`.
5391 ///
5392 /// Calling this method multiple times on the same object will always return the same value.
5393 /// The returned value is guaranteed to be different for all resources created from the same `Instance`.
5394 pub fn global_id(&self) -> Id<Self> {
5395 Id(self.id.global_id(), PhantomData)
5396 }
5397}
5398
5399impl RenderPipeline {
5400 /// Returns a globally-unique identifier for this `RenderPipeline`.
5401 ///
5402 /// Calling this method multiple times on the same object will always return the same value.
5403 /// The returned value is guaranteed to be different for all resources created from the same `Instance`.
5404 pub fn global_id(&self) -> Id<Self> {
5405 Id(self.id.global_id(), PhantomData)
5406 }
5407}
5408
5409impl ComputePipeline {
5410 /// Returns a globally-unique identifier for this `ComputePipeline`.
5411 ///
5412 /// Calling this method multiple times on the same object will always return the same value.
5413 /// The returned value is guaranteed to be different for all resources created from the same `Instance`.
5414 pub fn global_id(&self) -> Id<Self> {
5415 Id(self.id.global_id(), PhantomData)
5416 }
5417}
5418
5419impl RenderBundle {
5420 /// Returns a globally-unique identifier for this `RenderBundle`.
5421 ///
5422 /// Calling this method multiple times on the same object will always return the same value.
5423 /// The returned value is guaranteed to be different for all resources created from the same `Instance`.
5424 pub fn global_id(&self) -> Id<Self> {
5425 Id(self.id.global_id(), PhantomData)
5426 }
5427}
5428
5429impl Surface<'_> {
5430 /// Returns a globally-unique identifier for this `Surface`.
5431 ///
5432 /// Calling this method multiple times on the same object will always return the same value.
5433 /// The returned value is guaranteed to be different for all resources created from the same `Instance`.
5434 pub fn global_id(&self) -> Id<Surface<'_>> {
5435 Id(self.id.global_id(), PhantomData)
5436 }
5437}
5438
5439 /// Type for the callback of the uncaptured error handler.
5440pub trait UncapturedErrorHandler: Fn(Error) + Send + 'static {}
5441impl<T> UncapturedErrorHandler for T where T: Fn(Error) + Send + 'static {}
5442
5443/// Error type
5444#[derive(Debug)]
5445pub enum Error {
5446 /// Out of memory error
5447 OutOfMemory {
5448 /// Lower level source of the error.
5449 #[cfg(send_sync)]
5450 #[cfg_attr(docsrs, doc(cfg(all())))]
5451 source: Box<dyn error::Error + Send + 'static>,
5452 /// Lower level source of the error.
5453 #[cfg(not(send_sync))]
5454 #[cfg_attr(docsrs, doc(cfg(all())))]
5455 source: Box<dyn error::Error + 'static>,
5456 },
5457 /// Validation error, signifying a bug in code or data
5458 Validation {
5459 /// Lower level source of the error.
5460 #[cfg(send_sync)]
5461 #[cfg_attr(docsrs, doc(cfg(all())))]
5462 source: Box<dyn error::Error + Send + 'static>,
5463 /// Lower level source of the error.
5464 #[cfg(not(send_sync))]
5465 #[cfg_attr(docsrs, doc(cfg(all())))]
5466 source: Box<dyn error::Error + 'static>,
5467 /// Description of the validation error.
5468 description: String,
5469 },
5470 /// Internal error. Used for signalling any failures not explicitly expected by WebGPU.
5471 ///
5472 /// These could be due to internal implementation or system limits being reached.
5473 Internal {
5474 /// Lower level source of the error.
5475 #[cfg(send_sync)]
5476 #[cfg_attr(docsrs, doc(cfg(all())))]
5477 source: Box<dyn error::Error + Send + 'static>,
5478 /// Lower level source of the error.
5479 #[cfg(not(send_sync))]
5480 #[cfg_attr(docsrs, doc(cfg(all())))]
5481 source: Box<dyn error::Error + 'static>,
5482 /// Description of the internal GPU error.
5483 description: String,
5484 },
5485}
5486#[cfg(send_sync)]
5487static_assertions::assert_impl_all!(Error: Send);
5488
5489impl error::Error for Error {
5490 fn source(&self) -> Option<&(dyn error::Error + 'static)> {
5491 match self {
5492 Error::OutOfMemory { source } => Some(source.as_ref()),
5493 Error::Validation { source, .. } => Some(source.as_ref()),
5494 Error::Internal { source, .. } => Some(source.as_ref()),
5495 }
5496 }
5497}
5498
5499impl fmt::Display for Error {
5500 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
5501 match self {
5502 Error::OutOfMemory { .. } => f.write_str("Out of Memory"),
5503 Error::Validation { description, .. } => f.write_str(description),
5504 Error::Internal { description, .. } => f.write_str(description),
5505 }
5506 }
5507}
5508
5509use send_sync::*;
5510
5511mod send_sync {
5512 use std::any::Any;
5513 use std::fmt;
5514
5515 use wgt::WasmNotSendSync;
5516
5517 pub trait AnyWasmNotSendSync: Any + WasmNotSendSync {
5518 fn upcast_any_ref(&self) -> &dyn Any;
5519 }
5520 impl<T: Any + WasmNotSendSync> AnyWasmNotSendSync for T {
5521 #[inline]
5522 fn upcast_any_ref(&self) -> &dyn Any {
5523 self
5524 }
5525 }
5526
5527 impl dyn AnyWasmNotSendSync + 'static {
5528 #[inline]
5529 pub fn downcast_ref<T: 'static>(&self) -> Option<&T> {
5530 self.upcast_any_ref().downcast_ref::<T>()
5531 }
5532 }
5533
5534 impl fmt::Debug for dyn AnyWasmNotSendSync {
5535 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
5536 f.debug_struct("Any").finish_non_exhaustive()
5537 }
5538 }
5539}
5540
5541#[cfg(test)]
5542mod tests {
5543 use crate::BufferSize;
5544
5545 #[test]
5546 fn range_to_offset_size_works() {
5547 assert_eq!(crate::range_to_offset_size(0..2), (0, BufferSize::new(2)));
5548 assert_eq!(crate::range_to_offset_size(2..5), (2, BufferSize::new(3)));
5549 assert_eq!(crate::range_to_offset_size(..), (0, None));
5550 assert_eq!(crate::range_to_offset_size(21..), (21, None));
5551 assert_eq!(crate::range_to_offset_size(0..), (0, None));
5552 assert_eq!(crate::range_to_offset_size(..21), (0, BufferSize::new(21)));
5553 }
5554
5555 #[test]
5556 #[should_panic]
5557 fn range_to_offset_size_panics_for_empty_range() {
5558 crate::range_to_offset_size(123..123);
5559 }
5560
5561 #[test]
5562 #[should_panic]
5563 fn range_to_offset_size_panics_for_unbounded_empty_range() {
5564 crate::range_to_offset_size(..0);
5565 }
5566}