wgpu_core/device/life.rs

use crate::{
    binding_model::{BindGroup, BindGroupLayout, PipelineLayout},
    command::RenderBundle,
    device::{
        queue::{EncoderInFlight, SubmittedWorkDoneClosure, TempResource},
        DeviceError, DeviceLostClosure,
    },
    hal_api::HalApi,
    id,
    lock::Mutex,
    pipeline::{ComputePipeline, RenderPipeline},
    resource::{
        self, Buffer, DestroyedBuffer, DestroyedTexture, QuerySet, Resource, Sampler,
        StagingBuffer, Texture, TextureView,
    },
    snatch::SnatchGuard,
    track::{ResourceTracker, Tracker, TrackerIndex},
    FastHashMap, SubmissionIndex,
};
use smallvec::SmallVec;

use std::sync::Arc;
use thiserror::Error;

/// A struct that keeps lists of resources that are no longer needed by the user.
pub(crate) struct ResourceMaps<A: HalApi> {
    pub buffers: FastHashMap<TrackerIndex, Arc<Buffer<A>>>,
    pub staging_buffers: FastHashMap<TrackerIndex, Arc<StagingBuffer<A>>>,
    pub textures: FastHashMap<TrackerIndex, Arc<Texture<A>>>,
    pub texture_views: FastHashMap<TrackerIndex, Arc<TextureView<A>>>,
    pub samplers: FastHashMap<TrackerIndex, Arc<Sampler<A>>>,
    pub bind_groups: FastHashMap<TrackerIndex, Arc<BindGroup<A>>>,
    pub bind_group_layouts: FastHashMap<TrackerIndex, Arc<BindGroupLayout<A>>>,
    pub render_pipelines: FastHashMap<TrackerIndex, Arc<RenderPipeline<A>>>,
    pub compute_pipelines: FastHashMap<TrackerIndex, Arc<ComputePipeline<A>>>,
    pub pipeline_layouts: FastHashMap<TrackerIndex, Arc<PipelineLayout<A>>>,
    pub render_bundles: FastHashMap<TrackerIndex, Arc<RenderBundle<A>>>,
    pub query_sets: FastHashMap<TrackerIndex, Arc<QuerySet<A>>>,
    pub destroyed_buffers: FastHashMap<TrackerIndex, Arc<DestroyedBuffer<A>>>,
    pub destroyed_textures: FastHashMap<TrackerIndex, Arc<DestroyedTexture<A>>>,
}

impl<A: HalApi> ResourceMaps<A> {
    pub(crate) fn new() -> Self {
        ResourceMaps {
            buffers: FastHashMap::default(),
            staging_buffers: FastHashMap::default(),
            textures: FastHashMap::default(),
            texture_views: FastHashMap::default(),
            samplers: FastHashMap::default(),
            bind_groups: FastHashMap::default(),
            bind_group_layouts: FastHashMap::default(),
            render_pipelines: FastHashMap::default(),
            compute_pipelines: FastHashMap::default(),
            pipeline_layouts: FastHashMap::default(),
            render_bundles: FastHashMap::default(),
            query_sets: FastHashMap::default(),
            destroyed_buffers: FastHashMap::default(),
            destroyed_textures: FastHashMap::default(),
        }
    }

    pub(crate) fn clear(&mut self) {
        let ResourceMaps {
            buffers,
            staging_buffers,
            textures,
            texture_views,
            samplers,
            bind_groups,
            bind_group_layouts,
            render_pipelines,
            compute_pipelines,
            pipeline_layouts,
            render_bundles,
            query_sets,
            destroyed_buffers,
            destroyed_textures,
        } = self;
        buffers.clear();
        staging_buffers.clear();
        textures.clear();
        texture_views.clear();
        samplers.clear();
        bind_groups.clear();
        bind_group_layouts.clear();
        render_pipelines.clear();
        compute_pipelines.clear();
        pipeline_layouts.clear();
        render_bundles.clear();
        query_sets.clear();
        destroyed_buffers.clear();
        destroyed_textures.clear();
    }

    pub(crate) fn extend(&mut self, mut other: Self) {
        let ResourceMaps {
            buffers,
            staging_buffers,
            textures,
            texture_views,
            samplers,
            bind_groups,
            bind_group_layouts,
            render_pipelines,
            compute_pipelines,
            pipeline_layouts,
            render_bundles,
            query_sets,
            destroyed_buffers,
            destroyed_textures,
        } = self;
        buffers.extend(other.buffers.drain());
        staging_buffers.extend(other.staging_buffers.drain());
        textures.extend(other.textures.drain());
        texture_views.extend(other.texture_views.drain());
        samplers.extend(other.samplers.drain());
        bind_groups.extend(other.bind_groups.drain());
        bind_group_layouts.extend(other.bind_group_layouts.drain());
        render_pipelines.extend(other.render_pipelines.drain());
        compute_pipelines.extend(other.compute_pipelines.drain());
        pipeline_layouts.extend(other.pipeline_layouts.drain());
        render_bundles.extend(other.render_bundles.drain());
        query_sets.extend(other.query_sets.drain());
        destroyed_buffers.extend(other.destroyed_buffers.drain());
        destroyed_textures.extend(other.destroyed_textures.drain());
    }
}
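// Illustrative sketch (not part of this module's API): callers record each
// dropped resource in the map matching its type, keyed by its tracker index,
// and later merge or drop whole sets at once. The `buffer` and `other_maps`
// bindings below are assumptions made up for the example.
//
//     let mut maps = ResourceMaps::<A>::new();
//     maps.buffers
//         .insert(buffer.as_info().tracker_index(), buffer.clone());
//     maps.extend(other_maps); // drain another set of deferred resources into this one
//     maps.clear(); // drop every Arc held here, releasing any final references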

/// A command submitted to the GPU for execution.
///
/// ## Keeping resources alive while the GPU is using them
///
/// [`wgpu_hal`] requires that, when a command is submitted to a queue, all the
/// resources it uses must remain alive until it has finished executing.
///
/// The natural way to satisfy this would be for `ActiveSubmission` to hold
/// strong references to all the resources used by its commands. However, that
/// would entail dropping those strong references every time a queue submission
/// finishes, adjusting the reference counts of all the resources it used. This
/// is usually needless work: it's rare for the active submission queue to be
/// the final reference to an object. Usually the user is still holding on to
/// it.
///
/// To avoid this, an `ActiveSubmission` does not initially hold any strong
/// references to its commands' resources. Instead, each resource tracks the
/// most recent submission index at which it has been used in
/// [`ResourceInfo::submission_index`]. When the user drops a resource, if the
/// submission in which it was last used is still present in the device's queue,
/// we add the resource to [`ActiveSubmission::last_resources`]. Finally, when
/// this `ActiveSubmission` is dequeued and dropped in
/// [`LifetimeTracker::triage_submissions`], we drop `last_resources` along with
/// it. Thus, unless a resource is dropped by the user, it doesn't need to be
/// touched at all when processing completed work.
///
/// However, it's not clear that this is effective. See [#5560].
///
/// [`wgpu_hal`]: hal
/// [`ResourceInfo::submission_index`]: crate::resource::ResourceInfo
/// [#5560]: https://github.com/gfx-rs/wgpu/issues/5560
struct ActiveSubmission<A: HalApi> {
    /// The index of the submission we track.
    ///
    /// When `Device::fence`'s value is greater than or equal to this, our queue
    /// submission has completed.
    index: SubmissionIndex,

    /// Resources to be freed once this queue submission has completed.
    ///
    /// When the device is polled, `triage_submissions` drops this map for each
    /// completed submission, releasing the resources that no longer need to be
    /// kept alive.
    ///
    /// This includes things like temporary resources and resources that are
    /// used by submitted commands but have been dropped by the user (meaning that
    /// this submission is their last reference.)
    last_resources: ResourceMaps<A>,

    /// Buffers to be mapped once this submission has completed.
    mapped: Vec<Arc<Buffer<A>>>,

    /// Command buffers used by this submission, and the encoder that owns them.
    ///
    /// [`wgpu_hal::Queue::submit`] requires the submitted command buffers to
    /// remain alive until the submission has completed execution. Command
    /// encoders double as allocation pools for command buffers, so holding them
    /// here and cleaning them up in [`LifetimeTracker::triage_submissions`]
    /// satisfies that requirement.
    ///
    /// Once this submission has completed, the command buffers are reset and
    /// the command encoder is recycled.
    ///
    /// [`wgpu_hal::Queue::submit`]: hal::Queue::submit
    encoders: Vec<EncoderInFlight<A>>,

    /// List of queue "on_submitted_work_done" closures to be called once this
    /// submission has completed.
    work_done_closures: SmallVec<[SubmittedWorkDoneClosure; 1]>,
}
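// Illustrative sketch of the scheme described above; the `resource` and `active`
// bindings are assumptions for the example, not the actual call sites. When the
// user drops a resource whose last use may still be in flight, the drop path does
// roughly this:
//
//     let submit_index = resource.as_info().submission_index();
//     if let Some(a) = active.iter_mut().find(|a| a.index == submit_index) {
//         // Keep the resource alive until this submission completes.
//         a.last_resources
//             .buffers
//             .insert(resource.as_info().tracker_index(), resource);
//     }
//
// In the common case (the user still holds the resource), completed submissions
// never have to touch it at all.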

#[derive(Clone, Debug, Error)]
#[non_exhaustive]
pub enum WaitIdleError {
    #[error(transparent)]
    Device(#[from] DeviceError),
    #[error("Tried to wait using a submission index from the wrong device. Submission index is from device {0:?}. Called poll on device {1:?}.")]
    WrongSubmissionIndex(id::QueueId, id::DeviceId),
    #[error("GPU got stuck :(")]
    StuckGpu,
}

/// Resource tracking for a device.
///
/// ## Host mapping buffers
///
/// A buffer cannot be mapped until all active queue submissions that use it
/// have completed. To that end:
///
/// -   Each buffer's `ResourceInfo::submission_index` records the index of the
///     most recent queue submission that uses that buffer.
///
/// -   Calling `Global::buffer_map_async` adds the buffer to
///     `self.mapped`, and changes `Buffer::map_state` to prevent it
///     from being used in any new submissions.
///
/// -   When the device is polled, the following `LifetimeTracker` methods decide
///     what should happen next:
///
///     1)  `triage_mapped` drains `self.mapped`, checking the submission index
///         of each buffer against the queue submissions that have finished
///         execution. Buffers used by submissions still in flight go in
///         `self.active[index].mapped`, and the rest go into
///         `self.ready_to_map`.
///
///     2)  `triage_submissions` moves entries in `self.active[i]` for completed
///         submissions to `self.ready_to_map`. At this point, both
///         `self.active` and `self.ready_to_map` are up to date with the given
///         submission index.
///     3)  `handle_mapping` drains `self.ready_to_map` and actually maps the
///         buffers, collecting a list of notification closures to call. Buffers
///         that were dropped by the user and are no longer tracked are simply
///         reset to the idle map state and skipped.
///
/// Only calling `Global::buffer_map_async` clones a new `Arc` for the
/// buffer. This new `Arc` is only dropped by `handle_mapping`.
pub(crate) struct LifetimeTracker<A: HalApi> {
    /// Resources that the user has requested be mapped, but which are used by
    /// queue submissions still in flight.
    mapped: Vec<Arc<Buffer<A>>>,

    /// Buffers can be used in a submission that is yet to be made, by means of
    /// `write_buffer()`, so we have a special place for them.
    pub future_suspected_buffers: Vec<Arc<Buffer<A>>>,

    /// Textures can be used in the upcoming submission by `write_texture`.
    pub future_suspected_textures: Vec<Arc<Texture<A>>>,

    /// Resources whose user handle has died (i.e. drop/destroy has been called)
    /// and will likely be ready for destruction soon.
    pub suspected_resources: ResourceMaps<A>,

    /// Resources used by queue submissions still in flight. One entry per
    /// submission, with older submissions appearing before younger.
    ///
    /// Entries are added by `track_submission` and drained by
    /// `LifetimeTracker::triage_submissions`. Lots of methods contribute data
    /// to particular entries.
    active: Vec<ActiveSubmission<A>>,

    /// Buffers the user has asked us to map, and which are not used by any
    /// queue submission still in flight.
    ready_to_map: Vec<Arc<Buffer<A>>>,

    /// Queue "on_submitted_work_done" closures that were registered while there
    /// were no submissions in flight. These cannot be invoked immediately, since
    /// they must fire only _after_ all outstanding mapped-buffer callbacks have
    /// been issued, so we defer them here until the next time the device is
    /// maintained.
    work_done_closures: SmallVec<[SubmittedWorkDoneClosure; 1]>,

    /// Closure to be called when the device is lost. This is invoked directly by
    /// `device.lose` or by the `UserCallbacks` returned from `maintain` when the
    /// device has been destroyed and its queues are empty.
    pub device_lost_closure: Option<DeviceLostClosure>,
}
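// Illustrative sketch of how the methods below fit together when the device is
// polled, following the numbered steps in the doc comment above. The surrounding
// driver code (`last_done`, `command_allocator`, `raw_device`, `trackers`,
// `snatch_guard`) is assumed for the example:
//
//     // 1) Sort not-yet-mapped buffers by the submission that last used them.
//     life_tracker.triage_mapped();
//     // 2) Retire submissions the GPU has finished, collecting their
//     //    "on_submitted_work_done" closures.
//     let submission_closures =
//         life_tracker.triage_submissions(last_done, &command_allocator);
//     // 3) Map the buffers whose submissions have completed, collecting the
//     //    map_async callbacks to fire.
//     let mapping_closures =
//         life_tracker.handle_mapping(raw_device, &trackers, &snatch_guard);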

impl<A: HalApi> LifetimeTracker<A> {
    pub fn new() -> Self {
        Self {
            mapped: Vec::new(),
            future_suspected_buffers: Vec::new(),
            future_suspected_textures: Vec::new(),
            suspected_resources: ResourceMaps::new(),
            active: Vec::new(),
            ready_to_map: Vec::new(),
            work_done_closures: SmallVec::new(),
            device_lost_closure: None,
        }
    }

    /// Return true if there are no queue submissions still in flight.
    pub fn queue_empty(&self) -> bool {
        self.active.is_empty()
    }

    /// Start tracking resources associated with a new queue submission.
    pub fn track_submission(
        &mut self,
        index: SubmissionIndex,
        temp_resources: impl Iterator<Item = TempResource<A>>,
        encoders: Vec<EncoderInFlight<A>>,
    ) {
        let mut last_resources = ResourceMaps::new();
        for res in temp_resources {
            match res {
                TempResource::Buffer(raw) => {
                    last_resources
                        .buffers
                        .insert(raw.as_info().tracker_index(), raw);
                }
                TempResource::StagingBuffer(raw) => {
                    last_resources
                        .staging_buffers
                        .insert(raw.as_info().tracker_index(), raw);
                }
                TempResource::DestroyedBuffer(destroyed) => {
                    last_resources
                        .destroyed_buffers
                        .insert(destroyed.tracker_index, destroyed);
                }
                TempResource::Texture(raw) => {
                    last_resources
                        .textures
                        .insert(raw.as_info().tracker_index(), raw);
                }
                TempResource::DestroyedTexture(destroyed) => {
                    last_resources
                        .destroyed_textures
                        .insert(destroyed.tracker_index, destroyed);
                }
            }
        }

        self.active.push(ActiveSubmission {
            index,
            last_resources,
            mapped: Vec::new(),
            encoders,
            work_done_closures: SmallVec::new(),
        });
    }

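    /// Move the buffers and textures noted in `future_suspected_buffers` and
    /// `future_suspected_textures` into `suspected_resources`, now that the
    /// submission that may use them has been made.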
    pub fn post_submit(&mut self) {
        for v in self.future_suspected_buffers.drain(..) {
            self.suspected_resources
                .buffers
                .insert(v.as_info().tracker_index(), v);
        }
        for v in self.future_suspected_textures.drain(..) {
            self.suspected_resources
                .textures
                .insert(v.as_info().tracker_index(), v);
        }
    }

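    /// Record that the user has asked to map `value`.
    ///
    /// `triage_mapped` later decides whether the buffer must wait for a queue
    /// submission still in flight or can be handed straight to `handle_mapping`.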
    pub(crate) fn map(&mut self, value: &Arc<Buffer<A>>) {
        self.mapped.push(value.clone());
    }

    /// Sort out the consequences of completed submissions.
    ///
    /// Assume that all submissions up through `last_done` have completed.
    ///
    /// -   Buffers used by those submissions are now ready to map, if requested.
    ///     Add any buffers in the submission's [`mapped`] list to
    ///     [`self.ready_to_map`], where [`LifetimeTracker::handle_mapping`]
    ///     will find them.
    ///
    /// -   Resources whose final use was in those submissions are now ready to
    ///     free. Dropping the submission's [`last_resources`] table does so.
    ///
    /// Return a list of [`SubmittedWorkDoneClosure`]s to run.
    ///
    /// [`mapped`]: ActiveSubmission::mapped
    /// [`self.ready_to_map`]: LifetimeTracker::ready_to_map
    /// [`last_resources`]: ActiveSubmission::last_resources
    /// [`SubmittedWorkDoneClosure`]: crate::device::queue::SubmittedWorkDoneClosure
    #[must_use]
    pub fn triage_submissions(
        &mut self,
        last_done: SubmissionIndex,
        command_allocator: &crate::command::CommandAllocator<A>,
    ) -> SmallVec<[SubmittedWorkDoneClosure; 1]> {
        profiling::scope!("triage_submissions");

        //TODO: enable when `is_sorted_by_key` is stable
        //debug_assert!(self.active.is_sorted_by_key(|a| a.index));
        let done_count = self
            .active
            .iter()
            .position(|a| a.index > last_done)
            .unwrap_or(self.active.len());

        let mut work_done_closures: SmallVec<_> = self.work_done_closures.drain(..).collect();
        for a in self.active.drain(..done_count) {
            log::debug!("Active submission {} is done", a.index);
            self.ready_to_map.extend(a.mapped);
            for encoder in a.encoders {
                let raw = unsafe { encoder.land() };
                command_allocator.release_encoder(raw);
            }
            work_done_closures.extend(a.work_done_closures);
        }
        work_done_closures
    }
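    // Illustrative sketch of a typical caller (the fence read shown here is an
    // assumption for the example; only `triage_submissions` itself is defined in
    // this file):
    //
    //     let last_done = unsafe { device.raw().get_fence_value(&fence)? };
    //     let closures = life_tracker.triage_submissions(last_done, &command_allocator);
    //     for closure in closures {
    //         // Fire each "on_submitted_work_done" callback outside of any locks.
    //         closure.call();
    //     }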

    pub fn schedule_resource_destruction(
        &mut self,
        temp_resource: TempResource<A>,
        last_submit_index: SubmissionIndex,
    ) {
        let resources = self
            .active
            .iter_mut()
            .find(|a| a.index == last_submit_index)
            .map(|a| &mut a.last_resources);
        if let Some(resources) = resources {
            match temp_resource {
                TempResource::Buffer(raw) => {
                    resources.buffers.insert(raw.as_info().tracker_index(), raw);
                }
                TempResource::StagingBuffer(raw) => {
                    resources
                        .staging_buffers
                        .insert(raw.as_info().tracker_index(), raw);
                }
                TempResource::DestroyedBuffer(destroyed) => {
                    resources
                        .destroyed_buffers
                        .insert(destroyed.tracker_index, destroyed);
                }
                TempResource::Texture(raw) => {
                    resources
                        .textures
                        .insert(raw.as_info().tracker_index(), raw);
                }
                TempResource::DestroyedTexture(destroyed) => {
                    resources
                        .destroyed_textures
                        .insert(destroyed.tracker_index, destroyed);
                }
            }
        }
    }

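    /// Register a closure to run once the most recent submission completes.
    ///
    /// If no submission is in flight, the closure is kept on `self` and handed
    /// back by the next call to `triage_submissions`.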
    pub fn add_work_done_closure(&mut self, closure: SubmittedWorkDoneClosure) {
        match self.active.last_mut() {
            Some(active) => {
                active.work_done_closures.push(closure);
            }
            // We must defer the closure until all previously occurring map_async closures
            // have fired. This is required by the spec.
            None => {
                self.work_done_closures.push(closure);
            }
        }
    }
}

impl<A: HalApi> LifetimeTracker<A> {
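    /// Remove from `trackers` every resource in `resources_map` that the tracker
    /// reports as abandoned, returning the removed resources.
    ///
    /// Each removed resource is also re-inserted into the `last_resources` map
    /// (selected by `get_resource_map`) of the active submission that last used
    /// it, if that submission is still in flight, so the resource stays alive
    /// until the GPU is done with it.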
    fn triage_resources<R>(
        resources_map: &mut FastHashMap<TrackerIndex, Arc<R>>,
        active: &mut [ActiveSubmission<A>],
        trackers: &mut impl ResourceTracker,
        get_resource_map: impl Fn(&mut ResourceMaps<A>) -> &mut FastHashMap<TrackerIndex, Arc<R>>,
    ) -> Vec<Arc<R>>
    where
        R: Resource,
    {
        let mut removed_resources = Vec::new();
        resources_map.retain(|&index, resource| {
            let submit_index = resource.as_info().submission_index();
            let non_referenced_resources = active
                .iter_mut()
                .find(|a| a.index == submit_index)
                .map(|a| &mut a.last_resources);

            let is_removed = trackers.remove_abandoned(index);
            if is_removed {
                removed_resources.push(resource.clone());
                if let Some(resources) = non_referenced_resources {
                    get_resource_map(resources).insert(index, resource.clone());
                }
            }
            !is_removed
        });
        removed_resources
    }

    fn triage_suspected_render_bundles(&mut self, trackers: &Mutex<Tracker<A>>) -> &mut Self {
        let mut trackers = trackers.lock();
        let resource_map = &mut self.suspected_resources.render_bundles;
        let mut removed_resources = Self::triage_resources(
            resource_map,
            self.active.as_mut_slice(),
            &mut trackers.bundles,
            |maps| &mut maps.render_bundles,
        );
        removed_resources.drain(..).for_each(|bundle| {
            for v in bundle.used.buffers.write().drain_resources() {
                self.suspected_resources
                    .buffers
                    .insert(v.as_info().tracker_index(), v);
            }
            for v in bundle.used.textures.write().drain_resources() {
                self.suspected_resources
                    .textures
                    .insert(v.as_info().tracker_index(), v);
            }
            for v in bundle.used.bind_groups.write().drain_resources() {
                self.suspected_resources
                    .bind_groups
                    .insert(v.as_info().tracker_index(), v);
            }
            for v in bundle.used.render_pipelines.write().drain_resources() {
                self.suspected_resources
                    .render_pipelines
                    .insert(v.as_info().tracker_index(), v);
            }
            for v in bundle.used.query_sets.write().drain_resources() {
                self.suspected_resources
                    .query_sets
                    .insert(v.as_info().tracker_index(), v);
            }
        });
        self
    }

    fn triage_suspected_bind_groups(&mut self, trackers: &Mutex<Tracker<A>>) -> &mut Self {
        let mut trackers = trackers.lock();
        let resource_map = &mut self.suspected_resources.bind_groups;
        let mut removed_resource = Self::triage_resources(
            resource_map,
            self.active.as_mut_slice(),
            &mut trackers.bind_groups,
            |maps| &mut maps.bind_groups,
        );
        removed_resource.drain(..).for_each(|bind_group| {
            for v in bind_group.used.buffers.drain_resources() {
                self.suspected_resources
                    .buffers
                    .insert(v.as_info().tracker_index(), v);
            }
            for v in bind_group.used.textures.drain_resources() {
                self.suspected_resources
                    .textures
                    .insert(v.as_info().tracker_index(), v);
            }
            for v in bind_group.used.views.drain_resources() {
                self.suspected_resources
                    .texture_views
                    .insert(v.as_info().tracker_index(), v);
            }
            for v in bind_group.used.samplers.drain_resources() {
                self.suspected_resources
                    .samplers
                    .insert(v.as_info().tracker_index(), v);
            }

            self.suspected_resources.bind_group_layouts.insert(
                bind_group.layout.as_info().tracker_index(),
                bind_group.layout.clone(),
            );
        });
        self
    }

    fn triage_suspected_texture_views(&mut self, trackers: &Mutex<Tracker<A>>) -> &mut Self {
        let mut trackers = trackers.lock();
        let resource_map = &mut self.suspected_resources.texture_views;
        Self::triage_resources(
            resource_map,
            self.active.as_mut_slice(),
            &mut trackers.views,
            |maps| &mut maps.texture_views,
        );
        self
    }

    fn triage_suspected_textures(&mut self, trackers: &Mutex<Tracker<A>>) -> &mut Self {
        let mut trackers = trackers.lock();
        let resource_map = &mut self.suspected_resources.textures;
        Self::triage_resources(
            resource_map,
            self.active.as_mut_slice(),
            &mut trackers.textures,
            |maps| &mut maps.textures,
        );

        // We may have been suspected because a texture view or bind group
        // referring to us was dropped. Remove stale weak references, so that
        // the backlink table doesn't grow without bound.
        for texture in self.suspected_resources.textures.values() {
            texture.views.lock().retain(|view| view.strong_count() > 0);
            texture
                .bind_groups
                .lock()
                .retain(|bg| bg.strong_count() > 0);
        }

        self
    }

    fn triage_suspected_samplers(&mut self, trackers: &Mutex<Tracker<A>>) -> &mut Self {
        let mut trackers = trackers.lock();
        let resource_map = &mut self.suspected_resources.samplers;
        Self::triage_resources(
            resource_map,
            self.active.as_mut_slice(),
            &mut trackers.samplers,
            |maps| &mut maps.samplers,
        );
        self
    }

    fn triage_suspected_buffers(&mut self, trackers: &Mutex<Tracker<A>>) -> &mut Self {
        let mut trackers = trackers.lock();
        let resource_map = &mut self.suspected_resources.buffers;
        Self::triage_resources(
            resource_map,
            self.active.as_mut_slice(),
            &mut trackers.buffers,
            |maps| &mut maps.buffers,
        );

        // We may have been suspected because a bind group referring to us was
        // dropped. Remove stale weak references, so that the backlink table
        // doesn't grow without bound.
        for buffer in self.suspected_resources.buffers.values() {
            buffer.bind_groups.lock().retain(|bg| bg.strong_count() > 0);
        }

        self
    }

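    /// Move explicitly destroyed buffers into the `last_resources` map of the
    /// submission that last used them, so their raw handles are freed only once
    /// the GPU is done with them. Buffers not used by any in-flight submission
    /// are dropped immediately.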
    fn triage_suspected_destroyed_buffers(&mut self) {
        for (id, buffer) in self.suspected_resources.destroyed_buffers.drain() {
            let submit_index = buffer.submission_index;
            if let Some(resources) = self.active.iter_mut().find(|a| a.index == submit_index) {
                resources
                    .last_resources
                    .destroyed_buffers
                    .insert(id, buffer);
            }
        }
    }

    fn triage_suspected_destroyed_textures(&mut self) {
        for (id, texture) in self.suspected_resources.destroyed_textures.drain() {
            let submit_index = texture.submission_index;
            if let Some(resources) = self.active.iter_mut().find(|a| a.index == submit_index) {
                resources
                    .last_resources
                    .destroyed_textures
                    .insert(id, texture);
            }
        }
    }

    fn triage_suspected_compute_pipelines(&mut self, trackers: &Mutex<Tracker<A>>) -> &mut Self {
        let mut trackers = trackers.lock();
        let resource_map = &mut self.suspected_resources.compute_pipelines;
        let mut removed_resources = Self::triage_resources(
            resource_map,
            self.active.as_mut_slice(),
            &mut trackers.compute_pipelines,
            |maps| &mut maps.compute_pipelines,
        );
        removed_resources.drain(..).for_each(|compute_pipeline| {
            self.suspected_resources.pipeline_layouts.insert(
                compute_pipeline.layout.as_info().tracker_index(),
                compute_pipeline.layout.clone(),
            );
        });
        self
    }

    fn triage_suspected_render_pipelines(&mut self, trackers: &Mutex<Tracker<A>>) -> &mut Self {
        let mut trackers = trackers.lock();
        let resource_map = &mut self.suspected_resources.render_pipelines;
        let mut removed_resources = Self::triage_resources(
            resource_map,
            self.active.as_mut_slice(),
            &mut trackers.render_pipelines,
            |maps| &mut maps.render_pipelines,
        );
        removed_resources.drain(..).for_each(|render_pipeline| {
            self.suspected_resources.pipeline_layouts.insert(
                render_pipeline.layout.as_info().tracker_index(),
                render_pipeline.layout.clone(),
            );
        });
        self
    }

    fn triage_suspected_pipeline_layouts(&mut self) -> &mut Self {
        let mut removed_resources = Vec::new();
        self.suspected_resources
            .pipeline_layouts
            .retain(|_pipeline_layout_id, pipeline_layout| {
                removed_resources.push(pipeline_layout.clone());
                false
            });
        removed_resources.drain(..).for_each(|pipeline_layout| {
            for bgl in &pipeline_layout.bind_group_layouts {
                self.suspected_resources
                    .bind_group_layouts
                    .insert(bgl.as_info().tracker_index(), bgl.clone());
            }
        });
        self
    }

    fn triage_suspected_bind_group_layouts(&mut self) -> &mut Self {
        //Note: this has to happen after all the suspected pipelines are destroyed
        //Note: nothing else can bump the refcount since the guard is locked exclusively
        //Note: same BGL can appear multiple times in the list, but only the last
        self.suspected_resources.bind_group_layouts.clear();

        self
    }

    fn triage_suspected_query_sets(&mut self, trackers: &Mutex<Tracker<A>>) -> &mut Self {
        let mut trackers = trackers.lock();
        let resource_map = &mut self.suspected_resources.query_sets;
        Self::triage_resources(
            resource_map,
            self.active.as_mut_slice(),
            &mut trackers.query_sets,
            |maps| &mut maps.query_sets,
        );
        self
    }

    fn triage_suspected_staging_buffers(&mut self) -> &mut Self {
        self.suspected_resources.staging_buffers.clear();

        self
    }

    /// Identify resources to free, according to `trackers` and `self.suspected_resources`.
    ///
    /// Remove from `trackers` (the [`Tracker`] belonging to the same [`Device`]
    /// as `self`) each resource mentioned in [`self.suspected_resources`]. If
    /// `trackers` held the final reference to that resource, add it to the
    /// appropriate free list, to be destroyed by the hal:
    ///
    /// -   Add resources used by queue submissions still in flight to the
    ///     [`last_resources`] table of the last such submission's entry in
    ///     [`self.active`]. When that submission has finished execution, the
    ///     [`triage_submissions`] method will drop that table, and the resources'
    ///     reference counts will then be responsible for carrying out deallocation.
    ///
    /// ## Entrained resources
    ///
    /// This function finds resources that are used only by other resources
    /// ready to be freed, and adds those to the free lists as well. For
    /// example, if there's some texture `T` used only by some texture view
    /// `TV`, then if `TV` can be freed, `T` gets added to the free lists too.
    ///
    /// Since `wgpu-core` resource ownership patterns are acyclic, we can visit
    /// each type that can be owned after all types that could possibly own
    /// it. This way, we can detect all free-able objects in a single pass,
    /// simply by starting with types that are roots of the ownership DAG (like
    /// render bundles) and working our way towards leaf types (like buffers).
    ///
    /// [`Device`]: super::Device
    /// [`self.suspected_resources`]: LifetimeTracker::suspected_resources
    /// [`last_resources`]: ActiveSubmission::last_resources
    /// [`self.active`]: LifetimeTracker::active
    /// [`triage_submissions`]: LifetimeTracker::triage_submissions
    pub(crate) fn triage_suspected(&mut self, trackers: &Mutex<Tracker<A>>) {
        profiling::scope!("triage_suspected");

        //NOTE: the order is important, to release resources that depend on one another!
        self.triage_suspected_render_bundles(trackers);
        self.triage_suspected_compute_pipelines(trackers);
        self.triage_suspected_render_pipelines(trackers);
        self.triage_suspected_bind_groups(trackers);
        self.triage_suspected_pipeline_layouts();
        self.triage_suspected_bind_group_layouts();
        self.triage_suspected_query_sets(trackers);
        self.triage_suspected_samplers(trackers);
        self.triage_suspected_staging_buffers();
        self.triage_suspected_texture_views(trackers);
        self.triage_suspected_textures(trackers);
        self.triage_suspected_buffers(trackers);
        self.triage_suspected_destroyed_buffers();
        self.triage_suspected_destroyed_textures();
    }

    /// Determine which buffers are ready to map, and which must wait for the
    /// GPU.
    ///
    /// See the documentation for [`LifetimeTracker`] for details.
    pub(crate) fn triage_mapped(&mut self) {
        if self.mapped.is_empty() {
            return;
        }

        for buffer in self.mapped.drain(..) {
            let submit_index = buffer.info.submission_index();
            log::trace!(
                "Mapping of {:?} at submission {:?} gets assigned to active {:?}",
                buffer.info.id(),
                submit_index,
                self.active.iter().position(|a| a.index == submit_index)
            );

            self.active
                .iter_mut()
                .find(|a| a.index == submit_index)
                .map_or(&mut self.ready_to_map, |a| &mut a.mapped)
                .push(buffer);
        }
    }

    /// Map the buffers in `self.ready_to_map`.
    ///
    /// Return a list of mapping notifications to send.
    ///
    /// See the documentation for [`LifetimeTracker`] for details.
    #[must_use]
    pub(crate) fn handle_mapping(
        &mut self,
        raw: &A::Device,
        trackers: &Mutex<Tracker<A>>,
        snatch_guard: &SnatchGuard,
    ) -> Vec<super::BufferMapPendingClosure> {
        if self.ready_to_map.is_empty() {
            return Vec::new();
        }
        let mut pending_callbacks: Vec<super::BufferMapPendingClosure> =
            Vec::with_capacity(self.ready_to_map.len());

        for buffer in self.ready_to_map.drain(..) {
            let tracker_index = buffer.info.tracker_index();
            let is_removed = {
                let mut trackers = trackers.lock();
                trackers.buffers.remove_abandoned(tracker_index)
            };
            if is_removed {
                *buffer.map_state.lock() = resource::BufferMapState::Idle;
                log::trace!("Buffer ready to map {tracker_index:?} is not tracked anymore");
            } else {
                // This _cannot_ be inlined into the match. If it is, the lock will be held
                // open through the whole match, resulting in a deadlock when we try to re-lock
                // the buffer back to active.
                let mapping = std::mem::replace(
                    &mut *buffer.map_state.lock(),
                    resource::BufferMapState::Idle,
                );
                let pending_mapping = match mapping {
                    resource::BufferMapState::Waiting(pending_mapping) => pending_mapping,
                    // Mapping cancelled
                    resource::BufferMapState::Idle => continue,
                    // Mapping queued at least twice by map -> unmap -> map
                    // and was already successfully mapped below
                    resource::BufferMapState::Active { .. } => {
                        *buffer.map_state.lock() = mapping;
                        continue;
                    }
                    _ => panic!("No pending mapping."),
                };
                let status = if pending_mapping.range.start != pending_mapping.range.end {
                    log::debug!("Buffer {tracker_index:?} map state -> Active");
                    let host = pending_mapping.op.host;
                    let size = pending_mapping.range.end - pending_mapping.range.start;
                    match super::map_buffer(
                        raw,
                        &buffer,
                        pending_mapping.range.start,
                        size,
                        host,
                        snatch_guard,
                    ) {
                        Ok(ptr) => {
                            *buffer.map_state.lock() = resource::BufferMapState::Active {
                                ptr,
                                range: pending_mapping.range.start
                                    ..pending_mapping.range.start + size,
                                host,
                            };
                            Ok(())
                        }
                        Err(e) => {
                            log::error!("Mapping failed: {e}");
                            Err(e)
                        }
                    }
                } else {
                    *buffer.map_state.lock() = resource::BufferMapState::Active {
                        ptr: std::ptr::NonNull::dangling(),
                        range: pending_mapping.range,
                        host: pending_mapping.op.host,
                    };
                    Ok(())
                };
                pending_callbacks.push((pending_mapping.op, status));
            }
        }
        pending_callbacks
    }
}