bevy_render/batching/
gpu_preprocessing.rs

1//! Batching functionality when GPU preprocessing is in use.
2
3use bevy_app::{App, Plugin};
4use bevy_derive::{Deref, DerefMut};
5use bevy_ecs::{
6    entity::Entity,
7    query::{Has, With},
8    schedule::IntoSystemConfigs as _,
9    system::{Query, Res, ResMut, Resource, StaticSystemParam},
10    world::{FromWorld, World},
11};
12use bevy_encase_derive::ShaderType;
13use bevy_utils::EntityHashMap;
14use bytemuck::{Pod, Zeroable};
15use nonmax::NonMaxU32;
16use smallvec::smallvec;
17use wgpu::{BindingResource, BufferUsages, DownlevelFlags, Features};
18
19use crate::{
20    render_phase::{
21        BinnedPhaseItem, BinnedRenderPhaseBatch, CachedRenderPipelinePhaseItem,
22        PhaseItemExtraIndex, SortedPhaseItem, SortedRenderPhase, UnbatchableBinnedEntityIndices,
23        ViewBinnedRenderPhases, ViewSortedRenderPhases,
24    },
25    render_resource::{BufferVec, GpuArrayBufferable, RawBufferVec, UninitBufferVec},
26    renderer::{RenderAdapter, RenderDevice, RenderQueue},
27    view::{GpuCulling, ViewTarget},
28    Render, RenderApp, RenderSet,
29};
30
31use super::{BatchMeta, GetBatchData, GetFullBatchData};
32
/// A plugin that registers the resources and systems needed for batching when
/// GPU preprocessing is in use: the [`IndirectParametersBuffer`] resource, the
/// system that uploads it, and the [`GpuPreprocessingSupport`] resource.
pub struct BatchingPlugin;
34
35impl Plugin for BatchingPlugin {
36    fn build(&self, app: &mut App) {
37        let Some(render_app) = app.get_sub_app_mut(RenderApp) else {
38            return;
39        };
40
41        render_app
42            .insert_resource(IndirectParametersBuffer::new())
43            .add_systems(
44                Render,
45                write_indirect_parameters_buffer.in_set(RenderSet::PrepareResourcesFlush),
46            );
47    }
48
49    fn finish(&self, app: &mut App) {
50        let Some(render_app) = app.get_sub_app_mut(RenderApp) else {
51            return;
52        };
53
54        render_app.init_resource::<GpuPreprocessingSupport>();
55    }
56}
57
/// Records whether GPU preprocessing and/or GPU culling are supported on the
/// device.
///
/// No GPU preprocessing is supported on WebGL because of the lack of compute
/// shader support.  GPU preprocessing is supported on DirectX 12, but due to [a
/// `wgpu` limitation] GPU culling is not.
///
/// The appropriate variant is chosen in this type's `FromWorld` implementation
/// based on the device's limits and features and, on Android, the adapter
/// name.
///
/// [a `wgpu` limitation]: https://github.com/gfx-rs/wgpu/issues/2471
#[derive(Clone, Copy, PartialEq, Resource)]
pub enum GpuPreprocessingSupport {
    /// No GPU preprocessing support is available at all.
    None,
    /// GPU preprocessing is available, but GPU culling isn't.
    PreprocessingOnly,
    /// Both GPU preprocessing and GPU culling are available.
    Culling,
}
75
/// The GPU buffers holding the data needed to render batches.
///
/// For example, in the 3D PBR pipeline this holds `MeshUniform`s, which are the
/// `BD` type parameter in that mode.
///
/// We have a separate *buffer data input* type (`BDI`) here, which a compute
/// shader is expected to expand to the full buffer data (`BD`) type. GPU
/// uniform building is generally faster and uses less system RAM to VRAM bus
/// bandwidth, but only implemented for some pipelines (for example, not in the
/// 2D pipeline at present) and only when compute shader is available.
///
/// All of these buffers are emptied at the start of each frame by
/// [`clear_batched_gpu_instance_buffers`].
#[derive(Resource)]
pub struct BatchedInstanceBuffers<BD, BDI>
where
    BD: GpuArrayBufferable + Sync + Send + 'static,
    BDI: Pod,
{
    /// A storage area for the buffer data that the GPU compute shader is
    /// expected to write to.
    ///
    /// There will be one entry for each index.
    pub data_buffer: UninitBufferVec<BD>,

    /// The index of the buffer data in the current input buffer that
    /// corresponds to each instance.
    ///
    /// This is keyed off each view. Each view has a separate buffer.
    ///
    /// Buffers for views whose [`ViewTarget`] has gone away are removed by
    /// [`delete_old_work_item_buffers`].
    pub work_item_buffers: EntityHashMap<Entity, PreprocessWorkItemBuffer>,

    /// The uniform data inputs for the current frame.
    ///
    /// These are uploaded during the extraction phase.
    pub current_input_buffer: RawBufferVec<BDI>,

    /// The uniform data inputs for the previous frame.
    ///
    /// The indices don't generally line up between `current_input_buffer`
    /// and `previous_input_buffer`, because, among other reasons, entities
    /// can spawn or despawn between frames. Instead, each current buffer
    /// data input uniform is expected to contain the index of the
    /// corresponding buffer data input uniform in this list.
    pub previous_input_buffer: RawBufferVec<BDI>,
}
118
/// The buffer of GPU preprocessing work items for a single view.
pub struct PreprocessWorkItemBuffer {
    /// The buffer of work items, one [`PreprocessWorkItem`] per instance
    /// processed for this view.
    pub buffer: BufferVec<PreprocessWorkItem>,
    /// True if we're using GPU culling.
    ///
    /// This is fixed when the buffer is first created for the view.
    pub gpu_culling: bool,
}
126
/// One invocation of the preprocessing shader: i.e. one mesh instance in a
/// view.
#[derive(Clone, Copy, Pod, Zeroable, ShaderType)]
#[repr(C)]
pub struct PreprocessWorkItem {
    /// The index of the batch input data in the input buffer that the shader
    /// reads from (i.e. into [`BatchedInstanceBuffers::current_input_buffer`]).
    pub input_index: u32,
    /// In direct mode, this is the index of the `MeshUniform` in the output
    /// buffer that we write to. In indirect mode, this is the index of the
    /// [`IndirectParameters`].
    pub output_index: u32,
}
140
/// The `wgpu` indirect parameters structure.
///
/// This is actually a union of the two following structures:
///
/// ```
/// #[repr(C)]
/// struct ArrayIndirectParameters {
///     vertex_count: u32,
///     instance_count: u32,
///     first_vertex: u32,
///     first_instance: u32,
/// }
///
/// #[repr(C)]
/// struct ElementIndirectParameters {
///     index_count: u32,
///     instance_count: u32,
///     first_vertex: u32,
///     base_vertex: u32,
///     first_instance: u32,
/// }
/// ```
///
/// We actually generally treat these two variants identically in code. To do
/// that, we make the following two observations:
///
/// 1. `instance_count` is in the same place in both structures. So we can
/// access it regardless of the structure we're looking at.
///
/// 2. The second structure is one word larger than the first. Thus we need to
/// pad out the first structure by one word in order to place both structures in
/// an array. If we pad out `ArrayIndirectParameters` by copying the
/// `first_instance` field into the padding, then the resulting union structure
/// will always have a read-only copy of `first_instance` in the final word. We
/// take advantage of this in the shader to reduce branching.
#[derive(Clone, Copy, Pod, Zeroable, ShaderType)]
#[repr(C)]
pub struct IndirectParameters {
    /// For `ArrayIndirectParameters`, `vertex_count`; for
    /// `ElementIndirectParameters`, `index_count`.
    ///
    /// In both cases, this is the number of vertices or indices drawn per
    /// instance.
    pub vertex_or_index_count: u32,

    /// The number of instances we're going to draw.
    ///
    /// This field is in the same place in both structures.
    pub instance_count: u32,

    /// The index of the first vertex we're to draw.
    pub first_vertex: u32,

    /// For `ArrayIndirectParameters`, `first_instance`; for
    /// `ElementIndirectParameters`, `base_vertex`.
    pub base_vertex_or_first_instance: u32,

    /// For `ArrayIndirectParameters`, this is padding; for
    /// `ElementIndirectParameters`, this is `first_instance`.
    ///
    /// Conventionally, we copy `first_instance` into this field when padding
    /// out `ArrayIndirectParameters`. That way, shader code can read this value
    /// at the same place, regardless of the specific structure this represents.
    pub first_instance: u32,
}
203
/// The buffer containing the list of [`IndirectParameters`], for draw commands.
///
/// This is uploaded to the GPU and then cleared each frame by
/// [`write_indirect_parameters_buffer`].
#[derive(Resource, Deref, DerefMut)]
pub struct IndirectParametersBuffer(pub BufferVec<IndirectParameters>);
207
208impl IndirectParametersBuffer {
209    /// Creates the indirect parameters buffer.
210    pub fn new() -> IndirectParametersBuffer {
211        IndirectParametersBuffer(BufferVec::new(
212            BufferUsages::STORAGE | BufferUsages::INDIRECT,
213        ))
214    }
215}
216
217impl Default for IndirectParametersBuffer {
218    fn default() -> Self {
219        Self::new()
220    }
221}
222
impl FromWorld for GpuPreprocessingSupport {
    /// Determines the level of GPU preprocessing support by inspecting the
    /// render adapter and device.
    fn from_world(world: &mut World) -> Self {
        let adapter = world.resource::<RenderAdapter>();
        let device = world.resource::<RenderDevice>();

        // A zero `max_compute_workgroup_size_x` means compute shaders aren't
        // usable at all, so no preprocessing is possible.
        if device.limits().max_compute_workgroup_size_x == 0 ||
            // filter some Qualcomm devices on Android as they crash when using GPU preprocessing
            (cfg!(target_os = "android") && {
                let name = adapter.get_info().name;
                // filter out Adreno 730 and earlier GPUs (except 720, it's newer than 730)
                // NOTE(review): model names with non-numeric suffixes won't
                // parse as `u16` and therefore won't be filtered — confirm
                // that's the intended behavior.
                name.strip_prefix("Adreno (TM) ").is_some_and(|version|
                    version != "720" && version.parse::<u16>().is_ok_and(|version| version <= 730)
                )
            })
        {
            GpuPreprocessingSupport::None
        } else if !device
            .features()
            .contains(Features::INDIRECT_FIRST_INSTANCE) ||
            !adapter.get_downlevel_capabilities().flags.contains(
        DownlevelFlags::VERTEX_AND_INSTANCE_INDEX_RESPECTS_RESPECTIVE_FIRST_VALUE_IN_INDIRECT_DRAW)
        {
            // Indirect draws either aren't available or don't respect
            // `first_instance`, so GPU culling (which uses indirect draws)
            // can't be enabled, but preprocessing still can.
            GpuPreprocessingSupport::PreprocessingOnly
        } else {
            GpuPreprocessingSupport::Culling
        }
    }
}
251
252impl<BD, BDI> BatchedInstanceBuffers<BD, BDI>
253where
254    BD: GpuArrayBufferable + Sync + Send + 'static,
255    BDI: Pod,
256{
257    /// Creates new buffers.
258    pub fn new() -> Self {
259        BatchedInstanceBuffers {
260            data_buffer: UninitBufferVec::new(BufferUsages::STORAGE),
261            work_item_buffers: EntityHashMap::default(),
262            current_input_buffer: RawBufferVec::new(BufferUsages::STORAGE),
263            previous_input_buffer: RawBufferVec::new(BufferUsages::STORAGE),
264        }
265    }
266
267    /// Returns the binding of the buffer that contains the per-instance data.
268    ///
269    /// This buffer needs to be filled in via a compute shader.
270    pub fn instance_data_binding(&self) -> Option<BindingResource> {
271        self.data_buffer
272            .buffer()
273            .map(|buffer| buffer.as_entire_binding())
274    }
275
276    /// Clears out the buffers in preparation for a new frame.
277    pub fn clear(&mut self) {
278        self.data_buffer.clear();
279        self.current_input_buffer.clear();
280        self.previous_input_buffer.clear();
281        for work_item_buffer in self.work_item_buffers.values_mut() {
282            work_item_buffer.buffer.clear();
283        }
284    }
285}
286
287impl<BD, BDI> Default for BatchedInstanceBuffers<BD, BDI>
288where
289    BD: GpuArrayBufferable + Sync + Send + 'static,
290    BDI: Pod,
291{
292    fn default() -> Self {
293        Self::new()
294    }
295}
296
/// Information about a render batch that we're building up during a sorted
/// render phase.
struct SortedRenderBatch<F>
where
    F: GetBatchData,
{
    /// The index of the first phase item in this batch in the list of phase
    /// items.
    ///
    /// Only this item's batch range is updated when the batch is flushed; see
    /// [`SortedRenderBatch::flush`].
    phase_item_start_index: u32,

    /// The index of the first instance in this batch in the instance buffer.
    instance_start_index: u32,

    /// The index of the indirect parameters for this batch in the
    /// [`IndirectParametersBuffer`].
    ///
    /// If CPU culling is being used, then this will be `None`.
    indirect_parameters_index: Option<NonMaxU32>,

    /// Metadata that can be used to determine whether an instance can be placed
    /// into this batch.
    ///
    /// If `None`, the item inside is unbatchable.
    meta: Option<BatchMeta<F::CompareData>>,
}
322
323impl<F> SortedRenderBatch<F>
324where
325    F: GetBatchData,
326{
327    /// Finalizes this batch and updates the [`SortedRenderPhase`] with the
328    /// appropriate indices.
329    ///
330    /// `instance_end_index` is the index of the last instance in this batch
331    /// plus one.
332    fn flush<I>(self, instance_end_index: u32, phase: &mut SortedRenderPhase<I>)
333    where
334        I: CachedRenderPipelinePhaseItem + SortedPhaseItem,
335    {
336        let (batch_range, batch_extra_index) =
337            phase.items[self.phase_item_start_index as usize].batch_range_and_extra_index_mut();
338        *batch_range = self.instance_start_index..instance_end_index;
339        *batch_extra_index =
340            PhaseItemExtraIndex::maybe_indirect_parameters_index(self.indirect_parameters_index);
341    }
342}
343
344/// A system that runs early in extraction and clears out all the
345/// [`BatchedInstanceBuffers`] for the frame.
346///
347/// We have to run this during extraction because, if GPU preprocessing is in
348/// use, the extraction phase will write to the mesh input uniform buffers
349/// directly, so the buffers need to be cleared before then.
350pub fn clear_batched_gpu_instance_buffers<GFBD>(
351    gpu_batched_instance_buffers: Option<
352        ResMut<BatchedInstanceBuffers<GFBD::BufferData, GFBD::BufferInputData>>,
353    >,
354) where
355    GFBD: GetFullBatchData,
356{
357    if let Some(mut gpu_batched_instance_buffers) = gpu_batched_instance_buffers {
358        gpu_batched_instance_buffers.clear();
359    }
360}
361
362/// A system that removes GPU preprocessing work item buffers that correspond to
363/// deleted [`ViewTarget`]s.
364///
365/// This is a separate system from [`clear_batched_gpu_instance_buffers`]
366/// because [`ViewTarget`]s aren't created until after the extraction phase is
367/// completed.
368pub fn delete_old_work_item_buffers<GFBD>(
369    mut gpu_batched_instance_buffers: ResMut<
370        BatchedInstanceBuffers<GFBD::BufferData, GFBD::BufferInputData>,
371    >,
372    view_targets: Query<Entity, With<ViewTarget>>,
373) where
374    GFBD: GetFullBatchData,
375{
376    gpu_batched_instance_buffers
377        .work_item_buffers
378        .retain(|entity, _| view_targets.contains(*entity));
379}
380
/// Batch the items in a sorted render phase, when GPU instance buffer building
/// is in use. This means comparing metadata needed to draw each phase item and
/// trying to combine the draws into a batch.
///
/// In addition to setting each item's batch range, this pushes one
/// [`PreprocessWorkItem`] per instance so that the GPU preprocessing shader
/// can copy that instance's data into place.
pub fn batch_and_prepare_sorted_render_phase<I, GFBD>(
    gpu_array_buffer: ResMut<BatchedInstanceBuffers<GFBD::BufferData, GFBD::BufferInputData>>,
    mut indirect_parameters_buffer: ResMut<IndirectParametersBuffer>,
    mut sorted_render_phases: ResMut<ViewSortedRenderPhases<I>>,
    mut views: Query<(Entity, Has<GpuCulling>)>,
    system_param_item: StaticSystemParam<GFBD::Param>,
) where
    I: CachedRenderPipelinePhaseItem + SortedPhaseItem,
    GFBD: GetFullBatchData,
{
    // We only process GPU-built batch data in this function.
    let BatchedInstanceBuffers {
        ref mut data_buffer,
        ref mut work_item_buffers,
        ..
    } = gpu_array_buffer.into_inner();

    for (view, gpu_culling) in &mut views {
        // Skip views that have no sorted render phase of this type.
        let Some(phase) = sorted_render_phases.get_mut(&view) else {
            continue;
        };

        // Create the work item buffer if necessary.
        let work_item_buffer =
            work_item_buffers
                .entry(view)
                .or_insert_with(|| PreprocessWorkItemBuffer {
                    buffer: BufferVec::new(BufferUsages::STORAGE),
                    gpu_culling,
                });

        // Walk through the list of phase items, building up batches as we go.
        let mut batch: Option<SortedRenderBatch<GFBD>> = None;
        for current_index in 0..phase.items.len() {
            // Get the index of the input data, and comparison metadata, for
            // this entity.
            let current_batch_input_index = GFBD::get_index_and_compare_data(
                &system_param_item,
                phase.items[current_index].entity(),
            );

            // Unpack that index and metadata. Note that it's possible for index
            // and/or metadata to not be present, which signifies that this
            // entity is unbatchable. In that case, we break the batch here.
            // If the index isn't present the item is not part of this pipeline and so will be skipped.
            let Some((current_input_index, current_meta)) = current_batch_input_index else {
                // Break a batch if we need to. No data-buffer slot was
                // allocated for this item, so the batch ends at the buffer's
                // current length.
                if let Some(batch) = batch.take() {
                    batch.flush(data_buffer.len() as u32, phase);
                }

                continue;
            };
            let current_meta =
                current_meta.map(|meta| BatchMeta::new(&phase.items[current_index], meta));

            // Determine if this entity can be included in the batch we're
            // building up.
            let can_batch = batch.as_ref().is_some_and(|batch| {
                // `None` for metadata indicates that the items are unbatchable.
                match (&current_meta, &batch.meta) {
                    (Some(current_meta), Some(batch_meta)) => current_meta == batch_meta,
                    (_, _) => false,
                }
            });

            // Make space in the data buffer for this instance.
            let current_entity = phase.items[current_index].entity();
            let output_index = data_buffer.add() as u32;

            // If we can't batch, break the existing batch and make a new one.
            if !can_batch {
                // Break a batch if we need to. The old batch ends just before
                // `output_index`, since that slot belongs to this instance.
                if let Some(batch) = batch.take() {
                    batch.flush(output_index, phase);
                }

                // Start a new batch. When GPU culling is in use, draws are
                // indirect, so allocate indirect parameters for the batch.
                let indirect_parameters_index = if gpu_culling {
                    GFBD::get_batch_indirect_parameters_index(
                        &system_param_item,
                        &mut indirect_parameters_buffer,
                        current_entity,
                        output_index,
                    )
                } else {
                    None
                };
                batch = Some(SortedRenderBatch {
                    phase_item_start_index: current_index as u32,
                    instance_start_index: output_index,
                    indirect_parameters_index,
                    meta: current_meta,
                });
            }

            // Add a new preprocessing work item so that the preprocessing
            // shader will copy the per-instance data over. In indirect mode
            // `output_index` refers to the batch's [`IndirectParameters`]
            // rather than an output slot.
            if let Some(batch) = batch.as_ref() {
                work_item_buffer.buffer.push(PreprocessWorkItem {
                    input_index: current_input_index.into(),
                    output_index: match batch.indirect_parameters_index {
                        Some(indirect_parameters_index) => indirect_parameters_index.into(),
                        None => output_index,
                    },
                });
            }
        }

        // Flush the final batch if necessary.
        if let Some(batch) = batch.take() {
            batch.flush(data_buffer.len() as u32, phase);
        }
    }
}
499
/// Creates batches for a render phase that uses bins.
///
/// Batchable entities within each bin are grouped into a single
/// [`BinnedRenderPhaseBatch`], while unbatchable entities each get individual
/// buffer indices. In both cases, one [`PreprocessWorkItem`] is pushed per
/// instance.
pub fn batch_and_prepare_binned_render_phase<BPI, GFBD>(
    gpu_array_buffer: ResMut<BatchedInstanceBuffers<GFBD::BufferData, GFBD::BufferInputData>>,
    mut indirect_parameters_buffer: ResMut<IndirectParametersBuffer>,
    mut binned_render_phases: ResMut<ViewBinnedRenderPhases<BPI>>,
    mut views: Query<(Entity, Has<GpuCulling>)>,
    param: StaticSystemParam<GFBD::Param>,
) where
    BPI: BinnedPhaseItem,
    GFBD: GetFullBatchData,
{
    let system_param_item = param.into_inner();

    let BatchedInstanceBuffers {
        ref mut data_buffer,
        ref mut work_item_buffers,
        ..
    } = gpu_array_buffer.into_inner();

    for (view, gpu_culling) in &mut views {
        // Skip views that have no binned render phase of this type.
        let Some(phase) = binned_render_phases.get_mut(&view) else {
            continue;
        };

        // Create the work item buffer if necessary; otherwise, just mark it as
        // used this frame.
        let work_item_buffer =
            work_item_buffers
                .entry(view)
                .or_insert_with(|| PreprocessWorkItemBuffer {
                    buffer: BufferVec::new(BufferUsages::STORAGE),
                    gpu_culling,
                });

        // Prepare batchables.

        for key in &phase.batchable_mesh_keys {
            let mut batch: Option<BinnedRenderPhaseBatch> = None;
            for &entity in &phase.batchable_mesh_values[key] {
                // Skip entities this pipeline doesn't have input data for.
                let Some(input_index) = GFBD::get_binned_index(&system_param_item, entity) else {
                    continue;
                };
                let output_index = data_buffer.add() as u32;

                match batch {
                    // Extend the existing batch to cover this instance.
                    Some(ref mut batch) => {
                        batch.instance_range.end = output_index + 1;
                        work_item_buffer.buffer.push(PreprocessWorkItem {
                            input_index: input_index.into(),
                            output_index: batch
                                .extra_index
                                .as_indirect_parameters_index()
                                .unwrap_or(output_index),
                        });
                    }

                    // Start a new batch, in indirect mode.
                    None if gpu_culling => {
                        let indirect_parameters_index = GFBD::get_batch_indirect_parameters_index(
                            &system_param_item,
                            &mut indirect_parameters_buffer,
                            entity,
                            output_index,
                        );
                        work_item_buffer.buffer.push(PreprocessWorkItem {
                            input_index: input_index.into(),
                            // NOTE(review): if no indirect parameters could be
                            // allocated, this presumably falls back to index 0;
                            // confirm that's intended.
                            output_index: indirect_parameters_index.unwrap_or_default().into(),
                        });
                        batch = Some(BinnedRenderPhaseBatch {
                            representative_entity: entity,
                            instance_range: output_index..output_index + 1,
                            extra_index: PhaseItemExtraIndex::maybe_indirect_parameters_index(
                                indirect_parameters_index,
                            ),
                        });
                    }

                    // Start a new batch, in direct mode.
                    None => {
                        work_item_buffer.buffer.push(PreprocessWorkItem {
                            input_index: input_index.into(),
                            output_index,
                        });
                        batch = Some(BinnedRenderPhaseBatch {
                            representative_entity: entity,
                            instance_range: output_index..output_index + 1,
                            extra_index: PhaseItemExtraIndex::NONE,
                        });
                    }
                }
            }

            if let Some(batch) = batch {
                phase.batch_sets.push(smallvec![batch]);
            }
        }

        // Prepare unbatchables.
        for key in &phase.unbatchable_mesh_keys {
            let unbatchables = phase.unbatchable_mesh_values.get_mut(key).unwrap();
            for &entity in &unbatchables.entities {
                let Some(input_index) = GFBD::get_binned_index(&system_param_item, entity) else {
                    continue;
                };
                let output_index = data_buffer.add() as u32;

                if gpu_culling {
                    // Allocate dedicated indirect parameters for this single
                    // instance.
                    let indirect_parameters_index = GFBD::get_batch_indirect_parameters_index(
                        &system_param_item,
                        &mut indirect_parameters_buffer,
                        entity,
                        output_index,
                    )
                    .unwrap_or_default();
                    work_item_buffer.buffer.push(PreprocessWorkItem {
                        input_index: input_index.into(),
                        output_index: indirect_parameters_index.into(),
                    });
                    unbatchables
                        .buffer_indices
                        .add(UnbatchableBinnedEntityIndices {
                            instance_index: indirect_parameters_index.into(),
                            extra_index: PhaseItemExtraIndex::indirect_parameters_index(
                                indirect_parameters_index.into(),
                            ),
                        });
                } else {
                    work_item_buffer.buffer.push(PreprocessWorkItem {
                        input_index: input_index.into(),
                        output_index,
                    });
                    unbatchables
                        .buffer_indices
                        .add(UnbatchableBinnedEntityIndices {
                            instance_index: output_index,
                            extra_index: PhaseItemExtraIndex::NONE,
                        });
                }
            }
        }
    }
}
640
641/// A system that writes all instance buffers to the GPU.
642pub fn write_batched_instance_buffers<GFBD>(
643    render_device: Res<RenderDevice>,
644    render_queue: Res<RenderQueue>,
645    gpu_array_buffer: ResMut<BatchedInstanceBuffers<GFBD::BufferData, GFBD::BufferInputData>>,
646) where
647    GFBD: GetFullBatchData,
648{
649    let BatchedInstanceBuffers {
650        ref mut data_buffer,
651        work_item_buffers: ref mut index_buffers,
652        ref mut current_input_buffer,
653        previous_input_buffer: _,
654    } = gpu_array_buffer.into_inner();
655
656    data_buffer.write_buffer(&render_device);
657    current_input_buffer.write_buffer(&render_device, &render_queue);
658    // There's no need to write `previous_input_buffer`, as we wrote
659    // that on the previous frame, and it hasn't changed.
660
661    for index_buffer in index_buffers.values_mut() {
662        index_buffer
663            .buffer
664            .write_buffer(&render_device, &render_queue);
665    }
666}
667
/// A system that uploads the [`IndirectParametersBuffer`] to the GPU and then
/// empties the CPU-side list so it can be refilled next frame.
///
/// [`BatchingPlugin`] schedules this in [`RenderSet::PrepareResourcesFlush`].
pub fn write_indirect_parameters_buffer(
    render_device: Res<RenderDevice>,
    render_queue: Res<RenderQueue>,
    mut indirect_parameters_buffer: ResMut<IndirectParametersBuffer>,
) {
    indirect_parameters_buffer.write_buffer(&render_device, &render_queue);
    // Clear the in-memory list now that the upload has been queued, so the
    // buffer starts empty on the next frame.
    indirect_parameters_buffer.clear();
}