// glam/f32/sse2/vec4.rs
// Generated from vec.rs.tera template. Edit the template, not the generated file.

use crate::{f32::math, sse2::*, BVec4, BVec4A, Vec2, Vec3, Vec3A};

#[cfg(not(target_arch = "spirv"))]
use core::fmt;
use core::iter::{Product, Sum};
use core::{f32, ops::*};

#[cfg(target_arch = "x86")]
use core::arch::x86::*;
#[cfg(target_arch = "x86_64")]
use core::arch::x86_64::*;

#[repr(C)]
union UnionCast {
    a: [f32; 4],
    v: Vec4,
}

/// Creates a 4-dimensional vector.
#[inline(always)]
#[must_use]
pub const fn vec4(x: f32, y: f32, z: f32, w: f32) -> Vec4 {
    Vec4::new(x, y, z, w)
}

/// A 4-dimensional vector.
///
/// SIMD vector types are used for storage on supported platforms.
///
/// This type is 16 byte aligned.
#[derive(Clone, Copy)]
#[repr(transparent)]
pub struct Vec4(pub(crate) __m128);

impl Vec4 {
    /// All zeroes.
    pub const ZERO: Self = Self::splat(0.0);

    /// All ones.
    pub const ONE: Self = Self::splat(1.0);

    /// All negative ones.
    pub const NEG_ONE: Self = Self::splat(-1.0);

    /// All `f32::MIN`.
    pub const MIN: Self = Self::splat(f32::MIN);

    /// All `f32::MAX`.
    pub const MAX: Self = Self::splat(f32::MAX);

    /// All `f32::NAN`.
    pub const NAN: Self = Self::splat(f32::NAN);

    /// All `f32::INFINITY`.
    pub const INFINITY: Self = Self::splat(f32::INFINITY);

    /// All `f32::NEG_INFINITY`.
    pub const NEG_INFINITY: Self = Self::splat(f32::NEG_INFINITY);

    /// A unit vector pointing along the positive X axis.
    pub const X: Self = Self::new(1.0, 0.0, 0.0, 0.0);

    /// A unit vector pointing along the positive Y axis.
    pub const Y: Self = Self::new(0.0, 1.0, 0.0, 0.0);

    /// A unit vector pointing along the positive Z axis.
    pub const Z: Self = Self::new(0.0, 0.0, 1.0, 0.0);

    /// A unit vector pointing along the positive W axis.
    pub const W: Self = Self::new(0.0, 0.0, 0.0, 1.0);

    /// A unit vector pointing along the negative X axis.
    pub const NEG_X: Self = Self::new(-1.0, 0.0, 0.0, 0.0);

    /// A unit vector pointing along the negative Y axis.
    pub const NEG_Y: Self = Self::new(0.0, -1.0, 0.0, 0.0);

    /// A unit vector pointing along the negative Z axis.
    pub const NEG_Z: Self = Self::new(0.0, 0.0, -1.0, 0.0);

    /// A unit vector pointing along the negative W axis.
    pub const NEG_W: Self = Self::new(0.0, 0.0, 0.0, -1.0);

    /// The unit axes.
    pub const AXES: [Self; 4] = [Self::X, Self::Y, Self::Z, Self::W];

    /// Creates a new vector.
    #[inline(always)]
    #[must_use]
    pub const fn new(x: f32, y: f32, z: f32, w: f32) -> Self {
        unsafe { UnionCast { a: [x, y, z, w] }.v }
    }

    /// Creates a vector with all elements set to `v`.
    #[inline]
    #[must_use]
    pub const fn splat(v: f32) -> Self {
        unsafe { UnionCast { a: [v; 4] }.v }
    }

    /// Creates a vector from the elements in `if_true` and `if_false`, selecting which to use
    /// for each element based on the corresponding element of `mask`.
    ///
    /// A true element in the mask uses the corresponding element from `if_true`, and false
    /// uses the element from `if_false`.
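    ///
    /// A small illustrative sketch, building the mask with [`Self::cmpgt()`]:
    ///
    /// ```
    /// use glam::Vec4;
    /// let mask = Vec4::new(1.0, 0.0, 1.0, 0.0).cmpgt(Vec4::ZERO);
    /// let v = Vec4::select(mask, Vec4::splat(1.0), Vec4::splat(-1.0));
    /// assert_eq!(v, Vec4::new(1.0, -1.0, 1.0, -1.0));
    /// ```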
    #[inline]
    #[must_use]
    pub fn select(mask: BVec4A, if_true: Self, if_false: Self) -> Self {
        Self(unsafe {
            _mm_or_ps(
                _mm_andnot_ps(mask.0, if_false.0),
                _mm_and_ps(if_true.0, mask.0),
            )
        })
    }

    /// Creates a new vector from an array.
    #[inline]
    #[must_use]
    pub const fn from_array(a: [f32; 4]) -> Self {
        Self::new(a[0], a[1], a[2], a[3])
    }

    /// `[x, y, z, w]`
    #[inline]
    #[must_use]
    pub const fn to_array(&self) -> [f32; 4] {
        unsafe { *(self as *const Vec4 as *const [f32; 4]) }
    }

    /// Creates a vector from the first 4 values in `slice`.
    ///
    /// # Panics
    ///
    /// Panics if `slice` is less than 4 elements long.
    #[inline]
    #[must_use]
    pub const fn from_slice(slice: &[f32]) -> Self {
        Self::new(slice[0], slice[1], slice[2], slice[3])
    }

    /// Writes the elements of `self` to the first 4 elements in `slice`.
    ///
    /// # Panics
    ///
    /// Panics if `slice` is less than 4 elements long.
    #[inline]
    pub fn write_to_slice(self, slice: &mut [f32]) {
        unsafe {
            assert!(slice.len() >= 4);
            _mm_storeu_ps(slice.as_mut_ptr(), self.0);
        }
    }

    /// Creates a 3D vector from the `x`, `y` and `z` elements of `self`, discarding `w`.
    ///
    /// Truncation to [`Vec3`] may also be performed by using [`self.xyz()`][crate::swizzles::Vec4Swizzles::xyz()].
    ///
    /// To truncate to [`Vec3A`] use [`Vec3A::from()`].
    #[inline]
    #[must_use]
    pub fn truncate(self) -> Vec3 {
        use crate::swizzles::Vec4Swizzles;
        self.xyz()
    }

    /// Creates a 4D vector from `self` with the given value of `x`.
    #[inline]
    #[must_use]
    pub fn with_x(mut self, x: f32) -> Self {
        self.x = x;
        self
    }

    /// Creates a 4D vector from `self` with the given value of `y`.
    #[inline]
    #[must_use]
    pub fn with_y(mut self, y: f32) -> Self {
        self.y = y;
        self
    }

    /// Creates a 4D vector from `self` with the given value of `z`.
    #[inline]
    #[must_use]
    pub fn with_z(mut self, z: f32) -> Self {
        self.z = z;
        self
    }

    /// Creates a 4D vector from `self` with the given value of `w`.
    #[inline]
    #[must_use]
    pub fn with_w(mut self, w: f32) -> Self {
        self.w = w;
        self
    }

    /// Computes the dot product of `self` and `rhs`.
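    ///
    /// For example, `[1, 2, 3, 4] · [1, 1, 1, 1] = 1 + 2 + 3 + 4 = 10`:
    ///
    /// ```
    /// use glam::Vec4;
    /// assert_eq!(Vec4::new(1.0, 2.0, 3.0, 4.0).dot(Vec4::ONE), 10.0);
    /// ```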
    #[inline]
    #[must_use]
    pub fn dot(self, rhs: Self) -> f32 {
        unsafe { dot4(self.0, rhs.0) }
    }

    /// Returns a vector where every component is the dot product of `self` and `rhs`.
    #[inline]
    #[must_use]
    pub fn dot_into_vec(self, rhs: Self) -> Self {
        Self(unsafe { dot4_into_m128(self.0, rhs.0) })
    }

    /// Returns a vector containing the minimum values for each element of `self` and `rhs`.
    ///
    /// In other words this computes `[self.x.min(rhs.x), self.y.min(rhs.y), ..]`.
    #[inline]
    #[must_use]
    pub fn min(self, rhs: Self) -> Self {
        Self(unsafe { _mm_min_ps(self.0, rhs.0) })
    }

    /// Returns a vector containing the maximum values for each element of `self` and `rhs`.
    ///
    /// In other words this computes `[self.x.max(rhs.x), self.y.max(rhs.y), ..]`.
    #[inline]
    #[must_use]
    pub fn max(self, rhs: Self) -> Self {
        Self(unsafe { _mm_max_ps(self.0, rhs.0) })
    }

    /// Component-wise clamping of values, similar to [`f32::clamp`].
    ///
    /// Each element in `min` must be less-or-equal to the corresponding element in `max`.
    ///
    /// # Panics
    ///
    /// Will panic if `min` is greater than `max` when `glam_assert` is enabled.
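    ///
    /// A small illustrative sketch, clamping each lane into `[0, 1]`:
    ///
    /// ```
    /// use glam::Vec4;
    /// let v = Vec4::new(-2.0, 0.5, 3.0, 1.0);
    /// assert_eq!(v.clamp(Vec4::ZERO, Vec4::ONE), Vec4::new(0.0, 0.5, 1.0, 1.0));
    /// ```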
    #[inline]
    #[must_use]
    pub fn clamp(self, min: Self, max: Self) -> Self {
        glam_assert!(min.cmple(max).all(), "clamp: expected min <= max");
        self.max(min).min(max)
    }

    /// Returns the horizontal minimum of `self`.
    ///
    /// In other words this computes `min(x, y, ..)`.
    #[inline]
    #[must_use]
    pub fn min_element(self) -> f32 {
        unsafe {
            let v = self.0;
            let v = _mm_min_ps(v, _mm_shuffle_ps(v, v, 0b00_00_11_10));
            let v = _mm_min_ps(v, _mm_shuffle_ps(v, v, 0b00_00_00_01));
            _mm_cvtss_f32(v)
        }
    }

    /// Returns the horizontal maximum of `self`.
    ///
    /// In other words this computes `max(x, y, ..)`.
    #[inline]
    #[must_use]
    pub fn max_element(self) -> f32 {
        unsafe {
            let v = self.0;
            let v = _mm_max_ps(v, _mm_shuffle_ps(v, v, 0b00_00_11_10));
            let v = _mm_max_ps(v, _mm_shuffle_ps(v, v, 0b00_00_00_01));
            _mm_cvtss_f32(v)
        }
    }

    /// Returns the sum of all elements of `self`.
    ///
    /// In other words, this computes `self.x + self.y + ..`.
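    ///
    /// For example:
    ///
    /// ```
    /// use glam::Vec4;
    /// assert_eq!(Vec4::new(1.0, 2.0, 3.0, 4.0).element_sum(), 10.0);
    /// ```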
    #[inline]
    #[must_use]
    pub fn element_sum(self) -> f32 {
        unsafe {
            let v = self.0;
            let v = _mm_add_ps(v, _mm_shuffle_ps(v, v, 0b00_11_00_01));
            let v = _mm_add_ps(v, _mm_shuffle_ps(v, v, 0b00_00_00_10));
            _mm_cvtss_f32(v)
        }
    }

    /// Returns the product of all elements of `self`.
    ///
    /// In other words, this computes `self.x * self.y * ..`.
    #[inline]
    #[must_use]
    pub fn element_product(self) -> f32 {
        unsafe {
            let v = self.0;
            let v = _mm_mul_ps(v, _mm_shuffle_ps(v, v, 0b00_11_00_01));
            let v = _mm_mul_ps(v, _mm_shuffle_ps(v, v, 0b00_00_00_10));
            _mm_cvtss_f32(v)
        }
    }

    /// Returns a vector mask containing the result of a `==` comparison for each element of
    /// `self` and `rhs`.
    ///
    /// In other words, this computes `[self.x == rhs.x, self.y == rhs.y, ..]` for all
    /// elements.
    #[inline]
    #[must_use]
    pub fn cmpeq(self, rhs: Self) -> BVec4A {
        BVec4A(unsafe { _mm_cmpeq_ps(self.0, rhs.0) })
    }

    /// Returns a vector mask containing the result of a `!=` comparison for each element of
    /// `self` and `rhs`.
    ///
    /// In other words this computes `[self.x != rhs.x, self.y != rhs.y, ..]` for all
    /// elements.
    #[inline]
    #[must_use]
    pub fn cmpne(self, rhs: Self) -> BVec4A {
        BVec4A(unsafe { _mm_cmpneq_ps(self.0, rhs.0) })
    }

    /// Returns a vector mask containing the result of a `>=` comparison for each element of
    /// `self` and `rhs`.
    ///
    /// In other words this computes `[self.x >= rhs.x, self.y >= rhs.y, ..]` for all
    /// elements.
    #[inline]
    #[must_use]
    pub fn cmpge(self, rhs: Self) -> BVec4A {
        BVec4A(unsafe { _mm_cmpge_ps(self.0, rhs.0) })
    }

    /// Returns a vector mask containing the result of a `>` comparison for each element of
    /// `self` and `rhs`.
    ///
    /// In other words this computes `[self.x > rhs.x, self.y > rhs.y, ..]` for all
    /// elements.
    #[inline]
    #[must_use]
    pub fn cmpgt(self, rhs: Self) -> BVec4A {
        BVec4A(unsafe { _mm_cmpgt_ps(self.0, rhs.0) })
    }

    /// Returns a vector mask containing the result of a `<=` comparison for each element of
    /// `self` and `rhs`.
    ///
    /// In other words this computes `[self.x <= rhs.x, self.y <= rhs.y, ..]` for all
    /// elements.
    #[inline]
    #[must_use]
    pub fn cmple(self, rhs: Self) -> BVec4A {
        BVec4A(unsafe { _mm_cmple_ps(self.0, rhs.0) })
    }

    /// Returns a vector mask containing the result of a `<` comparison for each element of
    /// `self` and `rhs`.
    ///
    /// In other words this computes `[self.x < rhs.x, self.y < rhs.y, ..]` for all
    /// elements.
    #[inline]
    #[must_use]
    pub fn cmplt(self, rhs: Self) -> BVec4A {
        BVec4A(unsafe { _mm_cmplt_ps(self.0, rhs.0) })
    }

    /// Returns a vector containing the absolute value of each element of `self`.
    #[inline]
    #[must_use]
    pub fn abs(self) -> Self {
        Self(unsafe { crate::sse2::m128_abs(self.0) })
    }

    /// Returns a vector with elements representing the sign of `self`.
    ///
    /// - `1.0` if the number is positive, `+0.0` or `INFINITY`
    /// - `-1.0` if the number is negative, `-0.0` or `NEG_INFINITY`
    /// - `NAN` if the number is `NAN`
    #[inline]
    #[must_use]
    pub fn signum(self) -> Self {
        unsafe {
            let result = Self(_mm_or_ps(_mm_and_ps(self.0, Self::NEG_ONE.0), Self::ONE.0));
            let mask = self.is_nan_mask();
            Self::select(mask, self, result)
        }
    }

    /// Returns a vector with signs of `rhs` and the magnitudes of `self`.
    #[inline]
    #[must_use]
    pub fn copysign(self, rhs: Self) -> Self {
        unsafe {
            let mask = Self::splat(-0.0);
            Self(_mm_or_ps(
                _mm_and_ps(rhs.0, mask.0),
                _mm_andnot_ps(mask.0, self.0),
            ))
        }
    }

    /// Returns a bitmask with the lowest 4 bits set to the sign bits from the elements of `self`.
    ///
    /// A negative element results in a `1` bit and a positive element in a `0` bit.  Element `x` goes
    /// into the lowest bit, element `y` into the second-lowest, etc.
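    ///
    /// For example, with `x` and `z` negative only bits 0 and 2 are set:
    ///
    /// ```
    /// use glam::Vec4;
    /// assert_eq!(Vec4::new(-1.0, 2.0, -3.0, 4.0).is_negative_bitmask(), 0b0101);
    /// ```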
    #[inline]
    #[must_use]
    pub fn is_negative_bitmask(self) -> u32 {
        unsafe { _mm_movemask_ps(self.0) as u32 }
    }

    /// Returns `true` if, and only if, all elements are finite.  If any element is either
    /// `NaN`, positive or negative infinity, this will return `false`.
    #[inline]
    #[must_use]
    pub fn is_finite(self) -> bool {
        self.x.is_finite() && self.y.is_finite() && self.z.is_finite() && self.w.is_finite()
    }

    /// Returns `true` if any elements are `NaN`.
    #[inline]
    #[must_use]
    pub fn is_nan(self) -> bool {
        self.is_nan_mask().any()
    }

    /// Performs `is_nan` on each element of self, returning a vector mask of the results.
    ///
    /// In other words, this computes `[x.is_nan(), y.is_nan(), z.is_nan(), w.is_nan()]`.
    #[inline]
    #[must_use]
    pub fn is_nan_mask(self) -> BVec4A {
        BVec4A(unsafe { _mm_cmpunord_ps(self.0, self.0) })
    }

    /// Computes the length of `self`.
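    ///
    /// For example, `[1, 2, 2, 4]` has length `sqrt(1 + 4 + 4 + 16) = 5`:
    ///
    /// ```
    /// use glam::Vec4;
    /// let v = Vec4::new(1.0, 2.0, 2.0, 4.0);
    /// assert_eq!(v.length(), 5.0);
    /// assert_eq!(v.length_squared(), 25.0);
    /// ```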
    #[doc(alias = "magnitude")]
    #[inline]
    #[must_use]
    pub fn length(self) -> f32 {
        unsafe {
            let dot = dot4_in_x(self.0, self.0);
            _mm_cvtss_f32(_mm_sqrt_ps(dot))
        }
    }

    /// Computes the squared length of `self`.
    ///
    /// This is faster than `length()` as it avoids a square root operation.
    #[doc(alias = "magnitude2")]
    #[inline]
    #[must_use]
    pub fn length_squared(self) -> f32 {
        self.dot(self)
    }

    /// Computes `1.0 / length()`.
    ///
    /// For valid results, `self` must _not_ be of length zero.
    #[inline]
    #[must_use]
    pub fn length_recip(self) -> f32 {
        unsafe {
            let dot = dot4_in_x(self.0, self.0);
            _mm_cvtss_f32(_mm_div_ps(Self::ONE.0, _mm_sqrt_ps(dot)))
        }
    }

    /// Computes the Euclidean distance between two points in space.
    #[inline]
    #[must_use]
    pub fn distance(self, rhs: Self) -> f32 {
        (self - rhs).length()
    }

    /// Computes the squared Euclidean distance between two points in space.
    #[inline]
    #[must_use]
    pub fn distance_squared(self, rhs: Self) -> f32 {
        (self - rhs).length_squared()
    }

    /// Returns the element-wise quotient of [Euclidean division] of `self` by `rhs`.
    ///
    /// [Euclidean division]: f32::div_euclid
    #[inline]
    #[must_use]
    pub fn div_euclid(self, rhs: Self) -> Self {
        Self::new(
            math::div_euclid(self.x, rhs.x),
            math::div_euclid(self.y, rhs.y),
            math::div_euclid(self.z, rhs.z),
            math::div_euclid(self.w, rhs.w),
        )
    }

    /// Returns the element-wise remainder of [Euclidean division] of `self` by `rhs`.
    ///
    /// [Euclidean division]: f32::rem_euclid
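    ///
    /// For example, the Euclidean remainder is always non-negative:
    ///
    /// ```
    /// use glam::Vec4;
    /// assert_eq!(Vec4::splat(-5.0).rem_euclid(Vec4::splat(3.0)), Vec4::splat(1.0));
    /// ```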
    #[inline]
    #[must_use]
    pub fn rem_euclid(self, rhs: Self) -> Self {
        Self::new(
            math::rem_euclid(self.x, rhs.x),
            math::rem_euclid(self.y, rhs.y),
            math::rem_euclid(self.z, rhs.z),
            math::rem_euclid(self.w, rhs.w),
        )
    }

    /// Returns `self` normalized to length 1.0.
    ///
    /// For valid results, `self` must _not_ be of length zero, nor very close to zero.
    ///
    /// See also [`Self::try_normalize()`] and [`Self::normalize_or_zero()`].
    ///
    /// # Panics
    ///
    /// Will panic if `self` is zero length when `glam_assert` is enabled.
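    ///
    /// A small illustrative sketch:
    ///
    /// ```
    /// use glam::Vec4;
    /// let n = Vec4::new(2.0, 0.0, 0.0, 0.0).normalize();
    /// assert_eq!(n, Vec4::X);
    /// assert!(n.is_normalized());
    /// ```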
    #[inline]
    #[must_use]
    pub fn normalize(self) -> Self {
        unsafe {
            let length = _mm_sqrt_ps(dot4_into_m128(self.0, self.0));
            #[allow(clippy::let_and_return)]
            let normalized = Self(_mm_div_ps(self.0, length));
            glam_assert!(normalized.is_finite());
            normalized
        }
    }

    /// Returns `self` normalized to length 1.0 if possible, else returns `None`.
    ///
    /// In particular, if the input is zero (or very close to zero), or non-finite,
    /// the result of this operation will be `None`.
    ///
    /// See also [`Self::normalize_or_zero()`].
    #[inline]
    #[must_use]
    pub fn try_normalize(self) -> Option<Self> {
        let rcp = self.length_recip();
        if rcp.is_finite() && rcp > 0.0 {
            Some(self * rcp)
        } else {
            None
        }
    }

    /// Returns `self` normalized to length 1.0 if possible, else returns a
    /// fallback value.
    ///
    /// In particular, if the input is zero (or very close to zero), or non-finite,
    /// the result of this operation will be the fallback value.
    ///
    /// See also [`Self::try_normalize()`].
    #[inline]
    #[must_use]
    pub fn normalize_or(self, fallback: Self) -> Self {
        let rcp = self.length_recip();
        if rcp.is_finite() && rcp > 0.0 {
            self * rcp
        } else {
            fallback
        }
    }

    /// Returns `self` normalized to length 1.0 if possible, else returns zero.
    ///
    /// In particular, if the input is zero (or very close to zero), or non-finite,
    /// the result of this operation will be zero.
    ///
    /// See also [`Self::try_normalize()`].
    #[inline]
    #[must_use]
    pub fn normalize_or_zero(self) -> Self {
        self.normalize_or(Self::ZERO)
    }

    /// Returns whether `self` is length `1.0` or not.
    ///
    /// Uses a precision threshold of approximately `1e-4`.
    #[inline]
    #[must_use]
    pub fn is_normalized(self) -> bool {
        math::abs(self.length_squared() - 1.0) <= 2e-4
    }

    /// Returns the vector projection of `self` onto `rhs`.
    ///
    /// `rhs` must be of non-zero length.
    ///
    /// # Panics
    ///
    /// Will panic if `rhs` is zero length when `glam_assert` is enabled.
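    ///
    /// For example, projecting onto the X axis keeps only the `x` component:
    ///
    /// ```
    /// use glam::Vec4;
    /// let v = Vec4::new(1.0, 2.0, 0.0, 0.0);
    /// assert_eq!(v.project_onto(Vec4::X), Vec4::new(1.0, 0.0, 0.0, 0.0));
    /// ```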
    #[inline]
    #[must_use]
    pub fn project_onto(self, rhs: Self) -> Self {
        let other_len_sq_rcp = rhs.dot(rhs).recip();
        glam_assert!(other_len_sq_rcp.is_finite());
        rhs * self.dot(rhs) * other_len_sq_rcp
    }

    /// Returns the vector rejection of `self` from `rhs`.
    ///
    /// The vector rejection is the vector perpendicular to the projection of `self` onto
    /// `rhs`, in other words the result of `self - self.project_onto(rhs)`.
    ///
    /// `rhs` must be of non-zero length.
    ///
    /// # Panics
    ///
    /// Will panic if `rhs` has a length of zero when `glam_assert` is enabled.
    #[inline]
    #[must_use]
    pub fn reject_from(self, rhs: Self) -> Self {
        self - self.project_onto(rhs)
    }

    /// Returns the vector projection of `self` onto `rhs`.
    ///
    /// `rhs` must be normalized.
    ///
    /// # Panics
    ///
    /// Will panic if `rhs` is not normalized when `glam_assert` is enabled.
    #[inline]
    #[must_use]
    pub fn project_onto_normalized(self, rhs: Self) -> Self {
        glam_assert!(rhs.is_normalized());
        rhs * self.dot(rhs)
    }

    /// Returns the vector rejection of `self` from `rhs`.
    ///
    /// The vector rejection is the vector perpendicular to the projection of `self` onto
    /// `rhs`, in other words the result of `self - self.project_onto(rhs)`.
    ///
    /// `rhs` must be normalized.
    ///
    /// # Panics
    ///
    /// Will panic if `rhs` is not normalized when `glam_assert` is enabled.
    #[inline]
    #[must_use]
    pub fn reject_from_normalized(self, rhs: Self) -> Self {
        self - self.project_onto_normalized(rhs)
    }

    /// Returns a vector containing the nearest integer to a number for each element of `self`.
    /// Rounds half-way cases away from 0.0.
    #[inline]
    #[must_use]
    pub fn round(self) -> Self {
        Self(unsafe { m128_round(self.0) })
    }

    /// Returns a vector containing the largest integer less than or equal to a number for each
    /// element of `self`.
    #[inline]
    #[must_use]
    pub fn floor(self) -> Self {
        Self(unsafe { m128_floor(self.0) })
    }

    /// Returns a vector containing the smallest integer greater than or equal to a number for
    /// each element of `self`.
    #[inline]
    #[must_use]
    pub fn ceil(self) -> Self {
        Self(unsafe { m128_ceil(self.0) })
    }

    /// Returns a vector containing the integer part of each element of `self`. This means numbers are
    /// always truncated towards zero.
    #[inline]
    #[must_use]
    pub fn trunc(self) -> Self {
        Self(unsafe { m128_trunc(self.0) })
    }

    /// Returns a vector containing the fractional part of the vector as `self - self.trunc()`.
    ///
    /// Note that this differs from the GLSL implementation of `fract` which returns
    /// `self - self.floor()`.
    ///
    /// Note that this is fast but not precise for large numbers.
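    ///
    /// For example, for a negative input the two conventions differ:
    ///
    /// ```
    /// use glam::Vec4;
    /// assert_eq!(Vec4::splat(-1.25).fract(), Vec4::splat(-0.25));
    /// assert_eq!(Vec4::splat(-1.25).fract_gl(), Vec4::splat(0.75));
    /// ```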
    #[inline]
    #[must_use]
    pub fn fract(self) -> Self {
        self - self.trunc()
    }

    /// Returns a vector containing the fractional part of the vector as `self - self.floor()`.
    ///
    /// Note that this differs from the Rust implementation of `fract` which returns
    /// `self - self.trunc()`.
    ///
    /// Note that this is fast but not precise for large numbers.
    #[inline]
    #[must_use]
    pub fn fract_gl(self) -> Self {
        self - self.floor()
    }

    /// Returns a vector containing `e^self` (the exponential function) for each element of
    /// `self`.
    #[inline]
    #[must_use]
    pub fn exp(self) -> Self {
        Self::new(
            math::exp(self.x),
            math::exp(self.y),
            math::exp(self.z),
            math::exp(self.w),
        )
    }

    /// Returns a vector containing each element of `self` raised to the power of `n`.
    #[inline]
    #[must_use]
    pub fn powf(self, n: f32) -> Self {
        Self::new(
            math::powf(self.x, n),
            math::powf(self.y, n),
            math::powf(self.z, n),
            math::powf(self.w, n),
        )
    }

    /// Returns a vector containing the reciprocal `1.0/n` of each element of `self`.
    #[inline]
    #[must_use]
    pub fn recip(self) -> Self {
        Self(unsafe { _mm_div_ps(Self::ONE.0, self.0) })
    }

    /// Performs a linear interpolation between `self` and `rhs` based on the value `s`.
    ///
    /// When `s` is `0.0`, the result will be equal to `self`.  When `s` is `1.0`, the result
    /// will be equal to `rhs`. When `s` is outside of range `[0, 1]`, the result is linearly
    /// extrapolated.
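    ///
    /// For example, `s = 0.5` yields the midpoint and `s = 2.0` extrapolates past `rhs`:
    ///
    /// ```
    /// use glam::Vec4;
    /// assert_eq!(Vec4::ZERO.lerp(Vec4::ONE, 0.5), Vec4::splat(0.5));
    /// assert_eq!(Vec4::ZERO.lerp(Vec4::ONE, 2.0), Vec4::splat(2.0));
    /// ```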
    #[doc(alias = "mix")]
    #[inline]
    #[must_use]
    pub fn lerp(self, rhs: Self, s: f32) -> Self {
        self + ((rhs - self) * s)
    }

    /// Moves towards `rhs` based on the value `d`.
    ///
    /// When `d` is `0.0`, the result will be equal to `self`. When `d` is equal to
    /// `self.distance(rhs)`, the result will be equal to `rhs`. Will not go past `rhs`.
    #[inline]
    #[must_use]
    pub fn move_towards(&self, rhs: Self, d: f32) -> Self {
        let a = rhs - *self;
        let len = a.length();
        if len <= d || len <= 1e-4 {
            return rhs;
        }
        *self + a / len * d
    }

    /// Calculates the midpoint between `self` and `rhs`.
    ///
    /// The midpoint is the average of, or halfway point between, two vectors.
    /// `a.midpoint(b)` should yield the same result as `a.lerp(b, 0.5)`
    /// while being slightly cheaper to compute.
    #[inline]
    pub fn midpoint(self, rhs: Self) -> Self {
        (self + rhs) * 0.5
    }

    /// Returns true if the absolute difference of all elements between `self` and `rhs` is
    /// less than or equal to `max_abs_diff`.
    ///
    /// This can be used to compare if two vectors contain similar elements. It works best when
    /// comparing with a known value. The `max_abs_diff` that should be used depends on
    /// the values being compared against.
    ///
    /// For more see
    /// [comparing floating point numbers](https://randomascii.wordpress.com/2012/02/25/comparing-floating-point-numbers-2012-edition/).
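    ///
    /// A small illustrative sketch:
    ///
    /// ```
    /// use glam::Vec4;
    /// let a = Vec4::new(1.0, 2.0, 3.0, 4.0);
    /// assert!(a.abs_diff_eq(Vec4::new(1.25, 2.0, 3.0, 4.0), 0.5));
    /// assert!(!a.abs_diff_eq(Vec4::new(2.0, 2.0, 3.0, 4.0), 0.5));
    /// ```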
    #[inline]
    #[must_use]
    pub fn abs_diff_eq(self, rhs: Self, max_abs_diff: f32) -> bool {
        self.sub(rhs).abs().cmple(Self::splat(max_abs_diff)).all()
    }

    /// Returns a vector with a length no less than `min` and no more than `max`.
    ///
    /// # Panics
    ///
    /// Will panic if `min` is greater than `max` when `glam_assert` is enabled.
    #[inline]
    #[must_use]
    pub fn clamp_length(self, min: f32, max: f32) -> Self {
        glam_assert!(min <= max);
        let length_sq = self.length_squared();
        if length_sq < min * min {
            min * (self / math::sqrt(length_sq))
        } else if length_sq > max * max {
            max * (self / math::sqrt(length_sq))
        } else {
            self
        }
    }

    /// Returns a vector with a length no more than `max`.
    #[inline]
    #[must_use]
    pub fn clamp_length_max(self, max: f32) -> Self {
        let length_sq = self.length_squared();
        if length_sq > max * max {
            max * (self / math::sqrt(length_sq))
        } else {
            self
        }
    }

    /// Returns a vector with a length no less than `min`.
    #[inline]
    #[must_use]
    pub fn clamp_length_min(self, min: f32) -> Self {
        let length_sq = self.length_squared();
        if length_sq < min * min {
            min * (self / math::sqrt(length_sq))
        } else {
            self
        }
    }

    /// Fused multiply-add. Computes `(self * a) + b` element-wise with only one rounding
    /// error, yielding a more accurate result than an unfused multiply-add.
    ///
    /// Using `mul_add` *may* be more performant than an unfused multiply-add if the target
    /// architecture has a dedicated fma CPU instruction. However, this is not always true,
    /// and will be heavily dependent on designing algorithms with specific target hardware in
    /// mind.
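    ///
    /// For example, `2 * 3 + 1 = 7` in every lane:
    ///
    /// ```
    /// use glam::Vec4;
    /// let v = Vec4::splat(2.0).mul_add(Vec4::splat(3.0), Vec4::splat(1.0));
    /// assert_eq!(v, Vec4::splat(7.0));
    /// ```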
    #[inline]
    #[must_use]
    pub fn mul_add(self, a: Self, b: Self) -> Self {
        #[cfg(target_feature = "fma")]
        unsafe {
            Self(_mm_fmadd_ps(self.0, a.0, b.0))
        }
        #[cfg(not(target_feature = "fma"))]
        Self::new(
            math::mul_add(self.x, a.x, b.x),
            math::mul_add(self.y, a.y, b.y),
            math::mul_add(self.z, a.z, b.z),
            math::mul_add(self.w, a.w, b.w),
        )
    }

    /// Casts all elements of `self` to `f64`.
    #[inline]
    #[must_use]
    pub fn as_dvec4(&self) -> crate::DVec4 {
        crate::DVec4::new(self.x as f64, self.y as f64, self.z as f64, self.w as f64)
    }

    /// Casts all elements of `self` to `i16`.
    #[inline]
    #[must_use]
    pub fn as_i16vec4(&self) -> crate::I16Vec4 {
        crate::I16Vec4::new(self.x as i16, self.y as i16, self.z as i16, self.w as i16)
    }

    /// Casts all elements of `self` to `u16`.
    #[inline]
    #[must_use]
    pub fn as_u16vec4(&self) -> crate::U16Vec4 {
        crate::U16Vec4::new(self.x as u16, self.y as u16, self.z as u16, self.w as u16)
    }

    /// Casts all elements of `self` to `i32`.
    #[inline]
    #[must_use]
    pub fn as_ivec4(&self) -> crate::IVec4 {
        crate::IVec4::new(self.x as i32, self.y as i32, self.z as i32, self.w as i32)
    }

    /// Casts all elements of `self` to `u32`.
    #[inline]
    #[must_use]
    pub fn as_uvec4(&self) -> crate::UVec4 {
        crate::UVec4::new(self.x as u32, self.y as u32, self.z as u32, self.w as u32)
    }

    /// Casts all elements of `self` to `i64`.
    #[inline]
    #[must_use]
    pub fn as_i64vec4(&self) -> crate::I64Vec4 {
        crate::I64Vec4::new(self.x as i64, self.y as i64, self.z as i64, self.w as i64)
    }

    /// Casts all elements of `self` to `u64`.
    #[inline]
    #[must_use]
    pub fn as_u64vec4(&self) -> crate::U64Vec4 {
        crate::U64Vec4::new(self.x as u64, self.y as u64, self.z as u64, self.w as u64)
    }
}

impl Default for Vec4 {
    #[inline(always)]
    fn default() -> Self {
        Self::ZERO
    }
}

impl PartialEq for Vec4 {
    #[inline]
    fn eq(&self, rhs: &Self) -> bool {
        self.cmpeq(*rhs).all()
    }
}

impl Div<Vec4> for Vec4 {
    type Output = Self;
    #[inline]
    fn div(self, rhs: Self) -> Self {
        Self(unsafe { _mm_div_ps(self.0, rhs.0) })
    }
}

impl DivAssign<Vec4> for Vec4 {
    #[inline]
    fn div_assign(&mut self, rhs: Self) {
        self.0 = unsafe { _mm_div_ps(self.0, rhs.0) };
    }
}

impl Div<f32> for Vec4 {
    type Output = Self;
    #[inline]
    fn div(self, rhs: f32) -> Self {
        Self(unsafe { _mm_div_ps(self.0, _mm_set1_ps(rhs)) })
    }
}

impl DivAssign<f32> for Vec4 {
    #[inline]
    fn div_assign(&mut self, rhs: f32) {
        self.0 = unsafe { _mm_div_ps(self.0, _mm_set1_ps(rhs)) };
    }
}

impl Div<Vec4> for f32 {
    type Output = Vec4;
    #[inline]
    fn div(self, rhs: Vec4) -> Vec4 {
        Vec4(unsafe { _mm_div_ps(_mm_set1_ps(self), rhs.0) })
    }
}

impl Mul<Vec4> for Vec4 {
    type Output = Self;
    #[inline]
    fn mul(self, rhs: Self) -> Self {
        Self(unsafe { _mm_mul_ps(self.0, rhs.0) })
    }
}

impl MulAssign<Vec4> for Vec4 {
    #[inline]
    fn mul_assign(&mut self, rhs: Self) {
        self.0 = unsafe { _mm_mul_ps(self.0, rhs.0) };
    }
}

impl Mul<f32> for Vec4 {
    type Output = Self;
    #[inline]
    fn mul(self, rhs: f32) -> Self {
        Self(unsafe { _mm_mul_ps(self.0, _mm_set1_ps(rhs)) })
    }
}

impl MulAssign<f32> for Vec4 {
    #[inline]
    fn mul_assign(&mut self, rhs: f32) {
        self.0 = unsafe { _mm_mul_ps(self.0, _mm_set1_ps(rhs)) };
    }
}

impl Mul<Vec4> for f32 {
    type Output = Vec4;
    #[inline]
    fn mul(self, rhs: Vec4) -> Vec4 {
        Vec4(unsafe { _mm_mul_ps(_mm_set1_ps(self), rhs.0) })
    }
}

impl Add<Vec4> for Vec4 {
    type Output = Self;
    #[inline]
    fn add(self, rhs: Self) -> Self {
        Self(unsafe { _mm_add_ps(self.0, rhs.0) })
    }
}

impl AddAssign<Vec4> for Vec4 {
    #[inline]
    fn add_assign(&mut self, rhs: Self) {
        self.0 = unsafe { _mm_add_ps(self.0, rhs.0) };
    }
}

impl Add<f32> for Vec4 {
    type Output = Self;
    #[inline]
    fn add(self, rhs: f32) -> Self {
        Self(unsafe { _mm_add_ps(self.0, _mm_set1_ps(rhs)) })
    }
}

impl AddAssign<f32> for Vec4 {
    #[inline]
    fn add_assign(&mut self, rhs: f32) {
        self.0 = unsafe { _mm_add_ps(self.0, _mm_set1_ps(rhs)) };
    }
}

impl Add<Vec4> for f32 {
    type Output = Vec4;
    #[inline]
    fn add(self, rhs: Vec4) -> Vec4 {
        Vec4(unsafe { _mm_add_ps(_mm_set1_ps(self), rhs.0) })
    }
}

impl Sub<Vec4> for Vec4 {
    type Output = Self;
    #[inline]
    fn sub(self, rhs: Self) -> Self {
        Self(unsafe { _mm_sub_ps(self.0, rhs.0) })
    }
}

impl SubAssign<Vec4> for Vec4 {
    #[inline]
    fn sub_assign(&mut self, rhs: Vec4) {
        self.0 = unsafe { _mm_sub_ps(self.0, rhs.0) };
    }
}

impl Sub<f32> for Vec4 {
    type Output = Self;
    #[inline]
    fn sub(self, rhs: f32) -> Self {
        Self(unsafe { _mm_sub_ps(self.0, _mm_set1_ps(rhs)) })
    }
}

impl SubAssign<f32> for Vec4 {
    #[inline]
    fn sub_assign(&mut self, rhs: f32) {
        self.0 = unsafe { _mm_sub_ps(self.0, _mm_set1_ps(rhs)) };
    }
}

impl Sub<Vec4> for f32 {
    type Output = Vec4;
    #[inline]
    fn sub(self, rhs: Vec4) -> Vec4 {
        Vec4(unsafe { _mm_sub_ps(_mm_set1_ps(self), rhs.0) })
    }
}

impl Rem<Vec4> for Vec4 {
    type Output = Self;
    #[inline]
    fn rem(self, rhs: Self) -> Self {
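        // Element-wise floored remainder: self - floor(self / rhs) * rhs.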
        unsafe {
            let n = m128_floor(_mm_div_ps(self.0, rhs.0));
            Self(_mm_sub_ps(self.0, _mm_mul_ps(n, rhs.0)))
        }
    }
}

impl RemAssign<Vec4> for Vec4 {
    #[inline]
    fn rem_assign(&mut self, rhs: Self) {
        *self = self.rem(rhs);
    }
}

impl Rem<f32> for Vec4 {
    type Output = Self;
    #[inline]
    fn rem(self, rhs: f32) -> Self {
        self.rem(Self::splat(rhs))
    }
}

impl RemAssign<f32> for Vec4 {
    #[inline]
    fn rem_assign(&mut self, rhs: f32) {
        *self = self.rem(Self::splat(rhs));
    }
}

impl Rem<Vec4> for f32 {
    type Output = Vec4;
    #[inline]
    fn rem(self, rhs: Vec4) -> Vec4 {
        Vec4::splat(self).rem(rhs)
    }
}

#[cfg(not(target_arch = "spirv"))]
impl AsRef<[f32; 4]> for Vec4 {
    #[inline]
    fn as_ref(&self) -> &[f32; 4] {
        unsafe { &*(self as *const Vec4 as *const [f32; 4]) }
    }
}

#[cfg(not(target_arch = "spirv"))]
impl AsMut<[f32; 4]> for Vec4 {
    #[inline]
    fn as_mut(&mut self) -> &mut [f32; 4] {
        unsafe { &mut *(self as *mut Vec4 as *mut [f32; 4]) }
    }
}

impl Sum for Vec4 {
    #[inline]
    fn sum<I>(iter: I) -> Self
    where
        I: Iterator<Item = Self>,
    {
        iter.fold(Self::ZERO, Self::add)
    }
}

impl<'a> Sum<&'a Self> for Vec4 {
    #[inline]
    fn sum<I>(iter: I) -> Self
    where
        I: Iterator<Item = &'a Self>,
    {
        iter.fold(Self::ZERO, |a, &b| Self::add(a, b))
    }
}

impl Product for Vec4 {
    #[inline]
    fn product<I>(iter: I) -> Self
    where
        I: Iterator<Item = Self>,
    {
        iter.fold(Self::ONE, Self::mul)
    }
}

impl<'a> Product<&'a Self> for Vec4 {
    #[inline]
    fn product<I>(iter: I) -> Self
    where
        I: Iterator<Item = &'a Self>,
    {
        iter.fold(Self::ONE, |a, &b| Self::mul(a, b))
    }
}

impl Neg for Vec4 {
    type Output = Self;
    #[inline]
    fn neg(self) -> Self {
        Self(unsafe { _mm_xor_ps(_mm_set1_ps(-0.0), self.0) })
    }
}

impl Index<usize> for Vec4 {
    type Output = f32;
    #[inline]
    fn index(&self, index: usize) -> &Self::Output {
        match index {
            0 => &self.x,
            1 => &self.y,
            2 => &self.z,
            3 => &self.w,
            _ => panic!("index out of bounds"),
        }
    }
}

impl IndexMut<usize> for Vec4 {
    #[inline]
    fn index_mut(&mut self, index: usize) -> &mut Self::Output {
        match index {
            0 => &mut self.x,
            1 => &mut self.y,
            2 => &mut self.z,
            3 => &mut self.w,
            _ => panic!("index out of bounds"),
        }
    }
}

#[cfg(not(target_arch = "spirv"))]
impl fmt::Display for Vec4 {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        if let Some(p) = f.precision() {
            write!(
                f,
                "[{:.*}, {:.*}, {:.*}, {:.*}]",
                p, self.x, p, self.y, p, self.z, p, self.w
            )
        } else {
            write!(f, "[{}, {}, {}, {}]", self.x, self.y, self.z, self.w)
        }
    }
}

#[cfg(not(target_arch = "spirv"))]
impl fmt::Debug for Vec4 {
    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt.debug_tuple(stringify!(Vec4))
            .field(&self.x)
            .field(&self.y)
            .field(&self.z)
            .field(&self.w)
            .finish()
    }
}

impl From<Vec4> for __m128 {
    #[inline]
    fn from(t: Vec4) -> Self {
        t.0
    }
}

impl From<__m128> for Vec4 {
    #[inline]
    fn from(t: __m128) -> Self {
        Self(t)
    }
}

impl From<[f32; 4]> for Vec4 {
    #[inline]
    fn from(a: [f32; 4]) -> Self {
        Self(unsafe { _mm_loadu_ps(a.as_ptr()) })
    }
}

impl From<Vec4> for [f32; 4] {
    #[inline]
    fn from(v: Vec4) -> Self {
        use crate::Align16;
        use core::mem::MaybeUninit;
        let mut out: MaybeUninit<Align16<Self>> = MaybeUninit::uninit();
        unsafe {
            _mm_store_ps(out.as_mut_ptr().cast(), v.0);
            out.assume_init().0
        }
    }
}

impl From<(f32, f32, f32, f32)> for Vec4 {
    #[inline]
    fn from(t: (f32, f32, f32, f32)) -> Self {
        Self::new(t.0, t.1, t.2, t.3)
    }
}

impl From<Vec4> for (f32, f32, f32, f32) {
    #[inline]
    fn from(v: Vec4) -> Self {
        use crate::Align16;
        use core::mem::MaybeUninit;
        let mut out: MaybeUninit<Align16<Self>> = MaybeUninit::uninit();
        unsafe {
            _mm_store_ps(out.as_mut_ptr().cast(), v.0);
            out.assume_init().0
        }
    }
}

impl From<(Vec3A, f32)> for Vec4 {
    #[inline]
    fn from((v, w): (Vec3A, f32)) -> Self {
        v.extend(w)
    }
}

impl From<(f32, Vec3A)> for Vec4 {
    #[inline]
    fn from((x, v): (f32, Vec3A)) -> Self {
        Self::new(x, v.x, v.y, v.z)
    }
}

impl From<(Vec3, f32)> for Vec4 {
    #[inline]
    fn from((v, w): (Vec3, f32)) -> Self {
        Self::new(v.x, v.y, v.z, w)
    }
}

impl From<(f32, Vec3)> for Vec4 {
    #[inline]
    fn from((x, v): (f32, Vec3)) -> Self {
        Self::new(x, v.x, v.y, v.z)
    }
}

impl From<(Vec2, f32, f32)> for Vec4 {
    #[inline]
    fn from((v, z, w): (Vec2, f32, f32)) -> Self {
        Self::new(v.x, v.y, z, w)
    }
}

impl From<(Vec2, Vec2)> for Vec4 {
    #[inline]
    fn from((v, u): (Vec2, Vec2)) -> Self {
        Self::new(v.x, v.y, u.x, u.y)
    }
}

impl Deref for Vec4 {
    type Target = crate::deref::Vec4<f32>;
    #[inline]
    fn deref(&self) -> &Self::Target {
        unsafe { &*(self as *const Self).cast() }
    }
}

impl DerefMut for Vec4 {
    #[inline]
    fn deref_mut(&mut self) -> &mut Self::Target {
        unsafe { &mut *(self as *mut Self).cast() }
    }
}

impl From<BVec4> for Vec4 {
    #[inline]
    fn from(v: BVec4) -> Self {
        Self::new(
            f32::from(v.x),
            f32::from(v.y),
            f32::from(v.z),
            f32::from(v.w),
        )
    }
}

#[cfg(not(feature = "scalar-math"))]
impl From<BVec4A> for Vec4 {
    #[inline]
    fn from(v: BVec4A) -> Self {
        let bool_array: [bool; 4] = v.into();
        Self::new(
            f32::from(bool_array[0]),
            f32::from(bool_array[1]),
            f32::from(bool_array[2]),
            f32::from(bool_array[3]),
        )
    }
}