use crate::{f32::math, sse2::*, BVec4, BVec4A, Vec2, Vec3, Vec3A};

#[cfg(not(target_arch = "spirv"))]
use core::fmt;
use core::iter::{Product, Sum};
use core::{f32, ops::*};

#[cfg(target_arch = "x86")]
use core::arch::x86::*;
#[cfg(target_arch = "x86_64")]
use core::arch::x86_64::*;

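// Private helper for building a `Vec4` from four floats in `const` contexts,
// where SIMD intrinsics such as `_mm_set_ps` cannot be called.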
#[repr(C)]
union UnionCast {
    a: [f32; 4],
    v: Vec4,
}

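/// Creates a 4-dimensional vector.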
#[inline(always)]
#[must_use]
pub const fn vec4(x: f32, y: f32, z: f32, w: f32) -> Vec4 {
    Vec4::new(x, y, z, w)
}

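/// A 4-dimensional vector.
///
/// SIMD vector types are used for storage on supported platforms.
///
/// This type is 16 byte aligned.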
#[derive(Clone, Copy)]
#[repr(transparent)]
pub struct Vec4(pub(crate) __m128);

impl Vec4 {
    pub const ZERO: Self = Self::splat(0.0);

    pub const ONE: Self = Self::splat(1.0);

    pub const NEG_ONE: Self = Self::splat(-1.0);

    pub const MIN: Self = Self::splat(f32::MIN);

    pub const MAX: Self = Self::splat(f32::MAX);

    pub const NAN: Self = Self::splat(f32::NAN);

    pub const INFINITY: Self = Self::splat(f32::INFINITY);

    pub const NEG_INFINITY: Self = Self::splat(f32::NEG_INFINITY);

    pub const X: Self = Self::new(1.0, 0.0, 0.0, 0.0);

    pub const Y: Self = Self::new(0.0, 1.0, 0.0, 0.0);

    pub const Z: Self = Self::new(0.0, 0.0, 1.0, 0.0);

    pub const W: Self = Self::new(0.0, 0.0, 0.0, 1.0);

    pub const NEG_X: Self = Self::new(-1.0, 0.0, 0.0, 0.0);

    pub const NEG_Y: Self = Self::new(0.0, -1.0, 0.0, 0.0);

    pub const NEG_Z: Self = Self::new(0.0, 0.0, -1.0, 0.0);

    pub const NEG_W: Self = Self::new(0.0, 0.0, 0.0, -1.0);

    pub const AXES: [Self; 4] = [Self::X, Self::Y, Self::Z, Self::W];

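    /// Creates a new vector.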
    #[inline(always)]
    #[must_use]
    pub const fn new(x: f32, y: f32, z: f32, w: f32) -> Self {
        unsafe { UnionCast { a: [x, y, z, w] }.v }
    }

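    /// Creates a vector with all elements set to `v`.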
    #[inline]
    #[must_use]
    pub const fn splat(v: f32) -> Self {
        unsafe { UnionCast { a: [v; 4] }.v }
    }

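    /// Returns a vector containing each element of `if_true` or `if_false`
    /// depending on the corresponding `mask` element: a true element uses the
    /// value from `if_true`, a false element the value from `if_false`.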
    #[inline]
    #[must_use]
    pub fn select(mask: BVec4A, if_true: Self, if_false: Self) -> Self {
        Self(unsafe {
            _mm_or_ps(
                _mm_andnot_ps(mask.0, if_false.0),
                _mm_and_ps(if_true.0, mask.0),
            )
        })
    }

    #[inline]
    #[must_use]
    pub const fn from_array(a: [f32; 4]) -> Self {
        Self::new(a[0], a[1], a[2], a[3])
    }

    #[inline]
    #[must_use]
    pub const fn to_array(&self) -> [f32; 4] {
        unsafe { *(self as *const Vec4 as *const [f32; 4]) }
    }

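    /// Creates a vector from the first 4 values in `slice`.
    ///
    /// # Panics
    ///
    /// Panics if `slice` is less than 4 elements long.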
    #[inline]
    #[must_use]
    pub const fn from_slice(slice: &[f32]) -> Self {
        Self::new(slice[0], slice[1], slice[2], slice[3])
    }

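    /// Writes the elements of `self` to the first 4 elements in `slice`.
    ///
    /// # Panics
    ///
    /// Panics if `slice` is less than 4 elements long.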
    #[inline]
    pub fn write_to_slice(self, slice: &mut [f32]) {
        unsafe {
            assert!(slice.len() >= 4);
            _mm_storeu_ps(slice.as_mut_ptr(), self.0);
        }
    }

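    /// Creates a 3D vector from the `x`, `y` and `z` elements of `self`, discarding `w`.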
    #[inline]
    #[must_use]
    pub fn truncate(self) -> Vec3 {
        use crate::swizzles::Vec4Swizzles;
        self.xyz()
    }

    #[inline]
    #[must_use]
    pub fn with_x(mut self, x: f32) -> Self {
        self.x = x;
        self
    }

    #[inline]
    #[must_use]
    pub fn with_y(mut self, y: f32) -> Self {
        self.y = y;
        self
    }

    #[inline]
    #[must_use]
    pub fn with_z(mut self, z: f32) -> Self {
        self.z = z;
        self
    }

    #[inline]
    #[must_use]
    pub fn with_w(mut self, w: f32) -> Self {
        self.w = w;
        self
    }

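    /// Computes the dot product of `self` and `rhs`.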
    #[inline]
    #[must_use]
    pub fn dot(self, rhs: Self) -> f32 {
        unsafe { dot4(self.0, rhs.0) }
    }

    #[inline]
    #[must_use]
    pub fn dot_into_vec(self, rhs: Self) -> Self {
        Self(unsafe { dot4_into_m128(self.0, rhs.0) })
    }

    #[inline]
    #[must_use]
    pub fn min(self, rhs: Self) -> Self {
        Self(unsafe { _mm_min_ps(self.0, rhs.0) })
    }

    #[inline]
    #[must_use]
    pub fn max(self, rhs: Self) -> Self {
        Self(unsafe { _mm_max_ps(self.0, rhs.0) })
    }

    #[inline]
    #[must_use]
    pub fn clamp(self, min: Self, max: Self) -> Self {
        glam_assert!(min.cmple(max).all(), "clamp: expected min <= max");
        self.max(min).min(max)
    }

    #[inline]
    #[must_use]
    pub fn min_element(self) -> f32 {
        unsafe {
            let v = self.0;
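            // Horizontal reduction: fold the high two lanes onto the low two,
            // then fold lane 1 onto lane 0, leaving the result in lane 0.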
            let v = _mm_min_ps(v, _mm_shuffle_ps(v, v, 0b00_00_11_10));
            let v = _mm_min_ps(v, _mm_shuffle_ps(v, v, 0b00_00_00_01));
            _mm_cvtss_f32(v)
        }
    }

    #[inline]
    #[must_use]
    pub fn max_element(self) -> f32 {
        unsafe {
            let v = self.0;
            let v = _mm_max_ps(v, _mm_shuffle_ps(v, v, 0b00_00_11_10));
            let v = _mm_max_ps(v, _mm_shuffle_ps(v, v, 0b00_00_00_01));
            _mm_cvtss_f32(v)
        }
    }

    #[inline]
    #[must_use]
    pub fn element_sum(self) -> f32 {
        unsafe {
            let v = self.0;
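            // Pairwise reduction: add lane pairs (x+y and z+w), then combine
            // the two partial sums in lane 0.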
            let v = _mm_add_ps(v, _mm_shuffle_ps(v, v, 0b00_11_00_01));
            let v = _mm_add_ps(v, _mm_shuffle_ps(v, v, 0b00_00_00_10));
            _mm_cvtss_f32(v)
        }
    }

    #[inline]
    #[must_use]
    pub fn element_product(self) -> f32 {
        unsafe {
            let v = self.0;
            let v = _mm_mul_ps(v, _mm_shuffle_ps(v, v, 0b00_11_00_01));
            let v = _mm_mul_ps(v, _mm_shuffle_ps(v, v, 0b00_00_00_10));
            _mm_cvtss_f32(v)
        }
    }

    #[inline]
    #[must_use]
    pub fn cmpeq(self, rhs: Self) -> BVec4A {
        BVec4A(unsafe { _mm_cmpeq_ps(self.0, rhs.0) })
    }

    #[inline]
    #[must_use]
    pub fn cmpne(self, rhs: Self) -> BVec4A {
        BVec4A(unsafe { _mm_cmpneq_ps(self.0, rhs.0) })
    }

    #[inline]
    #[must_use]
    pub fn cmpge(self, rhs: Self) -> BVec4A {
        BVec4A(unsafe { _mm_cmpge_ps(self.0, rhs.0) })
    }

    #[inline]
    #[must_use]
    pub fn cmpgt(self, rhs: Self) -> BVec4A {
        BVec4A(unsafe { _mm_cmpgt_ps(self.0, rhs.0) })
    }

    #[inline]
    #[must_use]
    pub fn cmple(self, rhs: Self) -> BVec4A {
        BVec4A(unsafe { _mm_cmple_ps(self.0, rhs.0) })
    }

    #[inline]
    #[must_use]
    pub fn cmplt(self, rhs: Self) -> BVec4A {
        BVec4A(unsafe { _mm_cmplt_ps(self.0, rhs.0) })
    }

    #[inline]
    #[must_use]
    pub fn abs(self) -> Self {
        Self(unsafe { crate::sse2::m128_abs(self.0) })
    }

    #[inline]
    #[must_use]
    pub fn signum(self) -> Self {
        unsafe {
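            // Stamp the sign bit of each lane onto 1.0 with bit masks, then
            // route NaN lanes back through unchanged via `select`.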
            let result = Self(_mm_or_ps(_mm_and_ps(self.0, Self::NEG_ONE.0), Self::ONE.0));
            let mask = self.is_nan_mask();
            Self::select(mask, self, result)
        }
    }

    #[inline]
    #[must_use]
    pub fn copysign(self, rhs: Self) -> Self {
        unsafe {
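            // -0.0 has only the sign bit set: take the sign bits from `rhs`
            // and the remaining (magnitude) bits from `self`.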
            let mask = Self::splat(-0.0);
            Self(_mm_or_ps(
                _mm_and_ps(rhs.0, mask.0),
                _mm_andnot_ps(mask.0, self.0),
            ))
        }
    }

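    /// Returns a bitmask with the lowest 4 bits set to the sign bits from the
    /// elements of `self`.
    ///
    /// A negative element results in a `1` bit and a positive element in a
    /// `0` bit. Element `x` goes into the first lowest bit, element `y` into
    /// the second, etc.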
    #[inline]
    #[must_use]
    pub fn is_negative_bitmask(self) -> u32 {
        unsafe { _mm_movemask_ps(self.0) as u32 }
    }

    #[inline]
    #[must_use]
    pub fn is_finite(self) -> bool {
        self.x.is_finite() && self.y.is_finite() && self.z.is_finite() && self.w.is_finite()
    }

    #[inline]
    #[must_use]
    pub fn is_nan(self) -> bool {
        self.is_nan_mask().any()
    }

    #[inline]
    #[must_use]
    pub fn is_nan_mask(self) -> BVec4A {
        BVec4A(unsafe { _mm_cmpunord_ps(self.0, self.0) })
    }

    #[doc(alias = "magnitude")]
    #[inline]
    #[must_use]
    pub fn length(self) -> f32 {
        unsafe {
            let dot = dot4_in_x(self.0, self.0);
            _mm_cvtss_f32(_mm_sqrt_ps(dot))
        }
    }

    #[doc(alias = "magnitude2")]
    #[inline]
    #[must_use]
    pub fn length_squared(self) -> f32 {
        self.dot(self)
    }

    #[inline]
    #[must_use]
    pub fn length_recip(self) -> f32 {
        unsafe {
            let dot = dot4_in_x(self.0, self.0);
            _mm_cvtss_f32(_mm_div_ps(Self::ONE.0, _mm_sqrt_ps(dot)))
        }
    }

    #[inline]
    #[must_use]
    pub fn distance(self, rhs: Self) -> f32 {
        (self - rhs).length()
    }

    #[inline]
    #[must_use]
    pub fn distance_squared(self, rhs: Self) -> f32 {
        (self - rhs).length_squared()
    }

    #[inline]
    #[must_use]
    pub fn div_euclid(self, rhs: Self) -> Self {
        Self::new(
            math::div_euclid(self.x, rhs.x),
            math::div_euclid(self.y, rhs.y),
            math::div_euclid(self.z, rhs.z),
            math::div_euclid(self.w, rhs.w),
        )
    }

    #[inline]
    #[must_use]
    pub fn rem_euclid(self, rhs: Self) -> Self {
        Self::new(
            math::rem_euclid(self.x, rhs.x),
            math::rem_euclid(self.y, rhs.y),
            math::rem_euclid(self.z, rhs.z),
            math::rem_euclid(self.w, rhs.w),
        )
    }

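    /// Returns `self` normalized to length 1.0.
    ///
    /// For valid results, `self` must be finite and not of length zero,
    /// nothing can be guaranteed otherwise.
    ///
    /// # Panics
    ///
    /// Will panic if the resulting normalized vector is not finite when
    /// `glam_assert` is enabled.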
    #[inline]
    #[must_use]
    pub fn normalize(self) -> Self {
        unsafe {
            let length = _mm_sqrt_ps(dot4_into_m128(self.0, self.0));
            #[allow(clippy::let_and_return)]
            let normalized = Self(_mm_div_ps(self.0, length));
            glam_assert!(normalized.is_finite());
            normalized
        }
    }

    #[inline]
    #[must_use]
    pub fn try_normalize(self) -> Option<Self> {
        let rcp = self.length_recip();
        if rcp.is_finite() && rcp > 0.0 {
            Some(self * rcp)
        } else {
            None
        }
    }

    #[inline]
    #[must_use]
    pub fn normalize_or(self, fallback: Self) -> Self {
        let rcp = self.length_recip();
        if rcp.is_finite() && rcp > 0.0 {
            self * rcp
        } else {
            fallback
        }
    }

    #[inline]
    #[must_use]
    pub fn normalize_or_zero(self) -> Self {
        self.normalize_or(Self::ZERO)
    }

    #[inline]
    #[must_use]
    pub fn is_normalized(self) -> bool {
        math::abs(self.length_squared() - 1.0) <= 2e-4
    }

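    /// Returns the vector projection of `self` onto `rhs`.
    ///
    /// `rhs` must be of non-zero length.
    ///
    /// # Panics
    ///
    /// Will panic if `rhs` is zero length when `glam_assert` is enabled.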
    #[inline]
    #[must_use]
    pub fn project_onto(self, rhs: Self) -> Self {
        let other_len_sq_rcp = rhs.dot(rhs).recip();
        glam_assert!(other_len_sq_rcp.is_finite());
        rhs * self.dot(rhs) * other_len_sq_rcp
    }

    #[inline]
    #[must_use]
    pub fn reject_from(self, rhs: Self) -> Self {
        self - self.project_onto(rhs)
    }

    #[inline]
    #[must_use]
    pub fn project_onto_normalized(self, rhs: Self) -> Self {
        glam_assert!(rhs.is_normalized());
        rhs * self.dot(rhs)
    }

    #[inline]
    #[must_use]
    pub fn reject_from_normalized(self, rhs: Self) -> Self {
        self - self.project_onto_normalized(rhs)
    }

    #[inline]
    #[must_use]
    pub fn round(self) -> Self {
        Self(unsafe { m128_round(self.0) })
    }

    #[inline]
    #[must_use]
    pub fn floor(self) -> Self {
        Self(unsafe { m128_floor(self.0) })
    }

    #[inline]
    #[must_use]
    pub fn ceil(self) -> Self {
        Self(unsafe { m128_ceil(self.0) })
    }

    #[inline]
    #[must_use]
    pub fn trunc(self) -> Self {
        Self(unsafe { m128_trunc(self.0) })
    }

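    /// Returns a vector containing the fractional part of the vector as
    /// `self - self.trunc()`.
    ///
    /// Note that this differs from the GLSL implementation of `fract`, which
    /// returns `self - self.floor()`; see [`Self::fract_gl`].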
    #[inline]
    #[must_use]
    pub fn fract(self) -> Self {
        self - self.trunc()
    }

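    /// Returns a vector containing the fractional part of the vector as
    /// `self - self.floor()`, matching the GLSL `fract` function.
    ///
    /// Note that this differs from the Rust convention of
    /// `self - self.trunc()`; see [`Self::fract`].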
    #[inline]
    #[must_use]
    pub fn fract_gl(self) -> Self {
        self - self.floor()
    }

    #[inline]
    #[must_use]
    pub fn exp(self) -> Self {
        Self::new(
            math::exp(self.x),
            math::exp(self.y),
            math::exp(self.z),
            math::exp(self.w),
        )
    }

    #[inline]
    #[must_use]
    pub fn powf(self, n: f32) -> Self {
        Self::new(
            math::powf(self.x, n),
            math::powf(self.y, n),
            math::powf(self.z, n),
            math::powf(self.w, n),
        )
    }

    #[inline]
    #[must_use]
    pub fn recip(self) -> Self {
        Self(unsafe { _mm_div_ps(Self::ONE.0, self.0) })
    }

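    /// Performs a linear interpolation between `self` and `rhs` based on the
    /// value `s`.
    ///
    /// When `s` is `0.0` the result will be equal to `self`, and when `s` is
    /// `1.0` the result will be equal to `rhs`. When `s` is outside of the
    /// range `[0, 1]`, the result is linearly extrapolated.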
    #[doc(alias = "mix")]
    #[inline]
    #[must_use]
    pub fn lerp(self, rhs: Self, s: f32) -> Self {
        self + ((rhs - self) * s)
    }

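    /// Moves towards `rhs` based on the value `d`.
    ///
    /// When `d` is `0.0`, the result will be equal to `self`. When `d` is
    /// greater than or equal to `self.distance(rhs)`, the result will be
    /// equal to `rhs`. Will not go past `rhs`.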
    #[inline]
    #[must_use]
    pub fn move_towards(&self, rhs: Self, d: f32) -> Self {
        let a = rhs - *self;
        let len = a.length();
        if len <= d || len <= 1e-4 {
            return rhs;
        }
        *self + a / len * d
    }

    #[inline]
    #[must_use]
    pub fn midpoint(self, rhs: Self) -> Self {
        (self + rhs) * 0.5
    }

    #[inline]
    #[must_use]
    pub fn abs_diff_eq(self, rhs: Self, max_abs_diff: f32) -> bool {
        self.sub(rhs).abs().cmple(Self::splat(max_abs_diff)).all()
    }

    #[inline]
    #[must_use]
    pub fn clamp_length(self, min: f32, max: f32) -> Self {
        glam_assert!(min <= max);
        let length_sq = self.length_squared();
        if length_sq < min * min {
            min * (self / math::sqrt(length_sq))
        } else if length_sq > max * max {
            max * (self / math::sqrt(length_sq))
        } else {
            self
        }
    }

    #[inline]
    #[must_use]
    pub fn clamp_length_max(self, max: f32) -> Self {
        let length_sq = self.length_squared();
        if length_sq > max * max {
            max * (self / math::sqrt(length_sq))
        } else {
            self
        }
    }

    #[inline]
    #[must_use]
    pub fn clamp_length_min(self, min: f32) -> Self {
        let length_sq = self.length_squared();
        if length_sq < min * min {
            min * (self / math::sqrt(length_sq))
        } else {
            self
        }
    }

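    /// Fused multiply-add. Computes `(self * a) + b` element-wise with only
    /// one rounding error, yielding a more accurate result than an unfused
    /// multiply-add.
    ///
    /// Using `mul_add` *may* be more performant than an unfused multiply-add
    /// if the target architecture has a dedicated fma CPU instruction.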
    #[inline]
    #[must_use]
    pub fn mul_add(self, a: Self, b: Self) -> Self {
        #[cfg(target_feature = "fma")]
        unsafe {
            Self(_mm_fmadd_ps(self.0, a.0, b.0))
        }
        #[cfg(not(target_feature = "fma"))]
        Self::new(
            math::mul_add(self.x, a.x, b.x),
            math::mul_add(self.y, a.y, b.y),
            math::mul_add(self.z, a.z, b.z),
            math::mul_add(self.w, a.w, b.w),
        )
    }

    #[inline]
    #[must_use]
    pub fn as_dvec4(&self) -> crate::DVec4 {
        crate::DVec4::new(self.x as f64, self.y as f64, self.z as f64, self.w as f64)
    }

    #[inline]
    #[must_use]
    pub fn as_i16vec4(&self) -> crate::I16Vec4 {
        crate::I16Vec4::new(self.x as i16, self.y as i16, self.z as i16, self.w as i16)
    }

    #[inline]
    #[must_use]
    pub fn as_u16vec4(&self) -> crate::U16Vec4 {
        crate::U16Vec4::new(self.x as u16, self.y as u16, self.z as u16, self.w as u16)
    }

    #[inline]
    #[must_use]
    pub fn as_ivec4(&self) -> crate::IVec4 {
        crate::IVec4::new(self.x as i32, self.y as i32, self.z as i32, self.w as i32)
    }

    #[inline]
    #[must_use]
    pub fn as_uvec4(&self) -> crate::UVec4 {
        crate::UVec4::new(self.x as u32, self.y as u32, self.z as u32, self.w as u32)
    }

    #[inline]
    #[must_use]
    pub fn as_i64vec4(&self) -> crate::I64Vec4 {
        crate::I64Vec4::new(self.x as i64, self.y as i64, self.z as i64, self.w as i64)
    }

    #[inline]
    #[must_use]
    pub fn as_u64vec4(&self) -> crate::U64Vec4 {
        crate::U64Vec4::new(self.x as u64, self.y as u64, self.z as u64, self.w as u64)
    }
}

impl Default for Vec4 {
    #[inline(always)]
    fn default() -> Self {
        Self::ZERO
    }
}

impl PartialEq for Vec4 {
    #[inline]
    fn eq(&self, rhs: &Self) -> bool {
        self.cmpeq(*rhs).all()
    }
}

impl Div<Vec4> for Vec4 {
    type Output = Self;
    #[inline]
    fn div(self, rhs: Self) -> Self {
        Self(unsafe { _mm_div_ps(self.0, rhs.0) })
    }
}

impl DivAssign<Vec4> for Vec4 {
    #[inline]
    fn div_assign(&mut self, rhs: Self) {
        self.0 = unsafe { _mm_div_ps(self.0, rhs.0) };
    }
}

impl Div<f32> for Vec4 {
    type Output = Self;
    #[inline]
    fn div(self, rhs: f32) -> Self {
        Self(unsafe { _mm_div_ps(self.0, _mm_set1_ps(rhs)) })
    }
}

impl DivAssign<f32> for Vec4 {
    #[inline]
    fn div_assign(&mut self, rhs: f32) {
        self.0 = unsafe { _mm_div_ps(self.0, _mm_set1_ps(rhs)) };
    }
}

impl Div<Vec4> for f32 {
    type Output = Vec4;
    #[inline]
    fn div(self, rhs: Vec4) -> Vec4 {
        Vec4(unsafe { _mm_div_ps(_mm_set1_ps(self), rhs.0) })
    }
}

impl Mul<Vec4> for Vec4 {
    type Output = Self;
    #[inline]
    fn mul(self, rhs: Self) -> Self {
        Self(unsafe { _mm_mul_ps(self.0, rhs.0) })
    }
}

impl MulAssign<Vec4> for Vec4 {
    #[inline]
    fn mul_assign(&mut self, rhs: Self) {
        self.0 = unsafe { _mm_mul_ps(self.0, rhs.0) };
    }
}

impl Mul<f32> for Vec4 {
    type Output = Self;
    #[inline]
    fn mul(self, rhs: f32) -> Self {
        Self(unsafe { _mm_mul_ps(self.0, _mm_set1_ps(rhs)) })
    }
}

impl MulAssign<f32> for Vec4 {
    #[inline]
    fn mul_assign(&mut self, rhs: f32) {
        self.0 = unsafe { _mm_mul_ps(self.0, _mm_set1_ps(rhs)) };
    }
}

impl Mul<Vec4> for f32 {
    type Output = Vec4;
    #[inline]
    fn mul(self, rhs: Vec4) -> Vec4 {
        Vec4(unsafe { _mm_mul_ps(_mm_set1_ps(self), rhs.0) })
    }
}

impl Add<Vec4> for Vec4 {
    type Output = Self;
    #[inline]
    fn add(self, rhs: Self) -> Self {
        Self(unsafe { _mm_add_ps(self.0, rhs.0) })
    }
}

impl AddAssign<Vec4> for Vec4 {
    #[inline]
    fn add_assign(&mut self, rhs: Self) {
        self.0 = unsafe { _mm_add_ps(self.0, rhs.0) };
    }
}

impl Add<f32> for Vec4 {
    type Output = Self;
    #[inline]
    fn add(self, rhs: f32) -> Self {
        Self(unsafe { _mm_add_ps(self.0, _mm_set1_ps(rhs)) })
    }
}

impl AddAssign<f32> for Vec4 {
    #[inline]
    fn add_assign(&mut self, rhs: f32) {
        self.0 = unsafe { _mm_add_ps(self.0, _mm_set1_ps(rhs)) };
    }
}

impl Add<Vec4> for f32 {
    type Output = Vec4;
    #[inline]
    fn add(self, rhs: Vec4) -> Vec4 {
        Vec4(unsafe { _mm_add_ps(_mm_set1_ps(self), rhs.0) })
    }
}

impl Sub<Vec4> for Vec4 {
    type Output = Self;
    #[inline]
    fn sub(self, rhs: Self) -> Self {
        Self(unsafe { _mm_sub_ps(self.0, rhs.0) })
    }
}

impl SubAssign<Vec4> for Vec4 {
    #[inline]
    fn sub_assign(&mut self, rhs: Vec4) {
        self.0 = unsafe { _mm_sub_ps(self.0, rhs.0) };
    }
}

impl Sub<f32> for Vec4 {
    type Output = Self;
    #[inline]
    fn sub(self, rhs: f32) -> Self {
        Self(unsafe { _mm_sub_ps(self.0, _mm_set1_ps(rhs)) })
    }
}

impl SubAssign<f32> for Vec4 {
    #[inline]
    fn sub_assign(&mut self, rhs: f32) {
        self.0 = unsafe { _mm_sub_ps(self.0, _mm_set1_ps(rhs)) };
    }
}

impl Sub<Vec4> for f32 {
    type Output = Vec4;
    #[inline]
    fn sub(self, rhs: Vec4) -> Vec4 {
        Vec4(unsafe { _mm_sub_ps(_mm_set1_ps(self), rhs.0) })
    }
}

impl Rem<Vec4> for Vec4 {
    type Output = Self;
    #[inline]
    fn rem(self, rhs: Self) -> Self {
        unsafe {
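            // GLSL-style modulo: `self - floor(self / rhs) * rhs`, so the
            // result has the same sign as `rhs`.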
            let n = m128_floor(_mm_div_ps(self.0, rhs.0));
            Self(_mm_sub_ps(self.0, _mm_mul_ps(n, rhs.0)))
        }
    }
}

impl RemAssign<Vec4> for Vec4 {
    #[inline]
    fn rem_assign(&mut self, rhs: Self) {
        *self = self.rem(rhs);
    }
}

impl Rem<f32> for Vec4 {
    type Output = Self;
    #[inline]
    fn rem(self, rhs: f32) -> Self {
        self.rem(Self::splat(rhs))
    }
}

impl RemAssign<f32> for Vec4 {
    #[inline]
    fn rem_assign(&mut self, rhs: f32) {
        *self = self.rem(Self::splat(rhs));
    }
}

impl Rem<Vec4> for f32 {
    type Output = Vec4;
    #[inline]
    fn rem(self, rhs: Vec4) -> Vec4 {
        Vec4::splat(self).rem(rhs)
    }
}

#[cfg(not(target_arch = "spirv"))]
impl AsRef<[f32; 4]> for Vec4 {
    #[inline]
    fn as_ref(&self) -> &[f32; 4] {
        unsafe { &*(self as *const Vec4 as *const [f32; 4]) }
    }
}

#[cfg(not(target_arch = "spirv"))]
impl AsMut<[f32; 4]> for Vec4 {
    #[inline]
    fn as_mut(&mut self) -> &mut [f32; 4] {
        unsafe { &mut *(self as *mut Vec4 as *mut [f32; 4]) }
    }
}

impl Sum for Vec4 {
    #[inline]
    fn sum<I>(iter: I) -> Self
    where
        I: Iterator<Item = Self>,
    {
        iter.fold(Self::ZERO, Self::add)
    }
}

impl<'a> Sum<&'a Self> for Vec4 {
    #[inline]
    fn sum<I>(iter: I) -> Self
    where
        I: Iterator<Item = &'a Self>,
    {
        iter.fold(Self::ZERO, |a, &b| Self::add(a, b))
    }
}

impl Product for Vec4 {
    #[inline]
    fn product<I>(iter: I) -> Self
    where
        I: Iterator<Item = Self>,
    {
        iter.fold(Self::ONE, Self::mul)
    }
}

impl<'a> Product<&'a Self> for Vec4 {
    #[inline]
    fn product<I>(iter: I) -> Self
    where
        I: Iterator<Item = &'a Self>,
    {
        iter.fold(Self::ONE, |a, &b| Self::mul(a, b))
    }
}

impl Neg for Vec4 {
    type Output = Self;
    #[inline]
    fn neg(self) -> Self {
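        // XOR with -0.0 flips only the sign bit of each lane.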
        Self(unsafe { _mm_xor_ps(_mm_set1_ps(-0.0), self.0) })
    }
}

impl Index<usize> for Vec4 {
    type Output = f32;
    #[inline]
    fn index(&self, index: usize) -> &Self::Output {
        match index {
            0 => &self.x,
            1 => &self.y,
            2 => &self.z,
            3 => &self.w,
            _ => panic!("index out of bounds"),
        }
    }
}

impl IndexMut<usize> for Vec4 {
    #[inline]
    fn index_mut(&mut self, index: usize) -> &mut Self::Output {
        match index {
            0 => &mut self.x,
            1 => &mut self.y,
            2 => &mut self.z,
            3 => &mut self.w,
            _ => panic!("index out of bounds"),
        }
    }
}

#[cfg(not(target_arch = "spirv"))]
impl fmt::Display for Vec4 {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        if let Some(p) = f.precision() {
            write!(
                f,
                "[{:.*}, {:.*}, {:.*}, {:.*}]",
                p, self.x, p, self.y, p, self.z, p, self.w
            )
        } else {
            write!(f, "[{}, {}, {}, {}]", self.x, self.y, self.z, self.w)
        }
    }
}

#[cfg(not(target_arch = "spirv"))]
impl fmt::Debug for Vec4 {
    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt.debug_tuple(stringify!(Vec4))
            .field(&self.x)
            .field(&self.y)
            .field(&self.z)
            .field(&self.w)
            .finish()
    }
}

impl From<Vec4> for __m128 {
    #[inline]
    fn from(t: Vec4) -> Self {
        t.0
    }
}

impl From<__m128> for Vec4 {
    #[inline]
    fn from(t: __m128) -> Self {
        Self(t)
    }
}

impl From<[f32; 4]> for Vec4 {
    #[inline]
    fn from(a: [f32; 4]) -> Self {
        Self(unsafe { _mm_loadu_ps(a.as_ptr()) })
    }
}

impl From<Vec4> for [f32; 4] {
    #[inline]
    fn from(v: Vec4) -> Self {
        use crate::Align16;
        use core::mem::MaybeUninit;
        let mut out: MaybeUninit<Align16<Self>> = MaybeUninit::uninit();
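        // Store through a 16-byte-aligned scratch value so the aligned
        // `_mm_store_ps` is sound, then read the initialized array back out.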
        unsafe {
            _mm_store_ps(out.as_mut_ptr().cast(), v.0);
            out.assume_init().0
        }
    }
}

impl From<(f32, f32, f32, f32)> for Vec4 {
    #[inline]
    fn from(t: (f32, f32, f32, f32)) -> Self {
        Self::new(t.0, t.1, t.2, t.3)
    }
}

impl From<Vec4> for (f32, f32, f32, f32) {
    #[inline]
    fn from(v: Vec4) -> Self {
        use crate::Align16;
        use core::mem::MaybeUninit;
        let mut out: MaybeUninit<Align16<Self>> = MaybeUninit::uninit();
        unsafe {
            _mm_store_ps(out.as_mut_ptr().cast(), v.0);
            out.assume_init().0
        }
    }
}

impl From<(Vec3A, f32)> for Vec4 {
    #[inline]
    fn from((v, w): (Vec3A, f32)) -> Self {
        v.extend(w)
    }
}

impl From<(f32, Vec3A)> for Vec4 {
    #[inline]
    fn from((x, v): (f32, Vec3A)) -> Self {
        Self::new(x, v.x, v.y, v.z)
    }
}

impl From<(Vec3, f32)> for Vec4 {
    #[inline]
    fn from((v, w): (Vec3, f32)) -> Self {
        Self::new(v.x, v.y, v.z, w)
    }
}

impl From<(f32, Vec3)> for Vec4 {
    #[inline]
    fn from((x, v): (f32, Vec3)) -> Self {
        Self::new(x, v.x, v.y, v.z)
    }
}

impl From<(Vec2, f32, f32)> for Vec4 {
    #[inline]
    fn from((v, z, w): (Vec2, f32, f32)) -> Self {
        Self::new(v.x, v.y, z, w)
    }
}

impl From<(Vec2, Vec2)> for Vec4 {
    #[inline]
    fn from((v, u): (Vec2, Vec2)) -> Self {
        Self::new(v.x, v.y, u.x, u.y)
    }
}

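// `Deref` to a plain `x`/`y`/`z`/`w` struct is what allows field access
// (e.g. `v.x`) on the SIMD-backed `Vec4`.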
impl Deref for Vec4 {
    type Target = crate::deref::Vec4<f32>;
    #[inline]
    fn deref(&self) -> &Self::Target {
        unsafe { &*(self as *const Self).cast() }
    }
}

impl DerefMut for Vec4 {
    #[inline]
    fn deref_mut(&mut self) -> &mut Self::Target {
        unsafe { &mut *(self as *mut Self).cast() }
    }
}

impl From<BVec4> for Vec4 {
    #[inline]
    fn from(v: BVec4) -> Self {
        Self::new(
            f32::from(v.x),
            f32::from(v.y),
            f32::from(v.z),
            f32::from(v.w),
        )
    }
}

#[cfg(not(feature = "scalar-math"))]
impl From<BVec4A> for Vec4 {
    #[inline]
    fn from(v: BVec4A) -> Self {
        let bool_array: [bool; 4] = v.into();
        Self::new(
            f32::from(bool_array[0]),
            f32::from(bool_array[1]),
            f32::from(bool_array[2]),
            f32::from(bool_array[3]),
        )
    }
}