use crate::{f32::math, sse2::*, BVec3, BVec3A, Vec2, Vec3, Vec4};

#[cfg(not(target_arch = "spirv"))]
use core::fmt;
use core::iter::{Product, Sum};
use core::{f32, ops::*};

#[cfg(target_arch = "x86")]
use core::arch::x86::*;
#[cfg(target_arch = "x86_64")]
use core::arch::x86_64::*;

#[repr(C)]
union UnionCast {
    a: [f32; 4],
    v: Vec3A,
}

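/// Creates a 3-dimensional vector.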
#[inline(always)]
#[must_use]
pub const fn vec3a(x: f32, y: f32, z: f32) -> Vec3A {
    Vec3A::new(x, y, z)
}

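/// A 3-dimensional vector.
///
/// SIMD vector types are used for storage on supported platforms for better
/// performance than the [`Vec3`] type.
///
/// It is possible to convert between [`Vec3`] and [`Vec3A`] types using [`From`]
/// trait implementations.
///
/// This type is 16 byte aligned.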
#[derive(Clone, Copy)]
#[repr(transparent)]
pub struct Vec3A(pub(crate) __m128);

impl Vec3A {
    pub const ZERO: Self = Self::splat(0.0);

    pub const ONE: Self = Self::splat(1.0);

    pub const NEG_ONE: Self = Self::splat(-1.0);

    pub const MIN: Self = Self::splat(f32::MIN);

    pub const MAX: Self = Self::splat(f32::MAX);

    pub const NAN: Self = Self::splat(f32::NAN);

    pub const INFINITY: Self = Self::splat(f32::INFINITY);

    pub const NEG_INFINITY: Self = Self::splat(f32::NEG_INFINITY);

    pub const X: Self = Self::new(1.0, 0.0, 0.0);

    pub const Y: Self = Self::new(0.0, 1.0, 0.0);

    pub const Z: Self = Self::new(0.0, 0.0, 1.0);

    pub const NEG_X: Self = Self::new(-1.0, 0.0, 0.0);

    pub const NEG_Y: Self = Self::new(0.0, -1.0, 0.0);

    pub const NEG_Z: Self = Self::new(0.0, 0.0, -1.0);

    pub const AXES: [Self; 3] = [Self::X, Self::Y, Self::Z];

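    /// Creates a new vector.
    ///
    /// Note: the unused fourth SIMD lane is initialized to `z`.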
    #[inline(always)]
    #[must_use]
    pub const fn new(x: f32, y: f32, z: f32) -> Self {
        unsafe { UnionCast { a: [x, y, z, z] }.v }
    }

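    /// Creates a vector with all elements set to `v`.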
    #[inline]
    #[must_use]
    pub const fn splat(v: f32) -> Self {
        unsafe { UnionCast { a: [v; 4] }.v }
    }

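    /// Creates a vector from the elements in `if_true` and `if_false`, selecting which to use
    /// for each element of `self`.
    ///
    /// A true element in the mask uses the corresponding element from `if_true`, and false
    /// uses the element from `if_false`.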
    #[inline]
    #[must_use]
    pub fn select(mask: BVec3A, if_true: Self, if_false: Self) -> Self {
        Self(unsafe {
            _mm_or_ps(
                _mm_andnot_ps(mask.0, if_false.0),
                _mm_and_ps(if_true.0, mask.0),
            )
        })
    }

    #[inline]
    #[must_use]
    pub const fn from_array(a: [f32; 3]) -> Self {
        Self::new(a[0], a[1], a[2])
    }

    #[inline]
    #[must_use]
    pub const fn to_array(&self) -> [f32; 3] {
        unsafe { *(self as *const Vec3A as *const [f32; 3]) }
    }

    #[inline]
    #[must_use]
    pub const fn from_slice(slice: &[f32]) -> Self {
        Self::new(slice[0], slice[1], slice[2])
    }

    #[inline]
    pub fn write_to_slice(self, slice: &mut [f32]) {
        slice[0] = self.x;
        slice[1] = self.y;
        slice[2] = self.z;
    }

    #[allow(dead_code)]
    #[inline]
    #[must_use]
    pub(crate) fn from_vec4(v: Vec4) -> Self {
        Self(v.0)
    }

    #[inline]
    #[must_use]
    pub fn extend(self, w: f32) -> Vec4 {
        Vec4::new(self.x, self.y, self.z, w)
    }

    #[inline]
    #[must_use]
    pub fn truncate(self) -> Vec2 {
        use crate::swizzles::Vec3Swizzles;
        self.xy()
    }

    #[inline]
    #[must_use]
    pub fn with_x(mut self, x: f32) -> Self {
        self.x = x;
        self
    }

    #[inline]
    #[must_use]
    pub fn with_y(mut self, y: f32) -> Self {
        self.y = y;
        self
    }

    #[inline]
    #[must_use]
    pub fn with_z(mut self, z: f32) -> Self {
        self.z = z;
        self
    }

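    /// Computes the dot product of `self` and `rhs`.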
    #[inline]
    #[must_use]
    pub fn dot(self, rhs: Self) -> f32 {
        unsafe { dot3(self.0, rhs.0) }
    }

    #[inline]
    #[must_use]
    pub fn dot_into_vec(self, rhs: Self) -> Self {
        Self(unsafe { dot3_into_m128(self.0, rhs.0) })
    }

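    /// Computes the cross product of `self` and `rhs`.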
    #[inline]
    #[must_use]
    pub fn cross(self, rhs: Self) -> Self {
        unsafe {
            let lhszxy = _mm_shuffle_ps(self.0, self.0, 0b01_01_00_10);
            let rhszxy = _mm_shuffle_ps(rhs.0, rhs.0, 0b01_01_00_10);
            let lhszxy_rhs = _mm_mul_ps(lhszxy, rhs.0);
            let rhszxy_lhs = _mm_mul_ps(rhszxy, self.0);
            let sub = _mm_sub_ps(lhszxy_rhs, rhszxy_lhs);
            Self(_mm_shuffle_ps(sub, sub, 0b01_01_00_10))
        }
    }

    #[inline]
    #[must_use]
    pub fn min(self, rhs: Self) -> Self {
        Self(unsafe { _mm_min_ps(self.0, rhs.0) })
    }

    #[inline]
    #[must_use]
    pub fn max(self, rhs: Self) -> Self {
        Self(unsafe { _mm_max_ps(self.0, rhs.0) })
    }

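    /// Component-wise clamping of values, similar to [`f32::clamp`].
    ///
    /// Each element in `min` must be less-or-equal to the corresponding element in `max`.
    ///
    /// # Panics
    ///
    /// Will panic if `min` is greater than `max` when `glam_assert` is enabled.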
    #[inline]
    #[must_use]
    pub fn clamp(self, min: Self, max: Self) -> Self {
        glam_assert!(min.cmple(max).all(), "clamp: expected min <= max");
        self.max(min).min(max)
    }

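    /// Returns the horizontal minimum of `self`.
    ///
    /// In other words this computes `min(x, y, z)`.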
    #[inline]
    #[must_use]
    pub fn min_element(self) -> f32 {
        unsafe {
            let v = self.0;
            let v = _mm_min_ps(v, _mm_shuffle_ps(v, v, 0b01_01_10_10));
            let v = _mm_min_ps(v, _mm_shuffle_ps(v, v, 0b00_00_00_01));
            _mm_cvtss_f32(v)
        }
    }

    #[inline]
    #[must_use]
    pub fn max_element(self) -> f32 {
        unsafe {
            let v = self.0;
            let v = _mm_max_ps(v, _mm_shuffle_ps(v, v, 0b00_00_10_10));
            let v = _mm_max_ps(v, _mm_shuffle_ps(v, v, 0b00_00_00_01));
            _mm_cvtss_f32(v)
        }
    }

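    /// Returns the sum of all elements of `self`.
    ///
    /// In other words, this computes `self.x + self.y + self.z`.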
    #[inline]
    #[must_use]
    pub fn element_sum(self) -> f32 {
        unsafe {
            let v = self.0;
            let v = _mm_add_ps(v, _mm_shuffle_ps(v, Self::ZERO.0, 0b00_11_00_01));
            let v = _mm_add_ps(v, _mm_shuffle_ps(v, v, 0b00_00_00_10));
            _mm_cvtss_f32(v)
        }
    }

    #[inline]
    #[must_use]
    pub fn element_product(self) -> f32 {
        unsafe {
            let v = self.0;
            let v = _mm_mul_ps(v, _mm_shuffle_ps(v, Self::ONE.0, 0b00_11_00_01));
            let v = _mm_mul_ps(v, _mm_shuffle_ps(v, v, 0b00_00_00_10));
            _mm_cvtss_f32(v)
        }
    }

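    /// Returns a vector mask containing the result of a `==` comparison for each element of
    /// `self` and `rhs`.
    ///
    /// In other words, this computes `[self.x == rhs.x, self.y == rhs.y, self.z == rhs.z]`.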
    #[inline]
    #[must_use]
    pub fn cmpeq(self, rhs: Self) -> BVec3A {
        BVec3A(unsafe { _mm_cmpeq_ps(self.0, rhs.0) })
    }

    #[inline]
    #[must_use]
    pub fn cmpne(self, rhs: Self) -> BVec3A {
        BVec3A(unsafe { _mm_cmpneq_ps(self.0, rhs.0) })
    }

    #[inline]
    #[must_use]
    pub fn cmpge(self, rhs: Self) -> BVec3A {
        BVec3A(unsafe { _mm_cmpge_ps(self.0, rhs.0) })
    }

    #[inline]
    #[must_use]
    pub fn cmpgt(self, rhs: Self) -> BVec3A {
        BVec3A(unsafe { _mm_cmpgt_ps(self.0, rhs.0) })
    }

    #[inline]
    #[must_use]
    pub fn cmple(self, rhs: Self) -> BVec3A {
        BVec3A(unsafe { _mm_cmple_ps(self.0, rhs.0) })
    }

    #[inline]
    #[must_use]
    pub fn cmplt(self, rhs: Self) -> BVec3A {
        BVec3A(unsafe { _mm_cmplt_ps(self.0, rhs.0) })
    }

    #[inline]
    #[must_use]
    pub fn abs(self) -> Self {
        Self(unsafe { crate::sse2::m128_abs(self.0) })
    }

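    /// Returns a vector with elements representing the sign of `self`.
    ///
    /// - `1.0` if the number is positive, `+0.0` or `INFINITY`
    /// - `-1.0` if the number is negative, `-0.0` or `NEG_INFINITY`
    /// - `NAN` if the number is `NAN`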
    #[inline]
    #[must_use]
    pub fn signum(self) -> Self {
        unsafe {
            let result = Self(_mm_or_ps(_mm_and_ps(self.0, Self::NEG_ONE.0), Self::ONE.0));
            let mask = self.is_nan_mask();
            Self::select(mask, self, result)
        }
    }

    #[inline]
    #[must_use]
    pub fn copysign(self, rhs: Self) -> Self {
        unsafe {
            let mask = Self::splat(-0.0);
            Self(_mm_or_ps(
                _mm_and_ps(rhs.0, mask.0),
                _mm_andnot_ps(mask.0, self.0),
            ))
        }
    }

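    /// Returns a bitmask with the lowest 3 bits set to the sign bits from the elements of `self`.
    ///
    /// A negative element results in a `1` bit and a positive element in a `0` bit. Element `x`
    /// goes into the first lowest bit, element `y` into the second, etc.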
    #[inline]
    #[must_use]
    pub fn is_negative_bitmask(self) -> u32 {
        unsafe { (_mm_movemask_ps(self.0) as u32) & 0x7 }
    }

    #[inline]
    #[must_use]
    pub fn is_finite(self) -> bool {
        self.x.is_finite() && self.y.is_finite() && self.z.is_finite()
    }

    #[inline]
    #[must_use]
    pub fn is_nan(self) -> bool {
        self.is_nan_mask().any()
    }

    #[inline]
    #[must_use]
    pub fn is_nan_mask(self) -> BVec3A {
        BVec3A(unsafe { _mm_cmpunord_ps(self.0, self.0) })
    }

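    /// Computes the length of `self`.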
    #[doc(alias = "magnitude")]
    #[inline]
    #[must_use]
    pub fn length(self) -> f32 {
        unsafe {
            let dot = dot3_in_x(self.0, self.0);
            _mm_cvtss_f32(_mm_sqrt_ps(dot))
        }
    }

    #[doc(alias = "magnitude2")]
    #[inline]
    #[must_use]
    pub fn length_squared(self) -> f32 {
        self.dot(self)
    }

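    /// Computes `1.0 / length()`.
    ///
    /// For valid results, `self` must _not_ be of length zero.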
    #[inline]
    #[must_use]
    pub fn length_recip(self) -> f32 {
        unsafe {
            let dot = dot3_in_x(self.0, self.0);
            _mm_cvtss_f32(_mm_div_ps(Self::ONE.0, _mm_sqrt_ps(dot)))
        }
    }

    #[inline]
    #[must_use]
    pub fn distance(self, rhs: Self) -> f32 {
        (self - rhs).length()
    }

    #[inline]
    #[must_use]
    pub fn distance_squared(self, rhs: Self) -> f32 {
        (self - rhs).length_squared()
    }

    #[inline]
    #[must_use]
    pub fn div_euclid(self, rhs: Self) -> Self {
        Self::new(
            math::div_euclid(self.x, rhs.x),
            math::div_euclid(self.y, rhs.y),
            math::div_euclid(self.z, rhs.z),
        )
    }

    #[inline]
    #[must_use]
    pub fn rem_euclid(self, rhs: Self) -> Self {
        Self::new(
            math::rem_euclid(self.x, rhs.x),
            math::rem_euclid(self.y, rhs.y),
            math::rem_euclid(self.z, rhs.z),
        )
    }

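    /// Returns `self` normalized to length 1.0.
    ///
    /// For valid results, `self` must be finite and _not_ of length zero, nor very close to zero.
    ///
    /// See also [`Self::try_normalize()`] and [`Self::normalize_or_zero()`].
    ///
    /// # Panics
    ///
    /// Will panic if the resulting normalized vector is not finite when `glam_assert` is enabled.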
    #[inline]
    #[must_use]
    pub fn normalize(self) -> Self {
        unsafe {
            let length = _mm_sqrt_ps(dot3_into_m128(self.0, self.0));
            #[allow(clippy::let_and_return)]
            let normalized = Self(_mm_div_ps(self.0, length));
            glam_assert!(normalized.is_finite());
            normalized
        }
    }

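    /// Returns `self` normalized to length 1.0 if possible, else returns `None`.
    ///
    /// In particular, if the input is zero (or very close to zero), or non-finite,
    /// the result of this operation will be `None`.
    ///
    /// See also [`Self::normalize_or_zero()`].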
    #[inline]
    #[must_use]
    pub fn try_normalize(self) -> Option<Self> {
        let rcp = self.length_recip();
        if rcp.is_finite() && rcp > 0.0 {
            Some(self * rcp)
        } else {
            None
        }
    }

    #[inline]
    #[must_use]
    pub fn normalize_or(self, fallback: Self) -> Self {
        let rcp = self.length_recip();
        if rcp.is_finite() && rcp > 0.0 {
            self * rcp
        } else {
            fallback
        }
    }

    #[inline]
    #[must_use]
    pub fn normalize_or_zero(self) -> Self {
        self.normalize_or(Self::ZERO)
    }

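    /// Returns whether `self` is length `1.0` or not.
    ///
    /// Uses a precision threshold of `2e-4` on the squared length.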
    #[inline]
    #[must_use]
    pub fn is_normalized(self) -> bool {
        math::abs(self.length_squared() - 1.0) <= 2e-4
    }

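    /// Returns the vector projection of `self` onto `rhs`.
    ///
    /// `rhs` must be of non-zero length.
    ///
    /// # Panics
    ///
    /// Will panic if `rhs` is zero length when `glam_assert` is enabled.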
    #[inline]
    #[must_use]
    pub fn project_onto(self, rhs: Self) -> Self {
        let other_len_sq_rcp = rhs.dot(rhs).recip();
        glam_assert!(other_len_sq_rcp.is_finite());
        rhs * self.dot(rhs) * other_len_sq_rcp
    }

    #[inline]
    #[must_use]
    pub fn reject_from(self, rhs: Self) -> Self {
        self - self.project_onto(rhs)
    }

    #[inline]
    #[must_use]
    pub fn project_onto_normalized(self, rhs: Self) -> Self {
        glam_assert!(rhs.is_normalized());
        rhs * self.dot(rhs)
    }

    #[inline]
    #[must_use]
    pub fn reject_from_normalized(self, rhs: Self) -> Self {
        self - self.project_onto_normalized(rhs)
    }

    #[inline]
    #[must_use]
    pub fn round(self) -> Self {
        Self(unsafe { m128_round(self.0) })
    }

    #[inline]
    #[must_use]
    pub fn floor(self) -> Self {
        Self(unsafe { m128_floor(self.0) })
    }

    #[inline]
    #[must_use]
    pub fn ceil(self) -> Self {
        Self(unsafe { m128_ceil(self.0) })
    }

    #[inline]
    #[must_use]
    pub fn trunc(self) -> Self {
        Self(unsafe { m128_trunc(self.0) })
    }

    #[inline]
    #[must_use]
    pub fn fract(self) -> Self {
        self - self.trunc()
    }

    #[inline]
    #[must_use]
    pub fn fract_gl(self) -> Self {
        self - self.floor()
    }

    #[inline]
    #[must_use]
    pub fn exp(self) -> Self {
        Self::new(math::exp(self.x), math::exp(self.y), math::exp(self.z))
    }

    #[inline]
    #[must_use]
    pub fn powf(self, n: f32) -> Self {
        Self::new(
            math::powf(self.x, n),
            math::powf(self.y, n),
            math::powf(self.z, n),
        )
    }

    #[inline]
    #[must_use]
    pub fn recip(self) -> Self {
        Self(unsafe { _mm_div_ps(Self::ONE.0, self.0) })
    }

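    /// Performs a linear interpolation between `self` and `rhs` based on the value `s`.
    ///
    /// When `s` is `0.0`, the result will be equal to `self`. When `s` is `1.0`, the result
    /// will be equal to `rhs`. When `s` is outside of range `[0, 1]`, the result is linearly
    /// extrapolated.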
    #[doc(alias = "mix")]
    #[inline]
    #[must_use]
    pub fn lerp(self, rhs: Self, s: f32) -> Self {
        self + ((rhs - self) * s)
    }

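    /// Moves towards `rhs` based on the value `d`.
    ///
    /// When `d` is `0.0`, the result will be equal to `self`. When `d` is equal to
    /// `self.distance(rhs)`, the result will be equal to `rhs`. Will not go past `rhs`.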
    #[inline]
    #[must_use]
    pub fn move_towards(&self, rhs: Self, d: f32) -> Self {
        let a = rhs - *self;
        let len = a.length();
        if len <= d || len <= 1e-4 {
            return rhs;
        }
        *self + a / len * d
    }

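    /// Calculates the midpoint between `self` and `rhs`.
    ///
    /// The midpoint is the average of, or halfway point between, two vectors.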
    #[inline]
    #[must_use]
    pub fn midpoint(self, rhs: Self) -> Self {
        (self + rhs) * 0.5
    }

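    /// Returns true if the absolute difference of all elements between `self` and `rhs` is
    /// less than or equal to `max_abs_diff`.
    ///
    /// This can be used to compare if two vectors contain similar elements. It works best when
    /// comparing with a known value. The `max_abs_diff` that should be used depends on the
    /// values being compared against.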
    #[inline]
    #[must_use]
    pub fn abs_diff_eq(self, rhs: Self, max_abs_diff: f32) -> bool {
        self.sub(rhs).abs().cmple(Self::splat(max_abs_diff)).all()
    }

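    /// Returns a vector with a length no less than `min` and no more than `max`.
    ///
    /// # Panics
    ///
    /// Will panic if `min` is greater than `max` when `glam_assert` is enabled.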
    #[inline]
    #[must_use]
    pub fn clamp_length(self, min: f32, max: f32) -> Self {
        glam_assert!(min <= max);
        let length_sq = self.length_squared();
        if length_sq < min * min {
            min * (self / math::sqrt(length_sq))
        } else if length_sq > max * max {
            max * (self / math::sqrt(length_sq))
        } else {
            self
        }
    }

    #[inline]
    #[must_use]
    pub fn clamp_length_max(self, max: f32) -> Self {
        let length_sq = self.length_squared();
        if length_sq > max * max {
            max * (self / math::sqrt(length_sq))
        } else {
            self
        }
    }

    #[inline]
    #[must_use]
    pub fn clamp_length_min(self, min: f32) -> Self {
        let length_sq = self.length_squared();
        if length_sq < min * min {
            min * (self / math::sqrt(length_sq))
        } else {
            self
        }
    }

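    /// Fused multiply-add. Computes `(self * a) + b` element-wise with only one rounding
    /// error, yielding a more accurate result than an unfused multiply-add.
    ///
    /// Using `mul_add` *may* be more performant than an unfused multiply-add if the target
    /// architecture has a dedicated fma CPU instruction.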
    #[inline]
    #[must_use]
    pub fn mul_add(self, a: Self, b: Self) -> Self {
        #[cfg(target_feature = "fma")]
        unsafe {
            Self(_mm_fmadd_ps(self.0, a.0, b.0))
        }
        #[cfg(not(target_feature = "fma"))]
        Self::new(
            math::mul_add(self.x, a.x, b.x),
            math::mul_add(self.y, a.y, b.y),
            math::mul_add(self.z, a.z, b.z),
        )
    }

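    /// Returns the angle (in radians) between two vectors in the range `[0, +π]`.
    ///
    /// The inputs do not need to be unit vectors however they must be non-zero.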
    #[inline]
    #[must_use]
    pub fn angle_between(self, rhs: Self) -> f32 {
        math::acos_approx(
            self.dot(rhs)
                .div(math::sqrt(self.length_squared().mul(rhs.length_squared()))),
        )
    }

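    /// Returns some vector that is orthogonal to the given one.
    ///
    /// The input vector must be finite and non-zero.
    ///
    /// The output vector is not necessarily unit length. For that use
    /// [`Self::any_orthonormal_vector()`] instead.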
    #[inline]
    #[must_use]
    pub fn any_orthogonal_vector(&self) -> Self {
        if math::abs(self.x) > math::abs(self.y) {
            Self::new(-self.z, 0.0, self.x)
        } else {
            Self::new(0.0, self.z, -self.y)
        }
    }

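    /// Returns any unit vector that is orthogonal to the given one.
    ///
    /// The input vector must be unit length.
    ///
    /// # Panics
    ///
    /// Will panic if `self` is not normalized when `glam_assert` is enabled.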
    #[inline]
    #[must_use]
    pub fn any_orthonormal_vector(&self) -> Self {
        glam_assert!(self.is_normalized());
        let sign = math::signum(self.z);
        let a = -1.0 / (sign + self.z);
        let b = self.x * self.y * a;
        Self::new(b, sign + self.y * self.y * a, -self.y)
    }

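    /// Given a unit vector return two other vectors that together form an orthonormal
    /// basis. That is, all three vectors are orthogonal to each other and are normalized.
    ///
    /// # Panics
    ///
    /// Will panic if `self` is not normalized when `glam_assert` is enabled.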
    #[inline]
    #[must_use]
    pub fn any_orthonormal_pair(&self) -> (Self, Self) {
        glam_assert!(self.is_normalized());
        let sign = math::signum(self.z);
        let a = -1.0 / (sign + self.z);
        let b = self.x * self.y * a;
        (
            Self::new(1.0 + sign * self.x * self.x * a, sign * b, -sign * self.x),
            Self::new(b, sign + self.y * self.y * a, -self.y),
        )
    }

    #[inline]
    #[must_use]
    pub fn as_dvec3(&self) -> crate::DVec3 {
        crate::DVec3::new(self.x as f64, self.y as f64, self.z as f64)
    }

    #[inline]
    #[must_use]
    pub fn as_i16vec3(&self) -> crate::I16Vec3 {
        crate::I16Vec3::new(self.x as i16, self.y as i16, self.z as i16)
    }

    #[inline]
    #[must_use]
    pub fn as_u16vec3(&self) -> crate::U16Vec3 {
        crate::U16Vec3::new(self.x as u16, self.y as u16, self.z as u16)
    }

    #[inline]
    #[must_use]
    pub fn as_ivec3(&self) -> crate::IVec3 {
        crate::IVec3::new(self.x as i32, self.y as i32, self.z as i32)
    }

    #[inline]
    #[must_use]
    pub fn as_uvec3(&self) -> crate::UVec3 {
        crate::UVec3::new(self.x as u32, self.y as u32, self.z as u32)
    }

    #[inline]
    #[must_use]
    pub fn as_i64vec3(&self) -> crate::I64Vec3 {
        crate::I64Vec3::new(self.x as i64, self.y as i64, self.z as i64)
    }

    #[inline]
    #[must_use]
    pub fn as_u64vec3(&self) -> crate::U64Vec3 {
        crate::U64Vec3::new(self.x as u64, self.y as u64, self.z as u64)
    }
}

impl Default for Vec3A {
    #[inline(always)]
    fn default() -> Self {
        Self::ZERO
    }
}

impl PartialEq for Vec3A {
    #[inline]
    fn eq(&self, rhs: &Self) -> bool {
        self.cmpeq(*rhs).all()
    }
}

impl Div<Vec3A> for Vec3A {
    type Output = Self;
    #[inline]
    fn div(self, rhs: Self) -> Self {
        Self(unsafe { _mm_div_ps(self.0, rhs.0) })
    }
}

impl DivAssign<Vec3A> for Vec3A {
    #[inline]
    fn div_assign(&mut self, rhs: Self) {
        self.0 = unsafe { _mm_div_ps(self.0, rhs.0) };
    }
}

impl Div<f32> for Vec3A {
    type Output = Self;
    #[inline]
    fn div(self, rhs: f32) -> Self {
        Self(unsafe { _mm_div_ps(self.0, _mm_set1_ps(rhs)) })
    }
}

impl DivAssign<f32> for Vec3A {
    #[inline]
    fn div_assign(&mut self, rhs: f32) {
        self.0 = unsafe { _mm_div_ps(self.0, _mm_set1_ps(rhs)) };
    }
}

impl Div<Vec3A> for f32 {
    type Output = Vec3A;
    #[inline]
    fn div(self, rhs: Vec3A) -> Vec3A {
        Vec3A(unsafe { _mm_div_ps(_mm_set1_ps(self), rhs.0) })
    }
}

impl Mul<Vec3A> for Vec3A {
    type Output = Self;
    #[inline]
    fn mul(self, rhs: Self) -> Self {
        Self(unsafe { _mm_mul_ps(self.0, rhs.0) })
    }
}

impl MulAssign<Vec3A> for Vec3A {
    #[inline]
    fn mul_assign(&mut self, rhs: Self) {
        self.0 = unsafe { _mm_mul_ps(self.0, rhs.0) };
    }
}

impl Mul<f32> for Vec3A {
    type Output = Self;
    #[inline]
    fn mul(self, rhs: f32) -> Self {
        Self(unsafe { _mm_mul_ps(self.0, _mm_set1_ps(rhs)) })
    }
}

impl MulAssign<f32> for Vec3A {
    #[inline]
    fn mul_assign(&mut self, rhs: f32) {
        self.0 = unsafe { _mm_mul_ps(self.0, _mm_set1_ps(rhs)) };
    }
}

impl Mul<Vec3A> for f32 {
    type Output = Vec3A;
    #[inline]
    fn mul(self, rhs: Vec3A) -> Vec3A {
        Vec3A(unsafe { _mm_mul_ps(_mm_set1_ps(self), rhs.0) })
    }
}

impl Add<Vec3A> for Vec3A {
    type Output = Self;
    #[inline]
    fn add(self, rhs: Self) -> Self {
        Self(unsafe { _mm_add_ps(self.0, rhs.0) })
    }
}

impl AddAssign<Vec3A> for Vec3A {
    #[inline]
    fn add_assign(&mut self, rhs: Self) {
        self.0 = unsafe { _mm_add_ps(self.0, rhs.0) };
    }
}

impl Add<f32> for Vec3A {
    type Output = Self;
    #[inline]
    fn add(self, rhs: f32) -> Self {
        Self(unsafe { _mm_add_ps(self.0, _mm_set1_ps(rhs)) })
    }
}

impl AddAssign<f32> for Vec3A {
    #[inline]
    fn add_assign(&mut self, rhs: f32) {
        self.0 = unsafe { _mm_add_ps(self.0, _mm_set1_ps(rhs)) };
    }
}

impl Add<Vec3A> for f32 {
    type Output = Vec3A;
    #[inline]
    fn add(self, rhs: Vec3A) -> Vec3A {
        Vec3A(unsafe { _mm_add_ps(_mm_set1_ps(self), rhs.0) })
    }
}

impl Sub<Vec3A> for Vec3A {
    type Output = Self;
    #[inline]
    fn sub(self, rhs: Self) -> Self {
        Self(unsafe { _mm_sub_ps(self.0, rhs.0) })
    }
}

impl SubAssign<Vec3A> for Vec3A {
    #[inline]
    fn sub_assign(&mut self, rhs: Vec3A) {
        self.0 = unsafe { _mm_sub_ps(self.0, rhs.0) };
    }
}

impl Sub<f32> for Vec3A {
    type Output = Self;
    #[inline]
    fn sub(self, rhs: f32) -> Self {
        Self(unsafe { _mm_sub_ps(self.0, _mm_set1_ps(rhs)) })
    }
}

impl SubAssign<f32> for Vec3A {
    #[inline]
    fn sub_assign(&mut self, rhs: f32) {
        self.0 = unsafe { _mm_sub_ps(self.0, _mm_set1_ps(rhs)) };
    }
}

impl Sub<Vec3A> for f32 {
    type Output = Vec3A;
    #[inline]
    fn sub(self, rhs: Vec3A) -> Vec3A {
        Vec3A(unsafe { _mm_sub_ps(_mm_set1_ps(self), rhs.0) })
    }
}

impl Rem<Vec3A> for Vec3A {
    type Output = Self;
    #[inline]
    fn rem(self, rhs: Self) -> Self {
        unsafe {
            let n = m128_floor(_mm_div_ps(self.0, rhs.0));
            Self(_mm_sub_ps(self.0, _mm_mul_ps(n, rhs.0)))
        }
    }
}

impl RemAssign<Vec3A> for Vec3A {
    #[inline]
    fn rem_assign(&mut self, rhs: Self) {
        *self = self.rem(rhs);
    }
}

impl Rem<f32> for Vec3A {
    type Output = Self;
    #[inline]
    fn rem(self, rhs: f32) -> Self {
        self.rem(Self::splat(rhs))
    }
}

impl RemAssign<f32> for Vec3A {
    #[inline]
    fn rem_assign(&mut self, rhs: f32) {
        *self = self.rem(Self::splat(rhs));
    }
}

impl Rem<Vec3A> for f32 {
    type Output = Vec3A;
    #[inline]
    fn rem(self, rhs: Vec3A) -> Vec3A {
        Vec3A::splat(self).rem(rhs)
    }
}

#[cfg(not(target_arch = "spirv"))]
impl AsRef<[f32; 3]> for Vec3A {
    #[inline]
    fn as_ref(&self) -> &[f32; 3] {
        unsafe { &*(self as *const Vec3A as *const [f32; 3]) }
    }
}

#[cfg(not(target_arch = "spirv"))]
impl AsMut<[f32; 3]> for Vec3A {
    #[inline]
    fn as_mut(&mut self) -> &mut [f32; 3] {
        unsafe { &mut *(self as *mut Vec3A as *mut [f32; 3]) }
    }
}

impl Sum for Vec3A {
    #[inline]
    fn sum<I>(iter: I) -> Self
    where
        I: Iterator<Item = Self>,
    {
        iter.fold(Self::ZERO, Self::add)
    }
}

impl<'a> Sum<&'a Self> for Vec3A {
    #[inline]
    fn sum<I>(iter: I) -> Self
    where
        I: Iterator<Item = &'a Self>,
    {
        iter.fold(Self::ZERO, |a, &b| Self::add(a, b))
    }
}

impl Product for Vec3A {
    #[inline]
    fn product<I>(iter: I) -> Self
    where
        I: Iterator<Item = Self>,
    {
        iter.fold(Self::ONE, Self::mul)
    }
}

impl<'a> Product<&'a Self> for Vec3A {
    #[inline]
    fn product<I>(iter: I) -> Self
    where
        I: Iterator<Item = &'a Self>,
    {
        iter.fold(Self::ONE, |a, &b| Self::mul(a, b))
    }
}

impl Neg for Vec3A {
    type Output = Self;
    #[inline]
    fn neg(self) -> Self {
        Self(unsafe { _mm_xor_ps(_mm_set1_ps(-0.0), self.0) })
    }
}

impl Index<usize> for Vec3A {
    type Output = f32;
    #[inline]
    fn index(&self, index: usize) -> &Self::Output {
        match index {
            0 => &self.x,
            1 => &self.y,
            2 => &self.z,
            _ => panic!("index out of bounds"),
        }
    }
}

impl IndexMut<usize> for Vec3A {
    #[inline]
    fn index_mut(&mut self, index: usize) -> &mut Self::Output {
        match index {
            0 => &mut self.x,
            1 => &mut self.y,
            2 => &mut self.z,
            _ => panic!("index out of bounds"),
        }
    }
}

#[cfg(not(target_arch = "spirv"))]
impl fmt::Display for Vec3A {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        if let Some(p) = f.precision() {
            write!(f, "[{:.*}, {:.*}, {:.*}]", p, self.x, p, self.y, p, self.z)
        } else {
            write!(f, "[{}, {}, {}]", self.x, self.y, self.z)
        }
    }
}

#[cfg(not(target_arch = "spirv"))]
impl fmt::Debug for Vec3A {
    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt.debug_tuple(stringify!(Vec3A))
            .field(&self.x)
            .field(&self.y)
            .field(&self.z)
            .finish()
    }
}

impl From<Vec3A> for __m128 {
    #[inline]
    fn from(t: Vec3A) -> Self {
        t.0
    }
}

impl From<__m128> for Vec3A {
    #[inline]
    fn from(t: __m128) -> Self {
        Self(t)
    }
}

impl From<[f32; 3]> for Vec3A {
    #[inline]
    fn from(a: [f32; 3]) -> Self {
        Self::new(a[0], a[1], a[2])
    }
}

impl From<Vec3A> for [f32; 3] {
    #[inline]
    fn from(v: Vec3A) -> Self {
        use crate::Align16;
        use core::mem::MaybeUninit;
        let mut out: MaybeUninit<Align16<Self>> = MaybeUninit::uninit();
        unsafe {
            _mm_store_ps(out.as_mut_ptr().cast(), v.0);
            out.assume_init().0
        }
    }
}

impl From<(f32, f32, f32)> for Vec3A {
    #[inline]
    fn from(t: (f32, f32, f32)) -> Self {
        Self::new(t.0, t.1, t.2)
    }
}

impl From<Vec3A> for (f32, f32, f32) {
    #[inline]
    fn from(v: Vec3A) -> Self {
        use crate::Align16;
        use core::mem::MaybeUninit;
        let mut out: MaybeUninit<Align16<Self>> = MaybeUninit::uninit();
        unsafe {
            _mm_store_ps(out.as_mut_ptr().cast(), v.0);
            out.assume_init().0
        }
    }
}

impl From<Vec3> for Vec3A {
    #[inline]
    fn from(v: Vec3) -> Self {
        Self::new(v.x, v.y, v.z)
    }
}

impl From<Vec4> for Vec3A {
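    /// Creates a [`Vec3A`] from the `x`, `y` and `z` elements of the [`Vec4`], discarding `w`.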
    #[inline]
    fn from(v: Vec4) -> Self {
        Self(v.0)
    }
}

impl From<Vec3A> for Vec3 {
    #[inline]
    fn from(v: Vec3A) -> Self {
        use crate::Align16;
        use core::mem::MaybeUninit;
        let mut out: MaybeUninit<Align16<Self>> = MaybeUninit::uninit();
        unsafe {
            _mm_store_ps(out.as_mut_ptr().cast(), v.0);
            out.assume_init().0
        }
    }
}

impl From<(Vec2, f32)> for Vec3A {
    #[inline]
    fn from((v, z): (Vec2, f32)) -> Self {
        Self::new(v.x, v.y, z)
    }
}

impl Deref for Vec3A {
    type Target = crate::deref::Vec3<f32>;
    #[inline]
    fn deref(&self) -> &Self::Target {
        unsafe { &*(self as *const Self).cast() }
    }
}

impl DerefMut for Vec3A {
    #[inline]
    fn deref_mut(&mut self) -> &mut Self::Target {
        unsafe { &mut *(self as *mut Self).cast() }
    }
}

impl From<BVec3> for Vec3A {
    #[inline]
    fn from(v: BVec3) -> Self {
        Self::new(f32::from(v.x), f32::from(v.y), f32::from(v.z))
    }
}

impl From<BVec3A> for Vec3A {
    #[inline]
    fn from(v: BVec3A) -> Self {
        let bool_array: [bool; 3] = v.into();
        Self::new(
            f32::from(bool_array[0]),
            f32::from(bool_array[1]),
            f32::from(bool_array[2]),
        )
    }
}