diff --git a/crates/rustc_codegen_spirv/src/abi.rs b/crates/rustc_codegen_spirv/src/abi.rs
index 04d1bf9860..f0a69ac74d 100644
--- a/crates/rustc_codegen_spirv/src/abi.rs
+++ b/crates/rustc_codegen_spirv/src/abi.rs
@@ -741,8 +741,16 @@ fn trans_aggregate<'tcx>(cx: &CodegenCx<'tcx>, span: Span, ty: TyAndLayout<'tcx>
         // There's a potential for this array to be sized, but the element to be unsized, e.g. `[[u8]; 5]`.
         // However, I think rust disallows all these cases, so assert this here.
         assert_eq!(count, 0);
+        let element_spirv = cx.lookup_type(element_type);
+        // Calculate stride with alignment for runtime arrays
+        let stride = element_spirv.physical_size(cx).and_then(|_| {
+            element_spirv
+                .sizeof(cx)
+                .map(|size| size.align_to(element_spirv.alignof(cx)).bytes() as u32)
+        });
         SpirvType::RuntimeArray {
             element: element_type,
+            stride,
         }
         .def(span, cx)
     } else if count == 0 {
@@ -756,9 +764,13 @@ fn trans_aggregate<'tcx>(cx: &CodegenCx<'tcx>, span: Span, ty: TyAndLayout<'tcx>
             .expect("Unexpected unsized type in sized FieldsShape::Array")
             .align_to(element_spv.alignof(cx));
         assert_eq!(stride_spv, stride);
+        // For arrays with explicit layout, use the actual stride from Rust's layout
+        // which already accounts for alignment
+        let array_stride = element_spv.physical_size(cx).map(|_| stride.bytes() as u32);
         SpirvType::Array {
             element: element_type,
             count: count_const,
+            stride: array_stride,
         }
         .def(span, cx)
     }
@@ -1060,8 +1072,17 @@ fn trans_intrinsic_type<'tcx>(
             // We use a generic param to indicate the underlying element type.
             // The SPIR-V element type will be generated from the first generic param.
             if let Some(elem_ty) = args.types().next() {
+                let element_type = cx.layout_of(elem_ty).spirv_type(span, cx);
+                let element_spirv = cx.lookup_type(element_type);
+                // Calculate stride with alignment for intrinsic runtime arrays
+                let stride = element_spirv.physical_size(cx).and_then(|_| {
+                    element_spirv
+                        .sizeof(cx)
+                        .map(|size| size.align_to(element_spirv.alignof(cx)).bytes() as u32)
+                });
                 Ok(SpirvType::RuntimeArray {
-                    element: cx.layout_of(elem_ty).spirv_type(span, cx),
+                    element: element_type,
+                    stride,
                 }
                 .def(span, cx))
             } else {
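The stride computation added above rounds the element's size up to the element's alignment before casting to bytes, so a 12-byte element with 16-byte alignment (such as `glam::Vec3` in a storage buffer) gets a stride of 16 rather than 12. A minimal standalone sketch of that arithmetic, using a hypothetical `align_up` helper (the diff itself uses `rustc_abi::Size::align_to` for the same rounding):

```rust
/// Hypothetical helper: round `size` up to the next multiple of `align`.
/// `align` must be a power of two, as SPIR-V and Rust alignments always are.
fn align_up(size: u64, align: u64) -> u64 {
    debug_assert!(align.is_power_of_two());
    (size + align - 1) & !(align - 1)
}

fn main() {
    // vec3: 12 bytes of data, 16-byte alignment => ArrayStride 16.
    assert_eq!(align_up(12, 16), 16);
    // Elements whose size is already a multiple of their alignment
    // keep their natural stride.
    assert_eq!(align_up(16, 16), 16);
    assert_eq!(align_up(4, 4), 4);
}
```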
diff --git a/crates/rustc_codegen_spirv/src/builder/builder_methods.rs b/crates/rustc_codegen_spirv/src/builder/builder_methods.rs
index 3a8b4c1801..75c1540186 100644
--- a/crates/rustc_codegen_spirv/src/builder/builder_methods.rs
+++ b/crates/rustc_codegen_spirv/src/builder/builder_methods.rs
@@ -256,7 +256,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
                 )
                 .def(self)
             }
-            SpirvType::Array { element, count } => {
+            SpirvType::Array { element, count, .. } => {
                 let elem_pat = self.memset_const_pattern(&self.lookup_type(element), fill_byte);
                 let count = self.builder.lookup_const_scalar(count).unwrap() as usize;
                 self.constant_composite(
@@ -301,7 +301,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
                 _ => self.fatal(format!("memset on float width {width} not implemented yet")),
             },
             SpirvType::Adt { .. } => self.fatal("memset on structs not implemented yet"),
-            SpirvType::Array { element, count } => {
+            SpirvType::Array { element, count, .. } => {
                 let elem_pat = self.memset_dynamic_pattern(&self.lookup_type(element), fill_var);
                 let count = self.builder.lookup_const_scalar(count).unwrap() as usize;
                 self.emit()
@@ -590,7 +590,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
                 }
                 SpirvType::Vector { element, .. }
                 | SpirvType::Array { element, .. }
-                | SpirvType::RuntimeArray { element }
+                | SpirvType::RuntimeArray { element, .. }
                 | SpirvType::Matrix { element, .. } => {
                     trace!("recovering access chain from Vector, Array, RuntimeArray, or Matrix");
                     ty = element;
@@ -687,7 +687,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
            }
            // If it's an array, vector, or matrix, indexing yields the element type.
            SpirvType::Array { element, .. }
-            | SpirvType::RuntimeArray { element }
+            | SpirvType::RuntimeArray { element, .. }
            | SpirvType::Vector { element, .. }
            | SpirvType::Matrix { element, .. } => element,
            // Special case: If we started with a byte GEP (`is_byte_gep` is true) and
diff --git a/crates/rustc_codegen_spirv/src/builder/byte_addressable_buffer.rs b/crates/rustc_codegen_spirv/src/builder/byte_addressable_buffer.rs
index 4704f903a8..a2fc840bf5 100644
--- a/crates/rustc_codegen_spirv/src/builder/byte_addressable_buffer.rs
+++ b/crates/rustc_codegen_spirv/src/builder/byte_addressable_buffer.rs
@@ -123,7 +123,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
                 element,
                 count,
             ),
-            SpirvType::Array { element, count } => {
+            SpirvType::Array { element, count, .. } => {
                 let count = match self.builder.lookup_const_scalar(count) {
                     Some(count) => count as u32,
                     None => return self.load_err(original_type, result_type),
@@ -322,7 +322,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
                 element,
                 count,
             ),
-            SpirvType::Array { element, count } => {
+            SpirvType::Array { element, count, .. } => {
                 let count = match self.builder.lookup_const_scalar(count) {
                     Some(count) => count as u32,
                     None => return self.store_err(original_type, value),
diff --git a/crates/rustc_codegen_spirv/src/builder/spirv_asm.rs b/crates/rustc_codegen_spirv/src/builder/spirv_asm.rs
index 91da22473d..5778a841ab 100644
--- a/crates/rustc_codegen_spirv/src/builder/spirv_asm.rs
+++ b/crates/rustc_codegen_spirv/src/builder/spirv_asm.rs
@@ -302,10 +302,21 @@ impl<'cx, 'tcx> Builder<'cx, 'tcx> {
                 self.err("OpTypeArray in asm! is not supported yet");
                 return;
             }
-            Op::TypeRuntimeArray => SpirvType::RuntimeArray {
-                element: inst.operands[0].unwrap_id_ref(),
+            Op::TypeRuntimeArray => {
+                let element_type = inst.operands[0].unwrap_id_ref();
+                let element_spirv = self.lookup_type(element_type);
+                // Calculate stride with alignment for asm runtime arrays
+                let stride = element_spirv.physical_size(self).and_then(|_| {
+                    element_spirv
+                        .sizeof(self)
+                        .map(|size| size.align_to(element_spirv.alignof(self)).bytes() as u32)
+                });
+                SpirvType::RuntimeArray {
+                    element: element_type,
+                    stride,
+                }
+                .def(self.span(), self)
             }
-            .def(self.span(), self),
             Op::TypePointer => {
                 let storage_class = inst.operands[0].unwrap_storage_class();
                 if storage_class != StorageClass::Generic {
@@ -704,7 +715,7 @@ impl<'cx, 'tcx> Builder<'cx, 'tcx> {
             };
             ty = match cx.lookup_type(ty) {
                 SpirvType::Array { element, .. }
-                | SpirvType::RuntimeArray { element }
+                | SpirvType::RuntimeArray { element, .. }
                 // HACK(eddyb) this is pretty bad because it's not
                 // checking that the index is an `OpConstant 0`, but
                 // there's no other valid choice anyway.
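The same `physical_size` / `sizeof` / `alignof` chain now appears in `abi.rs` (twice), here in `spirv_asm.rs`, and in `type_.rs` below. If deduplication is wanted later, a shared helper could look roughly like the following sketch (the name `element_array_stride` is hypothetical and not part of this diff):

```rust
/// Hypothetical shared helper: the ArrayStride in bytes for an array of
/// `element`, or `None` when the element has no physical size (e.g. opaque
/// handle types), in which case no decoration should be emitted.
fn element_array_stride(cx: &CodegenCx<'_>, element: Word) -> Option<u32> {
    let ty = cx.lookup_type(element);
    // Only elements with a physical layout get an explicit stride.
    ty.physical_size(cx)?;
    let size = ty.sizeof(cx)?;
    Some(size.align_to(ty.alignof(cx)).bytes() as u32)
}
```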
diff --git a/crates/rustc_codegen_spirv/src/codegen_cx/constant.rs b/crates/rustc_codegen_spirv/src/codegen_cx/constant.rs
index 1461f23246..5d888ea81d 100644
--- a/crates/rustc_codegen_spirv/src/codegen_cx/constant.rs
+++ b/crates/rustc_codegen_spirv/src/codegen_cx/constant.rs
@@ -484,7 +484,7 @@ impl<'tcx> CodegenCx<'tcx> {
                 }
                 self.constant_composite(ty, values.into_iter())
             }
-            SpirvType::Array { element, count } => {
+            SpirvType::Array { element, count, .. } => {
                 let count = self.builder.lookup_const_scalar(count).unwrap() as usize;
                 let values = (0..count).map(|_| {
                     self.read_from_const_alloc(alloc, offset, element)
@@ -522,7 +522,7 @@ impl<'tcx> CodegenCx<'tcx> {
                 *offset = final_offset;
                 result
             }
-            SpirvType::RuntimeArray { element } => {
+            SpirvType::RuntimeArray { element, .. } => {
                 let mut values = Vec::new();
                 while offset.bytes_usize() != alloc.inner().len() {
                     values.push(
diff --git a/crates/rustc_codegen_spirv/src/codegen_cx/entry.rs b/crates/rustc_codegen_spirv/src/codegen_cx/entry.rs
index a2ad70a010..fa34939e43 100644
--- a/crates/rustc_codegen_spirv/src/codegen_cx/entry.rs
+++ b/crates/rustc_codegen_spirv/src/codegen_cx/entry.rs
@@ -261,7 +261,7 @@ impl<'tcx> CodegenCx<'tcx> {
         let value_spirv_type = value_layout.spirv_type(hir_param.ty_span, self);
         // Some types automatically specify a storage class. Compute that here.
         let element_ty = match self.lookup_type(value_spirv_type) {
-            SpirvType::Array { element, .. } | SpirvType::RuntimeArray { element } => {
+            SpirvType::Array { element, .. } | SpirvType::RuntimeArray { element, .. } => {
                 self.lookup_type(element)
             }
             ty => ty,
@@ -505,7 +505,7 @@ impl<'tcx> CodegenCx<'tcx> {
             && {
                 // Peel off arrays first (used for "descriptor indexing").
                 let outermost_or_array_element = match self.lookup_type(value_spirv_type) {
-                    SpirvType::Array { element, .. } | SpirvType::RuntimeArray { element } => {
+                    SpirvType::Array { element, .. } | SpirvType::RuntimeArray { element, .. } => {
                         element
                     }
                     _ => value_spirv_type,
@@ -966,7 +966,7 @@ impl<'tcx> CodegenCx<'tcx> {
             SpirvType::Vector { element, .. }
             | SpirvType::Matrix { element, .. }
             | SpirvType::Array { element, .. }
-            | SpirvType::RuntimeArray { element }
+            | SpirvType::RuntimeArray { element, .. }
             | SpirvType::Pointer { pointee: element }
             | SpirvType::InterfaceBlock {
                 inner_type: element,
diff --git a/crates/rustc_codegen_spirv/src/codegen_cx/type_.rs b/crates/rustc_codegen_spirv/src/codegen_cx/type_.rs
index f0005a9e42..97b5a0fbcf 100644
--- a/crates/rustc_codegen_spirv/src/codegen_cx/type_.rs
+++ b/crates/rustc_codegen_spirv/src/codegen_cx/type_.rs
@@ -165,9 +165,17 @@ impl<'tcx> BaseTypeCodegenMethods<'tcx> for CodegenCx<'tcx> {
     }

     fn type_array(&self, ty: Self::Type, len: u64) -> Self::Type {
+        let ty_spirv = self.lookup_type(ty);
+        // Calculate stride with alignment
+        let stride = ty_spirv.physical_size(self).and_then(|_| {
+            ty_spirv
+                .sizeof(self)
+                .map(|size| size.align_to(ty_spirv.alignof(self)).bytes() as u32)
+        });
         SpirvType::Array {
             element: ty,
             count: self.constant_u64(DUMMY_SP, len),
+            stride,
         }
         .def(DUMMY_SP, self)
     }
diff --git a/crates/rustc_codegen_spirv/src/spirv_type.rs b/crates/rustc_codegen_spirv/src/spirv_type.rs
index d674f2542f..6fb5309439 100644
--- a/crates/rustc_codegen_spirv/src/spirv_type.rs
+++ b/crates/rustc_codegen_spirv/src/spirv_type.rs
@@ -55,9 +55,15 @@ pub enum SpirvType<'tcx> {
         element: Word,
         /// Note: array count is ref to constant.
         count: SpirvValue,
+        /// Whether this array has an explicit stride decoration.
+        /// None means no decoration, Some(stride) means decorated with that stride.
+        stride: Option<u32>,
     },
     RuntimeArray {
         element: Word,
+        /// Whether this array has an explicit stride decoration.
+        /// None means no decoration, Some(stride) means decorated with that stride.
+        stride: Option<u32>,
     },
     Pointer {
         pointee: Word,
@@ -181,16 +187,32 @@ impl SpirvType<'_> {
             }
             Self::Vector { element, count } => cx.emit_global().type_vector_id(id, element, count),
             Self::Matrix { element, count } => cx.emit_global().type_matrix_id(id, element, count),
-            Self::Array { element, count } => {
+            Self::Array {
+                element,
+                count,
+                stride,
+            } => {
                 let result = cx
                     .emit_global()
                     .type_array_id(id, element, count.def_cx(cx));
-                Self::decorate_array_stride(result, element, cx);
+                if let Some(stride_bytes) = stride {
+                    cx.emit_global().decorate(
+                        result,
+                        Decoration::ArrayStride,
+                        iter::once(Operand::LiteralBit32(stride_bytes)),
+                    );
+                }
                 result
             }
-            Self::RuntimeArray { element } => {
+            Self::RuntimeArray { element, stride } => {
                 let result = cx.emit_global().type_runtime_array_id(id, element);
-                Self::decorate_array_stride(result, element, cx);
+                if let Some(stride_bytes) = stride {
+                    cx.emit_global().decorate(
+                        result,
+                        Decoration::ArrayStride,
+                        iter::once(Operand::LiteralBit32(stride_bytes)),
+                    );
+                }
                 result
             }
             Self::Pointer { pointee } => {
@@ -258,19 +280,6 @@ impl SpirvType<'_> {
         result
     }

-    fn decorate_array_stride(result: u32, element: u32, cx: &CodegenCx<'_>) {
-        let mut emit = cx.emit_global();
-        let ty = cx.lookup_type(element);
-        if let Some(element_size) = ty.physical_size(cx) {
-            // ArrayStride decoration wants in *bytes*
-            emit.decorate(
-                result,
-                Decoration::ArrayStride,
-                iter::once(Operand::LiteralBit32(element_size.bytes() as u32)),
-            );
-        }
-    }
-
     /// `def_with_id` is used by the `RecursivePointeeCache` to handle `OpTypeForwardPointer`: when
     /// emitting the subsequent `OpTypePointer`, the ID is already known and must be re-used.
     pub fn def_with_id(self, cx: &CodegenCx<'_>, def_span: Span, id: Word) -> Word {
@@ -332,7 +341,7 @@ impl SpirvType<'_> {
                 cx.lookup_type(element).sizeof(cx)? * count.next_power_of_two() as u64
             }
             Self::Matrix { element, count } => cx.lookup_type(element).sizeof(cx)? * count as u64,
-            Self::Array { element, count } => {
+            Self::Array { element, count, .. } => {
                 cx.lookup_type(element).sizeof(cx)?
                     * cx.builder
                         .lookup_const_scalar(count)
@@ -367,7 +376,7 @@ impl SpirvType<'_> {
             )
             .expect("alignof: Vectors must have power-of-2 size"),
             Self::Array { element, .. }
-            | Self::RuntimeArray { element }
+            | Self::RuntimeArray { element, .. }
             | Self::Matrix { element, .. } => cx.lookup_type(element).alignof(cx),
             Self::Pointer { .. } => cx.tcx.data_layout.pointer_align.abi,
             Self::Image { .. }
@@ -388,7 +397,11 @@ impl SpirvType<'_> {

             Self::Adt { size, .. } => size,

-            Self::Array { element, count } => Some(
+            Self::Array {
+                element,
+                count,
+                stride: _,
+            } => Some(
                 cx.lookup_type(element).physical_size(cx)?
                     * cx.builder
                         .lookup_const_scalar(count)
@@ -432,8 +445,18 @@ impl SpirvType<'_> {
             SpirvType::Float(width) => SpirvType::Float(width),
             SpirvType::Vector { element, count } => SpirvType::Vector { element, count },
             SpirvType::Matrix { element, count } => SpirvType::Matrix { element, count },
-            SpirvType::Array { element, count } => SpirvType::Array { element, count },
-            SpirvType::RuntimeArray { element } => SpirvType::RuntimeArray { element },
+            SpirvType::Array {
+                element,
+                count,
+                stride,
+            } => SpirvType::Array {
+                element,
+                count,
+                stride,
+            },
+            SpirvType::RuntimeArray { element, stride } => {
+                SpirvType::RuntimeArray { element, stride }
+            }
             SpirvType::Pointer { pointee } => SpirvType::Pointer { pointee },
             SpirvType::Image {
                 sampled_type,
@@ -561,7 +584,11 @@ impl fmt::Debug for SpirvTypePrinter<'_, '_> {
                 .field("element", &self.cx.debug_type(element))
                 .field("count", &count)
                 .finish(),
-            SpirvType::Array { element, count } => f
+            SpirvType::Array {
+                element,
+                count,
+                stride,
+            } => f
                 .debug_struct("Array")
                 .field("id", &self.id)
                 .field("element", &self.cx.debug_type(element))
@@ -573,11 +600,13 @@ impl fmt::Debug for SpirvTypePrinter<'_, '_> {
                         .lookup_const_scalar(count)
                         .expect("Array type has invalid count value"),
                 )
+                .field("stride", &stride)
                 .finish(),
-            SpirvType::RuntimeArray { element } => f
+            SpirvType::RuntimeArray { element, stride } => f
                 .debug_struct("RuntimeArray")
                 .field("id", &self.id)
                 .field("element", &self.cx.debug_type(element))
+                .field("stride", &stride)
                 .finish(),
             SpirvType::Pointer { pointee } => f
                 .debug_struct("Pointer")
@@ -720,14 +749,14 @@ impl SpirvTypePrinter<'_, '_> {
                 ty(self.cx, stack, f, element)?;
                 write!(f, "x{count}")
             }
-            SpirvType::Array { element, count } => {
+            SpirvType::Array { element, count, .. } => {
                 let len = self.cx.builder.lookup_const_scalar(count);
                 let len = len.expect("Array type has invalid count value");
                 f.write_str("[")?;
                 ty(self.cx, stack, f, element)?;
                 write!(f, "; {len}]")
             }
-            SpirvType::RuntimeArray { element } => {
+            SpirvType::RuntimeArray { element, .. } => {
                 f.write_str("[")?;
                 ty(self.cx, stack, f, element)?;
                 f.write_str("]")
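Note the shift in responsibility relative to the removed `decorate_array_stride`: the stride is no longer recomputed from the element at emission time but is carried in the type itself, and `None` suppresses the decoration entirely. A hedged sketch of the resulting call-site contract (`element`, `span`, and `cx` are assumed to be in scope; the literal 16 is illustrative):

```rust
// Emits the runtime array type plus `OpDecorate %result ArrayStride 16`.
let decorated = SpirvType::RuntimeArray { element, stride: Some(16) }.def(span, cx);

// Emits the type with no ArrayStride decoration at all. Since `stride` is
// now part of the enum, this should also deduplicate separately from the
// decorated variant in the type cache.
let undecorated = SpirvType::RuntimeArray { element, stride: None }.def(span, cx);
```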
diff --git a/tests/compiletests/ui/dis/array_stride_alignment.rs b/tests/compiletests/ui/dis/array_stride_alignment.rs
new file mode 100644
index 0000000000..ebb6cafbb2
--- /dev/null
+++ b/tests/compiletests/ui/dis/array_stride_alignment.rs
@@ -0,0 +1,20 @@
+// build-pass
+// compile-flags: -C llvm-args=--disassemble-globals
+// CHECK: OpDecorate %{{[0-9]+}} ArrayStride 16
+
+use spirv_std::spirv;
+
+// Test that array stride respects alignment requirements
+// vec3 has size 12 bytes but alignment 16 bytes
+// So array stride should be 16, not 12
+#[derive(Copy, Clone)]
+pub struct AlignedBuffer {
+    data: [spirv_std::glam::Vec3; 4],
+}
+
+#[spirv(compute(threads(1)))]
+pub fn main_cs(
+    #[spirv(storage_buffer, descriptor_set = 0, binding = 0)] storage: &mut AlignedBuffer,
+) {
+    storage.data[0] = spirv_std::glam::Vec3::new(1.0, 2.0, 3.0);
+}
diff --git a/tests/compiletests/ui/dis/array_stride_alignment.stderr b/tests/compiletests/ui/dis/array_stride_alignment.stderr
new file mode 100644
index 0000000000..d4bc841642
--- /dev/null
+++ b/tests/compiletests/ui/dis/array_stride_alignment.stderr
@@ -0,0 +1,42 @@
+OpCapability Shader
+OpCapability Float64
+OpCapability Int64
+OpCapability Int16
+OpCapability Int8
+OpCapability ShaderClockKHR
+OpCapability VulkanMemoryModel
+OpExtension "SPV_KHR_shader_clock"
+OpMemoryModel Logical Vulkan
+OpEntryPoint GLCompute %1 "main_cs" %2
+OpExecutionMode %1 LocalSize 1 1 1
+%3 = OpString "$OPSTRING_FILENAME/vec3.rs"
+%4 = OpString "$OPSTRING_FILENAME/array_stride_alignment.rs"
+OpSource Unknown 0 %3 "// Generated from vec.rs.tera template. Edit the template, not the generated file./n/nuse crate::{f32::math, BVec3, BVec3A, FloatExt, Quat, Vec2, Vec4};/n/nuse core::fmt;/nuse core::iter::{Product, Sum};/nuse core::{f32, ops::*};/n/n/// Creates a 3-dimensional vector./n#[inline(always)]/n#[must_use]/npub const fn vec3(x: f32, y: f32, z: f32) -> Vec3 {/n Vec3::new(x, y, z)/n}/n/n/// A 3-dimensional vector./n#[derive(Clone, Copy, PartialEq)]/n#[cfg_attr(not(target_arch = /"spirv/"), repr(C))]/n#[cfg_attr(target_arch = /"spirv/", repr(simd))]/npub struct Vec3 {/n pub x: f32,/n pub y: f32,/n pub z: f32,/n}/n/nimpl Vec3 {/n /// All zeroes./n pub const ZERO: Self = Self::splat(0.0);/n/n /// All ones./n pub const ONE: Self = Self::splat(1.0);/n/n /// All negative ones./n pub const NEG_ONE: Self = Self::splat(-1.0);/n/n /// All `f32::MIN`./n pub const MIN: Self = Self::splat(f32::MIN);/n/n /// All `f32::MAX`./n pub const MAX: Self = Self::splat(f32::MAX);/n/n /// All `f32::NAN`./n pub const NAN: Self = Self::splat(f32::NAN);/n/n /// All `f32::INFINITY`./n pub const INFINITY: Self = Self::splat(f32::INFINITY);/n/n /// All `f32::NEG_INFINITY`./n pub const NEG_INFINITY: Self = Self::splat(f32::NEG_INFINITY);/n/n /// A unit vector pointing along the positive X axis./n pub const X: Self = Self::new(1.0, 0.0, 0.0);/n/n /// A unit vector pointing along the positive Y axis./n pub const Y: Self = Self::new(0.0, 1.0, 0.0);/n/n /// A unit vector pointing along the positive Z axis./n pub const Z: Self = Self::new(0.0, 0.0, 1.0);/n/n /// A unit vector pointing along the negative X axis./n pub const NEG_X: Self = Self::new(-1.0, 0.0, 0.0);/n/n /// A unit vector pointing along the negative Y axis./n pub const NEG_Y: Self = Self::new(0.0, -1.0, 0.0);/n/n /// A unit vector pointing along the negative Z axis./n pub const NEG_Z: Self = Self::new(0.0, 0.0, -1.0);/n/n /// The unit axes./n pub const AXES: [Self; 3]
= [Self::X, Self::Y, Self::Z];/n/n /// Vec3 uses Rust Portable SIMD/n pub const USES_CORE_SIMD: bool = false;/n /// Vec3 uses Arm NEON/n pub const USES_NEON: bool = false;/n /// Vec3 uses scalar math/n pub const USES_SCALAR_MATH: bool = true;/n /// Vec3 uses Intel SSE2/n pub const USES_SSE2: bool = false;/n /// Vec3 uses WebAssembly 128-bit SIMD/n pub const USES_WASM32_SIMD: bool = false;/n/n /// Creates a new vector./n #[inline(always)]/n #[must_use]/n pub const fn new(x: f32, y: f32, z: f32) -> Self {/n Self { x, y, z }/n }/n/n /// Creates a vector with all elements set to `v`./n #[inline]/n #[must_use]/n pub const fn splat(v: f32) -> Self {/n Self { x: v, y: v, z: v }/n }/n/n /// Returns a vector containing each element of `self` modified by a mapping function `f`./n #[inline]/n #[must_use]/n pub fn map(self, f: F) -> Self/n where/n F: Fn(f32) -> f32,/n {/n Self::new(f(self.x), f(self.y), f(self.z))/n }/n/n /// Creates a vector from the elements in `if_true` and `if_false`, selecting which to use/n /// for each element of `self`./n ////n /// A true element in the mask uses the corresponding element from `if_true`, and false/n /// uses the element from `if_false`./n #[inline]/n #[must_use]/n pub fn select(mask: BVec3, if_true: Self, if_false: Self) -> Self {/n Self {/n x: if mask.test(0) { if_true.x } else { if_false.x },/n y: if mask.test(1) { if_true.y } else { if_false.y },/n z: if mask.test(2) { if_true.z } else { if_false.z },/n }/n }/n/n /// Creates a new vector from an array./n #[inline]/n #[must_use]/n pub const fn from_array(a: [f32; 3]) -> Self {/n Self::new(a[0], a[1], a[2])/n }/n/n /// `[x, y, z]`/n #[inline]/n #[must_use]/n pub const fn to_array(&self) -> [f32; 3] {/n [self.x, self.y, self.z]/n }/n/n /// Creates a vector from the first 3 values in `slice`./n ////n /// # Panics/n ////n /// Panics if `slice` is less than 3 elements long./n #[inline]/n #[must_use]/n pub const fn from_slice(slice: &[f32]) -> Self {/n assert!(slice.len() >= 3);/n Self::new(slice[0], slice[1], slice[2])/n }/n/n /// Writes the elements of `self` to the first 3 elements in `slice`./n ////n /// # Panics/n ////n /// Panics if `slice` is less than 3 elements long./n #[inline]/n pub fn write_to_slice(self, slice: &mut [f32]) {/n slice[..3].copy_from_slice(&self.to_array());/n }/n/n /// Internal method for creating a 3D vector from a 4D vector, discarding `w`./n #[allow(dead_code)]/n #[inline]/n #[must_use]/n pub(crate) fn from_vec4(v: Vec4) -> Self {/n Self {/n x: v.x,/n y: v.y,/n z: v.z,/n }/n }/n/n /// Creates a 4D vector from `self` and the given `w` value./n #[inline]/n #[must_use]/n pub fn extend(self, w: f32) -> Vec4 {/n Vec4::new(self.x, self.y, self.z, w)/n }/n/n /// Creates a 2D vector from the `x` and `y` elements of `self`, discarding `z`./n ////n /// Truncation may also be performed by using [`self.xy()`][crate::swizzles::Vec3Swizzles::xy()]./n #[inline]/n #[must_use]/n pub fn truncate(self) -> Vec2 {/n use crate::swizzles::Vec3Swizzles;/n self.xy()/n }/n/n /// Creates a 3D vector from `self` with the given value of `x`./n #[inline]/n #[must_use]/n pub fn with_x(mut self, x: f32) -> Self {/n self.x = x;/n self/n }/n/n /// Creates a 3D vector from `self` with the given value of `y`./n #[inline]/n #[must_use]/n pub fn with_y(mut self, y: f32) -> Self {/n self.y = y;/n self/n }/n/n /// Creates a 3D vector from `self` with the given value of `z`./n #[inline]/n #[must_use]/n pub fn with_z(mut self, z: f32) -> Self {/n self.z = z;/n self/n }/n/n /// Computes the dot product of `self` and `rhs`./n 
#[inline]/n #[must_use]/n pub fn dot(self, rhs: Self) -> f32 {/n (self.x * rhs.x) + (self.y * rhs.y) + (self.z * rhs.z)/n }/n/n /// Returns a vector where every component is the dot product of `self` and `rhs`./n #[inline]/n #[must_use]/n pub fn dot_into_vec(self, rhs: Self) -> Self {/n Self::splat(self.dot(rhs))/n }/n/n /// Computes the cross product of `self` and `rhs`./n #[inline]/n #[must_use]/n pub fn cross(self, rhs: Self) -> Self {/n Self {/n x: self.y * rhs.z - rhs.y * self.z,/n y: self.z * rhs.x - rhs.z * self.x,/n z: self.x * rhs.y - rhs.x * self.y,/n }/n }/n/n /// Returns a vector containing the minimum values for each element of `self` and `rhs`./n ////n /// In other words this computes `[min(x, rhs.x), min(self.y, rhs.y), ..]`./n ////n /// NaN propogation does not follow IEEE 754-2008 semantics for minNum and may differ on/n /// different SIMD architectures./n #[inline]/n #[must_use]/n pub fn min(self, rhs: Self) -> Self {/n Self {/n x: if self.x < rhs.x { self.x } else { rhs.x },/n y: if self.y < rhs.y { self.y } else { rhs.y },/n z: if self.z < rhs.z { self.z } else { rhs.z },/n }/n }/n/n /// Returns a vector containing the maximum values for each element of `self` and `rhs`./n ////n /// In other words this computes `[max(self.x, rhs.x), max(self.y, rhs.y), ..]`./n ////n /// NaN propogation does not follow IEEE 754-2008 semantics for maxNum and may differ on/n /// different SIMD architectures./n #[inline]/n #[must_use]/n pub fn max(self, rhs: Self) -> Self {/n Self {/n x: if self.x > rhs.x { self.x } else { rhs.x },/n y: if self.y > rhs.y { self.y } else { rhs.y },/n z: if self.z > rhs.z { self.z } else { rhs.z },/n }/n }/n/n /// Component-wise clamping of values, similar to [`f32::clamp`]./n ////n /// Each element in `min` must be less-or-equal to the corresponding element in `max`./n ////n /// NaN propogation does not follow IEEE 754-2008 semantics and may differ on/n /// different SIMD architectures./n ////n /// # Panics/n ////n /// Will panic if `min` is greater than `max` when `glam_assert` is enabled./n #[inline]/n #[must_use]/n pub fn clamp(self, min: Self, max: Self) -> Self {/n glam_assert!(min.cmple(max).all(), /"clamp: expected min <= max/");/n self.max(min).min(max)/n }/n/n /// Returns the horizontal minimum of `self`./n ////n /// In other words this computes `min(x, y, ..)`./n ////n /// NaN propogation does not follow IEEE 754-2008 semantics and may differ on/n /// different SIMD architectures./n #[inline]/n #[must_use]/n pub fn min_element(self) -> f32 {/n let min = |a, b| if a < b { a } else { b };/n min(self.x, min(self.y, self.z))/n }/n/n /// Returns the horizontal maximum of `self`./n ////n /// In other words this computes `max(x, y, ..)`./n ////n /// NaN propogation does not follow IEEE 754-2008 semantics and may differ on/n /// different SIMD architectures./n #[inline]/n #[must_use]/n pub fn max_element(self) -> f32 {/n let max = |a, b| if a > b { a } else { b };/n max(self.x, max(self.y, self.z))/n }/n/n /// Returns the index of the first minimum element of `self`./n #[doc(alias = /"argmin/")]/n #[inline]/n #[must_use]/n pub fn min_position(self) -> usize {/n let mut min = self.x;/n let mut index = 0;/n if self.y < min {/n min = self.y;/n index = 1;/n }/n if self.z < min {/n index = 2;/n }/n index/n }/n/n /// Returns the index of the first maximum element of `self`./n #[doc(alias = /"argmax/")]/n #[inline]/n #[must_use]/n pub fn max_position(self) -> usize {/n let mut max = self.x;/n let mut index = 0;/n if self.y > max {/n max = self.y;/n index = 1;/n 
}/n if self.z > max {/n index = 2;/n }/n index/n }/n/n /// Returns the sum of all elements of `self`./n ////n /// In other words, this computes `self.x + self.y + ..`./n #[inline]/n #[must_use]/n pub fn element_sum(self) -> f32 {/n self.x + self.y + self.z/n }/n/n /// Returns the product of all elements of `self`./n ////n /// In other words, this computes `self.x * self.y * ..`./n #[inline]/n #[must_use]/n pub fn element_product(self) -> f32 {/n self.x * self.y * self.z/n }/n/n /// Returns a vector mask containing the result of a `==` comparison for each element of/n /// `self` and `rhs`./n ////n /// In other words, this computes `[self.x == rhs.x, self.y == rhs.y, ..]` for all/n /// elements./n #[inline]/n #[must_use]/n pub fn cmpeq(self, rhs: Self) -> BVec3 {/n BVec3::new(self.x.eq(&rhs.x), self.y.eq(&rhs.y), self.z.eq(&rhs.z))/n }/n/n /// Returns a vector mask containing the result of a `!=` comparison for each element of/n /// `self` and `rhs`./n ////n /// In other words this computes `[self.x != rhs.x, self.y != rhs.y, ..]` for all/n /// elements./n #[inline]/n #[must_use]/n pub fn cmpne(self, rhs: Self) -> BVec3 {/n BVec3::new(self.x.ne(&rhs.x), self.y.ne(&rhs.y), self.z.ne(&rhs.z))/n }/n/n /// Returns a vector mask containing the result of a `>=` comparison for each element of/n /// `self` and `rhs`./n ////n /// In other words this computes `[self.x >= rhs.x, self.y >= rhs.y, ..]` for all/n /// elements./n #[inline]/n #[must_use]/n pub fn cmpge(self, rhs: Self) -> BVec3 {/n BVec3::new(self.x.ge(&rhs.x), self.y.ge(&rhs.y), self.z.ge(&rhs.z))/n }/n/n /// Returns a vector mask containing the result of a `>` comparison for each element of/n /// `self` and `rhs`./n ////n /// In other words this computes `[self.x > rhs.x, self.y > rhs.y, ..]` for all/n /// elements./n #[inline]/n #[must_use]/n pub fn cmpgt(self, rhs: Self) -> BVec3 {/n BVec3::new(self.x.gt(&rhs.x), self.y.gt(&rhs.y), self.z.gt(&rhs.z))/n }/n/n /// Returns a vector mask containing the result of a `<=` comparison for each element of/n /// `self` and `rhs`./n ////n /// In other words this computes `[self.x <= rhs.x, self.y <= rhs.y, ..]` for all/n /// elements./n #[inline]/n #[must_use]/n pub fn cmple(self, rhs: Self) -> BVec3 {/n BVec3::new(self.x.le(&rhs.x), self.y.le(&rhs.y), self.z.le(&rhs.z))/n }/n/n /// Returns a vector mask containing the result of a `<` comparison for each element of/n /// `self` and `rhs`./n ////n /// In other words this computes `[self.x < rhs.x, self.y < rhs.y, ..]` for all/n /// elements./n #[inline]/n #[must_use]/n pub fn cmplt(self, rhs: Self) -> BVec3 {/n BVec3::new(self.x.lt(&rhs.x), self.y.lt(&rhs.y), self.z.lt(&rhs.z))/n }/n/n /// Returns a vector containing the absolute value of each element of `self`./n #[inline]/n #[must_use]/n pub fn abs(self) -> Self {/n Self {/n x: math::abs(self.x),/n y: math::abs(self.y),/n z: math::abs(self.z),/n }/n }/n/n /// Returns a vector with elements representing the sign of `self`./n ////n /// - `1.0` if the number is positive, `+0.0` or `INFINITY`/n /// - `-1.0` if the number is negative, `-0.0` or `NEG_INFINITY`/n /// - `NAN` if the number is `NAN`/n #[inline]/n #[must_use]/n pub fn signum(self) -> Self {/n Self {/n x: math::signum(self.x),/n y: math::signum(self.y),/n z: math::signum(self.z),/n }/n }/n/n /// Returns a vector with signs of `rhs` and the magnitudes of `self`./n #[inline]/n #[must_use]/n pub fn copysign(self, rhs: Self) -> Self {/n Self {/n x: math::copysign(self.x, rhs.x),/n y: math::copysign(self.y, rhs.y),/n z: math::copysign(self.z, 
rhs.z),/n }/n }/n/n /// Returns a bitmask with the lowest 3 bits set to the sign bits from the elements of `self`./n ////n /// A negative element results in a `1` bit and a positive element in a `0` bit. Element `x` goes/n /// into the first lowest bit, element `y` into the second, etc./n ////n /// An element is negative if it has a negative sign, including -0.0, NaNs with negative sign/n /// bit and negative infinity./n #[inline]/n #[must_use]/n pub fn is_negative_bitmask(self) -> u32 {/n (self.x.is_sign_negative() as u32)/n | ((self.y.is_sign_negative() as u32) << 1)/n | ((self.z.is_sign_negative() as u32) << 2)/n }/n/n /// Returns `true` if, and only if, all elements are finite. If any element is either/n /// `NaN`, positive or negative infinity, this will return `false`./n #[inline]/n #[must_use]/n pub fn is_finite(self) -> bool {/n self.x.is_finite() && self.y.is_finite() && self.z.is_finite()/n }/n/n /// Performs `is_finite` on each element of self, returning a vector mask of the results./n ////n /// In other words, this computes `[x.is_finite(), y.is_finite(), ...]`./n pub fn is_finite_mask(self) -> BVec3 {/n BVec3::new(self.x.is_finite(), self.y.is_finite(), self.z.is_finite())/n }/n/n /// Returns `true` if any elements are `NaN`./n #[inline]/n #[must_use]/n pub fn is_nan(self) -> bool {/n self.x.is_nan() || self.y.is_nan() || self.z.is_nan()/n }/n/n /// Performs `is_nan` on each element of self, returning a vector mask of the results./n ////n /// In other words, this computes `[x.is_nan(), y.is_nan(), ...]`./n #[inline]/n #[must_use]/n pub fn is_nan_mask(self) -> BVec3 {/n BVec3::new(self.x.is_nan(), self.y.is_nan(), self.z.is_nan())/n }/n/n /// Computes the length of `self`./n #[doc(alias = /"magnitude/")]/n #[inline]/n #[must_use]/n pub fn length(self) -> f32 {/n math::sqrt(self.dot(self))/n }/n/n /// Computes the squared length of `self`./n ////n /// This is faster than `length()` as it avoids a square root operation./n #[doc(alias = /"magnitude2/")]/n #[inline]/n #[must_use]/n pub fn length_squared(self) -> f32 {/n self.dot(self)/n }/n/n /// Computes `1.0 / length()`./n ////n /// For valid results, `self` must _not_ be of length zero./n #[inline]/n #[must_use]/n pub fn length_recip(self) -> f32 {/n self.length().recip()/n }/n/n /// Computes the Euclidean distance between two points in space./n #[inline]/n #[must_use]/n pub fn distance(self, rhs: Self) -> f32 {/n (self - rhs).length()/n }/n/n /// Compute the squared euclidean distance between two points in space./n #[inline]/n #[must_use]/n pub fn distance_squared(self, rhs: Self) -> f32 {/n (self - rhs).length_squared()/n }/n/n /// Returns the element-wise quotient of [Euclidean division] of `self` by `rhs`./n #[inline]/n #[must_use]/n pub fn div_euclid(self, rhs: Self) -> Self {/n Self::new(/n math::div_euclid(self.x, rhs.x),/n math::div_euclid(self.y, rhs.y),/n math::div_euclid(self.z, rhs.z),/n )/n }/n/n /// Returns the element-wise remainder of [Euclidean division] of `self` by `rhs`./n ////n /// [Euclidean division]: f32::rem_euclid/n #[inline]/n #[must_use]/n pub fn rem_euclid(self, rhs: Self) -> Self {/n Self::new(/n math::rem_euclid(self.x, rhs.x),/n math::rem_euclid(self.y, rhs.y),/n math::rem_euclid(self.z, rhs.z),/n )/n }/n/n /// Returns `self` normalized to length 1.0./n ////n /// For valid results, `self` must be finite and _not_ of length zero, nor very close to zero./n ////n /// See also [`Self::try_normalize()`] and [`Self::normalize_or_zero()`]./n ////n /// Panics/n ////n /// Will panic if the resulting 
normalized vector is not finite when `glam_assert` is enabled./n #[inline]/n #[must_use]/n pub fn normalize(self) -> Self {/n #[allow(clippy::let_and_return)]/n let normalized = self.mul(self.length_recip());/n glam_assert!(normalized.is_finite());/n normalized/n }/n/n /// Returns `self` normalized to length 1.0 if possible, else returns `None`./n ////n /// In particular, if the input is zero (or very close to zero), or non-finite,/n /// the result of this operation will be `None`./n ////n /// See also [`Self::normalize_or_zero()`]./n #[inline]/n #[must_use]/n pub fn try_normalize(self) -> Option {/n let rcp = self.length_recip();/n if rcp.is_finite() && rcp > 0.0 {/n Some(self * rcp)/n } else {/n None/n }/n }/n/n /// Returns `self` normalized to length 1.0 if possible, else returns a/n /// fallback value./n ////n /// In particular, if the input is zero (or very close to zero), or non-finite,/n /// the result of this operation will be the fallback value./n ////n /// See also [`Self::try_normalize()`]./n #[inline]/n #[must_use]/n pub fn normalize_or(self, fallback: Self) -> Self {/n let rcp = self.length_recip();/n if rcp.is_finite() && rcp > 0.0 {/n self * rcp/n } else {/n fallback/n }/n }/n/n /// Returns `self` normalized to length 1.0 if possible, else returns zero./n ////n /// In particular, if the input is zero (or very close to zero), or non-finite,/n /// the result of this operation will be zero./n ////n /// See also [`Self::try_normalize()`]./n #[inline]/n #[must_use]/n pub fn normalize_or_zero(self) -> Self {/n self.normalize_or(Self::ZERO)/n }/n/n /// Returns `self` normalized to length 1.0 and the length of `self`./n ////n /// If `self` is zero length then `(Self::X, 0.0)` is returned./n #[inline]/n #[must_use]/n pub fn normalize_and_length(self) -> (Self, f32) {/n let length = self.length();/n let rcp = 1.0 / length;/n if rcp.is_finite() && rcp > 0.0 {/n (self * rcp, length)/n } else {/n (Self::X, 0.0)/n }/n }/n/n /// Returns whether `self` is length `1.0` or not./n ////n /// Uses a precision threshold of approximately `1e-4`./n #[inline]/n #[must_use]/n pub fn is_normalized(self) -> bool {/n math::abs(self.length_squared() - 1.0) <= 2e-4/n }/n/n /// Returns the vector projection of `self` onto `rhs`./n ////n /// `rhs` must be of non-zero length./n ////n /// # Panics/n ////n /// Will panic if `rhs` is zero length when `glam_assert` is enabled./n #[inline]/n #[must_use]/n pub fn project_onto(self, rhs: Self) -> Self {/n let other_len_sq_rcp = rhs.dot(rhs).recip();/n glam_assert!(other_len_sq_rcp.is_finite());/n rhs * self.dot(rhs) * other_len_sq_rcp/n }/n/n /// Returns the vector rejection of `self` from `rhs`./n ////n /// The vector rejection is the vector perpendicular to the projection of `self` onto/n /// `rhs`, in rhs words the result of `self - self.project_onto(rhs)`./n ////n /// `rhs` must be of non-zero length./n ////n /// # Panics/n ////n /// Will panic if `rhs` has a length of zero when `glam_assert` is enabled./n #[doc(alias(/"plane/"))]/n #[inline]/n #[must_use]/n pub fn reject_from(self, rhs: Self) -> Self {/n self - self.project_onto(rhs)/n }/n/n /// Returns the vector projection of `self` onto `rhs`./n ////n /// `rhs` must be normalized./n ////n /// # Panics/n ////n /// Will panic if `rhs` is not normalized when `glam_assert` is enabled./n #[inline]/n #[must_use]/n pub fn project_onto_normalized(self, rhs: Self) -> Self {/n glam_assert!(rhs.is_normalized());/n rhs * self.dot(rhs)/n }/n/n /// Returns the vector rejection of `self` from `rhs`./n ////n /// The vector 
rejection is the vector perpendicular to the projection of `self` onto/n /// `rhs`, in rhs words the result of `self - self.project_onto(rhs)`./n ////n /// `rhs` must be normalized./n ////n /// # Panics/n ////n /// Will panic if `rhs` is not normalized when `glam_assert` is enabled./n #[doc(alias(/"plane/"))]/n #[inline]/n #[must_use]/n pub fn reject_from_normalized(self, rhs: Self) -> Self {/n self - self.project_onto_normalized(rhs)/n }/n/n /// Returns a vector containing the nearest integer to a number for each element of `self`./n /// Round half-way cases away from 0.0./n #[inline]/n #[must_use]/n pub fn round(self) -> Self {/n Self {/n x: math::round(self.x),/n y: math::round(self.y),/n z: math::round(self.z),/n }/n }/n/n /// Returns a vector containing the largest integer less than or equal to a number for each/n /// element of `self`./n #[inline]/n #[must_use]/n pub fn floor(self) -> Self {/n Self {/n x: math::floor(self.x),/n y: math::floor(self.y),/n z: math::floor(self.z),/n }/n }/n/n /// Returns a vector containing the smallest integer greater than or equal to a number for/n /// each element of `self`./n #[inline]/n #[must_use]/n pub fn ceil(self) -> Self {/n Self {/n x: math::ceil(self.x),/n y: math::ceil(self.y),/n z: math::ceil(self.z),/n }/n }/n/n /// Returns a vector containing the integer part each element of `self`. This means numbers are/n /// always truncated towards zero./n #[inline]/n #[must_use]/n pub fn trunc(self) -> Self {/n Self {/n x: math::trunc(self.x),/n y: math::trunc(self.y),/n z: math::trunc(self.z),/n }/n }/n/n /// Returns a vector containing the fractional part of the vector as `self - self.trunc()`./n ////n /// Note that this differs from the GLSL implementation of `fract` which returns/n /// `self - self.floor()`./n ////n /// Note that this is fast but not precise for large numbers./n #[inline]/n #[must_use]/n pub fn fract(self) -> Self {/n self - self.trunc()/n }/n/n /// Returns a vector containing the fractional part of the vector as `self - self.floor()`./n ////n /// Note that this differs from the Rust implementation of `fract` which returns/n /// `self - self.trunc()`./n ////n /// Note that this is fast but not precise for large numbers./n #[inline]/n #[must_use]/n pub fn fract_gl(self) -> Self {/n self - self.floor()/n }/n/n /// Returns a vector containing `e^self` (the exponential function) for each element of/n /// `self`./n #[inline]/n #[must_use]/n pub fn exp(self) -> Self {/n Self::new(math::exp(self.x), math::exp(self.y), math::exp(self.z))/n }/n/n /// Returns a vector containing each element of `self` raised to the power of `n`./n #[inline]/n #[must_use]/n pub fn powf(self, n: f32) -> Self {/n Self::new(/n math::powf(self.x, n),/n math::powf(self.y, n),/n math::powf(self.z, n),/n )/n }/n/n /// Returns a vector containing the reciprocal `1.0/n` of each element of `self`./n #[inline]/n #[must_use]/n pub fn recip(self) -> Self {/n Self {/n x: 1.0 / self.x,/n y: 1.0 / self.y,/n z: 1.0 / self.z,/n }/n }/n/n /// Performs a linear interpolation between `self` and `rhs` based on the value `s`./n ////n /// When `s` is `0.0`, the result will be equal to `self`. When `s` is `1.0`, the result/n /// will be equal to `rhs`. When `s` is outside of range `[0, 1]`, the result is linearly/n /// extrapolated./n #[doc(alias = /"mix/")]/n #[inline]/n #[must_use]/n pub fn lerp(self, rhs: Self, s: f32) -> Self {/n self * (1.0 - s) + rhs * s/n }/n/n /// Moves towards `rhs` based on the value `d`./n ////n /// When `d` is `0.0`, the result will be equal to `self`. 
When `d` is equal to/n /// `self.distance(rhs)`, the result will be equal to `rhs`. Will not go past `rhs`./n #[inline]/n #[must_use]/n pub fn move_towards(&self, rhs: Self, d: f32) -> Self {/n let a = rhs - *self;/n let len = a.length();/n if len <= d || len <= 1e-4 {/n return rhs;/n }/n *self + a / len * d/n }/n/n /// Calculates the midpoint between `self` and `rhs`./n ////n /// The midpoint is the average of, or halfway point between, two vectors./n /// `a.midpoint(b)` should yield the same result as `a.lerp(b, 0.5)`/n /// while being slightly cheaper to compute./n #[inline]/n pub fn midpoint(self, rhs: Self) -> Self {/n (self + rhs) * 0.5/n }/n/n /// Returns true if the absolute difference of all elements between `self` and `rhs` is/n /// less than or equal to `max_abs_diff`./n ////n /// This can be used to compare if two vectors contain similar elements. It works best when/n /// comparing with a known value. The `max_abs_diff` that should be used used depends on/n /// the values being compared against./n ////n /// For more see/n /// [comparing floating point numbers](https://randomascii.wordpress.com/2012/02/25/comparing-floating-point-numbers-2012-edition/)./n #[inline]/n #[must_use]/n pub fn abs_diff_eq(self, rhs: Self, max_abs_diff: f32) -> bool {/n self.sub(rhs).abs().cmple(Self::splat(max_abs_diff)).all()/n }/n/n /// Returns a vector with a length no less than `min` and no more than `max`./n ////n /// # Panics/n ////n /// Will panic if `min` is greater than `max`, or if either `min` or `max` is negative, when `glam_assert` is enabled./n #[inline]/n #[must_use]/n pub fn clamp_length(self, min: f32, max: f32) -> Self {/n glam_assert!(0.0 <= min);/n glam_assert!(min <= max);/n let length_sq = self.length_squared();/n if length_sq < min * min {/n min * (self / math::sqrt(length_sq))/n } else if length_sq > max * max {/n max * (self / math::sqrt(length_sq))/n } else {/n self/n }/n }/n/n /// Returns a vector with a length no more than `max`./n ////n /// # Panics/n ////n /// Will panic if `max` is negative when `glam_assert` is enabled./n #[inline]/n #[must_use]/n pub fn clamp_length_max(self, max: f32) -> Self {/n glam_assert!(0.0 <= max);/n let length_sq = self.length_squared();/n if length_sq > max * max {/n max * (self / math::sqrt(length_sq))/n } else {/n self/n }/n }/n/n /// Returns a vector with a length no less than `min`./n ////n /// # Panics/n ////n /// Will panic if `min` is negative when `glam_assert` is enabled./n #[inline]/n #[must_use]/n pub fn clamp_length_min(self, min: f32) -> Self {/n glam_assert!(0.0 <= min);/n let length_sq = self.length_squared();/n if length_sq < min * min {/n min * (self / math::sqrt(length_sq))/n } else {/n self/n }/n }/n/n /// Fused multiply-add. Computes `(self * a) + b` element-wise with only one rounding/n /// error, yielding a more accurate result than an unfused multiply-add./n ////n /// Using `mul_add` *may* be more performant than an unfused multiply-add if the target/n /// architecture has a dedicated fma CPU instruction. 
However, this is not always true,/n /// and will be heavily dependant on designing algorithms with specific target hardware in/n /// mind./n #[inline]/n #[must_use]/n pub fn mul_add(self, a: Self, b: Self) -> Self {/n Self::new(/n math::mul_add(self.x, a.x, b.x),/n math::mul_add(self.y, a.y, b.y),/n math::mul_add(self.z, a.z, b.z),/n )/n }/n/n /// Returns the reflection vector for a given incident vector `self` and surface normal/n /// `normal`./n ////n /// `normal` must be normalized./n ////n /// # Panics/n ////n /// Will panic if `normal` is not normalized when `glam_assert` is enabled./n #[inline]/n #[must_use]/n pub fn reflect(self, normal: Self) -> Self {/n glam_assert!(normal.is_normalized());/n self - 2.0 * self.dot(normal) * normal/n }/n/n /// Returns the refraction direction for a given incident vector `self`, surface normal/n /// `normal` and ratio of indices of refraction, `eta`. When total internal reflection occurs,/n /// a zero vector will be returned./n ////n /// `self` and `normal` must be normalized./n ////n /// # Panics/n ////n /// Will panic if `self` or `normal` is not normalized when `glam_assert` is enabled./n #[inline]/n #[must_use]/n pub fn refract(self, normal: Self, eta: f32) -> Self {/n glam_assert!(self.is_normalized());/n glam_assert!(normal.is_normalized());/n let n_dot_i = normal.dot(self);/n let k = 1.0 - eta * eta * (1.0 - n_dot_i * n_dot_i);/n if k >= 0.0 {/n eta * self - (eta * n_dot_i + math::sqrt(k)) * normal/n } else {/n Self::ZERO/n }/n }/n/n /// Returns the angle (in radians) between two vectors in the range `[0, +π]`./n ////n /// The inputs do not need to be unit vectors however they must be non-zero./n #[inline]/n #[must_use]/n pub fn angle_between(self, rhs: Self) -> f32 {/n math::acos_approx(/n self.dot(rhs)/n .div(math::sqrt(self.length_squared().mul(rhs.length_squared()))),/n )/n }/n/n /// Rotates towards `rhs` up to `max_angle` (in radians)./n ////n /// When `max_angle` is `0.0`, the result will be equal to `self`. When `max_angle` is equal to/n /// `self.angle_between(rhs)`, the result will be parallel to `rhs`. If `max_angle` is negative,/n /// rotates towards the exact opposite of `rhs`. Will not go past the target./n #[inline]/n #[must_use]/n pub fn rotate_towards(self, rhs: Self, max_angle: f32) -> Self {/n let angle_between = self.angle_between(rhs);/n // When `max_angle < 0`, rotate no further than `PI` radians away/n let angle = max_angle.clamp(angle_between - core::f32::consts::PI, angle_between);/n let axis = self/n .cross(rhs)/n .try_normalize()/n .unwrap_or_else(|| self.any_orthogonal_vector().normalize());/n Quat::from_axis_angle(axis, angle) * self/n }/n/n /// Returns some vector that is orthogonal to the given one./n ////n /// The input vector must be finite and non-zero./n ////n /// The output vector is not necessarily unit length. 
For that use/n /// [`Self::any_orthonormal_vector()`] instead./n #[inline]/n #[must_use]/n pub fn any_orthogonal_vector(&self) -> Self {/n // This can probably be optimized/n if math::abs(self.x) > math::abs(self.y) {/n Self::new(-self.z, 0.0, self.x) // self.cross(Self::Y)/n } else {/n Self::new(0.0, self.z, -self.y) // self.cross(Self::X)/n }/n }/n/n /// Returns any unit vector that is orthogonal to the given one./n ////n /// The input vector must be unit length./n ////n /// # Panics/n ////n /// Will panic if `self` is not normalized when `glam_assert` is enabled./n #[inline]/n #[must_use]/n pub fn any_orthonormal_vector(&self) -> Self {/n glam_assert!(self.is_normalized());/n // From https://graphics.pixar.com/library/OrthonormalB/paper.pdf/n let sign = math::signum(self.z);/n let a = -1.0 / (sign + self.z);/n let b = self.x * self.y * a;/n Self::new(b, sign + self.y * self.y * a, -self.y)/n }/n/n /// Given a unit vector return two other vectors that together form an orthonormal/n /// basis. That is, all three vectors are orthogonal to each other and are normalized./n ////n /// # Panics/n ////n /// Will panic if `self` is not normalized when `glam_assert` is enabled./n #[inline]/n #[must_use]/n pub fn any_orthonormal_pair(&self) -> (Self, Self) {/n glam_assert!(self.is_normalized());/n // From https://graphics.pixar.com/library/OrthonormalB/paper.pdf/n let sign = math::signum(self.z);/n let a = -1.0 / (sign + self.z);/n let b = self.x * self.y * a;/n (/n Self::new(1.0 + sign * self.x * self.x * a, sign * b, -sign * self.x),/n Self::new(b, sign + self.y * self.y * a, -self.y),/n )/n }/n/n /// Performs a spherical linear interpolation between `self` and `rhs` based on the value `s`./n ////n /// When `s` is `0.0`, the result will be equal to `self`. When `s` is `1.0`, the result/n /// will be equal to `rhs`. When `s` is outside of range `[0, 1]`, the result is linearly/n /// extrapolated./n #[inline]/n #[must_use]/n pub fn slerp(self, rhs: Self, s: f32) -> Self {/n let self_length = self.length();/n let rhs_length = rhs.length();/n // Cosine of the angle between the vectors [-1, 1], or NaN if either vector has a zero length/n let dot = self.dot(rhs) / (self_length * rhs_length);/n // If dot is close to 1 or -1, or is NaN the calculations for t1 and t2 break down/n if math::abs(dot) < 1.0 - 3e-7 {/n // Angle between the vectors [0, +π]/n let theta = math::acos_approx(dot);/n // Sine of the angle between vectors [0, 1]/n let sin_theta = math::sin(theta);/n let t1 = math::sin(theta * (1. 
- s));/n let t2 = math::sin(theta * s);/n/n // Interpolate vector lengths/n let result_length = self_length.lerp(rhs_length, s);/n // Scale the vectors to the target length and interpolate them/n return (self * (result_length / self_length) * t1/n + rhs * (result_length / rhs_length) * t2)/n * sin_theta.recip();/n }/n if dot < 0.0 {/n // Vectors are almost parallel in opposing directions/n/n // Create a rotation from self to rhs along some axis/n let axis = self.any_orthogonal_vector().normalize();/n let rotation = Quat::from_axis_angle(axis, core::f32::consts::PI * s);/n // Interpolate vector lengths/n let result_length = self_length.lerp(rhs_length, s);/n rotation * self * (result_length / self_length)/n } else {/n // Vectors are almost parallel in the same direction, or dot was NaN/n self.lerp(rhs, s)/n }/n }/n/n /// Casts all elements of `self` to `f64`./n #[inline]/n #[must_use]/n pub fn as_dvec3(&self) -> crate::DVec3 {/n crate::DVec3::new(self.x as f64, self.y as f64, self.z as f64)/n }/n/n /// Casts all elements of `self` to `i8`./n #[inline]/n #[must_use]/n pub fn as_i8vec3(&self) -> crate::I8Vec3 {/n crate::I8Vec3::new(self.x as i8, self.y as i8, self.z as i8)/n }/n/n /// Casts all elements of `self` to `u8`./n #[inline]/n #[must_use]/n pub fn as_u8vec3(&self) -> crate::U8Vec3 {/n crate::U8Vec3::new(self.x as u8, self.y as u8, self.z as u8)/n }/n/n /// Casts all elements of `self` to `i16`./n #[inline]/n #[must_use]/n pub fn as_i16vec3(&self) -> crate::I16Vec3 {/n crate::I16Vec3::new(self.x as i16, self.y as i16, self.z as i16)/n }/n/n /// Casts all elements of `self` to `u16`./n #[inline]/n #[must_use]/n pub fn as_u16vec3(&self) -> crate::U16Vec3 {/n crate::U16Vec3::new(self.x as u16, self.y as u16, self.z as u16)/n }/n/n /// Casts all elements of `self` to `i32`./n #[inline]/n #[must_use]/n pub fn as_ivec3(&self) -> crate::IVec3 {/n crate::IVec3::new(self.x as i32, self.y as i32, self.z as i32)/n }/n/n /// Casts all elements of `self` to `u32`./n #[inline]/n #[must_use]/n pub fn as_uvec3(&self) -> crate::UVec3 {/n crate::UVec3::new(self.x as u32, self.y as u32, self.z as u32)/n }/n/n /// Casts all elements of `self` to `i64`./n #[inline]/n #[must_use]/n pub fn as_i64vec3(&self) -> crate::I64Vec3 {/n crate::I64Vec3::new(self.x as i64, self.y as i64, self.z as i64)/n }/n/n /// Casts all elements of `self` to `u64`./n #[inline]/n #[must_use]/n pub fn as_u64vec3(&self) -> crate::U64Vec3 {/n crate::U64Vec3::new(self.x as u64, self.y as u64, self.z as u64)/n }/n/n /// Casts all elements of `self` to `usize`./n #[inline]/n #[must_use]/n pub fn as_usizevec3(&self) -> crate::USizeVec3 {/n crate::USizeVec3::new(self.x as usize, self.y as usize, self.z as usize)/n }/n}/n/nimpl Default for Vec3 {/n #[inline(always)]/n fn default() -> Self {/n Self::ZERO/n }/n}/n/nimpl Div for Vec3 {/n type Output = Self;/n #[inline]/n fn div(self, rhs: Self) -> Self {/n Self {/n x: self.x.div(rhs.x),/n y: self.y.div(rhs.y),/n z: self.z.div(rhs.z),/n }/n }/n}/n/nimpl Div<&Vec3> for Vec3 {/n type Output = Vec3;/n #[inline]/n fn div(self, rhs: &Vec3) -> Vec3 {/n self.div(*rhs)/n }/n}/n/nimpl Div<&Vec3> for &Vec3 {/n type Output = Vec3;/n #[inline]/n fn div(self, rhs: &Vec3) -> Vec3 {/n (*self).div(*rhs)/n }/n}/n/nimpl Div for &Vec3 {/n type Output = Vec3;/n #[inline]/n fn div(self, rhs: Vec3) -> Vec3 {/n (*self).div(rhs)/n }/n}/n/nimpl DivAssign for Vec3 {/n #[inline]/n fn div_assign(&mut self, rhs: Self) {/n self.x.div_assign(rhs.x);/n self.y.div_assign(rhs.y);/n self.z.div_assign(rhs.z);/n }/n}/n/nimpl 
[... remainder of the embedded glam vec3.rs source elided: mechanically inlined by OpSource, it is the crate's Vec3 operator boilerplate (Div/Mul/Add/Sub/Rem with their *Assign and reference variants over Vec3 and f32), AsRef/AsMut<[f32; 3]>, Sum/Product, Neg, Index/IndexMut, Display/Debug, and the From conversions ...]"
+OpSource Unknown 0 %4 "// build-pass\n// compile-flags: -C llvm-args=--disassemble-globals\n// CHECK: OpDecorate %{{[0-9]+}} ArrayStride 16\n\nuse spirv_std::spirv;\n\n// Test that array stride respects alignment requirements\n// vec3 has size 12 bytes but alignment 16 bytes\n// So array stride should be 16, not 12\n#[derive(Copy, Clone)]\npub struct AlignedBuffer {\n    data: [spirv_std::glam::Vec3; 4],\n}\n\n#[spirv(compute(threads(1)))]\npub fn main_cs(\n    #[spirv(storage_buffer, descriptor_set = 0, binding = 0)] storage: &mut AlignedBuffer,\n) {\n    storage.data[0] = spirv_std::glam::Vec3::new(1.0, 2.0, 3.0);\n}\n"
+OpName %5 "AlignedBuffer"
+OpMemberName %5 0 "data"
+OpName %2 "storage"
+OpDecorate %6 ArrayStride 16
+OpMemberDecorate %5 0 Offset 0
+OpDecorate %7 Block
+OpMemberDecorate %7 0 Offset 0
+OpDecorate %2 Binding 0
+OpDecorate %2 DescriptorSet 0
+%8 = OpTypeFloat 32
+%9 = OpTypeVector %8 3
+%10 = OpTypeInt 32 0
+%11 = OpConstant %10 4
+%6 = OpTypeArray %9 %11
+%5 = OpTypeStruct %6
+%7 = OpTypeStruct %5
+%12 = OpTypePointer StorageBuffer %7
+%13 = OpTypeVoid
+%14 = OpTypeFunction %13
+%15 = OpTypePointer StorageBuffer %5
+%2 = OpVariable %12 StorageBuffer
+%16 = OpConstant %10 0
+%17 = OpConstant %8 1065353216
+%18 = OpConstant %8 1073741824
+%19 = OpConstant %8 1077936128
+%20 = OpTypeBool
+%21 = OpTypePointer StorageBuffer %9
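The expectation above pins down the core behavior of this change: an array of `Vec3` in a storage buffer is decorated `ArrayStride 16`, not 12, because the stride is the element size rounded up to the element alignment. A minimal standalone sketch of that rounding rule (illustrative names, not the crate's API):

fn round_up_to_align(size: u64, align: u64) -> u64 {
    // `align` is a power of two, so round up with the usual bit trick.
    (size + align - 1) & !(align - 1)
}

fn main() {
    assert_eq!(round_up_to_align(12, 16), 16); // Vec3: size 12, align 16 -> stride 16
    assert_eq!(round_up_to_align(4, 4), 4);    // f32: stride equals size
}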
diff --git a/tests/compiletests/ui/dis/array_with_stride.rs b/tests/compiletests/ui/dis/array_with_stride.rs
new file mode 100644
index 0000000000..00c2c0229e
--- /dev/null
+++ b/tests/compiletests/ui/dis/array_with_stride.rs
@@ -0,0 +1,19 @@
+// build-pass
+// compile-flags: -C llvm-args=--disassemble-globals
+// CHECK: OpDecorate %{{[0-9]+}} ArrayStride
+
+use spirv_std::spirv;
+
+// Arrays in storage buffers should have ArrayStride decoration
+#[derive(Copy, Clone)]
+pub struct StorageBuffer {
+    data: [f32; 4],
+}
+
+#[spirv(compute(threads(1)))]
+pub fn main_cs(
+    #[spirv(storage_buffer, descriptor_set = 0, binding = 0)] storage: &mut StorageBuffer,
+) {
+    storage.data[0] = 1.0;
+    storage.data[1] = 2.0;
+}
diff --git a/tests/compiletests/ui/dis/array_with_stride.stderr b/tests/compiletests/ui/dis/array_with_stride.stderr
new file mode 100644
index 0000000000..a380c91499
--- /dev/null
+++ b/tests/compiletests/ui/dis/array_with_stride.stderr
@@ -0,0 +1,39 @@
+OpCapability Shader
+OpCapability Float64
+OpCapability Int64
+OpCapability Int16
+OpCapability Int8
+OpCapability ShaderClockKHR
+OpCapability VulkanMemoryModel
+OpExtension "SPV_KHR_shader_clock"
+OpMemoryModel Logical Vulkan
+OpEntryPoint GLCompute %1 "main_cs" %2
+OpExecutionMode %1 LocalSize 1 1 1
+%3 = OpString "$OPSTRING_FILENAME/array_with_stride.rs"
+OpSource Unknown 0 %3 "// build-pass\n// compile-flags: -C llvm-args=--disassemble-globals\n// CHECK: OpDecorate %{{[0-9]+}} ArrayStride\n\nuse spirv_std::spirv;\n\n// Arrays in storage buffers should have ArrayStride decoration\n#[derive(Copy, Clone)]\npub struct StorageBuffer {\n    data: [f32; 4],\n}\n\n#[spirv(compute(threads(1)))]\npub fn main_cs(\n    #[spirv(storage_buffer, descriptor_set = 0, binding = 0)] storage: &mut StorageBuffer,\n) {\n    storage.data[0] = 1.0;\n    storage.data[1] = 2.0;\n}\n"
+OpName %4 "StorageBuffer"
+OpMemberName %4 0 "data"
+OpName %2 "storage"
+OpDecorate %5 ArrayStride 4
+OpMemberDecorate %4 0 Offset 0
+OpDecorate %6 Block
+OpMemberDecorate %6 0 Offset 0
+OpDecorate %2 Binding 0
+OpDecorate %2 DescriptorSet 0
+%7 = OpTypeFloat 32
+%8 = OpTypeInt 32 0
+%9 = OpConstant %8 4
+%5 = OpTypeArray %7 %9
+%4 = OpTypeStruct %5
+%6 = OpTypeStruct %4
+%10 = OpTypePointer StorageBuffer %6
+%11 = OpTypeVoid
+%12 = OpTypeFunction %11
+%13 = OpTypePointer StorageBuffer %4
+%2 = OpVariable %10 StorageBuffer
+%14 = OpConstant %8 0
+%15 = OpTypeBool
+%16 = OpTypePointer StorageBuffer %7
+%17 = OpConstant %7 1065353216
+%18 = OpConstant %8 1
+%19 = OpConstant %7 1073741824
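Together, the two expectations above and the pointer-array test below encode one rule: decorate an array with `ArrayStride` only when its element type has a physical size, and compute that stride as size rounded up to alignment. A hedged sketch of the decision, with an illustrative enum standing in for the crate's actual `SpirvType`/`physical_size` machinery:

// Illustrative stand-in for the element types exercised by these tests.
#[derive(Clone, Copy, Debug, PartialEq)]
enum Elem {
    F32,     // size 4, align 4
    Vec3,    // size 12, align 16
    Pointer, // no physical size under logical addressing
}

fn stride_for(elem: Elem) -> Option<u32> {
    let (size, align) = match elem {
        Elem::F32 => (4u32, 4u32),
        Elem::Vec3 => (12, 16),
        Elem::Pointer => return None, // no ArrayStride decoration at all
    };
    Some(size.div_ceil(align) * align) // round size up to alignment
}

fn main() {
    assert_eq!(stride_for(Elem::F32), Some(4));   // ArrayStride 4, as checked above
    assert_eq!(stride_for(Elem::Vec3), Some(16)); // the alignment case: ArrayStride 16
    assert_eq!(stride_for(Elem::Pointer), None);  // the CHECK-NOT case below
}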
diff --git a/tests/compiletests/ui/dis/array_without_stride.rs b/tests/compiletests/ui/dis/array_without_stride.rs
new file mode 100644
index 0000000000..f9a9e3a892
--- /dev/null
+++ b/tests/compiletests/ui/dis/array_without_stride.rs
@@ -0,0 +1,21 @@
+// build-pass
+// compile-flags: -C llvm-args=--disassemble-globals
+// CHECK-NOT: ArrayStride
+
+use spirv_std::spirv;
+
+// Arrays containing pointers should NOT have ArrayStride decoration
+// This tests the case from issue #266 where arrays in function/private storage
+// shouldn't have explicit layout decorations
+
+// Use a struct with an array of pointers to ensure the array type is preserved
+#[derive(Copy, Clone)]
+pub struct WorkgroupData {
+    pointers: [*mut f32; 4],
+}
+
+#[spirv(compute(threads(1)))]
+pub fn main_cs(#[spirv(workgroup)] shared: &mut WorkgroupData) {
+    // Just read the pointer array to ensure it's used
+    let _first_ptr = shared.pointers[0];
+}
diff --git a/tests/compiletests/ui/dis/array_without_stride.stderr b/tests/compiletests/ui/dis/array_without_stride.stderr
new file mode 100644
index 0000000000..6d754f9edc
--- /dev/null
+++ b/tests/compiletests/ui/dis/array_without_stride.stderr
@@ -0,0 +1,30 @@
+OpCapability Shader
+OpCapability Float64
+OpCapability Int64
+OpCapability Int16
+OpCapability Int8
+OpCapability ShaderClockKHR
+OpCapability VulkanMemoryModel
+OpExtension "SPV_KHR_shader_clock"
+OpMemoryModel Logical Vulkan
+OpEntryPoint GLCompute %1 "main_cs" %2
+OpExecutionMode %1 LocalSize 1 1 1
+%3 = OpString "$OPSTRING_FILENAME/array_without_stride.rs"
+OpSource Unknown 0 %3 "// build-pass\n// compile-flags: -C llvm-args=--disassemble-globals\n// CHECK-NOT: ArrayStride\n\nuse spirv_std::spirv;\n\n// Arrays containing pointers should NOT have ArrayStride decoration\n// This tests the case from issue #266 where arrays in function/private storage\n// shouldn't have explicit layout decorations\n\n// Use a struct with an array of pointers to ensure the array type is preserved\n#[derive(Copy, Clone)]\npub struct WorkgroupData {\n    pointers: [*mut f32; 4],\n}\n\n#[spirv(compute(threads(1)))]\npub fn main_cs(#[spirv(workgroup)] shared: &mut WorkgroupData) {\n    // Just read the pointer array to ensure it's used\n    let _first_ptr = shared.pointers[0];\n}\n"
+OpName %4 "WorkgroupData"
+OpMemberName %4 0 "pointers"
+OpName %2 "shared"
+OpMemberDecorate %4 0 Offset 0
+%5 = OpTypeFloat 32
+%6 = OpTypePointer Function %5
+%7 = OpTypeInt 32 0
+%8 = OpConstant %7 4
+%9 = OpTypeArray %6 %8
+%4 = OpTypeStruct %9
+%10 = OpTypePointer Workgroup %4
+%11 = OpTypeVoid
+%12 = OpTypeFunction %11
+%13 = OpTypeBool
+%14 = OpConstant %7 0
+%15 = OpTypePointer Workgroup %6
+%2 = OpVariable %10 Workgroup
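The `CHECK-NOT: ArrayStride` expectation is the mirror image of the two positive tests: a `[*mut f32; 4]` in `Workgroup` storage must come through with no `ArrayStride` at all, since logical pointers have no physical size. As a rough illustration only (the harness itself matches FileCheck-style patterns, not this), the property being asserted boils down to:

fn has_array_stride(disassembly: &str) -> bool {
    disassembly
        .lines()
        .any(|line| line.contains("OpDecorate") && line.contains("ArrayStride"))
}

fn main() {
    let workgroup = "OpMemberDecorate %4 0 Offset 0\n%9 = OpTypeArray %6 %8";
    assert!(!has_array_stride(workgroup)); // pointer array: no stride decoration

    let ssbo = "OpDecorate %5 ArrayStride 4\n%5 = OpTypeArray %7 %9";
    assert!(has_array_stride(ssbo)); // storage-buffer array: stride present
}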