core/portable-simd/crates/core_simd/src/vector.rs
use crate::simd::{
    LaneCount, Mask, MaskElement, SupportedLaneCount, Swizzle,
    cmp::SimdPartialOrd,
    num::SimdUint,
    ptr::{SimdConstPtr, SimdMutPtr},
};

/// A SIMD vector with the shape of `[T; N]` but the operations of `T`.
///
/// `Simd<T, N>` supports the operators (+, *, etc.) that `T` does in "elementwise" fashion.
/// These take the element at each index from the left-hand side and right-hand side,
/// perform the operation, then return the result in the same index in a vector of equal size.
/// However, `Simd` differs from normal iteration and normal arrays:
/// - `Simd<T, N>` executes `N` operations in a single step with no `break`s
/// - `Simd<T, N>` can have an alignment greater than `T`, for better mechanical sympathy
///
/// By always imposing these constraints on `Simd`, it is easier to compile elementwise operations
/// into machine instructions that can themselves be executed in parallel.
///
/// ```rust
/// # #![feature(portable_simd)]
/// # use core::simd::Simd;
/// # use core::array;
/// let a: [i32; 4] = [-2, 0, 2, 4];
/// let b = [10, 9, 8, 7];
/// let sum = array::from_fn(|i| a[i] + b[i]);
/// let prod = array::from_fn(|i| a[i] * b[i]);
///
/// // `Simd<T, N>` implements `From<[T; N]>`
/// let (v, w) = (Simd::from(a), Simd::from(b));
/// // Which means arrays implement `Into<Simd<T, N>>`.
/// assert_eq!(v + w, sum.into());
/// assert_eq!(v * w, prod.into());
/// ```
///
/// `Simd` with integer elements treats operators as wrapping, as if `T` was [`Wrapping<T>`].
/// Thus, `Simd` does not implement `wrapping_add`, because that is the default behavior.
/// This means there is no warning on overflows, even in "debug" builds.
/// For most applications where `Simd` is appropriate, it is "not a bug" to wrap,
/// and even "debug builds" are unlikely to tolerate the loss of performance.
/// You may want to consider using explicitly checked arithmetic if such is required.
/// Division by zero on integers still causes a panic, so
/// you may want to consider using `f32` or `f64` if that is unacceptable.
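///
/// For example, a small sketch of the wrapping behavior described above:
///
/// ```rust
/// # #![feature(portable_simd)]
/// # use core::simd::Simd;
/// let x = Simd::from_array([0u8, 1, 254, 255]);
/// // Integer operators wrap rather than panic, even in debug builds.
/// assert_eq!(x + Simd::splat(1), Simd::from_array([1, 2, 255, 0]));
/// ```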
///
/// [`Wrapping<T>`]: core::num::Wrapping
///
/// # Layout
/// `Simd<T, N>` has a layout similar to `[T; N]` (identical "shapes"), with a greater alignment.
/// `[T; N]` is aligned to `T`, but `Simd<T, N>` will have an alignment based on both `T` and `N`.
/// Thus it is sound to [`transmute`] `Simd<T, N>` to `[T; N]` and should optimize to "zero cost",
/// but the reverse transmutation may require a copy the compiler cannot simply elide.
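///
/// For example, a brief sketch of the sound direction (using a power-of-two lane count,
/// so the two types have the same size):
///
/// ```rust
/// # #![feature(portable_simd)]
/// # use core::simd::Simd;
/// let v = Simd::from_array([1u32, 2, 3, 4]);
/// // Sound: `Simd<u32, 4>` and `[u32; 4]` have the same shape, and alignment only decreases.
/// let arr: [u32; 4] = unsafe { core::mem::transmute(v) };
/// assert_eq!(arr, [1, 2, 3, 4]);
/// ```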
///
/// # ABI "Features"
/// Due to Rust's safety guarantees, `Simd<T, N>` is currently passed and returned via memory,
/// not SIMD registers, except as an optimization. Using `#[inline]` on functions that accept
/// `Simd<T, N>` or return it is recommended, at the cost of code generation time, as
/// inlining SIMD-using functions can omit a large function prolog or epilog and thus
/// improve both speed and code size. The need for this may be corrected in the future.
///
/// Using `#[inline(always)]` still requires additional care.
///
/// # Safe SIMD with Unsafe Rust
///
/// Operations with `Simd` are typically safe, but there are many reasons to want to combine SIMD with `unsafe` code.
/// Care must be taken to respect differences between `Simd` and other types it may be transformed into or derived from.
/// In particular, the layout of `Simd<T, N>` may be similar to `[T; N]`, and may allow some transmutations,
/// but references to `[T; N]` are not interchangeable with those to `Simd<T, N>`.
/// Thus, when using `unsafe` Rust to read and write `Simd<T, N>` through [raw pointers], it is a good idea to first try with
/// [`read_unaligned`] and [`write_unaligned`]. This is because:
/// - [`read`] and [`write`] require full alignment (in this case, `Simd<T, N>`'s alignment)
/// - `Simd<T, N>` is often read from or written to [`[T]`](slice) and other types aligned to `T`
/// - combining these actions violates the `unsafe` contract and explodes the program into
///   a puff of **undefined behavior**
/// - the compiler can implicitly adjust layouts to make unaligned reads or writes fully aligned
///   if it sees the optimization
/// - most contemporary processors with "aligned" and "unaligned" read and write instructions
///   exhibit no performance difference if the "unaligned" variant is aligned at runtime
///
/// Fewer obligations mean unaligned reads and writes are less likely to make the program unsound,
/// and may be just as fast as stricter alternatives.
/// When trying to guarantee alignment, [`[T]::as_simd`][as_simd] is an option for
/// converting `[T]` to `[Simd<T, N>]`, and allows soundly operating on an aligned SIMD body,
/// but it may cost more time when handling the scalar head and tail.
/// If these are not enough, it is most ideal to design data structures to be already aligned
/// to `align_of::<Simd<T, N>>()` before using `unsafe` Rust to read or write.
/// Other ways to compensate for these facts, like materializing `Simd` to or from an array first,
/// are handled by safe methods like [`Simd::from_array`] and [`Simd::from_slice`].
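///
/// For example, a minimal sketch of an unaligned read from a slice that is only aligned to `T`:
///
/// ```rust
/// # #![feature(portable_simd)]
/// # use core::simd::Simd;
/// let data = [1u32, 2, 3, 4, 5];
/// // `data` is only guaranteed to be aligned to `u32`, so prefer `read_unaligned` here.
/// let v: Simd<u32, 4> = unsafe { data.as_ptr().cast::<Simd<u32, 4>>().read_unaligned() };
/// assert_eq!(v.as_array(), &[1, 2, 3, 4]);
/// ```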
///
/// [`transmute`]: core::mem::transmute
/// [raw pointers]: pointer
/// [`read_unaligned`]: pointer::read_unaligned
/// [`write_unaligned`]: pointer::write_unaligned
/// [`read`]: pointer::read
/// [`write`]: pointer::write
/// [as_simd]: slice::as_simd
//
// NOTE: Accessing the inner array directly in any way (e.g. by using the `.0` field syntax) or
// directly constructing an instance of the type (i.e. `let vector = Simd(array)`) should be
// avoided, as it will likely become illegal on `#[repr(simd)]` structs in the future. It also
// causes rustc to emit illegal LLVM IR in some cases.
#[repr(simd, packed)]
pub struct Simd<T, const N: usize>([T; N])
where
    LaneCount<N>: SupportedLaneCount,
    T: SimdElement;

impl<T, const N: usize> Simd<T, N>
where
    LaneCount<N>: SupportedLaneCount,
    T: SimdElement,
{
    /// Number of elements in this vector.
    pub const LEN: usize = N;

    /// Returns the number of elements in this SIMD vector.
    ///
    /// # Examples
    ///
    /// ```
    /// # #![feature(portable_simd)]
    /// # #[cfg(feature = "as_crate")] use core_simd::simd;
    /// # #[cfg(not(feature = "as_crate"))] use core::simd;
    /// # use simd::u32x4;
    /// let v = u32x4::splat(0);
    /// assert_eq!(v.len(), 4);
    /// ```
    #[inline]
    #[allow(clippy::len_without_is_empty)]
    pub const fn len(&self) -> usize {
        Self::LEN
    }

    /// Constructs a new SIMD vector with all elements set to the given value.
    ///
    /// # Examples
    ///
    /// ```
    /// # #![feature(portable_simd)]
    /// # #[cfg(feature = "as_crate")] use core_simd::simd;
    /// # #[cfg(not(feature = "as_crate"))] use core::simd;
    /// # use simd::u32x4;
    /// let v = u32x4::splat(8);
    /// assert_eq!(v.as_array(), &[8, 8, 8, 8]);
    /// ```
    #[inline]
    #[rustc_const_unstable(feature = "portable_simd", issue = "86656")]
    pub const fn splat(value: T) -> Self {
        const fn splat_const<T, const N: usize>(value: T) -> Simd<T, N>
        where
            T: SimdElement,
            LaneCount<N>: SupportedLaneCount,
        {
            Simd::from_array([value; N])
        }

        fn splat_rt<T, const N: usize>(value: T) -> Simd<T, N>
        where
            T: SimdElement,
            LaneCount<N>: SupportedLaneCount,
        {
            // This is preferred over `[value; N]`, since it's explicitly a splat:
            // https://github.com/rust-lang/rust/issues/97804
            struct Splat;
            impl<const N: usize> Swizzle<N> for Splat {
                const INDEX: [usize; N] = [0; N];
            }

            Splat::swizzle::<T, 1>(Simd::<T, 1>::from([value]))
        }

        core::intrinsics::const_eval_select((value,), splat_const, splat_rt)
    }

    /// Returns an array reference containing the entire SIMD vector.
    ///
    /// # Examples
    ///
    /// ```
    /// # #![feature(portable_simd)]
    /// # use core::simd::{Simd, u64x4};
    /// let v: u64x4 = Simd::from_array([0, 1, 2, 3]);
    /// assert_eq!(v.as_array(), &[0, 1, 2, 3]);
    /// ```
    #[inline]
    pub const fn as_array(&self) -> &[T; N] {
        // SAFETY: `Simd<T, N>` is just an overaligned `[T; N]` with
        // potential padding at the end, so pointer casting to a
        // `&[T; N]` is safe.
        //
        // NOTE: This deliberately doesn't just use `&self.0`, see the comment
        // on the struct definition for details.
        unsafe { &*(self as *const Self as *const [T; N]) }
    }

    /// Returns a mutable array reference containing the entire SIMD vector.
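    ///
    /// A small illustrative example of mutating one element in place:
    ///
    /// ```
    /// # #![feature(portable_simd)]
    /// # use core::simd::{Simd, u64x4};
    /// let mut v: u64x4 = Simd::from_array([0, 1, 2, 3]);
    /// v.as_mut_array()[0] = 42;
    /// assert_eq!(v.as_array(), &[42, 1, 2, 3]);
    /// ```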
    #[inline]
    pub fn as_mut_array(&mut self) -> &mut [T; N] {
        // SAFETY: `Simd<T, N>` is just an overaligned `[T; N]` with
        // potential padding at the end, so pointer casting to a
        // `&mut [T; N]` is safe.
        //
        // NOTE: This deliberately doesn't just use `&mut self.0`, see the comment
        // on the struct definition for details.
        unsafe { &mut *(self as *mut Self as *mut [T; N]) }
    }

    /// Loads a vector from an array of `T`.
    ///
    /// This function is necessary since `repr(simd)` has padding for non-power-of-2 vectors (at the time of writing).
    /// With padding, `read_unaligned` will read past the end of an array of N elements.
    ///
    /// # Safety
    /// Reading `ptr` must be safe, as if by `<*const [T; N]>::read`.
    #[inline]
    const unsafe fn load(ptr: *const [T; N]) -> Self {
        // There are potentially simpler ways to write this function, but this should result in
        // LLVM `load <N x T>`

        let mut tmp = core::mem::MaybeUninit::<Self>::uninit();
        // SAFETY: `Simd<T, N>` always contains `N` elements of type `T`. It may have padding
        // which does not need to be initialized. The safety of reading `ptr` is ensured by the
        // caller.
        unsafe {
            core::ptr::copy_nonoverlapping(ptr, tmp.as_mut_ptr().cast(), 1);
            tmp.assume_init()
        }
    }

    /// Store a vector to an array of `T`.
    ///
    /// See `load` as to why this function is necessary.
    ///
    /// # Safety
    /// Writing to `ptr` must be safe, as if by `<*mut [T; N]>::write`.
    #[inline]
    const unsafe fn store(self, ptr: *mut [T; N]) {
        // There are potentially simpler ways to write this function, but this should result in
        // LLVM `store <N x T>`

        // Creating a temporary helps LLVM turn the memcpy into a store.
        let tmp = self;
        // SAFETY: `Simd<T, N>` always contains `N` elements of type `T`. The safety of writing
        // `ptr` is ensured by the caller.
        unsafe { core::ptr::copy_nonoverlapping(tmp.as_array(), ptr, 1) }
    }

    /// Converts an array to a SIMD vector.
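    ///
    /// A small illustrative example:
    ///
    /// ```
    /// # #![feature(portable_simd)]
    /// # use core::simd::u32x4;
    /// let v = u32x4::from_array([1, 2, 3, 4]);
    /// assert_eq!(v.as_array(), &[1, 2, 3, 4]);
    /// ```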
    #[inline]
    pub const fn from_array(array: [T; N]) -> Self {
        // SAFETY: `&array` is safe to read.
        //
        // FIXME: We currently use a pointer load instead of `transmute_copy` because `repr(simd)`
        // results in padding for non-power-of-2 vectors (so vectors are larger than arrays).
        //
        // NOTE: This deliberately doesn't just use `Self(array)`, see the comment
        // on the struct definition for details.
        unsafe { Self::load(&array) }
    }

    /// Converts a SIMD vector to an array.
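    ///
    /// A small illustrative example:
    ///
    /// ```
    /// # #![feature(portable_simd)]
    /// # use core::simd::u32x4;
    /// let v = u32x4::from_array([1, 2, 3, 4]);
    /// assert_eq!(v.to_array(), [1, 2, 3, 4]);
    /// ```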
    #[inline]
    pub const fn to_array(self) -> [T; N] {
        let mut tmp = core::mem::MaybeUninit::uninit();
        // SAFETY: writing to `tmp` is safe and initializes it.
        //
        // FIXME: We currently use a pointer store instead of `transmute_copy` because `repr(simd)`
        // results in padding for non-power-of-2 vectors (so vectors are larger than arrays).
        //
        // NOTE: This deliberately doesn't just use `self.0`, see the comment
        // on the struct definition for details.
        unsafe {
            self.store(tmp.as_mut_ptr());
            tmp.assume_init()
        }
    }

    /// Converts a slice to a SIMD vector containing `slice[..N]`.
    ///
    /// # Panics
    ///
    /// Panics if the slice's length is less than the vector's `Simd::LEN`.
    /// Use `load_or_default` for an alternative that does not panic.
    ///
    /// # Example
    ///
    /// ```
    /// # #![feature(portable_simd)]
    /// # use core::simd::u32x4;
    /// let source = vec![1, 2, 3, 4, 5, 6];
    /// let v = u32x4::from_slice(&source);
    /// assert_eq!(v.as_array(), &[1, 2, 3, 4]);
    /// ```
    #[must_use]
    #[inline]
    #[track_caller]
    pub const fn from_slice(slice: &[T]) -> Self {
        assert!(
            slice.len() >= Self::LEN,
            "slice length must be at least the number of elements"
        );
        // SAFETY: We just checked that the slice contains
        // at least `N` elements.
        unsafe { Self::load(slice.as_ptr().cast()) }
    }

    /// Writes a SIMD vector to the first `N` elements of a slice.
    ///
    /// # Panics
    ///
    /// Panics if the slice's length is less than the vector's `Simd::LEN`.
    ///
    /// # Example
    ///
    /// ```
    /// # #![feature(portable_simd)]
    /// # #[cfg(feature = "as_crate")] use core_simd::simd;
    /// # #[cfg(not(feature = "as_crate"))] use core::simd;
    /// # use simd::u32x4;
    /// let mut dest = vec![0; 6];
    /// let v = u32x4::from_array([1, 2, 3, 4]);
    /// v.copy_to_slice(&mut dest);
    /// assert_eq!(&dest, &[1, 2, 3, 4, 0, 0]);
    /// ```
    #[inline]
    #[track_caller]
    pub fn copy_to_slice(self, slice: &mut [T]) {
        assert!(
            slice.len() >= Self::LEN,
            "slice length must be at least the number of elements"
        );
        // SAFETY: We just checked that the slice contains
        // at least `N` elements.
        unsafe { self.store(slice.as_mut_ptr().cast()) }
    }

    /// Reads contiguous elements from `slice`. Elements are read so long as they're in-bounds for
    /// the `slice`. Otherwise, the default value for the element type is returned.
    ///
    /// # Examples
    /// ```
    /// # #![feature(portable_simd)]
    /// # #[cfg(feature = "as_crate")] use core_simd::simd;
    /// # #[cfg(not(feature = "as_crate"))] use core::simd;
    /// # use simd::Simd;
    /// let vec: Vec<i32> = vec![10, 11];
    ///
    /// let result = Simd::<i32, 4>::load_or_default(&vec);
    /// assert_eq!(result, Simd::from_array([10, 11, 0, 0]));
    /// ```
    #[must_use]
    #[inline]
    pub fn load_or_default(slice: &[T]) -> Self
    where
        T: Default,
    {
        Self::load_or(slice, Default::default())
    }

    /// Reads contiguous elements from `slice`. Elements are read so long as they're in-bounds for
    /// the `slice`. Otherwise, the corresponding value from `or` is passed through.
    ///
    /// # Examples
    /// ```
    /// # #![feature(portable_simd)]
    /// # #[cfg(feature = "as_crate")] use core_simd::simd;
    /// # #[cfg(not(feature = "as_crate"))] use core::simd;
    /// # use simd::Simd;
    /// let vec: Vec<i32> = vec![10, 11];
    /// let or = Simd::from_array([-5, -4, -3, -2]);
    ///
    /// let result = Simd::load_or(&vec, or);
    /// assert_eq!(result, Simd::from_array([10, 11, -3, -2]));
    /// ```
    #[must_use]
    #[inline]
    pub fn load_or(slice: &[T], or: Self) -> Self {
        Self::load_select(slice, Mask::splat(true), or)
    }

    /// Reads contiguous elements from `slice`. Each element is read from memory if its
    /// corresponding element in `enable` is `true`.
    ///
    /// When the element is disabled or out of bounds for the slice, that memory location
    /// is not accessed and the default value for the element type is used instead.
    ///
    /// # Examples
    /// ```
    /// # #![feature(portable_simd)]
    /// # #[cfg(feature = "as_crate")] use core_simd::simd;
    /// # #[cfg(not(feature = "as_crate"))] use core::simd;
    /// # use simd::{Simd, Mask};
    /// let vec: Vec<i32> = vec![10, 11, 12, 13, 14, 15, 16, 17, 18];
    /// let enable = Mask::from_array([true, true, false, true]);
    ///
    /// let result = Simd::<i32, 4>::load_select_or_default(&vec, enable);
    /// assert_eq!(result, Simd::from_array([10, 11, 0, 13]));
    /// ```
    #[must_use]
    #[inline]
    pub fn load_select_or_default(slice: &[T], enable: Mask<<T as SimdElement>::Mask, N>) -> Self
    where
        T: Default,
    {
        Self::load_select(slice, enable, Default::default())
    }

    /// Reads contiguous elements from `slice`. Each element is read from memory if its
    /// corresponding element in `enable` is `true`.
    ///
    /// When the element is disabled or out of bounds for the slice, that memory location
    /// is not accessed and the corresponding value from `or` is passed through.
    ///
    /// # Examples
    /// ```
    /// # #![feature(portable_simd)]
    /// # #[cfg(feature = "as_crate")] use core_simd::simd;
    /// # #[cfg(not(feature = "as_crate"))] use core::simd;
    /// # use simd::{Simd, Mask};
    /// let vec: Vec<i32> = vec![10, 11, 12, 13, 14, 15, 16, 17, 18];
    /// let enable = Mask::from_array([true, true, false, true]);
    /// let or = Simd::from_array([-5, -4, -3, -2]);
    ///
    /// let result = Simd::load_select(&vec, enable, or);
    /// assert_eq!(result, Simd::from_array([10, 11, -3, 13]));
    /// ```
    #[must_use]
    #[inline]
    pub fn load_select(
        slice: &[T],
        mut enable: Mask<<T as SimdElement>::Mask, N>,
        or: Self,
    ) -> Self {
        enable &= mask_up_to(slice.len());
        // SAFETY: We performed the bounds check by updating the mask. &[T] is properly aligned to
        // the element.
        unsafe { Self::load_select_ptr(slice.as_ptr(), enable, or) }
    }

    /// Reads contiguous elements from `slice`. Each element is read from memory if its
    /// corresponding element in `enable` is `true`.
    ///
    /// When the element is disabled, that memory location is not accessed and the corresponding
    /// value from `or` is passed through.
    ///
    /// # Safety
    /// Enabled loads must not exceed the length of `slice`.
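    ///
    /// # Examples
    /// A minimal sketch of sound usage, where the slice is long enough for every enabled lane:
    /// ```
    /// # #![feature(portable_simd)]
    /// # #[cfg(feature = "as_crate")] use core_simd::simd;
    /// # #[cfg(not(feature = "as_crate"))] use core::simd;
    /// # use simd::{Simd, Mask};
    /// let vec: Vec<i32> = vec![10, 11, 12, 13];
    /// let enable = Mask::from_array([true, false, true, true]);
    /// let or = Simd::from_array([-5, -4, -3, -2]);
    /// // SAFETY: every enabled lane (0, 2 and 3) is in bounds for the slice.
    /// let result = unsafe { Simd::load_select_unchecked(&vec, enable, or) };
    /// assert_eq!(result, Simd::from_array([10, -4, 12, 13]));
    /// ```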
    #[must_use]
    #[inline]
    pub unsafe fn load_select_unchecked(
        slice: &[T],
        enable: Mask<<T as SimdElement>::Mask, N>,
        or: Self,
    ) -> Self {
        let ptr = slice.as_ptr();
        // SAFETY: The safety of reading elements from `slice` is ensured by the caller.
        unsafe { Self::load_select_ptr(ptr, enable, or) }
    }

    /// Reads contiguous elements starting at `ptr`. Each element is read from memory if its
    /// corresponding element in `enable` is `true`.
    ///
    /// When the element is disabled, that memory location is not accessed and the corresponding
    /// value from `or` is passed through.
    ///
    /// # Safety
    /// Enabled `ptr` elements must be safe to read as if by `std::ptr::read`.
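    ///
    /// # Examples
    /// A minimal sketch, where the pointed-to buffer is readable for every enabled lane:
    /// ```
    /// # #![feature(portable_simd)]
    /// # #[cfg(feature = "as_crate")] use core_simd::simd;
    /// # #[cfg(not(feature = "as_crate"))] use core::simd;
    /// # use simd::{Simd, Mask};
    /// let values = [10i32, 11, 12, 13];
    /// let enable = Mask::from_array([true, false, true, true]);
    /// let or = Simd::from_array([-5, -4, -3, -2]);
    /// // SAFETY: `values` has 4 elements, so every enabled lane is readable.
    /// let result = unsafe { Simd::load_select_ptr(values.as_ptr(), enable, or) };
    /// assert_eq!(result, Simd::from_array([10, -4, 12, 13]));
    /// ```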
    #[must_use]
    #[inline]
    pub unsafe fn load_select_ptr(
        ptr: *const T,
        enable: Mask<<T as SimdElement>::Mask, N>,
        or: Self,
    ) -> Self {
        // SAFETY: The safety of reading elements through `ptr` is ensured by the caller.
        unsafe {
            core::intrinsics::simd::simd_masked_load::<
                _,
                _,
                _,
                { core::intrinsics::simd::SimdAlign::Element },
            >(enable.to_int(), ptr, or)
        }
    }

    /// Reads from potentially discontiguous indices in `slice` to construct a SIMD vector.
    /// If an index is out-of-bounds, the element is instead selected from the `or` vector.
    ///
    /// # Examples
    /// ```
    /// # #![feature(portable_simd)]
    /// # use core::simd::Simd;
    /// let vec: Vec<i32> = vec![10, 11, 12, 13, 14, 15, 16, 17, 18];
    /// let idxs = Simd::from_array([9, 3, 0, 5]); // Note the index that is out-of-bounds
    /// let alt = Simd::from_array([-5, -4, -3, -2]);
    ///
    /// let result = Simd::gather_or(&vec, idxs, alt);
    /// assert_eq!(result, Simd::from_array([-5, 13, 10, 15]));
    /// ```
    #[must_use]
    #[inline]
    pub fn gather_or(slice: &[T], idxs: Simd<usize, N>, or: Self) -> Self {
        Self::gather_select(slice, Mask::splat(true), idxs, or)
    }

    /// Reads from indices in `slice` to construct a SIMD vector.
    /// If an index is out-of-bounds, the element is set to the default given by `T: Default`.
    ///
    /// # Examples
    /// ```
    /// # #![feature(portable_simd)]
    /// # use core::simd::Simd;
    /// let vec: Vec<i32> = vec![10, 11, 12, 13, 14, 15, 16, 17, 18];
    /// let idxs = Simd::from_array([9, 3, 0, 5]); // Note the index that is out-of-bounds
    ///
    /// let result = Simd::gather_or_default(&vec, idxs);
    /// assert_eq!(result, Simd::from_array([0, 13, 10, 15]));
    /// ```
    #[must_use]
    #[inline]
    pub fn gather_or_default(slice: &[T], idxs: Simd<usize, N>) -> Self
    where
        T: Default,
    {
        Self::gather_or(slice, idxs, Self::splat(T::default()))
    }

    /// Reads from indices in `slice` to construct a SIMD vector.
    /// The mask `enable`s all `true` indices and disables all `false` indices.
    /// If an index is disabled or is out-of-bounds, the element is selected from the `or` vector.
    ///
    /// # Examples
    /// ```
    /// # #![feature(portable_simd)]
    /// # use core::simd::{Simd, Mask};
    /// let vec: Vec<i32> = vec![10, 11, 12, 13, 14, 15, 16, 17, 18];
    /// let idxs = Simd::from_array([9, 3, 0, 5]); // Includes an out-of-bounds index
    /// let alt = Simd::from_array([-5, -4, -3, -2]);
    /// let enable = Mask::from_array([true, true, true, false]); // Includes a masked element
    ///
    /// let result = Simd::gather_select(&vec, enable, idxs, alt);
    /// assert_eq!(result, Simd::from_array([-5, 13, 10, -2]));
    /// ```
    #[must_use]
    #[inline]
    pub fn gather_select(
        slice: &[T],
        enable: Mask<isize, N>,
        idxs: Simd<usize, N>,
        or: Self,
    ) -> Self {
        let enable: Mask<isize, N> = enable & idxs.simd_lt(Simd::splat(slice.len()));
        // Safety: We have masked-off out-of-bounds indices.
        unsafe { Self::gather_select_unchecked(slice, enable, idxs, or) }
    }

    /// Reads from indices in `slice` to construct a SIMD vector.
    /// The mask `enable`s all `true` indices and disables all `false` indices.
    /// If an index is disabled, the element is selected from the `or` vector.
    ///
    /// # Safety
    ///
    /// Calling this function with an `enable`d out-of-bounds index is *[undefined behavior]*
    /// even if the resulting value is not used.
    ///
    /// # Examples
    /// ```
    /// # #![feature(portable_simd)]
    /// # #[cfg(feature = "as_crate")] use core_simd::simd;
    /// # #[cfg(not(feature = "as_crate"))] use core::simd;
    /// # use simd::{Simd, cmp::SimdPartialOrd, Mask};
    /// let vec: Vec<i32> = vec![10, 11, 12, 13, 14, 15, 16, 17, 18];
    /// let idxs = Simd::from_array([9, 3, 0, 5]); // Includes an out-of-bounds index
    /// let alt = Simd::from_array([-5, -4, -3, -2]);
    /// let enable = Mask::from_array([true, true, true, false]); // Includes a masked element
    /// // If this mask was used to gather, it would be unsound. Let's fix that.
    /// let enable = enable & idxs.simd_lt(Simd::splat(vec.len()));
    ///
    /// // The out-of-bounds index has been masked, so it's safe to gather now.
    /// let result = unsafe { Simd::gather_select_unchecked(&vec, enable, idxs, alt) };
    /// assert_eq!(result, Simd::from_array([-5, 13, 10, -2]));
    /// ```
    /// [undefined behavior]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
    #[must_use]
    #[inline]
    #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
    pub unsafe fn gather_select_unchecked(
        slice: &[T],
        enable: Mask<isize, N>,
        idxs: Simd<usize, N>,
        or: Self,
    ) -> Self {
        let base_ptr = Simd::<*const T, N>::splat(slice.as_ptr());
        // Ferris forgive me, I have done pointer arithmetic here.
        let ptrs = base_ptr.wrapping_add(idxs);
        // Safety: The caller is responsible for determining the indices are okay to read
        unsafe { Self::gather_select_ptr(ptrs, enable, or) }
    }

    /// Reads elementwise from pointers into a SIMD vector.
    ///
    /// # Safety
    ///
    /// Each read must satisfy the same conditions as [`core::ptr::read`].
    ///
    /// # Example
    /// ```
    /// # #![feature(portable_simd)]
    /// # #[cfg(feature = "as_crate")] use core_simd::simd;
    /// # #[cfg(not(feature = "as_crate"))] use core::simd;
    /// # use simd::prelude::*;
    /// let values = [6, 2, 4, 9];
    /// let offsets = Simd::from_array([1, 0, 0, 3]);
    /// let source = Simd::splat(values.as_ptr()).wrapping_add(offsets);
    /// let gathered = unsafe { Simd::gather_ptr(source) };
    /// assert_eq!(gathered, Simd::from_array([2, 6, 6, 9]));
    /// ```
    #[must_use]
    #[inline]
    #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
    pub unsafe fn gather_ptr(source: Simd<*const T, N>) -> Self
    where
        T: Default,
    {
        // TODO: add an intrinsic that doesn't use a passthru vector, and remove the T: Default bound
        // Safety: The caller is responsible for upholding all invariants
        unsafe { Self::gather_select_ptr(source, Mask::splat(true), Self::default()) }
    }

    /// Conditionally read elementwise from pointers into a SIMD vector.
    /// The mask `enable`s all `true` pointers and disables all `false` pointers.
    /// If a pointer is disabled, the element is selected from the `or` vector,
    /// and no read is performed.
    ///
    /// # Safety
    ///
    /// Enabled elements must satisfy the same conditions as [`core::ptr::read`].
    ///
    /// # Example
    /// ```
    /// # #![feature(portable_simd)]
    /// # #[cfg(feature = "as_crate")] use core_simd::simd;
    /// # #[cfg(not(feature = "as_crate"))] use core::simd;
    /// # use simd::prelude::*;
    /// let values = [6, 2, 4, 9];
    /// let enable = Mask::from_array([true, true, false, true]);
    /// let offsets = Simd::from_array([1, 0, 0, 3]);
    /// let source = Simd::splat(values.as_ptr()).wrapping_add(offsets);
    /// let gathered = unsafe { Simd::gather_select_ptr(source, enable, Simd::splat(0)) };
    /// assert_eq!(gathered, Simd::from_array([2, 6, 0, 9]));
    /// ```
    #[must_use]
    #[inline]
    #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
    pub unsafe fn gather_select_ptr(
        source: Simd<*const T, N>,
        enable: Mask<isize, N>,
        or: Self,
    ) -> Self {
        // Safety: The caller is responsible for upholding all invariants
        unsafe { core::intrinsics::simd::simd_gather(or, source, enable.to_int()) }
    }

    /// Conditionally write contiguous elements to `slice`. The `enable` mask controls
    /// which elements are written, as long as they're in-bounds of the `slice`.
    /// If the element is disabled or out of bounds, no memory access to that location
    /// is made.
    ///
    /// # Examples
    /// ```
    /// # #![feature(portable_simd)]
    /// # #[cfg(feature = "as_crate")] use core_simd::simd;
    /// # #[cfg(not(feature = "as_crate"))] use core::simd;
    /// # use simd::{Simd, Mask};
    /// let mut arr = [0i32; 4];
    /// let write = Simd::from_array([-5, -4, -3, -2]);
    /// let enable = Mask::from_array([false, true, true, true]);
    ///
    /// write.store_select(&mut arr[..3], enable);
    /// assert_eq!(arr, [0, -4, -3, 0]);
    /// ```
    #[inline]
    pub fn store_select(self, slice: &mut [T], mut enable: Mask<<T as SimdElement>::Mask, N>) {
        enable &= mask_up_to(slice.len());
        // SAFETY: We performed the bounds check by updating the mask. &[T] is properly aligned to
        // the element.
        unsafe { self.store_select_ptr(slice.as_mut_ptr(), enable) }
    }

    /// Conditionally write contiguous elements to `slice`. The `enable` mask controls
    /// which elements are written.
    ///
    /// # Safety
    ///
    /// Every enabled element must be in bounds for the `slice`.
    ///
    /// # Examples
    /// ```
    /// # #![feature(portable_simd)]
    /// # #[cfg(feature = "as_crate")] use core_simd::simd;
    /// # #[cfg(not(feature = "as_crate"))] use core::simd;
    /// # use simd::{Simd, Mask};
    /// let mut arr = [0i32; 4];
    /// let write = Simd::from_array([-5, -4, -3, -2]);
    /// let enable = Mask::from_array([false, true, true, true]);
    ///
    /// unsafe { write.store_select_unchecked(&mut arr, enable) };
    /// assert_eq!(arr, [0, -4, -3, -2]);
    /// ```
    #[inline]
    pub unsafe fn store_select_unchecked(
        self,
        slice: &mut [T],
        enable: Mask<<T as SimdElement>::Mask, N>,
    ) {
        let ptr = slice.as_mut_ptr();
        // SAFETY: The safety of writing elements in `slice` is ensured by the caller.
        unsafe { self.store_select_ptr(ptr, enable) }
    }

    /// Conditionally write contiguous elements starting from `ptr`.
    /// The `enable` mask controls which elements are written.
    /// When disabled, the memory location corresponding to that element is not accessed.
    ///
    /// # Safety
    ///
    /// Memory addresses for each element are calculated as if by [`pointer::wrapping_offset`],
    /// and each enabled element must satisfy the same conditions as [`core::ptr::write`].
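    ///
    /// # Examples
    /// A minimal sketch, where the destination is writable for every enabled lane:
    /// ```
    /// # #![feature(portable_simd)]
    /// # #[cfg(feature = "as_crate")] use core_simd::simd;
    /// # #[cfg(not(feature = "as_crate"))] use core::simd;
    /// # use simd::{Simd, Mask};
    /// let mut arr = [0i32; 4];
    /// let write = Simd::from_array([-5, -4, -3, -2]);
    /// let enable = Mask::from_array([true, false, true, true]);
    /// // SAFETY: `arr` has 4 elements, so every enabled lane is writable.
    /// unsafe { write.store_select_ptr(arr.as_mut_ptr(), enable) };
    /// assert_eq!(arr, [-5, 0, -3, -2]);
    /// ```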
    #[inline]
    pub unsafe fn store_select_ptr(self, ptr: *mut T, enable: Mask<<T as SimdElement>::Mask, N>) {
        // SAFETY: The safety of writing elements through `ptr` is ensured by the caller.
        unsafe {
            core::intrinsics::simd::simd_masked_store::<
                _,
                _,
                _,
                { core::intrinsics::simd::SimdAlign::Element },
            >(enable.to_int(), ptr, self)
        }
    }

    /// Writes the values in a SIMD vector to potentially discontiguous indices in `slice`.
    /// If an index is out-of-bounds, the write is suppressed without panicking.
    /// If two elements in the scattered vector would write to the same index,
    /// only the last element is guaranteed to actually be written.
    ///
    /// # Examples
    /// ```
    /// # #![feature(portable_simd)]
    /// # use core::simd::Simd;
    /// let mut vec: Vec<i32> = vec![10, 11, 12, 13, 14, 15, 16, 17, 18];
    /// let idxs = Simd::from_array([9, 3, 0, 0]); // Note the duplicate index.
    /// let vals = Simd::from_array([-27, 82, -41, 124]);
    ///
    /// vals.scatter(&mut vec, idxs); // two logical writes means the last wins.
    /// assert_eq!(vec, vec![124, 11, 12, 82, 14, 15, 16, 17, 18]);
    /// ```
    #[inline]
    pub fn scatter(self, slice: &mut [T], idxs: Simd<usize, N>) {
        self.scatter_select(slice, Mask::splat(true), idxs)
    }

    /// Writes values from a SIMD vector to multiple potentially discontiguous indices in `slice`.
    /// The mask `enable`s all `true` indices and disables all `false` indices.
    /// If an enabled index is out-of-bounds, the write is suppressed without panicking.
    /// If two enabled elements in the scattered vector would write to the same index,
    /// only the last element is guaranteed to actually be written.
    ///
    /// # Examples
    /// ```
    /// # #![feature(portable_simd)]
    /// # #[cfg(feature = "as_crate")] use core_simd::simd;
    /// # #[cfg(not(feature = "as_crate"))] use core::simd;
    /// # use simd::{Simd, Mask};
    /// let mut vec: Vec<i32> = vec![10, 11, 12, 13, 14, 15, 16, 17, 18];
    /// let idxs = Simd::from_array([9, 3, 0, 0]); // Includes an out-of-bounds index
    /// let vals = Simd::from_array([-27, 82, -41, 124]);
    /// let enable = Mask::from_array([true, true, true, false]); // Includes a masked element
    ///
    /// vals.scatter_select(&mut vec, enable, idxs); // The last write is masked, thus omitted.
    /// assert_eq!(vec, vec![-41, 11, 12, 82, 14, 15, 16, 17, 18]);
    /// ```
    #[inline]
    pub fn scatter_select(self, slice: &mut [T], enable: Mask<isize, N>, idxs: Simd<usize, N>) {
        let enable: Mask<isize, N> = enable & idxs.simd_lt(Simd::splat(slice.len()));
        // Safety: We have masked-off out-of-bounds indices.
        unsafe { self.scatter_select_unchecked(slice, enable, idxs) }
    }

    /// Writes values from a SIMD vector to multiple potentially discontiguous indices in `slice`.
    /// The mask `enable`s all `true` indices and disables all `false` indices.
    /// If two enabled elements in the scattered vector would write to the same index,
    /// only the last element is guaranteed to actually be written.
    ///
    /// # Safety
    ///
    /// Calling this function with an enabled out-of-bounds index is *[undefined behavior]*,
    /// and may lead to memory corruption.
    ///
    /// # Examples
    /// ```
    /// # #![feature(portable_simd)]
    /// # #[cfg(feature = "as_crate")] use core_simd::simd;
    /// # #[cfg(not(feature = "as_crate"))] use core::simd;
    /// # use simd::{Simd, cmp::SimdPartialOrd, Mask};
    /// let mut vec: Vec<i32> = vec![10, 11, 12, 13, 14, 15, 16, 17, 18];
    /// let idxs = Simd::from_array([9, 3, 0, 0]);
    /// let vals = Simd::from_array([-27, 82, -41, 124]);
    /// let enable = Mask::from_array([true, true, true, false]); // Masks the final index
    /// // If this mask was used to scatter, it would be unsound. Let's fix that.
    /// let enable = enable & idxs.simd_lt(Simd::splat(vec.len()));
    ///
    /// // We have masked the OOB index, so it's safe to scatter now.
    /// unsafe { vals.scatter_select_unchecked(&mut vec, enable, idxs); }
    /// // The second write to index 0 was masked, thus omitted.
    /// assert_eq!(vec, vec![-41, 11, 12, 82, 14, 15, 16, 17, 18]);
    /// ```
    /// [undefined behavior]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
    #[inline]
    #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
    pub unsafe fn scatter_select_unchecked(
        self,
        slice: &mut [T],
        enable: Mask<isize, N>,
        idxs: Simd<usize, N>,
    ) {
        // Safety: This block works with *mut T derived from &mut 'a [T],
        // which means it is delicate in Rust's borrowing model, circa 2021:
        // &mut 'a [T] asserts uniqueness, so deriving &'a [T] invalidates live *mut Ts!
        // Even though this block is largely safe methods, it must be exactly this way
        // to prevent invalidating the raw ptrs while they're live.
        // Thus, entering this block requires all values to use being already ready:
        // 0. idxs we want to write to, which are used to construct the mask.
        // 1. enable, which depends on an initial &'a [T] and the idxs.
        // 2. actual values to scatter (self).
        // 3. &mut [T] which will become our base ptr.
        unsafe {
            // Now Entering ☢️ *mut T Zone
            let base_ptr = Simd::<*mut T, N>::splat(slice.as_mut_ptr());
            // Ferris forgive me, I have done pointer arithmetic here.
            let ptrs = base_ptr.wrapping_add(idxs);
            // The ptrs have been bounds-masked to prevent memory-unsafe writes insha'allah
            self.scatter_select_ptr(ptrs, enable);
            // Cleared ☢️ *mut T Zone
        }
    }

    /// Writes the values of a SIMD vector elementwise to the given pointers.
    ///
    /// # Safety
    ///
    /// Each write must satisfy the same conditions as [`core::ptr::write`].
    ///
    /// # Example
    /// ```
    /// # #![feature(portable_simd)]
    /// # #[cfg(feature = "as_crate")] use core_simd::simd;
    /// # #[cfg(not(feature = "as_crate"))] use core::simd;
    /// # use simd::{Simd, ptr::SimdMutPtr};
    /// let mut values = [0; 4];
    /// let offset = Simd::from_array([3, 2, 1, 0]);
    /// let ptrs = Simd::splat(values.as_mut_ptr()).wrapping_add(offset);
    /// unsafe { Simd::from_array([6, 3, 5, 7]).scatter_ptr(ptrs); }
    /// assert_eq!(values, [7, 5, 3, 6]);
    /// ```
    #[inline]
    #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
    pub unsafe fn scatter_ptr(self, dest: Simd<*mut T, N>) {
        // Safety: The caller is responsible for upholding all invariants
        unsafe { self.scatter_select_ptr(dest, Mask::splat(true)) }
    }

    /// Conditionally writes the values of a SIMD vector elementwise to the given pointers.
    /// The mask `enable`s all `true` pointers and disables all `false` pointers.
    /// If a pointer is disabled, the write to its pointee is skipped.
    ///
    /// # Safety
    ///
    /// Enabled pointers must satisfy the same conditions as [`core::ptr::write`].
    ///
    /// # Example
    /// ```
    /// # #![feature(portable_simd)]
    /// # #[cfg(feature = "as_crate")] use core_simd::simd;
    /// # #[cfg(not(feature = "as_crate"))] use core::simd;
    /// # use simd::{Mask, Simd, ptr::SimdMutPtr};
    /// let mut values = [0; 4];
    /// let offset = Simd::from_array([3, 2, 1, 0]);
    /// let ptrs = Simd::splat(values.as_mut_ptr()).wrapping_add(offset);
    /// let enable = Mask::from_array([true, true, false, false]);
    /// unsafe { Simd::from_array([6, 3, 5, 7]).scatter_select_ptr(ptrs, enable); }
    /// assert_eq!(values, [0, 0, 3, 6]);
    /// ```
    #[inline]
    #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
    pub unsafe fn scatter_select_ptr(self, dest: Simd<*mut T, N>, enable: Mask<isize, N>) {
        // Safety: The caller is responsible for upholding all invariants
        unsafe { core::intrinsics::simd::simd_scatter(self, dest, enable.to_int()) }
    }
}

impl<T, const N: usize> Copy for Simd<T, N>
where
    LaneCount<N>: SupportedLaneCount,
    T: SimdElement,
{
}

impl<T, const N: usize> Clone for Simd<T, N>
where
    LaneCount<N>: SupportedLaneCount,
    T: SimdElement,
{
    #[inline]
    fn clone(&self) -> Self {
        *self
    }
}

impl<T, const N: usize> Default for Simd<T, N>
where
    LaneCount<N>: SupportedLaneCount,
    T: SimdElement + Default,
{
    #[inline]
    fn default() -> Self {
        Self::splat(T::default())
    }
}

impl<T, const N: usize> PartialEq for Simd<T, N>
where
    LaneCount<N>: SupportedLaneCount,
    T: SimdElement + PartialEq,
{
    #[inline]
    fn eq(&self, other: &Self) -> bool {
        // Safety: All SIMD vectors are SimdPartialEq, and the comparison produces a valid mask.
        let mask = unsafe {
            let tfvec: Simd<<T as SimdElement>::Mask, N> =
                core::intrinsics::simd::simd_eq(*self, *other);
            Mask::from_int_unchecked(tfvec)
        };

        // Two vectors are equal if all elements are equal when compared elementwise
        mask.all()
    }

    #[allow(clippy::partialeq_ne_impl)]
    #[inline]
    fn ne(&self, other: &Self) -> bool {
        // Safety: All SIMD vectors are SimdPartialEq, and the comparison produces a valid mask.
        let mask = unsafe {
            let tfvec: Simd<<T as SimdElement>::Mask, N> =
                core::intrinsics::simd::simd_ne(*self, *other);
            Mask::from_int_unchecked(tfvec)
        };

        // Two vectors are non-equal if any elements are non-equal when compared elementwise
        mask.any()
    }
}

/// Lexicographic order. For the SIMD elementwise minimum and maximum, use `simd_min` and `simd_max` instead.
impl<T, const N: usize> PartialOrd for Simd<T, N>
where
    LaneCount<N>: SupportedLaneCount,
    T: SimdElement + PartialOrd,
{
    #[inline]
    fn partial_cmp(&self, other: &Self) -> Option<core::cmp::Ordering> {
        // TODO use SIMD equality
        self.to_array().partial_cmp(other.as_ref())
    }
}

impl<T, const N: usize> Eq for Simd<T, N>
where
    LaneCount<N>: SupportedLaneCount,
    T: SimdElement + Eq,
{
}

/// Lexicographic order. For the SIMD elementwise minimum and maximum, use `simd_min` and `simd_max` instead.
impl<T, const N: usize> Ord for Simd<T, N>
where
    LaneCount<N>: SupportedLaneCount,
    T: SimdElement + Ord,
{
    #[inline]
    fn cmp(&self, other: &Self) -> core::cmp::Ordering {
        // TODO use SIMD equality
        self.to_array().cmp(other.as_ref())
    }
}

impl<T, const N: usize> core::hash::Hash for Simd<T, N>
where
    LaneCount<N>: SupportedLaneCount,
    T: SimdElement + core::hash::Hash,
{
    #[inline]
    fn hash<H>(&self, state: &mut H)
    where
        H: core::hash::Hasher,
    {
        self.as_array().hash(state)
    }
}

// array references
impl<T, const N: usize> AsRef<[T; N]> for Simd<T, N>
where
    LaneCount<N>: SupportedLaneCount,
    T: SimdElement,
{
    #[inline]
    fn as_ref(&self) -> &[T; N] {
        self.as_array()
    }
}

impl<T, const N: usize> AsMut<[T; N]> for Simd<T, N>
where
    LaneCount<N>: SupportedLaneCount,
    T: SimdElement,
{
    #[inline]
    fn as_mut(&mut self) -> &mut [T; N] {
        self.as_mut_array()
    }
}

// slice references
impl<T, const N: usize> AsRef<[T]> for Simd<T, N>
where
    LaneCount<N>: SupportedLaneCount,
    T: SimdElement,
{
    #[inline]
    fn as_ref(&self) -> &[T] {
        self.as_array()
    }
}

impl<T, const N: usize> AsMut<[T]> for Simd<T, N>
where
    LaneCount<N>: SupportedLaneCount,
    T: SimdElement,
{
    #[inline]
    fn as_mut(&mut self) -> &mut [T] {
        self.as_mut_array()
    }
}

// vector/array conversion
impl<T, const N: usize> From<[T; N]> for Simd<T, N>
where
    LaneCount<N>: SupportedLaneCount,
    T: SimdElement,
{
    #[inline]
    fn from(array: [T; N]) -> Self {
        Self::from_array(array)
    }
}

impl<T, const N: usize> From<Simd<T, N>> for [T; N]
where
    LaneCount<N>: SupportedLaneCount,
    T: SimdElement,
{
    #[inline]
    fn from(vector: Simd<T, N>) -> Self {
        vector.to_array()
    }
}

impl<T, const N: usize> TryFrom<&[T]> for Simd<T, N>
where
    LaneCount<N>: SupportedLaneCount,
    T: SimdElement,
{
    type Error = core::array::TryFromSliceError;

    #[inline]
    fn try_from(slice: &[T]) -> Result<Self, core::array::TryFromSliceError> {
        Ok(Self::from_array(slice.try_into()?))
    }
}

impl<T, const N: usize> TryFrom<&mut [T]> for Simd<T, N>
where
    LaneCount<N>: SupportedLaneCount,
    T: SimdElement,
{
    type Error = core::array::TryFromSliceError;

    #[inline]
    fn try_from(slice: &mut [T]) -> Result<Self, core::array::TryFromSliceError> {
        Ok(Self::from_array(slice.try_into()?))
    }
}

mod sealed {
    pub trait Sealed {}
}
use sealed::Sealed;

/// Marker trait for types that may be used as SIMD vector elements.
///
/// # Safety
/// This trait, when implemented, asserts the compiler can monomorphize
/// `#[repr(simd)]` structs with the marked type as an element.
/// Strictly, it is valid to impl if the vector will not be miscompiled.
/// Practically, it is user-unfriendly to impl it if the vector won't compile,
/// even when no soundness guarantees are broken by allowing the user to try.
pub unsafe trait SimdElement: Sealed + Copy {
    /// The mask element type corresponding to this element type.
    type Mask: MaskElement;
}

impl Sealed for u8 {}

// Safety: u8 is a valid SIMD element type, and is supported by this API
unsafe impl SimdElement for u8 {
    type Mask = i8;
}

impl Sealed for u16 {}

// Safety: u16 is a valid SIMD element type, and is supported by this API
unsafe impl SimdElement for u16 {
    type Mask = i16;
}

impl Sealed for u32 {}

// Safety: u32 is a valid SIMD element type, and is supported by this API
unsafe impl SimdElement for u32 {
    type Mask = i32;
}

impl Sealed for u64 {}

// Safety: u64 is a valid SIMD element type, and is supported by this API
unsafe impl SimdElement for u64 {
    type Mask = i64;
}

impl Sealed for usize {}

// Safety: usize is a valid SIMD element type, and is supported by this API
unsafe impl SimdElement for usize {
    type Mask = isize;
}

impl Sealed for i8 {}

// Safety: i8 is a valid SIMD element type, and is supported by this API
unsafe impl SimdElement for i8 {
    type Mask = i8;
}

impl Sealed for i16 {}

// Safety: i16 is a valid SIMD element type, and is supported by this API
unsafe impl SimdElement for i16 {
    type Mask = i16;
}

impl Sealed for i32 {}

// Safety: i32 is a valid SIMD element type, and is supported by this API
unsafe impl SimdElement for i32 {
    type Mask = i32;
}

impl Sealed for i64 {}

// Safety: i64 is a valid SIMD element type, and is supported by this API
unsafe impl SimdElement for i64 {
    type Mask = i64;
}

impl Sealed for isize {}

// Safety: isize is a valid SIMD element type, and is supported by this API
unsafe impl SimdElement for isize {
    type Mask = isize;
}

impl Sealed for f32 {}

// Safety: f32 is a valid SIMD element type, and is supported by this API
unsafe impl SimdElement for f32 {
    type Mask = i32;
}

impl Sealed for f64 {}

// Safety: f64 is a valid SIMD element type, and is supported by this API
unsafe impl SimdElement for f64 {
    type Mask = i64;
}

impl<T> Sealed for *const T {}

// Safety: (thin) const pointers are valid SIMD element types, and are supported by this API
//
// Fat pointers may be supported in the future.
unsafe impl<T> SimdElement for *const T
where
    T: core::ptr::Pointee<Metadata = ()>,
{
    type Mask = isize;
}

impl<T> Sealed for *mut T {}

// Safety: (thin) mut pointers are valid SIMD element types, and are supported by this API
//
// Fat pointers may be supported in the future.
unsafe impl<T> SimdElement for *mut T
where
    T: core::ptr::Pointee<Metadata = ()>,
{
    type Mask = isize;
}
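/// Returns a vector containing the lane indices `[0, 1, ..., N - 1]`.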
#[inline]
fn lane_indices<const N: usize>() -> Simd<usize, N>
where
    LaneCount<N>: SupportedLaneCount,
{
    #![allow(clippy::needless_range_loop)]
    let mut index = [0; N];
    for i in 0..N {
        index[i] = i;
    }
    Simd::from_array(index)
}
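/// Returns a mask with the first `len.min(N)` lanes enabled, used to bound reads and writes
/// to the in-bounds portion of a slice. The comparison is performed in the narrowest unsigned
/// type that can represent both `N` and the mask element (presumably to keep codegen small).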
#[inline]
fn mask_up_to<M, const N: usize>(len: usize) -> Mask<M, N>
where
    LaneCount<N>: SupportedLaneCount,
    M: MaskElement,
{
    let index = lane_indices::<N>();
    let max_value: u64 = M::max_unsigned();
    macro_rules! case {
        ($ty:ty) => {
            if N < <$ty>::MAX as usize && max_value as $ty as u64 == max_value {
                return index.cast().simd_lt(Simd::splat(len.min(N) as $ty)).cast();
            }
        };
    }
    case!(u8);
    case!(u16);
    case!(u32);
    case!(u64);
    index.simd_lt(Simd::splat(len)).cast()
}