 
 use super::MaskElement;
 use crate::simd::intrinsics;
-use crate::simd::{LaneCount, Simd, SupportedLaneCount, ToBitMask};
+use crate::simd::{LaneCount, Simd, SupportedLaneCount, ToBitMask, ToBitMaskArray};
 
 #[repr(transparent)]
 pub struct Mask<T, const LANES: usize>(Simd<T, LANES>)
@@ -127,6 +127,72 @@ where
         unsafe { Mask(intrinsics::simd_cast(self.0)) }
     }
 
+    #[inline]
+    #[must_use = "method returns a new array and does not mutate the original value"]
+    pub fn to_bitmask_array<const N: usize>(self) -> [u8; N]
+    where
+        super::Mask<T, LANES>: ToBitMaskArray,
+        [(); <super::Mask<T, LANES> as ToBitMaskArray>::BYTES]: Sized,
+    {
+        assert_eq!(<super::Mask<T, LANES> as ToBitMaskArray>::BYTES, N);
+
+        // Safety: N is the correct bitmask size
+        //
+        // The transmute below allows this function to be marked safe, since it will prevent
+        // monomorphization errors in the case of an incorrect size.
+        unsafe {
+            // Compute the bitmask
+            let bitmask: [u8; <super::Mask<T, LANES> as ToBitMaskArray>::BYTES] =
+                intrinsics::simd_bitmask(self.0);
+
+            // Transmute to the return type, previously asserted to be the same size
+            let mut bitmask: [u8; N] = core::mem::transmute_copy(&bitmask);
+
+            // LLVM assumes bit order should match endianness
+            if cfg!(target_endian = "big") {
+                for x in bitmask.as_mut() {
+                    *x = x.reverse_bits();
+                }
+            };
+
+            bitmask
+        }
+    }
+
+    #[inline]
+    #[must_use = "method returns a new mask and does not mutate the original value"]
+    pub fn from_bitmask_array<const N: usize>(mut bitmask: [u8; N]) -> Self
+    where
+        super::Mask<T, LANES>: ToBitMaskArray,
+        [(); <super::Mask<T, LANES> as ToBitMaskArray>::BYTES]: Sized,
+    {
+        assert_eq!(<super::Mask<T, LANES> as ToBitMaskArray>::BYTES, N);
+
+        // Safety: N is the correct bitmask size
+        //
+        // The transmute below allows this function to be marked safe, since it will prevent
+        // monomorphization errors in the case of an incorrect size.
+        unsafe {
+            // LLVM assumes bit order should match endianness
+            if cfg!(target_endian = "big") {
+                for x in bitmask.as_mut() {
+                    *x = x.reverse_bits();
+                }
+            }
+
+            // Transmute to the bitmask type, previously asserted to be the same size
+            let bitmask: [u8; <super::Mask<T, LANES> as ToBitMaskArray>::BYTES] =
+                core::mem::transmute_copy(&bitmask);
+
+            // Compute the regular mask
+            Self::from_int_unchecked(intrinsics::simd_select_bitmask(
+                bitmask,
+                Self::splat(true).to_int(),
+                Self::splat(false).to_int(),
+            ))
+        }
+    }
+
     #[inline]
     pub(crate) fn to_bitmask_integer<U: ReverseBits>(self) -> U
     where
|
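As a quick orientation for the layout these new methods expose: lane i of the mask is intended to map to bit i % 8 of byte i / 8 of the returned array, which is what the big-endian reverse_bits fixup above normalizes toward. Below is a minimal standalone sketch of that mapping; the helper name bitmask_array_model and the lane values are illustrative assumptions, not part of this commit or of the portable-simd API.

// Illustrative scalar model (hypothetical helper, not from this commit) of the
// intended bitmask-array layout: lane i sets bit (i % 8) of byte (i / 8),
// least-significant bit first within each byte.
fn bitmask_array_model<const N: usize>(lanes: &[bool]) -> [u8; N] {
    assert!(lanes.len() <= N * 8, "byte array too small for this many lanes");
    let mut bytes = [0u8; N];
    for (i, &lane) in lanes.iter().enumerate() {
        if lane {
            bytes[i / 8] |= 1u8 << (i % 8);
        }
    }
    bytes
}

fn main() {
    // Nine lanes; lanes 0, 2, 3, and 8 are set.
    let lanes = [true, false, true, true, false, false, false, false, true];
    let bytes: [u8; 2] = bitmask_array_model(&lanes);
    assert_eq!(bytes, [0b0000_1101, 0b0000_0001]);
}

from_bitmask_array walks the same layout in the other direction, rebuilding a full mask from the byte array via simd_select_bitmask after undoing the big-endian bit-order adjustment.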