Update for new intrinsics

This commit is contained in:
Caleb Zulawski 2022-08-04 19:31:50 -04:00
parent 8a5a5732a1
commit 176cc81324
5 changed files with 107 additions and 97 deletions

View file

@@ -1,79 +1,23 @@
use crate::simd::{intrinsics, LaneCount, Simd, SimdElement, SupportedLaneCount};
use crate::simd::SimdElement;
/// Supporting trait for `Simd::cast`. Typically doesn't need to be used directly.
pub unsafe trait SimdCast<Target: SimdElement>: SimdElement {
#[doc(hidden)]
fn cast<const LANES: usize>(x: Simd<Self, LANES>) -> Simd<Target, LANES>
where
LaneCount<LANES>: SupportedLaneCount,
{
// Safety: implementing this trait indicates that the types are supported by `simd_as`
unsafe { intrinsics::simd_as(x) }
}
pub unsafe trait SimdCast: SimdElement {}
#[doc(hidden)]
unsafe fn cast_unchecked<const LANES: usize>(x: Simd<Self, LANES>) -> Simd<Target, LANES>
where
LaneCount<LANES>: SupportedLaneCount,
{
// Safety: implementing this trait indicates that the types are supported by `simd_cast`
// The caller is responsible for the conversion invariants.
unsafe { intrinsics::simd_cast(x) }
}
}
unsafe impl SimdCast for i8 {}
unsafe impl SimdCast for i16 {}
unsafe impl SimdCast for i32 {}
unsafe impl SimdCast for i64 {}
unsafe impl SimdCast for isize {}
unsafe impl SimdCast for u8 {}
unsafe impl SimdCast for u16 {}
unsafe impl SimdCast for u32 {}
unsafe impl SimdCast for u64 {}
unsafe impl SimdCast for usize {}
unsafe impl SimdCast for f32 {}
unsafe impl SimdCast for f64 {}
macro_rules! into_number {
{ unsafe $from:ty as $to:ty } => {
// Safety: casting between numbers is supported by `simd_cast` and `simd_as`
unsafe impl SimdCast<$to> for $from {}
};
{ unsafe $($type:ty),* } => {
$(
into_number! { unsafe $type as i8 }
into_number! { unsafe $type as i16 }
into_number! { unsafe $type as i32 }
into_number! { unsafe $type as i64 }
into_number! { unsafe $type as isize }
/// Supporting trait for `Simd::cast_ptr`. Typically doesn't need to be used directly.
pub unsafe trait SimdCastPtr: SimdElement {}
into_number! { unsafe $type as u8 }
into_number! { unsafe $type as u16 }
into_number! { unsafe $type as u32 }
into_number! { unsafe $type as u64 }
into_number! { unsafe $type as usize }
into_number! { unsafe $type as f32 }
into_number! { unsafe $type as f64 }
)*
}
}
into_number! { unsafe i8, i16, i32, i64, isize, u8, u16, u32, u64, usize, f32, f64 }
// TODO uncomment pending PR to rustc
/*
macro_rules! into_pointer {
{ unsafe $($type:ty),* } => {
$(
// Safety: casting between numbers and pointers is supported by `simd_cast` and `simd_as`
unsafe impl<T> SimdCast<$type> for *const T {}
// Safety: casting between numbers and pointers is supported by `simd_cast` and `simd_as`
unsafe impl<T> SimdCast<$type> for *mut T {}
// Safety: casting between numbers and pointers is supported by `simd_cast` and `simd_as`
unsafe impl<T> SimdCast<*const T> for $type {}
// Safety: casting between numbers and pointers is supported by `simd_cast` and `simd_as`
unsafe impl<T> SimdCast<*mut T> for $type {}
)*
}
}
into_pointer! { unsafe i8, i16, i32, i64, isize, u8, u16, u32, u64, usize }
// Safety: casting between pointers is supported by `simd_cast` and `simd_as`
unsafe impl<T, U> SimdCast<*const T> for *const U {}
// Safety: casting between pointers is supported by `simd_cast` and `simd_as`
unsafe impl<T, U> SimdCast<*const T> for *mut U {}
// Safety: casting between pointers is supported by `simd_cast` and `simd_as`
unsafe impl<T, U> SimdCast<*mut T> for *const U {}
// Safety: casting between pointers is supported by `simd_cast` and `simd_as`
unsafe impl<T, U> SimdCast<*mut T> for *mut U {}
*/
unsafe impl<T> SimdCastPtr for *const T {}
unsafe impl<T> SimdCastPtr for *mut T {}

View file

@@ -40,6 +40,15 @@ pub trait SimdConstPtr: Copy + Sealed {
/// Equivalent to calling [`pointer::with_addr`] on each lane.
fn with_addr(self, addr: Self::Usize) -> Self;
/// Gets the "address" portion of the pointer, and "exposes" the provenance part for future use
/// in [`from_exposed_addr`].
fn expose_addr(self) -> Self::Usize;
/// Convert an address back to a pointer, picking up a previously "exposed" provenance.
///
/// Equivalent to calling [`pointer::from_exposed_addr`] on each lane.
fn from_exposed_addr(addr: Self::Usize) -> Self;
/// Calculates the offset from a pointer using wrapping arithmetic.
///
/// Equivalent to calling [`pointer::wrapping_offset`] on each lane.
@@ -77,8 +86,7 @@ where
#[inline]
fn as_mut(self) -> Self::MutPtr {
unimplemented!()
//self.cast()
unsafe { intrinsics::simd_cast_ptr(self) }
}
#[inline]
@@ -90,18 +98,25 @@ where
}
#[inline]
fn with_addr(self, _addr: Self::Usize) -> Self {
unimplemented!()
/*
fn with_addr(self, addr: Self::Usize) -> Self {
// FIXME(strict_provenance_magic): I am magic and should be a compiler intrinsic.
//
// In the mean-time, this operation is defined to be "as if" it was
// a wrapping_offset, so we can emulate it as such. This should properly
// restore pointer provenance even under today's compiler.
self.cast::<*const u8>()
self.cast_ptr::<*const u8>()
.wrapping_offset(addr.cast::<isize>() - self.addr().cast::<isize>())
.cast()
*/
.cast_ptr()
}
#[inline]
fn expose_addr(self) -> Self::Usize {
unsafe { intrinsics::simd_expose_addr(self) }
}
#[inline]
fn from_exposed_addr(addr: Self::Usize) -> Self {
unsafe { intrinsics::simd_from_exposed_addr(addr) }
}
#[inline]

View file

@@ -37,6 +37,15 @@ pub trait SimdMutPtr: Copy + Sealed {
/// Equivalent to calling [`pointer::with_addr`] on each lane.
fn with_addr(self, addr: Self::Usize) -> Self;
/// Gets the "address" portion of the pointer, and "exposes" the provenance part for future use
/// in [`from_exposed_addr`].
fn expose_addr(self) -> Self::Usize;
/// Convert an address back to a pointer, picking up a previously "exposed" provenance.
///
/// Equivalent to calling [`pointer::from_exposed_addr`] on each lane.
fn from_exposed_addr(addr: Self::Usize) -> Self;
/// Calculates the offset from a pointer using wrapping arithmetic.
///
/// Equivalent to calling [`pointer::wrapping_offset`] on each lane.
@@ -85,18 +94,25 @@ where
}
#[inline]
fn with_addr(self, _addr: Self::Usize) -> Self {
unimplemented!()
/*
fn with_addr(self, addr: Self::Usize) -> Self {
// FIXME(strict_provenance_magic): I am magic and should be a compiler intrinsic.
//
// In the mean-time, this operation is defined to be "as if" it was
// a wrapping_offset, so we can emulate it as such. This should properly
// restore pointer provenance even under today's compiler.
self.cast::<*mut u8>()
self.cast_ptr::<*mut u8>()
.wrapping_offset(addr.cast::<isize>() - self.addr().cast::<isize>())
.cast()
*/
.cast_ptr()
}
#[inline]
fn expose_addr(self) -> Self::Usize {
unsafe { intrinsics::simd_expose_addr(self) }
}
#[inline]
fn from_exposed_addr(addr: Self::Usize) -> Self {
unsafe { intrinsics::simd_from_exposed_addr(addr) }
}
#[inline]

View file

@@ -154,4 +154,27 @@ extern "platform-intrinsic" {
// equivalent to wrapping_offset
pub(crate) fn simd_arith_offset<T, U>(ptr: T, offset: U) -> T;
/*
/// equivalent to `T as U` semantics, specifically for pointers
pub(crate) fn simd_cast_ptr<T, U>(ptr: T) -> U;
/// expose a pointer as an address
pub(crate) fn simd_expose_addr<T, U>(ptr: T) -> U;
/// convert an exposed address back to a pointer
pub(crate) fn simd_from_exposed_addr<T, U>(addr: T) -> U;
*/
}
pub(crate) unsafe fn simd_cast_ptr<T, U>(_ptr: T) -> U {
unimplemented!()
}
pub(crate) unsafe fn simd_expose_addr<T, U>(_ptr: T) -> U {
unimplemented!()
}
pub(crate) unsafe fn simd_from_exposed_addr<T, U>(_addr: T) -> U {
unimplemented!()
}

View file

@@ -1,6 +1,6 @@
use crate::simd::{
intrinsics, LaneCount, Mask, MaskElement, SimdCast, SimdConstPtr, SimdMutPtr, SimdPartialOrd,
SupportedLaneCount, Swizzle,
intrinsics, LaneCount, Mask, MaskElement, SimdCast, SimdCastPtr, SimdConstPtr, SimdMutPtr,
SimdPartialOrd, SupportedLaneCount, Swizzle,
};
/// A SIMD vector of `LANES` elements of type `T`. `Simd<T, N>` has the same shape as [`[T; N]`](array), but operates like `T`.
@@ -209,11 +209,23 @@ where
#[must_use]
#[inline]
#[cfg(not(bootstrap))]
pub fn cast<U: SimdElement>(self) -> Simd<U, LANES>
pub fn cast<U: SimdCast>(self) -> Simd<U, LANES>
where
T: SimdCast<U>,
T: SimdCast,
{
SimdCast::cast(self)
// Safety: supported types are guaranteed by SimdCast
unsafe { intrinsics::simd_as(self) }
}
/// Lanewise casts pointers to another pointer type.
#[must_use]
#[inline]
pub fn cast_ptr<U: SimdCastPtr>(self) -> Simd<U, LANES>
where
T: SimdCastPtr,
{
// Safety: supported types are guaranteed by SimdCastPtr
unsafe { intrinsics::simd_cast_ptr(self) }
}
/// Rounds toward zero and converts to the same-width integer type, assuming that
@@ -234,11 +246,11 @@ where
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
pub unsafe fn to_int_unchecked<I>(self) -> Simd<I, LANES>
where
T: core::convert::FloatToInt<I> + SimdCast<I>,
I: SimdElement,
T: core::convert::FloatToInt<I> + SimdCast,
I: SimdCast,
{
// Safety: the caller is responsible for the invariants
unsafe { SimdCast::cast_unchecked(self) }
// Safety: supported types are guaranteed by SimdCast, the caller is responsible for the extra invariants
unsafe { intrinsics::simd_cast(self) }
}
/// Reads from potentially discontiguous indices in `slice` to construct a SIMD vector.