Merge pull request #397 from AaronKutch/float_refactor

This commit is contained in:
Amanieu d'Antras 2021-04-02 23:36:28 +01:00 committed by GitHub
commit c31c2e0556
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
21 changed files with 444 additions and 1767 deletions

View file

@ -109,6 +109,20 @@ jobs:
- uses: actions/checkout@v1
with:
submodules: true
- name: Install Rust
run: rustup update stable && rustup default stable && rustup component add rustfmt
- name: Install stable `rustfmt`
run: rustup set profile minimal && rustup default stable && rustup component add rustfmt
- run: cargo fmt -- --check
clippy:
name: Clippy
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v1
with:
submodules: true
# Unlike rustfmt, stable clippy does not work on code with nightly features.
# This acquires the most recent nightly with a clippy component.
- name: Install nightly `clippy`
run: |
rustup set profile minimal && rustup default "nightly-$(curl -s https://rust-lang.github.io/rustup-components-history/x86_64-unknown-linux-gnu/clippy)" && rustup component add clippy
- run: cargo clippy -- -D clippy::all

View file

@ -1,4 +1,4 @@
// Hack of a crate until rust-lang/rust#51647 is fixed
//! This is needed for tests on targets that require a `#[panic_handler]` function
#![feature(no_core)]
#![no_core]

View file

@ -137,9 +137,8 @@ where
a_significand <<= shift;
a_exponent -= shift;
}
} else
/* addition */
{
} else {
// addition
a_significand += b_significand;
// If the addition carried up, we need to right-shift the result and

View file

@ -63,25 +63,22 @@ fn cmp<F: Float>(a: F, b: F) -> Result {
// a and b as signed integers as we would with a floating-point compare.
if a_srep & b_srep >= szero {
if a_srep < b_srep {
return Result::Less;
Result::Less
} else if a_srep == b_srep {
return Result::Equal;
Result::Equal
} else {
return Result::Greater;
Result::Greater
}
}
// Otherwise, both are negative, so we need to flip the sense of the
// comparison to get the correct result. (This assumes a twos- or ones-
// complement integer representation; if integers are represented in a
// sign-magnitude representation, then this flip is incorrect).
else {
if a_srep > b_srep {
return Result::Less;
} else if a_srep == b_srep {
return Result::Equal;
} else {
return Result::Greater;
}
} else if a_srep > b_srep {
Result::Less
} else if a_srep == b_srep {
Result::Equal
} else {
Result::Greater
}
}

View file

@ -1,90 +1,88 @@
use float::Float;
use int::Int;
use int::{CastInto, Int};
macro_rules! int_to_float {
($i:expr, $ity:ty, $fty:ty) => {{
let i = $i;
if i == 0 {
return 0.0;
}
fn int_to_float<I: Int, F: Float>(i: I) -> F
where
F::Int: CastInto<u32>,
F::Int: CastInto<I>,
I::UnsignedInt: CastInto<F::Int>,
u32: CastInto<F::Int>,
{
if i == I::ZERO {
return F::ZERO;
}
let mant_dig = <$fty>::SIGNIFICAND_BITS + 1;
let exponent_bias = <$fty>::EXPONENT_BIAS;
let two = I::UnsignedInt::ONE + I::UnsignedInt::ONE;
let four = two + two;
let sign = i < I::ZERO;
let mut x = Int::abs_diff(i, I::ZERO);
let n = <$ity as Int>::BITS;
let (s, a) = i.extract_sign();
let mut a = a;
// number of significant digits in the integer
let i_sd = I::BITS - x.leading_zeros();
// significant digits for the float, including implicit bit
let f_sd = F::SIGNIFICAND_BITS + 1;
// number of significant digits
let sd = n - a.leading_zeros();
// exponent
let mut exp = i_sd - 1;
// exponent
let mut e = sd - 1;
if I::BITS < f_sd {
return F::from_parts(
sign,
(exp + F::EXPONENT_BIAS).cast(),
x.cast() << (f_sd - exp - 1),
);
}
if <$ity as Int>::BITS < mant_dig {
return <$fty>::from_parts(
s,
(e + exponent_bias) as <$fty as Float>::Int,
(a as <$fty as Float>::Int) << (mant_dig - e - 1),
);
}
a = if sd > mant_dig {
/* start: 0000000000000000000001xxxxxxxxxxxxxxxxxxxxxxPQxxxxxxxxxxxxxxxxxx
* finish: 000000000000000000000000000000000000001xxxxxxxxxxxxxxxxxxxxxxPQR
* 12345678901234567890123456
* 1 = msb 1 bit
* P = bit MANT_DIG-1 bits to the right of 1
* Q = bit MANT_DIG bits to the right of 1
* R = "or" of all bits to the right of Q
*/
let mant_dig_plus_one = mant_dig + 1;
let mant_dig_plus_two = mant_dig + 2;
a = if sd == mant_dig_plus_one {
a << 1
} else if sd == mant_dig_plus_two {
a
} else {
(a >> (sd - mant_dig_plus_two)) as <$ity as Int>::UnsignedInt
| ((a & <$ity as Int>::UnsignedInt::max_value())
.wrapping_shl((n + mant_dig_plus_two) - sd)
!= 0) as <$ity as Int>::UnsignedInt
};
/* finish: */
a |= ((a & 4) != 0) as <$ity as Int>::UnsignedInt; /* Or P into R */
a += 1; /* round - this step may add a significant bit */
a >>= 2; /* dump Q and R */
/* a is now rounded to mant_dig or mant_dig+1 bits */
if (a & (1 << mant_dig)) != 0 {
a >>= 1;
e += 1;
}
a
/* a is now rounded to mant_dig bits */
x = if i_sd > f_sd {
// start: 0000000000000000000001xxxxxxxxxxxxxxxxxxxxxxPQxxxxxxxxxxxxxxxxxx
// finish: 000000000000000000000000000000000000001xxxxxxxxxxxxxxxxxxxxxxPQR
// 12345678901234567890123456
// 1 = the implicit bit
// P = bit f_sd-1 bits to the right of 1
// Q = bit f_sd bits to the right of 1
// R = "or" of all bits to the right of Q
let f_sd_add2 = f_sd + 2;
x = if i_sd == (f_sd + 1) {
x << 1
} else if i_sd == f_sd_add2 {
x
} else {
a.wrapping_shl(mant_dig - sd)
/* a is now rounded to mant_dig bits */
(x >> (i_sd - f_sd_add2))
| Int::from_bool(
(x & I::UnsignedInt::MAX).wrapping_shl((I::BITS + f_sd_add2) - i_sd)
!= Int::ZERO,
)
};
<$fty>::from_parts(
s,
(e + exponent_bias) as <$fty as Float>::Int,
a as <$fty as Float>::Int,
)
}};
// R |= P
x |= Int::from_bool((x & four) != I::UnsignedInt::ZERO);
// round - this step may add a significant bit
x += Int::ONE;
// dump Q and R
x >>= 2;
// a is now rounded to f_sd or f_sd+1 bits
if (x & (I::UnsignedInt::ONE << f_sd)) != Int::ZERO {
x >>= 1;
exp += 1;
}
x
} else {
x.wrapping_shl(f_sd - i_sd)
};
F::from_parts(sign, (exp + F::EXPONENT_BIAS).cast(), x.cast())
}
intrinsics! {
#[arm_aeabi_alias = __aeabi_i2f]
pub extern "C" fn __floatsisf(i: i32) -> f32 {
int_to_float!(i, i32, f32)
int_to_float(i)
}
#[arm_aeabi_alias = __aeabi_i2d]
pub extern "C" fn __floatsidf(i: i32) -> f64 {
int_to_float!(i, i32, f64)
int_to_float(i)
}
#[maybe_use_optimized_c_shim]
@ -95,7 +93,7 @@ intrinsics! {
if cfg!(target_arch = "x86_64") {
i as f32
} else {
int_to_float!(i, i64, f32)
int_to_float(i)
}
}
@ -107,181 +105,172 @@ intrinsics! {
if cfg!(target_arch = "x86_64") {
i as f64
} else {
int_to_float!(i, i64, f64)
int_to_float(i)
}
}
#[unadjusted_on_win64]
pub extern "C" fn __floattisf(i: i128) -> f32 {
int_to_float!(i, i128, f32)
int_to_float(i)
}
#[unadjusted_on_win64]
pub extern "C" fn __floattidf(i: i128) -> f64 {
int_to_float!(i, i128, f64)
int_to_float(i)
}
#[arm_aeabi_alias = __aeabi_ui2f]
pub extern "C" fn __floatunsisf(i: u32) -> f32 {
int_to_float!(i, u32, f32)
int_to_float(i)
}
#[arm_aeabi_alias = __aeabi_ui2d]
pub extern "C" fn __floatunsidf(i: u32) -> f64 {
int_to_float!(i, u32, f64)
int_to_float(i)
}
#[maybe_use_optimized_c_shim]
#[arm_aeabi_alias = __aeabi_ul2f]
pub extern "C" fn __floatundisf(i: u64) -> f32 {
int_to_float!(i, u64, f32)
int_to_float(i)
}
#[maybe_use_optimized_c_shim]
#[arm_aeabi_alias = __aeabi_ul2d]
pub extern "C" fn __floatundidf(i: u64) -> f64 {
int_to_float!(i, u64, f64)
int_to_float(i)
}
#[unadjusted_on_win64]
pub extern "C" fn __floatuntisf(i: u128) -> f32 {
int_to_float!(i, u128, f32)
int_to_float(i)
}
#[unadjusted_on_win64]
pub extern "C" fn __floatuntidf(i: u128) -> f64 {
int_to_float!(i, u128, f64)
int_to_float(i)
}
}
#[derive(PartialEq)]
enum Sign {
Positive,
Negative,
}
fn float_to_int<F: Float, I: Int>(f: F) -> I
where
F::ExpInt: CastInto<u32>,
u32: CastInto<F::ExpInt>,
F::Int: CastInto<I>,
{
// converting NaNs is UB, so we don't consider them
macro_rules! float_to_int {
($f:expr, $fty:ty, $ity:ty) => {{
let f = $f;
let fixint_min = <$ity>::min_value();
let fixint_max = <$ity>::max_value();
let fixint_bits = <$ity as Int>::BITS as usize;
let fixint_unsigned = fixint_min == 0;
let sign = f.sign();
let mut exp = f.exp();
let sign_bit = <$fty>::SIGN_MASK;
let significand_bits = <$fty>::SIGNIFICAND_BITS as usize;
let exponent_bias = <$fty>::EXPONENT_BIAS as usize;
//let exponent_max = <$fty>::exponent_max() as usize;
// if less than one or unsigned & negative
if (exp < F::EXPONENT_BIAS.cast()) || (!I::SIGNED && sign) {
return I::ZERO;
}
exp -= F::EXPONENT_BIAS.cast();
// Break a into sign, exponent, significand
let a_rep = <$fty>::repr(f);
let a_abs = a_rep & !sign_bit;
// this is used to work around -1 not being available for unsigned
let sign = if (a_rep & sign_bit) == 0 {
Sign::Positive
// If the value is too large for `I`, saturate.
let bits: F::ExpInt = I::BITS.cast();
let max = if I::SIGNED {
bits - F::ExpInt::ONE
} else {
bits
};
if max <= exp {
return if sign {
// It happens that I::MIN is handled correctly
I::MIN
} else {
Sign::Negative
I::MAX
};
let mut exponent = (a_abs >> significand_bits) as usize;
let significand = (a_abs & <$fty>::SIGNIFICAND_MASK) | <$fty>::IMPLICIT_BIT;
};
// if < 1 or unsigned & negative
if exponent < exponent_bias || fixint_unsigned && sign == Sign::Negative {
return 0;
}
exponent -= exponent_bias;
// `0 <= exp < max`
// If the value is infinity, saturate.
// If the value is too large for the integer type, 0.
if exponent
>= (if fixint_unsigned {
fixint_bits
} else {
fixint_bits - 1
})
{
return if sign == Sign::Positive {
fixint_max
} else {
fixint_min
};
}
// If 0 <= exponent < significand_bits, right shift to get the result.
// Otherwise, shift left.
// (sign - 1) will never overflow as negative signs are already returned as 0 for unsigned
let r = if exponent < significand_bits {
(significand >> (significand_bits - exponent)) as $ity
// If 0 <= exponent < F::SIGNIFICAND_BITS, right shift to get the result. Otherwise, shift left.
let sig_bits: F::ExpInt = F::SIGNIFICAND_BITS.cast();
// The larger integer has to be cast into, or else the shift overflows
let r: I = if F::Int::BITS < I::BITS {
let tmp: I = if exp < sig_bits {
f.imp_frac().cast() >> (sig_bits - exp).cast()
} else {
(significand as $ity) << (exponent - significand_bits)
f.imp_frac().cast() << (exp - sig_bits).cast()
};
if sign == Sign::Negative {
(!r).wrapping_add(1)
tmp
} else {
let tmp: F::Int = if exp < sig_bits {
f.imp_frac() >> (sig_bits - exp).cast()
} else {
r
}
}};
f.imp_frac() << (exp - sig_bits).cast()
};
tmp.cast()
};
if sign {
r.wrapping_neg()
} else {
r
}
}
intrinsics! {
#[arm_aeabi_alias = __aeabi_f2iz]
pub extern "C" fn __fixsfsi(f: f32) -> i32 {
float_to_int!(f, f32, i32)
float_to_int(f)
}
#[arm_aeabi_alias = __aeabi_f2lz]
pub extern "C" fn __fixsfdi(f: f32) -> i64 {
float_to_int!(f, f32, i64)
float_to_int(f)
}
#[unadjusted_on_win64]
pub extern "C" fn __fixsfti(f: f32) -> i128 {
float_to_int!(f, f32, i128)
float_to_int(f)
}
#[arm_aeabi_alias = __aeabi_d2iz]
pub extern "C" fn __fixdfsi(f: f64) -> i32 {
float_to_int!(f, f64, i32)
float_to_int(f)
}
#[arm_aeabi_alias = __aeabi_d2lz]
pub extern "C" fn __fixdfdi(f: f64) -> i64 {
float_to_int!(f, f64, i64)
float_to_int(f)
}
#[unadjusted_on_win64]
pub extern "C" fn __fixdfti(f: f64) -> i128 {
float_to_int!(f, f64, i128)
float_to_int(f)
}
#[arm_aeabi_alias = __aeabi_f2uiz]
pub extern "C" fn __fixunssfsi(f: f32) -> u32 {
float_to_int!(f, f32, u32)
float_to_int(f)
}
#[arm_aeabi_alias = __aeabi_f2ulz]
pub extern "C" fn __fixunssfdi(f: f32) -> u64 {
float_to_int!(f, f32, u64)
float_to_int(f)
}
#[unadjusted_on_win64]
pub extern "C" fn __fixunssfti(f: f32) -> u128 {
float_to_int!(f, f32, u128)
float_to_int(f)
}
#[arm_aeabi_alias = __aeabi_d2uiz]
pub extern "C" fn __fixunsdfsi(f: f64) -> u32 {
float_to_int!(f, f64, u32)
float_to_int(f)
}
#[arm_aeabi_alias = __aeabi_d2ulz]
pub extern "C" fn __fixunsdfdi(f: f64) -> u64 {
float_to_int!(f, f64, u64)
float_to_int(f)
}
#[unadjusted_on_win64]
pub extern "C" fn __fixunsdfti(f: f64) -> u128 {
float_to_int!(f, f64, u128)
float_to_int(f)
}
}

View file

@ -1,3 +1,7 @@
// The functions are complex with many branches, and explicit
// `return`s makes it clear where function exit points are
#![allow(clippy::needless_return)]
use float::Float;
use int::{CastInto, DInt, HInt, Int};

View file

@ -15,6 +15,7 @@ pub mod sub;
#[doc(hidden)]
pub trait Float:
Copy
+ core::fmt::Debug
+ PartialEq
+ PartialOrd
+ ops::AddAssign
@ -30,6 +31,9 @@ pub trait Float:
/// An int of the same width as the float
type SignedInt: Int;
/// An int capable of containing the exponent bits plus a sign bit. This is signed.
type ExpInt: Int;
const ZERO: Self;
const ONE: Self;
@ -71,6 +75,18 @@ pub trait Float:
/// compared.
fn eq_repr(self, rhs: Self) -> bool;
/// Returns the sign bit
fn sign(self) -> bool;
/// Returns the exponent with bias
fn exp(self) -> Self::ExpInt;
/// Returns the significand with no implicit bit (or the "fractional" part)
fn frac(self) -> Self::Int;
/// Returns the significand with implicit bit
fn imp_frac(self) -> Self::Int;
/// Returns a `Self::Int` transmuted back to `Self`
fn from_repr(a: Self::Int) -> Self;
@ -81,14 +97,16 @@ pub trait Float:
fn normalize(significand: Self::Int) -> (i32, Self::Int);
/// Returns if `self` is subnormal
fn is_subnormal(&self) -> bool;
fn is_subnormal(self) -> bool;
}
macro_rules! float_impl {
($ty:ident, $ity:ident, $sity:ident, $bits:expr, $significand_bits:expr) => {
($ty:ident, $ity:ident, $sity:ident, $expty:ident, $bits:expr, $significand_bits:expr) => {
impl Float for $ty {
type Int = $ity;
type SignedInt = $sity;
type ExpInt = $expty;
const ZERO: Self = 0.0;
const ONE: Self = 1.0;
@ -113,6 +131,18 @@ macro_rules! float_impl {
self.repr() == rhs.repr()
}
}
fn sign(self) -> bool {
self.signed_repr() < Self::SignedInt::ZERO
}
fn exp(self) -> Self::ExpInt {
((self.to_bits() & Self::EXPONENT_MASK) >> Self::SIGNIFICAND_BITS) as Self::ExpInt
}
fn frac(self) -> Self::Int {
self.to_bits() & Self::SIGNIFICAND_MASK
}
fn imp_frac(self) -> Self::Int {
self.frac() | Self::IMPLICIT_BIT
}
fn from_repr(a: Self::Int) -> Self {
Self::from_bits(a)
}
@ -132,12 +162,12 @@ macro_rules! float_impl {
significand << shift as Self::Int,
)
}
fn is_subnormal(&self) -> bool {
fn is_subnormal(self) -> bool {
(self.repr() & Self::EXPONENT_MASK) == Self::Int::ZERO
}
}
};
}
float_impl!(f32, u32, i32, 32, 23);
float_impl!(f64, u64, i64, 64, 52);
float_impl!(f32, u32, i32, i16, 32, 23);
float_impl!(f64, u64, i64, i16, 64, 52);

View file

@ -181,7 +181,7 @@ where
product_high += product_high & one;
}
return F::from_repr(product_high);
F::from_repr(product_high)
}
intrinsics! {

View file

@ -1,40 +1,36 @@
use float::Float;
use int::Int;
trait Pow: Float {
/// Returns `a` raised to the power `b`
fn pow(self, mut b: i32) -> Self {
let mut a = self;
let recip = b < 0;
let mut r = Self::ONE;
loop {
if (b & 1) != 0 {
r *= a;
}
b = b.aborting_div(2);
if b == 0 {
break;
}
a *= a;
/// Returns `a` raised to the power `b`
fn pow<F: Float>(a: F, b: i32) -> F {
let mut a = a;
let recip = b < 0;
let mut pow = i32::abs_diff(b, 0);
let mut mul = F::ONE;
loop {
if (pow & 1) != 0 {
mul *= a;
}
pow >>= 1;
if pow == 0 {
break;
}
a *= a;
}
if recip {
Self::ONE / r
} else {
r
}
if recip {
F::ONE / mul
} else {
mul
}
}
impl Pow for f32 {}
impl Pow for f64 {}
intrinsics! {
pub extern "C" fn __powisf2(a: f32, b: i32) -> f32 {
a.pow(b)
pow(a, b)
}
pub extern "C" fn __powidf2(a: f64, b: i32) -> f64 {
a.pow(b)
pow(a, b)
}
}

View file

@ -15,9 +15,11 @@ pub use self::leading_zeros::__clzsi2;
#[doc(hidden)]
pub trait Int:
Copy
+ core::fmt::Debug
+ PartialEq
+ PartialOrd
+ ops::AddAssign
+ ops::SubAssign
+ ops::BitAndAssign
+ ops::BitOrAssign
+ ops::BitXorAssign
@ -38,12 +40,16 @@ pub trait Int:
/// Unsigned version of Self
type UnsignedInt: Int;
/// If `Self` is a signed integer
const SIGNED: bool;
/// The bitwidth of the int type
const BITS: u32;
const ZERO: Self;
const ONE: Self;
const MIN: Self;
const MAX: Self;
/// LUT used for maximizing the space covered and minimizing the computational cost of fuzzing
/// in `testcrate`. For example, Self = u128 produces [0,1,2,7,8,15,16,31,32,63,64,95,96,111,
@ -52,18 +58,6 @@ pub trait Int:
/// The number of entries of `FUZZ_LENGTHS` actually used. The maximum is 20 for u128.
const FUZZ_NUM: usize;
/// Extracts the sign from self and returns a tuple.
///
/// # Examples
///
/// ```rust,ignore
/// let i = -25_i32;
/// let (sign, u) = i.extract_sign();
/// assert_eq!(sign, true);
/// assert_eq!(u, 25_u32);
/// ```
fn extract_sign(self) -> (bool, Self::UnsignedInt);
fn unsigned(self) -> Self::UnsignedInt;
fn from_unsigned(unsigned: Self::UnsignedInt) -> Self;
@ -77,8 +71,6 @@ pub trait Int:
// copied from primitive integers, but put in a trait
fn is_zero(self) -> bool;
fn max_value() -> Self;
fn min_value() -> Self;
fn wrapping_neg(self) -> Self;
fn wrapping_add(self, other: Self) -> Self;
fn wrapping_mul(self, other: Self) -> Self;
@ -87,25 +79,18 @@ pub trait Int:
fn wrapping_shr(self, other: u32) -> Self;
fn rotate_left(self, other: u32) -> Self;
fn overflowing_add(self, other: Self) -> (Self, bool);
fn aborting_div(self, other: Self) -> Self;
fn aborting_rem(self, other: Self) -> Self;
fn leading_zeros(self) -> u32;
}
fn unwrap<T>(t: Option<T>) -> T {
match t {
Some(t) => t,
None => ::abort(),
}
}
macro_rules! int_impl_common {
($ty:ty) => {
const BITS: u32 = <Self>::BITS;
const BITS: u32 = <Self as Int>::ZERO.count_zeros();
const SIGNED: bool = Self::MIN != Self::ZERO;
const ZERO: Self = 0;
const ONE: Self = 1;
const MIN: Self = <Self>::MIN;
const MAX: Self = <Self>::MAX;
const FUZZ_LENGTHS: [u8; 20] = {
let bits = <Self as Int>::BITS;
@ -177,14 +162,6 @@ macro_rules! int_impl_common {
self == Self::ZERO
}
fn max_value() -> Self {
<Self>::max_value()
}
fn min_value() -> Self {
<Self>::min_value()
}
fn wrapping_neg(self) -> Self {
<Self>::wrapping_neg(self)
}
@ -217,14 +194,6 @@ macro_rules! int_impl_common {
<Self>::overflowing_add(self, other)
}
fn aborting_div(self, other: Self) -> Self {
unwrap(<Self>::checked_div(self, other))
}
fn aborting_rem(self, other: Self) -> Self {
unwrap(<Self>::checked_rem(self, other))
}
fn leading_zeros(self) -> u32 {
<Self>::leading_zeros(self)
}
@ -237,20 +206,22 @@ macro_rules! int_impl {
type OtherSign = $ity;
type UnsignedInt = $uty;
fn extract_sign(self) -> (bool, $uty) {
(false, self)
}
fn unsigned(self) -> $uty {
self
}
// It makes writing macros easier if this is implemented for both signed and unsigned
#[allow(clippy::wrong_self_convention)]
fn from_unsigned(me: $uty) -> Self {
me
}
fn abs_diff(self, other: Self) -> Self {
(self.wrapping_sub(other) as $ity).wrapping_abs() as $uty
if self < other {
other.wrapping_sub(self)
} else {
self.wrapping_sub(other)
}
}
int_impl_common!($uty);
@ -260,14 +231,6 @@ macro_rules! int_impl {
type OtherSign = $uty;
type UnsignedInt = $uty;
fn extract_sign(self) -> (bool, $uty) {
if self < 0 {
(true, (!(self as $uty)).wrapping_add(1))
} else {
(false, self as $uty)
}
}
fn unsigned(self) -> $uty {
self as $uty
}
@ -391,13 +354,14 @@ impl_h_int!(
);
/// Trait to express (possibly lossy) casting of integers
pub(crate) trait CastInto<T: Copy>: Copy {
#[doc(hidden)]
pub trait CastInto<T: Copy>: Copy {
fn cast(self) -> T;
}
macro_rules! cast_into {
($ty:ty) => {
cast_into!($ty; usize, isize, u32, i32, u64, i64, u128, i128);
cast_into!($ty; usize, isize, u8, i8, u16, i16, u32, i32, u64, i64, u128, i128);
};
($ty:ty; $($into:ty),*) => {$(
impl CastInto<$into> for $ty {
@ -408,6 +372,12 @@ macro_rules! cast_into {
)*};
}
cast_into!(usize);
cast_into!(isize);
cast_into!(u8);
cast_into!(i8);
cast_into!(u16);
cast_into!(i16);
cast_into!(u32);
cast_into!(i32);
cast_into!(u64);

View file

@ -1,5 +1,12 @@
// TODO: when `unsafe_block_in_unsafe_fn` is stabilized, remove this
#![allow(unused_unsafe)]
// The functions are complex with many branches, and explicit
// `return`s makes it clear where function exit points are
#![allow(clippy::needless_return)]
#![allow(clippy::comparison_chain)]
// Clippy is confused by the complex configuration
#![allow(clippy::if_same_then_else)]
#![allow(clippy::needless_bool)]
//! This `specialized_div_rem` module is originally from version 1.0.0 of the
//! `specialized-div-rem` crate. Note that `for` loops with ranges are not used in this

View file

@ -16,6 +16,8 @@
// compiler on ABIs and such, so we should be "good enough" for now and changes
// to the `u128` ABI will be reflected here.
#![allow(improper_ctypes, improper_ctypes_definitions)]
// `mem::swap` cannot be used because it may generate references to memcpy in unoptimized code.
#![allow(clippy::manual_swap)]
// We disable #[no_mangle] for tests so that we can verify the test results
// against the native compiler-rt implementations of the builtins.
@ -30,11 +32,6 @@
#[cfg(test)]
extern crate core;
#[allow(unused_unsafe)]
fn abort() -> ! {
unsafe { core::intrinsics::abort() }
}
#[macro_use]
mod macros;

View file

@ -1,3 +1,6 @@
// Trying to satisfy clippy here is hopeless
#![allow(clippy::style)]
#[allow(warnings)]
#[cfg(target_pointer_width = "16")]
type c_int = i16;

View file

@ -8,14 +8,11 @@ edition = "2018"
test = false
doctest = false
[build-dependencies]
rand = "0.7"
[dependencies]
# For fuzzing tests we want a deterministic seedable RNG. We also eliminate potential
# problems with system RNGs on the variety of platforms this crate is tested on.
# `xoshiro128**` is used for its quality, size, and speed at generating `u32` shift amounts.
rand_xoshiro = "0.4"
rand_xoshiro = "0.6"
[dependencies.compiler_builtins]
path = ".."

File diff suppressed because it is too large Load diff

View file

@ -1,3 +1,5 @@
#![allow(unused_macros)]
use testcrate::*;
macro_rules! sum {
@ -107,3 +109,18 @@ fn float_addsub() {
f64, __adddf3, __subdf3;
);
}
#[cfg(target_arch = "arm")]
#[test]
fn float_addsub_arm() {
use compiler_builtins::float::{
add::{__adddf3vfp, __addsf3vfp},
sub::{__subdf3vfp, __subsf3vfp},
Float,
};
float_sum!(
f32, __addsf3vfp, __subsf3vfp;
f64, __adddf3vfp, __subdf3vfp;
);
}

View file

@ -1,3 +1,5 @@
#![allow(unused_macros)]
use testcrate::*;
macro_rules! cmp {
@ -50,3 +52,61 @@ fn float_comparisons() {
);
});
}
macro_rules! cmp2 {
($x:ident, $y:ident, $($unordered_val:expr, $fn_std:expr, $fn_builtins:ident);*;) => {
$(
let cmp0: i32 = if $x.is_nan() || $y.is_nan() {
$unordered_val
} else {
$fn_std as i32
};
let cmp1: i32 = $fn_builtins($x, $y);
if cmp0 != cmp1 {
panic!("{}({}, {}): std: {}, builtins: {}", stringify!($fn_builtins), $x, $y, cmp0, cmp1);
}
)*
};
}
#[cfg(target_arch = "arm")]
#[test]
fn float_comparisons_arm() {
use compiler_builtins::float::cmp::{
__aeabi_dcmpeq, __aeabi_dcmpge, __aeabi_dcmpgt, __aeabi_dcmple, __aeabi_dcmplt,
__aeabi_fcmpeq, __aeabi_fcmpge, __aeabi_fcmpgt, __aeabi_fcmple, __aeabi_fcmplt, __eqdf2vfp,
__eqsf2vfp, __gedf2vfp, __gesf2vfp, __gtdf2vfp, __gtsf2vfp, __ledf2vfp, __lesf2vfp,
__ltdf2vfp, __ltsf2vfp, __nedf2vfp, __nesf2vfp,
};
fuzz_float_2(N, |x: f32, y: f32| {
cmp2!(x, y,
0, x < y, __aeabi_fcmplt;
0, x <= y, __aeabi_fcmple;
0, x == y, __aeabi_fcmpeq;
0, x >= y, __aeabi_fcmpge;
0, x > y, __aeabi_fcmpgt;
0, x < y, __ltsf2vfp;
0, x <= y, __lesf2vfp;
0, x == y, __eqsf2vfp;
0, x >= y, __gesf2vfp;
0, x > y, __gtsf2vfp;
1, x != y, __nesf2vfp;
);
});
fuzz_float_2(N, |x: f64, y: f64| {
cmp2!(x, y,
0, x < y, __aeabi_dcmplt;
0, x <= y, __aeabi_dcmple;
0, x == y, __aeabi_dcmpeq;
0, x >= y, __aeabi_dcmpge;
0, x > y, __aeabi_dcmpgt;
0, x < y, __ltdf2vfp;
0, x <= y, __ledf2vfp;
0, x == y, __eqdf2vfp;
0, x >= y, __gedf2vfp;
0, x > y, __gtdf2vfp;
1, x != y, __nedf2vfp;
);
});
}

View file

@ -1,3 +1,5 @@
#![allow(unused_macros)]
use compiler_builtins::int::sdiv::{__divmoddi4, __divmodsi4, __divmodti4};
use compiler_builtins::int::udiv::{__udivmoddi4, __udivmodsi4, __udivmodti4, u128_divide_sparc};
use testcrate::*;
@ -108,7 +110,7 @@ macro_rules! float {
let quo0 = x / y;
let quo1: $i = $fn(x, y);
// division of subnormals is not currently handled
if !(Float::is_subnormal(&quo0) || Float::is_subnormal(&quo1)) {
if !(Float::is_subnormal(quo0) || Float::is_subnormal(quo1)) {
if !Float::eq_repr(quo0, quo1) {
panic!(
"{}({}, {}): std: {}, builtins: {}",
@ -134,3 +136,17 @@ fn float_div() {
f64, __divdf3;
);
}
#[cfg(target_arch = "arm")]
#[test]
fn float_div_arm() {
use compiler_builtins::float::{
div::{__divdf3vfp, __divsf3vfp},
Float,
};
float!(
f32, __divsf3vfp;
f64, __divdf3vfp;
);
}

View file

@ -1,37 +0,0 @@
#![feature(lang_items)]
#![allow(bad_style)]
#![allow(unused_imports)]
#![no_std]
extern crate compiler_builtins as builtins;
#[cfg(all(
target_arch = "arm",
not(any(target_env = "gnu", target_env = "musl")),
target_os = "linux",
test
))]
extern crate utest_cortex_m_qemu;
#[cfg(all(
target_arch = "arm",
not(any(target_env = "gnu", target_env = "musl")),
target_os = "linux",
test
))]
#[macro_use]
extern crate utest_macros;
#[cfg(all(
target_arch = "arm",
not(any(target_env = "gnu", target_env = "musl")),
target_os = "linux",
test
))]
macro_rules! panic { // overrides `panic!`
($($tt:tt)*) => {
upanic!($($tt)*);
};
}
include!(concat!(env!("OUT_DIR"), "/generated.rs"));

View file

@ -1,3 +1,7 @@
// makes configuration easier
#![allow(unused_macros)]
use compiler_builtins::float::Float;
use testcrate::*;
/// Make sure that the edge case tester and randomized tester don't break, and list examples of
@ -89,46 +93,87 @@ fn leading_zeros() {
})
}
#[test]
fn float_extend() {
fuzz_float(N, |x: f32| {
let tmp0 = x as f64;
let tmp1: f64 = compiler_builtins::float::extend::__extendsfdf2(x);
if !compiler_builtins::float::Float::eq_repr(tmp0, tmp1) {
panic!("__extendsfdf2({}): std: {}, builtins: {}", x, tmp0, tmp1);
}
});
macro_rules! extend {
($fX:ident, $fD:ident, $fn:ident) => {
fuzz_float(N, |x: $fX| {
let tmp0 = x as $fD;
let tmp1: $fD = $fn(x);
if !Float::eq_repr(tmp0, tmp1) {
panic!(
"{}({}): std: {}, builtins: {}",
stringify!($fn),
x,
tmp0,
tmp1
);
}
});
};
}
// This doesn't quite work because of issues related to
#[test]
fn float_extend() {
use compiler_builtins::float::extend::__extendsfdf2;
extend!(f32, f64, __extendsfdf2);
}
#[cfg(target_arch = "arm")]
#[test]
fn float_extend_arm() {
use compiler_builtins::float::extend::__extendsfdf2vfp;
extend!(f32, f64, __extendsfdf2vfp);
}
// This is approximate because of issues related to
// https://github.com/rust-lang/rust/issues/73920.
// TODO how do we resolve this?
/*
// TODO how do we resolve this indeterminacy?
macro_rules! pow {
($($f:ty, $fn:ident);*;) => {
($($f:ty, $tolerance:expr, $fn:ident);*;) => {
$(
fuzz_float_2(N, |x: $f, y: $f| {
let n = y as i32;
let tmp0: $f = x.powi(n);
let tmp1: $f = $fn(x, n);
if tmp0 != tmp1 {
panic!(
"{}({}, {}): std: {}, builtins: {}",
stringify!($fn), x, y, tmp0, tmp1
);
if !(Float::is_subnormal(x) || Float::is_subnormal(y) || x.is_nan()) {
let n = y.to_bits() & !<$f as Float>::SIGNIFICAND_MASK;
let n = (n as <$f as Float>::SignedInt) >> <$f as Float>::SIGNIFICAND_BITS;
let n = n as i32;
let tmp0: $f = x.powi(n);
let tmp1: $f = $fn(x, n);
let (a, b) = if tmp0 < tmp1 {
(tmp0, tmp1)
} else {
(tmp1, tmp0)
};
let good = {
if a == b {
// handles infinity equality
true
} else if a < $tolerance {
b < $tolerance
} else {
let quo = b / a;
(quo < (1. + $tolerance)) && (quo > (1. - $tolerance))
}
};
if !good {
panic!(
"{}({}, {}): std: {}, builtins: {}",
stringify!($fn), x, n, tmp0, tmp1
);
}
}
});
)*
};
}
#[cfg(not(all(target_arch = "x86", not(target_feature = "sse"))))]
#[test]
fn float_pow() {
use compiler_builtins::float::pow::{__powidf2, __powisf2};
pow!(
f32, __powisf2;
f64, __powidf2;
f32, 1e-4, __powisf2;
f64, 1e-12, __powidf2;
);
}
*/

View file

@ -1,3 +1,5 @@
#![allow(unused_macros)]
use testcrate::*;
macro_rules! mul {
@ -86,7 +88,7 @@ macro_rules! float_mul {
let mul0 = x * y;
let mul1: $f = $fn(x, y);
// multiplication of subnormals is not currently handled
if !(Float::is_subnormal(&mul0) || Float::is_subnormal(&mul1)) {
if !(Float::is_subnormal(mul0) || Float::is_subnormal(mul1)) {
if !Float::eq_repr(mul0, mul1) {
panic!(
"{}({}, {}): std: {}, builtins: {}",
@ -112,3 +114,17 @@ fn float_mul() {
f64, __muldf3;
);
}
#[cfg(target_arch = "arm")]
#[test]
fn float_mul_arm() {
use compiler_builtins::float::{
mul::{__muldf3vfp, __mulsf3vfp},
Float,
};
float_mul!(
f32, __mulsf3vfp;
f64, __muldf3vfp;
);
}