Split Int into Int and MinInt

`MinInt` contains only the basic methods that are needed by integers
involved in widening operations, i.e. big integers. `Int` retains all
other operations and convenience methods.
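In sketch form, the split looks roughly like this (a simplified sketch, not the exact bounds; the real traits carry more operator bounds, constants, and helpers, as the int/mod.rs diff below shows):

    // `MinInt`: the minimal surface that wide integers can also implement.
    pub(crate) trait MinInt: Copy {
        type OtherSign: MinInt;   // same width, opposite signedness
        type UnsignedInt: MinInt; // unsigned version of Self
        const SIGNED: bool;
        const BITS: u32;
        const ZERO: Self;
        const ONE: Self;
    }

    // `Int`: everything only ordinary primitives need; integers used as
    // widening targets implement just `MinInt`.
    pub(crate) trait Int: MinInt + PartialEq + PartialOrd {
        fn unsigned(self) -> Self::UnsignedInt;
        fn from_unsigned(unsigned: Self::UnsignedInt) -> Self;
    }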
Trevor Gross 2024-05-10 18:38:09 -05:00
parent 0da9e16bef
commit 10689247bc
12 changed files with 210 additions and 172 deletions

View file

@@ -1,5 +1,5 @@
use crate::float::Float;
-use crate::int::{CastInto, Int};
+use crate::int::{CastInto, Int, MinInt};
/// Returns `a + b`
fn add<F: Float>(a: F, b: F) -> F
@@ -57,9 +57,9 @@ where
}
// zero + anything = anything
-if a_abs == Int::ZERO {
+if a_abs == MinInt::ZERO {
// but we need to get the sign right for zero + zero
-if b_abs == Int::ZERO {
+if b_abs == MinInt::ZERO {
return F::from_repr(a.repr() & b.repr());
} else {
return b;
@@ -67,7 +67,7 @@ where
}
// anything + zero = anything
-if b_abs == Int::ZERO {
+if b_abs == MinInt::ZERO {
return a;
}
}
@@ -113,10 +113,10 @@ where
// Shift the significand of b by the difference in exponents, with a sticky
// bottom bit to get rounding correct.
let align = a_exponent.wrapping_sub(b_exponent).cast();
-if align != Int::ZERO {
+if align != MinInt::ZERO {
if align < bits {
let sticky =
-F::Int::from_bool(b_significand << bits.wrapping_sub(align).cast() != Int::ZERO);
+F::Int::from_bool(b_significand << bits.wrapping_sub(align).cast() != MinInt::ZERO);
b_significand = (b_significand >> align.cast()) | sticky;
} else {
b_significand = one; // sticky; b is known to be non-zero.
@@ -125,8 +125,8 @@ where
if subtraction {
a_significand = a_significand.wrapping_sub(b_significand);
// If a == -b, return +zero.
-if a_significand == Int::ZERO {
-return F::from_repr(Int::ZERO);
+if a_significand == MinInt::ZERO {
+return F::from_repr(MinInt::ZERO);
}
// If partial cancellation occurred, we need to left-shift the result
@@ -143,8 +143,8 @@ where
// If the addition carried up, we need to right-shift the result and
// adjust the exponent:
-if a_significand & implicit_bit << 4 != Int::ZERO {
-let sticky = F::Int::from_bool(a_significand & one != Int::ZERO);
+if a_significand & implicit_bit << 4 != MinInt::ZERO {
+let sticky = F::Int::from_bool(a_significand & one != MinInt::ZERO);
a_significand = a_significand >> 1 | sticky;
a_exponent += 1;
}
@@ -160,7 +160,7 @@ where
// need to shift the significand.
let shift = (1 - a_exponent).cast();
let sticky =
-F::Int::from_bool((a_significand << bits.wrapping_sub(shift).cast()) != Int::ZERO);
+F::Int::from_bool((a_significand << bits.wrapping_sub(shift).cast()) != MinInt::ZERO);
a_significand = a_significand >> shift.cast() | sticky;
a_exponent = 0;
}

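The sticky-bit shifts above preserve correct rounding: every bit shifted out is OR-ed into the lowest retained bit, so the result still compares correctly against the rounding midpoint. The same trick on a plain u32, as a standalone sketch (names are illustrative, not from this commit):

    /// Right-shift `x` by `shift`, OR-ing all shifted-out bits into bit 0.
    /// Assumes 0 < shift < 32.
    fn shr_sticky(x: u32, shift: u32) -> u32 {
        // `x << (32 - shift)` keeps exactly the bits that `x >> shift` drops.
        let lost_bits = x << (32 - shift) != 0;
        (x >> shift) | lost_bits as u32
    }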
View file

@@ -1,7 +1,7 @@
#![allow(unreachable_code)]
use crate::float::Float;
-use crate::int::Int;
+use crate::int::MinInt;
#[derive(Clone, Copy)]
enum Result {

View file

@@ -3,7 +3,9 @@
#![allow(clippy::needless_return)]
use crate::float::Float;
-use crate::int::{CastInto, DInt, HInt, Int};
+use crate::int::{CastInto, DInt, HInt, Int, MinInt};
+use super::HalfRep;
fn div32<F: Float>(a: F, b: F) -> F
where
@@ -454,15 +456,20 @@ where
fn div64<F: Float>(a: F, b: F) -> F
where
u32: CastInto<F::Int>,
F::Int: CastInto<u32>,
i32: CastInto<F::Int>,
F::Int: CastInto<i32>,
u64: CastInto<F::Int>,
+F::Int: CastInto<HalfRep<F>>,
+F::Int: From<HalfRep<F>>,
+F::Int: From<u8>,
F::Int: CastInto<u64>,
i64: CastInto<F::Int>,
F::Int: CastInto<i64>,
-F::Int: HInt,
+F::Int: HInt + DInt,
+u16: CastInto<F::Int>,
i32: CastInto<F::Int>,
i64: CastInto<F::Int>,
u32: CastInto<F::Int>,
u64: CastInto<F::Int>,
+u64: CastInto<HalfRep<F>>,
{
const NUMBER_OF_HALF_ITERATIONS: usize = 3;
const NUMBER_OF_FULL_ITERATIONS: usize = 1;
@@ -471,7 +478,7 @@ where
let one = F::Int::ONE;
let zero = F::Int::ZERO;
let hw = F::BITS / 2;
-let lo_mask = u64::MAX >> hw;
+let lo_mask = F::Int::MAX >> hw;
let significand_bits = F::SIGNIFICAND_BITS;
let max_exponent = F::EXPONENT_MAX;
@@ -616,8 +623,9 @@ where
let mut x_uq0 = if NUMBER_OF_HALF_ITERATIONS > 0 {
// Starting with (n-1) half-width iterations
-let b_uq1_hw: u32 =
-(CastInto::<u64>::cast(b_significand) >> (significand_bits + 1 - hw)) as u32;
+let b_uq1_hw: HalfRep<F> = CastInto::<HalfRep<F>>::cast(
+CastInto::<u64>::cast(b_significand) >> (significand_bits + 1 - hw),
+);
// C is (3/4 + 1/sqrt(2)) - 1 truncated to W0 fractional bits as UQ0.HW
// with W0 being either 16 or 32 and W0 <= HW.
@@ -625,12 +633,13 @@ where
// b/2 is subtracted to obtain x0) wrapped to [0, 1) range.
// HW is at least 32. Shifting into the highest bits if needed.
-let c_hw = (0x7504F333_u64 as u32).wrapping_shl(hw.wrapping_sub(32));
+let c_hw = (CastInto::<HalfRep<F>>::cast(0x7504F333_u64)).wrapping_shl(hw.wrapping_sub(32));
// b >= 1, thus an upper bound for 3/4 + 1/sqrt(2) - b/2 is about 0.9572,
// so x0 fits to UQ0.HW without wrapping.
-let x_uq0_hw: u32 = {
-let mut x_uq0_hw: u32 = c_hw.wrapping_sub(b_uq1_hw /* exact b_hw/2 as UQ0.HW */);
+let x_uq0_hw: HalfRep<F> = {
+let mut x_uq0_hw: HalfRep<F> =
+c_hw.wrapping_sub(b_uq1_hw /* exact b_hw/2 as UQ0.HW */);
// dbg!(x_uq0_hw);
// An e_0 error is comprised of errors due to
// * x0 being an inherently imprecise first approximation of 1/b_hw
@@ -661,8 +670,9 @@ where
// no overflow occurred earlier: ((rep_t)x_UQ0_hw * b_UQ1_hw >> HW) is
// expected to be strictly positive because b_UQ1_hw has its highest bit set
// and x_UQ0_hw should be rather large (it converges to 1/2 < 1/b_hw <= 1).
-let corr_uq1_hw: u32 =
-0.wrapping_sub(((x_uq0_hw as u64).wrapping_mul(b_uq1_hw as u64)) >> hw) as u32;
+let corr_uq1_hw: HalfRep<F> = CastInto::<HalfRep<F>>::cast(zero.wrapping_sub(
+((F::Int::from(x_uq0_hw)).wrapping_mul(F::Int::from(b_uq1_hw))) >> hw,
+));
// dbg!(corr_uq1_hw);
// Now, we should multiply UQ0.HW and UQ1.(HW-1) numbers, naturally
@@ -677,7 +687,9 @@ where
// The fact corr_UQ1_hw was virtually round up (due to result of
// multiplication being **first** truncated, then negated - to improve
// error estimations) can increase x_UQ0_hw by up to 2*Ulp of x_UQ0_hw.
-x_uq0_hw = ((x_uq0_hw as u64).wrapping_mul(corr_uq1_hw as u64) >> (hw - 1)) as u32;
+x_uq0_hw = ((F::Int::from(x_uq0_hw)).wrapping_mul(F::Int::from(corr_uq1_hw))
+>> (hw - 1))
+.cast();
// dbg!(x_uq0_hw);
// Now, either no overflow occurred or x_UQ0_hw is 0 or 1 in its half_rep_t
// representation. In the latter case, x_UQ0_hw will be either 0 or 1 after
@@ -707,7 +719,7 @@ where
// be not below that value (see g(x) above), so it is safe to decrement just
// once after the final iteration. On the other hand, an effective value of
// divisor changes after this point (from b_hw to b), so adjust here.
-x_uq0_hw.wrapping_sub(1_u32)
+x_uq0_hw.wrapping_sub(HalfRep::<F>::ONE)
};
// Error estimations for full-precision iterations are calculated just
@@ -717,7 +729,7 @@ where
// Simulating operations on a twice_rep_t to perform a single final full-width
// iteration. Using ad-hoc multiplication implementations to take advantage
// of particular structure of operands.
-let blo: u64 = (CastInto::<u64>::cast(b_uq1)) & lo_mask;
+let blo: F::Int = b_uq1 & lo_mask;
// x_UQ0 = x_UQ0_hw * 2^HW - 1
// x_UQ0 * b_UQ1 = (x_UQ0_hw * 2^HW) * (b_UQ1_hw * 2^HW + blo) - b_UQ1
//
@@ -726,19 +738,20 @@ where
// + [ x_UQ0_hw * blo ]
// - [ b_UQ1 ]
// = [ result ][.... discarded ...]
-let corr_uq1 = negate_u64(
-(x_uq0_hw as u64) * (b_uq1_hw as u64) + (((x_uq0_hw as u64) * (blo)) >> hw) - 1,
-); // account for *possible* carry
-let lo_corr = corr_uq1 & lo_mask;
-let hi_corr = corr_uq1 >> hw;
+let corr_uq1: F::Int = (F::Int::from(x_uq0_hw) * F::Int::from(b_uq1_hw)
++ ((F::Int::from(x_uq0_hw) * blo) >> hw))
+.wrapping_sub(one)
+.wrapping_neg(); // account for *possible* carry
+let lo_corr: F::Int = corr_uq1 & lo_mask;
+let hi_corr: F::Int = corr_uq1 >> hw;
// x_UQ0 * corr_UQ1 = (x_UQ0_hw * 2^HW) * (hi_corr * 2^HW + lo_corr) - corr_UQ1
-let mut x_uq0: <F as Float>::Int = ((((x_uq0_hw as u64) * hi_corr) << 1)
-.wrapping_add(((x_uq0_hw as u64) * lo_corr) >> (hw - 1))
-.wrapping_sub(2))
-.cast(); // 1 to account for the highest bit of corr_UQ1 can be 1
-// 1 to account for possible carry
-// Just like the case of half-width iterations but with possibility
-// of overflowing by one extra Ulp of x_UQ0.
+let mut x_uq0: F::Int = ((F::Int::from(x_uq0_hw) * hi_corr) << 1)
+.wrapping_add((F::Int::from(x_uq0_hw) * lo_corr) >> (hw - 1))
+.wrapping_sub(F::Int::from(2u8));
+// 1 to account for the highest bit of corr_UQ1 can be 1
+// 1 to account for possible carry
+// Just like the case of half-width iterations but with possibility
+// of overflowing by one extra Ulp of x_UQ0.
x_uq0 -= one;
// ... and then traditional fixup by 2 should work
@@ -755,8 +768,8 @@ where
x_uq0
} else {
// C is (3/4 + 1/sqrt(2)) - 1 truncated to 64 fractional bits as UQ0.n
-let c: <F as Float>::Int = (0x7504F333 << (F::BITS - 32)).cast();
-let x_uq0: <F as Float>::Int = c.wrapping_sub(b_uq1);
+let c: F::Int = (0x7504F333 << (F::BITS - 32)).cast();
+let x_uq0: F::Int = c.wrapping_sub(b_uq1);
// E_0 <= 3/4 - 1/sqrt(2) + 2 * 2^-64
x_uq0
};
@@ -806,7 +819,7 @@ where
// Now 1/b - (2*P) * 2^-W < x < 1/b
// FIXME Is x_UQ0 still >= 0.5?
-let mut quotient: <F as Float>::Int = x_uq0.widen_mul(a_significand << 1).hi();
+let mut quotient: F::Int = x_uq0.widen_mul(a_significand << 1).hi();
// Now, a/b - 4*P * 2^-W < q < a/b for q=<quotient_UQ1:dummy> in UQ1.(SB+1+W).
// quotient_UQ1 is in [0.5, 2.0) as UQ1.(SB+1),
@@ -868,7 +881,7 @@ where
// r = a - b * q
let abs_result = if written_exponent > 0 {
let mut ret = quotient & significand_mask;
-ret |= ((written_exponent as u64) << significand_bits).cast();
+ret |= written_exponent.cast() << significand_bits;
residual <<= 1;
ret
} else {

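For reference, 0x7504F333 is (3/4 + 1/sqrt(2)) - 1 truncated to 32 fractional bits: the starting estimate for the Newton-Raphson reciprocal iteration, where each step computes corr = -(x * b) and x = x * corr in fixed point, roughly doubling the number of correct bits. A quick standalone sanity check of the constant (not part of the commit):

    fn main() {
        // (3/4 + 1/sqrt(2)) - 1 encoded as UQ0.32 fixed point.
        let c = (0.75_f64 + 1.0 / 2.0_f64.sqrt() - 1.0) * 2.0_f64.powi(32);
        assert_eq!(c as u32, 0x7504F333); // `as` truncates, matching the comment
    }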
View file

@@ -1,5 +1,5 @@
use crate::float::Float;
-use crate::int::{CastInto, Int};
+use crate::int::{CastInto, Int, MinInt};
/// Generic conversion from a narrower to a wider IEEE-754 floating-point type
fn extend<F: Float, R: Float>(a: F) -> R

View file

@@ -1,6 +1,6 @@
use core::ops;
-use super::int::Int;
+use crate::int::{DInt, Int, MinInt};
pub mod add;
pub mod cmp;
@@ -12,6 +12,9 @@ pub mod pow;
pub mod sub;
pub mod trunc;
+/// Wrapper to extract the integer type half of the float's size
+pub(crate) type HalfRep<F> = <<F as Float>::Int as DInt>::H;
public_test_dep! {
/// Trait for some basic operations on floats
#[allow(dead_code)]
@@ -60,7 +63,7 @@ pub(crate) trait Float:
/// A mask for the significand
const SIGNIFICAND_MASK: Self::Int;
-// The implicit bit of the float format
+/// The implicit bit of the float format
const IMPLICIT_BIT: Self::Int;
/// A mask for the exponent

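`HalfRep<F>` is what lets `div64` stay generic while working on half-width chunks: for f64 the repr is u64 and its `DInt` half is u32, so `HalfRep<f64>` = u32 (and `HalfRep<f32>` = u16). Concretely, an illustrative sketch of what the alias resolves to:

    fn halves_of_f64_repr(x: u64) {
        // <u64 as DInt>::H = u32, so HalfRep<f64> = u32.
        let lo = x as u32;          // what DInt::lo() returns for u64
        let hi = (x >> 32) as u32;  // what DInt::hi() returns
        assert_eq!(((hi as u64) << 32) | lo as u64, x);
    }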
View file

@@ -1,5 +1,5 @@
use crate::float::Float;
-use crate::int::{CastInto, DInt, HInt, Int};
+use crate::int::{CastInto, DInt, HInt, Int, MinInt};
fn mul<F: Float>(a: F, b: F) -> F
where

View file

@@ -1,5 +1,5 @@
use crate::float::Float;
-use crate::int::{CastInto, Int};
+use crate::int::{CastInto, Int, MinInt};
fn trunc<F: Float, R: Float>(a: F) -> R
where

View file

@@ -1,6 +1,6 @@
-use crate::int::{DInt, Int};
+use crate::int::{DInt, Int, MinInt};
-trait UAddSub: DInt {
+trait UAddSub: DInt + Int {
fn uadd(self, other: Self) -> Self {
let (lo, carry) = self.lo().overflowing_add(other.lo());
let hi = self.hi().wrapping_add(other.hi());
@@ -22,7 +22,7 @@ impl UAddSub for u128 {}
trait AddSub: Int
where
-<Self as Int>::UnsignedInt: UAddSub,
+<Self as MinInt>::UnsignedInt: UAddSub,
{
fn add(self, other: Self) -> Self {
Self::from_unsigned(self.unsigned().uadd(other.unsigned()))
@@ -37,7 +37,7 @@ impl AddSub for i128 {}
trait Addo: AddSub
where
-<Self as Int>::UnsignedInt: UAddSub,
+<Self as MinInt>::UnsignedInt: UAddSub,
{
fn addo(self, other: Self) -> (Self, bool) {
let sum = AddSub::add(self, other);
@@ -50,7 +50,7 @@ impl Addo for u128 {}
trait Subo: AddSub
where
-<Self as Int>::UnsignedInt: UAddSub,
+<Self as MinInt>::UnsignedInt: UAddSub,
{
fn subo(self, other: Self) -> (Self, bool) {
let sum = AddSub::sub(self, other);

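The `uadd` above is plain schoolbook carrying: add the low halves, then fold the carry into the sum of the high halves. The same computation spelled out on u64 halves of a u128 (illustrative only, not code from the commit):

    fn add_via_halves(a: u128, b: u128) -> u128 {
        let (lo, carry) = (a as u64).overflowing_add(b as u64);
        let hi = ((a >> 64) as u64)
            .wrapping_add((b >> 64) as u64)
            .wrapping_add(carry as u64); // the carrying add from the trait
        ((hi as u128) << 64) | lo as u128
    }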
View file

@@ -9,37 +9,22 @@ pub mod sdiv;
pub mod shift;
pub mod udiv;
-pub use self::leading_zeros::__clzsi2;
+pub use leading_zeros::__clzsi2;
public_test_dep! {
-/// Trait for some basic operations on integers
+/// Minimal integer implementations needed on all integer types, including wide integers.
#[allow(dead_code)]
-pub(crate) trait Int:
-Copy
+pub(crate) trait MinInt: Copy
 + core::fmt::Debug
-+ PartialEq
-+ PartialOrd
-+ ops::AddAssign
-+ ops::SubAssign
-+ ops::BitAndAssign
-+ ops::BitOrAssign
-+ ops::BitXorAssign
-+ ops::ShlAssign<i32>
-+ ops::ShrAssign<u32>
-+ ops::Add<Output = Self>
-+ ops::Sub<Output = Self>
-+ ops::Div<Output = Self>
-+ ops::Shl<u32, Output = Self>
-+ ops::Shr<u32, Output = Self>
 + ops::BitOr<Output = Self>
-+ ops::BitXor<Output = Self>
-+ ops::BitAnd<Output = Self>
 + ops::Not<Output = Self>
++ ops::Shl<u32, Output = Self>
{
/// Type with the same width but other signedness
-type OtherSign: Int;
+type OtherSign: MinInt;
/// Unsigned version of Self
-type UnsignedInt: Int;
+type UnsignedInt: MinInt;
/// If `Self` is a signed integer
const SIGNED: bool;
@@ -51,13 +36,47 @@ pub(crate) trait Int:
const ONE: Self;
const MIN: Self;
const MAX: Self;
+}
+}
+public_test_dep! {
+/// Trait for some basic operations on integers
+#[allow(dead_code)]
+pub(crate) trait Int: MinInt
++ PartialEq
++ PartialOrd
++ ops::AddAssign
++ ops::SubAssign
++ ops::BitAndAssign
++ ops::BitOrAssign
++ ops::BitXorAssign
++ ops::ShlAssign<i32>
++ ops::ShrAssign<u32>
++ ops::Add<Output = Self>
++ ops::Sub<Output = Self>
++ ops::Mul<Output = Self>
++ ops::Div<Output = Self>
++ ops::Shr<u32, Output = Self>
++ ops::BitXor<Output = Self>
++ ops::BitAnd<Output = Self>
+{
/// LUT used for maximizing the space covered and minimizing the computational cost of fuzzing
/// in `testcrate`. For example, Self = u128 produces [0,1,2,7,8,15,16,31,32,63,64,95,96,111,
/// 112,119,120,125,126,127].
-const FUZZ_LENGTHS: [u8; 20];
+const FUZZ_LENGTHS: [u8; 20] = make_fuzz_lengths(<Self as MinInt>::BITS);
/// The number of entries of `FUZZ_LENGTHS` actually used. The maximum is 20 for u128.
-const FUZZ_NUM: usize;
+const FUZZ_NUM: usize = {
+let log2 = (<Self as MinInt>::BITS - 1).count_ones() as usize;
+if log2 == 3 {
+// case for u8
+6
+} else {
+// 3 entries on each extreme, 2 in the middle, and 4 for each scale of intermediate
+// boundaries.
+8 + (4 * (log2 - 4))
+}
+};
fn unsigned(self) -> Self::UnsignedInt;
fn from_unsigned(unsigned: Self::UnsignedInt) -> Self;
@@ -84,74 +103,54 @@ pub(crate) trait Int:
}
}
+pub(crate) const fn make_fuzz_lengths(bits: u32) -> [u8; 20] {
+let mut v = [0u8; 20];
+v[0] = 0;
+v[1] = 1;
+v[2] = 2; // important for parity and the iX::MIN case when reversed
+let mut i = 3;
+// No need for any more until the byte boundary, because there should be no algorithms
+// that are sensitive to anything not next to byte boundaries after 2. We also scale
+// in powers of two, which is important to prevent u128 corner tests from getting too
+// big.
+let mut l = 8;
+loop {
+if l >= ((bits / 2) as u8) {
+break;
+}
+// get both sides of the byte boundary
+v[i] = l - 1;
+i += 1;
+v[i] = l;
+i += 1;
+l *= 2;
+}
+if bits != 8 {
+// add the lower side of the middle boundary
+v[i] = ((bits / 2) - 1) as u8;
+i += 1;
+}
+// We do not want to jump directly from the Self::BITS/2 boundary to the Self::BITS
+// boundary because of algorithms that split the high part up. We reverse the scaling
+// as we go to Self::BITS.
+let mid = i;
+let mut j = 1;
+loop {
+v[i] = (bits as u8) - (v[mid - j]) - 1;
+if j == mid {
+break;
+}
+i += 1;
+j += 1;
+}
+v
+}
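Tracing the loop for bits = 32 gives the boundary-hugging pattern the doc comment describes: byte boundaries scaled by powers of two up to BITS/2, then the mirror image back up to BITS (a derived example, not a test from the commit):

    #[test]
    fn fuzz_lengths_for_u32() {
        let v = make_fuzz_lengths(32);
        // FUZZ_NUM for u32 is 8 + 4 * (5 - 4) = 12 entries.
        assert_eq!(&v[..12], &[0, 1, 2, 7, 8, 15, 16, 23, 24, 29, 30, 31]);
    }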
macro_rules! int_impl_common {
($ty:ty) => {
-const BITS: u32 = <Self as Int>::ZERO.count_zeros();
-const SIGNED: bool = Self::MIN != Self::ZERO;
-const ZERO: Self = 0;
-const ONE: Self = 1;
-const MIN: Self = <Self>::MIN;
-const MAX: Self = <Self>::MAX;
-const FUZZ_LENGTHS: [u8; 20] = {
-let bits = <Self as Int>::BITS;
-let mut v = [0u8; 20];
-v[0] = 0;
-v[1] = 1;
-v[2] = 2; // important for parity and the iX::MIN case when reversed
-let mut i = 3;
-// No need for any more until the byte boundary, because there should be no algorithms
-// that are sensitive to anything not next to byte boundaries after 2. We also scale
-// in powers of two, which is important to prevent u128 corner tests from getting too
-// big.
-let mut l = 8;
-loop {
-if l >= ((bits / 2) as u8) {
-break;
-}
-// get both sides of the byte boundary
-v[i] = l - 1;
-i += 1;
-v[i] = l;
-i += 1;
-l *= 2;
-}
-if bits != 8 {
-// add the lower side of the middle boundary
-v[i] = ((bits / 2) - 1) as u8;
-i += 1;
-}
-// We do not want to jump directly from the Self::BITS/2 boundary to the Self::BITS
-// boundary because of algorithms that split the high part up. We reverse the scaling
-// as we go to Self::BITS.
-let mid = i;
-let mut j = 1;
-loop {
-v[i] = (bits as u8) - (v[mid - j]) - 1;
-if j == mid {
-break;
-}
-i += 1;
-j += 1;
-}
-v
-};
-const FUZZ_NUM: usize = {
-let log2 = (<Self as Int>::BITS - 1).count_ones() as usize;
-if log2 == 3 {
-// case for u8
-6
-} else {
-// 3 entries on each extreme, 2 in the middle, and 4 for each scale of intermediate
-// boundaries.
-8 + (4 * (log2 - 4))
-}
-};
fn from_bool(b: bool) -> Self {
b as $ty
}
@@ -204,10 +203,20 @@ macro_rules! int_impl_common {
macro_rules! int_impl {
($ity:ty, $uty:ty) => {
-impl Int for $uty {
+impl MinInt for $uty {
type OtherSign = $ity;
type UnsignedInt = $uty;
+const BITS: u32 = <Self as MinInt>::ZERO.count_zeros();
+const SIGNED: bool = Self::MIN != Self::ZERO;
+const ZERO: Self = 0;
+const ONE: Self = 1;
+const MIN: Self = <Self>::MIN;
+const MAX: Self = <Self>::MAX;
+}
+impl Int for $uty {
fn unsigned(self) -> $uty {
self
}
@@ -229,10 +238,20 @@ macro_rules! int_impl {
int_impl_common!($uty);
}
-impl Int for $ity {
+impl MinInt for $ity {
type OtherSign = $uty;
type UnsignedInt = $uty;
+const BITS: u32 = <Self as MinInt>::ZERO.count_zeros();
+const SIGNED: bool = Self::MIN != Self::ZERO;
+const ZERO: Self = 0;
+const ONE: Self = 1;
+const MIN: Self = <Self>::MIN;
+const MAX: Self = <Self>::MAX;
+}
+impl Int for $ity {
fn unsigned(self) -> $uty {
self as $uty
}
@@ -260,18 +279,22 @@ int_impl!(i128, u128);
public_test_dep! {
/// Trait for integers twice the bit width of another integer. This is implemented for all
/// primitives except for `u8`, because there is not a smaller primitive.
-pub(crate) trait DInt: Int {
+pub(crate) trait DInt: MinInt {
/// Integer that is half the bit width of the integer this trait is implemented for
-type H: HInt<D = Self> + Int;
+type H: HInt<D = Self>;
/// Returns the low half of `self`
fn lo(self) -> Self::H;
/// Returns the high half of `self`
fn hi(self) -> Self::H;
/// Returns the low and high halves of `self` as a tuple
-fn lo_hi(self) -> (Self::H, Self::H);
+fn lo_hi(self) -> (Self::H, Self::H) {
+(self.lo(), self.hi())
+}
/// Constructs an integer using lower and higher half parts
-fn from_lo_hi(lo: Self::H, hi: Self::H) -> Self;
+fn from_lo_hi(lo: Self::H, hi: Self::H) -> Self {
+lo.zero_widen() | hi.widen_hi()
+}
}
}
@@ -280,7 +303,7 @@
/// primitives except for `u128`, because there is no larger primitive.
pub(crate) trait HInt: Int {
/// Integer that is double the bit width of the integer this trait is implemented for
-type D: DInt<H = Self> + Int;
+type D: DInt<H = Self> + MinInt;
/// Widens (using default extension) the integer to have double bit width
fn widen(self) -> Self::D;
@@ -288,7 +311,9 @@ pub(crate) trait HInt: Int {
/// around problems with associated type bounds (such as `Int<Othersign: DInt>`) being unstable
fn zero_widen(self) -> Self::D;
/// Widens the integer to have double bit width and shifts the integer into the higher bits
-fn widen_hi(self) -> Self::D;
+fn widen_hi(self) -> Self::D {
+self.widen() << <Self as MinInt>::BITS
+}
/// Widening multiplication with zero widening. This cannot overflow.
fn zero_widen_mul(self, rhs: Self) -> Self::D;
/// Widening multiplication. This cannot overflow.
@@ -306,13 +331,7 @@ macro_rules! impl_d_int {
self as $X
}
fn hi(self) -> Self::H {
-(self >> <$X as Int>::BITS) as $X
-}
-fn lo_hi(self) -> (Self::H, Self::H) {
-(self.lo(), self.hi())
-}
-fn from_lo_hi(lo: Self::H, hi: Self::H) -> Self {
-lo.zero_widen() | hi.widen_hi()
+(self >> <$X as MinInt>::BITS) as $X
}
}
)*
@@ -331,9 +350,6 @@ macro_rules! impl_h_int {
fn zero_widen(self) -> Self::D {
(self as $uH) as $X
}
-fn widen_hi(self) -> Self::D {
-(self as $X) << <$H as Int>::BITS
-}
fn zero_widen_mul(self, rhs: Self) -> Self::D {
self.zero_widen().wrapping_mul(rhs.zero_widen())
}

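With `lo_hi`, `from_lo_hi`, and `widen_hi` now having default bodies, implementors only supply `lo`, `hi`, `widen`, and `zero_widen`. What the defaults compute, shown on concrete u32/u64 halves (an illustrative sketch):

    fn dint_hint_defaults(x: u64) {
        let (lo, hi) = (x as u32, (x >> 32) as u32); // DInt::lo_hi = (lo(), hi())
        // DInt::from_lo_hi default: lo.zero_widen() | hi.widen_hi()
        assert_eq!(lo as u64 | ((hi as u64) << 32), x);
        // HInt::widen_hi default: self.widen() << BITS
        assert_eq!((hi as u64) << 32, x & 0xFFFF_FFFF_0000_0000);
    }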
View file

@@ -1,6 +1,6 @@
use crate::int::{DInt, HInt, Int};
-trait Mul: DInt
+trait Mul: DInt + Int
where
Self::H: DInt,
{
@@ -30,7 +30,7 @@ where
impl Mul for u64 {}
impl Mul for i128 {}
-pub(crate) trait UMulo: Int + DInt {
+pub(crate) trait UMulo: DInt + Int {
fn mulo(self, rhs: Self) -> (Self, bool) {
match (self.hi().is_zero(), rhs.hi().is_zero()) {
// overflow is guaranteed

View file

@@ -1,4 +1,4 @@
-use crate::int::{DInt, HInt, Int};
+use crate::int::{DInt, HInt, Int, MinInt};
trait Ashl: DInt {
/// Returns `a << b`, requires `b < Self::BITS`

View file

@@ -15,7 +15,7 @@
#![no_std]
use compiler_builtins::float::Float;
-use compiler_builtins::int::Int;
+use compiler_builtins::int::{Int, MinInt};
use rand_xoshiro::rand_core::{RngCore, SeedableRng};
use rand_xoshiro::Xoshiro128StarStar;
@@ -101,7 +101,10 @@ macro_rules! edge_cases {
/// Feeds a series of fuzzing inputs to `f`. The fuzzer first uses an algorithm designed to find
/// edge cases, followed by a more random fuzzer that runs `n` times.
-pub fn fuzz<I: Int, F: FnMut(I)>(n: u32, mut f: F) {
+pub fn fuzz<I: Int, F: FnMut(I)>(n: u32, mut f: F)
+where
+<I as MinInt>::UnsignedInt: Int,
+{
// edge case tester. Calls `f` 210 times for u128.
// zero gets skipped by the loop
f(I::ZERO);
@@ -111,7 +114,7 @@
// random fuzzer
let mut rng = Xoshiro128StarStar::seed_from_u64(0);
-let mut x: I = Int::ZERO;
+let mut x: I = MinInt::ZERO;
for _ in 0..n {
fuzz_step(&mut rng, &mut x);
f(x)
@@ -119,7 +122,10 @@
}
/// The same as `fuzz`, except `f` has two inputs.
-pub fn fuzz_2<I: Int, F: Fn(I, I)>(n: u32, f: F) {
+pub fn fuzz_2<I: Int, F: Fn(I, I)>(n: u32, f: F)
+where
+<I as MinInt>::UnsignedInt: Int,
+{
// Check cases where the first and second inputs are zero. Both call `f` 210 times for `u128`.
edge_cases!(I, case, {
f(I::ZERO, case);
@@ -150,10 +156,10 @@ pub fn fuzz_shift<I: Int, F: Fn(I, u32)>(f: F) {
// Shift functions are very simple and do not need anything other than shifting a small
// set of random patterns for every fuzz length.
let mut rng = Xoshiro128StarStar::seed_from_u64(0);
-let mut x: I = Int::ZERO;
+let mut x: I = MinInt::ZERO;
for i in 0..I::FUZZ_NUM {
fuzz_step(&mut rng, &mut x);
-f(x, Int::ZERO);
+f(x, MinInt::ZERO);
f(x, I::FUZZ_LENGTHS[i] as u32);
}
}
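For context, a caller in testcrate drives these fuzzers roughly like this (a hypothetical test, using the `fuzz_2` signature shown above):

    #[test]
    fn add_is_commutative() {
        // 10_000 random pairs, after the built-in edge-case sweep.
        testcrate::fuzz_2::<u32, _>(10_000, |a, b| {
            assert_eq!(a.wrapping_add(b), b.wrapping_add(a));
        });
    }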