cleanup: Reuse MinInt and Int from libm in compiler-builtins

Since the two crates are now in the same repo, it is easier to share
code. Begin some deduplication with the integer traits.
This commit is contained in:
Trevor Gross 2025-04-21 09:35:55 +00:00
parent 5978b8b875
commit 851aa05aa0
12 changed files with 168 additions and 345 deletions

View file

@ -40,6 +40,75 @@ pub const N: u32 = if cfg!(target_arch = "x86_64") && !cfg!(debug_assertions) {
10_000
};
/// Additional constants that determine how the integer gets fuzzed.
trait FuzzInt: MinInt {
    /// LUT used for maximizing the space covered and minimizing the computational cost of fuzzing
    /// in `builtins-test`. For example, Self = u128 produces [0,1,2,7,8,15,16,31,32,63,64,95,96,
    /// 111,112,119,120,125,126,127].
    const FUZZ_LENGTHS: [u8; 20] = make_fuzz_lengths(Self::BITS);

    /// The number of entries of `FUZZ_LENGTHS` actually used. The maximum is 20 for u128.
    const FUZZ_NUM: usize = match Self::BITS.ilog2() as usize {
        // log2(8) == 3: the u8 case, which only has six usable boundary lengths.
        3 => 6,
        // 3 entries on each extreme, 2 in the middle, and 4 for each scale of intermediate
        // boundaries.
        log2 => 8 + (4 * (log2 - 4)),
    };
}
// Blanket impl: the fuzzing constants are available for every `MinInt` integer.
impl<I: MinInt> FuzzInt for I {}
/// Builds the fuzz-length LUT for an integer that is `bits` wide.
///
/// The table contains shift lengths clustered around byte boundaries (scaled in powers of two),
/// the half-width boundary, and a mirrored tail approaching `bits`. Unused trailing entries stay
/// zero; `FUZZ_NUM` tells callers how many entries are meaningful.
const fn make_fuzz_lengths(bits: u32) -> [u8; 20] {
    let mut lut = [0u8; 20];

    // The smallest lengths are always present; 2 is important for parity and for the iX::MIN
    // case once the table is mirrored below.
    lut[0] = 0;
    lut[1] = 1;
    lut[2] = 2;
    let mut idx = 3;

    // Nothing more is needed until the first byte boundary, because no algorithm should be
    // sensitive to lengths that are not next to byte boundaries after 2. Scaling in powers of
    // two keeps the u128 corner tests from getting too big.
    let mut boundary: u8 = 8;
    while boundary < (bits / 2) as u8 {
        // record both sides of each byte boundary
        lut[idx] = boundary - 1;
        lut[idx + 1] = boundary;
        idx += 2;
        boundary *= 2;
    }

    if bits != 8 {
        // add the lower side of the middle boundary
        lut[idx] = ((bits / 2) - 1) as u8;
        idx += 1;
    }

    // Mirror the entries so far so we approach `bits` gradually instead of jumping straight
    // from the `bits / 2` boundary — some algorithms split the high part up further.
    let mid = idx;
    let mut back = 1;
    while back <= mid {
        lut[idx] = (bits as u8) - lut[mid - back] - 1;
        idx += 1;
        back += 1;
    }

    lut
}
/// Random fuzzing step. When run several times, it results in excellent fuzzing entropy such as:
/// 11110101010101011110111110011111
/// 10110101010100001011101011001010
@ -92,10 +161,9 @@ fn fuzz_step<I: Int>(rng: &mut Xoshiro128StarStar, x: &mut I) {
macro_rules! edge_cases {
($I:ident, $case:ident, $inner:block) => {
for i0 in 0..$I::FUZZ_NUM {
let mask_lo = (!$I::UnsignedInt::ZERO).wrapping_shr($I::FUZZ_LENGTHS[i0] as u32);
let mask_lo = (!$I::Unsigned::ZERO).wrapping_shr($I::FUZZ_LENGTHS[i0] as u32);
for i1 in i0..I::FUZZ_NUM {
let mask_hi =
(!$I::UnsignedInt::ZERO).wrapping_shl($I::FUZZ_LENGTHS[i1 - i0] as u32);
let mask_hi = (!$I::Unsigned::ZERO).wrapping_shl($I::FUZZ_LENGTHS[i1 - i0] as u32);
let $case = I::from_unsigned(mask_lo & mask_hi);
$inner
}
@ -107,7 +175,7 @@ macro_rules! edge_cases {
/// edge cases, followed by a more random fuzzer that runs `n` times.
pub fn fuzz<I: Int, F: FnMut(I)>(n: u32, mut f: F)
where
<I as MinInt>::UnsignedInt: Int,
<I as MinInt>::Unsigned: Int,
{
// edge case tester. Calls `f` 210 times for u128.
// zero gets skipped by the loop
@ -128,7 +196,7 @@ where
/// The same as `fuzz`, except `f` has two inputs.
pub fn fuzz_2<I: Int, F: Fn(I, I)>(n: u32, f: F)
where
<I as MinInt>::UnsignedInt: Int,
<I as MinInt>::Unsigned: Int,
{
// Check cases where the first and second inputs are zero. Both call `f` 210 times for `u128`.
edge_cases!(I, case, {

View file

@ -1,5 +1,5 @@
use crate::float::Float;
use crate::int::{CastInto, Int, MinInt};
use crate::int::{CastFrom, CastInto, Int, MinInt};
/// Returns `a + b`
fn add<F: Float>(a: F, b: F) -> F
@ -12,7 +12,7 @@ where
let one = F::Int::ONE;
let zero = F::Int::ZERO;
let bits = F::BITS.cast();
let bits: F::Int = F::BITS.cast();
let significand_bits = F::SIG_BITS;
let max_exponent = F::EXP_SAT;
@ -115,9 +115,10 @@ where
let align = a_exponent.wrapping_sub(b_exponent).cast();
if align != MinInt::ZERO {
if align < bits {
let sticky =
F::Int::from_bool(b_significand << bits.wrapping_sub(align).cast() != MinInt::ZERO);
b_significand = (b_significand >> align.cast()) | sticky;
let sticky = F::Int::from_bool(
b_significand << u32::cast_from(bits.wrapping_sub(align)) != MinInt::ZERO,
);
b_significand = (b_significand >> u32::cast_from(align)) | sticky;
} else {
b_significand = one; // sticky; b is known to be non-zero.
}
@ -132,8 +133,8 @@ where
// If partial cancellation occurred, we need to left-shift the result
// and adjust the exponent:
if a_significand < implicit_bit << 3 {
let shift =
a_significand.leading_zeros() as i32 - (implicit_bit << 3).leading_zeros() as i32;
let shift = a_significand.leading_zeros() as i32
- (implicit_bit << 3u32).leading_zeros() as i32;
a_significand <<= shift;
a_exponent -= shift;
}
@ -159,9 +160,10 @@ where
// Result is denormal before rounding; the exponent is zero and we
// need to shift the significand.
let shift = (1 - a_exponent).cast();
let sticky =
F::Int::from_bool((a_significand << bits.wrapping_sub(shift).cast()) != MinInt::ZERO);
a_significand = (a_significand >> shift.cast()) | sticky;
let sticky = F::Int::from_bool(
(a_significand << u32::cast_from(bits.wrapping_sub(shift))) != MinInt::ZERO,
);
a_significand = (a_significand >> u32::cast_from(shift)) | sticky;
a_exponent = 0;
}

View file

@ -72,7 +72,7 @@ mod int_to_float {
F: Float,
I: Int,
F::Int: CastFrom<I>,
Conv: Fn(I::UnsignedInt) -> F::Int,
Conv: Fn(I::Unsigned) -> F::Int,
{
let sign_bit = F::Int::cast_from(i >> (I::BITS - 1)) << (F::BITS - 1);
F::from_bits(conv(i.unsigned_abs()) | sign_bit)
@ -313,10 +313,10 @@ intrinsics! {
fn float_to_unsigned_int<F, U>(f: F) -> U
where
F: Float,
U: Int<UnsignedInt = U>,
U: Int<Unsigned = U>,
F::Int: CastInto<U>,
F::Int: CastFrom<u32>,
F::Int: CastInto<U::UnsignedInt>,
F::Int: CastInto<U::Unsigned>,
u32: CastFrom<F::Int>,
{
float_to_int_inner::<F, U, _, _>(f.to_bits(), |i: U| i, || U::MAX)
@ -327,8 +327,8 @@ fn float_to_signed_int<F, I>(f: F) -> I
where
F: Float,
I: Int + Neg<Output = I>,
I::UnsignedInt: Int,
F::Int: CastInto<I::UnsignedInt>,
I::Unsigned: Int,
F::Int: CastInto<I::Unsigned>,
F::Int: CastFrom<u32>,
u32: CastFrom<F::Int>,
{
@ -355,27 +355,27 @@ where
I: Int,
FnFoo: FnOnce(I) -> I,
FnOob: FnOnce() -> I,
I::UnsignedInt: Int,
F::Int: CastInto<I::UnsignedInt>,
I::Unsigned: Int,
F::Int: CastInto<I::Unsigned>,
F::Int: CastFrom<u32>,
u32: CastFrom<F::Int>,
{
let int_max_exp = F::EXP_BIAS + I::MAX.ilog2() + 1;
let foobar = F::EXP_BIAS + I::UnsignedInt::BITS - 1;
let foobar = F::EXP_BIAS + I::Unsigned::BITS - 1;
if fbits < F::ONE.to_bits() {
// < 0 gets rounded to 0
I::ZERO
} else if fbits < F::Int::cast_from(int_max_exp) << F::SIG_BITS {
// >= 1, < integer max
let m_base = if I::UnsignedInt::BITS >= F::Int::BITS {
I::UnsignedInt::cast_from(fbits) << (I::BITS - F::SIG_BITS - 1)
let m_base = if I::Unsigned::BITS >= F::Int::BITS {
I::Unsigned::cast_from(fbits) << (I::BITS - F::SIG_BITS - 1)
} else {
I::UnsignedInt::cast_from(fbits >> (F::SIG_BITS - I::BITS + 1))
I::Unsigned::cast_from(fbits >> (F::SIG_BITS - I::BITS + 1))
};
// Set the implicit 1-bit.
let m: I::UnsignedInt = (I::UnsignedInt::ONE << (I::BITS - 1)) | m_base;
let m: I::Unsigned = (I::Unsigned::ONE << (I::BITS - 1)) | m_base;
// Shift based on the exponent and bias.
let s: u32 = (foobar) - u32::cast_from(fbits >> F::SIG_BITS);

View file

@ -370,7 +370,7 @@ where
let hi_corr: F::Int = corr_uq1 >> hw;
// x_UQ0 * corr_UQ1 = (x_UQ0_hw * 2^HW) * (hi_corr * 2^HW + lo_corr) - corr_UQ1
let mut x_uq0: F::Int = ((F::Int::from(x_uq0_hw) * hi_corr) << 1)
let mut x_uq0: F::Int = ((F::Int::from(x_uq0_hw) * hi_corr) << 1u32)
.wrapping_add((F::Int::from(x_uq0_hw) * lo_corr) >> (hw - 1))
// 1 to account for the highest bit of corr_UQ1 can be 1
// 1 to account for possible carry

View file

@ -143,7 +143,7 @@ where
// a zero of the appropriate sign. Mathematically there is no need to
// handle this case separately, but we make it a special case to
// simplify the shift logic.
let shift = one.wrapping_sub(product_exponent.cast()).cast();
let shift: u32 = one.wrapping_sub(product_exponent.cast()).cast();
if shift >= bits {
return F::from_bits(product_sign);
}

View file

@ -20,10 +20,10 @@ pub trait Float:
+ ops::Rem<Output = Self>
{
/// A uint of the same width as the float
type Int: Int<OtherSign = Self::SignedInt, UnsignedInt = Self::Int>;
type Int: Int<OtherSign = Self::SignedInt, Unsigned = Self::Int>;
/// A int of the same width as the float
type SignedInt: Int + MinInt<OtherSign = Self::Int, UnsignedInt = Self::Int>;
type SignedInt: Int + MinInt<OtherSign = Self::Int, Unsigned = Self::Int>;
/// An int capable of containing the exponent bits plus a sign bit. This is signed.
type ExpInt: Int;

View file

@ -22,7 +22,7 @@ impl UAddSub for u128 {}
trait AddSub: Int
where
<Self as MinInt>::UnsignedInt: UAddSub,
<Self as MinInt>::Unsigned: UAddSub,
{
fn add(self, other: Self) -> Self {
Self::from_unsigned(self.unsigned().uadd(other.unsigned()))
@ -37,7 +37,7 @@ impl AddSub for i128 {}
trait Addo: AddSub
where
<Self as MinInt>::UnsignedInt: UAddSub,
<Self as MinInt>::Unsigned: UAddSub,
{
fn addo(self, other: Self) -> (Self, bool) {
let sum = AddSub::add(self, other);
@ -50,7 +50,7 @@ impl Addo for u128 {}
trait Subo: AddSub
where
<Self as MinInt>::UnsignedInt: UAddSub,
<Self as MinInt>::Unsigned: UAddSub,
{
fn subo(self, other: Self) -> (Self, bool) {
let sum = AddSub::sub(self, other);

View file

@ -45,7 +45,7 @@ impl i256 {
impl MinInt for u256 {
type OtherSign = i256;
type UnsignedInt = u256;
type Unsigned = u256;
const SIGNED: bool = false;
const BITS: u32 = 256;
@ -58,7 +58,7 @@ impl MinInt for u256 {
impl MinInt for i256 {
type OtherSign = u256;
type UnsignedInt = u256;
type Unsigned = u256;
const SIGNED: bool = false;
const BITS: u32 = 256;

View file

@ -9,11 +9,14 @@ pub use implementation::{leading_zeros_default, leading_zeros_riscv};
pub(crate) use implementation::{leading_zeros_default, leading_zeros_riscv};
mod implementation {
use crate::int::{CastInto, Int};
use crate::int::{CastFrom, Int};
/// Returns the number of leading binary zeros in `x`.
#[allow(dead_code)]
pub fn leading_zeros_default<T: Int + CastInto<usize>>(x: T) -> usize {
pub fn leading_zeros_default<I: Int>(x: I) -> usize
where
usize: CastFrom<I>,
{
// The basic idea is to test if the higher bits of `x` are zero and bisect the number
// of leading zeros. It is possible for all branches of the bisection to use the same
// code path by conditionally shifting the higher parts down to let the next bisection
@ -23,44 +26,48 @@ mod implementation {
// because it simplifies the final bisection step.
let mut x = x;
// the number of potential leading zeros
let mut z = T::BITS as usize;
let mut z = I::BITS as usize;
// a temporary
let mut t: T;
let mut t: I;
const { assert!(T::BITS <= 64) };
if T::BITS >= 64 {
const { assert!(I::BITS <= 64) };
if I::BITS >= 64 {
t = x >> 32;
if t != T::ZERO {
if t != I::ZERO {
z -= 32;
x = t;
}
}
if T::BITS >= 32 {
if I::BITS >= 32 {
t = x >> 16;
if t != T::ZERO {
if t != I::ZERO {
z -= 16;
x = t;
}
}
const { assert!(T::BITS >= 16) };
const { assert!(I::BITS >= 16) };
t = x >> 8;
if t != T::ZERO {
if t != I::ZERO {
z -= 8;
x = t;
}
t = x >> 4;
if t != T::ZERO {
if t != I::ZERO {
z -= 4;
x = t;
}
t = x >> 2;
if t != T::ZERO {
if t != I::ZERO {
z -= 2;
x = t;
}
// the last two bisections are combined into one conditional
t = x >> 1;
if t != T::ZERO { z - 2 } else { z - x.cast() }
if t != I::ZERO {
z - 2
} else {
z - usize::cast_from(x)
}
// We could potentially save a few cycles by using the LUT trick from
// "https://embeddedgurus.com/state-space/2014/09/
@ -82,10 +89,13 @@ mod implementation {
/// Returns the number of leading binary zeros in `x`.
#[allow(dead_code)]
pub fn leading_zeros_riscv<T: Int + CastInto<usize>>(x: T) -> usize {
pub fn leading_zeros_riscv<I: Int>(x: I) -> usize
where
usize: CastFrom<I>,
{
let mut x = x;
// the number of potential leading zeros
let mut z = T::BITS;
let mut z = I::BITS;
// a temporary
let mut t: u32;
@ -97,11 +107,11 @@ mod implementation {
// right). If we try to save an instruction by using `x < imm` for each bisection, we
// have to shift `x` left and compare with powers of two approaching `usize::MAX + 1`,
// but the immediate will never fit into 12 bits and never save an instruction.
const { assert!(T::BITS <= 64) };
if T::BITS >= 64 {
const { assert!(I::BITS <= 64) };
if I::BITS >= 64 {
// If the upper 32 bits of `x` are not all 0, `t` is set to `1 << 5`, otherwise
// `t` is set to 0.
t = ((x >= (T::ONE << 32)) as u32) << 5;
t = ((x >= (I::ONE << 32)) as u32) << 5;
// If `t` was set to `1 << 5`, then the upper 32 bits are shifted down for the
// next step to process.
x >>= t;
@ -109,27 +119,27 @@ mod implementation {
// leading zeros
z -= t;
}
if T::BITS >= 32 {
t = ((x >= (T::ONE << 16)) as u32) << 4;
if I::BITS >= 32 {
t = ((x >= (I::ONE << 16)) as u32) << 4;
x >>= t;
z -= t;
}
const { assert!(T::BITS >= 16) };
t = ((x >= (T::ONE << 8)) as u32) << 3;
const { assert!(I::BITS >= 16) };
t = ((x >= (I::ONE << 8)) as u32) << 3;
x >>= t;
z -= t;
t = ((x >= (T::ONE << 4)) as u32) << 2;
t = ((x >= (I::ONE << 4)) as u32) << 2;
x >>= t;
z -= t;
t = ((x >= (T::ONE << 2)) as u32) << 1;
t = ((x >= (I::ONE << 2)) as u32) << 1;
x >>= t;
z -= t;
t = (x >= (T::ONE << 1)) as u32;
t = (x >= (I::ONE << 1)) as u32;
x >>= t;
z -= t;
// All bits except the LSB are guaranteed to be zero for this final bisection step.
// If `x != 0` then `x == 1` and subtracts one potential zero from `z`.
z as usize - x.cast()
z as usize - usize::cast_from(x)
}
}

View file

@ -4,33 +4,38 @@ pub use implementation::trailing_zeros;
pub(crate) use implementation::trailing_zeros;
mod implementation {
use crate::int::{CastInto, Int};
use crate::int::{CastFrom, Int};
/// Returns number of trailing binary zeros in `x`.
#[allow(dead_code)]
pub fn trailing_zeros<T: Int + CastInto<u32> + CastInto<u16> + CastInto<u8>>(x: T) -> usize {
pub fn trailing_zeros<I: Int>(x: I) -> usize
where
u32: CastFrom<I>,
u16: CastFrom<I>,
u8: CastFrom<I>,
{
let mut x = x;
let mut r: u32 = 0;
let mut t: u32;
const { assert!(T::BITS <= 64) };
if T::BITS >= 64 {
r += ((CastInto::<u32>::cast(x) == 0) as u32) << 5; // if (x has no 32 small bits) t = 32 else 0
const { assert!(I::BITS <= 64) };
if I::BITS >= 64 {
r += ((u32::cast_from(x) == 0) as u32) << 5; // if (x has no 32 small bits) t = 32 else 0
x >>= r; // remove 32 zero bits
}
if T::BITS >= 32 {
t = ((CastInto::<u16>::cast(x) == 0) as u32) << 4; // if (x has no 16 small bits) t = 16 else 0
if I::BITS >= 32 {
t = ((u16::cast_from(x) == 0) as u32) << 4; // if (x has no 16 small bits) t = 16 else 0
r += t;
x >>= t; // x = [0 - 0xFFFF] + higher garbage bits
}
const { assert!(T::BITS >= 16) };
t = ((CastInto::<u8>::cast(x) == 0) as u32) << 3;
const { assert!(I::BITS >= 16) };
t = ((u8::cast_from(x) == 0) as u32) << 3;
x >>= t; // x = [0 - 0xFF] + higher garbage bits
r += t;
let mut x: u8 = x.cast();
let mut x: u8 = x.cast_lossy();
t = (((x & 0x0F) == 0) as u32) << 2;
x >>= t; // x = [0 - 0xF] + higher garbage bits

View file

@ -1,275 +1,4 @@
use core::ops;
/// Minimal integer implementations needed on all integer types, including wide integers.
#[allow(dead_code)]
pub trait MinInt:
    Copy
    + core::fmt::Debug
    + ops::BitOr<Output = Self>
    + ops::Not<Output = Self>
    + ops::Shl<u32, Output = Self>
{
    /// Type with the same width but other signedness
    type OtherSign: MinInt;
    /// Unsigned version of Self
    type UnsignedInt: MinInt;

    /// If `Self` is a signed integer
    const SIGNED: bool;

    /// The bitwidth of the int type
    const BITS: u32;

    /// The value `0` for this type.
    const ZERO: Self;
    /// The value `1` for this type.
    const ONE: Self;
    /// The smallest representable value (`0` for unsigned types).
    const MIN: Self;
    /// The largest representable value.
    const MAX: Self;
}
/// Trait for some basic operations on integers
#[allow(dead_code)]
pub trait Int:
    MinInt
    + PartialEq
    + PartialOrd
    + ops::AddAssign
    + ops::SubAssign
    + ops::BitAndAssign
    + ops::BitOrAssign
    + ops::BitXorAssign
    + ops::ShlAssign<i32>
    + ops::ShrAssign<u32>
    + ops::Add<Output = Self>
    + ops::Sub<Output = Self>
    + ops::Mul<Output = Self>
    + ops::Div<Output = Self>
    + ops::Shr<u32, Output = Self>
    + ops::BitXor<Output = Self>
    + ops::BitAnd<Output = Self>
{
    /// LUT used for maximizing the space covered and minimizing the computational cost of fuzzing
    /// in `builtins-test`. For example, Self = u128 produces [0,1,2,7,8,15,16,31,32,63,64,95,96,
    /// 111,112,119,120,125,126,127].
    const FUZZ_LENGTHS: [u8; 20] = make_fuzz_lengths(<Self as MinInt>::BITS);

    /// The number of entries of `FUZZ_LENGTHS` actually used. The maximum is 20 for u128.
    const FUZZ_NUM: usize = {
        // `BITS` is a power of two, so `(BITS - 1).count_ones()` computes `log2(BITS)`.
        let log2 = (<Self as MinInt>::BITS - 1).count_ones() as usize;
        if log2 == 3 {
            // case for u8
            6
        } else {
            // 3 entries on each extreme, 2 in the middle, and 4 for each scale of intermediate
            // boundaries.
            8 + (4 * (log2 - 4))
        }
    };

    /// Reinterpret the bits of `self` as the unsigned type of the same width.
    fn unsigned(self) -> Self::UnsignedInt;
    /// Reinterpret unsigned bits as `Self` (a no-op for unsigned types).
    fn from_unsigned(unsigned: Self::UnsignedInt) -> Self;
    /// Absolute value, returned as the unsigned type so `MIN` cannot overflow.
    fn unsigned_abs(self) -> Self::UnsignedInt;
    /// `1` for `true`, `0` for `false`.
    fn from_bool(b: bool) -> Self;
    /// Prevents the need for excessive conversions between signed and unsigned
    fn logical_shr(self, other: u32) -> Self;
    /// Absolute difference between two integers.
    fn abs_diff(self, other: Self) -> Self::UnsignedInt;

    // copied from primitive integers, but put in a trait
    fn is_zero(self) -> bool;
    fn wrapping_neg(self) -> Self;
    fn wrapping_add(self, other: Self) -> Self;
    fn wrapping_mul(self, other: Self) -> Self;
    fn wrapping_sub(self, other: Self) -> Self;
    fn wrapping_shl(self, other: u32) -> Self;
    fn wrapping_shr(self, other: u32) -> Self;
    fn rotate_left(self, other: u32) -> Self;
    fn overflowing_add(self, other: Self) -> (Self, bool);
    fn leading_zeros(self) -> u32;
    fn ilog2(self) -> u32;
}
/// Computes the fuzz-length lookup table for a `bits`-wide integer.
///
/// Entries cluster around byte boundaries (in powers of two), the half-width boundary, and a
/// mirrored run approaching `bits`; any trailing entries are left at zero and `FUZZ_NUM` gives
/// the count of valid ones.
pub(crate) const fn make_fuzz_lengths(bits: u32) -> [u8; 20] {
    let mut table = [0u8; 20];

    // The smallest lengths always appear; 2 matters for parity and the iX::MIN case when the
    // table is mirrored further down.
    table[0] = 0;
    table[1] = 1;
    table[2] = 2;
    let mut cursor = 3;

    // Nothing else is needed before the first byte boundary: no algorithm should be sensitive
    // to lengths away from byte boundaries after 2. Power-of-two scaling keeps u128 corner
    // tests small.
    let mut step: u8 = 8;
    while step < (bits / 2) as u8 {
        // capture both sides of the byte boundary
        table[cursor] = step - 1;
        table[cursor + 1] = step;
        cursor += 2;
        step *= 2;
    }

    if bits != 8 {
        // lower side of the middle boundary
        table[cursor] = ((bits / 2) - 1) as u8;
        cursor += 1;
    }

    // Reverse the scaling toward `bits` rather than jumping straight from `bits / 2`, since
    // some algorithms split the high part up again.
    let pivot = cursor;
    let mut offset = 1;
    while offset <= pivot {
        table[cursor] = (bits as u8) - table[pivot - offset] - 1;
        cursor += 1;
        offset += 1;
    }

    table
}
// `Int` method bodies that are identical for the signed and unsigned primitive of a pair;
// each one forwards to the inherent method (or constant comparison) of the same name.
macro_rules! int_impl_common {
    ($ty:ty) => {
        fn from_bool(b: bool) -> Self {
            // `true` casts to 1, `false` to 0
            b as $ty
        }

        fn logical_shr(self, other: u32) -> Self {
            // Route through the unsigned twin so the shift is zero-filling (logical) even when
            // `$ty` is signed.
            Self::from_unsigned(self.unsigned().wrapping_shr(other))
        }

        fn is_zero(self) -> bool {
            self == Self::ZERO
        }

        fn wrapping_neg(self) -> Self {
            <Self>::wrapping_neg(self)
        }

        fn wrapping_add(self, other: Self) -> Self {
            <Self>::wrapping_add(self, other)
        }

        fn wrapping_mul(self, other: Self) -> Self {
            <Self>::wrapping_mul(self, other)
        }

        fn wrapping_sub(self, other: Self) -> Self {
            <Self>::wrapping_sub(self, other)
        }

        fn wrapping_shl(self, other: u32) -> Self {
            <Self>::wrapping_shl(self, other)
        }

        fn wrapping_shr(self, other: u32) -> Self {
            <Self>::wrapping_shr(self, other)
        }

        fn rotate_left(self, other: u32) -> Self {
            <Self>::rotate_left(self, other)
        }

        fn overflowing_add(self, other: Self) -> (Self, bool) {
            <Self>::overflowing_add(self, other)
        }

        fn leading_zeros(self) -> u32 {
            <Self>::leading_zeros(self)
        }

        fn ilog2(self) -> u32 {
            <Self>::ilog2(self)
        }
    };
}
// Implements `MinInt` and `Int` for one signed/unsigned primitive pair
// (`$ity` = signed type, `$uty` = its unsigned twin).
macro_rules! int_impl {
    ($ity:ty, $uty:ty) => {
        impl MinInt for $uty {
            type OtherSign = $ity;
            type UnsignedInt = $uty;

            // `ZERO` has every bit clear, so counting its zero bits yields the type's width.
            const BITS: u32 = <Self as MinInt>::ZERO.count_zeros();
            // Only signed types have a nonzero `MIN`; for unsigned types `MIN == 0`.
            const SIGNED: bool = Self::MIN != Self::ZERO;

            const ZERO: Self = 0;
            const ONE: Self = 1;
            const MIN: Self = <Self>::MIN;
            const MAX: Self = <Self>::MAX;
        }

        impl Int for $uty {
            fn unsigned(self) -> $uty {
                // already unsigned
                self
            }

            // It makes writing macros easier if this is implemented for both signed and unsigned
            #[allow(clippy::wrong_self_convention)]
            fn from_unsigned(me: $uty) -> Self {
                me
            }

            fn unsigned_abs(self) -> Self {
                // an unsigned value is its own absolute value
                self
            }

            fn abs_diff(self, other: Self) -> Self {
                self.abs_diff(other)
            }

            int_impl_common!($uty);
        }

        impl MinInt for $ity {
            type OtherSign = $uty;
            type UnsignedInt = $uty;

            // Same tricks as the unsigned impl above: width from `count_zeros`, signedness
            // from `MIN != 0`.
            const BITS: u32 = <Self as MinInt>::ZERO.count_zeros();
            const SIGNED: bool = Self::MIN != Self::ZERO;

            const ZERO: Self = 0;
            const ONE: Self = 1;
            const MIN: Self = <Self>::MIN;
            const MAX: Self = <Self>::MAX;
        }

        impl Int for $ity {
            fn unsigned(self) -> $uty {
                // bit-preserving cast to the unsigned twin
                self as $uty
            }

            fn from_unsigned(me: $uty) -> Self {
                // bit-preserving cast back to signed
                me as $ity
            }

            fn unsigned_abs(self) -> Self::UnsignedInt {
                self.unsigned_abs()
            }

            fn abs_diff(self, other: Self) -> $uty {
                self.abs_diff(other)
            }

            int_impl_common!($ity);
        }
    };
}
// Wire `MinInt`/`Int` up for every signed/unsigned primitive pair.
int_impl!(isize, usize);
int_impl!(i8, u8);
int_impl!(i16, u16);
int_impl!(i32, u32);
int_impl!(i64, u64);
int_impl!(i128, u128);
pub use crate::support::{Int, MinInt};
/// Trait for integers twice the bit width of another integer. This is implemented for all
/// primitives except for `u8`, because there is not a smaller primitive.

View file

@ -78,6 +78,7 @@ pub trait Int:
fn unsigned(self) -> Self::Unsigned;
fn from_unsigned(unsigned: Self::Unsigned) -> Self;
fn abs(self) -> Self;
fn unsigned_abs(self) -> Self::Unsigned;
fn from_bool(b: bool) -> Self;
@ -203,6 +204,10 @@ macro_rules! int_impl {
unimplemented!()
}
fn unsigned_abs(self) -> Self {
unimplemented!()
}
// It makes writing macros easier if this is implemented for both signed and unsigned
#[allow(clippy::wrong_self_convention)]
fn from_unsigned(me: $uty) -> Self {
@ -242,6 +247,10 @@ macro_rules! int_impl {
self.abs()
}
fn unsigned_abs(self) -> Self::Unsigned {
self.unsigned_abs()
}
fn from_unsigned(me: $uty) -> Self {
me as $ity
}