Completely overhaul fuzz testing

Adds testing for almost every numerical intrinsic
Aaron Kutch 2020-12-07 23:25:42 -06:00
parent f61c411e2b
commit 69a3c571f7
11 changed files with 937 additions and 129 deletions

View file

@ -72,6 +72,9 @@ pub trait Int:
/// Prevents the need for excessive conversions between signed and unsigned
fn logical_shr(self, other: u32) -> Self;
/// Absolute difference between two integers.
fn abs_diff(self, other: Self) -> Self::UnsignedInt;
// copied from primitive integers, but put in a trait
fn is_zero(self) -> bool;
fn max_value() -> Self;
@ -251,6 +254,10 @@ macro_rules! int_impl {
me
}
fn abs_diff(self, other: Self) -> Self {
(self.wrapping_sub(other) as $ity).wrapping_abs() as $uty
}
int_impl_common!($uty, $bits);
}
@ -274,6 +281,10 @@ macro_rules! int_impl {
me as $ity
}
fn abs_diff(self, other: Self) -> $uty {
self.wrapping_sub(other).wrapping_abs() as $uty
}
int_impl_common!($ity, $bits);
}
};
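// Sketch of what the new `abs_diff` impls compute (illustrative, not part of
// the diff): the absolute value of the *wrapping* difference, which equals the
// true absolute difference whenever that difference fits in the signed type.
// Hypothetical add-on test, not in the commit:
#[test]
fn abs_diff_sketch() {
    // matches the signed impl: `self.wrapping_sub(other).wrapping_abs() as $uty`
    assert_eq!(10i32.wrapping_sub(-3).wrapping_abs() as u32, 13);
    // an overflowing difference wraps instead of saturating:
    assert_eq!(i32::MIN.wrapping_sub(1).wrapping_abs() as u32, i32::MAX as u32);
}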

View file

@ -11,7 +11,7 @@ doctest = false
[build-dependencies]
rand = "0.7"
[dev-dependencies]
[dependencies]
# For fuzzing tests we want a deterministic seedable RNG. We also eliminate potential
# problems with system RNGs on the variety of platforms this crate is tested on.
# `xoshiro128**` is used for its quality, size, and speed at generating `u32` shift amounts.
rand_xoshiro = "0.4"

View file

@ -1 +1,259 @@
//! This crate is for integration testing and fuzz testing of functions in `compiler-builtins`. This
//! includes publicly documented intrinsics and some internal alternative implementation functions
//! such as `usize_leading_zeros_riscv` (which are tested because they are configured for
//! architectures not tested by the CI).
//!
//! The general idea is to use a combination of edge case testing and randomized fuzz testing. The
//! edge case testing is crucial for checking cases such as both inputs being equal, or one input
//! being equal to a special value like `i128::MIN`, which the random fuzzer on its own is unlikely
//! to encounter. The randomized fuzz testing is specially designed to cover wide swaths of the
//! search space in as few iterations as possible. See `fuzz_values` in `testcrate/tests/misc.rs`
//! for an example.
//!
//! Some floating point tests are disabled on specific architectures that do not round correctly.
#![no_std]
use compiler_builtins::float::Float;
use compiler_builtins::int::Int;
use rand_xoshiro::rand_core::{RngCore, SeedableRng};
use rand_xoshiro::Xoshiro128StarStar;
/// Sets the number of fuzz iterations run for most tests. In practice, the vast majority of bugs
/// are caught by the edge case testers. Most of the remaining bugs triggered by more complex
/// sequences are caught well within 10_000 fuzz iterations. Classes of algorithms such as division
/// that are vulnerable to rare edge cases, however, need 1_000_000 iterations for confidence. In
/// practical CI we only want to run the more strenuous test once to catch algorithmic level bugs,
/// and run the 10_000 iteration test on most targets, since target-dependent bugs are likely to
/// involve miscompilation or misconfiguration that breaks algorithms in quickly caught ways. We
/// therefore use `N = 1_000_000` on `x86_64` targets, which are likely to have fast hardware, and
/// only when debug assertions are disabled (tests without `--release` would take too long), and
/// `N = 10_000` on all other targets.
pub const N: u32 = if cfg!(target_arch = "x86_64") && !cfg!(debug_assertions) {
1_000_000
} else {
10_000
};
/// Random fuzzing step. When run several times, it results in excellent fuzzing entropy such as:
/// 11110101010101011110111110011111
/// 10110101010100001011101011001010
/// 1000000000000000
/// 10000000000000110111110000001010
/// 1111011111111101010101111110101
/// 101111111110100000000101000000
/// 10000000110100000000100010101
/// 1010101010101000
fn fuzz_step<I: Int>(rng: &mut Xoshiro128StarStar, x: &mut I) {
let ones = !I::ZERO;
let bit_indexing_mask: u32 = I::BITS - 1;
// It happens that all the randomness we need can come from one call. 7 bits are needed to
// index a worst case 128 bit integer, there are 4 indexes that need to be made, and the
// remaining 4 bits select operations (2 bits per `match` below)
let rng32 = rng.next_u32();
// Randomly OR, AND, and XOR randomly sized and shifted continuous strings of
// ones with `x`.
let r0 = bit_indexing_mask & rng32;
let r1 = bit_indexing_mask & (rng32 >> 7);
let mask = ones.wrapping_shl(r0).rotate_left(r1);
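// e.g. for `u16` with r0 == 12 and r1 == 3 (an illustrative case):
// `ones.wrapping_shl(12)` == 0b1111_0000_0000_0000, and rotating left by 3
// gives mask == 0b1000_0000_0000_0111, a run of ones that wraps around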
match (rng32 >> 14) % 4 {
0 => *x |= mask,
1 => *x &= mask,
// both 2 and 3 to make XORs as common as ORs and ANDs combined
_ => *x ^= mask,
}
// Alternating ones and zeros (e.g. 0b1010101010101010). This catches second-order
// problems that might occur for algorithms with two modes of operation (potentially
// there is some invariant that can be broken and maintained via alternating between modes,
// breaking the algorithm when it reaches the end).
let mut alt_ones = I::ONE;
for _ in 0..(I::BITS / 2) {
alt_ones <<= 2;
alt_ones |= I::ONE;
}
let r0 = bit_indexing_mask & (rng32 >> 16);
let r1 = bit_indexing_mask & (rng32 >> 23);
let mask = alt_ones.wrapping_shl(r0).rotate_left(r1);
match rng32 >> 30 {
0 => *x |= mask,
1 => *x &= mask,
_ => *x ^= mask,
}
}
// A macro is used instead of iterator combinators, because these loops need to be generic
// over `Int`, which is awkward to express with iterators in this `#![no_std]` crate
macro_rules! edge_cases {
($I:ident, $case:ident, $inner:block) => {
for i0 in 0..$I::FUZZ_NUM {
let mask_lo = (!$I::UnsignedInt::ZERO).wrapping_shr($I::FUZZ_LENGTHS[i0] as u32);
for i1 in i0..$I::FUZZ_NUM {
let mask_hi =
(!$I::UnsignedInt::ZERO).wrapping_shl($I::FUZZ_LENGTHS[i1 - i0] as u32);
let $case = $I::from_unsigned(mask_lo & mask_hi);
$inner
}
}
};
}
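// Worked example of one `edge_cases!` iteration (illustrative values, assuming a
// 16-bit `$I` whose `FUZZ_LENGTHS` yields the lengths used here): with
// `$I::FUZZ_LENGTHS[i0] == 1` and `$I::FUZZ_LENGTHS[i1 - i0] == 2`,
// mask_lo == 0b0111_1111_1111_1111 and mask_hi == 0b1111_1111_1111_1100, so
// `$case` == 0b0111_1111_1111_1100: a run of ones bounded away from both ends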
/// Feeds a series of fuzzing inputs to `f`. The fuzzer first uses an algorithm designed to find
/// edge cases, followed by a more random fuzzer that runs `n` times.
pub fn fuzz<I: Int, F: FnMut(I)>(n: u32, mut f: F) {
// Edge case tester. Calls `f` 210 times for `u128`.
// `edge_cases!` itself never generates zero, so check it explicitly
f(I::ZERO);
edge_cases!(I, case, {
f(case);
});
// random fuzzer
let mut rng = Xoshiro128StarStar::seed_from_u64(0);
let mut x: I = Int::ZERO;
for _ in 0..n {
fuzz_step(&mut rng, &mut x);
f(x)
}
}
/// The same as `fuzz`, except `f` has two inputs.
pub fn fuzz_2<I: Int, F: Fn(I, I)>(n: u32, f: F) {
// Check cases where the first and second inputs are zero. Both call `f` 210 times for `u128`.
edge_cases!(I, case, {
f(I::ZERO, case);
});
edge_cases!(I, case, {
f(case, I::ZERO);
});
// Nested edge tester. Calls `f` 44100 times for `u128`.
edge_cases!(I, case0, {
edge_cases!(I, case1, {
f(case0, case1);
})
});
// random fuzzer
let mut rng = Xoshiro128StarStar::seed_from_u64(0);
let mut x: I = I::ZERO;
let mut y: I = I::ZERO;
for _ in 0..n {
fuzz_step(&mut rng, &mut x);
fuzz_step(&mut rng, &mut y);
f(x, y)
}
}
/// Tester for shift functions
pub fn fuzz_shift<I: Int, F: Fn(I, u32)>(f: F) {
// Shift functions are very simple and do not need anything other than shifting a small
// set of random patterns for every fuzz length.
let mut rng = Xoshiro128StarStar::seed_from_u64(0);
let mut x: I = Int::ZERO;
for i in 0..I::FUZZ_NUM {
fuzz_step(&mut rng, &mut x);
f(x, Int::ZERO);
f(x, I::FUZZ_LENGTHS[i] as u32);
}
}
fn fuzz_float_step<F: Float>(rng: &mut Xoshiro128StarStar, f: &mut F) {
let rng32 = rng.next_u32();
// We fuzz the different parts of the float separately, because fuzzing the whole
// representation at once would tend to set the exponent to all ones or all zeros too
// frequently (the masks are sized for the much wider significand)
// sign bit fuzzing
let sign = (rng32 & 1) != 0;
// exponent fuzzing. Only 4 bits are needed per bit-index selector, since the widest
// exponent is 11 bits.
let ones = (F::Int::ONE << F::EXPONENT_BITS) - F::Int::ONE;
let r0 = (rng32 >> 1) % F::EXPONENT_BITS;
let r1 = (rng32 >> 5) % F::EXPONENT_BITS;
// custom rotate shift. Note that `F::Int` is unsigned, so we can shift right without smearing
// the sign bit.
let mask = if r1 == 0 {
ones.wrapping_shr(r0)
} else {
let tmp = ones.wrapping_shr(r0);
(tmp.wrapping_shl(r1) | tmp.wrapping_shr(F::EXPONENT_BITS - r1)) & ones
};
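// e.g. for `f32` (EXPONENT_BITS == 8) with r0 == 3 and r1 == 2 (illustrative):
// `tmp` == 0b0001_1111, and the rotation within the low 8 bits gives
// `mask` == 0b0111_1100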
let mut exp = (f.repr() & F::EXPONENT_MASK) >> F::SIGNIFICAND_BITS;
match (rng32 >> 9) % 4 {
0 => exp |= mask,
1 => exp &= mask,
_ => exp ^= mask,
}
// significand fuzzing
let mut sig = f.repr() & F::SIGNIFICAND_MASK;
fuzz_step(rng, &mut sig);
sig &= F::SIGNIFICAND_MASK;
*f = F::from_parts(sign, exp, sig);
}
macro_rules! float_edge_cases {
($F:ident, $case:ident, $inner:block) => {
for exponent in [
$F::Int::ZERO,
$F::Int::ONE,
$F::Int::ONE << ($F::EXPONENT_BITS / 2),
($F::Int::ONE << ($F::EXPONENT_BITS - 1)) - $F::Int::ONE,
$F::Int::ONE << ($F::EXPONENT_BITS - 1),
($F::Int::ONE << ($F::EXPONENT_BITS - 1)) + $F::Int::ONE,
($F::Int::ONE << $F::EXPONENT_BITS) - $F::Int::ONE,
]
.iter()
{
for significand in [
$F::Int::ZERO,
$F::Int::ONE,
$F::Int::ONE << ($F::SIGNIFICAND_BITS / 2),
($F::Int::ONE << ($F::SIGNIFICAND_BITS - 1)) - $F::Int::ONE,
$F::Int::ONE << ($F::SIGNIFICAND_BITS - 1),
($F::Int::ONE << ($F::SIGNIFICAND_BITS - 1)) + $F::Int::ONE,
($F::Int::ONE << $F::SIGNIFICAND_BITS) - $F::Int::ONE,
]
.iter()
{
for sign in [false, true].iter() {
let $case = $F::from_parts(*sign, *exponent, *significand);
$inner
}
}
}
};
}
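// For reference (an observation, not in the diff): for `f32` (8 exponent bits) the
// exponent list above works out to {0, 1, 16, 127, 128, 129, 255}, covering zero and
// subnormals (0), values straddling the exponent bias (127, 128), and the all-ones
// exponent (255) used for infinities and NaNs; the significand list similarly hits
// 0, 1, the halfway bit, and the all-ones pattern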
pub fn fuzz_float<F: Float, E: Fn(F)>(n: u32, f: E) {
float_edge_cases!(F, case, {
f(case);
});
// random fuzzer
let mut rng = Xoshiro128StarStar::seed_from_u64(0);
let mut x = F::ZERO;
for _ in 0..n {
fuzz_float_step(&mut rng, &mut x);
f(x);
}
}
pub fn fuzz_float_2<F: Float, E: Fn(F, F)>(n: u32, f: E) {
float_edge_cases!(F, case0, {
float_edge_cases!(F, case1, {
f(case0, case1);
});
});
// random fuzzer
let mut rng = Xoshiro128StarStar::seed_from_u64(0);
let mut x = F::ZERO;
let mut y = F::ZERO;
for _ in 0..n {
fuzz_float_step(&mut rng, &mut x);
fuzz_float_step(&mut rng, &mut y);
f(x, y)
}
}

View file

@ -0,0 +1,109 @@
use testcrate::*;
macro_rules! sum {
($($i:ty, $fn_add:ident, $fn_sub:ident);*;) => {
$(
fuzz_2(N, |x: $i, y: $i| {
let add0 = x.wrapping_add(y);
let sub0 = x.wrapping_sub(y);
let add1: $i = $fn_add(x, y);
let sub1: $i = $fn_sub(x, y);
if add0 != add1 {
panic!(
"{}({}, {}): std: {}, builtins: {}",
stringify!($fn_add), x, y, add0, add1
);
}
if sub0 != sub1 {
panic!(
"{}({}, {}): std: {}, builtins: {}",
stringify!($fn_sub), x, y, sub0, sub1
);
}
});
)*
};
}
macro_rules! overflowing_sum {
($($i:ty, $fn_add:ident, $fn_sub:ident);*;) => {
$(
fuzz_2(N, |x: $i, y: $i| {
let add0 = x.overflowing_add(y);
let sub0 = x.overflowing_sub(y);
let add1: ($i, bool) = $fn_add(x, y);
let sub1: ($i, bool) = $fn_sub(x, y);
if add0.0 != add1.0 || add0.1 != add1.1 {
panic!(
"{}({}, {}): std: {:?}, builtins: {:?}",
stringify!($fn_add), x, y, add0, add1
);
}
if sub0.0 != sub1.0 || sub0.1 != sub1.1 {
panic!(
"{}({}, {}): std: {:?}, builtins: {:?}",
stringify!($fn_sub), x, y, sub0, sub1
);
}
});
)*
};
}
#[test]
fn addsub() {
use compiler_builtins::int::addsub::{
__rust_i128_add, __rust_i128_addo, __rust_i128_sub, __rust_i128_subo, __rust_u128_add,
__rust_u128_addo, __rust_u128_sub, __rust_u128_subo,
};
// Integer addition and subtraction are very simple, so even a small number of fuzzing
// passes would be plenty; `N` passes are more than enough.
sum!(
u128, __rust_u128_add, __rust_u128_sub;
i128, __rust_i128_add, __rust_i128_sub;
);
overflowing_sum!(
u128, __rust_u128_addo, __rust_u128_subo;
i128, __rust_i128_addo, __rust_i128_subo;
);
}
macro_rules! float_sum {
($($f:ty, $fn_add:ident, $fn_sub:ident);*;) => {
$(
fuzz_float_2(N, |x: $f, y: $f| {
let add0 = x + y;
let sub0 = x - y;
let add1: $f = $fn_add(x, y);
let sub1: $f = $fn_sub(x, y);
if !Float::eq_repr(add0, add1) {
panic!(
"{}({}, {}): std: {}, builtins: {}",
stringify!($fn_add), x, y, add0, add1
);
}
if !Float::eq_repr(sub0, sub1) {
panic!(
"{}({}, {}): std: {}, builtins: {}",
stringify!($fn_sub), x, y, sub0, sub1
);
}
});
)*
};
}
#[cfg(not(all(target_arch = "x86", not(target_feature = "sse"))))]
#[test]
fn float_addsub() {
use compiler_builtins::float::{
add::{__adddf3, __addsf3},
sub::{__subdf3, __subsf3},
Float,
};
float_sum!(
f32, __addsf3, __subsf3;
f64, __adddf3, __subdf3;
);
}

View file

@ -0,0 +1,52 @@
use testcrate::*;
macro_rules! cmp {
($x:ident, $y:ident, $($unordered_val:expr, $fn:ident);*;) => {
$(
let cmp0 = if $x.is_nan() || $y.is_nan() {
$unordered_val
} else if $x < $y {
-1
} else if $x == $y {
0
} else {
1
};
let cmp1 = $fn($x, $y);
if cmp0 != cmp1 {
panic!("{}({}, {}): std: {}, builtins: {}", stringify!($fn_builtins), $x, $y, cmp0, cmp1);
}
)*
};
}
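// A note on the `$unordered_val` column (my reading of the libgcc-style convention,
// not stated in the diff): these routines return a three-way integer, and the value
// returned for unordered (NaN) operands is chosen so that the corresponding predicate
// comes out false. E.g. `lt` must have `__ltsf2(x, y) < 0` be false on NaN, so
// unordered returns 1, while `ge` must have `__gesf2(x, y) >= 0` be false, so
// unordered returns -1.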
#[test]
fn float_comparisons() {
use compiler_builtins::float::cmp::{
__eqdf2, __eqsf2, __gedf2, __gesf2, __gtdf2, __gtsf2, __ledf2, __lesf2, __ltdf2, __ltsf2,
__nedf2, __nesf2, __unorddf2, __unordsf2,
};
fuzz_float_2(N, |x: f32, y: f32| {
assert_eq!(__unordsf2(x, y) != 0, x.is_nan() || y.is_nan());
cmp!(x, y,
1, __ltsf2;
1, __lesf2;
1, __eqsf2;
-1, __gesf2;
-1, __gtsf2;
1, __nesf2;
);
});
fuzz_float_2(N, |x: f64, y: f64| {
assert_eq!(__unorddf2(x, y) != 0, x.is_nan() || y.is_nan());
cmp!(x, y,
1, __ltdf2;
1, __ledf2;
1, __eqdf2;
-1, __gedf2;
-1, __gtdf2;
1, __nedf2;
);
});
}

View file

@ -0,0 +1,125 @@
use testcrate::*;
macro_rules! i_to_f {
($($from:ty, $into:ty, $fn:ident);*;) => {
$(
fuzz(N, |x: $from| {
let f0 = x as $into;
let f1: $into = $fn(x);
// This makes sure that the conversion produced the best rounding possible, and does
// so independently of whether `x as $into` itself rounds correctly.
// This assumes that float to integer conversion is correct.
let y_minus_ulp = <$into>::from_bits(f1.to_bits().wrapping_sub(1)) as $from;
let y = f1 as $from;
let y_plus_ulp = <$into>::from_bits(f1.to_bits().wrapping_add(1)) as $from;
let error_minus = <$from as Int>::abs_diff(y_minus_ulp, x);
let error = <$from as Int>::abs_diff(y, x);
let error_plus = <$from as Int>::abs_diff(y_plus_ulp, x);
// The first two conditions check that neither of the two closest float values is
// strictly closer in representation to `x`. The third condition checks that ties
// round towards the even significand when the two closest float values are equally
// close to the integer.
if error_minus < error
|| error_plus < error
|| ((error_minus == error || error_plus == error)
&& ((f1.to_bits() & 1) != 0))
{
panic!(
"incorrect rounding by {}({}): {}, ({}, {}, {}), errors ({}, {}, {})",
stringify!($fn),
x,
f1.to_bits(),
y_minus_ulp,
y,
y_plus_ulp,
error_minus,
error,
error_plus,
);
}
// Test against the native conversion. We disable this on all 32-bit `x86` targets
// because of rounding bugs on `i686`. `powerpc` also has the same rounding bug.
if f0 != f1 && !cfg!(any(
target_arch = "x86",
target_arch = "powerpc",
target_arch = "powerpc64"
)) {
panic!(
"{}({}): std: {}, builtins: {}",
stringify!($fn),
x,
f0,
f1,
);
}
});
)*
};
}
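// Worked example of the tie condition above (illustrative, not part of the diff):
// for `u32 -> f32`, x == (1 << 24) + 1 is exactly halfway between the representable
// 16777216.0 and 16777218.0, so `error_plus == error` and the check passes only
// because the result's significand is even. Hypothetical add-on test:
#[test]
fn i_to_f_tie_example() {
    let x: u32 = (1 << 24) + 1; // 16777217, exactly between two `f32` values
    let f = x as f32; // the reference conversion rounds the tie to even
    assert_eq!(f, 16777216.0);
    assert_eq!(f.to_bits() & 1, 0); // even significand, accepted by the check above
}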
#[test]
fn int_to_float() {
use compiler_builtins::float::conv::{
__floatdidf, __floatdisf, __floatsidf, __floatsisf, __floattidf, __floattisf,
__floatundidf, __floatundisf, __floatunsidf, __floatunsisf, __floatuntidf, __floatuntisf,
};
use compiler_builtins::int::Int;
i_to_f!(
u32, f32, __floatunsisf;
u32, f64, __floatunsidf;
i32, f32, __floatsisf;
i32, f64, __floatsidf;
u64, f32, __floatundisf;
u64, f64, __floatundidf;
i64, f32, __floatdisf;
i64, f64, __floatdidf;
u128, f32, __floatuntisf;
u128, f64, __floatuntidf;
i128, f32, __floattisf;
i128, f64, __floattidf;
);
}
macro_rules! f_to_i {
($x:ident, $($i:ty, $fn:ident);*;) => {
$(
// it is undefined behavior in the first place to do conversions with NaNs
if !$x.is_nan() {
let conv0 = $x as $i;
let conv1: $i = $fn($x);
if conv0 != conv1 {
panic!("{}({}): std: {}, builtins: {}", stringify!($fn), $x, conv0, conv1);
}
}
)*
};
}
#[test]
fn float_to_int() {
use compiler_builtins::float::conv::{
__fixdfdi, __fixdfsi, __fixdfti, __fixsfdi, __fixsfsi, __fixsfti, __fixunsdfdi,
__fixunsdfsi, __fixunsdfti, __fixunssfdi, __fixunssfsi, __fixunssfti,
};
fuzz_float(N, |x: f32| {
f_to_i!(x,
u32, __fixunssfsi;
u64, __fixunssfdi;
u128, __fixunssfti;
i32, __fixsfsi;
i64, __fixsfdi;
i128, __fixsfti;
);
});
fuzz_float(N, |x: f64| {
f_to_i!(x,
u32, __fixunsdfsi;
u64, __fixunsdfdi;
u128, __fixunsdfti;
i32, __fixdfsi;
i64, __fixdfdi;
i128, __fixdfti;
);
});
}
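// Observation on the reference semantics (not part of the diff): since Rust 1.45,
// `as` casts from float to integer saturate, so the comparison above also pins the
// intrinsics to saturating behavior for out-of-range inputs. Hypothetical add-on test:
#[test]
fn f_to_i_saturation_example() {
    assert_eq!(f32::MAX as u32, u32::MAX); // saturates high
    assert_eq!((-1.0f32) as u32, 0); // saturates low
}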

View file

@ -1,8 +1,9 @@
-use rand_xoshiro::rand_core::{RngCore, SeedableRng};
-use rand_xoshiro::Xoshiro128StarStar;
use compiler_builtins::int::sdiv::{__divmoddi4, __divmodsi4, __divmodti4};
-use compiler_builtins::int::udiv::{__udivmoddi4, __udivmodsi4, __udivmodti4};
+use compiler_builtins::int::udiv::{__udivmoddi4, __udivmodsi4, __udivmodti4, u128_divide_sparc};
+use testcrate::*;
+// Division algorithms have by far the nastiest edge cases, and the largest number of them;
+// experience shows that up to 100_000 iterations of the random fuzzer are sometimes needed.
/// Creates intensive test functions for division functions of a certain size
macro_rules! test {
@ -16,14 +17,17 @@ macro_rules! test {
) => {
#[test]
fn $test_name() {
-fn assert_invariants(lhs: $uX, rhs: $uX) {
-let rem: &mut $uX = &mut 0;
-let quo: $uX = $unsigned_name(lhs, rhs, Some(rem));
-let rem = *rem;
+fuzz_2(N, |lhs, rhs| {
+if rhs == 0 {
+return;
+}
+let mut rem: $uX = 0;
+let quo: $uX = $unsigned_name(lhs, rhs, Some(&mut rem));
if rhs <= rem || (lhs != rhs.wrapping_mul(quo).wrapping_add(rem)) {
panic!(
"unsigned division function failed with lhs:{} rhs:{} \
-expected:({}, {}) found:({}, {})",
+std:({}, {}) builtins:({}, {})",
lhs,
rhs,
lhs.wrapping_div(rhs),
@ -55,7 +59,7 @@ macro_rules! test {
if incorrect_rem || lhs != rhs.wrapping_mul(quo).wrapping_add(rem) {
panic!(
"signed division function failed with lhs:{} rhs:{} \
-expected:({}, {}) found:({}, {})",
+std:({}, {}) builtins:({}, {})",
lhs,
rhs,
lhs.wrapping_div(rhs),
@ -64,70 +68,7 @@ macro_rules! test {
rem
);
-}
-}
-// Specially designed random fuzzer
-let mut rng = Xoshiro128StarStar::seed_from_u64(0);
-let mut lhs: $uX = 0;
-let mut rhs: $uX = 0;
-// all ones constant
-let ones: $uX = !0;
-// Alternating ones and zeros (e.x. 0b1010101010101010). This catches second-order
-// problems that might occur for algorithms with two modes of operation (potentially
-// there is some invariant that can be broken for large `duo` and maintained via
-// alternating between modes, breaking the algorithm when it reaches the end).
-let mut alt_ones: $uX = 1;
-for _ in 0..($n / 2) {
-alt_ones <<= 2;
-alt_ones |= 1;
-}
-// creates a mask for indexing the bits of the type
-let bit_indexing_mask = $n - 1;
-for _ in 0..1_000_000 {
-// Randomly OR, AND, and XOR randomly sized and shifted continuous strings of
-// ones with `lhs` and `rhs`. This results in excellent fuzzing entropy such as:
-// lhs:10101010111101000000000100101010 rhs: 1010101010000000000000001000001
-// lhs:10101010111101000000000101001010 rhs: 1010101010101010101010100010100
-// lhs:10101010111101000000000101001010 rhs:11101010110101010101010100001110
-// lhs:10101010000000000000000001001010 rhs:10100010100000000000000000001010
-// lhs:10101010000000000000000001001010 rhs: 10101010101010101000
-// lhs:10101010000000000000000001100000 rhs:11111111111101010101010101001111
-// lhs:10101010000000101010101011000000 rhs:11111111111101010101010100000111
-// lhs:10101010101010101010101011101010 rhs: 1010100000000000000
-// lhs:11111111110101101010101011010111 rhs: 1010100000000000000
-// The msb is set half of the time by the fuzzer, but `assert_invariants` tests
-// both the signed and unsigned functions.
-let r0: u32 = bit_indexing_mask & rng.next_u32();
-let r1: u32 = bit_indexing_mask & rng.next_u32();
-let mask = ones.wrapping_shr(r0).rotate_left(r1);
-match rng.next_u32() % 8 {
-0 => lhs |= mask,
-1 => lhs &= mask,
-// both 2 and 3 to make XORs as common as ORs and ANDs combined, otherwise
-// the entropy gets destroyed too often
-2 | 3 => lhs ^= mask,
-4 => rhs |= mask,
-5 => rhs &= mask,
-_ => rhs ^= mask,
-}
-// do the same for alternating ones and zeros
-let r0: u32 = bit_indexing_mask & rng.next_u32();
-let r1: u32 = bit_indexing_mask & rng.next_u32();
-let mask = alt_ones.wrapping_shr(r0).rotate_left(r1);
-match rng.next_u32() % 8 {
-0 => lhs |= mask,
-1 => lhs &= mask,
-// both 2 and 3 to make XORs as common as ORs and ANDs combined, otherwise
-// the entropy gets destroyed too often
-2 | 3 => lhs ^= mask,
-4 => rhs |= mask,
-5 => rhs &= mask,
-_ => rhs ^= mask,
-}
-if rhs != 0 {
-assert_invariants(lhs, rhs);
-}
-}
+});
}
};
}
@ -135,3 +76,61 @@ macro_rules! test {
test!(32, u32, i32, div_rem_si4, __udivmodsi4, __divmodsi4);
test!(64, u64, i64, div_rem_di4, __udivmoddi4, __divmoddi4);
test!(128, u128, i128, div_rem_ti4, __udivmodti4, __divmodti4);
#[test]
fn divide_sparc() {
fuzz_2(N, |lhs, rhs| {
if rhs == 0 {
return;
}
let mut rem: u128 = 0;
let quo: u128 = u128_divide_sparc(lhs, rhs, &mut rem);
if rhs <= rem || (lhs != rhs.wrapping_mul(quo).wrapping_add(rem)) {
panic!(
"u128_divide_sparc({}, {}): \
std:({}, {}), builtins:({}, {})",
lhs,
rhs,
lhs.wrapping_div(rhs),
lhs.wrapping_rem(rhs),
quo,
rem
);
}
});
}
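// Why the `rhs <= rem || lhs != rhs.wrapping_mul(quo).wrapping_add(rem)` check
// suffices (a sketch of the reasoning, not part of the diff): for `rhs != 0` there
// is exactly one pair `(quo, rem)` with `lhs == rhs * quo + rem` and `rem < rhs`,
// so verifying the identity and the bound is equivalent to comparing against `/`
// and `%` directly. Hypothetical add-on test:
#[test]
fn div_rem_invariant_example() {
    let (lhs, rhs) = (1000u128, 7u128);
    assert_eq!(lhs, rhs * (lhs / rhs) + (lhs % rhs));
    assert!(lhs % rhs < rhs);
}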
macro_rules! float {
($($i:ty, $fn:ident);*;) => {
$(
fuzz_float_2(N, |x: $i, y: $i| {
let quo0 = x / y;
let quo1: $i = $fn(x, y);
// division of subnormals is not currently handled
if !(Float::is_subnormal(&quo0) || Float::is_subnormal(&quo1)) {
if !Float::eq_repr(quo0, quo1) {
panic!(
"{}({}, {}): std: {}, builtins: {}",
stringify!($fn), x, y, quo0, quo1
);
}
}
});
)*
};
}
#[cfg(not(all(target_arch = "x86", not(target_feature = "sse"))))]
#[test]
fn float_div() {
use compiler_builtins::float::{
div::{__divdf3, __divsf3},
Float,
};
float!(
f32, __divsf3;
f64, __divdf3;
);
}

View file

@ -1,54 +0,0 @@
use rand_xoshiro::rand_core::{RngCore, SeedableRng};
use rand_xoshiro::Xoshiro128StarStar;
use compiler_builtins::int::__clzsi2;
use compiler_builtins::int::leading_zeros::{
usize_leading_zeros_default, usize_leading_zeros_riscv,
};
#[test]
fn __clzsi2_test() {
// Binary fuzzer. We cannot just send a random number directly to `__clzsi2()`, because we need
// large sequences of zeros to test. This XORs, ANDs, and ORs random length strings of 1s to
// `x`. ORs insure sequences of ones, ANDs insures sequences of zeros, and XORs are not often
// destructive but add entropy.
let mut rng = Xoshiro128StarStar::seed_from_u64(0);
let mut x = 0usize;
// creates a mask for indexing the bits of the type
let bit_indexing_mask = usize::MAX.count_ones() - 1;
// 10000 iterations is enough to make sure edge cases like single set bits are tested and to go
// through many paths.
for _ in 0..10_000 {
let r0 = bit_indexing_mask & rng.next_u32();
// random length of ones
let ones: usize = !0 >> r0;
let r1 = bit_indexing_mask & rng.next_u32();
// random circular shift
let mask = ones.rotate_left(r1);
match rng.next_u32() % 4 {
0 => x |= mask,
1 => x &= mask,
// both 2 and 3 to make XORs as common as ORs and ANDs combined
_ => x ^= mask,
}
let lz = x.leading_zeros() as usize;
let lz0 = __clzsi2(x);
let lz1 = usize_leading_zeros_default(x);
let lz2 = usize_leading_zeros_riscv(x);
if lz0 != lz {
panic!("__clzsi2({}): expected: {}, found: {}", x, lz, lz0);
}
if lz1 != lz {
panic!(
"usize_leading_zeros_default({}): expected: {}, found: {}",
x, lz, lz1
);
}
if lz2 != lz {
panic!(
"usize_leading_zeros_riscv({}): expected: {}, found: {}",
x, lz, lz2
);
}
}
}

View file

@ -0,0 +1,134 @@
use testcrate::*;
/// Make sure that the edge case tester and randomized tester don't break, and list examples of
/// fuzz values for documentation purposes.
#[test]
fn fuzz_values() {
const VALS: [u16; 47] = [
0b0, // edge cases
0b1111111111111111,
0b1111111111111110,
0b1111111111111100,
0b1111111110000000,
0b1111111100000000,
0b1110000000000000,
0b1100000000000000,
0b1000000000000000,
0b111111111111111,
0b111111111111110,
0b111111111111100,
0b111111110000000,
0b111111100000000,
0b110000000000000,
0b100000000000000,
0b11111111111111,
0b11111111111110,
0b11111111111100,
0b11111110000000,
0b11111100000000,
0b10000000000000,
0b111111111,
0b111111110,
0b111111100,
0b110000000,
0b100000000,
0b11111111,
0b11111110,
0b11111100,
0b10000000,
0b111,
0b110,
0b100,
0b11,
0b10,
0b1,
0b1010110100000, // beginning of random fuzzing
0b1100011001011010,
0b1001100101001111,
0b1101010100011010,
0b100010001,
0b1000000000000000,
0b1100000000000101,
0b1100111101010101,
0b1100010111111111,
0b1111110101111111,
];
let mut i = 0;
fuzz(10, |x: u16| {
assert_eq!(x, VALS[i]);
i += 1;
});
}
#[test]
fn leading_zeros() {
use compiler_builtins::int::__clzsi2;
use compiler_builtins::int::leading_zeros::{
usize_leading_zeros_default, usize_leading_zeros_riscv,
};
fuzz(N, |x: usize| {
let lz = x.leading_zeros() as usize;
let lz0 = __clzsi2(x);
let lz1 = usize_leading_zeros_default(x);
let lz2 = usize_leading_zeros_riscv(x);
if lz0 != lz {
panic!("__clzsi2({}): std: {}, builtins: {}", x, lz, lz0);
}
if lz1 != lz {
panic!(
"usize_leading_zeros_default({}): std: {}, builtins: {}",
x, lz, lz1
);
}
if lz2 != lz {
panic!(
"usize_leading_zeros_riscv({}): std: {}, builtins: {}",
x, lz, lz2
);
}
})
}
#[test]
fn float_extend() {
fuzz_float(N, |x: f32| {
let tmp0 = x as f64;
let tmp1: f64 = compiler_builtins::float::extend::__extendsfdf2(x);
if !compiler_builtins::float::Float::eq_repr(tmp0, tmp1) {
panic!("__extendsfdf2({}): std: {}, builtins: {}", x, tmp0, tmp1);
}
});
}
// This doesn't quite work because of issues related to
// https://github.com/rust-lang/rust/issues/73920.
// TODO how do we resolve this?
/*
macro_rules! pow {
($($f:ty, $fn:ident);*;) => {
$(
fuzz_float_2(N, |x: $f, y: $f| {
let n = y as i32;
let tmp0: $f = x.powi(n);
let tmp1: $f = $fn(x, n);
if tmp0 != tmp1 {
panic!(
"{}({}, {}): std: {}, builtins: {}",
stringify!($fn), x, y, tmp0, tmp1
);
}
});
)*
};
}
#[test]
fn float_pow() {
use compiler_builtins::float::pow::{__powidf2, __powisf2};
pow!(
f32, __powisf2;
f64, __powidf2;
);
}
*/

View file

@ -0,0 +1,114 @@
use testcrate::*;
macro_rules! mul {
($($i:ty, $fn:ident);*;) => {
$(
fuzz_2(N, |x: $i, y: $i| {
let mul0 = x.wrapping_mul(y);
let mul1: $i = $fn(x, y);
if mul0 != mul1 {
panic!(
"{}({}, {}): std: {}, builtins: {}",
stringify!($fn), x, y, mul0, mul1
);
}
});
)*
};
}
#[test]
fn mul() {
use compiler_builtins::int::mul::{__muldi3, __multi3};
mul!(
u64, __muldi3;
i128, __multi3;
);
}
macro_rules! overflowing_mul {
($($i:ty, $fn:ident);*;) => {
$(
fuzz_2(N, |x: $i, y: $i| {
let (mul0, o0) = x.overflowing_mul(y);
let mut o1 = 0i32;
let mul1: $i = $fn(x, y, &mut o1);
let o1 = o1 != 0;
if mul0 != mul1 || o0 != o1 {
panic!(
"{}({}, {}): std: ({}, {}), builtins: ({}, {})",
stringify!($fn), x, y, mul0, o0, mul1, o1
);
}
});
)*
};
}
#[test]
fn overflowing_mul() {
use compiler_builtins::int::mul::{
__mulodi4, __mulosi4, __muloti4, __rust_i128_mulo, __rust_u128_mulo,
};
overflowing_mul!(
i32, __mulosi4;
i64, __mulodi4;
i128, __muloti4;
);
fuzz_2(N, |x: u128, y: u128| {
let (mul0, o0) = x.overflowing_mul(y);
let (mul1, o1) = __rust_u128_mulo(x, y);
if mul0 != mul1 || o0 != o1 {
panic!(
"__rust_u128_mulo({}, {}): std: ({}, {}), builtins: ({}, {})",
x, y, mul0, o0, mul1, o1
);
}
let x = x as i128;
let y = y as i128;
let (mul0, o0) = x.overflowing_mul(y);
let (mul1, o1) = __rust_i128_mulo(x, y);
if mul0 != mul1 || o0 != o1 {
panic!(
"__rust_i128_mulo({}, {}): std: ({}, {}), builtins: ({}, {})",
x, y, mul0, o0, mul1, o1
);
}
});
}
macro_rules! float_mul {
($($f:ty, $fn:ident);*;) => {
$(
fuzz_float_2(N, |x: $f, y: $f| {
let mul0 = x * y;
let mul1: $f = $fn(x, y);
// multiplication of subnormals is not currently handled
if !(Float::is_subnormal(&mul0) || Float::is_subnormal(&mul1)) {
if !Float::eq_repr(mul0, mul1) {
panic!(
"{}({}, {}): std: {}, builtins: {}",
stringify!($fn), x, y, mul0, mul1
);
}
}
});
)*
};
}
#[cfg(not(all(target_arch = "x86", not(target_feature = "sse"))))]
#[test]
fn float_mul() {
use compiler_builtins::float::{
mul::{__muldf3, __mulsf3},
Float,
};
float_mul!(
f32, __mulsf3;
f64, __muldf3;
);
}

View file

@ -0,0 +1,60 @@
use testcrate::*;
macro_rules! shift {
($($i:ty, $fn_std:ident, $fn_builtins:ident);*;) => {
$(
fuzz_shift(|x: $i, s: u32| {
let tmp0: $i = x.$fn_std(s);
let tmp1: $i = $fn_builtins(x, s);
if tmp0 != tmp1 {
panic!(
"{}({}, {}): std: {}, builtins: {}",
stringify!($fn_builtins), x, s, tmp0, tmp1
);
}
});
)*
};
}
macro_rules! overflowing_shift {
($($i:ty, $fn_std:ident, $fn_builtins:ident);*;) => {
$(
fuzz_shift(|x: $i, s: u32| {
let tmp0: $i = x.$fn_std(s);
let (tmp1, o1): ($i, bool) = $fn_builtins(x, s.into());
if tmp0 != tmp1 || o1 {
panic!(
"{}({}, {}): std: {}, builtins: {}",
stringify!($fn_builtins), x, s, tmp0, tmp1
);
}
});
)*
};
}
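// Note (my reading of `fuzz_shift`, not stated in the diff): only in-range shift
// amounts are generated from `FUZZ_LENGTHS`, so the overflow flag returned by the
// `*_shlo`/`*_shro` intrinsics is expected to be false on every input here, hence
// the unconditional `|| o1` in the panic condition above.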
#[test]
fn shift() {
use compiler_builtins::int::shift::{
__ashldi3, __ashlsi3, __ashlti3, __ashrdi3, __ashrsi3, __ashrti3, __lshrdi3, __lshrsi3,
__lshrti3, __rust_i128_shlo, __rust_i128_shro, __rust_u128_shlo, __rust_u128_shro,
};
shift!(
u32, wrapping_shl, __ashlsi3;
u64, wrapping_shl, __ashldi3;
u128, wrapping_shl, __ashlti3;
i32, wrapping_shr, __ashrsi3;
i64, wrapping_shr, __ashrdi3;
i128, wrapping_shr, __ashrti3;
u32, wrapping_shr, __lshrsi3;
u64, wrapping_shr, __lshrdi3;
u128, wrapping_shr, __lshrti3;
);
overflowing_shift!(
u128, wrapping_shl, __rust_u128_shlo;
i128, wrapping_shl, __rust_i128_shlo;
u128, wrapping_shr, __rust_u128_shro;
i128, wrapping_shr, __rust_i128_shro;
);
}