Auto merge of #1729 - RalfJung:rustup, r=RalfJung
rustup; fix tests for new MIR optimization. Somehow https://github.com/rust-lang/rust/pull/78360 manages to mask UB. This would make sense if there were loops or things like that, but there are not, so really this is just very confusing...
This commit is contained in:
commit
0fc9aee973
9 changed files with 14 additions and 117 deletions
5
ci.sh
5
ci.sh
|
|
@@ -24,8 +24,9 @@ function run_tests {
|
|||
|
||||
./miri test --locked
|
||||
if [ -z "${MIRI_TEST_TARGET+exists}" ]; then
|
||||
# Only for host architecture: tests with MIR optimizations
|
||||
MIRIFLAGS="-Z mir-opt-level=3" ./miri test --locked
|
||||
# Only for host architecture: tests with optimizations (`-O` is what cargo passes, but crank MIR
|
||||
# optimizations up all the way).
|
||||
MIRIFLAGS="-O -Zmir-opt-level=3" ./miri test --locked
|
||||
fi
|
||||
|
||||
# On Windows, there is always "python", not "python3" or "python2".
|
||||
|
|
|
|||
|
|
@@ -1 +1 @@
|
|||
d2731d8e9338d8fe844e19d3fbb39617753e65f4
|
||||
09db05762b283bed62d4f92729cfee4646519833
|
||||
|
|
|
|||
|
|
@@ -27,9 +27,6 @@ pub fn main() {
|
|||
// 3. stack-deallocate
|
||||
unsafe {
|
||||
let j1 = spawn(move || {
|
||||
// Concurrent allocate the memory.
|
||||
// Uses relaxed semantics to not generate
|
||||
// a release sequence.
|
||||
let pointer = &*ptr.0;
|
||||
{
|
||||
let mut stack_var = 0usize;
|
||||
|
|
@@ -38,6 +35,8 @@ pub fn main() {
|
|||
|
||||
sleep(Duration::from_millis(200));
|
||||
|
||||
// Now `stack_var` gets deallocated.
|
||||
|
||||
} //~ ERROR Data race detected between Deallocate on Thread(id = 1) and Read on Thread(id = 2)
|
||||
});
|
||||
|
||||
|
|
|
|||
|
|
@@ -1,52 +0,0 @@
|
|||
// ignore-windows: Concurrency on Windows is not supported yet.
|
||||
// compile-flags: -Zmiri-disable-isolation
|
||||
|
||||
use std::thread::{spawn, sleep};
|
||||
use std::ptr::null_mut;
|
||||
use std::sync::atomic::{Ordering, AtomicPtr};
|
||||
use std::time::Duration;
|
||||
|
||||
#[derive(Copy, Clone)]
|
||||
struct EvilSend<T>(pub T);
|
||||
|
||||
unsafe impl<T> Send for EvilSend<T> {}
|
||||
unsafe impl<T> Sync for EvilSend<T> {}
|
||||
|
||||
pub fn main() {
|
||||
// Shared atomic pointer
|
||||
let pointer = AtomicPtr::new(null_mut::<usize>());
|
||||
let ptr = EvilSend(&pointer as *const AtomicPtr<usize>);
|
||||
|
||||
// Note: this is scheduler-dependent
|
||||
// the operations need to occur in
|
||||
// order, otherwise the allocation is
|
||||
// not visible to the other-thread to
|
||||
// detect the race:
|
||||
// 1. stack-allocate
|
||||
// 2. read
|
||||
// 3. stack-deallocate
|
||||
unsafe {
|
||||
let j1 = spawn(move || {
|
||||
// Concurrent allocate the memory.
|
||||
// Uses relaxed semantics to not generate
|
||||
// a release sequence.
|
||||
let pointer = &*ptr.0;
|
||||
|
||||
let mut stack_var = 0usize;
|
||||
|
||||
pointer.store(&mut stack_var as *mut _, Ordering::Release);
|
||||
|
||||
sleep(Duration::from_millis(200));
|
||||
|
||||
drop(stack_var);
|
||||
}); //~ ERROR Data race detected between Deallocate on Thread(id = 1) and Read on Thread(id = 2)
|
||||
|
||||
let j2 = spawn(move || {
|
||||
let pointer = &*ptr.0;
|
||||
*pointer.load(Ordering::Acquire)
|
||||
});
|
||||
|
||||
j1.join().unwrap();
|
||||
j2.join().unwrap();
|
||||
}
|
||||
}
|
||||
|
|
@@ -27,9 +27,6 @@ pub fn main() {
|
|||
// 3. stack-deallocate
|
||||
unsafe {
|
||||
let j1 = spawn(move || {
|
||||
// Concurrent allocate the memory.
|
||||
// Uses relaxed semantics to not generate
|
||||
// a release sequence.
|
||||
let pointer = &*ptr.0;
|
||||
{
|
||||
let mut stack_var = 0usize;
|
||||
|
|
@@ -38,6 +35,8 @@ pub fn main() {
|
|||
|
||||
sleep(Duration::from_millis(200));
|
||||
|
||||
// Now `stack_var` gets deallocated.
|
||||
|
||||
} //~ ERROR Data race detected between Deallocate on Thread(id = 1) and Write on Thread(id = 2)
|
||||
});
|
||||
|
||||
|
|
|
|||
|
|
@@ -1,53 +0,0 @@
|
|||
// ignore-windows: Concurrency on Windows is not supported yet.
|
||||
// compile-flags: -Zmiri-disable-isolation
|
||||
|
||||
use std::thread::{spawn, sleep};
|
||||
use std::ptr::null_mut;
|
||||
use std::sync::atomic::{Ordering, AtomicPtr};
|
||||
use std::time::Duration;
|
||||
|
||||
#[derive(Copy, Clone)]
|
||||
struct EvilSend<T>(pub T);
|
||||
|
||||
unsafe impl<T> Send for EvilSend<T> {}
|
||||
unsafe impl<T> Sync for EvilSend<T> {}
|
||||
|
||||
pub fn main() {
|
||||
// Shared atomic pointer
|
||||
let pointer = AtomicPtr::new(null_mut::<usize>());
|
||||
let ptr = EvilSend(&pointer as *const AtomicPtr<usize>);
|
||||
|
||||
// Note: this is scheduler-dependent
|
||||
// the operations need to occur in
|
||||
// order, otherwise the allocation is
|
||||
// not visible to the other-thread to
|
||||
// detect the race:
|
||||
// 1. stack-allocate
|
||||
// 2. read
|
||||
// 3. stack-deallocate
|
||||
unsafe {
|
||||
let j1 = spawn(move || {
|
||||
// Concurrent allocate the memory.
|
||||
// Uses relaxed semantics to not generate
|
||||
// a release sequence.
|
||||
let pointer = &*ptr.0;
|
||||
|
||||
let mut stack_var = 0usize;
|
||||
|
||||
pointer.store(&mut stack_var as *mut _, Ordering::Release);
|
||||
|
||||
sleep(Duration::from_millis(200));
|
||||
|
||||
// Note: Implicit read for drop(_) races with write, would detect race with deallocate after.
|
||||
drop(stack_var); //~ ERROR Data race detected between Read on Thread(id = 1) and Write on Thread(id = 2)
|
||||
});
|
||||
|
||||
let j2 = spawn(move || {
|
||||
let pointer = &*ptr.0;
|
||||
*pointer.load(Ordering::Acquire) = 3;
|
||||
});
|
||||
|
||||
j1.join().unwrap();
|
||||
j2.join().unwrap();
|
||||
}
|
||||
}
|
||||
|
|
@@ -1,4 +1,5 @@
|
|||
// ignore-windows: No libc on Windows
|
||||
// error-pattern: deadlock
|
||||
|
||||
#![feature(rustc_private)]
|
||||
|
||||
|
|
@@ -8,6 +9,6 @@ fn main() {
|
|||
let rw = std::cell::UnsafeCell::new(libc::PTHREAD_RWLOCK_INITIALIZER);
|
||||
unsafe {
|
||||
assert_eq!(libc::pthread_rwlock_rdlock(rw.get()), 0);
|
||||
libc::pthread_rwlock_wrlock(rw.get()); //~ ERROR: deadlock
|
||||
libc::pthread_rwlock_wrlock(rw.get());
|
||||
}
|
||||
}
|
||||
|
|
@@ -1,4 +1,5 @@
|
|||
// ignore-windows: No libc on Windows
|
||||
// error-pattern: deadlock
|
||||
|
||||
#![feature(rustc_private)]
|
||||
|
||||
|
|
@@ -8,6 +9,6 @@ fn main() {
|
|||
let rw = std::cell::UnsafeCell::new(libc::PTHREAD_RWLOCK_INITIALIZER);
|
||||
unsafe {
|
||||
assert_eq!(libc::pthread_rwlock_wrlock(rw.get()), 0);
|
||||
libc::pthread_rwlock_rdlock(rw.get()); //~ ERROR: deadlock
|
||||
libc::pthread_rwlock_rdlock(rw.get());
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@@ -1,4 +1,5 @@
|
|||
// ignore-windows: No libc on Windows
|
||||
// error-pattern: deadlock
|
||||
|
||||
#![feature(rustc_private)]
|
||||
|
||||
|
|
@@ -8,6 +9,6 @@ fn main() {
|
|||
let rw = std::cell::UnsafeCell::new(libc::PTHREAD_RWLOCK_INITIALIZER);
|
||||
unsafe {
|
||||
assert_eq!(libc::pthread_rwlock_wrlock(rw.get()), 0);
|
||||
libc::pthread_rwlock_wrlock(rw.get()); //~ ERROR: deadlock
|
||||
libc::pthread_rwlock_wrlock(rw.get());
|
||||
}
|
||||
}
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue