Also test pthread_mutex/rwlock directly.
It turns out that one of the synchronizations in rwlock_writer_unlock is unnecessary.
This commit is contained in:
parent
fe4d3274f0
commit
781190f5b7
3 changed files with 146 additions and 21 deletions
|
|
@ -110,6 +110,7 @@ struct RwLock {
|
|||
/// must load the clock of the last write and must not
|
||||
/// add happens-before orderings between shared reader
|
||||
/// locks.
|
||||
/// This is only relevant when there is an active reader.
|
||||
data_race_reader: VClock,
|
||||
}
|
||||
|
||||
|
|
@ -485,6 +486,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
|
|||
Entry::Vacant(_) => return false, // we did not even own this lock
|
||||
}
|
||||
if let Some(data_race) = &this.machine.data_race {
|
||||
// Add this to the shared-release clock of all concurrent readers.
|
||||
data_race.validate_lock_release_shared(
|
||||
&mut rwlock.data_race_reader,
|
||||
reader,
|
||||
|
|
@ -539,20 +541,13 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
|
|||
}
|
||||
rwlock.writer = None;
|
||||
trace!("rwlock_writer_unlock: {:?} unlocked by {:?}", id, expected_writer);
|
||||
// Release memory to both reader and writer vector clocks
|
||||
// since this writer happens-before both the union of readers once they are finished
|
||||
// and the next writer
|
||||
// Release memory to next lock holder.
|
||||
if let Some(data_race) = &this.machine.data_race {
|
||||
data_race.validate_lock_release(
|
||||
&mut rwlock.data_race,
|
||||
current_writer,
|
||||
current_span,
|
||||
);
|
||||
data_race.validate_lock_release(
|
||||
&mut rwlock.data_race_reader,
|
||||
current_writer,
|
||||
current_span,
|
||||
);
|
||||
}
|
||||
// The thread was a writer.
|
||||
//
|
||||
|
|
|
|||
|
|
@ -1,24 +1,34 @@
|
|||
//@ignore-target-windows: No libc on Windows
|
||||
// We use `yield` to test specific interleavings, so disable automatic preemption.
|
||||
//@compile-flags: -Zmiri-preemption-rate=0
|
||||
#![feature(sync_unsafe_cell)]
|
||||
|
||||
use std::cell::SyncUnsafeCell;
|
||||
use std::thread;
|
||||
use std::{mem, ptr};
|
||||
|
||||
fn main() {
    // Initialization / attribute behavior for each mutex kind, plus the
    // rwlock static initializer.
    test_mutex_libc_init_recursive();
    test_mutex_libc_init_normal();
    test_mutex_libc_init_errorcheck();
    test_rwlock_libc_static_initializer();

    // Gated to Linux — the recursive static mutex initializer is presumably a
    // non-portable extension (TODO confirm against the libc crate).
    #[cfg(target_os = "linux")]
    test_mutex_libc_static_initializer_recursive();

    // Multi-threaded tests of the actual locking/synchronization behavior.
    test_mutex();
    check_rwlock_write();
    check_rwlock_read_no_deadlock();
}
|
||||
|
||||
fn test_mutex_libc_init_recursive() {
|
||||
unsafe {
|
||||
let mut attr: libc::pthread_mutexattr_t = std::mem::zeroed();
|
||||
let mut attr: libc::pthread_mutexattr_t = mem::zeroed();
|
||||
assert_eq!(libc::pthread_mutexattr_init(&mut attr as *mut _), 0);
|
||||
assert_eq!(
|
||||
libc::pthread_mutexattr_settype(&mut attr as *mut _, libc::PTHREAD_MUTEX_RECURSIVE),
|
||||
0,
|
||||
);
|
||||
let mut mutex: libc::pthread_mutex_t = std::mem::zeroed();
|
||||
let mut mutex: libc::pthread_mutex_t = mem::zeroed();
|
||||
assert_eq!(libc::pthread_mutex_init(&mut mutex as *mut _, &mut attr as *mut _), 0);
|
||||
assert_eq!(libc::pthread_mutex_lock(&mut mutex as *mut _), 0);
|
||||
assert_eq!(libc::pthread_mutex_trylock(&mut mutex as *mut _), 0);
|
||||
|
|
@ -36,7 +46,7 @@ fn test_mutex_libc_init_recursive() {
|
|||
|
||||
fn test_mutex_libc_init_normal() {
|
||||
unsafe {
|
||||
let mut mutexattr: libc::pthread_mutexattr_t = std::mem::zeroed();
|
||||
let mut mutexattr: libc::pthread_mutexattr_t = mem::zeroed();
|
||||
assert_eq!(
|
||||
libc::pthread_mutexattr_settype(&mut mutexattr as *mut _, 0x12345678),
|
||||
libc::EINVAL,
|
||||
|
|
@ -45,7 +55,7 @@ fn test_mutex_libc_init_normal() {
|
|||
libc::pthread_mutexattr_settype(&mut mutexattr as *mut _, libc::PTHREAD_MUTEX_NORMAL),
|
||||
0,
|
||||
);
|
||||
let mut mutex: libc::pthread_mutex_t = std::mem::zeroed();
|
||||
let mut mutex: libc::pthread_mutex_t = mem::zeroed();
|
||||
assert_eq!(libc::pthread_mutex_init(&mut mutex as *mut _, &mutexattr as *const _), 0);
|
||||
assert_eq!(libc::pthread_mutex_lock(&mut mutex as *mut _), 0);
|
||||
assert_eq!(libc::pthread_mutex_trylock(&mut mutex as *mut _), libc::EBUSY);
|
||||
|
|
@ -58,7 +68,7 @@ fn test_mutex_libc_init_normal() {
|
|||
|
||||
fn test_mutex_libc_init_errorcheck() {
|
||||
unsafe {
|
||||
let mut mutexattr: libc::pthread_mutexattr_t = std::mem::zeroed();
|
||||
let mut mutexattr: libc::pthread_mutexattr_t = mem::zeroed();
|
||||
assert_eq!(
|
||||
libc::pthread_mutexattr_settype(
|
||||
&mut mutexattr as *mut _,
|
||||
|
|
@ -66,7 +76,7 @@ fn test_mutex_libc_init_errorcheck() {
|
|||
),
|
||||
0,
|
||||
);
|
||||
let mut mutex: libc::pthread_mutex_t = std::mem::zeroed();
|
||||
let mut mutex: libc::pthread_mutex_t = mem::zeroed();
|
||||
assert_eq!(libc::pthread_mutex_init(&mut mutex as *mut _, &mutexattr as *const _), 0);
|
||||
assert_eq!(libc::pthread_mutex_lock(&mut mutex as *mut _), 0);
|
||||
assert_eq!(libc::pthread_mutex_trylock(&mut mutex as *mut _), libc::EBUSY);
|
||||
|
|
@ -98,6 +108,111 @@ fn test_mutex_libc_static_initializer_recursive() {
|
|||
}
|
||||
}
|
||||
|
||||
/// Wrapper that lets a raw pointer be moved into spawned threads.
///
/// Raw pointers are not `Send`; the tests below deliberately share data
/// between threads through this wrapper so that the pthread locks under test
/// are the only synchronization in play.
struct SendPtr<T> {
    ptr: *mut T,
}

// SAFETY: the tests using this type are responsible for synchronizing all
// accesses through `ptr` (they do so with the pthread locks being tested).
unsafe impl<T> Send for SendPtr<T> {}

// `Clone`/`Copy` are implemented by hand instead of derived: a derive would
// add an unnecessary `T: Clone`/`T: Copy` bound, but copying the wrapper only
// copies the pointer, never the pointee.
impl<T> Clone for SendPtr<T> {
    fn clone(&self) -> Self {
        SendPtr { ptr: self.ptr }
    }
}

impl<T> Copy for SendPtr<T> {}
|
||||
|
||||
/// Exercise `libc::pthread_mutex_t` directly: three threads each increment a
/// shared counter once while holding the mutex, then the main thread checks
/// the final count.
fn test_mutex() {
    // Specifically *not* using `Arc` to make sure there is no synchronization apart from the mutex.
    unsafe {
        // The mutex and the counter it protects live side by side in one cell;
        // both are reached through raw pointers only.
        let data = SyncUnsafeCell::new((libc::PTHREAD_MUTEX_INITIALIZER, 0));
        let ptr = SendPtr { ptr: data.get() };
        let mut threads = Vec::new();

        for _ in 0..3 {
            let thread = thread::spawn(move || {
                let ptr = ptr; // circumvent per-field closure capture
                let mutexptr = ptr::addr_of_mut!((*ptr.ptr).0);
                assert_eq!(libc::pthread_mutex_lock(mutexptr), 0);
                // Yield while holding the lock so other threads get a chance
                // to run and contend for it (automatic preemption is disabled
                // for this test — see the compile-flags at the top of file).
                thread::yield_now();
                (*ptr.ptr).1 += 1;
                assert_eq!(libc::pthread_mutex_unlock(mutexptr), 0);
            });
            threads.push(thread);
        }

        for thread in threads {
            thread.join().unwrap();
        }

        // All threads are done: the mutex must be free again (trylock
        // succeeds) and each thread must have contributed exactly one
        // increment.
        let mutexptr = ptr::addr_of_mut!((*ptr.ptr).0);
        assert_eq!(libc::pthread_mutex_trylock(mutexptr), 0);
        assert_eq!((*ptr.ptr).1, 3);
    }
}
|
||||
|
||||
/// Exercise `libc::pthread_rwlock_t` with concurrent writers and readers:
/// three writer threads each increment a shared counter under the write lock
/// while three reader threads observe it under the read lock.
fn check_rwlock_write() {
    unsafe {
        // The rwlock and the counter it protects share one cell; all access
        // goes through raw pointers, so the rwlock is the only
        // synchronization between the threads.
        let data = SyncUnsafeCell::new((libc::PTHREAD_RWLOCK_INITIALIZER, 0));
        let ptr = SendPtr { ptr: data.get() };
        let mut threads = Vec::new();

        for _ in 0..3 {
            let thread = thread::spawn(move || {
                let ptr = ptr; // circumvent per-field closure capture
                let rwlockptr = ptr::addr_of_mut!((*ptr.ptr).0);
                assert_eq!(libc::pthread_rwlock_wrlock(rwlockptr), 0);
                // Yield while holding the write lock to let other threads
                // contend (preemption is disabled in this test).
                thread::yield_now();
                (*ptr.ptr).1 += 1;
                assert_eq!(libc::pthread_rwlock_unlock(rwlockptr), 0);
            });
            threads.push(thread);

            // Pair each writer with a reader that only observes the counter.
            let readthread = thread::spawn(move || {
                let ptr = ptr; // circumvent per-field closure capture
                let rwlockptr = ptr::addr_of_mut!((*ptr.ptr).0);
                assert_eq!(libc::pthread_rwlock_rdlock(rwlockptr), 0);
                thread::yield_now();
                // A reader can run before, between, or after the writers, so
                // any value from 0 to 3 is legal here.
                let val = (*ptr.ptr).1;
                assert!(val >= 0 && val <= 3);
                assert_eq!(libc::pthread_rwlock_unlock(rwlockptr), 0);
            });
            threads.push(readthread);
        }

        for thread in threads {
            thread.join().unwrap();
        }

        // All threads joined: the lock must be available again and all three
        // increments must be visible.
        let rwlockptr = ptr::addr_of_mut!((*ptr.ptr).0);
        assert_eq!(libc::pthread_rwlock_tryrdlock(rwlockptr), 0);
        assert_eq!((*ptr.ptr).1, 3);
    }
}
|
||||
|
||||
/// Check that shared (read) locks do not exclude each other: two threads take
/// read locks on two rwlocks in opposite order, which would deadlock with
/// exclusive locks but must succeed with read locks.
fn check_rwlock_read_no_deadlock() {
    unsafe {
        let l1 = SyncUnsafeCell::new(libc::PTHREAD_RWLOCK_INITIALIZER);
        let l1 = SendPtr { ptr: l1.get() };
        let l2 = SyncUnsafeCell::new(libc::PTHREAD_RWLOCK_INITIALIZER);
        let l2 = SendPtr { ptr: l2.get() };

        // acquire l1 and hold it until after the other thread is done
        assert_eq!(libc::pthread_rwlock_rdlock(l1.ptr), 0);
        let handle = thread::spawn(move || {
            let l1 = l1; // circumvent per-field closure capture
            let l2 = l2; // circumvent per-field closure capture
            // acquire l2 before the other thread
            assert_eq!(libc::pthread_rwlock_rdlock(l2.ptr), 0);
            thread::yield_now();
            // Read-lock l1 while the main thread still holds its read lock on
            // it — this must not block.
            assert_eq!(libc::pthread_rwlock_rdlock(l1.ptr), 0);
            thread::yield_now();
            assert_eq!(libc::pthread_rwlock_unlock(l1.ptr), 0);
            assert_eq!(libc::pthread_rwlock_unlock(l2.ptr), 0);
        });
        thread::yield_now();
        // Read-lock l2 while the spawned thread holds its read lock on it —
        // with exclusive locks this lock-order inversion would deadlock.
        assert_eq!(libc::pthread_rwlock_rdlock(l2.ptr), 0);
        handle.join().unwrap();
    }
}
|
||||
|
||||
// std::sync::RwLock does not even use pthread_rwlock any more.
|
||||
// Do some smoke testing of the API surface.
|
||||
fn test_rwlock_libc_static_initializer() {
|
||||
|
|
|
|||
|
|
@ -1,6 +1,7 @@
|
|||
//@revisions: stack tree
|
||||
//@[tree]compile-flags: -Zmiri-tree-borrows
|
||||
//@compile-flags: -Zmiri-disable-isolation -Zmiri-strict-provenance
|
||||
// We use `yield` to test specific interleavings, so disable automatic preemption.
|
||||
//@compile-flags: -Zmiri-disable-isolation -Zmiri-strict-provenance -Zmiri-preemption-rate=0
|
||||
|
||||
use std::sync::{Arc, Barrier, Condvar, Mutex, Once, RwLock};
|
||||
use std::thread;
|
||||
|
|
@ -119,13 +120,25 @@ fn check_rwlock_write() {
|
|||
let mut threads = Vec::new();
|
||||
|
||||
for _ in 0..3 {
|
||||
let data = Arc::clone(&data);
|
||||
let thread = thread::spawn(move || {
|
||||
let mut data = data.write().unwrap();
|
||||
thread::yield_now();
|
||||
*data += 1;
|
||||
let thread = thread::spawn({
|
||||
let data = Arc::clone(&data);
|
||||
move || {
|
||||
let mut data = data.write().unwrap();
|
||||
thread::yield_now();
|
||||
*data += 1;
|
||||
}
|
||||
});
|
||||
threads.push(thread);
|
||||
|
||||
let readthread = thread::spawn({
|
||||
let data = Arc::clone(&data);
|
||||
move || {
|
||||
let data = data.read().unwrap();
|
||||
thread::yield_now();
|
||||
assert!(*data >= 0 && *data <= 3);
|
||||
}
|
||||
});
|
||||
threads.push(readthread);
|
||||
}
|
||||
|
||||
for thread in threads {
|
||||
|
|
@ -144,8 +157,10 @@ fn check_rwlock_read_no_deadlock() {
|
|||
|
||||
let l1_copy = Arc::clone(&l1);
|
||||
let l2_copy = Arc::clone(&l2);
|
||||
// acquire l1 and hold it until after the other thread is done
|
||||
let _guard1 = l1.read().unwrap();
|
||||
let handle = thread::spawn(move || {
|
||||
// acquire l2 before the other thread
|
||||
let _guard2 = l2_copy.read().unwrap();
|
||||
thread::yield_now();
|
||||
let _guard1 = l1_copy.read().unwrap();
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue