Auto merge of #33861 - Amanieu:lock_elision_fix, r=alexcrichton
Make sure Mutex and RwLock can't be re-locked on the same thread Fixes #33770 r? @alexcrichton
This commit is contained in:
commit
9552bcdd92
7 changed files with 214 additions and 11 deletions
|
|
@ -204,10 +204,14 @@ impl<T> Mutex<T> {
|
|||
/// Creates a new mutex in an unlocked state ready for use.
|
||||
#[stable(feature = "rust1", since = "1.0.0")]
|
||||
pub fn new(t: T) -> Mutex<T> {
|
||||
Mutex {
|
||||
let mut m = Mutex {
|
||||
inner: box StaticMutex::new(),
|
||||
data: UnsafeCell::new(t),
|
||||
};
|
||||
unsafe {
|
||||
m.inner.lock.init();
|
||||
}
|
||||
m
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -27,6 +27,12 @@ impl Mutex {
|
|||
/// first used with any of the functions below.
|
||||
pub const fn new() -> Mutex {
    Mutex(imp::Mutex::new())
}
|
||||
|
||||
/// Prepare the mutex for use.
|
||||
///
|
||||
/// This should be called once the mutex is at a stable memory address.
|
||||
#[inline]
|
||||
pub unsafe fn init(&mut self) { self.0.init() }
|
||||
|
||||
/// Locks the mutex blocking the current thread until it is available.
|
||||
///
|
||||
/// Behavior is undefined if the mutex has been moved between this and any
|
||||
|
|
|
|||
|
|
@ -30,6 +30,39 @@ impl Mutex {
|
|||
Mutex { inner: UnsafeCell::new(libc::PTHREAD_MUTEX_INITIALIZER) }
|
||||
}
|
||||
#[inline]
|
||||
pub unsafe fn init(&mut self) {
|
||||
// Issue #33770
|
||||
//
|
||||
// A pthread mutex initialized with PTHREAD_MUTEX_INITIALIZER will have
|
||||
// a type of PTHREAD_MUTEX_DEFAULT, which has undefined behavior if you
|
||||
// try to re-lock it from the same thread when you already hold a lock.
|
||||
//
|
||||
// In practice, glibc takes advantage of this undefined behavior to
|
||||
// implement hardware lock elision, which uses hardware transactional
|
||||
// memory to avoid acquiring the lock. While a transaction is in
|
||||
// progress, the lock appears to be unlocked. This isn't a problem for
|
||||
// other threads since the transactional memory will abort if a conflict
|
||||
// is detected, however no abort is generated if re-locking from the
|
||||
// same thread.
|
||||
//
|
||||
// Since locking the same mutex twice will result in two aliasing &mut
|
||||
// references, we instead create the mutex with type
|
||||
// PTHREAD_MUTEX_NORMAL which is guaranteed to deadlock if we try to
|
||||
// re-lock it from the same thread, thus avoiding undefined behavior.
|
||||
//
|
||||
// We can't do anything for StaticMutex, but that type is deprecated
|
||||
// anyways.
|
||||
let mut attr: libc::pthread_mutexattr_t = mem::uninitialized();
|
||||
let r = libc::pthread_mutexattr_init(&mut attr);
|
||||
debug_assert_eq!(r, 0);
|
||||
let r = libc::pthread_mutexattr_settype(&mut attr, libc::PTHREAD_MUTEX_NORMAL);
|
||||
debug_assert_eq!(r, 0);
|
||||
let r = libc::pthread_mutex_init(self.inner.get(), &attr);
|
||||
debug_assert_eq!(r, 0);
|
||||
let r = libc::pthread_mutexattr_destroy(&mut attr);
|
||||
debug_assert_eq!(r, 0);
|
||||
}
|
||||
#[inline]
|
||||
pub unsafe fn lock(&self) {
|
||||
let r = libc::pthread_mutex_lock(self.inner.get());
|
||||
debug_assert_eq!(r, 0);
|
||||
|
|
|
|||
|
|
@ -10,15 +10,24 @@
|
|||
|
||||
use libc;
|
||||
use cell::UnsafeCell;
|
||||
use sync::atomic::{AtomicUsize, Ordering};
|
||||
|
||||
pub struct RWLock { inner: UnsafeCell<libc::pthread_rwlock_t> }
|
||||
pub struct RWLock {
|
||||
inner: UnsafeCell<libc::pthread_rwlock_t>,
|
||||
write_locked: UnsafeCell<bool>,
|
||||
num_readers: AtomicUsize,
|
||||
}
|
||||
|
||||
// NOTE(review): these impls assert thread-safety manually. The intent
// appears to be that `write_locked` is only touched while the raw rwlock
// is held and `num_readers` is atomic — confirm against the lock methods.
unsafe impl Send for RWLock {}
unsafe impl Sync for RWLock {}
|
||||
|
||||
impl RWLock {
|
||||
pub const fn new() -> RWLock {
|
||||
RWLock { inner: UnsafeCell::new(libc::PTHREAD_RWLOCK_INITIALIZER) }
|
||||
RWLock {
|
||||
inner: UnsafeCell::new(libc::PTHREAD_RWLOCK_INITIALIZER),
|
||||
write_locked: UnsafeCell::new(false),
|
||||
num_readers: AtomicUsize::new(0),
|
||||
}
|
||||
}
|
||||
#[inline]
|
||||
pub unsafe fn read(&self) {
|
||||
|
|
@ -35,37 +44,86 @@ impl RWLock {
|
|||
//
|
||||
// We roughly maintain the deadlocking behavior by panicking to ensure
|
||||
// that this lock acquisition does not succeed.
|
||||
if r == libc::EDEADLK {
|
||||
//
|
||||
// We also check whether this lock is already write locked. This
|
||||
// is only possible if it was write locked by the current thread and
|
||||
// the implementation allows recursive locking. The POSIX standard
|
||||
// doesn't require recursively locking a rwlock to deadlock, but we can't
|
||||
// allow that because it could lead to aliasing issues.
|
||||
if r == libc::EDEADLK || *self.write_locked.get() {
|
||||
if r == 0 {
|
||||
self.raw_unlock();
|
||||
}
|
||||
panic!("rwlock read lock would result in deadlock");
|
||||
} else {
|
||||
debug_assert_eq!(r, 0);
|
||||
self.num_readers.fetch_add(1, Ordering::Relaxed);
|
||||
}
|
||||
}
|
||||
#[inline]
|
||||
pub unsafe fn try_read(&self) -> bool {
|
||||
libc::pthread_rwlock_tryrdlock(self.inner.get()) == 0
|
||||
let r = libc::pthread_rwlock_tryrdlock(self.inner.get());
|
||||
if r == 0 {
|
||||
if *self.write_locked.get() {
|
||||
self.raw_unlock();
|
||||
false
|
||||
} else {
|
||||
self.num_readers.fetch_add(1, Ordering::Relaxed);
|
||||
true
|
||||
}
|
||||
} else {
|
||||
false
|
||||
}
|
||||
}
|
||||
#[inline]
|
||||
pub unsafe fn write(&self) {
|
||||
let r = libc::pthread_rwlock_wrlock(self.inner.get());
|
||||
// see comments above for why we check for EDEADLK
|
||||
if r == libc::EDEADLK {
|
||||
// See comments above for why we check for EDEADLK and write_locked. We
|
||||
// also need to check that num_readers is 0.
|
||||
if r == libc::EDEADLK || *self.write_locked.get() ||
|
||||
self.num_readers.load(Ordering::Relaxed) != 0 {
|
||||
if r == 0 {
|
||||
self.raw_unlock();
|
||||
}
|
||||
panic!("rwlock write lock would result in deadlock");
|
||||
} else {
|
||||
debug_assert_eq!(r, 0);
|
||||
}
|
||||
*self.write_locked.get() = true;
|
||||
}
|
||||
#[inline]
|
||||
pub unsafe fn try_write(&self) -> bool {
|
||||
libc::pthread_rwlock_trywrlock(self.inner.get()) == 0
|
||||
let r = libc::pthread_rwlock_trywrlock(self.inner.get());
|
||||
if r == 0 {
|
||||
if *self.write_locked.get() || self.num_readers.load(Ordering::Relaxed) != 0 {
|
||||
self.raw_unlock();
|
||||
false
|
||||
} else {
|
||||
*self.write_locked.get() = true;
|
||||
true
|
||||
}
|
||||
} else {
|
||||
false
|
||||
}
|
||||
}
|
||||
#[inline]
|
||||
pub unsafe fn read_unlock(&self) {
|
||||
unsafe fn raw_unlock(&self) {
|
||||
let r = libc::pthread_rwlock_unlock(self.inner.get());
|
||||
debug_assert_eq!(r, 0);
|
||||
}
|
||||
#[inline]
|
||||
pub unsafe fn write_unlock(&self) { self.read_unlock() }
|
||||
pub unsafe fn read_unlock(&self) {
|
||||
debug_assert!(!*self.write_locked.get());
|
||||
self.num_readers.fetch_sub(1, Ordering::Relaxed);
|
||||
self.raw_unlock();
|
||||
}
|
||||
#[inline]
|
||||
pub unsafe fn write_unlock(&self) {
|
||||
debug_assert_eq!(self.num_readers.load(Ordering::Relaxed), 0);
|
||||
debug_assert!(*self.write_locked.get());
|
||||
*self.write_locked.get() = false;
|
||||
self.raw_unlock();
|
||||
}
|
||||
#[inline]
|
||||
pub unsafe fn destroy(&self) {
|
||||
let r = libc::pthread_rwlock_destroy(self.inner.get());
|
||||
|
|
|
|||
|
|
@ -64,6 +64,8 @@ impl Mutex {
|
|||
held: UnsafeCell::new(false),
|
||||
}
|
||||
}
|
||||
// No setup is required on this platform, so `init` is a no-op — unlike
// the pthread backend, which re-initializes the mutex here.
#[inline]
pub unsafe fn init(&mut self) {}
|
||||
pub unsafe fn lock(&self) {
|
||||
match kind() {
|
||||
Kind::SRWLock => c::AcquireSRWLockExclusive(raw(self)),
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue