Auto merge of #3874 - RalfJung:sync, r=RalfJung

a bit of refactoring in "sync"

- Use `Box<dyn Any>` to keep the "extra data" local to the module implementing the primitive
- Pass around places, not pointers

Cc `@Mandragorian` -- sorry I couldn't resist and did the `Any` thing ;)
This commit is contained in:
bors 2024-09-09 17:17:02 +00:00
commit 0225309e97
6 changed files with 123 additions and 156 deletions

View file

@ -1,7 +1,6 @@
use std::collections::VecDeque;
use rustc_index::Idx;
use rustc_middle::ty::layout::TyAndLayout;
use super::sync::EvalContextExtPriv as _;
use super::vector_clock::VClock;
@ -30,14 +29,12 @@ impl<'tcx> EvalContextExt<'tcx> for crate::MiriInterpCx<'tcx> {}
pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
fn init_once_get_or_create_id(
&mut self,
lock_op: &OpTy<'tcx>,
lock_layout: TyAndLayout<'tcx>,
lock: &MPlaceTy<'tcx>,
offset: u64,
) -> InterpResult<'tcx, InitOnceId> {
let this = self.eval_context_mut();
this.get_or_create_id(
lock_op,
lock_layout,
lock,
offset,
|ecx| &mut ecx.machine.sync.init_onces,
|_| Ok(Default::default()),

View file

@ -1,10 +1,11 @@
use std::any::Any;
use std::collections::{hash_map::Entry, VecDeque};
use std::ops::Not;
use std::time::Duration;
use rustc_data_structures::fx::FxHashMap;
use rustc_index::{Idx, IndexVec};
use rustc_middle::ty::layout::TyAndLayout;
use rustc_target::abi::Size;
use super::init_once::InitOnce;
use super::vector_clock::VClock;
@ -66,27 +67,6 @@ pub(super) use declare_id;
declare_id!(MutexId);
/// The mutex kind.
#[derive(Debug, Clone, Copy)]
#[non_exhaustive]
pub enum MutexKind {
Invalid,
Normal,
Default,
Recursive,
ErrorCheck,
}
#[derive(Debug)]
/// Additional data that may be used by shim implementations.
pub struct AdditionalMutexData {
/// The mutex kind, used by some mutex implementations like pthreads mutexes.
pub kind: MutexKind,
/// The address of the mutex.
pub address: u64,
}
/// The mutex state.
#[derive(Default, Debug)]
struct Mutex {
@ -100,18 +80,11 @@ struct Mutex {
clock: VClock,
/// Additional data that can be set by shim implementations.
data: Option<AdditionalMutexData>,
data: Option<Box<dyn Any>>,
}
declare_id!(RwLockId);
#[derive(Debug)]
/// Additional data that may be used by shim implementations.
pub struct AdditionalRwLockData {
/// The address of the rwlock.
pub address: u64,
}
/// The read-write lock state.
#[derive(Default, Debug)]
struct RwLock {
@ -146,7 +119,7 @@ struct RwLock {
clock_current_readers: VClock,
/// Additional data that can be set by shim implementations.
data: Option<AdditionalRwLockData>,
data: Option<Box<dyn Any>>,
}
declare_id!(CondvarId);
@ -206,21 +179,21 @@ pub(super) trait EvalContextExtPriv<'tcx>: crate::MiriInterpCxExt<'tcx> {
#[inline]
fn get_or_create_id<Id: SyncId + Idx, T>(
&mut self,
lock_op: &OpTy<'tcx>,
lock_layout: TyAndLayout<'tcx>,
lock: &MPlaceTy<'tcx>,
offset: u64,
get_objs: impl for<'a> Fn(&'a mut MiriInterpCx<'tcx>) -> &'a mut IndexVec<Id, T>,
create_obj: impl for<'a> FnOnce(&'a mut MiriInterpCx<'tcx>) -> InterpResult<'tcx, T>,
) -> InterpResult<'tcx, Option<Id>> {
let this = self.eval_context_mut();
let value_place =
this.deref_pointer_and_offset(lock_op, offset, lock_layout, this.machine.layouts.u32)?;
let offset = Size::from_bytes(offset);
assert!(lock.layout.size >= offset + this.machine.layouts.u32.size);
let id_place = lock.offset(offset, this.machine.layouts.u32, this)?;
let next_index = get_objs(this).next_index();
// Since we are lazy, this update has to be atomic.
let (old, success) = this
.atomic_compare_exchange_scalar(
&value_place,
&id_place,
&ImmTy::from_uint(0u32, this.machine.layouts.u32),
Scalar::from_u32(next_index.to_u32()),
AtomicRwOrd::Relaxed, // deliberately *no* synchronization
@ -258,18 +231,18 @@ pub(super) trait EvalContextExtPriv<'tcx>: crate::MiriInterpCxExt<'tcx> {
/// - `obj` must be the new sync object.
fn create_id<Id: SyncId + Idx, T>(
&mut self,
lock_op: &OpTy<'tcx>,
lock_layout: TyAndLayout<'tcx>,
lock: &MPlaceTy<'tcx>,
offset: u64,
get_objs: impl for<'a> Fn(&'a mut MiriInterpCx<'tcx>) -> &'a mut IndexVec<Id, T>,
obj: T,
) -> InterpResult<'tcx, Id> {
let this = self.eval_context_mut();
let value_place =
this.deref_pointer_and_offset(lock_op, offset, lock_layout, this.machine.layouts.u32)?;
let offset = Size::from_bytes(offset);
assert!(lock.layout.size >= offset + this.machine.layouts.u32.size);
let id_place = lock.offset(offset, this.machine.layouts.u32, this)?;
let new_index = get_objs(this).push(obj);
this.write_scalar(Scalar::from_u32(new_index.to_u32()), &value_place)?;
this.write_scalar(Scalar::from_u32(new_index.to_u32()), &id_place)?;
Ok(new_index)
}
@ -302,15 +275,13 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
/// Eagerly create and initialize a new mutex.
fn mutex_create(
&mut self,
lock_op: &OpTy<'tcx>,
lock_layout: TyAndLayout<'tcx>,
lock: &MPlaceTy<'tcx>,
offset: u64,
data: Option<AdditionalMutexData>,
data: Option<Box<dyn Any>>,
) -> InterpResult<'tcx, MutexId> {
let this = self.eval_context_mut();
this.create_id(
lock_op,
lock_layout,
lock,
offset,
|ecx| &mut ecx.machine.sync.mutexes,
Mutex { data, ..Default::default() },
@ -321,17 +292,15 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
/// `initialize_data` must return any additional data that a user wants to associate with the mutex.
fn mutex_get_or_create_id(
&mut self,
lock_op: &OpTy<'tcx>,
lock_layout: TyAndLayout<'tcx>,
lock: &MPlaceTy<'tcx>,
offset: u64,
initialize_data: impl for<'a> FnOnce(
&'a mut MiriInterpCx<'tcx>,
) -> InterpResult<'tcx, Option<AdditionalMutexData>>,
) -> InterpResult<'tcx, Option<Box<dyn Any>>>,
) -> InterpResult<'tcx, MutexId> {
let this = self.eval_context_mut();
this.get_or_create_id(
lock_op,
lock_layout,
lock,
offset,
|ecx| &mut ecx.machine.sync.mutexes,
|ecx| initialize_data(ecx).map(|data| Mutex { data, ..Default::default() }),
@ -340,28 +309,25 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
}
/// Retrieve the additional data stored for a mutex.
fn mutex_get_data<'a>(&'a mut self, id: MutexId) -> Option<&'a AdditionalMutexData>
fn mutex_get_data<'a, T: 'static>(&'a mut self, id: MutexId) -> Option<&'a T>
where
'tcx: 'a,
{
let this = self.eval_context_ref();
this.machine.sync.mutexes[id].data.as_ref()
this.machine.sync.mutexes[id].data.as_deref().and_then(|p| p.downcast_ref::<T>())
}
fn rwlock_get_or_create_id(
&mut self,
lock_op: &OpTy<'tcx>,
lock_layout: TyAndLayout<'tcx>,
lock: &MPlaceTy<'tcx>,
offset: u64,
initialize_data: impl for<'a> FnOnce(
&'a mut MiriInterpCx<'tcx>,
)
-> InterpResult<'tcx, Option<AdditionalRwLockData>>,
) -> InterpResult<'tcx, Option<Box<dyn Any>>>,
) -> InterpResult<'tcx, RwLockId> {
let this = self.eval_context_mut();
this.get_or_create_id(
lock_op,
lock_layout,
lock,
offset,
|ecx| &mut ecx.machine.sync.rwlocks,
|ecx| initialize_data(ecx).map(|data| RwLock { data, ..Default::default() }),
@ -370,24 +336,22 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
}
/// Retrieve the additional data stored for a rwlock.
fn rwlock_get_data<'a>(&'a mut self, id: RwLockId) -> Option<&'a AdditionalRwLockData>
fn rwlock_get_data<'a, T: 'static>(&'a mut self, id: RwLockId) -> Option<&'a T>
where
'tcx: 'a,
{
let this = self.eval_context_ref();
this.machine.sync.rwlocks[id].data.as_ref()
this.machine.sync.rwlocks[id].data.as_deref().and_then(|p| p.downcast_ref::<T>())
}
fn condvar_get_or_create_id(
&mut self,
lock_op: &OpTy<'tcx>,
lock_layout: TyAndLayout<'tcx>,
lock: &MPlaceTy<'tcx>,
offset: u64,
) -> InterpResult<'tcx, CondvarId> {
let this = self.eval_context_mut();
this.get_or_create_id(
lock_op,
lock_layout,
lock,
offset,
|ecx| &mut ecx.machine.sync.condvars,
|_| Ok(Default::default()),

View file

@ -133,10 +133,7 @@ pub use crate::concurrency::{
cpu_affinity::MAX_CPUS,
data_race::{AtomicFenceOrd, AtomicReadOrd, AtomicRwOrd, AtomicWriteOrd, EvalContextExt as _},
init_once::{EvalContextExt as _, InitOnceId},
sync::{
AdditionalMutexData, AdditionalRwLockData, CondvarId, EvalContextExt as _, MutexId,
MutexKind, RwLockId, SynchronizationObjects,
},
sync::{CondvarId, EvalContextExt as _, MutexId, RwLockId, SynchronizationObjects},
thread::{
BlockReason, EvalContextExt as _, StackEmptyCallback, ThreadId, ThreadManager,
TimeoutAnchor, TimeoutClock, UnblockCallback,

View file

@ -14,12 +14,13 @@ use crate::*;
impl<'tcx> EvalContextExtPriv<'tcx> for crate::MiriInterpCx<'tcx> {}
trait EvalContextExtPriv<'tcx>: crate::MiriInterpCxExt<'tcx> {
fn os_unfair_lock_getid(&mut self, lock_op: &OpTy<'tcx>) -> InterpResult<'tcx, MutexId> {
fn os_unfair_lock_getid(&mut self, lock_ptr: &OpTy<'tcx>) -> InterpResult<'tcx, MutexId> {
let this = self.eval_context_mut();
let lock = this.deref_pointer(lock_ptr)?;
// os_unfair_lock holds a 32-bit value, is initialized with zero and
// must be assumed to be opaque. Therefore, we can just store our
// internal mutex ID in the structure without anyone noticing.
this.mutex_get_or_create_id(lock_op, this.libc_ty_layout("os_unfair_lock"), 0, |_| Ok(None))
this.mutex_get_or_create_id(&lock, 0, |_| Ok(None))
}
}

View file

@ -18,10 +18,10 @@ fn mutexattr_kind_offset<'tcx>(ecx: &MiriInterpCx<'tcx>) -> InterpResult<'tcx, u
fn mutexattr_get_kind<'tcx>(
ecx: &MiriInterpCx<'tcx>,
attr_op: &OpTy<'tcx>,
attr_ptr: &OpTy<'tcx>,
) -> InterpResult<'tcx, i32> {
ecx.deref_pointer_and_read(
attr_op,
attr_ptr,
mutexattr_kind_offset(ecx)?,
ecx.libc_ty_layout("pthread_mutexattr_t"),
ecx.machine.layouts.i32,
@ -31,11 +31,11 @@ fn mutexattr_get_kind<'tcx>(
fn mutexattr_set_kind<'tcx>(
ecx: &mut MiriInterpCx<'tcx>,
attr_op: &OpTy<'tcx>,
attr_ptr: &OpTy<'tcx>,
kind: i32,
) -> InterpResult<'tcx, ()> {
ecx.deref_pointer_and_write(
attr_op,
attr_ptr,
mutexattr_kind_offset(ecx)?,
Scalar::from_i32(kind),
ecx.libc_ty_layout("pthread_mutexattr_t"),
@ -59,6 +59,25 @@ fn is_mutex_kind_normal<'tcx>(ecx: &MiriInterpCx<'tcx>, kind: i32) -> InterpResu
Ok(kind == (mutex_normal_kind | PTHREAD_MUTEX_NORMAL_FLAG))
}
/// The mutex kind.
#[derive(Debug, Clone, Copy)]
pub enum MutexKind {
Normal,
Default,
Recursive,
ErrorCheck,
}
#[derive(Debug)]
/// Additional data that we attach with each mutex instance.
pub struct AdditionalMutexData {
/// The mutex kind, used by some mutex implementations like pthreads mutexes.
pub kind: MutexKind,
/// The address of the mutex.
pub address: u64,
}
// pthread_mutex_t is between 24 and 48 bytes, depending on the platform.
// We ignore the platform layout and store our own fields:
// - id: u32
@ -94,15 +113,14 @@ fn mutex_id_offset<'tcx>(ecx: &MiriInterpCx<'tcx>) -> InterpResult<'tcx, u64> {
/// Eagerly create and initialize a new mutex.
fn mutex_create<'tcx>(
ecx: &mut MiriInterpCx<'tcx>,
mutex_op: &OpTy<'tcx>,
mutex_ptr: &OpTy<'tcx>,
kind: i32,
) -> InterpResult<'tcx> {
// FIXME: might be worth changing mutex_create to take the mplace
// rather than the `OpTy`.
let address = ecx.read_pointer(mutex_op)?.addr().bytes();
let mutex = ecx.deref_pointer(mutex_ptr)?;
let address = mutex.ptr().addr().bytes();
let kind = translate_kind(ecx, kind)?;
let data = Some(AdditionalMutexData { address, kind });
ecx.mutex_create(mutex_op, ecx.libc_ty_layout("pthread_mutex_t"), mutex_id_offset(ecx)?, data)?;
let data = Box::new(AdditionalMutexData { address, kind });
ecx.mutex_create(&mutex, mutex_id_offset(ecx)?, Some(data))?;
Ok(())
}
@ -112,27 +130,23 @@ fn mutex_create<'tcx>(
/// return an error if it has.
fn mutex_get_id<'tcx>(
ecx: &mut MiriInterpCx<'tcx>,
mutex_op: &OpTy<'tcx>,
mutex_ptr: &OpTy<'tcx>,
) -> InterpResult<'tcx, MutexId> {
let address = ecx.read_pointer(mutex_op)?.addr().bytes();
let mutex = ecx.deref_pointer(mutex_ptr)?;
let address = mutex.ptr().addr().bytes();
// FIXME: might be worth changing mutex_get_or_create_id to take the mplace
// rather than the `OpTy`.
let id = ecx.mutex_get_or_create_id(
mutex_op,
ecx.libc_ty_layout("pthread_mutex_t"),
mutex_id_offset(ecx)?,
|ecx| {
// This is called if a static initializer was used and the lock has not been assigned
// an ID yet. We have to determine the mutex kind from the static initializer.
let kind = kind_from_static_initializer(ecx, mutex_op)?;
let id = ecx.mutex_get_or_create_id(&mutex, mutex_id_offset(ecx)?, |ecx| {
// This is called if a static initializer was used and the lock has not been assigned
// an ID yet. We have to determine the mutex kind from the static initializer.
let kind = kind_from_static_initializer(ecx, &mutex)?;
Ok(Some(AdditionalMutexData { kind, address }))
},
)?;
Ok(Some(Box::new(AdditionalMutexData { kind, address })))
})?;
// Check that the mutex has not been moved since last use.
let data = ecx.mutex_get_data(id).expect("data should be always exist for pthreads");
let data = ecx
.mutex_get_data::<AdditionalMutexData>(id)
.expect("data should always exist for pthreads");
if data.address != address {
throw_ub_format!("pthread_mutex_t can't be moved after first use")
}
@ -143,20 +157,15 @@ fn mutex_get_id<'tcx>(
/// Returns the kind of a static initializer.
fn kind_from_static_initializer<'tcx>(
ecx: &MiriInterpCx<'tcx>,
mutex_op: &OpTy<'tcx>,
mutex: &MPlaceTy<'tcx>,
) -> InterpResult<'tcx, MutexKind> {
// Only linux has static initializers other than PTHREAD_MUTEX_DEFAULT.
let kind = match &*ecx.tcx.sess.target.os {
"linux" => {
let offset = if ecx.pointer_size().bytes() == 8 { 16 } else { 12 };
ecx.deref_pointer_and_read(
mutex_op,
offset,
ecx.libc_ty_layout("pthread_mutex_t"),
ecx.machine.layouts.i32,
)?
.to_i32()?
let kind_place =
mutex.offset(Size::from_bytes(offset), ecx.machine.layouts.i32, ecx)?;
ecx.read_scalar(&kind_place)?.to_i32()?
}
| "illumos" | "solaris" | "macos" => ecx.eval_libc_i32("PTHREAD_MUTEX_DEFAULT"),
os => throw_unsup_format!("`pthread_mutex` is not supported on {os}"),
@ -183,6 +192,13 @@ fn translate_kind<'tcx>(ecx: &MiriInterpCx<'tcx>, kind: i32) -> InterpResult<'tc
// We ignore the platform layout and store our own fields:
// - id: u32
#[derive(Debug)]
/// Additional data that may be used by shim implementations.
pub struct AdditionalRwLockData {
/// The address of the rwlock.
pub address: u64,
}
fn rwlock_id_offset<'tcx>(ecx: &MiriInterpCx<'tcx>) -> InterpResult<'tcx, u64> {
let offset = match &*ecx.tcx.sess.target.os {
"linux" | "illumos" | "solaris" => 0,
@ -211,19 +227,19 @@ fn rwlock_id_offset<'tcx>(ecx: &MiriInterpCx<'tcx>) -> InterpResult<'tcx, u64> {
fn rwlock_get_id<'tcx>(
ecx: &mut MiriInterpCx<'tcx>,
rwlock_op: &OpTy<'tcx>,
rwlock_ptr: &OpTy<'tcx>,
) -> InterpResult<'tcx, RwLockId> {
let address = ecx.read_pointer(rwlock_op)?.addr().bytes();
let rwlock = ecx.deref_pointer(rwlock_ptr)?;
let address = rwlock.ptr().addr().bytes();
let id = ecx.rwlock_get_or_create_id(
rwlock_op,
ecx.libc_ty_layout("pthread_rwlock_t"),
rwlock_id_offset(ecx)?,
|_| Ok(Some(AdditionalRwLockData { address })),
)?;
let id = ecx.rwlock_get_or_create_id(&rwlock, rwlock_id_offset(ecx)?, |_| {
Ok(Some(Box::new(AdditionalRwLockData { address })))
})?;
// Check that the rwlock has not been moved since last use.
let data = ecx.rwlock_get_data(id).expect("data should be always exist for pthreads");
let data = ecx
.rwlock_get_data::<AdditionalRwLockData>(id)
.expect("data should always exist for pthreads");
if data.address != address {
throw_ub_format!("pthread_rwlock_t can't be moved after first use")
}
@ -246,10 +262,10 @@ fn condattr_clock_offset<'tcx>(ecx: &MiriInterpCx<'tcx>) -> InterpResult<'tcx, u
fn condattr_get_clock_id<'tcx>(
ecx: &MiriInterpCx<'tcx>,
attr_op: &OpTy<'tcx>,
attr_ptr: &OpTy<'tcx>,
) -> InterpResult<'tcx, i32> {
ecx.deref_pointer_and_read(
attr_op,
attr_ptr,
condattr_clock_offset(ecx)?,
ecx.libc_ty_layout("pthread_condattr_t"),
ecx.machine.layouts.i32,
@ -259,11 +275,11 @@ fn condattr_get_clock_id<'tcx>(
fn condattr_set_clock_id<'tcx>(
ecx: &mut MiriInterpCx<'tcx>,
attr_op: &OpTy<'tcx>,
attr_ptr: &OpTy<'tcx>,
clock_id: i32,
) -> InterpResult<'tcx, ()> {
ecx.deref_pointer_and_write(
attr_op,
attr_ptr,
condattr_clock_offset(ecx)?,
Scalar::from_i32(clock_id),
ecx.libc_ty_layout("pthread_condattr_t"),
@ -337,21 +353,18 @@ fn cond_clock_offset<'tcx>(ecx: &MiriInterpCx<'tcx>) -> u64 {
fn cond_get_id<'tcx>(
ecx: &mut MiriInterpCx<'tcx>,
cond_op: &OpTy<'tcx>,
cond_ptr: &OpTy<'tcx>,
) -> InterpResult<'tcx, CondvarId> {
ecx.condvar_get_or_create_id(
cond_op,
ecx.libc_ty_layout("pthread_cond_t"),
cond_id_offset(ecx)?,
)
let cond = ecx.deref_pointer(cond_ptr)?;
ecx.condvar_get_or_create_id(&cond, cond_id_offset(ecx)?)
}
fn cond_reset_id<'tcx>(
ecx: &mut MiriInterpCx<'tcx>,
cond_op: &OpTy<'tcx>,
cond_ptr: &OpTy<'tcx>,
) -> InterpResult<'tcx, ()> {
ecx.deref_pointer_and_write(
cond_op,
cond_ptr,
cond_id_offset(ecx)?,
Scalar::from_i32(0),
ecx.libc_ty_layout("pthread_cond_t"),
@ -361,10 +374,10 @@ fn cond_reset_id<'tcx>(
fn cond_get_clock_id<'tcx>(
ecx: &MiriInterpCx<'tcx>,
cond_op: &OpTy<'tcx>,
cond_ptr: &OpTy<'tcx>,
) -> InterpResult<'tcx, i32> {
ecx.deref_pointer_and_read(
cond_op,
cond_ptr,
cond_clock_offset(ecx),
ecx.libc_ty_layout("pthread_cond_t"),
ecx.machine.layouts.i32,
@ -374,11 +387,11 @@ fn cond_get_clock_id<'tcx>(
fn cond_set_clock_id<'tcx>(
ecx: &mut MiriInterpCx<'tcx>,
cond_op: &OpTy<'tcx>,
cond_ptr: &OpTy<'tcx>,
clock_id: i32,
) -> InterpResult<'tcx, ()> {
ecx.deref_pointer_and_write(
cond_op,
cond_ptr,
cond_clock_offset(ecx),
Scalar::from_i32(clock_id),
ecx.libc_ty_layout("pthread_cond_t"),
@ -490,8 +503,10 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
let this = self.eval_context_mut();
let id = mutex_get_id(this, mutex_op)?;
let kind =
this.mutex_get_data(id).expect("data should always exist for pthread mutexes").kind;
let kind = this
.mutex_get_data::<AdditionalMutexData>(id)
.expect("data should always exist for pthread mutexes")
.kind;
let ret = if this.mutex_is_locked(id) {
let owner_thread = this.mutex_get_owner(id);
@ -509,10 +524,6 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
this.mutex_lock(id);
0
}
_ =>
throw_unsup_format!(
"called pthread_mutex_lock on an unsupported type of mutex"
),
}
}
} else {
@ -528,8 +539,10 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
let this = self.eval_context_mut();
let id = mutex_get_id(this, mutex_op)?;
let kind =
this.mutex_get_data(id).expect("data should always exist for pthread mutexes").kind;
let kind = this
.mutex_get_data::<AdditionalMutexData>(id)
.expect("data should always exist for pthread mutexes")
.kind;
Ok(Scalar::from_i32(if this.mutex_is_locked(id) {
let owner_thread = this.mutex_get_owner(id);
@ -543,10 +556,6 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
this.mutex_lock(id);
0
}
_ =>
throw_unsup_format!(
"called pthread_mutex_trylock on an unsupported type of mutex"
),
}
}
} else {
@ -560,8 +569,10 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
let this = self.eval_context_mut();
let id = mutex_get_id(this, mutex_op)?;
let kind =
this.mutex_get_data(id).expect("data should always exist for pthread mutexes").kind;
let kind = this
.mutex_get_data::<AdditionalMutexData>(id)
.expect("data should always exist for pthread mutexes")
.kind;
if let Some(_old_locked_count) = this.mutex_unlock(id)? {
// The mutex was locked by the current thread.
@ -581,10 +592,6 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
),
MutexKind::ErrorCheck | MutexKind::Recursive =>
Ok(Scalar::from_i32(this.eval_libc_i32("EPERM"))),
_ =>
throw_unsup_format!(
"called pthread_mutex_unlock on an unsupported type of mutex"
),
}
}
}

View file

@ -10,9 +10,10 @@ trait EvalContextExtPriv<'tcx>: crate::MiriInterpCxExt<'tcx> {
// Windows sync primitives are pointer sized.
// We only use the first 4 bytes for the id.
fn init_once_get_id(&mut self, init_once_op: &OpTy<'tcx>) -> InterpResult<'tcx, InitOnceId> {
fn init_once_get_id(&mut self, init_once_ptr: &OpTy<'tcx>) -> InterpResult<'tcx, InitOnceId> {
let this = self.eval_context_mut();
this.init_once_get_or_create_id(init_once_op, this.windows_ty_layout("INIT_ONCE"), 0)
let init_once = this.deref_pointer(init_once_ptr)?;
this.init_once_get_or_create_id(&init_once, 0)
}
/// Returns `true` if we were successful, `false` if we would block.