From a5db2c32e5f0b29c730451a80efc0b4751ca208e Mon Sep 17 00:00:00 2001 From: Andy Wang Date: Thu, 12 May 2022 20:31:40 +0100 Subject: [PATCH] Refactor to hide *_next_id functions --- src/shims/posix/sync.rs | 107 +++++++++++++++++++++----------------- src/shims/windows/sync.rs | 35 +++++++------ src/sync.rs | 72 +++++++++++++++++-------- 3 files changed, 129 insertions(+), 85 deletions(-) diff --git a/src/shims/posix/sync.rs b/src/shims/posix/sync.rs index f8c680c0e828..2e5da5b53798 100644 --- a/src/shims/posix/sync.rs +++ b/src/shims/posix/sync.rs @@ -113,23 +113,27 @@ fn mutex_get_or_create_id<'mir, 'tcx: 'mir>( mutex_op: &OpTy<'tcx, Tag>, ) -> InterpResult<'tcx, MutexId> { let value_place = ecx.offset_and_layout_to_place(mutex_op, 4, ecx.machine.layouts.u32)?; - let (old, success) = ecx - .atomic_compare_exchange_scalar( - &value_place, - &ImmTy::from_uint(0u32, ecx.machine.layouts.u32), - ecx.mutex_next_id().to_u32_scalar().into(), - AtomicRwOp::Relaxed, - AtomicReadOp::Relaxed, - false, - )? - .to_scalar_pair()?; - if success.to_bool().expect("compare_exchange's second return value is a bool") { - let id = ecx.mutex_create(); - Ok(id) - } else { - Ok(MutexId::from_u32(old.to_u32().expect("layout is u32"))) - } + ecx.mutex_get_or_create(|ecx, next_id| { + let (old, success) = ecx + .atomic_compare_exchange_scalar( + &value_place, + &ImmTy::from_uint(0u32, ecx.machine.layouts.u32), + next_id.to_u32_scalar().into(), + AtomicRwOp::Relaxed, + AtomicReadOp::Relaxed, + false, + )? + .to_scalar_pair() + .expect("compare_exchange returns a scalar pair"); + + Ok(if success.to_bool().expect("compare_exchange's second return value is a bool") { + // Caller of the closure needs to allocate next_id + None + } else { + Some(MutexId::from_u32(old.to_u32().expect("layout is u32"))) + }) + }) } // pthread_rwlock_t is between 32 and 56 bytes, depending on the platform. 
@@ -165,23 +169,27 @@ fn rwlock_get_or_create_id<'mir, 'tcx: 'mir>( rwlock_op: &OpTy<'tcx, Tag>, ) -> InterpResult<'tcx, RwLockId> { let value_place = ecx.offset_and_layout_to_place(rwlock_op, 4, ecx.machine.layouts.u32)?; - let (old, success) = ecx - .atomic_compare_exchange_scalar( - &value_place, - &ImmTy::from_uint(0u32, ecx.machine.layouts.u32), - ecx.rwlock_next_id().to_u32_scalar().into(), - AtomicRwOp::Relaxed, - AtomicReadOp::Relaxed, - false, - )? - .to_scalar_pair()?; - if success.to_bool().expect("compare_exchange's second return value is a bool") { - let id = ecx.rwlock_create(); - Ok(id) - } else { - Ok(RwLockId::from_u32(old.to_u32().expect("layout is u32"))) - } + ecx.rwlock_get_or_create(|ecx, next_id| { + let (old, success) = ecx + .atomic_compare_exchange_scalar( + &value_place, + &ImmTy::from_uint(0u32, ecx.machine.layouts.u32), + next_id.to_u32_scalar().into(), + AtomicRwOp::Relaxed, + AtomicReadOp::Relaxed, + false, + )? + .to_scalar_pair() + .expect("compare_exchange returns a scalar pair"); + + Ok(if success.to_bool().expect("compare_exchange's second return value is a bool") { + // Caller of the closure needs to allocate next_id + None + } else { + Some(RwLockId::from_u32(old.to_u32().expect("layout is u32"))) + }) + }) } // pthread_condattr_t @@ -246,23 +254,26 @@ fn cond_get_or_create_id<'mir, 'tcx: 'mir>( ) -> InterpResult<'tcx, CondvarId> { let value_place = ecx.offset_and_layout_to_place(cond_op, 4, ecx.machine.layouts.u32)?; - let (old, success) = ecx - .atomic_compare_exchange_scalar( - &value_place, - &ImmTy::from_uint(0u32, ecx.machine.layouts.u32), - ecx.condvar_next_id().to_u32_scalar().into(), - AtomicRwOp::Relaxed, - AtomicReadOp::Relaxed, - false, - )? 
- .to_scalar_pair()?; + ecx.condvar_get_or_create(|ecx, next_id| { + let (old, success) = ecx + .atomic_compare_exchange_scalar( + &value_place, + &ImmTy::from_uint(0u32, ecx.machine.layouts.u32), + next_id.to_u32_scalar().into(), + AtomicRwOp::Relaxed, + AtomicReadOp::Relaxed, + false, + )? + .to_scalar_pair() + .expect("compare_exchange returns a scalar pair"); - if success.to_bool().expect("compare_exchange's second return value is a bool") { - let id = ecx.condvar_create(); - Ok(id) - } else { - Ok(CondvarId::from_u32(old.to_u32().expect("layout is u32"))) - } + Ok(if success.to_bool().expect("compare_exchange's second return value is a bool") { + // Caller of the closure needs to allocate next_id + None + } else { + Some(CondvarId::from_u32(old.to_u32().expect("layout is u32"))) + }) + }) } fn cond_get_clock_id<'mir, 'tcx: 'mir>( diff --git a/src/shims/windows/sync.rs b/src/shims/windows/sync.rs index ccd65aac900f..ff10b3b6aafe 100644 --- a/src/shims/windows/sync.rs +++ b/src/shims/windows/sync.rs @@ -9,23 +9,26 @@ fn srwlock_get_or_create_id<'mir, 'tcx: 'mir>( ) -> InterpResult<'tcx, RwLockId> { let value_place = ecx.offset_and_layout_to_place(lock_op, 0, ecx.machine.layouts.u32)?; - let (old, success) = ecx - .atomic_compare_exchange_scalar( - &value_place, - &ImmTy::from_uint(0u32, ecx.machine.layouts.u32), - ecx.rwlock_next_id().to_u32_scalar().into(), - AtomicRwOp::AcqRel, - AtomicReadOp::Acquire, - false, - )? - .to_scalar_pair()?; + ecx.rwlock_get_or_create(|ecx, next_id| { + let (old, success) = ecx + .atomic_compare_exchange_scalar( + &value_place, + &ImmTy::from_uint(0u32, ecx.machine.layouts.u32), + next_id.to_u32_scalar().into(), + AtomicRwOp::Relaxed, + AtomicReadOp::Relaxed, + false, + )? 
+ .to_scalar_pair() + .expect("compare_exchange returns a scalar pair"); - if success.to_bool().expect("compare_exchange's second return value is a bool") { - let id = ecx.rwlock_create(); - Ok(id) - } else { - Ok(RwLockId::from_u32(old.to_u32().expect("layout is u32"))) - } + Ok(if success.to_bool().expect("compare_exchange's second return value is a bool") { + // Caller of the closure needs to allocate next_id + None + } else { + Some(RwLockId::from_u32(old.to_u32().expect("layout is u32"))) + }) + }) } impl<'mir, 'tcx> EvalContextExt<'mir, 'tcx> for crate::MiriEvalContext<'mir, 'tcx> {} diff --git a/src/sync.rs b/src/sync.rs index 8c5b8ebfec75..fb69c67eccd9 100644 --- a/src/sync.rs +++ b/src/sync.rs @@ -208,13 +208,6 @@ trait EvalContextExtPriv<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx // situations. impl<'mir, 'tcx: 'mir> EvalContextExt<'mir, 'tcx> for crate::MiriEvalContext<'mir, 'tcx> {} pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx> { - #[inline] - /// Peek the id of the next mutex - fn mutex_next_id(&self) -> MutexId { - let this = self.eval_context_ref(); - this.machine.threads.sync.mutexes.next_index() - } - #[inline] /// Create state for a new mutex. fn mutex_create(&mut self) -> MutexId { @@ -222,6 +215,21 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx this.machine.threads.sync.mutexes.push(Default::default()) } + #[inline] + /// Provides the closure with the next MutexId. Creates that mutex if the closure returns None, + /// otherwise returns the value from the closure + fn mutex_get_or_create<F>(&mut self, existing: F) -> InterpResult<'tcx, MutexId> + where + F: FnOnce(&mut MiriEvalContext<'mir, 'tcx>, MutexId) -> InterpResult<'tcx, Option<MutexId>>, + { + let this = self.eval_context_mut(); + if let Some(old) = existing(this, this.machine.threads.sync.mutexes.next_index())?
{ + Ok(old) + } else { + Ok(self.mutex_create()) + } + } + #[inline] /// Get the id of the thread that currently owns this lock. fn mutex_get_owner(&mut self, id: MutexId) -> ThreadId { @@ -297,13 +305,6 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx this.block_thread(thread); } - #[inline] - /// Peek the id of the next read write lock - fn rwlock_next_id(&self) -> RwLockId { - let this = self.eval_context_ref(); - this.machine.threads.sync.rwlocks.next_index() - } - #[inline] /// Create state for a new read write lock. fn rwlock_create(&mut self) -> RwLockId { @@ -311,6 +312,24 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx this.machine.threads.sync.rwlocks.push(Default::default()) } + #[inline] + /// Provides the closure with the next RwLockId. Creates that RwLock if the closure returns None, + /// otherwise returns the value from the closure + fn rwlock_get_or_create<F>(&mut self, existing: F) -> InterpResult<'tcx, RwLockId> + where + F: FnOnce( + &mut MiriEvalContext<'mir, 'tcx>, + RwLockId, + ) -> InterpResult<'tcx, Option<RwLockId>>, + { + let this = self.eval_context_mut(); + if let Some(old) = existing(this, this.machine.threads.sync.rwlocks.next_index())? { + Ok(old) + } else { + Ok(self.rwlock_create()) + } + } + #[inline] /// Check if locked. fn rwlock_is_locked(&self, id: RwLockId) -> bool { @@ -452,13 +471,6 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx this.block_thread(writer); } - #[inline] - /// Peek the id of the next Condvar - fn condvar_next_id(&self) -> CondvarId { - let this = self.eval_context_ref(); - this.machine.threads.sync.condvars.next_index() - } - #[inline] /// Create state for a new conditional variable.
fn condvar_create(&mut self) -> CondvarId { @@ -466,6 +478,24 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx this.machine.threads.sync.condvars.push(Default::default()) } + #[inline] + /// Provides the closure with the next CondvarId. Creates that Condvar if the closure returns None, + /// otherwise returns the value from the closure + fn condvar_get_or_create<F>(&mut self, existing: F) -> InterpResult<'tcx, CondvarId> + where + F: FnOnce( + &mut MiriEvalContext<'mir, 'tcx>, + CondvarId, + ) -> InterpResult<'tcx, Option<CondvarId>>, + { + let this = self.eval_context_mut(); + if let Some(old) = existing(this, this.machine.threads.sync.condvars.next_index())? { + Ok(old) + } else { + Ok(self.condvar_create()) + } + } + #[inline] /// Is the conditional variable awaited? fn condvar_is_awaited(&mut self, id: CondvarId) -> bool {