Tidy up comments and function layout, should fix most of the review notes.

This commit is contained in:
JCTyBlaidd 2020-11-15 18:30:26 +00:00
parent 2a40d9b7a0
commit 69fb6413dd
20 changed files with 1016 additions and 806 deletions

View file

@ -195,6 +195,9 @@ fn main() {
"-Zmiri-disable-stacked-borrows" => {
miri_config.stacked_borrows = false;
}
"-Zmiri-disable-data-race-detector" => {
miri_config.data_race_detector = false;
}
"-Zmiri-disable-alignment-check" => {
miri_config.check_alignment = miri::AlignmentCheck::None;
}

File diff suppressed because it is too large Load diff

View file

@ -48,6 +48,8 @@ pub struct MiriConfig {
pub tracked_alloc_id: Option<AllocId>,
/// Whether to track raw pointers in stacked borrows.
pub track_raw: bool,
/// Determine if data race detection should be enabled
pub data_race_detector: bool,
}
impl Default for MiriConfig {
@ -65,6 +67,7 @@ impl Default for MiriConfig {
tracked_call_id: None,
tracked_alloc_id: None,
track_raw: false,
data_race_detector: true,
}
}
}

View file

@ -55,7 +55,7 @@ pub use crate::shims::tls::{EvalContextExt as _, TlsData};
pub use crate::shims::EvalContextExt as _;
pub use crate::data_race::{
AtomicReadOp, AtomicWriteOp, AtomicRWOp, AtomicFenceOp, DataRaceLockHandle,
AtomicReadOp, AtomicWriteOp, AtomicRwOp, AtomicFenceOp,
EvalContextExt as DataRaceEvalContextExt
};
pub use crate::diagnostics::{
@ -81,7 +81,7 @@ pub use crate::sync::{
EvalContextExt as SyncEvalContextExt, CondvarId, MutexId, RwLockId
};
pub use crate::vector_clock::{
VClock, VSmallClockSet, VectorIdx, VTimestamp
VClock, VSmallClockMap, VectorIdx, VTimestamp
};
/// Insert rustc arguments at the beginning of the argument list that Miri wants to be

View file

@ -109,15 +109,16 @@ impl fmt::Display for MiriMemoryKind {
pub struct AllocExtra {
/// Stacked Borrows state is only added if it is enabled.
pub stacked_borrows: Option<stacked_borrows::AllocExtra>,
/// Data race detection via the use of a vector-clock.
pub data_race: data_race::AllocExtra,
/// Data race detection via the use of a vector-clock,
/// this is only added if it is enabled.
pub data_race: Option<data_race::AllocExtra>,
}
/// Extra global memory data
#[derive(Clone, Debug)]
pub struct MemoryExtra {
pub stacked_borrows: Option<stacked_borrows::MemoryExtra>,
pub data_race: data_race::MemoryExtra,
pub data_race: Option<data_race::MemoryExtra>,
pub intptrcast: intptrcast::MemoryExtra,
/// Mapping extern static names to their canonical allocation.
@ -147,7 +148,11 @@ impl MemoryExtra {
} else {
None
};
let data_race = Rc::new(data_race::GlobalState::new());
let data_race = if config.data_race_detector {
Some(Rc::new(data_race::GlobalState::new()))
}else{
None
};
MemoryExtra {
stacked_borrows,
data_race,
@ -472,7 +477,11 @@ impl<'mir, 'tcx> Machine<'mir, 'tcx> for Evaluator<'mir, 'tcx> {
// No stacks, no tag.
(None, Tag::Untagged)
};
let race_alloc = data_race::AllocExtra::new_allocation(&memory_extra.data_race, alloc.size);
let race_alloc = if let Some(data_race) = &memory_extra.data_race {
Some(data_race::AllocExtra::new_allocation(&data_race, alloc.size))
} else {
None
};
let mut stacked_borrows = memory_extra.stacked_borrows.as_ref().map(|sb| sb.borrow_mut());
let alloc: Allocation<Tag, Self::AllocExtra> = alloc.with_tags_and_extra(
|alloc| {
@ -590,7 +599,9 @@ impl AllocationExtra<Tag> for AllocExtra {
ptr: Pointer<Tag>,
size: Size,
) -> InterpResult<'tcx> {
alloc.extra.data_race.read(ptr, size)?;
if let Some(data_race) = &alloc.extra.data_race {
data_race.read(ptr, size)?;
}
if let Some(stacked_borrows) = &alloc.extra.stacked_borrows {
stacked_borrows.memory_read(ptr, size)
} else {
@ -604,7 +615,9 @@ impl AllocationExtra<Tag> for AllocExtra {
ptr: Pointer<Tag>,
size: Size,
) -> InterpResult<'tcx> {
alloc.extra.data_race.write(ptr, size)?;
if let Some(data_race) = &mut alloc.extra.data_race {
data_race.write(ptr, size)?;
}
if let Some(stacked_borrows) = &mut alloc.extra.stacked_borrows {
stacked_borrows.memory_written(ptr, size)
} else {
@ -618,7 +631,9 @@ impl AllocationExtra<Tag> for AllocExtra {
ptr: Pointer<Tag>,
size: Size,
) -> InterpResult<'tcx> {
alloc.extra.data_race.deallocate(ptr, size)?;
if let Some(data_race) = &mut alloc.extra.data_race {
data_race.deallocate(ptr, size)?;
}
if let Some(stacked_borrows) = &mut alloc.extra.stacked_borrows {
stacked_borrows.memory_deallocated(ptr, size)
} else {

View file

@ -324,98 +324,98 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
"atomic_singlethreadfence_acqrel" => this.compiler_fence(args, AtomicFenceOp::AcqRel)?,
"atomic_singlethreadfence" => this.compiler_fence(args, AtomicFenceOp::SeqCst)?,
"atomic_xchg" => this.atomic_exchange(args, dest, AtomicRWOp::SeqCst)?,
"atomic_xchg_acq" => this.atomic_exchange(args, dest, AtomicRWOp::Acquire)?,
"atomic_xchg_rel" => this.atomic_exchange(args, dest, AtomicRWOp::Release)?,
"atomic_xchg_acqrel" => this.atomic_exchange(args, dest, AtomicRWOp::AcqRel)?,
"atomic_xchg_relaxed" => this.atomic_exchange(args, dest, AtomicRWOp::Relaxed)?,
"atomic_xchg" => this.atomic_exchange(args, dest, AtomicRwOp::SeqCst)?,
"atomic_xchg_acq" => this.atomic_exchange(args, dest, AtomicRwOp::Acquire)?,
"atomic_xchg_rel" => this.atomic_exchange(args, dest, AtomicRwOp::Release)?,
"atomic_xchg_acqrel" => this.atomic_exchange(args, dest, AtomicRwOp::AcqRel)?,
"atomic_xchg_relaxed" => this.atomic_exchange(args, dest, AtomicRwOp::Relaxed)?,
"atomic_cxchg" => this.atomic_compare_exchange(
args, dest, AtomicRWOp::SeqCst, AtomicReadOp::SeqCst
args, dest, AtomicRwOp::SeqCst, AtomicReadOp::SeqCst
)?,
"atomic_cxchg_acq" => this.atomic_compare_exchange(
args, dest, AtomicRWOp::Acquire, AtomicReadOp::Acquire
args, dest, AtomicRwOp::Acquire, AtomicReadOp::Acquire
)?,
"atomic_cxchg_rel" => this.atomic_compare_exchange(
args, dest, AtomicRWOp::Release, AtomicReadOp::Relaxed
args, dest, AtomicRwOp::Release, AtomicReadOp::Relaxed
)?,
"atomic_cxchg_acqrel" => this.atomic_compare_exchange
(args, dest, AtomicRWOp::AcqRel, AtomicReadOp::Acquire
(args, dest, AtomicRwOp::AcqRel, AtomicReadOp::Acquire
)?,
"atomic_cxchg_relaxed" => this.atomic_compare_exchange(
args, dest, AtomicRWOp::Relaxed, AtomicReadOp::Relaxed
args, dest, AtomicRwOp::Relaxed, AtomicReadOp::Relaxed
)?,
"atomic_cxchg_acq_failrelaxed" => this.atomic_compare_exchange(
args, dest, AtomicRWOp::Acquire, AtomicReadOp::Relaxed
args, dest, AtomicRwOp::Acquire, AtomicReadOp::Relaxed
)?,
"atomic_cxchg_acqrel_failrelaxed" => this.atomic_compare_exchange(
args, dest, AtomicRWOp::AcqRel, AtomicReadOp::Relaxed
args, dest, AtomicRwOp::AcqRel, AtomicReadOp::Relaxed
)?,
"atomic_cxchg_failrelaxed" => this.atomic_compare_exchange(
args, dest, AtomicRWOp::SeqCst, AtomicReadOp::Relaxed
args, dest, AtomicRwOp::SeqCst, AtomicReadOp::Relaxed
)?,
"atomic_cxchg_failacq" => this.atomic_compare_exchange(
args, dest, AtomicRWOp::SeqCst, AtomicReadOp::Acquire
args, dest, AtomicRwOp::SeqCst, AtomicReadOp::Acquire
)?,
"atomic_cxchgweak" => this.atomic_compare_exchange_weak(
args, dest, AtomicRWOp::SeqCst, AtomicReadOp::SeqCst
args, dest, AtomicRwOp::SeqCst, AtomicReadOp::SeqCst
)?,
"atomic_cxchgweak_acq" => this.atomic_compare_exchange_weak(
args, dest, AtomicRWOp::Acquire, AtomicReadOp::Acquire
args, dest, AtomicRwOp::Acquire, AtomicReadOp::Acquire
)?,
"atomic_cxchgweak_rel" => this.atomic_compare_exchange_weak(
args, dest, AtomicRWOp::Release, AtomicReadOp::Relaxed
args, dest, AtomicRwOp::Release, AtomicReadOp::Relaxed
)?,
"atomic_cxchgweak_acqrel" => this.atomic_compare_exchange_weak(
args, dest, AtomicRWOp::AcqRel, AtomicReadOp::Acquire
args, dest, AtomicRwOp::AcqRel, AtomicReadOp::Acquire
)?,
"atomic_cxchgweak_relaxed" => this.atomic_compare_exchange_weak(
args, dest, AtomicRWOp::Relaxed, AtomicReadOp::Relaxed
args, dest, AtomicRwOp::Relaxed, AtomicReadOp::Relaxed
)?,
"atomic_cxchgweak_acq_failrelaxed" => this.atomic_compare_exchange_weak(
args, dest, AtomicRWOp::Acquire, AtomicReadOp::Relaxed
args, dest, AtomicRwOp::Acquire, AtomicReadOp::Relaxed
)?,
"atomic_cxchgweak_acqrel_failrelaxed" => this.atomic_compare_exchange_weak(
args, dest, AtomicRWOp::AcqRel, AtomicReadOp::Relaxed
args, dest, AtomicRwOp::AcqRel, AtomicReadOp::Relaxed
)?,
"atomic_cxchgweak_failrelaxed" => this.atomic_compare_exchange_weak(
args, dest, AtomicRWOp::SeqCst, AtomicReadOp::Relaxed
args, dest, AtomicRwOp::SeqCst, AtomicReadOp::Relaxed
)?,
"atomic_cxchgweak_failacq" => this.atomic_compare_exchange_weak(
args, dest, AtomicRWOp::SeqCst, AtomicReadOp::Acquire
args, dest, AtomicRwOp::SeqCst, AtomicReadOp::Acquire
)?,
"atomic_or" => this.atomic_op(args, dest, BinOp::BitOr, false, AtomicRWOp::SeqCst)?,
"atomic_or_acq" => this.atomic_op(args, dest, BinOp::BitOr, false, AtomicRWOp::Acquire)?,
"atomic_or_rel" => this.atomic_op(args, dest, BinOp::BitOr, false, AtomicRWOp::Release)?,
"atomic_or_acqrel" => this.atomic_op(args, dest, BinOp::BitOr, false, AtomicRWOp::AcqRel)?,
"atomic_or_relaxed" => this.atomic_op(args, dest, BinOp::BitOr, false, AtomicRWOp::Relaxed)?,
"atomic_xor" => this.atomic_op(args, dest, BinOp::BitXor, false, AtomicRWOp::SeqCst)?,
"atomic_xor_acq" => this.atomic_op(args, dest, BinOp::BitXor, false, AtomicRWOp::Acquire)?,
"atomic_xor_rel" => this.atomic_op(args, dest, BinOp::BitXor, false, AtomicRWOp::Release)?,
"atomic_xor_acqrel" => this.atomic_op(args, dest, BinOp::BitXor, false, AtomicRWOp::AcqRel)?,
"atomic_xor_relaxed" => this.atomic_op(args, dest, BinOp::BitXor, false, AtomicRWOp::Relaxed)?,
"atomic_and" => this.atomic_op(args, dest, BinOp::BitAnd, false, AtomicRWOp::SeqCst)?,
"atomic_and_acq" => this.atomic_op(args, dest, BinOp::BitAnd, false, AtomicRWOp::Acquire)?,
"atomic_and_rel" => this.atomic_op(args, dest, BinOp::BitAnd, false, AtomicRWOp::Release)?,
"atomic_and_acqrel" => this.atomic_op(args, dest, BinOp::BitAnd, false, AtomicRWOp::AcqRel)?,
"atomic_and_relaxed" => this.atomic_op(args, dest, BinOp::BitAnd, false, AtomicRWOp::Relaxed)?,
"atomic_nand" => this.atomic_op(args, dest, BinOp::BitAnd, true, AtomicRWOp::SeqCst)?,
"atomic_nand_acq" => this.atomic_op(args, dest, BinOp::BitAnd, true, AtomicRWOp::Acquire)?,
"atomic_nand_rel" => this.atomic_op(args, dest, BinOp::BitAnd, true, AtomicRWOp::Release)?,
"atomic_nand_acqrel" => this.atomic_op(args, dest, BinOp::BitAnd, true, AtomicRWOp::AcqRel)?,
"atomic_nand_relaxed" => this.atomic_op(args, dest, BinOp::BitAnd, true, AtomicRWOp::Relaxed)?,
"atomic_xadd" => this.atomic_op(args, dest, BinOp::Add, false, AtomicRWOp::SeqCst)?,
"atomic_xadd_acq" => this.atomic_op(args, dest, BinOp::Add, false, AtomicRWOp::Acquire)?,
"atomic_xadd_rel" => this.atomic_op(args, dest, BinOp::Add, false, AtomicRWOp::Release)?,
"atomic_xadd_acqrel" => this.atomic_op(args, dest, BinOp::Add, false, AtomicRWOp::AcqRel)?,
"atomic_xadd_relaxed" => this.atomic_op(args, dest, BinOp::Add, false, AtomicRWOp::Relaxed)?,
"atomic_xsub" => this.atomic_op(args, dest, BinOp::Sub, false, AtomicRWOp::SeqCst)?,
"atomic_xsub_acq" => this.atomic_op(args, dest, BinOp::Sub, false, AtomicRWOp::Acquire)?,
"atomic_xsub_rel" => this.atomic_op(args, dest, BinOp::Sub, false, AtomicRWOp::Release)?,
"atomic_xsub_acqrel" => this.atomic_op(args, dest, BinOp::Sub, false, AtomicRWOp::AcqRel)?,
"atomic_xsub_relaxed" => this.atomic_op(args, dest, BinOp::Sub, false, AtomicRWOp::Relaxed)?,
"atomic_or" => this.atomic_op(args, dest, BinOp::BitOr, false, AtomicRwOp::SeqCst)?,
"atomic_or_acq" => this.atomic_op(args, dest, BinOp::BitOr, false, AtomicRwOp::Acquire)?,
"atomic_or_rel" => this.atomic_op(args, dest, BinOp::BitOr, false, AtomicRwOp::Release)?,
"atomic_or_acqrel" => this.atomic_op(args, dest, BinOp::BitOr, false, AtomicRwOp::AcqRel)?,
"atomic_or_relaxed" => this.atomic_op(args, dest, BinOp::BitOr, false, AtomicRwOp::Relaxed)?,
"atomic_xor" => this.atomic_op(args, dest, BinOp::BitXor, false, AtomicRwOp::SeqCst)?,
"atomic_xor_acq" => this.atomic_op(args, dest, BinOp::BitXor, false, AtomicRwOp::Acquire)?,
"atomic_xor_rel" => this.atomic_op(args, dest, BinOp::BitXor, false, AtomicRwOp::Release)?,
"atomic_xor_acqrel" => this.atomic_op(args, dest, BinOp::BitXor, false, AtomicRwOp::AcqRel)?,
"atomic_xor_relaxed" => this.atomic_op(args, dest, BinOp::BitXor, false, AtomicRwOp::Relaxed)?,
"atomic_and" => this.atomic_op(args, dest, BinOp::BitAnd, false, AtomicRwOp::SeqCst)?,
"atomic_and_acq" => this.atomic_op(args, dest, BinOp::BitAnd, false, AtomicRwOp::Acquire)?,
"atomic_and_rel" => this.atomic_op(args, dest, BinOp::BitAnd, false, AtomicRwOp::Release)?,
"atomic_and_acqrel" => this.atomic_op(args, dest, BinOp::BitAnd, false, AtomicRwOp::AcqRel)?,
"atomic_and_relaxed" => this.atomic_op(args, dest, BinOp::BitAnd, false, AtomicRwOp::Relaxed)?,
"atomic_nand" => this.atomic_op(args, dest, BinOp::BitAnd, true, AtomicRwOp::SeqCst)?,
"atomic_nand_acq" => this.atomic_op(args, dest, BinOp::BitAnd, true, AtomicRwOp::Acquire)?,
"atomic_nand_rel" => this.atomic_op(args, dest, BinOp::BitAnd, true, AtomicRwOp::Release)?,
"atomic_nand_acqrel" => this.atomic_op(args, dest, BinOp::BitAnd, true, AtomicRwOp::AcqRel)?,
"atomic_nand_relaxed" => this.atomic_op(args, dest, BinOp::BitAnd, true, AtomicRwOp::Relaxed)?,
"atomic_xadd" => this.atomic_op(args, dest, BinOp::Add, false, AtomicRwOp::SeqCst)?,
"atomic_xadd_acq" => this.atomic_op(args, dest, BinOp::Add, false, AtomicRwOp::Acquire)?,
"atomic_xadd_rel" => this.atomic_op(args, dest, BinOp::Add, false, AtomicRwOp::Release)?,
"atomic_xadd_acqrel" => this.atomic_op(args, dest, BinOp::Add, false, AtomicRwOp::AcqRel)?,
"atomic_xadd_relaxed" => this.atomic_op(args, dest, BinOp::Add, false, AtomicRwOp::Relaxed)?,
"atomic_xsub" => this.atomic_op(args, dest, BinOp::Sub, false, AtomicRwOp::SeqCst)?,
"atomic_xsub_acq" => this.atomic_op(args, dest, BinOp::Sub, false, AtomicRwOp::Acquire)?,
"atomic_xsub_rel" => this.atomic_op(args, dest, BinOp::Sub, false, AtomicRwOp::Release)?,
"atomic_xsub_acqrel" => this.atomic_op(args, dest, BinOp::Sub, false, AtomicRwOp::AcqRel)?,
"atomic_xsub_relaxed" => this.atomic_op(args, dest, BinOp::Sub, false, AtomicRwOp::Relaxed)?,
// Query type information
@ -514,7 +514,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
fn atomic_op(
&mut self, args: &[OpTy<'tcx, Tag>], dest: PlaceTy<'tcx, Tag>,
op: mir::BinOp, neg: bool, atomic: AtomicRWOp
op: mir::BinOp, neg: bool, atomic: AtomicRwOp
) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
@ -524,39 +524,26 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
bug!("Atomic arithmetic operations only work on integer types");
}
let rhs = this.read_immediate(rhs)?;
let old = this.allow_data_races_mut(|this| {
this.read_immediate(place. into())
})?;
// Check alignment requirements. Atomics must always be aligned to their size,
// even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
// be 8-aligned).
let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
this.memory.check_ptr_access(place.ptr, place.layout.size, align)?;
let old = this.atomic_op_immediate(place, rhs, op, neg, atomic)?;
this.write_immediate(*old, dest)?; // old value is returned
// Atomics wrap around on overflow.
let val = this.binary_op(op, old, rhs)?;
let val = if neg { this.unary_op(mir::UnOp::Not, val)? } else { val };
this.allow_data_races_mut(|this| {
this.write_immediate(*val, place.into())
})?;
this.validate_atomic_rmw(place, atomic)?;
Ok(())
}
fn atomic_exchange(
&mut self, args: &[OpTy<'tcx, Tag>], dest: PlaceTy<'tcx, Tag>, atomic: AtomicRWOp
&mut self, args: &[OpTy<'tcx, Tag>], dest: PlaceTy<'tcx, Tag>, atomic: AtomicRwOp
) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
let &[place, new] = check_arg_count(args)?;
let place = this.deref_operand(place)?;
let new = this.read_scalar(new)?;
let old = this.allow_data_races_mut(|this| {
this.read_scalar(place.into())
})?;
// Check alignment requirements. Atomics must always be aligned to their size,
// even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
@ -564,18 +551,14 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
this.memory.check_ptr_access(place.ptr, place.layout.size, align)?;
let old = this.atomic_exchange_scalar(place, new, atomic)?;
this.write_scalar(old, dest)?; // old value is returned
this.allow_data_races_mut(|this| {
this.write_scalar(new, place.into())
})?;
this.validate_atomic_rmw(place, atomic)?;
Ok(())
}
fn atomic_compare_exchange(
&mut self, args: &[OpTy<'tcx, Tag>], dest: PlaceTy<'tcx, Tag>,
success: AtomicRWOp, fail: AtomicReadOp
success: AtomicRwOp, fail: AtomicReadOp
) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
@ -584,13 +567,6 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
let expect_old = this.read_immediate(expect_old)?; // read as immediate for the sake of `binary_op()`
let new = this.read_scalar(new)?;
// Failure ordering cannot be stronger than success ordering, therefore first attempt
// to read with the failure ordering and if successful then try again with the success
// read ordering and write in the success case.
// Read as immediate for the sake of `binary_op()`
let old = this.allow_data_races_mut(|this| {
this.read_immediate(place.into())
})?;
// Check alignment requirements. Atomics must always be aligned to their size,
// even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
@ -598,31 +574,19 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
this.memory.check_ptr_access(place.ptr, place.layout.size, align)?;
// `binary_op` will bail if either of them is not a scalar.
let eq = this.overflowing_binary_op(mir::BinOp::Eq, old, expect_old)?.0;
let res = Immediate::ScalarPair(old.to_scalar_or_uninit(), eq.into());
let old = this.atomic_compare_exchange_scalar(
place, expect_old, new, success, fail
)?;
// Return old value.
this.write_immediate(res, dest)?;
// Update ptr depending on comparison.
// if successful, perform a full rw-atomic validation
// otherwise treat this as an atomic load with the fail ordering
if eq.to_bool()? {
this.allow_data_races_mut(|this| {
this.write_scalar(new, place.into())
})?;
this.validate_atomic_rmw(place, success)?;
} else {
this.validate_atomic_load(place, fail)?;
}
this.write_immediate(old, dest)?;
Ok(())
}
fn atomic_compare_exchange_weak(
&mut self, args: &[OpTy<'tcx, Tag>], dest: PlaceTy<'tcx, Tag>,
success: AtomicRWOp, fail: AtomicReadOp
success: AtomicRwOp, fail: AtomicReadOp
) -> InterpResult<'tcx> {
// FIXME: the weak part of this is currently not modelled,

View file

@ -78,7 +78,17 @@ pub fn futex<'tcx>(
// Read an `i32` through the pointer, regardless of any wrapper types.
// It's not uncommon for `addr` to be passed as another type than `*mut i32`, such as `*const AtomicI32`.
// FIXME: this fails if `addr` is not a pointer type.
// FIXME: what form of atomic operation should the `futex` use to load the value?
// The atomic ordering for futex(https://man7.org/linux/man-pages/man2/futex.2.html):
// "The load of the value of the futex word is an
// atomic memory access (i.e., using atomic machine instructions
// of the respective architecture). This load, the comparison
// with the expected value, and starting to sleep are performed
// atomically and totally ordered with respect to other futex
// operations on the same futex word."
// SeqCst is a total order over all operations, so Acquire is used here;
// both orderings are equivalent under the current implementation.
// FIXME: is Acquire correct or should some additional ordering constraints be observed?
// FIXME: use RMW or similar?
let futex_val = this.read_scalar_at_offset_atomic(
addr.into(), 0, this.machine.layouts.i32, AtomicReadOp::Acquire
)?.to_i32()?;

View file

@ -64,7 +64,7 @@ fn mutex_get_kind<'mir, 'tcx: 'mir>(
let offset = if ecx.pointer_size().bytes() == 8 { 16 } else { 12 };
ecx.read_scalar_at_offset_atomic(
mutex_op, offset, ecx.machine.layouts.i32,
AtomicReadOp::SeqCst
AtomicReadOp::Acquire
)
}
@ -76,7 +76,7 @@ fn mutex_set_kind<'mir, 'tcx: 'mir>(
let offset = if ecx.pointer_size().bytes() == 8 { 16 } else { 12 };
ecx.write_scalar_at_offset_atomic(
mutex_op, offset, kind, ecx.machine.layouts.i32,
AtomicWriteOp::SeqCst
AtomicWriteOp::Release
)
}
@ -85,7 +85,7 @@ fn mutex_get_id<'mir, 'tcx: 'mir>(
mutex_op: OpTy<'tcx, Tag>,
) -> InterpResult<'tcx, ScalarMaybeUninit<Tag>> {
ecx.read_scalar_at_offset_atomic(
mutex_op, 4, ecx.machine.layouts.u32, AtomicReadOp::SeqCst
mutex_op, 4, ecx.machine.layouts.u32, AtomicReadOp::Acquire
)
}
@ -96,7 +96,7 @@ fn mutex_set_id<'mir, 'tcx: 'mir>(
) -> InterpResult<'tcx, ()> {
ecx.write_scalar_at_offset_atomic(
mutex_op, 4, id, ecx.machine.layouts.u32,
AtomicWriteOp::SeqCst
AtomicWriteOp::Release
)
}
@ -129,7 +129,7 @@ fn rwlock_get_id<'mir, 'tcx: 'mir>(
) -> InterpResult<'tcx, ScalarMaybeUninit<Tag>> {
ecx.read_scalar_at_offset_atomic(
rwlock_op, 4, ecx.machine.layouts.u32,
AtomicReadOp::SeqCst
AtomicReadOp::Acquire
)
}
@ -140,7 +140,7 @@ fn rwlock_set_id<'mir, 'tcx: 'mir>(
) -> InterpResult<'tcx, ()> {
ecx.write_scalar_at_offset_atomic(
rwlock_op, 4, id, ecx.machine.layouts.u32,
AtomicWriteOp::SeqCst
AtomicWriteOp::Release
)
}
@ -196,7 +196,7 @@ fn cond_get_id<'mir, 'tcx: 'mir>(
) -> InterpResult<'tcx, ScalarMaybeUninit<Tag>> {
ecx.read_scalar_at_offset_atomic(
cond_op, 4, ecx.machine.layouts.u32,
AtomicReadOp::SeqCst
AtomicReadOp::Acquire
)
}
@ -207,7 +207,7 @@ fn cond_set_id<'mir, 'tcx: 'mir>(
) -> InterpResult<'tcx, ()> {
ecx.write_scalar_at_offset_atomic(
cond_op, 4, id, ecx.machine.layouts.u32,
AtomicWriteOp::SeqCst
AtomicWriteOp::Release
)
}

View file

@ -15,14 +15,14 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
let this = self.eval_context_mut();
this.tcx.sess.warn(
"thread support is experimental.",
"thread support is experimental, no weak memory effects are currently emulated.",
);
// Create the new thread
let new_thread_id = this.create_thread();
// Write the current thread-id, switch to the next thread later
// to treat this write operation as occuring on this thread index
// to treat this write operation as occurring on the current thread.
let thread_info_place = this.deref_operand(thread)?;
this.write_scalar(
Scalar::from_uint(new_thread_id.to_u32(), thread_info_place.layout.size),
@ -30,15 +30,16 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
)?;
// Read the function argument that will be sent to the new thread
// again perform the read before the thread starts executing.
// before the thread starts executing since reading after the
// context switch will incorrectly report a data-race.
let fn_ptr = this.read_scalar(start_routine)?.check_init()?;
let func_arg = this.read_immediate(arg)?;
// Also switch to new thread so that we can push the first stackframe.
// after this all accesses will be treated as occuring in the new thread
// Finally switch to new thread so that we can push the first stackframe.
// After this all accesses will be treated as occurring in the new thread.
let old_thread_id = this.set_active_thread(new_thread_id);
// Perform the function pointer load in the new thread frame
// Perform the function pointer load in the new thread frame.
let instance = this.memory.get_fn(fn_ptr)?.as_instance()?;
// Note: the returned value is currently ignored (see the FIXME in
@ -54,7 +55,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
StackPopCleanup::None { cleanup: true },
)?;
// Restore the old active thread frame
// Restore the old active thread frame.
this.set_active_thread(old_thread_id);
Ok(0)

View file

@ -62,7 +62,7 @@ struct Mutex {
/// The queue of threads waiting for this mutex.
queue: VecDeque<ThreadId>,
/// Data race handle
data_race: DataRaceLockHandle
data_race: VClock
}
declare_id!(RwLockId);
@ -80,9 +80,9 @@ struct RwLock {
/// The queue of reader threads waiting for this lock.
reader_queue: VecDeque<ThreadId>,
/// Data race handle for writers
data_race: DataRaceLockHandle,
data_race: VClock,
/// Data race handle for readers
data_race_reader: DataRaceLockHandle,
data_race_reader: VClock,
}
declare_id!(CondvarId);
@ -100,14 +100,14 @@ struct CondvarWaiter {
#[derive(Default, Debug)]
struct Condvar {
waiters: VecDeque<CondvarWaiter>,
data_race: DataRaceLockHandle,
data_race: VClock,
}
/// The futex state.
#[derive(Default, Debug)]
struct Futex {
waiters: VecDeque<FutexWaiter>,
data_race: DataRaceLockHandle,
data_race: VClock,
}
/// A thread waiting on a futex.
@ -213,7 +213,9 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
mutex.owner = Some(thread);
}
mutex.lock_count = mutex.lock_count.checked_add(1).unwrap();
this.memory.extra.data_race.validate_lock_acquire(&mutex.data_race, thread);
if let Some(data_race) = &this.memory.extra.data_race {
data_race.validate_lock_acquire(&mutex.data_race, thread);
}
}
/// Try unlocking by decreasing the lock count and returning the old lock
@ -241,7 +243,9 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
mutex.owner = None;
// The mutex is completely unlocked. Try transferring ownership
// to another thread.
this.memory.extra.data_race.validate_lock_release(&mut mutex.data_race, current_owner);
if let Some(data_race) = &this.memory.extra.data_race {
data_race.validate_lock_release(&mut mutex.data_race, current_owner);
}
this.mutex_dequeue_and_lock(id);
}
Some(old_lock_count)
@ -297,7 +301,9 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
let rwlock = &mut this.machine.threads.sync.rwlocks[id];
let count = rwlock.readers.entry(reader).or_insert(0);
*count = count.checked_add(1).expect("the reader counter overflowed");
this.memory.extra.data_race.validate_lock_acquire(&rwlock.data_race, reader);
if let Some(data_race) = &this.memory.extra.data_race {
data_race.validate_lock_acquire(&rwlock.data_race, reader);
}
}
/// Try read-unlock the lock for `reader` and potentially give the lock to a new owner.
@ -319,7 +325,9 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
}
Entry::Vacant(_) => return false, // we did not even own this lock
}
this.memory.extra.data_race.validate_lock_release_shared(&mut rwlock.data_race_reader, reader);
if let Some(data_race) = &this.memory.extra.data_race {
data_race.validate_lock_release_shared(&mut rwlock.data_race_reader, reader);
}
// The thread was a reader. If the lock is not held any more, give it to a writer.
if this.rwlock_is_locked(id).not() {
@ -328,7 +336,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
// of the union of all reader data race handles, since the set of readers
// happen-before the writers
let rwlock = &mut this.machine.threads.sync.rwlocks[id];
rwlock.data_race.set_values(&rwlock.data_race_reader);
rwlock.data_race.clone_from(&rwlock.data_race_reader);
this.rwlock_dequeue_and_lock_writer(id);
}
true
@ -355,7 +363,9 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
trace!("rwlock_writer_lock: {:?} now held by {:?}", id, writer);
let rwlock = &mut this.machine.threads.sync.rwlocks[id];
rwlock.writer = Some(writer);
this.memory.extra.data_race.validate_lock_acquire(&rwlock.data_race, writer);
if let Some(data_race) = &this.memory.extra.data_race {
data_race.validate_lock_acquire(&rwlock.data_race, writer);
}
}
#[inline]
@ -373,8 +383,10 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
// Release memory to both reader and writer vector clocks
// since this writer happens-before both the union of readers once they are finished
// and the next writer
this.memory.extra.data_race.validate_lock_release(&mut rwlock.data_race, current_writer);
this.memory.extra.data_race.validate_lock_release(&mut rwlock.data_race_reader, current_writer);
if let Some(data_race) = &this.memory.extra.data_race {
data_race.validate_lock_release(&mut rwlock.data_race, current_writer);
data_race.validate_lock_release(&mut rwlock.data_race_reader, current_writer);
}
// The thread was a writer.
//
// We are prioritizing writers here against the readers. As a
@ -435,14 +447,18 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
let this = self.eval_context_mut();
let current_thread = this.get_active_thread();
let condvar = &mut this.machine.threads.sync.condvars[id];
let data_race = &mut this.memory.extra.data_race;
let data_race = &this.memory.extra.data_race;
// Each condvar signal happens-before the end of the condvar wake
data_race.validate_lock_release(&mut condvar.data_race, current_thread);
if let Some(data_race) = data_race {
data_race.validate_lock_release(&mut condvar.data_race, current_thread);
}
condvar.waiters
.pop_front()
.map(|waiter| {
data_race.validate_lock_acquire(&mut condvar.data_race, waiter.thread);
if let Some(data_race) = data_race {
data_race.validate_lock_acquire(&mut condvar.data_race, waiter.thread);
}
(waiter.thread, waiter.mutex)
})
}
@ -466,12 +482,16 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
let this = self.eval_context_mut();
let current_thread = this.get_active_thread();
let futex = &mut this.machine.threads.sync.futexes.get_mut(&addr.erase_tag())?;
let data_race = &mut this.memory.extra.data_race;
let data_race = &this.memory.extra.data_race;
// Each futex-wake happens-before the end of the futex wait
data_race.validate_lock_release(&mut futex.data_race, current_thread);
if let Some(data_race) = data_race {
data_race.validate_lock_release(&mut futex.data_race, current_thread);
}
let res = futex.waiters.pop_front().map(|waiter| {
data_race.validate_lock_acquire(&futex.data_race, waiter.thread);
if let Some(data_race) = data_race {
data_race.validate_lock_acquire(&futex.data_race, waiter.thread);
}
waiter.thread
});
res

View file

@ -3,6 +3,7 @@
use std::cell::RefCell;
use std::collections::hash_map::Entry;
use std::convert::TryFrom;
use std::rc::Rc;
use std::num::TryFromIntError;
use std::time::{Duration, Instant, SystemTime};
@ -327,7 +328,7 @@ impl<'mir, 'tcx: 'mir> ThreadManager<'mir, 'tcx> {
}
/// Mark that the active thread tries to join the thread with `joined_thread_id`.
fn join_thread(&mut self, joined_thread_id: ThreadId, data_race: &data_race::GlobalState) -> InterpResult<'tcx> {
fn join_thread(&mut self, joined_thread_id: ThreadId, data_race: &Option<Rc<data_race::GlobalState>>) -> InterpResult<'tcx> {
if self.threads[joined_thread_id].join_status != ThreadJoinStatus::Joinable {
throw_ub_format!("trying to join a detached or already joined thread");
}
@ -351,9 +352,11 @@ impl<'mir, 'tcx: 'mir> ThreadManager<'mir, 'tcx> {
self.active_thread,
joined_thread_id
);
}else{
} else {
// The thread has already terminated - mark join happens-before
data_race.thread_joined(self.active_thread, joined_thread_id);
if let Some(data_race) = data_race {
data_race.thread_joined(self.active_thread, joined_thread_id);
}
}
Ok(())
}
@ -428,7 +431,7 @@ impl<'mir, 'tcx: 'mir> ThreadManager<'mir, 'tcx> {
/// Wakes up threads joining on the active one and deallocates thread-local statics.
/// The `AllocId` that can now be freed is returned.
fn thread_terminated(&mut self, data_race: &data_race::GlobalState) -> Vec<AllocId> {
fn thread_terminated(&mut self, data_race: &Option<Rc<data_race::GlobalState>>) -> Vec<AllocId> {
let mut free_tls_statics = Vec::new();
{
let mut thread_local_statics = self.thread_local_alloc_ids.borrow_mut();
@ -444,12 +447,16 @@ impl<'mir, 'tcx: 'mir> ThreadManager<'mir, 'tcx> {
});
}
// Set the thread into a terminated state in the data-race detector
data_race.thread_terminated();
if let Some(data_race) = data_race {
data_race.thread_terminated();
}
// Check if we need to unblock any threads.
for (i, thread) in self.threads.iter_enumerated_mut() {
if thread.state == ThreadState::BlockedOnJoin(self.active_thread) {
// The thread has terminated, mark happens-before edge to joining thread
data_race.thread_joined(i, self.active_thread);
if let Some(data_race) = data_race {
data_race.thread_joined(i, self.active_thread);
}
trace!("unblocking {:?} because {:?} terminated", i, self.active_thread);
thread.state = ThreadState::Enabled;
}
@ -463,7 +470,7 @@ impl<'mir, 'tcx: 'mir> ThreadManager<'mir, 'tcx> {
/// used in stateless model checkers such as Loom: run the active thread as
/// long as we can and switch only when we have to (the active thread was
/// blocked, terminated, or has explicitly asked to be preempted).
fn schedule(&mut self, data_race: &data_race::GlobalState) -> InterpResult<'tcx, SchedulingAction> {
fn schedule(&mut self, data_race: &Option<Rc<data_race::GlobalState>>) -> InterpResult<'tcx, SchedulingAction> {
// Check whether the thread has **just** terminated (`check_terminated`
// checks whether the thread has popped all its stack and if yes, sets
// the thread state to terminated).
@ -508,7 +515,9 @@ impl<'mir, 'tcx: 'mir> ThreadManager<'mir, 'tcx> {
if thread.state == ThreadState::Enabled {
if !self.yield_active_thread || id != self.active_thread {
self.active_thread = id;
data_race.thread_set_active(self.active_thread);
if let Some(data_race) = data_race {
data_race.thread_set_active(self.active_thread);
}
break;
}
}
@ -563,7 +572,9 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
fn create_thread(&mut self) -> ThreadId {
let this = self.eval_context_mut();
let id = this.machine.threads.create_thread();
this.memory.extra.data_race.thread_created(id);
if let Some(data_race) = &this.memory.extra.data_race {
data_race.thread_created(id);
}
id
}
@ -576,7 +587,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
#[inline]
fn join_thread(&mut self, joined_thread_id: ThreadId) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
let data_race = &*this.memory.extra.data_race;
let data_race = &this.memory.extra.data_race;
this.machine.threads.join_thread(joined_thread_id, data_race)?;
Ok(())
}
@ -584,7 +595,9 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
#[inline]
fn set_active_thread(&mut self, thread_id: ThreadId) -> ThreadId {
let this = self.eval_context_mut();
this.memory.extra.data_race.thread_set_active(thread_id);
if let Some(data_race) = &this.memory.extra.data_race {
data_race.thread_set_active(thread_id);
}
this.machine.threads.set_active_thread_id(thread_id)
}
@ -639,10 +652,12 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
#[inline]
fn set_active_thread_name(&mut self, new_thread_name: Vec<u8>) {
let this = self.eval_context_mut();
if let Ok(string) = String::from_utf8(new_thread_name.clone()) {
this.memory.extra.data_race.thread_set_name(
this.machine.threads.active_thread, string
);
if let Some(data_race) = &this.memory.extra.data_race {
if let Ok(string) = String::from_utf8(new_thread_name.clone()) {
data_race.thread_set_name(
this.machine.threads.active_thread, string
);
}
}
this.machine.threads.set_thread_name(new_thread_name);
}
@ -713,7 +728,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
#[inline]
fn schedule(&mut self) -> InterpResult<'tcx, SchedulingAction> {
let this = self.eval_context_mut();
let data_race = &*this.memory.extra.data_race;
let data_race = &this.memory.extra.data_race;
this.machine.threads.schedule(data_race)
}
@ -724,7 +739,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
#[inline]
fn thread_terminated(&mut self) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
let data_race = &*this.memory.extra.data_race;
let data_race = &this.memory.extra.data_race;
for alloc_id in this.machine.threads.thread_terminated(data_race) {
let ptr = this.memory.global_base_pointer(alloc_id.into())?;
this.memory.deallocate(ptr, None, MiriMemoryKind::Tls.into())?;

View file

@ -1,121 +1,132 @@
use std::{
fmt::{self, Debug}, cmp::Ordering, ops::Index,
num::TryFromIntError, convert::TryFrom, mem
convert::TryFrom, mem
};
use smallvec::SmallVec;
use rustc_index::vec::Idx;
use rustc_data_structures::fx::FxHashMap;
/// A vector clock index, this is associated with a thread id
/// but in some cases one vector index may be shared with
/// multiple thread ids.
/// but in some cases one vector index may be shared with
/// multiple thread ids if it is safe to do so.
#[derive(Clone, Copy, Debug, PartialOrd, Ord, PartialEq, Eq, Hash)]
pub struct VectorIdx(u32);
impl VectorIdx{
impl VectorIdx {
#[inline(always)]
pub fn to_u32(self) -> u32 {
self.0
}
pub const MAX_INDEX: VectorIdx = VectorIdx(u32::MAX);
}
impl Idx for VectorIdx {
#[inline]
fn new(idx: usize) -> Self {
VectorIdx(u32::try_from(idx).unwrap())
}
#[inline]
fn index(self) -> usize {
usize::try_from(self.0).unwrap()
}
}
impl TryFrom<u64> for VectorIdx {
type Error = TryFromIntError;
fn try_from(id: u64) -> Result<Self, Self::Error> {
u32::try_from(id).map(|id_u32| Self(id_u32))
}
}
impl From<u32> for VectorIdx {
#[inline]
fn from(id: u32) -> Self {
Self(id)
}
}
/// A sparse set of vector clocks, where each vector index
/// is associated with a vector clock.
/// This treats all vector clocks that have not been assigned
/// as equal to the all zero vector clocks
/// Is optimized for the common case where only 1 element is stored
/// in the set and the rest can be ignored, falling-back to
/// using an internal hash-map once more than 1 element is assigned
/// at any one time
/// A sparse mapping of vector index values to vector clocks, this
/// is optimized for the common case with only one element stored
/// inside the map.
/// This is used to store the set of currently active release
/// sequences at a given memory location, since RMW operations
/// allow for multiple release sequences to be active at once
/// and to be collapsed back to one active release sequence
/// once a non-RMW atomic store operation occurs.
/// An all zero vector is considered to be equal to no
/// element stored internally since it will never be
/// stored and has no meaning as a release sequence
/// vector clock.
#[derive(Clone)]
pub struct VSmallClockSet(VSmallClockSetInner);
pub struct VSmallClockMap(VSmallClockMapInner);
#[derive(Clone)]
enum VSmallClockSetInner {
enum VSmallClockMapInner {
/// Zero or 1 vector elements, common
/// case for the sparse set.
/// case for the sparse map.
/// The all zero vector clock is treated
/// as equal to the empty element
/// as equal to the empty element.
Small(VectorIdx, VClock),
/// Hash-map of vector clocks
/// Hash-map of vector clocks.
Large(FxHashMap<VectorIdx, VClock>)
}
impl VSmallClockSet {
impl VSmallClockMap {
/// Remove all clock vectors from the map, setting them
/// to the zero vector
/// to the zero vector.
pub fn clear(&mut self) {
match &mut self.0 {
VSmallClockSetInner::Small(_, clock) => {
VSmallClockMapInner::Small(_, clock) => {
clock.set_zero_vector()
}
VSmallClockSetInner::Large(hash_map) => {
VSmallClockMapInner::Large(hash_map) => {
hash_map.clear();
}
}
}
/// Remove all clock vectors except for the clock vector
/// stored at the given index, which is retained
/// stored at the given index, which is retained.
pub fn retain_index(&mut self, index: VectorIdx) {
match &mut self.0 {
VSmallClockSetInner::Small(small_idx, clock) => {
VSmallClockMapInner::Small(small_idx, clock) => {
if index != *small_idx {
// The zero-vector is considered to equal
// the empty element
// the empty element.
clock.set_zero_vector()
}
},
VSmallClockSetInner::Large(hash_map) => {
hash_map.retain(|idx,_| {
*idx == index
});
VSmallClockMapInner::Large(hash_map) => {
let value = hash_map.remove(&index).unwrap_or_default();
self.0 = VSmallClockMapInner::Small(index, value);
}
}
}
/// Insert the vector clock into the associated vector
/// index
/// index.
pub fn insert(&mut self, index: VectorIdx, clock: &VClock) {
match &mut self.0 {
VSmallClockSetInner::Small(small_idx, small_clock) => {
VSmallClockMapInner::Small(small_idx, small_clock) => {
if small_clock.is_zero_vector() {
*small_idx = index;
small_clock.clone_from(clock);
}else if !clock.is_zero_vector() {
} else if !clock.is_zero_vector() {
// Convert to using the hash-map representation.
let mut hash_map = FxHashMap::default();
hash_map.insert(*small_idx, mem::take(small_clock));
hash_map.insert(index, clock.clone());
self.0 = VSmallClockSetInner::Large(hash_map);
self.0 = VSmallClockMapInner::Large(hash_map);
}
},
VSmallClockSetInner::Large(hash_map) => {
VSmallClockMapInner::Large(hash_map) => {
if !clock.is_zero_vector() {
hash_map.insert(index, clock.clone());
}
@ -127,41 +138,44 @@ impl VSmallClockSet {
/// vector index.
pub fn get(&self, index: VectorIdx) -> Option<&VClock> {
match &self.0 {
VSmallClockSetInner::Small(small_idx, small_clock) => {
VSmallClockMapInner::Small(small_idx, small_clock) => {
if *small_idx == index && !small_clock.is_zero_vector() {
Some(small_clock)
}else{
} else {
None
}
},
VSmallClockSetInner::Large(hash_map) => {
VSmallClockMapInner::Large(hash_map) => {
hash_map.get(&index)
}
}
}
}
impl Default for VSmallClockSet {
impl Default for VSmallClockMap {
#[inline]
fn default() -> Self {
VSmallClockSet(
VSmallClockSetInner::Small(VectorIdx::new(0), VClock::default())
VSmallClockMap(
VSmallClockMapInner::Small(VectorIdx::new(0), VClock::default())
)
}
}
impl Debug for VSmallClockSet {
impl Debug for VSmallClockMap {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
// Print the contents of the small vector clock set as the map
// of vector index to vector clock that they represent
// of vector index to vector clock that they represent.
let mut map = f.debug_map();
match &self.0 {
VSmallClockSetInner::Small(small_idx, small_clock) => {
VSmallClockMapInner::Small(small_idx, small_clock) => {
if !small_clock.is_zero_vector() {
map.entry(&small_idx, &small_clock);
}
},
VSmallClockSetInner::Large(hash_map) => {
VSmallClockMapInner::Large(hash_map) => {
for (idx, elem) in hash_map.iter() {
map.entry(idx, elem);
}
@ -169,30 +183,35 @@ impl Debug for VSmallClockSet {
}
map.finish()
}
}
impl PartialEq for VSmallClockSet {
impl PartialEq for VSmallClockMap {
fn eq(&self, other: &Self) -> bool {
use VSmallClockSetInner::*;
use VSmallClockMapInner::*;
match (&self.0, &other.0) {
(Small(i1, c1), Small(i2, c2)) => {
if c1.is_zero_vector() {
// Either they are both zero or they are non-equal
c2.is_zero_vector()
}else{
} else {
// At least one is non-zero, so the full comparison is correct
i1 == i2 && c1 == c2
}
}
(VSmallClockSetInner::Small(idx, clock), VSmallClockSetInner::Large(hash_map)) |
(VSmallClockSetInner::Large(hash_map), VSmallClockSetInner::Small(idx, clock)) => {
(Small(idx, clock), Large(hash_map)) |
(Large(hash_map), Small(idx, clock)) => {
if hash_map.len() == 0 {
// Equal to the empty hash-map
clock.is_zero_vector()
}else if hash_map.len() == 1 {
} else if hash_map.len() == 1 {
// Equal to the hash-map with one element
let (hash_idx, hash_clock) = hash_map.iter().next().unwrap();
hash_idx == idx && hash_clock == clock
}else{
} else {
false
}
}
@ -201,32 +220,38 @@ impl PartialEq for VSmallClockSet {
}
}
}
}
impl Eq for VSmallClockSet {}
impl Eq for VSmallClockMap {}
/// The size of the vector-clock to store inline
/// clock vectors larger than this will be stored on the heap
/// clock vectors larger than this will be stored on the heap
const SMALL_VECTOR: usize = 4;
/// The type of the time-stamps recorded in the data-race detector
/// set to a type of unsigned integer
/// set to a type of unsigned integer
pub type VTimestamp = u32;
/// A vector clock for detecting data-races
/// invariants:
/// - the last element in a VClock must not be 0
/// -- this means that derive(PartialEq & Eq) is correct
/// -- as there is no implicit zero tail that might be equal
/// -- also simplifies the implementation of PartialOrd
/// A vector clock for detecting data-races, this is conceptually
/// a map from a vector index (and thus a thread id) to a timestamp.
/// The compare operations require the invariant that the last
/// element in the internal timestamp slice must not be a 0, hence
/// all zero vector clocks are always represented by the empty slice;
/// and allows for the implementation of compare operations to short
/// circuit the calculation and return the correct result faster,
/// also this means that there is only one unique valid length
/// for each set of vector clock values and hence the PartialEq
/// and Eq derivations are correct.
#[derive(PartialEq, Eq, Default, Debug)]
pub struct VClock(SmallVec<[VTimestamp; SMALL_VECTOR]>);
impl VClock {
/// Create a new vector-clock containing all zeros except
/// for a value at the given index
/// for a value at the given index
pub fn new_with_index(index: VectorIdx, timestamp: VTimestamp) -> VClock {
let len = index.index() + 1;
let mut vec = smallvec::smallvec![0; len];
@ -241,8 +266,8 @@ impl VClock {
}
/// Get a mutable slice to the internal vector with minimum `min_len`
/// elements, to preserve invariants this vector must modify
/// the `min_len`-1 nth element to a non-zero value
/// elements, to preserve invariants this vector must modify
/// the `min_len`-1 nth element to a non-zero value
#[inline]
fn get_mut_with_min_len(&mut self, min_len: usize) -> &mut [VTimestamp] {
if self.0.len() < min_len {
@ -253,7 +278,7 @@ impl VClock {
}
/// Increment the vector clock at a known index
/// this will panic if the vector index overflows
/// this will panic if the vector index overflows
#[inline]
pub fn increment_index(&mut self, idx: VectorIdx) {
let idx = idx.index();
@ -263,8 +288,8 @@ impl VClock {
}
// Join the two vector-clocks together, this
// sets each vector-element to the maximum value
// of that element in either of the two source elements.
// sets each vector-element to the maximum value
// of that element in either of the two source elements.
pub fn join(&mut self, other: &Self) {
let rhs_slice = other.as_slice();
let lhs_slice = self.get_mut_with_min_len(rhs_slice.len());
@ -291,30 +316,43 @@ impl VClock {
pub fn is_zero_vector(&self) -> bool {
self.0.is_empty()
}
}
impl Clone for VClock {
fn clone(&self) -> Self {
VClock(self.0.clone())
}
// Optimized clone-from, can be removed
// and replaced with a derive once a similar
// optimization is inserted into SmallVec's
// clone implementation.
fn clone_from(&mut self, source: &Self) {
let source_slice = source.as_slice();
self.0.clear();
self.0.extend_from_slice(source_slice);
}
}
impl PartialOrd for VClock {
fn partial_cmp(&self, other: &VClock) -> Option<Ordering> {
// Load the values as slices
let lhs_slice = self.as_slice();
let rhs_slice = other.as_slice();
// Iterate through the combined vector slice
// keeping track of the order that is currently possible to satisfy.
// If an ordering relation is detected to be impossible, then bail and
// directly return None
// Iterate through the combined vector slice continuously updating
// the value of `order` to the current comparison of the vector from
// index 0 to the currently checked index.
// An Equal ordering can be converted into Less or Greater ordering
// on finding an element that is less than or greater than the other
// but if one Greater and one Less element-wise comparison is found
// then no ordering is possible and so directly return an ordering
// of None.
let mut iter = lhs_slice.iter().zip(rhs_slice.iter());
let mut order = match iter.next() {
Some((lhs, rhs)) => lhs.cmp(rhs),
@ -332,23 +370,23 @@ impl PartialOrd for VClock {
}
}
//Now test if either left or right have trailing elements
// Now test if either left or right have trailing elements,
// by the invariant the trailing elements have at least 1
// non zero value, so no additional calculation is required
// to determine the result of the PartialOrder
// to determine the result of the PartialOrder.
let l_len = lhs_slice.len();
let r_len = rhs_slice.len();
match l_len.cmp(&r_len) {
// Equal has no additional elements: return current order
// Equal means no additional elements: return current order
Ordering::Equal => Some(order),
// Right has at least 1 element > than the implicit 0,
// so the only valid values are Ordering::Less or None
// so the only valid values are Ordering::Less or None.
Ordering::Less => match order {
Ordering::Less | Ordering::Equal => Some(Ordering::Less),
Ordering::Greater => None
}
// Left has at least 1 element > than the implicit 0,
// so the only valid values are Ordering::Greater or None
// so the only valid values are Ordering::Greater or None.
Ordering::Greater => match order {
Ordering::Greater | Ordering::Equal => Some(Ordering::Greater),
Ordering::Less => None
@ -362,28 +400,28 @@ impl PartialOrd for VClock {
let rhs_slice = other.as_slice();
// If l_len > r_len then at least one element
// in l_len is > than r_len, therefore the result
// is either Some(Greater) or None, so return false
// early.
// in l_len is > than r_len, therefore the result
// is either Some(Greater) or None, so return false
// early.
let l_len = lhs_slice.len();
let r_len = rhs_slice.len();
if l_len <= r_len {
// If any elements on the left are greater than the right
// then the result is None or Some(Greater), both of which
// return false, the earlier test asserts that no elements in the
// extended tail violate this assumption. Otherwise l <= r, finally
// the case where the values are potentially equal needs to be considered
// and false returned as well
// then the result is None or Some(Greater), both of which
// return false, the earlier test asserts that no elements in the
// extended tail violate this assumption. Otherwise l <= r, finally
// the case where the values are potentially equal needs to be considered
// and false returned as well
let mut equal = l_len == r_len;
for (&l, &r) in lhs_slice.iter().zip(rhs_slice.iter()) {
if l > r {
return false
}else if l < r {
} else if l < r {
equal = false;
}
}
!equal
}else{
} else {
false
}
}
@ -394,18 +432,18 @@ impl PartialOrd for VClock {
let rhs_slice = other.as_slice();
// If l_len > r_len then at least one element
// in l_len is > than r_len, therefore the result
// is either Some(Greater) or None, so return false
// early.
// in l_len is > than r_len, therefore the result
// is either Some(Greater) or None, so return false
// early.
let l_len = lhs_slice.len();
let r_len = rhs_slice.len();
if l_len <= r_len {
// If any elements on the left are greater than the right
// then the result is None or Some(Greater), both of which
// return false, the earlier test asserts that no elements in the
// extended tail violate this assumption. Otherwise l <= r
// then the result is None or Some(Greater), both of which
// return false, the earlier test asserts that no elements in the
// extended tail violate this assumption. Otherwise l <= r
!lhs_slice.iter().zip(rhs_slice.iter()).any(|(&l, &r)| l > r)
}else{
} else {
false
}
}
@ -416,28 +454,28 @@ impl PartialOrd for VClock {
let rhs_slice = other.as_slice();
// If r_len > l_len then at least one element
// in r_len is > than l_len, therefore the result
// is either Some(Less) or None, so return false
// early.
// in r_len is > than l_len, therefore the result
// is either Some(Less) or None, so return false
// early.
let l_len = lhs_slice.len();
let r_len = rhs_slice.len();
if l_len >= r_len {
// If any elements on the left are less than the right
// then the result is None or Some(Less), both of which
// return false, the earlier test asserts that no elements in the
// extended tail violate this assumption. Otherwise l >=, finally
// the case where the values are potentially equal needs to be considered
// and false returned as well
// then the result is None or Some(Less), both of which
// return false, the earlier test asserts that no elements in the
// extended tail violate this assumption. Otherwise l >= r, finally
// the case where the values are potentially equal needs to be considered
// and false returned as well
let mut equal = l_len == r_len;
for (&l, &r) in lhs_slice.iter().zip(rhs_slice.iter()) {
if l < r {
return false
}else if l > r {
} else if l > r {
equal = false;
}
}
!equal
}else{
} else {
false
}
}
@ -448,30 +486,33 @@ impl PartialOrd for VClock {
let rhs_slice = other.as_slice();
// If r_len > l_len then at least one element
// in r_len is > than l_len, therefore the result
// is either Some(Less) or None, so return false
// early.
// in r_len is > than l_len, therefore the result
// is either Some(Less) or None, so return false
// early.
let l_len = lhs_slice.len();
let r_len = rhs_slice.len();
if l_len >= r_len {
// If any elements on the left are less than the right
// then the result is None or Some(Less), both of which
// return false, the earlier test asserts that no elements in the
// extended tail violate this assumption. Otherwise l >= r
// then the result is None or Some(Less), both of which
// return false, the earlier test asserts that no elements in the
// extended tail violate this assumption. Otherwise l >= r
!lhs_slice.iter().zip(rhs_slice.iter()).any(|(&l, &r)| l < r)
}else{
} else {
false
}
}
}
impl Index<VectorIdx> for VClock {
type Output = VTimestamp;
#[inline]
fn index(&self, index: VectorIdx) -> &VTimestamp {
self.as_slice().get(index.to_u32() as usize).unwrap_or(&0)
}
}
@ -480,7 +521,8 @@ impl Index<VectorIdx> for VClock {
/// test suite
#[cfg(test)]
mod tests {
use super::{VClock, VTimestamp, VectorIdx, VSmallClockSet};
use super::{VClock, VTimestamp, VectorIdx, VSmallClockMap};
use std::cmp::Ordering;
#[test]
@ -536,7 +578,7 @@ mod tests {
let alt_compare = r.partial_cmp(&l);
assert_eq!(alt_compare, o.map(Ordering::reverse), "Invalid alt comparison\n l: {:?}\n r: {:?}",l,r);
//Test operatorsm with faster implementations
//Test operators with faster implementations
assert_eq!(
matches!(compare,Some(Ordering::Less)), l < r,
"Invalid (<):\n l: {:?}\n r: {:?}",l,r
@ -573,30 +615,31 @@ mod tests {
#[test]
pub fn test_vclock_set() {
let mut set = VSmallClockSet::default();
let mut map = VSmallClockMap::default();
let v1 = from_slice(&[3,0,1]);
let v2 = from_slice(&[4,2,3]);
let v3 = from_slice(&[4,8,3]);
set.insert(VectorIdx(0), &v1);
assert_eq!(set.get(VectorIdx(0)), Some(&v1));
set.insert(VectorIdx(5), &v2);
assert_eq!(set.get(VectorIdx(0)), Some(&v1));
assert_eq!(set.get(VectorIdx(5)), Some(&v2));
set.insert(VectorIdx(53), &v3);
assert_eq!(set.get(VectorIdx(0)), Some(&v1));
assert_eq!(set.get(VectorIdx(5)), Some(&v2));
assert_eq!(set.get(VectorIdx(53)), Some(&v3));
set.retain_index(VectorIdx(53));
assert_eq!(set.get(VectorIdx(0)), None);
assert_eq!(set.get(VectorIdx(5)), None);
assert_eq!(set.get(VectorIdx(53)), Some(&v3));
set.clear();
assert_eq!(set.get(VectorIdx(0)), None);
assert_eq!(set.get(VectorIdx(5)), None);
assert_eq!(set.get(VectorIdx(53)), None);
set.insert(VectorIdx(53), &v3);
assert_eq!(set.get(VectorIdx(0)), None);
assert_eq!(set.get(VectorIdx(5)), None);
assert_eq!(set.get(VectorIdx(53)), Some(&v3));
map.insert(VectorIdx(0), &v1);
assert_eq!(map.get(VectorIdx(0)), Some(&v1));
map.insert(VectorIdx(5), &v2);
assert_eq!(map.get(VectorIdx(0)), Some(&v1));
assert_eq!(map.get(VectorIdx(5)), Some(&v2));
map.insert(VectorIdx(53), &v3);
assert_eq!(map.get(VectorIdx(0)), Some(&v1));
assert_eq!(map.get(VectorIdx(5)), Some(&v2));
assert_eq!(map.get(VectorIdx(53)), Some(&v3));
map.retain_index(VectorIdx(53));
assert_eq!(map.get(VectorIdx(0)), None);
assert_eq!(map.get(VectorIdx(5)), None);
assert_eq!(map.get(VectorIdx(53)), Some(&v3));
map.clear();
assert_eq!(map.get(VectorIdx(0)), None);
assert_eq!(map.get(VectorIdx(5)), None);
assert_eq!(map.get(VectorIdx(53)), None);
map.insert(VectorIdx(53), &v3);
assert_eq!(map.get(VectorIdx(0)), None);
assert_eq!(map.get(VectorIdx(5)), None);
assert_eq!(map.get(VectorIdx(53)), Some(&v3));
}
}

View file

@ -1,2 +1,2 @@
warning: thread support is experimental.
warning: thread support is experimental, no weak memory effects are currently emulated.

View file

@ -1,2 +1,2 @@
warning: thread support is experimental.
warning: thread support is experimental, no weak memory effects are currently emulated.

View file

@ -1,4 +1,4 @@
warning: thread support is experimental.
warning: thread support is experimental, no weak memory effects are currently emulated.
thread '<unnamed>' panicked at 'Hello!', $DIR/simple.rs:54:9
note: run with `RUST_BACKTRACE=1` environment variable to display a backtrace

View file

@ -1,2 +1,2 @@
warning: thread support is experimental.
warning: thread support is experimental, no weak memory effects are currently emulated.

View file

@ -1,2 +1,2 @@
warning: thread support is experimental.
warning: thread support is experimental, no weak memory effects are currently emulated.

View file

@ -1,2 +1,2 @@
warning: thread support is experimental.
warning: thread support is experimental, no weak memory effects are currently emulated.

View file

@ -1,2 +1,2 @@
warning: thread support is experimental.
warning: thread support is experimental, no weak memory effects are currently emulated.

View file

@ -1,4 +1,4 @@
warning: thread support is experimental.
warning: thread support is experimental, no weak memory effects are currently emulated.
Thread 1 starting, will block on mutex
Thread 1 reported it has started