diff --git a/src/machine.rs b/src/machine.rs index 9612d9e19110..02c66915564d 100644 --- a/src/machine.rs +++ b/src/machine.rs @@ -150,7 +150,7 @@ impl MemoryExtra { }; let data_race = if config.data_race_detector { Some(Rc::new(data_race::GlobalState::new())) - }else{ + } else { None }; MemoryExtra { diff --git a/src/shims/posix/linux/sync.rs b/src/shims/posix/linux/sync.rs index 78244ab7b879..5243431194eb 100644 --- a/src/shims/posix/linux/sync.rs +++ b/src/shims/posix/linux/sync.rs @@ -85,12 +85,10 @@ pub fn futex<'tcx>( // with the expected value, and starting to sleep are performed // atomically and totally ordered with respect to other futex // operations on the same futex word." - // SeqCst is total order over all operations, so uses acquire, - // either are equal under the current implementation. - // FIXME: is Acquire correct or should some additional ordering constraints be observed? - // FIXME: use RMW or similar? + // SeqCst is total order over all operations. + // FIXME: check if this should be changed when weak memory orders are added. let futex_val = this.read_scalar_at_offset_atomic( - addr.into(), 0, this.machine.layouts.i32, AtomicReadOp::Acquire + addr.into(), 0, this.machine.layouts.i32, AtomicReadOp::SeqCst )?.to_i32()?; if val == futex_val { // The value still matches, so we block the trait make it wait for FUTEX_WAKE. diff --git a/src/sync.rs b/src/sync.rs index 828268c06ccf..4d488565faf3 100644 --- a/src/sync.rs +++ b/src/sync.rs @@ -61,7 +61,11 @@ struct Mutex { lock_count: usize, /// The queue of threads waiting for this mutex. queue: VecDeque, - /// Data race handle + /// Data race handle, this tracks the happens-before + /// relationship between each mutex access. It is + /// released to during unlock and acquired from during + /// locking, and therefore stores the clock of the last + /// thread to release this mutex. 
data_race: VClock } @@ -79,9 +83,24 @@ struct RwLock { writer_queue: VecDeque, /// The queue of reader threads waiting for this lock. reader_queue: VecDeque, - /// Data race handle for writers + /// Data race handle for writers, tracks the happens-before + /// ordering between each write access to a rwlock and is updated + /// after a sequence of concurrent readers to track the happens- + /// before ordering between the set of previous readers and + /// the current writer. + /// Contains the clock of the last thread to release a writer + /// lock or the joined clock of the set of last threads to release + /// shared reader locks. data_race: VClock, - /// Data race handle for readers + /// Data race handle for readers, this is temporary storage + /// for the combined happens-before ordering between all + /// concurrent readers and the next writer, and the value + /// is stored to the main data_race variable once all + /// readers are finished. + /// Has to be stored separately since reader lock acquires + /// must load the clock of the last write and must not + /// add happens-before orderings between shared reader + /// locks. data_race_reader: VClock, } @@ -100,6 +119,11 @@ struct CondvarWaiter { #[derive(Default, Debug)] struct Condvar { waiters: VecDeque, + /// Tracks the happens-before relationship + /// between a cond-var signal and a cond-var + /// wait during a non-spurious signal event. + /// Contains the clock of the last thread to + /// perform a condvar-signal. data_race: VClock, } @@ -107,6 +131,11 @@ struct Condvar { #[derive(Default, Debug)] struct Futex { waiters: VecDeque, + /// Tracks the happens-before relationship + /// between a futex-wake and a futex-wait + /// during a non-spurious wake event. + /// Contains the clock of the last thread to + /// perform a futex-wake. 
data_race: VClock, } diff --git a/src/vector_clock.rs b/src/vector_clock.rs index ddee98bcf624..6840d7e6cb99 100644 --- a/src/vector_clock.rs +++ b/src/vector_clock.rs @@ -11,7 +11,7 @@ use std::{ /// A vector clock index, this is associated with a thread id /// but in some cases one vector index may be shared with -/// multiple thread ids id it safe to do so. +/// multiple thread ids if it is safe to do so. #[derive(Clone, Copy, Debug, PartialOrd, Ord, PartialEq, Eq, Hash)] pub struct VectorIdx(u32); diff --git a/tests/compile-fail/data_race/relax_acquire_race.rs b/tests/compile-fail/data_race/relax_acquire_race.rs index f7d44c30b66b..4b736e57208a 100644 --- a/tests/compile-fail/data_race/relax_acquire_race.rs +++ b/tests/compile-fail/data_race/relax_acquire_race.rs @@ -31,7 +31,7 @@ pub fn main() { let j3 = spawn(move || { if SYNC.load(Ordering::Acquire) == 2 { *c.0 //~ ERROR Data race - }else{ + } else { 0 } }); diff --git a/tests/compile-fail/data_race/release_seq_race.rs b/tests/compile-fail/data_race/release_seq_race.rs index dc852cdb4d81..0278e9864353 100644 --- a/tests/compile-fail/data_race/release_seq_race.rs +++ b/tests/compile-fail/data_race/release_seq_race.rs @@ -35,7 +35,7 @@ pub fn main() { sleep(Duration::from_millis(1000)); if SYNC.load(Ordering::Acquire) == 3 { *c.0 //~ ERROR Data race - }else{ + } else { 0 } }); diff --git a/tests/compile-fail/data_race/rmw_race.rs b/tests/compile-fail/data_race/rmw_race.rs index bebd01efa171..c533f595f160 100644 --- a/tests/compile-fail/data_race/rmw_race.rs +++ b/tests/compile-fail/data_race/rmw_race.rs @@ -32,7 +32,7 @@ pub fn main() { let j3 = spawn(move || { if SYNC.load(Ordering::Acquire) == 3 { *c.0 //~ ERROR Data race - }else{ + } else { 0 } }); diff --git a/tests/run-pass/concurrency/data_race.rs b/tests/run-pass/concurrency/data_race.rs index 75e56e8c8d2a..64e90024ed49 100644 --- a/tests/run-pass/concurrency/data_race.rs +++ b/tests/run-pass/concurrency/data_race.rs @@ -28,7 +28,7 @@ fn test_fence_sync() 
{ if SYNC.load(Ordering::Relaxed) == 1 { fence(Ordering::Acquire); unsafe { *evil_ptr.0 } - }else{ + } else { 0 } }); @@ -77,7 +77,7 @@ pub fn test_rmw_no_block() { let j3 = spawn(move || { if SYNC.load(Ordering::Acquire) == 2 { *c.0 - }else{ + } else { 0 } }); @@ -104,7 +104,7 @@ pub fn test_release_no_block() { let j2 = spawn(move || { if SYNC.load(Ordering::Acquire) == 3 { *c.0 - }else{ + } else { 0 } });