Auto merge of #876 - RalfJung:atomic, r=RalfJung

Check that atomics are sufficiently aligned.

Fixes https://github.com/rust-lang/miri/issues/475
This commit is contained in:
bors 2019-08-04 08:55:18 +00:00
commit ed30152b52
4 changed files with 109 additions and 48 deletions

View file

@ -1,7 +1,7 @@
use rustc_apfloat::Float;
use rustc::mir;
use rustc::mir::interpret::{InterpResult, PointerArithmetic};
use rustc::ty::layout::{self, LayoutOf, Size};
use rustc::ty::layout::{self, LayoutOf, Size, Align};
use rustc::ty;
use crate::{
@ -48,30 +48,44 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
}
}
"volatile_load" => {
let place = this.deref_operand(args[0])?;
this.copy_op(place.into(), dest)?;
}
"volatile_store" => {
let place = this.deref_operand(args[0])?;
this.copy_op(args[1], place.into())?;
}
"atomic_load" |
"atomic_load_relaxed" |
"atomic_load_acq" => {
let ptr = this.deref_operand(args[0])?;
let val = this.read_scalar(ptr.into())?; // make sure it fits into a scalar; otherwise it cannot be atomic
this.write_scalar(val, dest)?;
}
let place = this.deref_operand(args[0])?;
let val = this.read_scalar(place.into())?; // make sure it fits into a scalar; otherwise it cannot be atomic
"volatile_load" => {
let ptr = this.deref_operand(args[0])?;
this.copy_op(ptr.into(), dest)?;
// Check alignment requirements. Atomics must always be aligned to their size,
// even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
// be 8-aligned).
let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
this.memory().check_ptr_access(place.ptr, place.layout.size, align)?;
this.write_scalar(val, dest)?;
}
"atomic_store" |
"atomic_store_relaxed" |
"atomic_store_rel" => {
let ptr = this.deref_operand(args[0])?;
let place = this.deref_operand(args[0])?;
let val = this.read_scalar(args[1])?; // make sure it fits into a scalar; otherwise it cannot be atomic
this.write_scalar(val, ptr.into())?;
}
"volatile_store" => {
let ptr = this.deref_operand(args[0])?;
this.copy_op(args[1], ptr.into())?;
// Check alignment requirements. Atomics must always be aligned to their size,
// even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
// be 8-aligned).
let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
this.memory().check_ptr_access(place.ptr, place.layout.size, align)?;
this.write_scalar(val, place.into())?;
}
"atomic_fence_acq" => {
@ -79,25 +93,39 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
}
_ if intrinsic_name.starts_with("atomic_xchg") => {
let ptr = this.deref_operand(args[0])?;
let place = this.deref_operand(args[0])?;
let new = this.read_scalar(args[1])?;
let old = this.read_scalar(ptr.into())?;
let old = this.read_scalar(place.into())?;
// Check alignment requirements. Atomics must always be aligned to their size,
// even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
// be 8-aligned).
let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
this.memory().check_ptr_access(place.ptr, place.layout.size, align)?;
this.write_scalar(old, dest)?; // old value is returned
this.write_scalar(new, ptr.into())?;
this.write_scalar(new, place.into())?;
}
_ if intrinsic_name.starts_with("atomic_cxchg") => {
let ptr = this.deref_operand(args[0])?;
let place = this.deref_operand(args[0])?;
let expect_old = this.read_immediate(args[1])?; // read as immediate for the sake of `binary_op()`
let new = this.read_scalar(args[2])?;
let old = this.read_immediate(ptr.into())?; // read as immediate for the sake of `binary_op()`
let old = this.read_immediate(place.into())?; // read as immediate for the sake of `binary_op()`
// Check alignment requirements. Atomics must always be aligned to their size,
// even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
// be 8-aligned).
let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
this.memory().check_ptr_access(place.ptr, place.layout.size, align)?;
// binary_op will bail if either of them is not a scalar
let (eq, _) = this.binary_op(mir::BinOp::Eq, old, expect_old)?;
let res = Immediate::ScalarPair(old.to_scalar_or_undef(), eq.into());
this.write_immediate(res, dest)?; // old value is returned
// update ptr depending on comparison
if eq.to_bool()? {
this.write_scalar(new, ptr.into())?;
this.write_scalar(new, place.into())?;
}
}
@ -131,12 +159,19 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
"atomic_xsub_rel" |
"atomic_xsub_acqrel" |
"atomic_xsub_relaxed" => {
let ptr = this.deref_operand(args[0])?;
if !ptr.layout.ty.is_integral() {
let place = this.deref_operand(args[0])?;
if !place.layout.ty.is_integral() {
bug!("Atomic arithmetic operations only work on integer types");
}
let rhs = this.read_immediate(args[1])?;
let old = this.read_immediate(ptr.into())?;
let old = this.read_immediate(place.into())?;
// Check alignment requirements. Atomics must always be aligned to their size,
// even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
// be 8-aligned).
let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
this.memory().check_ptr_access(place.ptr, place.layout.size, align)?;
this.write_immediate(*old, dest)?; // old value is returned
let (op, neg) = match intrinsic_name.split('_').nth(1).unwrap() {
"or" => (mir::BinOp::BitOr, false),
@ -154,7 +189,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
} else {
val
};
this.write_scalar(val, ptr.into())?;
this.write_scalar(val, place.into())?;
}
"breakpoint" => unimplemented!(), // halt miri
@ -335,8 +370,8 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
}
"move_val_init" => {
let ptr = this.deref_operand(args[0])?;
this.copy_op(args[1], ptr.into())?;
let place = this.deref_operand(args[0])?;
this.copy_op(args[1], place.into())?;
}
"offset" => {

View file

@ -0,0 +1,12 @@
#![feature(core_intrinsics)]
fn main() {
// Do a 4-aligned u64 atomic access. That should be UB on all platforms,
// even if u64 only has alignment 4.
// `z` is a `[u32; 2]`, so it only guarantees 4-byte alignment, while an
// atomic u64 access requires alignment equal to its size (8 bytes). Miri
// is expected to flag the access below; the `//~^ ERROR` annotation must
// stay directly under the offending line for compiletest to match it.
let z = [0u32; 2];
let zptr = &z as *const _ as *const u64;
unsafe {
::std::intrinsics::atomic_load(zptr);
//~^ ERROR tried to access memory with alignment 4, but alignment 8 is required
}
}

View file

@ -1,19 +0,0 @@
use std::sync::atomic::{AtomicBool, Ordering::*};
static mut ATOMIC: AtomicBool = AtomicBool::new(false);
fn main() {
// Exercise each boolean atomic RMW operation in sequence; after every
// operation the plain (non-atomic) value is inspected via `get_mut`.
// `ATOMIC` is a `static mut`, so every access needs `unsafe`.
unsafe {
assert_eq!(*ATOMIC.get_mut(), false);
ATOMIC.store(true, SeqCst);
assert_eq!(*ATOMIC.get_mut(), true);
// fetch_or(false) leaves `true` unchanged.
ATOMIC.fetch_or(false, SeqCst);
assert_eq!(*ATOMIC.get_mut(), true);
// fetch_and(false) clears the value.
ATOMIC.fetch_and(false, SeqCst);
assert_eq!(*ATOMIC.get_mut(), false);
// nand(false, true) == !(false && true) == true.
ATOMIC.fetch_nand(true, SeqCst);
assert_eq!(*ATOMIC.get_mut(), true);
// xor(true, true) == false.
ATOMIC.fetch_xor(true, SeqCst);
assert_eq!(*ATOMIC.get_mut(), false);
}
}

View file

@ -1,8 +1,32 @@
use std::sync::atomic::{AtomicIsize, Ordering::*};
static ATOMIC: AtomicIsize = AtomicIsize::new(0);
use std::sync::atomic::{AtomicBool, AtomicIsize, AtomicU64, Ordering::*};
fn main() {
// Run each family of atomic tests in turn.
atomic_bool();
atomic_isize();
atomic_u64();
}
fn atomic_bool() {
    // Exercise the boolean atomic RMW operations (store / or / and / nand /
    // xor), checking the plain value after each step via `get_mut`.
    //
    // A local atomic is sufficient here: the intrinsics exercised are the
    // same, and it avoids the `static mut` + `unsafe` pattern, which relies
    // on creating references to a mutable static (UB-prone and rejected in
    // newer Rust editions).
    let mut atomic = AtomicBool::new(false);
    assert_eq!(*atomic.get_mut(), false);
    atomic.store(true, SeqCst);
    assert_eq!(*atomic.get_mut(), true);
    // or(true, false) leaves the value set.
    atomic.fetch_or(false, SeqCst);
    assert_eq!(*atomic.get_mut(), true);
    // and(true, false) clears it.
    atomic.fetch_and(false, SeqCst);
    assert_eq!(*atomic.get_mut(), false);
    // nand(false, true) == !(false && true) == true.
    atomic.fetch_nand(true, SeqCst);
    assert_eq!(*atomic.get_mut(), true);
    // xor(true, true) == false.
    atomic.fetch_xor(true, SeqCst);
    assert_eq!(*atomic.get_mut(), false);
}
fn atomic_isize() {
static ATOMIC: AtomicIsize = AtomicIsize::new(0);
// Make sure trans can emit all the intrinsics correctly
assert_eq!(ATOMIC.compare_exchange(0, 1, Relaxed, Relaxed), Ok(0));
assert_eq!(ATOMIC.compare_exchange(0, 2, Acquire, Relaxed), Err(1));
@ -27,3 +51,12 @@ fn main() {
ATOMIC.compare_exchange_weak(0, 1, SeqCst, Acquire).ok();
ATOMIC.compare_exchange_weak(0, 1, SeqCst, SeqCst).ok();
}
fn atomic_u64() {
    // 64-bit atomics: per the alignment rule being tested in this commit,
    // these must be aligned to their size (8 bytes) even on targets where
    // u64 itself is only 4-aligned, so wide compare-exchange must work.
    static A: AtomicU64 = AtomicU64::new(0);
    A.store(1, SeqCst);
    // Expected value 0 does not match the stored 1, so the exchange fails
    // and returns the current value wrapped in `Err`.
    let missed = A.compare_exchange(0, 0x100, AcqRel, Acquire);
    assert_eq!(missed, Err(1));
    // Expected value matches: 0x100 is swapped in, previous value returned.
    let swapped = A.compare_exchange_weak(1, 0x100, AcqRel, Acquire);
    assert_eq!(swapped, Ok(1));
    assert_eq!(A.load(Relaxed), 0x100);
}