Auto merge of #820 - RalfJung:intptrcast, r=RalfJung

Make most tests work with Intptrcast

The one that still fails (amusingly, that's ptr_int_casts) requires some help from the librustc_mir side.
This commit is contained in:
bors 2019-07-03 17:13:09 +00:00
commit ce5e910e60
16 changed files with 109 additions and 63 deletions

View file

@ -279,6 +279,8 @@ Several `-Z` flags are relevant for Miri:
* `-Zalways-encode-mir` makes rustc dump MIR even for completely monomorphic
functions. This is needed so that Miri can execute such functions, so Miri
sets this flag per default.
* `-Zmir-emit-retag` controls whether `Retag` statements are emitted. Miri
enables this per default because it is needed for validation.
Moreover, Miri recognizes some environment variables:

View file

@ -32,11 +32,11 @@ pub fn create_ecx<'mir, 'tcx: 'mir>(
let mut ecx = InterpretCx::new(
tcx.at(syntax::source_map::DUMMY_SP),
ty::ParamEnv::reveal_all(),
Evaluator::new(config.validate),
Evaluator::new(),
);
// FIXME: InterpretCx::new should take an initial MemoryExtra
ecx.memory_mut().extra = MemoryExtra::with_rng(config.seed.map(StdRng::seed_from_u64));
ecx.memory_mut().extra = MemoryExtra::new(config.seed.map(StdRng::seed_from_u64), config.validate);
let main_instance = ty::Instance::mono(ecx.tcx.tcx, main_id);
let main_mir = ecx.load_mir(main_instance.def)?;

View file

@ -58,6 +58,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
.map(|(size, _)| size)
.unwrap_or_else(|| place.layout.size)
);
assert!(size.bytes() > 0);
// Store how far we proceeded into the place so far. Everything to the left of
// this offset has already been handled, in the sense that the frozen parts
// have had `action` called on them.

View file

@ -4,7 +4,8 @@ use std::cmp::max;
use rand::Rng;
use rustc_mir::interpret::{AllocId, Pointer, InterpResult, Memory, AllocCheck};
use rustc::ty::layout::HasDataLayout;
use rustc_mir::interpret::{AllocId, Pointer, InterpResult, Memory, AllocCheck, PointerArithmetic};
use rustc_target::abi::Size;
use crate::{Evaluator, Tag, STACK_ADDR};
@ -75,7 +76,9 @@ impl<'mir, 'tcx> GlobalState {
let mut global_state = memory.extra.intptrcast.borrow_mut();
let global_state = &mut *global_state;
let (size, align) = memory.get_size_and_align(ptr.alloc_id, AllocCheck::Live)?;
// There is nothing wrong with a raw pointer being cast to an integer only after
// it became dangling. Hence `MaybeDead`.
let (size, align) = memory.get_size_and_align(ptr.alloc_id, AllocCheck::MaybeDead)?;
let base_addr = match global_state.base_addr.entry(ptr.alloc_id) {
Entry::Occupied(entry) => *entry.get(),
@ -107,7 +110,9 @@ impl<'mir, 'tcx> GlobalState {
};
debug_assert_eq!(base_addr % align.bytes(), 0); // sanity check
Ok(base_addr + ptr.offset.bytes())
// Add offset with the right kind of pointer-overflowing arithmetic.
let dl = memory.data_layout();
Ok(dl.overflowing_offset(base_addr, ptr.offset.bytes()).0)
}
/// Shifts `addr` to make it aligned with `align` by rounding `addr` to the smallest multiple

View file

@ -41,7 +41,8 @@ impl Into<MemoryKind<MiriMemoryKind>> for MiriMemoryKind {
/// Extra per-allocation data
#[derive(Debug, Clone)]
pub struct AllocExtra {
pub stacked_borrows: stacked_borrows::AllocExtra,
/// Stacked Borrows state is only added if validation is enabled.
pub stacked_borrows: Option<stacked_borrows::AllocExtra>,
}
/// Extra global memory data
@ -49,17 +50,22 @@ pub struct AllocExtra {
pub struct MemoryExtra {
pub stacked_borrows: stacked_borrows::MemoryExtra,
pub intptrcast: intptrcast::MemoryExtra,
/// The random number generator to use if Miri is running in non-deterministic mode and to
/// enable intptrcast
pub(crate) rng: Option<RefCell<StdRng>>
pub(crate) rng: Option<RefCell<StdRng>>,
/// Whether to enforce the validity invariant.
pub(crate) validate: bool,
}
impl MemoryExtra {
pub fn with_rng(rng: Option<StdRng>) -> Self {
pub fn new(rng: Option<StdRng>, validate: bool) -> Self {
MemoryExtra {
stacked_borrows: Default::default(),
intptrcast: Default::default(),
rng: rng.map(RefCell::new),
validate,
}
}
}
@ -82,13 +88,10 @@ pub struct Evaluator<'tcx> {
/// TLS state.
pub(crate) tls: TlsData<'tcx>,
/// Whether to enforce the validity invariant.
pub(crate) validate: bool,
}
impl<'tcx> Evaluator<'tcx> {
pub(crate) fn new(validate: bool) -> Self {
pub(crate) fn new() -> Self {
Evaluator {
env_vars: HashMap::default(),
argc: None,
@ -96,7 +99,6 @@ impl<'tcx> Evaluator<'tcx> {
cmd_line: None,
last_error: 0,
tls: TlsData::default(),
validate,
}
}
}
@ -135,7 +137,7 @@ impl<'mir, 'tcx> Machine<'mir, 'tcx> for Evaluator<'tcx> {
#[inline(always)]
fn enforce_validity(ecx: &InterpretCx<'mir, 'tcx, Self>) -> bool {
ecx.machine.validate
ecx.memory().extra.validate
}
/// Returns `Ok()` when the function was handled; fail otherwise.
@ -251,12 +253,17 @@ impl<'mir, 'tcx> Machine<'mir, 'tcx> for Evaluator<'tcx> {
) -> (Cow<'b, Allocation<Self::PointerTag, Self::AllocExtra>>, Self::PointerTag) {
let kind = kind.expect("we set our STATIC_KIND so this cannot be None");
let alloc = alloc.into_owned();
let (stacks, base_tag) = Stacks::new_allocation(
id,
Size::from_bytes(alloc.bytes.len() as u64),
Rc::clone(&memory.extra.stacked_borrows),
kind,
);
let (stacks, base_tag) = if !memory.extra.validate {
(None, Tag::Untagged)
} else {
let (stacks, base_tag) = Stacks::new_allocation(
id,
Size::from_bytes(alloc.bytes.len() as u64),
Rc::clone(&memory.extra.stacked_borrows),
kind,
);
(Some(stacks), base_tag)
};
if kind != MiriMemoryKind::Static.into() {
assert!(alloc.relocations.is_empty(), "Only statics can come initialized with inner pointers");
// Now we can rely on the inner pointers being static, too.
@ -268,7 +275,14 @@ impl<'mir, 'tcx> Machine<'mir, 'tcx> for Evaluator<'tcx> {
alloc.relocations.iter()
// The allocations in the relocations (pointers stored *inside* this allocation)
// all get the base pointer tag.
.map(|&(offset, ((), alloc))| (offset, (memory_extra.static_base_ptr(alloc), alloc)))
.map(|&(offset, ((), alloc))| {
let tag = if !memory.extra.validate {
Tag::Untagged
} else {
memory_extra.static_base_ptr(alloc)
};
(offset, (tag, alloc))
})
.collect()
),
undef_mask: alloc.undef_mask,
@ -286,7 +300,11 @@ impl<'mir, 'tcx> Machine<'mir, 'tcx> for Evaluator<'tcx> {
id: AllocId,
memory: &Memory<'mir, 'tcx, Self>,
) -> Self::PointerTag {
memory.extra.stacked_borrows.borrow_mut().static_base_ptr(id)
if !memory.extra.validate {
Tag::Untagged
} else {
memory.extra.stacked_borrows.borrow_mut().static_base_ptr(id)
}
}
#[inline(always)]
@ -295,12 +313,8 @@ impl<'mir, 'tcx> Machine<'mir, 'tcx> for Evaluator<'tcx> {
kind: mir::RetagKind,
place: PlaceTy<'tcx, Tag>,
) -> InterpResult<'tcx> {
if !ecx.tcx.sess.opts.debugging_opts.mir_emit_retag || !Self::enforce_validity(ecx) {
// No tracking, or no retagging. The latter is possible because a dependency of ours
// might be called with different flags than we are, so there are `Retag`
// statements but we do not want to execute them.
// Also, honor the whitelist in `enforce_validity` because otherwise we might retag
// uninitialized data.
if !Self::enforce_validity(ecx) {
// No tracking.
Ok(())
} else {
ecx.retag(kind, place)
@ -354,7 +368,11 @@ impl AllocationExtra<Tag> for AllocExtra {
ptr: Pointer<Tag>,
size: Size,
) -> InterpResult<'tcx> {
alloc.extra.stacked_borrows.memory_read(ptr, size)
if let Some(ref stacked_borrows) = alloc.extra.stacked_borrows {
stacked_borrows.memory_read(ptr, size)
} else {
Ok(())
}
}
#[inline(always)]
@ -363,7 +381,11 @@ impl AllocationExtra<Tag> for AllocExtra {
ptr: Pointer<Tag>,
size: Size,
) -> InterpResult<'tcx> {
alloc.extra.stacked_borrows.memory_written(ptr, size)
if let Some(ref mut stacked_borrows) = alloc.extra.stacked_borrows {
stacked_borrows.memory_written(ptr, size)
} else {
Ok(())
}
}
#[inline(always)]
@ -372,7 +394,11 @@ impl AllocationExtra<Tag> for AllocExtra {
ptr: Pointer<Tag>,
size: Size,
) -> InterpResult<'tcx> {
alloc.extra.stacked_borrows.memory_deallocated(ptr, size)
if let Some(ref mut stacked_borrows) = alloc.extra.stacked_borrows {
stacked_borrows.memory_deallocated(ptr, size)
} else {
Ok(())
}
}
}

View file

@ -56,10 +56,12 @@ impl<'mir, 'tcx> EvalContextExt<'tcx> for super::MiriEvalContext<'mir, 'tcx> {
trace!("ptr_op: {:?} {:?} {:?}", *left, bin_op, *right);
// If intptrcast is enabled and the operation is not an offset
// we can force the cast from pointers to integer addresses and
// then dispatch to rustc binary operation method
if self.memory().extra.rng.is_some() && bin_op != Offset {
// If intptrcast is enabled, treat everything of integer *type* as integer *value*.
if self.memory().extra.rng.is_some() && left.layout.ty.is_integral() {
// This is actually an integer operation, so dispatch back to the core engine.
// TODO: Once intptrcast is the default, librustc_mir should never even call us
// for integer types.
assert!(right.layout.ty.is_integral());
let l_bits = self.force_bits(left.imm.to_scalar()?, left.layout.size)?;
let r_bits = self.force_bits(right.imm.to_scalar()?, right.layout.size)?;
@ -186,6 +188,13 @@ impl<'mir, 'tcx> EvalContextExt<'tcx> for super::MiriEvalContext<'mir, 'tcx> {
right: Scalar<Tag>,
) -> InterpResult<'tcx, bool> {
let size = self.pointer_size();
if self.memory().extra.rng.is_some() {
// Just compare the integers.
// TODO: Do we really want to *always* do that, even when comparing two live in-bounds pointers?
let left = self.force_bits(left, size)?;
let right = self.force_bits(right, size)?;
return Ok(left == right);
}
Ok(match (left, right) {
(Scalar::Raw { .. }, Scalar::Raw { .. }) =>
left.to_bits(size)? == right.to_bits(size)?,
@ -388,21 +397,24 @@ impl<'mir, 'tcx> EvalContextExt<'tcx> for super::MiriEvalContext<'mir, 'tcx> {
.checked_mul(pointee_size)
.ok_or_else(|| InterpError::Overflow(mir::BinOp::Mul))?;
// Now let's see what kind of pointer this is.
if let Scalar::Ptr(ptr) = ptr {
// Both old and new pointer must be in-bounds of a *live* allocation.
// (Of the same allocation, but that part is trivial with our representation.)
self.pointer_inbounds(ptr)?;
let ptr = ptr.signed_offset(offset, self)?;
self.pointer_inbounds(ptr)?;
Ok(Scalar::Ptr(ptr))
} else {
// An integer pointer. They can only be offset by 0, and we pretend there
// is a little zero-sized allocation here.
if offset == 0 {
Ok(ptr)
} else {
err!(InvalidPointerMath)
let ptr = if offset == 0 {
match ptr {
Scalar::Ptr(ptr) => ptr,
Scalar::Raw { .. } => {
// Offset 0 on an integer. We accept that, pretending there is
// a little zero-sized allocation here.
return Ok(ptr);
}
}
}
} else {
// Offset > 0. We *require* a pointer.
self.force_ptr(ptr)?
};
// Both old and new pointer must be in-bounds of a *live* allocation.
// (Of the same allocation, but that part is trivial with our representation.)
self.pointer_inbounds(ptr)?;
let ptr = ptr.signed_offset(offset, self)?;
self.pointer_inbounds(ptr)?;
Ok(Scalar::Ptr(ptr))
}
}

View file

@ -538,6 +538,7 @@ trait EvalContextPrivExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
// Get the allocation. It might not be mutable, so we cannot use `get_mut`.
let alloc = this.memory().get(ptr.alloc_id)?;
let stacked_borrows = alloc.extra.stacked_borrows.as_ref().expect("we should have Stacked Borrows data");
// Update the stacks.
// Make sure that raw pointers and mutable shared references are reborrowed "weak":
// There could be existing unique pointers reborrowed from them that should remain valid!
@ -553,14 +554,14 @@ trait EvalContextPrivExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
// We are only ever `SharedReadOnly` inside the frozen bits.
let perm = if frozen { Permission::SharedReadOnly } else { Permission::SharedReadWrite };
let item = Item { perm, tag: new_tag, protector };
alloc.extra.stacked_borrows.for_each(cur_ptr, size, |stack, global| {
stacked_borrows.for_each(cur_ptr, size, |stack, global| {
stack.grant(cur_ptr.tag, item, global)
})
});
}
};
let item = Item { perm, tag: new_tag, protector };
alloc.extra.stacked_borrows.for_each(ptr, size, |stack, global| {
stacked_borrows.for_each(ptr, size, |stack, global| {
stack.grant(ptr.tag, item, global)
})
}

View file

@ -1,4 +1,4 @@
// error-pattern: invalid arithmetic on pointers
// error-pattern: tried to interpret some bytes as a pointer
fn main() {
// Can't offset an integer pointer by non-zero offset.

View file

@ -69,7 +69,7 @@ fn compile_fail(path: &str, target: &str, opt: bool) {
run_tests("compile-fail", path, target, flags);
}
fn miri_pass(path: &str, target: &str, opt: bool) {
fn miri_pass(path: &str, target: &str, opt: bool, noseed: bool) {
let opt_str = if opt { " with optimizations" } else { "" };
eprintln!("{}", format!(
"## Running run-pass tests in {} against miri for target {}{}",
@ -81,6 +81,9 @@ fn miri_pass(path: &str, target: &str, opt: bool) {
let mut flags = Vec::new();
if opt {
flags.push("-Zmir-opt-level=3".to_owned());
} else if !noseed {
// Run with intptrcast. Avoid test matrix explosion by doing either this or opt-level=3.
flags.push("-Zmiri-seed=".to_owned());
}
run_tests("ui", path, target, flags);
@ -104,7 +107,8 @@ fn get_target() -> String {
}
fn run_pass_miri(opt: bool) {
miri_pass("tests/run-pass", &get_target(), opt);
miri_pass("tests/run-pass", &get_target(), opt, false);
miri_pass("tests/run-pass-noseed", &get_target(), opt, true);
}
fn compile_fail_miri(opt: bool) {

View file

@ -1,6 +0,0 @@
// compile-flags: -Zmiri-seed=
fn main() {
println!("Hello {}", 13);
println!("{:0<width$}", "hello", width = 10);
}

View file

@ -1,2 +0,0 @@
Hello 13
hello00000

View file

@ -1,3 +1,6 @@
// Validation disallows this because the reference is never cast to a raw pointer.
// compile-flags: -Zmiri-disable-validation
fn main() {
// If we are careful, we can exploit data layout...
let raw = unsafe {