Merge pull request #553 from solson/rustup

Stacked Borrows: Barriers
This commit is contained in:
Ralf Jung 2018-11-30 08:00:02 +01:00 committed by GitHub
commit 8d2bc97d7c
14 changed files with 369 additions and 238 deletions

View file

@@ -1 +1 @@
nightly-2018-11-26
nightly-2018-11-30

View file

@@ -289,7 +289,7 @@ impl<'tcx> Evaluator<'tcx> {
env_vars: HashMap::default(),
tls: TlsData::default(),
validate,
stacked_borrows: stacked_borrows::State::new(),
stacked_borrows: stacked_borrows::State::default(),
}
}
}
@@ -301,6 +301,8 @@ type MiriEvalContext<'a, 'mir, 'tcx> = EvalContext<'a, 'mir, 'tcx, Evaluator<'tc
impl<'a, 'mir, 'tcx> Machine<'a, 'mir, 'tcx> for Evaluator<'tcx> {
type MemoryKinds = MiriMemoryKind;
type FrameExtra = stacked_borrows::CallId;
type MemoryExtra = stacked_borrows::MemoryState;
type AllocExtra = stacked_borrows::Stacks;
type PointerTag = Borrow;
@@ -317,7 +319,6 @@ impl<'a, 'mir, 'tcx> Machine<'a, 'mir, 'tcx> for Evaluator<'tcx> {
// We walk up the stack a few frames to also cover their callees.
const WHITELIST: &[(&str, &str)] = &[
// Uses mem::uninitialized
("std::ptr::read", ""),
("std::sys::windows::mutex::Mutex::", ""),
];
for frame in ecx.stack().iter()
@@ -405,8 +406,9 @@ impl<'a, 'mir, 'tcx> Machine<'a, 'mir, 'tcx> for Evaluator<'tcx> {
}
fn find_foreign_static(
tcx: TyCtxtAt<'a, 'tcx, 'tcx>,
def_id: DefId,
tcx: TyCtxtAt<'a, 'tcx, 'tcx>,
memory_extra: &Self::MemoryExtra,
) -> EvalResult<'tcx, Cow<'tcx, Allocation<Borrow, Self::AllocExtra>>> {
let attrs = tcx.get_attrs(def_id);
let link_name = match attr::first_attr_value_str_by_name(&attrs, "link_name") {
@@ -417,8 +419,10 @@ impl<'a, 'mir, 'tcx> Machine<'a, 'mir, 'tcx> for Evaluator<'tcx> {
let alloc = match &link_name[..] {
"__cxa_thread_atexit_impl" => {
// This should be all-zero, pointer-sized
let data = vec![0; tcx.data_layout.pointer_size.bytes() as usize];
Allocation::from_bytes(&data[..], tcx.data_layout.pointer_align.abi)
let size = tcx.data_layout.pointer_size;
let data = vec![0; size.bytes() as usize];
let extra = AllocationExtra::memory_allocated(size, memory_extra);
Allocation::from_bytes(&data[..], tcx.data_layout.pointer_align.abi, extra)
}
_ => return err!(Unimplemented(
format!("can't access foreign static: {}", link_name),
@@ -434,9 +438,14 @@ impl<'a, 'mir, 'tcx> Machine<'a, 'mir, 'tcx> for Evaluator<'tcx> {
Ok(())
}
fn adjust_static_allocation(
alloc: &'_ Allocation
) -> Cow<'_, Allocation<Borrow, Self::AllocExtra>> {
fn adjust_static_allocation<'b>(
alloc: &'b Allocation,
memory_extra: &Self::MemoryExtra,
) -> Cow<'b, Allocation<Borrow, Self::AllocExtra>> {
let extra = AllocationExtra::memory_allocated(
Size::from_bytes(alloc.bytes.len() as u64),
memory_extra,
);
let alloc: Allocation<Borrow, Self::AllocExtra> = Allocation {
bytes: alloc.bytes.clone(),
relocations: Relocations::from_presorted(
@@ -447,7 +456,7 @@ impl<'a, 'mir, 'tcx> Machine<'a, 'mir, 'tcx> for Evaluator<'tcx> {
undef_mask: alloc.undef_mask.clone(),
align: alloc.align,
mutability: alloc.mutability,
extra: Self::AllocExtra::default(),
extra,
};
Cow::Owned(alloc)
}
@@ -529,4 +538,19 @@ impl<'a, 'mir, 'tcx> Machine<'a, 'mir, 'tcx> for Evaluator<'tcx> {
ecx.retag(fn_entry, place)
}
}
#[inline(always)]
fn stack_push(
ecx: &mut EvalContext<'a, 'mir, 'tcx, Self>,
) -> EvalResult<'tcx, stacked_borrows::CallId> {
Ok(ecx.memory().extra.borrow_mut().new_call())
}
#[inline(always)]
fn stack_pop(
ecx: &mut EvalContext<'a, 'mir, 'tcx, Self>,
extra: stacked_borrows::CallId,
) -> EvalResult<'tcx> {
Ok(ecx.memory().extra.borrow_mut().end_call(extra))
}
}
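The two new hooks bracket every function call: `stack_push` asks the memory's global state for a fresh `CallId` and stores it as the frame's extra data, and `stack_pop` retires that ID again, so a barrier only blocks accesses while its call is still running. A minimal self-contained sketch of that tracking pattern (the `CallTracker` name and the `main` driver are illustrative, not Miri's API):

use std::collections::HashSet;

// Illustrative stand-in for `stacked_borrows::BarrierTracking`.
struct CallTracker {
    next_id: u64,
    active_calls: HashSet<u64>,
}

impl CallTracker {
    fn new() -> Self {
        CallTracker { next_id: 0, active_calls: HashSet::new() }
    }

    // What `stack_push` triggers: hand out a fresh ID and mark it active.
    fn new_call(&mut self) -> u64 {
        let id = self.next_id;
        self.next_id += 1;
        self.active_calls.insert(id);
        id
    }

    // What `stack_pop` triggers: the ID must have been active until now.
    fn end_call(&mut self, id: u64) {
        assert!(self.active_calls.remove(&id));
    }

    // Barriers consult this: only barriers of still-active calls block accesses.
    fn is_active(&self, id: u64) -> bool {
        self.active_calls.contains(&id)
    }
}

fn main() {
    let mut tracker = CallTracker::new();
    let outer = tracker.new_call();     // a function gets called
    let inner = tracker.new_call();     // it calls another function
    assert!(tracker.is_active(outer) && tracker.is_active(inner));
    tracker.end_call(inner);            // the inner call returns
    assert!(!tracker.is_active(inner)); // its barriers no longer block anything
    tracker.end_call(outer);
}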

View file

@@ -16,13 +16,6 @@ pub struct RangeMap<T> {
map: BTreeMap<Range, T>,
}
impl<T> Default for RangeMap<T> {
#[inline(always)]
fn default() -> Self {
RangeMap::new()
}
}
// The derived `Ord` impl sorts first by the first field, then, if the fields are the same,
// by the second field.
// This is exactly what we need for our purposes, since a range query on a BTreeSet/BTreeMap will give us all
@@ -73,9 +66,15 @@ impl Range {
}
impl<T> RangeMap<T> {
/// Create a new RangeMap for the given size, and with the given initial value used for
/// the entire range.
#[inline(always)]
pub fn new() -> RangeMap<T> {
RangeMap { map: BTreeMap::new() }
pub fn new(size: Size, init: T) -> RangeMap<T> {
let mut map = RangeMap { map: BTreeMap::new() };
if size.bytes() > 0 {
map.map.insert(Range { start: 0, end: size.bytes() }, init);
}
map
}
fn iter_with_range<'a>(
@@ -95,6 +94,9 @@ impl<T> RangeMap<T> {
)
}
/// Provide read-only iteration over everything in the given range. This does
/// *not* split items if they overlap with the edges. Do not use this to mutate
/// through interior mutability.
pub fn iter<'a>(&'a self, offset: Size, len: Size) -> impl Iterator<Item = &'a T> + 'a {
self.iter_with_range(offset.bytes(), len.bytes()).map(|(_, data)| data)
}
@@ -140,8 +142,7 @@ impl<T> RangeMap<T> {
/// Provide mutable iteration over everything in the given range. As a side-effect,
/// this will split entries in the map that are only partially hit by the given range,
/// to make sure that when they are mutated, the effect is constrained to the given range.
/// If there are gaps, leave them be.
pub fn iter_mut_with_gaps<'a>(
pub fn iter_mut<'a>(
&'a mut self,
offset: Size,
len: Size,
@@ -174,64 +175,6 @@ impl<T> RangeMap<T> {
},
)
}
/// Provide a mutable iterator over everything in the given range, with the same side-effects as
/// iter_mut_with_gaps. Furthermore, if there are gaps between ranges, fill them with the given default
/// before yielding them in the iterator.
/// This is also how you insert.
pub fn iter_mut<'a>(&'a mut self, offset: Size, len: Size) -> impl Iterator<Item = &'a mut T> + 'a
where
T: Clone + Default,
{
if len.bytes() > 0 {
let offset = offset.bytes();
let len = len.bytes();
// Do a first iteration to collect the gaps
let mut gaps = Vec::new();
let mut last_end = offset;
for (range, _) in self.iter_with_range(offset, len) {
if last_end < range.start {
gaps.push(Range {
start: last_end,
end: range.start,
});
}
last_end = range.end;
}
if last_end < offset + len {
gaps.push(Range {
start: last_end,
end: offset + len,
});
}
// Add default for all gaps
for gap in gaps {
let old = self.map.insert(gap, Default::default());
assert!(old.is_none());
}
}
// Now provide mutable iteration
self.iter_mut_with_gaps(offset, len)
}
pub fn retain<F>(&mut self, mut f: F)
where
F: FnMut(&T) -> bool,
{
let mut remove = Vec::new();
for (range, data) in &self.map {
if !f(data) {
remove.push(*range);
}
}
for range in remove {
self.map.remove(&range);
}
}
}
#[cfg(test)]
@@ -239,14 +182,13 @@ mod tests {
use super::*;
/// Query the map at every offset in the range and collect the results.
fn to_vec<T: Copy>(map: &RangeMap<T>, offset: u64, len: u64, default: Option<T>) -> Vec<T> {
fn to_vec<T: Copy>(map: &RangeMap<T>, offset: u64, len: u64) -> Vec<T> {
(offset..offset + len)
.into_iter()
.map(|i| map
.iter(Size::from_bytes(i), Size::from_bytes(1))
.next()
.map(|&t| t)
.or(default)
.unwrap()
)
.collect()
@@ -254,13 +196,13 @@ mod tests {
#[test]
fn basic_insert() {
let mut map = RangeMap::<i32>::new();
let mut map = RangeMap::<i32>::new(Size::from_bytes(20), -1);
// Insert
for x in map.iter_mut(Size::from_bytes(10), Size::from_bytes(1)) {
*x = 42;
}
// Check
assert_eq!(to_vec(&map, 10, 1, None), vec![42]);
assert_eq!(to_vec(&map, 10, 1), vec![42]);
// Insert with size 0
for x in map.iter_mut(Size::from_bytes(10), Size::from_bytes(0)) {
@@ -269,12 +211,12 @@ mod tests {
for x in map.iter_mut(Size::from_bytes(11), Size::from_bytes(0)) {
*x = 19;
}
assert_eq!(to_vec(&map, 10, 2, Some(-1)), vec![42, -1]);
assert_eq!(to_vec(&map, 10, 2), vec![42, -1]);
}
#[test]
fn gaps() {
let mut map = RangeMap::<i32>::new();
let mut map = RangeMap::<i32>::new(Size::from_bytes(20), -1);
for x in map.iter_mut(Size::from_bytes(11), Size::from_bytes(1)) {
*x = 42;
}
@@ -282,11 +224,10 @@ mod tests {
*x = 43;
}
assert_eq!(
to_vec(&map, 10, 10, Some(-1)),
to_vec(&map, 10, 10),
vec![-1, 42, -1, -1, -1, 43, -1, -1, -1, -1]
);
// Now request a range that needs three gaps filled
for x in map.iter_mut(Size::from_bytes(10), Size::from_bytes(10)) {
if *x < 42 {
*x = 23;
@@ -294,9 +235,18 @@ mod tests {
}
assert_eq!(
to_vec(&map, 10, 10, None),
to_vec(&map, 10, 10),
vec![23, 42, 23, 23, 23, 43, 23, 23, 23, 23]
);
assert_eq!(to_vec(&map, 13, 5, None), vec![23, 23, 43, 23, 23]);
assert_eq!(to_vec(&map, 13, 5), vec![23, 23, 43, 23, 23]);
// Now request a range that goes beyond the initial size
for x in map.iter_mut(Size::from_bytes(15), Size::from_bytes(10)) {
*x = 19;
}
assert_eq!(map.iter(Size::from_bytes(19), Size::from_bytes(1))
.map(|&t| t).collect::<Vec<_>>(), vec![19]);
assert_eq!(map.iter(Size::from_bytes(20), Size::from_bytes(1))
.map(|&t| t).collect::<Vec<_>>(), vec![]);
}
}
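The net effect of the `RangeMap` rework above: the covered size and the initial value are fixed at construction time, so gaps can never occur and the `Default`-based gap filling (and `retain`) could be dropped. A hedged, self-contained sketch of the construction idea with a simplified point lookup (`SimpleRangeMap` is illustrative, not Miri's type):

use std::collections::BTreeMap;

// Each entry covers the byte range `start..end`, keyed by `start`.
struct SimpleRangeMap<T> {
    map: BTreeMap<u64, (u64, T)>,
}

impl<T> SimpleRangeMap<T> {
    // Like the new `RangeMap::new`: one entry covers the whole `0..size` range.
    fn new(size: u64, init: T) -> Self {
        let mut map = BTreeMap::new();
        if size > 0 {
            map.insert(0, (size, init));
        }
        SimpleRangeMap { map }
    }

    // Find the value covering `offset`, if any.
    fn get(&self, offset: u64) -> Option<&T> {
        self.map
            .range(..=offset)
            .next_back()
            .filter(|(_, (end, _))| offset < *end)
            .map(|(_, (_, data))| data)
    }
}

fn main() {
    let map = SimpleRangeMap::new(20, -1);
    assert_eq!(map.get(10), Some(&-1)); // inside the initialized range
    assert_eq!(map.get(20), None);      // past the fixed size, like the last test above
}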

View file

@@ -1,4 +1,6 @@
use std::cell::RefCell;
use std::collections::HashSet;
use std::rc::Rc;
use rustc::ty::{self, layout::Size};
use rustc::hir::{Mutability, MutMutable, MutImmutable};
@@ -10,6 +12,7 @@ use crate::{
};
pub type Timestamp = u64;
pub type CallId = u64;
/// Information about which kind of borrow was used to create the reference this is tagged
/// with.
@@ -59,18 +62,7 @@ pub enum BorStackItem {
/// when there is no `UnsafeCell`.
Shr,
/// A barrier, tracking the function it belongs to by its `CallId`
#[allow(dead_code)] // for future use
FnBarrier(usize)
}
impl BorStackItem {
#[inline(always)]
pub fn is_fn_barrier(self) -> bool {
match self {
BorStackItem::FnBarrier(_) => true,
_ => false,
}
}
FnBarrier(CallId)
}
/// Extra per-location state
@@ -80,15 +72,6 @@ pub struct Stack {
frozen_since: Option<Timestamp>, // virtual frozen "item" on top of the stack
}
impl Default for Stack {
fn default() -> Self {
Stack {
borrows: vec![BorStackItem::Shr],
frozen_since: None,
}
}
}
impl Stack {
#[inline(always)]
pub fn is_frozen(&self) -> bool {
@@ -107,17 +90,62 @@ pub enum RefKind {
Raw,
}
/// What kind of access is being performed?
#[derive(Copy, Clone, Debug, Hash, PartialEq, Eq)]
pub enum AccessKind {
Read,
Write,
Dealloc,
}
/// Extra global state in the memory, available to the memory access hooks
#[derive(Debug)]
pub struct BarrierTracking {
next_id: CallId,
active_calls: HashSet<CallId>,
}
pub type MemoryState = Rc<RefCell<BarrierTracking>>;
impl Default for BarrierTracking {
fn default() -> Self {
BarrierTracking {
next_id: 0,
active_calls: HashSet::default(),
}
}
}
impl BarrierTracking {
pub fn new_call(&mut self) -> CallId {
let id = self.next_id;
trace!("new_call: Assigning ID {}", id);
self.active_calls.insert(id);
self.next_id += 1;
id
}
pub fn end_call(&mut self, id: CallId) {
assert!(self.active_calls.remove(&id));
}
fn is_active(&self, id: CallId) -> bool {
self.active_calls.contains(&id)
}
}
/// Extra global machine state
#[derive(Clone, Debug)]
pub struct State {
clock: Timestamp
}
impl State {
pub fn new() -> State {
impl Default for State {
fn default() -> Self {
State { clock: 0 }
}
}
impl State {
fn increment_clock(&mut self) -> Timestamp {
let val = self.clock;
self.clock = val + 1;
@@ -126,10 +154,11 @@ impl State {
}
/// Extra per-allocation state
#[derive(Clone, Debug, Default)]
#[derive(Clone, Debug)]
pub struct Stacks {
// Even reading memory can have effects on the stack, so we need a `RefCell` here.
stacks: RefCell<RangeMap<Stack>>,
barrier_tracking: MemoryState,
}
/// Core per-location operations: deref, access, create.
@@ -150,7 +179,11 @@ impl<'tcx> Stack {
/// going to read or write.
/// Returns the index of the item we matched, `None` if it was the frozen one.
/// `kind` indicates which kind of reference is being dereferenced.
fn deref(&self, bor: Borrow, kind: RefKind) -> Result<Option<usize>, String> {
fn deref(
&self,
bor: Borrow,
kind: RefKind,
) -> Result<Option<usize>, String> {
// Exclude unique ref with frozen tag.
if let (RefKind::Unique, Borrow::Shr(Some(_))) = (kind, bor) {
return Err(format!("Encountered mutable reference with frozen tag ({:?})", bor));
@@ -172,7 +205,6 @@ impl<'tcx> Stack {
// If we got here, we have to look for our item in the stack.
for (idx, &itm) in self.borrows.iter().enumerate().rev() {
match (itm, bor) {
(BorStackItem::FnBarrier(_), _) => break,
(BorStackItem::Uniq(itm_t), Borrow::Uniq(bor_t)) if itm_t == bor_t => {
// Found matching unique item. This satisfies U3.
return Ok(Some(idx))
@@ -181,25 +213,29 @@ impl<'tcx> Stack {
// Found matching shared/raw item.
return Ok(Some(idx))
}
// Go on looking.
// Go on looking. We ignore barriers! When an `&mut` and an `&` alias,
// dereferencing the `&` is still possible (to reborrow), but doing
// an access is not.
_ => {}
}
}
// If we got here, we did not find our item. We have to error to satisfy U3.
Err(format!(
"Borrow being dereferenced ({:?}) does not exist on the stack, or is guarded by a barrier",
bor
))
Err(format!("Borrow being dereferenced ({:?}) does not exist on the stack", bor))
}
/// Perform an actual memory access using `bor`. We do not know any types here
/// or whether things should be frozen, but we *do* know if this is reading
/// or writing.
fn access(&mut self, bor: Borrow, is_write: bool) -> EvalResult<'tcx> {
fn access(
&mut self,
bor: Borrow,
kind: AccessKind,
barrier_tracking: &BarrierTracking,
) -> EvalResult<'tcx> {
// Check if we can match the frozen "item".
// Not possible on writes!
if self.is_frozen() {
if !is_write {
if kind == AccessKind::Read {
// When we are frozen, we just accept all reads. No harm in this.
// The deref already checked that `Uniq` items are in the stack, and that
// the location is frozen if it should be.
@@ -212,32 +248,52 @@ impl<'tcx> Stack {
// Pop the stack until we have something matching.
while let Some(&itm) = self.borrows.last() {
match (itm, bor) {
(BorStackItem::FnBarrier(_), _) => break,
(BorStackItem::Uniq(itm_t), Borrow::Uniq(bor_t)) if itm_t == bor_t => {
// Found matching unique item.
return Ok(())
(BorStackItem::FnBarrier(call), _) if barrier_tracking.is_active(call) => {
return err!(MachineError(format!(
"Stopping looking for borrow being accessed ({:?}) because of barrier ({})",
bor, call
)))
}
(BorStackItem::Shr, _) if !is_write => {
(BorStackItem::Uniq(itm_t), Borrow::Uniq(bor_t)) if itm_t == bor_t => {
// Found matching unique item. Continue after the match.
}
(BorStackItem::Shr, _) if kind == AccessKind::Read => {
// When reading, everything can use a shared item!
// We do not want to do this when writing: Writing to an `&mut`
// should reaffirm its exclusivity (i.e., make sure it is
// on top of the stack).
return Ok(())
// on top of the stack). Continue after the match.
}
(BorStackItem::Shr, Borrow::Shr(_)) => {
// Found matching shared item.
return Ok(())
// Found matching shared item. Continue after the match.
}
_ => {
// Pop this. This ensures U2.
// Pop this, go on. This ensures U2.
let itm = self.borrows.pop().unwrap();
trace!("access: Popping {:?}", itm);
continue
}
}
// If we got here, we found a matching item. Congratulations!
// However, we are not done yet: If this access is deallocating, we must make sure
// there are no active barriers remaining on the stack.
if kind == AccessKind::Dealloc {
for &itm in self.borrows.iter().rev() {
match itm {
BorStackItem::FnBarrier(call) if barrier_tracking.is_active(call) => {
return err!(MachineError(format!(
"Deallocating with active barrier ({})", call
)))
}
_ => {},
}
}
}
// NOW we are done.
return Ok(())
}
// If we got here, we did not find our item.
err!(MachineError(format!(
"Borrow being accessed ({:?}) does not exist on the stack, or is guarded by a barrier",
"Borrow being accessed ({:?}) does not exist on the stack",
bor
)))
}
@@ -247,18 +303,21 @@ impl<'tcx> Stack {
/// is met: We cannot push `Uniq` onto frozen stacks.
/// `kind` indicates which kind of reference is being created.
fn create(&mut self, bor: Borrow, kind: RefKind) {
// First, push the item. We do this even if we will later freeze, because we
// will allow mutation of shared data at the expense of unfreezing.
if self.frozen_since.is_some() {
// A frozen location, this should be impossible!
bug!("We should never try pushing to a frozen stack");
// A frozen location? Possible if we create a barrier, then push again.
assert!(bor.is_shared(), "We should never try creating a unique borrow for a frozen stack");
trace!("create: Not doing anything on frozen location");
return;
}
// First, push.
// First, push. We do this even if we will later freeze, because we
// will allow mutation of shared data at the expense of unfreezing.
let itm = match bor {
Borrow::Uniq(t) => BorStackItem::Uniq(t),
Borrow::Shr(_) => BorStackItem::Shr,
};
if *self.borrows.last().unwrap() == itm {
// This is just an optimization, no functional change: Avoid stacking
// multiple `Shr` on top of each other.
assert!(bor.is_shared());
trace!("create: Sharing a shared location is a NOP");
} else {
@@ -276,6 +335,21 @@ impl<'tcx> Stack {
self.frozen_since = Some(bor_t);
}
}
/// Add a barrier
fn barrier(&mut self, call: CallId) {
let itm = BorStackItem::FnBarrier(call);
if *self.borrows.last().unwrap() == itm {
// This is just an optimization, no functional change: Avoid stacking
// multiple identical barriers on top of each other.
// This can happen when a function receives several shared references
// that overlap.
trace!("barrier: Avoiding redundant extra barrier");
} else {
trace!("barrier: Pushing barrier for call {}", call);
self.borrows.push(itm);
}
}
}
/// Higher-level per-location operations: deref, access, reborrow.
@@ -289,9 +363,8 @@ impl<'tcx> Stacks {
) -> EvalResult<'tcx> {
trace!("deref for tag {:?} as {:?}: {:?}, size {}",
ptr.tag, kind, ptr, size.bytes());
let mut stacks = self.stacks.borrow_mut();
// We need `iter_mut` because `iter` would skip gaps!
for stack in stacks.iter_mut(ptr.offset, size) {
let stacks = self.stacks.borrow();
for stack in stacks.iter(ptr.offset, size) {
stack.deref(ptr.tag, kind).map_err(EvalErrorKind::MachineError)?;
}
Ok(())
@@ -302,17 +375,16 @@ impl<'tcx> Stacks {
&self,
ptr: Pointer<Borrow>,
size: Size,
is_write: bool,
kind: AccessKind,
) -> EvalResult<'tcx> {
trace!("{} access of tag {:?}: {:?}, size {}",
if is_write { "read" } else { "write" },
ptr.tag, ptr, size.bytes());
trace!("{:?} access of tag {:?}: {:?}, size {}", kind, ptr.tag, ptr, size.bytes());
// Even reads can have a side-effect, by invalidating other references.
// This is fundamentally necessary since `&mut` asserts that there
// are no accesses through other references, not even reads.
let barrier_tracking = self.barrier_tracking.borrow();
let mut stacks = self.stacks.borrow_mut();
for stack in stacks.iter_mut(ptr.offset, size) {
stack.access(ptr.tag, is_write)?;
stack.access(ptr.tag, kind, &*barrier_tracking)?;
}
Ok(())
}
@@ -323,12 +395,27 @@ impl<'tcx> Stacks {
&self,
ptr: Pointer<Borrow>,
size: Size,
mut barrier: Option<CallId>,
new_bor: Borrow,
new_kind: RefKind,
) -> EvalResult<'tcx> {
assert_eq!(new_bor.is_unique(), new_kind == RefKind::Unique);
trace!("reborrow for tag {:?} to {:?} as {:?}: {:?}, size {}",
ptr.tag, new_bor, new_kind, ptr, size.bytes());
if new_kind == RefKind::Raw {
// No barrier for raw, including `&UnsafeCell`. They can rightfully
// alias with `&mut`.
// FIXME: This means that the `dereferenceable` attribute on non-frozen shared
// references is incorrect! They are dereferenceable when the function is
// called, but might become non-dereferenceable during the course of execution.
// Also see [1], [2].
//
// [1]: <https://internals.rust-lang.org/t/
// is-it-possible-to-be-memory-safe-with-deallocated-self/8457/8>,
// [2]: <https://lists.llvm.org/pipermail/llvm-dev/2018-July/124555.html>
barrier = None;
}
let barrier_tracking = self.barrier_tracking.borrow();
let mut stacks = self.stacks.borrow_mut();
for stack in stacks.iter_mut(ptr.offset, size) {
// Access source `ptr`, create new ref.
@@ -337,21 +424,30 @@ impl<'tcx> Stacks {
// the stack than the one we come from, just use that.
// IOW, we check if `new_bor` *already* is "derived from" `ptr.tag`.
// This also checks frozenness, if required.
let bor_redundant = match (ptr_idx, stack.deref(new_bor, new_kind)) {
// If the new borrow works with the frozen item, or else if it lives
// above the old one in the stack, our job here is done.
(_, Ok(None)) => true,
(Some(ptr_idx), Ok(Some(new_idx))) if new_idx >= ptr_idx => true,
// Otherwise we need to create a new borrow.
_ => false,
};
let bor_redundant = barrier.is_none() &&
match (ptr_idx, stack.deref(new_bor, new_kind)) {
// If the new borrow works with the frozen item, or else if it lives
// above the old one in the stack, our job here is done.
(_, Ok(None)) => true,
(Some(ptr_idx), Ok(Some(new_idx))) if new_idx >= ptr_idx => true,
// Otherwise we need to create a new borrow.
_ => false,
};
if bor_redundant {
assert!(new_bor.is_shared(), "A unique reborrow can never be redundant");
trace!("reborrow is redundant");
continue;
}
// We need to do some actual work.
stack.access(ptr.tag, new_kind == RefKind::Unique)?;
let access_kind = if new_kind == RefKind::Unique {
AccessKind::Write
} else {
AccessKind::Read
};
stack.access(ptr.tag, access_kind, &*barrier_tracking)?;
if let Some(call) = barrier {
stack.barrier(call);
}
stack.create(new_bor, new_kind);
}
Ok(())
@@ -359,14 +455,26 @@ impl<'tcx> Stacks {
}
/// Hooks and glue
impl AllocationExtra<Borrow> for Stacks {
impl AllocationExtra<Borrow, MemoryState> for Stacks {
#[inline(always)]
fn memory_allocated<'tcx>(size: Size, extra: &MemoryState) -> Self {
let stack = Stack {
borrows: vec![BorStackItem::Shr],
frozen_since: None,
};
Stacks {
stacks: RefCell::new(RangeMap::new(size, stack)),
barrier_tracking: Rc::clone(extra),
}
}
#[inline(always)]
fn memory_read<'tcx>(
alloc: &Allocation<Borrow, Stacks>,
ptr: Pointer<Borrow>,
size: Size,
) -> EvalResult<'tcx> {
alloc.extra.access(ptr, size, /*is_write*/false)
alloc.extra.access(ptr, size, AccessKind::Read)
}
#[inline(always)]
@@ -375,7 +483,7 @@ impl AllocationExtra<Borrow> for Stacks {
ptr: Pointer<Borrow>,
size: Size,
) -> EvalResult<'tcx> {
alloc.extra.access(ptr, size, /*is_write*/true)
alloc.extra.access(ptr, size, AccessKind::Write)
}
#[inline(always)]
@@ -384,20 +492,17 @@ impl AllocationExtra<Borrow> for Stacks {
ptr: Pointer<Borrow>,
size: Size,
) -> EvalResult<'tcx> {
// This is like mutating
alloc.extra.access(ptr, size, /*is_write*/true)
// FIXME: Error out if there are any barriers?
alloc.extra.access(ptr, size, AccessKind::Dealloc)
}
}
impl<'tcx> Stacks {
/// Pushes the first item to the stacks.
pub fn first_item(
pub(crate) fn first_item(
&mut self,
itm: BorStackItem,
size: Size
) {
assert!(!itm.is_fn_barrier());
for stack in self.stacks.get_mut().iter_mut(Size::ZERO, size) {
assert!(stack.borrows.len() == 1);
assert_eq!(stack.borrows.pop().unwrap(), BorStackItem::Shr);
@@ -427,6 +532,7 @@ pub trait EvalContextExt<'tcx> {
&mut self,
place: MPlaceTy<'tcx, Borrow>,
size: Size,
fn_barrier: bool,
new_bor: Borrow
) -> EvalResult<'tcx, Pointer<Borrow>>;
@@ -435,6 +541,7 @@ pub trait EvalContextExt<'tcx> {
&mut self,
ptr: ImmTy<'tcx, Borrow>,
mutbl: Mutability,
fn_barrier: bool,
) -> EvalResult<'tcx, Immediate<Borrow>>;
fn retag(
@@ -527,7 +634,7 @@ impl<'a, 'mir, 'tcx> EvalContextExt<'tcx> for MiriEvalContext<'a, 'mir, 'tcx> {
place: MPlaceTy<'tcx, Borrow>,
size: Size,
) -> EvalResult<'tcx> {
self.reborrow(place, size, Borrow::default())?;
self.reborrow(place, size, /*fn_barrier*/ false, Borrow::default())?;
Ok(())
}
@@ -535,10 +642,12 @@ impl<'a, 'mir, 'tcx> EvalContextExt<'tcx> for MiriEvalContext<'a, 'mir, 'tcx> {
&mut self,
place: MPlaceTy<'tcx, Borrow>,
size: Size,
fn_barrier: bool,
new_bor: Borrow
) -> EvalResult<'tcx, Pointer<Borrow>> {
let ptr = place.ptr.to_ptr()?;
let new_ptr = Pointer::new_with_tag(ptr.alloc_id, ptr.offset, new_bor);
let barrier = if fn_barrier { Some(self.frame().extra) } else { None };
trace!("reborrow: Creating new reference for {:?} (pointee {}): {:?}",
ptr, place.layout.ty, new_bor);
@@ -550,12 +659,12 @@ impl<'a, 'mir, 'tcx> EvalContextExt<'tcx> for MiriEvalContext<'a, 'mir, 'tcx> {
// Reference that cares about freezing. We need a frozen-sensitive reborrow.
self.visit_freeze_sensitive(place, size, |cur_ptr, size, frozen| {
let kind = if frozen { RefKind::Frozen } else { RefKind::Raw };
alloc.extra.reborrow(cur_ptr, size, new_bor, kind)
alloc.extra.reborrow(cur_ptr, size, barrier, new_bor, kind)
})?;
} else {
// Just treat this as one big chunk.
let kind = if new_bor.is_unique() { RefKind::Unique } else { RefKind::Raw };
alloc.extra.reborrow(ptr, size, new_bor, kind)?;
alloc.extra.reborrow(ptr, size, barrier, new_bor, kind)?;
}
Ok(new_ptr)
}
@@ -564,6 +673,7 @@ impl<'a, 'mir, 'tcx> EvalContextExt<'tcx> for MiriEvalContext<'a, 'mir, 'tcx> {
&mut self,
val: ImmTy<'tcx, Borrow>,
mutbl: Mutability,
fn_barrier: bool,
) -> EvalResult<'tcx, Immediate<Borrow>> {
// We want a place for where the ptr *points to*, so we get one.
let place = self.ref_to_mplace(val)?;
@@ -583,7 +693,7 @@ impl<'a, 'mir, 'tcx> EvalContextExt<'tcx> for MiriEvalContext<'a, 'mir, 'tcx> {
};
// Reborrow.
let new_ptr = self.reborrow(place, size, new_bor)?;
let new_ptr = self.reborrow(place, size, fn_barrier, new_bor)?;
// Return new ptr
let new_place = MemPlace { ptr: Scalar::Ptr(new_ptr), ..*place };
@@ -592,35 +702,42 @@ impl<'a, 'mir, 'tcx> EvalContextExt<'tcx> for MiriEvalContext<'a, 'mir, 'tcx> {
fn retag(
&mut self,
_fn_entry: bool,
fn_entry: bool,
place: PlaceTy<'tcx, Borrow>
) -> EvalResult<'tcx> {
// TODO: Honor `fn_entry`.
// Determine mutability and whether to add a barrier.
// Cannot use `builtin_deref` because that reports *immutable* for `Box`,
// making it useless.
fn qualify(ty: ty::Ty<'_>, fn_entry: bool) -> Option<(Mutability, bool)> {
match ty.sty {
// References are simple
ty::Ref(_, _, mutbl) => Some((mutbl, fn_entry)),
// Boxes do not get a barrier: Barriers reflect that references outlive the call
// they were passed in to; that's just not the case for boxes.
ty::Adt(..) if ty.is_box() => Some((MutMutable, false)),
_ => None,
}
}
// We need a visitor to visit all references. However, that requires
// a `MemPlace`, so we have a fast path for reference types that
// avoids allocating.
// Cannot use `builtin_deref` because that reports *immutable* for `Box`,
// making it useless.
if let Some(mutbl) = match place.layout.ty.sty {
ty::Ref(_, _, mutbl) => Some(mutbl),
ty::Adt(..) if place.layout.ty.is_box() => Some(MutMutable),
_ => None, // handled with the general case below
} {
if let Some((mutbl, barrier)) = qualify(place.layout.ty, fn_entry) {
// fast path
let val = self.read_immediate(self.place_to_op(place)?)?;
let val = self.retag_reference(val, mutbl)?;
let val = self.retag_reference(val, mutbl, barrier)?;
self.write_immediate(val, place)?;
return Ok(());
}
let place = self.force_allocation(place)?;
let mut visitor = RetagVisitor { ecx: self };
let mut visitor = RetagVisitor { ecx: self, fn_entry };
visitor.visit_value(place)?;
// The actual visitor
struct RetagVisitor<'ecx, 'a, 'mir, 'tcx> {
ecx: &'ecx mut MiriEvalContext<'a, 'mir, 'tcx>,
fn_entry: bool,
}
impl<'ecx, 'a, 'mir, 'tcx>
MutValueVisitor<'a, 'mir, 'tcx, Evaluator<'tcx>>
@@ -639,14 +756,11 @@ impl<'a, 'mir, 'tcx> EvalContextExt<'tcx> for MiriEvalContext<'a, 'mir, 'tcx> {
{
// Cannot use `builtin_deref` because that reports *immutable* for `Box`,
// making it useless.
let mutbl = match place.layout.ty.sty {
ty::Ref(_, _, mutbl) => mutbl,
ty::Adt(..) if place.layout.ty.is_box() => MutMutable,
_ => return Ok(()), // nothing to do
};
let val = self.ecx.read_immediate(place.into())?;
let val = self.ecx.retag_reference(val, mutbl)?;
self.ecx.write_immediate(val, place.into())?;
if let Some((mutbl, barrier)) = qualify(place.layout.ty, self.fn_entry) {
let val = self.ecx.read_immediate(place.into())?;
let val = self.ecx.retag_reference(val, mutbl, barrier)?;
self.ecx.write_immediate(val, place.into())?;
}
Ok(())
}
}
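Taken together, a hedged sketch of the kind of program the new barriers catch, condensed from the compile-fail tests further down: retagging `y` at the entry of `inner` pushes a `FnBarrier` for the current `CallId` beneath the fresh `Uniq` item, so an access through the aliasing raw pointer would have to pop past an active barrier, which Miri now reports as an error.

fn inner(x: *mut i32, y: &mut i32) {
    *y = 1; // fine: `y` sits on top of the stack
    // `x` aliases `y`, and the barrier pushed for `y` is still active
    // for this call, so Miri rejects the access. (This compiles fine;
    // the error only shows up under Miri.)
    let _val = unsafe { *x };
}

fn main() {
    let mut x = 0;
    let xraw = &mut x as *mut _;
    let xref = unsafe { &mut *xraw };
    inner(xraw, xref);
}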

View file

@@ -1,12 +1,17 @@
// ignore-test validation_op is disabled
#![allow(unused_variables)]
mod safe {
pub fn safe(x: &mut i32, y: &mut i32) {} //~ ERROR: in conflict with lock WriteLock
}
use std::mem;
pub fn safe(x: &mut i32, y: &mut i32) {} //~ ERROR barrier
fn main() {
let x = &mut 0 as *mut _;
unsafe { safe::safe(&mut *x, &mut *x) };
let mut x = 0;
let xraw: *mut i32 = unsafe { mem::transmute(&mut x) };
// We need to apply some trickery to be able to call `safe` with two mutable references
// with the same tag: We transmute both the fn ptr (to take raw ptrs) and the argument
// (to be raw, but still have the unique tag).
let safe_raw: fn(x: *mut i32, y: *mut i32) = unsafe {
mem::transmute::<fn(&mut i32, &mut i32), _>(safe)
};
safe_raw(xraw, xraw);
}

View file

@@ -1,12 +1,17 @@
// ignore-test validation_op is disabled
#![allow(unused_variables)]
mod safe {
pub fn safe(x: &i32, y: &mut i32) {} //~ ERROR: in conflict with lock ReadLock
}
use std::mem;
pub fn safe(x: &i32, y: &mut i32) {} //~ ERROR barrier
fn main() {
let x = &mut 0 as *mut _;
unsafe { safe::safe(&*x, &mut *x) };
let mut x = 0;
let xref = &mut x;
let xraw: *mut i32 = unsafe { mem::transmute_copy(&xref) };
let xshr = &*xref;
// transmute fn ptr around so that we can avoid retagging
let safe_raw: fn(x: *const i32, y: *mut i32) = unsafe {
mem::transmute::<fn(&i32, &mut i32), _>(safe)
};
safe_raw(xshr, xraw);
}

View file

@@ -1,12 +1,17 @@
// ignore-test validation_op is disabled
#![allow(unused_variables)]
mod safe {
pub fn safe(x: &mut i32, y: &i32) {} //~ ERROR: in conflict with lock WriteLock
}
use std::mem;
pub fn safe(x: &mut i32, y: &i32) {} //~ ERROR does not exist on the stack
fn main() {
let x = &mut 0 as *mut _;
unsafe { safe::safe(&mut *x, &*x) };
let mut x = 0;
let xref = &mut x;
let xraw: *mut i32 = unsafe { mem::transmute_copy(&xref) };
let xshr = &*xref;
// transmute fn ptr around so that we can avoid retagging
let safe_raw: fn(x: *mut i32, y: *const i32) = unsafe {
mem::transmute::<fn(&mut i32, &i32), _>(safe)
};
safe_raw(xraw, xshr);
}

View file

@@ -1,15 +1,19 @@
// ignore-test validation_op is disabled
#![allow(unused_variables)]
mod safe {
use std::cell::Cell;
use std::mem;
use std::cell::Cell;
// Make sure &mut UnsafeCell also has a lock to it
pub fn safe(x: &mut Cell<i32>, y: &i32) {} //~ ERROR: in conflict with lock WriteLock
}
// Make sure &mut UnsafeCell is also exclusive
pub fn safe(x: &i32, y: &mut Cell<i32>) {} //~ ERROR barrier
fn main() {
let x = &mut 0 as *mut _;
unsafe { safe::safe(&mut *(x as *mut _), &*x) };
let mut x = 0;
let xref = &mut x;
let xraw: *mut i32 = unsafe { mem::transmute_copy(&xref) };
let xshr = &*xref;
// transmute fn ptr around so that we can avoid retagging
let safe_raw: fn(x: *const i32, y: *mut Cell<i32>) = unsafe {
mem::transmute::<fn(&i32, &mut Cell<i32>), _>(safe)
};
safe_raw(xshr, xraw as *mut _);
}

View file

@@ -0,0 +1,13 @@
// error-pattern: Deallocating with active barrier
fn inner(x: &mut i32, f: fn(&mut i32)) {
// `f` may mutate, but it may not deallocate!
f(x)
}
fn main() {
inner(Box::leak(Box::new(0)), |x| {
let raw = x as *mut _;
drop(unsafe { Box::from_raw(raw) });
});
}
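For contrast, a hedged sketch of the shape this test still permits: the callee may mutate through the barrier-protected reference, it just must not deallocate it while the barrier is active.

fn inner(x: &mut i32, f: fn(&mut i32)) {
    // Same shape as above, but `f` only mutates.
    f(x)
}

fn main() {
    inner(Box::leak(Box::new(0)), |x| {
        *x = 42; // mutation through the &mut is fine; no barrier is violated
    });
}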

View file

@@ -1,12 +1,9 @@
fn evil(x: &u32) {
// mutating shared ref without `UnsafeCell`
let x : *mut u32 = x as *const _ as *mut _;
unsafe { *x = 42; }
}
fn main() {
let target = Box::new(42); // has an implicit raw
let ref_ = &*target;
evil(ref_); // invalidates shared ref, activates raw
let _x = *ref_; //~ ERROR is not frozen
let xref = &*target;
{
let x : *mut u32 = xref as *const _ as *mut _;
unsafe { *x = 42; } // invalidates shared ref, activates raw
}
let _x = *xref; //~ ERROR is not frozen
}

View file

@@ -0,0 +1,13 @@
fn inner(x: *mut i32, _y: &mut i32) {
// If `x` and `y` alias, retagging is fine with this... but we really
// shouldn't be allowed to use `x` at all because `y` was assumed to be
// unique for the duration of this call.
let _val = unsafe { *x }; //~ ERROR barrier
}
fn main() {
let mut x = 0;
let xraw = &mut x as *mut _;
let xref = unsafe { &mut *xraw };
inner(xraw, xref);
}

View file

@@ -0,0 +1,13 @@
fn inner(x: *mut i32, _y: &i32) {
// If `x` and `y` alias, retagging is fine with this... but we really
// shouldn't be allowed to write to `x` at all because `y` was assumed to be
// immutable for the duration of this call.
unsafe { *x = 0 }; //~ ERROR barrier
}
fn main() {
let mut x = 0;
let xraw = &mut x as *mut _;
let xref = unsafe { &*xraw };
inner(xraw, xref);
}

View file

@@ -21,7 +21,7 @@ fn unknown_code_1(x: &i32) { unsafe {
} }
fn unknown_code_2() { unsafe {
*LEAK = 7; //~ ERROR does not exist on the stack
*LEAK = 7; //~ ERROR barrier
} }
fn main() {

View file

@@ -1,12 +0,0 @@
#![allow(unused_variables)]
// error-pattern: encountered undefined address in pointer
use std::mem;
fn make_raw() -> *const f32 {
unsafe { mem::uninitialized() }
}
fn main() {
let _x = make_raw();
}