suspend lvalues, not locks. refactor memory locking.

Due to this, we are back down to 88 tests
This commit is contained in:
Ralf Jung 2017-07-19 19:45:25 -07:00 committed by Oliver Schneider
parent 0b15db0cc2
commit ec6d289c5b
No known key found for this signature in database
GPG key ID: A69F8D225B3AD7D9
6 changed files with 247 additions and 171 deletions

View file

@ -202,6 +202,7 @@ fn main() {
// for auxiliary builds in unit tests
args.push("-Zalways-encode-mir".to_owned());
args.push("-Zmir-emit-validate".to_owned());
rustc_driver::run_compiler(&args, &mut MiriCompilerCalls(RustcDefaultCalls), None, None);
}

View file

@ -54,9 +54,16 @@ pub enum EvalError<'tcx> {
MemoryLockViolation {
ptr: MemoryPointer,
len: u64,
frame: usize,
access: AccessKind,
lock: LockInfo,
},
MemoryAcquireConflict {
ptr: MemoryPointer,
len: u64,
kind: AccessKind,
lock: LockInfo,
},
InvalidMemoryLockRelease {
ptr: MemoryPointer,
len: u64,
@ -114,10 +121,12 @@ impl<'tcx> Error for EvalError<'tcx> {
"invalid use of NULL pointer",
MemoryLockViolation { .. } =>
"memory access conflicts with lock",
MemoryAcquireConflict { .. } =>
"new memory lock conflicts with existing lock",
ValidationFailure(..) =>
"type validation failed",
InvalidMemoryLockRelease { .. } =>
"memory lock released that was never acquired",
"invalid attempt to release write lock",
DeallocatedLockedMemory { .. } =>
"tried to deallocate memory in conflict with a lock",
ReadPointerAsBytes =>
@ -219,12 +228,16 @@ impl<'tcx> fmt::Display for EvalError<'tcx> {
if access { "memory access" } else { "pointer computed" },
ptr.offset, ptr.alloc_id, allocation_size)
},
MemoryLockViolation { ptr, len, access, ref lock } => {
write!(f, "{:?} access at {:?}, size {}, is in conflict with lock {:?}",
access, ptr, len, lock)
MemoryLockViolation { ptr, len, frame, access, ref lock } => {
write!(f, "{:?} access by frame {} at {:?}, size {}, is in conflict with lock {:?}",
access, frame, ptr, len, lock)
}
MemoryAcquireConflict { ptr, len, kind, ref lock } => {
write!(f, "new {:?} lock at {:?}, size {}, is in conflict with lock {:?}",
kind, ptr, len, lock)
}
InvalidMemoryLockRelease { ptr, len } => {
write!(f, "tried to release memory write lock at {:?}, size {}, but the write lock is held by someone else",
write!(f, "tried to release memory write lock at {:?}, size {}, but the write lock is held by someone else or its a read lock",
ptr, len)
}
DeallocatedLockedMemory { ptr, ref lock } => {

View file

@ -4,6 +4,7 @@ use std::fmt::Write;
use rustc::hir::def_id::DefId;
use rustc::hir::map::definitions::DefPathData;
use rustc::middle::const_val::ConstVal;
use rustc::middle::region::CodeExtent;
use rustc::mir;
use rustc::traits::Reveal;
use rustc::ty::layout::{self, Layout, Size};
@ -21,6 +22,7 @@ use memory::{Memory, MemoryPointer, TlsKey, HasMemory};
use memory::Kind as MemoryKind;
use operator;
use value::{PrimVal, PrimValKind, Value, Pointer};
use validation::ValidationQuery;
pub struct EvalContext<'a, 'tcx: 'a> {
/// The results of the type checker, from rustc.
@ -29,6 +31,9 @@ pub struct EvalContext<'a, 'tcx: 'a> {
/// The virtual memory system.
pub(crate) memory: Memory<'a, 'tcx>,
/// Lvalues that were suspended by the validation subsystem, and will be recovered later
pub(crate) suspended: HashMap<DynamicLifetime, Vec<ValidationQuery<'tcx>>>,
/// Precomputed statics, constants and promoteds.
pub(crate) globals: HashMap<GlobalId<'tcx>, Global<'tcx>>,
@ -112,6 +117,12 @@ pub enum StackPopCleanup {
None,
}
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
pub struct DynamicLifetime {
pub frame: usize,
pub region: Option<CodeExtent>, // "None" indicates "until the function ends"
}
#[derive(Copy, Clone, Debug)]
pub struct ResourceLimits {
pub memory_size: u64,
@ -134,6 +145,7 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
EvalContext {
tcx,
memory: Memory::new(&tcx.data_layout, limits.memory_size),
suspended: HashMap::new(),
globals: HashMap::new(),
stack: Vec::new(),
stack_limit: limits.stack_limit,

View file

@ -9,7 +9,7 @@ use rustc::middle::region::CodeExtent;
use error::{EvalError, EvalResult};
use value::{PrimVal, Pointer};
use eval_context::EvalContext;
use eval_context::{EvalContext, DynamicLifetime};
////////////////////////////////////////////////////////////////////////////////
// Locks
@ -24,7 +24,7 @@ mod range {
// `MemoryRange`s whose `start` is <= the one we're looking for, but not > the end of the range we're checking.
// At the same time the `end` is irrelevant for the sorting and range searching, but used for the check.
// This kind of search breaks, if `end < start`, so don't do that!
#[derive(Eq, PartialEq, Ord, PartialOrd, Debug)]
#[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Debug)]
pub struct MemoryRange {
start: u64,
end: u64,
@ -39,14 +39,6 @@ mod range {
}
}
pub fn offset(&self) -> u64 {
self.start
}
pub fn len(&self) -> u64 {
self.end - self.start
}
pub fn range(offset: u64, len: u64) -> ops::Range<MemoryRange> {
assert!(len > 0);
// We select all elements that are within
@ -83,33 +75,21 @@ pub enum AccessKind {
Write,
}
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
struct DynamicLifetime {
pub frame: usize,
pub region: Option<CodeExtent>, // "None" indicates "until the function ends"
}
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
enum LockStatus {
Held,
RecoverAfter(CodeExtent), // the frame is given by the surrounding LockInfo's lifetime.
}
/// Information about a lock that is or will be held.
/// Information about a lock that is currently held.
#[derive(Clone, Debug)]
pub struct LockInfo {
kind: AccessKind,
lifetime: DynamicLifetime,
status: LockStatus,
pub enum LockInfo {
WriteLock(DynamicLifetime),
ReadLock(Vec<DynamicLifetime>), // This should never be empty -- that would be a read lock held and nobody there to release it...
}
use self::LockInfo::*;
impl LockInfo {
fn access_permitted(&self, frame: usize, access: AccessKind) -> bool {
fn access_permitted(&self, frame: Option<usize>, access: AccessKind) -> bool {
use self::AccessKind::*;
match (self.kind, access) {
(Read, Read) => true, // Read access to read-locked region is okay, no matter who's holding the read lock.
(Write, _) if self.lifetime.frame == frame => true, // All access is okay when we hold the write lock.
_ => false, // Somebody else holding the write lock is not okay
match (self, access) {
(&ReadLock(_), Read) => true, // Read access to read-locked region is okay, no matter who's holding the read lock.
(&WriteLock(ref lft), _) if Some(lft.frame) == frame => true, // All access is okay when we hold the write lock.
_ => false, // Nothing else is okay.
}
}
}
@ -146,28 +126,27 @@ pub struct Allocation {
/// Helps guarantee that stack allocations aren't deallocated via `rust_deallocate`
pub kind: Kind,
/// Memory regions that are locked by some function
locks: BTreeMap<MemoryRange, Vec<LockInfo>>,
locks: BTreeMap<MemoryRange, LockInfo>,
}
impl Allocation {
fn iter_locks<'a>(&'a self, offset: u64, len: u64) -> impl Iterator<Item=&'a LockInfo> + 'a {
fn iter_locks<'a>(&'a self, offset: u64, len: u64) -> impl Iterator<Item=(&'a MemoryRange, &'a LockInfo)> + 'a {
self.locks.range(MemoryRange::range(offset, len))
.filter(move |&(range, _)| range.overlaps(offset, len))
.flat_map(|(_, locks)| locks.iter())
}
fn iter_lock_vecs_mut<'a>(&'a mut self, offset: u64, len: u64) -> impl Iterator<Item=(&'a MemoryRange, &'a mut Vec<LockInfo>)> + 'a {
fn iter_locks_mut<'a>(&'a mut self, offset: u64, len: u64) -> impl Iterator<Item=(&'a MemoryRange, &'a mut LockInfo)> + 'a {
self.locks.range_mut(MemoryRange::range(offset, len))
.filter(move |&(range, _)| range.overlaps(offset, len))
}
fn check_locks<'tcx>(&self, frame: usize, offset: u64, len: u64, access: AccessKind) -> Result<(), LockInfo> {
fn check_locks<'tcx>(&self, frame: Option<usize>, offset: u64, len: u64, access: AccessKind) -> Result<(), LockInfo> {
if len == 0 {
return Ok(())
}
for lock in self.iter_locks(offset, len) {
// Check if the lock is active, and is in conflict with the access.
if lock.status == LockStatus::Held && !lock.access_permitted(frame, access) {
for (_, lock) in self.iter_locks(offset, len) {
// Check if the lock is in conflict with the access.
if !lock.access_permitted(frame, access) {
return Err(lock.clone());
}
}
@ -399,7 +378,7 @@ impl<'a, 'tcx> Memory<'a, 'tcx> {
// However, we should check *something*. For now, we make sure that there is no conflicting write
// lock by another frame. We *have* to permit deallocation if we hold a read lock.
// TODO: Figure out the exact rules here.
alloc.check_locks(self.cur_frame, 0, alloc.bytes.len() as u64, AccessKind::Read)
alloc.check_locks(Some(self.cur_frame), 0, alloc.bytes.len() as u64, AccessKind::Read)
.map_err(|lock| EvalError::DeallocatedLockedMemory { ptr, lock })?;
if alloc.kind != kind {
@ -553,88 +532,113 @@ impl<'a, 'tcx> Memory<'a, 'tcx> {
return Ok(())
}
let alloc = self.get(ptr.alloc_id)?;
alloc.check_locks(self.cur_frame, ptr.offset, len, access)
.map_err(|lock| EvalError::MemoryLockViolation { ptr, len, access, lock })
let frame = self.cur_frame;
alloc.check_locks(Some(frame), ptr.offset, len, access)
.map_err(|lock| EvalError::MemoryLockViolation { ptr, len, frame, access, lock })
}
/// Acquire the lock for the given lifetime
pub(crate) fn acquire_lock(&mut self, ptr: MemoryPointer, len: u64, region: Option<CodeExtent>, kind: AccessKind) -> EvalResult<'tcx> {
use std::collections::btree_map::Entry::*;
let frame = self.cur_frame;
assert!(len > 0);
trace!("Acquiring {:?} lock at {:?}, size {} for region {:?}", kind, ptr, len, region);
trace!("Frame {} acquiring {:?} lock at {:?}, size {} for region {:?}", frame, kind, ptr, len, region);
self.check_bounds(ptr.offset(len, self.layout)?, true)?; // if ptr.offset is in bounds, then so is ptr (because offset checks for overflow)
self.check_locks(ptr, len, kind)?; // make sure we have the access we are acquiring
let lifetime = DynamicLifetime { frame: self.cur_frame, region };
let alloc = self.get_mut_unchecked(ptr.alloc_id)?;
alloc.locks.entry(MemoryRange::new(ptr.offset, len)).or_insert_with(|| Vec::new()).push(LockInfo { lifetime, kind, status: LockStatus::Held });
// Check if this conflicts with other locks
alloc.check_locks(None, ptr.offset, len, kind)
.map_err(|lock| EvalError::MemoryAcquireConflict { ptr, len, kind, lock })?;
let lifetime = DynamicLifetime { frame, region };
match (alloc.locks.entry(MemoryRange::new(ptr.offset, len)), kind) {
(Vacant(entry), AccessKind::Read) => { entry.insert(ReadLock(vec![lifetime])); },
(Vacant(entry), AccessKind::Write) => { entry.insert(WriteLock(lifetime)); },
(Occupied(mut entry), AccessKind::Read) =>
match *entry.get_mut() {
ReadLock(ref mut lifetimes) => lifetimes.push(lifetime),
WriteLock(_) => bug!("We already checked that there is no conflicting write lock"),
},
(Occupied(_), AccessKind::Write) => bug!("We already checked that there is no conflicting lock"),
};
Ok(())
}
/// Release a write lock prematurely. If there's just read locks, do nothing.
pub(crate) fn release_write_lock_until(&mut self, ptr: MemoryPointer, len: u64, release_until: Option<CodeExtent>) -> EvalResult<'tcx> {
pub(crate) fn release_write_lock(&mut self, ptr: MemoryPointer, len: u64) -> EvalResult<'tcx> {
assert!(len > 0);
let cur_frame = self.cur_frame;
let alloc = self.get_mut_unchecked(ptr.alloc_id)?;
for (range, locks) in alloc.iter_lock_vecs_mut(ptr.offset, len) {
// Check all locks in this region; make sure there are no conflicting write locks of other frames.
// Also, if we will recover later, perform our release by changing the lock status.
for lock in locks.iter_mut() {
if lock.kind == AccessKind::Read || lock.status != LockStatus::Held { continue; }
if lock.lifetime.frame != cur_frame {
let mut remove_list : Vec<MemoryRange> = Vec::new();
for (range, lock) in alloc.iter_locks_mut(ptr.offset, len) {
match *lock {
WriteLock(ref lft) => {
// Make sure we can release this lock
if lft.frame != cur_frame {
return Err(EvalError::InvalidMemoryLockRelease { ptr, len });
}
if !range.contained_in(ptr.offset, len) {
return Err(EvalError::Unimplemented(format!("miri does not support release part of a write-locked region")));
}
// Release it later. We cannot do this now.
remove_list.push(*range);
}
ReadLock(_) => {
return Err(EvalError::InvalidMemoryLockRelease { ptr, len });
}
if !range.contained_in(ptr.offset, len) {
return Err(EvalError::Unimplemented(format!("miri does not support release part of a write-locked region")));
}
let ptr = MemoryPointer { alloc_id : ptr.alloc_id, offset: range.offset() };
trace!("Releasing write lock at {:?}, size {} until {:?}", ptr, range.len(), release_until);
if let Some(region) = release_until {
lock.status = LockStatus::RecoverAfter(region);
}
}
// If we will not recover, we did not do anything above except for some checks. Now, erase the locks from the list.
if let None = release_until {
// Delete everything that's a held write lock. We already checked above that these are ours.
// Unfortunately, this duplicates the condition from above. Is there anything we can do about this?
locks.retain(|lock| lock.kind == AccessKind::Read || lock.status != LockStatus::Held);
},
}
}
for range in remove_list {
alloc.locks.remove(&range);
}
// TODO: Test that we actually released a write lock for the entire covered region.
Ok(())
}
pub(crate) fn locks_lifetime_ended(&mut self, ending_region: Option<CodeExtent>) {
trace!("Releasing locks that expire at {:?}", ending_region);
let cur_frame = self.cur_frame;
let has_ended = |lock: &LockInfo| -> bool {
if lock.lifetime.frame != cur_frame {
let has_ended = |lifetime: &DynamicLifetime| -> bool {
if lifetime.frame != cur_frame {
return false;
}
match ending_region {
None => true, // When a function ends, we end *all* its locks. It's okay for a function to still have lifetime-related locks
// when it returns, that can happen e.g. with NLL when a lifetime can, but does not have to, extend beyond the
// end of a function. Same for a function still having recoveries.
Some(ending_region) => lock.lifetime.region == Some(ending_region),
Some(ending_region) => lifetime.region == Some(ending_region),
}
};
for alloc in self.alloc_map.values_mut() {
for (_range, locks) in alloc.locks.iter_mut() {
// Collect things for removal as we cannot remove while iterating
let mut remove_list : Vec<MemoryRange> = Vec::new();
for (range, lock) in alloc.locks.iter_mut() {
// Delete everything that ends now -- i.e., keep only all the other lifetimes.
locks.retain(|lock| !has_ended(lock));
// Activate locks that get recovered now
if let Some(ending_region) = ending_region {
for lock in locks.iter_mut() {
if lock.lifetime.frame == cur_frame && lock.status == LockStatus::RecoverAfter(ending_region) {
// FIXME: Check if this triggers a conflict between active locks
lock.status = LockStatus::Held;
match *lock {
WriteLock(ref lft) => {
if has_ended(lft) {
remove_list.push(*range);
}
}
ReadLock(ref mut lfts) => {
lfts.retain(|lft| !has_ended(lft));
if lfts.is_empty() {
remove_list.push(*range);
}
},
}
}
// Perform delayed removal
for range in remove_list {
alloc.locks.remove(&range);
}
}
// TODO: It may happen now that we leave empty vectors in the map. Is it worth getting rid of them?
}
}

View file

@ -9,12 +9,11 @@ use rustc::mir;
use rustc::traits::Reveal;
use rustc::ty;
use rustc::ty::layout::Layout;
use rustc::ty::subst::{Subst, Substs};
use rustc::ty::subst::Substs;
use error::{EvalResult, EvalError};
use eval_context::{EvalContext, StackPopCleanup};
use lvalue::{Global, GlobalId, Lvalue};
use validation::ValidationCtx;
use value::{Value, PrimVal};
use syntax::codemap::Span;
use syntax::ast::Mutability;
@ -132,14 +131,11 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
// Validity checks.
Validate(op, ref lvalues) => {
for operand in lvalues {
// We need to monomorphize ty *without* erasing lifetimes
let ty = operand.ty.subst(self.tcx, self.substs());
let lvalue = self.eval_lvalue(&operand.lval)?;
self.validate(lvalue, ty, ValidationCtx::new(op))?;
self.validation_op(op, operand)?;
}
}
EndRegion(ce) => {
self.memory.locks_lifetime_ended(Some(ce));
self.end_region(ce)?;
}
// Defined to do nothing. These are added by optimization passes, to avoid changing the

View file

@ -1,51 +1,93 @@
use rustc::hir::Mutability;
use rustc::mir::{self, ValidationOp};
use rustc::hir::Mutability::*;
use rustc::mir::{self, ValidationOp, ValidationOperand};
use rustc::ty::{self, Ty, TypeFoldable};
use rustc::ty::subst::Subst;
use rustc::traits::Reveal;
use rustc::infer::TransNormalize;
use rustc::middle::region::CodeExtent;
use error::{EvalError, EvalResult};
use eval_context::{EvalContext};
use eval_context::{EvalContext, DynamicLifetime};
use memory::AccessKind;
use value::Value;
use lvalue::{Lvalue, LvalueExtra};
// Validity checks
pub type ValidationQuery<'tcx> = ValidationOperand<'tcx, Lvalue<'tcx>>;
#[derive(Copy, Clone, Debug)]
pub struct ValidationCtx {
op: ValidationOp,
region: Option<CodeExtent>,
mutbl: Mutability,
enum ValidationMode {
Acquire,
/// Recover because the given region ended
Recover(CodeExtent),
Release
}
impl ValidationCtx {
pub fn new(op: ValidationOp) -> Self {
ValidationCtx {
op, region: None, mutbl: Mutability::MutMutable,
impl ValidationMode {
fn acquiring(self) -> bool {
use self::ValidationMode::*;
match self {
Acquire | Recover(_) => true,
Release => false,
}
}
}
// Validity checks
impl<'a, 'tcx> EvalContext<'a, 'tcx> {
fn validate_variant(
&mut self,
lvalue: Lvalue<'tcx>,
ty: Ty<'tcx>,
variant: &ty::VariantDef,
subst: &ty::subst::Substs<'tcx>,
vctx: ValidationCtx,
) -> EvalResult<'tcx> {
// TODO: Take visibility/privacy into account.
for (idx, field) in variant.fields.iter().enumerate() {
let field_ty = field.ty(self.tcx, subst);
let field_lvalue = self.lvalue_field(lvalue, idx, ty, field_ty)?;
self.validate(field_lvalue, field_ty, vctx)?;
/// Entry point for a MIR `Validate` statement on a single operand:
/// monomorphize the operand's type *without* erasing lifetimes, evaluate its
/// lvalue, and run `validate` in the mode corresponding to `op`.
pub(crate) fn validation_op(&mut self, op: ValidationOp, operand: &ValidationOperand<'tcx, mir::Lvalue<'tcx>>) -> EvalResult<'tcx> {
// We need to monomorphize ty *without* erasing lifetimes
let ty = operand.ty.subst(self.tcx, self.substs());
let lval = self.eval_lvalue(&operand.lval)?;
let query = ValidationQuery { lval, ty, re: operand.re, mutbl: operand.mutbl };
let mode = match op {
ValidationOp::Acquire => ValidationMode::Acquire,
ValidationOp::Release => ValidationMode::Release,
ValidationOp::Suspend(ce) => {
if operand.mutbl == MutImmutable {
// Nothing to do when suspending immutable things
return Ok(());
}
// Record the query so it can be re-validated (recovered) when the
// region `ce` ends; see `end_region`.
let lft = DynamicLifetime { frame: self.cur_frame(), region: Some(ce) };
trace!("Suspending {:?} until {:?}", query, ce);
self.suspended.entry(lft).or_insert_with(Vec::new).push(query.clone());
// Suspension performs a release now; re-acquisition happens on recovery.
ValidationMode::Release
}
};
self.validate(query, mode)?;
Ok(())
}
/// Handle the end of the code extent `ce`: release all memory locks whose
/// lifetime expires with this region in the current frame, then re-validate
/// (recover) any lvalues that `validation_op` suspended until this point.
pub(crate) fn end_region(&mut self, ce: CodeExtent) -> EvalResult<'tcx> {
self.memory.locks_lifetime_ended(Some(ce));
// Recover suspended lvals
let lft = DynamicLifetime { frame: self.cur_frame(), region: Some(ce) };
if let Some(queries) = self.suspended.remove(&lft) {
for query in queries {
self.validate(query, ValidationMode::Recover(ce))?;
}
}
Ok(())
}
fn validate_ptr(&mut self, val: Value, pointee_ty: Ty<'tcx>, vctx: ValidationCtx) -> EvalResult<'tcx> {
/// Validate every field of the given variant, recursing with the same query
/// context (region and mutability are inherited via `..query`) but with the
/// per-field lvalue and type substituted in.
fn validate_variant(
&mut self,
query: ValidationQuery<'tcx>,
variant: &ty::VariantDef,
subst: &ty::subst::Substs<'tcx>,
mode: ValidationMode,
) -> EvalResult<'tcx> {
// TODO: Maybe take visibility/privacy into account.
for (idx, field) in variant.fields.iter().enumerate() {
let field_ty = field.ty(self.tcx, subst);
let field_lvalue = self.lvalue_field(query.lval, idx, query.ty, field_ty)?;
self.validate(ValidationQuery { lval: field_lvalue, ty: field_ty, ..query }, mode)?;
}
Ok(())
}
fn validate_ptr(&mut self, val: Value, pointee_ty: Ty<'tcx>, re: Option<CodeExtent>, mutbl: Mutability, mode: ValidationMode) -> EvalResult<'tcx> {
// Check alignment and non-NULLness
let (_, align) = self.size_and_align_of_dst(pointee_ty, val)?;
let ptr = val.into_ptr(&mut self.memory)?;
@ -53,31 +95,41 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
// Recurse
let pointee_lvalue = self.val_to_lvalue(val, pointee_ty)?;
self.validate(pointee_lvalue, pointee_ty, vctx)
self.validate(ValidationQuery { lval: pointee_lvalue, ty: pointee_ty, re, mutbl }, mode)
}
/// Validate the lvalue at the given type. If `release` is true, just do a release of all write locks
pub(super) fn validate(&mut self, lvalue: Lvalue<'tcx>, mut ty: Ty<'tcx>, mut vctx: ValidationCtx) -> EvalResult<'tcx>
/// Validate the lvalue at the given type. If `acquire` is false, just do a release of all write locks
fn validate(&mut self, mut query: ValidationQuery<'tcx>, mode: ValidationMode) -> EvalResult<'tcx>
{
use rustc::ty::TypeVariants::*;
use rustc::ty::RegionKind::*;
use rustc::ty::AdtKind;
use self::Mutability::*;
// No point releasing shared stuff.
if !mode.acquiring() && query.mutbl == MutImmutable {
return Ok(());
}
// When we recover, we may see data whose validity *just* ended. Do not acquire it.
if let ValidationMode::Recover(ce) = mode {
if Some(ce) == query.re {
return Ok(());
}
}
// This is essentially a copy of normalize_associated_type, but without erasure
if ty.has_projection_types() {
if query.ty.has_projection_types() {
let param_env = ty::ParamEnv::empty(Reveal::All);
ty = self.tcx.infer_ctxt().enter(move |infcx| {
ty.trans_normalize(&infcx, param_env)
let old_ty = query.ty;
query.ty = self.tcx.infer_ctxt().enter(move |infcx| {
old_ty.trans_normalize(&infcx, param_env)
})
}
let ty = ty; // no more mutation
trace!("Validating {:?} at type {}, context {:?}", lvalue, ty, vctx);
trace!("{:?} on {:?}", mode, query);
// Decide whether this type *owns* the memory it covers (like integers), or whether it
// just assembles pieces (that each own their memory) together to a larger whole.
// TODO: Currently, we don't acquire locks for padding and discriminants. We should.
let is_owning = match ty.sty {
let is_owning = match query.ty.sty {
TyInt(_) | TyUint(_) | TyRawPtr(_) |
TyBool | TyFloat(_) | TyChar | TyStr |
TyRef(..) | TyFnPtr(..) | TyFnDef(..) | TyNever => true,
@ -86,18 +138,18 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
TyParam(_) | TyInfer(_) | TyProjection(_) | TyAnon(..) | TyError => bug!("I got an incomplete/unnormalized type for validation"),
};
if is_owning {
match lvalue {
match query.lval {
Lvalue::Ptr { ptr, extra, aligned: _ } => {
// Determine the size
// FIXME: Can we reuse size_and_align_of_dst for Lvalues?
let len = match self.type_size(ty)? {
let len = match self.type_size(query.ty)? {
Some(size) => {
assert_eq!(extra, LvalueExtra::None, "Got a fat ptr to a sized type");
size
}
None => {
// The only unsized type we consider "owning" is TyStr.
assert_eq!(ty.sty, TyStr, "Found a surprising unsized owning type");
assert_eq!(query.ty.sty, TyStr, "Found a surprising unsized owning type");
// The extra must be the length, in bytes.
match extra {
LvalueExtra::Length(len) => len,
@ -108,11 +160,11 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
// Handle locking
if len > 0 {
let ptr = ptr.to_ptr()?;
let access = match vctx.mutbl { MutMutable => AccessKind::Write, MutImmutable => AccessKind::Read };
match vctx.op {
ValidationOp::Acquire => self.memory.acquire_lock(ptr, len, vctx.region, access)?,
ValidationOp::Release => self.memory.release_write_lock_until(ptr, len, None)?,
ValidationOp::Suspend(region) => self.memory.release_write_lock_until(ptr, len, Some(region))?,
let access = match query.mutbl { MutMutable => AccessKind::Write, MutImmutable => AccessKind::Read };
if mode.acquiring() {
self.memory.acquire_lock(ptr, len, query.re, access)?;
} else {
self.memory.release_write_lock(ptr, len)?;
}
}
}
@ -122,7 +174,7 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
}
}
match ty.sty {
match query.ty.sty {
TyInt(_) | TyUint(_) | TyRawPtr(_) => {
// TODO: Make sure these are not undef.
// We could do a bounds-check and other sanity checks on the lvalue, but it would be a bug in miri for this to ever fail.
@ -136,33 +188,29 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
Err(EvalError::ValidationFailure(format!("The empty type is never valid.")))
}
TyRef(region, ty::TypeAndMut { ty: pointee_ty, mutbl }) => {
let val = self.read_lvalue(lvalue)?;
let val = self.read_lvalue(query.lval)?;
// Sharing restricts our context
if mutbl == MutImmutable {
// Actually, in case of releasing-validation, this means we are done.
if vctx.op != ValidationOp::Acquire {
return Ok(());
}
vctx.mutbl = MutImmutable;
query.mutbl = MutImmutable;
}
// Inner lifetimes *outlive* outer ones, so only if we have no lifetime restriction yet,
// we record the region of this borrow to the context.
if vctx.region == None {
if query.re == None {
match *region {
ReScope(ce) => vctx.region = Some(ce),
ReScope(ce) => query.re = Some(ce),
// It is possible for us to encode erased lifetimes here because the lifetimes in
// this functions' Subst will be erased.
_ => {},
}
}
self.validate_ptr(val, pointee_ty, vctx)
self.validate_ptr(val, pointee_ty, query.re, query.mutbl, mode)
}
TyAdt(adt, _) if adt.is_box() => {
let val = self.read_lvalue(lvalue)?;
self.validate_ptr(val, ty.boxed_ty(), vctx)
let val = self.read_lvalue(query.lval)?;
self.validate_ptr(val, query.ty.boxed_ty(), query.re, query.mutbl, mode)
}
TyFnPtr(_sig) => {
let ptr = self.read_lvalue(lvalue)?.into_ptr(&mut self.memory)?.to_ptr()?;
let ptr = self.read_lvalue(query.lval)?.into_ptr(&mut self.memory)?.to_ptr()?;
self.memory.get_fn(ptr)?;
// TODO: Check if the signature matches (should be the same check as what terminator/mod.rs already does on call?).
Ok(())
@ -175,28 +223,28 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
// Compound types
TySlice(elem_ty) => {
let len = match lvalue {
let len = match query.lval {
Lvalue::Ptr { extra: LvalueExtra::Length(len), .. } => len,
_ => bug!("acquire_valid of a TySlice given non-slice lvalue: {:?}", lvalue),
_ => bug!("acquire_valid of a TySlice given non-slice lvalue: {:?}", query.lval),
};
for i in 0..len {
let inner_lvalue = self.lvalue_index(lvalue, ty, i)?;
self.validate(inner_lvalue, elem_ty, vctx)?;
let inner_lvalue = self.lvalue_index(query.lval, query.ty, i)?;
self.validate(ValidationQuery { lval: inner_lvalue, ty: elem_ty, ..query }, mode)?;
}
Ok(())
}
TyArray(elem_ty, len) => {
for i in 0..len {
let inner_lvalue = self.lvalue_index(lvalue, ty, i as u64)?;
self.validate(inner_lvalue, elem_ty, vctx)?;
let inner_lvalue = self.lvalue_index(query.lval, query.ty, i as u64)?;
self.validate(ValidationQuery { lval: inner_lvalue, ty: elem_ty, ..query }, mode)?;
}
Ok(())
}
TyDynamic(_data, _region) => {
// Check that this is a valid vtable
let vtable = match lvalue {
let vtable = match query.lval {
Lvalue::Ptr { extra: LvalueExtra::Vtable(vtable), .. } => vtable,
_ => bug!("acquire_valid of a TyDynamic given non-trait-object lvalue: {:?}", lvalue),
_ => bug!("acquire_valid of a TyDynamic given non-trait-object lvalue: {:?}", query.lval),
};
self.read_size_and_align_from_vtable(vtable)?;
// TODO: Check that the vtable contains all the function pointers we expect it to have.
@ -207,16 +255,18 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
Ok(())
}
TyAdt(adt, subst) => {
if Some(adt.did) == self.tcx.lang_items.unsafe_cell_type() {
// No locks for unsafe cells. Also no other validation, the only field is private anyway.
if Some(adt.did) == self.tcx.lang_items.unsafe_cell_type() /*&& query.mutbl == MutImmutable*/ {
// No locks for shared unsafe cells. Also no other validation, the only field is private anyway.
// FIXME: For now we also don't acquire locks for mutable UnsafeCell, because this gets violated a lot
// by unsafe code.
return Ok(());
}
match adt.adt_kind() {
AdtKind::Enum => {
// TODO: Can we get the discriminant without forcing an allocation?
let ptr = self.force_allocation(lvalue)?.to_ptr()?;
let discr = self.read_discriminant_value(ptr, ty)?;
let ptr = self.force_allocation(query.lval)?.to_ptr()?;
let discr = self.read_discriminant_value(ptr, query.ty)?;
// Get variant index for discriminant
let variant_idx = adt.discriminants(self.tcx)
@ -226,21 +276,21 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
if variant.fields.len() > 0 {
// Downcast to this variant, if needed
let lvalue = if adt.variants.len() > 1 {
self.eval_lvalue_projection(lvalue, ty, &mir::ProjectionElem::Downcast(adt, variant_idx))?
let lval = if adt.variants.len() > 1 {
self.eval_lvalue_projection(query.lval, query.ty, &mir::ProjectionElem::Downcast(adt, variant_idx))?
} else {
lvalue
query.lval
};
// Recursively validate the fields
self.validate_variant(lvalue, ty, variant, subst, vctx)
self.validate_variant(ValidationQuery { lval, ..query} , variant, subst, mode)
} else {
// No fields, nothing left to check. Downcasting may fail, e.g. in case of a CEnum.
Ok(())
}
}
AdtKind::Struct => {
self.validate_variant(lvalue, ty, adt.struct_variant(), subst, vctx)
self.validate_variant(query, adt.struct_variant(), subst, mode)
}
AdtKind::Union => {
// No guarantees are provided for union types.
@ -251,15 +301,15 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
}
TyTuple(ref types, _) => {
for (idx, field_ty) in types.iter().enumerate() {
let field_lvalue = self.lvalue_field(lvalue, idx, ty, field_ty)?;
self.validate(field_lvalue, field_ty, vctx)?;
let field_lvalue = self.lvalue_field(query.lval, idx, query.ty, field_ty)?;
self.validate(ValidationQuery { lval: field_lvalue, ty: field_ty, ..query }, mode)?;
}
Ok(())
}
TyClosure(def_id, ref closure_substs) => {
for (idx, field_ty) in closure_substs.upvar_tys(def_id, self.tcx).enumerate() {
let field_lvalue = self.lvalue_field(lvalue, idx, ty, field_ty)?;
self.validate(field_lvalue, field_ty, vctx)?;
let field_lvalue = self.lvalue_field(query.lval, idx, query.ty, field_ty)?;
self.validate(ValidationQuery { lval: field_lvalue, ty: field_ty, ..query }, mode)?;
}
// TODO: Check if the signature matches (should be the same check as what terminator/mod.rs already does on call?).
// Are there other things we can/should check? Like vtable pointers?