Re-do the way locking is done during verification

We now lock at the "leaves" on the types, rather than locking at references.
In particular, Validate for something of non-reference lvalue type will also lock the "outer" memory.
Also change the way we release write locks, and some refactoring in the memory.
This commit is contained in:
Ralf Jung 2017-07-13 20:08:35 -07:00 committed by Oliver Schneider
parent 7c6e6cf492
commit 4457a52d4f
No known key found for this signature in database
GPG key ID: A69F8D225B3AD7D9
4 changed files with 145 additions and 64 deletions

View file

@ -57,11 +57,15 @@ pub enum EvalError<'tcx> {
access: AccessKind,
lock: LockInfo,
},
ValidationFailure(String),
InvalidMemoryLockRelease {
ptr: MemoryPointer,
len: u64,
},
DeallocatedLockedMemory {
ptr: MemoryPointer,
lock: LockInfo,
},
ValidationFailure(String),
CalledClosureAsFunction,
VtableForArgumentlessMethod,
ModifiedConstantMemory,
@ -70,7 +74,6 @@ pub enum EvalError<'tcx> {
TypeNotPrimitive(Ty<'tcx>),
ReallocatedWrongMemoryKind(Kind, Kind),
DeallocatedWrongMemoryKind(Kind, Kind),
DeallocatedLockedMemory,
ReallocateNonBasePtr,
DeallocateNonBasePtr,
IncorrectAllocationInformation,
@ -113,10 +116,10 @@ impl<'tcx> Error for EvalError<'tcx> {
"memory access conflicts with lock",
ValidationFailure(..) =>
"type validation failed",
DeallocatedLockedMemory =>
"deallocated memory while a lock was held",
InvalidMemoryLockRelease { .. } =>
"memory lock released that was never acquired",
DeallocatedLockedMemory { .. } =>
"tried to deallocate memory in conflict with a lock",
ReadPointerAsBytes =>
"a raw memory access tried to access part of a pointer value as raw bytes",
ReadBytesAsPointer =>
@ -221,9 +224,13 @@ impl<'tcx> fmt::Display for EvalError<'tcx> {
access, ptr, len, lock)
}
InvalidMemoryLockRelease { ptr, len } => {
write!(f, "tried to release memory write lock at {:?}, size {}, which was not acquired by this function",
write!(f, "tried to release memory write lock at {:?}, size {}, but the write lock is held by someone else",
ptr, len)
}
DeallocatedLockedMemory { ptr, lock } => {
write!(f, "tried to deallocate memory at {:?} in conflict with lock {:?}",
ptr, lock)
}
ValidationFailure(ref err) => {
write!(f, "type validation failed: {}", err)
}

View file

@ -503,27 +503,11 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
}
fn validate_ptr(&mut self, val: Value, pointee_ty: Ty<'tcx>, vctx: ValidationCtx) -> EvalResult<'tcx> {
use self::TyMutability::*;
// Check alignment and non-NULLness
let (len, align) = self.size_and_align_of_dst(pointee_ty, val)?;
let (_, align) = self.size_and_align_of_dst(pointee_ty, val)?;
let ptr = val.into_ptr(&mut self.memory)?;
self.memory.check_align(ptr, align)?;
// For ZSTs, do no more
if len == 0 {
return Ok(())
}
// Acquire lock (also establishing that this is in-bounds etc.)
let ptr = ptr.to_ptr()?;
let access = match vctx.mutbl { MutMutable => AccessKind::Write, MutImmutable => AccessKind::Read };
match vctx.op {
ValidationOp::Acquire => self.memory.acquire_lock(ptr, len, vctx.region, access)?,
ValidationOp::Release => self.memory.release_lock_until(ptr, len, None)?,
ValidationOp::Suspend(region) => self.memory.release_lock_until(ptr, len, Some(region))?,
}
// Recurse
let pointee_lvalue = self.val_to_lvalue(val, pointee_ty)?;
self.validate(pointee_lvalue, pointee_ty, vctx)
@ -538,14 +522,64 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
use self::TyMutability::*;
trace!("Validating {:?} at type {}, context {:?}", lvalue, ty, vctx);
// Decide whether this type *owns* the memory it covers (like integers), or whether it
// just assembles pieces (that each own their memory) together to a larger whole.
// TODO: Currently, we don't acquire locks for padding and discriminants. We should.
let is_owning = match ty.sty {
TyInt(_) | TyUint(_) | TyRawPtr(_) |
TyBool | TyFloat(_) | TyChar | TyStr |
TyRef(..) => true,
TyAdt(adt, _) if adt.is_box() => true,
TyAdt(_, _) | TyTuple(..) | TyClosure(..) => false,
TyParam(_) | TyInfer(_) => bug!("I got an incomplete type for validation"),
_ => return Err(EvalError::Unimplemented(format!("Unimplemented type encountered when checking validity."))),
};
if is_owning {
match lvalue {
Lvalue::Ptr { ptr, extra, aligned: _ } => {
// Determine the size
// FIXME: Can we reuse size_and_align_of_dst for Lvalues?
let len = match self.type_size(ty)? {
Some(size) => {
assert_eq!(extra, LvalueExtra::None, "Got a fat ptr to a sized type");
size
}
None => {
// The only unsized type we consider "owning" is TyStr.
assert_eq!(ty.sty, TyStr, "Found a surprising unsized owning type");
// The extra must be the length, in bytes.
match extra {
LvalueExtra::Length(len) => len,
_ => bug!("TyStr must have a length as extra"),
}
}
};
// Handle locking
if len > 0 {
let ptr = ptr.to_ptr()?;
let access = match vctx.mutbl { MutMutable => AccessKind::Write, MutImmutable => AccessKind::Read };
match vctx.op {
ValidationOp::Acquire => self.memory.acquire_lock(ptr, len, vctx.region, access)?,
ValidationOp::Release => self.memory.release_write_lock_until(ptr, len, None)?,
ValidationOp::Suspend(region) => self.memory.release_write_lock_until(ptr, len, Some(region))?,
}
}
}
Lvalue::Local { ..} | Lvalue::Global(..) => {
// These are not backed by memory, so we have nothing to do.
}
}
}
match ty.sty {
TyChar | TyInt(_) | TyUint(_) | TyRawPtr(_) => {
TyInt(_) | TyUint(_) | TyRawPtr(_) => {
// TODO: Make sure these are not undef.
// We could do a bounds-check and other sanity checks on the lvalue, but it would be a bug in miri for this to ever fail.
Ok(())
}
TyBool | TyFloat(_) | TyStr => {
// TODO: Check if these are valid bool/float/UTF-8, respectively (and in particular, not undef).
TyBool | TyFloat(_) | TyChar | TyStr => {
// TODO: Check if these are valid bool/float/codepoint/UTF-8, respectively (and in particular, not undef).
Ok(())
}
TyNever => {
@ -643,8 +677,7 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
}
Ok(())
}
TyParam(_) | TyInfer(_) => bug!("I got an incomplete type for validation"),
_ => unimplemented!("Unimplemented type encountered when checking validity.")
_ => bug!("We already establishd that this is a type we support.")
}
}
}

View file

@ -39,6 +39,14 @@ mod range {
}
}
/// Returns the start offset (in bytes) of this range within its allocation.
pub fn offset(&self) -> u64 {
self.start
}
/// Returns the length of this range in bytes (`end - start`).
pub fn len(&self) -> u64 {
self.end - self.start
}
pub fn range(offset: u64, len: u64) -> ops::Range<MemoryRange> {
assert!(len > 0);
// We select all elements that are within
@ -141,6 +149,32 @@ pub struct Allocation {
locks: BTreeMap<MemoryRange, Vec<LockInfo>>,
}
impl Allocation {
/// Iterate over all locks whose range overlaps the byte range `[offset, offset+len)`.
/// `MemoryRange::range` pre-selects candidate keys from the BTreeMap; the filter
/// then keeps only the entries that actually overlap.
fn iter_locks<'a>(&'a self, offset: u64, len: u64) -> impl Iterator<Item=&'a LockInfo> + 'a {
self.locks.range(MemoryRange::range(offset, len))
.filter(move |&(range, _)| range.overlaps(offset, len))
.flat_map(|(_, locks)| locks.iter())
}
/// Like `iter_locks`, but yields each overlapping range together with mutable
/// access to its whole lock vector, so callers can edit or retain locks in place.
fn iter_lock_vecs_mut<'a>(&'a mut self, offset: u64, len: u64) -> impl Iterator<Item=(&'a MemoryRange, &'a mut Vec<LockInfo>)> + 'a {
self.locks.range_mut(MemoryRange::range(offset, len))
.filter(move |&(range, _)| range.overlaps(offset, len))
}
/// Check whether `frame` may perform `access` on `[offset, offset+len)`.
/// On conflict, returns the offending lock (by value) so the caller can build
/// an appropriate error. A zero-length access trivially succeeds.
fn check_locks<'tcx>(&self, frame: usize, offset: u64, len: u64, access: AccessKind) -> Result<(), LockInfo> {
if len == 0 {
return Ok(())
}
for lock in self.iter_locks(offset, len) {
// Check if the lock is active, and is in conflict with the access.
if lock.status == LockStatus::Held && !lock.access_permitted(frame, access) {
return Err(*lock);
}
}
Ok(())
}
}
#[derive(Debug, PartialEq, Copy, Clone)]
pub enum Kind {
/// Error if deallocated any other way than `rust_deallocate`
@ -359,13 +393,16 @@ impl<'a, 'tcx> Memory<'a, 'tcx> {
Some(alloc) => alloc,
None => return Err(EvalError::DoubleFree),
};
// It is okay for us to still holds locks on deallocation -- for example, we could store data we own
// in a local, and the local could be deallocated (from StorageDead) before the function returns.
// However, we must have write access to the entire allocation.
alloc.check_locks(self.cur_frame, 0, alloc.bytes.len() as u64, AccessKind::Write)
.map_err(|lock| EvalError::DeallocatedLockedMemory { ptr, lock })?;
if alloc.kind != kind {
return Err(EvalError::DeallocatedWrongMemoryKind(alloc.kind, kind));
}
if alloc.locks.values().any(|locks| !locks.is_empty()) {
return Err(EvalError::DeallocatedLockedMemory);
}
if let Some((size, align)) = size_and_align {
if size != alloc.bytes.len() as u64 || align != alloc.align {
return Err(EvalError::IncorrectAllocationInformation);
@ -510,24 +547,18 @@ impl<'a, 'tcx> Memory<'a, 'tcx> {
/// Locking
impl<'a, 'tcx> Memory<'a, 'tcx> {
pub(crate) fn check_locks(&self, ptr: MemoryPointer, len: u64, access: AccessKind) -> EvalResult<'tcx> {
let alloc = self.get(ptr.alloc_id)?;
for (range, locks) in alloc.locks.range(MemoryRange::range(ptr.offset, len)) {
for lock in locks {
// Check if the lock is active, overlaps this access, and is in conflict with the access.
if lock.status == LockStatus::Held && range.overlaps(ptr.offset, len) && !lock.access_permitted(self.cur_frame, access) {
return Err(EvalError::MemoryLockViolation { ptr, len, access, lock: *lock });
}
}
if len == 0 {
return Ok(())
}
Ok(())
let alloc = self.get(ptr.alloc_id)?;
alloc.check_locks(self.cur_frame, ptr.offset, len, access)
.map_err(|lock| EvalError::MemoryLockViolation { ptr, len, access, lock })
}
/// Acquire the lock for the given lifetime
pub(crate) fn acquire_lock(&mut self, ptr: MemoryPointer, len: u64, region: Option<CodeExtent>, kind: AccessKind) -> EvalResult<'tcx> {
assert!(len > 0);
trace!("Acquiring {:?} lock at {:?}, size {} for region {:?}", kind, ptr, len, region);
if len == 0 {
return Ok(());
}
self.check_bounds(ptr.offset(len, self.layout)?, true)?; // if ptr.offset is in bounds, then so is ptr (because offset checks for overflow)
self.check_locks(ptr, len, kind)?; // make sure we have the access we are acquiring
let lifetime = DynamicLifetime { frame: self.cur_frame, region };
@ -536,31 +567,39 @@ impl<'a, 'tcx> Memory<'a, 'tcx> {
Ok(())
}
/// Release a write lock prematurely
pub(crate) fn release_lock_until(&mut self, ptr: MemoryPointer, len: u64, release_until: Option<CodeExtent>) -> EvalResult<'tcx> {
trace!("Releasing write lock at {:?}, size {} until {:?}", ptr, len, release_until);
// Make sure there are no read locks and no *other* write locks here
if let Err(_) = self.check_locks(ptr, len, AccessKind::Write) {
return Err(EvalError::InvalidMemoryLockRelease { ptr, len });
}
/// Release a write lock prematurely. If there's just read locks, do nothing.
pub(crate) fn release_write_lock_until(&mut self, ptr: MemoryPointer, len: u64, release_until: Option<CodeExtent>) -> EvalResult<'tcx> {
assert!(len > 0);
let cur_frame = self.cur_frame;
let alloc = self.get_mut(ptr.alloc_id)?;
let lock_infos = alloc.locks.get_mut(&MemoryRange::new(ptr.offset, len))
.ok_or(EvalError::InvalidMemoryLockRelease { ptr, len })?;
// Find the lock. There can only be one active write lock, so this is uniquely defined.
let lock_info_idx = lock_infos.iter().position(|lock_info| lock_info.status == LockStatus::Held)
.ok_or(EvalError::InvalidMemoryLockRelease { ptr, len })?;
{
let lock_info = &mut lock_infos[lock_info_idx];
assert_eq!(lock_info.lifetime.frame, cur_frame);
if let Some(ce) = release_until {
lock_info.status = LockStatus::RecoverAfter(ce);
return Ok(());
for (range, locks) in alloc.iter_lock_vecs_mut(ptr.offset, len) {
if !range.contains(ptr.offset, len) {
return Err(EvalError::Unimplemented(format!("miri does not support release part of a write-locked region")));
}
// Check all locks in this region; make sure there are no conflicting write locks of other frames.
// Also, if we will recover later, perform our release by changing the lock status.
for lock in locks.iter_mut() {
if lock.kind == AccessKind::Read || lock.status != LockStatus::Held { continue; }
if lock.lifetime.frame != cur_frame {
return Err(EvalError::InvalidMemoryLockRelease { ptr, len });
}
let ptr = MemoryPointer { alloc_id : ptr.alloc_id, offset: range.offset() };
trace!("Releasing write lock at {:?}, size {} until {:?}", ptr, range.len(), release_until);
if let Some(region) = release_until {
lock.status = LockStatus::RecoverAfter(region);
}
}
// If we will not recover, we did not do anything above except for some checks. Now, erase the locks from the list.
if let None = release_until {
// Delete everything that's a held write lock. We already checked above that these are ours.
// Unfortunately, this duplicates the condition from above. Is there anything we can do about this?
locks.retain(|lock| lock.kind == AccessKind::Read || lock.status != LockStatus::Held);
}
}
// Falling through to here means we want to entirely remove the lock. The control-flow is somewhat weird because of lexical lifetimes.
lock_infos.remove(lock_info_idx);
// TODO: It may happen now that we leave an empty vector in the map. Is it worth getting rid of them?
Ok(())
}
@ -581,12 +620,13 @@ impl<'a, 'tcx> Memory<'a, 'tcx> {
for alloc in self.alloc_map.values_mut() {
for (_range, locks) in alloc.locks.iter_mut() {
// Delete everything that ends now -- i.e., keep only all the other lifeimes.
// Delete everything that ends now -- i.e., keep only all the other lifetimes.
locks.retain(|lock| !has_ended(lock));
// Activate locks that get recovered now
if let Some(ending_region) = ending_region {
for lock in locks.iter_mut() {
if lock.lifetime.frame == cur_frame && lock.status == LockStatus::RecoverAfter(ending_region) {
// FIXME: Check if this triggers a conflict between active locks
lock.status = LockStatus::Held;
}
}

View file

@ -1,6 +1,7 @@
#![feature(
i128_type,
rustc_private,
conservative_impl_trait,
)]
// From rustc.