Re-do packed memory accesses
We now track in the lvalue whether the value we computed is expected to be aligned or not, and then set some state in the memory system accordingly to make it perform (or skip) alignment checks
This commit is contained in:
parent
fda18f64cd
commit
c149c3fc6a
7 changed files with 87 additions and 139 deletions
|
|
@ -446,6 +446,7 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
|
|||
let dest = Lvalue::Ptr {
|
||||
ptr: dest_ptr.into(),
|
||||
extra: LvalueExtra::DowncastVariant(variant_idx),
|
||||
aligned: true,
|
||||
};
|
||||
|
||||
self.assign_fields(dest, dest_ty, operands)
|
||||
|
|
@ -496,8 +497,10 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
|
|||
use rustc::mir::Rvalue::*;
|
||||
match *rvalue {
|
||||
Use(ref operand) => {
|
||||
let value = self.eval_operand(operand)?;
|
||||
let (value, aligned) = self.eval_operand_maybe_unaligned(operand)?;
|
||||
self.memory.reads_are_aligned = aligned;
|
||||
self.write_value(value, dest, dest_ty)?;
|
||||
self.memory.reads_are_aligned = true;
|
||||
}
|
||||
|
||||
BinaryOp(bin_op, ref left, ref right) => {
|
||||
|
|
@ -528,15 +531,7 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
|
|||
self.inc_step_counter_and_check_limit(operands.len() as u64)?;
|
||||
use rustc::ty::layout::Layout::*;
|
||||
match *dest_layout {
|
||||
Univariant { ref variant, .. } => {
|
||||
if variant.packed {
|
||||
let ptr = self.force_allocation(dest)?.to_ptr_and_extra().0.to_ptr()?;
|
||||
self.memory.mark_packed(ptr, variant.stride().bytes());
|
||||
}
|
||||
self.assign_fields(dest, dest_ty, operands)?;
|
||||
}
|
||||
|
||||
Array { .. } => {
|
||||
Univariant { .. } | Array { .. } => {
|
||||
self.assign_fields(dest, dest_ty, operands)?;
|
||||
}
|
||||
|
||||
|
|
@ -547,10 +542,6 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
|
|||
.expect("broken mir: Adt variant id invalid")
|
||||
.to_u128_unchecked();
|
||||
let discr_size = discr.size().bytes();
|
||||
if variants[variant].packed {
|
||||
let ptr = self.force_allocation(dest)?.to_ptr_and_extra().0.to_ptr()?;
|
||||
self.memory.mark_packed(ptr, variants[variant].stride().bytes());
|
||||
}
|
||||
|
||||
self.assign_discr_and_fields(
|
||||
dest,
|
||||
|
|
@ -587,12 +578,8 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
|
|||
}
|
||||
}
|
||||
|
||||
StructWrappedNullablePointer { nndiscr, ref nonnull, ref discrfield, .. } => {
|
||||
StructWrappedNullablePointer { nndiscr, ref discrfield, .. } => {
|
||||
if let mir::AggregateKind::Adt(_, variant, _, _) = **kind {
|
||||
if nonnull.packed {
|
||||
let ptr = self.force_allocation(dest)?.to_ptr_and_extra().0.to_ptr()?;
|
||||
self.memory.mark_packed(ptr, nonnull.stride().bytes());
|
||||
}
|
||||
if nndiscr == variant as u64 {
|
||||
self.assign_fields(dest, dest_ty, operands)?;
|
||||
} else {
|
||||
|
|
@ -682,7 +669,8 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
|
|||
|
||||
Ref(_, _, ref lvalue) => {
|
||||
let src = self.eval_lvalue(lvalue)?;
|
||||
let (ptr, extra) = self.force_allocation(src)?.to_ptr_and_extra();
|
||||
// We ignore the alignment of the lvalue here -- this rvalue produces sth. of type &, which must always be aligned.
|
||||
let (ptr, extra, _aligned) = self.force_allocation(src)?.to_ptr_extra_aligned();
|
||||
let ty = self.lvalue_ty(lvalue);
|
||||
|
||||
let val = match extra {
|
||||
|
|
@ -695,7 +683,7 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
|
|||
|
||||
// Check alignment and non-NULLness.
|
||||
let (_, align) = self.size_and_align_of_dst(ty, val)?;
|
||||
self.memory.check_align(ptr, align, 0)?;
|
||||
self.memory.check_align(ptr, align)?;
|
||||
|
||||
self.write_value(val, dest, dest_ty)?;
|
||||
}
|
||||
|
|
@ -967,7 +955,7 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
|
|||
self.value_to_primval(value, ty)
|
||||
}
|
||||
|
||||
pub(super) fn eval_operand(&mut self, op: &mir::Operand<'tcx>) -> EvalResult<'tcx, Value> {
|
||||
pub(super) fn eval_operand_maybe_unaligned(&mut self, op: &mir::Operand<'tcx>) -> EvalResult<'tcx, (Value, bool)> {
|
||||
use rustc::mir::Operand::*;
|
||||
match *op {
|
||||
Consume(ref lvalue) => self.eval_and_read_lvalue(lvalue),
|
||||
|
|
@ -993,11 +981,16 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
|
|||
}
|
||||
};
|
||||
|
||||
Ok(value)
|
||||
Ok((value, true))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub(super) fn eval_operand(&mut self, op: &mir::Operand<'tcx>) -> EvalResult<'tcx, Value> {
|
||||
// This is called when the packed flag is not taken into account. Ignore alignment.
|
||||
Ok(self.eval_operand_maybe_unaligned(op)?.0)
|
||||
}
|
||||
|
||||
pub(super) fn operand_ty(&self, operand: &mir::Operand<'tcx>) -> Ty<'tcx> {
|
||||
self.monomorphize(operand.ty(self.mir(), self.tcx), self.substs())
|
||||
}
|
||||
|
|
@ -1131,9 +1124,12 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
|
|||
self.write_value_possibly_by_val(src_val, write_dest, dest.value, dest_ty)
|
||||
},
|
||||
|
||||
Lvalue::Ptr { ptr, extra } => {
|
||||
Lvalue::Ptr { ptr, extra, aligned } => {
|
||||
assert_eq!(extra, LvalueExtra::None);
|
||||
self.write_value_to_ptr(src_val, ptr, dest_ty)
|
||||
self.memory.writes_are_aligned = aligned;
|
||||
let r = self.write_value_to_ptr(src_val, ptr, dest_ty);
|
||||
self.memory.writes_are_aligned = true;
|
||||
r
|
||||
}
|
||||
|
||||
Lvalue::Local { frame, local } => {
|
||||
|
|
|
|||
|
|
@ -17,6 +17,8 @@ pub enum Lvalue<'tcx> {
|
|||
/// before ever being dereferenced.
|
||||
ptr: Pointer,
|
||||
extra: LvalueExtra,
|
||||
/// Remember whether this lvalue is *supposed* to be aligned.
|
||||
aligned: bool,
|
||||
},
|
||||
|
||||
/// An lvalue referring to a value on the stack. Represented by a stack frame index paired with
|
||||
|
|
@ -68,24 +70,25 @@ impl<'tcx> Lvalue<'tcx> {
|
|||
}
|
||||
|
||||
pub(crate) fn from_primval_ptr(ptr: Pointer) -> Self {
|
||||
Lvalue::Ptr { ptr, extra: LvalueExtra::None }
|
||||
Lvalue::Ptr { ptr, extra: LvalueExtra::None, aligned: true }
|
||||
}
|
||||
|
||||
pub(crate) fn from_ptr(ptr: MemoryPointer) -> Self {
|
||||
Self::from_primval_ptr(ptr.into())
|
||||
}
|
||||
|
||||
pub(super) fn to_ptr_and_extra(self) -> (Pointer, LvalueExtra) {
|
||||
pub(super) fn to_ptr_extra_aligned(self) -> (Pointer, LvalueExtra, bool) {
|
||||
match self {
|
||||
Lvalue::Ptr { ptr, extra } => (ptr, extra),
|
||||
Lvalue::Ptr { ptr, extra, aligned } => (ptr, extra, aligned),
|
||||
_ => bug!("to_ptr_and_extra: expected Lvalue::Ptr, got {:?}", self),
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
pub(super) fn to_ptr(self) -> EvalResult<'tcx, MemoryPointer> {
|
||||
let (ptr, extra) = self.to_ptr_and_extra();
|
||||
let (ptr, extra, aligned) = self.to_ptr_extra_aligned();
|
||||
assert_eq!(extra, LvalueExtra::None);
|
||||
assert_eq!(aligned, true, "tried converting an unaligned lvalue into a ptr");
|
||||
ptr.to_ptr()
|
||||
}
|
||||
|
||||
|
|
@ -175,13 +178,14 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
|
|||
}
|
||||
}
|
||||
|
||||
pub(super) fn eval_and_read_lvalue(&mut self, lvalue: &mir::Lvalue<'tcx>) -> EvalResult<'tcx, Value> {
|
||||
/// Returns a value and (in case of a ByRef) if we are supposed to use aligned accesses.
|
||||
pub(super) fn eval_and_read_lvalue(&mut self, lvalue: &mir::Lvalue<'tcx>) -> EvalResult<'tcx, (Value, bool)> {
|
||||
let ty = self.lvalue_ty(lvalue);
|
||||
// Shortcut for things like accessing a fat pointer's field,
|
||||
// which would otherwise (in the `eval_lvalue` path) require moving a `ByValPair` to memory
|
||||
// and returning an `Lvalue::Ptr` to it
|
||||
if let Some(val) = self.try_read_lvalue(lvalue)? {
|
||||
return Ok(val);
|
||||
return Ok((val, true));
|
||||
}
|
||||
let lvalue = self.eval_lvalue(lvalue)?;
|
||||
|
||||
|
|
@ -190,15 +194,15 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
|
|||
}
|
||||
|
||||
match lvalue {
|
||||
Lvalue::Ptr { ptr, extra } => {
|
||||
Lvalue::Ptr { ptr, extra, aligned } => {
|
||||
assert_eq!(extra, LvalueExtra::None);
|
||||
Ok(Value::ByRef(ptr))
|
||||
Ok((Value::ByRef(ptr), aligned))
|
||||
}
|
||||
Lvalue::Local { frame, local } => {
|
||||
self.stack[frame].get_local(local)
|
||||
Ok((self.stack[frame].get_local(local)?, true))
|
||||
}
|
||||
Lvalue::Global(cid) => {
|
||||
Ok(self.globals.get(&cid).expect("global not cached").value)
|
||||
Ok((self.globals.get(&cid).expect("global not cached").value, true))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -239,7 +243,7 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
|
|||
},
|
||||
|
||||
General { ref variants, .. } => {
|
||||
let (_, base_extra) = base.to_ptr_and_extra();
|
||||
let (_, base_extra, _) = base.to_ptr_extra_aligned();
|
||||
if let LvalueExtra::DowncastVariant(variant_idx) = base_extra {
|
||||
// +1 for the discriminant, which is field 0
|
||||
(variants[variant_idx].offsets[field_index + 1], variants[variant_idx].packed)
|
||||
|
|
@ -289,8 +293,8 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
|
|||
};
|
||||
|
||||
// Do not allocate in trivial cases
|
||||
let (base_ptr, base_extra) = match base {
|
||||
Lvalue::Ptr { ptr, extra } => (ptr, extra),
|
||||
let (base_ptr, base_extra, aligned) = match base {
|
||||
Lvalue::Ptr { ptr, extra, aligned } => (ptr, extra, aligned),
|
||||
Lvalue::Local { frame, local } => match self.stack[frame].get_local(local)? {
|
||||
// in case the type has a single field, just return the value
|
||||
Value::ByVal(_) if self.get_field_count(base_ty).map(|c| c == 1).unwrap_or(false) => {
|
||||
|
|
@ -299,7 +303,7 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
|
|||
},
|
||||
Value::ByRef(_) |
|
||||
Value::ByValPair(..) |
|
||||
Value::ByVal(_) => self.force_allocation(base)?.to_ptr_and_extra(),
|
||||
Value::ByVal(_) => self.force_allocation(base)?.to_ptr_extra_aligned(),
|
||||
},
|
||||
Lvalue::Global(cid) => match self.globals.get(&cid).expect("uncached global").value {
|
||||
// in case the type has a single field, just return the value
|
||||
|
|
@ -309,7 +313,7 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
|
|||
},
|
||||
Value::ByRef(_) |
|
||||
Value::ByValPair(..) |
|
||||
Value::ByVal(_) => self.force_allocation(base)?.to_ptr_and_extra(),
|
||||
Value::ByVal(_) => self.force_allocation(base)?.to_ptr_extra_aligned(),
|
||||
},
|
||||
};
|
||||
|
||||
|
|
@ -325,11 +329,6 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
|
|||
|
||||
let field_ty = self.monomorphize(field_ty, self.substs());
|
||||
|
||||
if packed {
|
||||
let size = self.type_size(field_ty)?.expect("packed struct must be sized");
|
||||
self.memory.mark_packed(ptr.to_ptr()?, size);
|
||||
}
|
||||
|
||||
let extra = if self.type_is_sized(field_ty) {
|
||||
LvalueExtra::None
|
||||
} else {
|
||||
|
|
@ -343,7 +342,7 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
|
|||
base_extra
|
||||
};
|
||||
|
||||
Ok(Lvalue::Ptr { ptr, extra })
|
||||
Ok(Lvalue::Ptr { ptr, extra, aligned: aligned && !packed })
|
||||
}
|
||||
|
||||
fn eval_lvalue_projection(
|
||||
|
|
@ -351,7 +350,7 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
|
|||
proj: &mir::LvalueProjection<'tcx>,
|
||||
) -> EvalResult<'tcx, Lvalue<'tcx>> {
|
||||
use rustc::mir::ProjectionElem::*;
|
||||
let (ptr, extra) = match proj.elem {
|
||||
let (ptr, extra, aligned) = match proj.elem {
|
||||
Field(field, field_ty) => {
|
||||
let base = self.eval_lvalue(&proj.base)?;
|
||||
let base_ty = self.lvalue_ty(&proj.base);
|
||||
|
|
@ -364,7 +363,7 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
|
|||
let base_layout = self.type_layout(base_ty)?;
|
||||
// FIXME(solson)
|
||||
let base = self.force_allocation(base)?;
|
||||
let (base_ptr, base_extra) = base.to_ptr_and_extra();
|
||||
let (base_ptr, base_extra, aligned) = base.to_ptr_extra_aligned();
|
||||
|
||||
use rustc::ty::layout::Layout::*;
|
||||
let extra = match *base_layout {
|
||||
|
|
@ -372,12 +371,14 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
|
|||
RawNullablePointer { .. } | StructWrappedNullablePointer { .. } => base_extra,
|
||||
_ => bug!("variant downcast on non-aggregate: {:?}", base_layout),
|
||||
};
|
||||
(base_ptr, extra)
|
||||
(base_ptr, extra, aligned)
|
||||
}
|
||||
|
||||
Deref => {
|
||||
let base_ty = self.lvalue_ty(&proj.base);
|
||||
let val = self.eval_and_read_lvalue(&proj.base)?;
|
||||
let (val, _aligned) = self.eval_and_read_lvalue(&proj.base)?;
|
||||
// Conservatively, the intermediate accesses of a Deref lvalue do not take into account the packed flag.
|
||||
// Hence we ignore alignment here.
|
||||
|
||||
let pointee_type = match base_ty.sty {
|
||||
ty::TyRawPtr(ref tam) |
|
||||
|
|
@ -391,13 +392,13 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
|
|||
match self.tcx.struct_tail(pointee_type).sty {
|
||||
ty::TyDynamic(..) => {
|
||||
let (ptr, vtable) = val.expect_ptr_vtable_pair(&self.memory)?;
|
||||
(ptr, LvalueExtra::Vtable(vtable))
|
||||
(ptr, LvalueExtra::Vtable(vtable), true)
|
||||
},
|
||||
ty::TyStr | ty::TySlice(_) => {
|
||||
let (ptr, len) = val.expect_slice(&self.memory)?;
|
||||
(ptr, LvalueExtra::Length(len))
|
||||
(ptr, LvalueExtra::Length(len), true)
|
||||
},
|
||||
_ => (val.read_ptr(&self.memory)?, LvalueExtra::None),
|
||||
_ => (val.read_ptr(&self.memory)?, LvalueExtra::None, true),
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -406,7 +407,7 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
|
|||
let base_ty = self.lvalue_ty(&proj.base);
|
||||
// FIXME(solson)
|
||||
let base = self.force_allocation(base)?;
|
||||
let (base_ptr, _) = base.to_ptr_and_extra();
|
||||
let (base_ptr, _, aligned) = base.to_ptr_extra_aligned();
|
||||
|
||||
let (elem_ty, len) = base.elem_ty_and_len(base_ty);
|
||||
let elem_size = self.type_size(elem_ty)?.expect("slice element must be sized");
|
||||
|
|
@ -415,7 +416,7 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
|
|||
let n = self.value_to_primval(n_ptr, usize)?.to_u64()?;
|
||||
assert!(n < len, "Tried to access element {} of array/slice with length {}", n, len);
|
||||
let ptr = base_ptr.offset(n * elem_size, self.memory.layout)?;
|
||||
(ptr, LvalueExtra::None)
|
||||
(ptr, LvalueExtra::None, aligned)
|
||||
}
|
||||
|
||||
ConstantIndex { offset, min_length, from_end } => {
|
||||
|
|
@ -423,7 +424,7 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
|
|||
let base_ty = self.lvalue_ty(&proj.base);
|
||||
// FIXME(solson)
|
||||
let base = self.force_allocation(base)?;
|
||||
let (base_ptr, _) = base.to_ptr_and_extra();
|
||||
let (base_ptr, _, aligned) = base.to_ptr_extra_aligned();
|
||||
|
||||
let (elem_ty, n) = base.elem_ty_and_len(base_ty);
|
||||
let elem_size = self.type_size(elem_ty)?.expect("sequence element must be sized");
|
||||
|
|
@ -436,7 +437,7 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
|
|||
};
|
||||
|
||||
let ptr = base_ptr.offset(index * elem_size, self.memory.layout)?;
|
||||
(ptr, LvalueExtra::None)
|
||||
(ptr, LvalueExtra::None, aligned)
|
||||
}
|
||||
|
||||
Subslice { from, to } => {
|
||||
|
|
@ -444,18 +445,18 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
|
|||
let base_ty = self.lvalue_ty(&proj.base);
|
||||
// FIXME(solson)
|
||||
let base = self.force_allocation(base)?;
|
||||
let (base_ptr, _) = base.to_ptr_and_extra();
|
||||
let (base_ptr, _, aligned) = base.to_ptr_extra_aligned();
|
||||
|
||||
let (elem_ty, n) = base.elem_ty_and_len(base_ty);
|
||||
let elem_size = self.type_size(elem_ty)?.expect("slice element must be sized");
|
||||
assert!(u64::from(from) <= n - u64::from(to));
|
||||
let ptr = base_ptr.offset(u64::from(from) * elem_size, self.memory.layout)?;
|
||||
let extra = LvalueExtra::Length(n - u64::from(to) - u64::from(from));
|
||||
(ptr, extra)
|
||||
(ptr, extra, aligned)
|
||||
}
|
||||
};
|
||||
|
||||
Ok(Lvalue::Ptr { ptr, extra })
|
||||
Ok(Lvalue::Ptr { ptr, extra, aligned })
|
||||
}
|
||||
|
||||
pub(super) fn lvalue_ty(&self, lvalue: &mir::Lvalue<'tcx>) -> Ty<'tcx> {
|
||||
|
|
|
|||
|
|
@ -1,5 +1,5 @@
|
|||
use byteorder::{ReadBytesExt, WriteBytesExt, LittleEndian, BigEndian};
|
||||
use std::collections::{btree_map, BTreeMap, HashMap, HashSet, VecDeque, BTreeSet};
|
||||
use std::collections::{btree_map, BTreeMap, HashMap, HashSet, VecDeque};
|
||||
use std::{fmt, iter, ptr, mem, io};
|
||||
|
||||
use rustc::ty;
|
||||
|
|
@ -124,20 +124,6 @@ pub struct Memory<'a, 'tcx> {
|
|||
/// Target machine data layout to emulate.
|
||||
pub layout: &'a TargetDataLayout,
|
||||
|
||||
/// List of memory regions containing packed structures.
|
||||
///
|
||||
/// We mark memory as "packed" or "unaligned" for a single statement, and clear the marking
|
||||
/// afterwards. In the case where no packed structs are present, it's just a single emptyness
|
||||
/// check of a set instead of heavily influencing all memory access code as other solutions
|
||||
/// would. This is simpler than the alternative of passing a "packed" parameter to every
|
||||
/// load/store method.
|
||||
///
|
||||
/// One disadvantage of this solution is the fact that you can cast a pointer to a packed
|
||||
/// struct to a pointer to a normal struct and if you access a field of both in the same MIR
|
||||
/// statement, the normal struct access will succeed even though it shouldn't. But even with
|
||||
/// mir optimizations, that situation is hard/impossible to produce.
|
||||
packed: BTreeSet<Entry>,
|
||||
|
||||
/// A cache for basic byte allocations keyed by their contents. This is used to deduplicate
|
||||
/// allocations for string and bytestring literals.
|
||||
literal_alloc_cache: HashMap<Vec<u8>, AllocId>,
|
||||
|
|
@ -147,6 +133,11 @@ pub struct Memory<'a, 'tcx> {
|
|||
|
||||
/// The Key to use for the next thread-local allocation.
|
||||
next_thread_local: TlsKey,
|
||||
|
||||
/// To avoid having to pass flags to every single memory access, we have some global state saying whether
|
||||
/// alignment checking is currently enforced for read and/or write accesses.
|
||||
pub reads_are_aligned: bool,
|
||||
pub writes_are_aligned: bool,
|
||||
}
|
||||
|
||||
impl<'a, 'tcx> Memory<'a, 'tcx> {
|
||||
|
|
@ -159,11 +150,12 @@ impl<'a, 'tcx> Memory<'a, 'tcx> {
|
|||
layout,
|
||||
memory_size: max_memory,
|
||||
memory_usage: 0,
|
||||
packed: BTreeSet::new(),
|
||||
static_alloc: HashSet::new(),
|
||||
literal_alloc_cache: HashMap::new(),
|
||||
thread_local: BTreeMap::new(),
|
||||
next_thread_local: 0,
|
||||
reads_are_aligned: true,
|
||||
writes_are_aligned: true,
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -278,30 +270,10 @@ impl<'a, 'tcx> Memory<'a, 'tcx> {
|
|||
self.layout.endian
|
||||
}
|
||||
|
||||
pub fn check_align(&self, ptr: Pointer, align: u64, len: u64) -> EvalResult<'tcx> {
|
||||
pub fn check_align(&self, ptr: Pointer, align: u64) -> EvalResult<'tcx> {
|
||||
let offset = match ptr.into_inner_primval() {
|
||||
PrimVal::Ptr(ptr) => {
|
||||
let alloc = self.get(ptr.alloc_id)?;
|
||||
// check whether the memory was marked as packed
|
||||
// we select all elements that have the correct alloc_id and are within
|
||||
// the range given by the offset into the allocation and the length
|
||||
let start = Entry {
|
||||
alloc_id: ptr.alloc_id,
|
||||
packed_start: 0,
|
||||
packed_end: ptr.offset + len,
|
||||
};
|
||||
let end = Entry {
|
||||
alloc_id: ptr.alloc_id,
|
||||
packed_start: ptr.offset + len,
|
||||
packed_end: 0,
|
||||
};
|
||||
for &Entry { packed_start, packed_end, .. } in self.packed.range(start..end) {
|
||||
// if the region we are checking is covered by a region in `packed`
|
||||
// ignore the actual alignment
|
||||
if packed_start <= ptr.offset && (ptr.offset + len) <= packed_end {
|
||||
return Ok(());
|
||||
}
|
||||
}
|
||||
if alloc.align < align {
|
||||
return Err(EvalError::AlignmentCheckFailed {
|
||||
has: alloc.align,
|
||||
|
|
@ -338,18 +310,6 @@ impl<'a, 'tcx> Memory<'a, 'tcx> {
|
|||
Ok(())
|
||||
}
|
||||
|
||||
pub(crate) fn mark_packed(&mut self, ptr: MemoryPointer, len: u64) {
|
||||
self.packed.insert(Entry {
|
||||
alloc_id: ptr.alloc_id,
|
||||
packed_start: ptr.offset,
|
||||
packed_end: ptr.offset + len,
|
||||
});
|
||||
}
|
||||
|
||||
pub(crate) fn clear_packed(&mut self) {
|
||||
self.packed.clear();
|
||||
}
|
||||
|
||||
pub(crate) fn create_tls_key(&mut self, dtor: Option<ty::Instance<'tcx>>) -> TlsKey {
|
||||
let new_key = self.next_thread_local;
|
||||
self.next_thread_local += 1;
|
||||
|
|
@ -426,20 +386,6 @@ impl<'a, 'tcx> Memory<'a, 'tcx> {
|
|||
}
|
||||
}
|
||||
|
||||
// The derived `Ord` impl sorts first by the first field, then, if the fields are the same
|
||||
// by the second field, and if those are the same, too, then by the third field.
|
||||
// This is exactly what we need for our purposes, since a range within an allocation
|
||||
// will give us all `Entry`s that have that `AllocId`, and whose `packed_start` is <= than
|
||||
// the one we're looking for, but not > the end of the range we're checking.
|
||||
// At the same time the `packed_end` is irrelevant for the sorting and range searching, but used for the check.
|
||||
// This kind of search breaks, if `packed_end < packed_start`, so don't do that!
|
||||
#[derive(Eq, PartialEq, Ord, PartialOrd)]
|
||||
struct Entry {
|
||||
alloc_id: AllocId,
|
||||
packed_start: u64,
|
||||
packed_end: u64,
|
||||
}
|
||||
|
||||
/// Allocation accessors
|
||||
impl<'a, 'tcx> Memory<'a, 'tcx> {
|
||||
pub fn get(&self, id: AllocId) -> EvalResult<'tcx, &Allocation> {
|
||||
|
|
@ -576,7 +522,9 @@ impl<'a, 'tcx> Memory<'a, 'tcx> {
|
|||
return Ok(&[]);
|
||||
}
|
||||
// FIXME: check alignment for zst memory accesses?
|
||||
self.check_align(ptr.into(), align, size)?;
|
||||
if self.reads_are_aligned {
|
||||
self.check_align(ptr.into(), align)?;
|
||||
}
|
||||
self.check_bounds(ptr.offset(size, self.layout)?, true)?; // if ptr.offset is in bounds, then so is ptr (because offset checks for overflow)
|
||||
let alloc = self.get(ptr.alloc_id)?;
|
||||
assert_eq!(ptr.offset as usize as u64, ptr.offset);
|
||||
|
|
@ -590,7 +538,9 @@ impl<'a, 'tcx> Memory<'a, 'tcx> {
|
|||
return Ok(&mut []);
|
||||
}
|
||||
// FIXME: check alignment for zst memory accesses?
|
||||
self.check_align(ptr.into(), align, size)?;
|
||||
if self.writes_are_aligned {
|
||||
self.check_align(ptr.into(), align)?;
|
||||
}
|
||||
self.check_bounds(ptr.offset(size, self.layout)?, true)?; // if ptr.offset is in bounds, then so is ptr (because offset checks for overflow)
|
||||
let alloc = self.get_mut(ptr.alloc_id)?;
|
||||
assert_eq!(ptr.offset as usize as u64, ptr.offset);
|
||||
|
|
|
|||
|
|
@ -28,8 +28,7 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
|
|||
|
||||
/// Returns true as long as there are more things to do.
|
||||
pub fn step(&mut self) -> EvalResult<'tcx, bool> {
|
||||
// see docs on the `Memory::packed` field for why we do this
|
||||
self.memory.clear_packed();
|
||||
assert!(self.memory.reads_are_aligned && self.memory.writes_are_aligned, "Someone forgot to clear the 'unaligned' flag");
|
||||
self.inc_step_counter_and_check_limit(1)?;
|
||||
if self.stack.is_empty() {
|
||||
return Ok(false);
|
||||
|
|
|
|||
|
|
@ -12,9 +12,9 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
|
|||
pub(crate) fn drop_lvalue(&mut self, lval: Lvalue<'tcx>, instance: ty::Instance<'tcx>, ty: Ty<'tcx>, span: Span) -> EvalResult<'tcx> {
|
||||
trace!("drop_lvalue: {:#?}", lval);
|
||||
let val = match self.force_allocation(lval)? {
|
||||
Lvalue::Ptr { ptr, extra: LvalueExtra::Vtable(vtable) } => ptr.to_value_with_vtable(vtable),
|
||||
Lvalue::Ptr { ptr, extra: LvalueExtra::Length(len) } => ptr.to_value_with_len(len),
|
||||
Lvalue::Ptr { ptr, extra: LvalueExtra::None } => ptr.to_value(),
|
||||
Lvalue::Ptr { ptr, extra: LvalueExtra::Vtable(vtable), aligned: true } => ptr.to_value_with_vtable(vtable),
|
||||
Lvalue::Ptr { ptr, extra: LvalueExtra::Length(len), aligned: true } => ptr.to_value_with_len(len),
|
||||
Lvalue::Ptr { ptr, extra: LvalueExtra::None, aligned: true } => ptr.to_value(),
|
||||
_ => bug!("force_allocation broken"),
|
||||
};
|
||||
self.drop(val, instance, ty, span)
|
||||
|
|
|
|||
|
|
@ -270,8 +270,8 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
|
|||
};
|
||||
match dest {
|
||||
Lvalue::Local { frame, local } => self.modify_local(frame, local, init)?,
|
||||
Lvalue::Ptr { ptr, extra: LvalueExtra::None } => self.memory.write_repeat(ptr, 0, size)?,
|
||||
Lvalue::Ptr { .. } => bug!("init intrinsic tried to write to fat ptr target"),
|
||||
Lvalue::Ptr { ptr, extra: LvalueExtra::None, aligned: true } => self.memory.write_repeat(ptr, 0, size)?,
|
||||
Lvalue::Ptr { .. } => bug!("init intrinsic tried to write to fat or unaligned ptr target"),
|
||||
Lvalue::Global(cid) => self.modify_global(cid, init)?,
|
||||
}
|
||||
}
|
||||
|
|
@ -394,11 +394,10 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
|
|||
|
||||
"transmute" => {
|
||||
let src_ty = substs.type_at(0);
|
||||
let dest_ty = substs.type_at(1);
|
||||
let size = self.type_size(dest_ty)?.expect("transmute() type must be sized");
|
||||
let ptr = self.force_allocation(dest)?.to_ptr()?;
|
||||
self.memory.mark_packed(ptr, size);
|
||||
self.memory.writes_are_aligned = false;
|
||||
self.write_value_to_ptr(arg_vals[0], ptr.into(), src_ty)?;
|
||||
self.memory.writes_are_aligned = true;
|
||||
}
|
||||
|
||||
"unchecked_shl" => {
|
||||
|
|
@ -448,9 +447,9 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
|
|||
};
|
||||
match dest {
|
||||
Lvalue::Local { frame, local } => self.modify_local(frame, local, uninit)?,
|
||||
Lvalue::Ptr { ptr, extra: LvalueExtra::None } =>
|
||||
Lvalue::Ptr { ptr, extra: LvalueExtra::None, aligned: true } =>
|
||||
self.memory.mark_definedness(ptr, size, false)?,
|
||||
Lvalue::Ptr { .. } => bug!("uninit intrinsic tried to write to fat ptr target"),
|
||||
Lvalue::Ptr { .. } => bug!("uninit intrinsic tried to write to fat or unaligned ptr target"),
|
||||
Lvalue::Global(cid) => self.modify_global(cid, uninit)?,
|
||||
}
|
||||
}
|
||||
|
|
@ -465,7 +464,7 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
|
|||
let count = self.value_to_primval(arg_vals[2], usize)?.to_u64()?;
|
||||
if count > 0 {
|
||||
// TODO: Should we, at least, validate the alignment? (Also see memory::copy)
|
||||
self.memory.check_align(ptr, ty_align, size * count)?;
|
||||
self.memory.check_align(ptr, ty_align)?;
|
||||
self.memory.write_repeat(ptr, val_byte, size * count)?;
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -5,7 +5,7 @@ struct S {
|
|||
}
|
||||
|
||||
fn main() {
|
||||
let x = S {
|
||||
let mut x = S {
|
||||
a: 42,
|
||||
b: 99,
|
||||
};
|
||||
|
|
@ -16,4 +16,7 @@ fn main() {
|
|||
// can't do `assert_eq!(x.a, 42)`, because `assert_eq!` takes a reference
|
||||
assert_eq!({x.a}, 42);
|
||||
assert_eq!({x.b}, 99);
|
||||
|
||||
x.b = 77;
|
||||
assert_eq!({x.b}, 77);
|
||||
}
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue