diff --git a/src/librustc/ich/impls_ty.rs b/src/librustc/ich/impls_ty.rs index cb685f83aba1..f13e26fee3ee 100644 --- a/src/librustc/ich/impls_ty.rs +++ b/src/librustc/ich/impls_ty.rs @@ -392,6 +392,11 @@ for ::mir::interpret::ConstValue<'gcx> { } } +impl_stable_hash_for!(enum mir::interpret::ScalarMaybeUndef { + Scalar(v), + Undef +}); + impl_stable_hash_for!(enum mir::interpret::Value { Scalar(v), ScalarPair(a, b), @@ -466,9 +471,9 @@ for ::mir::interpret::Scalar { mem::discriminant(self).hash_stable(hcx, hasher); match *self { - Bits { bits, defined } => { + Bits { bits, size } => { bits.hash_stable(hcx, hasher); - defined.hash_stable(hcx, hasher); + size.hash_stable(hcx, hasher); }, Ptr(ptr) => ptr.hash_stable(hcx, hasher), } diff --git a/src/librustc/mir/interpret/mod.rs b/src/librustc/mir/interpret/mod.rs index 4164fe3fd933..a0980b06230c 100644 --- a/src/librustc/mir/interpret/mod.rs +++ b/src/librustc/mir/interpret/mod.rs @@ -13,7 +13,7 @@ pub use self::error::{ FrameInfo, ConstEvalResult, }; -pub use self::value::{Scalar, Value, ConstValue}; +pub use self::value::{Scalar, Value, ConstValue, ScalarMaybeUndef}; use std::fmt; use mir; diff --git a/src/librustc/mir/interpret/value.rs b/src/librustc/mir/interpret/value.rs index ffd138c9c481..d61a687ccfad 100644 --- a/src/librustc/mir/interpret/value.rs +++ b/src/librustc/mir/interpret/value.rs @@ -15,7 +15,7 @@ pub enum ConstValue<'tcx> { /// to allow HIR creation to happen for everything before needing to be able to run constant /// evaluation Unevaluated(DefId, &'tcx Substs<'tcx>), - /// Used only for types with layout::abi::Scalar ABI and ZSTs which use Scalar::undef() + /// Used only for types with layout::abi::Scalar ABI and ZSTs Scalar(Scalar), /// Used only for types with layout::abi::ScalarPair ScalarPair(Scalar, Scalar), @@ -25,12 +25,12 @@ pub enum ConstValue<'tcx> { impl<'tcx> ConstValue<'tcx> { #[inline] - pub fn from_byval_value(val: Value) -> Self { - match val { + pub fn from_byval_value(val: 
Value) -> EvalResult<'static, Self> { + Ok(match val { Value::ByRef(..) => bug!(), - Value::ScalarPair(a, b) => ConstValue::ScalarPair(a, b), - Value::Scalar(val) => ConstValue::Scalar(val), - } + Value::ScalarPair(a, b) => ConstValue::ScalarPair(a.read()?, b.read()?), + Value::Scalar(val) => ConstValue::Scalar(val.read()?), + }) } #[inline] @@ -38,18 +38,13 @@ impl<'tcx> ConstValue<'tcx> { match *self { ConstValue::Unevaluated(..) | ConstValue::ByRef(..) => None, - ConstValue::ScalarPair(a, b) => Some(Value::ScalarPair(a, b)), - ConstValue::Scalar(val) => Some(Value::Scalar(val)), + ConstValue::ScalarPair(a, b) => Some(Value::ScalarPair(a.into(), b.into())), + ConstValue::Scalar(val) => Some(Value::Scalar(val.into())), } } #[inline] - pub fn from_scalar(val: Scalar) -> Self { - ConstValue::Scalar(val) - } - - #[inline] - pub fn to_scalar(&self) -> Option { + pub fn try_to_scalar(&self) -> Option { match *self { ConstValue::Unevaluated(..) | ConstValue::ByRef(..) | @@ -60,12 +55,12 @@ impl<'tcx> ConstValue<'tcx> { #[inline] pub fn to_bits(&self, size: Size) -> Option { - self.to_scalar()?.to_bits(size).ok() + self.try_to_scalar()?.to_bits(size).ok() } #[inline] pub fn to_ptr(&self) -> Option { - self.to_scalar()?.to_ptr().ok() + self.try_to_scalar()?.to_ptr().ok() } } @@ -81,8 +76,8 @@ impl<'tcx> ConstValue<'tcx> { #[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, RustcEncodable, RustcDecodable, Hash)] pub enum Value { ByRef(Scalar, Align), - Scalar(Scalar), - ScalarPair(Scalar, Scalar), + Scalar(ScalarMaybeUndef), + ScalarPair(ScalarMaybeUndef, ScalarMaybeUndef), } impl<'tcx> ty::TypeFoldable<'tcx> for Value { @@ -98,23 +93,23 @@ impl<'tcx> Scalar { pub fn ptr_null(cx: C) -> Self { Scalar::Bits { bits: 0, - defined: cx.data_layout().pointer_size.bits() as u8, + size: cx.data_layout().pointer_size.bytes() as u8, } } + pub fn to_value_with_len(self, len: u64, cx: C) -> Value { + ScalarMaybeUndef::Scalar(self).to_value_with_len(len, cx) + } + pub fn 
ptr_signed_offset(self, i: i64, cx: C) -> EvalResult<'tcx, Self> { let layout = cx.data_layout(); match self { - Scalar::Bits { bits, defined } => { - let pointer_size = layout.pointer_size.bits() as u8; - if defined < pointer_size { - err!(ReadUndefBytes) - } else { - Ok(Scalar::Bits { - bits: layout.signed_offset(bits as u64, i)? as u128, - defined: pointer_size, - }) - } + Scalar::Bits { bits, size } => { + assert_eq!(size as u64, layout.pointer_size.bytes()); + Ok(Scalar::Bits { + bits: layout.signed_offset(bits as u64, i)? as u128, + size, + }) } Scalar::Ptr(ptr) => ptr.signed_offset(i, layout).map(Scalar::Ptr), } @@ -123,65 +118,43 @@ impl<'tcx> Scalar { pub fn ptr_offset(self, i: Size, cx: C) -> EvalResult<'tcx, Self> { let layout = cx.data_layout(); match self { - Scalar::Bits { bits, defined } => { - let pointer_size = layout.pointer_size.bits() as u8; - if defined < pointer_size { - err!(ReadUndefBytes) - } else { - Ok(Scalar::Bits { - bits: layout.offset(bits as u64, i.bytes())? as u128, - defined: pointer_size, - }) - } + Scalar::Bits { bits, size } => { + assert_eq!(size as u64, layout.pointer_size.bytes()); + Ok(Scalar::Bits { + bits: layout.offset(bits as u64, i.bytes())? 
as u128, + size, + }) } Scalar::Ptr(ptr) => ptr.offset(i, layout).map(Scalar::Ptr), } } - pub fn ptr_wrapping_signed_offset(self, i: i64, cx: C) -> EvalResult<'tcx, Self> { + pub fn ptr_wrapping_signed_offset(self, i: i64, cx: C) -> Self { let layout = cx.data_layout(); match self { - Scalar::Bits { bits, defined } => { - let pointer_size = layout.pointer_size.bits() as u8; - if defined < pointer_size { - err!(ReadUndefBytes) - } else { - Ok(Scalar::Bits { - bits: layout.wrapping_signed_offset(bits as u64, i) as u128, - defined: pointer_size, - }) + Scalar::Bits { bits, size } => { + assert_eq!(size as u64, layout.pointer_size.bytes()); + Scalar::Bits { + bits: layout.wrapping_signed_offset(bits as u64, i) as u128, + size, + } } - } - Scalar::Ptr(ptr) => Ok(Scalar::Ptr(ptr.wrapping_signed_offset(i, layout))), + Scalar::Ptr(ptr) => Scalar::Ptr(ptr.wrapping_signed_offset(i, layout)), } } - pub fn is_null_ptr(self, cx: C) -> EvalResult<'tcx, bool> { + pub fn is_null_ptr(self, cx: C) -> bool { match self { - Scalar::Bits { - bits, defined, - } => if defined < cx.data_layout().pointer_size.bits() as u8 { - err!(ReadUndefBytes) - } else { - Ok(bits == 0) + Scalar::Bits { bits, size } => { + assert_eq!(size as u64, cx.data_layout().pointer_size.bytes()); + bits == 0 }, - Scalar::Ptr(_) => Ok(false), + Scalar::Ptr(_) => false, } } - pub fn to_value_with_len(self, len: u64, cx: C) -> Value { - Value::ScalarPair(self, Scalar::Bits { - bits: len as u128, - defined: cx.data_layout().pointer_size.bits() as u8, - }) - } - - pub fn to_value_with_vtable(self, vtable: Pointer) -> Value { - Value::ScalarPair(self, Scalar::Ptr(vtable)) - } - pub fn to_value(self) -> Value { - Value::Scalar(self) + Value::Scalar(ScalarMaybeUndef::Scalar(self)) } } @@ -199,8 +172,9 @@ impl From for Scalar { pub enum Scalar { /// The raw bytes of a simple value. Bits { - /// The first `defined` number of bits are valid - defined: u8, + /// The first `size` bytes are the value. 
+ /// Do not try to read less or more bytes that that + size: u8, bits: u128, }, @@ -210,25 +184,81 @@ pub enum Scalar { Ptr(Pointer), } -impl<'tcx> Scalar { - pub fn undef() -> Self { - Scalar::Bits { bits: 0, defined: 0 } +#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, RustcEncodable, RustcDecodable, Hash)] +pub enum ScalarMaybeUndef { + Scalar(Scalar), + Undef, +} + +impl From for ScalarMaybeUndef { + fn from(s: Scalar) -> Self { + ScalarMaybeUndef::Scalar(s) + } +} + +impl ScalarMaybeUndef { + pub fn read(self) -> EvalResult<'static, Scalar> { + match self { + ScalarMaybeUndef::Scalar(scalar) => Ok(scalar), + ScalarMaybeUndef::Undef => err!(ReadUndefBytes), + } } + pub fn to_value_with_len(self, len: u64, cx: C) -> Value { + Value::ScalarPair(self.into(), Scalar::Bits { + bits: len as u128, + size: cx.data_layout().pointer_size.bytes() as u8, + }.into()) + } + + pub fn to_value_with_vtable(self, vtable: Pointer) -> Value { + Value::ScalarPair(self.into(), Scalar::Ptr(vtable).into()) + } + + pub fn ptr_signed_offset(self, i: i64, cx: C) -> EvalResult<'tcx, Self> { + match self { + ScalarMaybeUndef::Scalar(scalar) => { + scalar.ptr_signed_offset(i, cx).map(ScalarMaybeUndef::Scalar) + }, + ScalarMaybeUndef::Undef => Ok(ScalarMaybeUndef::Undef) + } + } + + pub fn ptr_offset(self, i: Size, cx: C) -> EvalResult<'tcx, Self> { + match self { + ScalarMaybeUndef::Scalar(scalar) => { + scalar.ptr_offset(i, cx).map(ScalarMaybeUndef::Scalar) + }, + ScalarMaybeUndef::Undef => Ok(ScalarMaybeUndef::Undef) + } + } + + pub fn ptr_wrapping_signed_offset(self, i: i64, cx: C) -> Self { + match self { + ScalarMaybeUndef::Scalar(scalar) => { + ScalarMaybeUndef::Scalar(scalar.ptr_wrapping_signed_offset(i, cx)) + }, + ScalarMaybeUndef::Undef => ScalarMaybeUndef::Undef + } + } +} + +impl<'tcx> Scalar { pub fn from_bool(b: bool) -> Self { - // FIXME: can we make defined `1`? 
- Scalar::Bits { bits: b as u128, defined: 8 } + Scalar::Bits { bits: b as u128, size: 1 } } pub fn from_char(c: char) -> Self { - Scalar::Bits { bits: c as u128, defined: 32 } + Scalar::Bits { bits: c as u128, size: 4 } } - pub fn to_bits(self, size: Size) -> EvalResult<'tcx, u128> { + pub fn to_bits(self, target_size: Size) -> EvalResult<'tcx, u128> { match self { - Scalar::Bits { .. } if size.bits() == 0 => bug!("to_bits cannot be used with zsts"), - Scalar::Bits { bits, defined } if size.bits() <= defined as u64 => Ok(bits), - Scalar::Bits { .. } => err!(ReadUndefBytes), + Scalar::Bits { bits, size } => { + assert_eq!(target_size.bytes(), size as u64); + assert_ne!(size, 0, "to_bits cannot be used with zsts"); + Ok(bits) + } Scalar::Ptr(_) => err!(ReadPointerAsBytes), } } @@ -256,8 +286,8 @@ impl<'tcx> Scalar { pub fn to_bool(self) -> EvalResult<'tcx, bool> { match self { - Scalar::Bits { bits: 0, defined: 8 } => Ok(false), - Scalar::Bits { bits: 1, defined: 8 } => Ok(true), + Scalar::Bits { bits: 0, size: 1 } => Ok(false), + Scalar::Bits { bits: 1, size: 1 } => Ok(true), _ => err!(InvalidBool), } } diff --git a/src/librustc/mir/mod.rs b/src/librustc/mir/mod.rs index 4bfb4c96497f..dae5709ba114 100644 --- a/src/librustc/mir/mod.rs +++ b/src/librustc/mir/mod.rs @@ -17,7 +17,7 @@ use hir::def::CtorKind; use hir::def_id::DefId; use hir::{self, HirId, InlineAsm}; use middle::region; -use mir::interpret::{EvalErrorKind, Scalar, Value}; +use mir::interpret::{EvalErrorKind, Scalar, Value, ScalarMaybeUndef}; use mir::visit::MirVisitable; use rustc_apfloat::ieee::{Double, Single}; use rustc_apfloat::Float; @@ -1465,10 +1465,10 @@ impl<'tcx> TerminatorKind<'tcx> { .map(|&u| { let mut s = String::new(); print_miri_value( - Value::Scalar(Scalar::Bits { + Scalar::Bits { bits: u, - defined: size.bits() as u8, - }), + size: size.bytes() as u8, + }.to_value(), switch_ty, &mut s, ).unwrap(); @@ -2225,45 +2225,58 @@ pub fn fmt_const_val(fmt: &mut W, const_val: &ty::Const) -> 
fmt::Resul pub fn print_miri_value(value: Value, ty: Ty, f: &mut W) -> fmt::Result { use ty::TypeVariants::*; - match (value, &ty.sty) { - (Value::Scalar(Scalar::Bits { bits: 0, .. }), &TyBool) => write!(f, "false"), - (Value::Scalar(Scalar::Bits { bits: 1, .. }), &TyBool) => write!(f, "true"), - (Value::Scalar(Scalar::Bits { bits, .. }), &TyFloat(ast::FloatTy::F32)) => { - write!(f, "{}f32", Single::from_bits(bits)) - } - (Value::Scalar(Scalar::Bits { bits, .. }), &TyFloat(ast::FloatTy::F64)) => { - write!(f, "{}f64", Double::from_bits(bits)) - } - (Value::Scalar(Scalar::Bits { bits, .. }), &TyUint(ui)) => write!(f, "{:?}{}", bits, ui), - (Value::Scalar(Scalar::Bits { bits, .. }), &TyInt(i)) => { - let bit_width = ty::tls::with(|tcx| { - let ty = tcx.lift_to_global(&ty).unwrap(); - tcx.layout_of(ty::ParamEnv::empty().and(ty)) - .unwrap() - .size - .bits() - }); - let shift = 128 - bit_width; - write!(f, "{:?}{}", ((bits as i128) << shift) >> shift, i) - } - (Value::Scalar(Scalar::Bits { bits, .. }), &TyChar) => { - write!(f, "{:?}", ::std::char::from_u32(bits as u32).unwrap()) - } - (_, &TyFnDef(did, _)) => write!(f, "{}", item_path_str(did)), - ( - Value::ScalarPair(Scalar::Ptr(ptr), Scalar::Bits { bits: len, .. }), - &TyRef(_, &ty::TyS { sty: TyStr, .. }, _), - ) => ty::tls::with(|tcx| match tcx.alloc_map.lock().get(ptr.alloc_id) { - Some(interpret::AllocType::Memory(alloc)) => { - assert_eq!(len as usize as u128, len); - let slice = &alloc.bytes[(ptr.offset.bytes() as usize)..][..(len as usize)]; - let s = ::std::str::from_utf8(slice).expect("non utf8 str from miri"); - write!(f, "{:?}", s) + // print some primitives + if let Value::Scalar(ScalarMaybeUndef::Scalar(Scalar::Bits { bits, .. 
})) = value { + match ty.sty { + TyBool if bits == 0 => return write!(f, "false"), + TyBool if bits == 1 => return write!(f, "true"), + TyFloat(ast::FloatTy::F32) => return write!(f, "{}f32", Single::from_bits(bits)), + TyFloat(ast::FloatTy::F64) => return write!(f, "{}f64", Double::from_bits(bits)), + TyUint(ui) => return write!(f, "{:?}{}", bits, ui), + TyInt(i) => { + let bit_width = ty::tls::with(|tcx| { + let ty = tcx.lift_to_global(&ty).unwrap(); + tcx.layout_of(ty::ParamEnv::empty().and(ty)) + .unwrap() + .size + .bits() + }); + let shift = 128 - bit_width; + return write!(f, "{:?}{}", ((bits as i128) << shift) >> shift, i); } - _ => write!(f, "pointer to erroneous constant {:?}, {:?}", ptr, len), - }), - _ => write!(f, "{:?}:{}", value, ty), + TyChar => return write!(f, "{:?}", ::std::char::from_u32(bits as u32).unwrap()), + _ => {}, + } } + // print function definitons + if let TyFnDef(did, _) = ty.sty { + return write!(f, "{}", item_path_str(did)); + } + // print string literals + if let Value::ScalarPair(ptr, len) = value { + if let ScalarMaybeUndef::Scalar(Scalar::Ptr(ptr)) = ptr { + if let ScalarMaybeUndef::Scalar(Scalar::Bits { bits: len, .. }) = len { + if let TyRef(_, &ty::TyS { sty: TyStr, .. }, _) = ty.sty { + return ty::tls::with(|tcx| { + let alloc = tcx.alloc_map.lock().get(ptr.alloc_id); + if let Some(interpret::AllocType::Memory(alloc)) = alloc { + assert_eq!(len as usize as u128, len); + let slice = &alloc + .bytes + [(ptr.offset.bytes() as usize)..] 
+ [..(len as usize)]; + let s = ::std::str::from_utf8(slice).expect("non utf8 str from miri"); + write!(f, "{:?}", s) + } else { + write!(f, "pointer to erroneous constant {:?}, {:?}", ptr, len) + } + }); + } + } + } + } + // just raw dump everything else + write!(f, "{:?}:{}", value, ty) } fn item_path_str(def_id: DefId) -> String { diff --git a/src/librustc/ty/sty.rs b/src/librustc/ty/sty.rs index dd3818882433..96b4edce86b3 100644 --- a/src/librustc/ty/sty.rs +++ b/src/librustc/ty/sty.rs @@ -1887,22 +1887,13 @@ impl<'tcx> Const<'tcx> { }) } - #[inline] - pub fn from_byval_value( - tcx: TyCtxt<'_, '_, 'tcx>, - val: Value, - ty: Ty<'tcx>, - ) -> &'tcx Self { - Self::from_const_value(tcx, ConstValue::from_byval_value(val), ty) - } - #[inline] pub fn from_scalar( tcx: TyCtxt<'_, '_, 'tcx>, val: Scalar, ty: Ty<'tcx>, ) -> &'tcx Self { - Self::from_const_value(tcx, ConstValue::from_scalar(val), ty) + Self::from_const_value(tcx, ConstValue::Scalar(val), ty) } #[inline] @@ -1918,12 +1909,12 @@ impl<'tcx> Const<'tcx> { let shift = 128 - size.bits(); let truncated = (bits << shift) >> shift; assert_eq!(truncated, bits, "from_bits called with untruncated value"); - Self::from_scalar(tcx, Scalar::Bits { bits, defined: size.bits() as u8 }, ty.value) + Self::from_scalar(tcx, Scalar::Bits { bits, size: size.bytes() as u8 }, ty.value) } #[inline] pub fn zero_sized(tcx: TyCtxt<'_, '_, 'tcx>, ty: Ty<'tcx>) -> &'tcx Self { - Self::from_scalar(tcx, Scalar::undef(), ty) + Self::from_scalar(tcx, Scalar::Bits { bits: 0, size: 0 }, ty) } #[inline] @@ -1960,11 +1951,6 @@ impl<'tcx> Const<'tcx> { self.val.to_byval_value() } - #[inline] - pub fn to_scalar(&self) -> Option { - self.val.to_scalar() - } - #[inline] pub fn assert_bits( &self, diff --git a/src/librustc_codegen_llvm/mir/constant.rs b/src/librustc_codegen_llvm/mir/constant.rs index 341ed9df64b5..267db4467c21 100644 --- a/src/librustc_codegen_llvm/mir/constant.rs +++ b/src/librustc_codegen_llvm/mir/constant.rs @@ -39,10 +39,12 @@ 
pub fn scalar_to_llvm( ) -> &'ll Value { let bitsize = if layout.is_bool() { 1 } else { layout.value.size(cx).bits() }; match cv { - Scalar::Bits { defined, .. } if (defined as u64) < bitsize || defined == 0 => { - C_undef(Type::ix(cx, bitsize)) + Scalar::Bits { size: 0, .. } => { + assert_eq!(0, layout.value.size(cx).bytes()); + C_undef(Type::ix(cx, 0)) }, - Scalar::Bits { bits, .. } => { + Scalar::Bits { bits, size } => { + assert_eq!(size as u64, layout.value.size(cx).bytes()); let llval = C_uint_big(Type::ix(cx, bitsize), bits); if layout.value == layout::Pointer { unsafe { llvm::LLVMConstIntToPtr(llval, llty) } @@ -192,7 +194,7 @@ impl FunctionCx<'a, 'll, 'tcx> { mir::Field::new(field as usize), c, )?; - if let Some(prim) = field.to_scalar() { + if let Some(prim) = field.val.try_to_scalar() { let layout = bx.cx.layout_of(field_ty); let scalar = match layout.abi { layout::Abi::Scalar(ref x) => x, diff --git a/src/librustc_mir/hair/cx/mod.rs b/src/librustc_mir/hair/cx/mod.rs index 4a7225c3a76d..70148fc91760 100644 --- a/src/librustc_mir/hair/cx/mod.rs +++ b/src/librustc_mir/hair/cx/mod.rs @@ -151,14 +151,14 @@ impl<'a, 'gcx, 'tcx> Cx<'a, 'gcx, 'tcx> { let trunc = |n| { let param_ty = self.param_env.and(self.tcx.lift_to_global(&ty).unwrap()); - let bit_width = self.tcx.layout_of(param_ty).unwrap().size.bits(); - trace!("trunc {} with size {} and shift {}", n, bit_width, 128 - bit_width); - let shift = 128 - bit_width; + let width = self.tcx.layout_of(param_ty).unwrap().size; + trace!("trunc {} with size {} and shift {}", n, width.bits(), 128 - width.bits()); + let shift = 128 - width.bits(); let result = (n << shift) >> shift; trace!("trunc result: {}", result); ConstValue::Scalar(Scalar::Bits { bits: result, - defined: bit_width as u8, + size: width.bytes() as u8, }) }; @@ -168,7 +168,7 @@ impl<'a, 'gcx, 'tcx> Cx<'a, 'gcx, 'tcx> { let s = s.as_str(); let id = self.tcx.allocate_bytes(s.as_bytes()); let value = Scalar::Ptr(id.into()).to_value_with_len(s.len() as 
u64, self.tcx); - ConstValue::from_byval_value(value) + ConstValue::from_byval_value(value).unwrap() }, LitKind::ByteStr(ref data) => { let id = self.tcx.allocate_bytes(data); @@ -176,7 +176,7 @@ impl<'a, 'gcx, 'tcx> Cx<'a, 'gcx, 'tcx> { }, LitKind::Byte(n) => ConstValue::Scalar(Scalar::Bits { bits: n as u128, - defined: 8, + size: 1, }), LitKind::Int(n, _) if neg => { let n = n as i128; @@ -194,14 +194,8 @@ impl<'a, 'gcx, 'tcx> Cx<'a, 'gcx, 'tcx> { }; parse_float(n, fty) } - LitKind::Bool(b) => ConstValue::Scalar(Scalar::Bits { - bits: b as u128, - defined: 8, - }), - LitKind::Char(c) => ConstValue::Scalar(Scalar::Bits { - bits: c as u128, - defined: 32, - }), + LitKind::Bool(b) => ConstValue::Scalar(Scalar::from_bool(b)), + LitKind::Char(c) => ConstValue::Scalar(Scalar::from_char(c)), }; ty::Const::from_const_value(self.tcx, lit, ty) } diff --git a/src/librustc_mir/hair/pattern/mod.rs b/src/librustc_mir/hair/pattern/mod.rs index 53511c1c127d..a60513116b2e 100644 --- a/src/librustc_mir/hair/pattern/mod.rs +++ b/src/librustc_mir/hair/pattern/mod.rs @@ -19,7 +19,7 @@ pub(crate) use self::check_match::check_match; use interpret::{const_val_field, const_variant_index, self}; use rustc::mir::{fmt_const_val, Field, BorrowKind, Mutability}; -use rustc::mir::interpret::{Scalar, GlobalId, ConstValue, Value}; +use rustc::mir::interpret::{Scalar, GlobalId, ConstValue}; use rustc::ty::{self, TyCtxt, AdtDef, Ty, Region}; use rustc::ty::subst::{Substs, Kind}; use rustc::hir::{self, PatKind, RangeEnd}; @@ -1080,8 +1080,9 @@ pub fn compare_const_vals<'a, 'tcx>( l.partial_cmp(&r) }, ty::TyInt(_) => { - let a = interpret::sign_extend(tcx, a, ty.value).expect("layout error for TyInt"); - let b = interpret::sign_extend(tcx, b, ty.value).expect("layout error for TyInt"); + let layout = tcx.layout_of(ty).ok()?; + let a = interpret::sign_extend(a, layout); + let b = interpret::sign_extend(b, layout); Some((a as i128).cmp(&(b as i128))) }, _ => Some(a.cmp(&b)), @@ -1090,16 +1091,16 @@ 
pub fn compare_const_vals<'a, 'tcx>( if let ty::TyRef(_, rty, _) = ty.value.sty { if let ty::TyStr = rty.sty { - match (a.to_byval_value(), b.to_byval_value()) { + match (a.val, b.val) { ( - Some(Value::ScalarPair( + ConstValue::ScalarPair( Scalar::Ptr(ptr_a), len_a, - )), - Some(Value::ScalarPair( + ), + ConstValue::ScalarPair( Scalar::Ptr(ptr_b), len_b, - )) + ), ) if ptr_a.offset.bytes() == 0 && ptr_b.offset.bytes() == 0 => { if let Ok(len_a) = len_a.to_bits(tcx.data_layout.pointer_size) { if let Ok(len_b) = len_b.to_bits(tcx.data_layout.pointer_size) { @@ -1142,7 +1143,7 @@ fn lit_to_const<'a, 'tcx>(lit: &'tcx ast::LitKind, let s = s.as_str(); let id = tcx.allocate_bytes(s.as_bytes()); let value = Scalar::Ptr(id.into()).to_value_with_len(s.len() as u64, tcx); - ConstValue::from_byval_value(value) + ConstValue::from_byval_value(value).unwrap() }, LitKind::ByteStr(ref data) => { let id = tcx.allocate_bytes(data); @@ -1150,7 +1151,7 @@ fn lit_to_const<'a, 'tcx>(lit: &'tcx ast::LitKind, }, LitKind::Byte(n) => ConstValue::Scalar(Scalar::Bits { bits: n as u128, - defined: 8, + size: 1, }), LitKind::Int(n, _) => { enum Int { @@ -1188,10 +1189,10 @@ fn lit_to_const<'a, 'tcx>(lit: &'tcx ast::LitKind, Int::Signed(IntTy::I128)| Int::Unsigned(UintTy::U128) => n, _ => bug!(), }; - let defined = tcx.layout_of(ty::ParamEnv::empty().and(ty)).unwrap().size.bits() as u8; + let size = tcx.layout_of(ty::ParamEnv::empty().and(ty)).unwrap().size.bytes() as u8; ConstValue::Scalar(Scalar::Bits { bits: n, - defined, + size, }) }, LitKind::Float(n, fty) => { @@ -1204,14 +1205,8 @@ fn lit_to_const<'a, 'tcx>(lit: &'tcx ast::LitKind, }; parse_float(n, fty, neg).map_err(|_| LitToConstError::UnparseableFloat)? 
} - LitKind::Bool(b) => ConstValue::Scalar(Scalar::Bits { - bits: b as u128, - defined: 8, - }), - LitKind::Char(c) => ConstValue::Scalar(Scalar::Bits { - bits: c as u128, - defined: 32, - }), + LitKind::Bool(b) => ConstValue::Scalar(Scalar::from_bool(b)), + LitKind::Char(c) => ConstValue::Scalar(Scalar::from_char(c)), }; Ok(ty::Const::from_const_value(tcx, lit, ty)) } @@ -1224,7 +1219,7 @@ pub fn parse_float<'tcx>( let num = num.as_str(); use rustc_apfloat::ieee::{Single, Double}; use rustc_apfloat::Float; - let (bits, defined) = match fty { + let (bits, size) = match fty { ast::FloatTy::F32 => { num.parse::().map_err(|_| ())?; let mut f = num.parse::().unwrap_or_else(|e| { @@ -1233,7 +1228,7 @@ pub fn parse_float<'tcx>( if neg { f = -f; } - (f.to_bits(), 32) + (f.to_bits(), 4) } ast::FloatTy::F64 => { num.parse::().map_err(|_| ())?; @@ -1243,9 +1238,9 @@ pub fn parse_float<'tcx>( if neg { f = -f; } - (f.to_bits(), 64) + (f.to_bits(), 8) } }; - Ok(ConstValue::Scalar(Scalar::Bits { bits, defined })) + Ok(ConstValue::Scalar(Scalar::Bits { bits, size })) } diff --git a/src/librustc_mir/interpret/cast.rs b/src/librustc_mir/interpret/cast.rs index 7bcf4ef6588d..4e705254331a 100644 --- a/src/librustc_mir/interpret/cast.rs +++ b/src/librustc_mir/interpret/cast.rs @@ -1,5 +1,5 @@ use rustc::ty::{self, Ty}; -use rustc::ty::layout::{self, LayoutOf}; +use rustc::ty::layout::{self, LayoutOf, TyLayout}; use syntax::ast::{FloatTy, IntTy, UintTy}; use rustc_apfloat::ieee::{Single, Double}; @@ -18,11 +18,11 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { dest_ty: Ty<'tcx>, dest: Place, ) -> EvalResult<'tcx> { + let src_layout = self.layout_of(src.ty)?; + let dst_layout = self.layout_of(dest_ty)?; use rustc::mir::CastKind::*; match kind { Unsize => { - let src_layout = self.layout_of(src.ty)?; - let dst_layout = self.layout_of(dest_ty)?; self.unsize_into(src.value, src_layout, dest, dst_layout)?; } @@ -57,16 +57,11 @@ impl<'a, 'mir, 'tcx, M: 
Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { let discr_val = def .discriminant_for_variant(*self.tcx, index) .val; - let defined = self - .layout_of(dest_ty) - .unwrap() - .size - .bits() as u8; return self.write_scalar( dest, Scalar::Bits { bits: discr_val, - defined, + size: dst_layout.size.bytes() as u8, }, dest_ty); } @@ -76,9 +71,9 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { } let src_val = self.value_to_scalar(src)?; - let dest_val = self.cast_scalar(src_val, src.ty, dest_ty)?; + let dest_val = self.cast_scalar(src_val, src_layout, dst_layout)?; let valty = ValTy { - value: Value::Scalar(dest_val), + value: Value::Scalar(dest_val.into()), ty: dest_ty, }; self.write_value(valty, dest)?; @@ -100,7 +95,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { ).ok_or_else(|| EvalErrorKind::TooGeneric.into()); let fn_ptr = self.memory.create_fn_alloc(instance?); let valty = ValTy { - value: Value::Scalar(fn_ptr.into()), + value: Value::Scalar(Scalar::Ptr(fn_ptr.into()).into()), ty: dest_ty, }; self.write_value(valty, dest)?; @@ -136,7 +131,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { ); let fn_ptr = self.memory.create_fn_alloc(instance); let valty = ValTy { - value: Value::Scalar(fn_ptr.into()), + value: Value::Scalar(Scalar::Ptr(fn_ptr.into()).into()), ty: dest_ty, }; self.write_value(valty, dest)?; @@ -151,20 +146,19 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { pub(super) fn cast_scalar( &self, val: Scalar, - src_ty: Ty<'tcx>, - dest_ty: Ty<'tcx>, + src_layout: TyLayout<'tcx>, + dest_layout: TyLayout<'tcx>, ) -> EvalResult<'tcx, Scalar> { use rustc::ty::TypeVariants::*; - trace!("Casting {:?}: {:?} to {:?}", val, src_ty, dest_ty); + trace!("Casting {:?}: {:?} to {:?}", val, src_layout.ty, dest_layout.ty); match val { - Scalar::Bits { defined: 0, .. 
} => Ok(val), - Scalar::Ptr(ptr) => self.cast_from_ptr(ptr, dest_ty), - Scalar::Bits { bits, .. } => { - // TODO(oli-obk): check defined bits here - match src_ty.sty { - TyFloat(fty) => self.cast_from_float(bits, fty, dest_ty), - _ => self.cast_from_int(bits, src_ty, dest_ty), + Scalar::Ptr(ptr) => self.cast_from_ptr(ptr, dest_layout.ty), + Scalar::Bits { bits, size } => { + assert_eq!(size as u64, src_layout.size.bytes()); + match src_layout.ty.sty { + TyFloat(fty) => self.cast_from_float(bits, fty, dest_layout.ty), + _ => self.cast_from_int(bits, src_layout, dest_layout), } } } @@ -173,56 +167,58 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { fn cast_from_int( &self, v: u128, - src_ty: Ty<'tcx>, - dest_ty: Ty<'tcx>, + src_layout: TyLayout<'tcx>, + dest_layout: TyLayout<'tcx>, ) -> EvalResult<'tcx, Scalar> { - let signed = self.layout_of(src_ty)?.abi.is_signed(); + let signed = src_layout.abi.is_signed(); let v = if signed { - self.sign_extend(v, src_ty)? 
+ self.sign_extend(v, src_layout) } else { v }; - trace!("cast_from_int: {}, {}, {}", v, src_ty, dest_ty); + trace!("cast_from_int: {}, {}, {}", v, src_layout.ty, dest_layout.ty); use rustc::ty::TypeVariants::*; - match dest_ty.sty { + match dest_layout.ty.sty { TyInt(_) | TyUint(_) => { - let v = self.truncate(v, dest_ty)?; + let v = self.truncate(v, dest_layout); Ok(Scalar::Bits { bits: v, - defined: self.layout_of(dest_ty).unwrap().size.bits() as u8, + size: dest_layout.size.bytes() as u8, }) } TyFloat(FloatTy::F32) if signed => Ok(Scalar::Bits { bits: Single::from_i128(v as i128).value.to_bits(), - defined: 32, + size: 4, }), TyFloat(FloatTy::F64) if signed => Ok(Scalar::Bits { bits: Double::from_i128(v as i128).value.to_bits(), - defined: 64, + size: 8, }), TyFloat(FloatTy::F32) => Ok(Scalar::Bits { bits: Single::from_u128(v).value.to_bits(), - defined: 32, + size: 4, }), TyFloat(FloatTy::F64) => Ok(Scalar::Bits { bits: Double::from_u128(v).value.to_bits(), - defined: 64, + size: 8, }), - TyChar if v as u8 as u128 == v => Ok(Scalar::Bits { bits: v, defined: 32 }), - TyChar => err!(InvalidChar(v)), + TyChar => { + assert_eq!(v as u8 as u128, v); + Ok(Scalar::Bits { bits: v, size: 4 }) + }, // No alignment check needed for raw pointers. But we have to truncate to target ptr size. TyRawPtr(_) => { Ok(Scalar::Bits { bits: self.memory.truncate_to_ptr(v).0 as u128, - defined: self.memory.pointer_size().bits() as u8, + size: self.memory.pointer_size().bytes() as u8, }) }, // Casts to bool are not permitted by rustc, no need to handle them here. 
- _ => err!(Unimplemented(format!("int to {:?} cast", dest_ty))), + _ => err!(Unimplemented(format!("int to {:?} cast", dest_layout.ty))), } } @@ -236,11 +232,11 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { match fty { FloatTy::F32 => Ok(Scalar::Bits { bits: Single::from_bits(bits).to_u128(width).value, - defined: width as u8, + size: (width / 8) as u8, }), FloatTy::F64 => Ok(Scalar::Bits { bits: Double::from_bits(bits).to_u128(width).value, - defined: width as u8, + size: (width / 8) as u8, }), } }, @@ -250,11 +246,11 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { match fty { FloatTy::F32 => Ok(Scalar::Bits { bits: Single::from_bits(bits).to_i128(width).value as u128, - defined: width as u8, + size: (width / 8) as u8, }), FloatTy::F64 => Ok(Scalar::Bits { bits: Double::from_bits(bits).to_i128(width).value as u128, - defined: width as u8, + size: (width / 8) as u8, }), } }, @@ -262,24 +258,24 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { TyFloat(FloatTy::F32) if fty == FloatTy::F64 => { Ok(Scalar::Bits { bits: Single::to_bits(Double::from_bits(bits).convert(&mut false).value), - defined: 32, + size: 4, }) }, // f32 -> f64 TyFloat(FloatTy::F64) if fty == FloatTy::F32 => { Ok(Scalar::Bits { bits: Double::to_bits(Single::from_bits(bits).convert(&mut false).value), - defined: 64, + size: 8, }) }, // identity cast TyFloat(FloatTy:: F64) => Ok(Scalar::Bits { bits, - defined: 64, + size: 8, }), TyFloat(FloatTy:: F32) => Ok(Scalar::Bits { bits, - defined: 32, + size: 4, }), _ => err!(Unimplemented(format!("float to {:?} cast", dest_ty))), } diff --git a/src/librustc_mir/interpret/const_eval.rs b/src/librustc_mir/interpret/const_eval.rs index 873fef75bb9e..9ea33b24d129 100644 --- a/src/librustc_mir/interpret/const_eval.rs +++ b/src/librustc_mir/interpret/const_eval.rs @@ -2,11 +2,12 @@ use std::fmt; use std::error::Error; use rustc::hir; -use 
rustc::mir::interpret::{ConstEvalErr}; +use rustc::mir::interpret::{ConstEvalErr, ScalarMaybeUndef}; use rustc::mir; use rustc::ty::{self, TyCtxt, Ty, Instance}; use rustc::ty::layout::{self, LayoutOf, Primitive, TyLayout}; use rustc::ty::subst::Subst; +use rustc_data_structures::indexed_vec::IndexVec; use syntax::ast::Mutability; use syntax::codemap::Span; @@ -28,13 +29,16 @@ pub fn mk_borrowck_eval_cx<'a, 'mir, 'tcx>( let param_env = tcx.param_env(instance.def_id()); let mut ecx = EvalContext::new(tcx.at(span), param_env, CompileTimeEvaluator, ()); // insert a stack frame so any queries have the correct substs - ecx.push_stack_frame( + ecx.stack.push(super::eval_context::Frame { + block: mir::START_BLOCK, + locals: IndexVec::new(), instance, span, mir, - Place::undef(), - StackPopCleanup::None, - )?; + return_place: Place::undef(), + return_to_block: StackPopCleanup::None, + stmt: 0, + }); Ok(ecx) } @@ -76,7 +80,7 @@ pub fn value_to_const_value<'tcx>( ) -> &'tcx ty::Const<'tcx> { let layout = ecx.layout_of(ty).unwrap(); match (val, &layout.abi) { - (Value::Scalar(Scalar::Bits { defined: 0, ..}), _) if layout.is_zst() => {}, + (Value::Scalar(ScalarMaybeUndef::Scalar(Scalar::Bits { size: 0, ..})), _) if layout.is_zst() => {}, (Value::ByRef(..), _) | (Value::Scalar(_), &layout::Abi::Scalar(_)) | (Value::ScalarPair(..), &layout::Abi::ScalarPair(..)) => {}, @@ -84,8 +88,8 @@ pub fn value_to_const_value<'tcx>( } let val = (|| { match val { - Value::Scalar(val) => Ok(ConstValue::Scalar(val)), - Value::ScalarPair(a, b) => Ok(ConstValue::ScalarPair(a, b)), + Value::Scalar(val) => Ok(ConstValue::Scalar(val.read()?)), + Value::ScalarPair(a, b) => Ok(ConstValue::ScalarPair(a.read()?, b.read()?)), Value::ByRef(ptr, align) => { let ptr = ptr.to_ptr().unwrap(); let alloc = ecx.memory.get(ptr.alloc_id)?; @@ -307,7 +311,7 @@ impl<'mir, 'tcx> super::Machine<'mir, 'tcx> for CompileTimeEvaluator { let elem_align = ecx.layout_of(elem_ty)?.align.abi(); let align_val = Scalar::Bits { 
bits: elem_align as u128, - defined: dest_layout.size.bits() as u8, + size: dest_layout.size.bytes() as u8, }; ecx.write_scalar(dest, align_val, dest_layout.ty)?; } @@ -317,7 +321,7 @@ impl<'mir, 'tcx> super::Machine<'mir, 'tcx> for CompileTimeEvaluator { let size = ecx.layout_of(ty)?.size.bytes() as u128; let size_val = Scalar::Bits { bits: size, - defined: dest_layout.size.bits() as u8, + size: dest_layout.size.bytes() as u8, }; ecx.write_scalar(dest, size_val, dest_layout.ty)?; } @@ -327,7 +331,7 @@ impl<'mir, 'tcx> super::Machine<'mir, 'tcx> for CompileTimeEvaluator { let type_id = ecx.tcx.type_id_hash(ty) as u128; let id_val = Scalar::Bits { bits: type_id, - defined: dest_layout.size.bits() as u8, + size: dest_layout.size.bytes() as u8, }; ecx.write_scalar(dest, id_val, dest_layout.ty)?; } @@ -437,7 +441,7 @@ pub fn const_val_field<'a, 'tcx>( let place = ecx.allocate_place_for_value(value, layout, variant)?; let (place, layout) = ecx.place_field(place, field, layout)?; let (ptr, align) = place.to_ptr_align(); - let mut new_value = Value::ByRef(ptr, align); + let mut new_value = Value::ByRef(ptr.read()?, align); new_value = ecx.try_read_by_ref(new_value, layout.ty)?; use rustc_data_structures::indexed_vec::Idx; match (value, new_value) { @@ -562,6 +566,9 @@ pub fn const_eval_provider<'a, 'tcx>( }; if tcx.is_static(def_id).is_some() { err.report_as_error(ecx.tcx, "could not evaluate static initializer"); + if tcx.sess.err_count() == 0 { + span_bug!(span, "static eval failure didn't emit an error: {:#?}", err); + } } err.into() }) @@ -572,11 +579,11 @@ fn numeric_intrinsic<'tcx>( bits: u128, kind: Primitive, ) -> EvalResult<'tcx, Scalar> { - let defined = match kind { - Primitive::Int(integer, _) => integer.size().bits() as u8, + let size = match kind { + Primitive::Int(integer, _) => integer.size(), _ => bug!("invalid `{}` argument: {:?}", name, bits), }; - let extra = 128 - defined as u128; + let extra = 128 - size.bits() as u128; let bits_out = match name { 
"ctpop" => bits.count_ones() as u128, "ctlz" => bits.leading_zeros() as u128 - extra, @@ -584,5 +591,5 @@ fn numeric_intrinsic<'tcx>( "bswap" => (bits << extra).swap_bytes(), _ => bug!("not a numeric intrinsic: {}", name), }; - Ok(Scalar::Bits { bits: bits_out, defined }) + Ok(Scalar::Bits { bits: bits_out, size: size.bytes() as u8 }) } diff --git a/src/librustc_mir/interpret/eval_context.rs b/src/librustc_mir/interpret/eval_context.rs index c6c1a1d1ebb2..b6b593b15a6a 100644 --- a/src/librustc_mir/interpret/eval_context.rs +++ b/src/librustc_mir/interpret/eval_context.rs @@ -15,6 +15,7 @@ use rustc_data_structures::indexed_vec::{IndexVec, Idx}; use rustc::mir::interpret::{ GlobalId, Value, Scalar, FrameInfo, AllocType, EvalResult, EvalErrorKind, Pointer, ConstValue, + ScalarMaybeUndef, }; use syntax::codemap::{self, Span}; @@ -105,9 +106,7 @@ pub struct Frame<'mir, 'tcx: 'mir> { /// `[return_ptr, arguments..., variables..., temporaries...]`. The locals are stored as `Option`s. /// `None` represents a local that is currently dead, while a live local /// can either directly contain `Scalar` or refer to some part of an `Allocation`. - /// - /// Before being initialized, arguments are `Value::Scalar(Scalar::undef())` and other locals are `None`. 
- pub locals: IndexVec>, + pub locals: IndexVec, //////////////////////////////////////////////////////////////////////////////// // Current position within the function @@ -120,6 +119,21 @@ pub struct Frame<'mir, 'tcx: 'mir> { pub stmt: usize, } +#[derive(Copy, Clone, PartialEq, Eq, Hash)] +pub enum LocalValue { + Dead, + Live(Value), +} + +impl LocalValue { + pub fn access(self) -> EvalResult<'static, Value> { + match self { + LocalValue::Dead => err!(DeadLocal), + LocalValue::Live(val) => Ok(val), + } + } +} + impl<'mir, 'tcx: 'mir> Eq for Frame<'mir, 'tcx> {} impl<'mir, 'tcx: 'mir> PartialEq for Frame<'mir, 'tcx> { @@ -395,8 +409,8 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M let id = self.memory.allocate_value(alloc.clone(), MemoryKind::Stack)?; Ok(Value::ByRef(Pointer::new(id, offset).into(), alloc.align)) }, - ConstValue::ScalarPair(a, b) => Ok(Value::ScalarPair(a, b)), - ConstValue::Scalar(val) => Ok(Value::Scalar(val)), + ConstValue::ScalarPair(a, b) => Ok(Value::ScalarPair(a.into(), b.into())), + ConstValue::Scalar(val) => Ok(Value::Scalar(val.into())), } } @@ -538,8 +552,26 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M ) -> EvalResult<'tcx> { ::log_settings::settings().indentation += 1; - let locals = if mir.local_decls.len() > 1 { - let mut locals = IndexVec::from_elem(Some(Value::Scalar(Scalar::undef())), &mir.local_decls); + // first push a stack frame so we have access to the local substs + self.stack.push(Frame { + mir, + block: mir::START_BLOCK, + return_to_block, + return_place, + // empty local array, we fill it in below, after we are inside the stack frame and + // all methods actually know about the frame + locals: IndexVec::new(), + span, + instance, + stmt: 0, + }); + + // don't allocate at all for trivial constants + if mir.local_decls.len() > 1 { + let mut locals = IndexVec::from_elem(LocalValue::Dead, &mir.local_decls); + for (local, decl) in 
locals.iter_mut().zip(mir.local_decls.iter()) { + *local = LocalValue::Live(self.init_value(decl.ty)?); + } match self.tcx.describe_def(instance.def_id()) { // statics and constants don't have `Storage*` statements, no need to look for them Some(Def::Static(..)) | Some(Def::Const(..)) | Some(Def::AssociatedConst(..)) => {}, @@ -550,29 +582,15 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M use rustc::mir::StatementKind::{StorageDead, StorageLive}; match stmt.kind { StorageLive(local) | - StorageDead(local) => locals[local] = None, + StorageDead(local) => locals[local] = LocalValue::Dead, _ => {} } } } }, } - locals - } else { - // don't allocate at all for trivial constants - IndexVec::new() - }; - - self.stack.push(Frame { - mir, - block: mir::START_BLOCK, - return_to_block, - return_place, - locals, - span, - instance, - stmt: 0, - }); + self.frame_mut().locals = locals; + } self.memory.cur_frame = self.cur_frame(); @@ -598,7 +616,7 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M if let Place::Ptr { ptr, .. } = frame.return_place { // FIXME: to_ptr()? might be too extreme here, static zsts might reach this under certain conditions self.memory.mark_static_initialized( - ptr.to_ptr()?.alloc_id, + ptr.read()?.to_ptr()?.alloc_id, mutable, )? } else { @@ -616,8 +634,9 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M Ok(()) } - pub fn deallocate_local(&mut self, local: Option) -> EvalResult<'tcx> { - if let Some(Value::ByRef(ptr, _align)) = local { + pub fn deallocate_local(&mut self, local: LocalValue) -> EvalResult<'tcx> { + // FIXME: should we tell the user that there was a local which was never written to? 
+ if let LocalValue::Live(Value::ByRef(ptr, _align)) = local { trace!("deallocating local"); let ptr = ptr.to_ptr()?; self.memory.dump_alloc(ptr.alloc_id); @@ -637,6 +656,7 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M ) -> EvalResult<'tcx> { let dest = self.eval_place(place)?; let dest_ty = self.place_ty(place); + let dest_layout = self.layout_of(dest_ty)?; use rustc::mir::Rvalue::*; match *rvalue { @@ -675,7 +695,7 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M UnaryOp(un_op, ref operand) => { let val = self.eval_operand_to_scalar(operand)?; - let val = self.unary_op(un_op, val, dest_ty)?; + let val = self.unary_op(un_op, val, dest_layout)?; self.write_scalar( dest, val, @@ -724,6 +744,7 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M let (dest, dest_align) = self.force_allocation(dest)?.to_ptr_align(); if length > 0 { + let dest = dest.read()?; //write the first value self.write_value_to_ptr(value, dest, dest_align, elem_ty)?; @@ -739,12 +760,12 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M let src = self.eval_place(place)?; let ty = self.place_ty(place); let (_, len) = src.elem_ty_and_len(ty, self.tcx.tcx); - let defined = self.memory.pointer_size().bits() as u8; + let size = self.memory.pointer_size().bytes() as u8; self.write_scalar( dest, Scalar::Bits { bits: len as u128, - defined, + size, }, dest_ty, )?; @@ -757,7 +778,7 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M let (ptr, _align, extra) = self.force_allocation(src)?.to_ptr_align_extra(); let val = match extra { - PlaceExtra::None => ptr.to_value(), + PlaceExtra::None => Value::Scalar(ptr), PlaceExtra::Length(len) => ptr.to_value_with_len(len, self.tcx.tcx), PlaceExtra::Vtable(vtable) => ptr.to_value_with_vtable(vtable), PlaceExtra::DowncastVariant(..) 
=> { @@ -781,12 +802,12 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M let layout = self.layout_of(ty)?; assert!(!layout.is_unsized(), "SizeOf nullary MIR operator called for unsized type"); - let defined = self.memory.pointer_size().bits() as u8; + let size = self.memory.pointer_size().bytes() as u8; self.write_scalar( dest, Scalar::Bits { bits: layout.size.bytes() as u128, - defined, + size, }, dest_ty, )?; @@ -803,10 +824,10 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M let layout = self.layout_of(ty)?; let place = self.eval_place(place)?; let discr_val = self.read_discriminant_value(place, layout)?; - let defined = self.layout_of(dest_ty).unwrap().size.bits() as u8; + let size = self.layout_of(dest_ty).unwrap().size.bytes() as u8; self.write_scalar(dest, Scalar::Bits { bits: discr_val, - defined, + size, }, dest_ty)?; } } @@ -957,10 +978,8 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M assert!(variants_start == variants_end); dataful_variant as u128 }, - Scalar::Bits { bits: raw_discr, defined } => { - if defined < discr.size.bits() as u8 { - return err!(ReadUndefBytes); - } + Scalar::Bits { bits: raw_discr, size } => { + assert_eq!(size as u64, discr.size.bytes()); let discr = raw_discr.wrapping_sub(niche_start) .wrapping_add(variants_start); if variants_start <= discr && discr <= variants_end { @@ -1002,14 +1021,14 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M // raw discriminants for enums are isize or bigger during // their computation, but the in-memory tag is the smallest possible // representation - let size = tag.value.size(self.tcx.tcx).bits(); - let shift = 128 - size; + let size = tag.value.size(self.tcx.tcx); + let shift = 128 - size.bits(); let discr_val = (discr_val << shift) >> shift; let (discr_dest, tag) = self.place_field(dest, mir::Field::new(0), layout)?; self.write_scalar(discr_dest, 
Scalar::Bits { bits: discr_val, - defined: size as u8, + size: size.bytes() as u8, }, tag.ty)?; } layout::Variants::NicheFilling { @@ -1025,7 +1044,7 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M .wrapping_add(niche_start); self.write_scalar(niche_dest, Scalar::Bits { bits: niche_value, - defined: niche.size.bits() as u8, + size: niche.size.bytes() as u8, }, niche.ty)?; } } @@ -1072,22 +1091,22 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M pub fn force_allocation(&mut self, place: Place) -> EvalResult<'tcx, Place> { let new_place = match place { Place::Local { frame, local } => { - match self.stack[frame].locals[local] { - None => return err!(DeadLocal), - Some(Value::ByRef(ptr, align)) => { + match self.stack[frame].locals[local].access()? { + Value::ByRef(ptr, align) => { Place::Ptr { - ptr, + ptr: ptr.into(), align, extra: PlaceExtra::None, } } - Some(val) => { + val => { let ty = self.stack[frame].mir.local_decls[local].ty; let ty = self.monomorphize(ty, self.stack[frame].instance.substs); let layout = self.layout_of(ty)?; let ptr = self.alloc_ptr(layout)?; self.stack[frame].locals[local] = - Some(Value::ByRef(ptr.into(), layout.align)); // it stays live + LocalValue::Live(Value::ByRef(ptr.into(), layout.align)); // it stays live + let place = Place::from_ptr(ptr, layout.align); self.write_value(ValTy { value: val, ty }, place)?; place @@ -1137,11 +1156,11 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M pub fn write_scalar( &mut self, dest: Place, - val: Scalar, + val: impl Into, dest_ty: Ty<'tcx>, ) -> EvalResult<'tcx> { let valty = ValTy { - value: Value::Scalar(val), + value: Value::Scalar(val.into()), ty: dest_ty, }; self.write_value(valty, dest) @@ -1160,15 +1179,15 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M match dest { Place::Ptr { ptr, align, extra } => { assert_eq!(extra, PlaceExtra::None); - 
self.write_value_to_ptr(src_val, ptr, align, dest_ty) + self.write_value_to_ptr(src_val, ptr.read()?, align, dest_ty) } Place::Local { frame, local } => { - let dest = self.stack[frame].get_local(local)?; + let old_val = self.stack[frame].locals[local].access()?; self.write_value_possibly_by_val( src_val, |this, val| this.stack[frame].set_local(local, val), - dest, + old_val, dest_ty, ) } @@ -1183,6 +1202,7 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M old_dest_val: Value, dest_ty: Ty<'tcx>, ) -> EvalResult<'tcx> { + // FIXME: this should be a layout check, not underlying value if let Value::ByRef(dest_ptr, align) = old_dest_val { // If the value is already `ByRef` (that is, backed by an `Allocation`), // then we must write the new value into this allocation, because there may be @@ -1239,10 +1259,7 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M layout::Primitive::Int(_, signed) => signed, _ => false, }, - _ => match scalar { - Scalar::Bits { defined: 0, .. } => false, - _ => bug!("write_value_to_ptr: invalid ByVal layout: {:#?}", layout), - } + _ => false, }; self.memory.write_scalar(dest, dest_align, scalar, layout.size, signed) } @@ -1278,20 +1295,22 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M pointee_ty: Ty<'tcx>, ) -> EvalResult<'tcx, Value> { let ptr_size = self.memory.pointer_size(); - let p: Scalar = self.memory.read_ptr_sized(ptr, ptr_align)?.into(); + let p: ScalarMaybeUndef = self.memory.read_ptr_sized(ptr, ptr_align)?; if self.type_is_sized(pointee_ty) { - Ok(p.to_value()) + Ok(Value::Scalar(p)) } else { trace!("reading fat pointer extra of type {}", pointee_ty); let extra = ptr.offset(ptr_size, self)?; match self.tcx.struct_tail(pointee_ty).sty { - ty::TyDynamic(..) => Ok(p.to_value_with_vtable( - self.memory.read_ptr_sized(extra, ptr_align)?.to_ptr()?, + ty::TyDynamic(..) 
=> Ok(Value::ScalarPair( + p, + self.memory.read_ptr_sized(extra, ptr_align)?, )), ty::TySlice(..) | ty::TyStr => { let len = self .memory .read_ptr_sized(extra, ptr_align)? + .read()? .to_bits(ptr_size)?; Ok(p.to_value_with_len(len as u64, self.tcx.tcx)) }, @@ -1347,8 +1366,10 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M match ty.sty { ty::TyChar => { assert_eq!(size.bytes(), 4); - if ::std::char::from_u32(bits as u32).is_none() { - return err!(InvalidChar(bits)); + let c = self.memory.read_scalar(ptr, ptr_align, Size::from_bytes(4))?.read()?.to_bits(Size::from_bytes(4))? as u32; + match ::std::char::from_u32(c) { + Some(..) => (), + None => return err!(InvalidChar(c as u128)), } } _ => {}, @@ -1534,7 +1555,7 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M self.memory.check_align(ptr, ptr_align)?; if layout.size.bytes() == 0 { - return Ok(Some(Value::Scalar(Scalar::undef()))); + return Ok(Some(Value::Scalar(ScalarMaybeUndef::Scalar(Scalar::Bits { bits: 0, size: 0 })))); } let ptr = ptr.to_ptr()?; @@ -1670,7 +1691,7 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M } let (src_f_value, src_field) = match src { Value::ByRef(ptr, align) => { - let src_place = Place::from_scalar_ptr(ptr, align); + let src_place = Place::from_scalar_ptr(ptr.into(), align); let (src_f_place, src_field) = self.place_field(src_place, mir::Field::new(i), src_layout)?; (self.read_place(src_f_place)?, src_field) @@ -1717,7 +1738,7 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M } write!(msg, ":").unwrap(); - match self.stack[frame].get_local(local) { + match self.stack[frame].locals[local].access() { Err(err) => { if let EvalErrorKind::DeadLocal = err.kind { write!(msg, " is dead").unwrap(); @@ -1736,16 +1757,16 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M } Ok(Value::Scalar(val)) => { write!(msg, " {:?}", 
val).unwrap(); - if let Scalar::Ptr(ptr) = val { + if let ScalarMaybeUndef::Scalar(Scalar::Ptr(ptr)) = val { allocs.push(ptr.alloc_id); } } Ok(Value::ScalarPair(val1, val2)) => { write!(msg, " ({:?}, {:?})", val1, val2).unwrap(); - if let Scalar::Ptr(ptr) = val1 { + if let ScalarMaybeUndef::Scalar(Scalar::Ptr(ptr)) = val1 { allocs.push(ptr.alloc_id); } - if let Scalar::Ptr(ptr) = val2 { + if let ScalarMaybeUndef::Scalar(Scalar::Ptr(ptr)) = val2 { allocs.push(ptr.alloc_id); } } @@ -1756,7 +1777,7 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M } Place::Ptr { ptr, align, .. } => { match ptr { - Scalar::Ptr(ptr) => { + ScalarMaybeUndef::Scalar(Scalar::Ptr(ptr)) => { trace!("by align({}) ref:", align.abi()); self.memory.dump_alloc(ptr.alloc_id); } @@ -1766,21 +1787,6 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M } } - /// Convenience function to ensure correct usage of locals - pub fn modify_local(&mut self, frame: usize, local: mir::Local, f: F) -> EvalResult<'tcx> - where - F: FnOnce(&mut Self, Value) -> EvalResult<'tcx, Value>, - { - let val = self.stack[frame].get_local(local)?; - let new_val = f(self, val)?; - self.stack[frame].set_local(local, new_val)?; - // FIXME(solson): Run this when setting to Undef? (See previous version of this code.) 
- // if let Value::ByRef(ptr) = self.stack[frame].get_local(local) { - // self.memory.deallocate(ptr)?; - // } - Ok(()) - } - pub fn generate_stacktrace(&self, explicit_span: Option) -> (Vec, Span) { let mut last_span = None; let mut frames = Vec::new(); @@ -1819,12 +1825,12 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M (frames, self.tcx.span) } - pub fn sign_extend(&self, value: u128, ty: Ty<'tcx>) -> EvalResult<'tcx, u128> { - super::sign_extend(self.tcx.tcx, value, ty) + pub fn sign_extend(&self, value: u128, ty: TyLayout<'_>) -> u128 { + super::sign_extend(value, ty) } - pub fn truncate(&self, value: u128, ty: Ty<'tcx>) -> EvalResult<'tcx, u128> { - super::truncate(self.tcx.tcx, value, ty) + pub fn truncate(&self, value: u128, ty: TyLayout<'_>) -> u128 { + super::truncate(value, ty) } fn write_field_name(&self, s: &mut String, ty: Ty<'tcx>, i: usize, variant: usize) -> ::std::fmt::Result { @@ -1893,34 +1899,45 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M } } } + + pub fn storage_live(&mut self, local: mir::Local) -> EvalResult<'tcx, LocalValue> { + trace!("{:?} is now live", local); + + let ty = self.frame().mir.local_decls[local].ty; + let init = self.init_value(ty)?; + // StorageLive *always* kills the value that's currently stored + Ok(mem::replace(&mut self.frame_mut().locals[local], LocalValue::Live(init))) + } + + fn init_value(&mut self, ty: Ty<'tcx>) -> EvalResult<'tcx, Value> { + let ty = self.monomorphize(ty, self.substs()); + let layout = self.layout_of(ty)?; + Ok(match layout.abi { + layout::Abi::Scalar(..) => Value::Scalar(ScalarMaybeUndef::Undef), + layout::Abi::ScalarPair(..) 
=> Value::ScalarPair( + ScalarMaybeUndef::Undef, + ScalarMaybeUndef::Undef, + ), + _ => Value::ByRef(self.alloc_ptr(ty)?.into(), layout.align), + }) + } } impl<'mir, 'tcx> Frame<'mir, 'tcx> { - pub fn get_local(&self, local: mir::Local) -> EvalResult<'tcx, Value> { - self.locals[local].ok_or_else(|| EvalErrorKind::DeadLocal.into()) - } - fn set_local(&mut self, local: mir::Local, value: Value) -> EvalResult<'tcx> { match self.locals[local] { - None => err!(DeadLocal), - Some(ref mut local) => { + LocalValue::Dead => err!(DeadLocal), + LocalValue::Live(ref mut local) => { *local = value; Ok(()) } } } - pub fn storage_live(&mut self, local: mir::Local) -> Option { - trace!("{:?} is now live", local); - - // StorageLive *always* kills the value that's currently stored - mem::replace(&mut self.locals[local], Some(Value::Scalar(Scalar::undef()))) - } - /// Returns the old value of the local - pub fn storage_dead(&mut self, local: mir::Local) -> Option { + pub fn storage_dead(&mut self, local: mir::Local) -> LocalValue { trace!("{:?} is now dead", local); - self.locals[local].take() + mem::replace(&mut self.locals[local], LocalValue::Dead) } } diff --git a/src/librustc_mir/interpret/memory.rs b/src/librustc_mir/interpret/memory.rs index 6e5cfe3bb3ee..20846c377acd 100644 --- a/src/librustc_mir/interpret/memory.rs +++ b/src/librustc_mir/interpret/memory.rs @@ -7,7 +7,7 @@ use rustc::ty::Instance; use rustc::ty::ParamEnv; use rustc::ty::query::TyCtxtAt; use rustc::ty::layout::{self, Align, TargetDataLayout, Size}; -use rustc::mir::interpret::{Pointer, AllocId, Allocation, AccessKind, Value, +use rustc::mir::interpret::{Pointer, AllocId, Allocation, AccessKind, Value, ScalarMaybeUndef, EvalResult, Scalar, EvalErrorKind, GlobalId, AllocType}; pub use rustc::mir::interpret::{write_target_uint, write_target_int, read_target_uint}; use rustc_data_structures::fx::{FxHashSet, FxHashMap, FxHasher}; @@ -272,10 +272,8 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'a, 'mir, 
'tcx, M> { let alloc = self.get(ptr.alloc_id)?; (ptr.offset.bytes(), alloc.align) } - Scalar::Bits { bits, defined } => { - if (defined as u64) < self.pointer_size().bits() { - return err!(ReadUndefBytes); - } + Scalar::Bits { bits, size } => { + assert_eq!(size as u64, self.pointer_size().bytes()); // FIXME: what on earth does this line do? docs or fix needed! let v = ((bits as u128) % (1 << self.pointer_size().bytes())) as u64; if v == 0 { @@ -756,7 +754,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> { Ok(()) } - pub fn read_scalar(&self, ptr: Pointer, ptr_align: Align, size: Size) -> EvalResult<'tcx, Scalar> { + pub fn read_scalar(&self, ptr: Pointer, ptr_align: Align, size: Size) -> EvalResult<'tcx, ScalarMaybeUndef> { self.check_relocation_edges(ptr, size)?; // Make sure we don't read part of a pointer as a pointer let endianness = self.endianness(); let bytes = self.get_bytes_unchecked(ptr, size, ptr_align.min(self.int_align(size)))?; @@ -764,7 +762,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> { // We must not return Ok() for unaligned pointers! if self.check_defined(ptr, size).is_err() { // this inflates undefined bytes to the entire scalar, even if only a few bytes are undefined - return Ok(Scalar::undef().into()); + return Ok(ScalarMaybeUndef::Undef); } // Now we do the actual reading let bits = read_target_uint(endianness, bytes).unwrap(); @@ -776,44 +774,52 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> { } else { let alloc = self.get(ptr.alloc_id)?; match alloc.relocations.get(&ptr.offset) { - Some(&alloc_id) => return Ok(Pointer::new(alloc_id, Size::from_bytes(bits as u64)).into()), + Some(&alloc_id) => return Ok(ScalarMaybeUndef::Scalar(Pointer::new(alloc_id, Size::from_bytes(bits as u64)).into())), None => {}, } } // We don't. Just return the bits. 
- Ok(Scalar::Bits { + Ok(ScalarMaybeUndef::Scalar(Scalar::Bits { bits, - defined: size.bits() as u8, - }) + size: size.bytes() as u8, + })) } - pub fn read_ptr_sized(&self, ptr: Pointer, ptr_align: Align) -> EvalResult<'tcx, Scalar> { + pub fn read_ptr_sized(&self, ptr: Pointer, ptr_align: Align) -> EvalResult<'tcx, ScalarMaybeUndef> { self.read_scalar(ptr, ptr_align, self.pointer_size()) } - pub fn write_scalar(&mut self, ptr: Scalar, ptr_align: Align, val: Scalar, size: Size, signed: bool) -> EvalResult<'tcx> { + pub fn write_scalar(&mut self, ptr: Scalar, ptr_align: Align, val: ScalarMaybeUndef, type_size: Size, signed: bool) -> EvalResult<'tcx> { let endianness = self.endianness(); + let val = match val { + ScalarMaybeUndef::Scalar(scalar) => scalar, + ScalarMaybeUndef::Undef => return self.mark_definedness(ptr, type_size, false), + }; + let bytes = match val { Scalar::Ptr(val) => { - assert_eq!(size, self.pointer_size()); + assert_eq!(type_size, self.pointer_size()); val.offset.bytes() as u128 } - Scalar::Bits { bits, defined } if defined as u64 >= size.bits() && size.bits() != 0 => bits, - - Scalar::Bits { .. } => { - self.check_align(ptr.into(), ptr_align)?; - self.mark_definedness(ptr, size, false)?; + Scalar::Bits { size: 0, .. 
} => { + // nothing to do for ZSTs + assert_eq!(type_size.bytes(), 0); return Ok(()); } + + Scalar::Bits { bits, size } => { + assert_eq!(size as u64, type_size.bytes()); + bits + }, }; let ptr = ptr.to_ptr()?; { - let align = self.int_align(size); - let dst = self.get_bytes_mut(ptr, size, ptr_align.min(align))?; + let align = self.int_align(type_size); + let dst = self.get_bytes_mut(ptr, type_size, ptr_align.min(align))?; if signed { write_target_int(endianness, dst, bytes as i128).unwrap(); } else { @@ -835,7 +841,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> { Ok(()) } - pub fn write_ptr_sized_unsigned(&mut self, ptr: Pointer, ptr_align: Align, val: Scalar) -> EvalResult<'tcx> { + pub fn write_ptr_sized_unsigned(&mut self, ptr: Pointer, ptr_align: Align, val: ScalarMaybeUndef) -> EvalResult<'tcx> { let ptr_size = self.pointer_size(); self.write_scalar(ptr.into(), ptr_align, val, ptr_size, false) } @@ -984,7 +990,7 @@ pub trait HasMemory<'a, 'mir, 'tcx: 'a + 'mir, M: Machine<'mir, 'tcx>> { fn into_ptr( &self, value: Value, - ) -> EvalResult<'tcx, Scalar> { + ) -> EvalResult<'tcx, ScalarMaybeUndef> { Ok(match value { Value::ByRef(ptr, align) => { self.memory().read_ptr_sized(ptr.to_ptr()?, align)? 
@@ -997,7 +1003,7 @@ pub trait HasMemory<'a, 'mir, 'tcx: 'a + 'mir, M: Machine<'mir, 'tcx>> { fn into_ptr_vtable_pair( &self, value: Value, - ) -> EvalResult<'tcx, (Scalar, Pointer)> { + ) -> EvalResult<'tcx, (ScalarMaybeUndef, Pointer)> { match value { Value::ByRef(ref_ptr, align) => { let mem = self.memory(); @@ -1005,11 +1011,11 @@ pub trait HasMemory<'a, 'mir, 'tcx: 'a + 'mir, M: Machine<'mir, 'tcx>> { let vtable = mem.read_ptr_sized( ref_ptr.ptr_offset(mem.pointer_size(), &mem.tcx.data_layout)?.to_ptr()?, align - )?.to_ptr()?; + )?.read()?.to_ptr()?; Ok((ptr, vtable)) } - Value::ScalarPair(ptr, vtable) => Ok((ptr.into(), vtable.to_ptr()?)), + Value::ScalarPair(ptr, vtable) => Ok((ptr, vtable.read()?.to_ptr()?)), _ => bug!("expected ptr and vtable, got {:?}", value), } } @@ -1017,7 +1023,7 @@ pub trait HasMemory<'a, 'mir, 'tcx: 'a + 'mir, M: Machine<'mir, 'tcx>> { fn into_slice( &self, value: Value, - ) -> EvalResult<'tcx, (Scalar, u64)> { + ) -> EvalResult<'tcx, (ScalarMaybeUndef, u64)> { match value { Value::ByRef(ref_ptr, align) => { let mem = self.memory(); @@ -1025,12 +1031,12 @@ pub trait HasMemory<'a, 'mir, 'tcx: 'a + 'mir, M: Machine<'mir, 'tcx>> { let len = mem.read_ptr_sized( ref_ptr.ptr_offset(mem.pointer_size(), &mem.tcx.data_layout)?.to_ptr()?, align - )?.to_bits(mem.pointer_size())? as u64; + )?.read()?.to_bits(mem.pointer_size())? 
as u64; Ok((ptr, len)) } Value::ScalarPair(ptr, val) => { - let len = val.to_bits(self.memory().pointer_size())?; - Ok((ptr.into(), len as u64)) + let len = val.read()?.to_bits(self.memory().pointer_size())?; + Ok((ptr, len as u64)) } Value::Scalar(_) => bug!("expected ptr and length, got {:?}", value), } diff --git a/src/librustc_mir/interpret/mod.rs b/src/librustc_mir/interpret/mod.rs index 0c921f66198e..bc77f6e29d27 100644 --- a/src/librustc_mir/interpret/mod.rs +++ b/src/librustc_mir/interpret/mod.rs @@ -11,8 +11,10 @@ mod step; mod terminator; mod traits; -pub use self::eval_context::{EvalContext, Frame, StackPopCleanup, - TyAndPacked, ValTy}; +pub use self::eval_context::{ + EvalContext, Frame, StackPopCleanup, + TyAndPacked, ValTy, +}; pub use self::place::{Place, PlaceExtra}; @@ -34,26 +36,21 @@ pub use self::machine::Machine; pub use self::memory::{write_target_uint, write_target_int, read_target_uint}; -use rustc::mir::interpret::{EvalResult, EvalErrorKind}; -use rustc::ty::{Ty, TyCtxt, ParamEnv}; +use rustc::ty::layout::TyLayout; -pub fn sign_extend<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, value: u128, ty: Ty<'tcx>) -> EvalResult<'tcx, u128> { - let param_env = ParamEnv::empty(); - let layout = tcx.layout_of(param_env.and(ty)).map_err(|layout| EvalErrorKind::Layout(layout))?; +pub fn sign_extend(value: u128, layout: TyLayout<'_>) -> u128 { let size = layout.size.bits(); assert!(layout.abi.is_signed()); // sign extend let shift = 128 - size; // shift the unsigned value to the left // and back to the right as signed (essentially fills with FF on the left) - Ok((((value << shift) as i128) >> shift) as u128) + (((value << shift) as i128) >> shift) as u128 } -pub fn truncate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, value: u128, ty: Ty<'tcx>) -> EvalResult<'tcx, u128> { - let param_env = ParamEnv::empty(); - let layout = tcx.layout_of(param_env.and(ty)).map_err(|layout| EvalErrorKind::Layout(layout))?; +pub fn truncate(value: u128, layout: TyLayout<'_>) -> u128 { 
let size = layout.size.bits(); let shift = 128 - size; // truncate (shift left to drop out leftover values, shift right to fill with zeroes) - Ok((value << shift) >> shift) + (value << shift) >> shift } diff --git a/src/librustc_mir/interpret/operator.rs b/src/librustc_mir/interpret/operator.rs index 8320add71576..567cc10b721b 100644 --- a/src/librustc_mir/interpret/operator.rs +++ b/src/librustc_mir/interpret/operator.rs @@ -1,7 +1,7 @@ use rustc::mir; use rustc::ty::{self, Ty, layout}; use syntax::ast::FloatTy; -use rustc::ty::layout::LayoutOf; +use rustc::ty::layout::{LayoutOf, TyLayout}; use rustc_apfloat::ieee::{Double, Single}; use rustc_apfloat::Float; @@ -32,7 +32,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { dest_ty: Ty<'tcx>, ) -> EvalResult<'tcx> { let (val, overflowed) = self.binop_with_overflow(op, left, right)?; - let val = Value::ScalarPair(val, Scalar::from_bool(overflowed)); + let val = Value::ScalarPair(val.into(), Scalar::from_bool(overflowed).into()); let valty = ValTy { value: val, ty: dest_ty, @@ -97,13 +97,13 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { let signed = left_layout.abi.is_signed(); let mut oflo = (r as u32 as u128) != r; let mut r = r as u32; - let size = left_layout.size.bits() as u32; - oflo |= r >= size; + let size = left_layout.size; + oflo |= r >= size.bits() as u32; if oflo { - r %= size; + r %= size.bits() as u32; } let result = if signed { - let l = self.sign_extend(l, left_ty)? 
as i128; + let l = self.sign_extend(l, left_layout) as i128; let result = match bin_op { Shl => l << r, Shr => l >> r, @@ -117,10 +117,10 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { _ => bug!("it has already been checked that this is a shift op"), } }; - let truncated = self.truncate(result, left_ty)?; + let truncated = self.truncate(result, left_layout); return Ok((Scalar::Bits { bits: truncated, - defined: size as u8, + size: size.bytes() as u8, }, oflo)); } @@ -145,8 +145,8 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { _ => None, }; if let Some(op) = op { - let l = self.sign_extend(l, left_ty)? as i128; - let r = self.sign_extend(r, right_ty)? as i128; + let l = self.sign_extend(l, left_layout) as i128; + let r = self.sign_extend(r, right_layout) as i128; return Ok((Scalar::from_bool(op(&l, &r)), false)); } let op: Option<fn(i128, i128) -> (i128, bool)> = match bin_op { @@ -160,14 +160,14 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { _ => None, }; if let Some(op) = op { - let l128 = self.sign_extend(l, left_ty)? as i128; - let r = self.sign_extend(r, right_ty)? 
as i128; - let size = left_layout.size.bits(); + let l128 = self.sign_extend(l, left_layout) as i128; + let r = self.sign_extend(r, right_layout) as i128; + let size = left_layout.size; match bin_op { Rem | Div => { // int_min / -1 - if r == -1 && l == (1 << (size - 1)) { - return Ok((Scalar::Bits { bits: l, defined: size as u8 }, true)); + if r == -1 && l == (1 << (size.bits() - 1)) { + return Ok((Scalar::Bits { bits: l, size: size.bytes() as u8 }, true)); } }, _ => {}, @@ -175,27 +175,27 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { trace!("{}, {}, {}", l, l128, r); let (result, mut oflo) = op(l128, r); trace!("{}, {}", result, oflo); - if !oflo && size != 128 { - let max = 1 << (size - 1); + if !oflo && size.bits() != 128 { + let max = 1 << (size.bits() - 1); oflo = result >= max || result < -max; } let result = result as u128; - let truncated = self.truncate(result, left_ty)?; + let truncated = self.truncate(result, left_layout); return Ok((Scalar::Bits { bits: truncated, - defined: size as u8, + size: size.bytes() as u8, }, oflo)); } } if let ty::TyFloat(fty) = left_ty.sty { macro_rules! 
float_math { - ($ty:path, $bitsize:expr) => {{ + ($ty:path, $size:expr) => {{ let l = <$ty>::from_bits(l); let r = <$ty>::from_bits(r); let bitify = |res: ::rustc_apfloat::StatusAnd<$ty>| Scalar::Bits { bits: res.value.to_bits(), - defined: $bitsize, + size: $size, }; let val = match bin_op { Eq => Scalar::from_bool(l == r), @@ -215,12 +215,12 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { }}; } match fty { - FloatTy::F32 => float_math!(Single, 32), - FloatTy::F64 => float_math!(Double, 64), + FloatTy::F32 => float_math!(Single, 4), + FloatTy::F64 => float_math!(Double, 8), } } - let bit_width = self.layout_of(left_ty).unwrap().size.bits() as u8; + let size = self.layout_of(left_ty).unwrap().size.bytes() as u8; // only ints left let val = match bin_op { @@ -232,9 +232,9 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { Gt => Scalar::from_bool(l > r), Ge => Scalar::from_bool(l >= r), - BitOr => Scalar::Bits { bits: l | r, defined: bit_width }, - BitAnd => Scalar::Bits { bits: l & r, defined: bit_width }, - BitXor => Scalar::Bits { bits: l ^ r, defined: bit_width }, + BitOr => Scalar::Bits { bits: l | r, size }, + BitAnd => Scalar::Bits { bits: l & r, size }, + BitXor => Scalar::Bits { bits: l ^ r, size }, Add | Sub | Mul | Rem | Div => { let op: fn(u128, u128) -> (u128, bool) = match bin_op { @@ -248,10 +248,10 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { _ => bug!(), }; let (result, oflo) = op(l, r); - let truncated = self.truncate(result, left_ty)?; + let truncated = self.truncate(result, left_layout); return Ok((Scalar::Bits { bits: truncated, - defined: bit_width, + size, }, oflo || truncated != result)); } @@ -275,17 +275,16 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { &self, un_op: mir::UnOp, val: Scalar, - ty: Ty<'tcx>, + layout: TyLayout<'tcx>, ) -> EvalResult<'tcx, Scalar> { use rustc::mir::UnOp::*; use 
rustc_apfloat::ieee::{Single, Double}; use rustc_apfloat::Float; - let size = self.layout_of(ty)?.size; + let size = layout.size; let bytes = val.to_bits(size)?; - let size = size.bits(); - let result_bytes = match (un_op, &ty.sty) { + let result_bytes = match (un_op, &layout.ty.sty) { (Not, ty::TyBool) => !val.to_bool()? as u128, @@ -294,13 +293,13 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { (Neg, ty::TyFloat(FloatTy::F32)) => Single::to_bits(-Single::from_bits(bytes)), (Neg, ty::TyFloat(FloatTy::F64)) => Double::to_bits(-Double::from_bits(bytes)), - (Neg, _) if bytes == (1 << (size - 1)) => return err!(OverflowNeg), + (Neg, _) if bytes == (1 << (size.bits() - 1)) => return err!(OverflowNeg), (Neg, _) => (-(bytes as i128)) as u128, }; Ok(Scalar::Bits { - bits: self.truncate(result_bytes, ty)?, - defined: size as u8, + bits: self.truncate(result_bytes, layout), + size: size.bytes() as u8, }) } } diff --git a/src/librustc_mir/interpret/place.rs b/src/librustc_mir/interpret/place.rs index 59bf2ae6c0fe..0265768ad8bb 100644 --- a/src/librustc_mir/interpret/place.rs +++ b/src/librustc_mir/interpret/place.rs @@ -3,7 +3,7 @@ use rustc::ty::{self, Ty, TyCtxt}; use rustc::ty::layout::{self, Align, LayoutOf, TyLayout}; use rustc_data_structures::indexed_vec::Idx; -use rustc::mir::interpret::{GlobalId, Value, Scalar, EvalResult, Pointer}; +use rustc::mir::interpret::{GlobalId, Value, Scalar, EvalResult, Pointer, ScalarMaybeUndef}; use super::{EvalContext, Machine, ValTy}; use interpret::memory::HasMemory; @@ -14,7 +14,7 @@ pub enum Place { /// A place may have an invalid (integral or undef) pointer, /// since it might be turned back into a reference /// before ever being dereferenced. 
- ptr: Scalar, + ptr: ScalarMaybeUndef, align: Align, extra: PlaceExtra, }, @@ -35,10 +35,10 @@ pub enum PlaceExtra { impl<'tcx> Place { /// Produces a Place that will error if attempted to be read from pub fn undef() -> Self { - Self::from_scalar_ptr(Scalar::undef().into(), Align::from_bytes(1, 1).unwrap()) + Self::from_scalar_ptr(ScalarMaybeUndef::Undef, Align::from_bytes(1, 1).unwrap()) } - pub fn from_scalar_ptr(ptr: Scalar, align: Align) -> Self { + pub fn from_scalar_ptr(ptr: ScalarMaybeUndef, align: Align) -> Self { Place::Ptr { ptr, align, @@ -47,10 +47,10 @@ impl<'tcx> Place { } pub fn from_ptr(ptr: Pointer, align: Align) -> Self { - Self::from_scalar_ptr(ptr.into(), align) + Self::from_scalar_ptr(ScalarMaybeUndef::Scalar(ptr.into()), align) } - pub fn to_ptr_align_extra(self) -> (Scalar, Align, PlaceExtra) { + pub fn to_ptr_align_extra(self) -> (ScalarMaybeUndef, Align, PlaceExtra) { match self { Place::Ptr { ptr, align, extra } => (ptr, align, extra), _ => bug!("to_ptr_and_extra: expected Place::Ptr, got {:?}", self), @@ -58,17 +58,17 @@ impl<'tcx> Place { } } - pub fn to_ptr_align(self) -> (Scalar, Align) { + pub fn to_ptr_align(self) -> (ScalarMaybeUndef, Align) { let (ptr, align, _extra) = self.to_ptr_align_extra(); (ptr, align) } - +/* pub fn to_ptr(self) -> EvalResult<'tcx, Pointer> { // At this point, we forget about the alignment information -- the place has been turned into a reference, // and no matter where it came from, it now must be aligned. 
self.to_ptr_align().0.to_ptr() } - +*/ pub(super) fn elem_ty_and_len( self, ty: Ty<'tcx>, @@ -106,7 +106,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { // Might allow this in the future, right now there's no way to do this from Rust code anyway Local(mir::RETURN_PLACE) => err!(ReadFromReturnPointer), // Directly reading a local will always succeed - Local(local) => self.frame().get_local(local).map(Some), + Local(local) => self.frame().locals[local].access().map(Some), // No fast path for statics. Reading from statics is rare and would require another // Machine function to handle differently in miri. Promoted(_) | @@ -129,7 +129,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { let field = base_layout.field(self, field_index)?; if field.size.bytes() == 0 { return Ok(( - Value::Scalar(Scalar::undef()), + Value::Scalar(ScalarMaybeUndef::Scalar(Scalar::Bits { bits: 0, size: 0 })), field, )); } @@ -197,9 +197,9 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { match place { Place::Ptr { ptr, align, extra } => { assert_eq!(extra, PlaceExtra::None); - Ok(Value::ByRef(ptr, align)) + Ok(Value::ByRef(ptr.read()?, align)) } - Place::Local { frame, local } => self.stack[frame].get_local(local), + Place::Local { frame, local } => self.stack[frame].locals[local].access(), } } @@ -220,7 +220,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { })?; if let Value::ByRef(ptr, align) = val { Place::Ptr { - ptr, + ptr: ptr.into(), align, extra: PlaceExtra::None, } @@ -238,7 +238,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { }; let alloc = Machine::init_static(self, cid)?; Place::Ptr { - ptr: Scalar::Ptr(alloc.into()), + ptr: ScalarMaybeUndef::Scalar(Scalar::Ptr(alloc.into())), align: layout.align, extra: PlaceExtra::None, } @@ -276,14 +276,13 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, 
M> { let (base_ptr, base_align, base_extra) = match base { Place::Ptr { ptr, align, extra } => (ptr, align, extra), Place::Local { frame, local } => { - match (&self.stack[frame].get_local(local)?, &base_layout.abi) { + match (self.stack[frame].locals[local].access()?, &base_layout.abi) { // in case the field covers the entire type, just return the value - (&Value::Scalar(_), &layout::Abi::Scalar(_)) | - (&Value::ScalarPair(..), &layout::Abi::ScalarPair(..)) - if offset.bytes() == 0 && field.size == base_layout.size => - { - return Ok((base, field)); - } + (Value::Scalar(_), &layout::Abi::Scalar(_)) | + (Value::ScalarPair(..), &layout::Abi::ScalarPair(..)) + if offset.bytes() == 0 && field.size == base_layout.size => { + return Ok((base, field)) + }, _ => self.force_allocation(base)?.to_ptr_align_extra(), } } @@ -413,7 +412,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { } Index(local) => { - let value = self.frame().get_local(local)?; + let value = self.frame().locals[local].access()?; let ty = self.tcx.types.usize; let n = self .value_to_scalar(ValTy { value, ty })? 
diff --git a/src/librustc_mir/interpret/step.rs b/src/librustc_mir/interpret/step.rs index db90714d0e62..57b56db14bb4 100644 --- a/src/librustc_mir/interpret/step.rs +++ b/src/librustc_mir/interpret/step.rs @@ -90,7 +90,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { // Mark locals as alive StorageLive(local) => { - let old_val = self.frame_mut().storage_live(local); + let old_val = self.storage_live(local)?; self.deallocate_local(old_val)?; } diff --git a/src/librustc_mir/interpret/terminator/drop.rs b/src/librustc_mir/interpret/terminator/drop.rs index d750c1f47a66..76aafc712138 100644 --- a/src/librustc_mir/interpret/terminator/drop.rs +++ b/src/librustc_mir/interpret/terminator/drop.rs @@ -2,7 +2,7 @@ use rustc::mir::BasicBlock; use rustc::ty::{self, Ty}; use syntax::codemap::Span; -use rustc::mir::interpret::{EvalResult, Scalar, Value}; +use rustc::mir::interpret::{EvalResult, Value}; use interpret::{Machine, ValTy, EvalContext, Place, PlaceExtra}; impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { @@ -33,7 +33,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { ptr, align: _, extra: PlaceExtra::None, - } => ptr.to_value(), + } => Value::Scalar(ptr), _ => bug!("force_allocation broken"), }; self.drop(val, instance, ty, span, target) @@ -51,17 +51,10 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { let instance = match ty.sty { ty::TyDynamic(..) => { - let vtable = match arg { - Value::ScalarPair(_, Scalar::Ptr(vtable)) => vtable, - _ => bug!("expected fat ptr, got {:?}", arg), - }; - match self.read_drop_type_from_vtable(vtable)? { - Some(func) => func, - // no drop fn -> bail out - None => { - self.goto_block(target); - return Ok(()) - }, + if let Value::ScalarPair(_, vtable) = arg { + self.read_drop_type_from_vtable(vtable.read()?.to_ptr()?)? 
+ } else { + bug!("expected fat ptr, got {:?}", arg); } } _ => instance, diff --git a/src/librustc_mir/interpret/terminator/mod.rs b/src/librustc_mir/interpret/terminator/mod.rs index 56dd3f603b69..cdd35cfd94e6 100644 --- a/src/librustc_mir/interpret/terminator/mod.rs +++ b/src/librustc_mir/interpret/terminator/mod.rs @@ -4,7 +4,7 @@ use rustc::ty::layout::{LayoutOf, Size}; use syntax::codemap::Span; use rustc_target::spec::abi::Abi; -use rustc::mir::interpret::{EvalResult, Scalar}; +use rustc::mir::interpret::{EvalResult, Scalar, Value}; use super::{EvalContext, Place, Machine, ValTy}; use rustc_data_structures::indexed_vec::Idx; @@ -47,7 +47,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { for (index, &const_int) in values.iter().enumerate() { // Compare using binary_op - let const_int = Scalar::Bits { bits: const_int, defined: 128 }; + let const_int = Scalar::Bits { bits: const_int, size: discr_layout.size.bytes() as u8 }; let res = self.binary_op(mir::BinOp::Eq, discr_prim, discr_val.ty, const_int, discr_val.ty @@ -392,12 +392,12 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { let fn_ptr = self.memory.read_ptr_sized( vtable.offset(ptr_size * (idx as u64 + 3), &self)?, ptr_align - )?.to_ptr()?; + )?.read()?.to_ptr()?; let instance = self.memory.get_fn(fn_ptr)?; let mut args = args.to_vec(); let ty = self.layout_of(args[0].ty)?.field(&self, 0)?.ty; args[0].ty = ty; - args[0].value = ptr.to_value(); + args[0].value = Value::Scalar(ptr); // recurse with concrete function self.eval_fn_call(instance, destination, &args, span, sig) } diff --git a/src/librustc_mir/interpret/traits.rs b/src/librustc_mir/interpret/traits.rs index b6c7feda19fa..d2e264a3a4fa 100644 --- a/src/librustc_mir/interpret/traits.rs +++ b/src/librustc_mir/interpret/traits.rs @@ -1,6 +1,6 @@ use rustc::ty::{self, Ty}; use rustc::ty::layout::{Size, Align, LayoutOf}; -use rustc::mir::interpret::{Scalar, Value, Pointer, EvalResult}; 
+use rustc::mir::interpret::{Scalar, Pointer, EvalResult}; use syntax::ast::Mutability; @@ -36,25 +36,25 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { let drop = ::monomorphize::resolve_drop_in_place(*self.tcx, ty); let drop = self.memory.create_fn_alloc(drop); - self.memory.write_ptr_sized_unsigned(vtable, ptr_align, drop.into())?; + self.memory.write_ptr_sized_unsigned(vtable, ptr_align, Scalar::Ptr(drop).into())?; let size_ptr = vtable.offset(ptr_size, &self)?; self.memory.write_ptr_sized_unsigned(size_ptr, ptr_align, Scalar::Bits { bits: size as u128, - defined: ptr_size.bits() as u8, - })?; + size: ptr_size.bytes() as u8, + }.into())?; let align_ptr = vtable.offset(ptr_size * 2, &self)?; self.memory.write_ptr_sized_unsigned(align_ptr, ptr_align, Scalar::Bits { bits: align as u128, - defined: ptr_size.bits() as u8, - })?; + size: ptr_size.bytes() as u8, + }.into())?; for (i, method) in methods.iter().enumerate() { if let Some((def_id, substs)) = *method { let instance = self.resolve(def_id, substs)?; let fn_ptr = self.memory.create_fn_alloc(instance); let method_ptr = vtable.offset(ptr_size * (3 + i as u64), &self)?; - self.memory.write_ptr_sized_unsigned(method_ptr, ptr_align, fn_ptr.into())?; + self.memory.write_ptr_sized_unsigned(method_ptr, ptr_align, Scalar::Ptr(fn_ptr).into())?; } } @@ -69,16 +69,11 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { pub fn read_drop_type_from_vtable( &self, vtable: Pointer, - ) -> EvalResult<'tcx, Option<ty::Instance<'tcx>>> { + ) -> EvalResult<'tcx, ty::Instance<'tcx>> { // we don't care about the pointee type, we just want a pointer let pointer_align = self.tcx.data_layout.pointer_align; - let pointer_size = self.tcx.data_layout.pointer_size.bits() as u8; - match self.read_ptr(vtable, pointer_align, self.tcx.mk_nil_ptr())? 
{ - // some values don't need to call a drop impl, so the value is null - Value::Scalar(Scalar::Bits { bits: 0, defined} ) if defined == pointer_size => Ok(None), - Value::Scalar(Scalar::Ptr(drop_fn)) => self.memory.get_fn(drop_fn).map(Some), - _ => err!(ReadBytesAsPointer), - } + let drop_fn = self.memory.read_ptr_sized(vtable, pointer_align)?.read()?.to_ptr()?; + self.memory.get_fn(drop_fn) } pub fn read_size_and_align_from_vtable( @@ -87,11 +82,11 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { ) -> EvalResult<'tcx, (Size, Align)> { let pointer_size = self.memory.pointer_size(); let pointer_align = self.tcx.data_layout.pointer_align; - let size = self.memory.read_ptr_sized(vtable.offset(pointer_size, self)?, pointer_align)?.to_bits(pointer_size)? as u64; + let size = self.memory.read_ptr_sized(vtable.offset(pointer_size, self)?, pointer_align)?.read()?.to_bits(pointer_size)? as u64; let align = self.memory.read_ptr_sized( vtable.offset(pointer_size * 2, self)?, pointer_align - )?.to_bits(pointer_size)? as u64; + )?.read()?.to_bits(pointer_size)? 
as u64; Ok((Size::from_bytes(size), Align::from_bytes(align, align).unwrap())) } } diff --git a/src/librustc_mir/transform/const_prop.rs b/src/librustc_mir/transform/const_prop.rs index 9902fe98cc01..7c5b895fa757 100644 --- a/src/librustc_mir/transform/const_prop.rs +++ b/src/librustc_mir/transform/const_prop.rs @@ -17,7 +17,7 @@ use rustc::mir::{Constant, Location, Place, Mir, Operand, Rvalue, Local}; use rustc::mir::{NullOp, StatementKind, Statement, BasicBlock, LocalKind}; use rustc::mir::{TerminatorKind, ClearCrossCrate, SourceInfo, BinOp, ProjectionElem}; use rustc::mir::visit::{Visitor, PlaceContext}; -use rustc::mir::interpret::{ConstEvalErr, EvalErrorKind}; +use rustc::mir::interpret::{ConstEvalErr, EvalErrorKind, ScalarMaybeUndef}; use rustc::ty::{TyCtxt, self, Instance}; use rustc::mir::interpret::{Value, Scalar, GlobalId, EvalResult}; use interpret::EvalContext; @@ -368,7 +368,7 @@ impl<'b, 'a, 'tcx:'b> ConstPropagator<'b, 'a, 'tcx> { type_size_of(self.tcx, self.param_env, ty).and_then(|n| Some(( Value::Scalar(Scalar::Bits { bits: n as u128, - defined: self.tcx.data_layout.pointer_size.bits() as u8, + size: self.tcx.data_layout.pointer_size.bytes() as u8, }), self.tcx.layout_of(self.param_env.and(self.tcx.types.usize)).ok()?, span, @@ -390,7 +390,7 @@ impl<'b, 'a, 'tcx:'b> ConstPropagator<'b, 'a, 'tcx> { let prim = self.use_ecx(source_info, |this| { this.ecx.value_to_scalar(ValTy { value: val.0, ty: val.1.ty }) })?; - let val = self.use_ecx(source_info, |this| this.ecx.unary_op(op, prim, val.1.ty))?; + let val = self.use_ecx(source_info, |this| this.ecx.unary_op(op, prim, val.1))?; Some((Value::Scalar(val), place_layout, span)) } Rvalue::CheckedBinaryOp(op, ref left, ref right) | @@ -449,8 +449,8 @@ impl<'b, 'a, 'tcx:'b> ConstPropagator<'b, 'a, 'tcx> { })?; let val = if let Rvalue::CheckedBinaryOp(..) 
= *rvalue { Value::ScalarPair( - val, - Scalar::from_bool(overflow), + val.into(), + Scalar::from_bool(overflow).into(), ) } else { if overflow { @@ -458,7 +458,7 @@ impl<'b, 'a, 'tcx:'b> ConstPropagator<'b, 'a, 'tcx> { let _: Option<()> = self.use_ecx(source_info, |_| Err(err)); return None; } - Value::Scalar(val) + Value::Scalar(val.into()) }; Some((val, place_layout, span)) }, @@ -576,7 +576,7 @@ impl<'b, 'a, 'tcx> Visitor<'tcx> for ConstPropagator<'b, 'a, 'tcx> { if let TerminatorKind::Assert { expected, msg, cond, .. } = kind { if let Some(value) = self.eval_operand(cond, source_info) { trace!("assertion on {:?} should be {:?}", value, expected); - if Value::Scalar(Scalar::from_bool(*expected)) != value.0 { + if Value::Scalar(Scalar::from_bool(*expected).into()) != value.0 { // poison all places this operand references so that further code // doesn't use the invalid value match cond { @@ -613,14 +613,18 @@ impl<'b, 'a, 'tcx> Visitor<'tcx> for ConstPropagator<'b, 'a, 'tcx> { .eval_operand(len, source_info) .expect("len must be const"); let len = match len.0 { - Value::Scalar(Scalar::Bits { bits, ..}) => bits, + Value::Scalar(ScalarMaybeUndef::Scalar(Scalar::Bits { + bits, .. + })) => bits, _ => bug!("const len not primitive: {:?}", len), }; let index = self .eval_operand(index, source_info) .expect("index must be const"); let index = match index.0 { - Value::Scalar(Scalar::Bits { bits, .. }) => bits, + Value::Scalar(ScalarMaybeUndef::Scalar(Scalar::Bits { + bits, .. + })) => bits, _ => bug!("const index not primitive: {:?}", index), }; format!(