Merge remote-tracking branch 'miri/upstream' into miri
This commit is contained in:
commit
df5e122eda
17 changed files with 8266 additions and 0 deletions
25
.editorconfig
Normal file
25
.editorconfig
Normal file
|
|
@ -0,0 +1,25 @@
|
|||
# EditorConfig helps developers define and maintain consistent
|
||||
# coding styles between different editors and IDEs
|
||||
# editorconfig.org
|
||||
|
||||
root = true
|
||||
|
||||
|
||||
[*]
|
||||
end_of_line = lf
|
||||
charset = utf-8
|
||||
trim_trailing_whitespace = true
|
||||
insert_final_newline = true
|
||||
indent_style = space
|
||||
indent_size = 4
|
||||
|
||||
[*.rs]
|
||||
indent_style = space
|
||||
indent_size = 4
|
||||
|
||||
[*.toml]
|
||||
indent_style = space
|
||||
indent_size = 4
|
||||
|
||||
[*.md]
|
||||
trim_trailing_whitespace = false
|
||||
122
src/librustc/mir/interpret/cast.rs
Normal file
122
src/librustc/mir/interpret/cast.rs
Normal file
|
|
@ -0,0 +1,122 @@
|
|||
use rustc::ty::{self, Ty};
|
||||
use syntax::ast::{FloatTy, IntTy, UintTy};
|
||||
|
||||
use super::{PrimVal, EvalContext, EvalResult, MemoryPointer, PointerArithmetic, Machine};
|
||||
|
||||
impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
|
||||
pub(super) fn cast_primval(
|
||||
&self,
|
||||
val: PrimVal,
|
||||
src_ty: Ty<'tcx>,
|
||||
dest_ty: Ty<'tcx>,
|
||||
) -> EvalResult<'tcx, PrimVal> {
|
||||
trace!("Casting {:?}: {:?} to {:?}", val, src_ty, dest_ty);
|
||||
let src_kind = self.ty_to_primval_kind(src_ty)?;
|
||||
|
||||
match val {
|
||||
PrimVal::Undef => Ok(PrimVal::Undef),
|
||||
PrimVal::Ptr(ptr) => self.cast_from_ptr(ptr, dest_ty),
|
||||
val @ PrimVal::Bytes(_) => {
|
||||
use super::PrimValKind::*;
|
||||
match src_kind {
|
||||
F32 => self.cast_from_float(val.to_f32()? as f64, dest_ty),
|
||||
F64 => self.cast_from_float(val.to_f64()?, dest_ty),
|
||||
|
||||
I8 | I16 | I32 | I64 | I128 => {
|
||||
self.cast_from_signed_int(val.to_i128()?, dest_ty)
|
||||
}
|
||||
|
||||
Bool | Char | U8 | U16 | U32 | U64 | U128 | FnPtr | Ptr => {
|
||||
self.cast_from_int(val.to_u128()?, dest_ty, false)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn cast_from_signed_int(&self, val: i128, ty: ty::Ty<'tcx>) -> EvalResult<'tcx, PrimVal> {
|
||||
self.cast_from_int(val as u128, ty, val < 0)
|
||||
}
|
||||
|
||||
fn int_to_int(&self, v: i128, ty: IntTy) -> u128 {
|
||||
match ty {
|
||||
IntTy::I8 => v as i8 as u128,
|
||||
IntTy::I16 => v as i16 as u128,
|
||||
IntTy::I32 => v as i32 as u128,
|
||||
IntTy::I64 => v as i64 as u128,
|
||||
IntTy::I128 => v as u128,
|
||||
IntTy::Is => {
|
||||
let ty = self.tcx.sess.target.isize_ty;
|
||||
self.int_to_int(v, ty)
|
||||
}
|
||||
}
|
||||
}
|
||||
fn int_to_uint(&self, v: u128, ty: UintTy) -> u128 {
|
||||
match ty {
|
||||
UintTy::U8 => v as u8 as u128,
|
||||
UintTy::U16 => v as u16 as u128,
|
||||
UintTy::U32 => v as u32 as u128,
|
||||
UintTy::U64 => v as u64 as u128,
|
||||
UintTy::U128 => v,
|
||||
UintTy::Us => {
|
||||
let ty = self.tcx.sess.target.usize_ty;
|
||||
self.int_to_uint(v, ty)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn cast_from_int(
|
||||
&self,
|
||||
v: u128,
|
||||
ty: ty::Ty<'tcx>,
|
||||
negative: bool,
|
||||
) -> EvalResult<'tcx, PrimVal> {
|
||||
trace!("cast_from_int: {}, {}, {}", v, ty, negative);
|
||||
use rustc::ty::TypeVariants::*;
|
||||
match ty.sty {
|
||||
// Casts to bool are not permitted by rustc, no need to handle them here.
|
||||
TyInt(ty) => Ok(PrimVal::Bytes(self.int_to_int(v as i128, ty))),
|
||||
TyUint(ty) => Ok(PrimVal::Bytes(self.int_to_uint(v, ty))),
|
||||
|
||||
TyFloat(FloatTy::F64) if negative => Ok(PrimVal::from_f64(v as i128 as f64)),
|
||||
TyFloat(FloatTy::F64) => Ok(PrimVal::from_f64(v as f64)),
|
||||
TyFloat(FloatTy::F32) if negative => Ok(PrimVal::from_f32(v as i128 as f32)),
|
||||
TyFloat(FloatTy::F32) => Ok(PrimVal::from_f32(v as f32)),
|
||||
|
||||
TyChar if v as u8 as u128 == v => Ok(PrimVal::Bytes(v)),
|
||||
TyChar => err!(InvalidChar(v)),
|
||||
|
||||
// No alignment check needed for raw pointers. But we have to truncate to target ptr size.
|
||||
TyRawPtr(_) => Ok(PrimVal::Bytes(self.memory.truncate_to_ptr(v).0 as u128)),
|
||||
|
||||
_ => err!(Unimplemented(format!("int to {:?} cast", ty))),
|
||||
}
|
||||
}
|
||||
|
||||
fn cast_from_float(&self, val: f64, ty: Ty<'tcx>) -> EvalResult<'tcx, PrimVal> {
|
||||
use rustc::ty::TypeVariants::*;
|
||||
match ty.sty {
|
||||
// Casting negative floats to unsigned integers yields zero.
|
||||
TyUint(_) if val < 0.0 => self.cast_from_int(0, ty, false),
|
||||
TyInt(_) if val < 0.0 => self.cast_from_int(val as i128 as u128, ty, true),
|
||||
|
||||
TyInt(_) | ty::TyUint(_) => self.cast_from_int(val as u128, ty, false),
|
||||
|
||||
TyFloat(FloatTy::F64) => Ok(PrimVal::from_f64(val)),
|
||||
TyFloat(FloatTy::F32) => Ok(PrimVal::from_f32(val as f32)),
|
||||
_ => err!(Unimplemented(format!("float to {:?} cast", ty))),
|
||||
}
|
||||
}
|
||||
|
||||
fn cast_from_ptr(&self, ptr: MemoryPointer, ty: Ty<'tcx>) -> EvalResult<'tcx, PrimVal> {
|
||||
use rustc::ty::TypeVariants::*;
|
||||
match ty.sty {
|
||||
// Casting to a reference or fn pointer is not permitted by rustc, no need to support it here.
|
||||
TyRawPtr(_) |
|
||||
TyInt(IntTy::Is) |
|
||||
TyUint(UintTy::Us) => Ok(PrimVal::Ptr(ptr)),
|
||||
TyInt(_) | TyUint(_) => err!(ReadPointerAsBytes),
|
||||
_ => err!(Unimplemented(format!("ptr to {:?} cast", ty))),
|
||||
}
|
||||
}
|
||||
}
|
||||
259
src/librustc/mir/interpret/const_eval.rs
Normal file
259
src/librustc/mir/interpret/const_eval.rs
Normal file
|
|
@ -0,0 +1,259 @@
|
|||
use rustc::traits::Reveal;
|
||||
use rustc::ty::{self, TyCtxt, Ty, Instance, layout};
|
||||
use rustc::mir;
|
||||
|
||||
use syntax::ast::Mutability;
|
||||
use syntax::codemap::Span;
|
||||
|
||||
use super::{EvalResult, EvalError, EvalErrorKind, GlobalId, Lvalue, Value, PrimVal, EvalContext,
|
||||
StackPopCleanup, PtrAndAlign, MemoryKind, ValTy};
|
||||
|
||||
use rustc_const_math::ConstInt;
|
||||
|
||||
use std::fmt;
|
||||
use std::error::Error;
|
||||
|
||||
pub fn eval_body_as_primval<'a, 'tcx>(
|
||||
tcx: TyCtxt<'a, 'tcx, 'tcx>,
|
||||
instance: Instance<'tcx>,
|
||||
) -> EvalResult<'tcx, (PrimVal, Ty<'tcx>)> {
|
||||
let limits = super::ResourceLimits::default();
|
||||
let mut ecx = EvalContext::<CompileTimeFunctionEvaluator>::new(tcx, limits, (), ());
|
||||
let cid = GlobalId {
|
||||
instance,
|
||||
promoted: None,
|
||||
};
|
||||
if ecx.tcx.has_attr(instance.def_id(), "linkage") {
|
||||
return Err(ConstEvalError::NotConst("extern global".to_string()).into());
|
||||
}
|
||||
|
||||
let mir = ecx.load_mir(instance.def)?;
|
||||
if !ecx.globals.contains_key(&cid) {
|
||||
let size = ecx.type_size_with_substs(mir.return_ty, instance.substs)?
|
||||
.expect("unsized global");
|
||||
let align = ecx.type_align_with_substs(mir.return_ty, instance.substs)?;
|
||||
let ptr = ecx.memory.allocate(
|
||||
size,
|
||||
align,
|
||||
MemoryKind::UninitializedStatic,
|
||||
)?;
|
||||
let aligned = !ecx.is_packed(mir.return_ty)?;
|
||||
ecx.globals.insert(
|
||||
cid,
|
||||
PtrAndAlign {
|
||||
ptr: ptr.into(),
|
||||
aligned,
|
||||
},
|
||||
);
|
||||
let mutable = !mir.return_ty.is_freeze(
|
||||
ecx.tcx,
|
||||
ty::ParamEnv::empty(Reveal::All),
|
||||
mir.span,
|
||||
);
|
||||
let mutability = if mutable {
|
||||
Mutability::Mutable
|
||||
} else {
|
||||
Mutability::Immutable
|
||||
};
|
||||
let cleanup = StackPopCleanup::MarkStatic(mutability);
|
||||
let name = ty::tls::with(|tcx| tcx.item_path_str(instance.def_id()));
|
||||
trace!("const_eval: pushing stack frame for global: {}", name);
|
||||
ecx.push_stack_frame(
|
||||
instance,
|
||||
mir.span,
|
||||
mir,
|
||||
Lvalue::from_ptr(ptr),
|
||||
cleanup,
|
||||
)?;
|
||||
|
||||
while ecx.step()? {}
|
||||
}
|
||||
let value = Value::ByRef(*ecx.globals.get(&cid).expect("global not cached"));
|
||||
let valty = ValTy {
|
||||
value,
|
||||
ty: mir.return_ty,
|
||||
};
|
||||
Ok((ecx.value_to_primval(valty)?, mir.return_ty))
|
||||
}
|
||||
|
||||
pub fn eval_body_as_integer<'a, 'tcx>(
|
||||
tcx: TyCtxt<'a, 'tcx, 'tcx>,
|
||||
instance: Instance<'tcx>,
|
||||
) -> EvalResult<'tcx, ConstInt> {
|
||||
let (prim, ty) = eval_body_as_primval(tcx, instance)?;
|
||||
let prim = prim.to_bytes()?;
|
||||
use syntax::ast::{IntTy, UintTy};
|
||||
use rustc::ty::TypeVariants::*;
|
||||
use rustc_const_math::{ConstIsize, ConstUsize};
|
||||
Ok(match ty.sty {
|
||||
TyInt(IntTy::I8) => ConstInt::I8(prim as i128 as i8),
|
||||
TyInt(IntTy::I16) => ConstInt::I16(prim as i128 as i16),
|
||||
TyInt(IntTy::I32) => ConstInt::I32(prim as i128 as i32),
|
||||
TyInt(IntTy::I64) => ConstInt::I64(prim as i128 as i64),
|
||||
TyInt(IntTy::I128) => ConstInt::I128(prim as i128),
|
||||
TyInt(IntTy::Is) => ConstInt::Isize(
|
||||
ConstIsize::new(prim as i128 as i64, tcx.sess.target.isize_ty)
|
||||
.expect("miri should already have errored"),
|
||||
),
|
||||
TyUint(UintTy::U8) => ConstInt::U8(prim as u8),
|
||||
TyUint(UintTy::U16) => ConstInt::U16(prim as u16),
|
||||
TyUint(UintTy::U32) => ConstInt::U32(prim as u32),
|
||||
TyUint(UintTy::U64) => ConstInt::U64(prim as u64),
|
||||
TyUint(UintTy::U128) => ConstInt::U128(prim),
|
||||
TyUint(UintTy::Us) => ConstInt::Usize(
|
||||
ConstUsize::new(prim as u64, tcx.sess.target.usize_ty)
|
||||
.expect("miri should already have errored"),
|
||||
),
|
||||
_ => {
|
||||
return Err(
|
||||
ConstEvalError::NeedsRfc(
|
||||
"evaluating anything other than isize/usize during typeck".to_string(),
|
||||
).into(),
|
||||
)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
struct CompileTimeFunctionEvaluator;
|
||||
|
||||
impl<'tcx> Into<EvalError<'tcx>> for ConstEvalError {
|
||||
fn into(self) -> EvalError<'tcx> {
|
||||
EvalErrorKind::MachineError(Box::new(self)).into()
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
enum ConstEvalError {
|
||||
NeedsRfc(String),
|
||||
NotConst(String),
|
||||
}
|
||||
|
||||
impl fmt::Display for ConstEvalError {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
use self::ConstEvalError::*;
|
||||
match *self {
|
||||
NeedsRfc(ref msg) => {
|
||||
write!(
|
||||
f,
|
||||
"\"{}\" needs an rfc before being allowed inside constants",
|
||||
msg
|
||||
)
|
||||
}
|
||||
NotConst(ref msg) => write!(f, "Cannot evaluate within constants: \"{}\"", msg),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Error for ConstEvalError {
|
||||
fn description(&self) -> &str {
|
||||
use self::ConstEvalError::*;
|
||||
match *self {
|
||||
NeedsRfc(_) => "this feature needs an rfc before being allowed inside constants",
|
||||
NotConst(_) => "this feature is not compatible with constant evaluation",
|
||||
}
|
||||
}
|
||||
|
||||
fn cause(&self) -> Option<&Error> {
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
impl<'tcx> super::Machine<'tcx> for CompileTimeFunctionEvaluator {
|
||||
type Data = ();
|
||||
type MemoryData = ();
|
||||
type MemoryKinds = !;
|
||||
fn eval_fn_call<'a>(
|
||||
ecx: &mut EvalContext<'a, 'tcx, Self>,
|
||||
instance: ty::Instance<'tcx>,
|
||||
destination: Option<(Lvalue, mir::BasicBlock)>,
|
||||
_args: &[ValTy<'tcx>],
|
||||
span: Span,
|
||||
_sig: ty::FnSig<'tcx>,
|
||||
) -> EvalResult<'tcx, bool> {
|
||||
if !ecx.tcx.is_const_fn(instance.def_id()) {
|
||||
return Err(
|
||||
ConstEvalError::NotConst(format!("calling non-const fn `{}`", instance)).into(),
|
||||
);
|
||||
}
|
||||
let mir = match ecx.load_mir(instance.def) {
|
||||
Ok(mir) => mir,
|
||||
Err(EvalError { kind: EvalErrorKind::NoMirFor(path), .. }) => {
|
||||
// some simple things like `malloc` might get accepted in the future
|
||||
return Err(
|
||||
ConstEvalError::NeedsRfc(format!("calling extern function `{}`", path))
|
||||
.into(),
|
||||
);
|
||||
}
|
||||
Err(other) => return Err(other),
|
||||
};
|
||||
let (return_lvalue, return_to_block) = match destination {
|
||||
Some((lvalue, block)) => (lvalue, StackPopCleanup::Goto(block)),
|
||||
None => (Lvalue::undef(), StackPopCleanup::None),
|
||||
};
|
||||
|
||||
ecx.push_stack_frame(
|
||||
instance,
|
||||
span,
|
||||
mir,
|
||||
return_lvalue,
|
||||
return_to_block,
|
||||
)?;
|
||||
|
||||
Ok(false)
|
||||
}
|
||||
|
||||
fn call_intrinsic<'a>(
|
||||
_ecx: &mut EvalContext<'a, 'tcx, Self>,
|
||||
_instance: ty::Instance<'tcx>,
|
||||
_args: &[ValTy<'tcx>],
|
||||
_dest: Lvalue,
|
||||
_dest_ty: Ty<'tcx>,
|
||||
_dest_layout: &'tcx layout::Layout,
|
||||
_target: mir::BasicBlock,
|
||||
) -> EvalResult<'tcx> {
|
||||
Err(
|
||||
ConstEvalError::NeedsRfc("calling intrinsics".to_string()).into(),
|
||||
)
|
||||
}
|
||||
|
||||
fn try_ptr_op<'a>(
|
||||
_ecx: &EvalContext<'a, 'tcx, Self>,
|
||||
_bin_op: mir::BinOp,
|
||||
left: PrimVal,
|
||||
_left_ty: Ty<'tcx>,
|
||||
right: PrimVal,
|
||||
_right_ty: Ty<'tcx>,
|
||||
) -> EvalResult<'tcx, Option<(PrimVal, bool)>> {
|
||||
if left.is_bytes() && right.is_bytes() {
|
||||
Ok(None)
|
||||
} else {
|
||||
Err(
|
||||
ConstEvalError::NeedsRfc("Pointer arithmetic or comparison".to_string()).into(),
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
fn mark_static_initialized(m: !) -> EvalResult<'tcx> {
|
||||
m
|
||||
}
|
||||
|
||||
fn box_alloc<'a>(
|
||||
_ecx: &mut EvalContext<'a, 'tcx, Self>,
|
||||
_ty: ty::Ty<'tcx>,
|
||||
_dest: Lvalue,
|
||||
) -> EvalResult<'tcx> {
|
||||
Err(
|
||||
ConstEvalError::NeedsRfc("Heap allocations via `box` keyword".to_string()).into(),
|
||||
)
|
||||
}
|
||||
|
||||
fn global_item_with_linkage<'a>(
|
||||
_ecx: &mut EvalContext<'a, 'tcx, Self>,
|
||||
_instance: ty::Instance<'tcx>,
|
||||
_mutability: Mutability,
|
||||
) -> EvalResult<'tcx> {
|
||||
Err(
|
||||
ConstEvalError::NotConst("statics with `linkage` attribute".to_string()).into(),
|
||||
)
|
||||
}
|
||||
}
|
||||
313
src/librustc/mir/interpret/error.rs
Normal file
313
src/librustc/mir/interpret/error.rs
Normal file
|
|
@ -0,0 +1,313 @@
|
|||
use std::error::Error;
|
||||
use std::{fmt, env};
|
||||
|
||||
use rustc::mir;
|
||||
use rustc::ty::{FnSig, Ty, layout};
|
||||
|
||||
use super::{
|
||||
MemoryPointer, Lock, AccessKind
|
||||
};
|
||||
|
||||
use rustc_const_math::ConstMathErr;
|
||||
use syntax::codemap::Span;
|
||||
use backtrace::Backtrace;
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct EvalError<'tcx> {
|
||||
pub kind: EvalErrorKind<'tcx>,
|
||||
pub backtrace: Option<Backtrace>,
|
||||
}
|
||||
|
||||
impl<'tcx> From<EvalErrorKind<'tcx>> for EvalError<'tcx> {
|
||||
fn from(kind: EvalErrorKind<'tcx>) -> Self {
|
||||
let backtrace = match env::var("RUST_BACKTRACE") {
|
||||
Ok(ref val) if !val.is_empty() => Some(Backtrace::new_unresolved()),
|
||||
_ => None
|
||||
};
|
||||
EvalError {
|
||||
kind,
|
||||
backtrace,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub enum EvalErrorKind<'tcx> {
|
||||
/// This variant is used by machines to signal their own errors that do not
|
||||
/// match an existing variant
|
||||
MachineError(Box<Error>),
|
||||
FunctionPointerTyMismatch(FnSig<'tcx>, FnSig<'tcx>),
|
||||
NoMirFor(String),
|
||||
UnterminatedCString(MemoryPointer),
|
||||
DanglingPointerDeref,
|
||||
DoubleFree,
|
||||
InvalidMemoryAccess,
|
||||
InvalidFunctionPointer,
|
||||
InvalidBool,
|
||||
InvalidDiscriminant,
|
||||
PointerOutOfBounds {
|
||||
ptr: MemoryPointer,
|
||||
access: bool,
|
||||
allocation_size: u64,
|
||||
},
|
||||
InvalidNullPointerUsage,
|
||||
ReadPointerAsBytes,
|
||||
ReadBytesAsPointer,
|
||||
InvalidPointerMath,
|
||||
ReadUndefBytes,
|
||||
DeadLocal,
|
||||
InvalidBoolOp(mir::BinOp),
|
||||
Unimplemented(String),
|
||||
DerefFunctionPointer,
|
||||
ExecuteMemory,
|
||||
ArrayIndexOutOfBounds(Span, u64, u64),
|
||||
Math(Span, ConstMathErr),
|
||||
Intrinsic(String),
|
||||
OverflowingMath,
|
||||
InvalidChar(u128),
|
||||
OutOfMemory {
|
||||
allocation_size: u64,
|
||||
memory_size: u64,
|
||||
memory_usage: u64,
|
||||
},
|
||||
ExecutionTimeLimitReached,
|
||||
StackFrameLimitReached,
|
||||
OutOfTls,
|
||||
TlsOutOfBounds,
|
||||
AbiViolation(String),
|
||||
AlignmentCheckFailed {
|
||||
required: u64,
|
||||
has: u64,
|
||||
},
|
||||
MemoryLockViolation {
|
||||
ptr: MemoryPointer,
|
||||
len: u64,
|
||||
frame: usize,
|
||||
access: AccessKind,
|
||||
lock: Lock,
|
||||
},
|
||||
MemoryAcquireConflict {
|
||||
ptr: MemoryPointer,
|
||||
len: u64,
|
||||
kind: AccessKind,
|
||||
lock: Lock,
|
||||
},
|
||||
InvalidMemoryLockRelease {
|
||||
ptr: MemoryPointer,
|
||||
len: u64,
|
||||
frame: usize,
|
||||
lock: Lock,
|
||||
},
|
||||
DeallocatedLockedMemory {
|
||||
ptr: MemoryPointer,
|
||||
lock: Lock,
|
||||
},
|
||||
ValidationFailure(String),
|
||||
CalledClosureAsFunction,
|
||||
VtableForArgumentlessMethod,
|
||||
ModifiedConstantMemory,
|
||||
AssumptionNotHeld,
|
||||
InlineAsm,
|
||||
TypeNotPrimitive(Ty<'tcx>),
|
||||
ReallocatedWrongMemoryKind(String, String),
|
||||
DeallocatedWrongMemoryKind(String, String),
|
||||
ReallocateNonBasePtr,
|
||||
DeallocateNonBasePtr,
|
||||
IncorrectAllocationInformation,
|
||||
Layout(layout::LayoutError<'tcx>),
|
||||
HeapAllocZeroBytes,
|
||||
HeapAllocNonPowerOfTwoAlignment(u64),
|
||||
Unreachable,
|
||||
Panic,
|
||||
ReadFromReturnPointer,
|
||||
PathNotFound(Vec<String>),
|
||||
}
|
||||
|
||||
pub type EvalResult<'tcx, T = ()> = Result<T, EvalError<'tcx>>;
|
||||
|
||||
impl<'tcx> Error for EvalError<'tcx> {
|
||||
fn description(&self) -> &str {
|
||||
use self::EvalErrorKind::*;
|
||||
match self.kind {
|
||||
MachineError(ref inner) => inner.description(),
|
||||
FunctionPointerTyMismatch(..) =>
|
||||
"tried to call a function through a function pointer of a different type",
|
||||
InvalidMemoryAccess =>
|
||||
"tried to access memory through an invalid pointer",
|
||||
DanglingPointerDeref =>
|
||||
"dangling pointer was dereferenced",
|
||||
DoubleFree =>
|
||||
"tried to deallocate dangling pointer",
|
||||
InvalidFunctionPointer =>
|
||||
"tried to use a function pointer after offsetting it",
|
||||
InvalidBool =>
|
||||
"invalid boolean value read",
|
||||
InvalidDiscriminant =>
|
||||
"invalid enum discriminant value read",
|
||||
PointerOutOfBounds { .. } =>
|
||||
"pointer offset outside bounds of allocation",
|
||||
InvalidNullPointerUsage =>
|
||||
"invalid use of NULL pointer",
|
||||
MemoryLockViolation { .. } =>
|
||||
"memory access conflicts with lock",
|
||||
MemoryAcquireConflict { .. } =>
|
||||
"new memory lock conflicts with existing lock",
|
||||
ValidationFailure(..) =>
|
||||
"type validation failed",
|
||||
InvalidMemoryLockRelease { .. } =>
|
||||
"invalid attempt to release write lock",
|
||||
DeallocatedLockedMemory { .. } =>
|
||||
"tried to deallocate memory in conflict with a lock",
|
||||
ReadPointerAsBytes =>
|
||||
"a raw memory access tried to access part of a pointer value as raw bytes",
|
||||
ReadBytesAsPointer =>
|
||||
"a memory access tried to interpret some bytes as a pointer",
|
||||
InvalidPointerMath =>
|
||||
"attempted to do invalid arithmetic on pointers that would leak base addresses, e.g. comparing pointers into different allocations",
|
||||
ReadUndefBytes =>
|
||||
"attempted to read undefined bytes",
|
||||
DeadLocal =>
|
||||
"tried to access a dead local variable",
|
||||
InvalidBoolOp(_) =>
|
||||
"invalid boolean operation",
|
||||
Unimplemented(ref msg) => msg,
|
||||
DerefFunctionPointer =>
|
||||
"tried to dereference a function pointer",
|
||||
ExecuteMemory =>
|
||||
"tried to treat a memory pointer as a function pointer",
|
||||
ArrayIndexOutOfBounds(..) =>
|
||||
"array index out of bounds",
|
||||
Math(..) =>
|
||||
"mathematical operation failed",
|
||||
Intrinsic(..) =>
|
||||
"intrinsic failed",
|
||||
OverflowingMath =>
|
||||
"attempted to do overflowing math",
|
||||
NoMirFor(..) =>
|
||||
"mir not found",
|
||||
InvalidChar(..) =>
|
||||
"tried to interpret an invalid 32-bit value as a char",
|
||||
OutOfMemory{..} =>
|
||||
"could not allocate more memory",
|
||||
ExecutionTimeLimitReached =>
|
||||
"reached the configured maximum execution time",
|
||||
StackFrameLimitReached =>
|
||||
"reached the configured maximum number of stack frames",
|
||||
OutOfTls =>
|
||||
"reached the maximum number of representable TLS keys",
|
||||
TlsOutOfBounds =>
|
||||
"accessed an invalid (unallocated) TLS key",
|
||||
AbiViolation(ref msg) => msg,
|
||||
AlignmentCheckFailed{..} =>
|
||||
"tried to execute a misaligned read or write",
|
||||
CalledClosureAsFunction =>
|
||||
"tried to call a closure through a function pointer",
|
||||
VtableForArgumentlessMethod =>
|
||||
"tried to call a vtable function without arguments",
|
||||
ModifiedConstantMemory =>
|
||||
"tried to modify constant memory",
|
||||
AssumptionNotHeld =>
|
||||
"`assume` argument was false",
|
||||
InlineAsm =>
|
||||
"miri does not support inline assembly",
|
||||
TypeNotPrimitive(_) =>
|
||||
"expected primitive type, got nonprimitive",
|
||||
ReallocatedWrongMemoryKind(_, _) =>
|
||||
"tried to reallocate memory from one kind to another",
|
||||
DeallocatedWrongMemoryKind(_, _) =>
|
||||
"tried to deallocate memory of the wrong kind",
|
||||
ReallocateNonBasePtr =>
|
||||
"tried to reallocate with a pointer not to the beginning of an existing object",
|
||||
DeallocateNonBasePtr =>
|
||||
"tried to deallocate with a pointer not to the beginning of an existing object",
|
||||
IncorrectAllocationInformation =>
|
||||
"tried to deallocate or reallocate using incorrect alignment or size",
|
||||
Layout(_) =>
|
||||
"rustc layout computation failed",
|
||||
UnterminatedCString(_) =>
|
||||
"attempted to get length of a null terminated string, but no null found before end of allocation",
|
||||
HeapAllocZeroBytes =>
|
||||
"tried to re-, de- or allocate zero bytes on the heap",
|
||||
HeapAllocNonPowerOfTwoAlignment(_) =>
|
||||
"tried to re-, de-, or allocate heap memory with alignment that is not a power of two",
|
||||
Unreachable =>
|
||||
"entered unreachable code",
|
||||
Panic =>
|
||||
"the evaluated program panicked",
|
||||
ReadFromReturnPointer =>
|
||||
"tried to read from the return pointer",
|
||||
EvalErrorKind::PathNotFound(_) =>
|
||||
"a path could not be resolved, maybe the crate is not loaded",
|
||||
}
|
||||
}
|
||||
|
||||
fn cause(&self) -> Option<&Error> {
|
||||
use self::EvalErrorKind::*;
|
||||
match self.kind {
|
||||
MachineError(ref inner) => Some(&**inner),
|
||||
_ => None,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<'tcx> fmt::Display for EvalError<'tcx> {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
use self::EvalErrorKind::*;
|
||||
match self.kind {
|
||||
PointerOutOfBounds { ptr, access, allocation_size } => {
|
||||
write!(f, "{} at offset {}, outside bounds of allocation {} which has size {}",
|
||||
if access { "memory access" } else { "pointer computed" },
|
||||
ptr.offset, ptr.alloc_id, allocation_size)
|
||||
},
|
||||
MemoryLockViolation { ptr, len, frame, access, ref lock } => {
|
||||
write!(f, "{:?} access by frame {} at {:?}, size {}, is in conflict with lock {:?}",
|
||||
access, frame, ptr, len, lock)
|
||||
}
|
||||
MemoryAcquireConflict { ptr, len, kind, ref lock } => {
|
||||
write!(f, "new {:?} lock at {:?}, size {}, is in conflict with lock {:?}",
|
||||
kind, ptr, len, lock)
|
||||
}
|
||||
InvalidMemoryLockRelease { ptr, len, frame, ref lock } => {
|
||||
write!(f, "frame {} tried to release memory write lock at {:?}, size {}, but cannot release lock {:?}",
|
||||
frame, ptr, len, lock)
|
||||
}
|
||||
DeallocatedLockedMemory { ptr, ref lock } => {
|
||||
write!(f, "tried to deallocate memory at {:?} in conflict with lock {:?}",
|
||||
ptr, lock)
|
||||
}
|
||||
ValidationFailure(ref err) => {
|
||||
write!(f, "type validation failed: {}", err)
|
||||
}
|
||||
NoMirFor(ref func) => write!(f, "no mir for `{}`", func),
|
||||
FunctionPointerTyMismatch(sig, got) =>
|
||||
write!(f, "tried to call a function with sig {} through a function pointer of type {}", sig, got),
|
||||
ArrayIndexOutOfBounds(span, len, index) =>
|
||||
write!(f, "index out of bounds: the len is {} but the index is {} at {:?}", len, index, span),
|
||||
ReallocatedWrongMemoryKind(ref old, ref new) =>
|
||||
write!(f, "tried to reallocate memory from {} to {}", old, new),
|
||||
DeallocatedWrongMemoryKind(ref old, ref new) =>
|
||||
write!(f, "tried to deallocate {} memory but gave {} as the kind", old, new),
|
||||
Math(span, ref err) =>
|
||||
write!(f, "{:?} at {:?}", err, span),
|
||||
Intrinsic(ref err) =>
|
||||
write!(f, "{}", err),
|
||||
InvalidChar(c) =>
|
||||
write!(f, "tried to interpret an invalid 32-bit value as a char: {}", c),
|
||||
OutOfMemory { allocation_size, memory_size, memory_usage } =>
|
||||
write!(f, "tried to allocate {} more bytes, but only {} bytes are free of the {} byte memory",
|
||||
allocation_size, memory_size - memory_usage, memory_size),
|
||||
AlignmentCheckFailed { required, has } =>
|
||||
write!(f, "tried to access memory with alignment {}, but alignment {} is required",
|
||||
has, required),
|
||||
TypeNotPrimitive(ty) =>
|
||||
write!(f, "expected primitive type, got {}", ty),
|
||||
Layout(ref err) =>
|
||||
write!(f, "rustc layout computation failed: {:?}", err),
|
||||
PathNotFound(ref path) =>
|
||||
write!(f, "Cannot find path {:?}", path),
|
||||
MachineError(ref inner) =>
|
||||
write!(f, "machine error: {}", inner),
|
||||
_ => write!(f, "{}", self.description()),
|
||||
}
|
||||
}
|
||||
}
|
||||
2534
src/librustc/mir/interpret/eval_context.rs
Normal file
2534
src/librustc/mir/interpret/eval_context.rs
Normal file
File diff suppressed because it is too large
Load diff
506
src/librustc/mir/interpret/lvalue.rs
Normal file
506
src/librustc/mir/interpret/lvalue.rs
Normal file
|
|
@ -0,0 +1,506 @@
|
|||
use rustc::mir;
|
||||
use rustc::ty::layout::{Size, Align};
|
||||
use rustc::ty::{self, Ty};
|
||||
use rustc_data_structures::indexed_vec::Idx;
|
||||
|
||||
use super::{EvalResult, EvalContext, MemoryPointer, PrimVal, Value, Pointer, Machine, PtrAndAlign, ValTy};
|
||||
|
||||
#[derive(Copy, Clone, Debug)]
|
||||
pub enum Lvalue {
|
||||
/// An lvalue referring to a value allocated in the `Memory` system.
|
||||
Ptr {
|
||||
/// An lvalue may have an invalid (integral or undef) pointer,
|
||||
/// since it might be turned back into a reference
|
||||
/// before ever being dereferenced.
|
||||
ptr: PtrAndAlign,
|
||||
extra: LvalueExtra,
|
||||
},
|
||||
|
||||
/// An lvalue referring to a value on the stack. Represented by a stack frame index paired with
|
||||
/// a Mir local index.
|
||||
Local { frame: usize, local: mir::Local },
|
||||
}
|
||||
|
||||
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
|
||||
pub enum LvalueExtra {
|
||||
None,
|
||||
Length(u64),
|
||||
Vtable(MemoryPointer),
|
||||
DowncastVariant(usize),
|
||||
}
|
||||
|
||||
/// Uniquely identifies a specific constant or static.
|
||||
#[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)]
|
||||
pub struct GlobalId<'tcx> {
|
||||
/// For a constant or static, the `Instance` of the item itself.
|
||||
/// For a promoted global, the `Instance` of the function they belong to.
|
||||
pub instance: ty::Instance<'tcx>,
|
||||
|
||||
/// The index for promoted globals within their function's `Mir`.
|
||||
pub promoted: Option<mir::Promoted>,
|
||||
}
|
||||
|
||||
impl<'tcx> Lvalue {
|
||||
/// Produces an Lvalue that will error if attempted to be read from
|
||||
pub fn undef() -> Self {
|
||||
Self::from_primval_ptr(PrimVal::Undef.into())
|
||||
}
|
||||
|
||||
pub fn from_primval_ptr(ptr: Pointer) -> Self {
|
||||
Lvalue::Ptr {
|
||||
ptr: PtrAndAlign { ptr, aligned: true },
|
||||
extra: LvalueExtra::None,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn from_ptr(ptr: MemoryPointer) -> Self {
|
||||
Self::from_primval_ptr(ptr.into())
|
||||
}
|
||||
|
||||
pub(super) fn to_ptr_extra_aligned(self) -> (PtrAndAlign, LvalueExtra) {
|
||||
match self {
|
||||
Lvalue::Ptr { ptr, extra } => (ptr, extra),
|
||||
_ => bug!("to_ptr_and_extra: expected Lvalue::Ptr, got {:?}", self),
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
pub fn to_ptr(self) -> EvalResult<'tcx, MemoryPointer> {
|
||||
let (ptr, extra) = self.to_ptr_extra_aligned();
|
||||
// At this point, we forget about the alignment information -- the lvalue has been turned into a reference,
|
||||
// and no matter where it came from, it now must be aligned.
|
||||
assert_eq!(extra, LvalueExtra::None);
|
||||
ptr.to_ptr()
|
||||
}
|
||||
|
||||
pub(super) fn elem_ty_and_len(self, ty: Ty<'tcx>) -> (Ty<'tcx>, u64) {
|
||||
match ty.sty {
|
||||
ty::TyArray(elem, n) => (elem, n.val.to_const_int().unwrap().to_u64().unwrap() as u64),
|
||||
|
||||
ty::TySlice(elem) => {
|
||||
match self {
|
||||
Lvalue::Ptr { extra: LvalueExtra::Length(len), .. } => (elem, len),
|
||||
_ => {
|
||||
bug!(
|
||||
"elem_ty_and_len of a TySlice given non-slice lvalue: {:?}",
|
||||
self
|
||||
)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
_ => bug!("elem_ty_and_len expected array or slice, got {:?}", ty),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
|
||||
/// Reads a value from the lvalue without going through the intermediate step of obtaining
|
||||
/// a `miri::Lvalue`
|
||||
pub fn try_read_lvalue(
|
||||
&mut self,
|
||||
lvalue: &mir::Lvalue<'tcx>,
|
||||
) -> EvalResult<'tcx, Option<Value>> {
|
||||
use rustc::mir::Lvalue::*;
|
||||
match *lvalue {
|
||||
// Might allow this in the future, right now there's no way to do this from Rust code anyway
|
||||
Local(mir::RETURN_POINTER) => err!(ReadFromReturnPointer),
|
||||
// Directly reading a local will always succeed
|
||||
Local(local) => self.frame().get_local(local).map(Some),
|
||||
// Directly reading a static will always succeed
|
||||
Static(ref static_) => {
|
||||
let instance = ty::Instance::mono(self.tcx, static_.def_id);
|
||||
let cid = GlobalId {
|
||||
instance,
|
||||
promoted: None,
|
||||
};
|
||||
Ok(Some(Value::ByRef(
|
||||
*self.globals.get(&cid).expect("global not cached"),
|
||||
)))
|
||||
}
|
||||
Projection(ref proj) => self.try_read_lvalue_projection(proj),
|
||||
}
|
||||
}
|
||||
|
||||
fn try_read_lvalue_projection(
|
||||
&mut self,
|
||||
proj: &mir::LvalueProjection<'tcx>,
|
||||
) -> EvalResult<'tcx, Option<Value>> {
|
||||
use rustc::mir::ProjectionElem::*;
|
||||
let base = match self.try_read_lvalue(&proj.base)? {
|
||||
Some(base) => base,
|
||||
None => return Ok(None),
|
||||
};
|
||||
let base_ty = self.lvalue_ty(&proj.base);
|
||||
match proj.elem {
|
||||
Field(field, _) => match (field.index(), base) {
|
||||
// the only field of a struct
|
||||
(0, Value::ByVal(val)) => Ok(Some(Value::ByVal(val))),
|
||||
// split fat pointers, 2 element tuples, ...
|
||||
(0...1, Value::ByValPair(a, b)) if self.get_field_count(base_ty)? == 2 => {
|
||||
let val = [a, b][field.index()];
|
||||
Ok(Some(Value::ByVal(val)))
|
||||
},
|
||||
// the only field of a struct is a fat pointer
|
||||
(0, Value::ByValPair(..)) => Ok(Some(base)),
|
||||
_ => Ok(None),
|
||||
},
|
||||
// The NullablePointer cases should work fine, need to take care for normal enums
|
||||
Downcast(..) |
|
||||
Subslice { .. } |
|
||||
// reading index 0 or index 1 from a ByVal or ByVal pair could be optimized
|
||||
ConstantIndex { .. } | Index(_) |
|
||||
// No way to optimize this projection any better than the normal lvalue path
|
||||
Deref => Ok(None),
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns a value and (in case of a ByRef) if we are supposed to use aligned accesses.
|
||||
pub(super) fn eval_and_read_lvalue(
|
||||
&mut self,
|
||||
lvalue: &mir::Lvalue<'tcx>,
|
||||
) -> EvalResult<'tcx, Value> {
|
||||
// Shortcut for things like accessing a fat pointer's field,
|
||||
// which would otherwise (in the `eval_lvalue` path) require moving a `ByValPair` to memory
|
||||
// and returning an `Lvalue::Ptr` to it
|
||||
if let Some(val) = self.try_read_lvalue(lvalue)? {
|
||||
return Ok(val);
|
||||
}
|
||||
let lvalue = self.eval_lvalue(lvalue)?;
|
||||
self.read_lvalue(lvalue)
|
||||
}
|
||||
|
||||
pub fn read_lvalue(&self, lvalue: Lvalue) -> EvalResult<'tcx, Value> {
|
||||
match lvalue {
|
||||
Lvalue::Ptr { ptr, extra } => {
|
||||
assert_eq!(extra, LvalueExtra::None);
|
||||
Ok(Value::ByRef(ptr))
|
||||
}
|
||||
Lvalue::Local { frame, local } => self.stack[frame].get_local(local),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn eval_lvalue(&mut self, mir_lvalue: &mir::Lvalue<'tcx>) -> EvalResult<'tcx, Lvalue> {
|
||||
use rustc::mir::Lvalue::*;
|
||||
let lvalue = match *mir_lvalue {
|
||||
Local(mir::RETURN_POINTER) => self.frame().return_lvalue,
|
||||
Local(local) => Lvalue::Local {
|
||||
frame: self.cur_frame(),
|
||||
local,
|
||||
},
|
||||
|
||||
Static(ref static_) => {
|
||||
let instance = ty::Instance::mono(self.tcx, static_.def_id);
|
||||
let gid = GlobalId {
|
||||
instance,
|
||||
promoted: None,
|
||||
};
|
||||
Lvalue::Ptr {
|
||||
ptr: *self.globals.get(&gid).expect("uncached global"),
|
||||
extra: LvalueExtra::None,
|
||||
}
|
||||
}
|
||||
|
||||
Projection(ref proj) => {
|
||||
let ty = self.lvalue_ty(&proj.base);
|
||||
let lvalue = self.eval_lvalue(&proj.base)?;
|
||||
return self.eval_lvalue_projection(lvalue, ty, &proj.elem);
|
||||
}
|
||||
};
|
||||
|
||||
if log_enabled!(::log::LogLevel::Trace) {
|
||||
self.dump_local(lvalue);
|
||||
}
|
||||
|
||||
Ok(lvalue)
|
||||
}
|
||||
|
||||
pub fn lvalue_field(
|
||||
&mut self,
|
||||
base: Lvalue,
|
||||
field: mir::Field,
|
||||
base_ty: Ty<'tcx>,
|
||||
field_ty: Ty<'tcx>,
|
||||
) -> EvalResult<'tcx, Lvalue> {
|
||||
use rustc::ty::layout::Layout::*;
|
||||
|
||||
let base_layout = self.type_layout(base_ty)?;
|
||||
let field_index = field.index();
|
||||
let (offset, packed) = match *base_layout {
|
||||
Univariant { ref variant, .. } => (variant.offsets[field_index], variant.packed),
|
||||
|
||||
// mir optimizations treat single variant enums as structs
|
||||
General { ref variants, .. } if variants.len() == 1 => {
|
||||
(variants[0].offsets[field_index], variants[0].packed)
|
||||
}
|
||||
|
||||
General { ref variants, .. } => {
|
||||
let (_, base_extra) = base.to_ptr_extra_aligned();
|
||||
if let LvalueExtra::DowncastVariant(variant_idx) = base_extra {
|
||||
// +1 for the discriminant, which is field 0
|
||||
assert!(!variants[variant_idx].packed);
|
||||
(variants[variant_idx].offsets[field_index + 1], false)
|
||||
} else {
|
||||
bug!("field access on enum had no variant index");
|
||||
}
|
||||
}
|
||||
|
||||
RawNullablePointer { .. } => {
|
||||
assert_eq!(field_index, 0);
|
||||
return Ok(base);
|
||||
}
|
||||
|
||||
StructWrappedNullablePointer { ref nonnull, .. } => {
|
||||
(nonnull.offsets[field_index], nonnull.packed)
|
||||
}
|
||||
|
||||
UntaggedUnion { .. } => return Ok(base),
|
||||
|
||||
Vector { element, count } => {
|
||||
let field = field_index as u64;
|
||||
assert!(field < count);
|
||||
let elem_size = element.size(&self.tcx.data_layout).bytes();
|
||||
(Size::from_bytes(field * elem_size), false)
|
||||
}
|
||||
|
||||
// We treat arrays + fixed sized indexing like field accesses
|
||||
Array { .. } => {
|
||||
let field = field_index as u64;
|
||||
let elem_size = match base_ty.sty {
|
||||
ty::TyArray(elem_ty, n) => {
|
||||
assert!(field < n.val.to_const_int().unwrap().to_u64().unwrap() as u64);
|
||||
self.type_size(elem_ty)?.expect("array elements are sized") as u64
|
||||
}
|
||||
_ => {
|
||||
bug!(
|
||||
"lvalue_field: got Array layout but non-array type {:?}",
|
||||
base_ty
|
||||
)
|
||||
}
|
||||
};
|
||||
(Size::from_bytes(field * elem_size), false)
|
||||
}
|
||||
|
||||
FatPointer { .. } => {
|
||||
let bytes = field_index as u64 * self.memory.pointer_size();
|
||||
let offset = Size::from_bytes(bytes);
|
||||
(offset, false)
|
||||
}
|
||||
|
||||
_ => bug!("field access on non-product type: {:?}", base_layout),
|
||||
};
|
||||
|
||||
// Do not allocate in trivial cases
|
||||
let (base_ptr, base_extra) = match base {
|
||||
Lvalue::Ptr { ptr, extra } => (ptr, extra),
|
||||
Lvalue::Local { frame, local } => {
|
||||
match self.stack[frame].get_local(local)? {
|
||||
// in case the type has a single field, just return the value
|
||||
Value::ByVal(_)
|
||||
if self.get_field_count(base_ty).map(|c| c == 1).unwrap_or(
|
||||
false,
|
||||
) => {
|
||||
assert_eq!(
|
||||
offset.bytes(),
|
||||
0,
|
||||
"ByVal can only have 1 non zst field with offset 0"
|
||||
);
|
||||
return Ok(base);
|
||||
}
|
||||
Value::ByRef { .. } |
|
||||
Value::ByValPair(..) |
|
||||
Value::ByVal(_) => self.force_allocation(base)?.to_ptr_extra_aligned(),
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
let offset = match base_extra {
|
||||
LvalueExtra::Vtable(tab) => {
|
||||
let (_, align) = self.size_and_align_of_dst(
|
||||
base_ty,
|
||||
base_ptr.ptr.to_value_with_vtable(tab),
|
||||
)?;
|
||||
offset
|
||||
.abi_align(Align::from_bytes(align, align).unwrap())
|
||||
.bytes()
|
||||
}
|
||||
_ => offset.bytes(),
|
||||
};
|
||||
|
||||
let mut ptr = base_ptr.offset(offset, &self)?;
|
||||
// if we were unaligned, stay unaligned
|
||||
// no matter what we were, if we are packed, we must not be aligned anymore
|
||||
ptr.aligned &= !packed;
|
||||
|
||||
let field_ty = self.monomorphize(field_ty, self.substs());
|
||||
|
||||
let extra = if self.type_is_sized(field_ty) {
|
||||
LvalueExtra::None
|
||||
} else {
|
||||
match base_extra {
|
||||
LvalueExtra::None => bug!("expected fat pointer"),
|
||||
LvalueExtra::DowncastVariant(..) => {
|
||||
bug!("Rust doesn't support unsized fields in enum variants")
|
||||
}
|
||||
LvalueExtra::Vtable(_) |
|
||||
LvalueExtra::Length(_) => {}
|
||||
}
|
||||
base_extra
|
||||
};
|
||||
|
||||
Ok(Lvalue::Ptr { ptr, extra })
|
||||
}
|
||||
|
||||
pub(super) fn val_to_lvalue(&self, val: Value, ty: Ty<'tcx>) -> EvalResult<'tcx, Lvalue> {
|
||||
Ok(match self.tcx.struct_tail(ty).sty {
|
||||
ty::TyDynamic(..) => {
|
||||
let (ptr, vtable) = val.into_ptr_vtable_pair(&self.memory)?;
|
||||
Lvalue::Ptr {
|
||||
ptr: PtrAndAlign { ptr, aligned: true },
|
||||
extra: LvalueExtra::Vtable(vtable),
|
||||
}
|
||||
}
|
||||
ty::TyStr | ty::TySlice(_) => {
|
||||
let (ptr, len) = val.into_slice(&self.memory)?;
|
||||
Lvalue::Ptr {
|
||||
ptr: PtrAndAlign { ptr, aligned: true },
|
||||
extra: LvalueExtra::Length(len),
|
||||
}
|
||||
}
|
||||
_ => Lvalue::from_primval_ptr(val.into_ptr(&self.memory)?),
|
||||
})
|
||||
}
|
||||
|
||||
pub(super) fn lvalue_index(
|
||||
&mut self,
|
||||
base: Lvalue,
|
||||
outer_ty: Ty<'tcx>,
|
||||
n: u64,
|
||||
) -> EvalResult<'tcx, Lvalue> {
|
||||
// Taking the outer type here may seem odd; it's needed because for array types, the outer type gives away the length.
|
||||
let base = self.force_allocation(base)?;
|
||||
let (base_ptr, _) = base.to_ptr_extra_aligned();
|
||||
|
||||
let (elem_ty, len) = base.elem_ty_and_len(outer_ty);
|
||||
let elem_size = self.type_size(elem_ty)?.expect(
|
||||
"slice element must be sized",
|
||||
);
|
||||
assert!(
|
||||
n < len,
|
||||
"Tried to access element {} of array/slice with length {}",
|
||||
n,
|
||||
len
|
||||
);
|
||||
let ptr = base_ptr.offset(n * elem_size, self.memory.layout)?;
|
||||
Ok(Lvalue::Ptr {
|
||||
ptr,
|
||||
extra: LvalueExtra::None,
|
||||
})
|
||||
}
|
||||
|
||||
pub(super) fn eval_lvalue_projection(
|
||||
&mut self,
|
||||
base: Lvalue,
|
||||
base_ty: Ty<'tcx>,
|
||||
proj_elem: &mir::ProjectionElem<'tcx, mir::Local, Ty<'tcx>>,
|
||||
) -> EvalResult<'tcx, Lvalue> {
|
||||
use rustc::mir::ProjectionElem::*;
|
||||
let (ptr, extra) = match *proj_elem {
|
||||
Field(field, field_ty) => {
|
||||
return self.lvalue_field(base, field, base_ty, field_ty);
|
||||
}
|
||||
|
||||
Downcast(_, variant) => {
|
||||
let base_layout = self.type_layout(base_ty)?;
|
||||
// FIXME(solson)
|
||||
let base = self.force_allocation(base)?;
|
||||
let (base_ptr, base_extra) = base.to_ptr_extra_aligned();
|
||||
|
||||
use rustc::ty::layout::Layout::*;
|
||||
let extra = match *base_layout {
|
||||
General { .. } => LvalueExtra::DowncastVariant(variant),
|
||||
RawNullablePointer { .. } |
|
||||
StructWrappedNullablePointer { .. } => base_extra,
|
||||
_ => bug!("variant downcast on non-aggregate: {:?}", base_layout),
|
||||
};
|
||||
(base_ptr, extra)
|
||||
}
|
||||
|
||||
Deref => {
|
||||
let val = self.read_lvalue(base)?;
|
||||
|
||||
let pointee_type = match base_ty.sty {
|
||||
ty::TyRawPtr(ref tam) |
|
||||
ty::TyRef(_, ref tam) => tam.ty,
|
||||
ty::TyAdt(def, _) if def.is_box() => base_ty.boxed_ty(),
|
||||
_ => bug!("can only deref pointer types"),
|
||||
};
|
||||
|
||||
trace!("deref to {} on {:?}", pointee_type, val);
|
||||
|
||||
return self.val_to_lvalue(val, pointee_type);
|
||||
}
|
||||
|
||||
Index(local) => {
|
||||
let value = self.frame().get_local(local)?;
|
||||
let ty = self.tcx.types.usize;
|
||||
let n = self.value_to_primval(ValTy { value, ty })?.to_u64()?;
|
||||
return self.lvalue_index(base, base_ty, n);
|
||||
}
|
||||
|
||||
ConstantIndex {
|
||||
offset,
|
||||
min_length,
|
||||
from_end,
|
||||
} => {
|
||||
// FIXME(solson)
|
||||
let base = self.force_allocation(base)?;
|
||||
let (base_ptr, _) = base.to_ptr_extra_aligned();
|
||||
|
||||
let (elem_ty, n) = base.elem_ty_and_len(base_ty);
|
||||
let elem_size = self.type_size(elem_ty)?.expect(
|
||||
"sequence element must be sized",
|
||||
);
|
||||
assert!(n >= min_length as u64);
|
||||
|
||||
let index = if from_end {
|
||||
n - u64::from(offset)
|
||||
} else {
|
||||
u64::from(offset)
|
||||
};
|
||||
|
||||
let ptr = base_ptr.offset(index * elem_size, &self)?;
|
||||
(ptr, LvalueExtra::None)
|
||||
}
|
||||
|
||||
Subslice { from, to } => {
|
||||
// FIXME(solson)
|
||||
let base = self.force_allocation(base)?;
|
||||
let (base_ptr, _) = base.to_ptr_extra_aligned();
|
||||
|
||||
let (elem_ty, n) = base.elem_ty_and_len(base_ty);
|
||||
let elem_size = self.type_size(elem_ty)?.expect(
|
||||
"slice element must be sized",
|
||||
);
|
||||
assert!(u64::from(from) <= n - u64::from(to));
|
||||
let ptr = base_ptr.offset(u64::from(from) * elem_size, &self)?;
|
||||
// sublicing arrays produces arrays
|
||||
let extra = if self.type_is_sized(base_ty) {
|
||||
LvalueExtra::None
|
||||
} else {
|
||||
LvalueExtra::Length(n - u64::from(to) - u64::from(from))
|
||||
};
|
||||
(ptr, extra)
|
||||
}
|
||||
};
|
||||
|
||||
Ok(Lvalue::Ptr { ptr, extra })
|
||||
}
|
||||
|
||||
pub fn lvalue_ty(&self, lvalue: &mir::Lvalue<'tcx>) -> Ty<'tcx> {
|
||||
self.monomorphize(
|
||||
lvalue.ty(self.mir(), self.tcx).to_ty(self.tcx),
|
||||
self.substs(),
|
||||
)
|
||||
}
|
||||
}
|
||||
82
src/librustc/mir/interpret/machine.rs
Normal file
82
src/librustc/mir/interpret/machine.rs
Normal file
|
|
@ -0,0 +1,82 @@
|
|||
//! This module contains everything needed to instantiate an interpreter.
|
||||
//! This separation exists to ensure that no fancy miri features like
|
||||
//! interpreting common C functions leak into CTFE.
|
||||
|
||||
use super::{EvalResult, EvalContext, Lvalue, PrimVal, ValTy};
|
||||
|
||||
use rustc::{mir, ty};
|
||||
use syntax::codemap::Span;
|
||||
use syntax::ast::Mutability;
|
||||
|
||||
/// Methods of this trait signifies a point where CTFE evaluation would fail
|
||||
/// and some use case dependent behaviour can instead be applied
|
||||
pub trait Machine<'tcx>: Sized {
|
||||
/// Additional data that can be accessed via the EvalContext
|
||||
type Data;
|
||||
|
||||
/// Additional data that can be accessed via the Memory
|
||||
type MemoryData;
|
||||
|
||||
/// Additional memory kinds a machine wishes to distinguish from the builtin ones
|
||||
type MemoryKinds: ::std::fmt::Debug + PartialEq + Copy + Clone;
|
||||
|
||||
/// Entry point to all function calls.
|
||||
///
|
||||
/// Returns Ok(true) when the function was handled completely
|
||||
/// e.g. due to missing mir
|
||||
///
|
||||
/// Returns Ok(false) if a new stack frame was pushed
|
||||
fn eval_fn_call<'a>(
|
||||
ecx: &mut EvalContext<'a, 'tcx, Self>,
|
||||
instance: ty::Instance<'tcx>,
|
||||
destination: Option<(Lvalue, mir::BasicBlock)>,
|
||||
args: &[ValTy<'tcx>],
|
||||
span: Span,
|
||||
sig: ty::FnSig<'tcx>,
|
||||
) -> EvalResult<'tcx, bool>;
|
||||
|
||||
/// directly process an intrinsic without pushing a stack frame.
|
||||
fn call_intrinsic<'a>(
|
||||
ecx: &mut EvalContext<'a, 'tcx, Self>,
|
||||
instance: ty::Instance<'tcx>,
|
||||
args: &[ValTy<'tcx>],
|
||||
dest: Lvalue,
|
||||
dest_ty: ty::Ty<'tcx>,
|
||||
dest_layout: &'tcx ty::layout::Layout,
|
||||
target: mir::BasicBlock,
|
||||
) -> EvalResult<'tcx>;
|
||||
|
||||
/// Called for all binary operations except on float types.
|
||||
///
|
||||
/// Returns `None` if the operation should be handled by the integer
|
||||
/// op code in order to share more code between machines
|
||||
///
|
||||
/// Returns a (value, overflowed) pair if the operation succeeded
|
||||
fn try_ptr_op<'a>(
|
||||
ecx: &EvalContext<'a, 'tcx, Self>,
|
||||
bin_op: mir::BinOp,
|
||||
left: PrimVal,
|
||||
left_ty: ty::Ty<'tcx>,
|
||||
right: PrimVal,
|
||||
right_ty: ty::Ty<'tcx>,
|
||||
) -> EvalResult<'tcx, Option<(PrimVal, bool)>>;
|
||||
|
||||
/// Called when trying to mark machine defined `MemoryKinds` as static
|
||||
fn mark_static_initialized(m: Self::MemoryKinds) -> EvalResult<'tcx>;
|
||||
|
||||
/// Heap allocations via the `box` keyword
|
||||
///
|
||||
/// Returns a pointer to the allocated memory
|
||||
fn box_alloc<'a>(
|
||||
ecx: &mut EvalContext<'a, 'tcx, Self>,
|
||||
ty: ty::Ty<'tcx>,
|
||||
dest: Lvalue,
|
||||
) -> EvalResult<'tcx>;
|
||||
|
||||
/// Called when trying to access a global declared with a `linkage` attribute
|
||||
fn global_item_with_linkage<'a>(
|
||||
ecx: &mut EvalContext<'a, 'tcx, Self>,
|
||||
instance: ty::Instance<'tcx>,
|
||||
mutability: Mutability,
|
||||
) -> EvalResult<'tcx>;
|
||||
}
|
||||
1700
src/librustc/mir/interpret/memory.rs
Normal file
1700
src/librustc/mir/interpret/memory.rs
Normal file
File diff suppressed because it is too large
Load diff
42
src/librustc/mir/interpret/mod.rs
Normal file
42
src/librustc/mir/interpret/mod.rs
Normal file
|
|
@ -0,0 +1,42 @@
|
|||
//! An interpreter for MIR used in CTFE and by miri
|
||||
|
||||
#[macro_export]
|
||||
macro_rules! err {
|
||||
($($tt:tt)*) => { Err($crate::interpret::EvalErrorKind::$($tt)*.into()) };
|
||||
}
|
||||
|
||||
mod cast;
|
||||
mod const_eval;
|
||||
mod error;
|
||||
mod eval_context;
|
||||
mod lvalue;
|
||||
mod validation;
|
||||
mod machine;
|
||||
mod memory;
|
||||
mod operator;
|
||||
mod range_map;
|
||||
mod step;
|
||||
mod terminator;
|
||||
mod traits;
|
||||
mod value;
|
||||
|
||||
pub use self::error::{EvalError, EvalResult, EvalErrorKind};
|
||||
|
||||
pub use self::eval_context::{EvalContext, Frame, ResourceLimits, StackPopCleanup, DynamicLifetime,
|
||||
TyAndPacked, PtrAndAlign, ValTy};
|
||||
|
||||
pub use self::lvalue::{Lvalue, LvalueExtra, GlobalId};
|
||||
|
||||
pub use self::memory::{AllocId, Memory, MemoryPointer, MemoryKind, HasMemory, AccessKind, AllocIdKind};
|
||||
|
||||
use self::memory::{PointerArithmetic, Lock};
|
||||
|
||||
use self::range_map::RangeMap;
|
||||
|
||||
pub use self::value::{PrimVal, PrimValKind, Value, Pointer};
|
||||
|
||||
pub use self::const_eval::{eval_body_as_integer, eval_body_as_primval};
|
||||
|
||||
pub use self::machine::Machine;
|
||||
|
||||
pub use self::validation::{ValidationQuery, AbsLvalue};
|
||||
268
src/librustc/mir/interpret/operator.rs
Normal file
268
src/librustc/mir/interpret/operator.rs
Normal file
|
|
@ -0,0 +1,268 @@
|
|||
use rustc::mir;
|
||||
use rustc::ty::Ty;
|
||||
use rustc_const_math::ConstFloat;
|
||||
use syntax::ast::FloatTy;
|
||||
use std::cmp::Ordering;
|
||||
|
||||
use super::{EvalResult, EvalContext, Lvalue, Machine, ValTy};
|
||||
|
||||
use super::value::{PrimVal, PrimValKind, Value, bytes_to_f32, bytes_to_f64, f32_to_bytes,
|
||||
f64_to_bytes};
|
||||
|
||||
impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
|
||||
fn binop_with_overflow(
|
||||
&mut self,
|
||||
op: mir::BinOp,
|
||||
left: ValTy<'tcx>,
|
||||
right: ValTy<'tcx>,
|
||||
) -> EvalResult<'tcx, (PrimVal, bool)> {
|
||||
let left_val = self.value_to_primval(left)?;
|
||||
let right_val = self.value_to_primval(right)?;
|
||||
self.binary_op(op, left_val, left.ty, right_val, right.ty)
|
||||
}
|
||||
|
||||
/// Applies the binary operation `op` to the two operands and writes a tuple of the result
|
||||
/// and a boolean signifying the potential overflow to the destination.
|
||||
pub fn intrinsic_with_overflow(
|
||||
&mut self,
|
||||
op: mir::BinOp,
|
||||
left: ValTy<'tcx>,
|
||||
right: ValTy<'tcx>,
|
||||
dest: Lvalue,
|
||||
dest_ty: Ty<'tcx>,
|
||||
) -> EvalResult<'tcx> {
|
||||
let (val, overflowed) = self.binop_with_overflow(op, left, right)?;
|
||||
let val = Value::ByValPair(val, PrimVal::from_bool(overflowed));
|
||||
let valty = ValTy {
|
||||
value: val,
|
||||
ty: dest_ty,
|
||||
};
|
||||
self.write_value(valty, dest)
|
||||
}
|
||||
|
||||
/// Applies the binary operation `op` to the arguments and writes the result to the
|
||||
/// destination. Returns `true` if the operation overflowed.
|
||||
pub fn intrinsic_overflowing(
|
||||
&mut self,
|
||||
op: mir::BinOp,
|
||||
left: ValTy<'tcx>,
|
||||
right: ValTy<'tcx>,
|
||||
dest: Lvalue,
|
||||
dest_ty: Ty<'tcx>,
|
||||
) -> EvalResult<'tcx, bool> {
|
||||
let (val, overflowed) = self.binop_with_overflow(op, left, right)?;
|
||||
self.write_primval(dest, val, dest_ty)?;
|
||||
Ok(overflowed)
|
||||
}
|
||||
}
|
||||
|
||||
macro_rules! overflow {
|
||||
($op:ident, $l:expr, $r:expr) => ({
|
||||
let (val, overflowed) = $l.$op($r);
|
||||
let primval = PrimVal::Bytes(val as u128);
|
||||
Ok((primval, overflowed))
|
||||
})
|
||||
}
|
||||
|
||||
macro_rules! int_arithmetic {
|
||||
($kind:expr, $int_op:ident, $l:expr, $r:expr) => ({
|
||||
let l = $l;
|
||||
let r = $r;
|
||||
use super::PrimValKind::*;
|
||||
match $kind {
|
||||
I8 => overflow!($int_op, l as i8, r as i8),
|
||||
I16 => overflow!($int_op, l as i16, r as i16),
|
||||
I32 => overflow!($int_op, l as i32, r as i32),
|
||||
I64 => overflow!($int_op, l as i64, r as i64),
|
||||
I128 => overflow!($int_op, l as i128, r as i128),
|
||||
U8 => overflow!($int_op, l as u8, r as u8),
|
||||
U16 => overflow!($int_op, l as u16, r as u16),
|
||||
U32 => overflow!($int_op, l as u32, r as u32),
|
||||
U64 => overflow!($int_op, l as u64, r as u64),
|
||||
U128 => overflow!($int_op, l as u128, r as u128),
|
||||
_ => bug!("int_arithmetic should only be called on int primvals"),
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
macro_rules! int_shift {
|
||||
($kind:expr, $int_op:ident, $l:expr, $r:expr) => ({
|
||||
let l = $l;
|
||||
let r = $r;
|
||||
let r_wrapped = r as u32;
|
||||
match $kind {
|
||||
I8 => overflow!($int_op, l as i8, r_wrapped),
|
||||
I16 => overflow!($int_op, l as i16, r_wrapped),
|
||||
I32 => overflow!($int_op, l as i32, r_wrapped),
|
||||
I64 => overflow!($int_op, l as i64, r_wrapped),
|
||||
I128 => overflow!($int_op, l as i128, r_wrapped),
|
||||
U8 => overflow!($int_op, l as u8, r_wrapped),
|
||||
U16 => overflow!($int_op, l as u16, r_wrapped),
|
||||
U32 => overflow!($int_op, l as u32, r_wrapped),
|
||||
U64 => overflow!($int_op, l as u64, r_wrapped),
|
||||
U128 => overflow!($int_op, l as u128, r_wrapped),
|
||||
_ => bug!("int_shift should only be called on int primvals"),
|
||||
}.map(|(val, over)| (val, over || r != r_wrapped as u128))
|
||||
})
|
||||
}
|
||||
|
||||
impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
|
||||
/// Returns the result of the specified operation and whether it overflowed.
|
||||
pub fn binary_op(
|
||||
&self,
|
||||
bin_op: mir::BinOp,
|
||||
left: PrimVal,
|
||||
left_ty: Ty<'tcx>,
|
||||
right: PrimVal,
|
||||
right_ty: Ty<'tcx>,
|
||||
) -> EvalResult<'tcx, (PrimVal, bool)> {
|
||||
use rustc::mir::BinOp::*;
|
||||
use super::PrimValKind::*;
|
||||
|
||||
let left_kind = self.ty_to_primval_kind(left_ty)?;
|
||||
let right_kind = self.ty_to_primval_kind(right_ty)?;
|
||||
//trace!("Running binary op {:?}: {:?} ({:?}), {:?} ({:?})", bin_op, left, left_kind, right, right_kind);
|
||||
|
||||
// I: Handle operations that support pointers
|
||||
if !left_kind.is_float() && !right_kind.is_float() {
|
||||
if let Some(handled) = M::try_ptr_op(self, bin_op, left, left_ty, right, right_ty)? {
|
||||
return Ok(handled);
|
||||
}
|
||||
}
|
||||
|
||||
// II: From now on, everything must be bytes, no pointers
|
||||
let l = left.to_bytes()?;
|
||||
let r = right.to_bytes()?;
|
||||
|
||||
// These ops can have an RHS with a different numeric type.
|
||||
if right_kind.is_int() && (bin_op == Shl || bin_op == Shr) {
|
||||
return match bin_op {
|
||||
Shl => int_shift!(left_kind, overflowing_shl, l, r),
|
||||
Shr => int_shift!(left_kind, overflowing_shr, l, r),
|
||||
_ => bug!("it has already been checked that this is a shift op"),
|
||||
};
|
||||
}
|
||||
|
||||
if left_kind != right_kind {
|
||||
let msg = format!(
|
||||
"unimplemented binary op {:?}: {:?} ({:?}), {:?} ({:?})",
|
||||
bin_op,
|
||||
left,
|
||||
left_kind,
|
||||
right,
|
||||
right_kind
|
||||
);
|
||||
return err!(Unimplemented(msg));
|
||||
}
|
||||
|
||||
let float_op = |op, l, r, ty| {
|
||||
let l = ConstFloat {
|
||||
bits: l,
|
||||
ty,
|
||||
};
|
||||
let r = ConstFloat {
|
||||
bits: r,
|
||||
ty,
|
||||
};
|
||||
match op {
|
||||
Eq => PrimVal::from_bool(l.try_cmp(r).unwrap() == Ordering::Equal),
|
||||
Ne => PrimVal::from_bool(l.try_cmp(r).unwrap() != Ordering::Equal),
|
||||
Lt => PrimVal::from_bool(l.try_cmp(r).unwrap() == Ordering::Less),
|
||||
Le => PrimVal::from_bool(l.try_cmp(r).unwrap() != Ordering::Greater),
|
||||
Gt => PrimVal::from_bool(l.try_cmp(r).unwrap() == Ordering::Greater),
|
||||
Ge => PrimVal::from_bool(l.try_cmp(r).unwrap() != Ordering::Less),
|
||||
Add => PrimVal::Bytes((l + r).unwrap().bits),
|
||||
Sub => PrimVal::Bytes((l - r).unwrap().bits),
|
||||
Mul => PrimVal::Bytes((l * r).unwrap().bits),
|
||||
Div => PrimVal::Bytes((l / r).unwrap().bits),
|
||||
Rem => PrimVal::Bytes((l % r).unwrap().bits),
|
||||
_ => bug!("invalid float op: `{:?}`", op),
|
||||
}
|
||||
};
|
||||
|
||||
let val = match (bin_op, left_kind) {
|
||||
(_, F32) => float_op(bin_op, l, r, FloatTy::F32),
|
||||
(_, F64) => float_op(bin_op, l, r, FloatTy::F64),
|
||||
|
||||
|
||||
(Eq, _) => PrimVal::from_bool(l == r),
|
||||
(Ne, _) => PrimVal::from_bool(l != r),
|
||||
|
||||
(Lt, k) if k.is_signed_int() => PrimVal::from_bool((l as i128) < (r as i128)),
|
||||
(Lt, _) => PrimVal::from_bool(l < r),
|
||||
(Le, k) if k.is_signed_int() => PrimVal::from_bool((l as i128) <= (r as i128)),
|
||||
(Le, _) => PrimVal::from_bool(l <= r),
|
||||
(Gt, k) if k.is_signed_int() => PrimVal::from_bool((l as i128) > (r as i128)),
|
||||
(Gt, _) => PrimVal::from_bool(l > r),
|
||||
(Ge, k) if k.is_signed_int() => PrimVal::from_bool((l as i128) >= (r as i128)),
|
||||
(Ge, _) => PrimVal::from_bool(l >= r),
|
||||
|
||||
(BitOr, _) => PrimVal::Bytes(l | r),
|
||||
(BitAnd, _) => PrimVal::Bytes(l & r),
|
||||
(BitXor, _) => PrimVal::Bytes(l ^ r),
|
||||
|
||||
(Add, k) if k.is_int() => return int_arithmetic!(k, overflowing_add, l, r),
|
||||
(Sub, k) if k.is_int() => return int_arithmetic!(k, overflowing_sub, l, r),
|
||||
(Mul, k) if k.is_int() => return int_arithmetic!(k, overflowing_mul, l, r),
|
||||
(Div, k) if k.is_int() => return int_arithmetic!(k, overflowing_div, l, r),
|
||||
(Rem, k) if k.is_int() => return int_arithmetic!(k, overflowing_rem, l, r),
|
||||
|
||||
_ => {
|
||||
let msg = format!(
|
||||
"unimplemented binary op {:?}: {:?} ({:?}), {:?} ({:?})",
|
||||
bin_op,
|
||||
left,
|
||||
left_kind,
|
||||
right,
|
||||
right_kind
|
||||
);
|
||||
return err!(Unimplemented(msg));
|
||||
}
|
||||
};
|
||||
|
||||
Ok((val, false))
|
||||
}
|
||||
}
|
||||
|
||||
pub fn unary_op<'tcx>(
|
||||
un_op: mir::UnOp,
|
||||
val: PrimVal,
|
||||
val_kind: PrimValKind,
|
||||
) -> EvalResult<'tcx, PrimVal> {
|
||||
use rustc::mir::UnOp::*;
|
||||
use super::PrimValKind::*;
|
||||
|
||||
let bytes = val.to_bytes()?;
|
||||
|
||||
let result_bytes = match (un_op, val_kind) {
|
||||
(Not, Bool) => !val.to_bool()? as u128,
|
||||
|
||||
(Not, U8) => !(bytes as u8) as u128,
|
||||
(Not, U16) => !(bytes as u16) as u128,
|
||||
(Not, U32) => !(bytes as u32) as u128,
|
||||
(Not, U64) => !(bytes as u64) as u128,
|
||||
(Not, U128) => !bytes,
|
||||
|
||||
(Not, I8) => !(bytes as i8) as u128,
|
||||
(Not, I16) => !(bytes as i16) as u128,
|
||||
(Not, I32) => !(bytes as i32) as u128,
|
||||
(Not, I64) => !(bytes as i64) as u128,
|
||||
(Not, I128) => !(bytes as i128) as u128,
|
||||
|
||||
(Neg, I8) => -(bytes as i8) as u128,
|
||||
(Neg, I16) => -(bytes as i16) as u128,
|
||||
(Neg, I32) => -(bytes as i32) as u128,
|
||||
(Neg, I64) => -(bytes as i64) as u128,
|
||||
(Neg, I128) => -(bytes as i128) as u128,
|
||||
|
||||
(Neg, F32) => f32_to_bytes(-bytes_to_f32(bytes)),
|
||||
(Neg, F64) => f64_to_bytes(-bytes_to_f64(bytes)),
|
||||
|
||||
_ => {
|
||||
let msg = format!("unimplemented unary op: {:?}, {:?}", un_op, val);
|
||||
return err!(Unimplemented(msg));
|
||||
}
|
||||
};
|
||||
|
||||
Ok(PrimVal::Bytes(result_bytes))
|
||||
}
|
||||
250
src/librustc/mir/interpret/range_map.rs
Normal file
250
src/librustc/mir/interpret/range_map.rs
Normal file
|
|
@ -0,0 +1,250 @@
|
|||
//! Implements a map from integer indices to data.
|
||||
//! Rather than storing data for every index, internally, this maps entire ranges to the data.
|
||||
//! To this end, the APIs all work on ranges, not on individual integers. Ranges are split as
|
||||
//! necessary (e.g. when [0,5) is first associated with X, and then [1,2) is mutated).
|
||||
//! Users must not depend on whether a range is coalesced or not, even though this is observable
|
||||
//! via the iteration APIs.
|
||||
use std::collections::BTreeMap;
|
||||
use std::ops;
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct RangeMap<T> {
|
||||
map: BTreeMap<Range, T>,
|
||||
}
|
||||
|
||||
// The derived `Ord` impl sorts first by the first field, then, if the fields are the same,
|
||||
// by the second field.
|
||||
// This is exactly what we need for our purposes, since a range query on a BTReeSet/BTreeMap will give us all
|
||||
// `MemoryRange`s whose `start` is <= than the one we're looking for, but not > the end of the range we're checking.
|
||||
// At the same time the `end` is irrelevant for the sorting and range searching, but used for the check.
|
||||
// This kind of search breaks, if `end < start`, so don't do that!
|
||||
#[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Debug)]
|
||||
struct Range {
|
||||
start: u64,
|
||||
end: u64, // Invariant: end > start
|
||||
}
|
||||
|
||||
impl Range {
|
||||
fn range(offset: u64, len: u64) -> ops::Range<Range> {
|
||||
assert!(len > 0);
|
||||
// We select all elements that are within
|
||||
// the range given by the offset into the allocation and the length.
|
||||
// This is sound if all ranges that intersect with the argument range, are in the
|
||||
// resulting range of ranges.
|
||||
let left = Range {
|
||||
// lowest range to include `offset`
|
||||
start: 0,
|
||||
end: offset + 1,
|
||||
};
|
||||
let right = Range {
|
||||
// lowest (valid) range not to include `offset+len`
|
||||
start: offset + len,
|
||||
end: offset + len + 1,
|
||||
};
|
||||
left..right
|
||||
}
|
||||
|
||||
/// Tests if all of [offset, offset+len) are contained in this range.
|
||||
fn overlaps(&self, offset: u64, len: u64) -> bool {
|
||||
assert!(len > 0);
|
||||
offset < self.end && offset + len >= self.start
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> RangeMap<T> {
|
||||
pub fn new() -> RangeMap<T> {
|
||||
RangeMap { map: BTreeMap::new() }
|
||||
}
|
||||
|
||||
fn iter_with_range<'a>(
|
||||
&'a self,
|
||||
offset: u64,
|
||||
len: u64,
|
||||
) -> impl Iterator<Item = (&'a Range, &'a T)> + 'a {
|
||||
assert!(len > 0);
|
||||
self.map.range(Range::range(offset, len)).filter_map(
|
||||
move |(range,
|
||||
data)| {
|
||||
if range.overlaps(offset, len) {
|
||||
Some((range, data))
|
||||
} else {
|
||||
None
|
||||
}
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
pub fn iter<'a>(&'a self, offset: u64, len: u64) -> impl Iterator<Item = &'a T> + 'a {
|
||||
self.iter_with_range(offset, len).map(|(_, data)| data)
|
||||
}
|
||||
|
||||
fn split_entry_at(&mut self, offset: u64)
|
||||
where
|
||||
T: Clone,
|
||||
{
|
||||
let range = match self.iter_with_range(offset, 1).next() {
|
||||
Some((&range, _)) => range,
|
||||
None => return,
|
||||
};
|
||||
assert!(
|
||||
range.start <= offset && range.end > offset,
|
||||
"We got a range that doesn't even contain what we asked for."
|
||||
);
|
||||
// There is an entry overlapping this position, see if we have to split it
|
||||
if range.start < offset {
|
||||
let data = self.map.remove(&range).unwrap();
|
||||
let old = self.map.insert(
|
||||
Range {
|
||||
start: range.start,
|
||||
end: offset,
|
||||
},
|
||||
data.clone(),
|
||||
);
|
||||
assert!(old.is_none());
|
||||
let old = self.map.insert(
|
||||
Range {
|
||||
start: offset,
|
||||
end: range.end,
|
||||
},
|
||||
data,
|
||||
);
|
||||
assert!(old.is_none());
|
||||
}
|
||||
}
|
||||
|
||||
pub fn iter_mut_all<'a>(&'a mut self) -> impl Iterator<Item = &'a mut T> + 'a {
|
||||
self.map.values_mut()
|
||||
}
|
||||
|
||||
/// Provide mutable iteration over everything in the given range. As a side-effect,
|
||||
/// this will split entries in the map that are only partially hit by the given range,
|
||||
/// to make sure that when they are mutated, the effect is constrained to the given range.
|
||||
pub fn iter_mut_with_gaps<'a>(
|
||||
&'a mut self,
|
||||
offset: u64,
|
||||
len: u64,
|
||||
) -> impl Iterator<Item = &'a mut T> + 'a
|
||||
where
|
||||
T: Clone,
|
||||
{
|
||||
assert!(len > 0);
|
||||
// Preparation: Split first and last entry as needed.
|
||||
self.split_entry_at(offset);
|
||||
self.split_entry_at(offset + len);
|
||||
// Now we can provide a mutable iterator
|
||||
self.map.range_mut(Range::range(offset, len)).filter_map(
|
||||
move |(&range, data)| {
|
||||
if range.overlaps(offset, len) {
|
||||
assert!(
|
||||
offset <= range.start && offset + len >= range.end,
|
||||
"The splitting went wrong"
|
||||
);
|
||||
Some(data)
|
||||
} else {
|
||||
// Skip this one
|
||||
None
|
||||
}
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
/// Provide a mutable iterator over everything in the given range, with the same side-effects as
|
||||
/// iter_mut_with_gaps. Furthermore, if there are gaps between ranges, fill them with the given default.
|
||||
/// This is also how you insert.
|
||||
pub fn iter_mut<'a>(&'a mut self, offset: u64, len: u64) -> impl Iterator<Item = &'a mut T> + 'a
|
||||
where
|
||||
T: Clone + Default,
|
||||
{
|
||||
// Do a first iteration to collect the gaps
|
||||
let mut gaps = Vec::new();
|
||||
let mut last_end = offset;
|
||||
for (range, _) in self.iter_with_range(offset, len) {
|
||||
if last_end < range.start {
|
||||
gaps.push(Range {
|
||||
start: last_end,
|
||||
end: range.start,
|
||||
});
|
||||
}
|
||||
last_end = range.end;
|
||||
}
|
||||
if last_end < offset + len {
|
||||
gaps.push(Range {
|
||||
start: last_end,
|
||||
end: offset + len,
|
||||
});
|
||||
}
|
||||
|
||||
// Add default for all gaps
|
||||
for gap in gaps {
|
||||
let old = self.map.insert(gap, Default::default());
|
||||
assert!(old.is_none());
|
||||
}
|
||||
|
||||
// Now provide mutable iteration
|
||||
self.iter_mut_with_gaps(offset, len)
|
||||
}
|
||||
|
||||
pub fn retain<F>(&mut self, mut f: F)
|
||||
where
|
||||
F: FnMut(&T) -> bool,
|
||||
{
|
||||
let mut remove = Vec::new();
|
||||
for (range, data) in self.map.iter() {
|
||||
if !f(data) {
|
||||
remove.push(*range);
|
||||
}
|
||||
}
|
||||
|
||||
for range in remove {
|
||||
self.map.remove(&range);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    /// Query the map at every offset in the range and collect the results.
    fn to_vec<T: Copy>(map: &RangeMap<T>, offset: u64, len: u64) -> Vec<T> {
        // Ranges are already iterators; the previous `.into_iter()` call here
        // was redundant (clippy: useless_conversion).
        (offset..offset + len)
            .map(|i| *map.iter(i, 1).next().unwrap())
            .collect()
    }

    #[test]
    fn basic_insert() {
        let mut map = RangeMap::<i32>::new();
        // Insert: `iter_mut` fills the gap with the default and hands it back mutably.
        for x in map.iter_mut(10, 1) {
            *x = 42;
        }
        // Check
        assert_eq!(to_vec(&map, 10, 1), vec![42]);
    }

    #[test]
    fn gaps() {
        let mut map = RangeMap::<i32>::new();
        for x in map.iter_mut(11, 1) {
            *x = 42;
        }
        for x in map.iter_mut(15, 1) {
            *x = 42;
        }

        // Now request a range that needs three gaps filled
        // (before 11, between 11 and 15, and after 15).
        for x in map.iter_mut(10, 10) {
            if *x != 42 {
                *x = 23;
            }
        }

        assert_eq!(
            to_vec(&map, 10, 10),
            vec![23, 42, 23, 23, 23, 42, 23, 23, 23, 23]
        );
        assert_eq!(to_vec(&map, 13, 5), vec![23, 23, 42, 23, 23]);
    }
}
|
||||
402
src/librustc/mir/interpret/step.rs
Normal file
402
src/librustc/mir/interpret/step.rs
Normal file
|
|
@ -0,0 +1,402 @@
|
|||
//! This module contains the `EvalContext` methods for executing a single step of the interpreter.
|
||||
//!
|
||||
//! The main entry point is the `step` method.
|
||||
|
||||
use rustc::hir::def_id::DefId;
|
||||
use rustc::hir;
|
||||
use rustc::mir::visit::{Visitor, LvalueContext};
|
||||
use rustc::mir;
|
||||
use rustc::traits::Reveal;
|
||||
use rustc::ty;
|
||||
use rustc::ty::layout::Layout;
|
||||
use rustc::ty::subst::Substs;
|
||||
use rustc::middle::const_val::ConstVal;
|
||||
|
||||
use super::{EvalResult, EvalContext, StackPopCleanup, PtrAndAlign, GlobalId, Lvalue,
|
||||
MemoryKind, Machine, PrimVal};
|
||||
|
||||
use syntax::codemap::Span;
|
||||
use syntax::ast::Mutability;
|
||||
|
||||
impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
|
||||
pub fn inc_step_counter_and_check_limit(&mut self, n: u64) -> EvalResult<'tcx> {
|
||||
self.steps_remaining = self.steps_remaining.saturating_sub(n);
|
||||
if self.steps_remaining > 0 {
|
||||
Ok(())
|
||||
} else {
|
||||
err!(ExecutionTimeLimitReached)
|
||||
}
|
||||
}
|
||||
|
||||
/// Executes one interpreter step and returns `true` as long as there are more things to do.
///
/// Before running a statement or terminator, a `ConstantExtractor` visitor scans it for
/// constants/promoteds/statics that still need evaluation; if any were found (i.e. new
/// stack frames were pushed), the actual statement/terminator execution is deferred to a
/// later `step` call.
pub fn step(&mut self) -> EvalResult<'tcx, bool> {
    self.inc_step_counter_and_check_limit(1)?;
    if self.stack.is_empty() {
        // Nothing left on the stack: execution is complete.
        return Ok(false);
    }

    let block = self.frame().block;
    let stmt_id = self.frame().stmt;
    let mir = self.mir();
    let basic_block = &mir.basic_blocks()[block];

    // Still statements left in the current block?
    if let Some(stmt) = basic_block.statements.get(stmt_id) {
        // `new` counts how many frames the extractor pushed (or carries its error).
        let mut new = Ok(0);
        ConstantExtractor {
            span: stmt.source_info.span,
            instance: self.frame().instance,
            ecx: self,
            mir,
            new_constants: &mut new,
        }.visit_statement(
            block,
            stmt,
            mir::Location {
                block,
                statement_index: stmt_id,
            },
        );
        // if ConstantExtractor added new frames, we don't execute anything here
        // but await the next call to step
        if new? == 0 {
            self.statement(stmt)?;
        }
        return Ok(true);
    }

    // Block is out of statements: handle the terminator, same deferral scheme as above.
    let terminator = basic_block.terminator();
    let mut new = Ok(0);
    ConstantExtractor {
        span: terminator.source_info.span,
        instance: self.frame().instance,
        ecx: self,
        mir,
        new_constants: &mut new,
    }.visit_terminator(
        block,
        terminator,
        mir::Location {
            block,
            statement_index: stmt_id,
        },
    );
    // if ConstantExtractor added new frames, we don't execute anything here
    // but await the next call to step
    if new? == 0 {
        self.terminator(terminator)?;
    }
    Ok(true)
}
|
||||
|
||||
/// Executes a single MIR statement and advances the statement counter of the frame
/// that was current when the statement started.
fn statement(&mut self, stmt: &mir::Statement<'tcx>) -> EvalResult<'tcx> {
    trace!("{:?}", stmt);

    use rustc::mir::StatementKind::*;

    // Some statements (e.g. box) push new stack frames. We have to record the stack frame number
    // *before* executing the statement.
    let frame_idx = self.cur_frame();

    match stmt.kind {
        Assign(ref lvalue, ref rvalue) => self.eval_rvalue_into_lvalue(rvalue, lvalue)?,

        SetDiscriminant {
            ref lvalue,
            variant_index,
        } => {
            let dest = self.eval_lvalue(lvalue)?;
            let dest_ty = self.lvalue_ty(lvalue);
            let dest_layout = self.type_layout(dest_ty)?;

            // How the discriminant is written depends on the enum's layout.
            match *dest_layout {
                Layout::General { discr, .. } => {
                    // Explicit discriminant field: write the variant index into it.
                    let discr_size = discr.size().bytes();
                    let dest_ptr = self.force_allocation(dest)?.to_ptr()?;
                    self.memory.write_primval(
                        dest_ptr,
                        PrimVal::Bytes(variant_index as u128),
                        discr_size,
                        false
                    )?
                }

                Layout::RawNullablePointer { nndiscr, .. } => {
                    // Null-pointer optimization: the "null" variant is encoded as 0.
                    if variant_index as u64 != nndiscr {
                        self.write_null(dest, dest_ty)?;
                    }
                }

                Layout::StructWrappedNullablePointer {
                    nndiscr,
                    ref discrfield_source,
                    ..
                } => {
                    // Like above, but the discriminating pointer is nested in a struct.
                    if variant_index as u64 != nndiscr {
                        self.write_struct_wrapped_null_pointer(
                            dest_ty,
                            nndiscr,
                            discrfield_source,
                            dest,
                        )?;
                    }
                }

                _ => {
                    bug!(
                        "SetDiscriminant on {} represented as {:#?}",
                        dest_ty,
                        dest_layout
                    )
                }
            }
        }

        // Mark locals as alive
        StorageLive(local) => {
            let old_val = self.frame_mut().storage_live(local)?;
            self.deallocate_local(old_val)?;
        }

        // Mark locals as dead
        StorageDead(local) => {
            let old_val = self.frame_mut().storage_dead(local)?;
            self.deallocate_local(old_val)?;
        }

        // Validity checks.
        Validate(op, ref lvalues) => {
            for operand in lvalues {
                self.validation_op(op, operand)?;
            }
        }
        EndRegion(ce) => {
            self.end_region(Some(ce))?;
        }

        // Defined to do nothing. These are added by optimization passes, to avoid changing the
        // size of MIR constantly.
        Nop => {}

        InlineAsm { .. } => return err!(InlineAsm),
    }

    // Advance the recorded frame, not whatever frame a statement may have pushed.
    self.stack[frame_idx].stmt += 1;
    Ok(())
}
|
||||
|
||||
/// Executes a basic-block terminator, logging the block we end up in afterwards
/// (unless the terminator popped the last stack frame).
fn terminator(&mut self, terminator: &mir::Terminator<'tcx>) -> EvalResult<'tcx> {
    trace!("{:?}", terminator.kind);
    self.eval_terminator(terminator)?;
    // Guard clause: no frame left means nothing to report.
    if self.stack.is_empty() {
        return Ok(());
    }
    trace!("// {:?}", self.frame().block);
    Ok(())
}
|
||||
|
||||
/// Makes sure the global identified by (`def_id`, `substs`) has an evaluation
/// in progress or finished: allocates backing memory, registers it in
/// `self.globals`, and pushes a stack frame to compute its value.
///
/// Returns `true` if a stackframe was pushed (i.e. evaluation was actually started);
/// `false` if the global was already known or is handled via linkage by the machine.
fn global_item(
    &mut self,
    def_id: DefId,
    substs: &'tcx Substs<'tcx>,
    span: Span,
    mutability: Mutability,
) -> EvalResult<'tcx, bool> {
    let instance = self.resolve_associated_const(def_id, substs);
    let cid = GlobalId {
        instance,
        promoted: None,
    };
    // Already evaluated (or being evaluated): nothing to do.
    if self.globals.contains_key(&cid) {
        return Ok(false);
    }
    // Items with custom linkage are delegated to the machine implementation.
    if self.tcx.has_attr(def_id, "linkage") {
        M::global_item_with_linkage(self, cid.instance, mutability)?;
        return Ok(false);
    }
    let mir = self.load_mir(instance.def)?;
    let size = self.type_size_with_substs(mir.return_ty, substs)?.expect(
        "unsized global",
    );
    let align = self.type_align_with_substs(mir.return_ty, substs)?;
    // Backing allocation for the global's value.
    let ptr = self.memory.allocate(
        size,
        align,
        MemoryKind::UninitializedStatic,
    )?;
    let aligned = !self.is_packed(mir.return_ty)?;
    // Register before evaluating, so recursive references find the entry.
    self.globals.insert(
        cid,
        PtrAndAlign {
            ptr: ptr.into(),
            aligned,
        },
    );
    // Interior mutability (non-Freeze) forces the final allocation to stay mutable.
    let internally_mutable = !mir.return_ty.is_freeze(
        self.tcx,
        ty::ParamEnv::empty(Reveal::All),
        span,
    );
    let mutability = if mutability == Mutability::Mutable || internally_mutable {
        Mutability::Mutable
    } else {
        Mutability::Immutable
    };
    // When the frame pops, mark the allocation as a (im)mutable static.
    let cleanup = StackPopCleanup::MarkStatic(mutability);
    let name = ty::tls::with(|tcx| tcx.item_path_str(def_id));
    trace!("pushing stack frame for global: {}", name);
    self.push_stack_frame(
        instance,
        span,
        mir,
        Lvalue::from_ptr(ptr),
        cleanup,
    )?;
    Ok(true)
}
|
||||
}
|
||||
|
||||
// WARNING: This code pushes new stack frames. Make sure that any methods implemented on this
|
||||
// type don't ever access ecx.stack[ecx.cur_frame()], as that will change. This includes, e.g.,
|
||||
// using the current stack frame's substitution.
|
||||
// Basically don't call anything other than `load_mir`, `alloc_ptr`, `push_stack_frame`.
|
||||
/// MIR visitor that finds constants/promoteds/statics inside a statement or terminator
/// and pushes evaluation frames for any that are not yet in `ecx.globals`.
struct ConstantExtractor<'a, 'b: 'a, 'tcx: 'b, M: Machine<'tcx> + 'a> {
    // Span used for the frames we push (for diagnostics).
    span: Span,
    ecx: &'a mut EvalContext<'b, 'tcx, M>,
    // The MIR body being scanned; promoteds are looked up in `mir.promoted`.
    mir: &'tcx mir::Mir<'tcx>,
    // Instance whose MIR we are scanning; provides substs for promoteds.
    instance: ty::Instance<'tcx>,
    // Out-param: number of frames pushed so far, or the first evaluation error.
    new_constants: &'a mut EvalResult<'tcx, u64>,
}
|
||||
|
||||
impl<'a, 'b, 'tcx, M: Machine<'tcx>> ConstantExtractor<'a, 'b, 'tcx, M> {
|
||||
/// Runs `f`, updating the shared `new_constants` counter: a pushed frame bumps it,
/// an error latches it. Once an error is latched, all further calls are no-ops.
fn try<F: FnOnce(&mut Self) -> EvalResult<'tcx, bool>>(&mut self, f: F) {
    // Bail out early if a previous constant already errored.
    let n = if let Ok(n) = *self.new_constants { n } else { return };
    let outcome = f(self);
    *self.new_constants = match outcome {
        // Everything ok and a new stackframe was pushed: count it.
        Ok(true) => Ok(n + 1),
        // Constant correctly evaluated, but no new stackframe: count unchanged.
        Ok(false) => Ok(n),
        // Constant eval errored: remember the error.
        Err(err) => Err(err),
    };
}
|
||||
}
|
||||
|
||||
impl<'a, 'b, 'tcx, M: Machine<'tcx>> Visitor<'tcx> for ConstantExtractor<'a, 'b, 'tcx, M> {
|
||||
/// Inspects a MIR constant; unevaluated constants and promoteds get an evaluation
/// frame pushed (via `try`), already-computed values are left alone.
fn visit_constant(&mut self, constant: &mir::Constant<'tcx>, location: mir::Location) {
    self.super_constant(constant, location);
    match constant.literal {
        // An associated constant that rustc has not evaluated yet: treat it like a global.
        mir::Literal::Value { value: &ty::Const { val: ConstVal::Unevaluated(def_id, substs), .. } } => {
            self.try(|this| {
                this.ecx.global_item(
                    def_id,
                    substs,
                    constant.span,
                    Mutability::Immutable,
                )
            });
        }
        // Any other literal value was already computed by rustc; nothing to do.
        mir::Literal::Value { .. } => {}
        mir::Literal::Promoted { index } => {
            let cid = GlobalId {
                instance: self.instance,
                promoted: Some(index),
            };
            // Already (being) evaluated.
            if self.ecx.globals.contains_key(&cid) {
                return;
            }
            let mir = &self.mir.promoted[index];
            self.try(|this| {
                // Allocate backing memory for the promoted's value...
                let size = this.ecx
                    .type_size_with_substs(mir.return_ty, this.instance.substs)?
                    .expect("unsized global");
                let align = this.ecx.type_align_with_substs(
                    mir.return_ty,
                    this.instance.substs,
                )?;
                let ptr = this.ecx.memory.allocate(
                    size,
                    align,
                    MemoryKind::UninitializedStatic,
                )?;
                let aligned = !this.ecx.is_packed(mir.return_ty)?;
                // ...register it before evaluation (so self-references resolve)...
                this.ecx.globals.insert(
                    cid,
                    PtrAndAlign {
                        ptr: ptr.into(),
                        aligned,
                    },
                );
                trace!("pushing stack frame for {:?}", index);
                // ...and push the frame that computes it.
                this.ecx.push_stack_frame(
                    this.instance,
                    constant.span,
                    mir,
                    Lvalue::from_ptr(ptr),
                    StackPopCleanup::MarkStatic(Mutability::Immutable),
                )?;
                Ok(true)
            });
        }
    }
}
|
||||
|
||||
/// Inspects an lvalue; references to statics trigger their evaluation via `global_item`.
/// Mutability is read from the HIR for local statics, or from `describe_def` for
/// statics in other crates.
fn visit_lvalue(
    &mut self,
    lvalue: &mir::Lvalue<'tcx>,
    context: LvalueContext<'tcx>,
    location: mir::Location,
) {
    self.super_lvalue(lvalue, context, location);
    if let mir::Lvalue::Static(ref static_) = *lvalue {
        let def_id = static_.def_id;
        // Statics are monomorphic, hence the empty substs.
        let substs = self.ecx.tcx.intern_substs(&[]);
        let span = self.span;
        if let Some(node_item) = self.ecx.tcx.hir.get_if_local(def_id) {
            // Local crate: read mutability straight off the HIR item.
            if let hir::map::Node::NodeItem(&hir::Item { ref node, .. }) = node_item {
                if let hir::ItemStatic(_, m, _) = *node {
                    self.try(|this| {
                        this.ecx.global_item(
                            def_id,
                            substs,
                            span,
                            if m == hir::MutMutable {
                                Mutability::Mutable
                            } else {
                                Mutability::Immutable
                            },
                        )
                    });
                    return;
                } else {
                    bug!("static def id doesn't point to static");
                }
            } else {
                bug!("static def id doesn't point to item");
            }
        } else {
            // Foreign crate: no HIR available, query the def description instead.
            let def = self.ecx.tcx.describe_def(def_id).expect("static not found");
            if let hir::def::Def::Static(_, mutable) = def {
                self.try(|this| {
                    this.ecx.global_item(
                        def_id,
                        substs,
                        span,
                        if mutable {
                            Mutability::Mutable
                        } else {
                            Mutability::Immutable
                        },
                    )
                });
            } else {
                bug!("static found but isn't a static: {:?}", def);
            }
        }
    }
}
|
||||
}
|
||||
83
src/librustc/mir/interpret/terminator/drop.rs
Normal file
83
src/librustc/mir/interpret/terminator/drop.rs
Normal file
|
|
@ -0,0 +1,83 @@
|
|||
use rustc::mir::BasicBlock;
|
||||
use rustc::ty::{self, Ty};
|
||||
use syntax::codemap::Span;
|
||||
|
||||
use interpret::{EvalResult, EvalContext, Lvalue, LvalueExtra, PrimVal, Value,
|
||||
Machine, ValTy};
|
||||
|
||||
impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
|
||||
/// Drops the value stored at `lval`: forces it into memory, re-packs its address
/// (plus any fat-pointer extra) as a `Value`, and delegates to `drop`.
pub(crate) fn drop_lvalue(
    &mut self,
    lval: Lvalue,
    instance: ty::Instance<'tcx>,
    ty: Ty<'tcx>,
    span: Span,
    target: BasicBlock,
) -> EvalResult<'tcx> {
    trace!("drop_lvalue: {:#?}", lval);
    // We take the address of the object. This may well be unaligned, which is fine for us here.
    // However, unaligned accesses will probably make the actual drop implementation fail -- a problem shared
    // by rustc.
    let val = match self.force_allocation(lval)? {
        // Trait object: keep the vtable so virtual drop can be resolved.
        Lvalue::Ptr {
            ptr,
            extra: LvalueExtra::Vtable(vtable),
        } => ptr.ptr.to_value_with_vtable(vtable),
        // Slice: keep the length.
        Lvalue::Ptr {
            ptr,
            extra: LvalueExtra::Length(len),
        } => ptr.ptr.to_value_with_len(len),
        // Sized value: a thin pointer suffices.
        Lvalue::Ptr {
            ptr,
            extra: LvalueExtra::None,
        } => ptr.ptr.to_value(),
        // force_allocation must always return a Ptr lvalue.
        _ => bug!("force_allocation broken"),
    };
    self.drop(val, instance, ty, span, target)
}
|
||||
|
||||
fn drop(
|
||||
&mut self,
|
||||
arg: Value,
|
||||
instance: ty::Instance<'tcx>,
|
||||
ty: Ty<'tcx>,
|
||||
span: Span,
|
||||
target: BasicBlock,
|
||||
) -> EvalResult<'tcx> {
|
||||
trace!("drop: {:#?}, {:?}, {:?}", arg, ty.sty, instance.def);
|
||||
|
||||
let instance = match ty.sty {
|
||||
ty::TyDynamic(..) => {
|
||||
let vtable = match arg {
|
||||
Value::ByValPair(_, PrimVal::Ptr(vtable)) => vtable,
|
||||
_ => bug!("expected fat ptr, got {:?}", arg),
|
||||
};
|
||||
match self.read_drop_type_from_vtable(vtable)? {
|
||||
Some(func) => func,
|
||||
// no drop fn -> bail out
|
||||
None => {
|
||||
self.goto_block(target);
|
||||
return Ok(())
|
||||
},
|
||||
}
|
||||
}
|
||||
_ => instance,
|
||||
};
|
||||
|
||||
// the drop function expects a reference to the value
|
||||
let valty = ValTy {
|
||||
value: arg,
|
||||
ty: self.tcx.mk_mut_ptr(ty),
|
||||
};
|
||||
|
||||
let fn_sig = self.tcx.fn_sig(instance.def_id()).skip_binder().clone();
|
||||
|
||||
self.eval_fn_call(
|
||||
instance,
|
||||
Some((Lvalue::undef(), target)),
|
||||
&vec![valty],
|
||||
span,
|
||||
fn_sig,
|
||||
)
|
||||
}
|
||||
}
|
||||
411
src/librustc/mir/interpret/terminator/mod.rs
Normal file
411
src/librustc/mir/interpret/terminator/mod.rs
Normal file
|
|
@ -0,0 +1,411 @@
|
|||
use rustc::mir;
|
||||
use rustc::ty::{self, TypeVariants};
|
||||
use rustc::ty::layout::Layout;
|
||||
use syntax::codemap::Span;
|
||||
use syntax::abi::Abi;
|
||||
|
||||
use super::{EvalResult, EvalContext, eval_context,
|
||||
PtrAndAlign, Lvalue, PrimVal, Value, Machine, ValTy};
|
||||
|
||||
use rustc_data_structures::indexed_vec::Idx;
|
||||
|
||||
mod drop;
|
||||
|
||||
impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
|
||||
/// Transfers control of the current frame to the start of `target`:
/// the statement cursor is reset along with the block.
pub fn goto_block(&mut self, target: mir::BasicBlock) {
    let frame = self.frame_mut();
    frame.block = target;
    frame.stmt = 0;
}
|
||||
|
||||
/// Evaluates a single basic-block terminator: returns, jumps, switches, calls,
/// drops and assertions. Unsupported terminators (generators, `DropAndReplace`,
/// `Resume`) hit `unimplemented!`.
pub(super) fn eval_terminator(
    &mut self,
    terminator: &mir::Terminator<'tcx>,
) -> EvalResult<'tcx> {
    use rustc::mir::TerminatorKind::*;
    match terminator.kind {
        Return => {
            self.dump_local(self.frame().return_lvalue);
            self.pop_stack_frame()?
        }

        Goto { target } => self.goto_block(target),

        SwitchInt {
            ref discr,
            ref values,
            ref targets,
            ..
        } => {
            // FIXME(CTFE): forbid branching
            let discr_val = self.eval_operand(discr)?;
            let discr_prim = self.value_to_primval(discr_val)?;

            // Branch to the `otherwise` case by default, if no match is found.
            let mut target_block = targets[targets.len() - 1];

            // Linear scan over the switch values; first match wins.
            for (index, const_int) in values.iter().enumerate() {
                let prim = PrimVal::Bytes(const_int.to_u128_unchecked());
                if discr_prim.to_bytes()? == prim.to_bytes()? {
                    target_block = targets[index];
                    break;
                }
            }

            self.goto_block(target_block);
        }

        Call {
            ref func,
            ref args,
            ref destination,
            ..
        } => {
            // Evaluate the return destination (if the call may return).
            let destination = match *destination {
                Some((ref lv, target)) => Some((self.eval_lvalue(lv)?, target)),
                None => None,
            };

            let func_ty = self.operand_ty(func);
            // Resolve the callee instance and its signature from the operand type.
            let (fn_def, sig) = match func_ty.sty {
                ty::TyFnPtr(sig) => {
                    // Indirect call: look the instance up via the function pointer,
                    // then check the pointer's signature against the real one.
                    let fn_ptr = self.eval_operand_to_primval(func)?.to_ptr()?;
                    let instance = self.memory.get_fn(fn_ptr)?;
                    let instance_ty = instance.def.def_ty(self.tcx);
                    let instance_ty = self.monomorphize(instance_ty, instance.substs);
                    match instance_ty.sty {
                        ty::TyFnDef(..) => {
                            let real_sig = instance_ty.fn_sig(self.tcx);
                            let sig = self.tcx.erase_late_bound_regions_and_normalize(&sig);
                            let real_sig = self.tcx.erase_late_bound_regions_and_normalize(&real_sig);
                            if !self.check_sig_compat(sig, real_sig)? {
                                return err!(FunctionPointerTyMismatch(real_sig, sig));
                            }
                        }
                        ref other => bug!("instance def ty: {:?}", other),
                    }
                    (instance, sig)
                }
                // Direct call: resolve statically.
                ty::TyFnDef(def_id, substs) => (
                    eval_context::resolve(self.tcx, def_id, substs),
                    func_ty.fn_sig(self.tcx),
                ),
                _ => {
                    let msg = format!("can't handle callee of type {:?}", func_ty);
                    return err!(Unimplemented(msg));
                }
            };
            let args = self.operands_to_args(args)?;
            let sig = self.tcx.erase_late_bound_regions_and_normalize(&sig);
            self.eval_fn_call(
                fn_def,
                destination,
                &args,
                terminator.source_info.span,
                sig,
            )?;
        }

        Drop {
            ref location,
            target,
            ..
        } => {
            // FIXME(CTFE): forbid drop in const eval
            let lval = self.eval_lvalue(location)?;
            let ty = self.lvalue_ty(location);
            let ty = eval_context::apply_param_substs(self.tcx, self.substs(), &ty);
            trace!("TerminatorKind::drop: {:?}, type {}", location, ty);

            let instance = eval_context::resolve_drop_in_place(self.tcx, ty);
            self.drop_lvalue(
                lval,
                instance,
                ty,
                terminator.source_info.span,
                target,
            )?;
        }

        Assert {
            ref cond,
            expected,
            ref msg,
            target,
            ..
        } => {
            let cond_val = self.eval_operand_to_primval(cond)?.to_bool()?;
            if expected == cond_val {
                // Assertion holds: continue normally.
                self.goto_block(target);
            } else {
                // Assertion failed: turn the MIR assert message into an eval error.
                use rustc::mir::AssertMessage::*;
                return match *msg {
                    BoundsCheck { ref len, ref index } => {
                        let span = terminator.source_info.span;
                        let len = self.eval_operand_to_primval(len)
                            .expect("can't eval len")
                            .to_u64()?;
                        let index = self.eval_operand_to_primval(index)
                            .expect("can't eval index")
                            .to_u64()?;
                        err!(ArrayIndexOutOfBounds(span, len, index))
                    }
                    Math(ref err) => {
                        err!(Math(terminator.source_info.span, err.clone()))
                    }
                    GeneratorResumedAfterReturn |
                    GeneratorResumedAfterPanic => unimplemented!(),
                };
            }
        }

        Yield { .. } => unimplemented!("{:#?}", terminator.kind),
        GeneratorDrop => unimplemented!(),
        DropAndReplace { .. } => unimplemented!(),
        Resume => unimplemented!(),
        Unreachable => return err!(Unreachable),
    }

    Ok(())
}
|
||||
|
||||
/// Decides whether it is okay to call the method with signature `real_sig` using signature `sig`.
/// FIXME: This should take into account the platform-dependent ABI description.
///
/// Accepts in three tiers: (1) signatures equal up to pointer/reference loosening,
/// (2) never for variadics that failed tier 1, (3) the special `fn()`-pointer-to-
/// non-capturing-closure shape (`Rust` caller vs `RustCall` callee).
fn check_sig_compat(
    &mut self,
    sig: ty::FnSig<'tcx>,
    real_sig: ty::FnSig<'tcx>,
) -> EvalResult<'tcx, bool> {
    // Per-type compatibility: equal types, or interchangeable pointer kinds.
    fn check_ty_compat<'tcx>(ty: ty::Ty<'tcx>, real_ty: ty::Ty<'tcx>) -> bool {
        if ty == real_ty {
            return true;
        } // This is actually a fast pointer comparison
        return match (&ty.sty, &real_ty.sty) {
            // Permit changing the pointer type of raw pointers and references as well as
            // mutability of raw pointers.
            // TODO: Should not be allowed when fat pointers are involved.
            (&TypeVariants::TyRawPtr(_), &TypeVariants::TyRawPtr(_)) => true,
            (&TypeVariants::TyRef(_, _), &TypeVariants::TyRef(_, _)) => {
                ty.is_mutable_pointer() == real_ty.is_mutable_pointer()
            }
            // rule out everything else
            _ => false,
        };
    }

    // Tier 1: same ABI, same variadicity, and pairwise-compatible inputs+output.
    if sig.abi == real_sig.abi && sig.variadic == real_sig.variadic &&
        sig.inputs_and_output.len() == real_sig.inputs_and_output.len() &&
        sig.inputs_and_output
            .iter()
            .zip(real_sig.inputs_and_output)
            .all(|(ty, real_ty)| check_ty_compat(ty, real_ty))
    {
        // Definitely good.
        return Ok(true);
    }

    if sig.variadic || real_sig.variadic {
        // We're not touching this
        return Ok(false);
    }

    // We need to allow what comes up when a non-capturing closure is cast to a fn().
    match (sig.abi, real_sig.abi) {
        (Abi::Rust, Abi::RustCall) // check the ABIs. This makes the test here non-symmetric.
        if check_ty_compat(sig.output(), real_sig.output()) && real_sig.inputs_and_output.len() == 3 => {
            // First argument of real_sig must be a ZST
            let fst_ty = real_sig.inputs_and_output[0];
            let layout = self.type_layout(fst_ty)?;
            let size = layout.size(&self.tcx.data_layout).bytes();
            if size == 0 {
                // Second argument must be a tuple matching the argument list of sig
                let snd_ty = real_sig.inputs_and_output[1];
                match snd_ty.sty {
                    TypeVariants::TyTuple(tys, _) if sig.inputs().len() == tys.len() =>
                        if sig.inputs().iter().zip(tys).all(|(ty, real_ty)| check_ty_compat(ty, real_ty)) {
                            return Ok(true)
                        },
                    _ => {}
                }
            }
        }
        _ => {}
    };

    // Nope, this doesn't work.
    return Ok(false);
}
|
||||
|
||||
/// Performs a function call: dispatches on the instance kind (intrinsic, closure
/// shim, ordinary item, or virtual call), gives the machine a chance to hook the
/// call, pushes the callee frame and copies the arguments into its locals.
fn eval_fn_call(
    &mut self,
    instance: ty::Instance<'tcx>,
    destination: Option<(Lvalue, mir::BasicBlock)>,
    args: &[ValTy<'tcx>],
    span: Span,
    sig: ty::FnSig<'tcx>,
) -> EvalResult<'tcx> {
    trace!("eval_fn_call: {:#?}", instance);
    match instance.def {
        ty::InstanceDef::Intrinsic(..) => {
            // Intrinsics are handled entirely by the machine; a diverging
            // intrinsic (no destination) is an error here.
            let (ret, target) = match destination {
                Some(dest) => dest,
                _ => return err!(Unreachable),
            };
            let ty = sig.output();
            let layout = self.type_layout(ty)?;
            M::call_intrinsic(self, instance, args, ret, ty, layout, target)?;
            self.dump_local(ret);
            Ok(())
        }
        // FIXME: figure out why we can't just go through the shim
        ty::InstanceDef::ClosureOnceShim { .. } => {
            // Let the machine hook the call; `true` means it fully handled it.
            if M::eval_fn_call(self, instance, destination, args, span, sig)? {
                return Ok(());
            }
            // The hook pushed the frame; now fill the callee's argument locals.
            let mut arg_locals = self.frame().mir.args_iter();
            match sig.abi {
                // closure as closure once
                Abi::RustCall => {
                    for (arg_local, &valty) in arg_locals.zip(args) {
                        let dest = self.eval_lvalue(&mir::Lvalue::Local(arg_local))?;
                        self.write_value(valty, dest)?;
                    }
                }
                // non capture closure as fn ptr
                // need to inject zst ptr for closure object (aka do nothing)
                // and need to pack arguments
                Abi::Rust => {
                    trace!(
                        "arg_locals: {:?}",
                        self.frame().mir.args_iter().collect::<Vec<_>>()
                    );
                    trace!("args: {:?}", args);
                    // Skip the closure-object local; write args into the fields
                    // of the tuple local that follows it.
                    let local = arg_locals.nth(1).unwrap();
                    for (i, &valty) in args.into_iter().enumerate() {
                        let dest = self.eval_lvalue(&mir::Lvalue::Local(local).field(
                            mir::Field::new(i),
                            valty.ty,
                        ))?;
                        self.write_value(valty, dest)?;
                    }
                }
                _ => bug!("bad ABI for ClosureOnceShim: {:?}", sig.abi),
            }
            Ok(())
        }
        ty::InstanceDef::FnPtrShim(..) |
        ty::InstanceDef::DropGlue(..) |
        ty::InstanceDef::CloneShim(..) |
        ty::InstanceDef::Item(_) => {
            // Push the stack frame, and potentially be entirely done if the call got hooked
            if M::eval_fn_call(self, instance, destination, args, span, sig)? {
                return Ok(());
            }

            // Pass the arguments
            let mut arg_locals = self.frame().mir.args_iter();
            trace!("ABI: {:?}", sig.abi);
            trace!(
                "arg_locals: {:?}",
                self.frame().mir.args_iter().collect::<Vec<_>>()
            );
            trace!("args: {:?}", args);
            match sig.abi {
                Abi::RustCall => {
                    // rust-call: exactly two caller-side args — the callee object
                    // and a tuple of the actual arguments.
                    assert_eq!(args.len(), 2);

                    {
                        // write first argument
                        let first_local = arg_locals.next().unwrap();
                        let dest = self.eval_lvalue(&mir::Lvalue::Local(first_local))?;
                        self.write_value(args[0], dest)?;
                    }

                    // unpack and write all other args
                    let layout = self.type_layout(args[1].ty)?;
                    if let (&ty::TyTuple(fields, _),
                            &Layout::Univariant { ref variant, .. }) = (&args[1].ty.sty, layout)
                    {
                        trace!("fields: {:?}", fields);
                        // Callee expects the tuple spread across its locals?
                        if self.frame().mir.args_iter().count() == fields.len() + 1 {
                            let offsets = variant.offsets.iter().map(|s| s.bytes());
                            match args[1].value {
                                Value::ByRef(PtrAndAlign { ptr, aligned }) => {
                                    assert!(
                                        aligned,
                                        "Unaligned ByRef-values cannot occur as function arguments"
                                    );
                                    // Read each tuple field at its layout offset and
                                    // write it into the matching callee local.
                                    for ((offset, ty), arg_local) in
                                        offsets.zip(fields).zip(arg_locals)
                                    {
                                        let arg = Value::by_ref(ptr.offset(offset, &self)?);
                                        let dest =
                                            self.eval_lvalue(&mir::Lvalue::Local(arg_local))?;
                                        trace!(
                                            "writing arg {:?} to {:?} (type: {})",
                                            arg,
                                            dest,
                                            ty
                                        );
                                        let valty = ValTy {
                                            value: arg,
                                            ty,
                                        };
                                        self.write_value(valty, dest)?;
                                    }
                                }
                                // Fully undefined tuple: nothing to copy.
                                Value::ByVal(PrimVal::Undef) => {}
                                other => {
                                    // A by-value tuple can only be a 1-tuple here.
                                    assert_eq!(fields.len(), 1);
                                    let dest = self.eval_lvalue(&mir::Lvalue::Local(
                                        arg_locals.next().unwrap(),
                                    ))?;
                                    let valty = ValTy {
                                        value: other,
                                        ty: fields[0],
                                    };
                                    self.write_value(valty, dest)?;
                                }
                            }
                        } else {
                            trace!("manual impl of rust-call ABI");
                            // called a manual impl of a rust-call function:
                            // pass the tuple through unexploded.
                            let dest = self.eval_lvalue(
                                &mir::Lvalue::Local(arg_locals.next().unwrap()),
                            )?;
                            self.write_value(args[1], dest)?;
                        }
                    } else {
                        bug!(
                            "rust-call ABI tuple argument was {:#?}, {:#?}",
                            args[1].ty,
                            layout
                        );
                    }
                }
                _ => {
                    // Plain ABIs: one caller arg per callee local, in order.
                    for (arg_local, &valty) in arg_locals.zip(args) {
                        let dest = self.eval_lvalue(&mir::Lvalue::Local(arg_local))?;
                        self.write_value(valty, dest)?;
                    }
                }
            }
            Ok(())
        }
        // cannot use the shim here, because that will only result in infinite recursion
        ty::InstanceDef::Virtual(_, idx) => {
            // Virtual call: fetch the method pointer from vtable slot `idx + 3`
            // (the first three slots are drop/size/align — see get_vtable).
            let ptr_size = self.memory.pointer_size();
            let (ptr, vtable) = args[0].into_ptr_vtable_pair(&self.memory)?;
            let fn_ptr = self.memory.read_ptr_sized_unsigned(
                vtable.offset(ptr_size * (idx as u64 + 3), &self)?
            )?.to_ptr()?;
            let instance = self.memory.get_fn(fn_ptr)?;
            // Replace the trait-object receiver by the thin pointer to the
            // concrete type before recursing.
            let mut args = args.to_vec();
            let ty = self.get_field_ty(args[0].ty, 0)?.ty; // TODO: packed flag is ignored
            args[0].ty = ty;
            args[0].value = ptr.to_value();
            // recurse with concrete function
            self.eval_fn_call(instance, destination, &args, span, sig)
        }
    }
}
|
||||
}
|
||||
137
src/librustc/mir/interpret/traits.rs
Normal file
137
src/librustc/mir/interpret/traits.rs
Normal file
|
|
@ -0,0 +1,137 @@
|
|||
use rustc::traits::{self, Reveal};
|
||||
use rustc::hir::def_id::DefId;
|
||||
use rustc::ty::subst::Substs;
|
||||
use rustc::ty::{self, Ty};
|
||||
use syntax::codemap::DUMMY_SP;
|
||||
use syntax::ast::{self, Mutability};
|
||||
|
||||
use super::{EvalResult, EvalContext, eval_context, MemoryPointer, MemoryKind, Value, PrimVal,
|
||||
Machine};
|
||||
|
||||
impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
|
||||
/// Resolves `trait_ref` to the concrete impl (vtable) that satisfies it.
///
/// Panics (via `unwrap`) if selection fails or is ambiguous — callers are expected
/// to only pass obligations that monomorphized code has already proven.
pub(crate) fn fulfill_obligation(
    &self,
    trait_ref: ty::PolyTraitRef<'tcx>,
) -> traits::Vtable<'tcx, ()> {
    // Do the initial selection for the obligation. This yields the shallow result we are
    // looking for -- that is, what specific impl.
    self.tcx.infer_ctxt().enter(|infcx| {
        let mut selcx = traits::SelectionContext::new(&infcx);

        let obligation = traits::Obligation::new(
            traits::ObligationCause::misc(DUMMY_SP, ast::DUMMY_NODE_ID),
            ty::ParamEnv::empty(Reveal::All),
            trait_ref.to_poly_trait_predicate(),
        );
        let selection = selcx.select(&obligation).unwrap().unwrap();

        // Currently, we use a fulfillment context to completely resolve all nested obligations.
        // This is because they can inform the inference of the impl's type parameters.
        let mut fulfill_cx = traits::FulfillmentContext::new();
        let vtable = selection.map(|predicate| {
            fulfill_cx.register_predicate_obligation(&infcx, predicate);
        });
        infcx.drain_fulfillment_cx_or_panic(DUMMY_SP, &mut fulfill_cx, &vtable)
    })
}
|
||||
|
||||
/// Creates a dynamic vtable for the given type and vtable origin. This is used only for
|
||||
/// objects.
|
||||
///
|
||||
/// The `trait_ref` encodes the erased self type. Hence if we are
|
||||
/// making an object `Foo<Trait>` from a value of type `Foo<T>`, then
|
||||
/// `trait_ref` would map `T:Trait`.
|
||||
pub fn get_vtable(
|
||||
&mut self,
|
||||
ty: Ty<'tcx>,
|
||||
trait_ref: ty::PolyTraitRef<'tcx>,
|
||||
) -> EvalResult<'tcx, MemoryPointer> {
|
||||
debug!("get_vtable(trait_ref={:?})", trait_ref);
|
||||
|
||||
let size = self.type_size(trait_ref.self_ty())?.expect(
|
||||
"can't create a vtable for an unsized type",
|
||||
);
|
||||
let align = self.type_align(trait_ref.self_ty())?;
|
||||
|
||||
let ptr_size = self.memory.pointer_size();
|
||||
let methods = ::rustc::traits::get_vtable_methods(self.tcx, trait_ref);
|
||||
let vtable = self.memory.allocate(
|
||||
ptr_size * (3 + methods.count() as u64),
|
||||
ptr_size,
|
||||
MemoryKind::UninitializedStatic,
|
||||
)?;
|
||||
|
||||
let drop = eval_context::resolve_drop_in_place(self.tcx, ty);
|
||||
let drop = self.memory.create_fn_alloc(drop);
|
||||
self.memory.write_ptr_sized_unsigned(vtable, PrimVal::Ptr(drop))?;
|
||||
|
||||
let size_ptr = vtable.offset(ptr_size, &self)?;
|
||||
self.memory.write_ptr_sized_unsigned(size_ptr, PrimVal::Bytes(size as u128))?;
|
||||
let align_ptr = vtable.offset(ptr_size * 2, &self)?;
|
||||
self.memory.write_ptr_sized_unsigned(align_ptr, PrimVal::Bytes(align as u128))?;
|
||||
|
||||
for (i, method) in ::rustc::traits::get_vtable_methods(self.tcx, trait_ref).enumerate() {
|
||||
if let Some((def_id, substs)) = method {
|
||||
let instance = eval_context::resolve(self.tcx, def_id, substs);
|
||||
let fn_ptr = self.memory.create_fn_alloc(instance);
|
||||
let method_ptr = vtable.offset(ptr_size * (3 + i as u64), &self)?;
|
||||
self.memory.write_ptr_sized_unsigned(method_ptr, PrimVal::Ptr(fn_ptr))?;
|
||||
}
|
||||
}
|
||||
|
||||
self.memory.mark_static_initalized(
|
||||
vtable.alloc_id,
|
||||
Mutability::Mutable,
|
||||
)?;
|
||||
|
||||
Ok(vtable)
|
||||
}
|
||||
|
||||
pub fn read_drop_type_from_vtable(
|
||||
&self,
|
||||
vtable: MemoryPointer,
|
||||
) -> EvalResult<'tcx, Option<ty::Instance<'tcx>>> {
|
||||
// we don't care about the pointee type, we just want a pointer
|
||||
match self.read_ptr(vtable, self.tcx.mk_nil_ptr())? {
|
||||
// some values don't need to call a drop impl, so the value is null
|
||||
Value::ByVal(PrimVal::Bytes(0)) => Ok(None),
|
||||
Value::ByVal(PrimVal::Ptr(drop_fn)) => self.memory.get_fn(drop_fn).map(Some),
|
||||
_ => err!(ReadBytesAsPointer),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn read_size_and_align_from_vtable(
|
||||
&self,
|
||||
vtable: MemoryPointer,
|
||||
) -> EvalResult<'tcx, (u64, u64)> {
|
||||
let pointer_size = self.memory.pointer_size();
|
||||
let size = self.memory.read_ptr_sized_unsigned(vtable.offset(pointer_size, self)?)?.to_bytes()? as u64;
|
||||
let align = self.memory.read_ptr_sized_unsigned(
|
||||
vtable.offset(pointer_size * 2, self)?
|
||||
)?.to_bytes()? as u64;
|
||||
Ok((size, align))
|
||||
}
|
||||
|
||||
pub(crate) fn resolve_associated_const(
|
||||
&self,
|
||||
def_id: DefId,
|
||||
substs: &'tcx Substs<'tcx>,
|
||||
) -> ty::Instance<'tcx> {
|
||||
if let Some(trait_id) = self.tcx.trait_of_item(def_id) {
|
||||
let trait_ref = ty::Binder(ty::TraitRef::new(trait_id, substs));
|
||||
let vtable = self.fulfill_obligation(trait_ref);
|
||||
if let traits::VtableImpl(vtable_impl) = vtable {
|
||||
let name = self.tcx.item_name(def_id);
|
||||
let assoc_const_opt = self.tcx.associated_items(vtable_impl.impl_def_id).find(
|
||||
|item| {
|
||||
item.kind == ty::AssociatedKind::Const && item.name == name
|
||||
},
|
||||
);
|
||||
if let Some(assoc_const) = assoc_const_opt {
|
||||
return ty::Instance::new(assoc_const.def_id, vtable_impl.substs);
|
||||
}
|
||||
}
|
||||
}
|
||||
ty::Instance::new(def_id, substs)
|
||||
}
|
||||
}
|
||||
--- New file: src/librustc/mir/interpret/validation.rs (727 lines added) @@ -0,0 +1,727 @@ ---
|
|||
use rustc::hir::{self, Mutability};
|
||||
use rustc::hir::Mutability::*;
|
||||
use rustc::mir::{self, ValidationOp, ValidationOperand};
|
||||
use rustc::ty::{self, Ty, TypeFoldable, TyCtxt};
|
||||
use rustc::ty::subst::{Substs, Subst};
|
||||
use rustc::traits;
|
||||
use rustc::infer::InferCtxt;
|
||||
use rustc::traits::Reveal;
|
||||
use rustc::middle::region;
|
||||
use rustc_data_structures::indexed_vec::Idx;
|
||||
|
||||
use super::{EvalError, EvalResult, EvalErrorKind, EvalContext, DynamicLifetime, AccessKind, Value,
|
||||
Lvalue, LvalueExtra, Machine, ValTy};
|
||||
|
||||
/// One validation job: the lvalue to validate, in both abstract (`AbsLvalue`) and
/// concrete (`Lvalue`) form, plus the type/region/mutability data of the MIR operand.
pub type ValidationQuery<'tcx> = ValidationOperand<'tcx, (AbsLvalue<'tcx>, Lvalue)>;
|
||||
|
||||
/// How a validation query is to be processed.
#[derive(Copy, Clone, Debug, PartialEq)]
enum ValidationMode {
    /// Assert validity and acquire the corresponding locks.
    Acquire,
    /// Recover because the given region ended
    Recover(region::Scope),
    /// Release write locks — until the given scope resumes, or entirely (`None`).
    ReleaseUntil(Option<region::Scope>),
}
|
||||
|
||||
impl ValidationMode {
|
||||
fn acquiring(self) -> bool {
|
||||
use self::ValidationMode::*;
|
||||
match self {
|
||||
Acquire | Recover(_) => true,
|
||||
ReleaseUntil(_) => false,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Abstract lvalues: a machine-independent description of an lvalue, usable as a
// hash-map key (e.g. for tracking suspended write locks).
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
pub enum AbsLvalue<'tcx> {
    /// A local of the current frame.
    Local(mir::Local),
    /// A static, identified by its `DefId`.
    Static(hir::def_id::DefId),
    /// A projection applied to another abstract lvalue.
    Projection(Box<AbsLvalueProjection<'tcx>>),
}

// Abstract projections use a concrete `u64` for `Index` (the already-evaluated
// index operand) and erase field types to `()`.
type AbsLvalueProjection<'tcx> = mir::Projection<'tcx, AbsLvalue<'tcx>, u64, ()>;
type AbsLvalueElem<'tcx> = mir::ProjectionElem<'tcx, u64, ()>;
|
||||
|
||||
impl<'tcx> AbsLvalue<'tcx> {
|
||||
pub fn field(self, f: mir::Field) -> AbsLvalue<'tcx> {
|
||||
self.elem(mir::ProjectionElem::Field(f, ()))
|
||||
}
|
||||
|
||||
pub fn deref(self) -> AbsLvalue<'tcx> {
|
||||
self.elem(mir::ProjectionElem::Deref)
|
||||
}
|
||||
|
||||
pub fn downcast(self, adt_def: &'tcx ty::AdtDef, variant_index: usize) -> AbsLvalue<'tcx> {
|
||||
self.elem(mir::ProjectionElem::Downcast(adt_def, variant_index))
|
||||
}
|
||||
|
||||
pub fn index(self, index: u64) -> AbsLvalue<'tcx> {
|
||||
self.elem(mir::ProjectionElem::Index(index))
|
||||
}
|
||||
|
||||
fn elem(self, elem: AbsLvalueElem<'tcx>) -> AbsLvalue<'tcx> {
|
||||
AbsLvalue::Projection(Box::new(AbsLvalueProjection {
|
||||
base: self,
|
||||
elem,
|
||||
}))
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
    /// Translates a MIR projection into its abstract form: `Index` operands are
    /// evaluated to concrete `u64`s, field types are erased to `()`.
    fn abstract_lvalue_projection(&self, proj: &mir::LvalueProjection<'tcx>) -> EvalResult<'tcx, AbsLvalueProjection<'tcx>> {
        use self::mir::ProjectionElem::*;

        let elem = match proj.elem {
            Deref => Deref,
            Field(f, _) => Field(f, ()),
            Index(v) => {
                // Evaluate the index local to a concrete usize value.
                let value = self.frame().get_local(v)?;
                let ty = self.tcx.types.usize;
                let n = self.value_to_primval(ValTy { value, ty })?.to_u64()?;
                Index(n)
            },
            ConstantIndex { offset, min_length, from_end } =>
                ConstantIndex { offset, min_length, from_end },
            Subslice { from, to } =>
                Subslice { from, to },
            Downcast(adt, sz) => Downcast(adt, sz),
        };
        Ok(AbsLvalueProjection {
            base: self.abstract_lvalue(&proj.base)?,
            elem
        })
    }

    /// Computes the abstract form of a MIR lvalue (see `AbsLvalue`).
    fn abstract_lvalue(&self, lval: &mir::Lvalue<'tcx>) -> EvalResult<'tcx, AbsLvalue<'tcx>> {
        Ok(match lval {
            &mir::Lvalue::Local(l) => AbsLvalue::Local(l),
            &mir::Lvalue::Static(ref s) => AbsLvalue::Static(s.def_id),
            &mir::Lvalue::Projection(ref p) =>
                AbsLvalue::Projection(Box::new(self.abstract_lvalue_projection(&*p)?)),
        })
    }

    // Validity checks
    /// Entry point for MIR `Validate` statements: builds a `ValidationQuery` for the
    /// operand, performs mode-specific bookkeeping (e.g. recording suspensions), and
    /// runs `validate`.
    pub(crate) fn validation_op(
        &mut self,
        op: ValidationOp,
        operand: &ValidationOperand<'tcx, mir::Lvalue<'tcx>>,
    ) -> EvalResult<'tcx> {
        // If mir-emit-validate is set to 0 (i.e., disabled), we may still see validation commands
        // because other crates may have been compiled with mir-emit-validate > 0. Ignore those
        // commands. This makes mir-emit-validate also a flag to control whether miri will do
        // validation or not.
        if self.tcx.sess.opts.debugging_opts.mir_emit_validate == 0 {
            return Ok(());
        }
        debug_assert!(self.memory.cur_frame == self.cur_frame());

        // HACK: Determine if this method is whitelisted and hence we do not perform any validation.
        // We currently insta-UB on anything passing around uninitialized memory, so we have to whitelist
        // the places that are allowed to do that.
        // The second group is stuff libstd does that is forbidden even under relaxed validation.
        {
            // The regexp we use for filtering
            use regex::Regex;
            lazy_static! {
                static ref RE: Regex = Regex::new("^(\
                    (std|alloc::heap::__core)::mem::(uninitialized|forget)::|\
                    <(std|alloc)::heap::Heap as (std::heap|alloc::allocator)::Alloc>::|\
                    <(std|alloc::heap::__core)::mem::ManuallyDrop<T>><.*>::new$|\
                    <(std|alloc::heap::__core)::mem::ManuallyDrop<T> as std::ops::DerefMut><.*>::deref_mut$|\
                    (std|alloc::heap::__core)::ptr::read::|\
                    \
                    <std::sync::Arc<T>><.*>::inner$|\
                    <std::sync::Arc<T>><.*>::drop_slow$|\
                    (std::heap|alloc::allocator)::Layout::for_value::|\
                    (std|alloc::heap::__core)::mem::(size|align)_of_val::\
                )").unwrap();
            }
            // Now test
            let name = self.stack[self.cur_frame()].instance.to_string();
            if RE.is_match(&name) {
                return Ok(());
            }
        }

        // We need to monomorphize ty *without* erasing lifetimes
        let ty = operand.ty.subst(self.tcx, self.substs());
        let lval = self.eval_lvalue(&operand.lval)?;
        let abs_lval = self.abstract_lvalue(&operand.lval)?;
        let query = ValidationQuery {
            lval: (abs_lval, lval),
            ty,
            re: operand.re,
            mutbl: operand.mutbl,
        };

        // Check the mode, and also perform mode-specific operations
        let mode = match op {
            ValidationOp::Acquire => ValidationMode::Acquire,
            ValidationOp::Release => ValidationMode::ReleaseUntil(None),
            ValidationOp::Suspend(scope) => {
                // Only mutable data has write locks that can be suspended.
                if query.mutbl == MutMutable {
                    let lft = DynamicLifetime {
                        frame: self.cur_frame(),
                        region: Some(scope), // Notably, we only ever suspend things for given regions.
                        // Suspending for the entire function does not make any sense.
                    };
                    trace!("Suspending {:?} until {:?}", query, scope);
                    self.suspended.entry(lft).or_insert_with(Vec::new).push(
                        query.clone(),
                    );
                }
                ValidationMode::ReleaseUntil(Some(scope))
            }
        };
        self.validate(query, mode)
    }

    /// Release locks and executes suspensions of the given region (or the entire fn, in case of None).
    pub(crate) fn end_region(&mut self, scope: Option<region::Scope>) -> EvalResult<'tcx> {
        debug_assert!(self.memory.cur_frame == self.cur_frame());
        self.memory.locks_lifetime_ended(scope);
        match scope {
            Some(scope) => {
                // Recover suspended lvals
                let lft = DynamicLifetime {
                    frame: self.cur_frame(),
                    region: Some(scope),
                };
                if let Some(queries) = self.suspended.remove(&lft) {
                    for query in queries {
                        trace!("Recovering {:?} from suspension", query);
                        self.validate(query, ValidationMode::Recover(scope))?;
                    }
                }
            }
            None => {
                // Clean suspension table of current frame
                let cur_frame = self.cur_frame();
                self.suspended.retain(|lft, _| {
                    lft.frame != cur_frame // keep only what is in the other (lower) frames
                });
            }
        }
        Ok(())
    }

    /// Normalizes associated types in `ty` *without* erasing lifetimes (only inference
    /// region variables are erased). The inner items are adapted copies of rustc's
    /// infer machinery, tweaked for that purpose; only the first `return` runs at call
    /// time — everything after it is item definitions.
    fn normalize_type_unerased(&self, ty: Ty<'tcx>) -> Ty<'tcx> {
        return normalize_associated_type(self.tcx, &ty);

        use syntax::codemap::{Span, DUMMY_SP};

        // We copy a bunch of stuff from rustc/infer/mod.rs to be able to tweak its behavior
        fn normalize_projections_in<'a, 'gcx, 'tcx, T>(
            self_: &InferCtxt<'a, 'gcx, 'tcx>,
            param_env: ty::ParamEnv<'tcx>,
            value: &T,
        ) -> T::Lifted
        where
            T: TypeFoldable<'tcx> + ty::Lift<'gcx>,
        {
            let mut selcx = traits::SelectionContext::new(self_);
            let cause = traits::ObligationCause::dummy();
            let traits::Normalized {
                value: result,
                obligations,
            } = traits::normalize(&mut selcx, param_env, cause, value);

            let mut fulfill_cx = traits::FulfillmentContext::new();

            for obligation in obligations {
                fulfill_cx.register_predicate_obligation(self_, obligation);
            }

            drain_fulfillment_cx_or_panic(self_, DUMMY_SP, &mut fulfill_cx, &result)
        }

        fn drain_fulfillment_cx_or_panic<'a, 'gcx, 'tcx, T>(
            self_: &InferCtxt<'a, 'gcx, 'tcx>,
            span: Span,
            fulfill_cx: &mut traits::FulfillmentContext<'tcx>,
            result: &T,
        ) -> T::Lifted
        where
            T: TypeFoldable<'tcx> + ty::Lift<'gcx>,
        {
            // In principle, we only need to do this so long as `result`
            // contains unbound type parameters. It could be a slight
            // optimization to stop iterating early.
            match fulfill_cx.select_all_or_error(self_) {
                Ok(()) => { }
                Err(errors) => {
                    span_bug!(
                        span,
                        "Encountered errors `{:?}` resolving bounds after type-checking",
                        errors
                    );
                }
            }

            let result = self_.resolve_type_vars_if_possible(result);
            // Unlike rustc's version: erase *only* region inference variables,
            // keeping all other lifetimes intact.
            let result = self_.tcx.fold_regions(
                &result,
                &mut false,
                |r, _| match *r {
                    ty::ReVar(_) => self_.tcx.types.re_erased,
                    _ => r,
                },
            );

            match self_.tcx.lift_to_global(&result) {
                Some(result) => result,
                None => {
                    span_bug!(span, "Uninferred types/regions in `{:?}`", result);
                }
            }
        }

        trait MyTransNormalize<'gcx>: TypeFoldable<'gcx> {
            fn my_trans_normalize<'a, 'tcx>(
                &self,
                infcx: &InferCtxt<'a, 'gcx, 'tcx>,
                param_env: ty::ParamEnv<'tcx>,
            ) -> Self;
        }

        macro_rules! items { ($($item:item)+) => ($($item)+) }
        macro_rules! impl_trans_normalize {
            ($lt_gcx:tt, $($ty:ty),+) => {
                items!($(impl<$lt_gcx> MyTransNormalize<$lt_gcx> for $ty {
                    fn my_trans_normalize<'a, 'tcx>(&self,
                                                    infcx: &InferCtxt<'a, $lt_gcx, 'tcx>,
                                                    param_env: ty::ParamEnv<'tcx>)
                                                    -> Self {
                        normalize_projections_in(infcx, param_env, self)
                    }
                })+);
            }
        }

        impl_trans_normalize!('gcx,
            Ty<'gcx>,
            &'gcx Substs<'gcx>,
            ty::FnSig<'gcx>,
            ty::PolyFnSig<'gcx>,
            ty::ClosureSubsts<'gcx>,
            ty::PolyTraitRef<'gcx>,
            ty::ExistentialTraitRef<'gcx>
        );

        fn normalize_associated_type<'a, 'tcx, T>(self_: TyCtxt<'a, 'tcx, 'tcx>, value: &T) -> T
        where
            T: MyTransNormalize<'tcx>,
        {
            let param_env = ty::ParamEnv::empty(Reveal::All);

            if !value.has_projections() {
                return value.clone();
            }

            self_.infer_ctxt().enter(|infcx| {
                value.my_trans_normalize(&infcx, param_env)
            })
        }
    }

    /// Recursively validates every field of the given variant.
    fn validate_variant(
        &mut self,
        query: ValidationQuery<'tcx>,
        variant: &ty::VariantDef,
        subst: &ty::subst::Substs<'tcx>,
        mode: ValidationMode,
    ) -> EvalResult<'tcx> {
        // TODO: Maybe take visibility/privacy into account.
        for (idx, field_def) in variant.fields.iter().enumerate() {
            let field_ty = field_def.ty(self.tcx, subst);
            let field = mir::Field::new(idx);
            let field_lvalue = self.lvalue_field(query.lval.1, field, query.ty, field_ty)?;
            self.validate(
                ValidationQuery {
                    lval: (query.lval.0.clone().field(field), field_lvalue),
                    ty: field_ty,
                    ..query
                },
                mode,
            )?;
        }
        Ok(())
    }

    /// Checks that `val` is a well-aligned, non-NULL pointer to `pointee_ty`, then
    /// recursively validates the pointee.
    fn validate_ptr(
        &mut self,
        val: Value,
        abs_lval: AbsLvalue<'tcx>,
        pointee_ty: Ty<'tcx>,
        re: Option<region::Scope>,
        mutbl: Mutability,
        mode: ValidationMode,
    ) -> EvalResult<'tcx> {
        // Check alignment and non-NULLness
        let (_, align) = self.size_and_align_of_dst(pointee_ty, val)?;
        let ptr = val.into_ptr(&self.memory)?;
        self.memory.check_align(ptr, align, None)?;

        // Recurse
        let pointee_lvalue = self.val_to_lvalue(val, pointee_ty)?;
        self.validate(
            ValidationQuery {
                lval: (abs_lval.deref(), pointee_lvalue),
                ty: pointee_ty,
                re,
                mutbl,
            },
            mode,
        )
    }

    /// Validate the lvalue at the given type. If `acquire` is false, just do a release of all write locks
    fn validate(
        &mut self,
        mut query: ValidationQuery<'tcx>,
        mode: ValidationMode,
    ) -> EvalResult<'tcx> {
        use rustc::ty::TypeVariants::*;
        use rustc::ty::RegionKind::*;
        use rustc::ty::AdtKind;

        // No point releasing shared stuff.
        if !mode.acquiring() && query.mutbl == MutImmutable {
            return Ok(());
        }
        // When we recover, we may see data whose validity *just* ended. Do not acquire it.
        if let ValidationMode::Recover(ending_ce) = mode {
            if query.re == Some(ending_ce) {
                return Ok(());
            }
        }

        query.ty = self.normalize_type_unerased(&query.ty);
        trace!("{:?} on {:?}", mode, query);

        // Decide whether this type *owns* the memory it covers (like integers), or whether it
        // just assembles pieces (that each own their memory) together to a larger whole.
        // TODO: Currently, we don't acquire locks for padding and discriminants. We should.
        let is_owning = match query.ty.sty {
            TyInt(_) | TyUint(_) | TyRawPtr(_) | TyBool | TyFloat(_) | TyChar | TyStr |
            TyRef(..) | TyFnPtr(..) | TyFnDef(..) | TyNever => true,
            TyAdt(adt, _) if adt.is_box() => true,
            TySlice(_) | TyAdt(_, _) | TyTuple(..) | TyClosure(..) | TyArray(..) |
            TyDynamic(..) | TyGenerator(..) => false,
            TyParam(_) | TyInfer(_) | TyProjection(_) | TyAnon(..) | TyError => {
                bug!("I got an incomplete/unnormalized type for validation")
            }
        };
        if is_owning {
            // We need to lock. So we need memory. So we have to force_acquire.
            // Tracking the same state for locals not backed by memory would just duplicate too
            // much machinery.
            // FIXME: We ignore alignment.
            let (ptr, extra) = self.force_allocation(query.lval.1)?.to_ptr_extra_aligned();
            // Determine the size
            // FIXME: Can we reuse size_and_align_of_dst for Lvalues?
            let len = match self.type_size(query.ty)? {
                Some(size) => {
                    assert_eq!(extra, LvalueExtra::None, "Got a fat ptr to a sized type");
                    size
                }
                None => {
                    // The only unsized type we consider "owning" is TyStr.
                    assert_eq!(
                        query.ty.sty,
                        TyStr,
                        "Found a surprising unsized owning type"
                    );
                    // The extra must be the length, in bytes.
                    match extra {
                        LvalueExtra::Length(len) => len,
                        _ => bug!("TyStr must have a length as extra"),
                    }
                }
            };
            // Handle locking
            if len > 0 {
                let ptr = ptr.to_ptr()?;
                match query.mutbl {
                    MutImmutable => {
                        if mode.acquiring() {
                            self.memory.acquire_lock(
                                ptr,
                                len,
                                query.re,
                                AccessKind::Read,
                            )?;
                        }
                    }
                    // No releasing of read locks, ever.
                    MutMutable => {
                        match mode {
                            ValidationMode::Acquire => {
                                self.memory.acquire_lock(
                                    ptr,
                                    len,
                                    query.re,
                                    AccessKind::Write,
                                )?
                            }
                            ValidationMode::Recover(ending_ce) => {
                                self.memory.recover_write_lock(
                                    ptr,
                                    len,
                                    &query.lval.0,
                                    query.re,
                                    ending_ce,
                                )?
                            }
                            ValidationMode::ReleaseUntil(suspended_ce) => {
                                self.memory.suspend_write_lock(
                                    ptr,
                                    len,
                                    &query.lval.0,
                                    suspended_ce,
                                )?
                            }
                        }
                    }
                }
            }
        }

        // Per-type validation; `do catch` so the ReadUndefBytes special case below can
        // inspect the error.
        let res = do catch {
            match query.ty.sty {
                TyInt(_) | TyUint(_) | TyRawPtr(_) => {
                    if mode.acquiring() {
                        // Make sure we can read this.
                        let val = self.read_lvalue(query.lval.1)?;
                        self.follow_by_ref_value(val, query.ty)?;
                        // FIXME: It would be great to rule out Undef here, but that doesn't actually work.
                        // Passing around undef data is a thing that e.g. Vec::extend_with does.
                    }
                    Ok(())
                }
                TyBool | TyFloat(_) | TyChar => {
                    if mode.acquiring() {
                        let val = self.read_lvalue(query.lval.1)?;
                        let val = self.value_to_primval(ValTy { value: val, ty: query.ty })?;
                        val.to_bytes()?;
                        // TODO: Check if these are valid bool/float/codepoint/UTF-8
                    }
                    Ok(())
                }
                TyNever => err!(ValidationFailure(format!("The empty type is never valid."))),
                TyRef(region,
                      ty::TypeAndMut {
                          ty: pointee_ty,
                          mutbl,
                      }) => {
                    let val = self.read_lvalue(query.lval.1)?;
                    // Sharing restricts our context
                    if mutbl == MutImmutable {
                        query.mutbl = MutImmutable;
                    }
                    // Inner lifetimes *outlive* outer ones, so only if we have no lifetime restriction yet,
                    // we record the region of this borrow to the context.
                    if query.re == None {
                        match *region {
                            ReScope(scope) => query.re = Some(scope),
                            // It is possible for us to encounter erased lifetimes here because the lifetimes in
                            // this functions' Subst will be erased.
                            _ => {}
                        }
                    }
                    self.validate_ptr(val, query.lval.0, pointee_ty, query.re, query.mutbl, mode)
                }
                TyAdt(adt, _) if adt.is_box() => {
                    let val = self.read_lvalue(query.lval.1)?;
                    self.validate_ptr(val, query.lval.0, query.ty.boxed_ty(), query.re, query.mutbl, mode)
                }
                TyFnPtr(_sig) => {
                    let ptr = self.read_lvalue(query.lval.1)?
                        .into_ptr(&self.memory)?
                        .to_ptr()?;
                    self.memory.get_fn(ptr)?;
                    // TODO: Check if the signature matches (should be the same check as what terminator/mod.rs already does on call?).
                    Ok(())
                }
                TyFnDef(..) => {
                    // This is a zero-sized type with all relevant data sitting in the type.
                    // There is nothing to validate.
                    Ok(())
                }

                // Compound types
                TyStr => {
                    // TODO: Validate strings
                    Ok(())
                }
                TySlice(elem_ty) => {
                    let len = match query.lval.1 {
                        Lvalue::Ptr { extra: LvalueExtra::Length(len), .. } => len,
                        _ => {
                            bug!(
                                "acquire_valid of a TySlice given non-slice lvalue: {:?}",
                                query.lval
                            )
                        }
                    };
                    for i in 0..len {
                        let inner_lvalue = self.lvalue_index(query.lval.1, query.ty, i)?;
                        self.validate(
                            ValidationQuery {
                                lval: (query.lval.0.clone().index(i), inner_lvalue),
                                ty: elem_ty,
                                ..query
                            },
                            mode,
                        )?;
                    }
                    Ok(())
                }
                TyArray(elem_ty, len) => {
                    let len = len.val.to_const_int().unwrap().to_u64().unwrap();
                    for i in 0..len {
                        let inner_lvalue = self.lvalue_index(query.lval.1, query.ty, i as u64)?;
                        self.validate(
                            ValidationQuery {
                                lval: (query.lval.0.clone().index(i as u64), inner_lvalue),
                                ty: elem_ty,
                                ..query
                            },
                            mode,
                        )?;
                    }
                    Ok(())
                }
                TyDynamic(_data, _region) => {
                    // Check that this is a valid vtable
                    let vtable = match query.lval.1 {
                        Lvalue::Ptr { extra: LvalueExtra::Vtable(vtable), .. } => vtable,
                        _ => {
                            bug!(
                                "acquire_valid of a TyDynamic given non-trait-object lvalue: {:?}",
                                query.lval
                            )
                        }
                    };
                    self.read_size_and_align_from_vtable(vtable)?;
                    // TODO: Check that the vtable contains all the function pointers we expect it to have.
                    // Trait objects cannot have any operations performed
                    // on them directly. We cannot, in general, even acquire any locks as the trait object *could*
                    // contain an UnsafeCell. If we call functions to get access to data, we will validate
                    // their return values. So, it doesn't seem like there's anything else to do.
                    Ok(())
                }
                TyAdt(adt, subst) => {
                    if Some(adt.did) == self.tcx.lang_items().unsafe_cell_type() &&
                        query.mutbl == MutImmutable
                    {
                        // No locks for shared unsafe cells. Also no other validation, the only field is private anyway.
                        return Ok(());
                    }

                    match adt.adt_kind() {
                        AdtKind::Enum => {
                            // TODO: Can we get the discriminant without forcing an allocation?
                            let ptr = self.force_allocation(query.lval.1)?.to_ptr()?;
                            let discr = self.read_discriminant_value(ptr, query.ty)?;

                            // Get variant index for discriminant
                            let variant_idx = adt.discriminants(self.tcx).position(|variant_discr| {
                                variant_discr.to_u128_unchecked() == discr
                            });
                            let variant_idx = match variant_idx {
                                Some(val) => val,
                                None => return err!(InvalidDiscriminant),
                            };
                            let variant = &adt.variants[variant_idx];

                            if variant.fields.len() > 0 {
                                // Downcast to this variant, if needed
                                let lval = if adt.variants.len() > 1 {
                                    (
                                        query.lval.0.downcast(adt, variant_idx),
                                        self.eval_lvalue_projection(
                                            query.lval.1,
                                            query.ty,
                                            &mir::ProjectionElem::Downcast(adt, variant_idx),
                                        )?,
                                    )
                                } else {
                                    query.lval
                                };

                                // Recursively validate the fields
                                self.validate_variant(
                                    ValidationQuery { lval, ..query },
                                    variant,
                                    subst,
                                    mode,
                                )
                            } else {
                                // No fields, nothing left to check. Downcasting may fail, e.g. in case of a CEnum.
                                Ok(())
                            }
                        }
                        AdtKind::Struct => {
                            self.validate_variant(query, adt.struct_variant(), subst, mode)
                        }
                        AdtKind::Union => {
                            // No guarantees are provided for union types.
                            // TODO: Make sure that all access to union fields is unsafe; otherwise, we may have some checking to do (but what exactly?)
                            Ok(())
                        }
                    }
                }
                TyTuple(ref types, _) => {
                    for (idx, field_ty) in types.iter().enumerate() {
                        let field = mir::Field::new(idx);
                        let field_lvalue = self.lvalue_field(query.lval.1, field, query.ty, field_ty)?;
                        self.validate(
                            ValidationQuery {
                                lval: (query.lval.0.clone().field(field), field_lvalue),
                                ty: field_ty,
                                ..query
                            },
                            mode,
                        )?;
                    }
                    Ok(())
                }
                TyClosure(def_id, ref closure_substs) => {
                    for (idx, field_ty) in closure_substs.upvar_tys(def_id, self.tcx).enumerate() {
                        let field = mir::Field::new(idx);
                        let field_lvalue = self.lvalue_field(query.lval.1, field, query.ty, field_ty)?;
                        self.validate(
                            ValidationQuery {
                                lval: (query.lval.0.clone().field(field), field_lvalue),
                                ty: field_ty,
                                ..query
                            },
                            mode,
                        )?;
                    }
                    // TODO: Check if the signature matches (should be the same check as what terminator/mod.rs already does on call?).
                    // Is there other things we can/should check? Like vtable pointers?
                    Ok(())
                }
                // FIXME: generators aren't validated right now
                TyGenerator(..) => Ok(()),
                _ => bug!("We already established that this is a type we support. ({})", query.ty),
            }
        };
        match res {
            // ReleaseUntil(None) of an uninitialized variable is a NOP. This is needed because
            // we have to release the return value of a function; due to destination-passing-style
            // the callee may directly write there.
            // TODO: Ideally we would know whether the destination is already initialized, and only
            // release if it is. But of course that can't even always be statically determined.
            Err(EvalError { kind: EvalErrorKind::ReadUndefBytes, .. })
                if mode == ValidationMode::ReleaseUntil(None) => {
                return Ok(());
            }
            res => res,
        }
    }
}
|
||||
--- New file: src/librustc/mir/interpret/value.rs (405 lines added) @@ -0,0 +1,405 @@ ---
|
|||
#![allow(unknown_lints)]
|
||||
|
||||
use rustc::ty::layout::HasDataLayout;
|
||||
|
||||
use super::{EvalResult, Memory, MemoryPointer, HasMemory, PointerArithmetic, Machine, PtrAndAlign};
|
||||
|
||||
pub(super) fn bytes_to_f32(bytes: u128) -> f32 {
|
||||
f32::from_bits(bytes as u32)
|
||||
}
|
||||
|
||||
pub(super) fn bytes_to_f64(bytes: u128) -> f64 {
|
||||
f64::from_bits(bytes as u64)
|
||||
}
|
||||
|
||||
pub(super) fn f32_to_bytes(f: f32) -> u128 {
|
||||
f.to_bits() as u128
|
||||
}
|
||||
|
||||
pub(super) fn f64_to_bytes(f: f64) -> u128 {
|
||||
f.to_bits() as u128
|
||||
}
|
||||
|
||||
/// A `Value` represents a single self-contained Rust value.
///
/// A `Value` can either refer to a block of memory inside an allocation (`ByRef`) or to a primitive
/// value held directly, outside of any allocation (`ByVal`). For `ByRef`-values, we remember
/// whether the pointer is supposed to be aligned or not (also see Lvalue).
///
/// For optimization of a few very common cases, there is also a representation for a pair of
/// primitive values (`ByValPair`). It allows Miri to avoid making allocations for checked binary
/// operations and fat pointers. This idea was taken from rustc's trans.
#[derive(Clone, Copy, Debug)]
pub enum Value {
    /// The value lives in memory, behind the given (pointer, alignment-flag) pair.
    ByRef(PtrAndAlign),
    /// A single primitive value held directly.
    ByVal(PrimVal),
    /// Two primitive values held directly (e.g. the parts of a fat pointer).
    ByValPair(PrimVal, PrimVal),
}
|
||||
|
||||
/// A wrapper type around `PrimVal` that cannot be turned back into a `PrimVal` accidentally.
/// This type clears up a few APIs where having a `PrimVal` argument for something that is
/// potentially an integer pointer or a pointer to an allocation was unclear.
///
/// I (@oli-obk) believe it is less easy to mix up generic primvals and primvals that are just
/// the representation of pointers. Also all the sites that convert between primvals and pointers
/// are explicit now (and rare!)
#[derive(Clone, Copy, Debug)]
pub struct Pointer {
    // The wrapped value; any `PrimVal` variant (`Bytes`, `Ptr`, `Undef`) can end up here
    // via the unrestricted `From<PrimVal>` conversion.
    primval: PrimVal,
}
|
||||
|
||||
impl<'tcx> Pointer {
    /// The NULL pointer (integer value 0).
    pub fn null() -> Self {
        PrimVal::Bytes(0).into()
    }
    /// Require this to be a pointer into an allocation; errors otherwise.
    pub fn to_ptr(self) -> EvalResult<'tcx, MemoryPointer> {
        self.primval.to_ptr()
    }
    /// Unwrap back into the underlying `PrimVal` (explicit escape hatch).
    pub fn into_inner_primval(self) -> PrimVal {
        self.primval
    }

    /// Offset by a signed amount, with overflow checking delegated to the data layout.
    /// Errors on `Undef`.
    pub fn signed_offset<C: HasDataLayout>(self, i: i64, cx: C) -> EvalResult<'tcx, Self> {
        let layout = cx.data_layout();
        match self.primval {
            PrimVal::Bytes(b) => {
                // Integer "pointers" must fit in the target's pointer width.
                assert_eq!(b as u64 as u128, b);
                Ok(Pointer::from(
                    PrimVal::Bytes(layout.signed_offset(b as u64, i)? as u128),
                ))
            }
            PrimVal::Ptr(ptr) => ptr.signed_offset(i, layout).map(Pointer::from),
            PrimVal::Undef => err!(ReadUndefBytes),
        }
    }

    /// Offset by an unsigned amount, with overflow checking delegated to the data layout.
    /// Errors on `Undef`.
    pub fn offset<C: HasDataLayout>(self, i: u64, cx: C) -> EvalResult<'tcx, Self> {
        let layout = cx.data_layout();
        match self.primval {
            PrimVal::Bytes(b) => {
                // Integer "pointers" must fit in the target's pointer width.
                assert_eq!(b as u64 as u128, b);
                Ok(Pointer::from(
                    PrimVal::Bytes(layout.offset(b as u64, i)? as u128),
                ))
            }
            PrimVal::Ptr(ptr) => ptr.offset(i, layout).map(Pointer::from),
            PrimVal::Undef => err!(ReadUndefBytes),
        }
    }

    /// Offset by a signed amount with wrap-around instead of an overflow error.
    /// Still errors on `Undef`.
    pub fn wrapping_signed_offset<C: HasDataLayout>(self, i: i64, cx: C) -> EvalResult<'tcx, Self> {
        let layout = cx.data_layout();
        match self.primval {
            PrimVal::Bytes(b) => {
                // Integer "pointers" must fit in the target's pointer width.
                assert_eq!(b as u64 as u128, b);
                Ok(Pointer::from(PrimVal::Bytes(
                    layout.wrapping_signed_offset(b as u64, i) as u128,
                )))
            }
            PrimVal::Ptr(ptr) => Ok(Pointer::from(ptr.wrapping_signed_offset(i, layout))),
            PrimVal::Undef => err!(ReadUndefBytes),
        }
    }

    /// Whether this is the integer 0. Allocation pointers are never NULL;
    /// `Undef` is an error.
    pub fn is_null(self) -> EvalResult<'tcx, bool> {
        match self.primval {
            PrimVal::Bytes(b) => Ok(b == 0),
            PrimVal::Ptr(_) => Ok(false),
            PrimVal::Undef => err!(ReadUndefBytes),
        }
    }

    /// Build a fat-pointer `Value` carrying a slice length.
    pub fn to_value_with_len(self, len: u64) -> Value {
        Value::ByValPair(self.primval, PrimVal::from_u128(len as u128))
    }

    /// Build a fat-pointer `Value` carrying a vtable pointer.
    pub fn to_value_with_vtable(self, vtable: MemoryPointer) -> Value {
        Value::ByValPair(self.primval, PrimVal::Ptr(vtable))
    }

    /// Build a thin-pointer `Value`.
    pub fn to_value(self) -> Value {
        Value::ByVal(self.primval)
    }
}
|
||||
|
||||
impl ::std::convert::From<PrimVal> for Pointer {
|
||||
fn from(primval: PrimVal) -> Self {
|
||||
Pointer { primval }
|
||||
}
|
||||
}
|
||||
|
||||
impl ::std::convert::From<MemoryPointer> for Pointer {
|
||||
fn from(ptr: MemoryPointer) -> Self {
|
||||
PrimVal::Ptr(ptr).into()
|
||||
}
|
||||
}
|
||||
|
||||
/// A `PrimVal` represents an immediate, primitive value existing outside of a
/// `memory::Allocation`. It is in many ways like a small chunk of a `Allocation`, up to 8 bytes in
/// size. Like a range of bytes in an `Allocation`, a `PrimVal` can either represent the raw bytes
/// of a simple value, a pointer into another `Allocation`, or be undefined.
#[derive(Clone, Copy, Debug)]
pub enum PrimVal {
    /// The raw bytes of a simple value. Stored as `u128` so every primitive
    /// integer/float width (up to 128 bits) fits; smaller values occupy the
    /// low-order bits.
    Bytes(u128),

    /// A pointer into an `Allocation`. An `Allocation` in the `memory` module has a list of
    /// relocations, but a `PrimVal` is only large enough to contain one, so we just represent the
    /// relocation and its associated offset together as a `MemoryPointer` here.
    Ptr(MemoryPointer),

    /// An undefined `PrimVal`, for representing values that aren't safe to examine, but are safe
    /// to copy around, just like undefined bytes in an `Allocation`.
    Undef,
}
|
||||
|
||||
/// The type classification of a `PrimVal`, telling the interpreter how to
/// operate on the raw bytes (e.g. whether to sign-extend, use float
/// arithmetic, or treat the value as a pointer).
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum PrimValKind {
    // Signed integers, by width.
    I8, I16, I32, I64, I128,
    // Unsigned integers, by width.
    U8, U16, U32, U64, U128,
    // Binary floating-point values.
    F32, F64,
    // Data pointers and function pointers.
    Ptr, FnPtr,
    Bool,
    Char,
}
|
||||
|
||||
impl<'a, 'tcx: 'a> Value {
    /// Wraps a pointer as an aligned by-reference `Value`.
    #[inline]
    pub fn by_ref(ptr: Pointer) -> Self {
        Value::ByRef(PtrAndAlign { ptr, aligned: true })
    }

    /// Convert the value into a pointer (or a pointer-sized integer). If the value is a ByRef,
    /// this may have to perform a load.
    pub fn into_ptr<M: Machine<'tcx>>(
        &self,
        mem: &Memory<'a, 'tcx, M>,
    ) -> EvalResult<'tcx, Pointer> {
        use self::Value::*;
        Ok(match *self {
            // ByRef: load one pointer-sized value from memory, honoring the
            // recorded (mis)alignment of the reference.
            ByRef(PtrAndAlign { ptr, aligned }) => {
                mem.read_maybe_aligned(aligned, |mem| mem.read_ptr_sized_unsigned(ptr.to_ptr()?))?
            }
            // Immediate values: the first component is already the pointer.
            ByVal(ptr) |
            ByValPair(ptr, _) => ptr,
        }.into())
    }

    /// Splits a trait-object fat pointer into its data pointer and vtable
    /// pointer. For a ByRef value this loads both components from memory:
    /// the data pointer first, then the vtable at one pointer-size offset.
    pub(super) fn into_ptr_vtable_pair<M: Machine<'tcx>>(
        &self,
        mem: &Memory<'a, 'tcx, M>,
    ) -> EvalResult<'tcx, (Pointer, MemoryPointer)> {
        use self::Value::*;
        match *self {
            ByRef(PtrAndAlign {
                ptr: ref_ptr,
                aligned,
            }) => {
                mem.read_maybe_aligned(aligned, |mem| {
                    let ptr = mem.read_ptr_sized_unsigned(ref_ptr.to_ptr()?)?.into();
                    // The vtable pointer sits directly after the data pointer.
                    let vtable = mem.read_ptr_sized_unsigned(
                        ref_ptr.offset(mem.pointer_size(), mem.layout)?.to_ptr()?,
                    )?.to_ptr()?;
                    Ok((ptr, vtable))
                })
            }

            ByValPair(ptr, vtable) => Ok((ptr.into(), vtable.to_ptr()?)),

            ByVal(PrimVal::Undef) => err!(ReadUndefBytes),
            _ => bug!("expected ptr and vtable, got {:?}", self),
        }
    }

    /// Splits a slice fat pointer into its data pointer and element count.
    /// For a ByRef value this loads both components from memory: the data
    /// pointer first, then the length at one pointer-size offset.
    pub(super) fn into_slice<M: Machine<'tcx>>(
        &self,
        mem: &Memory<'a, 'tcx, M>,
    ) -> EvalResult<'tcx, (Pointer, u64)> {
        use self::Value::*;
        match *self {
            ByRef(PtrAndAlign {
                ptr: ref_ptr,
                aligned,
            }) => {
                mem.read_maybe_aligned(aligned, |mem| {
                    let ptr = mem.read_ptr_sized_unsigned(ref_ptr.to_ptr()?)?.into();
                    // The length sits directly after the data pointer.
                    let len = mem.read_ptr_sized_unsigned(
                        ref_ptr.offset(mem.pointer_size(), mem.layout)?.to_ptr()?,
                    )?.to_bytes()? as u64;
                    Ok((ptr, len))
                })
            }
            ByValPair(ptr, val) => {
                let len = val.to_u128()?;
                // Lengths must fit in the target's pointer width.
                assert_eq!(len as u64 as u128, len);
                Ok((ptr.into(), len as u64))
            }
            ByVal(PrimVal::Undef) => err!(ReadUndefBytes),
            ByVal(_) => bug!("expected ptr and length, got {:?}", self),
        }
    }
}
|
||||
|
||||
impl<'tcx> PrimVal {
|
||||
pub fn from_u128(n: u128) -> Self {
|
||||
PrimVal::Bytes(n)
|
||||
}
|
||||
|
||||
pub fn from_i128(n: i128) -> Self {
|
||||
PrimVal::Bytes(n as u128)
|
||||
}
|
||||
|
||||
pub fn from_f32(f: f32) -> Self {
|
||||
PrimVal::Bytes(f32_to_bytes(f))
|
||||
}
|
||||
|
||||
pub fn from_f64(f: f64) -> Self {
|
||||
PrimVal::Bytes(f64_to_bytes(f))
|
||||
}
|
||||
|
||||
pub fn from_bool(b: bool) -> Self {
|
||||
PrimVal::Bytes(b as u128)
|
||||
}
|
||||
|
||||
pub fn from_char(c: char) -> Self {
|
||||
PrimVal::Bytes(c as u128)
|
||||
}
|
||||
|
||||
pub fn to_bytes(self) -> EvalResult<'tcx, u128> {
|
||||
match self {
|
||||
PrimVal::Bytes(b) => Ok(b),
|
||||
PrimVal::Ptr(_) => err!(ReadPointerAsBytes),
|
||||
PrimVal::Undef => err!(ReadUndefBytes),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn to_ptr(self) -> EvalResult<'tcx, MemoryPointer> {
|
||||
match self {
|
||||
PrimVal::Bytes(_) => err!(ReadBytesAsPointer),
|
||||
PrimVal::Ptr(p) => Ok(p),
|
||||
PrimVal::Undef => err!(ReadUndefBytes),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn is_bytes(self) -> bool {
|
||||
match self {
|
||||
PrimVal::Bytes(_) => true,
|
||||
_ => false,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn is_ptr(self) -> bool {
|
||||
match self {
|
||||
PrimVal::Ptr(_) => true,
|
||||
_ => false,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn is_undef(self) -> bool {
|
||||
match self {
|
||||
PrimVal::Undef => true,
|
||||
_ => false,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn to_u128(self) -> EvalResult<'tcx, u128> {
|
||||
self.to_bytes()
|
||||
}
|
||||
|
||||
pub fn to_u64(self) -> EvalResult<'tcx, u64> {
|
||||
self.to_bytes().map(|b| {
|
||||
assert_eq!(b as u64 as u128, b);
|
||||
b as u64
|
||||
})
|
||||
}
|
||||
|
||||
pub fn to_i32(self) -> EvalResult<'tcx, i32> {
|
||||
self.to_bytes().map(|b| {
|
||||
assert_eq!(b as i32 as u128, b);
|
||||
b as i32
|
||||
})
|
||||
}
|
||||
|
||||
pub fn to_i128(self) -> EvalResult<'tcx, i128> {
|
||||
self.to_bytes().map(|b| b as i128)
|
||||
}
|
||||
|
||||
pub fn to_i64(self) -> EvalResult<'tcx, i64> {
|
||||
self.to_bytes().map(|b| {
|
||||
assert_eq!(b as i64 as u128, b);
|
||||
b as i64
|
||||
})
|
||||
}
|
||||
|
||||
pub fn to_f32(self) -> EvalResult<'tcx, f32> {
|
||||
self.to_bytes().map(bytes_to_f32)
|
||||
}
|
||||
|
||||
pub fn to_f64(self) -> EvalResult<'tcx, f64> {
|
||||
self.to_bytes().map(bytes_to_f64)
|
||||
}
|
||||
|
||||
pub fn to_bool(self) -> EvalResult<'tcx, bool> {
|
||||
match self.to_bytes()? {
|
||||
0 => Ok(false),
|
||||
1 => Ok(true),
|
||||
_ => err!(InvalidBool),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl PrimValKind {
|
||||
pub fn is_int(self) -> bool {
|
||||
use self::PrimValKind::*;
|
||||
match self {
|
||||
I8 | I16 | I32 | I64 | I128 | U8 | U16 | U32 | U64 | U128 => true,
|
||||
_ => false,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn is_signed_int(self) -> bool {
|
||||
use self::PrimValKind::*;
|
||||
match self {
|
||||
I8 | I16 | I32 | I64 | I128 => true,
|
||||
_ => false,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn is_float(self) -> bool {
|
||||
use self::PrimValKind::*;
|
||||
match self {
|
||||
F32 | F64 => true,
|
||||
_ => false,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn from_uint_size(size: u64) -> Self {
|
||||
match size {
|
||||
1 => PrimValKind::U8,
|
||||
2 => PrimValKind::U16,
|
||||
4 => PrimValKind::U32,
|
||||
8 => PrimValKind::U64,
|
||||
16 => PrimValKind::U128,
|
||||
_ => bug!("can't make uint with size {}", size),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn from_int_size(size: u64) -> Self {
|
||||
match size {
|
||||
1 => PrimValKind::I8,
|
||||
2 => PrimValKind::I16,
|
||||
4 => PrimValKind::I32,
|
||||
8 => PrimValKind::I64,
|
||||
16 => PrimValKind::I128,
|
||||
_ => bug!("can't make int with size {}", size),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn is_ptr(self) -> bool {
|
||||
use self::PrimValKind::*;
|
||||
match self {
|
||||
Ptr | FnPtr => true,
|
||||
_ => false,
|
||||
}
|
||||
}
|
||||
}
|
||||
Loading…
Add table
Add a link
Reference in a new issue