Merge pull request #156 from oli-obk/master

Use rustc generated shims instead of interpreter hacks
This commit is contained in:
Eduard-Mihai Burtescu 2017-04-21 14:23:37 +03:00 committed by GitHub
commit 9a32772f92
14 changed files with 991 additions and 1116 deletions

View file

@ -2,18 +2,16 @@ language: rust
rust:
- nightly
before_script:
- |
pip install 'travis-cargo<0.2' --user &&
export PATH=$HOME/.local/bin:$PATH
- sh ~/rust-installer/rustup.sh --add-target=i686-unknown-linux-gnu --prefix=/home/travis/rust -y --disable-sudo
- sh ~/rust-installer/rustup.sh --add-target=i686-pc-windows-gnu --prefix=/home/travis/rust -y --disable-sudo
- sh ~/rust-installer/rustup.sh --add-target=i686-pc-windows-msvc --prefix=/home/travis/rust -y --disable-sudo
- export PATH=$HOME/.local/bin:$PATH
- rustup target add i686-unknown-linux-gnu
- rustup target add i686-pc-windows-gnu
- rustup target add i686-pc-windows-msvc
script:
- |
export RUST_SYSROOT=$HOME/rust &&
travis-cargo build &&
travis-cargo test &&
travis-cargo install &&
cargo build &&
cargo test &&
cargo install &&
cd cargo-miri-test &&
cargo miri &&
cargo miri test &&

2
Cargo.lock generated
View file

@ -21,7 +21,7 @@ dependencies = [
[[package]]
name = "byteorder"
version = "1.0.0"
source = "git+https://github.com/quininer/byteorder.git?branch=i128#ef51df297aa833d0b6639aae328a95597fc07d75"
source = "git+https://github.com/quininer/byteorder.git?branch=i128#9bab6d7783f81da50feb234a120c918d9eabba6e"
[[package]]
name = "cargo_metadata"

View file

@ -2,7 +2,7 @@ use std::error::Error;
use std::fmt;
use rustc::mir;
use rustc::ty::{FnSig, Ty, layout};
use memory::{Pointer, Function};
use memory::Pointer;
use rustc_const_math::ConstMathErr;
use syntax::codemap::Span;
@ -52,9 +52,6 @@ pub enum EvalError<'tcx> {
DeallocatedStaticMemory,
Layout(layout::LayoutError<'tcx>),
Unreachable,
ExpectedConcreteFunction(Function<'tcx>),
ExpectedDropGlue(Function<'tcx>),
ManuallyCalledDropGlue,
Panic,
}
@ -128,12 +125,6 @@ impl<'tcx> Error for EvalError<'tcx> {
"attempted to get length of a null terminated string, but no null found before end of allocation",
EvalError::Unreachable =>
"entered unreachable code",
EvalError::ExpectedConcreteFunction(_) =>
"tried to use glue function as function",
EvalError::ExpectedDropGlue(_) =>
"tried to use non-drop-glue function as drop glue",
EvalError::ManuallyCalledDropGlue =>
"tried to manually invoke drop glue",
EvalError::Panic =>
"the evaluated program panicked",
}

View file

@ -5,13 +5,18 @@ use std::fmt::Write;
use rustc::hir::def_id::DefId;
use rustc::hir::map::definitions::DefPathData;
use rustc::middle::const_val::ConstVal;
use rustc_const_math::{ConstInt, ConstUsize};
use rustc::mir;
use rustc::traits::Reveal;
use rustc::ty::layout::{self, Layout, Size};
use rustc::ty::subst::{self, Subst, Substs};
use rustc::ty::subst::{Subst, Substs, Kind};
use rustc::ty::{self, Ty, TyCtxt, TypeFoldable, Binder};
use rustc::traits;
use rustc_data_structures::indexed_vec::Idx;
use syntax::codemap::{self, DUMMY_SP};
use syntax::codemap::{self, DUMMY_SP, Span};
use syntax::ast;
use syntax::abi::Abi;
use syntax::symbol::Symbol;
use error::{EvalError, EvalResult};
use lvalue::{Global, GlobalId, Lvalue, LvalueExtra};
@ -41,6 +46,9 @@ pub struct EvalContext<'a, 'tcx: 'a> {
/// This prevents infinite loops and huge computations from freezing up const eval.
/// Remove once halting problem is solved.
pub(crate) steps_remaining: u64,
/// Drop glue for arrays and slices
pub(crate) seq_drop_glue: MirRef<'tcx>,
}
/// A stack frame.
@ -52,11 +60,8 @@ pub struct Frame<'tcx> {
/// The MIR for the function called on this frame.
pub mir: MirRef<'tcx>,
/// The def_id of the current function.
pub def_id: DefId,
/// type substitutions for the current function invocation.
pub substs: &'tcx Substs<'tcx>,
/// The def_id and substs of the current function
pub instance: ty::Instance<'tcx>,
/// The span of the call site.
pub span: codemap::Span,
@ -78,12 +83,6 @@ pub struct Frame<'tcx> {
/// Before being initialized, all locals are `Value::ByVal(PrimVal::Undef)`.
pub locals: Vec<Value>,
/// Temporary allocations introduced to save stackframes
/// This is pure interpreter magic and has nothing to do with how rustc does it
/// An example is calling an FnMut closure that has been converted to a FnOnce closure
/// The value's destructor will be called and the memory freed when the stackframe finishes
pub interpreter_temporaries: Vec<(Pointer, Ty<'tcx>)>,
////////////////////////////////////////////////////////////////////////////////
// Current position within the function
////////////////////////////////////////////////////////////////////////////////
@ -130,6 +129,181 @@ impl Default for ResourceLimits {
impl<'a, 'tcx> EvalContext<'a, 'tcx> {
pub fn new(tcx: TyCtxt<'a, 'tcx, 'tcx>, limits: ResourceLimits) -> Self {
let source_info = mir::SourceInfo {
span: DUMMY_SP,
scope: mir::ARGUMENT_VISIBILITY_SCOPE
};
// i = 0; len = Len(*a0); goto head;
let start_block = mir::BasicBlockData {
statements: vec![
mir::Statement {
source_info,
kind: mir::StatementKind::Assign(
mir::Lvalue::Local(mir::Local::new(2)),
mir::Rvalue::Use(mir::Operand::Constant(mir::Constant {
span: DUMMY_SP,
ty: tcx.types.usize,
literal: mir::Literal::Value {
value: ConstVal::Integral(ConstInt::Usize(ConstUsize::new(0, tcx.sess.target.uint_type).unwrap())),
},
}))
)
},
mir::Statement {
source_info,
kind: mir::StatementKind::Assign(
mir::Lvalue::Local(mir::Local::new(3)),
mir::Rvalue::Len(mir::Lvalue::Projection(Box::new(mir::LvalueProjection {
base: mir::Lvalue::Local(mir::Local::new(1)),
elem: mir::ProjectionElem::Deref,
}))),
)
},
],
terminator: Some(mir::Terminator {
source_info: source_info,
kind: mir::TerminatorKind::Goto { target: mir::BasicBlock::new(1) },
}),
is_cleanup: false
};
// head: done = i == len; switch done { 1 => ret, 0 => loop }
let head = mir::BasicBlockData {
statements: vec![
mir::Statement {
source_info,
kind: mir::StatementKind::Assign(
mir::Lvalue::Local(mir::Local::new(4)),
mir::Rvalue::BinaryOp(
mir::BinOp::Eq,
mir::Operand::Consume(mir::Lvalue::Local(mir::Local::new(2))),
mir::Operand::Consume(mir::Lvalue::Local(mir::Local::new(3))),
)
)
},
],
terminator: Some(mir::Terminator {
source_info: source_info,
kind: mir::TerminatorKind::SwitchInt {
targets: vec![
mir::BasicBlock::new(2),
mir::BasicBlock::new(4),
],
discr: mir::Operand::Consume(mir::Lvalue::Local(mir::Local::new(4))),
switch_ty: tcx.types.bool,
values: vec![ConstInt::U8(0)].into(),
},
}),
is_cleanup: false
};
// loop: drop (*a0)[i]; goto inc;
let loop_ = mir::BasicBlockData {
statements: Vec::new(),
terminator: Some(mir::Terminator {
source_info: source_info,
kind: mir::TerminatorKind::Drop {
target: mir::BasicBlock::new(3),
unwind: None,
location: mir::Lvalue::Projection(Box::new(
mir::LvalueProjection {
base: mir::Lvalue::Projection(Box::new(
mir::LvalueProjection {
base: mir::Lvalue::Local(mir::Local::new(1)),
elem: mir::ProjectionElem::Deref,
}
)),
elem: mir::ProjectionElem::Index(mir::Operand::Consume(mir::Lvalue::Local(mir::Local::new(2)))),
}
)),
},
}),
is_cleanup: false
};
// inc: i++; goto head;
let inc = mir::BasicBlockData {
statements: vec![
mir::Statement {
source_info,
kind: mir::StatementKind::Assign(
mir::Lvalue::Local(mir::Local::new(2)),
mir::Rvalue::BinaryOp(
mir::BinOp::Add,
mir::Operand::Consume(mir::Lvalue::Local(mir::Local::new(2))),
mir::Operand::Constant(mir::Constant {
span: DUMMY_SP,
ty: tcx.types.usize,
literal: mir::Literal::Value {
value: ConstVal::Integral(ConstInt::Usize(ConstUsize::new(1, tcx.sess.target.uint_type).unwrap())),
},
}),
)
)
},
],
terminator: Some(mir::Terminator {
source_info: source_info,
kind: mir::TerminatorKind::Goto { target: mir::BasicBlock::new(1) },
}),
is_cleanup: false
};
// ret: return;
let ret = mir::BasicBlockData {
statements: Vec::new(),
terminator: Some(mir::Terminator {
source_info: source_info,
kind: mir::TerminatorKind::Return,
}),
is_cleanup: false
};
let locals = vec![
mir::LocalDecl {
mutability: mir::Mutability::Mut,
ty: tcx.mk_nil(),
name: None,
source_info,
is_user_variable: false,
},
mir::LocalDecl {
mutability: mir::Mutability::Mut,
ty: tcx.mk_mut_ptr(tcx.mk_slice(tcx.mk_param(0, Symbol::intern("T")))),
name: None,
source_info,
is_user_variable: false,
},
mir::LocalDecl {
mutability: mir::Mutability::Mut,
ty: tcx.types.usize,
name: None,
source_info,
is_user_variable: false,
},
mir::LocalDecl {
mutability: mir::Mutability::Mut,
ty: tcx.types.usize,
name: None,
source_info,
is_user_variable: false,
},
mir::LocalDecl {
mutability: mir::Mutability::Mut,
ty: tcx.types.bool,
name: None,
source_info,
is_user_variable: false,
},
];
let seq_drop_glue = mir::Mir::new(
vec![start_block, head, loop_, inc, ret].into_iter().collect(),
Vec::new().into_iter().collect(), // vis scopes
Vec::new().into_iter().collect(), // promoted
tcx.mk_nil(), // return type
locals.into_iter().collect(),
1, // arg_count
Vec::new(), // upvars
DUMMY_SP,
);
let seq_drop_glue = tcx.alloc_mir(seq_drop_glue);
// Perma-borrow MIR from shims to prevent mutation.
::std::mem::forget(seq_drop_glue.borrow());
EvalContext {
tcx,
memory: Memory::new(&tcx.data_layout, limits.memory_size),
@ -137,6 +311,7 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
stack: Vec::new(),
stack_limit: limits.stack_limit,
steps_remaining: limits.step_limit,
seq_drop_glue: seq_drop_glue.borrow(),
}
}
@ -172,7 +347,7 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
Ok(Value::ByValPair(PrimVal::Ptr(ptr), PrimVal::from_u128(s.len() as u128)))
}
pub(super) fn const_to_value(&mut self, const_val: &ConstVal) -> EvalResult<'tcx, Value> {
pub(super) fn const_to_value(&mut self, const_val: &ConstVal<'tcx>) -> EvalResult<'tcx, Value> {
use rustc::middle::const_val::ConstVal::*;
use rustc_const_math::ConstFloat;
@ -194,7 +369,8 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
Struct(_) => unimplemented!(),
Tuple(_) => unimplemented!(),
Function(_, _) => unimplemented!(),
// function items are zero sized and thus have no readable value
Function(..) => PrimVal::Undef,
Array(_) => unimplemented!(),
Repeat(_, _) => unimplemented!(),
};
@ -208,12 +384,11 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
ty.is_sized(self.tcx, &self.tcx.empty_parameter_environment(), DUMMY_SP)
}
pub fn load_mir(&self, def_id: DefId) -> EvalResult<'tcx, MirRef<'tcx>> {
trace!("load mir {:?}", def_id);
if def_id.is_local() || self.tcx.sess.cstore.is_item_mir_available(def_id) {
Ok(self.tcx.item_mir(def_id))
} else {
Err(EvalError::NoMirFor(self.tcx.item_path_str(def_id)))
pub fn load_mir(&self, instance: ty::InstanceDef<'tcx>) -> EvalResult<'tcx, MirRef<'tcx>> {
trace!("load mir {:?}", instance);
match instance {
ty::InstanceDef::Item(def_id) => self.tcx.maybe_item_mir(def_id).ok_or_else(|| EvalError::NoMirFor(self.tcx.item_path_str(def_id))),
_ => Ok(self.tcx.instance_mir(instance)),
}
}
@ -272,13 +447,11 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
pub fn push_stack_frame(
&mut self,
def_id: DefId,
instance: ty::Instance<'tcx>,
span: codemap::Span,
mir: MirRef<'tcx>,
substs: &'tcx Substs<'tcx>,
return_lvalue: Lvalue<'tcx>,
return_to_block: StackPopCleanup,
temporaries: Vec<(Pointer, Ty<'tcx>)>,
) -> EvalResult<'tcx> {
::log_settings::settings().indentation += 1;
@ -293,10 +466,8 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
return_to_block,
return_lvalue,
locals,
interpreter_temporaries: temporaries,
span,
def_id,
substs,
instance,
stmt: 0,
});
@ -352,13 +523,7 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
}
}
}
// drop and deallocate all temporary allocations
for (ptr, ty) in frame.interpreter_temporaries {
trace!("dropping temporary allocation");
let mut drops = Vec::new();
self.drop(Lvalue::from_ptr(ptr), ty, &mut drops)?;
self.eval_drop_impls(drops, frame.span)?;
}
Ok(())
}
@ -665,8 +830,9 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
}
ReifyFnPointer => match self.operand_ty(operand).sty {
ty::TyFnDef(def_id, substs, sig) => {
let fn_ptr = self.memory.create_fn_ptr(def_id, substs, sig);
ty::TyFnDef(def_id, substs, _) => {
let instance = resolve(self.tcx, def_id, substs);
let fn_ptr = self.memory.create_fn_alloc(instance);
self.write_value(Value::ByVal(PrimVal::Ptr(fn_ptr)), dest, dest_ty)?;
},
ref other => bug!("reify fn pointer on {:?}", other),
@ -682,8 +848,8 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
ClosureFnPointer => match self.operand_ty(operand).sty {
ty::TyClosure(def_id, substs) => {
let fn_ty = self.tcx.closure_type(def_id);
let fn_ptr = self.memory.create_fn_ptr_from_noncapture_closure(def_id, substs, fn_ty);
let instance = resolve_closure(self.tcx, def_id, substs, ty::ClosureKind::FnOnce);
let fn_ptr = self.memory.create_fn_alloc(instance);
self.write_value(Value::ByVal(PrimVal::Ptr(fn_ptr)), dest, dest_ty)?;
},
ref other => bug!("reify fn pointer on {:?}", other),
@ -835,26 +1001,20 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
match *op {
Consume(ref lvalue) => self.eval_and_read_lvalue(lvalue),
Constant(mir::Constant { ref literal, ty, .. }) => {
Constant(mir::Constant { ref literal, .. }) => {
use rustc::mir::Literal;
let value = match *literal {
Literal::Value { ref value } => self.const_to_value(value)?,
Literal::Item { def_id, substs } => {
if let ty::TyFnDef(..) = ty.sty {
// function items are zero sized
Value::ByRef(self.memory.allocate(0, 0)?)
} else {
let (def_id, substs) = self.resolve_associated_const(def_id, substs);
let cid = GlobalId { def_id, substs, promoted: None };
self.globals.get(&cid).expect("static/const not cached").value
}
let instance = self.resolve_associated_const(def_id, substs);
let cid = GlobalId { instance, promoted: None };
self.globals.get(&cid).expect("static/const not cached").value
}
Literal::Promoted { index } => {
let cid = GlobalId {
def_id: self.frame().def_id,
substs: self.substs(),
instance: self.frame().instance,
promoted: Some(index),
};
self.globals.get(&cid).expect("promoted not cached").value
@ -891,8 +1051,8 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
},
val => {
let ty = self.stack[frame].mir.local_decls[local].ty;
let ty = self.monomorphize(ty, self.stack[frame].substs);
let substs = self.stack[frame].substs;
let ty = self.monomorphize(ty, self.stack[frame].instance.substs);
let substs = self.stack[frame].instance.substs;
let ptr = self.alloc_ptr_with_substs(ty, substs)?;
self.stack[frame].locals[local.index() - 1] = Value::ByRef(ptr);
self.write_value_to_ptr(val, ptr, ty)?;
@ -911,7 +1071,7 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
match global_val.value {
Value::ByRef(ptr) => Lvalue::from_ptr(ptr),
_ => {
let ptr = self.alloc_ptr_with_substs(global_val.ty, cid.substs)?;
let ptr = self.alloc_ptr_with_substs(global_val.ty, cid.instance.substs)?;
self.memory.mark_static(ptr.alloc_id);
self.write_value_to_ptr(global_val.value, ptr, global_val.ty)?;
// see comment on `initialized` field
@ -1289,7 +1449,7 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
}
pub(super) fn substs(&self) -> &'tcx Substs<'tcx> {
self.frame().substs
self.frame().instance.substs
}
fn unsize_into_ptr(
@ -1320,7 +1480,7 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
(_, &ty::TyDynamic(ref data, _)) => {
let trait_ref = data.principal().unwrap().with_self_ty(self.tcx, src_pointee_ty);
let trait_ref = self.tcx.erase_regions(&trait_ref);
let vtable = self.get_vtable(trait_ref)?;
let vtable = self.get_vtable(src_pointee_ty, trait_ref)?;
let ptr = src.read_ptr(&self.memory)?;
let ptr = PrimVal::Ptr(ptr);
let extra = PrimVal::Ptr(vtable);
@ -1519,7 +1679,8 @@ pub fn eval_main<'a, 'tcx: 'a>(
limits: ResourceLimits,
) {
let mut ecx = EvalContext::new(tcx, limits);
let mir = ecx.load_mir(def_id).expect("main function's MIR not found");
let instance = ty::Instance::mono(tcx, def_id);
let mir = ecx.load_mir(instance.def).expect("main function's MIR not found");
if !mir.return_ty.is_nil() || mir.arg_count != 0 {
let msg = "miri does not support main functions without `fn()` type signatures";
@ -1528,13 +1689,11 @@ pub fn eval_main<'a, 'tcx: 'a>(
}
ecx.push_stack_frame(
def_id,
instance,
DUMMY_SP,
mir,
tcx.intern_substs(&[]),
Lvalue::from_ptr(Pointer::zst_ptr()),
StackPopCleanup::None,
Vec::new(),
).expect("could not allocate first stack frame");
loop {
@ -1564,23 +1723,12 @@ fn report(tcx: TyCtxt, ecx: &EvalContext, e: EvalError) {
block.terminator().source_info.span
};
let mut err = tcx.sess.struct_span_err(span, &e.to_string());
for &Frame { def_id, substs, span, .. } in ecx.stack().iter().rev() {
if tcx.def_key(def_id).disambiguated_data.data == DefPathData::ClosureExpr {
for &Frame { instance, span, .. } in ecx.stack().iter().rev() {
if tcx.def_key(instance.def_id()).disambiguated_data.data == DefPathData::ClosureExpr {
err.span_note(span, "inside call to closure");
continue;
}
// FIXME(solson): Find a way to do this without this Display impl hack.
use rustc::util::ppaux;
use std::fmt;
struct Instance<'tcx>(DefId, &'tcx subst::Substs<'tcx>);
impl<'tcx> ::std::panic::UnwindSafe for Instance<'tcx> {}
impl<'tcx> ::std::panic::RefUnwindSafe for Instance<'tcx> {}
impl<'tcx> fmt::Display for Instance<'tcx> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
ppaux::parameterized(f, self.1, self.0, &[])
}
}
err.span_note(span, &format!("inside call to {}", Instance(def_id, substs)));
err.span_note(span, &format!("inside call to {}", instance));
}
err.emit();
}
@ -1657,3 +1805,344 @@ impl<'b, 'tcx: 'b> IntoValTyPair<'tcx> for &'b mir::Operand<'tcx> {
Ok((value, value_ty))
}
}
/// FIXME: expose trans::monomorphize::resolve_closure
/// Resolves a closure `def_id` + substs to the `Instance` that should be
/// called when the closure is invoked through the `requested_kind` of
/// `Fn`-family trait.
pub fn resolve_closure<'a, 'tcx> (
    tcx: TyCtxt<'a, 'tcx, 'tcx>,
    def_id: DefId,
    substs: ty::ClosureSubsts<'tcx>,
    requested_kind: ty::ClosureKind,
) -> ty::Instance<'tcx> {
    // The kind the closure was actually inferred to be (Fn/FnMut/FnOnce).
    let actual_kind = tcx.closure_kind(def_id);
    match needs_fn_once_adapter_shim(actual_kind, requested_kind) {
        // A by-ref closure called through `FnOnce` needs the
        // rustc-generated `call_once` adapter shim.
        Ok(true) => fn_once_adapter_instance(tcx, def_id, substs),
        // Otherwise the closure body can be used directly.
        _ => ty::Instance::new(def_id, substs.substs)
    }
}
/// Builds the `Instance` for the rustc-generated `ClosureOnceShim`: the
/// adapter that lets a `Fn`/`FnMut` closure be invoked through
/// `FnOnce::call_once`.
fn fn_once_adapter_instance<'a, 'tcx>(
    tcx: TyCtxt<'a, 'tcx, 'tcx>,
    closure_did: DefId,
    substs: ty::ClosureSubsts<'tcx>,
) -> ty::Instance<'tcx> {
    debug!("fn_once_adapter_shim({:?}, {:?})",
           closure_did,
           substs);
    // Locate `FnOnce::call_once`: the single method of the `FnOnce`
    // lang item.
    let fn_once = tcx.lang_items.fn_once_trait().unwrap();
    let call_once = tcx.associated_items(fn_once)
        .find(|it| it.kind == ty::AssociatedKind::Method)
        .unwrap().def_id;
    let def = ty::InstanceDef::ClosureOnceShim { call_once };

    let self_ty = tcx.mk_closure_from_closure_substs(
        closure_did, substs);

    // The closure's signature takes exactly one (tupled) argument; the
    // shim substs are built as [self_ty, args_ty] — presumably matching
    // `call_once`'s generics (TODO confirm against rustc).
    let sig = tcx.closure_type(closure_did).subst(tcx, substs.substs);
    let sig = tcx.erase_late_bound_regions_and_normalize(&sig);
    assert_eq!(sig.inputs().len(), 1);
    let substs = tcx.mk_substs([
        Kind::from(self_ty),
        Kind::from(sig.inputs()[0]),
    ].iter().cloned());

    debug!("fn_once_adapter_shim: self_ty={:?} sig={:?}", self_ty, sig);
    ty::Instance { def, substs }
}
/// Decides whether calling a closure of `actual_closure_kind` through the
/// trait of `trait_closure_kind` requires the `call_once` adapter shim.
///
/// Returns `Ok(true)` when the shim is required, `Ok(false)` when the
/// closure fn can be used as-is, and `Err(())` for invalid combinations.
fn needs_fn_once_adapter_shim(actual_closure_kind: ty::ClosureKind,
                              trait_closure_kind: ty::ClosureKind)
                              -> Result<bool, ()>
{
    match (actual_closure_kind, trait_closure_kind) {
        // A `fn(&self, ...)` or `fn(&mut self, ...)` closure invoked as
        // `FnOnce` needs a wrapper along the lines of:
        //
        //     fn call_once(self, ...) { call_mut(&self, ...) }
        //     fn call_once(mut self, ...) { call_mut(&mut self, ...) }
        //
        // (both of which are the same thing at trans time).
        (ty::ClosureKind::Fn, ty::ClosureKind::FnOnce) |
        (ty::ClosureKind::FnMut, ty::ClosureKind::FnOnce) => Ok(true),

        // Exact kind match never needs adapting; and a `Fn` closure used
        // as `FnMut` is fine too, since `fn(&self, ...)` and
        // `fn(&mut self, ...)` are basically identical at trans time.
        (ty::ClosureKind::Fn, ty::ClosureKind::Fn) |
        (ty::ClosureKind::Fn, ty::ClosureKind::FnMut) |
        (ty::ClosureKind::FnMut, ty::ClosureKind::FnMut) |
        (ty::ClosureKind::FnOnce, ty::ClosureKind::FnOnce) => Ok(false),

        // Everything else (e.g. an `FnOnce` closure requested as `Fn`)
        // cannot be expressed.
        _ => Err(()),
    }
}
/// The point where linking happens. Resolve a (def_id, substs)
/// pair to an instance.
///
/// Dispatches to trait selection for associated items, and otherwise
/// classifies the item as an intrinsic, drop glue, or a free item.
pub fn resolve<'a, 'tcx>(
    tcx: TyCtxt<'a, 'tcx, 'tcx>,
    def_id: DefId,
    substs: &'tcx Substs<'tcx>
) -> ty::Instance<'tcx> {
    debug!("resolve(def_id={:?}, substs={:?})",
           def_id, substs);
    let result = if let Some(trait_def_id) = tcx.trait_of_item(def_id) {
        // Trait items need trait selection to find the concrete impl
        // (or shim) that will actually run.
        debug!(" => associated item, attempting to find impl");
        let item = tcx.associated_item(def_id);
        resolve_associated_item(tcx, &item, trait_def_id, substs)
    } else {
        let item_type = def_ty(tcx, def_id, substs);
        let def = match item_type.sty {
            // Intrinsics are never real function bodies.
            ty::TyFnDef(_, _, f) if
                f.abi() == Abi::RustIntrinsic ||
                f.abi() == Abi::PlatformIntrinsic =>
            {
                debug!(" => intrinsic");
                ty::InstanceDef::Intrinsic(def_id)
            }
            _ => {
                if Some(def_id) == tcx.lang_items.drop_in_place_fn() {
                    // `drop_in_place::<T>` maps to rustc-generated drop
                    // glue; `None` marks types whose drop is a no-op.
                    let ty = substs.type_at(0);
                    if needs_drop_glue(tcx, ty) {
                        debug!(" => nontrivial drop glue");
                        ty::InstanceDef::DropGlue(def_id, Some(ty))
                    } else {
                        debug!(" => trivial drop glue");
                        ty::InstanceDef::DropGlue(def_id, None)
                    }
                } else {
                    // A plain function/method not going through a trait.
                    debug!(" => free item");
                    ty::InstanceDef::Item(def_id)
                }
            }
        };
        ty::Instance { def, substs }
    };
    debug!("resolve(def_id={:?}, substs={:?}) = {}",
           def_id, substs, result);
    result
}
/// Returns whether dropping a value of type `t` runs any actual code.
///
/// Mostly defers to `type_needs_drop_given_env`, with one refinement:
/// a `Box` whose pointee needs no drop and whose layout is zero-sized
/// does not allocate, so it needs no drop glue either.
pub fn needs_drop_glue<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, t: Ty<'tcx>) -> bool {
    assert!(t.is_normalized_for_trans());

    let t = tcx.erase_regions(&t);

    // FIXME (#22815): note that type_needs_drop conservatively
    // approximates in some cases and may say a type expression
    // requires drop glue when it actually does not.
    //
    // (In this case it is not clear whether any harm is done, i.e.
    // erroneously returning `true` in some cases where we could have
    // returned `false` does not appear unsound. The impact on
    // code quality is unknown at this time.)
    let env = tcx.empty_parameter_environment();
    if !tcx.type_needs_drop_given_env(t, &env) {
        return false;
    }
    match t.sty {
        ty::TyAdt(def, _) if def.is_box() => {
            let typ = t.boxed_ty();
            if !tcx.type_needs_drop_given_env(typ, &env) && type_is_sized(tcx, typ) {
                // Pointee itself needs no drop; the box only needs drop
                // glue if it actually allocated (non-zero size).
                tcx.infer_ctxt((), traits::Reveal::All).enter(|infcx| {
                    let layout = t.layout(&infcx).unwrap();
                    if layout.size(&tcx.data_layout).bytes() == 0 {
                        // `Box<ZeroSizeType>` does not allocate.
                        false
                    } else {
                        true
                    }
                })
            } else {
                true
            }
        }
        _ => true
    }
}
/// Resolves a trait item (`trait_item` of trait `trait_id`, with the
/// receiver's `rcvr_substs`) to the concrete `Instance` chosen by trait
/// selection: an impl item, a closure, a fn-pointer shim, or a virtual
/// (vtable) call.
fn resolve_associated_item<'a, 'tcx>(
    tcx: TyCtxt<'a, 'tcx, 'tcx>,
    trait_item: &ty::AssociatedItem,
    trait_id: DefId,
    rcvr_substs: &'tcx Substs<'tcx>
) -> ty::Instance<'tcx> {
    let def_id = trait_item.def_id;
    debug!("resolve_associated_item(trait_item={:?}, \
            trait_id={:?}, \
            rcvr_substs={:?})",
           def_id, trait_id, rcvr_substs);

    // Run trait selection on the trait ref built from the receiver substs.
    let trait_ref = ty::TraitRef::from_method(tcx, trait_id, rcvr_substs);
    let vtbl = fulfill_obligation(tcx, DUMMY_SP, ty::Binder(trait_ref));

    // Now that we know which impl is being used, we can dispatch to
    // the actual function:
    match vtbl {
        ::rustc::traits::VtableImpl(impl_data) => {
            // A concrete impl: find the matching item in that impl and
            // the substs to apply to it.
            let (def_id, substs) = ::rustc::traits::find_associated_item(
                tcx, trait_item, rcvr_substs, &impl_data);
            let substs = tcx.erase_regions(&substs);
            ty::Instance::new(def_id, substs)
        }
        ::rustc::traits::VtableClosure(closure_data) => {
            // `Fn*` trait implemented by a closure; may need the
            // `call_once` adapter shim (see `resolve_closure`).
            let trait_closure_kind = tcx.lang_items.fn_trait_kind(trait_id).unwrap();
            resolve_closure(tcx, closure_data.closure_def_id, closure_data.substs,
                            trait_closure_kind)
        }
        ::rustc::traits::VtableFnPointer(ref data) => {
            // `Fn*` trait implemented by a plain function pointer: use
            // the rustc-generated fn-pointer shim.
            ty::Instance {
                def: ty::InstanceDef::FnPtrShim(trait_item.def_id, data.fn_ty),
                substs: rcvr_substs
            }
        }
        ::rustc::traits::VtableObject(ref data) => {
            // Trait-object receiver: dispatch through the vtable slot.
            let index = tcx.get_vtable_index_of_object_method(data, def_id);
            ty::Instance {
                def: ty::InstanceDef::Virtual(def_id, index),
                substs: rcvr_substs
            }
        }
        _ => {
            bug!("static call to invalid vtable: {:?}", vtbl)
        }
    }
}
/// Returns the type of the item `def_id` with `substs` applied and
/// associated types normalized (see `apply_param_substs`).
pub fn def_ty<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
                        def_id: DefId,
                        substs: &'tcx Substs<'tcx>)
                        -> Ty<'tcx>
{
    // Look up the generic item type, then monomorphize it in one step.
    apply_param_substs(tcx, substs, &tcx.item_type(def_id))
}
/// Monomorphizes a type from the AST by first applying the in-scope
/// substitutions and then normalizing any associated types.
pub fn apply_param_substs<'a, 'tcx, T>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
                                       param_substs: &Substs<'tcx>,
                                       value: &T)
                                       -> T
    where T: ::rustc::infer::TransNormalize<'tcx>
{
    debug!("apply_param_substs(param_substs={:?}, value={:?})", param_substs, value);
    // Substitute the type parameters, erase region information, then
    // resolve projections like `<T as Trait>::Assoc`.
    let substituted = value.subst(tcx, param_substs);
    let substituted = tcx.erase_regions(&substituted);
    AssociatedTypeNormalizer{ tcx }.fold(&substituted)
}
/// Type folder that replaces projection types (`<T as Trait>::Assoc`)
/// with their normalized form via `normalize_associated_type`.
struct AssociatedTypeNormalizer<'a, 'tcx: 'a> {
    tcx: TyCtxt<'a, 'tcx, 'tcx>,
}

impl<'a, 'tcx> AssociatedTypeNormalizer<'a, 'tcx> {
    /// Folds `value`, skipping the traversal entirely when it contains
    /// no projection types.
    fn fold<T: TypeFoldable<'tcx>>(&mut self, value: &T) -> T {
        if !value.has_projection_types() {
            value.clone()
        } else {
            value.fold_with(self)
        }
    }
}
impl<'a, 'tcx> ::rustc::ty::fold::TypeFolder<'tcx, 'tcx> for AssociatedTypeNormalizer<'a, 'tcx> {
    fn tcx<'c>(&'c self) -> TyCtxt<'c, 'tcx, 'tcx> {
        self.tcx
    }

    fn fold_ty(&mut self, ty: Ty<'tcx>) -> Ty<'tcx> {
        // Fast path: a type without projections folds to itself.
        if !ty.has_projection_types() {
            ty
        } else {
            self.tcx.normalize_associated_type(&ty)
        }
    }
}
/// Returns whether `ty` is `Sized` in an empty parameter environment.
fn type_is_sized<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, ty: Ty<'tcx>) -> bool {
    // This query is only meaningful for fully-substituted types; fail
    // loudly if a generic parameter slipped through.
    assert!(!ty.needs_subst());
    let param_env = tcx.empty_parameter_environment();
    ty.is_sized(tcx, &param_env, DUMMY_SP)
}
/// Attempts to resolve an obligation. The result is a shallow vtable resolution -- meaning that we
/// do not (necessarily) resolve all nested obligations on the impl. Note that type check should
/// guarantee to us that all nested obligations *could be* resolved if we wanted to.
///
/// Selection ambiguity aborts compilation with a fatal "recursion limit"
/// error; a selection error is an ICE (`span_bug!`), since type check has
/// already accepted this obligation.
fn fulfill_obligation<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
                                span: Span,
                                trait_ref: ty::PolyTraitRef<'tcx>)
                                -> traits::Vtable<'tcx, ()>
{
    // Remove any references to regions; this helps improve caching.
    let trait_ref = tcx.erase_regions(&trait_ref);

    debug!("trans::fulfill_obligation(trait_ref={:?}, def_id={:?})",
           trait_ref, trait_ref.def_id());

    // Do the initial selection for the obligation. This yields the
    // shallow result we are looking for -- that is, what specific impl.
    tcx.infer_ctxt((), Reveal::All).enter(|infcx| {
        let mut selcx = traits::SelectionContext::new(&infcx);

        let obligation_cause = traits::ObligationCause::misc(span,
                                                             ast::DUMMY_NODE_ID);
        let obligation = traits::Obligation::new(obligation_cause,
                                                 trait_ref.to_poly_trait_predicate());
        let selection = match selcx.select(&obligation) {
            Ok(Some(selection)) => selection,
            Ok(None) => {
                // Ambiguity can happen when monomorphizing during trans
                // expands to some humongo type that never occurred
                // statically -- this humongo type can then overflow,
                // leading to an ambiguous result. So report this as an
                // overflow bug, since I believe this is the only case
                // where ambiguity can result.
                debug!("Encountered ambiguity selecting `{:?}` during trans, \
                        presuming due to overflow",
                       trait_ref);
                tcx.sess.span_fatal(span,
                    "reached the recursion limit during monomorphization \
                     (selection ambiguity)");
            }
            Err(e) => {
                span_bug!(span, "Encountered error `{:?}` selecting `{:?}` during trans",
                          e, trait_ref)
            }
        };

        debug!("fulfill_obligation: selection={:?}", selection);

        // Currently, we use a fulfillment context to completely resolve
        // all nested obligations. This is because they can inform the
        // inference of the impl's type parameters.
        let mut fulfill_cx = traits::FulfillmentContext::new();
        let vtable = selection.map(|predicate| {
            debug!("fulfill_obligation: register_predicate_obligation {:?}", predicate);
            fulfill_cx.register_predicate_obligation(&infcx, predicate);
        });
        let vtable = infcx.drain_fulfillment_cx_or_panic(span, &mut fulfill_cx, &vtable);

        debug!("Cache miss: {:?} => {:?}", trait_ref, vtable);
        vtable
    })
}
pub fn resolve_drop_in_place<'a, 'tcx>(
tcx: TyCtxt<'a, 'tcx, 'tcx>,
ty: Ty<'tcx>,
) -> ty::Instance<'tcx>
{
let def_id = tcx.require_lang_item(::rustc::middle::lang_items::DropInPlaceFnLangItem);
let substs = tcx.intern_substs(&[Kind::from(ty)]);
resolve(tcx, def_id, substs)
}

View file

@ -1,11 +1,9 @@
use rustc::hir::def_id::DefId;
use rustc::mir;
use rustc::ty::layout::{Size, Align};
use rustc::ty::subst::Substs;
use rustc::ty::{self, Ty};
use rustc_data_structures::indexed_vec::Idx;
use error::EvalResult;
use error::{EvalError, EvalResult};
use eval_context::{EvalContext};
use memory::Pointer;
use value::{PrimVal, Value};
@ -42,15 +40,9 @@ pub enum LvalueExtra {
/// Uniquely identifies a specific constant or static.
#[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)]
pub struct GlobalId<'tcx> {
/// For a constant or static, the `DefId` of the item itself.
/// For a promoted global, the `DefId` of the function they belong to.
pub(super) def_id: DefId,
/// For statics and constants this is `Substs::empty()`, so only promoteds and associated
/// constants actually have something useful here. We could special case statics and constants,
/// but that would only require more branching when working with constants, and not bring any
/// real benefits.
pub(super) substs: &'tcx Substs<'tcx>,
/// For a constant or static, the `Instance` of the item itself.
/// For a promoted global, the `Instance` of the function they belong to.
pub(super) instance: ty::Instance<'tcx>,
/// The index for promoted globals within their function's `Mir`.
pub(super) promoted: Option<mir::Promoted>,
@ -116,18 +108,24 @@ impl<'tcx> Global<'tcx> {
impl<'a, 'tcx> EvalContext<'a, 'tcx> {
pub(super) fn eval_and_read_lvalue(&mut self, lvalue: &mir::Lvalue<'tcx>) -> EvalResult<'tcx, Value> {
let ty = self.lvalue_ty(lvalue);
let lvalue = self.eval_lvalue(lvalue)?;
Ok(self.read_lvalue(lvalue))
}
pub fn read_lvalue(&self, lvalue: Lvalue<'tcx>) -> Value {
if ty.is_never() {
return Err(EvalError::Unreachable);
}
match lvalue {
Lvalue::Ptr { ptr, extra } => {
assert_eq!(extra, LvalueExtra::None);
Value::ByRef(ptr)
Ok(Value::ByRef(ptr))
}
Lvalue::Local { frame, local, field } => {
Ok(self.stack[frame].get_local(local, field.map(|(i, _)| i)))
}
Lvalue::Global(cid) => {
Ok(self.globals.get(&cid).expect("global not cached").value)
}
Lvalue::Local { frame, local, field } => self.stack[frame].get_local(local, field.map(|(i, _)| i)),
Lvalue::Global(cid) => self.globals.get(&cid).expect("global not cached").value,
}
}
@ -138,8 +136,8 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
Local(local) => Lvalue::Local { frame: self.stack.len() - 1, local, field: None },
Static(ref static_) => {
let substs = self.tcx.intern_substs(&[]);
Lvalue::Global(GlobalId { def_id: static_.def_id, substs, promoted: None })
let instance = ty::Instance::mono(self.tcx, static_.def_id);
Lvalue::Global(GlobalId { instance, promoted: None })
}
Projection(ref proj) => return self.eval_lvalue_projection(proj),
@ -208,6 +206,12 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
(Size::from_bytes(field * elem_size), false)
}
FatPointer { .. } => {
let bytes = field_index as u64 * self.memory.pointer_size();
let offset = Size::from_bytes(bytes);
(offset, false)
}
_ => bug!("field access on non-product type: {:?}", base_layout),
};

View file

@ -2,9 +2,7 @@ use byteorder::{ReadBytesExt, WriteBytesExt, LittleEndian, BigEndian};
use std::collections::{btree_map, BTreeMap, HashMap, HashSet, VecDeque, BTreeSet};
use std::{fmt, iter, ptr, mem, io};
use rustc::hir::def_id::DefId;
use rustc::ty::{self, PolyFnSig, ClosureSubsts};
use rustc::ty::subst::Substs;
use rustc::ty;
use rustc::ty::layout::{self, TargetDataLayout};
use error::{EvalError, EvalResult};
@ -102,44 +100,6 @@ impl Pointer {
}
}
#[derive(Debug, Copy, Clone, Hash, Eq, PartialEq)]
/// Identifies a specific monomorphized function
pub struct FunctionDefinition<'tcx> {
pub def_id: DefId,
pub substs: &'tcx Substs<'tcx>,
pub sig: PolyFnSig<'tcx>,
}
/// Either a concrete function, or a glue function
#[derive(Debug, Copy, Clone, Hash, Eq, PartialEq)]
pub enum Function<'tcx> {
/// A function or method created by compiling code
Concrete(FunctionDefinition<'tcx>),
/// Glue required to call a regular function through a Fn(Mut|Once) trait object
FnDefAsTraitObject(FunctionDefinition<'tcx>),
/// A drop glue function only needs to know the real type, and then miri can extract
/// that type from a vtable's drop pointer.
/// Instead of storing some drop function, we act as if there are no trait objects, by
/// mapping trait objects to their real types before acting on them.
DropGlue(ty::Ty<'tcx>),
/// Glue required to treat the ptr part of a fat pointer
/// as a function pointer
FnPtrAsTraitObject(PolyFnSig<'tcx>),
/// Glue for Closures
Closure(FunctionDefinition<'tcx>),
/// Glue for noncapturing closures casted to function pointers
NonCaptureClosureAsFnPtr(FunctionDefinition<'tcx>),
}
impl<'tcx> Function<'tcx> {
pub fn expect_drop_glue_real_ty(self) -> EvalResult<'tcx, ty::Ty<'tcx>> {
match self {
Function::DropGlue(real_ty) => Ok(real_ty),
other => Err(EvalError::ExpectedDropGlue(other)),
}
}
}
////////////////////////////////////////////////////////////////////////////////
// Top-level interpreter memory
////////////////////////////////////////////////////////////////////////////////
@ -165,10 +125,10 @@ pub struct Memory<'a, 'tcx> {
/// Function "allocations". They exist solely so pointers have something to point to, and
/// we can figure out what they point to.
functions: HashMap<AllocId, Function<'tcx>>,
functions: HashMap<AllocId, ty::Instance<'tcx>>,
/// Inverse map of `functions` so we don't allocate a new pointer every time we need one
function_alloc_cache: HashMap<Function<'tcx>, AllocId>,
function_alloc_cache: HashMap<ty::Instance<'tcx>, AllocId>,
/// Target machine data layout to emulate.
pub layout: &'a TargetDataLayout,
@ -214,55 +174,15 @@ impl<'a, 'tcx> Memory<'a, 'tcx> {
self.alloc_map.iter()
}
pub fn create_closure_ptr(&mut self, def_id: DefId, substs: ClosureSubsts<'tcx>, sig: PolyFnSig<'tcx>) -> Pointer {
self.create_fn_alloc(Function::Closure(FunctionDefinition {
def_id,
substs: substs.substs,
sig,
}))
}
pub fn create_fn_ptr_from_noncapture_closure(&mut self, def_id: DefId, substs: ClosureSubsts<'tcx>, sig: PolyFnSig<'tcx>) -> Pointer {
self.create_fn_alloc(Function::NonCaptureClosureAsFnPtr(FunctionDefinition {
def_id,
substs: substs.substs,
sig,
}))
}
pub fn create_fn_as_trait_glue(&mut self, def_id: DefId, substs: &'tcx Substs, sig: PolyFnSig<'tcx>) -> Pointer {
self.create_fn_alloc(Function::FnDefAsTraitObject(FunctionDefinition {
def_id,
substs,
sig,
}))
}
pub fn create_fn_ptr_as_trait_glue(&mut self, sig: PolyFnSig<'tcx>) -> Pointer {
self.create_fn_alloc(Function::FnPtrAsTraitObject(sig))
}
pub fn create_drop_glue(&mut self, ty: ty::Ty<'tcx>) -> Pointer {
self.create_fn_alloc(Function::DropGlue(ty))
}
pub fn create_fn_ptr(&mut self, def_id: DefId, substs: &'tcx Substs, sig: PolyFnSig<'tcx>) -> Pointer {
self.create_fn_alloc(Function::Concrete(FunctionDefinition {
def_id,
substs,
sig,
}))
}
fn create_fn_alloc(&mut self, def: Function<'tcx>) -> Pointer {
if let Some(&alloc_id) = self.function_alloc_cache.get(&def) {
pub fn create_fn_alloc(&mut self, instance: ty::Instance<'tcx>) -> Pointer {
if let Some(&alloc_id) = self.function_alloc_cache.get(&instance) {
return Pointer::new(alloc_id, 0);
}
let id = self.next_id;
debug!("creating fn ptr: {}", id);
self.next_id.0 += 1;
self.functions.insert(id, def);
self.function_alloc_cache.insert(def, id);
self.functions.insert(id, instance);
self.function_alloc_cache.insert(instance, id);
Pointer::new(id, 0)
}
@ -469,7 +389,7 @@ impl<'a, 'tcx> Memory<'a, 'tcx> {
}
}
pub fn get_fn(&self, id: AllocId) -> EvalResult<'tcx, Function<'tcx>> {
pub fn get_fn(&self, id: AllocId) -> EvalResult<'tcx, ty::Instance<'tcx>> {
debug!("reading fn ptr: {}", id);
match self.functions.get(&id) {
Some(&fndef) => Ok(fndef),
@ -501,28 +421,8 @@ impl<'a, 'tcx> Memory<'a, 'tcx> {
let alloc = match (self.alloc_map.get(&id), self.functions.get(&id)) {
(Some(a), None) => a,
(None, Some(&Function::Concrete(fn_def))) => {
trace!("{} {}", msg, dump_fn_def(fn_def));
continue;
},
(None, Some(&Function::DropGlue(real_ty))) => {
trace!("{} drop glue for {}", msg, real_ty);
continue;
},
(None, Some(&Function::FnDefAsTraitObject(fn_def))) => {
trace!("{} fn as Fn glue for {}", msg, dump_fn_def(fn_def));
continue;
},
(None, Some(&Function::FnPtrAsTraitObject(fn_def))) => {
trace!("{} fn ptr as Fn glue (signature: {:?})", msg, fn_def);
continue;
},
(None, Some(&Function::Closure(fn_def))) => {
trace!("{} closure glue for {}", msg, dump_fn_def(fn_def));
continue;
},
(None, Some(&Function::NonCaptureClosureAsFnPtr(fn_def))) => {
trace!("{} non-capture closure as fn ptr glue for {}", msg, dump_fn_def(fn_def));
(None, Some(instance)) => {
trace!("{} {}", msg, instance);
continue;
},
(None, None) => {
@ -594,11 +494,6 @@ impl<'a, 'tcx> Memory<'a, 'tcx> {
}
}
fn dump_fn_def<'tcx>(fn_def: FunctionDefinition<'tcx>) -> String {
let name = ty::tls::with(|tcx| tcx.item_path_str(fn_def.def_id));
format!("function pointer: {}: {}", name, fn_def.sig.skip_binder())
}
/// Byte accessors
impl<'a, 'tcx> Memory<'a, 'tcx> {
fn get_bytes_unchecked(&self, ptr: Pointer, size: u64, align: u64) -> EvalResult<'tcx, &[u8]> {

View file

@ -45,8 +45,7 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
let mut new = Ok(0);
ConstantExtractor {
span: stmt.source_info.span,
substs: self.substs(),
def_id: self.frame().def_id,
instance: self.frame().instance,
ecx: self,
mir: Ref::clone(&mir),
new_constants: &mut new,
@ -63,8 +62,7 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
let mut new = Ok(0);
ConstantExtractor {
span: terminator.source_info.span,
substs: self.substs(),
def_id: self.frame().def_id,
instance: self.frame().instance,
ecx: self,
mir: Ref::clone(&mir),
new_constants: &mut new,
@ -145,8 +143,7 @@ struct ConstantExtractor<'a, 'b: 'a, 'tcx: 'b> {
span: Span,
ecx: &'a mut EvalContext<'b, 'tcx>,
mir: MirRef<'tcx>,
def_id: DefId,
substs: &'tcx subst::Substs<'tcx>,
instance: ty::Instance<'tcx>,
new_constants: &'a mut EvalResult<'tcx, u64>,
}
@ -158,26 +155,24 @@ impl<'a, 'b, 'tcx> ConstantExtractor<'a, 'b, 'tcx> {
span: Span,
shared: bool,
) {
let (def_id, substs) = self.ecx.resolve_associated_const(def_id, substs);
let cid = GlobalId { def_id, substs, promoted: None };
let instance = self.ecx.resolve_associated_const(def_id, substs);
let cid = GlobalId { instance, promoted: None };
if self.ecx.globals.contains_key(&cid) {
return;
}
self.try(|this| {
let mir = this.ecx.load_mir(def_id)?;
let mir = this.ecx.load_mir(instance.def)?;
this.ecx.globals.insert(cid, Global::uninitialized(mir.return_ty));
let mutable = !shared || mir.return_ty.type_contents(this.ecx.tcx).interior_unsafe();
let cleanup = StackPopCleanup::MarkStatic(mutable);
let name = ty::tls::with(|tcx| tcx.item_path_str(def_id));
trace!("pushing stack frame for global: {}", name);
this.ecx.push_stack_frame(
def_id,
instance,
span,
mir,
substs,
Lvalue::Global(cid),
cleanup,
Vec::new(),
)
});
}
@ -200,18 +195,11 @@ impl<'a, 'b, 'tcx> Visitor<'tcx> for ConstantExtractor<'a, 'b, 'tcx> {
// already computed by rustc
mir::Literal::Value { .. } => {}
mir::Literal::Item { def_id, substs } => {
if let ty::TyFnDef(..) = constant.ty.sty {
// No need to do anything here,
// because the type is the actual function, not the signature of the function.
// Thus we can simply create a zero sized allocation in `evaluate_operand`
} else {
self.global_item(def_id, substs, constant.span, true);
}
self.global_item(def_id, substs, constant.span, true);
},
mir::Literal::Promoted { index } => {
let cid = GlobalId {
def_id: self.def_id,
substs: self.substs,
instance: self.instance,
promoted: Some(index),
};
if self.ecx.globals.contains_key(&cid) {
@ -220,16 +208,14 @@ impl<'a, 'b, 'tcx> Visitor<'tcx> for ConstantExtractor<'a, 'b, 'tcx> {
let mir = Ref::clone(&self.mir);
let mir = Ref::map(mir, |mir| &mir.promoted[index]);
self.try(|this| {
let ty = this.ecx.monomorphize(mir.return_ty, this.substs);
let ty = this.ecx.monomorphize(mir.return_ty, this.instance.substs);
this.ecx.globals.insert(cid, Global::uninitialized(ty));
trace!("pushing stack frame for {:?}", index);
this.ecx.push_stack_frame(this.def_id,
this.ecx.push_stack_frame(this.instance,
constant.span,
mir,
this.substs,
Lvalue::Global(cid),
StackPopCleanup::MarkStatic(false),
Vec::new())
StackPopCleanup::MarkStatic(false))
});
}
}

View file

@ -1,224 +1,82 @@
use rustc::hir::def_id::DefId;
use rustc::traits;
use rustc::ty::layout::Layout;
use rustc::ty::subst::{Substs, Kind};
use rustc::ty::{self, Ty};
use rustc::mir;
use rustc::ty::{self, Ty};
use rustc::ty::subst::Kind;
use syntax::codemap::Span;
use error::{EvalError, EvalResult};
use eval_context::{EvalContext, monomorphize_field_ty, StackPopCleanup};
use error::EvalResult;
use eval_context::{EvalContext, StackPopCleanup};
use lvalue::{Lvalue, LvalueExtra};
use memory::Pointer;
use value::PrimVal;
use value::Value;
impl<'a, 'tcx> EvalContext<'a, 'tcx> {
/// Creates stack frames for all drop impls. See `drop` for the actual content.
pub fn eval_drop_impls(&mut self, drops: Vec<(DefId, Value, &'tcx Substs<'tcx>)>, span: Span) -> EvalResult<'tcx> {
// add them to the stack in reverse order, because the impl that needs to run the last
// is the one that needs to be at the bottom of the stack
for (drop_def_id, self_arg, substs) in drops.into_iter().rev() {
let mir = self.load_mir(drop_def_id)?;
trace!("substs for drop glue: {:?}", substs);
self.push_stack_frame(
drop_def_id,
span,
mir,
substs,
Lvalue::from_ptr(Pointer::zst_ptr()),
StackPopCleanup::None,
Vec::new(),
)?;
let mut arg_locals = self.frame().mir.args_iter();
let first = arg_locals.next().expect("drop impl has self arg");
assert!(arg_locals.next().is_none(), "drop impl should have only one arg");
let dest = self.eval_lvalue(&mir::Lvalue::Local(first))?;
let ty = self.frame().mir.local_decls[first].ty;
self.write_value(self_arg, dest, ty)?;
}
Ok(())
pub(crate) fn drop_lvalue(&mut self, lval: Lvalue<'tcx>, instance: ty::Instance<'tcx>, ty: Ty<'tcx>, span: Span) -> EvalResult<'tcx> {
trace!("drop_lvalue: {:#?}", lval);
let val = match self.force_allocation(lval)? {
Lvalue::Ptr { ptr, extra: LvalueExtra::Vtable(vtable) } => Value::ByValPair(PrimVal::Ptr(ptr), PrimVal::Ptr(vtable)),
Lvalue::Ptr { ptr, extra: LvalueExtra::Length(len) } => Value::ByValPair(PrimVal::Ptr(ptr), PrimVal::Bytes(len as u128)),
Lvalue::Ptr { ptr, extra: LvalueExtra::None } => Value::ByVal(PrimVal::Ptr(ptr)),
_ => bug!("force_allocation broken"),
};
self.drop(val, instance, ty, span)
}
pub(crate) fn drop(&mut self, mut arg: Value, mut instance: ty::Instance<'tcx>, ty: Ty<'tcx>, span: Span) -> EvalResult<'tcx> {
trace!("drop: {:#?}, {:?}, {:?}", arg, ty.sty, instance.def);
/// push DefIds of drop impls and their argument on the given vector
pub fn drop(
&mut self,
lval: Lvalue<'tcx>,
ty: Ty<'tcx>,
drop: &mut Vec<(DefId, Value, &'tcx Substs<'tcx>)>,
) -> EvalResult<'tcx> {
if !self.type_needs_drop(ty) {
debug!("no need to drop {:?}", ty);
if let ty::InstanceDef::DropGlue(_, None) = instance.def {
trace!("nothing to do, aborting");
// we don't actually need to drop anything
return Ok(());
}
trace!("need to drop {:?} at {:?}", ty, lval);
match ty.sty {
// special case `Box` to deallocate the inner allocation
ty::TyAdt(ref def, _) if def.is_box() => {
let contents_ty = ty.boxed_ty();
let val = self.read_lvalue(lval);
// we are going through the read_value path, because that already does all the
// checks for the trait object types. We'd only be repeating ourselves here.
let val = self.follow_by_ref_value(val, ty)?;
trace!("box dealloc on {:?}", val);
match val {
Value::ByRef(_) => bug!("follow_by_ref_value can't result in ByRef"),
Value::ByVal(ptr) => {
assert!(self.type_is_sized(contents_ty));
let contents_ptr = ptr.to_ptr()?;
self.drop(Lvalue::from_ptr(contents_ptr), contents_ty, drop)?;
},
Value::ByValPair(prim_ptr, extra) => {
let ptr = prim_ptr.to_ptr()?;
let extra = match self.tcx.struct_tail(contents_ty).sty {
ty::TyDynamic(..) => LvalueExtra::Vtable(extra.to_ptr()?),
ty::TyStr | ty::TySlice(_) => LvalueExtra::Length(extra.to_u64()?),
_ => bug!("invalid fat pointer type: {}", ty),
};
self.drop(Lvalue::Ptr { ptr, extra }, contents_ty, drop)?;
},
}
// We cannot use Box's destructor, because it is a no-op and only exists to reduce
// the number of hacks required in the compiler around the Box type.
let box_free_fn = self.tcx.lang_items.box_free_fn().expect("no box_free lang item");
let substs = self.tcx.intern_substs(&[Kind::from(contents_ty)]);
// this is somewhat hacky, but hey, there's no representation difference between
// pointers, `Box`es and references, so
// #[lang = "box_free"] unsafe fn box_free<T>(ptr: *mut T)
// is the same as
// fn drop(&mut self) if Self is Box<T>
drop.push((box_free_fn, val, substs));
}
ty::TyAdt(adt_def, substs) => {
// FIXME: some structs are represented as ByValPair
let mut lval = self.force_allocation(lval)?;
let (adt_ptr, extra) = lval.to_ptr_and_extra();
// run drop impl before the fields' drop impls
if let Some(destructor) = adt_def.destructor(self.tcx) {
let trait_ref = ty::Binder(ty::TraitRef {
def_id: self.tcx.lang_items.drop_trait().unwrap(),
substs: self.tcx.mk_substs_trait(ty, &[]),
});
let vtable = match self.fulfill_obligation(trait_ref) {
traits::VtableImpl(data) => data,
_ => bug!("dtor for {:?} is not an impl???", ty)
};
let val = match extra {
LvalueExtra::None => Value::ByVal(PrimVal::Ptr(adt_ptr)),
LvalueExtra::DowncastVariant(_) => bug!("downcast variant in drop"),
LvalueExtra::Length(n) => Value::ByValPair(PrimVal::Ptr(adt_ptr), PrimVal::from_u128(n as u128)),
LvalueExtra::Vtable(vtable) => Value::ByValPair(PrimVal::Ptr(adt_ptr), PrimVal::Ptr(vtable)),
};
drop.push((destructor.did, val, vtable.substs));
}
let layout = self.type_layout(ty)?;
let fields = match *layout {
Layout::Univariant { .. } => &adt_def.struct_variant().fields,
Layout::General { .. } => {
let discr_val = self.read_discriminant_value(adt_ptr, ty)? as u128;
let ptr = self.force_allocation(lval)?.to_ptr();
match adt_def.discriminants(self.tcx).position(|v| discr_val == v.to_u128_unchecked()) {
Some(i) => {
lval = Lvalue::Ptr {
ptr,
extra: LvalueExtra::DowncastVariant(i),
};
&adt_def.variants[i].fields
},
None => return Err(EvalError::InvalidDiscriminant),
}
},
Layout::StructWrappedNullablePointer { .. } |
Layout::RawNullablePointer { .. } => {
let discr = self.read_discriminant_value(adt_ptr, ty)?;
assert_eq!(discr as usize as u128, discr);
&adt_def.variants[discr as usize].fields
},
Layout::CEnum { .. } => return Ok(()),
_ => bug!("{:?} is not an adt layout", layout),
};
let tcx = self.tcx;
self.drop_fields(
fields.iter().map(|field| monomorphize_field_ty(tcx, field, substs)),
lval,
ty,
drop,
)?;
}
ty::TyTuple(fields, _) =>
self.drop_fields(fields.into_iter().cloned(), lval, ty, drop)?,
let mir = match ty.sty {
ty::TyDynamic(..) => {
let (ptr, vtable) = match lval {
Lvalue::Ptr { ptr, extra: LvalueExtra::Vtable(vtable) } => (ptr, vtable),
_ => bug!("expected an lvalue with a vtable"),
let vtable = match arg {
Value::ByValPair(_, PrimVal::Ptr(vtable)) => vtable,
_ => bug!("expected fat ptr, got {:?}", arg),
};
if let Some(real_ty) = self.read_drop_type_from_vtable(vtable)? {
self.drop(Lvalue::from_ptr(ptr), real_ty, drop)?;
match self.read_drop_type_from_vtable(vtable)? {
Some(func) => {
instance = func;
self.load_mir(func.def)?
},
// no drop fn -> bail out
None => return Ok(()),
}
}
ty::TySlice(elem_ty) => {
let (ptr, len) = match lval {
Lvalue::Ptr { ptr, extra: LvalueExtra::Length(len) } => (ptr, len),
_ => bug!("expected an lvalue with a length"),
},
ty::TyArray(elem, n) => {
instance.substs = self.tcx.mk_substs([
Kind::from(elem),
].iter().cloned());
let ptr = match arg {
Value::ByVal(PrimVal::Ptr(src_ptr)) => src_ptr,
_ => bug!("expected thin ptr, got {:?}", arg),
};
let size = self.type_size(elem_ty)?.expect("slice element must be sized");
// FIXME: this creates a lot of stack frames if the element type has
// a drop impl
for i in 0..len {
self.drop(Lvalue::from_ptr(ptr.offset(i * size)), elem_ty, drop)?;
}
}
arg = Value::ByValPair(PrimVal::Ptr(ptr), PrimVal::Bytes(n as u128));
::eval_context::MirRef::clone(&self.seq_drop_glue)
},
ty::TySlice(elem) => {
instance.substs = self.tcx.mk_substs([
Kind::from(elem),
].iter().cloned());
::eval_context::MirRef::clone(&self.seq_drop_glue)
},
_ => self.load_mir(instance.def)?,
};
ty::TyArray(elem_ty, len) => {
let lval = self.force_allocation(lval)?;
let (ptr, extra) = match lval {
Lvalue::Ptr { ptr, extra } => (ptr, extra),
_ => bug!("expected an lvalue with optional extra data"),
};
let size = self.type_size(elem_ty)?.expect("array element cannot be unsized");
// FIXME: this creates a lot of stack frames if the element type has
// a drop impl
for i in 0..(len as u64) {
self.drop(Lvalue::Ptr { ptr: ptr.offset(i * size), extra }, elem_ty, drop)?;
}
}
self.push_stack_frame(
instance,
span,
mir,
Lvalue::from_ptr(Pointer::zst_ptr()),
StackPopCleanup::None,
)?;
ty::TyClosure(def_id, substs) => {
let fields = substs.upvar_tys(def_id, self.tcx);
self.drop_fields(fields, lval, ty, drop)?;
}
_ => bug!(),
}
Ok(())
}
fn drop_fields<I>(
&mut self,
fields: I,
lval: Lvalue<'tcx>,
ty: Ty<'tcx>,
drop: &mut Vec<(DefId, Value, &'tcx Substs<'tcx>)>,
) -> EvalResult<'tcx>
where I: Iterator<Item=Ty<'tcx>>,
{
trace!("drop_fields: {:?} of type {}", lval, ty);
for (i, field_ty) in fields.enumerate() {
let field_lval = self.lvalue_field(lval, i, ty, field_ty)?;
self.drop(field_lval, field_ty, drop)?;
}
Ok(())
}
fn type_needs_drop(&self, ty: Ty<'tcx>) -> bool {
self.tcx.type_needs_drop_given_env(ty, &self.tcx.empty_parameter_environment())
let mut arg_locals = self.frame().mir.args_iter();
assert_eq!(self.frame().mir.arg_count, 1);
let arg_local = arg_locals.next().unwrap();
let dest = self.eval_lvalue(&mir::Lvalue::Local(arg_local))?;
let arg_ty = self.tcx.mk_mut_ptr(ty);
self.write_value(arg, dest, arg_ty)
}
}

View file

@ -1,4 +1,3 @@
use rustc::hir::def_id::DefId;
use rustc::mir;
use rustc::ty::layout::{Layout, Size, Align};
use rustc::ty::subst::Substs;
@ -13,8 +12,7 @@ use value::{PrimVal, PrimValKind, Value};
impl<'a, 'tcx> EvalContext<'a, 'tcx> {
pub(super) fn call_intrinsic(
&mut self,
def_id: DefId,
substs: &'tcx Substs<'tcx>,
instance: ty::Instance<'tcx>,
args: &[mir::Operand<'tcx>],
dest: Lvalue<'tcx>,
dest_ty: Ty<'tcx>,
@ -30,8 +28,9 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
let usize = self.tcx.types.usize;
let f32 = self.tcx.types.f32;
let f64 = self.tcx.types.f64;
let substs = instance.substs;
let intrinsic_name = &self.tcx.item_name(def_id).as_str()[..];
let intrinsic_name = &self.tcx.item_name(instance.def_id()).as_str()[..];
match intrinsic_name {
"add_with_overflow" =>
self.intrinsic_with_overflow(mir::BinOp::Add, &args[0], &args[1], dest, dest_ty)?,
@ -171,35 +170,6 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
self.write_primval(dest, PrimVal::Bytes(discr_val), dest_ty)?;
}
"drop_in_place" => {
let ty = substs.type_at(0);
trace!("drop in place on {}", ty);
let ptr_ty = self.tcx.mk_mut_ptr(ty);
let lvalue = match self.follow_by_ref_value(arg_vals[0], ptr_ty)? {
Value::ByRef(_) => bug!("follow_by_ref_value returned ByRef"),
Value::ByVal(value) => Lvalue::from_ptr(value.to_ptr()?),
Value::ByValPair(ptr, extra) => Lvalue::Ptr {
ptr: ptr.to_ptr()?,
extra: match self.tcx.struct_tail(ty).sty {
ty::TyDynamic(..) => LvalueExtra::Vtable(extra.to_ptr()?),
ty::TyStr | ty::TySlice(_) => LvalueExtra::Length(extra.to_u64()?),
_ => bug!("invalid fat pointer type: {}", ptr_ty),
},
},
};
let mut drops = Vec::new();
self.drop(lvalue, ty, &mut drops)?;
let span = {
let frame = self.frame();
frame.mir[frame.block].terminator().source_info.span
};
// need to change the block before pushing the drop impl stack frames
// we could do this for all intrinsics before evaluating the intrinsics, but if
// the evaluation fails, we should not have moved forward
self.goto_block(target);
return self.eval_drop_impls(drops, span);
}
"sinf32" | "fabsf32" | "cosf32" |
"sqrtf32" | "expf32" | "exp2f32" |
"logf32" | "log10f32" | "log2f32" |

View file

@ -1,9 +1,7 @@
use rustc::hir::def_id::DefId;
use rustc::mir;
use rustc::ty::layout::Layout;
use rustc::ty::subst::Substs;
use rustc::ty::{self, Ty};
use rustc_const_math::ConstInt;
use rustc::ty::layout::Layout;
use syntax::codemap::Span;
use syntax::attr;
use syntax::abi::Abi;
@ -11,12 +9,13 @@ use syntax::abi::Abi;
use error::{EvalError, EvalResult};
use eval_context::{EvalContext, IntegerExt, StackPopCleanup, is_inhabited};
use lvalue::Lvalue;
use memory::{Pointer, FunctionDefinition, Function};
use memory::Pointer;
use value::PrimVal;
use value::Value;
use rustc_data_structures::indexed_vec::Idx;
mod intrinsic;
mod drop;
mod intrinsic;
impl<'a, 'tcx> EvalContext<'a, 'tcx> {
pub(super) fn goto_block(&mut self, target: mir::BasicBlock) {
@ -63,67 +62,50 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
};
let func_ty = self.operand_ty(func);
let fn_def = match func_ty.sty {
ty::TyFnPtr(bare_sig) => {
let bare_sig = self.erase_lifetimes(&bare_sig);
let (fn_def, sig) = match func_ty.sty {
ty::TyFnPtr(sig) => {
let fn_ptr = self.eval_operand_to_primval(func)?.to_ptr()?;
let fn_def = self.memory.get_fn(fn_ptr.alloc_id)?;
match fn_def {
Function::Concrete(fn_def) => {
// transmuting function pointers in miri is fine as long as the number of
// arguments and the abi don't change.
let sig = self.erase_lifetimes(&fn_def.sig);
if sig.abi != bare_sig.abi ||
sig.variadic != bare_sig.variadic ||
sig.inputs_and_output != bare_sig.inputs_and_output {
return Err(EvalError::FunctionPointerTyMismatch(sig, bare_sig));
let instance = self.memory.get_fn(fn_ptr.alloc_id)?;
let instance_ty = instance.def.def_ty(self.tcx);
let instance_ty = self.monomorphize(instance_ty, instance.substs);
match instance_ty.sty {
ty::TyFnDef(_, _, real_sig) => {
let sig = self.erase_lifetimes(&sig);
let real_sig = self.erase_lifetimes(&real_sig);
match instance.def {
// FIXME: this needs checks for weird transmutes
// we need to bail here, because noncapturing closures as fn ptrs fail the checks
ty::InstanceDef::ClosureOnceShim{..} => {}
_ => if sig.abi != real_sig.abi ||
sig.variadic != real_sig.variadic ||
sig.inputs_and_output != real_sig.inputs_and_output {
return Err(EvalError::FunctionPointerTyMismatch(real_sig, sig));
},
}
},
Function::NonCaptureClosureAsFnPtr(fn_def) => {
let sig = self.erase_lifetimes(&fn_def.sig);
assert_eq!(sig.abi, Abi::RustCall);
if sig.variadic != bare_sig.variadic ||
sig.inputs().len() != 1 {
return Err(EvalError::FunctionPointerTyMismatch(sig, bare_sig));
}
if let ty::TyTuple(fields, _) = sig.inputs()[0].sty {
if **fields != *bare_sig.inputs() {
return Err(EvalError::FunctionPointerTyMismatch(sig, bare_sig));
}
} else {
return Err(EvalError::FunctionPointerTyMismatch(sig, bare_sig));
}
},
other => return Err(EvalError::ExpectedConcreteFunction(other)),
ref other => bug!("instance def ty: {:?}", other),
}
self.memory.get_fn(fn_ptr.alloc_id)?
(instance, sig)
},
ty::TyFnDef(def_id, substs, fn_ty) => Function::Concrete(FunctionDefinition {
def_id,
substs,
sig: fn_ty,
}),
ty::TyFnDef(def_id, substs, sig) => (::eval_context::resolve(self.tcx, def_id, substs), sig),
_ => {
let msg = format!("can't handle callee of type {:?}", func_ty);
return Err(EvalError::Unimplemented(msg));
}
};
self.eval_fn_call(fn_def, destination, args, terminator.source_info.span)?;
let sig = self.erase_lifetimes(&sig);
self.eval_fn_call(fn_def, destination, args, terminator.source_info.span, sig)?;
}
Drop { ref location, target, .. } => {
trace!("TerminatorKind::drop: {:?}, {:?}", location, self.substs());
let lval = self.eval_lvalue(location)?;
let ty = self.lvalue_ty(location);
// we can't generate the drop stack frames on the fly,
// because that would change our call stack
// and very much confuse the further processing of the drop glue
let mut drops = Vec::new();
self.drop(lval, ty, &mut drops)?;
self.goto_block(target);
self.eval_drop_impls(drops, terminator.source_info.span)?;
let ty = ::eval_context::apply_param_substs(self.tcx, self.substs(), &ty);
let instance = ::eval_context::resolve_drop_in_place(self.tcx, ty);
self.drop_lvalue(lval, instance, ty, terminator.source_info.span)?;
}
Assert { ref cond, expected, ref msg, target, .. } => {
@ -158,162 +140,241 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
fn eval_fn_call(
&mut self,
fn_def: Function<'tcx>,
instance: ty::Instance<'tcx>,
destination: Option<(Lvalue<'tcx>, mir::BasicBlock)>,
arg_operands: &[mir::Operand<'tcx>],
span: Span,
sig: ty::FnSig<'tcx>,
) -> EvalResult<'tcx> {
use syntax::abi::Abi;
match fn_def {
// Intrinsics can only be addressed directly
Function::Concrete(FunctionDefinition { def_id, substs, sig }) if sig.abi() == Abi::RustIntrinsic => {
let sig = self.erase_lifetimes(&sig);
let ty = sig.output();
let layout = self.type_layout(ty)?;
trace!("eval_fn_call: {:#?}", instance);
match instance.def {
ty::InstanceDef::Intrinsic(..) => {
let (ret, target) = match destination {
Some(dest) if is_inhabited(self.tcx, ty) => dest,
Some(dest) => dest,
_ => return Err(EvalError::Unreachable),
};
self.call_intrinsic(def_id, substs, arg_operands, ret, ty, layout, target)?;
self.dump_local(ret);
Ok(())
},
// C functions can only be addressed directly
Function::Concrete(FunctionDefinition { def_id, sig, ..}) if sig.abi() == Abi::C => {
let sig = self.erase_lifetimes(&sig);
let ty = sig.output();
let (ret, target) = destination.unwrap();
self.call_c_abi(def_id, arg_operands, ret, ty)?;
if !is_inhabited(self.tcx, ty) {
return Err(EvalError::Unreachable);
}
let layout = self.type_layout(ty)?;
self.call_intrinsic(instance, arg_operands, ret, ty, layout, target)?;
self.dump_local(ret);
self.goto_block(target);
Ok(())
},
Function::DropGlue(_) => Err(EvalError::ManuallyCalledDropGlue),
Function::Concrete(FunctionDefinition { def_id, sig, substs }) if sig.abi() == Abi::Rust || sig.abi() == Abi::RustCall => {
ty::InstanceDef::ClosureOnceShim{..} => {
let mut args = Vec::new();
for arg in arg_operands {
let arg_val = self.eval_operand(arg)?;
let arg_ty = self.operand_ty(arg);
args.push((arg_val, arg_ty));
}
// Only trait methods can have a Self parameter.
let (resolved_def_id, resolved_substs, temporaries) =
if let Some(trait_id) = self.tcx.trait_of_item(def_id) {
self.trait_method(trait_id, def_id, substs, &mut args)?
} else {
(def_id, substs, Vec::new())
};
// FIXME(eddyb) Detect ADT constructors more efficiently.
if let Some(adt_def) = sig.output().skip_binder().ty_adt_def() {
let dids = adt_def.variants.iter().map(|v| v.did);
let discrs = adt_def.discriminants(self.tcx).map(ConstInt::to_u128_unchecked);
if let Some((_, disr_val)) = dids.zip(discrs).find(|&(did, _)| resolved_def_id == did) {
let (lvalue, target) = destination.expect("tuple struct constructors can't diverge");
let dest_ty = self.tcx.item_type(adt_def.did);
let dest_layout = self.type_layout(dest_ty)?;
trace!("layout({:?}) = {:#?}", dest_ty, dest_layout);
match *dest_layout {
Layout::Univariant { .. } => {
assert_eq!(disr_val, 0);
self.assign_fields(lvalue, dest_ty, args)?;
},
Layout::General { discr, ref variants, .. } => {
let discr_size = discr.size().bytes();
self.assign_discr_and_fields(
lvalue,
dest_ty,
variants[disr_val as usize].offsets[0].bytes(),
args,
disr_val,
disr_val as usize,
discr_size,
)?;
},
Layout::StructWrappedNullablePointer { nndiscr, ref discrfield, .. } => {
if nndiscr as u128 == disr_val {
self.assign_fields(lvalue, dest_ty, args)?;
} else {
for (_, ty) in args {
assert_eq!(self.type_size(ty)?, Some(0));
}
let (offset, ty) = self.nonnull_offset_and_ty(dest_ty, nndiscr, discrfield)?;
// FIXME(solson)
let dest = self.force_allocation(lvalue)?.to_ptr();
let dest = dest.offset(offset.bytes());
let dest_size = self.type_size(ty)?
.expect("bad StructWrappedNullablePointer discrfield");
self.memory.write_int(dest, 0, dest_size)?;
}
},
Layout::RawNullablePointer { .. } => {
assert_eq!(args.len(), 1);
let (val, ty) = args.pop().unwrap();
self.write_value(val, lvalue, ty)?;
},
_ => bug!("bad layout for tuple struct constructor: {:?}", dest_layout),
if self.eval_fn_call_inner(
instance,
destination,
span,
)? {
return Ok(());
}
let mut arg_locals = self.frame().mir.args_iter();
match sig.abi {
// closure as closure once
Abi::RustCall => {
for (arg_local, (arg_val, arg_ty)) in arg_locals.zip(args) {
let dest = self.eval_lvalue(&mir::Lvalue::Local(arg_local))?;
self.write_value(arg_val, dest, arg_ty)?;
}
},
// non capture closure as fn ptr
// need to inject zst ptr for closure object (aka do nothing)
// and need to pack arguments
Abi::Rust => {
trace!("arg_locals: {:?}", self.frame().mir.args_iter().collect::<Vec<_>>());
trace!("arg_operands: {:?}", arg_operands);
let local = arg_locals.nth(1).unwrap();
for (i, (arg_val, arg_ty)) in args.into_iter().enumerate() {
let dest = self.eval_lvalue(&mir::Lvalue::Local(local).field(mir::Field::new(i), arg_ty))?;
self.write_value(arg_val, dest, arg_ty)?;
}
},
_ => bug!("bad ABI for ClosureOnceShim: {:?}", sig.abi),
}
Ok(())
}
ty::InstanceDef::Item(_) => {
match sig.abi {
Abi::C => {
let ty = sig.output();
let (ret, target) = destination.unwrap();
self.call_c_abi(instance.def_id(), arg_operands, ret, ty)?;
self.dump_local(ret);
self.goto_block(target);
return Ok(());
}
},
Abi::Rust | Abi::RustCall => {},
_ => unimplemented!(),
}
self.eval_fn_call_inner(
resolved_def_id,
resolved_substs,
destination,
args,
temporaries,
span,
)
},
Function::NonCaptureClosureAsFnPtr(FunctionDefinition { def_id, substs, sig }) if sig.abi() == Abi::RustCall => {
let sig = self.erase_lifetimes(&sig);
let mut args = Vec::new();
for arg in arg_operands {
let arg_val = self.eval_operand(arg)?;
let arg_ty = self.operand_ty(arg);
args.push((arg_val, arg_ty));
}
args.insert(0, (
Value::ByVal(PrimVal::Undef),
sig.inputs()[0],
));
self.eval_fn_call_inner(
def_id,
substs,
if self.eval_fn_call_inner(
instance,
destination,
args,
Vec::new(),
span,
)? {
return Ok(());
}
let mut arg_locals = self.frame().mir.args_iter();
trace!("ABI: {:?}", sig.abi);
trace!("arg_locals: {:?}", self.frame().mir.args_iter().collect::<Vec<_>>());
trace!("arg_operands: {:?}", arg_operands);
match sig.abi {
Abi::Rust => {
for (arg_local, (arg_val, arg_ty)) in arg_locals.zip(args) {
let dest = self.eval_lvalue(&mir::Lvalue::Local(arg_local))?;
self.write_value(arg_val, dest, arg_ty)?;
}
}
Abi::RustCall => {
assert_eq!(args.len(), 2);
{ // write first argument
let first_local = arg_locals.next().unwrap();
let dest = self.eval_lvalue(&mir::Lvalue::Local(first_local))?;
let (arg_val, arg_ty) = args.remove(0);
self.write_value(arg_val, dest, arg_ty)?;
}
// unpack and write all other args
let (arg_val, arg_ty) = args.remove(0);
let layout = self.type_layout(arg_ty)?;
if let (&ty::TyTuple(fields, _), &Layout::Univariant { ref variant, .. }) = (&arg_ty.sty, layout) {
trace!("fields: {:?}", fields);
if self.frame().mir.args_iter().count() == fields.len() + 1 {
let offsets = variant.offsets.iter().map(|s| s.bytes());
match arg_val {
Value::ByRef(ptr) => {
for ((offset, ty), arg_local) in offsets.zip(fields).zip(arg_locals) {
let arg = Value::ByRef(ptr.offset(offset));
let dest = self.eval_lvalue(&mir::Lvalue::Local(arg_local))?;
trace!("writing arg {:?} to {:?} (type: {})", arg, dest, ty);
self.write_value(arg, dest, ty)?;
}
},
Value::ByVal(PrimVal::Undef) => {},
other => {
assert_eq!(fields.len(), 1);
let dest = self.eval_lvalue(&mir::Lvalue::Local(arg_locals.next().unwrap()))?;
self.write_value(other, dest, fields[0])?;
}
}
} else {
trace!("manual impl of rust-call ABI");
// called a manual impl of a rust-call function
let dest = self.eval_lvalue(&mir::Lvalue::Local(arg_locals.next().unwrap()))?;
self.write_value(arg_val, dest, arg_ty)?;
}
} else {
bug!("rust-call ABI tuple argument was {:?}, {:?}", arg_ty, layout);
}
}
_ => unimplemented!(),
}
Ok(())
},
ty::InstanceDef::DropGlue(..) => {
assert_eq!(arg_operands.len(), 1);
assert_eq!(sig.abi, Abi::Rust);
let val = self.eval_operand(&arg_operands[0])?;
let ty = self.operand_ty(&arg_operands[0]);
let (_, target) = destination.expect("diverging drop glue");
self.goto_block(target);
// FIXME: deduplicate these matches
let pointee_type = match ty.sty {
ty::TyRawPtr(ref tam) |
ty::TyRef(_, ref tam) => tam.ty,
ty::TyAdt(ref def, _) if def.is_box() => ty.boxed_ty(),
_ => bug!("can only deref pointer types"),
};
self.drop(val, instance, pointee_type, span)
},
ty::InstanceDef::FnPtrShim(..) => {
trace!("ABI: {}", sig.abi);
let mut args = Vec::new();
for arg in arg_operands {
let arg_val = self.eval_operand(arg)?;
let arg_ty = self.operand_ty(arg);
args.push((arg_val, arg_ty));
}
if self.eval_fn_call_inner(
instance,
destination,
span,
)? {
return Ok(());
}
let arg_locals = self.frame().mir.args_iter();
match sig.abi {
Abi::Rust => {
args.remove(0);
},
Abi::RustCall => {},
_ => unimplemented!(),
};
for (arg_local, (arg_val, arg_ty)) in arg_locals.zip(args) {
let dest = self.eval_lvalue(&mir::Lvalue::Local(arg_local))?;
self.write_value(arg_val, dest, arg_ty)?;
}
Ok(())
},
ty::InstanceDef::Virtual(_, idx) => {
let ptr_size = self.memory.pointer_size();
let (_, vtable) = self.eval_operand(&arg_operands[0])?.expect_ptr_vtable_pair(&self.memory)?;
let fn_ptr = self.memory.read_ptr(vtable.offset(ptr_size * (idx as u64 + 3)))?;
let instance = self.memory.get_fn(fn_ptr.alloc_id)?;
let mut arg_operands = arg_operands.to_vec();
let ty = self.operand_ty(&arg_operands[0]);
let ty = self.get_field_ty(ty, 0)?;
match arg_operands[0] {
mir::Operand::Consume(ref mut lval) => *lval = lval.clone().field(mir::Field::new(0), ty),
_ => bug!("virtual call first arg cannot be a constant"),
}
// recurse with concrete function
self.eval_fn_call(
instance,
destination,
&arg_operands,
span,
sig,
)
}
Function::Concrete(fn_def) => Err(EvalError::Unimplemented(format!("can't handle function with {:?} ABI", fn_def.sig.abi()))),
other => Err(EvalError::Unimplemented(format!("can't call function kind {:#?}", other))),
},
}
}
/// Returns Ok(true) when the function was handled completely due to mir not being available
fn eval_fn_call_inner(
&mut self,
resolved_def_id: DefId,
resolved_substs: &'tcx Substs,
instance: ty::Instance<'tcx>,
destination: Option<(Lvalue<'tcx>, mir::BasicBlock)>,
args: Vec<(Value, Ty<'tcx>)>,
temporaries: Vec<(Pointer, Ty<'tcx>)>,
span: Span,
) -> EvalResult<'tcx> {
trace!("eval_fn_call_inner: {:#?}, {:#?}, {:#?}", args, temporaries, destination);
) -> EvalResult<'tcx, bool> {
trace!("eval_fn_call_inner: {:#?}, {:#?}", instance, destination);
let mir = match self.load_mir(resolved_def_id) {
// Only trait methods can have a Self parameter.
let mir = match self.load_mir(instance.def) {
Ok(mir) => mir,
Err(EvalError::NoMirFor(path)) => {
match &path[..] {
// let's just ignore all output for now
"std::io::_print" => {
self.goto_block(destination.unwrap().1);
return Ok(());
return Ok(true);
},
"std::thread::Builder::new" => return Err(EvalError::Unimplemented("miri does not support threading".to_owned())),
"std::env::args" => return Err(EvalError::Unimplemented("miri does not support program arguments".to_owned())),
@ -326,7 +387,7 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
let bool = self.tcx.types.bool;
self.write_primval(lval, PrimVal::from_bool(false), bool)?;
self.goto_block(block);
return Ok(());
return Ok(true);
}
_ => {},
}
@ -344,23 +405,14 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
};
self.push_stack_frame(
resolved_def_id,
instance,
span,
mir,
resolved_substs,
return_lvalue,
return_to_block,
temporaries,
)?;
let arg_locals = self.frame().mir.args_iter();
assert_eq!(self.frame().mir.arg_count, args.len());
for (arg_local, (arg_val, arg_ty)) in arg_locals.zip(args) {
let dest = self.eval_lvalue(&mir::Lvalue::Local(arg_local))?;
self.write_value(arg_val, dest, arg_ty)?;
}
Ok(())
Ok(false)
}
pub fn read_discriminant_value(&self, adt_ptr: Pointer, adt_ty: Ty<'tcx>) -> EvalResult<'tcx, u128> {
@ -529,34 +581,5 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
// as if the call just completed and it's returning to the
// current frame.
Ok(())
}
pub(crate) fn unpack_fn_args(&self, args: &mut Vec<(Value, Ty<'tcx>)>) -> EvalResult<'tcx> {
if let Some((last, last_ty)) = args.pop() {
let last_layout = self.type_layout(last_ty)?;
match (&last_ty.sty, last_layout) {
(&ty::TyTuple(fields, _),
&Layout::Univariant { ref variant, .. }) => {
let offsets = variant.offsets.iter().map(|s| s.bytes());
match last {
Value::ByRef(last_ptr) => {
for (offset, ty) in offsets.zip(fields) {
let arg = Value::ByRef(last_ptr.offset(offset));
args.push((arg, ty));
}
},
// propagate undefs
undef @ Value::ByVal(PrimVal::Undef) => {
for field_ty in fields {
args.push((undef, field_ty));
}
},
_ => bug!("rust-call ABI tuple argument was {:?}, but {:?} were expected", last, fields),
}
}
ty => bug!("expected tuple as last argument in function with 'rust-call' ABI, got {:?}", ty),
}
}
Ok(())
}
}
}

View file

@ -1,178 +1,17 @@
use rustc::traits::{self, Reveal, SelectionContext};
use rustc::traits::{self, Reveal};
use eval_context::EvalContext;
use memory::Pointer;
use rustc::hir::def_id::DefId;
use rustc::ty::fold::TypeFoldable;
use rustc::ty::subst::Substs;
use rustc::ty::{self, Ty, TyCtxt};
use rustc::ty::{self, Ty};
use syntax::codemap::DUMMY_SP;
use syntax::{ast, abi};
use syntax::ast;
use error::{EvalError, EvalResult};
use memory::Function;
use value::PrimVal;
use value::Value;
use error::EvalResult;
impl<'a, 'tcx> EvalContext<'a, 'tcx> {
/// Trait method, which has to be resolved to an impl method.
pub(crate) fn trait_method(
&mut self,
trait_id: DefId,
def_id: DefId,
substs: &'tcx Substs<'tcx>,
args: &mut Vec<(Value, Ty<'tcx>)>,
) -> EvalResult<'tcx, (DefId, &'tcx Substs<'tcx>, Vec<(Pointer, Ty<'tcx>)>)> {
let trait_ref = ty::TraitRef::from_method(self.tcx, trait_id, substs);
let trait_ref = self.tcx.normalize_associated_type(&ty::Binder(trait_ref));
match self.fulfill_obligation(trait_ref) {
traits::VtableImpl(vtable_impl) => {
let impl_did = vtable_impl.impl_def_id;
let mname = self.tcx.item_name(def_id);
// Create a concatenated set of substitutions which includes those from the impl
// and those from the method:
let (did, substs) = find_method(self.tcx, substs, impl_did, vtable_impl.substs, mname);
Ok((did, substs, Vec::new()))
}
traits::VtableClosure(vtable_closure) => {
let trait_closure_kind = self.tcx
.lang_items
.fn_trait_kind(trait_id)
.expect("The substitutions should have no type parameters remaining after passing through fulfill_obligation");
let closure_kind = self.tcx.closure_kind(vtable_closure.closure_def_id);
trace!("closures {:?}, {:?}", closure_kind, trait_closure_kind);
self.unpack_fn_args(args)?;
let mut temporaries = Vec::new();
match (closure_kind, trait_closure_kind) {
(ty::ClosureKind::Fn, ty::ClosureKind::Fn) |
(ty::ClosureKind::FnMut, ty::ClosureKind::FnMut) |
(ty::ClosureKind::FnOnce, ty::ClosureKind::FnOnce) |
(ty::ClosureKind::Fn, ty::ClosureKind::FnMut) => {} // No adapter needed.
(ty::ClosureKind::Fn, ty::ClosureKind::FnOnce) |
(ty::ClosureKind::FnMut, ty::ClosureKind::FnOnce) => {
// The closure fn is a `fn(&self, ...)` or `fn(&mut self, ...)`.
// We want a `fn(self, ...)`.
// We can produce this by doing something like:
//
// fn call_once(self, ...) { call_mut(&self, ...) }
// fn call_once(mut self, ...) { call_mut(&mut self, ...) }
//
// These are both the same at trans time.
// Interpreter magic: insert an intermediate pointer, so we can skip the
// intermediate function call.
let ptr = match args[0].0 {
Value::ByRef(ptr) => ptr,
Value::ByVal(primval) => {
let ptr = self.alloc_ptr(args[0].1)?;
let size = self.type_size(args[0].1)?.expect("closures are sized");
self.memory.write_primval(ptr, primval, size)?;
ptr
},
Value::ByValPair(a, b) => {
let ptr = self.alloc_ptr(args[0].1)?;
self.write_pair_to_ptr(a, b, ptr, args[0].1)?;
ptr
},
};
temporaries.push((ptr, args[0].1));
args[0].0 = Value::ByVal(PrimVal::Ptr(ptr));
args[0].1 = self.tcx.mk_mut_ptr(args[0].1);
}
_ => bug!("cannot convert {:?} to {:?}", closure_kind, trait_closure_kind),
}
Ok((vtable_closure.closure_def_id, vtable_closure.substs.substs, temporaries))
}
traits::VtableFnPointer(vtable_fn_ptr) => {
if let ty::TyFnDef(did, substs, _) = vtable_fn_ptr.fn_ty.sty {
args.remove(0);
self.unpack_fn_args(args)?;
Ok((did, substs, Vec::new()))
} else {
bug!("VtableFnPointer did not contain a concrete function: {:?}", vtable_fn_ptr)
}
}
traits::VtableObject(ref data) => {
let idx = self.tcx.get_vtable_index_of_object_method(data, def_id) as u64;
if args.is_empty() {
return Err(EvalError::VtableForArgumentlessMethod);
}
let (self_ptr, vtable) = args[0].0.expect_ptr_vtable_pair(&self.memory)?;
let idx = idx + 3;
let offset = idx * self.memory.pointer_size();
let fn_ptr = self.memory.read_ptr(vtable.offset(offset))?;
trace!("args: {:#?}", args);
match self.memory.get_fn(fn_ptr.alloc_id)? {
Function::FnDefAsTraitObject(fn_def) => {
trace!("sig: {:#?}", fn_def.sig);
assert!(fn_def.sig.abi() != abi::Abi::RustCall);
assert_eq!(args.len(), 2);
// a function item turned into a closure trait object
// the first arg is just there to give use the vtable
args.remove(0);
self.unpack_fn_args(args)?;
Ok((fn_def.def_id, fn_def.substs, Vec::new()))
},
Function::DropGlue(_) => Err(EvalError::ManuallyCalledDropGlue),
Function::Concrete(fn_def) => {
let sig = self.erase_lifetimes(&fn_def.sig);
trace!("sig: {:#?}", sig);
args[0] = (
Value::ByVal(PrimVal::Ptr(self_ptr)),
sig.inputs()[0],
);
Ok((fn_def.def_id, fn_def.substs, Vec::new()))
},
Function::NonCaptureClosureAsFnPtr(fn_def) => {
let sig = self.erase_lifetimes(&fn_def.sig);
args.insert(0, (
Value::ByVal(PrimVal::Undef),
sig.inputs()[0],
));
Ok((fn_def.def_id, fn_def.substs, Vec::new()))
}
Function::Closure(fn_def) => {
self.unpack_fn_args(args)?;
Ok((fn_def.def_id, fn_def.substs, Vec::new()))
}
Function::FnPtrAsTraitObject(sig) => {
let sig = self.erase_lifetimes(&sig);
trace!("sig: {:#?}", sig);
// the first argument was the fat ptr
args.remove(0);
self.unpack_fn_args(args)?;
let fn_ptr = self.memory.read_ptr(self_ptr)?;
let fn_def = match self.memory.get_fn(fn_ptr.alloc_id)? {
Function::Concrete(fn_def) => {
let fn_def_sig = self.erase_lifetimes(&fn_def.sig);
assert_eq!(sig, fn_def_sig);
fn_def
},
Function::NonCaptureClosureAsFnPtr(fn_def) => {
let fn_def_sig = self.erase_lifetimes(&fn_def.sig);
args.insert(0, (
Value::ByVal(PrimVal::Undef),
fn_def_sig.inputs()[0],
));
fn_def
},
other => bug!("FnPtrAsTraitObject for {:?}", other),
};
Ok((fn_def.def_id, fn_def.substs, Vec::new()))
}
}
},
vtable => bug!("resolved vtable bad vtable {:?} in trans", vtable),
}
}
pub(crate) fn fulfill_obligation(&self, trait_ref: ty::PolyTraitRef<'tcx>) -> traits::Vtable<'tcx, ()> {
// Do the initial selection for the obligation. This yields the shallow result we are
@ -202,106 +41,28 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
/// The `trait_ref` encodes the erased self type. Hence if we are
/// making an object `Foo<Trait>` from a value of type `Foo<T>`, then
/// `trait_ref` would map `T:Trait`.
pub fn get_vtable(&mut self, trait_ref: ty::PolyTraitRef<'tcx>) -> EvalResult<'tcx, Pointer> {
let tcx = self.tcx;
pub fn get_vtable(&mut self, ty: Ty<'tcx>, trait_ref: ty::PolyTraitRef<'tcx>) -> EvalResult<'tcx, Pointer> {
debug!("get_vtable(trait_ref={:?})", trait_ref);
let methods: Vec<_> = traits::supertraits(tcx, trait_ref).flat_map(|trait_ref| {
match self.fulfill_obligation(trait_ref) {
// Should default trait error here?
traits::VtableDefaultImpl(_) |
traits::VtableBuiltin(_) => {
Vec::new().into_iter()
}
traits::VtableImpl(traits::VtableImplData { impl_def_id: id, substs, .. }) => {
self.get_vtable_methods(id, substs)
.into_iter()
.map(|opt_mth| opt_mth.map(|mth| {
let fn_ty = self.tcx.item_type(mth.method.def_id);
let fn_ty = match fn_ty.sty {
ty::TyFnDef(_, _, fn_ty) => fn_ty,
_ => bug!("bad function type: {}", fn_ty),
};
let fn_ty = self.tcx.erase_regions(&fn_ty);
self.memory.create_fn_ptr(mth.method.def_id, mth.substs, fn_ty)
}))
.collect::<Vec<_>>()
.into_iter()
}
traits::VtableClosure(
traits::VtableClosureData {
closure_def_id,
substs,
..
}
) => {
let closure_type = self.tcx.closure_type(closure_def_id);
vec![Some(self.memory.create_closure_ptr(closure_def_id, substs, closure_type))].into_iter()
}
// turn a function definition into a Fn trait object
traits::VtableFnPointer(traits::VtableFnPointerData { fn_ty, .. }) => {
match fn_ty.sty {
ty::TyFnDef(did, substs, bare_fn_ty) => {
vec![Some(self.memory.create_fn_as_trait_glue(did, substs, bare_fn_ty))].into_iter()
},
ty::TyFnPtr(bare_fn_ty) => {
vec![Some(self.memory.create_fn_ptr_as_trait_glue(bare_fn_ty))].into_iter()
},
_ => bug!("bad VtableFnPointer fn_ty: {:#?}", fn_ty.sty),
}
}
traits::VtableObject(ref data) => {
// this would imply that the Self type being erased is
// an object type; this cannot happen because we
// cannot cast an unsized type into a trait object
bug!("cannot get vtable for an object type: {:?}",
data);
}
vtable @ traits::VtableParam(..) => {
bug!("resolved vtable for {:?} to bad vtable {:?} in trans",
trait_ref,
vtable);
}
}
}).collect();
let size = self.type_size(trait_ref.self_ty())?.expect("can't create a vtable for an unsized type");
let align = self.type_align(trait_ref.self_ty())?;
let ptr_size = self.memory.pointer_size();
let vtable = self.memory.allocate(ptr_size * (3 + methods.len() as u64), ptr_size)?;
let methods = ::rustc::traits::get_vtable_methods(self.tcx, trait_ref);
let vtable = self.memory.allocate(ptr_size * (3 + methods.count() as u64), ptr_size)?;
// in case there is no drop function to be called, this still needs to be initialized
self.memory.write_usize(vtable, 0)?;
if let ty::TyAdt(adt_def, substs) = trait_ref.self_ty().sty {
if let Some(destructor) = adt_def.destructor(self.tcx) {
let fn_ty = match self.tcx.item_type(destructor.did).sty {
ty::TyFnDef(_, _, fn_ty) => self.tcx.erase_regions(&fn_ty),
_ => bug!("drop method is not a TyFnDef"),
};
let fn_ty = self.erase_lifetimes(&fn_ty);
// The real type is taken from the self argument in `fn drop(&mut self)`
let real_ty = match fn_ty.inputs()[0].sty {
ty::TyRef(_, mt) => self.monomorphize(mt.ty, substs),
_ => bug!("first argument of Drop::drop must be &mut T"),
};
let fn_ptr = self.memory.create_drop_glue(real_ty);
self.memory.write_ptr(vtable, fn_ptr)?;
}
}
let drop = ::eval_context::resolve_drop_in_place(self.tcx, ty);
let drop = self.memory.create_fn_alloc(drop);
self.memory.write_ptr(vtable, drop)?;
self.memory.write_usize(vtable.offset(ptr_size), size)?;
self.memory.write_usize(vtable.offset(ptr_size * 2), align)?;
for (i, method) in methods.into_iter().enumerate() {
if let Some(method) = method {
self.memory.write_ptr(vtable.offset(ptr_size * (3 + i as u64)), method)?;
for (i, method) in ::rustc::traits::get_vtable_methods(self.tcx, trait_ref).enumerate() {
if let Some((def_id, substs)) = method {
let instance = ::eval_context::resolve(self.tcx, def_id, substs);
let fn_ptr = self.memory.create_fn_alloc(instance);
self.memory.write_ptr(vtable.offset(ptr_size * (3 + i as u64)), fn_ptr)?;
}
}
@ -310,7 +71,7 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
Ok(vtable)
}
pub fn read_drop_type_from_vtable(&self, vtable: Pointer) -> EvalResult<'tcx, Option<Ty<'tcx>>> {
pub fn read_drop_type_from_vtable(&self, vtable: Pointer) -> EvalResult<'tcx, Option<ty::Instance<'tcx>>> {
let drop_fn = self.memory.read_ptr(vtable)?;
// just a sanity check
@ -320,7 +81,7 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
if drop_fn == Pointer::from_int(0) {
Ok(None)
} else {
self.memory.get_fn(drop_fn.alloc_id)?.expect_drop_glue_real_ty().map(Some)
self.memory.get_fn(drop_fn.alloc_id).map(Some)
}
}
@ -331,99 +92,11 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
Ok((size, align))
}
fn get_vtable_methods(&mut self, impl_id: DefId, substs: &'tcx Substs<'tcx>) -> Vec<Option<ImplMethod<'tcx>>> {
debug!("get_vtable_methods(impl_id={:?}, substs={:?}", impl_id, substs);
let trait_id = match self.tcx.impl_trait_ref(impl_id) {
Some(t_id) => t_id.def_id,
None => bug!("make_impl_vtable: don't know how to \
make a vtable for a type impl!")
};
self.tcx.populate_implementations_for_trait_if_necessary(trait_id);
self.tcx
.associated_items(trait_id)
// Filter out non-method items.
.filter_map(|trait_method_type| {
if trait_method_type.kind != ty::AssociatedKind::Method {
return None;
}
debug!("get_vtable_methods: trait_method_type={:?}",
trait_method_type);
let name = trait_method_type.name;
// Some methods cannot be called on an object; skip those.
if !self.tcx.is_vtable_safe_method(trait_id, &trait_method_type) {
debug!("get_vtable_methods: not vtable safe");
return Some(None);
}
debug!("get_vtable_methods: trait_method_type={:?}",
trait_method_type);
// the method may have some early-bound lifetimes, add
// regions for those
let method_substs = Substs::for_item(self.tcx, trait_method_type.def_id,
|_, _| self.tcx.mk_region(ty::ReErased),
|_, _| self.tcx.types.err);
// The substitutions we have are on the impl, so we grab
// the method type from the impl to substitute into.
let mth = get_impl_method(self.tcx, method_substs, impl_id, substs, name);
debug!("get_vtable_methods: mth={:?}", mth);
// If this is a default method, it's possible that it
// relies on where clauses that do not hold for this
// particular set of type parameters. Note that this
// method could then never be called, so we do not want to
// try and trans it, in that case. Issue #23435.
if mth.is_provided {
let predicates = self.tcx.item_predicates(trait_method_type.def_id).instantiate_own(self.tcx, mth.substs);
if !self.normalize_and_test_predicates(predicates.predicates) {
debug!("get_vtable_methods: predicates do not hold");
return Some(None);
}
}
Some(Some(mth))
})
.collect()
}
/// Normalizes the predicates and checks whether they hold. If this
/// returns false, then either normalize encountered an error or one
/// of the predicates did not hold. Used when creating vtables to
/// check for unsatisfiable methods.
fn normalize_and_test_predicates(&mut self, predicates: Vec<ty::Predicate<'tcx>>) -> bool {
debug!("normalize_and_test_predicates(predicates={:?})",
predicates);
self.tcx.infer_ctxt((), Reveal::All).enter(|infcx| {
let mut selcx = SelectionContext::new(&infcx);
let mut fulfill_cx = traits::FulfillmentContext::new();
let cause = traits::ObligationCause::dummy();
let traits::Normalized { value: predicates, obligations } =
traits::normalize(&mut selcx, cause.clone(), &predicates);
for obligation in obligations {
fulfill_cx.register_predicate_obligation(&infcx, obligation);
}
for predicate in predicates {
let obligation = traits::Obligation::new(cause.clone(), predicate);
fulfill_cx.register_predicate_obligation(&infcx, obligation);
}
fulfill_cx.select_all_or_error(&infcx).is_ok()
})
}
pub(crate) fn resolve_associated_const(
&self,
def_id: DefId,
substs: &'tcx Substs<'tcx>,
) -> (DefId, &'tcx Substs<'tcx>) {
) -> ty::Instance<'tcx> {
if let Some(trait_id) = self.tcx.trait_of_item(def_id) {
let trait_ref = ty::Binder(ty::TraitRef::new(trait_id, substs));
let vtable = self.fulfill_obligation(trait_ref);
@ -432,86 +105,10 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
let assoc_const_opt = self.tcx.associated_items(vtable_impl.impl_def_id)
.find(|item| item.kind == ty::AssociatedKind::Const && item.name == name);
if let Some(assoc_const) = assoc_const_opt {
return (assoc_const.def_id, vtable_impl.substs);
return ty::Instance::new(assoc_const.def_id, vtable_impl.substs);
}
}
}
(def_id, substs)
}
}
#[derive(Debug)]
pub(super) struct ImplMethod<'tcx> {
pub(super) method: ty::AssociatedItem,
pub(super) substs: &'tcx Substs<'tcx>,
pub(super) is_provided: bool,
}
/// Locates the applicable definition of a method, given its name.
pub(super) fn get_impl_method<'a, 'tcx>(
tcx: TyCtxt<'a, 'tcx, 'tcx>,
substs: &'tcx Substs<'tcx>,
impl_def_id: DefId,
impl_substs: &'tcx Substs<'tcx>,
name: ast::Name,
) -> ImplMethod<'tcx> {
assert!(!substs.needs_infer());
let trait_def_id = tcx.trait_id_of_impl(impl_def_id).unwrap();
let trait_def = tcx.lookup_trait_def(trait_def_id);
match trait_def.ancestors(impl_def_id).defs(tcx, name, ty::AssociatedKind::Method).next() {
Some(node_item) => {
let substs = tcx.infer_ctxt((), Reveal::All).enter(|infcx| {
let substs = substs.rebase_onto(tcx, trait_def_id, impl_substs);
let substs = traits::translate_substs(&infcx, impl_def_id,
substs, node_item.node);
tcx.lift(&substs).unwrap_or_else(|| {
bug!("trans::meth::get_impl_method: translate_substs \
returned {:?} which contains inference types/regions",
substs);
})
});
ImplMethod {
method: node_item.item,
substs,
is_provided: node_item.node.is_from_trait(),
}
}
None => {
bug!("method {:?} not found in {:?}", name, impl_def_id)
}
}
}
/// Locates the applicable definition of a method, given its name.
pub fn find_method<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
substs: &'tcx Substs<'tcx>,
impl_def_id: DefId,
impl_substs: &'tcx Substs<'tcx>,
name: ast::Name)
-> (DefId, &'tcx Substs<'tcx>)
{
assert!(!substs.needs_infer());
let trait_def_id = tcx.trait_id_of_impl(impl_def_id).unwrap();
let trait_def = tcx.lookup_trait_def(trait_def_id);
match trait_def.ancestors(impl_def_id).defs(tcx, name, ty::AssociatedKind::Method).next() {
Some(node_item) => {
let substs = tcx.infer_ctxt((), Reveal::All).enter(|infcx| {
let substs = substs.rebase_onto(tcx, trait_def_id, impl_substs);
let substs = traits::translate_substs(&infcx, impl_def_id, substs, node_item.node);
tcx.lift(&substs).unwrap_or_else(|| {
bug!("find_method: translate_substs \
returned {:?} which contains inference types/regions",
substs);
})
});
(node_item.item.def_id, substs)
}
None => {
bug!("method {:?} not found in {:?}", name, impl_def_id)
}
ty::Instance::new(def_id, substs)
}
}

View file

@ -0,0 +1,4 @@
fn main() {
let f: fn(i32) -> Option<i32> = Some::<i32>;
f(42);
}

View file

@ -0,0 +1,19 @@
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Test that overloaded calls work with zero arity closures
// pretty-expanded FIXME #23616
fn main() {
let functions: [Box<Fn() -> Option<()>>; 1] = [Box::new(|| None)];
let _: Option<Vec<()>> = functions.iter().map(|f| (*f)()).collect();
}

View file

@ -0,0 +1,41 @@
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![feature(box_syntax)]
trait T {
fn print(&self);
}
#[derive(Debug)]
struct S {
s: isize,
}
impl T for S {
fn print(&self) {
println!("{:?}", self);
}
}
fn print_t(t: &T) {
t.print();
}
fn print_s(s: &S) {
s.print();
}
pub fn main() {
let s: Box<S> = box S { s: 5 };
print_s(&*s);
let t: Box<T> = s as Box<T>;
print_t(&*t);
}