Merge pull request #57 from oli-obk/master

fix enum downcasting and backtrace panics
This commit is contained in:
Scott Olson 2016-09-21 07:29:08 -06:00 committed by GitHub
commit 07b8500505
6 changed files with 360 additions and 171 deletions

View file

@ -907,9 +907,9 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
Downcast(_, variant) => {
use rustc::ty::layout::Layout::*;
match *base_layout {
General { discr, .. } => {
General { ref variants, .. } => {
return Ok(Lvalue {
ptr: base.ptr.offset(discr.size().bytes() as isize),
ptr: base.ptr.offset(variants[variant].field_offset(1).bytes() as isize),
extra: LvalueExtra::DowncastVariant(variant),
});
}
@ -1188,13 +1188,7 @@ fn report(tcx: TyCtxt, ecx: &EvalContext, e: EvalError) {
ppaux::parameterized(f, self.1, self.0, ppaux::Ns::Value, &[])
}
}
let inst = Instance(def_id, substs);
match ::std::panic::catch_unwind(|| {
format!("inside call to {}", inst)
}) {
Ok(msg) => err.span_note(span, &msg),
Err(_) => err.span_note(span, &format!("ppaux::parameterized failed: {:?}, {:?}", def_id, substs)),
};
err.span_note(span, &format!("inside call to {}", Instance(def_id, substs)));
}
err.emit();
}

View file

@ -0,0 +1,251 @@
use rustc::hir::def_id::DefId;
use rustc::mir::repr as mir;
use rustc::ty::layout::Layout;
use rustc::ty::subst::Substs;
use rustc::ty;
use error::{EvalError, EvalResult};
use memory::Pointer;
use interpreter::EvalContext;
use primval;
impl<'a, 'tcx> EvalContext<'a, 'tcx> {
    /// Emulates a call to a compiler intrinsic by performing its effect
    /// directly on interpreter memory.
    ///
    /// `def_id`/`substs` identify the monomorphized intrinsic, `args` are the
    /// MIR-level argument operands, and the result (if any) is written to
    /// `dest` whose layout is `dest_layout`.
    ///
    /// No stack frame is pushed; after this returns `Ok(())` the main
    /// interpreter loop proceeds as if the call had completed in the current
    /// frame. Unsupported intrinsics yield `EvalError::Unimplemented`, and a
    /// false `assume` yields `EvalError::AssumptionNotHeld`.
    pub(super) fn call_intrinsic(
        &mut self,
        def_id: DefId,
        substs: &'tcx Substs<'tcx>,
        args: &[mir::Operand<'tcx>],
        dest: Pointer,
        dest_layout: &'tcx Layout,
    ) -> EvalResult<'tcx, ()> {
        // TODO(solson): We can probably remove this _to_ptr easily.
        // Evaluate every argument up front into a pointer to its value.
        let args_res: EvalResult<Vec<Pointer>> = args.iter()
            .map(|arg| self.eval_operand_to_ptr(arg))
            .collect();
        let args_ptrs = args_res?;

        let pointer_size = self.memory.pointer_size();

        // Dispatch on the intrinsic's item name.
        match &self.tcx.item_name(def_id).as_str()[..] {
            "add_with_overflow" => self.intrinsic_with_overflow(mir::BinOp::Add, &args[0], &args[1], dest, dest_layout)?,
            "sub_with_overflow" => self.intrinsic_with_overflow(mir::BinOp::Sub, &args[0], &args[1], dest, dest_layout)?,
            "mul_with_overflow" => self.intrinsic_with_overflow(mir::BinOp::Mul, &args[0], &args[1], dest, dest_layout)?,

            "arith_offset" => {
                let ptr = self.memory.read_ptr(args_ptrs[0])?;
                let offset = self.memory.read_int(args_ptrs[1], pointer_size)?;
                let new_ptr = ptr.offset(offset as isize);
                self.memory.write_ptr(dest, new_ptr)?;
            }

            "assume" => {
                // A false assumption is undefined behaviour; report it
                // instead of continuing execution.
                if !self.memory.read_bool(args_ptrs[0])? {
                    return Err(EvalError::AssumptionNotHeld);
                }
            }

            "breakpoint" => unimplemented!(), // halt miri

            "copy" |
            "copy_nonoverlapping" => {
                // FIXME: check whether overlapping occurs
                let elem_ty = substs.type_at(0);
                let elem_size = self.type_size(elem_ty);
                let elem_align = self.type_align(elem_ty);
                let src = self.memory.read_ptr(args_ptrs[0])?;
                let dest = self.memory.read_ptr(args_ptrs[1])?;
                let count = self.memory.read_isize(args_ptrs[2])?;
                self.memory.copy(src, dest, count as usize * elem_size, elem_align)?;
            }

            "ctpop" => {
                let elem_ty = substs.type_at(0);
                let elem_size = self.type_size(elem_ty);
                let num = self.memory.read_uint(args_ptrs[0], elem_size)?.count_ones();
                self.memory.write_uint(dest, num.into(), elem_size)?;
            }

            "ctlz" => {
                let elem_ty = substs.type_at(0);
                let elem_size = self.type_size(elem_ty);
                let num = self.memory.read_uint(args_ptrs[0], elem_size)?.leading_zeros();
                self.memory.write_uint(dest, num.into(), elem_size)?;
            }

            "discriminant_value" => {
                let ty = substs.type_at(0);
                let adt_ptr = self.memory.read_ptr(args_ptrs[0])?;
                let discr_val = self.read_discriminant_value(adt_ptr, ty)?;
                // Write the full 8-byte (u64-sized) discriminant.
                self.memory.write_uint(dest, discr_val, 8)?;
            }

            "fabsf32" => {
                let f = self.memory.read_f32(args_ptrs[0])?;
                self.memory.write_f32(dest, f.abs())?;
            }

            "fabsf64" => {
                let f = self.memory.read_f64(args_ptrs[0])?;
                self.memory.write_f64(dest, f.abs())?;
            }

            "fadd_fast" => {
                let ty = substs.type_at(0);
                let a = self.read_primval(args_ptrs[0], ty)?;
                // Fixed: the second operand was previously read from
                // args_ptrs[0] again, computing `a + a` instead of `a + b`.
                let b = self.read_primval(args_ptrs[1], ty)?;
                let result = primval::binary_op(mir::BinOp::Add, a, b)?;
                self.memory.write_primval(dest, result.0)?;
            }

            // Pure optimizer hints or no-ops from the interpreter's view.
            "likely" |
            "unlikely" |
            "forget" => {}

            "init" => self.memory.write_repeat(dest, 0, dest_layout.size(&self.tcx.data_layout).bytes() as usize)?,

            "min_align_of" => {
                let elem_ty = substs.type_at(0);
                let elem_align = self.type_align(elem_ty);
                self.memory.write_uint(dest, elem_align as u64, pointer_size)?;
            }

            "pref_align_of" => {
                let ty = substs.type_at(0);
                let layout = self.type_layout(ty);
                let align = layout.align(&self.tcx.data_layout).pref();
                self.memory.write_uint(dest, align, pointer_size)?;
            }

            "move_val_init" => {
                let ty = substs.type_at(0);
                let ptr = self.memory.read_ptr(args_ptrs[0])?;
                self.move_(args_ptrs[1], ptr, ty)?;
            }

            "needs_drop" => {
                let ty = substs.type_at(0);
                self.memory.write_bool(dest, self.tcx.type_needs_drop_given_env(ty, &self.tcx.empty_parameter_environment()))?;
            }

            "offset" => {
                let pointee_ty = substs.type_at(0);
                let pointee_size = self.type_size(pointee_ty) as isize;
                let ptr_arg = args_ptrs[0];
                let offset = self.memory.read_isize(args_ptrs[1])?;

                // The argument may hold a real pointer or a plain integer
                // stored as pointer-sized bytes; handle both cases.
                match self.memory.read_ptr(ptr_arg) {
                    Ok(ptr) => {
                        let result_ptr = ptr.offset(offset as isize * pointee_size);
                        self.memory.write_ptr(dest, result_ptr)?;
                    }
                    Err(EvalError::ReadBytesAsPointer) => {
                        let addr = self.memory.read_isize(ptr_arg)?;
                        let result_addr = addr + offset * pointee_size as i64;
                        self.memory.write_isize(dest, result_addr)?;
                    }
                    Err(e) => return Err(e),
                }
            }

            "overflowing_sub" => {
                self.intrinsic_overflowing(mir::BinOp::Sub, &args[0], &args[1], dest)?;
            }

            "overflowing_mul" => {
                self.intrinsic_overflowing(mir::BinOp::Mul, &args[0], &args[1], dest)?;
            }

            "overflowing_add" => {
                self.intrinsic_overflowing(mir::BinOp::Add, &args[0], &args[1], dest)?;
            }

            "powif32" => {
                let f = self.memory.read_f32(args_ptrs[0])?;
                // The exponent is an i32, hence the 4-byte read.
                let i = self.memory.read_int(args_ptrs[1], 4)?;
                self.memory.write_f32(dest, f.powi(i as i32))?;
            }

            "powif64" => {
                // Fixed: this arm previously used the f32 read/write helpers,
                // truncating the f64 operand and writing only half the result.
                let f = self.memory.read_f64(args_ptrs[0])?;
                let i = self.memory.read_int(args_ptrs[1], 4)?;
                self.memory.write_f64(dest, f.powi(i as i32))?;
            }

            "sqrtf32" => {
                let f = self.memory.read_f32(args_ptrs[0])?;
                self.memory.write_f32(dest, f.sqrt())?;
            }

            "sqrtf64" => {
                let f = self.memory.read_f64(args_ptrs[0])?;
                self.memory.write_f64(dest, f.sqrt())?;
            }

            "size_of" => {
                let ty = substs.type_at(0);
                let size = self.type_size(ty) as u64;
                self.memory.write_uint(dest, size, pointer_size)?;
            }

            "size_of_val" => {
                let ty = substs.type_at(0);
                if self.type_is_sized(ty) {
                    let size = self.type_size(ty) as u64;
                    self.memory.write_uint(dest, size, pointer_size)?;
                } else {
                    match ty.sty {
                        ty::TySlice(_) | ty::TyStr => {
                            // Fat pointer: the element count is stored right
                            // after the data pointer.
                            let elem_ty = ty.sequence_element_type(self.tcx);
                            let elem_size = self.type_size(elem_ty) as u64;
                            let ptr_size = self.memory.pointer_size() as isize;
                            let n = self.memory.read_usize(args_ptrs[0].offset(ptr_size))?;
                            self.memory.write_uint(dest, n * elem_size, pointer_size)?;
                        }

                        _ => return Err(EvalError::Unimplemented(format!("unimplemented: size_of_val::<{:?}>", ty))),
                    }
                }
            }

            // FIXME: wait for eval_operand_to_ptr to be gone
            /*
            "type_name" => {
                let ty = substs.type_at(0);
                let ty_name = ty.to_string();
                let s = self.str_to_value(&ty_name)?;
                self.memory.write_ptr(dest, s)?;
            }*/
            "type_id" => {
                let ty = substs.type_at(0);
                let n = self.tcx.type_id_hash(ty);
                // type_id is a u64 hash; write all 8 bytes.
                self.memory.write_uint(dest, n, 8)?;
            }

            "transmute" => {
                let ty = substs.type_at(0);
                self.move_(args_ptrs[0], dest, ty)?;
            }

            "try" => unimplemented!(),

            "uninit" => self.memory.mark_definedness(dest, dest_layout.size(&self.tcx.data_layout).bytes() as usize, false)?,

            "volatile_load" => {
                let ty = substs.type_at(0);
                let ptr = self.memory.read_ptr(args_ptrs[0])?;
                self.move_(ptr, dest, ty)?;
            }

            "volatile_store" => {
                let ty = substs.type_at(0);
                let dest = self.memory.read_ptr(args_ptrs[0])?;
                self.move_(args_ptrs[1], dest, ty)?;
            }

            name => return Err(EvalError::Unimplemented(format!("unimplemented intrinsic: {}", name))),
        }

        // Since we pushed no stack frame, the main loop will act
        // as if the call just completed and it's returning to the
        // current frame.
        Ok(())
    }
}

View file

@ -15,6 +15,8 @@ use error::{EvalError, EvalResult};
use memory::Pointer;
use super::{EvalContext, IntegerExt, StackPopCleanup};
mod intrinsics;
impl<'a, 'tcx> EvalContext<'a, 'tcx> {
pub(super) fn goto_block(&mut self, target: mir::BasicBlock) {
@ -218,7 +220,7 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
Some((ptr, block)) => (Some(ptr), StackPopCleanup::Goto(block)),
None => (None, StackPopCleanup::None),
};
self.push_stack_frame(def_id, span, mir, resolved_substs, return_ptr, return_to_block)?;
self.push_stack_frame(resolved_def_id, span, mir, resolved_substs, return_ptr, return_to_block)?;
for (i, (src, src_ty)) in arg_srcs.into_iter().enumerate() {
let dest = self.frame().locals[i];
@ -270,152 +272,6 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
Ok(if not_null { nndiscr } else { 1 - nndiscr })
}
fn call_intrinsic(
&mut self,
def_id: DefId,
substs: &'tcx Substs<'tcx>,
args: &[mir::Operand<'tcx>],
dest: Pointer,
dest_layout: &'tcx Layout,
) -> EvalResult<'tcx, ()> {
// TODO(solson): We can probably remove this _to_ptr easily.
let args_res: EvalResult<Vec<Pointer>> = args.iter()
.map(|arg| self.eval_operand_to_ptr(arg))
.collect();
let args_ptrs = args_res?;
let pointer_size = self.memory.pointer_size();
match &self.tcx.item_name(def_id).as_str()[..] {
"add_with_overflow" => self.intrinsic_with_overflow(mir::BinOp::Add, &args[0], &args[1], dest, dest_layout)?,
"sub_with_overflow" => self.intrinsic_with_overflow(mir::BinOp::Sub, &args[0], &args[1], dest, dest_layout)?,
"mul_with_overflow" => self.intrinsic_with_overflow(mir::BinOp::Mul, &args[0], &args[1], dest, dest_layout)?,
"assume" => {
if !self.memory.read_bool(args_ptrs[0])? {
return Err(EvalError::AssumptionNotHeld);
}
}
"copy_nonoverlapping" => {
let elem_ty = substs.type_at(0);
let elem_size = self.type_size(elem_ty);
let elem_align = self.type_align(elem_ty);
let src = self.memory.read_ptr(args_ptrs[0])?;
let dest = self.memory.read_ptr(args_ptrs[1])?;
let count = self.memory.read_isize(args_ptrs[2])?;
self.memory.copy(src, dest, count as usize * elem_size, elem_align)?;
}
"ctpop" => {
let elem_ty = substs.type_at(0);
let elem_size = self.type_size(elem_ty);
let num = self.memory.read_uint(args_ptrs[0], elem_size)?.count_ones();
self.memory.write_uint(dest, num.into(), elem_size)?;
}
"ctlz" => {
let elem_ty = substs.type_at(0);
let elem_size = self.type_size(elem_ty);
let num = self.memory.read_uint(args_ptrs[0], elem_size)?.leading_zeros();
self.memory.write_uint(dest, num.into(), elem_size)?;
}
"discriminant_value" => {
let ty = substs.type_at(0);
let adt_ptr = self.memory.read_ptr(args_ptrs[0])?;
let discr_val = self.read_discriminant_value(adt_ptr, ty)?;
self.memory.write_uint(dest, discr_val, 8)?;
}
"forget" => {}
"init" => self.memory.write_repeat(dest, 0, dest_layout.size(&self.tcx.data_layout).bytes() as usize)?,
"min_align_of" => {
let elem_ty = substs.type_at(0);
let elem_align = self.type_align(elem_ty);
self.memory.write_uint(dest, elem_align as u64, pointer_size)?;
}
"move_val_init" => {
let ty = substs.type_at(0);
let ptr = self.memory.read_ptr(args_ptrs[0])?;
self.move_(args_ptrs[1], ptr, ty)?;
}
"offset" => {
let pointee_ty = substs.type_at(0);
let pointee_size = self.type_size(pointee_ty) as isize;
let ptr_arg = args_ptrs[0];
let offset = self.memory.read_isize(args_ptrs[1])?;
match self.memory.read_ptr(ptr_arg) {
Ok(ptr) => {
let result_ptr = ptr.offset(offset as isize * pointee_size);
self.memory.write_ptr(dest, result_ptr)?;
}
Err(EvalError::ReadBytesAsPointer) => {
let addr = self.memory.read_isize(ptr_arg)?;
let result_addr = addr + offset * pointee_size as i64;
self.memory.write_isize(dest, result_addr)?;
}
Err(e) => return Err(e),
}
}
"overflowing_sub" => {
self.intrinsic_overflowing(mir::BinOp::Sub, &args[0], &args[1], dest)?;
}
"overflowing_mul" => {
self.intrinsic_overflowing(mir::BinOp::Mul, &args[0], &args[1], dest)?;
}
"overflowing_add" => {
self.intrinsic_overflowing(mir::BinOp::Add, &args[0], &args[1], dest)?;
}
"size_of" => {
let ty = substs.type_at(0);
let size = self.type_size(ty) as u64;
self.memory.write_uint(dest, size, pointer_size)?;
}
"size_of_val" => {
let ty = substs.type_at(0);
if self.type_is_sized(ty) {
let size = self.type_size(ty) as u64;
self.memory.write_uint(dest, size, pointer_size)?;
} else {
match ty.sty {
ty::TySlice(_) | ty::TyStr => {
let elem_ty = ty.sequence_element_type(self.tcx);
let elem_size = self.type_size(elem_ty) as u64;
let ptr_size = self.memory.pointer_size() as isize;
let n = self.memory.read_usize(args_ptrs[0].offset(ptr_size))?;
self.memory.write_uint(dest, n * elem_size, pointer_size)?;
}
_ => return Err(EvalError::Unimplemented(format!("unimplemented: size_of_val::<{:?}>", ty))),
}
}
}
"transmute" => {
let ty = substs.type_at(0);
self.move_(args_ptrs[0], dest, ty)?;
}
"uninit" => self.memory.mark_definedness(dest, dest_layout.size(&self.tcx.data_layout).bytes() as usize, false)?,
name => return Err(EvalError::Unimplemented(format!("unimplemented intrinsic: {}", name))),
}
// Since we pushed no stack frame, the main loop will act
// as if the call just completed and it's returning to the
// current frame.
Ok(())
}
fn call_c_abi(
&mut self,
def_id: DefId,
@ -512,7 +368,7 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
/// Trait method, which has to be resolved to an impl method.
fn trait_method(
&self,
&mut self,
trait_id: DefId,
def_id: DefId,
substs: &'tcx Substs<'tcx>,
@ -527,13 +383,45 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
let mname = self.tcx.item_name(def_id);
// Create a concatenated set of substitutions which includes those from the impl
// and those from the method:
let mth = get_impl_method(self.tcx, substs, impl_did, vtable_impl.substs, mname);
let (did, substs) = find_method(self.tcx, substs, impl_did, vtable_impl.substs, mname);
Ok((mth.method.def_id, mth.substs))
Ok((did, substs))
}
traits::VtableClosure(vtable_closure) =>
Ok((vtable_closure.closure_def_id, vtable_closure.substs.func_substs)),
traits::VtableClosure(vtable_closure) => {
let trait_closure_kind = self.tcx
.lang_items
.fn_trait_kind(trait_id)
.expect("The substitutions should have no type parameters remaining after passing through fulfill_obligation");
let closure_kind = self.tcx.closure_kind(vtable_closure.closure_def_id);
trace!("closures {:?}, {:?}", closure_kind, trait_closure_kind);
match (closure_kind, trait_closure_kind) {
(ty::ClosureKind::Fn, ty::ClosureKind::Fn) |
(ty::ClosureKind::FnMut, ty::ClosureKind::FnMut) |
(ty::ClosureKind::FnOnce, ty::ClosureKind::FnOnce) |
(ty::ClosureKind::Fn, ty::ClosureKind::FnMut) => {} // No adapter needed.
(ty::ClosureKind::Fn, ty::ClosureKind::FnOnce) |
(ty::ClosureKind::FnMut, ty::ClosureKind::FnOnce) => {
// The closure fn is a `fn(&self, ...)` or `fn(&mut self, ...)`.
// We want a `fn(self, ...)`.
// We can produce this by doing something like:
//
// fn call_once(self, ...) { call_mut(&self, ...) }
// fn call_once(mut self, ...) { call_mut(&mut self, ...) }
//
// These are both the same at trans time.
// interpreter magic: insert an intermediate pointer, so we can skip the intermediate function call
// FIXME: this is a memory leak, should probably add the pointer to the current stack
let ptr_size = self.memory.pointer_size();
let first = self.memory.allocate(ptr_size, ptr_size)?;
self.memory.copy(args[0].0, first, ptr_size, ptr_size)?;
self.memory.write_ptr(args[0].0, first)?;
}
_ => bug!("cannot convert {:?} to {:?}", closure_kind, trait_closure_kind),
}
Ok((vtable_closure.closure_def_id, vtable_closure.substs.func_substs))
}
traits::VtableFnPointer(vtable_fn_ptr) => {
if let ty::TyFnDef(did, ref substs, _) = vtable_fn_ptr.fn_ty.sty {
@ -636,3 +524,35 @@ pub(super) fn get_impl_method<'a, 'tcx>(
}
}
}
/// Locates the applicable definition of a method, given its name.
///
/// Resolves a trait method call on a concrete impl to the `DefId` and
/// substitutions of the item that actually defines it — either the impl's
/// own item or, presumably, an inherited default from the trait
/// (TODO confirm against `ancestors`/`fn_defs` semantics).
pub fn find_method<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
substs: &'tcx Substs<'tcx>,
impl_def_id: DefId,
impl_substs: &'tcx Substs<'tcx>,
name: ast::Name)
-> (DefId, &'tcx Substs<'tcx>)
{
// Resolution requires fully concrete substitutions.
assert!(!substs.needs_infer());

// Look up the trait this impl implements, then search the impl's
// ancestor chain for the first definition of `name`.
let trait_def_id = tcx.trait_id_of_impl(impl_def_id).unwrap();
let trait_def = tcx.lookup_trait_def(trait_def_id);
match trait_def.ancestors(impl_def_id).fn_defs(tcx, name).next() {
Some(node_item) => {
// Translate the caller's substitutions into the namespace of the
// node that defines the method, using a throwaway inference context.
let substs = tcx.infer_ctxt(None, None, Reveal::All).enter(|infcx| {
let substs = substs.rebase_onto(tcx, trait_def_id, impl_substs);
let substs = traits::translate_substs(&infcx, impl_def_id, substs, node_item.node);
// Lift the result out of the inference context; a failure here
// means inference variables leaked, which is a compiler bug.
tcx.lift(&substs).unwrap_or_else(|| {
bug!("find_method: translate_substs \
returned {:?} which contains inference types/regions",
substs);
})
});
(node_item.item.def_id, substs)
}
None => {
bug!("method {:?} not found in {:?}", name, impl_def_id)
}
}
}

View file

@ -36,6 +36,7 @@ pub use interpreter::{
Frame,
eval_main,
run_mir_passes,
StackPopCleanup,
};
pub use memory::{

View file

@ -21,18 +21,28 @@ fn crazy_closure() -> (i32, i32, i32) {
inner(10)
}
// TODO(solson): Implement closure argument adjustment and uncomment this test.
// fn closure_arg_adjustment_problem() -> i64 {
// fn once<F: FnOnce(i64)>(f: F) { f(2); }
// let mut y = 1;
// {
// let f = |x| y += x;
// once(f);
// }
// y
// }
/// Regression test: an `FnMut` closure passed where `FnOnce` is expected
/// requires a closure-kind adjustment in the interpreter.
fn closure_arg_adjustment_problem() -> i64 {
    // Consumes the closure via the `FnOnce` bound and calls it with 2.
    fn invoke<F: FnOnce(i64)>(callback: F) {
        callback(2);
    }

    let mut total = 1;
    {
        invoke(|delta| total += delta);
    }
    total
}
/// Regression test: calling an `FnOnce` closure that takes several arguments
/// and captures a local by reference.
fn fn_once_closure_with_multiple_args() -> i64 {
    // Consumes the closure and applies it to the pair (2, 3).
    fn apply<F: FnOnce(i64, i64) -> i64>(f: F) -> i64 {
        f(2, 3)
    }

    let captured = 1;
    apply(|a, b| a + captured + b)
}
// Entry point of the run-pass test: each assertion exercises one closure
// scenario defined above (simple calls, nested/captured state, and the two
// FnOnce adjustment cases added in this commit).
fn main() {
assert_eq!(simple(), 12);
assert_eq!(crazy_closure(), (84, 10, 10));
assert_eq!(closure_arg_adjustment_problem(), 3);
assert_eq!(fn_once_closure_with_multiple_args(), 6);
}

View file

@ -0,0 +1,13 @@
// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/// Run-pass test: comparing two equal `Result` enum values must succeed,
/// exercising the enum downcast path fixed in this commit.
fn main() {
    let value: Result<i32, String> = Ok(42);
    assert!(value == Ok(42));
}