Merge pull request #434 from solson/rustup

rustup for big refactor; kill most of validation
Oliver Schneider 2018-08-30 10:31:05 +02:00 committed by GitHub
commit 752accf4e4
29 changed files with 752 additions and 2200 deletions


@@ -1 +1 @@
nightly-2018-08-14
nightly-2018-08-30


@@ -1,10 +1,8 @@
use rustc::ty::{self, Ty};
use rustc::ty::layout::{self, Align, LayoutOf, Size};
use rustc::ty;
use rustc::ty::layout::{Align, LayoutOf, Size};
use rustc::hir::def_id::{DefId, CRATE_DEF_INDEX};
use rustc::mir;
use rustc_data_structures::indexed_vec::Idx;
use syntax::attr;
use syntax::codemap::Span;
use std::mem;
@@ -14,196 +12,130 @@ use tls::MemoryExt;
use super::memory::MemoryKind;
fn write_discriminant_value<'a, 'mir, 'tcx: 'a + 'mir>(
ecx: &mut EvalContext<'a, 'mir, 'tcx, super::Evaluator<'tcx>>,
dest_ty: Ty<'tcx>,
dest: Place,
variant_index: usize,
) -> EvalResult<'tcx> {
let layout = ecx.layout_of(dest_ty)?;
match layout.variants {
layout::Variants::Single { index } => {
if index != variant_index {
// If the layout of an enum is `Single`, all
// other variants are necessarily uninhabited.
assert_eq!(layout.for_variant(&ecx, variant_index).abi,
layout::Abi::Uninhabited);
}
}
layout::Variants::Tagged { .. } => {
let discr_val = dest_ty.ty_adt_def().unwrap()
.discriminant_for_variant(*ecx.tcx, variant_index)
.val;
let (discr_dest, discr) = ecx.place_field(dest, mir::Field::new(0), layout)?;
ecx.write_scalar(discr_dest, Scalar::from_uint(discr_val, discr.size), discr.ty)?;
}
layout::Variants::NicheFilling {
dataful_variant,
ref niche_variants,
niche_start,
..
} => {
if variant_index != dataful_variant {
let (niche_dest, niche) =
ecx.place_field(dest, mir::Field::new(0), layout)?;
let niche_value = ((variant_index - niche_variants.start()) as u128)
.wrapping_add(niche_start);
ecx.write_scalar(niche_dest, Scalar::from_uint(niche_value, niche.size), niche.ty)?;
}
}
}
Ok(())
}
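For reference, a standalone sketch (plain Rust, not miri's types) of the niche-tag arithmetic in the NicheFilling arm above; the concrete numbers are invented for illustration.

// tag = (variant_index - niche_variants.start()) wrapping_add niche_start
fn niche_value(variant_index: usize, niche_variants_start: usize, niche_start: u128) -> u128 {
    ((variant_index - niche_variants_start) as u128).wrapping_add(niche_start)
}

fn main() {
    // Hypothetical layout: niched variants start at index 1, niche tags start at 2.
    assert_eq!(niche_value(1, 1, 2), 2);
    assert_eq!(niche_value(3, 1, 2), 4);
}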
pub trait EvalContextExt<'tcx> {
fn call_foreign_item(
pub trait EvalContextExt<'tcx, 'mir> {
/// Emulate calling a foreign item, fail if the item is not supported.
/// This function will handle `goto_block` if needed.
fn emulate_foreign_item(
&mut self,
def_id: DefId,
args: &[ValTy<'tcx>],
dest: Place,
dest_ty: Ty<'tcx>,
dest_block: mir::BasicBlock,
args: &[OpTy<'tcx>],
dest: PlaceTy<'tcx>,
ret: mir::BasicBlock,
) -> EvalResult<'tcx>;
fn resolve_path(&self, path: &[&str]) -> EvalResult<'tcx, ty::Instance<'tcx>>;
fn call_missing_fn(
/// Emulate a function that should have MIR but does not.
/// This is solely to support execution without full MIR.
/// Fail if emulating this function is not supported.
/// This function will handle `goto_block` if needed.
fn emulate_missing_fn(
&mut self,
instance: ty::Instance<'tcx>,
destination: Option<(Place, mir::BasicBlock)>,
args: &[ValTy<'tcx>],
sig: ty::FnSig<'tcx>,
path: String,
args: &[OpTy<'tcx>],
dest: Option<PlaceTy<'tcx>>,
ret: Option<mir::BasicBlock>,
) -> EvalResult<'tcx>;
fn eval_fn_call(
fn find_fn(
&mut self,
instance: ty::Instance<'tcx>,
destination: Option<(Place, mir::BasicBlock)>,
args: &[ValTy<'tcx>],
span: Span,
sig: ty::FnSig<'tcx>,
) -> EvalResult<'tcx, bool>;
args: &[OpTy<'tcx>],
dest: Option<PlaceTy<'tcx>>,
ret: Option<mir::BasicBlock>,
) -> EvalResult<'tcx, Option<&'mir mir::Mir<'tcx>>>;
fn write_null(&mut self, dest: Place, dest_layout: TyLayout<'tcx>) -> EvalResult<'tcx>;
fn write_null(&mut self, dest: PlaceTy<'tcx>) -> EvalResult<'tcx>;
}
impl<'a, 'mir, 'tcx: 'mir + 'a> EvalContextExt<'tcx> for EvalContext<'a, 'mir, 'tcx, super::Evaluator<'tcx>> {
fn eval_fn_call(
impl<'a, 'mir, 'tcx: 'mir + 'a> EvalContextExt<'tcx, 'mir> for EvalContext<'a, 'mir, 'tcx, super::Evaluator<'tcx>> {
fn find_fn(
&mut self,
instance: ty::Instance<'tcx>,
destination: Option<(Place, mir::BasicBlock)>,
args: &[ValTy<'tcx>],
span: Span,
sig: ty::FnSig<'tcx>,
) -> EvalResult<'tcx, bool> {
trace!("eval_fn_call: {:#?}, {:#?}", instance, destination);
args: &[OpTy<'tcx>],
dest: Option<PlaceTy<'tcx>>,
ret: Option<mir::BasicBlock>,
) -> EvalResult<'tcx, Option<&'mir mir::Mir<'tcx>>> {
trace!("eval_fn_call: {:#?}, {:?}", instance, dest.map(|place| *place));
let def_id = instance.def_id();
let item_path = self.tcx.absolute_item_path_str(def_id);
match &*item_path {
"std::sys::unix::thread::guard::init" | "std::sys::unix::thread::guard::current" => {
// Return None, as it doesn't make sense to return Some, because miri detects stack overflow itself.
let ret_ty = sig.output();
match ret_ty.sty {
ty::TyAdt(ref adt_def, _) => {
assert!(adt_def.is_enum(), "Unexpected return type for {}", item_path);
let none_variant_index = adt_def.variants.iter().position(|def| {
def.name.as_str() == "None"
}).expect("No None variant");
let (return_place, return_to_block) = destination.unwrap();
write_discriminant_value(self, ret_ty, return_place, none_variant_index)?;
self.goto_block(return_to_block);
return Ok(true);
}
_ => panic!("Unexpected return type for {}", item_path)
}
}
"std::sys::unix::fast_thread_local::register_dtor" => {
// TODO: register the dtor
let (_return_place, return_to_block) = destination.unwrap();
self.goto_block(return_to_block);
return Ok(true);
}
_ => {}
// first run the common hooks also supported by CTFE
if self.hook_fn(instance, args, dest)? {
self.goto_block(ret)?;
return Ok(None);
}
// there are some more lang items we want to hook that CTFE does not hook (yet)
if self.tcx.lang_items().align_offset_fn() == Some(instance.def.def_id()) {
// FIXME: return a real value in case the target allocation has an
// alignment bigger than the one requested
let n = u128::max_value();
let amt = 128 - self.memory.pointer_size().bytes() * 8;
let (dest, return_to_block) = destination.unwrap();
let ty = self.tcx.types.usize;
let ptr_size = self.memory.pointer_size();
self.write_scalar(dest, Scalar::from_uint((n << amt) >> amt, ptr_size), ty)?;
self.goto_block(return_to_block);
return Ok(true);
let dest = dest.unwrap();
let n = self.truncate(n, dest.layout);
self.write_scalar(Scalar::from_uint(n, dest.layout.size), dest)?;
self.goto_block(ret)?;
return Ok(None);
}
// Try to see if we can do something about foreign items
if self.tcx.is_foreign_item(instance.def_id()) {
// An external function that we cannot find MIR for, but we can still run enough
// of them to make miri viable.
self.emulate_foreign_item(
instance.def_id(),
args,
dest.unwrap(),
ret.unwrap(),
)?;
// `goto_block` already handled
return Ok(None);
}
// Otherwise we really want to see the MIR -- but if we do not have it, maybe we can
// emulate something. This is a HACK to support running without a full-MIR libstd.
let mir = match self.load_mir(instance.def) {
Ok(mir) => mir,
Err(EvalError { kind: EvalErrorKind::NoMirFor(path), .. }) => {
self.call_missing_fn(
instance,
destination,
args,
sig,
self.emulate_missing_fn(
path,
args,
dest,
ret,
)?;
return Ok(true);
// `goto_block` already handled
return Ok(None);
}
Err(other) => return Err(other),
};
let (return_place, return_to_block) = match destination {
Some((place, block)) => (place, StackPopCleanup::Goto(block)),
None => (Place::undef(), StackPopCleanup::None),
};
self.push_stack_frame(
instance,
span,
mir,
return_place,
return_to_block,
)?;
Ok(false)
Ok(Some(mir))
}
fn call_foreign_item(
fn emulate_foreign_item(
&mut self,
def_id: DefId,
args: &[ValTy<'tcx>],
dest: Place,
dest_ty: Ty<'tcx>,
dest_block: mir::BasicBlock,
args: &[OpTy<'tcx>],
dest: PlaceTy<'tcx>,
ret: mir::BasicBlock,
) -> EvalResult<'tcx> {
let attrs = self.tcx.get_attrs(def_id);
let link_name = match attr::first_attr_value_str_by_name(&attrs, "link_name") {
Some(name) => name.as_str(),
None => self.tcx.item_name(def_id).as_str(),
};
let dest_layout = self.layout_of(dest_ty)?;
match &link_name[..] {
"malloc" => {
let size = self.value_to_scalar(args[0])?.to_usize(self)?;
let size = self.read_scalar(args[0])?.to_usize(&self)?;
if size == 0 {
self.write_null(dest, dest_layout)?;
self.write_null(dest)?;
} else {
let align = self.tcx.data_layout.pointer_align;
let ptr = self.memory.allocate(Size::from_bytes(size), align, MemoryKind::C.into())?;
self.write_scalar(dest, Scalar::Ptr(ptr), dest_ty)?;
self.write_scalar(Scalar::Ptr(ptr), dest)?;
}
}
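A minimal model of the size-zero convention the malloc shim above implements: null for malloc(0), otherwise some fresh address (the address below is made up).

fn malloc_model(size: u64) -> u64 {
    if size == 0 {
        0 // null, as written by write_null above
    } else {
        0x1000 // stand-in for a fresh allocation's address
    }
}

fn main() {
    assert_eq!(malloc_model(0), 0);
    assert_ne!(malloc_model(16), 0);
}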
"free" => {
let ptr = self.into_ptr(args[0].value)?.unwrap_or_err()?;
let ptr = self.read_scalar(args[0])?.not_undef()?;
if !ptr.is_null() {
self.memory.deallocate(
ptr.to_ptr()?,
@@ -214,8 +146,8 @@ impl<'a, 'mir, 'tcx: 'mir + 'a> EvalContextExt<'tcx> for EvalContext<'a, 'mir, '
}
"__rust_alloc" => {
let size = self.value_to_scalar(args[0])?.to_usize(self)?;
let align = self.value_to_scalar(args[1])?.to_usize(self)?;
let size = self.read_scalar(args[0])?.to_usize(&self)?;
let align = self.read_scalar(args[1])?.to_usize(&self)?;
if size == 0 {
return err!(HeapAllocZeroBytes);
}
@@ -225,11 +157,11 @@ impl<'a, 'mir, 'tcx: 'mir + 'a> EvalContextExt<'tcx> for EvalContext<'a, 'mir, '
let ptr = self.memory.allocate(Size::from_bytes(size),
Align::from_bytes(align, align).unwrap(),
MemoryKind::Rust.into())?;
self.write_scalar(dest, Scalar::Ptr(ptr), dest_ty)?;
self.write_scalar(Scalar::Ptr(ptr), dest)?;
}
"__rust_alloc_zeroed" => {
let size = self.value_to_scalar(args[0])?.to_usize(self)?;
let align = self.value_to_scalar(args[1])?.to_usize(self)?;
let size = self.read_scalar(args[0])?.to_usize(&self)?;
let align = self.read_scalar(args[1])?.to_usize(&self)?;
if size == 0 {
return err!(HeapAllocZeroBytes);
}
@@ -240,12 +172,12 @@ impl<'a, 'mir, 'tcx: 'mir + 'a> EvalContextExt<'tcx> for EvalContext<'a, 'mir, '
Align::from_bytes(align, align).unwrap(),
MemoryKind::Rust.into())?;
self.memory.write_repeat(ptr.into(), 0, Size::from_bytes(size))?;
self.write_scalar(dest, Scalar::Ptr(ptr), dest_ty)?;
self.write_scalar(Scalar::Ptr(ptr), dest)?;
}
"__rust_dealloc" => {
let ptr = self.into_ptr(args[0].value)?.unwrap_or_err()?.to_ptr()?;
let old_size = self.value_to_scalar(args[1])?.to_usize(self)?;
let align = self.value_to_scalar(args[2])?.to_usize(self)?;
let ptr = self.read_scalar(args[0])?.to_ptr()?;
let old_size = self.read_scalar(args[1])?.to_usize(&self)?;
let align = self.read_scalar(args[2])?.to_usize(&self)?;
if old_size == 0 {
return err!(HeapAllocZeroBytes);
}
@@ -259,10 +191,10 @@ impl<'a, 'mir, 'tcx: 'mir + 'a> EvalContextExt<'tcx> for EvalContext<'a, 'mir, '
)?;
}
"__rust_realloc" => {
let ptr = self.into_ptr(args[0].value)?.unwrap_or_err()?.to_ptr()?;
let old_size = self.value_to_scalar(args[1])?.to_usize(self)?;
let align = self.value_to_scalar(args[2])?.to_usize(self)?;
let new_size = self.value_to_scalar(args[3])?.to_usize(self)?;
let ptr = self.read_scalar(args[0])?.to_ptr()?;
let old_size = self.read_scalar(args[1])?.to_usize(&self)?;
let align = self.read_scalar(args[2])?.to_usize(&self)?;
let new_size = self.read_scalar(args[3])?.to_usize(&self)?;
if old_size == 0 || new_size == 0 {
return err!(HeapAllocZeroBytes);
}
@@ -277,7 +209,7 @@ impl<'a, 'mir, 'tcx: 'mir + 'a> EvalContextExt<'tcx> for EvalContext<'a, 'mir, '
Align::from_bytes(align, align).unwrap(),
MemoryKind::Rust.into(),
)?;
self.write_scalar(dest, Scalar::Ptr(new_ptr), dest_ty)?;
self.write_scalar(Scalar::Ptr(new_ptr), dest)?;
}
"syscall" => {
@@ -286,7 +218,7 @@ impl<'a, 'mir, 'tcx: 'mir + 'a> EvalContextExt<'tcx> for EvalContext<'a, 'mir, '
//
// libc::syscall(NR_GETRANDOM, buf.as_mut_ptr(), buf.len(), GRND_NONBLOCK)
// is called if a `HashMap` is created the regular way.
match self.value_to_scalar(args[0])?.to_usize(self)? {
match self.read_scalar(args[0])?.to_usize(&self)? {
318 | 511 => {
return err!(Unimplemented(
"miri does not support random number generators".to_owned(),
@@ -301,8 +233,8 @@ impl<'a, 'mir, 'tcx: 'mir + 'a> EvalContextExt<'tcx> for EvalContext<'a, 'mir, '
}
"dlsym" => {
let _handle = self.into_ptr(args[0].value)?;
let symbol = self.into_ptr(args[1].value)?.unwrap_or_err()?.to_ptr()?;
let _handle = self.read_scalar(args[0])?;
let symbol = self.read_scalar(args[1])?.to_ptr()?;
let symbol_name = self.memory.read_c_str(symbol)?;
let err = format!("bad c unicode symbol: {:?}", symbol_name);
let symbol_name = ::std::str::from_utf8(symbol_name).unwrap_or(&err);
@@ -315,21 +247,22 @@ impl<'a, 'mir, 'tcx: 'mir + 'a> EvalContextExt<'tcx> for EvalContext<'a, 'mir, '
"__rust_maybe_catch_panic" => {
// fn __rust_maybe_catch_panic(f: fn(*mut u8), data: *mut u8, data_ptr: *mut usize, vtable_ptr: *mut usize) -> u32
// We abort on panic, so not much is going on here, but we still have to call the closure
let u8_ptr_ty = self.tcx.mk_mut_ptr(self.tcx.types.u8);
let f = self.into_ptr(args[0].value)?.unwrap_or_err()?.to_ptr()?;
let data = self.into_ptr(args[1].value)?.unwrap_or_err()?;
let f = self.read_scalar(args[0])?.to_ptr()?;
let data = self.read_scalar(args[1])?.not_undef()?;
let f_instance = self.memory.get_fn(f)?;
self.write_null(dest, dest_layout)?;
self.write_null(dest)?;
trace!("__rust_maybe_catch_panic: {:?}", f_instance);
// Now we make a function call. TODO: Consider making this re-usable? EvalContext::step does sth. similar for the TLS dtors,
// and of course eval_main.
let mir = self.load_mir(f_instance.def)?;
let closure_dest = Place::null(&self);
self.push_stack_frame(
f_instance,
mir.span,
mir,
Place::undef(),
StackPopCleanup::Goto(dest_block),
closure_dest,
StackPopCleanup::Goto(Some(ret)), // directly return to caller
)?;
let mut args = self.frame().mir.args_iter();
@@ -340,25 +273,24 @@ impl<'a, 'mir, 'tcx: 'mir + 'a> EvalContextExt<'tcx> for EvalContext<'a, 'mir, '
),
)?;
let arg_dest = self.eval_place(&mir::Place::Local(arg_local))?;
self.write_ptr(arg_dest, data, u8_ptr_ty)?;
self.write_scalar(data, arg_dest)?;
assert!(args.next().is_none(), "__rust_maybe_catch_panic argument has more arguments than expected");
// We ourselves return 0
self.write_null(dest, dest_layout)?;
// We ourselves will return 0, eventually (because we will not return if we panicked)
self.write_null(dest)?;
// Don't fall through
// Don't fall through, we do NOT want to `goto_block`!
return Ok(());
}
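Because miri aborts on panic, __rust_maybe_catch_panic reduces to calling the closure and reporting success; a simplified model under that assumption:

fn maybe_catch_panic(f: fn(*mut u8), data: *mut u8) -> u32 {
    f(data); // if this panicked, execution would already have aborted
    0 // "no panic", matching the write_null above
}

fn main() {
    fn closure(_data: *mut u8) {}
    assert_eq!(maybe_catch_panic(closure, std::ptr::null_mut()), 0);
}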
"__rust_start_panic" => {
return err!(Panic);
}
"__rust_start_panic" =>
return err!(MachineError("the evaluated program panicked".to_string())),
"memcmp" => {
let left = self.into_ptr(args[0].value)?.unwrap_or_err()?;
let right = self.into_ptr(args[1].value)?.unwrap_or_err()?;
let n = Size::from_bytes(self.value_to_scalar(args[2])?.to_usize(self)?);
let left = self.read_scalar(args[0])?.not_undef()?;
let right = self.read_scalar(args[1])?.not_undef()?;
let n = Size::from_bytes(self.read_scalar(args[2])?.to_usize(&self)?);
let result = {
let left_bytes = self.memory.read_bytes(left, n)?;
@@ -373,58 +305,57 @@ impl<'a, 'mir, 'tcx: 'mir + 'a> EvalContextExt<'tcx> for EvalContext<'a, 'mir, '
};
self.write_scalar(
dest,
Scalar::from_i32(result),
dest_ty,
dest,
)?;
}
"memrchr" => {
let ptr = self.into_ptr(args[0].value)?.unwrap_or_err()?;
let val = self.value_to_scalar(args[1])?.to_bytes()? as u8;
let num = self.value_to_scalar(args[2])?.to_usize(self)?;
let ptr = self.read_scalar(args[0])?.not_undef()?;
let val = self.read_scalar(args[1])?.to_bytes()? as u8;
let num = self.read_scalar(args[2])?.to_usize(&self)?;
if let Some(idx) = self.memory.read_bytes(ptr, Size::from_bytes(num))?.iter().rev().position(
|&c| c == val,
)
{
let new_ptr = ptr.ptr_offset(Size::from_bytes(num - idx as u64 - 1), &self)?;
self.write_ptr(dest, new_ptr, dest_ty)?;
self.write_scalar(new_ptr, dest)?;
} else {
self.write_null(dest, dest_layout)?;
self.write_null(dest)?;
}
}
"memchr" => {
let ptr = self.into_ptr(args[0].value)?.unwrap_or_err()?;
let val = self.value_to_scalar(args[1])?.to_bytes()? as u8;
let num = self.value_to_scalar(args[2])?.to_usize(self)?;
let ptr = self.read_scalar(args[0])?.not_undef()?;
let val = self.read_scalar(args[1])?.to_bytes()? as u8;
let num = self.read_scalar(args[2])?.to_usize(&self)?;
if let Some(idx) = self.memory.read_bytes(ptr, Size::from_bytes(num))?.iter().position(
|&c| c == val,
)
{
let new_ptr = ptr.ptr_offset(Size::from_bytes(idx as u64), &self)?;
self.write_ptr(dest, new_ptr, dest_ty)?;
self.write_scalar(new_ptr, dest)?;
} else {
self.write_null(dest, dest_layout)?;
self.write_null(dest)?;
}
}
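The index arithmetic in the memrchr arm (num - idx - 1, because idx counts from the back of the buffer) is easy to get wrong; a plain-slice sketch:

fn memrchr(haystack: &[u8], needle: u8) -> Option<usize> {
    haystack
        .iter()
        .rev()
        .position(|&c| c == needle)
        .map(|idx| haystack.len() - idx - 1)
}

fn main() {
    assert_eq!(memrchr(b"abca", b'a'), Some(3));
    assert_eq!(memrchr(b"abca", b'b'), Some(1));
    assert_eq!(memrchr(b"abca", b'z'), None);
}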
"getenv" => {
let result = {
let name_ptr = self.into_ptr(args[0].value)?.unwrap_or_err()?.to_ptr()?;
let name_ptr = self.read_scalar(args[0])?.to_ptr()?;
let name = self.memory.read_c_str(name_ptr)?;
match self.machine.env_vars.get(name) {
Some(&var) => Scalar::Ptr(var),
None => Scalar::null(self.memory.pointer_size()),
}
};
self.write_scalar(dest, result, dest_ty)?;
self.write_scalar(result, dest)?;
}
"unsetenv" => {
let mut success = None;
{
let name_ptr = self.into_ptr(args[0].value)?.unwrap_or_err()?;
let name_ptr = self.read_scalar(args[0])?.not_undef()?;
if !name_ptr.is_null() {
let name = self.memory.read_c_str(name_ptr.to_ptr()?)?;
if !name.is_empty() && !name.contains(&b'=') {
@@ -436,17 +367,17 @@ impl<'a, 'mir, 'tcx: 'mir + 'a> EvalContextExt<'tcx> for EvalContext<'a, 'mir, '
if let Some(var) = old {
self.memory.deallocate(var, None, MemoryKind::Env.into())?;
}
self.write_null(dest, dest_layout)?;
self.write_null(dest)?;
} else {
self.write_scalar(dest, Scalar::from_int(-1, dest_layout.size), dest_ty)?;
self.write_scalar(Scalar::from_int(-1, dest.layout.size), dest)?;
}
}
"setenv" => {
let mut new = None;
{
let name_ptr = self.into_ptr(args[0].value)?.unwrap_or_err()?;
let value_ptr = self.into_ptr(args[1].value)?.unwrap_or_err()?.to_ptr()?;
let name_ptr = self.read_scalar(args[0])?.not_undef()?;
let value_ptr = self.read_scalar(args[1])?.to_ptr()?;
let value = self.memory.read_c_str(value_ptr)?;
if !name_ptr.is_null() {
let name = self.memory.read_c_str(name_ptr.to_ptr()?)?;
@@ -472,16 +403,16 @@ impl<'a, 'mir, 'tcx: 'mir + 'a> EvalContextExt<'tcx> for EvalContext<'a, 'mir, '
{
self.memory.deallocate(var, None, MemoryKind::Env.into())?;
}
self.write_null(dest, dest_layout)?;
self.write_null(dest)?;
} else {
self.write_scalar(dest, Scalar::from_int(-1, dest_layout.size), dest_ty)?;
self.write_scalar(Scalar::from_int(-1, dest.layout.size), dest)?;
}
}
"write" => {
let fd = self.value_to_scalar(args[0])?.to_bytes()?;
let buf = self.into_ptr(args[1].value)?.unwrap_or_err()?;
let n = self.value_to_scalar(args[2])?.to_bytes()? as u64;
let fd = self.read_scalar(args[0])?.to_bytes()?;
let buf = self.read_scalar(args[1])?.not_undef()?;
let n = self.read_scalar(args[2])?.to_bytes()? as u64;
trace!("Called write({:?}, {:?}, {:?})", fd, buf, n);
let result = if fd == 1 || fd == 2 {
// stdout/stderr
@@ -501,36 +432,31 @@ impl<'a, 'mir, 'tcx: 'mir + 'a> EvalContextExt<'tcx> for EvalContext<'a, 'mir, '
warn!("Ignored output to FD {}", fd);
n as i64 // pretend it all went well
}; // now result is the value we return back to the program
let ptr_size = self.memory.pointer_size();
self.write_scalar(
Scalar::from_int(result, dest.layout.size),
dest,
Scalar::from_int(result, ptr_size),
dest_ty,
)?;
}
"strlen" => {
let ptr = self.into_ptr(args[0].value)?.unwrap_or_err()?.to_ptr()?;
let ptr = self.read_scalar(args[0])?.to_ptr()?;
let n = self.memory.read_c_str(ptr)?.len();
let ptr_size = self.memory.pointer_size();
self.write_scalar(dest, Scalar::from_uint(n as u64, ptr_size), dest_ty)?;
self.write_scalar(Scalar::from_uint(n as u64, dest.layout.size), dest)?;
}
// Some things needed for sys::thread initialization to go through
"signal" | "sigaction" | "sigaltstack" => {
let ptr_size = self.memory.pointer_size();
self.write_scalar(dest, Scalar::null(ptr_size), dest_ty)?;
self.write_scalar(Scalar::null(dest.layout.size), dest)?;
}
"sysconf" => {
let name = self.value_to_scalar(args[0])?.to_usize(self)?;
let ptr_size = self.memory.pointer_size();
let name = self.read_scalar(args[0])?.to_i32()?;
trace!("sysconf() called with name {}", name);
// cache the sysconf integers via miri's global cache
let paths = &[
(&["libc", "_SC_PAGESIZE"], Scalar::from_int(4096, ptr_size)),
(&["libc", "_SC_GETPW_R_SIZE_MAX"], Scalar::from_int(-1, ptr_size)),
(&["libc", "_SC_PAGESIZE"], Scalar::from_int(4096, dest.layout.size)),
(&["libc", "_SC_GETPW_R_SIZE_MAX"], Scalar::from_int(-1, dest.layout.size)),
];
let mut result = None;
for &(path, path_value) in paths {
@@ -540,7 +466,9 @@ impl<'a, 'mir, 'tcx: 'mir + 'a> EvalContextExt<'tcx> for EvalContext<'a, 'mir, '
promoted: None,
};
let const_val = self.const_eval(cid)?;
let value = const_val.unwrap_usize(self.tcx.tcx);
let value = const_val.unwrap_bits(
self.tcx.tcx,
ty::ParamEnv::empty().and(self.tcx.types.i32)) as i32;
if value == name {
result = Some(path_value);
break;
@@ -548,7 +476,7 @@ impl<'a, 'mir, 'tcx: 'mir + 'a> EvalContextExt<'tcx> for EvalContext<'a, 'mir, '
}
}
if let Some(result) = result {
self.write_scalar(dest, result, dest_ty)?;
self.write_scalar(result, dest)?;
} else {
return err!(Unimplemented(
format!("Unimplemented sysconf name: {}", name),
@@ -558,10 +486,10 @@ impl<'a, 'mir, 'tcx: 'mir + 'a> EvalContextExt<'tcx> for EvalContext<'a, 'mir, '
// Hook pthread calls that go to the thread-local storage memory subsystem
"pthread_key_create" => {
let key_ptr = self.into_ptr(args[0].value)?.unwrap_or_err()?;
let key_ptr = self.read_scalar(args[0])?.to_ptr()?;
// Extract the function type out of the signature (that seems easier than constructing it ourselves...)
let dtor = match self.into_ptr(args[1].value)?.unwrap_or_err()? {
let dtor = match self.read_scalar(args[1])?.not_undef()? {
Scalar::Ptr(dtor_ptr) => Some(self.memory.get_fn(dtor_ptr)?),
Scalar::Bits { bits: 0, size } => {
assert_eq!(size as u64, self.memory.pointer_size().bytes());
@@ -571,7 +499,7 @@ impl<'a, 'mir, 'tcx: 'mir + 'a> EvalContextExt<'tcx> for EvalContext<'a, 'mir, '
};
// Figure out how large a pthread TLS key actually is. This is libc::pthread_key_t.
let key_type = args[0].ty.builtin_deref(true)
let key_type = args[0].layout.ty.builtin_deref(true)
.ok_or_else(|| EvalErrorKind::AbiViolation("Wrong signature used for pthread_key_create: First argument must be a raw pointer.".to_owned()))?.ty;
let key_layout = self.layout_of(key_type)?;
@@ -585,54 +513,76 @@ impl<'a, 'mir, 'tcx: 'mir + 'a> EvalContextExt<'tcx> for EvalContext<'a, 'mir, '
key_layout.align,
Scalar::from_uint(key, key_layout.size).into(),
key_layout.size,
key_layout.align,
false,
)?;
// Return success (0)
self.write_null(dest, dest_layout)?;
self.write_null(dest)?;
}
"pthread_key_delete" => {
let key = self.value_to_scalar(args[0])?.to_bytes()?;
let key = self.read_scalar(args[0])?.to_bytes()?;
self.memory.delete_tls_key(key)?;
// Return success (0)
self.write_null(dest, dest_layout)?;
self.write_null(dest)?;
}
"pthread_getspecific" => {
let key = self.value_to_scalar(args[0])?.to_bytes()?;
let key = self.read_scalar(args[0])?.to_bytes()?;
let ptr = self.memory.load_tls(key)?;
self.write_ptr(dest, ptr, dest_ty)?;
self.write_scalar(ptr, dest)?;
}
"pthread_setspecific" => {
let key = self.value_to_scalar(args[0])?.to_bytes()?;
let new_ptr = self.into_ptr(args[1].value)?.unwrap_or_err()?;
let key = self.read_scalar(args[0])?.to_bytes()?;
let new_ptr = self.read_scalar(args[1])?.not_undef()?;
self.memory.store_tls(key, new_ptr)?;
// Return success (0)
self.write_null(dest, dest_layout)?;
self.write_null(dest)?;
}
"_tlv_atexit" => {
return err!(Unimplemented("Thread-local store is not fully supported on macOS".to_owned()));
// FIXME: Register the dtor
},
// Stub out all the other pthread calls to just return 0
link_name if link_name.starts_with("pthread_") => {
debug!("ignoring C ABI call: {}", link_name);
self.write_null(dest, dest_layout)?;
// Determining stack base address
"pthread_attr_init" | "pthread_attr_destroy" | "pthread_attr_get_np" |
"pthread_getattr_np" | "pthread_self" | "pthread_get_stacksize_np" => {
self.write_null(dest)?;
}
"pthread_attr_getstack" => {
// second argument is where we are supposed to write the stack size
let ptr = self.ref_to_mplace(self.read_value(args[1])?)?;
let stackaddr = Scalar::from_int(0x80000, args[1].layout.size); // just any address
self.write_scalar(stackaddr, ptr.into())?;
// return 0
self.write_null(dest)?;
}
"pthread_get_stackaddr_np" => {
let stackaddr = Scalar::from_int(0x80000, dest.layout.size); // just any address
self.write_scalar(stackaddr, dest)?;
}
// Stub out calls for condvar, mutex and rwlock to just return 0
"pthread_mutexattr_init" | "pthread_mutexattr_settype" | "pthread_mutex_init" |
"pthread_mutexattr_destroy" | "pthread_mutex_lock" | "pthread_mutex_unlock" |
"pthread_mutex_destroy" | "pthread_rwlock_rdlock" | "pthread_rwlock_unlock" |
"pthread_rwlock_wrlock" | "pthread_rwlock_destroy" | "pthread_condattr_init" |
"pthread_condattr_setclock" | "pthread_cond_init" | "pthread_condattr_destroy" |
"pthread_cond_destroy" => {
self.write_null(dest)?;
}
"mmap" => {
// This is a horrible hack, but well... the guard page mechanism calls mmap and expects a particular return value, so we give it that value
let addr = self.into_ptr(args[0].value)?.unwrap_or_err()?;
self.write_ptr(dest, addr, dest_ty)?;
let addr = self.read_scalar(args[0])?.not_undef()?;
self.write_scalar(addr, dest)?;
}
"mprotect" => {
self.write_null(dest)?;
}
// Windows API stubs
"AddVectoredExceptionHandler" => {
// any non zero value works for the stdlib. This is just used for stackoverflows anyway
let ptr_size = self.memory.pointer_size();
self.write_scalar(dest, Scalar::from_int(1, ptr_size), dest_ty)?;
self.write_scalar(Scalar::from_int(1, dest.layout.size), dest)?;
},
"InitializeCriticalSection" |
"EnterCriticalSection" |
@@ -645,11 +595,11 @@ impl<'a, 'mir, 'tcx: 'mir + 'a> EvalContextExt<'tcx> for EvalContext<'a, 'mir, '
"GetProcAddress" |
"TryEnterCriticalSection" => {
// pretend these do not exist/nothing happened, by returning zero
self.write_scalar(dest, Scalar::from_int(0, dest_layout.size), dest_ty)?;
self.write_null(dest)?;
},
"GetLastError" => {
// this is c::ERROR_CALL_NOT_IMPLEMENTED
self.write_scalar(dest, Scalar::from_int(120, dest_layout.size), dest_ty)?;
self.write_scalar(Scalar::from_int(120, dest.layout.size), dest)?;
},
// Windows TLS
@@ -660,23 +610,23 @@ impl<'a, 'mir, 'tcx: 'mir + 'a> EvalContextExt<'tcx> for EvalContext<'a, 'mir, '
let key = self.memory.create_tls_key(None) as u128;
// Figure out how large a TLS key actually is. This is c::DWORD.
if dest_layout.size.bits() < 128 && key >= (1u128 << dest_layout.size.bits() as u128) {
if dest.layout.size.bits() < 128 && key >= (1u128 << dest.layout.size.bits() as u128) {
return err!(OutOfTls);
}
self.write_scalar(dest, Scalar::from_uint(key, dest_layout.size), dest_layout.ty)?;
self.write_scalar(Scalar::from_uint(key, dest.layout.size), dest)?;
}
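A plain-Rust sketch of the bounds check guarding TlsAlloc above: the freshly created key must fit in the destination integer type (the widths below are illustrative):

fn key_fits(key: u128, dest_bits: u32) -> bool {
    dest_bits >= 128 || key < (1u128 << dest_bits)
}

fn main() {
    assert!(key_fits(u32::MAX as u128, 32)); // largest DWORD value fits
    assert!(!key_fits(1u128 << 32, 32)); // one past the end does not
}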
"TlsGetValue" => {
let key = self.value_to_scalar(args[0])?.to_bytes()?;
let key = self.read_scalar(args[0])?.to_bytes()?;
let ptr = self.memory.load_tls(key)?;
self.write_ptr(dest, ptr, dest_ty)?;
self.write_scalar(ptr, dest)?;
}
"TlsSetValue" => {
let key = self.value_to_scalar(args[0])?.to_bytes()?;
let new_ptr = self.into_ptr(args[1].value)?.unwrap_or_err()?;
let key = self.read_scalar(args[0])?.to_bytes()?;
let new_ptr = self.read_scalar(args[1])?.not_undef()?;
self.memory.store_tls(key, new_ptr)?;
// Return success (1)
self.write_scalar(dest, Scalar::from_int(1, dest_layout.size), dest_ty)?;
self.write_scalar(Scalar::from_int(1, dest.layout.size), dest)?;
}
// We can't execute anything else
@@ -687,11 +637,8 @@ impl<'a, 'mir, 'tcx: 'mir + 'a> EvalContextExt<'tcx> for EvalContext<'a, 'mir, '
}
}
// Since we pushed no stack frame, the main loop will act
// as if the call just completed and it's returning to the
// current frame.
self.dump_local(dest);
self.goto_block(dest_block);
self.goto_block(Some(ret))?;
self.dump_place(*dest);
Ok(())
}
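The link_name resolution at the top of emulate_foreign_item prefers the #[link_name] attribute and falls back to the item's own name; a sketch with a plain map standing in for the attribute query (all names invented):

use std::collections::HashMap;

fn link_name<'a>(attrs: &HashMap<&str, &'a str>, item_name: &'a str) -> &'a str {
    attrs.get("link_name").copied().unwrap_or(item_name)
}

fn main() {
    let mut attrs = HashMap::new();
    attrs.insert("link_name", "malloc");
    assert_eq!(link_name(&attrs, "some_shim"), "malloc");
    assert_eq!(link_name(&HashMap::new(), "free"), "free");
}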
@@ -729,41 +676,27 @@ impl<'a, 'mir, 'tcx: 'mir + 'a> EvalContextExt<'tcx> for EvalContext<'a, 'mir, '
})
}
fn call_missing_fn(
fn emulate_missing_fn(
&mut self,
instance: ty::Instance<'tcx>,
destination: Option<(Place, mir::BasicBlock)>,
args: &[ValTy<'tcx>],
sig: ty::FnSig<'tcx>,
path: String,
_args: &[OpTy<'tcx>],
dest: Option<PlaceTy<'tcx>>,
ret: Option<mir::BasicBlock>,
) -> EvalResult<'tcx> {
// In some cases in non-MIR libstd-mode, not having a destination is legit. Handle these early.
match &path[..] {
"std::panicking::rust_panic_with_hook" |
"core::panicking::panic_fmt::::panic_impl" |
"std::rt::begin_panic_fmt" => return err!(Panic),
"std::rt::begin_panic_fmt" =>
return err!(MachineError("the evaluated program panicked".to_string())),
_ => {}
}
let dest_ty = sig.output();
let (dest, dest_block) = destination.ok_or_else(
let dest = dest.ok_or_else(
// Must be some function we do not support
|| EvalErrorKind::NoMirFor(path.clone()),
)?;
if self.tcx.is_foreign_item(instance.def_id()) {
// An external function
// TODO: That functions actually has a similar preamble to what follows here. May make sense to
// unify these two mechanisms for "hooking into missing functions".
self.call_foreign_item(
instance.def_id(),
args,
dest,
dest_ty,
dest_block,
)?;
return Ok(());
}
match &path[..] {
// A Rust function is missing, which means we are running with MIR missing for libstd (or other dependencies).
// Still, we can make many things mostly work by "emulating" or ignoring some functions.
@@ -784,22 +717,18 @@ impl<'a, 'mir, 'tcx: 'mir + 'a> EvalContextExt<'tcx> for EvalContext<'a, 'mir, '
"std::panicking::panicking" |
"std::rt::panicking" => {
// we abort on panic -> `std::rt::panicking` always returns false
let bool = self.tcx.types.bool;
self.write_scalar(dest, Scalar::from_bool(false), bool)?;
self.write_scalar(Scalar::from_bool(false), dest)?;
}
_ => return err!(NoMirFor(path)),
}
// Since we pushed no stack frame, the main loop will act
// as if the call just completed and it's returning to the
// current frame.
self.dump_local(dest);
self.goto_block(dest_block);
self.goto_block(ret)?;
self.dump_place(*dest);
Ok(())
}
fn write_null(&mut self, dest: Place, dest_layout: TyLayout<'tcx>) -> EvalResult<'tcx> {
self.write_scalar(dest, Scalar::null(dest_layout.size), dest_layout.ty)
fn write_null(&mut self, dest: PlaceTy<'tcx>) -> EvalResult<'tcx> {
self.write_scalar(Scalar::null(dest.layout.size), dest)
}
}


@@ -1,128 +1,71 @@
use mir;
use rustc::ty::Ty;
use rustc::ty::layout::{LayoutOf, Size};
use rustc::ty::layout::Size;
use super::{Scalar, ScalarExt, EvalResult, EvalContext, ValTy};
use rustc_mir::interpret::sign_extend;
use super::{Scalar, ScalarMaybeUndef, EvalResult};
pub trait EvalContextExt<'tcx> {
fn wrapping_pointer_offset(
&self,
ptr: Scalar,
pointee_ty: Ty<'tcx>,
offset: i64,
) -> EvalResult<'tcx, Scalar>;
fn pointer_offset(
&self,
ptr: Scalar,
pointee_ty: Ty<'tcx>,
offset: i64,
) -> EvalResult<'tcx, Scalar>;
fn value_to_isize(
&self,
value: ValTy<'tcx>,
) -> EvalResult<'tcx, i64>;
fn value_to_usize(
&self,
value: ValTy<'tcx>,
) -> EvalResult<'tcx, u64>;
fn value_to_i32(
&self,
value: ValTy<'tcx>,
) -> EvalResult<'tcx, i32>;
fn value_to_u8(
&self,
value: ValTy<'tcx>,
) -> EvalResult<'tcx, u8>;
pub trait ScalarExt {
fn null(size: Size) -> Self;
fn from_i32(i: i32) -> Self;
fn from_uint(i: impl Into<u128>, ptr_size: Size) -> Self;
fn from_int(i: impl Into<i128>, ptr_size: Size) -> Self;
fn from_f32(f: f32) -> Self;
fn from_f64(f: f64) -> Self;
fn is_null(self) -> bool;
}
impl<'a, 'mir, 'tcx> EvalContextExt<'tcx> for EvalContext<'a, 'mir, 'tcx, super::Evaluator<'tcx>> {
fn wrapping_pointer_offset(
&self,
ptr: Scalar,
pointee_ty: Ty<'tcx>,
offset: i64,
) -> EvalResult<'tcx, Scalar> {
// FIXME: assuming here that type size is < i64::max_value()
let pointee_size = self.layout_of(pointee_ty)?.size.bytes() as i64;
let offset = offset.overflowing_mul(pointee_size).0;
Ok(ptr.ptr_wrapping_signed_offset(offset, self))
pub trait FalibleScalarExt {
/// HACK: this function just extracts all bits if `defined != 0`
/// Mainly used for args of C-functions and we should totally correctly fetch the size
/// of their arguments
fn to_bytes(self) -> EvalResult<'static, u128>;
}
impl ScalarExt for Scalar {
fn null(size: Size) -> Self {
Scalar::Bits { bits: 0, size: size.bytes() as u8 }
}
fn pointer_offset(
&self,
ptr: Scalar,
pointee_ty: Ty<'tcx>,
offset: i64,
) -> EvalResult<'tcx, Scalar> {
// This function raises an error if the offset moves the pointer outside of its allocation. We consider
// ZSTs their own huge allocation that doesn't overlap with anything (and nothing moves in there because the size is 0).
// We also consider the NULL pointer its own separate allocation, and all the remaining integers pointers their own
// allocation.
fn from_i32(i: i32) -> Self {
Scalar::Bits { bits: i as u32 as u128, size: 4 }
}
if ptr.is_null() {
// NULL pointers must only be offset by 0
return if offset == 0 {
Ok(ptr)
} else {
err!(InvalidNullPointerUsage)
};
fn from_uint(i: impl Into<u128>, size: Size) -> Self {
Scalar::Bits { bits: i.into(), size: size.bytes() as u8 }
}
fn from_int(i: impl Into<i128>, size: Size) -> Self {
Scalar::Bits { bits: i.into() as u128, size: size.bytes() as u8 }
}
fn from_f32(f: f32) -> Self {
Scalar::Bits { bits: f.to_bits() as u128, size: 4 }
}
fn from_f64(f: f64) -> Self {
Scalar::Bits { bits: f.to_bits() as u128, size: 8 }
}
fn is_null(self) -> bool {
match self {
Scalar::Bits { bits, .. } => bits == 0,
Scalar::Ptr(_) => false
}
// FIXME: assuming here that type size is < i64::max_value()
let pointee_size = self.layout_of(pointee_ty)?.size.bytes() as i64;
if let Some(offset) = offset.checked_mul(pointee_size) {
let ptr = ptr.ptr_signed_offset(offset, self)?;
// Do not do bounds-checking for integers; they can never alias a normal pointer anyway.
if let Scalar::Ptr(ptr) = ptr {
self.memory.check_bounds(ptr, false)?;
} else if ptr.is_null() {
// We moved *to* a NULL pointer. That seems wrong, LLVM considers the NULL pointer its own small allocation. Reject this, for now.
return err!(InvalidNullPointerUsage);
}
Ok(ptr)
} else {
err!(Overflow(mir::BinOp::Mul))
}
}
fn value_to_isize(
&self,
value: ValTy<'tcx>,
) -> EvalResult<'tcx, i64> {
assert_eq!(value.ty, self.tcx.types.isize);
let raw = self.value_to_scalar(value)?.to_bits(self.memory.pointer_size())?;
let raw = sign_extend(raw, self.layout_of(self.tcx.types.isize).unwrap());
Ok(raw as i64)
}
fn value_to_usize(
&self,
value: ValTy<'tcx>,
) -> EvalResult<'tcx, u64> {
assert_eq!(value.ty, self.tcx.types.usize);
self.value_to_scalar(value)?.to_bits(self.memory.pointer_size()).map(|v| v as u64)
}
fn value_to_i32(
&self,
value: ValTy<'tcx>,
) -> EvalResult<'tcx, i32> {
assert_eq!(value.ty, self.tcx.types.i32);
let raw = self.value_to_scalar(value)?.to_bits(Size::from_bits(32))?;
let raw = sign_extend(raw, self.layout_of(self.tcx.types.i32).unwrap());
Ok(raw as i32)
}
fn value_to_u8(
&self,
value: ValTy<'tcx>,
) -> EvalResult<'tcx, u8> {
assert_eq!(value.ty, self.tcx.types.u8);
self.value_to_scalar(value)?.to_bits(Size::from_bits(8)).map(|v| v as u8)
}
}
impl FalibleScalarExt for Scalar {
fn to_bytes(self) -> EvalResult<'static, u128> {
match self {
Scalar::Bits { bits, size } => {
assert_ne!(size, 0);
Ok(bits)
},
Scalar::Ptr(_) => err!(ReadPointerAsBytes),
}
}
}
impl FalibleScalarExt for ScalarMaybeUndef {
fn to_bytes(self) -> EvalResult<'static, u128> {
self.not_undef()?.to_bytes()
}
}
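A minimal stand-in (not the rustc types) for the Scalar shape these helpers operate on, showing the from_int/is_null behavior defined above:

#[derive(Clone, Copy)]
enum Scalar {
    Bits { bits: u128, size: u8 },
    Ptr, // abstract pointer; never null here
}

impl Scalar {
    fn from_int(i: i128, size: u8) -> Self {
        Scalar::Bits { bits: i as u128, size }
    }
    fn is_null(self) -> bool {
        matches!(self, Scalar::Bits { bits: 0, .. })
    }
}

fn main() {
    assert!(Scalar::from_int(0, 8).is_null());
    assert!(!Scalar::from_int(-1, 8).is_null());
    assert!(!Scalar::Ptr.is_null());
}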


@@ -1,22 +1,20 @@
use rustc::mir;
use rustc::ty::layout::{TyLayout, LayoutOf, Size, Primitive, Integer::*};
use rustc::ty::layout::{self, LayoutOf, Size};
use rustc::ty;
use rustc::mir::interpret::{EvalResult, Scalar, Value, ScalarMaybeUndef};
use rustc_mir::interpret::{Place, PlaceExtra, HasMemory, EvalContext, ValTy};
use rustc::mir::interpret::{EvalResult, Scalar, ScalarMaybeUndef};
use rustc_mir::interpret::{
PlaceTy, EvalContext, OpTy, Value
};
use helpers::EvalContextExt as HelperEvalContextExt;
use super::ScalarExt;
use super::{ScalarExt, FalibleScalarExt, OperatorEvalContextExt};
pub trait EvalContextExt<'tcx> {
fn call_intrinsic(
&mut self,
instance: ty::Instance<'tcx>,
args: &[ValTy<'tcx>],
dest: Place,
dest_layout: TyLayout<'tcx>,
target: mir::BasicBlock,
args: &[OpTy<'tcx>],
dest: PlaceTy<'tcx>,
) -> EvalResult<'tcx>;
}
@@ -24,54 +22,63 @@ impl<'a, 'mir, 'tcx> EvalContextExt<'tcx> for EvalContext<'a, 'mir, 'tcx, super:
fn call_intrinsic(
&mut self,
instance: ty::Instance<'tcx>,
args: &[ValTy<'tcx>],
dest: Place,
dest_layout: TyLayout<'tcx>,
target: mir::BasicBlock,
args: &[OpTy<'tcx>],
dest: PlaceTy<'tcx>,
) -> EvalResult<'tcx> {
if self.emulate_intrinsic(instance, args, dest)? {
return Ok(());
}
let substs = instance.substs;
let intrinsic_name = &self.tcx.item_name(instance.def_id()).as_str()[..];
match intrinsic_name {
"add_with_overflow" => {
self.intrinsic_with_overflow(
let l = self.read_value(args[0])?;
let r = self.read_value(args[1])?;
self.binop_with_overflow(
mir::BinOp::Add,
args[0],
args[1],
l,
r,
dest,
dest_layout.ty,
)?
}
"sub_with_overflow" => {
self.intrinsic_with_overflow(
let l = self.read_value(args[0])?;
let r = self.read_value(args[1])?;
self.binop_with_overflow(
mir::BinOp::Sub,
args[0],
args[1],
l,
r,
dest,
dest_layout.ty,
)?
}
"mul_with_overflow" => {
self.intrinsic_with_overflow(
let l = self.read_value(args[0])?;
let r = self.read_value(args[1])?;
self.binop_with_overflow(
mir::BinOp::Mul,
args[0],
args[1],
l,
r,
dest,
dest_layout.ty,
)?
}
"arith_offset" => {
let offset = self.value_to_isize(args[1])?;
let ptr = self.into_ptr(args[0].value)?.unwrap_or_err()?;
let result_ptr = self.wrapping_pointer_offset(ptr, substs.type_at(0), offset)?;
self.write_ptr(dest, result_ptr, dest_layout.ty)?;
let offset = self.read_scalar(args[1])?.to_isize(&self)?;
let ptr = self.read_scalar(args[0])?.not_undef()?;
let pointee_ty = substs.type_at(0);
let pointee_size = self.layout_of(pointee_ty)?.size.bytes() as i64;
let offset = offset.overflowing_mul(pointee_size).0;
let result_ptr = ptr.ptr_wrapping_signed_offset(offset, &self);
self.write_scalar(result_ptr, dest)?;
}
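A plain sketch of the wrapping arithmetic in the arith_offset arm above: the element count is scaled to a byte offset with wrapping multiplication, then added to the address with wrapping semantics (addresses invented):

fn wrapping_byte_offset(addr: u64, count: i64, elem_size: i64) -> u64 {
    let byte_offset = count.wrapping_mul(elem_size);
    addr.wrapping_add(byte_offset as u64)
}

fn main() {
    assert_eq!(wrapping_byte_offset(0x100, 2, 4), 0x108);
    assert_eq!(wrapping_byte_offset(0x100, -2, 4), 0xF8);
}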
"assume" => {
let cond = self.value_to_scalar(args[0])?.to_bool()?;
let cond = self.read_scalar(args[0])?.to_bool()?;
if !cond {
return err!(AssumptionNotHeld);
}
@@ -81,24 +88,18 @@ impl<'a, 'mir, 'tcx> EvalContextExt<'tcx> for EvalContext<'a, 'mir, 'tcx, super:
"atomic_load_relaxed" |
"atomic_load_acq" |
"volatile_load" => {
let ptr = self.into_ptr(args[0].value)?.unwrap_or_err()?;
let align = self.layout_of(args[0].ty)?.align;
let valty = ValTy {
value: Value::ByRef(ptr, align),
ty: substs.type_at(0),
};
self.write_value(valty, dest)?;
let ptr = self.ref_to_mplace(self.read_value(args[0])?)?;
let val = self.read_scalar(ptr.into())?; // make sure it fits into a scalar; otherwise it cannot be atomic
self.write_scalar(val, dest)?;
}
"atomic_store" |
"atomic_store_relaxed" |
"atomic_store_rel" |
"volatile_store" => {
let ty = substs.type_at(0);
let align = self.layout_of(ty)?.align;
let dest = self.into_ptr(args[0].value)?.unwrap_or_err()?;
self.write_value_to_ptr(args[1].value, dest, align, ty)?;
let ptr = self.ref_to_mplace(self.read_value(args[0])?)?;
let val = self.read_scalar(args[1])?; // make sure it fits into a scalar; otherwise it cannot be atomic
self.write_scalar(val, ptr.into())?;
}
"atomic_fence_acq" => {
@@ -106,47 +107,26 @@ impl<'a, 'mir, 'tcx> EvalContextExt<'tcx> for EvalContext<'a, 'mir, 'tcx, super:
}
_ if intrinsic_name.starts_with("atomic_xchg") => {
let ty = substs.type_at(0);
let align = self.layout_of(ty)?.align;
let ptr = self.into_ptr(args[0].value)?.unwrap_or_err()?;
let change = self.value_to_scalar(args[1])?;
let old = self.read_value(ptr, align, ty)?;
let old = match old {
Value::Scalar(val) => val,
Value::ByRef { .. } => bug!("just read the value, can't be byref"),
Value::ScalarPair(..) => bug!("atomic_xchg doesn't work with nonprimitives"),
};
self.write_scalar(dest, old, ty)?;
self.write_scalar(
Place::from_scalar_ptr(ptr.into(), align),
change,
ty,
)?;
let ptr = self.ref_to_mplace(self.read_value(args[0])?)?;
let new = self.read_scalar(args[1])?;
let old = self.read_scalar(ptr.into())?;
self.write_scalar(old, dest)?; // old value is returned
self.write_scalar(new, ptr.into())?;
}
_ if intrinsic_name.starts_with("atomic_cxchg") => {
let ty = substs.type_at(0);
let align = self.layout_of(ty)?.align;
let ptr = self.into_ptr(args[0].value)?.unwrap_or_err()?;
let expect_old = self.value_to_scalar(args[1])?;
let change = self.value_to_scalar(args[2])?;
let old = self.read_value(ptr, align, ty)?;
let old = match old {
Value::Scalar(val) => val.unwrap_or_err()?,
Value::ByRef { .. } => bug!("just read the value, can't be byref"),
Value::ScalarPair(..) => bug!("atomic_cxchg doesn't work with nonprimitives"),
};
let (val, _) = self.binary_op(mir::BinOp::Eq, old, ty, expect_old, ty)?;
let valty = ValTy {
value: Value::ScalarPair(old.into(), val.into()),
ty: dest_layout.ty,
};
self.write_value(valty, dest)?;
self.write_scalar(
Place::from_scalar_ptr(ptr.into(), dest_layout.align),
change,
ty,
)?;
let ptr = self.ref_to_mplace(self.read_value(args[0])?)?;
let expect_old = self.read_value(args[1])?; // read as value for the sake of `binary_op()`
let new = self.read_scalar(args[2])?;
let old = self.read_value(ptr.into())?; // read as value for the sake of `binary_op()`
// binary_op will bail if either of them is not a scalar
let (eq, _) = self.binary_op(mir::BinOp::Eq, old, expect_old)?;
let res = Value::ScalarPair(old.to_scalar_or_undef(), eq.into());
self.write_value(res, dest)?; // old value is returned
// update ptr depending on comparison
if eq.to_bool()? {
self.write_scalar(new, ptr.into())?;
}
}
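An illustrative single-threaded model of the compare-exchange semantics implemented above: the old value and the comparison result come back as a pair, and the store happens only on equality.

fn cxchg(slot: &mut u64, expected: u64, new: u64) -> (u64, bool) {
    let old = *slot;
    let success = old == expected;
    if success {
        *slot = new; // update only if the comparison succeeded
    }
    (old, success)
}

fn main() {
    let mut x: u64 = 5;
    assert_eq!(cxchg(&mut x, 5, 7), (5, true));
    assert_eq!(x, 7);
    assert_eq!(cxchg(&mut x, 5, 9), (7, false));
    assert_eq!(x, 7);
}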
"atomic_or" |
@@ -174,19 +154,10 @@ impl<'a, 'mir, 'tcx> EvalContextExt<'tcx> for EvalContext<'a, 'mir, 'tcx, super:
"atomic_xsub_rel" |
"atomic_xsub_acqrel" |
"atomic_xsub_relaxed" => {
let ty = substs.type_at(0);
let align = self.layout_of(ty)?.align;
let ptr = self.into_ptr(args[0].value)?.unwrap_or_err()?;
let change = self.value_to_scalar(args[1])?;
let old = self.read_value(ptr, align, ty)?;
let old = match old {
Value::Scalar(val) => val,
Value::ByRef { .. } => bug!("just read the value, can't be byref"),
Value::ScalarPair(..) => {
bug!("atomic_xadd_relaxed doesn't work with nonprimitives")
}
};
self.write_scalar(dest, old, ty)?;
let ptr = self.ref_to_mplace(self.read_value(args[0])?)?;
let rhs = self.read_value(args[1])?;
let old = self.read_value(ptr.into())?;
self.write_value(*old, dest)?; // old value is returned
let op = match intrinsic_name.split('_').nth(1).unwrap() {
"or" => mir::BinOp::BitOr,
"xor" => mir::BinOp::BitXor,
@@ -196,8 +167,8 @@ impl<'a, 'mir, 'tcx> EvalContextExt<'tcx> for EvalContext<'a, 'mir, 'tcx, super:
_ => bug!(),
};
// FIXME: what do atomics do on overflow?
let (val, _) = self.binary_op(op, old.unwrap_or_err()?, ty, change, ty)?;
self.write_scalar(Place::from_scalar_ptr(ptr.into(), dest_layout.align), val, ty)?;
let (val, _) = self.binary_op(op, old, rhs)?;
self.write_scalar(val, ptr.into())?;
}
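The op selection above keys off the second underscore-separated component of the intrinsic name; a quick check of that parsing in plain Rust:

fn main() {
    assert_eq!("atomic_xadd_relaxed".split('_').nth(1), Some("xadd"));
    assert_eq!("atomic_or".split('_').nth(1), Some("or"));
    assert_eq!("atomic_xsub_rel".split('_').nth(1), Some("xsub"));
}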
"breakpoint" => unimplemented!(), // halt miri
@@ -207,13 +178,13 @@ impl<'a, 'mir, 'tcx> EvalContextExt<'tcx> for EvalContext<'a, 'mir, 'tcx, super:
let elem_ty = substs.type_at(0);
let elem_layout = self.layout_of(elem_ty)?;
let elem_size = elem_layout.size.bytes();
let count = self.value_to_usize(args[2])?;
let count = self.read_scalar(args[2])?.to_usize(&self)?;
if count * elem_size != 0 {
// TODO: We do not even validate alignment for the 0-bytes case. libstd relies on this in vec::IntoIter::next.
// Also see the write_bytes intrinsic.
let elem_align = elem_layout.align;
let src = self.into_ptr(args[0].value)?.unwrap_or_err()?;
let dest = self.into_ptr(args[1].value)?.unwrap_or_err()?;
let src = self.read_scalar(args[0])?.not_undef()?;
let dest = self.read_scalar(args[1])?.not_undef()?;
self.memory.copy(
src,
elem_align,
@@ -225,37 +196,15 @@ impl<'a, 'mir, 'tcx> EvalContextExt<'tcx> for EvalContext<'a, 'mir, 'tcx, super:
}
}
"ctpop" | "cttz" | "cttz_nonzero" | "ctlz" | "ctlz_nonzero" | "bswap" => {
let ty = substs.type_at(0);
let num = self.value_to_scalar(args[0])?.to_bytes()?;
let kind = match self.layout_of(ty)?.abi {
ty::layout::Abi::Scalar(ref scalar) => scalar.value,
_ => Err(::rustc::mir::interpret::EvalErrorKind::TypeNotPrimitive(ty))?,
};
let num = if intrinsic_name.ends_with("_nonzero") {
if num == 0 {
return err!(Intrinsic(format!("{} called on 0", intrinsic_name)));
}
numeric_intrinsic(intrinsic_name.trim_right_matches("_nonzero"), num, kind)?
} else {
numeric_intrinsic(intrinsic_name, num, kind)?
};
self.write_scalar(dest, num, ty)?;
}
"discriminant_value" => {
let ty = substs.type_at(0);
let layout = self.layout_of(ty)?;
let adt_ptr = self.into_ptr(args[0].value)?;
let adt_align = self.layout_of(args[0].ty)?.align;
let place = Place::from_scalar_ptr(adt_ptr, adt_align);
let discr_val = self.read_discriminant_value(place, layout)?;
self.write_scalar(dest, Scalar::from_uint(discr_val, dest_layout.size), dest_layout.ty)?;
let place = self.ref_to_mplace(self.read_value(args[0])?)?;
let discr_val = self.read_discriminant(place.into())?.0;
self.write_scalar(Scalar::from_uint(discr_val, dest.layout.size), dest)?;
}
"sinf32" | "fabsf32" | "cosf32" | "sqrtf32" | "expf32" | "exp2f32" | "logf32" |
"log10f32" | "log2f32" | "floorf32" | "ceilf32" | "truncf32" => {
let f = self.value_to_scalar(args[0])?.to_bytes()?;
let f = self.read_scalar(args[0])?.to_bytes()?;
let f = f32::from_bits(f as u32);
let f = match intrinsic_name {
"sinf32" => f.sin(),
@@ -272,12 +221,12 @@ impl<'a, 'mir, 'tcx> EvalContextExt<'tcx> for EvalContext<'a, 'mir, 'tcx, super:
"truncf32" => f.trunc(),
_ => bug!(),
};
self.write_scalar(dest, Scalar::from_f32(f), dest_layout.ty)?;
self.write_scalar(Scalar::from_f32(f), dest)?;
}
"sinf64" | "fabsf64" | "cosf64" | "sqrtf64" | "expf64" | "exp2f64" | "logf64" |
"log10f64" | "log2f64" | "floorf64" | "ceilf64" | "truncf64" => {
let f = self.value_to_scalar(args[0])?.to_bytes()?;
let f = self.read_scalar(args[0])?.to_bytes()?;
let f = f64::from_bits(f as u64);
let f = match intrinsic_name {
"sinf64" => f.sin(),
@@ -294,13 +243,12 @@ impl<'a, 'mir, 'tcx> EvalContextExt<'tcx> for EvalContext<'a, 'mir, 'tcx, super:
"truncf64" => f.trunc(),
_ => bug!(),
};
self.write_scalar(dest, Scalar::from_f64(f), dest_layout.ty)?;
self.write_scalar(Scalar::from_f64(f), dest)?;
}
"fadd_fast" | "fsub_fast" | "fmul_fast" | "fdiv_fast" | "frem_fast" => {
let ty = substs.type_at(0);
let a = self.value_to_scalar(args[0])?;
let b = self.value_to_scalar(args[1])?;
let a = self.read_value(args[0])?;
let b = self.read_value(args[1])?;
let op = match intrinsic_name {
"fadd_fast" => mir::BinOp::Add,
"fsub_fast" => mir::BinOp::Sub,
@@ -309,74 +257,61 @@ impl<'a, 'mir, 'tcx> EvalContextExt<'tcx> for EvalContext<'a, 'mir, 'tcx, super:
"frem_fast" => mir::BinOp::Rem,
_ => bug!(),
};
let result = self.binary_op(op, a, ty, b, ty)?;
self.write_scalar(dest, result.0, dest_layout.ty)?;
let result = self.binary_op(op, a, b)?;
self.write_scalar(result.0, dest)?;
}
"exact_div" => {
// Performs an exact division, resulting in undefined behavior where
// `x % y != 0` or `y == 0` or `x == T::min_value() && y == -1`
let ty = substs.type_at(0);
let a = self.value_to_scalar(args[0])?;
let b = self.value_to_scalar(args[1])?;
let a = self.read_value(args[0])?;
let b = self.read_value(args[1])?;
// check x % y != 0
if !self.binary_op(mir::BinOp::Rem, a, ty, b, ty)?.0.is_null() {
if !self.binary_op(mir::BinOp::Rem, a, b)?.0.is_null() {
return err!(ValidationFailure(format!("exact_div: {:?} cannot be divided by {:?}", a, b)));
}
let result = self.binary_op(mir::BinOp::Div, a, ty, b, ty)?;
self.write_scalar(dest, result.0, dest_layout.ty)?;
let result = self.binary_op(mir::BinOp::Div, a, b)?;
self.write_scalar(result.0, dest)?;
},
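A safe plain-Rust model of the three preconditions the exact_div arm cares about (nonzero divisor, zero remainder, no MIN / -1 overflow), using checked arithmetic:

fn exact_div(x: i64, y: i64) -> Option<i64> {
    match x.checked_rem(y) {
        Some(0) => x.checked_div(y), // remainder is zero: division is exact
        _ => None, // y == 0, inexact division, or i64::MIN % -1 overflow
    }
}

fn main() {
    assert_eq!(exact_div(12, 4), Some(3));
    assert_eq!(exact_div(13, 4), None);
    assert_eq!(exact_div(5, 0), None);
    assert_eq!(exact_div(i64::MIN, -1), None);
}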
"likely" | "unlikely" | "forget" => {}
"init" => {
// we don't want to force an allocation in case the destination is a simple value
match dest {
Place::Local { frame, local } => {
match self.stack()[frame].locals[local].access()? {
Value::ByRef(ptr, _) => {
// These writes have no alignment restriction anyway.
self.memory.write_repeat(ptr, 0, dest_layout.size)?;
}
Value::Scalar(_) => self.write_value(ValTy { value: Value::Scalar(Scalar::null(dest_layout.size).into()), ty: dest_layout.ty }, dest)?,
Value::ScalarPair(..) => {
self.write_value(ValTy { value: Value::ScalarPair(Scalar::null(dest_layout.size).into(), Scalar::null(dest_layout.size).into()), ty: dest_layout.ty }, dest)?;
}
// Check fast path: we don't want to force an allocation in case the destination is a simple value,
// but we also do not want to create a new allocation with 0s and then copy that over.
if !dest.layout.is_zst() { // nothing to do for ZST
match dest.layout.abi {
layout::Abi::Scalar(ref s) => {
let x = Scalar::null(s.value.size(&self));
self.write_value(Value::Scalar(x.into()), dest)?;
}
layout::Abi::ScalarPair(ref s1, ref s2) => {
let x = Scalar::null(s1.value.size(&self));
let y = Scalar::null(s2.value.size(&self));
self.write_value(Value::ScalarPair(x.into(), y.into()), dest)?;
}
_ => {
// Do it in memory
let mplace = self.force_allocation(dest)?;
assert!(mplace.extra.is_none());
self.memory.write_repeat(mplace.ptr, 0, dest.layout.size)?;
}
},
Place::Ptr {
ptr,
align: _align,
extra: PlaceExtra::None,
} => self.memory.write_repeat(ptr.unwrap_or_err()?, 0, dest_layout.size)?,
Place::Ptr { .. } => {
bug!("init intrinsic tried to write to fat or unaligned ptr target")
}
}
}
"min_align_of" => {
let elem_ty = substs.type_at(0);
let elem_align = self.layout_of(elem_ty)?.align.abi();
let ptr_size = self.memory.pointer_size();
let align_val = Scalar::from_uint(elem_align as u128, ptr_size);
self.write_scalar(dest, align_val, dest_layout.ty)?;
}
"pref_align_of" => {
let ty = substs.type_at(0);
let layout = self.layout_of(ty)?;
let align = layout.align.pref();
let ptr_size = self.memory.pointer_size();
let align_val = Scalar::from_uint(align as u128, ptr_size);
self.write_scalar(dest, align_val, dest_layout.ty)?;
self.write_scalar(align_val, dest)?;
}
"move_val_init" => {
let ty = substs.type_at(0);
let ptr = self.into_ptr(args[0].value)?.unwrap_or_err()?;
let align = self.layout_of(args[0].ty)?.align;
self.write_value_to_ptr(args[1].value, ptr, align, ty)?;
let ptr = self.ref_to_mplace(self.read_value(args[0])?)?;
self.copy_op(args[1], ptr.into())?;
}
"needs_drop" => {
@@ -384,150 +319,137 @@ impl<'a, 'mir, 'tcx> EvalContextExt<'tcx> for EvalContext<'a, 'mir, 'tcx, super:
let env = ty::ParamEnv::reveal_all();
let needs_drop = ty.needs_drop(self.tcx.tcx, env);
self.write_scalar(
dest,
Scalar::from_bool(needs_drop),
dest_layout.ty,
dest,
)?;
}
"offset" => {
let offset = self.value_to_isize(args[1])?;
let ptr = self.into_ptr(args[0].value)?.unwrap_or_err()?;
let result_ptr = self.pointer_offset(ptr, substs.type_at(0), offset)?;
self.write_ptr(dest, result_ptr, dest_layout.ty)?;
let offset = self.read_scalar(args[1])?.to_isize(&self)?;
let ptr = self.read_scalar(args[0])?.not_undef()?;
let result_ptr = self.pointer_offset_inbounds(ptr, substs.type_at(0), offset)?;
self.write_scalar(result_ptr, dest)?;
}
"overflowing_sub" => {
self.intrinsic_overflowing(
let l = self.read_value(args[0])?;
let r = self.read_value(args[1])?;
self.binop_ignore_overflow(
mir::BinOp::Sub,
args[0],
args[1],
l,
r,
dest,
dest_layout.ty,
)?;
}
"overflowing_mul" => {
self.intrinsic_overflowing(
let l = self.read_value(args[0])?;
let r = self.read_value(args[1])?;
self.binop_ignore_overflow(
mir::BinOp::Mul,
args[0],
args[1],
r,
l,
dest,
dest_layout.ty,
)?;
}
"overflowing_add" => {
self.intrinsic_overflowing(
let l = self.read_value(args[0])?;
let r = self.read_value(args[1])?;
self.binop_ignore_overflow(
mir::BinOp::Add,
args[0],
args[1],
r,
l,
dest,
dest_layout.ty,
)?;
}
"powf32" => {
let f = self.value_to_scalar(args[0])?.to_bits(Size::from_bits(32))?;
let f = self.read_scalar(args[0])?.to_bits(Size::from_bits(32))?;
let f = f32::from_bits(f as u32);
let f2 = self.value_to_scalar(args[1])?.to_bits(Size::from_bits(32))?;
let f2 = self.read_scalar(args[1])?.to_bits(Size::from_bits(32))?;
let f2 = f32::from_bits(f2 as u32);
self.write_scalar(
dest,
Scalar::from_f32(f.powf(f2)),
dest_layout.ty,
dest,
)?;
}
"powf64" => {
let f = self.value_to_scalar(args[0])?.to_bits(Size::from_bits(64))?;
let f = self.read_scalar(args[0])?.to_bits(Size::from_bits(64))?;
let f = f64::from_bits(f as u64);
let f2 = self.value_to_scalar(args[1])?.to_bits(Size::from_bits(64))?;
let f2 = self.read_scalar(args[1])?.to_bits(Size::from_bits(64))?;
let f2 = f64::from_bits(f2 as u64);
self.write_scalar(
dest,
Scalar::from_f64(f.powf(f2)),
dest_layout.ty,
dest,
)?;
}
"fmaf32" => {
let a = self.value_to_scalar(args[0])?.to_bits(Size::from_bits(32))?;
let a = self.read_scalar(args[0])?.to_bits(Size::from_bits(32))?;
let a = f32::from_bits(a as u32);
let b = self.value_to_scalar(args[1])?.to_bits(Size::from_bits(32))?;
let b = self.read_scalar(args[1])?.to_bits(Size::from_bits(32))?;
let b = f32::from_bits(b as u32);
let c = self.value_to_scalar(args[2])?.to_bits(Size::from_bits(32))?;
let c = self.read_scalar(args[2])?.to_bits(Size::from_bits(32))?;
let c = f32::from_bits(c as u32);
self.write_scalar(
dest,
Scalar::from_f32(a * b + c),
dest_layout.ty,
dest,
)?;
}
"fmaf64" => {
let a = self.value_to_scalar(args[0])?.to_bits(Size::from_bits(64))?;
let a = self.read_scalar(args[0])?.to_bits(Size::from_bits(64))?;
let a = f64::from_bits(a as u64);
let b = self.value_to_scalar(args[1])?.to_bits(Size::from_bits(64))?;
let b = self.read_scalar(args[1])?.to_bits(Size::from_bits(64))?;
let b = f64::from_bits(b as u64);
let c = self.value_to_scalar(args[2])?.to_bits(Size::from_bits(64))?;
let c = self.read_scalar(args[2])?.to_bits(Size::from_bits(64))?;
let c = f64::from_bits(c as u64);
self.write_scalar(
dest,
Scalar::from_f64(a * b + c),
dest_layout.ty,
dest,
)?;
}
"powif32" => {
let f = self.value_to_scalar(args[0])?.to_bits(Size::from_bits(32))?;
let f = self.read_scalar(args[0])?.to_bits(Size::from_bits(32))?;
let f = f32::from_bits(f as u32);
let i = self.value_to_i32(args[1])?;
let i = self.read_scalar(args[1])?.to_i32()?;
self.write_scalar(
dest,
Scalar::from_f32(f.powi(i)),
dest_layout.ty,
dest,
)?;
}
"powif64" => {
let f = self.value_to_scalar(args[0])?.to_bits(Size::from_bits(64))?;
let f = self.read_scalar(args[0])?.to_bits(Size::from_bits(64))?;
let f = f64::from_bits(f as u64);
let i = self.value_to_i32(args[1])?;
let i = self.read_scalar(args[1])?.to_i32()?;
self.write_scalar(
dest,
Scalar::from_f64(f.powi(i)),
dest_layout.ty,
dest,
)?;
}
"size_of" => {
let ty = substs.type_at(0);
let size = self.layout_of(ty)?.size.bytes();
let ptr_size = self.memory.pointer_size();
self.write_scalar(dest, Scalar::from_uint(size, ptr_size), dest_layout.ty)?;
}
"size_of_val" => {
let ty = substs.type_at(0);
let (size, _) = self.size_and_align_of_dst(ty, args[0].value)?;
let mplace = self.ref_to_mplace(self.read_value(args[0])?)?;
let (size, _) = self.size_and_align_of_mplace(mplace)?;
let ptr_size = self.memory.pointer_size();
self.write_scalar(
dest,
Scalar::from_uint(size.bytes() as u128, ptr_size),
dest_layout.ty,
dest,
)?;
}
"min_align_of_val" |
"align_of_val" => {
let ty = substs.type_at(0);
let (_, align) = self.size_and_align_of_dst(ty, args[0].value)?;
let mplace = self.ref_to_mplace(self.read_value(args[0])?)?;
let (_, align) = self.size_and_align_of_mplace(mplace)?;
let ptr_size = self.memory.pointer_size();
self.write_scalar(
dest,
Scalar::from_uint(align.abi(), ptr_size),
dest_layout.ty,
dest,
)?;
}
@@ -535,110 +457,102 @@ impl<'a, 'mir, 'tcx> EvalContextExt<'tcx> for EvalContext<'a, 'mir, 'tcx, super:
let ty = substs.type_at(0);
let ty_name = ty.to_string();
let value = self.str_to_value(&ty_name)?;
self.write_value(ValTy { value, ty: dest_layout.ty }, dest)?;
}
"type_id" => {
let ty = substs.type_at(0);
let n = self.tcx.type_id_hash(ty);
self.write_scalar(dest, Scalar::Bits { bits: n as u128, size: 8 }, dest_layout.ty)?;
self.write_value(value, dest)?;
}
"transmute" => {
let src_ty = substs.type_at(0);
let _src_align = self.layout_of(src_ty)?.align;
let ptr = self.force_allocation(dest)?.to_ptr()?;
let dest_align = self.layout_of(substs.type_at(1))?.align;
self.write_value_to_ptr(args[0].value, ptr.into(), dest_align, src_ty).unwrap();
// Go through an allocation, to make sure the completely different layouts
// do not pose a problem. (When the user transmutes through a union,
// there will not be a layout mismatch.)
let dest = self.force_allocation(dest)?;
self.copy_op(args[0], dest.into())?;
}
"unchecked_shl" => {
let bits = dest_layout.size.bytes() as u128 * 8;
let rhs = self.value_to_scalar(args[1])?
.to_bytes()?;
if rhs >= bits {
let bits = dest.layout.size.bytes() as u128 * 8;
let l = self.read_value(args[0])?;
let r = self.read_value(args[1])?;
let rval = r.to_scalar()?.to_bytes()?;
if rval >= bits {
return err!(Intrinsic(
format!("Overflowing shift by {} in unchecked_shl", rhs),
format!("Overflowing shift by {} in unchecked_shl", rval),
));
}
self.intrinsic_overflowing(
self.binop_ignore_overflow(
mir::BinOp::Shl,
args[0],
args[1],
l,
r,
dest,
dest_layout.ty,
)?;
}
"unchecked_shr" => {
let bits = dest_layout.size.bytes() as u128 * 8;
let rhs = self.value_to_scalar(args[1])?
.to_bytes()?;
if rhs >= bits {
let bits = dest.layout.size.bytes() as u128 * 8;
let l = self.read_value(args[0])?;
let r = self.read_value(args[1])?;
let rval = r.to_scalar()?.to_bytes()?;
if rval >= bits {
return err!(Intrinsic(
format!("Overflowing shift by {} in unchecked_shr", rhs),
format!("Overflowing shift by {} in unchecked_shr", rval),
));
}
self.intrinsic_overflowing(
self.binop_ignore_overflow(
mir::BinOp::Shr,
args[0],
args[1],
l,
r,
dest,
dest_layout.ty,
)?;
}
"unchecked_div" => {
let rhs = self.value_to_scalar(args[1])?
.to_bytes()?;
if rhs == 0 {
let l = self.read_value(args[0])?;
let r = self.read_value(args[1])?;
let rval = r.to_scalar()?.to_bytes()?;
if rval == 0 {
return err!(Intrinsic(format!("Division by 0 in unchecked_div")));
}
self.intrinsic_overflowing(
self.binop_ignore_overflow(
mir::BinOp::Div,
args[0],
args[1],
l,
r,
dest,
dest_layout.ty,
)?;
}
"unchecked_rem" => {
let rhs = self.value_to_scalar(args[1])?
.to_bytes()?;
if rhs == 0 {
let l = self.read_value(args[0])?;
let r = self.read_value(args[1])?;
let rval = r.to_scalar()?.to_bytes()?;
if rval == 0 {
return err!(Intrinsic(format!("Division by 0 in unchecked_rem")));
}
self.intrinsic_overflowing(
self.binop_ignore_overflow(
mir::BinOp::Rem,
args[0],
args[1],
l,
r,
dest,
dest_layout.ty,
)?;
}
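// [Editor's sketch, not part of the diff] The UB these four `unchecked_*`
// arms catch, at the source level (nightly-only; `#![feature(core_intrinsics)]`
// and these intrinsic signatures are assumed):
//
//     use std::intrinsics;
//     unsafe {
//         intrinsics::unchecked_shl(1_u8, 8_u8); // shift >= bit width
//         intrinsics::unchecked_div(1_u8, 0_u8); // division by zero
//     }
//
// A conforming interpreter must report these rather than wrap or crash.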
"uninit" => {
// we don't want to force an allocation in case the destination is a simple value
match dest {
Place::Local { frame, local } => {
match self.stack()[frame].locals[local].access()? {
Value::ByRef(ptr, _) => {
// These writes have no alignment restriction anyway.
self.memory.mark_definedness(ptr, dest_layout.size, false)?;
}
Value::Scalar(_) => self.write_value(ValTy { value: Value::Scalar(ScalarMaybeUndef::Undef), ty: dest_layout.ty }, dest)?,
Value::ScalarPair(..) => {
self.write_value(ValTy { value: Value::ScalarPair(ScalarMaybeUndef::Undef, ScalarMaybeUndef::Undef), ty: dest_layout.ty }, dest)?;
}
// Check fast path: we don't want to force an allocation in case the destination is a simple value,
// but we also do not want to create a new allocation with 0s and then copy that over.
if !dest.layout.is_zst() { // nothing to do for ZST
match dest.layout.abi {
layout::Abi::Scalar(..) => {
let x = ScalarMaybeUndef::Undef;
self.write_value(Value::Scalar(x), dest)?;
}
layout::Abi::ScalarPair(..) => {
let x = ScalarMaybeUndef::Undef;
self.write_value(Value::ScalarPair(x, x), dest)?;
}
_ => {
// Do it in memory
let mplace = self.force_allocation(dest)?;
assert!(mplace.extra.is_none());
self.memory.mark_definedness(mplace.ptr.to_ptr()?, dest.layout.size, false)?;
}
},
Place::Ptr {
ptr,
align: _align,
extra: PlaceExtra::None,
} => self.memory.mark_definedness(ptr.unwrap_or_err()?, dest_layout.size, false)?,
Place::Ptr { .. } => {
bug!("uninit intrinsic tried to write to fat or unaligned ptr target")
}
}
}
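// [Editor's sketch, not part of the diff] This arm is what backs the old
// `mem::uninitialized` API:
//
//     let n: u64 = unsafe { std::mem::uninitialized() };        // Scalar
//     let p: (u32, u32) = unsafe { std::mem::uninitialized() }; // ScalarPair
//
// Scalar and ScalarPair destinations get `Undef` written directly; anything
// bigger is forced into memory and its bytes marked undefined there.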
@@ -646,9 +560,9 @@ impl<'a, 'mir, 'tcx> EvalContextExt<'tcx> for EvalContext<'a, 'mir, 'tcx, super:
"write_bytes" => {
let ty = substs.type_at(0);
let ty_layout = self.layout_of(ty)?;
let val_byte = self.value_to_u8(args[1])?;
let ptr = self.into_ptr(args[0].value)?.unwrap_or_err()?;
let count = self.value_to_usize(args[2])?;
let val_byte = self.read_scalar(args[1])?.to_u8()?;
let ptr = self.read_scalar(args[0])?.not_undef()?;
let count = self.read_scalar(args[2])?.to_usize(&self)?;
if count > 0 {
// HashMap relies on write_bytes on a NULL ptr with count == 0 to work
// TODO: Should we, at least, validate the alignment? (Also see the copy intrinsic)
@@ -660,47 +574,6 @@ impl<'a, 'mir, 'tcx> EvalContextExt<'tcx> for EvalContext<'a, 'mir, 'tcx, super:
name => return err!(Unimplemented(format!("unimplemented intrinsic: {}", name))),
}
self.goto_block(target);
// Since we pushed no stack frame, the main loop will act
// as if the call just completed and it's returning to the
// current frame.
Ok(())
}
}
fn numeric_intrinsic<'tcx>(
name: &str,
bytes: u128,
kind: Primitive,
) -> EvalResult<'tcx, Scalar> {
macro_rules! integer_intrinsic {
($method:ident) => ({
let (result_bytes, size) = match kind {
Primitive::Int(I8, true) => ((bytes as i8).$method() as u128, 1),
Primitive::Int(I8, false) => ((bytes as u8).$method() as u128, 1),
Primitive::Int(I16, true) => ((bytes as i16).$method() as u128, 2),
Primitive::Int(I16, false) => ((bytes as u16).$method() as u128, 2),
Primitive::Int(I32, true) => ((bytes as i32).$method() as u128, 4),
Primitive::Int(I32, false) => ((bytes as u32).$method() as u128, 4),
Primitive::Int(I64, true) => ((bytes as i64).$method() as u128, 8),
Primitive::Int(I64, false) => ((bytes as u64).$method() as u128, 8),
Primitive::Int(I128, true) => ((bytes as i128).$method() as u128, 16),
Primitive::Int(I128, false) => (bytes.$method() as u128, 16),
_ => bug!("invalid `{}` argument: {:?}", name, bytes),
};
Scalar::from_uint(result_bytes, Size::from_bytes(size))
});
}
let result_val = match name {
"bswap" => integer_intrinsic!(swap_bytes),
"ctlz" => integer_intrinsic!(leading_zeros),
"ctpop" => integer_intrinsic!(count_ones),
"cttz" => integer_intrinsic!(trailing_zeros),
_ => bug!("not a numeric intrinsic: {}", name),
};
Ok(result_val)
}
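// [Editor's sketch, not part of the diff] The macro above bottoms out in the
// standard integer methods; some concrete values for reference:
//
//     assert_eq!(0x1234_u16.swap_bytes(), 0x3412); // bswap
//     assert_eq!(0x00f0_u16.leading_zeros(), 8);   // ctlz
//     assert_eq!(0b1011_u32.count_ones(), 3);      // ctpop
//     assert_eq!(0b0100_u32.trailing_zeros(), 2);  // cttz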

View file

@@ -1,7 +1,4 @@
#![feature(
rustc_private,
catch_expr,
)]
#![feature(rustc_private)]
#![cfg_attr(feature = "cargo-clippy", allow(cast_lossless))]
@@ -16,17 +13,17 @@ extern crate rustc_mir;
extern crate rustc_target;
extern crate syntax;
use rustc::ty::{self, TyCtxt};
use rustc::ty::{self, TyCtxt, query::TyCtxtAt};
use rustc::ty::layout::{TyLayout, LayoutOf, Size};
use rustc::ty::subst::Subst;
use rustc::hir::def_id::DefId;
use rustc::mir;
use rustc_data_structures::fx::FxHasher;
use syntax::ast::Mutability;
use syntax::codemap::Span;
use syntax::attr;
use std::marker::PhantomData;
use std::collections::{HashMap, BTreeMap};
use std::hash::{Hash, Hasher};
@@ -41,89 +38,23 @@ mod memory;
mod tls;
mod locks;
mod range_map;
mod validation;
use fn_call::EvalContextExt as MissingFnsEvalContextExt;
use operator::EvalContextExt as OperatorEvalContextExt;
use intrinsic::EvalContextExt as IntrinsicEvalContextExt;
use tls::EvalContextExt as TlsEvalContextExt;
use memory::MemoryKind as MiriMemoryKind;
use locks::LockInfo;
use locks::MemoryExt as LockMemoryExt;
use validation::EvalContextExt as ValidationEvalContextExt;
use range_map::RangeMap;
use validation::{ValidationQuery, AbsPlace};
pub trait ScalarExt {
fn null(size: Size) -> Self;
fn from_i32(i: i32) -> Self;
fn from_uint(i: impl Into<u128>, ptr_size: Size) -> Self;
fn from_int(i: impl Into<i128>, ptr_size: Size) -> Self;
fn from_f32(f: f32) -> Self;
fn from_f64(f: f64) -> Self;
fn to_usize<'a, 'mir, 'tcx>(self, ecx: &rustc_mir::interpret::EvalContext<'a, 'mir, 'tcx, Evaluator<'tcx>>) -> EvalResult<'static, u64>;
fn is_null(self) -> bool;
/// HACK: this function just extracts all bits if `defined != 0`.
/// Mainly used for the arguments of C functions; we should really fetch the
/// size of their arguments correctly.
fn to_bytes(self) -> EvalResult<'static, u128>;
}
impl ScalarExt for Scalar {
fn null(size: Size) -> Self {
Scalar::Bits { bits: 0, size: size.bytes() as u8 }
}
fn from_i32(i: i32) -> Self {
Scalar::Bits { bits: i as u32 as u128, size: 4 }
}
fn from_uint(i: impl Into<u128>, ptr_size: Size) -> Self {
Scalar::Bits { bits: i.into(), size: ptr_size.bytes() as u8 }
}
fn from_int(i: impl Into<i128>, ptr_size: Size) -> Self {
Scalar::Bits { bits: i.into() as u128, size: ptr_size.bytes() as u8 }
}
fn from_f32(f: f32) -> Self {
Scalar::Bits { bits: f.to_bits() as u128, size: 4 }
}
fn from_f64(f: f64) -> Self {
Scalar::Bits { bits: f.to_bits() as u128, size: 8 }
}
fn to_usize<'a, 'mir, 'tcx>(self, ecx: &rustc_mir::interpret::EvalContext<'a, 'mir, 'tcx, Evaluator<'tcx>>) -> EvalResult<'static, u64> {
let b = self.to_bits(ecx.memory.pointer_size())?;
assert_eq!(b as u64 as u128, b);
Ok(b as u64)
}
fn is_null(self) -> bool {
match self {
Scalar::Bits { bits, .. } => bits == 0,
Scalar::Ptr(_) => false
}
}
fn to_bytes(self) -> EvalResult<'static, u128> {
match self {
Scalar::Bits { bits, size } => {
assert_ne!(size, 0);
Ok(bits)
},
Scalar::Ptr(_) => err!(ReadPointerAsBytes),
}
}
}
use helpers::{ScalarExt, FalibleScalarExt};
pub fn create_ecx<'a, 'mir: 'a, 'tcx: 'mir>(
tcx: TyCtxt<'a, 'tcx, 'tcx>,
main_id: DefId,
start_wrapper: Option<DefId>,
) -> EvalResult<'tcx, (EvalContext<'a, 'mir, 'tcx, Evaluator<'tcx>>, Option<Pointer>)> {
) -> EvalResult<'tcx, EvalContext<'a, 'mir, 'tcx, Evaluator<'tcx>>> {
let mut ecx = EvalContext::new(
tcx.at(syntax::codemap::DUMMY_SP),
tcx.at(syntax::source_map::DUMMY_SP),
ty::ParamEnv::reveal_all(),
Default::default(),
MemoryData::new()
@@ -131,7 +62,6 @@ pub fn create_ecx<'a, 'mir: 'a, 'tcx: 'mir>(
let main_instance = ty::Instance::mono(ecx.tcx.tcx, main_id);
let main_mir = ecx.load_mir(main_instance.def)?;
let mut cleanup_ptr = None; // Scalar to be deallocated when we are done
if !main_mir.return_ty().is_nil() || main_mir.arg_count != 0 {
return err!(Unimplemented(
@@ -160,11 +90,10 @@ pub fn create_ecx<'a, 'mir: 'a, 'tcx: 'mir>(
)));
}
// Return value
// Return value (in static memory so that it does not count as a leak)
let size = ecx.tcx.data_layout.pointer_size;
let align = ecx.tcx.data_layout.pointer_align;
let ret_ptr = ecx.memory_mut().allocate(size, align, MemoryKind::Stack)?;
cleanup_ptr = Some(ret_ptr);
let ret_ptr = ecx.memory_mut().allocate(size, align, MiriMemoryKind::MutStatic.into())?;
// Push our stack frame
ecx.push_stack_frame(
@@ -172,7 +101,7 @@ pub fn create_ecx<'a, 'mir: 'a, 'tcx: 'mir>(
start_mir.span,
start_mir,
Place::from_ptr(ret_ptr, align),
StackPopCleanup::None,
StackPopCleanup::None { cleanup: true },
)?;
let mut args = ecx.frame().mir.args_iter();
@@ -180,31 +109,22 @@ pub fn create_ecx<'a, 'mir: 'a, 'tcx: 'mir>(
// First argument: pointer to main()
let main_ptr = ecx.memory_mut().create_fn_alloc(main_instance);
let dest = ecx.eval_place(&mir::Place::Local(args.next().unwrap()))?;
let main_ty = main_instance.ty(ecx.tcx.tcx);
let main_ptr_ty = ecx.tcx.mk_fn_ptr(main_ty.fn_sig(ecx.tcx.tcx));
ecx.write_value(
ValTy {
value: Value::Scalar(Scalar::Ptr(main_ptr).into()),
ty: main_ptr_ty,
},
dest,
)?;
ecx.write_scalar(Scalar::Ptr(main_ptr), dest)?;
// Second argument (argc): 1
let dest = ecx.eval_place(&mir::Place::Local(args.next().unwrap()))?;
let ty = ecx.tcx.types.isize;
ecx.write_scalar(dest, Scalar::from_int(1, ptr_size), ty)?;
ecx.write_scalar(Scalar::from_int(1, dest.layout.size), dest)?;
// FIXME: extract main source file path
// Third argument (argv): &[b"foo"]
let dest = ecx.eval_place(&mir::Place::Local(args.next().unwrap()))?;
let ty = ecx.tcx.mk_imm_ptr(ecx.tcx.mk_imm_ptr(ecx.tcx.types.u8));
let foo = ecx.memory.allocate_bytes(b"foo\0");
let ptr_align = ecx.tcx.data_layout.pointer_align;
let foo_ptr = ecx.memory.allocate(ptr_size, ptr_align, MemoryKind::Stack)?;
ecx.memory.write_scalar(foo_ptr.into(), ptr_align, Scalar::Ptr(foo).into(), ptr_size, ptr_align, false)?;
ecx.memory.mark_static_initialized(foo_ptr.alloc_id, Mutability::Immutable)?;
ecx.write_ptr(dest, foo_ptr.into(), ty)?;
let foo = ecx.memory.allocate_static_bytes(b"foo\0");
let foo_ty = ecx.tcx.mk_imm_ptr(ecx.tcx.types.u8);
let foo_layout = ecx.layout_of(foo_ty)?;
let foo_place = ecx.allocate(foo_layout, MemoryKind::Stack)?; // will be interned in just a second
ecx.write_scalar(Scalar::Ptr(foo), foo_place.into())?;
ecx.memory.intern_static(foo_place.to_ptr()?.alloc_id, Mutability::Immutable)?;
ecx.write_scalar(foo_place.ptr, dest)?;
assert!(args.next().is_none(), "start lang item has more arguments than expected");
} else {
@@ -213,7 +133,7 @@ pub fn create_ecx<'a, 'mir: 'a, 'tcx: 'mir>(
main_mir.span,
main_mir,
Place::from_scalar_ptr(Scalar::from_int(1, ptr_size).into(), ty::layout::Align::from_bytes(1, 1).unwrap()),
StackPopCleanup::None,
StackPopCleanup::None { cleanup: true },
)?;
// No arguments
@@ -221,7 +141,7 @@ pub fn create_ecx<'a, 'mir: 'a, 'tcx: 'mir>(
assert!(args.next().is_none(), "main function must not have arguments");
}
Ok((ecx, cleanup_ptr))
Ok(ecx)
}
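// [Editor's sketch, not part of the diff] The frame set up above emulates a
// call to the `start` lang item; its shape (simplified and assumed for this
// rustc version) is:
//
//     fn start(main: fn(), argc: isize, argv: *const *const u8) -> isize;
//
// which is why create_ecx writes a fn-pointer to main, the constant 1, and a
// pointer to the interned static string "foo\0" into the three argument
// locals.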
pub fn eval_main<'a, 'tcx: 'a>(
@@ -229,26 +149,22 @@ pub fn eval_main<'a, 'tcx: 'a>(
main_id: DefId,
start_wrapper: Option<DefId>,
) {
let (mut ecx, cleanup_ptr) = create_ecx(tcx, main_id, start_wrapper).expect("Couldn't create ecx");
let mut ecx = create_ecx(tcx, main_id, start_wrapper).expect("Couldn't create ecx");
let res: EvalResult = do catch {
while ecx.step()? {}
ecx.run_tls_dtors()?;
if let Some(cleanup_ptr) = cleanup_ptr {
ecx.memory_mut().deallocate(
cleanup_ptr,
None,
MemoryKind::Stack,
)?;
}
};
let res: EvalResult = (|| {
ecx.run()?;
ecx.run_tls_dtors()
})();
match res {
Ok(()) => {
let leaks = ecx.memory().leak_report();
if leaks != 0 {
// TODO: Prevent leaks which aren't supposed to be there
//tcx.sess.err("the evaluated program leaked memory");
// Disable the leak test on some platforms where we likely do not
// correctly implement TLS destructors.
let target_os = ecx.tcx.tcx.sess.target.target.target_os.to_lowercase();
let ignore_leaks = target_os == "windows" || target_os == "macos";
if !ignore_leaks && leaks != 0 {
tcx.sess.err("the evaluated program leaked memory");
}
}
Err(e) => {
@@ -273,6 +189,7 @@ pub fn eval_main<'a, 'tcx: 'a>(
ecx.tcx.sess.err(&e.to_string());
}
/* Nice try, but with MIRI_BACKTRACE this shows 100s of backtraces.
for (i, frame) in ecx.stack().iter().enumerate() {
trace!("-------------------");
trace!("Frame {}", i);
@@ -282,7 +199,7 @@ pub fn eval_main<'a, 'tcx: 'a>(
trace!(" local {}: {:?}", i, local);
}
}
}
}*/
}
}
}
@@ -293,15 +210,15 @@ pub struct Evaluator<'tcx> {
/// Miri does not expose env vars from the host to the emulated program
pub(crate) env_vars: HashMap<Vec<u8>, Pointer>,
/// Places that were suspended by the validation subsystem, and will be recovered later
pub(crate) suspended: HashMap<DynamicLifetime, Vec<ValidationQuery<'tcx>>>,
/// Use the lifetime
_dummy: PhantomData<&'tcx ()>,
}
impl<'tcx> Hash for Evaluator<'tcx> {
fn hash<H: Hasher>(&self, state: &mut H) {
let Evaluator {
env_vars,
suspended: _,
_dummy: _,
} = self;
env_vars.iter()
@@ -337,8 +254,6 @@ pub struct MemoryData<'tcx> {
/// Only mutable (static mut, heap, stack) allocations have an entry in this map.
/// The entry is created when allocating the memory and deleted after deallocation.
locks: HashMap<AllocId, RangeMap<LockInfo<'tcx>>>,
statics: HashMap<GlobalId<'tcx>, AllocId>,
}
impl<'tcx> MemoryData<'tcx> {
@@ -347,7 +262,6 @@ impl<'tcx> MemoryData<'tcx> {
next_thread_local: 1, // start with 1 as we must not use 0 on Windows
thread_local: BTreeMap::new(),
locks: HashMap::new(),
statics: HashMap::new(),
}
}
}
@@ -358,134 +272,54 @@ impl<'tcx> Hash for MemoryData<'tcx> {
next_thread_local: _,
thread_local,
locks: _,
statics: _,
} = self;
thread_local.hash(state);
}
}
impl<'mir, 'tcx: 'mir> Machine<'mir, 'tcx> for Evaluator<'tcx> {
impl<'mir, 'tcx> Machine<'mir, 'tcx> for Evaluator<'tcx> {
type MemoryData = MemoryData<'tcx>;
type MemoryKinds = memory::MemoryKind;
const MUT_STATIC_KIND: Option<memory::MemoryKind> = Some(memory::MemoryKind::MutStatic);
/// Returns Ok() when the function was handled, fail otherwise
fn eval_fn_call<'a>(
fn find_fn<'a>(
ecx: &mut EvalContext<'a, 'mir, 'tcx, Self>,
instance: ty::Instance<'tcx>,
destination: Option<(Place, mir::BasicBlock)>,
args: &[ValTy<'tcx>],
span: Span,
sig: ty::FnSig<'tcx>,
) -> EvalResult<'tcx, bool> {
ecx.eval_fn_call(instance, destination, args, span, sig)
args: &[OpTy<'tcx>],
dest: Option<PlaceTy<'tcx>>,
ret: Option<mir::BasicBlock>,
) -> EvalResult<'tcx, Option<&'mir mir::Mir<'tcx>>> {
ecx.find_fn(instance, args, dest, ret)
}
fn call_intrinsic<'a>(
ecx: &mut rustc_mir::interpret::EvalContext<'a, 'mir, 'tcx, Self>,
instance: ty::Instance<'tcx>,
args: &[ValTy<'tcx>],
dest: Place,
dest_layout: TyLayout<'tcx>,
target: mir::BasicBlock,
args: &[OpTy<'tcx>],
dest: PlaceTy<'tcx>,
) -> EvalResult<'tcx> {
ecx.call_intrinsic(instance, args, dest, dest_layout, target)
ecx.call_intrinsic(instance, args, dest)
}
fn try_ptr_op<'a>(
ecx: &rustc_mir::interpret::EvalContext<'a, 'mir, 'tcx, Self>,
bin_op: mir::BinOp,
left: Scalar,
left_ty: ty::Ty<'tcx>,
left_layout: TyLayout<'tcx>,
right: Scalar,
right_ty: ty::Ty<'tcx>,
right_layout: TyLayout<'tcx>,
) -> EvalResult<'tcx, Option<(Scalar, bool)>> {
ecx.ptr_op(bin_op, left, left_ty, right, right_ty)
}
fn mark_static_initialized<'a>(
mem: &mut Memory<'a, 'mir, 'tcx, Self>,
id: AllocId,
_mutability: Mutability,
) -> EvalResult<'tcx, bool> {
use memory::MemoryKind::*;
match mem.get_alloc_kind(id) {
// FIXME: This could be allowed, but not for env vars set during miri execution
Some(MemoryKind::Machine(Env)) => err!(Unimplemented("statics can't refer to env vars".to_owned())),
_ => Ok(false), // TODO: What does the bool mean?
}
}
fn init_static<'a>(
ecx: &mut EvalContext<'a, 'mir, 'tcx, Self>,
cid: GlobalId<'tcx>,
) -> EvalResult<'tcx, AllocId> {
// Step 1: If the static has already been evaluated return the cached version
if let Some(alloc_id) = ecx.memory.data.statics.get(&cid) {
return Ok(*alloc_id);
}
let tcx = ecx.tcx.tcx;
// Step 2: Load mir
let mut mir = ecx.load_mir(cid.instance.def)?;
if let Some(index) = cid.promoted {
mir = &mir.promoted[index];
}
assert!(mir.arg_count == 0);
// Step 3: Allocate storage
let layout = ecx.layout_of(mir.return_ty().subst(tcx, cid.instance.substs))?;
assert!(!layout.is_unsized());
let ptr = ecx.memory.allocate(
layout.size,
layout.align,
MemoryKind::Stack,
)?;
// Step 4: Cache allocation id for recursive statics
assert!(ecx.memory.data.statics.insert(cid, ptr.alloc_id).is_none());
// Step 5: Push stackframe to evaluate static
let cleanup = StackPopCleanup::None;
ecx.push_stack_frame(
cid.instance,
mir.span,
mir,
Place::from_ptr(ptr, layout.align),
cleanup,
)?;
// Step 6: Step until static has been initialized
let call_stackframe = ecx.stack().len();
while ecx.step()? && ecx.stack().len() >= call_stackframe {
if ecx.stack().len() == call_stackframe {
let frame = ecx.frame_mut();
let bb = &frame.mir.basic_blocks()[frame.block];
if bb.statements.len() == frame.stmt && !bb.is_cleanup {
if let ::rustc::mir::TerminatorKind::Return = bb.terminator().kind {
for (local, _local_decl) in mir.local_decls.iter_enumerated().skip(1) {
// Don't deallocate locals, because the return value might reference them
frame.storage_dead(local);
}
}
}
}
}
// TODO: Freeze immutable statics without copying them to the global static cache
// Step 7: Return the alloc
Ok(ptr.alloc_id)
ecx.ptr_op(bin_op, left, left_layout, right, right_layout)
}
fn box_alloc<'a>(
ecx: &mut EvalContext<'a, 'mir, 'tcx, Self>,
ty: ty::Ty<'tcx>,
dest: Place,
dest: PlaceTy<'tcx>,
) -> EvalResult<'tcx> {
let layout = ecx.layout_of(ty)?;
trace!("box_alloc for {:?}", dest.layout.ty);
// Call the `exchange_malloc` lang item
let malloc = ecx.tcx.lang_items().exchange_malloc_fn().unwrap();
let malloc = ty::Instance::mono(ecx.tcx.tcx, malloc);
@@ -494,100 +328,54 @@ impl<'mir, 'tcx: 'mir> Machine<'mir, 'tcx> for Evaluator<'tcx> {
malloc,
malloc_mir.span,
malloc_mir,
dest,
*dest,
// Don't do anything when we are done. The statement() function will increment
// the old stack frame's stmt counter to the next statement, which means that when
// exchange_malloc returns, we go on evaluating exactly where we want to be.
StackPopCleanup::None,
StackPopCleanup::None { cleanup: true },
)?;
let mut args = ecx.frame().mir.args_iter();
let usize = ecx.tcx.types.usize;
let ptr_size = ecx.memory.pointer_size();
let layout = ecx.layout_of(dest.layout.ty.builtin_deref(false).unwrap().ty)?;
// First argument: size
let dest = ecx.eval_place(&mir::Place::Local(args.next().unwrap()))?;
ecx.write_value(
ValTy {
value: Value::Scalar(Scalar::from_uint(match layout.size.bytes() {
0 => 1,
size => size,
}, ptr_size).into()),
ty: usize,
},
dest,
)?;
// (0 is allowed here, this is expected to be handled by the lang item)
let arg = ecx.eval_place(&mir::Place::Local(args.next().unwrap()))?;
let size = layout.size.bytes();
ecx.write_scalar(Scalar::from_uint(size, arg.layout.size), arg)?;
// Second argument: align
let dest = ecx.eval_place(&mir::Place::Local(args.next().unwrap()))?;
ecx.write_value(
ValTy {
value: Value::Scalar(Scalar::from_uint(layout.align.abi(), ptr_size).into()),
ty: usize,
},
dest,
)?;
let arg = ecx.eval_place(&mir::Place::Local(args.next().unwrap()))?;
let align = layout.align.abi();
ecx.write_scalar(Scalar::from_uint(align, arg.layout.size), arg)?;
// No more arguments
assert!(args.next().is_none(), "exchange_malloc lang item has more arguments than expected");
Ok(())
}
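// [Editor's sketch, not part of the diff] `box_alloc` fills in the two
// arguments of the `exchange_malloc` lang item, whose signature is:
//
//     #[lang = "exchange_malloc"]
//     unsafe fn exchange_malloc(size: usize, align: usize) -> *mut u8;
//
// Note that the old size fixup (`0 => 1`) is gone: a size of 0 is now passed
// through and, as the comment says, left for the lang item to handle.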
fn global_item_with_linkage<'a>(
_ecx: &mut EvalContext<'a, 'mir, 'tcx, Self>,
_instance: ty::Instance<'tcx>,
_mutability: Mutability,
) -> EvalResult<'tcx> {
panic!("remove this function from rustc");
}
fn find_foreign_static<'a>(
tcx: TyCtxtAt<'a, 'tcx, 'tcx>,
def_id: DefId,
) -> EvalResult<'tcx, &'tcx Allocation> {
let attrs = tcx.get_attrs(def_id);
let link_name = match attr::first_attr_value_str_by_name(&attrs, "link_name") {
Some(name) => name.as_str(),
None => tcx.item_name(def_id).as_str(),
};
fn check_locks<'a>(
mem: &Memory<'a, 'mir, 'tcx, Self>,
ptr: Pointer,
size: Size,
access: AccessKind,
) -> EvalResult<'tcx> {
mem.check_locks(ptr, size.bytes(), access)
}
fn add_lock<'a>(
mem: &mut Memory<'a, 'mir, 'tcx, Self>,
id: AllocId,
) {
mem.data.locks.insert(id, RangeMap::new());
}
fn free_lock<'a>(
mem: &mut Memory<'a, 'mir, 'tcx, Self>,
id: AllocId,
len: u64,
) -> EvalResult<'tcx> {
mem.data.locks
.remove(&id)
.expect("allocation has no corresponding locks")
.check(
Some(mem.cur_frame),
0,
len,
AccessKind::Read,
)
.map_err(|lock| {
EvalErrorKind::DeallocatedLockedMemory {
//ptr, FIXME
ptr: Pointer {
alloc_id: AllocId(0),
offset: Size::from_bytes(0),
},
lock: lock.active,
}.into()
})
}
fn end_region<'a>(
ecx: &mut EvalContext<'a, 'mir, 'tcx, Self>,
reg: Option<::rustc::middle::region::Scope>,
) -> EvalResult<'tcx> {
ecx.end_region(reg)
let alloc = match &link_name[..] {
"__cxa_thread_atexit_impl" => {
// This should be all-zero, pointer-sized
let data = vec![0; tcx.data_layout.pointer_size.bytes() as usize];
let alloc = Allocation::from_bytes(&data[..], tcx.data_layout.pointer_align);
tcx.intern_const_alloc(alloc)
}
_ => return err!(Unimplemented(
format!("can't access foreign static: {}", link_name),
)),
};
Ok(alloc)
}
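// [Editor's sketch, not part of the diff] The `link_name` lookup above
// resolves foreign statics declared like this (binding name hypothetical):
//
//     extern "C" {
//         #[link_name = "__cxa_thread_atexit_impl"]
//         static CXA_THREAD_ATEXIT: *const u8;
//     }
//
// libstd null-checks this symbol to pick a TLS-destructor strategy, so an
// all-zero, pointer-sized allocation is the right emulation.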
fn validation_op<'a>(

View file

@@ -1,3 +1,5 @@
#![allow(unused)]
use super::*;
use rustc::middle::region;
use rustc::ty::layout::Size;
@@ -6,6 +8,9 @@ use rustc::ty::layout::Size;
// Locks
////////////////////////////////////////////////////////////////////////////////
// Just some dummy to keep this compiling; I think some of this will be useful later
type AbsPlace<'tcx> = ::rustc::ty::Ty<'tcx>;
/// Information about a lock that is currently held.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct LockInfo<'tcx> {
@@ -67,321 +72,6 @@ impl<'tcx> LockInfo<'tcx> {
}
}
pub trait MemoryExt<'tcx> {
fn check_locks(
&self,
ptr: Pointer,
len: u64,
access: AccessKind,
) -> EvalResult<'tcx>;
fn acquire_lock(
&mut self,
ptr: Pointer,
len: u64,
region: Option<region::Scope>,
kind: AccessKind,
) -> EvalResult<'tcx>;
fn suspend_write_lock(
&mut self,
ptr: Pointer,
len: u64,
lock_path: &AbsPlace<'tcx>,
suspend: Option<region::Scope>,
) -> EvalResult<'tcx>;
fn recover_write_lock(
&mut self,
ptr: Pointer,
len: u64,
lock_path: &AbsPlace<'tcx>,
lock_region: Option<region::Scope>,
suspended_region: region::Scope,
) -> EvalResult<'tcx>;
fn locks_lifetime_ended(&mut self, ending_region: Option<region::Scope>);
}
impl<'a, 'mir, 'tcx: 'mir + 'a> MemoryExt<'tcx> for Memory<'a, 'mir, 'tcx, Evaluator<'tcx>> {
fn check_locks(
&self,
ptr: Pointer,
len: u64,
access: AccessKind,
) -> EvalResult<'tcx> {
if len == 0 {
return Ok(());
}
let locks = match self.data.locks.get(&ptr.alloc_id) {
Some(locks) => locks,
// immutable static or other constant memory
None => return Ok(()),
};
let frame = self.cur_frame;
locks
.check(Some(frame), ptr.offset.bytes(), len, access)
.map_err(|lock| {
EvalErrorKind::MemoryLockViolation {
ptr,
len,
frame,
access,
lock: lock.active,
}.into()
})
}
/// Acquire the lock for the given lifetime
fn acquire_lock(
&mut self,
ptr: Pointer,
len: u64,
region: Option<region::Scope>,
kind: AccessKind,
) -> EvalResult<'tcx> {
let frame = self.cur_frame;
assert!(len > 0);
trace!(
"Frame {} acquiring {:?} lock at {:?}, size {} for region {:?}",
frame,
kind,
ptr,
len,
region
);
self.check_bounds(ptr.offset(Size::from_bytes(len), &*self)?, true)?; // if ptr.offset is in bounds, then so is ptr (because offset checks for overflow)
let locks = match self.data.locks.get_mut(&ptr.alloc_id) {
Some(locks) => locks,
// immutable static or other constant memory
None => return Ok(()),
};
// Iterate over our range and acquire the lock. If the range is already split into pieces,
// we have to manipulate all of them.
let lifetime = DynamicLifetime { frame, region };
for lock in locks.iter_mut(ptr.offset.bytes(), len) {
if !lock.access_permitted(None, kind) {
return err!(MemoryAcquireConflict {
ptr,
len,
kind,
lock: lock.active.clone(),
});
}
// See what we have to do
match (&mut lock.active, kind) {
(active @ &mut NoLock, AccessKind::Write) => {
*active = WriteLock(lifetime);
}
(active @ &mut NoLock, AccessKind::Read) => {
*active = ReadLock(vec![lifetime]);
}
(&mut ReadLock(ref mut lifetimes), AccessKind::Read) => {
lifetimes.push(lifetime);
}
_ => bug!("We already checked that there is no conflicting lock"),
}
}
Ok(())
}
/// Release or suspend a write lock of the given lifetime prematurely.
/// When releasing, if there is a read lock or someone else's write lock, that's an error.
/// If no lock is held, that's fine. This can happen when e.g. a local is initialized
/// from a constant, and then suspended.
/// When suspending, the same cases are fine; we just register an additional suspension.
fn suspend_write_lock(
&mut self,
ptr: Pointer,
len: u64,
lock_path: &AbsPlace<'tcx>,
suspend: Option<region::Scope>,
) -> EvalResult<'tcx> {
assert!(len > 0);
let cur_frame = self.cur_frame;
let locks = match self.data.locks.get_mut(&ptr.alloc_id) {
Some(locks) => locks,
// immutable static or other constant memory
None => return Ok(()),
};
'locks: for lock in locks.iter_mut(ptr.offset.bytes(), len) {
let is_our_lock = match lock.active {
WriteLock(lft) =>
// Double-check that we are holding the lock.
// (Due to subtyping, checking the region would not make any sense.)
lft.frame == cur_frame,
ReadLock(_) | NoLock => false,
};
if is_our_lock {
trace!("Releasing {:?}", lock.active);
// Disable the lock
lock.active = NoLock;
} else {
trace!(
"Not touching {:?} as it is not our lock",
lock.active,
);
}
// Check if we want to register a suspension
if let Some(suspend_region) = suspend {
let lock_id = WriteLockId {
frame: cur_frame,
path: lock_path.clone(),
};
trace!("Adding suspension to {:?}", lock_id);
let mut new_suspension = false;
lock.suspended
.entry(lock_id)
// Remember whether we added a new suspension or not
.or_insert_with(|| { new_suspension = true; Vec::new() })
.push(suspend_region);
// If the suspension is new, we should have owned this.
// If there already was a suspension, we should NOT have owned this.
if new_suspension == is_our_lock {
// All is well
continue 'locks;
}
} else if !is_our_lock {
// All is well.
continue 'locks;
}
// If we get here, releasing this is an error except for NoLock.
if lock.active != NoLock {
return err!(InvalidMemoryLockRelease {
ptr,
len,
frame: cur_frame,
lock: lock.active.clone(),
});
}
}
Ok(())
}
/// Release a suspension from the write lock. If this is the last suspension or if there is no suspension, acquire the lock.
fn recover_write_lock(
&mut self,
ptr: Pointer,
len: u64,
lock_path: &AbsPlace<'tcx>,
lock_region: Option<region::Scope>,
suspended_region: region::Scope,
) -> EvalResult<'tcx> {
assert!(len > 0);
let cur_frame = self.cur_frame;
let lock_id = WriteLockId {
frame: cur_frame,
path: lock_path.clone(),
};
let locks = match self.data.locks.get_mut(&ptr.alloc_id) {
Some(locks) => locks,
// immutable static or other constant memory
None => return Ok(()),
};
for lock in locks.iter_mut(ptr.offset.bytes(), len) {
// Check if we have a suspension here
let (got_the_lock, remove_suspension) = match lock.suspended.get_mut(&lock_id) {
None => {
trace!("No suspension around, we can just acquire");
(true, false)
}
Some(suspensions) => {
trace!("Found suspension of {:?}, removing it", lock_id);
// That's us! Remove suspension (it should be in there). The same suspension can
// occur multiple times (when there are multiple shared borrows of this that have the same
// lifetime); only remove one of them.
let idx = match suspensions.iter().enumerate().find(|&(_, re)| re == &suspended_region) {
None => // TODO: Can the user trigger this?
bug!("We have this lock suspended, but not for the given region."),
Some((idx, _)) => idx
};
suspensions.remove(idx);
let got_lock = suspensions.is_empty();
if got_lock {
trace!("All suspensions are gone, we can have the lock again");
}
(got_lock, got_lock)
}
};
if remove_suspension {
// with NLL, we could do that up in the match above...
assert!(got_the_lock);
lock.suspended.remove(&lock_id);
}
if got_the_lock {
match lock.active {
ref mut active @ NoLock => {
*active = WriteLock(
DynamicLifetime {
frame: cur_frame,
region: lock_region,
}
);
}
_ => {
return err!(MemoryAcquireConflict {
ptr,
len,
kind: AccessKind::Write,
lock: lock.active.clone(),
})
}
}
}
}
Ok(())
}
fn locks_lifetime_ended(&mut self, ending_region: Option<region::Scope>) {
let cur_frame = self.cur_frame;
trace!(
"Releasing frame {} locks that expire at {:?}",
cur_frame,
ending_region
);
let has_ended = |lifetime: &DynamicLifetime| -> bool {
if lifetime.frame != cur_frame {
return false;
}
match ending_region {
None => true, // When a function ends, we end *all* its locks. It's okay for a function to still have lifetime-related locks
// when it returns, that can happen e.g. with NLL when a lifetime can, but does not have to, extend beyond the
// end of a function. Same for a function still having recoveries.
Some(ending_region) => lifetime.region == Some(ending_region),
}
};
for alloc_locks in self.data.locks.values_mut() {
for lock in alloc_locks.iter_mut_all() {
// Delete everything that ends now -- i.e., keep only all the other lifetimes.
let lock_ended = match lock.active {
WriteLock(ref lft) => has_ended(lft),
ReadLock(ref mut lfts) => {
lfts.retain(|lft| !has_ended(lft));
lfts.is_empty()
}
NoLock => false,
};
if lock_ended {
lock.active = NoLock;
}
// Also clean up suspended write locks when the function returns
if ending_region.is_none() {
lock.suspended.retain(|id, _suspensions| id.frame != cur_frame);
}
}
// Clean up the map
alloc_locks.retain(|lock| match lock.active {
NoLock => !lock.suspended.is_empty(),
_ => true,
});
}
}
}
impl<'tcx> RangeMap<LockInfo<'tcx>> {
pub fn check(
&self,

View file

@@ -1,12 +1,13 @@
#[derive(Debug, PartialEq, Copy, Clone)]
#[derive(Debug, PartialEq, Copy, Clone, Hash, Eq)]
pub enum MemoryKind {
/// Error if deallocated any other way than `rust_deallocate`
/// `__rust_alloc` memory
Rust,
/// Error if deallocated any other way than `free`
/// `malloc` memory
C,
/// Part of env var emulation
Env,
/// Mutable statics
MutStatic,
}
impl Into<::rustc_mir::interpret::MemoryKind<MemoryKind>> for MemoryKind {

View file

@@ -1,19 +1,17 @@
use rustc::ty;
use rustc::ty::layout::Primitive;
use rustc::ty::{self, Ty};
use rustc::ty::layout::{TyLayout, Primitive};
use rustc::mir;
use super::*;
use helpers::EvalContextExt as HelperEvalContextExt;
pub trait EvalContextExt<'tcx> {
fn ptr_op(
&self,
bin_op: mir::BinOp,
left: Scalar,
left_ty: ty::Ty<'tcx>,
left_layout: TyLayout<'tcx>,
right: Scalar,
right_ty: ty::Ty<'tcx>,
right_layout: TyLayout<'tcx>,
) -> EvalResult<'tcx, Option<(Scalar, bool)>>;
fn ptr_int_arithmetic(
@@ -23,6 +21,13 @@ pub trait EvalContextExt<'tcx> {
right: u128,
signed: bool,
) -> EvalResult<'tcx, (Scalar, bool)>;
fn pointer_offset_inbounds(
&self,
ptr: Scalar,
pointee_ty: Ty<'tcx>,
offset: i64,
) -> EvalResult<'tcx, Scalar>;
}
impl<'a, 'mir, 'tcx> EvalContextExt<'tcx> for EvalContext<'a, 'mir, 'tcx, super::Evaluator<'tcx>> {
@@ -30,9 +35,9 @@ impl<'a, 'mir, 'tcx> EvalContextExt<'tcx> for EvalContext<'a, 'mir, 'tcx, super:
&self,
bin_op: mir::BinOp,
left: Scalar,
left_ty: ty::Ty<'tcx>,
left_layout: TyLayout<'tcx>,
right: Scalar,
right_ty: ty::Ty<'tcx>,
right_layout: TyLayout<'tcx>,
) -> EvalResult<'tcx, Option<(Scalar, bool)>> {
trace!("ptr_op: {:?} {:?} {:?}", left, bin_op, right);
@@ -45,7 +50,7 @@ impl<'a, 'mir, 'tcx> EvalContextExt<'tcx> for EvalContext<'a, 'mir, 'tcx, super:
8 => I64,
16 => I128,
_ => unreachable!(),
}, false);
}, /*signed*/ false);
let isize = Primitive::Int(match self.memory.pointer_size().bytes() {
1 => I8,
2 => I16,
@@ -53,24 +58,23 @@ impl<'a, 'mir, 'tcx> EvalContextExt<'tcx> for EvalContext<'a, 'mir, 'tcx, super:
8 => I64,
16 => I128,
_ => unreachable!(),
}, true);
let left_layout = self.layout_of(left_ty)?;
}, /*signed*/ true);
let left_kind = match left_layout.abi {
ty::layout::Abi::Scalar(ref scalar) => scalar.value,
_ => Err(EvalErrorKind::TypeNotPrimitive(left_ty))?,
_ => Err(EvalErrorKind::TypeNotPrimitive(left_layout.ty))?,
};
let right_layout = self.layout_of(right_ty)?;
let right_kind = match right_layout.abi {
ty::layout::Abi::Scalar(ref scalar) => scalar.value,
_ => Err(EvalErrorKind::TypeNotPrimitive(right_ty))?,
_ => Err(EvalErrorKind::TypeNotPrimitive(right_layout.ty))?,
};
match bin_op {
Offset if left_kind == Primitive::Pointer && right_kind == usize => {
let pointee_ty = left_ty
Offset => {
assert!(left_kind == Primitive::Pointer && right_kind == usize);
let pointee_ty = left_layout.ty
.builtin_deref(true)
.expect("Offset called on non-ptr type")
.ty;
let ptr = self.pointer_offset(
let ptr = self.pointer_offset_inbounds(
left,
pointee_ty,
right.to_bits(self.memory.pointer_size())? as i64,
@@ -114,12 +118,13 @@ impl<'a, 'mir, 'tcx> EvalContextExt<'tcx> for EvalContext<'a, 'mir, 'tcx, super:
Gt => left.offset > right.offset,
Ge => left.offset >= right.offset,
Sub => {
let left_offset = Scalar::from_uint(left.offset.bytes(), self.memory.pointer_size());
let right_offset = Scalar::from_uint(right.offset.bytes(), self.memory.pointer_size());
let layout = self.layout_of(self.tcx.types.usize)?;
return self.binary_op(
Sub,
Scalar::Bits { bits: left.offset.bytes() as u128, size: self.memory.pointer_size().bytes() as u8 },
self.tcx.types.usize,
Scalar::Bits { bits: right.offset.bytes() as u128, size: self.memory.pointer_size().bytes() as u8 },
self.tcx.types.usize,
ValTy { value: Value::Scalar(left_offset.into()), layout },
ValTy { value: Value::Scalar(right_offset.into()), layout },
).map(Some)
}
_ => bug!("We already established it has to be one of these operators."),
@@ -228,4 +233,40 @@ impl<'a, 'mir, 'tcx> EvalContextExt<'tcx> for EvalContext<'a, 'mir, 'tcx, super:
}
})
}
/// This function raises an error if the offset moves the pointer outside of its allocation. We consider
/// ZSTs their own huge allocation that doesn't overlap with anything (and nothing moves in there because the size is 0).
/// We also consider the NULL pointer its own separate allocation, and each remaining integer pointer its own
/// allocation.
fn pointer_offset_inbounds(
&self,
ptr: Scalar,
pointee_ty: Ty<'tcx>,
offset: i64,
) -> EvalResult<'tcx, Scalar> {
if ptr.is_null() {
// NULL pointers must only be offset by 0
return if offset == 0 {
Ok(ptr)
} else {
err!(InvalidNullPointerUsage)
};
}
// FIXME: assuming here that type size is < i64::max_value()
let pointee_size = self.layout_of(pointee_ty)?.size.bytes() as i64;
let offset = offset.checked_mul(pointee_size).ok_or_else(|| EvalErrorKind::Overflow(mir::BinOp::Mul))?;
// Now let's see what kind of pointer this is
if let Scalar::Ptr(ptr) = ptr {
// Both old and new pointer must be in-bounds.
// (Of the same allocation, but that part is trivial with our representation.)
self.memory.check_bounds(ptr, false)?;
let ptr = ptr.signed_offset(offset, self)?;
self.memory.check_bounds(ptr, false)?;
Ok(Scalar::Ptr(ptr))
} else {
// An integer pointer. They can move around freely, as long as they do not overflow
// (which ptr_signed_offset checks).
ptr.ptr_signed_offset(offset, self)
}
}
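// [Editor's sketch, not part of the diff] The inbounds rules above, seen
// from the source level:
//
//     let a = [0_u8; 4];
//     let p = a.as_ptr();
//     let _one_past_end = unsafe { p.offset(4) }; // still in bounds
//     // Offsetting a NULL pointer by anything but 0, or stepping outside
//     // the allocation, is reported as an error instead of executed.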
}

View file

@ -1,3 +1,5 @@
#![allow(unused)]
//! Implements a map from integer indices to data.
//! Rather than storing data for every index, internally, this maps entire ranges to the data.
//! To this end, the APIs all work on ranges, not on individual integers. Ranges are split as

View file

@@ -119,22 +119,22 @@ impl<'a, 'mir, 'tcx: 'mir + 'a> EvalContextExt<'tcx> for EvalContext<'a, 'mir, '
// TODO: Potentially, this has to support all the other possible instances?
// See eval_fn_call in interpret/terminator/mod.rs
let mir = self.load_mir(instance.def)?;
let ret = Place::null(&self);
self.push_stack_frame(
instance,
mir.span,
mir,
Place::undef(),
StackPopCleanup::None,
ret,
StackPopCleanup::None { cleanup: true },
)?;
let arg_local = self.frame().mir.args_iter().next().ok_or_else(
|| EvalErrorKind::AbiViolation("TLS dtor does not take enough arguments.".to_owned()),
)?;
let dest = self.eval_place(&mir::Place::Local(arg_local))?;
let ty = self.tcx.mk_mut_ptr(self.tcx.types.u8);
self.write_ptr(dest, ptr, ty)?;
self.write_scalar(ptr, dest)?;
// step until out of stackframes
while self.step()? {}
self.run()?;
dtor = match self.memory.fetch_tls_dtor(Some(key)) {
dtor @ Some(_) => dtor,

View file

@@ -1,803 +0,0 @@
use rustc::hir::{self, Mutability};
use rustc::hir::Mutability::*;
use rustc::mir::{self, ValidationOp, ValidationOperand};
use rustc::mir::interpret::GlobalId;
use rustc::ty::{self, Ty, TypeFoldable, TyCtxt, Instance};
use rustc::ty::layout::{LayoutOf, PrimitiveExt};
use rustc::ty::subst::{Substs, Subst};
use rustc::traits::{self, TraitEngine};
use rustc::infer::InferCtxt;
use rustc::middle::region;
use rustc::mir::interpret::{ConstValue};
use rustc_data_structures::indexed_vec::Idx;
use rustc_mir::interpret::HasMemory;
use super::{EvalContext, Place, PlaceExtra, ValTy, ScalarExt};
use rustc::mir::interpret::{DynamicLifetime, AccessKind, EvalErrorKind, Value, EvalError, EvalResult};
use locks::MemoryExt;
pub type ValidationQuery<'tcx> = ValidationOperand<'tcx, (AbsPlace<'tcx>, Place)>;
#[derive(Copy, Clone, Debug, PartialEq)]
pub(crate) enum ValidationMode {
Acquire,
/// Recover because the given region ended
Recover(region::Scope),
ReleaseUntil(Option<region::Scope>),
}
impl ValidationMode {
fn acquiring(self) -> bool {
use self::ValidationMode::*;
match self {
Acquire | Recover(_) => true,
ReleaseUntil(_) => false,
}
}
}
// Abstract places
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
pub enum AbsPlace<'tcx> {
Local(mir::Local),
Static(hir::def_id::DefId),
Projection(Box<AbsPlaceProjection<'tcx>>),
}
type AbsPlaceProjection<'tcx> = mir::Projection<'tcx, AbsPlace<'tcx>, u64, ()>;
type AbsPlaceElem<'tcx> = mir::ProjectionElem<'tcx, u64, ()>;
impl<'tcx> AbsPlace<'tcx> {
pub fn field(self, f: mir::Field) -> AbsPlace<'tcx> {
self.elem(mir::ProjectionElem::Field(f, ()))
}
pub fn deref(self) -> AbsPlace<'tcx> {
self.elem(mir::ProjectionElem::Deref)
}
pub fn downcast(self, adt_def: &'tcx ty::AdtDef, variant_index: usize) -> AbsPlace<'tcx> {
self.elem(mir::ProjectionElem::Downcast(adt_def, variant_index))
}
pub fn index(self, index: u64) -> AbsPlace<'tcx> {
self.elem(mir::ProjectionElem::Index(index))
}
fn elem(self, elem: AbsPlaceElem<'tcx>) -> AbsPlace<'tcx> {
AbsPlace::Projection(Box::new(AbsPlaceProjection {
base: self,
elem,
}))
}
}
pub(crate) trait EvalContextExt<'tcx> {
fn abstract_place_projection(&self, proj: &mir::PlaceProjection<'tcx>) -> EvalResult<'tcx, AbsPlaceProjection<'tcx>>;
fn abstract_place(&self, place: &mir::Place<'tcx>) -> EvalResult<'tcx, AbsPlace<'tcx>>;
fn validation_op(
&mut self,
op: ValidationOp,
operand: &ValidationOperand<'tcx, mir::Place<'tcx>>,
) -> EvalResult<'tcx>;
fn end_region(&mut self, scope: Option<region::Scope>) -> EvalResult<'tcx>;
fn normalize_type_unerased(&self, ty: Ty<'tcx>) -> Ty<'tcx>;
fn field_with_lifetimes(
&mut self,
base: Place,
layout: ty::layout::TyLayout<'tcx>,
i: usize,
) -> EvalResult<'tcx, Ty<'tcx>>;
fn validate_fields(
&mut self,
query: ValidationQuery<'tcx>,
mode: ValidationMode,
) -> EvalResult<'tcx>;
fn validate_ptr(
&mut self,
val: Value,
abs_place: AbsPlace<'tcx>,
pointee_ty: Ty<'tcx>,
re: Option<region::Scope>,
mutbl: Mutability,
mode: ValidationMode,
) -> EvalResult<'tcx>;
fn validate(
&mut self,
query: ValidationQuery<'tcx>,
mode: ValidationMode,
) -> EvalResult<'tcx>;
}
impl<'a, 'mir, 'tcx: 'mir + 'a> EvalContextExt<'tcx> for EvalContext<'a, 'mir, 'tcx, super::Evaluator<'tcx>> {
fn abstract_place_projection(&self, proj: &mir::PlaceProjection<'tcx>) -> EvalResult<'tcx, AbsPlaceProjection<'tcx>> {
use self::mir::ProjectionElem::*;
let elem = match proj.elem {
Deref => Deref,
Field(f, _) => Field(f, ()),
Index(v) => {
let value = self.frame().locals[v].access()?;
let ty = self.tcx.tcx.types.usize;
let n = self.value_to_scalar(ValTy { value, ty })?.to_usize(self)?;
Index(n)
},
ConstantIndex { offset, min_length, from_end } =>
ConstantIndex { offset, min_length, from_end },
Subslice { from, to } =>
Subslice { from, to },
Downcast(adt, sz) => Downcast(adt, sz),
};
Ok(AbsPlaceProjection {
base: self.abstract_place(&proj.base)?,
elem
})
}
fn abstract_place(&self, place: &mir::Place<'tcx>) -> EvalResult<'tcx, AbsPlace<'tcx>> {
Ok(match *place {
mir::Place::Local(l) => AbsPlace::Local(l),
mir::Place::Static(ref s) => AbsPlace::Static(s.def_id),
mir::Place::Projection(ref p) =>
AbsPlace::Projection(Box::new(self.abstract_place_projection(&*p)?)),
_ => unimplemented!("validation is not currently maintained"),
})
}
// Validity checks
fn validation_op(
&mut self,
op: ValidationOp,
operand: &ValidationOperand<'tcx, mir::Place<'tcx>>,
) -> EvalResult<'tcx> {
// If mir-emit-validate is set to 0 (i.e., disabled), we may still see validation commands
// because other crates may have been compiled with mir-emit-validate > 0. Ignore those
// commands. This makes mir-emit-validate also a flag to control whether miri will do
// validation or not.
if self.tcx.tcx.sess.opts.debugging_opts.mir_emit_validate == 0 {
return Ok(());
}
debug_assert!(self.memory.cur_frame == self.cur_frame());
// We need to monomorphize ty *without* erasing lifetimes
trace!("validation_op1: {:?}", operand.ty.sty);
let ty = operand.ty.subst(self.tcx.tcx, self.substs());
trace!("validation_op2: {:?}", operand.ty.sty);
let place = self.eval_place(&operand.place)?;
let abs_place = self.abstract_place(&operand.place)?;
let query = ValidationQuery {
place: (abs_place, place),
ty,
re: operand.re,
mutbl: operand.mutbl,
};
// Check the mode, and also perform mode-specific operations
let mode = match op {
ValidationOp::Acquire => ValidationMode::Acquire,
ValidationOp::Release => ValidationMode::ReleaseUntil(None),
ValidationOp::Suspend(scope) => {
if query.mutbl == MutMutable {
let lft = DynamicLifetime {
frame: self.cur_frame(),
region: Some(scope), // Notably, we only ever suspend things for given regions.
// Suspending for the entire function does not make any sense.
};
trace!("Suspending {:?} until {:?}", query, scope);
self.machine.suspended.entry(lft).or_insert_with(Vec::new).push(
query.clone(),
);
}
ValidationMode::ReleaseUntil(Some(scope))
}
};
self.validate(query, mode)
}
/// Release locks and execute suspensions of the given region (or the entire fn, in case of None).
fn end_region(&mut self, scope: Option<region::Scope>) -> EvalResult<'tcx> {
debug_assert!(self.memory.cur_frame == self.cur_frame());
self.memory.locks_lifetime_ended(scope);
match scope {
Some(scope) => {
// Recover suspended places
let lft = DynamicLifetime {
frame: self.cur_frame(),
region: Some(scope),
};
if let Some(queries) = self.machine.suspended.remove(&lft) {
for query in queries {
trace!("Recovering {:?} from suspension", query);
self.validate(query, ValidationMode::Recover(scope))?;
}
}
}
None => {
// Clean suspension table of current frame
let cur_frame = self.cur_frame();
self.machine.suspended.retain(|lft, _| {
lft.frame != cur_frame // keep only what is in the other (lower) frames
});
}
}
Ok(())
}
fn normalize_type_unerased(&self, ty: Ty<'tcx>) -> Ty<'tcx> {
return normalize_associated_type(self.tcx.tcx, &ty);
use syntax::codemap::{Span, DUMMY_SP};
// We copy a bunch of stuff from rustc/infer/mod.rs to be able to tweak its behavior
fn normalize_projections_in<'a, 'gcx, 'tcx, T>(
self_: &InferCtxt<'a, 'gcx, 'tcx>,
param_env: ty::ParamEnv<'tcx>,
value: &T,
) -> T::Lifted
where
T: TypeFoldable<'tcx> + ty::Lift<'gcx>,
{
let mut selcx = traits::SelectionContext::new(self_);
let cause = traits::ObligationCause::dummy();
let traits::Normalized {
value: result,
obligations,
} = traits::normalize(&mut selcx, param_env, cause, value);
let mut fulfill_cx = traits::FulfillmentContext::new();
for obligation in obligations {
fulfill_cx.register_predicate_obligation(self_, obligation);
}
drain_fulfillment_cx_or_panic(self_, DUMMY_SP, &mut fulfill_cx, &result)
}
fn drain_fulfillment_cx_or_panic<'a, 'gcx, 'tcx, T>(
self_: &InferCtxt<'a, 'gcx, 'tcx>,
span: Span,
fulfill_cx: &mut traits::FulfillmentContext<'tcx>,
result: &T,
) -> T::Lifted
where
T: TypeFoldable<'tcx> + ty::Lift<'gcx>,
{
// In principle, we only need to do this so long as `result`
// contains unbound type parameters. It could be a slight
// optimization to stop iterating early.
match fulfill_cx.select_all_or_error(self_) {
Ok(()) => { }
Err(errors) => {
span_bug!(
span,
"Encountered errors `{:?}` resolving bounds after type-checking",
errors
);
}
}
let result = self_.resolve_type_vars_if_possible(result);
let result = self_.tcx.fold_regions(
&result,
&mut false,
|r, _| match *r {
ty::ReVar(_) => self_.tcx.types.re_erased,
_ => r,
},
);
match self_.tcx.lift_to_global(&result) {
Some(result) => result,
None => {
span_bug!(span, "Uninferred types/regions in `{:?}`", result);
}
}
}
trait MyTransNormalize<'gcx>: TypeFoldable<'gcx> {
fn my_trans_normalize<'a, 'tcx>(
&self,
infcx: &InferCtxt<'a, 'gcx, 'tcx>,
param_env: ty::ParamEnv<'tcx>,
) -> Self;
}
macro_rules! items { ($($item:item)+) => ($($item)+) }
macro_rules! impl_trans_normalize {
($lt_gcx:tt, $($ty:ty),+) => {
items!($(impl<$lt_gcx> MyTransNormalize<$lt_gcx> for $ty {
fn my_trans_normalize<'a, 'tcx>(&self,
infcx: &InferCtxt<'a, $lt_gcx, 'tcx>,
param_env: ty::ParamEnv<'tcx>)
-> Self {
normalize_projections_in(infcx, param_env, self)
}
})+);
}
}
impl_trans_normalize!('gcx,
Ty<'gcx>,
&'gcx Substs<'gcx>,
ty::FnSig<'gcx>,
ty::PolyFnSig<'gcx>,
ty::ClosureSubsts<'gcx>,
ty::PolyTraitRef<'gcx>,
ty::ExistentialTraitRef<'gcx>
);
fn normalize_associated_type<'a, 'tcx, T>(self_: TyCtxt<'a, 'tcx, 'tcx>, value: &T) -> T
where
T: MyTransNormalize<'tcx>,
{
let param_env = ty::ParamEnv::reveal_all();
if !value.has_projections() {
return value.clone();
}
self_.infer_ctxt().enter(|infcx| {
value.my_trans_normalize(&infcx, param_env)
})
}
}
// This is a copy of `Layout::field`
//
// FIXME: remove once validation does not depend on lifetimes
fn field_with_lifetimes(
&mut self,
base: Place,
mut layout: ty::layout::TyLayout<'tcx>,
i: usize,
) -> EvalResult<'tcx, Ty<'tcx>> {
if let Place::Ptr { extra: PlaceExtra::DowncastVariant(variant_index), .. } = base {
layout = layout.for_variant(&self, variant_index);
}
let tcx = self.tcx.tcx;
Ok(match layout.ty.sty {
ty::TyBool |
ty::TyChar |
ty::TyInt(_) |
ty::TyUint(_) |
ty::TyFloat(_) |
ty::TyFnPtr(_) |
ty::TyNever |
ty::TyFnDef(..) |
ty::TyGeneratorWitness(..) |
ty::TyDynamic(..) |
ty::TyForeign(..) => {
bug!("TyLayout::field_type({:?}): not applicable", layout)
}
// Potentially-fat pointers.
ty::TyRef(_, pointee, _) |
ty::TyRawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
assert!(i < 2);
// Reuse the fat *T type as its own thin pointer data field.
// This provides information about e.g. DST struct pointees
// (which may have no non-DST form), and will work as long
// as the `Abi` or `FieldPlacement` is checked by users.
if i == 0 {
return Ok(layout.ty);
}
match tcx.struct_tail(pointee).sty {
ty::TySlice(_) |
ty::TyStr => tcx.types.usize,
ty::TyDynamic(..) => {
// FIXME(eddyb) use a usize/fn() array with
// the correct number of vtable slots.
tcx.mk_imm_ref(tcx.types.re_static, tcx.mk_nil())
}
_ => bug!("TyLayout::field_type({:?}): not applicable", layout)
}
}
// Arrays and slices.
ty::TyArray(element, _) |
ty::TySlice(element) => element,
ty::TyStr => tcx.types.u8,
// Tuples, generators and closures.
ty::TyClosure(def_id, ref substs) => {
substs.upvar_tys(def_id, tcx).nth(i).unwrap()
}
ty::TyGenerator(def_id, ref substs, _) => {
substs.field_tys(def_id, tcx).nth(i).unwrap()
}
ty::TyTuple(tys) => tys[i],
// SIMD vector types.
ty::TyAdt(def, ..) if def.repr.simd() => {
layout.ty.simd_type(tcx)
}
// ADTs.
ty::TyAdt(def, substs) => {
use rustc::ty::layout::Variants;
match layout.variants {
Variants::Single { index } => {
def.variants[index].fields[i].ty(tcx, substs)
}
// Discriminant field for enums (where applicable).
Variants::Tagged { tag: ref discr, .. } |
Variants::NicheFilling { niche: ref discr, .. } => {
assert_eq!(i, 0);
return Ok(discr.value.to_ty(tcx))
}
}
}
ty::TyProjection(_) | ty::TyAnon(..) | ty::TyParam(_) |
ty::TyInfer(_) | ty::TyError => {
bug!("TyLayout::field_type: unexpected type `{}`", layout.ty)
}
})
}
fn validate_fields(
&mut self,
query: ValidationQuery<'tcx>,
mode: ValidationMode,
) -> EvalResult<'tcx> {
let mut layout = self.layout_of(query.ty)?;
layout.ty = query.ty;
// TODO: Maybe take visibility/privacy into account.
for idx in 0..layout.fields.count() {
let field = mir::Field::new(idx);
let (field_place, field_layout) =
self.place_field(query.place.1, field, layout)?;
// layout stuff erases lifetimes, get the field ourselves
let field_ty = self.field_with_lifetimes(query.place.1, layout, idx)?;
trace!("assuming \n{:?}\n == \n{:?}\n except for lifetimes", field_layout.ty, field_ty);
self.validate(
ValidationQuery {
place: (query.place.0.clone().field(field), field_place),
ty: field_ty,
..query
},
mode,
)?;
}
Ok(())
}
fn validate_ptr(
&mut self,
val: Value,
abs_place: AbsPlace<'tcx>,
pointee_ty: Ty<'tcx>,
re: Option<region::Scope>,
mutbl: Mutability,
mode: ValidationMode,
) -> EvalResult<'tcx> {
// Check alignment and non-NULLness
let (_, align) = self.size_and_align_of_dst(pointee_ty, val)?;
let ptr = self.into_ptr(val)?.unwrap_or_err()?;
self.memory.check_align(ptr, align)?;
// Recurse
let pointee_place = self.val_to_place(val, pointee_ty)?;
self.validate(
ValidationQuery {
place: (abs_place.deref(), pointee_place),
ty: pointee_ty,
re,
mutbl,
},
mode,
)
}
/// Validate the place at the given type. If the mode is not acquiring, just release all write locks.
fn validate(
&mut self,
mut query: ValidationQuery<'tcx>,
mode: ValidationMode,
) -> EvalResult<'tcx> {
use rustc::ty::TypeVariants::*;
use rustc::ty::RegionKind::*;
use rustc::ty::AdtKind;
// No point releasing shared stuff.
if !mode.acquiring() && query.mutbl == MutImmutable {
return Ok(());
}
// When we recover, we may see data whose validity *just* ended. Do not acquire it.
if let ValidationMode::Recover(ending_ce) = mode {
if query.re == Some(ending_ce) {
return Ok(());
}
}
query.ty = self.normalize_type_unerased(&query.ty);
trace!("{:?} on {:#?}", mode, query);
trace!("{:#?}", query.ty.sty);
// Decide whether this type *owns* the memory it covers (like integers), or whether it
// just assembles pieces (that each own their memory) together to a larger whole.
// TODO: Currently, we don't acquire locks for padding and discriminants. We should.
let is_owning = match query.ty.sty {
TyInt(_) | TyUint(_) | TyRawPtr(_) | TyBool | TyFloat(_) | TyChar | TyStr |
TyRef(..) | TyFnPtr(..) | TyFnDef(..) | TyNever => true,
TyAdt(adt, _) if adt.is_box() => true,
TySlice(_) | TyAdt(_, _) | TyTuple(..) | TyClosure(..) | TyArray(..) |
TyDynamic(..) | TyGenerator(..) | TyForeign(_) => false,
TyGeneratorWitness(..) => unreachable!("TyGeneratorWitness in validate"),
TyParam(_) | TyInfer(_) | TyProjection(_) | TyAnon(..) | TyError => {
bug!("I got an incomplete/unnormalized type for validation")
}
};
if is_owning {
// We need to lock. So we need memory. So we have to force an allocation.
// Tracking the same state for locals not backed by memory would just duplicate too
// much machinery.
// FIXME: We ignore alignment.
let (ptr, _, extra) = self.force_allocation(query.place.1)?.to_ptr_align_extra();
// Determine the size
// FIXME: Can we reuse size_and_align_of_dst for Places?
let layout = self.layout_of(query.ty)?;
let len = if !layout.is_unsized() {
assert_eq!(extra, PlaceExtra::None, "Got a fat ptr to a sized type");
layout.size.bytes()
} else {
// The only unsized type we consider "owning" is TyStr.
assert_eq!(
query.ty.sty,
TyStr,
"Found a surprising unsized owning type"
);
// The extra must be the length, in bytes.
match extra {
PlaceExtra::Length(len) => len,
_ => bug!("TyStr must have a length as extra"),
}
};
// Handle locking
if len > 0 {
let ptr = ptr.unwrap_or_err()?.to_ptr()?;
match query.mutbl {
MutImmutable => {
if mode.acquiring() {
self.memory.acquire_lock(
ptr,
len,
query.re,
AccessKind::Read,
)?;
}
}
// No releasing of read locks, ever.
MutMutable => {
match mode {
ValidationMode::Acquire => {
self.memory.acquire_lock(
ptr,
len,
query.re,
AccessKind::Write,
)?
}
ValidationMode::Recover(ending_ce) => {
self.memory.recover_write_lock(
ptr,
len,
&query.place.0,
query.re,
ending_ce,
)?
}
ValidationMode::ReleaseUntil(suspended_ce) => {
self.memory.suspend_write_lock(
ptr,
len,
&query.place.0,
suspended_ce,
)?
}
}
}
}
}
}
let res: EvalResult<'tcx> = do catch {
match query.ty.sty {
TyInt(_) | TyUint(_) | TyRawPtr(_) => {
if mode.acquiring() {
// Make sure we can read this.
let val = self.read_place(query.place.1)?;
self.follow_by_ref_value(val, query.ty)?;
// FIXME: It would be great to rule out Undef here, but that doesn't actually work.
// Passing around undef data is a thing that e.g. Vec::extend_with does.
}
}
TyBool | TyFloat(_) | TyChar => {
if mode.acquiring() {
let val = self.read_place(query.place.1)?;
let val = self.value_to_scalar(ValTy { value: val, ty: query.ty })?;
val.to_bytes()?;
// TODO: Check if these are valid bool/float/codepoint/UTF-8
}
}
TyNever => return err!(ValidationFailure(format!("The empty type is never valid."))),
TyRef(region, pointee_ty, mutbl) => {
let val = self.read_place(query.place.1)?;
// Sharing restricts our context
if mutbl == MutImmutable {
query.mutbl = MutImmutable;
}
// Inner lifetimes *outlive* outer ones, so only if we have no lifetime restriction yet
// do we record the region of this borrow in the context.
if query.re == None {
if let ReScope(scope) = *region {
query.re = Some(scope);
}
// It is possible for us to encounter erased lifetimes here because the lifetimes in
// this function's Substs will be erased.
}
self.validate_ptr(val, query.place.0, pointee_ty, query.re, query.mutbl, mode)?;
}
TyAdt(adt, _) if adt.is_box() => {
let val = self.read_place(query.place.1)?;
self.validate_ptr(val, query.place.0, query.ty.boxed_ty(), query.re, query.mutbl, mode)?;
}
TyFnPtr(_sig) => {
let ptr = self.read_place(query.place.1)?;
let ptr = self.into_ptr(ptr)?.unwrap_or_err()?.to_ptr()?;
self.memory.get_fn(ptr)?;
// TODO: Check if the signature matches (should be the same check as what terminator/mod.rs already does on call?).
}
TyFnDef(..) => {
// This is a zero-sized type with all relevant data sitting in the type.
// There is nothing to validate.
}
// Compound types
TyStr => {
// TODO: Validate strings
}
TySlice(elem_ty) => {
let len = match query.place.1 {
Place::Ptr { extra: PlaceExtra::Length(len), .. } => len,
_ => {
bug!(
"acquire_valid of a TySlice given non-slice place: {:?}",
query.place
)
}
};
for i in 0..len {
let inner_place = self.place_index(query.place.1, query.ty, i)?;
self.validate(
ValidationQuery {
place: (query.place.0.clone().index(i), inner_place),
ty: elem_ty,
..query
},
mode,
)?;
}
}
TyArray(elem_ty, len) => {
let len = match len.val {
ConstValue::Unevaluated(def_id, substs) => {
self.tcx.const_eval(self.tcx.param_env(def_id).and(GlobalId {
instance: Instance::new(def_id, substs),
promoted: None,
}))
.map_err(|_err|EvalErrorKind::MachineError("<already reported>".to_string()))?
}
_ => len,
};
let len = len.unwrap_usize(self.tcx.tcx);
for i in 0..len {
let inner_place = self.place_index(query.place.1, query.ty, i as u64)?;
self.validate(
ValidationQuery {
place: (query.place.0.clone().index(i as u64), inner_place),
ty: elem_ty,
..query
},
mode,
)?;
}
}
TyDynamic(_data, _region) => {
// Check that this is a valid vtable
let vtable = match query.place.1 {
Place::Ptr { extra: PlaceExtra::Vtable(vtable), .. } => vtable,
_ => {
bug!(
"acquire_valid of a TyDynamic given non-trait-object place: {:?}",
query.place
)
}
};
self.read_size_and_align_from_vtable(vtable)?;
// TODO: Check that the vtable contains all the function pointers we expect it to have.
// Trait objects cannot have any operations performed
// on them directly. We cannot, in general, even acquire any locks as the trait object *could*
// contain an UnsafeCell. If we call functions to get access to data, we will validate
// their return values. So, it doesn't seem like there's anything else to do.
}
TyAdt(adt, _) => {
if Some(adt.did) == self.tcx.tcx.lang_items().unsafe_cell_type() &&
query.mutbl == MutImmutable
{
// No locks for shared unsafe cells. Also no other validation; the only field is private anyway.
return Ok(());
}
match adt.adt_kind() {
AdtKind::Enum => {
let layout = self.layout_of(query.ty)?;
let variant_idx = self.read_discriminant_as_variant_index(query.place.1, layout)?;
let variant = &adt.variants[variant_idx];
if !variant.fields.is_empty() {
// Downcast to this variant, if needed
let place = if adt.is_enum() {
(
query.place.0.downcast(adt, variant_idx),
self.eval_place_projection(
query.place.1,
query.ty,
&mir::ProjectionElem::Downcast(adt, variant_idx),
)?,
)
} else {
query.place
};
// Recursively validate the fields
self.validate_fields(
ValidationQuery { place, ..query },
mode,
)?;
} else {
// No fields, nothing left to check. Downcasting may fail, e.g. in case of a CEnum.
}
}
AdtKind::Struct => {
self.validate_fields(query, mode)?;
}
AdtKind::Union => {
// No guarantees are provided for union types.
// TODO: Make sure that all access to union fields is unsafe; otherwise, we may have some checking to do (but what exactly?)
}
}
}
TyTuple(..) |
TyClosure(..) => {
// TODO: Check if the signature matches for `TyClosure`
// (should be the same check as what terminator/mod.rs already does on call?).
// Are there other things we can/should check? Like vtable pointers?
self.validate_fields(query, mode)?;
}
// FIXME: generators aren't validated right now
TyGenerator(..) => {},
_ => bug!("We already established that this is a type we support. ({})", query.ty),
}
};
match res {
// ReleaseUntil(None) of an uninitialized variable is a NOP. This is needed because
// we have to release the return value of a function; due to destination-passing-style
// the callee may directly write there.
// TODO: Ideally we would know whether the destination is already initialized, and only
// release if it is. But of course that can't even always be statically determined.
Err(EvalError { kind: EvalErrorKind::ReadUndefBytes, .. })
if mode == ValidationMode::ReleaseUntil(None) => {
Ok(())
}
res => res,
}
}
}
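
The test diffs below exercise the primitive invariants this code used to enforce: a bool must be 0 or 1, and a char must be a valid Unicode scalar value. As a rough, self-contained sketch of that kind of check (the helper `check_primitive` is illustrative, not miri's actual API):

fn check_primitive(ty: &str, raw: u32) -> Result<(), String> {
    match ty {
        // A bool occupies one byte and must be exactly 0 or 1.
        "bool" if raw > 1 => Err(format!("invalid boolean value read: {}", raw)),
        // A char must be a valid Unicode scalar value.
        "char" if std::char::from_u32(raw).is_none() =>
            Err(format!("tried to interpret an invalid 32-bit value as a char: {}", raw)),
        _ => Ok(()),
    }
}

fn main() {
    assert!(check_primitive("bool", 2).is_err());
    assert!(check_primitive("char", u32::max_value()).is_err());
    assert!(check_primitive("char", 'x' as u32).is_ok());
}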

@ -1,4 +1,6 @@
//ignore-test FIXME (do some basic validation of invariants for all values in flight)
//This is currently not caught because it compiles to SwitchInt, which
//has no knowledge about data invariants.
fn main() {
let b = unsafe { std::mem::transmute::<u8, bool>(2) };

@ -0,0 +1,4 @@
fn main() {
let b = unsafe { std::mem::transmute::<u8, bool>(2) };
let _x = b == true; //~ ERROR invalid boolean value read
}

@ -8,11 +8,10 @@ pub enum Foo {
fn main() {
let f = unsafe { std::mem::transmute::<i32, Foo>(42) };
match f {
match f { //~ ERROR invalid enum discriminant
Foo::A => {},
Foo::B => {},
Foo::C => {},
Foo::D => {},
}
} //~ ERROR constant evaluation error
//~^ NOTE entered unreachable code
}

@ -0,0 +1,16 @@
// Validation makes this fail in the wrong place
// compile-flags: -Zmir-emit-validate=0
// error-pattern: invalid enum discriminant
use std::mem;
#[repr(C)]
pub enum Foo {
A, B, C, D
}
fn main() {
let f = unsafe { std::mem::transmute::<i32, Foo>(42) };
let _ = mem::discriminant(&f);
}
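
For context, `std::mem::discriminant` reads an enum value's discriminant directly, without a `match`, which is why the bogus value above is caught right at this call. A standalone sketch of the API's documented behavior:

use std::mem;

enum E { A, B(u8) }

fn main() {
    // Discriminants compare equal exactly when the variants match,
    // regardless of any payload.
    assert_eq!(mem::discriminant(&E::B(1)), mem::discriminant(&E::B(2)));
    assert_ne!(mem::discriminant(&E::A), mem::discriminant(&E::B(0)));
}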

@ -1,11 +1,13 @@
// ignore-test FIXME: we are not checking these things on match any more?
//This is currently not caught because it compiles to SwitchInt, which
//has no knowledge about data invariants.
fn main() {
assert!(std::char::from_u32(-1_i32 as u32).is_none());
match unsafe { std::mem::transmute::<i32, char>(-1) } { //~ ERROR constant evaluation error
let _ = match unsafe { std::mem::transmute::<i32, char>(-1) } { //~ ERROR constant evaluation error
//~^ NOTE tried to interpret an invalid 32-bit value as a char: 4294967295
'a' => {},
'b' => {},
_ => {},
}
'a' => {true},
'b' => {false},
_ => {true},
};
}

@ -0,0 +1,5 @@
fn main() {
assert!(std::char::from_u32(-1_i32 as u32).is_none());
let c = unsafe { std::mem::transmute::<i32, char>(-1) };
let _x = c == 'x'; //~ ERROR tried to interpret an invalid 32-bit value as a char
}

@ -1,4 +1,6 @@
// ignore-test FIXME: leak detection is disabled
// ignore-windows: We do not check leaks on Windows
// ignore-macos: We do not check leaks on macOS
//error-pattern: the evaluated program leaked memory
fn main() {

@ -1,4 +1,6 @@
// ignore-test FIXME: leak detection is disabled
// ignore-windows: We do not check leaks on Windows
// ignore-macos: We do not check leaks on macOS
//error-pattern: the evaluated program leaked memory
use std::rc::Rc;

@ -1,4 +1,3 @@
// ignore-test FIXME: we are not making these statics read-only any more?
fn main() {
let x = &1; // the `&1` is promoted to a constant, but it used to be that only the pointer is marked static, not the pointee

@ -1,4 +1,4 @@
#![feature(custom_attribute, attr_literals)]
#![feature(custom_attribute)]
#![miri(stack_limit=16)]
//error-pattern: reached the configured maximum number of stack frames

@ -1,4 +1,3 @@
// ignore-test FIXME: we are not making these statics read-only any more?
static X: usize = 5;
#[allow(mutable_transmutes)]

@ -0,0 +1,19 @@
// Just instantiate some data structures to make sure we got all their foreign items covered.
// Requires full MIR on Windows.
use std::sync;
fn main() {
let m = sync::Mutex::new(0);
let _ = m.lock();
drop(m);
// We don't provide RwLock on Windows
#[cfg(not(target_os = "windows"))]
{
let rw = sync::RwLock::new(0);
let _ = rw.read();
let _ = rw.write();
drop(rw);
}
}

@ -0,0 +1,15 @@
use std::collections::VecDeque;
fn main() {
let mut dst = VecDeque::new();
dst.push_front(Box::new(1));
dst.push_front(Box::new(2));
dst.pop_back();
let mut src = VecDeque::new();
src.push_front(Box::new(2));
dst.append(&mut src);
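// dst is now [2, 2]: the 1 was popped from the back, and src's 2 was appended.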
for a in dst {
assert_eq!(*a, 2);
}
}

@ -15,18 +15,22 @@ static ATOMIC: AtomicIsize = ATOMIC_ISIZE_INIT;
fn main() {
// Make sure trans can emit all the intrinsics correctly
ATOMIC.compare_exchange(0, 1, Relaxed, Relaxed).ok();
ATOMIC.compare_exchange(0, 1, Acquire, Relaxed).ok();
ATOMIC.compare_exchange(0, 1, Release, Relaxed).ok();
ATOMIC.compare_exchange(0, 1, AcqRel, Relaxed).ok();
assert_eq!(ATOMIC.compare_exchange(0, 1, Relaxed, Relaxed), Ok(0));
assert_eq!(ATOMIC.compare_exchange(0, 2, Acquire, Relaxed), Err(1));
assert_eq!(ATOMIC.compare_exchange(0, 1, Release, Relaxed), Err(1));
assert_eq!(ATOMIC.compare_exchange(1, 0, AcqRel, Relaxed), Ok(1));
ATOMIC.compare_exchange(0, 1, SeqCst, Relaxed).ok();
ATOMIC.compare_exchange(0, 1, Acquire, Acquire).ok();
ATOMIC.compare_exchange(0, 1, AcqRel, Acquire).ok();
ATOMIC.compare_exchange(0, 1, SeqCst, Acquire).ok();
ATOMIC.compare_exchange(0, 1, SeqCst, SeqCst).ok();
ATOMIC.compare_exchange_weak(0, 1, Relaxed, Relaxed).ok();
ATOMIC.compare_exchange_weak(0, 1, Acquire, Relaxed).ok();
ATOMIC.compare_exchange_weak(0, 1, Release, Relaxed).ok();
ATOMIC.store(0, SeqCst);
assert_eq!(ATOMIC.compare_exchange_weak(0, 1, Relaxed, Relaxed), Ok(0));
assert_eq!(ATOMIC.compare_exchange_weak(0, 2, Acquire, Relaxed), Err(1));
assert_eq!(ATOMIC.compare_exchange_weak(0, 1, Release, Relaxed), Err(1));
assert_eq!(ATOMIC.compare_exchange_weak(1, 0, AcqRel, Relaxed), Ok(1));
ATOMIC.compare_exchange_weak(0, 1, AcqRel, Relaxed).ok();
ATOMIC.compare_exchange_weak(0, 1, SeqCst, Relaxed).ok();
ATOMIC.compare_exchange_weak(0, 1, Acquire, Acquire).ok();
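
The strengthened assertions rely on `compare_exchange`'s return contract: it returns `Ok(previous)` when the exchange happened and `Err(actual)` when it did not. A standalone illustration:

use std::sync::atomic::{AtomicIsize, Ordering::Relaxed};

fn main() {
    let a = AtomicIsize::new(0);
    // Success: the value was 0, so it becomes 1 and the old value comes back.
    assert_eq!(a.compare_exchange(0, 1, Relaxed, Relaxed), Ok(0));
    // Failure: the value is now 1, not 0; nothing is written and the actual
    // value comes back.
    assert_eq!(a.compare_exchange(0, 2, Relaxed, Relaxed), Err(1));
}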

@ -25,4 +25,5 @@ fn main() {
assert_eq!(if_false(), 0);
assert_eq!(if_true(), 1);
assert_eq!(match_bool(), 1);
assert_eq!(true == true, true);
}

@ -0,0 +1,10 @@
#![feature(extern_types)]
extern {
type Foo;
}
fn main() {
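// `Foo` is an opaque extern type, so re-borrowing below only copies the
// pointer and never reads the pointee.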
let x: &Foo = unsafe { &*(16 as *const Foo) };
let _y: &Foo = &*x;
}

@ -8,4 +8,8 @@ fn main() {
let x: u64 = unsafe { std::mem::transmute(42.0_f64) };
let y: f64 = unsafe { std::mem::transmute(x) };
assert_eq!(y, 42.0_f64);
assert_eq!(5.0f32 as u32, 5);
assert_eq!(5.0f32 as i32, 5);
assert_eq!(-5.0f32 as i32, -5);
}

tests/run-pass/unops.rs

@ -0,0 +1,5 @@
fn main() {
assert_eq!(!true, false);
assert_eq!(!0xFFu16, 0xFF00);
assert_eq!(-{1i16}, -1i16);
}