Auto merge of #1461 - RalfJung:rwlock-win, r=oli-obk

Implement rwlocks on Windows

Fixes https://github.com/rust-lang/miri/issues/1059
This commit is contained in:
bors 2020-06-28 10:59:34 +00:00
commit 2dfa6c1a46
45 changed files with 522 additions and 262 deletions

View file

@ -467,6 +467,37 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
}
}
}
/// Reads a scalar located `offset` bytes into the memory that `op` points to,
/// interpreting the bytes there according to `layout`.
/// `op` must be a pointer operand; it is dereferenced first.
fn read_scalar_at_offset(
&self,
op: OpTy<'tcx, Tag>,
offset: u64,
layout: TyAndLayout<'tcx>,
) -> InterpResult<'tcx, ScalarMaybeUninit<Tag>> {
let this = self.eval_context_ref();
let op_place = this.deref_operand(op)?;
let offset = Size::from_bytes(offset);
// Ensure that the following read at an offset is within bounds
assert!(op_place.layout.size >= offset + layout.size);
let value_place = op_place.offset(offset, MemPlaceMeta::None, layout, this)?;
this.read_scalar(value_place.into())
}
/// Writes `value` at `offset` bytes into the memory that `op` points to,
/// using `layout` for the write.
/// `op` must be a pointer operand; it is dereferenced first.
fn write_scalar_at_offset(
&mut self,
op: OpTy<'tcx, Tag>,
offset: u64,
value: impl Into<ScalarMaybeUninit<Tag>>,
layout: TyAndLayout<'tcx>,
) -> InterpResult<'tcx, ()> {
let this = self.eval_context_mut();
let op_place = this.deref_operand(op)?;
let offset = Size::from_bytes(offset);
// Ensure that the following write at an offset is within bounds
assert!(op_place.layout.size >= offset + layout.size);
let value_place = op_place.offset(offset, MemPlaceMeta::None, layout, this)?;
this.write_scalar(value, value_place.into())
}
}
/// Check that the number of args is what we expect.

View file

@ -1,34 +1,24 @@
use rustc_middle::mir;
use crate::*;
use helpers::check_arg_count;
use shims::posix::dlsym as posix;
use shims::windows::dlsym as windows;
#[derive(Debug, Copy, Clone)]
#[allow(non_camel_case_types)]
pub enum Dlsym {
GetEntropy,
Posix(posix::Dlsym),
Windows(windows::Dlsym),
}
impl Dlsym {
// Returns an error for unsupported symbols, and None if this symbol
// should become a NULL pointer (pretend it does not exist).
pub fn from_str(name: &[u8], target_os: &str) -> InterpResult<'static, Option<Dlsym>> {
use self::Dlsym::*;
let name = String::from_utf8_lossy(name);
let name = &*String::from_utf8_lossy(name);
Ok(match target_os {
"linux" => match &*name {
"__pthread_get_minstack" => None,
_ => throw_unsup_format!("unsupported Linux dlsym: {}", name),
}
"macos" => match &*name {
"getentropy" => Some(GetEntropy),
_ => throw_unsup_format!("unsupported macOS dlsym: {}", name),
}
"windows" => match &*name {
"SetThreadStackGuarantee" => None,
"AcquireSRWLockExclusive" => None,
"GetSystemTimePreciseAsFileTime" => None,
_ => throw_unsup_format!("unsupported Windows dlsym: {}", name),
}
"linux" | "macos" => posix::Dlsym::from_str(name, target_os)?.map(Dlsym::Posix),
"windows" => windows::Dlsym::from_str(name)?.map(Dlsym::Windows),
os => bug!("dlsym not implemented for target_os {}", os),
})
}
@ -42,23 +32,10 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
args: &[OpTy<'tcx, Tag>],
ret: Option<(PlaceTy<'tcx, Tag>, mir::BasicBlock)>,
) -> InterpResult<'tcx> {
use self::Dlsym::*;
let this = self.eval_context_mut();
let (dest, ret) = ret.expect("we don't support any diverging dlsym");
match dlsym {
GetEntropy => {
let &[ptr, len] = check_arg_count(args)?;
let ptr = this.read_scalar(ptr)?.not_undef()?;
let len = this.read_scalar(len)?.to_machine_usize(this)?;
this.gen_random(ptr, len)?;
this.write_null(dest)?;
}
Dlsym::Posix(dlsym) => posix::EvalContextExt::call_dlsym(this, dlsym, args, ret),
Dlsym::Windows(dlsym) => windows::EvalContextExt::call_dlsym(this, dlsym, args, ret),
}
this.dump_place(*dest);
this.go_to_block(ret);
Ok(())
}
}

View file

@ -129,7 +129,6 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
// This matches calls to the foreign item `panic_impl`.
// The implementation is provided by the function with the `#[panic_handler]` attribute.
"panic_impl" => {
this.check_panic_supported()?;
let panic_impl_id = tcx.lang_items().panic_impl().unwrap();
let panic_impl_instance = ty::Instance::mono(tcx, panic_impl_id);
return Ok(Some(&*this.load_mir(panic_impl_instance.def, None)?));

View file

@ -52,14 +52,6 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
return this.emulate_foreign_item(instance.def_id(), args, ret, unwind);
}
// Better error message for panics on Windows.
let def_id = instance.def_id();
if Some(def_id) == this.tcx.lang_items().begin_panic_fn() ||
Some(def_id) == this.tcx.lang_items().panic_impl()
{
this.check_panic_supported()?;
}
// Otherwise, load the MIR.
Ok(Some(&*this.load_mir(instance.def, None)?))
}

View file

@ -34,14 +34,6 @@ pub struct CatchUnwindData<'tcx> {
impl<'mir, 'tcx: 'mir> EvalContextExt<'mir, 'tcx> for crate::MiriEvalContext<'mir, 'tcx> {}
pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx> {
/// Check if panicking is supported on this target, and give a good error otherwise.
fn check_panic_supported(&self) -> InterpResult<'tcx> {
match self.eval_context_ref().tcx.sess.target.target.target_os.as_str() {
"linux" | "macos" => Ok(()),
_ => throw_unsup_format!("panicking is not supported on this target"),
}
}
/// Handles the special `miri_start_panic` intrinsic, which is called
/// by libpanic_unwind to delegate the actual unwinding process to Miri.
fn handle_miri_start_panic(

39
src/shims/posix/dlsym.rs Normal file
View file

@ -0,0 +1,39 @@
use rustc_middle::mir;
use crate::*;
use shims::posix::linux::dlsym as linux;
use shims::posix::macos::dlsym as macos;
/// A dynamically resolved symbol on a POSIX target, wrapping the
/// OS-specific symbol type.
#[derive(Debug, Copy, Clone)]
pub enum Dlsym {
Linux(linux::Dlsym),
MacOs(macos::Dlsym),
}
impl Dlsym {
// Returns an error for unsupported symbols, and None if this symbol
// should become a NULL pointer (pretend it does not exist).
pub fn from_str(name: &str, target_os: &str) -> InterpResult<'static, Option<Dlsym>> {
Ok(match target_os {
"linux" => linux::Dlsym::from_str(name)?.map(Dlsym::Linux),
"macos" => macos::Dlsym::from_str(name)?.map(Dlsym::MacOs),
// This module is only reached for the two OSes handled above.
_ => unreachable!(),
})
}
}
impl<'mir, 'tcx: 'mir> EvalContextExt<'mir, 'tcx> for crate::MiriEvalContext<'mir, 'tcx> {}
pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx> {
/// Dispatches a resolved POSIX dlsym call to the matching OS-specific
/// implementation (Linux or macOS).
fn call_dlsym(
&mut self,
dlsym: Dlsym,
args: &[OpTy<'tcx, Tag>],
ret: Option<(PlaceTy<'tcx, Tag>, mir::BasicBlock)>,
) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
match dlsym {
Dlsym::Linux(dlsym) => linux::EvalContextExt::call_dlsym(this, dlsym, args, ret),
Dlsym::MacOs(dlsym) => macos::EvalContextExt::call_dlsym(this, dlsym, args, ret),
}
}
}

View file

@ -0,0 +1,34 @@
use rustc_middle::mir;
use crate::*;
/// Linux dlsym symbols Miri implements. Currently empty: every recognized
/// symbol is mapped to a NULL pointer instead of a shim.
#[derive(Debug, Copy, Clone)]
pub enum Dlsym {
}
impl Dlsym {
// Returns an error for unsupported symbols, and None if this symbol
// should become a NULL pointer (pretend it does not exist).
pub fn from_str(name: &str) -> InterpResult<'static, Option<Dlsym>> {
Ok(match &*name {
// Returning None makes the caller see a NULL pointer for this symbol.
"__pthread_get_minstack" => None,
_ => throw_unsup_format!("unsupported Linux dlsym: {}", name),
})
}
}
impl<'mir, 'tcx: 'mir> EvalContextExt<'mir, 'tcx> for crate::MiriEvalContext<'mir, 'tcx> {}
pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx> {
/// Executes a resolved Linux dlsym shim. Since `Dlsym` has no variants,
/// this can never actually run a shim.
fn call_dlsym(
&mut self,
dlsym: Dlsym,
_args: &[OpTy<'tcx, Tag>],
ret: Option<(PlaceTy<'tcx, Tag>, mir::BasicBlock)>,
) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
let (_dest, _ret) = ret.expect("we don't support any diverging dlsym");
assert!(this.tcx.sess.target.target.target_os == "linux");
// `Dlsym` is an empty enum, so this match is vacuously exhaustive.
match dlsym {}
}
}

View file

@ -1 +1,2 @@
pub mod foreign_items;
pub mod dlsym;

View file

@ -0,0 +1,49 @@
use rustc_middle::mir;
use crate::*;
use helpers::check_arg_count;
/// macOS dlsym symbols Miri implements as shims.
#[derive(Debug, Copy, Clone)]
#[allow(non_camel_case_types)]
pub enum Dlsym {
getentropy,
}
impl Dlsym {
// Returns an error for unsupported symbols, and None if this symbol
// should become a NULL pointer (pretend it does not exist).
pub fn from_str(name: &str) -> InterpResult<'static, Option<Dlsym>> {
Ok(match name {
"getentropy" => Some(Dlsym::getentropy),
_ => throw_unsup_format!("unsupported macOS dlsym: {}", name),
})
}
}
impl<'mir, 'tcx: 'mir> EvalContextExt<'mir, 'tcx> for crate::MiriEvalContext<'mir, 'tcx> {}
pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx> {
/// Executes a resolved macOS dlsym shim, writes its result to `dest`,
/// and continues execution at `ret`.
fn call_dlsym(
&mut self,
dlsym: Dlsym,
args: &[OpTy<'tcx, Tag>],
ret: Option<(PlaceTy<'tcx, Tag>, mir::BasicBlock)>,
) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
let (dest, ret) = ret.expect("we don't support any diverging dlsym");
assert!(this.tcx.sess.target.target.target_os == "macos");
match dlsym {
Dlsym::getentropy => {
let &[ptr, len] = check_arg_count(args)?;
let ptr = this.read_scalar(ptr)?.not_undef()?;
let len = this.read_scalar(len)?.to_machine_usize(this)?;
// Fill the caller-provided buffer with random bytes.
this.gen_random(ptr, len)?;
// Signal success by writing 0 to the return place.
this.write_null(dest)?;
}
}
this.dump_place(*dest);
this.go_to_block(ret);
Ok(())
}
}

View file

@ -1 +1,2 @@
pub mod foreign_items;
pub mod dlsym;

View file

@ -1,4 +1,5 @@
pub mod foreign_items;
pub mod dlsym;
mod fs;
mod sync;

View file

@ -1,58 +1,11 @@
use std::convert::TryInto;
use std::time::{Duration, SystemTime};
use std::ops::Not;
use rustc_middle::ty::{layout::TyAndLayout, TyKind, TypeAndMut};
use rustc_target::abi::{LayoutOf, Size};
use crate::*;
use stacked_borrows::Tag;
use thread::Time;
fn assert_ptr_target_min_size<'mir, 'tcx: 'mir>(
ecx: &MiriEvalContext<'mir, 'tcx>,
operand: OpTy<'tcx, Tag>,
min_size: u64,
) -> InterpResult<'tcx, ()> {
let target_ty = match operand.layout.ty.kind {
TyKind::RawPtr(TypeAndMut { ty, mutbl: _ }) => ty,
_ => panic!("Argument to pthread function was not a raw pointer"),
};
let target_layout = ecx.layout_of(target_ty)?;
assert!(target_layout.size.bytes() >= min_size);
Ok(())
}
fn get_at_offset<'mir, 'tcx: 'mir>(
ecx: &MiriEvalContext<'mir, 'tcx>,
op: OpTy<'tcx, Tag>,
offset: u64,
layout: TyAndLayout<'tcx>,
min_size: u64,
) -> InterpResult<'tcx, ScalarMaybeUninit<Tag>> {
// Ensure that the following read at an offset to the attr pointer is within bounds
assert_ptr_target_min_size(ecx, op, min_size)?;
let op_place = ecx.deref_operand(op)?;
let value_place = op_place.offset(Size::from_bytes(offset), MemPlaceMeta::None, layout, ecx)?;
ecx.read_scalar(value_place.into())
}
fn set_at_offset<'mir, 'tcx: 'mir>(
ecx: &mut MiriEvalContext<'mir, 'tcx>,
op: OpTy<'tcx, Tag>,
offset: u64,
value: impl Into<ScalarMaybeUninit<Tag>>,
layout: TyAndLayout<'tcx>,
min_size: u64,
) -> InterpResult<'tcx, ()> {
// Ensure that the following write at an offset to the attr pointer is within bounds
assert_ptr_target_min_size(ecx, op, min_size)?;
let op_place = ecx.deref_operand(op)?;
let value_place = op_place.offset(Size::from_bytes(offset), MemPlaceMeta::None, layout, ecx)?;
ecx.write_scalar(value.into(), value_place.into())
}
// pthread_mutexattr_t is either 4 or 8 bytes, depending on the platform.
// Our chosen memory layout for emulation (does not have to match the platform layout!):
@ -66,8 +19,6 @@ fn set_at_offset<'mir, 'tcx: 'mir>(
/// in `pthread_mutexattr_settype` function.
const PTHREAD_MUTEX_NORMAL_FLAG: i32 = 0x8000000;
const PTHREAD_MUTEXATTR_T_MIN_SIZE: u64 = 4;
fn is_mutex_kind_default<'mir, 'tcx: 'mir>(
ecx: &mut MiriEvalContext<'mir, 'tcx>,
kind: Scalar<Tag>,
@ -88,7 +39,7 @@ fn mutexattr_get_kind<'mir, 'tcx: 'mir>(
ecx: &MiriEvalContext<'mir, 'tcx>,
attr_op: OpTy<'tcx, Tag>,
) -> InterpResult<'tcx, ScalarMaybeUninit<Tag>> {
get_at_offset(ecx, attr_op, 0, ecx.machine.layouts.i32, PTHREAD_MUTEXATTR_T_MIN_SIZE)
ecx.read_scalar_at_offset(attr_op, 0, ecx.machine.layouts.i32)
}
fn mutexattr_set_kind<'mir, 'tcx: 'mir>(
@ -96,7 +47,7 @@ fn mutexattr_set_kind<'mir, 'tcx: 'mir>(
attr_op: OpTy<'tcx, Tag>,
kind: impl Into<ScalarMaybeUninit<Tag>>,
) -> InterpResult<'tcx, ()> {
set_at_offset(ecx, attr_op, 0, kind, ecx.machine.layouts.i32, PTHREAD_MUTEXATTR_T_MIN_SIZE)
ecx.write_scalar_at_offset(attr_op, 0, kind, ecx.machine.layouts.i32)
}
// pthread_mutex_t is between 24 and 48 bytes, depending on the platform.
@ -108,14 +59,12 @@ fn mutexattr_set_kind<'mir, 'tcx: 'mir>(
// bytes 12-15 or 16-19 (depending on platform): mutex kind, as an i32
// (the kind has to be at its offset for compatibility with static initializer macros)
const PTHREAD_MUTEX_T_MIN_SIZE: u64 = 24;
fn mutex_get_kind<'mir, 'tcx: 'mir>(
ecx: &mut MiriEvalContext<'mir, 'tcx>,
mutex_op: OpTy<'tcx, Tag>,
) -> InterpResult<'tcx, ScalarMaybeUninit<Tag>> {
let offset = if ecx.pointer_size().bytes() == 8 { 16 } else { 12 };
get_at_offset(ecx, mutex_op, offset, ecx.machine.layouts.i32, PTHREAD_MUTEX_T_MIN_SIZE)
ecx.read_scalar_at_offset(mutex_op, offset, ecx.machine.layouts.i32)
}
fn mutex_set_kind<'mir, 'tcx: 'mir>(
@ -124,14 +73,14 @@ fn mutex_set_kind<'mir, 'tcx: 'mir>(
kind: impl Into<ScalarMaybeUninit<Tag>>,
) -> InterpResult<'tcx, ()> {
let offset = if ecx.pointer_size().bytes() == 8 { 16 } else { 12 };
set_at_offset(ecx, mutex_op, offset, kind, ecx.machine.layouts.i32, PTHREAD_MUTEX_T_MIN_SIZE)
ecx.write_scalar_at_offset(mutex_op, offset, kind, ecx.machine.layouts.i32)
}
fn mutex_get_id<'mir, 'tcx: 'mir>(
ecx: &MiriEvalContext<'mir, 'tcx>,
mutex_op: OpTy<'tcx, Tag>,
) -> InterpResult<'tcx, ScalarMaybeUninit<Tag>> {
get_at_offset(ecx, mutex_op, 4, ecx.machine.layouts.u32, PTHREAD_MUTEX_T_MIN_SIZE)
ecx.read_scalar_at_offset(mutex_op, 4, ecx.machine.layouts.u32)
}
fn mutex_set_id<'mir, 'tcx: 'mir>(
@ -139,7 +88,7 @@ fn mutex_set_id<'mir, 'tcx: 'mir>(
mutex_op: OpTy<'tcx, Tag>,
id: impl Into<ScalarMaybeUninit<Tag>>,
) -> InterpResult<'tcx, ()> {
set_at_offset(ecx, mutex_op, 4, id, ecx.machine.layouts.u32, PTHREAD_MUTEX_T_MIN_SIZE)
ecx.write_scalar_at_offset(mutex_op, 4, id, ecx.machine.layouts.u32)
}
fn mutex_get_or_create_id<'mir, 'tcx: 'mir>(
@ -165,13 +114,11 @@ fn mutex_get_or_create_id<'mir, 'tcx: 'mir>(
// (need to avoid this because it is set by static initializer macros)
// bytes 4-7: rwlock id as u32 or 0 if id is not assigned yet.
const PTHREAD_RWLOCK_T_MIN_SIZE: u64 = 32;
fn rwlock_get_id<'mir, 'tcx: 'mir>(
ecx: &MiriEvalContext<'mir, 'tcx>,
rwlock_op: OpTy<'tcx, Tag>,
) -> InterpResult<'tcx, ScalarMaybeUninit<Tag>> {
get_at_offset(ecx, rwlock_op, 4, ecx.machine.layouts.u32, PTHREAD_RWLOCK_T_MIN_SIZE)
ecx.read_scalar_at_offset(rwlock_op, 4, ecx.machine.layouts.u32)
}
fn rwlock_set_id<'mir, 'tcx: 'mir>(
@ -179,7 +126,7 @@ fn rwlock_set_id<'mir, 'tcx: 'mir>(
rwlock_op: OpTy<'tcx, Tag>,
id: impl Into<ScalarMaybeUninit<Tag>>,
) -> InterpResult<'tcx, ()> {
set_at_offset(ecx, rwlock_op, 4, id, ecx.machine.layouts.u32, PTHREAD_RWLOCK_T_MIN_SIZE)
ecx.write_scalar_at_offset(rwlock_op, 4, id, ecx.machine.layouts.u32)
}
fn rwlock_get_or_create_id<'mir, 'tcx: 'mir>(
@ -204,13 +151,11 @@ fn rwlock_get_or_create_id<'mir, 'tcx: 'mir>(
// store an i32 in the first four bytes equal to the corresponding libc clock id constant
// (e.g. CLOCK_REALTIME).
const PTHREAD_CONDATTR_T_MIN_SIZE: u64 = 4;
fn condattr_get_clock_id<'mir, 'tcx: 'mir>(
ecx: &MiriEvalContext<'mir, 'tcx>,
attr_op: OpTy<'tcx, Tag>,
) -> InterpResult<'tcx, ScalarMaybeUninit<Tag>> {
get_at_offset(ecx, attr_op, 0, ecx.machine.layouts.i32, PTHREAD_CONDATTR_T_MIN_SIZE)
ecx.read_scalar_at_offset(attr_op, 0, ecx.machine.layouts.i32)
}
fn condattr_set_clock_id<'mir, 'tcx: 'mir>(
@ -218,7 +163,7 @@ fn condattr_set_clock_id<'mir, 'tcx: 'mir>(
attr_op: OpTy<'tcx, Tag>,
clock_id: impl Into<ScalarMaybeUninit<Tag>>,
) -> InterpResult<'tcx, ()> {
set_at_offset(ecx, attr_op, 0, clock_id, ecx.machine.layouts.i32, PTHREAD_CONDATTR_T_MIN_SIZE)
ecx.write_scalar_at_offset(attr_op, 0, clock_id, ecx.machine.layouts.i32)
}
// pthread_cond_t
@ -230,13 +175,11 @@ fn condattr_set_clock_id<'mir, 'tcx: 'mir>(
// bytes 4-7: the conditional variable id as u32 or 0 if id is not assigned yet.
// bytes 8-11: the clock id constant as i32
const PTHREAD_COND_T_MIN_SIZE: u64 = 12;
fn cond_get_id<'mir, 'tcx: 'mir>(
ecx: &MiriEvalContext<'mir, 'tcx>,
cond_op: OpTy<'tcx, Tag>,
) -> InterpResult<'tcx, ScalarMaybeUninit<Tag>> {
get_at_offset(ecx, cond_op, 4, ecx.machine.layouts.u32, PTHREAD_COND_T_MIN_SIZE)
ecx.read_scalar_at_offset(cond_op, 4, ecx.machine.layouts.u32)
}
fn cond_set_id<'mir, 'tcx: 'mir>(
@ -244,7 +187,7 @@ fn cond_set_id<'mir, 'tcx: 'mir>(
cond_op: OpTy<'tcx, Tag>,
id: impl Into<ScalarMaybeUninit<Tag>>,
) -> InterpResult<'tcx, ()> {
set_at_offset(ecx, cond_op, 4, id, ecx.machine.layouts.u32, PTHREAD_COND_T_MIN_SIZE)
ecx.write_scalar_at_offset(cond_op, 4, id, ecx.machine.layouts.u32)
}
fn cond_get_or_create_id<'mir, 'tcx: 'mir>(
@ -267,7 +210,7 @@ fn cond_get_clock_id<'mir, 'tcx: 'mir>(
ecx: &MiriEvalContext<'mir, 'tcx>,
cond_op: OpTy<'tcx, Tag>,
) -> InterpResult<'tcx, ScalarMaybeUninit<Tag>> {
get_at_offset(ecx, cond_op, 8, ecx.machine.layouts.i32, PTHREAD_COND_T_MIN_SIZE)
ecx.read_scalar_at_offset(cond_op, 8, ecx.machine.layouts.i32)
}
fn cond_set_clock_id<'mir, 'tcx: 'mir>(
@ -275,7 +218,7 @@ fn cond_set_clock_id<'mir, 'tcx: 'mir>(
cond_op: OpTy<'tcx, Tag>,
clock_id: impl Into<ScalarMaybeUninit<Tag>>,
) -> InterpResult<'tcx, ()> {
set_at_offset(ecx, cond_op, 8, clock_id, ecx.machine.layouts.i32, PTHREAD_COND_T_MIN_SIZE)
ecx.write_scalar_at_offset(cond_op, 8, clock_id, ecx.machine.layouts.i32)
}
/// Try to reacquire the mutex associated with the condition variable after we
@ -604,27 +547,8 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
let active_thread = this.get_active_thread();
if this.rwlock_reader_unlock(id, active_thread) {
// The thread was a reader.
if this.rwlock_is_locked(id).not() {
// No more readers owning the lock. Give it to a writer if there
// is any.
this.rwlock_dequeue_and_lock_writer(id);
}
Ok(0)
} else if Some(active_thread) == this.rwlock_writer_unlock(id) {
// The thread was a writer.
//
// We are prioritizing writers here against the readers. As a
// result, not only readers can starve writers, but also writers can
// starve readers.
if this.rwlock_dequeue_and_lock_writer(id) {
// Someone got the write lock, nice.
} else {
// Give the lock to all readers.
while this.rwlock_dequeue_and_lock_reader(id) {
// Rinse and repeat.
}
}
} else if this.rwlock_writer_unlock(id, active_thread) {
Ok(0)
} else {
throw_ub_format!("unlocked an rwlock that was not locked by the active thread");

View file

@ -230,7 +230,7 @@ trait EvalContextPrivExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
fn schedule_windows_tls_dtors(&mut self) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
let active_thread = this.get_active_thread();
assert_eq!(this.get_total_thread_count(), 1, "concurrency on Windows not supported");
assert_eq!(this.get_total_thread_count(), 1, "concurrency on Windows is not supported");
// Windows has a special magic linker section that is run on certain events.
// Instead of searching for that section and supporting arbitrary hooks in there
// (that would be basically https://github.com/rust-lang/miri/issues/450),

View file

@ -0,0 +1,80 @@
use rustc_middle::mir;
use crate::*;
use helpers::check_arg_count;
use shims::windows::sync::EvalContextExt as _;
/// Windows dlsym symbols Miri implements as shims — currently the slim
/// reader/writer (SRW) lock API.
#[derive(Debug, Copy, Clone)]
pub enum Dlsym {
AcquireSRWLockExclusive,
ReleaseSRWLockExclusive,
TryAcquireSRWLockExclusive,
AcquireSRWLockShared,
ReleaseSRWLockShared,
TryAcquireSRWLockShared,
}
impl Dlsym {
// Returns an error for unsupported symbols, and None if this symbol
// should become a NULL pointer (pretend it does not exist).
pub fn from_str(name: &str) -> InterpResult<'static, Option<Dlsym>> {
Ok(match name {
"AcquireSRWLockExclusive" => Some(Dlsym::AcquireSRWLockExclusive),
"ReleaseSRWLockExclusive" => Some(Dlsym::ReleaseSRWLockExclusive),
"TryAcquireSRWLockExclusive" => Some(Dlsym::TryAcquireSRWLockExclusive),
"AcquireSRWLockShared" => Some(Dlsym::AcquireSRWLockShared),
"ReleaseSRWLockShared" => Some(Dlsym::ReleaseSRWLockShared),
"TryAcquireSRWLockShared" => Some(Dlsym::TryAcquireSRWLockShared),
// These symbols are mapped to NULL pointers (pretend they do not exist).
"SetThreadStackGuarantee" => None,
"GetSystemTimePreciseAsFileTime" => None,
_ => throw_unsup_format!("unsupported Windows dlsym: {}", name),
})
}
}
impl<'mir, 'tcx: 'mir> EvalContextExt<'mir, 'tcx> for crate::MiriEvalContext<'mir, 'tcx> {}
pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx> {
/// Executes a resolved Windows dlsym shim, writes its result (if any) to
/// `dest`, and continues execution at `ret`.
fn call_dlsym(
&mut self,
dlsym: Dlsym,
args: &[OpTy<'tcx, Tag>],
ret: Option<(PlaceTy<'tcx, Tag>, mir::BasicBlock)>,
) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
let (dest, ret) = ret.expect("we don't support any diverging dlsym");
assert!(this.tcx.sess.target.target.target_os == "windows");
match dlsym {
// The Acquire/Release variants return nothing; only the Try* variants
// write a result (a BOOLEAN, i.e. u8) into `dest`.
Dlsym::AcquireSRWLockExclusive => {
let &[ptr] = check_arg_count(args)?;
this.AcquireSRWLockExclusive(ptr)?;
}
Dlsym::ReleaseSRWLockExclusive => {
let &[ptr] = check_arg_count(args)?;
this.ReleaseSRWLockExclusive(ptr)?;
}
Dlsym::TryAcquireSRWLockExclusive => {
let &[ptr] = check_arg_count(args)?;
let ret = this.TryAcquireSRWLockExclusive(ptr)?;
this.write_scalar(Scalar::from_u8(ret), dest)?;
}
Dlsym::AcquireSRWLockShared => {
let &[ptr] = check_arg_count(args)?;
this.AcquireSRWLockShared(ptr)?;
}
Dlsym::ReleaseSRWLockShared => {
let &[ptr] = check_arg_count(args)?;
this.ReleaseSRWLockShared(ptr)?;
}
Dlsym::TryAcquireSRWLockShared => {
let &[ptr] = check_arg_count(args)?;
let ret = this.TryAcquireSRWLockShared(ptr)?;
this.write_scalar(Scalar::from_u8(ret), dest)?;
}
}
this.dump_place(*dest);
this.go_to_block(ret);
Ok(())
}
}

View file

@ -21,6 +21,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
// HANDLE = isize
// DWORD = ULONG = u32
// BOOL = i32
// BOOLEAN = u8
match link_name {
// Environment related shims
"GetEnvironmentVariableW" => {
@ -256,7 +257,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
// Better error for attempts to create a thread
"CreateThread" => {
throw_unsup_format!("Miri does not support threading");
throw_unsup_format!("Miri does not support concurrency on Windows");
}
// Incomplete shims that we "stub out" just to get pre-main initialization code to work.
@ -291,7 +292,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
if this.frame().instance.to_string().starts_with("std::sys::windows::") => {
#[allow(non_snake_case)]
let &[_lpCriticalSection] = check_arg_count(args)?;
assert_eq!(this.get_total_thread_count(), 1, "concurrency on Windows not supported");
assert_eq!(this.get_total_thread_count(), 1, "concurrency on Windows is not supported");
// Nothing to do, not even a return value.
// (Windows locks are reentrant, and we have only 1 thread,
// so not doing any futher checks here is at least not incorrect.)
@ -300,8 +301,8 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
if this.frame().instance.to_string().starts_with("std::sys::windows::") => {
#[allow(non_snake_case)]
let &[_lpCriticalSection] = check_arg_count(args)?;
assert_eq!(this.get_total_thread_count(), 1, "concurrency on Windows not supported");
// There is only one thread, so this always succeeds and returns TRUE
assert_eq!(this.get_total_thread_count(), 1, "concurrency on Windows is not supported");
// There is only one thread, so this always succeeds and returns TRUE.
this.write_scalar(Scalar::from_i32(1), dest)?;
}

View file

@ -1 +1,4 @@
pub mod foreign_items;
pub mod dlsym;
mod sync;

135
src/shims/windows/sync.rs Normal file
View file

@ -0,0 +1,135 @@
use crate::*;
// Locks are pointer-sized pieces of data, initialized to 0.
// We use the first 4 bytes to store the RwLockId.
/// Returns the `RwLockId` stored in the SRWLock that `lock_op` points to,
/// allocating (and storing) a fresh one if the lock is still in its
/// zero-initialized state.
fn srwlock_get_or_create_id<'mir, 'tcx: 'mir>(
ecx: &mut MiriEvalContext<'mir, 'tcx>,
lock_op: OpTy<'tcx, Tag>,
) -> InterpResult<'tcx, RwLockId> {
let id = ecx.read_scalar_at_offset(lock_op, 0, ecx.machine.layouts.u32)?.to_u32()?;
if id == 0 {
// 0 is a default value and also not a valid rwlock id. Need to allocate
// a new rwlock.
let id = ecx.rwlock_create();
// Persist the freshly allocated id into the lock's memory.
ecx.write_scalar_at_offset(lock_op, 0, id.to_u32_scalar(), ecx.machine.layouts.u32)?;
Ok(id)
} else {
Ok(RwLockId::from_u32(id))
}
}
impl<'mir, 'tcx> EvalContextExt<'mir, 'tcx> for crate::MiriEvalContext<'mir, 'tcx> {}
pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx> {
/// Write-locks the SRWLock, blocking the active thread if it is already
/// held in any mode.
#[allow(non_snake_case)]
fn AcquireSRWLockExclusive(
&mut self,
lock_op: OpTy<'tcx, Tag>,
) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
let id = srwlock_get_or_create_id(this, lock_op)?;
let active_thread = this.get_active_thread();
if this.rwlock_is_locked(id) {
// Note: this will deadlock if the lock is already locked by this
// thread in any way.
//
// FIXME: Detect and report the deadlock proactively. (We currently
// report the deadlock only when no thread can continue execution,
// but we could detect that this lock is already locked and report
// an error.)
this.rwlock_enqueue_and_block_writer(id, active_thread);
} else {
this.rwlock_writer_lock(id, active_thread);
}
Ok(())
}
/// Attempts to write-lock the SRWLock without blocking.
/// Returns 1 on success and 0 if the lock was already held.
#[allow(non_snake_case)]
fn TryAcquireSRWLockExclusive(
&mut self,
lock_op: OpTy<'tcx, Tag>,
) -> InterpResult<'tcx, u8> {
let this = self.eval_context_mut();
let id = srwlock_get_or_create_id(this, lock_op)?;
let active_thread = this.get_active_thread();
if this.rwlock_is_locked(id) {
// Lock is already held.
Ok(0)
} else {
this.rwlock_writer_lock(id, active_thread);
Ok(1)
}
}
/// Releases an exclusively held SRWLock; UB if the active thread does not
/// hold it exclusively.
#[allow(non_snake_case)]
fn ReleaseSRWLockExclusive(
&mut self,
lock_op: OpTy<'tcx, Tag>,
) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
let id = srwlock_get_or_create_id(this, lock_op)?;
let active_thread = this.get_active_thread();
if !this.rwlock_writer_unlock(id, active_thread) {
// The docs do not say anything about this case, but it seems better to not allow it.
throw_ub_format!("calling ReleaseSRWLockExclusive on an SRWLock that is not exclusively locked by the current thread");
}
Ok(())
}
/// Read-locks the SRWLock, blocking the active thread while a writer
/// holds it.
#[allow(non_snake_case)]
fn AcquireSRWLockShared(
&mut self,
lock_op: OpTy<'tcx, Tag>,
) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
let id = srwlock_get_or_create_id(this, lock_op)?;
let active_thread = this.get_active_thread();
if this.rwlock_is_write_locked(id) {
this.rwlock_enqueue_and_block_reader(id, active_thread);
} else {
this.rwlock_reader_lock(id, active_thread);
}
Ok(())
}
/// Attempts to read-lock the SRWLock without blocking.
/// Returns 1 on success and 0 if a writer holds the lock.
#[allow(non_snake_case)]
fn TryAcquireSRWLockShared(
&mut self,
lock_op: OpTy<'tcx, Tag>,
) -> InterpResult<'tcx, u8> {
let this = self.eval_context_mut();
let id = srwlock_get_or_create_id(this, lock_op)?;
let active_thread = this.get_active_thread();
if this.rwlock_is_write_locked(id) {
Ok(0)
} else {
this.rwlock_reader_lock(id, active_thread);
Ok(1)
}
}
/// Releases a shared (read) hold on the SRWLock; UB if the active thread
/// does not hold it.
#[allow(non_snake_case)]
fn ReleaseSRWLockShared(
&mut self,
lock_op: OpTy<'tcx, Tag>,
) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
let id = srwlock_get_or_create_id(this, lock_op)?;
let active_thread = this.get_active_thread();
if !this.rwlock_reader_unlock(id, active_thread) {
// The docs do not say anything about this case, but it seems better to not allow it.
throw_ub_format!("calling ReleaseSRWLockShared on an SRWLock that is not locked by the current thread");
}
Ok(())
}
}

View file

@ -3,6 +3,8 @@ use std::convert::TryFrom;
use std::num::NonZeroU32;
use std::ops::Not;
use log::trace;
use rustc_index::vec::{Idx, IndexVec};
use crate::*;
@ -102,6 +104,52 @@ pub(super) struct SynchronizationState {
condvars: IndexVec<CondvarId, Condvar>,
}
// Private extension trait for local helper methods
impl<'mir, 'tcx: 'mir> EvalContextExtPriv<'mir, 'tcx> for crate::MiriEvalContext<'mir, 'tcx> {}
trait EvalContextExtPriv<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx> {
/// Take a reader out of the queue waiting for the lock.
/// Returns `true` if some thread got the rwlock.
#[inline]
fn rwlock_dequeue_and_lock_reader(&mut self, id: RwLockId) -> bool {
let this = self.eval_context_mut();
if let Some(reader) = this.machine.threads.sync.rwlocks[id].reader_queue.pop_front() {
// Wake the thread and hand it the read lock in one step.
this.unblock_thread(reader);
this.rwlock_reader_lock(id, reader);
true
} else {
false
}
}
/// Take the writer out of the queue waiting for the lock.
/// Returns `true` if some thread got the rwlock.
#[inline]
fn rwlock_dequeue_and_lock_writer(&mut self, id: RwLockId) -> bool {
let this = self.eval_context_mut();
if let Some(writer) = this.machine.threads.sync.rwlocks[id].writer_queue.pop_front() {
// Wake the thread and hand it the write lock in one step.
this.unblock_thread(writer);
this.rwlock_writer_lock(id, writer);
true
} else {
false
}
}
/// Take a thread out of the queue waiting for the mutex, and lock
/// the mutex for it. Returns `true` if some thread has the mutex now.
#[inline]
fn mutex_dequeue_and_lock(&mut self, id: MutexId) -> bool {
let this = self.eval_context_mut();
if let Some(thread) = this.machine.threads.sync.mutexes[id].queue.pop_front() {
this.unblock_thread(thread);
this.mutex_lock(id, thread);
true
} else {
false
}
}
}
// Public interface to synchronization primitives. Please note that in most
// cases, the function calls are infallible and it is the client's (shim
// implementation's) responsibility to detect and deal with erroneous
@ -124,8 +172,8 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
#[inline]
/// Check if locked.
fn mutex_is_locked(&mut self, id: MutexId) -> bool {
let this = self.eval_context_mut();
fn mutex_is_locked(&self, id: MutexId) -> bool {
let this = self.eval_context_ref();
this.machine.threads.sync.mutexes[id].owner.is_some()
}
@ -174,7 +222,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
}
Some(old_lock_count)
} else {
// Mutex is unlocked.
// Mutex is not locked.
None
}
}
@ -188,20 +236,6 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
this.block_thread(thread);
}
#[inline]
/// Take a thread out of the queue waiting for the mutex, and lock
/// the mutex for it. Returns `true` if some thread has the mutex now.
fn mutex_dequeue_and_lock(&mut self, id: MutexId) -> bool {
let this = self.eval_context_mut();
if let Some(thread) = this.machine.threads.sync.mutexes[id].queue.pop_front() {
this.unblock_thread(thread);
this.mutex_lock(id, thread);
true
} else {
false
}
}
#[inline]
/// Create state for a new read write lock.
fn rwlock_create(&mut self) -> RwLockId {
@ -211,17 +245,23 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
#[inline]
/// Check if locked.
fn rwlock_is_locked(&mut self, id: RwLockId) -> bool {
let this = self.eval_context_mut();
this.machine.threads.sync.rwlocks[id].writer.is_some()
|| this.machine.threads.sync.rwlocks[id].readers.is_empty().not()
fn rwlock_is_locked(&self, id: RwLockId) -> bool {
let this = self.eval_context_ref();
let rwlock = &this.machine.threads.sync.rwlocks[id];
trace!(
"rwlock_is_locked: {:?} writer is {:?} and there are {} reader threads (some of which could hold multiple read locks)",
id, rwlock.writer, rwlock.readers.len(),
);
rwlock.writer.is_some()|| rwlock.readers.is_empty().not()
}
#[inline]
/// Check if write locked.
fn rwlock_is_write_locked(&mut self, id: RwLockId) -> bool {
let this = self.eval_context_mut();
this.machine.threads.sync.rwlocks[id].writer.is_some()
fn rwlock_is_write_locked(&self, id: RwLockId) -> bool {
let this = self.eval_context_ref();
let rwlock = &this.machine.threads.sync.rwlocks[id];
trace!("rwlock_is_write_locked: {:?} writer is {:?}", id, rwlock.writer);
rwlock.writer.is_some()
}
/// Read-lock the lock by adding the `reader` the list of threads that own
@ -229,12 +269,13 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
fn rwlock_reader_lock(&mut self, id: RwLockId, reader: ThreadId) {
let this = self.eval_context_mut();
assert!(!this.rwlock_is_write_locked(id), "the lock is write locked");
trace!("rwlock_reader_lock: {:?} now also held (one more time) by {:?}", id, reader);
let count = this.machine.threads.sync.rwlocks[id].readers.entry(reader).or_insert(0);
*count = count.checked_add(1).expect("the reader counter overflowed");
}
/// Try read-unlock the lock for `reader`. Returns `true` if succeeded,
/// `false` if this `reader` did not hold the lock.
/// Try read-unlock the lock for `reader` and potentially give the lock to a new owner.
/// Returns `true` if succeeded, `false` if this `reader` did not hold the lock.
fn rwlock_reader_unlock(&mut self, id: RwLockId, reader: ThreadId) -> bool {
let this = self.eval_context_mut();
match this.machine.threads.sync.rwlocks[id].readers.entry(reader) {
@ -243,12 +284,19 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
assert!(*count > 0, "rwlock locked with count == 0");
*count -= 1;
if *count == 0 {
trace!("rwlock_reader_unlock: {:?} no longer held by {:?}", id, reader);
entry.remove();
} else {
trace!("rwlock_reader_unlock: {:?} held one less time by {:?}", id, reader);
}
true
}
Entry::Vacant(_) => false,
Entry::Vacant(_) => return false, // we did not even own this lock
}
// The thread was a reader. If the lock is not held any more, give it to a writer.
if this.rwlock_is_locked(id).not() {
this.rwlock_dequeue_and_lock_writer(id);
}
true
}
#[inline]
@ -259,38 +307,49 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
reader: ThreadId,
) {
let this = self.eval_context_mut();
assert!(this.rwlock_is_write_locked(id), "queueing on not write locked lock");
assert!(this.rwlock_is_write_locked(id), "read-queueing on not write locked rwlock");
this.machine.threads.sync.rwlocks[id].reader_queue.push_back(reader);
this.block_thread(reader);
}
#[inline]
/// Take one reader out of the queue waiting for this rwlock, unblock it,
/// and grant it the read lock.
/// Returns `true` if some thread got the rwlock.
fn rwlock_dequeue_and_lock_reader(&mut self, id: RwLockId) -> bool {
    let this = self.eval_context_mut();
    match this.machine.threads.sync.rwlocks[id].reader_queue.pop_front() {
        Some(reader) => {
            this.unblock_thread(reader);
            this.rwlock_reader_lock(id, reader);
            true
        }
        // Nobody was waiting to read.
        None => false,
    }
}
#[inline]
/// Write-lock the rwlock by recording `writer` as the owning thread.
fn rwlock_writer_lock(&mut self, id: RwLockId, writer: ThreadId) {
    let this = self.eval_context_mut();
    // A write lock requires exclusive access: no writer and no readers.
    assert!(!this.rwlock_is_locked(id), "the rwlock is already locked");
    trace!("rwlock_writer_lock: {:?} now held by {:?}", id, writer);
    let state = &mut this.machine.threads.sync.rwlocks[id];
    state.writer = Some(writer);
}
#[inline]
/// Try to unlock by removing the writer.
fn rwlock_writer_unlock(&mut self, id: RwLockId) -> Option<ThreadId> {
/// Try to release the write lock held by `expected_writer`, handing the
/// rwlock over to queued threads.
/// Returns `true` on success, `false` if the rwlock is not write-locked by
/// that thread (only the owner may unlock).
fn rwlock_writer_unlock(&mut self, id: RwLockId, expected_writer: ThreadId) -> bool {
    let this = self.eval_context_mut();
    let rwlock = &mut this.machine.threads.sync.rwlocks[id];
    match rwlock.writer {
        Some(current_writer) if current_writer == expected_writer => {
            rwlock.writer = None;
            trace!("rwlock_writer_unlock: {:?} unlocked by {:?}", id, expected_writer);
            // Hand-off policy: prefer a queued writer; only when no writer is
            // waiting, wake all queued readers. This prioritizes writers, so
            // not only can readers starve writers, writers can also starve
            // readers.
            if !this.rwlock_dequeue_and_lock_writer(id) {
                // No writer waiting — give the lock to every queued reader.
                while this.rwlock_dequeue_and_lock_reader(id) {}
            }
            true
        }
        // Unlocked, or locked by a different thread: refuse.
        _ => false,
    }
}
#[inline]
@ -301,25 +360,11 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
writer: ThreadId,
) {
let this = self.eval_context_mut();
assert!(this.rwlock_is_locked(id), "queueing on unlocked lock");
assert!(this.rwlock_is_locked(id), "write-queueing on unlocked rwlock");
this.machine.threads.sync.rwlocks[id].writer_queue.push_back(writer);
this.block_thread(writer);
}
#[inline]
/// Take one writer out of the queue waiting for this rwlock, unblock it,
/// and grant it the write lock.
/// Returns `true` if some thread got the rwlock.
fn rwlock_dequeue_and_lock_writer(&mut self, id: RwLockId) -> bool {
    let this = self.eval_context_mut();
    match this.machine.threads.sync.rwlocks[id].writer_queue.pop_front() {
        Some(writer) => {
            this.unblock_thread(writer);
            this.rwlock_writer_lock(id, writer);
            true
        }
        // Nobody was waiting to write.
        None => false,
    }
}
#[inline]
/// Create state for a new conditional variable.
fn condvar_create(&mut self) -> CondvarId {

View file

@ -1,5 +1,4 @@
// error-pattern: the evaluated program aborted
// ignore-windows (panics don't work on Windows)
#![feature(unwind_attributes)]
#[unwind(aborts)]

View file

@ -3,7 +3,7 @@
use std::thread;
// error-pattern: Miri does not support threading
// error-pattern: Miri does not support concurrency on Windows
fn main() {
thread::spawn(|| {});

View file

@ -1,5 +1,4 @@
// error-pattern: the evaluated program aborted
// ignore-windows (panics don't work on Windows)
struct Foo;
impl Drop for Foo {

View file

@ -1,9 +0,0 @@
// ignore-linux
// ignore-macos
// Test that panics on Windows give a reasonable error message.
// error-pattern: panicking is not supported on this target
fn main() {
core::panic!("this is {}", "Windows");
}

View file

@ -1,9 +0,0 @@
// ignore-linux
// ignore-macos
// Test that panics on Windows give a reasonable error message.
// error-pattern: panicking is not supported on this target
fn main() {
std::panic!("this is Windows");
}

View file

@ -1,10 +0,0 @@
// ignore-linux
// ignore-macos
// Test that panics on Windows give a reasonable error message.
// error-pattern: panicking is not supported on this target
#[allow(unconditional_panic)]
fn main() {
let _val = 1/0;
}

View file

@ -6,10 +6,7 @@ use std::hint;
fn main() {
test_mutex_stdlib();
#[cfg(not(target_os = "windows"))] // TODO: implement RwLock on Windows
{
test_rwlock_stdlib();
}
test_rwlock_stdlib();
test_spin_loop_hint();
test_thread_yield_now();
}
@ -24,7 +21,6 @@ fn test_mutex_stdlib() {
drop(m);
}
#[cfg(not(target_os = "windows"))]
fn test_rwlock_stdlib() {
use std::sync::RwLock;
let rw = RwLock::new(0);

View file

@ -1,7 +1,7 @@
// ignore-windows: Unwind panicking does not currently work on Windows
// normalize-stderr-test "[^ ]*libcore/[a-z/]+.rs[0-9:]*" -> "$$LOC"
#![feature(never_type)]
#![allow(unconditional_panic)]
use std::panic::{catch_unwind, AssertUnwindSafe};
use std::cell::Cell;

View file

@ -1,4 +1,3 @@
// ignore-windows: Unwind panicking does not currently work on Windows
#![allow(unconditional_panic)]
fn main() {

View file

@ -1 +1 @@
thread 'main' panicked at 'attempt to divide by zero', $DIR/div-by-zero-2.rs:5:14
thread 'main' panicked at 'attempt to divide by zero', $DIR/div-by-zero-2.rs:4:14

View file

@ -1,4 +1,3 @@
// ignore-windows: Unwind panicking does not currently work on Windows
#![allow(arithmetic_overflow)]
fn main() {

View file

@ -1 +1 @@
thread 'main' panicked at 'attempt to shift left with overflow', $DIR/overflowing-lsh-neg.rs:5:14
thread 'main' panicked at 'attempt to shift left with overflow', $DIR/overflowing-lsh-neg.rs:4:14

View file

@ -1,4 +1,3 @@
// ignore-windows: Unwind panicking does not currently work on Windows
#![allow(arithmetic_overflow)]
fn main() {

View file

@ -1 +1 @@
thread 'main' panicked at 'attempt to shift right with overflow', $DIR/overflowing-rsh-1.rs:5:14
thread 'main' panicked at 'attempt to shift right with overflow', $DIR/overflowing-rsh-1.rs:4:14

View file

@ -1,4 +1,3 @@
// ignore-windows: Unwind panicking does not currently work on Windows
#![allow(arithmetic_overflow)]
fn main() {

View file

@ -1 +1 @@
thread 'main' panicked at 'attempt to shift right with overflow', $DIR/overflowing-rsh-2.rs:6:14
thread 'main' panicked at 'attempt to shift right with overflow', $DIR/overflowing-rsh-2.rs:5:14

View file

@ -1,4 +1,3 @@
// ignore-windows: Unwind panicking does not currently work on Windows
fn main() {
std::panic!("panicking from libstd");
}

View file

@ -1 +1 @@
thread 'main' panicked at 'panicking from libstd', $DIR/panic1.rs:3:5
thread 'main' panicked at 'panicking from libstd', $DIR/panic1.rs:2:5

View file

@ -1,4 +1,3 @@
// ignore-windows: Unwind panicking does not currently work on Windows
fn main() {
std::panic!("{}-panicking from libstd", 42);
}

View file

@ -1 +1 @@
thread 'main' panicked at '42-panicking from libstd', $DIR/panic2.rs:3:5
thread 'main' panicked at '42-panicking from libstd', $DIR/panic2.rs:2:5

View file

@ -1,4 +1,3 @@
// ignore-windows: Unwind panicking does not currently work on Windows
fn main() {
core::panic!("panicking from libcore");
}

View file

@ -1 +1 @@
thread 'main' panicked at 'panicking from libcore', $DIR/panic3.rs:3:5
thread 'main' panicked at 'panicking from libcore', $DIR/panic3.rs:2:5

View file

@ -1,4 +1,3 @@
// ignore-windows: Unwind panicking does not currently work on Windows
fn main() {
core::panic!("{}-panicking from libcore", 42);
}

View file

@ -1 +1 @@
thread 'main' panicked at '42-panicking from libcore', $DIR/panic4.rs:3:5
thread 'main' panicked at '42-panicking from libcore', $DIR/panic4.rs:2:5

View file

@ -1,4 +1,3 @@
// ignore-windows: Unwind panicking does not currently work on Windows
#![feature(option_expect_none, option_unwrap_none)]
//! Test that panic locations for `#[track_caller]` functions in std have the correct
//! location reported.

View file

@ -1,4 +1,3 @@
// ignore-windows: Unwind panicking does not currently work on Windows
fn main() {
#[cfg(target_pointer_width="64")]
let bad = unsafe {

View file

@ -1 +1 @@
thread 'main' panicked at 'index out of bounds: the len is 0 but the index is 0', $DIR/transmute_fat2.rs:12:5
thread 'main' panicked at 'index out of bounds: the len is 0 but the index is 0', $DIR/transmute_fat2.rs:11:5