Auto merge of #1284 - vakaras:add-threads-cr2, r=RalfJung

Implement basic support for concurrency (Linux/macOS only)

Changes (most new code is in `src/thread.rs` and `src/shims/foreign_items/posix.rs`):

1. Move the stack from `Machine` to a newly created `Thread` struct.
2. Add a `ThreadManager` struct that manages the threads.
3. Change `canonical_alloc_id` to create a unique allocation id for each (thread-local static, thread) pair (the responsible map lives in the new `ThreadManager`).
4. Change the code to execute the thread-local destructors immediately when a thread terminates.
5. Add the most basic round-robin scheduler (sketched below).

This pull request depends on [these changes to the compiler](https://github.com/rust-lang/rust/pull/70598).
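
The scheduler in item 5 is, at its core, round-robin over all enabled threads. A condensed sketch of the idea (simplified; the real code in `src/thread.rs` below also handles joining, yielding, and TLS-destructor scheduling):

```rust
#[derive(PartialEq)]
enum ThreadState { Enabled, Blocked, Terminated }

struct Thread { state: ThreadState }

struct ThreadManager {
    active_thread: usize,
    threads: Vec<Thread>,
}

impl ThreadManager {
    /// Pick the next enabled thread, starting from the currently active one.
    fn schedule(&mut self) -> Option<usize> {
        let n = self.threads.len();
        for offset in 0..n {
            let id = (self.active_thread + offset) % n;
            if self.threads[id].state == ThreadState::Enabled {
                self.active_thread = id;
                return Some(id);
            }
        }
        None // no runnable thread left: stop (or report a deadlock)
    }
}
```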
bors committed on 2020-04-30 16:21:43 +00:00 (commit 351d46d3fb)
39 changed files with 1950 additions and 276 deletions

View file

@@ -47,7 +47,9 @@ in your program, and cannot run all programs:
* Miri runs the program as a platform-independent interpreter, so the program
has no access to most platform-specific APIs or FFI. A few APIs have been
implemented (such as printing to stdout) but most have not: for example, Miri
currently does not support concurrency, or SIMD, or networking.
currently does not support SIMD or networking.
* Miri currently does not check for data races and most other concurrency-related
issues.
[rust]: https://www.rust-lang.org/
[mir]: https://github.com/rust-lang/rfcs/blob/master/text/1211-mir.md

View file

@@ -139,7 +139,7 @@ fn report_msg<'tcx, 'mir>(
mut helps: Vec<String>,
error: bool,
) {
let span = if let Some(frame) = ecx.machine.stack.last() {
let span = if let Some(frame) = ecx.active_thread_stack().last() {
frame.current_source_info().unwrap().span
} else {
DUMMY_SP
@@ -171,7 +171,7 @@ fn report_msg<'tcx, 'mir>(
err.emit();
for (i, frame) in ecx.machine.stack.iter().enumerate() {
for (i, frame) in ecx.active_thread_stack().iter().enumerate() {
trace!("-------------------");
trace!("Frame {}", i);
trace!(" return: {:?}", frame.return_place.map(|p| *p));

View file

@@ -205,14 +205,24 @@ pub fn eval_main<'tcx>(tcx: TyCtxt<'tcx>, main_id: DefId, config: MiriConfig) ->
// Perform the main execution.
let res: InterpResult<'_, i64> = (|| {
// Main loop.
while ecx.step()? {
loop {
match ecx.schedule()? {
SchedulingAction::ExecuteStep => {
assert!(ecx.step()?, "a terminated thread was scheduled for execution");
}
SchedulingAction::ExecuteDtors => {
// This will either enable the thread again (so we go back
// to `ExecuteStep`), or determine that this thread is done
// for good.
ecx.schedule_next_tls_dtor_for_active_thread()?;
}
SchedulingAction::Stop => {
break;
}
}
ecx.process_diagnostics();
}
// Read the return code pointer *before* we run TLS destructors, to assert
// that it was written to by the time the `start` lang item returned.
let return_code = ecx.read_scalar(ret_place.into())?.not_undef()?.to_machine_isize(&ecx)?;
// Global destructors.
ecx.run_tls_dtors()?;
Ok(return_code)
})();
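
(The `SchedulingAction` enum matched on above is defined in the new `src/thread.rs`, shown near the end of this diff: `ExecuteStep`, `ExecuteDtors`, or `Stop`.)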

View file

@@ -12,6 +12,7 @@ extern crate rustc_ast;
#[macro_use] extern crate rustc_middle;
extern crate rustc_data_structures;
extern crate rustc_hir;
extern crate rustc_index;
extern crate rustc_mir;
extern crate rustc_span;
extern crate rustc_target;
@@ -26,6 +27,7 @@ mod operator;
mod range_map;
mod shims;
mod stacked_borrows;
mod thread;
// Make all those symbols available in the same place as our own.
pub use rustc_mir::interpret::*;
@@ -40,6 +42,7 @@ pub use crate::shims::intrinsics::EvalContextExt as IntrinsicsEvalContextExt;
pub use crate::shims::os_str::EvalContextExt as OsStrEvalContextExt;
pub use crate::shims::panic::{CatchUnwindData, EvalContextExt as PanicEvalContextExt};
pub use crate::shims::sync::{EvalContextExt as SyncEvalContextExt};
pub use crate::shims::thread::EvalContextExt as ThreadShimsEvalContextExt;
pub use crate::shims::time::EvalContextExt as TimeEvalContextExt;
pub use crate::shims::tls::{EvalContextExt as TlsEvalContextExt, TlsData};
pub use crate::shims::EvalContextExt as ShimsEvalContextExt;
@@ -60,6 +63,9 @@ pub use crate::range_map::RangeMap;
pub use crate::stacked_borrows::{
EvalContextExt as StackedBorEvalContextExt, Item, Permission, PtrId, Stack, Stacks, Tag,
};
pub use crate::thread::{
EvalContextExt as ThreadsEvalContextExt, SchedulingAction, ThreadId, ThreadManager, ThreadState,
};
/// Insert rustc arguments at the beginning of the argument list that Miri wants to be
/// set by default, for maximal validation power.

View file

@@ -251,8 +251,8 @@ pub struct Evaluator<'mir, 'tcx> {
/// The "time anchor" for this machine's monotone clock (for `Instant` simulation).
pub(crate) time_anchor: Instant,
/// The call stack.
pub(crate) stack: Vec<Frame<'mir, 'tcx, Tag, FrameData<'tcx>>>,
/// The set of threads.
pub(crate) threads: ThreadManager<'mir, 'tcx>,
/// Precomputed `TyLayout`s for primitive data types that are commonly used inside Miri.
pub(crate) layouts: PrimitiveLayouts<'tcx>,
@@ -282,7 +282,7 @@ impl<'mir, 'tcx> Evaluator<'mir, 'tcx> {
panic_payload: None,
time_anchor: Instant::now(),
layouts,
stack: Vec::default(),
threads: ThreadManager::default(),
}
}
}
@@ -416,6 +416,14 @@ impl<'mir, 'tcx> Machine<'mir, 'tcx> for Evaluator<'mir, 'tcx> {
Ok(())
}
fn adjust_global_const(
ecx: &InterpCx<'mir, 'tcx, Self>,
mut val: mir::interpret::ConstValue<'tcx>,
) -> InterpResult<'tcx, mir::interpret::ConstValue<'tcx>> {
ecx.remap_thread_local_alloc_ids(&mut val)?;
Ok(val)
}
fn canonical_alloc_id(mem: &Memory<'mir, 'tcx, Self>, id: AllocId) -> AllocId {
let tcx = mem.tcx;
// Figure out if this is an extern static, and if yes, which one.
@@ -525,18 +533,16 @@ impl<'mir, 'tcx> Machine<'mir, 'tcx> for Evaluator<'mir, 'tcx> {
Ok(frame.with_extra(extra))
}
#[inline(always)]
fn stack<'a>(
ecx: &'a InterpCx<'mir, 'tcx, Self>,
ecx: &'a InterpCx<'mir, 'tcx, Self>
) -> &'a [Frame<'mir, 'tcx, Self::PointerTag, Self::FrameExtra>] {
&ecx.machine.stack
ecx.active_thread_stack()
}
#[inline(always)]
fn stack_mut<'a>(
ecx: &'a mut InterpCx<'mir, 'tcx, Self>,
ecx: &'a mut InterpCx<'mir, 'tcx, Self>
) -> &'a mut Vec<Frame<'mir, 'tcx, Self::PointerTag, Self::FrameExtra>> {
&mut ecx.machine.stack
ecx.active_thread_stack_mut()
}
#[inline(always)]
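
(The `active_thread_stack`/`active_thread_stack_mut` helpers used above are implemented on the new `ThreadManager` in `src/thread.rs`, shown at the end of this diff.)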

View file

@@ -221,13 +221,15 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
}
"pthread_getspecific" => {
let key = this.force_bits(this.read_scalar(args[0])?.not_undef()?, args[0].layout.size)?;
let ptr = this.machine.tls.load_tls(key, this)?;
let active_thread = this.get_active_thread()?;
let ptr = this.machine.tls.load_tls(key, active_thread, this)?;
this.write_scalar(ptr, dest)?;
}
"pthread_setspecific" => {
let key = this.force_bits(this.read_scalar(args[0])?.not_undef()?, args[0].layout.size)?;
let active_thread = this.get_active_thread()?;
let new_ptr = this.read_scalar(args[1])?.not_undef()?;
this.machine.tls.store_tls(key, this.test_null(new_ptr)?)?;
this.machine.tls.store_tls(key, active_thread, this.test_null(new_ptr)?)?;
// Return success (`0`).
this.write_null(dest)?;
@@ -291,9 +293,30 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
this.write_scalar(Scalar::from_i32(result), dest)?;
}
// Better error for attempts to create a thread
// Threading
"pthread_create" => {
throw_unsup_format!("Miri does not support threading");
assert_eq!(args.len(), 4);
let result = this.pthread_create(args[0], args[1], args[2], args[3])?;
this.write_scalar(Scalar::from_i32(result), dest)?;
}
"pthread_join" => {
assert_eq!(args.len(), 2);
let result = this.pthread_join(args[0], args[1])?;
this.write_scalar(Scalar::from_i32(result), dest)?;
}
"pthread_detach" => {
assert_eq!(args.len(), 1);
let result = this.pthread_detach(args[0])?;
this.write_scalar(Scalar::from_i32(result), dest)?;
}
"pthread_self" => {
assert_eq!(args.len(), 0);
this.pthread_self(dest)?;
}
"sched_yield" => {
assert_eq!(args.len(), 0);
let result = this.sched_yield()?;
this.write_scalar(Scalar::from_i32(result), dest)?;
}
// Miscellaneous
@@ -312,15 +335,11 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
// We do not support forking, so there is nothing to do here.
this.write_null(dest)?;
}
"sched_yield" => {
this.write_null(dest)?;
}
// Incomplete shims that we "stub out" just to get pre-main initialization code to work.
// These shims are enabled only when the caller is in the standard library.
| "pthread_attr_init"
| "pthread_attr_destroy"
| "pthread_self"
| "pthread_attr_setstacksize"
| "pthread_condattr_init"
| "pthread_condattr_setclock"
@@ -330,6 +349,15 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
=> {
this.write_null(dest)?;
}
"pthread_attr_getguardsize" if this.frame().instance.to_string().starts_with("std::sys::unix::")
=> {
let guard_size = this.deref_operand(args[1])?;
let guard_size_layout = this.libc_ty_layout("size_t")?;
this.write_scalar(Scalar::from_uint(crate::PAGE_SIZE, guard_size_layout.size), guard_size.into())?;
// Return success (`0`).
this.write_null(dest)?;
}
| "signal"
| "sigaction"

View file

@@ -75,6 +75,13 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
this.write_null(dest)?;
}
// Threading
"prctl" => {
assert_eq!(args.len(), 5);
let result = this.prctl(args[0], args[1], args[2], args[3], args[4])?;
this.write_scalar(Scalar::from_i32(result), dest)?;
}
// Dynamically invoked syscalls
"syscall" => {
let sys_getrandom = this

View file

@@ -82,7 +82,8 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
let dtor = this.read_scalar(args[0])?.not_undef()?;
let dtor = this.memory.get_fn(dtor)?.as_instance()?;
let data = this.read_scalar(args[1])?.not_undef()?;
this.machine.tls.set_global_dtor(dtor, data)?;
let active_thread = this.get_active_thread()?;
this.machine.tls.set_thread_dtor(active_thread, dtor, data)?;
}
// Querying system information

View file

@@ -144,13 +144,15 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
}
"TlsGetValue" => {
let key = u128::from(this.read_scalar(args[0])?.to_u32()?);
let ptr = this.machine.tls.load_tls(key, this)?;
let active_thread = this.get_active_thread()?;
let ptr = this.machine.tls.load_tls(key, active_thread, this)?;
this.write_scalar(ptr, dest)?;
}
"TlsSetValue" => {
let key = u128::from(this.read_scalar(args[0])?.to_u32()?);
let active_thread = this.get_active_thread()?;
let new_ptr = this.read_scalar(args[1])?.not_undef()?;
this.machine.tls.store_tls(key, this.test_null(new_ptr)?)?;
this.machine.tls.store_tls(key, active_thread, this.test_null(new_ptr)?)?;
// Return success (`1`).
this.write_scalar(Scalar::from_i32(1), dest)?;

View file

@@ -6,6 +6,7 @@ pub mod intrinsics;
pub mod os_str;
pub mod panic;
pub mod sync;
pub mod thread;
pub mod time;
pub mod tls;

View file

@@ -1,7 +1,8 @@
use rustc_middle::ty::{TyKind, TypeAndMut};
use rustc_middle::ty::{layout::TyAndLayout, TyKind, TypeAndMut};
use rustc_target::abi::{LayoutOf, Size};
use crate::stacked_borrows::Tag;
use crate::thread::BlockSetId;
use crate::*;
fn assert_ptr_target_min_size<'mir, 'tcx: 'mir>(
@@ -18,22 +19,48 @@ fn assert_ptr_target_min_size<'mir, 'tcx: 'mir>(
Ok(())
}
fn get_at_offset<'mir, 'tcx: 'mir>(
ecx: &MiriEvalContext<'mir, 'tcx>,
op: OpTy<'tcx, Tag>,
offset: u64,
layout: TyAndLayout<'tcx>,
min_size: u64,
) -> InterpResult<'tcx, ScalarMaybeUndef<Tag>> {
// Ensure that the following read at an offset to the attr pointer is within bounds
assert_ptr_target_min_size(ecx, op, min_size)?;
let op_place = ecx.deref_operand(op)?;
let value_place = op_place.offset(Size::from_bytes(offset), MemPlaceMeta::None, layout, ecx)?;
ecx.read_scalar(value_place.into())
}
fn set_at_offset<'mir, 'tcx: 'mir>(
ecx: &mut MiriEvalContext<'mir, 'tcx>,
op: OpTy<'tcx, Tag>,
offset: u64,
value: impl Into<ScalarMaybeUndef<Tag>>,
layout: TyAndLayout<'tcx>,
min_size: u64,
) -> InterpResult<'tcx, ()> {
// Ensure that the following write at an offset to the attr pointer is within bounds
assert_ptr_target_min_size(ecx, op, min_size)?;
let op_place = ecx.deref_operand(op)?;
let value_place = op_place.offset(Size::from_bytes(offset), MemPlaceMeta::None, layout, ecx)?;
ecx.write_scalar(value.into(), value_place.into())
}
// pthread_mutexattr_t is either 4 or 8 bytes, depending on the platform.
// Our chosen memory layout for emulation (does not have to match the platform layout!):
// store an i32 in the first four bytes equal to the corresponding libc mutex kind constant
// (e.g. PTHREAD_MUTEX_NORMAL).
const PTHREAD_MUTEXATTR_T_MIN_SIZE: u64 = 4;
fn mutexattr_get_kind<'mir, 'tcx: 'mir>(
ecx: &MiriEvalContext<'mir, 'tcx>,
attr_op: OpTy<'tcx, Tag>,
) -> InterpResult<'tcx, ScalarMaybeUndef<Tag>> {
// Ensure that the following read at an offset to the attr pointer is within bounds
assert_ptr_target_min_size(ecx, attr_op, 4)?;
let attr_place = ecx.deref_operand(attr_op)?;
let kind_place =
attr_place.offset(Size::ZERO, MemPlaceMeta::None, ecx.machine.layouts.i32, ecx)?;
ecx.read_scalar(kind_place.into())
get_at_offset(ecx, attr_op, 0, ecx.machine.layouts.i32, PTHREAD_MUTEXATTR_T_MIN_SIZE)
}
fn mutexattr_set_kind<'mir, 'tcx: 'mir>(
@@ -41,12 +68,7 @@ fn mutexattr_set_kind<'mir, 'tcx: 'mir>(
attr_op: OpTy<'tcx, Tag>,
kind: impl Into<ScalarMaybeUndef<Tag>>,
) -> InterpResult<'tcx, ()> {
// Ensure that the following write at an offset to the attr pointer is within bounds
assert_ptr_target_min_size(ecx, attr_op, 4)?;
let attr_place = ecx.deref_operand(attr_op)?;
let kind_place =
attr_place.offset(Size::ZERO, MemPlaceMeta::None, ecx.machine.layouts.i32, ecx)?;
ecx.write_scalar(kind.into(), kind_place.into())
set_at_offset(ecx, attr_op, 0, kind, ecx.machine.layouts.i32, PTHREAD_MUTEXATTR_T_MIN_SIZE)
}
// pthread_mutex_t is between 24 and 48 bytes, depending on the platform.
@@ -55,23 +77,19 @@ fn mutexattr_set_kind<'mir, 'tcx: 'mir>(
// bytes 0-3: reserved for signature on macOS
// (need to avoid this because it is set by static initializer macros)
// bytes 4-7: count of how many times this mutex has been locked, as a u32
// bytes 8-11: when count > 0, id of the owner thread as a u32
// bytes 12-15 or 16-19 (depending on platform): mutex kind, as an i32
// (the kind has to be at its offset for compatibility with static initializer macros)
// bytes 20-23: when count > 0, id of the blockset in which the blocked threads
// are waiting, or 0 if no blockset is assigned yet.
const PTHREAD_MUTEX_T_MIN_SIZE: u64 = 24;
fn mutex_get_locked_count<'mir, 'tcx: 'mir>(
ecx: &MiriEvalContext<'mir, 'tcx>,
mutex_op: OpTy<'tcx, Tag>,
) -> InterpResult<'tcx, ScalarMaybeUndef<Tag>> {
// Ensure that the following read at an offset to the mutex pointer is within bounds
assert_ptr_target_min_size(ecx, mutex_op, 20)?;
let mutex_place = ecx.deref_operand(mutex_op)?;
let locked_count_place = mutex_place.offset(
Size::from_bytes(4),
MemPlaceMeta::None,
ecx.machine.layouts.u32,
ecx,
)?;
ecx.read_scalar(locked_count_place.into())
get_at_offset(ecx, mutex_op, 4, ecx.machine.layouts.u32, PTHREAD_MUTEX_T_MIN_SIZE)
}
fn mutex_set_locked_count<'mir, 'tcx: 'mir>(
@@ -79,33 +97,30 @@ fn mutex_set_locked_count<'mir, 'tcx: 'mir>(
mutex_op: OpTy<'tcx, Tag>,
locked_count: impl Into<ScalarMaybeUndef<Tag>>,
) -> InterpResult<'tcx, ()> {
// Ensure that the following write at an offset to the mutex pointer is within bounds
assert_ptr_target_min_size(ecx, mutex_op, 20)?;
let mutex_place = ecx.deref_operand(mutex_op)?;
let locked_count_place = mutex_place.offset(
Size::from_bytes(4),
MemPlaceMeta::None,
ecx.machine.layouts.u32,
ecx,
)?;
ecx.write_scalar(locked_count.into(), locked_count_place.into())
set_at_offset(ecx, mutex_op, 4, locked_count, ecx.machine.layouts.u32, PTHREAD_MUTEX_T_MIN_SIZE)
}
fn mutex_get_owner<'mir, 'tcx: 'mir>(
ecx: &MiriEvalContext<'mir, 'tcx>,
mutex_op: OpTy<'tcx, Tag>,
) -> InterpResult<'tcx, ScalarMaybeUndef<Tag>> {
get_at_offset(ecx, mutex_op, 8, ecx.machine.layouts.u32, PTHREAD_MUTEX_T_MIN_SIZE)
}
fn mutex_set_owner<'mir, 'tcx: 'mir>(
ecx: &mut MiriEvalContext<'mir, 'tcx>,
mutex_op: OpTy<'tcx, Tag>,
owner: impl Into<ScalarMaybeUndef<Tag>>,
) -> InterpResult<'tcx, ()> {
set_at_offset(ecx, mutex_op, 8, owner, ecx.machine.layouts.u32, PTHREAD_MUTEX_T_MIN_SIZE)
}
fn mutex_get_kind<'mir, 'tcx: 'mir>(
ecx: &mut MiriEvalContext<'mir, 'tcx>,
mutex_op: OpTy<'tcx, Tag>,
) -> InterpResult<'tcx, ScalarMaybeUndef<Tag>> {
// Ensure that the following read at an offset to the mutex pointer is within bounds
assert_ptr_target_min_size(ecx, mutex_op, 20)?;
let mutex_place = ecx.deref_operand(mutex_op)?;
let kind_offset = if ecx.pointer_size().bytes() == 8 { 16 } else { 12 };
let kind_place = mutex_place.offset(
Size::from_bytes(kind_offset),
MemPlaceMeta::None,
ecx.machine.layouts.i32,
ecx,
)?;
ecx.read_scalar(kind_place.into())
let offset = if ecx.pointer_size().bytes() == 8 { 16 } else { 12 };
get_at_offset(ecx, mutex_op, offset, ecx.machine.layouts.i32, PTHREAD_MUTEX_T_MIN_SIZE)
}
fn mutex_set_kind<'mir, 'tcx: 'mir>(
@@ -113,17 +128,39 @@ fn mutex_set_kind<'mir, 'tcx: 'mir>(
mutex_op: OpTy<'tcx, Tag>,
kind: impl Into<ScalarMaybeUndef<Tag>>,
) -> InterpResult<'tcx, ()> {
// Ensure that the following write at an offset to the mutex pointer is within bounds
assert_ptr_target_min_size(ecx, mutex_op, 20)?;
let mutex_place = ecx.deref_operand(mutex_op)?;
let kind_offset = if ecx.pointer_size().bytes() == 8 { 16 } else { 12 };
let kind_place = mutex_place.offset(
Size::from_bytes(kind_offset),
MemPlaceMeta::None,
ecx.machine.layouts.i32,
ecx,
)?;
ecx.write_scalar(kind.into(), kind_place.into())
let offset = if ecx.pointer_size().bytes() == 8 { 16 } else { 12 };
set_at_offset(ecx, mutex_op, offset, kind, ecx.machine.layouts.i32, PTHREAD_MUTEX_T_MIN_SIZE)
}
fn mutex_get_blockset<'mir, 'tcx: 'mir>(
ecx: &MiriEvalContext<'mir, 'tcx>,
mutex_op: OpTy<'tcx, Tag>,
) -> InterpResult<'tcx, ScalarMaybeUndef<Tag>> {
get_at_offset(ecx, mutex_op, 20, ecx.machine.layouts.u32, PTHREAD_MUTEX_T_MIN_SIZE)
}
fn mutex_set_blockset<'mir, 'tcx: 'mir>(
ecx: &mut MiriEvalContext<'mir, 'tcx>,
mutex_op: OpTy<'tcx, Tag>,
blockset: impl Into<ScalarMaybeUndef<Tag>>,
) -> InterpResult<'tcx, ()> {
set_at_offset(ecx, mutex_op, 20, blockset, ecx.machine.layouts.u32, PTHREAD_MUTEX_T_MIN_SIZE)
}
fn mutex_get_or_create_blockset<'mir, 'tcx: 'mir>(
ecx: &mut MiriEvalContext<'mir, 'tcx>,
mutex_op: OpTy<'tcx, Tag>,
) -> InterpResult<'tcx, BlockSetId> {
let blockset = mutex_get_blockset(ecx, mutex_op)?.to_u32()?;
if blockset == 0 {
// 0 is a default value and also not a valid blockset id. Need to
// allocate a new blockset.
let blockset = ecx.create_blockset()?;
mutex_set_blockset(ecx, mutex_op, blockset.to_u32_scalar())?;
Ok(blockset)
} else {
Ok(BlockSetId::new(blockset))
}
}
// pthread_rwlock_t is between 32 and 56 bytes, depending on the platform.
@@ -133,21 +170,18 @@ fn mutex_set_kind<'mir, 'tcx: 'mir>(
// (need to avoid this because it is set by static initializer macros)
// bytes 4-7: reader count, as a u32
// bytes 8-11: writer count, as a u32
// bytes 12-15: when writer or reader count > 0, id of the blockset in which the
// blocked writers are waiting, or 0 if no blockset is assigned yet.
// bytes 16-19: when writer count > 0, id of the blockset in which the blocked
// readers are waiting, or 0 if no blockset is assigned yet.
const PTHREAD_RWLOCK_T_MIN_SIZE: u64 = 20;
fn rwlock_get_readers<'mir, 'tcx: 'mir>(
ecx: &MiriEvalContext<'mir, 'tcx>,
rwlock_op: OpTy<'tcx, Tag>,
) -> InterpResult<'tcx, ScalarMaybeUndef<Tag>> {
// Ensure that the following read at an offset to the rwlock pointer is within bounds
assert_ptr_target_min_size(ecx, rwlock_op, 12)?;
let rwlock_place = ecx.deref_operand(rwlock_op)?;
let readers_place = rwlock_place.offset(
Size::from_bytes(4),
MemPlaceMeta::None,
ecx.machine.layouts.u32,
ecx,
)?;
ecx.read_scalar(readers_place.into())
get_at_offset(ecx, rwlock_op, 4, ecx.machine.layouts.u32, PTHREAD_RWLOCK_T_MIN_SIZE)
}
fn rwlock_set_readers<'mir, 'tcx: 'mir>(
@@ -155,32 +189,14 @@ fn rwlock_set_readers<'mir, 'tcx: 'mir>(
rwlock_op: OpTy<'tcx, Tag>,
readers: impl Into<ScalarMaybeUndef<Tag>>,
) -> InterpResult<'tcx, ()> {
// Ensure that the following write at an offset to the rwlock pointer is within bounds
assert_ptr_target_min_size(ecx, rwlock_op, 12)?;
let rwlock_place = ecx.deref_operand(rwlock_op)?;
let readers_place = rwlock_place.offset(
Size::from_bytes(4),
MemPlaceMeta::None,
ecx.machine.layouts.u32,
ecx,
)?;
ecx.write_scalar(readers.into(), readers_place.into())
set_at_offset(ecx, rwlock_op, 4, readers, ecx.machine.layouts.u32, PTHREAD_RWLOCK_T_MIN_SIZE)
}
fn rwlock_get_writers<'mir, 'tcx: 'mir>(
ecx: &MiriEvalContext<'mir, 'tcx>,
rwlock_op: OpTy<'tcx, Tag>,
) -> InterpResult<'tcx, ScalarMaybeUndef<Tag>> {
// Ensure that the following read at an offset to the rwlock pointer is within bounds
assert_ptr_target_min_size(ecx, rwlock_op, 12)?;
let rwlock_place = ecx.deref_operand(rwlock_op)?;
let writers_place = rwlock_place.offset(
Size::from_bytes(8),
MemPlaceMeta::None,
ecx.machine.layouts.u32,
ecx,
)?;
ecx.read_scalar(writers_place.into())
get_at_offset(ecx, rwlock_op, 8, ecx.machine.layouts.u32, PTHREAD_RWLOCK_T_MIN_SIZE)
}
fn rwlock_set_writers<'mir, 'tcx: 'mir>(
@@ -188,16 +204,69 @@ fn rwlock_set_writers<'mir, 'tcx: 'mir>(
rwlock_op: OpTy<'tcx, Tag>,
writers: impl Into<ScalarMaybeUndef<Tag>>,
) -> InterpResult<'tcx, ()> {
// Ensure that the following write at an offset to the rwlock pointer is within bounds
assert_ptr_target_min_size(ecx, rwlock_op, 12)?;
let rwlock_place = ecx.deref_operand(rwlock_op)?;
let writers_place = rwlock_place.offset(
Size::from_bytes(8),
MemPlaceMeta::None,
ecx.machine.layouts.u32,
ecx,
)?;
ecx.write_scalar(writers.into(), writers_place.into())
set_at_offset(ecx, rwlock_op, 8, writers, ecx.machine.layouts.u32, PTHREAD_RWLOCK_T_MIN_SIZE)
}
fn rwlock_get_writer_blockset<'mir, 'tcx: 'mir>(
ecx: &MiriEvalContext<'mir, 'tcx>,
rwlock_op: OpTy<'tcx, Tag>,
) -> InterpResult<'tcx, ScalarMaybeUndef<Tag>> {
get_at_offset(ecx, rwlock_op, 12, ecx.machine.layouts.u32, PTHREAD_RWLOCK_T_MIN_SIZE)
}
fn rwlock_set_writer_blockset<'mir, 'tcx: 'mir>(
ecx: &mut MiriEvalContext<'mir, 'tcx>,
rwlock_op: OpTy<'tcx, Tag>,
blockset: impl Into<ScalarMaybeUndef<Tag>>,
) -> InterpResult<'tcx, ()> {
set_at_offset(ecx, rwlock_op, 12, blockset, ecx.machine.layouts.u32, PTHREAD_RWLOCK_T_MIN_SIZE)
}
fn rwlock_get_or_create_writer_blockset<'mir, 'tcx: 'mir>(
ecx: &mut MiriEvalContext<'mir, 'tcx>,
rwlock_op: OpTy<'tcx, Tag>,
) -> InterpResult<'tcx, BlockSetId> {
let blockset = rwlock_get_writer_blockset(ecx, rwlock_op)?.to_u32()?;
if blockset == 0 {
// 0 is a default value and also not a valid blockset id. Need to
// allocate a new blockset.
let blockset = ecx.create_blockset()?;
rwlock_set_writer_blockset(ecx, rwlock_op, blockset.to_u32_scalar())?;
Ok(blockset)
} else {
Ok(BlockSetId::new(blockset))
}
}
fn rwlock_get_reader_blockset<'mir, 'tcx: 'mir>(
ecx: &MiriEvalContext<'mir, 'tcx>,
rwlock_op: OpTy<'tcx, Tag>,
) -> InterpResult<'tcx, ScalarMaybeUndef<Tag>> {
get_at_offset(ecx, rwlock_op, 16, ecx.machine.layouts.u32, PTHREAD_RWLOCK_T_MIN_SIZE)
}
fn rwlock_set_reader_blockset<'mir, 'tcx: 'mir>(
ecx: &mut MiriEvalContext<'mir, 'tcx>,
rwlock_op: OpTy<'tcx, Tag>,
blockset: impl Into<ScalarMaybeUndef<Tag>>,
) -> InterpResult<'tcx, ()> {
set_at_offset(ecx, rwlock_op, 16, blockset, ecx.machine.layouts.u32, PTHREAD_RWLOCK_T_MIN_SIZE)
}
fn rwlock_get_or_create_reader_blockset<'mir, 'tcx: 'mir>(
ecx: &mut MiriEvalContext<'mir, 'tcx>,
rwlock_op: OpTy<'tcx, Tag>,
) -> InterpResult<'tcx, BlockSetId> {
let blockset = rwlock_get_reader_blockset(ecx, rwlock_op)?.to_u32()?;
if blockset == 0 {
// 0 is a default value and also not a valid blockset id. Need to
// allocate a new blockset.
let blockset = ecx.create_blockset()?;
rwlock_set_reader_blockset(ecx, rwlock_op, blockset.to_u32_scalar())?;
Ok(blockset)
} else {
Ok(BlockSetId::new(blockset))
}
}
impl<'mir, 'tcx> EvalContextExt<'mir, 'tcx> for crate::MiriEvalContext<'mir, 'tcx> {}
@@ -265,31 +334,39 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
let kind = mutex_get_kind(this, mutex_op)?.not_undef()?;
let locked_count = mutex_get_locked_count(this, mutex_op)?.to_u32()?;
let active_thread = this.get_active_thread()?;
if kind == this.eval_libc("PTHREAD_MUTEX_NORMAL")? {
if locked_count == 0 {
mutex_set_locked_count(this, mutex_op, Scalar::from_u32(1))?;
Ok(0)
} else {
throw_machine_stop!(TerminationInfo::Deadlock);
}
} else if kind == this.eval_libc("PTHREAD_MUTEX_ERRORCHECK")? {
if locked_count == 0 {
mutex_set_locked_count(this, mutex_op, Scalar::from_u32(1))?;
Ok(0)
} else {
this.eval_libc_i32("EDEADLK")
}
} else if kind == this.eval_libc("PTHREAD_MUTEX_RECURSIVE")? {
match locked_count.checked_add(1) {
Some(new_count) => {
mutex_set_locked_count(this, mutex_op, Scalar::from_u32(new_count))?;
Ok(0)
}
None => this.eval_libc_i32("EAGAIN"),
}
if locked_count == 0 {
// The mutex is unlocked. Let's lock it.
mutex_set_locked_count(this, mutex_op, Scalar::from_u32(1))?;
mutex_set_owner(this, mutex_op, active_thread.to_u32_scalar())?;
Ok(0)
} else {
throw_ub_format!("called pthread_mutex_lock on an unsupported type of mutex");
// The mutex is locked. Let's check by whom.
let owner_thread: ThreadId = mutex_get_owner(this, mutex_op)?.to_u32()?.into();
if owner_thread != active_thread {
// Block the active thread.
let blockset = mutex_get_or_create_blockset(this, mutex_op)?;
this.block_active_thread(blockset)?;
Ok(0)
} else {
// Trying to acquire the same mutex again.
if kind == this.eval_libc("PTHREAD_MUTEX_NORMAL")? {
throw_machine_stop!(TerminationInfo::Deadlock);
} else if kind == this.eval_libc("PTHREAD_MUTEX_ERRORCHECK")? {
this.eval_libc_i32("EDEADLK")
} else if kind == this.eval_libc("PTHREAD_MUTEX_RECURSIVE")? {
match locked_count.checked_add(1) {
Some(new_count) => {
mutex_set_locked_count(this, mutex_op, Scalar::from_u32(new_count))?;
Ok(0)
}
None => this.eval_libc_i32("EAGAIN"),
}
} else {
throw_ub_format!("called pthread_mutex_lock on an unsupported type of mutex");
}
}
}
}
@@ -298,26 +375,36 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
let kind = mutex_get_kind(this, mutex_op)?.not_undef()?;
let locked_count = mutex_get_locked_count(this, mutex_op)?.to_u32()?;
let active_thread = this.get_active_thread()?;
if kind == this.eval_libc("PTHREAD_MUTEX_NORMAL")?
|| kind == this.eval_libc("PTHREAD_MUTEX_ERRORCHECK")?
{
if locked_count == 0 {
mutex_set_locked_count(this, mutex_op, Scalar::from_u32(1))?;
Ok(0)
} else {
this.eval_libc_i32("EBUSY")
}
} else if kind == this.eval_libc("PTHREAD_MUTEX_RECURSIVE")? {
match locked_count.checked_add(1) {
Some(new_count) => {
mutex_set_locked_count(this, mutex_op, Scalar::from_u32(new_count))?;
Ok(0)
}
None => this.eval_libc_i32("EAGAIN"),
}
if locked_count == 0 {
// The mutex is unlocked. Let's lock it.
mutex_set_locked_count(this, mutex_op, Scalar::from_u32(1))?;
mutex_set_owner(this, mutex_op, active_thread.to_u32_scalar())?;
Ok(0)
} else {
throw_ub_format!("called pthread_mutex_trylock on an unsupported type of mutex");
let owner_thread: ThreadId = mutex_get_owner(this, mutex_op)?.to_u32()?.into();
if owner_thread != active_thread {
this.eval_libc_i32("EBUSY")
} else {
if kind == this.eval_libc("PTHREAD_MUTEX_NORMAL")?
|| kind == this.eval_libc("PTHREAD_MUTEX_ERRORCHECK")?
{
this.eval_libc_i32("EBUSY")
} else if kind == this.eval_libc("PTHREAD_MUTEX_RECURSIVE")? {
match locked_count.checked_add(1) {
Some(new_count) => {
mutex_set_locked_count(this, mutex_op, Scalar::from_u32(new_count))?;
Ok(0)
}
None => this.eval_libc_i32("EAGAIN"),
}
} else {
throw_ub_format!(
"called pthread_mutex_trylock on an unsupported type of mutex"
);
}
}
}
}
@@ -326,34 +413,41 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
let kind = mutex_get_kind(this, mutex_op)?.not_undef()?;
let locked_count = mutex_get_locked_count(this, mutex_op)?.to_u32()?;
let owner_thread: ThreadId = mutex_get_owner(this, mutex_op)?.to_u32()?.into();
if kind == this.eval_libc("PTHREAD_MUTEX_NORMAL")? {
if locked_count != 0 {
mutex_set_locked_count(this, mutex_op, Scalar::from_u32(0))?;
Ok(0)
if owner_thread != this.get_active_thread()? {
throw_ub_format!("called pthread_mutex_unlock on a mutex owned by another thread");
} else if locked_count == 1 {
let blockset = mutex_get_or_create_blockset(this, mutex_op)?;
if let Some(new_owner) = this.unblock_some_thread(blockset)? {
// We have at least one thread waiting on this mutex. Transfer
// ownership to it.
mutex_set_owner(this, mutex_op, new_owner.to_u32_scalar())?;
} else {
throw_ub_format!("unlocked a PTHREAD_MUTEX_NORMAL mutex that was not locked");
}
} else if kind == this.eval_libc("PTHREAD_MUTEX_ERRORCHECK")? {
if locked_count != 0 {
// No thread is waiting on this mutex.
mutex_set_owner(this, mutex_op, Scalar::from_u32(0))?;
mutex_set_locked_count(this, mutex_op, Scalar::from_u32(0))?;
Ok(0)
} else {
this.eval_libc_i32("EPERM")
}
} else if kind == this.eval_libc("PTHREAD_MUTEX_RECURSIVE")? {
match locked_count.checked_sub(1) {
Some(new_count) => {
mutex_set_locked_count(this, mutex_op, Scalar::from_u32(new_count))?;
Ok(0)
}
None => {
// locked_count was already zero
this.eval_libc_i32("EPERM")
}
}
Ok(0)
} else {
throw_ub_format!("called pthread_mutex_unlock on an unsupported type of mutex");
if kind == this.eval_libc("PTHREAD_MUTEX_NORMAL")? {
throw_ub_format!("unlocked a PTHREAD_MUTEX_NORMAL mutex that was not locked");
} else if kind == this.eval_libc("PTHREAD_MUTEX_ERRORCHECK")? {
this.eval_libc_i32("EPERM")
} else if kind == this.eval_libc("PTHREAD_MUTEX_RECURSIVE")? {
match locked_count.checked_sub(1) {
Some(new_count) => {
mutex_set_locked_count(this, mutex_op, Scalar::from_u32(new_count))?;
Ok(0)
}
None => {
// locked_count was already zero
this.eval_libc_i32("EPERM")
}
}
} else {
throw_ub_format!("called pthread_mutex_unlock on an unsupported type of mutex");
}
}
}
@@ -366,6 +460,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
mutex_set_kind(this, mutex_op, ScalarMaybeUndef::Undef)?;
mutex_set_locked_count(this, mutex_op, ScalarMaybeUndef::Undef)?;
mutex_set_blockset(this, mutex_op, ScalarMaybeUndef::Undef)?;
Ok(0)
}
@@ -375,8 +470,13 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
let readers = rwlock_get_readers(this, rwlock_op)?.to_u32()?;
let writers = rwlock_get_writers(this, rwlock_op)?.to_u32()?;
if writers != 0 {
throw_machine_stop!(TerminationInfo::Deadlock);
// The lock is locked by a writer.
assert_eq!(writers, 1);
let reader_blockset = rwlock_get_or_create_reader_blockset(this, rwlock_op)?;
this.block_active_thread(reader_blockset)?;
Ok(0)
} else {
match readers.checked_add(1) {
Some(new_readers) => {
@@ -411,14 +511,13 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
let readers = rwlock_get_readers(this, rwlock_op)?.to_u32()?;
let writers = rwlock_get_writers(this, rwlock_op)?.to_u32()?;
if readers != 0 {
throw_machine_stop!(TerminationInfo::Deadlock);
} else if writers != 0 {
throw_machine_stop!(TerminationInfo::Deadlock);
let writer_blockset = rwlock_get_or_create_writer_blockset(this, rwlock_op)?;
if readers != 0 || writers != 0 {
this.block_active_thread(writer_blockset)?;
} else {
rwlock_set_writers(this, rwlock_op, Scalar::from_u32(1))?;
Ok(0)
}
Ok(0)
}
fn pthread_rwlock_trywrlock(&mut self, rwlock_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
@@ -434,16 +533,37 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
}
}
// FIXME: We should check that this lock was locked by the active thread.
fn pthread_rwlock_unlock(&mut self, rwlock_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
let this = self.eval_context_mut();
let readers = rwlock_get_readers(this, rwlock_op)?.to_u32()?;
let writers = rwlock_get_writers(this, rwlock_op)?.to_u32()?;
let writer_blockset = rwlock_get_or_create_writer_blockset(this, rwlock_op)?;
if let Some(new_readers) = readers.checked_sub(1) {
assert_eq!(writers, 0);
rwlock_set_readers(this, rwlock_op, Scalar::from_u32(new_readers))?;
if new_readers == 0 {
if let Some(_writer) = this.unblock_some_thread(writer_blockset)? {
rwlock_set_writers(this, rwlock_op, Scalar::from_u32(1))?;
}
}
Ok(0)
} else if writers != 0 {
rwlock_set_writers(this, rwlock_op, Scalar::from_u32(0))?;
let reader_blockset = rwlock_get_or_create_reader_blockset(this, rwlock_op)?;
// We are prioritizing writers here over the readers. As a
// result, not only can readers starve writers, but writers can
// also starve readers.
if let Some(_writer) = this.unblock_some_thread(writer_blockset)? {
assert_eq!(writers, 1);
} else {
rwlock_set_writers(this, rwlock_op, Scalar::from_u32(0))?;
let mut readers = 0;
while let Some(_reader) = this.unblock_some_thread(reader_blockset)? {
readers += 1;
}
rwlock_set_readers(this, rwlock_op, Scalar::from_u32(readers))?
}
Ok(0)
} else {
throw_ub_format!("unlocked an rwlock that was not locked");
@@ -461,6 +581,8 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
rwlock_set_readers(this, rwlock_op, ScalarMaybeUndef::Undef)?;
rwlock_set_writers(this, rwlock_op, ScalarMaybeUndef::Undef)?;
rwlock_set_reader_blockset(this, rwlock_op, ScalarMaybeUndef::Undef)?;
rwlock_set_writer_blockset(this, rwlock_op, ScalarMaybeUndef::Undef)?;
Ok(0)
}
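
The lock shims above lean on two `ThreadManager` primitives from the new `src/thread.rs`: `block_active_thread(blockset)` parks the current thread in a blockset, and `unblock_some_thread(blockset)` wakes one parked thread. A toy model of the concept (hypothetical and heavily simplified, ignoring the scheduler itself):

```rust
/// A bag of blocked thread ids that the scheduler can park threads
/// into and wake threads from, one at a time.
struct BlockSet { waiting: Vec<u32> }

impl BlockSet {
    fn block(&mut self, thread: u32) {
        self.waiting.push(thread);
    }
    /// Wake one waiting thread, if any (which one is unspecified).
    fn unblock_some(&mut self) -> Option<u32> {
        self.waiting.pop()
    }
}
```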

src/shims/thread.rs (new file, 128 lines)
View file

@@ -0,0 +1,128 @@
use std::convert::TryInto;
use crate::*;
use rustc_target::abi::LayoutOf;
impl<'mir, 'tcx> EvalContextExt<'mir, 'tcx> for crate::MiriEvalContext<'mir, 'tcx> {}
pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx> {
fn pthread_create(
&mut self,
thread: OpTy<'tcx, Tag>,
_attr: OpTy<'tcx, Tag>,
start_routine: OpTy<'tcx, Tag>,
arg: OpTy<'tcx, Tag>,
) -> InterpResult<'tcx, i32> {
let this = self.eval_context_mut();
this.tcx.sess.warn(
"thread support is experimental. \
For example, Miri does not detect data races yet.",
);
let new_thread_id = this.create_thread()?;
// Also switch to the new thread so that we can push the first stackframe.
let old_thread_id = this.set_active_thread(new_thread_id)?;
let thread_info_place = this.deref_operand(thread)?;
this.write_scalar(
Scalar::from_uint(new_thread_id.to_u32(), thread_info_place.layout.size),
thread_info_place.into(),
)?;
let fn_ptr = this.read_scalar(start_routine)?.not_undef()?;
let instance = this.memory.get_fn(fn_ptr)?.as_instance()?;
let func_arg = this.read_immediate(arg)?;
// Note: the returned value is currently ignored (see the FIXME in
// pthread_join below) because the Rust standard library does not use
// it.
let ret_place =
this.allocate(this.layout_of(this.tcx.types.usize)?, MiriMemoryKind::Machine.into());
this.call_function(
instance,
&[*func_arg],
Some(ret_place.into()),
StackPopCleanup::None { cleanup: true },
)?;
this.set_active_thread(old_thread_id)?;
Ok(0)
}
fn pthread_join(
&mut self,
thread: OpTy<'tcx, Tag>,
retval: OpTy<'tcx, Tag>,
) -> InterpResult<'tcx, i32> {
let this = self.eval_context_mut();
if !this.is_null(this.read_scalar(retval)?.not_undef()?)? {
// FIXME: implement reading the thread function's return place.
throw_unsup_format!("Miri supports pthread_join only with retval==NULL");
}
let thread_id = this.read_scalar(thread)?.to_machine_usize(this)?;
this.join_thread(thread_id.try_into().expect("thread ID should fit in u32"))?;
Ok(0)
}
fn pthread_detach(&mut self, thread: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
let this = self.eval_context_mut();
let thread_id = this.read_scalar(thread)?.to_machine_usize(this)?;
this.detach_thread(thread_id.try_into().expect("thread ID should fit in u32"))?;
Ok(0)
}
fn pthread_self(&mut self, dest: PlaceTy<'tcx, Tag>) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
let thread_id = this.get_active_thread()?;
this.write_scalar(Scalar::from_uint(thread_id.to_u32(), dest.layout.size), dest)
}
fn prctl(
&mut self,
option: OpTy<'tcx, Tag>,
arg2: OpTy<'tcx, Tag>,
_arg3: OpTy<'tcx, Tag>,
_arg4: OpTy<'tcx, Tag>,
_arg5: OpTy<'tcx, Tag>,
) -> InterpResult<'tcx, i32> {
let this = self.eval_context_mut();
let option = this.read_scalar(option)?.to_i32()?;
if option == this.eval_libc_i32("PR_SET_NAME")? {
let address = this.read_scalar(arg2)?.not_undef()?;
let mut name = this.memory.read_c_str(address)?.to_owned();
// The name should be no more than 16 bytes, including the null
// byte. Since `read_c_str` returns the string without the null
// byte, we need to truncate to 15.
name.truncate(15);
this.set_active_thread_name(name)?;
} else if option == this.eval_libc_i32("PR_GET_NAME")? {
let address = this.read_scalar(arg2)?.not_undef()?;
let mut name = this.get_active_thread_name()?.to_vec();
name.push(0u8);
assert!(name.len() <= 16);
this.memory.write_bytes(address, name)?;
} else {
throw_unsup_format!("unsupported prctl option {}", option);
}
Ok(0)
}
fn sched_yield(&mut self) -> InterpResult<'tcx, i32> {
let this = self.eval_context_mut();
this.yield_active_thread()?;
Ok(0)
}
}
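
With these shims in place, `std::thread` programs along the following lines become runnable under Miri on Linux/macOS, where `spawn` and `join` bottom out in `pthread_create` and `pthread_join` (std passes a NULL `retval` to `pthread_join`, which is exactly the case supported above):

```rust
use std::thread;

fn main() {
    // Goes through the pthread_create shim; the closure's result travels
    // back via std's own machinery, not via pthread_join's retval.
    let handle = thread::spawn(|| 21 + 21);
    assert_eq!(handle.join().unwrap(), 42);
}
```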

View file

@@ -1,25 +1,38 @@
//! Implement thread-local storage.
use std::collections::BTreeMap;
use std::collections::btree_map::Entry as BTreeEntry;
use std::collections::hash_map::Entry as HashMapEntry;
use log::trace;
use rustc_data_structures::fx::FxHashMap;
use rustc_middle::ty;
use rustc_target::abi::{Size, HasDataLayout};
use crate::{HelpersEvalContextExt, InterpResult, MPlaceTy, Scalar, StackPopCleanup, Tag};
use crate::{
HelpersEvalContextExt, InterpResult, MPlaceTy, Scalar, StackPopCleanup, Tag, ThreadId,
ThreadsEvalContextExt,
};
pub type TlsKey = u128;
#[derive(Copy, Clone, Debug)]
#[derive(Clone, Debug)]
pub struct TlsEntry<'tcx> {
/// The data for this key. None is used to represent NULL.
/// (We normalize this early to avoid having to do a NULL-ptr-test each time we access the data.)
/// Will eventually become a map from thread IDs to `Scalar`s, if we ever support more than one thread.
data: Option<Scalar<Tag>>,
data: BTreeMap<ThreadId, Scalar<Tag>>,
dtor: Option<ty::Instance<'tcx>>,
}
#[derive(Clone, Debug)]
struct RunningDtorsState {
/// The last TlsKey used to retrieve a TLS destructor. `None` means that we
/// have not tried to retrieve a TLS destructor yet or that we already tried
/// all keys.
last_dtor_key: Option<TlsKey>,
}
#[derive(Debug)]
pub struct TlsData<'tcx> {
/// The Key to use for the next thread-local allocation.
@@ -28,11 +41,14 @@ pub struct TlsData<'tcx> {
/// pthreads-style thread-local storage.
keys: BTreeMap<TlsKey, TlsEntry<'tcx>>,
/// A single global dtor (that's how things work on macOS) with a data argument.
global_dtor: Option<(ty::Instance<'tcx>, Scalar<Tag>)>,
/// A single per-thread destructor of the thread-local storage (that's how
/// things work on macOS), with a data argument.
macos_thread_dtors: BTreeMap<ThreadId, (ty::Instance<'tcx>, Scalar<Tag>)>,
/// Whether we are in the "destruct" phase, during which some operations are UB.
dtors_running: bool,
/// State for currently running TLS dtors. If this map contains a key for a
/// specific thread, it means that we are in the "destruct" phase, during
/// which some operations are UB.
dtors_running: FxHashMap<ThreadId, RunningDtorsState>,
}
impl<'tcx> Default for TlsData<'tcx> {
@@ -40,8 +56,8 @@ impl<'tcx> Default for TlsData<'tcx> {
TlsData {
next_key: 1, // start with 1 as we must not use 0 on Windows
keys: Default::default(),
global_dtor: None,
dtors_running: false,
macos_thread_dtors: Default::default(),
dtors_running: Default::default(),
}
}
}
@@ -52,7 +68,7 @@ impl<'tcx> TlsData<'tcx> {
pub fn create_tls_key(&mut self, dtor: Option<ty::Instance<'tcx>>, max_size: Size) -> InterpResult<'tcx, TlsKey> {
let new_key = self.next_key;
self.next_key += 1;
self.keys.insert(new_key, TlsEntry { data: None, dtor }).unwrap_none();
self.keys.insert(new_key, TlsEntry { data: Default::default(), dtor }).unwrap_none();
trace!("New TLS key allocated: {} with dtor {:?}", new_key, dtor);
if max_size.bits() < 128 && new_key >= (1u128 << max_size.bits() as u128) {
@@ -74,38 +90,65 @@ impl<'tcx> TlsData<'tcx> {
pub fn load_tls(
&self,
key: TlsKey,
thread_id: ThreadId,
cx: &impl HasDataLayout,
) -> InterpResult<'tcx, Scalar<Tag>> {
match self.keys.get(&key) {
Some(&TlsEntry { data, .. }) => {
trace!("TLS key {} loaded: {:?}", key, data);
Ok(data.unwrap_or_else(|| Scalar::null_ptr(cx).into()))
Some(TlsEntry { data, .. }) => {
let value = data.get(&thread_id).copied();
trace!("TLS key {} for thread {:?} loaded: {:?}", key, thread_id, value);
Ok(value.unwrap_or_else(|| Scalar::null_ptr(cx).into()))
}
None => throw_ub_format!("loading from a non-existing TLS key: {}", key),
}
}
pub fn store_tls(&mut self, key: TlsKey, new_data: Option<Scalar<Tag>>) -> InterpResult<'tcx> {
pub fn store_tls(
&mut self,
key: TlsKey,
thread_id: ThreadId,
new_data: Option<Scalar<Tag>>
) -> InterpResult<'tcx> {
match self.keys.get_mut(&key) {
Some(TlsEntry { data, .. }) => {
trace!("TLS key {} stored: {:?}", key, new_data);
*data = new_data;
match new_data {
Some(scalar) => {
trace!("TLS key {} for thread {:?} stored: {:?}", key, thread_id, scalar);
data.insert(thread_id, scalar);
}
None => {
trace!("TLS key {} for thread {:?} removed", key, thread_id);
data.remove(&thread_id);
}
}
Ok(())
}
None => throw_ub_format!("storing to a non-existing TLS key: {}", key),
}
}
pub fn set_global_dtor(&mut self, dtor: ty::Instance<'tcx>, data: Scalar<Tag>) -> InterpResult<'tcx> {
if self.dtors_running {
/// Set the thread-wide destructor of the thread-local storage for the given
/// thread. This function is used to implement the `_tlv_atexit` shim on macOS.
///
/// Thread-wide dtors are available only on macOS. There is one destructor
/// per thread, as can be guessed from the following comment in the
/// [`_tlv_atexit`
/// implementation](https://github.com/opensource-apple/dyld/blob/195030646877261f0c8c7ad8b001f52d6a26f514/src/threadLocalVariables.c#L389):
///
/// // NOTE: this does not need locks because it only operates on current thread data
pub fn set_thread_dtor(
&mut self,
thread: ThreadId,
dtor: ty::Instance<'tcx>,
data: Scalar<Tag>
) -> InterpResult<'tcx> {
if self.dtors_running.contains_key(&thread) {
// UB, according to libstd docs.
throw_ub_format!("setting global destructor while destructors are already running");
throw_ub_format!("setting thread's local storage destructor while destructors are already running");
}
if self.global_dtor.is_some() {
throw_unsup_format!("setting more than one global destructor is not supported");
if self.macos_thread_dtors.insert(thread, (dtor, data)).is_some() {
throw_unsup_format!("setting more than one thread local storage destructor for the same thread is not supported");
}
self.global_dtor = Some((dtor, data));
Ok(())
}
@@ -131,6 +174,7 @@ impl<'tcx> TlsData<'tcx> {
fn fetch_tls_dtor(
&mut self,
key: Option<TlsKey>,
thread_id: ThreadId,
) -> Option<(ty::Instance<'tcx>, Scalar<Tag>, TlsKey)> {
use std::collections::Bound::*;
@@ -142,54 +186,85 @@ impl<'tcx> TlsData<'tcx> {
for (&key, TlsEntry { data, dtor }) in
thread_local.range_mut((start, Unbounded))
{
if let Some(data_scalar) = *data {
if let Some(dtor) = dtor {
let ret = Some((*dtor, data_scalar, key));
*data = None;
return ret;
match data.entry(thread_id) {
BTreeEntry::Occupied(entry) => {
if let Some(dtor) = dtor {
// Set TLS data to NULL, and call dtor with old value.
let data_scalar = entry.remove();
let ret = Some((*dtor, data_scalar, key));
return ret;
}
}
BTreeEntry::Vacant(_) => {}
}
}
None
}
/// Set that dtors are running for `thread`. It is guaranteed not to change
/// the existing values stored in `dtors_running` for this thread. Returns
/// `true` if dtors for `thread` are already running.
fn set_dtors_running_for_thread(&mut self, thread: ThreadId) -> bool {
match self.dtors_running.entry(thread) {
HashMapEntry::Occupied(_) => true,
HashMapEntry::Vacant(entry) => {
// We cannot just do `self.dtors_running.insert` because that
// would overwrite `last_dtor_key` with `None`.
entry.insert(RunningDtorsState { last_dtor_key: None });
false
}
}
}
/// Delete all TLS entries for the given thread. This function should be
/// called after all TLS destructors have already finished.
fn delete_all_thread_tls(&mut self, thread_id: ThreadId) {
for TlsEntry { data, .. } in self.keys.values_mut() {
data.remove(&thread_id);
}
}
}
impl<'mir, 'tcx: 'mir> EvalContextExt<'mir, 'tcx> for crate::MiriEvalContext<'mir, 'tcx> {}
pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx> {
fn run_tls_dtors(&mut self) -> InterpResult<'tcx> {
impl<'mir, 'tcx: 'mir> EvalContextPrivExt<'mir, 'tcx> for crate::MiriEvalContext<'mir, 'tcx> {}
trait EvalContextPrivExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx> {
/// Schedule TLS destructors for the main thread on Windows. The
/// implementation assumes that we do not support concurrency on Windows
/// yet.
fn schedule_windows_tls_dtors(&mut self) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
assert!(!this.machine.tls.dtors_running, "running TLS dtors twice");
this.machine.tls.dtors_running = true;
let active_thread = this.get_active_thread()?;
assert_eq!(this.get_total_thread_count()?, 1, "concurrency on Windows not supported");
// Windows has a special magic linker section that is run on certain events.
// Instead of searching for that section and supporting arbitrary hooks in there
// (that would be basically https://github.com/rust-lang/miri/issues/450),
// we specifically look up the static in libstd that we know is placed
// in that section.
let thread_callback = this.eval_path_scalar(&["std", "sys", "windows", "thread_local", "p_thread_callback"])?;
let thread_callback = this.memory.get_fn(thread_callback.not_undef()?)?.as_instance()?;
if this.tcx.sess.target.target.target_os == "windows" {
// Windows has a special magic linker section that is run on certain events.
// Instead of searching for that section and supporting arbitrary hooks in there
// (that would be basically https://github.com/rust-lang/miri/issues/450),
// we specifically look up the static in libstd that we know is placed
// in that section.
let thread_callback = this.eval_path_scalar(&["std", "sys", "windows", "thread_local", "p_thread_callback"])?;
let thread_callback = this.memory.get_fn(thread_callback.not_undef()?)?.as_instance()?;
// The signature of this function is `unsafe extern "system" fn(h: c::LPVOID, dwReason: c::DWORD, pv: c::LPVOID)`.
let reason = this.eval_path_scalar(&["std", "sys", "windows", "c", "DLL_THREAD_DETACH"])?;
let ret_place = MPlaceTy::dangling(this.machine.layouts.unit, this).into();
this.call_function(
thread_callback,
&[Scalar::null_ptr(this).into(), reason.into(), Scalar::null_ptr(this).into()],
Some(ret_place),
StackPopCleanup::None { cleanup: true },
)?;
// The signature of this function is `unsafe extern "system" fn(h: c::LPVOID, dwReason: c::DWORD, pv: c::LPVOID)`.
let reason = this.eval_path_scalar(&["std", "sys", "windows", "c", "DLL_PROCESS_DETACH"])?;
let ret_place = MPlaceTy::dangling(this.machine.layouts.unit, this).into();
this.call_function(
thread_callback,
&[Scalar::null_ptr(this).into(), reason.into(), Scalar::null_ptr(this).into()],
Some(ret_place),
StackPopCleanup::None { cleanup: true },
)?;
this.enable_thread(active_thread)?;
Ok(())
}
// step until out of stackframes
this.run()?;
// Windows doesn't have other destructors.
return Ok(());
}
// The macOS global dtor runs "before any TLS slots get freed", so do that first.
if let Some((instance, data)) = this.machine.tls.global_dtor {
trace!("Running global dtor {:?} on {:?}", instance, data);
/// Schedule the macOS thread destructor of the thread-local storage to be
/// executed. Returns `true` if scheduled.
///
/// Note: It is safe to call this function on other Unixes as well.
fn schedule_macos_tls_dtor(&mut self) -> InterpResult<'tcx, bool> {
let this = self.eval_context_mut();
let thread_id = this.get_active_thread()?;
if let Some((instance, data)) = this.machine.tls.macos_thread_dtors.remove(&thread_id) {
trace!("Running macos dtor {:?} on {:?} at {:?}", instance, data, thread_id);
let ret_place = MPlaceTy::dangling(this.machine.layouts.unit, this).into();
this.call_function(
@@ -199,14 +274,36 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
StackPopCleanup::None { cleanup: true },
)?;
// step until out of stackframes
this.run()?;
// Enable the thread so that it steps through the destructor which
// we just scheduled. Since we deleted the destructor, it is
// guaranteed that we will schedule it again. The `dtors_running`
// flag will prevent the code from adding the destructor again.
this.enable_thread(thread_id)?;
Ok(true)
} else {
Ok(false)
}
}
// Now run the "keyed" destructors.
let mut dtor = this.machine.tls.fetch_tls_dtor(None);
while let Some((instance, ptr, key)) = dtor {
trace!("Running TLS dtor {:?} on {:?}", instance, ptr);
/// Schedule a pthread TLS destructor. Returns `true` if it found
/// a destructor to schedule, and `false` otherwise.
fn schedule_next_pthread_tls_dtor(&mut self) -> InterpResult<'tcx, bool> {
let this = self.eval_context_mut();
let active_thread = this.get_active_thread()?;
assert!(this.has_terminated(active_thread)?, "running TLS dtors for non-terminated thread");
// Fetch next dtor after `key`.
let last_key = this.machine.tls.dtors_running[&active_thread].last_dtor_key.clone();
let dtor = match this.machine.tls.fetch_tls_dtor(last_key, active_thread) {
dtor @ Some(_) => dtor,
// We ran each dtor once, start over from the beginning.
None => {
this.machine.tls.fetch_tls_dtor(None, active_thread)
}
};
if let Some((instance, ptr, key)) = dtor {
this.machine.tls.dtors_running.get_mut(&active_thread).unwrap().last_dtor_key = Some(key);
trace!("Running TLS dtor {:?} on {:?} at {:?}", instance, ptr, active_thread);
assert!(!this.is_null(ptr).unwrap(), "data can't be NULL when dtor is called!");
let ret_place = MPlaceTy::dangling(this.machine.layouts.unit, this).into();
@@ -217,16 +314,63 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
StackPopCleanup::None { cleanup: true },
)?;
// step until out of stackframes
this.run()?;
// Fetch next dtor after `key`.
dtor = match this.machine.tls.fetch_tls_dtor(Some(key)) {
dtor @ Some(_) => dtor,
// We ran each dtor once, start over from the beginning.
None => this.machine.tls.fetch_tls_dtor(None),
};
this.enable_thread(active_thread)?;
return Ok(true);
}
this.machine.tls.dtors_running.get_mut(&active_thread).unwrap().last_dtor_key = None;
Ok(false)
}
}
impl<'mir, 'tcx: 'mir> EvalContextExt<'mir, 'tcx> for crate::MiriEvalContext<'mir, 'tcx> {}
pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx> {
/// Schedule an active thread's TLS destructor to run on the active thread.
/// Note that this function does not run the destructors itself, it just
/// schedules them one by one each time it is called and reenables the
/// thread so that it can be executed normally by the main execution loop.
///
/// FIXME: we do not yet support deallocation of thread-local statics.
/// Issue: https://github.com/rust-lang/miri/issues/1369
///
/// Note: we consistently run TLS destructors for all threads, including the
/// main thread. However, it is not clear that we should run the TLS
/// destructors for the main thread. See issue:
/// https://github.com/rust-lang/rust/issues/28129.
fn schedule_next_tls_dtor_for_active_thread(&mut self) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
let active_thread = this.get_active_thread()?;
if !this.machine.tls.set_dtors_running_for_thread(active_thread) {
// This is the first time we got asked to schedule a destructor. The
// Windows destructor-scheduling function must be called exactly once,
// which is why it is in this block.
if this.tcx.sess.target.target.target_os == "windows" {
// On Windows, we signal that the thread quit by starting the
// relevant function, reenabling the thread, and going back to
// the scheduler.
this.schedule_windows_tls_dtors()?;
return Ok(())
}
}
// The macOS thread-wide destructor runs "before any TLS slots get
// freed", so do that first.
if this.schedule_macos_tls_dtor()? {
// We have scheduled a macOS dtor to run on the thread. Execute it
// to completion and come back here. Scheduling a destructor
// removes it, so we will not enter this branch again.
return Ok(())
}
if this.schedule_next_pthread_tls_dtor()? {
// We have scheduled a pthread destructor and removed it from the
// destructors list. Run it to completion and come back here.
return Ok(())
}
// All dtors done!
this.machine.tls.delete_all_thread_tls(active_thread);
Ok(())
}
}
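
The net effect of the `TlsEntry` change at the top of this file is that each TLS key now maps thread ids to values, so the same key holds an independent pointer in every thread. A self-contained model (hypothetical; `u64` stands in for `Scalar<Tag>`):

```rust
use std::collections::BTreeMap;

type ThreadId = u32;

struct TlsEntry { data: BTreeMap<ThreadId, u64> }

fn main() {
    let mut entry = TlsEntry { data: BTreeMap::new() };
    entry.data.insert(0, 0xdead); // the main thread stores its value
    entry.data.insert(1, 0xbeef); // another thread stores a different one
    assert_eq!(entry.data.get(&0), Some(&0xdead));
    // A thread that never stored anything reads back NULL (None here).
    assert_eq!(entry.data.get(&2), None);
}
```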

src/thread.rs (new file, 611 lines)
View file

@@ -0,0 +1,611 @@
//! Implements threads.
use std::cell::RefCell;
use std::convert::TryFrom;
use std::num::{NonZeroU32, TryFromIntError};
use log::trace;
use rustc_data_structures::fx::FxHashMap;
use rustc_hir::def_id::DefId;
use rustc_index::vec::{Idx, IndexVec};
use rustc_middle::{
middle::codegen_fn_attrs::CodegenFnAttrFlags,
mir,
ty::{self, Instance},
};
use crate::*;
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum SchedulingAction {
/// Execute step on the active thread.
ExecuteStep,
/// Execute destructors of the active thread.
ExecuteDtors,
/// Stop the program.
Stop,
}
/// A thread identifier.
#[derive(Clone, Copy, Debug, PartialOrd, Ord, PartialEq, Eq, Hash)]
pub struct ThreadId(u32);
/// The main thread. When it terminates, the whole application terminates.
const MAIN_THREAD: ThreadId = ThreadId(0);
impl ThreadId {
pub fn to_u32(self) -> u32 {
self.0
}
}
impl Idx for ThreadId {
fn new(idx: usize) -> Self {
ThreadId(u32::try_from(idx).unwrap())
}
fn index(self) -> usize {
usize::try_from(self.0).unwrap()
}
}
impl TryFrom<u64> for ThreadId {
type Error = TryFromIntError;
fn try_from(id: u64) -> Result<Self, Self::Error> {
u32::try_from(id).map(|id_u32| Self(id_u32))
}
}
impl From<u32> for ThreadId {
fn from(id: u32) -> Self {
Self(id)
}
}
impl ThreadId {
pub fn to_u32_scalar<'tcx>(&self) -> Scalar<Tag> {
Scalar::from_u32(u32::try_from(self.0).unwrap())
}
}
/// An identifier of a set of blocked threads. 0 is used to indicate the absence
/// of a blockset identifier and, therefore, is not a valid identifier.
#[derive(Clone, Copy, Debug, PartialOrd, Ord, PartialEq, Eq, Hash)]
pub struct BlockSetId(NonZeroU32);
impl BlockSetId {
/// Panics if `id` is 0.
pub fn new(id: u32) -> Self {
Self(NonZeroU32::new(id).expect("0 is not a valid blockset id"))
}
pub fn to_u32_scalar<'tcx>(&self) -> Scalar<Tag> {
Scalar::from_u32(self.0.get())
}
}
/// The state of a thread.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum ThreadState {
/// The thread is enabled and can be executed.
Enabled,
/// The thread tried to join the specified thread and is blocked until that
/// thread terminates.
BlockedOnJoin(ThreadId),
/// The thread is blocked and belongs to the given blockset.
Blocked(BlockSetId),
/// The thread has terminated its execution (we do not delete terminated
/// threads).
Terminated,
}
/// The join status of a thread.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
enum ThreadJoinStatus {
/// The thread can be joined.
Joinable,
/// A thread is detached if its join handle was destroyed and no other
/// thread can join it.
Detached,
/// The thread was already joined by some thread and cannot be joined again.
Joined,
}
/// A thread.
pub struct Thread<'mir, 'tcx> {
state: ThreadState,
/// Name of the thread.
thread_name: Option<Vec<u8>>,
/// The virtual call stack.
stack: Vec<Frame<'mir, 'tcx, Tag, FrameData<'tcx>>>,
/// The join status.
join_status: ThreadJoinStatus,
}
impl<'mir, 'tcx> Thread<'mir, 'tcx> {
/// Check if the thread is done executing (no more stack frames). If yes,
/// change the state to terminated and return `true`.
fn check_terminated(&mut self) -> bool {
if self.state == ThreadState::Enabled {
if self.stack.is_empty() {
self.state = ThreadState::Terminated;
return true;
}
}
false
}
}
impl<'mir, 'tcx> std::fmt::Debug for Thread<'mir, 'tcx> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
if let Some(ref name) = self.thread_name {
write!(f, "{}", String::from_utf8_lossy(name))?;
} else {
write!(f, "<unnamed>")?;
}
write!(f, "({:?}, {:?})", self.state, self.join_status)
}
}
impl<'mir, 'tcx> Default for Thread<'mir, 'tcx> {
fn default() -> Self {
Self {
state: ThreadState::Enabled,
thread_name: None,
stack: Vec::new(),
join_status: ThreadJoinStatus::Joinable,
}
}
}
/// A set of threads.
#[derive(Debug)]
pub struct ThreadManager<'mir, 'tcx> {
/// Identifier of the currently active thread.
active_thread: ThreadId,
/// Threads used in the program.
///
/// Note that this vector also contains terminated threads.
threads: IndexVec<ThreadId, Thread<'mir, 'tcx>>,
/// A counter used to generate unique identifiers for blocksets.
blockset_counter: u32,
/// A mapping from each (thread-local static, thread) pair to the
/// allocation id of that thread's copy of the static.
thread_local_alloc_ids: RefCell<FxHashMap<(DefId, ThreadId), AllocId>>,
/// A flag that indicates that we should change the active thread.
yield_active_thread: bool,
}
impl<'mir, 'tcx> Default for ThreadManager<'mir, 'tcx> {
fn default() -> Self {
let mut threads = IndexVec::new();
// Create the main thread and add it to the list of threads.
let mut main_thread = Thread::default();
// The main thread can *not* be joined on.
main_thread.join_status = ThreadJoinStatus::Detached;
threads.push(main_thread);
Self {
active_thread: ThreadId::new(0),
threads,
blockset_counter: 0,
thread_local_alloc_ids: Default::default(),
yield_active_thread: false,
}
}
}
impl<'mir, 'tcx: 'mir> ThreadManager<'mir, 'tcx> {
/// Check if we already have an allocation id for the given thread-local
/// static of the active thread.
fn get_thread_local_alloc_id(&self, def_id: DefId) -> Option<AllocId> {
self.thread_local_alloc_ids.borrow().get(&(def_id, self.active_thread)).cloned()
}
/// Record the given allocation id as the allocation id of the given
/// thread-local static for the active thread.
///
/// Panics if a thread local is initialized twice for the same thread.
fn set_thread_local_alloc_id(&self, def_id: DefId, new_alloc_id: AllocId) {
self.thread_local_alloc_ids
.borrow_mut()
.insert((def_id, self.active_thread), new_alloc_id)
.unwrap_none();
}
/// Borrow the stack of the active thread.
fn active_thread_stack(&self) -> &[Frame<'mir, 'tcx, Tag, FrameData<'tcx>>] {
&self.threads[self.active_thread].stack
}
/// Mutably borrow the stack of the active thread.
fn active_thread_stack_mut(&mut self) -> &mut Vec<Frame<'mir, 'tcx, Tag, FrameData<'tcx>>> {
&mut self.threads[self.active_thread].stack
}
/// Create a new thread and return its id.
fn create_thread(&mut self) -> ThreadId {
let new_thread_id = ThreadId::new(self.threads.len());
self.threads.push(Default::default());
new_thread_id
}
/// Make the given thread active and return the id of the previously active thread.
fn set_active_thread_id(&mut self, id: ThreadId) -> ThreadId {
let active_thread_id = self.active_thread;
self.active_thread = id;
assert!(self.active_thread.index() < self.threads.len());
active_thread_id
}
/// Get the id of the currently active thread.
fn get_active_thread_id(&self) -> ThreadId {
self.active_thread
}
/// Get the total number of threads ever spawned by this program (including terminated ones).
fn get_total_thread_count(&self) -> usize {
self.threads.len()
}
/// Has the given thread terminated?
fn has_terminated(&self, thread_id: ThreadId) -> bool {
self.threads[thread_id].state == ThreadState::Terminated
}
/// Enable the thread for execution. The thread must currently be in the `Terminated` state.
fn enable_thread(&mut self, thread_id: ThreadId) {
assert!(self.has_terminated(thread_id));
self.threads[thread_id].state = ThreadState::Enabled;
}
/// Get a mutable borrow of the currently active thread.
fn active_thread_mut(&mut self) -> &mut Thread<'mir, 'tcx> {
&mut self.threads[self.active_thread]
}
/// Get a shared borrow of the currently active thread.
fn active_thread_ref(&self) -> &Thread<'mir, 'tcx> {
&self.threads[self.active_thread]
}
/// Mark the thread as detached, which means that no other thread will try
/// to join it and the thread is responsible for cleaning up.
fn detach_thread(&mut self, id: ThreadId) -> InterpResult<'tcx> {
if self.threads[id].join_status != ThreadJoinStatus::Joinable {
throw_ub_format!("trying to detach thread that was already detached or joined");
}
self.threads[id].join_status = ThreadJoinStatus::Detached;
Ok(())
}
/// Mark that the active thread wants to join the thread with `joined_thread_id`,
/// blocking the active thread until that thread terminates (unless it already has).
fn join_thread(&mut self, joined_thread_id: ThreadId) -> InterpResult<'tcx> {
if self.threads[joined_thread_id].join_status != ThreadJoinStatus::Joinable {
throw_ub_format!("trying to join a detached or already joined thread");
}
if joined_thread_id == self.active_thread {
throw_ub_format!("trying to join itself");
}
assert!(
self.threads
.iter()
.all(|thread| thread.state != ThreadState::BlockedOnJoin(joined_thread_id)),
"a joinable thread already has threads waiting for its termination"
);
// Mark the joined thread as being joined so that we detect if other
// threads try to join it.
self.threads[joined_thread_id].join_status = ThreadJoinStatus::Joined;
if self.threads[joined_thread_id].state != ThreadState::Terminated {
// The joined thread is still running, we need to wait for it.
self.active_thread_mut().state = ThreadState::BlockedOnJoin(joined_thread_id);
trace!(
"{:?} blocked on {:?} when trying to join",
self.active_thread,
joined_thread_id
);
}
Ok(())
}
/// Set the name of the active thread.
fn set_thread_name(&mut self, new_thread_name: Vec<u8>) {
self.active_thread_mut().thread_name = Some(new_thread_name);
}
/// Get the name of the active thread.
fn get_thread_name(&self) -> &[u8] {
if let Some(ref thread_name) = self.active_thread_ref().thread_name {
thread_name
} else {
b"<unnamed>"
}
}
/// Allocate a new blockset id.
fn create_blockset(&mut self) -> BlockSetId {
self.blockset_counter = self.blockset_counter.checked_add(1).unwrap();
BlockSetId::new(self.blockset_counter)
}
/// Block the currently active thread and put it into the given blockset.
fn block_active_thread(&mut self, set: BlockSetId) {
let state = &mut self.active_thread_mut().state;
assert_eq!(*state, ThreadState::Enabled);
*state = ThreadState::Blocked(set);
}
/// Unblock one thread from the given blockset, if it contains any blocked
/// thread. Return the id of the unblocked thread, or `None` otherwise.
fn unblock_some_thread(&mut self, set: BlockSetId) -> Option<ThreadId> {
for (id, thread) in self.threads.iter_enumerated_mut() {
if thread.state == ThreadState::Blocked(set) {
trace!("unblocking {:?} in blockset {:?}", id, set);
thread.state = ThreadState::Enabled;
return Some(id);
}
}
None
}
/// Change the active thread to some enabled thread.
fn yield_active_thread(&mut self) {
self.yield_active_thread = true;
}
/// Decide which action to take next and on which thread.
///
/// The currently implemented scheduling policy is the one that is commonly
/// used in stateless model checkers such as Loom: run the active thread as
/// long as we can and switch only when we have to (the active thread was
/// blocked, terminated, or has explicitly asked to be preempted).
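/// For example, with two enabled threads T0 and T1, T0 keeps getting
/// `ExecuteStep` until it blocks, terminates, or yields; only then does the
/// scheduler switch to T1.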
fn schedule(&mut self) -> InterpResult<'tcx, SchedulingAction> {
// Check whether the thread has **just** terminated (`check_terminated`
// checks whether the thread has popped all of its stack frames and, if so,
// sets its state to terminated).
if self.threads[self.active_thread].check_terminated() {
// Check if we need to unblock any threads.
for (i, thread) in self.threads.iter_enumerated_mut() {
if thread.state == ThreadState::BlockedOnJoin(self.active_thread) {
trace!("unblocking {:?} because {:?} terminated", i, self.active_thread);
thread.state = ThreadState::Enabled;
}
}
return Ok(SchedulingAction::ExecuteDtors);
}
if self.threads[MAIN_THREAD].state == ThreadState::Terminated {
// The main thread terminated; stop the program.
if self.threads.iter().any(|thread| thread.state != ThreadState::Terminated) {
// FIXME: This check should be either configurable or just emit
// a warning. For example, it seems normal for a program to
// terminate without waiting for its detached threads to
// terminate. However, this case is not trivial to support
// because we also probably do not want to consider the memory
// owned by these threads as leaked.
throw_unsup_format!("the main thread terminated without waiting for other threads");
}
return Ok(SchedulingAction::Stop);
}
if self.threads[self.active_thread].state == ThreadState::Enabled
&& !self.yield_active_thread
{
// The currently active thread is still enabled, just continue with it.
return Ok(SchedulingAction::ExecuteStep);
}
// We need to pick a new thread for execution.
for (id, thread) in self.threads.iter_enumerated() {
if thread.state == ThreadState::Enabled
&& (!self.yield_active_thread || id != self.active_thread)
{
self.active_thread = id;
break;
}
}
self.yield_active_thread = false;
if self.threads[self.active_thread].state == ThreadState::Enabled {
return Ok(SchedulingAction::ExecuteStep);
}
// We have not found a thread to execute.
if self.threads.iter().all(|thread| thread.state == ThreadState::Terminated) {
unreachable!("we should have stopped the program right after the main thread terminated");
} else {
throw_machine_stop!(TerminationInfo::Deadlock);
}
}
}
// Public interface to thread management.
impl<'mir, 'tcx: 'mir> EvalContextExt<'mir, 'tcx> for crate::MiriEvalContext<'mir, 'tcx> {}
pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx> {
/// A workaround for thread-local statics until
/// https://github.com/rust-lang/rust/issues/70685 is fixed: replace the
/// thread-local allocation id with a freshly generated allocation id for
/// the currently active thread.
fn remap_thread_local_alloc_ids(
&self,
val: &mut mir::interpret::ConstValue<'tcx>,
) -> InterpResult<'tcx> {
let this = self.eval_context_ref();
match *val {
mir::interpret::ConstValue::Scalar(Scalar::Ptr(ref mut ptr)) => {
let alloc_id = ptr.alloc_id;
let alloc = this.tcx.alloc_map.lock().get(alloc_id);
let tcx = this.tcx;
let is_thread_local = |def_id| {
tcx.codegen_fn_attrs(def_id).flags.contains(CodegenFnAttrFlags::THREAD_LOCAL)
};
match alloc {
Some(GlobalAlloc::Static(def_id)) if is_thread_local(def_id) => {
ptr.alloc_id = this.get_or_create_thread_local_alloc_id(def_id)?;
}
_ => {}
}
}
_ => {
// FIXME: Handling only `Scalar` seems to work for now, but at
// least in principle thread-locals could be in any constant, so
// we should also consider other cases. However, once
// https://github.com/rust-lang/rust/issues/70685 gets fixed,
// this code will have to be rewritten anyway.
}
}
Ok(())
}
/// Get a thread-specific allocation id for the given thread-local static.
/// If needed, allocate a new one.
///
/// FIXME: This method should be replaced as soon as
/// https://github.com/rust-lang/rust/issues/70685 gets fixed.
fn get_or_create_thread_local_alloc_id(&self, def_id: DefId) -> InterpResult<'tcx, AllocId> {
let this = self.eval_context_ref();
let tcx = this.tcx;
if let Some(new_alloc_id) = this.machine.threads.get_thread_local_alloc_id(def_id) {
// We already have a thread-specific allocation id for this
// thread-local static.
Ok(new_alloc_id)
} else {
// We need to allocate a thread-specific allocation id for this
// thread-local static.
//
// First, we invoke the `const_eval_raw` query and extract the
// allocation from it. Unfortunately, we have to duplicate the code
// from `Memory::get_global_alloc` that does this.
//
// Then we store the retrieved allocation back into the `alloc_map`
// to get a fresh allocation id, which we can use as a
// thread-specific allocation id for the thread-local static.
if tcx.is_foreign_item(def_id) {
throw_unsup_format!("foreign thread-local statics are not supported");
}
// Invoke the `const_eval_raw` query.
let instance = Instance::mono(tcx.tcx, def_id);
let gid = GlobalId { instance, promoted: None };
let raw_const =
tcx.const_eval_raw(ty::ParamEnv::reveal_all().and(gid)).map_err(|err| {
// no need to report anything, the const_eval call takes care of that
// for statics
assert!(tcx.is_static(def_id));
err
})?;
let id = raw_const.alloc_id;
// Extract the allocation from the query result.
let mut alloc_map = tcx.alloc_map.lock();
let allocation = alloc_map.unwrap_memory(id);
// Create a new allocation id for the same allocation in this hacky
// way. Internally, `alloc_map` deduplicates allocations, but this
// is fine because Miri will make a copy before the first mutable
// access.
let new_alloc_id = alloc_map.create_memory_alloc(allocation);
this.machine.threads.set_thread_local_alloc_id(def_id, new_alloc_id);
Ok(new_alloc_id)
}
}
#[inline]
fn create_thread(&mut self) -> InterpResult<'tcx, ThreadId> {
let this = self.eval_context_mut();
Ok(this.machine.threads.create_thread())
}
#[inline]
fn detach_thread(&mut self, thread_id: ThreadId) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
this.machine.threads.detach_thread(thread_id)
}
#[inline]
fn join_thread(&mut self, joined_thread_id: ThreadId) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
this.machine.threads.join_thread(joined_thread_id)
}
#[inline]
fn set_active_thread(&mut self, thread_id: ThreadId) -> InterpResult<'tcx, ThreadId> {
let this = self.eval_context_mut();
Ok(this.machine.threads.set_active_thread_id(thread_id))
}
#[inline]
fn get_active_thread(&self) -> InterpResult<'tcx, ThreadId> {
let this = self.eval_context_ref();
Ok(this.machine.threads.get_active_thread_id())
}
#[inline]
fn get_total_thread_count(&self) -> InterpResult<'tcx, usize> {
let this = self.eval_context_ref();
Ok(this.machine.threads.get_total_thread_count())
}
#[inline]
fn has_terminated(&self, thread_id: ThreadId) -> InterpResult<'tcx, bool> {
let this = self.eval_context_ref();
Ok(this.machine.threads.has_terminated(thread_id))
}
#[inline]
fn enable_thread(&mut self, thread_id: ThreadId) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
this.machine.threads.enable_thread(thread_id);
Ok(())
}
#[inline]
fn active_thread_stack(&self) -> &[Frame<'mir, 'tcx, Tag, FrameData<'tcx>>] {
let this = self.eval_context_ref();
this.machine.threads.active_thread_stack()
}
#[inline]
fn active_thread_stack_mut(&mut self) -> &mut Vec<Frame<'mir, 'tcx, Tag, FrameData<'tcx>>> {
let this = self.eval_context_mut();
this.machine.threads.active_thread_stack_mut()
}
#[inline]
fn set_active_thread_name(&mut self, new_thread_name: Vec<u8>) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
Ok(this.machine.threads.set_thread_name(new_thread_name))
}
#[inline]
fn get_active_thread_name<'c>(&'c self) -> InterpResult<'tcx, &'c [u8]>
where
'mir: 'c,
{
let this = self.eval_context_ref();
Ok(this.machine.threads.get_thread_name())
}
#[inline]
fn create_blockset(&mut self) -> InterpResult<'tcx, BlockSetId> {
let this = self.eval_context_mut();
Ok(this.machine.threads.create_blockset())
}
#[inline]
fn block_active_thread(&mut self, set: BlockSetId) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
Ok(this.machine.threads.block_active_thread(set))
}
#[inline]
fn unblock_some_thread(&mut self, set: BlockSetId) -> InterpResult<'tcx, Option<ThreadId>> {
let this = self.eval_context_mut();
Ok(this.machine.threads.unblock_some_thread(set))
}
#[inline]
fn yield_active_thread(&mut self) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
this.machine.threads.yield_active_thread();
Ok(())
}
/// Decide which action to take next and on which thread.
#[inline]
fn schedule(&mut self) -> InterpResult<'tcx, SchedulingAction> {
let this = self.eval_context_mut();
this.machine.threads.schedule()
}
}
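// An illustrative sketch (an assumption about the shim code, which lives
// elsewhere) of how `pthread_create` is expected to combine these operations:
//
//     let new_id = this.create_thread()?;           // fresh, enabled thread
//     let old_id = this.set_active_thread(new_id)?; // switch to the new thread...
//     // ...push the `thread_start` frame onto its (now active) stack...
//     this.set_active_thread(old_id)?;              // ...and resume the spawner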

View file

@ -0,0 +1,23 @@
// ignore-windows: Concurrency on Windows is not supported yet.
// error-pattern: unsupported operation: the main thread terminated without waiting for other threads
// Check that we terminate the program when the main thread terminates.
#![feature(rustc_private)]
extern crate libc;
use std::{mem, ptr};
extern "C" fn thread_start(_null: *mut libc::c_void) -> *mut libc::c_void {
ptr::null_mut()
}
fn main() {
unsafe {
let mut native: libc::pthread_t = mem::zeroed();
let attr: libc::pthread_attr_t = mem::zeroed();
// assert_eq!(libc::pthread_attr_init(&mut attr), 0); FIXME: this function is not yet implemented.
assert_eq!(libc::pthread_create(&mut native, &attr, thread_start, ptr::null_mut()), 0);
}
}

View file

@ -0,0 +1,24 @@
// ignore-windows: Concurrency on Windows is not supported yet.
// Joining a detached thread is undefined behavior.
#![feature(rustc_private)]
extern crate libc;
use std::{mem, ptr};
extern "C" fn thread_start(_null: *mut libc::c_void) -> *mut libc::c_void {
ptr::null_mut()
}
fn main() {
unsafe {
let mut native: libc::pthread_t = mem::zeroed();
let attr: libc::pthread_attr_t = mem::zeroed();
// assert_eq!(libc::pthread_attr_init(&mut attr), 0); FIXME: this function is not yet implemented.
assert_eq!(libc::pthread_create(&mut native, &attr, thread_start, ptr::null_mut()), 0);
assert_eq!(libc::pthread_detach(native), 0);
assert_eq!(libc::pthread_join(native, ptr::null_mut()), 0); //~ ERROR: Undefined Behavior: trying to join a detached or already joined thread
}
}

View file

@ -0,0 +1,24 @@
// ignore-windows: Concurrency on Windows is not supported yet.
// Joining an already joined thread is undefined behavior.
#![feature(rustc_private)]
extern crate libc;
use std::{mem, ptr};
extern "C" fn thread_start(_null: *mut libc::c_void) -> *mut libc::c_void {
ptr::null_mut()
}
fn main() {
unsafe {
let mut native: libc::pthread_t = mem::zeroed();
let attr: libc::pthread_attr_t = mem::zeroed();
// assert_eq!(libc::pthread_attr_init(&mut attr), 0); FIXME: this function is not yet implemented.
assert_eq!(libc::pthread_create(&mut native, &attr, thread_start, ptr::null_mut()), 0);
assert_eq!(libc::pthread_join(native, ptr::null_mut()), 0);
assert_eq!(libc::pthread_join(native, ptr::null_mut()), 0); //~ ERROR: Undefined Behavior: trying to join a detached or already joined thread
}
}

View file

@ -0,0 +1,20 @@
// ignore-windows: Concurrency on Windows is not supported yet.
// Joining the main thread is undefined behavior.
#![feature(rustc_private)]
extern crate libc;
use std::{ptr, thread};
fn main() {
let thread_id: libc::pthread_t = unsafe { libc::pthread_self() };
let handle = thread::spawn(move || {
unsafe {
assert_eq!(libc::pthread_join(thread_id, ptr::null_mut()), 0); //~ ERROR: Undefined Behavior: trying to join a detached or already joined thread
}
});
thread::yield_now();
handle.join().unwrap();
}

View file

@ -0,0 +1,33 @@
// ignore-windows: Concurrency on Windows is not supported yet.
// Joining the same thread from multiple threads is undefined behavior.
#![feature(rustc_private)]
extern crate libc;
use std::thread;
use std::{mem, ptr};
extern "C" fn thread_start(_null: *mut libc::c_void) -> *mut libc::c_void {
// Yield the thread several times so that other threads can join it.
thread::yield_now();
thread::yield_now();
ptr::null_mut()
}
fn main() {
unsafe {
let mut native: libc::pthread_t = mem::zeroed();
let attr: libc::pthread_attr_t = mem::zeroed();
// assert_eq!(libc::pthread_attr_init(&mut attr), 0); FIXME: this function is not yet implemented.
assert_eq!(libc::pthread_create(&mut native, &attr, thread_start, ptr::null_mut()), 0);
let mut native_copy: libc::pthread_t = mem::zeroed();
ptr::copy_nonoverlapping(&native, &mut native_copy, 1);
let handle = thread::spawn(move || {
assert_eq!(libc::pthread_join(native_copy, ptr::null_mut()), 0); //~ ERROR: Undefined Behavior: trying to join a detached or already joined thread
});
assert_eq!(libc::pthread_join(native, ptr::null_mut()), 0);
handle.join().unwrap();
}
}

View file

@ -0,0 +1,20 @@
// ignore-windows: Concurrency on Windows is not supported yet.
// Joining itself is undefined behavior.
#![feature(rustc_private)]
extern crate libc;
use std::{ptr, thread};
fn main() {
let handle = thread::spawn(|| {
unsafe {
let native: libc::pthread_t = libc::pthread_self();
assert_eq!(libc::pthread_join(native, ptr::null_mut()), 0); //~ ERROR: Undefined Behavior: trying to join itself
}
});
thread::yield_now();
handle.join().unwrap();
}

View file

@ -1,3 +1,6 @@
// ignore-linux: Only Windows still lacks threading support.
// ignore-macos: Only Windows still lacks threading support.
use std::thread;
// error-pattern: Miri does not support threading

View file

@ -0,0 +1,32 @@
// ignore-windows: No libc on Windows
#![feature(rustc_private)]
extern crate libc;
use std::cell::UnsafeCell;
use std::sync::Arc;
use std::thread;
struct Mutex(UnsafeCell<libc::pthread_mutex_t>);
unsafe impl Send for Mutex {}
unsafe impl Sync for Mutex {}
fn new_lock() -> Arc<Mutex> {
Arc::new(Mutex(UnsafeCell::new(libc::PTHREAD_MUTEX_INITIALIZER)))
}
fn main() {
unsafe {
let lock = new_lock();
assert_eq!(libc::pthread_mutex_lock(lock.0.get() as *mut _), 0);
let lock_copy = lock.clone();
thread::spawn(move || {
assert_eq!(libc::pthread_mutex_lock(lock_copy.0.get() as *mut _), 0); //~ ERROR: deadlock
})
.join()
.unwrap();
}
}

View file

@ -0,0 +1,32 @@
// ignore-windows: No libc on Windows
#![feature(rustc_private)]
extern crate libc;
use std::cell::UnsafeCell;
use std::sync::Arc;
use std::thread;
struct Mutex(UnsafeCell<libc::pthread_mutex_t>);
unsafe impl Send for Mutex {}
unsafe impl Sync for Mutex {}
fn new_lock() -> Arc<Mutex> {
Arc::new(Mutex(UnsafeCell::new(libc::PTHREAD_MUTEX_INITIALIZER)))
}
fn main() {
unsafe {
let lock = new_lock();
assert_eq!(libc::pthread_mutex_lock(lock.0.get() as *mut _), 0);
let lock_copy = lock.clone();
thread::spawn(move || {
assert_eq!(libc::pthread_mutex_unlock(lock_copy.0.get() as *mut _), 0); //~ ERROR: Undefined Behavior: called pthread_mutex_unlock on a mutex owned by another thread
})
.join()
.unwrap();
}
}

View file

@ -4,10 +4,29 @@
extern crate libc;
use std::cell::UnsafeCell;
use std::sync::Arc;
use std::thread;
struct RwLock(UnsafeCell<libc::pthread_rwlock_t>);
unsafe impl Send for RwLock {}
unsafe impl Sync for RwLock {}
fn new_lock() -> Arc<RwLock> {
Arc::new(RwLock(UnsafeCell::new(libc::PTHREAD_RWLOCK_INITIALIZER)))
}
fn main() {
let rw = std::cell::UnsafeCell::new(libc::PTHREAD_RWLOCK_INITIALIZER);
unsafe {
assert_eq!(libc::pthread_rwlock_wrlock(rw.get()), 0);
libc::pthread_rwlock_rdlock(rw.get()); //~ ERROR: deadlock
let lock = new_lock();
assert_eq!(libc::pthread_rwlock_rdlock(lock.0.get() as *mut _), 0);
let lock_copy = lock.clone();
thread::spawn(move || {
assert_eq!(libc::pthread_rwlock_wrlock(lock_copy.0.get() as *mut _), 0); //~ ERROR: deadlock
})
.join()
.unwrap();
}
}

View file

@ -0,0 +1,13 @@
// ignore-windows: No libc on Windows
#![feature(rustc_private)]
extern crate libc;
fn main() {
let rw = std::cell::UnsafeCell::new(libc::PTHREAD_RWLOCK_INITIALIZER);
unsafe {
assert_eq!(libc::pthread_rwlock_wrlock(rw.get()), 0);
libc::pthread_rwlock_rdlock(rw.get()); //~ ERROR: deadlock
}
}

View file

@ -4,10 +4,29 @@
extern crate libc;
use std::cell::UnsafeCell;
use std::sync::Arc;
use std::thread;
struct RwLock(UnsafeCell<libc::pthread_rwlock_t>);
unsafe impl Send for RwLock {}
unsafe impl Sync for RwLock {}
fn new_lock() -> Arc<RwLock> {
Arc::new(RwLock(UnsafeCell::new(libc::PTHREAD_RWLOCK_INITIALIZER)))
}
fn main() {
let rw = std::cell::UnsafeCell::new(libc::PTHREAD_RWLOCK_INITIALIZER);
unsafe {
assert_eq!(libc::pthread_rwlock_wrlock(rw.get()), 0);
libc::pthread_rwlock_wrlock(rw.get()); //~ ERROR: deadlock
let lock = new_lock();
assert_eq!(libc::pthread_rwlock_wrlock(lock.0.get() as *mut _), 0);
let lock_copy = lock.clone();
thread::spawn(move || {
assert_eq!(libc::pthread_rwlock_wrlock(lock_copy.0.get() as *mut _), 0); //~ ERROR: deadlock
})
.join()
.unwrap();
}
}

View file

@ -0,0 +1,13 @@
// ignore-windows: No libc on Windows
#![feature(rustc_private)]
extern crate libc;
fn main() {
let rw = std::cell::UnsafeCell::new(libc::PTHREAD_RWLOCK_INITIALIZER);
unsafe {
assert_eq!(libc::pthread_rwlock_wrlock(rw.get()), 0);
libc::pthread_rwlock_wrlock(rw.get()); //~ ERROR: deadlock
}
}

View file

@ -0,0 +1,75 @@
// ignore-windows: Concurrency on Windows is not supported yet.
use std::sync::{Arc, Mutex, RwLock};
use std::thread;
fn check_mutex() {
let data = Arc::new(Mutex::new(0));
let mut threads = Vec::new();
for _ in 0..3 {
let data = Arc::clone(&data);
let thread = thread::spawn(move || {
let mut data = data.lock().unwrap();
thread::yield_now();
*data += 1;
});
threads.push(thread);
}
for thread in threads {
thread.join().unwrap();
}
assert!(data.try_lock().is_ok());
let data = Arc::try_unwrap(data).unwrap().into_inner().unwrap();
assert_eq!(data, 3);
}
fn check_rwlock_write() {
let data = Arc::new(RwLock::new(0));
let mut threads = Vec::new();
for _ in 0..3 {
let data = Arc::clone(&data);
let thread = thread::spawn(move || {
let mut data = data.write().unwrap();
thread::yield_now();
*data += 1;
});
threads.push(thread);
}
for thread in threads {
thread.join().unwrap();
}
assert!(data.try_write().is_ok());
let data = Arc::try_unwrap(data).unwrap().into_inner().unwrap();
assert_eq!(data, 3);
}
fn check_rwlock_read_no_deadlock() {
let l1 = Arc::new(RwLock::new(0));
let l2 = Arc::new(RwLock::new(0));
let l1_copy = Arc::clone(&l1);
let l2_copy = Arc::clone(&l2);
let _guard1 = l1.read().unwrap();
let handle = thread::spawn(move || {
let _guard2 = l2_copy.read().unwrap();
thread::yield_now();
let _guard1 = l1_copy.read().unwrap();
});
thread::yield_now();
let _guard2 = l2.read().unwrap();
handle.join().unwrap();
}
fn main() {
check_mutex();
check_rwlock_write();
check_rwlock_read_no_deadlock();
}

View file

@ -0,0 +1,2 @@
warning: thread support is experimental. For example, Miri does not detect data races yet.

View file

@ -0,0 +1,61 @@
// ignore-windows: Concurrency on Windows is not supported yet.
use std::thread;
fn create_and_detach() {
thread::spawn(|| ());
}
fn create_and_join() {
thread::spawn(|| ()).join().unwrap();
}
fn create_and_get_result() {
let nine = thread::spawn(|| 5 + 4).join().unwrap();
assert_eq!(nine, 9);
}
fn create_and_leak_result() {
thread::spawn(|| 7);
}
fn create_nested_and_detach() {
thread::spawn(|| {
thread::spawn(|| ());
});
}
fn create_nested_and_join() {
let handle = thread::spawn(|| thread::spawn(|| ()));
let handle_nested = handle.join().unwrap();
handle_nested.join().unwrap();
}
fn create_move_in() {
let x = String::from("Hello!");
thread::spawn(move || {
assert_eq!(x.len(), 6);
})
.join()
.unwrap();
}
fn create_move_out() {
let result = thread::spawn(|| {
String::from("Hello!")
})
.join()
.unwrap();
assert_eq!(result.len(), 6);
}
fn main() {
create_and_detach();
create_and_join();
create_and_get_result();
create_and_leak_result();
create_nested_and_detach();
create_nested_and_join();
create_move_in();
create_move_out();
}

View file

@ -0,0 +1,2 @@
warning: thread support is experimental. For example, Miri does not detect data races yet.

View file

@ -0,0 +1,59 @@
// ignore-windows: Concurrency on Windows is not supported yet.
//! The main purpose of this test is to check that if we take a pointer to
//! thread `t1`'s thread-local `A` and send it to another thread `t2`,
//! dereferencing the pointer on `t2` resolves to `t1`'s thread-local. In this
//! test, we also check that thread-locals act as per-thread statics.
#![feature(thread_local)]
use std::thread;
#[thread_local]
static mut A: u8 = 0;
#[thread_local]
static mut B: u8 = 0;
static mut C: u8 = 0;
unsafe fn get_a_ref() -> *mut u8 {
&mut A
}
struct Sender(*mut u8);
unsafe impl Send for Sender {}
fn main() {
let ptr = unsafe {
let x = get_a_ref();
*x = 5;
assert_eq!(A, 5);
B = 15;
C = 25;
Sender(&mut A)
};
thread::spawn(move || unsafe {
assert_eq!(*ptr.0, 5);
assert_eq!(A, 0);
assert_eq!(B, 0);
assert_eq!(C, 25);
B = 14;
C = 24;
let y = get_a_ref();
assert_eq!(*y, 0);
*y = 4;
assert_eq!(*ptr.0, 5);
assert_eq!(A, 4);
assert_eq!(*get_a_ref(), 4);
})
.join()
.unwrap();
unsafe {
assert_eq!(*get_a_ref(), 5);
assert_eq!(A, 5);
assert_eq!(B, 15);
assert_eq!(C, 24);
}
}

View file

@ -0,0 +1,2 @@
warning: thread support is experimental. For example, Miri does not detect data races yet.

View file

@ -0,0 +1,70 @@
// ignore-windows: Concurrency on Windows is not supported yet.
use std::cell::RefCell;
use std::thread;
struct TestCell {
value: RefCell<u8>,
}
impl Drop for TestCell {
fn drop(&mut self) {
println!("Dropping: {}", self.value.borrow())
}
}
thread_local! {
static A: TestCell = TestCell { value: RefCell::new(0) };
}
/// Check that destructors of the library thread locals are executed immediately
/// after a thread terminates.
fn check_destructors() {
thread::spawn(|| {
A.with(|f| {
assert_eq!(*f.value.borrow(), 0);
*f.value.borrow_mut() = 5;
});
})
.join()
.unwrap();
println!("Continue main.")
}
struct JoinCell {
value: RefCell<Option<thread::JoinHandle<u8>>>,
}
impl Drop for JoinCell {
fn drop(&mut self) {
let join_handle = self.value.borrow_mut().take().unwrap();
println!("Joining: {}", join_handle.join().unwrap());
}
}
thread_local! {
static B: JoinCell = JoinCell { value: RefCell::new(None) };
}
/// Check that the destructor can be blocked joining another thread.
fn check_blocking() {
thread::spawn(|| {
B.with(|f| {
assert!(f.value.borrow().is_none());
let handle = thread::spawn(|| 7);
*f.value.borrow_mut() = Some(handle);
});
})
.join()
.unwrap();
println!("Continue main 2.");
// Preempt the main thread so that the destructor gets executed and can join
// the thread.
thread::yield_now();
thread::yield_now();
}
fn main() {
check_destructors();
check_blocking();
}

View file

@ -0,0 +1,2 @@
warning: thread support is experimental. For example, Miri does not detect data races yet.

View file

@ -0,0 +1,4 @@
Dropping: 5
Continue main.
Continue main 2.
Joining: 7

View file

@ -0,0 +1,25 @@
//! Check that destructors of the thread locals are executed on all OSes.
use std::cell::RefCell;
struct TestCell {
value: RefCell<u8>,
}
impl Drop for TestCell {
fn drop(&mut self) {
eprintln!("Dropping: {}", self.value.borrow())
}
}
thread_local! {
static A: TestCell = TestCell { value: RefCell::new(0) };
}
fn main() {
A.with(|f| {
assert_eq!(*f.value.borrow(), 0);
*f.value.borrow_mut() = 5;
});
eprintln!("Continue main.")
}

View file

@ -0,0 +1,2 @@
Continue main.
Dropping: 5

View file

@ -141,6 +141,30 @@ fn test_rwlock_libc_static_initializer() {
}
}
/// Test whether the `prctl` shim correctly sets the thread name.
///
/// Note: `prctl` exists only on Linux.
#[cfg(target_os = "linux")]
fn test_prctl_thread_name() {
use std::ffi::CString;
use libc::c_long;
unsafe {
let mut buf = [255; 10];
assert_eq!(libc::prctl(libc::PR_GET_NAME, buf.as_mut_ptr() as c_long, 0 as c_long, 0 as c_long, 0 as c_long), 0);
assert_eq!(b"<unnamed>\0", &buf);
let thread_name = CString::new("hello").expect("CString::new failed");
assert_eq!(libc::prctl(libc::PR_SET_NAME, thread_name.as_ptr() as c_long, 0 as c_long, 0 as c_long, 0 as c_long), 0);
let mut buf = [255; 6];
assert_eq!(libc::prctl(libc::PR_GET_NAME, buf.as_mut_ptr() as c_long, 0 as c_long, 0 as c_long, 0 as c_long), 0);
assert_eq!(b"hello\0", &buf);
let long_thread_name = CString::new("01234567890123456789").expect("CString::new failed");
assert_eq!(libc::prctl(libc::PR_SET_NAME, long_thread_name.as_ptr() as c_long, 0 as c_long, 0 as c_long, 0 as c_long), 0);
let mut buf = [255; 16];
assert_eq!(libc::prctl(libc::PR_GET_NAME, buf.as_mut_ptr() as c_long, 0 as c_long, 0 as c_long, 0 as c_long), 0);
assert_eq!(b"012345678901234\0", &buf);
}
}
fn main() {
#[cfg(target_os = "linux")]
test_posix_fadvise();
@ -152,4 +176,7 @@ fn main() {
#[cfg(target_os = "linux")]
test_mutex_libc_static_initializer_recursive();
#[cfg(target_os = "linux")]
test_prctl_thread_name();
}