Auto merge of #127726 - RalfJung:miri-sync, r=RalfJung

Miri subtree update

r? `@ghost`
This commit is contained in:
bors 2024-07-14 21:14:02 +00:00
commit d9284afea9
45 changed files with 1526 additions and 272 deletions

View file

@ -104,9 +104,17 @@ pub fn phase_cargo_miri(mut args: impl Iterator<Item = String>) {
miri_for_host()
)
});
let host = &rustc_version.host;
let target = get_arg_flag_value("--target");
let target = target.as_ref().unwrap_or(host);
let mut targets = get_arg_flag_values("--target").collect::<Vec<_>>();
// If `targets` is empty, we need to add a `--target $HOST` flag ourselves, and also ensure
// that the host target is indeed set up.
let target_flag = if targets.is_empty() {
let host = &rustc_version.host;
targets.push(host.clone());
Some(host)
} else {
// We don't need to add a `--target` flag, we just forward the user's flags.
None
};
// If cleaning the target directory & sysroot cache,
// delete them then exit. There is no reason to setup a new
@ -118,8 +126,11 @@ pub fn phase_cargo_miri(mut args: impl Iterator<Item = String>) {
return;
}
// We always setup.
let miri_sysroot = setup(&subcommand, target, &rustc_version, verbose, quiet);
for target in &targets {
// We always setup.
setup(&subcommand, target.as_str(), &rustc_version, verbose, quiet);
}
let miri_sysroot = get_sysroot_dir();
// Invoke actual cargo for the job, but with different flags.
// We re-use `cargo test` and `cargo run`, which makes target and binary handling very easy but
@ -155,10 +166,9 @@ pub fn phase_cargo_miri(mut args: impl Iterator<Item = String>) {
// This is needed to make the `target.runner` settings do something,
// and it later helps us detect which crates are proc-macro/build-script
// (host crates) and which crates are needed for the program itself.
if get_arg_flag_value("--target").is_none() {
// No target given. Explicitly pick the host.
if let Some(target_flag) = target_flag {
cmd.arg("--target");
cmd.arg(host);
cmd.arg(target_flag);
}
// Set ourselves as runner for all binaries invoked by cargo.

View file

@ -41,9 +41,11 @@ function run_tests {
if [ -n "${TEST_TARGET-}" ]; then
begingroup "Testing foreign architecture $TEST_TARGET"
TARGET_FLAG="--target $TEST_TARGET"
MULTI_TARGET_FLAG=""
else
begingroup "Testing host architecture"
TARGET_FLAG=""
MULTI_TARGET_FLAG="--multi-target"
fi
## ui test suite
@ -93,7 +95,7 @@ function run_tests {
echo 'build.rustc-wrapper = "thisdoesnotexist"' > .cargo/config.toml
fi
# Run the actual test
time ${PYTHON} test-cargo-miri/run-test.py $TARGET_FLAG
time ${PYTHON} test-cargo-miri/run-test.py $TARGET_FLAG $MULTI_TARGET_FLAG
# Clean up
unset RUSTC MIRI
rm -rf .cargo

View file

@ -1 +1 @@
66b4f0021bfb11a8c20d084c99a40f4a78ce1d38
99b7134389e9766462601a2fc4013840b9d31745

View file

@ -592,6 +592,9 @@ fn main() {
let num_cpus = param
.parse::<u32>()
.unwrap_or_else(|err| show_error!("-Zmiri-num-cpus requires a `u32`: {}", err));
if !(1..=miri::MAX_CPUS).contains(&usize::try_from(num_cpus).unwrap()) {
show_error!("-Zmiri-num-cpus must be in the range 1..={}", miri::MAX_CPUS);
}
miri_config.num_cpus = num_cpus;
} else if let Some(param) = arg.strip_prefix("-Zmiri-force-page-size=") {
let page_size = param.parse::<u64>().unwrap_or_else(|err| {

View file

@ -136,8 +136,16 @@ impl StackCache {
impl PartialEq for Stack {
fn eq(&self, other: &Self) -> bool {
// The semantics of Stack are in `self.borrows` and `self.unknown_bottom`; everything else is caching
self.borrows == other.borrows
let Stack {
borrows,
unknown_bottom,
// The cache is ignored for comparison.
#[cfg(feature = "stack-cache")]
cache: _,
#[cfg(feature = "stack-cache")]
unique_range: _,
} = self;
*borrows == other.borrows && *unknown_bottom == other.unknown_bottom
}
}
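As an aside on the pattern used here: exhaustively destructuring `self` is a common Rust idiom for `PartialEq` impls that intentionally skip fields, because adding a new field to the struct later makes the impl fail to compile until the new field is either compared or explicitly ignored. A minimal standalone sketch of the idiom on a hypothetical type (not Miri code):

struct Cached(u64); // derived data, intentionally irrelevant for equality

struct Example {
    data: Vec<u8>,
    cache: Cached,
}

impl PartialEq for Example {
    fn eq(&self, other: &Self) -> bool {
        // Destructure exhaustively: adding a field to `Example` later is a
        // compile error here until it is compared or explicitly ignored.
        let Example { data, cache: _ } = self;
        *data == other.data
    }
}

fn main() {
    let a = Example { data: vec![1, 2], cache: Cached(0) };
    let b = Example { data: vec![1, 2], cache: Cached(7) };
    assert!(a == b); // caches differ, but equality only looks at `data`
}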

View file

@ -0,0 +1,90 @@
use rustc_middle::ty::layout::LayoutOf;
use rustc_target::abi::Endian;
use crate::*;
/// The maximum number of CPUs supported by miri.
///
/// This value is compatible with the libc `CPU_SETSIZE` constant and corresponds to the number
/// of CPUs that a `cpu_set_t` can contain.
///
/// Real machines can have more CPUs than this number, and there exist APIs to set their affinity,
/// but this is not currently supported by miri.
pub const MAX_CPUS: usize = 1024;
/// A thread's CPU affinity mask determines the set of CPUs on which it is eligible to run.
// the actual representation depends on the target's endianness and pointer width.
// See CpuAffinityMask::set for details
#[derive(Clone)]
pub(crate) struct CpuAffinityMask([u8; Self::CPU_MASK_BYTES]);
impl CpuAffinityMask {
pub(crate) const CPU_MASK_BYTES: usize = MAX_CPUS / 8;
pub fn new<'tcx>(cx: &impl LayoutOf<'tcx>, cpu_count: u32) -> Self {
let mut this = Self([0; Self::CPU_MASK_BYTES]);
// the default affinity mask includes only the available CPUs
for i in 0..cpu_count as usize {
this.set(cx, i);
}
this
}
pub fn chunk_size<'tcx>(cx: &impl LayoutOf<'tcx>) -> u64 {
// The actual representation of the CpuAffinityMask is [c_ulong; _].
let ulong = helpers::path_ty_layout(cx, &["core", "ffi", "c_ulong"]);
ulong.size.bytes()
}
fn set<'tcx>(&mut self, cx: &impl LayoutOf<'tcx>, cpu: usize) {
// we silently ignore CPUs that are out of bounds. This matches the behavior of
// `sched_setaffinity` with a mask that specifies more than `CPU_SETSIZE` CPUs.
if cpu >= MAX_CPUS {
return;
}
// The actual representation of the CpuAffinityMask is [c_ulong; _].
// Within the array elements, we need to use the endianness of the target.
let target = &cx.tcx().sess.target;
match Self::chunk_size(cx) {
4 => {
let start = cpu / 32 * 4; // first byte of the correct u32
let chunk = self.0[start..].first_chunk_mut::<4>().unwrap();
let offset = cpu % 32;
*chunk = match target.options.endian {
Endian::Little => (u32::from_le_bytes(*chunk) | 1 << offset).to_le_bytes(),
Endian::Big => (u32::from_be_bytes(*chunk) | 1 << offset).to_be_bytes(),
};
}
8 => {
let start = cpu / 64 * 8; // first byte of the correct u64
let chunk = self.0[start..].first_chunk_mut::<8>().unwrap();
let offset = cpu % 64;
*chunk = match target.options.endian {
Endian::Little => (u64::from_le_bytes(*chunk) | 1 << offset).to_le_bytes(),
Endian::Big => (u64::from_be_bytes(*chunk) | 1 << offset).to_be_bytes(),
};
}
other => bug!("chunk size not supported: {other}"),
};
}
pub fn as_slice(&self) -> &[u8] {
self.0.as_slice()
}
pub fn from_array<'tcx>(
cx: &impl LayoutOf<'tcx>,
cpu_count: u32,
bytes: [u8; Self::CPU_MASK_BYTES],
) -> Option<Self> {
// mask by what CPUs are actually available
let default = Self::new(cx, cpu_count);
let masked = std::array::from_fn(|i| bytes[i] & default.0[i]);
// at least one thread must be set for the input to be valid
masked.iter().any(|b| *b != 0).then_some(Self(masked))
}
}
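To make the chunked representation concrete: with `MAX_CPUS = 1024`, the mask occupies 1024 / 8 = 128 bytes (matching the 128-byte `cpu_set_t` exercised by the tests below), and CPU `n` lives at bit `n % 64` of the chunk starting at byte `n / 64 * 8`, assuming an 8-byte `c_ulong`. A small standalone sketch of that mapping for a little-endian target (illustration only, not Miri code):

const MAX_CPUS: usize = 1024;
const CPU_MASK_BYTES: usize = MAX_CPUS / 8; // 128 bytes

// Set the bit for `cpu`, assuming little-endian, 8-byte c_ulong chunks.
fn set_cpu(mask: &mut [u8; CPU_MASK_BYTES], cpu: usize) {
    let start = cpu / 64 * 8; // first byte of the chunk containing `cpu`
    let offset = cpu % 64; // bit position within that chunk
    let chunk: &mut [u8; 8] = (&mut mask[start..start + 8]).try_into().unwrap();
    *chunk = (u64::from_le_bytes(*chunk) | 1 << offset).to_le_bytes();
}

fn main() {
    let mut mask = [0u8; CPU_MASK_BYTES];
    set_cpu(&mut mask, 0); // bit 0 of byte 0
    set_cpu(&mut mask, 65); // bit 1 of byte 8 (second chunk)
    assert_eq!(mask[0], 0b0000_0001);
    assert_eq!(mask[8], 0b0000_0010);
}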

View file

@ -1,3 +1,4 @@
pub mod cpu_affinity;
pub mod data_race;
pub mod init_once;
mod range_object_map;

View file

@ -269,7 +269,7 @@ pub(super) trait EvalContextExtPriv<'tcx>: crate::MiriInterpCxExt<'tcx> {
let this = self.eval_context_mut();
if this.mutex_is_locked(mutex) {
assert_ne!(this.mutex_get_owner(mutex), this.active_thread());
this.mutex_enqueue_and_block(mutex, retval, dest);
this.mutex_enqueue_and_block(mutex, Some((retval, dest)));
} else {
// We can have it right now!
this.mutex_lock(mutex);
@ -390,9 +390,15 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
}
/// Put the thread into the queue waiting for the mutex.
/// Once the Mutex becomes available, `retval` will be written to `dest`.
///
/// Once the Mutex becomes available and `retval_dest` is `Some`, `retval_dest.0`
/// will be written to `retval_dest.1`.
#[inline]
fn mutex_enqueue_and_block(&mut self, id: MutexId, retval: Scalar, dest: MPlaceTy<'tcx>) {
fn mutex_enqueue_and_block(
&mut self,
id: MutexId,
retval_dest: Option<(Scalar, MPlaceTy<'tcx>)>,
) {
let this = self.eval_context_mut();
assert!(this.mutex_is_locked(id), "queuing on unlocked mutex");
let thread = this.active_thread();
@ -403,13 +409,16 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
callback!(
@capture<'tcx> {
id: MutexId,
retval: Scalar,
dest: MPlaceTy<'tcx>,
retval_dest: Option<(Scalar, MPlaceTy<'tcx>)>,
}
@unblock = |this| {
assert!(!this.mutex_is_locked(id));
this.mutex_lock(id);
this.write_scalar(retval, &dest)?;
if let Some((retval, dest)) = retval_dest {
this.write_scalar(retval, &dest)?;
}
Ok(())
}
),

View file

@ -936,6 +936,11 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
// After this all accesses will be treated as occurring in the new thread.
let old_thread_id = this.machine.threads.set_active_thread_id(new_thread_id);
// The child inherits its parent's cpu affinity.
if let Some(cpuset) = this.machine.thread_cpu_affinity.get(&old_thread_id).cloned() {
this.machine.thread_cpu_affinity.insert(new_thread_id, cpuset);
}
// Perform the function pointer load in the new thread frame.
let instance = this.get_ptr_fn(start_routine)?.as_instance()?;

View file

@ -282,7 +282,8 @@ pub fn create_ecx<'tcx>(
})?;
// Make sure we have MIR. We check MIR for some stable monomorphic function in libcore.
let sentinel = ecx.try_resolve_path(&["core", "ascii", "escape_default"], Namespace::ValueNS);
let sentinel =
helpers::try_resolve_path(tcx, &["core", "ascii", "escape_default"], Namespace::ValueNS);
if !matches!(sentinel, Some(s) if tcx.is_mir_available(s.def.def_id())) {
tcx.dcx().fatal(
"the current sysroot was built without `-Zalways-encode-mir`, or libcore seems missing. \

View file

@ -18,6 +18,7 @@ use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrFlags;
use rustc_middle::middle::dependency_format::Linkage;
use rustc_middle::middle::exported_symbols::ExportedSymbol;
use rustc_middle::mir;
use rustc_middle::ty::layout::MaybeResult;
use rustc_middle::ty::{
self,
layout::{LayoutOf, TyAndLayout},
@ -159,6 +160,35 @@ fn try_resolve_did(tcx: TyCtxt<'_>, path: &[&str], namespace: Option<Namespace>)
None
}
/// Gets an instance for a path; fails gracefully if the path does not exist.
pub fn try_resolve_path<'tcx>(
tcx: TyCtxt<'tcx>,
path: &[&str],
namespace: Namespace,
) -> Option<ty::Instance<'tcx>> {
let did = try_resolve_did(tcx, path, Some(namespace))?;
Some(ty::Instance::mono(tcx, did))
}
/// Gets an instance for a path.
#[track_caller]
pub fn resolve_path<'tcx>(
tcx: TyCtxt<'tcx>,
path: &[&str],
namespace: Namespace,
) -> ty::Instance<'tcx> {
try_resolve_path(tcx, path, namespace)
.unwrap_or_else(|| panic!("failed to find required Rust item: {path:?}"))
}
/// Gets the layout of a type at a path.
#[track_caller]
pub fn path_ty_layout<'tcx>(cx: &impl LayoutOf<'tcx>, path: &[&str]) -> TyAndLayout<'tcx> {
let ty =
resolve_path(cx.tcx(), path, Namespace::TypeNS).ty(cx.tcx(), ty::ParamEnv::reveal_all());
cx.layout_of(ty).to_result().ok().unwrap()
}
/// Call `f` for each exported symbol.
pub fn iter_exported_symbols<'tcx>(
tcx: TyCtxt<'tcx>,
@ -259,23 +289,10 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
try_resolve_did(*self.eval_context_ref().tcx, path, None).is_some()
}
/// Gets an instance for a path; fails gracefully if the path does not exist.
fn try_resolve_path(&self, path: &[&str], namespace: Namespace) -> Option<ty::Instance<'tcx>> {
let tcx = self.eval_context_ref().tcx.tcx;
let did = try_resolve_did(tcx, path, Some(namespace))?;
Some(ty::Instance::mono(tcx, did))
}
/// Gets an instance for a path.
fn resolve_path(&self, path: &[&str], namespace: Namespace) -> ty::Instance<'tcx> {
self.try_resolve_path(path, namespace)
.unwrap_or_else(|| panic!("failed to find required Rust item: {path:?}"))
}
/// Evaluates the scalar at the specified path.
fn eval_path(&self, path: &[&str]) -> OpTy<'tcx> {
let this = self.eval_context_ref();
let instance = this.resolve_path(path, Namespace::ValueNS);
let instance = resolve_path(*this.tcx, path, Namespace::ValueNS);
// We don't give a span -- this isn't actually used directly by the program anyway.
let const_val = this.eval_global(instance).unwrap_or_else(|err| {
panic!("failed to evaluate required Rust item: {path:?}\n{err:?}")
@ -344,19 +361,13 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
"`libc` crate is not reliably available on Windows targets; Miri should not use it there"
);
}
let ty = this
.resolve_path(&["libc", name], Namespace::TypeNS)
.ty(*this.tcx, ty::ParamEnv::reveal_all());
this.layout_of(ty).unwrap()
path_ty_layout(this, &["libc", name])
}
/// Helper function to get the `TyAndLayout` of a `windows` type
fn windows_ty_layout(&self, name: &str) -> TyAndLayout<'tcx> {
let this = self.eval_context_ref();
let ty = this
.resolve_path(&["std", "sys", "pal", "windows", "c", name], Namespace::TypeNS)
.ty(*this.tcx, ty::ParamEnv::reveal_all());
this.layout_of(ty).unwrap()
path_ty_layout(this, &["std", "sys", "pal", "windows", "c", name])
}
/// Project to the given *named* field (which must be a struct or union type).

View file

@ -392,10 +392,10 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
bug!("float_finite: non-float input type {}", x.layout.ty)
};
Ok(match fty {
FloatTy::F16 => unimplemented!("f16_f128"),
FloatTy::F16 => x.to_scalar().to_f16()?.is_finite(),
FloatTy::F32 => x.to_scalar().to_f32()?.is_finite(),
FloatTy::F64 => x.to_scalar().to_f64()?.is_finite(),
FloatTy::F128 => unimplemented!("f16_f128"),
FloatTy::F128 => x.to_scalar().to_f128()?.is_finite(),
})
};
match (float_finite(&a)?, float_finite(&b)?) {

View file

@ -129,6 +129,7 @@ pub use crate::borrow_tracker::{
};
pub use crate::clock::{Clock, Instant};
pub use crate::concurrency::{
cpu_affinity::MAX_CPUS,
data_race::{AtomicFenceOrd, AtomicReadOrd, AtomicRwOrd, AtomicWriteOrd, EvalContextExt as _},
init_once::{EvalContextExt as _, InitOnceId},
sync::{CondvarId, EvalContextExt as _, MutexId, RwLockId, SynchronizationObjects},

View file

@ -30,6 +30,7 @@ use rustc_target::spec::abi::Abi;
use crate::{
concurrency::{
cpu_affinity::{self, CpuAffinityMask},
data_race::{self, NaReadType, NaWriteType},
weak_memory,
},
@ -471,6 +472,12 @@ pub struct MiriMachine<'tcx> {
/// The set of threads.
pub(crate) threads: ThreadManager<'tcx>,
/// Stores which thread is eligible to run on which CPUs.
/// This has no effect at all; it is just tracked to produce the correct result
/// in `sched_getaffinity`.
pub(crate) thread_cpu_affinity: FxHashMap<ThreadId, CpuAffinityMask>,
/// The state of the primitive synchronization objects.
pub(crate) sync: SynchronizationObjects,
@ -627,6 +634,18 @@ impl<'tcx> MiriMachine<'tcx> {
let stack_addr = if tcx.pointer_size().bits() < 32 { page_size } else { page_size * 32 };
let stack_size =
if tcx.pointer_size().bits() < 32 { page_size * 4 } else { page_size * 16 };
assert!(
usize::try_from(config.num_cpus).unwrap() <= cpu_affinity::MAX_CPUS,
"miri only supports up to {} CPUs, but {} were configured",
cpu_affinity::MAX_CPUS,
config.num_cpus
);
let threads = ThreadManager::default();
let mut thread_cpu_affinity = FxHashMap::default();
if matches!(&*tcx.sess.target.os, "linux" | "freebsd" | "android") {
thread_cpu_affinity
.insert(threads.active_thread(), CpuAffinityMask::new(&layout_cx, config.num_cpus));
}
MiriMachine {
tcx,
borrow_tracker,
@ -644,7 +663,8 @@ impl<'tcx> MiriMachine<'tcx> {
fds: shims::FdTable::new(config.mute_stdout_stderr),
dirs: Default::default(),
layouts,
threads: ThreadManager::default(),
threads,
thread_cpu_affinity,
sync: SynchronizationObjects::default(),
static_roots: Vec::new(),
profiler,
@ -765,6 +785,7 @@ impl VisitProvenance for MiriMachine<'_> {
#[rustfmt::skip]
let MiriMachine {
threads,
thread_cpu_affinity: _,
sync: _,
tls,
env_vars,

View file

@ -30,6 +30,17 @@ impl<T: VisitProvenance> VisitProvenance for Option<T> {
}
}
impl<A, B> VisitProvenance for (A, B)
where
A: VisitProvenance,
B: VisitProvenance,
{
fn visit_provenance(&self, visit: &mut VisitWith<'_>) {
self.0.visit_provenance(visit);
self.1.visit_provenance(visit);
}
}
impl<T: VisitProvenance> VisitProvenance for std::cell::RefCell<T> {
fn visit_provenance(&self, visit: &mut VisitWith<'_>) {
self.borrow().visit_provenance(visit)

View file

@ -36,9 +36,9 @@ pub struct TlsData<'tcx> {
/// pthreads-style thread-local storage.
keys: BTreeMap<TlsKey, TlsEntry<'tcx>>,
/// A single per thread destructor of the thread local storage (that's how
/// things work on macOS) with a data argument.
macos_thread_dtors: BTreeMap<ThreadId, (ty::Instance<'tcx>, Scalar)>,
/// On macOS, each thread holds a list of destructor functions with their
/// respective data arguments.
macos_thread_dtors: BTreeMap<ThreadId, Vec<(ty::Instance<'tcx>, Scalar)>>,
}
impl<'tcx> Default for TlsData<'tcx> {
@ -119,26 +119,15 @@ impl<'tcx> TlsData<'tcx> {
}
}
/// Set the thread wide destructor of the thread local storage for the given
/// thread. This function is used to implement `_tlv_atexit` shim on MacOS.
///
/// Thread wide dtors are available only on MacOS. There is one destructor
/// per thread as can be guessed from the following comment in the
/// [`_tlv_atexit`
/// implementation](https://github.com/opensource-apple/dyld/blob/195030646877261f0c8c7ad8b001f52d6a26f514/src/threadLocalVariables.c#L389):
///
/// NOTE: this does not need locks because it only operates on current thread data
pub fn set_macos_thread_dtor(
/// Add a thread local storage destructor for the given thread. This function
/// is used to implement the `_tlv_atexit` shim on MacOS.
pub fn add_macos_thread_dtor(
&mut self,
thread: ThreadId,
dtor: ty::Instance<'tcx>,
data: Scalar,
) -> InterpResult<'tcx> {
if self.macos_thread_dtors.insert(thread, (dtor, data)).is_some() {
throw_unsup_format!(
"setting more than one thread local storage destructor for the same thread is not supported"
);
}
self.macos_thread_dtors.entry(thread).or_default().push((dtor, data));
Ok(())
}
@ -202,6 +191,10 @@ impl<'tcx> TlsData<'tcx> {
for TlsEntry { data, .. } in self.keys.values_mut() {
data.remove(&thread_id);
}
if let Some(dtors) = self.macos_thread_dtors.remove(&thread_id) {
assert!(dtors.is_empty(), "the destructors should have already been run");
}
}
}
@ -212,7 +205,7 @@ impl VisitProvenance for TlsData<'_> {
for scalar in keys.values().flat_map(|v| v.data.values()) {
scalar.visit_provenance(visit);
}
for (_, scalar) in macos_thread_dtors.values() {
for (_, scalar) in macos_thread_dtors.values().flatten() {
scalar.visit_provenance(visit);
}
}
@ -225,6 +218,7 @@ pub struct TlsDtorsState<'tcx>(TlsDtorsStatePriv<'tcx>);
enum TlsDtorsStatePriv<'tcx> {
#[default]
Init,
MacOsDtors,
PthreadDtors(RunningDtorState),
/// For Windows Dtors, we store the list of functions that we still have to call.
/// These are functions from the magic `.CRT$XLB` linker section.
@ -243,11 +237,10 @@ impl<'tcx> TlsDtorsState<'tcx> {
Init => {
match this.tcx.sess.target.os.as_ref() {
"macos" => {
// The macOS thread wide destructor runs "before any TLS slots get
// freed", so do that first.
this.schedule_macos_tls_dtor()?;
// When that destructor is done, go on with the pthread dtors.
break 'new_state PthreadDtors(Default::default());
// macOS has a _tlv_atexit function that allows
// registering destructors without associated keys.
// These are run first.
break 'new_state MacOsDtors;
}
_ if this.target_os_is_unix() => {
// All other Unixes directly jump to running the pthread dtors.
@ -266,6 +259,14 @@ impl<'tcx> TlsDtorsState<'tcx> {
}
}
}
MacOsDtors => {
match this.schedule_macos_tls_dtor()? {
Poll::Pending => return Ok(Poll::Pending),
// After all macOS destructors are run, the system switches
// to destroying the pthread destructors.
Poll::Ready(()) => break 'new_state PthreadDtors(Default::default()),
}
}
PthreadDtors(state) => {
match this.schedule_next_pthread_tls_dtor(state)? {
Poll::Pending => return Ok(Poll::Pending), // just keep going
@ -328,12 +329,15 @@ trait EvalContextPrivExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
Ok(())
}
/// Schedule the MacOS thread destructor of the thread local storage to be
/// executed.
fn schedule_macos_tls_dtor(&mut self) -> InterpResult<'tcx> {
/// Schedule the macOS thread local storage destructors to be executed.
fn schedule_macos_tls_dtor(&mut self) -> InterpResult<'tcx, Poll<()>> {
let this = self.eval_context_mut();
let thread_id = this.active_thread();
if let Some((instance, data)) = this.machine.tls.macos_thread_dtors.remove(&thread_id) {
// macOS keeps track of TLS destructors in a stack. If a destructor
// registers another destructor, it will be run next.
// See https://github.com/apple-oss-distributions/dyld/blob/d552c40cd1de105f0ec95008e0e0c0972de43456/dyld/DyldRuntimeState.cpp#L2277
let dtor = this.machine.tls.macos_thread_dtors.get_mut(&thread_id).and_then(Vec::pop);
if let Some((instance, data)) = dtor {
trace!("Running macos dtor {:?} on {:?} at {:?}", instance, data, thread_id);
this.call_function(
@ -343,8 +347,11 @@ trait EvalContextPrivExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
None,
StackPopCleanup::Root { cleanup: true },
)?;
return Ok(Poll::Pending);
}
Ok(())
Ok(Poll::Ready(()))
}
/// Schedule a pthread TLS destructor. Returns `true` if found
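The stack-based scheduling described above can be illustrated with a tiny standalone model (not Miri code): a destructor registered while another destructor runs is popped, and therefore executed, next.

fn main() {
    // Registered destructors, identified by name; `_tlv_atexit` pushes onto this stack.
    let mut dtors = vec!["a", "b"];
    let mut order = Vec::new();
    while let Some(d) = dtors.pop() {
        order.push(d);
        if d == "b" {
            // "b" registers "c" while it runs, so "c" is executed next.
            dtors.push("c");
        }
    }
    assert_eq!(order, ["b", "c", "a"]);
}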

View file

@ -419,7 +419,12 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
match result {
Ok(read_bytes) => {
// If reading to `bytes` did not fail, we write those bytes to the buffer.
this.write_bytes_ptr(buf, bytes)?;
// Crucially, if fewer than `bytes.len()` bytes were read, only write
// that much into the output buffer!
this.write_bytes_ptr(
buf,
bytes[..usize::try_from(read_bytes).unwrap()].iter().copied(),
)?;
Ok(read_bytes)
}
Err(e) => {

View file

@ -3,8 +3,10 @@ use std::str;
use rustc_middle::ty::layout::LayoutOf;
use rustc_span::Symbol;
use rustc_target::abi::Size;
use rustc_target::spec::abi::Abi;
use crate::concurrency::cpu_affinity::CpuAffinityMask;
use crate::shims::alloc::EvalContextExt as _;
use crate::shims::unix::*;
use crate::*;
@ -571,6 +573,99 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
let result = this.nanosleep(req, rem)?;
this.write_scalar(Scalar::from_i32(result), dest)?;
}
"sched_getaffinity" => {
// Currently this function does not exist on all Unixes, e.g. on macOS.
if !matches!(&*this.tcx.sess.target.os, "linux" | "freebsd" | "android") {
throw_unsup_format!(
"`sched_getaffinity` is not supported on {}",
this.tcx.sess.target.os
);
}
let [pid, cpusetsize, mask] =
this.check_shim(abi, Abi::C { unwind: false }, link_name, args)?;
let pid = this.read_scalar(pid)?.to_u32()?;
let cpusetsize = this.read_target_usize(cpusetsize)?;
let mask = this.read_pointer(mask)?;
// TODO: when https://github.com/rust-lang/miri/issues/3730 is fixed this should use its notion of tid/pid
let thread_id = match pid {
0 => this.active_thread(),
_ => throw_unsup_format!("`sched_getaffinity` is only supported with a pid of 0 (indicating the current thread)"),
};
// The mask is stored in chunks, and the size must be a whole number of chunks.
let chunk_size = CpuAffinityMask::chunk_size(this);
if this.ptr_is_null(mask)? {
let einval = this.eval_libc("EFAULT");
this.set_last_error(einval)?;
this.write_scalar(Scalar::from_i32(-1), dest)?;
} else if cpusetsize == 0 || cpusetsize.checked_rem(chunk_size).unwrap() != 0 {
// we only copy whole chunks of size_of::<c_ulong>()
let einval = this.eval_libc("EINVAL");
this.set_last_error(einval)?;
this.write_scalar(Scalar::from_i32(-1), dest)?;
} else if let Some(cpuset) = this.machine.thread_cpu_affinity.get(&thread_id) {
let cpuset = cpuset.clone();
// we only copy whole chunks of size_of::<c_ulong>()
let byte_count = Ord::min(cpuset.as_slice().len(), cpusetsize.try_into().unwrap());
this.write_bytes_ptr(mask, cpuset.as_slice()[..byte_count].iter().copied())?;
this.write_scalar(Scalar::from_i32(0), dest)?;
} else {
// The thread whose ID is pid could not be found
let einval = this.eval_libc("ESRCH");
this.set_last_error(einval)?;
this.write_scalar(Scalar::from_i32(-1), dest)?;
}
}
"sched_setaffinity" => {
// Currently this function does not exist on all Unixes, e.g. on macOS.
if !matches!(&*this.tcx.sess.target.os, "linux" | "freebsd" | "android") {
throw_unsup_format!(
"`sched_setaffinity` is not supported on {}",
this.tcx.sess.target.os
);
}
let [pid, cpusetsize, mask] =
this.check_shim(abi, Abi::C { unwind: false }, link_name, args)?;
let pid = this.read_scalar(pid)?.to_u32()?;
let cpusetsize = this.read_target_usize(cpusetsize)?;
let mask = this.read_pointer(mask)?;
// TODO: when https://github.com/rust-lang/miri/issues/3730 is fixed this should use its notion of tid/pid
let thread_id = match pid {
0 => this.active_thread(),
_ => throw_unsup_format!("`sched_setaffinity` is only supported with a pid of 0 (indicating the current thread)"),
};
if this.ptr_is_null(mask)? {
let einval = this.eval_libc("EFAULT");
this.set_last_error(einval)?;
this.write_scalar(Scalar::from_i32(-1), dest)?;
} else {
// NOTE: cpusetsize might be smaller than `CpuAffinityMask::CPU_MASK_BYTES`.
// Any unspecified bytes are treated as zero here (none of the CPUs are configured).
// This is not exactly documented, so we assume that this is the behavior in practice.
let bits_slice = this.read_bytes_ptr_strip_provenance(mask, Size::from_bytes(cpusetsize))?;
// This ignores the bytes beyond `CpuAffinityMask::CPU_MASK_BYTES`
let bits_array: [u8; CpuAffinityMask::CPU_MASK_BYTES] =
std::array::from_fn(|i| bits_slice.get(i).copied().unwrap_or(0));
match CpuAffinityMask::from_array(this, this.machine.num_cpus, bits_array) {
Some(cpuset) => {
this.machine.thread_cpu_affinity.insert(thread_id, cpuset);
this.write_scalar(Scalar::from_i32(0), dest)?;
}
None => {
// The intersection between the mask and the available CPUs was empty.
let einval = this.eval_libc("EINVAL");
this.set_last_error(einval)?;
this.write_scalar(Scalar::from_i32(-1), dest)?;
}
}
}
}
// Miscellaneous
"isatty" => {

View file

@ -178,19 +178,6 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
this.write_scalar(Scalar::from_i32(SIGRTMAX), dest)?;
}
"sched_getaffinity" => {
// This shim isn't useful, aside from the fact that it makes `num_cpus`
// fall back to `sysconf` where it will successfully determine the number of CPUs.
let [pid, cpusetsize, mask] =
this.check_shim(abi, Abi::C { unwind: false }, link_name, args)?;
this.read_scalar(pid)?.to_i32()?;
this.read_target_usize(cpusetsize)?;
this.deref_pointer_as(mask, this.libc_ty_layout("cpu_set_t"))?;
// FIXME: we just return an error.
let einval = this.eval_libc("EINVAL");
this.set_last_error(einval)?;
this.write_scalar(Scalar::from_i32(-1), dest)?;
}
// Incomplete shims that we "stub out" just to get pre-main initialization code to work.
// These shims are enabled only when the caller is in the standard library.

View file

@ -1,6 +1,7 @@
use rustc_span::Symbol;
use rustc_target::spec::abi::Abi;
use super::sync::EvalContextExt as _;
use crate::shims::unix::*;
use crate::*;
@ -132,7 +133,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
let dtor = this.get_ptr_fn(dtor)?.as_instance()?;
let data = this.read_scalar(data)?;
let active_thread = this.active_thread();
this.machine.tls.set_macos_thread_dtor(active_thread, dtor, data)?;
this.machine.tls.add_macos_thread_dtor(active_thread, dtor, data)?;
}
// Querying system information
@ -174,6 +175,27 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
this.write_scalar(res, dest)?;
}
"os_unfair_lock_lock" => {
let [lock_op] = this.check_shim(abi, Abi::C { unwind: false }, link_name, args)?;
this.os_unfair_lock_lock(lock_op)?;
}
"os_unfair_lock_trylock" => {
let [lock_op] = this.check_shim(abi, Abi::C { unwind: false }, link_name, args)?;
this.os_unfair_lock_trylock(lock_op, dest)?;
}
"os_unfair_lock_unlock" => {
let [lock_op] = this.check_shim(abi, Abi::C { unwind: false }, link_name, args)?;
this.os_unfair_lock_unlock(lock_op)?;
}
"os_unfair_lock_assert_owner" => {
let [lock_op] = this.check_shim(abi, Abi::C { unwind: false }, link_name, args)?;
this.os_unfair_lock_assert_owner(lock_op)?;
}
"os_unfair_lock_assert_not_owner" => {
let [lock_op] = this.check_shim(abi, Abi::C { unwind: false }, link_name, args)?;
this.os_unfair_lock_assert_not_owner(lock_op)?;
}
_ => return Ok(EmulateItemResult::NotSupported),
};

View file

@ -1 +1,2 @@
pub mod foreign_items;
pub mod sync;

View file

@ -0,0 +1,107 @@
//! Contains macOS-specific synchronization functions.
//!
//! For `os_unfair_lock`, see the documentation
//! <https://developer.apple.com/documentation/os/synchronization?language=objc>
//! and in case of underspecification its implementation
//! <https://github.com/apple-oss-distributions/libplatform/blob/a00a4cc36da2110578bcf3b8eeeeb93dcc7f4e11/src/os/lock.c#L645>.
//!
//! Note that we don't emulate every edge-case behaviour of the locks. Notably,
//! we don't abort when locking a lock owned by a thread that has already exited
//! and we do not detect copying of the lock, but macOS doesn't guarantee anything
//! in that case either.
use crate::*;
impl<'tcx> EvalContextExtPriv<'tcx> for crate::MiriInterpCx<'tcx> {}
trait EvalContextExtPriv<'tcx>: crate::MiriInterpCxExt<'tcx> {
fn os_unfair_lock_getid(&mut self, lock_op: &OpTy<'tcx>) -> InterpResult<'tcx, MutexId> {
let this = self.eval_context_mut();
// os_unfair_lock holds a 32-bit value, is initialized with zero and
// must be assumed to be opaque. Therefore, we can just store our
// internal mutex ID in the structure without anyone noticing.
this.mutex_get_or_create_id(lock_op, this.libc_ty_layout("os_unfair_lock"), 0)
}
}
impl<'tcx> EvalContextExt<'tcx> for crate::MiriInterpCx<'tcx> {}
pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
fn os_unfair_lock_lock(&mut self, lock_op: &OpTy<'tcx>) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
let id = this.os_unfair_lock_getid(lock_op)?;
if this.mutex_is_locked(id) {
if this.mutex_get_owner(id) == this.active_thread() {
// Matching the current macOS implementation: abort on reentrant locking.
throw_machine_stop!(TerminationInfo::Abort(
"attempted to lock an os_unfair_lock that is already locked by the current thread".to_owned()
));
}
this.mutex_enqueue_and_block(id, None);
} else {
this.mutex_lock(id);
}
Ok(())
}
fn os_unfair_lock_trylock(
&mut self,
lock_op: &OpTy<'tcx>,
dest: &MPlaceTy<'tcx>,
) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
let id = this.os_unfair_lock_getid(lock_op)?;
if this.mutex_is_locked(id) {
// Contrary to the blocking lock function, this does not check for
// reentrancy.
this.write_scalar(Scalar::from_bool(false), dest)?;
} else {
this.mutex_lock(id);
this.write_scalar(Scalar::from_bool(true), dest)?;
}
Ok(())
}
fn os_unfair_lock_unlock(&mut self, lock_op: &OpTy<'tcx>) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
let id = this.os_unfair_lock_getid(lock_op)?;
if this.mutex_unlock(id)?.is_none() {
// Matching the current macOS implementation: abort.
throw_machine_stop!(TerminationInfo::Abort(
"attempted to unlock an os_unfair_lock not owned by the current thread".to_owned()
));
}
Ok(())
}
fn os_unfair_lock_assert_owner(&mut self, lock_op: &OpTy<'tcx>) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
let id = this.os_unfair_lock_getid(lock_op)?;
if !this.mutex_is_locked(id) || this.mutex_get_owner(id) != this.active_thread() {
throw_machine_stop!(TerminationInfo::Abort(
"called os_unfair_lock_assert_owner on an os_unfair_lock not owned by the current thread".to_owned()
));
}
Ok(())
}
fn os_unfair_lock_assert_not_owner(&mut self, lock_op: &OpTy<'tcx>) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
let id = this.os_unfair_lock_getid(lock_op)?;
if this.mutex_is_locked(id) && this.mutex_get_owner(id) == this.active_thread() {
throw_machine_stop!(TerminationInfo::Abort(
"called os_unfair_lock_assert_not_owner on an os_unfair_lock owned by the current thread".to_owned()
));
}
Ok(())
}
}
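The `os_unfair_lock_getid` trick above works because the lock word is a 32-bit opaque value initialized to zero, so Miri can lazily stash its own mutex ID in it. A rough standalone model of that idea (hypothetical types, not Miri's actual machinery):

#[derive(Default)]
struct MutexTable {
    locked: Vec<bool>, // emulated mutex states; index = id - 1
}

impl MutexTable {
    // Lazily assign an ID: 0 in the lock word means "no mutex created yet".
    fn get_or_create_id(&mut self, lock_word: &mut u32) -> usize {
        if *lock_word == 0 {
            self.locked.push(false);
            *lock_word = self.locked.len() as u32; // IDs start at 1
        }
        (*lock_word - 1) as usize
    }
}

fn main() {
    let mut table = MutexTable::default();
    let mut lock_word = 0u32; // OS_UNFAIR_LOCK_INIT is all zeroes
    let id = table.get_or_create_id(&mut lock_word);
    table.locked[id] = true; // "lock" it
    assert_eq!(lock_word, 1); // the internal ID now lives in the opaque word
    assert_eq!(table.get_or_create_id(&mut lock_word), id); // and is reused
}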

View file

@ -473,7 +473,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
let ret = if this.mutex_is_locked(id) {
let owner_thread = this.mutex_get_owner(id);
if owner_thread != this.active_thread() {
this.mutex_enqueue_and_block(id, Scalar::from_i32(0), dest.clone());
this.mutex_enqueue_and_block(id, Some((Scalar::from_i32(0), dest.clone())));
return Ok(());
} else {
// Trying to acquire the same mutex again.

View file

@ -22,12 +22,17 @@ def fail(msg):
print("\nTEST FAIL: {}".format(msg))
sys.exit(1)
def cargo_miri(cmd, quiet = True):
def cargo_miri(cmd, quiet = True, targets = None):
args = ["cargo", "miri", cmd] + CARGO_EXTRA_FLAGS
if quiet:
args += ["-q"]
if ARGS.target:
if targets is not None:
for target in targets:
args.extend(("--target", target))
elif ARGS.target is not None:
args += ["--target", ARGS.target]
return args
def normalize_stdout(str):
@ -186,10 +191,21 @@ def test_cargo_miri_test():
default_ref, "test.stderr-empty.ref",
env={'MIRIFLAGS': "-Zmiri-permissive-provenance"},
)
if ARGS.multi_target:
test_cargo_miri_multi_target()
def test_cargo_miri_multi_target():
test("`cargo miri test` (multiple targets)",
cargo_miri("test", targets = ["aarch64-unknown-linux-gnu", "s390x-unknown-linux-gnu"]),
"test.multiple_targets.stdout.ref", "test.stderr-empty.ref",
env={'MIRIFLAGS': "-Zmiri-permissive-provenance"},
)
args_parser = argparse.ArgumentParser(description='`cargo miri` testing')
args_parser.add_argument('--target', help='the target to test')
args_parser.add_argument('--bless', help='bless the reference files', action='store_true')
args_parser.add_argument('--multi-target', help='run tests related to multiple targets', action='store_true')
ARGS = args_parser.parse_args()
os.chdir(os.path.dirname(os.path.realpath(__file__)))

View file

@ -0,0 +1,22 @@
running 2 tests
..
test result: ok. 2 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out; finished in $TIME
running 2 tests
..
test result: ok. 2 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out; finished in $TIME
imported main
imported main
running 6 tests
...i..
test result: ok. 5 passed; 0 failed; 1 ignored; 0 measured; 0 filtered out; finished in $TIME
running 6 tests
...i..
test result: ok. 5 passed; 0 failed; 1 ignored; 0 measured; 0 filtered out; finished in $TIME

View file

@ -0,0 +1,13 @@
//@ only-target-darwin
use std::cell::UnsafeCell;
fn main() {
let lock = UnsafeCell::new(libc::OS_UNFAIR_LOCK_INIT);
unsafe {
libc::os_unfair_lock_lock(lock.get());
libc::os_unfair_lock_assert_not_owner(lock.get());
//~^ error: abnormal termination: called os_unfair_lock_assert_not_owner on an os_unfair_lock owned by the current thread
}
}

View file

@ -0,0 +1,13 @@
error: abnormal termination: called os_unfair_lock_assert_not_owner on an os_unfair_lock owned by the current thread
--> $DIR/apple_os_unfair_lock_assert_not_owner.rs:LL:CC
|
LL | libc::os_unfair_lock_assert_not_owner(lock.get());
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ called os_unfair_lock_assert_not_owner on an os_unfair_lock owned by the current thread
|
= note: BACKTRACE:
= note: inside `main` at $DIR/apple_os_unfair_lock_assert_not_owner.rs:LL:CC
note: some details are omitted, run with `MIRIFLAGS=-Zmiri-backtrace=full` for a verbose backtrace
error: aborting due to 1 previous error

View file

@ -0,0 +1,12 @@
//@ only-target-darwin
use std::cell::UnsafeCell;
fn main() {
let lock = UnsafeCell::new(libc::OS_UNFAIR_LOCK_INIT);
unsafe {
libc::os_unfair_lock_assert_owner(lock.get());
//~^ error: abnormal termination: called os_unfair_lock_assert_owner on an os_unfair_lock not owned by the current thread
}
}

View file

@ -0,0 +1,13 @@
error: abnormal termination: called os_unfair_lock_assert_owner on an os_unfair_lock not owned by the current thread
--> $DIR/apple_os_unfair_lock_assert_owner.rs:LL:CC
|
LL | libc::os_unfair_lock_assert_owner(lock.get());
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ called os_unfair_lock_assert_owner on an os_unfair_lock not owned by the current thread
|
= note: BACKTRACE:
= note: inside `main` at $DIR/apple_os_unfair_lock_assert_owner.rs:LL:CC
note: some details are omitted, run with `MIRIFLAGS=-Zmiri-backtrace=full` for a verbose backtrace
error: aborting due to 1 previous error

View file

@ -0,0 +1,13 @@
//@ only-target-darwin
use std::cell::UnsafeCell;
fn main() {
let lock = UnsafeCell::new(libc::OS_UNFAIR_LOCK_INIT);
unsafe {
libc::os_unfair_lock_lock(lock.get());
libc::os_unfair_lock_lock(lock.get());
//~^ error: abnormal termination: attempted to lock an os_unfair_lock that is already locked by the current thread
}
}

View file

@ -0,0 +1,13 @@
error: abnormal termination: attempted to lock an os_unfair_lock that is already locked by the current thread
--> $DIR/apple_os_unfair_lock_reentrant.rs:LL:CC
|
LL | libc::os_unfair_lock_lock(lock.get());
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ attempted to lock an os_unfair_lock that is already locked by the current thread
|
= note: BACKTRACE:
= note: inside `main` at $DIR/apple_os_unfair_lock_reentrant.rs:LL:CC
note: some details are omitted, run with `MIRIFLAGS=-Zmiri-backtrace=full` for a verbose backtrace
error: aborting due to 1 previous error

View file

@ -0,0 +1,12 @@
//@ only-target-darwin
use std::cell::UnsafeCell;
fn main() {
let lock = UnsafeCell::new(libc::OS_UNFAIR_LOCK_INIT);
unsafe {
libc::os_unfair_lock_unlock(lock.get());
//~^ error: abnormal termination: attempted to unlock an os_unfair_lock not owned by the current thread
}
}

View file

@ -0,0 +1,13 @@
error: abnormal termination: attempted to unlock an os_unfair_lock not owned by the current thread
--> $DIR/apple_os_unfair_lock_unowned.rs:LL:CC
|
LL | libc::os_unfair_lock_unlock(lock.get());
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ attempted to unlock an os_unfair_lock not owned by the current thread
|
= note: BACKTRACE:
= note: inside `main` at $DIR/apple_os_unfair_lock_unowned.rs:LL:CC
note: some details are omitted, run with `MIRIFLAGS=-Zmiri-backtrace=full` for a verbose backtrace
error: aborting due to 1 previous error

View file

@ -0,0 +1,17 @@
//@ignore-target-windows: only very limited libc on Windows
//@ignore-target-apple: `sched_setaffinity` is not supported on macOS
//@compile-flags: -Zmiri-disable-isolation -Zmiri-num-cpus=4
fn main() {
use libc::{cpu_set_t, sched_setaffinity};
use std::mem::size_of;
// If pid is zero, then the calling thread is used.
const PID: i32 = 0;
let cpuset: cpu_set_t = unsafe { core::mem::MaybeUninit::zeroed().assume_init() };
let err = unsafe { sched_setaffinity(PID, size_of::<cpu_set_t>() + 1, &cpuset) }; //~ ERROR: memory access failed
assert_eq!(err, 0);
}

View file

@ -0,0 +1,20 @@
error: Undefined Behavior: memory access failed: ALLOC has size 128, so pointer to 129 bytes starting at offset 0 is out-of-bounds
--> $DIR/affinity.rs:LL:CC
|
LL | let err = unsafe { sched_setaffinity(PID, size_of::<cpu_set_t>() + 1, &cpuset) };
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ memory access failed: ALLOC has size 128, so pointer to 129 bytes starting at offset 0 is out-of-bounds
|
= help: this indicates a bug in the program: it performed an invalid operation, and caused Undefined Behavior
= help: see https://doc.rust-lang.org/nightly/reference/behavior-considered-undefined.html for further information
help: ALLOC was allocated here:
--> $DIR/affinity.rs:LL:CC
|
LL | let cpuset: cpu_set_t = unsafe { core::mem::MaybeUninit::zeroed().assume_init() };
| ^^^^^^
= note: BACKTRACE (of the first span):
= note: inside `main` at $DIR/affinity.rs:LL:CC
note: some details are omitted, run with `MIRIFLAGS=-Zmiri-backtrace=full` for a verbose backtrace
error: aborting due to 1 previous error

View file

@ -0,0 +1,27 @@
//! We test that if we requested to read 4 bytes, but actually read 3 bytes,
//! then 3 bytes (not 4) will be initialized.
//@ignore-target-windows: no file system support on Windows
//@compile-flags: -Zmiri-disable-isolation
use std::ffi::CString;
use std::fs::remove_file;
use std::mem::MaybeUninit;
#[path = "../../utils/mod.rs"]
mod utils;
fn main() {
let path =
utils::prepare_with_content("fail-libc-read-and-uninit-premature-eof.txt", &[1u8, 2, 3]);
let cpath = CString::new(path.clone().into_os_string().into_encoded_bytes()).unwrap();
unsafe {
let fd = libc::open(cpath.as_ptr(), libc::O_RDONLY);
assert_ne!(fd, -1);
let mut buf: MaybeUninit<[u8; 4]> = std::mem::MaybeUninit::uninit();
// Read 4 bytes from a 3-byte file.
assert_eq!(libc::read(fd, buf.as_mut_ptr().cast::<std::ffi::c_void>(), 4), 3);
buf.assume_init(); //~ERROR: Undefined Behavior: constructing invalid value at .value[3]: encountered uninitialized memory, but expected an integer
assert_eq!(libc::close(fd), 0);
}
remove_file(&path).unwrap();
}

View file

@ -0,0 +1,15 @@
error: Undefined Behavior: constructing invalid value at .value[3]: encountered uninitialized memory, but expected an integer
--> $DIR/libc-read-and-uninit-premature-eof.rs:LL:CC
|
LL | ... buf.assume_init();
| ^^^^^^^^^^^^^^^^^ constructing invalid value at .value[3]: encountered uninitialized memory, but expected an integer
|
= help: this indicates a bug in the program: it performed an invalid operation, and caused Undefined Behavior
= help: see https://doc.rust-lang.org/nightly/reference/behavior-considered-undefined.html for further information
= note: BACKTRACE:
= note: inside `main` at $DIR/libc-read-and-uninit-premature-eof.rs:LL:CC
note: some details are omitted, run with `MIRIFLAGS=-Zmiri-backtrace=full` for a verbose backtrace
error: aborting due to 1 previous error

View file

@ -0,0 +1,20 @@
#![feature(raw_ref_op)]
#![feature(strict_provenance)]
use std::ptr;
fn direct_raw(x: *const (i32, i32)) -> *const i32 {
unsafe { &raw const (*x).0 }
}
// Ensure that if a raw pointer is created via an intermediate
// reference, we catch that. (Just in case someone decides to
// desugar this differently or so.)
fn via_ref(x: *const (i32, i32)) -> *const i32 {
unsafe { &(*x).0 as *const i32 } //~ERROR: dangling pointer
}
fn main() {
let ptr = ptr::without_provenance(0x10);
direct_raw(ptr); // this is fine
via_ref(ptr); // this is not
}

View file

@ -0,0 +1,20 @@
error: Undefined Behavior: out-of-bounds pointer use: 0x10[noalloc] is a dangling pointer (it has no provenance)
--> $DIR/dangling_pointer_to_raw_pointer.rs:LL:CC
|
LL | unsafe { &(*x).0 as *const i32 }
| ^^^^^^^ out-of-bounds pointer use: 0x10[noalloc] is a dangling pointer (it has no provenance)
|
= help: this indicates a bug in the program: it performed an invalid operation, and caused Undefined Behavior
= help: see https://doc.rust-lang.org/nightly/reference/behavior-considered-undefined.html for further information
= note: BACKTRACE:
= note: inside `via_ref` at $DIR/dangling_pointer_to_raw_pointer.rs:LL:CC
note: inside `main`
--> $DIR/dangling_pointer_to_raw_pointer.rs:LL:CC
|
LL | via_ref(ptr); // this is not
| ^^^^^^^^^^^^
note: some details are omitted, run with `MIRIFLAGS=-Zmiri-backtrace=full` for a verbose backtrace
error: aborting due to 1 previous error

View file

@ -0,0 +1,25 @@
//@ only-target-darwin
use std::cell::UnsafeCell;
fn main() {
let lock = UnsafeCell::new(libc::OS_UNFAIR_LOCK_INIT);
unsafe {
libc::os_unfair_lock_lock(lock.get());
libc::os_unfair_lock_assert_owner(lock.get());
assert!(!libc::os_unfair_lock_trylock(lock.get()));
libc::os_unfair_lock_unlock(lock.get());
libc::os_unfair_lock_assert_not_owner(lock.get());
}
// `os_unfair_lock`s can be moved and leaked.
// In the real implementation, even moving it while locked is possible
// (and "forks" the lock, i.e. old and new location have independent wait queues);
// Miri behavior differs here and anyway none of this is documented.
let lock = lock;
let locked = unsafe { libc::os_unfair_lock_trylock(lock.get()) };
assert!(locked);
let _lock = lock;
}

View file

@ -0,0 +1,218 @@
//@ignore-target-windows: only very limited libc on Windows
//@ignore-target-apple: `sched_{g, s}etaffinity` are not supported on macOS
//@compile-flags: -Zmiri-disable-isolation -Zmiri-num-cpus=4
#![feature(io_error_more)]
#![feature(pointer_is_aligned_to)]
#![feature(strict_provenance)]
use libc::{cpu_set_t, sched_getaffinity, sched_setaffinity};
use std::mem::{size_of, size_of_val};
// If pid is zero, then the calling thread is used.
const PID: i32 = 0;
fn null_pointers() {
let err = unsafe { sched_getaffinity(PID, size_of::<cpu_set_t>(), std::ptr::null_mut()) };
assert_eq!(err, -1);
let err = unsafe { sched_setaffinity(PID, size_of::<cpu_set_t>(), std::ptr::null()) };
assert_eq!(err, -1);
}
fn configure_no_cpus() {
let cpu_count = std::thread::available_parallelism().unwrap().get();
let mut cpuset: cpu_set_t = unsafe { core::mem::MaybeUninit::zeroed().assume_init() };
// configuring no CPUs will fail
let err = unsafe { sched_setaffinity(PID, size_of::<cpu_set_t>(), &cpuset) };
assert_eq!(err, -1);
assert_eq!(std::io::Error::last_os_error().kind(), std::io::ErrorKind::InvalidInput);
// configuring no (physically available) CPUs will fail
unsafe { libc::CPU_SET(cpu_count, &mut cpuset) };
let err = unsafe { sched_setaffinity(PID, size_of::<cpu_set_t>(), &cpuset) };
assert_eq!(err, -1);
assert_eq!(std::io::Error::last_os_error().kind(), std::io::ErrorKind::InvalidInput);
}
fn configure_unavailable_cpu() {
let cpu_count = std::thread::available_parallelism().unwrap().get();
// Safety: valid value for this type
let mut cpuset: cpu_set_t = unsafe { core::mem::MaybeUninit::zeroed().assume_init() };
let err = unsafe { sched_getaffinity(PID, size_of::<cpu_set_t>(), &mut cpuset) };
assert_eq!(err, 0);
// by default, only available CPUs are configured
for i in 0..cpu_count {
assert!(unsafe { libc::CPU_ISSET(i, &cpuset) });
}
assert!(unsafe { !libc::CPU_ISSET(cpu_count, &cpuset) });
// configure CPU that we don't have
unsafe { libc::CPU_SET(cpu_count, &mut cpuset) };
let err = unsafe { sched_setaffinity(PID, size_of::<cpu_set_t>(), &cpuset) };
assert_eq!(err, 0);
let err = unsafe { sched_getaffinity(PID, size_of::<cpu_set_t>(), &mut cpuset) };
assert_eq!(err, 0);
// the CPU is not set because it is not available
assert!(!unsafe { libc::CPU_ISSET(cpu_count, &cpuset) });
}
fn large_set() {
// Rust's libc crate does not currently implement dynamic cpu set allocation
// and related functions like `CPU_ZERO_S`, so we have to be creative:
// this array has 2048 bits, twice the standard number.
let mut cpuset = [u64::MAX; 32];
let err = unsafe { sched_setaffinity(PID, size_of_val(&cpuset), cpuset.as_ptr().cast()) };
assert_eq!(err, 0);
let err = unsafe { sched_getaffinity(PID, size_of_val(&cpuset), cpuset.as_mut_ptr().cast()) };
assert_eq!(err, 0);
}
fn get_small_cpu_mask() {
let mut cpuset: cpu_set_t = unsafe { core::mem::MaybeUninit::zeroed().assume_init() };
// should be 4 on 32-bit systems and 8 otherwise for systems that implement sched_getaffinity
let step = size_of::<std::ffi::c_ulong>();
for i in (0..=2).map(|x| x * step) {
if i == 0 {
// 0 always fails
let err = unsafe { sched_getaffinity(PID, i, &mut cpuset) };
assert_eq!(err, -1, "fail for {}", i);
assert_eq!(std::io::Error::last_os_error().kind(), std::io::ErrorKind::InvalidInput);
} else {
// other whole multiples of the size of c_ulong work
let err = unsafe { sched_getaffinity(PID, i, &mut cpuset) };
assert_eq!(err, 0, "fail for {i}");
}
// anything else returns an error
for j in 1..step {
let err = unsafe { sched_getaffinity(PID, i + j, &mut cpuset) };
assert_eq!(err, -1, "success for {}", i + j);
assert_eq!(std::io::Error::last_os_error().kind(), std::io::ErrorKind::InvalidInput);
}
}
}
fn set_small_cpu_mask() {
let mut cpuset: cpu_set_t = unsafe { core::mem::MaybeUninit::zeroed().assume_init() };
let err = unsafe { sched_getaffinity(PID, size_of::<cpu_set_t>(), &mut cpuset) };
assert_eq!(err, 0);
// setting a mask of size 0 is invalid
let err = unsafe { sched_setaffinity(PID, 0, &cpuset) };
assert_eq!(err, -1);
assert_eq!(std::io::Error::last_os_error().kind(), std::io::ErrorKind::InvalidInput);
// on LE systems, any other number of bytes (at least up to `size_of::<cpu_set_t>()`) will work.
// on BE systems the CPUs 0..8 are stored in the right-most byte of the first chunk. If that
// byte is not included, no valid CPUs are configured. We skip those cases.
let cpu_zero_included_length =
if cfg!(target_endian = "little") { 1 } else { core::mem::size_of::<std::ffi::c_ulong>() };
for i in cpu_zero_included_length..24 {
let err = unsafe { sched_setaffinity(PID, i, &cpuset) };
assert_eq!(err, 0, "fail for {i}");
}
}
fn set_custom_cpu_mask() {
let cpu_count = std::thread::available_parallelism().unwrap().get();
assert!(cpu_count > 1, "this test cannot do anything interesting with just one thread");
let mut cpuset: cpu_set_t = unsafe { core::mem::MaybeUninit::zeroed().assume_init() };
// at the start, CPU 1 should be set
let err = unsafe { sched_getaffinity(PID, size_of::<cpu_set_t>(), &mut cpuset) };
assert_eq!(err, 0);
assert!(unsafe { libc::CPU_ISSET(1, &cpuset) });
// make a valid mask
unsafe { libc::CPU_ZERO(&mut cpuset) };
unsafe { libc::CPU_SET(0, &mut cpuset) };
// giving a smaller mask is fine
let err = unsafe { sched_setaffinity(PID, 8, &cpuset) };
assert_eq!(err, 0);
// and actually removes the other CPUs from the affinity mask
let err = unsafe { sched_getaffinity(PID, size_of::<cpu_set_t>(), &mut cpuset) };
assert_eq!(err, 0);
assert!(unsafe { !libc::CPU_ISSET(1, &cpuset) });
// it is important that we reset the cpu mask now for future tests
for i in 0..cpu_count {
unsafe { libc::CPU_SET(i, &mut cpuset) };
}
let err = unsafe { sched_setaffinity(PID, size_of::<cpu_set_t>(), &cpuset) };
assert_eq!(err, 0);
}
fn parent_child() {
let cpu_count = std::thread::available_parallelism().unwrap().get();
assert!(cpu_count > 1, "this test cannot do anything interesting with just one thread");
// configure the parent thread to run only on CPU 0
let mut parent_cpuset: cpu_set_t = unsafe { core::mem::MaybeUninit::zeroed().assume_init() };
unsafe { libc::CPU_SET(0, &mut parent_cpuset) };
let err = unsafe { sched_setaffinity(PID, size_of::<cpu_set_t>(), &parent_cpuset) };
assert_eq!(err, 0);
std::thread::scope(|spawner| {
spawner.spawn(|| {
let mut cpuset: cpu_set_t = unsafe { core::mem::MaybeUninit::zeroed().assume_init() };
let err = unsafe { sched_getaffinity(PID, size_of::<cpu_set_t>(), &mut cpuset) };
assert_eq!(err, 0);
// the child inherits its parent's set
assert!(unsafe { libc::CPU_ISSET(0, &cpuset) });
assert!(unsafe { !libc::CPU_ISSET(1, &cpuset) });
// configure cpu 1 for the child
unsafe { libc::CPU_SET(1, &mut cpuset) };
});
});
let err = unsafe { sched_getaffinity(PID, size_of::<cpu_set_t>(), &mut parent_cpuset) };
assert_eq!(err, 0);
// the parent's set should be unaffected
assert!(unsafe { !libc::CPU_ISSET(1, &parent_cpuset) });
// it is important that we reset the cpu mask now for future tests
let mut cpuset = parent_cpuset;
for i in 0..cpu_count {
unsafe { libc::CPU_SET(i, &mut cpuset) };
}
let err = unsafe { sched_setaffinity(PID, size_of::<cpu_set_t>(), &cpuset) };
assert_eq!(err, 0);
}
fn main() {
null_pointers();
configure_no_cpus();
configure_unavailable_cpu();
large_set();
get_small_cpu_mask();
set_small_cpu_mask();
set_custom_cpu_mask();
parent_child();
}

View file

@ -36,6 +36,7 @@ fn main() {
#[cfg(target_os = "linux")]
test_sync_file_range();
test_isatty();
test_read_and_uninit();
}
fn test_file_open_unix_allow_two_args() {
@ -388,3 +389,37 @@ fn test_isatty() {
remove_file(&path).unwrap();
}
}
fn test_read_and_uninit() {
use std::mem::MaybeUninit;
{
// We test that libc::read initializes its buffer.
let path = utils::prepare_with_content("pass-libc-read-and-uninit.txt", &[1u8, 2, 3]);
let cpath = CString::new(path.clone().into_os_string().into_encoded_bytes()).unwrap();
unsafe {
let fd = libc::open(cpath.as_ptr(), libc::O_RDONLY);
assert_ne!(fd, -1);
let mut buf: MaybeUninit<[u8; 2]> = std::mem::MaybeUninit::uninit();
assert_eq!(libc::read(fd, buf.as_mut_ptr().cast::<std::ffi::c_void>(), 2), 2);
let buf = buf.assume_init();
assert_eq!(buf, [1, 2]);
assert_eq!(libc::close(fd), 0);
}
remove_file(&path).unwrap();
}
{
// We test that if we requested to read 4 bytes, but actually read 3 bytes, then
// 3 bytes (not 4) will be overwritten, and the remaining bytes will be left as-is.
let path = utils::prepare_with_content("pass-libc-read-and-uninit-2.txt", &[1u8, 2, 3]);
let cpath = CString::new(path.clone().into_os_string().into_encoded_bytes()).unwrap();
unsafe {
let fd = libc::open(cpath.as_ptr(), libc::O_RDONLY);
assert_ne!(fd, -1);
let mut buf = [42u8; 5];
assert_eq!(libc::read(fd, buf.as_mut_ptr().cast::<std::ffi::c_void>(), 4), 3);
assert_eq!(buf, [1, 2, 3, 42, 42]);
assert_eq!(libc::close(fd), 0);
}
remove_file(&path).unwrap();
}
}

View file

@ -4,8 +4,11 @@
#![feature(f128)]
#![feature(f16)]
#![allow(arithmetic_overflow)]
#![allow(internal_features)]
use std::fmt::Debug;
use std::any::type_name;
use std::cmp::min;
use std::fmt::{Debug, Display, LowerHex};
use std::hint::black_box;
use std::{f32, f64};
@ -29,15 +32,41 @@ fn main() {
test_algebraic();
}
// Helper function to avoid promotion so that this tests "run-time" casts, not CTFE.
// Doesn't make a big difference when running this in Miri, but it means we can compare this
// with the LLVM backend by running `rustc -Zmir-opt-level=0 -Zsaturating-float-casts`.
#[track_caller]
#[inline(never)]
fn assert_eq<T: PartialEq + Debug>(x: T, y: T) {
assert_eq!(x, y);
trait Float: Copy + PartialEq + Debug {
/// The unsigned integer with the same bit width as this float
type Int: Copy + PartialEq + LowerHex + Debug;
const BITS: u32 = size_of::<Self>() as u32 * 8;
const EXPONENT_BITS: u32 = Self::BITS - Self::SIGNIFICAND_BITS - 1;
const SIGNIFICAND_BITS: u32;
/// The saturated (all ones) value of the exponent (infinity representation)
const EXPONENT_SAT: u32 = (1 << Self::EXPONENT_BITS) - 1;
/// The exponent bias value (max representable positive exponent)
const EXPONENT_BIAS: u32 = Self::EXPONENT_SAT >> 1;
fn to_bits(self) -> Self::Int;
}
macro_rules! impl_float {
($ty:ty, $ity:ty) => {
impl Float for $ty {
type Int = $ity;
// Just get this from std's value, which includes the implicit digit
const SIGNIFICAND_BITS: u32 = <$ty>::MANTISSA_DIGITS - 1;
fn to_bits(self) -> Self::Int {
self.to_bits()
}
}
};
}
impl_float!(f16, u16);
impl_float!(f32, u32);
impl_float!(f64, u64);
impl_float!(f128, u128);
trait FloatToInt<Int>: Copy {
fn cast(self) -> Int;
unsafe fn cast_unchecked(self) -> Int;
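As a concrete check of the constants defined in the `Float` trait above, here is what they evaluate to for `f32` (standalone arithmetic, not part of the test itself):

fn main() {
    // f32: 1 sign bit, 8 exponent bits, 23 explicit significand bits.
    let bits: u32 = 32;
    let significand_bits: u32 = f32::MANTISSA_DIGITS - 1; // 23, implicit bit excluded
    let exponent_bits = bits - significand_bits - 1; // 8
    let exponent_sat = (1u32 << exponent_bits) - 1; // 255, the exponent of infinities and NaNs
    let exponent_bias = exponent_sat >> 1; // 127, the largest representable power-of-two exponent
    assert_eq!((significand_bits, exponent_bits, exponent_sat, exponent_bias), (23, 8, 255, 127));
}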
@ -58,19 +87,61 @@ macro_rules! float_to_int {
};
}
float_to_int!(f16 => i8, u8, i16, u16, i32, u32, i64, u64, i128, u128);
float_to_int!(f32 => i8, u8, i16, u16, i32, u32, i64, u64, i128, u128);
float_to_int!(f64 => i8, u8, i16, u16, i32, u32, i64, u64, i128, u128);
float_to_int!(f128 => i8, u8, i16, u16, i32, u32, i64, u64, i128, u128);
/// Test this cast both via `as` and via `approx_unchecked` (i.e., it must not saturate).
#[track_caller]
#[inline(never)]
fn test_both_cast<F, I>(x: F, y: I)
fn test_both_cast<F, I>(x: F, y: I, msg: impl Display)
where
F: FloatToInt<I>,
I: PartialEq + Debug,
{
assert_eq!(x.cast(), y);
assert_eq!(unsafe { x.cast_unchecked() }, y);
let f_tname = type_name::<F>();
let i_tname = type_name::<I>();
assert_eq!(x.cast(), y, "{f_tname} -> {i_tname}: {msg}");
assert_eq!(unsafe { x.cast_unchecked() }, y, "{f_tname} -> {i_tname}: {msg}",);
}
/// Helper function to avoid promotion so that this tests "run-time" casts, not CTFE.
/// Doesn't make a big difference when running this in Miri, but it means we can compare this
/// with the LLVM backend by running `rustc -Zmir-opt-level=0 -Zsaturating-float-casts`.
#[track_caller]
#[inline(never)]
fn assert_eq<T: PartialEq + Debug>(x: T, y: T) {
assert_eq!(x, y);
}
/// The same as `assert_eq`, except it prints a specific message on failure
#[track_caller]
#[inline(never)]
fn assert_eq_msg<T: PartialEq + Debug>(x: T, y: T, msg: impl Display) {
assert_eq!(x, y, "{msg}");
}
/// Check that floats have bitwise equality
fn assert_biteq<F: Float>(a: F, b: F, msg: impl Display) {
let ab = a.to_bits();
let bb = b.to_bits();
let tname = type_name::<F>();
let width = (2 + F::BITS / 4) as usize;
assert_eq_msg::<F::Int>(
ab,
bb,
format_args!("({ab:#0width$x} != {bb:#0width$x}) {tname}: {msg}"),
);
}
/// Check that two floats compare equal
fn assert_feq<F: Float>(a: F, b: F, msg: impl Display) {
let ab = a.to_bits();
let bb = b.to_bits();
let tname = type_name::<F>();
let width = (2 + F::BITS / 4) as usize;
assert_eq_msg::<F>(a, b, format_args!("({ab:#0width$x} != {bb:#0width$x}) {tname}: {msg}"));
}
fn basic() {
@ -148,155 +219,368 @@ fn basic() {
assert_eq!(34.2f64.abs(), 34.2f64);
}
/// Many of these test values are taken from
/// Test casts from floats to ints and back
macro_rules! test_ftoi_itof {
(
f: $fty:ty,
i: $ity:ty,
// Int min and max as float literals
imin_f: $imin_f:literal,
imax_f: $imax_f:literal $(,)?
) => {{
/// By default we test float to int `as` casting as well as to_int_unchecked
fn assert_ftoi(f: $fty, i: $ity, msg: &str) {
#[allow(unused_comparisons)]
if <$ity>::MIN >= 0 && f < 0.0 {
// If `ity` is unsigned and `f` is negative, it is unrepresentable, so skip
// unchecked casts.
assert_ftoi_unrep(f, i, msg);
} else {
test_both_cast::<$fty, $ity>(f, i, msg);
}
}
/// Unrepresentable values only get tested with `as` casting, not unchecked
fn assert_ftoi_unrep(f: $fty, i: $ity, msg: &str) {
assert_eq_msg::<$ity>(
f as $ity,
i,
format_args!("{} -> {}: {msg}", stringify!($fty), stringify!($ity)),
);
}
/// Int to float checks
fn assert_itof(i: $ity, f: $fty, msg: &str) {
assert_eq_msg::<$fty>(
i as $fty,
f,
format_args!("{} -> {}: {msg}", stringify!($ity), stringify!($fty)),
);
}
/// Check both float to int and int to float
fn assert_bidir(f: $fty, i: $ity, msg: &str) {
assert_ftoi(f, i, msg);
assert_itof(i, f, msg);
}
/// Check both float to int and int to float for unrepresentable numbers
fn assert_bidir_unrep(f: $fty, i: $ity, msg: &str) {
assert_ftoi_unrep(f, i, msg);
assert_itof(i, f, msg);
}
let fbits = <$fty>::BITS;
let fsig_bits = <$fty>::SIGNIFICAND_BITS;
let ibits = <$ity>::BITS;
let imax: $ity = <$ity>::MAX;
let imin: $ity = <$ity>::MIN;
let izero: $ity = 0;
#[allow(unused_comparisons)]
let isigned = <$ity>::MIN < 0;
#[allow(overflowing_literals)]
let imin_f: $fty = $imin_f;
#[allow(overflowing_literals)]
let imax_f: $fty = $imax_f;
// If an integer can fit entirely in the mantissa (counting the hidden bit), every value
// can be represented exactly.
let all_ints_exact_rep = ibits <= fsig_bits + 1;
// We can represent the full range of the integer (but possibly not every value) without
// saturating to infinity if `1 << (I::BITS - 1)` (single one in the MSB position) is
// within the float's dynamic range.
let int_range_rep = ibits - 1 < <$fty>::EXPONENT_BIAS;
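// A concrete sketch of these two flags (using the IEEE 754 formats above):
// - i16 -> f32: 16 <= 24, so `all_ints_exact_rep` holds and every i16 roundtrips exactly.
// - i32 -> f32: 32 > 24, so not every value is exact, but 2^31 is well within f32's
//   dynamic range (bias 127), so `int_range_rep` still holds.
// - u128 -> f16: 2^127 exceeds f16's largest finite magnitude (max exponent 15), so
//   `int_range_rep` is false and the range-dependent checks below are skipped.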
// Skip unchecked cast when int min/max would be unrepresentable
let assert_ftoi_big = if all_ints_exact_rep { assert_ftoi } else { assert_ftoi_unrep };
let assert_bidir_big = if all_ints_exact_rep { assert_bidir } else { assert_bidir_unrep };
// Near zero representations
assert_bidir(0.0, 0, "zero");
assert_ftoi(-0.0, 0, "negative zero");
assert_ftoi(1.0, 1, "one");
assert_ftoi(-1.0, izero.saturating_sub(1), "negative one");
assert_ftoi(1.0 - <$fty>::EPSILON, 0, "1.0 - ε");
assert_ftoi(1.0 + <$fty>::EPSILON, 1, "1.0 + ε");
assert_ftoi(-1.0 + <$fty>::EPSILON, 0, "-1.0 + ε");
assert_ftoi(-1.0 - <$fty>::EPSILON, izero.saturating_sub(1), "-1.0 - ε");
assert_ftoi(<$fty>::from_bits(0x1), 0, "min subnormal");
assert_ftoi(<$fty>::from_bits(0x1 | 1 << (fbits - 1)), 0, "min neg subnormal");
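// (For reference: the minimum positive subnormal is 2^-24 for f16, 2^-149 for f32,
// 2^-1074 for f64, and 2^-16494 for f128; all of them truncate to 0 here.)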
// Spot checks. Use `saturating_sub` to create negative integers so that unsigned
// integers stay at zero.
assert_ftoi(0.9, 0, "0.9");
assert_ftoi(-0.9, 0, "-0.9");
assert_ftoi(1.1, 1, "1.1");
assert_ftoi(-1.1, izero.saturating_sub(1), "-1.1");
assert_ftoi(1.9, 1, "1.9");
assert_ftoi(-1.9, izero.saturating_sub(1), "-1.9");
assert_ftoi(5.0, 5, "5.0");
assert_ftoi(-5.0, izero.saturating_sub(5), "-5.0");
assert_ftoi(5.9, 5, "5.9");
assert_ftoi(-5.9, izero.saturating_sub(5), "-5.9");
// Exercise the middle of the integer's bit range. A power of two fits as long as the
// exponent can fit its log2, so cap at the maximum representable power of two (which
// is the exponent's bias).
let half_i_max: $ity = 1 << min(ibits / 2, <$fty>::EXPONENT_BIAS);
let half_i_min = izero.saturating_sub(half_i_max);
assert_bidir(half_i_max as $fty, half_i_max, "half int max");
assert_bidir(half_i_min as $fty, half_i_min, "half int min");
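// (For example, f16 with i64: ibits / 2 = 32 exceeds the f16 exponent bias of 15, so this
// tests 2^15 = 32768 instead, which f16 represents exactly.)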
// Integer limits
assert_bidir_big(imax_f, imax, "i max");
assert_bidir_big(imin_f, imin, "i min");
// We need a small perturbation to test against, one that does not round up to the next
// integer. `f16` needs a smaller perturbation since it only has resolution for ~1 decimal
// place near 10^3.
let perturb = if fbits < 32 { 0.9 } else { 0.99 };
assert_ftoi_big(imax_f + perturb, <$ity>::MAX, "slightly above i max");
assert_ftoi_big(imin_f - perturb, <$ity>::MIN, "slightly below i min");
// Tests for when we can represent the integer's magnitude
if int_range_rep {
// If the float can represent values larger than the integer, float extremes
// will saturate.
assert_ftoi_unrep(<$fty>::MAX, imax, "f max");
assert_ftoi_unrep(<$fty>::MIN, imin, "f min");
// Max representable power of 10
let pow10_max = (10 as $ity).pow(imax.ilog10());
// If the power of 10 should be representable (fits in a mantissa), check it
if ibits - pow10_max.leading_zeros() - pow10_max.trailing_zeros() <= fsig_bits + 1 {
assert_bidir(pow10_max as $fty, pow10_max, "pow10 max");
}
}
// Test rounding the largest and smallest integers, but skip this when
// all integers have an exact representation (it's less interesting then and the arithmetic gets more complicated).
if int_range_rep && !all_ints_exact_rep {
// The maximum representable integer is a saturated mantissa (including the implicit
// bit), shifted into the int's leftmost position.
//
// Positive signed integers never use their top bit, so shift by one bit fewer.
let sat_mantissa: $ity = (1 << (fsig_bits + 1)) - 1;
let adj = if isigned { 1 } else { 0 };
let max_rep = sat_mantissa << (sat_mantissa.leading_zeros() - adj);
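// Worked example (f32 -> i32): sat_mantissa = 2^24 - 1 = 0xff_ffff has 8 leading zeros as
// an i32; with adj = 1 it is shifted left by 7, giving max_rep = 0x7fff_ff80 =
// 2_147_483_520, the largest i32 that f32 represents exactly.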
// This value should roundtrip exactly
assert_bidir(max_rep as $fty, max_rep, "max representable int");
// The cutoff for where to round to `imax` is halfway between the maximum exactly
// representable integer and `imax`. This should round down (to `max_rep`),
// i.e., `max_rep as $fty == max_non_sat as $fty`.
let max_non_sat = max_rep + ((imax - max_rep) / 2);
assert_bidir(max_non_sat as $fty, max_rep, "max non saturating int");
// So the next value up should round up to the maximum value of the integer
assert_bidir_unrep((max_non_sat + 1) as $fty, imax, "min int that saturates to imax");
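// Worked example (f32 -> i32): max_rep = 2_147_483_520 and imax = 2_147_483_647, so
// max_non_sat = 2_147_483_583, just below the float midpoint 2_147_483_584 and thus
// rounded down to max_rep. max_non_sat + 1 is exactly that midpoint; ties-to-even picks
// 2^31, which the float-to-int cast then saturates back to imax.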
if isigned {
// Floats can always represent the minimum signed number if they can fit the
// exponent, because it is just a `1` in the MSB. So, no negative int -> float
// conversion will round to negative infinity (if the exponent fits).
//
// Since `imin` is thus the minimum representable value, we test rounding near
// the next value. This happens to be the opposite of the maximum representable
// value, and it should roundtrip exactly.
let next_min_rep = max_rep.wrapping_neg();
assert_bidir(next_min_rep as $fty, next_min_rep, "min representable above imin");
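// Worked example (f32 -> i32): next_min_rep = -2_147_483_520, i.e. -max_rep, which also
// roundtrips exactly.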
// Following the same pattern as for positive numbers, halfway between this value
// and `imin` should round back to `next_min_rep`.
let min_non_sat = imin - ((imin - next_min_rep) / 2) + 1;
assert_bidir(
min_non_sat as $fty,
next_min_rep,
"min int that does not round to imin",
);
// And then anything else saturates to the minimum value.
assert_bidir_unrep(
(min_non_sat - 1) as $fty,
imin,
"max negative int that rounds to imin",
);
}
}
// Check potentially saturating int ranges. (`imax_f` here will be `$fty::INFINITY` if
// it cannot be represented as a finite value.)
assert_itof(imax, imax_f, "imax");
assert_itof(imin, imin_f, "imin");
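// (For example, f16 with i32: both `imax as f16` and the `imax_f` literal evaluate to
// f16::INFINITY, so these checks still hold.)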
// Float limits
assert_ftoi_unrep(<$fty>::INFINITY, imax, "f inf");
assert_ftoi_unrep(<$fty>::NEG_INFINITY, imin, "f neg inf");
assert_ftoi_unrep(<$fty>::NAN, 0, "f nan");
assert_ftoi_unrep(-<$fty>::NAN, 0, "f neg nan");
}};
}
/// Test casts from one float to another
macro_rules! test_ftof {
(
f1: $f1:ty,
f2: $f2:ty $(,)?
) => {{
type F2Int = <$f2 as Float>::Int;
let f1zero: $f1 = 0.0;
let f2zero: $f2 = 0.0;
let f1five: $f1 = 5.0;
let f2five: $f2 = 5.0;
assert_biteq((f1zero as $f2), f2zero, "0.0");
assert_biteq(((-f1zero) as $f2), (-f2zero), "-0.0");
assert_biteq((f1five as $f2), f2five, "5.0");
assert_biteq(((-f1five) as $f2), (-f2five), "-5.0");
assert_feq(<$f1>::INFINITY as $f2, <$f2>::INFINITY, "inf -> inf");
assert_feq(<$f1>::NEG_INFINITY as $f2, <$f2>::NEG_INFINITY, "neg inf -> neg inf");
assert!((<$f1>::NAN as $f2).is_nan(), "{} -> {} nan", stringify!($f1), stringify!($f2));
let min_sub_casted = <$f1>::from_bits(0x1) as $f2;
let min_neg_sub_casted = <$f1>::from_bits(0x1 | 1 << (<$f1>::BITS - 1)) as $f2;
if <$f1>::BITS > <$f2>::BITS {
assert_feq(<$f1>::MAX as $f2, <$f2>::INFINITY, "max -> inf");
assert_feq(<$f1>::MIN as $f2, <$f2>::NEG_INFINITY, "min -> neg inf");
assert_biteq(min_sub_casted, f2zero, "min subnormal -> 0.0");
assert_biteq(min_neg_sub_casted, -f2zero, "min neg subnormal -> -0.0");
} else {
// When increasing precision, the source type's minimum subnormal becomes a normal
// number in the destination type. Its value is 2^-(EXPONENT_BIAS + SIGNIFICAND_BITS - 1)
// of the source type, so the destination's biased exponent is its own bias minus that
// amount.
let sub_casted = <$f2>::from_bits(
((<$f2>::EXPONENT_BIAS - (<$f1>::EXPONENT_BIAS + <$f1>::SIGNIFICAND_BITS - 1))
as F2Int)
<< <$f2>::SIGNIFICAND_BITS,
);
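// Worked example (f32 -> f64): f32's minimum subnormal is 2^-149, so the f64 result has
// biased exponent 1023 - 149 = 874 = 0x36a, i.e. f64::from_bits(0x36a0_0000_0000_0000).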
assert_biteq(min_sub_casted, sub_casted, "min subnormal");
assert_biteq(min_neg_sub_casted, -sub_casted, "min neg subnormal");
}
}};
}
/// Many of these test patterns were adapted from the values in
/// https://github.com/WebAssembly/testsuite/blob/master/conversions.wast.
fn casts() {
// f32 -> i8
test_both_cast::<f32, i8>(127.99, 127);
test_both_cast::<f32, i8>(-128.99, -128);
/* int <-> float generic tests */
// f32 -> i32
test_both_cast::<f32, i32>(0.0, 0);
test_both_cast::<f32, i32>(-0.0, 0);
test_both_cast::<f32, i32>(/*0x1p-149*/ f32::from_bits(0x00000001), 0);
test_both_cast::<f32, i32>(/*-0x1p-149*/ f32::from_bits(0x80000001), 0);
test_both_cast::<f32, i32>(/*0x1.19999ap+0*/ f32::from_bits(0x3f8ccccd), 1);
test_both_cast::<f32, i32>(/*-0x1.19999ap+0*/ f32::from_bits(0xbf8ccccd), -1);
test_both_cast::<f32, i32>(1.9, 1);
test_both_cast::<f32, i32>(-1.9, -1);
test_both_cast::<f32, i32>(5.0, 5);
test_both_cast::<f32, i32>(-5.0, -5);
test_both_cast::<f32, i32>(2147483520.0, 2147483520);
test_both_cast::<f32, i32>(-2147483648.0, -2147483648);
// unrepresentable casts
assert_eq::<i32>(2147483648.0f32 as i32, i32::MAX);
assert_eq::<i32>(-2147483904.0f32 as i32, i32::MIN);
assert_eq::<i32>(f32::MAX as i32, i32::MAX);
assert_eq::<i32>(f32::MIN as i32, i32::MIN);
assert_eq::<i32>(f32::INFINITY as i32, i32::MAX);
assert_eq::<i32>(f32::NEG_INFINITY as i32, i32::MIN);
assert_eq::<i32>(f32::NAN as i32, 0);
assert_eq::<i32>((-f32::NAN) as i32, 0);
test_ftoi_itof! { f: f16, i: i8, imin_f: -128.0, imax_f: 127.0 };
test_ftoi_itof! { f: f16, i: u8, imin_f: 0.0, imax_f: 255.0 };
test_ftoi_itof! { f: f16, i: i16, imin_f: -32_768.0, imax_f: 32_767.0 };
test_ftoi_itof! { f: f16, i: u16, imin_f: 0.0, imax_f: 65_535.0 };
test_ftoi_itof! { f: f16, i: i32, imin_f: -2_147_483_648.0, imax_f: 2_147_483_647.0 };
test_ftoi_itof! { f: f16, i: u32, imin_f: 0.0, imax_f: 4_294_967_295.0 };
test_ftoi_itof! {
f: f16,
i: i64,
imin_f: -9_223_372_036_854_775_808.0,
imax_f: 9_223_372_036_854_775_807.0
};
test_ftoi_itof! { f: f16, i: u64, imin_f: 0.0, imax_f: 18_446_744_073_709_551_615.0 };
test_ftoi_itof! {
f: f16,
i: i128,
imin_f: -170_141_183_460_469_231_731_687_303_715_884_105_728.0,
imax_f: 170_141_183_460_469_231_731_687_303_715_884_105_727.0,
};
test_ftoi_itof! {
f: f16,
i: u128,
imin_f: 0.0,
imax_f: 340_282_366_920_938_463_463_374_607_431_768_211_455.0
};
// f32 -> u32
test_both_cast::<f32, u32>(0.0, 0);
test_both_cast::<f32, u32>(-0.0, 0);
test_both_cast::<f32, u32>(-0.9999999, 0);
test_both_cast::<f32, u32>(/*0x1p-149*/ f32::from_bits(0x1), 0);
test_both_cast::<f32, u32>(/*-0x1p-149*/ f32::from_bits(0x80000001), 0);
test_both_cast::<f32, u32>(/*0x1.19999ap+0*/ f32::from_bits(0x3f8ccccd), 1);
test_both_cast::<f32, u32>(1.9, 1);
test_both_cast::<f32, u32>(5.0, 5);
test_both_cast::<f32, u32>(2147483648.0, 0x8000_0000);
test_both_cast::<f32, u32>(4294967040.0, 0u32.wrapping_sub(256));
test_both_cast::<f32, u32>(/*-0x1.ccccccp-1*/ f32::from_bits(0xbf666666), 0);
test_both_cast::<f32, u32>(/*-0x1.fffffep-1*/ f32::from_bits(0xbf7fffff), 0);
test_both_cast::<f32, u32>((u32::MAX - 128) as f32, u32::MAX - 255); // rounding loss
// unrepresentable casts
assert_eq::<u32>((u32::MAX - 127) as f32 as u32, u32::MAX); // rounds up and then becomes unrepresentable
assert_eq::<u32>(4294967296.0f32 as u32, u32::MAX);
assert_eq::<u32>(-5.0f32 as u32, 0);
assert_eq::<u32>(f32::MAX as u32, u32::MAX);
assert_eq::<u32>(f32::MIN as u32, 0);
assert_eq::<u32>(f32::INFINITY as u32, u32::MAX);
assert_eq::<u32>(f32::NEG_INFINITY as u32, 0);
assert_eq::<u32>(f32::NAN as u32, 0);
assert_eq::<u32>((-f32::NAN) as u32, 0);
test_ftoi_itof! { f: f32, i: i8, imin_f: -128.0, imax_f: 127.0 };
test_ftoi_itof! { f: f32, i: u8, imin_f: 0.0, imax_f: 255.0 };
test_ftoi_itof! { f: f32, i: i16, imin_f: -32_768.0, imax_f: 32_767.0 };
test_ftoi_itof! { f: f32, i: u16, imin_f: 0.0, imax_f: 65_535.0 };
test_ftoi_itof! { f: f32, i: i32, imin_f: -2_147_483_648.0, imax_f: 2_147_483_647.0 };
test_ftoi_itof! { f: f32, i: u32, imin_f: 0.0, imax_f: 4_294_967_295.0 };
test_ftoi_itof! {
f: f32,
i: i64,
imin_f: -9_223_372_036_854_775_808.0,
imax_f: 9_223_372_036_854_775_807.0
};
test_ftoi_itof! { f: f32, i: u64, imin_f: 0.0, imax_f: 18_446_744_073_709_551_615.0 };
test_ftoi_itof! {
f: f32,
i: i128,
imin_f: -170_141_183_460_469_231_731_687_303_715_884_105_728.0,
imax_f: 170_141_183_460_469_231_731_687_303_715_884_105_727.0,
};
test_ftoi_itof! {
f: f32,
i: u128,
imin_f: 0.0,
imax_f: 340_282_366_920_938_463_463_374_607_431_768_211_455.0
};
// f32 -> i64
test_both_cast::<f32, i64>(4294967296.0, 4294967296);
test_both_cast::<f32, i64>(-4294967296.0, -4294967296);
test_both_cast::<f32, i64>(9223371487098961920.0, 9223371487098961920);
test_both_cast::<f32, i64>(-9223372036854775808.0, -9223372036854775808);
test_ftoi_itof! { f: f64, i: i8, imin_f: -128.0, imax_f: 127.0 };
test_ftoi_itof! { f: f64, i: u8, imin_f: 0.0, imax_f: 255.0 };
test_ftoi_itof! { f: f64, i: i16, imin_f: -32_768.0, imax_f: 32_767.0 };
test_ftoi_itof! { f: f64, i: u16, imin_f: 0.0, imax_f: 65_535.0 };
test_ftoi_itof! { f: f64, i: i32, imin_f: -2_147_483_648.0, imax_f: 2_147_483_647.0 };
test_ftoi_itof! { f: f64, i: u32, imin_f: 0.0, imax_f: 4_294_967_295.0 };
test_ftoi_itof! {
f: f64,
i: i64,
imin_f: -9_223_372_036_854_775_808.0,
imax_f: 9_223_372_036_854_775_807.0
};
test_ftoi_itof! { f: f64, i: u64, imin_f: 0.0, imax_f: 18_446_744_073_709_551_615.0 };
test_ftoi_itof! {
f: f64,
i: i128,
imin_f: -170_141_183_460_469_231_731_687_303_715_884_105_728.0,
imax_f: 170_141_183_460_469_231_731_687_303_715_884_105_727.0,
};
test_ftoi_itof! {
f: f64,
i: u128,
imin_f: 0.0,
imax_f: 340_282_366_920_938_463_463_374_607_431_768_211_455.0
};
// f64 -> i8
test_both_cast::<f64, i8>(127.99, 127);
test_both_cast::<f64, i8>(-128.99, -128);
test_ftoi_itof! { f: f128, i: i8, imin_f: -128.0, imax_f: 127.0 };
test_ftoi_itof! { f: f128, i: u8, imin_f: 0.0, imax_f: 255.0 };
test_ftoi_itof! { f: f128, i: i16, imin_f: -32_768.0, imax_f: 32_767.0 };
test_ftoi_itof! { f: f128, i: u16, imin_f: 0.0, imax_f: 65_535.0 };
test_ftoi_itof! { f: f128, i: i32, imin_f: -2_147_483_648.0, imax_f: 2_147_483_647.0 };
test_ftoi_itof! { f: f128, i: u32, imin_f: 0.0, imax_f: 4_294_967_295.0 };
test_ftoi_itof! {
f: f128,
i: i64,
imin_f: -9_223_372_036_854_775_808.0,
imax_f: 9_223_372_036_854_775_807.0
};
test_ftoi_itof! { f: f128, i: u64, imin_f: 0.0, imax_f: 18_446_744_073_709_551_615.0 };
test_ftoi_itof! {
f: f128,
i: i128,
imin_f: -170_141_183_460_469_231_731_687_303_715_884_105_728.0,
imax_f: 170_141_183_460_469_231_731_687_303_715_884_105_727.0,
};
test_ftoi_itof! {
f: f128,
i: u128,
imin_f: 0.0,
imax_f: 340_282_366_920_938_463_463_374_607_431_768_211_455.0
};
// f64 -> i32
test_both_cast::<f64, i32>(0.0, 0);
test_both_cast::<f64, i32>(-0.0, 0);
test_both_cast::<f64, i32>(/*0x1.199999999999ap+0*/ f64::from_bits(0x3ff199999999999a), 1);
test_both_cast::<f64, i32>(
/*-0x1.199999999999ap+0*/ f64::from_bits(0xbff199999999999a),
-1,
);
test_both_cast::<f64, i32>(1.9, 1);
test_both_cast::<f64, i32>(-1.9, -1);
test_both_cast::<f64, i32>(1e8, 100_000_000);
test_both_cast::<f64, i32>(2147483647.0, 2147483647);
test_both_cast::<f64, i32>(-2147483648.0, -2147483648);
// unrepresentable casts
assert_eq::<i32>(2147483648.0f64 as i32, i32::MAX);
assert_eq::<i32>(-2147483649.0f64 as i32, i32::MIN);
// f64 -> i64
test_both_cast::<f64, i64>(0.0, 0);
test_both_cast::<f64, i64>(-0.0, 0);
test_both_cast::<f64, i64>(/*0x0.0000000000001p-1022*/ f64::from_bits(0x1), 0);
test_both_cast::<f64, i64>(
/*-0x0.0000000000001p-1022*/ f64::from_bits(0x8000000000000001),
0,
);
test_both_cast::<f64, i64>(/*0x1.199999999999ap+0*/ f64::from_bits(0x3ff199999999999a), 1);
test_both_cast::<f64, i64>(
/*-0x1.199999999999ap+0*/ f64::from_bits(0xbff199999999999a),
-1,
);
test_both_cast::<f64, i64>(5.0, 5);
test_both_cast::<f64, i64>(5.9, 5);
test_both_cast::<f64, i64>(-5.0, -5);
test_both_cast::<f64, i64>(-5.9, -5);
test_both_cast::<f64, i64>(4294967296.0, 4294967296);
test_both_cast::<f64, i64>(-4294967296.0, -4294967296);
test_both_cast::<f64, i64>(9223372036854774784.0, 9223372036854774784);
test_both_cast::<f64, i64>(-9223372036854775808.0, -9223372036854775808);
// unrepresentable casts
assert_eq::<i64>(9223372036854775808.0f64 as i64, i64::MAX);
assert_eq::<i64>(-9223372036854777856.0f64 as i64, i64::MIN);
assert_eq::<i64>(f64::MAX as i64, i64::MAX);
assert_eq::<i64>(f64::MIN as i64, i64::MIN);
assert_eq::<i64>(f64::INFINITY as i64, i64::MAX);
assert_eq::<i64>(f64::NEG_INFINITY as i64, i64::MIN);
assert_eq::<i64>(f64::NAN as i64, 0);
assert_eq::<i64>((-f64::NAN) as i64, 0);
// f64 -> u64
test_both_cast::<f64, u64>(0.0, 0);
test_both_cast::<f64, u64>(-0.0, 0);
test_both_cast::<f64, u64>(-0.99999999999, 0);
test_both_cast::<f64, u64>(5.0, 5);
test_both_cast::<f64, u64>(1e16, 10000000000000000);
test_both_cast::<f64, u64>((u64::MAX - 1024) as f64, u64::MAX - 2047); // rounding loss
test_both_cast::<f64, u64>(9223372036854775808.0, 9223372036854775808);
// unrepresentable casts
assert_eq::<u64>(-5.0f64 as u64, 0);
assert_eq::<u64>((u64::MAX - 1023) as f64 as u64, u64::MAX); // rounds up and then becomes unrepresentable
assert_eq::<u64>(18446744073709551616.0f64 as u64, u64::MAX);
assert_eq::<u64>(f64::MAX as u64, u64::MAX);
assert_eq::<u64>(f64::MIN as u64, 0);
assert_eq::<u64>(f64::INFINITY as u64, u64::MAX);
assert_eq::<u64>(f64::NEG_INFINITY as u64, 0);
assert_eq::<u64>(f64::NAN as u64, 0);
assert_eq::<u64>((-f64::NAN) as u64, 0);
// f64 -> i128
assert_eq::<i128>(f64::MAX as i128, i128::MAX);
assert_eq::<i128>(f64::MIN as i128, i128::MIN);
// f64 -> u128
assert_eq::<u128>(f64::MAX as u128, u128::MAX);
assert_eq::<u128>(f64::MIN as u128, 0);
/* int <-> float spot checks */
// int -> f32
assert_eq::<f32>(127i8 as f32, 127.0);
assert_eq::<f32>(2147483647i32 as f32, 2147483648.0);
assert_eq::<f32>((-2147483648i32) as f32, -2147483648.0);
assert_eq::<f32>(1234567890i32 as f32, /*0x1.26580cp+30*/ f32::from_bits(0x4e932c06));
assert_eq::<f32>(16777217i32 as f32, 16777216.0);
assert_eq::<f32>((-16777217i32) as f32, -16777216.0);
assert_eq::<f32>(16777219i32 as f32, 16777220.0);
assert_eq::<f32>((-16777219i32) as f32, -16777220.0);
assert_eq::<f32>(
0x7fffff4000000001i64 as f32,
/*0x1.fffffep+62*/ f32::from_bits(0x5effffff),
@ -313,36 +597,33 @@ fn casts() {
0xffdfffffdfffffffu64 as i64 as f32,
/*-0x1.000002p+53*/ f32::from_bits(0xda000001),
);
assert_eq::<f32>(i128::MIN as f32, -170141183460469231731687303715884105728.0f32);
assert_eq::<f32>(u128::MAX as f32, f32::INFINITY); // saturation
// int -> f64
assert_eq::<f64>(127i8 as f64, 127.0);
assert_eq::<f64>(i16::MIN as f64, -32768.0f64);
assert_eq::<f64>(2147483647i32 as f64, 2147483647.0);
assert_eq::<f64>(-2147483648i32 as f64, -2147483648.0);
assert_eq::<f64>(987654321i32 as f64, 987654321.0);
assert_eq::<f64>(9223372036854775807i64 as f64, 9223372036854775807.0);
assert_eq::<f64>(-9223372036854775808i64 as f64, -9223372036854775808.0);
assert_eq::<f64>(4669201609102990i64 as f64, 4669201609102990.0); // Feigenbaum (?)
assert_eq::<f64>(9007199254740993i64 as f64, 9007199254740992.0);
assert_eq::<f64>(-9007199254740993i64 as f64, -9007199254740992.0);
assert_eq::<f64>(9007199254740995i64 as f64, 9007199254740996.0);
assert_eq::<f64>(-9007199254740995i64 as f64, -9007199254740996.0);
assert_eq::<f64>(u128::MAX as f64, 340282366920938463463374607431768211455.0f64); // even that fits...
/* float -> float generic tests */
test_ftof! { f1: f16, f2: f32 };
test_ftof! { f1: f16, f2: f64 };
test_ftof! { f1: f16, f2: f128 };
test_ftof! { f1: f32, f2: f16 };
test_ftof! { f1: f32, f2: f64 };
test_ftof! { f1: f32, f2: f128 };
test_ftof! { f1: f64, f2: f16 };
test_ftof! { f1: f64, f2: f32 };
test_ftof! { f1: f64, f2: f128 };
test_ftof! { f1: f128, f2: f16 };
test_ftof! { f1: f128, f2: f32 };
test_ftof! { f1: f128, f2: f64 };
/* float -> float spot checks */
// f32 -> f64
assert_eq::<u64>((0.0f32 as f64).to_bits(), 0.0f64.to_bits());
assert_eq::<u64>(((-0.0f32) as f64).to_bits(), (-0.0f64).to_bits());
assert_eq::<f64>(5.0f32 as f64, 5.0f64);
assert_eq::<f64>(
/*0x1p-149*/ f32::from_bits(0x1) as f64,
/*0x1p-149*/ f64::from_bits(0x36a0000000000000),
);
assert_eq::<f64>(
/*-0x1p-149*/ f32::from_bits(0x80000001) as f64,
/*-0x1p-149*/ f64::from_bits(0xb6a0000000000000),
);
assert_eq::<f64>(
/*0x1.fffffep+127*/ f32::from_bits(0x7f7fffff) as f64,
/*0x1.fffffep+127*/ f64::from_bits(0x47efffffe0000000),
@ -359,15 +640,8 @@ fn casts() {
/*0x1.8f867ep+125*/ f32::from_bits(0x7e47c33f) as f64,
6.6382536710104395e+37,
);
assert_eq::<f64>(f32::INFINITY as f64, f64::INFINITY);
assert_eq::<f64>(f32::NEG_INFINITY as f64, f64::NEG_INFINITY);
// f64 -> f32
assert_eq::<u32>((0.0f64 as f32).to_bits(), 0.0f32.to_bits());
assert_eq::<u32>(((-0.0f64) as f32).to_bits(), (-0.0f32).to_bits());
assert_eq::<f32>(5.0f64 as f32, 5.0f32);
assert_eq::<f32>(/*0x0.0000000000001p-1022*/ f64::from_bits(0x1) as f32, 0.0);
assert_eq::<f32>(/*-0x0.0000000000001p-1022*/ (-f64::from_bits(0x1)) as f32, -0.0);
assert_eq::<f32>(
/*0x1.fffffe0000000p-127*/ f64::from_bits(0x380fffffe0000000) as f32,
/*0x1p-149*/ f32::from_bits(0x800000),
@ -376,10 +650,6 @@ fn casts() {
/*0x1.4eae4f7024c7p+108*/ f64::from_bits(0x46b4eae4f7024c70) as f32,
/*0x1.4eae5p+108*/ f32::from_bits(0x75a75728),
);
assert_eq::<f32>(f64::MAX as f32, f32::INFINITY);
assert_eq::<f32>(f64::MIN as f32, f32::NEG_INFINITY);
assert_eq::<f32>(f64::INFINITY as f32, f32::INFINITY);
assert_eq::<f32>(f64::NEG_INFINITY as f32, f32::NEG_INFINITY);
}
fn ops() {

View file

@ -0,0 +1,43 @@
//@only-target-darwin
use std::thread;
extern "C" {
fn _tlv_atexit(dtor: unsafe extern "C" fn(*mut u8), arg: *mut u8);
}
fn register<F>(f: F)
where
F: FnOnce() + 'static,
{
// This will receive the pointer passed into `_tlv_atexit`, which is the
// original `f` but boxed up.
unsafe extern "C" fn run<F>(ptr: *mut u8)
where
F: FnOnce() + 'static,
{
let f = unsafe { Box::from_raw(ptr as *mut F) };
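// Reconstructing the `Box` reclaims ownership of the allocation made in `register`,
// so the closure's heap allocation is freed exactly once, when this destructor runs.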
f()
}
unsafe {
_tlv_atexit(run::<F>, Box::into_raw(Box::new(f)) as *mut u8);
}
}
fn main() {
thread::spawn(|| {
register(|| println!("dtor 2"));
register(|| println!("dtor 1"));
println!("exiting thread");
})
.join()
.unwrap();
println!("exiting main");
register(|| println!("dtor 5"));
register(|| {
println!("registering dtor in dtor 3");
register(|| println!("dtor 4"));
});
}

View file

@ -0,0 +1,7 @@
exiting thread
dtor 1
dtor 2
exiting main
registering dtor in dtor 3
dtor 4
dtor 5