atomicrmw on pointers: move integer-pointer cast hacks into backend

This commit is contained in:
Ralf Jung 2025-07-19 23:23:40 +02:00
parent efcae7d31d
commit de1b999ff6
17 changed files with 243 additions and 176 deletions

View file

@ -969,7 +969,7 @@ fn codegen_regular_intrinsic_call<'tcx>(
let layout = amount.layout();
match layout.ty.kind() {
ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
ty::Uint(_) | ty::Int(_) => {}
_ => {
report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
return Ok(());
@ -982,7 +982,7 @@ fn codegen_regular_intrinsic_call<'tcx>(
let old =
fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Add, ptr, amount);
let old = CValue::by_val(old, layout);
let old = CValue::by_val(old, ret.layout());
ret.write_cvalue(fx, old);
}
sym::atomic_xsub => {
@ -991,7 +991,7 @@ fn codegen_regular_intrinsic_call<'tcx>(
let layout = amount.layout();
match layout.ty.kind() {
ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
ty::Uint(_) | ty::Int(_) => {}
_ => {
report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
return Ok(());
@ -1004,7 +1004,7 @@ fn codegen_regular_intrinsic_call<'tcx>(
let old =
fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Sub, ptr, amount);
let old = CValue::by_val(old, layout);
let old = CValue::by_val(old, ret.layout());
ret.write_cvalue(fx, old);
}
sym::atomic_and => {
@ -1013,7 +1013,7 @@ fn codegen_regular_intrinsic_call<'tcx>(
let layout = src.layout();
match layout.ty.kind() {
ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
ty::Uint(_) | ty::Int(_) => {}
_ => {
report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
return Ok(());
@ -1025,7 +1025,7 @@ fn codegen_regular_intrinsic_call<'tcx>(
let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::And, ptr, src);
let old = CValue::by_val(old, layout);
let old = CValue::by_val(old, ret.layout());
ret.write_cvalue(fx, old);
}
sym::atomic_or => {
@ -1034,7 +1034,7 @@ fn codegen_regular_intrinsic_call<'tcx>(
let layout = src.layout();
match layout.ty.kind() {
ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
ty::Uint(_) | ty::Int(_) => {}
_ => {
report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
return Ok(());
@ -1046,7 +1046,7 @@ fn codegen_regular_intrinsic_call<'tcx>(
let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Or, ptr, src);
let old = CValue::by_val(old, layout);
let old = CValue::by_val(old, ret.layout());
ret.write_cvalue(fx, old);
}
sym::atomic_xor => {
@ -1055,7 +1055,7 @@ fn codegen_regular_intrinsic_call<'tcx>(
let layout = src.layout();
match layout.ty.kind() {
ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
ty::Uint(_) | ty::Int(_) => {}
_ => {
report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
return Ok(());
@ -1067,7 +1067,7 @@ fn codegen_regular_intrinsic_call<'tcx>(
let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Xor, ptr, src);
let old = CValue::by_val(old, layout);
let old = CValue::by_val(old, ret.layout());
ret.write_cvalue(fx, old);
}
sym::atomic_nand => {
@ -1076,7 +1076,7 @@ fn codegen_regular_intrinsic_call<'tcx>(
let layout = src.layout();
match layout.ty.kind() {
ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
ty::Uint(_) | ty::Int(_) => {}
_ => {
report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
return Ok(());
@ -1088,7 +1088,7 @@ fn codegen_regular_intrinsic_call<'tcx>(
let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Nand, ptr, src);
let old = CValue::by_val(old, layout);
let old = CValue::by_val(old, ret.layout());
ret.write_cvalue(fx, old);
}
sym::atomic_max => {

View file

@ -1656,6 +1656,7 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
dst: RValue<'gcc>,
src: RValue<'gcc>,
order: AtomicOrdering,
ret_ptr: bool,
) -> RValue<'gcc> {
let size = get_maybe_pointer_size(src);
let name = match op {
@ -1683,6 +1684,9 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
let atomic_function = self.context.get_builtin_function(name);
let order = self.context.new_rvalue_from_int(self.i32_type, order.to_gcc());
// FIXME: If `ret_ptr` is true and `src` is an integer, we should really tell GCC
// that this is a pointer operation that needs to preserve provenance -- but like LLVM,
// GCC does not currently seem to support that.
let void_ptr_type = self.context.new_type::<*mut ()>();
let volatile_void_ptr_type = void_ptr_type.make_volatile();
let dst = self.context.new_cast(self.location, dst, volatile_void_ptr_type);
@ -1690,7 +1694,8 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
let new_src_type = atomic_function.get_param(1).to_rvalue().get_type();
let src = self.context.new_bitcast(self.location, src, new_src_type);
let res = self.context.new_call(self.location, atomic_function, &[dst, src, order]);
self.context.new_cast(self.location, res, src.get_type())
let res_type = if ret_ptr { void_ptr_type } else { src.get_type() };
self.context.new_cast(self.location, res, res_type)
}
fn atomic_fence(&mut self, order: AtomicOrdering, scope: SynchronizationScope) {

View file

@ -1326,15 +1326,13 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
&mut self,
op: rustc_codegen_ssa::common::AtomicRmwBinOp,
dst: &'ll Value,
mut src: &'ll Value,
src: &'ll Value,
order: rustc_middle::ty::AtomicOrdering,
ret_ptr: bool,
) -> &'ll Value {
// The only RMW operation that LLVM supports on pointers is compare-exchange.
let requires_cast_to_int = self.val_ty(src) == self.type_ptr()
&& op != rustc_codegen_ssa::common::AtomicRmwBinOp::AtomicXchg;
if requires_cast_to_int {
src = self.ptrtoint(src, self.type_isize());
}
// FIXME: If `ret_ptr` is true and `src` is not a pointer, we *should* tell LLVM that the
// LHS is a pointer and the operation should be provenance-preserving, but LLVM does not
// currently support that (https://github.com/llvm/llvm-project/issues/120837).
let mut res = unsafe {
llvm::LLVMBuildAtomicRMW(
self.llbuilder,
@ -1345,7 +1343,7 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
llvm::False, // SingleThreaded
)
};
if requires_cast_to_int {
if ret_ptr && self.val_ty(res) != self.type_ptr() {
res = self.inttoptr(res, self.type_ptr());
}
res

View file

@ -97,6 +97,8 @@ codegen_ssa_invalid_monomorphization_basic_float_type = invalid monomorphization
codegen_ssa_invalid_monomorphization_basic_integer_type = invalid monomorphization of `{$name}` intrinsic: expected basic integer type, found `{$ty}`
codegen_ssa_invalid_monomorphization_basic_integer_or_ptr_type = invalid monomorphization of `{$name}` intrinsic: expected basic integer or pointer type, found `{$ty}`
codegen_ssa_invalid_monomorphization_cannot_return = invalid monomorphization of `{$name}` intrinsic: cannot return `{$ret_ty}`, expected `u{$expected_int_bits}` or `[u8; {$expected_bytes}]`
codegen_ssa_invalid_monomorphization_cast_wide_pointer = invalid monomorphization of `{$name}` intrinsic: cannot cast wide pointer `{$ty}`

View file

@ -764,6 +764,14 @@ pub enum InvalidMonomorphization<'tcx> {
ty: Ty<'tcx>,
},
#[diag(codegen_ssa_invalid_monomorphization_basic_integer_or_ptr_type, code = E0511)]
BasicIntegerOrPtrType {
#[primary_span]
span: Span,
name: Symbol,
ty: Ty<'tcx>,
},
#[diag(codegen_ssa_invalid_monomorphization_basic_float_type, code = E0511)]
BasicFloatType {
#[primary_span]

View file

@ -92,6 +92,13 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
let invalid_monomorphization_int_type = |ty| {
bx.tcx().dcx().emit_err(InvalidMonomorphization::BasicIntegerType { span, name, ty });
};
let invalid_monomorphization_int_or_ptr_type = |ty| {
bx.tcx().dcx().emit_err(InvalidMonomorphization::BasicIntegerOrPtrType {
span,
name,
ty,
});
};
let parse_atomic_ordering = |ord: ty::Value<'tcx>| {
let discr = ord.valtree.unwrap_branch()[0].unwrap_leaf();
@ -351,7 +358,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
sym::atomic_load => {
let ty = fn_args.type_at(0);
if !(int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_raw_ptr()) {
invalid_monomorphization_int_type(ty);
invalid_monomorphization_int_or_ptr_type(ty);
return Ok(());
}
let ordering = fn_args.const_at(1).to_value();
@ -367,7 +374,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
sym::atomic_store => {
let ty = fn_args.type_at(0);
if !(int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_raw_ptr()) {
invalid_monomorphization_int_type(ty);
invalid_monomorphization_int_or_ptr_type(ty);
return Ok(());
}
let ordering = fn_args.const_at(1).to_value();
@ -377,10 +384,11 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
bx.atomic_store(val, ptr, parse_atomic_ordering(ordering), size);
return Ok(());
}
// These are all AtomicRMW ops
sym::atomic_cxchg | sym::atomic_cxchgweak => {
let ty = fn_args.type_at(0);
if !(int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_raw_ptr()) {
invalid_monomorphization_int_type(ty);
invalid_monomorphization_int_or_ptr_type(ty);
return Ok(());
}
let succ_ordering = fn_args.const_at(1).to_value();
@ -407,7 +415,6 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
return Ok(());
}
// These are all AtomicRMW ops
sym::atomic_max | sym::atomic_min => {
let atom_op = if name == sym::atomic_max {
AtomicRmwBinOp::AtomicMax
@ -420,7 +427,13 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
let ordering = fn_args.const_at(1).to_value();
let ptr = args[0].immediate();
let val = args[1].immediate();
bx.atomic_rmw(atom_op, ptr, val, parse_atomic_ordering(ordering))
bx.atomic_rmw(
atom_op,
ptr,
val,
parse_atomic_ordering(ordering),
/* ret_ptr */ false,
)
} else {
invalid_monomorphization_int_type(ty);
return Ok(());
@ -438,21 +451,44 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
let ordering = fn_args.const_at(1).to_value();
let ptr = args[0].immediate();
let val = args[1].immediate();
bx.atomic_rmw(atom_op, ptr, val, parse_atomic_ordering(ordering))
bx.atomic_rmw(
atom_op,
ptr,
val,
parse_atomic_ordering(ordering),
/* ret_ptr */ false,
)
} else {
invalid_monomorphization_int_type(ty);
return Ok(());
}
}
sym::atomic_xchg
| sym::atomic_xadd
sym::atomic_xchg => {
let ty = fn_args.type_at(0);
let ordering = fn_args.const_at(1).to_value();
if int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_raw_ptr() {
let ptr = args[0].immediate();
let val = args[1].immediate();
let atomic_op = AtomicRmwBinOp::AtomicXchg;
bx.atomic_rmw(
atomic_op,
ptr,
val,
parse_atomic_ordering(ordering),
/* ret_ptr */ ty.is_raw_ptr(),
)
} else {
invalid_monomorphization_int_or_ptr_type(ty);
return Ok(());
}
}
sym::atomic_xadd
| sym::atomic_xsub
| sym::atomic_and
| sym::atomic_nand
| sym::atomic_or
| sym::atomic_xor => {
let atom_op = match name {
sym::atomic_xchg => AtomicRmwBinOp::AtomicXchg,
sym::atomic_xadd => AtomicRmwBinOp::AtomicAdd,
sym::atomic_xsub => AtomicRmwBinOp::AtomicSub,
sym::atomic_and => AtomicRmwBinOp::AtomicAnd,
@ -462,14 +498,28 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
_ => unreachable!(),
};
let ty = fn_args.type_at(0);
if int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_raw_ptr() {
let ordering = fn_args.const_at(1).to_value();
let ptr = args[0].immediate();
let val = args[1].immediate();
bx.atomic_rmw(atom_op, ptr, val, parse_atomic_ordering(ordering))
// The type of the in-memory data.
let ty_mem = fn_args.type_at(0);
// The type of the 2nd operand, given by-value.
let ty_op = fn_args.type_at(1);
let ordering = fn_args.const_at(2).to_value();
// We require either both arguments to have the same integer type, or the first to
// be a pointer and the second to be `usize`.
if (int_type_width_signed(ty_mem, bx.tcx()).is_some() && ty_op == ty_mem)
|| (ty_mem.is_raw_ptr() && ty_op == bx.tcx().types.usize)
{
let ptr = args[0].immediate(); // of type "pointer to `ty_mem`"
let val = args[1].immediate(); // of type `ty_op`
bx.atomic_rmw(
atom_op,
ptr,
val,
parse_atomic_ordering(ordering),
/* ret_ptr */ ty_mem.is_raw_ptr(),
)
} else {
invalid_monomorphization_int_type(ty);
invalid_monomorphization_int_or_ptr_type(ty_mem);
return Ok(());
}
}

View file

@ -548,12 +548,15 @@ pub trait BuilderMethods<'a, 'tcx>:
failure_order: AtomicOrdering,
weak: bool,
) -> (Self::Value, Self::Value);
/// `ret_ptr` indicates whether the return type (which is also the type `dst` points to)
/// is a pointer or the same type as `src`.
fn atomic_rmw(
&mut self,
op: AtomicRmwBinOp,
dst: Self::Value,
src: Self::Value,
order: AtomicOrdering,
ret_ptr: bool,
) -> Self::Value;
fn atomic_fence(&mut self, order: AtomicOrdering, scope: SynchronizationScope);
fn set_invariant_load(&mut self, load: Self::Value);

View file

@ -652,16 +652,16 @@ pub(crate) fn check_intrinsic_type(
sym::atomic_store => (1, 1, vec![Ty::new_mut_ptr(tcx, param(0)), param(0)], tcx.types.unit),
sym::atomic_xchg
| sym::atomic_xadd
| sym::atomic_xsub
| sym::atomic_and
| sym::atomic_nand
| sym::atomic_or
| sym::atomic_xor
| sym::atomic_max
| sym::atomic_min
| sym::atomic_umax
| sym::atomic_umin => (1, 1, vec![Ty::new_mut_ptr(tcx, param(0)), param(0)], param(0)),
sym::atomic_xadd
| sym::atomic_xsub
| sym::atomic_and
| sym::atomic_nand
| sym::atomic_or
| sym::atomic_xor => (2, 1, vec![Ty::new_mut_ptr(tcx, param(0)), param(1)], param(0)),
sym::atomic_fence | sym::atomic_singlethreadfence => (0, 1, Vec::new(), tcx.types.unit),
other => {

View file

@ -150,69 +150,63 @@ pub unsafe fn atomic_xchg<T: Copy, const ORD: AtomicOrdering>(dst: *mut T, src:
/// Adds to the current value, returning the previous value.
/// `T` must be an integer or pointer type.
/// If `T` is a pointer type, the provenance of `src` is ignored: both the return value and the new
/// value stored at `*dst` will have the provenance of the old value stored there.
/// `U` must be the same as `T` if that is an integer type, or `usize` if `T` is a pointer type.
///
/// The stabilized version of this intrinsic is available on the
/// [`atomic`] types via the `fetch_add` method. For example, [`AtomicIsize::fetch_add`].
#[rustc_intrinsic]
#[rustc_nounwind]
pub unsafe fn atomic_xadd<T: Copy, const ORD: AtomicOrdering>(dst: *mut T, src: T) -> T;
pub unsafe fn atomic_xadd<T: Copy, U: Copy, const ORD: AtomicOrdering>(dst: *mut T, src: U) -> T;
/// Subtract from the current value, returning the previous value.
/// `T` must be an integer or pointer type.
/// If `T` is a pointer type, the provenance of `src` is ignored: both the return value and the new
/// value stored at `*dst` will have the provenance of the old value stored there.
/// `U` must be the same as `T` if that is an integer type, or `usize` if `T` is a pointer type.
///
/// The stabilized version of this intrinsic is available on the
/// [`atomic`] types via the `fetch_sub` method. For example, [`AtomicIsize::fetch_sub`].
#[rustc_intrinsic]
#[rustc_nounwind]
pub unsafe fn atomic_xsub<T: Copy, const ORD: AtomicOrdering>(dst: *mut T, src: T) -> T;
pub unsafe fn atomic_xsub<T: Copy, U: Copy, const ORD: AtomicOrdering>(dst: *mut T, src: U) -> T;
/// Bitwise and with the current value, returning the previous value.
/// `T` must be an integer or pointer type.
/// If `T` is a pointer type, the provenance of `src` is ignored: both the return value and the new
/// value stored at `*dst` will have the provenance of the old value stored there.
/// `U` must be the same as `T` if that is an integer type, or `usize` if `T` is a pointer type.
///
/// The stabilized version of this intrinsic is available on the
/// [`atomic`] types via the `fetch_and` method. For example, [`AtomicBool::fetch_and`].
#[rustc_intrinsic]
#[rustc_nounwind]
pub unsafe fn atomic_and<T: Copy, const ORD: AtomicOrdering>(dst: *mut T, src: T) -> T;
pub unsafe fn atomic_and<T: Copy, U: Copy, const ORD: AtomicOrdering>(dst: *mut T, src: U) -> T;
/// Bitwise nand with the current value, returning the previous value.
/// `T` must be an integer or pointer type.
/// If `T` is a pointer type, the provenance of `src` is ignored: both the return value and the new
/// value stored at `*dst` will have the provenance of the old value stored there.
/// `U` must be the same as `T` if that is an integer type, or `usize` if `T` is a pointer type.
///
/// The stabilized version of this intrinsic is available on the
/// [`AtomicBool`] type via the `fetch_nand` method. For example, [`AtomicBool::fetch_nand`].
#[rustc_intrinsic]
#[rustc_nounwind]
pub unsafe fn atomic_nand<T: Copy, const ORD: AtomicOrdering>(dst: *mut T, src: T) -> T;
pub unsafe fn atomic_nand<T: Copy, U: Copy, const ORD: AtomicOrdering>(dst: *mut T, src: U) -> T;
/// Bitwise or with the current value, returning the previous value.
/// `T` must be an integer or pointer type.
/// If `T` is a pointer type, the provenance of `src` is ignored: both the return value and the new
/// value stored at `*dst` will have the provenance of the old value stored there.
/// `U` must be the same as `T` if that is an integer type, or `usize` if `T` is a pointer type.
///
/// The stabilized version of this intrinsic is available on the
/// [`atomic`] types via the `fetch_or` method. For example, [`AtomicBool::fetch_or`].
#[rustc_intrinsic]
#[rustc_nounwind]
pub unsafe fn atomic_or<T: Copy, const ORD: AtomicOrdering>(dst: *mut T, src: T) -> T;
pub unsafe fn atomic_or<T: Copy, U: Copy, const ORD: AtomicOrdering>(dst: *mut T, src: U) -> T;
/// Bitwise xor with the current value, returning the previous value.
/// `T` must be an integer or pointer type.
/// If `T` is a pointer type, the provenance of `src` is ignored: both the return value and the new
/// value stored at `*dst` will have the provenance of the old value stored there.
/// `U` must be the same as `T` if that is an integer type, or `usize` if `T` is a pointer type.
///
/// The stabilized version of this intrinsic is available on the
/// [`atomic`] types via the `fetch_xor` method. For example, [`AtomicBool::fetch_xor`].
#[rustc_intrinsic]
#[rustc_nounwind]
pub unsafe fn atomic_xor<T: Copy, const ORD: AtomicOrdering>(dst: *mut T, src: T) -> T;
pub unsafe fn atomic_xor<T: Copy, U: Copy, const ORD: AtomicOrdering>(dst: *mut T, src: U) -> T;
/// Maximum with the current value using a signed comparison.
/// `T` must be a signed integer type.

View file

@ -2291,7 +2291,7 @@ impl<T> AtomicPtr<T> {
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
pub fn fetch_byte_add(&self, val: usize, order: Ordering) -> *mut T {
// SAFETY: data races are prevented by atomic intrinsics.
unsafe { atomic_add(self.p.get(), core::ptr::without_provenance_mut(val), order).cast() }
unsafe { atomic_add(self.p.get(), val, order).cast() }
}
/// Offsets the pointer's address by subtracting `val` *bytes*, returning the
@ -2316,9 +2316,10 @@ impl<T> AtomicPtr<T> {
/// #![feature(strict_provenance_atomic_ptr)]
/// use core::sync::atomic::{AtomicPtr, Ordering};
///
/// let atom = AtomicPtr::<i64>::new(core::ptr::without_provenance_mut(1));
/// assert_eq!(atom.fetch_byte_sub(1, Ordering::Relaxed).addr(), 1);
/// assert_eq!(atom.load(Ordering::Relaxed).addr(), 0);
/// let mut arr = [0i64, 1];
/// let atom = AtomicPtr::<i64>::new(&raw mut arr[1]);
/// assert_eq!(atom.fetch_byte_sub(8, Ordering::Relaxed).addr(), (&raw const arr[1]).addr());
/// assert_eq!(atom.load(Ordering::Relaxed).addr(), (&raw const arr[0]).addr());
/// ```
#[inline]
#[cfg(target_has_atomic = "ptr")]
@ -2326,7 +2327,7 @@ impl<T> AtomicPtr<T> {
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
pub fn fetch_byte_sub(&self, val: usize, order: Ordering) -> *mut T {
// SAFETY: data races are prevented by atomic intrinsics.
unsafe { atomic_sub(self.p.get(), core::ptr::without_provenance_mut(val), order).cast() }
unsafe { atomic_sub(self.p.get(), val, order).cast() }
}
/// Performs a bitwise "or" operation on the address of the current pointer,
@ -2377,7 +2378,7 @@ impl<T> AtomicPtr<T> {
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
pub fn fetch_or(&self, val: usize, order: Ordering) -> *mut T {
// SAFETY: data races are prevented by atomic intrinsics.
unsafe { atomic_or(self.p.get(), core::ptr::without_provenance_mut(val), order).cast() }
unsafe { atomic_or(self.p.get(), val, order).cast() }
}
/// Performs a bitwise "and" operation on the address of the current
@ -2427,7 +2428,7 @@ impl<T> AtomicPtr<T> {
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
pub fn fetch_and(&self, val: usize, order: Ordering) -> *mut T {
// SAFETY: data races are prevented by atomic intrinsics.
unsafe { atomic_and(self.p.get(), core::ptr::without_provenance_mut(val), order).cast() }
unsafe { atomic_and(self.p.get(), val, order).cast() }
}
/// Performs a bitwise "xor" operation on the address of the current
@ -2475,7 +2476,7 @@ impl<T> AtomicPtr<T> {
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
pub fn fetch_xor(&self, val: usize, order: Ordering) -> *mut T {
// SAFETY: data races are prevented by atomic intrinsics.
unsafe { atomic_xor(self.p.get(), core::ptr::without_provenance_mut(val), order).cast() }
unsafe { atomic_xor(self.p.get(), val, order).cast() }
}
/// Returns a mutable pointer to the underlying pointer.
@ -3975,15 +3976,15 @@ unsafe fn atomic_swap<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
#[inline]
#[cfg(target_has_atomic)]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
unsafe fn atomic_add<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
unsafe fn atomic_add<T: Copy, U: Copy>(dst: *mut T, val: U, order: Ordering) -> T {
// SAFETY: the caller must uphold the safety contract for `atomic_add`.
unsafe {
match order {
Relaxed => intrinsics::atomic_xadd::<T, { AO::Relaxed }>(dst, val),
Acquire => intrinsics::atomic_xadd::<T, { AO::Acquire }>(dst, val),
Release => intrinsics::atomic_xadd::<T, { AO::Release }>(dst, val),
AcqRel => intrinsics::atomic_xadd::<T, { AO::AcqRel }>(dst, val),
SeqCst => intrinsics::atomic_xadd::<T, { AO::SeqCst }>(dst, val),
Relaxed => intrinsics::atomic_xadd::<T, U, { AO::Relaxed }>(dst, val),
Acquire => intrinsics::atomic_xadd::<T, U, { AO::Acquire }>(dst, val),
Release => intrinsics::atomic_xadd::<T, U, { AO::Release }>(dst, val),
AcqRel => intrinsics::atomic_xadd::<T, U, { AO::AcqRel }>(dst, val),
SeqCst => intrinsics::atomic_xadd::<T, U, { AO::SeqCst }>(dst, val),
}
}
}
@ -3992,15 +3993,15 @@ unsafe fn atomic_add<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
#[inline]
#[cfg(target_has_atomic)]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
unsafe fn atomic_sub<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
unsafe fn atomic_sub<T: Copy, U: Copy>(dst: *mut T, val: U, order: Ordering) -> T {
// SAFETY: the caller must uphold the safety contract for `atomic_sub`.
unsafe {
match order {
Relaxed => intrinsics::atomic_xsub::<T, { AO::Relaxed }>(dst, val),
Acquire => intrinsics::atomic_xsub::<T, { AO::Acquire }>(dst, val),
Release => intrinsics::atomic_xsub::<T, { AO::Release }>(dst, val),
AcqRel => intrinsics::atomic_xsub::<T, { AO::AcqRel }>(dst, val),
SeqCst => intrinsics::atomic_xsub::<T, { AO::SeqCst }>(dst, val),
Relaxed => intrinsics::atomic_xsub::<T, U, { AO::Relaxed }>(dst, val),
Acquire => intrinsics::atomic_xsub::<T, U, { AO::Acquire }>(dst, val),
Release => intrinsics::atomic_xsub::<T, U, { AO::Release }>(dst, val),
AcqRel => intrinsics::atomic_xsub::<T, U, { AO::AcqRel }>(dst, val),
SeqCst => intrinsics::atomic_xsub::<T, U, { AO::SeqCst }>(dst, val),
}
}
}
@ -4141,15 +4142,15 @@ unsafe fn atomic_compare_exchange_weak<T: Copy>(
#[inline]
#[cfg(target_has_atomic)]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
unsafe fn atomic_and<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
unsafe fn atomic_and<T: Copy, U: Copy>(dst: *mut T, val: U, order: Ordering) -> T {
// SAFETY: the caller must uphold the safety contract for `atomic_and`
unsafe {
match order {
Relaxed => intrinsics::atomic_and::<T, { AO::Relaxed }>(dst, val),
Acquire => intrinsics::atomic_and::<T, { AO::Acquire }>(dst, val),
Release => intrinsics::atomic_and::<T, { AO::Release }>(dst, val),
AcqRel => intrinsics::atomic_and::<T, { AO::AcqRel }>(dst, val),
SeqCst => intrinsics::atomic_and::<T, { AO::SeqCst }>(dst, val),
Relaxed => intrinsics::atomic_and::<T, U, { AO::Relaxed }>(dst, val),
Acquire => intrinsics::atomic_and::<T, U, { AO::Acquire }>(dst, val),
Release => intrinsics::atomic_and::<T, U, { AO::Release }>(dst, val),
AcqRel => intrinsics::atomic_and::<T, U, { AO::AcqRel }>(dst, val),
SeqCst => intrinsics::atomic_and::<T, U, { AO::SeqCst }>(dst, val),
}
}
}
@ -4157,15 +4158,15 @@ unsafe fn atomic_and<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
#[inline]
#[cfg(target_has_atomic)]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
unsafe fn atomic_nand<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
unsafe fn atomic_nand<T: Copy, U: Copy>(dst: *mut T, val: U, order: Ordering) -> T {
// SAFETY: the caller must uphold the safety contract for `atomic_nand`
unsafe {
match order {
Relaxed => intrinsics::atomic_nand::<T, { AO::Relaxed }>(dst, val),
Acquire => intrinsics::atomic_nand::<T, { AO::Acquire }>(dst, val),
Release => intrinsics::atomic_nand::<T, { AO::Release }>(dst, val),
AcqRel => intrinsics::atomic_nand::<T, { AO::AcqRel }>(dst, val),
SeqCst => intrinsics::atomic_nand::<T, { AO::SeqCst }>(dst, val),
Relaxed => intrinsics::atomic_nand::<T, U, { AO::Relaxed }>(dst, val),
Acquire => intrinsics::atomic_nand::<T, U, { AO::Acquire }>(dst, val),
Release => intrinsics::atomic_nand::<T, U, { AO::Release }>(dst, val),
AcqRel => intrinsics::atomic_nand::<T, U, { AO::AcqRel }>(dst, val),
SeqCst => intrinsics::atomic_nand::<T, U, { AO::SeqCst }>(dst, val),
}
}
}
@ -4173,15 +4174,15 @@ unsafe fn atomic_nand<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
#[inline]
#[cfg(target_has_atomic)]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
unsafe fn atomic_or<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
unsafe fn atomic_or<T: Copy, U: Copy>(dst: *mut T, val: U, order: Ordering) -> T {
// SAFETY: the caller must uphold the safety contract for `atomic_or`
unsafe {
match order {
SeqCst => intrinsics::atomic_or::<T, { AO::SeqCst }>(dst, val),
Acquire => intrinsics::atomic_or::<T, { AO::Acquire }>(dst, val),
Release => intrinsics::atomic_or::<T, { AO::Release }>(dst, val),
AcqRel => intrinsics::atomic_or::<T, { AO::AcqRel }>(dst, val),
Relaxed => intrinsics::atomic_or::<T, { AO::Relaxed }>(dst, val),
SeqCst => intrinsics::atomic_or::<T, U, { AO::SeqCst }>(dst, val),
Acquire => intrinsics::atomic_or::<T, U, { AO::Acquire }>(dst, val),
Release => intrinsics::atomic_or::<T, U, { AO::Release }>(dst, val),
AcqRel => intrinsics::atomic_or::<T, U, { AO::AcqRel }>(dst, val),
Relaxed => intrinsics::atomic_or::<T, U, { AO::Relaxed }>(dst, val),
}
}
}
@ -4189,15 +4190,15 @@ unsafe fn atomic_or<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
#[inline]
#[cfg(target_has_atomic)]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
unsafe fn atomic_xor<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
unsafe fn atomic_xor<T: Copy, U: Copy>(dst: *mut T, val: U, order: Ordering) -> T {
// SAFETY: the caller must uphold the safety contract for `atomic_xor`
unsafe {
match order {
SeqCst => intrinsics::atomic_xor::<T, { AO::SeqCst }>(dst, val),
Acquire => intrinsics::atomic_xor::<T, { AO::Acquire }>(dst, val),
Release => intrinsics::atomic_xor::<T, { AO::Release }>(dst, val),
AcqRel => intrinsics::atomic_xor::<T, { AO::AcqRel }>(dst, val),
Relaxed => intrinsics::atomic_xor::<T, { AO::Relaxed }>(dst, val),
SeqCst => intrinsics::atomic_xor::<T, U, { AO::SeqCst }>(dst, val),
Acquire => intrinsics::atomic_xor::<T, U, { AO::Acquire }>(dst, val),
Release => intrinsics::atomic_xor::<T, U, { AO::Release }>(dst, val),
AcqRel => intrinsics::atomic_xor::<T, U, { AO::AcqRel }>(dst, val),
Relaxed => intrinsics::atomic_xor::<T, U, { AO::Relaxed }>(dst, val),
}
}
}

View file

@ -105,27 +105,27 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
}
"or" => {
let ord = get_ord_at(1);
let ord = get_ord_at(2);
this.atomic_rmw_op(args, dest, AtomicOp::MirOp(BinOp::BitOr, false), rw_ord(ord))?;
}
"xor" => {
let ord = get_ord_at(1);
let ord = get_ord_at(2);
this.atomic_rmw_op(args, dest, AtomicOp::MirOp(BinOp::BitXor, false), rw_ord(ord))?;
}
"and" => {
let ord = get_ord_at(1);
let ord = get_ord_at(2);
this.atomic_rmw_op(args, dest, AtomicOp::MirOp(BinOp::BitAnd, false), rw_ord(ord))?;
}
"nand" => {
let ord = get_ord_at(1);
let ord = get_ord_at(2);
this.atomic_rmw_op(args, dest, AtomicOp::MirOp(BinOp::BitAnd, true), rw_ord(ord))?;
}
"xadd" => {
let ord = get_ord_at(1);
let ord = get_ord_at(2);
this.atomic_rmw_op(args, dest, AtomicOp::MirOp(BinOp::Add, false), rw_ord(ord))?;
}
"xsub" => {
let ord = get_ord_at(1);
let ord = get_ord_at(2);
this.atomic_rmw_op(args, dest, AtomicOp::MirOp(BinOp::Sub, false), rw_ord(ord))?;
}
"min" => {
@ -231,15 +231,14 @@ trait EvalContextPrivExt<'tcx>: MiriInterpCxExt<'tcx> {
let place = this.deref_pointer(place)?;
let rhs = this.read_immediate(rhs)?;
if !place.layout.ty.is_integral() && !place.layout.ty.is_raw_ptr() {
if !(place.layout.ty.is_integral() || place.layout.ty.is_raw_ptr())
|| !(rhs.layout.ty.is_integral() || rhs.layout.ty.is_raw_ptr())
{
span_bug!(
this.cur_span(),
"atomic arithmetic operations only work on integer and raw pointer types",
);
}
if rhs.layout.ty != place.layout.ty {
span_bug!(this.cur_span(), "atomic arithmetic operation type mismatch");
}
let old = match atomic_op {
AtomicOp::Min =>

View file

@ -50,17 +50,13 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
}
// Some more operations are possible with atomics.
// The return value always has the provenance of the *left* operand.
// The RHS must be `usize`.
Add | Sub | BitOr | BitAnd | BitXor => {
assert!(left.layout.ty.is_raw_ptr());
assert!(right.layout.ty.is_raw_ptr());
assert_eq!(right.layout.ty, this.tcx.types.usize);
let ptr = left.to_scalar().to_pointer(this)?;
// We do the actual operation with usize-typed scalars.
let left = ImmTy::from_uint(ptr.addr().bytes(), this.machine.layouts.usize);
let right = ImmTy::from_uint(
right.to_scalar().to_target_usize(this)?,
this.machine.layouts.usize,
);
let result = this.binary_op(bin_op, &left, &right)?;
// Construct a new pointer with the provenance of `ptr` (the LHS).
let result_ptr = Pointer::new(

View file

@ -1,5 +1,5 @@
// LLVM does not support some atomic RMW operations on pointers, so inside codegen we lower those
// to integer atomics, surrounded by casts to and from integer type.
// to integer atomics, followed by an inttoptr cast.
// This test ensures that we do the round-trip correctly for AtomicPtr::fetch_byte_add, and also
// ensures that we do not have such a round-trip for AtomicPtr::swap, because LLVM supports pointer
// arguments to `atomicrmw xchg`.
@ -20,8 +20,8 @@ pub fn helper(_: usize) {}
// CHECK-LABEL: @atomicptr_fetch_byte_add
#[no_mangle]
pub fn atomicptr_fetch_byte_add(a: &AtomicPtr<u8>, v: usize) -> *mut u8 {
// CHECK: %[[INTPTR:.*]] = ptrtoint ptr %{{.*}} to [[USIZE]]
// CHECK-NEXT: %[[RET:.*]] = atomicrmw add ptr %{{.*}}, [[USIZE]] %[[INTPTR]]
// CHECK: llvm.lifetime.start
// CHECK-NEXT: %[[RET:.*]] = atomicrmw add ptr %{{.*}}, [[USIZE]] %v
// CHECK-NEXT: inttoptr [[USIZE]] %[[RET]] to ptr
a.fetch_byte_add(v, Relaxed)
}

View file

@ -14,7 +14,7 @@ pub enum AtomicOrdering {
}
#[rustc_intrinsic]
unsafe fn atomic_xadd<T, const ORD: AtomicOrdering>(dst: *mut T, src: T) -> T;
unsafe fn atomic_xadd<T, U, const ORD: AtomicOrdering>(dst: *mut T, src: U) -> T;
#[lang = "pointee_sized"]
pub trait PointeeSized {}
@ -35,51 +35,62 @@ impl<T: ?Sized> Copy for *mut T {}
impl ConstParamTy for AtomicOrdering {}
#[cfg(target_has_atomic = "8")]
#[unsafe(no_mangle)] // let's make sure we actually generate a symbol to check
pub unsafe fn atomic_u8(x: *mut u8) {
atomic_xadd::<_, { AtomicOrdering::SeqCst }>(x, 1);
atomic_xadd::<_, { AtomicOrdering::SeqCst }>(x, 1);
atomic_xadd::<_, _, { AtomicOrdering::SeqCst }>(x, 1u8);
}
#[cfg(target_has_atomic = "8")]
#[unsafe(no_mangle)]
pub unsafe fn atomic_i8(x: *mut i8) {
atomic_xadd::<_, { AtomicOrdering::SeqCst }>(x, 1);
atomic_xadd::<_, _, { AtomicOrdering::SeqCst }>(x, 1i8);
}
#[cfg(target_has_atomic = "16")]
#[unsafe(no_mangle)]
pub unsafe fn atomic_u16(x: *mut u16) {
atomic_xadd::<_, { AtomicOrdering::SeqCst }>(x, 1);
atomic_xadd::<_, _, { AtomicOrdering::SeqCst }>(x, 1u16);
}
#[cfg(target_has_atomic = "16")]
#[unsafe(no_mangle)]
pub unsafe fn atomic_i16(x: *mut i16) {
atomic_xadd::<_, { AtomicOrdering::SeqCst }>(x, 1);
atomic_xadd::<_, _, { AtomicOrdering::SeqCst }>(x, 1i16);
}
#[cfg(target_has_atomic = "32")]
#[unsafe(no_mangle)]
pub unsafe fn atomic_u32(x: *mut u32) {
atomic_xadd::<_, { AtomicOrdering::SeqCst }>(x, 1);
atomic_xadd::<_, _, { AtomicOrdering::SeqCst }>(x, 1u32);
}
#[cfg(target_has_atomic = "32")]
#[unsafe(no_mangle)]
pub unsafe fn atomic_i32(x: *mut i32) {
atomic_xadd::<_, { AtomicOrdering::SeqCst }>(x, 1);
atomic_xadd::<_, _, { AtomicOrdering::SeqCst }>(x, 1i32);
}
#[cfg(target_has_atomic = "64")]
#[unsafe(no_mangle)]
pub unsafe fn atomic_u64(x: *mut u64) {
atomic_xadd::<_, { AtomicOrdering::SeqCst }>(x, 1);
atomic_xadd::<_, _, { AtomicOrdering::SeqCst }>(x, 1u64);
}
#[cfg(target_has_atomic = "64")]
#[unsafe(no_mangle)]
pub unsafe fn atomic_i64(x: *mut i64) {
atomic_xadd::<_, { AtomicOrdering::SeqCst }>(x, 1);
atomic_xadd::<_, _, { AtomicOrdering::SeqCst }>(x, 1i64);
}
#[cfg(target_has_atomic = "128")]
#[unsafe(no_mangle)]
pub unsafe fn atomic_u128(x: *mut u128) {
atomic_xadd::<_, { AtomicOrdering::SeqCst }>(x, 1);
atomic_xadd::<_, _, { AtomicOrdering::SeqCst }>(x, 1u128);
}
#[cfg(target_has_atomic = "128")]
#[unsafe(no_mangle)]
pub unsafe fn atomic_i128(x: *mut i128) {
atomic_xadd::<_, { AtomicOrdering::SeqCst }>(x, 1);
atomic_xadd::<_, _, { AtomicOrdering::SeqCst }>(x, 1i128);
}
#[cfg(target_has_atomic = "ptr")]
#[unsafe(no_mangle)]
pub unsafe fn atomic_usize(x: *mut usize) {
atomic_xadd::<_, { AtomicOrdering::SeqCst }>(x, 1);
atomic_xadd::<_, _, { AtomicOrdering::SeqCst }>(x, 1usize);
}
#[cfg(target_has_atomic = "ptr")]
#[unsafe(no_mangle)]
pub unsafe fn atomic_isize(x: *mut isize) {
atomic_xadd::<_, { AtomicOrdering::SeqCst }>(x, 1);
atomic_xadd::<_, _, { AtomicOrdering::SeqCst }>(x, 1isize);
}

View file

@ -33,14 +33,14 @@ pub fn main() {
assert_eq!(rusti::atomic_xchg::<_, { Release }>(&mut *x, 0), 1);
assert_eq!(*x, 0);
assert_eq!(rusti::atomic_xadd::<_, { SeqCst }>(&mut *x, 1), 0);
assert_eq!(rusti::atomic_xadd::<_, { Acquire }>(&mut *x, 1), 1);
assert_eq!(rusti::atomic_xadd::<_, { Release }>(&mut *x, 1), 2);
assert_eq!(rusti::atomic_xadd::<_, _, { SeqCst }>(&mut *x, 1), 0);
assert_eq!(rusti::atomic_xadd::<_, _, { Acquire }>(&mut *x, 1), 1);
assert_eq!(rusti::atomic_xadd::<_, _, { Release }>(&mut *x, 1), 2);
assert_eq!(*x, 3);
assert_eq!(rusti::atomic_xsub::<_, { SeqCst }>(&mut *x, 1), 3);
assert_eq!(rusti::atomic_xsub::<_, { Acquire }>(&mut *x, 1), 2);
assert_eq!(rusti::atomic_xsub::<_, { Release }>(&mut *x, 1), 1);
assert_eq!(rusti::atomic_xsub::<_, _, { SeqCst }>(&mut *x, 1), 3);
assert_eq!(rusti::atomic_xsub::<_, _, { Acquire }>(&mut *x, 1), 2);
assert_eq!(rusti::atomic_xsub::<_, _, { Release }>(&mut *x, 1), 1);
assert_eq!(*x, 0);
loop {

View file

@ -13,80 +13,80 @@ pub type Quux = [u8; 100];
pub unsafe fn test_bool_load(p: &mut bool, v: bool) {
intrinsics::atomic_load::<_, { SeqCst }>(p);
//~^ ERROR `atomic_load` intrinsic: expected basic integer type, found `bool`
//~^ ERROR `atomic_load` intrinsic: expected basic integer or pointer type, found `bool`
}
pub unsafe fn test_bool_store(p: &mut bool, v: bool) {
intrinsics::atomic_store::<_, { SeqCst }>(p, v);
//~^ ERROR `atomic_store` intrinsic: expected basic integer type, found `bool`
//~^ ERROR `atomic_store` intrinsic: expected basic integer or pointer type, found `bool`
}
pub unsafe fn test_bool_xchg(p: &mut bool, v: bool) {
intrinsics::atomic_xchg::<_, { SeqCst }>(p, v);
//~^ ERROR `atomic_xchg` intrinsic: expected basic integer type, found `bool`
//~^ ERROR `atomic_xchg` intrinsic: expected basic integer or pointer type, found `bool`
}
pub unsafe fn test_bool_cxchg(p: &mut bool, v: bool) {
intrinsics::atomic_cxchg::<_, { SeqCst }, { SeqCst }>(p, v, v);
//~^ ERROR `atomic_cxchg` intrinsic: expected basic integer type, found `bool`
//~^ ERROR `atomic_cxchg` intrinsic: expected basic integer or pointer type, found `bool`
}
pub unsafe fn test_Foo_load(p: &mut Foo, v: Foo) {
intrinsics::atomic_load::<_, { SeqCst }>(p);
//~^ ERROR `atomic_load` intrinsic: expected basic integer type, found `Foo`
//~^ ERROR `atomic_load` intrinsic: expected basic integer or pointer type, found `Foo`
}
pub unsafe fn test_Foo_store(p: &mut Foo, v: Foo) {
intrinsics::atomic_store::<_, { SeqCst }>(p, v);
//~^ ERROR `atomic_store` intrinsic: expected basic integer type, found `Foo`
//~^ ERROR `atomic_store` intrinsic: expected basic integer or pointer type, found `Foo`
}
pub unsafe fn test_Foo_xchg(p: &mut Foo, v: Foo) {
intrinsics::atomic_xchg::<_, { SeqCst }>(p, v);
//~^ ERROR `atomic_xchg` intrinsic: expected basic integer type, found `Foo`
//~^ ERROR `atomic_xchg` intrinsic: expected basic integer or pointer type, found `Foo`
}
pub unsafe fn test_Foo_cxchg(p: &mut Foo, v: Foo) {
intrinsics::atomic_cxchg::<_, { SeqCst }, { SeqCst }>(p, v, v);
//~^ ERROR `atomic_cxchg` intrinsic: expected basic integer type, found `Foo`
//~^ ERROR `atomic_cxchg` intrinsic: expected basic integer or pointer type, found `Foo`
}
pub unsafe fn test_Bar_load(p: &mut Bar, v: Bar) {
intrinsics::atomic_load::<_, { SeqCst }>(p);
//~^ ERROR expected basic integer type, found `&dyn Fn()`
//~^ ERROR expected basic integer or pointer type, found `&dyn Fn()`
}
pub unsafe fn test_Bar_store(p: &mut Bar, v: Bar) {
intrinsics::atomic_store::<_, { SeqCst }>(p, v);
//~^ ERROR expected basic integer type, found `&dyn Fn()`
//~^ ERROR expected basic integer or pointer type, found `&dyn Fn()`
}
pub unsafe fn test_Bar_xchg(p: &mut Bar, v: Bar) {
intrinsics::atomic_xchg::<_, { SeqCst }>(p, v);
//~^ ERROR expected basic integer type, found `&dyn Fn()`
//~^ ERROR expected basic integer or pointer type, found `&dyn Fn()`
}
pub unsafe fn test_Bar_cxchg(p: &mut Bar, v: Bar) {
intrinsics::atomic_cxchg::<_, { SeqCst }, { SeqCst }>(p, v, v);
//~^ ERROR expected basic integer type, found `&dyn Fn()`
//~^ ERROR expected basic integer or pointer type, found `&dyn Fn()`
}
pub unsafe fn test_Quux_load(p: &mut Quux, v: Quux) {
intrinsics::atomic_load::<_, { SeqCst }>(p);
//~^ ERROR `atomic_load` intrinsic: expected basic integer type, found `[u8; 100]`
//~^ ERROR `atomic_load` intrinsic: expected basic integer or pointer type, found `[u8; 100]`
}
pub unsafe fn test_Quux_store(p: &mut Quux, v: Quux) {
intrinsics::atomic_store::<_, { SeqCst }>(p, v);
//~^ ERROR `atomic_store` intrinsic: expected basic integer type, found `[u8; 100]`
//~^ ERROR `atomic_store` intrinsic: expected basic integer or pointer type, found `[u8; 100]`
}
pub unsafe fn test_Quux_xchg(p: &mut Quux, v: Quux) {
intrinsics::atomic_xchg::<_, { SeqCst }>(p, v);
//~^ ERROR `atomic_xchg` intrinsic: expected basic integer type, found `[u8; 100]`
//~^ ERROR `atomic_xchg` intrinsic: expected basic integer or pointer type, found `[u8; 100]`
}
pub unsafe fn test_Quux_cxchg(p: &mut Quux, v: Quux) {
intrinsics::atomic_cxchg::<_, { SeqCst }, { SeqCst }>(p, v, v);
//~^ ERROR `atomic_cxchg` intrinsic: expected basic integer type, found `[u8; 100]`
//~^ ERROR `atomic_cxchg` intrinsic: expected basic integer or pointer type, found `[u8; 100]`
}

View file

@ -1,94 +1,94 @@
error[E0511]: invalid monomorphization of `atomic_load` intrinsic: expected basic integer type, found `bool`
error[E0511]: invalid monomorphization of `atomic_load` intrinsic: expected basic integer or pointer type, found `bool`
--> $DIR/non-integer-atomic.rs:15:5
|
LL | intrinsics::atomic_load::<_, { SeqCst }>(p);
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
error[E0511]: invalid monomorphization of `atomic_store` intrinsic: expected basic integer type, found `bool`
error[E0511]: invalid monomorphization of `atomic_store` intrinsic: expected basic integer or pointer type, found `bool`
--> $DIR/non-integer-atomic.rs:20:5
|
LL | intrinsics::atomic_store::<_, { SeqCst }>(p, v);
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
error[E0511]: invalid monomorphization of `atomic_xchg` intrinsic: expected basic integer type, found `bool`
error[E0511]: invalid monomorphization of `atomic_xchg` intrinsic: expected basic integer or pointer type, found `bool`
--> $DIR/non-integer-atomic.rs:25:5
|
LL | intrinsics::atomic_xchg::<_, { SeqCst }>(p, v);
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
error[E0511]: invalid monomorphization of `atomic_cxchg` intrinsic: expected basic integer type, found `bool`
error[E0511]: invalid monomorphization of `atomic_cxchg` intrinsic: expected basic integer or pointer type, found `bool`
--> $DIR/non-integer-atomic.rs:30:5
|
LL | intrinsics::atomic_cxchg::<_, { SeqCst }, { SeqCst }>(p, v, v);
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
error[E0511]: invalid monomorphization of `atomic_load` intrinsic: expected basic integer type, found `Foo`
error[E0511]: invalid monomorphization of `atomic_load` intrinsic: expected basic integer or pointer type, found `Foo`
--> $DIR/non-integer-atomic.rs:35:5
|
LL | intrinsics::atomic_load::<_, { SeqCst }>(p);
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
error[E0511]: invalid monomorphization of `atomic_store` intrinsic: expected basic integer type, found `Foo`
error[E0511]: invalid monomorphization of `atomic_store` intrinsic: expected basic integer or pointer type, found `Foo`
--> $DIR/non-integer-atomic.rs:40:5
|
LL | intrinsics::atomic_store::<_, { SeqCst }>(p, v);
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
error[E0511]: invalid monomorphization of `atomic_xchg` intrinsic: expected basic integer type, found `Foo`
error[E0511]: invalid monomorphization of `atomic_xchg` intrinsic: expected basic integer or pointer type, found `Foo`
--> $DIR/non-integer-atomic.rs:45:5
|
LL | intrinsics::atomic_xchg::<_, { SeqCst }>(p, v);
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
error[E0511]: invalid monomorphization of `atomic_cxchg` intrinsic: expected basic integer type, found `Foo`
error[E0511]: invalid monomorphization of `atomic_cxchg` intrinsic: expected basic integer or pointer type, found `Foo`
--> $DIR/non-integer-atomic.rs:50:5
|
LL | intrinsics::atomic_cxchg::<_, { SeqCst }, { SeqCst }>(p, v, v);
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
error[E0511]: invalid monomorphization of `atomic_load` intrinsic: expected basic integer type, found `&dyn Fn()`
error[E0511]: invalid monomorphization of `atomic_load` intrinsic: expected basic integer or pointer type, found `&dyn Fn()`
--> $DIR/non-integer-atomic.rs:55:5
|
LL | intrinsics::atomic_load::<_, { SeqCst }>(p);
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
error[E0511]: invalid monomorphization of `atomic_store` intrinsic: expected basic integer type, found `&dyn Fn()`
error[E0511]: invalid monomorphization of `atomic_store` intrinsic: expected basic integer or pointer type, found `&dyn Fn()`
--> $DIR/non-integer-atomic.rs:60:5
|
LL | intrinsics::atomic_store::<_, { SeqCst }>(p, v);
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
error[E0511]: invalid monomorphization of `atomic_xchg` intrinsic: expected basic integer type, found `&dyn Fn()`
error[E0511]: invalid monomorphization of `atomic_xchg` intrinsic: expected basic integer or pointer type, found `&dyn Fn()`
--> $DIR/non-integer-atomic.rs:65:5
|
LL | intrinsics::atomic_xchg::<_, { SeqCst }>(p, v);
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
error[E0511]: invalid monomorphization of `atomic_cxchg` intrinsic: expected basic integer type, found `&dyn Fn()`
error[E0511]: invalid monomorphization of `atomic_cxchg` intrinsic: expected basic integer or pointer type, found `&dyn Fn()`
--> $DIR/non-integer-atomic.rs:70:5
|
LL | intrinsics::atomic_cxchg::<_, { SeqCst }, { SeqCst }>(p, v, v);
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
error[E0511]: invalid monomorphization of `atomic_load` intrinsic: expected basic integer type, found `[u8; 100]`
error[E0511]: invalid monomorphization of `atomic_load` intrinsic: expected basic integer or pointer type, found `[u8; 100]`
--> $DIR/non-integer-atomic.rs:75:5
|
LL | intrinsics::atomic_load::<_, { SeqCst }>(p);
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
error[E0511]: invalid monomorphization of `atomic_store` intrinsic: expected basic integer type, found `[u8; 100]`
error[E0511]: invalid monomorphization of `atomic_store` intrinsic: expected basic integer or pointer type, found `[u8; 100]`
--> $DIR/non-integer-atomic.rs:80:5
|
LL | intrinsics::atomic_store::<_, { SeqCst }>(p, v);
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
error[E0511]: invalid monomorphization of `atomic_xchg` intrinsic: expected basic integer type, found `[u8; 100]`
error[E0511]: invalid monomorphization of `atomic_xchg` intrinsic: expected basic integer or pointer type, found `[u8; 100]`
--> $DIR/non-integer-atomic.rs:85:5
|
LL | intrinsics::atomic_xchg::<_, { SeqCst }>(p, v);
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
error[E0511]: invalid monomorphization of `atomic_cxchg` intrinsic: expected basic integer type, found `[u8; 100]`
error[E0511]: invalid monomorphization of `atomic_cxchg` intrinsic: expected basic integer or pointer type, found `[u8; 100]`
--> $DIR/non-integer-atomic.rs:90:5
|
LL | intrinsics::atomic_cxchg::<_, { SeqCst }, { SeqCst }>(p, v, v);