Add -Zannotate-moves for profiler visibility of move/copy operations

This implements a new unstable compiler flag `-Zannotate-moves` that makes
move and copy operations visible in profilers by creating synthetic debug
information. This is achieved with zero runtime cost by manipulating debug
info scopes to make moves/copies appear as calls to `compiler_move<T, SIZE>`
and `compiler_copy<T, SIZE>` marker functions in profiling tools.

This allows developers to identify expensive move/copy operations in their
code using standard profiling tools, without requiring specialized tooling
or runtime instrumentation.

The implementation works at codegen time. When processing MIR operands
(`Operand::Move` and `Operand::Copy`), the codegen creates an `OperandRef`
with an optional `move_annotation` field containing an `Instance` of the
appropriate profiling marker function. When storing the operand,
`store_with_annotation()` wraps the store operation in a synthetic debug
scope that makes it appear inlined from the marker.

Two marker functions (`compiler_move` and `compiler_copy`) are defined
in `library/core/src/profiling.rs`. These are never actually called -
they exist solely as debug info anchors.

Operations are only annotated if the type:
   - Meets the size threshold (default: 65 bytes, configurable via
     `-Zannotate-moves=SIZE`)
   - Has a non-scalar backend representation (scalars use registers,
     not memcpy)

This has a very small impact on object file size. With the default limit
it's well under 0.1%, and even with a very small limit of 8 bytes it's
still only ~1.5%. This could be enabled by default.
This commit is contained in:
Jeremy Fitzhardinge 2025-10-12 22:41:03 -07:00 committed by Jeremy Fitzhardinge
parent c90bcb9571
commit 5f29f11a4d
28 changed files with 897 additions and 48 deletions

View file

@ -1069,7 +1069,7 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
OperandValue::Ref(place.val)
};
OperandRef { val, layout: place.layout }
OperandRef { val, layout: place.layout, move_annotation: None }
}
fn write_operand_repeatedly(

View file

@ -19,7 +19,7 @@ use crate::context::CodegenCx;
pub(super) const UNKNOWN_LINE_NUMBER: u32 = 0;
pub(super) const UNKNOWN_COLUMN_NUMBER: u32 = 0;
impl<'a, 'gcc, 'tcx> DebugInfoBuilderMethods for Builder<'a, 'gcc, 'tcx> {
impl<'a, 'gcc, 'tcx> DebugInfoBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
// FIXME(eddyb) find a common convention for all of the debuginfo-related
// names (choose between `dbg`, `debug`, `debuginfo`, `debug_info` etc.).
fn dbg_var_addr(

View file

@ -253,7 +253,7 @@ impl<'ll, 'tcx> ArgAbiExt<'ll, 'tcx> for ArgAbi<'tcx, Ty<'tcx>> {
);
bx.lifetime_end(llscratch, scratch_size);
}
_ => {
PassMode::Pair(..) | PassMode::Direct { .. } => {
OperandRef::from_immediate_or_packed_pair(bx, val, self.layout).val.store(bx, dst);
}
}

View file

@ -751,7 +751,7 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
OperandValue::Ref(place.val)
};
OperandRef { val, layout: place.layout }
OperandRef { val, layout: place.layout, move_annotation: None }
}
fn write_operand_repeatedly(

View file

@ -146,7 +146,7 @@ impl<'ll> Builder<'_, 'll, '_> {
}
}
impl<'ll> DebugInfoBuilderMethods for Builder<'_, 'll, '_> {
impl<'ll, 'tcx> DebugInfoBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> {
// FIXME(eddyb) find a common convention for all of the debuginfo-related
// names (choose between `dbg`, `debug`, `debuginfo`, `debug_info` etc.).
fn dbg_var_addr(
@ -284,6 +284,57 @@ impl<'ll> DebugInfoBuilderMethods for Builder<'_, 'll, '_> {
llvm::set_value_name(value, name.as_bytes());
}
}
/// Annotate move/copy operations with debug info for profiling.
///
/// This creates a temporary debug scope that makes the move/copy appear as an inlined call to
/// `compiler_move<T, SIZE>()` or `compiler_copy<T, SIZE>()`. The provided closure is executed
/// with this temporary debug location active.
///
/// The `instance` parameter should be the monomorphized instance of the `compiler_move` or
/// `compiler_copy` function with the actual type and size.
///
/// Returns whatever the closure returns; the builder's debug location is restored (or cleared,
/// if there was none) before returning.
fn with_move_annotation<R>(
&mut self,
instance: ty::Instance<'tcx>,
f: impl FnOnce(&mut Self) -> R,
) -> R {
// Save the current debug location so it can be restored after the closure runs.
let saved_loc = self.get_dbg_loc();
// Create a DIScope for the compiler_move/compiler_copy function.
// We use the function's FnAbi for debug info generation.
let fn_abi = self
.cx()
.tcx
.fn_abi_of_instance(
self.cx().typing_env().as_query_input((instance, ty::List::empty())),
)
.unwrap();
let di_scope = self.cx().dbg_scope_fn(instance, fn_abi, None);
// Create an inlined debug location:
// - scope: the compiler_move/compiler_copy function
// - inlined_at: the current location (where the move/copy actually occurs)
// - span: use the function's definition span
let fn_span = self.cx().tcx.def_span(instance.def_id());
let inlined_loc = self.cx().dbg_loc(di_scope, saved_loc, fn_span);
// Set the temporary debug location
self.set_dbg_loc(inlined_loc);
// Execute the closure (which will generate the memcpy)
let result = f(self);
// Restore the original debug location; if there was none, clear the temporary
// one so that later instructions don't inherit the synthetic scope.
if let Some(loc) = saved_loc {
self.set_dbg_loc(loc);
} else {
self.clear_dbg_loc();
}
result
}
}
/// A source code location used to generate debug information.

View file

@ -557,9 +557,11 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
let op = match self.locals[mir::RETURN_PLACE] {
LocalRef::Operand(op) => op,
LocalRef::PendingOperand => bug!("use of return before def"),
LocalRef::Place(cg_place) => {
OperandRef { val: Ref(cg_place.val), layout: cg_place.layout }
}
LocalRef::Place(cg_place) => OperandRef {
val: Ref(cg_place.val),
layout: cg_place.layout,
move_annotation: None,
},
LocalRef::UnsizedPlace(_) => bug!("return type must be sized"),
};
let llslot = match op.val {
@ -1155,7 +1157,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
| (&mir::Operand::Constant(_), Ref(PlaceValue { llextra: None, .. })) => {
let tmp = PlaceRef::alloca(bx, op.layout);
bx.lifetime_start(tmp.val.llval, tmp.layout.size);
op.val.store(bx, tmp);
op.store_with_annotation(bx, tmp);
op.val = Ref(tmp.val);
lifetime_ends_after_call.push((tmp.val.llval, tmp.layout.size));
}
@ -1563,13 +1565,13 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
};
let scratch = PlaceValue::alloca(bx, arg.layout.size, required_align);
bx.lifetime_start(scratch.llval, arg.layout.size);
op.val.store(bx, scratch.with_type(arg.layout));
op.store_with_annotation(bx, scratch.with_type(arg.layout));
lifetime_ends_after_call.push((scratch.llval, arg.layout.size));
(scratch.llval, scratch.align, true)
}
PassMode::Cast { .. } => {
let scratch = PlaceRef::alloca(bx, arg.layout);
op.val.store(bx, scratch);
op.store_with_annotation(bx, scratch);
(scratch.val.llval, scratch.val.align, true)
}
_ => (op.immediate_or_packed_pair(bx), arg.layout.align.abi, false),

View file

@ -480,6 +480,7 @@ fn arg_local_refs<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
return local(OperandRef {
val: OperandValue::Pair(a, b),
layout: arg.layout,
move_annotation: None,
});
}
_ => {}
@ -552,6 +553,7 @@ fn arg_local_refs<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
fx.caller_location = Some(OperandRef {
val: OperandValue::Immediate(bx.get_param(llarg_idx)),
layout: arg.layout,
move_annotation: None,
});
}

View file

@ -5,12 +5,13 @@ use rustc_abi as abi;
use rustc_abi::{
Align, BackendRepr, FIRST_VARIANT, FieldIdx, Primitive, Size, TagEncoding, VariantIdx, Variants,
};
use rustc_hir::LangItem;
use rustc_middle::mir::interpret::{Pointer, Scalar, alloc_range};
use rustc_middle::mir::{self, ConstValue};
use rustc_middle::ty::Ty;
use rustc_middle::ty::layout::{LayoutOf, TyAndLayout};
use rustc_middle::ty::{self, Ty};
use rustc_middle::{bug, span_bug};
use rustc_session::config::OptLevel;
use rustc_session::config::{AnnotateMoves, DebugInfo, OptLevel};
use tracing::{debug, instrument};
use super::place::{PlaceRef, PlaceValue};
@ -131,6 +132,10 @@ pub struct OperandRef<'tcx, V> {
/// The layout of value, based on its Rust type.
pub layout: TyAndLayout<'tcx>,
/// Annotation for profiler visibility of move/copy operations.
/// When set, the store operation should appear as an inlined call to this function.
pub move_annotation: Option<ty::Instance<'tcx>>,
}
impl<V: CodegenObject> fmt::Debug for OperandRef<'_, V> {
@ -142,7 +147,7 @@ impl<V: CodegenObject> fmt::Debug for OperandRef<'_, V> {
impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> {
pub fn zero_sized(layout: TyAndLayout<'tcx>) -> OperandRef<'tcx, V> {
assert!(layout.is_zst());
OperandRef { val: OperandValue::ZeroSized, layout }
OperandRef { val: OperandValue::ZeroSized, layout, move_annotation: None }
}
pub(crate) fn from_const<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
@ -180,7 +185,7 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> {
}
};
OperandRef { val, layout }
OperandRef { val, layout, move_annotation: None }
}
fn from_const_alloc<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
@ -214,7 +219,7 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> {
let size = s.size(bx);
assert_eq!(size, layout.size, "abi::Scalar size does not match layout size");
let val = read_scalar(offset, size, s, bx.immediate_backend_type(layout));
OperandRef { val: OperandValue::Immediate(val), layout }
OperandRef { val: OperandValue::Immediate(val), layout, move_annotation: None }
}
BackendRepr::ScalarPair(
a @ abi::Scalar::Initialized { .. },
@ -235,7 +240,7 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> {
b,
bx.scalar_pair_element_backend_type(layout, 1, true),
);
OperandRef { val: OperandValue::Pair(a_val, b_val), layout }
OperandRef { val: OperandValue::Pair(a_val, b_val), layout, move_annotation: None }
}
_ if layout.is_zst() => OperandRef::zero_sized(layout),
_ => {
@ -285,6 +290,22 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> {
self.val.deref(layout.align.abi).with_type(layout)
}
/// Store this operand into a place, applying move/copy annotation if present.
///
/// Prefer this over calling `OperandValue::store` directly: it transparently wraps the store
/// in the profiler annotation scope when one was recorded for this operand, and degenerates to
/// a plain store otherwise.
pub fn store_with_annotation<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
    self,
    bx: &mut Bx,
    dest: PlaceRef<'tcx, V>,
) {
    match self.move_annotation {
        // Emit the store inside a synthetic debug scope so that profilers see it as an
        // inlined call to compiler_move/compiler_copy.
        Some(marker) => bx.with_move_annotation(marker, |bx| self.val.store(bx, dest)),
        // No annotation recorded for this operand: just do a plain store.
        None => self.val.store(bx, dest),
    }
}
/// If this operand is a `Pair`, we return an aggregate with the two values.
/// For other cases, see `immediate`.
pub fn immediate_or_packed_pair<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
@ -320,7 +341,7 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> {
} else {
OperandValue::Immediate(llval)
};
OperandRef { val, layout }
OperandRef { val, layout, move_annotation: None }
}
pub(crate) fn extract_field<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
@ -388,7 +409,7 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> {
})
};
OperandRef { val, layout: field }
OperandRef { val, layout: field, move_annotation: None }
}
/// Obtain the actual discriminant of a value.
@ -828,10 +849,15 @@ impl<'a, 'tcx, V: CodegenObject> OperandRefBuilder<'tcx, V> {
}
},
};
OperandRef { val, layout }
OperandRef { val, layout, move_annotation: None }
}
}
/// Default size limit for move/copy annotations (in bytes). Annotation triggers for types of
/// `size >= limit`, so a limit of 65 means "strictly larger than 64 bytes". 64 bytes is a common
/// size of a cache line, and the assumption is that anything cache-line-sized or below is very
/// cheap to move/copy, so only annotate copies larger than this.
const MOVE_ANNOTATION_DEFAULT_LIMIT: u64 = 65;
impl<'a, 'tcx, V: CodegenObject> OperandValue<V> {
/// Returns an `OperandValue` that's generally UB to use in any way.
///
@ -961,7 +987,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
abi::Variants::Single { index: vidx },
);
let layout = o.layout.for_variant(bx.cx(), vidx);
o = OperandRef { val: o.val, layout }
o = OperandRef { layout, ..o }
}
_ => return None,
}
@ -1014,7 +1040,16 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
match *operand {
mir::Operand::Copy(ref place) | mir::Operand::Move(ref place) => {
self.codegen_consume(bx, place.as_ref())
let kind = match operand {
mir::Operand::Move(_) => LangItem::CompilerMove,
mir::Operand::Copy(_) => LangItem::CompilerCopy,
_ => unreachable!(),
};
// Check if we should annotate this move/copy for profiling
let move_annotation = self.move_copy_annotation_instance(bx, place.as_ref(), kind);
OperandRef { move_annotation, ..self.codegen_consume(bx, place.as_ref()) }
}
mir::Operand::Constant(ref constant) => {
@ -1030,6 +1065,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
return OperandRef {
val: OperandValue::Immediate(llval),
layout: bx.layout_of(ty),
move_annotation: None,
};
}
}
@ -1037,4 +1073,68 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
}
}
}
/// Creates an `Instance` for annotating a move/copy operation at codegen time.
///
/// Returns `Some(instance)` if the operation should be annotated with debug info, `None`
/// otherwise. The instance represents a monomorphized `compiler_move<T, SIZE>` or
/// `compiler_copy<T, SIZE>` function that can be used to create debug scopes.
///
/// There are a number of conditions that must be met for an annotation to be created, but aside
/// from the basics (annotation is enabled, we're generating debuginfo), the primary concern is
/// moves/copies which could result in a real `memcpy`. So we check for the size limit, but also
/// that the underlying representation of the type is in memory.
fn move_copy_annotation_instance(
&self,
bx: &Bx,
place: mir::PlaceRef<'tcx>,
kind: LangItem,
) -> Option<ty::Instance<'tcx>> {
let tcx = bx.tcx();
let sess = tcx.sess;
// Skip if we're not generating debuginfo: the annotation is pure debug info, so it would
// have no observable effect.
if sess.opts.debuginfo == DebugInfo::None {
return None;
}
// Check if annotation is enabled and get size limit (otherwise skip)
let size_limit = match sess.opts.unstable_opts.annotate_moves {
AnnotateMoves::Disabled => return None,
AnnotateMoves::Enabled(None) => MOVE_ANNOTATION_DEFAULT_LIMIT,
AnnotateMoves::Enabled(Some(limit)) => limit,
};
let ty = self.monomorphized_place_ty(place);
let layout = bx.cx().layout_of(ty);
let ty_size = layout.size.bytes();
// Only annotate if type has a memory representation and meets the size limit (and has a
// non-zero size). Note `ty_size < size_limit` skips, so sizes >= the limit qualify.
// Scalar/pair representations are passed in registers, not via memcpy, so they are
// excluded regardless of size.
if layout.is_zst()
|| ty_size < size_limit
|| !matches!(layout.backend_repr, BackendRepr::Memory { .. })
{
return None;
}
// Look up the DefId for compiler_move or compiler_copy lang item. The `?` bails out
// gracefully when the lang item is not defined (e.g. in a `#![no_core]` crate).
let def_id = tcx.lang_items().get(kind)?;
// Create generic args: compiler_move<T, SIZE> or compiler_copy<T, SIZE>
let size_const = ty::Const::from_target_usize(tcx, ty_size);
let generic_args = tcx.mk_args(&[ty.into(), size_const.into()]);
// Create the Instance
let typing_env = self.mir.typing_env(tcx);
let instance = ty::Instance::expect_resolve(
tcx,
typing_env,
def_id,
generic_args,
rustc_span::DUMMY_SP, // span only used for error messages
);
Some(instance)
}
}

View file

@ -36,7 +36,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
}
// FIXME: consider not copying constants through stack. (Fixable by codegen'ing
// constants into `OperandValue::Ref`; why dont we do that yet if we dont?)
cg_operand.val.store(bx, dest);
cg_operand.store_with_annotation(bx, dest);
}
mir::Rvalue::Cast(
@ -50,7 +50,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
// Into-coerce of a thin pointer to a wide pointer -- just
// use the operand path.
let temp = self.codegen_rvalue_operand(bx, rvalue);
temp.val.store(bx, dest);
temp.store_with_annotation(bx, dest);
return;
}
@ -70,7 +70,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
debug!("codegen_rvalue: creating ugly alloca");
let scratch = PlaceRef::alloca(bx, operand.layout);
scratch.storage_live(bx);
operand.val.store(bx, scratch);
operand.store_with_annotation(bx, scratch);
base::coerce_unsized_into(bx, scratch, dest);
scratch.storage_dead(bx);
}
@ -183,7 +183,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
} else {
variant_dest.project_field(bx, field_index.as_usize())
};
op.val.store(bx, field);
op.store_with_annotation(bx, field);
}
}
dest.codegen_set_discr(bx, variant_index);
@ -191,7 +191,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
_ => {
let temp = self.codegen_rvalue_operand(bx, rvalue);
temp.val.store(bx, dest);
temp.store_with_annotation(bx, dest);
}
}
}
@ -221,7 +221,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
// Since in this path we have a place anyway, we can store or copy to it,
// making sure we use the destination place's alignment even if the
// source would normally have a higher one.
src.val.store(bx, dst.val.with_type(src.layout));
src.store_with_annotation(bx, dst.val.with_type(src.layout));
}
}
@ -320,7 +320,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
let size = Ord::max(operand.layout.size, cast.size);
let temp = PlaceValue::alloca(bx, size, align);
bx.lifetime_start(temp.llval, size);
operand.val.store(bx, temp.with_type(operand.layout));
operand.store_with_annotation(bx, temp.with_type(operand.layout));
let val = bx.load_operand(temp.with_type(cast)).val;
bx.lifetime_end(temp.llval, size);
val
@ -478,7 +478,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
let to_backend_ty = bx.cx().immediate_backend_type(cast);
if operand.layout.is_uninhabited() {
let val = OperandValue::Immediate(bx.cx().const_poison(to_backend_ty));
return OperandRef { val, layout: cast };
return OperandRef { val, layout: cast, move_annotation: None };
}
let abi::BackendRepr::Scalar(to_scalar) = cast.layout.backend_repr else {
bug!("Found non-scalar for cast {cast:?}");
@ -494,7 +494,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
self.codegen_transmute_operand(bx, operand, cast)
}
};
OperandRef { val, layout: cast }
OperandRef { val, layout: cast, move_annotation: None }
}
mir::Rvalue::Ref(_, bk, place) => {
@ -525,7 +525,11 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
);
let val_ty = op.ty(bx.tcx(), lhs.layout.ty, rhs.layout.ty);
let operand_ty = Ty::new_tup(bx.tcx(), &[val_ty, bx.tcx().types.bool]);
OperandRef { val: result, layout: bx.cx().layout_of(operand_ty) }
OperandRef {
val: result,
layout: bx.cx().layout_of(operand_ty),
move_annotation: None,
}
}
mir::Rvalue::BinaryOp(op, box (ref lhs, ref rhs)) => {
let lhs = self.codegen_operand(bx, lhs);
@ -559,6 +563,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
OperandRef {
val: OperandValue::Immediate(llresult),
layout: bx.cx().layout_of(op.ty(bx.tcx(), lhs.layout.ty, rhs.layout.ty)),
move_annotation: None,
}
}
@ -593,7 +598,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
val.is_expected_variant_for_type(self.cx, layout),
"Made wrong variant {val:?} for type {layout:?}",
);
OperandRef { val, layout }
OperandRef { val, layout, move_annotation: None }
}
mir::Rvalue::Discriminant(ref place) => {
@ -604,6 +609,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
OperandRef {
val: OperandValue::Immediate(discr),
layout: self.cx.layout_of(discr_ty),
move_annotation: None,
}
}
@ -631,6 +637,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
OperandRef {
val: OperandValue::Immediate(val),
layout: self.cx.layout_of(null_op.ty(tcx)),
move_annotation: None,
}
}
@ -663,7 +670,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
} else {
bx.get_static(def_id)
};
OperandRef { val: OperandValue::Immediate(static_), layout }
OperandRef { val: OperandValue::Immediate(static_), layout, move_annotation: None }
}
mir::Rvalue::Use(ref operand) => self.codegen_operand(bx, operand),
mir::Rvalue::Repeat(ref elem, len_const) => {
@ -675,7 +682,11 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
let array_ty = self.monomorphize(array_ty);
let array_layout = bx.layout_of(array_ty);
assert!(array_layout.is_zst());
OperandRef { val: OperandValue::ZeroSized, layout: array_layout }
OperandRef {
val: OperandValue::ZeroSized,
layout: array_layout,
move_annotation: None,
}
}
mir::Rvalue::Aggregate(ref kind, ref fields) => {
let (variant_index, active_field_index) = match **kind {
@ -704,7 +715,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
// more optimizability, if that turns out to be helpful.
bx.abort();
let val = OperandValue::poison(bx, layout);
OperandRef { val, layout }
OperandRef { val, layout, move_annotation: None }
}
Ok(maybe_tag_value) => {
if let Some((tag_field, tag_imm)) = maybe_tag_value {
@ -718,7 +729,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
let operand = self.codegen_operand(bx, operand);
let binder_ty = self.monomorphize(binder_ty);
let layout = bx.cx().layout_of(binder_ty);
OperandRef { val: operand.val, layout }
OperandRef { val: operand.val, layout, move_annotation: None }
}
mir::Rvalue::CopyForDeref(_) => bug!("`CopyForDeref` in codegen"),
mir::Rvalue::ShallowInitBox(..) => bug!("`ShallowInitBox` in codegen"),
@ -745,7 +756,11 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
"Address of place was unexpectedly {val:?} for pointee type {ty:?}",
);
OperandRef { val, layout: self.cx.layout_of(mk_ptr_ty(self.cx.tcx(), ty)) }
OperandRef {
val,
layout: self.cx.layout_of(mk_ptr_ty(self.cx.tcx(), ty)),
move_annotation: None,
}
}
fn codegen_scalar_binop(

View file

@ -37,7 +37,7 @@ pub trait BuilderMethods<'a, 'tcx>:
+ FnAbiOf<'tcx, FnAbiOfResult = &'tcx FnAbi<'tcx, Ty<'tcx>>>
+ Deref<Target = Self::CodegenCx>
+ CoverageInfoBuilderMethods<'tcx>
+ DebugInfoBuilderMethods
+ DebugInfoBuilderMethods<'tcx>
+ ArgAbiBuilderMethods<'tcx>
+ AbiBuilderMethods
+ IntrinsicCallBuilderMethods<'tcx>

View file

@ -64,7 +64,7 @@ pub trait DebugInfoCodegenMethods<'tcx>: BackendTypes {
) -> Self::DIVariable;
}
pub trait DebugInfoBuilderMethods: BackendTypes {
pub trait DebugInfoBuilderMethods<'tcx>: BackendTypes {
// FIXME(eddyb) find a common convention for all of the debuginfo-related
// names (choose between `dbg`, `debug`, `debuginfo`, `debug_info` etc.).
fn dbg_var_addr(
@ -95,4 +95,18 @@ pub trait DebugInfoBuilderMethods: BackendTypes {
fn clear_dbg_loc(&mut self);
fn insert_reference_to_gdb_debug_scripts_section_global(&mut self);
fn set_var_name(&mut self, value: Self::Value, name: &str);
/// Hook to allow move/copy operations to be annotated for profiling.
///
/// The `instance` parameter should be the monomorphized instance of the
/// `compiler_move` or `compiler_copy` function with the actual type and size.
///
/// Default implementation does no annotation (just executes the closure), so backends
/// without debug-info support for this feature need not override it.
fn with_move_annotation<R>(
&mut self,
_instance: Instance<'tcx>,
f: impl FnOnce(&mut Self) -> R,
) -> R {
f(self)
}
}

View file

@ -345,6 +345,10 @@ language_item_table! {
EhPersonality, sym::eh_personality, eh_personality, Target::Fn, GenericRequirement::None;
EhCatchTypeinfo, sym::eh_catch_typeinfo, eh_catch_typeinfo, Target::Static, GenericRequirement::None;
// Profiling markers for move/copy operations (used by -Z annotate-moves)
CompilerMove, sym::compiler_move, compiler_move_fn, Target::Fn, GenericRequirement::Exact(2);
CompilerCopy, sym::compiler_copy, compiler_copy_fn, Target::Fn, GenericRequirement::Exact(2);
OwnedBox, sym::owned_box, owned_box, Target::Struct, GenericRequirement::Minimum(1);
GlobalAlloc, sym::global_alloc_ty, global_alloc_ty, Target::Struct, GenericRequirement::None;

View file

@ -10,7 +10,7 @@ use rustc_errors::emitter::HumanReadableErrorType;
use rustc_errors::{ColorConfig, registry};
use rustc_hir::attrs::NativeLibKind;
use rustc_session::config::{
AutoDiff, BranchProtection, CFGuard, Cfg, CollapseMacroDebuginfo, CoverageLevel,
AnnotateMoves, AutoDiff, BranchProtection, CFGuard, Cfg, CollapseMacroDebuginfo, CoverageLevel,
CoverageOptions, DebugInfo, DumpMonoStatsFormat, ErrorOutputType, ExternEntry, ExternLocation,
Externs, FmtDebug, FunctionReturn, InliningThreshold, Input, InstrumentCoverage,
InstrumentXRay, LinkSelfContained, LinkerPluginLto, LocationDetail, LtoCli, MirIncludeSpans,
@ -764,6 +764,7 @@ fn test_unstable_options_tracking_hash() {
// tidy-alphabetical-start
tracked!(allow_features, Some(vec![String::from("lang_items")]));
tracked!(always_encode_mir, true);
tracked!(annotate_moves, AnnotateMoves::Enabled(Some(1234)));
tracked!(assume_incomplete_release, true);
tracked!(autodiff, vec![AutoDiff::Enable, AutoDiff::NoTT]);
tracked!(binary_dep_depinfo, true);

View file

@ -262,6 +262,16 @@ pub enum AutoDiff {
NoTT,
}
/// The different settings that the `-Z annotate-moves` flag can have.
#[derive(Clone, Copy, PartialEq, Hash, Debug)]
pub enum AnnotateMoves {
    /// `-Z annotate-moves=no` (or `off`, `false` etc.): no annotation is emitted.
    Disabled,
    /// `-Z annotate-moves` or `-Z annotate-moves=yes` (use default size limit, i.e. `None`),
    /// or `-Z annotate-moves=SIZE` (use the specified size limit in bytes, i.e. `Some(SIZE)`).
    Enabled(Option<u64>),
}
/// Settings for `-Z instrument-xray` flag.
#[derive(Clone, Copy, Debug, Default, PartialEq, Eq, Hash)]
pub struct InstrumentXRay {
@ -3283,13 +3293,13 @@ pub(crate) mod dep_tracking {
};
use super::{
AutoDiff, BranchProtection, CFGuard, CFProtection, CollapseMacroDebuginfo, CoverageOptions,
CrateType, DebugInfo, DebugInfoCompression, ErrorOutputType, FmtDebug, FunctionReturn,
InliningThreshold, InstrumentCoverage, InstrumentXRay, LinkerPluginLto, LocationDetail,
LtoCli, MirStripDebugInfo, NextSolverConfig, Offload, OomStrategy, OptLevel, OutFileName,
OutputType, OutputTypes, PatchableFunctionEntry, Polonius, RemapPathScopeComponents,
ResolveDocLinks, SourceFileHashAlgorithm, SplitDwarfKind, SwitchWithOptPath,
SymbolManglingVersion, WasiExecModel,
AnnotateMoves, AutoDiff, BranchProtection, CFGuard, CFProtection, CollapseMacroDebuginfo,
CoverageOptions, CrateType, DebugInfo, DebugInfoCompression, ErrorOutputType, FmtDebug,
FunctionReturn, InliningThreshold, InstrumentCoverage, InstrumentXRay, LinkerPluginLto,
LocationDetail, LtoCli, MirStripDebugInfo, NextSolverConfig, Offload, OomStrategy,
OptLevel, OutFileName, OutputType, OutputTypes, PatchableFunctionEntry, Polonius,
RemapPathScopeComponents, ResolveDocLinks, SourceFileHashAlgorithm, SplitDwarfKind,
SwitchWithOptPath, SymbolManglingVersion, WasiExecModel,
};
use crate::lint;
use crate::utils::NativeLib;
@ -3332,6 +3342,7 @@ pub(crate) mod dep_tracking {
impl_dep_tracking_hash_via_hash!(
(),
AnnotateMoves,
AutoDiff,
Offload,
bool,

View file

@ -864,6 +864,8 @@ mod desc {
pub(crate) const parse_linker_features: &str =
"a list of enabled (`+` prefix) and disabled (`-` prefix) features: `lld`";
pub(crate) const parse_polonius: &str = "either no value or `legacy` (the default), or `next`";
pub(crate) const parse_annotate_moves: &str =
"either a boolean (`yes`, `no`, `on`, `off`, etc.), or a size limit in bytes";
pub(crate) const parse_stack_protector: &str =
"one of (`none` (default), `basic`, `strong`, or `all`)";
pub(crate) const parse_branch_protection: &str = "a `,` separated combination of `bti`, `gcs`, `pac-ret`, (optionally with `pc`, `b-key`, `leaf` if `pac-ret` is set)";
@ -949,6 +951,29 @@ pub mod parse {
}
}
/// Parses `-Z annotate-moves`: no value enables with the default limit, a boolean
/// enables/disables, and a number enables with that size limit (in bytes).
/// Returns `false` when the value is neither a boolean nor a number.
pub(crate) fn parse_annotate_moves(slot: &mut AnnotateMoves, v: Option<&str>) -> bool {
    // Bare `-Z annotate-moves` (no `=value`): enable with the default size limit.
    let Some(value) = v else {
        *slot = AnnotateMoves::Enabled(None);
        return true;
    };
    // Explicit boolean: `-Z annotate-moves=yes` / `=no` (and `on`/`off`/`true`/`false`).
    let mut as_bool = false;
    if parse_bool(&mut as_bool, Some(value)) {
        *slot =
            if as_bool { AnnotateMoves::Enabled(None) } else { AnnotateMoves::Disabled };
        return true;
    }
    // Numeric size limit: `-Z annotate-moves=1234`.
    let mut as_num = 0u64;
    if parse_number(&mut as_num, Some(value)) {
        *slot = AnnotateMoves::Enabled(Some(as_num));
        return true;
    }
    // Anything else is a parse error; leave `slot` untouched.
    false
}
/// Use this for any string option that has a static default.
pub(crate) fn parse_string(slot: &mut String, v: Option<&str>) -> bool {
match v {
@ -2198,6 +2223,9 @@ options! {
"only allow the listed language features to be enabled in code (comma separated)"),
always_encode_mir: bool = (false, parse_bool, [TRACKED],
"encode MIR of all functions into the crate metadata (default: no)"),
annotate_moves: AnnotateMoves = (AnnotateMoves::Disabled, parse_annotate_moves, [TRACKED],
"emit debug info for compiler-generated move and copy operations \
to make them visible in profilers. Can be a boolean or a size limit in bytes (default: disabled)"),
assert_incr_state: Option<String> = (None, parse_opt_string, [UNTRACKED],
"assert that the incremental cache is in given state: \
either `loaded` or `not-loaded`."),

View file

@ -700,7 +700,9 @@ symbols! {
compile_error,
compiler,
compiler_builtins,
compiler_copy,
compiler_fence,
compiler_move,
concat,
concat_bytes,
concat_idents,

View file

@ -282,6 +282,8 @@ pub mod num;
pub mod hint;
pub mod intrinsics;
pub mod mem;
#[unstable(feature = "profiling_marker_api", issue = "148197")]
pub mod profiling;
pub mod ptr;
#[unstable(feature = "ub_checks", issue = "none")]
pub mod ub_checks;

View file

@ -0,0 +1,33 @@
//! Profiling markers for compiler instrumentation.
/// Profiling marker for move operations.
///
/// This function is never called at runtime. When `-Z annotate-moves` is enabled,
/// the compiler creates synthetic debug info that makes move operations appear as
/// calls to this function in profilers.
///
/// The `SIZE` parameter encodes the size of the type being moved. It's the same as
/// `size_of::<T>()`, and is only present for convenience.
#[unstable(feature = "profiling_marker_api", issue = "148197")]
#[lang = "compiler_move"]
pub fn compiler_move<T, const SIZE: usize>(_src: *const T, _dst: *mut T) {
    // This body exists only to make accidental direct calls loud; the symbol is a
    // debug-info anchor, never a real call target.
    unreachable!(
        "compiler_move marks where the compiler generated a memcpy for moves. It is never actually called."
    )
}
/// Profiling marker for copy operations.
///
/// This function is never called at runtime. When `-Z annotate-moves` is enabled,
/// the compiler creates synthetic debug info that makes copy operations appear as
/// calls to this function in profilers.
///
/// The `SIZE` parameter encodes the size of the type being copied. It's the same as
/// `size_of::<T>()`, and is only present for convenience.
#[unstable(feature = "profiling_marker_api", issue = "148197")]
#[lang = "compiler_copy"]
pub fn compiler_copy<T, const SIZE: usize>(_src: *const T, _dst: *mut T) {
    // This body exists only to make accidental direct calls loud; the symbol is a
    // debug-info anchor, never a real call target.
    unreachable!(
        "compiler_copy marks where the compiler generated a memcpy for copies. It is never actually called."
    )
}

View file

@ -0,0 +1,92 @@
# `annotate-moves`
The tracking issue for this feature is: [#148197](https://github.com/rust-lang/rust/issues/148197).
------------------------
The `-Z annotate-moves` flag enables annotation of compiler-generated
move and copy operations, making them visible in profilers and stack traces
for performance debugging.
When enabled, the compiler manipulates debug info to make large move and copy
operations appear as if they were inlined calls to `core::profiling::compiler_move`
and `core::profiling::compiler_copy`. No actual function calls are generated -
this is purely a debug info transformation that makes expensive memory operations
visible in profilers and stack traces.
## Syntax
```bash
rustc -Z annotate-moves[=<value>]
```
Where `<value>` can be:
- A boolean: `true`, `false`, `yes`, `no`, `on`, `off`
- A number: size threshold in bytes (e.g., `128`)
- Omitted: enables with default threshold (65 bytes)
## Options
- `-Z annotate-moves` or `-Z annotate-moves=true`: Enable with default size limit
- `-Z annotate-moves=false`: Disable annotation
- `-Z annotate-moves=N`: Enable with custom size limit of N bytes
## Examples
```bash
# Enable annotation with default threshold (65 bytes)
rustc -Z annotate-moves main.rs
# Enable with custom 128-byte threshold
rustc -Z annotate-moves=128 main.rs
# Only annotate very large moves (1KB+)
rustc -Z annotate-moves=1024 main.rs
# Explicitly disable
rustc -Z annotate-moves=false main.rs
```
## Behavior
The annotation only applies to:
- Types equal or larger than the specified size threshold
- Non-immediate types (those that would generate `memcpy`)
- Operations that actually move/copy data (not ZST types)
Stack traces will show the operations:
```text
0: memcpy
1: core::profiling::compiler_move::<MyLargeStruct, 148>
2: my_function
```
The `compiler_move` and `compiler_copy` functions have two generic parameters:
the type being moved/copied and its size in bytes. The size is identical to
`size_of::<T>()`, and is present just so that it's easy to immediately tell how
large the copy is.
Note that v0 symbol mangling (`-C symbol-mangling-version=v0`) is required for the
type and size parameters to be encoded in the symbol name; with legacy mangling,
profilers cannot recover the specific type and size.
## Example
```rust
#[derive(Clone)]
struct LargeData {
buffer: [u8; 1000],
}
fn example() {
let data = LargeData { buffer: [0; 1000] };
let copy = data.clone(); // Shows as compiler_copy in profiler
let moved = data; // Shows as compiler_move in profiler
}
```
## Overhead
This has no effect on generated code; it only adds debuginfo. The overhead is
typically very small; on rustc itself, the default limit of 65 bytes adds about
0.055% to the binary size.

View file

@ -0,0 +1,114 @@
//@ compile-flags: -Z annotate-moves=8 -Copt-level=0 -g
//
// This test verifies that function call and return instructions use the correct debug scopes
// when passing/returning large values. The actual move/copy operations may be annotated,
// but the CALL and RETURN instructions themselves should reference the source location,
// NOT have an inlinedAt scope pointing to compiler_move/compiler_copy.
#![crate_type = "lib"]
/// 160 bytes — far above this test's 8-byte annotation threshold.
#[derive(Clone, Copy)]
pub struct LargeStruct {
    pub data: [u64; 20], // 160 bytes
}
/// 40 bytes — also above the 8-byte threshold used by this test.
#[derive(Clone, Copy)]
pub struct MediumStruct {
    pub data: [u64; 5], // 40 bytes
}
/// 4 bytes — scalar-sized, so no memcpy (and no annotation) is expected.
pub struct SmallStruct {
    pub x: u32, // 4 bytes
}
// ============================================================================
// Test 1: Single argument call
// ============================================================================
// CHECK-LABEL: call_arg_scope::test_call_with_single_arg
/// Passing a large struct by value: the argument copy gets its own debug
/// location, but the call instruction itself must stay in this function's
/// scope (checked via the CHECK-DAG lines at the end of the file).
pub fn test_call_with_single_arg(s: LargeStruct) {
    // CHECK: call void @llvm.memcpy{{.*}}, !dbg ![[#CALL1_ARG_LOC:]]
    // CHECK: call {{.*}}@{{.*}}helper_single{{.*}}({{.*}}){{.*}}, !dbg ![[#CALL1_LOC:]]
    helper_single(s);
}
// Kept out-of-line so the call and its argument copy remain observable.
#[inline(never)]
fn helper_single(_s: LargeStruct) {}
// ============================================================================
// Test 2: Multiple arguments of different types
// ============================================================================
// CHECK-LABEL: call_arg_scope::test_call_with_multiple_args
/// Multiple by-value arguments: each sufficiently large argument produces its
/// own memcpy with its own debug location; the call keeps the caller's scope.
pub fn test_call_with_multiple_args(large: LargeStruct, medium: MediumStruct, small: SmallStruct) {
    // CHECK: call void @llvm.memcpy{{.*}}, !dbg ![[#CALL2_ARG1_LOC:]]
    // CHECK: call void @llvm.memcpy{{.*}}, !dbg ![[#CALL2_ARG2_LOC:]]
    // CHECK: call {{.*}}@{{.*}}helper_multiple{{.*}}({{.*}}){{.*}}, !dbg ![[#CALL2_LOC:]]
    helper_multiple(large, medium, small);
}
// Kept out-of-line so the argument copies are visible at the call site.
#[inline(never)]
fn helper_multiple(_l: LargeStruct, _m: MediumStruct, _s: SmallStruct) {}
// ============================================================================
// Test 3: Return value
// ============================================================================
// CHECK-LABEL: call_arg_scope::test_return_large_value
/// Returning a large value: the `ret` instruction must carry this function's
/// scope, not an inlinedAt scope pointing at a compiler_move marker.
pub fn test_return_large_value() -> LargeStruct {
    let s = LargeStruct { data: [42; 20] };
    // CHECK: ret {{.*}}, !dbg ![[#RET1_LOC:]]
    s
}
// ============================================================================
// Test 4: Calling a function that returns a large value
// ============================================================================
// CHECK-LABEL: call_arg_scope::test_call_returning_large
/// Calling a function that returns a large value: the call instruction must
/// keep this function's scope.
pub fn test_call_returning_large() {
    // CHECK: call {{.*}}@{{.*}}make_large_struct{{.*}}({{.*}}){{.*}}, !dbg ![[#CALL3_LOC:]]
    let _result = make_large_struct();
}
// Out-of-line producer of a large return value.
#[inline(never)]
fn make_large_struct() -> LargeStruct {
    LargeStruct { data: [1; 20] }
}
// ============================================================================
// Test 5: Mixed scenario - passing and returning large values
// ============================================================================
// CHECK-LABEL: call_arg_scope::test_mixed_call
/// Mixed case: passes a large value in and returns one; the call itself must
/// still be located in this function's scope.
pub fn test_mixed_call(input: LargeStruct) -> LargeStruct {
    // CHECK: call {{.*}}@{{.*}}transform_large{{.*}}({{.*}}){{.*}}, !dbg ![[#CALL4_LOC:]]
    transform_large(input)
}
// Out-of-line transformer; mutates one element so the value is actually used.
#[inline(never)]
fn transform_large(mut s: LargeStruct) -> LargeStruct {
    s.data[0] += 1;
    s
}
// CHECK-DAG: ![[#CALL1_ARG_LOC]] = !DILocation({{.*}}scope: ![[#CALL1_ARG_SCOPE:]]
// CHECK-DAG: ![[#CALL1_ARG_SCOPE]] = {{(distinct )?}}!DISubprogram(name: "compiler_copy<call_arg_scope::LargeStruct,{{ *[0-9]+}}>"
// CHECK-DAG: ![[#CALL1_LOC]] = !DILocation({{.*}}scope: ![[#CALL1_SCOPE:]]
// CHECK-DAG: ![[#CALL1_SCOPE]] = {{(distinct )?}}!DISubprogram(name: "test_call_with_single_arg"
// CHECK-DAG: ![[#CALL2_ARG1_LOC]] = !DILocation({{.*}}scope: ![[#CALL2_ARG1_SCOPE:]]
// CHECK-DAG: ![[#CALL2_ARG1_SCOPE]] = {{(distinct )?}}!DISubprogram(name: "compiler_copy<call_arg_scope::LargeStruct,{{ *[0-9]+}}>"
// CHECK-DAG: ![[#CALL2_ARG2_LOC]] = !DILocation({{.*}}scope: ![[#CALL2_ARG2_SCOPE:]]
// CHECK-DAG: ![[#CALL2_ARG2_SCOPE]] = {{(distinct )?}}!DISubprogram(name: "compiler_copy<call_arg_scope::MediumStruct,{{ *[0-9]+}}>"
// CHECK-DAG: ![[#CALL2_LOC]] = !DILocation({{.*}}scope: ![[#CALL2_SCOPE:]]
// CHECK-DAG: ![[#CALL2_SCOPE]] = {{(distinct )?}}!DISubprogram(name: "test_call_with_multiple_args"
// CHECK-DAG: ![[#CALL3_LOC]] = !DILocation({{.*}}scope: ![[#CALL3_SCOPE:]]
// CHECK-DAG: ![[#CALL3_SCOPE]] = {{(distinct )?}}!DISubprogram(name: "test_call_returning_large"
// CHECK-DAG: ![[#CALL4_LOC]] = !DILocation({{.*}}scope: ![[#CALL4_SCOPE:]]
// CHECK-DAG: ![[#CALL4_SCOPE]] = {{(distinct )?}}!DISubprogram(name: "test_mixed_call"
// CHECK-DAG: ![[#RET1_LOC]] = !DILocation({{.*}}scope: ![[#RET1_SCOPE:]]
// CHECK-DAG: ![[#RET1_SCOPE]] = {{(distinct )?}}!DISubprogram(name: "test_return_large_value"

View file

@ -0,0 +1,35 @@
//@ compile-flags: -Zannotate-moves=no -Copt-level=0 -g
// Test that move/copy operations are NOT annotated when the flag is disabled
#![crate_type = "lib"]
/// 160 bytes — large enough that it would normally be annotated, but
/// `-Zannotate-moves=no` is passed for this test, so it must not be.
struct LargeStruct {
    data: [u64; 20], // 160 bytes - would normally trigger annotation
}
impl Clone for LargeStruct {
    // CHECK-LABEL: <disabled::LargeStruct as core::clone::Clone>::clone
    fn clone(&self) -> Self {
        // Should NOT be annotated when flag is disabled
        // CHECK-NOT: compiler_copy
        LargeStruct { data: self.data }
    }
}
// CHECK-LABEL: disabled::test_large_copy_no_annotation
/// A large clone must not produce a compiler_copy scope when the flag is off.
pub fn test_large_copy_no_annotation() {
    let large = LargeStruct { data: [42; 20] };
    // CHECK-NOT: compiler_copy
    let _copy = large.clone();
}
// CHECK-LABEL: disabled::test_large_move_no_annotation
/// A large move must not produce a compiler_move scope when the flag is off.
pub fn test_large_move_no_annotation() {
    let large = LargeStruct { data: [42; 20] };
    // CHECK-NOT: compiler_move
    let _moved = large;
}
// Verify that no compiler_move or compiler_copy annotations exist anywhere
// CHECK-NOT: compiler_move
// CHECK-NOT: compiler_copy

View file

@ -0,0 +1,192 @@
//@ compile-flags: -Z annotate-moves=1 -Copt-level=0 -g
#![crate_type = "lib"]
// Test with large array (non-struct type, Copy)
/// Bare array type — exercises annotation of non-struct aggregates.
type LargeArray = [u64; 20]; // 160 bytes
/// Small non-Copy wrapper, used to force move (rather than copy) semantics.
#[derive(Clone, Default)]
struct NonCopyU64(u64);
// Test with Copy implementation
/// Large Copy struct with a hand-written Clone impl (see below).
#[derive(Copy)]
struct ExplicitCopy {
    data: [u64; 20], // 160 bytes
}
impl Clone for ExplicitCopy {
    // CHECK-LABEL: <integration::ExplicitCopy as core::clone::Clone>::clone
    fn clone(&self) -> Self {
        // Two annotated memcpys are expected: reading the field, and writing
        // the return value (see the EXPLICIT_* CHECK-DAG lines at file end).
        // CHECK: call void @llvm.memcpy{{.*}}, !dbg ![[#EXPLICIT_COPY_LOC:]]
        // CHECK: call void @llvm.memcpy{{.*}}, !dbg ![[#EXPLICIT_RETURN_LOC:]]
        Self { data: self.data }
    }
}
// Test with hand-implemented Clone (non-Copy)
/// Large non-Copy struct; last-use assignments of this type are moves.
struct NonCopyStruct {
    data: [u64; 20], // 160 bytes
}
impl Clone for NonCopyStruct {
    // CHECK-LABEL: <integration::NonCopyStruct as core::clone::Clone>::clone
    fn clone(&self) -> Self {
        // Two memcpys expected: reading the field and writing the return value.
        // CHECK: call void @llvm.memcpy{{.*}}, !dbg ![[#CLONE_COPY_LOC:]]
        // CHECK: call void @llvm.memcpy{{.*}}, !dbg ![[#CLONE_RETURN_LOC:]]
        NonCopyStruct { data: self.data }
    }
}
// CHECK-LABEL: integration::test_pure_assignment_move
/// Plain move of a large array local; no annotated memcpy is expected here
/// because the initializer lowers to a loop at opt-level=0.
pub fn test_pure_assignment_move() {
    let arr: LargeArray = [42; 20];
    // Arrays are initialized with a loop
    // CHECK-NOT: call void @llvm.memcpy{{.*}}, !dbg ![[#]]
    let _moved = arr;
}
// CHECK-LABEL: integration::test_pure_assignment_copy
/// Two uses of a Copy value: only the second assignment is expected to show
/// up as an annotated memcpy (ASSIGN_COPY2 in the CHECK-DAG lines below).
pub fn test_pure_assignment_copy() {
    let s = ExplicitCopy { data: [42; 20] };
    // Arrays are initialized with a loop
    // CHECK-NOT: call void @llvm.memcpy{{.*}}, !dbg ![[#]]
    let _copied = s;
    // CHECK: call void @llvm.memcpy{{.*}}, !dbg ![[#ASSIGN_COPY2_LOC:]]
    let _copied_2 = s;
}
/// Struct of three Strings, used to exercise annotation of an assignment
/// that overwrites an already-initialized local.
#[derive(Default)]
struct InitializeStruct {
    field1: String,
    field2: String,
    field3: String,
}
// CHECK-LABEL: integration::test_init_struct
pub fn test_init_struct() {
    let mut s = InitializeStruct::default();
    // Reassigning the local moves the new value into place.
    // CHECK: call void @llvm.memcpy{{.*}}, !dbg ![[#INIT_STRUCT_LOC:]]
    s = InitializeStruct {
        field1: String::from("Hello"),
        field2: String::from("from"),
        field3: String::from("Rust"),
    };
}
// CHECK-LABEL: integration::test_tuple_of_scalars
pub fn test_tuple_of_scalars() {
    // Tuple of scalars (even if large) may use scalar-pair repr, so may not be annotated
    let t: (u64, u64, u64, u64) = (1, 2, 3, 4); // 32 bytes
    // Copied with explicit stores
    // CHECK-NOT: call void @llvm.memcpy{{.*}}, !dbg ![[#]]
    let _moved = t;
}
// CHECK-LABEL: integration::test_tuple_of_structs
/// A tuple of two large structs is non-scalar and large, so moving it is
/// expected to produce an annotated memcpy.
pub fn test_tuple_of_structs() {
    let s1 = NonCopyStruct { data: [1; 20] };
    let s2 = NonCopyStruct { data: [2; 20] };
    let tuple = (s1, s2); // Large tuple containing structs (320 bytes)
    // CHECK: call void @llvm.memcpy{{.*}}, !dbg ![[#TUPLE_MOVE_LOC:]]
    let _moved = tuple;
}
// CHECK-LABEL: integration::test_tuple_mixed
/// A tuple mixing a scalar with a large struct is still non-scalar overall.
pub fn test_tuple_mixed() {
    let s = NonCopyStruct { data: [1; 20] };
    let tuple = (42u64, s); // Mixed tuple (168 bytes: 8 for u64 + 160 for struct)
    // CHECK: call void @llvm.memcpy{{.*}}, !dbg ![[#MIXED_TUPLE_LOC:]]
    let _moved = tuple;
}
// CHECK-LABEL: integration::test_explicit_copy_assignment
/// Repeated use of a Copy value: the second use is expected to memcpy.
pub fn test_explicit_copy_assignment() {
    let c1 = ExplicitCopy { data: [1; 20] };
    // Initialized with loop
    // CHECK-NOT: call void @llvm.memcpy{{.*}}, !dbg ![[#]]
    let c2 = c1;
    // CHECK: call void @llvm.memcpy{{.*}}, !dbg ![[#COPY2_LOC:]]
    let _c3 = c1; // Can still use c1 (it was copied)
    let _ = c2;
}
// CHECK-LABEL: integration::test_array_move
/// Moving a large array of non-Copy elements between locals.
pub fn test_array_move() {
    let arr: [String; 20] = std::array::from_fn(|i| i.to_string());
    // CHECK: call void @llvm.memcpy{{.*}}, !dbg ![[#ARRAY_MOVE_LOC:]]
    let _moved = arr;
}
// CHECK-LABEL: integration::test_array_in_struct_field
/// Moving a large array field out of a struct, then assigning the extracted
/// array to another local; each step is expected to memcpy.
pub fn test_array_in_struct_field() {
    let s = NonCopyStruct { data: [1; 20] };
    // CHECK: call void @llvm.memcpy{{.*}}, !dbg ![[#FIELD_MOVE_LOC:]]
    let data = s.data; // Move array field out of struct
    // CHECK: call void @llvm.memcpy{{.*}}, !dbg ![[#FIELD_MOVE2_LOC:]]
    let _moved = data;
}
// CHECK-LABEL: integration::test_clone_noncopy
pub fn test_clone_noncopy() {
    let s = NonCopyStruct { data: [1; 20] };
    // CHECK: call void @llvm.memcpy{{.*}}, !dbg ![[#CALL_CLONE_NONCOPY_LOC:]]
    let _cloned = s.clone(); // The copy happens inside the clone() impl above
}
// CHECK-LABEL: integration::test_clone_explicit_copy
pub fn test_clone_explicit_copy() {
    let c = ExplicitCopy { data: [1; 20] };
    // Hand-written Clone on a Copy type - the copy happens inside the clone impl above
    // CHECK: call void @llvm.memcpy{{.*}}, !dbg ![[#CALL_CLONE_COPY_LOC:]]
    let _cloned = c.clone();
}
// CHECK-LABEL: integration::test_copy_ref
/// Dereferencing a reference to a large Copy type copies it into a local.
pub fn test_copy_ref(x: &ExplicitCopy) {
    // CHECK: call void @llvm.memcpy{{.*}}, !dbg ![[#LOCAL_COPY_LOC:]]
    let _local = *x;
}
// CHECK-DAG: ![[#EXPLICIT_COPY_LOC]] = !DILocation({{.*}}scope: ![[#EXPLICIT_COPY_SCOPE:]]
// CHECK-DAG: ![[#EXPLICIT_COPY_SCOPE]] = {{(distinct )?}}!DISubprogram(name: "compiler_copy<{{(array\$<|\[)u64[,;].*}},{{ *[0-9]+}}>"
// CHECK-DAG: ![[#EXPLICIT_RETURN_LOC]] = !DILocation({{.*}}scope: ![[#EXPLICIT_RETURN_SCOPE:]]
// CHECK-DAG: ![[#EXPLICIT_RETURN_SCOPE]] = {{(distinct )?}}!DISubprogram(name: "compiler_move<{{(array\$<|\[)u64[,;].*}},{{ *[0-9]+}}>"
// CHECK-DAG: ![[#CLONE_COPY_LOC]] = !DILocation({{.*}}scope: ![[#CLONE_COPY_SCOPE:]]
// CHECK-DAG: ![[#CLONE_COPY_SCOPE]] = {{(distinct )?}}!DISubprogram(name: "compiler_copy<{{(array\$<|\[)u64[,;].*}},{{ *[0-9]+}}>"
// CHECK-DAG: ![[#CLONE_RETURN_LOC]] = !DILocation({{.*}}scope: ![[#CLONE_RETURN_SCOPE:]]
// CHECK-DAG: ![[#CLONE_RETURN_SCOPE]] = {{(distinct )?}}!DISubprogram(name: "compiler_move<{{(array\$<|\[)u64[,;].*}},{{ *[0-9]+}}>"
// CHECK-DAG: ![[#ASSIGN_COPY2_LOC]] = !DILocation({{.*}}scope: ![[#ASSIGN_COPY2_SCOPE:]]
// CHECK-DAG: ![[#ASSIGN_COPY2_SCOPE]] = {{(distinct )?}}!DISubprogram(name: "compiler_move<{{(array\$<|\[)u64[,;].*}},{{ *[0-9]+}}>"
// CHECK-DAG: ![[#INIT_STRUCT_LOC]] = !DILocation({{.*}}scope: ![[#INIT_STRUCT_SCOPE:]]
// CHECK-DAG: ![[#INIT_STRUCT_SCOPE]] = {{(distinct )?}}!DISubprogram(name: "compiler_move<alloc::string::String,{{ *[0-9]+}}>"
// CHECK-DAG: ![[#TUPLE_MOVE_LOC]] = !DILocation({{.*}}scope: ![[#TUPLE_MOVE_SCOPE:]]
// CHECK-DAG: ![[#TUPLE_MOVE_SCOPE]] = {{(distinct )?}}!DISubprogram(name: "compiler_move<{{(array\$<|\[)u64[,;].*}},{{ *[0-9]+}}>"
// CHECK-DAG: ![[#MIXED_TUPLE_LOC]] = !DILocation({{.*}}scope: ![[#MIXED_TUPLE_SCOPE:]]
// CHECK-DAG: ![[#MIXED_TUPLE_SCOPE]] = {{(distinct )?}}!DISubprogram(name: "compiler_move<{{(array\$<|\[)u64[,;].*}},{{ *[0-9]+}}>"
// CHECK-DAG: ![[#COPY2_LOC]] = !DILocation({{.*}}scope: ![[#COPY2_SCOPE:]]
// CHECK-DAG: ![[#COPY2_SCOPE]] = {{(distinct )?}}!DISubprogram(name: "compiler_move<{{(array\$<|\[)u64[,;].*}},{{ *[0-9]+}}>"
// CHECK-DAG: ![[#ARRAY_MOVE_LOC]] = !DILocation({{.*}}scope: ![[#ARRAY_MOVE_SCOPE:]]
// CHECK-DAG: ![[#ARRAY_MOVE_SCOPE]] = {{(distinct )?}}!DISubprogram(name: "compiler_move<{{(array\$<|\[)alloc::string::String[,;].*}},{{ *[0-9]+}}>"
// CHECK-DAG: ![[#FIELD_MOVE_LOC]] = !DILocation({{.*}}scope: ![[#FIELD_MOVE_SCOPE:]]
// CHECK-DAG: ![[#FIELD_MOVE_SCOPE]] = {{(distinct )?}}!DISubprogram(name: "compiler_move<{{(array\$<|\[)u64[,;].*}},{{ *[0-9]+}}>"
// CHECK-DAG: ![[#FIELD_MOVE2_LOC]] = !DILocation({{.*}}scope: ![[#FIELD_MOVE2_SCOPE:]]
// CHECK-DAG: ![[#FIELD_MOVE2_SCOPE]] = {{(distinct )?}}!DISubprogram(name: "compiler_copy<{{(array\$<|\[)u64[,;].*}},{{ *[0-9]+}}>"
// CHECK-DAG: ![[#CALL_CLONE_NONCOPY_LOC]] = !DILocation({{.*}}scope: ![[#CALL_CLONE_NONCOPY_SCOPE:]]
// CHECK-DAG: ![[#CALL_CLONE_NONCOPY_SCOPE]] = {{(distinct )?}}!DISubprogram(name: "compiler_move<{{(array\$<|\[)u64[,;].*}},{{ *[0-9]+}}>"
// CHECK-DAG: ![[#CALL_CLONE_COPY_LOC]] = !DILocation({{.*}}scope: ![[#CALL_CLONE_COPY_SCOPE:]]
// CHECK-DAG: ![[#CALL_CLONE_COPY_SCOPE]] = {{(distinct )?}}!DISubprogram(name: "compiler_move<{{(array\$<|\[)u64[,;].*}},{{ *[0-9]+}}>"
// CHECK-DAG: ![[#LOCAL_COPY_LOC]] = !DILocation({{.*}}scope: ![[#LOCAL_COPY_SCOPE:]]
// CHECK-DAG: ![[#LOCAL_COPY_SCOPE]] = {{(distinct )?}}!DISubprogram(name: "compiler_copy<integration::ExplicitCopy,{{ *[0-9]+}}>"

View file

@ -0,0 +1,112 @@
//@ compile-flags: -Z annotate-moves=100 -Copt-level=0 -g
// Test that custom size limits work correctly
#![crate_type = "lib"]
/// 99 bytes — one byte below the custom 100-byte threshold, so moves and
/// copies of this type must never be annotated.
struct Struct99 {
    data: [u8; 99], // just below custom 100-byte threshold
}
// Guard against padding/layout surprises invalidating the threshold test.
const _: () = { assert!(size_of::<Struct99>() == 99) };
impl Clone for Struct99 {
    // CHECK-LABEL: <size_limit::Struct99 as core::clone::Clone>::clone
    fn clone(&self) -> Self {
        // Should NOT be annotated since 99 < 100
        // CHECK: call void @llvm.memcpy{{.*}}, !dbg ![[#SZ99_COPY_LOC:]]
        Struct99 { data: self.data }
    }
}
// CHECK-LABEL: size_limit::test_99_copy
pub fn test_99_copy() {
    let sz99 = Struct99 { data: [42; 99] };
    let _copy = sz99.clone();
}
// CHECK-LABEL: size_limit::test_99_move
pub fn test_99_move() {
    let sz99 = Struct99 { data: [42; 99] };
    // Should NOT be annotated
    // CHECK-NOT: compiler_move
    let _moved = sz99;
}
/// 100 bytes — exactly at the custom threshold; the limit is inclusive
/// ("equal or larger"), so this type IS annotated.
struct Struct100 {
    data: [u8; 100], // 100 bytes - equal to custom 100-byte threshold
}
// Guard against padding/layout surprises invalidating the threshold test.
const _: () = { assert!(size_of::<Struct100>() == 100) };
impl Clone for Struct100 {
    // CHECK-LABEL: <size_limit::Struct100 as core::clone::Clone>::clone
    fn clone(&self) -> Self {
        // CHECK: call void @llvm.memcpy{{.*}}, !dbg ![[#SZ100_COPY_LOC:]]
        // CHECK: call void @llvm.memcpy{{.*}}, !dbg ![[#SZ100_RETURN_LOC:]]
        Struct100 { data: self.data }
    }
}
// CHECK-LABEL: size_limit::test_100_copy
pub fn test_100_copy() {
    let sz100 = Struct100 { data: [42; 100] };
    let _copy = sz100.clone();
}
// CHECK-LABEL: size_limit::test_100_move
pub fn test_100_move() {
    let sz100 = Struct100 { data: [42; 100] };
    // CHECK: call void @llvm.memcpy{{.*}}, !dbg ![[#SZ100_MOVE_LOC:]]
    let _moved = sz100;
}
/// 101 bytes — above the custom 100-byte threshold; annotated.
struct Struct101 {
    data: [u8; 101], // 101 bytes - above custom 100-byte threshold
}
// Guard against padding/layout surprises invalidating the threshold test.
const _: () = { assert!(size_of::<Struct101>() == 101) };
impl Clone for Struct101 {
    // CHECK-LABEL: <size_limit::Struct101 as core::clone::Clone>::clone
    fn clone(&self) -> Self {
        // CHECK: call void @llvm.memcpy{{.*}}, !dbg ![[#SZ101_COPY_LOC:]]
        // CHECK: call void @llvm.memcpy{{.*}}, !dbg ![[#SZ101_RETURN_LOC:]]
        Struct101 { data: self.data }
    }
}
// CHECK-LABEL: size_limit::test_101_copy
pub fn test_101_copy() {
    let sz101 = Struct101 { data: [42; 101] };
    let _copy = sz101.clone();
}
// CHECK-LABEL: size_limit::test_101_move
pub fn test_101_move() {
    let sz101 = Struct101 { data: [42; 101] };
    // CHECK: call void @llvm.memcpy{{.*}}, !dbg ![[#SZ101_MOVE_LOC:]]
    let _moved = sz101;
}
// The scope for no-annotated is clone function itself
// CHECK-DAG: ![[#SZ99_COPY_LOC]] = !DILocation({{.*}}scope: ![[#SZ99_COPY_SCOPE:]]
// CHECK-DAG: ![[#SZ99_COPY_SCOPE]] = {{(distinct )?}}!DISubprogram(name: "clone",
// Clone itself is copy, but return is move.
// CHECK-DAG: ![[#SZ100_COPY_LOC]] = !DILocation({{.*}}scope: ![[#SZ100_COPY_SCOPE:]]
// CHECK-DAG: ![[#SZ100_COPY_SCOPE]] = {{(distinct )?}}!DISubprogram(name: "compiler_copy<{{(array\$<|\[)u8[,;].*}},{{ *[0-9]+}}>"
// CHECK-DAG: ![[#SZ100_RETURN_LOC]] = !DILocation({{.*}}scope: ![[#SZ100_RETURN_SCOPE:]]
// CHECK-DAG: ![[#SZ100_RETURN_SCOPE]] = {{(distinct )?}}!DISubprogram(name: "compiler_move<{{(array\$<|\[)u8[,;].*}},{{ *[0-9]+}}>"
// Assignment is move
// CHECK-DAG: ![[#SZ100_MOVE_LOC]] = !DILocation({{.*}}scope: ![[#SZ100_MOVE_SCOPE:]]
// CHECK-DAG: ![[#SZ100_MOVE_SCOPE]] = {{(distinct )?}}!DISubprogram(name: "compiler_move<{{(array\$<|\[)u8[,;].*}},{{ *[0-9]+}}>"
// Clone itself is copy, but return is move.
// CHECK-DAG: ![[#SZ101_COPY_LOC]] = !DILocation({{.*}}scope: ![[#SZ101_COPY_SCOPE:]]
// CHECK-DAG: ![[#SZ101_COPY_SCOPE]] = {{(distinct )?}}!DISubprogram(name: "compiler_copy<{{(array\$<|\[)u8[,;].*}},{{ *[0-9]+}}>"
// CHECK-DAG: ![[#SZ101_RETURN_LOC]] = !DILocation({{.*}}scope: ![[#SZ101_RETURN_SCOPE:]]
// CHECK-DAG: ![[#SZ101_RETURN_SCOPE]] = {{(distinct )?}}!DISubprogram(name: "compiler_move<{{(array\$<|\[)u8[,;].*}},{{ *[0-9]+}}>"
// Assignment is move
// CHECK-DAG: ![[#SZ101_MOVE_LOC]] = !DILocation({{.*}}scope: ![[#SZ101_MOVE_SCOPE:]]
// CHECK-DAG: ![[#SZ101_MOVE_SCOPE]] = {{(distinct )?}}!DISubprogram(name: "compiler_move<{{(array\$<|\[)u8[,;].*}},{{ *[0-9]+}}>"

View file

@ -0,0 +1,15 @@
//@ check-pass
//@ compile-flags: -Z annotate-moves=100
// Test that valid annotate-moves flags are accepted
/// 160-byte struct — above the 100-byte limit given on the command line.
/// This is a check-pass test: it only verifies that the flag value is
/// accepted and the crate still compiles.
#[derive(Clone)]
struct TestStruct {
    data: [u64; 20], // 160 bytes
}
fn main() {
    let s = TestStruct { data: [42; 20] };
    let _copy = s.clone();
    let _moved = s;
}

View file

@ -0,0 +1,10 @@
//@ check-fail
//@ compile-flags: -Z annotate-moves=invalid
// Test that invalid values for annotate-moves flag are rejected
// The body is irrelevant: compilation fails during option parsing because
// `invalid` is neither a boolean nor a size in bytes.
fn main() {
    // This should fail at compile time due to invalid flag value
}
//~? ERROR incorrect value `invalid` for unstable option `annotate-moves`

View file

@ -0,0 +1,2 @@
error: incorrect value `invalid` for unstable option `annotate-moves` - either a boolean (`yes`, `no`, `on`, `off`, etc.), or a size limit in bytes was expected

View file

@ -0,0 +1,10 @@
//@ check-fail
//@ compile-flags: -Z annotate-moves=-5
// Test that negative size limits are rejected
// The body is irrelevant: option parsing rejects the negative size `-5`.
fn main() {
    // This should fail at compile time due to invalid negative size limit
}
//~? ERROR incorrect value `-5` for unstable option `annotate-moves`

View file

@ -0,0 +1,2 @@
error: incorrect value `-5` for unstable option `annotate-moves` - either a boolean (`yes`, `no`, `on`, `off`, etc.), or a size limit in bytes was expected