Auto merge of #123429 - matthiaskrgr:rollup-4emw4e9, r=matthiaskrgr

Rollup of 8 pull requests

Successful merges:

 - #121595 (Better reporting on generic argument mismatches)
 - #122619 (Fix some unsoundness with PassMode::Cast ABI)
 - #122964 (Rename `expose_addr` to `expose_provenance`)
 - #123291 (Move some tests)
 - #123301 (pattern analysis: fix union handling)
 - #123395 (More postfix match fixes)
 - #123419 (rustc_index: Add a `ZERO` constant to index types)
 - #123421 (Fix target name in NetBSD platform-support doc)

r? `@ghost`
`@rustbot` modify labels: rollup
bors 2024-04-03 20:19:51 +00:00
commit 4fd4797c26
133 changed files with 1324 additions and 368 deletions

View file

@ -1276,7 +1276,8 @@ impl Expr {
ExprKind::While(..) => ExprPrecedence::While,
ExprKind::ForLoop { .. } => ExprPrecedence::ForLoop,
ExprKind::Loop(..) => ExprPrecedence::Loop,
ExprKind::Match(..) => ExprPrecedence::Match,
ExprKind::Match(_, _, MatchKind::Prefix) => ExprPrecedence::Match,
ExprKind::Match(_, _, MatchKind::Postfix) => ExprPrecedence::PostfixMatch,
ExprKind::Closure(..) => ExprPrecedence::Closure,
ExprKind::Block(..) => ExprPrecedence::Block,
ExprKind::TryBlock(..) => ExprPrecedence::TryBlock,

View file

@ -281,6 +281,7 @@ pub enum ExprPrecedence {
ForLoop,
Loop,
Match,
PostfixMatch,
ConstBlock,
Block,
TryBlock,
@ -334,7 +335,8 @@ impl ExprPrecedence {
| ExprPrecedence::InlineAsm
| ExprPrecedence::Mac
| ExprPrecedence::FormatArgs
| ExprPrecedence::OffsetOf => PREC_POSTFIX,
| ExprPrecedence::OffsetOf
| ExprPrecedence::PostfixMatch => PREC_POSTFIX,
// Never need parens
ExprPrecedence::Array
@ -390,7 +392,8 @@ pub fn contains_exterior_struct_lit(value: &ast::Expr) -> bool {
| ast::ExprKind::Cast(x, _)
| ast::ExprKind::Type(x, _)
| ast::ExprKind::Field(x, _)
| ast::ExprKind::Index(x, _, _) => {
| ast::ExprKind::Index(x, _, _)
| ast::ExprKind::Match(x, _, ast::MatchKind::Postfix) => {
// &X { y: 1 }, X { y: 1 }.y
contains_exterior_struct_lit(x)
}
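
For context, a sketch of the syntax these precedence changes cover (nightly-only `postfix_match` feature; illustrative, not part of the diff). Postfix match is a postfix expression like `.field` or `.await`, which is why it is given `PREC_POSTFIX` and now participates in the exterior-struct-literal check above.

#![feature(postfix_match)]

fn main() {
    let opt = Some(1);
    // Parsed with postfix precedence, like a field access or `.await`.
    let n = opt.match {
        Some(n) => n,
        None => 0,
    };
    println!("{n}");
}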

View file

@ -3,7 +3,7 @@ use rustc_hir as hir;
use rustc_hir::def_id::{LocalDefId, LocalDefIdMap};
use rustc_hir::intravisit::Visitor;
use rustc_hir::*;
use rustc_index::{Idx, IndexVec};
use rustc_index::IndexVec;
use rustc_middle::span_bug;
use rustc_middle::ty::TyCtxt;
use rustc_span::{Span, DUMMY_SP};
@ -31,7 +31,7 @@ pub(super) fn index_hir<'hir>(
bodies: &SortedMap<ItemLocalId, &'hir Body<'hir>>,
num_nodes: usize,
) -> (IndexVec<ItemLocalId, ParentedNode<'hir>>, LocalDefIdMap<ItemLocalId>) {
let zero_id = ItemLocalId::new(0);
let zero_id = ItemLocalId::ZERO;
let err_node = ParentedNode { parent: zero_id, node: Node::Err(item.span()) };
let mut nodes = IndexVec::from_elem_n(err_node, num_nodes);
// This node's parent should never be accessed: the owner's parent is computed by the

View file

@ -11,7 +11,7 @@ use rustc_hir as hir;
use rustc_hir::def::{DefKind, Res};
use rustc_hir::def_id::{LocalDefId, CRATE_DEF_ID};
use rustc_hir::PredicateOrigin;
use rustc_index::{Idx, IndexSlice, IndexVec};
use rustc_index::{IndexSlice, IndexVec};
use rustc_middle::span_bug;
use rustc_middle::ty::{ResolverAstLowering, TyCtxt};
use rustc_span::edit_distance::find_best_match_for_name;
@ -563,7 +563,7 @@ impl<'hir> LoweringContext<'_, 'hir> {
let kind =
this.lower_use_tree(use_tree, &prefix, id, vis_span, &mut ident, attrs);
if let Some(attrs) = attrs {
this.attrs.insert(hir::ItemLocalId::new(0), attrs);
this.attrs.insert(hir::ItemLocalId::ZERO, attrs);
}
let item = hir::Item {

View file

@ -157,7 +157,7 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
attrs: SortedMap::default(),
children: Vec::default(),
current_hir_id_owner: hir::CRATE_OWNER_ID,
item_local_id_counter: hir::ItemLocalId::new(0),
item_local_id_counter: hir::ItemLocalId::ZERO,
node_id_to_local_id: Default::default(),
trait_map: Default::default(),
@ -583,7 +583,7 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
// and the caller to refer to some of the subdefinitions' nodes' `LocalDefId`s.
// Always allocate the first `HirId` for the owner itself.
let _old = self.node_id_to_local_id.insert(owner, hir::ItemLocalId::new(0));
let _old = self.node_id_to_local_id.insert(owner, hir::ItemLocalId::ZERO);
debug_assert_eq!(_old, None);
let item = f(self);
@ -677,7 +677,7 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
v.insert(local_id);
self.item_local_id_counter.increment_by(1);
assert_ne!(local_id, hir::ItemLocalId::new(0));
assert_ne!(local_id, hir::ItemLocalId::ZERO);
if let Some(def_id) = self.opt_local_def_id(ast_node_id) {
self.children.push((def_id, hir::MaybeOwner::NonOwner(hir_id)));
}
@ -696,7 +696,7 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
fn next_id(&mut self) -> hir::HirId {
let owner = self.current_hir_id_owner;
let local_id = self.item_local_id_counter;
assert_ne!(local_id, hir::ItemLocalId::new(0));
assert_ne!(local_id, hir::ItemLocalId::ZERO);
self.item_local_id_counter.increment_by(1);
hir::HirId { owner, local_id }
}

View file

@ -159,7 +159,7 @@ impl<'tcx> BorrowSet<'tcx> {
}
pub(crate) fn indices(&self) -> impl Iterator<Item = BorrowIndex> {
BorrowIndex::from_usize(0)..BorrowIndex::from_usize(self.len())
BorrowIndex::ZERO..BorrowIndex::from_usize(self.len())
}
pub(crate) fn iter_enumerated(&self) -> impl Iterator<Item = (BorrowIndex, &BorrowData<'tcx>)> {

View file

@ -2261,7 +2261,7 @@ impl<'a, 'tcx> TypeChecker<'a, 'tcx> {
}
}
CastKind::PointerExposeAddress => {
CastKind::PointerExposeProvenance => {
let ty_from = op.ty(body, tcx);
let cast_ty_from = CastTy::from_ty(ty_from);
let cast_ty_to = CastTy::from_ty(*ty);
@ -2271,7 +2271,7 @@ impl<'a, 'tcx> TypeChecker<'a, 'tcx> {
span_mirbug!(
self,
rvalue,
"Invalid PointerExposeAddress cast {:?} -> {:?}",
"Invalid PointerExposeProvenance cast {:?} -> {:?}",
ty_from,
ty
)

View file

@ -649,7 +649,7 @@ fn codegen_stmt<'tcx>(
| CastKind::IntToFloat
| CastKind::FnPtrToPtr
| CastKind::PtrToPtr
| CastKind::PointerExposeAddress
| CastKind::PointerExposeProvenance
| CastKind::PointerWithExposedProvenance,
ref operand,
to_ty,

View file

@ -1393,7 +1393,7 @@ fn llvm_add_sub<'tcx>(
// c + carry -> c + first intermediate carry or borrow respectively
let int0 = crate::num::codegen_checked_int_binop(fx, bin_op, a, b);
let c = int0.value_field(fx, FieldIdx::new(0));
let c = int0.value_field(fx, FieldIdx::ZERO);
let cb0 = int0.value_field(fx, FieldIdx::new(1)).load_scalar(fx);
// c + carry -> c + second intermediate carry or borrow respectively

View file

@ -965,7 +965,7 @@ pub(super) fn codegen_simd_intrinsic_call<'tcx>(
});
}
sym::simd_expose_addr | sym::simd_with_exposed_provenance | sym::simd_cast_ptr => {
sym::simd_expose_provenance | sym::simd_with_exposed_provenance | sym::simd_cast_ptr => {
intrinsic_args!(fx, args => (arg); intrinsic);
ret.write_cvalue_transmute(fx, arg);
}

View file

@ -61,7 +61,7 @@ pub(crate) fn get_ptr_and_method_ref<'tcx>(
if ty.is_dyn_star() {
let inner_layout = fx.layout_of(arg.layout().ty.builtin_deref(true).unwrap().ty);
let dyn_star = CPlace::for_ptr(Pointer::new(arg.load_scalar(fx)), inner_layout);
let ptr = dyn_star.place_field(fx, FieldIdx::new(0)).to_ptr();
let ptr = dyn_star.place_field(fx, FieldIdx::ZERO).to_ptr();
let vtable =
dyn_star.place_field(fx, FieldIdx::new(1)).to_cvalue(fx).load_scalar(fx);
break 'block (ptr, vtable);

View file

@ -16,13 +16,15 @@ pub use rustc_middle::ty::layout::{FAT_PTR_ADDR, FAT_PTR_EXTRA};
use rustc_middle::ty::Ty;
use rustc_session::config;
pub use rustc_target::abi::call::*;
use rustc_target::abi::{self, HasDataLayout, Int};
use rustc_target::abi::{self, HasDataLayout, Int, Size};
pub use rustc_target::spec::abi::Abi;
use rustc_target::spec::SanitizerSet;
use libc::c_uint;
use smallvec::SmallVec;
use std::cmp;
pub trait ArgAttributesExt {
fn apply_attrs_to_llfn(&self, idx: AttributePlace, cx: &CodegenCx<'_, '_>, llfn: &Value);
fn apply_attrs_to_callsite(
@ -130,42 +132,36 @@ impl LlvmType for Reg {
impl LlvmType for CastTarget {
fn llvm_type<'ll>(&self, cx: &CodegenCx<'ll, '_>) -> &'ll Type {
let rest_ll_unit = self.rest.unit.llvm_type(cx);
let (rest_count, rem_bytes) = if self.rest.unit.size.bytes() == 0 {
(0, 0)
let rest_count = if self.rest.total == Size::ZERO {
0
} else {
(
self.rest.total.bytes() / self.rest.unit.size.bytes(),
self.rest.total.bytes() % self.rest.unit.size.bytes(),
)
assert_ne!(
self.rest.unit.size,
Size::ZERO,
"total size {:?} cannot be divided into units of zero size",
self.rest.total
);
if self.rest.total.bytes() % self.rest.unit.size.bytes() != 0 {
assert_eq!(self.rest.unit.kind, RegKind::Integer, "only int regs can be split");
}
self.rest.total.bytes().div_ceil(self.rest.unit.size.bytes())
};
// Simplify to a single unit or an array if there's no prefix.
// This produces the same layout, but using a simpler type.
if self.prefix.iter().all(|x| x.is_none()) {
// Simplify to a single unit when there is no prefix and size <= unit size
if self.rest.total <= self.rest.unit.size {
if rest_count == 1 {
return rest_ll_unit;
}
// Simplify to array when all chunks are the same size and type
if rem_bytes == 0 {
return cx.type_array(rest_ll_unit, rest_count);
}
}
// Create list of fields in the main structure
let mut args: Vec<_> = self
.prefix
.iter()
.flat_map(|option_reg| option_reg.map(|reg| reg.llvm_type(cx)))
.chain((0..rest_count).map(|_| rest_ll_unit))
.collect();
// Append final integer
if rem_bytes != 0 {
// Only integers can be really split further.
assert_eq!(self.rest.unit.kind, RegKind::Integer);
args.push(cx.type_ix(rem_bytes * 8));
return cx.type_array(rest_ll_unit, rest_count);
}
// Generate a struct type with the prefix and the "rest" arguments.
let prefix_args =
self.prefix.iter().flat_map(|option_reg| option_reg.map(|reg| reg.llvm_type(cx)));
let rest_args = (0..rest_count).map(|_| rest_ll_unit);
let args: Vec<_> = prefix_args.chain(rest_args).collect();
cx.type_struct(&args, false)
}
}
@ -215,47 +211,33 @@ impl<'ll, 'tcx> ArgAbiExt<'ll, 'tcx> for ArgAbi<'tcx, Ty<'tcx>> {
bug!("unsized `ArgAbi` must be handled through `store_fn_arg`");
}
PassMode::Cast { cast, pad_i32: _ } => {
// FIXME(eddyb): Figure out when the simpler Store is safe, clang
// uses it for i16 -> {i8, i8}, but not for i24 -> {i8, i8, i8}.
let can_store_through_cast_ptr = false;
if can_store_through_cast_ptr {
bx.store(val, dst.llval, self.layout.align.abi);
} else {
// The actual return type is a struct, but the ABI
// adaptation code has cast it into some scalar type. The
// code that follows is the only reliable way I have
// found to do a transform like i64 -> {i32,i32}.
// Basically we dump the data onto the stack then memcpy it.
//
// Other approaches I tried:
// - Casting rust ret pointer to the foreign type and using Store
// is (a) unsafe if size of foreign type > size of rust type and
// (b) runs afoul of strict aliasing rules, yielding invalid
// assembly under -O (specifically, the store gets removed).
// - Truncating foreign type to correct integral type and then
// bitcasting to the struct type yields invalid cast errors.
// We instead thus allocate some scratch space...
let scratch_size = cast.size(bx);
let scratch_align = cast.align(bx);
let llscratch = bx.alloca(cast.llvm_type(bx), scratch_align);
bx.lifetime_start(llscratch, scratch_size);
// ... where we first store the value...
bx.store(val, llscratch, scratch_align);
// ... and then memcpy it to the intended destination.
bx.memcpy(
dst.llval,
self.layout.align.abi,
llscratch,
scratch_align,
bx.const_usize(self.layout.size.bytes()),
MemFlags::empty(),
);
bx.lifetime_end(llscratch, scratch_size);
}
// The ABI mandates that the value is passed as a different struct representation.
// Spill and reload it from the stack to convert from the ABI representation to
// the Rust representation.
let scratch_size = cast.size(bx);
let scratch_align = cast.align(bx);
// Note that the ABI type may be either larger or smaller than the Rust type,
// due to the presence or absence of trailing padding. For example:
// - On some ABIs, the Rust layout { f64, f32, <f32 padding> } may omit padding
// when passed by value, making it smaller.
// - On some ABIs, the Rust layout { u16, u16, u16 } may be padded up to 8 bytes
// when passed by value, making it larger.
let copy_bytes = cmp::min(scratch_size.bytes(), self.layout.size.bytes());
// Allocate some scratch space...
let llscratch = bx.alloca(cast.llvm_type(bx), scratch_align);
bx.lifetime_start(llscratch, scratch_size);
// ...store the value...
bx.store(val, llscratch, scratch_align);
// ... and then memcpy it to the intended destination.
bx.memcpy(
dst.llval,
self.layout.align.abi,
llscratch,
scratch_align,
bx.const_usize(copy_bytes),
MemFlags::empty(),
);
bx.lifetime_end(llscratch, scratch_size);
}
_ => {
OperandRef::from_immediate_or_packed_pair(bx, val, self.layout).val.store(bx, dst);

View file

@ -2111,7 +2111,7 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
return Ok(args[0].immediate());
}
if name == sym::simd_expose_addr {
if name == sym::simd_expose_provenance {
let (out_len, out_elem) = require_simd!(ret_ty, SimdReturn);
require!(
in_len == out_len,

View file

@ -1505,9 +1505,35 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
if by_ref && !arg.is_indirect() {
// Have to load the argument, maybe while casting it.
if let PassMode::Cast { cast: ty, .. } = &arg.mode {
let llty = bx.cast_backend_type(ty);
llval = bx.load(llty, llval, align.min(arg.layout.align.abi));
if let PassMode::Cast { cast, pad_i32: _ } = &arg.mode {
// The ABI mandates that the value is passed as a different struct representation.
// Spill and reload it from the stack to convert from the Rust representation to
// the ABI representation.
let scratch_size = cast.size(bx);
let scratch_align = cast.align(bx);
// Note that the ABI type may be either larger or smaller than the Rust type,
// due to the presence or absence of trailing padding. For example:
// - On some ABIs, the Rust layout { f64, f32, <f32 padding> } may omit padding
// when passed by value, making it smaller.
// - On some ABIs, the Rust layout { u16, u16, u16 } may be padded up to 8 bytes
// when passed by value, making it larger.
let copy_bytes = cmp::min(scratch_size.bytes(), arg.layout.size.bytes());
// Allocate some scratch space...
let llscratch = bx.alloca(bx.cast_backend_type(cast), scratch_align);
bx.lifetime_start(llscratch, scratch_size);
// ...memcpy the value...
bx.memcpy(
llscratch,
scratch_align,
llval,
align,
bx.const_usize(copy_bytes),
MemFlags::empty(),
);
// ...and then load it with the ABI type.
let cast_ty = bx.cast_backend_type(cast);
llval = bx.load(cast_ty, llscratch, scratch_align);
bx.lifetime_end(llscratch, scratch_size);
} else {
// We can't use `PlaceRef::load` here because the argument
// may have a type we don't treat as immediate, but the ABI

View file

@ -405,7 +405,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
let cast = bx.cx().layout_of(self.monomorphize(mir_cast_ty));
let val = match *kind {
mir::CastKind::PointerExposeAddress => {
mir::CastKind::PointerExposeProvenance => {
assert!(bx.cx().is_backend_immediate(cast));
let llptr = operand.immediate();
let llcast_ty = bx.cx().immediate_backend_type(cast);

View file

@ -34,9 +34,9 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
self.unsize_into(src, cast_layout, dest)?;
}
CastKind::PointerExposeAddress => {
CastKind::PointerExposeProvenance => {
let src = self.read_immediate(src)?;
let res = self.pointer_expose_address_cast(&src, cast_layout)?;
let res = self.pointer_expose_provenance_cast(&src, cast_layout)?;
self.write_immediate(*res, dest)?;
}
@ -225,7 +225,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
}
}
pub fn pointer_expose_address_cast(
pub fn pointer_expose_provenance_cast(
&mut self,
src: &ImmTy<'tcx, M::Provenance>,
cast_to: TyAndLayout<'tcx>,

View file

@ -544,7 +544,7 @@ impl<'tcx> Visitor<'tcx> for Checker<'_, 'tcx> {
// Unsizing is implemented for CTFE.
}
Rvalue::Cast(CastKind::PointerExposeAddress, _, _) => {
Rvalue::Cast(CastKind::PointerExposeProvenance, _, _) => {
self.check_op(ops::RawPtrToIntCast);
}
Rvalue::Cast(CastKind::PointerWithExposedProvenance, _, _) => {

View file

@ -1077,7 +1077,7 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
}
// FIXME: Add Checks for these
CastKind::PointerWithExposedProvenance
| CastKind::PointerExposeAddress
| CastKind::PointerExposeProvenance
| CastKind::PointerCoercion(_) => {}
CastKind::IntToInt | CastKind::IntToFloat => {
let input_valid = op_ty.is_integral() || op_ty.is_char() || op_ty.is_bool();

View file

@ -72,7 +72,7 @@ fn dominators_impl<G: ControlFlowGraph>(graph: &G) -> Inner<G::Node> {
IndexVec::with_capacity(graph.num_nodes());
let mut stack = vec![PreOrderFrame {
pre_order_idx: PreorderIndex::new(0),
pre_order_idx: PreorderIndex::ZERO,
iter: graph.successors(graph.start_node()),
}];
let mut pre_order_to_real: IndexVec<PreorderIndex, G::Node> =
@ -80,8 +80,8 @@ fn dominators_impl<G: ControlFlowGraph>(graph: &G) -> Inner<G::Node> {
let mut real_to_pre_order: IndexVec<G::Node, Option<PreorderIndex>> =
IndexVec::from_elem_n(None, graph.num_nodes());
pre_order_to_real.push(graph.start_node());
parent.push(PreorderIndex::new(0)); // the parent of the root node is the root for now.
real_to_pre_order[graph.start_node()] = Some(PreorderIndex::new(0));
parent.push(PreorderIndex::ZERO); // the parent of the root node is the root for now.
real_to_pre_order[graph.start_node()] = Some(PreorderIndex::ZERO);
let mut post_order_idx = 0;
// Traverse the graph, collecting a number of things:
@ -111,7 +111,7 @@ fn dominators_impl<G: ControlFlowGraph>(graph: &G) -> Inner<G::Node> {
let reachable_vertices = pre_order_to_real.len();
let mut idom = IndexVec::from_elem_n(PreorderIndex::new(0), reachable_vertices);
let mut idom = IndexVec::from_elem_n(PreorderIndex::ZERO, reachable_vertices);
let mut semi = IndexVec::from_fn_n(std::convert::identity, reachable_vertices);
let mut label = semi.clone();
let mut bucket = IndexVec::from_elem_n(vec![], reachable_vertices);

View file

@ -1951,6 +1951,39 @@ pub fn report_ambiguity_error<'a, G: EmissionGuarantee>(
}
}
/// Grammatical tool for displaying messages to end users in a nice form.
///
/// Returns "an" if the given string starts with a vowel, and "a" otherwise.
pub fn a_or_an(s: &str) -> &'static str {
let mut chars = s.chars();
let Some(mut first_alpha_char) = chars.next() else {
return "a";
};
if first_alpha_char == '`' {
let Some(next) = chars.next() else {
return "a";
};
first_alpha_char = next;
}
if ["a", "e", "i", "o", "u", "&"].contains(&&first_alpha_char.to_lowercase().to_string()[..]) {
"an"
} else {
"a"
}
}
/// Grammatical tool for displaying messages to end users in a nice form.
///
/// Takes a list ["a", "b", "c"] and outputs a display-friendly version: "a, b and c"
pub fn display_list_with_comma_and<T: std::fmt::Display>(v: &[T]) -> String {
match v.len() {
0 => "".to_string(),
1 => v[0].to_string(),
2 => format!("{} and {}", v[0], v[1]),
_ => format!("{}, {}", v[0], display_list_with_comma_and(&v[1..])),
}
}
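
Illustrative only (not part of the diff): a minimal sketch of what the two helpers above produce, assuming they are in scope.

fn _helper_examples() {
    assert_eq!(a_or_an("integer"), "an");
    assert_eq!(a_or_an("reference"), "a");
    // The leading backtick is skipped, and `&` is treated like a vowel.
    assert_eq!(a_or_an("`&str`"), "an");
    assert_eq!(display_list_with_comma_and(&["a", "b"]), "a and b");
    assert_eq!(display_list_with_comma_and(&["a", "b", "c"]), "a, b and c");
}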
#[derive(Clone, Copy, PartialEq, Hash, Debug)]
pub enum TerminalUrl {
No,

View file

@ -846,9 +846,8 @@ pub struct OwnerNodes<'tcx> {
impl<'tcx> OwnerNodes<'tcx> {
pub fn node(&self) -> OwnerNode<'tcx> {
use rustc_index::Idx;
// Indexing must ensure it is an OwnerNode.
self.nodes[ItemLocalId::new(0)].node.as_owner().unwrap()
self.nodes[ItemLocalId::ZERO].node.as_owner().unwrap()
}
}
@ -856,7 +855,7 @@ impl fmt::Debug for OwnerNodes<'_> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("OwnerNodes")
// Do not print all the pointers to all the nodes, as it would be unreadable.
.field("node", &self.nodes[ItemLocalId::from_u32(0)])
.field("node", &self.nodes[ItemLocalId::ZERO])
.field(
"parents",
&self

View file

@ -17,7 +17,7 @@ impl Debug for OwnerId {
impl From<OwnerId> for HirId {
fn from(owner: OwnerId) -> HirId {
HirId { owner, local_id: ItemLocalId::from_u32(0) }
HirId { owner, local_id: ItemLocalId::ZERO }
}
}
@ -110,7 +110,7 @@ impl HirId {
#[inline]
pub fn make_owner(owner: LocalDefId) -> Self {
Self { owner: OwnerId { def_id: owner }, local_id: ItemLocalId::from_u32(0) }
Self { owner: OwnerId { def_id: owner }, local_id: ItemLocalId::ZERO }
}
pub fn index(self) -> (usize, usize) {
@ -172,6 +172,6 @@ unsafe impl StableOrd for ItemLocalId {
/// The `HirId` corresponding to `CRATE_NODE_ID` and `CRATE_DEF_ID`.
pub const CRATE_HIR_ID: HirId =
HirId { owner: OwnerId { def_id: CRATE_DEF_ID }, local_id: ItemLocalId::from_u32(0) };
HirId { owner: OwnerId { def_id: CRATE_DEF_ID }, local_id: ItemLocalId::ZERO };
pub const CRATE_OWNER_ID: OwnerId = OwnerId { def_id: CRATE_DEF_ID };

View file

@ -899,7 +899,7 @@ pub fn check_simd(tcx: TyCtxt<'_>, sp: Span, def_id: LocalDefId) {
struct_span_code_err!(tcx.dcx(), sp, E0075, "SIMD vector cannot be empty").emit();
return;
}
let e = fields[FieldIdx::from_u32(0)].ty(tcx, args);
let e = fields[FieldIdx::ZERO].ty(tcx, args);
if !fields.iter().all(|f| f.ty(tcx, args) == e) {
struct_span_code_err!(tcx.dcx(), sp, E0076, "SIMD vector should be homogeneous")
.with_span_label(sp, "SIMD elements must have the same type")

View file

@ -183,7 +183,7 @@ pub fn check_intrinsic_type(
let region = ty::Region::new_bound(
tcx,
ty::INNERMOST,
ty::BoundRegion { var: ty::BoundVar::from_u32(0), kind: ty::BrAnon },
ty::BoundRegion { var: ty::BoundVar::ZERO, kind: ty::BrAnon },
);
let env_region = ty::Region::new_bound(
tcx,
@ -495,7 +495,7 @@ pub fn check_intrinsic_type(
);
let discriminant_def_id = assoc_items[0];
let br = ty::BoundRegion { var: ty::BoundVar::from_u32(0), kind: ty::BrAnon };
let br = ty::BoundRegion { var: ty::BoundVar::ZERO, kind: ty::BrAnon };
(
1,
0,
@ -555,7 +555,7 @@ pub fn check_intrinsic_type(
}
sym::raw_eq => {
let br = ty::BoundRegion { var: ty::BoundVar::from_u32(0), kind: ty::BrAnon };
let br = ty::BoundRegion { var: ty::BoundVar::ZERO, kind: ty::BrAnon };
let param_ty_lhs =
Ty::new_imm_ref(tcx, ty::Region::new_bound(tcx, ty::INNERMOST, br), param(0));
let br = ty::BoundRegion { var: ty::BoundVar::from_u32(1), kind: ty::BrAnon };
@ -627,7 +627,7 @@ pub fn check_intrinsic_type(
sym::simd_cast
| sym::simd_as
| sym::simd_cast_ptr
| sym::simd_expose_addr
| sym::simd_expose_provenance
| sym::simd_with_exposed_provenance => (2, 0, vec![param(0)], param(1)),
sym::simd_bitmask => (2, 0, vec![param(0)], param(1)),
sym::simd_select | sym::simd_select_bitmask => {

View file

@ -67,7 +67,7 @@ impl<'a, 'tcx> InlineAsmCtxt<'a, 'tcx> {
ty::RawPtr(ty, _) if self.is_thin_ptr_ty(ty) => Some(asm_ty_isize),
ty::Adt(adt, args) if adt.repr().simd() => {
let fields = &adt.non_enum_variant().fields;
let elem_ty = fields[FieldIdx::from_u32(0)].ty(self.tcx, args);
let elem_ty = fields[FieldIdx::ZERO].ty(self.tcx, args);
let (size, ty) = match elem_ty.kind() {
ty::Array(ty, len) => {
@ -146,7 +146,7 @@ impl<'a, 'tcx> InlineAsmCtxt<'a, 'tcx> {
"expected first field of `MaybeUnit` to be `ManuallyDrop`"
);
let fields = &ty.non_enum_variant().fields;
let ty = fields[FieldIdx::from_u32(0)].ty(self.tcx, args);
let ty = fields[FieldIdx::ZERO].ty(self.tcx, args);
self.get_asm_ty(ty)
}
_ => self.get_asm_ty(ty),

View file

@ -628,7 +628,7 @@ impl<'tcx> dyn HirTyLowerer<'tcx> + '_ {
let projection_ty = pred.skip_binder().projection_ty;
let args_with_infer_self = tcx.mk_args_from_iter(
std::iter::once(Ty::new_var(tcx, ty::TyVid::from_u32(0)).into())
std::iter::once(Ty::new_var(tcx, ty::TyVid::ZERO).into())
.chain(projection_ty.args.iter().skip(1)),
);

View file

@ -91,7 +91,7 @@ hir_typeck_lossy_provenance_int2ptr =
hir_typeck_lossy_provenance_ptr2int =
under strict provenance it is considered bad style to cast pointer `{$expr_ty}` to integer `{$cast_ty}`
.suggestion = use `.addr()` to obtain the address of a pointer
.help = if you can't comply with strict provenance and need to expose the pointer provenance you can use `.expose_addr()` instead
.help = if you can't comply with strict provenance and need to expose the pointer provenance you can use `.expose_provenance()` instead
hir_typeck_method_call_on_unknown_raw_pointee =
cannot call a method on a raw pointer with an unknown pointee type

View file

@ -182,7 +182,7 @@ fn check_panic_info_fn(tcx: TyCtxt<'_>, fn_id: LocalDefId, fn_sig: ty::FnSig<'_>
ty::Region::new_bound(
tcx,
ty::INNERMOST,
ty::BoundRegion { var: ty::BoundVar::from_u32(0), kind: ty::BrAnon },
ty::BoundRegion { var: ty::BoundVar::ZERO, kind: ty::BrAnon },
),
panic_info_ty,
);

View file

@ -17,7 +17,8 @@ use itertools::Itertools;
use rustc_ast as ast;
use rustc_data_structures::fx::FxIndexSet;
use rustc_errors::{
codes::*, pluralize, Applicability, Diag, ErrorGuaranteed, MultiSpan, StashKey,
a_or_an, codes::*, display_list_with_comma_and, pluralize, Applicability, Diag,
ErrorGuaranteed, MultiSpan, StashKey,
};
use rustc_hir as hir;
use rustc_hir::def::{CtorOf, DefKind, Res};
@ -818,6 +819,8 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
call_expr,
None,
Some(mismatch_idx),
&matched_inputs,
&formal_and_expected_inputs,
is_method,
);
suggest_confusable(&mut err);
@ -904,6 +907,15 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
);
err.span_label(full_call_span, format!("arguments to this {call_name} are incorrect"));
self.label_generic_mismatches(
&mut err,
fn_def_id,
&matched_inputs,
&provided_arg_tys,
&formal_and_expected_inputs,
is_method,
);
if let hir::ExprKind::MethodCall(_, rcvr, _, _) = call_expr.kind
&& provided_idx.as_usize() == expected_idx.as_usize()
{
@ -932,6 +944,8 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
call_expr,
Some(expected_ty),
Some(expected_idx.as_usize()),
&matched_inputs,
&formal_and_expected_inputs,
is_method,
);
suggest_confusable(&mut err);
@ -1270,6 +1284,15 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
}
}
self.label_generic_mismatches(
&mut err,
fn_def_id,
&matched_inputs,
&provided_arg_tys,
&formal_and_expected_inputs,
is_method,
);
// Incorporate the argument changes in the removal suggestion.
// When a type is *missing*, and the rest are additional, we want to suggest these with a
// multipart suggestion, but in order to do so we need to figure out *where* the arg that
@ -1317,7 +1340,17 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
}
// Call out where the function is defined
self.label_fn_like(&mut err, fn_def_id, callee_ty, call_expr, None, None, is_method);
self.label_fn_like(
&mut err,
fn_def_id,
callee_ty,
call_expr,
None,
None,
&matched_inputs,
&formal_and_expected_inputs,
is_method,
);
// And add a suggestion block for all of the parameters
let suggestion_text = match suggestion_text {
@ -2094,6 +2127,8 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
expected_ty: Option<Ty<'tcx>>,
// A specific argument should be labeled, instead of all of them
expected_idx: Option<usize>,
matched_inputs: &IndexVec<ExpectedIdx, Option<ProvidedIdx>>,
formal_and_expected_inputs: &IndexVec<ExpectedIdx, (Ty<'tcx>, Ty<'tcx>)>,
is_method: bool,
) {
let Some(mut def_id) = callable_def_id else {
@ -2185,21 +2220,164 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
{
let mut spans: MultiSpan = def_span.into();
let params = self
let params_with_generics = self.get_hir_params_with_generics(def_id, is_method);
let mut generics_with_unmatched_params = Vec::new();
let check_for_matched_generics = || {
if matched_inputs.iter().any(|x| x.is_some())
&& params_with_generics.iter().any(|x| x.0.is_some())
{
for (idx, (generic, _)) in params_with_generics.iter().enumerate() {
// Param has to have a generic and be matched to be relevant
if matched_inputs[idx.into()].is_none() {
continue;
}
let Some(generic) = generic else {
continue;
};
for unmatching_idx in idx + 1..params_with_generics.len() {
if matched_inputs[unmatching_idx.into()].is_none()
&& let Some(unmatched_idx_param_generic) =
params_with_generics[unmatching_idx].0
&& unmatched_idx_param_generic.name.ident() == generic.name.ident()
{
// We found a parameter that didn't match but needed to.
return true;
}
}
}
}
false
};
let check_for_matched_generics = check_for_matched_generics();
for (idx, (generic_param, param)) in
params_with_generics.iter().enumerate().filter(|(idx, _)| {
check_for_matched_generics
|| expected_idx.map_or(true, |expected_idx| expected_idx == *idx)
})
{
let Some(generic_param) = generic_param else {
spans.push_span_label(param.span, "");
continue;
};
let other_params_matched: Vec<(usize, &hir::Param<'_>)> = params_with_generics
.iter()
.enumerate()
.filter(|(other_idx, (other_generic_param, _))| {
if *other_idx == idx {
return false;
}
let Some(other_generic_param) = other_generic_param else {
return false;
};
if matched_inputs[idx.into()].is_none()
&& matched_inputs[(*other_idx).into()].is_none()
{
return false;
}
if matched_inputs[idx.into()].is_some()
&& matched_inputs[(*other_idx).into()].is_some()
{
return false;
}
other_generic_param.name.ident() == generic_param.name.ident()
})
.map(|(other_idx, (_, other_param))| (other_idx, *other_param))
.collect();
if !other_params_matched.is_empty() {
let other_param_matched_names: Vec<String> = other_params_matched
.iter()
.map(|(_, other_param)| {
if let hir::PatKind::Binding(_, _, ident, _) = other_param.pat.kind {
format!("`{ident}`")
} else {
"{unknown}".to_string()
}
})
.collect();
let matched_ty = self
.resolve_vars_if_possible(formal_and_expected_inputs[idx.into()].1)
.sort_string(self.tcx);
if matched_inputs[idx.into()].is_some() {
spans.push_span_label(
param.span,
format!(
"{} {} to match the {} type of this parameter",
display_list_with_comma_and(&other_param_matched_names),
format!(
"need{}",
pluralize!(if other_param_matched_names.len() == 1 {
0
} else {
1
})
),
matched_ty,
),
);
} else {
spans.push_span_label(
param.span,
format!(
"this parameter needs to match the {} type of {}",
matched_ty,
display_list_with_comma_and(&other_param_matched_names),
),
);
}
generics_with_unmatched_params.push(generic_param);
} else {
spans.push_span_label(param.span, "");
}
}
for generic_param in self
.tcx
.hir()
.get_if_local(def_id)
.and_then(|node| node.body_id())
.and_then(|node| node.generics())
.into_iter()
.flat_map(|id| self.tcx.hir().body(id).params)
.skip(if is_method { 1 } else { 0 });
for (_, param) in params
.into_iter()
.enumerate()
.filter(|(idx, _)| expected_idx.map_or(true, |expected_idx| expected_idx == *idx))
.flat_map(|x| x.params)
.filter(|x| {
generics_with_unmatched_params.iter().any(|y| x.name.ident() == y.name.ident())
})
{
spans.push_span_label(param.span, "");
let param_idents_matching: Vec<String> = params_with_generics
.iter()
.filter(|(generic, _)| {
if let Some(generic) = generic {
generic.name.ident() == generic_param.name.ident()
} else {
false
}
})
.map(|(_, param)| {
if let hir::PatKind::Binding(_, _, ident, _) = param.pat.kind {
format!("`{ident}`")
} else {
"{unknown}".to_string()
}
})
.collect();
if !param_idents_matching.is_empty() {
spans.push_span_label(
generic_param.span,
format!(
"{} all reference this parameter {}",
display_list_with_comma_and(&param_idents_matching),
generic_param.name.ident().name,
),
);
}
}
err.span_note(spans, format!("{} defined here", self.tcx.def_descr(def_id)));
@ -2260,6 +2438,115 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
);
}
}
fn label_generic_mismatches(
&self,
err: &mut Diag<'_>,
callable_def_id: Option<DefId>,
matched_inputs: &IndexVec<ExpectedIdx, Option<ProvidedIdx>>,
provided_arg_tys: &IndexVec<ProvidedIdx, (Ty<'tcx>, Span)>,
formal_and_expected_inputs: &IndexVec<ExpectedIdx, (Ty<'tcx>, Ty<'tcx>)>,
is_method: bool,
) {
let Some(def_id) = callable_def_id else {
return;
};
let params_with_generics = self.get_hir_params_with_generics(def_id, is_method);
for (idx, (generic_param, _)) in params_with_generics.iter().enumerate() {
if matched_inputs[idx.into()].is_none() {
continue;
}
let Some((_, matched_arg_span)) = provided_arg_tys.get(idx.into()) else {
continue;
};
let Some(generic_param) = generic_param else {
continue;
};
let mut idxs_matched: Vec<usize> = vec![];
for (other_idx, (_, _)) in params_with_generics.iter().enumerate().filter(
|(other_idx, (other_generic_param, _))| {
if *other_idx == idx {
return false;
}
let Some(other_generic_param) = other_generic_param else {
return false;
};
if matched_inputs[(*other_idx).into()].is_some() {
return false;
}
other_generic_param.name.ident() == generic_param.name.ident()
},
) {
idxs_matched.push(other_idx.into());
}
if idxs_matched.is_empty() {
continue;
}
let expected_display_type = self
.resolve_vars_if_possible(formal_and_expected_inputs[idx.into()].1)
.sort_string(self.tcx);
let label = if idxs_matched.len() == params_with_generics.len() - 1 {
format!(
"expected all arguments to be this {} type because they need to match the type of this parameter",
expected_display_type
)
} else {
format!(
"expected some other arguments to be {} {} type to match the type of this parameter",
a_or_an(&expected_display_type),
expected_display_type,
)
};
err.span_label(*matched_arg_span, label);
}
}
fn get_hir_params_with_generics(
&self,
def_id: DefId,
is_method: bool,
) -> Vec<(Option<&hir::GenericParam<'_>>, &hir::Param<'_>)> {
let fn_node = self.tcx.hir().get_if_local(def_id);
let generic_params: Vec<Option<&hir::GenericParam<'_>>> = fn_node
.and_then(|node| node.fn_decl())
.into_iter()
.flat_map(|decl| decl.inputs)
.skip(if is_method { 1 } else { 0 })
.map(|param| {
if let hir::TyKind::Path(QPath::Resolved(
_,
hir::Path { res: Res::Def(_, res_def_id), .. },
)) = param.kind
{
fn_node
.and_then(|node| node.generics())
.into_iter()
.flat_map(|generics| generics.params)
.find(|gen| &gen.def_id.to_def_id() == res_def_id)
} else {
None
}
})
.collect();
let params: Vec<&hir::Param<'_>> = fn_node
.and_then(|node| node.body_id())
.into_iter()
.flat_map(|id| self.tcx.hir().body(id).params)
.skip(if is_method { 1 } else { 0 })
.collect();
generic_params.into_iter().zip(params).collect()
}
}
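
A hypothetical program (not from the diff) of the shape this labelling targets: both parameters share the generic `T`, the first argument pins `T`, and a conflicting second argument now gets labels pointing at both parameters and at the argument that fixed the type.

fn same<T>(a: T, b: T) -> T { if true { a } else { b } }

fn main() {
    let ok = same(1_u32, 2_u32);
    // `same(1_u32, "one")` would not compile; with this change, `a` and `b`
    // are both labelled because they share `T`, and the `1_u32` argument is
    // marked as the one that fixed `T = u32`.
    println!("{ok}");
}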
struct FindClosureArg<'tcx> {

View file

@ -17,7 +17,7 @@ fn unpack_option_like<'tcx>(tcx: TyCtxt<'tcx>, ty: Ty<'tcx>) -> Ty<'tcx> {
let data_idx;
let one = VariantIdx::new(1);
let zero = VariantIdx::new(0);
let zero = VariantIdx::ZERO;
if def.variant(zero).fields.is_empty() {
data_idx = one;

View file

@ -774,7 +774,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
let projection_ty = pred.skip_binder().projection_ty;
let args_with_infer_self = tcx.mk_args_from_iter(
iter::once(Ty::new_var(tcx, ty::TyVid::from_u32(0)).into())
iter::once(Ty::new_var(tcx, ty::TyVid::ZERO).into())
.chain(projection_ty.args.iter().skip(1)),
);

View file

@ -343,10 +343,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
let closure_env_region: ty::Region<'_> = ty::Region::new_bound(
self.tcx,
ty::INNERMOST,
ty::BoundRegion {
var: ty::BoundVar::from_usize(0),
kind: ty::BoundRegionKind::BrEnv,
},
ty::BoundRegion { var: ty::BoundVar::ZERO, kind: ty::BoundRegionKind::BrEnv },
);
let tupled_upvars_ty_for_borrow = Ty::new_tup_from_iter(
self.tcx,

View file

@ -174,6 +174,9 @@ impl Parse for Newtype {
/// Maximum value the index can take.
#vis const MAX: Self = Self::from_u32(#max);
/// Zero value of the index.
#vis const ZERO: Self = Self::from_u32(0);
/// Creates a new index from a given `usize`.
///
/// # Panics

View file

@ -2797,17 +2797,17 @@ declare_lint! {
/// Since this cast is lossy, it is considered good style to use the
/// [`ptr::addr`] method instead, which has a similar effect, but doesn't
/// "expose" the pointer provenance. This improves optimisation potential.
/// See the docs of [`ptr::addr`] and [`ptr::expose_addr`] for more information
/// See the docs of [`ptr::addr`] and [`ptr::expose_provenance`] for more information
/// about exposing pointer provenance.
///
/// If your code can't comply with strict provenance and needs to expose
/// the provenance, then there is [`ptr::expose_addr`] as an escape hatch,
/// the provenance, then there is [`ptr::expose_provenance`] as an escape hatch,
/// which preserves the behaviour of `as usize` casts while being explicit
/// about the semantics.
///
/// [issue #95228]: https://github.com/rust-lang/rust/issues/95228
/// [`ptr::addr`]: https://doc.rust-lang.org/core/primitive.pointer.html#method.addr
/// [`ptr::expose_addr`]: https://doc.rust-lang.org/core/primitive.pointer.html#method.expose_addr
/// [`ptr::expose_provenance`]: https://doc.rust-lang.org/core/primitive.pointer.html#method.expose_provenance
pub LOSSY_PROVENANCE_CASTS,
Allow,
"a lossy pointer to integer cast is used",

View file

@ -13,7 +13,6 @@ use rustc_hir::def_id::{DefId, LocalDefId, LocalModDefId, LOCAL_CRATE};
use rustc_hir::definitions::{DefKey, DefPath, DefPathHash};
use rustc_hir::intravisit::Visitor;
use rustc_hir::*;
use rustc_index::Idx;
use rustc_middle::hir::nested_filter;
use rustc_span::def_id::StableCrateId;
use rustc_span::symbol::{kw, sym, Ident, Symbol};
@ -69,7 +68,7 @@ impl<'hir> Iterator for ParentOwnerIterator<'hir> {
fn next(&mut self) -> Option<Self::Item> {
if self.current_id.local_id.index() != 0 {
self.current_id.local_id = ItemLocalId::new(0);
self.current_id.local_id = ItemLocalId::ZERO;
let node = self.map.tcx.hir_owner_node(self.current_id.owner);
return Some((self.current_id.owner, node));
}
@ -133,7 +132,7 @@ impl<'tcx> TyCtxt<'tcx> {
/// If calling repeatedly and iterating over parents, prefer [`Map::parent_iter`].
pub fn parent_hir_id(self, hir_id: HirId) -> HirId {
let HirId { owner, local_id } = hir_id;
if local_id == ItemLocalId::from_u32(0) {
if local_id == ItemLocalId::ZERO {
self.hir_owner_parent(owner)
} else {
let parent_local_id = self.hir_owner_nodes(owner).nodes[local_id].parent;

View file

@ -82,7 +82,7 @@ impl CanonicalVarValues<'_> {
}
pub fn is_identity_modulo_regions(&self) -> bool {
let mut var = ty::BoundVar::from_u32(0);
let mut var = ty::BoundVar::ZERO;
for arg in self.var_values {
match arg.unpack() {
ty::GenericArgKind::Lifetime(r) => {

View file

@ -34,7 +34,7 @@ rustc_index::newtype_index! {
}
impl CounterId {
pub const START: Self = Self::from_u32(0);
pub const START: Self = Self::ZERO;
}
rustc_index::newtype_index! {
@ -56,7 +56,7 @@ rustc_index::newtype_index! {
}
impl ExpressionId {
pub const START: Self = Self::from_u32(0);
pub const START: Self = Self::ZERO;
}
/// Enum that can hold a constant zero value, the ID of an physical coverage

View file

@ -409,7 +409,7 @@ impl<'tcx> Rvalue<'tcx> {
// Pointer to int casts may be side-effects due to exposing the provenance.
// While the model is undecided, we should be conservative. See
// <https://www.ralfj.de/blog/2022/04/11/provenance-exposed.html>
Rvalue::Cast(CastKind::PointerExposeAddress, _, _) => false,
Rvalue::Cast(CastKind::PointerExposeProvenance, _, _) => false,
Rvalue::Use(_)
| Rvalue::CopyForDeref(_)

View file

@ -1309,8 +1309,8 @@ pub enum Rvalue<'tcx> {
pub enum CastKind {
/// An exposing pointer to address cast. A cast between a pointer and an integer type, or
/// between a function pointer and an integer type.
/// See the docs on `expose_addr` for more details.
PointerExposeAddress,
/// See the docs on `expose_provenance` for more details.
PointerExposeProvenance,
/// An address-to-pointer cast that picks up an exposed provenance.
/// See the docs on `with_exposed_provenance` for more details.
PointerWithExposedProvenance,

View file

@ -1121,7 +1121,8 @@ impl<'tcx> fmt::Display for Pat<'tcx> {
printed += 1;
}
if printed < variant.fields.len() {
let is_union = self.ty.ty_adt_def().is_some_and(|adt| adt.is_union());
if printed < variant.fields.len() && (!is_union || printed == 0) {
write!(f, "{}..", start_or_comma())?;
}

View file

@ -83,7 +83,7 @@ pub fn mir_cast_kind<'tcx>(from_ty: Ty<'tcx>, cast_ty: Ty<'tcx>) -> mir::CastKin
let cast = CastTy::from_ty(cast_ty);
let cast_kind = match (from, cast) {
(Some(CastTy::Ptr(_) | CastTy::FnPtr), Some(CastTy::Int(_))) => {
mir::CastKind::PointerExposeAddress
mir::CastKind::PointerExposeProvenance
}
(Some(CastTy::Int(_)), Some(CastTy::Ptr(_))) => mir::CastKind::PointerWithExposedProvenance,
(_, Some(CastTy::DynStar)) => mir::CastKind::DynStar,

View file

@ -1324,7 +1324,7 @@ impl VariantDef {
pub fn single_field(&self) -> &FieldDef {
assert!(self.fields.len() == 1);
&self.fields[FieldIdx::from_u32(0)]
&self.fields[FieldIdx::ZERO]
}
/// Returns the last field in this variant, if present.

View file

@ -2589,7 +2589,7 @@ impl<'a, 'tcx> ty::TypeFolder<TyCtxt<'tcx>> for RegionFolder<'a, 'tcx> {
ty::BrAnon | ty::BrEnv => r,
_ => {
// Index doesn't matter, since this is just for naming and these never get bound
let br = ty::BoundRegion { var: ty::BoundVar::from_u32(0), kind };
let br = ty::BoundRegion { var: ty::BoundVar::ZERO, kind };
*self
.region_map
.entry(br)

View file

@ -1958,7 +1958,7 @@ impl<'tcx> Ty<'tcx> {
Adt(def, args) => {
assert!(def.repr().simd(), "`simd_size_and_type` called on non-SIMD type");
let variant = def.non_enum_variant();
let f0_ty = variant.fields[FieldIdx::from_u32(0)].ty(tcx, args);
let f0_ty = variant.fields[FieldIdx::ZERO].ty(tcx, args);
match f0_ty.kind() {
// If the first field is an array, we assume it is the only field and its

View file

@ -19,7 +19,7 @@ use rustc_hir::{
hir_id::OwnerId,
BindingAnnotation, ByRef, HirId, ItemLocalId, ItemLocalMap, ItemLocalSet, Mutability,
};
use rustc_index::{Idx, IndexVec};
use rustc_index::IndexVec;
use rustc_macros::HashStable;
use rustc_middle::mir::FakeReadCause;
use rustc_session::Session;
@ -680,7 +680,7 @@ impl<'tcx> IsIdentity for CanonicalUserType<'tcx> {
return false;
}
iter::zip(user_args.args, BoundVar::new(0)..).all(|(kind, cvar)| {
iter::zip(user_args.args, BoundVar::ZERO..).all(|(kind, cvar)| {
match kind.unpack() {
GenericArgKind::Type(ty) => match ty.kind() {
ty::Bound(debruijn, b) => {

View file

@ -215,7 +215,7 @@ impl<'tcx, 'body> ParseCtxt<'tcx, 'body> {
fn parse_local_decls(&mut self, mut stmts: impl Iterator<Item = StmtId>) -> PResult<()> {
let (ret_var, ..) = self.parse_let_statement(stmts.next().unwrap())?;
self.local_map.insert(ret_var, Local::from_u32(0));
self.local_map.insert(ret_var, Local::ZERO);
for stmt in stmts {
let (var, ty, span) = self.parse_let_statement(stmt)?;

View file

@ -573,7 +573,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
result_value,
Rvalue::CheckedBinaryOp(op, Box::new((lhs.to_copy(), rhs.to_copy()))),
);
let val_fld = FieldIdx::new(0);
let val_fld = FieldIdx::ZERO;
let of_fld = FieldIdx::new(1);
let tcx = self.tcx;

View file

@ -190,7 +190,7 @@ rustc_index::newtype_index! {
struct DropIdx {}
}
const ROOT_NODE: DropIdx = DropIdx::from_u32(0);
const ROOT_NODE: DropIdx = DropIdx::ZERO;
/// A tree of drops that we have deferred lowering. It's used for:
///

View file

@ -420,14 +420,14 @@ where
) -> BasicBlock {
// drop glue is sent straight to codegen
// box cannot be directly dereferenced
let unique_ty = adt.non_enum_variant().fields[FieldIdx::new(0)].ty(self.tcx(), args);
let unique_ty = adt.non_enum_variant().fields[FieldIdx::ZERO].ty(self.tcx(), args);
let unique_variant = unique_ty.ty_adt_def().unwrap().non_enum_variant();
let nonnull_ty = unique_variant.fields[FieldIdx::from_u32(0)].ty(self.tcx(), args);
let nonnull_ty = unique_variant.fields[FieldIdx::ZERO].ty(self.tcx(), args);
let ptr_ty = Ty::new_imm_ptr(self.tcx(), args[0].expect_ty());
let unique_place = self.tcx().mk_place_field(self.place, FieldIdx::new(0), unique_ty);
let nonnull_place = self.tcx().mk_place_field(unique_place, FieldIdx::new(0), nonnull_ty);
let ptr_place = self.tcx().mk_place_field(nonnull_place, FieldIdx::new(0), ptr_ty);
let unique_place = self.tcx().mk_place_field(self.place, FieldIdx::ZERO, unique_ty);
let nonnull_place = self.tcx().mk_place_field(unique_place, FieldIdx::ZERO, nonnull_ty);
let ptr_place = self.tcx().mk_place_field(nonnull_place, FieldIdx::ZERO, ptr_ty);
let interior = self.tcx().mk_place_deref(ptr_place);
let interior_path = self.elaborator.deref_subpath(self.path);

View file

@ -168,7 +168,7 @@ impl<'tcx> MutVisitor<'tcx> for PinArgVisitor<'tcx> {
Place {
local: SELF_ARG,
projection: self.tcx().mk_place_elems(&[ProjectionElem::Field(
FieldIdx::new(0),
FieldIdx::ZERO,
self.ref_coroutine_ty,
)]),
},
@ -267,7 +267,7 @@ impl<'tcx> TransformVisitor<'tcx> {
Rvalue::Aggregate(
Box::new(AggregateKind::Adt(
option_def_id,
VariantIdx::from_usize(0),
VariantIdx::ZERO,
self.tcx.mk_args(&[self.old_yield_ty.into()]),
None,
None,
@ -329,7 +329,7 @@ impl<'tcx> TransformVisitor<'tcx> {
Rvalue::Aggregate(
Box::new(AggregateKind::Adt(
poll_def_id,
VariantIdx::from_usize(0),
VariantIdx::ZERO,
args,
None,
None,
@ -358,7 +358,7 @@ impl<'tcx> TransformVisitor<'tcx> {
Rvalue::Aggregate(
Box::new(AggregateKind::Adt(
option_def_id,
VariantIdx::from_usize(0),
VariantIdx::ZERO,
args,
None,
None,
@ -420,7 +420,7 @@ impl<'tcx> TransformVisitor<'tcx> {
Rvalue::Aggregate(
Box::new(AggregateKind::Adt(
coroutine_state_def_id,
VariantIdx::from_usize(0),
VariantIdx::ZERO,
args,
None,
None,

View file

@ -3,7 +3,6 @@
//! Box is not actually a pointer so it is incorrect to dereference it directly.
use rustc_hir::def_id::DefId;
use rustc_index::Idx;
use rustc_middle::mir::patch::MirPatch;
use rustc_middle::mir::visit::MutVisitor;
use rustc_middle::mir::*;
@ -32,9 +31,9 @@ pub fn build_projection<'tcx>(
ptr_ty: Ty<'tcx>,
) -> [PlaceElem<'tcx>; 3] {
[
PlaceElem::Field(FieldIdx::new(0), unique_ty),
PlaceElem::Field(FieldIdx::new(0), nonnull_ty),
PlaceElem::Field(FieldIdx::new(0), ptr_ty),
PlaceElem::Field(FieldIdx::ZERO, unique_ty),
PlaceElem::Field(FieldIdx::ZERO, nonnull_ty),
PlaceElem::Field(FieldIdx::ZERO, ptr_ty),
]
}
@ -91,15 +90,14 @@ pub struct ElaborateBoxDerefs;
impl<'tcx> MirPass<'tcx> for ElaborateBoxDerefs {
fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
if let Some(def_id) = tcx.lang_items().owned_box() {
let unique_did =
tcx.adt_def(def_id).non_enum_variant().fields[FieldIdx::from_u32(0)].did;
let unique_did = tcx.adt_def(def_id).non_enum_variant().fields[FieldIdx::ZERO].did;
let Some(nonnull_def) = tcx.type_of(unique_did).instantiate_identity().ty_adt_def()
else {
span_bug!(tcx.def_span(unique_did), "expected Box to contain Unique")
};
let nonnull_did = nonnull_def.non_enum_variant().fields[FieldIdx::from_u32(0)].did;
let nonnull_did = nonnull_def.non_enum_variant().fields[FieldIdx::ZERO].did;
let patch = MirPatch::new(body);

View file

@ -355,7 +355,7 @@ impl<'body, 'tcx> VnState<'body, 'tcx> {
}
fn insert_tuple(&mut self, values: Vec<VnIndex>) -> VnIndex {
self.insert(Value::Aggregate(AggregateTy::Tuple, VariantIdx::from_u32(0), values))
self.insert(Value::Aggregate(AggregateTy::Tuple, VariantIdx::ZERO, values))
}
#[instrument(level = "trace", skip(self), ret)]

View file

@ -13,7 +13,7 @@ use rustc_const_eval::interpret::{
use rustc_data_structures::fx::FxHashSet;
use rustc_hir::def::DefKind;
use rustc_hir::HirId;
use rustc_index::{bit_set::BitSet, Idx, IndexVec};
use rustc_index::{bit_set::BitSet, IndexVec};
use rustc_middle::mir::visit::{MutatingUseContext, NonMutatingUseContext, PlaceContext, Visitor};
use rustc_middle::mir::*;
use rustc_middle::ty::layout::{LayoutError, LayoutOf, LayoutOfHelpers, TyAndLayout};
@ -124,10 +124,8 @@ impl<'tcx> Value<'tcx> {
fields.ensure_contains_elem(*idx, || Value::Uninit)
}
(PlaceElem::Field(..), val @ Value::Uninit) => {
*val = Value::Aggregate {
variant: VariantIdx::new(0),
fields: Default::default(),
};
*val =
Value::Aggregate { variant: VariantIdx::ZERO, fields: Default::default() };
val.project_mut(&[*proj])?
}
_ => return None,
@ -572,7 +570,7 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
self.use_ecx(|this| this.ecx.overflowing_binary_op(bin_op, &left, &right))?;
let overflowed = ImmTy::from_bool(overflowed, self.tcx);
Value::Aggregate {
variant: VariantIdx::new(0),
variant: VariantIdx::ZERO,
fields: [Value::from(val), overflowed.into()].into_iter().collect(),
}
}
@ -607,7 +605,7 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
| AggregateKind::Tuple
| AggregateKind::Closure(_, _)
| AggregateKind::Coroutine(_, _)
| AggregateKind::CoroutineClosure(_, _) => VariantIdx::new(0),
| AggregateKind::CoroutineClosure(_, _) => VariantIdx::ZERO,
},
}
}

View file

@ -434,7 +434,7 @@ impl<'tcx> Validator<'_, 'tcx> {
Rvalue::ThreadLocalRef(_) => return Err(Unpromotable),
// ptr-to-int casts are not possible in consts and thus not promotable
Rvalue::Cast(CastKind::PointerExposeAddress, _, _) => return Err(Unpromotable),
Rvalue::Cast(CastKind::PointerExposeProvenance, _, _) => return Err(Unpromotable),
// all other casts including int-to-ptr casts are fine, they just use the integer value
// at pointer type.

View file

@ -985,7 +985,7 @@ fn build_fn_ptr_addr_shim<'tcx>(tcx: TyCtxt<'tcx>, def_id: DefId, self_ty: Ty<'t
let locals = local_decls_for_sig(&sig, span);
let source_info = SourceInfo::outermost(span);
// FIXME: use `expose_addr` once we figure out whether function pointers have meaningful provenance.
// FIXME: use `expose_provenance` once we figure out whether function pointers have meaningful provenance.
let rvalue = Rvalue::Cast(
CastKind::FnPtrToPtr,
Operand::Move(Place::from(Local::new(1))),

View file

@ -415,7 +415,7 @@ fn make_local_map<V>(
used_locals: &UsedLocals,
) -> IndexVec<Local, Option<Local>> {
let mut map: IndexVec<Local, Option<Local>> = IndexVec::from_elem(None, local_decls);
let mut used = Local::new(0);
let mut used = Local::ZERO;
for alive_index in local_decls.indices() {
// `is_used` treats the `RETURN_PLACE` and arguments as used.

View file

@ -140,6 +140,34 @@
//! [`ConstructorSet::split`]. The invariants of [`SplitConstructorSet`] are also of interest.
//!
//!
//! ## Unions
//!
//! Unions allow us to match a value via several overlapping representations at the same time. For
//! example, the following is exhaustive because when seeing the value as a boolean we handled all
//! possible cases (other cases such as `n == 3` would trigger UB).
//!
//! ```rust
//! # fn main() {
//! union U8AsBool {
//! n: u8,
//! b: bool,
//! }
//! let x = U8AsBool { n: 1 };
//! unsafe {
//! match x {
//! U8AsBool { n: 2 } => {}
//! U8AsBool { b: true } => {}
//! U8AsBool { b: false } => {}
//! }
//! }
//! # }
//! ```
//!
//! Pattern-matching has no knowledge that e.g. `false as u8 == 0`, so the values we consider in the
//! algorithm look like `U8AsBool { b: true, n: 2 }`. In other words, for the most part a union is
//! treated like a struct with the same fields. The difference lies in how we construct witnesses of
//! non-exhaustiveness.
//!
//!
//! ## Opaque patterns
//!
@ -974,7 +1002,6 @@ impl<Cx: PatCx> ConstructorSet<Cx> {
/// any) are missing; 2/ split constructors to handle non-trivial intersections e.g. on ranges
/// or slices. This can get subtle; see [`SplitConstructorSet`] for details of this operation
/// and its invariants.
#[instrument(level = "debug", skip(self, ctors), ret)]
pub fn split<'a>(
&self,
ctors: impl Iterator<Item = &'a Constructor<Cx>> + Clone,

View file

@ -186,7 +186,6 @@ impl<'p, 'tcx: 'p> RustcPatCtxt<'p, 'tcx> {
/// Returns the types of the fields for a given constructor. The result must have a length of
/// `ctor.arity()`.
#[instrument(level = "trace", skip(self))]
pub(crate) fn ctor_sub_tys<'a>(
&'a self,
ctor: &'a Constructor<'p, 'tcx>,
@ -283,7 +282,6 @@ impl<'p, 'tcx: 'p> RustcPatCtxt<'p, 'tcx> {
/// Creates a set that represents all the constructors of `ty`.
///
/// See [`crate::constructor`] for considerations of emptiness.
#[instrument(level = "debug", skip(self), ret)]
pub fn ctors_for_ty(
&self,
ty: RevealedTy<'tcx>,

View file

@ -871,12 +871,14 @@ impl<Cx: PatCx> PlaceInfo<Cx> {
where
Cx: 'a,
{
debug!(?self.ty);
if self.private_uninhabited {
// Skip the whole column
return Ok((smallvec![Constructor::PrivateUninhabited], vec![]));
}
let ctors_for_ty = cx.ctors_for_ty(&self.ty)?;
debug!(?ctors_for_ty);
// We treat match scrutinees of type `!` or `EmptyEnum` differently.
let is_toplevel_exception =
@ -895,6 +897,7 @@ impl<Cx: PatCx> PlaceInfo<Cx> {
// Analyze the constructors present in this column.
let mut split_set = ctors_for_ty.split(ctors);
debug!(?split_set);
let all_missing = split_set.present.is_empty();
// Build the set of constructors we will specialize with. It must cover the whole type, so
@ -1254,7 +1257,7 @@ impl<'p, Cx: PatCx> Matrix<'p, Cx> {
/// + true + [Second(true)] +
/// + false + [_] +
/// + _ + [_, _, tail @ ..] +
/// | ✓ | ? | // column validity
/// | ✓ | ? | // validity
/// ```
impl<'p, Cx: PatCx> fmt::Debug for Matrix<'p, Cx> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
@ -1285,7 +1288,7 @@ impl<'p, Cx: PatCx> fmt::Debug for Matrix<'p, Cx> {
write!(f, " {sep}")?;
}
if is_validity_row {
write!(f, " // column validity")?;
write!(f, " // validity")?;
}
write!(f, "\n")?;
}
@ -1381,12 +1384,35 @@ impl<Cx: PatCx> WitnessStack<Cx> {
/// pats: [(false, "foo"), _, true]
/// result: [Enum::Variant { a: (false, "foo"), b: _ }, true]
/// ```
fn apply_constructor(&mut self, pcx: &PlaceCtxt<'_, Cx>, ctor: &Constructor<Cx>) {
fn apply_constructor(
mut self,
pcx: &PlaceCtxt<'_, Cx>,
ctor: &Constructor<Cx>,
) -> SmallVec<[Self; 1]> {
let len = self.0.len();
let arity = pcx.ctor_arity(ctor);
let fields = self.0.drain((len - arity)..).rev().collect();
let pat = WitnessPat::new(ctor.clone(), fields, pcx.ty.clone());
self.0.push(pat);
let fields: Vec<_> = self.0.drain((len - arity)..).rev().collect();
if matches!(ctor, Constructor::UnionField)
&& fields.iter().filter(|p| !matches!(p.ctor(), Constructor::Wildcard)).count() >= 2
{
// Convert a `Union { a: p, b: q }` witness into `Union { a: p }` and `Union { b: q }`.
// First add `Union { .. }` to `self`.
self.0.push(WitnessPat::wild_from_ctor(pcx.cx, ctor.clone(), pcx.ty.clone()));
fields
.into_iter()
.enumerate()
.filter(|(_, p)| !matches!(p.ctor(), Constructor::Wildcard))
.map(|(i, p)| {
let mut ret = self.clone();
// Fill the `i`th field of the union with `p`.
ret.0.last_mut().unwrap().fields[i] = p;
ret
})
.collect()
} else {
self.0.push(WitnessPat::new(ctor.clone(), fields, pcx.ty.clone()));
smallvec![self]
}
}
}
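
To make the splitting concrete (hypothetical, not from the diff): using the `U8AsBool` union from the module docs above, a candidate witness with two useful fields is reported as one pattern per field rather than as a single multi-field union pattern.

union U8AsBool { n: u8, b: bool }

// A candidate witness such as `U8AsBool { n: 0, b: false }` (two non-wildcard
// fields) is split by `apply_constructor` into the two reported patterns
// `U8AsBool { n: 0 }` and `U8AsBool { b: false }`; a witness with at most one
// non-wildcard field is pushed unchanged.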
@ -1459,8 +1485,8 @@ impl<Cx: PatCx> WitnessMatrix<Cx> {
*self = ret;
} else {
// Any other constructor we unspecialize as expected.
for witness in self.0.iter_mut() {
witness.apply_constructor(pcx, ctor)
for witness in std::mem::take(&mut self.0) {
self.0.extend(witness.apply_constructor(pcx, ctor));
}
}
}
@ -1617,7 +1643,6 @@ fn compute_exhaustiveness_and_usefulness<'a, 'p, Cx: PatCx>(
};
// Analyze the constructors present in this column.
debug!("ty: {:?}", place.ty);
let ctors = matrix.heads().map(|p| p.ctor());
let (split_ctors, missing_ctors) = place.split_column_ctors(mcx.tycx, ctors)?;
@ -1669,7 +1694,10 @@ fn compute_exhaustiveness_and_usefulness<'a, 'p, Cx: PatCx>(
for row in matrix.rows() {
if row.useful {
if let PatOrWild::Pat(pat) = row.head() {
mcx.useful_subpatterns.insert(pat.uid);
let newly_useful = mcx.useful_subpatterns.insert(pat.uid);
if newly_useful {
debug!("newly useful: {pat:?}");
}
}
}
}
@ -1768,6 +1796,7 @@ pub fn compute_match_usefulness<'p, Cx: PatCx>(
.map(|arm| {
debug!(?arm);
let usefulness = collect_pattern_usefulness(&cx.useful_subpatterns, arm.pat);
debug!(?usefulness);
(arm, usefulness)
})
.collect();

View file

@ -40,7 +40,7 @@ rustc_index::newtype_index! {
}
impl DepNodeIndex {
const SINGLETON_DEPENDENCYLESS_ANON_NODE: DepNodeIndex = DepNodeIndex::from_u32(0);
const SINGLETON_DEPENDENCYLESS_ANON_NODE: DepNodeIndex = DepNodeIndex::ZERO;
pub const FOREVER_RED_NODE: DepNodeIndex = DepNodeIndex::from_u32(1);
}

View file

@ -532,7 +532,7 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> {
let mut seen_spans = FxHashSet::default();
let mut errors = vec![];
let mut prev_root_id: NodeId = NodeId::from_u32(0);
let mut prev_root_id: NodeId = NodeId::ZERO;
let determined_imports = mem::take(&mut self.determined_imports);
let indeterminate_imports = mem::take(&mut self.indeterminate_imports);
@ -556,8 +556,8 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> {
}
}
if prev_root_id.as_u32() != 0
&& prev_root_id.as_u32() != import.root_id.as_u32()
if prev_root_id != NodeId::ZERO
&& prev_root_id != import.root_id
&& !errors.is_empty()
{
// In the case of a new import line, throw a diagnostic message

View file

@ -267,7 +267,7 @@ impl<'tcx> Stable<'tcx> for mir::CastKind {
fn stable(&self, tables: &mut Tables<'_>) -> Self::T {
use rustc_middle::mir::CastKind::*;
match self {
PointerExposeAddress => stable_mir::mir::CastKind::PointerExposeAddress,
PointerExposeProvenance => stable_mir::mir::CastKind::PointerExposeAddress,
PointerWithExposedProvenance => stable_mir::mir::CastKind::PointerWithExposedProvenance,
PointerCoercion(c) => stable_mir::mir::CastKind::PointerCoercion(c.stable(tables)),
DynStar => stable_mir::mir::CastKind::DynStar,

View file

@ -22,7 +22,7 @@ rustc_index::newtype_index! {
/// Item definitions in the currently-compiled crate would have the `CrateNum`
/// `LOCAL_CRATE` in their `DefId`.
pub const LOCAL_CRATE: CrateNum = CrateNum::from_u32(0);
pub const LOCAL_CRATE: CrateNum = CrateNum::ZERO;
impl CrateNum {
#[inline]

View file

@ -165,7 +165,7 @@ pub enum Transparency {
impl LocalExpnId {
/// The ID of the theoretical expansion that generates freshly parsed, unexpanded AST.
pub const ROOT: LocalExpnId = LocalExpnId::from_u32(0);
pub const ROOT: LocalExpnId = LocalExpnId::ZERO;
#[inline]
fn from_raw(idx: ExpnIndex) -> LocalExpnId {
@ -242,7 +242,7 @@ impl ExpnId {
/// The ID of the theoretical expansion that generates freshly parsed, unexpanded AST.
/// Invariant: we do not create any ExpnId with local_id == 0 and krate != 0.
pub const fn root() -> ExpnId {
ExpnId { krate: LOCAL_CRATE, local_id: ExpnIndex::from_u32(0) }
ExpnId { krate: LOCAL_CRATE, local_id: ExpnIndex::ZERO }
}
#[inline]

View file

@ -243,7 +243,7 @@ fn t10() {
src_hash,
stable_id,
source_len.to_u32(),
CrateNum::new(0),
CrateNum::ZERO,
FreezeLock::new(lines.read().clone()),
multibyte_chars,
non_narrow_chars,

View file

@ -1659,7 +1659,7 @@ symbols! {
simd_cttz,
simd_div,
simd_eq,
simd_expose_addr,
simd_expose_provenance,
simd_extract,
simd_fabs,
simd_fcos,

View file

@ -251,9 +251,9 @@ pub struct Uniform {
/// The total size of the argument, which can be:
/// * equal to `unit.size` (one scalar/vector),
/// * a multiple of `unit.size` (an array of scalar/vectors),
/// * if `unit.kind` is `Integer`, the last element
/// can be shorter, i.e., `{ i64, i64, i32 }` for
/// 64-bit integers with a total size of 20 bytes.
/// * if `unit.kind` is `Integer`, the last element can be shorter, i.e., `{ i64, i64, i32 }`
/// for 64-bit integers with a total size of 20 bytes. When the argument is actually passed,
/// this size will be rounded up to the nearest multiple of `unit.size`.
pub total: Size,
}
@ -319,14 +319,17 @@ impl CastTarget {
}
pub fn size<C: HasDataLayout>(&self, _cx: &C) -> Size {
let mut size = self.rest.total;
for i in 0..self.prefix.iter().count() {
match self.prefix[i] {
Some(v) => size += v.size,
None => {}
}
}
return size;
// Prefix arguments are passed in specific designated registers
let prefix_size = self
.prefix
.iter()
.filter_map(|x| x.map(|reg| reg.size))
.fold(Size::ZERO, |acc, size| acc + size);
// Remaining arguments are passed in chunks of the unit size
let rest_size =
self.rest.unit.size * self.rest.total.bytes().div_ceil(self.rest.unit.size.bytes());
prefix_size + rest_size
}
pub fn align<C: HasDataLayout>(&self, cx: &C) -> Align {
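
A small standalone sketch of the rounding described in the updated `Uniform` docs and implemented in the rewritten `size` method above (`rounded_rest_size` is a hypothetical helper, not compiler code): the `rest` portion is passed in whole `unit`-sized chunks, so its size rounds up to a multiple of the unit size.

fn rounded_rest_size(total_bytes: u64, unit_bytes: u64) -> u64 {
    // Same shape as `self.rest.unit.size * self.rest.total.bytes().div_ceil(self.rest.unit.size.bytes())` above.
    unit_bytes * total_bytes.div_ceil(unit_bytes)
}

fn main() {
    // The `{ i64, i64, i32 }` example from the doc comment: 20 bytes of data in 8-byte units.
    assert_eq!(rounded_rest_size(20, 8), 24);
}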

View file

@ -554,11 +554,7 @@ fn plug_infer_with_placeholders<'tcx>(
}
}
value.visit_with(&mut PlugInferWithPlaceholder {
infcx,
universe,
var: ty::BoundVar::from_u32(0),
});
value.visit_with(&mut PlugInferWithPlaceholder { infcx, universe, var: ty::BoundVar::ZERO });
}
fn try_prove_negated_where_clause<'tcx>(

View file

@ -377,7 +377,7 @@ fn layout_of_uncached<'tcx>(
}
// Type of the first ADT field:
let f0_ty = fields[FieldIdx::from_u32(0)].ty(tcx, args);
let f0_ty = fields[FieldIdx::ZERO].ty(tcx, args);
// Heterogeneous SIMD vectors are not supported:
// (should be caught by typeck)

View file

@ -314,7 +314,7 @@ rustc_index::newtype_index! {
}
impl UniverseIndex {
pub const ROOT: UniverseIndex = UniverseIndex::from_u32(0);
pub const ROOT: UniverseIndex = UniverseIndex::ZERO;
/// Returns the "next" universe index in order -- this new index
/// is considered to extend all previous universes. This

View file

@ -971,6 +971,7 @@ pub enum PointerCoercion {
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub enum CastKind {
// FIXME(smir-rename): rename this to PointerExposeProvenance
PointerExposeAddress,
PointerWithExposedProvenance,
PointerCoercion(PointerCoercion),

View file

@ -2438,8 +2438,8 @@ impl Display for char {
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized> Pointer for *const T {
fn fmt(&self, f: &mut Formatter<'_>) -> Result {
// Cast is needed here because `.expose_addr()` requires `T: Sized`.
pointer_fmt_inner((*self as *const ()).expose_addr(), f)
// Cast is needed here because `.expose_provenance()` requires `T: Sized`.
pointer_fmt_inner((*self as *const ()).expose_provenance(), f)
}
}
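
A tiny usage sketch of the formatting path changed above: `{:p}` goes through this `Pointer` impl, which now obtains the address it prints via `expose_provenance`.

fn main() {
    let x = 7u8;
    let p: *const u8 = &x;
    // Prints the pointer's address in hexadecimal via the `Pointer` impl for `*const T`.
    println!("{:p}", p);
}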

View file

@ -540,6 +540,10 @@ extern "rust-intrinsic" {
/// `T` must be a vector of pointers.
///
/// `U` must be a vector of `usize` with the same length as `T`.
#[cfg(not(bootstrap))]
#[rustc_nounwind]
pub fn simd_expose_provenance<T, U>(ptr: T) -> U;
#[cfg(bootstrap)]
#[rustc_nounwind]
pub fn simd_expose_addr<T, U>(ptr: T) -> U;
@ -660,5 +664,7 @@ extern "rust-intrinsic" {
pub fn simd_flog<T>(a: T) -> T;
}
#[cfg(bootstrap)]
pub use simd_expose_addr as simd_expose_provenance;
#[cfg(bootstrap)]
pub use simd_from_exposed_addr as simd_with_exposed_provenance;
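
The `cfg(bootstrap)` arrangement above is the usual pattern for renaming an item while the previous-stage (bootstrap) compiler still only knows the old name. A generic sketch of that pattern with hypothetical item names (`bootstrap` is a cfg set by rustc's own build system):

// New name, available when building with the current compiler.
#[cfg(not(bootstrap))]
pub fn new_name() {}

// Old name, still needed when building with the bootstrap compiler.
#[cfg(bootstrap)]
pub fn old_name() {}

// Re-export the old item under the new name so callers can use the new name unconditionally.
#[cfg(bootstrap)]
pub use self::old_name as new_name;

fn main() {
    new_name();
}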

View file

@ -136,7 +136,7 @@ impl<T: ?Sized> *const T {
#[unstable(feature = "ptr_to_from_bits", issue = "91126")]
#[deprecated(
since = "1.67.0",
note = "replaced by the `expose_addr` method, or update your code \
note = "replaced by the `expose_provenance` method, or update your code \
to follow the strict provenance rules using its APIs"
)]
#[inline(always)]
@ -187,7 +187,7 @@ impl<T: ?Sized> *const T {
///
/// If using those APIs is not possible because there is no way to preserve a pointer with the
/// required provenance, then Strict Provenance might not be for you. Use pointer-integer casts
/// or [`expose_addr`][pointer::expose_addr] and [`with_exposed_provenance`][with_exposed_provenance]
/// or [`expose_provenance`][pointer::expose_provenance] and [`with_exposed_provenance`][with_exposed_provenance]
/// instead. However, note that this makes your code less portable and less amenable to tools
/// that check for compliance with the Rust memory model.
///
@ -210,8 +210,8 @@ impl<T: ?Sized> *const T {
unsafe { mem::transmute(self.cast::<()>()) }
}
/// Gets the "address" portion of the pointer, and 'exposes' the "provenance" part for future
/// use in [`with_exposed_provenance`][].
/// Exposes the "provenance" part of the pointer for future use in
/// [`with_exposed_provenance`][] and returns the "address" portion.
///
/// This is equivalent to `self as usize`, which semantically discards *provenance* and
/// *address-space* information. Furthermore, this (like the `as` cast) has the implicit
@ -238,7 +238,7 @@ impl<T: ?Sized> *const T {
#[must_use]
#[inline(always)]
#[unstable(feature = "exposed_provenance", issue = "95228")]
pub fn expose_addr(self) -> usize {
pub fn expose_provenance(self) -> usize {
// FIXME(strict_provenance_magic): I am magic and should be a compiler intrinsic.
self.cast::<()>() as usize
}
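
A minimal round-trip sketch of the renamed method (assuming a nightly toolchain at this point in time, since the API is still unstable under the `exposed_provenance` feature):

#![feature(exposed_provenance)]

fn main() {
    let x = 42u32;
    // Exposes the provenance of `&x` and returns its address.
    let addr = (&x as *const u32).expose_provenance();
    // Picks the exposed provenance back up from the bare address.
    let p = std::ptr::with_exposed_provenance::<u32>(addr);
    assert_eq!(unsafe { *p }, 42);
}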

View file

@ -340,8 +340,8 @@
//! clear where a satisfying unambiguous semantics can be defined for Exposed Provenance.
//! Furthermore, Exposed Provenance will not work (well) with tools like [Miri] and [CHERI].
//!
//! Exposed Provenance is provided by the [`expose_addr`] and [`with_exposed_provenance`] methods, which
//! are meant to replace `as` casts between pointers and integers. [`expose_addr`] is a lot like
//! Exposed Provenance is provided by the [`expose_provenance`] and [`with_exposed_provenance`] methods,
//! which are meant to replace `as` casts between pointers and integers. [`expose_provenance`] is a lot like
//! [`addr`], but additionally adds the provenance of the pointer to a global list of 'exposed'
//! provenances. (This list is purely conceptual, it exists for the purpose of specifying Rust but
//! is not materialized in actual executions, except in tools like [Miri].) [`with_exposed_provenance`]
@ -355,9 +355,9 @@
//! there is *no* previously 'exposed' provenance that justifies the way the returned pointer will
//! be used, the program has undefined behavior.
//!
//! Using [`expose_addr`] or [`with_exposed_provenance`] (or the `as` casts) means that code is
//! Using [`expose_provenance`] or [`with_exposed_provenance`] (or the `as` casts) means that code is
//! *not* following Strict Provenance rules. The goal of the Strict Provenance experiment is to
//! determine how far one can get in Rust without the use of [`expose_addr`] and
//! determine how far one can get in Rust without the use of [`expose_provenance`] and
//! [`with_exposed_provenance`], and to encourage code to be written with Strict Provenance APIs only.
//! Maximizing the amount of such code is a major win for avoiding specification complexity and to
//! facilitate adoption of tools like [CHERI] and [Miri] that can be a big help in increasing the
@ -374,7 +374,7 @@
//! [`map_addr`]: pointer::map_addr
//! [`addr`]: pointer::addr
//! [`ptr::dangling`]: core::ptr::dangling
//! [`expose_addr`]: pointer::expose_addr
//! [`expose_provenance`]: pointer::expose_provenance
//! [`with_exposed_provenance`]: with_exposed_provenance
//! [Miri]: https://github.com/rust-lang/miri
//! [CHERI]: https://www.cl.cam.ac.uk/research/security/ctsrd/cheri/
@ -663,7 +663,7 @@ pub const fn dangling_mut<T>() -> *mut T {
///
/// This is a more rigorously specified alternative to `addr as *const T`. The provenance of the
/// returned pointer is that of *any* pointer that was previously exposed by passing it to
/// [`expose_addr`][pointer::expose_addr], or a `ptr as usize` cast. In addition, memory which is
/// [`expose_provenance`][pointer::expose_provenance], or a `ptr as usize` cast. In addition, memory which is
/// outside the control of the Rust abstract machine (MMIO registers, for example) is always
/// considered to be exposed, so long as this memory is disjoint from memory that will be used by
/// the abstract machine such as the stack, heap, and statics.
@ -711,7 +711,7 @@ where
///
/// This is a more rigorously specified alternative to `addr as *mut T`. The provenance of the
/// returned pointer is that of *any* pointer that was previously passed to
/// [`expose_addr`][pointer::expose_addr] or a `ptr as usize` cast. If there is no previously
/// [`expose_provenance`][pointer::expose_provenance] or a `ptr as usize` cast. If there is no previously
/// 'exposed' provenance that justifies the way this pointer will be used, the program has undefined
/// behavior. Note that there is no algorithm that decides which provenance will be used. You can
/// think of this as "guessing" the right provenance, and the guess will be "maximally in your
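
To illustrate the contrast drawn in the module docs above, a short sketch (assuming a nightly toolchain with the unstable `strict_provenance` and `exposed_provenance` features) of deriving a pointer the Strict Provenance way versus the Exposed Provenance way:

#![feature(strict_provenance, exposed_provenance)]

fn main() {
    let data = [1u8, 2, 3];
    let p = data.as_ptr();

    // Strict Provenance: derive a new pointer from an existing one; nothing is exposed.
    let q = p.with_addr(p.addr() + 1);
    assert_eq!(unsafe { *q }, 2);

    // Exposed Provenance: go through a bare usize, which requires exposing first.
    let addr = p.expose_provenance();
    let r = std::ptr::with_exposed_provenance::<u8>(addr + 2);
    assert_eq!(unsafe { *r }, 3);
}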

View file

@ -142,7 +142,7 @@ impl<T: ?Sized> *mut T {
#[unstable(feature = "ptr_to_from_bits", issue = "91126")]
#[deprecated(
since = "1.67.0",
note = "replaced by the `expose_addr` method, or update your code \
note = "replaced by the `expose_provenance` method, or update your code \
to follow the strict provenance rules using its APIs"
)]
#[inline(always)]
@ -194,7 +194,7 @@ impl<T: ?Sized> *mut T {
///
/// If using those APIs is not possible because there is no way to preserve a pointer with the
/// required provenance, then Strict Provenance might not be for you. Use pointer-integer casts
/// or [`expose_addr`][pointer::expose_addr] and [`with_exposed_provenance`][with_exposed_provenance]
/// or [`expose_provenance`][pointer::expose_provenance] and [`with_exposed_provenance`][with_exposed_provenance]
/// instead. However, note that this makes your code less portable and less amenable to tools
/// that check for compliance with the Rust memory model.
///
@ -217,8 +217,8 @@ impl<T: ?Sized> *mut T {
unsafe { mem::transmute(self.cast::<()>()) }
}
/// Gets the "address" portion of the pointer, and 'exposes' the "provenance" part for future
/// use in [`with_exposed_provenance`][].
/// Exposes the "provenance" part of the pointer for future use in
/// [`with_exposed_provenance`][] and returns the "address" portion.
///
/// This is equivalent to `self as usize`, which semantically discards *provenance* and
/// *address-space* information. Furthermore, this (like the `as` cast) has the implicit
@ -242,10 +242,9 @@ impl<T: ?Sized> *mut T {
/// API and its claimed semantics are part of [Exposed Provenance][super#exposed-provenance].
///
/// [`with_exposed_provenance_mut`]: with_exposed_provenance_mut
#[must_use]
#[inline(always)]
#[unstable(feature = "exposed_provenance", issue = "95228")]
pub fn expose_addr(self) -> usize {
pub fn expose_provenance(self) -> usize {
// FIXME(strict_provenance_magic): I am magic and should be a compiler intrinsic.
self.cast::<()>() as usize
}

View file

@ -50,9 +50,9 @@ pub trait SimdConstPtr: Copy + Sealed {
/// Equivalent to calling [`pointer::with_addr`] on each element.
fn with_addr(self, addr: Self::Usize) -> Self;
/// Gets the "address" portion of the pointer, and "exposes" the provenance part for future use
/// in [`Self::with_exposed_provenance`].
fn expose_addr(self) -> Self::Usize;
/// Exposes the "provenance" part of the pointer for future use in
/// [`Self::with_exposed_provenance`] and returns the "address" portion.
fn expose_provenance(self) -> Self::Usize;
/// Convert an address back to a pointer, picking up a previously "exposed" provenance.
///
@ -131,9 +131,9 @@ where
}
#[inline]
fn expose_addr(self) -> Self::Usize {
fn expose_provenance(self) -> Self::Usize {
// Safety: `self` is a pointer vector
unsafe { core::intrinsics::simd::simd_expose_addr(self) }
unsafe { core::intrinsics::simd::simd_expose_provenance(self) }
}
#[inline]

View file

@ -47,9 +47,9 @@ pub trait SimdMutPtr: Copy + Sealed {
/// Equivalent to calling [`pointer::with_addr`] on each element.
fn with_addr(self, addr: Self::Usize) -> Self;
/// Gets the "address" portion of the pointer, and "exposes" the provenance part for future use
/// in [`Self::with_exposed_provenance`].
fn expose_addr(self) -> Self::Usize;
/// Exposes the "provenance" part of the pointer for future use in
/// [`Self::with_exposed_provenance`] and returns the "address" portion.
fn expose_provenance(self) -> Self::Usize;
/// Convert an address back to a pointer, picking up a previously "exposed" provenance.
///
@ -128,9 +128,9 @@ where
}
#[inline]
fn expose_addr(self) -> Self::Usize {
fn expose_provenance(self) -> Self::Usize {
// Safety: `self` is a pointer vector
unsafe { core::intrinsics::simd::simd_expose_addr(self) }
unsafe { core::intrinsics::simd::simd_expose_provenance(self) }
}
#[inline]

View file

@ -32,10 +32,10 @@ macro_rules! common_tests {
);
}
fn expose_addr<const LANES: usize>() {
fn expose_provenance<const LANES: usize>() {
test_helpers::test_unary_elementwise(
&Simd::<*$constness u32, LANES>::expose_addr,
&<*$constness u32>::expose_addr,
&Simd::<*$constness u32, LANES>::expose_provenance,
&<*$constness u32>::expose_provenance,
&|_| true,
);
}

View file

@ -29,7 +29,7 @@ impl Thread {
let p = Box::into_raw(Box::new(p));
let tid = abi::spawn2(
thread_start,
p.expose_addr(),
p.expose_provenance(),
abi::Priority::into(abi::NORMAL_PRIO),
stack,
core_id,

View file

@ -181,7 +181,7 @@ impl Thread {
abi::acre_tsk(&abi::T_CTSK {
// Activate this task immediately
tskatr: abi::TA_ACT,
exinf: p_inner.as_ptr().expose_addr() as abi::EXINF,
exinf: p_inner.as_ptr().expose_provenance() as abi::EXINF,
// The entry point
task: Some(trampoline),
// Inherit the calling task's base priority

View file

@ -13,7 +13,7 @@ are currently defined running NetBSD:
| Target name | NetBSD Platform |
|--------------------------------|-----------------|
| `amd64-unknown-netbsd` | [amd64 / x86_64 systems](https://wiki.netbsd.org/ports/amd64/) |
| `x86_64-unknown-netbsd` | [amd64 / x86_64 systems](https://wiki.netbsd.org/ports/amd64/) |
| `armv7-unknown-netbsd-eabihf` | [32-bit ARMv7 systems with hard-float](https://wiki.netbsd.org/ports/evbarm/) |
| `armv6-unknown-netbsd-eabihf` | [32-bit ARMv6 systems with hard-float](https://wiki.netbsd.org/ports/evbarm/) |
| `aarch64-unknown-netbsd` | [64-bit ARM systems, little-endian](https://wiki.netbsd.org/ports/evbarm/) |
@ -22,7 +22,7 @@ are currently defined running NetBSD:
| `i686-unknown-netbsd` | [32-bit i386 with SSE](https://wiki.netbsd.org/ports/i386/) |
| `mipsel-unknown-netbsd` | [32-bit mips, requires mips32 cpu support](https://wiki.netbsd.org/ports/evbmips/) |
| `powerpc-unknown-netbsd` | [Various 32-bit PowerPC systems, e.g. MacPPC](https://wiki.netbsd.org/ports/macppc/) |
| `riscv64gc-unknown-netbsd` | [64-bit RISC-V](https://wiki.netbsd.org/ports/riscv/)
| `riscv64gc-unknown-netbsd` | [64-bit RISC-V](https://wiki.netbsd.org/ports/riscv/) |
| `sparc64-unknown-netbsd` | [Sun UltraSPARC systems](https://wiki.netbsd.org/ports/sparc64/) |
All use the "native" `stdc++` library which goes along with the natively
@ -43,7 +43,7 @@ bug reporting system.
## Requirements
The `amd64-unknown-netbsd` artifacts is being distributed by the
The `x86_64-unknown-netbsd` artifacts are being distributed by the
rust project.
The other targets are built by the designated developers (see above),
@ -95,7 +95,7 @@ capable systems we build and test `firefox` (amd64, i386, aarch64).
## Building Rust programs
Rust ships pre-compiled artifacts for the `amd64-unknown-netbsd`
Rust ships pre-compiled artifacts for the `x86_64-unknown-netbsd`
target.
For the other systems mentioned above, using the `pkgsrc` route is

View file

@ -149,7 +149,7 @@ fn check_rvalue<'tcx>(
Err((span, "unsizing casts are not allowed in const fn".into()))
}
},
Rvalue::Cast(CastKind::PointerExposeAddress, _, _) => {
Rvalue::Cast(CastKind::PointerExposeProvenance, _, _) => {
Err((span, "casting pointers to ints is unstable in const fn".into()))
},
Rvalue::Cast(CastKind::DynStar, _, _) => {

View file

@ -18,8 +18,8 @@ use reuse_pool::ReusePool;
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum ProvenanceMode {
/// We support `expose_addr`/`with_exposed_provenance` via "wildcard" provenance.
/// However, we want on `with_exposed_provenance` to alert the user of the precision loss.
/// We support `expose_provenance`/`with_exposed_provenance` via "wildcard" provenance.
/// However, we warn on `with_exposed_provenance` to alert the user of the precision loss.
Default,
/// Like `Default`, but without the warning.
Permissive,

View file

@ -514,7 +514,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
dest.transmute(this.machine.layouts.uint(dest.layout.size).unwrap(), this)?;
this.write_int(res, &dest)?;
}
"cast" | "as" | "cast_ptr" | "expose_addr" | "with_exposed_provenance" => {
"cast" | "as" | "cast_ptr" | "expose_provenance" | "with_exposed_provenance" => {
let [op] = check_arg_count(args)?;
let (op, op_len) = this.operand_to_simd(op)?;
let (dest, dest_len) = this.mplace_to_simd(dest)?;
@ -524,7 +524,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
let unsafe_cast = intrinsic_name == "cast";
let safe_cast = intrinsic_name == "as";
let ptr_cast = intrinsic_name == "cast_ptr";
let expose_cast = intrinsic_name == "expose_addr";
let expose_cast = intrinsic_name == "expose_provenance";
let from_exposed_cast = intrinsic_name == "with_exposed_provenance";
for i in 0..dest_len {
@ -557,7 +557,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
this.ptr_to_ptr(&op, dest.layout)?,
// Ptr/Int casts
(ty::RawPtr(..), ty::Int(_) | ty::Uint(_)) if expose_cast =>
this.pointer_expose_address_cast(&op, dest.layout)?,
this.pointer_expose_provenance_cast(&op, dest.layout)?,
(ty::Int(_) | ty::Uint(_), ty::RawPtr(..)) if from_exposed_cast =>
this.pointer_with_exposed_provenance_cast(&op, dest.layout)?,
// Error otherwise

View file

@ -4,6 +4,6 @@
fn main() {
let x = 42;
let xptr = &x as *const i32;
let xptr_invalid = std::ptr::without_provenance::<i32>(xptr.expose_addr());
let xptr_invalid = std::ptr::without_provenance::<i32>(xptr.expose_provenance());
let _val = unsafe { *xptr_invalid }; //~ ERROR: is a dangling pointer
}

View file

@ -6,7 +6,7 @@
fn main() {
let mut x = 0;
let _fool = &mut x as *mut i32; // this would have fooled the old untagged pointer logic
let addr = (&x as *const i32).expose_addr();
let addr = (&x as *const i32).expose_provenance();
let ptr = std::ptr::with_exposed_provenance_mut::<i32>(addr);
unsafe { *ptr = 0 }; //~ ERROR: /write access using <wildcard> .* no exposed tags have suitable permission in the borrow stack/
}

View file

@ -7,6 +7,6 @@ use std::simd::prelude::*;
fn main() {
// Pointer casts
let _val: Simd<*const u8, 4> = Simd::<*const i32, 4>::splat(ptr::null()).cast();
let addrs = Simd::<*const i32, 4>::splat(ptr::null()).expose_addr();
let addrs = Simd::<*const i32, 4>::splat(ptr::null()).expose_provenance();
let _ptrs = Simd::<*const i32, 4>::with_exposed_provenance(addrs);
}

View file

@ -10,7 +10,7 @@ fn ptr_roundtrip_out_of_bounds() {
let x: i32 = 3;
let x_ptr = &x as *const i32;
let x_usize = x_ptr.wrapping_offset(128).expose_addr();
let x_usize = x_ptr.wrapping_offset(128).expose_provenance();
let ptr = ptr::with_exposed_provenance::<i32>(x_usize).wrapping_offset(-128);
assert_eq!(unsafe { *ptr }, 3);
@ -24,8 +24,8 @@ fn ptr_roundtrip_confusion() {
let x_ptr = &x as *const i32;
let y_ptr = &y as *const i32;
let x_usize = x_ptr.expose_addr();
let y_usize = y_ptr.expose_addr();
let x_usize = x_ptr.expose_provenance();
let y_usize = y_ptr.expose_provenance();
let ptr = ptr::with_exposed_provenance::<i32>(y_usize);
let ptr = ptr.with_addr(x_usize);
@ -37,7 +37,7 @@ fn ptr_roundtrip_imperfect() {
let x: u8 = 3;
let x_ptr = &x as *const u8;
let x_usize = x_ptr.expose_addr() + 128;
let x_usize = x_ptr.expose_provenance() + 128;
let ptr = ptr::with_exposed_provenance::<u8>(x_usize).wrapping_offset(-128);
assert_eq!(unsafe { *ptr }, 3);
@ -48,7 +48,7 @@ fn ptr_roundtrip_null() {
let x = &42;
let x_ptr = x as *const i32;
let x_null_ptr = x_ptr.with_addr(0); // addr 0, but still the provenance of x
let null = x_null_ptr.expose_addr();
let null = x_null_ptr.expose_provenance();
assert_eq!(null, 0);
let x_null_ptr_copy = ptr::with_exposed_provenance::<i32>(null); // just a roundtrip, so has provenance of x (angelically)

View file

@ -17,7 +17,7 @@ fn example(variant: bool) {
unsafe {
fn not_so_innocent(x: &mut u32) -> usize {
let x_raw4 = x as *mut u32;
x_raw4.expose_addr()
x_raw4.expose_provenance()
}
let mut c = 42u32;
@ -26,7 +26,7 @@ fn example(variant: bool) {
// stack: [..., Unique(1)]
let x_raw2 = x_unique1 as *mut u32;
let x_raw2_addr = x_raw2.expose_addr();
let x_raw2_addr = x_raw2.expose_provenance();
// stack: [..., Unique(1), SharedRW(2)]
let x_unique3 = &mut *x_raw2;

View file

@ -9,7 +9,7 @@ fn main() {
// Expose the allocation and use the exposed pointer, creating an unknown bottom
unsafe {
let p: *mut u8 = ptr::with_exposed_provenance::<u8>(ptr.expose_addr()) as *mut u8;
let p: *mut u8 = ptr::with_exposed_provenance::<u8>(ptr.expose_provenance()) as *mut u8;
*p = 1;
}

View file

@ -434,6 +434,7 @@
"ui/closures/issue-111932.rs",
"ui/closures/issue-113087.rs",
"ui/closures/issue-11873.rs",
"ui/closures/issue-1460.rs",
"ui/closures/issue-23012-supertrait-signature-inference.rs",
"ui/closures/issue-25439.rs",
"ui/closures/issue-41366.rs",
@ -1007,6 +1008,8 @@
"ui/fmt/issue-86085.rs",
"ui/fmt/issue-89173.rs",
"ui/fmt/issue-91556.rs",
"ui/fn/issue-1451.rs",
"ui/fn/issue-1900.rs",
"ui/fn/issue-3044.rs",
"ui/fn/issue-3099.rs",
"ui/fn/issue-3904.rs",
@ -1550,7 +1553,6 @@
"ui/issues/issue-13497-2.rs",
"ui/issues/issue-13497.rs",
"ui/issues/issue-13507-2.rs",
"ui/issues/issue-1362.rs",
"ui/issues/issue-13620.rs",
"ui/issues/issue-13665.rs",
"ui/issues/issue-13703.rs",
@ -1576,12 +1578,8 @@
"ui/issues/issue-14399.rs",
"ui/issues/issue-14421.rs",
"ui/issues/issue-14422.rs",
"ui/issues/issue-1448-2.rs",
"ui/issues/issue-1451.rs",
"ui/issues/issue-14541.rs",
"ui/issues/issue-1460.rs",
"ui/issues/issue-14721.rs",
"ui/issues/issue-1476.rs",
"ui/issues/issue-14821.rs",
"ui/issues/issue-14845.rs",
"ui/issues/issue-14853.rs",
@ -1631,7 +1629,6 @@
"ui/issues/issue-16560.rs",
"ui/issues/issue-16562.rs",
"ui/issues/issue-16596.rs",
"ui/issues/issue-1660.rs",
"ui/issues/issue-16643.rs",
"ui/issues/issue-16648.rs",
"ui/issues/issue-16668.rs",
@ -1645,7 +1642,6 @@
"ui/issues/issue-16819.rs",
"ui/issues/issue-16922-rpass.rs",
"ui/issues/issue-16939.rs",
"ui/issues/issue-1696.rs",
"ui/issues/issue-16966.rs",
"ui/issues/issue-16994.rs",
"ui/issues/issue-17001.rs",
@ -1725,7 +1721,6 @@
"ui/issues/issue-18952.rs",
"ui/issues/issue-18959.rs",
"ui/issues/issue-18988.rs",
"ui/issues/issue-1900.rs",
"ui/issues/issue-19001.rs",
"ui/issues/issue-19037.rs",
"ui/issues/issue-19086.rs",
@ -1753,12 +1748,10 @@
"ui/issues/issue-19482.rs",
"ui/issues/issue-19499.rs",
"ui/issues/issue-19601.rs",
"ui/issues/issue-1962.rs",
"ui/issues/issue-19631.rs",
"ui/issues/issue-19632.rs",
"ui/issues/issue-19692.rs",
"ui/issues/issue-19734.rs",
"ui/issues/issue-1974.rs",
"ui/issues/issue-19811-escape-unicode.rs",
"ui/issues/issue-19850.rs",
"ui/issues/issue-19922.rs",
@ -2856,6 +2849,8 @@
"ui/lint/unused/issue-92751.rs",
"ui/lint/unused/issue-96606.rs",
"ui/lint/use-redundant/issue-92904.rs",
"ui/loops/issue-1962.rs",
"ui/loops/issue-1974.rs",
"ui/loops/issue-43162.rs",
"ui/loops/issue-50576.rs",
"ui/loops/issue-69225-SCEVAddExpr-wrap-flag.rs",
@ -3045,6 +3040,8 @@
"ui/mismatched_types/issue-118145-unwrap-for-shorthand.rs",
"ui/mismatched_types/issue-118510.rs",
"ui/mismatched_types/issue-13033.rs",
"ui/mismatched_types/issue-1362.rs",
"ui/mismatched_types/issue-1448-2.rs",
"ui/mismatched_types/issue-19109.rs",
"ui/mismatched_types/issue-26480.rs",
"ui/mismatched_types/issue-35030.rs",
@ -3860,6 +3857,7 @@
"ui/stability-attribute/issue-28388-3.rs",
"ui/stability-attribute/issue-99286-stable-intrinsics.rs",
"ui/static/auxiliary/issue_24843.rs",
"ui/static/issue-1660.rs",
"ui/static/issue-18118-2.rs",
"ui/static/issue-18118.rs",
"ui/static/issue-24446.rs",

View file

@ -17,7 +17,7 @@ use std::path::{Path, PathBuf};
const ENTRY_LIMIT: usize = 900;
// FIXME: The following limits should be reduced eventually.
const ISSUES_ENTRY_LIMIT: usize = 1750;
const ISSUES_ENTRY_LIMIT: usize = 1733;
const ROOT_ENTRY_LIMIT: usize = 860;
const EXPECTED_TEST_FILE_EXTENSIONS: &[&str] = &[

View file

@ -118,6 +118,30 @@ rust_dbg_extern_identity_TwoDoubles(struct TwoDoubles u) {
return u;
}
struct FiveU16s {
uint16_t one;
uint16_t two;
uint16_t three;
uint16_t four;
uint16_t five;
};
struct FiveU16s
rust_dbg_extern_return_FiveU16s() {
struct FiveU16s s;
s.one = 10;
s.two = 20;
s.three = 30;
s.four = 40;
s.five = 50;
return s;
}
struct FiveU16s
rust_dbg_extern_identity_FiveU16s(struct FiveU16s u) {
return u;
}
struct ManyInts {
int8_t arg1;
int16_t arg2;
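
The Rust side of this FFI test is not part of this hunk; the following is a hypothetical sketch of how the new C helpers above would be mirrored and declared from Rust (declarations only, so it would need to link against the C file above to actually run):

#[repr(C)]
#[derive(Copy, Clone, Debug, PartialEq)]
pub struct FiveU16s {
    pub one: u16,
    pub two: u16,
    pub three: u16,
    pub four: u16,
    pub five: u16,
}

extern "C" {
    pub fn rust_dbg_extern_return_FiveU16s() -> FiveU16s;
    pub fn rust_dbg_extern_identity_FiveU16s(u: FiveU16s) -> FiveU16s;
}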

View file

@ -0,0 +1,280 @@
// ignore-tidy-linelength
//@ revisions:aarch64 loongarch64 powerpc64 sparc64
//@ compile-flags: -O -C no-prepopulate-passes
//@[aarch64] compile-flags: --target aarch64-unknown-linux-gnu
//@[aarch64] needs-llvm-components: arm
//@[loongarch64] compile-flags: --target loongarch64-unknown-linux-gnu
//@[loongarch64] needs-llvm-components: loongarch
//@[powerpc64] compile-flags: --target powerpc64-unknown-linux-gnu
//@[powerpc64] needs-llvm-components: powerpc
//@[sparc64] compile-flags: --target sparc64-unknown-linux-gnu
//@[sparc64] needs-llvm-components: sparc
// Tests that arguments with `PassMode::Cast` are handled correctly.
#![feature(no_core, lang_items)]
#![crate_type = "lib"]
#![no_std]
#![no_core]
#[lang="sized"] trait Sized { }
#[lang="freeze"] trait Freeze { }
#[lang="copy"] trait Copy { }
// This struct will be passed as a single `i64` or `i32`.
// This may be (if `i64`) larger than the Rust layout, which is just `{ i16, i16 }`.
#[repr(C)]
pub struct TwoU16s {
a: u16,
b: u16,
}
// This struct will be passed as `[2 x i64]`.
// This is larger than the Rust layout.
#[repr(C)]
pub struct FiveU16s {
a: u16,
b: u16,
c: u16,
d: u16,
e: u16,
}
// This struct will be passed as `[2 x double]`.
// This is the same as the Rust layout.
#[repr(C)]
pub struct DoubleDouble {
f: f64,
g: f64,
}
// On loongarch, this struct will be passed as `{ double, float }`.
// This is smaller than the Rust layout, which has trailing padding (`{ f64, f32, <f32 padding> }`)
#[repr(C)]
pub struct DoubleFloat {
f: f64,
g: f32,
}
extern "C" {
fn receives_twou16s(x: TwoU16s);
fn returns_twou16s() -> TwoU16s;
fn receives_fiveu16s(x: FiveU16s);
fn returns_fiveu16s() -> FiveU16s;
fn receives_doubledouble(x: DoubleDouble);
fn returns_doubledouble() -> DoubleDouble;
// These functions cause an ICE in sparc64 ABI code (https://github.com/rust-lang/rust/issues/122620)
#[cfg(not(target_arch = "sparc64"))]
fn receives_doublefloat(x: DoubleFloat);
#[cfg(not(target_arch = "sparc64"))]
fn returns_doublefloat() -> DoubleFloat;
}
// CHECK-LABEL: @call_twou16s
#[no_mangle]
pub unsafe fn call_twou16s() {
// aarch64: [[ABI_ALLOCA:%.+]] = alloca [[ABI_TYPE:i64]], align [[ABI_ALIGN:8]]
// loongarch64: [[ABI_ALLOCA:%.+]] = alloca [[ABI_TYPE:i64]], align [[ABI_ALIGN:8]]
// powerpc64: [[ABI_ALLOCA:%.+]] = alloca [[ABI_TYPE:i32]], align [[ABI_ALIGN:4]]
// sparc64: [[ABI_ALLOCA:%.+]] = alloca [[ABI_TYPE:i64]], align [[ABI_ALIGN:8]]
// CHECK: [[RUST_ALLOCA:%.+]] = alloca %TwoU16s, align [[RUST_ALIGN:2]]
// CHECK: call void @llvm.memcpy.{{.+}}(ptr align [[ABI_ALIGN]] [[ABI_ALLOCA]], ptr align [[RUST_ALIGN]] [[RUST_ALLOCA]], i64 4, i1 false)
// CHECK: [[ABI_VALUE:%.+]] = load [[ABI_TYPE]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]]
// CHECK: call void @receives_twou16s([[ABI_TYPE]] [[ABI_VALUE]])
let x = TwoU16s { a: 1, b: 2 };
receives_twou16s(x);
}
// CHECK-LABEL: @return_twou16s
#[no_mangle]
pub unsafe fn return_twou16s() -> TwoU16s {
// powerpc returns this struct via sret pointer, it doesn't use the cast ABI.
// powerpc64: [[RETVAL:%.+]] = alloca %TwoU16s, align 2
// powerpc64: call void @returns_twou16s(ptr {{.+}} [[RETVAL]])
// The other targets copy the cast ABI type to an alloca.
// aarch64: [[ABI_ALLOCA:%.+]] = alloca [[ABI_TYPE:i64]], align [[ABI_ALIGN:8]]
// loongarch64: [[ABI_ALLOCA:%.+]] = alloca [[ABI_TYPE:i64]], align [[ABI_ALIGN:8]]
// sparc64: [[ABI_ALLOCA:%.+]] = alloca [[ABI_TYPE:i64]], align [[ABI_ALIGN:8]]
// aarch64: [[RUST_ALLOCA:%.+]] = alloca %TwoU16s, align [[RUST_ALIGN:2]]
// loongarch64: [[RUST_ALLOCA:%.+]] = alloca %TwoU16s, align [[RUST_ALIGN:2]]
// sparc64: [[RUST_ALLOCA:%.+]] = alloca %TwoU16s, align [[RUST_ALIGN:2]]
// aarch64: [[ABI_VALUE:%.+]] = call [[ABI_TYPE]] @returns_twou16s()
// loongarch64: [[ABI_VALUE:%.+]] = call [[ABI_TYPE]] @returns_twou16s()
// sparc64: [[ABI_VALUE:%.+]] = call [[ABI_TYPE]] @returns_twou16s()
// aarch64: store [[ABI_TYPE]] [[ABI_VALUE]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]]
// loongarch64: store [[ABI_TYPE]] [[ABI_VALUE]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]]
// sparc64: store [[ABI_TYPE]] [[ABI_VALUE]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]]
// aarch64: call void @llvm.memcpy.{{.+}}(ptr align [[RUST_ALIGN]] [[RUST_ALLOCA]], ptr align [[ABI_ALIGN]] [[ABI_ALLOCA]], i64 4, i1 false)
// loongarch64: call void @llvm.memcpy.{{.+}}(ptr align [[RUST_ALIGN]] [[RUST_ALLOCA]], ptr align [[ABI_ALIGN]] [[ABI_ALLOCA]], i64 4, i1 false)
// sparc64: call void @llvm.memcpy.{{.+}}(ptr align [[RUST_ALIGN]] [[RUST_ALLOCA]], ptr align [[ABI_ALIGN]] [[ABI_ALLOCA]], i64 4, i1 false)
returns_twou16s()
}
// CHECK-LABEL: @call_fiveu16s
#[no_mangle]
pub unsafe fn call_fiveu16s() {
// CHECK: [[ABI_ALLOCA:%.+]] = alloca [[ABI_TYPE:\[2 x i64\]]], align [[ABI_ALIGN:8]]
// CHECK: [[RUST_ALLOCA:%.+]] = alloca %FiveU16s, align 2
// CHECK: call void @llvm.memcpy.{{.+}}(ptr align [[ABI_ALIGN]] [[ABI_ALLOCA]], ptr align [[RUST_ALIGN]] [[RUST_ALLOCA]], i64 10, i1 false)
// CHECK: [[ABI_VALUE:%.+]] = load [[ABI_TYPE]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]]
// CHECK: call void @receives_fiveu16s([[ABI_TYPE]] [[ABI_VALUE]])
let x = FiveU16s { a: 1, b: 2, c: 3, d: 4, e: 5 };
receives_fiveu16s(x);
}
// CHECK-LABEL: @return_fiveu16s
// CHECK-SAME: (ptr {{.+}} sret([10 x i8]) align [[RUST_ALIGN:2]] dereferenceable(10) [[RET_PTR:%.+]])
#[no_mangle]
pub unsafe fn return_fiveu16s() -> FiveU16s {
// powerpc returns this struct via sret pointer, it doesn't use the cast ABI.
// powerpc64: call void @returns_fiveu16s(ptr {{.+}} [[RET_PTR]])
// The other targets copy the cast ABI type to the sret pointer.
// aarch64: [[ABI_ALLOCA:%.+]] = alloca [[ABI_TYPE:\[2 x i64\]]], align [[ABI_ALIGN:8]]
// loongarch64: [[ABI_ALLOCA:%.+]] = alloca [[ABI_TYPE:\[2 x i64\]]], align [[ABI_ALIGN:8]]
// sparc64: [[ABI_ALLOCA:%.+]] = alloca [[ABI_TYPE:\[2 x i64\]]], align [[ABI_ALIGN:8]]
// aarch64: [[ABI_VALUE:%.+]] = call [[ABI_TYPE]] @returns_fiveu16s()
// loongarch64: [[ABI_VALUE:%.+]] = call [[ABI_TYPE]] @returns_fiveu16s()
// sparc64: [[ABI_VALUE:%.+]] = call [[ABI_TYPE]] @returns_fiveu16s()
// aarch64: store [[ABI_TYPE]] [[ABI_VALUE]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]]
// loongarch64: store [[ABI_TYPE]] [[ABI_VALUE]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]]
// sparc64: store [[ABI_TYPE]] [[ABI_VALUE]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]]
// aarch64: call void @llvm.memcpy.{{.+}}(ptr align [[RUST_ALIGN]] [[RET_PTR]], ptr align [[ABI_ALIGN]] [[ABI_ALLOCA]], i64 10, i1 false)
// loongarch64: call void @llvm.memcpy.{{.+}}(ptr align [[RUST_ALIGN]] [[RET_PTR]], ptr align [[ABI_ALIGN]] [[ABI_ALLOCA]], i64 10, i1 false)
// sparc64: call void @llvm.memcpy.{{.+}}(ptr align [[RUST_ALIGN]] [[RET_PTR]], ptr align [[ABI_ALIGN]] [[ABI_ALLOCA]], i64 10, i1 false)
returns_fiveu16s()
}
// CHECK-LABEL: @call_doubledouble
#[no_mangle]
pub unsafe fn call_doubledouble() {
// aarch64: [[ABI_ALLOCA:%.+]] = alloca [[ABI_TYPE:\[2 x double\]]], align [[ABI_ALIGN:8]]
// loongarch64: [[ABI_ALLOCA:%.+]] = alloca [[ABI_TYPE:{ double, double }]], align [[ABI_ALIGN:8]]
// powerpc64: [[ABI_ALLOCA:%.+]] = alloca [[ABI_TYPE:\[2 x i64\]]], align [[ABI_ALIGN:8]]
// sparc64: [[ABI_ALLOCA:%.+]] = alloca [[ABI_TYPE:{ double, double }]], align [[ABI_ALIGN:8]]
// CHECK: [[RUST_ALLOCA:%.+]] = alloca %DoubleDouble, align [[RUST_ALIGN:8]]
// CHECK: call void @llvm.memcpy.{{.+}}(ptr align [[ABI_ALIGN]] [[ABI_ALLOCA]], ptr align [[RUST_ALIGN]] [[RUST_ALLOCA]], i64 16, i1 false)
// CHECK: [[ABI_VALUE:%.+]] = load [[ABI_TYPE]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]]
// CHECK: call void @receives_doubledouble([[ABI_TYPE]] [[ABI_VALUE]])
let x = DoubleDouble { f: 1., g: 2. };
receives_doubledouble(x);
}
// CHECK-LABEL: @return_doubledouble
#[no_mangle]
pub unsafe fn return_doubledouble() -> DoubleDouble {
// powerpc returns this struct via sret pointer, it doesn't use the cast ABI.
// powerpc64: [[RETVAL:%.+]] = alloca %DoubleDouble, align 8
// powerpc64: call void @returns_doubledouble(ptr {{.+}} [[RETVAL]])
// The other targets copy the cast ABI type to an alloca.
// aarch64: [[ABI_ALLOCA:%.+]] = alloca [[ABI_TYPE:\[2 x double\]]], align [[ABI_ALIGN:8]]
// loongarch64: [[ABI_ALLOCA:%.+]] = alloca [[ABI_TYPE:{ double, double }]], align [[ABI_ALIGN:8]]
// sparc64: [[ABI_ALLOCA:%.+]] = alloca [[ABI_TYPE:{ double, double }]], align [[ABI_ALIGN:8]]
// aarch64: [[RUST_ALLOCA:%.+]] = alloca %DoubleDouble, align [[RUST_ALIGN:8]]
// loongarch64: [[RUST_ALLOCA:%.+]] = alloca %DoubleDouble, align [[RUST_ALIGN:8]]
// sparc64: [[RUST_ALLOCA:%.+]] = alloca %DoubleDouble, align [[RUST_ALIGN:8]]
// aarch64: [[ABI_VALUE:%.+]] = call [[ABI_TYPE]] @returns_doubledouble()
// loongarch64: [[ABI_VALUE:%.+]] = call [[ABI_TYPE]] @returns_doubledouble()
// sparc64: [[ABI_VALUE:%.+]] = call [[ABI_TYPE]] @returns_doubledouble()
// aarch64: store [[ABI_TYPE]] [[ABI_VALUE]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]]
// loongarch64: store [[ABI_TYPE]] [[ABI_VALUE]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]]
// sparc64: store [[ABI_TYPE]] [[ABI_VALUE]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]]
// aarch64: call void @llvm.memcpy.{{.+}}(ptr align [[RUST_ALIGN]] [[RUST_ALLOCA]], ptr align [[ABI_ALIGN]] [[ABI_ALLOCA]], i64 16, i1 false)
// loongarch64: call void @llvm.memcpy.{{.+}}(ptr align [[RUST_ALIGN]] [[RUST_ALLOCA]], ptr align [[ABI_ALIGN]] [[ABI_ALLOCA]], i64 16, i1 false)
// sparc64: call void @llvm.memcpy.{{.+}}(ptr align [[RUST_ALIGN]] [[RUST_ALLOCA]], ptr align [[ABI_ALIGN]] [[ABI_ALLOCA]], i64 16, i1 false)
returns_doubledouble()
}
// This test causes an ICE in sparc64 ABI code (https://github.com/rust-lang/rust/issues/122620)
#[cfg(not(target_arch = "sparc64"))]
// aarch64-LABEL: @call_doublefloat
// loongarch64-LABEL: @call_doublefloat
// powerpc64-LABEL: @call_doublefloat
#[no_mangle]
pub unsafe fn call_doublefloat() {
// aarch64: [[ABI_ALLOCA:%.+]] = alloca [[ABI_TYPE:\[2 x i64\]]], align [[ABI_ALIGN:8]]
// loongarch64: [[ABI_ALLOCA:%.+]] = alloca [[ABI_TYPE:{ double, float }]], align [[ABI_ALIGN:8]]
// powerpc64: [[ABI_ALLOCA:%.+]] = alloca [[ABI_TYPE:\[2 x i64\]]], align [[ABI_ALIGN:8]]
// aarch64: [[RUST_ALLOCA:%.+]] = alloca %DoubleFloat, align [[RUST_ALIGN:8]]
// loongarch64: [[RUST_ALLOCA:%.+]] = alloca %DoubleFloat, align [[RUST_ALIGN:8]]
// powerpc64: [[RUST_ALLOCA:%.+]] = alloca %DoubleFloat, align [[RUST_ALIGN:8]]
// aarch64: call void @llvm.memcpy.{{.+}}(ptr align [[ABI_ALIGN]] [[ABI_ALLOCA]], ptr align [[RUST_ALIGN]] [[RUST_ALLOCA]], i64 16, i1 false)
// loongarch64: call void @llvm.memcpy.{{.+}}(ptr align [[ABI_ALIGN]] [[ABI_ALLOCA]], ptr align [[RUST_ALIGN]] [[RUST_ALLOCA]], i64 12, i1 false)
// powerpc64: call void @llvm.memcpy.{{.+}}(ptr align [[ABI_ALIGN]] [[ABI_ALLOCA]], ptr align [[RUST_ALIGN]] [[RUST_ALLOCA]], i64 16, i1 false)
// aarch64: [[ABI_VALUE:%.+]] = load [[ABI_TYPE]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]]
// loongarch64: [[ABI_VALUE:%.+]] = load [[ABI_TYPE]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]]
// powerpc64: [[ABI_VALUE:%.+]] = load [[ABI_TYPE]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]]
// aarch64: call void @receives_doublefloat([[ABI_TYPE]] {{(inreg )?}}[[ABI_VALUE]])
// loongarch64: call void @receives_doublefloat([[ABI_TYPE]] {{(inreg )?}}[[ABI_VALUE]])
// powerpc64: call void @receives_doublefloat([[ABI_TYPE]] {{(inreg )?}}[[ABI_VALUE]])
let x = DoubleFloat { f: 1., g: 2. };
receives_doublefloat(x);
}
// This test causes an ICE in sparc64 ABI code (https://github.com/rust-lang/rust/issues/122620)
#[cfg(not(target_arch = "sparc64"))]
// aarch64-LABEL: @return_doublefloat
// loongarch64-LABEL: @return_doublefloat
// powerpc64-LABEL: @return_doublefloat
#[no_mangle]
pub unsafe fn return_doublefloat() -> DoubleFloat {
// powerpc returns this struct via sret pointer, it doesn't use the cast ABI.
// powerpc64: [[RETVAL:%.+]] = alloca %DoubleFloat, align 8
// powerpc64: call void @returns_doublefloat(ptr {{.+}} [[RETVAL]])
// The other targets copy the cast ABI type to an alloca.
// aarch64: [[ABI_ALLOCA:%.+]] = alloca [[ABI_TYPE:\[2 x i64\]]], align [[ABI_ALIGN:8]]
// loongarch64: [[ABI_ALLOCA:%.+]] = alloca [[ABI_TYPE:{ double, float }]], align [[ABI_ALIGN:8]]
// aarch64: [[RUST_ALLOCA:%.+]] = alloca %DoubleFloat, align [[RUST_ALIGN:8]]
// loongarch64: [[RUST_ALLOCA:%.+]] = alloca %DoubleFloat, align [[RUST_ALIGN:8]]
// aarch64: [[ABI_VALUE:%.+]] = call [[ABI_TYPE]] @returns_doublefloat()
// loongarch64: [[ABI_VALUE:%.+]] = call [[ABI_TYPE]] @returns_doublefloat()
// aarch64: store [[ABI_TYPE]] [[ABI_VALUE]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]]
// loongarch64: store [[ABI_TYPE]] [[ABI_VALUE]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]]
// aarch64: call void @llvm.memcpy.{{.+}}(ptr align [[RUST_ALIGN]] [[RUST_ALLOCA]], ptr align [[ABI_ALIGN]] [[ABI_ALLOCA]], i64 16, i1 false)
// loongarch64: call void @llvm.memcpy.{{.+}}(ptr align [[RUST_ALIGN]] [[RUST_ALLOCA]], ptr align [[ABI_ALIGN]] [[ABI_ALLOCA]], i64 12, i1 false)
returns_doublefloat()
}

View file

@ -1,8 +1,21 @@
//@ revisions: linux apple
//@ compile-flags: -C opt-level=0 -C no-prepopulate-passes
//@[linux] compile-flags: --target x86_64-unknown-linux-gnu
//@[linux] needs-llvm-components: x86
//@[apple] compile-flags: --target x86_64-apple-darwin
//@[apple] needs-llvm-components: x86
// Regression test for #29988
//@ compile-flags: -C no-prepopulate-passes
//@ only-x86_64
//@ ignore-windows
#![feature(no_core, lang_items)]
#![crate_type = "lib"]
#![no_std]
#![no_core]
#[lang="sized"] trait Sized { }
#[lang="freeze"] trait Freeze { }
#[lang="copy"] trait Copy { }
#[repr(C)]
struct S {
@ -15,11 +28,14 @@ extern "C" {
fn foo(s: S);
}
fn main() {
// CHECK-LABEL: @test
#[no_mangle]
pub fn test() {
let s = S { f1: 1, f2: 2, f3: 3 };
unsafe {
// CHECK: load { i64, i32 }, {{.*}}, align 4
// CHECK: call void @foo({ i64, i32 } {{.*}})
// CHECK: [[ALLOCA:%.+]] = alloca { i64, i32 }, align 8
// CHECK: [[LOAD:%.+]] = load { i64, i32 }, ptr [[ALLOCA]], align 8
// CHECK: call void @foo({ i64, i32 } [[LOAD]])
foo(s);
}
}

View file

@ -19,7 +19,7 @@
StorageLive(_3);
_3 = const main::FOO;
_2 = &raw const (*_3);
_1 = move _2 as usize (PointerExposeAddress);
_1 = move _2 as usize (PointerExposeProvenance);
StorageDead(_2);
StorageDead(_3);
StorageLive(_4);

View file

@ -19,7 +19,7 @@
StorageLive(_3);
_3 = const main::FOO;
_2 = &raw const (*_3);
_1 = move _2 as usize (PointerExposeAddress);
_1 = move _2 as usize (PointerExposeProvenance);
StorageDead(_2);
StorageDead(_3);
StorageLive(_4);

View file

@ -4,12 +4,12 @@
#[inline(never)]
fn read(_: usize) { }
// EMIT_MIR pointer_expose_address.main.GVN.diff
// EMIT_MIR pointer_expose_provenance.main.GVN.diff
fn main() {
// CHECK-LABEL: fn main(
// CHECK: [[ptr:_.*]] = const main::FOO;
// CHECK: [[ref:_.*]] = &raw const (*[[ptr]]);
// CHECK: [[x:_.*]] = move [[ref]] as usize (PointerExposeAddress);
// CHECK: [[x:_.*]] = move [[ref]] as usize (PointerExposeProvenance);
// CHECK: = read([[x]])
const FOO: &i32 = &1;
let x = FOO as *const i32 as usize;

View file

@ -14,7 +14,7 @@
StorageLive(_2);
StorageLive(_3);
_3 = main as fn() (PointerCoercion(ReifyFnPointer));
_2 = move _3 as usize (PointerExposeAddress);
_2 = move _3 as usize (PointerExposeProvenance);
StorageDead(_3);
_1 = move _2 as *const fn() (PointerWithExposedProvenance);
StorageDead(_2);

Some files were not shown because too many files have changed in this diff.