Merge pull request #18457 from lnicola/sync-from-rust

minor: Sync from downstream
Laurențiu Nicola 2024-11-01 12:26:18 +00:00 committed by GitHub
commit cf6bc48353
906 changed files with 8789 additions and 6526 deletions


@ -720,6 +720,7 @@ dependencies = [
"miropt-test-tools",
"regex",
"rustfix",
"semver",
"serde",
"serde_json",
"tracing",
@ -3203,9 +3204,11 @@ dependencies = [
"rand",
"rand_xoshiro",
"rustc_data_structures",
"rustc_feature",
"rustc_index",
"rustc_macros",
"rustc_serialize",
"rustc_span",
"tracing",
]
@ -3301,6 +3304,7 @@ version = "0.0.0"
dependencies = [
"itertools",
"rustc_ast",
"rustc_data_structures",
"rustc_lexer",
"rustc_span",
"thin-vec",
@ -3621,6 +3625,7 @@ version = "0.0.0"
dependencies = [
"annotate-snippets 0.11.4",
"derive_setters",
"rustc_abi",
"rustc_ast",
"rustc_ast_pretty",
"rustc_data_structures",
@ -3718,6 +3723,7 @@ name = "rustc_hir_analysis"
version = "0.0.0"
dependencies = [
"itertools",
"rustc_abi",
"rustc_arena",
"rustc_ast",
"rustc_attr",
@ -3745,11 +3751,11 @@ dependencies = [
name = "rustc_hir_pretty"
version = "0.0.0"
dependencies = [
"rustc_abi",
"rustc_ast",
"rustc_ast_pretty",
"rustc_hir",
"rustc_span",
"rustc_target",
]
[[package]]
@ -3906,6 +3912,7 @@ dependencies = [
name = "rustc_lint"
version = "0.0.0"
dependencies = [
"rustc_abi",
"rustc_ast",
"rustc_ast_pretty",
"rustc_attr",
@ -3933,6 +3940,7 @@ dependencies = [
name = "rustc_lint_defs"
version = "0.0.0"
dependencies = [
"rustc_abi",
"rustc_ast",
"rustc_data_structures",
"rustc_error_messages",
@ -3940,7 +3948,6 @@ dependencies = [
"rustc_macros",
"rustc_serialize",
"rustc_span",
"rustc_target",
"serde",
]
@ -3980,6 +3987,7 @@ dependencies = [
"bitflags 2.6.0",
"libloading",
"odht",
"rustc_abi",
"rustc_ast",
"rustc_attr",
"rustc_data_structures",
@ -3998,7 +4006,6 @@ dependencies = [
"rustc_span",
"rustc_target",
"rustc_type_ir",
"snap",
"tempfile",
"tracing",
]
@ -4048,6 +4055,7 @@ version = "0.0.0"
dependencies = [
"either",
"itertools",
"rustc_abi",
"rustc_apfloat",
"rustc_arena",
"rustc_ast",
@ -4063,7 +4071,6 @@ dependencies = [
"rustc_pattern_analysis",
"rustc_session",
"rustc_span",
"rustc_target",
"rustc_trait_selection",
"tracing",
]
@ -4074,6 +4081,7 @@ version = "0.0.0"
dependencies = [
"polonius-engine",
"regex",
"rustc_abi",
"rustc_ast",
"rustc_data_structures",
"rustc_errors",
@ -4095,6 +4103,7 @@ version = "0.0.0"
dependencies = [
"either",
"itertools",
"rustc_abi",
"rustc_arena",
"rustc_ast",
"rustc_attr",
@ -4187,6 +4196,7 @@ dependencies = [
name = "rustc_passes"
version = "0.0.0"
dependencies = [
"rustc_abi",
"rustc_ast",
"rustc_ast_pretty",
"rustc_attr",
@ -4213,6 +4223,7 @@ name = "rustc_pattern_analysis"
version = "0.0.0"
dependencies = [
"rustc-hash 2.0.0",
"rustc_abi",
"rustc_apfloat",
"rustc_arena",
"rustc_data_structures",
@ -4352,6 +4363,7 @@ dependencies = [
"bitflags 2.6.0",
"getopts",
"libc",
"rustc_abi",
"rustc_ast",
"rustc_data_structures",
"rustc_errors",
@ -4415,6 +4427,7 @@ version = "0.0.0"
dependencies = [
"punycode",
"rustc-demangle",
"rustc_abi",
"rustc_data_structures",
"rustc_errors",
"rustc_hir",
@ -4877,12 +4890,6 @@ version = "1.13.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3c5e1a9a646d36c3599cd173a41282daf47c44583ad367b8e6837255952e5c67"
[[package]]
name = "snap"
version = "1.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1b6b67fb9a61334225b5b790716f609cd58395f895b3fe8b328786812a40bc3b"
[[package]]
name = "socket2"
version = "0.5.7"


@ -9,9 +9,11 @@ bitflags = "2.4.1"
rand = { version = "0.8.4", default-features = false, optional = true }
rand_xoshiro = { version = "0.6.0", optional = true }
rustc_data_structures = { path = "../rustc_data_structures", optional = true }
rustc_feature = { path = "../rustc_feature", optional = true }
rustc_index = { path = "../rustc_index", default-features = false }
rustc_macros = { path = "../rustc_macros", optional = true }
rustc_serialize = { path = "../rustc_serialize", optional = true }
rustc_span = { path = "../rustc_span", optional = true }
tracing = "0.1"
# tidy-alphabetical-end
@ -22,8 +24,10 @@ default = ["nightly", "randomize"]
# without depending on rustc_data_structures, rustc_macros and rustc_serialize
nightly = [
"dep:rustc_data_structures",
"dep:rustc_feature",
"dep:rustc_macros",
"dep:rustc_serialize",
"dep:rustc_span",
"rustc_index/nightly",
]
randomize = ["dep:rand", "dep:rand_xoshiro", "nightly"]
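For readers following along: the `dep:` entries make these optional dependencies feature-activated, so code that uses them must be compiled conditionally (the next diff shows the real pattern in `rustc_abi`). A minimal sketch of that gating, with a hypothetical function not taken from this commit:

#[cfg(feature = "nightly")]
pub fn nightly_only_helper() {
    // compiled only when the `nightly` feature (and its optional deps) is on
}

#[cfg(not(feature = "nightly"))]
pub fn nightly_only_helper() {
    // stable fallback so the API exists either way
}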


@ -6,9 +6,9 @@ mod abi {
#[cfg(feature = "nightly")]
use rustc_macros::HashStable_Generic;
#[cfg(feature = "nightly")]
use crate::{Abi, FieldsShape, TyAbiInterface, TyAndLayout};
use crate::{Align, HasDataLayout, Size};
#[cfg(feature = "nightly")]
use crate::{BackendRepr, FieldsShape, TyAbiInterface, TyAndLayout};
#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
@ -128,11 +128,11 @@ impl<'a, Ty> TyAndLayout<'a, Ty> {
where
Ty: TyAbiInterface<'a, C> + Copy,
{
match self.abi {
Abi::Uninhabited => Err(Heterogeneous),
match self.backend_repr {
BackendRepr::Uninhabited => Err(Heterogeneous),
// The primitive for this algorithm.
Abi::Scalar(scalar) => {
BackendRepr::Scalar(scalar) => {
let kind = match scalar.primitive() {
abi::Int(..) | abi::Pointer(_) => RegKind::Integer,
abi::Float(_) => RegKind::Float,
@ -140,7 +140,7 @@ impl<'a, Ty> TyAndLayout<'a, Ty> {
Ok(HomogeneousAggregate::Homogeneous(Reg { kind, size: self.size }))
}
Abi::Vector { .. } => {
BackendRepr::Vector { .. } => {
assert!(!self.is_zst());
Ok(HomogeneousAggregate::Homogeneous(Reg {
kind: RegKind::Vector,
@ -148,7 +148,7 @@ impl<'a, Ty> TyAndLayout<'a, Ty> {
}))
}
Abi::ScalarPair(..) | Abi::Aggregate { sized: true } => {
BackendRepr::ScalarPair(..) | BackendRepr::Memory { sized: true } => {
// Helper for computing `homogeneous_aggregate`, allowing a custom
// starting offset (used below for handling variants).
let from_fields_at =
@ -246,7 +246,7 @@ impl<'a, Ty> TyAndLayout<'a, Ty> {
Ok(result)
}
}
Abi::Aggregate { sized: false } => Err(Heterogeneous),
BackendRepr::Memory { sized: false } => Err(Heterogeneous),
}
}
}
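To make the classification above easier to follow, here is a hedged, self-contained distillation; the `Repr`, `Primitive`, and `RegKind` types below are simplified stand-ins, not rustc's:

#[derive(Clone, Copy, PartialEq, Eq, Debug)]
enum RegKind { Integer, Float, Vector }

enum Primitive { Int, Pointer, Float }

enum Repr {
    Scalar(Primitive),
    Vector,
    Memory { sized: bool },
}

// Scalars pick a register kind by primitive, vectors use vector registers,
// and unsized memory can never form a homogeneous aggregate.
fn reg_kind(repr: &Repr) -> Option<RegKind> {
    match repr {
        Repr::Scalar(Primitive::Int | Primitive::Pointer) => Some(RegKind::Integer),
        Repr::Scalar(Primitive::Float) => Some(RegKind::Float),
        Repr::Vector => Some(RegKind::Vector),
        Repr::Memory { .. } => None, // heterogeneous: passed in memory
    }
}

fn main() {
    assert_eq!(reg_kind(&Repr::Scalar(Primitive::Float)), Some(RegKind::Float));
    assert_eq!(reg_kind(&Repr::Memory { sized: false }), None);
}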


@ -7,9 +7,11 @@ use rustc_span::{Span, Symbol};
#[cfg(test)]
mod tests;
use ExternAbi as Abi;
#[derive(PartialEq, Eq, PartialOrd, Ord, Hash, Clone, Copy, Debug)]
#[derive(HashStable_Generic, Encodable, Decodable)]
pub enum Abi {
pub enum ExternAbi {
// Some of the ABIs come first because every time we add a new ABI, we have to re-bless all the
// hashing tests. These are used in many places, so giving them stable values reduces test
// churn. The specific values are meaningless.
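The hunk above renames the enum while keeping the old name usable inside the file via `use ExternAbi as Abi;`. A minimal sketch of that rename-with-alias pattern, with illustrative types:

pub enum ExternAbi { C, Rust }

use ExternAbi as Abi; // the old spelling keeps working within this module

pub fn is_c(abi: Abi) -> bool {
    matches!(abi, Abi::C)
}

fn main() {
    assert!(is_c(Abi::C));
}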


@ -6,7 +6,7 @@ use rustc_index::Idx;
use tracing::debug;
use crate::{
Abi, AbiAndPrefAlign, Align, FieldsShape, HasDataLayout, IndexSlice, IndexVec, Integer,
AbiAndPrefAlign, Align, BackendRepr, FieldsShape, HasDataLayout, IndexSlice, IndexVec, Integer,
LayoutData, Niche, NonZeroUsize, Primitive, ReprOptions, Scalar, Size, StructKind, TagEncoding,
Variants, WrappingRange,
};
@ -28,7 +28,7 @@ where
VariantIdx: Idx,
F: Deref<Target = &'a LayoutData<FieldIdx, VariantIdx>> + fmt::Debug,
{
let uninhabited = fields.iter().any(|f| f.abi.is_uninhabited());
let uninhabited = fields.iter().any(|f| f.is_uninhabited());
// We cannot ignore alignment; that might lead us to entirely discard a variant and
// produce an enum that is less aligned than it should be!
let is_1zst = fields.iter().all(|f| f.is_1zst());
@ -125,7 +125,7 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> {
offsets: [Size::ZERO, b_offset].into(),
memory_index: [0, 1].into(),
},
abi: Abi::ScalarPair(a, b),
backend_repr: BackendRepr::ScalarPair(a, b),
largest_niche,
align,
size,
@ -216,7 +216,7 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> {
LayoutData {
variants: Variants::Single { index: VariantIdx::new(0) },
fields: FieldsShape::Primitive,
abi: Abi::Uninhabited,
backend_repr: BackendRepr::Uninhabited,
largest_niche: None,
align: dl.i8_align,
size: Size::ZERO,
@ -331,7 +331,7 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> {
if let Ok(common) = common_non_zst_abi_and_align {
// Discard valid range information and allow undef
let field_abi = field.abi.to_union();
let field_abi = field.backend_repr.to_union();
if let Some((common_abi, common_align)) = common {
if common_abi != field_abi {
@ -340,7 +340,7 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> {
} else {
// Fields with the same non-Aggregate ABI should also
// have the same alignment
if !matches!(common_abi, Abi::Aggregate { .. }) {
if !matches!(common_abi, BackendRepr::Memory { .. }) {
assert_eq!(
common_align, field.align.abi,
"non-Aggregate field with matching ABI but differing alignment"
@ -369,11 +369,11 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> {
// If all non-ZST fields have the same ABI, we may forward that ABI
// for the union as a whole, unless otherwise inhibited.
let abi = match common_non_zst_abi_and_align {
Err(AbiMismatch) | Ok(None) => Abi::Aggregate { sized: true },
Err(AbiMismatch) | Ok(None) => BackendRepr::Memory { sized: true },
Ok(Some((abi, _))) => {
if abi.inherent_align(dl).map(|a| a.abi) != Some(align.abi) {
// Mismatched alignment (e.g. union is #[repr(packed)]): disable opt
Abi::Aggregate { sized: true }
BackendRepr::Memory { sized: true }
} else {
abi
}
@ -387,7 +387,7 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> {
Ok(LayoutData {
variants: Variants::Single { index: only_variant_idx },
fields: FieldsShape::Union(union_field_count),
abi,
backend_repr: abi,
largest_niche: None,
align,
size: size.align_to(align.abi),
@ -434,23 +434,23 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> {
// Already doesn't have any niches
Scalar::Union { .. } => {}
};
match &mut st.abi {
Abi::Uninhabited => {}
Abi::Scalar(scalar) => hide_niches(scalar),
Abi::ScalarPair(a, b) => {
match &mut st.backend_repr {
BackendRepr::Uninhabited => {}
BackendRepr::Scalar(scalar) => hide_niches(scalar),
BackendRepr::ScalarPair(a, b) => {
hide_niches(a);
hide_niches(b);
}
Abi::Vector { element, count: _ } => hide_niches(element),
Abi::Aggregate { sized: _ } => {}
BackendRepr::Vector { element, count: _ } => hide_niches(element),
BackendRepr::Memory { sized: _ } => {}
}
st.largest_niche = None;
return Ok(st);
}
let (start, end) = scalar_valid_range;
match st.abi {
Abi::Scalar(ref mut scalar) | Abi::ScalarPair(ref mut scalar, _) => {
match st.backend_repr {
BackendRepr::Scalar(ref mut scalar) | BackendRepr::ScalarPair(ref mut scalar, _) => {
// Enlarging validity ranges would result in missed
// optimizations, *not* wrongly assuming the inner
// value is valid. e.g. unions already enlarge validity ranges,
@ -607,8 +607,8 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> {
}
// It can't be a Scalar or ScalarPair because the offset isn't 0.
if !layout.abi.is_uninhabited() {
layout.abi = Abi::Aggregate { sized: true };
if !layout.is_uninhabited() {
layout.backend_repr = BackendRepr::Memory { sized: true };
}
layout.size += this_offset;
@ -627,26 +627,26 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> {
let same_size = size == variant_layouts[largest_variant_index].size;
let same_align = align == variant_layouts[largest_variant_index].align;
let abi = if variant_layouts.iter().all(|v| v.abi.is_uninhabited()) {
Abi::Uninhabited
let abi = if variant_layouts.iter().all(|v| v.is_uninhabited()) {
BackendRepr::Uninhabited
} else if same_size && same_align && others_zst {
match variant_layouts[largest_variant_index].abi {
match variant_layouts[largest_variant_index].backend_repr {
// When the total alignment and size match, we can use the
// same ABI as the scalar variant with the reserved niche.
Abi::Scalar(_) => Abi::Scalar(niche_scalar),
Abi::ScalarPair(first, second) => {
BackendRepr::Scalar(_) => BackendRepr::Scalar(niche_scalar),
BackendRepr::ScalarPair(first, second) => {
// Only the niche is guaranteed to be initialised,
// so use union layouts for the other primitive.
if niche_offset == Size::ZERO {
Abi::ScalarPair(niche_scalar, second.to_union())
BackendRepr::ScalarPair(niche_scalar, second.to_union())
} else {
Abi::ScalarPair(first.to_union(), niche_scalar)
BackendRepr::ScalarPair(first.to_union(), niche_scalar)
}
}
_ => Abi::Aggregate { sized: true },
_ => BackendRepr::Memory { sized: true },
}
} else {
Abi::Aggregate { sized: true }
BackendRepr::Memory { sized: true }
};
let layout = LayoutData {
@ -664,7 +664,7 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> {
offsets: [niche_offset].into(),
memory_index: [0].into(),
},
abi,
backend_repr: abi,
largest_niche,
size,
align,
@ -681,7 +681,7 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> {
let discr_type = repr.discr_type();
let bits = Integer::from_attr(dl, discr_type).size().bits();
for (i, mut val) in discriminants {
if !repr.c() && variants[i].iter().any(|f| f.abi.is_uninhabited()) {
if !repr.c() && variants[i].iter().any(|f| f.is_uninhabited()) {
continue;
}
if discr_type.is_signed() {
@ -833,14 +833,14 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> {
end: (max as u128 & tag_mask),
},
};
let mut abi = Abi::Aggregate { sized: true };
let mut abi = BackendRepr::Memory { sized: true };
if layout_variants.iter().all(|v| v.abi.is_uninhabited()) {
abi = Abi::Uninhabited;
if layout_variants.iter().all(|v| v.is_uninhabited()) {
abi = BackendRepr::Uninhabited;
} else if tag.size(dl) == size {
// Make sure we only use scalar layout when the enum is entirely its
// own tag (i.e. it has no padding nor any non-ZST variant fields).
abi = Abi::Scalar(tag);
abi = BackendRepr::Scalar(tag);
} else {
// Try to use a ScalarPair for all tagged enums.
// That's possible only if we can find a common primitive type for all variants.
@ -864,8 +864,8 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> {
break;
}
};
let prim = match field.abi {
Abi::Scalar(scalar) => {
let prim = match field.backend_repr {
BackendRepr::Scalar(scalar) => {
common_prim_initialized_in_all_variants &=
matches!(scalar, Scalar::Initialized { .. });
scalar.primitive()
@ -934,7 +934,7 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> {
{
// We can use `ScalarPair` only when it matches our
// already computed layout (including `#[repr(C)]`).
abi = pair.abi;
abi = pair.backend_repr;
}
}
}
@ -942,12 +942,14 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> {
// If we pick a "clever" (by-value) ABI, we might have to adjust the ABI of the
// variants to ensure they are consistent. This is because a downcast is
// semantically a NOP, and thus should not affect layout.
if matches!(abi, Abi::Scalar(..) | Abi::ScalarPair(..)) {
if matches!(abi, BackendRepr::Scalar(..) | BackendRepr::ScalarPair(..)) {
for variant in &mut layout_variants {
// We only do this for variants with fields; the others are not accessed anyway.
// Also do not overwrite any already existing "clever" ABIs.
if variant.fields.count() > 0 && matches!(variant.abi, Abi::Aggregate { .. }) {
variant.abi = abi;
if variant.fields.count() > 0
&& matches!(variant.backend_repr, BackendRepr::Memory { .. })
{
variant.backend_repr = abi;
// Also need to bump up the size and alignment, so that the entire value fits
// in here.
variant.size = cmp::max(variant.size, size);
@ -970,7 +972,7 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> {
memory_index: [0].into(),
},
largest_niche,
abi,
backend_repr: abi,
align,
size,
max_repr_align,
@ -1252,7 +1254,7 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> {
}
let mut layout_of_single_non_zst_field = None;
let sized = unsized_field.is_none();
let mut abi = Abi::Aggregate { sized };
let mut abi = BackendRepr::Memory { sized };
let optimize_abi = !repr.inhibit_newtype_abi_optimization();
@ -1270,16 +1272,16 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> {
// Field fills the struct and it has a scalar or scalar pair ABI.
if offsets[i].bytes() == 0 && align.abi == field.align.abi && size == field.size
{
match field.abi {
match field.backend_repr {
// For plain scalars, or vectors of them, we can't unpack
// newtypes for `#[repr(C)]`, as that affects C ABIs.
Abi::Scalar(_) | Abi::Vector { .. } if optimize_abi => {
abi = field.abi;
BackendRepr::Scalar(_) | BackendRepr::Vector { .. } if optimize_abi => {
abi = field.backend_repr;
}
// But scalar pairs are Rust-specific and get
// treated as aggregates by C ABIs anyway.
Abi::ScalarPair(..) => {
abi = field.abi;
BackendRepr::ScalarPair(..) => {
abi = field.backend_repr;
}
_ => {}
}
@ -1288,8 +1290,8 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> {
// Two non-ZST fields, and they're both scalars.
(Some((i, a)), Some((j, b)), None) => {
match (a.abi, b.abi) {
(Abi::Scalar(a), Abi::Scalar(b)) => {
match (a.backend_repr, b.backend_repr) {
(BackendRepr::Scalar(a), BackendRepr::Scalar(b)) => {
// Order by the memory placement, not source order.
let ((i, a), (j, b)) = if offsets[i] < offsets[j] {
((i, a), (j, b))
@ -1315,7 +1317,7 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> {
{
// We can use `ScalarPair` only when it matches our
// already computed layout (including `#[repr(C)]`).
abi = pair.abi;
abi = pair.backend_repr;
}
}
_ => {}
@ -1325,8 +1327,8 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> {
_ => {}
}
}
if fields.iter().any(|f| f.abi.is_uninhabited()) {
abi = Abi::Uninhabited;
if fields.iter().any(|f| f.is_uninhabited()) {
abi = BackendRepr::Uninhabited;
}
let unadjusted_abi_align = if repr.transparent() {
@ -1344,7 +1346,7 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> {
Ok(LayoutData {
variants: Variants::Single { index: VariantIdx::new(0) },
fields: FieldsShape::Arbitrary { offsets, memory_index },
abi,
backend_repr: abi,
largest_niche,
align,
size,
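One thread running through this file is how the final repr is chosen for enums: all-uninhabited variants yield `Uninhabited`, a tag that fills the whole layout yields `Scalar`, and everything else falls back to `Memory`. A simplified, hypothetical distillation of that decision:

#[derive(Debug, PartialEq)]
enum Repr { Uninhabited, Scalar, Memory }

fn pick_repr(all_variants_uninhabited: bool, tag_size: u64, total_size: u64) -> Repr {
    if all_variants_uninhabited {
        Repr::Uninhabited
    } else if tag_size == total_size {
        // the enum is entirely its own tag: no padding, no non-ZST fields
        Repr::Scalar
    } else {
        Repr::Memory
    }
}

fn main() {
    // A fieldless enum with a 1-byte tag and 1-byte size is a plain scalar.
    assert_eq!(pick_repr(false, 1, 1), Repr::Scalar);
    // Payload bytes beyond the tag force the memory representation.
    assert_eq!(pick_repr(false, 1, 8), Repr::Memory);
}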


@ -83,8 +83,8 @@ impl<'a> Layout<'a> {
&self.0.0.variants
}
pub fn abi(self) -> Abi {
self.0.0.abi
pub fn backend_repr(self) -> BackendRepr {
self.0.0.backend_repr
}
pub fn largest_niche(self) -> Option<Niche> {
@ -114,7 +114,7 @@ impl<'a> Layout<'a> {
pub fn is_pointer_like(self, data_layout: &TargetDataLayout) -> bool {
self.size() == data_layout.pointer_size
&& self.align().abi == data_layout.pointer_align.abi
&& matches!(self.abi(), Abi::Scalar(Scalar::Initialized { .. }))
&& matches!(self.backend_repr(), BackendRepr::Scalar(Scalar::Initialized { .. }))
}
}
@ -196,9 +196,9 @@ impl<'a, Ty> TyAndLayout<'a, Ty> {
Ty: TyAbiInterface<'a, C>,
C: HasDataLayout,
{
match self.abi {
Abi::Scalar(scalar) => matches!(scalar.primitive(), Float(F32 | F64)),
Abi::Aggregate { .. } => {
match self.backend_repr {
BackendRepr::Scalar(scalar) => matches!(scalar.primitive(), Float(F32 | F64)),
BackendRepr::Memory { .. } => {
if self.fields.count() == 1 && self.fields.offset(0).bytes() == 0 {
self.field(cx, 0).is_single_fp_element(cx)
} else {


@ -1,6 +1,7 @@
// tidy-alphabetical-start
#![cfg_attr(feature = "nightly", allow(internal_features))]
#![cfg_attr(feature = "nightly", doc(rust_logo))]
#![cfg_attr(feature = "nightly", feature(assert_matches))]
#![cfg_attr(feature = "nightly", feature(rustc_attrs))]
#![cfg_attr(feature = "nightly", feature(rustdoc_internals))]
#![cfg_attr(feature = "nightly", feature(step_trait))]
@ -28,8 +29,15 @@ mod layout;
#[cfg(test)]
mod tests;
#[cfg(feature = "nightly")]
mod extern_abi;
pub use callconv::{Heterogeneous, HomogeneousAggregate, Reg, RegKind};
#[cfg(feature = "nightly")]
pub use extern_abi::{
AbiDisabled, AbiUnsupported, ExternAbi, all_names, enabled_names, is_enabled, is_stable, lookup,
};
#[cfg(feature = "nightly")]
pub use layout::{FIRST_VARIANT, FieldIdx, Layout, TyAbiInterface, TyAndLayout, VariantIdx};
pub use layout::{LayoutCalculator, LayoutCalculatorError};
@ -1344,11 +1352,19 @@ impl AddressSpace {
pub const DATA: Self = AddressSpace(0);
}
/// Describes how values of the type are passed by target ABIs,
/// in terms of categories of C types there are ABI rules for.
/// The way we represent values to the backend
///
/// Previously this was conflated with the "ABI" a type is given, as in the platform-specific ABI.
/// In reality, this implies little about that, but is mostly used to describe the syntactic form
/// emitted for the backend, as most backends handle SSA values and blobs of memory differently.
/// The psABI may need consideration in doing so, but this enum does not constitute a promise for
/// how the value will be lowered to the calling convention, in itself.
///
/// Generally, a codegen backend will prefer to handle smaller values as a scalar or short vector,
/// and larger values will usually prefer to be represented as memory.
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
pub enum Abi {
pub enum BackendRepr {
Uninhabited,
Scalar(Scalar),
ScalarPair(Scalar, Scalar),
@ -1356,19 +1372,23 @@ pub enum Abi {
element: Scalar,
count: u64,
},
Aggregate {
// FIXME: I sometimes use memory, sometimes use an IR aggregate!
Memory {
/// If true, the size is exact, otherwise it's only a lower bound.
sized: bool,
},
}
impl Abi {
impl BackendRepr {
/// Returns `true` if the layout corresponds to an unsized type.
#[inline]
pub fn is_unsized(&self) -> bool {
match *self {
Abi::Uninhabited | Abi::Scalar(_) | Abi::ScalarPair(..) | Abi::Vector { .. } => false,
Abi::Aggregate { sized } => !sized,
BackendRepr::Uninhabited
| BackendRepr::Scalar(_)
| BackendRepr::ScalarPair(..)
| BackendRepr::Vector { .. } => false,
BackendRepr::Memory { sized } => !sized,
}
}
@ -1381,7 +1401,7 @@ impl Abi {
#[inline]
pub fn is_signed(&self) -> bool {
match self {
Abi::Scalar(scal) => match scal.primitive() {
BackendRepr::Scalar(scal) => match scal.primitive() {
Primitive::Int(_, signed) => signed,
_ => false,
},
@ -1392,61 +1412,67 @@ impl Abi {
/// Returns `true` if this is an uninhabited type
#[inline]
pub fn is_uninhabited(&self) -> bool {
matches!(*self, Abi::Uninhabited)
matches!(*self, BackendRepr::Uninhabited)
}
/// Returns `true` if this is a scalar type
#[inline]
pub fn is_scalar(&self) -> bool {
matches!(*self, Abi::Scalar(_))
matches!(*self, BackendRepr::Scalar(_))
}
/// Returns `true` if this is a bool
#[inline]
pub fn is_bool(&self) -> bool {
matches!(*self, Abi::Scalar(s) if s.is_bool())
matches!(*self, BackendRepr::Scalar(s) if s.is_bool())
}
/// Returns the fixed alignment of this ABI, if any is mandated.
pub fn inherent_align<C: HasDataLayout>(&self, cx: &C) -> Option<AbiAndPrefAlign> {
Some(match *self {
Abi::Scalar(s) => s.align(cx),
Abi::ScalarPair(s1, s2) => s1.align(cx).max(s2.align(cx)),
Abi::Vector { element, count } => {
BackendRepr::Scalar(s) => s.align(cx),
BackendRepr::ScalarPair(s1, s2) => s1.align(cx).max(s2.align(cx)),
BackendRepr::Vector { element, count } => {
cx.data_layout().vector_align(element.size(cx) * count)
}
Abi::Uninhabited | Abi::Aggregate { .. } => return None,
BackendRepr::Uninhabited | BackendRepr::Memory { .. } => return None,
})
}
/// Returns the fixed size of this ABI, if any is mandated.
pub fn inherent_size<C: HasDataLayout>(&self, cx: &C) -> Option<Size> {
Some(match *self {
Abi::Scalar(s) => {
BackendRepr::Scalar(s) => {
// No padding in scalars.
s.size(cx)
}
Abi::ScalarPair(s1, s2) => {
BackendRepr::ScalarPair(s1, s2) => {
// May have some padding between the pair.
let field2_offset = s1.size(cx).align_to(s2.align(cx).abi);
(field2_offset + s2.size(cx)).align_to(self.inherent_align(cx)?.abi)
}
Abi::Vector { element, count } => {
BackendRepr::Vector { element, count } => {
// No padding in vectors, except possibly for trailing padding
// to make the size a multiple of align (e.g. for vectors of size 3).
(element.size(cx) * count).align_to(self.inherent_align(cx)?.abi)
}
Abi::Uninhabited | Abi::Aggregate { .. } => return None,
BackendRepr::Uninhabited | BackendRepr::Memory { .. } => return None,
})
}
/// Discard validity range information and allow undef.
pub fn to_union(&self) -> Self {
match *self {
Abi::Scalar(s) => Abi::Scalar(s.to_union()),
Abi::ScalarPair(s1, s2) => Abi::ScalarPair(s1.to_union(), s2.to_union()),
Abi::Vector { element, count } => Abi::Vector { element: element.to_union(), count },
Abi::Uninhabited | Abi::Aggregate { .. } => Abi::Aggregate { sized: true },
BackendRepr::Scalar(s) => BackendRepr::Scalar(s.to_union()),
BackendRepr::ScalarPair(s1, s2) => {
BackendRepr::ScalarPair(s1.to_union(), s2.to_union())
}
BackendRepr::Vector { element, count } => {
BackendRepr::Vector { element: element.to_union(), count }
}
BackendRepr::Uninhabited | BackendRepr::Memory { .. } => {
BackendRepr::Memory { sized: true }
}
}
}
@ -1454,12 +1480,12 @@ impl Abi {
match (self, other) {
// Scalar, Vector, ScalarPair have `Scalar` in them where we ignore validity ranges.
// We do *not* ignore the sign since it matters for some ABIs (e.g. s390x).
(Abi::Scalar(l), Abi::Scalar(r)) => l.primitive() == r.primitive(),
(BackendRepr::Scalar(l), BackendRepr::Scalar(r)) => l.primitive() == r.primitive(),
(
Abi::Vector { element: element_l, count: count_l },
Abi::Vector { element: element_r, count: count_r },
BackendRepr::Vector { element: element_l, count: count_l },
BackendRepr::Vector { element: element_r, count: count_r },
) => element_l.primitive() == element_r.primitive() && count_l == count_r,
(Abi::ScalarPair(l1, l2), Abi::ScalarPair(r1, r2)) => {
(BackendRepr::ScalarPair(l1, l2), BackendRepr::ScalarPair(r1, r2)) => {
l1.primitive() == r1.primitive() && l2.primitive() == r2.primitive()
}
// Everything else must be strictly identical.
@ -1616,14 +1642,14 @@ pub struct LayoutData<FieldIdx: Idx, VariantIdx: Idx> {
/// must be taken into account.
pub variants: Variants<FieldIdx, VariantIdx>,
/// The `abi` defines how this data is passed between functions, and it defines
/// value restrictions via `valid_range`.
/// The `backend_repr` defines how this data will be represented to the codegen backend,
/// and encodes value restrictions via `valid_range`.
///
/// Note that this is entirely orthogonal to the recursive structure defined by
/// `variants` and `fields`; for example, `ManuallyDrop<Result<isize, isize>>` has
/// `Abi::ScalarPair`! So, even with non-`Aggregate` `abi`, `fields` and `variants`
/// `BackendRepr::ScalarPair`! So, even with non-`Memory` `backend_repr`, `fields` and `variants`
/// have to be taken into account to find all fields of this layout.
pub abi: Abi,
pub backend_repr: BackendRepr,
/// The leaf scalar with the largest number of invalid values
/// (i.e. outside of its `valid_range`), if it exists.
@ -1646,12 +1672,17 @@ pub struct LayoutData<FieldIdx: Idx, VariantIdx: Idx> {
impl<FieldIdx: Idx, VariantIdx: Idx> LayoutData<FieldIdx, VariantIdx> {
/// Returns `true` if this is an aggregate type (including a ScalarPair!)
pub fn is_aggregate(&self) -> bool {
match self.abi {
Abi::Uninhabited | Abi::Scalar(_) | Abi::Vector { .. } => false,
Abi::ScalarPair(..) | Abi::Aggregate { .. } => true,
match self.backend_repr {
BackendRepr::Uninhabited | BackendRepr::Scalar(_) | BackendRepr::Vector { .. } => false,
BackendRepr::ScalarPair(..) | BackendRepr::Memory { .. } => true,
}
}
/// Returns `true` if this is an uninhabited type
pub fn is_uninhabited(&self) -> bool {
self.backend_repr.is_uninhabited()
}
pub fn scalar<C: HasDataLayout>(cx: &C, scalar: Scalar) -> Self {
let largest_niche = Niche::from_scalar(cx, Size::ZERO, scalar);
let size = scalar.size(cx);
@ -1659,7 +1690,7 @@ impl<FieldIdx: Idx, VariantIdx: Idx> LayoutData<FieldIdx, VariantIdx> {
LayoutData {
variants: Variants::Single { index: VariantIdx::new(0) },
fields: FieldsShape::Primitive,
abi: Abi::Scalar(scalar),
backend_repr: BackendRepr::Scalar(scalar),
largest_niche,
size,
align,
@ -1681,7 +1712,7 @@ where
let LayoutData {
size,
align,
abi,
backend_repr,
fields,
largest_niche,
variants,
@ -1691,7 +1722,7 @@ where
f.debug_struct("Layout")
.field("size", size)
.field("align", align)
.field("abi", abi)
.field("abi", backend_repr)
.field("fields", fields)
.field("largest_niche", largest_niche)
.field("variants", variants)
@ -1727,12 +1758,12 @@ impl<FieldIdx: Idx, VariantIdx: Idx> LayoutData<FieldIdx, VariantIdx> {
/// Returns `true` if the layout corresponds to an unsized type.
#[inline]
pub fn is_unsized(&self) -> bool {
self.abi.is_unsized()
self.backend_repr.is_unsized()
}
#[inline]
pub fn is_sized(&self) -> bool {
self.abi.is_sized()
self.backend_repr.is_sized()
}
/// Returns `true` if the type is sized and a 1-ZST (meaning it has size 0 and alignment 1).
@ -1745,10 +1776,12 @@ impl<FieldIdx: Idx, VariantIdx: Idx> LayoutData<FieldIdx, VariantIdx> {
/// Note that this does *not* imply that the type is irrelevant for layout! It can still have
/// non-trivial alignment constraints. You probably want to use `is_1zst` instead.
pub fn is_zst(&self) -> bool {
match self.abi {
Abi::Scalar(_) | Abi::ScalarPair(..) | Abi::Vector { .. } => false,
Abi::Uninhabited => self.size.bytes() == 0,
Abi::Aggregate { sized } => sized && self.size.bytes() == 0,
match self.backend_repr {
BackendRepr::Scalar(_) | BackendRepr::ScalarPair(..) | BackendRepr::Vector { .. } => {
false
}
BackendRepr::Uninhabited => self.size.bytes() == 0,
BackendRepr::Memory { sized } => sized && self.size.bytes() == 0,
}
}
@ -1763,8 +1796,8 @@ impl<FieldIdx: Idx, VariantIdx: Idx> LayoutData<FieldIdx, VariantIdx> {
// 2nd point is quite hard to check though.
self.size == other.size
&& self.is_sized() == other.is_sized()
&& self.abi.eq_up_to_validity(&other.abi)
&& self.abi.is_bool() == other.abi.is_bool()
&& self.backend_repr.eq_up_to_validity(&other.backend_repr)
&& self.backend_repr.is_bool() == other.backend_repr.is_bool()
&& self.align.abi == other.align.abi
&& self.max_repr_align == other.max_repr_align
&& self.unadjusted_abi_align == other.unadjusted_abi_align
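The `inherent_size` rule for `ScalarPair` shown earlier in this file is easy to check by hand: offset the second scalar to its alignment, then round the end up to the pair's alignment. A small worked sketch with standalone helpers, assuming sizes and alignments in bytes:

/// Round `offset` up to a multiple of `align` (a power of two in real
/// layouts; plain multiples suffice for this sketch).
fn align_to(offset: u64, align: u64) -> u64 {
    offset.div_ceil(align) * align
}

fn scalar_pair_size(s1_size: u64, s1_align: u64, s2_size: u64, s2_align: u64) -> u64 {
    let pair_align = s1_align.max(s2_align);
    let field2_offset = align_to(s1_size, s2_align);
    align_to(field2_offset + s2_size, pair_align)
}

fn main() {
    // (i32, i64): the i64 starts at offset 8, so the pair is 16 bytes.
    assert_eq!(scalar_pair_size(4, 4, 8, 8), 16);
    // (u8, u16): one padding byte between the fields, 4 bytes total.
    assert_eq!(scalar_pair_size(1, 1, 2, 2), 4);
}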


@ -2810,6 +2810,8 @@ pub struct ModSpans {
/// E.g., `extern { .. }` or `extern "C" { .. }`.
#[derive(Clone, Encodable, Decodable, Debug)]
pub struct ForeignMod {
/// Span of the `extern` keyword.
pub extern_span: Span,
/// `unsafe` keyword accepted syntactically for macro DSLs, but not
/// semantically by Rust.
pub safety: Safety,


@ -525,7 +525,7 @@ pub fn walk_ty<T: MutVisitor>(vis: &mut T, ty: &mut P<Ty>) {
}
fn walk_foreign_mod<T: MutVisitor>(vis: &mut T, foreign_mod: &mut ForeignMod) {
let ForeignMod { safety, abi: _, items } = foreign_mod;
let ForeignMod { extern_span: _, safety, abi: _, items } = foreign_mod;
visit_safety(vis, safety);
items.flat_map_in_place(|item| vis.flat_map_foreign_item(item));
}
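The visitor updates here rely on exhaustive destructuring: because the pattern spells out every field without `..`, adding `extern_span` forces each visitor to acknowledge it. A minimal sketch of why that is compile-time checked, with illustrative names:

struct ForeignModLike {
    extern_span: u32,
    safety: bool,
    items: Vec<u8>,
}

fn walk(fm: &ForeignModLike) {
    // No `..` rest pattern: adding a field to the struct is a compile error
    // at this site until the visitor names it.
    let ForeignModLike { extern_span: _, safety: _, items } = fm;
    for _item in items {}
}

fn main() {
    walk(&ForeignModLike { extern_span: 0, safety: false, items: vec![1, 2] });
}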


@ -368,7 +368,7 @@ impl Clone for TokenKind {
// a copy. This is faster than the `derive(Clone)` version which has a
// separate path for every variant.
match self {
Interpolated(nt) => Interpolated(nt.clone()),
Interpolated(nt) => Interpolated(Lrc::clone(nt)),
_ => unsafe { std::ptr::read(self) },
}
}
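The change above swaps method-call `clone()` for the explicit associated-function form. A tiny sketch of the idea, using `std::sync::Arc` to stand in for rustc's `Lrc` alias:

use std::sync::Arc as Lrc;

// `Lrc::clone(&x)` makes it explicit that only the reference count is bumped,
// not the pointed-to data; `x.clone()` compiles to the same thing but reads
// as if it might deep-copy.
fn share(nt: &Lrc<String>) -> Lrc<String> {
    Lrc::clone(nt)
}

fn main() {
    let token = Lrc::new(String::from("nt"));
    let shared = share(&token);
    assert!(Lrc::ptr_eq(&token, &shared)); // same allocation
}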


@ -366,7 +366,7 @@ impl WalkItemKind for ItemKind {
}
ModKind::Unloaded => {}
},
ItemKind::ForeignMod(ForeignMod { safety: _, abi: _, items }) => {
ItemKind::ForeignMod(ForeignMod { extern_span: _, safety: _, abi: _, items }) => {
walk_list!(visitor, visit_foreign_item, items);
}
ItemKind::GlobalAsm(asm) => try_visit!(visitor.visit_inline_asm(asm)),


@ -10,17 +10,18 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
b: &Block,
targeted_by_break: bool,
) -> &'hir hir::Block<'hir> {
self.arena.alloc(self.lower_block_noalloc(b, targeted_by_break))
let hir_id = self.lower_node_id(b.id);
self.arena.alloc(self.lower_block_noalloc(hir_id, b, targeted_by_break))
}
pub(super) fn lower_block_noalloc(
&mut self,
hir_id: hir::HirId,
b: &Block,
targeted_by_break: bool,
) -> hir::Block<'hir> {
let (stmts, expr) = self.lower_stmts(&b.stmts);
let rules = self.lower_block_check_mode(&b.rules);
let hir_id = self.lower_node_id(b.id);
hir::Block { hir_id, stmts, expr, rules, span: self.lower_span(b.span), targeted_by_break }
}


@ -259,10 +259,11 @@ impl<'hir> LoweringContext<'_, 'hir> {
self_param_id: pat_node_id,
};
self_resolver.visit_block(block);
// The target expr needs to lower the `self` path.
this.ident_and_label_to_local_id.insert(pat_node_id, param.pat.hir_id.local_id);
this.lower_target_expr(&block)
} else {
let pat_hir_id = this.lower_node_id(pat_node_id);
this.generate_arg(pat_hir_id, span)
this.generate_arg(param.pat.hir_id, span)
};
args.push(arg);
}


@ -3,6 +3,7 @@ use std::assert_matches::assert_matches;
use rustc_ast::ptr::P as AstP;
use rustc_ast::*;
use rustc_data_structures::stack::ensure_sufficient_stack;
use rustc_data_structures::sync::Lrc;
use rustc_hir as hir;
use rustc_hir::HirId;
use rustc_hir::def::{DefKind, Res};
@ -70,8 +71,8 @@ impl<'hir> LoweringContext<'_, 'hir> {
_ => (),
}
let hir_id = self.lower_node_id(e.id);
self.lower_attrs(hir_id, &e.attrs);
let expr_hir_id = self.lower_node_id(e.id);
self.lower_attrs(expr_hir_id, &e.attrs);
let kind = match &e.kind {
ExprKind::Array(exprs) => hir::ExprKind::Array(self.lower_exprs(exprs)),
@ -143,7 +144,7 @@ impl<'hir> LoweringContext<'_, 'hir> {
ExprKind::IncludedBytes(bytes) => {
let lit = self.arena.alloc(respan(
self.lower_span(e.span),
LitKind::ByteStr(bytes.clone(), StrStyle::Cooked),
LitKind::ByteStr(Lrc::clone(bytes), StrStyle::Cooked),
));
hir::ExprKind::Lit(lit)
}
@ -175,18 +176,25 @@ impl<'hir> LoweringContext<'_, 'hir> {
ExprKind::If(cond, then, else_opt) => {
self.lower_expr_if(cond, then, else_opt.as_deref())
}
ExprKind::While(cond, body, opt_label) => self.with_loop_scope(e.id, |this| {
let span = this.mark_span_with_reason(DesugaringKind::WhileLoop, e.span, None);
this.lower_expr_while_in_loop_scope(span, cond, body, *opt_label)
}),
ExprKind::Loop(body, opt_label, span) => self.with_loop_scope(e.id, |this| {
hir::ExprKind::Loop(
this.lower_block(body, false),
this.lower_label(*opt_label),
hir::LoopSource::Loop,
this.lower_span(*span),
)
}),
ExprKind::While(cond, body, opt_label) => {
self.with_loop_scope(expr_hir_id, |this| {
let span =
this.mark_span_with_reason(DesugaringKind::WhileLoop, e.span, None);
let opt_label = this.lower_label(*opt_label, e.id, expr_hir_id);
this.lower_expr_while_in_loop_scope(span, cond, body, opt_label)
})
}
ExprKind::Loop(body, opt_label, span) => {
self.with_loop_scope(expr_hir_id, |this| {
let opt_label = this.lower_label(*opt_label, e.id, expr_hir_id);
hir::ExprKind::Loop(
this.lower_block(body, false),
opt_label,
hir::LoopSource::Loop,
this.lower_span(*span),
)
})
}
ExprKind::TryBlock(body) => self.lower_expr_try_block(body),
ExprKind::Match(expr, arms, kind) => hir::ExprKind::Match(
self.lower_expr(expr),
@ -212,7 +220,7 @@ impl<'hir> LoweringContext<'_, 'hir> {
binder,
*capture_clause,
e.id,
hir_id,
expr_hir_id,
*coroutine_kind,
fn_decl,
body,
@ -223,7 +231,7 @@ impl<'hir> LoweringContext<'_, 'hir> {
binder,
*capture_clause,
e.id,
hir_id,
expr_hir_id,
*constness,
*movability,
fn_decl,
@ -250,8 +258,16 @@ impl<'hir> LoweringContext<'_, 'hir> {
)
}
ExprKind::Block(blk, opt_label) => {
let opt_label = self.lower_label(*opt_label);
hir::ExprKind::Block(self.lower_block(blk, opt_label.is_some()), opt_label)
// Unlike loops, a block's label resolves to the block id rather than the
// expr node id.
let block_hir_id = self.lower_node_id(blk.id);
let opt_label = self.lower_label(*opt_label, blk.id, block_hir_id);
let hir_block = self.arena.alloc(self.lower_block_noalloc(
block_hir_id,
blk,
opt_label.is_some(),
));
hir::ExprKind::Block(hir_block, opt_label)
}
ExprKind::Assign(el, er, span) => self.lower_expr_assign(el, er, *span, e.span),
ExprKind::AssignOp(op, el, er) => hir::ExprKind::AssignOp(
@ -354,7 +370,7 @@ impl<'hir> LoweringContext<'_, 'hir> {
ExprKind::MacCall(_) => panic!("{:?} shouldn't exist here", e.span),
};
hir::Expr { hir_id, kind, span: self.lower_span(e.span) }
hir::Expr { hir_id: expr_hir_id, kind, span: self.lower_span(e.span) }
})
}
@ -504,7 +520,6 @@ impl<'hir> LoweringContext<'_, 'hir> {
let if_expr = self.expr(span, if_kind);
let block = self.block_expr(self.arena.alloc(if_expr));
let span = self.lower_span(span.with_hi(cond.span.hi()));
let opt_label = self.lower_label(opt_label);
hir::ExprKind::Loop(block, opt_label, hir::LoopSource::While, span)
}
@ -512,8 +527,9 @@ impl<'hir> LoweringContext<'_, 'hir> {
/// `try { <stmts>; }` into `{ <stmts>; ::std::ops::Try::from_output(()) }`
/// and save the block id to use it as a break target for desugaring of the `?` operator.
fn lower_expr_try_block(&mut self, body: &Block) -> hir::ExprKind<'hir> {
self.with_catch_scope(body.id, |this| {
let mut block = this.lower_block_noalloc(body, true);
let body_hir_id = self.lower_node_id(body.id);
self.with_catch_scope(body_hir_id, |this| {
let mut block = this.lower_block_noalloc(body_hir_id, body, true);
// Final expression of the block (if present) or `()` with span at the end of block
let (try_span, tail_expr) = if let Some(expr) = block.expr.take() {
@ -521,7 +537,7 @@ impl<'hir> LoweringContext<'_, 'hir> {
this.mark_span_with_reason(
DesugaringKind::TryBlock,
expr.span,
Some(this.allow_try_trait.clone()),
Some(Lrc::clone(&this.allow_try_trait)),
),
expr,
)
@ -529,7 +545,7 @@ impl<'hir> LoweringContext<'_, 'hir> {
let try_span = this.mark_span_with_reason(
DesugaringKind::TryBlock,
this.tcx.sess.source_map().end_point(body.span),
Some(this.allow_try_trait.clone()),
Some(Lrc::clone(&this.allow_try_trait)),
);
(try_span, this.expr_unit(try_span))
@ -638,7 +654,7 @@ impl<'hir> LoweringContext<'_, 'hir> {
let unstable_span = self.mark_span_with_reason(
DesugaringKind::Async,
self.lower_span(span),
Some(self.allow_gen_future.clone()),
Some(Lrc::clone(&self.allow_gen_future)),
);
let resume_ty =
self.make_lang_item_qpath(hir::LangItem::ResumeTy, unstable_span, None);
@ -724,7 +740,7 @@ impl<'hir> LoweringContext<'_, 'hir> {
let unstable_span = self.mark_span_with_reason(
DesugaringKind::Async,
span,
Some(self.allow_gen_future.clone()),
Some(Lrc::clone(&self.allow_gen_future)),
);
self.lower_attrs(inner_hir_id, &[Attribute {
kind: AttrKind::Normal(ptr::P(NormalAttr::from_ident(Ident::new(
@ -800,13 +816,13 @@ impl<'hir> LoweringContext<'_, 'hir> {
let features = match await_kind {
FutureKind::Future => None,
FutureKind::AsyncIterator => Some(self.allow_for_await.clone()),
FutureKind::AsyncIterator => Some(Lrc::clone(&self.allow_for_await)),
};
let span = self.mark_span_with_reason(DesugaringKind::Await, await_kw_span, features);
let gen_future_span = self.mark_span_with_reason(
DesugaringKind::Await,
full_span,
Some(self.allow_gen_future.clone()),
Some(Lrc::clone(&self.allow_gen_future)),
);
let expr_hir_id = expr.hir_id;
@ -869,7 +885,7 @@ impl<'hir> LoweringContext<'_, 'hir> {
let x_expr = self.expr_ident(gen_future_span, x_ident, x_pat_hid);
let ready_field = self.single_pat_field(gen_future_span, x_pat);
let ready_pat = self.pat_lang_item_variant(span, hir::LangItem::PollReady, ready_field);
let break_x = self.with_loop_scope(loop_node_id, move |this| {
let break_x = self.with_loop_scope(loop_hir_id, move |this| {
let expr_break =
hir::ExprKind::Break(this.lower_loop_destination(None), Some(x_expr));
this.arena.alloc(this.expr(gen_future_span, expr_break))
@ -1101,8 +1117,7 @@ impl<'hir> LoweringContext<'_, 'hir> {
hir::CoroutineSource::Closure,
);
let hir_id = this.lower_node_id(coroutine_kind.closure_id());
this.maybe_forward_track_caller(body.span, closure_hir_id, hir_id);
this.maybe_forward_track_caller(body.span, closure_hir_id, expr.hir_id);
(parameters, expr)
});
@ -1465,8 +1480,16 @@ impl<'hir> LoweringContext<'_, 'hir> {
)
}
fn lower_label(&self, opt_label: Option<Label>) -> Option<Label> {
// Record the labelled expr's HirId so that `lower_jump_destination` can retrieve it
// without lowering the node id again.
fn lower_label(
&mut self,
opt_label: Option<Label>,
dest_id: NodeId,
dest_hir_id: hir::HirId,
) -> Option<Label> {
let label = opt_label?;
self.ident_and_label_to_local_id.insert(dest_id, dest_hir_id.local_id);
Some(Label { ident: self.lower_ident(label.ident) })
}
@ -1474,17 +1497,20 @@ impl<'hir> LoweringContext<'_, 'hir> {
let target_id = match destination {
Some((id, _)) => {
if let Some(loop_id) = self.resolver.get_label_res(id) {
Ok(self.lower_node_id(loop_id))
let local_id = self.ident_and_label_to_local_id[&loop_id];
let loop_hir_id = HirId { owner: self.current_hir_id_owner, local_id };
Ok(loop_hir_id)
} else {
Err(hir::LoopIdError::UnresolvedLabel)
}
}
None => self
.loop_scope
.map(|id| Ok(self.lower_node_id(id)))
.unwrap_or(Err(hir::LoopIdError::OutsideLoopScope)),
None => {
self.loop_scope.map(|id| Ok(id)).unwrap_or(Err(hir::LoopIdError::OutsideLoopScope))
}
};
let label = self.lower_label(destination.map(|(_, label)| label));
let label = destination
.map(|(_, label)| label)
.map(|label| Label { ident: self.lower_ident(label.ident) });
hir::Destination { label, target_id }
}
@ -1499,14 +1525,14 @@ impl<'hir> LoweringContext<'_, 'hir> {
}
}
fn with_catch_scope<T>(&mut self, catch_id: NodeId, f: impl FnOnce(&mut Self) -> T) -> T {
fn with_catch_scope<T>(&mut self, catch_id: hir::HirId, f: impl FnOnce(&mut Self) -> T) -> T {
let old_scope = self.catch_scope.replace(catch_id);
let result = f(self);
self.catch_scope = old_scope;
result
}
fn with_loop_scope<T>(&mut self, loop_id: NodeId, f: impl FnOnce(&mut Self) -> T) -> T {
fn with_loop_scope<T>(&mut self, loop_id: hir::HirId, f: impl FnOnce(&mut Self) -> T) -> T {
// We're no longer in the base loop's condition; we're in another loop.
let was_in_loop_condition = self.is_in_loop_condition;
self.is_in_loop_condition = false;
@ -1658,9 +1684,13 @@ impl<'hir> LoweringContext<'_, 'hir> {
let head_span = self.mark_span_with_reason(DesugaringKind::ForLoop, head.span, None);
let pat_span = self.mark_span_with_reason(DesugaringKind::ForLoop, pat.span, None);
let loop_hir_id = self.lower_node_id(e.id);
let label = self.lower_label(opt_label, e.id, loop_hir_id);
// `None => break`
let none_arm = {
let break_expr = self.with_loop_scope(e.id, |this| this.expr_break_alloc(for_span));
let break_expr =
self.with_loop_scope(loop_hir_id, |this| this.expr_break_alloc(for_span));
let pat = self.pat_none(for_span);
self.arm(pat, break_expr)
};
@ -1668,7 +1698,8 @@ impl<'hir> LoweringContext<'_, 'hir> {
// Some(<pat>) => <body>,
let some_arm = {
let some_pat = self.pat_some(pat_span, pat);
let body_block = self.with_loop_scope(e.id, |this| this.lower_block(body, false));
let body_block =
self.with_loop_scope(loop_hir_id, |this| this.lower_block(body, false));
let body_expr = self.arena.alloc(self.expr_block(body_block));
self.arm(some_pat, body_expr)
};
@ -1722,12 +1753,11 @@ impl<'hir> LoweringContext<'_, 'hir> {
// `[opt_ident]: loop { ... }`
let kind = hir::ExprKind::Loop(
loop_block,
self.lower_label(opt_label),
label,
hir::LoopSource::ForLoop,
self.lower_span(for_span.with_hi(head.span.hi())),
);
let loop_expr =
self.arena.alloc(hir::Expr { hir_id: self.lower_node_id(e.id), kind, span: for_span });
let loop_expr = self.arena.alloc(hir::Expr { hir_id: loop_hir_id, kind, span: for_span });
// `mut iter => { ... }`
let iter_arm = self.arm(iter_pat, loop_expr);
@ -1812,13 +1842,13 @@ impl<'hir> LoweringContext<'_, 'hir> {
let unstable_span = self.mark_span_with_reason(
DesugaringKind::QuestionMark,
span,
Some(self.allow_try_trait.clone()),
Some(Lrc::clone(&self.allow_try_trait)),
);
let try_span = self.tcx.sess.source_map().end_point(span);
let try_span = self.mark_span_with_reason(
DesugaringKind::QuestionMark,
try_span,
Some(self.allow_try_trait.clone()),
Some(Lrc::clone(&self.allow_try_trait)),
);
// `Try::branch(<expr>)`
@ -1867,8 +1897,8 @@ impl<'hir> LoweringContext<'_, 'hir> {
self.arena.alloc(residual_expr),
unstable_span,
);
let ret_expr = if let Some(catch_node) = self.catch_scope {
let target_id = Ok(self.lower_node_id(catch_node));
let ret_expr = if let Some(catch_id) = self.catch_scope {
let target_id = Ok(catch_id);
self.arena.alloc(self.expr(
try_span,
hir::ExprKind::Break(
@ -1912,7 +1942,7 @@ impl<'hir> LoweringContext<'_, 'hir> {
let unstable_span = self.mark_span_with_reason(
DesugaringKind::YeetExpr,
span,
Some(self.allow_try_trait.clone()),
Some(Lrc::clone(&self.allow_try_trait)),
);
let from_yeet_expr = self.wrap_in_try_constructor(
@ -1922,8 +1952,8 @@ impl<'hir> LoweringContext<'_, 'hir> {
yeeted_span,
);
if let Some(catch_node) = self.catch_scope {
let target_id = Ok(self.lower_node_id(catch_node));
if let Some(catch_id) = self.catch_scope {
let target_id = Ok(catch_id);
hir::ExprKind::Break(hir::Destination { label: None, target_id }, Some(from_yeet_expr))
} else {
hir::ExprKind::Ret(Some(from_yeet_expr))
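Several hunks in this file replace NodeId-based loop/catch scopes with `HirId`s recorded up front: labelled nodes register their local id once, and `break`/`continue` later rebuild the target from the current owner plus that local id. A hypothetical, self-contained sketch of that bookkeeping, with simplified id types that are not rustc's:

use std::collections::HashMap;

#[derive(Clone, Copy, Debug, PartialEq)]
struct HirId {
    owner: u32,
    local_id: u32,
}

struct Ctx {
    current_owner: u32,
    label_to_local_id: HashMap<u32, u32>, // NodeId -> ItemLocalId
}

impl Ctx {
    /// Called while lowering a labelled loop or block.
    fn record_label(&mut self, label_node_id: u32, dest: HirId) {
        self.label_to_local_id.insert(label_node_id, dest.local_id);
    }

    /// Called later from `break`/`continue` lowering; no re-lowering of the
    /// NodeId is needed, only a lookup plus the current owner.
    fn jump_target(&self, label_node_id: u32) -> Option<HirId> {
        let local_id = *self.label_to_local_id.get(&label_node_id)?;
        Some(HirId { owner: self.current_owner, local_id })
    }
}

fn main() {
    let mut cx = Ctx { current_owner: 1, label_to_local_id: HashMap::new() };
    cx.record_label(42, HirId { owner: 1, local_id: 7 });
    assert_eq!(cx.jump_target(42), Some(HirId { owner: 1, local_id: 7 }));
}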


@ -154,7 +154,7 @@ impl<'hir> LoweringContext<'_, 'hir> {
fn lower_item(&mut self, i: &Item) -> &'hir hir::Item<'hir> {
let mut ident = i.ident;
let vis_span = self.lower_span(i.vis.span);
let hir_id = self.lower_node_id(i.id);
let hir_id = hir::HirId::make_owner(self.current_hir_id_owner.def_id);
let attrs = self.lower_attrs(hir_id, &i.attrs);
let kind = self.lower_item_kind(i.span, i.id, hir_id, &mut ident, attrs, vis_span, &i.kind);
let item = hir::Item {
@ -604,7 +604,7 @@ impl<'hir> LoweringContext<'_, 'hir> {
}
fn lower_foreign_item(&mut self, i: &ForeignItem) -> &'hir hir::ForeignItem<'hir> {
let hir_id = self.lower_node_id(i.id);
let hir_id = hir::HirId::make_owner(self.current_hir_id_owner.def_id);
let owner_id = hir_id.expect_owner();
self.lower_attrs(hir_id, &i.attrs);
let item = hir::ForeignItem {
@ -728,7 +728,7 @@ impl<'hir> LoweringContext<'_, 'hir> {
}
fn lower_trait_item(&mut self, i: &AssocItem) -> &'hir hir::TraitItem<'hir> {
let hir_id = self.lower_node_id(i.id);
let hir_id = hir::HirId::make_owner(self.current_hir_id_owner.def_id);
self.lower_attrs(hir_id, &i.attrs);
let trait_item_def_id = hir_id.expect_owner();
@ -858,7 +858,7 @@ impl<'hir> LoweringContext<'_, 'hir> {
// Since `default impl` is not yet implemented, this is always true in impls.
let has_value = true;
let (defaultness, _) = self.lower_defaultness(i.kind.defaultness(), has_value);
let hir_id = self.lower_node_id(i.id);
let hir_id = hir::HirId::make_owner(self.current_hir_id_owner.def_id);
self.lower_attrs(hir_id, &i.attrs);
let (generics, kind) = match &i.kind {
@ -1086,7 +1086,7 @@ impl<'hir> LoweringContext<'_, 'hir> {
);
// FIXME(async_fn_track_caller): Can this be moved above?
let hir_id = this.lower_node_id(coroutine_kind.closure_id());
let hir_id = expr.hir_id;
this.maybe_forward_track_caller(body.span, fn_id, hir_id);
(parameters, expr)
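`HirId::make_owner` works because an owner item's own id always has local id zero within its owner, so it can be built directly from the owner's def id instead of going through the NodeId table. A small sketch of that invariant, with illustrative types:

#[derive(Clone, Copy, Debug, PartialEq)]
struct OwnerId {
    def_id: u32,
}

#[derive(Clone, Copy, Debug, PartialEq)]
struct HirId {
    owner: OwnerId,
    local_id: u32,
}

impl HirId {
    /// The owner's own id is always `(owner, local_id = 0)`.
    fn make_owner(def_id: u32) -> HirId {
        HirId { owner: OwnerId { def_id }, local_id: 0 }
    }
}

fn main() {
    assert_eq!(HirId::make_owner(3).local_id, 0);
}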


@ -40,23 +40,19 @@
#![warn(unreachable_pub)]
// tidy-alphabetical-end
use std::collections::hash_map::Entry;
use rustc_ast::node_id::NodeMap;
use rustc_ast::ptr::P;
use rustc_ast::{self as ast, *};
use rustc_data_structures::captures::Captures;
use rustc_data_structures::fingerprint::Fingerprint;
use rustc_data_structures::fx::FxIndexSet;
use rustc_data_structures::sorted_map::SortedMap;
use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
use rustc_data_structures::sync::Lrc;
use rustc_errors::{DiagArgFromDisplay, DiagCtxtHandle, StashKey};
use rustc_hir::def::{DefKind, LifetimeRes, Namespace, PartialRes, PerNS, Res};
use rustc_hir::def_id::{CRATE_DEF_ID, LOCAL_CRATE, LocalDefId, LocalDefIdMap};
use rustc_hir::def_id::{CRATE_DEF_ID, LOCAL_CRATE, LocalDefId};
use rustc_hir::{
self as hir, ConstArg, GenericArg, HirId, ItemLocalMap, LangItem, MissingLifetimeKind,
ParamName, TraitCandidate,
self as hir, ConstArg, GenericArg, HirId, ItemLocalMap, LangItem, ParamName, TraitCandidate,
};
use rustc_index::{Idx, IndexSlice, IndexVec};
use rustc_macros::extension;
@ -85,7 +81,6 @@ mod expr;
mod format;
mod index;
mod item;
mod lifetime_collector;
mod pat;
mod path;
@ -115,8 +110,8 @@ struct LoweringContext<'a, 'hir> {
/// outside of an `async fn`.
current_item: Option<Span>,
catch_scope: Option<NodeId>,
loop_scope: Option<NodeId>,
catch_scope: Option<HirId>,
loop_scope: Option<HirId>,
is_in_loop_condition: bool,
is_in_trait_impl: bool,
is_in_dyn_type: bool,
@ -140,7 +135,10 @@ struct LoweringContext<'a, 'hir> {
impl_trait_defs: Vec<hir::GenericParam<'hir>>,
impl_trait_bounds: Vec<hir::WherePredicate<'hir>>,
/// NodeIds that are lowered inside the current HIR owner.
/// NodeIds of pattern identifiers and labelled nodes that are lowered inside the current HIR owner.
ident_and_label_to_local_id: NodeMap<hir::ItemLocalId>,
/// NodeIds that are lowered inside the current HIR owner. Only used to check for duplicate lowering.
#[cfg(debug_assertions)]
node_id_to_local_id: NodeMap<hir::ItemLocalId>,
allow_try_trait: Lrc<[Symbol]>,
@ -148,12 +146,6 @@ struct LoweringContext<'a, 'hir> {
allow_async_iterator: Lrc<[Symbol]>,
allow_for_await: Lrc<[Symbol]>,
allow_async_fn_traits: Lrc<[Symbol]>,
/// Mapping from generics `def_id`s to TAIT generics `def_id`s.
/// For each captured lifetime (e.g., 'a), we create a new lifetime parameter that is a generic
/// defined on the TAIT, so we have type Foo<'a1> = ... and we establish a mapping in this
/// field from the original parameter 'a to the new parameter 'a1.
generics_def_id_map: Vec<LocalDefIdMap<LocalDefId>>,
}
impl<'a, 'hir> LoweringContext<'a, 'hir> {
@ -171,6 +163,8 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
current_hir_id_owner: hir::CRATE_OWNER_ID,
current_def_id_parent: CRATE_DEF_ID,
item_local_id_counter: hir::ItemLocalId::ZERO,
ident_and_label_to_local_id: Default::default(),
#[cfg(debug_assertions)]
node_id_to_local_id: Default::default(),
trait_map: Default::default(),
@ -196,7 +190,6 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
// FIXME(gen_blocks): how does `closure_track_caller`/`async_fn_track_caller`
// interact with `gen`/`async gen` blocks
allow_async_iterator: [sym::gen_future, sym::async_iterator].into(),
generics_def_id_map: Default::default(),
}
}
@ -279,7 +272,7 @@ enum ImplTraitContext {
/// Example: `fn foo() -> impl Debug`, where `impl Debug` is conceptually
/// equivalent to a new opaque type like `type T = impl Debug; fn foo() -> T`.
///
OpaqueTy { origin: hir::OpaqueTyOrigin },
OpaqueTy { origin: hir::OpaqueTyOrigin<LocalDefId> },
/// `impl Trait` is unstably accepted in this position.
FeatureGated(ImplTraitPosition, Symbol),
/// `impl Trait` is not accepted in this position.
@ -525,54 +518,14 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
/// Given the id of some node in the AST, finds the `LocalDefId` associated with it by the name
/// resolver (if any).
fn orig_opt_local_def_id(&self, node: NodeId) -> Option<LocalDefId> {
self.resolver.node_id_to_def_id.get(&node).copied()
}
/// Given the id of some node in the AST, finds the `LocalDefId` associated with it by the name
/// resolver (if any), after applying any remapping from `get_remapped_def_id`.
///
/// For example, in a function like `fn foo<'a>(x: &'a u32)`,
/// invoking with the id from the `ast::Lifetime` node found inside
/// the `&'a u32` type would return the `LocalDefId` of the
/// `'a` parameter declared on `foo`.
///
/// This function also applies remapping from `get_remapped_def_id`.
/// These are used when synthesizing opaque types from `-> impl Trait` return types and so forth.
/// For example, in a function like `fn foo<'a>() -> impl Debug + 'a`,
/// we would create an opaque type `type FooReturn<'a1> = impl Debug + 'a1`.
/// When lowering the `Debug + 'a` bounds, we add a remapping to map `'a` to `'a1`.
fn opt_local_def_id(&self, node: NodeId) -> Option<LocalDefId> {
self.orig_opt_local_def_id(node).map(|local_def_id| self.get_remapped_def_id(local_def_id))
self.resolver.node_id_to_def_id.get(&node).copied()
}
fn local_def_id(&self, node: NodeId) -> LocalDefId {
self.opt_local_def_id(node).unwrap_or_else(|| panic!("no entry for node id: `{node:?}`"))
}
/// Get the previously recorded `to` local def id given the `from` local def id, obtained using
/// `generics_def_id_map` field.
fn get_remapped_def_id(&self, local_def_id: LocalDefId) -> LocalDefId {
// `generics_def_id_map` is a stack of mappings. As we go deeper in impl traits nesting we
// push new mappings, so we first need to get the latest (innermost) mappings, hence `iter().rev()`.
//
// Consider:
//
// `fn test<'a, 'b>() -> impl Trait<&'a u8, Ty = impl Sized + 'b> {}`
//
// We would end with a generics_def_id_map like:
//
// `[[fn#'b -> impl_trait#'b], [fn#'b -> impl_sized#'b]]`
//
// for the opaque type generated on `impl Sized + 'b`, we want the result to be: impl_sized#'b.
// So, if we were trying to find first from the start (outermost) would give the wrong result, impl_trait#'b.
self.generics_def_id_map
.iter()
.rev()
.find_map(|map| map.get(&local_def_id).copied())
.unwrap_or(local_def_id)
}
/// Freshen the `LoweringContext` and ready it to lower a nested item.
/// The lowered item is registered into `self.children`.
///
@ -588,7 +541,11 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
let current_attrs = std::mem::take(&mut self.attrs);
let current_bodies = std::mem::take(&mut self.bodies);
let current_node_ids = std::mem::take(&mut self.node_id_to_local_id);
let current_ident_and_label_to_local_id =
std::mem::take(&mut self.ident_and_label_to_local_id);
#[cfg(debug_assertions)]
let current_node_id_to_local_id = std::mem::take(&mut self.node_id_to_local_id);
let current_trait_map = std::mem::take(&mut self.trait_map);
let current_owner =
std::mem::replace(&mut self.current_hir_id_owner, hir::OwnerId { def_id });
@ -602,8 +559,11 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
// and the caller to refer to some of the subdefinitions' nodes' `LocalDefId`s.
// Always allocate the first `HirId` for the owner itself.
let _old = self.node_id_to_local_id.insert(owner, hir::ItemLocalId::ZERO);
debug_assert_eq!(_old, None);
#[cfg(debug_assertions)]
{
let _old = self.node_id_to_local_id.insert(owner, hir::ItemLocalId::ZERO);
debug_assert_eq!(_old, None);
}
let item = self.with_def_id_parent(def_id, f);
debug_assert_eq!(def_id, item.def_id().def_id);
@ -614,7 +574,12 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
self.attrs = current_attrs;
self.bodies = current_bodies;
self.node_id_to_local_id = current_node_ids;
self.ident_and_label_to_local_id = current_ident_and_label_to_local_id;
#[cfg(debug_assertions)]
{
self.node_id_to_local_id = current_node_id_to_local_id;
}
self.trait_map = current_trait_map;
self.current_hir_id_owner = current_owner;
self.item_local_id_counter = current_local_counter;
@ -632,27 +597,6 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
result
}
/// Installs the remapping `remap` in scope while `f` is being executed.
/// This causes references to the `LocalDefId` keys to be changed to
/// refer to the values instead.
///
/// The remapping is used when one piece of AST expands to multiple
/// pieces of HIR. For example, the function `fn foo<'a>(...) -> impl Debug + 'a`,
/// expands to both a function definition (`foo`) and a TAIT for the return value,
/// both of which have a lifetime parameter `'a`. The remapping allows us to
/// rewrite the `'a` in the return value to refer to the
/// `'a` declared on the TAIT, instead of the function.
fn with_remapping<R>(
&mut self,
remap: LocalDefIdMap<LocalDefId>,
f: impl FnOnce(&mut Self) -> R,
) -> R {
self.generics_def_id_map.push(remap);
let res = f(self);
self.generics_def_id_map.pop();
res
}
fn make_owner_info(&mut self, node: hir::OwnerNode<'hir>) -> &'hir hir::OwnerInfo<'hir> {
let attrs = std::mem::take(&mut self.attrs);
let mut bodies = std::mem::take(&mut self.bodies);
@ -680,39 +624,37 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
self.arena.alloc(hir::OwnerInfo { nodes, parenting, attrs, trait_map })
}
/// This method allocates a new `HirId` for the given `NodeId` and stores it in
/// the `LoweringContext`'s `NodeId => HirId` map.
/// This method allocates a new `HirId` for the given `NodeId`.
/// Take care not to call this method if the resulting `HirId` is then not
/// actually used in the HIR, as that would trigger an assertion in the
/// `HirIdValidator` later on, which makes sure that all `NodeId`s got mapped
/// properly. Calling the method twice with the same `NodeId` is fine though.
/// properly. Calling the method twice with the same `NodeId` is also forbidden.
#[instrument(level = "debug", skip(self), ret)]
fn lower_node_id(&mut self, ast_node_id: NodeId) -> HirId {
assert_ne!(ast_node_id, DUMMY_NODE_ID);
match self.node_id_to_local_id.entry(ast_node_id) {
Entry::Occupied(o) => HirId { owner: self.current_hir_id_owner, local_id: *o.get() },
Entry::Vacant(v) => {
// Generate a new `HirId`.
let owner = self.current_hir_id_owner;
let local_id = self.item_local_id_counter;
let hir_id = HirId { owner, local_id };
let owner = self.current_hir_id_owner;
let local_id = self.item_local_id_counter;
assert_ne!(local_id, hir::ItemLocalId::ZERO);
self.item_local_id_counter.increment_by(1);
let hir_id = HirId { owner, local_id };
v.insert(local_id);
self.item_local_id_counter.increment_by(1);
assert_ne!(local_id, hir::ItemLocalId::ZERO);
if let Some(def_id) = self.opt_local_def_id(ast_node_id) {
self.children.push((def_id, hir::MaybeOwner::NonOwner(hir_id)));
}
if let Some(traits) = self.resolver.trait_map.remove(&ast_node_id) {
self.trait_map.insert(hir_id.local_id, traits.into_boxed_slice());
}
hir_id
}
if let Some(def_id) = self.opt_local_def_id(ast_node_id) {
self.children.push((def_id, hir::MaybeOwner::NonOwner(hir_id)));
}
if let Some(traits) = self.resolver.trait_map.remove(&ast_node_id) {
self.trait_map.insert(hir_id.local_id, traits.into_boxed_slice());
}
// Check whether the same `NodeId` is lowered more than once.
#[cfg(debug_assertions)]
{
let old = self.node_id_to_local_id.insert(ast_node_id, local_id);
assert_eq!(old, None);
}
hir_id
}
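A standalone model of the bookkeeping above, including the new rule that the same `NodeId` must not be lowered twice (simplified types, not compiler code; the real context only keeps the map in debug builds, per the `#[cfg(debug_assertions)]` blocks above):

use std::collections::HashMap;

#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
struct NodeId(u32);

#[derive(Clone, Copy, PartialEq, Eq, Debug)]
struct LocalId(u32);

struct Ctx {
    next: u32,
    node_id_to_local_id: HashMap<NodeId, LocalId>,
}

impl Ctx {
    fn lower_node_id(&mut self, id: NodeId) -> LocalId {
        // Allocate the next local id unconditionally...
        let local = LocalId(self.next);
        self.next += 1;
        // ...and record it, asserting (in debug builds) that this
        // `NodeId` was never lowered before.
        let old = self.node_id_to_local_id.insert(id, local);
        debug_assert_eq!(old, None);
        local
    }
}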
/// Generate a new `HirId` without a backing `NodeId`.
@ -729,7 +671,7 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
fn lower_res(&mut self, res: Res<NodeId>) -> Res {
let res: Result<Res, ()> = res.apply_id(|id| {
let owner = self.current_hir_id_owner;
let local_id = self.node_id_to_local_id.get(&id).copied().ok_or(())?;
let local_id = self.ident_and_label_to_local_id.get(&id).copied().ok_or(())?;
Ok(HirId { owner, local_id })
});
trace!(?res);
@ -1474,7 +1416,7 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
fn lower_opaque_impl_trait(
&mut self,
span: Span,
origin: hir::OpaqueTyOrigin,
origin: hir::OpaqueTyOrigin<LocalDefId>,
opaque_ty_node_id: NodeId,
bounds: &GenericBounds,
itctx: ImplTraitContext,
@ -1486,27 +1428,6 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
// frequently opened issues show.
let opaque_ty_span = self.mark_span_with_reason(DesugaringKind::OpaqueTy, span, None);
// Whether this opaque always captures lifetimes in scope.
// Right now, this is all RPITIT and TAITs, and when `lifetime_capture_rules_2024`
// is enabled. We don't check the edition of the span here, since this is done
// on a per-opaque basis to account for nested opaques.
let always_capture_in_scope = match origin {
_ if self.tcx.features().lifetime_capture_rules_2024() => true,
hir::OpaqueTyOrigin::TyAlias { .. } => true,
hir::OpaqueTyOrigin::FnReturn { in_trait_or_impl, .. } => in_trait_or_impl.is_some(),
hir::OpaqueTyOrigin::AsyncFn { .. } => {
unreachable!("should be using `lower_coroutine_fn_ret_ty`")
}
};
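For context, the rule this `always_capture_in_scope` flag feeds, shown on a standalone example (edition-dependent behavior, not code from this diff):

// Rust 2021: a plain `impl Sized` return type does not capture `'a`.
// Rust 2024, or with `lifetime_capture_rules_2024` enabled: all in-scope
// lifetimes are captured by default, and an explicit `use<>` bound is
// the opt-out:
fn f<'a>(x: &'a u8) -> impl Sized + use<> {
    *x // returns an owned `u8`, so nothing needs to be captured
}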
let captured_lifetimes_to_duplicate = lifetime_collector::lifetimes_for_opaque(
self.resolver,
always_capture_in_scope,
opaque_ty_node_id,
bounds,
span,
);
debug!(?captured_lifetimes_to_duplicate);
// Feature gate for RPITIT + use<..>
match origin {
rustc_hir::OpaqueTyOrigin::FnReturn { in_trait_or_impl: Some(_), .. } => {
@ -1529,22 +1450,15 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
_ => {}
}
self.lower_opaque_inner(
opaque_ty_node_id,
origin,
captured_lifetimes_to_duplicate,
span,
opaque_ty_span,
|this| this.lower_param_bounds(bounds, itctx),
)
self.lower_opaque_inner(opaque_ty_node_id, origin, opaque_ty_span, |this| {
this.lower_param_bounds(bounds, itctx)
})
}
fn lower_opaque_inner(
&mut self,
opaque_ty_node_id: NodeId,
origin: hir::OpaqueTyOrigin,
captured_lifetimes_to_duplicate: FxIndexSet<Lifetime>,
span: Span,
origin: hir::OpaqueTyOrigin<LocalDefId>,
opaque_ty_span: Span,
lower_item_bounds: impl FnOnce(&mut Self) -> &'hir [hir::GenericBound<'hir>],
) -> hir::TyKind<'hir> {
@ -1552,145 +1466,19 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
let opaque_ty_hir_id = self.lower_node_id(opaque_ty_node_id);
debug!(?opaque_ty_def_id, ?opaque_ty_hir_id);
// Map from captured (old) lifetime to synthetic (new) lifetime.
// Used to resolve lifetimes in the bounds of the opaque.
let mut captured_to_synthesized_mapping = LocalDefIdMap::default();
// List of (early-bound) synthetic lifetimes that are owned by the opaque.
// This is used to create the `hir::Generics` owned by the opaque.
let mut synthesized_lifetime_definitions = vec![];
// Pairs of lifetime arg (that resolves to the captured lifetime)
// and the def-id of the (early-bound) synthetic lifetime definition.
// This is used both to create generics for the `TyKind::OpaqueDef` that
// we return, and also as a captured lifetime mapping for RPITITs.
let mut synthesized_lifetime_args = vec![];
for lifetime in captured_lifetimes_to_duplicate {
let res = self.resolver.get_lifetime_res(lifetime.id).unwrap_or(LifetimeRes::Error);
let (old_def_id, missing_kind) = match res {
LifetimeRes::Param { param: old_def_id, binder: _ } => (old_def_id, None),
LifetimeRes::Fresh { param, kind, .. } => {
debug_assert_eq!(lifetime.ident.name, kw::UnderscoreLifetime);
if let Some(old_def_id) = self.orig_opt_local_def_id(param) {
(old_def_id, Some(kind))
} else {
self.dcx()
.span_delayed_bug(lifetime.ident.span, "no def-id for fresh lifetime");
continue;
}
}
// Opaques do not capture `'static`
LifetimeRes::Static { .. } | LifetimeRes::Error => {
continue;
}
res => {
let bug_msg = format!(
"Unexpected lifetime resolution {:?} for {:?} at {:?}",
res, lifetime.ident, lifetime.ident.span
);
span_bug!(lifetime.ident.span, "{}", bug_msg);
}
};
if captured_to_synthesized_mapping.get(&old_def_id).is_none() {
// Create a new lifetime parameter local to the opaque.
let duplicated_lifetime_node_id = self.next_node_id();
let duplicated_lifetime_def_id = self.create_def(
opaque_ty_def_id,
duplicated_lifetime_node_id,
lifetime.ident.name,
DefKind::LifetimeParam,
self.lower_span(lifetime.ident.span),
);
captured_to_synthesized_mapping.insert(old_def_id, duplicated_lifetime_def_id);
// FIXME: Instead of doing this, we could move this whole loop
// into the `with_hir_id_owner`, then just directly construct
// the `hir::GenericParam` here.
synthesized_lifetime_definitions.push((
duplicated_lifetime_node_id,
duplicated_lifetime_def_id,
self.lower_ident(lifetime.ident),
missing_kind,
));
// Now make an arg that we can use for the generic params of the opaque tykind.
let id = self.next_node_id();
let lifetime_arg = self.new_named_lifetime_with_res(id, lifetime.ident, res);
let duplicated_lifetime_def_id = self.local_def_id(duplicated_lifetime_node_id);
synthesized_lifetime_args.push((lifetime_arg, duplicated_lifetime_def_id))
}
}
let opaque_ty_def = self.with_def_id_parent(opaque_ty_def_id, |this| {
// Install the remapping from old to new (if any). This makes sure that
// any lifetimes that would have resolved to the def-id of captured
// lifetimes are remapped to the new *synthetic* lifetimes of the opaque.
let bounds = this
.with_remapping(captured_to_synthesized_mapping, |this| lower_item_bounds(this));
let generic_params =
this.arena.alloc_from_iter(synthesized_lifetime_definitions.iter().map(
|&(new_node_id, new_def_id, ident, missing_kind)| {
let hir_id = this.lower_node_id(new_node_id);
let (name, kind) = if ident.name == kw::UnderscoreLifetime {
(
hir::ParamName::Fresh,
hir::LifetimeParamKind::Elided(
missing_kind.unwrap_or(MissingLifetimeKind::Underscore),
),
)
} else {
(hir::ParamName::Plain(ident), hir::LifetimeParamKind::Explicit)
};
hir::GenericParam {
hir_id,
def_id: new_def_id,
name,
span: ident.span,
pure_wrt_drop: false,
kind: hir::GenericParamKind::Lifetime { kind },
colon_span: None,
source: hir::GenericParamSource::Generics,
}
},
));
debug!("lower_async_fn_ret_ty: generic_params={:#?}", generic_params);
let lifetime_mapping = self.arena.alloc_slice(&synthesized_lifetime_args);
trace!("registering opaque type with id {:#?}", opaque_ty_def_id);
let bounds = lower_item_bounds(this);
let opaque_ty_def = hir::OpaqueTy {
hir_id: opaque_ty_hir_id,
def_id: opaque_ty_def_id,
generics: this.arena.alloc(hir::Generics {
params: generic_params,
predicates: &[],
has_where_clause_predicates: false,
where_clause_span: this.lower_span(span),
span: this.lower_span(span),
}),
bounds,
origin,
lifetime_mapping,
span: this.lower_span(opaque_ty_span),
};
this.arena.alloc(opaque_ty_def)
});
let generic_args = self.arena.alloc_from_iter(
synthesized_lifetime_args
.iter()
.map(|(lifetime, _)| hir::GenericArg::Lifetime(*lifetime)),
);
// Create the `Foo<...>` reference itself. Note that the `type
// Foo = impl Trait` is, internally, created as a child of the
// async fn, so the *type parameters* are inherited. It's
// only the lifetime parameters that we must supply.
hir::TyKind::OpaqueDef(opaque_ty_def, generic_args)
hir::TyKind::OpaqueDef(opaque_ty_def)
}
fn lower_precise_capturing_args(
@ -1865,20 +1653,13 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
CoroutineKind::Async { return_impl_trait_id, .. } => (return_impl_trait_id, None),
CoroutineKind::Gen { return_impl_trait_id, .. } => (return_impl_trait_id, None),
CoroutineKind::AsyncGen { return_impl_trait_id, .. } => {
(return_impl_trait_id, Some(self.allow_async_iterator.clone()))
(return_impl_trait_id, Some(Lrc::clone(&self.allow_async_iterator)))
}
};
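Several hunks in this sync replace `.clone()` on `Lrc`/`Rc` fields with the fully qualified form, which makes the cheap reference-count bump explicit and cannot silently turn into a deep clone if the field's type changes; a minimal illustration:

use std::rc::Rc;

fn main() {
    let data: Rc<Vec<u8>> = Rc::new(vec![1, 2, 3]);
    let alias = Rc::clone(&data); // refcount bump, not a deep copy
    assert_eq!(Rc::strong_count(&data), 2);
    assert!(Rc::ptr_eq(&data, &alias));
}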
let opaque_ty_span =
self.mark_span_with_reason(DesugaringKind::Async, span, allowed_features);
let captured_lifetimes = self
.resolver
.extra_lifetime_params(opaque_ty_node_id)
.into_iter()
.map(|(ident, id, _)| Lifetime { id, ident })
.collect();
let in_trait_or_impl = match fn_kind {
FnDeclKind::Trait => Some(hir::RpitContext::Trait),
FnDeclKind::Impl => Some(hir::RpitContext::TraitImpl),
@ -1889,8 +1670,6 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
let opaque_ty_ref = self.lower_opaque_inner(
opaque_ty_node_id,
hir::OpaqueTyOrigin::AsyncFn { parent: fn_def_id, in_trait_or_impl },
captured_lifetimes,
span,
opaque_ty_span,
|this| {
let bound = this.lower_coroutine_fn_output_type_to_bound(
@ -1987,10 +1766,7 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
res: LifetimeRes,
) -> &'hir hir::Lifetime {
let res = match res {
LifetimeRes::Param { param, .. } => {
let param = self.get_remapped_def_id(param);
hir::LifetimeName::Param(param)
}
LifetimeRes::Param { param, .. } => hir::LifetimeName::Param(param),
LifetimeRes::Fresh { param, .. } => {
let param = self.local_def_id(param);
hir::LifetimeName::Param(param)


@ -1,151 +0,0 @@
use rustc_ast::visit::{self, BoundKind, LifetimeCtxt, Visitor};
use rustc_ast::{
GenericBound, GenericBounds, Lifetime, NodeId, PathSegment, PolyTraitRef, Ty, TyKind,
};
use rustc_data_structures::fx::FxIndexSet;
use rustc_hir::def::{DefKind, LifetimeRes, Res};
use rustc_middle::span_bug;
use rustc_middle::ty::ResolverAstLowering;
use rustc_span::Span;
use rustc_span::symbol::{Ident, kw};
use super::ResolverAstLoweringExt;
struct LifetimeCollectVisitor<'ast> {
resolver: &'ast mut ResolverAstLowering,
always_capture_in_scope: bool,
current_binders: Vec<NodeId>,
collected_lifetimes: FxIndexSet<Lifetime>,
}
impl<'ast> LifetimeCollectVisitor<'ast> {
fn new(resolver: &'ast mut ResolverAstLowering, always_capture_in_scope: bool) -> Self {
Self {
resolver,
always_capture_in_scope,
current_binders: Vec::new(),
collected_lifetimes: FxIndexSet::default(),
}
}
fn visit_opaque(&mut self, opaque_ty_node_id: NodeId, bounds: &'ast GenericBounds, span: Span) {
// If we're in edition 2024 or within a TAIT or RPITIT, *and* there is no
// `use<>` statement to override the default capture behavior, then
// capture all of the in-scope lifetimes.
if (self.always_capture_in_scope || span.at_least_rust_2024())
&& bounds.iter().all(|bound| !matches!(bound, GenericBound::Use(..)))
{
for (ident, id, _) in self.resolver.extra_lifetime_params(opaque_ty_node_id) {
self.record_lifetime_use(Lifetime { id, ident });
}
}
// We also recurse on the bounds to make sure we capture all the lifetimes
// mentioned in the bounds. These may disagree with the `use<>` list, in which
// case we will error on these later. We will also recurse to visit any
// nested opaques, which may *implicitly* capture lifetimes.
for bound in bounds {
self.visit_param_bound(bound, BoundKind::Bound);
}
}
fn record_lifetime_use(&mut self, lifetime: Lifetime) {
match self.resolver.get_lifetime_res(lifetime.id).unwrap_or(LifetimeRes::Error) {
LifetimeRes::Param { binder, .. } | LifetimeRes::Fresh { binder, .. } => {
if !self.current_binders.contains(&binder) {
self.collected_lifetimes.insert(lifetime);
}
}
LifetimeRes::Static { .. } | LifetimeRes::Error => {
self.collected_lifetimes.insert(lifetime);
}
LifetimeRes::Infer => {}
res => {
let bug_msg = format!(
"Unexpected lifetime resolution {:?} for {:?} at {:?}",
res, lifetime.ident, lifetime.ident.span
);
span_bug!(lifetime.ident.span, "{}", bug_msg);
}
}
}
/// This collects lifetimes that are elided, for nodes like `Foo<T>` where there are no explicit
/// lifetime nodes. It is equivalent to having "pseudo" nodes introduced for each of the node ids
/// in the list start..end.
fn record_elided_anchor(&mut self, node_id: NodeId, span: Span) {
if let Some(LifetimeRes::ElidedAnchor { start, end }) =
self.resolver.get_lifetime_res(node_id)
{
for i in start..end {
let lifetime = Lifetime { id: i, ident: Ident::new(kw::UnderscoreLifetime, span) };
self.record_lifetime_use(lifetime);
}
}
}
}
impl<'ast> Visitor<'ast> for LifetimeCollectVisitor<'ast> {
fn visit_lifetime(&mut self, lifetime: &'ast Lifetime, _: LifetimeCtxt) {
self.record_lifetime_use(*lifetime);
}
fn visit_path_segment(&mut self, path_segment: &'ast PathSegment) {
self.record_elided_anchor(path_segment.id, path_segment.ident.span);
visit::walk_path_segment(self, path_segment);
}
fn visit_poly_trait_ref(&mut self, t: &'ast PolyTraitRef) {
self.current_binders.push(t.trait_ref.ref_id);
visit::walk_poly_trait_ref(self, t);
self.current_binders.pop();
}
fn visit_ty(&mut self, t: &'ast Ty) {
match &t.kind {
TyKind::Path(None, _) => {
// We can sometimes encounter bare trait objects
// which are represented in AST as paths.
if let Some(partial_res) = self.resolver.get_partial_res(t.id)
&& let Some(Res::Def(DefKind::Trait | DefKind::TraitAlias, _)) =
partial_res.full_res()
{
self.current_binders.push(t.id);
visit::walk_ty(self, t);
self.current_binders.pop();
} else {
visit::walk_ty(self, t);
}
}
TyKind::BareFn(_) => {
self.current_binders.push(t.id);
visit::walk_ty(self, t);
self.current_binders.pop();
}
TyKind::Ref(None, _) | TyKind::PinnedRef(None, _) => {
self.record_elided_anchor(t.id, t.span);
visit::walk_ty(self, t);
}
TyKind::ImplTrait(opaque_ty_node_id, bounds) => {
self.visit_opaque(*opaque_ty_node_id, bounds, t.span)
}
_ => {
visit::walk_ty(self, t);
}
}
}
}
pub(crate) fn lifetimes_for_opaque(
resolver: &mut ResolverAstLowering,
always_capture_in_scope: bool,
opaque_ty_node_id: NodeId,
bounds: &GenericBounds,
span: Span,
) -> FxIndexSet<Lifetime> {
let mut visitor = LifetimeCollectVisitor::new(resolver, always_capture_in_scope);
visitor.visit_opaque(opaque_ty_node_id, bounds, span);
visitor.collected_lifetimes
}


@ -21,13 +21,20 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
fn lower_pat_mut(&mut self, mut pattern: &Pat) -> hir::Pat<'hir> {
ensure_sufficient_stack(|| {
// loop here to avoid recursion
let pat_hir_id = self.lower_node_id(pattern.id);
let node = loop {
match &pattern.kind {
PatKind::Wild => break hir::PatKind::Wild,
PatKind::Never => break hir::PatKind::Never,
PatKind::Ident(binding_mode, ident, sub) => {
let lower_sub = |this: &mut Self| sub.as_ref().map(|s| this.lower_pat(s));
break self.lower_pat_ident(pattern, *binding_mode, *ident, lower_sub);
break self.lower_pat_ident(
pattern,
*binding_mode,
*ident,
pat_hir_id,
lower_sub,
);
}
PatKind::Lit(e) => {
break hir::PatKind::Lit(self.lower_expr_within_pat(e, false));
@ -119,7 +126,7 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
}
};
self.pat_with_node_id_of(pattern, node)
self.pat_with_node_id_of(pattern, node, pat_hir_id)
})
}
@ -187,10 +194,12 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
let mut prev_rest_span = None;
// Lowers `$bm $ident @ ..` to `$bm $ident @ _`.
let lower_rest_sub = |this: &mut Self, pat, &ann, &ident, sub| {
let lower_sub = |this: &mut Self| Some(this.pat_wild_with_node_id_of(sub));
let node = this.lower_pat_ident(pat, ann, ident, lower_sub);
this.pat_with_node_id_of(pat, node)
let lower_rest_sub = |this: &mut Self, pat: &Pat, &ann, &ident, sub: &Pat| {
let sub_hir_id = this.lower_node_id(sub.id);
let lower_sub = |this: &mut Self| Some(this.pat_wild_with_node_id_of(sub, sub_hir_id));
let pat_hir_id = this.lower_node_id(pat.id);
let node = this.lower_pat_ident(pat, ann, ident, pat_hir_id, lower_sub);
this.pat_with_node_id_of(pat, node, pat_hir_id)
};
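A concrete instance of the `$bm $ident @ ..` lowering that `lower_rest_sub` handles (plain surface Rust, for illustration only):

fn main() {
    let arr = [1, 2, 3, 4];
    // The `rest @ ..` sub-slice pattern is lowered as if it were
    // `rest @ _` over the matched subslice.
    let [first, rest @ ..] = arr;
    assert_eq!(first, 1);
    assert_eq!(rest, [2, 3, 4]);
}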
let mut iter = pats.iter();
@ -200,7 +209,8 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
// Found a sub-slice pattern `..`. Record, lower it to `_`, and stop here.
PatKind::Rest => {
prev_rest_span = Some(pat.span);
slice = Some(self.pat_wild_with_node_id_of(pat));
let hir_id = self.lower_node_id(pat.id);
slice = Some(self.pat_wild_with_node_id_of(pat, hir_id));
break;
}
// Found a sub-slice pattern `$binding_mode $ident @ ..`.
@ -248,19 +258,35 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
p: &Pat,
annotation: BindingMode,
ident: Ident,
hir_id: hir::HirId,
lower_sub: impl FnOnce(&mut Self) -> Option<&'hir hir::Pat<'hir>>,
) -> hir::PatKind<'hir> {
match self.resolver.get_partial_res(p.id).map(|d| d.expect_full_res()) {
// `None` can occur in body-less function signatures
res @ (None | Some(Res::Local(_))) => {
let canonical_id = match res {
Some(Res::Local(id)) => id,
_ => p.id,
let binding_id = match res {
Some(Res::Local(id)) => {
// In `Or` patterns like `VariantA(s) | VariantB(s, _)`, multiple identifier patterns
// will be resolved to the same `Res::Local`. Thus they just share a single
// `HirId`.
if id == p.id {
self.ident_and_label_to_local_id.insert(id, hir_id.local_id);
hir_id
} else {
hir::HirId {
owner: self.current_hir_id_owner,
local_id: self.ident_and_label_to_local_id[&id],
}
}
}
_ => {
self.ident_and_label_to_local_id.insert(p.id, hir_id.local_id);
hir_id
}
};
hir::PatKind::Binding(
annotation,
self.lower_node_id(canonical_id),
binding_id,
self.lower_ident(ident),
lower_sub(self),
)
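The shared-binding case described in the comment above, as surface syntax (illustrative only):

fn main() {
    let r: Result<String, String> = Ok("hi".to_owned());
    match r {
        // Both `s` bindings resolve to one `Res::Local`, so both
        // occurrences lower to the same binding `HirId`.
        Ok(s) | Err(s) => println!("{s}"),
    }
}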
@ -280,18 +306,18 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
}
}
fn pat_wild_with_node_id_of(&mut self, p: &Pat) -> &'hir hir::Pat<'hir> {
self.arena.alloc(self.pat_with_node_id_of(p, hir::PatKind::Wild))
fn pat_wild_with_node_id_of(&mut self, p: &Pat, hir_id: hir::HirId) -> &'hir hir::Pat<'hir> {
self.arena.alloc(self.pat_with_node_id_of(p, hir::PatKind::Wild, hir_id))
}
/// Construct a `Pat` with the `HirId` of `p.id` lowered.
fn pat_with_node_id_of(&mut self, p: &Pat, kind: hir::PatKind<'hir>) -> hir::Pat<'hir> {
hir::Pat {
hir_id: self.lower_node_id(p.id),
kind,
span: self.lower_span(p.span),
default_binding_modes: true,
}
/// Construct a `Pat` with the `HirId` of `p.id` already lowered.
fn pat_with_node_id_of(
&mut self,
p: &Pat,
kind: hir::PatKind<'hir>,
hir_id: hir::HirId,
) -> hir::Pat<'hir> {
hir::Pat { hir_id, kind, span: self.lower_span(p.span), default_binding_modes: true }
}
/// Emit a friendly error for extra `..` patterns in a tuple/tuple struct/slice pattern.


@ -73,7 +73,7 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
let bound_modifier_allowed_features = if let Res::Def(DefKind::Trait, async_def_id) = res
&& self.tcx.async_fn_trait_kind_from_def_id(async_def_id).is_some()
{
Some(self.allow_async_fn_traits.clone())
Some(Lrc::clone(&self.allow_async_fn_traits))
} else {
None
};


@ -677,9 +677,8 @@ impl<'a> AstValidator<'a> {
Self::check_decl_no_pat(&bfty.decl, |span, _, _| {
self.dcx().emit_err(errors::PatternFnPointer { span });
});
if let Extern::Implicit(_) = bfty.ext {
let sig_span = self.sess.source_map().next_point(ty.span.shrink_to_lo());
self.maybe_lint_missing_abi(sig_span, ty.id);
if let Extern::Implicit(extern_span) = bfty.ext {
self.maybe_lint_missing_abi(extern_span, ty.id);
}
}
TyKind::TraitObject(bounds, ..) => {
@ -953,7 +952,7 @@ impl<'a> Visitor<'a> for AstValidator<'a> {
walk_list!(self, visit_attribute, &item.attrs);
return; // Avoid visiting again.
}
ItemKind::ForeignMod(ForeignMod { abi, safety, .. }) => {
ItemKind::ForeignMod(ForeignMod { extern_span, abi, safety, .. }) => {
self.with_in_extern_mod(*safety, |this| {
let old_item = mem::replace(&mut this.extern_mod, Some(item.span));
this.visibility_not_permitted(
@ -977,7 +976,7 @@ impl<'a> Visitor<'a> for AstValidator<'a> {
}
if abi.is_none() {
this.maybe_lint_missing_abi(item.span, item.id);
this.maybe_lint_missing_abi(*extern_span, item.id);
}
visit::walk_item(this, item);
this.extern_mod = old_item;
@ -1350,13 +1349,13 @@ impl<'a> Visitor<'a> for AstValidator<'a> {
if let FnKind::Fn(
_,
_,
FnSig { span: sig_span, header: FnHeader { ext: Extern::Implicit(_), .. }, .. },
FnSig { header: FnHeader { ext: Extern::Implicit(extern_span), .. }, .. },
_,
_,
_,
) = fk
{
self.maybe_lint_missing_abi(*sig_span, id);
self.maybe_lint_missing_abi(*extern_span, id);
}
// Functions without bodies cannot have patterns.


@ -7,6 +7,7 @@ edition = "2021"
# tidy-alphabetical-start
itertools = "0.12"
rustc_ast = { path = "../rustc_ast" }
rustc_data_structures = { path = "../rustc_data_structures" }
rustc_lexer = { path = "../rustc_lexer" }
rustc_span = { path = "../rustc_span" }
thin-vec = "0.2.12"


@ -21,6 +21,7 @@ use rustc_ast::{
GenericBound, InlineAsmOperand, InlineAsmOptions, InlineAsmRegOrRegClass,
InlineAsmTemplatePiece, PatKind, RangeEnd, RangeSyntax, Safety, SelfKind, Term, attr,
};
use rustc_data_structures::sync::Lrc;
use rustc_span::edition::Edition;
use rustc_span::source_map::{SourceMap, Spanned};
use rustc_span::symbol::{Ident, IdentPrinter, Symbol, kw, sym};
@ -105,7 +106,7 @@ fn split_block_comment_into_lines(text: &str, col: CharPos) -> Vec<String> {
fn gather_comments(sm: &SourceMap, path: FileName, src: String) -> Vec<Comment> {
let sm = SourceMap::new(sm.path_mapping().clone());
let source_file = sm.new_source_file(path, src);
let text = (*source_file.src.as_ref().unwrap()).clone();
let text = Lrc::clone(&(*source_file.src.as_ref().unwrap()));
let text: &str = text.as_str();
let start_bpos = source_file.start_pos;


@ -1489,6 +1489,7 @@ impl<'infcx, 'tcx> MirBorrowckCtxt<'_, 'infcx, 'tcx> {
&borrow_msg,
&value_msg,
);
self.note_due_to_edition_2024_opaque_capture_rules(borrow, &mut err);
borrow_spans.var_path_only_subdiag(&mut err, crate::InitializationRequiringAction::Borrow);
@ -1524,7 +1525,6 @@ impl<'infcx, 'tcx> MirBorrowckCtxt<'_, 'infcx, 'tcx> {
matches!(
adj.kind,
ty::adjustment::Adjust::Borrow(ty::adjustment::AutoBorrow::Ref(
_,
ty::adjustment::AutoBorrowMutability::Not
| ty::adjustment::AutoBorrowMutability::Mut {
allow_two_phase_borrow: ty::adjustment::AllowTwoPhase::No
@ -1561,6 +1561,8 @@ impl<'infcx, 'tcx> MirBorrowckCtxt<'_, 'infcx, 'tcx> {
borrow_span,
&self.describe_any_place(borrow.borrowed_place.as_ref()),
);
self.note_due_to_edition_2024_opaque_capture_rules(borrow, &mut err);
borrow_spans.var_subdiag(&mut err, Some(borrow.kind), |kind, var_span| {
use crate::session_diagnostics::CaptureVarCause::*;
let place = &borrow.borrowed_place;
@ -1820,6 +1822,7 @@ impl<'infcx, 'tcx> MirBorrowckCtxt<'_, 'infcx, 'tcx> {
unreachable!()
}
};
self.note_due_to_edition_2024_opaque_capture_rules(issued_borrow, &mut err);
if issued_spans == borrow_spans {
borrow_spans.var_subdiag(&mut err, Some(gen_borrow_kind), |kind, var_span| {
@ -2860,7 +2863,7 @@ impl<'infcx, 'tcx> MirBorrowckCtxt<'_, 'infcx, 'tcx> {
debug!(?place_desc, ?explanation);
let err = match (place_desc, explanation) {
let mut err = match (place_desc, explanation) {
// If the outlives constraint comes from inside the closure,
// for example:
//
@ -2939,6 +2942,7 @@ impl<'infcx, 'tcx> MirBorrowckCtxt<'_, 'infcx, 'tcx> {
explanation,
),
};
self.note_due_to_edition_2024_opaque_capture_rules(borrow, &mut err);
self.buffer_error(err);
}
@ -3777,6 +3781,7 @@ impl<'infcx, 'tcx> MirBorrowckCtxt<'_, 'infcx, 'tcx> {
}
let mut err = self.cannot_assign_to_borrowed(span, loan_span, &descr_place);
self.note_due_to_edition_2024_opaque_capture_rules(loan, &mut err);
loan_spans.var_subdiag(&mut err, Some(loan.kind), |kind, var_span| {
use crate::session_diagnostics::CaptureVarCause::*;


@ -48,6 +48,7 @@ mod conflict_errors;
mod explain_borrow;
mod move_errors;
mod mutability_errors;
mod opaque_suggestions;
mod region_errors;
pub(crate) use bound_region_errors::{ToUniverseInfo, UniverseInfo};


@ -0,0 +1,224 @@
#![allow(rustc::diagnostic_outside_of_impl)]
#![allow(rustc::untranslatable_diagnostic)]
use std::ops::ControlFlow;
use either::Either;
use rustc_data_structures::fx::FxIndexSet;
use rustc_errors::{Applicability, Diag};
use rustc_hir as hir;
use rustc_hir::def_id::DefId;
use rustc_middle::mir::{self, ConstraintCategory, Location};
use rustc_middle::ty::{
self, Ty, TyCtxt, TypeSuperVisitable, TypeVisitable, TypeVisitableExt, TypeVisitor,
};
use rustc_span::Symbol;
use crate::MirBorrowckCtxt;
use crate::borrow_set::BorrowData;
use crate::consumers::RegionInferenceContext;
use crate::type_check::Locations;
impl<'infcx, 'tcx> MirBorrowckCtxt<'_, 'infcx, 'tcx> {
/// Try to note when an opaque is involved in a borrowck error and that
/// opaque captures lifetimes due to edition 2024.
// FIXME: This code is otherwise somewhat general, and could easily be adapted
// to explain why other things overcapture... like async fn and RPITITs.
pub(crate) fn note_due_to_edition_2024_opaque_capture_rules(
&self,
borrow: &BorrowData<'tcx>,
diag: &mut Diag<'_>,
) {
// We look at all the locals. Why locals? Because it's the best thing
// I could think of that's correlated with the *instantiated* higher-ranked
// binder for calls, since we don't really store those anywhere else.
for ty in self.body.local_decls.iter().map(|local| local.ty) {
if !ty.has_opaque_types() {
continue;
}
let tcx = self.infcx.tcx;
let ControlFlow::Break((opaque_def_id, offending_region_idx, location)) = ty
.visit_with(&mut FindOpaqueRegion {
regioncx: &self.regioncx,
tcx,
borrow_region: borrow.region,
})
else {
continue;
};
// If an opaque explicitly captures a lifetime, then no need to point it out.
// FIXME: We should be using a better heuristic for `use<>`.
if tcx.rendered_precise_capturing_args(opaque_def_id).is_some() {
continue;
}
// If one of the opaque's bounds mentions the region, then no need to
// point it out, since it would've been captured on edition 2021 as well.
//
// Also, while we're at it, collect all the lifetimes that the opaque
// *does* mention. We'll use that for the `+ use<'a>` suggestion below.
let mut visitor = CheckExplicitRegionMentionAndCollectGenerics {
tcx,
offending_region_idx,
seen_opaques: [opaque_def_id].into_iter().collect(),
seen_lifetimes: Default::default(),
};
if tcx
.explicit_item_bounds(opaque_def_id)
.skip_binder()
.visit_with(&mut visitor)
.is_break()
{
continue;
}
// If we successfully located a terminator, then point it out
// and provide a suggestion if it's local.
match self.body.stmt_at(location) {
Either::Right(mir::Terminator { source_info, .. }) => {
diag.span_note(
source_info.span,
"this call may capture more lifetimes than intended, \
because Rust 2024 has adjusted the `impl Trait` lifetime capture rules",
);
let mut seen_generics: Vec<_> =
visitor.seen_lifetimes.iter().map(ToString::to_string).collect();
// Capture all in-scope ty/const params.
seen_generics.extend(
ty::GenericArgs::identity_for_item(tcx, opaque_def_id)
.iter()
.filter(|arg| {
matches!(
arg.unpack(),
ty::GenericArgKind::Type(_) | ty::GenericArgKind::Const(_)
)
})
.map(|arg| arg.to_string()),
);
if opaque_def_id.is_local() {
diag.span_suggestion_verbose(
tcx.def_span(opaque_def_id).shrink_to_hi(),
"add a precise capturing bound to avoid overcapturing",
format!(" + use<{}>", seen_generics.join(", ")),
Applicability::MaybeIncorrect,
);
} else {
diag.span_help(
tcx.def_span(opaque_def_id),
format!(
"if you can modify this crate, add a precise \
capturing bound to avoid overcapturing: `+ use<{}>`",
seen_generics.join(", ")
),
);
}
return;
}
Either::Left(_) => {}
}
}
}
}
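The scenario this note fires on, reduced to a standalone example (diagnostic text paraphrased):

// On edition 2024 a plain `impl Sized` return type captures the elided
// lifetime of `x`, so the result of `tail` would keep `v` borrowed.
// The suggested `+ use<>` bound opts out of the capture:
fn tail(x: &[u8]) -> impl Sized + use<> {
    x.len()
}

fn main() {
    let mut v = vec![1u8, 2, 3];
    let n = tail(&v);
    v.push(4); // fine with `+ use<>`; a borrowck error if `'_` were captured
    let _ = n;
}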
/// This visitor contains the bulk of the logic for this lint.
struct FindOpaqueRegion<'a, 'tcx> {
tcx: TyCtxt<'tcx>,
regioncx: &'a RegionInferenceContext<'tcx>,
borrow_region: ty::RegionVid,
}
impl<'tcx> TypeVisitor<TyCtxt<'tcx>> for FindOpaqueRegion<'_, 'tcx> {
type Result = ControlFlow<(DefId, usize, Location), ()>;
fn visit_ty(&mut self, ty: Ty<'tcx>) -> Self::Result {
// If we find an opaque in a local ty, then for each of its captured regions,
// try to find a path between that captured region and our borrow region...
if let ty::Alias(ty::Opaque, opaque) = *ty.kind()
&& let hir::OpaqueTyOrigin::FnReturn { parent, in_trait_or_impl: None } =
self.tcx.opaque_ty_origin(opaque.def_id)
{
let variances = self.tcx.variances_of(opaque.def_id);
for (idx, (arg, variance)) in std::iter::zip(opaque.args, variances).enumerate() {
// Skip uncaptured args.
if *variance == ty::Bivariant {
continue;
}
// We only care about regions.
let Some(opaque_region) = arg.as_region() else {
continue;
};
// Don't try to convert a late-bound region, which shouldn't exist anyways (yet).
if opaque_region.is_bound() {
continue;
}
let opaque_region_vid = self.regioncx.to_region_vid(opaque_region);
// Find a path between the borrow region and our opaque capture.
if let Some((path, _)) =
self.regioncx.find_constraint_paths_between_regions(self.borrow_region, |r| {
r == opaque_region_vid
})
{
for constraint in path {
// If we find a call in this path, then check if it defines the opaque.
if let ConstraintCategory::CallArgument(Some(call_ty)) = constraint.category
&& let ty::FnDef(call_def_id, _) = *call_ty.kind()
// This function defines the opaque :D
&& call_def_id == parent
&& let Locations::Single(location) = constraint.locations
{
return ControlFlow::Break((opaque.def_id, idx, location));
}
}
}
}
}
ty.super_visit_with(self)
}
}
struct CheckExplicitRegionMentionAndCollectGenerics<'tcx> {
tcx: TyCtxt<'tcx>,
offending_region_idx: usize,
seen_opaques: FxIndexSet<DefId>,
seen_lifetimes: FxIndexSet<Symbol>,
}
impl<'tcx> TypeVisitor<TyCtxt<'tcx>> for CheckExplicitRegionMentionAndCollectGenerics<'tcx> {
type Result = ControlFlow<(), ()>;
fn visit_ty(&mut self, ty: Ty<'tcx>) -> Self::Result {
match *ty.kind() {
ty::Alias(ty::Opaque, opaque) => {
if self.seen_opaques.insert(opaque.def_id) {
for (bound, _) in self
.tcx
.explicit_item_bounds(opaque.def_id)
.iter_instantiated_copied(self.tcx, opaque.args)
{
bound.visit_with(self)?;
}
}
ControlFlow::Continue(())
}
_ => ty.super_visit_with(self),
}
}
fn visit_region(&mut self, r: ty::Region<'tcx>) -> Self::Result {
match r.kind() {
ty::ReEarlyParam(param) => {
if param.index as usize == self.offending_region_idx {
ControlFlow::Break(())
} else {
self.seen_lifetimes.insert(param.name);
ControlFlow::Continue(())
}
}
_ => ControlFlow::Continue(()),
}
}
}


@ -830,7 +830,7 @@ impl<'tcx> MirBorrowckCtxt<'_, '_, 'tcx> {
///
/// [`OpaqueDef`]: hir::TyKind::OpaqueDef
fn get_future_inner_return_ty(&self, hir_ty: &'tcx hir::Ty<'tcx>) -> &'tcx hir::Ty<'tcx> {
let hir::TyKind::OpaqueDef(opaque_ty, _) = hir_ty.kind else {
let hir::TyKind::OpaqueDef(opaque_ty) = hir_ty.kind else {
span_bug!(
hir_ty.span,
"lowered return type of async fn is not OpaqueDef: {:?}",


@ -34,7 +34,7 @@ use rustc_infer::infer::{
use rustc_middle::mir::tcx::PlaceTy;
use rustc_middle::mir::*;
use rustc_middle::query::Providers;
use rustc_middle::ty::{self, ParamEnv, RegionVid, TyCtxt};
use rustc_middle::ty::{self, ParamEnv, RegionVid, TyCtxt, TypingMode};
use rustc_middle::{bug, span_bug};
use rustc_mir_dataflow::Analysis;
use rustc_mir_dataflow::impls::{
@ -193,9 +193,7 @@ fn do_mir_borrowck<'tcx>(
.map(|(idx, body)| (idx, MoveData::gather_moves(body, tcx, |_| true)));
let mut flow_inits = MaybeInitializedPlaces::new(tcx, body, &move_data)
.into_engine(tcx, body)
.pass_name("borrowck")
.iterate_to_fixpoint()
.iterate_to_fixpoint(tcx, body, Some("borrowck"))
.into_results_cursor(body);
let locals_are_invalidated_at_exit = tcx.hir().body_owner_kind(def).is_fn_or_closure();
@ -243,18 +241,21 @@ fn do_mir_borrowck<'tcx>(
// usage significantly on some benchmarks.
drop(flow_inits);
let flow_borrows = Borrows::new(tcx, body, &regioncx, &borrow_set)
.into_engine(tcx, body)
.pass_name("borrowck")
.iterate_to_fixpoint();
let flow_uninits = MaybeUninitializedPlaces::new(tcx, body, &move_data)
.into_engine(tcx, body)
.pass_name("borrowck")
.iterate_to_fixpoint();
let flow_ever_inits = EverInitializedPlaces::new(body, &move_data)
.into_engine(tcx, body)
.pass_name("borrowck")
.iterate_to_fixpoint();
let flow_borrows = Borrows::new(tcx, body, &regioncx, &borrow_set).iterate_to_fixpoint(
tcx,
body,
Some("borrowck"),
);
let flow_uninits = MaybeUninitializedPlaces::new(tcx, body, &move_data).iterate_to_fixpoint(
tcx,
body,
Some("borrowck"),
);
let flow_ever_inits = EverInitializedPlaces::new(body, &move_data).iterate_to_fixpoint(
tcx,
body,
Some("borrowck"),
);
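The dataflow API change applied throughout this hunk, side by side; the shape is inferred from the diff itself, and `SomeAnalysis` stands in for `Borrows`, `MaybeUninitializedPlaces`, and the like:

// Before: engine construction and pass naming were separate steps.
//     let flow = SomeAnalysis::new(tcx, body, &data)
//         .into_engine(tcx, body)
//         .pass_name("borrowck")
//         .iterate_to_fixpoint()
//         .into_results_cursor(body);
// After: one call takes the context, the body, and an optional pass name.
//     let flow = SomeAnalysis::new(tcx, body, &data)
//         .iterate_to_fixpoint(tcx, body, Some("borrowck"))
//         .into_results_cursor(body);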
let movable_coroutine =
// The first argument is the coroutine type passed by value
@ -440,7 +441,7 @@ pub struct BorrowckInferCtxt<'tcx> {
impl<'tcx> BorrowckInferCtxt<'tcx> {
pub(crate) fn new(tcx: TyCtxt<'tcx>, def_id: LocalDefId) -> Self {
let infcx = tcx.infer_ctxt().with_opaque_type_inference(def_id).build();
let infcx = tcx.infer_ctxt().build(TypingMode::analysis_in_body(tcx, def_id));
BorrowckInferCtxt { infcx, reg_var_to_origin: RefCell::new(Default::default()) }
}
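The same `TypingMode` migration appears in several crates in this sync; the before/after, taken from this hunk:

// Before: opaque-type inference was opted into on the builder.
//     let infcx = tcx.infer_ctxt().with_opaque_type_inference(def_id).build();
// After: the typing mode is supplied when the context is built.
//     let infcx = tcx.infer_ctxt().build(TypingMode::analysis_in_body(tcx, def_id));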


@ -107,13 +107,13 @@ pub(crate) fn compute_regions<'a, 'tcx>(
param_env,
body,
promoted,
universal_regions.clone(),
Rc::clone(&universal_regions),
location_table,
borrow_set,
&mut all_facts,
flow_inits,
move_data,
elements.clone(),
Rc::clone(&elements),
upvars,
);


@ -733,7 +733,7 @@ impl<'tcx> RegionInferenceContext<'tcx> {
}
// Now take member constraints into account.
let member_constraints = self.member_constraints.clone();
let member_constraints = Rc::clone(&self.member_constraints);
for m_c_i in member_constraints.indices(scc_a) {
self.apply_member_constraint(scc_a, m_c_i, member_constraints.choice_regions(m_c_i));
}
@ -1679,7 +1679,7 @@ impl<'tcx> RegionInferenceContext<'tcx> {
infcx: &InferCtxt<'tcx>,
errors_buffer: &mut RegionErrors<'tcx>,
) {
let member_constraints = self.member_constraints.clone();
let member_constraints = Rc::clone(&self.member_constraints);
for m_c_i in member_constraints.all_indices() {
debug!(?m_c_i);
let m_c = &member_constraints[m_c_i];


@ -9,6 +9,7 @@ use rustc_macros::extension;
use rustc_middle::ty::visit::TypeVisitableExt;
use rustc_middle::ty::{
self, GenericArgKind, GenericArgs, OpaqueHiddenType, OpaqueTypeKey, Ty, TyCtxt, TypeFoldable,
TypingMode,
};
use rustc_span::Span;
use rustc_trait_selection::error_reporting::InferCtxtErrorExt;
@ -340,14 +341,13 @@ fn check_opaque_type_well_formed<'tcx>(
parent_def_id = tcx.local_parent(parent_def_id);
}
// FIXME(-Znext-solver): We probably should use `&[]` instead of
// and prepopulate this `InferCtxt` with known opaque values, rather than
// allowing opaque types to be defined and checking them after the fact.
// FIXME(#132279): This should eventually use the already defined hidden types
// instead. Alternatively we'll entirely remove this function given we also check
// the opaque in `check_opaque_meets_bounds` later.
let infcx = tcx
.infer_ctxt()
.with_next_trait_solver(next_trait_solver)
.with_opaque_type_inference(parent_def_id)
.build();
.build(TypingMode::analysis_in_body(tcx, parent_def_id));
let ocx = ObligationCtxt::new_with_diagnostics(&infcx);
let identity_args = GenericArgs::identity_for_item(tcx, def_id);
@ -502,7 +502,7 @@ impl<'tcx> LazyOpaqueTyEnv<'tcx> {
}
let &Self { tcx, def_id, .. } = self;
let origin = tcx.opaque_type_origin(def_id);
let origin = tcx.local_opaque_ty_origin(def_id);
let parent = match origin {
hir::OpaqueTyOrigin::FnReturn { parent, .. }
| hir::OpaqueTyOrigin::AsyncFn { parent, .. }
@ -517,7 +517,9 @@ impl<'tcx> LazyOpaqueTyEnv<'tcx> {
},
);
let infcx = tcx.infer_ctxt().build();
// FIXME(#132279): It feels wrong to use `non_body_analysis` here given that we're
// in a body here.
let infcx = tcx.infer_ctxt().build(TypingMode::non_body_analysis());
let ocx = ObligationCtxt::new(&infcx);
let wf_tys = ocx.assumed_wf_types(param_env, parent).unwrap_or_else(|_| {


@ -134,7 +134,7 @@ pub(crate) fn type_check<'a, 'tcx>(
let mut constraints = MirTypeckRegionConstraints {
placeholder_indices: PlaceholderIndices::default(),
placeholder_index_to_region: IndexVec::default(),
liveness_constraints: LivenessValues::with_specific_points(elements.clone()),
liveness_constraints: LivenessValues::with_specific_points(Rc::clone(&elements)),
outlives_constraints: OutlivesConstraintSet::default(),
member_constraints: MemberConstraintSet::default(),
type_tests: Vec::default(),
@ -150,7 +150,7 @@ pub(crate) fn type_check<'a, 'tcx>(
infcx,
param_env,
implicit_region_bound,
universal_regions.clone(),
Rc::clone(&universal_regions),
&mut constraints,
);


@ -193,7 +193,7 @@ fn make_local_place<'tcx>(
);
}
let place = if is_ssa {
if let rustc_target::abi::Abi::ScalarPair(_, _) = layout.abi {
if let BackendRepr::ScalarPair(_, _) = layout.backend_repr {
CPlace::new_var_pair(fx, local, layout)
} else {
CPlace::new_var(fx, local, layout)
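The `Abi` -> `BackendRepr` rename driving the remaining codegen hunks maps variants one-to-one; a sketch of the renamed enum with the variant names as they appear in this diff (field types are assumptions for exposition):

pub struct Scalar; // placeholder for rustc_abi::Scalar

// old: rustc_target::abi::Abi        new: rustc_abi::BackendRepr
pub enum BackendRepr {
    Scalar(Scalar),
    ScalarPair(Scalar, Scalar),
    Vector { element: Scalar, count: u64 },
    Memory { sized: bool }, // was `Abi::Aggregate { sized }`
    Uninhabited,
}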


@ -78,19 +78,19 @@ impl<'tcx> ArgAbiExt<'tcx> for ArgAbi<'tcx, Ty<'tcx>> {
fn get_abi_param(&self, tcx: TyCtxt<'tcx>) -> SmallVec<[AbiParam; 2]> {
match self.mode {
PassMode::Ignore => smallvec![],
PassMode::Direct(attrs) => match self.layout.abi {
Abi::Scalar(scalar) => smallvec![apply_arg_attrs_to_abi_param(
PassMode::Direct(attrs) => match self.layout.backend_repr {
BackendRepr::Scalar(scalar) => smallvec![apply_arg_attrs_to_abi_param(
AbiParam::new(scalar_to_clif_type(tcx, scalar)),
attrs
)],
Abi::Vector { .. } => {
BackendRepr::Vector { .. } => {
let vector_ty = crate::intrinsics::clif_vector_type(tcx, self.layout);
smallvec![AbiParam::new(vector_ty)]
}
_ => unreachable!("{:?}", self.layout.abi),
_ => unreachable!("{:?}", self.layout.backend_repr),
},
PassMode::Pair(attrs_a, attrs_b) => match self.layout.abi {
Abi::ScalarPair(a, b) => {
PassMode::Pair(attrs_a, attrs_b) => match self.layout.backend_repr {
BackendRepr::ScalarPair(a, b) => {
let a = scalar_to_clif_type(tcx, a);
let b = scalar_to_clif_type(tcx, b);
smallvec![
@ -98,7 +98,7 @@ impl<'tcx> ArgAbiExt<'tcx> for ArgAbi<'tcx, Ty<'tcx>> {
apply_arg_attrs_to_abi_param(AbiParam::new(b), attrs_b),
]
}
_ => unreachable!("{:?}", self.layout.abi),
_ => unreachable!("{:?}", self.layout.backend_repr),
},
PassMode::Cast { ref cast, pad_i32 } => {
assert!(!pad_i32, "padding support not yet implemented");
@ -130,23 +130,23 @@ impl<'tcx> ArgAbiExt<'tcx> for ArgAbi<'tcx, Ty<'tcx>> {
fn get_abi_return(&self, tcx: TyCtxt<'tcx>) -> (Option<AbiParam>, Vec<AbiParam>) {
match self.mode {
PassMode::Ignore => (None, vec![]),
PassMode::Direct(_) => match self.layout.abi {
Abi::Scalar(scalar) => {
PassMode::Direct(_) => match self.layout.backend_repr {
BackendRepr::Scalar(scalar) => {
(None, vec![AbiParam::new(scalar_to_clif_type(tcx, scalar))])
}
Abi::Vector { .. } => {
BackendRepr::Vector { .. } => {
let vector_ty = crate::intrinsics::clif_vector_type(tcx, self.layout);
(None, vec![AbiParam::new(vector_ty)])
}
_ => unreachable!("{:?}", self.layout.abi),
_ => unreachable!("{:?}", self.layout.backend_repr),
},
PassMode::Pair(_, _) => match self.layout.abi {
Abi::ScalarPair(a, b) => {
PassMode::Pair(_, _) => match self.layout.backend_repr {
BackendRepr::ScalarPair(a, b) => {
let a = scalar_to_clif_type(tcx, a);
let b = scalar_to_clif_type(tcx, b);
(None, vec![AbiParam::new(a), AbiParam::new(b)])
}
_ => unreachable!("{:?}", self.layout.abi),
_ => unreachable!("{:?}", self.layout.backend_repr),
},
PassMode::Cast { ref cast, .. } => {
(None, cast_target_to_abi_params(cast).into_iter().collect())


@ -290,7 +290,7 @@ fn codegen_fn_body(fx: &mut FunctionCx<'_, '_, '_>, start_block: Block) {
let arg_uninhabited = fx
.mir
.args_iter()
.any(|arg| fx.layout_of(fx.monomorphize(fx.mir.local_decls[arg].ty)).abi.is_uninhabited());
.any(|arg| fx.layout_of(fx.monomorphize(fx.mir.local_decls[arg].ty)).is_uninhabited());
if arg_uninhabited {
fx.bcx.append_block_params_for_function_params(fx.block_map[START_BLOCK]);
fx.bcx.switch_to_block(fx.block_map[START_BLOCK]);
@ -644,9 +644,9 @@ fn codegen_stmt<'tcx>(
_ => unreachable!("un op Neg for {:?}", layout.ty),
}
}
UnOp::PtrMetadata => match layout.abi {
Abi::Scalar(_) => CValue::zst(dest_layout),
Abi::ScalarPair(_, _) => {
UnOp::PtrMetadata => match layout.backend_repr {
BackendRepr::Scalar(_) => CValue::zst(dest_layout),
BackendRepr::ScalarPair(_, _) => {
CValue::by_val(operand.load_scalar_pair(fx).1, dest_layout)
}
_ => bug!("Unexpected `PtrToMetadata` operand: {operand:?}"),


@ -14,7 +14,7 @@ pub(crate) fn codegen_set_discriminant<'tcx>(
variant_index: VariantIdx,
) {
let layout = place.layout();
if layout.for_variant(fx, variant_index).abi.is_uninhabited() {
if layout.for_variant(fx, variant_index).is_uninhabited() {
return;
}
match layout.variants {
@ -80,7 +80,7 @@ pub(crate) fn codegen_get_discriminant<'tcx>(
) {
let layout = value.layout();
if layout.abi.is_uninhabited() {
if layout.is_uninhabited() {
return;
}


@ -51,8 +51,8 @@ fn report_atomic_type_validation_error<'tcx>(
}
pub(crate) fn clif_vector_type<'tcx>(tcx: TyCtxt<'tcx>, layout: TyAndLayout<'tcx>) -> Type {
let (element, count) = match layout.abi {
Abi::Vector { element, count } => (element, count),
let (element, count) = match layout.backend_repr {
BackendRepr::Vector { element, count } => (element, count),
_ => unreachable!(),
};
@ -505,7 +505,7 @@ fn codegen_regular_intrinsic_call<'tcx>(
let layout = fx.layout_of(generic_args.type_at(0));
// Note: Can't use is_unsized here as truly unsized types need to take the fixed size
// branch
let meta = if let Abi::ScalarPair(_, _) = ptr.layout().abi {
let meta = if let BackendRepr::ScalarPair(_, _) = ptr.layout().backend_repr {
Some(ptr.load_scalar_pair(fx).1)
} else {
None
@ -519,7 +519,7 @@ fn codegen_regular_intrinsic_call<'tcx>(
let layout = fx.layout_of(generic_args.type_at(0));
// Note: Can't use is_unsized here as truly unsized types need to take the fixed size
// branch
let meta = if let Abi::ScalarPair(_, _) = ptr.layout().abi {
let meta = if let BackendRepr::ScalarPair(_, _) = ptr.layout().backend_repr {
Some(ptr.load_scalar_pair(fx).1)
} else {
None
@ -693,7 +693,7 @@ fn codegen_regular_intrinsic_call<'tcx>(
let layout = fx.layout_of(ty);
let msg_str = with_no_visible_paths!({
with_no_trimmed_paths!({
if layout.abi.is_uninhabited() {
if layout.is_uninhabited() {
// Use this error even for the other intrinsics as it is more precise.
format!("attempted to instantiate uninhabited type `{}`", ty)
} else if intrinsic == sym::assert_zero_valid {


@ -92,6 +92,7 @@ mod prelude {
StackSlotData, StackSlotKind, TrapCode, Type, Value, types,
};
pub(crate) use cranelift_module::{self, DataDescription, FuncId, Linkage, Module};
pub(crate) use rustc_abi::{BackendRepr, FIRST_VARIANT, FieldIdx, Scalar, Size, VariantIdx};
pub(crate) use rustc_data_structures::fx::{FxHashMap, FxIndexMap};
pub(crate) use rustc_hir::def_id::{DefId, LOCAL_CRATE};
pub(crate) use rustc_index::Idx;
@ -101,7 +102,6 @@ mod prelude {
self, FloatTy, Instance, InstanceKind, IntTy, ParamEnv, Ty, TyCtxt, UintTy,
};
pub(crate) use rustc_span::Span;
pub(crate) use rustc_target::abi::{Abi, FIRST_VARIANT, FieldIdx, Scalar, Size, VariantIdx};
pub(crate) use crate::abi::*;
pub(crate) use crate::base::{codegen_operand, codegen_place};


@ -131,8 +131,8 @@ impl<'tcx> CValue<'tcx> {
match self.0 {
CValueInner::ByRef(ptr, None) => {
let (a_scalar, b_scalar) = match self.1.abi {
Abi::ScalarPair(a, b) => (a, b),
let (a_scalar, b_scalar) = match self.1.backend_repr {
BackendRepr::ScalarPair(a, b) => (a, b),
_ => unreachable!("dyn_star_force_data_on_stack({:?})", self),
};
let b_offset = scalar_pair_calculate_b_offset(fx.tcx, a_scalar, b_scalar);
@ -164,15 +164,15 @@ impl<'tcx> CValue<'tcx> {
}
}
/// Load a value with layout.abi of scalar
/// Load a value with layout.backend_repr of scalar
#[track_caller]
pub(crate) fn load_scalar(self, fx: &mut FunctionCx<'_, '_, 'tcx>) -> Value {
let layout = self.1;
match self.0 {
CValueInner::ByRef(ptr, None) => {
let clif_ty = match layout.abi {
Abi::Scalar(scalar) => scalar_to_clif_type(fx.tcx, scalar),
Abi::Vector { element, count } => scalar_to_clif_type(fx.tcx, element)
let clif_ty = match layout.backend_repr {
BackendRepr::Scalar(scalar) => scalar_to_clif_type(fx.tcx, scalar),
BackendRepr::Vector { element, count } => scalar_to_clif_type(fx.tcx, element)
.by(u32::try_from(count).unwrap())
.unwrap(),
_ => unreachable!("{:?}", layout.ty),
@ -187,14 +187,14 @@ impl<'tcx> CValue<'tcx> {
}
}
/// Load a value pair with layout.abi of scalar pair
/// Load a value pair with layout.backend_repr of scalar pair
#[track_caller]
pub(crate) fn load_scalar_pair(self, fx: &mut FunctionCx<'_, '_, 'tcx>) -> (Value, Value) {
let layout = self.1;
match self.0 {
CValueInner::ByRef(ptr, None) => {
let (a_scalar, b_scalar) = match layout.abi {
Abi::ScalarPair(a, b) => (a, b),
let (a_scalar, b_scalar) = match layout.backend_repr {
BackendRepr::ScalarPair(a, b) => (a, b),
_ => unreachable!("load_scalar_pair({:?})", self),
};
let b_offset = scalar_pair_calculate_b_offset(fx.tcx, a_scalar, b_scalar);
@ -222,8 +222,8 @@ impl<'tcx> CValue<'tcx> {
let layout = self.1;
match self.0 {
CValueInner::ByVal(_) => unreachable!(),
CValueInner::ByValPair(val1, val2) => match layout.abi {
Abi::ScalarPair(_, _) => {
CValueInner::ByValPair(val1, val2) => match layout.backend_repr {
BackendRepr::ScalarPair(_, _) => {
let val = match field.as_u32() {
0 => val1,
1 => val2,
@ -232,7 +232,7 @@ impl<'tcx> CValue<'tcx> {
let field_layout = layout.field(&*fx, usize::from(field));
CValue::by_val(val, field_layout)
}
_ => unreachable!("value_field for ByValPair with abi {:?}", layout.abi),
_ => unreachable!("value_field for ByValPair with abi {:?}", layout.backend_repr),
},
CValueInner::ByRef(ptr, None) => {
let (field_ptr, field_layout) = codegen_field(fx, ptr, None, layout, field);
@ -360,7 +360,7 @@ impl<'tcx> CValue<'tcx> {
pub(crate) fn cast_pointer_to(self, layout: TyAndLayout<'tcx>) -> Self {
assert!(matches!(self.layout().ty.kind(), ty::Ref(..) | ty::RawPtr(..) | ty::FnPtr(..)));
assert!(matches!(layout.ty.kind(), ty::Ref(..) | ty::RawPtr(..) | ty::FnPtr(..)));
assert_eq!(self.layout().abi, layout.abi);
assert_eq!(self.layout().backend_repr, layout.backend_repr);
CValue(self.0, layout)
}
}
@ -609,8 +609,8 @@ impl<'tcx> CPlace<'tcx> {
let dst_layout = self.layout();
match self.inner {
CPlaceInner::Var(_local, var) => {
let data = match from.1.abi {
Abi::Scalar(_) => CValue(from.0, dst_layout).load_scalar(fx),
let data = match from.1.backend_repr {
BackendRepr::Scalar(_) => CValue(from.0, dst_layout).load_scalar(fx),
_ => {
let (ptr, meta) = from.force_stack(fx);
assert!(meta.is_none());
@ -621,8 +621,10 @@ impl<'tcx> CPlace<'tcx> {
transmute_scalar(fx, var, data, dst_ty);
}
CPlaceInner::VarPair(_local, var1, var2) => {
let (data1, data2) = match from.1.abi {
Abi::ScalarPair(_, _) => CValue(from.0, dst_layout).load_scalar_pair(fx),
let (data1, data2) = match from.1.backend_repr {
BackendRepr::ScalarPair(_, _) => {
CValue(from.0, dst_layout).load_scalar_pair(fx)
}
_ => {
let (ptr, meta) = from.force_stack(fx);
assert!(meta.is_none());
@ -635,7 +637,9 @@ impl<'tcx> CPlace<'tcx> {
}
CPlaceInner::Addr(_, Some(_)) => bug!("Can't write value to unsized place {:?}", self),
CPlaceInner::Addr(to_ptr, None) => {
if dst_layout.size == Size::ZERO || dst_layout.abi == Abi::Uninhabited {
if dst_layout.size == Size::ZERO
|| dst_layout.backend_repr == BackendRepr::Uninhabited
{
return;
}
@ -646,23 +650,28 @@ impl<'tcx> CPlace<'tcx> {
CValueInner::ByVal(val) => {
to_ptr.store(fx, val, flags);
}
CValueInner::ByValPair(val1, val2) => match from.layout().abi {
Abi::ScalarPair(a_scalar, b_scalar) => {
CValueInner::ByValPair(val1, val2) => match from.layout().backend_repr {
BackendRepr::ScalarPair(a_scalar, b_scalar) => {
let b_offset =
scalar_pair_calculate_b_offset(fx.tcx, a_scalar, b_scalar);
to_ptr.store(fx, val1, flags);
to_ptr.offset(fx, b_offset).store(fx, val2, flags);
}
_ => bug!("Non ScalarPair abi {:?} for ByValPair CValue", dst_layout.abi),
_ => {
bug!(
"Non ScalarPair repr {:?} for ByValPair CValue",
dst_layout.backend_repr
)
}
},
CValueInner::ByRef(from_ptr, None) => {
match from.layout().abi {
Abi::Scalar(_) => {
match from.layout().backend_repr {
BackendRepr::Scalar(_) => {
let val = from.load_scalar(fx);
to_ptr.store(fx, val, flags);
return;
}
Abi::ScalarPair(a_scalar, b_scalar) => {
BackendRepr::ScalarPair(a_scalar, b_scalar) => {
let b_offset =
scalar_pair_calculate_b_offset(fx.tcx, a_scalar, b_scalar);
let (val1, val2) = from.load_scalar_pair(fx);


@ -47,7 +47,7 @@ pub(crate) fn get_ptr_and_method_ref<'tcx>(
idx: usize,
) -> (Pointer, Value) {
let (ptr, vtable) = 'block: {
if let Abi::Scalar(_) = arg.layout().abi {
if let BackendRepr::Scalar(_) = arg.layout().backend_repr {
while !arg.layout().ty.is_unsafe_ptr() && !arg.layout().ty.is_ref() {
let (idx, _) = arg
.layout()
@ -68,7 +68,7 @@ pub(crate) fn get_ptr_and_method_ref<'tcx>(
}
}
if let Abi::ScalarPair(_, _) = arg.layout().abi {
if let BackendRepr::ScalarPair(_, _) = arg.layout().backend_repr {
let (ptr, vtable) = arg.load_scalar_pair(fx);
(Pointer::new(ptr), vtable)
} else {


@ -1016,11 +1016,11 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
OperandValue::Ref(place.val)
} else if place.layout.is_gcc_immediate() {
let load = self.load(place.layout.gcc_type(self), place.val.llval, place.val.align);
if let abi::Abi::Scalar(ref scalar) = place.layout.abi {
if let abi::BackendRepr::Scalar(ref scalar) = place.layout.backend_repr {
scalar_load_metadata(self, load, scalar);
}
OperandValue::Immediate(self.to_immediate(load, place.layout))
} else if let abi::Abi::ScalarPair(ref a, ref b) = place.layout.abi {
} else if let abi::BackendRepr::ScalarPair(ref a, ref b) = place.layout.backend_repr {
let b_offset = a.size(self).align_to(b.align(self).abi);
let mut load = |i, scalar: &abi::Scalar, align| {


@ -294,13 +294,13 @@ impl<'a, 'gcc, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tc
}
sym::raw_eq => {
use rustc_target::abi::Abi::*;
use rustc_abi::BackendRepr::*;
let tp_ty = fn_args.type_at(0);
let layout = self.layout_of(tp_ty).layout;
let _use_integer_compare = match layout.abi() {
let _use_integer_compare = match layout.backend_repr() {
Scalar(_) | ScalarPair(_, _) => true,
Uninhabited | Vector { .. } => false,
Aggregate { .. } => {
Memory { .. } => {
// For rusty ABIs, small aggregates are actually passed
// as `RegKind::Integer` (see `FnAbi::adjust_for_abi`),
// so we re-use that same threshold here.


@ -3,7 +3,7 @@ use std::fmt::Write;
use gccjit::{Struct, Type};
use rustc_abi as abi;
use rustc_abi::Primitive::*;
use rustc_abi::{Abi, FieldsShape, Integer, PointeeInfo, Size, Variants};
use rustc_abi::{BackendRepr, FieldsShape, Integer, PointeeInfo, Size, Variants};
use rustc_codegen_ssa::traits::{
BaseTypeCodegenMethods, DerivedTypeCodegenMethods, LayoutTypeCodegenMethods,
};
@ -60,9 +60,9 @@ fn uncached_gcc_type<'gcc, 'tcx>(
layout: TyAndLayout<'tcx>,
defer: &mut Option<(Struct<'gcc>, TyAndLayout<'tcx>)>,
) -> Type<'gcc> {
match layout.abi {
Abi::Scalar(_) => bug!("handled elsewhere"),
Abi::Vector { ref element, count } => {
match layout.backend_repr {
BackendRepr::Scalar(_) => bug!("handled elsewhere"),
BackendRepr::Vector { ref element, count } => {
let element = layout.scalar_gcc_type_at(cx, element, Size::ZERO);
let element =
// NOTE: gcc doesn't allow pointer types in vectors.
@ -74,7 +74,7 @@ fn uncached_gcc_type<'gcc, 'tcx>(
};
return cx.context.new_vector_type(element, count);
}
Abi::ScalarPair(..) => {
BackendRepr::ScalarPair(..) => {
return cx.type_struct(
&[
layout.scalar_pair_element_gcc_type(cx, 0),
@ -83,7 +83,7 @@ fn uncached_gcc_type<'gcc, 'tcx>(
false,
);
}
Abi::Uninhabited | Abi::Aggregate { .. } => {}
BackendRepr::Uninhabited | BackendRepr::Memory { .. } => {}
}
let name = match *layout.ty.kind() {
@ -176,16 +176,21 @@ pub trait LayoutGccExt<'tcx> {
impl<'tcx> LayoutGccExt<'tcx> for TyAndLayout<'tcx> {
fn is_gcc_immediate(&self) -> bool {
match self.abi {
Abi::Scalar(_) | Abi::Vector { .. } => true,
Abi::ScalarPair(..) | Abi::Uninhabited | Abi::Aggregate { .. } => false,
match self.backend_repr {
BackendRepr::Scalar(_) | BackendRepr::Vector { .. } => true,
BackendRepr::ScalarPair(..) | BackendRepr::Uninhabited | BackendRepr::Memory { .. } => {
false
}
}
}
fn is_gcc_scalar_pair(&self) -> bool {
match self.abi {
Abi::ScalarPair(..) => true,
Abi::Uninhabited | Abi::Scalar(_) | Abi::Vector { .. } | Abi::Aggregate { .. } => false,
match self.backend_repr {
BackendRepr::ScalarPair(..) => true,
BackendRepr::Uninhabited
| BackendRepr::Scalar(_)
| BackendRepr::Vector { .. }
| BackendRepr::Memory { .. } => false,
}
}
@ -205,7 +210,7 @@ impl<'tcx> LayoutGccExt<'tcx> for TyAndLayout<'tcx> {
// This must produce the same result for `repr(transparent)` wrappers as for the inner type!
// In other words, this should generally not look at the type at all, but only at the
// layout.
if let Abi::Scalar(ref scalar) = self.abi {
if let BackendRepr::Scalar(ref scalar) = self.backend_repr {
// Use a different cache for scalars because pointers to DSTs
// can be either wide or thin (data pointers of wide pointers).
if let Some(&ty) = cx.scalar_types.borrow().get(&self.ty) {
@ -261,7 +266,7 @@ impl<'tcx> LayoutGccExt<'tcx> for TyAndLayout<'tcx> {
}
fn immediate_gcc_type<'gcc>(&self, cx: &CodegenCx<'gcc, 'tcx>) -> Type<'gcc> {
if let Abi::Scalar(ref scalar) = self.abi {
if let BackendRepr::Scalar(ref scalar) = self.backend_repr {
if scalar.is_bool() {
return cx.type_i1();
}
@ -299,8 +304,8 @@ impl<'tcx> LayoutGccExt<'tcx> for TyAndLayout<'tcx> {
// This must produce the same result for `repr(transparent)` wrappers as for the inner type!
// In other words, this should generally not look at the type at all, but only at the
// layout.
let (a, b) = match self.abi {
Abi::ScalarPair(ref a, ref b) => (a, b),
let (a, b) = match self.backend_repr {
BackendRepr::ScalarPair(ref a, ref b) => (a, b),
_ => bug!("TyAndLayout::scalar_pair_element_llty({:?}): not applicable", self),
};
let scalar = [a, b][index];


@ -415,7 +415,7 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
instance: Option<ty::Instance<'tcx>>,
) {
let mut func_attrs = SmallVec::<[_; 3]>::new();
if self.ret.layout.abi.is_uninhabited() {
if self.ret.layout.is_uninhabited() {
func_attrs.push(llvm::AttributeKind::NoReturn.create_attr(cx.llcx));
}
if !self.can_unwind {
@ -458,7 +458,7 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
match &self.ret.mode {
PassMode::Direct(attrs) => {
attrs.apply_attrs_to_llfn(llvm::AttributePlace::ReturnValue, cx, llfn);
if let abi::Abi::Scalar(scalar) = self.ret.layout.abi {
if let abi::BackendRepr::Scalar(scalar) = self.ret.layout.backend_repr {
apply_range_attr(llvm::AttributePlace::ReturnValue, scalar);
}
}
@ -495,7 +495,7 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
}
PassMode::Direct(attrs) => {
let i = apply(attrs);
if let abi::Abi::Scalar(scalar) = arg.layout.abi {
if let abi::BackendRepr::Scalar(scalar) = arg.layout.backend_repr {
apply_range_attr(llvm::AttributePlace::Argument(i), scalar);
}
}
@ -510,7 +510,9 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
PassMode::Pair(a, b) => {
let i = apply(a);
let ii = apply(b);
if let abi::Abi::ScalarPair(scalar_a, scalar_b) = arg.layout.abi {
if let abi::BackendRepr::ScalarPair(scalar_a, scalar_b) =
arg.layout.backend_repr
{
apply_range_attr(llvm::AttributePlace::Argument(i), scalar_a);
apply_range_attr(llvm::AttributePlace::Argument(ii), scalar_b);
}
@ -532,7 +534,7 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
fn apply_attrs_callsite(&self, bx: &mut Builder<'_, 'll, 'tcx>, callsite: &'ll Value) {
let mut func_attrs = SmallVec::<[_; 2]>::new();
if self.ret.layout.abi.is_uninhabited() {
if self.ret.layout.is_uninhabited() {
func_attrs.push(llvm::AttributeKind::NoReturn.create_attr(bx.cx.llcx));
}
if !self.can_unwind {
@ -570,7 +572,7 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
}
if bx.cx.sess().opts.optimize != config::OptLevel::No
&& llvm_util::get_version() < (19, 0, 0)
&& let abi::Abi::Scalar(scalar) = self.ret.layout.abi
&& let abi::BackendRepr::Scalar(scalar) = self.ret.layout.backend_repr
&& matches!(scalar.primitive(), Int(..))
// If the value is a boolean, the range is 0..2 and that ultimately
// becomes 0..0 when the type becomes i1, which would be rejected


@ -7,6 +7,7 @@ use rustc_middle::bug;
use rustc_middle::ty::TyCtxt;
use rustc_session::config::{DebugInfo, OomStrategy};
use crate::common::AsCCharPtr;
use crate::llvm::{self, Context, False, Module, True, Type};
use crate::{ModuleLlvm, attributes, debuginfo};
@ -76,14 +77,14 @@ pub(crate) unsafe fn codegen(
unsafe {
// __rust_alloc_error_handler_should_panic
let name = OomStrategy::SYMBOL;
let ll_g = llvm::LLVMRustGetOrInsertGlobal(llmod, name.as_ptr().cast(), name.len(), i8);
let ll_g = llvm::LLVMRustGetOrInsertGlobal(llmod, name.as_c_char_ptr(), name.len(), i8);
llvm::set_visibility(ll_g, llvm::Visibility::from_generic(tcx.sess.default_visibility()));
let val = tcx.sess.opts.unstable_opts.oom.should_panic();
let llval = llvm::LLVMConstInt(i8, val as u64, False);
llvm::LLVMSetInitializer(ll_g, llval);
let name = NO_ALLOC_SHIM_IS_UNSTABLE;
let ll_g = llvm::LLVMRustGetOrInsertGlobal(llmod, name.as_ptr().cast(), name.len(), i8);
let ll_g = llvm::LLVMRustGetOrInsertGlobal(llmod, name.as_c_char_ptr(), name.len(), i8);
llvm::set_visibility(ll_g, llvm::Visibility::from_generic(tcx.sess.default_visibility()));
let llval = llvm::LLVMConstInt(i8, 0, False);
llvm::LLVMSetInitializer(ll_g, llval);
@ -115,7 +116,7 @@ fn create_wrapper_function(
);
let llfn = llvm::LLVMRustGetOrInsertFunction(
llmod,
from_name.as_ptr().cast(),
from_name.as_c_char_ptr(),
from_name.len(),
ty,
);
@ -137,7 +138,7 @@ fn create_wrapper_function(
}
let callee =
llvm::LLVMRustGetOrInsertFunction(llmod, to_name.as_ptr().cast(), to_name.len(), ty);
llvm::LLVMRustGetOrInsertFunction(llmod, to_name.as_c_char_ptr(), to_name.len(), ty);
if let Some(no_return) = no_return {
// -> ! DIFlagNoReturn
attributes::apply_to_llfn(callee, llvm::AttributePlace::Function, &[no_return]);
@ -153,7 +154,7 @@ fn create_wrapper_function(
.enumerate()
.map(|(i, _)| llvm::LLVMGetParam(llfn, i as c_uint))
.collect::<Vec<_>>();
let ret = llvm::LLVMRustBuildCall(
let ret = llvm::LLVMBuildCallWithOperandBundles(
llbuilder,
ty,
callee,
@ -161,6 +162,7 @@ fn create_wrapper_function(
args.len() as c_uint,
[].as_ptr(),
0 as c_uint,
c"".as_ptr(),
);
llvm::LLVMSetTailCall(ret, True);
if output.is_some() {

View file

@ -15,7 +15,7 @@ use smallvec::SmallVec;
use tracing::debug;
use crate::builder::Builder;
use crate::common::Funclet;
use crate::common::{AsCCharPtr, Funclet};
use crate::context::CodegenCx;
use crate::type_::Type;
use crate::type_of::LayoutLlvmExt;
@ -420,7 +420,7 @@ impl<'tcx> AsmCodegenMethods<'tcx> for CodegenCx<'_, 'tcx> {
unsafe {
llvm::LLVMAppendModuleInlineAsm(
self.llmod,
template_str.as_ptr().cast(),
template_str.as_c_char_ptr(),
template_str.len(),
);
}
@ -458,14 +458,14 @@ pub(crate) fn inline_asm_call<'ll>(
let fty = bx.cx.type_func(&argtys, output);
unsafe {
// Ask LLVM to verify that the constraints are well-formed.
let constraints_ok = llvm::LLVMRustInlineAsmVerify(fty, cons.as_ptr().cast(), cons.len());
let constraints_ok = llvm::LLVMRustInlineAsmVerify(fty, cons.as_c_char_ptr(), cons.len());
debug!("constraint verification result: {:?}", constraints_ok);
if constraints_ok {
let v = llvm::LLVMRustInlineAsm(
fty,
asm.as_ptr().cast(),
asm.as_c_char_ptr(),
asm.len(),
cons.as_ptr().cast(),
cons.as_c_char_ptr(),
cons.len(),
volatile,
alignstack,
@ -880,8 +880,8 @@ fn llvm_fixup_input<'ll, 'tcx>(
) -> &'ll Value {
use InlineAsmRegClass::*;
let dl = &bx.tcx.data_layout;
match (reg, layout.abi) {
(AArch64(AArch64InlineAsmRegClass::vreg), Abi::Scalar(s)) => {
match (reg, layout.backend_repr) {
(AArch64(AArch64InlineAsmRegClass::vreg), BackendRepr::Scalar(s)) => {
if let Primitive::Int(Integer::I8, _) = s.primitive() {
let vec_ty = bx.cx.type_vector(bx.cx.type_i8(), 8);
bx.insert_element(bx.const_undef(vec_ty), value, bx.const_i32(0))
@ -889,7 +889,7 @@ fn llvm_fixup_input<'ll, 'tcx>(
value
}
}
(AArch64(AArch64InlineAsmRegClass::vreg_low16), Abi::Scalar(s))
(AArch64(AArch64InlineAsmRegClass::vreg_low16), BackendRepr::Scalar(s))
if s.primitive() != Primitive::Float(Float::F128) =>
{
let elem_ty = llvm_asm_scalar_type(bx.cx, s);
@ -902,7 +902,7 @@ fn llvm_fixup_input<'ll, 'tcx>(
}
bx.insert_element(bx.const_undef(vec_ty), value, bx.const_i32(0))
}
(AArch64(AArch64InlineAsmRegClass::vreg_low16), Abi::Vector { element, count })
(AArch64(AArch64InlineAsmRegClass::vreg_low16), BackendRepr::Vector { element, count })
if layout.size.bytes() == 8 =>
{
let elem_ty = llvm_asm_scalar_type(bx.cx, element);
@ -910,14 +910,14 @@ fn llvm_fixup_input<'ll, 'tcx>(
let indices: Vec<_> = (0..count * 2).map(|x| bx.const_i32(x as i32)).collect();
bx.shuffle_vector(value, bx.const_undef(vec_ty), bx.const_vector(&indices))
}
(X86(X86InlineAsmRegClass::reg_abcd), Abi::Scalar(s))
(X86(X86InlineAsmRegClass::reg_abcd), BackendRepr::Scalar(s))
if s.primitive() == Primitive::Float(Float::F64) =>
{
bx.bitcast(value, bx.cx.type_i64())
}
(
X86(X86InlineAsmRegClass::xmm_reg | X86InlineAsmRegClass::zmm_reg),
Abi::Vector { .. },
BackendRepr::Vector { .. },
) if layout.size.bytes() == 64 => bx.bitcast(value, bx.cx.type_vector(bx.cx.type_f64(), 8)),
(
X86(
@ -925,7 +925,7 @@ fn llvm_fixup_input<'ll, 'tcx>(
| X86InlineAsmRegClass::ymm_reg
| X86InlineAsmRegClass::zmm_reg,
),
Abi::Scalar(s),
BackendRepr::Scalar(s),
) if bx.sess().asm_arch == Some(InlineAsmArch::X86)
&& s.primitive() == Primitive::Float(Float::F128) =>
{
@ -937,7 +937,7 @@ fn llvm_fixup_input<'ll, 'tcx>(
| X86InlineAsmRegClass::ymm_reg
| X86InlineAsmRegClass::zmm_reg,
),
Abi::Scalar(s),
BackendRepr::Scalar(s),
) if s.primitive() == Primitive::Float(Float::F16) => {
let value = bx.insert_element(
bx.const_undef(bx.type_vector(bx.type_f16(), 8)),
@ -952,11 +952,14 @@ fn llvm_fixup_input<'ll, 'tcx>(
| X86InlineAsmRegClass::ymm_reg
| X86InlineAsmRegClass::zmm_reg,
),
Abi::Vector { element, count: count @ (8 | 16) },
BackendRepr::Vector { element, count: count @ (8 | 16) },
) if element.primitive() == Primitive::Float(Float::F16) => {
bx.bitcast(value, bx.type_vector(bx.type_i16(), count))
}
(Arm(ArmInlineAsmRegClass::sreg | ArmInlineAsmRegClass::sreg_low16), Abi::Scalar(s)) => {
(
Arm(ArmInlineAsmRegClass::sreg | ArmInlineAsmRegClass::sreg_low16),
BackendRepr::Scalar(s),
) => {
if let Primitive::Int(Integer::I32, _) = s.primitive() {
bx.bitcast(value, bx.cx.type_f32())
} else {
@ -969,7 +972,7 @@ fn llvm_fixup_input<'ll, 'tcx>(
| ArmInlineAsmRegClass::dreg_low8
| ArmInlineAsmRegClass::dreg_low16,
),
Abi::Scalar(s),
BackendRepr::Scalar(s),
) => {
if let Primitive::Int(Integer::I64, _) = s.primitive() {
bx.bitcast(value, bx.cx.type_f64())
@ -986,11 +989,11 @@ fn llvm_fixup_input<'ll, 'tcx>(
| ArmInlineAsmRegClass::qreg_low4
| ArmInlineAsmRegClass::qreg_low8,
),
Abi::Vector { element, count: count @ (4 | 8) },
BackendRepr::Vector { element, count: count @ (4 | 8) },
) if element.primitive() == Primitive::Float(Float::F16) => {
bx.bitcast(value, bx.type_vector(bx.type_i16(), count))
}
(Mips(MipsInlineAsmRegClass::reg), Abi::Scalar(s)) => {
(Mips(MipsInlineAsmRegClass::reg), BackendRepr::Scalar(s)) => {
match s.primitive() {
// MIPS only supports register-length arithmetics.
Primitive::Int(Integer::I8 | Integer::I16, _) => bx.zext(value, bx.cx.type_i32()),
@ -999,7 +1002,7 @@ fn llvm_fixup_input<'ll, 'tcx>(
_ => value,
}
}
(RiscV(RiscVInlineAsmRegClass::freg), Abi::Scalar(s))
(RiscV(RiscVInlineAsmRegClass::freg), BackendRepr::Scalar(s))
if s.primitive() == Primitive::Float(Float::F16)
&& !any_target_feature_enabled(bx, instance, &[sym::zfhmin, sym::zfh]) =>
{
@ -1022,15 +1025,15 @@ fn llvm_fixup_output<'ll, 'tcx>(
instance: Instance<'_>,
) -> &'ll Value {
use InlineAsmRegClass::*;
match (reg, layout.abi) {
(AArch64(AArch64InlineAsmRegClass::vreg), Abi::Scalar(s)) => {
match (reg, layout.backend_repr) {
(AArch64(AArch64InlineAsmRegClass::vreg), BackendRepr::Scalar(s)) => {
if let Primitive::Int(Integer::I8, _) = s.primitive() {
bx.extract_element(value, bx.const_i32(0))
} else {
value
}
}
(AArch64(AArch64InlineAsmRegClass::vreg_low16), Abi::Scalar(s))
(AArch64(AArch64InlineAsmRegClass::vreg_low16), BackendRepr::Scalar(s))
if s.primitive() != Primitive::Float(Float::F128) =>
{
value = bx.extract_element(value, bx.const_i32(0));
@ -1039,7 +1042,7 @@ fn llvm_fixup_output<'ll, 'tcx>(
}
value
}
(AArch64(AArch64InlineAsmRegClass::vreg_low16), Abi::Vector { element, count })
(AArch64(AArch64InlineAsmRegClass::vreg_low16), BackendRepr::Vector { element, count })
if layout.size.bytes() == 8 =>
{
let elem_ty = llvm_asm_scalar_type(bx.cx, element);
@ -1047,14 +1050,14 @@ fn llvm_fixup_output<'ll, 'tcx>(
let indices: Vec<_> = (0..count).map(|x| bx.const_i32(x as i32)).collect();
bx.shuffle_vector(value, bx.const_undef(vec_ty), bx.const_vector(&indices))
}
(X86(X86InlineAsmRegClass::reg_abcd), Abi::Scalar(s))
(X86(X86InlineAsmRegClass::reg_abcd), BackendRepr::Scalar(s))
if s.primitive() == Primitive::Float(Float::F64) =>
{
bx.bitcast(value, bx.cx.type_f64())
}
(
X86(X86InlineAsmRegClass::xmm_reg | X86InlineAsmRegClass::zmm_reg),
Abi::Vector { .. },
BackendRepr::Vector { .. },
) if layout.size.bytes() == 64 => bx.bitcast(value, layout.llvm_type(bx.cx)),
(
X86(
@ -1062,7 +1065,7 @@ fn llvm_fixup_output<'ll, 'tcx>(
| X86InlineAsmRegClass::ymm_reg
| X86InlineAsmRegClass::zmm_reg,
),
Abi::Scalar(s),
BackendRepr::Scalar(s),
) if bx.sess().asm_arch == Some(InlineAsmArch::X86)
&& s.primitive() == Primitive::Float(Float::F128) =>
{
@ -1074,7 +1077,7 @@ fn llvm_fixup_output<'ll, 'tcx>(
| X86InlineAsmRegClass::ymm_reg
| X86InlineAsmRegClass::zmm_reg,
),
Abi::Scalar(s),
BackendRepr::Scalar(s),
) if s.primitive() == Primitive::Float(Float::F16) => {
let value = bx.bitcast(value, bx.type_vector(bx.type_f16(), 8));
bx.extract_element(value, bx.const_usize(0))
@ -1085,11 +1088,14 @@ fn llvm_fixup_output<'ll, 'tcx>(
| X86InlineAsmRegClass::ymm_reg
| X86InlineAsmRegClass::zmm_reg,
),
Abi::Vector { element, count: count @ (8 | 16) },
BackendRepr::Vector { element, count: count @ (8 | 16) },
) if element.primitive() == Primitive::Float(Float::F16) => {
bx.bitcast(value, bx.type_vector(bx.type_f16(), count))
}
(Arm(ArmInlineAsmRegClass::sreg | ArmInlineAsmRegClass::sreg_low16), Abi::Scalar(s)) => {
(
Arm(ArmInlineAsmRegClass::sreg | ArmInlineAsmRegClass::sreg_low16),
BackendRepr::Scalar(s),
) => {
if let Primitive::Int(Integer::I32, _) = s.primitive() {
bx.bitcast(value, bx.cx.type_i32())
} else {
@ -1102,7 +1108,7 @@ fn llvm_fixup_output<'ll, 'tcx>(
| ArmInlineAsmRegClass::dreg_low8
| ArmInlineAsmRegClass::dreg_low16,
),
Abi::Scalar(s),
BackendRepr::Scalar(s),
) => {
if let Primitive::Int(Integer::I64, _) = s.primitive() {
bx.bitcast(value, bx.cx.type_i64())
@ -1119,11 +1125,11 @@ fn llvm_fixup_output<'ll, 'tcx>(
| ArmInlineAsmRegClass::qreg_low4
| ArmInlineAsmRegClass::qreg_low8,
),
Abi::Vector { element, count: count @ (4 | 8) },
BackendRepr::Vector { element, count: count @ (4 | 8) },
) if element.primitive() == Primitive::Float(Float::F16) => {
bx.bitcast(value, bx.type_vector(bx.type_f16(), count))
}
(Mips(MipsInlineAsmRegClass::reg), Abi::Scalar(s)) => {
(Mips(MipsInlineAsmRegClass::reg), BackendRepr::Scalar(s)) => {
match s.primitive() {
// MIPS only supports register-length arithmetics.
Primitive::Int(Integer::I8, _) => bx.trunc(value, bx.cx.type_i8()),
@ -1133,7 +1139,7 @@ fn llvm_fixup_output<'ll, 'tcx>(
_ => value,
}
}
(RiscV(RiscVInlineAsmRegClass::freg), Abi::Scalar(s))
(RiscV(RiscVInlineAsmRegClass::freg), BackendRepr::Scalar(s))
if s.primitive() == Primitive::Float(Float::F16)
&& !any_target_feature_enabled(bx, instance, &[sym::zfhmin, sym::zfh]) =>
{
@ -1153,35 +1159,35 @@ fn llvm_fixup_output_type<'ll, 'tcx>(
instance: Instance<'_>,
) -> &'ll Type {
use InlineAsmRegClass::*;
match (reg, layout.abi) {
(AArch64(AArch64InlineAsmRegClass::vreg), Abi::Scalar(s)) => {
match (reg, layout.backend_repr) {
(AArch64(AArch64InlineAsmRegClass::vreg), BackendRepr::Scalar(s)) => {
if let Primitive::Int(Integer::I8, _) = s.primitive() {
cx.type_vector(cx.type_i8(), 8)
} else {
layout.llvm_type(cx)
}
}
(AArch64(AArch64InlineAsmRegClass::vreg_low16), Abi::Scalar(s))
(AArch64(AArch64InlineAsmRegClass::vreg_low16), BackendRepr::Scalar(s))
if s.primitive() != Primitive::Float(Float::F128) =>
{
let elem_ty = llvm_asm_scalar_type(cx, s);
let count = 16 / layout.size.bytes();
cx.type_vector(elem_ty, count)
}
(AArch64(AArch64InlineAsmRegClass::vreg_low16), Abi::Vector { element, count })
(AArch64(AArch64InlineAsmRegClass::vreg_low16), BackendRepr::Vector { element, count })
if layout.size.bytes() == 8 =>
{
let elem_ty = llvm_asm_scalar_type(cx, element);
cx.type_vector(elem_ty, count * 2)
}
(X86(X86InlineAsmRegClass::reg_abcd), Abi::Scalar(s))
(X86(X86InlineAsmRegClass::reg_abcd), BackendRepr::Scalar(s))
if s.primitive() == Primitive::Float(Float::F64) =>
{
cx.type_i64()
}
(
X86(X86InlineAsmRegClass::xmm_reg | X86InlineAsmRegClass::zmm_reg),
Abi::Vector { .. },
BackendRepr::Vector { .. },
) if layout.size.bytes() == 64 => cx.type_vector(cx.type_f64(), 8),
(
X86(
@ -1189,7 +1195,7 @@ fn llvm_fixup_output_type<'ll, 'tcx>(
| X86InlineAsmRegClass::ymm_reg
| X86InlineAsmRegClass::zmm_reg,
),
Abi::Scalar(s),
BackendRepr::Scalar(s),
) if cx.sess().asm_arch == Some(InlineAsmArch::X86)
&& s.primitive() == Primitive::Float(Float::F128) =>
{
@ -1201,7 +1207,7 @@ fn llvm_fixup_output_type<'ll, 'tcx>(
| X86InlineAsmRegClass::ymm_reg
| X86InlineAsmRegClass::zmm_reg,
),
Abi::Scalar(s),
BackendRepr::Scalar(s),
) if s.primitive() == Primitive::Float(Float::F16) => cx.type_vector(cx.type_i16(), 8),
(
X86(
@ -1209,11 +1215,14 @@ fn llvm_fixup_output_type<'ll, 'tcx>(
| X86InlineAsmRegClass::ymm_reg
| X86InlineAsmRegClass::zmm_reg,
),
Abi::Vector { element, count: count @ (8 | 16) },
BackendRepr::Vector { element, count: count @ (8 | 16) },
) if element.primitive() == Primitive::Float(Float::F16) => {
cx.type_vector(cx.type_i16(), count)
}
(Arm(ArmInlineAsmRegClass::sreg | ArmInlineAsmRegClass::sreg_low16), Abi::Scalar(s)) => {
(
Arm(ArmInlineAsmRegClass::sreg | ArmInlineAsmRegClass::sreg_low16),
BackendRepr::Scalar(s),
) => {
if let Primitive::Int(Integer::I32, _) = s.primitive() {
cx.type_f32()
} else {
@ -1226,7 +1235,7 @@ fn llvm_fixup_output_type<'ll, 'tcx>(
| ArmInlineAsmRegClass::dreg_low8
| ArmInlineAsmRegClass::dreg_low16,
),
Abi::Scalar(s),
BackendRepr::Scalar(s),
) => {
if let Primitive::Int(Integer::I64, _) = s.primitive() {
cx.type_f64()
@ -1243,11 +1252,11 @@ fn llvm_fixup_output_type<'ll, 'tcx>(
| ArmInlineAsmRegClass::qreg_low4
| ArmInlineAsmRegClass::qreg_low8,
),
Abi::Vector { element, count: count @ (4 | 8) },
BackendRepr::Vector { element, count: count @ (4 | 8) },
) if element.primitive() == Primitive::Float(Float::F16) => {
cx.type_vector(cx.type_i16(), count)
}
(Mips(MipsInlineAsmRegClass::reg), Abi::Scalar(s)) => {
(Mips(MipsInlineAsmRegClass::reg), BackendRepr::Scalar(s)) => {
match s.primitive() {
// MIPS only supports register-length arithmetics.
Primitive::Int(Integer::I8 | Integer::I16, _) => cx.type_i32(),
@ -1256,7 +1265,7 @@ fn llvm_fixup_output_type<'ll, 'tcx>(
_ => layout.llvm_type(cx),
}
}
(RiscV(RiscVInlineAsmRegClass::freg), Abi::Scalar(s))
(RiscV(RiscVInlineAsmRegClass::freg), BackendRepr::Scalar(s))
if s.primitive() == Primitive::Float(Float::F16)
&& !any_target_feature_enabled(cx, instance, &[sym::zfhmin, sym::zfh]) =>
{

View file

@ -165,13 +165,14 @@ fn get_bitcode_slice_from_object_data<'a>(
// We drop the "__LLVM," prefix here because on Apple platforms there's a notion of "segment
// name" which in the public API for sections gets treated as part of the section name, but
// internally in MachOObjectFile.cpp gets treated separately.
let section_name = bitcode_section_name(cgcx).trim_start_matches("__LLVM,");
let section_name = bitcode_section_name(cgcx).to_str().unwrap().trim_start_matches("__LLVM,");
let mut len = 0;
let data = unsafe {
llvm::LLVMRustGetSliceFromObjectDataByName(
obj.as_ptr(),
obj.len(),
section_name.as_ptr(),
section_name.len(),
&mut len,
)
};
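
The comment above is the whole trick: the public Mach-O section API exposes names as `segment,section` pairs, but the lookup inside MachOObjectFile.cpp wants the bare section name, hence the `trim_start_matches`. A tiny self-contained check of that trimming:

fn main() {
    // "segment,section" as exposed by the public Mach-O section API.
    let full = "__LLVM,__bitcode";
    // The object-file reader looks the section up without the segment prefix.
    assert_eq!(full.trim_start_matches("__LLVM,"), "__bitcode");
}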
@ -502,9 +503,9 @@ fn thin_lto(
// upstream...
let data = llvm::LLVMRustCreateThinLTOData(
thin_modules.as_ptr(),
thin_modules.len() as u32,
thin_modules.len(),
symbols_below_threshold.as_ptr(),
symbols_below_threshold.len() as u32,
symbols_below_threshold.len(),
)
.ok_or_else(|| write::llvm_err(dcx, LlvmError::PrepareThinLtoContext))?;
@ -569,7 +570,7 @@ fn thin_lto(
info!(" - {}: re-compiled", module_name);
opt_jobs.push(LtoModuleCodegen::Thin(ThinModule {
shared: shared.clone(),
shared: Arc::clone(&shared),
idx: module_index,
}));
}
@ -601,23 +602,9 @@ pub(crate) fn run_pass_manager(
// This code is based off the code found in llvm's LTO code generator:
// llvm/lib/LTO/LTOCodeGenerator.cpp
debug!("running the pass manager");
unsafe {
if !llvm::LLVMRustHasModuleFlag(
module.module_llvm.llmod(),
"LTOPostLink".as_ptr().cast(),
11,
) {
llvm::LLVMRustAddModuleFlagU32(
module.module_llvm.llmod(),
llvm::LLVMModFlagBehavior::Error,
c"LTOPostLink".as_ptr(),
1,
);
}
let opt_stage = if thin { llvm::OptStage::ThinLTO } else { llvm::OptStage::FatLTO };
let opt_level = config.opt_level.unwrap_or(config::OptLevel::No);
write::llvm_optimize(cgcx, dcx, module, config, opt_level, opt_stage)?;
}
let opt_stage = if thin { llvm::OptStage::ThinLTO } else { llvm::OptStage::FatLTO };
let opt_level = config.opt_level.unwrap_or(config::OptLevel::No);
unsafe { write::llvm_optimize(cgcx, dcx, module, config, opt_level, opt_stage) }?;
debug!("lto done");
Ok(())
}

View file

@ -1,4 +1,4 @@
use std::ffi::CString;
use std::ffi::{CStr, CString};
use std::io::{self, Write};
use std::path::{Path, PathBuf};
use std::sync::Arc;
@ -34,6 +34,7 @@ use crate::back::owned_target_machine::OwnedTargetMachine;
use crate::back::profiling::{
LlvmSelfProfiler, selfprofile_after_pass_callback, selfprofile_before_pass_callback,
};
use crate::common::AsCCharPtr;
use crate::errors::{
CopyBitcode, FromLlvmDiag, FromLlvmOptimizationDiag, LlvmError, UnknownCompression,
WithLlvmError, WriteBytecode,
@ -596,9 +597,9 @@ pub(crate) unsafe fn llvm_optimize(
llvm_selfprofiler,
selfprofile_before_pass_callback,
selfprofile_after_pass_callback,
extra_passes.as_ptr().cast(),
extra_passes.as_c_char_ptr(),
extra_passes.len(),
llvm_plugins.as_ptr().cast(),
llvm_plugins.as_c_char_ptr(),
llvm_plugins.len(),
)
};
@ -957,14 +958,13 @@ fn target_is_aix(cgcx: &CodegenContext<LlvmCodegenBackend>) -> bool {
cgcx.opts.target_triple.triple().contains("-aix")
}
//FIXME use c string literals here too
pub(crate) fn bitcode_section_name(cgcx: &CodegenContext<LlvmCodegenBackend>) -> &'static str {
pub(crate) fn bitcode_section_name(cgcx: &CodegenContext<LlvmCodegenBackend>) -> &'static CStr {
if target_is_apple(cgcx) {
"__LLVM,__bitcode\0"
c"__LLVM,__bitcode"
} else if target_is_aix(cgcx) {
".ipa\0"
c".ipa"
} else {
".llvmbc\0"
c".llvmbc"
}
}
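
The manual `\0`-suffixed `&str` literals become C string literals (stable since Rust 1.77), which carry the trailing NUL implicitly and are rejected at compile time if they contain an interior NUL. A quick illustration:

use std::ffi::CStr;

fn main() {
    let s: &'static CStr = c".llvmbc";
    assert_eq!(s.to_bytes(), b".llvmbc"); // contents without the NUL
    assert_eq!(s.to_bytes_with_nul(), b".llvmbc\0"); // the NUL is implicit
}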
@ -1041,8 +1041,7 @@ unsafe fn embed_bitcode(
);
llvm::LLVMSetInitializer(llglobal, llconst);
let section = bitcode_section_name(cgcx);
llvm::LLVMSetSection(llglobal, section.as_ptr().cast());
llvm::set_section(llglobal, bitcode_section_name(cgcx));
llvm::set_linkage(llglobal, llvm::Linkage::PrivateLinkage);
llvm::LLVMSetGlobalConstant(llglobal, llvm::True);
@ -1060,15 +1059,15 @@ unsafe fn embed_bitcode(
} else {
c".llvmcmd"
};
llvm::LLVMSetSection(llglobal, section.as_ptr());
llvm::set_section(llglobal, section);
llvm::set_linkage(llglobal, llvm::Linkage::PrivateLinkage);
} else {
// We need custom section flags, so emit module-level inline assembly.
let section_flags = if cgcx.is_pe_coff { "n" } else { "e" };
let asm = create_section_with_flags_asm(".llvmbc", section_flags, bitcode);
llvm::LLVMAppendModuleInlineAsm(llmod, asm.as_ptr().cast(), asm.len());
llvm::LLVMAppendModuleInlineAsm(llmod, asm.as_c_char_ptr(), asm.len());
let asm = create_section_with_flags_asm(".llvmcmd", section_flags, cmdline.as_bytes());
llvm::LLVMAppendModuleInlineAsm(llmod, asm.as_ptr().cast(), asm.len());
llvm::LLVMAppendModuleInlineAsm(llmod, asm.as_c_char_ptr(), asm.len());
}
}
}

View file

@ -145,10 +145,8 @@ pub(crate) fn compile_codegen_unit(
pub(crate) fn set_link_section(llval: &Value, attrs: &CodegenFnAttrs) {
let Some(sect) = attrs.link_section else { return };
unsafe {
let buf = SmallCStr::new(sect.as_str());
llvm::LLVMSetSection(llval, buf.as_ptr());
}
let buf = SmallCStr::new(sect.as_str());
llvm::set_section(llval, &buf);
}
pub(crate) fn linkage_to_llvm(linkage: Linkage) -> llvm::Linkage {

View file

@ -239,7 +239,6 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
let args = self.check_call("invoke", llty, llfn, args);
let funclet_bundle = funclet.map(|funclet| funclet.bundle());
let funclet_bundle = funclet_bundle.as_ref().map(|b| &*b.raw);
let mut bundles: SmallVec<[_; 2]> = SmallVec::new();
if let Some(funclet_bundle) = funclet_bundle {
bundles.push(funclet_bundle);
@ -250,13 +249,12 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
// Emit KCFI operand bundle
let kcfi_bundle = self.kcfi_operand_bundle(fn_attrs, fn_abi, instance, llfn);
let kcfi_bundle = kcfi_bundle.as_ref().map(|b| &*b.raw);
if let Some(kcfi_bundle) = kcfi_bundle {
if let Some(kcfi_bundle) = kcfi_bundle.as_deref() {
bundles.push(kcfi_bundle);
}
let invoke = unsafe {
llvm::LLVMRustBuildInvoke(
llvm::LLVMBuildInvokeWithOperandBundles(
self.llbuilder,
llty,
llfn,
@ -545,13 +543,13 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
}
let llval = const_llval.unwrap_or_else(|| {
let load = self.load(llty, place.val.llval, place.val.align);
if let abi::Abi::Scalar(scalar) = place.layout.abi {
if let abi::BackendRepr::Scalar(scalar) = place.layout.backend_repr {
scalar_load_metadata(self, load, scalar, place.layout, Size::ZERO);
}
load
});
OperandValue::Immediate(self.to_immediate(llval, place.layout))
} else if let abi::Abi::ScalarPair(a, b) = place.layout.abi {
} else if let abi::BackendRepr::ScalarPair(a, b) = place.layout.backend_repr {
let b_offset = a.size(self).align_to(b.align(self).abi);
let mut load = |i, scalar: abi::Scalar, layout, align, offset| {
@ -1179,7 +1177,6 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
let args = self.check_call("call", llty, llfn, args);
let funclet_bundle = funclet.map(|funclet| funclet.bundle());
let funclet_bundle = funclet_bundle.as_ref().map(|b| &*b.raw);
let mut bundles: SmallVec<[_; 2]> = SmallVec::new();
if let Some(funclet_bundle) = funclet_bundle {
bundles.push(funclet_bundle);
@ -1190,13 +1187,12 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
// Emit KCFI operand bundle
let kcfi_bundle = self.kcfi_operand_bundle(fn_attrs, fn_abi, instance, llfn);
let kcfi_bundle = kcfi_bundle.as_ref().map(|b| &*b.raw);
if let Some(kcfi_bundle) = kcfi_bundle {
if let Some(kcfi_bundle) = kcfi_bundle.as_deref() {
bundles.push(kcfi_bundle);
}
let call = unsafe {
llvm::LLVMRustBuildCall(
llvm::LLVMBuildCallWithOperandBundles(
self.llbuilder,
llty,
llfn,
@ -1204,6 +1200,7 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
args.len() as c_uint,
bundles.as_ptr(),
bundles.len() as c_uint,
c"".as_ptr(),
)
};
if let Some(fn_abi) = fn_abi {
@ -1509,7 +1506,6 @@ impl<'a, 'll, 'tcx> Builder<'a, 'll, 'tcx> {
let args = self.check_call("callbr", llty, llfn, args);
let funclet_bundle = funclet.map(|funclet| funclet.bundle());
let funclet_bundle = funclet_bundle.as_ref().map(|b| &*b.raw);
let mut bundles: SmallVec<[_; 2]> = SmallVec::new();
if let Some(funclet_bundle) = funclet_bundle {
bundles.push(funclet_bundle);
@ -1520,13 +1516,12 @@ impl<'a, 'll, 'tcx> Builder<'a, 'll, 'tcx> {
// Emit KCFI operand bundle
let kcfi_bundle = self.kcfi_operand_bundle(fn_attrs, fn_abi, instance, llfn);
let kcfi_bundle = kcfi_bundle.as_ref().map(|b| &*b.raw);
if let Some(kcfi_bundle) = kcfi_bundle {
if let Some(kcfi_bundle) = kcfi_bundle.as_deref() {
bundles.push(kcfi_bundle);
}
let callbr = unsafe {
llvm::LLVMRustBuildCallBr(
llvm::LLVMBuildCallBr(
self.llbuilder,
llty,
llfn,
@ -1601,7 +1596,7 @@ impl<'a, 'll, 'tcx> Builder<'a, 'll, 'tcx> {
fn_abi: Option<&FnAbi<'tcx, Ty<'tcx>>>,
instance: Option<Instance<'tcx>>,
llfn: &'ll Value,
) -> Option<llvm::OperandBundleDef<'ll>> {
) -> Option<llvm::OperandBundleOwned<'ll>> {
let is_indirect_call = unsafe { llvm::LLVMRustIsNonGVFunctionPointerTy(llfn) };
let kcfi_bundle = if self.tcx.sess.is_sanitizer_kcfi_enabled()
&& let Some(fn_abi) = fn_abi
@ -1627,7 +1622,7 @@ impl<'a, 'll, 'tcx> Builder<'a, 'll, 'tcx> {
kcfi::typeid_for_fnabi(self.tcx, fn_abi, options)
};
Some(llvm::OperandBundleDef::new("kcfi", &[self.const_u32(kcfi_typeid)]))
Some(llvm::OperandBundleOwned::new("kcfi", &[self.const_u32(kcfi_typeid)]))
} else {
None
};

View file

@ -17,7 +17,7 @@ use tracing::debug;
use crate::consts::const_alloc_to_llvm;
pub(crate) use crate::context::CodegenCx;
use crate::llvm::{self, BasicBlock, Bool, ConstantInt, False, Metadata, OperandBundleDef, True};
use crate::llvm::{self, BasicBlock, Bool, ConstantInt, False, Metadata, True};
use crate::type_::Type;
use crate::value::Value;
@ -63,19 +63,19 @@ use crate::value::Value;
/// the `OperandBundleDef` value created for MSVC landing pads.
pub(crate) struct Funclet<'ll> {
cleanuppad: &'ll Value,
operand: OperandBundleDef<'ll>,
operand: llvm::OperandBundleOwned<'ll>,
}
impl<'ll> Funclet<'ll> {
pub(crate) fn new(cleanuppad: &'ll Value) -> Self {
Funclet { cleanuppad, operand: OperandBundleDef::new("funclet", &[cleanuppad]) }
Funclet { cleanuppad, operand: llvm::OperandBundleOwned::new("funclet", &[cleanuppad]) }
}
pub(crate) fn cleanuppad(&self) -> &'ll Value {
self.cleanuppad
}
pub(crate) fn bundle(&self) -> &OperandBundleDef<'ll> {
pub(crate) fn bundle(&self) -> &llvm::OperandBundle<'ll> {
&self.operand
}
}
@ -392,3 +392,21 @@ pub(crate) fn get_dllimport<'tcx>(
tcx.native_library(id)
.and_then(|lib| lib.dll_imports.iter().find(|di| di.name.as_str() == name))
}
/// Extension trait for explicit casts to `*const c_char`.
pub(crate) trait AsCCharPtr {
/// Equivalent to `self.as_ptr().cast()`, but only casts to `*const c_char`.
fn as_c_char_ptr(&self) -> *const c_char;
}
impl AsCCharPtr for str {
fn as_c_char_ptr(&self) -> *const c_char {
self.as_ptr().cast()
}
}
impl AsCCharPtr for [u8] {
fn as_c_char_ptr(&self) -> *const c_char {
self.as_ptr().cast()
}
}
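
A short usage sketch of the trait defined above, assuming it is in scope. The point of `as_c_char_ptr` over `as_ptr().cast()` is that the cast target is pinned at the call site instead of being inferred from the FFI signature, so a changed signature cannot silently re-aim the cast.

use std::os::raw::c_char;

fn main() {
    let name = "__rust_alloc";
    // Both expressions yield the same pointer; only the explicitness differs.
    let inferred: *const c_char = name.as_ptr().cast();
    let explicit: *const c_char = name.as_c_char_ptr();
    assert_eq!(inferred, explicit);
}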

View file

@ -19,7 +19,7 @@ use rustc_target::abi::{
};
use tracing::{debug, instrument, trace};
use crate::common::CodegenCx;
use crate::common::{AsCCharPtr, CodegenCx};
use crate::errors::{
InvalidMinimumAlignmentNotPowerOfTwo, InvalidMinimumAlignmentTooLarge, SymbolAlreadyDefined,
};
@ -400,7 +400,7 @@ impl<'ll> CodegenCx<'ll, '_> {
let new_g = llvm::LLVMRustGetOrInsertGlobal(
self.llmod,
name.as_ptr().cast(),
name.as_c_char_ptr(),
name.len(),
val_llty,
);
@ -451,7 +451,7 @@ impl<'ll> CodegenCx<'ll, '_> {
if let Some(section) = attrs.link_section {
let section = llvm::LLVMMDStringInContext2(
self.llcx,
section.as_str().as_ptr().cast(),
section.as_str().as_c_char_ptr(),
section.as_str().len(),
);
assert!(alloc.provenance().ptrs().is_empty());
@ -462,7 +462,7 @@ impl<'ll> CodegenCx<'ll, '_> {
let bytes =
alloc.inspect_with_uninit_and_ptr_outside_interpreter(0..alloc.len());
let alloc =
llvm::LLVMMDStringInContext2(self.llcx, bytes.as_ptr().cast(), bytes.len());
llvm::LLVMMDStringInContext2(self.llcx, bytes.as_c_char_ptr(), bytes.len());
let data = [section, alloc];
let meta = llvm::LLVMMDNodeInContext2(self.llcx, data.as_ptr(), data.len());
let val = llvm::LLVMMetadataAsValue(self.llcx, meta);

View file

@ -29,6 +29,7 @@ use smallvec::SmallVec;
use crate::back::write::to_llvm_code_model;
use crate::callee::get_fn;
use crate::common::AsCCharPtr;
use crate::debuginfo::metadata::apply_vcall_visibility_metadata;
use crate::llvm::{Metadata, MetadataType};
use crate::type_::Type;
@ -147,6 +148,11 @@ pub(crate) unsafe fn create_module<'ll>(
target_data_layout =
target_data_layout.replace("-p270:32:32-p271:32:32-p272:64:64", "");
}
if sess.target.arch.starts_with("sparc") {
// LLVM 20 updates the sparc layout to correctly align 128 bit integers to 128 bit.
// See https://github.com/llvm/llvm-project/pull/106951
target_data_layout = target_data_layout.replace("-i128:128", "");
}
}
// Ensure the data-layout values hardcoded remain the defaults.
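
As with the x86 address-space components a few lines up, the fix is plain string surgery on the hardcoded data layout: when building against an LLVM older than 20, the `-i128:128` component that the sparc target specs now carry is stripped. Illustrative only; the layout string below is a hypothetical stand-in, not the real sparcv9 spec.

fn main() {
    // Hypothetical sparc data layout carrying the LLVM 20 i128 alignment.
    let dl = "E-m:e-i64:64-i128:128-n32:64-S128";
    // Pre-20 LLVM does not know the component, so it is dropped.
    assert_eq!(dl.replace("-i128:128", ""), "E-m:e-i64:64-n32:64-S128");
}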
@ -209,133 +215,111 @@ pub(crate) unsafe fn create_module<'ll>(
// If skipping the PLT is enabled, we need to add some module metadata
// to ensure intrinsic calls don't use it.
if !sess.needs_plt() {
let avoid_plt = c"RtLibUseGOT".as_ptr();
unsafe {
llvm::LLVMRustAddModuleFlagU32(llmod, llvm::LLVMModFlagBehavior::Warning, avoid_plt, 1);
}
llvm::add_module_flag_u32(llmod, llvm::ModuleFlagMergeBehavior::Warning, "RtLibUseGOT", 1);
}
// Enable canonical jump tables if CFI is enabled. (See https://reviews.llvm.org/D65629.)
if sess.is_sanitizer_cfi_canonical_jump_tables_enabled() && sess.is_sanitizer_cfi_enabled() {
let canonical_jump_tables = c"CFI Canonical Jump Tables".as_ptr();
unsafe {
llvm::LLVMRustAddModuleFlagU32(
llmod,
llvm::LLVMModFlagBehavior::Override,
canonical_jump_tables,
1,
);
}
llvm::add_module_flag_u32(
llmod,
llvm::ModuleFlagMergeBehavior::Override,
"CFI Canonical Jump Tables",
1,
);
}
// If we're normalizing integers with CFI, ensure LLVM generated functions do the same.
// See https://github.com/llvm/llvm-project/pull/104826
if sess.is_sanitizer_cfi_normalize_integers_enabled() {
let cfi_normalize_integers = c"cfi-normalize-integers".as_ptr().cast();
unsafe {
llvm::LLVMRustAddModuleFlagU32(
llmod,
llvm::LLVMModFlagBehavior::Override,
cfi_normalize_integers,
1,
);
}
llvm::add_module_flag_u32(
llmod,
llvm::ModuleFlagMergeBehavior::Override,
"cfi-normalize-integers",
1,
);
}
// Enable LTO unit splitting if specified or if CFI is enabled. (See
// https://reviews.llvm.org/D53891.)
if sess.is_split_lto_unit_enabled() || sess.is_sanitizer_cfi_enabled() {
let enable_split_lto_unit = c"EnableSplitLTOUnit".as_ptr();
unsafe {
llvm::LLVMRustAddModuleFlagU32(
llmod,
llvm::LLVMModFlagBehavior::Override,
enable_split_lto_unit,
1,
);
}
llvm::add_module_flag_u32(
llmod,
llvm::ModuleFlagMergeBehavior::Override,
"EnableSplitLTOUnit",
1,
);
}
// Add "kcfi" module flag if KCFI is enabled. (See https://reviews.llvm.org/D119296.)
if sess.is_sanitizer_kcfi_enabled() {
let kcfi = c"kcfi".as_ptr();
unsafe {
llvm::LLVMRustAddModuleFlagU32(llmod, llvm::LLVMModFlagBehavior::Override, kcfi, 1);
}
llvm::add_module_flag_u32(llmod, llvm::ModuleFlagMergeBehavior::Override, "kcfi", 1);
// Add "kcfi-offset" module flag with -Z patchable-function-entry (See
// https://reviews.llvm.org/D141172).
let pfe =
PatchableFunctionEntry::from_config(sess.opts.unstable_opts.patchable_function_entry);
if pfe.prefix() > 0 {
let kcfi_offset = c"kcfi-offset".as_ptr().cast();
unsafe {
llvm::LLVMRustAddModuleFlagU32(
llmod,
llvm::LLVMModFlagBehavior::Override,
kcfi_offset,
pfe.prefix().into(),
);
}
llvm::add_module_flag_u32(
llmod,
llvm::ModuleFlagMergeBehavior::Override,
"kcfi-offset",
pfe.prefix().into(),
);
}
}
// Control Flow Guard is currently only supported by the MSVC linker on Windows.
if sess.target.is_like_msvc {
unsafe {
match sess.opts.cg.control_flow_guard {
CFGuard::Disabled => {}
CFGuard::NoChecks => {
// Set `cfguard=1` module flag to emit metadata only.
llvm::LLVMRustAddModuleFlagU32(
llmod,
llvm::LLVMModFlagBehavior::Warning,
c"cfguard".as_ptr() as *const _,
1,
)
}
CFGuard::Checks => {
// Set `cfguard=2` module flag to emit metadata and checks.
llvm::LLVMRustAddModuleFlagU32(
llmod,
llvm::LLVMModFlagBehavior::Warning,
c"cfguard".as_ptr() as *const _,
2,
)
}
match sess.opts.cg.control_flow_guard {
CFGuard::Disabled => {}
CFGuard::NoChecks => {
// Set `cfguard=1` module flag to emit metadata only.
llvm::add_module_flag_u32(
llmod,
llvm::ModuleFlagMergeBehavior::Warning,
"cfguard",
1,
);
}
CFGuard::Checks => {
// Set `cfguard=2` module flag to emit metadata and checks.
llvm::add_module_flag_u32(
llmod,
llvm::ModuleFlagMergeBehavior::Warning,
"cfguard",
2,
);
}
}
}
if let Some(BranchProtection { bti, pac_ret }) = sess.opts.unstable_opts.branch_protection {
if sess.target.arch == "aarch64" {
unsafe {
llvm::LLVMRustAddModuleFlagU32(
llmod,
llvm::LLVMModFlagBehavior::Min,
c"branch-target-enforcement".as_ptr(),
bti.into(),
);
llvm::LLVMRustAddModuleFlagU32(
llmod,
llvm::LLVMModFlagBehavior::Min,
c"sign-return-address".as_ptr(),
pac_ret.is_some().into(),
);
let pac_opts = pac_ret.unwrap_or(PacRet { leaf: false, key: PAuthKey::A });
llvm::LLVMRustAddModuleFlagU32(
llmod,
llvm::LLVMModFlagBehavior::Min,
c"sign-return-address-all".as_ptr(),
pac_opts.leaf.into(),
);
llvm::LLVMRustAddModuleFlagU32(
llmod,
llvm::LLVMModFlagBehavior::Min,
c"sign-return-address-with-bkey".as_ptr(),
u32::from(pac_opts.key == PAuthKey::B),
);
}
llvm::add_module_flag_u32(
llmod,
llvm::ModuleFlagMergeBehavior::Min,
"branch-target-enforcement",
bti.into(),
);
llvm::add_module_flag_u32(
llmod,
llvm::ModuleFlagMergeBehavior::Min,
"sign-return-address",
pac_ret.is_some().into(),
);
let pac_opts = pac_ret.unwrap_or(PacRet { leaf: false, key: PAuthKey::A });
llvm::add_module_flag_u32(
llmod,
llvm::ModuleFlagMergeBehavior::Min,
"sign-return-address-all",
pac_opts.leaf.into(),
);
llvm::add_module_flag_u32(
llmod,
llvm::ModuleFlagMergeBehavior::Min,
"sign-return-address-with-bkey",
u32::from(pac_opts.key == PAuthKey::B),
);
} else {
bug!(
"branch-protection used on non-AArch64 target; \
@ -346,59 +330,46 @@ pub(crate) unsafe fn create_module<'ll>(
// Pass on the control-flow protection flags to LLVM (equivalent to `-fcf-protection` in Clang).
if let CFProtection::Branch | CFProtection::Full = sess.opts.unstable_opts.cf_protection {
unsafe {
llvm::LLVMRustAddModuleFlagU32(
llmod,
llvm::LLVMModFlagBehavior::Override,
c"cf-protection-branch".as_ptr(),
1,
);
}
llvm::add_module_flag_u32(
llmod,
llvm::ModuleFlagMergeBehavior::Override,
"cf-protection-branch",
1,
);
}
if let CFProtection::Return | CFProtection::Full = sess.opts.unstable_opts.cf_protection {
unsafe {
llvm::LLVMRustAddModuleFlagU32(
llmod,
llvm::LLVMModFlagBehavior::Override,
c"cf-protection-return".as_ptr(),
1,
);
}
llvm::add_module_flag_u32(
llmod,
llvm::ModuleFlagMergeBehavior::Override,
"cf-protection-return",
1,
);
}
if sess.opts.unstable_opts.virtual_function_elimination {
unsafe {
llvm::LLVMRustAddModuleFlagU32(
llmod,
llvm::LLVMModFlagBehavior::Error,
c"Virtual Function Elim".as_ptr(),
1,
);
}
llvm::add_module_flag_u32(
llmod,
llvm::ModuleFlagMergeBehavior::Error,
"Virtual Function Elim",
1,
);
}
// Set module flag to enable Windows EHCont Guard (/guard:ehcont).
if sess.opts.unstable_opts.ehcont_guard {
unsafe {
llvm::LLVMRustAddModuleFlagU32(
llmod,
llvm::LLVMModFlagBehavior::Warning,
c"ehcontguard".as_ptr() as *const _,
1,
)
}
llvm::add_module_flag_u32(llmod, llvm::ModuleFlagMergeBehavior::Warning, "ehcontguard", 1);
}
match sess.opts.unstable_opts.function_return {
FunctionReturn::Keep => {}
FunctionReturn::ThunkExtern => unsafe {
llvm::LLVMRustAddModuleFlagU32(
FunctionReturn::ThunkExtern => {
llvm::add_module_flag_u32(
llmod,
llvm::LLVMModFlagBehavior::Override,
c"function_return_thunk_extern".as_ptr(),
llvm::ModuleFlagMergeBehavior::Override,
"function_return_thunk_extern",
1,
)
},
);
}
}
match (sess.opts.unstable_opts.small_data_threshold, sess.target.small_data_threshold_support())
@ -406,15 +377,12 @@ pub(crate) unsafe fn create_module<'ll>(
// Set up the small-data optimization limit for architectures that use
// an LLVM module flag to control this.
(Some(threshold), SmallDataThresholdSupport::LlvmModuleFlag(flag)) => {
let flag = SmallCStr::new(flag.as_ref());
unsafe {
llvm::LLVMRustAddModuleFlagU32(
llmod,
llvm::LLVMModFlagBehavior::Error,
flag.as_c_str().as_ptr(),
threshold as u32,
)
}
llvm::add_module_flag_u32(
llmod,
llvm::ModuleFlagMergeBehavior::Error,
&flag,
threshold as u32,
);
}
_ => (),
};
@ -429,7 +397,7 @@ pub(crate) unsafe fn create_module<'ll>(
let name_metadata = unsafe {
llvm::LLVMMDStringInContext2(
llcx,
rustc_producer.as_ptr().cast(),
rustc_producer.as_c_char_ptr(),
rustc_producer.as_bytes().len(),
)
};
@ -448,33 +416,29 @@ pub(crate) unsafe fn create_module<'ll>(
// If llvm_abiname is empty, emit nothing.
let llvm_abiname = &sess.target.options.llvm_abiname;
if matches!(sess.target.arch.as_ref(), "riscv32" | "riscv64") && !llvm_abiname.is_empty() {
unsafe {
llvm::LLVMRustAddModuleFlagString(
llmod,
llvm::LLVMModFlagBehavior::Error,
c"target-abi".as_ptr(),
llvm_abiname.as_ptr().cast(),
llvm_abiname.len(),
);
}
llvm::add_module_flag_str(
llmod,
llvm::ModuleFlagMergeBehavior::Error,
"target-abi",
llvm_abiname,
);
}
// Add module flags specified via -Z llvm_module_flag
for (key, value, behavior) in &sess.opts.unstable_opts.llvm_module_flag {
let key = format!("{key}\0");
let behavior = match behavior.as_str() {
"error" => llvm::LLVMModFlagBehavior::Error,
"warning" => llvm::LLVMModFlagBehavior::Warning,
"require" => llvm::LLVMModFlagBehavior::Require,
"override" => llvm::LLVMModFlagBehavior::Override,
"append" => llvm::LLVMModFlagBehavior::Append,
"appendunique" => llvm::LLVMModFlagBehavior::AppendUnique,
"max" => llvm::LLVMModFlagBehavior::Max,
"min" => llvm::LLVMModFlagBehavior::Min,
for (key, value, merge_behavior) in &sess.opts.unstable_opts.llvm_module_flag {
let merge_behavior = match merge_behavior.as_str() {
"error" => llvm::ModuleFlagMergeBehavior::Error,
"warning" => llvm::ModuleFlagMergeBehavior::Warning,
"require" => llvm::ModuleFlagMergeBehavior::Require,
"override" => llvm::ModuleFlagMergeBehavior::Override,
"append" => llvm::ModuleFlagMergeBehavior::Append,
"appendunique" => llvm::ModuleFlagMergeBehavior::AppendUnique,
"max" => llvm::ModuleFlagMergeBehavior::Max,
"min" => llvm::ModuleFlagMergeBehavior::Min,
// We already checked this during option parsing
_ => unreachable!(),
};
unsafe { llvm::LLVMRustAddModuleFlagU32(llmod, behavior, key.as_ptr().cast(), *value) }
llvm::add_module_flag_u32(llmod, merge_behavior, key, *value);
}
llmod
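
All of the module-flag call sites above migrate from raw `LLVMRustAddModuleFlagU32` calls with hand-built C strings to a safe `llvm::add_module_flag_u32` helper that takes `&str`. A plausible shape for that helper, sketched against the FFI signature visible in the old call sites; the real wrapper (and whether the underlying entry point now takes a pointer-and-length key instead) may differ.

use std::ffi::CString;
use std::os::raw::c_char;

// Stand-ins so the sketch is self-contained; the real types live in
// rustc_codegen_llvm::llvm.
pub struct Module(());
#[repr(C)]
#[derive(Clone, Copy)]
pub enum ModuleFlagMergeBehavior {
    Error = 1,
    Warning = 2,
    Require = 3,
    // remaining variants elided; see the ffi.rs hunk later in this diff
}

unsafe extern "C" {
    // Signature assumed from the old call sites above.
    fn LLVMRustAddModuleFlagU32(
        m: *const Module,
        behavior: ModuleFlagMergeBehavior,
        key: *const c_char,
        value: u32,
    );
}

pub fn add_module_flag_u32(
    llmod: *const Module,
    behavior: ModuleFlagMergeBehavior,
    key: &str,
    value: u32,
) {
    // CString guarantees the trailing NUL and rejects interior NULs,
    // replacing the old `format!("{key}\0")` dance at the -Z flag call site.
    let key = CString::new(key).expect("module flag key must not contain NUL");
    unsafe { LLVMRustAddModuleFlagU32(llmod, behavior, key.as_ptr(), value) }
}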
@ -595,6 +559,7 @@ impl<'ll, 'tcx> CodegenCx<'ll, 'tcx> {
/// Extra state that is only available when coverage instrumentation is enabled.
#[inline]
#[track_caller]
pub(crate) fn coverage_cx(&self) -> &coverageinfo::CrateCoverageContext<'ll, 'tcx> {
self.coverage_cx.as_ref().expect("only called when coverage instrumentation is enabled")
}
@ -606,7 +571,7 @@ impl<'ll, 'tcx> CodegenCx<'ll, 'tcx> {
let g = llvm::LLVMAddGlobal(self.llmod, self.val_ty(array), name.as_ptr());
llvm::LLVMSetInitializer(g, array);
llvm::set_linkage(g, llvm::Linkage::AppendingLinkage);
llvm::LLVMSetSection(g, c"llvm.metadata".as_ptr());
llvm::set_section(g, c"llvm.metadata");
}
}
}

View file

@ -54,7 +54,11 @@ pub(crate) fn finalize(cx: &CodegenCx<'_, '_>) {
add_unused_functions(cx);
}
let function_coverage_map = cx.coverage_cx().take_function_coverage_map();
// FIXME(#132395): Can this be none even when coverage is enabled?
let function_coverage_map = match cx.coverage_cx {
Some(ref cx) => cx.take_function_coverage_map(),
None => return,
};
if function_coverage_map.is_empty() {
// This module has no functions with coverage instrumentation
return;

View file

@ -14,7 +14,7 @@ use rustc_target::abi::Size;
use tracing::{debug, instrument};
use crate::builder::Builder;
use crate::common::CodegenCx;
use crate::common::{AsCCharPtr, CodegenCx};
use crate::coverageinfo::map_data::FunctionCoverageCollector;
use crate::llvm;
@ -152,7 +152,12 @@ impl<'tcx> CoverageInfoBuilderMethods<'tcx> for Builder<'_, '_, 'tcx> {
return;
};
let mut coverage_map = bx.coverage_cx().function_coverage_map.borrow_mut();
// FIXME(#132395): Unwrapping `coverage_cx` here has led to ICEs in the
// wild, so keep this early-return until we understand why.
let mut coverage_map = match bx.coverage_cx {
Some(ref cx) => cx.function_coverage_map.borrow_mut(),
None => return,
};
let func_coverage = coverage_map
.entry(instance)
.or_insert_with(|| FunctionCoverageCollector::new(instance, function_coverage_info));
@ -236,7 +241,7 @@ fn create_pgo_func_name_var<'ll, 'tcx>(
unsafe {
llvm::LLVMRustCoverageCreatePGOFuncNameVar(
llfn,
mangled_fn_name.as_ptr().cast(),
mangled_fn_name.as_c_char_ptr(),
mangled_fn_name.len(),
)
}
@ -248,7 +253,7 @@ pub(crate) fn write_filenames_section_to_buffer<'a>(
) {
let (pointers, lengths) = filenames
.into_iter()
.map(|s: &str| (s.as_ptr().cast(), s.len()))
.map(|s: &str| (s.as_c_char_ptr(), s.len()))
.unzip::<_, _, Vec<_>, Vec<_>>();
unsafe {
@ -291,7 +296,7 @@ pub(crate) fn write_mapping_to_buffer(
}
pub(crate) fn hash_bytes(bytes: &[u8]) -> u64 {
unsafe { llvm::LLVMRustCoverageHashByteArray(bytes.as_ptr().cast(), bytes.len()) }
unsafe { llvm::LLVMRustCoverageHashByteArray(bytes.as_c_char_ptr(), bytes.len()) }
}
pub(crate) fn mapping_version() -> u32 {

View file

@ -72,7 +72,7 @@ pub(crate) fn get_or_insert_gdb_debug_scripts_section_global<'ll>(
let section_var = cx
.define_global(section_var_name, llvm_type)
.unwrap_or_else(|| bug!("symbol `{}` is already defined", section_var_name));
llvm::LLVMSetSection(section_var, c".debug_gdb_scripts".as_ptr());
llvm::set_section(section_var, c".debug_gdb_scripts");
llvm::LLVMSetInitializer(section_var, cx.const_bytes(section_contents));
llvm::LLVMSetGlobalConstant(section_var, llvm::True);
llvm::LLVMSetUnnamedAddress(section_var, llvm::UnnamedAddr::Global);

View file

@ -32,7 +32,7 @@ use super::type_names::{compute_debuginfo_type_name, compute_debuginfo_vtable_na
use super::utils::{
DIB, create_DIArray, debug_context, get_namespace_for_item, is_node_local_to_unit,
};
use crate::common::CodegenCx;
use crate::common::{AsCCharPtr, CodegenCx};
use crate::debuginfo::metadata::type_map::build_type_with_children;
use crate::debuginfo::utils::{WidePtrKind, wide_pointer_kind};
use crate::llvm::debuginfo::{
@ -190,7 +190,7 @@ fn build_pointer_or_reference_di_node<'ll, 'tcx>(
data_layout.pointer_size.bits(),
data_layout.pointer_align.abi.bits() as u32,
0, // Ignore DWARF address space.
ptr_type_debuginfo_name.as_ptr().cast(),
ptr_type_debuginfo_name.as_c_char_ptr(),
ptr_type_debuginfo_name.len(),
)
};
@ -348,7 +348,7 @@ fn build_subroutine_type_di_node<'ll, 'tcx>(
size,
align,
0, // Ignore DWARF address space.
name.as_ptr().cast(),
name.as_c_char_ptr(),
name.len(),
)
};
@ -518,7 +518,7 @@ fn recursion_marker_type_di_node<'ll, 'tcx>(cx: &CodegenCx<'ll, 'tcx>) -> &'ll D
let name = "<recur_type>";
llvm::LLVMRustDIBuilderCreateBasicType(
DIB(cx),
name.as_ptr().cast(),
name.as_c_char_ptr(),
name.len(),
cx.tcx.data_layout.pointer_size.bits(),
DW_ATE_unsigned,
@ -640,14 +640,14 @@ pub(crate) fn file_metadata<'ll>(cx: &CodegenCx<'ll, '_>, source_file: &SourceFi
unsafe {
llvm::LLVMRustDIBuilderCreateFile(
DIB(cx),
file_name.as_ptr().cast(),
file_name.as_c_char_ptr(),
file_name.len(),
directory.as_ptr().cast(),
directory.as_c_char_ptr(),
directory.len(),
hash_kind,
hash_value.as_ptr().cast(),
hash_value.as_c_char_ptr(),
hash_value.len(),
source.map_or(ptr::null(), |x| x.as_ptr().cast()),
source.map_or(ptr::null(), |x| x.as_c_char_ptr()),
source.map_or(0, |x| x.len()),
)
}
@ -662,12 +662,12 @@ fn unknown_file_metadata<'ll>(cx: &CodegenCx<'ll, '_>) -> &'ll DIFile {
llvm::LLVMRustDIBuilderCreateFile(
DIB(cx),
file_name.as_ptr().cast(),
file_name.as_c_char_ptr(),
file_name.len(),
directory.as_ptr().cast(),
directory.as_c_char_ptr(),
directory.len(),
llvm::ChecksumKind::None,
hash_value.as_ptr().cast(),
hash_value.as_c_char_ptr(),
hash_value.len(),
ptr::null(),
0,
@ -788,7 +788,7 @@ fn build_basic_type_di_node<'ll, 'tcx>(
let ty_di_node = unsafe {
llvm::LLVMRustDIBuilderCreateBasicType(
DIB(cx),
name.as_ptr().cast(),
name.as_c_char_ptr(),
name.len(),
cx.size_of(t).bits(),
encoding,
@ -810,7 +810,7 @@ fn build_basic_type_di_node<'ll, 'tcx>(
llvm::LLVMRustDIBuilderCreateTypedef(
DIB(cx),
ty_di_node,
typedef_name.as_ptr().cast(),
typedef_name.as_c_char_ptr(),
typedef_name.len(),
unknown_file_metadata(cx),
0,
@ -861,7 +861,7 @@ fn build_param_type_di_node<'ll, 'tcx>(
di_node: unsafe {
llvm::LLVMRustDIBuilderCreateBasicType(
DIB(cx),
name.as_ptr().cast(),
name.as_c_char_ptr(),
name.len(),
Size::ZERO.bits(),
DW_ATE_unsigned,
@ -948,9 +948,9 @@ pub(crate) fn build_compile_unit_di_node<'ll, 'tcx>(
unsafe {
let compile_unit_file = llvm::LLVMRustDIBuilderCreateFile(
debug_context.builder,
name_in_debuginfo.as_ptr().cast(),
name_in_debuginfo.as_c_char_ptr(),
name_in_debuginfo.len(),
work_dir.as_ptr().cast(),
work_dir.as_c_char_ptr(),
work_dir.len(),
llvm::ChecksumKind::None,
ptr::null(),
@ -963,7 +963,7 @@ pub(crate) fn build_compile_unit_di_node<'ll, 'tcx>(
debug_context.builder,
DW_LANG_RUST,
compile_unit_file,
producer.as_ptr().cast(),
producer.as_c_char_ptr(),
producer.len(),
tcx.sess.opts.optimize != config::OptLevel::No,
c"".as_ptr(),
@ -971,7 +971,7 @@ pub(crate) fn build_compile_unit_di_node<'ll, 'tcx>(
// NB: this doesn't actually have any perceptible effect, it seems. LLVM will instead
// put the path supplied to `MCSplitDwarfFile` into the debug info of the final
// output(s).
split_name.as_ptr().cast(),
split_name.as_c_char_ptr(),
split_name.len(),
kind,
0,
@ -1022,7 +1022,7 @@ fn build_field_di_node<'ll, 'tcx>(
llvm::LLVMRustDIBuilderCreateMemberType(
DIB(cx),
owner,
name.as_ptr().cast(),
name.as_c_char_ptr(),
name.len(),
unknown_file_metadata(cx),
UNKNOWN_LINE_NUMBER,
@ -1306,7 +1306,7 @@ fn build_generic_type_param_di_nodes<'ll, 'tcx>(
llvm::LLVMRustDIBuilderCreateTemplateTypeParameter(
DIB(cx),
None,
name.as_ptr().cast(),
name.as_c_char_ptr(),
name.len(),
actual_type_di_node,
)
@ -1382,9 +1382,9 @@ pub(crate) fn build_global_var_di_node<'ll>(
llvm::LLVMRustDIBuilderCreateStaticVariable(
DIB(cx),
Some(var_scope),
var_name.as_ptr().cast(),
var_name.as_c_char_ptr(),
var_name.len(),
linkage_name.as_ptr().cast(),
linkage_name.as_c_char_ptr(),
linkage_name.len(),
file_metadata,
line_number,
@ -1602,9 +1602,9 @@ pub(crate) fn create_vtable_di_node<'ll, 'tcx>(
llvm::LLVMRustDIBuilderCreateStaticVariable(
DIB(cx),
NO_SCOPE_METADATA,
vtable_name.as_ptr().cast(),
vtable_name.as_c_char_ptr(),
vtable_name.len(),
linkage_name.as_ptr().cast(),
linkage_name.as_c_char_ptr(),
linkage_name.len(),
unknown_file_metadata(cx),
UNKNOWN_LINE_NUMBER,

View file

@ -11,7 +11,7 @@ use rustc_middle::ty::{self, AdtDef, CoroutineArgs, CoroutineArgsExt, Ty};
use rustc_target::abi::{Align, Endian, Size, TagEncoding, VariantIdx, Variants};
use smallvec::smallvec;
use crate::common::CodegenCx;
use crate::common::{AsCCharPtr, CodegenCx};
use crate::debuginfo::metadata::enums::DiscrResult;
use crate::debuginfo::metadata::type_map::{self, Stub, UniqueTypeId};
use crate::debuginfo::metadata::{
@ -359,7 +359,7 @@ fn build_single_variant_union_fields<'ll, 'tcx>(
llvm::LLVMRustDIBuilderCreateStaticMemberType(
DIB(cx),
enum_type_di_node,
TAG_FIELD_NAME.as_ptr().cast(),
TAG_FIELD_NAME.as_c_char_ptr(),
TAG_FIELD_NAME.len(),
unknown_file_metadata(cx),
UNKNOWN_LINE_NUMBER,
@ -537,7 +537,7 @@ fn build_variant_struct_wrapper_type_di_node<'ll, 'tcx>(
llvm::LLVMRustDIBuilderCreateStaticMemberType(
DIB(cx),
wrapper_struct_type_di_node,
name.as_ptr().cast(),
name.as_c_char_ptr(),
name.len(),
unknown_file_metadata(cx),
UNKNOWN_LINE_NUMBER,
@ -785,7 +785,7 @@ fn build_union_fields_for_direct_tag_enum_or_coroutine<'ll, 'tcx>(
llvm::LLVMRustDIBuilderCreateMemberType(
DIB(cx),
enum_type_di_node,
field_name.as_ptr().cast(),
field_name.as_c_char_ptr(),
field_name.len(),
file_di_node,
line_number,

View file

@ -13,7 +13,7 @@ use rustc_target::abi::{FieldIdx, TagEncoding, VariantIdx, Variants};
use super::type_map::{DINodeCreationResult, UniqueTypeId};
use super::{SmallVec, size_and_align_of};
use crate::common::CodegenCx;
use crate::common::{AsCCharPtr, CodegenCx};
use crate::debuginfo::metadata::type_map::{self, Stub};
use crate::debuginfo::metadata::{
UNKNOWN_LINE_NUMBER, build_field_di_node, build_generic_type_param_di_nodes, type_di_node,
@ -106,7 +106,7 @@ fn build_enumeration_type_di_node<'ll, 'tcx>(
let value = [value as u64, (value >> 64) as u64];
Some(llvm::LLVMRustDIBuilderCreateEnumerator(
DIB(cx),
name.as_ptr().cast(),
name.as_c_char_ptr(),
name.len(),
value.as_ptr(),
size.bits() as libc::c_uint,
@ -119,7 +119,7 @@ fn build_enumeration_type_di_node<'ll, 'tcx>(
llvm::LLVMRustDIBuilderCreateEnumerationType(
DIB(cx),
containing_scope,
type_name.as_ptr().cast(),
type_name.as_c_char_ptr(),
type_name.len(),
unknown_file_metadata(cx),
UNKNOWN_LINE_NUMBER,

View file

@ -10,7 +10,7 @@ use rustc_middle::ty::{self};
use rustc_target::abi::{Size, TagEncoding, VariantIdx, Variants};
use smallvec::smallvec;
use crate::common::CodegenCx;
use crate::common::{AsCCharPtr, CodegenCx};
use crate::debuginfo::metadata::type_map::{self, Stub, StubInfo, UniqueTypeId};
use crate::debuginfo::metadata::{
DINodeCreationResult, NO_GENERICS, SmallVec, UNKNOWN_LINE_NUMBER, file_metadata,
@ -244,7 +244,7 @@ fn build_enum_variant_part_di_node<'ll, 'tcx>(
llvm::LLVMRustDIBuilderCreateVariantPart(
DIB(cx),
enum_type_di_node,
variant_part_name.as_ptr().cast(),
variant_part_name.as_c_char_ptr(),
variant_part_name.len(),
unknown_file_metadata(cx),
UNKNOWN_LINE_NUMBER,
@ -253,7 +253,7 @@ fn build_enum_variant_part_di_node<'ll, 'tcx>(
DIFlags::FlagZero,
tag_member_di_node,
create_DIArray(DIB(cx), &[]),
variant_part_unique_type_id_str.as_ptr().cast(),
variant_part_unique_type_id_str.as_c_char_ptr(),
variant_part_unique_type_id_str.len(),
)
},
@ -327,7 +327,7 @@ fn build_discr_member_di_node<'ll, 'tcx>(
Some(llvm::LLVMRustDIBuilderCreateMemberType(
DIB(cx),
containing_scope,
tag_name.as_ptr().cast(),
tag_name.as_c_char_ptr(),
tag_name.len(),
unknown_file_metadata(cx),
UNKNOWN_LINE_NUMBER,
@ -399,7 +399,7 @@ fn build_enum_variant_member_di_node<'ll, 'tcx>(
llvm::LLVMRustDIBuilderCreateVariantMemberType(
DIB(cx),
variant_part_di_node,
variant_member_info.variant_name.as_ptr().cast(),
variant_member_info.variant_name.as_c_char_ptr(),
variant_member_info.variant_name.len(),
file_di_node,
line_number,

View file

@ -9,7 +9,7 @@ use rustc_middle::ty::{ParamEnv, PolyExistentialTraitRef, Ty, TyCtxt};
use rustc_target::abi::{Align, Size, VariantIdx};
use super::{SmallVec, UNKNOWN_LINE_NUMBER, unknown_file_metadata};
use crate::common::CodegenCx;
use crate::common::{AsCCharPtr, CodegenCx};
use crate::debuginfo::utils::{DIB, create_DIArray, debug_context};
use crate::llvm::debuginfo::{DIFlags, DIScope, DIType};
use crate::llvm::{self};
@ -191,7 +191,7 @@ pub(super) fn stub<'ll, 'tcx>(
llvm::LLVMRustDIBuilderCreateStructType(
DIB(cx),
containing_scope,
name.as_ptr().cast(),
name.as_c_char_ptr(),
name.len(),
unknown_file_metadata(cx),
UNKNOWN_LINE_NUMBER,
@ -202,7 +202,7 @@ pub(super) fn stub<'ll, 'tcx>(
empty_array,
0,
vtable_holder,
unique_type_id_str.as_ptr().cast(),
unique_type_id_str.as_c_char_ptr(),
unique_type_id_str.len(),
)
}
@ -211,7 +211,7 @@ pub(super) fn stub<'ll, 'tcx>(
llvm::LLVMRustDIBuilderCreateUnionType(
DIB(cx),
containing_scope,
name.as_ptr().cast(),
name.as_c_char_ptr(),
name.len(),
unknown_file_metadata(cx),
UNKNOWN_LINE_NUMBER,
@ -220,7 +220,7 @@ pub(super) fn stub<'ll, 'tcx>(
flags,
Some(empty_array),
0,
unique_type_id_str.as_ptr().cast(),
unique_type_id_str.as_c_char_ptr(),
unique_type_id_str.len(),
)
},

View file

@ -31,7 +31,7 @@ use self::namespace::mangled_name_of_instance;
use self::utils::{DIB, create_DIArray, is_node_local_to_unit};
use crate::abi::FnAbi;
use crate::builder::Builder;
use crate::common::CodegenCx;
use crate::common::{AsCCharPtr, CodegenCx};
use crate::llvm;
use crate::llvm::debuginfo::{
DIArray, DIBuilder, DIFile, DIFlags, DILexicalBlock, DILocation, DISPFlags, DIScope, DIType,
@ -91,45 +91,39 @@ impl<'ll, 'tcx> CodegenUnitDebugContext<'ll, 'tcx> {
}
pub(crate) fn finalize(&self, sess: &Session) {
unsafe {
llvm::LLVMRustDIBuilderFinalize(self.builder);
if !sess.target.is_like_msvc {
// Debuginfo generation in LLVM by default uses a higher
// version of dwarf than macOS currently understands. We can
// instruct LLVM to emit an older version of dwarf, however,
// for macOS to understand. For more info see #11352
// This can be overridden using --llvm-opts -dwarf-version,N.
// Android has the same issue (#22398)
let dwarf_version = sess
.opts
.unstable_opts
.dwarf_version
.unwrap_or(sess.target.default_dwarf_version);
llvm::LLVMRustAddModuleFlagU32(
self.llmod,
llvm::LLVMModFlagBehavior::Warning,
c"Dwarf Version".as_ptr(),
dwarf_version,
);
} else {
// Indicate that we want CodeView debug information on MSVC
llvm::LLVMRustAddModuleFlagU32(
self.llmod,
llvm::LLVMModFlagBehavior::Warning,
c"CodeView".as_ptr(),
1,
)
}
// Prevent bitcode readers from deleting the debug info.
llvm::LLVMRustAddModuleFlagU32(
unsafe { llvm::LLVMRustDIBuilderFinalize(self.builder) };
if !sess.target.is_like_msvc {
// Debuginfo generation in LLVM by default uses a higher
// version of dwarf than macOS currently understands. We can
// instruct LLVM to emit an older version of dwarf, however,
// for macOS to understand. For more info see #11352
// This can be overridden using --llvm-opts -dwarf-version,N.
// Android has the same issue (#22398)
let dwarf_version =
sess.opts.unstable_opts.dwarf_version.unwrap_or(sess.target.default_dwarf_version);
llvm::add_module_flag_u32(
self.llmod,
llvm::LLVMModFlagBehavior::Warning,
c"Debug Info Version".as_ptr(),
llvm::LLVMRustDebugMetadataVersion(),
llvm::ModuleFlagMergeBehavior::Warning,
"Dwarf Version",
dwarf_version,
);
} else {
// Indicate that we want CodeView debug information on MSVC
llvm::add_module_flag_u32(
self.llmod,
llvm::ModuleFlagMergeBehavior::Warning,
"CodeView",
1,
);
}
// Prevent bitcode readers from deleting the debug info.
llvm::add_module_flag_u32(
self.llmod,
llvm::ModuleFlagMergeBehavior::Warning,
"Debug Info Version",
unsafe { llvm::LLVMRustDebugMetadataVersion() },
);
}
}
@ -364,7 +358,7 @@ impl<'ll, 'tcx> DebugInfoCodegenMethods<'tcx> for CodegenCx<'ll, 'tcx> {
let mut flags = DIFlags::FlagPrototyped;
if fn_abi.ret.layout.abi.is_uninhabited() {
if fn_abi.ret.layout.is_uninhabited() {
flags |= DIFlags::FlagNoReturn;
}
@ -389,9 +383,9 @@ impl<'ll, 'tcx> DebugInfoCodegenMethods<'tcx> for CodegenCx<'ll, 'tcx> {
llvm::LLVMRustDIBuilderCreateMethod(
DIB(self),
containing_scope,
name.as_ptr().cast(),
name.as_c_char_ptr(),
name.len(),
linkage_name.as_ptr().cast(),
linkage_name.as_c_char_ptr(),
linkage_name.len(),
file_metadata,
loc.line,
@ -406,9 +400,9 @@ impl<'ll, 'tcx> DebugInfoCodegenMethods<'tcx> for CodegenCx<'ll, 'tcx> {
llvm::LLVMRustDIBuilderCreateFunction(
DIB(self),
containing_scope,
name.as_ptr().cast(),
name.as_c_char_ptr(),
name.len(),
linkage_name.as_ptr().cast(),
linkage_name.as_c_char_ptr(),
linkage_name.len(),
file_metadata,
loc.line,
@ -494,7 +488,7 @@ impl<'ll, 'tcx> DebugInfoCodegenMethods<'tcx> for CodegenCx<'ll, 'tcx> {
Some(llvm::LLVMRustDIBuilderCreateTemplateTypeParameter(
DIB(cx),
None,
name.as_ptr().cast(),
name.as_c_char_ptr(),
name.len(),
actual_type_metadata,
))
@ -635,7 +629,7 @@ impl<'ll, 'tcx> DebugInfoCodegenMethods<'tcx> for CodegenCx<'ll, 'tcx> {
DIB(self),
dwarf_tag,
scope_metadata,
name.as_ptr().cast(),
name.as_c_char_ptr(),
name.len(),
file_metadata,
loc.line,

View file

@ -5,7 +5,7 @@ use rustc_hir::def_id::DefId;
use rustc_middle::ty::{self, Instance};
use super::utils::{DIB, debug_context};
use crate::common::CodegenCx;
use crate::common::{AsCCharPtr, CodegenCx};
use crate::llvm;
use crate::llvm::debuginfo::DIScope;
@ -36,7 +36,7 @@ pub(crate) fn item_namespace<'ll>(cx: &CodegenCx<'ll, '_>, def_id: DefId) -> &'l
llvm::LLVMRustDIBuilderCreateNameSpace(
DIB(cx),
parent_scope,
namespace_name_string.as_ptr().cast(),
namespace_name_string.as_c_char_ptr(),
namespace_name_string.len(),
false, // ExportSymbols (only relevant for C++ anonymous namespaces)
)

View file

@ -20,6 +20,7 @@ use smallvec::SmallVec;
use tracing::debug;
use crate::abi::{FnAbi, FnAbiLlvmExt};
use crate::common::AsCCharPtr;
use crate::context::CodegenCx;
use crate::llvm::AttributePlace::Function;
use crate::llvm::Visibility;
@ -41,7 +42,7 @@ fn declare_raw_fn<'ll>(
) -> &'ll Value {
debug!("declare_raw_fn(name={:?}, ty={:?})", name, ty);
let llfn = unsafe {
llvm::LLVMRustGetOrInsertFunction(cx.llmod, name.as_ptr().cast(), name.len(), ty)
llvm::LLVMRustGetOrInsertFunction(cx.llmod, name.as_c_char_ptr(), name.len(), ty)
};
llvm::SetFunctionCallConv(llfn, callconv);
@ -68,7 +69,7 @@ impl<'ll, 'tcx> CodegenCx<'ll, 'tcx> {
/// return its Value instead.
pub(crate) fn declare_global(&self, name: &str, ty: &'ll Type) -> &'ll Value {
debug!("declare_global(name={:?})", name);
unsafe { llvm::LLVMRustGetOrInsertGlobal(self.llmod, name.as_ptr().cast(), name.len(), ty) }
unsafe { llvm::LLVMRustGetOrInsertGlobal(self.llmod, name.as_c_char_ptr(), name.len(), ty) }
}
/// Declare a C ABI function.
@ -209,7 +210,7 @@ impl<'ll, 'tcx> CodegenCx<'ll, 'tcx> {
/// Gets declared value by name.
pub(crate) fn get_declared_value(&self, name: &str) -> Option<&'ll Value> {
debug!("get_declared_value(name={:?})", name);
unsafe { llvm::LLVMRustGetNamedValue(self.llmod, name.as_ptr().cast(), name.len()) }
unsafe { llvm::LLVMRustGetNamedValue(self.llmod, name.as_c_char_ptr(), name.len()) }
}
/// Gets defined or externally defined (AvailableExternally linkage) value by

View file

@ -258,8 +258,8 @@ impl<'ll, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> {
self.call_intrinsic("llvm.va_copy", &[args[0].immediate(), args[1].immediate()])
}
sym::va_arg => {
match fn_abi.ret.layout.abi {
abi::Abi::Scalar(scalar) => {
match fn_abi.ret.layout.backend_repr {
abi::BackendRepr::Scalar(scalar) => {
match scalar.primitive() {
Primitive::Int(..) => {
if self.cx().size_of(ret_ty).bytes() < 4 {
@ -436,13 +436,13 @@ impl<'ll, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> {
}
sym::raw_eq => {
use abi::Abi::*;
use abi::BackendRepr::*;
let tp_ty = fn_args.type_at(0);
let layout = self.layout_of(tp_ty).layout;
let use_integer_compare = match layout.abi() {
let use_integer_compare = match layout.backend_repr() {
Scalar(_) | ScalarPair(_, _) => true,
Uninhabited | Vector { .. } => false,
Aggregate { .. } => {
Memory { .. } => {
// For rusty ABIs, small aggregates are actually passed
// as `RegKind::Integer` (see `FnAbi::adjust_for_abi`),
// so we re-use that same threshold here.
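A self-contained restatement of the dispatch above, for readers skimming this hunk: `raw_eq` lowers to an integer compare for scalar-like layouts and to a `memcmp` otherwise. The mini enum stands in for `rustc_abi::BackendRepr`, and `size`/`ptr_size` in bytes are assumptions for illustration:

enum Repr { Scalar, ScalarPair, Vector, Uninhabited, Memory { size: u64 } }

fn use_integer_compare(repr: &Repr, ptr_size: u64) -> bool {
    match repr {
        Repr::Scalar | Repr::ScalarPair => true,
        Repr::Uninhabited | Repr::Vector => false,
        // Small aggregates are passed as `RegKind::Integer`, so reuse that threshold.
        Repr::Memory { size } => *size <= 2 * ptr_size,
    }
}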
@ -549,7 +549,8 @@ impl<'ll, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> {
}
let llret_ty = if ret_ty.is_simd()
&& let abi::Abi::Aggregate { .. } = self.layout_of(ret_ty).layout.abi
&& let abi::BackendRepr::Memory { .. } =
self.layout_of(ret_ty).layout.backend_repr
{
let (size, elem_ty) = ret_ty.simd_size_and_type(self.tcx());
let elem_ll_ty = match elem_ty.kind() {


@ -3,6 +3,7 @@
use std::fmt::Debug;
use std::marker::PhantomData;
use std::ptr;
use libc::{c_char, c_int, c_uint, c_ulonglong, c_void, size_t};
use rustc_macros::TryFromU32;
@ -85,7 +86,7 @@ pub enum LLVMMachineType {
ARM = 0x01c0,
}
/// LLVM's Module::ModFlagBehavior, defined in llvm/include/llvm/IR/Module.h.
/// Must match the layout of `LLVMRustModuleFlagMergeBehavior`.
///
/// When merging modules (e.g. during LTO), their metadata flags are combined. Conflicts are
/// resolved according to the merge behaviors specified here. Flags differing only in merge
@ -93,9 +94,13 @@ pub enum LLVMMachineType {
///
/// In order for Rust-C LTO to work, we must specify behaviors compatible with Clang. Notably,
/// 'Error' and 'Warning' cannot be mixed for a given flag.
///
/// There is a stable LLVM-C version of this enum (`LLVMModuleFlagBehavior`),
/// but as of LLVM 19 it does not support all of the enum values in the unstable
/// C++ API.
#[derive(Copy, Clone, PartialEq)]
#[repr(C)]
pub enum LLVMModFlagBehavior {
pub enum ModuleFlagMergeBehavior {
Error = 1,
Warning = 2,
Require = 3,
@ -704,8 +709,9 @@ unsafe extern "C" {
}
#[repr(C)]
pub struct RustArchiveMember<'a>(InvariantOpaque<'a>);
/// Opaque pointee of `LLVMOperandBundleRef`.
#[repr(C)]
pub struct OperandBundleDef<'a>(InvariantOpaque<'a>);
pub(crate) struct OperandBundle<'a>(InvariantOpaque<'a>);
#[repr(C)]
pub struct Linker<'a>(InvariantOpaque<'a>);
@ -1534,6 +1540,50 @@ unsafe extern "C" {
pub fn LLVMGetOrInsertComdat(M: &Module, Name: *const c_char) -> &Comdat;
pub fn LLVMSetComdat(V: &Value, C: &Comdat);
pub(crate) fn LLVMCreateOperandBundle(
Tag: *const c_char,
TagLen: size_t,
Args: *const &'_ Value,
NumArgs: c_uint,
) -> *mut OperandBundle<'_>;
pub(crate) fn LLVMDisposeOperandBundle(Bundle: ptr::NonNull<OperandBundle<'_>>);
pub(crate) fn LLVMBuildCallWithOperandBundles<'a>(
B: &Builder<'a>,
Ty: &'a Type,
Fn: &'a Value,
Args: *const &'a Value,
NumArgs: c_uint,
Bundles: *const &OperandBundle<'a>,
NumBundles: c_uint,
Name: *const c_char,
) -> &'a Value;
pub(crate) fn LLVMBuildInvokeWithOperandBundles<'a>(
B: &Builder<'a>,
Ty: &'a Type,
Fn: &'a Value,
Args: *const &'a Value,
NumArgs: c_uint,
Then: &'a BasicBlock,
Catch: &'a BasicBlock,
Bundles: *const &OperandBundle<'a>,
NumBundles: c_uint,
Name: *const c_char,
) -> &'a Value;
pub(crate) fn LLVMBuildCallBr<'a>(
B: &Builder<'a>,
Ty: &'a Type,
Fn: &'a Value,
DefaultDest: &'a BasicBlock,
IndirectDests: *const &'a BasicBlock,
NumIndirectDests: c_uint,
Args: *const &'a Value,
NumArgs: c_uint,
Bundles: *const &OperandBundle<'a>,
NumBundles: c_uint,
Name: *const c_char,
) -> &'a Value;
}
#[link(name = "llvm-wrapper", kind = "static")]
@ -1619,47 +1669,11 @@ unsafe extern "C" {
AttrsLen: size_t,
);
pub fn LLVMRustBuildInvoke<'a>(
B: &Builder<'a>,
Ty: &'a Type,
Fn: &'a Value,
Args: *const &'a Value,
NumArgs: c_uint,
Then: &'a BasicBlock,
Catch: &'a BasicBlock,
OpBundles: *const &OperandBundleDef<'a>,
NumOpBundles: c_uint,
Name: *const c_char,
) -> &'a Value;
pub fn LLVMRustBuildCallBr<'a>(
B: &Builder<'a>,
Ty: &'a Type,
Fn: &'a Value,
DefaultDest: &'a BasicBlock,
IndirectDests: *const &'a BasicBlock,
NumIndirectDests: c_uint,
Args: *const &'a Value,
NumArgs: c_uint,
OpBundles: *const &OperandBundleDef<'a>,
NumOpBundles: c_uint,
Name: *const c_char,
) -> &'a Value;
pub fn LLVMRustSetFastMath(Instr: &Value);
pub fn LLVMRustSetAlgebraicMath(Instr: &Value);
pub fn LLVMRustSetAllowReassoc(Instr: &Value);
// Miscellaneous instructions
pub fn LLVMRustBuildCall<'a>(
B: &Builder<'a>,
Ty: &'a Type,
Fn: &'a Value,
Args: *const &'a Value,
NumArgs: c_uint,
OpBundles: *const &OperandBundleDef<'a>,
NumOpBundles: c_uint,
) -> &'a Value;
pub fn LLVMRustBuildMemCpy<'a>(
B: &Builder<'a>,
Dst: &'a Value,
@ -1829,21 +1843,21 @@ unsafe extern "C" {
/// "compatible" means depends on the merge behaviors involved.
pub fn LLVMRustAddModuleFlagU32(
M: &Module,
merge_behavior: LLVMModFlagBehavior,
name: *const c_char,
value: u32,
MergeBehavior: ModuleFlagMergeBehavior,
Name: *const c_char,
NameLen: size_t,
Value: u32,
);
pub fn LLVMRustAddModuleFlagString(
M: &Module,
merge_behavior: LLVMModFlagBehavior,
name: *const c_char,
value: *const c_char,
value_len: size_t,
MergeBehavior: ModuleFlagMergeBehavior,
Name: *const c_char,
NameLen: size_t,
Value: *const c_char,
ValueLen: size_t,
);
pub fn LLVMRustHasModuleFlag(M: &Module, name: *const c_char, len: size_t) -> bool;
pub fn LLVMRustDIBuilderCreate(M: &Module) -> &mut DIBuilder<'_>;
pub fn LLVMRustDIBuilderDispose<'a>(Builder: &'a mut DIBuilder<'a>);
@ -2353,13 +2367,6 @@ unsafe extern "C" {
pub fn LLVMRustSetDataLayoutFromTargetMachine<'a>(M: &'a Module, TM: &'a TargetMachine);
pub fn LLVMRustBuildOperandBundleDef(
Name: *const c_char,
Inputs: *const &'_ Value,
NumInputs: c_uint,
) -> &mut OperandBundleDef<'_>;
pub fn LLVMRustFreeOperandBundleDef<'a>(Bundle: &'a mut OperandBundleDef<'a>);
pub fn LLVMRustPositionBuilderAtStart<'a>(B: &Builder<'a>, BB: &'a BasicBlock);
pub fn LLVMRustSetModulePICLevel(M: &Module);
@ -2385,9 +2392,9 @@ unsafe extern "C" {
pub fn LLVMRustThinLTOBufferThinLinkDataLen(M: &ThinLTOBuffer) -> size_t;
pub fn LLVMRustCreateThinLTOData(
Modules: *const ThinLTOModule,
NumModules: c_uint,
NumModules: size_t,
PreservedSymbols: *const *const c_char,
PreservedSymbolsLen: c_uint,
PreservedSymbolsLen: size_t,
) -> Option<&'static mut ThinLTOData>;
pub fn LLVMRustPrepareThinLTORename(
Data: &ThinLTOData,
@ -2412,6 +2419,7 @@ unsafe extern "C" {
data: *const u8,
len: usize,
name: *const u8,
name_len: usize,
out_len: &mut usize,
) -> *const u8;

View file

@ -2,11 +2,12 @@
use std::cell::RefCell;
use std::ffi::{CStr, CString};
use std::ops::Deref;
use std::ptr;
use std::str::FromStr;
use std::string::FromUtf8Error;
use libc::c_uint;
use rustc_data_structures::small_c_str::SmallCStr;
use rustc_llvm::RustString;
use rustc_target::abi::{Align, Size, WrappingRange};
@ -17,13 +18,13 @@ pub use self::IntPredicate::*;
pub use self::Linkage::*;
pub use self::MetadataType::*;
pub use self::RealPredicate::*;
pub use self::ffi::*;
use crate::common::AsCCharPtr;
pub mod archive_ro;
pub mod diagnostic;
mod ffi;
pub use self::ffi::*;
impl LLVMRustResult {
pub fn into_result(self) -> Result<(), ()> {
match self {
@ -53,9 +54,9 @@ pub fn CreateAttrStringValue<'ll>(llcx: &'ll Context, attr: &str, value: &str) -
unsafe {
LLVMCreateStringAttribute(
llcx,
attr.as_ptr().cast(),
attr.as_c_char_ptr(),
attr.len().try_into().unwrap(),
value.as_ptr().cast(),
value.as_c_char_ptr(),
value.len().try_into().unwrap(),
)
}
@ -65,7 +66,7 @@ pub fn CreateAttrString<'ll>(llcx: &'ll Context, attr: &str) -> &'ll Attribute {
unsafe {
LLVMCreateStringAttribute(
llcx,
attr.as_ptr().cast(),
attr.as_c_char_ptr(),
attr.len().try_into().unwrap(),
std::ptr::null(),
0,
@ -294,7 +295,7 @@ pub fn get_value_name(value: &Value) -> &[u8] {
/// Safe wrapper for `LLVMSetValueName2` from a byte slice
pub fn set_value_name(value: &Value, name: &[u8]) {
unsafe {
let data = name.as_ptr().cast();
let data = name.as_c_char_ptr();
LLVMSetValueName2(value, data, name.len());
}
}
@ -331,24 +332,68 @@ pub fn last_error() -> Option<String> {
}
}
pub struct OperandBundleDef<'a> {
pub raw: &'a mut ffi::OperandBundleDef<'a>,
/// Owns an [`OperandBundle`], and will dispose of it when dropped.
pub(crate) struct OperandBundleOwned<'a> {
raw: ptr::NonNull<OperandBundle<'a>>,
}
impl<'a> OperandBundleDef<'a> {
pub fn new(name: &str, vals: &[&'a Value]) -> Self {
let name = SmallCStr::new(name);
let def = unsafe {
LLVMRustBuildOperandBundleDef(name.as_ptr(), vals.as_ptr(), vals.len() as c_uint)
impl<'a> OperandBundleOwned<'a> {
pub(crate) fn new(name: &str, vals: &[&'a Value]) -> Self {
let raw = unsafe {
LLVMCreateOperandBundle(
name.as_c_char_ptr(),
name.len(),
vals.as_ptr(),
vals.len() as c_uint,
)
};
OperandBundleDef { raw: def }
OperandBundleOwned { raw: ptr::NonNull::new(raw).unwrap() }
}
}
impl Drop for OperandBundleDef<'_> {
impl Drop for OperandBundleOwned<'_> {
fn drop(&mut self) {
unsafe {
LLVMRustFreeOperandBundleDef(&mut *(self.raw as *mut _));
LLVMDisposeOperandBundle(self.raw);
}
}
}
impl<'a> Deref for OperandBundleOwned<'a> {
type Target = OperandBundle<'a>;
fn deref(&self) -> &Self::Target {
// SAFETY: The returned reference is opaque and can only be used for FFI.
// It is valid for as long as `&self` is.
unsafe { self.raw.as_ref() }
}
}
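// Hypothetical call site (not part of this diff) showing the RAII pattern above:
// the owned bundle derefs to the opaque `OperandBundle` for the FFI call and is
// disposed when it goes out of scope. `builder`, `fn_ty`, `llfn`, `args`, and
// `pad` are stand-in names.
//
// let funclet = OperandBundleOwned::new("funclet", &[pad]);
// let bundles = [&*funclet];
// let call = unsafe {
//     LLVMBuildCallWithOperandBundles(
//         builder, fn_ty, llfn,
//         args.as_ptr(), args.len() as c_uint,
//         bundles.as_ptr(), bundles.len() as c_uint,
//         c"".as_ptr(),
//     )
// };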
pub(crate) fn add_module_flag_u32(
module: &Module,
merge_behavior: ModuleFlagMergeBehavior,
key: &str,
value: u32,
) {
unsafe {
LLVMRustAddModuleFlagU32(module, merge_behavior, key.as_c_char_ptr(), key.len(), value);
}
}
pub(crate) fn add_module_flag_str(
module: &Module,
merge_behavior: ModuleFlagMergeBehavior,
key: &str,
value: &str,
) {
unsafe {
LLVMRustAddModuleFlagString(
module,
merge_behavior,
key.as_c_char_ptr(),
key.len(),
value.as_c_char_ptr(),
value.len(),
);
}
}
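These two wrappers hide the raw-pointer and length plumbing of the `LLVMRustAddModuleFlag*` entry points. A sketch of how a caller might use them, with illustrative flag names and a stand-in `llmod` (neither taken from this diff):

add_module_flag_u32(llmod, ModuleFlagMergeBehavior::Warning, "Dwarf Version", 4);
add_module_flag_str(llmod, ModuleFlagMergeBehavior::Error, "producer", "rustc");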


@ -698,12 +698,9 @@ fn backend_feature_name<'a>(sess: &Session, s: &'a str) -> Option<&'a str> {
let feature = s
.strip_prefix(&['+', '-'][..])
.unwrap_or_else(|| sess.dcx().emit_fatal(InvalidTargetFeaturePrefix { feature: s }));
if s.is_empty() {
return None;
}
// Rustc-specific feature requests like `+crt-static` or `-crt-static`
// are not passed down to LLVM.
if RUSTC_SPECIFIC_FEATURES.contains(&feature) {
if s.is_empty() || RUSTC_SPECIFIC_FEATURES.contains(&feature) {
return None;
}
Some(feature)
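Illustrative expectations for the merged early return, assuming `sess` is a valid `Session` and that `RUSTC_SPECIFIC_FEATURES` contains `crt-static`:

assert_eq!(backend_feature_name(sess, "+sse4.2"), Some("sse4.2"));
// Handled by rustc itself, never forwarded to LLVM:
assert_eq!(backend_feature_name(sess, "+crt-static"), None);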


@ -1,7 +1,7 @@
use std::fmt::Write;
use rustc_abi::Primitive::{Float, Int, Pointer};
use rustc_abi::{Abi, Align, FieldsShape, Scalar, Size, Variants};
use rustc_abi::{Align, BackendRepr, FieldsShape, Scalar, Size, Variants};
use rustc_codegen_ssa::traits::*;
use rustc_middle::bug;
use rustc_middle::ty::layout::{LayoutOf, TyAndLayout};
@ -17,13 +17,13 @@ fn uncached_llvm_type<'a, 'tcx>(
layout: TyAndLayout<'tcx>,
defer: &mut Option<(&'a Type, TyAndLayout<'tcx>)>,
) -> &'a Type {
match layout.abi {
Abi::Scalar(_) => bug!("handled elsewhere"),
Abi::Vector { element, count } => {
match layout.backend_repr {
BackendRepr::Scalar(_) => bug!("handled elsewhere"),
BackendRepr::Vector { element, count } => {
let element = layout.scalar_llvm_type_at(cx, element);
return cx.type_vector(element, count);
}
Abi::Uninhabited | Abi::Aggregate { .. } | Abi::ScalarPair(..) => {}
BackendRepr::Uninhabited | BackendRepr::Memory { .. } | BackendRepr::ScalarPair(..) => {}
}
let name = match layout.ty.kind() {
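For orientation: many hunks in this commit apply the same mechanical rename in `rustc_abi`. A simplified sketch of the new enum and where each old `Abi` variant went (field shapes elided, illustrative only):

enum BackendRepr {
    Scalar(/* Scalar */),            // was Abi::Scalar
    ScalarPair(/* a, b */),          // was Abi::ScalarPair
    Vector { /* element, count */ }, // was Abi::Vector
    Uninhabited,                     // was Abi::Uninhabited
    Memory { sized: bool },          // was Abi::Aggregate { sized }
}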
@ -170,16 +170,21 @@ pub(crate) trait LayoutLlvmExt<'tcx> {
impl<'tcx> LayoutLlvmExt<'tcx> for TyAndLayout<'tcx> {
fn is_llvm_immediate(&self) -> bool {
match self.abi {
Abi::Scalar(_) | Abi::Vector { .. } => true,
Abi::ScalarPair(..) | Abi::Uninhabited | Abi::Aggregate { .. } => false,
match self.backend_repr {
BackendRepr::Scalar(_) | BackendRepr::Vector { .. } => true,
BackendRepr::ScalarPair(..) | BackendRepr::Uninhabited | BackendRepr::Memory { .. } => {
false
}
}
}
fn is_llvm_scalar_pair(&self) -> bool {
match self.abi {
Abi::ScalarPair(..) => true,
Abi::Uninhabited | Abi::Scalar(_) | Abi::Vector { .. } | Abi::Aggregate { .. } => false,
match self.backend_repr {
BackendRepr::ScalarPair(..) => true,
BackendRepr::Uninhabited
| BackendRepr::Scalar(_)
| BackendRepr::Vector { .. }
| BackendRepr::Memory { .. } => false,
}
}
@ -198,7 +203,7 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyAndLayout<'tcx> {
// This must produce the same result for `repr(transparent)` wrappers as for the inner type!
// In other words, this should generally not look at the type at all, but only at the
// layout.
if let Abi::Scalar(scalar) = self.abi {
if let BackendRepr::Scalar(scalar) = self.backend_repr {
// Use a different cache for scalars because pointers to DSTs
// can be either wide or thin (data pointers of wide pointers).
if let Some(&llty) = cx.scalar_lltypes.borrow().get(&self.ty) {
@ -248,13 +253,13 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyAndLayout<'tcx> {
}
fn immediate_llvm_type<'a>(&self, cx: &CodegenCx<'a, 'tcx>) -> &'a Type {
match self.abi {
Abi::Scalar(scalar) => {
match self.backend_repr {
BackendRepr::Scalar(scalar) => {
if scalar.is_bool() {
return cx.type_i1();
}
}
Abi::ScalarPair(..) => {
BackendRepr::ScalarPair(..) => {
// An immediate pair always contains just the two elements, without any padding
// filler, as it should never be stored to memory.
return cx.type_struct(
@ -287,7 +292,7 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyAndLayout<'tcx> {
// This must produce the same result for `repr(transparent)` wrappers as for the inner type!
// In other words, this should generally not look at the type at all, but only at the
// layout.
let Abi::ScalarPair(a, b) = self.abi else {
let BackendRepr::ScalarPair(a, b) = self.backend_repr else {
bug!("TyAndLayout::scalar_pair_element_llty({:?}): not applicable", self);
};
let scalar = [a, b][index];


@ -322,10 +322,11 @@ pub(crate) fn create_object_file(sess: &Session) -> Option<write::Object<'static
// Set the appropriate flag based on ABI
// This needs to match LLVM `RISCVELFStreamer.cpp`
match &*sess.target.llvm_abiname {
"" | "ilp32" | "lp64" => (),
"ilp32" | "lp64" => (),
"ilp32f" | "lp64f" => e_flags |= elf::EF_RISCV_FLOAT_ABI_SINGLE,
"ilp32d" | "lp64d" => e_flags |= elf::EF_RISCV_FLOAT_ABI_DOUBLE,
"ilp32e" => e_flags |= elf::EF_RISCV_RVE,
// Note that `lp64e` is still unstable, as it's not (yet) part of the ELF psABI.
"ilp32e" | "lp64e" => e_flags |= elf::EF_RISCV_RVE,
_ => bug!("unknown RISC-V ABI name"),
}
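A compact restatement of the mapping above as a helper (illustrative only; the `elf` constants are the ones from the `object` crate used in this hunk):

use object::elf;

fn riscv_float_abi_flags(abiname: &str) -> u32 {
    match abiname {
        "ilp32" | "lp64" => 0, // soft-float ABI: no flag bits
        "ilp32f" | "lp64f" => elf::EF_RISCV_FLOAT_ABI_SINGLE,
        "ilp32d" | "lp64d" => elf::EF_RISCV_FLOAT_ABI_DOUBLE,
        "ilp32e" | "lp64e" => elf::EF_RISCV_RVE, // `lp64e` still unstable
        other => panic!("unknown RISC-V ABI name: {other}"),
    }
}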


@ -514,7 +514,7 @@ pub(crate) fn start_async_codegen<B: ExtraBackendMethods>(
future: Some(coordinator_thread),
phantom: PhantomData,
},
output_filenames: tcx.output_filenames(()).clone(),
output_filenames: Arc::clone(tcx.output_filenames(())),
}
}
@ -1203,7 +1203,7 @@ fn start_executing_work<B: ExtraBackendMethods>(
coordinator_send,
expanded_args: tcx.sess.expanded_args.clone(),
diag_emitter: shared_emitter.clone(),
output_filenames: tcx.output_filenames(()).clone(),
output_filenames: Arc::clone(tcx.output_filenames(())),
regular_module_config: regular_config,
metadata_module_config: metadata_config,
allocator_module_config: allocator_config,
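The `.clone()` to `Arc::clone(...)` changes in this file are stylistic: `Arc::clone` makes explicit at the call site that only a reference count is bumped, never the underlying data. A minimal standalone illustration:

use std::sync::Arc;

fn main() {
    let filenames = Arc::new(vec![String::from("crate.rmeta")]);
    let for_coordinator = Arc::clone(&filenames); // cheap: pointer copy + refcount bump
    assert!(Arc::ptr_eq(&filenames, &for_coordinator));
}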


@ -7,7 +7,7 @@ use rustc_ast::expand::allocator::{ALLOCATOR_METHODS, AllocatorKind, global_fn_n
use rustc_attr as attr;
use rustc_data_structures::fx::{FxHashMap, FxIndexSet};
use rustc_data_structures::profiling::{get_resident_set_size, print_time_passes_entry};
use rustc_data_structures::sync::par_map;
use rustc_data_structures::sync::{Lrc, par_map};
use rustc_data_structures::unord::UnordMap;
use rustc_hir::def_id::{DefId, LOCAL_CRATE};
use rustc_hir::lang_items::LangItem;
@ -21,7 +21,7 @@ use rustc_middle::mir::BinOp;
use rustc_middle::mir::mono::{CodegenUnit, CodegenUnitNameBuilder, MonoItem};
use rustc_middle::query::Providers;
use rustc_middle::ty::layout::{HasTyCtxt, LayoutOf, TyAndLayout};
use rustc_middle::ty::{self, Instance, Ty, TyCtxt};
use rustc_middle::ty::{self, Instance, Ty, TyCtxt, TypingMode};
use rustc_session::Session;
use rustc_session::config::{self, CrateType, EntryFnType, OptLevel, OutputType};
use rustc_span::symbol::sym;
@ -119,7 +119,7 @@ pub fn validate_trivial_unsize<'tcx>(
) -> bool {
match (source_data.principal(), target_data.principal()) {
(Some(hr_source_principal), Some(hr_target_principal)) => {
let infcx = tcx.infer_ctxt().build();
let infcx = tcx.infer_ctxt().build(TypingMode::PostAnalysis);
let universe = infcx.universe();
let ocx = ObligationCtxt::new(&infcx);
infcx.enter_forall(hr_target_principal, |target_principal| {
@ -923,7 +923,7 @@ impl CrateInfo {
crate_name: UnordMap::with_capacity(n_crates),
used_crates,
used_crate_source: UnordMap::with_capacity(n_crates),
dependency_formats: tcx.dependency_formats(()).clone(),
dependency_formats: Lrc::clone(tcx.dependency_formats(())),
windows_subsystem,
natvis_debugger_visualizers: Default::default(),
};
@ -936,7 +936,7 @@ impl CrateInfo {
info.crate_name.insert(cnum, tcx.crate_name(cnum));
let used_crate_source = tcx.used_crate_source(cnum);
info.used_crate_source.insert(cnum, used_crate_source.clone());
info.used_crate_source.insert(cnum, Lrc::clone(used_crate_source));
if tcx.is_profiler_runtime(cnum) {
info.profiler_runtime = Some(cnum);
}


@ -438,7 +438,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
_ => bug!("C-variadic function must have a `VaList` place"),
}
}
if self.fn_abi.ret.layout.abi.is_uninhabited() {
if self.fn_abi.ret.layout.is_uninhabited() {
// Functions with uninhabited return values are marked `noreturn`,
// so we should make sure that we never actually return.
// We play it safe by using a well-defined `abort`, but we could go for immediate UB
@ -774,7 +774,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
Some(if do_panic {
let msg_str = with_no_visible_paths!({
with_no_trimmed_paths!({
if layout.abi.is_uninhabited() {
if layout.is_uninhabited() {
// Use this error even for the other intrinsics as it is more precise.
format!("attempted to instantiate uninhabited type `{ty}`")
} else if requirement == ValidityRequirement::Zero {
@ -1532,7 +1532,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
// the load would just produce `OperandValue::Ref` instead
// of the `OperandValue::Immediate` we need for the call.
llval = bx.load(bx.backend_type(arg.layout), llval, align);
if let abi::Abi::Scalar(scalar) = arg.layout.abi {
if let abi::BackendRepr::Scalar(scalar) = arg.layout.backend_repr {
if scalar.is_bool() {
bx.range_metadata(llval, WrappingRange { start: 0, end: 1 });
}


@ -1,8 +1,8 @@
use rustc_abi::BackendRepr;
use rustc_middle::mir::interpret::ErrorHandled;
use rustc_middle::ty::layout::HasTyCtxt;
use rustc_middle::ty::{self, Ty};
use rustc_middle::{bug, mir, span_bug};
use rustc_target::abi::Abi;
use super::FunctionCx;
use crate::errors;
@ -86,7 +86,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
.map(|field| {
if let Some(prim) = field.try_to_scalar() {
let layout = bx.layout_of(field_ty);
let Abi::Scalar(scalar) = layout.abi else {
let BackendRepr::Scalar(scalar) = layout.backend_repr else {
bug!("from_const: invalid ByVal layout: {:#?}", layout);
};
bx.scalar_to_backend(prim, scalar, bx.immediate_backend_type(layout))


@ -2,6 +2,7 @@ use std::collections::hash_map::Entry;
use std::marker::PhantomData;
use std::ops::Range;
use rustc_abi::{BackendRepr, FieldIdx, FieldsShape, Size, VariantIdx};
use rustc_data_structures::fx::FxHashMap;
use rustc_index::IndexVec;
use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrFlags;
@ -11,7 +12,6 @@ use rustc_middle::{bug, mir, ty};
use rustc_session::config::DebugInfo;
use rustc_span::symbol::{Symbol, kw};
use rustc_span::{BytePos, Span, hygiene};
use rustc_target::abi::{Abi, FieldIdx, FieldsShape, Size, VariantIdx};
use super::operand::{OperandRef, OperandValue};
use super::place::{PlaceRef, PlaceValue};
@ -510,7 +510,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
// be marked as a `LocalVariable` for MSVC debuggers to visualize
// their data correctly. (See #81894 & #88625)
let var_ty_layout = self.cx.layout_of(var_ty);
if let Abi::ScalarPair(_, _) = var_ty_layout.abi {
if let BackendRepr::ScalarPair(_, _) = var_ty_layout.backend_repr {
VariableKind::LocalVariable
} else {
VariableKind::ArgumentVariable(arg_index)


@ -4,7 +4,7 @@ use std::fmt;
use arrayvec::ArrayVec;
use either::Either;
use rustc_abi as abi;
use rustc_abi::{Abi, Align, Size};
use rustc_abi::{Align, BackendRepr, Size};
use rustc_middle::bug;
use rustc_middle::mir::interpret::{Pointer, Scalar, alloc_range};
use rustc_middle::mir::{self, ConstValue};
@ -163,7 +163,7 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> {
let val = match val {
ConstValue::Scalar(x) => {
let Abi::Scalar(scalar) = layout.abi else {
let BackendRepr::Scalar(scalar) = layout.backend_repr else {
bug!("from_const: invalid ByVal layout: {:#?}", layout);
};
let llval = bx.scalar_to_backend(x, scalar, bx.immediate_backend_type(layout));
@ -171,7 +171,7 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> {
}
ConstValue::ZeroSized => return OperandRef::zero_sized(layout),
ConstValue::Slice { data, meta } => {
let Abi::ScalarPair(a_scalar, _) = layout.abi else {
let BackendRepr::ScalarPair(a_scalar, _) = layout.backend_repr else {
bug!("from_const: invalid ScalarPair layout: {:#?}", layout);
};
let a = Scalar::from_pointer(
@ -221,14 +221,14 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> {
// case where some of the bytes are initialized and others are not. So, we need an extra
// check that walks over the type of `mplace` to make sure it is truly correct to treat this
// like a `Scalar` (or `ScalarPair`).
match layout.abi {
Abi::Scalar(s @ abi::Scalar::Initialized { .. }) => {
match layout.backend_repr {
BackendRepr::Scalar(s @ abi::Scalar::Initialized { .. }) => {
let size = s.size(bx);
assert_eq!(size, layout.size, "abi::Scalar size does not match layout size");
let val = read_scalar(offset, size, s, bx.immediate_backend_type(layout));
OperandRef { val: OperandValue::Immediate(val), layout }
}
Abi::ScalarPair(
BackendRepr::ScalarPair(
a @ abi::Scalar::Initialized { .. },
b @ abi::Scalar::Initialized { .. },
) => {
@ -322,7 +322,7 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> {
llval: V,
layout: TyAndLayout<'tcx>,
) -> Self {
let val = if let Abi::ScalarPair(..) = layout.abi {
let val = if let BackendRepr::ScalarPair(..) = layout.backend_repr {
debug!("Operand::from_immediate_or_packed_pair: unpacking {:?} @ {:?}", llval, layout);
// Deconstruct the immediate aggregate.
@ -343,7 +343,7 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> {
let field = self.layout.field(bx.cx(), i);
let offset = self.layout.fields.offset(i);
let mut val = match (self.val, self.layout.abi) {
let mut val = match (self.val, self.layout.backend_repr) {
// If the field is ZST, it has no data.
_ if field.is_zst() => OperandValue::ZeroSized,
@ -356,7 +356,7 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> {
}
// Extract a scalar component from a pair.
(OperandValue::Pair(a_llval, b_llval), Abi::ScalarPair(a, b)) => {
(OperandValue::Pair(a_llval, b_llval), BackendRepr::ScalarPair(a, b)) => {
if offset.bytes() == 0 {
assert_eq!(field.size, a.size(bx.cx()));
OperandValue::Immediate(a_llval)
@ -368,30 +368,30 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> {
}
// `#[repr(simd)]` types are also immediate.
(OperandValue::Immediate(llval), Abi::Vector { .. }) => {
(OperandValue::Immediate(llval), BackendRepr::Vector { .. }) => {
OperandValue::Immediate(bx.extract_element(llval, bx.cx().const_usize(i as u64)))
}
_ => bug!("OperandRef::extract_field({:?}): not applicable", self),
};
match (&mut val, field.abi) {
match (&mut val, field.backend_repr) {
(OperandValue::ZeroSized, _) => {}
(
OperandValue::Immediate(llval),
Abi::Scalar(_) | Abi::ScalarPair(..) | Abi::Vector { .. },
BackendRepr::Scalar(_) | BackendRepr::ScalarPair(..) | BackendRepr::Vector { .. },
) => {
// Bools in union fields need to be truncated.
*llval = bx.to_immediate(*llval, field);
}
(OperandValue::Pair(a, b), Abi::ScalarPair(a_abi, b_abi)) => {
(OperandValue::Pair(a, b), BackendRepr::ScalarPair(a_abi, b_abi)) => {
// Bools in union fields need to be truncated.
*a = bx.to_immediate_scalar(*a, a_abi);
*b = bx.to_immediate_scalar(*b, b_abi);
}
// Newtype vector of array, e.g. #[repr(simd)] struct S([i32; 4]);
(OperandValue::Immediate(llval), Abi::Aggregate { sized: true }) => {
assert_matches!(self.layout.abi, Abi::Vector { .. });
(OperandValue::Immediate(llval), BackendRepr::Memory { sized: true }) => {
assert_matches!(self.layout.backend_repr, BackendRepr::Vector { .. });
let llfield_ty = bx.cx().backend_type(field);
@ -400,7 +400,10 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> {
bx.store(*llval, llptr, field.align.abi);
*llval = bx.load(llfield_ty, llptr, field.align.abi);
}
(OperandValue::Immediate(_), Abi::Uninhabited | Abi::Aggregate { sized: false }) => {
(
OperandValue::Immediate(_),
BackendRepr::Uninhabited | BackendRepr::Memory { sized: false },
) => {
bug!()
}
(OperandValue::Pair(..), _) => bug!(),
@ -494,7 +497,7 @@ impl<'a, 'tcx, V: CodegenObject> OperandValue<V> {
bx.store_with_flags(val, dest.val.llval, dest.val.align, flags);
}
OperandValue::Pair(a, b) => {
let Abi::ScalarPair(a_scalar, b_scalar) = dest.layout.abi else {
let BackendRepr::ScalarPair(a_scalar, b_scalar) = dest.layout.backend_repr else {
bug!("store_with_flags: invalid ScalarPair layout: {:#?}", dest.layout);
};
let b_offset = a_scalar.size(bx).align_to(b_scalar.align(bx).abi);
@ -645,7 +648,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
// However, some SIMD types do not actually use the vector ABI
// (in particular, packed SIMD types do not). Ensure we exclude those.
let layout = bx.layout_of(constant_ty);
if let Abi::Vector { .. } = layout.abi {
if let BackendRepr::Vector { .. } = layout.backend_repr {
let (llval, ty) = self.immediate_const_vector(bx, constant);
return OperandRef {
val: OperandValue::Immediate(llval),


@ -55,7 +55,7 @@ impl<V: CodegenObject> PlaceValue<V> {
/// Creates a `PlaceRef` to this location with the given type.
pub fn with_type<'tcx>(self, layout: TyAndLayout<'tcx>) -> PlaceRef<'tcx, V> {
assert!(
layout.is_unsized() || layout.abi.is_uninhabited() || self.llextra.is_none(),
layout.is_unsized() || layout.is_uninhabited() || self.llextra.is_none(),
"Had pointer metadata {:?} for sized type {layout:?}",
self.llextra,
);
@ -239,7 +239,7 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
let dl = &bx.tcx().data_layout;
let cast_to_layout = bx.cx().layout_of(cast_to);
let cast_to = bx.cx().immediate_backend_type(cast_to_layout);
if self.layout.abi.is_uninhabited() {
if self.layout.is_uninhabited() {
return bx.cx().const_poison(cast_to);
}
let (tag_scalar, tag_encoding, tag_field) = match self.layout.variants {
@ -358,7 +358,7 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
bx: &mut Bx,
variant_index: VariantIdx,
) {
if self.layout.for_variant(bx.cx(), variant_index).abi.is_uninhabited() {
if self.layout.for_variant(bx.cx(), variant_index).is_uninhabited() {
// We play it safe by using a well-defined `abort`, but we could go for immediate UB
// if that turns out to be helpful.
bx.abort();


@ -203,10 +203,10 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
) -> Option<OperandValue<Bx::Value>> {
// Check for transmutes that are always UB.
if operand.layout.size != cast.size
|| operand.layout.abi.is_uninhabited()
|| cast.abi.is_uninhabited()
|| operand.layout.is_uninhabited()
|| cast.is_uninhabited()
{
if !operand.layout.abi.is_uninhabited() {
if !operand.layout.is_uninhabited() {
// Since this is known statically and the input could have existed
// without already having hit UB, might as well trap for it.
bx.abort();
@ -555,7 +555,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
assert!(bx.cx().is_backend_immediate(cast));
let to_backend_ty = bx.cx().immediate_backend_type(cast);
if operand.layout.abi.is_uninhabited() {
if operand.layout.is_uninhabited() {
let val = OperandValue::Immediate(bx.cx().const_poison(to_backend_ty));
return OperandRef { val, layout: cast };
}
@ -1136,17 +1136,17 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
OperandValueKind::ZeroSized
} else if self.cx.is_backend_immediate(layout) {
assert!(!self.cx.is_backend_scalar_pair(layout));
OperandValueKind::Immediate(match layout.abi {
abi::Abi::Scalar(s) => s,
abi::Abi::Vector { element, .. } => element,
OperandValueKind::Immediate(match layout.backend_repr {
abi::BackendRepr::Scalar(s) => s,
abi::BackendRepr::Vector { element, .. } => element,
x => span_bug!(self.mir.span, "Couldn't translate {x:?} as backend immediate"),
})
} else if self.cx.is_backend_scalar_pair(layout) {
let abi::Abi::ScalarPair(s1, s2) = layout.abi else {
let abi::BackendRepr::ScalarPair(s1, s2) = layout.backend_repr else {
span_bug!(
self.mir.span,
"Couldn't translate {:?} as backend scalar pair",
layout.abi,
layout.backend_repr,
);
};
OperandValueKind::Pair(s1, s2)


@ -1,13 +1,13 @@
use std::assert_matches::assert_matches;
use std::ops::Deref;
use rustc_abi::{Align, BackendRepr, Scalar, Size, WrappingRange};
use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrs;
use rustc_middle::ty::layout::{FnAbiOf, LayoutOf, TyAndLayout};
use rustc_middle::ty::{Instance, Ty};
use rustc_session::config::OptLevel;
use rustc_span::Span;
use rustc_target::abi::call::FnAbi;
use rustc_target::abi::{Abi, Align, Scalar, Size, WrappingRange};
use super::abi::AbiBuilderMethods;
use super::asm::AsmBuilderMethods;
@ -162,7 +162,7 @@ pub trait BuilderMethods<'a, 'tcx>:
fn from_immediate(&mut self, val: Self::Value) -> Self::Value;
fn to_immediate(&mut self, val: Self::Value, layout: TyAndLayout<'_>) -> Self::Value {
if let Abi::Scalar(scalar) = layout.abi {
if let BackendRepr::Scalar(scalar) = layout.backend_repr {
self.to_immediate_scalar(val, scalar)
} else {
val


@ -64,8 +64,7 @@ impl<'mir, 'tcx> Qualifs<'mir, 'tcx> {
let ConstCx { tcx, body, .. } = *ccx;
FlowSensitiveAnalysis::new(NeedsDrop, ccx)
.into_engine(tcx, body)
.iterate_to_fixpoint()
.iterate_to_fixpoint(tcx, body, None)
.into_results_cursor(body)
});
@ -94,8 +93,7 @@ impl<'mir, 'tcx> Qualifs<'mir, 'tcx> {
let ConstCx { tcx, body, .. } = *ccx;
FlowSensitiveAnalysis::new(NeedsNonConstDrop, ccx)
.into_engine(tcx, body)
.iterate_to_fixpoint()
.iterate_to_fixpoint(tcx, body, None)
.into_results_cursor(body)
});
@ -124,8 +122,7 @@ impl<'mir, 'tcx> Qualifs<'mir, 'tcx> {
let ConstCx { tcx, body, .. } = *ccx;
FlowSensitiveAnalysis::new(HasMutInterior, ccx)
.into_engine(tcx, body)
.iterate_to_fixpoint()
.iterate_to_fixpoint(tcx, body, None)
.into_results_cursor(body)
});
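The call sites in this file all follow the same pattern: `iterate_to_fixpoint` now takes the `TyCtxt`, the MIR body, and an optional pass name explicitly (the trailing `None` here, presumably used for dataflow dumps) instead of recovering them from the engine. Sketch of the new call shape as these hunks use it:

let mut cursor = FlowSensitiveAnalysis::new(NeedsDrop, ccx)
    .into_engine(tcx, body)
    .iterate_to_fixpoint(tcx, body, None) // pass name: None
    .into_results_cursor(body);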
@ -240,8 +237,7 @@ impl<'mir, 'tcx> Checker<'mir, 'tcx> {
let always_live_locals = &always_storage_live_locals(&ccx.body);
let mut maybe_storage_live =
MaybeStorageLive::new(Cow::Borrowed(always_live_locals))
.into_engine(ccx.tcx, &ccx.body)
.iterate_to_fixpoint()
.iterate_to_fixpoint(ccx.tcx, &ccx.body, None)
.into_results_cursor(&ccx.body);
// And then check all `Return` in the MIR, and if a local is "maybe live" at a
@ -593,7 +589,7 @@ impl<'tcx> Visitor<'tcx> for Checker<'_, 'tcx> {
// Typeck only does a "non-const" check since it operates on HIR and cannot distinguish
// which path expressions are getting called on and which path expressions are only used
// as function pointers. This is required for correctness.
let infcx = tcx.infer_ctxt().build();
let infcx = tcx.infer_ctxt().build(body.typing_mode(tcx));
let ocx = ObligationCtxt::new_with_diagnostics(&infcx);
let predicates = tcx.predicates_of(callee).instantiate(tcx, fn_args);


@ -32,14 +32,7 @@ impl<'mir, 'tcx> ConstCx<'mir, 'tcx> {
pub fn new(tcx: TyCtxt<'tcx>, body: &'mir mir::Body<'tcx>) -> Self {
let def_id = body.source.def_id().expect_local();
let param_env = tcx.param_env(def_id);
Self::new_with_param_env(tcx, body, param_env)
}
pub fn new_with_param_env(
tcx: TyCtxt<'tcx>,
body: &'mir mir::Body<'tcx>,
param_env: ty::ParamEnv<'tcx>,
) -> Self {
let const_kind = tcx.hir().body_const_context(body.source.def_id().expect_local());
ConstCx { body, tcx, param_env, const_kind }
}


@ -116,7 +116,7 @@ impl<'tcx> NonConstOp<'tcx> for FnCallNonConst<'tcx> {
let obligation =
Obligation::new(tcx, ObligationCause::dummy(), param_env, trait_ref);
let infcx = tcx.infer_ctxt().build();
let infcx = tcx.infer_ctxt().build(body.typing_mode(tcx));
let mut selcx = SelectionContext::new(&infcx);
let implsrc = selcx.select(&obligation);


@ -114,11 +114,11 @@ impl Qualif for HasMutInterior {
ty::TraitRef::new(cx.tcx, freeze_def_id, [ty::GenericArg::from(ty)]),
);
let infcx = cx
.tcx
.infer_ctxt()
.with_opaque_type_inference(cx.body.source.def_id().expect_local())
.build();
// FIXME(#132279): This should eventually use the already defined hidden types.
let infcx = cx.tcx.infer_ctxt().build(ty::TypingMode::analysis_in_body(
cx.tcx,
cx.body.source.def_id().expect_local(),
));
let ocx = ObligationCtxt::new(&infcx);
ocx.register_obligation(obligation);
let errors = ocx.select_all_or_error();


@ -131,7 +131,7 @@ impl<'tcx> interpret::Machine<'tcx> for DummyMachine {
interp_ok(match bin_op {
Eq | Ne | Lt | Le | Gt | Ge => {
// Types can differ, e.g. fn ptrs with different `for`.
assert_eq!(left.layout.abi, right.layout.abi);
assert_eq!(left.layout.backend_repr, right.layout.backend_repr);
let size = ecx.pointer_size();
// Just compare the bits. ScalarPairs are compared lexicographically.
// We thus always compare pairs and simply fill scalars up with 0.


@ -1,6 +1,7 @@
use std::sync::atomic::Ordering::Relaxed;
use either::{Left, Right};
use rustc_abi::{self as abi, BackendRepr};
use rustc_hir::def::DefKind;
use rustc_middle::bug;
use rustc_middle::mir::interpret::{AllocId, ErrorHandled, InterpErrorInfo};
@ -12,7 +13,6 @@ use rustc_middle::ty::print::with_no_trimmed_paths;
use rustc_middle::ty::{self, Ty, TyCtxt};
use rustc_span::def_id::LocalDefId;
use rustc_span::{DUMMY_SP, Span};
use rustc_target::abi::{self, Abi};
use tracing::{debug, instrument, trace};
use super::{CanAccessMutGlobal, CompileTimeInterpCx, CompileTimeMachine};
@ -174,8 +174,8 @@ pub(super) fn op_to_const<'tcx>(
// type (it's used throughout the compiler and having it work just on literals is not enough)
// and we want it to be fast (i.e., don't go to an `Allocation` and reconstruct the `Scalar`
// from its byte-serialized form).
let force_as_immediate = match op.layout.abi {
Abi::Scalar(abi::Scalar::Initialized { .. }) => true,
let force_as_immediate = match op.layout.backend_repr {
BackendRepr::Scalar(abi::Scalar::Initialized { .. }) => true,
// We don't *force* `ConstValue::Slice` for `ScalarPair`. This has the advantage that if the
// input `op` is a place, then turning it into a `ConstValue` and back into a `OpTy` will
// not have to generate any duplicate allocations (we preserve the original `AllocId` in


@ -395,7 +395,7 @@ impl<'tcx> interpret::Machine<'tcx> for CompileTimeMachine<'tcx> {
#[inline(always)]
fn enforce_validity(ecx: &InterpCx<'tcx, Self>, layout: TyAndLayout<'tcx>) -> bool {
ecx.tcx.sess.opts.unstable_opts.extra_const_ub_checks || layout.abi.is_uninhabited()
ecx.tcx.sess.opts.unstable_opts.extra_const_ub_checks || layout.is_uninhabited()
}
fn load_mir(


@ -1,10 +1,10 @@
use rustc_abi::{BackendRepr, VariantIdx};
use rustc_data_structures::stack::ensure_sufficient_stack;
use rustc_middle::mir::interpret::{EvalToValTreeResult, GlobalId};
use rustc_middle::ty::layout::{LayoutCx, LayoutOf, TyAndLayout};
use rustc_middle::ty::{self, ScalarInt, Ty, TyCtxt};
use rustc_middle::{bug, mir};
use rustc_span::DUMMY_SP;
use rustc_target::abi::{Abi, VariantIdx};
use tracing::{debug, instrument, trace};
use super::eval_queries::{mk_eval_cx_to_read_const_val, op_to_const};
@ -117,7 +117,7 @@ fn const_to_valtree_inner<'tcx>(
let val = ecx.read_immediate(place).unwrap();
// We could allow wide raw pointers where both sides are integers in the future,
// but for now we reject them.
if matches!(val.layout.abi, Abi::ScalarPair(..)) {
if matches!(val.layout.backend_repr, BackendRepr::ScalarPair(..)) {
return Err(ValTreeCreationError::NonSupportedType(ty));
}
let val = val.to_scalar();
@ -311,7 +311,7 @@ pub fn valtree_to_const_value<'tcx>(
// Fast path to avoid some allocations.
return mir::ConstValue::ZeroSized;
}
if layout.abi.is_scalar()
if layout.backend_repr.is_scalar()
&& (matches!(ty.kind(), ty::Tuple(_))
|| matches!(ty.kind(), ty::Adt(def, _) if def.is_struct()))
{


@ -172,8 +172,8 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
// must be compatible. So we just accept everything with Pointer ABI as compatible,
// even if this will accept some code that is not stably guaranteed to work.
// This also handles function pointers.
let thin_pointer = |layout: TyAndLayout<'tcx>| match layout.abi {
abi::Abi::Scalar(s) => match s.primitive() {
let thin_pointer = |layout: TyAndLayout<'tcx>| match layout.backend_repr {
abi::BackendRepr::Scalar(s) => match s.primitive() {
abi::Primitive::Pointer(addr_space) => Some(addr_space),
_ => None,
},


@ -274,7 +274,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
cast_ty: Ty<'tcx>,
) -> InterpResult<'tcx, Scalar<M::Provenance>> {
// Let's make sure v is sign-extended *if* it has a signed type.
let signed = src_layout.abi.is_signed(); // Also asserts that abi is `Scalar`.
let signed = src_layout.backend_repr.is_signed(); // Also asserts that abi is `Scalar`.
let v = match src_layout.ty.kind() {
Uint(_) | RawPtr(..) | FnPtr(..) => scalar.to_uint(src_layout.size)?,


@ -27,7 +27,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
// discriminant, so we cannot do anything here.
// When evaluating we will always error before even getting here, but ConstProp 'executes'
// dead code, so we cannot ICE here.
if dest.layout().for_variant(self, variant_index).abi.is_uninhabited() {
if dest.layout().for_variant(self, variant_index).is_uninhabited() {
throw_ub!(UninhabitedEnumVariantWritten(variant_index))
}
@ -86,7 +86,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
// For consistency with `write_discriminant`, and to make sure that
// `project_downcast` cannot fail due to strange layouts, we declare immediate UB
// for uninhabited variants.
if op.layout().for_variant(self, index).abi.is_uninhabited() {
if op.layout().for_variant(self, index).is_uninhabited() {
throw_ub!(UninhabitedEnumVariantRead(index))
}
}
@ -112,7 +112,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
// Read tag and sanity-check `tag_layout`.
let tag_val = self.read_immediate(&self.project_field(op, tag_field)?)?;
assert_eq!(tag_layout.size, tag_val.layout.size);
assert_eq!(tag_layout.abi.is_signed(), tag_val.layout.abi.is_signed());
assert_eq!(tag_layout.backend_repr.is_signed(), tag_val.layout.backend_repr.is_signed());
trace!("tag value: {}", tag_val);
// Figure out which discriminant and variant this corresponds to.
@ -203,7 +203,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
// Reading the discriminant of an uninhabited variant is UB. This is the basis for the
// `uninhabited_enum_branching` MIR pass. It also ensures consistency with
// `write_discriminant`.
if op.layout().for_variant(self, index).abi.is_uninhabited() {
if op.layout().for_variant(self, index).is_uninhabited() {
throw_ub!(UninhabitedEnumVariantRead(index))
}
interp_ok(index)


@ -3,13 +3,15 @@ use rustc_errors::DiagCtxtHandle;
use rustc_hir::def_id::DefId;
use rustc_infer::infer::TyCtxtInferExt;
use rustc_infer::infer::at::ToTrace;
use rustc_infer::traits::ObligationCause;
use rustc_infer::traits::{ObligationCause, Reveal};
use rustc_middle::mir::interpret::{ErrorHandled, InvalidMetaKind, ReportedErrorInfo};
use rustc_middle::query::TyCtxtAt;
use rustc_middle::ty::layout::{
self, FnAbiError, FnAbiOfHelpers, FnAbiRequest, LayoutError, LayoutOfHelpers, TyAndLayout,
};
use rustc_middle::ty::{self, GenericArgsRef, ParamEnv, Ty, TyCtxt, TypeFoldable, Variance};
use rustc_middle::ty::{
self, GenericArgsRef, ParamEnv, Ty, TyCtxt, TypeFoldable, TypingMode, Variance,
};
use rustc_middle::{mir, span_bug};
use rustc_session::Limit;
use rustc_span::Span;
@ -114,6 +116,7 @@ impl<'tcx, M: Machine<'tcx>> FnAbiOfHelpers<'tcx> for InterpCx<'tcx, M> {
/// This test should be symmetric, as it is primarily about layout compatibility.
pub(super) fn mir_assign_valid_types<'tcx>(
tcx: TyCtxt<'tcx>,
typing_mode: TypingMode<'tcx>,
param_env: ParamEnv<'tcx>,
src: TyAndLayout<'tcx>,
dest: TyAndLayout<'tcx>,
@ -122,7 +125,7 @@ pub(super) fn mir_assign_valid_types<'tcx>(
// all normal lifetimes are erased, higher-ranked types with their
// late-bound lifetimes are still around and can lead to type
// differences.
if util::relate_types(tcx, param_env, Variance::Covariant, src.ty, dest.ty) {
if util::relate_types(tcx, typing_mode, param_env, Variance::Covariant, src.ty, dest.ty) {
// Make sure the layout is equal, too -- just to be safe. Miri really
// needs layout equality. For performance reason we skip this check when
// the types are equal. Equal types *can* have different layouts when
@ -142,6 +145,7 @@ pub(super) fn mir_assign_valid_types<'tcx>(
#[cfg_attr(not(debug_assertions), inline(always))]
pub(super) fn from_known_layout<'tcx>(
tcx: TyCtxtAt<'tcx>,
typing_mode: TypingMode<'tcx>,
param_env: ParamEnv<'tcx>,
known_layout: Option<TyAndLayout<'tcx>>,
compute: impl FnOnce() -> InterpResult<'tcx, TyAndLayout<'tcx>>,
@ -151,7 +155,13 @@ pub(super) fn from_known_layout<'tcx>(
Some(known_layout) => {
if cfg!(debug_assertions) {
let check_layout = compute()?;
if !mir_assign_valid_types(tcx.tcx, param_env, check_layout, known_layout) {
if !mir_assign_valid_types(
tcx.tcx,
typing_mode,
param_env,
check_layout,
known_layout,
) {
span_bug!(
tcx.span,
"expected type differs from actual type.\nexpected: {}\nactual: {}",
@ -201,6 +211,11 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
}
}
pub fn typing_mode(&self) -> TypingMode<'tcx> {
debug_assert_eq!(self.param_env.reveal(), Reveal::All);
TypingMode::PostAnalysis
}
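// Illustrative note: with this accessor in place, the other hunks in this
// commit thread the mode through explicitly, e.g.
//
// let infcx = self.tcx.infer_ctxt().build(self.typing_mode());
// let compat = mir_assign_valid_types(
//     *self.tcx, self.typing_mode(), self.param_env, src_layout, dest_layout);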
/// Returns the span of the currently executed statement/terminator.
/// This is the span typically used for error reporting.
#[inline(always)]
@ -325,7 +340,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
return true;
}
// Slow path: spin up an inference context to check if these traits are sufficiently equal.
let infcx = self.tcx.infer_ctxt().build();
let infcx = self.tcx.infer_ctxt().build(self.typing_mode());
let ocx = ObligationCtxt::new(&infcx);
let cause = ObligationCause::dummy_with_span(self.cur_span());
// equate the two trait refs after normalization


@ -364,7 +364,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
let msg = match requirement {
// For *all* intrinsics we first check `is_uninhabited` to give a more specific
// error message.
_ if layout.abi.is_uninhabited() => format!(
_ if layout.is_uninhabited() => format!(
"aborted execution: attempted to instantiate uninhabited type `{ty}`"
),
ValidityRequirement::Inhabited => bug!("handled earlier"),
@ -563,7 +563,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
self.binary_op(mir_op.wrapping_to_overflowing().unwrap(), l, r)?.to_scalar_pair();
interp_ok(if overflowed.to_bool()? {
let size = l.layout.size;
if l.layout.abi.is_signed() {
if l.layout.backend_repr.is_signed() {
// For signed ints the saturated value depends on the sign of the first
// term since the sign of the second term can be inferred from this and
// the fact that the operation has overflowed (if either is 0 no
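The sign rule in the comment above can be checked with plain integer arithmetic: when a signed addition overflows, the first operand's sign alone determines the saturation direction (a zero operand can never overflow). A standalone illustration:

fn main() {
    let a: i8 = 100;
    let (_, overflowed) = a.overflowing_add(i8::MAX);
    assert!(overflowed);
    let saturated = if a >= 0 { i8::MAX } else { i8::MIN };
    assert_eq!(saturated, a.saturating_add(i8::MAX));
}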


@ -5,7 +5,7 @@ use std::assert_matches::assert_matches;
use either::{Either, Left, Right};
use rustc_abi as abi;
use rustc_abi::{Abi, HasDataLayout, Size};
use rustc_abi::{BackendRepr, HasDataLayout, Size};
use rustc_hir::def::Namespace;
use rustc_middle::mir::interpret::ScalarSizeMismatch;
use rustc_middle::ty::layout::{HasParamEnv, HasTyCtxt, LayoutOf, TyAndLayout};
@ -114,9 +114,9 @@ impl<Prov: Provenance> Immediate<Prov> {
}
/// Assert that this immediate is a valid value for the given ABI.
pub fn assert_matches_abi(self, abi: Abi, msg: &str, cx: &impl HasDataLayout) {
pub fn assert_matches_abi(self, abi: BackendRepr, msg: &str, cx: &impl HasDataLayout) {
match (self, abi) {
(Immediate::Scalar(scalar), Abi::Scalar(s)) => {
(Immediate::Scalar(scalar), BackendRepr::Scalar(s)) => {
assert_eq!(scalar.size(), s.size(cx), "{msg}: scalar value has wrong size");
if !matches!(s.primitive(), abi::Primitive::Pointer(..)) {
// This is not a pointer, it should not carry provenance.
@ -126,7 +126,7 @@ impl<Prov: Provenance> Immediate<Prov> {
);
}
}
(Immediate::ScalarPair(a_val, b_val), Abi::ScalarPair(a, b)) => {
(Immediate::ScalarPair(a_val, b_val), BackendRepr::ScalarPair(a, b)) => {
assert_eq!(
a_val.size(),
a.size(cx),
@ -244,7 +244,7 @@ impl<'tcx, Prov: Provenance> std::ops::Deref for ImmTy<'tcx, Prov> {
impl<'tcx, Prov: Provenance> ImmTy<'tcx, Prov> {
#[inline]
pub fn from_scalar(val: Scalar<Prov>, layout: TyAndLayout<'tcx>) -> Self {
debug_assert!(layout.abi.is_scalar(), "`ImmTy::from_scalar` on non-scalar layout");
debug_assert!(layout.backend_repr.is_scalar(), "`ImmTy::from_scalar` on non-scalar layout");
debug_assert_eq!(val.size(), layout.size);
ImmTy { imm: val.into(), layout }
}
@ -252,7 +252,7 @@ impl<'tcx, Prov: Provenance> ImmTy<'tcx, Prov> {
#[inline]
pub fn from_scalar_pair(a: Scalar<Prov>, b: Scalar<Prov>, layout: TyAndLayout<'tcx>) -> Self {
debug_assert!(
matches!(layout.abi, Abi::ScalarPair(..)),
matches!(layout.backend_repr, BackendRepr::ScalarPair(..)),
"`ImmTy::from_scalar_pair` on non-scalar-pair layout"
);
let imm = Immediate::ScalarPair(a, b);
@ -263,9 +263,9 @@ impl<'tcx, Prov: Provenance> ImmTy<'tcx, Prov> {
pub fn from_immediate(imm: Immediate<Prov>, layout: TyAndLayout<'tcx>) -> Self {
// Without a `cx` we cannot call `assert_matches_abi`.
debug_assert!(
match (imm, layout.abi) {
(Immediate::Scalar(..), Abi::Scalar(..)) => true,
(Immediate::ScalarPair(..), Abi::ScalarPair(..)) => true,
match (imm, layout.backend_repr) {
(Immediate::Scalar(..), BackendRepr::Scalar(..)) => true,
(Immediate::ScalarPair(..), BackendRepr::ScalarPair(..)) => true,
(Immediate::Uninit, _) if layout.is_sized() => true,
_ => false,
},
@ -356,7 +356,11 @@ impl<'tcx, Prov: Provenance> ImmTy<'tcx, Prov> {
fn offset_(&self, offset: Size, layout: TyAndLayout<'tcx>, cx: &impl HasDataLayout) -> Self {
// Verify that the input matches its type.
if cfg!(debug_assertions) {
self.assert_matches_abi(self.layout.abi, "invalid input to Immediate::offset", cx);
self.assert_matches_abi(
self.layout.backend_repr,
"invalid input to Immediate::offset",
cx,
);
}
// `ImmTy` has already been checked to be in-bounds, so we can just check directly if this
// remains in-bounds. This cannot actually be violated since projections are type-checked
@ -370,19 +374,19 @@ impl<'tcx, Prov: Provenance> ImmTy<'tcx, Prov> {
);
// This makes several assumptions about what layouts we will encounter; we match what
// codegen does as well as we can (see `extract_field` in `rustc_codegen_ssa/src/mir/operand.rs`).
let inner_val: Immediate<_> = match (**self, self.layout.abi) {
let inner_val: Immediate<_> = match (**self, self.layout.backend_repr) {
// If the entire value is uninit, then so is the field (can happen in ConstProp).
(Immediate::Uninit, _) => Immediate::Uninit,
// If the field is uninhabited, we can forget the data (can happen in ConstProp).
// `enum S { A(!), B, C }` is an example of an enum with Scalar layout that
// has an `Uninhabited` variant, which means this case is possible.
_ if layout.abi.is_uninhabited() => Immediate::Uninit,
_ if layout.is_uninhabited() => Immediate::Uninit,
// the field contains no information, can be left uninit
// (Scalar/ScalarPair can contain even an aligned ZST, not just a 1-ZST)
_ if layout.is_zst() => Immediate::Uninit,
// some fieldless enum variants can have non-zero size but still `Aggregate` ABI... try
// to detect those here and also give them no data
_ if matches!(layout.abi, Abi::Aggregate { .. })
_ if matches!(layout.backend_repr, BackendRepr::Memory { .. })
&& matches!(layout.variants, abi::Variants::Single { .. })
&& matches!(&layout.fields, abi::FieldsShape::Arbitrary { offsets, .. } if offsets.len() == 0) =>
{
@ -394,7 +398,7 @@ impl<'tcx, Prov: Provenance> ImmTy<'tcx, Prov> {
**self
}
// extract fields from types with `ScalarPair` ABI
(Immediate::ScalarPair(a_val, b_val), Abi::ScalarPair(a, b)) => {
(Immediate::ScalarPair(a_val, b_val), BackendRepr::ScalarPair(a, b)) => {
Immediate::from(if offset.bytes() == 0 {
a_val
} else {
@ -411,7 +415,11 @@ impl<'tcx, Prov: Provenance> ImmTy<'tcx, Prov> {
),
};
// Ensure the new layout matches the new value.
inner_val.assert_matches_abi(layout.abi, "invalid field type in Immediate::offset", cx);
inner_val.assert_matches_abi(
layout.backend_repr,
"invalid field type in Immediate::offset",
cx,
);
ImmTy::from_immediate(inner_val, layout)
}
@ -567,8 +575,8 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
// case where some of the bytes are initialized and others are not. So, we need an extra
// check that walks over the type of `mplace` to make sure it is truly correct to treat this
// like a `Scalar` (or `ScalarPair`).
interp_ok(match mplace.layout.abi {
Abi::Scalar(abi::Scalar::Initialized { value: s, .. }) => {
interp_ok(match mplace.layout.backend_repr {
BackendRepr::Scalar(abi::Scalar::Initialized { value: s, .. }) => {
let size = s.size(self);
assert_eq!(size, mplace.layout.size, "abi::Scalar size does not match layout size");
let scalar = alloc.read_scalar(
@ -577,7 +585,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
)?;
Some(ImmTy::from_scalar(scalar, mplace.layout))
}
Abi::ScalarPair(
BackendRepr::ScalarPair(
abi::Scalar::Initialized { value: a, .. },
abi::Scalar::Initialized { value: b, .. },
) => {
@ -637,9 +645,12 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
op: &impl Projectable<'tcx, M::Provenance>,
) -> InterpResult<'tcx, ImmTy<'tcx, M::Provenance>> {
if !matches!(
op.layout().abi,
Abi::Scalar(abi::Scalar::Initialized { .. })
| Abi::ScalarPair(abi::Scalar::Initialized { .. }, abi::Scalar::Initialized { .. })
op.layout().backend_repr,
BackendRepr::Scalar(abi::Scalar::Initialized { .. })
| BackendRepr::ScalarPair(
abi::Scalar::Initialized { .. },
abi::Scalar::Initialized { .. }
)
) {
span_bug!(self.cur_span(), "primitive read not possible for type: {}", op.layout().ty);
}
@ -762,6 +773,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
)?;
if !mir_assign_valid_types(
*self.tcx,
self.typing_mode(),
self.param_env,
self.layout_of(normalized_place_ty)?,
op.layout,
@ -821,7 +833,9 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
})
};
let layout =
from_known_layout(self.tcx, self.param_env, layout, || self.layout_of(ty).into())?;
from_known_layout(self.tcx, self.typing_mode(), self.param_env, layout, || {
self.layout_of(ty).into()
})?;
let imm = match val_val {
mir::ConstValue::Indirect { alloc_id, offset } => {
// This is const data, no mutation allowed.


@ -114,7 +114,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
let l_bits = left.layout.size.bits();
// Compute the equivalent shift modulo `size` that is in the range `0..size`. (This is
// the one MIR operator that does *not* directly map to a single LLVM operation.)
let (shift_amount, overflow) = if right.layout.abi.is_signed() {
let (shift_amount, overflow) = if right.layout.backend_repr.is_signed() {
let shift_amount = r_signed();
let rem = shift_amount.rem_euclid(l_bits.into());
// `rem` is guaranteed positive, so the `unwrap` cannot fail
@ -126,7 +126,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
};
let shift_amount = u32::try_from(shift_amount).unwrap(); // we brought this into the range `0..size` so this will always fit
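// Worked example of the wrapping rule (illustrative, plain Rust): for a 32-bit
// left-hand side, a shift of 33 wraps to 1 and a signed shift of -1 wraps to 31;
// `overflow` is set whenever the wrapped amount differs from the original.
//
// assert_eq!(33i128.rem_euclid(32), 1);
// assert_eq!((-1i128).rem_euclid(32), 31);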
// Compute the shifted result.
let result = if left.layout.abi.is_signed() {
let result = if left.layout.backend_repr.is_signed() {
let l = l_signed();
let result = match bin_op {
Shl | ShlUnchecked => l.checked_shl(shift_amount).unwrap(),
@ -147,7 +147,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
if overflow && let Some(intrinsic) = throw_ub_on_overflow {
throw_ub!(ShiftOverflow {
intrinsic,
shift_amount: if right.layout.abi.is_signed() {
shift_amount: if right.layout.backend_repr.is_signed() {
Either::Right(r_signed())
} else {
Either::Left(r_unsigned())
@ -171,7 +171,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
let size = left.layout.size;
// Operations that need special treatment for signed integers
if left.layout.abi.is_signed() {
if left.layout.backend_repr.is_signed() {
let op: Option<fn(&i128, &i128) -> bool> = match bin_op {
Lt => Some(i128::lt),
Le => Some(i128::le),
@ -250,7 +250,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
BitXor => ImmTy::from_uint(l ^ r, left.layout),
_ => {
assert!(!left.layout.abi.is_signed());
assert!(!left.layout.backend_repr.is_signed());
let op: fn(u128, u128) -> (u128, bool) = match bin_op {
Add | AddUnchecked | AddWithOverflow => u128::overflowing_add,
Sub | SubUnchecked | SubWithOverflow => u128::overflowing_sub,
@ -315,7 +315,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
let ptr = left.to_scalar().to_pointer(self)?;
let pointee_ty = left.layout.ty.builtin_deref(true).unwrap();
let pointee_layout = self.layout_of(pointee_ty)?;
assert!(pointee_layout.abi.is_sized());
assert!(pointee_layout.is_sized());
// The size always fits in `i64` as it can be at most `isize::MAX`.
let pointee_size = i64::try_from(pointee_layout.size.bytes()).unwrap();
@ -332,7 +332,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
}
let offset_bytes = val.to_target_isize(self)?;
if !right.layout.abi.is_signed() && offset_bytes < 0 {
if !right.layout.backend_repr.is_signed() && offset_bytes < 0 {
// We were supposed to do an unsigned offset but the result is negative -- this
// can only mean that the cast wrapped around.
throw_ub!(PointerArithOverflow)
@ -518,14 +518,14 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
interp_ok(match null_op {
SizeOf => {
if !layout.abi.is_sized() {
if !layout.is_sized() {
span_bug!(self.cur_span(), "unsized type for `NullaryOp::SizeOf`");
}
let val = layout.size.bytes();
ImmTy::from_uint(val, usize_layout())
}
AlignOf => {
if !layout.abi.is_sized() {
if !layout.is_sized() {
span_bug!(self.cur_span(), "unsized type for `NullaryOp::AlignOf`");
}
let val = layout.align.abi.bytes();


@ -5,11 +5,11 @@
use std::assert_matches::assert_matches;
use either::{Either, Left, Right};
use rustc_abi::{Align, BackendRepr, HasDataLayout, Size};
use rustc_ast::Mutability;
use rustc_middle::ty::Ty;
use rustc_middle::ty::layout::{LayoutOf, TyAndLayout};
use rustc_middle::{bug, mir, span_bug};
use rustc_target::abi::{Abi, Align, HasDataLayout, Size};
use tracing::{instrument, trace};
use super::{
@ -540,6 +540,7 @@ where
)?;
if !mir_assign_valid_types(
*self.tcx,
self.typing_mode(),
self.param_env,
self.layout_of(normalized_place_ty)?,
place.layout,
@ -659,7 +660,7 @@ where
// Unfortunately this is too expensive to do in release builds.
if cfg!(debug_assertions) {
src.assert_matches_abi(
local_layout.abi,
local_layout.backend_repr,
"invalid immediate for given destination place",
self,
);
@ -683,7 +684,11 @@ where
) -> InterpResult<'tcx> {
// We use the sizes from `value` below.
// Ensure that matches the type of the place it is written to.
value.assert_matches_abi(layout.abi, "invalid immediate for given destination place", self);
value.assert_matches_abi(
layout.backend_repr,
"invalid immediate for given destination place",
self,
);
// Note that it is really important that the type here is the right one, and matches the
// type things are read at. In case `value` is a `ScalarPair`, we don't do any magic here
// to handle padding properly, which is only correct if we never look at this data with the
@ -700,7 +705,7 @@ where
alloc.write_scalar(alloc_range(Size::ZERO, scalar.size()), scalar)
}
Immediate::ScalarPair(a_val, b_val) => {
let Abi::ScalarPair(a, b) = layout.abi else {
let BackendRepr::ScalarPair(a, b) = layout.backend_repr else {
span_bug!(
self.cur_span(),
"write_immediate_to_mplace: invalid ScalarPair layout: {:#?}",
@ -866,8 +871,13 @@ where
) -> InterpResult<'tcx> {
// We do NOT compare the types for equality, because well-typed code can
// actually "transmute" `&mut T` to `&T` in an assignment without a cast.
let layout_compat =
mir_assign_valid_types(*self.tcx, self.param_env, src.layout(), dest.layout());
let layout_compat = mir_assign_valid_types(
*self.tcx,
self.typing_mode(),
self.param_env,
src.layout(),
dest.layout(),
);
if !allow_transmute && !layout_compat {
span_bug!(
self.cur_span(),


@ -596,12 +596,13 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
return interp_ok(layout);
}
-let layout = from_known_layout(self.tcx, self.param_env, layout, || {
-let local_ty = frame.body.local_decls[local].ty;
-let local_ty =
-self.instantiate_from_frame_and_normalize_erasing_regions(frame, local_ty)?;
-self.layout_of(local_ty).into()
-})?;
+let layout =
+from_known_layout(self.tcx, self.typing_mode(), self.param_env, layout, || {
+let local_ty = frame.body.local_decls[local].ty;
+let local_ty =
+self.instantiate_from_frame_and_normalize_erasing_regions(frame, local_ty)?;
+self.layout_of(local_ty).into()
+})?;
// Layouts of locals are requested a lot, so we cache them.
state.layout.set(Some(layout));

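This hunk threads `self.typing_mode()` into `from_known_layout` and then stores the result, because, as the trailing comment says, layouts of locals are requested a lot. A compact sketch of that compute-once cache with a `Cell<Option<_>>`; `Layout` here is a mock stand-in for rustc's `TyAndLayout`:

```rust
use std::cell::Cell;

// Mock per-local layout cache: compute once, then serve the stored copy.
#[derive(Clone, Copy, Debug, PartialEq)]
struct Layout {
    size: u64,
}

struct LocalState {
    layout: Cell<Option<Layout>>,
}

impl LocalState {
    fn layout_of(&self, compute: impl FnOnce() -> Layout) -> Layout {
        if let Some(cached) = self.layout.get() {
            return cached; // fast path: requested often, computed once
        }
        let layout = compute();
        self.layout.set(Some(layout));
        layout
    }
}

fn main() {
    let state = LocalState { layout: Cell::new(None) };
    let first = state.layout_of(|| Layout { size: 8 });
    // The closure is not run again; the cached value is returned.
    let second = state.layout_of(|| unreachable!());
    assert_eq!(first, second);
}
```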
View file

@@ -11,6 +11,10 @@ use std::num::NonZero;
use either::{Left, Right};
use hir::def::DefKind;
+use rustc_abi::{
+BackendRepr, FieldIdx, FieldsShape, Scalar as ScalarAbi, Size, VariantIdx, Variants,
+WrappingRange,
+};
use rustc_ast::Mutability;
use rustc_data_structures::fx::FxHashSet;
use rustc_hir as hir;
@@ -23,9 +27,6 @@ use rustc_middle::mir::interpret::{
use rustc_middle::ty::layout::{LayoutCx, LayoutOf, TyAndLayout};
use rustc_middle::ty::{self, Ty};
use rustc_span::symbol::{Symbol, sym};
-use rustc_target::abi::{
-Abi, FieldIdx, FieldsShape, Scalar as ScalarAbi, Size, VariantIdx, Variants, WrappingRange,
-};
use tracing::trace;
use super::machine::AllocMap;
@@ -422,7 +423,7 @@ impl<'rt, 'tcx, M: Machine<'tcx>> ValidityVisitor<'rt, 'tcx, M> {
// Reset provenance: ensure slice tail metadata does not preserve provenance,
// and ensure all pointers do not preserve partial provenance.
if self.reset_provenance_and_padding {
-if matches!(imm.layout.abi, Abi::Scalar(..)) {
+if matches!(imm.layout.backend_repr, BackendRepr::Scalar(..)) {
// A thin pointer. If it has provenance, we don't have to do anything.
// If it does not, ensure we clear the provenance in memory.
if matches!(imm.to_scalar(), Scalar::Int(..)) {
@@ -542,7 +543,7 @@ impl<'rt, 'tcx, M: Machine<'tcx>> ValidityVisitor<'rt, 'tcx, M> {
throw_validation_failure!(self.path, NullPtr { ptr_kind })
}
// Do not allow references to uninhabited types.
-if place.layout.abi.is_uninhabited() {
+if place.layout.is_uninhabited() {
let ty = place.layout.ty;
throw_validation_failure!(self.path, PtrToUninhabited { ptr_kind, ty })
}
@@ -867,7 +868,7 @@ impl<'rt, 'tcx, M: Machine<'tcx>> ValidityVisitor<'rt, 'tcx, M> {
/// Add the entire given place to the "data" range of this visit.
fn add_data_range_place(&mut self, place: &PlaceTy<'tcx, M::Provenance>) {
// Only sized places can be added this way.
-debug_assert!(place.layout.abi.is_sized());
+debug_assert!(place.layout.is_sized());
if let Some(data_bytes) = self.data_bytes.as_mut() {
let offset = Self::data_range_offset(self.ecx, place);
data_bytes.add_range(offset, place.layout.size);
@@ -945,7 +946,7 @@ impl<'rt, 'tcx, M: Machine<'tcx>> ValidityVisitor<'rt, 'tcx, M> {
layout: TyAndLayout<'tcx>,
) -> Cow<'e, RangeSet> {
assert!(layout.ty.is_union());
-assert!(layout.abi.is_sized(), "there are no unsized unions");
+assert!(layout.is_sized(), "there are no unsized unions");
let layout_cx = LayoutCx::new(*ecx.tcx, ecx.param_env);
return M::cached_union_data_range(ecx, layout.ty, || {
let mut out = RangeSet(Vec::new());
@@ -981,7 +982,7 @@ impl<'rt, 'tcx, M: Machine<'tcx>> ValidityVisitor<'rt, 'tcx, M> {
let elem = layout.field(cx, 0);
// Fast-path for large arrays of simple types that do not contain any padding.
-if elem.abi.is_scalar() {
+if elem.backend_repr.is_scalar() {
out.add_range(base_offset, elem.size * count);
} else {
for idx in 0..count {
@@ -1299,19 +1300,19 @@ impl<'rt, 'tcx, M: Machine<'tcx>> ValueVisitor<'tcx, M> for ValidityVisitor<'rt,
// FIXME: We could avoid some redundant checks here. For newtypes wrapping
// scalars, we do the same check on every "level" (e.g., first we check
// MyNewtype and then the scalar in there).
-match val.layout.abi {
-Abi::Uninhabited => {
+match val.layout.backend_repr {
+BackendRepr::Uninhabited => {
let ty = val.layout.ty;
throw_validation_failure!(self.path, UninhabitedVal { ty });
}
-Abi::Scalar(scalar_layout) => {
+BackendRepr::Scalar(scalar_layout) => {
if !scalar_layout.is_uninit_valid() {
// There is something to check here.
let scalar = self.read_scalar(val, ExpectedKind::InitScalar)?;
self.visit_scalar(scalar, scalar_layout)?;
}
}
-Abi::ScalarPair(a_layout, b_layout) => {
+BackendRepr::ScalarPair(a_layout, b_layout) => {
// We can only proceed if *both* scalars need to be initialized.
// FIXME: find a way to also check ScalarPair when one side can be uninit but
// the other must be init.
@@ -1322,12 +1323,12 @@ impl<'rt, 'tcx, M: Machine<'tcx>> ValueVisitor<'tcx, M> for ValidityVisitor<'rt,
self.visit_scalar(b, b_layout)?;
}
}
-Abi::Vector { .. } => {
+BackendRepr::Vector { .. } => {
// No checks here, we assume layout computation gets this right.
// (This is harder to check since Miri does not represent these as `Immediate`. We
// also cannot use field projections since this might be a newtype around a vector.)
}
-Abi::Aggregate { .. } => {
+BackendRepr::Memory { .. } => {
// Nothing to do.
}
}

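The validity-visitor hunks show the rename most completely: every `Abi::*` arm becomes a `BackendRepr::*` arm, with `Abi::Aggregate` turning into `BackendRepr::Memory`. A toy version of that dispatch, with heavily simplified stand-ins for the scalar layouts:

```rust
// Simplified validity dispatch on `BackendRepr`: the point is the shape of
// the match after the rename, not rustc's actual checks.
#[derive(Clone, Copy)]
struct ScalarLayout {
    uninit_valid: bool,
}

enum BackendRepr {
    Uninhabited,
    Scalar(ScalarLayout),
    ScalarPair(ScalarLayout, ScalarLayout),
    Vector,
    Memory,
}

fn visit_value(repr: &BackendRepr) -> Result<(), &'static str> {
    match repr {
        BackendRepr::Uninhabited => Err("constructed an uninhabited value"),
        BackendRepr::Scalar(s) => {
            if !s.uninit_valid {
                // the real visitor reads the scalar here and validates it
            }
            Ok(())
        }
        BackendRepr::ScalarPair(a, b) => {
            // only checked when *both* halves require initialization
            if !a.uninit_valid && !b.uninit_valid {
                // read and validate both scalars
            }
            Ok(())
        }
        // Vectors are trusted to layout computation; `Memory` layouts are
        // handled field by field elsewhere.
        BackendRepr::Vector | BackendRepr::Memory => Ok(()),
    }
}

fn main() {
    let bool_like = BackendRepr::Scalar(ScalarLayout { uninit_valid: false });
    assert!(visit_value(&bool_like).is_ok());
    assert!(visit_value(&BackendRepr::Uninhabited).is_err());
    assert!(visit_value(&BackendRepr::Memory).is_ok());
}
```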
View file

@@ -1,9 +1,9 @@
+use rustc_abi::{BackendRepr, FieldsShape, Scalar, Variants};
use rustc_middle::bug;
use rustc_middle::ty::layout::{
HasTyCtxt, LayoutCx, LayoutError, LayoutOf, TyAndLayout, ValidityRequirement,
};
use rustc_middle::ty::{ParamEnvAnd, Ty, TyCtxt};
-use rustc_target::abi::{Abi, FieldsShape, Scalar, Variants};
use crate::const_eval::{CanAccessMutGlobal, CheckAlignment, CompileTimeMachine};
use crate::interpret::{InterpCx, MemoryKind};
@@ -29,7 +29,7 @@ pub fn check_validity_requirement<'tcx>(
// There is nothing strict or lax about inhabitedness.
if kind == ValidityRequirement::Inhabited {
-return Ok(!layout.abi.is_uninhabited());
+return Ok(!layout.is_uninhabited());
}
let layout_cx = LayoutCx::new(tcx, param_env_and_ty.param_env);
@@ -111,12 +111,12 @@ fn check_validity_requirement_lax<'tcx>(
};
// Check the ABI.
-let valid = match this.abi {
-Abi::Uninhabited => false, // definitely UB
-Abi::Scalar(s) => scalar_allows_raw_init(s),
-Abi::ScalarPair(s1, s2) => scalar_allows_raw_init(s1) && scalar_allows_raw_init(s2),
-Abi::Vector { element: s, count } => count == 0 || scalar_allows_raw_init(s),
-Abi::Aggregate { .. } => true, // Fields are checked below.
+let valid = match this.backend_repr {
+BackendRepr::Uninhabited => false, // definitely UB
+BackendRepr::Scalar(s) => scalar_allows_raw_init(s),
+BackendRepr::ScalarPair(s1, s2) => scalar_allows_raw_init(s1) && scalar_allows_raw_init(s2),
+BackendRepr::Vector { element: s, count } => count == 0 || scalar_allows_raw_init(s),
+BackendRepr::Memory { .. } => true, // Fields are checked below.
};
if !valid {
// This is definitely not okay.

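In the lax check above, a scalar accepts arbitrary initialized bytes only when its valid range covers every bit pattern of its size. A sketch of that full-range test on a simplified `WrappingRange`; the field names mirror rustc's, but `is_full_for` is an illustrative stand-in:

```rust
// Sketch of the per-scalar rule behind `scalar_allows_raw_init`: raw bytes
// are only acceptable when the scalar's valid range covers everything.
#[derive(Clone, Copy)]
struct WrappingRange {
    start: u128,
    end: u128,
}

impl WrappingRange {
    fn is_full_for(&self, size_bits: u32) -> bool {
        let max = if size_bits == 128 { u128::MAX } else { (1u128 << size_bits) - 1 };
        // The range covers everything exactly when `end + 1`, wrapped to the
        // scalar's width, lands back on `start` (e.g. 0..=255 for a u8).
        (self.end.wrapping_add(1) & max) == self.start
    }
}

fn main() {
    let full_u8 = WrappingRange { start: 0, end: 0xff }; // u8: all 256 patterns valid
    let boolean = WrappingRange { start: 0, end: 1 };    // bool: only 0 and 1
    let non_zero = WrappingRange { start: 1, end: 0xff };// NonZeroU8: 0 excluded
    assert!(full_u8.is_full_for(8));
    assert!(!boolean.is_full_for(8));
    assert!(!non_zero.is_full_for(8));
}
```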
View file

@@ -5,27 +5,18 @@
use rustc_infer::infer::TyCtxtInferExt;
use rustc_middle::traits::ObligationCause;
-use rustc_middle::ty::{ParamEnv, Ty, TyCtxt, Variance};
+use rustc_middle::ty::{ParamEnv, Ty, TyCtxt, TypingMode, Variance};
use rustc_trait_selection::traits::ObligationCtxt;
-/// Returns whether the two types are equal up to subtyping.
-///
-/// This is used in case we don't know the expected subtyping direction
-/// and still want to check whether anything is broken.
-pub fn is_equal_up_to_subtyping<'tcx>(
+/// Returns whether `src` is a subtype of `dest`, i.e. `src <: dest`.
+pub fn sub_types<'tcx>(
tcx: TyCtxt<'tcx>,
+typing_mode: TypingMode<'tcx>,
param_env: ParamEnv<'tcx>,
src: Ty<'tcx>,
dest: Ty<'tcx>,
) -> bool {
-// Fast path.
-if src == dest {
-return true;
-}
-// Check for subtyping in either direction.
-relate_types(tcx, param_env, Variance::Covariant, src, dest)
-|| relate_types(tcx, param_env, Variance::Covariant, dest, src)
+relate_types(tcx, typing_mode, param_env, Variance::Covariant, src, dest)
}
/// Returns whether `src` is a subtype of `dest`, i.e. `src <: dest`.
@@ -35,6 +26,7 @@ pub fn is_equal_up_to_subtyping<'tcx>(
/// because we want to check for type equality.
pub fn relate_types<'tcx>(
tcx: TyCtxt<'tcx>,
+typing_mode: TypingMode<'tcx>,
param_env: ParamEnv<'tcx>,
variance: Variance,
src: Ty<'tcx>,
@@ -45,7 +37,7 @@ pub fn relate_types<'tcx>(
}
let mut builder = tcx.infer_ctxt().ignoring_regions();
-let infcx = builder.build();
+let infcx = builder.build(typing_mode);
let ocx = ObligationCtxt::new(&infcx);
let cause = ObligationCause::dummy();
let src = ocx.normalize(&cause, param_env, src);

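The last two files replace the direction-agnostic `is_equal_up_to_subtyping` with the one-directional `sub_types` and thread an explicit `TypingMode` into the inference context (`builder.build(typing_mode)`). A toy model of why the direction matters, mocking subtyping on reference lifetimes; none of this is rustc's real machinery:

```rust
// Toy model: the removed helper accepted a subtype relation in either
// direction, while the new `sub_types` checks only `src <: dest`.
#[derive(Clone, Copy, PartialEq, Debug)]
enum Ty {
    RefStatic, // &'static str
    RefShort,  // &'a str for some shorter 'a
}

fn sub_types(src: Ty, dest: Ty) -> bool {
    // Every type is a subtype of itself, and &'static str <: &'a str.
    src == dest || (src == Ty::RefStatic && dest == Ty::RefShort)
}

fn is_equal_up_to_subtyping(a: Ty, b: Ty) -> bool {
    // The removed, direction-agnostic helper.
    sub_types(a, b) || sub_types(b, a)
}

fn main() {
    // Storing a 'static reference where a shorter one is expected is fine...
    assert!(sub_types(Ty::RefStatic, Ty::RefShort));
    // ...but not the reverse, which the old helper could not distinguish.
    assert!(!sub_types(Ty::RefShort, Ty::RefStatic));
    assert!(is_equal_up_to_subtyping(Ty::RefShort, Ty::RefStatic));
}
```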
View file

@@ -8,7 +8,7 @@ mod type_name;
pub use self::alignment::{is_disaligned, is_within_packed};
pub use self::check_validity_requirement::check_validity_requirement;
-pub use self::compare_types::{is_equal_up_to_subtyping, relate_types};
+pub use self::compare_types::{relate_types, sub_types};
pub use self::type_name::type_name;
/// Classify whether an operator is "left-homogeneous", i.e., the LHS has the

Some files were not shown because too many files have changed in this diff.