Merge ref '7fefa09b90' from rust-lang/rust

Pull recent changes from https://github.com/rust-lang/rust via Josh.

Upstream ref: 7fefa09b90
Filtered ref: 002cd53664b0eb50c44ed23284c3ced2a6eaa6d9
Upstream diff: 2dc30247c5...7fefa09b90

This merge was created using https://github.com/rust-lang/josh-sync.
This commit is contained in:
The rustc-josh-sync Cronjob Bot 2025-12-29 04:28:08 +00:00
commit da9d86c6cd
1356 changed files with 43285 additions and 21974 deletions

View file

@ -15,6 +15,7 @@ on:
- try
- try-perf
- automation/bors/try
- automation/bors/auto
pull_request:
branches:
- "**"
@ -56,7 +57,7 @@ jobs:
- name: Test citool
# Only test citool on the auto branch, to reduce latency of the calculate matrix job
# on PR/try builds.
if: ${{ github.ref == 'refs/heads/auto' }}
if: ${{ github.ref == 'refs/heads/auto' || github.ref == 'refs/heads/automation/bors/auto' }}
run: |
cd src/ci/citool
CARGO_INCREMENTAL=0 cargo test
@ -79,7 +80,7 @@ jobs:
# access the environment.
#
# We only enable the environment for the rust-lang/rust repository, so that CI works on forks.
environment: ${{ ((github.repository == 'rust-lang/rust' && (github.ref == 'refs/heads/try' || github.ref == 'refs/heads/try-perf' || github.ref == 'refs/heads/automation/bors/try' || github.ref == 'refs/heads/auto')) && 'bors') || '' }}
environment: ${{ ((github.repository == 'rust-lang/rust' && (github.ref == 'refs/heads/try' || github.ref == 'refs/heads/try-perf' || github.ref == 'refs/heads/automation/bors/try' || github.ref == 'refs/heads/auto' || github.ref == 'refs/heads/automation/bors/auto')) && 'bors') || '' }}
env:
CI_JOB_NAME: ${{ matrix.name }}
CI_JOB_DOC_URL: ${{ matrix.doc_url }}
@ -313,7 +314,7 @@ jobs:
needs: [ calculate_matrix, job ]
# !cancelled() executes the job regardless of whether the previous jobs passed or failed
if: ${{ !cancelled() && contains(fromJSON('["auto", "try"]'), needs.calculate_matrix.outputs.run_type) }}
environment: ${{ ((github.repository == 'rust-lang/rust' && (github.ref == 'refs/heads/try' || github.ref == 'refs/heads/try-perf' || github.ref == 'refs/heads/automation/bors/try' || github.ref == 'refs/heads/auto')) && 'bors') || '' }}
environment: ${{ ((github.repository == 'rust-lang/rust' && (github.ref == 'refs/heads/try' || github.ref == 'refs/heads/try-perf' || github.ref == 'refs/heads/automation/bors/try' || github.ref == 'refs/heads/auto' || github.ref == 'refs/heads/automation/bors/auto')) && 'bors') || '' }}
steps:
- name: checkout the source code
uses: actions/checkout@v5

View file

@ -666,6 +666,7 @@ dependencies = [
"indoc",
"itertools",
"opener",
"rustc-literal-escaper",
"walkdir",
]
@ -3637,8 +3638,6 @@ dependencies = [
"rustc_span",
"rustc_symbol_mangling",
"rustc_target",
"serde",
"serde_json",
"smallvec",
"tracing",
]
@ -4145,8 +4144,8 @@ version = "0.0.0"
dependencies = [
"expect-test",
"memchr",
"unicode-ident",
"unicode-properties",
"unicode-xid",
]
[[package]]
@ -5357,9 +5356,9 @@ dependencies = [
[[package]]
name = "stringdex"
version = "0.0.3"
version = "0.0.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "556a6126952cb2f5150057c98a77cc6c771027dea2825bf7fa03d3d638b0a4f8"
checksum = "07ab85c3f308f022ce6861ab57576b5b6ebc4835f9577e67e0f35f6c351e3f0a"
dependencies = [
"stacker",
]
@ -5982,24 +5981,24 @@ checksum = "75b844d17643ee918803943289730bec8aac480150456169e647ed0b576ba539"
[[package]]
name = "unicode-ident"
version = "1.0.18"
version = "1.0.22"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5a5f39404a5da50712a4c1eecf25e90dd62b613502b7e925fd4e4d19b5c96512"
checksum = "9312f7c4f6ff9069b165498234ce8be658059c6728633667c526e27dc2cf1df5"
[[package]]
name = "unicode-normalization"
version = "0.1.24"
version = "0.1.25"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5033c97c4262335cded6d6fc3e5c18ab755e1a3dc96376350f3d8e9f009ad956"
checksum = "5fd4f6878c9cb28d874b009da9e8d183b5abc80117c40bbd187a1fde336be6e8"
dependencies = [
"tinyvec",
]
[[package]]
name = "unicode-properties"
version = "0.1.3"
version = "0.1.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e70f2a8b45122e719eb623c01822704c4e0907e7e426a05927e1a1cfff5b75d0"
checksum = "7df058c713841ad818f1dc5d3fd88063241cc61f49f5fbea4b951e8cf5a8d71d"
[[package]]
name = "unicode-script"

View file

@ -714,7 +714,7 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> {
},
fields: FieldsShape::Arbitrary {
offsets: [niche_offset].into(),
memory_index: [0].into(),
in_memory_order: [FieldIdx::new(0)].into(),
},
backend_repr: abi,
largest_niche,
@ -1008,8 +1008,8 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> {
let pair =
LayoutData::<FieldIdx, VariantIdx>::scalar_pair(&self.cx, tag, prim_scalar);
let pair_offsets = match pair.fields {
FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
assert_eq!(memory_index.raw, [0, 1]);
FieldsShape::Arbitrary { ref offsets, ref in_memory_order } => {
assert_eq!(in_memory_order.raw, [FieldIdx::new(0), FieldIdx::new(1)]);
offsets
}
_ => panic!("encountered a non-arbitrary layout during enum layout"),
@ -1061,7 +1061,7 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> {
},
fields: FieldsShape::Arbitrary {
offsets: [Size::ZERO].into(),
memory_index: [0].into(),
in_memory_order: [FieldIdx::new(0)].into(),
},
largest_niche,
uninhabited,
@ -1110,10 +1110,10 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> {
let pack = repr.pack;
let mut align = if pack.is_some() { dl.i8_align } else { dl.aggregate_align };
let mut max_repr_align = repr.align;
let mut inverse_memory_index: IndexVec<u32, FieldIdx> = fields.indices().collect();
let mut in_memory_order: IndexVec<u32, FieldIdx> = fields.indices().collect();
let optimize_field_order = !repr.inhibit_struct_field_reordering();
let end = if let StructKind::MaybeUnsized = kind { fields.len() - 1 } else { fields.len() };
let optimizing = &mut inverse_memory_index.raw[..end];
let optimizing = &mut in_memory_order.raw[..end];
let fields_excluding_tail = &fields.raw[..end];
// unsizable tail fields are excluded so that we use the same seed for the sized and unsized layouts.
let field_seed = fields_excluding_tail
@ -1248,12 +1248,10 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> {
// regardless of the status of `-Z randomize-layout`
}
}
// inverse_memory_index holds field indices by increasing memory offset.
// That is, if field 5 has offset 0, the first element of inverse_memory_index is 5.
// in_memory_order holds field indices by increasing memory offset.
// That is, if field 5 has offset 0, the first element of in_memory_order is 5.
// We now write field offsets to the corresponding offset slot;
// field 5 with offset 0 puts 0 in offsets[5].
// At the bottom of this function, we invert `inverse_memory_index` to
// produce `memory_index` (see `invert_mapping`).
let mut unsized_field = None::<&F>;
let mut offsets = IndexVec::from_elem(Size::ZERO, fields);
let mut offset = Size::ZERO;
@ -1265,7 +1263,7 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> {
align = align.max(prefix_align);
offset = prefix_size.align_to(prefix_align);
}
for &i in &inverse_memory_index {
for &i in &in_memory_order {
let field = &fields[i];
if let Some(unsized_field) = unsized_field {
return Err(LayoutCalculatorError::UnexpectedUnsized(*unsized_field));
@ -1322,18 +1320,6 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> {
debug!("univariant min_size: {:?}", offset);
let min_size = offset;
// As stated above, inverse_memory_index holds field indices by increasing offset.
// This makes it an already-sorted view of the offsets vec.
// To invert it, consider:
// If field 5 has offset 0, offsets[0] is 5, and memory_index[5] should be 0.
// Field 5 would be the first element, so memory_index is i:
// Note: if we didn't optimize, it's already right.
let memory_index = if optimize_field_order {
inverse_memory_index.invert_bijective_mapping()
} else {
debug_assert!(inverse_memory_index.iter().copied().eq(fields.indices()));
inverse_memory_index.into_iter().map(|it| it.index() as u32).collect()
};
let size = min_size.align_to(align);
// FIXME(oli-obk): deduplicate and harden these checks
if size.bytes() >= dl.obj_size_bound() {
@ -1389,8 +1375,11 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> {
let pair =
LayoutData::<FieldIdx, VariantIdx>::scalar_pair(&self.cx, a, b);
let pair_offsets = match pair.fields {
FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
assert_eq!(memory_index.raw, [0, 1]);
FieldsShape::Arbitrary { ref offsets, ref in_memory_order } => {
assert_eq!(
in_memory_order.raw,
[FieldIdx::new(0), FieldIdx::new(1)]
);
offsets
}
FieldsShape::Primitive
@ -1434,7 +1423,7 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> {
Ok(LayoutData {
variants: Variants::Single { index: VariantIdx::new(0) },
fields: FieldsShape::Arbitrary { offsets, memory_index },
fields: FieldsShape::Arbitrary { offsets, in_memory_order },
backend_repr: abi,
largest_niche,
uninhabited,
@ -1530,7 +1519,10 @@ where
Ok(LayoutData {
variants: Variants::Single { index: VariantIdx::new(0) },
fields: FieldsShape::Arbitrary { offsets: [Size::ZERO].into(), memory_index: [0].into() },
fields: FieldsShape::Arbitrary {
offsets: [Size::ZERO].into(),
in_memory_order: [FieldIdx::new(0)].into(),
},
backend_repr: repr,
largest_niche: elt.largest_niche,
uninhabited: false,

View file

@ -182,33 +182,29 @@ pub(super) fn layout<
// CoroutineLayout.
debug!("prefix = {:#?}", prefix);
let (outer_fields, promoted_offsets, promoted_memory_index) = match prefix.fields {
FieldsShape::Arbitrary { mut offsets, memory_index } => {
let mut inverse_memory_index = memory_index.invert_bijective_mapping();
FieldsShape::Arbitrary { mut offsets, in_memory_order } => {
// "a" (`0..b_start`) and "b" (`b_start..`) correspond to
// "outer" and "promoted" fields respectively.
let b_start = tag_index.plus(1);
let offsets_b = IndexVec::from_raw(offsets.raw.split_off(b_start.index()));
let offsets_a = offsets;
// Disentangle the "a" and "b" components of `inverse_memory_index`
// Disentangle the "a" and "b" components of `in_memory_order`
// by preserving the order but keeping only one disjoint "half" each.
// FIXME(eddyb) build a better abstraction for permutations, if possible.
let inverse_memory_index_b: IndexVec<u32, FieldIdx> = inverse_memory_index
.iter()
.filter_map(|&i| i.index().checked_sub(b_start.index()).map(FieldIdx::new))
.collect();
inverse_memory_index.raw.retain(|&i| i.index() < b_start.index());
let inverse_memory_index_a = inverse_memory_index;
// Since `inverse_memory_index_{a,b}` each only refer to their
// respective fields, they can be safely inverted
let memory_index_a = inverse_memory_index_a.invert_bijective_mapping();
let memory_index_b = inverse_memory_index_b.invert_bijective_mapping();
let mut in_memory_order_a = IndexVec::<u32, FieldIdx>::new();
let mut in_memory_order_b = IndexVec::<u32, FieldIdx>::new();
for i in in_memory_order {
if let Some(j) = i.index().checked_sub(b_start.index()) {
in_memory_order_b.push(FieldIdx::new(j));
} else {
in_memory_order_a.push(i);
}
}
let outer_fields =
FieldsShape::Arbitrary { offsets: offsets_a, memory_index: memory_index_a };
(outer_fields, offsets_b, memory_index_b)
FieldsShape::Arbitrary { offsets: offsets_a, in_memory_order: in_memory_order_a };
(outer_fields, offsets_b, in_memory_order_b.invert_bijective_mapping())
}
_ => unreachable!(),
};
@ -236,7 +232,7 @@ pub(super) fn layout<
)?;
variant.variants = Variants::Single { index };
let FieldsShape::Arbitrary { offsets, memory_index } = variant.fields else {
let FieldsShape::Arbitrary { offsets, in_memory_order } = variant.fields else {
unreachable!();
};
@ -249,8 +245,9 @@ pub(super) fn layout<
// promoted fields were being used, but leave the elements not in the
// subset as `invalid_field_idx`, which we can filter out later to
// obtain a valid (bijective) mapping.
let memory_index = in_memory_order.invert_bijective_mapping();
let invalid_field_idx = promoted_memory_index.len() + memory_index.len();
let mut combined_inverse_memory_index =
let mut combined_in_memory_order =
IndexVec::from_elem_n(FieldIdx::new(invalid_field_idx), invalid_field_idx);
let mut offsets_and_memory_index = iter::zip(offsets, memory_index);
@ -268,19 +265,18 @@ pub(super) fn layout<
(promoted_offsets[field_idx], promoted_memory_index[field_idx])
}
};
combined_inverse_memory_index[memory_index] = i;
combined_in_memory_order[memory_index] = i;
offset
})
.collect();
// Remove the unused slots and invert the mapping to obtain the
// combined `memory_index` (also see previous comment).
combined_inverse_memory_index.raw.retain(|&i| i.index() != invalid_field_idx);
let combined_memory_index = combined_inverse_memory_index.invert_bijective_mapping();
// Remove the unused slots to obtain the combined `in_memory_order`
// (also see previous comment).
combined_in_memory_order.raw.retain(|&i| i.index() != invalid_field_idx);
variant.fields = FieldsShape::Arbitrary {
offsets: combined_offsets,
memory_index: combined_memory_index,
in_memory_order: combined_in_memory_order,
};
size = size.max(variant.size);

View file

@ -16,7 +16,7 @@ impl<FieldIdx: Idx, VariantIdx: Idx> LayoutData<FieldIdx, VariantIdx> {
variants: Variants::Single { index: VariantIdx::new(0) },
fields: FieldsShape::Arbitrary {
offsets: IndexVec::new(),
memory_index: IndexVec::new(),
in_memory_order: IndexVec::new(),
},
backend_repr: BackendRepr::Memory { sized },
largest_niche: None,
@ -108,7 +108,7 @@ impl<FieldIdx: Idx, VariantIdx: Idx> LayoutData<FieldIdx, VariantIdx> {
variants: Variants::Single { index: VariantIdx::new(0) },
fields: FieldsShape::Arbitrary {
offsets: [Size::ZERO, b_offset].into(),
memory_index: [0, 1].into(),
in_memory_order: [FieldIdx::new(0), FieldIdx::new(1)].into(),
},
backend_repr: BackendRepr::ScalarPair(a, b),
largest_niche,
@ -133,7 +133,7 @@ impl<FieldIdx: Idx, VariantIdx: Idx> LayoutData<FieldIdx, VariantIdx> {
Some(fields) => FieldsShape::Union(fields),
None => FieldsShape::Arbitrary {
offsets: IndexVec::new(),
memory_index: IndexVec::new(),
in_memory_order: IndexVec::new(),
},
},
backend_repr: BackendRepr::Memory { sized: true },

View file

@ -1636,19 +1636,14 @@ pub enum FieldsShape<FieldIdx: Idx> {
// FIXME(eddyb) use small vector optimization for the common case.
offsets: IndexVec<FieldIdx, Size>,
/// Maps source order field indices to memory order indices,
/// Maps memory order field indices to source order indices,
/// depending on how the fields were reordered (if at all).
/// This is a permutation, with both the source order and the
/// memory order using the same (0..n) index ranges.
///
/// Note that during computation of `memory_index`, sometimes
/// it is easier to operate on the inverse mapping (that is,
/// from memory order to source order), and that is usually
/// named `inverse_memory_index`.
///
// FIXME(eddyb) build a better abstraction for permutations, if possible.
// FIXME(camlorn) also consider small vector optimization here.
memory_index: IndexVec<FieldIdx, u32>,
in_memory_order: IndexVec<u32, FieldIdx>,
},
}
@ -1682,51 +1677,17 @@ impl<FieldIdx: Idx> FieldsShape<FieldIdx> {
}
}
#[inline]
pub fn memory_index(&self, i: usize) -> usize {
match *self {
FieldsShape::Primitive => {
unreachable!("FieldsShape::memory_index: `Primitive`s have no fields")
}
FieldsShape::Union(_) | FieldsShape::Array { .. } => i,
FieldsShape::Arbitrary { ref memory_index, .. } => {
memory_index[FieldIdx::new(i)].try_into().unwrap()
}
}
}
/// Gets source indices of the fields by increasing offsets.
#[inline]
pub fn index_by_increasing_offset(&self) -> impl ExactSizeIterator<Item = usize> {
let mut inverse_small = [0u8; 64];
let mut inverse_big = IndexVec::new();
let use_small = self.count() <= inverse_small.len();
// We have to write this logic twice in order to keep the array small.
if let FieldsShape::Arbitrary { ref memory_index, .. } = *self {
if use_small {
for (field_idx, &mem_idx) in memory_index.iter_enumerated() {
inverse_small[mem_idx as usize] = field_idx.index() as u8;
}
} else {
inverse_big = memory_index.invert_bijective_mapping();
}
}
// Primitives don't really have fields in the way that structs do,
// but having this return an empty iterator for them is unhelpful
// since that makes them look kinda like ZSTs, which they're not.
let pseudofield_count = if let FieldsShape::Primitive = self { 1 } else { self.count() };
(0..pseudofield_count).map(move |i| match *self {
(0..pseudofield_count).map(move |i| match self {
FieldsShape::Primitive | FieldsShape::Union(_) | FieldsShape::Array { .. } => i,
FieldsShape::Arbitrary { .. } => {
if use_small {
inverse_small[i] as usize
} else {
inverse_big[i as u32].index()
}
}
FieldsShape::Arbitrary { in_memory_order, .. } => in_memory_order[i as u32].index(),
})
}
}

View file

@ -10,7 +10,6 @@
// tidy-alphabetical-start
#![allow(clippy::mut_from_ref)] // Arena allocators are one place where this pattern is fine.
#![allow(internal_features)]
#![cfg_attr(bootstrap, feature(maybe_uninit_slice))]
#![cfg_attr(test, feature(test))]
#![deny(unsafe_op_in_unsafe_fn)]
#![doc(test(no_crate_inject, attr(deny(warnings), allow(internal_features))))]

View file

@ -1810,7 +1810,7 @@ pub enum ExprKind {
/// or a `gen` block (`gen move { ... }`).
///
/// The span is the "decl", which is the header before the body `{ }`
/// including the `asyng`/`gen` keywords and possibly `move`.
/// including the `async`/`gen` keywords and possibly `move`.
Gen(CaptureBy, Box<Block>, GenBlockKind, Span),
/// An await expression (`my_future.await`). Span is of await keyword.
Await(Box<Expr>, Span),

View file

@ -6,6 +6,7 @@ ast_lowering_abi_specified_multiple_times =
ast_lowering_arbitrary_expression_in_pattern =
arbitrary expressions aren't allowed in patterns
.pattern_from_macro_note = the `expr` fragment specifier forces the metavariable's content to be an expression
.const_block_in_pattern_help = use a named `const`-item or an `if`-guard (`x if x == const {"{ ... }"}`) instead
ast_lowering_argument = argument
@ -56,6 +57,8 @@ ast_lowering_coroutine_too_many_parameters =
ast_lowering_default_field_in_tuple = default fields are not supported in tuple structs
.label = default fields are only supported on structs
ast_lowering_delegation_cycle_in_signature_resolution = encountered a cycle during delegation signature resolution
ast_lowering_delegation_unresolved_callee = failed to resolve delegation callee
ast_lowering_does_not_support_modifiers =
the `{$class_name}` register class does not support template modifiers

View file

@ -44,17 +44,20 @@ use hir::{BodyId, HirId};
use rustc_abi::ExternAbi;
use rustc_ast::*;
use rustc_attr_parsing::{AttributeParser, ShouldEmit};
use rustc_data_structures::fx::FxHashSet;
use rustc_errors::ErrorGuaranteed;
use rustc_hir::Target;
use rustc_hir::attrs::{AttributeKind, InlineAttr};
use rustc_hir::def_id::DefId;
use rustc_hir::def_id::{DefId, LocalDefId};
use rustc_middle::span_bug;
use rustc_middle::ty::{Asyncness, DelegationFnSigAttrs, ResolverAstLowering};
use rustc_middle::ty::{Asyncness, DelegationAttrs, DelegationFnSigAttrs, ResolverAstLowering};
use rustc_span::symbol::kw;
use rustc_span::{DUMMY_SP, Ident, Span, Symbol};
use smallvec::SmallVec;
use {rustc_ast as ast, rustc_hir as hir};
use super::{GenericArgsMode, ImplTraitContext, LoweringContext, ParamMode};
use crate::errors::{CycleInDelegationSignatureResolution, UnresolvedDelegationCallee};
use crate::{AllowReturnTypeNotation, ImplTraitPosition, ResolverAstLoweringExt};
pub(crate) struct DelegationResults<'hir> {
@ -64,24 +67,24 @@ pub(crate) struct DelegationResults<'hir> {
pub generics: &'hir hir::Generics<'hir>,
}
struct AttributeAdditionInfo {
struct AttrAdditionInfo {
pub equals: fn(&hir::Attribute) -> bool,
pub kind: AttributeAdditionKind,
pub kind: AttrAdditionKind,
}
enum AttributeAdditionKind {
enum AttrAdditionKind {
Default { factory: fn(Span) -> hir::Attribute },
Inherit { flag: DelegationFnSigAttrs, factory: fn(Span, &hir::Attribute) -> hir::Attribute },
}
const PARENT_ID: hir::ItemLocalId = hir::ItemLocalId::ZERO;
static ATTRIBUTES_ADDITIONS: &[AttributeAdditionInfo] = &[
AttributeAdditionInfo {
static ATTRS_ADDITIONS: &[AttrAdditionInfo] = &[
AttrAdditionInfo {
equals: |a| matches!(a, hir::Attribute::Parsed(AttributeKind::MustUse { .. })),
kind: AttributeAdditionKind::Inherit {
factory: |span, original_attribute| {
let reason = match original_attribute {
kind: AttrAdditionKind::Inherit {
factory: |span, original_attr| {
let reason = match original_attr {
hir::Attribute::Parsed(AttributeKind::MustUse { reason, .. }) => *reason,
_ => None,
};
@ -91,14 +94,41 @@ static ATTRIBUTES_ADDITIONS: &[AttributeAdditionInfo] = &[
flag: DelegationFnSigAttrs::MUST_USE,
},
},
AttributeAdditionInfo {
AttrAdditionInfo {
equals: |a| matches!(a, hir::Attribute::Parsed(AttributeKind::Inline(..))),
kind: AttributeAdditionKind::Default {
kind: AttrAdditionKind::Default {
factory: |span| hir::Attribute::Parsed(AttributeKind::Inline(InlineAttr::Hint, span)),
},
},
];
type DelegationIdsVec = SmallVec<[DefId; 1]>;
// As delegations can now refer to another delegation, we have a delegation path
// of the following type: reuse (current delegation) <- reuse (delegee_id) <- ... <- reuse <- function (root_function_id).
// In its most basic and widely used form: reuse (current delegation) <- function (delegee_id, root_function_id)
struct DelegationIds {
path: DelegationIdsVec,
}
impl DelegationIds {
fn new(path: DelegationIdsVec) -> Self {
assert!(!path.is_empty());
Self { path }
}
// Id of the first function in (non)local crate that is being reused
fn root_function_id(&self) -> DefId {
*self.path.last().expect("Ids vector can't be empty")
}
// Id of the first definition which is being reused,
// can be either function, in this case `root_id == delegee_id`, or other delegation
fn delegee_id(&self) -> DefId {
*self.path.first().expect("Ids vector can't be empty")
}
}
impl<'hir> LoweringContext<'_, 'hir> {
fn is_method(&self, def_id: DefId, span: Span) -> bool {
match self.tcx.def_kind(def_id) {
@ -119,18 +149,39 @@ impl<'hir> LoweringContext<'_, 'hir> {
&mut self,
delegation: &Delegation,
item_id: NodeId,
is_in_trait_impl: bool,
) -> DelegationResults<'hir> {
let span = self.lower_span(delegation.path.segments.last().unwrap().ident.span);
let sig_id = self.get_delegation_sig_id(item_id, delegation.id, span, is_in_trait_impl);
match sig_id {
Ok(sig_id) => {
self.add_attributes_if_needed(span, sig_id);
let is_method = self.is_method(sig_id, span);
let (param_count, c_variadic) = self.param_count(sig_id);
let decl = self.lower_delegation_decl(sig_id, param_count, c_variadic, span);
let sig = self.lower_delegation_sig(sig_id, decl, span);
let ids = self.get_delegation_ids(
self.resolver.delegation_infos[&self.local_def_id(item_id)].resolution_node,
span,
);
match ids {
Ok(ids) => {
self.add_attrs_if_needed(span, &ids);
let delegee_id = ids.delegee_id();
let root_function_id = ids.root_function_id();
// `is_method` is used to choose the name of the first parameter (`self` or `arg0`),
// if the original function is not a method (without `self`), then it can not be added
// during chain of reuses, so we use `root_function_id` here
let is_method = self.is_method(root_function_id, span);
// Here we use `root_function_id` as we can not get params information out of potential delegation reuse,
// we need a function to extract this information
let (param_count, c_variadic) = self.param_count(root_function_id);
// Here we use `delegee_id`, as this id will then be used to calculate parent for generics
// inheritance, and we want this id to point on a delegee, not on the original
// function (see https://github.com/rust-lang/rust/issues/150152#issuecomment-3674834654)
let decl = self.lower_delegation_decl(delegee_id, param_count, c_variadic, span);
// Here we pass `root_function_id` as we want to inherit signature (including consts, async)
// from the root function that started delegation
let sig = self.lower_delegation_sig(root_function_id, decl, span);
let body_id = self.lower_delegation_body(delegation, is_method, param_count, span);
let ident = self.lower_ident(delegation.ident);
let generics = self.lower_delegation_generics(span);
@ -140,36 +191,36 @@ impl<'hir> LoweringContext<'_, 'hir> {
}
}
fn add_attributes_if_needed(&mut self, span: Span, sig_id: DefId) {
let new_attributes = self.create_new_attributes(
ATTRIBUTES_ADDITIONS,
span,
sig_id,
self.attrs.get(&PARENT_ID),
);
fn add_attrs_if_needed(&mut self, span: Span, ids: &DelegationIds) {
let new_attrs =
self.create_new_attrs(ATTRS_ADDITIONS, span, ids, self.attrs.get(&PARENT_ID));
if new_attributes.is_empty() {
if new_attrs.is_empty() {
return;
}
let new_arena_allocated_attributes = match self.attrs.get(&PARENT_ID) {
let new_arena_allocated_attrs = match self.attrs.get(&PARENT_ID) {
Some(existing_attrs) => self.arena.alloc_from_iter(
existing_attrs.iter().map(|a| a.clone()).chain(new_attributes.into_iter()),
existing_attrs.iter().map(|a| a.clone()).chain(new_attrs.into_iter()),
),
None => self.arena.alloc_from_iter(new_attributes.into_iter()),
None => self.arena.alloc_from_iter(new_attrs.into_iter()),
};
self.attrs.insert(PARENT_ID, new_arena_allocated_attributes);
self.attrs.insert(PARENT_ID, new_arena_allocated_attrs);
}
fn create_new_attributes(
fn create_new_attrs(
&self,
candidate_additions: &[AttributeAdditionInfo],
candidate_additions: &[AttrAdditionInfo],
span: Span,
sig_id: DefId,
ids: &DelegationIds,
existing_attrs: Option<&&[hir::Attribute]>,
) -> Vec<hir::Attribute> {
let local_original_attributes = self.parse_local_original_attributes(sig_id);
let defs_orig_attrs = ids
.path
.iter()
.map(|def_id| (*def_id, self.parse_local_original_attrs(*def_id)))
.collect::<Vec<_>>();
candidate_additions
.iter()
@ -183,79 +234,120 @@ impl<'hir> LoweringContext<'_, 'hir> {
}
match addition_info.kind {
AttributeAdditionKind::Default { factory } => Some(factory(span)),
AttributeAdditionKind::Inherit { flag, factory } => {
let original_attribute = match sig_id.as_local() {
Some(local_id) => self
.resolver
.delegation_fn_sigs
.get(&local_id)
.is_some_and(|sig| sig.attrs_flags.contains(flag))
.then(|| {
local_original_attributes
.as_ref()
.map(|attrs| {
attrs
.iter()
.find(|base_attr| (addition_info.equals)(base_attr))
})
.flatten()
})
.flatten(),
None => self
.tcx
.get_all_attrs(sig_id)
.iter()
.find(|base_attr| (addition_info.equals)(base_attr)),
};
AttrAdditionKind::Default { factory } => Some(factory(span)),
AttrAdditionKind::Inherit { flag, factory } => {
for (def_id, orig_attrs) in &defs_orig_attrs {
let original_attr = match def_id.as_local() {
Some(local_id) => self
.get_attrs(local_id)
.flags
.contains(flag)
.then(|| {
orig_attrs
.as_ref()
.map(|attrs| {
attrs.iter().find(|base_attr| {
(addition_info.equals)(base_attr)
})
})
.flatten()
})
.flatten(),
None => self
.tcx
.get_all_attrs(*def_id)
.iter()
.find(|base_attr| (addition_info.equals)(base_attr)),
};
original_attribute.map(|a| factory(span, a))
if let Some(original_attr) = original_attr {
return Some(factory(span, original_attr));
}
}
None
}
}
})
.collect::<Vec<_>>()
}
fn parse_local_original_attributes(&self, sig_id: DefId) -> Option<Vec<hir::Attribute>> {
if let Some(local_id) = sig_id.as_local()
&& let Some(info) = self.resolver.delegation_fn_sigs.get(&local_id)
&& !info.to_inherit_attrs.is_empty()
{
Some(AttributeParser::parse_limited_all(
self.tcx.sess,
info.to_inherit_attrs.as_slice(),
None,
Target::Fn,
DUMMY_SP,
DUMMY_NODE_ID,
Some(self.tcx.features()),
ShouldEmit::Nothing,
))
fn parse_local_original_attrs(&self, def_id: DefId) -> Option<Vec<hir::Attribute>> {
if let Some(local_id) = def_id.as_local() {
let attrs = &self.get_attrs(local_id).to_inherit;
if !attrs.is_empty() {
return Some(AttributeParser::parse_limited_all(
self.tcx.sess,
attrs,
None,
Target::Fn,
DUMMY_SP,
DUMMY_NODE_ID,
Some(self.tcx.features()),
ShouldEmit::Nothing,
));
}
}
None
}
fn get_attrs(&self, local_id: LocalDefId) -> &DelegationAttrs {
// local_id can correspond either to a function or other delegation
if let Some(fn_sig) = self.resolver.delegation_fn_sigs.get(&local_id) {
&fn_sig.attrs
} else {
None
&self.resolver.delegation_infos[&local_id].attrs
}
}
fn get_delegation_sig_id(
fn get_delegation_ids(
&self,
item_id: NodeId,
path_id: NodeId,
mut node_id: NodeId,
span: Span,
is_in_trait_impl: bool,
) -> Result<DefId, ErrorGuaranteed> {
let sig_id = if is_in_trait_impl { item_id } else { path_id };
self.get_resolution_id(sig_id, span)
) -> Result<DelegationIds, ErrorGuaranteed> {
let mut visited: FxHashSet<NodeId> = Default::default();
let mut path: DelegationIdsVec = Default::default();
loop {
visited.insert(node_id);
let Some(def_id) = self.get_resolution_id(node_id) else {
return Err(self.tcx.dcx().span_delayed_bug(
span,
format!(
"LoweringContext: couldn't resolve node {:?} in delegation item",
node_id
),
));
};
path.push(def_id);
// If def_id is in local crate and it corresponds to another delegation
// it means that we refer to another delegation as a callee, so in order to obtain
// a signature DefId we obtain NodeId of the callee delegation and try to get signature from it.
if let Some(local_id) = def_id.as_local()
&& let Some(delegation_info) = self.resolver.delegation_infos.get(&local_id)
{
node_id = delegation_info.resolution_node;
if visited.contains(&node_id) {
// We encountered a cycle in the resolution, or delegation callee refers to non-existent
// entity, in this case emit an error.
return Err(match visited.len() {
1 => self.dcx().emit_err(UnresolvedDelegationCallee { span }),
_ => self.dcx().emit_err(CycleInDelegationSignatureResolution { span }),
});
}
} else {
return Ok(DelegationIds::new(path));
}
}
}
fn get_resolution_id(&self, node_id: NodeId, span: Span) -> Result<DefId, ErrorGuaranteed> {
let def_id =
self.resolver.get_partial_res(node_id).and_then(|r| r.expect_full_res().opt_def_id());
def_id.ok_or_else(|| {
self.tcx.dcx().span_delayed_bug(
span,
format!("LoweringContext: couldn't resolve node {:?} in delegation item", node_id),
)
})
fn get_resolution_id(&self, node_id: NodeId) -> Option<DefId> {
self.resolver.get_partial_res(node_id).and_then(|r| r.expect_full_res().opt_def_id())
}
fn lower_delegation_generics(&mut self, span: Span) -> &'hir hir::Generics<'hir> {
@ -269,16 +361,14 @@ impl<'hir> LoweringContext<'_, 'hir> {
}
// Function parameter count, including C variadic `...` if present.
fn param_count(&self, sig_id: DefId) -> (usize, bool /*c_variadic*/) {
if let Some(local_sig_id) = sig_id.as_local() {
// Map may be filled incorrectly due to recursive delegation.
// Error will be emitted later during HIR ty lowering.
fn param_count(&self, def_id: DefId) -> (usize, bool /*c_variadic*/) {
if let Some(local_sig_id) = def_id.as_local() {
match self.resolver.delegation_fn_sigs.get(&local_sig_id) {
Some(sig) => (sig.param_count, sig.c_variadic),
None => (0, false),
}
} else {
let sig = self.tcx.fn_sig(sig_id).skip_binder().skip_binder();
let sig = self.tcx.fn_sig(def_id).skip_binder().skip_binder();
(sig.inputs().len() + usize::from(sig.c_variadic), sig.c_variadic)
}
}
@ -328,7 +418,7 @@ impl<'hir> LoweringContext<'_, 'hir> {
// We are not forwarding the attributes, as the delegation fn sigs are collected on the ast,
// and here we need the hir attributes.
let default_safety =
if sig.attrs_flags.contains(DelegationFnSigAttrs::TARGET_FEATURE)
if sig.attrs.flags.contains(DelegationFnSigAttrs::TARGET_FEATURE)
|| self.tcx.def_kind(parent) == DefKind::ForeignMod
{
hir::Safety::Unsafe
@ -489,8 +579,8 @@ impl<'hir> LoweringContext<'_, 'hir> {
delegation.path.segments.iter().rev().skip(1).any(|segment| segment.args.is_some());
let call = if self
.get_resolution_id(delegation.id, span)
.and_then(|def_id| Ok(self.is_method(def_id, span)))
.get_resolution_id(delegation.id)
.map(|def_id| self.is_method(def_id, span))
.unwrap_or_default()
&& delegation.qself.is_none()
&& !has_generic_args

View file

@ -357,6 +357,8 @@ pub(crate) struct ArbitraryExpressionInPattern {
pub span: Span,
#[note(ast_lowering_pattern_from_macro_note)]
pub pattern_from_macro_note: bool,
#[help(ast_lowering_const_block_in_pattern_help)]
pub const_block_in_pattern_help: bool,
}
#[derive(Diagnostic)]
@ -475,3 +477,17 @@ pub(crate) struct UnionWithDefault {
#[primary_span]
pub span: Span,
}
#[derive(Diagnostic)]
#[diag(ast_lowering_delegation_unresolved_callee)]
pub(crate) struct UnresolvedDelegationCallee {
#[primary_span]
pub span: Span,
}
#[derive(Diagnostic)]
#[diag(ast_lowering_delegation_cycle_in_signature_resolution)]
pub(crate) struct CycleInDelegationSignatureResolution {
#[primary_span]
pub span: Span,
}

View file

@ -114,7 +114,8 @@ impl<'hir> LoweringContext<'_, 'hir> {
}
ExprKind::Tup(elts) => hir::ExprKind::Tup(self.lower_exprs(elts)),
ExprKind::Call(f, args) => {
if let Some(legacy_args) = self.resolver.legacy_const_generic_args(f) {
if let Some(legacy_args) = self.resolver.legacy_const_generic_args(f, self.tcx)
{
self.lower_legacy_const_generics((**f).clone(), args.clone(), &legacy_args)
} else {
let f = self.lower_expr(f);

View file

@ -281,6 +281,13 @@ impl<'a, 'hir> Visitor<'hir> for NodeCollector<'a, 'hir> {
});
}
fn visit_const_arg_expr_field(&mut self, field: &'hir ConstArgExprField<'hir>) {
self.insert(field.span, field.hir_id, Node::ConstArgExprField(field));
self.with_parent(field.hir_id, |this| {
intravisit::walk_const_arg_expr_field(this, field);
})
}
fn visit_stmt(&mut self, stmt: &'hir Stmt<'hir>) {
self.insert(stmt.span, stmt.hir_id, Node::Stmt(stmt));

View file

@ -541,7 +541,7 @@ impl<'hir> LoweringContext<'_, 'hir> {
hir::ItemKind::Macro(ident, macro_def, macro_kinds)
}
ItemKind::Delegation(box delegation) => {
let delegation_results = self.lower_delegation(delegation, id, false);
let delegation_results = self.lower_delegation(delegation, id);
hir::ItemKind::Fn {
sig: delegation_results.sig,
ident: delegation_results.ident,
@ -1026,7 +1026,7 @@ impl<'hir> LoweringContext<'_, 'hir> {
(*ident, generics, kind, ty.is_some())
}
AssocItemKind::Delegation(box delegation) => {
let delegation_results = self.lower_delegation(delegation, i.id, false);
let delegation_results = self.lower_delegation(delegation, i.id);
let item_kind = hir::TraitItemKind::Fn(
delegation_results.sig,
hir::TraitFn::Provided(delegation_results.body_id),
@ -1196,7 +1196,7 @@ impl<'hir> LoweringContext<'_, 'hir> {
)
}
AssocItemKind::Delegation(box delegation) => {
let delegation_results = self.lower_delegation(delegation, i.id, is_in_trait_impl);
let delegation_results = self.lower_delegation(delegation, i.id);
(
delegation.ident,
(

View file

@ -47,13 +47,14 @@ use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
use rustc_data_structures::sync::spawn;
use rustc_data_structures::tagged_ptr::TaggedRef;
use rustc_errors::{DiagArgFromDisplay, DiagCtxtHandle};
use rustc_hir::attrs::AttributeKind;
use rustc_hir::def::{DefKind, LifetimeRes, Namespace, PartialRes, PerNS, Res};
use rustc_hir::def_id::{CRATE_DEF_ID, LOCAL_CRATE, LocalDefId};
use rustc_hir::definitions::{DefPathData, DisambiguatorState};
use rustc_hir::lints::DelayedLint;
use rustc_hir::{
self as hir, AngleBrackets, ConstArg, GenericArg, HirId, ItemLocalMap, LifetimeSource,
LifetimeSyntax, ParamName, Target, TraitCandidate,
LifetimeSyntax, ParamName, Target, TraitCandidate, find_attr,
};
use rustc_index::{Idx, IndexSlice, IndexVec};
use rustc_macros::extension;
@ -236,29 +237,32 @@ impl SpanLowerer {
#[extension(trait ResolverAstLoweringExt)]
impl ResolverAstLowering {
fn legacy_const_generic_args(&self, expr: &Expr) -> Option<Vec<usize>> {
if let ExprKind::Path(None, path) = &expr.kind {
// Don't perform legacy const generics rewriting if the path already
// has generic arguments.
if path.segments.last().unwrap().args.is_some() {
return None;
}
fn legacy_const_generic_args(&self, expr: &Expr, tcx: TyCtxt<'_>) -> Option<Vec<usize>> {
let ExprKind::Path(None, path) = &expr.kind else {
return None;
};
if let Res::Def(DefKind::Fn, def_id) = self.partial_res_map.get(&expr.id)?.full_res()? {
// We only support cross-crate argument rewriting. Uses
// within the same crate should be updated to use the new
// const generics style.
if def_id.is_local() {
return None;
}
if let Some(v) = self.legacy_const_generic_args.get(&def_id) {
return v.clone();
}
}
// Don't perform legacy const generics rewriting if the path already
// has generic arguments.
if path.segments.last().unwrap().args.is_some() {
return None;
}
None
let def_id = self.partial_res_map.get(&expr.id)?.full_res()?.opt_def_id()?;
// We only support cross-crate argument rewriting. Uses
// within the same crate should be updated to use the new
// const generics style.
if def_id.is_local() {
return None;
}
find_attr!(
// we can use parsed attrs here since for other crates they're already available
tcx.get_all_attrs(def_id),
AttributeKind::RustcLegacyConstGenerics{fn_indexes,..} => fn_indexes
)
.map(|fn_indexes| fn_indexes.iter().map(|(num, _)| *num).collect())
}
fn get_partial_res(&self, id: NodeId) -> Option<PartialRes> {
@ -2406,6 +2410,47 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
ConstArg { hir_id: self.next_id(), kind: hir::ConstArgKind::Path(qpath) }
}
ExprKind::Struct(se) => {
let path = self.lower_qpath(
expr.id,
&se.qself,
&se.path,
// FIXME(mgca): we may want this to be `Optional` instead, but
// we would also need to make sure that HIR ty lowering errors
// when these paths wind up in signatures.
ParamMode::Explicit,
AllowReturnTypeNotation::No,
ImplTraitContext::Disallowed(ImplTraitPosition::Path),
None,
);
let fields = self.arena.alloc_from_iter(se.fields.iter().map(|f| {
let hir_id = self.lower_node_id(f.id);
// FIXME(mgca): This might result in lowering attributes that
// then go unused as the `Target::ExprField` is not actually
// corresponding to `Node::ExprField`.
self.lower_attrs(hir_id, &f.attrs, f.span, Target::ExprField);
let expr = if let ExprKind::ConstBlock(anon_const) = &f.expr.kind {
let def_id = self.local_def_id(anon_const.id);
let def_kind = self.tcx.def_kind(def_id);
assert_eq!(DefKind::AnonConst, def_kind);
self.lower_anon_const_to_const_arg_direct(anon_const)
} else {
self.lower_expr_to_const_arg_direct(&f.expr)
};
&*self.arena.alloc(hir::ConstArgExprField {
hir_id,
field: self.lower_ident(f.ident),
expr: self.arena.alloc(expr),
span: self.lower_span(f.span),
})
}));
ConstArg { hir_id: self.next_id(), kind: hir::ConstArgKind::Struct(path, fields) }
}
ExprKind::Underscore => ConstArg {
hir_id: self.lower_node_id(expr.id),
kind: hir::ConstArgKind::Infer(expr.span, ()),

View file

@ -399,7 +399,6 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
ExprKind::Lit(lit) => {
hir::PatExprKind::Lit { lit: self.lower_lit(lit, span), negated: false }
}
ExprKind::ConstBlock(c) => hir::PatExprKind::ConstBlock(self.lower_const_block(c)),
ExprKind::IncludedBytes(byte_sym) => hir::PatExprKind::Lit {
lit: respan(span, LitKind::ByteStr(*byte_sym, StrStyle::Cooked)),
negated: false,
@ -419,10 +418,12 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
hir::PatExprKind::Lit { lit: self.lower_lit(lit, span), negated: true }
}
_ => {
let is_const_block = matches!(expr.kind, ExprKind::ConstBlock(_));
let pattern_from_macro = expr.is_approximately_pattern();
let guar = self.dcx().emit_err(ArbitraryExpressionInPattern {
span,
pattern_from_macro_note: pattern_from_macro,
const_block_in_pattern_help: is_const_block,
});
err(guar)
}

View file

@ -33,6 +33,9 @@ pub fn where_bound_predicate_to_string(where_bound_predicate: &ast::WhereBoundPr
State::new().where_bound_predicate_to_string(where_bound_predicate)
}
/// # Panics
///
/// Panics if `pat.kind` is `PatKind::Missing`.
pub fn pat_to_string(pat: &ast::Pat) -> String {
State::new().pat_to_string(pat)
}

View file

@ -28,6 +28,33 @@ pub struct CfgSelectBranches {
pub unreachable: Vec<(CfgSelectPredicate, TokenStream, Span)>,
}
impl CfgSelectBranches {
/// Removes the top-most branch for which `predicate` returns `true`,
/// or the wildcard if none of the reachable branches satisfied the predicate.
pub fn pop_first_match<F>(&mut self, predicate: F) -> Option<(TokenStream, Span)>
where
F: Fn(&CfgEntry) -> bool,
{
for (index, (cfg, _, _)) in self.reachable.iter().enumerate() {
if predicate(cfg) {
let matched = self.reachable.remove(index);
return Some((matched.1, matched.2));
}
}
self.wildcard.take().map(|(_, tts, span)| (tts, span))
}
/// Consume this value and iterate over all the `TokenStream`s that it stores.
pub fn into_iter_tts(self) -> impl Iterator<Item = (TokenStream, Span)> {
let it1 = self.reachable.into_iter().map(|(_, tts, span)| (tts, span));
let it2 = self.wildcard.into_iter().map(|(_, tts, span)| (tts, span));
let it3 = self.unreachable.into_iter().map(|(_, tts, span)| (tts, span));
it1.chain(it2).chain(it3)
}
}
pub fn parse_cfg_select(
p: &mut Parser<'_>,
sess: &Session,

View file

@ -0,0 +1,33 @@
use super::prelude::*;
pub(crate) struct CfiEncodingParser;
impl<S: Stage> SingleAttributeParser<S> for CfiEncodingParser {
const PATH: &[Symbol] = &[sym::cfi_encoding];
const ALLOWED_TARGETS: AllowedTargets = AllowedTargets::AllowListWarnRest(&[
Allow(Target::Struct),
Allow(Target::ForeignTy),
Allow(Target::Enum),
Allow(Target::Union),
]);
const ATTRIBUTE_ORDER: AttributeOrder = AttributeOrder::KeepInnermost;
const ON_DUPLICATE: OnDuplicate<S> = OnDuplicate::Error;
const TEMPLATE: AttributeTemplate = template!(NameValueStr: "encoding");
fn convert(cx: &mut AcceptContext<'_, '_, S>, args: &ArgParser) -> Option<AttributeKind> {
let Some(name_value) = args.name_value() else {
cx.expected_name_value(cx.attr_span, Some(sym::cfi_encoding));
return None;
};
let Some(value_str) = name_value.value_as_str() else {
cx.expected_string_literal(name_value.value_span, None);
return None;
};
if value_str.as_str().trim().is_empty() {
cx.expected_non_empty_string_literal(name_value.value_span);
return None;
}
Some(AttributeKind::CfiEncoding { encoding: value_str })
}
}

View file

@ -690,6 +690,16 @@ impl<S: Stage> SingleAttributeParser<S> for SanitizeParser {
}
}
pub(crate) struct ThreadLocalParser;
impl<S: Stage> NoArgsAttributeParser<S> for ThreadLocalParser {
const PATH: &[Symbol] = &[sym::thread_local];
const ON_DUPLICATE: OnDuplicate<S> = OnDuplicate::WarnButFutureError;
const ALLOWED_TARGETS: AllowedTargets =
AllowedTargets::AllowList(&[Allow(Target::Static), Allow(Target::ForeignStatic)]);
const CREATE: fn(Span) -> AttributeKind = |_| AttributeKind::ThreadLocal;
}
pub(crate) struct RustcPassIndirectlyInNonRusticAbisParser;
impl<S: Stage> NoArgsAttributeParser<S> for RustcPassIndirectlyInNonRusticAbisParser {

View file

@ -33,6 +33,7 @@ pub(crate) mod allow_unstable;
pub(crate) mod body;
pub(crate) mod cfg;
pub(crate) mod cfg_select;
pub(crate) mod cfi_encoding;
pub(crate) mod codegen_attrs;
pub(crate) mod confusables;
pub(crate) mod crate_level;
@ -47,6 +48,7 @@ pub(crate) mod loop_match;
pub(crate) mod macro_attrs;
pub(crate) mod must_use;
pub(crate) mod no_implicit_prelude;
pub(crate) mod no_link;
pub(crate) mod non_exhaustive;
pub(crate) mod path;
pub(crate) mod pin_v2;

View file

@ -0,0 +1,14 @@
use super::prelude::*;
pub(crate) struct NoLinkParser;
impl<S: Stage> NoArgsAttributeParser<S> for NoLinkParser {
const PATH: &[Symbol] = &[sym::no_link];
const ON_DUPLICATE: OnDuplicate<S> = OnDuplicate::Warn;
const ALLOWED_TARGETS: AllowedTargets = AllowedTargets::AllowList(&[
Allow(Target::ExternCrate),
Warn(Target::Field),
Warn(Target::Arm),
Warn(Target::MacroDef),
]);
const CREATE: fn(Span) -> AttributeKind = |_| AttributeKind::NoLink;
}

View file

@ -1,3 +1,6 @@
use rustc_ast::{LitIntType, LitKind, MetaItemLit};
use rustc_session::errors;
use super::prelude::*;
use super::util::parse_single_integer;
use crate::session_diagnostics::RustcScalableVectorCountOutOfRange;
@ -11,6 +14,82 @@ impl<S: Stage> NoArgsAttributeParser<S> for RustcMainParser {
const CREATE: fn(Span) -> AttributeKind = |_| AttributeKind::RustcMain;
}
pub(crate) struct RustcMustImplementOneOfParser;
impl<S: Stage> SingleAttributeParser<S> for RustcMustImplementOneOfParser {
const PATH: &[Symbol] = &[sym::rustc_must_implement_one_of];
const ON_DUPLICATE: OnDuplicate<S> = OnDuplicate::Error;
const ALLOWED_TARGETS: AllowedTargets = AllowedTargets::AllowList(&[Allow(Target::Trait)]);
const ATTRIBUTE_ORDER: AttributeOrder = AttributeOrder::KeepInnermost;
const TEMPLATE: AttributeTemplate = template!(List: &["function1, function2, ..."]);
fn convert(cx: &mut AcceptContext<'_, '_, S>, args: &ArgParser) -> Option<AttributeKind> {
let Some(list) = args.list() else {
cx.expected_list(cx.attr_span, args);
return None;
};
let mut fn_names = ThinVec::new();
let inputs: Vec<_> = list.mixed().collect();
if inputs.len() < 2 {
cx.expected_list_with_num_args_or_more(2, list.span);
return None;
}
let mut errored = false;
for argument in inputs {
let Some(meta) = argument.meta_item() else {
cx.expected_identifier(argument.span());
return None;
};
let Some(ident) = meta.ident() else {
cx.dcx().emit_err(errors::MustBeNameOfAssociatedFunction { span: meta.span() });
errored = true;
continue;
};
fn_names.push(ident);
}
if errored {
return None;
}
Some(AttributeKind::RustcMustImplementOneOf { attr_span: cx.attr_span, fn_names })
}
}
pub(crate) struct RustcNeverReturnsNullPointerParser;
impl<S: Stage> NoArgsAttributeParser<S> for RustcNeverReturnsNullPointerParser {
const PATH: &[Symbol] = &[sym::rustc_never_returns_null_ptr];
const ON_DUPLICATE: OnDuplicate<S> = OnDuplicate::Error;
const ALLOWED_TARGETS: AllowedTargets = AllowedTargets::AllowList(&[
Allow(Target::Fn),
Allow(Target::Method(MethodKind::Inherent)),
Allow(Target::Method(MethodKind::Trait { body: false })),
Allow(Target::Method(MethodKind::Trait { body: true })),
Allow(Target::Method(MethodKind::TraitImpl)),
]);
const CREATE: fn(Span) -> AttributeKind = |_| AttributeKind::RustcNeverReturnsNullPointer;
}
pub(crate) struct RustcNoImplicitAutorefsParser;
impl<S: Stage> NoArgsAttributeParser<S> for RustcNoImplicitAutorefsParser {
const PATH: &[Symbol] = &[sym::rustc_no_implicit_autorefs];
const ON_DUPLICATE: OnDuplicate<S> = OnDuplicate::Error;
const ALLOWED_TARGETS: AllowedTargets = AllowedTargets::AllowList(&[
Allow(Target::Fn),
Allow(Target::Method(MethodKind::Inherent)),
Allow(Target::Method(MethodKind::Trait { body: false })),
Allow(Target::Method(MethodKind::Trait { body: true })),
Allow(Target::Method(MethodKind::TraitImpl)),
]);
const CREATE: fn(Span) -> AttributeKind = |_| AttributeKind::RustcNoImplicitAutorefs;
}
pub(crate) struct RustcLayoutScalarValidRangeStartParser;
impl<S: Stage> SingleAttributeParser<S> for RustcLayoutScalarValidRangeStartParser {
@ -41,6 +120,129 @@ impl<S: Stage> SingleAttributeParser<S> for RustcLayoutScalarValidRangeEndParser
}
}
pub(crate) struct RustcLegacyConstGenericsParser;
impl<S: Stage> SingleAttributeParser<S> for RustcLegacyConstGenericsParser {
const PATH: &[Symbol] = &[sym::rustc_legacy_const_generics];
const ATTRIBUTE_ORDER: AttributeOrder = AttributeOrder::KeepInnermost;
const ON_DUPLICATE: OnDuplicate<S> = OnDuplicate::Error;
const ALLOWED_TARGETS: AllowedTargets = AllowedTargets::AllowList(&[Allow(Target::Fn)]);
const TEMPLATE: AttributeTemplate = template!(List: &["N"]);
fn convert(cx: &mut AcceptContext<'_, '_, S>, args: &ArgParser) -> Option<AttributeKind> {
let ArgParser::List(meta_items) = args else {
cx.expected_list(cx.attr_span, args);
return None;
};
let mut parsed_indexes = ThinVec::new();
let mut errored = false;
for possible_index in meta_items.mixed() {
if let MetaItemOrLitParser::Lit(MetaItemLit {
kind: LitKind::Int(index, LitIntType::Unsuffixed),
..
}) = possible_index
{
parsed_indexes.push((index.0 as usize, possible_index.span()));
} else {
cx.expected_integer_literal(possible_index.span());
errored = true;
}
}
if errored {
return None;
} else if parsed_indexes.is_empty() {
cx.expected_at_least_one_argument(args.span()?);
return None;
}
Some(AttributeKind::RustcLegacyConstGenerics {
fn_indexes: parsed_indexes,
attr_span: cx.attr_span,
})
}
}
pub(crate) struct RustcLintDiagnosticsParser;
impl<S: Stage> NoArgsAttributeParser<S> for RustcLintDiagnosticsParser {
const PATH: &[Symbol] = &[sym::rustc_lint_diagnostics];
const ON_DUPLICATE: OnDuplicate<S> = OnDuplicate::Error;
const ALLOWED_TARGETS: AllowedTargets = AllowedTargets::AllowList(&[
Allow(Target::Fn),
Allow(Target::Method(MethodKind::Inherent)),
Allow(Target::Method(MethodKind::Trait { body: false })),
Allow(Target::Method(MethodKind::Trait { body: true })),
Allow(Target::Method(MethodKind::TraitImpl)),
]);
const CREATE: fn(Span) -> AttributeKind = |_| AttributeKind::RustcLintDiagnostics;
}
pub(crate) struct RustcLintOptDenyFieldAccessParser;
impl<S: Stage> SingleAttributeParser<S> for RustcLintOptDenyFieldAccessParser {
const PATH: &[Symbol] = &[sym::rustc_lint_opt_deny_field_access];
const ATTRIBUTE_ORDER: AttributeOrder = AttributeOrder::KeepInnermost;
const ON_DUPLICATE: OnDuplicate<S> = OnDuplicate::Error;
const ALLOWED_TARGETS: AllowedTargets = AllowedTargets::AllowList(&[Allow(Target::Field)]);
const TEMPLATE: AttributeTemplate = template!(Word);
fn convert(cx: &mut AcceptContext<'_, '_, S>, args: &ArgParser) -> Option<AttributeKind> {
let Some(arg) = args.list().and_then(MetaItemListParser::single) else {
cx.expected_single_argument(cx.attr_span);
return None;
};
let MetaItemOrLitParser::Lit(MetaItemLit { kind: LitKind::Str(lint_message, _), .. }) = arg
else {
cx.expected_string_literal(arg.span(), arg.lit());
return None;
};
Some(AttributeKind::RustcLintOptDenyFieldAccess { lint_message: *lint_message })
}
}
pub(crate) struct RustcLintOptTyParser;
impl<S: Stage> NoArgsAttributeParser<S> for RustcLintOptTyParser {
const PATH: &[Symbol] = &[sym::rustc_lint_opt_ty];
const ON_DUPLICATE: OnDuplicate<S> = OnDuplicate::Error;
const ALLOWED_TARGETS: AllowedTargets = AllowedTargets::AllowList(&[Allow(Target::Struct)]);
const CREATE: fn(Span) -> AttributeKind = |_| AttributeKind::RustcLintOptTy;
}
pub(crate) struct RustcLintQueryInstabilityParser;
impl<S: Stage> NoArgsAttributeParser<S> for RustcLintQueryInstabilityParser {
const PATH: &[Symbol] = &[sym::rustc_lint_query_instability];
const ON_DUPLICATE: OnDuplicate<S> = OnDuplicate::Error;
const ALLOWED_TARGETS: AllowedTargets = AllowedTargets::AllowList(&[
Allow(Target::Fn),
Allow(Target::Method(MethodKind::Inherent)),
Allow(Target::Method(MethodKind::Trait { body: false })),
Allow(Target::Method(MethodKind::Trait { body: true })),
Allow(Target::Method(MethodKind::TraitImpl)),
]);
const CREATE: fn(Span) -> AttributeKind = |_| AttributeKind::RustcLintQueryInstability;
}
pub(crate) struct RustcLintUntrackedQueryInformationParser;
impl<S: Stage> NoArgsAttributeParser<S> for RustcLintUntrackedQueryInformationParser {
const PATH: &[Symbol] = &[sym::rustc_lint_untracked_query_information];
const ON_DUPLICATE: OnDuplicate<S> = OnDuplicate::Error;
const ALLOWED_TARGETS: AllowedTargets = AllowedTargets::AllowList(&[
Allow(Target::Fn),
Allow(Target::Method(MethodKind::Inherent)),
Allow(Target::Method(MethodKind::Trait { body: false })),
Allow(Target::Method(MethodKind::Trait { body: true })),
Allow(Target::Method(MethodKind::TraitImpl)),
]);
const CREATE: fn(Span) -> AttributeKind = |_| AttributeKind::RustcLintUntrackedQueryInformation;
}
pub(crate) struct RustcObjectLifetimeDefaultParser;
impl<S: Stage> SingleAttributeParser<S> for RustcObjectLifetimeDefaultParser {

View file

@ -19,11 +19,12 @@ use crate::attributes::allow_unstable::{
AllowConstFnUnstableParser, AllowInternalUnstableParser, UnstableFeatureBoundParser,
};
use crate::attributes::body::CoroutineParser;
use crate::attributes::cfi_encoding::CfiEncodingParser;
use crate::attributes::codegen_attrs::{
ColdParser, CoverageParser, EiiExternItemParser, ExportNameParser, ForceTargetFeatureParser,
NakedParser, NoMangleParser, ObjcClassParser, ObjcSelectorParser, OptimizeParser,
RustcPassIndirectlyInNonRusticAbisParser, SanitizeParser, TargetFeatureParser,
TrackCallerParser, UsedParser,
ThreadLocalParser, TrackCallerParser, UsedParser,
};
use crate::attributes::confusables::ConfusablesParser;
use crate::attributes::crate_level::{
@ -50,6 +51,7 @@ use crate::attributes::macro_attrs::{
};
use crate::attributes::must_use::MustUseParser;
use crate::attributes::no_implicit_prelude::NoImplicitPreludeParser;
use crate::attributes::no_link::NoLinkParser;
use crate::attributes::non_exhaustive::NonExhaustiveParser;
use crate::attributes::path::PathParser as PathAttributeParser;
use crate::attributes::pin_v2::PinV2Parser;
@ -59,7 +61,11 @@ use crate::attributes::proc_macro_attrs::{
use crate::attributes::prototype::CustomMirParser;
use crate::attributes::repr::{AlignParser, AlignStaticParser, ReprParser};
use crate::attributes::rustc_internal::{
RustcLayoutScalarValidRangeEndParser, RustcLayoutScalarValidRangeStartParser, RustcMainParser,
RustcLayoutScalarValidRangeEndParser, RustcLayoutScalarValidRangeStartParser,
RustcLegacyConstGenericsParser, RustcLintDiagnosticsParser, RustcLintOptDenyFieldAccessParser,
RustcLintOptTyParser, RustcLintQueryInstabilityParser,
RustcLintUntrackedQueryInformationParser, RustcMainParser, RustcMustImplementOneOfParser,
RustcNeverReturnsNullPointerParser, RustcNoImplicitAutorefsParser,
RustcObjectLifetimeDefaultParser, RustcScalableVectorParser,
RustcSimdMonomorphizeLaneLimitParser,
};
@ -183,6 +189,7 @@ attribute_parsers!(
// tidy-alphabetical-end
// tidy-alphabetical-start
Single<CfiEncodingParser>,
Single<CoverageParser>,
Single<CrateNameParser>,
Single<CustomMirParser>,
@ -209,6 +216,9 @@ attribute_parsers!(
Single<RustcForceInlineParser>,
Single<RustcLayoutScalarValidRangeEndParser>,
Single<RustcLayoutScalarValidRangeStartParser>,
Single<RustcLegacyConstGenericsParser>,
Single<RustcLintOptDenyFieldAccessParser>,
Single<RustcMustImplementOneOfParser>,
Single<RustcObjectLifetimeDefaultParser>,
Single<RustcScalableVectorParser>,
Single<RustcSimdMonomorphizeLaneLimitParser>,
@ -240,6 +250,7 @@ attribute_parsers!(
Single<WithoutArgs<MayDangleParser>>,
Single<WithoutArgs<NoCoreParser>>,
Single<WithoutArgs<NoImplicitPreludeParser>>,
Single<WithoutArgs<NoLinkParser>>,
Single<WithoutArgs<NoMangleParser>>,
Single<WithoutArgs<NoStdParser>>,
Single<WithoutArgs<NonExhaustiveParser>>,
@ -251,11 +262,18 @@ attribute_parsers!(
Single<WithoutArgs<ProcMacroParser>>,
Single<WithoutArgs<PubTransparentParser>>,
Single<WithoutArgs<RustcCoherenceIsCoreParser>>,
Single<WithoutArgs<RustcLintDiagnosticsParser>>,
Single<WithoutArgs<RustcLintOptTyParser>>,
Single<WithoutArgs<RustcLintQueryInstabilityParser>>,
Single<WithoutArgs<RustcLintUntrackedQueryInformationParser>>,
Single<WithoutArgs<RustcMainParser>>,
Single<WithoutArgs<RustcNeverReturnsNullPointerParser>>,
Single<WithoutArgs<RustcNoImplicitAutorefsParser>>,
Single<WithoutArgs<RustcPassIndirectlyInNonRusticAbisParser>>,
Single<WithoutArgs<RustcShouldNotBeCalledOnConstItems>>,
Single<WithoutArgs<SpecializationTraitParser>>,
Single<WithoutArgs<StdInternalSymbolParser>>,
Single<WithoutArgs<ThreadLocalParser>>,
Single<WithoutArgs<TrackCallerParser>>,
Single<WithoutArgs<TypeConstParser>>,
Single<WithoutArgs<UnsafeSpecializationMarkerParser>>,
@ -476,6 +494,17 @@ impl<'f, 'sess: 'f, S: Stage> AcceptContext<'f, 'sess, S> {
self.emit_parse_error(span, AttributeParseErrorReason::ExpectedList)
}
pub(crate) fn expected_list_with_num_args_or_more(
&self,
args: usize,
span: Span,
) -> ErrorGuaranteed {
self.emit_parse_error(
span,
AttributeParseErrorReason::ExpectedListWithNumArgsOrMore { args },
)
}
pub(crate) fn expected_list_or_no_args(&self, span: Span) -> ErrorGuaranteed {
self.emit_parse_error(span, AttributeParseErrorReason::ExpectedListOrNoArgs)
}
@ -484,6 +513,10 @@ impl<'f, 'sess: 'f, S: Stage> AcceptContext<'f, 'sess, S> {
self.emit_parse_error(span, AttributeParseErrorReason::ExpectedNameValueOrNoArgs)
}
pub(crate) fn expected_non_empty_string_literal(&self, span: Span) -> ErrorGuaranteed {
self.emit_parse_error(span, AttributeParseErrorReason::ExpectedNonEmptyStringLiteral)
}
pub(crate) fn expected_no_args(&self, span: Span) -> ErrorGuaranteed {
self.emit_parse_error(span, AttributeParseErrorReason::ExpectedNoArgs)
}

View file

@ -8,7 +8,9 @@ use std::fmt::{Debug, Display};
use rustc_ast::token::{self, Delimiter, MetaVarKind};
use rustc_ast::tokenstream::TokenStream;
use rustc_ast::{AttrArgs, Expr, ExprKind, LitKind, MetaItemLit, Path, StmtKind, UnOp};
use rustc_ast::{
AttrArgs, Expr, ExprKind, LitKind, MetaItemLit, Path, PathSegment, StmtKind, UnOp,
};
use rustc_ast_pretty::pprust;
use rustc_errors::{Diag, PResult};
use rustc_hir::{self as hir, AttrPath};
@ -256,6 +258,11 @@ impl Debug for MetaItemParser {
}
impl MetaItemParser {
/// For a single-segment meta item, returns its name; otherwise, returns `None`.
pub fn ident(&self) -> Option<Ident> {
if let [PathSegment { ident, .. }] = self.path.0.segments[..] { Some(ident) } else { None }
}
pub fn span(&self) -> Span {
if let Some(other) = self.args.span() {
self.path.borrow().span().with_hi(other.hi())

View file

@ -520,7 +520,11 @@ pub(crate) enum AttributeParseErrorReason<'a> {
ExpectedSingleArgument,
ExpectedList,
ExpectedListOrNoArgs,
ExpectedListWithNumArgsOrMore {
args: usize,
},
ExpectedNameValueOrNoArgs,
ExpectedNonEmptyStringLiteral,
UnexpectedLiteral,
ExpectedNameValue(Option<Symbol>),
DuplicateKey(Symbol),
@ -596,9 +600,15 @@ impl<'a, G: EmissionGuarantee> Diagnostic<'a, G> for AttributeParseError<'_> {
AttributeParseErrorReason::ExpectedListOrNoArgs => {
diag.span_label(self.span, "expected a list or no arguments here");
}
AttributeParseErrorReason::ExpectedListWithNumArgsOrMore { args } => {
diag.span_label(self.span, format!("expected {args} or more items"));
}
AttributeParseErrorReason::ExpectedNameValueOrNoArgs => {
diag.span_label(self.span, "didn't expect a list here");
}
AttributeParseErrorReason::ExpectedNonEmptyStringLiteral => {
diag.span_label(self.span, "string is not allowed to be empty");
}
AttributeParseErrorReason::DuplicateKey(key) => {
diag.span_label(self.span, format!("found `{key}` used as a key more than once"));
diag.code(E0538);

View file

@ -764,6 +764,7 @@ impl<'tcx> MirBorrowckCtxt<'_, '_, 'tcx> {
{
// Just point to the function, to reduce the chance of overlapping spans.
let function_span = match func {
Operand::RuntimeChecks(_) => span,
Operand::Constant(c) => c.span,
Operand::Copy(place) | Operand::Move(place) => {
if let Some(l) = place.as_local() {
@ -809,6 +810,7 @@ impl<'tcx> MirBorrowckCtxt<'_, '_, 'tcx> {
{
// Just point to the function, to reduce the chance of overlapping spans.
let function_span = match func {
Operand::RuntimeChecks(_) => span,
Operand::Constant(c) => c.span,
Operand::Copy(place) | Operand::Move(place) => {
if let Some(l) = place.as_local() {

View file

@ -1,4 +1,4 @@
//! This query borrow-checks the MIR to (further) ensure it is not broken.
//! This crate implemens MIR typeck and MIR borrowck.
// tidy-alphabetical-start
#![allow(internal_features)]
@ -111,9 +111,9 @@ pub fn provide(providers: &mut Providers) {
*providers = Providers { mir_borrowck, ..*providers };
}
/// Provider for `query mir_borrowck`. Similar to `typeck`, this must
/// only be called for typeck roots which will then borrowck all
/// nested bodies as well.
/// Provider for `query mir_borrowck`. Unlike `typeck`, this must
/// only be called for typeck roots which *similar* to `typeck` will
/// then borrowck all nested bodies as well.
fn mir_borrowck(
tcx: TyCtxt<'_>,
def: LocalDefId,
@ -1559,10 +1559,6 @@ impl<'a, 'tcx> MirBorrowckCtxt<'a, '_, 'tcx> {
self.consume_operand(location, (operand2, span), state);
}
Rvalue::NullaryOp(_op) => {
// nullary ops take no dynamic input; no borrowck effect.
}
Rvalue::Aggregate(aggregate_kind, operands) => {
// We need to report back the list of mutable upvars that were
// moved into the closure and subsequently used by the closure,
@ -1699,7 +1695,7 @@ impl<'a, 'tcx> MirBorrowckCtxt<'a, '_, 'tcx> {
_ => propagate_closure_used_mut_place(self, place),
}
}
Operand::Constant(..) => {}
Operand::Constant(..) | Operand::RuntimeChecks(_) => {}
}
}
@ -1750,7 +1746,7 @@ impl<'a, 'tcx> MirBorrowckCtxt<'a, '_, 'tcx> {
state,
);
}
Operand::Constant(_) => {}
Operand::Constant(_) | Operand::RuntimeChecks(_) => {}
}
}

View file

@ -247,7 +247,7 @@ impl<'a, 'tcx> LoanInvalidationsGenerator<'a, 'tcx> {
LocalMutationIsAllowed::Yes,
);
}
Operand::Constant(_) => {}
Operand::Constant(_) | Operand::RuntimeChecks(_) => {}
}
}
@ -314,8 +314,6 @@ impl<'a, 'tcx> LoanInvalidationsGenerator<'a, 'tcx> {
self.consume_operand(location, operand2);
}
Rvalue::NullaryOp(_op) => {}
Rvalue::Aggregate(_, operands) => {
for operand in operands {
self.consume_operand(location, operand);

View file

@ -1275,29 +1275,81 @@ impl<'tcx> RegionInferenceContext<'tcx> {
shorter_fr: RegionVid,
propagated_outlives_requirements: &mut Option<&mut Vec<ClosureOutlivesRequirement<'tcx>>>,
) -> RegionRelationCheckResult {
if let Some(propagated_outlives_requirements) = propagated_outlives_requirements
// Shrink `longer_fr` until we find a non-local region (if we do).
// We'll call it `fr-` -- it's ever so slightly smaller than
if let Some(propagated_outlives_requirements) = propagated_outlives_requirements {
// Shrink `longer_fr` until we find some non-local regions.
// We'll call them `longer_fr-` -- they are ever so slightly smaller than
// `longer_fr`.
&& let Some(fr_minus) = self.universal_region_relations.non_local_lower_bound(longer_fr)
{
debug!("try_propagate_universal_region_error: fr_minus={:?}", fr_minus);
let longer_fr_minus = self.universal_region_relations.non_local_lower_bounds(longer_fr);
debug!("try_propagate_universal_region_error: fr_minus={:?}", longer_fr_minus);
// If we don't find a any non-local regions, we should error out as there is nothing
// to propagate.
if longer_fr_minus.is_empty() {
return RegionRelationCheckResult::Error;
}
let blame_constraint = self
.best_blame_constraint(longer_fr, NllRegionVariableOrigin::FreeRegion, shorter_fr)
.0;
// Grow `shorter_fr` until we find some non-local regions. (We
// always will.) We'll call them `shorter_fr+` -- they're ever
// so slightly larger than `shorter_fr`.
// Grow `shorter_fr` until we find some non-local regions.
// We will always find at least one: `'static`. We'll call
// them `shorter_fr+` -- they're ever so slightly larger
// than `shorter_fr`.
let shorter_fr_plus =
self.universal_region_relations.non_local_upper_bounds(shorter_fr);
debug!("try_propagate_universal_region_error: shorter_fr_plus={:?}", shorter_fr_plus);
for fr in shorter_fr_plus {
// Push the constraint `fr-: shorter_fr+`
// We then create constraints `longer_fr-: shorter_fr+` that may or may not
// be propagated (see below).
let mut constraints = vec![];
for fr_minus in longer_fr_minus {
for shorter_fr_plus in &shorter_fr_plus {
constraints.push((fr_minus, *shorter_fr_plus));
}
}
// We only need to propagate at least one of the constraints for
// soundness. However, we want to avoid arbitrary choices here
// and currently don't support returning OR constraints.
//
// If any of the `shorter_fr+` regions are already outlived by `longer_fr-`,
// we propagate only those.
//
// Consider this example (`'b: 'a` == `a -> b`), where we try to propagate `'d: 'a`:
// a --> b --> d
// \
// \-> c
// Here, `shorter_fr+` of `'a` == `['b, 'c]`.
// Propagating `'d: 'b` is correct and should occur; `'d: 'c` is redundant because of
// `'d: 'b` and could reject valid code.
//
// So we filter the constraints to regions already outlived by `longer_fr-`, but if
// the filter yields an empty set, we fall back to the original one.
let subset: Vec<_> = constraints
.iter()
.filter(|&&(fr_minus, shorter_fr_plus)| {
self.eval_outlives(fr_minus, shorter_fr_plus)
})
.copied()
.collect();
let propagated_constraints = if subset.is_empty() { constraints } else { subset };
debug!(
"try_propagate_universal_region_error: constraints={:?}",
propagated_constraints
);
assert!(
!propagated_constraints.is_empty(),
"Expected at least one constraint to propagate here"
);
for (fr_minus, fr_plus) in propagated_constraints {
// Push the constraint `long_fr-: shorter_fr+`
propagated_outlives_requirements.push(ClosureOutlivesRequirement {
subject: ClosureOutlivesSubject::Region(fr_minus),
outlived_free_region: fr,
outlived_free_region: fr_plus,
blame_span: blame_constraint.cause.span,
category: blame_constraint.category,
});

View file

@ -255,7 +255,7 @@ impl<'tcx> BorrowCheckRootCtxt<'tcx> {
}
// We now apply the closure requirements of nested bodies modulo
// regions. In case a body does not depend on opaque types, we
// opaques. In case a body does not depend on opaque types, we
// eagerly check its region constraints and use the final closure
// requirements.
//

View file

@ -94,28 +94,10 @@ impl UniversalRegionRelations<'_> {
/// words, returns the largest (*) known region `fr1` that (a) is
/// outlived by `fr` and (b) is not local.
///
/// (*) If there are multiple competing choices, we pick the "postdominating"
/// one. See `TransitiveRelation::postdom_upper_bound` for details.
pub(crate) fn non_local_lower_bound(&self, fr: RegionVid) -> Option<RegionVid> {
/// (*) If there are multiple competing choices, we return all of them.
pub(crate) fn non_local_lower_bounds(&self, fr: RegionVid) -> Vec<RegionVid> {
debug!("non_local_lower_bound(fr={:?})", fr);
let lower_bounds = self.non_local_bounds(&self.outlives, fr);
// In case we find more than one, reduce to one for
// convenience. This is to prevent us from generating more
// complex constraints, but it will cause spurious errors.
let post_dom = self.outlives.mutual_immediate_postdominator(lower_bounds);
debug!("non_local_bound: post_dom={:?}", post_dom);
post_dom.and_then(|post_dom| {
// If the mutual immediate postdom is not local, then
// there is no non-local result we can return.
if !self.universal_regions.is_local_free_region(post_dom) {
Some(post_dom)
} else {
None
}
})
self.non_local_bounds(&self.outlives, fr)
}
/// Helper for `non_local_upper_bounds` and `non_local_lower_bounds`.

View file

@ -1023,7 +1023,7 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
// element, so we require the `Copy` trait.
if len.try_to_target_usize(tcx).is_none_or(|len| len > 1) {
match operand {
Operand::Copy(..) | Operand::Constant(..) => {
Operand::Copy(..) | Operand::Constant(..) | Operand::RuntimeChecks(_) => {
// These are always okay: direct use of a const, or a value that can
// evidently be copied.
}
@ -1046,8 +1046,6 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
}
}
&Rvalue::NullaryOp(NullOp::RuntimeChecks(_)) => {}
Rvalue::ShallowInitBox(_operand, ty) => {
let trait_ref =
ty::TraitRef::new(tcx, tcx.require_lang_item(LangItem::Sized, span), [*ty]);
@ -2276,7 +2274,6 @@ impl<'a, 'tcx> TypeChecker<'a, 'tcx> {
| Rvalue::Cast(..)
| Rvalue::ShallowInitBox(..)
| Rvalue::BinaryOp(..)
| Rvalue::NullaryOp(..)
| Rvalue::CopyForDeref(..)
| Rvalue::UnaryOp(..)
| Rvalue::Discriminant(..)

View file

@ -1,13 +1,12 @@
//! Code to extract the universally quantified regions declared on a
//! function and the relationships between them. For example:
//! function. For example:
//!
//! ```
//! fn foo<'a, 'b, 'c: 'b>() { }
//! ```
//!
//! here we would return a map assigning each of `{'a, 'b, 'c}`
//! to an index, as well as the `FreeRegionMap` which can compute
//! relationships between them.
//! to an index.
//!
//! The code in this file doesn't *do anything* with those results; it
//! just returns them for other code to use.
@ -271,8 +270,7 @@ impl<'tcx> UniversalRegions<'tcx> {
/// Creates a new and fully initialized `UniversalRegions` that
/// contains indices for all the free regions found in the given
/// MIR -- that is, all the regions that appear in the function's
/// signature. This will also compute the relationships that are
/// known between those regions.
/// signature.
pub(crate) fn new(infcx: &BorrowckInferCtxt<'tcx>, mir_def: LocalDefId) -> Self {
UniversalRegionsBuilder { infcx, mir_def }.build()
}
@ -648,17 +646,14 @@ impl<'cx, 'tcx> UniversalRegionsBuilder<'cx, 'tcx> {
BodyOwnerKind::Const { .. } | BodyOwnerKind::Static(..) => {
let identity_args = GenericArgs::identity_for_item(tcx, typeck_root_def_id);
if self.mir_def.to_def_id() == typeck_root_def_id
// Do not ICE when checking default_field_values consts with lifetimes (#135649)
&& DefKind::Field != tcx.def_kind(tcx.parent(typeck_root_def_id))
{
if self.mir_def.to_def_id() == typeck_root_def_id {
let args = self.infcx.replace_free_regions_with_nll_infer_vars(
NllRegionVariableOrigin::FreeRegion,
identity_args,
);
DefiningTy::Const(self.mir_def.to_def_id(), args)
} else {
// FIXME this line creates a dependency between borrowck and typeck.
// FIXME: this line creates a query dependency between borrowck and typeck.
//
// This is required for `AscribeUserType` canonical query, which will call
// `type_of(inline_const_def_id)`. That `type_of` would inject erased lifetimes
@ -699,30 +694,14 @@ impl<'cx, 'tcx> UniversalRegionsBuilder<'cx, 'tcx> {
let tcx = self.infcx.tcx;
let typeck_root_def_id = tcx.typeck_root_def_id(self.mir_def.to_def_id());
let identity_args = GenericArgs::identity_for_item(tcx, typeck_root_def_id);
let fr_args = match defining_ty {
DefiningTy::Closure(_, args)
| DefiningTy::CoroutineClosure(_, args)
| DefiningTy::Coroutine(_, args)
| DefiningTy::InlineConst(_, args) => {
// In the case of closures, we rely on the fact that
// the first N elements in the ClosureArgs are
// inherited from the `typeck_root_def_id`.
// Therefore, when we zip together (below) with
// `identity_args`, we will get only those regions
// that correspond to early-bound regions declared on
// the `typeck_root_def_id`.
assert!(args.len() >= identity_args.len());
assert_eq!(args.regions().count(), identity_args.regions().count());
args
}
DefiningTy::FnDef(_, args) | DefiningTy::Const(_, args) => args,
DefiningTy::GlobalAsm(_) => ty::List::empty(),
};
let renumbered_args = defining_ty.args();
let global_mapping = iter::once((tcx.lifetimes.re_static, fr_static));
let arg_mapping = iter::zip(identity_args.regions(), fr_args.regions().map(|r| r.as_var()));
// This relies on typeck roots being generics_of parents with their
// parameters at the start of nested bodies' generics.
assert!(renumbered_args.len() >= identity_args.len());
let arg_mapping =
iter::zip(identity_args.regions(), renumbered_args.regions().map(|r| r.as_var()));
UniversalRegionIndices {
indices: global_mapping.chain(arg_mapping).collect(),
@ -862,8 +841,8 @@ impl<'cx, 'tcx> UniversalRegionsBuilder<'cx, 'tcx> {
};
// FIXME(#129952): We probably want a more principled approach here.
if let Err(terr) = inputs_and_output.skip_binder().error_reported() {
self.infcx.set_tainted_by_errors(terr);
if let Err(e) = inputs_and_output.error_reported() {
self.infcx.set_tainted_by_errors(e);
}
inputs_and_output

View file

@ -55,7 +55,6 @@ builtin_macros_assert_requires_expression = macro requires an expression as an a
builtin_macros_autodiff = autodiff must be applied to function
builtin_macros_autodiff_missing_config = autodiff requires at least a name and mode
builtin_macros_autodiff_mode_activity = {$act} can not be used in {$mode} Mode
builtin_macros_autodiff_not_build = this rustc version does not support autodiff
builtin_macros_autodiff_number_activities = expected {$expected} activities, but found {$found}
builtin_macros_autodiff_ret_activity = invalid return activity {$act} in {$mode} Mode
builtin_macros_autodiff_ty_activity = {$act} can not be used for this type

View file

@ -209,11 +209,6 @@ mod llvm_enzyme {
mut item: Annotatable,
mode: DiffMode,
) -> Vec<Annotatable> {
// FIXME(bjorn3) maybe have the backend directly tell if autodiff is supported?
if cfg!(not(feature = "llvm_enzyme")) {
ecx.sess.dcx().emit_err(errors::AutoDiffSupportNotBuild { span: meta_item.span });
return vec![item];
}
let dcx = ecx.sess.dcx();
// first get information about the annotable item: visibility, signature, name and generic

View file

@ -1,22 +1,65 @@
use rustc_ast::tokenstream::TokenStream;
use rustc_ast::{Expr, ast};
use rustc_attr_parsing as attr;
use rustc_attr_parsing::{
CfgSelectBranches, CfgSelectPredicate, EvalConfigResult, parse_cfg_select,
};
use rustc_expand::base::{DummyResult, ExpandResult, ExtCtxt, MacroExpanderResult};
use rustc_expand::base::{DummyResult, ExpandResult, ExtCtxt, MacResult, MacroExpanderResult};
use rustc_span::{Ident, Span, sym};
use smallvec::SmallVec;
use crate::errors::{CfgSelectNoMatches, CfgSelectUnreachable};
/// Selects the first arm whose predicate evaluates to true.
fn select_arm(ecx: &ExtCtxt<'_>, branches: CfgSelectBranches) -> Option<(TokenStream, Span)> {
for (cfg, tt, arm_span) in branches.reachable {
if let EvalConfigResult::True = attr::eval_config_entry(&ecx.sess, &cfg) {
return Some((tt, arm_span));
}
}
/// This intermediate structure is used to emit parse errors for the branches that are not chosen.
/// The `MacResult` instance below parses all branches, emitting any errors it encounters, but only
/// keeps the parse result for the selected branch.
struct CfgSelectResult<'cx, 'sess> {
ecx: &'cx mut ExtCtxt<'sess>,
site_span: Span,
selected_tts: TokenStream,
selected_span: Span,
other_branches: CfgSelectBranches,
}
branches.wildcard.map(|(_, tt, span)| (tt, span))
fn tts_to_mac_result<'cx, 'sess>(
ecx: &'cx mut ExtCtxt<'sess>,
site_span: Span,
tts: TokenStream,
span: Span,
) -> Box<dyn MacResult + 'cx> {
match ExpandResult::from_tts(ecx, tts, site_span, span, Ident::with_dummy_span(sym::cfg_select))
{
ExpandResult::Ready(x) => x,
_ => unreachable!("from_tts always returns Ready"),
}
}
macro_rules! forward_to_parser_any_macro {
($method_name:ident, $ret_ty:ty) => {
fn $method_name(self: Box<Self>) -> Option<$ret_ty> {
let CfgSelectResult { ecx, site_span, selected_tts, selected_span, .. } = *self;
for (tts, span) in self.other_branches.into_iter_tts() {
let _ = tts_to_mac_result(ecx, site_span, tts, span).$method_name();
}
tts_to_mac_result(ecx, site_span, selected_tts, selected_span).$method_name()
}
};
}
impl<'cx, 'sess> MacResult for CfgSelectResult<'cx, 'sess> {
forward_to_parser_any_macro!(make_expr, Box<Expr>);
forward_to_parser_any_macro!(make_stmts, SmallVec<[ast::Stmt; 1]>);
forward_to_parser_any_macro!(make_items, SmallVec<[Box<ast::Item>; 1]>);
forward_to_parser_any_macro!(make_impl_items, SmallVec<[Box<ast::AssocItem>; 1]>);
forward_to_parser_any_macro!(make_trait_impl_items, SmallVec<[Box<ast::AssocItem>; 1]>);
forward_to_parser_any_macro!(make_trait_items, SmallVec<[Box<ast::AssocItem>; 1]>);
forward_to_parser_any_macro!(make_foreign_items, SmallVec<[Box<ast::ForeignItem>; 1]>);
forward_to_parser_any_macro!(make_ty, Box<ast::Ty>);
forward_to_parser_any_macro!(make_pat, Box<ast::Pat>);
}
pub(super) fn expand_cfg_select<'cx>(
@ -31,7 +74,7 @@ pub(super) fn expand_cfg_select<'cx>(
Some(ecx.ecfg.features),
ecx.current_expansion.lint_node_id,
) {
Ok(branches) => {
Ok(mut branches) => {
if let Some((underscore, _, _)) = branches.wildcard {
// Warn for every unreachable predicate. We store the fully parsed branch for rustfmt.
for (predicate, _, _) in &branches.unreachable {
@ -44,14 +87,17 @@ pub(super) fn expand_cfg_select<'cx>(
}
}
if let Some((tts, arm_span)) = select_arm(ecx, branches) {
return ExpandResult::from_tts(
if let Some((selected_tts, selected_span)) = branches.pop_first_match(|cfg| {
matches!(attr::eval_config_entry(&ecx.sess, cfg), EvalConfigResult::True)
}) {
let mac = CfgSelectResult {
ecx,
tts,
sp,
arm_span,
Ident::with_dummy_span(sym::cfg_select),
);
selected_tts,
selected_span,
other_branches: branches,
site_span: sp,
};
return ExpandResult::Ready(Box::new(mac));
} else {
// Emit a compiler error when none of the predicates matched.
let guar = ecx.dcx().emit_err(CfgSelectNoMatches { span: sp });

View file

@ -1,11 +1,12 @@
use rustc_ast::token::{Delimiter, TokenKind};
use rustc_ast::tokenstream::{DelimSpacing, DelimSpan, Spacing, TokenStream, TokenTree};
use rustc_ast::{
DUMMY_NODE_ID, EiiExternTarget, EiiImpl, ItemKind, Stmt, StmtKind, ast, token, tokenstream,
Attribute, DUMMY_NODE_ID, EiiExternTarget, EiiImpl, ItemKind, MetaItem, Path, Stmt, StmtKind,
Visibility, ast,
};
use rustc_ast_pretty::pprust::path_to_string;
use rustc_expand::base::{Annotatable, ExtCtxt};
use rustc_span::{Ident, Span, kw, sym};
use rustc_span::{ErrorGuaranteed, Ident, Span, kw, sym};
use thin_vec::{ThinVec, thin_vec};
use crate::errors::{
@ -52,138 +53,238 @@ pub(crate) fn unsafe_eii(
fn eii_(
ecx: &mut ExtCtxt<'_>,
span: Span,
eii_attr_span: Span,
meta_item: &ast::MetaItem,
item: Annotatable,
impl_unsafe: bool,
) -> Vec<Annotatable> {
let span = ecx.with_def_site_ctxt(span);
let eii_attr_span = ecx.with_def_site_ctxt(eii_attr_span);
let (item, stmt) = if let Annotatable::Item(item) = item {
(item, false)
let (item, wrap_item): (_, &dyn Fn(_) -> _) = if let Annotatable::Item(item) = item {
(item, &Annotatable::Item)
} else if let Annotatable::Stmt(ref stmt) = item
&& let StmtKind::Item(ref item) = stmt.kind
{
(item.clone(), true)
(item.clone(), &|item| {
Annotatable::Stmt(Box::new(Stmt {
id: DUMMY_NODE_ID,
kind: StmtKind::Item(item),
span: eii_attr_span,
}))
})
} else {
ecx.dcx().emit_err(EiiSharedMacroExpectedFunction {
span,
span: eii_attr_span,
name: path_to_string(&meta_item.path),
});
return vec![item];
};
let orig_item = item.clone();
let item = *item;
let ast::Item { attrs, id: _, span: item_span, vis, kind: ItemKind::Fn(mut func), tokens: _ } =
item
let ast::Item { attrs, id: _, span: _, vis, kind: ItemKind::Fn(func), tokens: _ } =
item.as_ref()
else {
ecx.dcx().emit_err(EiiSharedMacroExpectedFunction {
span,
span: eii_attr_span,
name: path_to_string(&meta_item.path),
});
return vec![Annotatable::Item(Box::new(item))];
return vec![wrap_item(item)];
};
// only clone what we need
let attrs = attrs.clone();
let func = (**func).clone();
let vis = vis.clone();
let attrs_from_decl =
filter_attrs_for_multiple_eii_attr(ecx, attrs, eii_attr_span, &meta_item.path);
let Ok(macro_name) = name_for_impl_macro(ecx, &func, &meta_item) else {
return vec![wrap_item(item)];
};
// Detect when this is the *second* eii attribute on an item.
let mut new_attrs = ThinVec::new();
for i in attrs {
if i.has_name(sym::eii) {
ecx.dcx().emit_err(EiiOnlyOnce {
span: i.span,
first_span: span,
name: path_to_string(&meta_item.path),
});
} else {
new_attrs.push(i);
}
}
let attrs = new_attrs;
let macro_name = if meta_item.is_word() {
func.ident
} else if let Some([first]) = meta_item.meta_item_list()
&& let Some(m) = first.meta_item()
&& m.path.segments.len() == 1
{
m.path.segments[0].ident
} else {
ecx.dcx().emit_err(EiiMacroExpectedMaxOneArgument {
span: meta_item.span,
name: path_to_string(&meta_item.path),
});
return vec![Annotatable::Item(orig_item)];
};
// span of the declaring item without attributes
let item_span = func.sig.span;
// span of the eii attribute and the item below it, i.e. the full declaration
let decl_span = eii_attr_span.to(item_span);
let foreign_item_name = func.ident;
let mut return_items = Vec::new();
if func.body.is_some() {
let mut default_func = func.clone();
func.body = None;
default_func.eii_impls.push(ast::EiiImpl {
node_id: DUMMY_NODE_ID,
eii_macro_path: ast::Path::from_ident(macro_name),
impl_safety: if impl_unsafe { ast::Safety::Unsafe(span) } else { ast::Safety::Default },
span,
inner_span: macro_name.span,
is_default: true, // important!
});
return_items.push(Box::new(ast::Item {
attrs: ThinVec::new(),
id: ast::DUMMY_NODE_ID,
span,
vis: ast::Visibility { span, kind: ast::VisibilityKind::Inherited, tokens: None },
kind: ast::ItemKind::Const(Box::new(ast::ConstItem {
ident: Ident { name: kw::Underscore, span },
defaultness: ast::Defaultness::Final,
generics: ast::Generics::default(),
ty: Box::new(ast::Ty {
id: DUMMY_NODE_ID,
kind: ast::TyKind::Tup(ThinVec::new()),
span,
tokens: None,
}),
rhs: Some(ast::ConstItemRhs::Body(Box::new(ast::Expr {
id: DUMMY_NODE_ID,
kind: ast::ExprKind::Block(
Box::new(ast::Block {
stmts: thin_vec![ast::Stmt {
id: DUMMY_NODE_ID,
kind: ast::StmtKind::Item(Box::new(ast::Item {
attrs: thin_vec![], // FIXME: re-add some original attrs
id: DUMMY_NODE_ID,
span: item_span,
vis: ast::Visibility {
span,
kind: ast::VisibilityKind::Inherited,
tokens: None
},
kind: ItemKind::Fn(default_func),
tokens: None,
})),
span
}],
id: DUMMY_NODE_ID,
rules: ast::BlockCheckMode::Default,
span,
tokens: None,
}),
None,
),
span,
attrs: ThinVec::new(),
tokens: None,
}))),
define_opaque: None,
})),
tokens: None,
}))
return_items.push(Box::new(generate_default_impl(
&func,
impl_unsafe,
macro_name,
eii_attr_span,
item_span,
)))
}
let decl_span = span.to(func.sig.span);
return_items.push(Box::new(generate_foreign_item(
ecx,
eii_attr_span,
item_span,
func,
vis,
&attrs_from_decl,
)));
return_items.push(Box::new(generate_attribute_macro_to_implement(
ecx,
eii_attr_span,
macro_name,
foreign_item_name,
impl_unsafe,
decl_span,
)));
return_items.into_iter().map(wrap_item).collect()
}
/// Decide on the name of the macro that can be used to implement the EII.
/// This is either an explicitly given name, or the name of the item in the
/// declaration of the EII.
fn name_for_impl_macro(
ecx: &mut ExtCtxt<'_>,
func: &ast::Fn,
meta_item: &MetaItem,
) -> Result<Ident, ErrorGuaranteed> {
if meta_item.is_word() {
Ok(func.ident)
} else if let Some([first]) = meta_item.meta_item_list()
&& let Some(m) = first.meta_item()
&& m.path.segments.len() == 1
{
Ok(m.path.segments[0].ident)
} else {
Err(ecx.dcx().emit_err(EiiMacroExpectedMaxOneArgument {
span: meta_item.span,
name: path_to_string(&meta_item.path),
}))
}
}
/// Ensure that in the list of attrs, there's only a single `eii` attribute.
fn filter_attrs_for_multiple_eii_attr(
ecx: &mut ExtCtxt<'_>,
attrs: ThinVec<Attribute>,
eii_attr_span: Span,
eii_attr_path: &Path,
) -> ThinVec<Attribute> {
attrs
.into_iter()
.filter(|i| {
if i.has_name(sym::eii) {
ecx.dcx().emit_err(EiiOnlyOnce {
span: i.span,
first_span: eii_attr_span,
name: path_to_string(eii_attr_path),
});
false
} else {
true
}
})
.collect()
}
fn generate_default_impl(
func: &ast::Fn,
impl_unsafe: bool,
macro_name: Ident,
eii_attr_span: Span,
item_span: Span,
) -> ast::Item {
// FIXME: re-add some original attrs
let attrs = ThinVec::new();
let mut default_func = func.clone();
default_func.eii_impls.push(EiiImpl {
node_id: DUMMY_NODE_ID,
inner_span: macro_name.span,
eii_macro_path: ast::Path::from_ident(macro_name),
impl_safety: if impl_unsafe {
ast::Safety::Unsafe(eii_attr_span)
} else {
ast::Safety::Default
},
span: eii_attr_span,
is_default: true,
});
ast::Item {
attrs: ThinVec::new(),
id: ast::DUMMY_NODE_ID,
span: eii_attr_span,
vis: ast::Visibility {
span: eii_attr_span,
kind: ast::VisibilityKind::Inherited,
tokens: None,
},
kind: ast::ItemKind::Const(Box::new(ast::ConstItem {
ident: Ident { name: kw::Underscore, span: eii_attr_span },
defaultness: ast::Defaultness::Final,
generics: ast::Generics::default(),
ty: Box::new(ast::Ty {
id: DUMMY_NODE_ID,
kind: ast::TyKind::Tup(ThinVec::new()),
span: eii_attr_span,
tokens: None,
}),
rhs: Some(ast::ConstItemRhs::Body(Box::new(ast::Expr {
id: DUMMY_NODE_ID,
kind: ast::ExprKind::Block(
Box::new(ast::Block {
stmts: thin_vec![ast::Stmt {
id: DUMMY_NODE_ID,
kind: ast::StmtKind::Item(Box::new(ast::Item {
attrs,
id: DUMMY_NODE_ID,
span: item_span,
vis: ast::Visibility {
span: eii_attr_span,
kind: ast::VisibilityKind::Inherited,
tokens: None
},
kind: ItemKind::Fn(Box::new(default_func)),
tokens: None,
})),
span: eii_attr_span
}],
id: DUMMY_NODE_ID,
rules: ast::BlockCheckMode::Default,
span: eii_attr_span,
tokens: None,
}),
None,
),
span: eii_attr_span,
attrs: ThinVec::new(),
tokens: None,
}))),
define_opaque: None,
})),
tokens: None,
}
}
/// Generates a foreign item, like
///
/// ```rust, ignore
/// extern "…" { safe fn item(); }
/// ```
fn generate_foreign_item(
ecx: &mut ExtCtxt<'_>,
eii_attr_span: Span,
item_span: Span,
mut func: ast::Fn,
vis: Visibility,
attrs_from_decl: &[Attribute],
) -> ast::Item {
let mut foreign_item_attrs = ThinVec::new();
foreign_item_attrs.extend_from_slice(attrs_from_decl);
// Add the rustc_eii_extern_item on the foreign item. Usually, foreign items are mangled.
// This attribute makes sure that we later know that this foreign item's symbol should not be.
foreign_item_attrs.push(ecx.attr_word(sym::rustc_eii_extern_item, eii_attr_span));
let abi = match func.sig.header.ext {
// extern "X" fn => extern "X" {}
@ -196,85 +297,69 @@ fn eii_(
suffix: None,
symbol_unescaped: sym::Rust,
style: ast::StrStyle::Cooked,
span,
span: eii_attr_span,
}),
};
// ABI has been moved to the extern {} block, so we remove it from the fn item.
func.sig.header.ext = ast::Extern::None;
func.body = None;
// And mark safe functions explicitly as `safe fn`.
if func.sig.header.safety == ast::Safety::Default {
func.sig.header.safety = ast::Safety::Safe(func.sig.span);
}
// extern "…" { safe fn item(); }
let mut extern_item_attrs = attrs.clone();
extern_item_attrs.push(ast::Attribute {
kind: ast::AttrKind::Normal(Box::new(ast::NormalAttr {
item: ast::AttrItem {
unsafety: ast::Safety::Default,
// Add the rustc_eii_extern_item on the foreign item. Usually, foreign items are mangled.
// This attribute makes sure that we later know that this foreign item's symbol should not be.
path: ast::Path::from_ident(Ident::new(sym::rustc_eii_extern_item, span)),
args: ast::AttrArgs::Empty,
tokens: None,
},
tokens: None,
})),
id: ecx.sess.psess.attr_id_generator.mk_attr_id(),
style: ast::AttrStyle::Outer,
span,
});
let extern_block = Box::new(ast::Item {
ast::Item {
attrs: ast::AttrVec::default(),
id: ast::DUMMY_NODE_ID,
span,
vis: ast::Visibility { span, kind: ast::VisibilityKind::Inherited, tokens: None },
span: eii_attr_span,
vis: ast::Visibility {
span: eii_attr_span,
kind: ast::VisibilityKind::Inherited,
tokens: None,
},
kind: ast::ItemKind::ForeignMod(ast::ForeignMod {
extern_span: span,
safety: ast::Safety::Unsafe(span),
extern_span: eii_attr_span,
safety: ast::Safety::Unsafe(eii_attr_span),
abi,
items: From::from([Box::new(ast::ForeignItem {
attrs: extern_item_attrs,
attrs: foreign_item_attrs,
id: ast::DUMMY_NODE_ID,
span: item_span,
vis,
kind: ast::ForeignItemKind::Fn(func.clone()),
kind: ast::ForeignItemKind::Fn(Box::new(func.clone())),
tokens: None,
})]),
}),
tokens: None,
});
}
}
let mut macro_attrs = attrs.clone();
macro_attrs.push(
// #[builtin_macro(eii_shared_macro)]
ast::Attribute {
kind: ast::AttrKind::Normal(Box::new(ast::NormalAttr {
item: ast::AttrItem {
unsafety: ast::Safety::Default,
path: ast::Path::from_ident(Ident::new(sym::rustc_builtin_macro, span)),
args: ast::AttrArgs::Delimited(ast::DelimArgs {
dspan: DelimSpan::from_single(span),
delim: Delimiter::Parenthesis,
tokens: TokenStream::new(vec![tokenstream::TokenTree::token_alone(
token::TokenKind::Ident(sym::eii_shared_macro, token::IdentIsRaw::No),
span,
)]),
}),
tokens: None,
},
tokens: None,
})),
id: ecx.sess.psess.attr_id_generator.mk_attr_id(),
style: ast::AttrStyle::Outer,
span,
},
);
/// Generate a stub macro (a bit like in core) that will roughly look like:
///
/// ```rust, ignore, example
/// // Since this a stub macro, the actual code that expands it lives in the compiler.
/// // This attribute tells the compiler that
/// #[builtin_macro(eii_shared_macro)]
/// // the metadata to link this macro to the generated foreign item.
/// #[eii_extern_target(<related_reign_item>)]
/// macro macro_name { () => {} }
/// ```
fn generate_attribute_macro_to_implement(
ecx: &mut ExtCtxt<'_>,
span: Span,
macro_name: Ident,
foreign_item_name: Ident,
impl_unsafe: bool,
decl_span: Span,
) -> ast::Item {
let mut macro_attrs = ThinVec::new();
let macro_def = Box::new(ast::Item {
// #[builtin_macro(eii_shared_macro)]
macro_attrs.push(ecx.attr_nested_word(sym::rustc_builtin_macro, sym::eii_shared_macro, span));
ast::Item {
attrs: macro_attrs,
id: ast::DUMMY_NODE_ID,
span,
@ -305,33 +390,15 @@ fn eii_(
]),
}),
macro_rules: false,
// #[eii_extern_target(func.ident)]
// #[eii_extern_target(foreign_item_ident)]
eii_extern_target: Some(ast::EiiExternTarget {
extern_item_path: ast::Path::from_ident(func.ident),
extern_item_path: ast::Path::from_ident(foreign_item_name),
impl_unsafe,
span: decl_span,
}),
},
),
tokens: None,
});
return_items.push(extern_block);
return_items.push(macro_def);
if stmt {
return_items
.into_iter()
.map(|i| {
Annotatable::Stmt(Box::new(Stmt {
id: DUMMY_NODE_ID,
kind: StmtKind::Item(i),
span,
}))
})
.collect()
} else {
return_items.into_iter().map(|i| Annotatable::Item(i)).collect()
}
}
@ -436,10 +503,10 @@ pub(crate) fn eii_shared_macro(
f.eii_impls.push(EiiImpl {
node_id: DUMMY_NODE_ID,
inner_span: meta_item.path.span,
eii_macro_path: meta_item.path.clone(),
impl_safety: meta_item.unsafety,
span,
inner_span: meta_item.path.span,
is_default,
});

View file

@ -216,17 +216,6 @@ mod autodiff {
}
}
pub(crate) use ad_fallback::*;
mod ad_fallback {
use super::*;
#[derive(Diagnostic)]
#[diag(builtin_macros_autodiff_not_build)]
pub(crate) struct AutoDiffSupportNotBuild {
#[primary_span]
pub(crate) span: Span,
}
}
#[derive(Diagnostic)]
#[diag(builtin_macros_concat_bytes_invalid)]
pub(crate) struct ConcatBytesInvalid {

View file

@ -144,9 +144,7 @@ pub(crate) fn expand_include<'cx>(
let mut p = unwrap_or_emit_fatal(new_parser_from_file(
self.psess,
&self.path,
// Don't strip frontmatter for backward compatibility, `---` may be the start of a
// manifold negation. FIXME: Ideally, we wouldn't strip shebangs here either.
StripTokens::Shebang,
StripTokens::Nothing,
Some(self.span),
));
let expr = parse_expr(&mut p).ok()?;

View file

@ -12,7 +12,7 @@ jobs:
steps:
- uses: actions/checkout@v4
- run: |
sed -i 's/components.*/components = []/' rust-toolchain
sed -i 's/components.*/components = []/' rust-toolchain.toml
- uses: rustsec/audit-check@v1.4.1
with:
token: ${{ secrets.GITHUB_TOKEN }}

View file

@ -28,7 +28,7 @@ jobs:
- name: Avoid installing rustc-dev
run: |
sed -i 's/components.*/components = ["rustfmt"]/' rust-toolchain
sed -i 's/components.*/components = ["rustfmt"]/' rust-toolchain.toml
rustfmt -v
- name: Rustfmt
@ -88,7 +88,7 @@ jobs:
uses: actions/cache@v4
with:
path: build/cg_clif
key: ${{ runner.os }}-${{ matrix.env.TARGET_TRIPLE }}-cargo-build-target-${{ hashFiles('rust-toolchain', '**/Cargo.lock') }}
key: ${{ runner.os }}-${{ matrix.env.TARGET_TRIPLE }}-cargo-build-target-${{ hashFiles('rust-toolchain.toml', '**/Cargo.lock') }}
- name: Set MinGW as the default toolchain
if: matrix.os == 'windows-latest' && matrix.env.TARGET_TRIPLE == 'x86_64-pc-windows-gnu'
@ -158,7 +158,7 @@ jobs:
uses: actions/cache@v4
with:
path: build/cg_clif
key: ${{ runner.os }}-x86_64-unknown-linux-gnu-cargo-build-target-${{ hashFiles('rust-toolchain', '**/Cargo.lock') }}
key: ${{ runner.os }}-x86_64-unknown-linux-gnu-cargo-build-target-${{ hashFiles('rust-toolchain.toml', '**/Cargo.lock') }}
- name: Install hyperfine
run: |
@ -207,7 +207,7 @@ jobs:
uses: actions/cache@v4
with:
path: build/cg_clif
key: ${{ runner.os }}-${{ matrix.env.TARGET_TRIPLE }}-dist-cargo-build-target-${{ hashFiles('rust-toolchain', '**/Cargo.lock') }}
key: ${{ runner.os }}-${{ matrix.env.TARGET_TRIPLE }}-dist-cargo-build-target-${{ hashFiles('rust-toolchain.toml', '**/Cargo.lock') }}
- name: Set MinGW as the default toolchain
if: matrix.os == 'windows-latest' && matrix.env.TARGET_TRIPLE == 'x86_64-pc-windows-gnu'

View file

@ -20,7 +20,7 @@ jobs:
uses: actions/cache@v4
with:
path: build/cg_clif
key: ${{ runner.os }}-rustc-test-cargo-build-target-${{ hashFiles('rust-toolchain', 'Cargo.lock') }}
key: ${{ runner.os }}-rustc-test-cargo-build-target-${{ hashFiles('rust-toolchain.toml', 'Cargo.lock') }}
- name: Test
run: ./scripts/test_bootstrap.sh
@ -40,7 +40,7 @@ jobs:
uses: actions/cache@v4
with:
path: build/cg_clif
key: ${{ runner.os }}-rustc-test-cargo-build-target-${{ hashFiles('rust-toolchain', 'Cargo.lock') }}
key: ${{ runner.os }}-rustc-test-cargo-build-target-${{ hashFiles('rust-toolchain.toml', 'Cargo.lock') }}
- name: Install ripgrep
run: |

View file

@ -43,42 +43,42 @@ checksum = "9330f8b2ff13f34540b44e946ef35111825727b38d33286ef986142615121801"
[[package]]
name = "cranelift-assembler-x64"
version = "0.126.0"
version = "0.127.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bf7631e609c97f063f9777aae405e8492abf9bf92336d7aa3f875403dd4ffd7d"
checksum = "8bd963a645179fa33834ba61fa63353998543b07f877e208da9eb47d4a70d1e7"
dependencies = [
"cranelift-assembler-x64-meta",
]
[[package]]
name = "cranelift-assembler-x64-meta"
version = "0.126.0"
version = "0.127.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9c030edccdc4a5bbf28fbfe7701b5cd1f9854b4445184dd34af2a7e8f8db6f45"
checksum = "3f6d5739c9dc6b5553ca758d78d87d127dd19f397f776efecf817b8ba8d0bb01"
dependencies = [
"cranelift-srcgen",
]
[[package]]
name = "cranelift-bforest"
version = "0.126.0"
version = "0.127.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bb544c1242d0ca98baf01873ebba96c79d5df155d5108d9bb699aefc741f5e6d"
checksum = "ff402c11bb1c9652b67a3e885e84b1b8d00c13472c8fd85211e06a41a63c3e03"
dependencies = [
"cranelift-entity",
]
[[package]]
name = "cranelift-bitset"
version = "0.126.0"
version = "0.127.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f0325aecbafec053d3d3f082edfdca7937e2945e7f09c5ff9672e05198312282"
checksum = "769a0d88c2f5539e9c5536a93a7bf164b0dc68d91e3d00723e5b4ffc1440afdc"
[[package]]
name = "cranelift-codegen"
version = "0.126.0"
version = "0.127.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "abb3236fd319ae897ba00c8a25105081de5c1348576def0e96c062ad259f87a7"
checksum = "d4351f721fb3b26add1c180f0a75c7474bab2f903c8b777c6ca65238ded59a78"
dependencies = [
"bumpalo",
"cranelift-assembler-x64",
@ -102,9 +102,9 @@ dependencies = [
[[package]]
name = "cranelift-codegen-meta"
version = "0.126.0"
version = "0.127.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7b8791c911a361c539130ace34fb726b16aca4216470ec75d75264b1495c8a3a"
checksum = "61f86c0ba5b96713643f4dd0de0df12844de9c7bb137d6829b174b706939aa74"
dependencies = [
"cranelift-assembler-x64-meta",
"cranelift-codegen-shared",
@ -114,33 +114,33 @@ dependencies = [
[[package]]
name = "cranelift-codegen-shared"
version = "0.126.0"
version = "0.127.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "12ead718c2a10990870c19b2497b5a04b8aae6024485e33da25b5d02e35819e0"
checksum = "f08605eee8d51fd976a970bd5b16c9529b51b624f8af68f80649ffb172eb85a4"
[[package]]
name = "cranelift-control"
version = "0.126.0"
version = "0.127.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c0a57fc972b5651047efddccb99440d103d9d8c13393ccebde15ddd5b6a1181b"
checksum = "623aab0a09e40f0cf0b5d35eb7832bae4c4f13e3768228e051a6c1a60e88ef5f"
dependencies = [
"arbitrary",
]
[[package]]
name = "cranelift-entity"
version = "0.126.0"
version = "0.127.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5aae980b4a1678b601eab2f52e372ed0b3c9565a31c17f380008cb97b3a699c5"
checksum = "ea0f066e07e3bcbe38884cc5c94c32c7a90267d69df80f187d9dfe421adaa7c4"
dependencies = [
"cranelift-bitset",
]
[[package]]
name = "cranelift-frontend"
version = "0.126.0"
version = "0.127.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a78877016b607982ca1708c0dd4ce23bde04581a39854c9b43a1dca43625b54c"
checksum = "40865b02a0e52ca8e580ad64feef530cb1d05f6bb4972b4eef05e3eaeae81701"
dependencies = [
"cranelift-codegen",
"log",
@ -150,15 +150,15 @@ dependencies = [
[[package]]
name = "cranelift-isle"
version = "0.126.0"
version = "0.127.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5dc46a68b46d4f53f9f2f02ab8d3a34b00f03a21c124a7a965b8cbf5fdb6773b"
checksum = "104b3c117ae513e9af1d90679842101193a5ccb96ac9f997966d85ea25be2852"
[[package]]
name = "cranelift-jit"
version = "0.126.0"
version = "0.127.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7df920009af919ad9df52eb7b47b1895145822e0c29da9b715a876fc8ecc6d82"
checksum = "3aa5f855cfb8e4253ed2d0dfc1a0b6ebe4912e67aa8b7ee14026ff55ca17f1fe"
dependencies = [
"anyhow",
"cranelift-codegen",
@ -171,14 +171,14 @@ dependencies = [
"region",
"target-lexicon",
"wasmtime-internal-jit-icache-coherence",
"windows-sys 0.60.2",
"windows-sys 0.61.2",
]
[[package]]
name = "cranelift-module"
version = "0.126.0"
version = "0.127.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ddcf313629071ce74de8e59f02092f5453d1a01047607fc4ad36886b8bd1486c"
checksum = "b1d01806b191b59f4fc4680293dd5f554caf2de5b62f95eff5beef7acb46c29c"
dependencies = [
"anyhow",
"cranelift-codegen",
@ -187,9 +187,9 @@ dependencies = [
[[package]]
name = "cranelift-native"
version = "0.126.0"
version = "0.127.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "03faa07ec8cf373250a8252eb773d098ff88259fa1c19ee1ecde8012839f4097"
checksum = "e5c54e0a358bc05b48f2032e1c320e7f468da068604f2869b77052eab68eb0fe"
dependencies = [
"cranelift-codegen",
"libc",
@ -198,9 +198,9 @@ dependencies = [
[[package]]
name = "cranelift-object"
version = "0.126.0"
version = "0.127.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7cca62c14f3c2e4f438192562bbf82d1a98a59543cc66ba04fb658ba99f515a6"
checksum = "3d17e0216be5daabab616647c1918e06dae0708474ba5f7b7762ac24ea5eb126"
dependencies = [
"anyhow",
"cranelift-codegen",
@ -213,9 +213,9 @@ dependencies = [
[[package]]
name = "cranelift-srcgen"
version = "0.126.0"
version = "0.127.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0484cb32c527a742e1bba09ef174acac0afb1dcf623ef1adda42849200edcd2e"
checksum = "cc6f4b039f453b66c75e9f7886e5a2af96276e151f44dc19b24b58f9a0c98009"
[[package]]
name = "crc32fast"
@ -293,7 +293,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "754ca22de805bb5744484a5b151a9e1a8e837d5dc232c2d7d8c2e3492edc8b60"
dependencies = [
"cfg-if",
"windows-link 0.2.1",
"windows-link",
]
[[package]]
@ -469,31 +469,25 @@ checksum = "9312f7c4f6ff9069b165498234ce8be658059c6728633667c526e27dc2cf1df5"
[[package]]
name = "wasmtime-internal-jit-icache-coherence"
version = "39.0.0"
version = "40.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3f67986f5c499274ae5b2ba5b173bba0b95d1381f5ca70d8eec657f2392117d8"
checksum = "0858b470463f3e7c73acd6049046049e64be17b98901c2db5047450cf83df1fe"
dependencies = [
"anyhow",
"cfg-if",
"libc",
"windows-sys 0.60.2",
"windows-sys 0.61.2",
]
[[package]]
name = "wasmtime-internal-math"
version = "39.0.0"
version = "40.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a681733e9b5d5d8804ee6cacd59f92c0d87ba2274f42ee1d4e5a943828d0075d"
checksum = "222e1a590ece4e898f20af1e541b61d2cb803f2557e7eaff23e6c1db5434454a"
dependencies = [
"libm",
]
[[package]]
name = "windows-link"
version = "0.1.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5e6ad25900d524eaabdbbb96d20b4311e1e7ae1699af4fb28c17ae66c80d798a"
[[package]]
name = "windows-link"
version = "0.2.1"
@ -506,16 +500,16 @@ version = "0.52.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d"
dependencies = [
"windows-targets 0.52.6",
"windows-targets",
]
[[package]]
name = "windows-sys"
version = "0.60.2"
version = "0.61.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f2f500e4d28234f72040990ec9d39e3a6b950f9f22d3dba18416c35882612bcb"
checksum = "ae137229bcbd6cdf0f7b80a31df61766145077ddf49416a728b02cb3921ff3fc"
dependencies = [
"windows-targets 0.53.3",
"windows-link",
]
[[package]]
@ -524,31 +518,14 @@ version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973"
dependencies = [
"windows_aarch64_gnullvm 0.52.6",
"windows_aarch64_msvc 0.52.6",
"windows_i686_gnu 0.52.6",
"windows_i686_gnullvm 0.52.6",
"windows_i686_msvc 0.52.6",
"windows_x86_64_gnu 0.52.6",
"windows_x86_64_gnullvm 0.52.6",
"windows_x86_64_msvc 0.52.6",
]
[[package]]
name = "windows-targets"
version = "0.53.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d5fe6031c4041849d7c496a8ded650796e7b6ecc19df1a431c1a363342e5dc91"
dependencies = [
"windows-link 0.1.3",
"windows_aarch64_gnullvm 0.53.0",
"windows_aarch64_msvc 0.53.0",
"windows_i686_gnu 0.53.0",
"windows_i686_gnullvm 0.53.0",
"windows_i686_msvc 0.53.0",
"windows_x86_64_gnu 0.53.0",
"windows_x86_64_gnullvm 0.53.0",
"windows_x86_64_msvc 0.53.0",
"windows_aarch64_gnullvm",
"windows_aarch64_msvc",
"windows_i686_gnu",
"windows_i686_gnullvm",
"windows_i686_msvc",
"windows_x86_64_gnu",
"windows_x86_64_gnullvm",
"windows_x86_64_msvc",
]
[[package]]
@ -557,92 +534,44 @@ version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3"
[[package]]
name = "windows_aarch64_gnullvm"
version = "0.53.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "86b8d5f90ddd19cb4a147a5fa63ca848db3df085e25fee3cc10b39b6eebae764"
[[package]]
name = "windows_aarch64_msvc"
version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469"
[[package]]
name = "windows_aarch64_msvc"
version = "0.53.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c7651a1f62a11b8cbd5e0d42526e55f2c99886c77e007179efff86c2b137e66c"
[[package]]
name = "windows_i686_gnu"
version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b"
[[package]]
name = "windows_i686_gnu"
version = "0.53.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c1dc67659d35f387f5f6c479dc4e28f1d4bb90ddd1a5d3da2e5d97b42d6272c3"
[[package]]
name = "windows_i686_gnullvm"
version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66"
[[package]]
name = "windows_i686_gnullvm"
version = "0.53.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9ce6ccbdedbf6d6354471319e781c0dfef054c81fbc7cf83f338a4296c0cae11"
[[package]]
name = "windows_i686_msvc"
version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66"
[[package]]
name = "windows_i686_msvc"
version = "0.53.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "581fee95406bb13382d2f65cd4a908ca7b1e4c2f1917f143ba16efe98a589b5d"
[[package]]
name = "windows_x86_64_gnu"
version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78"
[[package]]
name = "windows_x86_64_gnu"
version = "0.53.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2e55b5ac9ea33f2fc1716d1742db15574fd6fc8dadc51caab1c16a3d3b4190ba"
[[package]]
name = "windows_x86_64_gnullvm"
version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d"
[[package]]
name = "windows_x86_64_gnullvm"
version = "0.53.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0a6e035dd0599267ce1ee132e51c27dd29437f63325753051e71dd9e42406c57"
[[package]]
name = "windows_x86_64_msvc"
version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec"
[[package]]
name = "windows_x86_64_msvc"
version = "0.53.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "271414315aff87387382ec3d271b52d7ae78726f5d44ac98b4f4030c91880486"

View file

@ -8,12 +8,12 @@ crate-type = ["dylib"]
[dependencies]
# These have to be in sync with each other
cranelift-codegen = { version = "0.126.0", default-features = false, features = ["std", "timing", "unwind", "all-native-arch"] }
cranelift-frontend = { version = "0.126.0" }
cranelift-module = { version = "0.126.0" }
cranelift-native = { version = "0.126.0" }
cranelift-jit = { version = "0.126.0", optional = true }
cranelift-object = { version = "0.126.0" }
cranelift-codegen = { version = "0.127.0", default-features = false, features = ["std", "timing", "unwind", "all-native-arch"] }
cranelift-frontend = { version = "0.127.0" }
cranelift-module = { version = "0.127.0" }
cranelift-native = { version = "0.127.0" }
cranelift-jit = { version = "0.127.0", optional = true }
cranelift-object = { version = "0.127.0" }
target-lexicon = "0.13"
gimli = { version = "0.32", default-features = false, features = ["write"] }
object = { version = "0.37.3", default-features = false, features = ["std", "read_core", "write", "archive", "coff", "elf", "macho", "pe"] }
@ -24,12 +24,12 @@ smallvec = "1.8.1"
[patch.crates-io]
# Uncomment to use an unreleased version of cranelift
#cranelift-codegen = { git = "https://github.com/bytecodealliance/wasmtime.git", branch = "release-39.0.0" }
#cranelift-frontend = { git = "https://github.com/bytecodealliance/wasmtime.git", branch = "release-39.0.0" }
#cranelift-module = { git = "https://github.com/bytecodealliance/wasmtime.git", branch = "release-39.0.0" }
#cranelift-native = { git = "https://github.com/bytecodealliance/wasmtime.git", branch = "release-39.0.0" }
#cranelift-jit = { git = "https://github.com/bytecodealliance/wasmtime.git", branch = "release-39.0.0" }
#cranelift-object = { git = "https://github.com/bytecodealliance/wasmtime.git", branch = "release-39.0.0" }
#cranelift-codegen = { git = "https://github.com/bytecodealliance/wasmtime.git", branch = "release-40.0.0" }
#cranelift-frontend = { git = "https://github.com/bytecodealliance/wasmtime.git", branch = "release-40.0.0" }
#cranelift-module = { git = "https://github.com/bytecodealliance/wasmtime.git", branch = "release-40.0.0" }
#cranelift-native = { git = "https://github.com/bytecodealliance/wasmtime.git", branch = "release-40.0.0" }
#cranelift-jit = { git = "https://github.com/bytecodealliance/wasmtime.git", branch = "release-40.0.0" }
#cranelift-object = { git = "https://github.com/bytecodealliance/wasmtime.git", branch = "release-40.0.0" }
# Uncomment to use local checkout of cranelift
#cranelift-codegen = { path = "../wasmtime/cranelift/codegen" }

View file

@ -259,6 +259,9 @@ unsafe fn test_simd() {
test_mm_cvttps_epi32();
test_mm_cvtsi128_si64();
#[cfg(not(jit))]
test_mm_cvtps_ph();
test_mm_extract_epi8();
test_mm_insert_epi16();
test_mm_shuffle_epi8();
@ -558,6 +561,21 @@ unsafe fn test_mm_cvttps_epi32() {
}
}
#[cfg(target_arch = "x86_64")]
#[target_feature(enable = "f16c")]
#[cfg(not(jit))]
unsafe fn test_mm_cvtps_ph() {
const F16_ONE: i16 = 0x3c00;
const F16_TWO: i16 = 0x4000;
const F16_THREE: i16 = 0x4200;
const F16_FOUR: i16 = 0x4400;
let a = _mm_set_ps(1.0, 2.0, 3.0, 4.0);
let r = _mm_cvtps_ph::<_MM_FROUND_CUR_DIRECTION>(a);
let e = _mm_set_epi16(0, 0, 0, 0, F16_ONE, F16_TWO, F16_THREE, F16_FOUR);
assert_eq_m128i(r, e);
}
fn test_checked_mul() {
let u: Option<u8> = u8::from_str_radix("1000", 10).ok();
assert_eq!(u, None);

View file

@ -1,4 +0,0 @@
[toolchain]
channel = "nightly-2025-12-08"
components = ["rust-src", "rustc-dev", "llvm-tools"]
profile = "minimal"

View file

@ -0,0 +1,4 @@
[toolchain]
channel = "nightly-2025-12-23"
components = ["rust-src", "rustc-dev", "llvm-tools", "rustfmt"]
profile = "minimal"

View file

@ -22,8 +22,7 @@ case $1 in
"prepare")
echo "=> Installing new nightly"
rustup toolchain install --profile minimal "nightly-${TOOLCHAIN}" # Sanity check to see if the nightly exists
sed -i "s/\"nightly-.*\"/\"nightly-${TOOLCHAIN}\"/" rust-toolchain
rustup component add rustfmt || true
sed -i "s/\"nightly-.*\"/\"nightly-${TOOLCHAIN}\"/" rust-toolchain.toml
echo "=> Uninstalling all old nightlies"
for nightly in $(rustup toolchain list | grep nightly | grep -v "$TOOLCHAIN" | grep -v nightly-x86_64); do
@ -35,7 +34,7 @@ case $1 in
./y.sh prepare
;;
"commit")
git add rust-toolchain
git add rust-toolchain.toml
git commit -m "Rustup to $(rustc -V)"
;;
"push")

View file

@ -35,6 +35,7 @@ git checkout -- tests/ui/entry-point/auxiliary/bad_main_functions.rs
rm tests/ui/asm/x86_64/evex512-implicit-feature.rs # unimplemented AVX512 x86 vendor intrinsic
rm tests/ui/simd/dont-invalid-bitcast-x86_64.rs # unimplemented llvm.x86.sse41.round.ps
rm tests/ui/simd/intrinsic/generic-arithmetic-pass.rs # unimplemented simd_funnel_{shl,shr}
rm -r tests/ui/scalable-vectors # scalable vectors are unsupported
# exotic linkages
rm tests/incremental/hashes/function_interfaces.rs
@ -53,23 +54,29 @@ rm tests/ui/sanitizer/kcfi-c-variadic.rs # same
rm tests/ui/c-variadic/same-program-multiple-abis-x86_64.rs # variadics for calling conventions other than C unsupported
rm tests/ui/delegation/fn-header.rs
# inline assembly features
rm tests/ui/asm/x86_64/issue-96797.rs # const and sym inline asm operands don't work entirely correctly
rm tests/ui/asm/global-asm-mono-sym-fn.rs # same
rm tests/ui/asm/naked-asm-mono-sym-fn.rs # same
rm tests/ui/asm/x86_64/goto.rs # inline asm labels not supported
rm tests/ui/asm/label-operand.rs # same
rm tests/ui/asm/may_unwind.rs # asm unwinding not supported
rm tests/ui/asm/aarch64/may_unwind.rs # same
# misc unimplemented things
rm tests/ui/target-feature/missing-plusminus.rs # error not implemented
rm -r tests/run-make/repr128-dwarf # debuginfo test
rm -r tests/run-make/split-debuginfo # same
rm -r tests/run-make/target-specs # i686 not supported by Cranelift
rm -r tests/run-make/mismatching-target-triples # same
rm tests/ui/asm/x86_64/issue-96797.rs # const and sym inline asm operands don't work entirely correctly
rm tests/ui/asm/global-asm-mono-sym-fn.rs # same
rm tests/ui/asm/naked-asm-mono-sym-fn.rs # same
rm tests/ui/asm/x86_64/goto.rs # inline asm labels not supported
rm tests/ui/asm/label-operand.rs # same
rm tests/ui/simd/simd-bitmask-notpow2.rs # non-pow-of-2 simd vector sizes
rm -r tests/run-make/used-proc-macro # used(linker) isn't supported yet
rm tests/ui/linking/no-gc-encapsulation-symbols.rs # same
rm tests/ui/attributes/fn-align-dyn.rs # per-function alignment not supported
rm -r tests/ui/explicit-tail-calls # tail calls
rm -r tests/run-make/pointer-auth-link-with-c # pointer auth
rm -r tests/ui/eii # EII not yet implemented
rm -r tests/run-make/forced-unwind-terminate-pof # forced unwinding doesn't take precedence
# requires LTO
rm -r tests/run-make/cdylib
@ -78,6 +85,7 @@ rm -r tests/run-make/lto-*
rm -r tests/run-make/reproducible-build-2
rm -r tests/run-make/no-builtins-lto
rm -r tests/run-make/reachable-extern-fn-available-lto
rm -r tests/run-make/no-builtins-linker-plugin-lto
# coverage instrumentation
rm tests/ui/consts/precise-drop-with-coverage.rs
@ -87,6 +95,7 @@ rm -r tests/ui/instrument-coverage/
# ==================
rm tests/ui/codegen/issue-28950.rs # depends on stack size optimizations
rm tests/ui/codegen/init-large-type.rs # same
rm tests/ui/codegen/StackColoring-not-blowup-stack-issue-40883.rs # same
rm tests/ui/statics/const_generics.rs # tests an optimization
rm tests/ui/linking/executable-no-mangle-strip.rs # requires --gc-sections to work for statics
@ -143,6 +152,15 @@ rm tests/ui/errors/remap-path-prefix-sysroot.rs # different sysroot source path
rm -r tests/run-make/export/extern-opt # something about rustc version mismatches
rm -r tests/run-make/export # same
rm -r tests/ui/compiletest-self-test/compile-flags-incremental.rs # needs compiletest compiled with panic=unwind
rm tests/ui/async-await/in-trait/dont-project-to-specializable-projection.rs # something going wrong with stdlib source remapping
rm tests/ui/consts/miri_unleashed/drop.rs # same
rm tests/ui/error-emitter/multiline-removal-suggestion.rs # same
rm tests/ui/lint/lint-const-item-mutation.rs # same
rm tests/ui/lint/use-redundant/use-redundant-issue-71450.rs # same
rm tests/ui/lint/use-redundant/use-redundant-prelude-rust-2021.rs # same
rm tests/ui/specialization/const_trait_impl.rs # same
rm tests/ui/thir-print/offset_of.rs # same
rm tests/ui/traits/const-traits/const_closure-const_trait_impl-ice-113381.rs # same
# genuine bugs
# ============
@ -157,6 +175,7 @@ rm tests/ui/lint/non-snake-case/lint-non-snake-case-crate.rs # same
rm tests/ui/async-await/async-drop/async-drop-initial.rs # same (rust-lang/rust#140493)
rm -r tests/ui/codegen/equal-pointers-unequal # make incorrect assumptions about the location of stack variables
rm -r tests/run-make-cargo/rustdoc-scrape-examples-paths # FIXME(rust-lang/rust#145580) incr comp bug
rm -r tests/incremental/extern_static/issue-49153.rs # assumes reference to undefined static gets optimized away
rm tests/ui/intrinsics/panic-uninitialized-zeroed.rs # really slow with unoptimized libstd
rm tests/ui/process/process-panic-after-fork.rs # same

View file

@ -10,7 +10,7 @@ use rustc_data_structures::profiling::SelfProfilerRef;
use rustc_index::IndexVec;
use rustc_middle::ty::TypeVisitableExt;
use rustc_middle::ty::adjustment::PointerCoercion;
use rustc_middle::ty::layout::{FnAbiOf, HasTypingEnv};
use rustc_middle::ty::layout::FnAbiOf;
use rustc_middle::ty::print::with_no_trimmed_paths;
use rustc_session::config::OutputFilenames;
use rustc_span::Symbol;
@ -853,17 +853,6 @@ fn codegen_stmt<'tcx>(fx: &mut FunctionCx<'_, '_, 'tcx>, cur_block: Block, stmt:
fx.bcx.ins().nop();
}
}
Rvalue::NullaryOp(ref null_op) => {
assert!(lval.layout().ty.is_sized(fx.tcx, fx.typing_env()));
let val = match null_op {
NullOp::RuntimeChecks(kind) => kind.value(fx.tcx.sess),
};
let val = CValue::by_val(
fx.bcx.ins().iconst(types::I8, i64::from(val)),
fx.layout_of(fx.tcx.types.bool),
);
lval.write_cvalue(fx, val);
}
Rvalue::Aggregate(ref kind, ref operands)
if matches!(**kind, AggregateKind::RawPtr(..)) =>
{
@ -1050,6 +1039,11 @@ pub(crate) fn codegen_operand<'tcx>(
cplace.to_cvalue(fx)
}
Operand::Constant(const_) => crate::constant::codegen_constant_operand(fx, const_),
Operand::RuntimeChecks(checks) => {
let val = checks.value(fx.tcx.sess);
let layout = fx.layout_of(fx.tcx.types.bool);
return CValue::const_val(fx, layout, val.into());
}
}
}

View file

@ -540,6 +540,7 @@ pub(crate) fn mir_operand_get_const_val<'tcx>(
operand: &Operand<'tcx>,
) -> Option<ScalarInt> {
match operand {
Operand::RuntimeChecks(checks) => Some(checks.value(fx.tcx.sess).into()),
Operand::Constant(const_) => eval_mir_constant(fx, const_).0.try_to_scalar_int(),
// FIXME(rust-lang/rust#85105): Casts like `IMM8 as u32` result in the const being stored
// inside a temporary before being passed to the intrinsic requiring the const argument.

View file

@ -190,7 +190,7 @@ fn dep_symbol_lookup_fn(
diag.emit();
}
Linkage::Dynamic => {
dylib_paths.push(src.dylib.as_ref().unwrap().0.clone());
dylib_paths.push(src.dylib.as_ref().unwrap().clone());
}
}
}

View file

@ -1313,6 +1313,35 @@ pub(super) fn codegen_x86_llvm_intrinsic_call<'tcx>(
ret.write_cvalue_transmute(fx, res);
}
"llvm.x86.vcvtps2ph.128" => {
// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtps_ph
intrinsic_args!(fx, args => (a, _imm8); intrinsic);
let a = a.load_scalar(fx);
let imm8 =
if let Some(imm8) = crate::constant::mir_operand_get_const_val(fx, &args[1].node) {
imm8
} else {
fx.tcx
.dcx()
.span_fatal(span, "Index argument for `_mm_cvtps_ph` is not a constant");
};
let imm8 = imm8.to_u32();
codegen_inline_asm_inner(
fx,
&[InlineAsmTemplatePiece::String(format!("vcvtps2ph xmm0, xmm0, {imm8}").into())],
&[CInlineAsmOperand::InOut {
reg: InlineAsmRegOrRegClass::Reg(InlineAsmReg::X86(X86InlineAsmReg::xmm0)),
_late: true,
in_value: a,
out_place: Some(ret),
}],
InlineAsmOptions::NOSTACK | InlineAsmOptions::PURE | InlineAsmOptions::NOMEM,
);
}
_ => {
fx.tcx
.dcx()

View file

@ -130,7 +130,7 @@ pub(super) fn codegen_simd_intrinsic_call<'tcx>(
return;
}
let idx = generic_args[2].expect_const().to_value().valtree.unwrap_branch();
let idx = generic_args[2].expect_const().to_branch();
assert_eq!(x.layout(), y.layout());
let layout = x.layout();
@ -143,7 +143,7 @@ pub(super) fn codegen_simd_intrinsic_call<'tcx>(
let total_len = lane_count * 2;
let indexes = idx.iter().map(|idx| idx.unwrap_leaf().to_u32()).collect::<Vec<u32>>();
let indexes = idx.iter().map(|idx| idx.to_leaf().to_u32()).collect::<Vec<u32>>();
for &idx in &indexes {
assert!(u64::from(idx) < total_len, "idx {} out of range 0..{}", idx, total_len);
@ -961,9 +961,8 @@ pub(super) fn codegen_simd_intrinsic_call<'tcx>(
let lane_clif_ty = fx.clif_type(val_lane_ty).unwrap();
let ptr_val = ptr.load_scalar(fx);
let alignment = generic_args[3].expect_const().to_value().valtree.unwrap_branch()[0]
.unwrap_leaf()
.to_simd_alignment();
let alignment =
generic_args[3].expect_const().to_branch()[0].to_leaf().to_simd_alignment();
let memflags = match alignment {
SimdAlign::Unaligned => MemFlags::new().with_notrap(),
@ -1006,15 +1005,6 @@ pub(super) fn codegen_simd_intrinsic_call<'tcx>(
let lane_clif_ty = fx.clif_type(val_lane_ty).unwrap();
let ret_lane_layout = fx.layout_of(ret_lane_ty);
let alignment = generic_args[3].expect_const().to_value().valtree.unwrap_branch()[0]
.unwrap_leaf()
.to_simd_alignment();
let memflags = match alignment {
SimdAlign::Unaligned => MemFlags::new().with_notrap(),
_ => MemFlags::trusted(),
};
for lane_idx in 0..ptr_lane_count {
let val_lane = val.value_lane(fx, lane_idx).load_scalar(fx);
let ptr_lane = ptr.value_lane(fx, lane_idx).load_scalar(fx);
@ -1030,7 +1020,7 @@ pub(super) fn codegen_simd_intrinsic_call<'tcx>(
fx.bcx.seal_block(if_disabled);
fx.bcx.switch_to_block(if_enabled);
let res = fx.bcx.ins().load(lane_clif_ty, memflags, ptr_lane, 0);
let res = fx.bcx.ins().load(lane_clif_ty, MemFlags::trusted(), ptr_lane, 0);
fx.bcx.ins().jump(next, &[res.into()]);
fx.bcx.switch_to_block(if_disabled);
@ -1059,9 +1049,8 @@ pub(super) fn codegen_simd_intrinsic_call<'tcx>(
let ret_lane_layout = fx.layout_of(ret_lane_ty);
let ptr_val = ptr.load_scalar(fx);
let alignment = generic_args[3].expect_const().to_value().valtree.unwrap_branch()[0]
.unwrap_leaf()
.to_simd_alignment();
let alignment =
generic_args[3].expect_const().to_branch()[0].to_leaf().to_simd_alignment();
let memflags = match alignment {
SimdAlign::Unaligned => MemFlags::new().with_notrap(),

View file

@ -10,7 +10,7 @@
//! function u0:22(i64) -> i8, i8 system_v {
//! ; symbol _ZN97_$LT$example..IsNotEmpty$u20$as$u20$mini_core..FnOnce$LT$$LP$$RF$$RF$$u5b$u16$u5d$$C$$RP$$GT$$GT$9call_once17hd361e9f5c3d1c4deE
//! ; instance Instance { def: Item(DefId(0:42 ~ example[3895]::{impl#0}::call_once)), args: ['{erased}, '{erased}] }
//! ; abi FnAbi { args: [ArgAbi { layout: TyAndLayout { ty: IsNotEmpty, layout: Layout { size: Size(0 bytes), align: AbiAndPrefAlign { abi: Align(1 bytes), pref: Align(8 bytes) }, backend_repr: Memory { sized: true }, fields: Arbitrary { offsets: [], memory_index: [] }, largest_niche: None, uninhabited: false, variants: Single { index: 0 }, max_repr_align: None, unadjusted_abi_align: Align(1 bytes), randomization_seed: 12266848898570219025 } }, mode: Ignore }, ArgAbi { layout: TyAndLayout { ty: &&[u16], layout: Layout { size: Size(8 bytes), align: AbiAndPrefAlign { abi: Align(8 bytes), pref: Align(8 bytes) }, backend_repr: Scalar(Initialized { value: Pointer(AddressSpace(0)), valid_range: 1..=18446744073709551615 }), fields: Primitive, largest_niche: Some(Niche { offset: Size(0 bytes), value: Pointer(AddressSpace(0)), valid_range: 1..=18446744073709551615 }), uninhabited: false, variants: Single { index: 0 }, max_repr_align: None, unadjusted_abi_align: Align(8 bytes), randomization_seed: 281492156579847 } }, mode: Direct(ArgAttributes { regular: NonNull | NoUndef, arg_ext: None, pointee_size: Size(0 bytes), pointee_align: Some(Align(8 bytes)) }) }], ret: ArgAbi { layout: TyAndLayout { ty: (u8, u8), layout: Layout { size: Size(2 bytes), align: AbiAndPrefAlign { abi: Align(1 bytes), pref: Align(8 bytes) }, backend_repr: ScalarPair(Initialized { value: Int(I8, false), valid_range: 0..=255 }, Initialized { value: Int(I8, false), valid_range: 0..=255 }), fields: Arbitrary { offsets: [Size(0 bytes), Size(1 bytes)], memory_index: [0, 1] }, largest_niche: None, uninhabited: false, variants: Single { index: 0 }, max_repr_align: None, unadjusted_abi_align: Align(1 bytes), randomization_seed: 71776127651151873 } }, mode: Pair(ArgAttributes { regular: NoUndef, arg_ext: None, pointee_size: Size(0 bytes), pointee_align: None }, ArgAttributes { regular: NoUndef, arg_ext: None, pointee_size: Size(0 bytes), pointee_align: None }) }, c_variadic: false, fixed_count: 1, conv: Rust, 
can_unwind: false }
//! ; abi FnAbi { args: [ArgAbi { layout: TyAndLayout { ty: IsNotEmpty, layout: Layout { size: Size(0 bytes), align: AbiAndPrefAlign { abi: Align(1 bytes), pref: Align(8 bytes) }, backend_repr: Memory { sized: true }, fields: Arbitrary { offsets: [], in_memory_order: [] }, largest_niche: None, uninhabited: false, variants: Single { index: 0 }, max_repr_align: None, unadjusted_abi_align: Align(1 bytes), randomization_seed: 12266848898570219025 } }, mode: Ignore }, ArgAbi { layout: TyAndLayout { ty: &&[u16], layout: Layout { size: Size(8 bytes), align: AbiAndPrefAlign { abi: Align(8 bytes), pref: Align(8 bytes) }, backend_repr: Scalar(Initialized { value: Pointer(AddressSpace(0)), valid_range: 1..=18446744073709551615 }), fields: Primitive, largest_niche: Some(Niche { offset: Size(0 bytes), value: Pointer(AddressSpace(0)), valid_range: 1..=18446744073709551615 }), uninhabited: false, variants: Single { index: 0 }, max_repr_align: None, unadjusted_abi_align: Align(8 bytes), randomization_seed: 281492156579847 } }, mode: Direct(ArgAttributes { regular: NonNull | NoUndef, arg_ext: None, pointee_size: Size(0 bytes), pointee_align: Some(Align(8 bytes)) }) }], ret: ArgAbi { layout: TyAndLayout { ty: (u8, u8), layout: Layout { size: Size(2 bytes), align: AbiAndPrefAlign { abi: Align(1 bytes), pref: Align(8 bytes) }, backend_repr: ScalarPair(Initialized { value: Int(I8, false), valid_range: 0..=255 }, Initialized { value: Int(I8, false), valid_range: 0..=255 }), fields: Arbitrary { offsets: [Size(0 bytes), Size(1 bytes)], in_memory_order: [0, 1] }, largest_niche: None, uninhabited: false, variants: Single { index: 0 }, max_repr_align: None, unadjusted_abi_align: Align(1 bytes), randomization_seed: 71776127651151873 } }, mode: Pair(ArgAttributes { regular: NoUndef, arg_ext: None, pointee_size: Size(0 bytes), pointee_align: None }, ArgAttributes { regular: NoUndef, arg_ext: None, pointee_size: Size(0 bytes), pointee_align: None }) }, c_variadic: false, fixed_count: 1, conv: 
Rust, can_unwind: false }
//!
//! ; kind loc.idx param pass mode ty
//! ; ssa _0 (u8, u8) 2b 1 var=(0, 1)
@ -41,7 +41,7 @@
//! ;
//! ; _0 = <IsNotEmpty as mini_core::FnMut<(&&[u16],)>>::call_mut(move _3, copy _2)
//! v2 = stack_load.i64 ss0
//! ; abi: FnAbi { args: [ArgAbi { layout: TyAndLayout { ty: &mut IsNotEmpty, layout: Layout { size: Size(8 bytes), align: AbiAndPrefAlign { abi: Align(8 bytes), pref: Align(8 bytes) }, backend_repr: Scalar(Initialized { value: Pointer(AddressSpace(0)), valid_range: 1..=18446744073709551615 }), fields: Primitive, largest_niche: Some(Niche { offset: Size(0 bytes), value: Pointer(AddressSpace(0)), valid_range: 1..=18446744073709551615 }), uninhabited: false, variants: Single { index: 0 }, max_repr_align: None, unadjusted_abi_align: Align(8 bytes), randomization_seed: 281492156579847 } }, mode: Direct(ArgAttributes { regular: NonNull | NoUndef, arg_ext: None, pointee_size: Size(0 bytes), pointee_align: Some(Align(1 bytes)) }) }, ArgAbi { layout: TyAndLayout { ty: &&[u16], layout: Layout { size: Size(8 bytes), align: AbiAndPrefAlign { abi: Align(8 bytes), pref: Align(8 bytes) }, backend_repr: Scalar(Initialized { value: Pointer(AddressSpace(0)), valid_range: 1..=18446744073709551615 }), fields: Primitive, largest_niche: Some(Niche { offset: Size(0 bytes), value: Pointer(AddressSpace(0)), valid_range: 1..=18446744073709551615 }), uninhabited: false, variants: Single { index: 0 }, max_repr_align: None, unadjusted_abi_align: Align(8 bytes), randomization_seed: 281492156579847 } }, mode: Direct(ArgAttributes { regular: NonNull | NoUndef, arg_ext: None, pointee_size: Size(0 bytes), pointee_align: Some(Align(8 bytes)) }) }], ret: ArgAbi { layout: TyAndLayout { ty: (u8, u8), layout: Layout { size: Size(2 bytes), align: AbiAndPrefAlign { abi: Align(1 bytes), pref: Align(8 bytes) }, backend_repr: ScalarPair(Initialized { value: Int(I8, false), valid_range: 0..=255 }, Initialized { value: Int(I8, false), valid_range: 0..=255 }), fields: Arbitrary { offsets: [Size(0 bytes), Size(1 bytes)], memory_index: [0, 1] }, largest_niche: None, uninhabited: false, variants: Single { index: 0 }, max_repr_align: None, unadjusted_abi_align: Align(1 bytes), randomization_seed: 71776127651151873 
} }, mode: Pair(ArgAttributes { regular: NoUndef, arg_ext: None, pointee_size: Size(0 bytes), pointee_align: None }, ArgAttributes { regular: NoUndef, arg_ext: None, pointee_size: Size(0 bytes), pointee_align: None }) }, c_variadic: false, fixed_count: 1, conv: Rust, can_unwind: false }
//! ; abi: FnAbi { args: [ArgAbi { layout: TyAndLayout { ty: &mut IsNotEmpty, layout: Layout { size: Size(8 bytes), align: AbiAndPrefAlign { abi: Align(8 bytes), pref: Align(8 bytes) }, backend_repr: Scalar(Initialized { value: Pointer(AddressSpace(0)), valid_range: 1..=18446744073709551615 }), fields: Primitive, largest_niche: Some(Niche { offset: Size(0 bytes), value: Pointer(AddressSpace(0)), valid_range: 1..=18446744073709551615 }), uninhabited: false, variants: Single { index: 0 }, max_repr_align: None, unadjusted_abi_align: Align(8 bytes), randomization_seed: 281492156579847 } }, mode: Direct(ArgAttributes { regular: NonNull | NoUndef, arg_ext: None, pointee_size: Size(0 bytes), pointee_align: Some(Align(1 bytes)) }) }, ArgAbi { layout: TyAndLayout { ty: &&[u16], layout: Layout { size: Size(8 bytes), align: AbiAndPrefAlign { abi: Align(8 bytes), pref: Align(8 bytes) }, backend_repr: Scalar(Initialized { value: Pointer(AddressSpace(0)), valid_range: 1..=18446744073709551615 }), fields: Primitive, largest_niche: Some(Niche { offset: Size(0 bytes), value: Pointer(AddressSpace(0)), valid_range: 1..=18446744073709551615 }), uninhabited: false, variants: Single { index: 0 }, max_repr_align: None, unadjusted_abi_align: Align(8 bytes), randomization_seed: 281492156579847 } }, mode: Direct(ArgAttributes { regular: NonNull | NoUndef, arg_ext: None, pointee_size: Size(0 bytes), pointee_align: Some(Align(8 bytes)) }) }], ret: ArgAbi { layout: TyAndLayout { ty: (u8, u8), layout: Layout { size: Size(2 bytes), align: AbiAndPrefAlign { abi: Align(1 bytes), pref: Align(8 bytes) }, backend_repr: ScalarPair(Initialized { value: Int(I8, false), valid_range: 0..=255 }, Initialized { value: Int(I8, false), valid_range: 0..=255 }), fields: Arbitrary { offsets: [Size(0 bytes), Size(1 bytes)], in_memory_order: [0, 1] }, largest_niche: None, uninhabited: false, variants: Single { index: 0 }, max_repr_align: None, unadjusted_abi_align: Align(1 bytes), randomization_seed: 
71776127651151873 } }, mode: Pair(ArgAttributes { regular: NoUndef, arg_ext: None, pointee_size: Size(0 bytes), pointee_align: None }, ArgAttributes { regular: NoUndef, arg_ext: None, pointee_size: Size(0 bytes), pointee_align: None }) }, c_variadic: false, fixed_count: 1, conv: Rust, can_unwind: false }
//! v3, v4 = call fn0(v1, v2) ; v1 = 1
//! v5 -> v3
//! v6 -> v4

View file

@ -2,6 +2,3 @@
# Prevents un-canonicalized issue links (to avoid wrong issues being linked in r-l/rust)
[issue-links]
# Prevents mentions in commits to avoid users being spammed
[no-mentions]

View file

@ -111,14 +111,20 @@ pub fn build_sysroot(env: &HashMap<String, String>, config: &ConfigInfo) -> Resu
// Symlink libgccjit.so to sysroot.
let lib_path = start_dir.join("sysroot").join("lib");
let rustlib_target_path = lib_path
.join("rustlib")
.join(&config.host_triple)
.join("codegen-backends")
.join("lib")
.join(&config.target_triple);
let libgccjit_path =
PathBuf::from(config.gcc_path.as_ref().expect("libgccjit should be set by this point"))
.join("libgccjit.so");
let libgccjit_in_sysroot_path = lib_path.join("libgccjit.so");
let libgccjit_in_sysroot_path = rustlib_target_path.join("libgccjit.so");
// First remove the file to be able to create the symlink even when the file already exists.
let _ = fs::remove_file(&libgccjit_in_sysroot_path);
create_dir(&lib_path)?;
symlink(libgccjit_path, libgccjit_in_sysroot_path)
create_dir(&rustlib_target_path)?;
symlink(libgccjit_path, &libgccjit_in_sysroot_path)
.map_err(|error| format!("Cannot create symlink for libgccjit.so: {}", error))?;
let library_dir = start_dir.join("sysroot_src").join("library");

View file

@ -1,3 +1,3 @@
[toolchain]
channel = "nightly-2025-11-24"
channel = "nightly-2025-12-20"
components = ["rust-src", "rustc-dev", "llvm-tools-preview"]

View file

@ -314,14 +314,12 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
self.block.get_function()
}
fn function_call(
pub fn function_call(
&mut self,
func: RValue<'gcc>,
func: Function<'gcc>,
args: &[RValue<'gcc>],
_funclet: Option<&Funclet>,
) -> RValue<'gcc> {
// TODO(antoyo): remove when the API supports a different type for functions.
let func: Function<'gcc> = self.cx.rvalue_as_function(func);
let args = self.check_call("call", func, args);
// gccjit requires to use the result of functions, even when it's not used.
@ -514,6 +512,7 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
type CodegenCx = CodegenCx<'gcc, 'tcx>;
fn build(cx: &'a CodegenCx<'gcc, 'tcx>, block: Block<'gcc>) -> Builder<'a, 'gcc, 'tcx> {
*cx.current_func.borrow_mut() = Some(block.get_function());
Builder::with_cx(cx, block)
}
@ -1765,6 +1764,8 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
// FIXME(antoyo): remove when having a proper API.
let gcc_func = unsafe { std::mem::transmute::<RValue<'gcc>, Function<'gcc>>(func) };
let call = if self.functions.borrow().values().any(|value| *value == gcc_func) {
// TODO(antoyo): remove when the API supports a different type for functions.
let func: Function<'gcc> = self.cx.rvalue_as_function(func);
self.function_call(func, args, funclet)
} else {
// If it's not a function that was defined, it's a function pointer.

View file

@ -92,6 +92,8 @@ pub struct CodegenCx<'gcc, 'tcx> {
pub instances: RefCell<FxHashMap<Instance<'tcx>, LValue<'gcc>>>,
/// Cache function instances of monomorphic and polymorphic items
pub function_instances: RefCell<FxHashMap<Instance<'tcx>, Function<'gcc>>>,
/// Cache function instances of intrinsics
pub intrinsic_instances: RefCell<FxHashMap<Instance<'tcx>, Function<'gcc>>>,
/// Cache generated vtables
pub vtables:
RefCell<FxHashMap<(Ty<'tcx>, Option<ty::ExistentialTraitRef<'tcx>>), RValue<'gcc>>>,
@ -280,6 +282,7 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
linkage: Cell::new(FunctionType::Internal),
instances: Default::default(),
function_instances: Default::default(),
intrinsic_instances: Default::default(),
on_stack_params: Default::default(),
on_stack_function_params: Default::default(),
vtables: Default::default(),
@ -391,17 +394,13 @@ impl<'gcc, 'tcx> MiscCodegenMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
}
fn get_fn(&self, instance: Instance<'tcx>) -> Function<'gcc> {
let func = get_fn(self, instance);
*self.current_func.borrow_mut() = Some(func);
func
get_fn(self, instance)
}
fn get_fn_addr(&self, instance: Instance<'tcx>) -> RValue<'gcc> {
let func_name = self.tcx.symbol_name(instance).name;
let func = if self.intrinsics.borrow().contains_key(func_name) {
self.intrinsics.borrow()[func_name]
} else if let Some(variable) = self.get_declared_value(func_name) {
let func = if let Some(variable) = self.get_declared_value(func_name) {
return variable;
} else {
get_fn(self, instance)

View file

@ -8,7 +8,6 @@ use rustc_target::callconv::FnAbi;
use crate::abi::{FnAbiGcc, FnAbiGccExt};
use crate::context::CodegenCx;
use crate::intrinsic::llvm;
impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
pub fn get_or_insert_global(
@ -100,18 +99,14 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
let return_type = self.type_i32();
let variadic = false;
self.linkage.set(FunctionType::Exported);
let func = declare_raw_fn(
declare_raw_fn(
self,
name,
callconv,
return_type,
&[self.type_i32(), const_string],
variadic,
);
// NOTE: it is needed to set the current_func here as well, because get_fn() is not called
// for the main function.
*self.current_func.borrow_mut() = Some(func);
func
)
}
pub fn declare_fn(&self, name: &str, fn_abi: &FnAbi<'tcx, Ty<'tcx>>) -> Function<'gcc> {
@ -166,19 +161,6 @@ fn declare_raw_fn<'gcc>(
param_types: &[Type<'gcc>],
variadic: bool,
) -> Function<'gcc> {
if name.starts_with("llvm.") {
let intrinsic = match name {
"llvm.fma.f16" => {
// fma is not a target builtin, but a normal builtin, so we handle it differently
// here.
cx.context.get_builtin_function("fma")
}
_ => llvm::intrinsic(name, cx),
};
cx.intrinsics.borrow_mut().insert(name.to_string(), intrinsic);
return intrinsic;
}
let func = if cx.functions.borrow().contains_key(name) {
cx.functions.borrow()[name]
} else {

View file

@ -70,10 +70,6 @@ fn map_arch_intrinsic(full_name: &str) -> &'static str {
"sve.sm4e" => "__builtin_sve_svsm4e_u32",
"sve.sm4ekey" => "__builtin_sve_svsm4ekey_u32",
"sve.wrffr" => "__builtin_sve_svwrffr",
"tcancel" => "__builtin_arm_tcancel",
"tcommit" => "__builtin_arm_tcommit",
"tstart" => "__builtin_arm_tstart",
"ttest" => "__builtin_arm_ttest",
_ => unimplemented!("***** unsupported LLVM intrinsic {full_name}"),
}
}
@ -1632,6 +1628,14 @@ fn map_arch_intrinsic(full_name: &str) -> &'static str {
"V6.vabs.f8.128B" => "__builtin_HEXAGON_V6_vabs_f8_128B",
"V6.vabs.hf" => "__builtin_HEXAGON_V6_vabs_hf",
"V6.vabs.hf.128B" => "__builtin_HEXAGON_V6_vabs_hf_128B",
"V6.vabs.qf16.hf" => "__builtin_HEXAGON_V6_vabs_qf16_hf",
"V6.vabs.qf16.hf.128B" => "__builtin_HEXAGON_V6_vabs_qf16_hf_128B",
"V6.vabs.qf16.qf16" => "__builtin_HEXAGON_V6_vabs_qf16_qf16",
"V6.vabs.qf16.qf16.128B" => "__builtin_HEXAGON_V6_vabs_qf16_qf16_128B",
"V6.vabs.qf32.qf32" => "__builtin_HEXAGON_V6_vabs_qf32_qf32",
"V6.vabs.qf32.qf32.128B" => "__builtin_HEXAGON_V6_vabs_qf32_qf32_128B",
"V6.vabs.qf32.sf" => "__builtin_HEXAGON_V6_vabs_qf32_sf",
"V6.vabs.qf32.sf.128B" => "__builtin_HEXAGON_V6_vabs_qf32_sf_128B",
"V6.vabs.sf" => "__builtin_HEXAGON_V6_vabs_sf",
"V6.vabs.sf.128B" => "__builtin_HEXAGON_V6_vabs_sf_128B",
"V6.vabsb" => "__builtin_HEXAGON_V6_vabsb",
@ -1744,6 +1748,8 @@ fn map_arch_intrinsic(full_name: &str) -> &'static str {
"V6.vaddwsat.128B" => "__builtin_HEXAGON_V6_vaddwsat_128B",
"V6.vaddwsat.dv" => "__builtin_HEXAGON_V6_vaddwsat_dv",
"V6.vaddwsat.dv.128B" => "__builtin_HEXAGON_V6_vaddwsat_dv_128B",
"V6.valign4" => "__builtin_HEXAGON_V6_valign4",
"V6.valign4.128B" => "__builtin_HEXAGON_V6_valign4_128B",
"V6.valignb" => "__builtin_HEXAGON_V6_valignb",
"V6.valignb.128B" => "__builtin_HEXAGON_V6_valignb_128B",
"V6.valignbi" => "__builtin_HEXAGON_V6_valignbi",
@ -1862,14 +1868,30 @@ fn map_arch_intrinsic(full_name: &str) -> &'static str {
"V6.vcl0w.128B" => "__builtin_HEXAGON_V6_vcl0w_128B",
"V6.vcombine" => "__builtin_HEXAGON_V6_vcombine",
"V6.vcombine.128B" => "__builtin_HEXAGON_V6_vcombine_128B",
"V6.vconv.bf.qf32" => "__builtin_HEXAGON_V6_vconv_bf_qf32",
"V6.vconv.bf.qf32.128B" => "__builtin_HEXAGON_V6_vconv_bf_qf32_128B",
"V6.vconv.f8.qf16" => "__builtin_HEXAGON_V6_vconv_f8_qf16",
"V6.vconv.f8.qf16.128B" => "__builtin_HEXAGON_V6_vconv_f8_qf16_128B",
"V6.vconv.h.hf" => "__builtin_HEXAGON_V6_vconv_h_hf",
"V6.vconv.h.hf.128B" => "__builtin_HEXAGON_V6_vconv_h_hf_128B",
"V6.vconv.h.hf.rnd" => "__builtin_HEXAGON_V6_vconv_h_hf_rnd",
"V6.vconv.h.hf.rnd.128B" => "__builtin_HEXAGON_V6_vconv_h_hf_rnd_128B",
"V6.vconv.hf.h" => "__builtin_HEXAGON_V6_vconv_hf_h",
"V6.vconv.hf.h.128B" => "__builtin_HEXAGON_V6_vconv_hf_h_128B",
"V6.vconv.hf.qf16" => "__builtin_HEXAGON_V6_vconv_hf_qf16",
"V6.vconv.hf.qf16.128B" => "__builtin_HEXAGON_V6_vconv_hf_qf16_128B",
"V6.vconv.hf.qf32" => "__builtin_HEXAGON_V6_vconv_hf_qf32",
"V6.vconv.hf.qf32.128B" => "__builtin_HEXAGON_V6_vconv_hf_qf32_128B",
"V6.vconv.qf16.f8" => "__builtin_HEXAGON_V6_vconv_qf16_f8",
"V6.vconv.qf16.f8.128B" => "__builtin_HEXAGON_V6_vconv_qf16_f8_128B",
"V6.vconv.qf16.hf" => "__builtin_HEXAGON_V6_vconv_qf16_hf",
"V6.vconv.qf16.hf.128B" => "__builtin_HEXAGON_V6_vconv_qf16_hf_128B",
"V6.vconv.qf16.qf16" => "__builtin_HEXAGON_V6_vconv_qf16_qf16",
"V6.vconv.qf16.qf16.128B" => "__builtin_HEXAGON_V6_vconv_qf16_qf16_128B",
"V6.vconv.qf32.qf32" => "__builtin_HEXAGON_V6_vconv_qf32_qf32",
"V6.vconv.qf32.qf32.128B" => "__builtin_HEXAGON_V6_vconv_qf32_qf32_128B",
"V6.vconv.qf32.sf" => "__builtin_HEXAGON_V6_vconv_qf32_sf",
"V6.vconv.qf32.sf.128B" => "__builtin_HEXAGON_V6_vconv_qf32_sf_128B",
"V6.vconv.sf.qf32" => "__builtin_HEXAGON_V6_vconv_sf_qf32",
"V6.vconv.sf.qf32.128B" => "__builtin_HEXAGON_V6_vconv_sf_qf32_128B",
"V6.vconv.sf.w" => "__builtin_HEXAGON_V6_vconv_sf_w",
@ -1984,6 +2006,22 @@ fn map_arch_intrinsic(full_name: &str) -> &'static str {
"V6.veqh.or.128B" => "__builtin_HEXAGON_V6_veqh_or_128B",
"V6.veqh.xor" => "__builtin_HEXAGON_V6_veqh_xor",
"V6.veqh.xor.128B" => "__builtin_HEXAGON_V6_veqh_xor_128B",
"V6.veqhf" => "__builtin_HEXAGON_V6_veqhf",
"V6.veqhf.128B" => "__builtin_HEXAGON_V6_veqhf_128B",
"V6.veqhf.and" => "__builtin_HEXAGON_V6_veqhf_and",
"V6.veqhf.and.128B" => "__builtin_HEXAGON_V6_veqhf_and_128B",
"V6.veqhf.or" => "__builtin_HEXAGON_V6_veqhf_or",
"V6.veqhf.or.128B" => "__builtin_HEXAGON_V6_veqhf_or_128B",
"V6.veqhf.xor" => "__builtin_HEXAGON_V6_veqhf_xor",
"V6.veqhf.xor.128B" => "__builtin_HEXAGON_V6_veqhf_xor_128B",
"V6.veqsf" => "__builtin_HEXAGON_V6_veqsf",
"V6.veqsf.128B" => "__builtin_HEXAGON_V6_veqsf_128B",
"V6.veqsf.and" => "__builtin_HEXAGON_V6_veqsf_and",
"V6.veqsf.and.128B" => "__builtin_HEXAGON_V6_veqsf_and_128B",
"V6.veqsf.or" => "__builtin_HEXAGON_V6_veqsf_or",
"V6.veqsf.or.128B" => "__builtin_HEXAGON_V6_veqsf_or_128B",
"V6.veqsf.xor" => "__builtin_HEXAGON_V6_veqsf_xor",
"V6.veqsf.xor.128B" => "__builtin_HEXAGON_V6_veqsf_xor_128B",
"V6.veqw" => "__builtin_HEXAGON_V6_veqw",
"V6.veqw.128B" => "__builtin_HEXAGON_V6_veqw_128B",
"V6.veqw.and" => "__builtin_HEXAGON_V6_veqw_and",
@ -2096,6 +2134,14 @@ fn map_arch_intrinsic(full_name: &str) -> &'static str {
"V6.vgtw.or.128B" => "__builtin_HEXAGON_V6_vgtw_or_128B",
"V6.vgtw.xor" => "__builtin_HEXAGON_V6_vgtw_xor",
"V6.vgtw.xor.128B" => "__builtin_HEXAGON_V6_vgtw_xor_128B",
"V6.vilog2.hf" => "__builtin_HEXAGON_V6_vilog2_hf",
"V6.vilog2.hf.128B" => "__builtin_HEXAGON_V6_vilog2_hf_128B",
"V6.vilog2.qf16" => "__builtin_HEXAGON_V6_vilog2_qf16",
"V6.vilog2.qf16.128B" => "__builtin_HEXAGON_V6_vilog2_qf16_128B",
"V6.vilog2.qf32" => "__builtin_HEXAGON_V6_vilog2_qf32",
"V6.vilog2.qf32.128B" => "__builtin_HEXAGON_V6_vilog2_qf32_128B",
"V6.vilog2.sf" => "__builtin_HEXAGON_V6_vilog2_sf",
"V6.vilog2.sf.128B" => "__builtin_HEXAGON_V6_vilog2_sf_128B",
"V6.vinsertwr" => "__builtin_HEXAGON_V6_vinsertwr",
"V6.vinsertwr.128B" => "__builtin_HEXAGON_V6_vinsertwr_128B",
"V6.vlalignb" => "__builtin_HEXAGON_V6_vlalignb",
@ -2350,6 +2396,14 @@ fn map_arch_intrinsic(full_name: &str) -> &'static str {
"V6.vnavgub.128B" => "__builtin_HEXAGON_V6_vnavgub_128B",
"V6.vnavgw" => "__builtin_HEXAGON_V6_vnavgw",
"V6.vnavgw.128B" => "__builtin_HEXAGON_V6_vnavgw_128B",
"V6.vneg.qf16.hf" => "__builtin_HEXAGON_V6_vneg_qf16_hf",
"V6.vneg.qf16.hf.128B" => "__builtin_HEXAGON_V6_vneg_qf16_hf_128B",
"V6.vneg.qf16.qf16" => "__builtin_HEXAGON_V6_vneg_qf16_qf16",
"V6.vneg.qf16.qf16.128B" => "__builtin_HEXAGON_V6_vneg_qf16_qf16_128B",
"V6.vneg.qf32.qf32" => "__builtin_HEXAGON_V6_vneg_qf32_qf32",
"V6.vneg.qf32.qf32.128B" => "__builtin_HEXAGON_V6_vneg_qf32_qf32_128B",
"V6.vneg.qf32.sf" => "__builtin_HEXAGON_V6_vneg_qf32_sf",
"V6.vneg.qf32.sf.128B" => "__builtin_HEXAGON_V6_vneg_qf32_sf_128B",
"V6.vnormamth" => "__builtin_HEXAGON_V6_vnormamth",
"V6.vnormamth.128B" => "__builtin_HEXAGON_V6_vnormamth_128B",
"V6.vnormamtw" => "__builtin_HEXAGON_V6_vnormamtw",
@ -2684,6 +2738,24 @@ fn map_arch_intrinsic(full_name: &str) -> &'static str {
"iocsrwr.d" => "__builtin_loongarch_iocsrwr_d",
"iocsrwr.h" => "__builtin_loongarch_iocsrwr_h",
"iocsrwr.w" => "__builtin_loongarch_iocsrwr_w",
"lasx.cast.128" => "__builtin_lasx_cast_128",
"lasx.cast.128.d" => "__builtin_lasx_cast_128_d",
"lasx.cast.128.s" => "__builtin_lasx_cast_128_s",
"lasx.concat.128" => "__builtin_lasx_concat_128",
"lasx.concat.128.d" => "__builtin_lasx_concat_128_d",
"lasx.concat.128.s" => "__builtin_lasx_concat_128_s",
"lasx.extract.128.hi" => "__builtin_lasx_extract_128_hi",
"lasx.extract.128.hi.d" => "__builtin_lasx_extract_128_hi_d",
"lasx.extract.128.hi.s" => "__builtin_lasx_extract_128_hi_s",
"lasx.extract.128.lo" => "__builtin_lasx_extract_128_lo",
"lasx.extract.128.lo.d" => "__builtin_lasx_extract_128_lo_d",
"lasx.extract.128.lo.s" => "__builtin_lasx_extract_128_lo_s",
"lasx.insert.128.hi" => "__builtin_lasx_insert_128_hi",
"lasx.insert.128.hi.d" => "__builtin_lasx_insert_128_hi_d",
"lasx.insert.128.hi.s" => "__builtin_lasx_insert_128_hi_s",
"lasx.insert.128.lo" => "__builtin_lasx_insert_128_lo",
"lasx.insert.128.lo.d" => "__builtin_lasx_insert_128_lo_d",
"lasx.insert.128.lo.s" => "__builtin_lasx_insert_128_lo_s",
"lasx.vext2xv.d.b" => "__builtin_lasx_vext2xv_d_b",
"lasx.vext2xv.d.h" => "__builtin_lasx_vext2xv_d_h",
"lasx.vext2xv.d.w" => "__builtin_lasx_vext2xv_d_w",
@ -4950,8 +5022,20 @@ fn map_arch_intrinsic(full_name: &str) -> &'static str {
"f16x2.to.e5m2x2.rn.relu" => "__nvvm_f16x2_to_e5m2x2_rn_relu",
"f2bf16.rn" => "__nvvm_f2bf16_rn",
"f2bf16.rn.relu" => "__nvvm_f2bf16_rn_relu",
"f2bf16.rn.relu.satfinite" => "__nvvm_f2bf16_rn_relu_satfinite",
"f2bf16.rn.satfinite" => "__nvvm_f2bf16_rn_satfinite",
"f2bf16.rz" => "__nvvm_f2bf16_rz",
"f2bf16.rz.relu" => "__nvvm_f2bf16_rz_relu",
"f2bf16.rz.relu.satfinite" => "__nvvm_f2bf16_rz_relu_satfinite",
"f2bf16.rz.satfinite" => "__nvvm_f2bf16_rz_satfinite",
"f2f16.rn" => "__nvvm_f2f16_rn",
"f2f16.rn.relu" => "__nvvm_f2f16_rn_relu",
"f2f16.rn.relu.satfinite" => "__nvvm_f2f16_rn_relu_satfinite",
"f2f16.rn.satfinite" => "__nvvm_f2f16_rn_satfinite",
"f2f16.rz" => "__nvvm_f2f16_rz",
"f2f16.rz.relu" => "__nvvm_f2f16_rz_relu",
"f2f16.rz.relu.satfinite" => "__nvvm_f2f16_rz_relu_satfinite",
"f2f16.rz.satfinite" => "__nvvm_f2f16_rz_satfinite",
"f2h.rn" => "__nvvm_f2h_rn",
"f2h.rn.ftz" => "__nvvm_f2h_rn_ftz",
"f2i.rm" => "__nvvm_f2i_rm",
@ -5035,20 +5119,28 @@ fn map_arch_intrinsic(full_name: &str) -> &'static str {
"ff.to.ue8m0x2.rz.satfinite" => "__nvvm_ff_to_ue8m0x2_rz_satfinite",
"ff2bf16x2.rn" => "__nvvm_ff2bf16x2_rn",
"ff2bf16x2.rn.relu" => "__nvvm_ff2bf16x2_rn_relu",
"ff2bf16x2.rn.relu.satfinite" => "__nvvm_ff2bf16x2_rn_relu_satfinite",
"ff2bf16x2.rn.satfinite" => "__nvvm_ff2bf16x2_rn_satfinite",
"ff2bf16x2.rs" => "__nvvm_ff2bf16x2_rs",
"ff2bf16x2.rs.relu" => "__nvvm_ff2bf16x2_rs_relu",
"ff2bf16x2.rs.relu.satfinite" => "__nvvm_ff2bf16x2_rs_relu_satfinite",
"ff2bf16x2.rs.satfinite" => "__nvvm_ff2bf16x2_rs_satfinite",
"ff2bf16x2.rz" => "__nvvm_ff2bf16x2_rz",
"ff2bf16x2.rz.relu" => "__nvvm_ff2bf16x2_rz_relu",
"ff2bf16x2.rz.relu.satfinite" => "__nvvm_ff2bf16x2_rz_relu_satfinite",
"ff2bf16x2.rz.satfinite" => "__nvvm_ff2bf16x2_rz_satfinite",
"ff2f16x2.rn" => "__nvvm_ff2f16x2_rn",
"ff2f16x2.rn.relu" => "__nvvm_ff2f16x2_rn_relu",
"ff2f16x2.rn.relu.satfinite" => "__nvvm_ff2f16x2_rn_relu_satfinite",
"ff2f16x2.rn.satfinite" => "__nvvm_ff2f16x2_rn_satfinite",
"ff2f16x2.rs" => "__nvvm_ff2f16x2_rs",
"ff2f16x2.rs.relu" => "__nvvm_ff2f16x2_rs_relu",
"ff2f16x2.rs.relu.satfinite" => "__nvvm_ff2f16x2_rs_relu_satfinite",
"ff2f16x2.rs.satfinite" => "__nvvm_ff2f16x2_rs_satfinite",
"ff2f16x2.rz" => "__nvvm_ff2f16x2_rz",
"ff2f16x2.rz.relu" => "__nvvm_ff2f16x2_rz_relu",
"ff2f16x2.rz.relu.satfinite" => "__nvvm_ff2f16x2_rz_relu_satfinite",
"ff2f16x2.rz.satfinite" => "__nvvm_ff2f16x2_rz_satfinite",
"floor.d" => "__nvvm_floor_d",
"floor.f" => "__nvvm_floor_f",
"floor.ftz.f" => "__nvvm_floor_ftz_f",
@ -5942,6 +6034,8 @@ fn map_arch_intrinsic(full_name: &str) -> &'static str {
"altivec.vupklsb" => "__builtin_altivec_vupklsb",
"altivec.vupklsh" => "__builtin_altivec_vupklsh",
"altivec.vupklsw" => "__builtin_altivec_vupklsw",
"amo.ldat" => "__builtin_amo_ldat",
"amo.lwat" => "__builtin_amo_lwat",
"bcdadd" => "__builtin_ppc_bcdadd",
"bcdadd.p" => "__builtin_ppc_bcdadd_p",
"bcdcopysign" => "__builtin_ppc_bcdcopysign",
@ -6202,6 +6296,7 @@ fn map_arch_intrinsic(full_name: &str) -> &'static str {
"vsx.xvminsp" => "__builtin_vsx_xvminsp",
"vsx.xvredp" => "__builtin_vsx_xvredp",
"vsx.xvresp" => "__builtin_vsx_xvresp",
"vsx.xvrlw" => "__builtin_vsx_xvrlw",
"vsx.xvrsqrtedp" => "__builtin_vsx_xvrsqrtedp",
"vsx.xvrsqrtesp" => "__builtin_vsx_xvrsqrtesp",
"vsx.xvtdivdp" => "__builtin_vsx_xvtdivdp",
@ -10158,24 +10253,16 @@ fn map_arch_intrinsic(full_name: &str) -> &'static str {
"stui" => "__builtin_ia32_stui",
"subborrow.u32" => "__builtin_ia32_subborrow_u32",
"subborrow.u64" => "__builtin_ia32_subborrow_u64",
"t2rpntlvwz0" => "__builtin_ia32_t2rpntlvwz0",
"t2rpntlvwz0rs" => "__builtin_ia32_t2rpntlvwz0rs",
"t2rpntlvwz0rst1" => "__builtin_ia32_t2rpntlvwz0rst1",
"t2rpntlvwz0t1" => "__builtin_ia32_t2rpntlvwz0t1",
"t2rpntlvwz1" => "__builtin_ia32_t2rpntlvwz1",
"t2rpntlvwz1rs" => "__builtin_ia32_t2rpntlvwz1rs",
"t2rpntlvwz1rst1" => "__builtin_ia32_t2rpntlvwz1rst1",
"t2rpntlvwz1t1" => "__builtin_ia32_t2rpntlvwz1t1",
"tbm.bextri.u32" => "__builtin_ia32_bextri_u32",
"tbm.bextri.u64" => "__builtin_ia32_bextri_u64",
"tcmmimfp16ps" => "__builtin_ia32_tcmmimfp16ps",
"tcmmimfp16ps.internal" => "__builtin_ia32_tcmmimfp16ps_internal",
"tcmmrlfp16ps" => "__builtin_ia32_tcmmrlfp16ps",
"tcmmrlfp16ps.internal" => "__builtin_ia32_tcmmrlfp16ps_internal",
"tconjtcmmimfp16ps" => "__builtin_ia32_tconjtcmmimfp16ps",
"tconjtcmmimfp16ps.internal" => "__builtin_ia32_tconjtcmmimfp16ps_internal",
"tconjtfp16" => "__builtin_ia32_tconjtfp16",
"tconjtfp16.internal" => "__builtin_ia32_tconjtfp16_internal",
"tcvtrowd2ps" => "__builtin_ia32_tcvtrowd2ps",
"tcvtrowd2ps.internal" => "__builtin_ia32_tcvtrowd2ps_internal",
"tcvtrowps2bf16h" => "__builtin_ia32_tcvtrowps2bf16h",
@ -10225,18 +10312,6 @@ fn map_arch_intrinsic(full_name: &str) -> &'static str {
"tmmultf32ps" => "__builtin_ia32_tmmultf32ps",
"tmmultf32ps.internal" => "__builtin_ia32_tmmultf32ps_internal",
"tpause" => "__builtin_ia32_tpause",
"ttcmmimfp16ps" => "__builtin_ia32_ttcmmimfp16ps",
"ttcmmimfp16ps.internal" => "__builtin_ia32_ttcmmimfp16ps_internal",
"ttcmmrlfp16ps" => "__builtin_ia32_ttcmmrlfp16ps",
"ttcmmrlfp16ps.internal" => "__builtin_ia32_ttcmmrlfp16ps_internal",
"ttdpbf16ps" => "__builtin_ia32_ttdpbf16ps",
"ttdpbf16ps.internal" => "__builtin_ia32_ttdpbf16ps_internal",
"ttdpfp16ps" => "__builtin_ia32_ttdpfp16ps",
"ttdpfp16ps.internal" => "__builtin_ia32_ttdpfp16ps_internal",
"ttmmultf32ps" => "__builtin_ia32_ttmmultf32ps",
"ttmmultf32ps.internal" => "__builtin_ia32_ttmmultf32ps_internal",
"ttransposed" => "__builtin_ia32_ttransposed",
"ttransposed.internal" => "__builtin_ia32_ttransposed_internal",
"umonitor" => "__builtin_ia32_umonitor",
"umwait" => "__builtin_ia32_umwait",
"urdmsr" => "__builtin_ia32_urdmsr",

View file

@ -9,7 +9,7 @@ use gccjit::Type;
use gccjit::{ComparisonOp, Function, FunctionType, RValue, ToRValue, UnaryOp};
#[cfg(feature = "master")]
use rustc_abi::ExternAbi;
use rustc_abi::{BackendRepr, HasDataLayout};
use rustc_abi::{BackendRepr, HasDataLayout, WrappingRange};
use rustc_codegen_ssa::MemFlags;
use rustc_codegen_ssa::base::wants_msvc_seh;
use rustc_codegen_ssa::common::IntPredicate;
@ -20,19 +20,15 @@ use rustc_codegen_ssa::mir::place::{PlaceRef, PlaceValue};
use rustc_codegen_ssa::traits::MiscCodegenMethods;
use rustc_codegen_ssa::traits::{
ArgAbiBuilderMethods, BaseTypeCodegenMethods, BuilderMethods, ConstCodegenMethods,
IntrinsicCallBuilderMethods,
IntrinsicCallBuilderMethods, LayoutTypeCodegenMethods,
};
use rustc_middle::bug;
#[cfg(feature = "master")]
use rustc_middle::ty::layout::FnAbiOf;
use rustc_middle::ty::layout::LayoutOf;
use rustc_middle::ty::layout::{FnAbiOf, LayoutOf};
use rustc_middle::ty::{self, Instance, Ty};
use rustc_span::{Span, Symbol, sym};
use rustc_target::callconv::{ArgAbi, PassMode};
#[cfg(feature = "master")]
use crate::abi::FnAbiGccExt;
use crate::abi::GccType;
use crate::abi::{FnAbiGccExt, GccType};
use crate::builder::Builder;
use crate::common::{SignType, TypeReflection};
use crate::context::CodegenCx;
@ -609,6 +605,94 @@ impl<'a, 'gcc, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tc
Ok(())
}
// Lowers a call to an `llvm.*` intrinsic by declaring (and caching) a matching
// GCC function, lowering the operand values to flat rvalue arguments, and
// emitting the call.
//
// Lookup is two-layered: first by `Instance` (`intrinsic_instances`), then by
// symbol name (`intrinsics`); on a full miss the function is declared with
// `Extern` linkage from the instance's `FnAbi` and inserted into both caches.
//
// NOTE(review): this mutates `self.linkage` on the declaration path — presumably
// `declare`/`gcc_type` reads it; confirm no caller relies on the previous value.
fn codegen_llvm_intrinsic_call(
    &mut self,
    instance: ty::Instance<'tcx>,
    args: &[OperandRef<'tcx, Self::Value>],
    is_cleanup: bool,
) -> Self::Value {
    // Fast path: we already resolved this exact instance to a GCC function.
    let func = if let Some(&func) = self.intrinsic_instances.borrow().get(&instance) {
        func
    } else {
        let sym = self.tcx.symbol_name(instance).name;
        // Second-level cache keyed by symbol name (shared with other callers
        // that declare intrinsics by name).
        let func = if let Some(func) = self.intrinsics.borrow().get(sym) {
            *func
        } else {
            // Not seen before: declare it. Intrinsics are external symbols.
            self.linkage.set(FunctionType::Extern);
            let fn_abi = self.fn_abi_of_instance(instance, ty::List::empty());
            let fn_ty = fn_abi.gcc_type(self);
            let func = match sym {
                "llvm.fma.f16" => {
                    // fma is not a target builtin, but a normal builtin, so we handle it differently
                    // here.
                    self.context.get_builtin_function("fma")
                }
                _ => llvm::intrinsic(sym, self),
            };
            self.intrinsics.borrow_mut().insert(sym.to_string(), func);
            // Record which parameters are passed on the stack for this
            // function, as computed by the GCC ABI lowering.
            self.on_stack_function_params
                .borrow_mut()
                .insert(func, fn_ty.on_stack_param_indices);
            // Function-level attributes are only available with the `master`
            // feature of the gccjit bindings.
            #[cfg(feature = "master")]
            for fn_attr in fn_ty.fn_attributes {
                func.add_attribute(fn_attr);
            }
            crate::attributes::from_fn_attrs(self, func, instance);
            func
        };
        // Populate the per-instance cache so the next call takes the fast path.
        self.intrinsic_instances.borrow_mut().insert(instance, func);
        func
    };
    // The call below goes through a function pointer, so take the address.
    let fn_ptr = func.get_address(None);
    let fn_ty = fn_ptr.get_type();
    // Flatten the operands into the argument list the callee expects:
    // ZST args vanish, pairs contribute two values, by-ref args are loaded.
    let mut llargs = vec![];
    for arg in args {
        match arg.val {
            OperandValue::ZeroSized => {}
            OperandValue::Immediate(_) => llargs.push(arg.immediate()),
            OperandValue::Pair(a, b) => {
                llargs.push(a);
                llargs.push(b);
            }
            OperandValue::Ref(op_place_val) => {
                let mut llval = op_place_val.llval;
                // We can't use `PlaceRef::load` here because the argument
                // may have a type we don't treat as immediate, but the ABI
                // used for this call is passing it by-value. In that case,
                // the load would just produce `OperandValue::Ref` instead
                // of the `OperandValue::Immediate` we need for the call.
                llval = self.load(self.backend_type(arg.layout), llval, op_place_val.align);
                if let BackendRepr::Scalar(scalar) = arg.layout.backend_repr {
                    if scalar.is_bool() {
                        self.range_metadata(llval, WrappingRange { start: 0, end: 1 });
                    }
                    // We store bools as `i8` so we need to truncate to `i1`.
                    llval = self.to_immediate_scalar(llval, scalar);
                }
                llargs.push(llval);
            }
        }
    }
    // FIXME directly use the llvm intrinsic adjustment functions here
    let llret = self.call(fn_ty, None, None, fn_ptr, &llargs, None, None);
    if is_cleanup {
        self.apply_attrs_to_cleanup_callsite(llret);
    }
    llret
}
fn abort(&mut self) {
let func = self.context.get_builtin_function("abort");
let func: RValue<'gcc> = unsafe { std::mem::transmute(func) };

View file

@ -88,3 +88,12 @@ tests/ui/test-attrs/test-panic-while-printing.rs
tests/ui/thir-print/offset_of.rs
tests/ui/iterators/rangefrom-overflow-debug.rs
tests/ui/iterators/rangefrom-overflow-overflow-checks.rs
tests/ui/iterators/iter-filter-count-debug-check.rs
tests/ui/eii/codegen_single_crate.rs
tests/ui/eii/codegen_cross_crate.rs
tests/ui/eii/default/local_crate.rs
tests/ui/eii/multiple_impls.rs
tests/ui/eii/default/call_default.rs
tests/ui/eii/same-symbol.rs
tests/ui/eii/privacy1.rs
tests/ui/eii/default/call_impl.rs

View file

@ -2,6 +2,3 @@
# Prevents un-canonicalized issue links (to avoid wrong issues being linked in r-l/rust)
[issue-links]
# Prevents mentions in commits to avoid users being spammed
[no-mentions]

View file

@ -14,7 +14,7 @@ bitflags = "2.4.1"
gimli = "0.31"
itertools = "0.12"
libc = "0.2"
libloading = { version = "0.9.0", optional = true }
libloading = { version = "0.9.0" }
measureme = "12.0.1"
object = { version = "0.37.0", default-features = false, features = ["std", "read"] }
rustc-demangle = "0.1.21"
@ -38,8 +38,6 @@ rustc_session = { path = "../rustc_session" }
rustc_span = { path = "../rustc_span" }
rustc_symbol_mangling = { path = "../rustc_symbol_mangling" }
rustc_target = { path = "../rustc_target" }
serde = { version = "1", features = ["derive"] }
serde_json = "1"
smallvec = { version = "1.8.1", features = ["union", "may_dangle"] }
tracing = "0.1"
# tidy-alphabetical-end
@ -47,7 +45,7 @@ tracing = "0.1"
[features]
# tidy-alphabetical-start
check_only = ["rustc_llvm/check_only"]
llvm_enzyme = ["dep:libloading"]
llvm_enzyme = []
llvm_offload = []
# tidy-alphabetical-end

View file

@ -1,9 +1,10 @@
codegen_llvm_autodiff_component_unavailable = failed to load our autodiff backend. Did you install it via rustup?
codegen_llvm_autodiff_without_enable = using the autodiff feature requires -Z autodiff=Enable
codegen_llvm_autodiff_without_lto = using the autodiff feature requires setting `lto="fat"` in your Cargo.toml
codegen_llvm_copy_bitcode = failed to copy bitcode to object file: {$err}
codegen_llvm_fixed_x18_invalid_arch = the `-Zfixed-x18` flag is not supported on the `{$arch}` architecture
codegen_llvm_from_llvm_diag = {$message}
@ -18,7 +19,12 @@ codegen_llvm_lto_bitcode_from_rlib = failed to get bitcode from object file for
codegen_llvm_mismatch_data_layout =
data-layout for target `{$rustc_target}`, `{$rustc_layout}`, differs from LLVM target's `{$llvm_target}` default layout, `{$llvm_layout}`
codegen_llvm_offload_without_enable = using the offload feature requires -Z offload=Enable
codegen_llvm_offload_bundleimages_failed = call to BundleImages failed, `host.out` was not created
codegen_llvm_offload_embed_failed = call to EmbedBufferInModule failed, `host.o` was not created
codegen_llvm_offload_no_abs_path = using the `-Z offload=Host=/absolute/path/to/host.out` flag requires an absolute path
codegen_llvm_offload_no_host_out = using the `-Z offload=Host=/absolute/path/to/host.out` flag must point to a `host.out` file
codegen_llvm_offload_nonexisting = the given path/file to `host.out` does not exist. Did you forget to run the device compilation first?
codegen_llvm_offload_without_enable = using the offload feature requires -Z offload=<Device or Host=/absolute/path/to/host.out>
codegen_llvm_offload_without_fat_lto = using the offload feature requires -C lto=fat
codegen_llvm_parse_bitcode = failed to parse bitcode for LTO module

View file

@ -528,7 +528,6 @@ fn thin_lto(
}
}
#[cfg(feature = "llvm_enzyme")]
pub(crate) fn enable_autodiff_settings(ad: &[config::AutoDiff]) {
let mut enzyme = llvm::EnzymeWrapper::get_instance();

View file

@ -568,8 +568,7 @@ pub(crate) unsafe fn llvm_optimize(
// FIXME(ZuseZ4): In a future update we could figure out how to only optimize individual functions getting
// differentiated.
let consider_ad =
cfg!(feature = "llvm_enzyme") && config.autodiff.contains(&config::AutoDiff::Enable);
let consider_ad = config.autodiff.contains(&config::AutoDiff::Enable);
let run_enzyme = autodiff_stage == AutodiffStage::DuringAD;
let print_before_enzyme = config.autodiff.contains(&config::AutoDiff::PrintModBefore);
let print_after_enzyme = config.autodiff.contains(&config::AutoDiff::PrintModAfter);
@ -704,11 +703,9 @@ pub(crate) unsafe fn llvm_optimize(
llvm::set_value_name(new_fn, &name);
}
if cgcx.target_is_like_gpu && config.offload.contains(&config::Offload::Enable) {
if cgcx.target_is_like_gpu && config.offload.contains(&config::Offload::Device) {
let cx =
SimpleCx::new(module.module_llvm.llmod(), module.module_llvm.llcx, cgcx.pointer_size);
// For now we only support up to 10 kernels named kernel_0 ... kernel_9, a follow-up PR is
// introducing a proper offload intrinsic to solve this limitation.
for func in cx.get_functions() {
let offload_kernel = "offload-kernel";
if attributes::has_string_attr(func, offload_kernel) {
@ -777,12 +774,77 @@ pub(crate) unsafe fn llvm_optimize(
)
};
if cgcx.target_is_like_gpu && config.offload.contains(&config::Offload::Enable) {
if cgcx.target_is_like_gpu && config.offload.contains(&config::Offload::Device) {
let device_path = cgcx.output_filenames.path(OutputType::Object);
let device_dir = device_path.parent().unwrap();
let device_out = device_dir.join("host.out");
let device_out_c = path_to_c_string(device_out.as_path());
unsafe {
llvm::LLVMRustBundleImages(module.module_llvm.llmod(), module.module_llvm.tm.raw());
// 1) Bundle device module into offload image host.out (device TM)
let ok = llvm::LLVMRustBundleImages(
module.module_llvm.llmod(),
module.module_llvm.tm.raw(),
device_out_c.as_ptr(),
);
if !ok || !device_out.exists() {
dcx.emit_err(crate::errors::OffloadBundleImagesFailed);
}
}
}
// This assumes that we previously compiled our kernels for a gpu target, which created a
// `host.out` artifact. The user is supposed to provide us with a path to this artifact, we
// don't need any other artifacts from the previous run. We will embed this artifact into our
// LLVM-IR host module, to create a `host.o` ObjectFile, which we will write to disk.
// The last, not yet automated steps uses the `clang-linker-wrapper` to process `host.o`.
if !cgcx.target_is_like_gpu {
if let Some(device_path) = config
.offload
.iter()
.find_map(|o| if let config::Offload::Host(path) = o { Some(path) } else { None })
{
let device_pathbuf = PathBuf::from(device_path);
if device_pathbuf.is_relative() {
dcx.emit_err(crate::errors::OffloadWithoutAbsPath);
} else if device_pathbuf
.file_name()
.and_then(|n| n.to_str())
.is_some_and(|n| n != "host.out")
{
dcx.emit_err(crate::errors::OffloadWrongFileName);
} else if !device_pathbuf.exists() {
dcx.emit_err(crate::errors::OffloadNonexistingPath);
}
let host_path = cgcx.output_filenames.path(OutputType::Object);
let host_dir = host_path.parent().unwrap();
let out_obj = host_dir.join("host.o");
let host_out_c = path_to_c_string(device_pathbuf.as_path());
// 2) Finalize host: lib.bc + host.out -> host.o (host TM)
// We create a full clone of our LLVM host module, since we will embed the device IR
// into it, and this might break caching or incremental compilation otherwise.
let llmod2 = llvm::LLVMCloneModule(module.module_llvm.llmod());
let ok =
unsafe { llvm::LLVMRustOffloadEmbedBufferInModule(llmod2, host_out_c.as_ptr()) };
if !ok {
dcx.emit_err(crate::errors::OffloadEmbedFailed);
}
write_output_file(
dcx,
module.module_llvm.tm.raw(),
config.no_builtins,
llmod2,
&out_obj,
None,
llvm::FileType::ObjectFile,
&cgcx.prof,
true,
);
// We ignore cgcx.save_temps here and unconditionally keep our `host.out` artifact.
// Otherwise, recompiling the host code would fail since we deleted that device artifact
// in the previous host compilation, which would be confusing at best.
}
}
result.into_result().unwrap_or_else(|()| llvm_err(dcx, LlvmError::RunLlvmPasses))
}
@ -819,8 +881,7 @@ pub(crate) fn optimize(
// If we know that we will later run AD, then we disable vectorization and loop unrolling.
// Otherwise we pretend AD is already done and run the normal opt pipeline (=PostAD).
let consider_ad =
cfg!(feature = "llvm_enzyme") && config.autodiff.contains(&config::AutoDiff::Enable);
let consider_ad = config.autodiff.contains(&config::AutoDiff::Enable);
let autodiff_stage = if consider_ad { AutodiffStage::PreAD } else { AutodiffStage::PostAD };
// The embedded bitcode is used to run LTO/ThinLTO.
// The bitcode obtained during the `codegen` phase is no longer suitable for performing LTO.
@ -1075,7 +1136,7 @@ pub(crate) fn codegen(
EmitObj::None => {}
}
record_llvm_cgu_instructions_stats(&cgcx.prof, llmod);
record_llvm_cgu_instructions_stats(&cgcx.prof, &module.name, llmod);
}
// `.dwo` files are only emitted if:
@ -1282,22 +1343,11 @@ fn record_artifact_size(
}
}
fn record_llvm_cgu_instructions_stats(prof: &SelfProfilerRef, llmod: &llvm::Module) {
fn record_llvm_cgu_instructions_stats(prof: &SelfProfilerRef, name: &str, llmod: &llvm::Module) {
if !prof.enabled() {
return;
}
let raw_stats =
llvm::build_string(|s| unsafe { llvm::LLVMRustModuleInstructionStats(llmod, s) })
.expect("cannot get module instruction stats");
#[derive(serde::Deserialize)]
struct InstructionsStats {
module: String,
total: u64,
}
let InstructionsStats { module, total } =
serde_json::from_str(&raw_stats).expect("cannot parse llvm cgu instructions stats");
prof.artifact_size("cgu_instructions", module, total);
let total = unsafe { llvm::LLVMRustModuleInstructionStats(llmod) };
prof.artifact_size("cgu_instructions", name, total);
}

View file

@ -23,13 +23,14 @@ use rustc_middle::dep_graph;
use rustc_middle::middle::codegen_fn_attrs::{CodegenFnAttrs, SanitizerFnAttrs};
use rustc_middle::mir::mono::Visibility;
use rustc_middle::ty::TyCtxt;
use rustc_session::config::DebugInfo;
use rustc_session::config::{DebugInfo, Offload};
use rustc_span::Symbol;
use rustc_target::spec::SanitizerSet;
use super::ModuleLlvm;
use crate::attributes;
use crate::builder::Builder;
use crate::builder::gpu_offload::OffloadGlobals;
use crate::context::CodegenCx;
use crate::llvm::{self, Value};
@ -85,6 +86,19 @@ pub(crate) fn compile_codegen_unit(
let llvm_module = ModuleLlvm::new(tcx, cgu_name.as_str());
{
let mut cx = CodegenCx::new(tcx, cgu, &llvm_module);
// Declare and store globals shared by all offload kernels
//
// These globals are left in the LLVM-IR host module so all kernels can access them.
// They are necessary for correct offload execution. We do this here to simplify the
// `offload` intrinsic, avoiding the need for tracking whether it's the first
// intrinsic call or not.
let has_host_offload =
cx.sess().opts.unstable_opts.offload.iter().any(|o| matches!(o, Offload::Host(_)));
if has_host_offload && !cx.sess().target.is_like_gpu {
cx.offload_globals.replace(Some(OffloadGlobals::declare(&cx)));
}
let mono_items = cx.codegen_unit.items_in_deterministic_order(cx.tcx);
for &(mono_item, data) in &mono_items {
mono_item.predefine::<Builder<'_, '_, '_>>(

View file

@ -1705,7 +1705,7 @@ impl<'a, 'll, CX: Borrow<SCx<'ll>>> GenericBuilder<'a, 'll, CX> {
ret.expect("LLVM does not have support for catchret")
}
fn check_call<'b>(
pub(crate) fn check_call<'b>(
&mut self,
typ: &str,
fn_ty: &'ll Type,

View file

@ -2,17 +2,76 @@ use std::ffi::CString;
use llvm::Linkage::*;
use rustc_abi::Align;
use rustc_codegen_ssa::traits::BaseTypeCodegenMethods;
use rustc_codegen_ssa::traits::{BaseTypeCodegenMethods, BuilderMethods};
use rustc_middle::ty::offload_meta::OffloadMetadata;
use crate::builder::SBuilder;
use crate::builder::Builder;
use crate::common::CodegenCx;
use crate::llvm::AttributePlace::Function;
use crate::llvm::{self, BasicBlock, Linkage, Type, Value};
use crate::llvm::{self, Linkage, Type, Value};
use crate::{SimpleCx, attributes};
// LLVM kernel-independent globals required for offloading
pub(crate) struct OffloadGlobals<'ll> {
    /// Declaration of the `__tgt_target_kernel` launcher and its function type.
    pub launcher_fn: &'ll llvm::Value,
    pub launcher_ty: &'ll llvm::Type,
    /// Named struct type `struct.__tgt_bin_desc` (`{ i32, ptr, ptr, ptr }`).
    pub bin_desc: &'ll llvm::Type,
    /// Named struct type `struct.__tgt_kernel_arguments`.
    pub kernel_args_ty: &'ll llvm::Type,
    /// Named struct type `struct.__tgt_offload_entry`.
    pub offload_entry_ty: &'ll llvm::Type,
    /// Begin/end data-mapper declarations plus their shared function type,
    /// as produced by `gen_tgt_data_mappers`.
    pub begin_mapper: &'ll llvm::Value,
    pub end_mapper: &'ll llvm::Value,
    pub mapper_fn_ty: &'ll llvm::Type,
    /// Private `%struct.ident_t` source-location global (see `generate_at_one`).
    pub ident_t_global: &'ll llvm::Value,
    /// Declarations of `__tgt_register_lib` / `__tgt_unregister_lib`.
    pub register_lib: &'ll llvm::Value,
    pub unregister_lib: &'ll llvm::Value,
    /// Declaration of `__tgt_init_all_rtls`.
    pub init_rtls: &'ll llvm::Value,
}
impl<'ll> OffloadGlobals<'ll> {
    /// Declares every kernel-independent type, function, and global that the
    /// offloading runtime needs, bundling the handles so they can be shared by
    /// all offload kernels in this module.
    pub(crate) fn declare(cx: &CodegenCx<'ll, '_>) -> Self {
        let (launcher_fn, launcher_ty) = generate_launcher(cx);
        let kernel_args_ty = KernelArgsTy::new_decl(cx);
        let offload_entry_ty = TgtOffloadEntry::new_decl(cx);
        let (begin_mapper, _, end_mapper, mapper_fn_ty) = gen_tgt_data_mappers(cx);
        let ident_t_global = generate_at_one(cx);

        // %struct.__tgt_bin_desc = type { i32, ptr, ptr, ptr }
        let ptr_ty = cx.type_ptr();
        let i32_ty = cx.type_i32();
        let bin_desc = cx.type_named_struct("struct.__tgt_bin_desc");
        cx.set_struct_body(bin_desc, &[i32_ty, ptr_ty, ptr_ty, ptr_ty], false);

        // Library (un)registration shares the mapper signature; RTL init takes
        // no arguments.
        let register_lib = declare_offload_fn(cx, "__tgt_register_lib", mapper_fn_ty);
        let unregister_lib = declare_offload_fn(cx, "__tgt_unregister_lib", mapper_fn_ty);
        let init_rtls =
            declare_offload_fn(cx, "__tgt_init_all_rtls", cx.type_func(&[], cx.type_void()));

        Self {
            launcher_fn,
            launcher_ty,
            bin_desc,
            kernel_args_ty,
            offload_entry_ty,
            begin_mapper,
            end_mapper,
            mapper_fn_ty,
            ident_t_global,
            register_lib,
            unregister_lib,
            init_rtls,
        }
    }
}
// ; Function Attrs: nounwind
// declare i32 @__tgt_target_kernel(ptr, i64, i32, i32, ptr, ptr) #2
fn generate_launcher<'ll>(cx: &'ll SimpleCx<'_>) -> (&'ll llvm::Value, &'ll llvm::Type) {
fn generate_launcher<'ll>(cx: &CodegenCx<'ll, '_>) -> (&'ll llvm::Value, &'ll llvm::Type) {
let tptr = cx.type_ptr();
let ti64 = cx.type_i64();
let ti32 = cx.type_i32();
@ -30,7 +89,7 @@ fn generate_launcher<'ll>(cx: &'ll SimpleCx<'_>) -> (&'ll llvm::Value, &'ll llvm
// @1 = private unnamed_addr constant %struct.ident_t { i32 0, i32 2, i32 0, i32 22, ptr @0 }, align 8
// FIXME(offload): @0 should include the file name (e.g. lib.rs) in which the function to be
// offloaded was defined.
fn generate_at_one<'ll>(cx: &'ll SimpleCx<'_>) -> &'ll llvm::Value {
pub(crate) fn generate_at_one<'ll>(cx: &CodegenCx<'ll, '_>) -> &'ll llvm::Value {
let unknown_txt = ";unknown;unknown;0;0;;";
let c_entry_name = CString::new(unknown_txt).unwrap();
let c_val = c_entry_name.as_bytes_with_nul();
@ -68,7 +127,7 @@ pub(crate) struct TgtOffloadEntry {
}
impl TgtOffloadEntry {
pub(crate) fn new_decl<'ll>(cx: &'ll SimpleCx<'_>) -> &'ll llvm::Type {
pub(crate) fn new_decl<'ll>(cx: &CodegenCx<'ll, '_>) -> &'ll llvm::Type {
let offload_entry_ty = cx.type_named_struct("struct.__tgt_offload_entry");
let tptr = cx.type_ptr();
let ti64 = cx.type_i64();
@ -82,7 +141,7 @@ impl TgtOffloadEntry {
}
fn new<'ll>(
cx: &'ll SimpleCx<'_>,
cx: &CodegenCx<'ll, '_>,
region_id: &'ll Value,
llglobal: &'ll Value,
) -> [&'ll Value; 9] {
@ -126,7 +185,7 @@ impl KernelArgsTy {
const OFFLOAD_VERSION: u64 = 3;
const FLAGS: u64 = 0;
const TRIPCOUNT: u64 = 0;
fn new_decl<'ll>(cx: &'ll SimpleCx<'_>) -> &'ll Type {
fn new_decl<'ll>(cx: &CodegenCx<'ll, '_>) -> &'ll Type {
let kernel_arguments_ty = cx.type_named_struct("struct.__tgt_kernel_arguments");
let tptr = cx.type_ptr();
let ti64 = cx.type_i64();
@ -140,8 +199,8 @@ impl KernelArgsTy {
kernel_arguments_ty
}
fn new<'ll>(
cx: &'ll SimpleCx<'_>,
fn new<'ll, 'tcx>(
cx: &CodegenCx<'ll, 'tcx>,
num_args: u64,
memtransfer_types: &'ll Value,
geps: [&'ll Value; 3],
@ -171,7 +230,8 @@ impl KernelArgsTy {
}
// Contains LLVM values needed to manage offloading for a single kernel.
pub(crate) struct OffloadKernelData<'ll> {
#[derive(Copy, Clone)]
pub(crate) struct OffloadKernelGlobals<'ll> {
pub offload_sizes: &'ll llvm::Value,
pub memtransfer_types: &'ll llvm::Value,
pub region_id: &'ll llvm::Value,
@ -179,7 +239,7 @@ pub(crate) struct OffloadKernelData<'ll> {
}
fn gen_tgt_data_mappers<'ll>(
cx: &'ll SimpleCx<'_>,
cx: &CodegenCx<'ll, '_>,
) -> (&'ll llvm::Value, &'ll llvm::Value, &'ll llvm::Value, &'ll llvm::Type) {
let tptr = cx.type_ptr();
let ti64 = cx.type_i64();
@ -241,12 +301,18 @@ pub(crate) fn add_global<'ll>(
// mapped to/from the gpu. It also returns a region_id with the name of this kernel, to be
// concatenated into the list of region_ids.
pub(crate) fn gen_define_handling<'ll>(
cx: &SimpleCx<'ll>,
offload_entry_ty: &'ll llvm::Type,
cx: &CodegenCx<'ll, '_>,
metadata: &[OffloadMetadata],
types: &[&Type],
symbol: &str,
) -> OffloadKernelData<'ll> {
types: &[&'ll Type],
symbol: String,
offload_globals: &OffloadGlobals<'ll>,
) -> OffloadKernelGlobals<'ll> {
if let Some(entry) = cx.offload_kernel_cache.borrow().get(&symbol) {
return *entry;
}
let offload_entry_ty = offload_globals.offload_entry_ty;
// It seems like non-pointer values are automatically mapped. So here, we focus on pointer (or
// reference) types.
let ptr_meta = types.iter().zip(metadata).filter_map(|(&x, meta)| match cx.type_kind(x) {
@ -272,9 +338,9 @@ pub(crate) fn gen_define_handling<'ll>(
let name = format!(".{symbol}.region_id");
let initializer = cx.get_const_i8(0);
let region_id = add_unnamed_global(&cx, &name, initializer, WeakAnyLinkage);
let region_id = add_global(&cx, &name, initializer, WeakAnyLinkage);
let c_entry_name = CString::new(symbol).unwrap();
let c_entry_name = CString::new(symbol.clone()).unwrap();
let c_val = c_entry_name.as_bytes_with_nul();
let offload_entry_name = format!(".offloading.entry_name.{symbol}");
@ -298,11 +364,16 @@ pub(crate) fn gen_define_handling<'ll>(
let c_section_name = CString::new("llvm_offload_entries").unwrap();
llvm::set_section(offload_entry, &c_section_name);
OffloadKernelData { offload_sizes, memtransfer_types, region_id, offload_entry }
let result =
OffloadKernelGlobals { offload_sizes, memtransfer_types, region_id, offload_entry };
cx.offload_kernel_cache.borrow_mut().insert(symbol, result);
result
}
fn declare_offload_fn<'ll>(
cx: &'ll SimpleCx<'_>,
cx: &CodegenCx<'ll, '_>,
name: &str,
ty: &'ll llvm::Type,
) -> &'ll llvm::Value {
@ -335,28 +406,28 @@ fn declare_offload_fn<'ll>(
// 4. set insert point after kernel call.
// 5. generate all the GEPS and stores, to be used in 6)
// 6. generate __tgt_target_data_end calls to move data from the GPU
pub(crate) fn gen_call_handling<'ll>(
cx: &SimpleCx<'ll>,
bb: &BasicBlock,
offload_data: &OffloadKernelData<'ll>,
pub(crate) fn gen_call_handling<'ll, 'tcx>(
builder: &mut Builder<'_, 'll, 'tcx>,
offload_data: &OffloadKernelGlobals<'ll>,
args: &[&'ll Value],
types: &[&Type],
metadata: &[OffloadMetadata],
offload_globals: &OffloadGlobals<'ll>,
) {
let OffloadKernelData { offload_sizes, offload_entry, memtransfer_types, region_id } =
let cx = builder.cx;
let OffloadKernelGlobals { offload_sizes, offload_entry, memtransfer_types, region_id } =
offload_data;
let (tgt_decl, tgt_target_kernel_ty) = generate_launcher(&cx);
let tgt_decl = offload_globals.launcher_fn;
let tgt_target_kernel_ty = offload_globals.launcher_ty;
// %struct.__tgt_bin_desc = type { i32, ptr, ptr, ptr }
let tptr = cx.type_ptr();
let ti32 = cx.type_i32();
let tgt_bin_desc_ty = vec![ti32, tptr, tptr, tptr];
let tgt_bin_desc = cx.type_named_struct("struct.__tgt_bin_desc");
cx.set_struct_body(tgt_bin_desc, &tgt_bin_desc_ty, false);
let tgt_bin_desc = offload_globals.bin_desc;
let tgt_kernel_decl = KernelArgsTy::new_decl(&cx);
let (begin_mapper_decl, _, end_mapper_decl, fn_ty) = gen_tgt_data_mappers(&cx);
let mut builder = SBuilder::build(cx, bb);
let tgt_kernel_decl = offload_globals.kernel_args_ty;
let begin_mapper_decl = offload_globals.begin_mapper;
let end_mapper_decl = offload_globals.end_mapper;
let fn_ty = offload_globals.mapper_fn_ty;
let num_args = types.len() as u64;
let ip = unsafe { llvm::LLVMRustGetInsertPoint(&builder.llbuilder) };
@ -378,9 +449,8 @@ pub(crate) fn gen_call_handling<'ll>(
// Step 0)
// %struct.__tgt_bin_desc = type { i32, ptr, ptr, ptr }
// %6 = alloca %struct.__tgt_bin_desc, align 8
let llfn = unsafe { llvm::LLVMGetBasicBlockParent(bb) };
unsafe {
llvm::LLVMRustPositionBuilderPastAllocas(&builder.llbuilder, llfn);
llvm::LLVMRustPositionBuilderPastAllocas(&builder.llbuilder, builder.llfn());
}
let tgt_bin_desc_alloca = builder.direct_alloca(tgt_bin_desc, Align::EIGHT, "EmptyDesc");
@ -413,16 +483,16 @@ pub(crate) fn gen_call_handling<'ll>(
}
let mapper_fn_ty = cx.type_func(&[cx.type_ptr()], cx.type_void());
let register_lib_decl = declare_offload_fn(&cx, "__tgt_register_lib", mapper_fn_ty);
let unregister_lib_decl = declare_offload_fn(&cx, "__tgt_unregister_lib", mapper_fn_ty);
let register_lib_decl = offload_globals.register_lib;
let unregister_lib_decl = offload_globals.unregister_lib;
let init_ty = cx.type_func(&[], cx.type_void());
let init_rtls_decl = declare_offload_fn(cx, "__tgt_init_all_rtls", init_ty);
let init_rtls_decl = offload_globals.init_rtls;
// FIXME(offload): Later we want to add them to the wrapper code, rather than our main function.
// call void @__tgt_register_lib(ptr noundef %6)
builder.call(mapper_fn_ty, register_lib_decl, &[tgt_bin_desc_alloca], None);
builder.call(mapper_fn_ty, None, None, register_lib_decl, &[tgt_bin_desc_alloca], None, None);
// call void @__tgt_init_all_rtls()
builder.call(init_ty, init_rtls_decl, &[], None);
builder.call(init_ty, None, None, init_rtls_decl, &[], None, None);
for i in 0..num_args {
let idx = cx.get_const_i32(i);
@ -437,15 +507,15 @@ pub(crate) fn gen_call_handling<'ll>(
// For now we have a very simplistic indexing scheme into our
// offload_{baseptrs,ptrs,sizes}. We will probably improve this along with our gpu frontend pr.
fn get_geps<'a, 'll>(
builder: &mut SBuilder<'a, 'll>,
cx: &'ll SimpleCx<'ll>,
fn get_geps<'ll, 'tcx>(
builder: &mut Builder<'_, 'll, 'tcx>,
ty: &'ll Type,
ty2: &'ll Type,
a1: &'ll Value,
a2: &'ll Value,
a4: &'ll Value,
) -> [&'ll Value; 3] {
let cx = builder.cx;
let i32_0 = cx.get_const_i32(0);
let gep1 = builder.inbounds_gep(ty, a1, &[i32_0, i32_0]);
@ -454,9 +524,8 @@ pub(crate) fn gen_call_handling<'ll>(
[gep1, gep2, gep3]
}
fn generate_mapper_call<'a, 'll>(
builder: &mut SBuilder<'a, 'll>,
cx: &'ll SimpleCx<'ll>,
fn generate_mapper_call<'ll, 'tcx>(
builder: &mut Builder<'_, 'll, 'tcx>,
geps: [&'ll Value; 3],
o_type: &'ll Value,
fn_to_call: &'ll Value,
@ -464,20 +533,20 @@ pub(crate) fn gen_call_handling<'ll>(
num_args: u64,
s_ident_t: &'ll Value,
) {
let cx = builder.cx;
let nullptr = cx.const_null(cx.type_ptr());
let i64_max = cx.get_const_i64(u64::MAX);
let num_args = cx.get_const_i32(num_args);
let args =
vec![s_ident_t, i64_max, num_args, geps[0], geps[1], geps[2], o_type, nullptr, nullptr];
builder.call(fn_ty, fn_to_call, &args, None);
builder.call(fn_ty, None, None, fn_to_call, &args, None, None);
}
// Step 2)
let s_ident_t = generate_at_one(&cx);
let geps = get_geps(&mut builder, &cx, ty, ty2, a1, a2, a4);
let s_ident_t = offload_globals.ident_t_global;
let geps = get_geps(builder, ty, ty2, a1, a2, a4);
generate_mapper_call(
&mut builder,
&cx,
builder,
geps,
memtransfer_types,
begin_mapper_decl,
@ -504,14 +573,13 @@ pub(crate) fn gen_call_handling<'ll>(
region_id,
a5,
];
builder.call(tgt_target_kernel_ty, tgt_decl, &args, None);
builder.call(tgt_target_kernel_ty, None, None, tgt_decl, &args, None, None);
// %41 = call i32 @__tgt_target_kernel(ptr @1, i64 -1, i32 2097152, i32 256, ptr @.kernel_1.region_id, ptr %kernel_args)
// Step 4)
let geps = get_geps(&mut builder, &cx, ty, ty2, a1, a2, a4);
let geps = get_geps(builder, ty, ty2, a1, a2, a4);
generate_mapper_call(
&mut builder,
&cx,
builder,
geps,
memtransfer_types,
end_mapper_decl,
@ -520,7 +588,5 @@ pub(crate) fn gen_call_handling<'ll>(
s_ident_t,
);
builder.call(mapper_fn_ty, unregister_lib_decl, &[tgt_bin_desc_alloca], None);
drop(builder);
builder.call(mapper_fn_ty, None, None, unregister_lib_decl, &[tgt_bin_desc_alloca], None, None);
}

View file

@ -35,6 +35,7 @@ use smallvec::SmallVec;
use crate::abi::to_llvm_calling_convention;
use crate::back::write::to_llvm_code_model;
use crate::builder::gpu_offload::{OffloadGlobals, OffloadKernelGlobals};
use crate::callee::get_fn;
use crate::debuginfo::metadata::apply_vcall_visibility_metadata;
use crate::llvm::{self, Metadata, MetadataKindId, Module, Type, Value};
@ -100,6 +101,8 @@ pub(crate) struct FullCx<'ll, 'tcx> {
/// Cache instances of monomorphic and polymorphic items
pub instances: RefCell<FxHashMap<Instance<'tcx>, &'ll Value>>,
/// Cache instances of intrinsics
pub intrinsic_instances: RefCell<FxHashMap<Instance<'tcx>, &'ll Value>>,
/// Cache generated vtables
pub vtables: RefCell<FxHashMap<(Ty<'tcx>, Option<ty::ExistentialTraitRef<'tcx>>), &'ll Value>>,
/// Cache of constant strings,
@ -156,6 +159,12 @@ pub(crate) struct FullCx<'ll, 'tcx> {
/// Cache of Objective-C selector references
pub objc_selrefs: RefCell<FxHashMap<Symbol, &'ll Value>>,
/// Globals shared by the offloading runtime
pub offload_globals: RefCell<Option<OffloadGlobals<'ll>>>,
/// Cache of kernel-specific globals
pub offload_kernel_cache: RefCell<FxHashMap<String, OffloadKernelGlobals<'ll>>>,
}
fn to_llvm_tls_model(tls_model: TlsModel) -> llvm::ThreadLocalMode {
@ -620,6 +629,7 @@ impl<'ll, 'tcx> CodegenCx<'ll, 'tcx> {
tls_model,
codegen_unit,
instances: Default::default(),
intrinsic_instances: Default::default(),
vtables: Default::default(),
const_str_cache: Default::default(),
const_globals: Default::default(),
@ -639,6 +649,8 @@ impl<'ll, 'tcx> CodegenCx<'ll, 'tcx> {
objc_class_t: Cell::new(None),
objc_classrefs: Default::default(),
objc_selrefs: Default::default(),
offload_globals: Default::default(),
offload_kernel_cache: Default::default(),
},
PhantomData,
)

View file

@ -1,8 +1,37 @@
use std::ptr;
use libc::c_uint;
use rustc_abi::Align;
use crate::llvm::debuginfo::DIBuilder;
use crate::llvm::{self, ToLlvmBool};
use crate::llvm::{self, Module, ToLlvmBool};
/// Owning pointer to a `DIBuilder<'ll>` that will dispose of the builder
/// when dropped. Use `.as_ref()` to get the underlying `&DIBuilder`
/// needed for debuginfo FFI calls.
pub(crate) struct DIBuilderBox<'ll> {
    // Non-null owning handle to the LLVM-side builder; released in `Drop`
    // via `LLVMDisposeDIBuilder`.
    raw: ptr::NonNull<DIBuilder<'ll>>,
}
impl<'ll> DIBuilderBox<'ll> {
    /// Creates a fresh `DIBuilder` for `llmod`; the returned wrapper owns the
    /// builder and disposes of it when dropped.
    pub(crate) fn new(llmod: &'ll Module) -> Self {
        let builder = unsafe { llvm::LLVMCreateDIBuilder(llmod) };
        // A null result would indicate a broken LLVM binding, so fail loudly.
        Self { raw: ptr::NonNull::new(builder).unwrap() }
    }

    /// Borrows the underlying `DIBuilder` for debuginfo FFI calls.
    pub(crate) fn as_ref(&self) -> &DIBuilder<'ll> {
        // SAFETY: `self.raw` is an owning non-null pointer, so the returned
        // `&DIBuilder` is valid for as long as `&self` is.
        unsafe { self.raw.as_ref() }
    }
}
impl<'ll> Drop for DIBuilderBox<'ll> {
    fn drop(&mut self) {
        // Release the LLVM-side builder this wrapper owns.
        unsafe { llvm::LLVMDisposeDIBuilder(self.raw) };
    }
}
/// Extension trait for defining safe wrappers and helper methods on
/// `&DIBuilder<'ll>`, without requiring it to be defined in the same crate.

View file

@ -38,8 +38,9 @@ use self::namespace::mangled_name_of_instance;
use self::utils::{DIB, create_DIArray, is_node_local_to_unit};
use crate::builder::Builder;
use crate::common::{AsCCharPtr, CodegenCx};
use crate::debuginfo::di_builder::DIBuilderBox;
use crate::llvm::debuginfo::{
DIArray, DIBuilderBox, DIFile, DIFlags, DILexicalBlock, DILocation, DISPFlags, DIScope,
DIArray, DIFile, DIFlags, DILexicalBlock, DILocation, DISPFlags, DIScope,
DITemplateTypeParameter, DIType, DIVariable,
};
use crate::llvm::{self, Value};

View file

@ -32,6 +32,10 @@ impl<G: EmissionGuarantee> Diagnostic<'_, G> for ParseTargetMachineConfig<'_> {
}
}
#[derive(Diagnostic)]
#[diag(codegen_llvm_autodiff_component_unavailable)]
// Emitted when autodiff is enabled but the Enzyme wrapper fails to load
// (see the `EnzymeWrapper::get_or_init` error path in backend init).
pub(crate) struct AutoDiffComponentUnavailable;
#[derive(Diagnostic)]
#[diag(codegen_llvm_autodiff_without_lto)]
pub(crate) struct AutoDiffWithoutLto;
@ -48,6 +52,26 @@ pub(crate) struct OffloadWithoutEnable;
#[diag(codegen_llvm_offload_without_fat_lto)]
pub(crate) struct OffloadWithoutFatLTO;
#[derive(Diagnostic)]
#[diag(codegen_llvm_offload_no_abs_path)]
// Offload diagnostic keyed `codegen_llvm_offload_no_abs_path`; presumably the
// supplied offload path was not absolute — confirm against the emitting site.
pub(crate) struct OffloadWithoutAbsPath;

#[derive(Diagnostic)]
#[diag(codegen_llvm_offload_no_host_out)]
// Offload diagnostic keyed `codegen_llvm_offload_no_host_out`; presumably an
// unexpected host output file name — confirm against the emitting site.
pub(crate) struct OffloadWrongFileName;

#[derive(Diagnostic)]
#[diag(codegen_llvm_offload_nonexisting)]
// Emitted when a path required for offloading does not exist.
pub(crate) struct OffloadNonexistingPath;

#[derive(Diagnostic)]
#[diag(codegen_llvm_offload_bundleimages_failed)]
// Offload diagnostic keyed `codegen_llvm_offload_bundleimages_failed`.
pub(crate) struct OffloadBundleImagesFailed;

#[derive(Diagnostic)]
#[diag(codegen_llvm_offload_embed_failed)]
// Emitted when `LLVMRustOffloadEmbedBufferInModule` reports failure while
// embedding the device image into the host module.
pub(crate) struct OffloadEmbedFailed;
#[derive(Diagnostic)]
#[diag(codegen_llvm_lto_bitcode_from_rlib)]
pub(crate) struct LtoBitcodeFromRlib {

View file

@ -1,7 +1,11 @@
use std::assert_matches::assert_matches;
use std::cmp::Ordering;
use std::ffi::c_uint;
use std::ptr;
use rustc_abi::{Align, BackendRepr, ExternAbi, Float, HasDataLayout, Primitive, Size};
use rustc_abi::{
Align, BackendRepr, ExternAbi, Float, HasDataLayout, Primitive, Size, WrappingRange,
};
use rustc_codegen_ssa::base::{compare_simd_types, wants_msvc_seh, wants_wasm_eh};
use rustc_codegen_ssa::codegen_attrs::autodiff_attrs;
use rustc_codegen_ssa::common::{IntPredicate, TypeKind};
@ -26,8 +30,9 @@ use tracing::debug;
use crate::abi::FnAbiLlvmExt;
use crate::builder::Builder;
use crate::builder::autodiff::{adjust_activity_to_abi, generate_enzyme_call};
use crate::builder::gpu_offload::TgtOffloadEntry;
use crate::builder::gpu_offload::{gen_call_handling, gen_define_handling};
use crate::context::CodegenCx;
use crate::declare::declare_raw_fn;
use crate::errors::{
AutoDiffWithoutEnable, AutoDiffWithoutLto, OffloadWithoutEnable, OffloadWithoutFatLTO,
};
@ -202,13 +207,7 @@ impl<'ll, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> {
return Ok(());
}
sym::offload => {
if !tcx
.sess
.opts
.unstable_opts
.offload
.contains(&rustc_session::config::Offload::Enable)
{
if tcx.sess.opts.unstable_opts.offload.is_empty() {
let _ = tcx.dcx().emit_almost_fatal(OffloadWithoutEnable);
}
@ -351,7 +350,7 @@ impl<'ll, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> {
_ => bug!(),
};
let ptr = args[0].immediate();
let locality = fn_args.const_at(1).to_value().valtree.unwrap_leaf().to_i32();
let locality = fn_args.const_at(1).to_leaf().to_i32();
self.call_intrinsic(
"llvm.prefetch",
&[self.val_ty(ptr)],
@ -639,6 +638,99 @@ impl<'ll, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> {
Ok(())
}
/// Builds a direct call to the symbol for `instance`, declaring and caching
/// the LLVM function on first use (see `intrinsic_instances`).
///
/// `args` are the already-evaluated operands, flattened to scalar LLVM values
/// below; when `is_cleanup` is set, cleanup-callsite attributes are applied to
/// the emitted call. Returns the call instruction's result value.
fn codegen_llvm_intrinsic_call(
    &mut self,
    instance: ty::Instance<'tcx>,
    args: &[OperandRef<'tcx, Self::Value>],
    is_cleanup: bool,
) -> Self::Value {
    let tcx = self.tcx();

    // FIXME remove usage of fn_abi
    let fn_abi = self.fn_abi_of_instance(instance, ty::List::empty());
    // Indirect (by-pointer) returns are not handled by this path.
    assert!(!fn_abi.ret.is_indirect());
    let fn_ty = fn_abi.llvm_type(self);

    // Look up the cached declaration for this instance, or declare it now and
    // remember it for subsequent calls.
    let fn_ptr = if let Some(&llfn) = self.intrinsic_instances.borrow().get(&instance) {
        llfn
    } else {
        let sym = tcx.symbol_name(instance).name;
        // FIXME use get_intrinsic
        let llfn = if let Some(llfn) = self.get_declared_value(sym) {
            llfn
        } else {
            // Function addresses in Rust are never significant, allowing functions to
            // be merged.
            let llfn = declare_raw_fn(
                self,
                sym,
                fn_abi.llvm_cconv(self),
                llvm::UnnamedAddr::Global,
                llvm::Visibility::Default,
                fn_ty,
            );
            fn_abi.apply_attrs_llfn(self, llfn, Some(instance));
            llfn
        };
        self.intrinsic_instances.borrow_mut().insert(instance, llfn);
        llfn
    };

    // Flatten each operand into the scalar LLVM value(s) the callee expects.
    let mut llargs = vec![];
    for arg in args {
        match arg.val {
            OperandValue::ZeroSized => {}
            OperandValue::Immediate(_) => llargs.push(arg.immediate()),
            OperandValue::Pair(a, b) => {
                llargs.push(a);
                llargs.push(b);
            }
            OperandValue::Ref(op_place_val) => {
                let mut llval = op_place_val.llval;
                // We can't use `PlaceRef::load` here because the argument
                // may have a type we don't treat as immediate, but the ABI
                // used for this call is passing it by-value. In that case,
                // the load would just produce `OperandValue::Ref` instead
                // of the `OperandValue::Immediate` we need for the call.
                llval = self.load(self.backend_type(arg.layout), llval, op_place_val.align);
                if let BackendRepr::Scalar(scalar) = arg.layout.backend_repr {
                    if scalar.is_bool() {
                        self.range_metadata(llval, WrappingRange { start: 0, end: 1 });
                    }
                    // We store bools as `i8` so we need to truncate to `i1`.
                    llval = self.to_immediate_scalar(llval, scalar);
                }
                llargs.push(llval);
            }
        }
    }

    debug!("call intrinsic {:?} with args ({:?})", instance, llargs);

    let args = self.check_call("call", fn_ty, fn_ptr, &llargs);
    // Emit the call with no operand bundles: `ptr::dangling()` with length 0
    // stands in for an empty bundle array.
    let llret = unsafe {
        llvm::LLVMBuildCallWithOperandBundles(
            self.llbuilder,
            fn_ty,
            fn_ptr,
            args.as_ptr() as *const &llvm::Value,
            args.len() as c_uint,
            ptr::dangling(),
            0,
            c"".as_ptr(),
        )
    };
    if is_cleanup {
        self.apply_attrs_to_cleanup_callsite(llret);
    }
    llret
}
/// Emits an `llvm.trap` intrinsic call, aborting execution at this point.
fn abort(&mut self) {
    self.call_intrinsic("llvm.trap", &[], &[]);
}
@ -1295,8 +1387,6 @@ fn codegen_offload<'ll, 'tcx>(
let args = get_args_from_tuple(bx, args[1], fn_target);
let target_symbol = symbol_name_for_instance_in_crate(tcx, fn_target, LOCAL_CRATE);
let offload_entry_ty = TgtOffloadEntry::new_decl(&cx);
let sig = tcx.fn_sig(fn_target.def_id()).skip_binder().skip_binder();
let inputs = sig.inputs();
@ -1304,17 +1394,16 @@ fn codegen_offload<'ll, 'tcx>(
let types = inputs.iter().map(|ty| cx.layout_of(*ty).llvm_type(cx)).collect::<Vec<_>>();
let offload_data = crate::builder::gpu_offload::gen_define_handling(
cx,
offload_entry_ty,
&metadata,
&types,
&target_symbol,
);
// FIXME(Sa4dUs): pass the original builder once we separate kernel launch logic from globals
let bb = unsafe { llvm::LLVMGetInsertBlock(bx.llbuilder) };
crate::builder::gpu_offload::gen_call_handling(cx, bb, &offload_data, &args, &types, &metadata);
let offload_globals_ref = cx.offload_globals.borrow();
let offload_globals = match offload_globals_ref.as_ref() {
Some(globals) => globals,
None => {
// Offload is not initialized, cannot continue
return;
}
};
let offload_data = gen_define_handling(&cx, &metadata, &types, target_symbol, offload_globals);
gen_call_handling(bx, &offload_data, &args, &types, &metadata, offload_globals);
}
fn get_args_from_tuple<'ll, 'tcx>(
@ -1536,7 +1625,7 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
}
if name == sym::simd_shuffle_const_generic {
let idx = fn_args[2].expect_const().to_value().valtree.unwrap_branch();
let idx = fn_args[2].expect_const().to_branch();
let n = idx.len() as u64;
let (out_len, out_ty) = require_simd!(ret_ty, SimdReturn);
@ -1555,7 +1644,7 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
.iter()
.enumerate()
.map(|(arg_idx, val)| {
let idx = val.unwrap_leaf().to_i32();
let idx = val.to_leaf().to_i32();
if idx >= i32::try_from(total_len).unwrap() {
bx.sess().dcx().emit_err(InvalidMonomorphization::SimdIndexOutOfBounds {
span,
@ -1967,9 +2056,7 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
// those lanes whose `mask` bit is enabled.
// The memory addresses corresponding to the “off” lanes are not accessed.
let alignment = fn_args[3].expect_const().to_value().valtree.unwrap_branch()[0]
.unwrap_leaf()
.to_simd_alignment();
let alignment = fn_args[3].expect_const().to_branch()[0].to_leaf().to_simd_alignment();
// The element type of the "mask" argument must be a signed integer type of any width
let mask_ty = in_ty;
@ -2062,9 +2149,7 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
// those lanes whose `mask` bit is enabled.
// The memory addresses corresponding to the “off” lanes are not accessed.
let alignment = fn_args[3].expect_const().to_value().valtree.unwrap_branch()[0]
.unwrap_leaf()
.to_simd_alignment();
let alignment = fn_args[3].expect_const().to_branch()[0].to_leaf().to_simd_alignment();
// The element type of the "mask" argument must be a signed integer type of any width
let mask_ty = in_ty;

View file

@ -5,7 +5,6 @@
//! This API is completely unstable and subject to change.
// tidy-alphabetical-start
#![cfg_attr(bootstrap, feature(slice_as_array))]
#![feature(assert_matches)]
#![feature(extern_types)]
#![feature(file_buffered)]
@ -13,6 +12,7 @@
#![feature(impl_trait_in_assoc_type)]
#![feature(iter_intersperse)]
#![feature(macro_derive)]
#![feature(once_cell_try)]
#![feature(trim_prefix_suffix)]
#![feature(try_blocks)]
// tidy-alphabetical-end
@ -241,13 +241,17 @@ impl CodegenBackend for LlvmCodegenBackend {
fn init(&self, sess: &Session) {
llvm_util::init(sess); // Make sure llvm is inited
#[cfg(feature = "llvm_enzyme")]
// autodiff is based on Enzyme, a library which we might not have available, when it was
// neither build, nor downloaded via rustup. If autodiff is used, but not available we emit
// an early error here and abort compilation.
{
use rustc_session::config::AutoDiff;
use crate::back::lto::enable_autodiff_settings;
if sess.opts.unstable_opts.autodiff.contains(&AutoDiff::Enable) {
drop(llvm::EnzymeWrapper::get_or_init(&sess.opts.sysroot));
if let Err(_) = llvm::EnzymeWrapper::get_or_init(&sess.opts.sysroot) {
sess.dcx().emit_fatal(crate::errors::AutoDiffComponentUnavailable);
}
enable_autodiff_settings(&sess.opts.unstable_opts.autodiff);
}
}

View file

@ -1,6 +1,6 @@
//! Conversions from backend-independent data types to/from LLVM FFI types.
use rustc_codegen_ssa::common::{AtomicRmwBinOp, IntPredicate, RealPredicate};
use rustc_codegen_ssa::common::{AtomicRmwBinOp, IntPredicate, RealPredicate, TypeKind};
use rustc_middle::ty::AtomicOrdering;
use rustc_session::config::DebugInfo;
use rustc_target::spec::SymbolVisibility;
@ -9,10 +9,22 @@ use crate::llvm;
/// Helper trait for converting backend-independent types to LLVM-specific
/// types, for FFI purposes.
///
/// FIXME(#147327): These trait/method names were chosen to avoid churn in
/// existing code, but are not great and could probably be made clearer.
pub(crate) trait FromGeneric<T> {
    /// Converts the backend-independent value `other` into its LLVM FFI
    /// counterpart.
    fn from_generic(other: T) -> Self;
}
/// Helper trait for converting LLVM-specific types to backend-independent
/// types, for FFI purposes.
///
/// FIXME(#147327): These trait/method names were chosen to avoid churn in
/// existing code, but are not great and could probably be made clearer.
pub(crate) trait ToGeneric<T> {
    /// Converts this LLVM FFI value into its backend-independent counterpart.
    fn to_generic(&self) -> T;
}
impl FromGeneric<SymbolVisibility> for llvm::Visibility {
fn from_generic(visibility: SymbolVisibility) -> Self {
match visibility {
@ -113,3 +125,29 @@ impl FromGeneric<DebugInfo> for llvm::debuginfo::DebugEmissionKind {
}
}
}
impl ToGeneric<TypeKind> for llvm::TypeKind {
    fn to_generic(&self) -> TypeKind {
        // Exhaustive one-to-one mapping: adding a variant to either enum will
        // surface here as a compile error rather than a silent mismatch.
        match self {
            Self::Void => TypeKind::Void,
            Self::Half => TypeKind::Half,
            Self::Float => TypeKind::Float,
            Self::Double => TypeKind::Double,
            Self::X86_FP80 => TypeKind::X86_FP80,
            Self::FP128 => TypeKind::FP128,
            Self::PPC_FP128 => TypeKind::PPC_FP128,
            Self::Label => TypeKind::Label,
            Self::Integer => TypeKind::Integer,
            Self::Function => TypeKind::Function,
            Self::Struct => TypeKind::Struct,
            Self::Array => TypeKind::Array,
            Self::Pointer => TypeKind::Pointer,
            Self::Vector => TypeKind::Vector,
            Self::Metadata => TypeKind::Metadata,
            Self::Token => TypeKind::Token,
            Self::ScalableVector => TypeKind::ScalableVector,
            Self::BFloat => TypeKind::BFloat,
            Self::X86_AMX => TypeKind::X86_AMX,
        }
    }
}

View file

@ -86,10 +86,8 @@ pub(crate) enum LLVMRustVerifierFailureAction {
LLVMReturnStatusAction = 2,
}
#[cfg(feature = "llvm_enzyme")]
pub(crate) use self::Enzyme_AD::*;
#[cfg(feature = "llvm_enzyme")]
pub(crate) mod Enzyme_AD {
use std::ffi::{c_char, c_void};
use std::sync::{Mutex, MutexGuard, OnceLock};
@ -199,15 +197,13 @@ pub(crate) mod Enzyme_AD {
/// Safe to call multiple times - subsequent calls are no-ops due to OnceLock.
pub(crate) fn get_or_init(
sysroot: &rustc_session::config::Sysroot,
) -> MutexGuard<'static, Self> {
ENZYME_INSTANCE
.get_or_init(|| {
Self::call_dynamic(sysroot)
.unwrap_or_else(|e| bug!("failed to load Enzyme: {e}"))
.into()
})
.lock()
.unwrap()
) -> Result<MutexGuard<'static, Self>, Box<dyn std::error::Error>> {
let mtx: &'static Mutex<EnzymeWrapper> = ENZYME_INSTANCE.get_or_try_init(|| {
let w = Self::call_dynamic(sysroot)?;
Ok::<_, Box<dyn std::error::Error>>(Mutex::new(w))
})?;
Ok(mtx.lock().unwrap())
}
/// Get the EnzymeWrapper instance. Panics if not initialized.
@ -452,147 +448,6 @@ pub(crate) mod Enzyme_AD {
}
}
#[cfg(not(feature = "llvm_enzyme"))]
pub(crate) use self::Fallback_AD::*;
/// Stand-in for the Enzyme autodiff bindings, compiled only when the
/// `llvm_enzyme` feature is disabled. It mirrors the API surface of the real
/// `Enzyme_AD` module so that call sites compile unchanged, but every method
/// panics via `unimplemented!()` — reaching any of them means autodiff was
/// requested from a compiler built without Enzyme support.
#[cfg(not(feature = "llvm_enzyme"))]
pub(crate) mod Fallback_AD {
    #![allow(unused_variables)]

    use std::ffi::c_void;
    use std::sync::{Mutex, MutexGuard};

    use libc::c_char;
    use rustc_codegen_ssa::back::write::CodegenContext;
    use rustc_codegen_ssa::traits::WriteBackendMethods;

    use super::{CConcreteType, CTypeTreeRef, Context, EnzymeTypeTree};

    /// Shape-compatible stand-in for the real `EnzymeWrapper`; the single
    /// field keeps the struct layout-plausible but is never populated.
    pub(crate) struct EnzymeWrapper {
        pub registerEnzymeAndPassPipeline: *const c_void,
    }

    impl EnzymeWrapper {
        // --- Lifecycle stubs: always panic, Enzyme cannot be loaded. ---

        pub(crate) fn get_or_init(
            _sysroot: &rustc_session::config::Sysroot,
        ) -> MutexGuard<'static, Self> {
            unimplemented!("Enzyme not available: build with llvm_enzyme feature")
        }

        pub(crate) fn init<'a, B: WriteBackendMethods>(
            _cgcx: &'a CodegenContext<B>,
        ) -> &'static Mutex<Self> {
            unimplemented!("Enzyme not available: build with llvm_enzyme feature")
        }

        pub(crate) fn get_instance() -> MutexGuard<'static, Self> {
            unimplemented!("Enzyme not available: build with llvm_enzyme feature")
        }

        // --- Type-tree construction/manipulation stubs. ---

        pub(crate) fn new_type_tree(&self) -> CTypeTreeRef {
            unimplemented!()
        }

        pub(crate) fn new_type_tree_ct(
            &self,
            t: CConcreteType,
            ctx: &Context,
        ) -> *mut EnzymeTypeTree {
            unimplemented!()
        }

        pub(crate) fn new_type_tree_tr(&self, tree: CTypeTreeRef) -> CTypeTreeRef {
            unimplemented!()
        }

        pub(crate) fn free_type_tree(&self, tree: CTypeTreeRef) {
            unimplemented!()
        }

        pub(crate) fn merge_type_tree(&self, tree1: CTypeTreeRef, tree2: CTypeTreeRef) -> bool {
            unimplemented!()
        }

        pub(crate) fn tree_only_eq(&self, tree: CTypeTreeRef, num: i64) {
            unimplemented!()
        }

        pub(crate) fn tree_data0_eq(&self, tree: CTypeTreeRef) {
            unimplemented!()
        }

        pub(crate) fn shift_indicies_eq(
            &self,
            tree: CTypeTreeRef,
            data_layout: *const c_char,
            offset: i64,
            max_size: i64,
            add_offset: u64,
        ) {
            unimplemented!()
        }

        pub(crate) fn tree_insert_eq(
            &self,
            tree: CTypeTreeRef,
            indices: *const i64,
            len: usize,
            ct: CConcreteType,
            ctx: &Context,
        ) {
            unimplemented!()
        }

        // --- Debug/printing stubs. ---

        pub(crate) fn tree_to_string(&self, tree: *mut EnzymeTypeTree) -> *const c_char {
            unimplemented!()
        }

        pub(crate) fn tree_to_string_free(&self, ch: *const c_char) {
            unimplemented!()
        }

        pub(crate) fn get_max_type_depth(&self) -> usize {
            unimplemented!()
        }

        // --- Configuration-flag setter stubs (mirror Enzyme's global knobs). ---

        pub(crate) fn set_inline(&mut self, val: bool) {
            unimplemented!()
        }

        pub(crate) fn set_print_perf(&mut self, print: bool) {
            unimplemented!()
        }

        pub(crate) fn set_print_activity(&mut self, print: bool) {
            unimplemented!()
        }

        pub(crate) fn set_print_type(&mut self, print: bool) {
            unimplemented!()
        }

        pub(crate) fn set_print_type_fun(&mut self, fun_name: &str) {
            unimplemented!()
        }

        pub(crate) fn set_print(&mut self, print: bool) {
            unimplemented!()
        }

        pub(crate) fn set_strict_aliasing(&mut self, strict: bool) {
            unimplemented!()
        }

        pub(crate) fn set_loose_types(&mut self, loose: bool) {
            unimplemented!()
        }

        pub(crate) fn set_rust_rules(&mut self, val: bool) {
            unimplemented!()
        }
    }
}
impl TypeTree {
pub(crate) fn new() -> TypeTree {
let wrapper = EnzymeWrapper::get_instance();

View file

@ -363,33 +363,6 @@ pub(crate) enum TypeKind {
X86_AMX = 19,
}
impl TypeKind {
    /// Converts this LLVM-specific `TypeKind` into the backend-agnostic
    /// `rustc_codegen_ssa::common::TypeKind`.
    ///
    /// The mapping is purely mechanical: every local variant maps to the
    /// identically-named variant of the generic enum. The match is
    /// exhaustive, so adding a variant here forces this conversion to be
    /// updated at compile time.
    pub(crate) fn to_generic(self) -> rustc_codegen_ssa::common::TypeKind {
        use rustc_codegen_ssa::common::TypeKind as Common;
        match self {
            Self::Void => Common::Void,
            Self::Half => Common::Half,
            Self::Float => Common::Float,
            Self::Double => Common::Double,
            Self::X86_FP80 => Common::X86_FP80,
            Self::FP128 => Common::FP128,
            Self::PPC_FP128 => Common::PPC_FP128,
            Self::Label => Common::Label,
            Self::Integer => Common::Integer,
            Self::Function => Common::Function,
            Self::Struct => Common::Struct,
            Self::Array => Common::Array,
            Self::Pointer => Common::Pointer,
            Self::Vector => Common::Vector,
            Self::Metadata => Common::Metadata,
            Self::Token => Common::Token,
            Self::ScalableVector => Common::ScalableVector,
            Self::BFloat => Common::BFloat,
            Self::X86_AMX => Common::X86_AMX,
        }
    }
}
/// LLVMAtomicRmwBinOp
#[derive(Copy, Clone)]
#[repr(C)]
@ -738,12 +711,9 @@ unsafe extern "C" {
pub(crate) type DiagnosticHandlerTy = unsafe extern "C" fn(&DiagnosticInfo, *mut c_void);
pub(crate) mod debuginfo {
use std::ptr;
use bitflags::bitflags;
use super::{InvariantOpaque, Metadata};
use crate::llvm::{self, Module};
/// Opaque target type for references to an LLVM debuginfo builder.
///
@ -756,33 +726,6 @@ pub(crate) mod debuginfo {
#[repr(C)]
pub(crate) struct DIBuilder<'ll>(InvariantOpaque<'ll>);
/// Owning pointer to a `DIBuilder<'ll>` that will dispose of the builder
/// when dropped. Use `.as_ref()` to get the underlying `&DIBuilder`
/// needed for debuginfo FFI calls.
pub(crate) struct DIBuilderBox<'ll> {
    // Non-null pointer obtained from `LLVMCreateDIBuilder`; owned by this
    // wrapper and released exactly once in `Drop`.
    raw: ptr::NonNull<DIBuilder<'ll>>,
}

impl<'ll> DIBuilderBox<'ll> {
    /// Creates a fresh debuginfo builder for `llmod`.
    ///
    /// # Panics
    /// Panics if LLVM returns a null pointer for the new builder.
    pub(crate) fn new(llmod: &'ll Module) -> Self {
        let raw = unsafe { llvm::LLVMCreateDIBuilder(llmod) };
        let raw = ptr::NonNull::new(raw).unwrap();
        Self { raw }
    }

    /// Borrows the underlying builder for use in debuginfo FFI calls.
    pub(crate) fn as_ref(&self) -> &DIBuilder<'ll> {
        // SAFETY: This is an owning pointer, so `&DIBuilder` is valid
        // for as long as `&self` is.
        unsafe { self.raw.as_ref() }
    }
}

impl<'ll> Drop for DIBuilderBox<'ll> {
    fn drop(&mut self) {
        // SAFETY: `raw` was produced by `LLVMCreateDIBuilder` in `new()` and
        // is disposed here exactly once, when the owning box is dropped.
        unsafe { llvm::LLVMDisposeDIBuilder(self.raw) };
    }
}
pub(crate) type DIDescriptor = Metadata;
pub(crate) type DILocation = Metadata;
pub(crate) type DIScope = DIDescriptor;
@ -1723,7 +1666,15 @@ mod Offload {
use super::*;
unsafe extern "C" {
/// Processes the module and writes it in an offload compatible way into a "host.out" file.
pub(crate) fn LLVMRustBundleImages<'a>(M: &'a Module, TM: &'a TargetMachine) -> bool;
pub(crate) fn LLVMRustBundleImages<'a>(
M: &'a Module,
TM: &'a TargetMachine,
host_out: *const c_char,
) -> bool;
pub(crate) unsafe fn LLVMRustOffloadEmbedBufferInModule<'a>(
_M: &'a Module,
_host_out: *const c_char,
) -> bool;
pub(crate) fn LLVMRustOffloadMapper<'a>(OldFn: &'a Value, NewFn: &'a Value);
}
}
@ -1737,7 +1688,17 @@ mod Offload_fallback {
/// Processes the module and writes it in an offload compatible way into a "host.out" file.
/// Marked as unsafe to match the real offload wrapper which is unsafe due to FFI.
#[allow(unused_unsafe)]
pub(crate) unsafe fn LLVMRustBundleImages<'a>(_M: &'a Module, _TM: &'a TargetMachine) -> bool {
pub(crate) unsafe fn LLVMRustBundleImages<'a>(
_M: &'a Module,
_TM: &'a TargetMachine,
_host_out: *const c_char,
) -> bool {
unimplemented!("This rustc version was not built with LLVM Offload support!");
}
pub(crate) unsafe fn LLVMRustOffloadEmbedBufferInModule<'a>(
_M: &'a Module,
_host_out: *const c_char,
) -> bool {
unimplemented!("This rustc version was not built with LLVM Offload support!");
}
#[allow(unused_unsafe)]
@ -2493,7 +2454,7 @@ unsafe extern "C" {
pub(crate) fn LLVMRustModuleBufferLen(p: &ModuleBuffer) -> usize;
pub(crate) fn LLVMRustModuleBufferFree(p: &'static mut ModuleBuffer);
pub(crate) fn LLVMRustModuleCost(M: &Module) -> u64;
pub(crate) fn LLVMRustModuleInstructionStats(M: &Module, Str: &RustString);
pub(crate) fn LLVMRustModuleInstructionStats(M: &Module) -> u64;
pub(crate) fn LLVMRustThinLTOBufferCreate(
M: &Module,

View file

@ -266,6 +266,10 @@ pub(crate) fn to_llvm_features<'a>(sess: &Session, s: &'a str) -> Option<LLVMFea
"leoncasa" => Some(LLVMFeature::new("hasleoncasa")),
s => Some(LLVMFeature::new(s)),
},
Arch::Wasm32 | Arch::Wasm64 => match s {
"gc" if major < 22 => None,
s => Some(LLVMFeature::new(s)),
},
Arch::X86 | Arch::X86_64 => {
match s {
"sse4.2" => Some(LLVMFeature::with_dependencies(
@ -360,25 +364,26 @@ fn update_target_reliable_float_cfg(sess: &Session, cfg: &mut TargetConfig) {
let target_abi = &sess.target.options.abi;
let target_pointer_width = sess.target.pointer_width;
let version = get_version();
let lt_20_1_1 = version < (20, 1, 1);
let lt_21_0_0 = version < (21, 0, 0);
let (major, _, _) = version;
cfg.has_reliable_f16 = match (target_arch, target_os) {
// LLVM crash without neon <https://github.com/llvm/llvm-project/issues/129394> (fixed in llvm20)
// LLVM crash without neon <https://github.com/llvm/llvm-project/issues/129394> (fixed in LLVM 20.1.1)
(Arch::AArch64, _)
if !cfg.target_features.iter().any(|f| f.as_str() == "neon") && lt_20_1_1 =>
if !cfg.target_features.iter().any(|f| f.as_str() == "neon")
&& version < (20, 1, 1) =>
{
false
}
// Unsupported <https://github.com/llvm/llvm-project/issues/94434>
(Arch::Arm64EC, _) => false,
// Selection failure <https://github.com/llvm/llvm-project/issues/50374> (fixed in llvm21)
(Arch::S390x, _) if lt_21_0_0 => false,
(Arch::S390x, _) if major < 21 => false,
// MinGW ABI bugs <https://gcc.gnu.org/bugzilla/show_bug.cgi?id=115054>
(Arch::X86_64, Os::Windows) if *target_env == Env::Gnu && *target_abi != Abi::Llvm => false,
// Infinite recursion <https://github.com/llvm/llvm-project/issues/97981>
(Arch::CSky, _) => false,
(Arch::Hexagon, _) if lt_21_0_0 => false, // (fixed in llvm21)
(Arch::Hexagon, _) if major < 21 => false, // (fixed in llvm21)
(Arch::LoongArch32 | Arch::LoongArch64, _) if major < 21 => false, // (fixed in llvm21)
(Arch::PowerPC | Arch::PowerPC64, _) => false,
(Arch::Sparc | Arch::Sparc64, _) => false,
(Arch::Wasm32 | Arch::Wasm64, _) => false,
@ -389,15 +394,15 @@ fn update_target_reliable_float_cfg(sess: &Session, cfg: &mut TargetConfig) {
};
cfg.has_reliable_f128 = match (target_arch, target_os) {
// Unsupported https://github.com/llvm/llvm-project/issues/121122
(Arch::AmdGpu, _) => false,
// Unsupported <https://github.com/llvm/llvm-project/issues/94434>
(Arch::Arm64EC, _) => false,
// Selection bug <https://github.com/llvm/llvm-project/issues/96432> (fixed in llvm20)
(Arch::Mips64 | Arch::Mips64r6, _) if lt_20_1_1 => false,
// Selection bug <https://github.com/llvm/llvm-project/issues/96432> (fixed in LLVM 20.1.0)
(Arch::Mips64 | Arch::Mips64r6, _) if version < (20, 1, 0) => false,
// Selection bug <https://github.com/llvm/llvm-project/issues/95471>. This issue is closed
// but basic math still does not work.
(Arch::Nvptx64, _) => false,
// Unsupported https://github.com/llvm/llvm-project/issues/121122
(Arch::AmdGpu, _) => false,
// ABI bugs <https://github.com/rust-lang/rust/issues/125109> et al. (full
// list at <https://github.com/rust-lang/rust/issues/116909>)
(Arch::PowerPC | Arch::PowerPC64, _) => false,
@ -405,7 +410,7 @@ fn update_target_reliable_float_cfg(sess: &Session, cfg: &mut TargetConfig) {
(Arch::Sparc, _) => false,
// Stack alignment bug <https://github.com/llvm/llvm-project/issues/77401>. NB: tests may
// not fail if our compiler-builtins is linked. (fixed in llvm21)
(Arch::X86, _) if lt_21_0_0 => false,
(Arch::X86, _) if major < 21 => false,
// MinGW ABI bugs <https://gcc.gnu.org/bugzilla/show_bug.cgi?id=115054>
(Arch::X86_64, Os::Windows) if *target_env == Env::Gnu && *target_abi != Abi::Llvm => false,
// There are no known problems on other platforms, so the only requirement is that symbols

View file

@ -15,7 +15,7 @@ use rustc_target::callconv::{CastTarget, FnAbi};
use crate::abi::{FnAbiLlvmExt, LlvmType};
use crate::common;
use crate::context::{CodegenCx, GenericCx, SCx};
use crate::llvm::{self, FALSE, Metadata, TRUE, ToLlvmBool, Type, Value};
use crate::llvm::{self, FALSE, Metadata, TRUE, ToGeneric, ToLlvmBool, Type, Value};
use crate::type_of::LayoutLlvmExt;
impl PartialEq for Type {

View file

@ -1,15 +1,10 @@
use rustc_ast::expand::typetree::FncTree;
#[cfg(feature = "llvm_enzyme")]
use {
crate::attributes,
crate::llvm::EnzymeWrapper,
rustc_ast::expand::typetree::TypeTree as RustTypeTree,
std::ffi::{CString, c_char, c_uint},
};
use std::ffi::{CString, c_char, c_uint};
use crate::llvm::{self, Value};
use rustc_ast::expand::typetree::{FncTree, TypeTree as RustTypeTree};
use crate::attributes;
use crate::llvm::{self, EnzymeWrapper, Value};
#[cfg(feature = "llvm_enzyme")]
fn to_enzyme_typetree(
rust_typetree: RustTypeTree,
_data_layout: &str,
@ -19,7 +14,6 @@ fn to_enzyme_typetree(
process_typetree_recursive(&mut enzyme_tt, &rust_typetree, &[], llcx);
enzyme_tt
}
#[cfg(feature = "llvm_enzyme")]
fn process_typetree_recursive(
enzyme_tt: &mut llvm::TypeTree,
rust_typetree: &RustTypeTree,
@ -57,13 +51,21 @@ fn process_typetree_recursive(
}
}
#[cfg(feature = "llvm_enzyme")]
#[cfg_attr(not(feature = "llvm_enzyme"), allow(unused))]
pub(crate) fn add_tt<'ll>(
llmod: &'ll llvm::Module,
llcx: &'ll llvm::Context,
fn_def: &'ll Value,
tt: FncTree,
) {
// TypeTree processing uses functions from Enzyme, which we might not have available if we did
// not build this compiler with `llvm_enzyme`. This feature is not strictly necessary, but
// skipping this function increases the chance that Enzyme fails to compile some code.
// FIXME(autodiff): In the future we should conditionally run this function even without the
// `llvm_enzyme` feature, in case that libEnzyme was provided via rustup.
#[cfg(not(feature = "llvm_enzyme"))]
return;
let inputs = tt.args;
let ret_tt: RustTypeTree = tt.ret;
@ -113,13 +115,3 @@ pub(crate) fn add_tt<'ll>(
enzyme_wrapper.tree_to_string_free(c_str.as_ptr());
}
}
#[cfg(not(feature = "llvm_enzyme"))]
pub(crate) fn add_tt<'ll>(
_llmod: &'ll llvm::Module,
_llcx: &'ll llvm::Context,
_fn_def: &'ll Value,
_tt: FncTree,
) {
unimplemented!()
}

View file

@ -7,7 +7,7 @@ use rustc_codegen_ssa::traits::{
};
use rustc_middle::ty::Ty;
use rustc_middle::ty::layout::{HasTyCtxt, LayoutOf};
use rustc_target::spec::{Abi, Arch};
use rustc_target::spec::{Abi, Arch, Env};
use crate::builder::Builder;
use crate::llvm::{Type, Value};
@ -782,6 +782,129 @@ fn x86_64_sysv64_va_arg_from_memory<'ll, 'tcx>(
mem_addr
}
/// Emits the `va_arg` lowering for Hexagon targets using the musl C library.
///
/// Returns the loaded argument value of type `target_ty`, advancing the
/// `va_list` pointed to by `list` as a side effect.
fn emit_hexagon_va_arg_musl<'ll, 'tcx>(
    bx: &mut Builder<'_, 'll, 'tcx>,
    list: OperandRef<'tcx, &'ll Value>,
    target_ty: Ty<'tcx>,
) -> &'ll Value {
    // Implementation of va_arg for Hexagon musl target.
    // Based on LLVM's HexagonBuiltinVaList implementation.
    //
    // struct __va_list_tag {
    //     void *__current_saved_reg_area_pointer;
    //     void *__saved_reg_area_end_pointer;
    //     void *__overflow_area_pointer;
    // };
    //
    // All variadic arguments are passed on the stack, but the musl implementation
    // uses a register save area for compatibility.

    let va_list_addr = list.immediate();
    let layout = bx.cx.layout_of(target_ty);
    let ptr_align_abi = bx.tcx().data_layout.pointer_align().abi;
    let ptr_size = bx.tcx().data_layout.pointer_size().bytes();

    // Check if argument fits in register save area
    let maybe_reg = bx.append_sibling_block("va_arg.maybe_reg");
    let from_overflow = bx.append_sibling_block("va_arg.from_overflow");
    let end = bx.append_sibling_block("va_arg.end");

    // Load the three pointers from va_list (fields at offsets 0, ptr_size,
    // and 2*ptr_size of the struct above).
    let current_ptr_addr = va_list_addr;
    let end_ptr_addr = bx.inbounds_ptradd(va_list_addr, bx.const_usize(ptr_size));
    let overflow_ptr_addr = bx.inbounds_ptradd(va_list_addr, bx.const_usize(2 * ptr_size));

    let current_ptr = bx.load(bx.type_ptr(), current_ptr_addr, ptr_align_abi);
    let end_ptr = bx.load(bx.type_ptr(), end_ptr_addr, ptr_align_abi);
    let overflow_ptr = bx.load(bx.type_ptr(), overflow_ptr_addr, ptr_align_abi);

    // Align current pointer based on argument type size (following LLVM's implementation)
    // Arguments <= 32 bits (4 bytes) use 4-byte alignment, > 32 bits use 8-byte alignment
    let type_size_bits = bx.cx.size_of(target_ty).bits();
    let arg_align = if type_size_bits > 32 {
        Align::from_bytes(8).unwrap()
    } else {
        Align::from_bytes(4).unwrap()
    };

    let aligned_current = round_pointer_up_to_alignment(bx, current_ptr, arg_align, bx.type_ptr());

    // Calculate next pointer position (following LLVM's logic)
    // Arguments <= 32 bits take 4 bytes, > 32 bits take 8 bytes
    let arg_size = if type_size_bits > 32 { 8 } else { 4 };
    let next_ptr = bx.inbounds_ptradd(aligned_current, bx.const_usize(arg_size));

    // Check if argument fits in register save area
    let fits_in_regs = bx.icmp(IntPredicate::IntULE, next_ptr, end_ptr);
    bx.cond_br(fits_in_regs, maybe_reg, from_overflow);

    // Load from register save area
    bx.switch_to_block(maybe_reg);
    let reg_value_addr = aligned_current;
    // Update current pointer
    bx.store(next_ptr, current_ptr_addr, ptr_align_abi);
    bx.br(end);

    // Load from overflow area (stack)
    bx.switch_to_block(from_overflow);

    // Align overflow pointer using the same alignment rules
    let aligned_overflow =
        round_pointer_up_to_alignment(bx, overflow_ptr, arg_align, bx.type_ptr());
    let overflow_value_addr = aligned_overflow;

    // Update overflow pointer - use the same size calculation
    let next_overflow = bx.inbounds_ptradd(aligned_overflow, bx.const_usize(arg_size));
    bx.store(next_overflow, overflow_ptr_addr, ptr_align_abi);

    // IMPORTANT: Also update the current saved register area pointer to match
    // This synchronizes the pointers when switching to overflow area
    bx.store(next_overflow, current_ptr_addr, ptr_align_abi);
    bx.br(end);

    // Return the value: a phi selects whichever address the taken path
    // produced, then the argument is loaded from it.
    bx.switch_to_block(end);
    let value_addr =
        bx.phi(bx.type_ptr(), &[reg_value_addr, overflow_value_addr], &[maybe_reg, from_overflow]);
    bx.load(layout.llvm_type(bx), value_addr, layout.align.abi)
}
/// Emits the `va_arg` lowering for Hexagon bare-metal (non-musl) targets,
/// where `va_list` is a plain `char *` cursor into the argument area.
///
/// Returns the loaded argument value of type `target_ty`, advancing the
/// pointer stored in `list` past the consumed argument.
fn emit_hexagon_va_arg_bare_metal<'ll, 'tcx>(
    bx: &mut Builder<'_, 'll, 'tcx>,
    list: OperandRef<'tcx, &'ll Value>,
    target_ty: Ty<'tcx>,
) -> &'ll Value {
    // Implementation of va_arg for Hexagon bare-metal (non-musl) targets.
    // Based on LLVM's EmitVAArgForHexagon implementation.
    //
    // va_list is a simple pointer (char *)

    let va_list_addr = list.immediate();
    let layout = bx.cx.layout_of(target_ty);
    let ptr_align_abi = bx.tcx().data_layout.pointer_align().abi;

    // Load current pointer from va_list
    let current_ptr = bx.load(bx.type_ptr(), va_list_addr, ptr_align_abi);

    // Handle address alignment for types with alignment > 4 bytes; 4-byte
    // (and smaller) alignment is already guaranteed by the slot layout below.
    let ty_align = layout.align.abi;
    let aligned_ptr = if ty_align.bytes() > 4 {
        // Ensure alignment is a power of 2
        debug_assert!(ty_align.bytes().is_power_of_two(), "Alignment is not power of 2!");
        round_pointer_up_to_alignment(bx, current_ptr, ty_align, bx.type_ptr())
    } else {
        current_ptr
    };

    // Calculate offset: round up type size to 4-byte boundary (minimum stack slot size)
    let type_size = layout.size.bytes();
    let offset = type_size.next_multiple_of(4); // align to 4 bytes

    // Update va_list to point to next argument
    let next_ptr = bx.inbounds_ptradd(aligned_ptr, bx.const_usize(offset));
    bx.store(next_ptr, va_list_addr, ptr_align_abi);

    // Load and return the argument value
    bx.load(layout.llvm_type(bx), aligned_ptr, layout.align.abi)
}
fn emit_xtensa_va_arg<'ll, 'tcx>(
bx: &mut Builder<'_, 'll, 'tcx>,
list: OperandRef<'tcx, &'ll Value>,
@ -966,6 +1089,13 @@ pub(super) fn emit_va_arg<'ll, 'tcx>(
// This includes `target.is_like_darwin`, which on x86_64 targets is like sysv64.
Arch::X86_64 => emit_x86_64_sysv64_va_arg(bx, addr, target_ty),
Arch::Xtensa => emit_xtensa_va_arg(bx, addr, target_ty),
Arch::Hexagon => {
if target.env == Env::Musl {
emit_hexagon_va_arg_musl(bx, addr, target_ty)
} else {
emit_hexagon_va_arg_bare_metal(bx, addr, target_ty)
}
}
// For all other architecture/OS combinations fall back to using
// the LLVM va_arg instruction.
// https://llvm.org/docs/LangRef.html#va-arg-instruction

View file

@ -103,17 +103,18 @@ pub fn link_binary(
});
if outputs.outputs.should_link() {
let tmpdir = TempDirBuilder::new()
.prefix("rustc")
.tempdir()
.unwrap_or_else(|error| sess.dcx().emit_fatal(errors::CreateTempDir { error }));
let path = MaybeTempDir::new(tmpdir, sess.opts.cg.save_temps);
let output = out_filename(
sess,
crate_type,
outputs,
codegen_results.crate_info.local_crate_name,
);
let tmpdir = TempDirBuilder::new()
.prefix("rustc")
.tempdir_in(output.parent().unwrap_or_else(|| Path::new(".")))
.unwrap_or_else(|error| sess.dcx().emit_fatal(errors::CreateTempDir { error }));
let path = MaybeTempDir::new(tmpdir, sess.opts.cg.save_temps);
let crate_name = format!("{}", codegen_results.crate_info.local_crate_name);
let out_filename = output.file_for_writing(
outputs,

View file

@ -350,6 +350,9 @@ fn process_builtin_attrs(
codegen_fn_attrs.flags |= CodegenFnAttrFlags::EXTERNALLY_IMPLEMENTABLE_ITEM;
}
}
AttributeKind::ThreadLocal => {
codegen_fn_attrs.flags |= CodegenFnAttrFlags::THREAD_LOCAL
}
_ => {}
}
}
@ -366,7 +369,6 @@ fn process_builtin_attrs(
sym::rustc_allocator_zeroed => {
codegen_fn_attrs.flags |= CodegenFnAttrFlags::ALLOCATOR_ZEROED
}
sym::thread_local => codegen_fn_attrs.flags |= CodegenFnAttrFlags::THREAD_LOCAL,
sym::instruction_set => {
codegen_fn_attrs.instruction_set = parse_instruction_set_attr(tcx, attr)
}

View file

@ -9,12 +9,12 @@ use rustc_lint_defs::builtin::TAIL_CALL_TRACK_CALLER;
use rustc_middle::mir::{self, AssertKind, InlineAsmMacro, SwitchTargets, UnwindTerminateReason};
use rustc_middle::ty::layout::{HasTyCtxt, LayoutOf, ValidityRequirement};
use rustc_middle::ty::print::{with_no_trimmed_paths, with_no_visible_paths};
use rustc_middle::ty::{self, Instance, Ty};
use rustc_middle::ty::{self, Instance, Ty, TypeVisitableExt};
use rustc_middle::{bug, span_bug};
use rustc_session::config::OptLevel;
use rustc_span::Span;
use rustc_span::source_map::Spanned;
use rustc_target::callconv::{ArgAbi, CastTarget, FnAbi, PassMode};
use rustc_target::callconv::{ArgAbi, ArgAttributes, CastTarget, FnAbi, PassMode};
use tracing::{debug, info};
use super::operand::OperandRef;
@ -1036,6 +1036,59 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
_ => bug!("{} is not callable", callee.layout.ty),
};
if let Some(instance) = instance
&& let Some(name) = bx.tcx().codegen_fn_attrs(instance.def_id()).symbol_name
&& name.as_str().starts_with("llvm.")
// This is the only LLVM intrinsic we use that unwinds
// FIXME either add unwind support to codegen_llvm_intrinsic_call or replace usage of
// this intrinsic with something else
&& name.as_str() != "llvm.wasm.throw"
{
assert!(!instance.args.has_infer());
assert!(!instance.args.has_escaping_bound_vars());
let result_layout =
self.cx.layout_of(self.monomorphized_place_ty(destination.as_ref()));
let return_dest = if result_layout.is_zst() {
ReturnDest::Nothing
} else if let Some(index) = destination.as_local() {
match self.locals[index] {
LocalRef::Place(dest) => ReturnDest::Store(dest),
LocalRef::UnsizedPlace(_) => bug!("return type must be sized"),
LocalRef::PendingOperand => {
// Handle temporary places, specifically `Operand` ones, as
// they don't have `alloca`s.
ReturnDest::DirectOperand(index)
}
LocalRef::Operand(_) => bug!("place local already assigned to"),
}
} else {
ReturnDest::Store(self.codegen_place(bx, destination.as_ref()))
};
let args =
args.into_iter().map(|arg| self.codegen_operand(bx, &arg.node)).collect::<Vec<_>>();
self.set_debug_loc(bx, source_info);
let llret =
bx.codegen_llvm_intrinsic_call(instance, &args, self.mir[helper.bb].is_cleanup);
if let Some(target) = target {
self.store_return(
bx,
return_dest,
&ArgAbi { layout: result_layout, mode: PassMode::Direct(ArgAttributes::new()) },
llret,
);
return helper.funclet_br(self, bx, target, mergeable_succ);
} else {
bx.unreachable();
return MergingSucc::False;
}
}
// FIXME(eddyb) avoid computing this if possible, when `instance` is
// available - right now `sig` is only needed for getting the `abi`
// and figuring out how many extra args were passed to a C-variadic `fn`.

View file

@ -77,22 +77,21 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
.flatten()
.map(|val| {
// A SIMD type has a single field, which is an array.
let fields = val.unwrap_branch();
let fields = val.to_branch();
assert_eq!(fields.len(), 1);
let array = fields[0].unwrap_branch();
let array = fields[0].to_branch();
// Iterate over the array elements to obtain the values in the vector.
let values: Vec<_> = array
.iter()
.map(|field| {
if let Some(prim) = field.try_to_scalar() {
let layout = bx.layout_of(field_ty);
let BackendRepr::Scalar(scalar) = layout.backend_repr else {
bug!("from_const: invalid ByVal layout: {:#?}", layout);
};
bx.scalar_to_backend(prim, scalar, bx.immediate_backend_type(layout))
} else {
let Some(prim) = field.try_to_scalar() else {
bug!("field is not a scalar {:?}", field)
}
};
let layout = bx.layout_of(field_ty);
let BackendRepr::Scalar(scalar) = layout.backend_repr else {
bug!("from_const: invalid ByVal layout: {:#?}", layout);
};
bx.scalar_to_backend(prim, scalar, bx.immediate_backend_type(layout))
})
.collect();
bx.const_vector(&values)

View file

@ -102,7 +102,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
};
let parse_atomic_ordering = |ord: ty::Value<'tcx>| {
let discr = ord.valtree.unwrap_branch()[0].unwrap_leaf();
let discr = ord.to_branch()[0].to_leaf();
discr.to_atomic_ordering()
};

View file

@ -1056,6 +1056,17 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
OperandRef { move_annotation, ..self.codegen_consume(bx, place.as_ref()) }
}
mir::Operand::RuntimeChecks(checks) => {
let layout = bx.layout_of(bx.tcx().types.bool);
let BackendRepr::Scalar(scalar) = layout.backend_repr else {
bug!("from_const: invalid ByVal layout: {:#?}", layout);
};
let x = Scalar::from_bool(checks.value(bx.tcx().sess));
let llval = bx.scalar_to_backend(x, scalar, bx.immediate_backend_type(layout));
let val = OperandValue::Immediate(llval);
OperandRef { val, layout, move_annotation: None }
}
mir::Operand::Constant(ref constant) => {
let constant_ty = self.monomorphize(constant.ty());
// Most SIMD vector constants should be passed as immediates.

View file

@ -619,21 +619,6 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
}
}
mir::Rvalue::NullaryOp(ref null_op) => {
let val = match null_op {
mir::NullOp::RuntimeChecks(kind) => {
let val = kind.value(bx.tcx().sess);
bx.cx().const_bool(val)
}
};
let tcx = self.cx.tcx();
OperandRef {
val: OperandValue::Immediate(val),
layout: self.cx.layout_of(null_op.ty(tcx)),
move_annotation: None,
}
}
mir::Rvalue::ThreadLocalRef(def_id) => {
assert!(bx.cx().tcx().is_static(def_id));
let layout = bx.layout_of(bx.cx().tcx().static_ptr_ty(def_id, bx.typing_env()));

View file

@ -25,6 +25,13 @@ pub trait IntrinsicCallBuilderMethods<'tcx>: BackendTypes {
span: Span,
) -> Result<(), ty::Instance<'tcx>>;
fn codegen_llvm_intrinsic_call(
&mut self,
instance: ty::Instance<'tcx>,
args: &[OperandRef<'tcx, Self::Value>],
is_cleanup: bool,
) -> Self::Value;
fn abort(&mut self);
fn assume(&mut self, val: Self::Value);
fn expect(&mut self, cond: Self::Value, expected: bool) -> Self::Value;

View file

@ -251,7 +251,7 @@ impl<'mir, 'tcx> Checker<'mir, 'tcx> {
let mut transient = DenseBitSet::new_filled(ccx.body.local_decls.len());
// Make sure to only visit reachable blocks, the dataflow engine can ICE otherwise.
for (bb, data) in traversal::reachable(&ccx.body) {
if matches!(data.terminator().kind, TerminatorKind::Return) {
if data.terminator().kind == TerminatorKind::Return {
let location = ccx.body.terminator_loc(bb);
maybe_storage_live.seek_after_primary_effect(location);
// If a local may be live here, it is definitely not transient.
@ -645,7 +645,6 @@ impl<'tcx> Visitor<'tcx> for Checker<'_, 'tcx> {
Rvalue::Cast(_, _, _) => {}
Rvalue::NullaryOp(NullOp::RuntimeChecks(_)) => {}
Rvalue::ShallowInitBox(_, _) => {}
Rvalue::UnaryOp(op, operand) => {

View file

@ -230,9 +230,7 @@ where
F: FnMut(Local) -> bool,
{
match rvalue {
Rvalue::ThreadLocalRef(_) | Rvalue::NullaryOp(..) => {
Q::in_any_value_of_ty(cx, rvalue.ty(cx.body, cx.tcx))
}
Rvalue::ThreadLocalRef(_) => Q::in_any_value_of_ty(cx, rvalue.ty(cx.body, cx.tcx)),
Rvalue::Discriminant(place) => in_place::<Q, _>(cx, in_local, place.as_ref()),
@ -314,7 +312,7 @@ where
// i.e., we treat all qualifs as non-structural for deref projections. Generally,
// we can say very little about `*ptr` even if we know that `ptr` satisfies all
// sorts of properties.
if matches!(elem, ProjectionElem::Deref) {
if elem == ProjectionElem::Deref {
// We have to assume that this qualifies.
return true;
}
@ -340,6 +338,7 @@ where
Operand::Copy(place) | Operand::Move(place) => {
return in_place::<Q, _>(cx, in_local, place.as_ref());
}
Operand::RuntimeChecks(_) => return Q::in_any_value_of_ty(cx, cx.tcx.types.bool),
Operand::Constant(c) => c,
};

View file

@ -198,7 +198,6 @@ where
| mir::Rvalue::ThreadLocalRef(..)
| mir::Rvalue::Repeat(..)
| mir::Rvalue::BinaryOp(..)
| mir::Rvalue::NullaryOp(..)
| mir::Rvalue::UnaryOp(..)
| mir::Rvalue::Discriminant(..)
| mir::Rvalue::Aggregate(..)

View file

@ -122,6 +122,13 @@ impl<'tcx> interpret::Machine<'tcx> for DummyMachine {
unimplemented!()
}
#[inline(always)]
fn runtime_checks(_ecx: &InterpCx<'tcx, Self>, r: RuntimeChecks) -> InterpResult<'tcx, bool> {
// Runtime checks have different value depending on the crate they are codegenned in.
// Verify we aren't trying to evaluate them in mir-optimizations.
panic!("compiletime machine evaluated {r:?}")
}
fn binary_ptr_op(
ecx: &InterpCx<'tcx, Self>,
bin_op: BinOp,

View file

@ -637,6 +637,16 @@ impl<'tcx> interpret::Machine<'tcx> for CompileTimeMachine<'tcx> {
Err(ConstEvalErrKind::AssertFailure(err)).into()
}
#[inline(always)]
fn runtime_checks(
_ecx: &InterpCx<'tcx, Self>,
_r: mir::RuntimeChecks,
) -> InterpResult<'tcx, bool> {
// We can't look at `tcx.sess` here as that can differ across crates, which can lead to
// unsound differences in evaluating the same constant at different instantiation sites.
interp_ok(true)
}
fn binary_ptr_op(
_ecx: &InterpCx<'tcx, Self>,
_bin_op: mir::BinOp,

View file

@ -36,13 +36,17 @@ fn branches<'tcx>(
// For enums, we prepend their variant index before the variant's fields so we can figure out
// the variant again when just seeing a valtree.
if let Some(variant) = variant {
branches.push(ty::ValTree::from_scalar_int(*ecx.tcx, variant.as_u32().into()));
branches.push(ty::Const::new_value(
*ecx.tcx,
ty::ValTree::from_scalar_int(*ecx.tcx, variant.as_u32().into()),
ecx.tcx.types.u32,
));
}
for i in 0..field_count {
let field = ecx.project_field(&place, FieldIdx::from_usize(i)).unwrap();
let valtree = const_to_valtree_inner(ecx, &field, num_nodes)?;
branches.push(valtree);
branches.push(ty::Const::new_value(*ecx.tcx, valtree, field.layout.ty));
}
// Have to account for ZSTs here
@ -65,7 +69,7 @@ fn slice_branches<'tcx>(
for i in 0..n {
let place_elem = ecx.project_index(place, i).unwrap();
let valtree = const_to_valtree_inner(ecx, &place_elem, num_nodes)?;
elems.push(valtree);
elems.push(ty::Const::new_value(*ecx.tcx, valtree, place_elem.layout.ty));
}
Ok(ty::ValTree::from_branches(*ecx.tcx, elems))
@ -200,8 +204,8 @@ fn reconstruct_place_meta<'tcx>(
&ObligationCause::dummy(),
|ty| ty,
|| {
let branches = last_valtree.unwrap_branch();
last_valtree = *branches.last().unwrap();
let branches = last_valtree.to_branch();
last_valtree = branches.last().unwrap().to_value().valtree;
debug!(?branches, ?last_valtree);
},
);
@ -212,7 +216,7 @@ fn reconstruct_place_meta<'tcx>(
};
// Get the number of elements in the unsized field.
let num_elems = last_valtree.unwrap_branch().len();
let num_elems = last_valtree.to_branch().len();
MemPlaceMeta::Meta(Scalar::from_target_usize(num_elems as u64, &tcx))
}
@ -274,7 +278,7 @@ pub fn valtree_to_const_value<'tcx>(
mir::ConstValue::ZeroSized
}
ty::Bool | ty::Int(_) | ty::Uint(_) | ty::Float(_) | ty::Char | ty::RawPtr(_, _) => {
mir::ConstValue::Scalar(Scalar::Int(cv.valtree.unwrap_leaf()))
mir::ConstValue::Scalar(Scalar::Int(cv.to_leaf()))
}
ty::Pat(ty, _) => {
let cv = ty::Value { valtree: cv.valtree, ty };
@ -301,12 +305,13 @@ pub fn valtree_to_const_value<'tcx>(
|| matches!(cv.ty.kind(), ty::Adt(def, _) if def.is_struct()))
{
// A Scalar tuple/struct; we can avoid creating an allocation.
let branches = cv.valtree.unwrap_branch();
let branches = cv.to_branch();
// Find the non-ZST field. (There can be aligned ZST!)
for (i, &inner_valtree) in branches.iter().enumerate() {
let field = layout.field(&LayoutCx::new(tcx, typing_env), i);
if !field.is_zst() {
let cv = ty::Value { valtree: inner_valtree, ty: field.ty };
let cv =
ty::Value { valtree: inner_valtree.to_value().valtree, ty: field.ty };
return valtree_to_const_value(tcx, typing_env, cv);
}
}
@ -381,7 +386,7 @@ fn valtree_into_mplace<'tcx>(
// Zero-sized type, nothing to do.
}
ty::Bool | ty::Int(_) | ty::Uint(_) | ty::Float(_) | ty::Char | ty::RawPtr(..) => {
let scalar_int = valtree.unwrap_leaf();
let scalar_int = valtree.to_leaf();
debug!("writing trivial valtree {:?} to place {:?}", scalar_int, place);
ecx.write_immediate(Immediate::Scalar(scalar_int.into()), place).unwrap();
}
@ -391,13 +396,13 @@ fn valtree_into_mplace<'tcx>(
ecx.write_immediate(imm, place).unwrap();
}
ty::Adt(_, _) | ty::Tuple(_) | ty::Array(_, _) | ty::Str | ty::Slice(_) => {
let branches = valtree.unwrap_branch();
let branches = valtree.to_branch();
// Need to downcast place for enums
let (place_adjusted, branches, variant_idx) = match ty.kind() {
ty::Adt(def, _) if def.is_enum() => {
// First element of valtree corresponds to variant
let scalar_int = branches[0].unwrap_leaf();
let scalar_int = branches[0].to_leaf();
let variant_idx = VariantIdx::from_u32(scalar_int.to_u32());
let variant = def.variant(variant_idx);
debug!(?variant);
@ -425,7 +430,7 @@ fn valtree_into_mplace<'tcx>(
};
debug!(?place_inner);
valtree_into_mplace(ecx, &place_inner, *inner_valtree);
valtree_into_mplace(ecx, &place_inner, inner_valtree.to_value().valtree);
dump_place(ecx, &place_inner);
}

Some files were not shown because too many files have changed in this diff Show more