diff --git a/Cargo.lock b/Cargo.lock
index d2dba92b7d8f..833356011964 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -1670,9 +1670,12 @@ dependencies = [
[[package]]
name = "hashbrown"
-version = "0.16.0"
+version = "0.16.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5419bdc4f6a9207fbeba6d11b604d481addf78ecd10c11ad51e76c2f6482748d"
+checksum = "841d1cc9bed7f9236f321df977030373f4a4163ae1a7dbfe1a51a2c1a51d9100"
+dependencies = [
+ "foldhash 0.2.0",
+]
[[package]]
name = "heck"
@@ -1950,12 +1953,12 @@ checksum = "964de6e86d545b246d84badc0fef527924ace5134f30641c203ef52ba83f58d5"
[[package]]
name = "indexmap"
-version = "2.12.0"
+version = "2.13.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6717a8d2a5a929a1a2eb43a12812498ed141a0bcfb7e8f7844fbdbe4303bba9f"
+checksum = "7714e70437a7dc3ac8eb7e6f8df75fd8eb422675fc7678aff7364301092b1017"
dependencies = [
"equivalent",
- "hashbrown 0.16.0",
+ "hashbrown 0.16.1",
"serde",
"serde_core",
]
@@ -3334,6 +3337,7 @@ dependencies = [
"rustdoc-json-types",
"serde_json",
"similar",
+ "tempfile",
"wasmparser 0.236.1",
]
@@ -3730,7 +3734,7 @@ dependencies = [
"either",
"elsa",
"ena",
- "hashbrown 0.15.5",
+ "hashbrown 0.16.1",
"indexmap",
"jobserver",
"libc",
@@ -4350,7 +4354,7 @@ name = "rustc_mir_transform"
version = "0.0.0"
dependencies = [
"either",
- "hashbrown 0.15.5",
+ "hashbrown 0.16.1",
"itertools",
"rustc_abi",
"rustc_arena",
@@ -4561,7 +4565,7 @@ dependencies = [
name = "rustc_query_system"
version = "0.0.0"
dependencies = [
- "hashbrown 0.15.5",
+ "hashbrown 0.16.1",
"parking_lot",
"rustc_abi",
"rustc_ast",
diff --git a/RELEASES.md b/RELEASES.md
index 0dffe931e6eb..10a400fda5c3 100644
--- a/RELEASES.md
+++ b/RELEASES.md
@@ -1,3 +1,114 @@
+Version 1.93.0 (2026-01-22)
+==========================
+
+
+
+Language
+--------
+- [Stabilize several s390x `vector`-related target features and the `is_s390x_feature_detected!` macro](https://github.com/rust-lang/rust/pull/145656)
+- [Stabilize declaration of C-style variadic functions for the `system` ABI](https://github.com/rust-lang/rust/pull/145954)
+- [Emit error when using some keyword as a `cfg` predicate](https://github.com/rust-lang/rust/pull/146978)
+- [Stabilize `asm_cfg`](https://github.com/rust-lang/rust/pull/147736)
+- [During const-evaluation, support copying pointers byte-by-byte](https://github.com/rust-lang/rust/pull/148259)
+- [LUB coercions now correctly handle function item types, and functions with differing safeties](https://github.com/rust-lang/rust/pull/148602)
+- [Allow `const` items that contain mutable references to `static` (which is *very* unsafe, but not *always* UB)](https://github.com/rust-lang/rust/pull/148746)
+- [Add warn-by-default `const_item_interior_mutations` lint to warn against calls which mutate interior mutable `const` items](https://github.com/rust-lang/rust/pull/148407)
+- [Add warn-by-default `function_casts_as_integer` lint](https://github.com/rust-lang/rust/pull/141470)
+
+
+
+
+Compiler
+--------
+- [Stabilize `-Cjump-tables=bool`](https://github.com/rust-lang/rust/pull/145974). The flag was previously called `-Zno-jump-tables`.
+
+
+
+Platform Support
+----------------
+
+- [Promote `riscv64a23-unknown-linux-gnu` to Tier 2 (without host tools)](https://github.com/rust-lang/rust/pull/148435)
+
+Refer to Rust's [platform support page][platform-support-doc]
+for more information on Rust's tiered platform support.
+
+[platform-support-doc]: https://doc.rust-lang.org/rustc/platform-support.html
+
+
+
+Libraries
+---------
+- [Stop internally using `specialization` on the `Copy` trait as it is unsound in the presence of lifetime dependent `Copy` implementations. This may result in some performance regressions as some standard library APIs may now call `Clone::clone` instead of performing bitwise copies](https://github.com/rust-lang/rust/pull/135634)
+- [Allow the global allocator to use thread-local storage and `std::thread::current()`](https://github.com/rust-lang/rust/pull/144465)
+- [Make `BTreeMap::append` not update existing keys when appending an entry which already exists](https://github.com/rust-lang/rust/pull/145628)
+- [Don't require `T: RefUnwindSafe` for `vec::IntoIter: UnwindSafe`](https://github.com/rust-lang/rust/pull/145665)
+
+
+
+
+Stabilized APIs
+---------------
+
+- [`<[MaybeUninit<T>]>::assume_init_drop`](https://doc.rust-lang.org/stable/core/primitive.slice.html#method.assume_init_drop)
+- [`<[MaybeUninit<T>]>::assume_init_ref`](https://doc.rust-lang.org/stable/core/primitive.slice.html#method.assume_init_ref)
+- [`<[MaybeUninit<T>]>::assume_init_mut`](https://doc.rust-lang.org/stable/core/primitive.slice.html#method.assume_init_mut)
+- [`<[MaybeUninit<T>]>::write_copy_of_slice`](https://doc.rust-lang.org/stable/std/primitive.slice.html#method.write_copy_of_slice)
+- [`<[MaybeUninit<T>]>::write_clone_of_slice`](https://doc.rust-lang.org/stable/std/primitive.slice.html#method.write_clone_of_slice)
+- [`String::into_raw_parts`](https://doc.rust-lang.org/stable/std/string/struct.String.html#method.into_raw_parts)
+- [`Vec::into_raw_parts`](https://doc.rust-lang.org/stable/std/vec/struct.Vec.html#method.into_raw_parts)
+- [`<isize>::unchecked_neg`](https://doc.rust-lang.org/stable/std/primitive.isize.html#method.unchecked_neg)
+- [`<isize>::unchecked_shl`](https://doc.rust-lang.org/stable/std/primitive.isize.html#method.unchecked_shl)
+- [`<isize>::unchecked_shr`](https://doc.rust-lang.org/stable/std/primitive.isize.html#method.unchecked_shr)
+- [`<usize>::unchecked_shl`](https://doc.rust-lang.org/stable/std/primitive.usize.html#method.unchecked_shl)
+- [`<usize>::unchecked_shr`](https://doc.rust-lang.org/stable/std/primitive.usize.html#method.unchecked_shr)
+- [`<[T]>::as_array`](https://doc.rust-lang.org/stable/std/primitive.slice.html#method.as_array)
+- [`<[T]>::as_array_mut`](https://doc.rust-lang.org/stable/std/primitive.slice.html#method.as_mut_array)
+- [`<*const [T]>::as_array`](https://doc.rust-lang.org/stable/std/primitive.pointer.html#method.as_array)
+- [`<*mut [T]>::as_array_mut`](https://doc.rust-lang.org/stable/std/primitive.pointer.html#method.as_mut_array)
+- [`VecDeque::pop_front_if`](https://doc.rust-lang.org/stable/std/collections/struct.VecDeque.html#method.pop_front_if)
+- [`VecDeque::pop_back_if`](https://doc.rust-lang.org/stable/std/collections/struct.VecDeque.html#method.pop_back_if)
+- [`Duration::from_nanos_u128`](https://doc.rust-lang.org/stable/std/time/struct.Duration.html#method.from_nanos_u128)
+- [`char::MAX_LEN_UTF8`](https://doc.rust-lang.org/stable/std/primitive.char.html#associatedconstant.MAX_LEN_UTF8)
+- [`char::MAX_LEN_UTF16`](https://doc.rust-lang.org/stable/std/primitive.char.html#associatedconstant.MAX_LEN_UTF16)
+- [`std::fmt::from_fn`](https://doc.rust-lang.org/stable/std/fmt/fn.from_fn.html)
+- [`std::fmt::FromFn`](https://doc.rust-lang.org/stable/std/fmt/struct.FromFn.html)
+
+
+
+
+Cargo
+-----
+- [Enable CARGO_CFG_DEBUG_ASSERTIONS in build scripts based on profile](https://github.com/rust-lang/cargo/pull/16160/)
+- [In `cargo tree`, support long forms for `--format` variables](https://github.com/rust-lang/cargo/pull/16204/)
+- [Add `--workspace` to `cargo clean`](https://github.com/rust-lang/cargo/pull/16263/)
+
+
+
+Rustdoc
+-----
+- [Remove `#![doc(document_private_items)]`](https://github.com/rust-lang/rust/pull/146495)
+- [Include attribute and derive macros in search filters for "macros"](https://github.com/rust-lang/rust/pull/148176)
+- [Include extern crates in search filters for `import`](https://github.com/rust-lang/rust/pull/148301)
+- [Validate usage of crate-level doc attributes](https://github.com/rust-lang/rust/pull/149197). This means if any of `html_favicon_url`, `html_logo_url`, `html_playground_url`, `issue_tracker_base_url`, or `html_no_source` either has a missing value, an unexpected value, or a value of the wrong type, rustdoc will emit the deny-by-default lint `rustdoc::invalid_doc_attributes`.
+
+
+
+
+Compatibility Notes
+-------------------
+- [Introduce `pin_v2` into the builtin attributes namespace](https://github.com/rust-lang/rust/pull/139751)
+- [Update bundled musl to 1.2.5](https://github.com/rust-lang/rust/pull/142682)
+- [On Emscripten, the unwinding ABI used when compiling with `panic=unwind` was changed from the JS exception handling ABI to the wasm exception handling ABI.](https://github.com/rust-lang/rust/pull/147224) If linking C/C++ object files with Rust objects, `-fwasm-exceptions` must be passed to the linker now. On nightly Rust, it is possible to get the old behavior with `-Zwasm-emscripten-eh=false -Zbuild-std`, but it will be removed in a future release.
+- The `#[test]` attribute, used to define tests, was previously ignored in various places where it had no meaning (e.g. on trait methods or types). Putting the `#[test]` attribute in these places is no longer ignored, and will now result in an error; this may also result in errors when generating rustdoc. [Error when `test` attribute is applied to structs](https://github.com/rust-lang/rust/pull/147841)
+- Cargo now sets the `CARGO_CFG_DEBUG_ASSERTIONS` environment variable in more situations. This will cause crates depending on `static-init` versions 1.0.1 to 1.0.3 to fail compilation with "failed to resolve: use of unresolved module or unlinked crate `parking_lot`". See [the linked issue](https://github.com/rust-lang/rust/issues/150646#issuecomment-3718964342) for details.
+- [User written types in the `offset_of!` macro are now checked to be well formed.](https://github.com/rust-lang/rust/issues/150465/)
+- `cargo publish` no longer emits `.crate` files as a final artifact for user access when the `build.build-dir` config is unset
+- [Upgrade the `deref_nullptr` lint from warn-by-default to deny-by-default](https://github.com/rust-lang/rust/pull/148122)
+- [Add future-incompatibility warning for `...` function parameters without a pattern outside of `extern` blocks](https://github.com/rust-lang/rust/pull/143619)
+- [Introduce future-compatibility warning for `repr(C)` enums whose discriminant values do not fit into a `c_int` or `c_uint`](https://github.com/rust-lang/rust/pull/147017)
+- [Introduce future-compatibility warning against ignoring `repr(C)` types as part of `repr(transparent)`](https://github.com/rust-lang/rust/pull/147185)
+
+
Version 1.92.0 (2025-12-11)
==========================
diff --git a/compiler/rustc_ast_lowering/src/delegation.rs b/compiler/rustc_ast_lowering/src/delegation.rs
index 5d2531e50393..cccfb112ec2b 100644
--- a/compiler/rustc_ast_lowering/src/delegation.rs
+++ b/compiler/rustc_ast_lowering/src/delegation.rs
@@ -152,10 +152,21 @@ impl<'hir> LoweringContext<'_, 'hir> {
) -> DelegationResults<'hir> {
let span = self.lower_span(delegation.path.segments.last().unwrap().ident.span);
- let ids = self.get_delegation_ids(
- self.resolver.delegation_infos[&self.local_def_id(item_id)].resolution_node,
- span,
- );
+ // Delegation can be unresolved in illegal places such as function bodies in extern blocks (see #151356)
+ let ids = if let Some(delegation_info) =
+ self.resolver.delegation_infos.get(&self.local_def_id(item_id))
+ {
+ self.get_delegation_ids(delegation_info.resolution_node, span)
+ } else {
+ return self.generate_delegation_error(
+ self.dcx().span_delayed_bug(
+ span,
+ format!("LoweringContext: the delegation {:?} is unresolved", item_id),
+ ),
+ span,
+ delegation,
+ );
+ };
match ids {
Ok(ids) => {
diff --git a/compiler/rustc_attr_parsing/src/attributes/codegen_attrs.rs b/compiler/rustc_attr_parsing/src/attributes/codegen_attrs.rs
index 5bbaeda18df3..063fa12d3896 100644
--- a/compiler/rustc_attr_parsing/src/attributes/codegen_attrs.rs
+++ b/compiler/rustc_attr_parsing/src/attributes/codegen_attrs.rs
@@ -717,3 +717,100 @@ impl NoArgsAttributeParser for EiiForeignItemParser {
const ALLOWED_TARGETS: AllowedTargets = AllowedTargets::AllowList(&[Allow(Target::ForeignFn)]);
const CREATE: fn(Span) -> AttributeKind = |_| AttributeKind::EiiForeignItem;
}
+
+pub(crate) struct PatchableFunctionEntryParser;
+
+impl SingleAttributeParser for PatchableFunctionEntryParser {
+ const PATH: &[Symbol] = &[sym::patchable_function_entry];
+ const ON_DUPLICATE: OnDuplicate = OnDuplicate::Error;
+ const ATTRIBUTE_ORDER: AttributeOrder = AttributeOrder::KeepInnermost;
+ const ALLOWED_TARGETS: AllowedTargets = AllowedTargets::AllowList(&[Allow(Target::Fn)]);
+ const TEMPLATE: AttributeTemplate = template!(List: &["prefix_nops = m, entry_nops = n"]);
+
+ fn convert(cx: &mut AcceptContext<'_, '_, S>, args: &ArgParser) -> Option {
+ let Some(meta_item_list) = args.list() else {
+ cx.expected_list(cx.attr_span, args);
+ return None;
+ };
+
+ let mut prefix = None;
+ let mut entry = None;
+
+ if meta_item_list.len() == 0 {
+ cx.expected_list(meta_item_list.span, args);
+ return None;
+ }
+
+ let mut errored = false;
+
+ for item in meta_item_list.mixed() {
+ let Some(meta_item) = item.meta_item() else {
+ errored = true;
+ cx.expected_name_value(item.span(), None);
+ continue;
+ };
+
+ let Some(name_value_lit) = meta_item.args().name_value() else {
+ errored = true;
+ cx.expected_name_value(item.span(), None);
+ continue;
+ };
+
+ let attrib_to_write = match meta_item.ident().map(|ident| ident.name) {
+ Some(sym::prefix_nops) => {
+ // Duplicate prefixes are not allowed
+ if prefix.is_some() {
+ errored = true;
+ cx.duplicate_key(meta_item.path().span(), sym::prefix_nops);
+ continue;
+ }
+ &mut prefix
+ }
+ Some(sym::entry_nops) => {
+ // Duplicate entries are not allowed
+ if entry.is_some() {
+ errored = true;
+ cx.duplicate_key(meta_item.path().span(), sym::entry_nops);
+ continue;
+ }
+ &mut entry
+ }
+ _ => {
+ errored = true;
+ cx.expected_specific_argument(
+ meta_item.path().span(),
+ &[sym::prefix_nops, sym::entry_nops],
+ );
+ continue;
+ }
+ };
+
+ let rustc_ast::LitKind::Int(val, _) = name_value_lit.value_as_lit().kind else {
+ errored = true;
+ cx.expected_integer_literal(name_value_lit.value_span);
+ continue;
+ };
+
+ let Ok(val) = val.get().try_into() else {
+ errored = true;
+ cx.expected_integer_literal_in_range(
+ name_value_lit.value_span,
+ u8::MIN as isize,
+ u8::MAX as isize,
+ );
+ continue;
+ };
+
+ *attrib_to_write = Some(val);
+ }
+
+ if errored {
+ None
+ } else {
+ Some(AttributeKind::PatchableFunctionEntry {
+ prefix: prefix.unwrap_or(0),
+ entry: entry.unwrap_or(0),
+ })
+ }
+ }
+}
diff --git a/compiler/rustc_attr_parsing/src/attributes/test_attrs.rs b/compiler/rustc_attr_parsing/src/attributes/test_attrs.rs
index 7f25641b948e..ec7cdd3624dc 100644
--- a/compiler/rustc_attr_parsing/src/attributes/test_attrs.rs
+++ b/compiler/rustc_attr_parsing/src/attributes/test_attrs.rs
@@ -91,3 +91,25 @@ impl SingleAttributeParser for ShouldPanicParser {
})
}
}
+
+pub(crate) struct RustcVarianceParser;
+
+impl NoArgsAttributeParser for RustcVarianceParser {
+ const PATH: &[Symbol] = &[sym::rustc_variance];
+ const ON_DUPLICATE: OnDuplicate = OnDuplicate::Warn;
+ const ALLOWED_TARGETS: AllowedTargets = AllowedTargets::AllowList(&[
+ Allow(Target::Struct),
+ Allow(Target::Enum),
+ Allow(Target::Union),
+ ]);
+ const CREATE: fn(Span) -> AttributeKind = |_| AttributeKind::RustcVariance;
+}
+
+pub(crate) struct RustcVarianceOfOpaquesParser;
+
+impl NoArgsAttributeParser for RustcVarianceOfOpaquesParser {
+ const PATH: &[Symbol] = &[sym::rustc_variance_of_opaques];
+ const ON_DUPLICATE: OnDuplicate = OnDuplicate::Warn;
+ const ALLOWED_TARGETS: AllowedTargets = AllowedTargets::AllowList(&[Allow(Target::Crate)]);
+ const CREATE: fn(Span) -> AttributeKind = |_| AttributeKind::RustcVarianceOfOpaques;
+}
diff --git a/compiler/rustc_attr_parsing/src/attributes/traits.rs b/compiler/rustc_attr_parsing/src/attributes/traits.rs
index ee5895a6efd0..c0db5b4d442a 100644
--- a/compiler/rustc_attr_parsing/src/attributes/traits.rs
+++ b/compiler/rustc_attr_parsing/src/attributes/traits.rs
@@ -94,12 +94,12 @@ impl NoArgsAttributeParser for DenyExplicitImplParser {
const CREATE: fn(Span) -> AttributeKind = AttributeKind::DenyExplicitImpl;
}
-pub(crate) struct DoNotImplementViaObjectParser;
-impl NoArgsAttributeParser for DoNotImplementViaObjectParser {
- const PATH: &[Symbol] = &[sym::rustc_do_not_implement_via_object];
+pub(crate) struct DynIncompatibleTraitParser;
+impl NoArgsAttributeParser for DynIncompatibleTraitParser {
+ const PATH: &[Symbol] = &[sym::rustc_dyn_incompatible_trait];
const ON_DUPLICATE: OnDuplicate = OnDuplicate::Error;
const ALLOWED_TARGETS: AllowedTargets = AllowedTargets::AllowList(&[Allow(Target::Trait)]);
- const CREATE: fn(Span) -> AttributeKind = AttributeKind::DoNotImplementViaObject;
+ const CREATE: fn(Span) -> AttributeKind = AttributeKind::DynIncompatibleTrait;
}
// Specialization
diff --git a/compiler/rustc_attr_parsing/src/context.rs b/compiler/rustc_attr_parsing/src/context.rs
index 6aae2b90a504..614619bca3ec 100644
--- a/compiler/rustc_attr_parsing/src/context.rs
+++ b/compiler/rustc_attr_parsing/src/context.rs
@@ -23,8 +23,8 @@ use crate::attributes::cfi_encoding::CfiEncodingParser;
use crate::attributes::codegen_attrs::{
ColdParser, CoverageParser, EiiForeignItemParser, ExportNameParser, ForceTargetFeatureParser,
NakedParser, NoMangleParser, ObjcClassParser, ObjcSelectorParser, OptimizeParser,
- RustcPassIndirectlyInNonRusticAbisParser, SanitizeParser, TargetFeatureParser,
- ThreadLocalParser, TrackCallerParser, UsedParser,
+ PatchableFunctionEntryParser, RustcPassIndirectlyInNonRusticAbisParser, SanitizeParser,
+ TargetFeatureParser, ThreadLocalParser, TrackCallerParser, UsedParser,
};
use crate::attributes::confusables::ConfusablesParser;
use crate::attributes::crate_level::{
@@ -85,11 +85,13 @@ use crate::attributes::semantics::MayDangleParser;
use crate::attributes::stability::{
BodyStabilityParser, ConstStabilityIndirectParser, ConstStabilityParser, StabilityParser,
};
-use crate::attributes::test_attrs::{IgnoreParser, ShouldPanicParser};
+use crate::attributes::test_attrs::{
+ IgnoreParser, RustcVarianceOfOpaquesParser, RustcVarianceParser, ShouldPanicParser,
+};
use crate::attributes::traits::{
AllowIncoherentImplParser, CoinductiveParser, DenyExplicitImplParser,
- DoNotImplementViaObjectParser, FundamentalParser, MarkerParser, ParenSugarParser,
- PointeeParser, SkipDuringMethodDispatchParser, SpecializationTraitParser, TypeConstParser,
+ DynIncompatibleTraitParser, FundamentalParser, MarkerParser, ParenSugarParser, PointeeParser,
+ SkipDuringMethodDispatchParser, SpecializationTraitParser, TypeConstParser,
UnsafeSpecializationMarkerParser,
};
use crate::attributes::transparency::TransparencyParser;
@@ -103,18 +105,18 @@ type GroupType = LazyLock>;
pub(super) struct GroupTypeInner {
pub(super) accepters: BTreeMap<&'static [Symbol], Vec>>,
- pub(super) finalizers: Vec>,
}
pub(super) struct GroupTypeInnerAccept {
pub(super) template: AttributeTemplate,
pub(super) accept_fn: AcceptFn,
pub(super) allowed_targets: AllowedTargets,
+ pub(super) finalizer: FinalizeFn,
}
-type AcceptFn =
+pub(crate) type AcceptFn =
Box Fn(&mut AcceptContext<'_, 'sess, S>, &ArgParser) + Send + Sync>;
-type FinalizeFn =
+pub(crate) type FinalizeFn =
Box) -> Option>;
macro_rules! attribute_parsers {
@@ -142,8 +144,7 @@ macro_rules! attribute_parsers {
@[$stage: ty] pub(crate) static $name: ident = [$($names: ty),* $(,)?];
) => {
pub(crate) static $name: GroupType<$stage> = LazyLock::new(|| {
- let mut accepts = BTreeMap::<_, Vec>>::new();
- let mut finalizes = Vec::>::new();
+ let mut accepters = BTreeMap::<_, Vec>>::new();
$(
{
thread_local! {
@@ -151,7 +152,7 @@ macro_rules! attribute_parsers {
};
for (path, template, accept_fn) in <$names>::ATTRIBUTES {
- accepts.entry(*path).or_default().push(GroupTypeInnerAccept {
+ accepters.entry(*path).or_default().push(GroupTypeInnerAccept {
template: *template,
accept_fn: Box::new(|cx, args| {
STATE_OBJECT.with_borrow_mut(|s| {
@@ -159,17 +160,16 @@ macro_rules! attribute_parsers {
})
}),
allowed_targets: <$names as crate::attributes::AttributeParser<$stage>>::ALLOWED_TARGETS,
+ finalizer: Box::new(|cx| {
+ let state = STATE_OBJECT.take();
+ state.finalize(cx)
+ }),
});
}
-
- finalizes.push(Box::new(|cx| {
- let state = STATE_OBJECT.take();
- state.finalize(cx)
- }));
}
)*
- GroupTypeInner { accepters:accepts, finalizers:finalizes }
+ GroupTypeInner { accepters }
});
};
}
@@ -223,6 +223,7 @@ attribute_parsers!(
Single,
Single,
Single,
+ Single,
Single,
Single,
Single,
@@ -254,7 +255,7 @@ attribute_parsers!(
Single>,
Single>,
Single>,
- Single>,
+ Single>,
Single>,
Single>,
Single>,
@@ -300,6 +301,8 @@ attribute_parsers!(
Single>,
Single>,
Single>,
+ Single>,
+ Single>,
Single>,
Single>,
Single>,
@@ -503,6 +506,18 @@ impl<'f, 'sess: 'f, S: Stage> AcceptContext<'f, 'sess, S> {
self.emit_parse_error(span, AttributeParseErrorReason::ExpectedIntegerLiteral)
}
+ pub(crate) fn expected_integer_literal_in_range(
+ &self,
+ span: Span,
+ lower_bound: isize,
+ upper_bound: isize,
+ ) -> ErrorGuaranteed {
+ self.emit_parse_error(
+ span,
+ AttributeParseErrorReason::ExpectedIntegerLiteralInRange { lower_bound, upper_bound },
+ )
+ }
+
pub(crate) fn expected_list(&self, span: Span, args: &ArgParser) -> ErrorGuaranteed {
let span = match args {
ArgParser::NoArgs => span,
diff --git a/compiler/rustc_attr_parsing/src/interface.rs b/compiler/rustc_attr_parsing/src/interface.rs
index c6be18321b5e..bac4936c20d2 100644
--- a/compiler/rustc_attr_parsing/src/interface.rs
+++ b/compiler/rustc_attr_parsing/src/interface.rs
@@ -12,7 +12,7 @@ use rustc_session::Session;
use rustc_session::lint::{BuiltinLintDiag, LintId};
use rustc_span::{DUMMY_SP, Span, Symbol, sym};
-use crate::context::{AcceptContext, FinalizeContext, SharedContext, Stage};
+use crate::context::{AcceptContext, FinalizeContext, FinalizeFn, SharedContext, Stage};
use crate::early_parsed::{EARLY_PARSED_ATTRIBUTES, EarlyParsedState};
use crate::parser::{ArgParser, PathParser, RefPathParser};
use crate::session_diagnostics::ParsedDescription;
@@ -270,6 +270,8 @@ impl<'sess, S: Stage> AttributeParser<'sess, S> {
let mut attr_paths: Vec> = Vec::new();
let mut early_parsed_state = EarlyParsedState::default();
+ let mut finalizers: Vec<&FinalizeFn> = Vec::with_capacity(attrs.len());
+
for attr in attrs {
// If we're only looking for a single attribute, skip all the ones we don't care about.
if let Some(expected) = self.parse_only {
@@ -383,6 +385,8 @@ impl<'sess, S: Stage> AttributeParser<'sess, S> {
};
(accept.accept_fn)(&mut cx, &args);
+ finalizers.push(&accept.finalizer);
+
if !matches!(cx.stage.should_emit(), ShouldEmit::Nothing) {
Self::check_target(&accept.allowed_targets, target, &mut cx);
}
@@ -417,7 +421,7 @@ impl<'sess, S: Stage> AttributeParser<'sess, S> {
}
early_parsed_state.finalize_early_parsed_attributes(&mut attributes);
- for f in &S::parsers().finalizers {
+ for f in &finalizers {
if let Some(attr) = f(&mut FinalizeContext {
shared: SharedContext { cx: self, target_span, target, emit_lint: &mut emit_lint },
all_attrs: &attr_paths,
diff --git a/compiler/rustc_attr_parsing/src/session_diagnostics.rs b/compiler/rustc_attr_parsing/src/session_diagnostics.rs
index 85e7891b1e64..f9748542beb9 100644
--- a/compiler/rustc_attr_parsing/src/session_diagnostics.rs
+++ b/compiler/rustc_attr_parsing/src/session_diagnostics.rs
@@ -525,6 +525,10 @@ pub(crate) enum AttributeParseErrorReason<'a> {
byte_string: Option,
},
ExpectedIntegerLiteral,
+ ExpectedIntegerLiteralInRange {
+ lower_bound: isize,
+ upper_bound: isize,
+ },
ExpectedAtLeastOneArgument,
ExpectedSingleArgument,
ExpectedList,
@@ -596,6 +600,17 @@ impl<'a, G: EmissionGuarantee> Diagnostic<'a, G> for AttributeParseError<'_> {
AttributeParseErrorReason::ExpectedIntegerLiteral => {
diag.span_label(self.span, "expected an integer literal here");
}
+ AttributeParseErrorReason::ExpectedIntegerLiteralInRange {
+ lower_bound,
+ upper_bound,
+ } => {
+ diag.span_label(
+ self.span,
+ format!(
+ "expected an integer literal in the range of {lower_bound}..={upper_bound}"
+ ),
+ );
+ }
AttributeParseErrorReason::ExpectedSingleArgument => {
diag.span_label(self.span, "expected a single argument here");
diag.code(E0805);
diff --git a/compiler/rustc_codegen_cranelift/src/intrinsics/mod.rs b/compiler/rustc_codegen_cranelift/src/intrinsics/mod.rs
index a78c6e0a4e7a..ab9a11305baa 100644
--- a/compiler/rustc_codegen_cranelift/src/intrinsics/mod.rs
+++ b/compiler/rustc_codegen_cranelift/src/intrinsics/mod.rs
@@ -1506,7 +1506,7 @@ fn codegen_regular_intrinsic_call<'tcx>(
}
// FIXME implement variadics in cranelift
- sym::va_copy | sym::va_arg | sym::va_end => {
+ sym::va_arg | sym::va_end => {
fx.tcx.dcx().span_fatal(
source_info.span,
"Defining variadic functions is not yet supported by Cranelift",
diff --git a/compiler/rustc_codegen_gcc/src/back/lto.rs b/compiler/rustc_codegen_gcc/src/back/lto.rs
index 840f51c0685d..c93a2e8f8da5 100644
--- a/compiler/rustc_codegen_gcc/src/back/lto.rs
+++ b/compiler/rustc_codegen_gcc/src/back/lto.rs
@@ -26,11 +26,11 @@ use std::sync::atomic::Ordering;
use gccjit::{Context, OutputKind};
use object::read::archive::ArchiveFile;
use rustc_codegen_ssa::back::lto::{SerializedModule, ThinModule, ThinShared};
-use rustc_codegen_ssa::back::write::{CodegenContext, FatLtoInput};
+use rustc_codegen_ssa::back::write::{CodegenContext, FatLtoInput, SharedEmitter};
use rustc_codegen_ssa::traits::*;
use rustc_codegen_ssa::{ModuleCodegen, ModuleKind, looks_like_rust_object_file};
use rustc_data_structures::memmap::Mmap;
-use rustc_errors::DiagCtxtHandle;
+use rustc_errors::{DiagCtxt, DiagCtxtHandle};
use rustc_log::tracing::info;
use rustc_middle::bug;
use rustc_middle::dep_graph::WorkProduct;
@@ -112,10 +112,11 @@ fn save_as_file(obj: &[u8], path: &Path) -> Result<(), LtoBitcodeFromRlib> {
/// for further optimization.
pub(crate) fn run_fat(
cgcx: &CodegenContext,
+ shared_emitter: &SharedEmitter,
each_linked_rlib_for_lto: &[PathBuf],
modules: Vec>,
) -> ModuleCodegen {
- let dcx = cgcx.create_dcx();
+ let dcx = DiagCtxt::new(Box::new(shared_emitter.clone()));
let dcx = dcx.handle();
let lto_data = prepare_lto(cgcx, each_linked_rlib_for_lto, dcx);
/*let symbols_below_threshold =
@@ -283,14 +284,13 @@ impl ModuleBufferMethods for ModuleBuffer {
/// can simply be copied over from the incr. comp. cache.
pub(crate) fn run_thin(
cgcx: &CodegenContext,
+ dcx: DiagCtxtHandle<'_>,
each_linked_rlib_for_lto: &[PathBuf],
modules: Vec<(String, ThinBuffer)>,
cached_modules: Vec<(SerializedModule, WorkProduct)>,
) -> (Vec>, Vec) {
- let dcx = cgcx.create_dcx();
- let dcx = dcx.handle();
let lto_data = prepare_lto(cgcx, each_linked_rlib_for_lto, dcx);
- if cgcx.opts.cg.linker_plugin_lto.enabled() {
+ if cgcx.use_linker_plugin_lto {
unreachable!(
"We should never reach this case if the LTO step \
is deferred to the linker"
@@ -522,8 +522,6 @@ pub fn optimize_thin_module(
thin_module: ThinModule,
_cgcx: &CodegenContext,
) -> ModuleCodegen {
- //let dcx = cgcx.create_dcx();
-
//let module_name = &thin_module.shared.module_names[thin_module.idx];
/*let tm_factory_config = TargetMachineFactoryConfig::new(cgcx, module_name.to_str().unwrap());
let tm = (cgcx.tm_factory)(tm_factory_config).map_err(|e| write::llvm_err(&dcx, e))?;*/
diff --git a/compiler/rustc_codegen_gcc/src/back/write.rs b/compiler/rustc_codegen_gcc/src/back/write.rs
index eae0f2aa00f6..b6223c5be370 100644
--- a/compiler/rustc_codegen_gcc/src/back/write.rs
+++ b/compiler/rustc_codegen_gcc/src/back/write.rs
@@ -2,8 +2,11 @@ use std::{env, fs};
use gccjit::{Context, OutputKind};
use rustc_codegen_ssa::back::link::ensure_removed;
-use rustc_codegen_ssa::back::write::{BitcodeSection, CodegenContext, EmitObj, ModuleConfig};
+use rustc_codegen_ssa::back::write::{
+ BitcodeSection, CodegenContext, EmitObj, ModuleConfig, SharedEmitter,
+};
use rustc_codegen_ssa::{CompiledModule, ModuleCodegen};
+use rustc_errors::DiagCtxt;
use rustc_fs_util::link_or_copy;
use rustc_log::tracing::debug;
use rustc_session::config::OutputType;
@@ -15,10 +18,11 @@ use crate::{GccCodegenBackend, GccContext, LtoMode};
pub(crate) fn codegen(
cgcx: &CodegenContext,
+ shared_emitter: &SharedEmitter,
module: ModuleCodegen,
config: &ModuleConfig,
) -> CompiledModule {
- let dcx = cgcx.create_dcx();
+ let dcx = DiagCtxt::new(Box::new(shared_emitter.clone()));
let dcx = dcx.handle();
let _timer = cgcx.prof.generic_activity_with_arg("GCC_module_codegen", &*module.name);
diff --git a/compiler/rustc_codegen_gcc/src/intrinsic/mod.rs b/compiler/rustc_codegen_gcc/src/intrinsic/mod.rs
index 36ea76cbc51a..553e4d3d2fe0 100644
--- a/compiler/rustc_codegen_gcc/src/intrinsic/mod.rs
+++ b/compiler/rustc_codegen_gcc/src/intrinsic/mod.rs
@@ -391,9 +391,6 @@ impl<'a, 'gcc, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tc
sym::breakpoint => {
unimplemented!();
}
- sym::va_copy => {
- unimplemented!();
- }
sym::va_arg => {
unimplemented!();
}
diff --git a/compiler/rustc_codegen_gcc/src/lib.rs b/compiler/rustc_codegen_gcc/src/lib.rs
index cf1be1806235..00bea0222622 100644
--- a/compiler/rustc_codegen_gcc/src/lib.rs
+++ b/compiler/rustc_codegen_gcc/src/lib.rs
@@ -84,7 +84,7 @@ use gccjit::{TargetInfo, Version};
use rustc_ast::expand::allocator::AllocatorMethod;
use rustc_codegen_ssa::back::lto::{SerializedModule, ThinModule};
use rustc_codegen_ssa::back::write::{
- CodegenContext, FatLtoInput, ModuleConfig, TargetMachineFactoryFn,
+ CodegenContext, FatLtoInput, ModuleConfig, SharedEmitter, TargetMachineFactoryFn,
};
use rustc_codegen_ssa::base::codegen_crate;
use rustc_codegen_ssa::target_features::cfg_target_feature;
@@ -435,23 +435,25 @@ impl WriteBackendMethods for GccCodegenBackend {
fn run_and_optimize_fat_lto(
cgcx: &CodegenContext,
+ shared_emitter: &SharedEmitter,
// FIXME(bjorn3): Limit LTO exports to these symbols
_exported_symbols_for_lto: &[String],
each_linked_rlib_for_lto: &[PathBuf],
modules: Vec>,
) -> ModuleCodegen {
- back::lto::run_fat(cgcx, each_linked_rlib_for_lto, modules)
+ back::lto::run_fat(cgcx, shared_emitter, each_linked_rlib_for_lto, modules)
}
fn run_thin_lto(
cgcx: &CodegenContext,
+ dcx: DiagCtxtHandle<'_>,
// FIXME(bjorn3): Limit LTO exports to these symbols
_exported_symbols_for_lto: &[String],
each_linked_rlib_for_lto: &[PathBuf],
modules: Vec<(String, Self::ThinBuffer)>,
cached_modules: Vec<(SerializedModule, WorkProduct)>,
) -> (Vec>, Vec) {
- back::lto::run_thin(cgcx, each_linked_rlib_for_lto, modules, cached_modules)
+ back::lto::run_thin(cgcx, dcx, each_linked_rlib_for_lto, modules, cached_modules)
}
fn print_pass_timings(&self) {
@@ -464,7 +466,7 @@ impl WriteBackendMethods for GccCodegenBackend {
fn optimize(
_cgcx: &CodegenContext,
- _dcx: DiagCtxtHandle<'_>,
+ _shared_emitter: &SharedEmitter,
module: &mut ModuleCodegen,
config: &ModuleConfig,
) {
@@ -473,6 +475,7 @@ impl WriteBackendMethods for GccCodegenBackend {
fn optimize_thin(
cgcx: &CodegenContext,
+ _shared_emitter: &SharedEmitter,
thin: ThinModule,
) -> ModuleCodegen {
back::lto::optimize_thin_module(thin, cgcx)
@@ -480,10 +483,11 @@ impl WriteBackendMethods for GccCodegenBackend {
fn codegen(
cgcx: &CodegenContext,
+ shared_emitter: &SharedEmitter,
module: ModuleCodegen,
config: &ModuleConfig,
) -> CompiledModule {
- back::write::codegen(cgcx, module, config)
+ back::write::codegen(cgcx, shared_emitter, module, config)
}
fn prepare_thin(module: ModuleCodegen) -> (String, Self::ThinBuffer) {
diff --git a/compiler/rustc_codegen_llvm/src/back/lto.rs b/compiler/rustc_codegen_llvm/src/back/lto.rs
index 52c2ca7b1696..71327ed6d2d1 100644
--- a/compiler/rustc_codegen_llvm/src/back/lto.rs
+++ b/compiler/rustc_codegen_llvm/src/back/lto.rs
@@ -9,12 +9,12 @@ use std::{io, iter, slice};
use object::read::archive::ArchiveFile;
use object::{Object, ObjectSection};
use rustc_codegen_ssa::back::lto::{SerializedModule, ThinModule, ThinShared};
-use rustc_codegen_ssa::back::write::{CodegenContext, FatLtoInput};
+use rustc_codegen_ssa::back::write::{CodegenContext, FatLtoInput, SharedEmitter};
use rustc_codegen_ssa::traits::*;
use rustc_codegen_ssa::{ModuleCodegen, ModuleKind, looks_like_rust_object_file};
use rustc_data_structures::fx::FxHashMap;
use rustc_data_structures::memmap::Mmap;
-use rustc_errors::DiagCtxtHandle;
+use rustc_errors::{DiagCtxt, DiagCtxtHandle};
use rustc_hir::attrs::SanitizerSet;
use rustc_middle::bug;
use rustc_middle::dep_graph::WorkProduct;
@@ -150,17 +150,18 @@ fn get_bitcode_slice_from_object_data<'a>(
/// for further optimization.
pub(crate) fn run_fat(
cgcx: &CodegenContext,
+ shared_emitter: &SharedEmitter,
exported_symbols_for_lto: &[String],
each_linked_rlib_for_lto: &[PathBuf],
    modules: Vec<FatLtoInput<LlvmCodegenBackend>>,
) -> ModuleCodegen {
- let dcx = cgcx.create_dcx();
+ let dcx = DiagCtxt::new(Box::new(shared_emitter.clone()));
let dcx = dcx.handle();
let (symbols_below_threshold, upstream_modules) =
prepare_lto(cgcx, exported_symbols_for_lto, each_linked_rlib_for_lto, dcx);
let symbols_below_threshold =
symbols_below_threshold.iter().map(|c| c.as_ptr()).collect::<Vec<_>>();
- fat_lto(cgcx, dcx, modules, upstream_modules, &symbols_below_threshold)
+ fat_lto(cgcx, dcx, shared_emitter, modules, upstream_modules, &symbols_below_threshold)
}
/// Performs thin LTO by performing necessary global analysis and returning two
@@ -168,18 +169,17 @@ pub(crate) fn run_fat(
/// can simply be copied over from the incr. comp. cache.
pub(crate) fn run_thin(
cgcx: &CodegenContext,
+ dcx: DiagCtxtHandle<'_>,
exported_symbols_for_lto: &[String],
each_linked_rlib_for_lto: &[PathBuf],
modules: Vec<(String, ThinBuffer)>,
cached_modules: Vec<(SerializedModule, WorkProduct)>,
) -> (Vec>, Vec) {
- let dcx = cgcx.create_dcx();
- let dcx = dcx.handle();
let (symbols_below_threshold, upstream_modules) =
prepare_lto(cgcx, exported_symbols_for_lto, each_linked_rlib_for_lto, dcx);
let symbols_below_threshold =
symbols_below_threshold.iter().map(|c| c.as_ptr()).collect::<Vec<_>>();
- if cgcx.opts.cg.linker_plugin_lto.enabled() {
+ if cgcx.use_linker_plugin_lto {
unreachable!(
"We should never reach this case if the LTO step \
is deferred to the linker"
@@ -197,6 +197,7 @@ pub(crate) fn prepare_thin(module: ModuleCodegen) -> (String, ThinBu
fn fat_lto(
cgcx: &CodegenContext,
dcx: DiagCtxtHandle<'_>,
+ shared_emitter: &SharedEmitter,
    modules: Vec<FatLtoInput<LlvmCodegenBackend>>,
mut serialized_modules: Vec<(SerializedModule, CString)>,
symbols_below_threshold: &[*const libc::c_char],
@@ -265,8 +266,13 @@ fn fat_lto(
// The linking steps below may produce errors and diagnostics within LLVM
// which we'd like to handle and print, so set up our diagnostic handlers
// (which get unregistered when they go out of scope below).
- let _handler =
- DiagnosticHandlers::new(cgcx, dcx, llcx, &module, CodegenDiagnosticsStage::LTO);
+ let _handler = DiagnosticHandlers::new(
+ cgcx,
+ shared_emitter,
+ llcx,
+ &module,
+ CodegenDiagnosticsStage::LTO,
+ );
// For all other modules we codegened we'll need to link them into our own
// bitcode. All modules were codegened in their own LLVM context, however,
@@ -720,10 +726,11 @@ impl Drop for ThinBuffer {
}
pub(crate) fn optimize_thin_module(
- thin_module: ThinModule,
cgcx: &CodegenContext,
+ shared_emitter: &SharedEmitter,
+ thin_module: ThinModule,
) -> ModuleCodegen {
- let dcx = cgcx.create_dcx();
+ let dcx = DiagCtxt::new(Box::new(shared_emitter.clone()));
let dcx = dcx.handle();
let module_name = &thin_module.shared.module_names[thin_module.idx];
diff --git a/compiler/rustc_codegen_llvm/src/back/write.rs b/compiler/rustc_codegen_llvm/src/back/write.rs
index bcadb6f0de92..e66a4ab6b37b 100644
--- a/compiler/rustc_codegen_llvm/src/back/write.rs
+++ b/compiler/rustc_codegen_llvm/src/back/write.rs
@@ -9,15 +9,16 @@ use libc::{c_char, c_int, c_void, size_t};
use rustc_codegen_ssa::back::link::ensure_removed;
use rustc_codegen_ssa::back::versioned_llvm_target;
use rustc_codegen_ssa::back::write::{
- BitcodeSection, CodegenContext, EmitObj, InlineAsmError, ModuleConfig,
+ BitcodeSection, CodegenContext, EmitObj, InlineAsmError, ModuleConfig, SharedEmitter,
TargetMachineFactoryConfig, TargetMachineFactoryFn,
};
use rustc_codegen_ssa::base::wants_wasm_eh;
+use rustc_codegen_ssa::common::TypeKind;
use rustc_codegen_ssa::traits::*;
use rustc_codegen_ssa::{CompiledModule, ModuleCodegen, ModuleKind};
use rustc_data_structures::profiling::SelfProfilerRef;
use rustc_data_structures::small_c_str::SmallCStr;
-use rustc_errors::{DiagCtxtHandle, Level};
+use rustc_errors::{DiagCtxt, DiagCtxtHandle, Level};
use rustc_fs_util::{link_or_copy, path_to_c_string};
use rustc_middle::ty::TyCtxt;
use rustc_session::Session;
@@ -33,6 +34,8 @@ use crate::back::owned_target_machine::OwnedTargetMachine;
use crate::back::profiling::{
LlvmSelfProfiler, selfprofile_after_pass_callback, selfprofile_before_pass_callback,
};
+use crate::builder::SBuilder;
+use crate::builder::gpu_offload::scalar_width;
use crate::common::AsCCharPtr;
use crate::errors::{
CopyBitcode, FromLlvmDiag, FromLlvmOptimizationDiag, LlvmError, UnknownCompression,
@@ -353,7 +356,7 @@ pub(crate) enum CodegenDiagnosticsStage {
}
pub(crate) struct DiagnosticHandlers<'a> {
- data: *mut (&'a CodegenContext, DiagCtxtHandle<'a>),
+ data: *mut (&'a CodegenContext, &'a SharedEmitter),
llcx: &'a llvm::Context,
old_handler: Option<&'a llvm::DiagnosticHandler>,
}
@@ -361,7 +364,7 @@ pub(crate) struct DiagnosticHandlers<'a> {
impl<'a> DiagnosticHandlers<'a> {
pub(crate) fn new(
cgcx: &'a CodegenContext,
- dcx: DiagCtxtHandle<'a>,
+ shared_emitter: &'a SharedEmitter,
llcx: &'a llvm::Context,
module: &ModuleCodegen,
stage: CodegenDiagnosticsStage,
@@ -395,8 +398,8 @@ impl<'a> DiagnosticHandlers<'a> {
})
.and_then(|dir| dir.to_str().and_then(|p| CString::new(p).ok()));
- let pgo_available = cgcx.opts.cg.profile_use.is_some();
- let data = Box::into_raw(Box::new((cgcx, dcx)));
+ let pgo_available = cgcx.module_config.pgo_use.is_some();
+ let data = Box::into_raw(Box::new((cgcx, shared_emitter)));
unsafe {
let old_handler = llvm::LLVMRustContextGetDiagnosticHandler(llcx);
llvm::LLVMRustContextConfigureDiagnosticHandler(
@@ -458,12 +461,16 @@ unsafe extern "C" fn diagnostic_handler(info: &DiagnosticInfo, user: *mut c_void
if user.is_null() {
return;
}
- let (cgcx, dcx) =
- unsafe { *(user as *const (&CodegenContext, DiagCtxtHandle<'_>)) };
+ let (cgcx, shared_emitter) =
+ unsafe { *(user as *const (&CodegenContext, &SharedEmitter)) };
+
+ let dcx = DiagCtxt::new(Box::new(shared_emitter.clone()));
+ let dcx = dcx.handle();
match unsafe { llvm::diagnostic::Diagnostic::unpack(info) } {
llvm::diagnostic::InlineAsm(inline) => {
- cgcx.diag_emitter.inline_asm_error(report_inline_asm(
+ // FIXME use dcx
+ shared_emitter.inline_asm_error(report_inline_asm(
cgcx,
inline.message,
inline.level,
@@ -669,7 +676,17 @@ pub(crate) unsafe fn llvm_optimize(
// Create the new parameter list, with ptr as the first argument
let mut new_param_types = Vec::with_capacity(old_param_count as usize + 1);
new_param_types.push(cx.type_ptr());
- new_param_types.extend(old_param_types);
+
+ // This relies on undocumented LLVM knowledge that scalars must be passed as i64
+ for &old_ty in &old_param_types {
+ let new_ty = match cx.type_kind(old_ty) {
+ TypeKind::Half | TypeKind::Float | TypeKind::Double | TypeKind::Integer => {
+ cx.type_i64()
+ }
+ _ => old_ty,
+ };
+ new_param_types.push(new_ty);
+ }
// Create the new function type
let ret_ty = unsafe { llvm::LLVMGetReturnType(old_fn_ty) };
@@ -682,10 +699,33 @@ pub(crate) unsafe fn llvm_optimize(
let a0 = llvm::get_param(new_fn, 0);
llvm::set_value_name(a0, CString::new("dyn_ptr").unwrap().as_bytes());
+ let bb = SBuilder::append_block(cx, new_fn, "entry");
+ let mut builder = SBuilder::build(cx, bb);
+
+ let mut old_args_rebuilt = Vec::with_capacity(old_param_types.len());
+
+ for (i, &old_ty) in old_param_types.iter().enumerate() {
+ let new_arg = llvm::get_param(new_fn, (i + 1) as u32);
+
+ let rebuilt = match cx.type_kind(old_ty) {
+ TypeKind::Half | TypeKind::Float | TypeKind::Double | TypeKind::Integer => {
+ let num_bits = scalar_width(cx, old_ty);
+
+ let trunc = builder.trunc(new_arg, cx.type_ix(num_bits));
+ builder.bitcast(trunc, old_ty)
+ }
+ _ => new_arg,
+ };
+
+ old_args_rebuilt.push(rebuilt);
+ }
+
+ builder.ret_void();
+
// Here we map the old arguments to the new arguments, with an offset of 1 to make sure
// that we don't use the newly added `%dyn_ptr`.
unsafe {
- llvm::LLVMRustOffloadMapper(old_fn, new_fn);
+ llvm::LLVMRustOffloadMapper(old_fn, new_fn, old_args_rebuilt.as_ptr());
}
llvm::set_linkage(new_fn, llvm::get_linkage(old_fn));
@@ -740,7 +780,7 @@ pub(crate) unsafe fn llvm_optimize(
&*module.module_llvm.tm.raw(),
to_pass_builder_opt_level(opt_level),
opt_stage,
- cgcx.opts.cg.linker_plugin_lto.enabled(),
+ cgcx.use_linker_plugin_lto,
config.no_prepopulate_passes,
config.verify_llvm_ir,
config.lint_llvm_ir,
@@ -851,14 +891,18 @@ pub(crate) unsafe fn llvm_optimize(
// Unsafe due to LLVM calls.
pub(crate) fn optimize(
cgcx: &CodegenContext,
- dcx: DiagCtxtHandle<'_>,
+ shared_emitter: &SharedEmitter,
module: &mut ModuleCodegen,
config: &ModuleConfig,
) {
let _timer = cgcx.prof.generic_activity_with_arg("LLVM_module_optimize", &*module.name);
+ let dcx = DiagCtxt::new(Box::new(shared_emitter.clone()));
+ let dcx = dcx.handle();
+
let llcx = &*module.module_llvm.llcx;
- let _handlers = DiagnosticHandlers::new(cgcx, dcx, llcx, module, CodegenDiagnosticsStage::Opt);
+ let _handlers =
+ DiagnosticHandlers::new(cgcx, shared_emitter, llcx, module, CodegenDiagnosticsStage::Opt);
if config.emit_no_opt_bc {
let out = cgcx.output_filenames.temp_path_ext_for_cgu(
@@ -875,7 +919,7 @@ pub(crate) fn optimize(
let opt_stage = match cgcx.lto {
Lto::Fat => llvm::OptStage::PreLinkFatLTO,
Lto::Thin | Lto::ThinLocal => llvm::OptStage::PreLinkThinLTO,
- _ if cgcx.opts.cg.linker_plugin_lto.enabled() => llvm::OptStage::PreLinkThinLTO,
+ _ if cgcx.use_linker_plugin_lto => llvm::OptStage::PreLinkThinLTO,
_ => llvm::OptStage::PreLinkNoLTO,
};
@@ -938,19 +982,26 @@ pub(crate) fn optimize(
pub(crate) fn codegen(
cgcx: &CodegenContext,
+ shared_emitter: &SharedEmitter,
module: ModuleCodegen,
config: &ModuleConfig,
) -> CompiledModule {
- let dcx = cgcx.create_dcx();
+ let _timer = cgcx.prof.generic_activity_with_arg("LLVM_module_codegen", &*module.name);
+
+ let dcx = DiagCtxt::new(Box::new(shared_emitter.clone()));
let dcx = dcx.handle();
- let _timer = cgcx.prof.generic_activity_with_arg("LLVM_module_codegen", &*module.name);
{
let llmod = module.module_llvm.llmod();
let llcx = &*module.module_llvm.llcx;
let tm = &*module.module_llvm.tm;
- let _handlers =
- DiagnosticHandlers::new(cgcx, dcx, llcx, &module, CodegenDiagnosticsStage::Codegen);
+ let _handlers = DiagnosticHandlers::new(
+ cgcx,
+ shared_emitter,
+ llcx,
+ &module,
+ CodegenDiagnosticsStage::Codegen,
+ );
if cgcx.msvc_imps_needed {
create_msvc_imps(cgcx, llcx, llmod);
diff --git a/compiler/rustc_codegen_llvm/src/builder.rs b/compiler/rustc_codegen_llvm/src/builder.rs
index 7a49ba64029e..9379faf1156f 100644
--- a/compiler/rustc_codegen_llvm/src/builder.rs
+++ b/compiler/rustc_codegen_llvm/src/builder.rs
@@ -97,6 +97,21 @@ impl<'a, 'll, CX: Borrow>> GenericBuilder<'a, 'll, CX> {
GenericBuilder { llbuilder, cx: scx }
}
+ pub(crate) fn append_block(
+ cx: &'a GenericCx<'ll, CX>,
+ llfn: &'ll Value,
+ name: &str,
+ ) -> &'ll BasicBlock {
+ unsafe {
+ let name = SmallCStr::new(name);
+ llvm::LLVMAppendBasicBlockInContext(cx.llcx(), llfn, name.as_ptr())
+ }
+ }
+
+ pub(crate) fn trunc(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
+ unsafe { llvm::LLVMBuildTrunc(self.llbuilder, val, dest_ty, UNNAMED) }
+ }
+
pub(crate) fn bitcast(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
unsafe { llvm::LLVMBuildBitCast(self.llbuilder, val, dest_ty, UNNAMED) }
}
@@ -1773,6 +1788,9 @@ impl<'a, 'll, 'tcx> Builder<'a, 'll, 'tcx> {
}
if crate::llvm_util::get_version() >= (22, 0, 0) {
+ // LLVM 22 requires the lifetime intrinsic to act directly on the alloca,
+ // there can't be an addrspacecast in between.
+ let ptr = unsafe { llvm::LLVMRustStripPointerCasts(ptr) };
self.call_intrinsic(intrinsic, &[self.val_ty(ptr)], &[ptr]);
} else {
self.call_intrinsic(intrinsic, &[self.val_ty(ptr)], &[self.cx.const_u64(size), ptr]);
diff --git a/compiler/rustc_codegen_llvm/src/builder/gpu_offload.rs b/compiler/rustc_codegen_llvm/src/builder/gpu_offload.rs
index 084d40317ba8..f1735b9a0f58 100644
--- a/compiler/rustc_codegen_llvm/src/builder/gpu_offload.rs
+++ b/compiler/rustc_codegen_llvm/src/builder/gpu_offload.rs
@@ -2,6 +2,7 @@ use std::ffi::CString;
use llvm::Linkage::*;
use rustc_abi::Align;
+use rustc_codegen_ssa::common::TypeKind;
use rustc_codegen_ssa::mir::operand::{OperandRef, OperandValue};
use rustc_codegen_ssa::traits::{BaseTypeCodegenMethods, BuilderMethods};
use rustc_middle::bug;
@@ -361,7 +362,6 @@ pub(crate) fn add_global<'ll>(
pub(crate) fn gen_define_handling<'ll>(
cx: &CodegenCx<'ll, '_>,
metadata: &[OffloadMetadata],
- types: &[&'ll Type],
symbol: String,
offload_globals: &OffloadGlobals<'ll>,
) -> OffloadKernelGlobals<'ll> {
@@ -371,25 +371,18 @@ pub(crate) fn gen_define_handling<'ll>(
let offload_entry_ty = offload_globals.offload_entry_ty;
- // It seems like non-pointer values are automatically mapped. So here, we focus on pointer (or
- // reference) types.
- let ptr_meta = types.iter().zip(metadata).filter_map(|(&x, meta)| match cx.type_kind(x) {
- rustc_codegen_ssa::common::TypeKind::Pointer => Some(meta),
- _ => None,
- });
-
// FIXME(Sa4dUs): add `OMP_MAP_TARGET_PARAM = 0x20` only if necessary
- let (ptr_sizes, ptr_transfer): (Vec<_>, Vec<_>) =
- ptr_meta.map(|m| (m.payload_size, m.mode.bits() | 0x20)).unzip();
+ let (sizes, transfer): (Vec<_>, Vec<_>) =
+ metadata.iter().map(|m| (m.payload_size, m.mode.bits() | 0x20)).unzip();
- let offload_sizes = add_priv_unnamed_arr(&cx, &format!(".offload_sizes.{symbol}"), &ptr_sizes);
+ let offload_sizes = add_priv_unnamed_arr(&cx, &format!(".offload_sizes.{symbol}"), &sizes);
// Here we figure out whether something needs to be copied to the gpu (=1), from the gpu (=2),
// or both to and from the gpu (=3). Other values shouldn't affect us for now.
// A non-mutable reference or pointer will be 1, an array that's not read, but fully overwritten
// will be 2. For now, everything is 3, until we have our frontend set up.
// 1+2+32: 1 (MapTo), 2 (MapFrom), 32 (Add one extra input ptr per function, to be used later).
let memtransfer_types =
- add_priv_unnamed_arr(&cx, &format!(".offload_maptypes.{symbol}"), &ptr_transfer);
+ add_priv_unnamed_arr(&cx, &format!(".offload_maptypes.{symbol}"), &transfer);
// Next: For each function, generate these three entries. A weak constant,
// the llvm.rodata entry name, and the llvm_offload_entries value
@@ -445,13 +438,25 @@ fn declare_offload_fn<'ll>(
)
}
+pub(crate) fn scalar_width<'ll>(cx: &'ll SimpleCx<'_>, ty: &'ll Type) -> u64 {
+ match cx.type_kind(ty) {
+ TypeKind::Half
+ | TypeKind::Float
+ | TypeKind::Double
+ | TypeKind::X86_FP80
+ | TypeKind::FP128
+ | TypeKind::PPC_FP128 => cx.float_width(ty) as u64,
+ TypeKind::Integer => cx.int_width(ty),
+ other => bug!("scalar_width was called on a non scalar type {other:?}"),
+ }
+}
+
// For each kernel *call*, we now use some of our previous declared globals to move data to and from
// the gpu. For now, we only handle the data transfer part of it.
// If two consecutive kernels use the same memory, we still move it to the host and back to the gpu.
// Since in our frontend users (by default) don't have to specify data transfer, this is something
-// we should optimize in the future! We also assume that everything should be copied back and forth,
-// but sometimes we can directly zero-allocate on the device and only move back, or if something is
-// immutable, we might only copy it to the device, but not back.
+// we should optimize in the future! In some cases we can directly zero-allocate on the device and
+// only move data back, or if something is immutable, we might only copy it to the device.
//
// Current steps:
// 0. Alloca some variables for the following steps
@@ -538,8 +543,34 @@ pub(crate) fn gen_call_handling<'ll, 'tcx>(
let mut geps = vec![];
let i32_0 = cx.get_const_i32(0);
for &v in args {
- let gep = builder.inbounds_gep(cx.type_f32(), v, &[i32_0]);
- vals.push(v);
+ let ty = cx.val_ty(v);
+ let ty_kind = cx.type_kind(ty);
+ let (base_val, gep_base) = match ty_kind {
+ TypeKind::Pointer => (v, v),
+ TypeKind::Half | TypeKind::Float | TypeKind::Double | TypeKind::Integer => {
+ // FIXME(Sa4dUs): check for `f128` support, latest NVIDIA cards support it
+ let num_bits = scalar_width(cx, ty);
+
+ let bb = builder.llbb();
+ unsafe {
+ llvm::LLVMRustPositionBuilderPastAllocas(builder.llbuilder, builder.llfn());
+ }
+ let addr = builder.direct_alloca(cx.type_i64(), Align::EIGHT, "addr");
+ unsafe {
+ llvm::LLVMPositionBuilderAtEnd(builder.llbuilder, bb);
+ }
+
+ let cast = builder.bitcast(v, cx.type_ix(num_bits));
+ let value = builder.zext(cast, cx.type_i64());
+ builder.store(value, addr, Align::EIGHT);
+ (value, addr)
+ }
+ other => bug!("offload does not support {other:?}"),
+ };
+
+ let gep = builder.inbounds_gep(cx.type_f32(), gep_base, &[i32_0]);
+
+ vals.push(base_val);
geps.push(gep);
}
diff --git a/compiler/rustc_codegen_llvm/src/context.rs b/compiler/rustc_codegen_llvm/src/context.rs
index 4b2544b7efdf..2760683dad9d 100644
--- a/compiler/rustc_codegen_llvm/src/context.rs
+++ b/compiler/rustc_codegen_llvm/src/context.rs
@@ -215,6 +215,17 @@ pub(crate) unsafe fn create_module<'ll>(
// LLVM 22 updated the ABI alignment for double on AIX: https://github.com/llvm/llvm-project/pull/144673
target_data_layout = target_data_layout.replace("-f64:32:64", "");
}
+ if sess.target.arch == Arch::AmdGpu {
+ // LLVM 22 specified ELF mangling in the amdgpu data layout:
+ // https://github.com/llvm/llvm-project/pull/163011
+ target_data_layout = target_data_layout.replace("-m:e", "");
+ }
+ }
+ if llvm_version < (23, 0, 0) {
+ if sess.target.arch == Arch::S390x {
+ // LLVM 23 updated the s390x layout to specify the stack alignment: https://github.com/llvm/llvm-project/pull/176041
+ target_data_layout = target_data_layout.replace("-S64", "");
+ }
}
// Ensure the data-layout values hardcoded remain the defaults.
diff --git a/compiler/rustc_codegen_llvm/src/intrinsic.rs b/compiler/rustc_codegen_llvm/src/intrinsic.rs
index 565db7d298bc..a712b7b4138c 100644
--- a/compiler/rustc_codegen_llvm/src/intrinsic.rs
+++ b/compiler/rustc_codegen_llvm/src/intrinsic.rs
@@ -269,14 +269,6 @@ impl<'ll, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> {
return Ok(());
}
sym::breakpoint => self.call_intrinsic("llvm.debugtrap", &[], &[]),
- sym::va_copy => {
- let dest = args[0].immediate();
- self.call_intrinsic(
- "llvm.va_copy",
- &[self.val_ty(dest)],
- &[dest, args[1].immediate()],
- )
- }
sym::va_arg => {
match result.layout.backend_repr {
BackendRepr::Scalar(scalar) => {
@@ -1394,7 +1386,8 @@ fn codegen_offload<'ll, 'tcx>(
let args = get_args_from_tuple(bx, args[3], fn_target);
let target_symbol = symbol_name_for_instance_in_crate(tcx, fn_target, LOCAL_CRATE);
- let sig = tcx.fn_sig(fn_target.def_id()).skip_binder().skip_binder();
+ let sig = tcx.fn_sig(fn_target.def_id()).skip_binder();
+ let sig = tcx.instantiate_bound_regions_with_erased(sig);
let inputs = sig.inputs();
let metadata = inputs.iter().map(|ty| OffloadMetadata::from_ty(tcx, *ty)).collect::<Vec<_>>();
@@ -1409,7 +1402,7 @@ fn codegen_offload<'ll, 'tcx>(
return;
}
};
- let offload_data = gen_define_handling(&cx, &metadata, &types, target_symbol, offload_globals);
+ let offload_data = gen_define_handling(&cx, &metadata, target_symbol, offload_globals);
gen_call_handling(bx, &offload_data, &args, &types, &metadata, offload_globals, &offload_dims);
}
diff --git a/compiler/rustc_codegen_llvm/src/lib.rs b/compiler/rustc_codegen_llvm/src/lib.rs
index e0007f69828d..5879132eb9fb 100644
--- a/compiler/rustc_codegen_llvm/src/lib.rs
+++ b/compiler/rustc_codegen_llvm/src/lib.rs
@@ -30,12 +30,13 @@ use llvm_util::target_config;
use rustc_ast::expand::allocator::AllocatorMethod;
use rustc_codegen_ssa::back::lto::{SerializedModule, ThinModule};
use rustc_codegen_ssa::back::write::{
- CodegenContext, FatLtoInput, ModuleConfig, TargetMachineFactoryConfig, TargetMachineFactoryFn,
+ CodegenContext, FatLtoInput, ModuleConfig, SharedEmitter, TargetMachineFactoryConfig,
+ TargetMachineFactoryFn,
};
use rustc_codegen_ssa::traits::*;
use rustc_codegen_ssa::{CodegenResults, CompiledModule, ModuleCodegen, TargetConfig};
use rustc_data_structures::fx::FxIndexMap;
-use rustc_errors::DiagCtxtHandle;
+use rustc_errors::{DiagCtxt, DiagCtxtHandle};
use rustc_metadata::EncodedMetadata;
use rustc_middle::dep_graph::{WorkProduct, WorkProductId};
use rustc_middle::ty::TyCtxt;
@@ -166,14 +167,20 @@ impl WriteBackendMethods for LlvmCodegenBackend {
}
fn run_and_optimize_fat_lto(
cgcx: &CodegenContext,
+ shared_emitter: &SharedEmitter,
exported_symbols_for_lto: &[String],
each_linked_rlib_for_lto: &[PathBuf],
        modules: Vec<FatLtoInput<Self>>,
) -> ModuleCodegen {
- let mut module =
- back::lto::run_fat(cgcx, exported_symbols_for_lto, each_linked_rlib_for_lto, modules);
+ let mut module = back::lto::run_fat(
+ cgcx,
+ shared_emitter,
+ exported_symbols_for_lto,
+ each_linked_rlib_for_lto,
+ modules,
+ );
- let dcx = cgcx.create_dcx();
+ let dcx = DiagCtxt::new(Box::new(shared_emitter.clone()));
let dcx = dcx.handle();
back::lto::run_pass_manager(cgcx, dcx, &mut module, false);
@@ -181,6 +188,7 @@ impl WriteBackendMethods for LlvmCodegenBackend {
}
fn run_thin_lto(
cgcx: &CodegenContext,
+ dcx: DiagCtxtHandle<'_>,
exported_symbols_for_lto: &[String],
each_linked_rlib_for_lto: &[PathBuf],
modules: Vec<(String, Self::ThinBuffer)>,
@@ -188,6 +196,7 @@ impl WriteBackendMethods for LlvmCodegenBackend {
) -> (Vec>, Vec) {
back::lto::run_thin(
cgcx,
+ dcx,
exported_symbols_for_lto,
each_linked_rlib_for_lto,
modules,
@@ -196,24 +205,26 @@ impl WriteBackendMethods for LlvmCodegenBackend {
}
fn optimize(
cgcx: &CodegenContext,
- dcx: DiagCtxtHandle<'_>,
+ shared_emitter: &SharedEmitter,
module: &mut ModuleCodegen,
config: &ModuleConfig,
) {
- back::write::optimize(cgcx, dcx, module, config)
+ back::write::optimize(cgcx, shared_emitter, module, config)
}
fn optimize_thin(
cgcx: &CodegenContext,
+ shared_emitter: &SharedEmitter,
thin: ThinModule,
) -> ModuleCodegen {
- back::lto::optimize_thin_module(thin, cgcx)
+ back::lto::optimize_thin_module(cgcx, shared_emitter, thin)
}
fn codegen(
cgcx: &CodegenContext,
+ shared_emitter: &SharedEmitter,
module: ModuleCodegen,
config: &ModuleConfig,
) -> CompiledModule {
- back::write::codegen(cgcx, module, config)
+ back::write::codegen(cgcx, shared_emitter, module, config)
}
fn prepare_thin(module: ModuleCodegen) -> (String, Self::ThinBuffer) {
back::lto::prepare_thin(module)
diff --git a/compiler/rustc_codegen_llvm/src/llvm/ffi.rs b/compiler/rustc_codegen_llvm/src/llvm/ffi.rs
index a90013801c8c..a3d4e9f9d32a 100644
--- a/compiler/rustc_codegen_llvm/src/llvm/ffi.rs
+++ b/compiler/rustc_codegen_llvm/src/llvm/ffi.rs
@@ -1675,7 +1675,11 @@ mod Offload {
_M: &'a Module,
_host_out: *const c_char,
) -> bool;
- pub(crate) fn LLVMRustOffloadMapper<'a>(OldFn: &'a Value, NewFn: &'a Value);
+ pub(crate) fn LLVMRustOffloadMapper<'a>(
+ OldFn: &'a Value,
+ NewFn: &'a Value,
+ RebuiltArgs: *const &Value,
+ );
}
}
@@ -1702,7 +1706,11 @@ mod Offload_fallback {
unimplemented!("This rustc version was not built with LLVM Offload support!");
}
#[allow(unused_unsafe)]
- pub(crate) unsafe fn LLVMRustOffloadMapper<'a>(_OldFn: &'a Value, _NewFn: &'a Value) {
+ pub(crate) unsafe fn LLVMRustOffloadMapper<'a>(
+ _OldFn: &'a Value,
+ _NewFn: &'a Value,
+ _RebuiltArgs: *const &Value,
+ ) {
unimplemented!("This rustc version was not built with LLVM Offload support!");
}
}
@@ -1959,6 +1967,7 @@ unsafe extern "C" {
Metadata: &'a Metadata,
);
pub(crate) fn LLVMRustIsNonGVFunctionPointerTy(Val: &Value) -> bool;
+ pub(crate) fn LLVMRustStripPointerCasts<'a>(Val: &'a Value) -> &'a Value;
// Operations on scalar constants
pub(crate) fn LLVMRustConstIntGetZExtValue(ConstantVal: &ConstantInt, Value: &mut u64) -> bool;
diff --git a/compiler/rustc_codegen_llvm/src/llvm_util.rs b/compiler/rustc_codegen_llvm/src/llvm_util.rs
index 63f820dc2918..fbb582fe8601 100644
--- a/compiler/rustc_codegen_llvm/src/llvm_util.rs
+++ b/compiler/rustc_codegen_llvm/src/llvm_util.rs
@@ -287,12 +287,12 @@ pub(crate) fn to_llvm_features<'a>(sess: &Session, s: &'a str) -> Option<LLVMFeature<'a>>
         "cmpxchg16b" => Some(LLVMFeature::new("cx16")),
"lahfsahf" => Some(LLVMFeature::new("sahf")),
// Enable the evex512 target feature if an avx512 target feature is enabled.
- s if s.starts_with("avx512") => Some(LLVMFeature::with_dependencies(
+ s if s.starts_with("avx512") && major < 22 => Some(LLVMFeature::with_dependencies(
s,
smallvec![TargetFeatureFoldStrength::EnableOnly("evex512")],
)),
- "avx10.1" => Some(LLVMFeature::new("avx10.1-512")),
- "avx10.2" => Some(LLVMFeature::new("avx10.2-512")),
+ "avx10.1" if major < 22 => Some(LLVMFeature::new("avx10.1-512")),
+ "avx10.2" if major < 22 => Some(LLVMFeature::new("avx10.2-512")),
"apxf" => Some(LLVMFeature::with_dependencies(
"egpr",
smallvec![
diff --git a/compiler/rustc_codegen_ssa/messages.ftl b/compiler/rustc_codegen_ssa/messages.ftl
index 4875f309cca5..a49f411a7df6 100644
--- a/compiler/rustc_codegen_ssa/messages.ftl
+++ b/compiler/rustc_codegen_ssa/messages.ftl
@@ -48,8 +48,6 @@ codegen_ssa_error_creating_remark_dir = failed to create remark directory: {$err
codegen_ssa_error_writing_def_file =
error writing .DEF file: {$error}
-codegen_ssa_expected_name_value_pair = expected name value pair
-
codegen_ssa_extern_funcs_not_found = some `extern` functions couldn't be found; some native libraries may need to be installed or have their path specified
codegen_ssa_extract_bundled_libs_archive_member = failed to get data from archive member '{$rlib}': {$error}
@@ -90,9 +88,6 @@ codegen_ssa_incorrect_cgu_reuse_type =
codegen_ssa_insufficient_vs_code_product = VS Code is a different product, and is not sufficient.
-codegen_ssa_invalid_literal_value = invalid literal value
- .label = value must be an integer between `0` and `255`
-
codegen_ssa_invalid_monomorphization_basic_float_type = invalid monomorphization of `{$name}` intrinsic: expected basic float type, found `{$ty}`
codegen_ssa_invalid_monomorphization_basic_integer_or_ptr_type = invalid monomorphization of `{$name}` intrinsic: expected basic integer or pointer type, found `{$ty}`
@@ -225,9 +220,6 @@ codegen_ssa_no_natvis_directory = error enumerating natvis directory: {$error}
codegen_ssa_no_saved_object_file = cached cgu {$cgu_name} should have an object file, but doesn't
-codegen_ssa_out_of_range_integer = integer value out of range
- .label = value must be between `0` and `255`
-
codegen_ssa_processing_dymutil_failed = processing debug info with `dsymutil` failed: {$status}
.note = {$output}
@@ -357,9 +349,6 @@ codegen_ssa_unable_to_run_dsymutil = unable to run `dsymutil`: {$error}
codegen_ssa_unable_to_write_debugger_visualizer = unable to write debugger visualizer file `{$path}`: {$error}
-codegen_ssa_unexpected_parameter_name = unexpected parameter name
- .label = expected `{$prefix_nops}` or `{$entry_nops}`
-
codegen_ssa_unknown_archive_kind =
don't know how to build archive of type: {$kind}
diff --git a/compiler/rustc_codegen_ssa/src/back/lto.rs b/compiler/rustc_codegen_ssa/src/back/lto.rs
index e6df6a2469f3..ef4c193c4c2a 100644
--- a/compiler/rustc_codegen_ssa/src/back/lto.rs
+++ b/compiler/rustc_codegen_ssa/src/back/lto.rs
@@ -2,6 +2,7 @@ use std::ffi::CString;
use std::sync::Arc;
use rustc_data_structures::memmap::Mmap;
+use rustc_errors::DiagCtxtHandle;
use rustc_hir::def_id::{CrateNum, LOCAL_CRATE};
use rustc_middle::middle::exported_symbols::{ExportedSymbol, SymbolExportInfo, SymbolExportLevel};
use rustc_middle::ty::TyCtxt;
@@ -124,28 +125,29 @@ pub(super) fn exported_symbols_for_lto(
symbols_below_threshold
}
-pub(super) fn check_lto_allowed(cgcx: &CodegenContext) {
+pub(super) fn check_lto_allowed(
+ cgcx: &CodegenContext,
+ dcx: DiagCtxtHandle<'_>,
+) {
if cgcx.lto == Lto::ThinLocal {
// Crate local LTO is always allowed
return;
}
- let dcx = cgcx.create_dcx();
-
// Make sure we actually can run LTO
for crate_type in cgcx.crate_types.iter() {
if !crate_type_allows_lto(*crate_type) {
dcx.handle().emit_fatal(LtoDisallowed);
} else if *crate_type == CrateType::Dylib {
- if !cgcx.opts.unstable_opts.dylib_lto {
+ if !cgcx.dylib_lto {
dcx.handle().emit_fatal(LtoDylib);
}
- } else if *crate_type == CrateType::ProcMacro && !cgcx.opts.unstable_opts.dylib_lto {
+ } else if *crate_type == CrateType::ProcMacro && !cgcx.dylib_lto {
dcx.handle().emit_fatal(LtoProcMacro);
}
}
- if cgcx.opts.cg.prefer_dynamic && !cgcx.opts.unstable_opts.dylib_lto {
+ if cgcx.prefer_dynamic && !cgcx.dylib_lto {
dcx.handle().emit_fatal(DynamicLinkingWithLTO);
}
}
diff --git a/compiler/rustc_codegen_ssa/src/back/write.rs b/compiler/rustc_codegen_ssa/src/back/write.rs
index 53121fc6275b..af9c61e17768 100644
--- a/compiler/rustc_codegen_ssa/src/back/write.rs
+++ b/compiler/rustc_codegen_ssa/src/back/write.rs
@@ -15,8 +15,8 @@ use rustc_data_structures::profiling::{SelfProfilerRef, VerboseTimingGuard};
use rustc_errors::emitter::Emitter;
use rustc_errors::translation::Translator;
use rustc_errors::{
- Diag, DiagArgMap, DiagCtxt, DiagMessage, ErrCode, FatalError, FatalErrorMarker, Level,
- MultiSpan, Style, Suggestions,
+ Diag, DiagArgMap, DiagCtxt, DiagCtxtHandle, DiagMessage, ErrCode, FatalError, FatalErrorMarker,
+ Level, MultiSpan, Style, Suggestions, catch_fatal_errors,
};
use rustc_fs_util::link_or_copy;
use rustc_incremental::{
@@ -326,15 +326,16 @@ pub struct CodegenContext {
// Resources needed when running LTO
pub prof: SelfProfilerRef,
pub lto: Lto,
+ pub use_linker_plugin_lto: bool,
+ pub dylib_lto: bool,
+ pub prefer_dynamic: bool,
pub save_temps: bool,
pub fewer_names: bool,
pub time_trace: bool,
- pub opts: Arc,
pub crate_types: Vec,
pub output_filenames: Arc,
pub invocation_temp: Option,
pub module_config: Arc,
- pub allocator_config: Arc,
pub tm_factory: TargetMachineFactoryFn,
pub msvc_imps_needed: bool,
pub is_pe_coff: bool,
@@ -347,8 +348,6 @@ pub struct CodegenContext {
pub split_dwarf_kind: rustc_session::config::SplitDwarfKind,
pub pointer_size: Size,
- /// Emitter to use for diagnostics produced during codegen.
- pub diag_emitter: SharedEmitter,
/// LLVM optimizations for which we want to print remarks.
pub remark: Passes,
/// Directory into which should the LLVM optimization remarks be written.
@@ -363,14 +362,9 @@ pub struct CodegenContext {
pub parallel: bool,
}
-impl CodegenContext {
- pub fn create_dcx(&self) -> DiagCtxt {
- DiagCtxt::new(Box::new(self.diag_emitter.clone()))
- }
-}
-
fn generate_thin_lto_work(
cgcx: &CodegenContext,
+ dcx: DiagCtxtHandle<'_>,
exported_symbols_for_lto: &[String],
each_linked_rlib_for_lto: &[PathBuf],
needs_thin_lto: Vec<(String, B::ThinBuffer)>,
@@ -380,6 +374,7 @@ fn generate_thin_lto_work(
let (lto_modules, copy_jobs) = B::run_thin_lto(
cgcx,
+ dcx,
exported_symbols_for_lto,
each_linked_rlib_for_lto,
needs_thin_lto,
@@ -408,6 +403,29 @@ struct CompiledModules {
allocator_module: Option,
}
+enum MaybeLtoModules {
+ NoLto {
+ modules: Vec,
+ allocator_module: Option,
+ },
+ FatLto {
+ cgcx: CodegenContext,
+ exported_symbols_for_lto: Arc>,
+ each_linked_rlib_file_for_lto: Vec,
+ needs_fat_lto: Vec>,
+ lto_import_only_modules:
+ Vec<(SerializedModule<::ModuleBuffer>, WorkProduct)>,
+ },
+ ThinLto {
+ cgcx: CodegenContext,
+ exported_symbols_for_lto: Arc>,
+ each_linked_rlib_file_for_lto: Vec,
+ needs_thin_lto: Vec<(String, ::ThinBuffer)>,
+ lto_import_only_modules:
+ Vec<(SerializedModule<::ModuleBuffer>, WorkProduct)>,
+ },
+}
+
fn need_bitcode_in_object(tcx: TyCtxt<'_>) -> bool {
let sess = tcx.sess;
sess.opts.cg.embed_bitcode
@@ -797,20 +815,12 @@ pub(crate) enum ComputedLtoType {
pub(crate) fn compute_per_cgu_lto_type(
sess_lto: &Lto,
- opts: &config::Options,
+ linker_does_lto: bool,
sess_crate_types: &[CrateType],
- module_kind: ModuleKind,
) -> ComputedLtoType {
// If the linker does LTO, we don't have to do it. Note that we
// keep doing full LTO, if it is requested, as not to break the
// assumption that the output will be a single module.
- let linker_does_lto = opts.cg.linker_plugin_lto.enabled();
-
- // When we're automatically doing ThinLTO for multi-codegen-unit
- // builds we don't actually want to LTO the allocator module if
- // it shows up. This is due to various linker shenanigans that
- // we'll encounter later.
- let is_allocator = module_kind == ModuleKind::Allocator;
// We ignore a request for full crate graph LTO if the crate type
// is only an rlib, as there is no full crate graph to process,
@@ -823,7 +833,7 @@ pub(crate) fn compute_per_cgu_lto_type(
let is_rlib = matches!(sess_crate_types, [CrateType::Rlib]);
match sess_lto {
- Lto::ThinLocal if !linker_does_lto && !is_allocator => ComputedLtoType::Thin,
+ Lto::ThinLocal if !linker_does_lto => ComputedLtoType::Thin,
Lto::Thin if !linker_does_lto && !is_rlib => ComputedLtoType::Thin,
Lto::Fat if !is_rlib => ComputedLtoType::Fat,
_ => ComputedLtoType::No,
@@ -832,30 +842,24 @@ pub(crate) fn compute_per_cgu_lto_type(
fn execute_optimize_work_item<B: ExtraBackendMethods>(
    cgcx: &CodegenContext<B>,
+    shared_emitter: SharedEmitter,
    mut module: ModuleCodegen<B::Module>,
) -> WorkItemResult<B> {
let _timer = cgcx.prof.generic_activity_with_arg("codegen_module_optimize", &*module.name);
- let dcx = cgcx.create_dcx();
- let dcx = dcx.handle();
-
- let module_config = match module.kind {
- ModuleKind::Regular => &cgcx.module_config,
- ModuleKind::Allocator => &cgcx.allocator_config,
- };
-
- B::optimize(cgcx, dcx, &mut module, module_config);
+ B::optimize(cgcx, &shared_emitter, &mut module, &cgcx.module_config);
// After we've done the initial round of optimizations we need to
// decide whether to synchronously codegen this module or ship it
// back to the coordinator thread for further LTO processing (which
// has to wait for all the initial modules to be optimized).
- let lto_type = compute_per_cgu_lto_type(&cgcx.lto, &cgcx.opts, &cgcx.crate_types, module.kind);
+ let lto_type =
+ compute_per_cgu_lto_type(&cgcx.lto, cgcx.use_linker_plugin_lto, &cgcx.crate_types);
// If we're doing some form of incremental LTO then we need to be sure to
// save our module to disk first.
- let bitcode = if module_config.emit_pre_lto_bc {
+ let bitcode = if cgcx.module_config.emit_pre_lto_bc {
let filename = pre_lto_bitcode_filename(&module.name);
cgcx.incr_comp_session_dir.as_ref().map(|path| path.join(&filename))
} else {
@@ -864,7 +868,7 @@ fn execute_optimize_work_item(
match lto_type {
ComputedLtoType::No => {
- let module = B::codegen(cgcx, module, module_config);
+ let module = B::codegen(cgcx, &shared_emitter, module, &cgcx.module_config);
WorkItemResult::Finished(module)
}
ComputedLtoType::Thin => {
@@ -894,12 +898,16 @@ fn execute_optimize_work_item(
fn execute_copy_from_cache_work_item(
cgcx: &CodegenContext,
+ shared_emitter: SharedEmitter,
module: CachedModuleCodegen,
) -> CompiledModule {
let _timer = cgcx
.prof
.generic_activity_with_arg("codegen_copy_artifacts_from_incr_cache", &*module.name);
+ let dcx = DiagCtxt::new(Box::new(shared_emitter));
+ let dcx = dcx.handle();
+
let incr_comp_session_dir = cgcx.incr_comp_session_dir.as_ref().unwrap();
let mut links_from_incr_cache = Vec::new();
@@ -918,11 +926,7 @@ fn execute_copy_from_cache_work_item(
Some(output_path)
}
Err(error) => {
- cgcx.create_dcx().handle().emit_err(errors::CopyPathBuf {
- source_file,
- output_path,
- error,
- });
+ dcx.emit_err(errors::CopyPathBuf { source_file, output_path, error });
None
}
}
@@ -965,7 +969,7 @@ fn execute_copy_from_cache_work_item(
let bytecode = load_from_incr_cache(module_config.emit_bc, OutputType::Bitcode);
let object = load_from_incr_cache(should_emit_obj, OutputType::Object);
if should_emit_obj && object.is_none() {
- cgcx.create_dcx().handle().emit_fatal(errors::NoSavedObjectFile { cgu_name: &module.name })
+ dcx.emit_fatal(errors::NoSavedObjectFile { cgu_name: &module.name })
}
CompiledModule {
@@ -982,6 +986,7 @@ fn execute_copy_from_cache_work_item(
fn do_fat_lto(
cgcx: &CodegenContext,
+ shared_emitter: SharedEmitter,
exported_symbols_for_lto: &[String],
each_linked_rlib_for_lto: &[PathBuf],
mut needs_fat_lto: Vec<FatLtoInput<B>>,
@@ -989,7 +994,10 @@ fn do_fat_lto(
) -> CompiledModule {
let _timer = cgcx.prof.verbose_generic_activity("LLVM_fatlto");
- check_lto_allowed(&cgcx);
+ let dcx = DiagCtxt::new(Box::new(shared_emitter.clone()));
+ let dcx = dcx.handle();
+
+ check_lto_allowed(&cgcx, dcx);
for (module, wp) in import_only_modules {
needs_fat_lto.push(FatLtoInput::Serialized { name: wp.cgu_name, buffer: module })
@@ -997,15 +1005,17 @@ fn do_fat_lto(
let module = B::run_and_optimize_fat_lto(
cgcx,
+ &shared_emitter,
exported_symbols_for_lto,
each_linked_rlib_for_lto,
needs_fat_lto,
);
- B::codegen(cgcx, module, &cgcx.module_config)
+ B::codegen(cgcx, &shared_emitter, module, &cgcx.module_config)
}
fn do_thin_lto<'a, B: ExtraBackendMethods>(
cgcx: &'a CodegenContext,
+ shared_emitter: SharedEmitter,
exported_symbols_for_lto: Arc<Vec<String>>,
each_linked_rlib_for_lto: Vec<PathBuf>,
needs_thin_lto: Vec<(String, B::ThinBuffer)>,
@@ -1016,7 +1026,10 @@ fn do_thin_lto<'a, B: ExtraBackendMethods>(
) -> Vec {
let _timer = cgcx.prof.verbose_generic_activity("LLVM_thinlto");
- check_lto_allowed(&cgcx);
+ let dcx = DiagCtxt::new(Box::new(shared_emitter.clone()));
+ let dcx = dcx.handle();
+
+ check_lto_allowed(&cgcx, dcx);
let (coordinator_send, coordinator_receive) = channel();
@@ -1041,6 +1054,7 @@ fn do_thin_lto<'a, B: ExtraBackendMethods>(
// we don't worry about tokens.
for (work, cost) in generate_thin_lto_work(
cgcx,
+ dcx,
&exported_symbols_for_lto,
&each_linked_rlib_for_lto,
needs_thin_lto,
@@ -1082,7 +1096,7 @@ fn do_thin_lto<'a, B: ExtraBackendMethods>(
while used_token_count < tokens.len() + 1
&& let Some((item, _)) = work_items.pop()
{
- spawn_thin_lto_work(&cgcx, coordinator_send.clone(), item);
+ spawn_thin_lto_work(&cgcx, shared_emitter.clone(), coordinator_send.clone(), item);
used_token_count += 1;
}
} else {
@@ -1106,7 +1120,7 @@ fn do_thin_lto<'a, B: ExtraBackendMethods>(
}
Err(e) => {
let msg = &format!("failed to acquire jobserver token: {e}");
- cgcx.diag_emitter.fatal(msg);
+ shared_emitter.fatal(msg);
codegen_aborted = Some(FatalError);
}
},
@@ -1144,12 +1158,13 @@ fn do_thin_lto<'a, B: ExtraBackendMethods>(
fn execute_thin_lto_work_item(
cgcx: &CodegenContext,
+ shared_emitter: SharedEmitter,
module: lto::ThinModule<B>,
) -> CompiledModule {
let _timer = cgcx.prof.generic_activity_with_arg("codegen_module_perform_lto", module.name());
- let module = B::optimize_thin(cgcx, module);
- B::codegen(cgcx, module, &cgcx.module_config)
+ let module = B::optimize_thin(cgcx, &shared_emitter, module);
+ B::codegen(cgcx, &shared_emitter, module, &cgcx.module_config)
}
/// Messages sent to the coordinator.
@@ -1245,9 +1260,9 @@ fn start_executing_work(
coordinator_receive: Receiver<Box<dyn Any + Send>>,
regular_config: Arc<ModuleConfig>,
allocator_config: Arc<ModuleConfig>,
-    allocator_module: Option<ModuleCodegen<B::Module>>,
+    mut allocator_module: Option<ModuleCodegen<B::Module>>,
coordinator_send: Sender<Box<dyn Any + Send>>,
-) -> thread::JoinHandle<Result<CompiledModules, ()>> {
+) -> thread::JoinHandle<Result<MaybeLtoModules<B>, ()>> {
let sess = tcx.sess;
let mut each_linked_rlib_for_lto = Vec::new();
@@ -1292,18 +1307,18 @@ fn start_executing_work(
let cgcx = CodegenContext::<B> {
crate_types: tcx.crate_types().to_vec(),
lto: sess.lto(),
+ use_linker_plugin_lto: sess.opts.cg.linker_plugin_lto.enabled(),
+ dylib_lto: sess.opts.unstable_opts.dylib_lto,
+ prefer_dynamic: sess.opts.cg.prefer_dynamic,
fewer_names: sess.fewer_names(),
save_temps: sess.opts.cg.save_temps,
time_trace: sess.opts.unstable_opts.llvm_time_trace,
- opts: Arc::new(sess.opts.clone()),
prof: sess.prof.clone(),
remark: sess.opts.cg.remark.clone(),
remark_dir,
incr_comp_session_dir: sess.incr_comp_session_dir_opt().map(|r| r.clone()),
- diag_emitter: shared_emitter.clone(),
output_filenames: Arc::clone(tcx.output_filenames(())),
module_config: regular_config,
- allocator_config,
tm_factory: backend.target_machine_factory(tcx.sess, ol, backend_features),
msvc_imps_needed: msvc_imps_needed(tcx),
is_pe_coff: tcx.sess.target.is_like_windows,
@@ -1497,16 +1512,9 @@ fn start_executing_work(
let mut llvm_start_time: Option> = None;
- let compiled_allocator_module = allocator_module.and_then(|allocator_module| {
- match execute_optimize_work_item(&cgcx, allocator_module) {
- WorkItemResult::Finished(compiled_module) => return Some(compiled_module),
- WorkItemResult::NeedsFatLto(fat_lto_input) => needs_fat_lto.push(fat_lto_input),
- WorkItemResult::NeedsThinLto(name, thin_buffer) => {
- needs_thin_lto.push((name, thin_buffer))
- }
- }
- None
- });
+ if let Some(allocator_module) = &mut allocator_module {
+ B::optimize(&cgcx, &shared_emitter, allocator_module, &allocator_config);
+ }
// Run the message loop while there's still anything that needs message
// processing. Note that as soon as codegen is aborted we simply want to
@@ -1543,7 +1551,13 @@ fn start_executing_work(
let (item, _) =
work_items.pop().expect("queue empty - queue_full_enough() broken?");
main_thread_state = MainThreadState::Lending;
- spawn_work(&cgcx, coordinator_send.clone(), &mut llvm_start_time, item);
+ spawn_work(
+ &cgcx,
+ shared_emitter.clone(),
+ coordinator_send.clone(),
+ &mut llvm_start_time,
+ item,
+ );
}
}
} else if codegen_state == Completed {
@@ -1561,7 +1575,13 @@ fn start_executing_work(
MainThreadState::Idle => {
if let Some((item, _)) = work_items.pop() {
main_thread_state = MainThreadState::Lending;
- spawn_work(&cgcx, coordinator_send.clone(), &mut llvm_start_time, item);
+ spawn_work(
+ &cgcx,
+ shared_emitter.clone(),
+ coordinator_send.clone(),
+ &mut llvm_start_time,
+ item,
+ );
} else {
// There is no unstarted work, so let the main thread
// take over for a running worker. Otherwise the
@@ -1597,7 +1617,13 @@ fn start_executing_work(
while running_with_own_token < tokens.len()
&& let Some((item, _)) = work_items.pop()
{
- spawn_work(&cgcx, coordinator_send.clone(), &mut llvm_start_time, item);
+ spawn_work(
+ &cgcx,
+ shared_emitter.clone(),
+ coordinator_send.clone(),
+ &mut llvm_start_time,
+ item,
+ );
running_with_own_token += 1;
}
}
@@ -1733,36 +1759,51 @@ fn start_executing_work(
assert!(compiled_modules.is_empty());
assert!(needs_thin_lto.is_empty());
- // This uses the implicit token
- let module = do_fat_lto(
- &cgcx,
- &exported_symbols_for_lto,
- &each_linked_rlib_file_for_lto,
+ if let Some(allocator_module) = allocator_module.take() {
+ needs_fat_lto.push(FatLtoInput::InMemory(allocator_module));
+ }
+
+ return Ok(MaybeLtoModules::FatLto {
+ cgcx,
+ exported_symbols_for_lto,
+ each_linked_rlib_file_for_lto,
needs_fat_lto,
lto_import_only_modules,
- );
- compiled_modules.push(module);
+ });
} else if !needs_thin_lto.is_empty() || !lto_import_only_modules.is_empty() {
assert!(compiled_modules.is_empty());
assert!(needs_fat_lto.is_empty());
- compiled_modules.extend(do_thin_lto(
- &cgcx,
- exported_symbols_for_lto,
- each_linked_rlib_file_for_lto,
- needs_thin_lto,
- lto_import_only_modules,
- ));
+ if cgcx.lto == Lto::ThinLocal {
+ compiled_modules.extend(do_thin_lto(
+ &cgcx,
+ shared_emitter.clone(),
+ exported_symbols_for_lto,
+ each_linked_rlib_file_for_lto,
+ needs_thin_lto,
+ lto_import_only_modules,
+ ));
+ } else {
+ if let Some(allocator_module) = allocator_module.take() {
+ let (name, thin_buffer) = B::prepare_thin(allocator_module);
+ needs_thin_lto.push((name, thin_buffer));
+ }
+
+ return Ok(MaybeLtoModules::ThinLto {
+ cgcx,
+ exported_symbols_for_lto,
+ each_linked_rlib_file_for_lto,
+ needs_thin_lto,
+ lto_import_only_modules,
+ });
+ }
}
- // Regardless of what order these modules completed in, report them to
- // the backend in the same order every time to ensure that we're handing
- // out deterministic results.
- compiled_modules.sort_by(|a, b| a.name.cmp(&b.name));
-
- Ok(CompiledModules {
+ Ok(MaybeLtoModules::NoLto {
modules: compiled_modules,
- allocator_module: compiled_allocator_module,
+ allocator_module: allocator_module.map(|allocator_module| {
+ B::codegen(&cgcx, &shared_emitter, allocator_module, &allocator_config)
+ }),
})
})
.expect("failed to spawn coordinator thread");
@@ -1831,6 +1872,7 @@ pub(crate) struct WorkerFatalError;
fn spawn_work<'a, B: ExtraBackendMethods>(
cgcx: &'a CodegenContext,
+ shared_emitter: SharedEmitter,
coordinator_send: Sender<Box<dyn Any + Send>>,
llvm_start_time: &mut Option<VerboseTimingGuard<'a>>,
work: WorkItem<B>,
@@ -1843,10 +1885,10 @@ fn spawn_work<'a, B: ExtraBackendMethods>(
B::spawn_named_thread(cgcx.time_trace, work.short_description(), move || {
let result = std::panic::catch_unwind(AssertUnwindSafe(|| match work {
- WorkItem::Optimize(m) => execute_optimize_work_item(&cgcx, m),
- WorkItem::CopyPostLtoArtifacts(m) => {
- WorkItemResult::Finished(execute_copy_from_cache_work_item(&cgcx, m))
- }
+ WorkItem::Optimize(m) => execute_optimize_work_item(&cgcx, shared_emitter, m),
+ WorkItem::CopyPostLtoArtifacts(m) => WorkItemResult::Finished(
+ execute_copy_from_cache_work_item(&cgcx, shared_emitter, m),
+ ),
}));
let msg = match result {
@@ -1868,6 +1910,7 @@ fn spawn_work<'a, B: ExtraBackendMethods>(
fn spawn_thin_lto_work<'a, B: ExtraBackendMethods>(
cgcx: &'a CodegenContext,
+ shared_emitter: SharedEmitter,
coordinator_send: Sender,
work: ThinLtoWorkItem,
) {
@@ -1875,8 +1918,10 @@ fn spawn_thin_lto_work<'a, B: ExtraBackendMethods>(
B::spawn_named_thread(cgcx.time_trace, work.short_description(), move || {
let result = std::panic::catch_unwind(AssertUnwindSafe(|| match work {
- ThinLtoWorkItem::CopyPostLtoArtifacts(m) => execute_copy_from_cache_work_item(&cgcx, m),
- ThinLtoWorkItem::ThinLto(m) => execute_thin_lto_work_item(&cgcx, m),
+ ThinLtoWorkItem::CopyPostLtoArtifacts(m) => {
+ execute_copy_from_cache_work_item(&cgcx, shared_emitter, m)
+ }
+ ThinLtoWorkItem::ThinLto(m) => execute_thin_lto_work_item(&cgcx, shared_emitter, m),
}));
let msg = match result {
@@ -2052,13 +2097,13 @@ impl SharedEmitterMain {
pub struct Coordinator {
sender: Sender>,
-    future: Option<thread::JoinHandle<Result<CompiledModules, ()>>>,
+    future: Option<thread::JoinHandle<Result<MaybeLtoModules<B>, ()>>>,
// Only used for the Message type.
phantom: PhantomData,
}
impl Coordinator {
-    fn join(mut self) -> std::thread::Result<Result<CompiledModules, ()>> {
+    fn join(mut self) -> std::thread::Result<Result<MaybeLtoModules<B>, ()>> {
self.future.take().unwrap().join()
}
}
@@ -2089,8 +2134,9 @@ pub struct OngoingCodegen {
impl OngoingCodegen {
pub fn join(self, sess: &Session) -> (CodegenResults, FxIndexMap) {
self.shared_emitter_main.check(sess, true);
- let compiled_modules = sess.time("join_worker_thread", || match self.coordinator.join() {
- Ok(Ok(compiled_modules)) => compiled_modules,
+
+ let maybe_lto_modules = sess.time("join_worker_thread", || match self.coordinator.join() {
+ Ok(Ok(maybe_lto_modules)) => maybe_lto_modules,
Ok(Err(())) => {
sess.dcx().abort_if_errors();
panic!("expected abort due to worker thread errors")
@@ -2102,6 +2148,62 @@ impl OngoingCodegen {
sess.dcx().abort_if_errors();
+ let (shared_emitter, shared_emitter_main) = SharedEmitter::new();
+
+ // Catch fatal errors to ensure shared_emitter_main.check() can emit the actual diagnostics
+ let compiled_modules = catch_fatal_errors(|| match maybe_lto_modules {
+ MaybeLtoModules::NoLto { modules, allocator_module } => {
+ drop(shared_emitter);
+ CompiledModules { modules, allocator_module }
+ }
+ MaybeLtoModules::FatLto {
+ cgcx,
+ exported_symbols_for_lto,
+ each_linked_rlib_file_for_lto,
+ needs_fat_lto,
+ lto_import_only_modules,
+ } => CompiledModules {
+ modules: vec![do_fat_lto(
+ &cgcx,
+ shared_emitter,
+ &exported_symbols_for_lto,
+ &each_linked_rlib_file_for_lto,
+ needs_fat_lto,
+ lto_import_only_modules,
+ )],
+ allocator_module: None,
+ },
+ MaybeLtoModules::ThinLto {
+ cgcx,
+ exported_symbols_for_lto,
+ each_linked_rlib_file_for_lto,
+ needs_thin_lto,
+ lto_import_only_modules,
+ } => CompiledModules {
+ modules: do_thin_lto(
+ &cgcx,
+ shared_emitter,
+ exported_symbols_for_lto,
+ each_linked_rlib_file_for_lto,
+ needs_thin_lto,
+ lto_import_only_modules,
+ ),
+ allocator_module: None,
+ },
+ });
+
+ shared_emitter_main.check(sess, true);
+
+ sess.dcx().abort_if_errors();
+
+ let mut compiled_modules =
+ compiled_modules.expect("fatal error emitted but not sent to SharedEmitter");
+
+ // Regardless of what order these modules completed in, report them to
+ // the backend in the same order every time to ensure that we're handing
+ // out deterministic results.
+ compiled_modules.modules.sort_by(|a, b| a.name.cmp(&b.name));
+
let work_products =
copy_all_cgu_workproducts_to_incr_comp_cache_dir(sess, &compiled_modules);
produce_final_output_artifacts(sess, &compiled_modules, &self.output_filenames);
diff --git a/compiler/rustc_codegen_ssa/src/base.rs b/compiler/rustc_codegen_ssa/src/base.rs
index c8aa7c04585c..982a814c1c86 100644
--- a/compiler/rustc_codegen_ssa/src/base.rs
+++ b/compiler/rustc_codegen_ssa/src/base.rs
@@ -49,9 +49,7 @@ use crate::meth::load_vtable;
use crate::mir::operand::OperandValue;
use crate::mir::place::PlaceRef;
use crate::traits::*;
-use crate::{
- CachedModuleCodegen, CodegenLintLevels, CrateInfo, ModuleCodegen, ModuleKind, errors, meth, mir,
-};
+use crate::{CachedModuleCodegen, CodegenLintLevels, CrateInfo, ModuleCodegen, errors, meth, mir};
pub(crate) fn bin_op_to_icmp_predicate(op: BinOp, signed: bool) -> IntPredicate {
match (op, signed) {
@@ -1126,9 +1124,8 @@ pub fn determine_cgu_reuse<'tcx>(tcx: TyCtxt<'tcx>, cgu: &CodegenUnit<'tcx>) ->
// reuse pre-LTO artifacts
match compute_per_cgu_lto_type(
&tcx.sess.lto(),
- &tcx.sess.opts,
+ tcx.sess.opts.cg.linker_plugin_lto.enabled(),
tcx.crate_types(),
- ModuleKind::Regular,
) {
ComputedLtoType::No => CguReuse::PostLto,
_ => CguReuse::PreLto,
diff --git a/compiler/rustc_codegen_ssa/src/codegen_attrs.rs b/compiler/rustc_codegen_ssa/src/codegen_attrs.rs
index e35d884b6711..d55e134109d6 100644
--- a/compiler/rustc_codegen_ssa/src/codegen_attrs.rs
+++ b/compiler/rustc_codegen_ssa/src/codegen_attrs.rs
@@ -47,59 +47,6 @@ fn try_fn_sig<'tcx>(
}
}
-// FIXME(jdonszelmann): remove when patchable_function_entry becomes a parsed attr
-fn parse_patchable_function_entry(
- tcx: TyCtxt<'_>,
- attr: &Attribute,
-) -> Option {
- attr.meta_item_list().and_then(|l| {
- let mut prefix = None;
- let mut entry = None;
- for item in l {
- let Some(meta_item) = item.meta_item() else {
- tcx.dcx().emit_err(errors::ExpectedNameValuePair { span: item.span() });
- continue;
- };
-
- let Some(name_value_lit) = meta_item.name_value_literal() else {
- tcx.dcx().emit_err(errors::ExpectedNameValuePair { span: item.span() });
- continue;
- };
-
- let attrib_to_write = match meta_item.name() {
- Some(sym::prefix_nops) => &mut prefix,
- Some(sym::entry_nops) => &mut entry,
- _ => {
- tcx.dcx().emit_err(errors::UnexpectedParameterName {
- span: item.span(),
- prefix_nops: sym::prefix_nops,
- entry_nops: sym::entry_nops,
- });
- continue;
- }
- };
-
- let rustc_ast::LitKind::Int(val, _) = name_value_lit.kind else {
- tcx.dcx().emit_err(errors::InvalidLiteralValue { span: name_value_lit.span });
- continue;
- };
-
- let Ok(val) = val.get().try_into() else {
- tcx.dcx().emit_err(errors::OutOfRangeInteger { span: name_value_lit.span });
- continue;
- };
-
- *attrib_to_write = Some(val);
- }
-
- if let (None, None) = (prefix, entry) {
- tcx.dcx().span_err(attr.span(), "must specify at least one parameter");
- }
-
- Some(PatchableFunctionEntry::from_prefix_and_entry(prefix.unwrap_or(0), entry.unwrap_or(0)))
- })
-}
-
/// Spans that are collected when processing built-in attributes,
/// that are useful for emitting diagnostics later.
#[derive(Default)]
@@ -121,250 +68,235 @@ fn process_builtin_attrs(
let mut interesting_spans = InterestingAttributeDiagnosticSpans::default();
let rust_target_features = tcx.rust_target_features(LOCAL_CRATE);
- for attr in attrs.iter() {
- if let hir::Attribute::Parsed(p) = attr {
- match p {
- AttributeKind::Cold(_) => codegen_fn_attrs.flags |= CodegenFnAttrFlags::COLD,
- AttributeKind::ExportName { name, .. } => {
- codegen_fn_attrs.symbol_name = Some(*name)
+ let parsed_attrs = attrs
+ .iter()
+ .filter_map(|attr| if let hir::Attribute::Parsed(attr) = attr { Some(attr) } else { None });
+ for attr in parsed_attrs {
+ match attr {
+ AttributeKind::Cold(_) => codegen_fn_attrs.flags |= CodegenFnAttrFlags::COLD,
+ AttributeKind::ExportName { name, .. } => codegen_fn_attrs.symbol_name = Some(*name),
+ AttributeKind::Inline(inline, span) => {
+ codegen_fn_attrs.inline = *inline;
+ interesting_spans.inline = Some(*span);
+ }
+ AttributeKind::Naked(_) => codegen_fn_attrs.flags |= CodegenFnAttrFlags::NAKED,
+ AttributeKind::Align { align, .. } => codegen_fn_attrs.alignment = Some(*align),
+ AttributeKind::LinkName { name, .. } => {
+ // FIXME Remove check for foreign functions once #[link_name] on non-foreign
+ // functions is a hard error
+ if tcx.is_foreign_item(did) {
+ codegen_fn_attrs.symbol_name = Some(*name);
}
- AttributeKind::Inline(inline, span) => {
- codegen_fn_attrs.inline = *inline;
- interesting_spans.inline = Some(*span);
+ }
+ AttributeKind::LinkOrdinal { ordinal, span } => {
+ codegen_fn_attrs.link_ordinal = Some(*ordinal);
+ interesting_spans.link_ordinal = Some(*span);
+ }
+ AttributeKind::LinkSection { name, .. } => codegen_fn_attrs.link_section = Some(*name),
+ AttributeKind::NoMangle(attr_span) => {
+ interesting_spans.no_mangle = Some(*attr_span);
+ if tcx.opt_item_name(did.to_def_id()).is_some() {
+ codegen_fn_attrs.flags |= CodegenFnAttrFlags::NO_MANGLE;
+ } else {
+ tcx.dcx()
+ .span_delayed_bug(*attr_span, "no_mangle should be on a named function");
}
- AttributeKind::Naked(_) => codegen_fn_attrs.flags |= CodegenFnAttrFlags::NAKED,
- AttributeKind::Align { align, .. } => codegen_fn_attrs.alignment = Some(*align),
- AttributeKind::LinkName { name, .. } => {
- // FIXME Remove check for foreign functions once #[link_name] on non-foreign
- // functions is a hard error
- if tcx.is_foreign_item(did) {
- codegen_fn_attrs.symbol_name = Some(*name);
- }
- }
- AttributeKind::LinkOrdinal { ordinal, span } => {
- codegen_fn_attrs.link_ordinal = Some(*ordinal);
- interesting_spans.link_ordinal = Some(*span);
- }
- AttributeKind::LinkSection { name, .. } => {
- codegen_fn_attrs.link_section = Some(*name)
- }
- AttributeKind::NoMangle(attr_span) => {
- interesting_spans.no_mangle = Some(*attr_span);
- if tcx.opt_item_name(did.to_def_id()).is_some() {
- codegen_fn_attrs.flags |= CodegenFnAttrFlags::NO_MANGLE;
+ }
+ AttributeKind::Optimize(optimize, _) => codegen_fn_attrs.optimize = *optimize,
+ AttributeKind::TargetFeature { features, attr_span, was_forced } => {
+ let Some(sig) = tcx.hir_node_by_def_id(did).fn_sig() else {
+ tcx.dcx().span_delayed_bug(*attr_span, "target_feature applied to non-fn");
+ continue;
+ };
+ let safe_target_features =
+ matches!(sig.header.safety, hir::HeaderSafety::SafeTargetFeatures);
+ codegen_fn_attrs.safe_target_features = safe_target_features;
+ if safe_target_features && !was_forced {
+ if tcx.sess.target.is_like_wasm || tcx.sess.opts.actually_rustdoc {
+ // The `#[target_feature]` attribute is allowed on
+ // WebAssembly targets on all functions. Prior to stabilizing
+ // the `target_feature_11` feature, `#[target_feature]` was
+ // only permitted on unsafe functions because on most targets
+ // execution of instructions that are not supported is
+ // considered undefined behavior. For WebAssembly which is a
+ // 100% safe target at execution time it's not possible to
+ // execute undefined instructions, and even if a future
+ // feature was added in some form for this it would be a
+ // deterministic trap. There is no undefined behavior when
+ // executing WebAssembly so `#[target_feature]` is allowed
+ // on safe functions (but again, only for WebAssembly)
+ //
+ // Note that this is also allowed if `actually_rustdoc` so
+ // if a target is documenting some wasm-specific code then
+ // it's not spuriously denied.
+ //
+ // Now that `#[target_feature]` is permitted on safe functions,
+ // this exception must still exist for allowing the attribute on
+ // `main`, `start`, and other functions that are not usually
+ // allowed.
} else {
- tcx.dcx().span_delayed_bug(
- *attr_span,
- "no_mangle should be on a named function",
- );
+ check_target_feature_trait_unsafe(tcx, did, *attr_span);
}
}
- AttributeKind::Optimize(optimize, _) => codegen_fn_attrs.optimize = *optimize,
- AttributeKind::TargetFeature { features, attr_span, was_forced } => {
- let Some(sig) = tcx.hir_node_by_def_id(did).fn_sig() else {
- tcx.dcx().span_delayed_bug(*attr_span, "target_feature applied to non-fn");
- continue;
+ from_target_feature_attr(
+ tcx,
+ did,
+ features,
+ *was_forced,
+ rust_target_features,
+ &mut codegen_fn_attrs.target_features,
+ );
+ }
+ AttributeKind::TrackCaller(attr_span) => {
+ let is_closure = tcx.is_closure_like(did.to_def_id());
+
+ if !is_closure
+ && let Some(fn_sig) = try_fn_sig(tcx, did, *attr_span)
+ && fn_sig.skip_binder().abi() != ExternAbi::Rust
+ {
+ tcx.dcx().emit_err(errors::RequiresRustAbi { span: *attr_span });
+ }
+ if is_closure
+ && !tcx.features().closure_track_caller()
+ && !attr_span.allows_unstable(sym::closure_track_caller)
+ {
+ feature_err(
+ &tcx.sess,
+ sym::closure_track_caller,
+ *attr_span,
+ "`#[track_caller]` on closures is currently unstable",
+ )
+ .emit();
+ }
+ codegen_fn_attrs.flags |= CodegenFnAttrFlags::TRACK_CALLER
+ }
+ AttributeKind::Used { used_by, .. } => match used_by {
+ UsedBy::Compiler => codegen_fn_attrs.flags |= CodegenFnAttrFlags::USED_COMPILER,
+ UsedBy::Linker => codegen_fn_attrs.flags |= CodegenFnAttrFlags::USED_LINKER,
+ UsedBy::Default => {
+ let used_form = if tcx.sess.target.os == Os::Illumos {
+ // illumos' `ld` doesn't support a section header that would represent
+ // `#[used(linker)]`, see
+ // https://github.com/rust-lang/rust/issues/146169. For that target,
+ // downgrade as if `#[used(compiler)]` was requested and hope for the
+ // best.
+ CodegenFnAttrFlags::USED_COMPILER
+ } else {
+ CodegenFnAttrFlags::USED_LINKER
};
- let safe_target_features =
- matches!(sig.header.safety, hir::HeaderSafety::SafeTargetFeatures);
- codegen_fn_attrs.safe_target_features = safe_target_features;
- if safe_target_features && !was_forced {
- if tcx.sess.target.is_like_wasm || tcx.sess.opts.actually_rustdoc {
- // The `#[target_feature]` attribute is allowed on
- // WebAssembly targets on all functions. Prior to stabilizing
- // the `target_feature_11` feature, `#[target_feature]` was
- // only permitted on unsafe functions because on most targets
- // execution of instructions that are not supported is
- // considered undefined behavior. For WebAssembly which is a
- // 100% safe target at execution time it's not possible to
- // execute undefined instructions, and even if a future
- // feature was added in some form for this it would be a
- // deterministic trap. There is no undefined behavior when
- // executing WebAssembly so `#[target_feature]` is allowed
- // on safe functions (but again, only for WebAssembly)
- //
- // Note that this is also allowed if `actually_rustdoc` so
- // if a target is documenting some wasm-specific code then
- // it's not spuriously denied.
- //
- // Now that `#[target_feature]` is permitted on safe functions,
- // this exception must still exist for allowing the attribute on
- // `main`, `start`, and other functions that are not usually
- // allowed.
- } else {
- check_target_feature_trait_unsafe(tcx, did, *attr_span);
+ codegen_fn_attrs.flags |= used_form;
+ }
+ },
+ AttributeKind::FfiConst(_) => codegen_fn_attrs.flags |= CodegenFnAttrFlags::FFI_CONST,
+ AttributeKind::FfiPure(_) => codegen_fn_attrs.flags |= CodegenFnAttrFlags::FFI_PURE,
+ AttributeKind::StdInternalSymbol(_) => {
+ codegen_fn_attrs.flags |= CodegenFnAttrFlags::RUSTC_STD_INTERNAL_SYMBOL
+ }
+ AttributeKind::Linkage(linkage, span) => {
+ let linkage = Some(*linkage);
+
+ if tcx.is_foreign_item(did) {
+ codegen_fn_attrs.import_linkage = linkage;
+
+ if tcx.is_mutable_static(did.into()) {
+ let mut diag = tcx.dcx().struct_span_err(
+ *span,
+ "extern mutable statics are not allowed with `#[linkage]`",
+ );
+ diag.note(
+ "marking the extern static mutable would allow changing which \
+ symbol the static references rather than make the target of the \
+ symbol mutable",
+ );
+ diag.emit();
+ }
+ } else {
+ codegen_fn_attrs.linkage = linkage;
+ }
+ }
+ AttributeKind::Sanitize { span, .. } => {
+ interesting_spans.sanitize = Some(*span);
+ }
+ AttributeKind::ObjcClass { classname, .. } => {
+ codegen_fn_attrs.objc_class = Some(*classname);
+ }
+ AttributeKind::ObjcSelector { methname, .. } => {
+ codegen_fn_attrs.objc_selector = Some(*methname);
+ }
+ AttributeKind::EiiForeignItem => {
+ codegen_fn_attrs.flags |= CodegenFnAttrFlags::EXTERNALLY_IMPLEMENTABLE_ITEM;
+ }
+ AttributeKind::EiiImpls(impls) => {
+ for i in impls {
+ let foreign_item = match i.resolution {
+ EiiImplResolution::Macro(def_id) => {
+ let Some(extern_item) = find_attr!(
+ tcx.get_all_attrs(def_id),
+ AttributeKind::EiiDeclaration(target) => target.foreign_item
+ ) else {
+ tcx.dcx().span_delayed_bug(
+ i.span,
+ "resolved to something that's not an EII",
+ );
+ continue;
+ };
+ extern_item
}
- }
- from_target_feature_attr(
- tcx,
- did,
- features,
- *was_forced,
- rust_target_features,
- &mut codegen_fn_attrs.target_features,
- );
- }
- AttributeKind::TrackCaller(attr_span) => {
- let is_closure = tcx.is_closure_like(did.to_def_id());
+ EiiImplResolution::Known(decl) => decl.foreign_item,
+ EiiImplResolution::Error(_eg) => continue,
+ };
- if !is_closure
- && let Some(fn_sig) = try_fn_sig(tcx, did, *attr_span)
- && fn_sig.skip_binder().abi() != ExternAbi::Rust
+ // this is to prevent a bug where a single crate defines both the default and explicit implementation
+ // for an EII. In that case, both of them may be part of the same final object file. I'm not 100% sure
+ // what happens, either rustc deduplicates the symbol or llvm, or it's random/order-dependent.
+ // However, the fact that the default one of has weak linkage isn't considered and you sometimes get that
+ // the default implementation is used while an explicit implementation is given.
+ if
+ // if this is a default impl
+ i.is_default
+ // iterate over all implementations *in the current crate*
+ // (this is ok since we generate codegen fn attrs in the local crate)
+ // if any of them is *not default* then don't emit the alias.
+ && tcx.externally_implementable_items(LOCAL_CRATE).get(&foreign_item).expect("at least one").1.iter().any(|(_, imp)| !imp.is_default)
{
- tcx.dcx().emit_err(errors::RequiresRustAbi { span: *attr_span });
+ continue;
}
- if is_closure
- && !tcx.features().closure_track_caller()
- && !attr_span.allows_unstable(sym::closure_track_caller)
- {
- feature_err(
- &tcx.sess,
- sym::closure_track_caller,
- *attr_span,
- "`#[track_caller]` on closures is currently unstable",
- )
- .emit();
- }
- codegen_fn_attrs.flags |= CodegenFnAttrFlags::TRACK_CALLER
- }
- AttributeKind::Used { used_by, .. } => match used_by {
- UsedBy::Compiler => codegen_fn_attrs.flags |= CodegenFnAttrFlags::USED_COMPILER,
- UsedBy::Linker => codegen_fn_attrs.flags |= CodegenFnAttrFlags::USED_LINKER,
- UsedBy::Default => {
- let used_form = if tcx.sess.target.os == Os::Illumos {
- // illumos' `ld` doesn't support a section header that would represent
- // `#[used(linker)]`, see
- // https://github.com/rust-lang/rust/issues/146169. For that target,
- // downgrade as if `#[used(compiler)]` was requested and hope for the
- // best.
- CodegenFnAttrFlags::USED_COMPILER
- } else {
- CodegenFnAttrFlags::USED_LINKER
- };
- codegen_fn_attrs.flags |= used_form;
- }
- },
- AttributeKind::FfiConst(_) => {
- codegen_fn_attrs.flags |= CodegenFnAttrFlags::FFI_CONST
- }
- AttributeKind::FfiPure(_) => codegen_fn_attrs.flags |= CodegenFnAttrFlags::FFI_PURE,
- AttributeKind::StdInternalSymbol(_) => {
- codegen_fn_attrs.flags |= CodegenFnAttrFlags::RUSTC_STD_INTERNAL_SYMBOL
- }
- AttributeKind::Linkage(linkage, span) => {
- let linkage = Some(*linkage);
- if tcx.is_foreign_item(did) {
- codegen_fn_attrs.import_linkage = linkage;
-
- if tcx.is_mutable_static(did.into()) {
- let mut diag = tcx.dcx().struct_span_err(
- *span,
- "extern mutable statics are not allowed with `#[linkage]`",
- );
- diag.note(
- "marking the extern static mutable would allow changing which \
- symbol the static references rather than make the target of the \
- symbol mutable",
- );
- diag.emit();
- }
- } else {
- codegen_fn_attrs.linkage = linkage;
- }
- }
- AttributeKind::Sanitize { span, .. } => {
- interesting_spans.sanitize = Some(*span);
- }
- AttributeKind::ObjcClass { classname, .. } => {
- codegen_fn_attrs.objc_class = Some(*classname);
- }
- AttributeKind::ObjcSelector { methname, .. } => {
- codegen_fn_attrs.objc_selector = Some(*methname);
- }
- AttributeKind::EiiForeignItem => {
+ codegen_fn_attrs.foreign_item_symbol_aliases.push((
+ foreign_item,
+ if i.is_default { Linkage::LinkOnceAny } else { Linkage::External },
+ Visibility::Default,
+ ));
codegen_fn_attrs.flags |= CodegenFnAttrFlags::EXTERNALLY_IMPLEMENTABLE_ITEM;
}
- AttributeKind::EiiImpls(impls) => {
- for i in impls {
- let foreign_item = match i.resolution {
- EiiImplResolution::Macro(def_id) => {
- let Some(extern_item) = find_attr!(
- tcx.get_all_attrs(def_id),
- AttributeKind::EiiDeclaration(target) => target.foreign_item
- ) else {
- tcx.dcx().span_delayed_bug(
- i.span,
- "resolved to something that's not an EII",
- );
- continue;
- };
- extern_item
- }
- EiiImplResolution::Known(decl) => decl.foreign_item,
- EiiImplResolution::Error(_eg) => continue,
- };
-
- // this is to prevent a bug where a single crate defines both the default and explicit implementation
- // for an EII. In that case, both of them may be part of the same final object file. I'm not 100% sure
- // what happens, either rustc deduplicates the symbol or llvm, or it's random/order-dependent.
- // However, the fact that the default one of has weak linkage isn't considered and you sometimes get that
- // the default implementation is used while an explicit implementation is given.
- if
- // if this is a default impl
- i.is_default
- // iterate over all implementations *in the current crate*
- // (this is ok since we generate codegen fn attrs in the local crate)
- // if any of them is *not default* then don't emit the alias.
- && tcx.externally_implementable_items(LOCAL_CRATE).get(&foreign_item).expect("at least one").1.iter().any(|(_, imp)| !imp.is_default)
- {
- continue;
- }
-
- codegen_fn_attrs.foreign_item_symbol_aliases.push((
- foreign_item,
- if i.is_default { Linkage::LinkOnceAny } else { Linkage::External },
- Visibility::Default,
- ));
- codegen_fn_attrs.flags |= CodegenFnAttrFlags::EXTERNALLY_IMPLEMENTABLE_ITEM;
- }
- }
- AttributeKind::ThreadLocal => {
- codegen_fn_attrs.flags |= CodegenFnAttrFlags::THREAD_LOCAL
- }
- AttributeKind::InstructionSet(instruction_set) => {
- codegen_fn_attrs.instruction_set = Some(*instruction_set)
- }
- AttributeKind::RustcAllocator => {
- codegen_fn_attrs.flags |= CodegenFnAttrFlags::ALLOCATOR
- }
- AttributeKind::RustcDeallocator => {
- codegen_fn_attrs.flags |= CodegenFnAttrFlags::DEALLOCATOR
- }
- AttributeKind::RustcReallocator => {
- codegen_fn_attrs.flags |= CodegenFnAttrFlags::REALLOCATOR
- }
- AttributeKind::RustcAllocatorZeroed => {
- codegen_fn_attrs.flags |= CodegenFnAttrFlags::ALLOCATOR_ZEROED
- }
- AttributeKind::RustcNounwind => {
- codegen_fn_attrs.flags |= CodegenFnAttrFlags::NEVER_UNWIND
- }
- AttributeKind::RustcOffloadKernel => {
- codegen_fn_attrs.flags |= CodegenFnAttrFlags::OFFLOAD_KERNEL
- }
- _ => {}
}
- }
-
- let Some(name) = attr.name() else {
- continue;
- };
-
- match name {
- sym::patchable_function_entry => {
+ AttributeKind::ThreadLocal => {
+ codegen_fn_attrs.flags |= CodegenFnAttrFlags::THREAD_LOCAL
+ }
+ AttributeKind::InstructionSet(instruction_set) => {
+ codegen_fn_attrs.instruction_set = Some(*instruction_set)
+ }
+ AttributeKind::RustcAllocator => {
+ codegen_fn_attrs.flags |= CodegenFnAttrFlags::ALLOCATOR
+ }
+ AttributeKind::RustcDeallocator => {
+ codegen_fn_attrs.flags |= CodegenFnAttrFlags::DEALLOCATOR
+ }
+ AttributeKind::RustcReallocator => {
+ codegen_fn_attrs.flags |= CodegenFnAttrFlags::REALLOCATOR
+ }
+ AttributeKind::RustcAllocatorZeroed => {
+ codegen_fn_attrs.flags |= CodegenFnAttrFlags::ALLOCATOR_ZEROED
+ }
+ AttributeKind::RustcNounwind => {
+ codegen_fn_attrs.flags |= CodegenFnAttrFlags::NEVER_UNWIND
+ }
+ AttributeKind::RustcOffloadKernel => {
+ codegen_fn_attrs.flags |= CodegenFnAttrFlags::OFFLOAD_KERNEL
+ }
+ AttributeKind::PatchableFunctionEntry { prefix, entry } => {
codegen_fn_attrs.patchable_function_entry =
- parse_patchable_function_entry(tcx, attr);
+ Some(PatchableFunctionEntry::from_prefix_and_entry(*prefix, *entry));
}
_ => {}
}
diff --git a/compiler/rustc_codegen_ssa/src/errors.rs b/compiler/rustc_codegen_ssa/src/errors.rs
index 39727685aec1..6a97de4c2b13 100644
--- a/compiler/rustc_codegen_ssa/src/errors.rs
+++ b/compiler/rustc_codegen_ssa/src/errors.rs
@@ -136,39 +136,6 @@ pub(crate) struct RequiresRustAbi {
pub span: Span,
}
-#[derive(Diagnostic)]
-#[diag(codegen_ssa_expected_name_value_pair)]
-pub(crate) struct ExpectedNameValuePair {
- #[primary_span]
- pub span: Span,
-}
-
-#[derive(Diagnostic)]
-#[diag(codegen_ssa_unexpected_parameter_name)]
-pub(crate) struct UnexpectedParameterName {
- #[primary_span]
- #[label]
- pub span: Span,
- pub prefix_nops: Symbol,
- pub entry_nops: Symbol,
-}
-
-#[derive(Diagnostic)]
-#[diag(codegen_ssa_invalid_literal_value)]
-pub(crate) struct InvalidLiteralValue {
- #[primary_span]
- #[label]
- pub span: Span,
-}
-
-#[derive(Diagnostic)]
-#[diag(codegen_ssa_out_of_range_integer)]
-pub(crate) struct OutOfRangeInteger {
- #[primary_span]
- #[label]
- pub span: Span,
-}
-
#[derive(Diagnostic)]
#[diag(codegen_ssa_copy_path_buf)]
pub(crate) struct CopyPathBuf {
diff --git a/compiler/rustc_codegen_ssa/src/traits/write.rs b/compiler/rustc_codegen_ssa/src/traits/write.rs
index 1ac1d7ef2e2e..e1d23841118c 100644
--- a/compiler/rustc_codegen_ssa/src/traits/write.rs
+++ b/compiler/rustc_codegen_ssa/src/traits/write.rs
@@ -4,7 +4,7 @@ use rustc_errors::DiagCtxtHandle;
use rustc_middle::dep_graph::WorkProduct;
use crate::back::lto::{SerializedModule, ThinModule};
-use crate::back::write::{CodegenContext, FatLtoInput, ModuleConfig};
+use crate::back::write::{CodegenContext, FatLtoInput, ModuleConfig, SharedEmitter};
use crate::{CompiledModule, ModuleCodegen};
pub trait WriteBackendMethods: Clone + 'static {
@@ -19,6 +19,7 @@ pub trait WriteBackendMethods: Clone + 'static {
/// if necessary and running any further optimizations
fn run_and_optimize_fat_lto(
cgcx: &CodegenContext,
+ shared_emitter: &SharedEmitter,
exported_symbols_for_lto: &[String],
each_linked_rlib_for_lto: &[PathBuf],
modules: Vec>,
@@ -28,6 +29,7 @@ pub trait WriteBackendMethods: Clone + 'static {
/// can simply be copied over from the incr. comp. cache.
fn run_thin_lto(
cgcx: &CodegenContext,
+ dcx: DiagCtxtHandle<'_>,
exported_symbols_for_lto: &[String],
each_linked_rlib_for_lto: &[PathBuf],
modules: Vec<(String, Self::ThinBuffer)>,
@@ -37,16 +39,18 @@ pub trait WriteBackendMethods: Clone + 'static {
fn print_statistics(&self);
fn optimize(
cgcx: &CodegenContext,
- dcx: DiagCtxtHandle<'_>,
+ shared_emitter: &SharedEmitter,
module: &mut ModuleCodegen,
config: &ModuleConfig,
);
fn optimize_thin(
cgcx: &CodegenContext,
+ shared_emitter: &SharedEmitter,
thin: ThinModule,
) -> ModuleCodegen;
fn codegen(
cgcx: &CodegenContext,
+ shared_emitter: &SharedEmitter,
module: ModuleCodegen,
config: &ModuleConfig,
) -> CompiledModule;
diff --git a/compiler/rustc_data_structures/Cargo.toml b/compiler/rustc_data_structures/Cargo.toml
index c8296e05f6bd..f358ffffb47d 100644
--- a/compiler/rustc_data_structures/Cargo.toml
+++ b/compiler/rustc_data_structures/Cargo.toml
@@ -10,7 +10,7 @@ bitflags = "2.4.1"
either = "1.0"
elsa = "1.11.0"
ena = "0.14.3"
-indexmap = "2.4.0"
+indexmap = "2.12.1"
jobserver_crate = { version = "0.1.28", package = "jobserver" }
measureme = "12.0.1"
parking_lot = "0.12"
@@ -31,7 +31,7 @@ tracing = "0.1"
# tidy-alphabetical-end
[dependencies.hashbrown]
-version = "0.15.2"
+version = "0.16.1"
default-features = false
features = ["nightly"] # for may_dangle
diff --git a/compiler/rustc_driver_impl/src/lib.rs b/compiler/rustc_driver_impl/src/lib.rs
index f425c26ba706..5dee64b42f73 100644
--- a/compiler/rustc_driver_impl/src/lib.rs
+++ b/compiler/rustc_driver_impl/src/lib.rs
@@ -18,7 +18,7 @@ use std::ffi::OsString;
use std::fmt::Write as _;
use std::fs::{self, File};
use std::io::{self, IsTerminal, Read, Write};
-use std::panic::{self, PanicHookInfo, catch_unwind};
+use std::panic::{self, PanicHookInfo};
use std::path::{Path, PathBuf};
use std::process::{self, Command, Stdio};
use std::sync::OnceLock;
@@ -32,10 +32,11 @@ use rustc_codegen_ssa::{CodegenErrors, CodegenResults};
use rustc_data_structures::profiling::{
TimePassesFormat, get_resident_set_size, print_time_passes_entry,
};
+pub use rustc_errors::catch_fatal_errors;
use rustc_errors::emitter::stderr_destination;
use rustc_errors::registry::Registry;
use rustc_errors::translation::Translator;
-use rustc_errors::{ColorConfig, DiagCtxt, ErrCode, FatalError, PResult, markdown};
+use rustc_errors::{ColorConfig, DiagCtxt, ErrCode, PResult, markdown};
use rustc_feature::find_gated_cfg;
// This avoids a false positive with `-Wunused_crate_dependencies`.
// `rust_index` isn't used in this crate's code, but it must be named in the
@@ -1377,21 +1378,6 @@ fn parse_crate_attrs<'a>(sess: &'a Session) -> PResult<'a, ast::AttrVec> {
parser.parse_inner_attributes()
}
-/// Runs a closure and catches unwinds triggered by fatal errors.
-///
-/// The compiler currently unwinds with a special sentinel value to abort
-/// compilation on fatal errors. This function catches that sentinel and turns
-/// the panic into a `Result` instead.
-pub fn catch_fatal_errors