Merge ref 'd276646872' from rust-lang/rust

Pull recent changes from https://github.com/rust-lang/rust via Josh.

Upstream ref: rust-lang/rust@d276646872
Filtered ref: rust-lang/miri@e200be98fe
Upstream diff: 63f4513795...d276646872

This merge was created using https://github.com/rust-lang/josh-sync.
This commit is contained in:
The Miri Cronjob Bot 2026-01-21 05:11:40 +00:00
commit add567bb4b
241 changed files with 3749 additions and 1370 deletions

View file

@ -1670,9 +1670,12 @@ dependencies = [
[[package]]
name = "hashbrown"
version = "0.16.0"
version = "0.16.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5419bdc4f6a9207fbeba6d11b604d481addf78ecd10c11ad51e76c2f6482748d"
checksum = "841d1cc9bed7f9236f321df977030373f4a4163ae1a7dbfe1a51a2c1a51d9100"
dependencies = [
"foldhash 0.2.0",
]
[[package]]
name = "heck"
@ -1950,12 +1953,12 @@ checksum = "964de6e86d545b246d84badc0fef527924ace5134f30641c203ef52ba83f58d5"
[[package]]
name = "indexmap"
version = "2.12.0"
version = "2.13.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6717a8d2a5a929a1a2eb43a12812498ed141a0bcfb7e8f7844fbdbe4303bba9f"
checksum = "7714e70437a7dc3ac8eb7e6f8df75fd8eb422675fc7678aff7364301092b1017"
dependencies = [
"equivalent",
"hashbrown 0.16.0",
"hashbrown 0.16.1",
"serde",
"serde_core",
]
@ -3334,6 +3337,7 @@ dependencies = [
"rustdoc-json-types",
"serde_json",
"similar",
"tempfile",
"wasmparser 0.236.1",
]
@ -3730,7 +3734,7 @@ dependencies = [
"either",
"elsa",
"ena",
"hashbrown 0.15.5",
"hashbrown 0.16.1",
"indexmap",
"jobserver",
"libc",
@ -4350,7 +4354,7 @@ name = "rustc_mir_transform"
version = "0.0.0"
dependencies = [
"either",
"hashbrown 0.15.5",
"hashbrown 0.16.1",
"itertools",
"rustc_abi",
"rustc_arena",
@ -4561,7 +4565,7 @@ dependencies = [
name = "rustc_query_system"
version = "0.0.0"
dependencies = [
"hashbrown 0.15.5",
"hashbrown 0.16.1",
"parking_lot",
"rustc_abi",
"rustc_ast",

View file

@ -1,3 +1,114 @@
Version 1.93.0 (2026-01-22)
==========================
<a id="1.93.0-Language"></a>
Language
--------
- [Stabilize several s390x `vector`-related target features and the `is_s390x_feature_detected!` macro](https://github.com/rust-lang/rust/pull/145656)
- [Stabilize declaration of C-style variadic functions for the `system` ABI](https://github.com/rust-lang/rust/pull/145954)
- [Emit error when using some keyword as a `cfg` predicate](https://github.com/rust-lang/rust/pull/146978)
- [Stabilize `asm_cfg`](https://github.com/rust-lang/rust/pull/147736)
- [During const-evaluation, support copying pointers byte-by-byte](https://github.com/rust-lang/rust/pull/148259)
- [LUB coercions now correctly handle function item types, and functions with differing safeties](https://github.com/rust-lang/rust/pull/148602)
- [Allow `const` items that contain mutable references to `static` (which is *very* unsafe, but not *always* UB)](https://github.com/rust-lang/rust/pull/148746)
- [Add warn-by-default `const_item_interior_mutations` lint to warn against calls which mutate interior mutable `const` items](https://github.com/rust-lang/rust/pull/148407)
- [Add warn-by-default `function_casts_as_integer` lint](https://github.com/rust-lang/rust/pull/141470)
<a id="1.93.0-Compiler"></a>
Compiler
--------
- [Stabilize `-Cjump-tables=bool`](https://github.com/rust-lang/rust/pull/145974). The flag was previously called `-Zno-jump-tables`.
<a id="1.93.0-Platform-Support"></a>
Platform Support
----------------
- [Promote `riscv64a23-unknown-linux-gnu` to Tier 2 (without host tools)](https://github.com/rust-lang/rust/pull/148435)
Refer to Rust's [platform support page][platform-support-doc]
for more information on Rust's tiered platform support.
[platform-support-doc]: https://doc.rust-lang.org/rustc/platform-support.html
<a id="1.93.0-Libraries"></a>
Libraries
---------
- [Stop internally using `specialization` on the `Copy` trait as it is unsound in the presence of lifetime dependent `Copy` implementations. This may result in some performance regressions as some standard library APIs may now call `Clone::clone` instead of performing bitwise copies](https://github.com/rust-lang/rust/pull/135634)
- [Allow the global allocator to use thread-local storage and `std::thread::current()`](https://github.com/rust-lang/rust/pull/144465)
- [Make `BTreeMap::append` not update existing keys when appending an entry which already exists](https://github.com/rust-lang/rust/pull/145628)
- [Don't require `T: RefUnwindSafe` for `vec::IntoIter<T>: UnwindSafe`](https://github.com/rust-lang/rust/pull/145665)
<a id="1.93.0-Stabilized-APIs"></a>
Stabilized APIs
---------------
- [`<[MaybeUninit<T>]>::assume_init_drop`](https://doc.rust-lang.org/stable/core/primitive.slice.html#method.assume_init_drop)
- [`<[MaybeUninit<T>]>::assume_init_ref`](https://doc.rust-lang.org/stable/core/primitive.slice.html#method.assume_init_ref)
- [`<[MaybeUninit<T>]>::assume_init_mut`](https://doc.rust-lang.org/stable/core/primitive.slice.html#method.assume_init_mut)
- [`<[MaybeUninit<T>]>::write_copy_of_slice`](https://doc.rust-lang.org/stable/std/primitive.slice.html#method.write_copy_of_slice)
- [`<[MaybeUninit<T>]>::write_clone_of_slice`](https://doc.rust-lang.org/stable/std/primitive.slice.html#method.write_clone_of_slice)
- [`String::into_raw_parts`](https://doc.rust-lang.org/stable/std/string/struct.String.html#method.into_raw_parts)
- [`Vec::into_raw_parts`](https://doc.rust-lang.org/stable/std/vec/struct.Vec.html#method.into_raw_parts)
- [`<iN>::unchecked_neg`](https://doc.rust-lang.org/stable/std/primitive.isize.html#method.unchecked_neg)
- [`<iN>::unchecked_shl`](https://doc.rust-lang.org/stable/std/primitive.isize.html#method.unchecked_shl)
- [`<iN>::unchecked_shr`](https://doc.rust-lang.org/stable/std/primitive.isize.html#method.unchecked_shr)
- [`<uN>::unchecked_shl`](https://doc.rust-lang.org/stable/std/primitive.usize.html#method.unchecked_shl)
- [`<uN>::unchecked_shr`](https://doc.rust-lang.org/stable/std/primitive.usize.html#method.unchecked_shr)
- [`<[T]>::as_array`](https://doc.rust-lang.org/stable/std/primitive.slice.html#method.as_array)
- [`<[T]>::as_mut_array`](https://doc.rust-lang.org/stable/std/primitive.slice.html#method.as_mut_array)
- [`<*const [T]>::as_array`](https://doc.rust-lang.org/stable/std/primitive.pointer.html#method.as_array)
- [`<*mut [T]>::as_mut_array`](https://doc.rust-lang.org/stable/std/primitive.pointer.html#method.as_mut_array)
- [`VecDeque::pop_front_if`](https://doc.rust-lang.org/stable/std/collections/struct.VecDeque.html#method.pop_front_if)
- [`VecDeque::pop_back_if`](https://doc.rust-lang.org/stable/std/collections/struct.VecDeque.html#method.pop_back_if)
- [`Duration::from_nanos_u128`](https://doc.rust-lang.org/stable/std/time/struct.Duration.html#method.from_nanos_u128)
- [`char::MAX_LEN_UTF8`](https://doc.rust-lang.org/stable/std/primitive.char.html#associatedconstant.MAX_LEN_UTF8)
- [`char::MAX_LEN_UTF16`](https://doc.rust-lang.org/stable/std/primitive.char.html#associatedconstant.MAX_LEN_UTF16)
- [`std::fmt::from_fn`](https://doc.rust-lang.org/stable/std/fmt/fn.from_fn.html)
- [`std::fmt::FromFn`](https://doc.rust-lang.org/stable/std/fmt/struct.FromFn.html)
<a id="1.93.0-Cargo"></a>
Cargo
-----
- [Enable CARGO_CFG_DEBUG_ASSERTIONS in build scripts based on profile](https://github.com/rust-lang/cargo/pull/16160/)
- [In `cargo tree`, support long forms for `--format` variables](https://github.com/rust-lang/cargo/pull/16204/)
- [Add `--workspace` to `cargo clean`](https://github.com/rust-lang/cargo/pull/16263/)
<a id="1.93.0-Rustdoc"></a>
Rustdoc
-------
- [Remove `#![doc(document_private_items)]`](https://github.com/rust-lang/rust/pull/146495)
- [Include attribute and derive macros in search filters for "macros"](https://github.com/rust-lang/rust/pull/148176)
- [Include extern crates in search filters for `import`](https://github.com/rust-lang/rust/pull/148301)
- [Validate usage of crate-level doc attributes](https://github.com/rust-lang/rust/pull/149197). This means if any of `html_favicon_url`, `html_logo_url`, `html_playground_url`, `issue_tracker_base_url`, or `html_no_source` either has a missing value, an unexpected value, or a value of the wrong type, rustdoc will emit the deny-by-default lint `rustdoc::invalid_doc_attributes`.
<a id="1.93.0-Compatibility-Notes"></a>
Compatibility Notes
-------------------
- [Introduce `pin_v2` into the builtin attributes namespace](https://github.com/rust-lang/rust/pull/139751)
- [Update bundled musl to 1.2.5](https://github.com/rust-lang/rust/pull/142682)
- [On Emscripten, the unwinding ABI used when compiling with `panic=unwind` was changed from the JS exception handling ABI to the wasm exception handling ABI.](https://github.com/rust-lang/rust/pull/147224) If linking C/C++ object files with Rust objects, `-fwasm-exceptions` must be passed to the linker now. On nightly Rust, it is possible to get the old behavior with `-Zwasm-emscripten-eh=false -Zbuild-std`, but it will be removed in a future release.
- The `#[test]` attribute, used to define tests, was previously ignored in various places where it had no meaning (e.g on trait methods or types). Putting the `#[test]` attribute in these places is no longer ignored, and will now result in an error; this may also result in errors when generating rustdoc. [Error when `test` attribute is applied to structs](https://github.com/rust-lang/rust/pull/147841)
- Cargo now sets the `CARGO_CFG_DEBUG_ASSERTIONS` environment variable in more situations. This will cause crates depending on `static-init` versions 1.0.1 to 1.0.3 to fail compilation with "failed to resolve: use of unresolved module or unlinked crate `parking_lot`". See [the linked issue](https://github.com/rust-lang/rust/issues/150646#issuecomment-3718964342) for details.
- [User written types in the `offset_of!` macro are now checked to be well formed.](https://github.com/rust-lang/rust/issues/150465/)
- `cargo publish` no longer emits `.crate` files as a final artifact for user access when the `build.build-dir` config is unset
- [Upgrade the `deref_nullptr` lint from warn-by-default to deny-by-default](https://github.com/rust-lang/rust/pull/148122)
- [Add future-incompatibility warning for `...` function parameters without a pattern outside of `extern` blocks](https://github.com/rust-lang/rust/pull/143619)
- [Introduce future-compatibility warning for `repr(C)` enums whose discriminant values do not fit into a `c_int` or `c_uint`](https://github.com/rust-lang/rust/pull/147017)
- [Introduce future-compatibility warning against ignoring `repr(C)` types as part of `repr(transparent)`](https://github.com/rust-lang/rust/pull/147185)
Version 1.92.0 (2025-12-11)
==========================

View file

@ -152,10 +152,21 @@ impl<'hir> LoweringContext<'_, 'hir> {
) -> DelegationResults<'hir> {
let span = self.lower_span(delegation.path.segments.last().unwrap().ident.span);
let ids = self.get_delegation_ids(
self.resolver.delegation_infos[&self.local_def_id(item_id)].resolution_node,
span,
);
// Delegation can be unresolved in illegal places such as function bodies in extern blocks (see #151356)
let ids = if let Some(delegation_info) =
self.resolver.delegation_infos.get(&self.local_def_id(item_id))
{
self.get_delegation_ids(delegation_info.resolution_node, span)
} else {
return self.generate_delegation_error(
self.dcx().span_delayed_bug(
span,
format!("LoweringContext: the delegation {:?} is unresolved", item_id),
),
span,
delegation,
);
};
match ids {
Ok(ids) => {

View file

@ -717,3 +717,100 @@ impl<S: Stage> NoArgsAttributeParser<S> for EiiForeignItemParser {
const ALLOWED_TARGETS: AllowedTargets = AllowedTargets::AllowList(&[Allow(Target::ForeignFn)]);
const CREATE: fn(Span) -> AttributeKind = |_| AttributeKind::EiiForeignItem;
}
/// Parser for the `#[patchable_function_entry(prefix_nops = m, entry_nops = n)]`
/// attribute, which requests `m` NOP instructions before and `n` NOPs at a
/// function's entry point (per the template declared below).
pub(crate) struct PatchableFunctionEntryParser;
impl<S: Stage> SingleAttributeParser<S> for PatchableFunctionEntryParser {
const PATH: &[Symbol] = &[sym::patchable_function_entry];
// Repeating the attribute on one item is a hard error, not a silent override.
const ON_DUPLICATE: OnDuplicate<S> = OnDuplicate::Error;
const ATTRIBUTE_ORDER: AttributeOrder = AttributeOrder::KeepInnermost;
// Only plain functions may carry this attribute.
const ALLOWED_TARGETS: AllowedTargets = AllowedTargets::AllowList(&[Allow(Target::Fn)]);
const TEMPLATE: AttributeTemplate = template!(List: &["prefix_nops = m, entry_nops = n"]);
/// Converts the raw argument list into `AttributeKind::PatchableFunctionEntry`.
/// Emits diagnostics and returns `None` for any malformed input.
fn convert(cx: &mut AcceptContext<'_, '_, S>, args: &ArgParser) -> Option<AttributeKind> {
// The attribute must use list form: `#[patchable_function_entry(...)]`.
let Some(meta_item_list) = args.list() else {
cx.expected_list(cx.attr_span, args);
return None;
};
let mut prefix = None;
let mut entry = None;
// An empty list `#[patchable_function_entry()]` is rejected up front.
if meta_item_list.len() == 0 {
cx.expected_list(meta_item_list.span, args);
return None;
}
// Keep parsing after the first error so every problem is reported in one
// pass; `errored` suppresses construction of the attribute at the end.
let mut errored = false;
for item in meta_item_list.mixed() {
// Each element must be a `name = value` meta item.
let Some(meta_item) = item.meta_item() else {
errored = true;
cx.expected_name_value(item.span(), None);
continue;
};
let Some(name_value_lit) = meta_item.args().name_value() else {
errored = true;
cx.expected_name_value(item.span(), None);
continue;
};
// Select which slot (`prefix` or `entry`) this key writes to.
let attrib_to_write = match meta_item.ident().map(|ident| ident.name) {
Some(sym::prefix_nops) => {
// Duplicate prefixes are not allowed
if prefix.is_some() {
errored = true;
cx.duplicate_key(meta_item.path().span(), sym::prefix_nops);
continue;
}
&mut prefix
}
Some(sym::entry_nops) => {
// Duplicate entries are not allowed
if entry.is_some() {
errored = true;
cx.duplicate_key(meta_item.path().span(), sym::entry_nops);
continue;
}
&mut entry
}
_ => {
// Any other key is rejected, naming the two accepted keys.
errored = true;
cx.expected_specific_argument(
meta_item.path().span(),
&[sym::prefix_nops, sym::entry_nops],
);
continue;
}
};
// The value must be an integer literal ...
let rustc_ast::LitKind::Int(val, _) = name_value_lit.value_as_lit().kind else {
errored = true;
cx.expected_integer_literal(name_value_lit.value_span);
continue;
};
// ... that fits the slot's type (inferred through `try_into`; the
// diagnostic reports the `u8` range).
let Ok(val) = val.get().try_into() else {
errored = true;
cx.expected_integer_literal_in_range(
name_value_lit.value_span,
u8::MIN as isize,
u8::MAX as isize,
);
continue;
};
*attrib_to_write = Some(val);
}
if errored {
None
} else {
// Keys left unspecified default to zero NOPs.
Some(AttributeKind::PatchableFunctionEntry {
prefix: prefix.unwrap_or(0),
entry: entry.unwrap_or(0),
})
}
}
}

View file

@ -91,3 +91,25 @@ impl<S: Stage> SingleAttributeParser<S> for ShouldPanicParser {
})
}
}
/// Parser for the internal `#[rustc_variance]` attribute, accepted on
/// struct, enum, and union declarations.
/// NOTE(review): presumably used by rustc's test suite to dump computed
/// variances — confirm against the variance-checking code.
pub(crate) struct RustcVarianceParser;
impl<S: Stage> NoArgsAttributeParser<S> for RustcVarianceParser {
const PATH: &[Symbol] = &[sym::rustc_variance];
// Duplicates only warn rather than error.
const ON_DUPLICATE: OnDuplicate<S> = OnDuplicate::Warn;
const ALLOWED_TARGETS: AllowedTargets = AllowedTargets::AllowList(&[
Allow(Target::Struct),
Allow(Target::Enum),
Allow(Target::Union),
]);
const CREATE: fn(Span) -> AttributeKind = |_| AttributeKind::RustcVariance;
}
/// Parser for the internal `#[rustc_variance_of_opaques]` attribute,
/// accepted only at the crate root.
/// NOTE(review): presumably the crate-wide counterpart of `#[rustc_variance]`
/// covering opaque types — confirm against its users in the test suite.
pub(crate) struct RustcVarianceOfOpaquesParser;
impl<S: Stage> NoArgsAttributeParser<S> for RustcVarianceOfOpaquesParser {
const PATH: &[Symbol] = &[sym::rustc_variance_of_opaques];
// Duplicates only warn rather than error.
const ON_DUPLICATE: OnDuplicate<S> = OnDuplicate::Warn;
const ALLOWED_TARGETS: AllowedTargets = AllowedTargets::AllowList(&[Allow(Target::Crate)]);
const CREATE: fn(Span) -> AttributeKind = |_| AttributeKind::RustcVarianceOfOpaques;
}

View file

@ -94,12 +94,12 @@ impl<S: Stage> NoArgsAttributeParser<S> for DenyExplicitImplParser {
const CREATE: fn(Span) -> AttributeKind = AttributeKind::DenyExplicitImpl;
}
pub(crate) struct DoNotImplementViaObjectParser;
impl<S: Stage> NoArgsAttributeParser<S> for DoNotImplementViaObjectParser {
const PATH: &[Symbol] = &[sym::rustc_do_not_implement_via_object];
pub(crate) struct DynIncompatibleTraitParser;
impl<S: Stage> NoArgsAttributeParser<S> for DynIncompatibleTraitParser {
const PATH: &[Symbol] = &[sym::rustc_dyn_incompatible_trait];
const ON_DUPLICATE: OnDuplicate<S> = OnDuplicate::Error;
const ALLOWED_TARGETS: AllowedTargets = AllowedTargets::AllowList(&[Allow(Target::Trait)]);
const CREATE: fn(Span) -> AttributeKind = AttributeKind::DoNotImplementViaObject;
const CREATE: fn(Span) -> AttributeKind = AttributeKind::DynIncompatibleTrait;
}
// Specialization

View file

@ -23,8 +23,8 @@ use crate::attributes::cfi_encoding::CfiEncodingParser;
use crate::attributes::codegen_attrs::{
ColdParser, CoverageParser, EiiForeignItemParser, ExportNameParser, ForceTargetFeatureParser,
NakedParser, NoMangleParser, ObjcClassParser, ObjcSelectorParser, OptimizeParser,
RustcPassIndirectlyInNonRusticAbisParser, SanitizeParser, TargetFeatureParser,
ThreadLocalParser, TrackCallerParser, UsedParser,
PatchableFunctionEntryParser, RustcPassIndirectlyInNonRusticAbisParser, SanitizeParser,
TargetFeatureParser, ThreadLocalParser, TrackCallerParser, UsedParser,
};
use crate::attributes::confusables::ConfusablesParser;
use crate::attributes::crate_level::{
@ -85,11 +85,13 @@ use crate::attributes::semantics::MayDangleParser;
use crate::attributes::stability::{
BodyStabilityParser, ConstStabilityIndirectParser, ConstStabilityParser, StabilityParser,
};
use crate::attributes::test_attrs::{IgnoreParser, ShouldPanicParser};
use crate::attributes::test_attrs::{
IgnoreParser, RustcVarianceOfOpaquesParser, RustcVarianceParser, ShouldPanicParser,
};
use crate::attributes::traits::{
AllowIncoherentImplParser, CoinductiveParser, DenyExplicitImplParser,
DoNotImplementViaObjectParser, FundamentalParser, MarkerParser, ParenSugarParser,
PointeeParser, SkipDuringMethodDispatchParser, SpecializationTraitParser, TypeConstParser,
DynIncompatibleTraitParser, FundamentalParser, MarkerParser, ParenSugarParser, PointeeParser,
SkipDuringMethodDispatchParser, SpecializationTraitParser, TypeConstParser,
UnsafeSpecializationMarkerParser,
};
use crate::attributes::transparency::TransparencyParser;
@ -103,18 +105,18 @@ type GroupType<S> = LazyLock<GroupTypeInner<S>>;
pub(super) struct GroupTypeInner<S: Stage> {
pub(super) accepters: BTreeMap<&'static [Symbol], Vec<GroupTypeInnerAccept<S>>>,
pub(super) finalizers: Vec<FinalizeFn<S>>,
}
pub(super) struct GroupTypeInnerAccept<S: Stage> {
pub(super) template: AttributeTemplate,
pub(super) accept_fn: AcceptFn<S>,
pub(super) allowed_targets: AllowedTargets,
pub(super) finalizer: FinalizeFn<S>,
}
type AcceptFn<S> =
pub(crate) type AcceptFn<S> =
Box<dyn for<'sess, 'a> Fn(&mut AcceptContext<'_, 'sess, S>, &ArgParser) + Send + Sync>;
type FinalizeFn<S> =
pub(crate) type FinalizeFn<S> =
Box<dyn Send + Sync + Fn(&mut FinalizeContext<'_, '_, S>) -> Option<AttributeKind>>;
macro_rules! attribute_parsers {
@ -142,8 +144,7 @@ macro_rules! attribute_parsers {
@[$stage: ty] pub(crate) static $name: ident = [$($names: ty),* $(,)?];
) => {
pub(crate) static $name: GroupType<$stage> = LazyLock::new(|| {
let mut accepts = BTreeMap::<_, Vec<GroupTypeInnerAccept<$stage>>>::new();
let mut finalizes = Vec::<FinalizeFn<$stage>>::new();
let mut accepters = BTreeMap::<_, Vec<GroupTypeInnerAccept<$stage>>>::new();
$(
{
thread_local! {
@ -151,7 +152,7 @@ macro_rules! attribute_parsers {
};
for (path, template, accept_fn) in <$names>::ATTRIBUTES {
accepts.entry(*path).or_default().push(GroupTypeInnerAccept {
accepters.entry(*path).or_default().push(GroupTypeInnerAccept {
template: *template,
accept_fn: Box::new(|cx, args| {
STATE_OBJECT.with_borrow_mut(|s| {
@ -159,17 +160,16 @@ macro_rules! attribute_parsers {
})
}),
allowed_targets: <$names as crate::attributes::AttributeParser<$stage>>::ALLOWED_TARGETS,
finalizer: Box::new(|cx| {
let state = STATE_OBJECT.take();
state.finalize(cx)
}),
});
}
finalizes.push(Box::new(|cx| {
let state = STATE_OBJECT.take();
state.finalize(cx)
}));
}
)*
GroupTypeInner { accepters:accepts, finalizers:finalizes }
GroupTypeInner { accepters }
});
};
}
@ -223,6 +223,7 @@ attribute_parsers!(
Single<ObjcClassParser>,
Single<ObjcSelectorParser>,
Single<OptimizeParser>,
Single<PatchableFunctionEntryParser>,
Single<PathAttributeParser>,
Single<PatternComplexityLimitParser>,
Single<ProcMacroDeriveParser>,
@ -254,7 +255,7 @@ attribute_parsers!(
Single<WithoutArgs<ConstStabilityIndirectParser>>,
Single<WithoutArgs<CoroutineParser>>,
Single<WithoutArgs<DenyExplicitImplParser>>,
Single<WithoutArgs<DoNotImplementViaObjectParser>>,
Single<WithoutArgs<DynIncompatibleTraitParser>>,
Single<WithoutArgs<EiiForeignItemParser>>,
Single<WithoutArgs<ExportStableParser>>,
Single<WithoutArgs<FfiConstParser>>,
@ -300,6 +301,8 @@ attribute_parsers!(
Single<WithoutArgs<RustcPassIndirectlyInNonRusticAbisParser>>,
Single<WithoutArgs<RustcReallocatorParser>>,
Single<WithoutArgs<RustcShouldNotBeCalledOnConstItems>>,
Single<WithoutArgs<RustcVarianceOfOpaquesParser>>,
Single<WithoutArgs<RustcVarianceParser>>,
Single<WithoutArgs<SpecializationTraitParser>>,
Single<WithoutArgs<StdInternalSymbolParser>>,
Single<WithoutArgs<ThreadLocalParser>>,
@ -503,6 +506,18 @@ impl<'f, 'sess: 'f, S: Stage> AcceptContext<'f, 'sess, S> {
self.emit_parse_error(span, AttributeParseErrorReason::ExpectedIntegerLiteral)
}
pub(crate) fn expected_integer_literal_in_range(
&self,
span: Span,
lower_bound: isize,
upper_bound: isize,
) -> ErrorGuaranteed {
self.emit_parse_error(
span,
AttributeParseErrorReason::ExpectedIntegerLiteralInRange { lower_bound, upper_bound },
)
}
pub(crate) fn expected_list(&self, span: Span, args: &ArgParser) -> ErrorGuaranteed {
let span = match args {
ArgParser::NoArgs => span,

View file

@ -12,7 +12,7 @@ use rustc_session::Session;
use rustc_session::lint::{BuiltinLintDiag, LintId};
use rustc_span::{DUMMY_SP, Span, Symbol, sym};
use crate::context::{AcceptContext, FinalizeContext, SharedContext, Stage};
use crate::context::{AcceptContext, FinalizeContext, FinalizeFn, SharedContext, Stage};
use crate::early_parsed::{EARLY_PARSED_ATTRIBUTES, EarlyParsedState};
use crate::parser::{ArgParser, PathParser, RefPathParser};
use crate::session_diagnostics::ParsedDescription;
@ -270,6 +270,8 @@ impl<'sess, S: Stage> AttributeParser<'sess, S> {
let mut attr_paths: Vec<RefPathParser<'_>> = Vec::new();
let mut early_parsed_state = EarlyParsedState::default();
let mut finalizers: Vec<&FinalizeFn<S>> = Vec::with_capacity(attrs.len());
for attr in attrs {
// If we're only looking for a single attribute, skip all the ones we don't care about.
if let Some(expected) = self.parse_only {
@ -383,6 +385,8 @@ impl<'sess, S: Stage> AttributeParser<'sess, S> {
};
(accept.accept_fn)(&mut cx, &args);
finalizers.push(&accept.finalizer);
if !matches!(cx.stage.should_emit(), ShouldEmit::Nothing) {
Self::check_target(&accept.allowed_targets, target, &mut cx);
}
@ -417,7 +421,7 @@ impl<'sess, S: Stage> AttributeParser<'sess, S> {
}
early_parsed_state.finalize_early_parsed_attributes(&mut attributes);
for f in &S::parsers().finalizers {
for f in &finalizers {
if let Some(attr) = f(&mut FinalizeContext {
shared: SharedContext { cx: self, target_span, target, emit_lint: &mut emit_lint },
all_attrs: &attr_paths,

View file

@ -525,6 +525,10 @@ pub(crate) enum AttributeParseErrorReason<'a> {
byte_string: Option<Span>,
},
ExpectedIntegerLiteral,
ExpectedIntegerLiteralInRange {
lower_bound: isize,
upper_bound: isize,
},
ExpectedAtLeastOneArgument,
ExpectedSingleArgument,
ExpectedList,
@ -596,6 +600,17 @@ impl<'a, G: EmissionGuarantee> Diagnostic<'a, G> for AttributeParseError<'_> {
AttributeParseErrorReason::ExpectedIntegerLiteral => {
diag.span_label(self.span, "expected an integer literal here");
}
AttributeParseErrorReason::ExpectedIntegerLiteralInRange {
lower_bound,
upper_bound,
} => {
diag.span_label(
self.span,
format!(
"expected an integer literal in the range of {lower_bound}..={upper_bound}"
),
);
}
AttributeParseErrorReason::ExpectedSingleArgument => {
diag.span_label(self.span, "expected a single argument here");
diag.code(E0805);

View file

@ -1506,7 +1506,7 @@ fn codegen_regular_intrinsic_call<'tcx>(
}
// FIXME implement variadics in cranelift
sym::va_copy | sym::va_arg | sym::va_end => {
sym::va_arg | sym::va_end => {
fx.tcx.dcx().span_fatal(
source_info.span,
"Defining variadic functions is not yet supported by Cranelift",

View file

@ -26,11 +26,11 @@ use std::sync::atomic::Ordering;
use gccjit::{Context, OutputKind};
use object::read::archive::ArchiveFile;
use rustc_codegen_ssa::back::lto::{SerializedModule, ThinModule, ThinShared};
use rustc_codegen_ssa::back::write::{CodegenContext, FatLtoInput};
use rustc_codegen_ssa::back::write::{CodegenContext, FatLtoInput, SharedEmitter};
use rustc_codegen_ssa::traits::*;
use rustc_codegen_ssa::{ModuleCodegen, ModuleKind, looks_like_rust_object_file};
use rustc_data_structures::memmap::Mmap;
use rustc_errors::DiagCtxtHandle;
use rustc_errors::{DiagCtxt, DiagCtxtHandle};
use rustc_log::tracing::info;
use rustc_middle::bug;
use rustc_middle::dep_graph::WorkProduct;
@ -112,10 +112,11 @@ fn save_as_file(obj: &[u8], path: &Path) -> Result<(), LtoBitcodeFromRlib> {
/// for further optimization.
pub(crate) fn run_fat(
cgcx: &CodegenContext<GccCodegenBackend>,
shared_emitter: &SharedEmitter,
each_linked_rlib_for_lto: &[PathBuf],
modules: Vec<FatLtoInput<GccCodegenBackend>>,
) -> ModuleCodegen<GccContext> {
let dcx = cgcx.create_dcx();
let dcx = DiagCtxt::new(Box::new(shared_emitter.clone()));
let dcx = dcx.handle();
let lto_data = prepare_lto(cgcx, each_linked_rlib_for_lto, dcx);
/*let symbols_below_threshold =
@ -283,14 +284,13 @@ impl ModuleBufferMethods for ModuleBuffer {
/// can simply be copied over from the incr. comp. cache.
pub(crate) fn run_thin(
cgcx: &CodegenContext<GccCodegenBackend>,
dcx: DiagCtxtHandle<'_>,
each_linked_rlib_for_lto: &[PathBuf],
modules: Vec<(String, ThinBuffer)>,
cached_modules: Vec<(SerializedModule<ModuleBuffer>, WorkProduct)>,
) -> (Vec<ThinModule<GccCodegenBackend>>, Vec<WorkProduct>) {
let dcx = cgcx.create_dcx();
let dcx = dcx.handle();
let lto_data = prepare_lto(cgcx, each_linked_rlib_for_lto, dcx);
if cgcx.opts.cg.linker_plugin_lto.enabled() {
if cgcx.use_linker_plugin_lto {
unreachable!(
"We should never reach this case if the LTO step \
is deferred to the linker"
@ -522,8 +522,6 @@ pub fn optimize_thin_module(
thin_module: ThinModule<GccCodegenBackend>,
_cgcx: &CodegenContext<GccCodegenBackend>,
) -> ModuleCodegen<GccContext> {
//let dcx = cgcx.create_dcx();
//let module_name = &thin_module.shared.module_names[thin_module.idx];
/*let tm_factory_config = TargetMachineFactoryConfig::new(cgcx, module_name.to_str().unwrap());
let tm = (cgcx.tm_factory)(tm_factory_config).map_err(|e| write::llvm_err(&dcx, e))?;*/

View file

@ -2,8 +2,11 @@ use std::{env, fs};
use gccjit::{Context, OutputKind};
use rustc_codegen_ssa::back::link::ensure_removed;
use rustc_codegen_ssa::back::write::{BitcodeSection, CodegenContext, EmitObj, ModuleConfig};
use rustc_codegen_ssa::back::write::{
BitcodeSection, CodegenContext, EmitObj, ModuleConfig, SharedEmitter,
};
use rustc_codegen_ssa::{CompiledModule, ModuleCodegen};
use rustc_errors::DiagCtxt;
use rustc_fs_util::link_or_copy;
use rustc_log::tracing::debug;
use rustc_session::config::OutputType;
@ -15,10 +18,11 @@ use crate::{GccCodegenBackend, GccContext, LtoMode};
pub(crate) fn codegen(
cgcx: &CodegenContext<GccCodegenBackend>,
shared_emitter: &SharedEmitter,
module: ModuleCodegen<GccContext>,
config: &ModuleConfig,
) -> CompiledModule {
let dcx = cgcx.create_dcx();
let dcx = DiagCtxt::new(Box::new(shared_emitter.clone()));
let dcx = dcx.handle();
let _timer = cgcx.prof.generic_activity_with_arg("GCC_module_codegen", &*module.name);

View file

@ -391,9 +391,6 @@ impl<'a, 'gcc, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tc
sym::breakpoint => {
unimplemented!();
}
sym::va_copy => {
unimplemented!();
}
sym::va_arg => {
unimplemented!();
}

View file

@ -84,7 +84,7 @@ use gccjit::{TargetInfo, Version};
use rustc_ast::expand::allocator::AllocatorMethod;
use rustc_codegen_ssa::back::lto::{SerializedModule, ThinModule};
use rustc_codegen_ssa::back::write::{
CodegenContext, FatLtoInput, ModuleConfig, TargetMachineFactoryFn,
CodegenContext, FatLtoInput, ModuleConfig, SharedEmitter, TargetMachineFactoryFn,
};
use rustc_codegen_ssa::base::codegen_crate;
use rustc_codegen_ssa::target_features::cfg_target_feature;
@ -435,23 +435,25 @@ impl WriteBackendMethods for GccCodegenBackend {
fn run_and_optimize_fat_lto(
cgcx: &CodegenContext<Self>,
shared_emitter: &SharedEmitter,
// FIXME(bjorn3): Limit LTO exports to these symbols
_exported_symbols_for_lto: &[String],
each_linked_rlib_for_lto: &[PathBuf],
modules: Vec<FatLtoInput<Self>>,
) -> ModuleCodegen<Self::Module> {
back::lto::run_fat(cgcx, each_linked_rlib_for_lto, modules)
back::lto::run_fat(cgcx, shared_emitter, each_linked_rlib_for_lto, modules)
}
fn run_thin_lto(
cgcx: &CodegenContext<Self>,
dcx: DiagCtxtHandle<'_>,
// FIXME(bjorn3): Limit LTO exports to these symbols
_exported_symbols_for_lto: &[String],
each_linked_rlib_for_lto: &[PathBuf],
modules: Vec<(String, Self::ThinBuffer)>,
cached_modules: Vec<(SerializedModule<Self::ModuleBuffer>, WorkProduct)>,
) -> (Vec<ThinModule<Self>>, Vec<WorkProduct>) {
back::lto::run_thin(cgcx, each_linked_rlib_for_lto, modules, cached_modules)
back::lto::run_thin(cgcx, dcx, each_linked_rlib_for_lto, modules, cached_modules)
}
fn print_pass_timings(&self) {
@ -464,7 +466,7 @@ impl WriteBackendMethods for GccCodegenBackend {
fn optimize(
_cgcx: &CodegenContext<Self>,
_dcx: DiagCtxtHandle<'_>,
_shared_emitter: &SharedEmitter,
module: &mut ModuleCodegen<Self::Module>,
config: &ModuleConfig,
) {
@ -473,6 +475,7 @@ impl WriteBackendMethods for GccCodegenBackend {
fn optimize_thin(
cgcx: &CodegenContext<Self>,
_shared_emitter: &SharedEmitter,
thin: ThinModule<Self>,
) -> ModuleCodegen<Self::Module> {
back::lto::optimize_thin_module(thin, cgcx)
@ -480,10 +483,11 @@ impl WriteBackendMethods for GccCodegenBackend {
fn codegen(
cgcx: &CodegenContext<Self>,
shared_emitter: &SharedEmitter,
module: ModuleCodegen<Self::Module>,
config: &ModuleConfig,
) -> CompiledModule {
back::write::codegen(cgcx, module, config)
back::write::codegen(cgcx, shared_emitter, module, config)
}
fn prepare_thin(module: ModuleCodegen<Self::Module>) -> (String, Self::ThinBuffer) {

View file

@ -9,12 +9,12 @@ use std::{io, iter, slice};
use object::read::archive::ArchiveFile;
use object::{Object, ObjectSection};
use rustc_codegen_ssa::back::lto::{SerializedModule, ThinModule, ThinShared};
use rustc_codegen_ssa::back::write::{CodegenContext, FatLtoInput};
use rustc_codegen_ssa::back::write::{CodegenContext, FatLtoInput, SharedEmitter};
use rustc_codegen_ssa::traits::*;
use rustc_codegen_ssa::{ModuleCodegen, ModuleKind, looks_like_rust_object_file};
use rustc_data_structures::fx::FxHashMap;
use rustc_data_structures::memmap::Mmap;
use rustc_errors::DiagCtxtHandle;
use rustc_errors::{DiagCtxt, DiagCtxtHandle};
use rustc_hir::attrs::SanitizerSet;
use rustc_middle::bug;
use rustc_middle::dep_graph::WorkProduct;
@ -150,17 +150,18 @@ fn get_bitcode_slice_from_object_data<'a>(
/// for further optimization.
pub(crate) fn run_fat(
cgcx: &CodegenContext<LlvmCodegenBackend>,
shared_emitter: &SharedEmitter,
exported_symbols_for_lto: &[String],
each_linked_rlib_for_lto: &[PathBuf],
modules: Vec<FatLtoInput<LlvmCodegenBackend>>,
) -> ModuleCodegen<ModuleLlvm> {
let dcx = cgcx.create_dcx();
let dcx = DiagCtxt::new(Box::new(shared_emitter.clone()));
let dcx = dcx.handle();
let (symbols_below_threshold, upstream_modules) =
prepare_lto(cgcx, exported_symbols_for_lto, each_linked_rlib_for_lto, dcx);
let symbols_below_threshold =
symbols_below_threshold.iter().map(|c| c.as_ptr()).collect::<Vec<_>>();
fat_lto(cgcx, dcx, modules, upstream_modules, &symbols_below_threshold)
fat_lto(cgcx, dcx, shared_emitter, modules, upstream_modules, &symbols_below_threshold)
}
/// Performs thin LTO by performing necessary global analysis and returning two
@ -168,18 +169,17 @@ pub(crate) fn run_fat(
/// can simply be copied over from the incr. comp. cache.
pub(crate) fn run_thin(
cgcx: &CodegenContext<LlvmCodegenBackend>,
dcx: DiagCtxtHandle<'_>,
exported_symbols_for_lto: &[String],
each_linked_rlib_for_lto: &[PathBuf],
modules: Vec<(String, ThinBuffer)>,
cached_modules: Vec<(SerializedModule<ModuleBuffer>, WorkProduct)>,
) -> (Vec<ThinModule<LlvmCodegenBackend>>, Vec<WorkProduct>) {
let dcx = cgcx.create_dcx();
let dcx = dcx.handle();
let (symbols_below_threshold, upstream_modules) =
prepare_lto(cgcx, exported_symbols_for_lto, each_linked_rlib_for_lto, dcx);
let symbols_below_threshold =
symbols_below_threshold.iter().map(|c| c.as_ptr()).collect::<Vec<_>>();
if cgcx.opts.cg.linker_plugin_lto.enabled() {
if cgcx.use_linker_plugin_lto {
unreachable!(
"We should never reach this case if the LTO step \
is deferred to the linker"
@ -197,6 +197,7 @@ pub(crate) fn prepare_thin(module: ModuleCodegen<ModuleLlvm>) -> (String, ThinBu
fn fat_lto(
cgcx: &CodegenContext<LlvmCodegenBackend>,
dcx: DiagCtxtHandle<'_>,
shared_emitter: &SharedEmitter,
modules: Vec<FatLtoInput<LlvmCodegenBackend>>,
mut serialized_modules: Vec<(SerializedModule<ModuleBuffer>, CString)>,
symbols_below_threshold: &[*const libc::c_char],
@ -265,8 +266,13 @@ fn fat_lto(
// The linking steps below may produce errors and diagnostics within LLVM
// which we'd like to handle and print, so set up our diagnostic handlers
// (which get unregistered when they go out of scope below).
let _handler =
DiagnosticHandlers::new(cgcx, dcx, llcx, &module, CodegenDiagnosticsStage::LTO);
let _handler = DiagnosticHandlers::new(
cgcx,
shared_emitter,
llcx,
&module,
CodegenDiagnosticsStage::LTO,
);
// For all other modules we codegened we'll need to link them into our own
// bitcode. All modules were codegened in their own LLVM context, however,
@ -720,10 +726,11 @@ impl Drop for ThinBuffer {
}
pub(crate) fn optimize_thin_module(
thin_module: ThinModule<LlvmCodegenBackend>,
cgcx: &CodegenContext<LlvmCodegenBackend>,
shared_emitter: &SharedEmitter,
thin_module: ThinModule<LlvmCodegenBackend>,
) -> ModuleCodegen<ModuleLlvm> {
let dcx = cgcx.create_dcx();
let dcx = DiagCtxt::new(Box::new(shared_emitter.clone()));
let dcx = dcx.handle();
let module_name = &thin_module.shared.module_names[thin_module.idx];

View file

@ -9,15 +9,16 @@ use libc::{c_char, c_int, c_void, size_t};
use rustc_codegen_ssa::back::link::ensure_removed;
use rustc_codegen_ssa::back::versioned_llvm_target;
use rustc_codegen_ssa::back::write::{
BitcodeSection, CodegenContext, EmitObj, InlineAsmError, ModuleConfig,
BitcodeSection, CodegenContext, EmitObj, InlineAsmError, ModuleConfig, SharedEmitter,
TargetMachineFactoryConfig, TargetMachineFactoryFn,
};
use rustc_codegen_ssa::base::wants_wasm_eh;
use rustc_codegen_ssa::common::TypeKind;
use rustc_codegen_ssa::traits::*;
use rustc_codegen_ssa::{CompiledModule, ModuleCodegen, ModuleKind};
use rustc_data_structures::profiling::SelfProfilerRef;
use rustc_data_structures::small_c_str::SmallCStr;
use rustc_errors::{DiagCtxtHandle, Level};
use rustc_errors::{DiagCtxt, DiagCtxtHandle, Level};
use rustc_fs_util::{link_or_copy, path_to_c_string};
use rustc_middle::ty::TyCtxt;
use rustc_session::Session;
@ -33,6 +34,8 @@ use crate::back::owned_target_machine::OwnedTargetMachine;
use crate::back::profiling::{
LlvmSelfProfiler, selfprofile_after_pass_callback, selfprofile_before_pass_callback,
};
use crate::builder::SBuilder;
use crate::builder::gpu_offload::scalar_width;
use crate::common::AsCCharPtr;
use crate::errors::{
CopyBitcode, FromLlvmDiag, FromLlvmOptimizationDiag, LlvmError, UnknownCompression,
@ -353,7 +356,7 @@ pub(crate) enum CodegenDiagnosticsStage {
}
pub(crate) struct DiagnosticHandlers<'a> {
data: *mut (&'a CodegenContext<LlvmCodegenBackend>, DiagCtxtHandle<'a>),
data: *mut (&'a CodegenContext<LlvmCodegenBackend>, &'a SharedEmitter),
llcx: &'a llvm::Context,
old_handler: Option<&'a llvm::DiagnosticHandler>,
}
@ -361,7 +364,7 @@ pub(crate) struct DiagnosticHandlers<'a> {
impl<'a> DiagnosticHandlers<'a> {
pub(crate) fn new(
cgcx: &'a CodegenContext<LlvmCodegenBackend>,
dcx: DiagCtxtHandle<'a>,
shared_emitter: &'a SharedEmitter,
llcx: &'a llvm::Context,
module: &ModuleCodegen<ModuleLlvm>,
stage: CodegenDiagnosticsStage,
@ -395,8 +398,8 @@ impl<'a> DiagnosticHandlers<'a> {
})
.and_then(|dir| dir.to_str().and_then(|p| CString::new(p).ok()));
let pgo_available = cgcx.opts.cg.profile_use.is_some();
let data = Box::into_raw(Box::new((cgcx, dcx)));
let pgo_available = cgcx.module_config.pgo_use.is_some();
let data = Box::into_raw(Box::new((cgcx, shared_emitter)));
unsafe {
let old_handler = llvm::LLVMRustContextGetDiagnosticHandler(llcx);
llvm::LLVMRustContextConfigureDiagnosticHandler(
@ -458,12 +461,16 @@ unsafe extern "C" fn diagnostic_handler(info: &DiagnosticInfo, user: *mut c_void
if user.is_null() {
return;
}
let (cgcx, dcx) =
unsafe { *(user as *const (&CodegenContext<LlvmCodegenBackend>, DiagCtxtHandle<'_>)) };
let (cgcx, shared_emitter) =
unsafe { *(user as *const (&CodegenContext<LlvmCodegenBackend>, &SharedEmitter)) };
let dcx = DiagCtxt::new(Box::new(shared_emitter.clone()));
let dcx = dcx.handle();
match unsafe { llvm::diagnostic::Diagnostic::unpack(info) } {
llvm::diagnostic::InlineAsm(inline) => {
cgcx.diag_emitter.inline_asm_error(report_inline_asm(
// FIXME use dcx
shared_emitter.inline_asm_error(report_inline_asm(
cgcx,
inline.message,
inline.level,
@ -669,7 +676,17 @@ pub(crate) unsafe fn llvm_optimize(
// Create the new parameter list, with ptr as the first argument
let mut new_param_types = Vec::with_capacity(old_param_count as usize + 1);
new_param_types.push(cx.type_ptr());
new_param_types.extend(old_param_types);
// This relies on undocumented LLVM knowledge that scalars must be passed as i64
for &old_ty in &old_param_types {
let new_ty = match cx.type_kind(old_ty) {
TypeKind::Half | TypeKind::Float | TypeKind::Double | TypeKind::Integer => {
cx.type_i64()
}
_ => old_ty,
};
new_param_types.push(new_ty);
}
// Create the new function type
let ret_ty = unsafe { llvm::LLVMGetReturnType(old_fn_ty) };
@ -682,10 +699,33 @@ pub(crate) unsafe fn llvm_optimize(
let a0 = llvm::get_param(new_fn, 0);
llvm::set_value_name(a0, CString::new("dyn_ptr").unwrap().as_bytes());
let bb = SBuilder::append_block(cx, new_fn, "entry");
let mut builder = SBuilder::build(cx, bb);
let mut old_args_rebuilt = Vec::with_capacity(old_param_types.len());
for (i, &old_ty) in old_param_types.iter().enumerate() {
let new_arg = llvm::get_param(new_fn, (i + 1) as u32);
let rebuilt = match cx.type_kind(old_ty) {
TypeKind::Half | TypeKind::Float | TypeKind::Double | TypeKind::Integer => {
let num_bits = scalar_width(cx, old_ty);
let trunc = builder.trunc(new_arg, cx.type_ix(num_bits));
builder.bitcast(trunc, old_ty)
}
_ => new_arg,
};
old_args_rebuilt.push(rebuilt);
}
builder.ret_void();
// Here we map the old arguments to the new arguments, with an offset of 1 to make sure
// that we don't use the newly added `%dyn_ptr`.
unsafe {
llvm::LLVMRustOffloadMapper(old_fn, new_fn);
llvm::LLVMRustOffloadMapper(old_fn, new_fn, old_args_rebuilt.as_ptr());
}
llvm::set_linkage(new_fn, llvm::get_linkage(old_fn));
@ -740,7 +780,7 @@ pub(crate) unsafe fn llvm_optimize(
&*module.module_llvm.tm.raw(),
to_pass_builder_opt_level(opt_level),
opt_stage,
cgcx.opts.cg.linker_plugin_lto.enabled(),
cgcx.use_linker_plugin_lto,
config.no_prepopulate_passes,
config.verify_llvm_ir,
config.lint_llvm_ir,
@ -851,14 +891,18 @@ pub(crate) unsafe fn llvm_optimize(
// Unsafe due to LLVM calls.
pub(crate) fn optimize(
cgcx: &CodegenContext<LlvmCodegenBackend>,
dcx: DiagCtxtHandle<'_>,
shared_emitter: &SharedEmitter,
module: &mut ModuleCodegen<ModuleLlvm>,
config: &ModuleConfig,
) {
let _timer = cgcx.prof.generic_activity_with_arg("LLVM_module_optimize", &*module.name);
let dcx = DiagCtxt::new(Box::new(shared_emitter.clone()));
let dcx = dcx.handle();
let llcx = &*module.module_llvm.llcx;
let _handlers = DiagnosticHandlers::new(cgcx, dcx, llcx, module, CodegenDiagnosticsStage::Opt);
let _handlers =
DiagnosticHandlers::new(cgcx, shared_emitter, llcx, module, CodegenDiagnosticsStage::Opt);
if config.emit_no_opt_bc {
let out = cgcx.output_filenames.temp_path_ext_for_cgu(
@ -875,7 +919,7 @@ pub(crate) fn optimize(
let opt_stage = match cgcx.lto {
Lto::Fat => llvm::OptStage::PreLinkFatLTO,
Lto::Thin | Lto::ThinLocal => llvm::OptStage::PreLinkThinLTO,
_ if cgcx.opts.cg.linker_plugin_lto.enabled() => llvm::OptStage::PreLinkThinLTO,
_ if cgcx.use_linker_plugin_lto => llvm::OptStage::PreLinkThinLTO,
_ => llvm::OptStage::PreLinkNoLTO,
};
@ -938,19 +982,26 @@ pub(crate) fn optimize(
pub(crate) fn codegen(
cgcx: &CodegenContext<LlvmCodegenBackend>,
shared_emitter: &SharedEmitter,
module: ModuleCodegen<ModuleLlvm>,
config: &ModuleConfig,
) -> CompiledModule {
let dcx = cgcx.create_dcx();
let _timer = cgcx.prof.generic_activity_with_arg("LLVM_module_codegen", &*module.name);
let dcx = DiagCtxt::new(Box::new(shared_emitter.clone()));
let dcx = dcx.handle();
let _timer = cgcx.prof.generic_activity_with_arg("LLVM_module_codegen", &*module.name);
{
let llmod = module.module_llvm.llmod();
let llcx = &*module.module_llvm.llcx;
let tm = &*module.module_llvm.tm;
let _handlers =
DiagnosticHandlers::new(cgcx, dcx, llcx, &module, CodegenDiagnosticsStage::Codegen);
let _handlers = DiagnosticHandlers::new(
cgcx,
shared_emitter,
llcx,
&module,
CodegenDiagnosticsStage::Codegen,
);
if cgcx.msvc_imps_needed {
create_msvc_imps(cgcx, llcx, llmod);

View file

@ -97,6 +97,21 @@ impl<'a, 'll, CX: Borrow<SCx<'ll>>> GenericBuilder<'a, 'll, CX> {
GenericBuilder { llbuilder, cx: scx }
}
pub(crate) fn append_block(
cx: &'a GenericCx<'ll, CX>,
llfn: &'ll Value,
name: &str,
) -> &'ll BasicBlock {
unsafe {
let name = SmallCStr::new(name);
llvm::LLVMAppendBasicBlockInContext(cx.llcx(), llfn, name.as_ptr())
}
}
pub(crate) fn trunc(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
unsafe { llvm::LLVMBuildTrunc(self.llbuilder, val, dest_ty, UNNAMED) }
}
pub(crate) fn bitcast(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
unsafe { llvm::LLVMBuildBitCast(self.llbuilder, val, dest_ty, UNNAMED) }
}
@ -1773,6 +1788,9 @@ impl<'a, 'll, 'tcx> Builder<'a, 'll, 'tcx> {
}
if crate::llvm_util::get_version() >= (22, 0, 0) {
// LLVM 22 requires the lifetime intrinsic to act directly on the alloca,
// there can't be an addrspacecast in between.
let ptr = unsafe { llvm::LLVMRustStripPointerCasts(ptr) };
self.call_intrinsic(intrinsic, &[self.val_ty(ptr)], &[ptr]);
} else {
self.call_intrinsic(intrinsic, &[self.val_ty(ptr)], &[self.cx.const_u64(size), ptr]);

View file

@ -2,6 +2,7 @@ use std::ffi::CString;
use llvm::Linkage::*;
use rustc_abi::Align;
use rustc_codegen_ssa::common::TypeKind;
use rustc_codegen_ssa::mir::operand::{OperandRef, OperandValue};
use rustc_codegen_ssa::traits::{BaseTypeCodegenMethods, BuilderMethods};
use rustc_middle::bug;
@ -361,7 +362,6 @@ pub(crate) fn add_global<'ll>(
pub(crate) fn gen_define_handling<'ll>(
cx: &CodegenCx<'ll, '_>,
metadata: &[OffloadMetadata],
types: &[&'ll Type],
symbol: String,
offload_globals: &OffloadGlobals<'ll>,
) -> OffloadKernelGlobals<'ll> {
@ -371,25 +371,18 @@ pub(crate) fn gen_define_handling<'ll>(
let offload_entry_ty = offload_globals.offload_entry_ty;
// It seems like non-pointer values are automatically mapped. So here, we focus on pointer (or
// reference) types.
let ptr_meta = types.iter().zip(metadata).filter_map(|(&x, meta)| match cx.type_kind(x) {
rustc_codegen_ssa::common::TypeKind::Pointer => Some(meta),
_ => None,
});
// FIXME(Sa4dUs): add `OMP_MAP_TARGET_PARAM = 0x20` only if necessary
let (ptr_sizes, ptr_transfer): (Vec<_>, Vec<_>) =
ptr_meta.map(|m| (m.payload_size, m.mode.bits() | 0x20)).unzip();
let (sizes, transfer): (Vec<_>, Vec<_>) =
metadata.iter().map(|m| (m.payload_size, m.mode.bits() | 0x20)).unzip();
let offload_sizes = add_priv_unnamed_arr(&cx, &format!(".offload_sizes.{symbol}"), &ptr_sizes);
let offload_sizes = add_priv_unnamed_arr(&cx, &format!(".offload_sizes.{symbol}"), &sizes);
// Here we figure out whether something needs to be copied to the gpu (=1), from the gpu (=2),
// or both to and from the gpu (=3). Other values shouldn't affect us for now.
// A non-mutable reference or pointer will be 1, an array that's not read, but fully overwritten
// will be 2. For now, everything is 3, until we have our frontend set up.
// 1+2+32: 1 (MapTo), 2 (MapFrom), 32 (Add one extra input ptr per function, to be used later).
let memtransfer_types =
add_priv_unnamed_arr(&cx, &format!(".offload_maptypes.{symbol}"), &ptr_transfer);
add_priv_unnamed_arr(&cx, &format!(".offload_maptypes.{symbol}"), &transfer);
// Next: For each function, generate these three entries. A weak constant,
// the llvm.rodata entry name, and the llvm_offload_entries value
@ -445,13 +438,25 @@ fn declare_offload_fn<'ll>(
)
}
pub(crate) fn scalar_width<'ll>(cx: &'ll SimpleCx<'_>, ty: &'ll Type) -> u64 {
match cx.type_kind(ty) {
TypeKind::Half
| TypeKind::Float
| TypeKind::Double
| TypeKind::X86_FP80
| TypeKind::FP128
| TypeKind::PPC_FP128 => cx.float_width(ty) as u64,
TypeKind::Integer => cx.int_width(ty),
other => bug!("scalar_width was called on a non scalar type {other:?}"),
}
}
// For each kernel *call*, we now use some of our previous declared globals to move data to and from
// the gpu. For now, we only handle the data transfer part of it.
// If two consecutive kernels use the same memory, we still move it to the host and back to the gpu.
// Since in our frontend users (by default) don't have to specify data transfer, this is something
// we should optimize in the future! We also assume that everything should be copied back and forth,
// but sometimes we can directly zero-allocate on the device and only move back, or if something is
// immutable, we might only copy it to the device, but not back.
// we should optimize in the future! In some cases we can directly zero-allocate on the device and
// only move data back, or if something is immutable, we might only copy it to the device.
//
// Current steps:
// 0. Alloca some variables for the following steps
@ -538,8 +543,34 @@ pub(crate) fn gen_call_handling<'ll, 'tcx>(
let mut geps = vec![];
let i32_0 = cx.get_const_i32(0);
for &v in args {
let gep = builder.inbounds_gep(cx.type_f32(), v, &[i32_0]);
vals.push(v);
let ty = cx.val_ty(v);
let ty_kind = cx.type_kind(ty);
let (base_val, gep_base) = match ty_kind {
TypeKind::Pointer => (v, v),
TypeKind::Half | TypeKind::Float | TypeKind::Double | TypeKind::Integer => {
// FIXME(Sa4dUs): check for `f128` support, latest NVIDIA cards support it
let num_bits = scalar_width(cx, ty);
let bb = builder.llbb();
unsafe {
llvm::LLVMRustPositionBuilderPastAllocas(builder.llbuilder, builder.llfn());
}
let addr = builder.direct_alloca(cx.type_i64(), Align::EIGHT, "addr");
unsafe {
llvm::LLVMPositionBuilderAtEnd(builder.llbuilder, bb);
}
let cast = builder.bitcast(v, cx.type_ix(num_bits));
let value = builder.zext(cast, cx.type_i64());
builder.store(value, addr, Align::EIGHT);
(value, addr)
}
other => bug!("offload does not support {other:?}"),
};
let gep = builder.inbounds_gep(cx.type_f32(), gep_base, &[i32_0]);
vals.push(base_val);
geps.push(gep);
}

View file

@ -215,6 +215,17 @@ pub(crate) unsafe fn create_module<'ll>(
// LLVM 22 updated the ABI alignment for double on AIX: https://github.com/llvm/llvm-project/pull/144673
target_data_layout = target_data_layout.replace("-f64:32:64", "");
}
if sess.target.arch == Arch::AmdGpu {
// LLVM 22 specified ELF mangling in the amdgpu data layout:
// https://github.com/llvm/llvm-project/pull/163011
target_data_layout = target_data_layout.replace("-m:e", "");
}
}
if llvm_version < (23, 0, 0) {
if sess.target.arch == Arch::S390x {
// LLVM 23 updated the s390x layout to specify the stack alignment: https://github.com/llvm/llvm-project/pull/176041
target_data_layout = target_data_layout.replace("-S64", "");
}
}
// Ensure the data-layout values hardcoded remain the defaults.

View file

@ -269,14 +269,6 @@ impl<'ll, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> {
return Ok(());
}
sym::breakpoint => self.call_intrinsic("llvm.debugtrap", &[], &[]),
sym::va_copy => {
let dest = args[0].immediate();
self.call_intrinsic(
"llvm.va_copy",
&[self.val_ty(dest)],
&[dest, args[1].immediate()],
)
}
sym::va_arg => {
match result.layout.backend_repr {
BackendRepr::Scalar(scalar) => {
@ -1394,7 +1386,8 @@ fn codegen_offload<'ll, 'tcx>(
let args = get_args_from_tuple(bx, args[3], fn_target);
let target_symbol = symbol_name_for_instance_in_crate(tcx, fn_target, LOCAL_CRATE);
let sig = tcx.fn_sig(fn_target.def_id()).skip_binder().skip_binder();
let sig = tcx.fn_sig(fn_target.def_id()).skip_binder();
let sig = tcx.instantiate_bound_regions_with_erased(sig);
let inputs = sig.inputs();
let metadata = inputs.iter().map(|ty| OffloadMetadata::from_ty(tcx, *ty)).collect::<Vec<_>>();
@ -1409,7 +1402,7 @@ fn codegen_offload<'ll, 'tcx>(
return;
}
};
let offload_data = gen_define_handling(&cx, &metadata, &types, target_symbol, offload_globals);
let offload_data = gen_define_handling(&cx, &metadata, target_symbol, offload_globals);
gen_call_handling(bx, &offload_data, &args, &types, &metadata, offload_globals, &offload_dims);
}

View file

@ -30,12 +30,13 @@ use llvm_util::target_config;
use rustc_ast::expand::allocator::AllocatorMethod;
use rustc_codegen_ssa::back::lto::{SerializedModule, ThinModule};
use rustc_codegen_ssa::back::write::{
CodegenContext, FatLtoInput, ModuleConfig, TargetMachineFactoryConfig, TargetMachineFactoryFn,
CodegenContext, FatLtoInput, ModuleConfig, SharedEmitter, TargetMachineFactoryConfig,
TargetMachineFactoryFn,
};
use rustc_codegen_ssa::traits::*;
use rustc_codegen_ssa::{CodegenResults, CompiledModule, ModuleCodegen, TargetConfig};
use rustc_data_structures::fx::FxIndexMap;
use rustc_errors::DiagCtxtHandle;
use rustc_errors::{DiagCtxt, DiagCtxtHandle};
use rustc_metadata::EncodedMetadata;
use rustc_middle::dep_graph::{WorkProduct, WorkProductId};
use rustc_middle::ty::TyCtxt;
@ -166,14 +167,20 @@ impl WriteBackendMethods for LlvmCodegenBackend {
}
fn run_and_optimize_fat_lto(
cgcx: &CodegenContext<Self>,
shared_emitter: &SharedEmitter,
exported_symbols_for_lto: &[String],
each_linked_rlib_for_lto: &[PathBuf],
modules: Vec<FatLtoInput<Self>>,
) -> ModuleCodegen<Self::Module> {
let mut module =
back::lto::run_fat(cgcx, exported_symbols_for_lto, each_linked_rlib_for_lto, modules);
let mut module = back::lto::run_fat(
cgcx,
shared_emitter,
exported_symbols_for_lto,
each_linked_rlib_for_lto,
modules,
);
let dcx = cgcx.create_dcx();
let dcx = DiagCtxt::new(Box::new(shared_emitter.clone()));
let dcx = dcx.handle();
back::lto::run_pass_manager(cgcx, dcx, &mut module, false);
@ -181,6 +188,7 @@ impl WriteBackendMethods for LlvmCodegenBackend {
}
fn run_thin_lto(
cgcx: &CodegenContext<Self>,
dcx: DiagCtxtHandle<'_>,
exported_symbols_for_lto: &[String],
each_linked_rlib_for_lto: &[PathBuf],
modules: Vec<(String, Self::ThinBuffer)>,
@ -188,6 +196,7 @@ impl WriteBackendMethods for LlvmCodegenBackend {
) -> (Vec<ThinModule<Self>>, Vec<WorkProduct>) {
back::lto::run_thin(
cgcx,
dcx,
exported_symbols_for_lto,
each_linked_rlib_for_lto,
modules,
@ -196,24 +205,26 @@ impl WriteBackendMethods for LlvmCodegenBackend {
}
fn optimize(
cgcx: &CodegenContext<Self>,
dcx: DiagCtxtHandle<'_>,
shared_emitter: &SharedEmitter,
module: &mut ModuleCodegen<Self::Module>,
config: &ModuleConfig,
) {
back::write::optimize(cgcx, dcx, module, config)
back::write::optimize(cgcx, shared_emitter, module, config)
}
fn optimize_thin(
cgcx: &CodegenContext<Self>,
shared_emitter: &SharedEmitter,
thin: ThinModule<Self>,
) -> ModuleCodegen<Self::Module> {
back::lto::optimize_thin_module(thin, cgcx)
back::lto::optimize_thin_module(cgcx, shared_emitter, thin)
}
fn codegen(
cgcx: &CodegenContext<Self>,
shared_emitter: &SharedEmitter,
module: ModuleCodegen<Self::Module>,
config: &ModuleConfig,
) -> CompiledModule {
back::write::codegen(cgcx, module, config)
back::write::codegen(cgcx, shared_emitter, module, config)
}
fn prepare_thin(module: ModuleCodegen<Self::Module>) -> (String, Self::ThinBuffer) {
back::lto::prepare_thin(module)

View file

@ -1675,7 +1675,11 @@ mod Offload {
_M: &'a Module,
_host_out: *const c_char,
) -> bool;
pub(crate) fn LLVMRustOffloadMapper<'a>(OldFn: &'a Value, NewFn: &'a Value);
pub(crate) fn LLVMRustOffloadMapper<'a>(
OldFn: &'a Value,
NewFn: &'a Value,
RebuiltArgs: *const &Value,
);
}
}
@ -1702,7 +1706,11 @@ mod Offload_fallback {
unimplemented!("This rustc version was not built with LLVM Offload support!");
}
#[allow(unused_unsafe)]
pub(crate) unsafe fn LLVMRustOffloadMapper<'a>(_OldFn: &'a Value, _NewFn: &'a Value) {
pub(crate) unsafe fn LLVMRustOffloadMapper<'a>(
_OldFn: &'a Value,
_NewFn: &'a Value,
_RebuiltArgs: *const &Value,
) {
unimplemented!("This rustc version was not built with LLVM Offload support!");
}
}
@ -1959,6 +1967,7 @@ unsafe extern "C" {
Metadata: &'a Metadata,
);
pub(crate) fn LLVMRustIsNonGVFunctionPointerTy(Val: &Value) -> bool;
pub(crate) fn LLVMRustStripPointerCasts<'a>(Val: &'a Value) -> &'a Value;
// Operations on scalar constants
pub(crate) fn LLVMRustConstIntGetZExtValue(ConstantVal: &ConstantInt, Value: &mut u64) -> bool;

View file

@ -287,12 +287,12 @@ pub(crate) fn to_llvm_features<'a>(sess: &Session, s: &'a str) -> Option<LLVMFea
"cmpxchg16b" => Some(LLVMFeature::new("cx16")),
"lahfsahf" => Some(LLVMFeature::new("sahf")),
// Enable the evex512 target feature if an avx512 target feature is enabled.
s if s.starts_with("avx512") => Some(LLVMFeature::with_dependencies(
s if s.starts_with("avx512") && major < 22 => Some(LLVMFeature::with_dependencies(
s,
smallvec![TargetFeatureFoldStrength::EnableOnly("evex512")],
)),
"avx10.1" => Some(LLVMFeature::new("avx10.1-512")),
"avx10.2" => Some(LLVMFeature::new("avx10.2-512")),
"avx10.1" if major < 22 => Some(LLVMFeature::new("avx10.1-512")),
"avx10.2" if major < 22 => Some(LLVMFeature::new("avx10.2-512")),
"apxf" => Some(LLVMFeature::with_dependencies(
"egpr",
smallvec![

View file

@ -48,8 +48,6 @@ codegen_ssa_error_creating_remark_dir = failed to create remark directory: {$err
codegen_ssa_error_writing_def_file =
error writing .DEF file: {$error}
codegen_ssa_expected_name_value_pair = expected name value pair
codegen_ssa_extern_funcs_not_found = some `extern` functions couldn't be found; some native libraries may need to be installed or have their path specified
codegen_ssa_extract_bundled_libs_archive_member = failed to get data from archive member '{$rlib}': {$error}
@ -90,9 +88,6 @@ codegen_ssa_incorrect_cgu_reuse_type =
codegen_ssa_insufficient_vs_code_product = VS Code is a different product, and is not sufficient.
codegen_ssa_invalid_literal_value = invalid literal value
.label = value must be an integer between `0` and `255`
codegen_ssa_invalid_monomorphization_basic_float_type = invalid monomorphization of `{$name}` intrinsic: expected basic float type, found `{$ty}`
codegen_ssa_invalid_monomorphization_basic_integer_or_ptr_type = invalid monomorphization of `{$name}` intrinsic: expected basic integer or pointer type, found `{$ty}`
@ -225,9 +220,6 @@ codegen_ssa_no_natvis_directory = error enumerating natvis directory: {$error}
codegen_ssa_no_saved_object_file = cached cgu {$cgu_name} should have an object file, but doesn't
codegen_ssa_out_of_range_integer = integer value out of range
.label = value must be between `0` and `255`
codegen_ssa_processing_dymutil_failed = processing debug info with `dsymutil` failed: {$status}
.note = {$output}
@ -357,9 +349,6 @@ codegen_ssa_unable_to_run_dsymutil = unable to run `dsymutil`: {$error}
codegen_ssa_unable_to_write_debugger_visualizer = unable to write debugger visualizer file `{$path}`: {$error}
codegen_ssa_unexpected_parameter_name = unexpected parameter name
.label = expected `{$prefix_nops}` or `{$entry_nops}`
codegen_ssa_unknown_archive_kind =
don't know how to build archive of type: {$kind}

View file

@ -2,6 +2,7 @@ use std::ffi::CString;
use std::sync::Arc;
use rustc_data_structures::memmap::Mmap;
use rustc_errors::DiagCtxtHandle;
use rustc_hir::def_id::{CrateNum, LOCAL_CRATE};
use rustc_middle::middle::exported_symbols::{ExportedSymbol, SymbolExportInfo, SymbolExportLevel};
use rustc_middle::ty::TyCtxt;
@ -124,28 +125,29 @@ pub(super) fn exported_symbols_for_lto(
symbols_below_threshold
}
pub(super) fn check_lto_allowed<B: WriteBackendMethods>(cgcx: &CodegenContext<B>) {
pub(super) fn check_lto_allowed<B: WriteBackendMethods>(
cgcx: &CodegenContext<B>,
dcx: DiagCtxtHandle<'_>,
) {
if cgcx.lto == Lto::ThinLocal {
// Crate local LTO is always allowed
return;
}
let dcx = cgcx.create_dcx();
// Make sure we actually can run LTO
for crate_type in cgcx.crate_types.iter() {
if !crate_type_allows_lto(*crate_type) {
dcx.handle().emit_fatal(LtoDisallowed);
} else if *crate_type == CrateType::Dylib {
if !cgcx.opts.unstable_opts.dylib_lto {
if !cgcx.dylib_lto {
dcx.handle().emit_fatal(LtoDylib);
}
} else if *crate_type == CrateType::ProcMacro && !cgcx.opts.unstable_opts.dylib_lto {
} else if *crate_type == CrateType::ProcMacro && !cgcx.dylib_lto {
dcx.handle().emit_fatal(LtoProcMacro);
}
}
if cgcx.opts.cg.prefer_dynamic && !cgcx.opts.unstable_opts.dylib_lto {
if cgcx.prefer_dynamic && !cgcx.dylib_lto {
dcx.handle().emit_fatal(DynamicLinkingWithLTO);
}
}

View file

@ -15,8 +15,8 @@ use rustc_data_structures::profiling::{SelfProfilerRef, VerboseTimingGuard};
use rustc_errors::emitter::Emitter;
use rustc_errors::translation::Translator;
use rustc_errors::{
Diag, DiagArgMap, DiagCtxt, DiagMessage, ErrCode, FatalError, FatalErrorMarker, Level,
MultiSpan, Style, Suggestions,
Diag, DiagArgMap, DiagCtxt, DiagCtxtHandle, DiagMessage, ErrCode, FatalError, FatalErrorMarker,
Level, MultiSpan, Style, Suggestions, catch_fatal_errors,
};
use rustc_fs_util::link_or_copy;
use rustc_incremental::{
@ -326,15 +326,16 @@ pub struct CodegenContext<B: WriteBackendMethods> {
// Resources needed when running LTO
pub prof: SelfProfilerRef,
pub lto: Lto,
pub use_linker_plugin_lto: bool,
pub dylib_lto: bool,
pub prefer_dynamic: bool,
pub save_temps: bool,
pub fewer_names: bool,
pub time_trace: bool,
pub opts: Arc<config::Options>,
pub crate_types: Vec<CrateType>,
pub output_filenames: Arc<OutputFilenames>,
pub invocation_temp: Option<String>,
pub module_config: Arc<ModuleConfig>,
pub allocator_config: Arc<ModuleConfig>,
pub tm_factory: TargetMachineFactoryFn<B>,
pub msvc_imps_needed: bool,
pub is_pe_coff: bool,
@ -347,8 +348,6 @@ pub struct CodegenContext<B: WriteBackendMethods> {
pub split_dwarf_kind: rustc_session::config::SplitDwarfKind,
pub pointer_size: Size,
/// Emitter to use for diagnostics produced during codegen.
pub diag_emitter: SharedEmitter,
/// LLVM optimizations for which we want to print remarks.
pub remark: Passes,
/// Directory into which should the LLVM optimization remarks be written.
@ -363,14 +362,9 @@ pub struct CodegenContext<B: WriteBackendMethods> {
pub parallel: bool,
}
impl<B: WriteBackendMethods> CodegenContext<B> {
pub fn create_dcx(&self) -> DiagCtxt {
DiagCtxt::new(Box::new(self.diag_emitter.clone()))
}
}
fn generate_thin_lto_work<B: ExtraBackendMethods>(
cgcx: &CodegenContext<B>,
dcx: DiagCtxtHandle<'_>,
exported_symbols_for_lto: &[String],
each_linked_rlib_for_lto: &[PathBuf],
needs_thin_lto: Vec<(String, B::ThinBuffer)>,
@ -380,6 +374,7 @@ fn generate_thin_lto_work<B: ExtraBackendMethods>(
let (lto_modules, copy_jobs) = B::run_thin_lto(
cgcx,
dcx,
exported_symbols_for_lto,
each_linked_rlib_for_lto,
needs_thin_lto,
@ -408,6 +403,29 @@ struct CompiledModules {
allocator_module: Option<CompiledModule>,
}
enum MaybeLtoModules<B: WriteBackendMethods> {
NoLto {
modules: Vec<CompiledModule>,
allocator_module: Option<CompiledModule>,
},
FatLto {
cgcx: CodegenContext<B>,
exported_symbols_for_lto: Arc<Vec<String>>,
each_linked_rlib_file_for_lto: Vec<PathBuf>,
needs_fat_lto: Vec<FatLtoInput<B>>,
lto_import_only_modules:
Vec<(SerializedModule<<B as WriteBackendMethods>::ModuleBuffer>, WorkProduct)>,
},
ThinLto {
cgcx: CodegenContext<B>,
exported_symbols_for_lto: Arc<Vec<String>>,
each_linked_rlib_file_for_lto: Vec<PathBuf>,
needs_thin_lto: Vec<(String, <B as WriteBackendMethods>::ThinBuffer)>,
lto_import_only_modules:
Vec<(SerializedModule<<B as WriteBackendMethods>::ModuleBuffer>, WorkProduct)>,
},
}
fn need_bitcode_in_object(tcx: TyCtxt<'_>) -> bool {
let sess = tcx.sess;
sess.opts.cg.embed_bitcode
@ -797,20 +815,12 @@ pub(crate) enum ComputedLtoType {
pub(crate) fn compute_per_cgu_lto_type(
sess_lto: &Lto,
opts: &config::Options,
linker_does_lto: bool,
sess_crate_types: &[CrateType],
module_kind: ModuleKind,
) -> ComputedLtoType {
// If the linker does LTO, we don't have to do it. Note that we
// keep doing full LTO, if it is requested, as not to break the
// assumption that the output will be a single module.
let linker_does_lto = opts.cg.linker_plugin_lto.enabled();
// When we're automatically doing ThinLTO for multi-codegen-unit
// builds we don't actually want to LTO the allocator module if
// it shows up. This is due to various linker shenanigans that
// we'll encounter later.
let is_allocator = module_kind == ModuleKind::Allocator;
// We ignore a request for full crate graph LTO if the crate type
// is only an rlib, as there is no full crate graph to process,
@ -823,7 +833,7 @@ pub(crate) fn compute_per_cgu_lto_type(
let is_rlib = matches!(sess_crate_types, [CrateType::Rlib]);
match sess_lto {
Lto::ThinLocal if !linker_does_lto && !is_allocator => ComputedLtoType::Thin,
Lto::ThinLocal if !linker_does_lto => ComputedLtoType::Thin,
Lto::Thin if !linker_does_lto && !is_rlib => ComputedLtoType::Thin,
Lto::Fat if !is_rlib => ComputedLtoType::Fat,
_ => ComputedLtoType::No,
@ -832,30 +842,24 @@ pub(crate) fn compute_per_cgu_lto_type(
fn execute_optimize_work_item<B: ExtraBackendMethods>(
cgcx: &CodegenContext<B>,
shared_emitter: SharedEmitter,
mut module: ModuleCodegen<B::Module>,
) -> WorkItemResult<B> {
let _timer = cgcx.prof.generic_activity_with_arg("codegen_module_optimize", &*module.name);
let dcx = cgcx.create_dcx();
let dcx = dcx.handle();
let module_config = match module.kind {
ModuleKind::Regular => &cgcx.module_config,
ModuleKind::Allocator => &cgcx.allocator_config,
};
B::optimize(cgcx, dcx, &mut module, module_config);
B::optimize(cgcx, &shared_emitter, &mut module, &cgcx.module_config);
// After we've done the initial round of optimizations we need to
// decide whether to synchronously codegen this module or ship it
// back to the coordinator thread for further LTO processing (which
// has to wait for all the initial modules to be optimized).
let lto_type = compute_per_cgu_lto_type(&cgcx.lto, &cgcx.opts, &cgcx.crate_types, module.kind);
let lto_type =
compute_per_cgu_lto_type(&cgcx.lto, cgcx.use_linker_plugin_lto, &cgcx.crate_types);
// If we're doing some form of incremental LTO then we need to be sure to
// save our module to disk first.
let bitcode = if module_config.emit_pre_lto_bc {
let bitcode = if cgcx.module_config.emit_pre_lto_bc {
let filename = pre_lto_bitcode_filename(&module.name);
cgcx.incr_comp_session_dir.as_ref().map(|path| path.join(&filename))
} else {
@ -864,7 +868,7 @@ fn execute_optimize_work_item<B: ExtraBackendMethods>(
match lto_type {
ComputedLtoType::No => {
let module = B::codegen(cgcx, module, module_config);
let module = B::codegen(cgcx, &shared_emitter, module, &cgcx.module_config);
WorkItemResult::Finished(module)
}
ComputedLtoType::Thin => {
@ -894,12 +898,16 @@ fn execute_optimize_work_item<B: ExtraBackendMethods>(
fn execute_copy_from_cache_work_item<B: ExtraBackendMethods>(
cgcx: &CodegenContext<B>,
shared_emitter: SharedEmitter,
module: CachedModuleCodegen,
) -> CompiledModule {
let _timer = cgcx
.prof
.generic_activity_with_arg("codegen_copy_artifacts_from_incr_cache", &*module.name);
let dcx = DiagCtxt::new(Box::new(shared_emitter));
let dcx = dcx.handle();
let incr_comp_session_dir = cgcx.incr_comp_session_dir.as_ref().unwrap();
let mut links_from_incr_cache = Vec::new();
@ -918,11 +926,7 @@ fn execute_copy_from_cache_work_item<B: ExtraBackendMethods>(
Some(output_path)
}
Err(error) => {
cgcx.create_dcx().handle().emit_err(errors::CopyPathBuf {
source_file,
output_path,
error,
});
dcx.emit_err(errors::CopyPathBuf { source_file, output_path, error });
None
}
}
@ -965,7 +969,7 @@ fn execute_copy_from_cache_work_item<B: ExtraBackendMethods>(
let bytecode = load_from_incr_cache(module_config.emit_bc, OutputType::Bitcode);
let object = load_from_incr_cache(should_emit_obj, OutputType::Object);
if should_emit_obj && object.is_none() {
cgcx.create_dcx().handle().emit_fatal(errors::NoSavedObjectFile { cgu_name: &module.name })
dcx.emit_fatal(errors::NoSavedObjectFile { cgu_name: &module.name })
}
CompiledModule {
@ -982,6 +986,7 @@ fn execute_copy_from_cache_work_item<B: ExtraBackendMethods>(
fn do_fat_lto<B: ExtraBackendMethods>(
cgcx: &CodegenContext<B>,
shared_emitter: SharedEmitter,
exported_symbols_for_lto: &[String],
each_linked_rlib_for_lto: &[PathBuf],
mut needs_fat_lto: Vec<FatLtoInput<B>>,
@ -989,7 +994,10 @@ fn do_fat_lto<B: ExtraBackendMethods>(
) -> CompiledModule {
let _timer = cgcx.prof.verbose_generic_activity("LLVM_fatlto");
check_lto_allowed(&cgcx);
let dcx = DiagCtxt::new(Box::new(shared_emitter.clone()));
let dcx = dcx.handle();
check_lto_allowed(&cgcx, dcx);
for (module, wp) in import_only_modules {
needs_fat_lto.push(FatLtoInput::Serialized { name: wp.cgu_name, buffer: module })
@ -997,15 +1005,17 @@ fn do_fat_lto<B: ExtraBackendMethods>(
let module = B::run_and_optimize_fat_lto(
cgcx,
&shared_emitter,
exported_symbols_for_lto,
each_linked_rlib_for_lto,
needs_fat_lto,
);
B::codegen(cgcx, module, &cgcx.module_config)
B::codegen(cgcx, &shared_emitter, module, &cgcx.module_config)
}
fn do_thin_lto<'a, B: ExtraBackendMethods>(
cgcx: &'a CodegenContext<B>,
shared_emitter: SharedEmitter,
exported_symbols_for_lto: Arc<Vec<String>>,
each_linked_rlib_for_lto: Vec<PathBuf>,
needs_thin_lto: Vec<(String, <B as WriteBackendMethods>::ThinBuffer)>,
@ -1016,7 +1026,10 @@ fn do_thin_lto<'a, B: ExtraBackendMethods>(
) -> Vec<CompiledModule> {
let _timer = cgcx.prof.verbose_generic_activity("LLVM_thinlto");
check_lto_allowed(&cgcx);
let dcx = DiagCtxt::new(Box::new(shared_emitter.clone()));
let dcx = dcx.handle();
check_lto_allowed(&cgcx, dcx);
let (coordinator_send, coordinator_receive) = channel();
@ -1041,6 +1054,7 @@ fn do_thin_lto<'a, B: ExtraBackendMethods>(
// we don't worry about tokens.
for (work, cost) in generate_thin_lto_work(
cgcx,
dcx,
&exported_symbols_for_lto,
&each_linked_rlib_for_lto,
needs_thin_lto,
@ -1082,7 +1096,7 @@ fn do_thin_lto<'a, B: ExtraBackendMethods>(
while used_token_count < tokens.len() + 1
&& let Some((item, _)) = work_items.pop()
{
spawn_thin_lto_work(&cgcx, coordinator_send.clone(), item);
spawn_thin_lto_work(&cgcx, shared_emitter.clone(), coordinator_send.clone(), item);
used_token_count += 1;
}
} else {
@ -1106,7 +1120,7 @@ fn do_thin_lto<'a, B: ExtraBackendMethods>(
}
Err(e) => {
let msg = &format!("failed to acquire jobserver token: {e}");
cgcx.diag_emitter.fatal(msg);
shared_emitter.fatal(msg);
codegen_aborted = Some(FatalError);
}
},
@ -1144,12 +1158,13 @@ fn do_thin_lto<'a, B: ExtraBackendMethods>(
fn execute_thin_lto_work_item<B: ExtraBackendMethods>(
cgcx: &CodegenContext<B>,
shared_emitter: SharedEmitter,
module: lto::ThinModule<B>,
) -> CompiledModule {
let _timer = cgcx.prof.generic_activity_with_arg("codegen_module_perform_lto", module.name());
let module = B::optimize_thin(cgcx, module);
B::codegen(cgcx, module, &cgcx.module_config)
let module = B::optimize_thin(cgcx, &shared_emitter, module);
B::codegen(cgcx, &shared_emitter, module, &cgcx.module_config)
}
/// Messages sent to the coordinator.
@ -1245,9 +1260,9 @@ fn start_executing_work<B: ExtraBackendMethods>(
coordinator_receive: Receiver<Message<B>>,
regular_config: Arc<ModuleConfig>,
allocator_config: Arc<ModuleConfig>,
allocator_module: Option<ModuleCodegen<B::Module>>,
mut allocator_module: Option<ModuleCodegen<B::Module>>,
coordinator_send: Sender<Message<B>>,
) -> thread::JoinHandle<Result<CompiledModules, ()>> {
) -> thread::JoinHandle<Result<MaybeLtoModules<B>, ()>> {
let sess = tcx.sess;
let mut each_linked_rlib_for_lto = Vec::new();
@ -1292,18 +1307,18 @@ fn start_executing_work<B: ExtraBackendMethods>(
let cgcx = CodegenContext::<B> {
crate_types: tcx.crate_types().to_vec(),
lto: sess.lto(),
use_linker_plugin_lto: sess.opts.cg.linker_plugin_lto.enabled(),
dylib_lto: sess.opts.unstable_opts.dylib_lto,
prefer_dynamic: sess.opts.cg.prefer_dynamic,
fewer_names: sess.fewer_names(),
save_temps: sess.opts.cg.save_temps,
time_trace: sess.opts.unstable_opts.llvm_time_trace,
opts: Arc::new(sess.opts.clone()),
prof: sess.prof.clone(),
remark: sess.opts.cg.remark.clone(),
remark_dir,
incr_comp_session_dir: sess.incr_comp_session_dir_opt().map(|r| r.clone()),
diag_emitter: shared_emitter.clone(),
output_filenames: Arc::clone(tcx.output_filenames(())),
module_config: regular_config,
allocator_config,
tm_factory: backend.target_machine_factory(tcx.sess, ol, backend_features),
msvc_imps_needed: msvc_imps_needed(tcx),
is_pe_coff: tcx.sess.target.is_like_windows,
@ -1497,16 +1512,9 @@ fn start_executing_work<B: ExtraBackendMethods>(
let mut llvm_start_time: Option<VerboseTimingGuard<'_>> = None;
let compiled_allocator_module = allocator_module.and_then(|allocator_module| {
match execute_optimize_work_item(&cgcx, allocator_module) {
WorkItemResult::Finished(compiled_module) => return Some(compiled_module),
WorkItemResult::NeedsFatLto(fat_lto_input) => needs_fat_lto.push(fat_lto_input),
WorkItemResult::NeedsThinLto(name, thin_buffer) => {
needs_thin_lto.push((name, thin_buffer))
}
}
None
});
if let Some(allocator_module) = &mut allocator_module {
B::optimize(&cgcx, &shared_emitter, allocator_module, &allocator_config);
}
// Run the message loop while there's still anything that needs message
// processing. Note that as soon as codegen is aborted we simply want to
@ -1543,7 +1551,13 @@ fn start_executing_work<B: ExtraBackendMethods>(
let (item, _) =
work_items.pop().expect("queue empty - queue_full_enough() broken?");
main_thread_state = MainThreadState::Lending;
spawn_work(&cgcx, coordinator_send.clone(), &mut llvm_start_time, item);
spawn_work(
&cgcx,
shared_emitter.clone(),
coordinator_send.clone(),
&mut llvm_start_time,
item,
);
}
}
} else if codegen_state == Completed {
@ -1561,7 +1575,13 @@ fn start_executing_work<B: ExtraBackendMethods>(
MainThreadState::Idle => {
if let Some((item, _)) = work_items.pop() {
main_thread_state = MainThreadState::Lending;
spawn_work(&cgcx, coordinator_send.clone(), &mut llvm_start_time, item);
spawn_work(
&cgcx,
shared_emitter.clone(),
coordinator_send.clone(),
&mut llvm_start_time,
item,
);
} else {
// There is no unstarted work, so let the main thread
// take over for a running worker. Otherwise the
@ -1597,7 +1617,13 @@ fn start_executing_work<B: ExtraBackendMethods>(
while running_with_own_token < tokens.len()
&& let Some((item, _)) = work_items.pop()
{
spawn_work(&cgcx, coordinator_send.clone(), &mut llvm_start_time, item);
spawn_work(
&cgcx,
shared_emitter.clone(),
coordinator_send.clone(),
&mut llvm_start_time,
item,
);
running_with_own_token += 1;
}
}
@ -1733,36 +1759,51 @@ fn start_executing_work<B: ExtraBackendMethods>(
assert!(compiled_modules.is_empty());
assert!(needs_thin_lto.is_empty());
// This uses the implicit token
let module = do_fat_lto(
&cgcx,
&exported_symbols_for_lto,
&each_linked_rlib_file_for_lto,
if let Some(allocator_module) = allocator_module.take() {
needs_fat_lto.push(FatLtoInput::InMemory(allocator_module));
}
return Ok(MaybeLtoModules::FatLto {
cgcx,
exported_symbols_for_lto,
each_linked_rlib_file_for_lto,
needs_fat_lto,
lto_import_only_modules,
);
compiled_modules.push(module);
});
} else if !needs_thin_lto.is_empty() || !lto_import_only_modules.is_empty() {
assert!(compiled_modules.is_empty());
assert!(needs_fat_lto.is_empty());
compiled_modules.extend(do_thin_lto(
&cgcx,
exported_symbols_for_lto,
each_linked_rlib_file_for_lto,
needs_thin_lto,
lto_import_only_modules,
));
if cgcx.lto == Lto::ThinLocal {
compiled_modules.extend(do_thin_lto(
&cgcx,
shared_emitter.clone(),
exported_symbols_for_lto,
each_linked_rlib_file_for_lto,
needs_thin_lto,
lto_import_only_modules,
));
} else {
if let Some(allocator_module) = allocator_module.take() {
let (name, thin_buffer) = B::prepare_thin(allocator_module);
needs_thin_lto.push((name, thin_buffer));
}
return Ok(MaybeLtoModules::ThinLto {
cgcx,
exported_symbols_for_lto,
each_linked_rlib_file_for_lto,
needs_thin_lto,
lto_import_only_modules,
});
}
}
// Regardless of what order these modules completed in, report them to
// the backend in the same order every time to ensure that we're handing
// out deterministic results.
compiled_modules.sort_by(|a, b| a.name.cmp(&b.name));
Ok(CompiledModules {
Ok(MaybeLtoModules::NoLto {
modules: compiled_modules,
allocator_module: compiled_allocator_module,
allocator_module: allocator_module.map(|allocator_module| {
B::codegen(&cgcx, &shared_emitter, allocator_module, &allocator_config)
}),
})
})
.expect("failed to spawn coordinator thread");
@ -1831,6 +1872,7 @@ pub(crate) struct WorkerFatalError;
fn spawn_work<'a, B: ExtraBackendMethods>(
cgcx: &'a CodegenContext<B>,
shared_emitter: SharedEmitter,
coordinator_send: Sender<Message<B>>,
llvm_start_time: &mut Option<VerboseTimingGuard<'a>>,
work: WorkItem<B>,
@ -1843,10 +1885,10 @@ fn spawn_work<'a, B: ExtraBackendMethods>(
B::spawn_named_thread(cgcx.time_trace, work.short_description(), move || {
let result = std::panic::catch_unwind(AssertUnwindSafe(|| match work {
WorkItem::Optimize(m) => execute_optimize_work_item(&cgcx, m),
WorkItem::CopyPostLtoArtifacts(m) => {
WorkItemResult::Finished(execute_copy_from_cache_work_item(&cgcx, m))
}
WorkItem::Optimize(m) => execute_optimize_work_item(&cgcx, shared_emitter, m),
WorkItem::CopyPostLtoArtifacts(m) => WorkItemResult::Finished(
execute_copy_from_cache_work_item(&cgcx, shared_emitter, m),
),
}));
let msg = match result {
@ -1868,6 +1910,7 @@ fn spawn_work<'a, B: ExtraBackendMethods>(
fn spawn_thin_lto_work<'a, B: ExtraBackendMethods>(
cgcx: &'a CodegenContext<B>,
shared_emitter: SharedEmitter,
coordinator_send: Sender<ThinLtoMessage>,
work: ThinLtoWorkItem<B>,
) {
@ -1875,8 +1918,10 @@ fn spawn_thin_lto_work<'a, B: ExtraBackendMethods>(
B::spawn_named_thread(cgcx.time_trace, work.short_description(), move || {
let result = std::panic::catch_unwind(AssertUnwindSafe(|| match work {
ThinLtoWorkItem::CopyPostLtoArtifacts(m) => execute_copy_from_cache_work_item(&cgcx, m),
ThinLtoWorkItem::ThinLto(m) => execute_thin_lto_work_item(&cgcx, m),
ThinLtoWorkItem::CopyPostLtoArtifacts(m) => {
execute_copy_from_cache_work_item(&cgcx, shared_emitter, m)
}
ThinLtoWorkItem::ThinLto(m) => execute_thin_lto_work_item(&cgcx, shared_emitter, m),
}));
let msg = match result {
@ -2052,13 +2097,13 @@ impl SharedEmitterMain {
pub struct Coordinator<B: ExtraBackendMethods> {
sender: Sender<Message<B>>,
future: Option<thread::JoinHandle<Result<CompiledModules, ()>>>,
future: Option<thread::JoinHandle<Result<MaybeLtoModules<B>, ()>>>,
// Only used for the Message type.
phantom: PhantomData<B>,
}
impl<B: ExtraBackendMethods> Coordinator<B> {
fn join(mut self) -> std::thread::Result<Result<CompiledModules, ()>> {
fn join(mut self) -> std::thread::Result<Result<MaybeLtoModules<B>, ()>> {
self.future.take().unwrap().join()
}
}
@ -2089,8 +2134,9 @@ pub struct OngoingCodegen<B: ExtraBackendMethods> {
impl<B: ExtraBackendMethods> OngoingCodegen<B> {
pub fn join(self, sess: &Session) -> (CodegenResults, FxIndexMap<WorkProductId, WorkProduct>) {
self.shared_emitter_main.check(sess, true);
let compiled_modules = sess.time("join_worker_thread", || match self.coordinator.join() {
Ok(Ok(compiled_modules)) => compiled_modules,
let maybe_lto_modules = sess.time("join_worker_thread", || match self.coordinator.join() {
Ok(Ok(maybe_lto_modules)) => maybe_lto_modules,
Ok(Err(())) => {
sess.dcx().abort_if_errors();
panic!("expected abort due to worker thread errors")
@ -2102,6 +2148,62 @@ impl<B: ExtraBackendMethods> OngoingCodegen<B> {
sess.dcx().abort_if_errors();
let (shared_emitter, shared_emitter_main) = SharedEmitter::new();
// Catch fatal errors to ensure shared_emitter_main.check() can emit the actual diagnostics
let compiled_modules = catch_fatal_errors(|| match maybe_lto_modules {
MaybeLtoModules::NoLto { modules, allocator_module } => {
drop(shared_emitter);
CompiledModules { modules, allocator_module }
}
MaybeLtoModules::FatLto {
cgcx,
exported_symbols_for_lto,
each_linked_rlib_file_for_lto,
needs_fat_lto,
lto_import_only_modules,
} => CompiledModules {
modules: vec![do_fat_lto(
&cgcx,
shared_emitter,
&exported_symbols_for_lto,
&each_linked_rlib_file_for_lto,
needs_fat_lto,
lto_import_only_modules,
)],
allocator_module: None,
},
MaybeLtoModules::ThinLto {
cgcx,
exported_symbols_for_lto,
each_linked_rlib_file_for_lto,
needs_thin_lto,
lto_import_only_modules,
} => CompiledModules {
modules: do_thin_lto(
&cgcx,
shared_emitter,
exported_symbols_for_lto,
each_linked_rlib_file_for_lto,
needs_thin_lto,
lto_import_only_modules,
),
allocator_module: None,
},
});
shared_emitter_main.check(sess, true);
sess.dcx().abort_if_errors();
let mut compiled_modules =
compiled_modules.expect("fatal error emitted but not sent to SharedEmitter");
// Regardless of what order these modules completed in, report them to
// the backend in the same order every time to ensure that we're handing
// out deterministic results.
compiled_modules.modules.sort_by(|a, b| a.name.cmp(&b.name));
let work_products =
copy_all_cgu_workproducts_to_incr_comp_cache_dir(sess, &compiled_modules);
produce_final_output_artifacts(sess, &compiled_modules, &self.output_filenames);

View file

@ -49,9 +49,7 @@ use crate::meth::load_vtable;
use crate::mir::operand::OperandValue;
use crate::mir::place::PlaceRef;
use crate::traits::*;
use crate::{
CachedModuleCodegen, CodegenLintLevels, CrateInfo, ModuleCodegen, ModuleKind, errors, meth, mir,
};
use crate::{CachedModuleCodegen, CodegenLintLevels, CrateInfo, ModuleCodegen, errors, meth, mir};
pub(crate) fn bin_op_to_icmp_predicate(op: BinOp, signed: bool) -> IntPredicate {
match (op, signed) {
@ -1126,9 +1124,8 @@ pub fn determine_cgu_reuse<'tcx>(tcx: TyCtxt<'tcx>, cgu: &CodegenUnit<'tcx>) ->
// reuse pre-LTO artifacts
match compute_per_cgu_lto_type(
&tcx.sess.lto(),
&tcx.sess.opts,
tcx.sess.opts.cg.linker_plugin_lto.enabled(),
tcx.crate_types(),
ModuleKind::Regular,
) {
ComputedLtoType::No => CguReuse::PostLto,
_ => CguReuse::PreLto,

View file

@ -47,59 +47,6 @@ fn try_fn_sig<'tcx>(
}
}
// FIXME(jdonszelmann): remove when patchable_function_entry becomes a parsed attr
fn parse_patchable_function_entry(
tcx: TyCtxt<'_>,
attr: &Attribute,
) -> Option<PatchableFunctionEntry> {
attr.meta_item_list().and_then(|l| {
let mut prefix = None;
let mut entry = None;
for item in l {
let Some(meta_item) = item.meta_item() else {
tcx.dcx().emit_err(errors::ExpectedNameValuePair { span: item.span() });
continue;
};
let Some(name_value_lit) = meta_item.name_value_literal() else {
tcx.dcx().emit_err(errors::ExpectedNameValuePair { span: item.span() });
continue;
};
let attrib_to_write = match meta_item.name() {
Some(sym::prefix_nops) => &mut prefix,
Some(sym::entry_nops) => &mut entry,
_ => {
tcx.dcx().emit_err(errors::UnexpectedParameterName {
span: item.span(),
prefix_nops: sym::prefix_nops,
entry_nops: sym::entry_nops,
});
continue;
}
};
let rustc_ast::LitKind::Int(val, _) = name_value_lit.kind else {
tcx.dcx().emit_err(errors::InvalidLiteralValue { span: name_value_lit.span });
continue;
};
let Ok(val) = val.get().try_into() else {
tcx.dcx().emit_err(errors::OutOfRangeInteger { span: name_value_lit.span });
continue;
};
*attrib_to_write = Some(val);
}
if let (None, None) = (prefix, entry) {
tcx.dcx().span_err(attr.span(), "must specify at least one parameter");
}
Some(PatchableFunctionEntry::from_prefix_and_entry(prefix.unwrap_or(0), entry.unwrap_or(0)))
})
}
/// Spans that are collected when processing built-in attributes,
/// that are useful for emitting diagnostics later.
#[derive(Default)]
@ -121,250 +68,235 @@ fn process_builtin_attrs(
let mut interesting_spans = InterestingAttributeDiagnosticSpans::default();
let rust_target_features = tcx.rust_target_features(LOCAL_CRATE);
for attr in attrs.iter() {
if let hir::Attribute::Parsed(p) = attr {
match p {
AttributeKind::Cold(_) => codegen_fn_attrs.flags |= CodegenFnAttrFlags::COLD,
AttributeKind::ExportName { name, .. } => {
codegen_fn_attrs.symbol_name = Some(*name)
let parsed_attrs = attrs
.iter()
.filter_map(|attr| if let hir::Attribute::Parsed(attr) = attr { Some(attr) } else { None });
for attr in parsed_attrs {
match attr {
AttributeKind::Cold(_) => codegen_fn_attrs.flags |= CodegenFnAttrFlags::COLD,
AttributeKind::ExportName { name, .. } => codegen_fn_attrs.symbol_name = Some(*name),
AttributeKind::Inline(inline, span) => {
codegen_fn_attrs.inline = *inline;
interesting_spans.inline = Some(*span);
}
AttributeKind::Naked(_) => codegen_fn_attrs.flags |= CodegenFnAttrFlags::NAKED,
AttributeKind::Align { align, .. } => codegen_fn_attrs.alignment = Some(*align),
AttributeKind::LinkName { name, .. } => {
// FIXME Remove check for foreign functions once #[link_name] on non-foreign
// functions is a hard error
if tcx.is_foreign_item(did) {
codegen_fn_attrs.symbol_name = Some(*name);
}
AttributeKind::Inline(inline, span) => {
codegen_fn_attrs.inline = *inline;
interesting_spans.inline = Some(*span);
}
AttributeKind::LinkOrdinal { ordinal, span } => {
codegen_fn_attrs.link_ordinal = Some(*ordinal);
interesting_spans.link_ordinal = Some(*span);
}
AttributeKind::LinkSection { name, .. } => codegen_fn_attrs.link_section = Some(*name),
AttributeKind::NoMangle(attr_span) => {
interesting_spans.no_mangle = Some(*attr_span);
if tcx.opt_item_name(did.to_def_id()).is_some() {
codegen_fn_attrs.flags |= CodegenFnAttrFlags::NO_MANGLE;
} else {
tcx.dcx()
.span_delayed_bug(*attr_span, "no_mangle should be on a named function");
}
AttributeKind::Naked(_) => codegen_fn_attrs.flags |= CodegenFnAttrFlags::NAKED,
AttributeKind::Align { align, .. } => codegen_fn_attrs.alignment = Some(*align),
AttributeKind::LinkName { name, .. } => {
// FIXME Remove check for foreign functions once #[link_name] on non-foreign
// functions is a hard error
if tcx.is_foreign_item(did) {
codegen_fn_attrs.symbol_name = Some(*name);
}
}
AttributeKind::LinkOrdinal { ordinal, span } => {
codegen_fn_attrs.link_ordinal = Some(*ordinal);
interesting_spans.link_ordinal = Some(*span);
}
AttributeKind::LinkSection { name, .. } => {
codegen_fn_attrs.link_section = Some(*name)
}
AttributeKind::NoMangle(attr_span) => {
interesting_spans.no_mangle = Some(*attr_span);
if tcx.opt_item_name(did.to_def_id()).is_some() {
codegen_fn_attrs.flags |= CodegenFnAttrFlags::NO_MANGLE;
}
AttributeKind::Optimize(optimize, _) => codegen_fn_attrs.optimize = *optimize,
AttributeKind::TargetFeature { features, attr_span, was_forced } => {
let Some(sig) = tcx.hir_node_by_def_id(did).fn_sig() else {
tcx.dcx().span_delayed_bug(*attr_span, "target_feature applied to non-fn");
continue;
};
let safe_target_features =
matches!(sig.header.safety, hir::HeaderSafety::SafeTargetFeatures);
codegen_fn_attrs.safe_target_features = safe_target_features;
if safe_target_features && !was_forced {
if tcx.sess.target.is_like_wasm || tcx.sess.opts.actually_rustdoc {
// The `#[target_feature]` attribute is allowed on
// WebAssembly targets on all functions. Prior to stabilizing
// the `target_feature_11` feature, `#[target_feature]` was
// only permitted on unsafe functions because on most targets
// execution of instructions that are not supported is
// considered undefined behavior. For WebAssembly which is a
// 100% safe target at execution time it's not possible to
// execute undefined instructions, and even if a future
// feature was added in some form for this it would be a
// deterministic trap. There is no undefined behavior when
// executing WebAssembly so `#[target_feature]` is allowed
// on safe functions (but again, only for WebAssembly)
//
// Note that this is also allowed if `actually_rustdoc` so
// if a target is documenting some wasm-specific code then
// it's not spuriously denied.
//
// Now that `#[target_feature]` is permitted on safe functions,
// this exception must still exist for allowing the attribute on
// `main`, `start`, and other functions that are not usually
// allowed.
} else {
tcx.dcx().span_delayed_bug(
*attr_span,
"no_mangle should be on a named function",
);
check_target_feature_trait_unsafe(tcx, did, *attr_span);
}
}
AttributeKind::Optimize(optimize, _) => codegen_fn_attrs.optimize = *optimize,
AttributeKind::TargetFeature { features, attr_span, was_forced } => {
let Some(sig) = tcx.hir_node_by_def_id(did).fn_sig() else {
tcx.dcx().span_delayed_bug(*attr_span, "target_feature applied to non-fn");
continue;
from_target_feature_attr(
tcx,
did,
features,
*was_forced,
rust_target_features,
&mut codegen_fn_attrs.target_features,
);
}
AttributeKind::TrackCaller(attr_span) => {
let is_closure = tcx.is_closure_like(did.to_def_id());
if !is_closure
&& let Some(fn_sig) = try_fn_sig(tcx, did, *attr_span)
&& fn_sig.skip_binder().abi() != ExternAbi::Rust
{
tcx.dcx().emit_err(errors::RequiresRustAbi { span: *attr_span });
}
if is_closure
&& !tcx.features().closure_track_caller()
&& !attr_span.allows_unstable(sym::closure_track_caller)
{
feature_err(
&tcx.sess,
sym::closure_track_caller,
*attr_span,
"`#[track_caller]` on closures is currently unstable",
)
.emit();
}
codegen_fn_attrs.flags |= CodegenFnAttrFlags::TRACK_CALLER
}
AttributeKind::Used { used_by, .. } => match used_by {
UsedBy::Compiler => codegen_fn_attrs.flags |= CodegenFnAttrFlags::USED_COMPILER,
UsedBy::Linker => codegen_fn_attrs.flags |= CodegenFnAttrFlags::USED_LINKER,
UsedBy::Default => {
let used_form = if tcx.sess.target.os == Os::Illumos {
// illumos' `ld` doesn't support a section header that would represent
// `#[used(linker)]`, see
// https://github.com/rust-lang/rust/issues/146169. For that target,
// downgrade as if `#[used(compiler)]` was requested and hope for the
// best.
CodegenFnAttrFlags::USED_COMPILER
} else {
CodegenFnAttrFlags::USED_LINKER
};
let safe_target_features =
matches!(sig.header.safety, hir::HeaderSafety::SafeTargetFeatures);
codegen_fn_attrs.safe_target_features = safe_target_features;
if safe_target_features && !was_forced {
if tcx.sess.target.is_like_wasm || tcx.sess.opts.actually_rustdoc {
// The `#[target_feature]` attribute is allowed on
// WebAssembly targets on all functions. Prior to stabilizing
// the `target_feature_11` feature, `#[target_feature]` was
// only permitted on unsafe functions because on most targets
// execution of instructions that are not supported is
// considered undefined behavior. For WebAssembly which is a
// 100% safe target at execution time it's not possible to
// execute undefined instructions, and even if a future
// feature was added in some form for this it would be a
// deterministic trap. There is no undefined behavior when
// executing WebAssembly so `#[target_feature]` is allowed
// on safe functions (but again, only for WebAssembly)
//
// Note that this is also allowed if `actually_rustdoc` so
// if a target is documenting some wasm-specific code then
// it's not spuriously denied.
//
// Now that `#[target_feature]` is permitted on safe functions,
// this exception must still exist for allowing the attribute on
// `main`, `start`, and other functions that are not usually
// allowed.
} else {
check_target_feature_trait_unsafe(tcx, did, *attr_span);
codegen_fn_attrs.flags |= used_form;
}
},
AttributeKind::FfiConst(_) => codegen_fn_attrs.flags |= CodegenFnAttrFlags::FFI_CONST,
AttributeKind::FfiPure(_) => codegen_fn_attrs.flags |= CodegenFnAttrFlags::FFI_PURE,
AttributeKind::StdInternalSymbol(_) => {
codegen_fn_attrs.flags |= CodegenFnAttrFlags::RUSTC_STD_INTERNAL_SYMBOL
}
AttributeKind::Linkage(linkage, span) => {
let linkage = Some(*linkage);
if tcx.is_foreign_item(did) {
codegen_fn_attrs.import_linkage = linkage;
if tcx.is_mutable_static(did.into()) {
let mut diag = tcx.dcx().struct_span_err(
*span,
"extern mutable statics are not allowed with `#[linkage]`",
);
diag.note(
"marking the extern static mutable would allow changing which \
symbol the static references rather than make the target of the \
symbol mutable",
);
diag.emit();
}
} else {
codegen_fn_attrs.linkage = linkage;
}
}
AttributeKind::Sanitize { span, .. } => {
interesting_spans.sanitize = Some(*span);
}
AttributeKind::ObjcClass { classname, .. } => {
codegen_fn_attrs.objc_class = Some(*classname);
}
AttributeKind::ObjcSelector { methname, .. } => {
codegen_fn_attrs.objc_selector = Some(*methname);
}
AttributeKind::EiiForeignItem => {
codegen_fn_attrs.flags |= CodegenFnAttrFlags::EXTERNALLY_IMPLEMENTABLE_ITEM;
}
AttributeKind::EiiImpls(impls) => {
for i in impls {
let foreign_item = match i.resolution {
EiiImplResolution::Macro(def_id) => {
let Some(extern_item) = find_attr!(
tcx.get_all_attrs(def_id),
AttributeKind::EiiDeclaration(target) => target.foreign_item
) else {
tcx.dcx().span_delayed_bug(
i.span,
"resolved to something that's not an EII",
);
continue;
};
extern_item
}
}
from_target_feature_attr(
tcx,
did,
features,
*was_forced,
rust_target_features,
&mut codegen_fn_attrs.target_features,
);
}
AttributeKind::TrackCaller(attr_span) => {
let is_closure = tcx.is_closure_like(did.to_def_id());
EiiImplResolution::Known(decl) => decl.foreign_item,
EiiImplResolution::Error(_eg) => continue,
};
if !is_closure
&& let Some(fn_sig) = try_fn_sig(tcx, did, *attr_span)
&& fn_sig.skip_binder().abi() != ExternAbi::Rust
// this is to prevent a bug where a single crate defines both the default and explicit implementation
// for an EII. In that case, both of them may be part of the same final object file. I'm not 100% sure
// what happens, either rustc deduplicates the symbol or llvm, or it's random/order-dependent.
// However, the fact that the default one of has weak linkage isn't considered and you sometimes get that
// the default implementation is used while an explicit implementation is given.
if
// if this is a default impl
i.is_default
// iterate over all implementations *in the current crate*
// (this is ok since we generate codegen fn attrs in the local crate)
// if any of them is *not default* then don't emit the alias.
&& tcx.externally_implementable_items(LOCAL_CRATE).get(&foreign_item).expect("at least one").1.iter().any(|(_, imp)| !imp.is_default)
{
tcx.dcx().emit_err(errors::RequiresRustAbi { span: *attr_span });
continue;
}
if is_closure
&& !tcx.features().closure_track_caller()
&& !attr_span.allows_unstable(sym::closure_track_caller)
{
feature_err(
&tcx.sess,
sym::closure_track_caller,
*attr_span,
"`#[track_caller]` on closures is currently unstable",
)
.emit();
}
codegen_fn_attrs.flags |= CodegenFnAttrFlags::TRACK_CALLER
}
AttributeKind::Used { used_by, .. } => match used_by {
UsedBy::Compiler => codegen_fn_attrs.flags |= CodegenFnAttrFlags::USED_COMPILER,
UsedBy::Linker => codegen_fn_attrs.flags |= CodegenFnAttrFlags::USED_LINKER,
UsedBy::Default => {
let used_form = if tcx.sess.target.os == Os::Illumos {
// illumos' `ld` doesn't support a section header that would represent
// `#[used(linker)]`, see
// https://github.com/rust-lang/rust/issues/146169. For that target,
// downgrade as if `#[used(compiler)]` was requested and hope for the
// best.
CodegenFnAttrFlags::USED_COMPILER
} else {
CodegenFnAttrFlags::USED_LINKER
};
codegen_fn_attrs.flags |= used_form;
}
},
AttributeKind::FfiConst(_) => {
codegen_fn_attrs.flags |= CodegenFnAttrFlags::FFI_CONST
}
AttributeKind::FfiPure(_) => codegen_fn_attrs.flags |= CodegenFnAttrFlags::FFI_PURE,
AttributeKind::StdInternalSymbol(_) => {
codegen_fn_attrs.flags |= CodegenFnAttrFlags::RUSTC_STD_INTERNAL_SYMBOL
}
AttributeKind::Linkage(linkage, span) => {
let linkage = Some(*linkage);
if tcx.is_foreign_item(did) {
codegen_fn_attrs.import_linkage = linkage;
if tcx.is_mutable_static(did.into()) {
let mut diag = tcx.dcx().struct_span_err(
*span,
"extern mutable statics are not allowed with `#[linkage]`",
);
diag.note(
"marking the extern static mutable would allow changing which \
symbol the static references rather than make the target of the \
symbol mutable",
);
diag.emit();
}
} else {
codegen_fn_attrs.linkage = linkage;
}
}
AttributeKind::Sanitize { span, .. } => {
interesting_spans.sanitize = Some(*span);
}
AttributeKind::ObjcClass { classname, .. } => {
codegen_fn_attrs.objc_class = Some(*classname);
}
AttributeKind::ObjcSelector { methname, .. } => {
codegen_fn_attrs.objc_selector = Some(*methname);
}
AttributeKind::EiiForeignItem => {
codegen_fn_attrs.foreign_item_symbol_aliases.push((
foreign_item,
if i.is_default { Linkage::LinkOnceAny } else { Linkage::External },
Visibility::Default,
));
codegen_fn_attrs.flags |= CodegenFnAttrFlags::EXTERNALLY_IMPLEMENTABLE_ITEM;
}
AttributeKind::EiiImpls(impls) => {
for i in impls {
let foreign_item = match i.resolution {
EiiImplResolution::Macro(def_id) => {
let Some(extern_item) = find_attr!(
tcx.get_all_attrs(def_id),
AttributeKind::EiiDeclaration(target) => target.foreign_item
) else {
tcx.dcx().span_delayed_bug(
i.span,
"resolved to something that's not an EII",
);
continue;
};
extern_item
}
EiiImplResolution::Known(decl) => decl.foreign_item,
EiiImplResolution::Error(_eg) => continue,
};
// this is to prevent a bug where a single crate defines both the default and explicit implementation
// for an EII. In that case, both of them may be part of the same final object file. I'm not 100% sure
// what happens, either rustc deduplicates the symbol or llvm, or it's random/order-dependent.
// However, the fact that the default one of has weak linkage isn't considered and you sometimes get that
// the default implementation is used while an explicit implementation is given.
if
// if this is a default impl
i.is_default
// iterate over all implementations *in the current crate*
// (this is ok since we generate codegen fn attrs in the local crate)
// if any of them is *not default* then don't emit the alias.
&& tcx.externally_implementable_items(LOCAL_CRATE).get(&foreign_item).expect("at least one").1.iter().any(|(_, imp)| !imp.is_default)
{
continue;
}
codegen_fn_attrs.foreign_item_symbol_aliases.push((
foreign_item,
if i.is_default { Linkage::LinkOnceAny } else { Linkage::External },
Visibility::Default,
));
codegen_fn_attrs.flags |= CodegenFnAttrFlags::EXTERNALLY_IMPLEMENTABLE_ITEM;
}
}
AttributeKind::ThreadLocal => {
codegen_fn_attrs.flags |= CodegenFnAttrFlags::THREAD_LOCAL
}
AttributeKind::InstructionSet(instruction_set) => {
codegen_fn_attrs.instruction_set = Some(*instruction_set)
}
AttributeKind::RustcAllocator => {
codegen_fn_attrs.flags |= CodegenFnAttrFlags::ALLOCATOR
}
AttributeKind::RustcDeallocator => {
codegen_fn_attrs.flags |= CodegenFnAttrFlags::DEALLOCATOR
}
AttributeKind::RustcReallocator => {
codegen_fn_attrs.flags |= CodegenFnAttrFlags::REALLOCATOR
}
AttributeKind::RustcAllocatorZeroed => {
codegen_fn_attrs.flags |= CodegenFnAttrFlags::ALLOCATOR_ZEROED
}
AttributeKind::RustcNounwind => {
codegen_fn_attrs.flags |= CodegenFnAttrFlags::NEVER_UNWIND
}
AttributeKind::RustcOffloadKernel => {
codegen_fn_attrs.flags |= CodegenFnAttrFlags::OFFLOAD_KERNEL
}
_ => {}
}
}
let Some(name) = attr.name() else {
continue;
};
match name {
sym::patchable_function_entry => {
AttributeKind::ThreadLocal => {
codegen_fn_attrs.flags |= CodegenFnAttrFlags::THREAD_LOCAL
}
AttributeKind::InstructionSet(instruction_set) => {
codegen_fn_attrs.instruction_set = Some(*instruction_set)
}
AttributeKind::RustcAllocator => {
codegen_fn_attrs.flags |= CodegenFnAttrFlags::ALLOCATOR
}
AttributeKind::RustcDeallocator => {
codegen_fn_attrs.flags |= CodegenFnAttrFlags::DEALLOCATOR
}
AttributeKind::RustcReallocator => {
codegen_fn_attrs.flags |= CodegenFnAttrFlags::REALLOCATOR
}
AttributeKind::RustcAllocatorZeroed => {
codegen_fn_attrs.flags |= CodegenFnAttrFlags::ALLOCATOR_ZEROED
}
AttributeKind::RustcNounwind => {
codegen_fn_attrs.flags |= CodegenFnAttrFlags::NEVER_UNWIND
}
AttributeKind::RustcOffloadKernel => {
codegen_fn_attrs.flags |= CodegenFnAttrFlags::OFFLOAD_KERNEL
}
AttributeKind::PatchableFunctionEntry { prefix, entry } => {
codegen_fn_attrs.patchable_function_entry =
parse_patchable_function_entry(tcx, attr);
Some(PatchableFunctionEntry::from_prefix_and_entry(*prefix, *entry));
}
_ => {}
}

View file

@ -136,39 +136,6 @@ pub(crate) struct RequiresRustAbi {
pub span: Span,
}
#[derive(Diagnostic)]
#[diag(codegen_ssa_expected_name_value_pair)]
pub(crate) struct ExpectedNameValuePair {
#[primary_span]
pub span: Span,
}
#[derive(Diagnostic)]
#[diag(codegen_ssa_unexpected_parameter_name)]
pub(crate) struct UnexpectedParameterName {
#[primary_span]
#[label]
pub span: Span,
pub prefix_nops: Symbol,
pub entry_nops: Symbol,
}
#[derive(Diagnostic)]
#[diag(codegen_ssa_invalid_literal_value)]
pub(crate) struct InvalidLiteralValue {
#[primary_span]
#[label]
pub span: Span,
}
#[derive(Diagnostic)]
#[diag(codegen_ssa_out_of_range_integer)]
pub(crate) struct OutOfRangeInteger {
#[primary_span]
#[label]
pub span: Span,
}
#[derive(Diagnostic)]
#[diag(codegen_ssa_copy_path_buf)]
pub(crate) struct CopyPathBuf {

View file

@ -4,7 +4,7 @@ use rustc_errors::DiagCtxtHandle;
use rustc_middle::dep_graph::WorkProduct;
use crate::back::lto::{SerializedModule, ThinModule};
use crate::back::write::{CodegenContext, FatLtoInput, ModuleConfig};
use crate::back::write::{CodegenContext, FatLtoInput, ModuleConfig, SharedEmitter};
use crate::{CompiledModule, ModuleCodegen};
pub trait WriteBackendMethods: Clone + 'static {
@ -19,6 +19,7 @@ pub trait WriteBackendMethods: Clone + 'static {
/// if necessary and running any further optimizations
fn run_and_optimize_fat_lto(
cgcx: &CodegenContext<Self>,
shared_emitter: &SharedEmitter,
exported_symbols_for_lto: &[String],
each_linked_rlib_for_lto: &[PathBuf],
modules: Vec<FatLtoInput<Self>>,
@ -28,6 +29,7 @@ pub trait WriteBackendMethods: Clone + 'static {
/// can simply be copied over from the incr. comp. cache.
fn run_thin_lto(
cgcx: &CodegenContext<Self>,
dcx: DiagCtxtHandle<'_>,
exported_symbols_for_lto: &[String],
each_linked_rlib_for_lto: &[PathBuf],
modules: Vec<(String, Self::ThinBuffer)>,
@ -37,16 +39,18 @@ pub trait WriteBackendMethods: Clone + 'static {
fn print_statistics(&self);
fn optimize(
cgcx: &CodegenContext<Self>,
dcx: DiagCtxtHandle<'_>,
shared_emitter: &SharedEmitter,
module: &mut ModuleCodegen<Self::Module>,
config: &ModuleConfig,
);
fn optimize_thin(
cgcx: &CodegenContext<Self>,
shared_emitter: &SharedEmitter,
thin: ThinModule<Self>,
) -> ModuleCodegen<Self::Module>;
fn codegen(
cgcx: &CodegenContext<Self>,
shared_emitter: &SharedEmitter,
module: ModuleCodegen<Self::Module>,
config: &ModuleConfig,
) -> CompiledModule;

View file

@ -10,7 +10,7 @@ bitflags = "2.4.1"
either = "1.0"
elsa = "1.11.0"
ena = "0.14.3"
indexmap = "2.4.0"
indexmap = "2.12.1"
jobserver_crate = { version = "0.1.28", package = "jobserver" }
measureme = "12.0.1"
parking_lot = "0.12"
@ -31,7 +31,7 @@ tracing = "0.1"
# tidy-alphabetical-end
[dependencies.hashbrown]
version = "0.15.2"
version = "0.16.1"
default-features = false
features = ["nightly"] # for may_dangle

View file

@ -18,7 +18,7 @@ use std::ffi::OsString;
use std::fmt::Write as _;
use std::fs::{self, File};
use std::io::{self, IsTerminal, Read, Write};
use std::panic::{self, PanicHookInfo, catch_unwind};
use std::panic::{self, PanicHookInfo};
use std::path::{Path, PathBuf};
use std::process::{self, Command, Stdio};
use std::sync::OnceLock;
@ -32,10 +32,11 @@ use rustc_codegen_ssa::{CodegenErrors, CodegenResults};
use rustc_data_structures::profiling::{
TimePassesFormat, get_resident_set_size, print_time_passes_entry,
};
pub use rustc_errors::catch_fatal_errors;
use rustc_errors::emitter::stderr_destination;
use rustc_errors::registry::Registry;
use rustc_errors::translation::Translator;
use rustc_errors::{ColorConfig, DiagCtxt, ErrCode, FatalError, PResult, markdown};
use rustc_errors::{ColorConfig, DiagCtxt, ErrCode, PResult, markdown};
use rustc_feature::find_gated_cfg;
// This avoids a false positive with `-Wunused_crate_dependencies`.
// `rust_index` isn't used in this crate's code, but it must be named in the
@ -1377,21 +1378,6 @@ fn parse_crate_attrs<'a>(sess: &'a Session) -> PResult<'a, ast::AttrVec> {
parser.parse_inner_attributes()
}
/// Runs a closure and catches unwinds triggered by fatal errors.
///
/// The compiler currently unwinds with a special sentinel value to abort
/// compilation on fatal errors. This function catches that sentinel and turns
/// the panic into a `Result` instead.
pub fn catch_fatal_errors<F: FnOnce() -> R, R>(f: F) -> Result<R, FatalError> {
catch_unwind(panic::AssertUnwindSafe(f)).map_err(|value| {
if value.is::<rustc_errors::FatalErrorMarker>() {
FatalError
} else {
panic::resume_unwind(value);
}
})
}
/// Variant of `catch_fatal_errors` for the `interface::Result` return type
/// that also computes the exit code.
pub fn catch_with_exit_code(f: impl FnOnce()) -> i32 {

View file

@ -66,7 +66,7 @@ use rustc_lint_defs::LintExpectationId;
pub use rustc_lint_defs::{Applicability, listify, pluralize};
use rustc_macros::{Decodable, Encodable};
pub use rustc_span::ErrorGuaranteed;
pub use rustc_span::fatal_error::{FatalError, FatalErrorMarker};
pub use rustc_span::fatal_error::{FatalError, FatalErrorMarker, catch_fatal_errors};
use rustc_span::source_map::SourceMap;
use rustc_span::{BytePos, DUMMY_SP, Loc, Span};
pub use snippet::Style;

View file

@ -1315,13 +1315,13 @@ pub static BUILTIN_ATTRIBUTES: &[BuiltinAttribute] = &[
"`#[rustc_deny_explicit_impl]` enforces that a trait can have no user-provided impls"
),
rustc_attr!(
rustc_do_not_implement_via_object,
rustc_dyn_incompatible_trait,
AttributeType::Normal,
template!(Word),
ErrorFollowing,
EncodeCrossCrate::No,
"`#[rustc_do_not_implement_via_object]` opts out of the automatic trait impl for trait objects \
(`impl Trait for dyn Trait`)"
"`#[rustc_dyn_incompatible_trait]` marks a trait as dyn-incompatible, \
even if it otherwise satisfies the requirements to be dyn-compatible."
),
rustc_attr!(
rustc_has_incoherent_inherent_impls, AttributeType::Normal, template!(Word),

View file

@ -728,9 +728,6 @@ pub enum AttributeKind {
/// Represents [`#[deprecated]`](https://doc.rust-lang.org/stable/reference/attributes/diagnostics.html#the-deprecated-attribute).
Deprecation { deprecation: Deprecation, span: Span },
/// Represents `#[rustc_do_not_implement_via_object]`.
DoNotImplementViaObject(Span),
/// Represents `#[diagnostic::do_not_recommend]`.
DoNotRecommend { attr_span: Span },
@ -746,6 +743,9 @@ pub enum AttributeKind {
/// Represents `#[rustc_dummy]`.
Dummy,
/// Represents `#[rustc_dyn_incompatible_trait]`.
DynIncompatibleTrait(Span),
/// Implementation detail of `#[eii]`
EiiDeclaration(EiiDecl),
@ -879,6 +879,9 @@ pub enum AttributeKind {
/// Represents `#[rustc_pass_by_value]` (used by the `rustc_pass_by_value` lint).
PassByValue(Span),
/// Represents `#[patchable_function_entry]`
PatchableFunctionEntry { prefix: u8, entry: u8 },
/// Represents `#[path]`
Path(Symbol, Span),
@ -1007,6 +1010,12 @@ pub enum AttributeKind {
/// Represents `#[rustc_simd_monomorphize_lane_limit = "N"]`.
RustcSimdMonomorphizeLaneLimit(Limit),
/// Represents `#[rustc_variance]`
RustcVariance,
/// Represents `#[rustc_variance_of_opaques]`
RustcVarianceOfOpaques,
/// Represents `#[sanitize]`
///
/// the on set and off set are distjoint since there's a third option: unset.

View file

@ -43,11 +43,11 @@ impl AttributeKind {
DebuggerVisualizer(..) => No,
DenyExplicitImpl(..) => No,
Deprecation { .. } => Yes,
DoNotImplementViaObject(..) => No,
DoNotRecommend { .. } => Yes,
Doc(_) => Yes,
DocComment { .. } => Yes,
Dummy => No,
DynIncompatibleTrait(..) => No,
EiiDeclaration(_) => Yes,
EiiForeignItem => No,
EiiImpls(..) => No,
@ -88,6 +88,7 @@ impl AttributeKind {
Optimize(..) => No,
ParenSugar(..) => No,
PassByValue(..) => Yes,
PatchableFunctionEntry { .. } => Yes,
Path(..) => No,
PatternComplexityLimit { .. } => No,
PinV2(..) => Yes,
@ -129,6 +130,8 @@ impl AttributeKind {
RustcScalableVector { .. } => Yes,
RustcShouldNotBeCalledOnConstItems(..) => Yes,
RustcSimdMonomorphizeLaneLimit(..) => Yes, // Affects layout computation, which needs to work cross-crate
RustcVariance => No,
RustcVarianceOfOpaques => No,
Sanitize { .. } => No,
ShouldPanic { .. } => No,
SkipDuringMethodDispatch { .. } => No,

View file

@ -171,7 +171,7 @@ macro_rules! print_tup {
print_tup!(A B C D E F G H);
print_skip!(Span, (), ErrorGuaranteed);
print_disp!(u16, u128, usize, bool, NonZero<u32>, Limit);
print_disp!(u8, u16, u128, usize, bool, NonZero<u32>, Limit);
print_debug!(
Symbol,
Ident,

View file

@ -4615,6 +4615,11 @@ pub struct Upvar {
pub struct TraitCandidate {
pub def_id: DefId,
pub import_ids: SmallVec<[LocalDefId; 1]>,
// Indicates whether this trait candidate is ambiguously glob imported
// in it's scope. Related to the AMBIGUOUS_GLOB_IMPORTED_TRAITS lint.
// If this is set to true and the trait is used as a result of method lookup, this
// lint is thrown.
pub lint_ambiguous: bool,
}
#[derive(Copy, Clone, Debug, HashStable_Generic)]

View file

@ -24,6 +24,7 @@ use rustc_middle::ty::{
TypeVisitable, TypeVisitableExt, fold_regions,
};
use rustc_session::lint::builtin::UNINHABITED_STATIC;
use rustc_span::source_map::Spanned;
use rustc_target::spec::{AbiMap, AbiMapping};
use rustc_trait_selection::error_reporting::InferCtxtErrorExt;
use rustc_trait_selection::error_reporting::traits::on_unimplemented::OnUnimplementedDirective;
@ -192,6 +193,12 @@ fn check_static_inhabited(tcx: TyCtxt<'_>, def_id: LocalDefId) {
tcx.dcx().emit_err(errors::TooLargeStatic { span });
return;
}
// SIMD types with invalid layout (e.g., zero-length) should emit an error
Err(e @ LayoutError::InvalidSimd { .. }) => {
let ty_span = tcx.ty_span(def_id);
tcx.dcx().emit_err(Spanned { span: ty_span, node: e.into_diagnostic() });
return;
}
// Generic statics are rejected, but we still reach this case.
Err(e) => {
tcx.dcx().span_delayed_bug(span, format!("{e:?}"));

View file

@ -216,6 +216,7 @@ fn intrinsic_operation_unsafety(tcx: TyCtxt<'_>, intrinsic_id: LocalDefId) -> hi
| sym::type_name
| sym::type_of
| sym::ub_checks
| sym::va_copy
| sym::variant_count
| sym::vtable_for
| sym::wrapping_add
@ -629,14 +630,13 @@ pub(crate) fn check_intrinsic_type(
)
}
sym::va_start | sym::va_end => {
(0, 0, vec![mk_va_list_ty(hir::Mutability::Mut).0], tcx.types.unit)
}
sym::va_copy => {
let (va_list_ref_ty, va_list_ty) = mk_va_list_ty(hir::Mutability::Not);
let va_list_ptr_ty = Ty::new_mut_ptr(tcx, va_list_ty);
(0, 0, vec![va_list_ptr_ty, va_list_ref_ty], tcx.types.unit)
(0, 0, vec![va_list_ref_ty], va_list_ty)
}
sym::va_start | sym::va_end => {
(0, 0, vec![mk_va_list_ty(hir::Mutability::Mut).0], tcx.types.unit)
}
sym::va_arg => (1, 0, vec![mk_va_list_ty(hir::Mutability::Mut).0], param(0)),

View file

@ -211,9 +211,7 @@ fn check_object_overlap<'tcx>(
// This is a WF error tested by `coherence-impl-trait-for-trait-dyn-compatible.rs`.
} else {
let mut supertrait_def_ids = elaborate::supertrait_def_ids(tcx, component_def_id);
if supertrait_def_ids
.any(|d| d == trait_def_id && tcx.trait_def(d).implement_via_object)
{
if supertrait_def_ids.any(|d| d == trait_def_id) {
let span = tcx.def_span(impl_def_id);
return Err(struct_span_code_err!(
tcx.dcx(),

View file

@ -924,7 +924,8 @@ fn trait_def(tcx: TyCtxt<'_>, def_id: LocalDefId) -> ty::TraitDef {
);
let deny_explicit_impl = find_attr!(attrs, AttributeKind::DenyExplicitImpl(_));
let implement_via_object = !find_attr!(attrs, AttributeKind::DoNotImplementViaObject(_));
let force_dyn_incompatible =
find_attr!(attrs, AttributeKind::DynIncompatibleTrait(span) => *span);
ty::TraitDef {
def_id: def_id.to_def_id(),
@ -939,7 +940,7 @@ fn trait_def(tcx: TyCtxt<'_>, def_id: LocalDefId) -> ty::TraitDef {
skip_boxed_slice_during_method_dispatch,
specialization_kind,
must_implement_one_of,
implement_via_object,
force_dyn_incompatible,
deny_explicit_impl,
}
}

View file

@ -1,8 +1,9 @@
use std::fmt::Write;
use rustc_hir::attrs::AttributeKind;
use rustc_hir::def_id::{CRATE_DEF_ID, LocalDefId};
use rustc_hir::find_attr;
use rustc_middle::ty::{GenericArgs, TyCtxt};
use rustc_span::sym;
fn format_variances(tcx: TyCtxt<'_>, def_id: LocalDefId) -> String {
let variances = tcx.variances_of(def_id);
@ -25,7 +26,7 @@ fn format_variances(tcx: TyCtxt<'_>, def_id: LocalDefId) -> String {
pub(crate) fn variances(tcx: TyCtxt<'_>) {
let crate_items = tcx.hir_crate_items(());
if tcx.has_attr(CRATE_DEF_ID, sym::rustc_variance_of_opaques) {
if find_attr!(tcx.get_all_attrs(CRATE_DEF_ID), AttributeKind::RustcVarianceOfOpaques) {
for id in crate_items.opaques() {
tcx.dcx().emit_err(crate::errors::VariancesOf {
span: tcx.def_span(id),
@ -35,7 +36,7 @@ pub(crate) fn variances(tcx: TyCtxt<'_>) {
}
for id in crate_items.free_items() {
if !tcx.has_attr(id.owner_id, sym::rustc_variance) {
if !find_attr!(tcx.get_all_attrs(id.owner_id), AttributeKind::RustcVariance) {
continue;
}

View file

@ -1,3 +1,4 @@
use std::fmt::Debug;
use std::ops::Deref;
use rustc_hir as hir;
@ -12,7 +13,9 @@ use rustc_hir_analysis::hir_ty_lowering::{
use rustc_infer::infer::{
BoundRegionConversionTime, DefineOpaqueTypes, InferOk, RegionVariableOrigin,
};
use rustc_lint::builtin::RESOLVING_TO_ITEMS_SHADOWING_SUPERTRAIT_ITEMS;
use rustc_lint::builtin::{
AMBIGUOUS_GLOB_IMPORTED_TRAITS, RESOLVING_TO_ITEMS_SHADOWING_SUPERTRAIT_ITEMS,
};
use rustc_middle::traits::ObligationCauseCode;
use rustc_middle::ty::adjustment::{
Adjust, Adjustment, AllowTwoPhase, AutoBorrow, AutoBorrowMutability, PointerCoercion,
@ -149,6 +152,9 @@ impl<'a, 'tcx> ConfirmContext<'a, 'tcx> {
// Lint when an item is shadowing a supertrait item.
self.lint_shadowed_supertrait_items(pick, segment);
// Lint when a trait is ambiguously imported
self.lint_ambiguously_glob_imported_traits(pick, segment);
// Add any trait/regions obligations specified on the method's type parameters.
// We won't add these if we encountered an illegal sized bound, so that we can use
// a custom error in that case.
@ -322,7 +328,7 @@ impl<'a, 'tcx> ConfirmContext<'a, 'tcx> {
})
}
probe::TraitPick => {
probe::TraitPick(_) => {
let trait_def_id = pick.item.container_id(self.tcx);
// Make a trait reference `$0 : Trait<$1...$n>`
@ -719,6 +725,25 @@ impl<'a, 'tcx> ConfirmContext<'a, 'tcx> {
);
}
fn lint_ambiguously_glob_imported_traits(
&self,
pick: &probe::Pick<'_>,
segment: &hir::PathSegment<'tcx>,
) {
if pick.kind != probe::PickKind::TraitPick(true) {
return;
}
let trait_name = self.tcx.item_name(pick.item.container_id(self.tcx));
let import_span = self.tcx.hir_span_if_local(pick.import_ids[0].to_def_id()).unwrap();
self.tcx.node_lint(AMBIGUOUS_GLOB_IMPORTED_TRAITS, segment.hir_id, |diag| {
diag.primary_message(format!("Use of ambiguously glob imported trait `{trait_name}`"))
.span(segment.ident.span)
.span_label(import_span, format!("`{trait_name}` imported ambiguously here"))
.help(format!("Import `{trait_name}` explicitly"));
});
}
fn upcast(
&mut self,
source_trait_ref: ty::PolyTraitRef<'tcx>,

View file

@ -106,7 +106,7 @@ pub(crate) struct Candidate<'tcx> {
pub(crate) enum CandidateKind<'tcx> {
InherentImplCandidate { impl_def_id: DefId, receiver_steps: usize },
ObjectCandidate(ty::PolyTraitRef<'tcx>),
TraitCandidate(ty::PolyTraitRef<'tcx>),
TraitCandidate(ty::PolyTraitRef<'tcx>, bool /* lint_ambiguous */),
WhereClauseCandidate(ty::PolyTraitRef<'tcx>),
}
@ -235,7 +235,10 @@ pub(crate) struct Pick<'tcx> {
pub(crate) enum PickKind<'tcx> {
InherentImplPick,
ObjectPick,
TraitPick,
TraitPick(
// Is Ambiguously Imported
bool,
),
WhereClausePick(
// Trait
ty::PolyTraitRef<'tcx>,
@ -560,7 +563,10 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
probe_cx.push_candidate(
Candidate {
item,
kind: CandidateKind::TraitCandidate(ty::Binder::dummy(trait_ref)),
kind: CandidateKind::TraitCandidate(
ty::Binder::dummy(trait_ref),
false,
),
import_ids: smallvec![],
},
false,
@ -1018,6 +1024,7 @@ impl<'a, 'tcx> ProbeContext<'a, 'tcx> {
self.assemble_extension_candidates_for_trait(
&trait_candidate.import_ids,
trait_did,
trait_candidate.lint_ambiguous,
);
}
}
@ -1029,7 +1036,11 @@ impl<'a, 'tcx> ProbeContext<'a, 'tcx> {
let mut duplicates = FxHashSet::default();
for trait_info in suggest::all_traits(self.tcx) {
if duplicates.insert(trait_info.def_id) {
self.assemble_extension_candidates_for_trait(&smallvec![], trait_info.def_id);
self.assemble_extension_candidates_for_trait(
&smallvec![],
trait_info.def_id,
false,
);
}
}
}
@ -1055,6 +1066,7 @@ impl<'a, 'tcx> ProbeContext<'a, 'tcx> {
&mut self,
import_ids: &SmallVec<[LocalDefId; 1]>,
trait_def_id: DefId,
lint_ambiguous: bool,
) {
let trait_args = self.fresh_args_for_item(self.span, trait_def_id);
let trait_ref = ty::TraitRef::new_from_args(self.tcx, trait_def_id, trait_args);
@ -1076,7 +1088,7 @@ impl<'a, 'tcx> ProbeContext<'a, 'tcx> {
Candidate {
item,
import_ids: import_ids.clone(),
kind: TraitCandidate(bound_trait_ref),
kind: TraitCandidate(bound_trait_ref, lint_ambiguous),
},
false,
);
@ -1099,7 +1111,7 @@ impl<'a, 'tcx> ProbeContext<'a, 'tcx> {
Candidate {
item,
import_ids: import_ids.clone(),
kind: TraitCandidate(ty::Binder::dummy(trait_ref)),
kind: TraitCandidate(ty::Binder::dummy(trait_ref), lint_ambiguous),
},
false,
);
@ -1842,7 +1854,7 @@ impl<'a, 'tcx> ProbeContext<'a, 'tcx> {
ObjectCandidate(_) | WhereClauseCandidate(_) => {
CandidateSource::Trait(candidate.item.container_id(self.tcx))
}
TraitCandidate(trait_ref) => self.probe(|_| {
TraitCandidate(trait_ref, _) => self.probe(|_| {
let trait_ref = self.instantiate_binder_with_fresh_vars(
self.span,
BoundRegionConversionTime::FnCall,
@ -1872,7 +1884,7 @@ impl<'a, 'tcx> ProbeContext<'a, 'tcx> {
fn candidate_source_from_pick(&self, pick: &Pick<'tcx>) -> CandidateSource {
match pick.kind {
InherentImplPick => CandidateSource::Impl(pick.item.container_id(self.tcx)),
ObjectPick | WhereClausePick(_) | TraitPick => {
ObjectPick | WhereClausePick(_) | TraitPick(_) => {
CandidateSource::Trait(pick.item.container_id(self.tcx))
}
}
@ -1948,7 +1960,7 @@ impl<'a, 'tcx> ProbeContext<'a, 'tcx> {
impl_bounds,
));
}
TraitCandidate(poly_trait_ref) => {
TraitCandidate(poly_trait_ref, _) => {
// Some trait methods are excluded for arrays before 2021.
// (`array.into_iter()` wants a slice iterator for compatibility.)
if let Some(method_name) = self.method_name {
@ -2274,11 +2286,16 @@ impl<'a, 'tcx> ProbeContext<'a, 'tcx> {
}
}
let lint_ambiguous = match probes[0].0.kind {
TraitCandidate(_, lint) => lint,
_ => false,
};
// FIXME: check the return type here somehow.
// If so, just use this trait and call it a day.
Some(Pick {
item: probes[0].0.item,
kind: TraitPick,
kind: TraitPick(lint_ambiguous),
import_ids: probes[0].0.import_ids.clone(),
autoderefs: 0,
autoref_or_ptr_adjustment: None,
@ -2348,9 +2365,14 @@ impl<'a, 'tcx> ProbeContext<'a, 'tcx> {
}
}
let lint_ambiguous = match probes[0].0.kind {
TraitCandidate(_, lint) => lint,
_ => false,
};
Some(Pick {
item: child_candidate.item,
kind: TraitPick,
kind: TraitPick(lint_ambiguous),
import_ids: child_candidate.import_ids.clone(),
autoderefs: 0,
autoref_or_ptr_adjustment: None,
@ -2613,7 +2635,7 @@ impl<'tcx> Candidate<'tcx> {
kind: match self.kind {
InherentImplCandidate { .. } => InherentImplPick,
ObjectCandidate(_) => ObjectPick,
TraitCandidate(_) => TraitPick,
TraitCandidate(_, lint_ambiguous) => TraitPick(lint_ambiguous),
WhereClauseCandidate(trait_ref) => {
// Only trait derived from where-clauses should
// appear here, so they should not contain any

View file

@ -91,10 +91,7 @@ fn delete_dirty_work_product(sess: &Session, swp: SerializedWorkProduct) {
work_product::delete_workproduct_files(sess, &swp.work_product);
}
fn load_dep_graph(
sess: &Session,
deps: &DepsType,
) -> LoadResult<(Arc<SerializedDepGraph>, WorkProductMap)> {
fn load_dep_graph(sess: &Session) -> LoadResult<(Arc<SerializedDepGraph>, WorkProductMap)> {
let prof = sess.prof.clone();
if sess.opts.incremental.is_none() {
@ -174,7 +171,7 @@ fn load_dep_graph(
return LoadResult::DataOutOfDate;
}
let dep_graph = SerializedDepGraph::decode::<DepsType>(&mut decoder, deps);
let dep_graph = SerializedDepGraph::decode::<DepsType>(&mut decoder);
LoadResult::Ok { data: (dep_graph, prev_work_products) }
}
@ -212,12 +209,11 @@ pub fn setup_dep_graph(
sess: &Session,
crate_name: Symbol,
stable_crate_id: StableCrateId,
deps: &DepsType,
) -> DepGraph {
// `load_dep_graph` can only be called after `prepare_session_directory`.
prepare_session_directory(sess, crate_name, stable_crate_id);
let res = sess.opts.build_dep_graph().then(|| load_dep_graph(sess, deps));
let res = sess.opts.build_dep_graph().then(|| load_dep_graph(sess));
if sess.opts.incremental.is_some() {
sess.time("incr_comp_garbage_collect_session_directories", || {

View file

@ -26,7 +26,6 @@ use rustc_lint::{BufferedEarlyLint, EarlyCheckNode, LintStore, unerased_lint_sto
use rustc_metadata::EncodedMetadata;
use rustc_metadata::creader::CStore;
use rustc_middle::arena::Arena;
use rustc_middle::dep_graph::DepsType;
use rustc_middle::ty::{self, CurrentGcx, GlobalCtxt, RegisteredTools, TyCtxt};
use rustc_middle::util::Providers;
use rustc_parse::lexer::StripTokens;
@ -940,8 +939,7 @@ pub fn create_and_enter_global_ctxt<T, F: for<'tcx> FnOnce(TyCtxt<'tcx>) -> T>(
let outputs = util::build_output_filenames(&pre_configured_attrs, sess);
let dep_type = DepsType { dep_names: rustc_query_impl::dep_kind_names() };
let dep_graph = setup_dep_graph(sess, crate_name, stable_crate_id, &dep_type);
let dep_graph = setup_dep_graph(sess, crate_name, stable_crate_id);
let cstore =
FreezeLock::new(Box::new(CStore::new(compiler.codegen_backend.metadata_loader())) as _);

View file

@ -17,6 +17,7 @@ declare_lint_pass! {
AARCH64_SOFTFLOAT_NEON,
ABSOLUTE_PATHS_NOT_STARTING_WITH_CRATE,
AMBIGUOUS_ASSOCIATED_ITEMS,
AMBIGUOUS_GLOB_IMPORTED_TRAITS,
AMBIGUOUS_GLOB_IMPORTS,
AMBIGUOUS_GLOB_REEXPORTS,
AMBIGUOUS_PANIC_IMPORTS,
@ -4473,6 +4474,60 @@ declare_lint! {
};
}
declare_lint! {
/// The `ambiguous_glob_imported_traits` lint reports uses of traits that are
/// imported ambiguously via glob imports. Previously, this was not enforced
/// due to a bug in rustc.
///
/// ### Example
///
/// ```rust,compile_fail
/// #![deny(ambiguous_glob_imported_traits)]
/// mod m1 {
/// pub trait Trait {
/// fn method1(&self) {}
/// }
/// impl Trait for u8 {}
/// }
/// mod m2 {
/// pub trait Trait {
/// fn method2(&self) {}
/// }
/// impl Trait for u8 {}
/// }
///
/// fn main() {
/// use m1::*;
/// use m2::*;
/// 0u8.method1();
/// 0u8.method2();
/// }
/// ```
///
/// {{produces}}
///
/// ### Explanation
///
/// When multiple traits with the same name are brought into scope through glob imports,
/// one trait becomes the "primary" one while the others are shadowed. Methods from the
/// shadowed traits (e.g. `method2`) become inaccessible, while methods from the "primary"
/// trait (e.g. `method1`) still resolve. Ideally, none of the ambiguous traits would be in scope,
/// but we have to allow this for now because of backwards compatibility.
/// This lint reports uses of these "primary" traits that are ambiguous.
///
/// This is a [future-incompatible] lint to transition this to a
/// hard error in the future.
///
/// [future-incompatible]: ../index.md#future-incompatible-lints
pub AMBIGUOUS_GLOB_IMPORTED_TRAITS,
Warn,
"detects uses of ambiguously glob imported traits",
@future_incompatible = FutureIncompatibleInfo {
reason: fcw!(FutureReleaseError #147992),
report_in_deps: false,
};
}
declare_lint! {
/// The `ambiguous_panic_imports` lint detects ambiguous core and std panic imports, but
/// previously didn't do that due to `#[macro_use]` prelude macro import.

View file

@ -223,7 +223,12 @@ extern "C" bool LLVMRustOffloadEmbedBufferInModule(LLVMModuleRef HostM,
return true;
}
extern "C" void LLVMRustOffloadMapper(LLVMValueRef OldFn, LLVMValueRef NewFn) {
// Clone OldFn into NewFn, remapping its arguments to RebuiltArgs.
// Each arg of OldFn is replaced with the corresponding value in RebuiltArgs.
// For scalars, RebuiltArgs contains the value cast and/or truncated to the
// original type.
extern "C" void LLVMRustOffloadMapper(LLVMValueRef OldFn, LLVMValueRef NewFn,
const LLVMValueRef *RebuiltArgs) {
llvm::Function *oldFn = llvm::unwrap<llvm::Function>(OldFn);
llvm::Function *newFn = llvm::unwrap<llvm::Function>(NewFn);
@ -232,15 +237,25 @@ extern "C" void LLVMRustOffloadMapper(LLVMValueRef OldFn, LLVMValueRef NewFn) {
llvm::ValueToValueMapTy vmap;
auto newArgIt = newFn->arg_begin();
newArgIt->setName("dyn_ptr");
++newArgIt; // skip %dyn_ptr
unsigned i = 0;
for (auto &oldArg : oldFn->args()) {
vmap[&oldArg] = &*newArgIt++;
vmap[&oldArg] = unwrap<Value>(RebuiltArgs[i++]);
}
llvm::SmallVector<llvm::ReturnInst *, 8> returns;
llvm::CloneFunctionInto(newFn, oldFn, vmap,
llvm::CloneFunctionChangeType::LocalChangesOnly,
returns);
BasicBlock &entry = newFn->getEntryBlock();
BasicBlock &clonedEntry = *std::next(newFn->begin());
if (entry.getTerminator())
entry.getTerminator()->eraseFromParent();
IRBuilder<> B(&entry);
B.CreateBr(&clonedEntry);
}
#endif
@ -1745,6 +1760,10 @@ extern "C" bool LLVMRustIsNonGVFunctionPointerTy(LLVMValueRef V) {
return false;
}
extern "C" LLVMValueRef LLVMRustStripPointerCasts(LLVMValueRef V) {
return wrap(unwrap(V)->stripPointerCasts());
}
extern "C" bool LLVMRustLLVMHasZlibCompression() {
return llvm::compression::zlib::isAvailable();
}

View file

@ -24,15 +24,6 @@ macro_rules! define_dep_nodes {
($mod:ident) => {[ $($mod::$variant()),* ]};
}
#[macro_export]
macro_rules! make_dep_kind_name_array {
($mod:ident) => {
vec! {
$(*$mod::$variant().name),*
}
};
}
/// This enum serves as an index into arrays built by `make_dep_kind_array`.
// This enum has more than u8::MAX variants so we need some kind of multi-byte
// encoding. The derived Encodable/Decodable uses leb128 encoding which is
@ -68,20 +59,24 @@ macro_rules! define_dep_nodes {
deps.len() as u16
};
/// List containing the name of each dep kind as a static string,
/// indexable by `DepKind`.
pub(crate) const DEP_KIND_NAMES: &[&str] = &[
$( self::label_strs::$variant, )*
];
pub(super) fn dep_kind_from_label_string(label: &str) -> Result<DepKind, ()> {
match label {
$(stringify!($variant) => Ok(dep_kinds::$variant),)*
$( self::label_strs::$variant => Ok(self::dep_kinds::$variant), )*
_ => Err(()),
}
}
/// Contains variant => str representations for constructing
/// DepNode groups for tests.
#[allow(dead_code, non_upper_case_globals)]
#[expect(non_upper_case_globals)]
pub mod label_strs {
$(
pub const $variant: &str = stringify!($variant);
)*
$( pub const $variant: &str = stringify!($variant); )*
}
};
}

View file

@ -20,10 +20,7 @@ pub type DepGraph = rustc_query_system::dep_graph::DepGraph<DepsType>;
pub type DepKindStruct<'tcx> = rustc_query_system::dep_graph::DepKindStruct<TyCtxt<'tcx>>;
#[derive(Clone)]
pub struct DepsType {
pub dep_names: Vec<&'static str>,
}
pub struct DepsType;
impl Deps for DepsType {
fn with_deps<OP, R>(task_deps: TaskDepsRef<'_>, op: OP) -> R
@ -47,8 +44,8 @@ impl Deps for DepsType {
})
}
fn name(&self, dep_kind: DepKind) -> &'static str {
self.dep_names[dep_kind.as_usize()]
fn name(dep_kind: DepKind) -> &'static str {
dep_node::DEP_KIND_NAMES[dep_kind.as_usize()]
}
const DEP_KIND_NULL: DepKind = dep_kinds::Null;

View file

@ -764,6 +764,9 @@ pub enum DynCompatibilityViolation {
/// `Self: Sized` declared on the trait.
SizedSelf(SmallVec<[Span; 1]>),
/// Trait is marked `#[rustc_dyn_incompatible_trait]`.
ExplicitlyDynIncompatible(SmallVec<[Span; 1]>),
/// Supertrait reference references `Self` an in illegal location
/// (e.g., `trait Foo : Bar<Self>`).
SupertraitSelf(SmallVec<[Span; 1]>),
@ -788,6 +791,9 @@ impl DynCompatibilityViolation {
pub fn error_msg(&self) -> Cow<'static, str> {
match self {
DynCompatibilityViolation::SizedSelf(_) => "it requires `Self: Sized`".into(),
DynCompatibilityViolation::ExplicitlyDynIncompatible(_) => {
"it opted out of dyn-compatibility".into()
}
DynCompatibilityViolation::SupertraitSelf(spans) => {
if spans.iter().any(|sp| *sp != DUMMY_SP) {
"it uses `Self` as a type parameter".into()
@ -861,6 +867,7 @@ impl DynCompatibilityViolation {
pub fn solution(&self) -> DynCompatibilityViolationSolution {
match self {
DynCompatibilityViolation::SizedSelf(_)
| DynCompatibilityViolation::ExplicitlyDynIncompatible(_)
| DynCompatibilityViolation::SupertraitSelf(_)
| DynCompatibilityViolation::SupertraitNonLifetimeBinder(..)
| DynCompatibilityViolation::SupertraitConst(_) => {
@ -894,6 +901,7 @@ impl DynCompatibilityViolation {
match self {
DynCompatibilityViolation::SupertraitSelf(spans)
| DynCompatibilityViolation::SizedSelf(spans)
| DynCompatibilityViolation::ExplicitlyDynIncompatible(spans)
| DynCompatibilityViolation::SupertraitNonLifetimeBinder(spans)
| DynCompatibilityViolation::SupertraitConst(spans) => spans.clone(),
DynCompatibilityViolation::AssocConst(_, span)

View file

@ -709,10 +709,6 @@ impl<'tcx> Interner for TyCtxt<'tcx> {
self.trait_def(def_id).is_fundamental
}
fn trait_may_be_implemented_via_object(self, trait_def_id: DefId) -> bool {
self.trait_def(trait_def_id).implement_via_object
}
fn trait_is_unsafe(self, trait_def_id: Self::DefId) -> bool {
self.trait_def(trait_def_id).safety.is_unsafe()
}

View file

@ -78,16 +78,13 @@ impl MappingFlags {
use rustc_ast::Mutability::*;
match ty.kind() {
ty::Bool
| ty::Char
| ty::Int(_)
| ty::Uint(_)
| ty::Float(_)
| ty::Adt(_, _)
| ty::Tuple(_)
| ty::Array(_, _)
| ty::Alias(_, _)
| ty::Param(_) => MappingFlags::TO,
ty::Bool | ty::Char | ty::Int(_) | ty::Uint(_) | ty::Float(_) => {
MappingFlags::LITERAL | MappingFlags::IMPLICIT
}
ty::Adt(_, _) | ty::Tuple(_) | ty::Array(_, _) | ty::Alias(_, _) | ty::Param(_) => {
MappingFlags::TO
}
ty::RawPtr(_, Not) | ty::Ref(_, _, Not) => MappingFlags::TO,

View file

@ -7,6 +7,7 @@ use rustc_hir::def::DefKind;
use rustc_hir::def_id::{DefId, LOCAL_CRATE};
use rustc_hir::{self as hir, find_attr};
use rustc_macros::{Decodable, Encodable, HashStable};
use rustc_span::Span;
use tracing::debug;
use crate::query::LocalCrate;
@ -69,10 +70,9 @@ pub struct TraitDef {
/// must be implemented.
pub must_implement_one_of: Option<Box<[Ident]>>,
/// Whether to add a builtin `dyn Trait: Trait` implementation.
/// This is enabled for all traits except ones marked with
/// `#[rustc_do_not_implement_via_object]`.
pub implement_via_object: bool,
/// Whether the trait should be considered dyn-incompatible, even if it otherwise
/// satisfies the requirements to be dyn-compatible.
pub force_dyn_incompatible: Option<Span>,
/// Whether a trait is fully built-in, and any implementation is disallowed.
/// This only applies to built-in traits, and is marked via

View file

@ -6,7 +6,7 @@ edition = "2024"
[dependencies]
# tidy-alphabetical-start
either = "1"
hashbrown = "0.15"
hashbrown = { version = "0.16.1", default-features = false }
itertools = "0.12"
rustc_abi = { path = "../rustc_abi" }
rustc_arena = { path = "../rustc_arena" }

View file

@ -1,3 +1,4 @@
use itertools::Itertools;
use rustc_data_structures::fx::{FxIndexMap, FxIndexSet, IndexEntry};
use rustc_middle::mir;
use rustc_middle::mir::coverage::{BasicCoverageBlock, BranchSpan};
@ -6,6 +7,7 @@ use rustc_span::{ExpnId, ExpnKind, Span};
use crate::coverage::from_mir;
use crate::coverage::graph::CoverageGraph;
use crate::coverage::hir_info::ExtractedHirInfo;
use crate::coverage::mappings::MappingsError;
#[derive(Clone, Copy, Debug)]
pub(crate) struct SpanWithBcb {
@ -22,38 +24,6 @@ impl ExpnTree {
pub(crate) fn get(&self, expn_id: ExpnId) -> Option<&ExpnNode> {
self.nodes.get(&expn_id)
}
/// Yields the tree node for the given expansion ID (if present), followed
/// by the nodes of all of its descendants in depth-first order.
pub(crate) fn iter_node_and_descendants(
    &self,
    root_expn_id: ExpnId,
) -> impl Iterator<Item = &ExpnNode> {
    // Implemented as a `gen` block: an explicit stack of child-ID iterators
    // drives an iterative (non-recursive) depth-first traversal.
    gen move {
        // If the root ID has no node, the iterator is simply empty.
        let Some(root_node) = self.get(root_expn_id) else { return };
        yield root_node;
        // Stack of child-node-ID iterators that drives the depth-first traversal.
        let mut iter_stack = vec![root_node.child_expn_ids.iter()];
        while let Some(curr_iter) = iter_stack.last_mut() {
            // Pull the next ID from the top of the stack.
            let Some(&curr_id) = curr_iter.next() else {
                // This level is exhausted; resume the parent's iterator.
                iter_stack.pop();
                continue;
            };
            // Yield this node. IDs without a corresponding node are skipped.
            let Some(node) = self.get(curr_id) else { continue };
            yield node;
            // Push the node's children, to be traversed next.
            if !node.child_expn_ids.is_empty() {
                iter_stack.push(node.child_expn_ids.iter());
            }
        }
    }
}
}
#[derive(Debug)]
@ -62,6 +32,8 @@ pub(crate) struct ExpnNode {
/// but is helpful for debugging and might be useful later.
#[expect(dead_code)]
pub(crate) expn_id: ExpnId,
/// Index of this node in a depth-first traversal from the root.
pub(crate) dfs_rank: usize,
// Useful info extracted from `ExpnData`.
pub(crate) expn_kind: ExpnKind,
@ -82,6 +54,10 @@ pub(crate) struct ExpnNode {
pub(crate) spans: Vec<SpanWithBcb>,
/// Expansions whose call-site is in this expansion.
pub(crate) child_expn_ids: FxIndexSet<ExpnId>,
/// The "minimum" and "maximum" BCBs (in dominator order) of ordinary spans
/// belonging to this tree node and all of its descendants. Used when
/// creating a single code mapping representing an entire child expansion.
pub(crate) minmax_bcbs: Option<MinMaxBcbs>,
/// Branch spans (recorded during MIR building) belonging to this expansion.
pub(crate) branch_spans: Vec<BranchSpan>,
@ -100,6 +76,7 @@ impl ExpnNode {
Self {
expn_id,
dfs_rank: usize::MAX,
expn_kind: expn_data.kind,
call_site,
@ -110,6 +87,7 @@ impl ExpnNode {
spans: vec![],
child_expn_ids: FxIndexSet::default(),
minmax_bcbs: None,
branch_spans: vec![],
@ -124,7 +102,7 @@ pub(crate) fn build_expn_tree(
mir_body: &mir::Body<'_>,
hir_info: &ExtractedHirInfo,
graph: &CoverageGraph,
) -> ExpnTree {
) -> Result<ExpnTree, MappingsError> {
let raw_spans = from_mir::extract_raw_spans_from_mir(mir_body, graph);
let mut nodes = FxIndexMap::default();
@ -157,6 +135,20 @@ pub(crate) fn build_expn_tree(
}
}
// Sort the tree nodes into depth-first order.
sort_nodes_depth_first(&mut nodes)?;
// For each node, determine its "minimum" and "maximum" BCBs, based on its
// own spans and its immediate children. This relies on the nodes having
// been sorted, so that each node's children are processed before the node
// itself.
for i in (0..nodes.len()).rev() {
// Computing a node's min/max BCBs requires a shared ref to other nodes.
let minmax_bcbs = minmax_bcbs_for_expn_tree_node(graph, &nodes, &nodes[i]);
// Now we can mutate the current node to set its min/max BCBs.
nodes[i].minmax_bcbs = minmax_bcbs;
}
// If we have a span for the function signature, associate it with the
// corresponding expansion tree node.
if let Some(fn_sig_span) = hir_info.fn_sig_span
@ -189,5 +181,60 @@ pub(crate) fn build_expn_tree(
}
}
ExpnTree { nodes }
Ok(ExpnTree { nodes })
}
/// Sorts the tree nodes in the map into depth-first order.
///
/// After sorting, every node appears before all of its descendants, so later
/// passes can rely on parent-before-child (or child-before-parent, when
/// iterating in reverse) ordering.
fn sort_nodes_depth_first(nodes: &mut FxIndexMap<ExpnId, ExpnNode>) -> Result<(), MappingsError> {
    // Assign each node reachable from the root a rank via an explicit-stack DFS.
    let mut pending = vec![ExpnId::root()];
    let mut rank = 0usize;
    while let Some(id) = pending.pop() {
        let Some(node) = nodes.get_mut(&id) else { continue };
        node.dfs_rank = rank;
        rank += 1;
        // Children are pushed in reverse so that the first child is popped
        // (and therefore ranked) first.
        pending.extend(node.child_expn_ids.iter().rev().copied());
    }
    nodes.sort_by_key(|_expn_id, node| node.dfs_rank);
    // After sorting, each node's rank must equal its index; anything else means
    // the DFS did not visit every node exactly once.
    for (i, node) in nodes.values().enumerate() {
        let dfs_rank = node.dfs_rank;
        if dfs_rank != i {
            tracing::debug!(dfs_rank, i, "expansion tree node's rank does not match its index");
            return Err(MappingsError::TreeSortFailure);
        }
    }
    Ok(())
}
/// A pair of "minimum" and "maximum" BCBs, compared in dominator order.
#[derive(Clone, Copy, Debug)]
pub(crate) struct MinMaxBcbs {
    /// The "minimum" BCB, i.e. the first in dominator order.
    pub(crate) min: BasicCoverageBlock,
    /// The "maximum" BCB, i.e. the last in dominator order.
    pub(crate) max: BasicCoverageBlock,
}
/// For a single node in the expansion tree, compute its "minimum" and "maximum"
/// BCBs (in dominator order), from among the BCBs of its immediate spans,
/// and the min/max of its immediate children.
fn minmax_bcbs_for_expn_tree_node(
    graph: &CoverageGraph,
    nodes: &FxIndexMap<ExpnId, ExpnNode>,
    node: &ExpnNode,
) -> Option<MinMaxBcbs> {
    // BCBs attached directly to this node's own spans.
    let own_bcbs = node.spans.iter().map(|span| span.bcb);
    // The already-computed min/max endpoints of each immediate child, flattened
    // into individual candidate BCBs. Children without min/max are skipped.
    let child_bcbs = node
        .child_expn_ids
        .iter()
        .filter_map(|id| nodes.get(id))
        .filter_map(|child| child.minmax_bcbs)
        .flat_map(|minmax| [minmax.min, minmax.max]);
    // Returns `None` (via `into_option`) when there are no candidate BCBs at all.
    let (min, max) = own_bcbs
        .chain(child_bcbs)
        .minmax_by(|&a, &b| graph.cmp_in_dominator_order(a, b))
        .into_option()?;
    Some(MinMaxBcbs { min, max })
}

View file

@ -11,6 +11,13 @@ use crate::coverage::graph::CoverageGraph;
use crate::coverage::hir_info::ExtractedHirInfo;
use crate::coverage::spans::extract_refined_covspans;
/// Indicates why mapping extraction failed, for debug-logging purposes.
#[derive(Debug)]
pub(crate) enum MappingsError {
    /// No coverage mappings could be extracted for this function body.
    NoMappings,
    /// Depth-first sorting of the expansion tree produced a node whose DFS
    /// rank does not match its sorted index, i.e. the traversal did not visit
    /// every node exactly once.
    TreeSortFailure,
}
#[derive(Default)]
pub(crate) struct ExtractedMappings {
pub(crate) mappings: Vec<Mapping>,
@ -23,8 +30,8 @@ pub(crate) fn extract_mappings_from_mir<'tcx>(
mir_body: &mir::Body<'tcx>,
hir_info: &ExtractedHirInfo,
graph: &CoverageGraph,
) -> ExtractedMappings {
let expn_tree = expansion::build_expn_tree(mir_body, hir_info, graph);
) -> Result<ExtractedMappings, MappingsError> {
let expn_tree = expansion::build_expn_tree(mir_body, hir_info, graph)?;
let mut mappings = vec![];
@ -33,7 +40,11 @@ pub(crate) fn extract_mappings_from_mir<'tcx>(
extract_branch_mappings(mir_body, hir_info, graph, &expn_tree, &mut mappings);
ExtractedMappings { mappings }
if mappings.is_empty() {
tracing::debug!("no mappings were extracted");
return Err(MappingsError::NoMappings);
}
Ok(ExtractedMappings { mappings })
}
fn resolve_block_markers(

View file

@ -73,12 +73,13 @@ fn instrument_function_for_coverage<'tcx>(tcx: TyCtxt<'tcx>, mir_body: &mut mir:
////////////////////////////////////////////////////
// Extract coverage spans and other mapping info from MIR.
let ExtractedMappings { mappings } =
mappings::extract_mappings_from_mir(tcx, mir_body, &hir_info, &graph);
if mappings.is_empty() {
// No spans could be converted into valid mappings, so skip this function.
debug!("no spans could be converted into valid mappings; skipping");
return;
}
match mappings::extract_mappings_from_mir(tcx, mir_body, &hir_info, &graph) {
Ok(m) => m,
Err(error) => {
tracing::debug!(?error, "mapping extraction failed; skipping this function");
return;
}
};
// Use the coverage graph to prepare intermediate data that will eventually
// be used to assign physical counters and counter expressions to points in

View file

@ -39,8 +39,7 @@ pub(super) fn extract_refined_covspans<'tcx>(
// For each expansion with its call-site in the body span, try to
// distill a corresponding covspan.
for &child_expn_id in &node.child_expn_ids {
if let Some(covspan) = single_covspan_for_child_expn(tcx, graph, &expn_tree, child_expn_id)
{
if let Some(covspan) = single_covspan_for_child_expn(tcx, &expn_tree, child_expn_id) {
covspans.push(covspan);
}
}
@ -127,24 +126,21 @@ pub(super) fn extract_refined_covspans<'tcx>(
/// For a single child expansion, try to distill it into a single span+BCB mapping.
fn single_covspan_for_child_expn(
tcx: TyCtxt<'_>,
graph: &CoverageGraph,
expn_tree: &ExpnTree,
expn_id: ExpnId,
) -> Option<Covspan> {
let node = expn_tree.get(expn_id)?;
let bcbs =
expn_tree.iter_node_and_descendants(expn_id).flat_map(|n| n.spans.iter().map(|s| s.bcb));
let minmax_bcbs = node.minmax_bcbs?;
let bcb = match node.expn_kind {
// For bang-macros (e.g. `assert!`, `trace!`) and for `await`, taking
// the "first" BCB in dominator order seems to give good results.
ExpnKind::Macro(MacroKind::Bang, _) | ExpnKind::Desugaring(DesugaringKind::Await) => {
bcbs.min_by(|&a, &b| graph.cmp_in_dominator_order(a, b))?
minmax_bcbs.min
}
// For other kinds of expansion, taking the "last" (most-dominated) BCB
// seems to give good results.
_ => bcbs.max_by(|&a, &b| graph.cmp_in_dominator_order(a, b))?,
_ => minmax_bcbs.max,
};
// For bang-macro expansions, limit the call-site span to just the macro

View file

@ -5,7 +5,6 @@
#![feature(const_type_name)]
#![feature(cow_is_borrowed)]
#![feature(file_buffered)]
#![feature(gen_blocks)]
#![feature(if_let_guard)]
#![feature(impl_trait_in_assoc_type)]
#![feature(try_blocks)]

View file

@ -792,7 +792,9 @@ where
candidates: &mut Vec<Candidate<I>>,
) {
let cx = self.cx();
if !cx.trait_may_be_implemented_via_object(goal.predicate.trait_def_id(cx)) {
if cx.is_sizedness_trait(goal.predicate.trait_def_id(cx)) {
// `dyn MetaSized` is valid, but should get its `MetaSized` impl from
// being `dyn` (SizedCandidate), not from the object candidate.
return;
}

View file

@ -659,7 +659,7 @@ where
}
// `rustc_transmute` does not have support for type or const params
if goal.has_non_region_placeholders() {
if goal.predicate.has_non_region_placeholders() {
return Err(NoSolution);
}

View file

@ -244,7 +244,7 @@ impl<'tcx> CheckAttrVisitor<'tcx> {
| AttributeKind::SkipDuringMethodDispatch { .. }
| AttributeKind::Coinductive(..)
| AttributeKind::DenyExplicitImpl(..)
| AttributeKind::DoNotImplementViaObject(..)
| AttributeKind::DynIncompatibleTrait(..)
| AttributeKind::SpecializationTrait(..)
| AttributeKind::UnsafeSpecializationMarker(..)
| AttributeKind::ParenSugar(..)
@ -274,6 +274,8 @@ impl<'tcx> CheckAttrVisitor<'tcx> {
| AttributeKind::RustcScalableVector { .. }
| AttributeKind::RustcSimdMonomorphizeLaneLimit(..)
| AttributeKind::RustcShouldNotBeCalledOnConstItems(..)
| AttributeKind::RustcVariance
| AttributeKind::RustcVarianceOfOpaques
| AttributeKind::ExportStable
| AttributeKind::FfiConst(..)
| AttributeKind::UnstableFeatureBound(..)
@ -323,6 +325,7 @@ impl<'tcx> CheckAttrVisitor<'tcx> {
| AttributeKind::RustcReallocator
| AttributeKind::RustcNounwind
| AttributeKind::RustcOffloadKernel
| AttributeKind::PatchableFunctionEntry { .. }
) => { /* do nothing */ }
Attribute::Unparsed(attr_item) => {
style = Some(attr_item.style);
@ -348,7 +351,6 @@ impl<'tcx> CheckAttrVisitor<'tcx> {
| sym::deny
| sym::forbid
// need to be fixed
| sym::patchable_function_entry // FIXME(patchable_function_entry)
| sym::deprecated_safe // FIXME(deprecated_safe)
// internal
| sym::prelude_import
@ -378,8 +380,6 @@ impl<'tcx> CheckAttrVisitor<'tcx> {
| sym::rustc_capture_analysis
| sym::rustc_regions
| sym::rustc_strict_coherence
| sym::rustc_variance
| sym::rustc_variance_of_opaques
| sym::rustc_hidden_type_of_opaques
| sym::rustc_mir
| sym::rustc_effective_visibility

View file

@ -1395,7 +1395,7 @@ pub struct TraitDecl {
pub skip_boxed_slice_during_method_dispatch: bool,
pub specialization_kind: TraitSpecializationKind,
pub must_implement_one_of: Option<Vec<Ident>>,
pub implement_via_object: bool,
pub force_dyn_incompatible: Option<Span>,
pub deny_explicit_impl: bool,
}

View file

@ -597,7 +597,7 @@ impl<'tcx> Stable<'tcx> for ty::TraitDef {
.must_implement_one_of
.as_ref()
.map(|idents| idents.iter().map(|ident| opaque(ident)).collect()),
implement_via_object: self.implement_via_object,
force_dyn_incompatible: self.force_dyn_incompatible.stable(tables, cx),
deny_explicit_impl: self.deny_explicit_impl,
}
}

View file

@ -921,9 +921,5 @@ macro_rules! define_queries {
pub fn query_callbacks<'tcx>(arena: &'tcx Arena<'tcx>) -> &'tcx [DepKindStruct<'tcx>] {
arena.alloc_from_iter(rustc_middle::make_dep_kind_array!(query_callbacks))
}
pub fn dep_kind_names() -> Vec<&'static str> {
rustc_middle::make_dep_kind_name_array!(query_callbacks)
}
}
}

View file

@ -25,6 +25,6 @@ tracing = "0.1"
# tidy-alphabetical-end
[dependencies.hashbrown]
version = "0.15.2"
version = "0.16.1"
default-features = false
features = ["nightly"] # for may_dangle

View file

@ -28,7 +28,6 @@ use crate::dep_graph::edges::EdgesVec;
use crate::ich::StableHashingContext;
use crate::query::{QueryContext, QuerySideEffect};
#[derive(Clone)]
pub struct DepGraph<D: Deps> {
data: Option<Arc<DepGraphData<D>>>,
@ -39,6 +38,17 @@ pub struct DepGraph<D: Deps> {
virtual_dep_node_index: Arc<AtomicU32>,
}
/// Manual clone impl that does not require `D: Clone`.
impl<D: Deps> Clone for DepGraph<D> {
    fn clone(&self) -> Self {
        // `Arc` (and thus `Option<Arc<_>>`) is cloneable regardless of `D`, so
        // cloning each field explicitly avoids the unnecessary `D: Clone` bound
        // that `#[derive(Clone)]` would impose.
        Self {
            data: self.data.clone(),
            virtual_dep_node_index: Arc::clone(&self.virtual_dep_node_index),
        }
    }
}
rustc_index::newtype_index! {
pub struct DepNodeIndex {}
}

View file

@ -103,7 +103,7 @@ pub trait Deps: DynSync {
where
OP: for<'a> FnOnce(TaskDepsRef<'a>);
fn name(&self, dep_kind: DepKind) -> &'static str;
fn name(dep_kind: DepKind) -> &'static str;
/// We use this for most things when incr. comp. is turned off.
const DEP_KIND_NULL: DepKind;

View file

@ -191,8 +191,8 @@ fn mask(bits: usize) -> usize {
}
impl SerializedDepGraph {
#[instrument(level = "debug", skip(d, deps))]
pub fn decode<D: Deps>(d: &mut MemDecoder<'_>, deps: &D) -> Arc<SerializedDepGraph> {
#[instrument(level = "debug", skip(d))]
pub fn decode<D: Deps>(d: &mut MemDecoder<'_>) -> Arc<SerializedDepGraph> {
// The last 16 bytes are the node count and edge count.
debug!("position: {:?}", d.position());
@ -280,7 +280,7 @@ impl SerializedDepGraph {
if index[node.kind.as_usize()].insert(node.hash, idx).is_some() {
// Empty nodes and side effect nodes can have duplicates
if node.kind != D::DEP_KIND_NULL && node.kind != D::DEP_KIND_SIDE_EFFECT {
let name = deps.name(node.kind);
let name = D::name(node.kind);
panic!(
"Error: A dep graph node ({name}) does not have an unique index. \
Running a clean build on a nightly compiler with `-Z incremental-verify-ich` \

View file

@ -622,7 +622,18 @@ struct ModuleData<'ra> {
globs: CmRefCell<Vec<Import<'ra>>>,
/// Used to memoize the traits in this module for faster searches through all traits in scope.
traits: CmRefCell<Option<Box<[(Macros20NormalizedIdent, Decl<'ra>, Option<Module<'ra>>)]>>>,
traits: CmRefCell<
Option<
Box<
[(
Macros20NormalizedIdent,
Decl<'ra>,
Option<Module<'ra>>,
bool, /* lint ambiguous */
)],
>,
>,
>,
/// Span of the module itself. Used for error reporting.
span: Span,
@ -719,7 +730,12 @@ impl<'ra> Module<'ra> {
return;
}
if let Res::Def(DefKind::Trait | DefKind::TraitAlias, def_id) = binding.res() {
collected_traits.push((name, binding, r.as_ref().get_module(def_id)))
collected_traits.push((
name,
binding,
r.as_ref().get_module(def_id),
binding.is_ambiguity_recursive(),
));
}
});
*traits = Some(collected_traits.into_boxed_slice());
@ -1877,7 +1893,11 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> {
if let Some(module) = current_trait {
if self.trait_may_have_item(Some(module), assoc_item) {
let def_id = module.def_id();
found_traits.push(TraitCandidate { def_id, import_ids: smallvec![] });
found_traits.push(TraitCandidate {
def_id,
import_ids: smallvec![],
lint_ambiguous: false,
});
}
}
@ -1915,11 +1935,13 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> {
) {
module.ensure_traits(self);
let traits = module.traits.borrow();
for &(trait_name, trait_binding, trait_module) in traits.as_ref().unwrap().iter() {
for &(trait_name, trait_binding, trait_module, lint_ambiguous) in
traits.as_ref().unwrap().iter()
{
if self.trait_may_have_item(trait_module, assoc_item) {
let def_id = trait_binding.res().def_id();
let import_ids = self.find_transitive_imports(&trait_binding.kind, trait_name.0);
found_traits.push(TraitCandidate { def_id, import_ids });
found_traits.push(TraitCandidate { def_id, import_ids, lint_ambiguous });
}
}
}

View file

@ -22,7 +22,9 @@ use rustc_hashes::Hash64;
use rustc_macros::{BlobDecodable, Decodable, Encodable, HashStable_Generic};
use rustc_span::edition::{DEFAULT_EDITION, EDITION_NAME_LIST, Edition, LATEST_STABLE_EDITION};
use rustc_span::source_map::FilePathMapping;
use rustc_span::{FileName, RealFileName, SourceFileHashAlgorithm, Symbol, sym};
use rustc_span::{
FileName, RealFileName, RemapPathScopeComponents, SourceFileHashAlgorithm, Symbol, sym,
};
use rustc_target::spec::{
FramePointer, LinkSelfContainedComponents, LinkerFeatures, PanicStrategy, SplitDebuginfo,
Target, TargetTuple,
@ -1317,6 +1319,29 @@ impl OutputFilenames {
}
}
/// Parses the `--remap-path-scope` flag into a set of scope components.
///
/// When the flag is absent, every scope is remapped (the `all` set). An
/// unrecognized scope name is reported via `early_fatal`.
pub(crate) fn parse_remap_path_scope(
    early_dcx: &EarlyDiagCtxt,
    matches: &getopts::Matches,
) -> RemapPathScopeComponents {
    let Some(arg) = matches.opt_str("remap-path-scope") else {
        // Flag not given: remap paths in every scope.
        return RemapPathScopeComponents::all();
    };
    // Union together each comma-separated scope name.
    let mut scopes = RemapPathScopeComponents::empty();
    for name in arg.split(',') {
        scopes |= match name {
            "macro" => RemapPathScopeComponents::MACRO,
            "diagnostics" => RemapPathScopeComponents::DIAGNOSTICS,
            "debuginfo" => RemapPathScopeComponents::DEBUGINFO,
            "coverage" => RemapPathScopeComponents::COVERAGE,
            "object" => RemapPathScopeComponents::OBJECT,
            "all" => RemapPathScopeComponents::all(),
            _ => early_dcx.early_fatal("argument for `--remap-path-scope` must be a comma separated list of scopes: `macro`, `diagnostics`, `debuginfo`, `coverage`, `object`, `all`"),
        };
    }
    scopes
}
#[derive(Clone, Debug)]
pub struct Sysroot {
pub explicit: Option<PathBuf>,
@ -1353,9 +1378,9 @@ pub fn host_tuple() -> &'static str {
fn file_path_mapping(
remap_path_prefix: Vec<(PathBuf, PathBuf)>,
unstable_opts: &UnstableOptions,
remap_path_scope: RemapPathScopeComponents,
) -> FilePathMapping {
FilePathMapping::new(remap_path_prefix.clone(), unstable_opts.remap_path_scope)
FilePathMapping::new(remap_path_prefix.clone(), remap_path_scope)
}
impl Default for Options {
@ -1367,7 +1392,7 @@ impl Default for Options {
// to create a default working directory.
let working_dir = {
let working_dir = std::env::current_dir().unwrap();
let file_mapping = file_path_mapping(Vec::new(), &unstable_opts);
let file_mapping = file_path_mapping(Vec::new(), RemapPathScopeComponents::empty());
file_mapping.to_real_filename(&RealFileName::empty(), &working_dir)
};
@ -1402,6 +1427,7 @@ impl Default for Options {
cli_forced_codegen_units: None,
cli_forced_local_thinlto_off: false,
remap_path_prefix: Vec::new(),
remap_path_scope: RemapPathScopeComponents::all(),
real_rust_source_base_dir: None,
real_rustc_dev_source_base_dir: None,
edition: DEFAULT_EDITION,
@ -1428,7 +1454,7 @@ impl Options {
}
pub fn file_path_mapping(&self) -> FilePathMapping {
file_path_mapping(self.remap_path_prefix.clone(), &self.unstable_opts)
file_path_mapping(self.remap_path_prefix.clone(), self.remap_path_scope)
}
/// Returns `true` if there will be an output file generated.
@ -1866,6 +1892,14 @@ pub fn rustc_optgroups() -> Vec<RustcOptGroup> {
"Remap source names in all output (compiler messages and output files)",
"<FROM>=<TO>",
),
opt(
Stable,
Opt,
"",
"remap-path-scope",
"Defines which scopes of paths should be remapped by `--remap-path-prefix`",
"<macro,diagnostics,debuginfo,coverage,object,all>",
),
opt(Unstable, Multi, "", "env-set", "Inject an environment variable", "<VAR>=<VALUE>"),
];
options.extend(verbose_only.into_iter().map(|mut opt| {
@ -2669,6 +2703,7 @@ pub fn build_session_options(early_dcx: &mut EarlyDiagCtxt, matches: &getopts::M
let externs = parse_externs(early_dcx, matches, &unstable_opts);
let remap_path_prefix = parse_remap_path_prefix(early_dcx, matches, &unstable_opts);
let remap_path_scope = parse_remap_path_scope(early_dcx, matches);
let pretty = parse_pretty(early_dcx, &unstable_opts);
@ -2735,7 +2770,7 @@ pub fn build_session_options(early_dcx: &mut EarlyDiagCtxt, matches: &getopts::M
early_dcx.early_fatal(format!("Current directory is invalid: {e}"));
});
let file_mapping = file_path_mapping(remap_path_prefix.clone(), &unstable_opts);
let file_mapping = file_path_mapping(remap_path_prefix.clone(), remap_path_scope);
file_mapping.to_real_filename(&RealFileName::empty(), &working_dir)
};
@ -2772,6 +2807,7 @@ pub fn build_session_options(early_dcx: &mut EarlyDiagCtxt, matches: &getopts::M
cli_forced_codegen_units: codegen_units,
cli_forced_local_thinlto_off: disable_local_thinlto,
remap_path_prefix,
remap_path_scope,
real_rust_source_base_dir,
real_rustc_dev_source_base_dir,
edition,

View file

@ -454,6 +454,8 @@ top_level_options!(
/// Remap source path prefixes in all output (messages, object files, debug, etc.).
remap_path_prefix: Vec<(PathBuf, PathBuf)> [TRACKED_NO_CRATE_HASH],
/// Defines which scopes of paths should be remapped by `--remap-path-prefix`.
remap_path_scope: RemapPathScopeComponents [TRACKED_NO_CRATE_HASH],
/// Base directory containing the `library/` directory for the Rust standard library.
/// Right now it's always `$sysroot/lib/rustlib/src/rust`
@ -872,7 +874,6 @@ mod desc {
pub(crate) const parse_branch_protection: &str = "a `,` separated combination of `bti`, `gcs`, `pac-ret`, (optionally with `pc`, `b-key`, `leaf` if `pac-ret` is set)";
pub(crate) const parse_proc_macro_execution_strategy: &str =
"one of supported execution strategies (`same-thread`, or `cross-thread`)";
pub(crate) const parse_remap_path_scope: &str = "comma separated list of scopes: `macro`, `diagnostics`, `debuginfo`, `coverage`, `object`, `all`";
pub(crate) const parse_inlining_threshold: &str =
"either a boolean (`yes`, `no`, `on`, `off`, etc), or a non-negative number";
pub(crate) const parse_llvm_module_flag: &str = "<key>:<type>:<value>:<behavior>. Type must currently be `u32`. Behavior should be one of (`error`, `warning`, `require`, `override`, `append`, `appendunique`, `max`, `min`)";
@ -1737,29 +1738,6 @@ pub mod parse {
true
}
pub(crate) fn parse_remap_path_scope(
slot: &mut RemapPathScopeComponents,
v: Option<&str>,
) -> bool {
if let Some(v) = v {
*slot = RemapPathScopeComponents::empty();
for s in v.split(',') {
*slot |= match s {
"macro" => RemapPathScopeComponents::MACRO,
"diagnostics" => RemapPathScopeComponents::DIAGNOSTICS,
"debuginfo" => RemapPathScopeComponents::DEBUGINFO,
"coverage" => RemapPathScopeComponents::COVERAGE,
"object" => RemapPathScopeComponents::OBJECT,
"all" => RemapPathScopeComponents::all(),
_ => return false,
}
}
true
} else {
false
}
}
pub(crate) fn parse_relocation_model(slot: &mut Option<RelocModel>, v: Option<&str>) -> bool {
match v.and_then(|s| RelocModel::from_str(s).ok()) {
Some(relocation_model) => *slot = Some(relocation_model),
@ -2614,8 +2592,6 @@ options! {
"whether ELF relocations can be relaxed"),
remap_cwd_prefix: Option<PathBuf> = (None, parse_opt_pathbuf, [TRACKED],
"remap paths under the current working directory to this path prefix"),
remap_path_scope: RemapPathScopeComponents = (RemapPathScopeComponents::all(), parse_remap_path_scope, [TRACKED],
"remap path scope (default: all)"),
remark_dir: Option<PathBuf> = (None, parse_opt_pathbuf, [UNTRACKED],
"directory into which to write optimization remarks (if not specified, they will be \
written to standard error output)"),

View file

@ -3,6 +3,8 @@
#[must_use]
pub struct FatalError;
use std::panic;
pub use rustc_data_structures::FatalErrorMarker;
// Don't implement Send on FatalError. This makes it impossible to `panic_any!(FatalError)`.
@ -22,3 +24,18 @@ impl std::fmt::Display for FatalError {
}
impl std::error::Error for FatalError {}
/// Runs a closure and catches unwinds triggered by fatal errors.
///
/// The compiler currently unwinds with a special sentinel value to abort
/// compilation on fatal errors. This function catches that sentinel and turns
/// the panic into a `Result` instead; any other panic payload is propagated
/// unchanged.
pub fn catch_fatal_errors<F: FnOnce() -> R, R>(f: F) -> Result<R, FatalError> {
    match panic::catch_unwind(panic::AssertUnwindSafe(f)) {
        Ok(value) => Ok(value),
        // A `FatalErrorMarker` payload is the fatal-error sentinel.
        Err(payload) if payload.is::<FatalErrorMarker>() => Err(FatalError),
        // Anything else is a genuine panic; keep unwinding.
        Err(payload) => panic::resume_unwind(payload),
    }
}

View file

@ -1950,7 +1950,6 @@ symbols! {
rustc_diagnostic_macros,
rustc_dirty,
rustc_do_not_const_check,
rustc_do_not_implement_via_object,
rustc_doc_primitive,
rustc_driver,
rustc_dummy,
@ -1959,6 +1958,7 @@ symbols! {
rustc_dump_predicates,
rustc_dump_user_args,
rustc_dump_vtable,
rustc_dyn_incompatible_trait,
rustc_effective_visibility,
rustc_eii_foreign_item,
rustc_evaluate_where_clauses,

View file

@ -1801,6 +1801,8 @@ supported_targets! {
("x86_64-lynx-lynxos178", x86_64_lynx_lynxos178),
("x86_64-pc-cygwin", x86_64_pc_cygwin),
("x86_64-unknown-linux-gnuasan", x86_64_unknown_linux_gnuasan),
}
/// Cow-Vec-Str: Cow<'static, [Cow<'static, str>]>

View file

@ -5,7 +5,7 @@ use crate::spec::{
pub(crate) fn target() -> Target {
Target {
arch: Arch::AmdGpu,
data_layout: "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-p7:160:256:256:32-p8:128:128:128:48-p9:192:256:256:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5-G1-ni:7:8:9".into(),
data_layout: "e-m:e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-p7:160:256:256:32-p8:128:128:128:48-p9:192:256:256:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5-G1-ni:7:8:9".into(),
llvm_target: "amdgcn-amd-amdhsa".into(),
metadata: TargetMetadata {
description: Some("AMD GPU".into()),

View file

@ -11,7 +11,7 @@ pub(crate) fn target() -> Target {
Target {
llvm_target: "i586-unknown-redox".into(),
metadata: TargetMetadata { description: None, tier: None, host_tools: None, std: None },
metadata: TargetMetadata { description: None, tier: Some(3), host_tools: None, std: None },
pointer_width: 32,
data_layout:
"e-m:e-p:32:32-p270:32:32-p271:32:32-p272:64:64-i128:128-f64:32:64-f80:32-n8:16:32-S128"

View file

@ -22,7 +22,7 @@ pub(crate) fn target() -> Target {
std: Some(true),
},
pointer_width: 64,
data_layout: "E-m:e-i1:8:16-i8:8:16-i64:64-f128:64-v128:64-a:8:16-n32:64".into(),
data_layout: "E-S64-m:e-i1:8:16-i8:8:16-i64:64-f128:64-v128:64-a:8:16-n32:64".into(),
arch: Arch::S390x,
options: base,
}

View file

@ -23,7 +23,7 @@ pub(crate) fn target() -> Target {
std: Some(true),
},
pointer_width: 64,
data_layout: "E-m:e-i1:8:16-i8:8:16-i64:64-f128:64-v128:64-a:8:16-n32:64".into(),
data_layout: "E-S64-m:e-i1:8:16-i8:8:16-i64:64-f128:64-v128:64-a:8:16-n32:64".into(),
arch: Arch::S390x,
options: base,
}

View file

@ -0,0 +1,16 @@
use crate::spec::{SanitizerSet, Target, TargetMetadata};
pub(crate) fn target() -> Target {
let mut base = super::x86_64_unknown_linux_gnu::target();
base.metadata = TargetMetadata {
description: Some(
"64-bit Linux (kernel 3.2+, glibc 2.17+) with ASAN enabled by default".into(),
),
tier: Some(2),
host_tools: Some(false),
std: Some(true),
};
base.supported_sanitizers = SanitizerSet::ADDRESS;
base.default_sanitizers = SanitizerSet::ADDRESS;
base
}

View file

@ -15,7 +15,7 @@ pub(crate) fn target() -> Target {
llvm_target: "x86_64-unknown-linux-none".into(),
metadata: TargetMetadata {
description: None,
tier: None,
tier: Some(3),
host_tools: None,
std: Some(false),
},

View file

@ -9,7 +9,7 @@ pub(crate) fn target() -> Target {
pointer_width: 32,
data_layout: "e-m:e-p:32:32-v1:8:8-i64:64-i128:128-n32".into(),
arch: Arch::Xtensa,
metadata: TargetMetadata { description: None, tier: None, host_tools: None, std: None },
metadata: TargetMetadata { description: None, tier: Some(3), host_tools: None, std: None },
options: TargetOptions {
endian: Endian::Little,

View file

@ -9,7 +9,7 @@ pub(crate) fn target() -> Target {
pointer_width: 32,
data_layout: "e-m:e-p:32:32-v1:8:8-i64:64-i128:128-n32".into(),
arch: Arch::Xtensa,
metadata: TargetMetadata { description: None, tier: None, host_tools: None, std: None },
metadata: TargetMetadata { description: None, tier: Some(3), host_tools: None, std: None },
options: TargetOptions {
endian: Endian::Little,

View file

@ -9,7 +9,7 @@ pub(crate) fn target() -> Target {
pointer_width: 32,
data_layout: "e-m:e-p:32:32-v1:8:8-i64:64-i128:128-n32".into(),
arch: Arch::Xtensa,
metadata: TargetMetadata { description: None, tier: None, host_tools: None, std: None },
metadata: TargetMetadata { description: None, tier: Some(3), host_tools: None, std: None },
options: TargetOptions {
endian: Endian::Little,

View file

@ -1442,6 +1442,31 @@ impl<'a, 'tcx> TypeErrCtxt<'a, 'tcx> {
self.can_eq(param_env, goal.trait_ref, trait_assumption.trait_ref)
}
/// Returns `true` if `assumption` could satisfy the host-effect `goal`:
/// its constness must satisfy the goal's constness, and the trait refs must
/// be equatable (after replacing bound vars with fresh inference vars).
fn can_match_host_effect(
    &self,
    param_env: ty::ParamEnv<'tcx>,
    goal: ty::HostEffectPredicate<'tcx>,
    assumption: ty::Binder<'tcx, ty::HostEffectPredicate<'tcx>>,
) -> bool {
    let fresh = self.instantiate_binder_with_fresh_vars(
        DUMMY_SP,
        infer::BoundRegionConversionTime::HigherRankedType,
        assumption,
    );
    // Check the cheap constness condition before attempting unification.
    fresh.constness.satisfies(goal.constness)
        && self.can_eq(param_env, goal.trait_ref, fresh.trait_ref)
}
/// Projects a predicate down to a bound `HostEffectPredicate`, if that is
/// what it is; returns `None` for non-clauses and other clause kinds.
fn as_host_effect_clause(
    predicate: ty::Predicate<'tcx>,
) -> Option<ty::Binder<'tcx, ty::HostEffectPredicate<'tcx>>> {
    let clause = predicate.as_clause()?;
    match clause.kind().skip_binder() {
        ty::ClauseKind::HostEffect(host) => Some(clause.kind().rebind(host)),
        _ => None,
    }
}
fn can_match_projection(
&self,
param_env: ty::ParamEnv<'tcx>,
@ -1484,6 +1509,12 @@ impl<'a, 'tcx> TypeErrCtxt<'a, 'tcx> {
.filter_map(|implied| implied.as_trait_clause())
.any(|implied| self.can_match_trait(param_env, error, implied))
})
} else if let Some(error) = Self::as_host_effect_clause(error.predicate) {
self.enter_forall(error, |error| {
elaborate(self.tcx, std::iter::once(cond.predicate))
.filter_map(Self::as_host_effect_clause)
.any(|implied| self.can_match_host_effect(param_env, error, implied))
})
} else if let Some(error) = error.predicate.as_projection_clause() {
self.enter_forall(error, |error| {
elaborate(self.tcx, std::iter::once(cond.predicate))

View file

@ -93,7 +93,10 @@ fn dyn_compatibility_violations_for_trait(
// We don't want to include the requirement from `Sized` itself to be `Sized` in the list.
let spans = get_sized_bounds(tcx, trait_def_id);
violations.push(DynCompatibilityViolation::SizedSelf(spans));
} else if let Some(span) = tcx.trait_def(trait_def_id).force_dyn_incompatible {
violations.push(DynCompatibilityViolation::ExplicitlyDynIncompatible([span].into()));
}
let spans = predicates_reference_self(tcx, trait_def_id, false);
if !spans.is_empty() {
violations.push(DynCompatibilityViolation::SupertraitSelf(spans));

View file

@ -799,10 +799,6 @@ fn assemble_candidates_from_object_ty<'cx, 'tcx>(
let tcx = selcx.tcx();
if !tcx.trait_def(obligation.predicate.trait_def_id(tcx)).implement_via_object {
return;
}
let self_ty = obligation.predicate.self_ty();
let object_ty = selcx.infcx.shallow_resolve(self_ty);
let data = match object_ty.kind() {

View file

@ -904,7 +904,9 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
"assemble_candidates_from_object_ty",
);
if !self.tcx().trait_def(obligation.predicate.def_id()).implement_via_object {
if self.tcx().is_sizedness_trait(obligation.predicate.def_id()) {
// `dyn MetaSized` is valid, but should get its `MetaSized` impl from
// being `dyn` (SizedCandidate), not from the object candidate.
return;
}

View file

@ -378,8 +378,6 @@ pub trait Interner:
fn trait_is_fundamental(self, def_id: Self::TraitId) -> bool;
fn trait_may_be_implemented_via_object(self, trait_def_id: Self::TraitId) -> bool;
/// Returns `true` if this is an `unsafe trait`.
fn trait_is_unsafe(self, trait_def_id: Self::TraitId) -> bool;

View file

@ -5,7 +5,7 @@
#[cfg(not(target_arch = "xtensa"))]
use crate::ffi::c_void;
use crate::fmt;
use crate::intrinsics::{va_arg, va_copy};
use crate::intrinsics::{va_arg, va_copy, va_end};
use crate::marker::PhantomCovariantLifetime;
// There are currently three flavors of how a C `va_list` is implemented for
@ -34,6 +34,10 @@ use crate::marker::PhantomCovariantLifetime;
//
// The Clang `BuiltinVaListKind` enumerates the `va_list` variations that Clang supports,
// and we mirror these here.
//
// For all current LLVM targets, `va_copy` lowers to `memcpy`. Hence the inner structs below all
// derive `Copy`. However, in the future we might want to support a target where `va_copy`
// allocates, or otherwise violates the requirements of `Copy`. Therefore `VaList` is only `Clone`.
crate::cfg_select! {
all(
target_arch = "aarch64",
@ -45,10 +49,12 @@ crate::cfg_select! {
///
/// See the [AArch64 Procedure Call Standard] for more details.
///
/// `va_copy` is `memcpy`: <https://github.com/llvm/llvm-project/blob/5aee01a3df011e660f26660bc30a8c94a1651d8e/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp#L12682-L12700>
///
/// [AArch64 Procedure Call Standard]:
/// http://infocenter.arm.com/help/topic/com.arm.doc.ihi0055b/IHI0055B_aapcs64.pdf
#[repr(C)]
#[derive(Debug)]
#[derive(Debug, Clone, Copy)]
struct VaListInner {
stack: *const c_void,
gr_top: *const c_void,
@ -62,11 +68,13 @@ crate::cfg_select! {
///
/// See the [LLVM source] and [GCC header] for more details.
///
/// `va_copy` is `memcpy`: <https://github.com/llvm/llvm-project/blob/5aee01a3df011e660f26660bc30a8c94a1651d8e/llvm/lib/Target/PowerPC/PPCISelLowering.cpp#L3755-L3764>
///
/// [LLVM source]:
/// https://github.com/llvm/llvm-project/blob/af9a4263a1a209953a1d339ef781a954e31268ff/llvm/lib/Target/PowerPC/PPCISelLowering.cpp#L4089-L4111
/// [GCC header]: https://web.mit.edu/darwin/src/modules/gcc/gcc/ginclude/va-ppc.h
#[repr(C)]
#[derive(Debug)]
#[derive(Debug, Clone, Copy)]
#[rustc_pass_indirectly_in_non_rustic_abis]
struct VaListInner {
gpr: u8,
@ -81,10 +89,12 @@ crate::cfg_select! {
///
/// See the [S/390x ELF Application Binary Interface Supplement] for more details.
///
/// `va_copy` is `memcpy`: <https://github.com/llvm/llvm-project/blob/5aee01a3df011e660f26660bc30a8c94a1651d8e/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp#L4457-L4472>
///
/// [S/390x ELF Application Binary Interface Supplement]:
/// https://docs.google.com/gview?embedded=true&url=https://github.com/IBM/s390x-abi/releases/download/v1.7/lzsabi_s390x.pdf
#[repr(C)]
#[derive(Debug)]
#[derive(Debug, Clone, Copy)]
#[rustc_pass_indirectly_in_non_rustic_abis]
struct VaListInner {
gpr: i64,
@ -98,10 +108,13 @@ crate::cfg_select! {
///
/// See the [System V AMD64 ABI] for more details.
///
/// `va_copy` is `memcpy`: <https://github.com/llvm/llvm-project/blob/5aee01a3df011e660f26660bc30a8c94a1651d8e/llvm/lib/Target/X86/X86ISelLowering.cpp#26319>
/// (github won't render that file, look for `SDValue LowerVACOPY`)
///
/// [System V AMD64 ABI]:
/// https://refspecs.linuxbase.org/elf/x86_64-abi-0.99.pdf
#[repr(C)]
#[derive(Debug)]
#[derive(Debug, Clone, Copy)]
#[rustc_pass_indirectly_in_non_rustic_abis]
struct VaListInner {
gp_offset: i32,
@ -115,10 +128,12 @@ crate::cfg_select! {
///
/// See the [LLVM source] for more details.
///
/// `va_copy` is `memcpy`: <https://github.com/llvm/llvm-project/blob/5aee01a3df011e660f26660bc30a8c94a1651d8e/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp#L1260>
///
/// [LLVM source]:
/// https://github.com/llvm/llvm-project/blob/af9a4263a1a209953a1d339ef781a954e31268ff/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp#L1211-L1215
#[repr(C)]
#[derive(Debug)]
#[derive(Debug, Clone, Copy)]
#[rustc_pass_indirectly_in_non_rustic_abis]
struct VaListInner {
stk: *const i32,
@ -132,10 +147,12 @@ crate::cfg_select! {
///
/// See the [LLVM source] for more details. On bare metal Hexagon uses an opaque pointer.
///
/// `va_copy` is `memcpy`: <https://github.com/llvm/llvm-project/blob/5aee01a3df011e660f26660bc30a8c94a1651d8e/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp#L1087-L1102>
///
/// [LLVM source]:
/// https://github.com/llvm/llvm-project/blob/0cdc1b6dd4a870fc41d4b15ad97e0001882aba58/clang/lib/CodeGen/Targets/Hexagon.cpp#L407-L417
#[repr(C)]
#[derive(Debug)]
#[derive(Debug, Clone, Copy)]
#[rustc_pass_indirectly_in_non_rustic_abis]
struct VaListInner {
__current_saved_reg_area_pointer: *const c_void,
@ -156,8 +173,10 @@ crate::cfg_select! {
// That pointer is probably just the next variadic argument on the caller's stack.
_ => {
/// Basic implementation of a `va_list`.
///
/// `va_copy` is `memcpy`: <https://github.com/llvm/llvm-project/blob/87e8e7d8f0db53060ef2f6ef4ab612fc0f2b4490/llvm/lib/Transforms/IPO/ExpandVariadics.cpp#L127-L129>
#[repr(transparent)]
#[derive(Debug)]
#[derive(Debug, Clone, Copy)]
struct VaListInner {
ptr: *const c_void,
}
@ -179,6 +198,31 @@ impl fmt::Debug for VaList<'_> {
}
}
impl VaList<'_> {
// Helper used in the implementation of the `va_copy` intrinsic.
pub(crate) fn duplicate(&self) -> Self {
Self { inner: self.inner.clone(), _marker: self._marker }
}
}
impl Clone for VaList<'_> {
#[inline]
fn clone(&self) -> Self {
// We only implement Clone and not Copy because some future target might not be able to
// implement Copy (e.g. because it allocates). For the same reason we use an intrinsic
// to do the copying: the fact that on all current targets, this is just `memcpy`, is an implementation
// detail. The intrinsic lets Miri catch UB from code incorrectly relying on that implementation detail.
va_copy(self)
}
}
impl<'f> Drop for VaList<'f> {
fn drop(&mut self) {
// SAFETY: this variable argument list is being dropped, so won't be read from again.
unsafe { va_end(self) }
}
}
mod sealed {
pub trait Sealed {}
@ -253,26 +297,6 @@ impl<'f> VaList<'f> {
}
}
impl<'f> Clone for VaList<'f> {
#[inline]
fn clone(&self) -> Self {
let mut dest = crate::mem::MaybeUninit::uninit();
// SAFETY: we write to the `MaybeUninit`, thus it is initialized and `assume_init` is legal.
unsafe {
va_copy(dest.as_mut_ptr(), self);
dest.assume_init()
}
}
}
impl<'f> Drop for VaList<'f> {
fn drop(&mut self) {
// Rust requires that not calling `va_end` on a `va_list` does not cause undefined behaviour
// (as it is safe to leak values). As `va_end` is a no-op on all current LLVM targets, this
// destructor is empty.
}
}
// Checks (via an assert in `compiler/rustc_ty_utils/src/abi.rs`) that the C ABI for the current
// target correctly implements `rustc_pass_indirectly_in_non_rustic_abis`.
const _: () = {

View file

@ -3451,19 +3451,6 @@ pub(crate) const fn miri_promise_symbolic_alignment(ptr: *const (), align: usize
)
}
/// Copies the current location of arglist `src` to the arglist `dst`.
///
/// # Safety
///
/// You must check the following invariants before you call this function:
///
/// - `dest` must be non-null and point to valid, writable memory.
/// - `dest` must not alias `src`.
///
#[rustc_intrinsic]
#[rustc_nounwind]
pub unsafe fn va_copy<'f>(dest: *mut VaList<'f>, src: &VaList<'f>);
/// Loads an argument of type `T` from the `va_list` `ap` and increment the
/// argument `ap` points to.
///
@ -3482,7 +3469,28 @@ pub unsafe fn va_copy<'f>(dest: *mut VaList<'f>, src: &VaList<'f>);
#[rustc_nounwind]
pub unsafe fn va_arg<T: VaArgSafe>(ap: &mut VaList<'_>) -> T;
/// Destroy the arglist `ap` after initialization with `va_start` or `va_copy`.
/// Duplicates a variable argument list. The returned list is initially at the same position as
/// the one in `src`, but can be advanced independently.
///
/// Codegen backends should not have custom behavior for this intrinsic, they should always use
/// this fallback implementation. This intrinsic *does not* map to the LLVM `va_copy` intrinsic.
///
/// This intrinsic exists only as a hook for Miri and constant evaluation, and is used to detect UB
/// when a variable argument list is used incorrectly.
#[rustc_intrinsic]
#[rustc_nounwind]
pub fn va_copy<'f>(src: &VaList<'f>) -> VaList<'f> {
src.duplicate()
}
/// Destroy the variable argument list `ap` after initialization with `va_start` (part of the
/// desugaring of `...`) or `va_copy`.
///
/// Code generation backends should not provide a custom implementation for this intrinsic. This
/// intrinsic *does not* map to the LLVM `va_end` intrinsic.
///
/// This function is a no-op on all current targets, but used as a hook for const evaluation to
/// detect UB when a variable argument list is used incorrectly.
///
/// # Safety
///
@ -3490,4 +3498,6 @@ pub unsafe fn va_arg<T: VaArgSafe>(ap: &mut VaList<'_>) -> T;
///
#[rustc_intrinsic]
#[rustc_nounwind]
pub unsafe fn va_end(ap: &mut VaList<'_>);
pub unsafe fn va_end(ap: &mut VaList<'_>) {
/* deliberately does nothing */
}

View file

@ -153,7 +153,7 @@ unsafe impl<T: Sync + PointeeSized> Send for &T {}
#[fundamental] // for Default, for example, which requires that `[T]: !Default` be evaluatable
#[rustc_specialization_trait]
#[rustc_deny_explicit_impl]
#[rustc_do_not_implement_via_object]
#[rustc_dyn_incompatible_trait]
// `Sized` being coinductive, despite having supertraits, is okay as there are no user-written impls,
// and we know that the supertraits are always implemented if the subtrait is just by looking at
// the builtin impls.
@ -172,7 +172,6 @@ pub trait Sized: MetaSized {
#[fundamental]
#[rustc_specialization_trait]
#[rustc_deny_explicit_impl]
#[rustc_do_not_implement_via_object]
// `MetaSized` being coinductive, despite having supertraits, is okay for the same reasons as
// `Sized` above.
#[rustc_coinductive]
@ -190,7 +189,6 @@ pub trait MetaSized: PointeeSized {
#[fundamental]
#[rustc_specialization_trait]
#[rustc_deny_explicit_impl]
#[rustc_do_not_implement_via_object]
#[rustc_coinductive]
pub trait PointeeSized {
// Empty
@ -236,7 +234,7 @@ pub trait PointeeSized {
#[unstable(feature = "unsize", issue = "18598")]
#[lang = "unsize"]
#[rustc_deny_explicit_impl]
#[rustc_do_not_implement_via_object]
#[rustc_dyn_incompatible_trait]
pub trait Unsize<T: PointeeSized>: PointeeSized {
// Empty.
}
@ -455,9 +453,6 @@ marker_impls! {
/// [impls]: #implementors
#[stable(feature = "rust1", since = "1.0.0")]
#[lang = "copy"]
// This is unsound, but required by `hashbrown`
// FIXME(joboet): change `hashbrown` to use `TrivialClone`
#[rustc_unsafe_specialization_marker]
#[rustc_diagnostic_item = "Copy"]
pub trait Copy: Clone {
// Empty.
@ -512,7 +507,7 @@ impl<T: PointeeSized> Copy for &T {}
#[unstable(feature = "bikeshed_guaranteed_no_drop", issue = "none")]
#[lang = "bikeshed_guaranteed_no_drop"]
#[rustc_deny_explicit_impl]
#[rustc_do_not_implement_via_object]
#[rustc_dyn_incompatible_trait]
#[doc(hidden)]
pub trait BikeshedGuaranteedNoDrop {}
@ -887,7 +882,7 @@ impl<T: PointeeSized> StructuralPartialEq for PhantomData<T> {}
)]
#[lang = "discriminant_kind"]
#[rustc_deny_explicit_impl]
#[rustc_do_not_implement_via_object]
#[rustc_dyn_incompatible_trait]
pub trait DiscriminantKind {
/// The type of the discriminant, which must satisfy the trait
/// bounds required by `mem::Discriminant`.
@ -1057,7 +1052,7 @@ marker_impls! {
#[lang = "destruct"]
#[rustc_on_unimplemented(message = "can't drop `{Self}`", append_const_msg)]
#[rustc_deny_explicit_impl]
#[rustc_do_not_implement_via_object]
#[rustc_dyn_incompatible_trait]
pub const trait Destruct: PointeeSized {}
/// A marker for tuple types.
@ -1068,7 +1063,7 @@ pub const trait Destruct: PointeeSized {}
#[lang = "tuple_trait"]
#[diagnostic::on_unimplemented(message = "`{Self}` is not a tuple")]
#[rustc_deny_explicit_impl]
#[rustc_do_not_implement_via_object]
#[rustc_dyn_incompatible_trait]
pub trait Tuple {}
/// A marker for types which can be used as types of `const` generic parameters.
@ -1126,7 +1121,7 @@ marker_impls! {
)]
#[lang = "fn_ptr_trait"]
#[rustc_deny_explicit_impl]
#[rustc_do_not_implement_via_object]
#[rustc_dyn_incompatible_trait]
pub trait FnPtr: Copy + Clone {
/// Returns the address of the function pointer.
#[lang = "fn_ptr_addr"]

View file

@ -86,7 +86,7 @@ use crate::marker::ConstParamTy_;
#[unstable_feature_bound(transmutability)]
#[lang = "transmute_trait"]
#[rustc_deny_explicit_impl]
#[rustc_do_not_implement_via_object]
#[rustc_dyn_incompatible_trait]
#[rustc_coinductive]
pub unsafe trait TransmuteFrom<Src, const ASSUME: Assume = { Assume::NOTHING }>
where

View file

@ -55,7 +55,7 @@ use crate::ptr::NonNull;
/// [`to_raw_parts`]: *const::to_raw_parts
#[lang = "pointee_trait"]
#[rustc_deny_explicit_impl]
#[rustc_do_not_implement_via_object]
#[rustc_dyn_incompatible_trait]
pub trait Pointee: PointeeSized {
/// The type for metadata in pointers and references to `Self`.
#[lang = "metadata_type"]

View file

@ -126,7 +126,7 @@ pub fn error_string(mut errnum: i32) -> String {
match String::from_utf16(&buf[..res]) {
Ok(mut msg) => {
// Trim trailing CRLF inserted by FormatMessageW
let len = msg.trim_end().len();
let len = msg.trim_ascii_end().len();
msg.truncate(len);
msg
}

View file

@ -818,7 +818,7 @@ impl From<u8> for ExitCode {
impl From<u32> for ExitCode {
fn from(code: u32) -> Self {
ExitCode(u32::from(code))
ExitCode(code)
}
}

View file

@ -53,6 +53,11 @@ check-aux:
src/tools/cargotest \
src/tools/test-float-parse \
$(BOOTSTRAP_ARGS)
# The build-std suite is off by default because it is uncommonly slow
# and memory-hungry.
$(Q)$(BOOTSTRAP) test --stage 2 \
build-std \
$(BOOTSTRAP_ARGS)
# Run standard library tests in Miri.
$(Q)MIRIFLAGS="-Zmiri-strict-provenance" \
$(BOOTSTRAP) miri --stage 2 \

View file

@ -2292,23 +2292,13 @@ impl Step for Assemble {
builder.compiler(target_compiler.stage - 1, builder.config.host_target);
// Build enzyme
if builder.config.llvm_enzyme && !builder.config.dry_run() {
if builder.config.llvm_enzyme {
debug!("`llvm_enzyme` requested");
let enzyme_install = builder.ensure(llvm::Enzyme { target: build_compiler.host });
if let Some(llvm_config) = builder.llvm_config(builder.config.host_target) {
let llvm_version_major = llvm::get_llvm_version_major(builder, &llvm_config);
let lib_ext = std::env::consts::DLL_EXTENSION;
let libenzyme = format!("libEnzyme-{llvm_version_major}");
let src_lib =
enzyme_install.join("build/Enzyme").join(&libenzyme).with_extension(lib_ext);
let libdir = builder.sysroot_target_libdir(build_compiler, build_compiler.host);
let target_libdir =
builder.sysroot_target_libdir(target_compiler, target_compiler.host);
let dst_lib = libdir.join(&libenzyme).with_extension(lib_ext);
let target_dst_lib = target_libdir.join(&libenzyme).with_extension(lib_ext);
builder.copy_link(&src_lib, &dst_lib, FileType::NativeLibrary);
builder.copy_link(&src_lib, &target_dst_lib, FileType::NativeLibrary);
}
let enzyme = builder.ensure(llvm::Enzyme { target: build_compiler.host });
let target_libdir =
builder.sysroot_target_libdir(target_compiler, target_compiler.host);
let target_dst_lib = target_libdir.join(enzyme.enzyme_filename());
builder.copy_link(&enzyme.enzyme_path(), &target_dst_lib, FileType::NativeLibrary);
}
if builder.config.llvm_offload && !builder.config.dry_run() {

Some files were not shown because too many files have changed in this diff Show more