Merge from rustc

This commit is contained in:
The Miri Cronjob Bot 2025-07-10 05:05:40 +00:00
commit 1f721c651e
521 changed files with 8467 additions and 2987 deletions

View file

@ -569,22 +569,16 @@ dependencies = [
"color-print",
"declare_clippy_lint",
"filetime",
"futures",
"if_chain",
"itertools",
"parking_lot",
"pulldown-cmark",
"quote",
"regex",
"rustc_tools_util 0.4.2",
"serde",
"serde_json",
"syn 2.0.104",
"tempfile",
"termize 0.1.1",
"tokio",
"termize",
"toml 0.7.8",
"ui_test",
"ui_test 0.30.2",
"walkdir",
]
@ -642,6 +636,21 @@ dependencies = [
"rustc-semver",
]
[[package]]
name = "clippy_test_deps"
version = "0.1.0"
dependencies = [
"futures",
"if_chain",
"itertools",
"parking_lot",
"quote",
"regex",
"serde",
"syn 2.0.104",
"tokio",
]
[[package]]
name = "clippy_utils"
version = "0.1.90"
@ -2409,7 +2418,7 @@ dependencies = [
"smallvec",
"tempfile",
"tikv-jemalloc-sys",
"ui_test",
"ui_test 0.29.2",
]
[[package]]
@ -3214,7 +3223,7 @@ dependencies = [
[[package]]
name = "run_make_support"
version = "0.2.0"
version = "0.0.0"
dependencies = [
"bstr",
"build_helper",
@ -3773,7 +3782,7 @@ dependencies = [
"serde",
"serde_json",
"termcolor",
"termize 0.2.0",
"termize",
"tracing",
"windows 0.61.3",
]
@ -4536,7 +4545,7 @@ dependencies = [
"rustc_serialize",
"rustc_span",
"rustc_target",
"termize 0.2.0",
"termize",
"tracing",
"windows 0.61.3",
]
@ -5067,6 +5076,17 @@ dependencies = [
"color-eyre",
]
[[package]]
name = "spanned"
version = "0.4.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c92d4b0c055fde758f086eb4a6e73410247df8a3837fd606d2caeeaf72aa566d"
dependencies = [
"anyhow",
"bstr",
"color-eyre",
]
[[package]]
name = "spdx-expression"
version = "0.5.2"
@ -5305,16 +5325,6 @@ dependencies = [
"winapi-util",
]
[[package]]
name = "termize"
version = "0.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1706be6b564323ce7092f5f7e6b118a14c8ef7ed0e69c8c5329c914a9f101295"
dependencies = [
"libc",
"winapi",
]
[[package]]
name = "termize"
version = "0.2.0"
@ -5726,7 +5736,33 @@ dependencies = [
"rustfix",
"serde",
"serde_json",
"spanned",
"spanned 0.3.0",
]
[[package]]
name = "ui_test"
version = "0.30.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b56a6897cc4bb6f8daf1939b0b39cd9645856997f46f4d0b3e3cb7122dfe9251"
dependencies = [
"annotate-snippets 0.11.5",
"anyhow",
"bstr",
"cargo-platform",
"cargo_metadata 0.18.1",
"color-eyre",
"colored",
"comma",
"crossbeam-channel",
"indicatif",
"levenshtein",
"prettydiff",
"regex",
"rustc_version",
"rustfix",
"serde",
"serde_json",
"spanned 0.4.1",
]
[[package]]

View file

@ -13,6 +13,7 @@ members = [
"src/tools/cargotest",
"src/tools/clippy",
"src/tools/clippy/clippy_dev",
"src/tools/clippy/clippy_test_deps",
"src/tools/collect-license-metadata",
"src/tools/compiletest",
"src/tools/coverage-dump",

View file

@ -240,6 +240,9 @@ pub enum AttributeKind {
/// Represents [`#[doc]`](https://doc.rust-lang.org/stable/rustdoc/write-documentation/the-doc-attribute.html).
DocComment { style: AttrStyle, kind: CommentKind, span: Span, comment: Symbol },
/// Represents `#[rustc_dummy]`.
Dummy,
/// Represents [`#[export_name]`](https://doc.rust-lang.org/reference/abi.html#the-export_name-attribute).
ExportName {
/// The name to export this item with.
@ -248,6 +251,15 @@ pub enum AttributeKind {
span: Span,
},
/// Represents `#[export_stable]`.
ExportStable,
/// Represents `#[ffi_const]`.
FfiConst(Span),
/// Represents `#[ffi_pure]`.
FfiPure(Span),
/// Represents `#[ignore]`
Ignore {
span: Span,
@ -326,6 +338,9 @@ pub enum AttributeKind {
span: Span,
},
/// Represents `#[rustc_std_internal_symbol]`.
StdInternalSymbol(Span),
/// Represents `#[target_feature(enable = "...")]`
TargetFeature(ThinVec<(Symbol, Span)>, Span),

View file

@ -25,7 +25,11 @@ impl AttributeKind {
ConstStabilityIndirect => No,
Deprecation { .. } => Yes,
DocComment { .. } => Yes,
Dummy => No,
ExportName { .. } => Yes,
ExportStable => No,
FfiConst(..) => No,
FfiPure(..) => No,
Ignore { .. } => No,
Inline(..) => No,
LinkName { .. } => Yes,
@ -48,6 +52,7 @@ impl AttributeKind {
RustcObjectLifetimeDefault => No,
SkipDuringMethodDispatch { .. } => No,
Stability { .. } => Yes,
StdInternalSymbol(..) => No,
TargetFeature(..) => No,
TrackCaller(..) => Yes,
Used { .. } => No,

View file

@ -146,12 +146,12 @@ attr_parsing_unused_duplicate =
unused attribute
.suggestion = remove this attribute
.note = attribute also specified here
.warn = {-passes_previously_accepted}
.warn = {-attr_parsing_previously_accepted}
attr_parsing_unused_multiple =
multiple `{$name}` attributes
.suggestion = remove this attribute
.note = attribute also specified here
-attr_parsing_perviously_accepted =
-attr_parsing_previously_accepted =
this was previously accepted by the compiler but is being phased out; it will become a hard error in a future release!

View file

@ -15,7 +15,7 @@ pub(crate) struct OptimizeParser;
impl<S: Stage> SingleAttributeParser<S> for OptimizeParser {
const PATH: &[Symbol] = &[sym::optimize];
const ATTRIBUTE_ORDER: AttributeOrder = AttributeOrder::KeepLast;
const ATTRIBUTE_ORDER: AttributeOrder = AttributeOrder::KeepOutermost;
const ON_DUPLICATE: OnDuplicate<S> = OnDuplicate::WarnButFutureError;
const TEMPLATE: AttributeTemplate = template!(List: "size|speed|none");
@ -56,7 +56,7 @@ pub(crate) struct ExportNameParser;
impl<S: Stage> SingleAttributeParser<S> for ExportNameParser {
const PATH: &[rustc_span::Symbol] = &[sym::export_name];
const ATTRIBUTE_ORDER: AttributeOrder = AttributeOrder::KeepFirst;
const ATTRIBUTE_ORDER: AttributeOrder = AttributeOrder::KeepInnermost;
const ON_DUPLICATE: OnDuplicate<S> = OnDuplicate::WarnButFutureError;
const TEMPLATE: AttributeTemplate = template!(NameValueStr: "name");

View file

@ -36,7 +36,7 @@ fn get<S: Stage>(
impl<S: Stage> SingleAttributeParser<S> for DeprecationParser {
const PATH: &[Symbol] = &[sym::deprecated];
const ATTRIBUTE_ORDER: AttributeOrder = AttributeOrder::KeepFirst;
const ATTRIBUTE_ORDER: AttributeOrder = AttributeOrder::KeepInnermost;
const ON_DUPLICATE: OnDuplicate<S> = OnDuplicate::Error;
const TEMPLATE: AttributeTemplate = template!(
Word,

View file

@ -0,0 +1,19 @@
use rustc_attr_data_structures::AttributeKind;
use rustc_feature::{AttributeTemplate, template};
use rustc_span::{Symbol, sym};
use crate::attributes::{AttributeOrder, OnDuplicate, SingleAttributeParser};
use crate::context::{AcceptContext, Stage};
use crate::parser::ArgParser;
pub(crate) struct DummyParser;
impl<S: Stage> SingleAttributeParser<S> for DummyParser {
const PATH: &[Symbol] = &[sym::rustc_dummy];
const ATTRIBUTE_ORDER: AttributeOrder = AttributeOrder::KeepInnermost;
const ON_DUPLICATE: OnDuplicate<S> = OnDuplicate::Ignore;
const TEMPLATE: AttributeTemplate = template!(Word); // Anything, really
fn convert(_: &mut AcceptContext<'_, '_, S>, _: &ArgParser<'_>) -> Option<AttributeKind> {
Some(AttributeKind::Dummy)
}
}

View file

@ -16,7 +16,7 @@ pub(crate) struct InlineParser;
impl<S: Stage> SingleAttributeParser<S> for InlineParser {
const PATH: &'static [Symbol] = &[sym::inline];
const ATTRIBUTE_ORDER: AttributeOrder = AttributeOrder::KeepLast;
const ATTRIBUTE_ORDER: AttributeOrder = AttributeOrder::KeepOutermost;
const ON_DUPLICATE: OnDuplicate<S> = OnDuplicate::WarnButFutureError;
const TEMPLATE: AttributeTemplate = template!(Word, List: "always|never");
@ -57,7 +57,7 @@ pub(crate) struct RustcForceInlineParser;
impl<S: Stage> SingleAttributeParser<S> for RustcForceInlineParser {
const PATH: &'static [Symbol] = &[sym::rustc_force_inline];
const ATTRIBUTE_ORDER: AttributeOrder = AttributeOrder::KeepLast;
const ATTRIBUTE_ORDER: AttributeOrder = AttributeOrder::KeepOutermost;
const ON_DUPLICATE: OnDuplicate<S> = OnDuplicate::WarnButFutureError;
const TEMPLATE: AttributeTemplate = template!(Word, List: "reason", NameValueStr: "reason");

View file

@ -1,9 +1,11 @@
use rustc_attr_data_structures::AttributeKind;
use rustc_attr_data_structures::AttributeKind::{LinkName, LinkSection};
use rustc_feature::{AttributeTemplate, template};
use rustc_span::{Symbol, sym};
use rustc_span::{Span, Symbol, sym};
use crate::attributes::{AttributeOrder, OnDuplicate, SingleAttributeParser};
use crate::attributes::{
AttributeOrder, NoArgsAttributeParser, OnDuplicate, SingleAttributeParser,
};
use crate::context::{AcceptContext, Stage};
use crate::parser::ArgParser;
use crate::session_diagnostics::NullOnLinkSection;
@ -12,7 +14,7 @@ pub(crate) struct LinkNameParser;
impl<S: Stage> SingleAttributeParser<S> for LinkNameParser {
const PATH: &[Symbol] = &[sym::link_name];
const ATTRIBUTE_ORDER: AttributeOrder = AttributeOrder::KeepFirst;
const ATTRIBUTE_ORDER: AttributeOrder = AttributeOrder::KeepInnermost;
const ON_DUPLICATE: OnDuplicate<S> = OnDuplicate::WarnButFutureError;
const TEMPLATE: AttributeTemplate = template!(NameValueStr: "name");
@ -34,7 +36,7 @@ pub(crate) struct LinkSectionParser;
impl<S: Stage> SingleAttributeParser<S> for LinkSectionParser {
const PATH: &[Symbol] = &[sym::link_section];
const ATTRIBUTE_ORDER: AttributeOrder = AttributeOrder::KeepFirst;
const ATTRIBUTE_ORDER: AttributeOrder = AttributeOrder::KeepInnermost;
const ON_DUPLICATE: OnDuplicate<S> = OnDuplicate::WarnButFutureError;
const TEMPLATE: AttributeTemplate = template!(NameValueStr: "name");
@ -57,3 +59,31 @@ impl<S: Stage> SingleAttributeParser<S> for LinkSectionParser {
Some(LinkSection { name, span: cx.attr_span })
}
}
pub(crate) struct ExportStableParser;
impl<S: Stage> NoArgsAttributeParser<S> for ExportStableParser {
const PATH: &[Symbol] = &[sym::export_stable];
const ON_DUPLICATE: OnDuplicate<S> = OnDuplicate::Warn;
const CREATE: fn(Span) -> AttributeKind = |_| AttributeKind::ExportStable;
}
pub(crate) struct FfiConstParser;
impl<S: Stage> NoArgsAttributeParser<S> for FfiConstParser {
const PATH: &[Symbol] = &[sym::ffi_const];
const ON_DUPLICATE: OnDuplicate<S> = OnDuplicate::Warn;
const CREATE: fn(Span) -> AttributeKind = AttributeKind::FfiConst;
}
pub(crate) struct FfiPureParser;
impl<S: Stage> NoArgsAttributeParser<S> for FfiPureParser {
const PATH: &[Symbol] = &[sym::ffi_pure];
const ON_DUPLICATE: OnDuplicate<S> = OnDuplicate::Warn;
const CREATE: fn(Span) -> AttributeKind = AttributeKind::FfiPure;
}
pub(crate) struct StdInternalSymbolParser;
impl<S: Stage> NoArgsAttributeParser<S> for StdInternalSymbolParser {
const PATH: &[Symbol] = &[sym::rustc_std_internal_symbol];
const ON_DUPLICATE: OnDuplicate<S> = OnDuplicate::Error;
const CREATE: fn(Span) -> AttributeKind = AttributeKind::StdInternalSymbol;
}

View file

@ -30,6 +30,7 @@ pub(crate) mod cfg;
pub(crate) mod codegen_attrs;
pub(crate) mod confusables;
pub(crate) mod deprecation;
pub(crate) mod dummy;
pub(crate) mod inline;
pub(crate) mod link_attrs;
pub(crate) mod lint_helpers;
@ -139,7 +140,7 @@ impl<T: SingleAttributeParser<S>, S: Stage> AttributeParser<S> for Single<T, S>
if let Some(pa) = T::convert(cx, args) {
match T::ATTRIBUTE_ORDER {
// keep the first and report immediately. ignore this attribute
AttributeOrder::KeepFirst => {
AttributeOrder::KeepInnermost => {
if let Some((_, unused)) = group.1 {
T::ON_DUPLICATE.exec::<T>(cx, cx.attr_span, unused);
return;
@ -147,7 +148,7 @@ impl<T: SingleAttributeParser<S>, S: Stage> AttributeParser<S> for Single<T, S>
}
// keep the new one and warn about the previous,
// then replace
AttributeOrder::KeepLast => {
AttributeOrder::KeepOutermost => {
if let Some((_, used)) = group.1 {
T::ON_DUPLICATE.exec::<T>(cx, used, cx.attr_span);
}
@ -164,9 +165,6 @@ impl<T: SingleAttributeParser<S>, S: Stage> AttributeParser<S> for Single<T, S>
}
}
// FIXME(jdonszelmann): logic is implemented but the attribute parsers needing
// them will be merged in another PR
#[allow(unused)]
pub(crate) enum OnDuplicate<S: Stage> {
/// Give a default warning
Warn,
@ -212,25 +210,29 @@ impl<S: Stage> OnDuplicate<S> {
}
}
}
//
// FIXME(jdonszelmann): logic is implemented but the attribute parsers needing
// them will be merged in another PR
#[allow(unused)]
pub(crate) enum AttributeOrder {
/// Duplicates after the first attribute will be an error.
///
/// This should be used where duplicates would be ignored, but carry extra
/// meaning that could cause confusion. For example, `#[stable(since="1.0")]
/// #[stable(since="2.0")]`, which version should be used for `stable`?
KeepFirst,
/// Duplicates preceding the last instance of the attribute will be a
/// warning, with a note that this will be an error in the future.
pub(crate) enum AttributeOrder {
/// Duplicates after the innermost instance of the attribute will be an error/warning.
/// Only keep the lowest attribute.
///
/// This is the same as `FutureWarnFollowing`, except the last attribute is
/// the one that is "used". Ideally these can eventually migrate to
/// `ErrorPreceding`.
KeepLast,
/// Attributes are processed from bottom to top, so this raises a warning/error on all the attributes
/// further above the lowest one:
/// ```
/// #[stable(since="1.0")] //~ WARNING duplicated attribute
/// #[stable(since="2.0")]
/// ```
KeepInnermost,
/// Duplicates before the outermost instance of the attribute will be an error/warning.
/// Only keep the highest attribute.
///
/// Attributes are processed from bottom to top, so this raises a warning/error on all the attributes
/// below the highest one:
/// ```
/// #[path="foo.rs"]
/// #[path="bar.rs"] //~ WARNING duplicated attribute
/// ```
KeepOutermost,
}
/// An even simpler version of [`SingleAttributeParser`]:
@ -256,7 +258,7 @@ impl<T: NoArgsAttributeParser<S>, S: Stage> Default for WithoutArgs<T, S> {
impl<T: NoArgsAttributeParser<S>, S: Stage> SingleAttributeParser<S> for WithoutArgs<T, S> {
const PATH: &[Symbol] = T::PATH;
const ATTRIBUTE_ORDER: AttributeOrder = AttributeOrder::KeepLast;
const ATTRIBUTE_ORDER: AttributeOrder = AttributeOrder::KeepOutermost;
const ON_DUPLICATE: OnDuplicate<S> = T::ON_DUPLICATE;
const TEMPLATE: AttributeTemplate = template!(Word);

View file

@ -12,7 +12,7 @@ pub(crate) struct MustUseParser;
impl<S: Stage> SingleAttributeParser<S> for MustUseParser {
const PATH: &[Symbol] = &[sym::must_use];
const ATTRIBUTE_ORDER: AttributeOrder = AttributeOrder::KeepLast;
const ATTRIBUTE_ORDER: AttributeOrder = AttributeOrder::KeepOutermost;
const ON_DUPLICATE: OnDuplicate<S> = OnDuplicate::WarnButFutureError;
const TEMPLATE: AttributeTemplate = template!(Word, NameValueStr: "reason");

View file

@ -10,7 +10,7 @@ pub(crate) struct PathParser;
impl<S: Stage> SingleAttributeParser<S> for PathParser {
const PATH: &[Symbol] = &[sym::path];
const ATTRIBUTE_ORDER: AttributeOrder = AttributeOrder::KeepLast;
const ATTRIBUTE_ORDER: AttributeOrder = AttributeOrder::KeepOutermost;
const ON_DUPLICATE: OnDuplicate<S> = OnDuplicate::WarnButFutureError;
const TEMPLATE: AttributeTemplate = template!(NameValueStr: "file");

View file

@ -11,7 +11,7 @@ pub(crate) struct RustcLayoutScalarValidRangeStart;
impl<S: Stage> SingleAttributeParser<S> for RustcLayoutScalarValidRangeStart {
const PATH: &'static [Symbol] = &[sym::rustc_layout_scalar_valid_range_start];
const ATTRIBUTE_ORDER: AttributeOrder = AttributeOrder::KeepFirst;
const ATTRIBUTE_ORDER: AttributeOrder = AttributeOrder::KeepInnermost;
const ON_DUPLICATE: OnDuplicate<S> = OnDuplicate::Error;
const TEMPLATE: AttributeTemplate = template!(List: "start");
@ -25,7 +25,7 @@ pub(crate) struct RustcLayoutScalarValidRangeEnd;
impl<S: Stage> SingleAttributeParser<S> for RustcLayoutScalarValidRangeEnd {
const PATH: &'static [Symbol] = &[sym::rustc_layout_scalar_valid_range_end];
const ATTRIBUTE_ORDER: AttributeOrder = AttributeOrder::KeepFirst;
const ATTRIBUTE_ORDER: AttributeOrder = AttributeOrder::KeepInnermost;
const ON_DUPLICATE: OnDuplicate<S> = OnDuplicate::Error;
const TEMPLATE: AttributeTemplate = template!(List: "end");
@ -62,7 +62,7 @@ pub(crate) struct RustcObjectLifetimeDefaultParser;
impl<S: Stage> SingleAttributeParser<S> for RustcObjectLifetimeDefaultParser {
const PATH: &[rustc_span::Symbol] = &[sym::rustc_object_lifetime_default];
const ATTRIBUTE_ORDER: AttributeOrder = AttributeOrder::KeepFirst;
const ATTRIBUTE_ORDER: AttributeOrder = AttributeOrder::KeepInnermost;
const ON_DUPLICATE: OnDuplicate<S> = OnDuplicate::Error;
const TEMPLATE: AttributeTemplate = template!(Word);

View file

@ -11,7 +11,7 @@ pub(crate) struct IgnoreParser;
impl<S: Stage> SingleAttributeParser<S> for IgnoreParser {
const PATH: &[Symbol] = &[sym::ignore];
const ATTRIBUTE_ORDER: AttributeOrder = AttributeOrder::KeepLast;
const ATTRIBUTE_ORDER: AttributeOrder = AttributeOrder::KeepOutermost;
const ON_DUPLICATE: OnDuplicate<S> = OnDuplicate::Warn;
const TEMPLATE: AttributeTemplate = template!(Word, NameValueStr: "reason");

View file

@ -12,7 +12,7 @@ pub(crate) struct SkipDuringMethodDispatchParser;
impl<S: Stage> SingleAttributeParser<S> for SkipDuringMethodDispatchParser {
const PATH: &[Symbol] = &[sym::rustc_skip_during_method_dispatch];
const ATTRIBUTE_ORDER: AttributeOrder = AttributeOrder::KeepFirst;
const ATTRIBUTE_ORDER: AttributeOrder = AttributeOrder::KeepInnermost;
const ON_DUPLICATE: OnDuplicate<S> = OnDuplicate::Error;
const TEMPLATE: AttributeTemplate = template!(List: "array, boxed_slice");

View file

@ -14,7 +14,7 @@ pub(crate) struct TransparencyParser;
#[allow(rustc::diagnostic_outside_of_impl)]
impl<S: Stage> SingleAttributeParser<S> for TransparencyParser {
const PATH: &[Symbol] = &[sym::rustc_macro_transparency];
const ATTRIBUTE_ORDER: AttributeOrder = AttributeOrder::KeepFirst;
const ATTRIBUTE_ORDER: AttributeOrder = AttributeOrder::KeepInnermost;
const ON_DUPLICATE: OnDuplicate<S> = OnDuplicate::Custom(|cx, used, unused| {
cx.dcx().span_err(vec![used, unused], "multiple macro transparency attributes");
});

View file

@ -21,8 +21,12 @@ use crate::attributes::codegen_attrs::{
};
use crate::attributes::confusables::ConfusablesParser;
use crate::attributes::deprecation::DeprecationParser;
use crate::attributes::dummy::DummyParser;
use crate::attributes::inline::{InlineParser, RustcForceInlineParser};
use crate::attributes::link_attrs::{LinkNameParser, LinkSectionParser};
use crate::attributes::link_attrs::{
ExportStableParser, FfiConstParser, FfiPureParser, LinkNameParser, LinkSectionParser,
StdInternalSymbolParser,
};
use crate::attributes::lint_helpers::{AsPtrParser, PassByValueParser, PubTransparentParser};
use crate::attributes::loop_match::{ConstContinueParser, LoopMatchParser};
use crate::attributes::must_use::MustUseParser;
@ -127,6 +131,7 @@ attribute_parsers!(
// tidy-alphabetical-start
Single<DeprecationParser>,
Single<DummyParser>,
Single<ExportNameParser>,
Single<IgnoreParser>,
Single<InlineParser>,
@ -145,6 +150,9 @@ attribute_parsers!(
Single<WithoutArgs<ColdParser>>,
Single<WithoutArgs<ConstContinueParser>>,
Single<WithoutArgs<ConstStabilityIndirectParser>>,
Single<WithoutArgs<ExportStableParser>>,
Single<WithoutArgs<FfiConstParser>>,
Single<WithoutArgs<FfiPureParser>>,
Single<WithoutArgs<LoopMatchParser>>,
Single<WithoutArgs<MayDangleParser>>,
Single<WithoutArgs<NoImplicitPreludeParser>>,
@ -152,6 +160,7 @@ attribute_parsers!(
Single<WithoutArgs<NonExhaustiveParser>>,
Single<WithoutArgs<PassByValueParser>>,
Single<WithoutArgs<PubTransparentParser>>,
Single<WithoutArgs<StdInternalSymbolParser>>,
Single<WithoutArgs<TrackCallerParser>>,
// tidy-alphabetical-end
];

View file

@ -1,7 +1,9 @@
//! This file provides API for compiler consumers.
use rustc_data_structures::fx::FxHashMap;
use rustc_hir::def_id::LocalDefId;
use rustc_index::IndexVec;
use rustc_middle::bug;
use rustc_middle::mir::{Body, Promoted};
use rustc_middle::ty::TyCtxt;
@ -17,7 +19,39 @@ pub use super::polonius::legacy::{
pub use super::region_infer::RegionInferenceContext;
use crate::{BorrowCheckRootCtxt, do_mir_borrowck};
/// Options determining the output behavior of [`get_body_with_borrowck_facts`].
/// Struct used during mir borrowck to collect bodies with facts for a typeck root and all
/// its nested bodies.
pub(crate) struct BorrowckConsumer<'tcx> {
options: ConsumerOptions,
bodies: FxHashMap<LocalDefId, BodyWithBorrowckFacts<'tcx>>,
}
impl<'tcx> BorrowckConsumer<'tcx> {
pub(crate) fn new(options: ConsumerOptions) -> Self {
Self { options, bodies: Default::default() }
}
pub(crate) fn insert_body(&mut self, def_id: LocalDefId, body: BodyWithBorrowckFacts<'tcx>) {
if self.bodies.insert(def_id, body).is_some() {
bug!("unexpected previous body for {def_id:?}");
}
}
/// Should the Polonius input facts be computed?
pub(crate) fn polonius_input(&self) -> bool {
matches!(
self.options,
ConsumerOptions::PoloniusInputFacts | ConsumerOptions::PoloniusOutputFacts
)
}
/// Should we run Polonius and collect the output facts?
pub(crate) fn polonius_output(&self) -> bool {
matches!(self.options, ConsumerOptions::PoloniusOutputFacts)
}
}
/// Options determining the output behavior of [`get_bodies_with_borrowck_facts`].
///
/// If executing under `-Z polonius` the choice here has no effect, and everything as if
/// [`PoloniusOutputFacts`](ConsumerOptions::PoloniusOutputFacts) had been selected
@ -43,17 +77,6 @@ pub enum ConsumerOptions {
PoloniusOutputFacts,
}
impl ConsumerOptions {
/// Should the Polonius input facts be computed?
pub(crate) fn polonius_input(&self) -> bool {
matches!(self, Self::PoloniusInputFacts | Self::PoloniusOutputFacts)
}
/// Should we run Polonius and collect the output facts?
pub(crate) fn polonius_output(&self) -> bool {
matches!(self, Self::PoloniusOutputFacts)
}
}
/// A `Body` with information computed by the borrow checker. This struct is
/// intended to be consumed by compiler consumers.
///
@ -82,25 +105,35 @@ pub struct BodyWithBorrowckFacts<'tcx> {
pub output_facts: Option<Box<PoloniusOutput>>,
}
/// This function computes borrowck facts for the given body. The [`ConsumerOptions`]
/// determine which facts are returned. This function makes a copy of the body because
/// it needs to regenerate the region identifiers. It should never be invoked during a
/// typical compilation session due to the unnecessary overhead of returning
/// [`BodyWithBorrowckFacts`].
/// This function computes borrowck facts for the given def id and all its nested bodies.
/// It must be called with a typeck root which will then borrowck all nested bodies as well.
/// The [`ConsumerOptions`] determine which facts are returned. This function makes a copy
/// of the bodies because it needs to regenerate the region identifiers. It should never be
/// invoked during a typical compilation session due to the unnecessary overhead of
/// returning [`BodyWithBorrowckFacts`].
///
/// Note:
/// * This function will panic if the required body was already stolen. This
/// * This function will panic if the required bodies were already stolen. This
/// can, for example, happen when requesting a body of a `const` function
/// because they are evaluated during typechecking. The panic can be avoided
/// by overriding the `mir_borrowck` query. You can find a complete example
/// that shows how to do this at `tests/run-make/obtain-borrowck/`.
/// that shows how to do this at `tests/ui-fulldeps/obtain-borrowck.rs`.
///
/// * Polonius is highly unstable, so expect regular changes in its signature or other details.
pub fn get_body_with_borrowck_facts(
pub fn get_bodies_with_borrowck_facts(
tcx: TyCtxt<'_>,
def_id: LocalDefId,
root_def_id: LocalDefId,
options: ConsumerOptions,
) -> BodyWithBorrowckFacts<'_> {
let mut root_cx = BorrowCheckRootCtxt::new(tcx, def_id);
*do_mir_borrowck(&mut root_cx, def_id, Some(options)).1.unwrap()
) -> FxHashMap<LocalDefId, BodyWithBorrowckFacts<'_>> {
let mut root_cx =
BorrowCheckRootCtxt::new(tcx, root_def_id, Some(BorrowckConsumer::new(options)));
// See comment in `rustc_borrowck::mir_borrowck`
let nested_bodies = tcx.nested_bodies_within(root_def_id);
for def_id in nested_bodies {
root_cx.get_or_insert_nested(def_id);
}
do_mir_borrowck(&mut root_cx, root_def_id);
root_cx.consumer.unwrap().bodies
}

View file

@ -51,7 +51,7 @@ use smallvec::SmallVec;
use tracing::{debug, instrument};
use crate::borrow_set::{BorrowData, BorrowSet};
use crate::consumers::{BodyWithBorrowckFacts, ConsumerOptions};
use crate::consumers::BodyWithBorrowckFacts;
use crate::dataflow::{BorrowIndex, Borrowck, BorrowckDomain, Borrows};
use crate::diagnostics::{
AccessKind, BorrowckDiagnosticsBuffer, IllegalMoveOriginKind, MoveError, RegionName,
@ -124,7 +124,7 @@ fn mir_borrowck(
let opaque_types = ConcreteOpaqueTypes(Default::default());
Ok(tcx.arena.alloc(opaque_types))
} else {
let mut root_cx = BorrowCheckRootCtxt::new(tcx, def);
let mut root_cx = BorrowCheckRootCtxt::new(tcx, def, None);
// We need to manually borrowck all nested bodies from the HIR as
// we do not generate MIR for dead code. Not doing so causes us to
// never check closures in dead code.
@ -134,7 +134,7 @@ fn mir_borrowck(
}
let PropagatedBorrowCheckResults { closure_requirements, used_mut_upvars } =
do_mir_borrowck(&mut root_cx, def, None).0;
do_mir_borrowck(&mut root_cx, def);
debug_assert!(closure_requirements.is_none());
debug_assert!(used_mut_upvars.is_empty());
root_cx.finalize()
@ -289,17 +289,12 @@ impl<'tcx> ClosureOutlivesSubjectTy<'tcx> {
/// Perform the actual borrow checking.
///
/// Use `consumer_options: None` for the default behavior of returning
/// [`PropagatedBorrowCheckResults`] only. Otherwise, return [`BodyWithBorrowckFacts`]
/// according to the given [`ConsumerOptions`].
///
/// For nested bodies this should only be called through `root_cx.get_or_insert_nested`.
#[instrument(skip(root_cx), level = "debug")]
fn do_mir_borrowck<'tcx>(
root_cx: &mut BorrowCheckRootCtxt<'tcx>,
def: LocalDefId,
consumer_options: Option<ConsumerOptions>,
) -> (PropagatedBorrowCheckResults<'tcx>, Option<Box<BodyWithBorrowckFacts<'tcx>>>) {
) -> PropagatedBorrowCheckResults<'tcx> {
let tcx = root_cx.tcx;
let infcx = BorrowckInferCtxt::new(tcx, def);
let (input_body, promoted) = tcx.mir_promoted(def);
@ -343,7 +338,6 @@ fn do_mir_borrowck<'tcx>(
&location_table,
&move_data,
&borrow_set,
consumer_options,
);
// Dump MIR results into a file, if that is enabled. This lets us
@ -483,23 +477,24 @@ fn do_mir_borrowck<'tcx>(
used_mut_upvars: mbcx.used_mut_upvars,
};
let body_with_facts = if consumer_options.is_some() {
Some(Box::new(BodyWithBorrowckFacts {
body: body_owned,
promoted,
borrow_set,
region_inference_context: regioncx,
location_table: polonius_input.as_ref().map(|_| location_table),
input_facts: polonius_input,
output_facts: polonius_output,
}))
} else {
None
};
if let Some(consumer) = &mut root_cx.consumer {
consumer.insert_body(
def,
BodyWithBorrowckFacts {
body: body_owned,
promoted,
borrow_set,
region_inference_context: regioncx,
location_table: polonius_input.as_ref().map(|_| location_table),
input_facts: polonius_input,
output_facts: polonius_output,
},
);
}
debug!("do_mir_borrowck: result = {:#?}", result);
(result, body_with_facts)
result
}
fn get_flow_results<'a, 'tcx>(

View file

@ -18,7 +18,6 @@ use rustc_span::sym;
use tracing::{debug, instrument};
use crate::borrow_set::BorrowSet;
use crate::consumers::ConsumerOptions;
use crate::diagnostics::RegionErrors;
use crate::handle_placeholders::compute_sccs_applying_placeholder_outlives_constraints;
use crate::polonius::PoloniusDiagnosticsContext;
@ -83,12 +82,11 @@ pub(crate) fn compute_regions<'tcx>(
location_table: &PoloniusLocationTable,
move_data: &MoveData<'tcx>,
borrow_set: &BorrowSet<'tcx>,
consumer_options: Option<ConsumerOptions>,
) -> NllOutput<'tcx> {
let is_polonius_legacy_enabled = infcx.tcx.sess.opts.unstable_opts.polonius.is_legacy_enabled();
let polonius_input = consumer_options.map(|c| c.polonius_input()).unwrap_or_default()
let polonius_input = root_cx.consumer.as_ref().map_or(false, |c| c.polonius_input())
|| is_polonius_legacy_enabled;
let polonius_output = consumer_options.map(|c| c.polonius_output()).unwrap_or_default()
let polonius_output = root_cx.consumer.as_ref().map_or(false, |c| c.polonius_output())
|| is_polonius_legacy_enabled;
let mut polonius_facts =
(polonius_input || PoloniusFacts::enabled(infcx.tcx)).then_some(PoloniusFacts::default());

View file

@ -6,6 +6,7 @@ use rustc_middle::ty::{OpaqueHiddenType, Ty, TyCtxt, TypeVisitableExt};
use rustc_span::ErrorGuaranteed;
use smallvec::SmallVec;
use crate::consumers::BorrowckConsumer;
use crate::{ClosureRegionRequirements, ConcreteOpaqueTypes, PropagatedBorrowCheckResults};
/// The shared context used by both the root as well as all its nested
@ -16,16 +17,24 @@ pub(super) struct BorrowCheckRootCtxt<'tcx> {
concrete_opaque_types: ConcreteOpaqueTypes<'tcx>,
nested_bodies: FxHashMap<LocalDefId, PropagatedBorrowCheckResults<'tcx>>,
tainted_by_errors: Option<ErrorGuaranteed>,
/// This should be `None` during normal compilation. See [`crate::consumers`] for more
/// information on how this is used.
pub(crate) consumer: Option<BorrowckConsumer<'tcx>>,
}
impl<'tcx> BorrowCheckRootCtxt<'tcx> {
pub(super) fn new(tcx: TyCtxt<'tcx>, root_def_id: LocalDefId) -> BorrowCheckRootCtxt<'tcx> {
pub(super) fn new(
tcx: TyCtxt<'tcx>,
root_def_id: LocalDefId,
consumer: Option<BorrowckConsumer<'tcx>>,
) -> BorrowCheckRootCtxt<'tcx> {
BorrowCheckRootCtxt {
tcx,
root_def_id,
concrete_opaque_types: Default::default(),
nested_bodies: Default::default(),
tainted_by_errors: None,
consumer,
}
}
@ -71,7 +80,7 @@ impl<'tcx> BorrowCheckRootCtxt<'tcx> {
self.root_def_id.to_def_id()
);
if !self.nested_bodies.contains_key(&def_id) {
let result = super::do_mir_borrowck(self, def_id, None).0;
let result = super::do_mir_borrowck(self, def_id);
if let Some(prev) = self.nested_bodies.insert(def_id, result) {
bug!("unexpected previous nested body: {prev:?}");
}

View file

@ -175,6 +175,13 @@ pub(crate) fn codegen_const_value<'tcx>(
fx.module.declare_data_in_func(data_id, &mut fx.bcx.func);
fx.bcx.ins().global_value(fx.pointer_type, local_data_id)
}
GlobalAlloc::TypeId { .. } => {
return CValue::const_val(
fx,
layout,
ScalarInt::try_from_target_usize(offset.bytes(), fx.tcx).unwrap(),
);
}
GlobalAlloc::Static(def_id) => {
assert!(fx.tcx.is_static(def_id));
let data_id = data_id_for_static(
@ -360,6 +367,7 @@ fn define_all_allocs(tcx: TyCtxt<'_>, module: &mut dyn Module, cx: &mut Constant
GlobalAlloc::Memory(alloc) => alloc,
GlobalAlloc::Function { .. }
| GlobalAlloc::Static(_)
| GlobalAlloc::TypeId { .. }
| GlobalAlloc::VTable(..) => {
unreachable!()
}
@ -471,6 +479,11 @@ fn define_all_allocs(tcx: TyCtxt<'_>, module: &mut dyn Module, cx: &mut Constant
.principal()
.map(|principal| tcx.instantiate_bound_regions_with_erased(principal)),
),
GlobalAlloc::TypeId { .. } => {
// Nothing to do, the bytes/offset of this pointer have already been written together with all other bytes,
// so we just need to drop this provenance.
continue;
}
GlobalAlloc::Static(def_id) => {
if tcx.codegen_fn_attrs(def_id).flags.contains(CodegenFnAttrFlags::THREAD_LOCAL)
{

View file

@ -1,7 +1,6 @@
use gccjit::{LValue, RValue, ToRValue, Type};
use rustc_abi as abi;
use rustc_abi::HasDataLayout;
use rustc_abi::Primitive::Pointer;
use rustc_abi::{self as abi, HasDataLayout};
use rustc_codegen_ssa::traits::{
BaseTypeCodegenMethods, ConstCodegenMethods, MiscCodegenMethods, StaticCodegenMethods,
};
@ -282,6 +281,13 @@ impl<'gcc, 'tcx> ConstCodegenMethods for CodegenCx<'gcc, 'tcx> {
let init = self.const_data_from_alloc(alloc);
self.static_addr_of(init, alloc.inner().align, None)
}
GlobalAlloc::TypeId { .. } => {
let val = self.const_usize(offset.bytes());
// This is still a variable of pointer type, even though we only use the provenance
// of that pointer in CTFE and Miri. But to make LLVM's type system happy,
// we need an int-to-ptr cast here (it doesn't matter at all which provenance that picks).
return self.context.new_cast(None, val, ty);
}
GlobalAlloc::Static(def_id) => {
assert!(self.tcx.is_static(def_id));
self.get_static(def_id).get_address(None)

View file

@ -3,9 +3,8 @@
use std::borrow::Borrow;
use libc::{c_char, c_uint};
use rustc_abi as abi;
use rustc_abi::HasDataLayout;
use rustc_abi::Primitive::Pointer;
use rustc_abi::{self as abi, HasDataLayout as _};
use rustc_ast::Mutability;
use rustc_codegen_ssa::common::TypeKind;
use rustc_codegen_ssa::traits::*;
@ -284,7 +283,8 @@ impl<'ll, 'tcx> ConstCodegenMethods for CodegenCx<'ll, 'tcx> {
self.const_bitcast(llval, llty)
};
} else {
let init = const_alloc_to_llvm(self, alloc, /*static*/ false);
let init =
const_alloc_to_llvm(self, alloc.inner(), /*static*/ false);
let alloc = alloc.inner();
let value = match alloc.mutability {
Mutability::Mut => self.static_addr_of_mut(init, alloc.align, None),
@ -316,15 +316,19 @@ impl<'ll, 'tcx> ConstCodegenMethods for CodegenCx<'ll, 'tcx> {
}),
)))
.unwrap_memory();
let init = const_alloc_to_llvm(self, alloc, /*static*/ false);
let value = self.static_addr_of_impl(init, alloc.inner().align, None);
value
let init = const_alloc_to_llvm(self, alloc.inner(), /*static*/ false);
self.static_addr_of_impl(init, alloc.inner().align, None)
}
GlobalAlloc::Static(def_id) => {
assert!(self.tcx.is_static(def_id));
assert!(!self.tcx.is_thread_local_static(def_id));
self.get_static(def_id)
}
GlobalAlloc::TypeId { .. } => {
// Drop the provenance, the offset contains the bytes of the hash
let llval = self.const_usize(offset.bytes());
return unsafe { llvm::LLVMConstIntToPtr(llval, llty) };
}
};
let base_addr_space = global_alloc.address_space(self);
let llval = unsafe {
@ -346,7 +350,7 @@ impl<'ll, 'tcx> ConstCodegenMethods for CodegenCx<'ll, 'tcx> {
}
fn const_data_from_alloc(&self, alloc: ConstAllocation<'_>) -> Self::Value {
const_alloc_to_llvm(self, alloc, /*static*/ false)
const_alloc_to_llvm(self, alloc.inner(), /*static*/ false)
}
fn const_ptr_byte_offset(&self, base_addr: Self::Value, offset: abi::Size) -> Self::Value {

View file

@ -27,10 +27,9 @@ use crate::{base, debuginfo};
pub(crate) fn const_alloc_to_llvm<'ll>(
cx: &CodegenCx<'ll, '_>,
alloc: ConstAllocation<'_>,
alloc: &Allocation,
is_static: bool,
) -> &'ll Value {
let alloc = alloc.inner();
// We expect that callers of const_alloc_to_llvm will instead directly codegen a pointer or
// integer for any &ZST where the ZST is a constant (i.e. not a static). We should never be
// producing empty LLVM allocations as they're just adding noise to binaries and forcing less
@ -141,7 +140,7 @@ fn codegen_static_initializer<'ll, 'tcx>(
def_id: DefId,
) -> Result<(&'ll Value, ConstAllocation<'tcx>), ErrorHandled> {
let alloc = cx.tcx.eval_static_initializer(def_id)?;
Ok((const_alloc_to_llvm(cx, alloc, /*static*/ true), alloc))
Ok((const_alloc_to_llvm(cx, alloc.inner(), /*static*/ true), alloc))
}
fn set_global_alignment<'ll>(cx: &CodegenCx<'ll, '_>, gv: &'ll Value, mut align: Align) {

View file

@ -1379,7 +1379,7 @@ pub fn linker_and_flavor(sess: &Session) -> (PathBuf, LinkerFlavor) {
}
}
let features = sess.opts.unstable_opts.linker_features;
let features = sess.opts.cg.linker_features;
// linker and linker flavor specified via command line have precedence over what the target
// specification specifies
@ -3327,35 +3327,6 @@ fn add_lld_args(
// this, `wasm-component-ld`, which is overridden if this option is passed.
if !sess.target.is_like_wasm {
cmd.cc_arg("-fuse-ld=lld");
// On ELF platforms like at least x64 linux, GNU ld and LLD have opposite defaults on some
// section garbage-collection features. For example, the somewhat popular `linkme` crate and
// its dependents rely in practice on this difference: when using lld, they need `-z
// nostart-stop-gc` to prevent encapsulation symbols and sections from being
// garbage-collected.
//
// More information about all this can be found in:
// - https://maskray.me/blog/2021-01-31-metadata-sections-comdat-and-shf-link-order
// - https://lld.llvm.org/ELF/start-stop-gc
//
// So when using lld, we restore, for now, the traditional behavior to help migration, but
// will remove it in the future.
// Since this only disables an optimization, it shouldn't create issues, but is in theory
// slightly suboptimal. However, it:
// - doesn't have any visible impact on our benchmarks
// - reduces the need to disable lld for the crates that depend on this
//
// Note that lld can detect some cases where this difference is relied on, and emits a
// dedicated error to add this link arg. We could make use of this error to emit an FCW. As
// of writing this, we don't do it, because lld is already enabled by default on nightly
// without this mitigation: no working project would see the FCW, so we do this to help
// stabilization.
//
// FIXME: emit an FCW if linking fails due its absence, and then remove this link-arg in the
// future.
if sess.target.llvm_target == "x86_64-unknown-linux-gnu" {
cmd.link_arg("-znostart-stop-gc");
}
}
if !flavor.is_gnu() {

View file

@ -203,6 +203,13 @@ fn codegen_fn_attrs(tcx: TyCtxt<'_>, did: LocalDefId) -> CodegenFnAttrs {
UsedBy::Compiler => codegen_fn_attrs.flags |= CodegenFnAttrFlags::USED_COMPILER,
UsedBy::Linker => codegen_fn_attrs.flags |= CodegenFnAttrFlags::USED_LINKER,
},
AttributeKind::FfiConst(_) => {
codegen_fn_attrs.flags |= CodegenFnAttrFlags::FFI_CONST
}
AttributeKind::FfiPure(_) => codegen_fn_attrs.flags |= CodegenFnAttrFlags::FFI_PURE,
AttributeKind::StdInternalSymbol(_) => {
codegen_fn_attrs.flags |= CodegenFnAttrFlags::RUSTC_STD_INTERNAL_SYMBOL
}
_ => {}
}
}
@ -213,17 +220,12 @@ fn codegen_fn_attrs(tcx: TyCtxt<'_>, did: LocalDefId) -> CodegenFnAttrs {
match name {
sym::rustc_allocator => codegen_fn_attrs.flags |= CodegenFnAttrFlags::ALLOCATOR,
sym::ffi_pure => codegen_fn_attrs.flags |= CodegenFnAttrFlags::FFI_PURE,
sym::ffi_const => codegen_fn_attrs.flags |= CodegenFnAttrFlags::FFI_CONST,
sym::rustc_nounwind => codegen_fn_attrs.flags |= CodegenFnAttrFlags::NEVER_UNWIND,
sym::rustc_reallocator => codegen_fn_attrs.flags |= CodegenFnAttrFlags::REALLOCATOR,
sym::rustc_deallocator => codegen_fn_attrs.flags |= CodegenFnAttrFlags::DEALLOCATOR,
sym::rustc_allocator_zeroed => {
codegen_fn_attrs.flags |= CodegenFnAttrFlags::ALLOCATOR_ZEROED
}
sym::rustc_std_internal_symbol => {
codegen_fn_attrs.flags |= CodegenFnAttrFlags::RUSTC_STD_INTERNAL_SYMBOL
}
sym::thread_local => codegen_fn_attrs.flags |= CodegenFnAttrFlags::THREAD_LOCAL,
sym::linkage => {
if let Some(val) = attr.value_str() {

View file

@ -171,8 +171,7 @@ impl<'a, 'b, 'tcx, Bx: BuilderMethods<'b, 'tcx>> Visitor<'tcx> for LocalAnalyzer
if let Some(local) = place.as_local() {
self.define(local, DefLocation::Assignment(location));
if self.locals[local] != LocalKind::Memory {
let decl_span = self.fx.mir.local_decls[local].source_info.span;
if !self.fx.rvalue_creates_operand(rvalue, decl_span) {
if !self.fx.rvalue_creates_operand(rvalue) {
self.locals[local] = LocalKind::Memory;
}
}

View file

@ -186,7 +186,7 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> {
offset: Size,
) -> Self {
let alloc_align = alloc.inner().align;
assert!(alloc_align >= layout.align.abi);
assert!(alloc_align >= layout.align.abi, "{alloc_align:?} < {:?}", layout.align.abi);
let read_scalar = |start, size, s: abi::Scalar, ty| {
match alloc.0.read_scalar(
@ -565,118 +565,167 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> {
}
}
}
/// Creates an incomplete operand containing the [`abi::Scalar`]s expected based
/// on the `layout` passed. This is for use with [`OperandRef::insert_field`]
/// later to set the necessary immediate(s), one-by-one converting all the `Right` to `Left`.
///
/// Returns `None` for `layout`s which cannot be built this way.
pub(crate) fn builder(
layout: TyAndLayout<'tcx>,
) -> Option<OperandRef<'tcx, Either<V, abi::Scalar>>> {
// Uninhabited types are weird, because for example `Result<!, !>`
// shows up as `FieldsShape::Primitive` and we need to be able to write
// a field into `(u32, !)`. We'll do that in an `alloca` instead.
if layout.uninhabited {
return None;
}
let val = match layout.backend_repr {
BackendRepr::Memory { .. } if layout.is_zst() => OperandValue::ZeroSized,
BackendRepr::Scalar(s) => OperandValue::Immediate(Either::Right(s)),
BackendRepr::ScalarPair(a, b) => OperandValue::Pair(Either::Right(a), Either::Right(b)),
BackendRepr::Memory { .. } | BackendRepr::SimdVector { .. } => return None,
};
Some(OperandRef { val, layout })
}
}
impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, Either<V, abi::Scalar>> {
pub(crate) fn insert_field<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
/// Each of these variants starts out as `Either::Right` when it's uninitialized,
/// then setting the field changes that to `Either::Left` with the backend value.
#[derive(Debug, Copy, Clone)]
enum OperandValueBuilder<V> {
ZeroSized,
Immediate(Either<V, abi::Scalar>),
Pair(Either<V, abi::Scalar>, Either<V, abi::Scalar>),
/// `repr(simd)` types need special handling because they each have a non-empty
/// array field (which uses [`OperandValue::Ref`]) despite the SIMD type itself
/// using [`OperandValue::Immediate`] which for any other kind of type would
/// mean that its one non-ZST field would also be [`OperandValue::Immediate`].
Vector(Either<V, ()>),
}
/// Allows building up an `OperandRef` by setting fields one at a time.
#[derive(Debug, Copy, Clone)]
pub(super) struct OperandRefBuilder<'tcx, V> {
val: OperandValueBuilder<V>,
layout: TyAndLayout<'tcx>,
}
impl<'a, 'tcx, V: CodegenObject> OperandRefBuilder<'tcx, V> {
/// Creates an uninitialized builder for an instance of the `layout`.
///
/// ICEs for [`BackendRepr::Memory`] types (other than ZSTs), which should
/// be built up inside a [`PlaceRef`] instead as they need an allocated place
/// into which to write the values of the fields.
pub(super) fn new(layout: TyAndLayout<'tcx>) -> Self {
let val = match layout.backend_repr {
BackendRepr::Memory { .. } if layout.is_zst() => OperandValueBuilder::ZeroSized,
BackendRepr::Scalar(s) => OperandValueBuilder::Immediate(Either::Right(s)),
BackendRepr::ScalarPair(a, b) => {
OperandValueBuilder::Pair(Either::Right(a), Either::Right(b))
}
BackendRepr::SimdVector { .. } => OperandValueBuilder::Vector(Either::Right(())),
BackendRepr::Memory { .. } => {
bug!("Cannot use non-ZST Memory-ABI type in operand builder: {layout:?}");
}
};
OperandRefBuilder { val, layout }
}
pub(super) fn insert_field<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
&mut self,
bx: &mut Bx,
v: VariantIdx,
f: FieldIdx,
operand: OperandRef<'tcx, V>,
variant: VariantIdx,
field: FieldIdx,
field_operand: OperandRef<'tcx, V>,
) {
let (expect_zst, is_zero_offset) = if let abi::FieldsShape::Primitive = self.layout.fields {
if let OperandValue::ZeroSized = field_operand.val {
// A ZST never adds any state, so just ignore it.
// This special-casing is worth it because of things like
// `Result<!, !>` where `Ok(never)` is legal to write,
// but the type shows as FieldShape::Primitive so we can't
// actually look at the layout for the field being set.
return;
}
let is_zero_offset = if let abi::FieldsShape::Primitive = self.layout.fields {
// The other branch looking at field layouts ICEs for primitives,
// so we need to handle them separately.
// Multiple fields is possible for cases such as aggregating
// a thin pointer, where the second field is the unit.
// Because we handled ZSTs above (like the metadata in a thin pointer),
// the only possibility is that we're setting the one-and-only field.
assert!(!self.layout.is_zst());
assert_eq!(v, FIRST_VARIANT);
let first_field = f == FieldIdx::ZERO;
(!first_field, first_field)
assert_eq!(variant, FIRST_VARIANT);
assert_eq!(field, FieldIdx::ZERO);
true
} else {
let variant_layout = self.layout.for_variant(bx.cx(), v);
let field_layout = variant_layout.field(bx.cx(), f.as_usize());
let field_offset = variant_layout.fields.offset(f.as_usize());
(field_layout.is_zst(), field_offset == Size::ZERO)
let variant_layout = self.layout.for_variant(bx.cx(), variant);
let field_offset = variant_layout.fields.offset(field.as_usize());
field_offset == Size::ZERO
};
let mut update = |tgt: &mut Either<V, abi::Scalar>, src, from_scalar| {
let to_scalar = tgt.unwrap_right();
// We transmute here (rather than just `from_immediate`) because in
// `Result<usize, *const ()>` the field of the `Ok` is an integer,
// but the corresponding scalar in the enum is a pointer.
let imm = transmute_scalar(bx, src, from_scalar, to_scalar);
*tgt = Either::Left(imm);
};
match (operand.val, operand.layout.backend_repr) {
(OperandValue::ZeroSized, _) if expect_zst => {}
match (field_operand.val, field_operand.layout.backend_repr) {
(OperandValue::ZeroSized, _) => unreachable!("Handled above"),
(OperandValue::Immediate(v), BackendRepr::Scalar(from_scalar)) => match &mut self.val {
OperandValue::Immediate(val @ Either::Right(_)) if is_zero_offset => {
OperandValueBuilder::Immediate(val @ Either::Right(_)) if is_zero_offset => {
update(val, v, from_scalar);
}
OperandValue::Pair(fst @ Either::Right(_), _) if is_zero_offset => {
OperandValueBuilder::Pair(fst @ Either::Right(_), _) if is_zero_offset => {
update(fst, v, from_scalar);
}
OperandValue::Pair(_, snd @ Either::Right(_)) if !is_zero_offset => {
OperandValueBuilder::Pair(_, snd @ Either::Right(_)) if !is_zero_offset => {
update(snd, v, from_scalar);
}
_ => bug!("Tried to insert {operand:?} into {v:?}.{f:?} of {self:?}"),
_ => {
bug!("Tried to insert {field_operand:?} into {variant:?}.{field:?} of {self:?}")
}
},
(OperandValue::Immediate(v), BackendRepr::SimdVector { .. }) => match &mut self.val {
OperandValueBuilder::Vector(val @ Either::Right(())) if is_zero_offset => {
*val = Either::Left(v);
}
_ => {
bug!("Tried to insert {field_operand:?} into {variant:?}.{field:?} of {self:?}")
}
},
(OperandValue::Pair(a, b), BackendRepr::ScalarPair(from_sa, from_sb)) => {
match &mut self.val {
OperandValue::Pair(fst @ Either::Right(_), snd @ Either::Right(_)) => {
OperandValueBuilder::Pair(fst @ Either::Right(_), snd @ Either::Right(_)) => {
update(fst, a, from_sa);
update(snd, b, from_sb);
}
_ => bug!("Tried to insert {operand:?} into {v:?}.{f:?} of {self:?}"),
_ => bug!(
"Tried to insert {field_operand:?} into {variant:?}.{field:?} of {self:?}"
),
}
}
_ => bug!("Unsupported operand {operand:?} inserting into {v:?}.{f:?} of {self:?}"),
(OperandValue::Ref(place), BackendRepr::Memory { .. }) => match &mut self.val {
OperandValueBuilder::Vector(val @ Either::Right(())) => {
let ibty = bx.cx().immediate_backend_type(self.layout);
let simd = bx.load_from_place(ibty, place);
*val = Either::Left(simd);
}
_ => {
bug!("Tried to insert {field_operand:?} into {variant:?}.{field:?} of {self:?}")
}
},
_ => bug!("Operand cannot be used with `insert_field`: {field_operand:?}"),
}
}
/// Insert the immediate value `imm` for field `f` in the *type itself*,
/// rather than into one of the variants.
///
/// Most things want [`OperandRef::insert_field`] instead, but this one is
/// Most things want [`Self::insert_field`] instead, but this one is
/// necessary for writing things like enum tags that aren't in any variant.
pub(super) fn insert_imm(&mut self, f: FieldIdx, imm: V) {
let field_offset = self.layout.fields.offset(f.as_usize());
let is_zero_offset = field_offset == Size::ZERO;
match &mut self.val {
OperandValue::Immediate(val @ Either::Right(_)) if is_zero_offset => {
OperandValueBuilder::Immediate(val @ Either::Right(_)) if is_zero_offset => {
*val = Either::Left(imm);
}
OperandValue::Pair(fst @ Either::Right(_), _) if is_zero_offset => {
OperandValueBuilder::Pair(fst @ Either::Right(_), _) if is_zero_offset => {
*fst = Either::Left(imm);
}
OperandValue::Pair(_, snd @ Either::Right(_)) if !is_zero_offset => {
OperandValueBuilder::Pair(_, snd @ Either::Right(_)) if !is_zero_offset => {
*snd = Either::Left(imm);
}
_ => bug!("Tried to insert {imm:?} into field {f:?} of {self:?}"),
}
}
/// After having set all necessary fields, this converts the
/// `OperandValue<Either<V, _>>` (as obtained from [`OperandRef::builder`])
/// to the normal `OperandValue<V>`.
/// After having set all necessary fields, this converts the builder back
/// to the normal `OperandRef`.
///
/// ICEs if any required fields were not set.
pub fn build(&self, cx: &impl CodegenMethods<'tcx, Value = V>) -> OperandRef<'tcx, V> {
let OperandRef { val, layout } = *self;
pub(super) fn build(&self, cx: &impl CodegenMethods<'tcx, Value = V>) -> OperandRef<'tcx, V> {
let OperandRefBuilder { val, layout } = *self;
// For something like `Option::<u32>::None`, it's expected that the
// payload scalar will not actually have been set, so this converts
@ -692,10 +741,22 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, Either<V, abi::Scalar>> {
};
let val = match val {
OperandValue::ZeroSized => OperandValue::ZeroSized,
OperandValue::Immediate(v) => OperandValue::Immediate(unwrap(v)),
OperandValue::Pair(a, b) => OperandValue::Pair(unwrap(a), unwrap(b)),
OperandValue::Ref(_) => bug!(),
OperandValueBuilder::ZeroSized => OperandValue::ZeroSized,
OperandValueBuilder::Immediate(v) => OperandValue::Immediate(unwrap(v)),
OperandValueBuilder::Pair(a, b) => OperandValue::Pair(unwrap(a), unwrap(b)),
OperandValueBuilder::Vector(v) => match v {
Either::Left(v) => OperandValue::Immediate(v),
Either::Right(())
if let BackendRepr::SimdVector { element, .. } = layout.backend_repr
&& element.is_uninit_valid() =>
{
let bty = cx.immediate_backend_type(layout);
OperandValue::Immediate(cx.const_undef(bty))
}
Either::Right(()) => {
bug!("OperandRef::build called while fields are missing {self:?}")
}
},
};
OperandRef { val, layout }
}

View file

@ -4,10 +4,9 @@ use rustc_middle::ty::layout::{HasTyCtxt, HasTypingEnv, LayoutOf, TyAndLayout};
use rustc_middle::ty::{self, Instance, Ty, TyCtxt};
use rustc_middle::{bug, mir};
use rustc_session::config::OptLevel;
use rustc_span::{DUMMY_SP, Span};
use tracing::{debug, instrument};
use super::operand::{OperandRef, OperandValue};
use super::operand::{OperandRef, OperandRefBuilder, OperandValue};
use super::place::{PlaceRef, codegen_tag_value};
use super::{FunctionCx, LocalRef};
use crate::common::{IntPredicate, TypeKind};
@ -181,7 +180,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
}
_ => {
assert!(self.rvalue_creates_operand(rvalue, DUMMY_SP));
assert!(self.rvalue_creates_operand(rvalue));
let temp = self.codegen_rvalue_operand(bx, rvalue);
temp.val.store(bx, dest);
}
@ -354,10 +353,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
bx: &mut Bx,
rvalue: &mir::Rvalue<'tcx>,
) -> OperandRef<'tcx, Bx::Value> {
assert!(
self.rvalue_creates_operand(rvalue, DUMMY_SP),
"cannot codegen {rvalue:?} to operand",
);
assert!(self.rvalue_creates_operand(rvalue), "cannot codegen {rvalue:?} to operand",);
match *rvalue {
mir::Rvalue::Cast(ref kind, ref source, mir_cast_ty) => {
@ -668,9 +664,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
// `rvalue_creates_operand` has arranged that we only get here if
// we can build the aggregate immediate from the field immediates.
let Some(mut builder) = OperandRef::builder(layout) else {
bug!("Cannot use type in operand builder: {layout:?}")
};
let mut builder = OperandRefBuilder::new(layout);
for (field_idx, field) in fields.iter_enumerated() {
let op = self.codegen_operand(bx, field);
let fi = active_field_index.unwrap_or(field_idx);
@ -980,7 +974,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
/// will not actually take the operand path because the result type is such
/// that it always gets an `alloca`, but where it's not worth re-checking the
/// layout in this code when the right thing will happen anyway.
pub(crate) fn rvalue_creates_operand(&self, rvalue: &mir::Rvalue<'tcx>, span: Span) -> bool {
pub(crate) fn rvalue_creates_operand(&self, rvalue: &mir::Rvalue<'tcx>) -> bool {
match *rvalue {
mir::Rvalue::Cast(mir::CastKind::Transmute, ref operand, cast_ty) => {
let operand_ty = operand.ty(self.mir, self.cx.tcx());
@ -1025,18 +1019,13 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
mir::Rvalue::NullaryOp(..) |
mir::Rvalue::ThreadLocalRef(_) |
mir::Rvalue::Use(..) |
mir::Rvalue::Aggregate(..) | // (*)
mir::Rvalue::WrapUnsafeBinder(..) => // (*)
true,
// Arrays are always aggregates, so it's not worth checking anything here.
// (If it's really `[(); N]` or `[T; 0]` and we use the place path, fine.)
mir::Rvalue::Repeat(..) => false,
mir::Rvalue::Aggregate(..) => {
let ty = rvalue.ty(self.mir, self.cx.tcx());
let ty = self.monomorphize(ty);
let layout = self.cx.spanned_layout_of(ty, span);
OperandRef::<Bx::Value>::builder(layout).is_some()
}
}
}
// (*) this is only true if the type is suitable
}

View file

@ -78,6 +78,8 @@ const_eval_dealloc_kind_mismatch =
const_eval_deref_function_pointer =
accessing {$allocation} which contains a function
const_eval_deref_typeid_pointer =
accessing {$allocation} which contains a `TypeId`
const_eval_deref_vtable_pointer =
accessing {$allocation} which contains a vtable
const_eval_division_by_zero =

View file

@ -475,6 +475,7 @@ impl<'a> ReportErrorExt for UndefinedBehaviorInfo<'a> {
WriteToReadOnly(_) => const_eval_write_to_read_only,
DerefFunctionPointer(_) => const_eval_deref_function_pointer,
DerefVTablePointer(_) => const_eval_deref_vtable_pointer,
DerefTypeIdPointer(_) => const_eval_deref_typeid_pointer,
InvalidBool(_) => const_eval_invalid_bool,
InvalidChar(_) => const_eval_invalid_char,
InvalidTag(_) => const_eval_invalid_tag,
@ -588,7 +589,10 @@ impl<'a> ReportErrorExt for UndefinedBehaviorInfo<'a> {
diag.arg("has", has.bytes());
diag.arg("msg", format!("{msg:?}"));
}
WriteToReadOnly(alloc) | DerefFunctionPointer(alloc) | DerefVTablePointer(alloc) => {
WriteToReadOnly(alloc)
| DerefFunctionPointer(alloc)
| DerefVTablePointer(alloc)
| DerefTypeIdPointer(alloc) => {
diag.arg("allocation", alloc);
}
InvalidBool(b) => {

View file

@ -96,7 +96,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
/// This inherent method takes priority over the trait method with the same name in LayoutOf,
/// and allows wrapping the actual [LayoutOf::layout_of] with a tracing span.
/// See [LayoutOf::layout_of] for the original documentation.
#[inline]
#[inline(always)]
pub fn layout_of(
&self,
ty: Ty<'tcx>,

View file

@ -4,7 +4,7 @@
use std::assert_matches::assert_matches;
use rustc_abi::Size;
use rustc_abi::{FieldIdx, Size};
use rustc_apfloat::ieee::{Double, Half, Quad, Single};
use rustc_middle::mir::{self, BinOp, ConstValue, NonDivergingIntrinsic};
use rustc_middle::ty::layout::TyAndLayout;
@ -28,8 +28,35 @@ pub(crate) fn alloc_type_name<'tcx>(tcx: TyCtxt<'tcx>, ty: Ty<'tcx>) -> ConstAll
let alloc = Allocation::from_bytes_byte_aligned_immutable(path.into_bytes(), ());
tcx.mk_const_alloc(alloc)
}
impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
/// Generates a value of `TypeId` for `ty` in-place.
pub(crate) fn write_type_id(
&mut self,
ty: Ty<'tcx>,
dest: &PlaceTy<'tcx, M::Provenance>,
) -> InterpResult<'tcx, ()> {
let tcx = self.tcx;
let type_id_hash = tcx.type_id_hash(ty).as_u128();
let op = self.const_val_to_op(
ConstValue::Scalar(Scalar::from_u128(type_id_hash)),
tcx.types.u128,
None,
)?;
self.copy_op_allow_transmute(&op, dest)?;
// Give the first pointer-size bytes provenance that knows about the type id.
// Here we rely on `TypeId` being a newtype around an array of pointers, so we
// first project to its only field and then the first array element.
let alloc_id = tcx.reserve_and_set_type_id_alloc(ty);
let first = self.project_field(dest, FieldIdx::ZERO)?;
let first = self.project_index(&first, 0)?;
let offset = self.read_scalar(&first)?.to_target_usize(&tcx)?;
let ptr = Pointer::new(alloc_id.into(), Size::from_bytes(offset));
let ptr = self.global_root_pointer(ptr)?;
let val = Scalar::from_pointer(ptr, &tcx);
self.write_scalar(val, &first)
}
/// Returns `true` if emulation happened.
/// Here we implement the intrinsics that are common to all Miri instances; individual machines can add their own
/// intrinsic handling.
@ -63,9 +90,48 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
sym::type_id => {
let tp_ty = instance.args.type_at(0);
ensure_monomorphic_enough(tcx, tp_ty)?;
let val = ConstValue::from_u128(tcx.type_id_hash(tp_ty).as_u128());
let val = self.const_val_to_op(val, dest.layout.ty, Some(dest.layout))?;
self.copy_op(&val, dest)?;
self.write_type_id(tp_ty, dest)?;
}
sym::type_id_eq => {
// Both operands are `TypeId`, which is a newtype around an array of pointers.
// Project until we have the array elements.
let a_fields = self.project_field(&args[0], FieldIdx::ZERO)?;
let b_fields = self.project_field(&args[1], FieldIdx::ZERO)?;
let mut a_fields = self.project_array_fields(&a_fields)?;
let mut b_fields = self.project_array_fields(&b_fields)?;
let (_idx, a) = a_fields
.next(self)?
.expect("we know the layout of TypeId has at least 2 array elements");
let a = self.deref_pointer(&a)?;
let (a, offset_a) = self.get_ptr_type_id(a.ptr())?;
let (_idx, b) = b_fields
.next(self)?
.expect("we know the layout of TypeId has at least 2 array elements");
let b = self.deref_pointer(&b)?;
let (b, offset_b) = self.get_ptr_type_id(b.ptr())?;
let provenance_matches = a == b;
let mut eq_id = offset_a == offset_b;
while let Some((_, a)) = a_fields.next(self)? {
let (_, b) = b_fields.next(self)?.unwrap();
let a = self.read_target_usize(&a)?;
let b = self.read_target_usize(&b)?;
eq_id &= a == b;
}
if !eq_id && provenance_matches {
throw_ub_format!(
"type_id_eq: one of the TypeId arguments is invalid, the hash does not match the type it represents"
)
}
self.write_scalar(Scalar::from_bool(provenance_matches), dest)?;
}
sym::variant_count => {
let tp_ty = instance.args.type_at(0);

View file

@ -18,8 +18,8 @@ use rustc_target::callconv::FnAbi;
use super::{
AllocBytes, AllocId, AllocKind, AllocRange, Allocation, CTFE_ALLOC_SALT, ConstAllocation,
CtfeProvenance, FnArg, Frame, ImmTy, InterpCx, InterpResult, MPlaceTy, MemoryKind,
Misalignment, OpTy, PlaceTy, Pointer, Provenance, RangeSet, interp_ok, throw_unsup,
CtfeProvenance, EnteredTraceSpan, FnArg, Frame, ImmTy, InterpCx, InterpResult, MPlaceTy,
MemoryKind, Misalignment, OpTy, PlaceTy, Pointer, Provenance, RangeSet, interp_ok, throw_unsup,
};
/// Data returned by [`Machine::after_stack_pop`], and consumed by
@ -147,12 +147,6 @@ pub trait Machine<'tcx>: Sized {
/// already been checked before.
const ALL_CONSTS_ARE_PRECHECKED: bool = true;
/// Determines whether rustc_const_eval functions that make use of the [Machine] should make
/// tracing calls (to the `tracing` library). By default this is `false`, meaning the tracing
/// calls will supposedly be optimized out. This flag is set to `true` inside Miri, to allow
/// tracing the interpretation steps, among other things.
const TRACING_ENABLED: bool = false;
/// Whether memory accesses should be alignment-checked.
fn enforce_alignment(ecx: &InterpCx<'tcx, Self>) -> bool;
@ -634,6 +628,17 @@ pub trait Machine<'tcx>: Sized {
/// Compute the value passed to the constructors of the `AllocBytes` type for
/// abstract machine allocations.
fn get_default_alloc_params(&self) -> <Self::Bytes as AllocBytes>::AllocParams;
/// Allows enabling/disabling tracing calls from within `rustc_const_eval` at compile time, by
/// delegating the entering of [tracing::Span]s to implementors of the [Machine] trait. The
/// default implementation corresponds to tracing being disabled, meaning the tracing calls will
/// supposedly be optimized out completely. To enable tracing, override this trait method and
/// return `span.entered()`. Also see [crate::enter_trace_span].
#[must_use]
#[inline(always)]
fn enter_trace_span(_span: impl FnOnce() -> tracing::Span) -> impl EnteredTraceSpan {
()
}
}
/// A lot of the flexibility above is just needed for `Miri`, but all "compile-time" machines

View file

@ -15,9 +15,9 @@ use std::{fmt, ptr};
use rustc_abi::{Align, HasDataLayout, Size};
use rustc_ast::Mutability;
use rustc_data_structures::fx::{FxHashSet, FxIndexMap};
use rustc_middle::bug;
use rustc_middle::mir::display_allocation;
use rustc_middle::ty::{self, Instance, Ty, TyCtxt};
use rustc_middle::{bug, throw_ub_format};
use tracing::{debug, instrument, trace};
use super::{
@ -346,6 +346,13 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
kind = "vtable",
)
}
Some(GlobalAlloc::TypeId { .. }) => {
err_ub_custom!(
fluent::const_eval_invalid_dealloc,
alloc_id = alloc_id,
kind = "typeid",
)
}
Some(GlobalAlloc::Static(..) | GlobalAlloc::Memory(..)) => {
err_ub_custom!(
fluent::const_eval_invalid_dealloc,
@ -615,6 +622,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
}
Some(GlobalAlloc::Function { .. }) => throw_ub!(DerefFunctionPointer(id)),
Some(GlobalAlloc::VTable(..)) => throw_ub!(DerefVTablePointer(id)),
Some(GlobalAlloc::TypeId { .. }) => throw_ub!(DerefTypeIdPointer(id)),
None => throw_ub!(PointerUseAfterFree(id, CheckInAllocMsg::MemoryAccess)),
Some(GlobalAlloc::Static(def_id)) => {
assert!(self.tcx.is_static(def_id));
@ -896,7 +904,9 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
let (size, align) = global_alloc.size_and_align(*self.tcx, self.typing_env);
let mutbl = global_alloc.mutability(*self.tcx, self.typing_env);
let kind = match global_alloc {
GlobalAlloc::Static { .. } | GlobalAlloc::Memory { .. } => AllocKind::LiveData,
GlobalAlloc::TypeId { .. }
| GlobalAlloc::Static { .. }
| GlobalAlloc::Memory { .. } => AllocKind::LiveData,
GlobalAlloc::Function { .. } => bug!("We already checked function pointers above"),
GlobalAlloc::VTable { .. } => AllocKind::VTable,
};
@ -936,6 +946,19 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
}
}
/// Takes a pointer that is the first chunk of a `TypeId` and return the type that its
/// provenance refers to, as well as the segment of the hash that this pointer covers.
pub fn get_ptr_type_id(
&self,
ptr: Pointer<Option<M::Provenance>>,
) -> InterpResult<'tcx, (Ty<'tcx>, Size)> {
let (alloc_id, offset, _meta) = self.ptr_get_alloc_id(ptr, 0)?;
let GlobalAlloc::TypeId { ty } = self.tcx.global_alloc(alloc_id) else {
throw_ub_format!("type_id_eq: `TypeId` provenance is not a type id")
};
interp_ok((ty, offset))
}
pub fn get_ptr_fn(
&self,
ptr: Pointer<Option<M::Provenance>>,
@ -1197,6 +1220,9 @@ impl<'a, 'tcx, M: Machine<'tcx>> std::fmt::Debug for DumpAllocs<'a, 'tcx, M> {
Some(GlobalAlloc::VTable(ty, dyn_ty)) => {
write!(fmt, " (vtable: impl {dyn_ty} for {ty})")?;
}
Some(GlobalAlloc::TypeId { ty }) => {
write!(fmt, " (typeid for {ty})")?;
}
Some(GlobalAlloc::Static(did)) => {
write!(fmt, " (static: {})", self.ecx.tcx.def_path_str(did))?;
}

View file

@ -37,6 +37,7 @@ pub use self::place::{MPlaceTy, MemPlaceMeta, PlaceTy, Writeable};
use self::place::{MemPlace, Place};
pub use self::projection::{OffsetMode, Projectable};
pub use self::stack::{Frame, FrameInfo, LocalState, ReturnContinuation, StackPopInfo};
pub use self::util::EnteredTraceSpan;
pub(crate) use self::util::create_static_alloc;
pub use self::validity::{CtfeValidationMode, RangeSet, RefTracking};
pub use self::visitor::ValueVisitor;

View file

@ -296,7 +296,11 @@ where
base: &'a P,
) -> InterpResult<'tcx, ArrayIterator<'a, 'tcx, M::Provenance, P>> {
let abi::FieldsShape::Array { stride, .. } = base.layout().fields else {
span_bug!(self.cur_span(), "project_array_fields: expected an array layout");
span_bug!(
self.cur_span(),
"project_array_fields: expected an array layout, got {:#?}",
base.layout()
);
};
let len = base.len(self)?;
let field_layout = base.layout().field(self, 0);

View file

@ -46,21 +46,20 @@ pub(crate) fn create_static_alloc<'tcx>(
interp_ok(ecx.ptr_to_mplace(Pointer::from(alloc_id).into(), layout))
}
/// This struct is needed to enforce `#[must_use]` on [tracing::span::EnteredSpan]
/// while wrapping them in an `Option`.
#[must_use]
pub enum MaybeEnteredSpan {
Some(tracing::span::EnteredSpan),
None,
}
/// A marker trait returned by [crate::interpret::Machine::enter_trace_span], identifying either a
/// real [tracing::span::EnteredSpan] in case tracing is enabled, or the dummy type `()` when
/// tracing is disabled.
pub trait EnteredTraceSpan {}
impl EnteredTraceSpan for () {}
impl EnteredTraceSpan for tracing::span::EnteredSpan {}
/// Shortand for calling [crate::interpret::Machine::enter_trace_span] on a [tracing::info_span].
/// This is supposed to be compiled out when [crate::interpret::Machine::enter_trace_span] has the
/// default implementation (i.e. when it does not actually enter the span but instead returns `()`).
/// Note: the result of this macro **must be used** because the span is exited when it's dropped.
#[macro_export]
macro_rules! enter_trace_span {
($machine:ident, $($tt:tt)*) => {
if $machine::TRACING_ENABLED {
$crate::interpret::util::MaybeEnteredSpan::Some(tracing::info_span!($($tt)*).entered())
} else {
$crate::interpret::util::MaybeEnteredSpan::None
}
$machine::enter_trace_span(|| tracing::info_span!($($tt)*))
}
}

View file

@ -571,40 +571,42 @@ impl<'rt, 'tcx, M: Machine<'tcx>> ValidityVisitor<'rt, 'tcx, M> {
let alloc_actual_mutbl =
global_alloc.mutability(*self.ecx.tcx, self.ecx.typing_env);
if let GlobalAlloc::Static(did) = global_alloc {
let DefKind::Static { nested, .. } = self.ecx.tcx.def_kind(did) else {
bug!()
};
// Special handling for pointers to statics (irrespective of their type).
assert!(!self.ecx.tcx.is_thread_local_static(did));
assert!(self.ecx.tcx.is_static(did));
// Mode-specific checks
match ctfe_mode {
CtfeValidationMode::Static { .. }
| CtfeValidationMode::Promoted { .. } => {
// We skip recursively checking other statics. These statics must be sound by
// themselves, and the only way to get broken statics here is by using
// unsafe code.
// The reasons we don't check other statics is twofold. For one, in all
// sound cases, the static was already validated on its own, and second, we
// trigger cycle errors if we try to compute the value of the other static
// and that static refers back to us (potentially through a promoted).
// This could miss some UB, but that's fine.
// We still walk nested allocations, as they are fundamentally part of this validation run.
// This means we will also recurse into nested statics of *other*
// statics, even though we do not recurse into other statics directly.
// That's somewhat inconsistent but harmless.
skip_recursive_check = !nested;
}
CtfeValidationMode::Const { .. } => {
// If this is mutable memory or an `extern static`, there's no point in checking it -- we'd
// just get errors trying to read the value.
if alloc_actual_mutbl.is_mut() || self.ecx.tcx.is_foreign_item(did)
{
skip_recursive_check = true;
match global_alloc {
GlobalAlloc::Static(did) => {
let DefKind::Static { nested, .. } = self.ecx.tcx.def_kind(did) else {
bug!()
};
assert!(!self.ecx.tcx.is_thread_local_static(did));
assert!(self.ecx.tcx.is_static(did));
match ctfe_mode {
CtfeValidationMode::Static { .. }
| CtfeValidationMode::Promoted { .. } => {
// We skip recursively checking other statics. These statics must be sound by
// themselves, and the only way to get broken statics here is by using
// unsafe code.
// The reasons we don't check other statics is twofold. For one, in all
// sound cases, the static was already validated on its own, and second, we
// trigger cycle errors if we try to compute the value of the other static
// and that static refers back to us (potentially through a promoted).
// This could miss some UB, but that's fine.
// We still walk nested allocations, as they are fundamentally part of this validation run.
// This means we will also recurse into nested statics of *other*
// statics, even though we do not recurse into other statics directly.
// That's somewhat inconsistent but harmless.
skip_recursive_check = !nested;
}
CtfeValidationMode::Const { .. } => {
// If this is mutable memory or an `extern static`, there's no point in checking it -- we'd
// just get errors trying to read the value.
if alloc_actual_mutbl.is_mut()
|| self.ecx.tcx.is_foreign_item(did)
{
skip_recursive_check = true;
}
}
}
}
_ => (),
}
// If this allocation has size zero, there is no actual mutability here.

View file

@ -3526,7 +3526,7 @@ pub fn is_case_difference(sm: &SourceMap, suggested: &str, sp: Span) -> bool {
// All the chars that differ in capitalization are confusable (above):
let confusable = iter::zip(found.chars(), suggested.chars())
.filter(|(f, s)| f != s)
.all(|(f, s)| (ascii_confusables.contains(&f) || ascii_confusables.contains(&s)));
.all(|(f, s)| ascii_confusables.contains(&f) || ascii_confusables.contains(&s));
confusable && found.to_lowercase() == suggested.to_lowercase()
// FIXME: We sometimes suggest the same thing we already have, which is a
// bug, but be defensive against that here.

View file

@ -133,6 +133,32 @@ expand_module_multiple_candidates =
expand_must_repeat_once =
this must repeat at least once
expand_mve_extra_tokens =
unexpected trailing tokens
.label = for this metavariable expression
.range = the `{$name}` metavariable expression takes between {$min_or_exact_args} and {$max_args} arguments
.exact = the `{$name}` metavariable expression takes {$min_or_exact_args ->
[zero] no arguments
[one] a single argument
*[other] {$min_or_exact_args} arguments
}
.suggestion = try removing {$extra_count ->
[one] this token
*[other] these tokens
}
expand_mve_missing_paren =
expected `(`
.label = for this metavariable expression
.unexpected = unexpected token
.note = metavariable expressions use function-like parentheses syntax
.suggestion = try adding parentheses
expand_mve_unrecognized_expr =
unrecognized metavariable expression
.label = not a valid metavariable expression
.note = valid metavariable expressions are {$valid_expr_list}
expand_mve_unrecognized_var =
variable `{$key}` is not recognized in meta-variable expression

View file

@ -496,6 +496,50 @@ pub(crate) use metavar_exprs::*;
mod metavar_exprs {
use super::*;
/// Diagnostic for unexpected trailing tokens inside a metavariable expression,
/// e.g. `${len() extra}` or `${ignore($valid, extra)}`.
#[derive(Diagnostic, Default)]
#[diag(expand_mve_extra_tokens)]
pub(crate) struct MveExtraTokens {
// Span covering all surplus tokens; the machine-applicable suggestion deletes them.
#[primary_span]
#[suggestion(code = "", applicability = "machine-applicable")]
pub span: Span,
// Span of the expression identifier (e.g. `count`), labeled in the output.
#[label]
pub ident_span: Span,
// Number of surplus tokens; drives pluralization of the suggestion text.
pub extra_count: usize,
// The rest is only used for specific diagnostics and can be default if neither
// `note` is `Some`.
#[note(expand_exact)]
pub exact_args_note: Option<()>,
#[note(expand_range)]
pub range_args_note: Option<()>,
pub min_or_exact_args: usize,
pub max_args: usize,
pub name: String,
}
/// Diagnostic for a metavariable expression that is not followed by a
/// parenthesized argument list, e.g. `${foo}` or `${foo[...]}`.
#[derive(Diagnostic)]
#[note]
#[diag(expand_mve_missing_paren)]
pub(crate) struct MveMissingParen {
// Span of the expression identifier; also the primary span of the diagnostic.
#[primary_span]
#[label]
pub ident_span: Span,
// Span of an unexpected token found where `(` was required, if any.
#[label(expand_unexpected)]
pub unexpected_span: Option<Span>,
// Position right after the identifier where parentheses could be inserted,
// set only when no delimiter followed at all.
#[suggestion(code = "( /* ... */ )", applicability = "has-placeholders")]
pub insert_span: Option<Span>,
}
/// Diagnostic for an unknown metavariable expression name, e.g. `${bogus()}`.
#[derive(Diagnostic)]
#[note]
#[diag(expand_mve_unrecognized_expr)]
pub(crate) struct MveUnrecognizedExpr {
// Span of the unrecognized identifier.
#[primary_span]
#[label]
pub span: Span,
// Pre-formatted list of valid expression names, interpolated into the note.
pub valid_expr_list: &'static str,
}
#[derive(Diagnostic)]
#[diag(expand_mve_unrecognized_var)]
pub(crate) struct MveUnrecognizedVar {

View file

@ -7,6 +7,8 @@ use rustc_macros::{Decodable, Encodable};
use rustc_session::parse::ParseSess;
use rustc_span::{Ident, Span, Symbol};
use crate::errors;
pub(crate) const RAW_IDENT_ERR: &str = "`${concat(..)}` currently does not support raw identifiers";
pub(crate) const UNSUPPORTED_CONCAT_ELEM_ERR: &str = "expected identifier or string literal";
@ -40,11 +42,32 @@ impl MetaVarExpr {
) -> PResult<'psess, MetaVarExpr> {
let mut iter = input.iter();
let ident = parse_ident(&mut iter, psess, outer_span)?;
let Some(TokenTree::Delimited(.., Delimiter::Parenthesis, args)) = iter.next() else {
let msg = "meta-variable expression parameter must be wrapped in parentheses";
return Err(psess.dcx().struct_span_err(ident.span, msg));
let next = iter.next();
let Some(TokenTree::Delimited(.., Delimiter::Parenthesis, args)) = next else {
// No `()`; wrong or no delimiters. Point at a problematic span or a place to
// add parens if it makes sense.
let (unexpected_span, insert_span) = match next {
Some(TokenTree::Delimited(..)) => (None, None),
Some(tt) => (Some(tt.span()), None),
None => (None, Some(ident.span.shrink_to_hi())),
};
let err =
errors::MveMissingParen { ident_span: ident.span, unexpected_span, insert_span };
return Err(psess.dcx().create_err(err));
};
check_trailing_token(&mut iter, psess)?;
// Ensure there are no trailing tokens in the braces, e.g. `${foo() extra}`
if iter.peek().is_some() {
let span = iter_span(&iter).expect("checked is_some above");
let err = errors::MveExtraTokens {
span,
ident_span: ident.span,
extra_count: iter.count(),
..Default::default()
};
return Err(psess.dcx().create_err(err));
}
let mut iter = args.iter();
let rslt = match ident.as_str() {
"concat" => parse_concat(&mut iter, psess, outer_span, ident.span)?,
@ -56,18 +79,14 @@ impl MetaVarExpr {
"index" => MetaVarExpr::Index(parse_depth(&mut iter, psess, ident.span)?),
"len" => MetaVarExpr::Len(parse_depth(&mut iter, psess, ident.span)?),
_ => {
let err_msg = "unrecognized meta-variable expression";
let mut err = psess.dcx().struct_span_err(ident.span, err_msg);
err.span_suggestion(
ident.span,
"supported expressions are count, ignore, index and len",
"",
Applicability::MachineApplicable,
);
return Err(err);
let err = errors::MveUnrecognizedExpr {
span: ident.span,
valid_expr_list: "`count`, `ignore`, `index`, `len`, and `concat`",
};
return Err(psess.dcx().create_err(err));
}
};
check_trailing_token(&mut iter, psess)?;
check_trailing_tokens(&mut iter, psess, ident)?;
Ok(rslt)
}
@ -87,20 +106,51 @@ impl MetaVarExpr {
}
}
// Checks if there are any remaining tokens. For example, `${ignore(ident ... a b c ...)}`
fn check_trailing_token<'psess>(
/// Checks if there are any remaining tokens (for example, `${ignore($valid, extra)}`) and create
/// a diag with the correct arg count if so.
fn check_trailing_tokens<'psess>(
iter: &mut TokenStreamIter<'_>,
psess: &'psess ParseSess,
ident: Ident,
) -> PResult<'psess, ()> {
if let Some(tt) = iter.next() {
let mut diag = psess
.dcx()
.struct_span_err(tt.span(), format!("unexpected token: {}", pprust::tt_to_string(tt)));
diag.span_note(tt.span(), "meta-variable expression must not have trailing tokens");
Err(diag)
} else {
Ok(())
if iter.peek().is_none() {
// All tokens consumed, as expected
return Ok(());
}
// `None` for max indicates the arg count must be exact, `Some` indicates a range is accepted.
let (min_or_exact_args, max_args) = match ident.as_str() {
"concat" => panic!("concat takes unlimited tokens but didn't eat them all"),
"ignore" => (1, None),
// 1 or 2 args
"count" => (1, Some(2)),
// 0 or 1 arg
"index" => (0, Some(1)),
"len" => (0, Some(1)),
other => unreachable!("unknown MVEs should be rejected earlier (got `{other}`)"),
};
let err = errors::MveExtraTokens {
span: iter_span(iter).expect("checked is_none above"),
ident_span: ident.span,
extra_count: iter.count(),
exact_args_note: if max_args.is_some() { None } else { Some(()) },
range_args_note: if max_args.is_some() { Some(()) } else { None },
min_or_exact_args,
max_args: max_args.unwrap_or_default(),
name: ident.to_string(),
};
Err(psess.dcx().create_err(err))
}
/// Returns a span encompassing all tokens in the iterator if there is at least one item.
fn iter_span(iter: &TokenStreamIter<'_>) -> Option<Span> {
let mut iter = iter.clone(); // cloning is cheap
let first_sp = iter.next()?.span();
let last_sp = iter.last().map(TokenTree::span).unwrap_or(first_sp);
let span = first_sp.with_hi(last_sp.hi());
Some(span)
}
/// Indicates what is placed in a `concat` parameter. For example, literals

View file

@ -274,6 +274,8 @@ language_item_table! {
PartialOrd, sym::partial_ord, partial_ord_trait, Target::Trait, GenericRequirement::Exact(1);
CVoid, sym::c_void, c_void, Target::Enum, GenericRequirement::None;
TypeId, sym::type_id, type_id, Target::Struct, GenericRequirement::None;
// A number of panic-related lang items. The `panic` item corresponds to divide-by-zero and
// various panic cases with `match`. The `panic_bounds_check` item is for indexing arrays.
//

View file

@ -93,6 +93,7 @@ fn intrinsic_operation_unsafety(tcx: TyCtxt<'_>, intrinsic_id: LocalDefId) -> hi
| sym::three_way_compare
| sym::discriminant_value
| sym::type_id
| sym::type_id_eq
| sym::select_unpredictable
| sym::cold_path
| sym::ptr_guaranteed_cmp
@ -220,7 +221,13 @@ pub(crate) fn check_intrinsic_type(
sym::needs_drop => (1, 0, vec![], tcx.types.bool),
sym::type_name => (1, 0, vec![], Ty::new_static_str(tcx)),
sym::type_id => (1, 0, vec![], tcx.types.u128),
sym::type_id => {
(1, 0, vec![], tcx.type_of(tcx.lang_items().type_id().unwrap()).instantiate_identity())
}
sym::type_id_eq => {
let type_id = tcx.type_of(tcx.lang_items().type_id().unwrap()).instantiate_identity();
(0, 0, vec![type_id, type_id], tcx.types.bool)
}
sym::offset => (2, 0, vec![param(0), param(1)], param(0)),
sym::arith_offset => (
1,

View file

@ -59,8 +59,7 @@ declare_lint! {
}
declare_lint! {
/// The `overflowing_literals` lint detects literal out of range for its
/// type.
/// The `overflowing_literals` lint detects literals out of range for their type.
///
/// ### Example
///
@ -72,9 +71,9 @@ declare_lint! {
///
/// ### Explanation
///
/// It is usually a mistake to use a literal that overflows the type where
/// it is used. Either use a literal that is within range, or change the
/// type to be within the range of the literal.
/// It is usually a mistake to use a literal that overflows its type.
/// Change either the literal or its type such that the literal is
/// within the range of its type.
OVERFLOWING_LITERALS,
Deny,
"literal out of range for its type"

View file

@ -1,7 +1,7 @@
use std::iter;
use rustc_ast::util::{classify, parser};
use rustc_ast::{self as ast, ExprKind, HasAttrs as _, StmtKind};
use rustc_ast::{self as ast, ExprKind, FnRetTy, HasAttrs as _, StmtKind};
use rustc_attr_data_structures::{AttributeKind, find_attr};
use rustc_data_structures::fx::FxHashMap;
use rustc_errors::{MultiSpan, pluralize};
@ -599,6 +599,7 @@ enum UnusedDelimsCtx {
AnonConst,
MatchArmExpr,
IndexExpr,
ClosureBody,
}
impl From<UnusedDelimsCtx> for &'static str {
@ -620,6 +621,7 @@ impl From<UnusedDelimsCtx> for &'static str {
UnusedDelimsCtx::ArrayLenExpr | UnusedDelimsCtx::AnonConst => "const expression",
UnusedDelimsCtx::MatchArmExpr => "match arm expression",
UnusedDelimsCtx::IndexExpr => "index expression",
UnusedDelimsCtx::ClosureBody => "closure body",
}
}
}
@ -919,6 +921,11 @@ trait UnusedDelimLint {
let (args_to_check, ctx) = match *call_or_other {
Call(_, ref args) => (&args[..], UnusedDelimsCtx::FunctionArg),
MethodCall(ref call) => (&call.args[..], UnusedDelimsCtx::MethodArg),
Closure(ref closure)
if matches!(closure.fn_decl.output, FnRetTy::Default(_)) =>
{
(&[closure.body.clone()][..], UnusedDelimsCtx::ClosureBody)
}
// actual catch-all arm
_ => {
return;
@ -1508,6 +1515,7 @@ impl UnusedDelimLint for UnusedBraces {
&& (ctx != UnusedDelimsCtx::AnonConst
|| (matches!(expr.kind, ast::ExprKind::Lit(_))
&& !expr.span.from_expansion()))
&& ctx != UnusedDelimsCtx::ClosureBody
&& !cx.sess().source_map().is_multiline(value.span)
&& value.attrs.is_empty()
&& !value.span.from_expansion()

View file

@ -4343,11 +4343,12 @@ declare_lint! {
///
/// [future-incompatible]: ../index.md#future-incompatible-lints
pub AMBIGUOUS_GLOB_IMPORTS,
Warn,
Deny,
"detects certain glob imports that require reporting an ambiguity error",
@future_incompatible = FutureIncompatibleInfo {
reason: FutureIncompatibilityReason::FutureReleaseError,
reference: "issue #114095 <https://github.com/rust-lang/rust/issues/114095>",
report_in_deps: true,
};
}

View file

@ -392,6 +392,8 @@ pub enum UndefinedBehaviorInfo<'tcx> {
DerefFunctionPointer(AllocId),
/// Trying to access the data behind a vtable pointer.
DerefVTablePointer(AllocId),
/// Trying to access the actual type id.
DerefTypeIdPointer(AllocId),
/// Using a non-boolean `u8` as bool.
InvalidBool(u8),
/// Using a non-character `u32` as character.

View file

@ -103,6 +103,7 @@ enum AllocDiscriminant {
Fn,
VTable,
Static,
Type,
}
pub fn specialized_encode_alloc_id<'tcx, E: TyEncoder<'tcx>>(
@ -127,6 +128,11 @@ pub fn specialized_encode_alloc_id<'tcx, E: TyEncoder<'tcx>>(
ty.encode(encoder);
poly_trait_ref.encode(encoder);
}
GlobalAlloc::TypeId { ty } => {
trace!("encoding {alloc_id:?} with {ty:#?}");
AllocDiscriminant::Type.encode(encoder);
ty.encode(encoder);
}
GlobalAlloc::Static(did) => {
assert!(!tcx.is_thread_local_static(did));
// References to statics doesn't need to know about their allocations,
@ -228,6 +234,12 @@ impl<'s> AllocDecodingSession<'s> {
trace!("decoded vtable alloc instance: {ty:?}, {poly_trait_ref:?}");
decoder.interner().reserve_and_set_vtable_alloc(ty, poly_trait_ref, CTFE_ALLOC_SALT)
}
AllocDiscriminant::Type => {
trace!("creating typeid alloc ID");
let ty = Decodable::decode(decoder);
trace!("decoded typid: {ty:?}");
decoder.interner().reserve_and_set_type_id_alloc(ty)
}
AllocDiscriminant::Static => {
trace!("creating extern static alloc ID");
let did = <DefId as Decodable<D>>::decode(decoder);
@ -258,6 +270,9 @@ pub enum GlobalAlloc<'tcx> {
Static(DefId),
/// The alloc ID points to memory.
Memory(ConstAllocation<'tcx>),
/// The first pointer-sized segment of a type id. On 64 bit systems, the 128 bit type id
/// is split into two segments, on 32 bit systems there are 4 segments, and so on.
TypeId { ty: Ty<'tcx> },
}
impl<'tcx> GlobalAlloc<'tcx> {
@ -296,9 +311,10 @@ impl<'tcx> GlobalAlloc<'tcx> {
pub fn address_space(&self, cx: &impl HasDataLayout) -> AddressSpace {
match self {
GlobalAlloc::Function { .. } => cx.data_layout().instruction_address_space,
GlobalAlloc::Static(..) | GlobalAlloc::Memory(..) | GlobalAlloc::VTable(..) => {
AddressSpace::ZERO
}
GlobalAlloc::TypeId { .. }
| GlobalAlloc::Static(..)
| GlobalAlloc::Memory(..)
| GlobalAlloc::VTable(..) => AddressSpace::ZERO,
}
}
@ -334,7 +350,7 @@ impl<'tcx> GlobalAlloc<'tcx> {
}
}
GlobalAlloc::Memory(alloc) => alloc.inner().mutability,
GlobalAlloc::Function { .. } | GlobalAlloc::VTable(..) => {
GlobalAlloc::TypeId { .. } | GlobalAlloc::Function { .. } | GlobalAlloc::VTable(..) => {
// These are immutable.
Mutability::Not
}
@ -380,8 +396,10 @@ impl<'tcx> GlobalAlloc<'tcx> {
GlobalAlloc::Function { .. } => (Size::ZERO, Align::ONE),
GlobalAlloc::VTable(..) => {
// No data to be accessed here. But vtables are pointer-aligned.
return (Size::ZERO, tcx.data_layout.pointer_align().abi);
(Size::ZERO, tcx.data_layout.pointer_align().abi)
}
// Fake allocation, there's nothing to access here
GlobalAlloc::TypeId { .. } => (Size::ZERO, Align::ONE),
}
}
}
@ -487,6 +505,11 @@ impl<'tcx> TyCtxt<'tcx> {
self.reserve_and_set_dedup(GlobalAlloc::VTable(ty, dyn_ty), salt)
}
/// Generates an [AllocId] for a [core::any::TypeId]. Will get deduplicated.
pub fn reserve_and_set_type_id_alloc(self, ty: Ty<'tcx>) -> AllocId {
self.reserve_and_set_dedup(GlobalAlloc::TypeId { ty }, 0)
}
/// Interns the `Allocation` and return a new `AllocId`, even if there's already an identical
/// `Allocation` with a different `AllocId`.
/// Statics with identical content will still point to the same `Allocation`, i.e.,

View file

@ -1621,6 +1621,7 @@ pub fn write_allocations<'tcx>(
Some(GlobalAlloc::VTable(ty, dyn_ty)) => {
write!(w, " (vtable: impl {dyn_ty} for {ty})")?
}
Some(GlobalAlloc::TypeId { ty }) => write!(w, " (typeid for {ty})")?,
Some(GlobalAlloc::Static(did)) if !tcx.is_foreign_item(did) => {
write!(w, " (static: {}", tcx.def_path_str(did))?;
if body.phase <= MirPhase::Runtime(RuntimePhase::PostCleanup)

View file

@ -1773,6 +1773,7 @@ pub trait PrettyPrinter<'tcx>: Printer<'tcx> + fmt::Write {
}
Some(GlobalAlloc::Function { .. }) => p!("<function>"),
Some(GlobalAlloc::VTable(..)) => p!("<vtable>"),
Some(GlobalAlloc::TypeId { .. }) => p!("<typeid>"),
None => p!("<dangling pointer>"),
}
return Ok(());

View file

@ -927,6 +927,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
scrut_span: rustc_span::Span::default(),
refutable: true,
known_valid_scrutinee: true,
internal_state: Default::default(),
};
let valtree = match self.eval_unevaluated_mir_constant_to_valtree(constant) {

View file

@ -406,6 +406,7 @@ impl<'p, 'tcx> MatchVisitor<'p, 'tcx> {
scrut_span,
refutable,
known_valid_scrutinee,
internal_state: Default::default(),
}
}

View file

@ -1,5 +1,6 @@
use rustc_abi::VariantIdx;
use rustc_middle::mir::{self, Body, Location, Terminator, TerminatorKind};
use smallvec::SmallVec;
use tracing::debug;
use super::move_paths::{InitKind, LookupResult, MoveData, MovePathIndex};
@ -155,15 +156,28 @@ where
}
}
/// Calls `handle_inactive_variant` for each descendant move path of `enum_place` that contains a
/// `Downcast` to a variant besides the `active_variant`.
///
/// NOTE: If there are no move paths corresponding to an inactive variant,
/// `handle_inactive_variant` will not be called for that variant.
/// Indicates which variants are inactive at a `SwitchInt` edge by listing their `VariantIdx`s or
/// specifying the single active variant's `VariantIdx`.
pub(crate) enum InactiveVariants {
/// Explicit list of the inactive variants on this edge.
Inactives(SmallVec<[VariantIdx; 4]>),
/// The single active variant; every other variant is considered inactive.
Active(VariantIdx),
}
impl InactiveVariants {
fn contains(&self, variant_idx: VariantIdx) -> bool {
match self {
InactiveVariants::Inactives(inactives) => inactives.contains(&variant_idx),
InactiveVariants::Active(active) => variant_idx != *active,
}
}
}
/// Calls `handle_inactive_variant` for each child move path of `enum_place` corresponding to an
/// inactive variant at a particular `SwitchInt` edge.
pub(crate) fn on_all_inactive_variants<'tcx>(
move_data: &MoveData<'tcx>,
enum_place: mir::Place<'tcx>,
active_variant: VariantIdx,
inactive_variants: &InactiveVariants,
mut handle_inactive_variant: impl FnMut(MovePathIndex),
) {
let LookupResult::Exact(enum_mpi) = move_data.rev_lookup.find(enum_place.as_ref()) else {
@ -182,7 +196,7 @@ pub(crate) fn on_all_inactive_variants<'tcx>(
unreachable!();
};
if variant_idx != active_variant {
if inactive_variants.contains(variant_idx) {
on_all_children_bits(move_data, variant_mpi, |mpi| handle_inactive_variant(mpi));
}
}

View file

@ -112,12 +112,13 @@ impl Direction for Backward {
propagate(pred, &tmp);
}
mir::TerminatorKind::SwitchInt { targets: _, ref discr } => {
mir::TerminatorKind::SwitchInt { ref targets, ref discr } => {
if let Some(mut data) = analysis.get_switch_int_data(block, discr) {
let mut tmp = analysis.bottom_value(body);
for &value in &body.basic_blocks.switch_sources()[&(block, pred)] {
tmp.clone_from(exit_state);
analysis.apply_switch_int_edge_effect(&mut data, &mut tmp, value);
analysis
.apply_switch_int_edge_effect(&mut data, &mut tmp, value, targets);
propagate(pred, &tmp);
}
} else {
@ -290,20 +291,20 @@ impl Direction for Forward {
for (value, target) in targets.iter() {
tmp.clone_from(exit_state);
let value = SwitchTargetValue::Normal(value);
analysis.apply_switch_int_edge_effect(&mut data, &mut tmp, value);
analysis.apply_switch_int_edge_effect(&mut data, &mut tmp, value, targets);
propagate(target, &tmp);
}
// Once we get to the final, "otherwise" branch, there is no need to preserve
// `exit_state`, so pass it directly to `apply_switch_int_edge_effect` to save
// a clone of the dataflow state.
let otherwise = targets.otherwise();
analysis.apply_switch_int_edge_effect(
&mut data,
exit_state,
SwitchTargetValue::Otherwise,
targets,
);
propagate(otherwise, exit_state);
propagate(targets.otherwise(), exit_state);
} else {
for target in targets.all_targets() {
propagate(*target, exit_state);

View file

@ -224,6 +224,7 @@ pub trait Analysis<'tcx> {
_data: &mut Self::SwitchIntData,
_state: &mut Self::Domain,
_value: SwitchTargetValue,
_targets: &mir::SwitchTargets,
) {
unreachable!();
}

View file

@ -9,9 +9,10 @@ use rustc_middle::mir::{
};
use rustc_middle::ty::util::Discr;
use rustc_middle::ty::{self, TyCtxt};
use smallvec::SmallVec;
use tracing::{debug, instrument};
use crate::drop_flag_effects::DropFlagState;
use crate::drop_flag_effects::{DropFlagState, InactiveVariants};
use crate::move_paths::{HasMoveData, InitIndex, InitKind, LookupResult, MoveData, MovePathIndex};
use crate::{
Analysis, GenKill, MaybeReachable, drop_flag_effects, drop_flag_effects_for_function_entry,
@ -26,6 +27,12 @@ pub struct MaybePlacesSwitchIntData<'tcx> {
}
impl<'tcx> MaybePlacesSwitchIntData<'tcx> {
/// Creates a `SmallVec` mapping each target in `targets` to its `VariantIdx`.
fn variants(&mut self, targets: &mir::SwitchTargets) -> SmallVec<[VariantIdx; 4]> {
self.index = 0;
targets.all_values().iter().map(|value| self.next_discr(value.get())).collect()
}
// The discriminant order in the `SwitchInt` targets should match the order yielded by
// `AdtDef::discriminants`. We rely on this to match each discriminant in the targets to its
// corresponding variant in linear time.
@ -131,12 +138,26 @@ pub struct MaybeInitializedPlaces<'a, 'tcx> {
tcx: TyCtxt<'tcx>,
body: &'a Body<'tcx>,
move_data: &'a MoveData<'tcx>,
exclude_inactive_in_otherwise: bool,
skip_unreachable_unwind: bool,
}
impl<'a, 'tcx> MaybeInitializedPlaces<'a, 'tcx> {
pub fn new(tcx: TyCtxt<'tcx>, body: &'a Body<'tcx>, move_data: &'a MoveData<'tcx>) -> Self {
MaybeInitializedPlaces { tcx, body, move_data, skip_unreachable_unwind: false }
MaybeInitializedPlaces {
tcx,
body,
move_data,
exclude_inactive_in_otherwise: false,
skip_unreachable_unwind: false,
}
}
/// Ensures definitely inactive variants are excluded from the set of initialized places for
/// blocks reached through an `otherwise` edge.
pub fn exclude_inactive_in_otherwise(mut self) -> Self {
self.exclude_inactive_in_otherwise = true;
self
}
pub fn skipping_unreachable_unwind(mut self) -> Self {
@ -208,6 +229,7 @@ pub struct MaybeUninitializedPlaces<'a, 'tcx> {
move_data: &'a MoveData<'tcx>,
mark_inactive_variants_as_uninit: bool,
include_inactive_in_otherwise: bool,
skip_unreachable_unwind: DenseBitSet<mir::BasicBlock>,
}
@ -218,6 +240,7 @@ impl<'a, 'tcx> MaybeUninitializedPlaces<'a, 'tcx> {
body,
move_data,
mark_inactive_variants_as_uninit: false,
include_inactive_in_otherwise: false,
skip_unreachable_unwind: DenseBitSet::new_empty(body.basic_blocks.len()),
}
}
@ -232,6 +255,13 @@ impl<'a, 'tcx> MaybeUninitializedPlaces<'a, 'tcx> {
self
}
/// Ensures definitely inactive variants are included in the set of uninitialized places for
/// blocks reached through an `otherwise` edge.
pub fn include_inactive_in_otherwise(mut self) -> Self {
self.include_inactive_in_otherwise = true;
self
}
pub fn skipping_unreachable_unwind(
mut self,
unreachable_unwind: DenseBitSet<mir::BasicBlock>,
@ -431,17 +461,24 @@ impl<'tcx> Analysis<'tcx> for MaybeInitializedPlaces<'_, 'tcx> {
data: &mut Self::SwitchIntData,
state: &mut Self::Domain,
value: SwitchTargetValue,
targets: &mir::SwitchTargets,
) {
if let SwitchTargetValue::Normal(value) = value {
// Kill all move paths that correspond to variants we know to be inactive along this
// particular outgoing edge of a `SwitchInt`.
drop_flag_effects::on_all_inactive_variants(
self.move_data,
data.enum_place,
data.next_discr(value),
|mpi| state.kill(mpi),
);
}
let inactive_variants = match value {
SwitchTargetValue::Normal(value) => InactiveVariants::Active(data.next_discr(value)),
SwitchTargetValue::Otherwise if self.exclude_inactive_in_otherwise => {
InactiveVariants::Inactives(data.variants(targets))
}
_ => return,
};
// Kill all move paths that correspond to variants we know to be inactive along this
// particular outgoing edge of a `SwitchInt`.
drop_flag_effects::on_all_inactive_variants(
self.move_data,
data.enum_place,
&inactive_variants,
|mpi| state.kill(mpi),
);
}
}
@ -544,17 +581,24 @@ impl<'tcx> Analysis<'tcx> for MaybeUninitializedPlaces<'_, 'tcx> {
data: &mut Self::SwitchIntData,
state: &mut Self::Domain,
value: SwitchTargetValue,
targets: &mir::SwitchTargets,
) {
if let SwitchTargetValue::Normal(value) = value {
// Mark all move paths that correspond to variants other than this one as maybe
// uninitialized (in reality, they are *definitely* uninitialized).
drop_flag_effects::on_all_inactive_variants(
self.move_data,
data.enum_place,
data.next_discr(value),
|mpi| state.gen_(mpi),
);
}
let inactive_variants = match value {
SwitchTargetValue::Normal(value) => InactiveVariants::Active(data.next_discr(value)),
SwitchTargetValue::Otherwise if self.include_inactive_in_otherwise => {
InactiveVariants::Inactives(data.variants(targets))
}
_ => return,
};
// Mark all move paths that correspond to variants other than this one as maybe
// uninitialized (in reality, they are *definitely* uninitialized).
drop_flag_effects::on_all_inactive_variants(
self.move_data,
data.enum_place,
&inactive_variants,
|mpi| state.gen_(mpi),
);
}
}

View file

@ -62,12 +62,14 @@ impl<'tcx> crate::MirPass<'tcx> for ElaborateDrops {
let env = MoveDataTypingEnv { move_data, typing_env };
let mut inits = MaybeInitializedPlaces::new(tcx, body, &env.move_data)
.exclude_inactive_in_otherwise()
.skipping_unreachable_unwind()
.iterate_to_fixpoint(tcx, body, Some("elaborate_drops"))
.into_results_cursor(body);
let dead_unwinds = compute_dead_unwinds(body, &mut inits);
let uninits = MaybeUninitializedPlaces::new(tcx, body, &env.move_data)
.include_inactive_in_otherwise()
.mark_inactive_variants_as_uninit()
.skipping_unreachable_unwind(dead_unwinds)
.iterate_to_fixpoint(tcx, body, Some("elaborate_drops"))

View file

@ -22,6 +22,7 @@ impl<'tcx> crate::MirPass<'tcx> for RemoveUninitDrops {
let move_data = MoveData::gather_moves(body, tcx, |ty| ty.needs_drop(tcx, typing_env));
let mut maybe_inits = MaybeInitializedPlaces::new(tcx, body, &move_data)
.exclude_inactive_in_otherwise()
.iterate_to_fixpoint(tcx, body, Some("remove_uninit_drops"))
.into_results_cursor(body);

View file

@ -1219,6 +1219,7 @@ fn collect_alloc<'tcx>(tcx: TyCtxt<'tcx>, alloc_id: AllocId, output: &mut MonoIt
));
collect_alloc(tcx, alloc_id, output)
}
GlobalAlloc::TypeId { .. } => {}
}
}

View file

@ -2206,7 +2206,7 @@ impl<'a> Parser<'a> {
if self.look_ahead(1, |t| *t == token::Bang) && self.look_ahead(2, |t| t.is_ident()) {
return IsMacroRulesItem::Yes { has_bang: true };
} else if self.look_ahead(1, |t| (t.is_ident())) {
} else if self.look_ahead(1, |t| t.is_ident()) {
// macro_rules foo
self.dcx().emit_err(errors::MacroRulesMissingBang {
span: macro_rules_span,

View file

@ -271,6 +271,10 @@ pub fn check_builtin_meta_item(
if matches!(
name,
sym::inline
| sym::export_stable
| sym::ffi_const
| sym::ffi_pure
| sym::rustc_std_internal_symbol
| sym::may_dangle
| sym::rustc_as_ptr
| sym::rustc_pub_transparent

View file

@ -204,10 +204,20 @@ impl<'tcx> CheckAttrVisitor<'tcx> {
AttributeKind::RustcLayoutScalarValidRangeStart(_num, attr_span)
| AttributeKind::RustcLayoutScalarValidRangeEnd(_num, attr_span),
) => self.check_rustc_layout_scalar_valid_range(*attr_span, span, target),
Attribute::Parsed(AttributeKind::ExportStable) => {
// handled in `check_export`
}
&Attribute::Parsed(AttributeKind::FfiConst(attr_span)) => {
self.check_ffi_const(attr_span, target)
}
&Attribute::Parsed(AttributeKind::FfiPure(attr_span)) => {
self.check_ffi_pure(attr_span, attrs, target)
}
Attribute::Parsed(
AttributeKind::BodyStability { .. }
| AttributeKind::ConstStabilityIndirect
| AttributeKind::MacroTransparency(_),
| AttributeKind::MacroTransparency(_)
| AttributeKind::Dummy,
) => { /* do nothing */ }
Attribute::Parsed(AttributeKind::AsPtr(attr_span)) => {
self.check_applied_to_fn_or_method(hir_id, *attr_span, span, target)
@ -233,6 +243,9 @@ impl<'tcx> CheckAttrVisitor<'tcx> {
&Attribute::Parsed(AttributeKind::PassByValue(attr_span)) => {
self.check_pass_by_value(attr_span, span, target)
}
&Attribute::Parsed(AttributeKind::StdInternalSymbol(attr_span)) => {
self.check_rustc_std_internal_symbol(attr_span, span, target)
}
Attribute::Unparsed(attr_item) => {
style = Some(attr_item.style);
match attr.path().as_slice() {
@ -258,9 +271,6 @@ impl<'tcx> CheckAttrVisitor<'tcx> {
),
[sym::no_link, ..] => self.check_no_link(hir_id, attr, span, target),
[sym::debugger_visualizer, ..] => self.check_debugger_visualizer(attr, target),
[sym::rustc_std_internal_symbol, ..] => {
self.check_rustc_std_internal_symbol(attr, span, target)
}
[sym::rustc_no_implicit_autorefs, ..] => {
self.check_applied_to_fn_or_method(hir_id, attr.span(), span, target)
}
@ -300,8 +310,6 @@ impl<'tcx> CheckAttrVisitor<'tcx> {
[sym::rustc_has_incoherent_inherent_impls, ..] => {
self.check_has_incoherent_inherent_impls(attr, span, target)
}
[sym::ffi_pure, ..] => self.check_ffi_pure(attr.span(), attrs, target),
[sym::ffi_const, ..] => self.check_ffi_const(attr.span(), target),
[sym::link_ordinal, ..] => self.check_link_ordinal(attr, span, target),
[sym::link, ..] => self.check_link(hir_id, attr, span, target),
[sym::macro_use, ..] | [sym::macro_escape, ..] => {
@ -346,7 +354,6 @@ impl<'tcx> CheckAttrVisitor<'tcx> {
| sym::cfg_attr
| sym::cfg_trace
| sym::cfg_attr_trace
| sym::export_stable // handled in `check_export`
// need to be fixed
| sym::cfi_encoding // FIXME(cfi_encoding)
| sym::pointee // FIXME(derive_coerce_pointee)
@ -1507,7 +1514,7 @@ impl<'tcx> CheckAttrVisitor<'tcx> {
self.dcx().emit_err(errors::FfiPureInvalidTarget { attr_span });
return;
}
if attrs.iter().any(|a| a.has_name(sym::ffi_const)) {
if find_attr!(attrs, AttributeKind::FfiConst(_)) {
// `#[ffi_const]` functions cannot be `#[ffi_pure]`
self.dcx().emit_err(errors::BothFfiConstAndPure { attr_span });
}
@ -2214,13 +2221,11 @@ impl<'tcx> CheckAttrVisitor<'tcx> {
}
}
fn check_rustc_std_internal_symbol(&self, attr: &Attribute, span: Span, target: Target) {
fn check_rustc_std_internal_symbol(&self, attr_span: Span, span: Span, target: Target) {
match target {
Target::Fn | Target::Static | Target::ForeignFn | Target::ForeignStatic => {}
_ => {
self.tcx
.dcx()
.emit_err(errors::RustcStdInternalSymbol { attr_span: attr.span(), span });
self.tcx.dcx().emit_err(errors::RustcStdInternalSymbol { attr_span, span });
}
}
}

View file

@ -2,6 +2,7 @@ use std::iter;
use std::ops::ControlFlow;
use rustc_abi::ExternAbi;
use rustc_attr_data_structures::{AttributeKind, find_attr};
use rustc_data_structures::fx::{FxIndexMap, FxIndexSet};
use rustc_hir as hir;
use rustc_hir::def::DefKind;
@ -14,7 +15,7 @@ use rustc_middle::ty::{
self, Ty, TyCtxt, TypeSuperVisitable, TypeVisitable, TypeVisitor, Visibility,
};
use rustc_session::config::CrateType;
use rustc_span::{Span, sym};
use rustc_span::Span;
use crate::errors::UnexportableItem;
@ -44,7 +45,7 @@ impl<'tcx> ExportableItemCollector<'tcx> {
}
fn item_is_exportable(&self, def_id: LocalDefId) -> bool {
let has_attr = self.tcx.has_attr(def_id, sym::export_stable);
let has_attr = find_attr!(self.tcx.get_all_attrs(def_id), AttributeKind::ExportStable);
if !self.in_exportable_mod && !has_attr {
return false;
}
@ -80,7 +81,7 @@ impl<'tcx> ExportableItemCollector<'tcx> {
fn walk_item_with_mod(&mut self, item: &'tcx hir::Item<'tcx>) {
let def_id = item.hir_id().owner.def_id;
let old_exportable_mod = self.in_exportable_mod;
if self.tcx.get_attr(def_id, sym::export_stable).is_some() {
if find_attr!(self.tcx.get_all_attrs(def_id), AttributeKind::ExportStable) {
self.in_exportable_mod = true;
}
let old_seen_exportable_in_mod = std::mem::replace(&mut self.seen_exportable_in_mod, false);

View file

@ -325,6 +325,7 @@ impl<'tcx> ReachableContext<'tcx> {
self.visit(args);
}
}
GlobalAlloc::TypeId { ty, .. } => self.visit(ty),
GlobalAlloc::Memory(alloc) => self.propagate_from_alloc(alloc),
}
}

View file

@ -0,0 +1,50 @@
//! Contains checks that must be run to validate matches before performing usefulness analysis.
use crate::constructor::Constructor::*;
use crate::pat_column::PatternColumn;
use crate::{MatchArm, PatCx};
/// Entry point of the mixed-constructor validation: a deref pattern and a
/// normal constructor must never be used to match on the same place.
///
/// Builds the initial pattern column from the match arms and delegates to the
/// recursive columnwise check.
pub(crate) fn detect_mixed_deref_pat_ctors<'p, Cx: PatCx>(
    cx: &Cx,
    arms: &[MatchArm<'p, Cx>],
) -> Result<(), Cx::Error> {
    let column = PatternColumn::new(arms);
    detect_mixed_deref_pat_ctors_inner(cx, &column)
}
/// Recursive worker for [`detect_mixed_deref_pat_ctors`]: checks a single
/// pattern column for a mix of deref patterns and normal constructors, then
/// specializes the column with each present constructor and recurses into the
/// resulting field columns.
fn detect_mixed_deref_pat_ctors_inner<'p, Cx: PatCx>(
    cx: &Cx,
    column: &PatternColumn<'p, Cx>,
) -> Result<(), Cx::Error> {
    // An empty column has no head type and trivially contains no mix.
    let Some(ty) = column.head_ty() else {
        return Ok(());
    };
    // Check for a mix of deref patterns and normal constructors.
    // We only need one witness of each kind to report the conflict.
    let mut deref_pat = None;
    let mut normal_pat = None;
    for pat in column.iter() {
        match pat.ctor() {
            // The analysis can handle mixing deref patterns with wildcards and opaque patterns.
            Wildcard | Opaque(_) => {}
            DerefPattern(_) => deref_pat = Some(pat),
            // Nothing else can be compared to deref patterns in `Constructor::is_covered_by`.
            _ => normal_pat = Some(pat),
        }
    }
    if let Some(deref_pat) = deref_pat
        && let Some(normal_pat) = normal_pat
    {
        // Reported through the `PatCx` so each frontend can emit its own diagnostic.
        return Err(cx.report_mixed_deref_pat_ctors(deref_pat, normal_pat));
    }
    // Specialize and recurse into the patterns' fields.
    let set = column.analyze_ctors(cx, &ty)?;
    for ctor in set.present {
        for specialized_column in column.specialize(cx, &ty, &ctor).iter() {
            detect_mixed_deref_pat_ctors_inner(cx, specialized_column)?;
        }
    }
    Ok(())
}

View file

@ -8,6 +8,7 @@
#![allow(unused_crate_dependencies)]
// tidy-alphabetical-end
pub(crate) mod checks;
pub mod constructor;
#[cfg(feature = "rustc")]
pub mod errors;
@ -107,6 +108,20 @@ pub trait PatCx: Sized + fmt::Debug {
_gapped_with: &[&DeconstructedPat<Self>],
) {
}
/// Check if we may need to perform additional deref-pattern-specific validation.
///
/// Defaults to `true` (always run the validation); an implementation may
/// return `false` when it knows no deref pattern can occur in the match,
/// allowing the columnwise check to be skipped entirely.
fn match_may_contain_deref_pats(&self) -> bool {
    true
}
/// The current implementation of deref patterns requires that they can't match on the same
/// place as a normal constructor. Since this isn't caught by type-checking, we check it in the
/// `PatCx` before running the analysis. This reports an error if the check fails.
///
/// `deref_pat` is a witness deref pattern and `normal_pat` a witness normal
/// constructor found in the same pattern column; the returned value is the
/// frontend's error type for the emitted diagnostic.
fn report_mixed_deref_pat_ctors(
    &self,
    deref_pat: &DeconstructedPat<Self>,
    normal_pat: &DeconstructedPat<Self>,
) -> Self::Error;
}
/// The arm of a match expression.

View file

@ -1,3 +1,4 @@
use std::cell::Cell;
use std::fmt;
use std::iter::once;
@ -99,6 +100,16 @@ pub struct RustcPatCtxt<'p, 'tcx: 'p> {
/// Whether the data at the scrutinee is known to be valid. This is false if the scrutinee comes
/// from a union field, a pointer deref, or a reference deref (pending opsem decisions).
pub known_valid_scrutinee: bool,
pub internal_state: RustcPatCtxtState,
}
/// Private fields of [`RustcPatCtxt`], separated out to permit record initialization syntax.
#[derive(Clone, Default)]
pub struct RustcPatCtxtState {
/// Has a deref pattern been lowered? This is initialized to `false` and is updated by
/// [`RustcPatCtxt::lower_pat`] in order to avoid performing deref-pattern-specific validation
/// for everything containing patterns.
has_lowered_deref_pat: Cell<bool>,
}
impl<'p, 'tcx: 'p> fmt::Debug for RustcPatCtxt<'p, 'tcx> {
@ -474,6 +485,7 @@ impl<'p, 'tcx: 'p> RustcPatCtxt<'p, 'tcx> {
fields = vec![self.lower_pat(subpattern).at_index(0)];
arity = 1;
ctor = DerefPattern(cx.reveal_opaque_ty(subpattern.ty));
self.internal_state.has_lowered_deref_pat.set(true);
}
PatKind::Leaf { subpatterns } | PatKind::Variant { subpatterns, .. } => {
match ty.kind() {
@ -1027,6 +1039,25 @@ impl<'p, 'tcx: 'p> PatCx for RustcPatCtxt<'p, 'tcx> {
);
}
}
// Only run the deref-pattern validation when `lower_pat` actually lowered a
// deref pattern for this match (recorded in `internal_state`).
fn match_may_contain_deref_pats(&self) -> bool {
    self.internal_state.has_lowered_deref_pat.get()
}
/// Emit the diagnostic for a deref pattern mixed with a normal constructor in
/// the same pattern column, labeling both offending patterns.
fn report_mixed_deref_pat_ctors(
    &self,
    deref_pat: &crate::pat::DeconstructedPat<Self>,
    normal_pat: &crate::pat::DeconstructedPat<Self>,
) -> Self::Error {
    let deref_span = deref_pat.data().span;
    let ctor_span = normal_pat.data().span;
    let diag = errors::MixedDerefPatternConstructors {
        spans: vec![deref_span, ctor_span],
        smart_pointer_ty: deref_pat.ty().inner(),
        deref_pattern_label: deref_span,
        normal_constructor_label: ctor_span,
    };
    self.tcx.dcx().emit_err(diag)
}
}
/// Recursively expand this pattern into its subpatterns. Only useful for or-patterns.
@ -1055,13 +1086,6 @@ pub fn analyze_match<'p, 'tcx>(
) -> Result<UsefulnessReport<'p, 'tcx>, ErrorGuaranteed> {
let scrut_ty = tycx.reveal_opaque_ty(scrut_ty);
// The analysis doesn't support deref patterns mixed with normal constructors; error if present.
// FIXME(deref_patterns): This only needs to run when a deref pattern was found during lowering.
if tycx.tcx.features().deref_patterns() {
let pat_column = PatternColumn::new(arms);
detect_mixed_deref_pat_ctors(tycx, &pat_column)?;
}
let scrut_validity = PlaceValidity::from_bool(tycx.known_valid_scrutinee);
let report = compute_match_usefulness(
tycx,
@ -1080,48 +1104,3 @@ pub fn analyze_match<'p, 'tcx>(
Ok(report)
}
// FIXME(deref_patterns): Currently it's the responsibility of the frontend (rustc or rust-analyzer)
// to ensure that deref patterns don't appear in the same column as normal constructors. Deref
// patterns aren't currently implemented in rust-analyzer, but should they be, the columnwise check
// here could be made generic and shared between frontends.
fn detect_mixed_deref_pat_ctors<'p, 'tcx>(
cx: &RustcPatCtxt<'p, 'tcx>,
column: &PatternColumn<'p, RustcPatCtxt<'p, 'tcx>>,
) -> Result<(), ErrorGuaranteed> {
let Some(&ty) = column.head_ty() else {
return Ok(());
};
// Check for a mix of deref patterns and normal constructors.
let mut normal_ctor_span = None;
let mut deref_pat_span = None;
for pat in column.iter() {
match pat.ctor() {
// The analysis can handle mixing deref patterns with wildcards and opaque patterns.
Wildcard | Opaque(_) => {}
DerefPattern(_) => deref_pat_span = Some(pat.data().span),
// Nothing else can be compared to deref patterns in `Constructor::is_covered_by`.
_ => normal_ctor_span = Some(pat.data().span),
}
}
if let Some(normal_constructor_label) = normal_ctor_span
&& let Some(deref_pattern_label) = deref_pat_span
{
return Err(cx.tcx.dcx().emit_err(errors::MixedDerefPatternConstructors {
spans: vec![deref_pattern_label, normal_constructor_label],
smart_pointer_ty: ty.inner(),
deref_pattern_label,
normal_constructor_label,
}));
}
// Specialize and recurse into the patterns' fields.
let set = column.analyze_ctors(cx, &ty)?;
for ctor in set.present {
for specialized_column in column.specialize(cx, &ty, &ctor).iter() {
detect_mixed_deref_pat_ctors(cx, specialized_column)?;
}
}
Ok(())
}

View file

@ -720,7 +720,7 @@ use tracing::{debug, instrument};
use self::PlaceValidity::*;
use crate::constructor::{Constructor, ConstructorSet, IntRange};
use crate::pat::{DeconstructedPat, PatId, PatOrWild, WitnessPat};
use crate::{MatchArm, PatCx, PrivateUninhabitedField};
use crate::{MatchArm, PatCx, PrivateUninhabitedField, checks};
#[cfg(not(feature = "rustc"))]
pub fn ensure_sufficient_stack<R>(f: impl FnOnce() -> R) -> R {
f()
@ -1836,6 +1836,11 @@ pub fn compute_match_usefulness<'p, Cx: PatCx>(
scrut_validity: PlaceValidity,
complexity_limit: usize,
) -> Result<UsefulnessReport<'p, Cx>, Cx::Error> {
// The analysis doesn't support deref patterns mixed with normal constructors; error if present.
if tycx.match_may_contain_deref_pats() {
checks::detect_mixed_deref_pat_ctors(tycx, arms)?;
}
let mut cx = UsefulnessCtxt {
tycx,
branch_usefulness: FxHashMap::default(),

View file

@ -1,6 +1,7 @@
use rustc_pattern_analysis::constructor::{
Constructor, ConstructorSet, IntRange, MaybeInfiniteInt, RangeEnd, VariantVisibility,
};
use rustc_pattern_analysis::pat::DeconstructedPat;
use rustc_pattern_analysis::usefulness::{PlaceValidity, UsefulnessReport};
use rustc_pattern_analysis::{MatchArm, PatCx, PrivateUninhabitedField};
@ -184,6 +185,14 @@ impl PatCx for Cx {
fn complexity_exceeded(&self) -> Result<(), Self::Error> {
Err(())
}
// The arguments are intentionally unused: this test harness never constructs
// deref patterns, so this callback must be unreachable.
fn report_mixed_deref_pat_ctors(
    &self,
    _deref_pat: &DeconstructedPat<Self>,
    _normal_pat: &DeconstructedPat<Self>,
) -> Self::Error {
    panic!("`rustc_pattern_analysis::tests` currently doesn't test deref pattern errors")
}
}
/// Construct a single pattern; see `pats!()`.

View file

@ -162,28 +162,30 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> {
}
}
pub(crate) fn get_macro(&mut self, res: Res) -> Option<&MacroData> {
pub(crate) fn get_macro(&self, res: Res) -> Option<&'ra MacroData> {
match res {
Res::Def(DefKind::Macro(..), def_id) => Some(self.get_macro_by_def_id(def_id)),
Res::NonMacroAttr(_) => Some(&self.non_macro_attr),
Res::NonMacroAttr(_) => Some(self.non_macro_attr),
_ => None,
}
}
pub(crate) fn get_macro_by_def_id(&mut self, def_id: DefId) -> &MacroData {
if self.macro_map.contains_key(&def_id) {
return &self.macro_map[&def_id];
pub(crate) fn get_macro_by_def_id(&self, def_id: DefId) -> &'ra MacroData {
// Local macros are always compiled.
match def_id.as_local() {
Some(local_def_id) => self.local_macro_map[&local_def_id],
None => *self.extern_macro_map.borrow_mut().entry(def_id).or_insert_with(|| {
let loaded_macro = self.cstore().load_macro_untracked(def_id, self.tcx);
let macro_data = match loaded_macro {
LoadedMacro::MacroDef { def, ident, attrs, span, edition } => {
self.compile_macro(&def, ident, &attrs, span, ast::DUMMY_NODE_ID, edition)
}
LoadedMacro::ProcMacro(ext) => MacroData::new(Arc::new(ext)),
};
self.arenas.alloc_macro(macro_data)
}),
}
let loaded_macro = self.cstore().load_macro_untracked(def_id, self.tcx);
let macro_data = match loaded_macro {
LoadedMacro::MacroDef { def, ident, attrs, span, edition } => {
self.compile_macro(&def, ident, &attrs, span, ast::DUMMY_NODE_ID, edition)
}
LoadedMacro::ProcMacro(ext) => MacroData::new(Arc::new(ext)),
};
self.macro_map.entry(def_id).or_insert(macro_data)
}
pub(crate) fn build_reduced_graph(
@ -1203,7 +1205,7 @@ impl<'a, 'ra, 'tcx> BuildReducedGraphVisitor<'a, 'ra, 'tcx> {
fn insert_unused_macro(&mut self, ident: Ident, def_id: LocalDefId, node_id: NodeId) {
if !ident.as_str().starts_with('_') {
self.r.unused_macros.insert(def_id, (node_id, ident));
let nrules = self.r.macro_map[&def_id.to_def_id()].nrules;
let nrules = self.r.local_macro_map[&def_id].nrules;
self.r.unused_macro_rules.insert(node_id, DenseBitSet::new_filled(nrules));
}
}
@ -1222,7 +1224,7 @@ impl<'a, 'ra, 'tcx> BuildReducedGraphVisitor<'a, 'ra, 'tcx> {
Some((macro_kind, ident, span)) => {
let res = Res::Def(DefKind::Macro(macro_kind), def_id.to_def_id());
let macro_data = MacroData::new(self.r.dummy_ext(macro_kind));
self.r.macro_map.insert(def_id.to_def_id(), macro_data);
self.r.new_local_macro(def_id, macro_data);
self.r.proc_macro_stubs.insert(def_id);
(res, ident, span, false)
}

View file

@ -165,7 +165,7 @@ impl<'a, 'ra, 'tcx> visit::Visitor<'a> for DefCollector<'a, 'ra, 'tcx> {
self.create_def(i.id, i.kind.ident().map(|ident| ident.name), def_kind, i.span);
if let Some(macro_data) = opt_macro_data {
self.resolver.macro_map.insert(def_id.to_def_id(), macro_data);
self.resolver.new_local_macro(def_id, macro_data);
}
self.with_parent(def_id, |this| {

View file

@ -1669,9 +1669,14 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> {
let mut all_attrs: UnordMap<Symbol, Vec<_>> = UnordMap::default();
// We're collecting these in a hashmap, and handle ordering the output further down.
#[allow(rustc::potential_query_instability)]
for (def_id, data) in &self.macro_map {
for (def_id, data) in self
.local_macro_map
.iter()
.map(|(local_id, data)| (local_id.to_def_id(), data))
.chain(self.extern_macro_map.borrow().iter().map(|(id, d)| (*id, d)))
{
for helper_attr in &data.ext.helper_attrs {
let item_name = self.tcx.item_name(*def_id);
let item_name = self.tcx.item_name(def_id);
all_attrs.entry(*helper_attr).or_default().push(item_name);
if helper_attr == &ident.name {
derives.push(item_name);

View file

@ -328,8 +328,7 @@ impl<'ast, 'ra, 'tcx> LateResolutionVisitor<'_, 'ast, 'ra, 'tcx> {
let module_did = mod_prefix.as_ref().and_then(Res::mod_def_id);
let mod_prefix =
mod_prefix.map_or_else(String::new, |res| (format!("{} ", res.descr())));
mod_prefix.map_or_else(String::new, |res| format!("{} ", res.descr()));
(mod_prefix, format!("`{}`", Segment::names_to_string(mod_path)), module_did, None)
};
@ -1183,15 +1182,23 @@ impl<'ast, 'ra, 'tcx> LateResolutionVisitor<'_, 'ast, 'ra, 'tcx> {
_ => "`self` value is a keyword only available in methods with a `self` parameter",
},
);
// using `let self` is wrong even if we're not in an associated method or if we're in a macro expansion.
// So, we should return early if we're in a pattern, see issue #143134.
if matches!(source, PathSource::Pat) {
return true;
}
let is_assoc_fn = self.self_type_is_available();
let self_from_macro = "a `self` parameter, but a macro invocation can only \
access identifiers it receives from parameters";
if let Some((fn_kind, span)) = &self.diag_metadata.current_function {
if let Some((fn_kind, fn_span)) = &self.diag_metadata.current_function {
// The current function has a `self` parameter, but we were unable to resolve
// a reference to `self`. This can only happen if the `self` identifier we
// are resolving came from a different hygiene context.
// are resolving came from a different hygiene context or a variable binding.
// But variable binding error is returned early above.
if fn_kind.decl().inputs.get(0).is_some_and(|p| p.is_self()) {
err.span_label(*span, format!("this function has {self_from_macro}"));
err.span_label(*fn_span, format!("this function has {self_from_macro}"));
} else {
let doesnt = if is_assoc_fn {
let (span, sugg) = fn_kind
@ -1204,7 +1211,7 @@ impl<'ast, 'ra, 'tcx> LateResolutionVisitor<'_, 'ast, 'ra, 'tcx> {
// This avoids placing the suggestion into the visibility specifier.
let span = fn_kind
.ident()
.map_or(*span, |ident| span.with_lo(ident.span.hi()));
.map_or(*fn_span, |ident| fn_span.with_lo(ident.span.hi()));
(
self.r
.tcx

View file

@ -1128,10 +1128,12 @@ pub struct Resolver<'ra, 'tcx> {
builtin_macros: FxHashMap<Symbol, SyntaxExtensionKind>,
registered_tools: &'tcx RegisteredTools,
macro_use_prelude: FxIndexMap<Symbol, NameBinding<'ra>>,
macro_map: FxHashMap<DefId, MacroData>,
local_macro_map: FxHashMap<LocalDefId, &'ra MacroData>,
/// Lazily populated cache of macros loaded from external crates.
extern_macro_map: RefCell<FxHashMap<DefId, &'ra MacroData>>,
dummy_ext_bang: Arc<SyntaxExtension>,
dummy_ext_derive: Arc<SyntaxExtension>,
non_macro_attr: MacroData,
non_macro_attr: &'ra MacroData,
local_macro_def_scopes: FxHashMap<LocalDefId, Module<'ra>>,
ast_transform_scopes: FxHashMap<LocalExpnId, Module<'ra>>,
unused_macros: FxIndexMap<LocalDefId, (NodeId, Ident)>,
@ -1241,6 +1243,7 @@ pub struct ResolverArenas<'ra> {
imports: TypedArena<ImportData<'ra>>,
name_resolutions: TypedArena<RefCell<NameResolution<'ra>>>,
ast_paths: TypedArena<ast::Path>,
macros: TypedArena<MacroData>,
dropless: DroplessArena,
}
@ -1287,7 +1290,7 @@ impl<'ra> ResolverArenas<'ra> {
self.name_resolutions.alloc(Default::default())
}
fn alloc_macro_rules_scope(&'ra self, scope: MacroRulesScope<'ra>) -> MacroRulesScopeRef<'ra> {
Interned::new_unchecked(self.dropless.alloc(Cell::new(scope)))
self.dropless.alloc(Cell::new(scope))
}
fn alloc_macro_rules_binding(
&'ra self,
@ -1298,6 +1301,9 @@ impl<'ra> ResolverArenas<'ra> {
fn alloc_ast_paths(&'ra self, paths: &[ast::Path]) -> &'ra [ast::Path] {
self.ast_paths.alloc_from_iter(paths.iter().cloned())
}
/// Allocate a `MacroData` in the resolver arenas, returning a reference that
/// lives for the arena lifetime `'ra`.
fn alloc_macro(&'ra self, macro_data: MacroData) -> &'ra MacroData {
    self.macros.alloc(macro_data)
}
fn alloc_pattern_spans(&'ra self, spans: impl Iterator<Item = Span>) -> &'ra [Span] {
self.dropless.alloc_from_iter(spans)
}
@ -1540,10 +1546,12 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> {
builtin_macros: Default::default(),
registered_tools,
macro_use_prelude: Default::default(),
macro_map: FxHashMap::default(),
local_macro_map: Default::default(),
extern_macro_map: Default::default(),
dummy_ext_bang: Arc::new(SyntaxExtension::dummy_bang(edition)),
dummy_ext_derive: Arc::new(SyntaxExtension::dummy_derive(edition)),
non_macro_attr: MacroData::new(Arc::new(SyntaxExtension::non_macro_attr(edition))),
non_macro_attr: arenas
.alloc_macro(MacroData::new(Arc::new(SyntaxExtension::non_macro_attr(edition)))),
invocation_parent_scopes: Default::default(),
output_macro_rules_scopes: Default::default(),
macro_rules_scopes: Default::default(),
@ -1616,6 +1624,12 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> {
)
}
/// Arena-allocate the data for a local (same-crate) macro and record it in
/// `local_macro_map` under `def_id`, returning the arena reference.
fn new_local_macro(&mut self, def_id: LocalDefId, macro_data: MacroData) -> &'ra MacroData {
    let mac = self.arenas.alloc_macro(macro_data);
    self.local_macro_map.insert(def_id, mac);
    mac
}
fn next_node_id(&mut self) -> NodeId {
let start = self.next_node_id;
let next = start.as_u32().checked_add(1).expect("input too large; ran out of NodeIds");
@ -1734,7 +1748,7 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> {
f(self, MacroNS);
}
fn is_builtin_macro(&mut self, res: Res) -> bool {
fn is_builtin_macro(&self, res: Res) -> bool {
self.get_macro(res).is_some_and(|macro_data| macro_data.ext.builtin_name.is_some())
}

View file

@ -9,7 +9,6 @@ use rustc_ast::expand::StrippedCfgItem;
use rustc_ast::{self as ast, Crate, NodeId, attr};
use rustc_ast_pretty::pprust;
use rustc_attr_data_structures::StabilityLevel;
use rustc_data_structures::intern::Interned;
use rustc_errors::{Applicability, DiagCtxtHandle, StashKey};
use rustc_expand::base::{
Annotatable, DeriveResolution, Indeterminate, ResolverExpand, SyntaxExtension,
@ -80,7 +79,7 @@ pub(crate) enum MacroRulesScope<'ra> {
/// This helps to avoid uncontrollable growth of `macro_rules!` scope chains,
/// which usually grow linearly with the number of macro invocations
/// in a module (including derives) and hurt performance.
pub(crate) type MacroRulesScopeRef<'ra> = Interned<'ra, Cell<MacroRulesScope<'ra>>>;
pub(crate) type MacroRulesScopeRef<'ra> = &'ra Cell<MacroRulesScope<'ra>>;
/// Macro namespace is separated into two sub-namespaces, one for bang macros and
/// one for attribute-like macros (attributes, derives).
@ -354,8 +353,8 @@ impl<'ra, 'tcx> ResolverExpand for Resolver<'ra, 'tcx> {
if unused_arms.is_empty() {
continue;
}
let def_id = self.local_def_id(node_id).to_def_id();
let m = &self.macro_map[&def_id];
let def_id = self.local_def_id(node_id);
let m = &self.local_macro_map[&def_id];
let SyntaxExtensionKind::LegacyBang(ref ext) = m.ext.kind else {
continue;
};
@ -1132,7 +1131,7 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> {
}
}
pub(crate) fn check_reserved_macro_name(&mut self, ident: Ident, res: Res) {
pub(crate) fn check_reserved_macro_name(&self, ident: Ident, res: Res) {
// Reserve some names that are not quite covered by the general check
// performed on `Resolver::builtin_attrs`.
if ident.name == sym::cfg || ident.name == sym::cfg_attr {
@ -1148,7 +1147,7 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> {
///
/// Possibly replace its expander to a pre-defined one for built-in macros.
pub(crate) fn compile_macro(
&mut self,
&self,
macro_def: &ast::MacroDef,
ident: Ident,
attrs: &[rustc_hir::Attribute],

View file

@ -370,12 +370,34 @@ impl LinkSelfContained {
}
/// To help checking CLI usage while some of the values are unstable: returns whether one of the
/// components was set individually. This would also require the `-Zunstable-options` flag, to
/// be allowed.
fn are_unstable_variants_set(&self) -> bool {
let any_component_set =
!self.enabled_components.is_empty() || !self.disabled_components.is_empty();
self.explicitly_set.is_none() && any_component_set
/// unstable components was set individually, for the given `TargetTuple`. This would also
/// require the `-Zunstable-options` flag, to be allowed.
fn check_unstable_variants(&self, target_tuple: &TargetTuple) -> Result<(), String> {
    // A bulk `y`/`n`-style value was set explicitly; no individual components
    // were used, so there is nothing unstable to reject.
    if self.explicitly_set.is_some() {
        return Ok(());
    }
    // `-C link-self-contained=-linker` is only stable on x64 linux.
    let has_minus_linker = self.disabled_components.is_linker_enabled();
    if has_minus_linker && target_tuple.tuple() != "x86_64-unknown-linux-gnu" {
        return Err(format!(
            "`-C link-self-contained=-linker` is unstable on the `{target_tuple}` \
            target. The `-Z unstable-options` flag must also be passed to use it on this target",
        ));
    }
    // Any `+linker` or other component used is unstable, and that's an error.
    // (`-linker` is carved out above, so remove it from the disabled set.)
    let unstable_enabled = self.enabled_components;
    let unstable_disabled = self.disabled_components - LinkSelfContainedComponents::LINKER;
    if !unstable_enabled.union(unstable_disabled).is_empty() {
        return Err(String::from(
            "only `-C link-self-contained` values `y`/`yes`/`on`/`n`/`no`/`off`/`-linker` \
            are stable, the `-Z unstable-options` flag must also be passed to use \
            the unstable values",
        ));
    }
    Ok(())
}
/// Returns whether the self-contained linker component was enabled on the CLI, using the
@ -402,7 +424,7 @@ impl LinkSelfContained {
}
}
/// The different values that `-Z linker-features` can take on the CLI: a list of individually
/// The different values that `-C linker-features` can take on the CLI: a list of individually
/// enabled or disabled features used during linking.
///
/// There is no need to enable or disable them in bulk. Each feature is fine-grained, and can be
@ -442,6 +464,39 @@ impl LinkerFeaturesCli {
_ => None,
}
}
/// When *not* using `-Z unstable-options` on the CLI, ensure only stable linker features are
/// used, for the given `TargetTuple`. Returns `Ok` if no unstable variants are used.
/// The caller should ensure that e.g. `nightly_options::is_unstable_enabled()`
/// returns false.
pub(crate) fn check_unstable_variants(&self, target_tuple: &TargetTuple) -> Result<(), String> {
    // `-C linker-features=-lld` is only stable on x64 linux.
    let has_minus_lld = self.disabled.is_lld_enabled();
    if has_minus_lld && target_tuple.tuple() != "x86_64-unknown-linux-gnu" {
        return Err(format!(
            "`-C linker-features=-lld` is unstable on the `{target_tuple}` \
            target. The `-Z unstable-options` flag must also be passed to use it on this target",
        ));
    }
    // Any `+lld` or non-lld feature used is unstable, and that's an error.
    // (`-lld` is carved out above, so remove it from the disabled set.)
    let unstable_enabled = self.enabled;
    let unstable_disabled = self.disabled - LinkerFeatures::LLD;
    if !unstable_enabled.union(unstable_disabled).is_empty() {
        // List the offending flags exactly as passed on the CLI (`+feat`/`-feat`)
        // so the error message is actionable.
        let unstable_features: Vec<_> = unstable_enabled
            .iter()
            .map(|f| format!("+{}", f.as_str().unwrap()))
            .chain(unstable_disabled.iter().map(|f| format!("-{}", f.as_str().unwrap())))
            .collect();
        return Err(format!(
            "`-C linker-features={}` is unstable, and also requires the \
            `-Z unstable-options` flag to be used",
            unstable_features.join(","),
        ));
    }
    Ok(())
}
}
/// Used with `-Z assert-incr-state`.
@ -2638,26 +2693,21 @@ pub fn build_session_options(early_dcx: &mut EarlyDiagCtxt, matches: &getopts::M
}
}
if !nightly_options::is_unstable_enabled(matches)
&& cg.force_frame_pointers == FramePointer::NonLeaf
{
let unstable_options_enabled = nightly_options::is_unstable_enabled(matches);
if !unstable_options_enabled && cg.force_frame_pointers == FramePointer::NonLeaf {
early_dcx.early_fatal(
"`-Cforce-frame-pointers=non-leaf` or `always` also requires `-Zunstable-options` \
and a nightly compiler",
)
}
// For testing purposes, until we have more feedback about these options: ensure `-Z
// unstable-options` is required when using the unstable `-C link-self-contained` and `-C
// linker-flavor` options.
if !nightly_options::is_unstable_enabled(matches) {
let uses_unstable_self_contained_option =
cg.link_self_contained.are_unstable_variants_set();
if uses_unstable_self_contained_option {
early_dcx.early_fatal(
"only `-C link-self-contained` values `y`/`yes`/`on`/`n`/`no`/`off` are stable, \
the `-Z unstable-options` flag must also be passed to use the unstable values",
);
let target_triple = parse_target_triple(early_dcx, matches);
// Ensure `-Z unstable-options` is required when using the unstable `-C link-self-contained` and
// `-C linker-flavor` options.
if !unstable_options_enabled {
if let Err(error) = cg.link_self_contained.check_unstable_variants(&target_triple) {
early_dcx.early_fatal(error);
}
if let Some(flavor) = cg.linker_flavor {
@ -2697,7 +2747,6 @@ pub fn build_session_options(early_dcx: &mut EarlyDiagCtxt, matches: &getopts::M
let cg = cg;
let target_triple = parse_target_triple(early_dcx, matches);
let opt_level = parse_opt_level(early_dcx, matches, &cg);
// The `-g` and `-C debuginfo` flags specify the same setting, so we want to be able
// to use them interchangeably. See the note above (regarding `-O` and `-C opt-level`)
@ -2706,6 +2755,12 @@ pub fn build_session_options(early_dcx: &mut EarlyDiagCtxt, matches: &getopts::M
let debuginfo = select_debuginfo(matches, &cg);
let debuginfo_compression = unstable_opts.debuginfo_compression;
if !unstable_options_enabled {
if let Err(error) = cg.linker_features.check_unstable_variants(&target_triple) {
early_dcx.early_fatal(error);
}
}
let crate_name = matches.opt_str("crate-name");
let unstable_features = UnstableFeatures::from_environment(crate_name.as_deref());
// Parse any `-l` flags, which link to native libraries.

View file

@ -2015,6 +2015,8 @@ options! {
on a C toolchain or linker installed in the system"),
linker: Option<PathBuf> = (None, parse_opt_pathbuf, [UNTRACKED],
"system linker to link outputs with"),
linker_features: LinkerFeaturesCli = (LinkerFeaturesCli::default(), parse_linker_features, [UNTRACKED],
"a comma-separated list of linker features to enable (+) or disable (-): `lld`"),
linker_flavor: Option<LinkerFlavorCli> = (None, parse_linker_flavor, [UNTRACKED],
"linker flavor"),
linker_plugin_lto: LinkerPluginLto = (LinkerPluginLto::Disabled,
@ -2307,8 +2309,6 @@ options! {
"link native libraries in the linker invocation (default: yes)"),
link_only: bool = (false, parse_bool, [TRACKED],
"link the `.rlink` file generated by `-Z no-link` (default: no)"),
linker_features: LinkerFeaturesCli = (LinkerFeaturesCli::default(), parse_linker_features, [UNTRACKED],
"a comma-separated list of linker features to enable (+) or disable (-): `lld`"),
lint_llvm_ir: bool = (false, parse_bool, [TRACKED],
"lint LLVM IR (default: no)"),
lint_mir: bool = (false, parse_bool, [UNTRACKED],

View file

@ -2194,6 +2194,7 @@ symbols! {
type_changing_struct_update,
type_const,
type_id,
type_id_eq,
type_ir,
type_ir_infer_ctxt_like,
type_ir_inherent,

View file

@ -725,7 +725,7 @@ impl ToJson for LinkSelfContainedComponents {
}
bitflags::bitflags! {
/// The `-Z linker-features` components that can individually be enabled or disabled.
/// The `-C linker-features` components that can individually be enabled or disabled.
///
/// They are feature flags intended to be a more flexible mechanism than linker flavors, and
/// also to prevent a combinatorial explosion of flavors whenever a new linker feature is
@ -756,7 +756,7 @@ bitflags::bitflags! {
rustc_data_structures::external_bitflags_debug! { LinkerFeatures }
impl LinkerFeatures {
/// Parses a single `-Z linker-features` well-known feature, not a set of flags.
/// Parses a single `-C linker-features` well-known feature, not a set of flags.
pub fn from_str(s: &str) -> Option<LinkerFeatures> {
Some(match s {
"cc" => LinkerFeatures::CC,
@ -765,6 +765,17 @@ impl LinkerFeatures {
})
}
/// Returns the CLI name of this linker feature, as it would be passed on the
/// command line.
///
/// Returns `None` when `self` is not a single well-known feature (e.g. an
/// empty set or a union of multiple flags).
pub fn as_str(self) -> Option<&'static str> {
    if self == LinkerFeatures::CC {
        Some("cc")
    } else if self == LinkerFeatures::LLD {
        Some("lld")
    } else {
        None
    }
}
/// Returns whether the `lld` linker feature is enabled.
pub fn is_lld_enabled(self) -> bool {
self.contains(LinkerFeatures::LLD)

View file

@ -4,7 +4,7 @@ use rustc_errors::{Applicability, Diag, E0283, E0284, E0790, MultiSpan, struct_s
use rustc_hir as hir;
use rustc_hir::LangItem;
use rustc_hir::def::{DefKind, Res};
use rustc_hir::def_id::DefId;
use rustc_hir::def_id::{CRATE_DEF_ID, DefId};
use rustc_hir::intravisit::Visitor as _;
use rustc_infer::infer::{BoundRegionConversionTime, InferCtxt};
use rustc_infer::traits::util::elaborate;
@ -128,19 +128,26 @@ pub fn compute_applicable_impls_for_diagnostics<'tcx>(
},
);
let predicates =
tcx.predicates_of(obligation.cause.body_id.to_def_id()).instantiate_identity(tcx);
for (pred, span) in elaborate(tcx, predicates.into_iter()) {
let kind = pred.kind();
if let ty::ClauseKind::Trait(trait_pred) = kind.skip_binder()
&& param_env_candidate_may_apply(kind.rebind(trait_pred))
{
if kind.rebind(trait_pred.trait_ref)
== ty::Binder::dummy(ty::TraitRef::identity(tcx, trait_pred.def_id()))
// If our `body_id` has been set (and isn't just from a dummy obligation cause),
// then try to look for a param-env clause that would apply. The way we compute
// this is somewhat manual, since we need the spans, so we elaborate this directly
// from `predicates_of` rather than actually looking at the param-env which
// otherwise would be more appropriate.
let body_id = obligation.cause.body_id;
if body_id != CRATE_DEF_ID {
let predicates = tcx.predicates_of(body_id.to_def_id()).instantiate_identity(tcx);
for (pred, span) in elaborate(tcx, predicates.into_iter()) {
let kind = pred.kind();
if let ty::ClauseKind::Trait(trait_pred) = kind.skip_binder()
&& param_env_candidate_may_apply(kind.rebind(trait_pred))
{
ambiguities.push(CandidateSource::ParamEnv(tcx.def_span(trait_pred.def_id())))
} else {
ambiguities.push(CandidateSource::ParamEnv(span))
if kind.rebind(trait_pred.trait_ref)
== ty::Binder::dummy(ty::TraitRef::identity(tcx, trait_pred.def_id()))
{
ambiguities.push(CandidateSource::ParamEnv(tcx.def_span(trait_pred.def_id())))
} else {
ambiguities.push(CandidateSource::ParamEnv(span))
}
}
}
}

View file

@ -1933,7 +1933,7 @@ impl<'a, 'tcx> TypeErrCtxt<'a, 'tcx> {
StringPart::highlighted("multiple different versions".to_string()),
StringPart::normal(" of crate `".to_string()),
StringPart::highlighted(format!("{crate_name}")),
StringPart::normal("` in the dependency graph\n".to_string()),
StringPart::normal("` in the dependency graph".to_string()),
],
);
if points_at_type {

View file

@ -1581,12 +1581,15 @@ impl<'a, 'tcx> TypeErrCtxt<'a, 'tcx> {
'outer: loop {
while let hir::ExprKind::AddrOf(_, _, borrowed) = expr.kind {
count += 1;
let span = if expr.span.eq_ctxt(borrowed.span) {
expr.span.until(borrowed.span)
} else {
expr.span.with_hi(expr.span.lo() + BytePos(1))
};
let span =
if let Some(borrowed_span) = borrowed.span.find_ancestor_inside(expr.span) {
expr.span.until(borrowed_span)
} else {
break 'outer;
};
// Double check that the span we extracted actually corresponds to a borrow,
// rather than some macro garbage.
match self.tcx.sess.source_map().span_to_snippet(span) {
Ok(snippet) if snippet.starts_with("&") => {}
_ => break 'outer,

View file

@ -19,7 +19,7 @@ use rustc_middle::{bug, span_bug};
use tracing::{debug, instrument, trace};
use super::SelectionCandidate::*;
use super::{BuiltinImplConditions, SelectionCandidateSet, SelectionContext, TraitObligationStack};
use super::{SelectionCandidateSet, SelectionContext, TraitObligationStack};
use crate::traits::util;
impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
@ -75,8 +75,10 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
self.assemble_candidates_from_impls(obligation, &mut candidates);
// For other types, we'll use the builtin rules.
let copy_conditions = self.copy_clone_conditions(obligation);
self.assemble_builtin_bound_candidates(copy_conditions, &mut candidates);
self.assemble_builtin_copy_clone_candidate(
obligation.predicate.self_ty().skip_binder(),
&mut candidates,
);
}
Some(LangItem::DiscriminantKind) => {
// `DiscriminantKind` is automatically implemented for every type.
@ -88,14 +90,14 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
}
Some(LangItem::Sized) => {
self.assemble_builtin_sized_candidate(
obligation,
obligation.predicate.self_ty().skip_binder(),
&mut candidates,
SizedTraitKind::Sized,
);
}
Some(LangItem::MetaSized) => {
self.assemble_builtin_sized_candidate(
obligation,
obligation.predicate.self_ty().skip_binder(),
&mut candidates,
SizedTraitKind::MetaSized,
);
@ -357,14 +359,7 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
obligation: &PolyTraitObligation<'tcx>,
candidates: &mut SelectionCandidateSet<'tcx>,
) {
let self_ty = obligation.self_ty().skip_binder();
// gen constructs get lowered to a special kind of coroutine that
// should directly `impl FusedIterator`.
if let ty::Coroutine(did, ..) = self_ty.kind()
&& self.tcx().coroutine_is_gen(*did)
{
debug!(?self_ty, ?obligation, "assemble_fused_iterator_candidates",);
if self.coroutine_is_gen(obligation.self_ty().skip_binder()) {
candidates.vec.push(BuiltinCandidate);
}
}
@ -1113,41 +1108,164 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
}
}
/// Assembles the `Sized` and `MetaSized` traits which are built-in to the language itself.
/// Assembles `Copy` and `Clone` candidates for built-in types with no libcore-defined
/// `Copy` or `Clone` impls.
#[instrument(level = "debug", skip(self, candidates))]
fn assemble_builtin_sized_candidate(
fn assemble_builtin_copy_clone_candidate(
&mut self,
obligation: &PolyTraitObligation<'tcx>,
self_ty: Ty<'tcx>,
candidates: &mut SelectionCandidateSet<'tcx>,
sizedness: SizedTraitKind,
) {
match self.sizedness_conditions(obligation, sizedness) {
BuiltinImplConditions::Where(_nested) => {
candidates.vec.push(SizedCandidate);
match *self_ty.kind() {
// These impls are built-in because we cannot express sufficiently
// generic impls in libcore.
ty::FnDef(..)
| ty::FnPtr(..)
| ty::Error(_)
| ty::Tuple(..)
| ty::CoroutineWitness(..)
| ty::Pat(..) => {
candidates.vec.push(BuiltinCandidate);
}
BuiltinImplConditions::None => {}
BuiltinImplConditions::Ambiguous => {
// Implementations provided in libcore.
ty::Uint(_)
| ty::Int(_)
| ty::Infer(ty::IntVar(_) | ty::FloatVar(_))
| ty::Bool
| ty::Float(_)
| ty::Char
| ty::RawPtr(..)
| ty::Never
| ty::Ref(_, _, hir::Mutability::Not)
| ty::Array(..) => {}
// FIXME(unsafe_binder): Should we conditionally
// (i.e. universally) implement copy/clone?
ty::UnsafeBinder(_) => {}
// Not `Sized`, which is a supertrait of `Copy`/`Clone`.
ty::Dynamic(..) | ty::Str | ty::Slice(..) | ty::Foreign(..) => {}
// Not `Copy` or `Clone` by design.
ty::Ref(_, _, hir::Mutability::Mut) => {}
ty::Coroutine(coroutine_def_id, args) => {
match self.tcx().coroutine_movability(coroutine_def_id) {
hir::Movability::Static => {}
hir::Movability::Movable => {
if self.tcx().features().coroutine_clone() {
let resolved_upvars =
self.infcx.shallow_resolve(args.as_coroutine().tupled_upvars_ty());
let resolved_witness =
self.infcx.shallow_resolve(args.as_coroutine().witness());
if resolved_upvars.is_ty_var() || resolved_witness.is_ty_var() {
// Not yet resolved.
candidates.ambiguous = true;
} else {
candidates.vec.push(BuiltinCandidate);
}
}
}
}
}
ty::Closure(_, args) => {
let resolved_upvars =
self.infcx.shallow_resolve(args.as_closure().tupled_upvars_ty());
if resolved_upvars.is_ty_var() {
// Not yet resolved.
candidates.ambiguous = true;
} else {
candidates.vec.push(BuiltinCandidate);
}
}
ty::CoroutineClosure(_, args) => {
let resolved_upvars =
self.infcx.shallow_resolve(args.as_coroutine_closure().tupled_upvars_ty());
if resolved_upvars.is_ty_var() {
// Not yet resolved.
candidates.ambiguous = true;
} else {
candidates.vec.push(BuiltinCandidate);
}
}
// Fallback to whatever user-defined impls or param-env clauses exist in this case.
ty::Adt(..) | ty::Alias(..) | ty::Param(..) | ty::Placeholder(..) => {}
ty::Infer(ty::TyVar(_)) => {
candidates.ambiguous = true;
}
// Only appears when assembling higher-ranked `for<T> T: Clone`.
ty::Bound(..) => {}
ty::Infer(ty::FreshTy(_) | ty::FreshIntTy(_) | ty::FreshFloatTy(_)) => {
bug!("asked to assemble builtin bounds of unexpected type: {:?}", self_ty);
}
}
}
/// Assembles the trait which are built-in to the language itself:
/// e.g. `Copy` and `Clone`.
/// Assembles the `Sized` and `MetaSized` traits which are built-in to the language itself.
#[instrument(level = "debug", skip(self, candidates))]
fn assemble_builtin_bound_candidates(
fn assemble_builtin_sized_candidate(
&mut self,
conditions: BuiltinImplConditions<'tcx>,
self_ty: Ty<'tcx>,
candidates: &mut SelectionCandidateSet<'tcx>,
sizedness: SizedTraitKind,
) {
match conditions {
BuiltinImplConditions::Where(_) => {
candidates.vec.push(BuiltinCandidate);
match *self_ty.kind() {
// Always sized.
ty::Infer(ty::IntVar(_) | ty::FloatVar(_))
| ty::Uint(_)
| ty::Int(_)
| ty::Bool
| ty::Float(_)
| ty::FnDef(..)
| ty::FnPtr(..)
| ty::RawPtr(..)
| ty::Char
| ty::Ref(..)
| ty::Coroutine(..)
| ty::CoroutineWitness(..)
| ty::Array(..)
| ty::Closure(..)
| ty::CoroutineClosure(..)
| ty::Never
| ty::Error(_) => {
candidates.vec.push(SizedCandidate);
}
BuiltinImplConditions::None => {}
BuiltinImplConditions::Ambiguous => {
// Conditionally `Sized`.
ty::Tuple(..) | ty::Pat(..) | ty::Adt(..) | ty::UnsafeBinder(_) => {
candidates.vec.push(SizedCandidate);
}
// `MetaSized` but not `Sized`.
ty::Str | ty::Slice(_) | ty::Dynamic(..) => match sizedness {
SizedTraitKind::Sized => {}
SizedTraitKind::MetaSized => {
candidates.vec.push(SizedCandidate);
}
},
// Not `MetaSized` or `Sized`.
ty::Foreign(..) => {}
ty::Alias(..) | ty::Param(_) | ty::Placeholder(..) => {}
ty::Infer(ty::TyVar(_)) => {
candidates.ambiguous = true;
}
// Only appears when assembling higher-ranked `for<T> T: Sized`.
ty::Bound(..) => {}
ty::Infer(ty::FreshTy(_) | ty::FreshIntTy(_) | ty::FreshFloatTy(_)) => {
bug!("asked to assemble builtin bounds of unexpected type: {:?}", self_ty);
}
}
}

View file

@ -21,7 +21,7 @@ use thin_vec::thin_vec;
use tracing::{debug, instrument};
use super::SelectionCandidate::{self, *};
use super::{BuiltinImplConditions, PredicateObligations, SelectionContext};
use super::{PredicateObligations, SelectionContext};
use crate::traits::normalize::{normalize_with_depth, normalize_with_depth_to};
use crate::traits::util::{self, closure_trait_ref_and_return_type};
use crate::traits::{
@ -257,16 +257,25 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
debug!(?obligation, "confirm_builtin_candidate");
let tcx = self.tcx();
let trait_def = obligation.predicate.def_id();
let conditions = match tcx.as_lang_item(trait_def) {
Some(LangItem::Sized) => self.sizedness_conditions(obligation, SizedTraitKind::Sized),
let self_ty = self.infcx.shallow_resolve(
self.infcx.enter_forall_and_leak_universe(obligation.predicate.self_ty()),
);
let types = match tcx.as_lang_item(trait_def) {
Some(LangItem::Sized) => self.sizedness_conditions(self_ty, SizedTraitKind::Sized),
Some(LangItem::MetaSized) => {
self.sizedness_conditions(obligation, SizedTraitKind::MetaSized)
self.sizedness_conditions(self_ty, SizedTraitKind::MetaSized)
}
Some(LangItem::PointeeSized) => {
bug!("`PointeeSized` is removing during lowering");
}
Some(LangItem::Copy | LangItem::Clone) => self.copy_clone_conditions(obligation),
Some(LangItem::FusedIterator) => self.fused_iterator_conditions(obligation),
Some(LangItem::Copy | LangItem::Clone) => self.copy_clone_conditions(self_ty),
Some(LangItem::FusedIterator) => {
if self.coroutine_is_gen(self_ty) {
ty::Binder::dummy(vec![])
} else {
unreachable!("tried to assemble `FusedIterator` for non-gen coroutine");
}
}
Some(
LangItem::Destruct
| LangItem::DiscriminantKind
@ -274,12 +283,9 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
| LangItem::PointeeTrait
| LangItem::Tuple
| LangItem::Unpin,
) => BuiltinImplConditions::Where(ty::Binder::dummy(vec![])),
) => ty::Binder::dummy(vec![]),
other => bug!("unexpected builtin trait {trait_def:?} ({other:?})"),
};
let BuiltinImplConditions::Where(types) = conditions else {
bug!("obligation {:?} had matched a builtin impl but now doesn't", obligation);
};
let types = self.infcx.enter_forall_and_leak_universe(types);
let cause = obligation.derived_cause(ObligationCauseCode::BuiltinDerived);
@ -403,6 +409,7 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
let self_ty =
obligation.predicate.self_ty().map_bound(|ty| self.infcx.shallow_resolve(ty));
let self_ty = self.infcx.enter_forall_and_leak_universe(self_ty);
let types = self.constituent_types_for_ty(self_ty)?;
let types = self.infcx.enter_forall_and_leak_universe(types);

View file

@ -188,18 +188,6 @@ struct EvaluatedCandidate<'tcx> {
evaluation: EvaluationResult,
}
/// When does the builtin impl for `T: Trait` apply?
#[derive(Debug)]
enum BuiltinImplConditions<'tcx> {
/// The impl is conditional on `T1, T2, ...: Trait`.
Where(ty::Binder<'tcx, Vec<Ty<'tcx>>>),
/// There is no built-in impl. There may be some other
/// candidate (a where-clause or user-defined impl).
None,
/// It is unknown whether there is an impl.
Ambiguous,
}
impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
pub fn new(infcx: &'cx InferCtxt<'tcx>) -> SelectionContext<'cx, 'tcx> {
SelectionContext {
@ -2104,14 +2092,9 @@ impl<'tcx> SelectionContext<'_, 'tcx> {
impl<'tcx> SelectionContext<'_, 'tcx> {
fn sizedness_conditions(
&mut self,
obligation: &PolyTraitObligation<'tcx>,
self_ty: Ty<'tcx>,
sizedness: SizedTraitKind,
) -> BuiltinImplConditions<'tcx> {
use self::BuiltinImplConditions::{Ambiguous, None, Where};
// NOTE: binder moved to (*)
let self_ty = self.infcx.shallow_resolve(obligation.predicate.skip_binder().self_ty());
) -> ty::Binder<'tcx, Vec<Ty<'tcx>>> {
match self_ty.kind() {
ty::Infer(ty::IntVar(_) | ty::FloatVar(_))
| ty::Uint(_)
@ -2129,59 +2112,44 @@ impl<'tcx> SelectionContext<'_, 'tcx> {
| ty::Closure(..)
| ty::CoroutineClosure(..)
| ty::Never
| ty::Error(_) => {
// safe for everything
Where(ty::Binder::dummy(Vec::new()))
}
| ty::Error(_) => ty::Binder::dummy(vec![]),
ty::Str | ty::Slice(_) | ty::Dynamic(..) => match sizedness {
SizedTraitKind::Sized => None,
SizedTraitKind::MetaSized => Where(ty::Binder::dummy(Vec::new())),
SizedTraitKind::Sized => unreachable!("tried to assemble `Sized` for unsized type"),
SizedTraitKind::MetaSized => ty::Binder::dummy(vec![]),
},
ty::Foreign(..) => None,
ty::Foreign(..) => unreachable!("tried to assemble `Sized` for unsized type"),
ty::Tuple(tys) => Where(
obligation.predicate.rebind(tys.last().map_or_else(Vec::new, |&last| vec![last])),
),
ty::Tuple(tys) => {
ty::Binder::dummy(tys.last().map_or_else(Vec::new, |&last| vec![last]))
}
ty::Pat(ty, _) => Where(obligation.predicate.rebind(vec![*ty])),
ty::Pat(ty, _) => ty::Binder::dummy(vec![*ty]),
ty::Adt(def, args) => {
if let Some(crit) = def.sizedness_constraint(self.tcx(), sizedness) {
// (*) binder moved here
Where(obligation.predicate.rebind(vec![crit.instantiate(self.tcx(), args)]))
ty::Binder::dummy(vec![crit.instantiate(self.tcx(), args)])
} else {
Where(ty::Binder::dummy(Vec::new()))
ty::Binder::dummy(vec![])
}
}
// FIXME(unsafe_binders): This binder needs to be squashed
ty::UnsafeBinder(binder_ty) => Where(binder_ty.map_bound(|ty| vec![ty])),
ty::UnsafeBinder(binder_ty) => binder_ty.map_bound(|ty| vec![ty]),
ty::Alias(..) | ty::Param(_) | ty::Placeholder(..) => None,
ty::Infer(ty::TyVar(_)) => Ambiguous,
// We can make this an ICE if/once we actually instantiate the trait obligation eagerly.
ty::Bound(..) => None,
ty::Infer(ty::FreshTy(_) | ty::FreshIntTy(_) | ty::FreshFloatTy(_)) => {
bug!("asked to assemble builtin bounds of unexpected type: {:?}", self_ty);
ty::Alias(..)
| ty::Param(_)
| ty::Placeholder(..)
| ty::Infer(ty::TyVar(_) | ty::FreshTy(_) | ty::FreshIntTy(_) | ty::FreshFloatTy(_))
| ty::Bound(..) => {
bug!("asked to assemble `Sized` of unexpected type: {:?}", self_ty);
}
}
}
fn copy_clone_conditions(
&mut self,
obligation: &PolyTraitObligation<'tcx>,
) -> BuiltinImplConditions<'tcx> {
// NOTE: binder moved to (*)
let self_ty = self.infcx.shallow_resolve(obligation.predicate.skip_binder().self_ty());
use self::BuiltinImplConditions::{Ambiguous, None, Where};
fn copy_clone_conditions(&mut self, self_ty: Ty<'tcx>) -> ty::Binder<'tcx, Vec<Ty<'tcx>>> {
match *self_ty.kind() {
ty::FnDef(..) | ty::FnPtr(..) | ty::Error(_) => Where(ty::Binder::dummy(Vec::new())),
ty::FnDef(..) | ty::FnPtr(..) | ty::Error(_) => ty::Binder::dummy(vec![]),
ty::Uint(_)
| ty::Int(_)
@ -2193,127 +2161,78 @@ impl<'tcx> SelectionContext<'_, 'tcx> {
| ty::Never
| ty::Ref(_, _, hir::Mutability::Not)
| ty::Array(..) => {
// Implementations provided in libcore
None
unreachable!("tried to assemble `Sized` for type with libcore-provided impl")
}
// FIXME(unsafe_binder): Should we conditionally
// (i.e. universally) implement copy/clone?
ty::UnsafeBinder(_) => None,
ty::Dynamic(..)
| ty::Str
| ty::Slice(..)
| ty::Foreign(..)
| ty::Ref(_, _, hir::Mutability::Mut) => None,
ty::UnsafeBinder(_) => unreachable!("tried to assemble `Sized` for unsafe binder"),
ty::Tuple(tys) => {
// (*) binder moved here
Where(obligation.predicate.rebind(tys.iter().collect()))
ty::Binder::dummy(tys.iter().collect())
}
ty::Pat(ty, _) => {
// (*) binder moved here
Where(obligation.predicate.rebind(vec![ty]))
ty::Binder::dummy(vec![ty])
}
ty::Coroutine(coroutine_def_id, args) => {
match self.tcx().coroutine_movability(coroutine_def_id) {
hir::Movability::Static => None,
hir::Movability::Static => {
unreachable!("tried to assemble `Sized` for static coroutine")
}
hir::Movability::Movable => {
if self.tcx().features().coroutine_clone() {
let resolved_upvars =
self.infcx.shallow_resolve(args.as_coroutine().tupled_upvars_ty());
let resolved_witness =
self.infcx.shallow_resolve(args.as_coroutine().witness());
if resolved_upvars.is_ty_var() || resolved_witness.is_ty_var() {
// Not yet resolved.
Ambiguous
} else {
let all = args
.as_coroutine()
ty::Binder::dummy(
args.as_coroutine()
.upvar_tys()
.iter()
.chain([args.as_coroutine().witness()])
.collect::<Vec<_>>();
Where(obligation.predicate.rebind(all))
}
.collect::<Vec<_>>(),
)
} else {
None
unreachable!(
"tried to assemble `Sized` for coroutine without enabled feature"
)
}
}
}
}
ty::CoroutineWitness(def_id, args) => {
let hidden_types = rebind_coroutine_witness_types(
self.infcx.tcx,
def_id,
args,
obligation.predicate.bound_vars(),
);
Where(hidden_types)
}
ty::CoroutineWitness(def_id, args) => self
.infcx
.tcx
.coroutine_hidden_types(def_id)
.instantiate(self.infcx.tcx, args)
.map_bound(|witness| witness.types.to_vec()),
ty::Closure(_, args) => {
// (*) binder moved here
let ty = self.infcx.shallow_resolve(args.as_closure().tupled_upvars_ty());
if let ty::Infer(ty::TyVar(_)) = ty.kind() {
// Not yet resolved.
Ambiguous
} else {
Where(obligation.predicate.rebind(args.as_closure().upvar_tys().to_vec()))
}
}
ty::Closure(_, args) => ty::Binder::dummy(args.as_closure().upvar_tys().to_vec()),
ty::CoroutineClosure(_, args) => {
// (*) binder moved here
let ty = self.infcx.shallow_resolve(args.as_coroutine_closure().tupled_upvars_ty());
if let ty::Infer(ty::TyVar(_)) = ty.kind() {
// Not yet resolved.
Ambiguous
} else {
Where(
obligation
.predicate
.rebind(args.as_coroutine_closure().upvar_tys().to_vec()),
)
}
ty::Binder::dummy(args.as_coroutine_closure().upvar_tys().to_vec())
}
ty::Adt(..) | ty::Alias(..) | ty::Param(..) | ty::Placeholder(..) => {
// Fallback to whatever user-defined impls exist in this case.
None
}
ty::Infer(ty::TyVar(_)) => {
// Unbound type variable. Might or might not have
// applicable impls and so forth, depending on what
// those type variables wind up being bound to.
Ambiguous
}
// We can make this an ICE if/once we actually instantiate the trait obligation eagerly.
ty::Bound(..) => None,
ty::Infer(ty::FreshTy(_) | ty::FreshIntTy(_) | ty::FreshFloatTy(_)) => {
ty::Foreign(..)
| ty::Str
| ty::Slice(_)
| ty::Dynamic(..)
| ty::Adt(..)
| ty::Alias(..)
| ty::Param(..)
| ty::Placeholder(..)
| ty::Bound(..)
| ty::Ref(_, _, ty::Mutability::Mut)
| ty::Infer(ty::TyVar(_) | ty::FreshTy(_) | ty::FreshIntTy(_) | ty::FreshFloatTy(_)) => {
bug!("asked to assemble builtin bounds of unexpected type: {:?}", self_ty);
}
}
}
fn fused_iterator_conditions(
&mut self,
obligation: &PolyTraitObligation<'tcx>,
) -> BuiltinImplConditions<'tcx> {
let self_ty = self.infcx.shallow_resolve(obligation.self_ty().skip_binder());
if let ty::Coroutine(did, ..) = *self_ty.kind()
&& self.tcx().coroutine_is_gen(did)
{
BuiltinImplConditions::Where(ty::Binder::dummy(Vec::new()))
} else {
BuiltinImplConditions::None
}
fn coroutine_is_gen(&mut self, self_ty: Ty<'tcx>) -> bool {
matches!(*self_ty.kind(), ty::Coroutine(did, ..)
if self.tcx().coroutine_is_gen(did))
}
/// For default impls, we need to break apart a type into its
@ -2330,9 +2249,9 @@ impl<'tcx> SelectionContext<'_, 'tcx> {
#[instrument(level = "debug", skip(self), ret)]
fn constituent_types_for_ty(
&self,
t: ty::Binder<'tcx, Ty<'tcx>>,
t: Ty<'tcx>,
) -> Result<ty::Binder<'tcx, Vec<Ty<'tcx>>>, SelectionError<'tcx>> {
Ok(match *t.skip_binder().kind() {
Ok(match *t.kind() {
ty::Uint(_)
| ty::Int(_)
| ty::Bool
@ -2349,8 +2268,7 @@ impl<'tcx> SelectionContext<'_, 'tcx> {
// `assemble_candidates_from_auto_impls`.
ty::Foreign(..) => ty::Binder::dummy(Vec::new()),
// FIXME(unsafe_binders): Squash the double binder for now, I guess.
ty::UnsafeBinder(_) => return Err(SelectionError::Unimplemented),
ty::UnsafeBinder(ty) => ty.map_bound(|ty| vec![ty]),
// Treat this like `struct str([u8]);`
ty::Str => ty::Binder::dummy(vec![Ty::new_slice(self.tcx(), self.tcx().types.u8)]),
@ -2364,40 +2282,47 @@ impl<'tcx> SelectionContext<'_, 'tcx> {
bug!("asked to assemble constituent types of unexpected type: {:?}", t);
}
ty::RawPtr(element_ty, _) | ty::Ref(_, element_ty, _) => t.rebind(vec![element_ty]),
ty::RawPtr(element_ty, _) | ty::Ref(_, element_ty, _) => {
ty::Binder::dummy(vec![element_ty])
}
ty::Pat(ty, _) | ty::Array(ty, _) | ty::Slice(ty) => t.rebind(vec![ty]),
ty::Pat(ty, _) | ty::Array(ty, _) | ty::Slice(ty) => ty::Binder::dummy(vec![ty]),
ty::Tuple(tys) => {
// (T1, ..., Tn) -- meets any bound that all of T1...Tn meet
t.rebind(tys.iter().collect())
ty::Binder::dummy(tys.iter().collect())
}
ty::Closure(_, args) => {
let ty = self.infcx.shallow_resolve(args.as_closure().tupled_upvars_ty());
t.rebind(vec![ty])
ty::Binder::dummy(vec![ty])
}
ty::CoroutineClosure(_, args) => {
let ty = self.infcx.shallow_resolve(args.as_coroutine_closure().tupled_upvars_ty());
t.rebind(vec![ty])
ty::Binder::dummy(vec![ty])
}
ty::Coroutine(_, args) => {
let ty = self.infcx.shallow_resolve(args.as_coroutine().tupled_upvars_ty());
let witness = args.as_coroutine().witness();
t.rebind([ty].into_iter().chain(iter::once(witness)).collect())
ty::Binder::dummy([ty].into_iter().chain(iter::once(witness)).collect())
}
ty::CoroutineWitness(def_id, args) => {
rebind_coroutine_witness_types(self.infcx.tcx, def_id, args, t.bound_vars())
}
ty::CoroutineWitness(def_id, args) => self
.infcx
.tcx
.coroutine_hidden_types(def_id)
.instantiate(self.infcx.tcx, args)
.map_bound(|witness| witness.types.to_vec()),
// For `PhantomData<T>`, we pass `T`.
ty::Adt(def, args) if def.is_phantom_data() => t.rebind(args.types().collect()),
ty::Adt(def, args) if def.is_phantom_data() => {
ty::Binder::dummy(args.types().collect())
}
ty::Adt(def, args) => {
t.rebind(def.all_fields().map(|f| f.ty(self.tcx(), args)).collect())
ty::Binder::dummy(def.all_fields().map(|f| f.ty(self.tcx(), args)).collect())
}
ty::Alias(ty::Opaque, ty::AliasTy { def_id, args, .. }) => {
@ -2408,7 +2333,7 @@ impl<'tcx> SelectionContext<'_, 'tcx> {
// which enforces a DAG between the functions requiring
// the auto trait bounds in question.
match self.tcx().type_of_opaque(def_id) {
Ok(ty) => t.rebind(vec![ty.instantiate(self.tcx(), args)]),
Ok(ty) => ty::Binder::dummy(vec![ty.instantiate(self.tcx(), args)]),
Err(_) => {
return Err(SelectionError::OpaqueTypeAutoTraitLeakageUnknown(def_id));
}
@ -2880,23 +2805,6 @@ impl<'tcx> SelectionContext<'_, 'tcx> {
}
}
fn rebind_coroutine_witness_types<'tcx>(
tcx: TyCtxt<'tcx>,
def_id: DefId,
args: ty::GenericArgsRef<'tcx>,
bound_vars: &'tcx ty::List<ty::BoundVariableKind>,
) -> ty::Binder<'tcx, Vec<Ty<'tcx>>> {
let bound_coroutine_types = tcx.coroutine_hidden_types(def_id).skip_binder();
let shifted_coroutine_types =
tcx.shift_bound_var_indices(bound_vars.len(), bound_coroutine_types.skip_binder());
ty::Binder::bind_with_vars(
ty::EarlyBinder::bind(shifted_coroutine_types.types.to_vec()).instantiate(tcx, args),
tcx.mk_bound_variable_kinds_from_iter(
bound_vars.iter().chain(bound_coroutine_types.bound_vars()),
),
)
}
impl<'o, 'tcx> TraitObligationStack<'o, 'tcx> {
fn list(&'o self) -> TraitObligationStackList<'o, 'tcx> {
TraitObligationStackList::with(self)

View file

@ -23,6 +23,9 @@ pub enum GlobalAlloc {
Static(StaticDef),
/// The alloc ID points to memory.
Memory(Allocation),
/// The first pointer-sized segment of a type id. On 64 bit systems, the 128 bit type id
/// is split into two segments, on 32 bit systems there are 4 segments, and so on.
TypeId { ty: Ty },
}
impl From<AllocId> for GlobalAlloc {

View file

@ -864,6 +864,9 @@ impl<'tcx> Stable<'tcx> for mir::interpret::GlobalAlloc<'tcx> {
mir::interpret::GlobalAlloc::Memory(alloc) => {
GlobalAlloc::Memory(alloc.stable(tables, cx))
}
mir::interpret::GlobalAlloc::TypeId { ty } => {
GlobalAlloc::TypeId { ty: ty.stable(tables, cx) }
}
}
}
}

View file

@ -107,8 +107,10 @@
#![feature(char_max_len)]
#![feature(clone_to_uninit)]
#![feature(coerce_unsized)]
#![feature(const_default)]
#![feature(const_eval_select)]
#![feature(const_heap)]
#![feature(const_trait_impl)]
#![feature(core_intrinsics)]
#![feature(deprecated_suggestion)]
#![feature(deref_pure_trait)]

View file

@ -2611,7 +2611,8 @@ impl_eq! { Cow<'a, str>, &'b str }
impl_eq! { Cow<'a, str>, String }
#[stable(feature = "rust1", since = "1.0.0")]
impl Default for String {
#[rustc_const_unstable(feature = "const_default", issue = "67792")]
impl const Default for String {
/// Creates an empty `String`.
#[inline]
fn default() -> String {

View file

@ -3895,7 +3895,8 @@ unsafe impl<#[may_dangle] T, A: Allocator> Drop for Vec<T, A> {
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> Default for Vec<T> {
#[rustc_const_unstable(feature = "const_default", issue = "67792")]
impl<T> const Default for Vec<T> {
/// Creates an empty `Vec<T>`.
///
/// The vector will not allocate until elements are pushed onto it.

View file

@ -195,6 +195,25 @@ jobs:
run: ./ci/update-musl.sh
- run: cargo clippy --workspace --all-targets
build-custom:
name: Build custom target
runs-on: ubuntu-24.04
timeout-minutes: 10
steps:
- uses: actions/checkout@v4
- name: Install Rust
run: |
rustup update nightly --no-self-update
rustup default nightly
rustup component add rust-src
- uses: Swatinem/rust-cache@v2
- run: |
# Ensure we can build with custom target.json files (these can interact
# poorly with build scripts)
cargo build -p compiler_builtins -p libm \
--target etc/thumbv7em-none-eabi-renamed.json \
-Zbuild-std=core
benchmarks:
name: Benchmarks
timeout-minutes: 20
@ -331,6 +350,7 @@ jobs:
success:
needs:
- benchmarks
- build-custom
- clippy
- extensive
- miri

View file

@ -6,6 +6,5 @@ fn main() {
println!("cargo::rerun-if-changed=../configure.rs");
let target = builtins_configure::Target::from_env();
builtins_configure::configure_f16_f128(&target);
builtins_configure::configure_aliases(&target);
}

View file

@ -177,6 +177,7 @@ float_bench! {
],
}
#[cfg(f128_enabled)]
float_bench! {
name: cmp_f128_gt,
sig: (a: f128, b: f128) -> CmpResult,
@ -189,6 +190,7 @@ float_bench! {
asm: []
}
#[cfg(f128_enabled)]
float_bench! {
name: cmp_f128_unord,
sig: (a: f128, b: f128) -> CmpResult,

View file

@ -116,5 +116,4 @@ fn main() {
}
builtins_configure::configure_aliases(&target);
builtins_configure::configure_f16_f128(&target);
}

View file

@ -118,7 +118,7 @@ mod i_to_f {
i128, __floattidf;
}
#[cfg(not(feature = "no-f16-f128"))]
#[cfg(f128_enabled)]
#[cfg(not(any(target_arch = "powerpc", target_arch = "powerpc64")))]
i_to_f! { f128, Quad, not(feature = "no-sys-f128-int-convert"),
u32, __floatunsitf;
@ -129,7 +129,7 @@ mod i_to_f {
i128, __floattitf;
}
#[cfg(not(feature = "no-f16-f128"))]
#[cfg(f128_enabled)]
#[cfg(any(target_arch = "powerpc", target_arch = "powerpc64"))]
i_to_f! { f128, Quad, not(feature = "no-sys-f128-int-convert"),
u32, __floatunsikf;

View file

@ -147,7 +147,7 @@ mod float_div {
f64, __divdf3, Double, all();
}
#[cfg(not(feature = "no-f16-f128"))]
#[cfg(f128_enabled)]
#[cfg(not(any(target_arch = "powerpc", target_arch = "powerpc64")))]
float! {
f128, __divtf3, Quad,
@ -156,7 +156,7 @@ mod float_div {
not(any(feature = "no-sys-f128", all(target_arch = "aarch64", target_os = "linux")));
}
#[cfg(not(feature = "no-f16-f128"))]
#[cfg(f128_enabled)]
#[cfg(any(target_arch = "powerpc", target_arch = "powerpc64"))]
float! {
f128, __divkf3, Quad, not(feature = "no-sys-f128");

Some files were not shown because too many files have changed in this diff Show more