Merge ref '269d5b56bc' from rust-lang/rust

Pull recent changes from https://github.com/rust-lang/rust via Josh.

Upstream ref: 269d5b56bc
Filtered ref: a221b1d3ebb78ec8a01dcb1fe6bb165378e2f5c9

This merge was created using https://github.com/rust-lang/josh-sync.
This commit is contained in:
The Miri Cronjob Bot 2025-08-27 05:00:43 +00:00
commit aa583798ab
310 changed files with 5539 additions and 2782 deletions

View file

@ -266,9 +266,9 @@ dependencies = [
[[package]]
name = "bitflags"
version = "2.9.2"
version = "2.9.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6a65b545ab31d687cff52899d4890855fec459eb6afe0da6417b8a18da87aa29"
checksum = "34efbcccd345379ca2868b2b2c9d3782e9cc58ba87bc7d79d5b53d9c9ae6f25d"
[[package]]
name = "blake3"
@ -5988,9 +5988,9 @@ dependencies = [
[[package]]
name = "wasi-preview1-component-adapter-provider"
version = "34.0.2"
version = "36.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "33696c5f1ff1e083de9f36c3da471abd736362bc173e093f8b0b1ed5a387e39b"
checksum = "20689c88791776219f78c2529700d15e6a9bd57a27858c62e9ef8487956b571c"
[[package]]
name = "wasm-bindgen"
@ -6052,9 +6052,9 @@ dependencies = [
[[package]]
name = "wasm-component-ld"
version = "0.5.15"
version = "0.5.16"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6d95124e34fee1316222e03b9bbf41af186ecbae2c8b79f8debe6e21b3ff60c5"
checksum = "14cd35d6cae91109a0ffd207b573cf3c741cab7e921dd376ea7aaf2c52a3408c"
dependencies = [
"anyhow",
"clap",
@ -6062,7 +6062,7 @@ dependencies = [
"libc",
"tempfile",
"wasi-preview1-component-adapter-provider",
"wasmparser 0.234.0",
"wasmparser 0.237.0",
"wat",
"windows-sys 0.59.0",
"winsplit",
@ -6089,34 +6089,24 @@ dependencies = [
[[package]]
name = "wasm-encoder"
version = "0.234.0"
version = "0.237.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "170a0157eef517a179f2d20ed7c68df9c3f7f6c1c047782d488bf5a464174684"
checksum = "efe92d1321afa53ffc88a57c497bb7330c3cf84c98ffdba4a4caf6a0684fad3c"
dependencies = [
"leb128fmt",
"wasmparser 0.234.0",
]
[[package]]
name = "wasm-encoder"
version = "0.236.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "724fccfd4f3c24b7e589d333fc0429c68042897a7e8a5f8694f31792471841e7"
dependencies = [
"leb128fmt",
"wasmparser 0.236.1",
"wasmparser 0.237.0",
]
[[package]]
name = "wasm-metadata"
version = "0.234.0"
version = "0.237.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a42fe3f5cbfb56fc65311ef827930d06189160038e81db62188f66b4bf468e3a"
checksum = "4cc0b0a0c4f35ca6efa7a797671372915d4e9659dba2d59edc6fafc931d19997"
dependencies = [
"anyhow",
"indexmap",
"wasm-encoder 0.234.0",
"wasmparser 0.234.0",
"wasm-encoder 0.237.0",
"wasmparser 0.237.0",
]
[[package]]
@ -6131,9 +6121,19 @@ dependencies = [
[[package]]
name = "wasmparser"
version = "0.234.0"
version = "0.236.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "be22e5a8f600afce671dd53c8d2dd26b4b7aa810fd18ae27dfc49737f3e02fc5"
checksum = "a9b1e81f3eb254cf7404a82cee6926a4a3ccc5aad80cc3d43608a070c67aa1d7"
dependencies = [
"bitflags",
"indexmap",
]
[[package]]
name = "wasmparser"
version = "0.237.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7d2a40ca0d2bdf4b0bf36c13a737d0b2c58e4c8aaefe1c57f336dd75369ca250"
dependencies = [
"bitflags",
"hashbrown",
@ -6142,35 +6142,24 @@ dependencies = [
"serde",
]
[[package]]
name = "wasmparser"
version = "0.236.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a9b1e81f3eb254cf7404a82cee6926a4a3ccc5aad80cc3d43608a070c67aa1d7"
dependencies = [
"bitflags",
"indexmap",
"semver",
]
[[package]]
name = "wast"
version = "236.0.1"
version = "237.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d3bec4b4db9c6808d394632fd4b0cd4654c32c540bd3237f55ee6a40fff6e51f"
checksum = "fcf66f545acbd55082485cb9a6daab54579cb8628a027162253e8e9f5963c767"
dependencies = [
"bumpalo",
"leb128fmt",
"memchr",
"unicode-width 0.2.1",
"wasm-encoder 0.236.1",
"wasm-encoder 0.237.0",
]
[[package]]
name = "wat"
version = "1.236.1"
version = "1.237.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "64475e2f77d6071ce90624098fc236285ddafa8c3ea1fb386f2c4154b6c2bbdb"
checksum = "27975186f549e4b8d6878b627be732863883c72f7bf4dcf8f96e5f8242f73da9"
dependencies = [
"wast",
]
@ -6659,9 +6648,9 @@ dependencies = [
[[package]]
name = "wit-component"
version = "0.234.0"
version = "0.237.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5a8888169acf4c6c4db535beb405b570eedac13215d6821ca9bd03190f7f8b8c"
checksum = "bfb7674f76c10e82fe00b256a9d4ffb2b8d037d42ab8e9a83ebb3be35c9d0bf6"
dependencies = [
"anyhow",
"bitflags",
@ -6670,17 +6659,17 @@ dependencies = [
"serde",
"serde_derive",
"serde_json",
"wasm-encoder 0.234.0",
"wasm-encoder 0.237.0",
"wasm-metadata",
"wasmparser 0.234.0",
"wasmparser 0.237.0",
"wit-parser",
]
[[package]]
name = "wit-parser"
version = "0.234.0"
version = "0.237.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "465492df47d8dcc015a3b7f241aed8ea03688fee7c5e04162285c5b1a3539c8b"
checksum = "ce2596a5bc7c24cc965b56ad6ff9e32394c4e401764f89620a888519c6e849ab"
dependencies = [
"anyhow",
"id-arena",
@ -6691,7 +6680,7 @@ dependencies = [
"serde_derive",
"serde_json",
"unicode-xid",
"wasmparser 0.234.0",
"wasmparser 0.237.0",
]
[[package]]

View file

@ -407,8 +407,11 @@
#build.profiler = false
# Use the optimized LLVM C intrinsics for `compiler_builtins`, rather than Rust intrinsics.
# Requires the LLVM submodule to be managed by bootstrap (i.e. not external) so that `compiler-rt`
# sources are available.
# Choosing true requires the LLVM submodule to be managed by bootstrap (i.e. not external)
# so that `compiler-rt` sources are available.
#
# Setting this to a path removes the requirement for a C toolchain, but requires setting the
# path to an existing library containing the builtins library from LLVM's compiler-rt.
#
# Setting this to `false` generates slower code, but removes the requirement for a C toolchain in
# order to run `x check`.
@ -1041,13 +1044,15 @@
#runner = <none> (string)
# Use the optimized LLVM C intrinsics for `compiler_builtins`, rather than Rust intrinsics
# on this target.
# Requires the LLVM submodule to be managed by bootstrap (i.e. not external) so that `compiler-rt`
# sources are available.
# on this target. Choosing true requires the LLVM submodule to be managed by bootstrap
# (i.e. not external) so that `compiler-rt` sources are available.
#
# Setting this to a path removes the requirement for a C toolchain, but requires setting the
# path to an existing library containing the builtins library from LLVM's compiler-rt.
#
# Setting this to `false` generates slower code, but removes the requirement for a C toolchain in
# order to run `x check`.
#optimized-compiler-builtins = build.optimized-compiler-builtins (bool)
#optimized-compiler-builtins = build.optimized-compiler-builtins (bool or path)
# Link the compiler and LLVM against `jemalloc` instead of the default libc allocator.
# This overrides the global `rust.jemalloc` option. See that option for more info.

View file

@ -86,6 +86,12 @@ attr_parsing_invalid_repr_hint_no_value =
attr_parsing_invalid_since =
'since' must be a Rust version number, such as "1.31.0"
attr_parsing_invalid_style = {$is_used_as_inner ->
[false] crate-level attribute should be an inner attribute: add an exclamation mark: `#![{$name}]`
*[other] the `#![{$name}]` attribute can only be used at the crate root
}
.note = This attribute does not have an `!`, which means it is applied to this {$target}
attr_parsing_link_ordinal_out_of_range = ordinal value in `link_ordinal` is too large: `{$ordinal}`
.note = the value may not exceed `u16::MAX`

View file

@ -127,6 +127,7 @@ impl<S: Stage> SingleAttributeParser<S> for ExportNameParser {
Warn(Target::Field),
Warn(Target::Arm),
Warn(Target::MacroDef),
Warn(Target::MacroCall),
]);
const TEMPLATE: AttributeTemplate = template!(NameValueStr: "name");
@ -174,6 +175,7 @@ impl<S: Stage> AttributeParser<S> for NakedParser {
Allow(Target::Method(MethodKind::Inherent)),
Allow(Target::Method(MethodKind::Trait { body: true })),
Allow(Target::Method(MethodKind::TraitImpl)),
Warn(Target::MacroCall),
]);
fn finalize(self, cx: &FinalizeContext<'_, '_, S>) -> Option<AttributeKind> {
@ -278,6 +280,7 @@ impl<S: Stage> NoArgsAttributeParser<S> for TrackCallerParser {
Warn(Target::MacroDef),
Warn(Target::Arm),
Warn(Target::Field),
Warn(Target::MacroCall),
]);
const CREATE: fn(Span) -> AttributeKind = AttributeKind::TrackCaller;
}
@ -365,7 +368,8 @@ impl<S: Stage> AttributeParser<S> for UsedParser {
}
},
)];
const ALLOWED_TARGETS: AllowedTargets = AllowedTargets::AllowList(&[Allow(Target::Static)]);
const ALLOWED_TARGETS: AllowedTargets =
AllowedTargets::AllowList(&[Allow(Target::Static), Warn(Target::MacroCall)]);
fn finalize(self, _cx: &FinalizeContext<'_, '_, S>) -> Option<AttributeKind> {
// Ratcheting behaviour, if both `linker` and `compiler` are specified, use `linker`
@ -450,6 +454,7 @@ impl<S: Stage> CombineAttributeParser<S> for TargetFeatureParser {
Warn(Target::Field),
Warn(Target::Arm),
Warn(Target::MacroDef),
Warn(Target::MacroCall),
]);
}

View file

@ -1,3 +1,5 @@
use rustc_feature::AttributeType;
use super::prelude::*;
pub(crate) struct CrateNameParser;
@ -7,6 +9,7 @@ impl<S: Stage> SingleAttributeParser<S> for CrateNameParser {
const ATTRIBUTE_ORDER: AttributeOrder = AttributeOrder::KeepOutermost;
const ON_DUPLICATE: OnDuplicate<S> = OnDuplicate::WarnButFutureError;
const TEMPLATE: AttributeTemplate = template!(NameValueStr: "name");
const TYPE: AttributeType = AttributeType::CrateLevel;
// FIXME: crate name is allowed on all targets and ignored,
// even though it should only be valid on crates of course

View file

@ -25,6 +25,7 @@ impl<S: Stage> SingleAttributeParser<S> for InlineParser {
Warn(Target::MacroDef),
Warn(Target::Arm),
Warn(Target::AssocConst),
Warn(Target::MacroCall),
]);
const TEMPLATE: AttributeTemplate = template!(
Word,

View file

@ -110,8 +110,11 @@ impl<S: Stage> SingleAttributeParser<S> for LinkOrdinalParser {
const PATH: &[Symbol] = &[sym::link_ordinal];
const ATTRIBUTE_ORDER: AttributeOrder = AttributeOrder::KeepOutermost;
const ON_DUPLICATE: OnDuplicate<S> = OnDuplicate::Error;
const ALLOWED_TARGETS: AllowedTargets =
AllowedTargets::AllowList(&[Allow(Target::ForeignFn), Allow(Target::ForeignStatic)]);
const ALLOWED_TARGETS: AllowedTargets = AllowedTargets::AllowList(&[
Allow(Target::ForeignFn),
Allow(Target::ForeignStatic),
Warn(Target::MacroCall),
]);
const TEMPLATE: AttributeTemplate = template!(
List: &["ordinal"],
"https://doc.rust-lang.org/reference/items/external-blocks.html#the-link_ordinal-attribute"

View file

@ -12,11 +12,15 @@
//! - [`CombineAttributeParser`](crate::attributes::CombineAttributeParser): makes it easy to implement an attribute which should combine the
//! contents of attributes, if an attribute appear multiple times in a list
//!
//! By default, attributes are allowed anywhere. When adding an attribute that should only be used
//! at the crate root, consider setting the `TYPE` in the parser trait to
//! [`AttributeType::CrateLevel`](rustc_feature::AttributeType::CrateLevel).
//!
//! Attributes should be added to `crate::context::ATTRIBUTE_PARSERS` to be parsed.
use std::marker::PhantomData;
use rustc_feature::{AttributeTemplate, template};
use rustc_feature::{AttributeTemplate, AttributeType, template};
use rustc_hir::attrs::AttributeKind;
use rustc_span::{Span, Symbol};
use thin_vec::ThinVec;
@ -88,6 +92,8 @@ pub(crate) trait AttributeParser<S: Stage>: Default + 'static {
const ALLOWED_TARGETS: AllowedTargets;
const TYPE: AttributeType = AttributeType::Normal;
/// The parser has gotten a chance to accept the attributes on an item,
/// here it can produce an attribute.
///
@ -129,6 +135,8 @@ pub(crate) trait SingleAttributeParser<S: Stage>: 'static {
/// The template this attribute parser should implement. Used for diagnostics.
const TEMPLATE: AttributeTemplate;
const TYPE: AttributeType = AttributeType::Normal;
/// Converts a single syntactical attribute to a single semantic attribute, or [`AttributeKind`]
fn convert(cx: &mut AcceptContext<'_, '_, S>, args: &ArgParser<'_>) -> Option<AttributeKind>;
}
@ -175,6 +183,8 @@ impl<T: SingleAttributeParser<S>, S: Stage> AttributeParser<S> for Single<T, S>
)];
const ALLOWED_TARGETS: AllowedTargets = T::ALLOWED_TARGETS;
const TYPE: AttributeType = T::TYPE;
fn finalize(self, _cx: &FinalizeContext<'_, '_, S>) -> Option<AttributeKind> {
Some(self.1?.0)
}
@ -259,6 +269,7 @@ pub(crate) trait NoArgsAttributeParser<S: Stage>: 'static {
const PATH: &[Symbol];
const ON_DUPLICATE: OnDuplicate<S>;
const ALLOWED_TARGETS: AllowedTargets;
const TYPE: AttributeType = AttributeType::Normal;
/// Create the [`AttributeKind`] given attribute's [`Span`].
const CREATE: fn(Span) -> AttributeKind;
@ -278,6 +289,7 @@ impl<T: NoArgsAttributeParser<S>, S: Stage> SingleAttributeParser<S> for Without
const ON_DUPLICATE: OnDuplicate<S> = T::ON_DUPLICATE;
const ALLOWED_TARGETS: AllowedTargets = T::ALLOWED_TARGETS;
const TEMPLATE: AttributeTemplate = template!(Word);
const TYPE: AttributeType = T::TYPE;
fn convert(cx: &mut AcceptContext<'_, '_, S>, args: &ArgParser<'_>) -> Option<AttributeKind> {
if let Err(span) = args.no_args() {
@ -311,6 +323,8 @@ pub(crate) trait CombineAttributeParser<S: Stage>: 'static {
/// The template this attribute parser should implement. Used for diagnostics.
const TEMPLATE: AttributeTemplate;
const TYPE: AttributeType = AttributeType::Normal;
/// Converts a single syntactical attribute to a number of elements of the semantic attribute, or [`AttributeKind`]
fn extend<'c>(
cx: &'c mut AcceptContext<'_, '_, S>,
@ -346,6 +360,7 @@ impl<T: CombineAttributeParser<S>, S: Stage> AttributeParser<S> for Combine<T, S
group.items.extend(T::extend(cx, args))
})];
const ALLOWED_TARGETS: AllowedTargets = T::ALLOWED_TARGETS;
const TYPE: AttributeType = T::TYPE;
fn finalize(self, _cx: &FinalizeContext<'_, '_, S>) -> Option<AttributeKind> {
if let Some(first_span) = self.first_span {

View file

@ -19,6 +19,7 @@ impl<S: Stage> NoArgsAttributeParser<S> for NonExhaustiveParser {
Warn(Target::Field),
Warn(Target::Arm),
Warn(Target::MacroDef),
Warn(Target::MacroCall),
]);
const CREATE: fn(Span) -> AttributeKind = AttributeKind::NonExhaustive;
}

View file

@ -1,11 +1,13 @@
use super::prelude::*;
const PROC_MACRO_ALLOWED_TARGETS: AllowedTargets =
AllowedTargets::AllowList(&[Allow(Target::Fn), Warn(Target::Crate), Warn(Target::MacroCall)]);
pub(crate) struct ProcMacroParser;
impl<S: Stage> NoArgsAttributeParser<S> for ProcMacroParser {
const PATH: &[Symbol] = &[sym::proc_macro];
const ON_DUPLICATE: OnDuplicate<S> = OnDuplicate::Error;
const ALLOWED_TARGETS: AllowedTargets =
AllowedTargets::AllowList(&[Allow(Target::Fn), Warn(Target::Crate)]);
const ALLOWED_TARGETS: AllowedTargets = PROC_MACRO_ALLOWED_TARGETS;
const CREATE: fn(Span) -> AttributeKind = AttributeKind::ProcMacro;
}
@ -13,8 +15,7 @@ pub(crate) struct ProcMacroAttributeParser;
impl<S: Stage> NoArgsAttributeParser<S> for ProcMacroAttributeParser {
const PATH: &[Symbol] = &[sym::proc_macro_attribute];
const ON_DUPLICATE: OnDuplicate<S> = OnDuplicate::Error;
const ALLOWED_TARGETS: AllowedTargets =
AllowedTargets::AllowList(&[Allow(Target::Fn), Warn(Target::Crate)]);
const ALLOWED_TARGETS: AllowedTargets = PROC_MACRO_ALLOWED_TARGETS;
const CREATE: fn(Span) -> AttributeKind = AttributeKind::ProcMacroAttribute;
}
@ -23,8 +24,7 @@ impl<S: Stage> SingleAttributeParser<S> for ProcMacroDeriveParser {
const PATH: &[Symbol] = &[sym::proc_macro_derive];
const ATTRIBUTE_ORDER: AttributeOrder = AttributeOrder::KeepOutermost;
const ON_DUPLICATE: OnDuplicate<S> = OnDuplicate::Error;
const ALLOWED_TARGETS: AllowedTargets =
AllowedTargets::AllowList(&[Allow(Target::Fn), Warn(Target::Crate)]);
const ALLOWED_TARGETS: AllowedTargets = PROC_MACRO_ALLOWED_TARGETS;
const TEMPLATE: AttributeTemplate = template!(
List: &["TraitName", "TraitName, attributes(name1, name2, ...)"],
"https://doc.rust-lang.org/reference/procedural-macros.html#derive-macros"

View file

@ -1,5 +1,7 @@
use std::mem;
use rustc_feature::AttributeType;
use super::prelude::*;
use crate::attributes::{
AttributeOrder, NoArgsAttributeParser, OnDuplicate, SingleAttributeParser,
@ -154,6 +156,7 @@ impl<S: Stage> NoArgsAttributeParser<S> for CoherenceIsCoreParser {
const PATH: &[Symbol] = &[sym::rustc_coherence_is_core];
const ON_DUPLICATE: OnDuplicate<S> = OnDuplicate::Error;
const ALLOWED_TARGETS: AllowedTargets = AllowedTargets::AllowList(&[Allow(Target::Crate)]);
const TYPE: AttributeType = AttributeType::CrateLevel;
const CREATE: fn(Span) -> AttributeKind = |_| AttributeKind::CoherenceIsCore;
}

View file

@ -4,12 +4,12 @@ use std::ops::{Deref, DerefMut};
use std::sync::LazyLock;
use private::Sealed;
use rustc_ast::{AttrStyle, MetaItemLit, NodeId};
use rustc_ast::{AttrStyle, CRATE_NODE_ID, MetaItemLit, NodeId};
use rustc_errors::{Diag, Diagnostic, Level};
use rustc_feature::AttributeTemplate;
use rustc_feature::{AttributeTemplate, AttributeType};
use rustc_hir::attrs::AttributeKind;
use rustc_hir::lints::{AttributeLint, AttributeLintKind};
use rustc_hir::{AttrPath, HirId};
use rustc_hir::{AttrPath, CRATE_HIR_ID, HirId};
use rustc_session::Session;
use rustc_span::{ErrorGuaranteed, Span, Symbol};
@ -80,6 +80,7 @@ pub(super) struct GroupTypeInnerAccept<S: Stage> {
pub(super) template: AttributeTemplate,
pub(super) accept_fn: AcceptFn<S>,
pub(super) allowed_targets: AllowedTargets,
pub(super) attribute_type: AttributeType,
}
type AcceptFn<S> =
@ -129,6 +130,7 @@ macro_rules! attribute_parsers {
})
}),
allowed_targets: <$names as crate::attributes::AttributeParser<$stage>>::ALLOWED_TARGETS,
attribute_type: <$names as crate::attributes::AttributeParser<$stage>>::TYPE,
});
}
@ -250,6 +252,8 @@ pub trait Stage: Sized + 'static + Sealed {
) -> ErrorGuaranteed;
fn should_emit(&self) -> ShouldEmit;
fn id_is_crate_root(id: Self::Id) -> bool;
}
// allow because it's a sealed trait
@ -271,6 +275,10 @@ impl Stage for Early {
fn should_emit(&self) -> ShouldEmit {
self.emit_errors
}
fn id_is_crate_root(id: Self::Id) -> bool {
id == CRATE_NODE_ID
}
}
// allow because it's a sealed trait
@ -292,6 +300,10 @@ impl Stage for Late {
fn should_emit(&self) -> ShouldEmit {
ShouldEmit::ErrorsAndLints
}
fn id_is_crate_root(id: Self::Id) -> bool {
id == CRATE_HIR_ID
}
}
/// used when parsing attributes for miscellaneous things *before* ast lowering

View file

@ -271,8 +271,8 @@ impl<'sess, S: Stage> AttributeParser<'sess, S> {
};
(accept.accept_fn)(&mut cx, args);
if !matches!(self.stage.should_emit(), ShouldEmit::Nothing) {
if !matches!(cx.stage.should_emit(), ShouldEmit::Nothing) {
Self::check_type(accept.attribute_type, target, &mut cx);
self.check_target(
path.get_attribute_path(),
attr.span,

View file

@ -41,8 +41,14 @@ pub fn emit_attribute_lint<L: LintEmitter>(lint: &AttributeLint<L::Id>, lint_emi
.emit_node_span_lint(
// This check is here because `deprecated` had its own lint group and removing this would be a breaking change
if name.segments[0].name == sym::deprecated
&& ![Target::Closure, Target::Expression, Target::Statement, Target::Arm]
.contains(target)
&& ![
Target::Closure,
Target::Expression,
Target::Statement,
Target::Arm,
Target::MacroCall,
]
.contains(target)
{
rustc_session::lint::builtin::USELESS_DEPRECATED
} else {
@ -60,5 +66,19 @@ pub fn emit_attribute_lint<L: LintEmitter>(lint: &AttributeLint<L::Id>, lint_emi
attr_span: *span,
},
),
&AttributeLintKind::InvalidStyle { ref name, is_used_as_inner, target, target_span } => {
lint_emitter.emit_node_span_lint(
rustc_session::lint::builtin::UNUSED_ATTRIBUTES,
*id,
*span,
session_diagnostics::InvalidAttrStyle {
name: name.clone(),
is_used_as_inner,
target_span: (!is_used_as_inner).then_some(target_span),
target,
},
)
}
}
}

View file

@ -6,7 +6,7 @@ use rustc_errors::{
Applicability, Diag, DiagArgValue, DiagCtxtHandle, Diagnostic, EmissionGuarantee, Level,
};
use rustc_feature::AttributeTemplate;
use rustc_hir::AttrPath;
use rustc_hir::{AttrPath, Target};
use rustc_macros::{Diagnostic, LintDiagnostic, Subdiagnostic};
use rustc_span::{Span, Symbol};
@ -826,3 +826,13 @@ pub(crate) struct SuffixedLiteralInAttribute {
#[primary_span]
pub span: Span,
}
#[derive(LintDiagnostic)]
#[diag(attr_parsing_invalid_style)]
pub(crate) struct InvalidAttrStyle {
pub name: AttrPath,
pub is_used_as_inner: bool,
#[note]
pub target_span: Option<Span>,
pub target: Target,
}

View file

@ -1,13 +1,14 @@
use std::borrow::Cow;
use rustc_ast::AttrStyle;
use rustc_errors::DiagArgValue;
use rustc_feature::Features;
use rustc_feature::{AttributeType, Features};
use rustc_hir::lints::{AttributeLint, AttributeLintKind};
use rustc_hir::{AttrPath, MethodKind, Target};
use rustc_span::Span;
use crate::AttributeParser;
use crate::context::Stage;
use crate::context::{AcceptContext, Stage};
use crate::session_diagnostics::InvalidTarget;
#[derive(Debug)]
@ -68,7 +69,7 @@ pub(crate) enum Policy {
Error(Target),
}
impl<S: Stage> AttributeParser<'_, S> {
impl<'sess, S: Stage> AttributeParser<'sess, S> {
pub(crate) fn check_target(
&self,
attr_name: AttrPath,
@ -111,6 +112,32 @@ impl<S: Stage> AttributeParser<'_, S> {
}
}
}
pub(crate) fn check_type(
attribute_type: AttributeType,
target: Target,
cx: &mut AcceptContext<'_, 'sess, S>,
) {
let is_crate_root = S::id_is_crate_root(cx.target_id);
if is_crate_root {
return;
}
if attribute_type != AttributeType::CrateLevel {
return;
}
let lint = AttributeLintKind::InvalidStyle {
name: cx.attr_path.clone(),
is_used_as_inner: cx.attr_style == AttrStyle::Inner,
target,
target_span: cx.target_span,
};
let attr_span = cx.attr_span;
cx.emit_lint(lint, attr_span);
}
}
/// Takes a list of `allowed_targets` for an attribute, and the `target` the attribute was applied to.

View file

@ -17,7 +17,7 @@ pub use super::polonius::legacy::{
RichLocation, RustcFacts,
};
pub use super::region_infer::RegionInferenceContext;
use crate::{BorrowCheckRootCtxt, do_mir_borrowck};
use crate::BorrowCheckRootCtxt;
/// Struct used during mir borrowck to collect bodies with facts for a typeck root and all
/// its nested bodies.
@ -127,13 +127,6 @@ pub fn get_bodies_with_borrowck_facts(
) -> FxHashMap<LocalDefId, BodyWithBorrowckFacts<'_>> {
let mut root_cx =
BorrowCheckRootCtxt::new(tcx, root_def_id, Some(BorrowckConsumer::new(options)));
// See comment in `rustc_borrowck::mir_borrowck`
let nested_bodies = tcx.nested_bodies_within(root_def_id);
for def_id in nested_bodies {
root_cx.get_or_insert_nested(def_id);
}
do_mir_borrowck(&mut root_cx, root_def_id);
root_cx.do_mir_borrowck();
root_cx.consumer.unwrap().bodies
}

View file

@ -22,8 +22,10 @@ use std::ops::{ControlFlow, Deref};
use std::rc::Rc;
use borrow_set::LocalsStateAtExit;
use polonius_engine::AllFacts;
use root_cx::BorrowCheckRootCtxt;
use rustc_abi::FieldIdx;
use rustc_data_structures::frozen::Frozen;
use rustc_data_structures::fx::{FxIndexMap, FxIndexSet};
use rustc_data_structures::graph::dominators::Dominators;
use rustc_errors::LintDiagnostic;
@ -32,6 +34,7 @@ use rustc_hir::CRATE_HIR_ID;
use rustc_hir::def_id::LocalDefId;
use rustc_index::bit_set::MixedBitSet;
use rustc_index::{IndexSlice, IndexVec};
use rustc_infer::infer::outlives::env::RegionBoundPairs;
use rustc_infer::infer::{
InferCtxt, NllRegionVariableOrigin, RegionVariableOrigin, TyCtxtInferExt,
};
@ -53,7 +56,7 @@ use smallvec::SmallVec;
use tracing::{debug, instrument};
use crate::borrow_set::{BorrowData, BorrowSet};
use crate::consumers::BodyWithBorrowckFacts;
use crate::consumers::{BodyWithBorrowckFacts, RustcFacts};
use crate::dataflow::{BorrowIndex, Borrowck, BorrowckDomain, Borrows};
use crate::diagnostics::{
AccessKind, BorrowckDiagnosticsBuffer, IllegalMoveOriginKind, MoveError, RegionName,
@ -61,15 +64,17 @@ use crate::diagnostics::{
use crate::path_utils::*;
use crate::place_ext::PlaceExt;
use crate::places_conflict::{PlaceConflictBias, places_conflict};
use crate::polonius::PoloniusDiagnosticsContext;
use crate::polonius::legacy::{
PoloniusFacts, PoloniusFactsExt, PoloniusLocationTable, PoloniusOutput,
};
use crate::polonius::{PoloniusContext, PoloniusDiagnosticsContext};
use crate::prefixes::PrefixSet;
use crate::region_infer::RegionInferenceContext;
use crate::region_infer::opaque_types::DeferredOpaqueTypeError;
use crate::renumber::RegionCtxt;
use crate::session_diagnostics::VarNeedNotMut;
use crate::type_check::MirTypeckResults;
use crate::type_check::free_region_relations::UniversalRegionRelations;
use crate::type_check::{Locations, MirTypeckRegionConstraints, MirTypeckResults};
mod borrow_set;
mod borrowck_errors;
@ -129,18 +134,7 @@ fn mir_borrowck(
Ok(tcx.arena.alloc(opaque_types))
} else {
let mut root_cx = BorrowCheckRootCtxt::new(tcx, def, None);
// We need to manually borrowck all nested bodies from the HIR as
// we do not generate MIR for dead code. Not doing so causes us to
// never check closures in dead code.
let nested_bodies = tcx.nested_bodies_within(def);
for def_id in nested_bodies {
root_cx.get_or_insert_nested(def_id);
}
let PropagatedBorrowCheckResults { closure_requirements, used_mut_upvars } =
do_mir_borrowck(&mut root_cx, def);
debug_assert!(closure_requirements.is_none());
debug_assert!(used_mut_upvars.is_empty());
root_cx.do_mir_borrowck();
root_cx.finalize()
}
}
@ -153,6 +147,8 @@ struct PropagatedBorrowCheckResults<'tcx> {
used_mut_upvars: SmallVec<[FieldIdx; 8]>,
}
type DeferredClosureRequirements<'tcx> = Vec<(LocalDefId, ty::GenericArgsRef<'tcx>, Locations)>;
/// After we borrow check a closure, we are left with various
/// requirements that we have inferred between the free regions that
/// appear in the closure's signature or on its field types. These
@ -291,14 +287,31 @@ impl<'tcx> ClosureOutlivesSubjectTy<'tcx> {
}
}
/// Perform the actual borrow checking.
///
/// For nested bodies this should only be called through `root_cx.get_or_insert_nested`.
#[instrument(skip(root_cx), level = "debug")]
fn do_mir_borrowck<'tcx>(
struct CollectRegionConstraintsResult<'tcx> {
infcx: BorrowckInferCtxt<'tcx>,
body_owned: Body<'tcx>,
promoted: IndexVec<Promoted, Body<'tcx>>,
move_data: MoveData<'tcx>,
borrow_set: BorrowSet<'tcx>,
location_table: PoloniusLocationTable,
location_map: Rc<DenseLocationMap>,
universal_region_relations: Frozen<UniversalRegionRelations<'tcx>>,
region_bound_pairs: Frozen<RegionBoundPairs<'tcx>>,
known_type_outlives_obligations: Frozen<Vec<ty::PolyTypeOutlivesPredicate<'tcx>>>,
constraints: MirTypeckRegionConstraints<'tcx>,
deferred_closure_requirements: DeferredClosureRequirements<'tcx>,
deferred_opaque_type_errors: Vec<DeferredOpaqueTypeError<'tcx>>,
polonius_facts: Option<AllFacts<RustcFacts>>,
polonius_context: Option<PoloniusContext>,
}
/// Start borrow checking by collecting the region constraints for
/// the current body. This initializes the relevant data structures
/// and then type checks the MIR body.
fn borrowck_collect_region_constraints<'tcx>(
root_cx: &mut BorrowCheckRootCtxt<'tcx>,
def: LocalDefId,
) -> PropagatedBorrowCheckResults<'tcx> {
) -> CollectRegionConstraintsResult<'tcx> {
let tcx = root_cx.tcx;
let infcx = BorrowckInferCtxt::new(tcx, def, root_cx.root_def_id());
let (input_body, promoted) = tcx.mir_promoted(def);
@ -334,10 +347,11 @@ fn do_mir_borrowck<'tcx>(
// Run the MIR type-checker.
let MirTypeckResults {
mut constraints,
constraints,
universal_region_relations,
region_bound_pairs,
known_type_outlives_obligations,
deferred_closure_requirements,
polonius_context,
} = type_check::type_check(
root_cx,
@ -352,16 +366,53 @@ fn do_mir_borrowck<'tcx>(
Rc::clone(&location_map),
);
let opaque_type_errors = region_infer::opaque_types::handle_opaque_type_uses(
root_cx,
&infcx,
&body,
&universal_region_relations,
&region_bound_pairs,
&known_type_outlives_obligations,
&location_map,
&mut constraints,
);
CollectRegionConstraintsResult {
infcx,
body_owned,
promoted,
move_data,
borrow_set,
location_table,
location_map,
universal_region_relations,
region_bound_pairs,
known_type_outlives_obligations,
constraints,
deferred_closure_requirements,
deferred_opaque_type_errors: Default::default(),
polonius_facts,
polonius_context,
}
}
/// Using the region constraints computed by [borrowck_collect_region_constraints]
/// and the additional constraints from [BorrowCheckRootCtxt::handle_opaque_type_uses],
/// compute the region graph and actually check for any borrowck errors.
fn borrowck_check_region_constraints<'tcx>(
root_cx: &mut BorrowCheckRootCtxt<'tcx>,
CollectRegionConstraintsResult {
infcx,
body_owned,
promoted,
move_data,
borrow_set,
location_table,
location_map,
universal_region_relations,
region_bound_pairs: _,
known_type_outlives_obligations: _,
constraints,
deferred_closure_requirements,
deferred_opaque_type_errors,
polonius_facts,
polonius_context,
}: CollectRegionConstraintsResult<'tcx>,
) -> PropagatedBorrowCheckResults<'tcx> {
assert!(!infcx.has_opaque_types_in_storage());
assert!(deferred_closure_requirements.is_empty());
let tcx = root_cx.tcx;
let body = &body_owned;
let def = body.source.def_id().expect_local();
// Compute non-lexical lifetimes using the constraints computed
// by typechecking the MIR body.
@ -481,7 +532,7 @@ fn do_mir_borrowck<'tcx>(
// Compute and report region errors, if any.
if nll_errors.is_empty() {
mbcx.report_opaque_type_errors(opaque_type_errors);
mbcx.report_opaque_type_errors(deferred_opaque_type_errors);
} else {
mbcx.report_region_errors(nll_errors);
}

View file

@ -73,6 +73,38 @@ pub(crate) fn replace_regions_in_mir<'tcx>(
universal_regions
}
/// Computes the closure requirements given the current inference state.
///
/// This is intended to be used by before [BorrowCheckRootCtxt::handle_opaque_type_uses]
/// because applying member constraints may rely on closure requirements.
/// This is frequently the case of async functions where pretty much everything
/// happens inside of the inner async block but the opaque only gets constrained
/// in the parent function.
pub(crate) fn compute_closure_requirements_modulo_opaques<'tcx>(
infcx: &BorrowckInferCtxt<'tcx>,
body: &Body<'tcx>,
location_map: Rc<DenseLocationMap>,
universal_region_relations: &Frozen<UniversalRegionRelations<'tcx>>,
constraints: &MirTypeckRegionConstraints<'tcx>,
) -> Option<ClosureRegionRequirements<'tcx>> {
// FIXME(#146079): we shouldn't have to clone all this stuff here.
// Computing the region graph should take at least some of it by reference/`Rc`.
let lowered_constraints = compute_sccs_applying_placeholder_outlives_constraints(
constraints.clone(),
&universal_region_relations,
infcx,
);
let mut regioncx = RegionInferenceContext::new(
&infcx,
lowered_constraints,
universal_region_relations.clone(),
location_map,
);
let (closure_region_requirements, _nll_errors) = regioncx.solve(infcx, body, None);
closure_region_requirements
}
/// Computes the (non-lexical) regions from the input MIR.
///
/// This may result in errors being reported.

View file

@ -3,34 +3,34 @@ use std::rc::Rc;
use rustc_data_structures::frozen::Frozen;
use rustc_data_structures::fx::FxIndexMap;
use rustc_hir::def_id::DefId;
use rustc_hir::def_id::{DefId, LocalDefId};
use rustc_infer::infer::outlives::env::RegionBoundPairs;
use rustc_infer::infer::{InferCtxt, NllRegionVariableOrigin, OpaqueTypeStorageEntries};
use rustc_infer::traits::ObligationCause;
use rustc_macros::extension;
use rustc_middle::mir::{Body, ConstraintCategory};
use rustc_middle::mir::{Body, ConcreteOpaqueTypes, ConstraintCategory};
use rustc_middle::ty::{
self, DefiningScopeKind, FallibleTypeFolder, GenericArg, GenericArgsRef, OpaqueHiddenType,
OpaqueTypeKey, Region, RegionVid, Ty, TyCtxt, TypeFoldable, TypeSuperFoldable,
TypeVisitableExt, fold_regions,
self, DefiningScopeKind, EarlyBinder, FallibleTypeFolder, GenericArg, GenericArgsRef,
OpaqueHiddenType, OpaqueTypeKey, Region, RegionVid, Ty, TyCtxt, TypeFoldable,
TypeSuperFoldable, TypeVisitableExt, fold_regions,
};
use rustc_mir_dataflow::points::DenseLocationMap;
use rustc_span::Span;
use rustc_trait_selection::opaque_types::{
InvalidOpaqueTypeArgs, check_opaque_type_parameter_valid,
NonDefiningUseReason, opaque_type_has_defining_use_args,
};
use rustc_trait_selection::solve::NoSolution;
use rustc_trait_selection::traits::query::type_op::custom::CustomTypeOp;
use tracing::{debug, instrument};
use super::reverse_sccs::ReverseSccGraph;
use crate::BorrowckInferCtxt;
use crate::consumers::RegionInferenceContext;
use crate::session_diagnostics::LifetimeMismatchOpaqueParam;
use crate::type_check::canonical::fully_perform_op_raw;
use crate::type_check::free_region_relations::UniversalRegionRelations;
use crate::type_check::{Locations, MirTypeckRegionConstraints};
use crate::universal_regions::{RegionClassification, UniversalRegions};
use crate::{BorrowCheckRootCtxt, BorrowckInferCtxt};
mod member_constraints;
mod region_ctxt;
@ -42,7 +42,7 @@ use region_ctxt::RegionCtxt;
/// if there are no `RegionErrors`. If there are region errors, it's likely
/// that errors here are caused by them and don't need to be handled separately.
pub(crate) enum DeferredOpaqueTypeError<'tcx> {
InvalidOpaqueTypeArgs(InvalidOpaqueTypeArgs<'tcx>),
InvalidOpaqueTypeArgs(NonDefiningUseReason<'tcx>),
LifetimeMismatchOpaqueParam(LifetimeMismatchOpaqueParam<'tcx>),
UnexpectedHiddenRegion {
/// The opaque type.
@ -58,78 +58,32 @@ pub(crate) enum DeferredOpaqueTypeError<'tcx> {
},
}
/// This looks at all uses of opaque types in their defining scope inside
/// of this function.
/// We eagerly map all regions to NLL vars here, as we need to make sure we've
/// introduced nll vars for all used placeholders.
///
/// It first uses all defining uses to compute the actual concrete type of each
/// opaque type definition.
///
/// We then apply this inferred type to actually check all uses of the opaque.
pub(crate) fn handle_opaque_type_uses<'tcx>(
root_cx: &mut BorrowCheckRootCtxt<'tcx>,
/// We need to resolve inference vars as even though we're in MIR typeck, we may still
/// encounter inference variables, e.g. when checking user types.
pub(crate) fn clone_and_resolve_opaque_types<'tcx>(
infcx: &BorrowckInferCtxt<'tcx>,
body: &Body<'tcx>,
universal_region_relations: &Frozen<UniversalRegionRelations<'tcx>>,
region_bound_pairs: &RegionBoundPairs<'tcx>,
known_type_outlives_obligations: &[ty::PolyTypeOutlivesPredicate<'tcx>],
location_map: &Rc<DenseLocationMap>,
constraints: &mut MirTypeckRegionConstraints<'tcx>,
) -> Vec<DeferredOpaqueTypeError<'tcx>> {
let tcx = infcx.tcx;
) -> (OpaqueTypeStorageEntries, Vec<(OpaqueTypeKey<'tcx>, OpaqueHiddenType<'tcx>)>) {
let opaque_types = infcx.clone_opaque_types();
if opaque_types.is_empty() {
return Vec::new();
}
// We need to eagerly map all regions to NLL vars here, as we need to make sure we've
// introduced nll vars for all used placeholders.
//
// We need to resolve inference vars as even though we're in MIR typeck, we may still
// encounter inference variables, e.g. when checking user types.
let opaque_types_storage_num_entries = infcx.inner.borrow_mut().opaque_types().num_entries();
let opaque_types = opaque_types
.into_iter()
.map(|entry| {
fold_regions(tcx, infcx.resolve_vars_if_possible(entry), |r, _| {
fold_regions(infcx.tcx, infcx.resolve_vars_if_possible(entry), |r, _| {
let vid = if let ty::RePlaceholder(placeholder) = r.kind() {
constraints.placeholder_region(infcx, placeholder).as_var()
} else {
universal_region_relations.universal_regions.to_region_vid(r)
};
Region::new_var(tcx, vid)
Region::new_var(infcx.tcx, vid)
})
})
.collect::<Vec<_>>();
debug!(?opaque_types);
let errors = compute_concrete_opaque_types(
root_cx,
infcx,
constraints,
universal_region_relations,
Rc::clone(location_map),
&opaque_types,
);
if !errors.is_empty() {
return errors;
}
let errors = apply_computed_concrete_opaque_types(
root_cx,
infcx,
body,
&universal_region_relations.universal_regions,
region_bound_pairs,
known_type_outlives_obligations,
constraints,
&opaque_types,
);
detect_opaque_types_added_while_handling_opaque_types(infcx, opaque_types_storage_num_entries);
errors
(opaque_types_storage_num_entries, opaque_types)
}
/// Maps an NLL var to a deterministically chosen equal universal region.
@ -172,6 +126,42 @@ fn nll_var_to_universal_region<'tcx>(
}
}
/// Collect all defining uses of opaque types inside of this typeck root. This
/// expects the hidden type to be mapped to the definition parameters of the opaque
/// and errors if we end up with distinct hidden types.
fn add_concrete_opaque_type<'tcx>(
    tcx: TyCtxt<'tcx>,
    concrete_opaque_types: &mut ConcreteOpaqueTypes<'tcx>,
    def_id: LocalDefId,
    hidden_ty: OpaqueHiddenType<'tcx>,
) {
    // Sometimes two opaque types are the same only after we remap the generic parameters
    // back to the opaque type definition. E.g. we may have `OpaqueType<X, Y>` mapped to
    // `(X, Y)` and `OpaqueType<Y, X>` mapped to `(Y, X)`, and those are the same, but we
    // only know that once we convert the generic parameters to those of the opaque type.
    match concrete_opaque_types.0.get_mut(&def_id) {
        Some(prev) => {
            // A second, distinct hidden type for the same opaque is an error; taint
            // the recorded entry so later uses see the error type.
            if prev.ty != hidden_ty.ty {
                let guar = match hidden_ty.ty.error_reported() {
                    Err(guar) => guar,
                    Ok(()) => {
                        let (Ok(e) | Err(e)) =
                            prev.build_mismatch_error(&hidden_ty, tcx).map(|d| d.emit());
                        e
                    }
                };
                prev.ty = Ty::new_error(tcx, guar);
            }
            // Pick a better span if there is one.
            // FIXME(oli-obk): collect multiple spans for better diagnostics down the road.
            prev.span = prev.span.substitute_dummy(hidden_ty.span);
        }
        None => {
            concrete_opaque_types.0.insert(def_id, hidden_ty);
        }
    }
}
/// Looks up the previously recorded hidden type for `def_id`, if any,
/// wrapped in an `EarlyBinder` as it still refers to the opaque's own generics.
fn get_concrete_opaque_type<'tcx>(
    concrete_opaque_types: &ConcreteOpaqueTypes<'tcx>,
    def_id: LocalDefId,
) -> Option<EarlyBinder<'tcx, OpaqueHiddenType<'tcx>>> {
    let hidden_ty = concrete_opaque_types.0.get(&def_id)?;
    Some(EarlyBinder::bind(*hidden_ty))
}
#[derive(Debug)]
struct DefiningUse<'tcx> {
/// The opaque type using non NLL vars. This uses the actual
@ -193,12 +183,12 @@ struct DefiningUse<'tcx> {
///
/// It also means that this whole function is not really soundness critical as we
/// recheck all uses of the opaques regardless.
fn compute_concrete_opaque_types<'tcx>(
root_cx: &mut BorrowCheckRootCtxt<'tcx>,
pub(crate) fn compute_concrete_opaque_types<'tcx>(
infcx: &BorrowckInferCtxt<'tcx>,
constraints: &MirTypeckRegionConstraints<'tcx>,
universal_region_relations: &Frozen<UniversalRegionRelations<'tcx>>,
constraints: &MirTypeckRegionConstraints<'tcx>,
location_map: Rc<DenseLocationMap>,
concrete_opaque_types: &mut ConcreteOpaqueTypes<'tcx>,
opaque_types: &[(OpaqueTypeKey<'tcx>, OpaqueHiddenType<'tcx>)],
) -> Vec<DeferredOpaqueTypeError<'tcx>> {
let mut errors = Vec::new();
@ -211,7 +201,8 @@ fn compute_concrete_opaque_types<'tcx>(
// We start by checking each use of an opaque type during type check and
// check whether the generic arguments of the opaque type are fully
// universal, if so, it's a defining use.
let defining_uses = collect_defining_uses(root_cx, &mut rcx, opaque_types, &mut errors);
let defining_uses =
collect_defining_uses(&mut rcx, concrete_opaque_types, opaque_types, &mut errors);
// We now compute and apply member constraints for all regions in the hidden
// types of each defining use. This mutates the region values of the `rcx` which
@ -221,14 +212,19 @@ fn compute_concrete_opaque_types<'tcx>(
// After applying member constraints, we now check whether all member regions ended
// up equal to one of their choice regions and compute the actual concrete type of
// the opaque type definition. This is stored in the `root_cx`.
compute_concrete_types_from_defining_uses(root_cx, &rcx, &defining_uses, &mut errors);
compute_concrete_types_from_defining_uses(
&rcx,
concrete_opaque_types,
&defining_uses,
&mut errors,
);
errors
}
#[instrument(level = "debug", skip_all, ret)]
fn collect_defining_uses<'tcx>(
root_cx: &mut BorrowCheckRootCtxt<'tcx>,
rcx: &mut RegionCtxt<'_, 'tcx>,
concrete_opaque_types: &mut ConcreteOpaqueTypes<'tcx>,
opaque_types: &[(OpaqueTypeKey<'tcx>, OpaqueHiddenType<'tcx>)],
errors: &mut Vec<DeferredOpaqueTypeError<'tcx>>,
) -> Vec<DefiningUse<'tcx>> {
@ -238,7 +234,7 @@ fn collect_defining_uses<'tcx>(
let non_nll_opaque_type_key = opaque_type_key.fold_captured_lifetime_args(infcx.tcx, |r| {
nll_var_to_universal_region(&rcx, r.as_var()).unwrap_or(r)
});
if let Err(err) = check_opaque_type_parameter_valid(
if let Err(err) = opaque_type_has_defining_use_args(
infcx,
non_nll_opaque_type_key,
hidden_type.span,
@ -248,11 +244,12 @@ fn collect_defining_uses<'tcx>(
// with `TypingMode::Borrowck`.
if infcx.tcx.use_typing_mode_borrowck() {
match err {
InvalidOpaqueTypeArgs::AlreadyReported(guar) => root_cx
.add_concrete_opaque_type(
opaque_type_key.def_id,
OpaqueHiddenType::new_error(infcx.tcx, guar),
),
NonDefiningUseReason::Tainted(guar) => add_concrete_opaque_type(
infcx.tcx,
concrete_opaque_types,
opaque_type_key.def_id,
OpaqueHiddenType::new_error(infcx.tcx, guar),
),
_ => debug!(?non_nll_opaque_type_key, ?err, "ignoring non-defining use"),
}
} else {
@ -281,8 +278,8 @@ fn collect_defining_uses<'tcx>(
}
fn compute_concrete_types_from_defining_uses<'tcx>(
root_cx: &mut BorrowCheckRootCtxt<'tcx>,
rcx: &RegionCtxt<'_, 'tcx>,
concrete_opaque_types: &mut ConcreteOpaqueTypes<'tcx>,
defining_uses: &[DefiningUse<'tcx>],
errors: &mut Vec<DeferredOpaqueTypeError<'tcx>>,
) {
@ -361,7 +358,9 @@ fn compute_concrete_types_from_defining_uses<'tcx>(
},
));
}
root_cx.add_concrete_opaque_type(
add_concrete_opaque_type(
tcx,
concrete_opaque_types,
opaque_type_key.def_id,
OpaqueHiddenType { span: hidden_type.span, ty },
);
@ -490,20 +489,20 @@ impl<'tcx> FallibleTypeFolder<TyCtxt<'tcx>> for ToArgRegionsFolder<'_, 'tcx> {
///
/// It does this by equating the hidden type of each use with the instantiated final
/// hidden type of the opaque.
fn apply_computed_concrete_opaque_types<'tcx>(
root_cx: &mut BorrowCheckRootCtxt<'tcx>,
pub(crate) fn apply_computed_concrete_opaque_types<'tcx>(
infcx: &BorrowckInferCtxt<'tcx>,
body: &Body<'tcx>,
universal_regions: &UniversalRegions<'tcx>,
region_bound_pairs: &RegionBoundPairs<'tcx>,
known_type_outlives_obligations: &[ty::PolyTypeOutlivesPredicate<'tcx>],
constraints: &mut MirTypeckRegionConstraints<'tcx>,
concrete_opaque_types: &mut ConcreteOpaqueTypes<'tcx>,
opaque_types: &[(OpaqueTypeKey<'tcx>, OpaqueHiddenType<'tcx>)],
) -> Vec<DeferredOpaqueTypeError<'tcx>> {
let tcx = infcx.tcx;
let mut errors = Vec::new();
for &(key, hidden_type) in opaque_types {
let Some(expected) = root_cx.get_concrete_opaque_type(key.def_id) else {
let Some(expected) = get_concrete_opaque_type(concrete_opaque_types, key.def_id) else {
assert!(tcx.use_typing_mode_borrowck(), "non-defining use in defining scope");
errors.push(DeferredOpaqueTypeError::NonDefiningUseInDefiningScope {
span: hidden_type.span,
@ -513,7 +512,12 @@ fn apply_computed_concrete_opaque_types<'tcx>(
hidden_type.span,
"non-defining use in the defining scope with no defining uses",
);
root_cx.add_concrete_opaque_type(key.def_id, OpaqueHiddenType::new_error(tcx, guar));
add_concrete_opaque_type(
tcx,
concrete_opaque_types,
key.def_id,
OpaqueHiddenType::new_error(tcx, guar),
);
continue;
};
@ -553,7 +557,12 @@ fn apply_computed_concrete_opaque_types<'tcx>(
"equating opaque types",
),
) {
root_cx.add_concrete_opaque_type(key.def_id, OpaqueHiddenType::new_error(tcx, guar));
add_concrete_opaque_type(
tcx,
concrete_opaque_types,
key.def_id,
OpaqueHiddenType::new_error(tcx, guar),
);
}
}
errors
@ -566,7 +575,7 @@ fn apply_computed_concrete_opaque_types<'tcx>(
/// an ICE we can properly handle this, but we haven't encountered any such test yet.
///
/// See the related comment in `FnCtxt::detect_opaque_types_added_during_writeback`.
fn detect_opaque_types_added_while_handling_opaque_types<'tcx>(
pub(crate) fn detect_opaque_types_added_while_handling_opaque_types<'tcx>(
infcx: &InferCtxt<'tcx>,
opaque_types_storage_num_entries: OpaqueTypeStorageEntries,
) {
@ -676,8 +685,8 @@ impl<'tcx> InferCtxt<'tcx> {
&self,
opaque_type_key: OpaqueTypeKey<'tcx>,
instantiated_ty: OpaqueHiddenType<'tcx>,
) -> Result<Ty<'tcx>, InvalidOpaqueTypeArgs<'tcx>> {
check_opaque_type_parameter_valid(
) -> Result<Ty<'tcx>, NonDefiningUseReason<'tcx>> {
opaque_type_has_defining_use_args(
self,
opaque_type_key,
instantiated_ty.span,

View file

@ -37,6 +37,7 @@ pub(crate) enum RegionElement {
/// Records the CFG locations where each region is live. When we initially compute liveness, we use
/// an interval matrix storing liveness ranges for each region-vid.
#[derive(Clone)] // FIXME(#146079)
pub(crate) struct LivenessValues {
/// The map from locations to points.
location_map: Rc<DenseLocationMap>,
@ -194,6 +195,7 @@ impl LivenessValues {
/// rustc to the internal `PlaceholderIndex` values that are used in
/// NLL.
#[derive(Debug, Default)]
#[derive(Clone)] // FIXME(#146079)
pub(crate) struct PlaceholderIndices {
indices: FxIndexSet<ty::PlaceholderRegion>,
}

View file

@ -1,13 +1,26 @@
use std::mem;
use std::rc::Rc;
use rustc_abi::FieldIdx;
use rustc_data_structures::fx::FxHashMap;
use rustc_data_structures::fx::{FxHashMap, FxIndexMap};
use rustc_hir::def_id::LocalDefId;
use rustc_middle::bug;
use rustc_middle::ty::{EarlyBinder, OpaqueHiddenType, Ty, TyCtxt, TypeVisitableExt};
use rustc_middle::mir::ConstraintCategory;
use rustc_middle::ty::{self, TyCtxt};
use rustc_span::ErrorGuaranteed;
use smallvec::SmallVec;
use crate::consumers::BorrowckConsumer;
use crate::{ClosureRegionRequirements, ConcreteOpaqueTypes, PropagatedBorrowCheckResults};
use crate::nll::compute_closure_requirements_modulo_opaques;
use crate::region_infer::opaque_types::{
apply_computed_concrete_opaque_types, clone_and_resolve_opaque_types,
compute_concrete_opaque_types, detect_opaque_types_added_while_handling_opaque_types,
};
use crate::type_check::{Locations, constraint_conversion};
use crate::{
ClosureRegionRequirements, CollectRegionConstraintsResult, ConcreteOpaqueTypes,
PropagatedBorrowCheckResults, borrowck_check_region_constraints,
borrowck_collect_region_constraints,
};
/// The shared context used by both the root as well as all its nested
/// items.
@ -15,7 +28,12 @@ pub(super) struct BorrowCheckRootCtxt<'tcx> {
pub tcx: TyCtxt<'tcx>,
root_def_id: LocalDefId,
concrete_opaque_types: ConcreteOpaqueTypes<'tcx>,
nested_bodies: FxHashMap<LocalDefId, PropagatedBorrowCheckResults<'tcx>>,
/// The region constraints computed by [borrowck_collect_region_constraints]. This uses
/// an [FxIndexMap] to guarantee that iterating over it visits nested bodies before
/// their parents.
collect_region_constraints_results:
FxIndexMap<LocalDefId, CollectRegionConstraintsResult<'tcx>>,
propagated_borrowck_results: FxHashMap<LocalDefId, PropagatedBorrowCheckResults<'tcx>>,
tainted_by_errors: Option<ErrorGuaranteed>,
/// This should be `None` during normal compilation. See [`crate::consumers`] for more
/// information on how this is used.
@ -32,7 +50,8 @@ impl<'tcx> BorrowCheckRootCtxt<'tcx> {
tcx,
root_def_id,
concrete_opaque_types: Default::default(),
nested_bodies: Default::default(),
collect_region_constraints_results: Default::default(),
propagated_borrowck_results: Default::default(),
tainted_by_errors: None,
consumer,
}
@ -42,76 +61,15 @@ impl<'tcx> BorrowCheckRootCtxt<'tcx> {
self.root_def_id
}
/// Collect all defining uses of opaque types inside of this typeck root. This
/// expects the hidden type to be mapped to the definition parameters of the opaque
/// and errors if we end up with distinct hidden types.
pub(super) fn add_concrete_opaque_type(
&mut self,
def_id: LocalDefId,
hidden_ty: OpaqueHiddenType<'tcx>,
) {
// Sometimes two opaque types are the same only after we remap the generic parameters
// back to the opaque type definition. E.g. we may have `OpaqueType<X, Y>` mapped to
// `(X, Y)` and `OpaqueType<Y, X>` mapped to `(Y, X)`, and those are the same, but we
// only know that once we convert the generic parameters to those of the opaque type.
if let Some(prev) = self.concrete_opaque_types.0.get_mut(&def_id) {
if prev.ty != hidden_ty.ty {
let guar = hidden_ty.ty.error_reported().err().unwrap_or_else(|| {
let (Ok(e) | Err(e)) =
prev.build_mismatch_error(&hidden_ty, self.tcx).map(|d| d.emit());
e
});
prev.ty = Ty::new_error(self.tcx, guar);
}
// Pick a better span if there is one.
// FIXME(oli-obk): collect multiple spans for better diagnostics down the road.
prev.span = prev.span.substitute_dummy(hidden_ty.span);
} else {
self.concrete_opaque_types.0.insert(def_id, hidden_ty);
}
}
pub(super) fn get_concrete_opaque_type(
&mut self,
def_id: LocalDefId,
) -> Option<EarlyBinder<'tcx, OpaqueHiddenType<'tcx>>> {
self.concrete_opaque_types.0.get(&def_id).map(|ty| EarlyBinder::bind(*ty))
}
pub(super) fn set_tainted_by_errors(&mut self, guar: ErrorGuaranteed) {
self.tainted_by_errors = Some(guar);
}
pub(super) fn get_or_insert_nested(
&mut self,
def_id: LocalDefId,
) -> &PropagatedBorrowCheckResults<'tcx> {
debug_assert_eq!(
self.tcx.typeck_root_def_id(def_id.to_def_id()),
self.root_def_id.to_def_id()
);
if !self.nested_bodies.contains_key(&def_id) {
let result = super::do_mir_borrowck(self, def_id);
if let Some(prev) = self.nested_bodies.insert(def_id, result) {
bug!("unexpected previous nested body: {prev:?}");
}
}
self.nested_bodies.get(&def_id).unwrap()
}
pub(super) fn closure_requirements(
&mut self,
nested_body_def_id: LocalDefId,
) -> &Option<ClosureRegionRequirements<'tcx>> {
&self.get_or_insert_nested(nested_body_def_id).closure_requirements
}
pub(super) fn used_mut_upvars(
&mut self,
nested_body_def_id: LocalDefId,
) -> &SmallVec<[FieldIdx; 8]> {
&self.get_or_insert_nested(nested_body_def_id).used_mut_upvars
&self.propagated_borrowck_results[&nested_body_def_id].used_mut_upvars
}
pub(super) fn finalize(self) -> Result<&'tcx ConcreteOpaqueTypes<'tcx>, ErrorGuaranteed> {
@ -121,4 +79,214 @@ impl<'tcx> BorrowCheckRootCtxt<'tcx> {
Ok(self.tcx.arena.alloc(self.concrete_opaque_types))
}
}
/// Handles all opaque type uses across every body in this typeck root, in two passes.
///
/// Pass 1 (per body): resolve and clone the opaque types recorded during MIR typeck
/// and use the defining uses to compute the concrete hidden types, accumulating them
/// into `self.concrete_opaque_types`.
///
/// Pass 2 (per body): if pass 1 produced no errors for that body, equate every use of
/// an opaque with the computed concrete type, which may add new region constraints.
/// Finally, assert that handling opaques did not itself introduce new opaque type uses.
fn handle_opaque_type_uses(&mut self) {
    // Per-body data carried from pass 1 to pass 2: the opaque storage entry count
    // (for the final sanity check) and the resolved opaque types themselves.
    let mut per_body_info = Vec::new();
    for input in self.collect_region_constraints_results.values_mut() {
        let (num_entries, opaque_types) = clone_and_resolve_opaque_types(
            &input.infcx,
            &input.universal_region_relations,
            &mut input.constraints,
        );
        input.deferred_opaque_type_errors = compute_concrete_opaque_types(
            &input.infcx,
            &input.universal_region_relations,
            &input.constraints,
            Rc::clone(&input.location_map),
            &mut self.concrete_opaque_types,
            &opaque_types,
        );
        per_body_info.push((num_entries, opaque_types));
    }
    // `values_mut()` iterates in the same (insertion) order as above, so zipping
    // with `per_body_info` pairs each body with its own data.
    for (input, (opaque_types_storage_num_entries, opaque_types)) in
        self.collect_region_constraints_results.values_mut().zip(per_body_info)
    {
        // Only apply the computed hidden types if computing them succeeded;
        // otherwise keep the errors from pass 1.
        if input.deferred_opaque_type_errors.is_empty() {
            input.deferred_opaque_type_errors = apply_computed_concrete_opaque_types(
                &input.infcx,
                &input.body_owned,
                &input.universal_region_relations.universal_regions,
                &input.region_bound_pairs,
                &input.known_type_outlives_obligations,
                &mut input.constraints,
                &mut self.concrete_opaque_types,
                &opaque_types,
            );
        }
        detect_opaque_types_added_while_handling_opaque_types(
            &input.infcx,
            opaque_types_storage_num_entries,
        )
    }
}
/// Computing defining uses of opaques may depend on the propagated region
/// requirements of nested bodies, while applying defining uses may introduce
/// additional region requirements we need to propagate.
///
/// This results in cyclic dependency. To compute the defining uses in parent
/// bodies, we need the closure requirements of its nested bodies, but to check
/// non-defining uses in nested bodies, we may rely on the defining uses in the
/// parent.
///
/// We handle this issue by applying closure requirements twice. Once using the
/// region constraints from before we've handled opaque types in the nested body
/// - which is used by the parent to handle its defining uses - and once after.
///
/// As a performance optimization, we also eagerly finish borrowck for bodies
/// which don't depend on opaque types. In this case they get removed from
/// `collect_region_constraints_results` and the final result gets put into
/// `propagated_borrowck_results`.
fn apply_closure_requirements_modulo_opaques(&mut self) {
    // Requirements computed *before* opaques are handled, keyed by nested body.
    // Only populated for bodies that depend on opaque types.
    let mut closure_requirements_modulo_opaques = FxHashMap::default();
    // We need to `mem::take` both `self.collect_region_constraints_results` and
    // `input.deferred_closure_requirements` as we otherwise can't iterate over
    // them while mutably using the containing struct.
    let collect_region_constraints_results =
        mem::take(&mut self.collect_region_constraints_results);
    // We iterate over all bodies here, visiting nested bodies before their parent.
    for (def_id, mut input) in collect_region_constraints_results {
        // A body depends on opaque types if it either has any opaque type uses itself,
        // or it has a nested body which does.
        //
        // If the current body does not depend on any opaque types, we eagerly compute
        // its final result and write it into `self.propagated_borrowck_results`. This
        // avoids having to compute its closure requirements modulo regions, as they
        // are just the same as its final closure requirements.
        let mut depends_on_opaques = input.infcx.has_opaque_types_in_storage();
        // Iterate over all nested bodies of `input`. If that nested body depends on
        // opaque types, we apply its closure requirements modulo opaques. Otherwise
        // we use the closure requirements from its final borrowck result.
        //
        // In case we've only applied the closure requirements modulo opaques, we have
        // to later apply its closure requirements considering opaques, so we put that
        // nested body back into `deferred_closure_requirements`.
        //
        // NOTE: the loop variable `def_id` here shadows the outer one; it names the
        // *nested* body whose requirements get applied to `input`.
        for (def_id, args, locations) in mem::take(&mut input.deferred_closure_requirements) {
            let closure_requirements = match self.propagated_borrowck_results.get(&def_id) {
                None => {
                    // Nested body wasn't eagerly finished, so it depends on opaques;
                    // fall back to its modulo-opaques requirements (present because
                    // nested bodies are visited before their parents).
                    depends_on_opaques = true;
                    input.deferred_closure_requirements.push((def_id, args, locations));
                    &closure_requirements_modulo_opaques[&def_id]
                }
                Some(result) => &result.closure_requirements,
            };
            Self::apply_closure_requirements(
                &mut input,
                closure_requirements,
                def_id,
                args,
                locations,
            );
        }
        // In case the current body does depend on opaques and is a nested body,
        // we need to compute its closure requirements modulo opaques so that
        // we're able to use it when visiting its parent later in this function.
        //
        // If the current body does not depend on opaque types, we finish borrowck
        // and write its result into `propagated_borrowck_results`.
        if depends_on_opaques {
            if def_id != self.root_def_id {
                let req = Self::compute_closure_requirements_modulo_opaques(&input);
                closure_requirements_modulo_opaques.insert(def_id, req);
            }
            self.collect_region_constraints_results.insert(def_id, input);
        } else {
            assert!(input.deferred_closure_requirements.is_empty());
            let result = borrowck_check_region_constraints(self, input);
            self.propagated_borrowck_results.insert(def_id, result);
        }
    }
}
/// Thin wrapper delegating to the free function of the same name, pulling
/// all required pieces out of a body's collected constraint results.
fn compute_closure_requirements_modulo_opaques(
    input: &CollectRegionConstraintsResult<'tcx>,
) -> Option<ClosureRegionRequirements<'tcx>> {
    let location_map = Rc::clone(&input.location_map);
    compute_closure_requirements_modulo_opaques(
        &input.infcx,
        &input.body_owned,
        location_map,
        &input.universal_region_relations,
        &input.constraints,
    )
}
/// Converts the propagated requirements of the nested body `closure_def_id`
/// (instantiated with `args` at `locations`) into region constraints on `input`.
/// A `None` means the nested body propagated nothing, so there is nothing to do.
fn apply_closure_requirements(
    input: &mut CollectRegionConstraintsResult<'tcx>,
    closure_requirements: &Option<ClosureRegionRequirements<'tcx>>,
    closure_def_id: LocalDefId,
    args: ty::GenericArgsRef<'tcx>,
    locations: Locations,
) {
    let Some(closure_requirements) = closure_requirements else { return };
    let mut converter = constraint_conversion::ConstraintConversion::new(
        &input.infcx,
        &input.universal_region_relations.universal_regions,
        &input.region_bound_pairs,
        &input.known_type_outlives_obligations,
        locations,
        input.body_owned.span, // irrelevant; will be overridden.
        ConstraintCategory::Boring, // same as above.
        &mut input.constraints,
    );
    converter.apply_closure_requirements(closure_requirements, closure_def_id, args);
}
/// Drives borrow checking for the typeck root and all of its nested bodies.
///
/// The overall order is: (1) collect region constraints for every body,
/// (2) apply nested closure requirements modulo opaques (eagerly finishing
/// bodies that don't depend on opaques), (3) handle opaque type uses for all
/// bodies together, (4) finish the remaining bodies using the now-final
/// closure requirements.
pub(super) fn do_mir_borrowck(&mut self) {
    // The list of all bodies we need to borrowck. This first looks at
    // nested bodies, and then their parents. This means accessing e.g.
    // `used_mut_upvars` for a closure can assume that we've already
    // checked that closure.
    let all_bodies = self
        .tcx
        .nested_bodies_within(self.root_def_id)
        .iter()
        .chain(std::iter::once(self.root_def_id));
    for def_id in all_bodies {
        let result = borrowck_collect_region_constraints(self, def_id);
        self.collect_region_constraints_results.insert(def_id, result);
    }
    // We now apply the closure requirements of nested bodies modulo
    // regions. In case a body does not depend on opaque types, we
    // eagerly check its region constraints and use the final closure
    // requirements.
    //
    // We eagerly finish borrowck for bodies which don't depend on
    // opaques.
    self.apply_closure_requirements_modulo_opaques();
    // We handle opaque type uses for all bodies together.
    self.handle_opaque_type_uses();
    // Now walk over all bodies which depend on opaque types and finish borrowck.
    //
    // We first apply the final closure requirements from nested bodies which also
    // depend on opaque types and then finish borrow checking the parent. Bodies
    // which don't depend on opaques have already been fully borrowchecked in
    // `apply_closure_requirements_modulo_opaques` as an optimization.
    for (def_id, mut input) in mem::take(&mut self.collect_region_constraints_results) {
        // Inner `def_id` shadows the outer one: it is the *nested* body whose
        // (now final) closure requirements get applied to `input`.
        for (def_id, args, locations) in mem::take(&mut input.deferred_closure_requirements) {
            // We visit nested bodies before their parent, so we're already
            // done with nested bodies at this point.
            let closure_requirements =
                &self.propagated_borrowck_results[&def_id].closure_requirements;
            Self::apply_closure_requirements(
                &mut input,
                closure_requirements,
                def_id,
                args,
                locations,
            );
        }
        let result = borrowck_check_region_constraints(self, input);
        self.propagated_borrowck_results.insert(def_id, result);
    }
}
}

View file

@ -19,6 +19,7 @@ use crate::type_check::{Locations, MirTypeckRegionConstraints, constraint_conver
use crate::universal_regions::UniversalRegions;
#[derive(Debug)]
#[derive(Clone)] // FIXME(#146079)
pub(crate) struct UniversalRegionRelations<'tcx> {
pub(crate) universal_regions: UniversalRegions<'tcx>,

View file

@ -34,6 +34,7 @@ use rustc_mir_dataflow::points::DenseLocationMap;
use rustc_span::def_id::CRATE_DEF_ID;
use rustc_span::source_map::Spanned;
use rustc_span::{Span, sym};
use rustc_trait_selection::infer::InferCtxtExt;
use rustc_trait_selection::traits::query::type_op::custom::scrape_region_constraints;
use rustc_trait_selection::traits::query::type_op::{TypeOp, TypeOpOutput};
use tracing::{debug, instrument, trace};
@ -48,7 +49,7 @@ use crate::region_infer::values::{LivenessValues, PlaceholderIndex, PlaceholderI
use crate::session_diagnostics::{MoveUnsized, SimdIntrinsicArgConst};
use crate::type_check::free_region_relations::{CreateResult, UniversalRegionRelations};
use crate::universal_regions::{DefiningTy, UniversalRegions};
use crate::{BorrowCheckRootCtxt, BorrowckInferCtxt, path_utils};
use crate::{BorrowCheckRootCtxt, BorrowckInferCtxt, DeferredClosureRequirements, path_utils};
macro_rules! span_mirbug {
($context:expr, $elem:expr, $($message:tt)*) => ({
@ -66,7 +67,7 @@ macro_rules! span_mirbug {
}
pub(crate) mod canonical;
mod constraint_conversion;
pub(crate) mod constraint_conversion;
pub(crate) mod free_region_relations;
mod input_output;
pub(crate) mod liveness;
@ -141,6 +142,7 @@ pub(crate) fn type_check<'tcx>(
None
};
let mut deferred_closure_requirements = Default::default();
let mut typeck = TypeChecker {
root_cx,
infcx,
@ -156,6 +158,7 @@ pub(crate) fn type_check<'tcx>(
polonius_facts,
borrow_set,
constraints: &mut constraints,
deferred_closure_requirements: &mut deferred_closure_requirements,
polonius_liveness,
};
@ -190,6 +193,7 @@ pub(crate) fn type_check<'tcx>(
universal_region_relations,
region_bound_pairs,
known_type_outlives_obligations,
deferred_closure_requirements,
polonius_context,
}
}
@ -229,6 +233,7 @@ struct TypeChecker<'a, 'tcx> {
polonius_facts: &'a mut Option<PoloniusFacts>,
borrow_set: &'a BorrowSet<'tcx>,
constraints: &'a mut MirTypeckRegionConstraints<'tcx>,
deferred_closure_requirements: &'a mut DeferredClosureRequirements<'tcx>,
/// When using `-Zpolonius=next`, the liveness helper data used to create polonius constraints.
polonius_liveness: Option<PoloniusLivenessContext>,
}
@ -240,11 +245,13 @@ pub(crate) struct MirTypeckResults<'tcx> {
pub(crate) universal_region_relations: Frozen<UniversalRegionRelations<'tcx>>,
pub(crate) region_bound_pairs: Frozen<RegionBoundPairs<'tcx>>,
pub(crate) known_type_outlives_obligations: Frozen<Vec<ty::PolyTypeOutlivesPredicate<'tcx>>>,
pub(crate) deferred_closure_requirements: DeferredClosureRequirements<'tcx>,
pub(crate) polonius_context: Option<PoloniusContext>,
}
/// A collection of region constraints that must be satisfied for the
/// program to be considered well-typed.
#[derive(Clone)] // FIXME(#146079)
pub(crate) struct MirTypeckRegionConstraints<'tcx> {
/// Maps from a `ty::Placeholder` to the corresponding
/// `PlaceholderIndex` bit that we will use for it.
@ -1454,68 +1461,79 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
}
CastKind::PtrToPtr => {
let ty_from = op.ty(self.body, tcx);
let cast_ty_from = CastTy::from_ty(ty_from);
let cast_ty_to = CastTy::from_ty(*ty);
match (cast_ty_from, cast_ty_to) {
(Some(CastTy::Ptr(src)), Some(CastTy::Ptr(dst))) => {
let src_tail = self.struct_tail(src.ty, location);
let dst_tail = self.struct_tail(dst.ty, location);
let Some(CastTy::Ptr(src)) = CastTy::from_ty(ty_from) else {
unreachable!();
};
let Some(CastTy::Ptr(dst)) = CastTy::from_ty(*ty) else {
unreachable!();
};
// This checks (lifetime part of) vtable validity for pointer casts,
// which is irrelevant when there are aren't principal traits on
// both sides (aka only auto traits).
//
// Note that other checks (such as denying `dyn Send` -> `dyn
// Debug`) are in `rustc_hir_typeck`.
if let ty::Dynamic(src_tty, _src_lt, ty::Dyn) = *src_tail.kind()
&& let ty::Dynamic(dst_tty, dst_lt, ty::Dyn) = *dst_tail.kind()
&& src_tty.principal().is_some()
&& dst_tty.principal().is_some()
{
// Remove auto traits.
// Auto trait checks are handled in `rustc_hir_typeck` as FCW.
let src_obj = Ty::new_dynamic(
tcx,
tcx.mk_poly_existential_predicates(
&src_tty.without_auto_traits().collect::<Vec<_>>(),
),
// FIXME: Once we disallow casting `*const dyn Trait + 'short`
// to `*const dyn Trait + 'long`, then this can just be `src_lt`.
dst_lt,
ty::Dyn,
);
let dst_obj = Ty::new_dynamic(
tcx,
tcx.mk_poly_existential_predicates(
&dst_tty.without_auto_traits().collect::<Vec<_>>(),
),
dst_lt,
ty::Dyn,
);
if self.infcx.type_is_sized_modulo_regions(self.infcx.param_env, dst.ty) {
// Wide to thin ptr cast. This may even occur in an env with
// impossible predicates, such as `where dyn Trait: Sized`.
// In this case, we don't want to fall into the case below,
// since the types may not actually be equatable, but it's
// fine to perform this operation in an impossible env.
let trait_ref = ty::TraitRef::new(
tcx,
tcx.require_lang_item(LangItem::Sized, self.last_span),
[dst.ty],
);
self.prove_trait_ref(
trait_ref,
location.to_locations(),
ConstraintCategory::Cast {
is_implicit_coercion: true,
unsize_to: None,
},
);
} else if let ty::Dynamic(src_tty, _src_lt, ty::Dyn) =
*self.struct_tail(src.ty, location).kind()
&& let ty::Dynamic(dst_tty, dst_lt, ty::Dyn) =
*self.struct_tail(dst.ty, location).kind()
&& src_tty.principal().is_some()
&& dst_tty.principal().is_some()
{
// This checks (lifetime part of) vtable validity for pointer casts,
// which is irrelevant when there are aren't principal traits on
// both sides (aka only auto traits).
//
// Note that other checks (such as denying `dyn Send` -> `dyn
// Debug`) are in `rustc_hir_typeck`.
debug!(?src_tty, ?dst_tty, ?src_obj, ?dst_obj);
// Remove auto traits.
// Auto trait checks are handled in `rustc_hir_typeck` as FCW.
let src_obj = Ty::new_dynamic(
tcx,
tcx.mk_poly_existential_predicates(
&src_tty.without_auto_traits().collect::<Vec<_>>(),
),
// FIXME: Once we disallow casting `*const dyn Trait + 'short`
// to `*const dyn Trait + 'long`, then this can just be `src_lt`.
dst_lt,
ty::Dyn,
);
let dst_obj = Ty::new_dynamic(
tcx,
tcx.mk_poly_existential_predicates(
&dst_tty.without_auto_traits().collect::<Vec<_>>(),
),
dst_lt,
ty::Dyn,
);
self.sub_types(
src_obj,
dst_obj,
location.to_locations(),
ConstraintCategory::Cast {
is_implicit_coercion: false,
unsize_to: None,
},
)
.unwrap();
}
}
_ => {
span_mirbug!(
self,
rvalue,
"Invalid PtrToPtr cast {:?} -> {:?}",
ty_from,
ty
)
}
debug!(?src_tty, ?dst_tty, ?src_obj, ?dst_obj);
self.sub_types(
src_obj,
dst_obj,
location.to_locations(),
ConstraintCategory::Cast {
is_implicit_coercion: false,
unsize_to: None,
},
)
.unwrap();
}
}
CastKind::Transmute => {
@ -2458,21 +2476,11 @@ impl<'a, 'tcx> TypeChecker<'a, 'tcx> {
locations: Locations,
) -> ty::InstantiatedPredicates<'tcx> {
let root_def_id = self.root_cx.root_def_id();
if let Some(closure_requirements) = &self.root_cx.closure_requirements(def_id) {
constraint_conversion::ConstraintConversion::new(
self.infcx,
self.universal_regions,
self.region_bound_pairs,
self.known_type_outlives_obligations,
locations,
self.body.span, // irrelevant; will be overridden.
ConstraintCategory::Boring, // same as above.
self.constraints,
)
.apply_closure_requirements(closure_requirements, def_id, args);
}
// We will have to handle propagated closure requirements for this closure,
// but need to defer this until the nested body has been fully borrow checked.
self.deferred_closure_requirements.push((def_id, args, locations));
// Now equate closure args to regions inherited from `root_def_id`. Fixes #98589.
// Equate closure args to regions inherited from `root_def_id`. Fixes #98589.
let typeck_root_args = ty::GenericArgs::identity_for_item(tcx, root_def_id);
let parent_args = match tcx.def_kind(def_id) {

View file

@ -40,6 +40,7 @@ use crate::BorrowckInferCtxt;
use crate::renumber::RegionCtxt;
#[derive(Debug)]
#[derive(Clone)] // FIXME(#146079)
pub(crate) struct UniversalRegions<'tcx> {
indices: UniversalRegionIndices<'tcx>,
@ -200,6 +201,7 @@ impl<'tcx> DefiningTy<'tcx> {
}
#[derive(Debug)]
#[derive(Clone)] // FIXME(#146079)
struct UniversalRegionIndices<'tcx> {
/// For those regions that may appear in the parameter environment
/// ('static and early-bound regions), we maintain a map from the

View file

@ -82,20 +82,16 @@ jobs:
- name: Build sample project with target defined as JSON spec
run: |
./y.sh prepare --only-libcore --cross
./y.sh build --sysroot --features compiler-builtins-no-f16-f128 --target-triple m68k-unknown-linux-gnu --target ${{ github.workspace }}/target_specs/m68k-unknown-linux-gnu.json
./y.sh build --sysroot --target-triple m68k-unknown-linux-gnu --target ${{ github.workspace }}/target_specs/m68k-unknown-linux-gnu.json
CG_RUSTFLAGS="-Clinker=m68k-unknown-linux-gnu-gcc" ./y.sh cargo build --manifest-path=./tests/hello-world/Cargo.toml --target ${{ github.workspace }}/target_specs/m68k-unknown-linux-gnu.json
./y.sh clean all
- name: Build
run: |
./y.sh prepare --only-libcore --cross
./y.sh build --sysroot --features compiler-builtins-no-f16-f128 --target-triple m68k-unknown-linux-gnu
./y.sh build --sysroot --target-triple m68k-unknown-linux-gnu
./y.sh test --mini-tests --target-triple m68k-unknown-linux-gnu
# FIXME: since https://github.com/rust-lang/rust/pull/140809, we cannot run programs for architectures not
# supported by the object crate, since this adds a dependency on symbols.o for the panic runtime.
# And as such, a wrong order of the object files in the linker command now fails with an undefined reference
# to some symbols like __rustc::rust_panic.
#CG_GCC_TEST_TARGET=m68k-unknown-linux-gnu ./y.sh test --cargo-tests --target-triple m68k-unknown-linux-gnu
CG_GCC_TEST_TARGET=m68k-unknown-linux-gnu ./y.sh test --cargo-tests --target-triple m68k-unknown-linux-gnu
./y.sh clean all
- name: Prepare dependencies
@ -104,23 +100,21 @@ jobs:
git config --global user.name "User"
./y.sh prepare --cross
# FIXME: We cannot run programs for architectures not supported by the object crate. See comment above.
#- name: Run tests
#run: |
#./y.sh test --target-triple m68k-unknown-linux-gnu --release --clean --build-sysroot --sysroot-features compiler-builtins-no-f16-f128 ${{ matrix.commands }}
- name: Run tests
run: |
./y.sh test --target-triple m68k-unknown-linux-gnu --release --clean --build-sysroot ${{ matrix.commands }}
# FIXME: We cannot run programs for architectures not supported by the object crate. See comment above.
#- name: Run Hello World!
#run: |
#./y.sh build --target-triple m68k-unknown-linux-gnu
- name: Run Hello World!
run: |
./y.sh build --target-triple m68k-unknown-linux-gnu
#vm_dir=$(pwd)/vm
#cd tests/hello-world
#CG_RUSTFLAGS="-Clinker=m68k-unknown-linux-gnu-gcc" ../../y.sh cargo build --target m68k-unknown-linux-gnu
#sudo cp target/m68k-unknown-linux-gnu/debug/hello_world $vm_dir/home/
#sudo chroot $vm_dir qemu-m68k-static /home/hello_world > hello_world_stdout
#expected_output="40"
#test $(cat hello_world_stdout) == $expected_output || (echo "Output differs. Actual output: $(cat hello_world_stdout)"; exit 1)
vm_dir=$(pwd)/vm
cd tests/hello-world
CG_RUSTFLAGS="-Clinker=m68k-unknown-linux-gnu-gcc" ../../y.sh cargo build --target m68k-unknown-linux-gnu
sudo cp target/m68k-unknown-linux-gnu/debug/hello_world $vm_dir/home/
sudo chroot $vm_dir qemu-m68k-static /home/hello_world > hello_world_stdout
expected_output="40"
test $(cat hello_world_stdout) == $expected_output || (echo "Output differs. Actual output: $(cat hello_world_stdout)"; exit 1)
# Summary job for the merge queue.
# ALL THE PREVIOUS JOBS NEED TO BE ADDED TO THE `needs` SECTION OF THIS JOB!

View file

@ -56,18 +56,18 @@ dependencies = [
[[package]]
name = "gccjit"
version = "2.7.0"
version = "2.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ae99a89184220d967dd300139f2d2ae7d52c1a69d632b24aacc57c54625254ce"
checksum = "4a0e310ef75f396cd11b2443b353d55376656ca92c13cba36f92b7aff346ac1a"
dependencies = [
"gccjit_sys",
]
[[package]]
name = "gccjit_sys"
version = "0.8.0"
version = "0.8.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "24edb7bfe2b7b27c6d09ed23eebfcab0b359c8fe978433f902943e6f127a0f1b"
checksum = "95ed7572b30cd32430294dde6fb70822d58e67c6846a548647e8739776a0125b"
dependencies = [
"libc",
]

View file

@ -24,7 +24,7 @@ default = ["master"]
[dependencies]
object = { version = "0.37.0", default-features = false, features = ["std", "read"] }
tempfile = "3.20"
gccjit = "2.7"
gccjit = "2.8"
#gccjit = { git = "https://github.com/rust-lang/gccjit.rs" }
# Local copy.

View file

@ -1,7 +1,7 @@
use std::ffi::OsStr;
use std::path::Path;
use crate::utils::run_command_with_output;
use crate::utils::{run_command_with_output, walk_dir};
fn show_usage() {
println!(
@ -32,5 +32,31 @@ pub fn run() -> Result<(), String> {
if check { &[&"cargo", &"fmt", &"--check"] } else { &[&"cargo", &"fmt"] };
run_command_with_output(cmd, Some(Path::new(".")))?;
run_command_with_output(cmd, Some(Path::new("build_system")))
run_command_with_output(cmd, Some(Path::new("build_system")))?;
run_rustfmt_recursively("tests/run", check)
}
fn run_rustfmt_recursively<P>(dir: P, check: bool) -> Result<(), String>
where
P: AsRef<Path>,
{
walk_dir(
dir,
&mut |dir| run_rustfmt_recursively(dir, check),
&mut |file_path| {
if file_path.extension().filter(|ext| ext == &OsStr::new("rs")).is_some() {
let rustfmt_cmd: &[&dyn AsRef<OsStr>] = if check {
&[&"rustfmt", &"--check", &file_path]
} else {
&[&"rustfmt", &file_path]
};
run_command_with_output(rustfmt_cmd, Some(Path::new(".")))
} else {
Ok(())
}
},
true,
)
}

View file

@ -531,7 +531,7 @@ fn setup_rustc(env: &mut Env, args: &TestArg) -> Result<PathBuf, String> {
r#"change-id = 115898
[rust]
codegen-backends = []
codegen-backends = ["gcc"]
deny-warnings = false
verbose-tests = true

View file

@ -1 +1 @@
04ce66d8c918de9273bd7101638ad8724edf5e21
4e995bd73c4490edfe5080ec6014d63aa9abed5f

View file

@ -1,3 +1,3 @@
[toolchain]
channel = "nightly-2025-08-03"
channel = "nightly-2025-08-25"
components = ["rust-src", "rustc-dev", "llvm-tools-preview"]

View file

@ -29,7 +29,7 @@ use rustc_codegen_ssa::back::write::{CodegenContext, FatLtoInput};
use rustc_codegen_ssa::traits::*;
use rustc_codegen_ssa::{ModuleCodegen, ModuleKind, looks_like_rust_object_file};
use rustc_data_structures::memmap::Mmap;
use rustc_errors::{DiagCtxtHandle, FatalError};
use rustc_errors::DiagCtxtHandle;
use rustc_middle::bug;
use rustc_middle::dep_graph::WorkProduct;
use rustc_session::config::Lto;
@ -51,12 +51,11 @@ fn prepare_lto(
cgcx: &CodegenContext<GccCodegenBackend>,
each_linked_rlib_for_lto: &[PathBuf],
dcx: DiagCtxtHandle<'_>,
) -> Result<LtoData, FatalError> {
) -> LtoData {
let tmp_path = match tempdir() {
Ok(tmp_path) => tmp_path,
Err(error) => {
eprintln!("Cannot create temporary directory: {}", error);
return Err(FatalError);
dcx.fatal(format!("Cannot create temporary directory: {}", error));
}
};
@ -91,15 +90,14 @@ fn prepare_lto(
upstream_modules.push((module, CString::new(name).unwrap()));
}
Err(e) => {
dcx.emit_err(e);
return Err(FatalError);
dcx.emit_fatal(e);
}
}
}
}
}
Ok(LtoData { upstream_modules, tmp_path })
LtoData { upstream_modules, tmp_path }
}
fn save_as_file(obj: &[u8], path: &Path) -> Result<(), LtoBitcodeFromRlib> {
@ -114,10 +112,10 @@ pub(crate) fn run_fat(
cgcx: &CodegenContext<GccCodegenBackend>,
each_linked_rlib_for_lto: &[PathBuf],
modules: Vec<FatLtoInput<GccCodegenBackend>>,
) -> Result<ModuleCodegen<GccContext>, FatalError> {
) -> ModuleCodegen<GccContext> {
let dcx = cgcx.create_dcx();
let dcx = dcx.handle();
let lto_data = prepare_lto(cgcx, each_linked_rlib_for_lto, dcx)?;
let lto_data = prepare_lto(cgcx, each_linked_rlib_for_lto, dcx);
/*let symbols_below_threshold =
lto_data.symbols_below_threshold.iter().map(|c| c.as_ptr()).collect::<Vec<_>>();*/
fat_lto(
@ -137,7 +135,7 @@ fn fat_lto(
mut serialized_modules: Vec<(SerializedModule<ModuleBuffer>, CString)>,
tmp_path: TempDir,
//symbols_below_threshold: &[String],
) -> Result<ModuleCodegen<GccContext>, FatalError> {
) -> ModuleCodegen<GccContext> {
let _timer = cgcx.prof.generic_activity("GCC_fat_lto_build_monolithic_module");
info!("going for a fat lto");
@ -261,7 +259,7 @@ fn fat_lto(
// of now.
module.module_llvm.temp_dir = Some(tmp_path);
Ok(module)
module
}
pub struct ModuleBuffer(PathBuf);
@ -286,10 +284,10 @@ pub(crate) fn run_thin(
each_linked_rlib_for_lto: &[PathBuf],
modules: Vec<(String, ThinBuffer)>,
cached_modules: Vec<(SerializedModule<ModuleBuffer>, WorkProduct)>,
) -> Result<(Vec<ThinModule<GccCodegenBackend>>, Vec<WorkProduct>), FatalError> {
) -> (Vec<ThinModule<GccCodegenBackend>>, Vec<WorkProduct>) {
let dcx = cgcx.create_dcx();
let dcx = dcx.handle();
let lto_data = prepare_lto(cgcx, each_linked_rlib_for_lto, dcx)?;
let lto_data = prepare_lto(cgcx, each_linked_rlib_for_lto, dcx);
if cgcx.opts.cg.linker_plugin_lto.enabled() {
unreachable!(
"We should never reach this case if the LTO step \
@ -355,7 +353,7 @@ fn thin_lto(
tmp_path: TempDir,
cached_modules: Vec<(SerializedModule<ModuleBuffer>, WorkProduct)>,
//_symbols_below_threshold: &[String],
) -> Result<(Vec<ThinModule<GccCodegenBackend>>, Vec<WorkProduct>), FatalError> {
) -> (Vec<ThinModule<GccCodegenBackend>>, Vec<WorkProduct>) {
let _timer = cgcx.prof.generic_activity("LLVM_thin_lto_global_analysis");
info!("going for that thin, thin LTO");
@ -518,13 +516,13 @@ fn thin_lto(
// TODO: save the directory so that it gets deleted later.
std::mem::forget(tmp_path);
Ok((opt_jobs, copy_jobs))
(opt_jobs, copy_jobs)
}
pub fn optimize_thin_module(
thin_module: ThinModule<GccCodegenBackend>,
_cgcx: &CodegenContext<GccCodegenBackend>,
) -> Result<ModuleCodegen<GccContext>, FatalError> {
) -> ModuleCodegen<GccContext> {
//let dcx = cgcx.create_dcx();
//let module_name = &thin_module.shared.module_names[thin_module.idx];
@ -634,7 +632,8 @@ pub fn optimize_thin_module(
save_temp_bitcode(cgcx, &module, "thin-lto-after-pm");
}
}*/
Ok(module)
#[allow(clippy::let_and_return)]
module
}
pub struct ThinBuffer {

View file

@ -6,7 +6,6 @@ use rustc_codegen_ssa::back::write::{BitcodeSection, CodegenContext, EmitObj, Mo
use rustc_codegen_ssa::{CompiledModule, ModuleCodegen};
use rustc_fs_util::link_or_copy;
use rustc_session::config::OutputType;
use rustc_span::fatal_error::FatalError;
use rustc_target::spec::SplitDebuginfo;
use crate::base::add_pic_option;
@ -17,7 +16,7 @@ pub(crate) fn codegen(
cgcx: &CodegenContext<GccCodegenBackend>,
module: ModuleCodegen<GccContext>,
config: &ModuleConfig,
) -> Result<CompiledModule, FatalError> {
) -> CompiledModule {
let dcx = cgcx.create_dcx();
let dcx = dcx.handle();
@ -246,7 +245,7 @@ pub(crate) fn codegen(
}
}
Ok(module.into_compiled_module(
module.into_compiled_module(
config.emit_obj != EmitObj::None,
cgcx.target_can_use_split_dwarf && cgcx.split_debuginfo == SplitDebuginfo::Unpacked,
config.emit_bc,
@ -254,7 +253,7 @@ pub(crate) fn codegen(
config.emit_ir,
&cgcx.output_filenames,
cgcx.invocation_temp.as_deref(),
))
)
}
pub(crate) fn save_temp_bitcode(

View file

@ -1497,7 +1497,6 @@ fn simd_funnel_shift<'a, 'gcc, 'tcx>(
let index = bx.context.new_rvalue_from_int(bx.int_type, i as i32);
let a_val = bx.context.new_vector_access(None, a, index).to_rvalue();
let a_val = bx.context.new_bitcast(None, a_val, unsigned_type);
// TODO: we probably need to use gcc_int_cast instead.
let a_val = bx.gcc_int_cast(a_val, new_int_type);
let b_val = bx.context.new_vector_access(None, b, index).to_rvalue();
let b_val = bx.context.new_bitcast(None, b_val, unsigned_type);

View file

@ -110,7 +110,6 @@ use rustc_middle::util::Providers;
use rustc_session::Session;
use rustc_session::config::{OptLevel, OutputFilenames};
use rustc_span::Symbol;
use rustc_span::fatal_error::FatalError;
use rustc_target::spec::RelocModel;
use tempfile::TempDir;
@ -362,7 +361,7 @@ impl WriteBackendMethods for GccCodegenBackend {
_exported_symbols_for_lto: &[String],
each_linked_rlib_for_lto: &[PathBuf],
modules: Vec<FatLtoInput<Self>>,
) -> Result<ModuleCodegen<Self::Module>, FatalError> {
) -> ModuleCodegen<Self::Module> {
back::lto::run_fat(cgcx, each_linked_rlib_for_lto, modules)
}
@ -373,7 +372,7 @@ impl WriteBackendMethods for GccCodegenBackend {
each_linked_rlib_for_lto: &[PathBuf],
modules: Vec<(String, Self::ThinBuffer)>,
cached_modules: Vec<(SerializedModule<Self::ModuleBuffer>, WorkProduct)>,
) -> Result<(Vec<ThinModule<Self>>, Vec<WorkProduct>), FatalError> {
) -> (Vec<ThinModule<Self>>, Vec<WorkProduct>) {
back::lto::run_thin(cgcx, each_linked_rlib_for_lto, modules, cached_modules)
}
@ -390,15 +389,14 @@ impl WriteBackendMethods for GccCodegenBackend {
_dcx: DiagCtxtHandle<'_>,
module: &mut ModuleCodegen<Self::Module>,
config: &ModuleConfig,
) -> Result<(), FatalError> {
) {
module.module_llvm.context.set_optimization_level(to_gcc_opt_level(config.opt_level));
Ok(())
}
fn optimize_thin(
cgcx: &CodegenContext<Self>,
thin: ThinModule<Self>,
) -> Result<ModuleCodegen<Self::Module>, FatalError> {
) -> ModuleCodegen<Self::Module> {
back::lto::optimize_thin_module(thin, cgcx)
}
@ -406,7 +404,7 @@ impl WriteBackendMethods for GccCodegenBackend {
cgcx: &CodegenContext<Self>,
module: ModuleCodegen<Self::Module>,
config: &ModuleConfig,
) -> Result<CompiledModule, FatalError> {
) -> CompiledModule {
back::write::codegen(cgcx, module, config)
}

View file

@ -6,7 +6,6 @@ tests/run-make/doctests-keep-binaries/
tests/run-make/doctests-runtool/
tests/run-make/emit-shared-files/
tests/run-make/exit-code/
tests/run-make/issue-64153/
tests/run-make/llvm-ident/
tests/run-make/native-link-modifier-bundle/
tests/run-make/remap-path-prefix-dwarf/
@ -34,8 +33,6 @@ tests/run-make/c-link-to-rust-staticlib/
tests/run-make/foreign-double-unwind/
tests/run-make/foreign-exceptions/
tests/run-make/glibc-staticlib-args/
tests/run-make/issue-36710/
tests/run-make/issue-68794-textrel-on-minimal-lib/
tests/run-make/lto-smoke-c/
tests/run-make/return-non-c-like-enum/

View file

@ -20,7 +20,7 @@ tests/ui/drop/dynamic-drop.rs
tests/ui/rfcs/rfc-2091-track-caller/std-panic-locations.rs
tests/ui/simd/issue-17170.rs
tests/ui/simd/issue-39720.rs
tests/ui/issues/issue-14875.rs
tests/ui/drop/panic-during-drop-14875.rs
tests/ui/issues/issue-29948.rs
tests/ui/process/println-with-broken-pipe.rs
tests/ui/lto/thin-lto-inlines2.rs
@ -86,3 +86,5 @@ tests/ui/panics/unwind-force-no-unwind-tables.rs
tests/ui/attributes/fn-align-dyn.rs
tests/ui/linkage-attr/raw-dylib/elf/glibc-x86_64.rs
tests/ui/explicit-tail-calls/recursion-etc.rs
tests/ui/explicit-tail-calls/indexer.rs
tests/ui/explicit-tail-calls/drop-order.rs

View file

@ -7,12 +7,10 @@ fn main() {
use std::hint::black_box;
macro_rules! check {
($ty:ty, $expr:expr) => {
{
const EXPECTED: $ty = $expr;
assert_eq!($expr, EXPECTED);
}
};
($ty:ty, $expr:expr) => {{
const EXPECTED: $ty = $expr;
assert_eq!($expr, EXPECTED);
}};
}
check!(u32, (2220326408_u32 + black_box(1)) >> (32 - 6));

View file

@ -12,7 +12,7 @@ fn main() {
let arg_count = std::env::args().count();
let int = isize::MAX;
let _int = int + arg_count as isize; // overflow
let _int = int + arg_count as isize; // overflow
// If overflow checking is disabled, we should reach here.
#[cfg(not(debug_assertions))]

View file

@ -27,12 +27,8 @@ fn one() -> isize {
#[no_mangle]
extern "C" fn main(argc: i32, _argv: *const *const u8) -> i32 {
let test = Test {
field: one(),
};
let two = Two {
two: 2,
};
let test = Test { field: one() };
let two = Two { two: 2 };
unsafe {
libc::printf(b"%ld\n\0" as *const u8 as *const i8, test.field);
libc::printf(b"%ld\n\0" as *const u8 as *const i8, two.two);

View file

@ -12,15 +12,11 @@ struct Struct {
func: unsafe fn(*const ()),
}
fn func(_ptr: *const ()) {
}
fn func(_ptr: *const ()) {}
fn main() {
let mut x = MaybeUninit::<&Struct>::uninit();
x.write(&Struct {
pointer: std::ptr::null(),
func,
});
x.write(&Struct { pointer: std::ptr::null(), func });
let x = unsafe { x.assume_init() };
let value = unsafe { (x as *const Struct).read_volatile() };
println!("{:?}", value);

View file

@ -7,7 +7,14 @@ mod libc {
#[link(name = "c")]
extern "C" {
pub fn sigaction(signum: i32, act: *const sigaction, oldact: *mut sigaction) -> i32;
pub fn mmap(addr: *mut (), len: usize, prot: i32, flags: i32, fd: i32, offset: i64) -> *mut ();
pub fn mmap(
addr: *mut (),
len: usize,
prot: i32,
flags: i32,
fd: i32,
offset: i64,
) -> *mut ();
pub fn mprotect(addr: *mut (), len: usize, prot: i32) -> i32;
}
@ -54,7 +61,8 @@ fn main() {
libc::MAP_PRIVATE | libc::MAP_ANONYMOUS,
-1,
0,
).cast();
)
.cast();
if STORAGE == libc::MAP_FAILED {
panic!("error: mmap failed");
}

View file

@ -12,7 +12,7 @@ use smallvec::SmallVec;
use crate::builder::SBuilder;
use crate::declare::declare_simple_fn;
use crate::llvm::{self, False, True, Type, Value};
use crate::llvm::{self, FALSE, TRUE, Type, Value};
use crate::{SimpleCx, attributes, debuginfo, llvm_util};
pub(crate) unsafe fn codegen(
@ -80,7 +80,7 @@ pub(crate) unsafe fn codegen(
&cx,
&mangle_internal_symbol(tcx, OomStrategy::SYMBOL),
&i8,
&llvm::LLVMConstInt(i8, tcx.sess.opts.unstable_opts.oom.should_panic() as u64, False),
&llvm::LLVMConstInt(i8, tcx.sess.opts.unstable_opts.oom.should_panic() as u64, FALSE),
);
// __rust_no_alloc_shim_is_unstable_v2
@ -201,7 +201,7 @@ fn create_wrapper_function(
.map(|(i, _)| llvm::get_param(llfn, i as c_uint))
.collect::<Vec<_>>();
let ret = bx.call(ty, callee, &args, None);
llvm::LLVMSetTailCall(ret, True);
llvm::LLVMSetTailCall(ret, TRUE);
if output.is_some() {
bx.ret(ret);
} else {

View file

@ -16,6 +16,7 @@ use tracing::debug;
use crate::builder::Builder;
use crate::common::Funclet;
use crate::context::CodegenCx;
use crate::llvm::ToLlvmBool;
use crate::type_::Type;
use crate::type_of::LayoutLlvmExt;
use crate::value::Value;
@ -470,10 +471,6 @@ pub(crate) fn inline_asm_call<'ll>(
dest: Option<&'ll llvm::BasicBlock>,
catch_funclet: Option<(&'ll llvm::BasicBlock, Option<&Funclet<'ll>>)>,
) -> Option<&'ll Value> {
let volatile = if volatile { llvm::True } else { llvm::False };
let alignstack = if alignstack { llvm::True } else { llvm::False };
let can_throw = if unwind { llvm::True } else { llvm::False };
let argtys = inputs
.iter()
.map(|v| {
@ -500,10 +497,10 @@ pub(crate) fn inline_asm_call<'ll>(
asm.len(),
cons.as_ptr(),
cons.len(),
volatile,
alignstack,
volatile.to_llvm_bool(),
alignstack.to_llvm_bool(),
dia,
can_throw,
unwind.to_llvm_bool(),
)
};

View file

@ -14,7 +14,7 @@ use rustc_codegen_ssa::traits::*;
use rustc_codegen_ssa::{ModuleCodegen, ModuleKind, looks_like_rust_object_file};
use rustc_data_structures::fx::FxHashMap;
use rustc_data_structures::memmap::Mmap;
use rustc_errors::{DiagCtxtHandle, FatalError};
use rustc_errors::DiagCtxtHandle;
use rustc_middle::bug;
use rustc_middle::dep_graph::WorkProduct;
use rustc_session::config::{self, Lto};
@ -36,7 +36,7 @@ fn prepare_lto(
exported_symbols_for_lto: &[String],
each_linked_rlib_for_lto: &[PathBuf],
dcx: DiagCtxtHandle<'_>,
) -> Result<(Vec<CString>, Vec<(SerializedModule<ModuleBuffer>, CString)>), FatalError> {
) -> (Vec<CString>, Vec<(SerializedModule<ModuleBuffer>, CString)>) {
let mut symbols_below_threshold = exported_symbols_for_lto
.iter()
.map(|symbol| CString::new(symbol.to_owned()).unwrap())
@ -79,16 +79,13 @@ fn prepare_lto(
let module = SerializedModule::FromRlib(data.to_vec());
upstream_modules.push((module, CString::new(name).unwrap()));
}
Err(e) => {
dcx.emit_err(e);
return Err(FatalError);
}
Err(e) => dcx.emit_fatal(e),
}
}
}
}
Ok((symbols_below_threshold, upstream_modules))
(symbols_below_threshold, upstream_modules)
}
fn get_bitcode_slice_from_object_data<'a>(
@ -123,11 +120,11 @@ pub(crate) fn run_fat(
exported_symbols_for_lto: &[String],
each_linked_rlib_for_lto: &[PathBuf],
modules: Vec<FatLtoInput<LlvmCodegenBackend>>,
) -> Result<ModuleCodegen<ModuleLlvm>, FatalError> {
) -> ModuleCodegen<ModuleLlvm> {
let dcx = cgcx.create_dcx();
let dcx = dcx.handle();
let (symbols_below_threshold, upstream_modules) =
prepare_lto(cgcx, exported_symbols_for_lto, each_linked_rlib_for_lto, dcx)?;
prepare_lto(cgcx, exported_symbols_for_lto, each_linked_rlib_for_lto, dcx);
let symbols_below_threshold =
symbols_below_threshold.iter().map(|c| c.as_ptr()).collect::<Vec<_>>();
fat_lto(cgcx, dcx, modules, upstream_modules, &symbols_below_threshold)
@ -142,11 +139,11 @@ pub(crate) fn run_thin(
each_linked_rlib_for_lto: &[PathBuf],
modules: Vec<(String, ThinBuffer)>,
cached_modules: Vec<(SerializedModule<ModuleBuffer>, WorkProduct)>,
) -> Result<(Vec<ThinModule<LlvmCodegenBackend>>, Vec<WorkProduct>), FatalError> {
) -> (Vec<ThinModule<LlvmCodegenBackend>>, Vec<WorkProduct>) {
let dcx = cgcx.create_dcx();
let dcx = dcx.handle();
let (symbols_below_threshold, upstream_modules) =
prepare_lto(cgcx, exported_symbols_for_lto, each_linked_rlib_for_lto, dcx)?;
prepare_lto(cgcx, exported_symbols_for_lto, each_linked_rlib_for_lto, dcx);
let symbols_below_threshold =
symbols_below_threshold.iter().map(|c| c.as_ptr()).collect::<Vec<_>>();
if cgcx.opts.cg.linker_plugin_lto.enabled() {
@ -173,7 +170,7 @@ fn fat_lto(
modules: Vec<FatLtoInput<LlvmCodegenBackend>>,
mut serialized_modules: Vec<(SerializedModule<ModuleBuffer>, CString)>,
symbols_below_threshold: &[*const libc::c_char],
) -> Result<ModuleCodegen<ModuleLlvm>, FatalError> {
) -> ModuleCodegen<ModuleLlvm> {
let _timer = cgcx.prof.generic_activity("LLVM_fat_lto_build_monolithic_module");
info!("going for a fat lto");
@ -224,7 +221,7 @@ fn fat_lto(
assert!(!serialized_modules.is_empty(), "must have at least one serialized module");
let (buffer, name) = serialized_modules.remove(0);
info!("no in-memory regular modules to choose from, parsing {:?}", name);
let llvm_module = ModuleLlvm::parse(cgcx, &name, buffer.data(), dcx)?;
let llvm_module = ModuleLlvm::parse(cgcx, &name, buffer.data(), dcx);
ModuleCodegen::new_regular(name.into_string().unwrap(), llvm_module)
}
};
@ -265,7 +262,9 @@ fn fat_lto(
});
info!("linking {:?}", name);
let data = bc_decoded.data();
linker.add(data).map_err(|()| write::llvm_err(dcx, LlvmError::LoadBitcode { name }))?;
linker
.add(data)
.unwrap_or_else(|()| write::llvm_err(dcx, LlvmError::LoadBitcode { name }));
}
drop(linker);
save_temp_bitcode(cgcx, &module, "lto.input");
@ -282,7 +281,7 @@ fn fat_lto(
save_temp_bitcode(cgcx, &module, "lto.after-restriction");
}
Ok(module)
module
}
pub(crate) struct Linker<'a>(&'a mut llvm::Linker<'a>);
@ -352,7 +351,7 @@ fn thin_lto(
serialized_modules: Vec<(SerializedModule<ModuleBuffer>, CString)>,
cached_modules: Vec<(SerializedModule<ModuleBuffer>, WorkProduct)>,
symbols_below_threshold: &[*const libc::c_char],
) -> Result<(Vec<ThinModule<LlvmCodegenBackend>>, Vec<WorkProduct>), FatalError> {
) -> (Vec<ThinModule<LlvmCodegenBackend>>, Vec<WorkProduct>) {
let _timer = cgcx.prof.generic_activity("LLVM_thin_lto_global_analysis");
unsafe {
info!("going for that thin, thin LTO");
@ -422,7 +421,7 @@ fn thin_lto(
symbols_below_threshold.as_ptr(),
symbols_below_threshold.len(),
)
.ok_or_else(|| write::llvm_err(dcx, LlvmError::PrepareThinLtoContext))?;
.unwrap_or_else(|| write::llvm_err(dcx, LlvmError::PrepareThinLtoContext));
let data = ThinData(data);
@ -492,10 +491,10 @@ fn thin_lto(
if let Some(path) = key_map_path
&& let Err(err) = curr_key_map.save_to_file(&path)
{
return Err(write::llvm_err(dcx, LlvmError::WriteThinLtoKey { err }));
write::llvm_err(dcx, LlvmError::WriteThinLtoKey { err });
}
Ok((opt_jobs, copy_jobs))
(opt_jobs, copy_jobs)
}
}
@ -550,7 +549,7 @@ pub(crate) fn run_pass_manager(
dcx: DiagCtxtHandle<'_>,
module: &mut ModuleCodegen<ModuleLlvm>,
thin: bool,
) -> Result<(), FatalError> {
) {
let _timer = cgcx.prof.generic_activity_with_arg("LLVM_lto_optimize", &*module.name);
let config = cgcx.config(module.kind);
@ -582,7 +581,7 @@ pub(crate) fn run_pass_manager(
}
unsafe {
write::llvm_optimize(cgcx, dcx, module, None, config, opt_level, opt_stage, stage)?;
write::llvm_optimize(cgcx, dcx, module, None, config, opt_level, opt_stage, stage);
}
if enable_gpu && !thin {
@ -596,7 +595,7 @@ pub(crate) fn run_pass_manager(
let stage = write::AutodiffStage::PostAD;
if !config.autodiff.contains(&config::AutoDiff::NoPostopt) {
unsafe {
write::llvm_optimize(cgcx, dcx, module, None, config, opt_level, opt_stage, stage)?;
write::llvm_optimize(cgcx, dcx, module, None, config, opt_level, opt_stage, stage);
}
}
@ -608,7 +607,6 @@ pub(crate) fn run_pass_manager(
}
debug!("lto done");
Ok(())
}
pub struct ModuleBuffer(&'static mut llvm::ModuleBuffer);
@ -701,7 +699,7 @@ impl Drop for ThinBuffer {
pub(crate) fn optimize_thin_module(
thin_module: ThinModule<LlvmCodegenBackend>,
cgcx: &CodegenContext<LlvmCodegenBackend>,
) -> Result<ModuleCodegen<ModuleLlvm>, FatalError> {
) -> ModuleCodegen<ModuleLlvm> {
let dcx = cgcx.create_dcx();
let dcx = dcx.handle();
@ -712,7 +710,7 @@ pub(crate) fn optimize_thin_module(
// into that context. One day, however, we may do this for upstream
// crates but for locally codegened modules we may be able to reuse
// that LLVM Context and Module.
let module_llvm = ModuleLlvm::parse(cgcx, module_name, thin_module.data(), dcx)?;
let module_llvm = ModuleLlvm::parse(cgcx, module_name, thin_module.data(), dcx);
let mut module = ModuleCodegen::new_regular(thin_module.name(), module_llvm);
// Given that the newly created module lacks a thinlto buffer for embedding, we need to re-add it here.
if cgcx.config(ModuleKind::Regular).embed_bitcode() {
@ -746,7 +744,7 @@ pub(crate) fn optimize_thin_module(
.generic_activity_with_arg("LLVM_thin_lto_resolve_weak", thin_module.name());
if unsafe { !llvm::LLVMRustPrepareThinLTOResolveWeak(thin_module.shared.data.0, llmod) }
{
return Err(write::llvm_err(dcx, LlvmError::PrepareThinLtoModule));
write::llvm_err(dcx, LlvmError::PrepareThinLtoModule);
}
save_temp_bitcode(cgcx, &module, "thin-lto-after-resolve");
}
@ -757,7 +755,7 @@ pub(crate) fn optimize_thin_module(
.generic_activity_with_arg("LLVM_thin_lto_internalize", thin_module.name());
if unsafe { !llvm::LLVMRustPrepareThinLTOInternalize(thin_module.shared.data.0, llmod) }
{
return Err(write::llvm_err(dcx, LlvmError::PrepareThinLtoModule));
write::llvm_err(dcx, LlvmError::PrepareThinLtoModule);
}
save_temp_bitcode(cgcx, &module, "thin-lto-after-internalize");
}
@ -768,7 +766,7 @@ pub(crate) fn optimize_thin_module(
if unsafe {
!llvm::LLVMRustPrepareThinLTOImport(thin_module.shared.data.0, llmod, target.raw())
} {
return Err(write::llvm_err(dcx, LlvmError::PrepareThinLtoModule));
write::llvm_err(dcx, LlvmError::PrepareThinLtoModule);
}
save_temp_bitcode(cgcx, &module, "thin-lto-after-import");
}
@ -780,11 +778,11 @@ pub(crate) fn optimize_thin_module(
// little differently.
{
info!("running thin lto passes over {}", module.name);
run_pass_manager(cgcx, dcx, &mut module, true)?;
run_pass_manager(cgcx, dcx, &mut module, true);
save_temp_bitcode(cgcx, &module, "thin-lto-after-pm");
}
}
Ok(module)
module
}
/// Maps LLVM module identifiers to their corresponding LLVM LTO cache keys
@ -850,9 +848,9 @@ pub(crate) fn parse_module<'a>(
name: &CStr,
data: &[u8],
dcx: DiagCtxtHandle<'_>,
) -> Result<&'a llvm::Module, FatalError> {
) -> &'a llvm::Module {
unsafe {
llvm::LLVMRustParseBitcodeForLTO(cx, data.as_ptr(), data.len(), name.as_ptr())
.ok_or_else(|| write::llvm_err(dcx, LlvmError::ParseBitcode))
.unwrap_or_else(|| write::llvm_err(dcx, LlvmError::ParseBitcode))
}
}

View file

@ -20,7 +20,7 @@ use rustc_codegen_ssa::traits::*;
use rustc_codegen_ssa::{CompiledModule, ModuleCodegen, ModuleKind};
use rustc_data_structures::profiling::SelfProfilerRef;
use rustc_data_structures::small_c_str::SmallCStr;
use rustc_errors::{DiagCtxtHandle, FatalError, Level};
use rustc_errors::{DiagCtxtHandle, Level};
use rustc_fs_util::{link_or_copy, path_to_c_string};
use rustc_middle::ty::TyCtxt;
use rustc_session::Session;
@ -46,10 +46,10 @@ use crate::llvm::{self, DiagnosticInfo};
use crate::type_::Type;
use crate::{LlvmCodegenBackend, ModuleLlvm, base, common, llvm_util};
pub(crate) fn llvm_err<'a>(dcx: DiagCtxtHandle<'_>, err: LlvmError<'a>) -> FatalError {
pub(crate) fn llvm_err<'a>(dcx: DiagCtxtHandle<'_>, err: LlvmError<'a>) -> ! {
match llvm::last_error() {
Some(llvm_err) => dcx.emit_almost_fatal(WithLlvmError(err, llvm_err)),
None => dcx.emit_almost_fatal(err),
Some(llvm_err) => dcx.emit_fatal(WithLlvmError(err, llvm_err)),
None => dcx.emit_fatal(err),
}
}
@ -63,7 +63,7 @@ fn write_output_file<'ll>(
file_type: llvm::FileType,
self_profiler_ref: &SelfProfilerRef,
verify_llvm_ir: bool,
) -> Result<(), FatalError> {
) {
debug!("write_output_file output={:?} dwo_output={:?}", output, dwo_output);
let output_c = path_to_c_string(output);
let dwo_output_c;
@ -100,7 +100,7 @@ fn write_output_file<'ll>(
}
}
result.into_result().map_err(|()| llvm_err(dcx, LlvmError::WriteOutput { path: output }))
result.into_result().unwrap_or_else(|()| llvm_err(dcx, LlvmError::WriteOutput { path: output }))
}
pub(crate) fn create_informational_target_machine(
@ -112,7 +112,7 @@ pub(crate) fn create_informational_target_machine(
// system/tcx is set up.
let features = llvm_util::global_llvm_features(sess, false, only_base_features);
target_machine_factory(sess, config::OptLevel::No, &features)(config)
.unwrap_or_else(|err| llvm_err(sess.dcx(), err).raise())
.unwrap_or_else(|err| llvm_err(sess.dcx(), err))
}
pub(crate) fn create_target_machine(tcx: TyCtxt<'_>, mod_name: &str) -> OwnedTargetMachine {
@ -139,7 +139,7 @@ pub(crate) fn create_target_machine(tcx: TyCtxt<'_>, mod_name: &str) -> OwnedTar
tcx.backend_optimization_level(()),
tcx.global_backend_features(()),
)(config)
.unwrap_or_else(|err| llvm_err(tcx.dcx(), err).raise())
.unwrap_or_else(|err| llvm_err(tcx.dcx(), err))
}
fn to_llvm_opt_settings(cfg: config::OptLevel) -> (llvm::CodeGenOptLevel, llvm::CodeGenOptSize) {
@ -565,7 +565,7 @@ pub(crate) unsafe fn llvm_optimize(
opt_level: config::OptLevel,
opt_stage: llvm::OptStage,
autodiff_stage: AutodiffStage,
) -> Result<(), FatalError> {
) {
// Enzyme:
// The whole point of compiler based AD is to differentiate optimized IR instead of unoptimized
// source code. However, benchmarks show that optimizations increasing the code size
@ -704,7 +704,7 @@ pub(crate) unsafe fn llvm_optimize(
llvm_plugins.len(),
)
};
result.into_result().map_err(|()| llvm_err(dcx, LlvmError::RunLlvmPasses))
result.into_result().unwrap_or_else(|()| llvm_err(dcx, LlvmError::RunLlvmPasses))
}
// Unsafe due to LLVM calls.
@ -713,7 +713,7 @@ pub(crate) fn optimize(
dcx: DiagCtxtHandle<'_>,
module: &mut ModuleCodegen<ModuleLlvm>,
config: &ModuleConfig,
) -> Result<(), FatalError> {
) {
let _timer = cgcx.prof.generic_activity_with_arg("LLVM_module_optimize", &*module.name);
let llcx = &*module.module_llvm.llcx;
@ -765,7 +765,7 @@ pub(crate) fn optimize(
opt_stage,
autodiff_stage,
)
}?;
};
if let Some(thin_lto_buffer) = thin_lto_buffer {
let thin_lto_buffer = unsafe { ThinBuffer::from_raw_ptr(thin_lto_buffer) };
module.thin_lto_buffer = Some(thin_lto_buffer.data().to_vec());
@ -793,14 +793,13 @@ pub(crate) fn optimize(
}
}
}
Ok(())
}
pub(crate) fn codegen(
cgcx: &CodegenContext<LlvmCodegenBackend>,
module: ModuleCodegen<ModuleLlvm>,
config: &ModuleConfig,
) -> Result<CompiledModule, FatalError> {
) -> CompiledModule {
let dcx = cgcx.create_dcx();
let dcx = dcx.handle();
@ -909,7 +908,9 @@ pub(crate) fn codegen(
record_artifact_size(&cgcx.prof, "llvm_ir", &out);
}
result.into_result().map_err(|()| llvm_err(dcx, LlvmError::WriteIr { path: &out }))?;
result
.into_result()
.unwrap_or_else(|()| llvm_err(dcx, LlvmError::WriteIr { path: &out }));
}
if config.emit_asm {
@ -940,7 +941,7 @@ pub(crate) fn codegen(
llvm::FileType::AssemblyFile,
&cgcx.prof,
config.verify_llvm_ir,
)?;
);
}
match config.emit_obj {
@ -976,7 +977,7 @@ pub(crate) fn codegen(
llvm::FileType::ObjectFile,
&cgcx.prof,
config.verify_llvm_ir,
)?;
);
}
EmitObj::Bitcode => {
@ -1009,7 +1010,7 @@ pub(crate) fn codegen(
&& cgcx.target_can_use_split_dwarf
&& cgcx.split_debuginfo != SplitDebuginfo::Off
&& cgcx.split_dwarf_kind == SplitDwarfKind::Split;
Ok(module.into_compiled_module(
module.into_compiled_module(
config.emit_obj != EmitObj::None,
dwarf_object_emitted,
config.emit_bc,
@ -1017,7 +1018,7 @@ pub(crate) fn codegen(
config.emit_ir,
&cgcx.output_filenames,
cgcx.invocation_temp.as_deref(),
))
)
}
fn create_section_with_flags_asm(section_name: &str, section_flags: &str, data: &[u8]) -> Vec<u8> {
@ -1110,7 +1111,7 @@ fn embed_bitcode(
llvm::set_section(llglobal, bitcode_section_name(cgcx));
llvm::set_linkage(llglobal, llvm::Linkage::PrivateLinkage);
llvm::LLVMSetGlobalConstant(llglobal, llvm::True);
llvm::LLVMSetGlobalConstant(llglobal, llvm::TRUE);
let llconst = common::bytes_in_context(llcx, &[]);
let llglobal = llvm::add_global(llmod, common::val_ty(llconst), c"rustc.embedded.cmdline");

View file

@ -35,7 +35,7 @@ use crate::attributes;
use crate::common::Funclet;
use crate::context::{CodegenCx, FullCx, GenericCx, SCx};
use crate::llvm::{
self, AtomicOrdering, AtomicRmwBinOp, BasicBlock, False, GEPNoWrapFlags, Metadata, True,
self, AtomicOrdering, AtomicRmwBinOp, BasicBlock, GEPNoWrapFlags, Metadata, TRUE, ToLlvmBool,
};
use crate::type_::Type;
use crate::type_of::LayoutLlvmExt;
@ -493,8 +493,8 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
unsafe {
let add = llvm::LLVMBuildAdd(self.llbuilder, a, b, UNNAMED);
if llvm::LLVMIsAInstruction(add).is_some() {
llvm::LLVMSetNUW(add, True);
llvm::LLVMSetNSW(add, True);
llvm::LLVMSetNUW(add, TRUE);
llvm::LLVMSetNSW(add, TRUE);
}
add
}
@ -503,8 +503,8 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
unsafe {
let sub = llvm::LLVMBuildSub(self.llbuilder, a, b, UNNAMED);
if llvm::LLVMIsAInstruction(sub).is_some() {
llvm::LLVMSetNUW(sub, True);
llvm::LLVMSetNSW(sub, True);
llvm::LLVMSetNUW(sub, TRUE);
llvm::LLVMSetNSW(sub, TRUE);
}
sub
}
@ -513,8 +513,8 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
unsafe {
let mul = llvm::LLVMBuildMul(self.llbuilder, a, b, UNNAMED);
if llvm::LLVMIsAInstruction(mul).is_some() {
llvm::LLVMSetNUW(mul, True);
llvm::LLVMSetNSW(mul, True);
llvm::LLVMSetNUW(mul, TRUE);
llvm::LLVMSetNSW(mul, TRUE);
}
mul
}
@ -528,7 +528,7 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
// an instruction, so we need to check before setting the flag.
// (See also `LLVMBuildNUWNeg` which also needs a check.)
if llvm::LLVMIsAInstruction(or).is_some() {
llvm::LLVMSetIsDisjoint(or, True);
llvm::LLVMSetIsDisjoint(or, TRUE);
}
or
}
@ -629,7 +629,7 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
fn volatile_load(&mut self, ty: &'ll Type, ptr: &'ll Value) -> &'ll Value {
unsafe {
let load = llvm::LLVMBuildLoad2(self.llbuilder, ty, ptr, UNNAMED);
llvm::LLVMSetVolatile(load, llvm::True);
llvm::LLVMSetVolatile(load, llvm::TRUE);
load
}
}
@ -717,7 +717,7 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
let mut const_llval = None;
let llty = place.layout.llvm_type(self);
if let Some(global) = llvm::LLVMIsAGlobalVariable(place.val.llval) {
if llvm::LLVMIsGlobalConstant(global) == llvm::True {
if llvm::LLVMIsGlobalConstant(global).is_true() {
if let Some(init) = llvm::LLVMGetInitializer(global) {
if self.val_ty(init) == llty {
const_llval = Some(init);
@ -838,7 +838,7 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
if flags.contains(MemFlags::UNALIGNED) { 1 } else { align.bytes() as c_uint };
llvm::LLVMSetAlignment(store, align);
if flags.contains(MemFlags::VOLATILE) {
llvm::LLVMSetVolatile(store, llvm::True);
llvm::LLVMSetVolatile(store, llvm::TRUE);
}
if flags.contains(MemFlags::NONTEMPORAL) {
// Make sure that the current target architectures supports "sane" non-temporal
@ -956,7 +956,7 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
let trunc = self.trunc(val, dest_ty);
unsafe {
if llvm::LLVMIsAInstruction(trunc).is_some() {
llvm::LLVMSetNUW(trunc, True);
llvm::LLVMSetNUW(trunc, TRUE);
}
}
trunc
@ -968,7 +968,7 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
let trunc = self.trunc(val, dest_ty);
unsafe {
if llvm::LLVMIsAInstruction(trunc).is_some() {
llvm::LLVMSetNSW(trunc, True);
llvm::LLVMSetNSW(trunc, TRUE);
}
}
trunc
@ -1067,13 +1067,7 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
fn intcast(&mut self, val: &'ll Value, dest_ty: &'ll Type, is_signed: bool) -> &'ll Value {
unsafe {
llvm::LLVMBuildIntCast2(
self.llbuilder,
val,
dest_ty,
if is_signed { True } else { False },
UNNAMED,
)
llvm::LLVMBuildIntCast2(self.llbuilder, val, dest_ty, is_signed.to_llvm_bool(), UNNAMED)
}
}
@ -1229,7 +1223,7 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
let ty = self.type_struct(&[self.type_ptr(), self.type_i32()], false);
let landing_pad = self.landing_pad(ty, pers_fn, 0);
unsafe {
llvm::LLVMSetCleanup(landing_pad, llvm::True);
llvm::LLVMSetCleanup(landing_pad, llvm::TRUE);
}
(self.extract_value(landing_pad, 0), self.extract_value(landing_pad, 1))
}
@ -1317,7 +1311,6 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
failure_order: rustc_middle::ty::AtomicOrdering,
weak: bool,
) -> (&'ll Value, &'ll Value) {
let weak = if weak { llvm::True } else { llvm::False };
unsafe {
let value = llvm::LLVMBuildAtomicCmpXchg(
self.llbuilder,
@ -1326,9 +1319,9 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
src,
AtomicOrdering::from_generic(order),
AtomicOrdering::from_generic(failure_order),
llvm::False, // SingleThreaded
llvm::FALSE, // SingleThreaded
);
llvm::LLVMSetWeak(value, weak);
llvm::LLVMSetWeak(value, weak.to_llvm_bool());
let val = self.extract_value(value, 0);
let success = self.extract_value(value, 1);
(val, success)
@ -1353,7 +1346,7 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
dst,
src,
AtomicOrdering::from_generic(order),
llvm::False, // SingleThreaded
llvm::FALSE, // SingleThreaded
)
};
if ret_ptr && self.val_ty(res) != self.type_ptr() {
@ -1368,14 +1361,14 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
scope: SynchronizationScope,
) {
let single_threaded = match scope {
SynchronizationScope::SingleThread => llvm::True,
SynchronizationScope::CrossThread => llvm::False,
SynchronizationScope::SingleThread => true,
SynchronizationScope::CrossThread => false,
};
unsafe {
llvm::LLVMBuildFence(
self.llbuilder,
AtomicOrdering::from_generic(order),
single_threaded,
single_threaded.to_llvm_bool(),
UNNAMED,
);
}

View file

@ -11,7 +11,7 @@ use crate::builder::{Builder, PlaceRef, UNNAMED};
use crate::context::SimpleCx;
use crate::declare::declare_simple_fn;
use crate::llvm;
use crate::llvm::{Metadata, True, Type};
use crate::llvm::{Metadata, TRUE, Type};
use crate::value::Value;
pub(crate) fn adjust_activity_to_abi<'tcx>(
@ -293,7 +293,7 @@ pub(crate) fn generate_enzyme_call<'ll, 'tcx>(
// ret double %0
// }
// ```
let enzyme_ty = unsafe { llvm::LLVMFunctionType(ret_ty, ptr::null(), 0, True) };
let enzyme_ty = unsafe { llvm::LLVMFunctionType(ret_ty, ptr::null(), 0, TRUE) };
// FIXME(ZuseZ4): the CC/Addr/Vis values are best effort guesses, we should look at tests and
// think a bit more about what should go here.

View file

@ -20,7 +20,7 @@ use tracing::debug;
use crate::consts::const_alloc_to_llvm;
pub(crate) use crate::context::CodegenCx;
use crate::context::{GenericCx, SCx};
use crate::llvm::{self, BasicBlock, Bool, ConstantInt, False, Metadata, True};
use crate::llvm::{self, BasicBlock, ConstantInt, FALSE, Metadata, TRUE, ToLlvmBool};
use crate::type_::Type;
use crate::value::Value;
@ -158,7 +158,7 @@ impl<'ll, 'tcx> ConstCodegenMethods for CodegenCx<'ll, 'tcx> {
self.type_kind(t) == TypeKind::Integer,
"only allows integer types in const_int"
);
unsafe { llvm::LLVMConstInt(t, i as u64, True) }
unsafe { llvm::LLVMConstInt(t, i as u64, TRUE) }
}
fn const_u8(&self, i: u8) -> &'ll Value {
@ -192,7 +192,7 @@ impl<'ll, 'tcx> ConstCodegenMethods for CodegenCx<'ll, 'tcx> {
self.type_kind(t) == TypeKind::Integer,
"only allows integer types in const_uint"
);
unsafe { llvm::LLVMConstInt(t, i, False) }
unsafe { llvm::LLVMConstInt(t, i, FALSE) }
}
fn const_uint_big(&self, t: &'ll Type, u: u128) -> &'ll Value {
@ -377,7 +377,7 @@ pub(crate) fn val_ty(v: &Value) -> &Type {
pub(crate) fn bytes_in_context<'ll>(llcx: &'ll llvm::Context, bytes: &[u8]) -> &'ll Value {
unsafe {
let ptr = bytes.as_ptr() as *const c_char;
llvm::LLVMConstStringInContext2(llcx, ptr, bytes.len(), True)
llvm::LLVMConstStringInContext2(llcx, ptr, bytes.len(), TRUE)
}
}
@ -392,7 +392,7 @@ fn struct_in_context<'ll>(
packed: bool,
) -> &'ll Value {
let len = c_uint::try_from(elts.len()).expect("LLVMConstStructInContext elements len overflow");
unsafe { llvm::LLVMConstStructInContext(llcx, elts.as_ptr(), len, packed as Bool) }
unsafe { llvm::LLVMConstStructInContext(llcx, elts.as_ptr(), len, packed.to_llvm_bool()) }
}
#[inline]

View file

@ -701,7 +701,7 @@ impl<'ll, CX: Borrow<SCx<'ll>>> GenericCx<'ll, CX> {
}
pub(crate) fn get_const_int(&self, ty: &'ll Type, val: u64) -> &'ll Value {
unsafe { llvm::LLVMConstInt(ty, val, llvm::False) }
unsafe { llvm::LLVMConstInt(ty, val, llvm::FALSE) }
}
pub(crate) fn get_const_i64(&self, n: u64) -> &'ll Value {

View file

@ -72,7 +72,7 @@ pub(crate) fn get_or_insert_gdb_debug_scripts_section_global<'ll>(
.unwrap_or_else(|| bug!("symbol `{}` is already defined", section_var_name));
llvm::set_section(section_var, c".debug_gdb_scripts");
llvm::set_initializer(section_var, cx.const_bytes(section_contents));
llvm::LLVMSetGlobalConstant(section_var, llvm::True);
llvm::LLVMSetGlobalConstant(section_var, llvm::TRUE);
llvm::set_unnamed_address(section_var, llvm::UnnamedAddr::Global);
llvm::set_linkage(section_var, llvm::Linkage::LinkOnceODRLinkage);
// This should make sure that the whole section is not larger than

View file

@ -38,7 +38,7 @@ pub(crate) fn item_namespace<'ll>(cx: &CodegenCx<'ll, '_>, def_id: DefId) -> &'l
parent_scope,
namespace_name_string.as_ptr(),
namespace_name_string.len(),
llvm::False, // ExportSymbols (only relevant for C++ anonymous namespaces)
llvm::FALSE, // ExportSymbols (only relevant for C++ anonymous namespaces)
)
};

View file

@ -37,7 +37,7 @@ use rustc_codegen_ssa::back::write::{
use rustc_codegen_ssa::traits::*;
use rustc_codegen_ssa::{CodegenResults, CompiledModule, ModuleCodegen, TargetConfig};
use rustc_data_structures::fx::FxIndexMap;
use rustc_errors::{DiagCtxtHandle, FatalError};
use rustc_errors::DiagCtxtHandle;
use rustc_metadata::EncodedMetadata;
use rustc_middle::dep_graph::{WorkProduct, WorkProductId};
use rustc_middle::ty::TyCtxt;
@ -165,15 +165,15 @@ impl WriteBackendMethods for LlvmCodegenBackend {
exported_symbols_for_lto: &[String],
each_linked_rlib_for_lto: &[PathBuf],
modules: Vec<FatLtoInput<Self>>,
) -> Result<ModuleCodegen<Self::Module>, FatalError> {
) -> ModuleCodegen<Self::Module> {
let mut module =
back::lto::run_fat(cgcx, exported_symbols_for_lto, each_linked_rlib_for_lto, modules)?;
back::lto::run_fat(cgcx, exported_symbols_for_lto, each_linked_rlib_for_lto, modules);
let dcx = cgcx.create_dcx();
let dcx = dcx.handle();
back::lto::run_pass_manager(cgcx, dcx, &mut module, false)?;
back::lto::run_pass_manager(cgcx, dcx, &mut module, false);
Ok(module)
module
}
fn run_thin_lto(
cgcx: &CodegenContext<Self>,
@ -181,7 +181,7 @@ impl WriteBackendMethods for LlvmCodegenBackend {
each_linked_rlib_for_lto: &[PathBuf],
modules: Vec<(String, Self::ThinBuffer)>,
cached_modules: Vec<(SerializedModule<Self::ModuleBuffer>, WorkProduct)>,
) -> Result<(Vec<ThinModule<Self>>, Vec<WorkProduct>), FatalError> {
) -> (Vec<ThinModule<Self>>, Vec<WorkProduct>) {
back::lto::run_thin(
cgcx,
exported_symbols_for_lto,
@ -195,20 +195,20 @@ impl WriteBackendMethods for LlvmCodegenBackend {
dcx: DiagCtxtHandle<'_>,
module: &mut ModuleCodegen<Self::Module>,
config: &ModuleConfig,
) -> Result<(), FatalError> {
) {
back::write::optimize(cgcx, dcx, module, config)
}
fn optimize_thin(
cgcx: &CodegenContext<Self>,
thin: ThinModule<Self>,
) -> Result<ModuleCodegen<Self::Module>, FatalError> {
) -> ModuleCodegen<Self::Module> {
back::lto::optimize_thin_module(thin, cgcx)
}
fn codegen(
cgcx: &CodegenContext<Self>,
module: ModuleCodegen<Self::Module>,
config: &ModuleConfig,
) -> Result<CompiledModule, FatalError> {
) -> CompiledModule {
back::write::codegen(cgcx, module, config)
}
fn prepare_thin(
@ -407,12 +407,12 @@ impl ModuleLlvm {
cgcx: &CodegenContext<LlvmCodegenBackend>,
name: &str,
dcx: DiagCtxtHandle<'_>,
) -> Result<OwnedTargetMachine, FatalError> {
) -> OwnedTargetMachine {
let tm_factory_config = TargetMachineFactoryConfig::new(cgcx, name);
match (cgcx.tm_factory)(tm_factory_config) {
Ok(m) => Ok(m),
Ok(m) => m,
Err(e) => {
return Err(dcx.emit_almost_fatal(ParseTargetMachineConfig(e)));
dcx.emit_fatal(ParseTargetMachineConfig(e));
}
}
}
@ -422,13 +422,13 @@ impl ModuleLlvm {
name: &CStr,
buffer: &[u8],
dcx: DiagCtxtHandle<'_>,
) -> Result<Self, FatalError> {
) -> Self {
unsafe {
let llcx = llvm::LLVMRustContextCreate(cgcx.fewer_names);
let llmod_raw = back::lto::parse_module(llcx, name, buffer, dcx)?;
let tm = ModuleLlvm::tm_from_cgcx(cgcx, name.to_str().unwrap(), dcx)?;
let llmod_raw = back::lto::parse_module(llcx, name, buffer, dcx);
let tm = ModuleLlvm::tm_from_cgcx(cgcx, name.to_str().unwrap(), dcx);
Ok(ModuleLlvm { llmod_raw, llcx, tm: ManuallyDrop::new(tm) })
ModuleLlvm { llmod_raw, llcx, tm: ManuallyDrop::new(tm) }
}
}

View file

@ -11,9 +11,8 @@
//! the need for an extra cast from `*const u8` on the Rust side.
#![allow(non_camel_case_types)]
#![allow(non_upper_case_globals)]
use std::fmt::Debug;
use std::fmt::{self, Debug};
use std::marker::PhantomData;
use std::num::NonZero;
use std::ptr;
@ -33,10 +32,59 @@ use crate::llvm;
/// In the LLVM-C API, boolean values are passed as `typedef int LLVMBool`,
/// which has a different ABI from Rust or C++ `bool`.
pub(crate) type Bool = c_int;
///
/// This wrapper does not implement `PartialEq`.
/// To test the underlying boolean value, use [`Self::is_true`].
#[derive(Clone, Copy)]
#[repr(transparent)]
pub(crate) struct Bool {
value: c_int,
}
pub(crate) const True: Bool = 1 as Bool;
pub(crate) const False: Bool = 0 as Bool;
pub(crate) const TRUE: Bool = Bool::TRUE;
pub(crate) const FALSE: Bool = Bool::FALSE;
impl Bool {
pub(crate) const TRUE: Self = Self { value: 1 };
pub(crate) const FALSE: Self = Self { value: 0 };
pub(crate) const fn from_bool(rust_bool: bool) -> Self {
if rust_bool { Self::TRUE } else { Self::FALSE }
}
/// Converts this LLVM-C boolean to a Rust `bool`
pub(crate) fn is_true(self) -> bool {
// Since we're interacting with a C API, follow the C convention of
// treating any nonzero value as true.
self.value != Self::FALSE.value
}
}
impl Debug for Bool {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self.value {
0 => f.write_str("FALSE"),
1 => f.write_str("TRUE"),
// As with `Self::is_true`, treat any nonzero value as true.
v => write!(f, "TRUE ({v})"),
}
}
}
/// Convenience trait to convert `bool` to `llvm::Bool` with an explicit method call.
///
/// Being able to write `b.to_llvm_bool()` is less noisy than `llvm::Bool::from(b)`,
/// while being more explicit and less mistake-prone than something like `b.into()`.
pub(crate) trait ToLlvmBool: Copy {
fn to_llvm_bool(self) -> llvm::Bool;
}
impl ToLlvmBool for bool {
#[inline(always)]
fn to_llvm_bool(self) -> llvm::Bool {
llvm::Bool::from_bool(self)
}
}
/// Wrapper for a raw enum value returned from LLVM's C APIs.
///
@ -1881,11 +1929,17 @@ unsafe extern "C" {
C: &Context,
effects: MemoryEffects,
) -> &Attribute;
/// ## Safety
/// - Each of `LowerWords` and `UpperWords` must point to an array that is
/// long enough to fully define an integer of size `NumBits`, i.e. each
/// pointer must point to `NumBits.div_ceil(64)` elements or more.
/// - The implementation will make its own copy of the pointed-to `u64`
/// values, so the pointers only need to outlive this function call.
pub(crate) fn LLVMRustCreateRangeAttribute(
C: &Context,
num_bits: c_uint,
lower_words: *const u64,
upper_words: *const u64,
NumBits: c_uint,
LowerWords: *const u64,
UpperWords: *const u64,
) -> &Attribute;
// Operations on functions

View file

@ -112,16 +112,26 @@ pub(crate) fn CreateAllocKindAttr(llcx: &Context, kind_arg: AllocKindFlags) -> &
pub(crate) fn CreateRangeAttr(llcx: &Context, size: Size, range: WrappingRange) -> &Attribute {
let lower = range.start;
// LLVM treats the upper bound as exclusive, but allows wrapping.
let upper = range.end.wrapping_add(1);
let lower_words = [lower as u64, (lower >> 64) as u64];
let upper_words = [upper as u64, (upper >> 64) as u64];
// Pass each `u128` endpoint value as a `[u64; 2]` array, least-significant part first.
let as_u64_array = |x: u128| [x as u64, (x >> 64) as u64];
let lower_words: [u64; 2] = as_u64_array(lower);
let upper_words: [u64; 2] = as_u64_array(upper);
// To ensure that LLVM doesn't try to read beyond the `[u64; 2]` arrays,
// we must explicitly check that `size_bits` does not exceed 128.
let size_bits = size.bits();
assert!(size_bits <= 128);
// More robust assertions that are redundant with `size_bits <= 128` and
// should be optimized away.
assert!(size_bits.div_ceil(64) <= u64::try_from(lower_words.len()).unwrap());
assert!(size_bits.div_ceil(64) <= u64::try_from(upper_words.len()).unwrap());
let size_bits = c_uint::try_from(size_bits).unwrap();
unsafe {
LLVMRustCreateRangeAttribute(
llcx,
size.bits().try_into().unwrap(),
lower_words.as_ptr(),
upper_words.as_ptr(),
)
LLVMRustCreateRangeAttribute(llcx, size_bits, lower_words.as_ptr(), upper_words.as_ptr())
}
}
@ -215,7 +225,7 @@ pub(crate) fn set_initializer(llglobal: &Value, constant_val: &Value) {
}
pub(crate) fn set_global_constant(llglobal: &Value, is_constant: bool) {
LLVMSetGlobalConstant(llglobal, if is_constant { ffi::True } else { ffi::False });
LLVMSetGlobalConstant(llglobal, is_constant.to_llvm_bool());
}
pub(crate) fn get_linkage(llglobal: &Value) -> Linkage {
@ -229,7 +239,7 @@ pub(crate) fn set_linkage(llglobal: &Value, linkage: Linkage) {
}
pub(crate) fn is_declaration(llglobal: &Value) -> bool {
unsafe { LLVMIsDeclaration(llglobal) == ffi::True }
unsafe { LLVMIsDeclaration(llglobal) }.is_true()
}
pub(crate) fn get_visibility(llglobal: &Value) -> Visibility {

View file

@ -26,7 +26,7 @@ static INIT: Once = Once::new();
pub(crate) fn init(sess: &Session) {
unsafe {
// Before we touch LLVM, make sure that multithreading is enabled.
if llvm::LLVMIsMultithreaded() != 1 {
if !llvm::LLVMIsMultithreaded().is_true() {
bug!("LLVM compiled without support for threads");
}
INIT.call_once(|| {
@ -279,7 +279,7 @@ pub(crate) fn to_llvm_features<'a>(sess: &Session, s: &'a str) -> Option<LLVMFea
}
("loongarch32" | "loongarch64", "32s") if get_version().0 < 21 => None,
// Filter out features that are not supported by the current LLVM version
("riscv32" | "riscv64", "zacas") if get_version().0 < 20 => None,
("riscv32" | "riscv64", "zacas" | "rva23u64" | "supm") if get_version().0 < 20 => None,
(
"s390x",
"message-security-assist-extension12"

View file

@ -133,7 +133,7 @@ impl CodegenCx<'_, '_> {
// Thread-local variables generally don't support copy relocations.
let is_thread_local_var = llvm::LLVMIsAGlobalVariable(llval)
.is_some_and(|v| llvm::LLVMIsThreadLocal(v) == llvm::True);
.is_some_and(|v| llvm::LLVMIsThreadLocal(v).is_true());
if is_thread_local_var {
return false;
}

View file

@ -15,7 +15,7 @@ use rustc_target::callconv::{CastTarget, FnAbi};
use crate::abi::{FnAbiLlvmExt, LlvmType};
use crate::context::{CodegenCx, GenericCx, SCx};
pub(crate) use crate::llvm::Type;
use crate::llvm::{Bool, False, Metadata, True};
use crate::llvm::{FALSE, Metadata, TRUE, ToLlvmBool};
use crate::type_of::LayoutLlvmExt;
use crate::value::Value;
use crate::{common, llvm};
@ -53,7 +53,9 @@ impl<'ll, CX: Borrow<SCx<'ll>>> GenericCx<'ll, CX> {
}
pub(crate) fn set_struct_body(&self, ty: &'ll Type, els: &[&'ll Type], packed: bool) {
unsafe { llvm::LLVMStructSetBody(ty, els.as_ptr(), els.len() as c_uint, packed as Bool) }
unsafe {
llvm::LLVMStructSetBody(ty, els.as_ptr(), els.len() as c_uint, packed.to_llvm_bool())
}
}
pub(crate) fn type_void(&self) -> &'ll Type {
unsafe { llvm::LLVMVoidTypeInContext(self.llcx()) }
@ -139,7 +141,7 @@ impl<'ll, CX: Borrow<SCx<'ll>>> GenericCx<'ll, CX> {
}
pub(crate) fn type_variadic_func(&self, args: &[&'ll Type], ret: &'ll Type) -> &'ll Type {
unsafe { llvm::LLVMFunctionType(ret, args.as_ptr(), args.len() as c_uint, True) }
unsafe { llvm::LLVMFunctionType(ret, args.as_ptr(), args.len() as c_uint, TRUE) }
}
pub(crate) fn type_i1(&self) -> &'ll Type {
@ -152,7 +154,7 @@ impl<'ll, CX: Borrow<SCx<'ll>>> GenericCx<'ll, CX> {
self.llcx(),
els.as_ptr(),
els.len() as c_uint,
packed as Bool,
packed.to_llvm_bool(),
)
}
}
@ -200,7 +202,7 @@ impl<'ll, CX: Borrow<SCx<'ll>>> BaseTypeCodegenMethods for GenericCx<'ll, CX> {
}
fn type_func(&self, args: &[&'ll Type], ret: &'ll Type) -> &'ll Type {
unsafe { llvm::LLVMFunctionType(ret, args.as_ptr(), args.len() as c_uint, False) }
unsafe { llvm::LLVMFunctionType(ret, args.as_ptr(), args.len() as c_uint, FALSE) }
}
fn type_kind(&self, ty: &'ll Type) -> TypeKind {

View file

@ -164,7 +164,7 @@ pub(super) fn get_sdk_root(sess: &Session) -> Option<PathBuf> {
//
// Note that when cross-compiling from e.g. Linux, the `xcrun` binary may sometimes be provided
// as a shim by a cross-compilation helper tool. It usually isn't, but we still try nonetheless.
match xcrun_show_sdk_path(sdk_name, sess.verbose_internals()) {
match xcrun_show_sdk_path(sdk_name, false) {
Ok((path, stderr)) => {
// Emit extra stderr, such as if `-verbose` was passed, or if `xcrun` emitted a warning.
if !stderr.is_empty() {

View file

@ -307,11 +307,14 @@ fn create_elf_raw_dylib_stub(sess: &Session, soname: &str, symbols: &[DllImport]
stub.reserve_section_headers();
stub.reserve_dynsym();
stub.reserve_dynstr();
let verdef_count = 1 + vers.len();
let mut dynamic_entries = 2; // DT_SONAME, DT_NULL
if !vers.is_empty() {
stub.reserve_gnu_versym();
stub.reserve_gnu_verdef(1 + vers.len(), 1 + vers.len());
stub.reserve_gnu_verdef(verdef_count, verdef_count);
dynamic_entries += 1; // DT_VERDEFNUM
}
stub.reserve_dynamic(2); // DT_SONAME, DT_NULL
stub.reserve_dynamic(dynamic_entries);
// First write the ELF header with the arch information.
let e_machine = match (arch, sub_arch) {
@ -443,9 +446,13 @@ fn create_elf_raw_dylib_stub(sess: &Session, soname: &str, symbols: &[DllImport]
// .dynamic
// the DT_SONAME will be used by the linker to populate DT_NEEDED
// which the loader uses to find the library.
// DT_NULL terminates the .dynamic table.
stub.write_align_dynamic();
stub.write_dynamic_string(elf::DT_SONAME, soname);
// LSB section "2.7. Symbol Versioning" requires `DT_VERDEFNUM` to be reliable.
if verdef_count > 1 {
stub.write_dynamic(elf::DT_VERDEFNUM, verdef_count as u64);
}
// DT_NULL terminates the .dynamic table.
stub.write_dynamic(elf::DT_NULL, 0);
stub_buf

View file

@ -329,12 +329,18 @@ pub(super) fn elf_e_flags(architecture: Architecture, sess: &Session) -> u32 {
// Source: https://github.com/riscv-non-isa/riscv-elf-psabi-doc/blob/079772828bd10933d34121117a222b4cc0ee2200/riscv-elf.adoc
let mut e_flags: u32 = 0x0;
// Check if compressed is enabled
// `unstable_target_features` is used here because "c" is gated behind riscv_target_feature.
if sess.unstable_target_features.contains(&sym::c) {
// Check if compression is enabled
// `unstable_target_features` is used here because "zca" is gated behind riscv_target_feature.
if sess.unstable_target_features.contains(&sym::zca) {
e_flags |= elf::EF_RISCV_RVC;
}
// Check if RVTSO is enabled
// `unstable_target_features` is used here because "ztso" is gated behind riscv_target_feature.
if sess.unstable_target_features.contains(&sym::ztso) {
e_flags |= elf::EF_RISCV_TSO;
}
// Set the appropriate flag based on ABI
// This needs to match LLVM `RISCVELFStreamer.cpp`
match &*sess.target.llvm_abiname {

View file

@ -570,7 +570,7 @@ fn symbol_export_level(tcx: TyCtxt<'_>, sym_def_id: DefId) -> SymbolExportLevel
// core/std/allocators/etc. For example symbols used to hook up allocation
// are not considered for export
let codegen_fn_attrs = tcx.codegen_fn_attrs(sym_def_id);
let is_extern = codegen_fn_attrs.contains_extern_indicator(tcx, sym_def_id);
let is_extern = codegen_fn_attrs.contains_extern_indicator();
let std_internal =
codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::RUSTC_STD_INTERNAL_SYMBOL);

View file

@ -1,5 +1,6 @@
use std::assert_matches::assert_matches;
use std::marker::PhantomData;
use std::panic::AssertUnwindSafe;
use std::path::{Path, PathBuf};
use std::sync::Arc;
use std::sync::mpsc::{Receiver, Sender, channel};
@ -14,7 +15,7 @@ use rustc_data_structures::profiling::{SelfProfilerRef, VerboseTimingGuard};
use rustc_errors::emitter::Emitter;
use rustc_errors::translation::Translator;
use rustc_errors::{
Diag, DiagArgMap, DiagCtxt, DiagMessage, ErrCode, FatalError, Level, MultiSpan, Style,
Diag, DiagArgMap, DiagCtxt, DiagMessage, ErrCode, FatalErrorMarker, Level, MultiSpan, Style,
Suggestions,
};
use rustc_fs_util::link_or_copy;
@ -395,8 +396,7 @@ fn generate_thin_lto_work<B: ExtraBackendMethods>(
each_linked_rlib_for_lto,
needs_thin_lto,
import_only_modules,
)
.unwrap_or_else(|e| e.raise());
);
lto_modules
.into_iter()
.map(|module| {
@ -844,11 +844,11 @@ fn execute_optimize_work_item<B: ExtraBackendMethods>(
cgcx: &CodegenContext<B>,
mut module: ModuleCodegen<B::Module>,
module_config: &ModuleConfig,
) -> Result<WorkItemResult<B>, FatalError> {
) -> WorkItemResult<B> {
let dcx = cgcx.create_dcx();
let dcx = dcx.handle();
B::optimize(cgcx, dcx, &mut module, module_config)?;
B::optimize(cgcx, dcx, &mut module, module_config);
// After we've done the initial round of optimizations we need to
// decide whether to synchronously codegen this module or ship it
@ -868,8 +868,8 @@ fn execute_optimize_work_item<B: ExtraBackendMethods>(
match lto_type {
ComputedLtoType::No => {
let module = B::codegen(cgcx, module, module_config)?;
Ok(WorkItemResult::Finished(module))
let module = B::codegen(cgcx, module, module_config);
WorkItemResult::Finished(module)
}
ComputedLtoType::Thin => {
let (name, thin_buffer) = B::prepare_thin(module, false);
@ -878,7 +878,7 @@ fn execute_optimize_work_item<B: ExtraBackendMethods>(
panic!("Error writing pre-lto-bitcode file `{}`: {}", path.display(), e);
});
}
Ok(WorkItemResult::NeedsThinLto(name, thin_buffer))
WorkItemResult::NeedsThinLto(name, thin_buffer)
}
ComputedLtoType::Fat => match bitcode {
Some(path) => {
@ -886,12 +886,12 @@ fn execute_optimize_work_item<B: ExtraBackendMethods>(
fs::write(&path, buffer.data()).unwrap_or_else(|e| {
panic!("Error writing pre-lto-bitcode file `{}`: {}", path.display(), e);
});
Ok(WorkItemResult::NeedsFatLto(FatLtoInput::Serialized {
WorkItemResult::NeedsFatLto(FatLtoInput::Serialized {
name,
buffer: SerializedModule::Local(buffer),
}))
})
}
None => Ok(WorkItemResult::NeedsFatLto(FatLtoInput::InMemory(module))),
None => WorkItemResult::NeedsFatLto(FatLtoInput::InMemory(module)),
},
}
}
@ -987,7 +987,7 @@ fn execute_fat_lto_work_item<B: ExtraBackendMethods>(
mut needs_fat_lto: Vec<FatLtoInput<B>>,
import_only_modules: Vec<(SerializedModule<B::ModuleBuffer>, WorkProduct)>,
module_config: &ModuleConfig,
) -> Result<WorkItemResult<B>, FatalError> {
) -> WorkItemResult<B> {
for (module, wp) in import_only_modules {
needs_fat_lto.push(FatLtoInput::Serialized { name: wp.cgu_name, buffer: module })
}
@ -997,19 +997,19 @@ fn execute_fat_lto_work_item<B: ExtraBackendMethods>(
exported_symbols_for_lto,
each_linked_rlib_for_lto,
needs_fat_lto,
)?;
let module = B::codegen(cgcx, module, module_config)?;
Ok(WorkItemResult::Finished(module))
);
let module = B::codegen(cgcx, module, module_config);
WorkItemResult::Finished(module)
}
fn execute_thin_lto_work_item<B: ExtraBackendMethods>(
cgcx: &CodegenContext<B>,
module: lto::ThinModule<B>,
module_config: &ModuleConfig,
) -> Result<WorkItemResult<B>, FatalError> {
let module = B::optimize_thin(cgcx, module)?;
let module = B::codegen(cgcx, module, module_config)?;
Ok(WorkItemResult::Finished(module))
) -> WorkItemResult<B> {
let module = B::optimize_thin(cgcx, module);
let module = B::codegen(cgcx, module, module_config);
WorkItemResult::Finished(module)
}
/// Messages sent to the coordinator.
@ -1722,37 +1722,10 @@ fn spawn_work<'a, B: ExtraBackendMethods>(
let cgcx = cgcx.clone();
B::spawn_named_thread(cgcx.time_trace, work.short_description(), move || {
// Set up a destructor which will fire off a message that we're done as
// we exit.
struct Bomb<B: ExtraBackendMethods> {
coordinator_send: Sender<Message<B>>,
result: Option<Result<WorkItemResult<B>, FatalError>>,
}
impl<B: ExtraBackendMethods> Drop for Bomb<B> {
fn drop(&mut self) {
let msg = match self.result.take() {
Some(Ok(result)) => Message::WorkItem::<B> { result: Ok(result) },
Some(Err(FatalError)) => {
Message::WorkItem::<B> { result: Err(Some(WorkerFatalError)) }
}
None => Message::WorkItem::<B> { result: Err(None) },
};
drop(self.coordinator_send.send(msg));
}
}
let mut bomb = Bomb::<B> { coordinator_send, result: None };
// Execute the work itself, and if it finishes successfully then flag
// ourselves as a success as well.
//
// Note that we ignore any `FatalError` coming out of `execute_work_item`,
// as a diagnostic was already sent off to the main thread - just
// surface that there was an error in this worker.
bomb.result = {
let result = std::panic::catch_unwind(AssertUnwindSafe(|| {
let module_config = cgcx.config(work.module_kind());
Some(match work {
match work {
WorkItem::Optimize(m) => {
let _timer =
cgcx.prof.generic_activity_with_arg("codegen_module_optimize", &*m.name);
@ -1763,7 +1736,7 @@ fn spawn_work<'a, B: ExtraBackendMethods>(
"codegen_copy_artifacts_from_incr_cache",
&*m.name,
);
Ok(execute_copy_from_cache_work_item(&cgcx, m, module_config))
execute_copy_from_cache_work_item(&cgcx, m, module_config)
}
WorkItem::FatLto {
exported_symbols_for_lto,
@ -1788,8 +1761,22 @@ fn spawn_work<'a, B: ExtraBackendMethods>(
cgcx.prof.generic_activity_with_arg("codegen_module_perform_lto", m.name());
execute_thin_lto_work_item(&cgcx, m, module_config)
}
})
}
}));
let msg = match result {
Ok(result) => Message::WorkItem::<B> { result: Ok(result) },
// We ignore any `FatalError` coming out of `execute_work_item`, as a
// diagnostic was already sent off to the main thread - just surface
// that there was an error in this worker.
Err(err) if err.is::<FatalErrorMarker>() => {
Message::WorkItem::<B> { result: Err(Some(WorkerFatalError)) }
}
Err(_) => Message::WorkItem::<B> { result: Err(None) },
};
drop(coordinator_send.send(msg));
})
.expect("failed to spawn work thread");
}

View file

@ -385,6 +385,8 @@ fn apply_overrides(tcx: TyCtxt<'_>, did: LocalDefId, codegen_fn_attrs: &mut Code
// Foreign items by default use no mangling for their symbol name.
if tcx.is_foreign_item(did) {
codegen_fn_attrs.flags |= CodegenFnAttrFlags::FOREIGN_ITEM;
// There's a few exceptions to this rule though:
if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::RUSTC_STD_INTERNAL_SYMBOL) {
// * `#[rustc_std_internal_symbol]` mangles the symbol name in a special way

View file

@ -1,6 +1,6 @@
use std::path::PathBuf;
use rustc_errors::{DiagCtxtHandle, FatalError};
use rustc_errors::DiagCtxtHandle;
use rustc_middle::dep_graph::WorkProduct;
use crate::back::lto::{SerializedModule, ThinModule};
@ -22,7 +22,7 @@ pub trait WriteBackendMethods: Clone + 'static {
exported_symbols_for_lto: &[String],
each_linked_rlib_for_lto: &[PathBuf],
modules: Vec<FatLtoInput<Self>>,
) -> Result<ModuleCodegen<Self::Module>, FatalError>;
) -> ModuleCodegen<Self::Module>;
/// Performs thin LTO by performing necessary global analysis and returning two
/// lists, one of the modules that need optimization and another for modules that
/// can simply be copied over from the incr. comp. cache.
@ -32,7 +32,7 @@ pub trait WriteBackendMethods: Clone + 'static {
each_linked_rlib_for_lto: &[PathBuf],
modules: Vec<(String, Self::ThinBuffer)>,
cached_modules: Vec<(SerializedModule<Self::ModuleBuffer>, WorkProduct)>,
) -> Result<(Vec<ThinModule<Self>>, Vec<WorkProduct>), FatalError>;
) -> (Vec<ThinModule<Self>>, Vec<WorkProduct>);
fn print_pass_timings(&self);
fn print_statistics(&self);
fn optimize(
@ -40,16 +40,16 @@ pub trait WriteBackendMethods: Clone + 'static {
dcx: DiagCtxtHandle<'_>,
module: &mut ModuleCodegen<Self::Module>,
config: &ModuleConfig,
) -> Result<(), FatalError>;
);
fn optimize_thin(
cgcx: &CodegenContext<Self>,
thin: ThinModule<Self>,
) -> Result<ModuleCodegen<Self::Module>, FatalError>;
) -> ModuleCodegen<Self::Module>;
fn codegen(
cgcx: &CodegenContext<Self>,
module: ModuleCodegen<Self::Module>,
config: &ModuleConfig,
) -> Result<CompiledModule, FatalError>;
) -> CompiledModule;
fn prepare_thin(
module: ModuleCodegen<Self::Module>,
want_summary: bool,

View file

@ -280,22 +280,110 @@ impl<'tcx> CompileTimeInterpCx<'tcx> {
interp_ok(match (a, b) {
// Comparisons between integers are always known.
(Scalar::Int(a), Scalar::Int(b)) => (a == b) as u8,
// Comparisons of null with an arbitrary scalar can be known if `scalar_may_be_null`
// indicates that the scalar can definitely *not* be null.
(Scalar::Int(int), ptr) | (ptr, Scalar::Int(int))
if int.is_null() && !self.scalar_may_be_null(ptr)? =>
{
0
// Comparing a pointer `ptr` with an integer `int` is equivalent to comparing
// `ptr-int` with null, so we can reduce this case to a `scalar_may_be_null` test.
(Scalar::Int(int), Scalar::Ptr(ptr, _)) | (Scalar::Ptr(ptr, _), Scalar::Int(int)) => {
let int = int.to_target_usize(*self.tcx);
// The `wrapping_neg` here may produce a value that is not
// a valid target usize any more... but `wrapping_offset` handles that correctly.
let offset_ptr = ptr.wrapping_offset(Size::from_bytes(int.wrapping_neg()), self);
if !self.scalar_may_be_null(Scalar::from_pointer(offset_ptr, self))? {
// `ptr.wrapping_sub(int)` is definitely not equal to `0`, so `ptr != int`
0
} else {
// `ptr.wrapping_sub(int)` could be equal to `0`, but might not be,
// so we cannot know for sure if `ptr == int` or not
2
}
}
(Scalar::Ptr(a, _), Scalar::Ptr(b, _)) => {
let (a_prov, a_offset) = a.prov_and_relative_offset();
let (b_prov, b_offset) = b.prov_and_relative_offset();
let a_allocid = a_prov.alloc_id();
let b_allocid = b_prov.alloc_id();
let a_info = self.get_alloc_info(a_allocid);
let b_info = self.get_alloc_info(b_allocid);
// Check if the pointers cannot be equal due to alignment
if a_info.align > Align::ONE && b_info.align > Align::ONE {
let min_align = Ord::min(a_info.align.bytes(), b_info.align.bytes());
let a_residue = a_offset.bytes() % min_align;
let b_residue = b_offset.bytes() % min_align;
if a_residue != b_residue {
// If the two pointers have a different residue modulo their
// common alignment, they cannot be equal.
return interp_ok(0);
}
// The pointers have the same residue modulo their common alignment,
// so they could be equal. Try the other checks.
}
if let (Some(GlobalAlloc::Static(a_did)), Some(GlobalAlloc::Static(b_did))) = (
self.tcx.try_get_global_alloc(a_allocid),
self.tcx.try_get_global_alloc(b_allocid),
) {
if a_allocid == b_allocid {
debug_assert_eq!(
a_did, b_did,
"different static item DefIds had same AllocId? {a_allocid:?} == {b_allocid:?}, {a_did:?} != {b_did:?}"
);
// Comparing two pointers into the same static. As per
// https://doc.rust-lang.org/nightly/reference/items/static-items.html#r-items.static.intro
// a static cannot be duplicated, so if two pointers are into the same
// static, they are equal if and only if their offsets are equal.
(a_offset == b_offset) as u8
} else {
debug_assert_ne!(
a_did, b_did,
"same static item DefId had two different AllocIds? {a_allocid:?} != {b_allocid:?}, {a_did:?} == {b_did:?}"
);
// Comparing two pointers into different statics.
// We can never determine for sure that two pointers into different statics
// are *equal*, but we can know that they are *inequal* if they are both
// strictly in-bounds (i.e. in-bounds and not one-past-the-end) of
// their respective static, as different non-zero-sized statics cannot
// overlap or be deduplicated as per
// https://doc.rust-lang.org/nightly/reference/items/static-items.html#r-items.static.intro
// (non-deduplication), and
// https://doc.rust-lang.org/nightly/reference/items/static-items.html#r-items.static.storage-disjointness
// (non-overlapping).
if a_offset < a_info.size && b_offset < b_info.size {
0
} else {
// Otherwise, conservatively say we don't know.
// There are some cases we could still return `0` for, e.g.
// if the pointers being equal would require their statics to overlap
// one or more bytes, but for simplicity we currently only check
// strictly in-bounds pointers.
2
}
}
} else {
// All other cases we conservatively say we don't know.
//
// For comparing statics to non-statics, as per https://doc.rust-lang.org/nightly/reference/items/static-items.html#r-items.static.storage-disjointness
// immutable statics can overlap with other kinds of allocations sometimes.
//
// FIXME: We could be more decisive for (non-zero-sized) mutable statics,
// which cannot overlap with other kinds of allocations.
//
// Functions and vtables can be duplicated and deduplicated, so we
// cannot be sure of runtime equality of pointers to the same one, or the
// runtime inequality of pointers to different ones (see e.g. #73722),
// so comparing those should return 2, whether they are the same allocation
// or not.
//
// `GlobalAlloc::TypeId` exists mostly to prevent consteval from comparing
// `TypeId`s, so comparing those should always return 2, whether they are the
// same allocation or not.
//
// FIXME: We could revisit comparing pointers into the same
// `GlobalAlloc::Memory` once https://github.com/rust-lang/rust/issues/128775
// is fixed (but they can be deduplicated, so comparing pointers into different
// ones should return 2).
2
}
}
// Other ways of comparing integers and pointers can never be known for sure.
(Scalar::Int { .. }, Scalar::Ptr(..)) | (Scalar::Ptr(..), Scalar::Int { .. }) => 2,
// FIXME: return a `1` for when both sides are the same pointer, *except* that
// some things (like functions and vtables) do not have stable addresses
// so we need to be careful around them (see e.g. #73722).
// FIXME: return `0` for at least some comparisons where we can reliably
// determine the result of runtime inequality tests at compile-time.
// Examples include comparison of addresses in different static items.
(Scalar::Ptr(..), Scalar::Ptr(..)) => 2,
})
}
}

View file

@ -46,7 +46,7 @@
//! Frozen::freeze(new_bar)`).
/// An owned immutable value.
#[derive(Debug)]
#[derive(Debug, Clone)]
pub struct Frozen<T>(T);
impl<T> Frozen<T> {

View file

@ -7,12 +7,12 @@ use rustc_ast::mut_visit::*;
use rustc_ast::tokenstream::TokenStream;
use rustc_ast::visit::{self, AssocCtxt, Visitor, VisitorResult, try_visit, walk_list};
use rustc_ast::{
self as ast, AssocItemKind, AstNodeWrapper, AttrArgs, AttrStyle, AttrVec, CRATE_NODE_ID,
DUMMY_NODE_ID, ExprKind, ForeignItemKind, HasAttrs, HasNodeId, Inline, ItemKind, MacStmtStyle,
MetaItemInner, MetaItemKind, ModKind, NodeId, PatKind, StmtKind, TyKind, token,
self as ast, AssocItemKind, AstNodeWrapper, AttrArgs, AttrStyle, AttrVec, DUMMY_NODE_ID,
ExprKind, ForeignItemKind, HasAttrs, HasNodeId, Inline, ItemKind, MacStmtStyle, MetaItemInner,
MetaItemKind, ModKind, NodeId, PatKind, StmtKind, TyKind, token,
};
use rustc_ast_pretty::pprust;
use rustc_attr_parsing::{AttributeParser, EvalConfigResult, ShouldEmit, validate_attr};
use rustc_attr_parsing::{AttributeParser, Early, EvalConfigResult, ShouldEmit, validate_attr};
use rustc_data_structures::flat_map_in_place::FlatMapInPlace;
use rustc_data_structures::stack::ensure_sufficient_stack;
use rustc_errors::PResult;
@ -2165,7 +2165,7 @@ impl<'a, 'b> InvocationCollector<'a, 'b> {
None,
Target::MacroCall,
call.span(),
CRATE_NODE_ID,
self.cx.current_expansion.lint_node_id,
Some(self.cx.ecfg.features),
ShouldEmit::ErrorsAndLints,
);
@ -2184,7 +2184,9 @@ impl<'a, 'b> InvocationCollector<'a, 'b> {
self.cx.current_expansion.lint_node_id,
BuiltinLintDiag::UnusedDocComment(attr.span),
);
} else if rustc_attr_parsing::is_builtin_attr(attr) {
} else if rustc_attr_parsing::is_builtin_attr(attr)
&& !AttributeParser::<Early>::is_parsed_attribute(&attr.path())
{
let attr_name = attr.ident().unwrap().name;
// `#[cfg]` and `#[cfg_attr]` are special - they are
// eagerly evaluated.

View file

@ -35,4 +35,5 @@ pub enum AttributeLintKind {
IllFormedAttributeInput { suggestions: Vec<String> },
EmptyAttribute { first_span: Span },
InvalidTarget { name: AttrPath, target: Target, applied: Vec<String>, only: &'static str },
InvalidStyle { name: AttrPath, is_used_as_inner: bool, target: Target, target_span: Span },
}

View file

@ -509,10 +509,6 @@ hir_analysis_supertrait_item_shadowee = item from `{$supertrait}` is shadowed by
hir_analysis_supertrait_item_shadowing = trait item `{$item}` from `{$subtrait}` shadows identically named item from supertrait
hir_analysis_tait_forward_compat2 = item does not constrain `{$opaque_type}`
.note = consider removing `#[define_opaque]` or adding an empty `#[define_opaque()]`
.opaque = this opaque type is supposed to be constrained
hir_analysis_target_feature_on_main = `main` function is not allowed to have `#[target_feature]`
hir_analysis_too_large_static = extern static is too large for the target architecture

View file

@ -4,9 +4,10 @@ use rustc_hir::{self as hir, Expr, ImplItem, Item, Node, TraitItem, def, intravi
use rustc_middle::bug;
use rustc_middle::hir::nested_filter;
use rustc_middle::ty::{self, DefiningScopeKind, Ty, TyCtxt, TypeVisitableExt};
use rustc_trait_selection::opaque_types::report_item_does_not_constrain_error;
use tracing::{debug, instrument, trace};
use crate::errors::{TaitForwardCompat2, UnconstrainedOpaqueType};
use crate::errors::UnconstrainedOpaqueType;
/// Checks "defining uses" of opaque `impl Trait` in associated types.
/// These can only be defined by associated items of the same trait.
@ -127,14 +128,11 @@ impl<'tcx> TaitConstraintLocator<'tcx> {
}
fn non_defining_use_in_defining_scope(&mut self, item_def_id: LocalDefId) {
let guar = self.tcx.dcx().emit_err(TaitForwardCompat2 {
span: self
.tcx
.def_ident_span(item_def_id)
.unwrap_or_else(|| self.tcx.def_span(item_def_id)),
opaque_type_span: self.tcx.def_span(self.def_id),
opaque_type: self.tcx.def_path_str(self.def_id),
});
// We make sure that all opaque types get defined while
// type checking the defining scope, so this error is unreachable
// with the new solver.
assert!(!self.tcx.next_trait_solver_globally());
let guar = report_item_does_not_constrain_error(self.tcx, item_def_id, self.def_id, None);
self.insert_found(ty::OpaqueHiddenType::new_error(self.tcx, guar));
}
@ -252,9 +250,7 @@ pub(super) fn find_opaque_ty_constraints_for_rpit<'tcx>(
} else if let Some(hidden_ty) = tables.concrete_opaque_types.get(&def_id) {
hidden_ty.ty
} else {
// FIXME(-Znext-solver): This should not be necessary and we should
// instead rely on inference variable fallback inside of typeck itself.
assert!(!tcx.next_trait_solver_globally());
// We failed to resolve the opaque type or it
// resolves to itself. We interpret this as the
// no values of the hidden type ever being constructed,
@ -273,6 +269,7 @@ pub(super) fn find_opaque_ty_constraints_for_rpit<'tcx>(
if let Err(guar) = hir_ty.error_reported() {
Ty::new_error(tcx, guar)
} else {
assert!(!tcx.next_trait_solver_globally());
hir_ty
}
}

View file

@ -410,17 +410,6 @@ pub(crate) struct UnconstrainedOpaqueType {
pub what: &'static str,
}
#[derive(Diagnostic)]
#[diag(hir_analysis_tait_forward_compat2)]
#[note]
pub(crate) struct TaitForwardCompat2 {
#[primary_span]
pub span: Span,
#[note(hir_analysis_opaque)]
pub opaque_type_span: Span,
pub opaque_type: String,
}
pub(crate) struct MissingTypeParams {
pub span: Span,
pub def_span: Span,

View file

@ -82,7 +82,7 @@ mod coherence;
mod collect;
mod constrained_generic_params;
mod delegation;
mod errors;
pub mod errors;
pub mod hir_ty_lowering;
pub mod hir_wf_check;
mod impl_wf_check;

View file

@ -511,7 +511,9 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
// Untranslatable diagnostics are okay for rustc internals
#[allow(rustc::untranslatable_diagnostic)]
#[allow(rustc::diagnostic_outside_of_impl)]
if self.tcx.has_attr(def_id, sym::rustc_evaluate_where_clauses) {
if self.has_rustc_attrs
&& self.tcx.has_attr(def_id, sym::rustc_evaluate_where_clauses)
{
let predicates = self.tcx.predicates_of(def_id);
let predicates = predicates.instantiate(self.tcx, args);
for (predicate, predicate_span) in predicates {
@ -894,7 +896,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
}
// If we have `rustc_do_not_const_check`, do not check `[const]` bounds.
if self.tcx.has_attr(self.body_id, sym::rustc_do_not_const_check) {
if self.has_rustc_attrs && self.tcx.has_attr(self.body_id, sym::rustc_do_not_const_check) {
return;
}

View file

@ -126,6 +126,10 @@ pub(crate) struct FnCtxt<'a, 'tcx> {
/// These are stored here so we may collect them when canonicalizing user
/// type ascriptions later.
pub(super) trait_ascriptions: RefCell<ItemLocalMap<Vec<ty::Clause<'tcx>>>>,
/// Whether the current crate enables the `rustc_attrs` feature.
/// This allows to skip processing attributes in many places.
pub(super) has_rustc_attrs: bool,
}
impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
@ -154,6 +158,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
diverging_fallback_behavior,
diverging_block_behavior,
trait_ascriptions: Default::default(),
has_rustc_attrs: root_ctxt.tcx.features().rustc_attrs(),
}
}
@ -525,10 +530,13 @@ fn parse_never_type_options_attr(
let mut fallback = None;
let mut block = None;
let items = tcx
.get_attr(CRATE_DEF_ID, sym::rustc_never_type_options)
.map(|attr| attr.meta_item_list().unwrap())
.unwrap_or_default();
let items = if tcx.features().rustc_attrs() {
tcx.get_attr(CRATE_DEF_ID, sym::rustc_never_type_options)
.map(|attr| attr.meta_item_list().unwrap())
} else {
None
};
let items = items.unwrap_or_default();
for item in items {
if item.has_name(sym::fallback) && fallback.is_none() {

View file

@ -46,12 +46,12 @@ use rustc_data_structures::unord::UnordSet;
use rustc_errors::codes::*;
use rustc_errors::{Applicability, ErrorGuaranteed, pluralize, struct_span_code_err};
use rustc_hir as hir;
use rustc_hir::attrs::AttributeKind;
use rustc_hir::def::{DefKind, Res};
use rustc_hir::{HirId, HirIdMap, Node, find_attr};
use rustc_hir::{HirId, HirIdMap, Node};
use rustc_hir_analysis::check::{check_abi, check_custom_abi};
use rustc_hir_analysis::hir_ty_lowering::HirTyLowerer;
use rustc_infer::traits::{ObligationCauseCode, ObligationInspector, WellFormedLoc};
use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrFlags;
use rustc_middle::query::Providers;
use rustc_middle::ty::{self, Ty, TyCtxt};
use rustc_middle::{bug, span_bug};
@ -174,7 +174,7 @@ fn typeck_with_inspect<'tcx>(
.map(|(idx, ty)| fcx.normalize(arg_span(idx), ty)),
);
if find_attr!(tcx.get_all_attrs(def_id), AttributeKind::Naked(..)) {
if tcx.codegen_fn_attrs(def_id).flags.contains(CodegenFnAttrFlags::NAKED) {
naked_functions::typeck_naked_fn(tcx, def_id, body);
}
@ -247,6 +247,13 @@ fn typeck_with_inspect<'tcx>(
debug!(pending_obligations = ?fcx.fulfillment_cx.borrow().pending_obligations());
// We need to handle opaque types before emitting ambiguity errors as applying
// defining uses may guide type inference.
if fcx.next_trait_solver() {
fcx.handle_opaque_type_uses_next();
}
fcx.select_obligations_where_possible(|_| {});
if let None = fcx.infcx.tainted_by_errors() {
fcx.report_ambiguity_errors();
}

View file

@ -777,31 +777,16 @@ impl<'a, 'tcx> ProbeContext<'a, 'tcx> {
self.assemble_inherent_candidates_from_object(generalized_self_ty);
self.assemble_inherent_impl_candidates_for_type(p.def_id(), receiver_steps);
if self.tcx.has_attr(p.def_id(), sym::rustc_has_incoherent_inherent_impls) {
self.assemble_inherent_candidates_for_incoherent_ty(
raw_self_ty,
receiver_steps,
);
}
self.assemble_inherent_candidates_for_incoherent_ty(raw_self_ty, receiver_steps);
}
ty::Adt(def, _) => {
let def_id = def.did();
self.assemble_inherent_impl_candidates_for_type(def_id, receiver_steps);
if self.tcx.has_attr(def_id, sym::rustc_has_incoherent_inherent_impls) {
self.assemble_inherent_candidates_for_incoherent_ty(
raw_self_ty,
receiver_steps,
);
}
self.assemble_inherent_candidates_for_incoherent_ty(raw_self_ty, receiver_steps);
}
ty::Foreign(did) => {
self.assemble_inherent_impl_candidates_for_type(did, receiver_steps);
if self.tcx.has_attr(did, sym::rustc_has_incoherent_inherent_impls) {
self.assemble_inherent_candidates_for_incoherent_ty(
raw_self_ty,
receiver_steps,
);
}
self.assemble_inherent_candidates_for_incoherent_ty(raw_self_ty, receiver_steps);
}
ty::Param(_) => {
self.assemble_inherent_candidates_from_param(raw_self_ty);
@ -2400,17 +2385,14 @@ impl<'a, 'tcx> ProbeContext<'a, 'tcx> {
if !self.is_relevant_kind_for_mode(x.kind) {
return false;
}
if self.matches_by_doc_alias(x.def_id) {
return true;
}
match edit_distance_with_substrings(
if let Some(d) = edit_distance_with_substrings(
name.as_str(),
x.name().as_str(),
max_dist,
) {
Some(d) => d > 0,
None => false,
return d > 0;
}
self.matches_by_doc_alias(x.def_id)
})
.copied()
.collect()

View file

@ -1,5 +1,218 @@
use super::FnCtxt;
use rustc_hir::def::DefKind;
use rustc_infer::traits::ObligationCause;
use rustc_middle::ty::{
self, DefiningScopeKind, EarlyBinder, OpaqueHiddenType, OpaqueTypeKey, TypeVisitableExt,
TypingMode,
};
use rustc_trait_selection::error_reporting::infer::need_type_info::TypeAnnotationNeeded;
use rustc_trait_selection::opaque_types::{
NonDefiningUseReason, opaque_type_has_defining_use_args, report_item_does_not_constrain_error,
};
use rustc_trait_selection::solve;
use tracing::{debug, instrument};
use crate::FnCtxt;
impl<'tcx> FnCtxt<'_, 'tcx> {
/// This takes all the opaque type uses during HIR typeck. It first computes
/// the concrete hidden type by iterating over all defining uses.
///
/// A use during HIR typeck is defining if all non-lifetime arguments are
/// unique generic parameters and the hidden type does not reference any
/// inference variables.
///
/// It then uses these defining uses to guide inference for all other uses.
#[instrument(level = "debug", skip(self))]
pub(super) fn handle_opaque_type_uses_next(&mut self) {
// We clone the opaques instead of stealing them here as they are still used for
// normalization in the next generation trait solver.
let mut opaque_types: Vec<_> = self.infcx.clone_opaque_types();
// Remember how many entries the storage had when we processed it, so that
// uses added afterwards (e.g. during writeback) can be detected.
let num_entries = self.inner.borrow_mut().opaque_types().num_entries();
let prev = self.checked_opaque_types_storage_entries.replace(Some(num_entries));
// This must only run once per body.
debug_assert_eq!(prev, None);
// Resolve inference variables as far as possible before classifying uses.
for entry in &mut opaque_types {
*entry = self.resolve_vars_if_possible(*entry);
}
debug!(?opaque_types);
self.compute_concrete_opaque_types(&opaque_types);
self.apply_computed_concrete_opaque_types(&opaque_types);
}
}
/// The combined result of inspecting all uses of a single opaque type
/// inside the current body.
enum UsageKind<'tcx> {
/// No use of the opaque type has been found (yet).
None,
/// A use whose generic arguments do not qualify it as a defining use.
NonDefiningUse(OpaqueTypeKey<'tcx>, OpaqueHiddenType<'tcx>),
/// A use with defining arguments whose hidden type still contains
/// inference variables, so it cannot be used to define the opaque.
UnconstrainedHiddenType(OpaqueHiddenType<'tcx>),
/// A defining use was found and its hidden type has been recorded.
HasDefiningUse,
}
impl<'tcx> UsageKind<'tcx> {
/// Folds the classification of another use into `self`.
///
/// Callers stop iterating once `HasDefiningUse` has been reached and never
/// pass `UsageKind::None` as `other`, hence the `unreachable!()` arm.
fn merge(&mut self, other: UsageKind<'tcx>) {
match (&*self, &other) {
(UsageKind::HasDefiningUse, _) | (_, UsageKind::None) => unreachable!(),
(UsageKind::None, _) => *self = other,
// When merging non-defining uses, prefer earlier ones. This means
// the error happens as early as possible.
(
UsageKind::NonDefiningUse(..) | UsageKind::UnconstrainedHiddenType(..),
UsageKind::NonDefiningUse(..),
) => {}
// When merging unconstrained hidden types, we prefer later ones. This is
// because in most cases the defining use is the final return statement
// of our function, and other uses with defining arguments are likely not
// intended to be defining.
(
UsageKind::NonDefiningUse(..) | UsageKind::UnconstrainedHiddenType(..),
UsageKind::UnconstrainedHiddenType(..) | UsageKind::HasDefiningUse,
) => *self = other,
}
}
}
impl<'tcx> FnCtxt<'_, 'tcx> {
/// For each opaque type this body is allowed to define, walk all recorded
/// uses and compute the concrete hidden type from the defining ones.
///
/// If no defining use exists, an appropriate error is emitted and an error
/// type is recorded instead, so later stages do not produce follow-up errors.
fn compute_concrete_opaque_types(
&mut self,
opaque_types: &[(OpaqueTypeKey<'tcx>, OpaqueHiddenType<'tcx>)],
) {
let tcx = self.tcx;
// The typing mode carries the set of opaques (and generators) this body
// may define.
let TypingMode::Analysis { defining_opaque_types_and_generators } = self.typing_mode()
else {
unreachable!();
};
for def_id in defining_opaque_types_and_generators {
match tcx.def_kind(def_id) {
DefKind::OpaqueTy => {}
DefKind::Closure => continue,
_ => unreachable!("not opaque or generator: {def_id:?}"),
}
// Classify every recorded use of this opaque, stopping at the first
// defining use.
let mut usage_kind = UsageKind::None;
for &(opaque_type_key, hidden_type) in opaque_types {
if opaque_type_key.def_id != def_id {
continue;
}
usage_kind.merge(self.consider_opaque_type_use(opaque_type_key, hidden_type));
if let UsageKind::HasDefiningUse = usage_kind {
break;
}
}
// Everything except `HasDefiningUse` is an error case; emit (or reuse)
// a diagnostic and record an error hidden type below.
let guar = match usage_kind {
UsageKind::None => {
// Reuse an already-emitted error if there is one instead of
// reporting that nothing constrains the opaque.
if let Some(guar) = self.tainted_by_errors() {
guar
} else {
report_item_does_not_constrain_error(self.tcx, self.body_id, def_id, None)
}
}
UsageKind::NonDefiningUse(opaque_type_key, hidden_type) => {
report_item_does_not_constrain_error(
self.tcx,
self.body_id,
def_id,
Some((opaque_type_key, hidden_type.span)),
)
}
UsageKind::UnconstrainedHiddenType(hidden_type) => {
// Point the inference-failure diagnostic at one of the inference
// variables inside the hidden type, falling back to the hidden
// type itself if none is found.
let infer_var = hidden_type
.ty
.walk()
.filter_map(ty::GenericArg::as_term)
.find(|term| term.is_infer())
.unwrap_or_else(|| hidden_type.ty.into());
self.err_ctxt()
.emit_inference_failure_err(
self.body_id,
hidden_type.span,
infer_var,
TypeAnnotationNeeded::E0282,
false,
)
.emit()
}
UsageKind::HasDefiningUse => continue,
};
self.typeck_results
.borrow_mut()
.concrete_opaque_types
.insert(def_id, OpaqueHiddenType::new_error(tcx, guar));
self.set_tainted_by_errors(guar);
}
}
/// Classifies a single use of an opaque type: checks whether its generic
/// arguments qualify it as a defining use and, if so, records the
/// (normalized) hidden type in the typeck results.
fn consider_opaque_type_use(
&mut self,
opaque_type_key: OpaqueTypeKey<'tcx>,
hidden_type: OpaqueHiddenType<'tcx>,
) -> UsageKind<'tcx> {
// A defining use must apply the opaque to unique generic parameters.
if let Err(err) = opaque_type_has_defining_use_args(
&self,
opaque_type_key,
hidden_type.span,
DefiningScopeKind::HirTypeck,
) {
match err {
NonDefiningUseReason::Tainted(guar) => {
// An error was already emitted; record an error hidden type and
// treat the opaque as defined so no further diagnostics follow.
self.typeck_results.borrow_mut().concrete_opaque_types.insert(
opaque_type_key.def_id,
OpaqueHiddenType::new_error(self.tcx, guar),
);
return UsageKind::HasDefiningUse;
}
_ => return UsageKind::NonDefiningUse(opaque_type_key, hidden_type),
};
}
// We ignore uses of the opaque if they have any inference variables
// as this can frequently happen with recursive calls.
//
// See `tests/ui/traits/next-solver/opaques/universal-args-non-defining.rs`.
if hidden_type.ty.has_non_region_infer() {
return UsageKind::UnconstrainedHiddenType(hidden_type);
}
// Deeply normalize the hidden type, turning normalization failures into
// an error hidden type after reporting them.
let cause = ObligationCause::misc(hidden_type.span, self.body_id);
let at = self.at(&cause, self.param_env);
let hidden_type = match solve::deeply_normalize(at, hidden_type) {
Ok(hidden_type) => hidden_type,
Err(errors) => {
let guar = self.err_ctxt().report_fulfillment_errors(errors);
OpaqueHiddenType::new_error(self.tcx, guar)
}
};
// Map the hidden type back from this use's generic arguments to the
// declaration-level parameters of the opaque.
let hidden_type = hidden_type.remap_generic_params_to_declaration_params(
opaque_type_key,
self.tcx,
DefiningScopeKind::HirTypeck,
);
let prev = self
.typeck_results
.borrow_mut()
.concrete_opaque_types
.insert(opaque_type_key.def_id, hidden_type);
// The caller stops at the first defining use, so nothing may have been
// recorded for this opaque yet.
assert!(prev.is_none());
UsageKind::HasDefiningUse
}
/// Equates each recorded use of an opaque type with the concrete hidden type
/// computed for it, thereby guiding inference for the non-defining uses.
fn apply_computed_concrete_opaque_types(
    &mut self,
    opaque_types: &[(OpaqueTypeKey<'tcx>, OpaqueHiddenType<'tcx>)],
) {
    let tcx = self.tcx;
    for &(opaque_key, use_hidden_ty) in opaque_types {
        // `compute_concrete_opaque_types` recorded an entry (possibly an
        // error type) for each opaque, so this lookup is expected to succeed.
        let concrete = *self
            .typeck_results
            .borrow_mut()
            .concrete_opaque_types
            .get(&opaque_key.def_id)
            .unwrap();
        // Instantiate the declaration-level hidden type with this use's
        // arguments and demand that the use's hidden type equals it.
        let instantiated = EarlyBinder::bind(concrete.ty).instantiate(tcx, opaque_key.args);
        self.demand_eqtype(use_hidden_ty.span, instantiated, use_hidden_ty.ty);
    }
}
/// We may in theory add further uses of an opaque after cloning the opaque
/// types storage during writeback when computing the defining uses.
///

View file

@ -1747,7 +1747,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
}
fn should_log_capture_analysis(&self, closure_def_id: LocalDefId) -> bool {
self.tcx.has_attr(closure_def_id, sym::rustc_capture_analysis)
self.has_rustc_attrs && self.tcx.has_attr(closure_def_id, sym::rustc_capture_analysis)
}
fn log_capture_analysis_first_pass(

View file

@ -27,7 +27,7 @@ use rustc_middle::ty::{
};
use rustc_span::{Span, sym};
use rustc_trait_selection::error_reporting::infer::need_type_info::TypeAnnotationNeeded;
use rustc_trait_selection::opaque_types::check_opaque_type_parameter_valid;
use rustc_trait_selection::opaque_types::opaque_type_has_defining_use_args;
use rustc_trait_selection::solve;
use tracing::{debug, instrument};
@ -45,7 +45,8 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
// This attribute causes us to dump some writeback information
// in the form of errors, which is used for unit tests.
let rustc_dump_user_args = self.tcx.has_attr(item_def_id, sym::rustc_dump_user_args);
let rustc_dump_user_args =
self.has_rustc_attrs && self.tcx.has_attr(item_def_id, sym::rustc_dump_user_args);
let mut wbcx = WritebackCx::new(self, body, rustc_dump_user_args);
for param in body.params {
@ -546,8 +547,24 @@ impl<'cx, 'tcx> WritebackCx<'cx, 'tcx> {
}
}
/// With the next-generation trait solver the concrete hidden types of opaque
/// types were already fully computed during typeck, so writeback only needs
/// to move them into the final typeck results.
fn visit_opaque_types_next(&mut self) {
let mut fcx_typeck_results = self.fcx.typeck_results.borrow_mut();
assert_eq!(fcx_typeck_results.hir_owner, self.typeck_results.hir_owner);
// Invariant: every recorded hidden type is fully resolved at this point.
for hidden_ty in fcx_typeck_results.concrete_opaque_types.values() {
assert!(!hidden_ty.has_infer());
}
// Nothing must have been written into the destination map yet.
assert_eq!(self.typeck_results.concrete_opaque_types.len(), 0);
self.typeck_results.concrete_opaque_types =
mem::take(&mut fcx_typeck_results.concrete_opaque_types);
}
#[instrument(skip(self), level = "debug")]
fn visit_opaque_types(&mut self) {
if self.fcx.next_trait_solver() {
return self.visit_opaque_types_next();
}
let tcx = self.tcx();
// We clone the opaques instead of stealing them here as they are still used for
// normalization in the next generation trait solver.
@ -558,17 +575,14 @@ impl<'cx, 'tcx> WritebackCx<'cx, 'tcx> {
for (opaque_type_key, hidden_type) in opaque_types {
let hidden_type = self.resolve(hidden_type, &hidden_type.span);
let opaque_type_key = self.resolve(opaque_type_key, &hidden_type.span);
if !self.fcx.next_trait_solver() {
if let ty::Alias(ty::Opaque, alias_ty) = hidden_type.ty.kind()
&& alias_ty.def_id == opaque_type_key.def_id.to_def_id()
&& alias_ty.args == opaque_type_key.args
{
continue;
}
if let ty::Alias(ty::Opaque, alias_ty) = hidden_type.ty.kind()
&& alias_ty.def_id == opaque_type_key.def_id.to_def_id()
&& alias_ty.args == opaque_type_key.args
{
continue;
}
if let Err(err) = check_opaque_type_parameter_valid(
if let Err(err) = opaque_type_has_defining_use_args(
&self.fcx,
opaque_type_key,
hidden_type.span,
@ -923,6 +937,7 @@ impl<'cx, 'tcx> Resolver<'cx, 'tcx> {
}
}
#[instrument(level = "debug", skip(self, outer_exclusive_binder, new_err))]
fn handle_term<T>(
&mut self,
value: T,

View file

@ -491,19 +491,15 @@ pub struct ChunkedBitSet<T> {
marker: PhantomData<T>,
}
// Note: the chunk domain size is duplicated in each variant. This is a bit
// inconvenient, but it allows the type size to be smaller than if we had an
// outer struct containing a chunk domain size plus the `Chunk`, because the
// compiler can place the chunk domain size after the tag.
// NOTE: The chunk size is computed on-the-fly on each manipulation of a chunk.
// This avoids storing it, as it's almost always CHUNK_BITS except for the last one.
#[derive(Clone, Debug, PartialEq, Eq)]
enum Chunk {
/// A chunk that is all zeros; we don't represent the zeros explicitly.
/// The `ChunkSize` is always non-zero.
Zeros(ChunkSize),
Zeros,
/// A chunk that is all ones; we don't represent the ones explicitly.
/// `ChunkSize` is always non-zero.
Ones(ChunkSize),
Ones,
/// A chunk that has a mix of zeros and ones, which are represented
/// explicitly and densely. It never has all zeros or all ones.
@ -514,16 +510,14 @@ enum Chunk {
/// to store the length, which would make this type larger. These excess
/// words are always zero, as are any excess bits in the final in-use word.
///
/// The first `ChunkSize` field is always non-zero.
///
/// The second `ChunkSize` field is the count of 1s set in the chunk, and
/// The `ChunkSize` field is the count of 1s set in the chunk, and
/// must satisfy `0 < count < chunk_domain_size`.
///
/// The words are within an `Rc` because it's surprisingly common to
/// duplicate an entire chunk, e.g. in `ChunkedBitSet::clone_from()`, or
/// when a `Mixed` chunk is union'd into a `Zeros` chunk. When we do need
/// to modify a chunk we use `Rc::make_mut`.
Mixed(ChunkSize, ChunkSize, Rc<[Word; CHUNK_WORDS]>),
Mixed(ChunkSize, Rc<[Word; CHUNK_WORDS]>),
}
// This type is used a lot. Make sure it doesn't unintentionally get bigger.
@ -535,6 +529,22 @@ impl<T> ChunkedBitSet<T> {
self.domain_size
}
/// Size in bits of the final chunk: the remainder of `domain_size` modulo
/// `CHUNK_BITS`, or a full `CHUNK_BITS` when the domain size divides evenly.
#[inline]
fn last_chunk_size(&self) -> ChunkSize {
    match self.domain_size % CHUNK_BITS {
        0 => CHUNK_BITS as ChunkSize,
        rem => rem as ChunkSize,
    }
}
/// Domain size of the chunk at index `chunk`. Every chunk covers a full
/// `CHUNK_BITS` bits except the final one, which may be shorter.
#[inline]
fn chunk_domain_size(&self, chunk: usize) -> ChunkSize {
    let last_index = self.chunks.len() - 1;
    if chunk == last_index { self.last_chunk_size() } else { CHUNK_BITS as ChunkSize }
}
#[cfg(test)]
fn assert_valid(&self) {
if self.domain_size == 0 {
@ -544,8 +554,9 @@ impl<T> ChunkedBitSet<T> {
assert!((self.chunks.len() - 1) * CHUNK_BITS <= self.domain_size);
assert!(self.chunks.len() * CHUNK_BITS >= self.domain_size);
for chunk in self.chunks.iter() {
chunk.assert_valid();
for (chunk_index, chunk) in self.chunks.iter().enumerate() {
let chunk_domain_size = self.chunk_domain_size(chunk_index);
chunk.assert_valid(chunk_domain_size);
}
}
}
@ -556,16 +567,7 @@ impl<T: Idx> ChunkedBitSet<T> {
let chunks = if domain_size == 0 {
Box::new([])
} else {
// All the chunks have a chunk_domain_size of `CHUNK_BITS` except
// the final one.
let final_chunk_domain_size = {
let n = domain_size % CHUNK_BITS;
if n == 0 { CHUNK_BITS } else { n }
};
let mut chunks =
vec![Chunk::new(CHUNK_BITS, is_empty); num_chunks(domain_size)].into_boxed_slice();
*chunks.last_mut().unwrap() = Chunk::new(final_chunk_domain_size, is_empty);
chunks
vec![if is_empty { Zeros } else { Ones }; num_chunks(domain_size)].into_boxed_slice()
};
ChunkedBitSet { domain_size, chunks, marker: PhantomData }
}
@ -594,11 +596,15 @@ impl<T: Idx> ChunkedBitSet<T> {
/// Count the number of bits in the set.
pub fn count(&self) -> usize {
self.chunks.iter().map(|chunk| chunk.count()).sum()
self.chunks
.iter()
.enumerate()
.map(|(index, chunk)| chunk.count(self.chunk_domain_size(index)))
.sum()
}
pub fn is_empty(&self) -> bool {
self.chunks.iter().all(|chunk| matches!(chunk, Zeros(..)))
self.chunks.iter().all(|chunk| matches!(chunk, Zeros))
}
/// Returns `true` if `self` contains `elem`.
@ -607,9 +613,9 @@ impl<T: Idx> ChunkedBitSet<T> {
assert!(elem.index() < self.domain_size);
let chunk = &self.chunks[chunk_index(elem)];
match &chunk {
Zeros(_) => false,
Ones(_) => true,
Mixed(_, _, words) => {
Zeros => false,
Ones => true,
Mixed(_, words) => {
let (word_index, mask) = chunk_word_index_and_mask(elem);
(words[word_index] & mask) != 0
}
@ -625,9 +631,10 @@ impl<T: Idx> ChunkedBitSet<T> {
pub fn insert(&mut self, elem: T) -> bool {
assert!(elem.index() < self.domain_size);
let chunk_index = chunk_index(elem);
let chunk_domain_size = self.chunk_domain_size(chunk_index);
let chunk = &mut self.chunks[chunk_index];
match *chunk {
Zeros(chunk_domain_size) => {
Zeros => {
if chunk_domain_size > 1 {
#[cfg(feature = "nightly")]
let mut words = {
@ -649,14 +656,14 @@ impl<T: Idx> ChunkedBitSet<T> {
let (word_index, mask) = chunk_word_index_and_mask(elem);
words_ref[word_index] |= mask;
*chunk = Mixed(chunk_domain_size, 1, words);
*chunk = Mixed(1, words);
} else {
*chunk = Ones(chunk_domain_size);
*chunk = Ones;
}
true
}
Ones(_) => false,
Mixed(chunk_domain_size, ref mut count, ref mut words) => {
Ones => false,
Mixed(ref mut count, ref mut words) => {
// We skip all the work if the bit is already set.
let (word_index, mask) = chunk_word_index_and_mask(elem);
if (words[word_index] & mask) == 0 {
@ -665,7 +672,7 @@ impl<T: Idx> ChunkedBitSet<T> {
let words = Rc::make_mut(words);
words[word_index] |= mask;
} else {
*chunk = Ones(chunk_domain_size);
*chunk = Ones;
}
true
} else {
@ -678,11 +685,7 @@ impl<T: Idx> ChunkedBitSet<T> {
/// Sets all bits to true.
pub fn insert_all(&mut self) {
for chunk in self.chunks.iter_mut() {
*chunk = match *chunk {
Zeros(chunk_domain_size)
| Ones(chunk_domain_size)
| Mixed(chunk_domain_size, ..) => Ones(chunk_domain_size),
}
*chunk = Ones;
}
}
@ -690,10 +693,11 @@ impl<T: Idx> ChunkedBitSet<T> {
pub fn remove(&mut self, elem: T) -> bool {
assert!(elem.index() < self.domain_size);
let chunk_index = chunk_index(elem);
let chunk_domain_size = self.chunk_domain_size(chunk_index);
let chunk = &mut self.chunks[chunk_index];
match *chunk {
Zeros(_) => false,
Ones(chunk_domain_size) => {
Zeros => false,
Ones => {
if chunk_domain_size > 1 {
#[cfg(feature = "nightly")]
let mut words = {
@ -722,13 +726,13 @@ impl<T: Idx> ChunkedBitSet<T> {
);
let (word_index, mask) = chunk_word_index_and_mask(elem);
words_ref[word_index] &= !mask;
*chunk = Mixed(chunk_domain_size, chunk_domain_size - 1, words);
*chunk = Mixed(chunk_domain_size - 1, words);
} else {
*chunk = Zeros(chunk_domain_size);
*chunk = Zeros;
}
true
}
Mixed(chunk_domain_size, ref mut count, ref mut words) => {
Mixed(ref mut count, ref mut words) => {
// We skip all the work if the bit is already clear.
let (word_index, mask) = chunk_word_index_and_mask(elem);
if (words[word_index] & mask) != 0 {
@ -737,7 +741,7 @@ impl<T: Idx> ChunkedBitSet<T> {
let words = Rc::make_mut(words);
words[word_index] &= !mask;
} else {
*chunk = Zeros(chunk_domain_size);
*chunk = Zeros
}
true
} else {
@ -748,11 +752,12 @@ impl<T: Idx> ChunkedBitSet<T> {
}
fn chunk_iter(&self, chunk_index: usize) -> ChunkIter<'_> {
let chunk_domain_size = self.chunk_domain_size(chunk_index);
match self.chunks.get(chunk_index) {
Some(Zeros(_chunk_domain_size)) => ChunkIter::Zeros,
Some(Ones(chunk_domain_size)) => ChunkIter::Ones(0..*chunk_domain_size as usize),
Some(Mixed(chunk_domain_size, _, words)) => {
let num_words = num_words(*chunk_domain_size as usize);
Some(Zeros) => ChunkIter::Zeros,
Some(Ones) => ChunkIter::Ones(0..chunk_domain_size as usize),
Some(Mixed(_, words)) => {
let num_words = num_words(chunk_domain_size as usize);
ChunkIter::Mixed(BitIter::new(&words[0..num_words]))
}
None => ChunkIter::Finished,
@ -765,23 +770,33 @@ impl<T: Idx> ChunkedBitSet<T> {
impl<T: Idx> BitRelations<ChunkedBitSet<T>> for ChunkedBitSet<T> {
fn union(&mut self, other: &ChunkedBitSet<T>) -> bool {
assert_eq!(self.domain_size, other.domain_size);
debug_assert_eq!(self.chunks.len(), other.chunks.len());
let num_chunks = self.chunks.len();
debug_assert_eq!(num_chunks, other.chunks.len());
let last_chunk_size = self.last_chunk_size();
debug_assert_eq!(last_chunk_size, other.last_chunk_size());
let mut changed = false;
for (mut self_chunk, other_chunk) in self.chunks.iter_mut().zip(other.chunks.iter()) {
for (chunk_index, (mut self_chunk, other_chunk)) in
self.chunks.iter_mut().zip(other.chunks.iter()).enumerate()
{
let chunk_domain_size = if chunk_index + 1 == num_chunks {
last_chunk_size
} else {
CHUNK_BITS as ChunkSize
};
match (&mut self_chunk, &other_chunk) {
(_, Zeros(_)) | (Ones(_), _) => {}
(Zeros(self_chunk_domain_size), Ones(other_chunk_domain_size))
| (Mixed(self_chunk_domain_size, ..), Ones(other_chunk_domain_size))
| (Zeros(self_chunk_domain_size), Mixed(other_chunk_domain_size, ..)) => {
(_, Zeros) | (Ones, _) => {}
(Zeros, Ones) | (Mixed(..), Ones) | (Zeros, Mixed(..)) => {
// `other_chunk` fully overwrites `self_chunk`
debug_assert_eq!(self_chunk_domain_size, other_chunk_domain_size);
*self_chunk = other_chunk.clone();
changed = true;
}
(
Mixed(self_chunk_domain_size, self_chunk_count, self_chunk_words),
Mixed(_other_chunk_domain_size, _other_chunk_count, other_chunk_words),
Mixed(self_chunk_count, self_chunk_words),
Mixed(_other_chunk_count, other_chunk_words),
) => {
// First check if the operation would change
// `self_chunk.words`. If not, we can avoid allocating some
@ -789,7 +804,7 @@ impl<T: Idx> BitRelations<ChunkedBitSet<T>> for ChunkedBitSet<T> {
// performance win. Also, we only need to operate on the
// in-use words, hence the slicing.
let op = |a, b| a | b;
let num_words = num_words(*self_chunk_domain_size as usize);
let num_words = num_words(chunk_domain_size as usize);
if bitwise_changes(
&self_chunk_words[0..num_words],
&other_chunk_words[0..num_words],
@ -806,8 +821,8 @@ impl<T: Idx> BitRelations<ChunkedBitSet<T>> for ChunkedBitSet<T> {
.iter()
.map(|w| w.count_ones() as ChunkSize)
.sum();
if *self_chunk_count == *self_chunk_domain_size {
*self_chunk = Ones(*self_chunk_domain_size);
if *self_chunk_count == chunk_domain_size {
*self_chunk = Ones;
}
changed = true;
}
@ -819,36 +834,41 @@ impl<T: Idx> BitRelations<ChunkedBitSet<T>> for ChunkedBitSet<T> {
fn subtract(&mut self, other: &ChunkedBitSet<T>) -> bool {
assert_eq!(self.domain_size, other.domain_size);
debug_assert_eq!(self.chunks.len(), other.chunks.len());
let num_chunks = self.chunks.len();
debug_assert_eq!(num_chunks, other.chunks.len());
let last_chunk_size = self.last_chunk_size();
debug_assert_eq!(last_chunk_size, other.last_chunk_size());
let mut changed = false;
for (mut self_chunk, other_chunk) in self.chunks.iter_mut().zip(other.chunks.iter()) {
for (chunk_index, (mut self_chunk, other_chunk)) in
self.chunks.iter_mut().zip(other.chunks.iter()).enumerate()
{
let chunk_domain_size = if chunk_index + 1 == num_chunks {
last_chunk_size
} else {
CHUNK_BITS as ChunkSize
};
match (&mut self_chunk, &other_chunk) {
(Zeros(..), _) | (_, Zeros(..)) => {}
(
Ones(self_chunk_domain_size) | Mixed(self_chunk_domain_size, _, _),
Ones(other_chunk_domain_size),
) => {
debug_assert_eq!(self_chunk_domain_size, other_chunk_domain_size);
(Zeros, _) | (_, Zeros) => {}
(Ones | Mixed(_, _), Ones) => {
changed = true;
*self_chunk = Zeros(*self_chunk_domain_size);
*self_chunk = Zeros;
}
(
Ones(self_chunk_domain_size),
Mixed(other_chunk_domain_size, other_chunk_count, other_chunk_words),
) => {
debug_assert_eq!(self_chunk_domain_size, other_chunk_domain_size);
(Ones, Mixed(other_chunk_count, other_chunk_words)) => {
changed = true;
let num_words = num_words(*self_chunk_domain_size as usize);
let num_words = num_words(chunk_domain_size as usize);
debug_assert!(num_words > 0 && num_words <= CHUNK_WORDS);
let mut tail_mask =
1 << (*other_chunk_domain_size - ((num_words - 1) * WORD_BITS) as u16) - 1;
1 << (chunk_domain_size - ((num_words - 1) * WORD_BITS) as u16) - 1;
let mut self_chunk_words = **other_chunk_words;
for word in self_chunk_words[0..num_words].iter_mut().rev() {
*word = !*word & tail_mask;
tail_mask = u64::MAX;
}
let self_chunk_count = *self_chunk_domain_size - *other_chunk_count;
let self_chunk_count = chunk_domain_size - *other_chunk_count;
debug_assert_eq!(
self_chunk_count,
self_chunk_words[0..num_words]
@ -856,16 +876,15 @@ impl<T: Idx> BitRelations<ChunkedBitSet<T>> for ChunkedBitSet<T> {
.map(|w| w.count_ones() as ChunkSize)
.sum()
);
*self_chunk =
Mixed(*self_chunk_domain_size, self_chunk_count, Rc::new(self_chunk_words));
*self_chunk = Mixed(self_chunk_count, Rc::new(self_chunk_words));
}
(
Mixed(self_chunk_domain_size, self_chunk_count, self_chunk_words),
Mixed(_other_chunk_domain_size, _other_chunk_count, other_chunk_words),
Mixed(self_chunk_count, self_chunk_words),
Mixed(_other_chunk_count, other_chunk_words),
) => {
// See [`<Self as BitRelations<ChunkedBitSet<T>>>::union`] for the explanation
let op = |a: u64, b: u64| a & !b;
let num_words = num_words(*self_chunk_domain_size as usize);
let num_words = num_words(chunk_domain_size as usize);
if bitwise_changes(
&self_chunk_words[0..num_words],
&other_chunk_words[0..num_words],
@ -883,7 +902,7 @@ impl<T: Idx> BitRelations<ChunkedBitSet<T>> for ChunkedBitSet<T> {
.map(|w| w.count_ones() as ChunkSize)
.sum();
if *self_chunk_count == 0 {
*self_chunk = Zeros(*self_chunk_domain_size);
*self_chunk = Zeros;
}
changed = true;
}
@ -895,28 +914,36 @@ impl<T: Idx> BitRelations<ChunkedBitSet<T>> for ChunkedBitSet<T> {
fn intersect(&mut self, other: &ChunkedBitSet<T>) -> bool {
assert_eq!(self.domain_size, other.domain_size);
debug_assert_eq!(self.chunks.len(), other.chunks.len());
let num_chunks = self.chunks.len();
debug_assert_eq!(num_chunks, other.chunks.len());
let last_chunk_size = self.last_chunk_size();
debug_assert_eq!(last_chunk_size, other.last_chunk_size());
let mut changed = false;
for (mut self_chunk, other_chunk) in self.chunks.iter_mut().zip(other.chunks.iter()) {
for (chunk_index, (mut self_chunk, other_chunk)) in
self.chunks.iter_mut().zip(other.chunks.iter()).enumerate()
{
let chunk_domain_size = if chunk_index + 1 == num_chunks {
last_chunk_size
} else {
CHUNK_BITS as ChunkSize
};
match (&mut self_chunk, &other_chunk) {
(Zeros(..), _) | (_, Ones(..)) => {}
(
Ones(self_chunk_domain_size),
Zeros(other_chunk_domain_size) | Mixed(other_chunk_domain_size, ..),
)
| (Mixed(self_chunk_domain_size, ..), Zeros(other_chunk_domain_size)) => {
debug_assert_eq!(self_chunk_domain_size, other_chunk_domain_size);
(Zeros, _) | (_, Ones) => {}
(Ones, Zeros | Mixed(..)) | (Mixed(..), Zeros) => {
changed = true;
*self_chunk = other_chunk.clone();
}
(
Mixed(self_chunk_domain_size, self_chunk_count, self_chunk_words),
Mixed(_other_chunk_domain_size, _other_chunk_count, other_chunk_words),
Mixed(self_chunk_count, self_chunk_words),
Mixed(_other_chunk_count, other_chunk_words),
) => {
// See [`<Self as BitRelations<ChunkedBitSet<T>>>::union`] for the explanation
let op = |a, b| a & b;
let num_words = num_words(*self_chunk_domain_size as usize);
let num_words = num_words(chunk_domain_size as usize);
if bitwise_changes(
&self_chunk_words[0..num_words],
&other_chunk_words[0..num_words],
@ -934,7 +961,7 @@ impl<T: Idx> BitRelations<ChunkedBitSet<T>> for ChunkedBitSet<T> {
.map(|w| w.count_ones() as ChunkSize)
.sum();
if *self_chunk_count == 0 {
*self_chunk = Zeros(*self_chunk_domain_size);
*self_chunk = Zeros;
}
changed = true;
}
@ -964,7 +991,7 @@ impl<T: Idx> BitRelations<ChunkedBitSet<T>> for DenseBitSet<T> {
words = &mut words[..CHUNK_WORDS];
}
match chunk {
Zeros(..) => {
Zeros => {
for word in words {
if *word != 0 {
changed = true;
@ -972,8 +999,8 @@ impl<T: Idx> BitRelations<ChunkedBitSet<T>> for DenseBitSet<T> {
}
}
}
Ones(..) => (),
Mixed(_, _, data) => {
Ones => (),
Mixed(_, data) => {
for (i, word) in words.iter_mut().enumerate() {
let new_val = *word & data[i];
if new_val != *word {
@ -1053,13 +1080,11 @@ impl<'a, T: Idx> Iterator for ChunkedBitIter<'a, T> {
impl Chunk {
#[cfg(test)]
fn assert_valid(&self) {
fn assert_valid(&self, chunk_domain_size: ChunkSize) {
assert!(chunk_domain_size as usize <= CHUNK_BITS);
match *self {
Zeros(chunk_domain_size) | Ones(chunk_domain_size) => {
assert!(chunk_domain_size as usize <= CHUNK_BITS);
}
Mixed(chunk_domain_size, count, ref words) => {
assert!(chunk_domain_size as usize <= CHUNK_BITS);
Zeros | Ones => {}
Mixed(count, ref words) => {
assert!(0 < count && count < chunk_domain_size);
// Check the number of set bits matches `count`.
@ -1083,18 +1108,12 @@ impl Chunk {
}
}
fn new(chunk_domain_size: usize, is_empty: bool) -> Self {
debug_assert!(0 < chunk_domain_size && chunk_domain_size <= CHUNK_BITS);
let chunk_domain_size = chunk_domain_size as ChunkSize;
if is_empty { Zeros(chunk_domain_size) } else { Ones(chunk_domain_size) }
}
/// Count the number of 1s in the chunk.
fn count(&self) -> usize {
fn count(&self, chunk_domain_size: ChunkSize) -> usize {
match *self {
Zeros(_) => 0,
Ones(chunk_domain_size) => chunk_domain_size as usize,
Mixed(_, count, _) => count as usize,
Zeros => 0,
Ones => chunk_domain_size as usize,
Mixed(count, _) => count as usize,
}
}
}

View file

@ -120,8 +120,9 @@ fn chunked_bitset() {
let mut b1 = ChunkedBitSet::<usize>::new_empty(1);
assert_eq!(
b1,
ChunkedBitSet { domain_size: 1, chunks: Box::new([Zeros(1)]), marker: PhantomData }
ChunkedBitSet { domain_size: 1, chunks: Box::new([Zeros]), marker: PhantomData }
);
assert_eq!(b1.chunk_domain_size(0), 1);
b1.assert_valid();
assert!(!b1.contains(0));
@ -129,12 +130,12 @@ fn chunked_bitset() {
assert!(b1.insert(0));
assert!(b1.contains(0));
assert_eq!(b1.count(), 1);
assert_eq!(b1.chunks(), [Ones(1)]);
assert_eq!(b1.chunks(), [Ones]);
assert!(!b1.insert(0));
assert!(b1.remove(0));
assert!(!b1.contains(0));
assert_eq!(b1.count(), 0);
assert_eq!(b1.chunks(), [Zeros(1)]);
assert_eq!(b1.chunks(), [Zeros]);
b1.assert_valid();
//-----------------------------------------------------------------------
@ -142,8 +143,9 @@ fn chunked_bitset() {
let mut b100 = ChunkedBitSet::<usize>::new_filled(100);
assert_eq!(
b100,
ChunkedBitSet { domain_size: 100, chunks: Box::new([Ones(100)]), marker: PhantomData }
ChunkedBitSet { domain_size: 100, chunks: Box::new([Ones]), marker: PhantomData }
);
assert_eq!(b100.chunk_domain_size(0), 100);
b100.assert_valid();
for i in 0..100 {
@ -152,7 +154,7 @@ fn chunked_bitset() {
assert_eq!(b100.count(), 100);
assert!(b100.remove(3));
assert!(b100.insert(3));
assert_eq!(b100.chunks(), vec![Ones(100)]);
assert_eq!(b100.chunks(), vec![Ones]);
assert!(
b100.remove(20) && b100.remove(30) && b100.remove(40) && b100.remove(99) && b100.insert(30)
);
@ -161,7 +163,6 @@ fn chunked_bitset() {
assert_eq!(
b100.chunks(),
vec![Mixed(
100,
97,
#[rustfmt::skip]
Rc::new([
@ -180,7 +181,7 @@ fn chunked_bitset() {
}
}
assert_eq!(num_removed, 97);
assert_eq!(b100.chunks(), vec![Zeros(100)]);
assert_eq!(b100.chunks(), vec![Zeros]);
b100.assert_valid();
//-----------------------------------------------------------------------
@ -188,23 +189,21 @@ fn chunked_bitset() {
let mut b2548 = ChunkedBitSet::<usize>::new_empty(2548);
assert_eq!(
b2548,
ChunkedBitSet {
domain_size: 2548,
chunks: Box::new([Zeros(2048), Zeros(500)]),
marker: PhantomData,
}
ChunkedBitSet { domain_size: 2548, chunks: Box::new([Zeros, Zeros]), marker: PhantomData }
);
assert_eq!(b2548.chunk_domain_size(0), 2048);
assert_eq!(b2548.chunk_domain_size(1), 500);
b2548.assert_valid();
b2548.insert(14);
b2548.remove(14);
assert_eq!(b2548.chunks(), vec![Zeros(2048), Zeros(500)]);
assert_eq!(b2548.chunks(), vec![Zeros, Zeros]);
b2548.insert_all();
for i in 0..2548 {
assert!(b2548.contains(i));
}
assert_eq!(b2548.count(), 2548);
assert_eq!(b2548.chunks(), vec![Ones(2048), Ones(500)]);
assert_eq!(b2548.chunks(), vec![Ones, Ones]);
b2548.assert_valid();
//-----------------------------------------------------------------------
@ -212,12 +211,10 @@ fn chunked_bitset() {
let mut b4096 = ChunkedBitSet::<usize>::new_empty(4096);
assert_eq!(
b4096,
ChunkedBitSet {
domain_size: 4096,
chunks: Box::new([Zeros(2048), Zeros(2048)]),
marker: PhantomData,
}
ChunkedBitSet { domain_size: 4096, chunks: Box::new([Zeros, Zeros]), marker: PhantomData }
);
assert_eq!(b4096.chunk_domain_size(0), 2048);
assert_eq!(b4096.chunk_domain_size(1), 2048);
b4096.assert_valid();
for i in 0..4096 {
@ -231,11 +228,11 @@ fn chunked_bitset() {
b4096.chunks(),
#[rustfmt::skip]
vec![
Mixed(2048, 1, Rc::new([
Mixed(1, Rc::new([
1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
])),
Mixed(2048, 1, Rc::new([
Mixed(1, Rc::new([
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x8000_0000_0000_0000
])),
@ -251,10 +248,15 @@ fn chunked_bitset() {
b10000,
ChunkedBitSet {
domain_size: 10000,
chunks: Box::new([Zeros(2048), Zeros(2048), Zeros(2048), Zeros(2048), Zeros(1808),]),
chunks: Box::new([Zeros, Zeros, Zeros, Zeros, Zeros,]),
marker: PhantomData,
}
);
assert_eq!(b10000.chunk_domain_size(0), 2048);
assert_eq!(b10000.chunk_domain_size(1), 2048);
assert_eq!(b10000.chunk_domain_size(2), 2048);
assert_eq!(b10000.chunk_domain_size(3), 2048);
assert_eq!(b10000.chunk_domain_size(4), 1808);
b10000.assert_valid();
assert!(b10000.insert(3000) && b10000.insert(5000));
@ -262,17 +264,17 @@ fn chunked_bitset() {
b10000.chunks(),
#[rustfmt::skip]
vec![
Zeros(2048),
Mixed(2048, 1, Rc::new([
Zeros,
Mixed(1, Rc::new([
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x0100_0000_0000_0000, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
])),
Mixed(2048, 1, Rc::new([
Mixed(1, Rc::new([
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x0100, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
])),
Zeros(2048),
Zeros(1808),
Zeros,
Zeros,
],
);
let mut b10000b = ChunkedBitSet::<usize>::new_empty(10000);

View file

@ -989,6 +989,10 @@ impl<'tcx> InferCtxt<'tcx> {
storage.var_infos.clone()
}
pub fn has_opaque_types_in_storage(&self) -> bool {
!self.inner.borrow().opaque_type_storage.is_empty()
}
#[instrument(level = "debug", skip(self), ret)]
pub fn take_opaque_types(&self) -> Vec<(OpaqueTypeKey<'tcx>, OpaqueHiddenType<'tcx>)> {
self.inner.borrow_mut().opaque_type_storage.take_opaque_types().collect()

View file

@ -488,6 +488,9 @@ extern "C" LLVMAttributeRef
LLVMRustCreateRangeAttribute(LLVMContextRef C, unsigned NumBits,
const uint64_t LowerWords[],
const uint64_t UpperWords[]) {
// FIXME(Zalathar): There appears to be no stable guarantee that C++
// `AttrKind` values correspond directly to the `unsigned KindID` values
// accepted by LLVM-C API functions, though in practice they currently do.
return LLVMCreateConstantRangeAttribute(C, Attribute::Range, NumBits,
LowerWords, UpperWords);
}

View file

@ -2,7 +2,6 @@ use std::borrow::Cow;
use rustc_abi::Align;
use rustc_hir::attrs::{InlineAttr, InstructionSetAttr, Linkage, OptimizeAttr};
use rustc_hir::def_id::DefId;
use rustc_macros::{HashStable, TyDecodable, TyEncodable};
use rustc_span::Symbol;
use rustc_target::spec::SanitizerSet;
@ -161,6 +160,8 @@ bitflags::bitflags! {
const ALLOCATOR_ZEROED = 1 << 14;
/// `#[no_builtins]`: indicates that disable implicit builtin knowledge of functions for the function.
const NO_BUILTINS = 1 << 15;
/// Marks foreign items, to make `contains_extern_indicator` cheaper.
const FOREIGN_ITEM = 1 << 16;
}
}
rustc_data_structures::external_bitflags_debug! { CodegenFnAttrFlags }
@ -194,8 +195,8 @@ impl CodegenFnAttrs {
/// * `#[linkage]` is present
///
/// Keep this in sync with the logic for the unused_attributes for `#[inline]` lint.
pub fn contains_extern_indicator(&self, tcx: TyCtxt<'_>, did: DefId) -> bool {
if tcx.is_foreign_item(did) {
pub fn contains_extern_indicator(&self) -> bool {
if self.flags.contains(CodegenFnAttrFlags::FOREIGN_ITEM) {
return false;
}

View file

@ -149,7 +149,7 @@ impl<'tcx> MonoItem<'tcx> {
// instantiation:
// We emit an unused_attributes lint for this case, which should be kept in sync if possible.
let codegen_fn_attrs = tcx.codegen_instance_attrs(instance.def);
if codegen_fn_attrs.contains_extern_indicator(tcx, instance.def.def_id())
if codegen_fn_attrs.contains_extern_indicator()
|| codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::NAKED)
{
return InstantiationMode::GloballyShared { may_conflict: false };

View file

@ -450,6 +450,8 @@ rustc_queries! {
}
}
/// A list of all bodies inside of `key`, nested bodies are always stored
/// before their parent.
query nested_bodies_within(
key: LocalDefId
) -> &'tcx ty::List<LocalDefId> {

View file

@ -829,14 +829,15 @@ impl<'tcx> OpaqueHiddenType<'tcx> {
// Convert the type from the function into a type valid outside by mapping generic
// parameters to into the context of the opaque.
//
// We erase regions when doing this during HIR typeck.
// We erase regions when doing this during HIR typeck. We manually use `fold_regions`
// here as we do not want to anonymize bound variables.
let this = match defining_scope_kind {
DefiningScopeKind::HirTypeck => tcx.erase_regions(self),
DefiningScopeKind::HirTypeck => fold_regions(tcx, self, |_, _| tcx.lifetimes.re_erased),
DefiningScopeKind::MirBorrowck => self,
};
let result = this.fold_with(&mut opaque_types::ReverseMapper::new(tcx, map, self.span));
if cfg!(debug_assertions) && matches!(defining_scope_kind, DefiningScopeKind::HirTypeck) {
assert_eq!(result.ty, tcx.erase_regions(result.ty));
assert_eq!(result.ty, fold_regions(tcx, result.ty, |_, _| tcx.lifetimes.re_erased));
}
result
}

View file

@ -6,6 +6,7 @@ use rustc_hir as hir;
use rustc_hir::def::DefKind;
use rustc_hir::def_id::{DefId, LOCAL_CRATE};
use rustc_macros::{Decodable, Encodable, HashStable};
use rustc_span::symbol::sym;
use tracing::debug;
use crate::query::LocalCrate;
@ -239,6 +240,12 @@ pub(super) fn trait_impls_of_provider(tcx: TyCtxt<'_>, trait_id: DefId) -> Trait
/// Query provider for `incoherent_impls`.
pub(super) fn incoherent_impls_provider(tcx: TyCtxt<'_>, simp: SimplifiedType) -> &[DefId] {
if let Some(def_id) = simp.def()
&& !tcx.has_attr(def_id, sym::rustc_has_incoherent_inherent_impls)
{
return &[];
}
let mut impls = Vec::new();
for cnum in iter::once(LOCAL_CRATE).chain(tcx.crates(()).iter().copied()) {
for &impl_def_id in tcx.crate_incoherent_impls((cnum, simp)) {

View file

@ -18,7 +18,7 @@ fn cross_crate_inlinable(tcx: TyCtxt<'_>, def_id: LocalDefId) -> bool {
let codegen_fn_attrs = tcx.codegen_fn_attrs(def_id);
// If this has an extern indicator, then this function is globally shared and thus will not
// generate cgu-internal copies which would make it cross-crate inlinable.
if codegen_fn_attrs.contains_extern_indicator(tcx, def_id.into()) {
if codegen_fn_attrs.contains_extern_indicator() {
return false;
}

View file

@ -4,7 +4,6 @@ use std::ops::ControlFlow;
#[cfg(feature = "nightly")]
use rustc_macros::HashStable_NoContext;
use rustc_type_ir::data_structures::{HashMap, HashSet};
use rustc_type_ir::fast_reject::DeepRejectCtxt;
use rustc_type_ir::inherent::*;
use rustc_type_ir::relate::Relate;
use rustc_type_ir::relate::solver_relating::RelateExt;
@ -1128,6 +1127,7 @@ where
self.delegate.fetch_eligible_assoc_item(goal_trait_ref, trait_assoc_def_id, impl_def_id)
}
#[instrument(level = "debug", skip(self), ret)]
pub(super) fn register_hidden_type_in_storage(
&mut self,
opaque_type_key: ty::OpaqueTypeKey<I>,
@ -1154,29 +1154,6 @@ where
self.add_goals(GoalSource::AliasWellFormed, goals);
}
// Do something for each opaque/hidden pair defined with `def_id` in the
// current inference context.
pub(super) fn probe_existing_opaque_ty(
&mut self,
key: ty::OpaqueTypeKey<I>,
) -> Option<(ty::OpaqueTypeKey<I>, I::Ty)> {
// We shouldn't have any duplicate entries when using
// this function during `TypingMode::Analysis`.
let duplicate_entries = self.delegate.clone_duplicate_opaque_types();
assert!(duplicate_entries.is_empty(), "unexpected duplicates: {duplicate_entries:?}");
let mut matching = self.delegate.clone_opaque_types_lookup_table().into_iter().filter(
|(candidate_key, _)| {
candidate_key.def_id == key.def_id
&& DeepRejectCtxt::relate_rigid_rigid(self.cx())
.args_may_unify(candidate_key.args, key.args)
},
);
let first = matching.next();
let second = matching.next();
assert_eq!(second, None);
first
}
// Try to evaluate a const, or return `None` if the const is too generic.
// This doesn't mean the const isn't evaluatable, though, and should be treated
// as an ambiguity rather than no-solution.

View file

@ -1,13 +1,12 @@
//! Computes a normalizes-to (projection) goal for opaque types. This goal
//! behaves differently depending on the current `TypingMode`.
use rustc_index::bit_set::GrowableBitSet;
use rustc_type_ir::inherent::*;
use rustc_type_ir::solve::GoalSource;
use rustc_type_ir::{self as ty, Interner, TypingMode, fold_regions};
use crate::delegate::SolverDelegate;
use crate::solve::{Certainty, EvalCtxt, Goal, NoSolution, QueryResult, inspect};
use crate::solve::{Certainty, EvalCtxt, Goal, QueryResult};
impl<D, I> EvalCtxt<'_, D>
where
@ -39,100 +38,68 @@ where
self.add_goal(GoalSource::Misc, goal.with(cx, ty::PredicateKind::Ambiguous));
self.evaluate_added_goals_and_make_canonical_response(Certainty::Yes)
}
TypingMode::Analysis { defining_opaque_types_and_generators } => {
let Some(def_id) = opaque_ty
.def_id
.as_local()
.filter(|&def_id| defining_opaque_types_and_generators.contains(&def_id))
else {
self.structurally_instantiate_normalizes_to_term(goal, goal.predicate.alias);
return self.evaluate_added_goals_and_make_canonical_response(Certainty::Yes);
};
// FIXME: This may have issues when the args contain aliases...
match uses_unique_placeholders_ignoring_regions(self.cx(), opaque_ty.args) {
Err(NotUniqueParam::NotParam(param)) if param.is_non_region_infer() => {
return self.evaluate_added_goals_and_make_canonical_response(
Certainty::AMBIGUOUS,
);
}
Err(_) => {
return Err(NoSolution);
}
Ok(()) => {}
}
// Prefer opaques registered already.
let opaque_type_key = ty::OpaqueTypeKey { def_id, args: opaque_ty.args };
// FIXME: This also unifies the previous hidden type with the expected.
//
// If that fails, we insert `expected` as a new hidden type instead of
// eagerly emitting an error.
let existing = self.probe_existing_opaque_ty(opaque_type_key);
if let Some((candidate_key, candidate_ty)) = existing {
return self
.probe(|result| inspect::ProbeKind::OpaqueTypeStorageLookup {
result: *result,
})
.enter(|ecx| {
for (a, b) in std::iter::zip(
candidate_key.args.iter(),
opaque_type_key.args.iter(),
) {
ecx.eq(goal.param_env, a, b)?;
}
ecx.eq(goal.param_env, candidate_ty, expected)?;
ecx.add_item_bounds_for_hidden_type(
def_id.into(),
candidate_key.args,
goal.param_env,
candidate_ty,
);
ecx.evaluate_added_goals_and_make_canonical_response(Certainty::Yes)
});
}
// Otherwise, define a new opaque type
let prev = self.register_hidden_type_in_storage(opaque_type_key, expected);
assert_eq!(prev, None);
self.add_item_bounds_for_hidden_type(
def_id.into(),
opaque_ty.args,
goal.param_env,
expected,
);
self.evaluate_added_goals_and_make_canonical_response(Certainty::Yes)
TypingMode::Analysis {
defining_opaque_types_and_generators: defining_opaque_types,
}
// Very similar to `TypingMode::Analysis` with some notably differences:
// - we accept opaque types even if they have non-universal arguments
// - we do a structural lookup instead of semantically unifying regions
// - the hidden type starts out as the type from HIR typeck with fresh region
// variables instead of a fully unconstrained inference variable
TypingMode::Borrowck { defining_opaque_types } => {
| TypingMode::Borrowck { defining_opaque_types } => {
let Some(def_id) = opaque_ty
.def_id
.as_local()
.filter(|&def_id| defining_opaque_types.contains(&def_id))
else {
// If we're not in the defining scope, treat the alias as rigid.
self.structurally_instantiate_normalizes_to_term(goal, goal.predicate.alias);
return self.evaluate_added_goals_and_make_canonical_response(Certainty::Yes);
};
let opaque_type_key = ty::OpaqueTypeKey { def_id, args: opaque_ty.args };
let actual = self
.register_hidden_type_in_storage(opaque_type_key, expected)
.unwrap_or_else(|| {
let actual =
cx.type_of_opaque_hir_typeck(def_id).instantiate(cx, opaque_ty.args);
let actual = fold_regions(cx, actual, |re, _dbi| match re.kind() {
ty::ReErased => self.next_region_var(),
_ => re,
});
actual
});
self.eq(goal.param_env, expected, actual)?;
// We structurally normalize the args so that we're able to detect defining uses
// later on.
//
// This reduces the amount of duplicate definitions in the `opaque_type_storage` and
// strengthens inference. This causes us to subtly depend on the normalization behavior
// when inferring the hidden type of opaques.
//
// E.g. it's observable that we don't normalize nested aliases with bound vars in
// `structurally_normalize` and because we use structural lookup, we also don't
// reuse an entry for `Tait<for<'a> fn(&'a ())>` for `Tait<for<'b> fn(&'b ())>`.
let normalized_args =
cx.mk_args_from_iter(opaque_ty.args.iter().map(|arg| match arg.kind() {
ty::GenericArgKind::Lifetime(lt) => Ok(lt.into()),
ty::GenericArgKind::Type(ty) => {
self.structurally_normalize_ty(goal.param_env, ty).map(Into::into)
}
ty::GenericArgKind::Const(ct) => {
self.structurally_normalize_const(goal.param_env, ct).map(Into::into)
}
}))?;
let opaque_type_key = ty::OpaqueTypeKey { def_id, args: normalized_args };
if let Some(prev) = self.register_hidden_type_in_storage(opaque_type_key, expected)
{
self.eq(goal.param_env, expected, prev)?;
} else {
// During HIR typeck, opaque types start out as unconstrained
// inference variables. In borrowck we instead use the type
// computed in HIR typeck as the initial value.
match self.typing_mode() {
TypingMode::Analysis { .. } => {}
TypingMode::Borrowck { .. } => {
let actual = cx
.type_of_opaque_hir_typeck(def_id)
.instantiate(cx, opaque_ty.args);
let actual = fold_regions(cx, actual, |re, _dbi| match re.kind() {
ty::ReErased => self.next_region_var(),
_ => re,
});
self.eq(goal.param_env, expected, actual)?;
}
_ => unreachable!(),
}
}
self.add_item_bounds_for_hidden_type(
def_id.into(),
opaque_ty.args,
normalized_args,
goal.param_env,
expected,
);
@ -168,44 +135,3 @@ where
}
}
}
/// Checks whether each generic argument is simply a unique generic placeholder.
///
/// FIXME: Interner argument is needed to constrain the `I` parameter.
fn uses_unique_placeholders_ignoring_regions<I: Interner>(
_cx: I,
args: I::GenericArgs,
) -> Result<(), NotUniqueParam<I>> {
let mut seen = GrowableBitSet::default();
for arg in args.iter() {
match arg.kind() {
// Ignore regions, since we can't resolve those in a canonicalized
// query in the trait solver.
ty::GenericArgKind::Lifetime(_) => {}
ty::GenericArgKind::Type(t) => match t.kind() {
ty::Placeholder(p) => {
if !seen.insert(p.var()) {
return Err(NotUniqueParam::DuplicateParam(t.into()));
}
}
_ => return Err(NotUniqueParam::NotParam(t.into())),
},
ty::GenericArgKind::Const(c) => match c.kind() {
ty::ConstKind::Placeholder(p) => {
if !seen.insert(p.var()) {
return Err(NotUniqueParam::DuplicateParam(c.into()));
}
}
_ => return Err(NotUniqueParam::NotParam(c.into())),
},
}
}
Ok(())
}
// FIXME: This should check for dupes and non-params first, then infer vars.
enum NotUniqueParam<I: Interner> {
DuplicateParam(I::GenericArg),
NotParam(I::GenericArg),
}

View file

@ -463,8 +463,8 @@ impl<'a> Parser<'a> {
pub(super) fn expected_one_of_not_found(
&mut self,
edible: &[ExpTokenPair<'_>],
inedible: &[ExpTokenPair<'_>],
edible: &[ExpTokenPair],
inedible: &[ExpTokenPair],
) -> PResult<'a, ErrorGuaranteed> {
debug!("expected_one_of_not_found(edible: {:?}, inedible: {:?})", edible, inedible);
fn tokens_to_string(tokens: &[TokenType]) -> String {
@ -1092,7 +1092,7 @@ impl<'a> Parser<'a> {
/// Eats and discards tokens until one of `closes` is encountered. Respects token trees,
/// passes through any errors encountered. Used for error recovery.
pub(super) fn eat_to_tokens(&mut self, closes: &[ExpTokenPair<'_>]) {
pub(super) fn eat_to_tokens(&mut self, closes: &[ExpTokenPair]) {
if let Err(err) = self
.parse_seq_to_before_tokens(closes, &[], SeqSep::none(), |p| Ok(p.parse_token_tree()))
{
@ -1113,7 +1113,7 @@ impl<'a> Parser<'a> {
pub(super) fn check_trailing_angle_brackets(
&mut self,
segment: &PathSegment,
end: &[ExpTokenPair<'_>],
end: &[ExpTokenPair],
) -> Option<ErrorGuaranteed> {
if !self.may_recover() {
return None;
@ -1196,7 +1196,7 @@ impl<'a> Parser<'a> {
// second case.
if self.look_ahead(position, |t| {
trace!("check_trailing_angle_brackets: t={:?}", t);
end.iter().any(|exp| exp.tok == &t.kind)
end.iter().any(|exp| exp.tok == t.kind)
}) {
// Eat from where we started until the end token so that parsing can continue
// as if we didn't have those extra angle brackets.
@ -2120,8 +2120,8 @@ impl<'a> Parser<'a> {
pub(super) fn recover_seq_parse_error(
&mut self,
open: ExpTokenPair<'_>,
close: ExpTokenPair<'_>,
open: ExpTokenPair,
close: ExpTokenPair,
lo: Span,
err: Diag<'a>,
) -> Box<Expr> {
@ -2386,8 +2386,8 @@ impl<'a> Parser<'a> {
pub(super) fn consume_block(
&mut self,
open: ExpTokenPair<'_>,
close: ExpTokenPair<'_>,
open: ExpTokenPair,
close: ExpTokenPair,
consume_close: ConsumeClosingDelim,
) {
let mut brace_depth = 0;

View file

@ -1598,7 +1598,7 @@ impl<'a> Parser<'a> {
self.maybe_recover_from_bad_qpath(expr)
}
fn parse_expr_array_or_repeat(&mut self, close: ExpTokenPair<'_>) -> PResult<'a, Box<Expr>> {
fn parse_expr_array_or_repeat(&mut self, close: ExpTokenPair) -> PResult<'a, Box<Expr>> {
let lo = self.token.span;
self.bump(); // `[` or other open delim
@ -3661,7 +3661,7 @@ impl<'a> Parser<'a> {
&mut self,
pth: ast::Path,
recover: bool,
close: ExpTokenPair<'_>,
close: ExpTokenPair,
) -> PResult<
'a,
(
@ -3680,8 +3680,8 @@ impl<'a> Parser<'a> {
errors::HelpUseLatestEdition::new().add_to_diag(e);
};
while self.token != *close.tok {
if self.eat(exp!(DotDot)) || self.recover_struct_field_dots(close.tok) {
while self.token != close.tok {
if self.eat(exp!(DotDot)) || self.recover_struct_field_dots(&close.tok) {
let exp_span = self.prev_token.span;
// We permit `.. }` on the left-hand side of a destructuring assignment.
if self.check(close) {

View file

@ -54,7 +54,7 @@ impl<'a> Parser<'a> {
/// - `}` for mod items
pub fn parse_mod(
&mut self,
term: ExpTokenPair<'_>,
term: ExpTokenPair,
) -> PResult<'a, (AttrVec, ThinVec<Box<Item>>, ModSpans)> {
let lo = self.token.span;
let attrs = self.parse_inner_attributes()?;
@ -1201,7 +1201,7 @@ impl<'a> Parser<'a> {
}?;
let dash = exp!(Minus);
if self.token != *dash.tok {
if self.token != dash.tok {
return Ok(ident);
}

View file

@ -261,19 +261,19 @@ struct CaptureState {
/// A sequence separator.
#[derive(Debug)]
struct SeqSep<'a> {
struct SeqSep {
/// The separator token.
sep: Option<ExpTokenPair<'a>>,
sep: Option<ExpTokenPair>,
/// `true` if a trailing separator is allowed.
trailing_sep_allowed: bool,
}
impl<'a> SeqSep<'a> {
fn trailing_allowed(sep: ExpTokenPair<'a>) -> SeqSep<'a> {
impl SeqSep {
fn trailing_allowed(sep: ExpTokenPair) -> SeqSep {
SeqSep { sep: Some(sep), trailing_sep_allowed: true }
}
fn none() -> SeqSep<'a> {
fn none() -> SeqSep {
SeqSep { sep: None, trailing_sep_allowed: false }
}
}
@ -425,13 +425,13 @@ impl<'a> Parser<'a> {
}
/// Expects and consumes the token `t`. Signals an error if the next token is not `t`.
pub fn expect(&mut self, exp: ExpTokenPair<'_>) -> PResult<'a, Recovered> {
pub fn expect(&mut self, exp: ExpTokenPair) -> PResult<'a, Recovered> {
if self.expected_token_types.is_empty() {
if self.token == *exp.tok {
if self.token == exp.tok {
self.bump();
Ok(Recovered::No)
} else {
self.unexpected_try_recover(exp.tok)
self.unexpected_try_recover(&exp.tok)
}
} else {
self.expect_one_of(slice::from_ref(&exp), &[])
@ -443,13 +443,13 @@ impl<'a> Parser<'a> {
/// anything. Signal a fatal error if next token is unexpected.
fn expect_one_of(
&mut self,
edible: &[ExpTokenPair<'_>],
inedible: &[ExpTokenPair<'_>],
edible: &[ExpTokenPair],
inedible: &[ExpTokenPair],
) -> PResult<'a, Recovered> {
if edible.iter().any(|exp| exp.tok == &self.token.kind) {
if edible.iter().any(|exp| exp.tok == self.token.kind) {
self.bump();
Ok(Recovered::No)
} else if inedible.iter().any(|exp| exp.tok == &self.token.kind) {
} else if inedible.iter().any(|exp| exp.tok == self.token.kind) {
// leave it in the input
Ok(Recovered::No)
} else if self.token != token::Eof
@ -494,8 +494,8 @@ impl<'a> Parser<'a> {
/// This method will automatically add `tok` to `expected_token_types` if `tok` is not
/// encountered.
#[inline]
pub fn check(&mut self, exp: ExpTokenPair<'_>) -> bool {
let is_present = self.token == *exp.tok;
pub fn check(&mut self, exp: ExpTokenPair) -> bool {
let is_present = self.token == exp.tok;
if !is_present {
self.expected_token_types.insert(exp.token_type);
}
@ -542,7 +542,7 @@ impl<'a> Parser<'a> {
/// Consumes a token 'tok' if it exists. Returns whether the given token was present.
#[inline]
#[must_use]
pub fn eat(&mut self, exp: ExpTokenPair<'_>) -> bool {
pub fn eat(&mut self, exp: ExpTokenPair) -> bool {
let is_present = self.check(exp);
if is_present {
self.bump()
@ -745,13 +745,13 @@ impl<'a> Parser<'a> {
/// Eats the expected token if it's present possibly breaking
/// compound tokens like multi-character operators in process.
/// Returns `true` if the token was eaten.
fn break_and_eat(&mut self, exp: ExpTokenPair<'_>) -> bool {
if self.token == *exp.tok {
fn break_and_eat(&mut self, exp: ExpTokenPair) -> bool {
if self.token == exp.tok {
self.bump();
return true;
}
match self.token.kind.break_two_token_op(1) {
Some((first, second)) if first == *exp.tok => {
Some((first, second)) if first == exp.tok => {
let first_span = self.psess.source_map().start_point(self.token.span);
let second_span = self.token.span.with_lo(first_span.hi());
self.token = Token::new(first, first_span);
@ -826,7 +826,7 @@ impl<'a> Parser<'a> {
/// Checks if the next token is contained within `closes`, and returns `true` if so.
fn expect_any_with_type(
&mut self,
closes_expected: &[ExpTokenPair<'_>],
closes_expected: &[ExpTokenPair],
closes_not_expected: &[&TokenKind],
) -> bool {
closes_expected.iter().any(|&close| self.check(close))
@ -838,9 +838,9 @@ impl<'a> Parser<'a> {
/// closing bracket.
fn parse_seq_to_before_tokens<T>(
&mut self,
closes_expected: &[ExpTokenPair<'_>],
closes_expected: &[ExpTokenPair],
closes_not_expected: &[&TokenKind],
sep: SeqSep<'_>,
sep: SeqSep,
mut f: impl FnMut(&mut Parser<'a>) -> PResult<'a, T>,
) -> PResult<'a, (ThinVec<T>, Trailing, Recovered)> {
let mut first = true;
@ -869,7 +869,7 @@ impl<'a> Parser<'a> {
}
Err(mut expect_err) => {
let sp = self.prev_token.span.shrink_to_hi();
let token_str = pprust::token_kind_to_string(exp.tok);
let token_str = pprust::token_kind_to_string(&exp.tok);
match self.current_closure.take() {
Some(closure_spans) if self.token == TokenKind::Semi => {
@ -1039,8 +1039,8 @@ impl<'a> Parser<'a> {
/// closing bracket.
fn parse_seq_to_before_end<T>(
&mut self,
close: ExpTokenPair<'_>,
sep: SeqSep<'_>,
close: ExpTokenPair,
sep: SeqSep,
f: impl FnMut(&mut Parser<'a>) -> PResult<'a, T>,
) -> PResult<'a, (ThinVec<T>, Trailing, Recovered)> {
self.parse_seq_to_before_tokens(&[close], &[], sep, f)
@ -1051,8 +1051,8 @@ impl<'a> Parser<'a> {
/// closing bracket.
fn parse_seq_to_end<T>(
&mut self,
close: ExpTokenPair<'_>,
sep: SeqSep<'_>,
close: ExpTokenPair,
sep: SeqSep,
f: impl FnMut(&mut Parser<'a>) -> PResult<'a, T>,
) -> PResult<'a, (ThinVec<T>, Trailing)> {
let (val, trailing, recovered) = self.parse_seq_to_before_end(close, sep, f)?;
@ -1070,9 +1070,9 @@ impl<'a> Parser<'a> {
/// closing bracket.
fn parse_unspanned_seq<T>(
&mut self,
open: ExpTokenPair<'_>,
close: ExpTokenPair<'_>,
sep: SeqSep<'_>,
open: ExpTokenPair,
close: ExpTokenPair,
sep: SeqSep,
f: impl FnMut(&mut Parser<'a>) -> PResult<'a, T>,
) -> PResult<'a, (ThinVec<T>, Trailing)> {
self.expect(open)?;
@ -1084,8 +1084,8 @@ impl<'a> Parser<'a> {
/// closing bracket.
fn parse_delim_comma_seq<T>(
&mut self,
open: ExpTokenPair<'_>,
close: ExpTokenPair<'_>,
open: ExpTokenPair,
close: ExpTokenPair,
f: impl FnMut(&mut Parser<'a>) -> PResult<'a, T>,
) -> PResult<'a, (ThinVec<T>, Trailing)> {
self.parse_unspanned_seq(open, close, SeqSep::trailing_allowed(exp!(Comma)), f)

View file

@ -416,8 +416,8 @@ impl TokenType {
/// is always by used those methods. The second field is only used when the
/// first field doesn't match.
#[derive(Clone, Copy, Debug)]
pub struct ExpTokenPair<'a> {
pub tok: &'a TokenKind,
pub struct ExpTokenPair {
pub tok: TokenKind,
pub token_type: TokenType,
}
@ -444,7 +444,7 @@ macro_rules! exp {
// `ExpTokenPair` helper rules.
(@tok, $tok:ident) => {
$crate::parser::token_type::ExpTokenPair {
tok: &rustc_ast::token::$tok,
tok: rustc_ast::token::$tok,
token_type: $crate::parser::token_type::TokenType::$tok
}
};

View file

@ -462,8 +462,10 @@ passes_object_lifetime_err =
{$repr}
passes_outer_crate_level_attr =
crate-level attribute should be an inner attribute: add an exclamation mark: `#![foo]`
crate-level attribute should be an inner attribute
passes_outer_crate_level_attr_suggestion =
add a `!`
passes_panic_unwind_without_std =
unwinding panics are not supported without std

View file

@ -369,24 +369,7 @@ impl<'tcx> CheckAttrVisitor<'tcx> {
if hir_id != CRATE_HIR_ID {
match attr {
// FIXME(jdonszelmann) move to attribute parsing when validation gets better there
&Attribute::Parsed(AttributeKind::CrateName {
attr_span: span, style, ..
}) => match style {
ast::AttrStyle::Outer => self.tcx.emit_node_span_lint(
UNUSED_ATTRIBUTES,
hir_id,
span,
errors::OuterCrateLevelAttr,
),
ast::AttrStyle::Inner => self.tcx.emit_node_span_lint(
UNUSED_ATTRIBUTES,
hir_id,
span,
errors::InnerCrateLevelAttr,
),
},
Attribute::Parsed(_) => { /* not crate-level */ }
Attribute::Parsed(_) => { /* Already validated. */ }
Attribute::Unparsed(attr) => {
// FIXME(jdonszelmann): remove once all crate-level attrs are parsed and caught by
// the above
@ -397,12 +380,26 @@ impl<'tcx> CheckAttrVisitor<'tcx> {
.and_then(|ident| BUILTIN_ATTRIBUTE_MAP.get(&ident.name))
{
match attr.style {
ast::AttrStyle::Outer => self.tcx.emit_node_span_lint(
UNUSED_ATTRIBUTES,
hir_id,
attr.span,
errors::OuterCrateLevelAttr,
),
ast::AttrStyle::Outer => {
let attr_span = attr.span;
let bang_position = self
.tcx
.sess
.source_map()
.span_until_char(attr_span, '[')
.shrink_to_hi();
self.tcx.emit_node_span_lint(
UNUSED_ATTRIBUTES,
hir_id,
attr.span,
errors::OuterCrateLevelAttr {
suggestion: errors::OuterCrateLevelAttrSuggestion {
bang_position,
},
},
)
}
ast::AttrStyle::Inner => self.tcx.emit_node_span_lint(
UNUSED_ATTRIBUTES,
hir_id,
@ -495,7 +492,7 @@ impl<'tcx> CheckAttrVisitor<'tcx> {
{
let attrs = self.tcx.codegen_fn_attrs(did);
// Not checking naked as `#[inline]` is forbidden for naked functions anyways.
if attrs.contains_extern_indicator(self.tcx, did.into()) {
if attrs.contains_extern_indicator() {
self.tcx.emit_node_span_lint(
UNUSED_ATTRIBUTES,
hir_id,
@ -1851,12 +1848,24 @@ impl<'tcx> CheckAttrVisitor<'tcx> {
{
if hir_id != CRATE_HIR_ID {
match style {
Some(ast::AttrStyle::Outer) => self.tcx.emit_node_span_lint(
UNUSED_ATTRIBUTES,
hir_id,
attr.span(),
errors::OuterCrateLevelAttr,
),
Some(ast::AttrStyle::Outer) => {
let attr_span = attr.span();
let bang_position = self
.tcx
.sess
.source_map()
.span_until_char(attr_span, '[')
.shrink_to_hi();
self.tcx.emit_node_span_lint(
UNUSED_ATTRIBUTES,
hir_id,
attr_span,
errors::OuterCrateLevelAttr {
suggestion: errors::OuterCrateLevelAttrSuggestion { bang_position },
},
)
}
Some(ast::AttrStyle::Inner) | None => self.tcx.emit_node_span_lint(
UNUSED_ATTRIBUTES,
hir_id,

Some files were not shown because too many files have changed in this diff Show more