Merge pull request #4449 from rust-lang/rustup-2025-07-07

Automatic Rustup
This commit is contained in:
Ralf Jung 2025-07-07 05:31:07 +00:00 committed by GitHub
commit 51cba7839a
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
356 changed files with 4275 additions and 2706 deletions

View file

@ -1947,6 +1947,17 @@ dependencies = [
"unic-langid",
]
[[package]]
name = "io-uring"
version = "0.7.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b86e202f00093dcba4275d4636b93ef9dd75d025ae560d2521b45ea28ab49013"
dependencies = [
"bitflags",
"cfg-if",
"libc",
]
[[package]]
name = "ipc-channel"
version = "0.19.0"
@ -2058,9 +2069,9 @@ dependencies = [
[[package]]
name = "jsonpath-rust"
version = "1.0.2"
version = "1.0.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5b37465feaf9d41f74df7da98c6c1c31ca8ea06d11b5bf7869c8f1ccc51a793f"
checksum = "7d057f8fd19e20c3f14d3663983397155739b6bc1148dc5cd4c4a1a5b3130eb0"
dependencies = [
"pest",
"pest_derive",
@ -2117,9 +2128,9 @@ dependencies = [
[[package]]
name = "libffi"
version = "4.1.0"
version = "4.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ebfd30a67b482a08116e753d0656cb626548cf4242543e5cc005be7639d99838"
checksum = "e7681c6fab541f799a829e44a445a0666cf8d8a6cfebf89419e6aed52c604e87"
dependencies = [
"libc",
"libffi-sys",
@ -2127,9 +2138,9 @@ dependencies = [
[[package]]
name = "libffi-sys"
version = "3.3.1"
version = "3.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f003aa318c9f0ee69eb0ada7c78f5c9d2fedd2ceb274173b5c7ff475eee584a3"
checksum = "7b0d828d367b4450ed08e7d510dc46636cd660055f50d67ac943bfe788767c29"
dependencies = [
"cc",
]
@ -5463,13 +5474,17 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20"
[[package]]
name = "tokio"
version = "1.45.1"
version = "1.46.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "75ef51a33ef1da925cea3e4eb122833cb377c61439ca401b770f54902b806779"
checksum = "0cc3a2344dafbe23a245241fe8b09735b521110d30fcefbbd5feb1797ca35d17"
dependencies = [
"backtrace",
"bytes",
"io-uring",
"libc",
"mio",
"pin-project-lite",
"slab",
]
[[package]]
@ -5995,9 +6010,9 @@ dependencies = [
[[package]]
name = "wasm-component-ld"
version = "0.5.14"
version = "0.5.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b015ec93764aa5517bc8b839efa9941b90be8ce680b1134f8224644ba1e48e3f"
checksum = "6d95124e34fee1316222e03b9bbf41af186ecbae2c8b79f8debe6e21b3ff60c5"
dependencies = [
"anyhow",
"clap",

View file

@ -527,8 +527,7 @@ impl Size {
/// not a multiple of 8.
pub fn from_bits(bits: impl TryInto<u64>) -> Size {
let bits = bits.try_into().ok().unwrap();
// Avoid potential overflow from `bits + 7`.
Size { raw: bits / 8 + ((bits % 8) + 7) / 8 }
Size { raw: bits.div_ceil(8) }
}
#[inline]

View file

@ -1202,9 +1202,10 @@ macro_rules! common_visitor_and_walkers {
let TyPat { id, kind, span, tokens: _ } = tp;
try_visit!(visit_id(vis, id));
match kind {
TyPatKind::Range(start, end, _include_end) => {
TyPatKind::Range(start, end, Spanned { span, node: _include_end }) => {
visit_opt!(vis, visit_anon_const, start);
visit_opt!(vis, visit_anon_const, end);
try_visit!(visit_span(vis, span));
}
TyPatKind::Or(variants) => walk_list!(vis, visit_ty_pat, variants),
TyPatKind::Err(_) => {}
@ -1523,16 +1524,26 @@ macro_rules! common_visitor_and_walkers {
}
pub fn walk_inline_asm<$($lt,)? V: $Visitor$(<$lt>)?>(vis: &mut V, asm: &$($lt)? $($mut)? InlineAsm) -> V::Result {
// FIXME: Visit spans inside all this currently ignored stuff.
let InlineAsm {
asm_macro: _,
template: _,
template_strs: _,
template,
template_strs,
operands,
clobber_abis: _,
clobber_abis,
options: _,
line_spans: _,
line_spans,
} = asm;
for piece in template {
match piece {
InlineAsmTemplatePiece::String(_str) => {}
InlineAsmTemplatePiece::Placeholder { operand_idx: _, modifier: _, span } => {
try_visit!(visit_span(vis, span));
}
}
}
for (_s1, _s2, span) in template_strs {
try_visit!(visit_span(vis, span));
}
for (op, span) in operands {
match op {
InlineAsmOperand::In { expr, reg: _ }
@ -1553,6 +1564,12 @@ macro_rules! common_visitor_and_walkers {
}
try_visit!(visit_span(vis, span));
}
for (_s1, span) in clobber_abis {
try_visit!(visit_span(vis, span))
}
for span in line_spans {
try_visit!(visit_span(vis, span))
}
V::Result::output()
}
@ -1565,9 +1582,9 @@ macro_rules! common_visitor_and_walkers {
vis.visit_path(path)
}
// FIXME: visit the template exhaustively.
pub fn walk_format_args<$($lt,)? V: $Visitor$(<$lt>)?>(vis: &mut V, fmt: &$($lt)? $($mut)? FormatArgs) -> V::Result {
let FormatArgs { span, template: _, arguments, uncooked_fmt_str: _, is_source_literal: _ } = fmt;
let FormatArgs { span, template, arguments, uncooked_fmt_str: _, is_source_literal: _ } = fmt;
let args = $(${ignore($mut)} arguments.all_args_mut())? $(${ignore($lt)} arguments.all_args())? ;
for FormatArgument { kind, expr } in args {
match kind {
@ -1578,9 +1595,58 @@ macro_rules! common_visitor_and_walkers {
}
try_visit!(vis.visit_expr(expr));
}
for piece in template {
match piece {
FormatArgsPiece::Literal(_symbol) => {}
FormatArgsPiece::Placeholder(placeholder) => try_visit!(walk_format_placeholder(vis, placeholder)),
}
}
visit_span(vis, span)
}
fn walk_format_placeholder<$($lt,)? V: $Visitor$(<$lt>)?>(
vis: &mut V,
placeholder: &$($lt)? $($mut)? FormatPlaceholder,
) -> V::Result {
let FormatPlaceholder { argument, span, format_options, format_trait: _ } = placeholder;
if let Some(span) = span {
try_visit!(visit_span(vis, span));
}
let FormatArgPosition { span, index: _, kind: _ } = argument;
if let Some(span) = span {
try_visit!(visit_span(vis, span));
}
let FormatOptions {
width,
precision,
alignment: _,
fill: _,
sign: _,
alternate: _,
zero_pad: _,
debug_hex: _,
} = format_options;
match width {
None => {}
Some(FormatCount::Literal(_)) => {}
Some(FormatCount::Argument(FormatArgPosition { span, index: _, kind: _ })) => {
if let Some(span) = span {
try_visit!(visit_span(vis, span));
}
}
}
match precision {
None => {}
Some(FormatCount::Literal(_)) => {}
Some(FormatCount::Argument(FormatArgPosition { span, index: _, kind: _ })) => {
if let Some(span) = span {
try_visit!(visit_span(vis, span));
}
}
}
V::Result::output()
}
pub fn walk_expr<$($lt,)? V: $Visitor$(<$lt>)?>(vis: &mut V, expression: &$($lt)? $($mut)? Expr) -> V::Result {
let Expr { id, kind, span, attrs, tokens: _ } = expression;
try_visit!(visit_id(vis, id));
@ -1601,7 +1667,7 @@ macro_rules! common_visitor_and_walkers {
try_visit!(visit_expr_fields(vis, fields));
match rest {
StructRest::Base(expr) => try_visit!(vis.visit_expr(expr)),
StructRest::Rest(_span) => {}
StructRest::Rest(span) => try_visit!(visit_span(vis, span)),
StructRest::None => {}
}
}
@ -1688,7 +1754,8 @@ macro_rules! common_visitor_and_walkers {
visit_opt!(vis, visit_label, opt_label);
try_visit!(vis.visit_block(block));
}
ExprKind::Gen(_capt, body, _kind, decl_span) => {
ExprKind::Gen(capture_clause, body, _kind, decl_span) => {
try_visit!(vis.visit_capture_by(capture_clause));
try_visit!(vis.visit_block(body));
try_visit!(visit_span(vis, decl_span));
}
@ -1705,9 +1772,10 @@ macro_rules! common_visitor_and_walkers {
try_visit!(vis.visit_expr(rhs));
try_visit!(visit_span(vis, span));
}
ExprKind::AssignOp(_op, left_expression, right_expression) => {
ExprKind::AssignOp(Spanned { span, node: _ }, left_expression, right_expression) => {
try_visit!(vis.visit_expr(left_expression));
try_visit!(vis.visit_expr(right_expression));
try_visit!(visit_span(vis, span));
}
ExprKind::Field(subexpression, ident) => {
try_visit!(vis.visit_expr(subexpression));

View file

@ -67,8 +67,6 @@ pub enum ReprAttr {
ReprSimd,
ReprTransparent,
ReprAlign(Align),
// this one is just so we can emit a lint for it
ReprEmpty,
}
pub use ReprAttr::*;
@ -250,6 +248,13 @@ pub enum AttributeKind {
span: Span,
},
/// Represents `#[ignore]`
Ignore {
span: Span,
/// ignore can optionally have a reason: `#[ignore = "reason this is ignored"]`
reason: Option<Symbol>,
},
/// Represents `#[inline]` and `#[rustc_force_inline]`.
Inline(InlineAttr, Span),
@ -297,7 +302,7 @@ pub enum AttributeKind {
PubTransparent(Span),
/// Represents [`#[repr]`](https://doc.rust-lang.org/stable/reference/type-layout.html#representations).
Repr(ThinVec<(ReprAttr, Span)>),
Repr { reprs: ThinVec<(ReprAttr, Span)>, first_span: Span },
/// Represents `#[rustc_layout_scalar_valid_range_end]`.
RustcLayoutScalarValidRangeEnd(Box<u128>, Span),

View file

@ -26,6 +26,7 @@ impl AttributeKind {
Deprecation { .. } => Yes,
DocComment { .. } => Yes,
ExportName { .. } => Yes,
Ignore { .. } => No,
Inline(..) => No,
LinkName { .. } => Yes,
LinkSection { .. } => No,
@ -40,7 +41,7 @@ impl AttributeKind {
Optimize(..) => No,
PassByValue(..) => Yes,
PubTransparent(..) => Yes,
Repr(..) => No,
Repr { .. } => No,
RustcLayoutScalarValidRangeEnd(..) => Yes,
RustcLayoutScalarValidRangeStart(..) => Yes,
RustcObjectLifetimeDefault => No,

View file

@ -12,4 +12,5 @@ pub struct AttributeLint<Id> {
pub enum AttributeLintKind {
UnusedDuplicate { this: Span, other: Span, warning: bool },
IllFormedAttributeInput { suggestions: Vec<String> },
EmptyAttribute { first_span: Span },
}

View file

@ -6,6 +6,10 @@ attr_parsing_deprecated_item_suggestion =
.help = add `#![feature(deprecated_suggestion)]` to the crate root
.note = see #94785 for more details
attr_parsing_empty_attribute =
unused attribute
.suggestion = remove this attribute
attr_parsing_empty_confusables =
expected at least one confusable name
attr_parsing_expected_one_cfg_pattern =

View file

@ -298,6 +298,10 @@ impl<S: Stage> CombineAttributeParser<S> for TargetFeatureParser {
cx.expected_list(cx.attr_span);
return features;
};
if list.is_empty() {
cx.warn_empty_attribute(cx.attr_span);
return features;
}
for item in list.mixed() {
let Some(name_value) = item.meta_item() else {
cx.expected_name_value(item.span(), Some(sym::enable));

View file

@ -41,6 +41,7 @@ pub(crate) mod repr;
pub(crate) mod rustc_internal;
pub(crate) mod semantics;
pub(crate) mod stability;
pub(crate) mod test_attrs;
pub(crate) mod traits;
pub(crate) mod transparency;
pub(crate) mod util;

View file

@ -23,7 +23,8 @@ pub(crate) struct ReprParser;
impl<S: Stage> CombineAttributeParser<S> for ReprParser {
type Item = (ReprAttr, Span);
const PATH: &[Symbol] = &[sym::repr];
const CONVERT: ConvertFn<Self::Item> = |items, _| AttributeKind::Repr(items);
const CONVERT: ConvertFn<Self::Item> =
|items, first_span| AttributeKind::Repr { reprs: items, first_span };
// FIXME(jdonszelmann): never used
const TEMPLATE: AttributeTemplate =
template!(List: "C | Rust | align(...) | packed(...) | <integer type> | transparent");
@ -40,8 +41,8 @@ impl<S: Stage> CombineAttributeParser<S> for ReprParser {
};
if list.is_empty() {
// this is so validation can emit a lint
reprs.push((ReprAttr::ReprEmpty, cx.attr_span));
cx.warn_empty_attribute(cx.attr_span);
return reprs;
}
for param in list.mixed() {

View file

@ -0,0 +1,46 @@
use rustc_attr_data_structures::AttributeKind;
use rustc_attr_data_structures::lints::AttributeLintKind;
use rustc_feature::{AttributeTemplate, template};
use rustc_span::{Symbol, sym};
use crate::attributes::{AttributeOrder, OnDuplicate, SingleAttributeParser};
use crate::context::{AcceptContext, Stage};
use crate::parser::ArgParser;
/// Parser for the built-in `#[ignore]` test attribute.
pub(crate) struct IgnoreParser;

impl<S: Stage> SingleAttributeParser<S> for IgnoreParser {
    const PATH: &[Symbol] = &[sym::ignore];
    const ATTRIBUTE_ORDER: AttributeOrder = AttributeOrder::KeepLast;
    const ON_DUPLICATE: OnDuplicate<S> = OnDuplicate::Warn;
    const TEMPLATE: AttributeTemplate = template!(Word, NameValueStr: "reason");

    fn convert(cx: &mut AcceptContext<'_, '_, S>, args: &ArgParser<'_>) -> Option<AttributeKind> {
        // Emit the ill-formed-input lint on the whole attribute span. Used for
        // every argument shape other than `#[ignore]` / `#[ignore = "reason"]`.
        let reject = |cx: &mut AcceptContext<'_, '_, S>| {
            let suggestions =
                <Self as SingleAttributeParser<S>>::TEMPLATE.suggestions(false, "ignore");
            let span = cx.attr_span;
            cx.emit_lint(AttributeLintKind::IllFormedAttributeInput { suggestions }, span);
        };

        let reason = match args {
            // Bare `#[ignore]`: no reason given.
            ArgParser::NoArgs => None,
            // `#[ignore = ...]`: the value must be a string literal.
            ArgParser::NameValue(name_value) => match name_value.value_as_str() {
                Some(str_value) => Some(str_value),
                None => {
                    reject(cx);
                    return None;
                }
            },
            // `#[ignore(...)]` is not a supported form.
            ArgParser::List(_) => {
                reject(cx);
                return None;
            }
        };

        Some(AttributeKind::Ignore { span: cx.attr_span, reason })
    }
}

View file

@ -37,6 +37,7 @@ use crate::attributes::semantics::MayDangleParser;
use crate::attributes::stability::{
BodyStabilityParser, ConstStabilityIndirectParser, ConstStabilityParser, StabilityParser,
};
use crate::attributes::test_attrs::IgnoreParser;
use crate::attributes::traits::SkipDuringMethodDispatchParser;
use crate::attributes::transparency::TransparencyParser;
use crate::attributes::{AttributeParser as _, Combine, Single, WithoutArgs};
@ -126,6 +127,7 @@ attribute_parsers!(
// tidy-alphabetical-start
Single<DeprecationParser>,
Single<ExportNameParser>,
Single<IgnoreParser>,
Single<InlineParser>,
Single<LinkNameParser>,
Single<LinkSectionParser>,
@ -163,6 +165,7 @@ mod private {
#[allow(private_interfaces)]
pub trait Stage: Sized + 'static + Sealed {
type Id: Copy;
const SHOULD_EMIT_LINTS: bool;
fn parsers() -> &'static group_type!(Self);
@ -173,6 +176,7 @@ pub trait Stage: Sized + 'static + Sealed {
#[allow(private_interfaces)]
impl Stage for Early {
type Id = NodeId;
const SHOULD_EMIT_LINTS: bool = false;
fn parsers() -> &'static group_type!(Self) {
&early::ATTRIBUTE_PARSERS
@ -186,6 +190,7 @@ impl Stage for Early {
#[allow(private_interfaces)]
impl Stage for Late {
type Id = HirId;
const SHOULD_EMIT_LINTS: bool = true;
fn parsers() -> &'static group_type!(Self) {
&late::ATTRIBUTE_PARSERS
@ -226,6 +231,9 @@ impl<'f, 'sess: 'f, S: Stage> SharedContext<'f, 'sess, S> {
/// must be delayed until after HIR is built. This method will take care of the details of
/// that.
pub(crate) fn emit_lint(&mut self, lint: AttributeLintKind, span: Span) {
if !S::SHOULD_EMIT_LINTS {
return;
}
let id = self.target_id;
(self.emit_lint)(AttributeLint { id, span, kind: lint });
}
@ -407,6 +415,10 @@ impl<'f, 'sess: 'f, S: Stage> AcceptContext<'f, 'sess, S> {
},
})
}
pub(crate) fn warn_empty_attribute(&mut self, span: Span) {
self.emit_lint(AttributeLintKind::EmptyAttribute { first_span: span }, span);
}
}
impl<'f, 'sess, S: Stage> Deref for AcceptContext<'f, 'sess, S> {

View file

@ -28,5 +28,11 @@ pub fn emit_attribute_lint<L: LintEmitter>(lint: &AttributeLint<HirId>, lint_emi
},
);
}
AttributeLintKind::EmptyAttribute { first_span } => lint_emitter.emit_node_span_lint(
rustc_session::lint::builtin::UNUSED_ATTRIBUTES,
*id,
*first_span,
session_diagnostics::EmptyAttributeList { attr_span: *first_span },
),
}
}

View file

@ -473,6 +473,13 @@ pub(crate) struct EmptyConfusables {
pub span: Span,
}
#[derive(LintDiagnostic)]
#[diag(attr_parsing_empty_attribute)]
pub(crate) struct EmptyAttributeList {
#[suggestion(code = "", applicability = "machine-applicable")]
pub attr_span: Span,
}
#[derive(Diagnostic)]
#[diag(attr_parsing_invalid_alignment_value, code = E0589)]
pub(crate) struct InvalidAlignmentValue {

View file

@ -4194,7 +4194,9 @@ impl<'infcx, 'tcx> MirBorrowckCtxt<'_, 'infcx, 'tcx> {
// anything.
let return_ty = sig.output();
match return_ty.skip_binder().kind() {
ty::Ref(return_region, _, _) if return_region.has_name() && !is_closure => {
ty::Ref(return_region, _, _)
if return_region.is_named(self.infcx.tcx) && !is_closure =>
{
// This is case 1 from above, return type is a named reference so we need to
// search for relevant arguments.
let mut arguments = Vec::new();

View file

@ -852,7 +852,8 @@ impl<'infcx, 'tcx> MirBorrowckCtxt<'_, 'infcx, 'tcx> {
return;
};
let lifetime = if f.has_name() { fr_name.name } else { kw::UnderscoreLifetime };
let lifetime =
if f.is_named(self.infcx.tcx) { fr_name.name } else { kw::UnderscoreLifetime };
let arg = match param.param.pat.simple_ident() {
Some(simple_ident) => format!("argument `{simple_ident}`"),

View file

@ -289,7 +289,7 @@ impl<'tcx> MirBorrowckCtxt<'_, '_, 'tcx> {
debug!("give_region_a_name: error_region = {:?}", error_region);
match error_region.kind() {
ty::ReEarlyParam(ebr) => ebr.has_name().then(|| {
ty::ReEarlyParam(ebr) => ebr.is_named().then(|| {
let def_id = tcx.generics_of(self.mir_def_id()).region_param(ebr, tcx).def_id;
let span = tcx.hir_span_if_local(def_id).unwrap_or(DUMMY_SP);
RegionName { name: ebr.name, source: RegionNameSource::NamedEarlyParamRegion(span) }
@ -300,16 +300,11 @@ impl<'tcx> MirBorrowckCtxt<'_, '_, 'tcx> {
}
ty::ReLateParam(late_param) => match late_param.kind {
ty::LateParamRegionKind::Named(region_def_id, name) => {
ty::LateParamRegionKind::Named(region_def_id) => {
// Get the span to point to, even if we don't use the name.
let span = tcx.hir_span_if_local(region_def_id).unwrap_or(DUMMY_SP);
debug!(
"bound region named: {:?}, is_named: {:?}",
name,
late_param.kind.is_named()
);
if late_param.kind.is_named() {
if let Some(name) = late_param.kind.get_name(tcx) {
// A named region that is actually named.
Some(RegionName {
name,
@ -369,6 +364,7 @@ impl<'tcx> MirBorrowckCtxt<'_, '_, 'tcx> {
}
ty::LateParamRegionKind::Anon(_) => None,
ty::LateParamRegionKind::NamedAnon(_, _) => bug!("only used for pretty printing"),
},
ty::ReBound(..)
@ -899,7 +895,7 @@ impl<'tcx> MirBorrowckCtxt<'_, '_, 'tcx> {
let ty::ReEarlyParam(region) = self.to_error_region(fr)?.kind() else {
return None;
};
if region.has_name() {
if region.is_named() {
return None;
};
@ -934,7 +930,7 @@ impl<'tcx> MirBorrowckCtxt<'_, '_, 'tcx> {
let ty::ReEarlyParam(region) = self.to_error_region(fr)?.kind() else {
return None;
};
if region.has_name() {
if region.is_named() {
return None;
};

View file

@ -232,13 +232,13 @@ pub(super) fn dump_nll_mir<'tcx>(
// Also dump the region constraint graph as a graphviz file.
let _: io::Result<()> = try {
let mut file = create_dump_file(tcx, "regioncx.all.dot", false, "nll", &0, body)?;
regioncx.dump_graphviz_raw_constraints(&mut file)?;
regioncx.dump_graphviz_raw_constraints(tcx, &mut file)?;
};
// Also dump the region constraint SCC graph as a graphviz file.
let _: io::Result<()> = try {
let mut file = create_dump_file(tcx, "regioncx.scc.dot", false, "nll", &0, body)?;
regioncx.dump_graphviz_scc_constraints(&mut file)?;
regioncx.dump_graphviz_scc_constraints(tcx, &mut file)?;
};
}

View file

@ -116,7 +116,7 @@ fn emit_polonius_dump<'tcx>(
writeln!(out, "<div>")?;
writeln!(out, "NLL regions")?;
writeln!(out, "<pre class='mermaid'>")?;
emit_mermaid_nll_regions(regioncx, out)?;
emit_mermaid_nll_regions(tcx, regioncx, out)?;
writeln!(out, "</pre>")?;
writeln!(out, "</div>")?;
@ -124,7 +124,7 @@ fn emit_polonius_dump<'tcx>(
writeln!(out, "<div>")?;
writeln!(out, "NLL SCCs")?;
writeln!(out, "<pre class='mermaid'>")?;
emit_mermaid_nll_sccs(regioncx, out)?;
emit_mermaid_nll_sccs(tcx, regioncx, out)?;
writeln!(out, "</pre>")?;
writeln!(out, "</div>")?;
@ -306,9 +306,10 @@ fn emit_mermaid_cfg(body: &Body<'_>, out: &mut dyn io::Write) -> io::Result<()>
}
/// Emits a region's label: index, universe, external name.
fn render_region(
fn render_region<'tcx>(
tcx: TyCtxt<'tcx>,
region: RegionVid,
regioncx: &RegionInferenceContext<'_>,
regioncx: &RegionInferenceContext<'tcx>,
out: &mut dyn io::Write,
) -> io::Result<()> {
let def = regioncx.region_definition(region);
@ -318,7 +319,7 @@ fn render_region(
if !universe.is_root() {
write!(out, "/{universe:?}")?;
}
if let Some(name) = def.external_name.and_then(|e| e.get_name()) {
if let Some(name) = def.external_name.and_then(|e| e.get_name(tcx)) {
write!(out, " ({name})")?;
}
Ok(())
@ -327,6 +328,7 @@ fn render_region(
/// Emits a mermaid flowchart of the NLL regions and the outlives constraints between them, similar
/// to the graphviz version.
fn emit_mermaid_nll_regions<'tcx>(
tcx: TyCtxt<'tcx>,
regioncx: &RegionInferenceContext<'tcx>,
out: &mut dyn io::Write,
) -> io::Result<()> {
@ -336,7 +338,7 @@ fn emit_mermaid_nll_regions<'tcx>(
// Emit the region nodes.
for region in regioncx.definitions.indices() {
write!(out, "{}[\"", region.as_usize())?;
render_region(region, regioncx, out)?;
render_region(tcx, region, regioncx, out)?;
writeln!(out, "\"]")?;
}
@ -378,6 +380,7 @@ fn emit_mermaid_nll_regions<'tcx>(
/// Emits a mermaid flowchart of the NLL SCCs and the outlives constraints between them, similar
/// to the graphviz version.
fn emit_mermaid_nll_sccs<'tcx>(
tcx: TyCtxt<'tcx>,
regioncx: &RegionInferenceContext<'tcx>,
out: &mut dyn io::Write,
) -> io::Result<()> {
@ -395,7 +398,7 @@ fn emit_mermaid_nll_sccs<'tcx>(
// The node label: the regions contained in the SCC.
write!(out, "{scc}[\"SCC({scc}) = {{", scc = scc.as_usize())?;
for (idx, &region) in regions.iter().enumerate() {
render_region(region, regioncx, out)?;
render_region(tcx, region, regioncx, out)?;
if idx < regions.len() - 1 {
write!(out, ",")?;
}

View file

@ -109,6 +109,6 @@ impl PoloniusLocationTable {
impl LocationIndex {
fn is_start(self) -> bool {
// even indices are start points; odd indices are mid points
(self.index() % 2) == 0
self.index().is_multiple_of(2)
}
}

View file

@ -26,11 +26,15 @@ fn render_universe(u: UniverseIndex) -> String {
format!("/{:?}", u)
}
fn render_region_vid(rvid: RegionVid, regioncx: &RegionInferenceContext<'_>) -> String {
fn render_region_vid<'tcx>(
tcx: TyCtxt<'tcx>,
rvid: RegionVid,
regioncx: &RegionInferenceContext<'tcx>,
) -> String {
let universe_str = render_universe(regioncx.region_definition(rvid).universe);
let external_name_str = if let Some(external_name) =
regioncx.region_definition(rvid).external_name.and_then(|e| e.get_name())
regioncx.region_definition(rvid).external_name.and_then(|e| e.get_name(tcx))
{
format!(" ({external_name})")
} else {
@ -42,12 +46,20 @@ fn render_region_vid(rvid: RegionVid, regioncx: &RegionInferenceContext<'_>) ->
impl<'tcx> RegionInferenceContext<'tcx> {
/// Write out the region constraint graph.
pub(crate) fn dump_graphviz_raw_constraints(&self, mut w: &mut dyn Write) -> io::Result<()> {
dot::render(&RawConstraints { regioncx: self }, &mut w)
pub(crate) fn dump_graphviz_raw_constraints(
&self,
tcx: TyCtxt<'tcx>,
mut w: &mut dyn Write,
) -> io::Result<()> {
dot::render(&RawConstraints { tcx, regioncx: self }, &mut w)
}
/// Write out the region constraint SCC graph.
pub(crate) fn dump_graphviz_scc_constraints(&self, mut w: &mut dyn Write) -> io::Result<()> {
pub(crate) fn dump_graphviz_scc_constraints(
&self,
tcx: TyCtxt<'tcx>,
mut w: &mut dyn Write,
) -> io::Result<()> {
let mut nodes_per_scc: IndexVec<ConstraintSccIndex, _> =
self.constraint_sccs.all_sccs().map(|_| Vec::new()).collect();
@ -56,11 +68,12 @@ impl<'tcx> RegionInferenceContext<'tcx> {
nodes_per_scc[scc].push(region);
}
dot::render(&SccConstraints { regioncx: self, nodes_per_scc }, &mut w)
dot::render(&SccConstraints { tcx, regioncx: self, nodes_per_scc }, &mut w)
}
}
struct RawConstraints<'a, 'tcx> {
tcx: TyCtxt<'tcx>,
regioncx: &'a RegionInferenceContext<'tcx>,
}
@ -78,7 +91,7 @@ impl<'a, 'this, 'tcx> dot::Labeller<'this> for RawConstraints<'a, 'tcx> {
Some(dot::LabelText::LabelStr(Cow::Borrowed("box")))
}
fn node_label(&'this self, n: &RegionVid) -> dot::LabelText<'this> {
dot::LabelText::LabelStr(render_region_vid(*n, self.regioncx).into())
dot::LabelText::LabelStr(render_region_vid(self.tcx, *n, self.regioncx).into())
}
fn edge_label(&'this self, e: &OutlivesConstraint<'tcx>) -> dot::LabelText<'this> {
dot::LabelText::LabelStr(render_outlives_constraint(e).into())
@ -110,6 +123,7 @@ impl<'a, 'this, 'tcx> dot::GraphWalk<'this> for RawConstraints<'a, 'tcx> {
}
struct SccConstraints<'a, 'tcx> {
tcx: TyCtxt<'tcx>,
regioncx: &'a RegionInferenceContext<'tcx>,
nodes_per_scc: IndexVec<ConstraintSccIndex, Vec<RegionVid>>,
}
@ -128,8 +142,10 @@ impl<'a, 'this, 'tcx> dot::Labeller<'this> for SccConstraints<'a, 'tcx> {
Some(dot::LabelText::LabelStr(Cow::Borrowed("box")))
}
fn node_label(&'this self, n: &ConstraintSccIndex) -> dot::LabelText<'this> {
let nodes_str =
self.nodes_per_scc[*n].iter().map(|n| render_region_vid(*n, self.regioncx)).join(", ");
let nodes_str = self.nodes_per_scc[*n]
.iter()
.map(|n| render_region_vid(self.tcx, *n, self.regioncx))
.join(", ");
dot::LabelText::LabelStr(format!("SCC({n}) = {{{nodes_str}}}", n = n.as_usize()).into())
}
}

View file

@ -786,8 +786,11 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
let region_ctxt_fn = || {
let reg_info = match br.kind {
ty::BoundRegionKind::Anon => sym::anon,
ty::BoundRegionKind::Named(_, name) => name,
ty::BoundRegionKind::Named(def_id) => tcx.item_name(def_id),
ty::BoundRegionKind::ClosureEnv => sym::env,
ty::BoundRegionKind::NamedAnon(_) => {
bug!("only used for pretty printing")
}
};
RegionCtxt::LateBound(reg_info)

View file

@ -7,11 +7,11 @@ use rustc_infer::infer::{InferCtxt, NllRegionVariableOrigin};
use rustc_infer::traits::Obligation;
use rustc_infer::traits::solve::Goal;
use rustc_middle::mir::ConstraintCategory;
use rustc_middle::span_bug;
use rustc_middle::traits::ObligationCause;
use rustc_middle::traits::query::NoSolution;
use rustc_middle::ty::relate::combine::{super_combine_consts, super_combine_tys};
use rustc_middle::ty::{self, FnMutDelegate, Ty, TyCtxt, TypeVisitableExt};
use rustc_middle::{bug, span_bug};
use rustc_span::{Span, Symbol, sym};
use tracing::{debug, instrument};
@ -215,7 +215,8 @@ impl<'a, 'b, 'tcx> NllTypeRelating<'a, 'b, 'tcx> {
if let Some(ex_reg_var) = reg_map.get(&br) {
*ex_reg_var
} else {
let ex_reg_var = self.next_existential_region_var(true, br.kind.get_name());
let ex_reg_var =
self.next_existential_region_var(true, br.kind.get_name(infcx.infcx.tcx));
debug!(?ex_reg_var);
reg_map.insert(br, ex_reg_var);
@ -263,8 +264,9 @@ impl<'a, 'b, 'tcx> NllTypeRelating<'a, 'b, 'tcx> {
let reg_info = match placeholder.bound.kind {
ty::BoundRegionKind::Anon => sym::anon,
ty::BoundRegionKind::Named(_, name) => name,
ty::BoundRegionKind::Named(def_id) => self.type_checker.tcx().item_name(def_id),
ty::BoundRegionKind::ClosureEnv => sym::env,
ty::BoundRegionKind::NamedAnon(_) => bug!("only used for pretty printing"),
};
if cfg!(debug_assertions) {

View file

@ -497,7 +497,7 @@ impl<'cx, 'tcx> UniversalRegionsBuilder<'cx, 'tcx> {
|r| {
debug!(?r);
let region_vid = {
let name = r.get_name_or_anon();
let name = r.get_name_or_anon(self.infcx.tcx);
self.infcx.next_nll_region_var(FR, || RegionCtxt::LateBound(name))
};
@ -523,7 +523,7 @@ impl<'cx, 'tcx> UniversalRegionsBuilder<'cx, 'tcx> {
let kind = ty::LateParamRegionKind::from_bound(ty::BoundVar::from_usize(idx), kind);
let r = ty::Region::new_late_param(self.infcx.tcx, self.mir_def.to_def_id(), kind);
let region_vid = {
let name = r.get_name_or_anon();
let name = r.get_name_or_anon(self.infcx.tcx);
self.infcx.next_nll_region_var(FR, || RegionCtxt::LateBound(name))
};
@ -861,7 +861,7 @@ impl<'tcx> BorrowckInferCtxt<'tcx> {
T: TypeFoldable<TyCtxt<'tcx>>,
{
fold_regions(self.infcx.tcx, value, |region, _depth| {
let name = region.get_name_or_anon();
let name = region.get_name_or_anon(self.infcx.tcx);
debug!(?region, ?name);
self.next_nll_region_var(origin, || RegionCtxt::Free(name))

View file

@ -485,7 +485,7 @@ impl<'a> TraitDef<'a> {
Annotatable::Item(item) => {
let is_packed = matches!(
AttributeParser::parse_limited(cx.sess, &item.attrs, sym::repr, item.span, item.id),
Some(Attribute::Parsed(AttributeKind::Repr(r))) if r.iter().any(|(x, _)| matches!(x, ReprPacked(..)))
Some(Attribute::Parsed(AttributeKind::Repr { reprs, .. })) if reprs.iter().any(|(x, _)| matches!(x, ReprPacked(..)))
);
let newitem = match &item.kind {

View file

@ -108,6 +108,7 @@ pub fn register_builtin_macros(resolver: &mut dyn ResolverExpand) {
}
register_attr! {
// tidy-alphabetical-start
alloc_error_handler: alloc_error_handler::expand,
autodiff_forward: autodiff::expand_forward,
autodiff_reverse: autodiff::expand_reverse,
@ -120,6 +121,7 @@ pub fn register_builtin_macros(resolver: &mut dyn ResolverExpand) {
global_allocator: global_allocator::expand,
test: test::expand_test,
test_case: test::expand_test_case,
// tidy-alphabetical-end
}
register_derive! {

View file

@ -143,6 +143,15 @@ dependencies = [
"libc",
]
[[package]]
name = "object"
version = "0.37.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "03fd943161069e1768b4b3d050890ba48730e590f57e56d4aa04e7e090e61b4a"
dependencies = [
"memchr",
]
[[package]]
name = "once_cell"
version = "1.20.2"
@ -179,6 +188,7 @@ dependencies = [
"boml",
"gccjit",
"lang_tester",
"object",
"tempfile",
]

View file

@ -22,6 +22,8 @@ master = ["gccjit/master"]
default = ["master"]
[dependencies]
object = { version = "0.37.0", default-features = false, features = ["std", "read"] }
tempfile = "3.20"
gccjit = "2.7"
#gccjit = { git = "https://github.com/rust-lang/gccjit.rs" }
@ -31,7 +33,6 @@ gccjit = "2.7"
[dev-dependencies]
boml = "0.3.1"
lang_tester = "0.8.0"
tempfile = "3.20"
[profile.dev]
# By compiling dependencies with optimizations, performing tests gets much faster.

View file

@ -26,12 +26,9 @@
#![deny(clippy::pattern_type_mismatch)]
#![allow(clippy::needless_lifetimes, clippy::uninlined_format_args)]
// Some "regular" crates we want to share with rustc
extern crate object;
// These crates are pulled from the sysroot because they are part of
// rustc's public API, so we need to ensure version compatibility.
extern crate smallvec;
// FIXME(antoyo): clippy bug: remove the #[allow] when it's fixed.
#[allow(unused_extern_crates)]
extern crate tempfile;
#[macro_use]
extern crate tracing;

View file

@ -146,7 +146,7 @@ impl LlvmType for CastTarget {
"total size {:?} cannot be divided into units of zero size",
self.rest.total
);
if self.rest.total.bytes() % self.rest.unit.size.bytes() != 0 {
if !self.rest.total.bytes().is_multiple_of(self.rest.unit.size.bytes()) {
assert_eq!(self.rest.unit.kind, RegKind::Integer, "only int regs can be split");
}
self.rest.total.bytes().div_ceil(self.rest.unit.size.bytes())

View file

@ -172,10 +172,10 @@ fn emit_aapcs_va_arg<'ll, 'tcx>(
let gr_type = target_ty.is_any_ptr() || target_ty.is_integral();
let (reg_off, reg_top, slot_size) = if gr_type {
let nreg = (layout.size.bytes() + 7) / 8;
let nreg = layout.size.bytes().div_ceil(8);
(gr_offs, gr_top, nreg * 8)
} else {
let nreg = (layout.size.bytes() + 15) / 16;
let nreg = layout.size.bytes().div_ceil(16);
(vr_offs, vr_top, nreg * 16)
};

View file

@ -109,7 +109,7 @@ fn codegen_fn_attrs(tcx: TyCtxt<'_>, did: LocalDefId) -> CodegenFnAttrs {
if let hir::Attribute::Parsed(p) = attr {
match p {
AttributeKind::Repr(reprs) => {
AttributeKind::Repr { reprs, first_span: _ } => {
codegen_fn_attrs.alignment = reprs
.iter()
.filter_map(

View file

@ -1,5 +1,6 @@
use std::fmt;
use itertools::Either;
use rustc_abi as abi;
use rustc_abi::{
Align, BackendRepr, FIRST_VARIANT, FieldIdx, Primitive, Size, TagEncoding, VariantIdx, Variants,
@ -13,7 +14,7 @@ use rustc_session::config::OptLevel;
use tracing::{debug, instrument};
use super::place::{PlaceRef, PlaceValue};
use super::rvalue::transmute_immediate;
use super::rvalue::transmute_scalar;
use super::{FunctionCx, LocalRef};
use crate::common::IntPredicate;
use crate::traits::*;
@ -346,14 +347,16 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> {
let val = if field.is_zst() {
OperandValue::ZeroSized
} else if let BackendRepr::SimdVector { .. } = self.layout.backend_repr {
// codegen_transmute_operand doesn't support SIMD, but since the previous
// check handled ZSTs, the only possible field access into something SIMD
// is to the `non_1zst_field` that's the same SIMD. (Other things, even
// just padding, would change the wrapper's representation type.)
assert_eq!(field.size, self.layout.size);
self.val
} else if field.size == self.layout.size {
assert_eq!(offset.bytes(), 0);
fx.codegen_transmute_operand(bx, *self, field).unwrap_or_else(|| {
bug!(
"Expected `codegen_transmute_operand` to handle equal-size \
field {i:?} projection from {self:?} to {field:?}"
)
})
fx.codegen_transmute_operand(bx, *self, field)
} else {
let (in_scalar, imm) = match (self.val, self.layout.backend_repr) {
// Extract a scalar component from a pair.
@ -565,12 +568,12 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> {
/// Creates an incomplete operand containing the [`abi::Scalar`]s expected based
/// on the `layout` passed. This is for use with [`OperandRef::insert_field`]
/// later to set the necessary immediate(s).
/// later to set the necessary immediate(s), one-by-one converting all the `Right` to `Left`.
///
/// Returns `None` for `layout`s which cannot be built this way.
pub(crate) fn builder(
layout: TyAndLayout<'tcx>,
) -> Option<OperandRef<'tcx, Result<V, abi::Scalar>>> {
) -> Option<OperandRef<'tcx, Either<V, abi::Scalar>>> {
// Uninhabited types are weird, because for example `Result<!, !>`
// shows up as `FieldsShape::Primitive` and we need to be able to write
// a field into `(u32, !)`. We'll do that in an `alloca` instead.
@ -580,15 +583,15 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> {
let val = match layout.backend_repr {
BackendRepr::Memory { .. } if layout.is_zst() => OperandValue::ZeroSized,
BackendRepr::Scalar(s) => OperandValue::Immediate(Err(s)),
BackendRepr::ScalarPair(a, b) => OperandValue::Pair(Err(a), Err(b)),
BackendRepr::Scalar(s) => OperandValue::Immediate(Either::Right(s)),
BackendRepr::ScalarPair(a, b) => OperandValue::Pair(Either::Right(a), Either::Right(b)),
BackendRepr::Memory { .. } | BackendRepr::SimdVector { .. } => return None,
};
Some(OperandRef { val, layout })
}
}
impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, Result<V, abi::Scalar>> {
impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, Either<V, abi::Scalar>> {
pub(crate) fn insert_field<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
&mut self,
bx: &mut Bx,
@ -612,31 +615,29 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, Result<V, abi::Scalar>> {
(field_layout.is_zst(), field_offset == Size::ZERO)
};
let mut update = |tgt: &mut Result<V, abi::Scalar>, src, from_scalar| {
let from_bty = bx.cx().type_from_scalar(from_scalar);
let to_scalar = tgt.unwrap_err();
let to_bty = bx.cx().type_from_scalar(to_scalar);
let imm = transmute_immediate(bx, src, from_scalar, from_bty, to_scalar, to_bty);
*tgt = Ok(imm);
let mut update = |tgt: &mut Either<V, abi::Scalar>, src, from_scalar| {
let to_scalar = tgt.unwrap_right();
let imm = transmute_scalar(bx, src, from_scalar, to_scalar);
*tgt = Either::Left(imm);
};
match (operand.val, operand.layout.backend_repr) {
(OperandValue::ZeroSized, _) if expect_zst => {}
(OperandValue::Immediate(v), BackendRepr::Scalar(from_scalar)) => match &mut self.val {
OperandValue::Immediate(val @ Err(_)) if is_zero_offset => {
OperandValue::Immediate(val @ Either::Right(_)) if is_zero_offset => {
update(val, v, from_scalar);
}
OperandValue::Pair(fst @ Err(_), _) if is_zero_offset => {
OperandValue::Pair(fst @ Either::Right(_), _) if is_zero_offset => {
update(fst, v, from_scalar);
}
OperandValue::Pair(_, snd @ Err(_)) if !is_zero_offset => {
OperandValue::Pair(_, snd @ Either::Right(_)) if !is_zero_offset => {
update(snd, v, from_scalar);
}
_ => bug!("Tried to insert {operand:?} into {v:?}.{f:?} of {self:?}"),
},
(OperandValue::Pair(a, b), BackendRepr::ScalarPair(from_sa, from_sb)) => {
match &mut self.val {
OperandValue::Pair(fst @ Err(_), snd @ Err(_)) => {
OperandValue::Pair(fst @ Either::Right(_), snd @ Either::Right(_)) => {
update(fst, a, from_sa);
update(snd, b, from_sb);
}
@ -656,21 +657,21 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, Result<V, abi::Scalar>> {
let field_offset = self.layout.fields.offset(f.as_usize());
let is_zero_offset = field_offset == Size::ZERO;
match &mut self.val {
OperandValue::Immediate(val @ Err(_)) if is_zero_offset => {
*val = Ok(imm);
OperandValue::Immediate(val @ Either::Right(_)) if is_zero_offset => {
*val = Either::Left(imm);
}
OperandValue::Pair(fst @ Err(_), _) if is_zero_offset => {
*fst = Ok(imm);
OperandValue::Pair(fst @ Either::Right(_), _) if is_zero_offset => {
*fst = Either::Left(imm);
}
OperandValue::Pair(_, snd @ Err(_)) if !is_zero_offset => {
*snd = Ok(imm);
OperandValue::Pair(_, snd @ Either::Right(_)) if !is_zero_offset => {
*snd = Either::Left(imm);
}
_ => bug!("Tried to insert {imm:?} into field {f:?} of {self:?}"),
}
}
/// After having set all necessary fields, this converts the
/// `OperandValue<Result<V, _>>` (as obtained from [`OperandRef::builder`])
/// `OperandValue<Either<V, _>>` (as obtained from [`OperandRef::builder`])
/// to the normal `OperandValue<V>`.
///
/// ICEs if any required fields were not set.
@ -681,13 +682,13 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, Result<V, abi::Scalar>> {
// payload scalar will not actually have been set, so this converts
// unset scalars to corresponding `undef` values so long as the scalar
// from the layout allows uninit.
let unwrap = |r: Result<V, abi::Scalar>| match r {
Ok(v) => v,
Err(s) if s.is_uninit_valid() => {
let unwrap = |r: Either<V, abi::Scalar>| match r {
Either::Left(v) => v,
Either::Right(s) if s.is_uninit_valid() => {
let bty = cx.type_from_scalar(s);
cx.const_undef(bty)
}
Err(_) => bug!("OperandRef::build called while fields are missing {self:?}"),
Either::Right(_) => bug!("OperandRef::build called while fields are missing {self:?}"),
};
let val = match val {

View file

@ -1,10 +1,8 @@
use std::assert_matches::assert_matches;
use rustc_abi::{self as abi, FIRST_VARIANT};
use rustc_middle::ty::adjustment::PointerCoercion;
use rustc_middle::ty::layout::{HasTyCtxt, HasTypingEnv, LayoutOf, TyAndLayout};
use rustc_middle::ty::{self, Instance, Ty, TyCtxt};
use rustc_middle::{bug, mir, span_bug};
use rustc_middle::{bug, mir};
use rustc_session::config::OptLevel;
use rustc_span::{DUMMY_SP, Span};
use tracing::{debug, instrument};
@ -12,7 +10,7 @@ use tracing::{debug, instrument};
use super::operand::{OperandRef, OperandValue};
use super::place::{PlaceRef, codegen_tag_value};
use super::{FunctionCx, LocalRef};
use crate::common::IntPredicate;
use crate::common::{IntPredicate, TypeKind};
use crate::traits::*;
use crate::{MemFlags, base};
@ -190,6 +188,10 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
}
}
/// Transmutes the `src` value to the destination type by writing it to `dst`.
///
/// See also [`Self::codegen_transmute_operand`] for cases that can be done
/// without needing a pre-allocated place for the destination.
fn codegen_transmute(
&mut self,
bx: &mut Bx,
@ -200,37 +202,36 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
assert!(src.layout.is_sized());
assert!(dst.layout.is_sized());
if let Some(val) = self.codegen_transmute_operand(bx, src, dst.layout) {
val.store(bx, dst);
return;
}
match src.val {
OperandValue::Ref(..) | OperandValue::ZeroSized => {
span_bug!(
self.mir.span,
"Operand path should have handled transmute \
from {src:?} to place {dst:?}"
);
}
OperandValue::Immediate(..) | OperandValue::Pair(..) => {
// When we have immediate(s), the alignment of the source is irrelevant,
// so we can store them using the destination's alignment.
src.val.store(bx, dst.val.with_type(src.layout));
}
if src.layout.size != dst.layout.size
|| src.layout.is_uninhabited()
|| dst.layout.is_uninhabited()
{
// These cases are all UB to actually hit, so don't emit code for them.
// (The size mismatches are reachable via `transmute_unchecked`.)
// We can't use unreachable because that's a terminator, and we
// need something that can be in the middle of a basic block.
bx.assume(bx.cx().const_bool(false))
} else {
// Since in this path we have a place anyway, we can store or copy to it,
// making sure we use the destination place's alignment even if the
// source would normally have a higher one.
src.val.store(bx, dst.val.with_type(src.layout));
}
}
/// Attempts to transmute an `OperandValue` to another `OperandValue`.
/// Transmutes an `OperandValue` to another `OperandValue`.
///
/// Returns `None` for cases that can't work in that framework, such as for
/// `Immediate`->`Ref` that needs an `alloc` to get the location.
/// This is supported only for cases where [`Self::rvalue_creates_operand`]
/// returns `true`, and will ICE otherwise. (In particular, anything that
/// would need to `alloca` in order to return a `PlaceValue` will ICE,
/// expecting those to go via [`Self::codegen_transmute`] instead where
/// the destination place is already allocated.)
pub(crate) fn codegen_transmute_operand(
&mut self,
bx: &mut Bx,
operand: OperandRef<'tcx, Bx::Value>,
cast: TyAndLayout<'tcx>,
) -> Option<OperandValue<Bx::Value>> {
) -> OperandValue<Bx::Value> {
// Check for transmutes that are always UB.
if operand.layout.size != cast.size
|| operand.layout.is_uninhabited()
@ -244,71 +245,34 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
// Because this transmute is UB, return something easy to generate,
// since it's fine that later uses of the value are probably UB.
return Some(OperandValue::poison(bx, cast));
return OperandValue::poison(bx, cast);
}
let operand_kind = self.value_kind(operand.layout);
let cast_kind = self.value_kind(cast);
match operand.val {
OperandValue::Ref(source_place_val) => {
match (operand.val, operand.layout.backend_repr, cast.backend_repr) {
_ if cast.is_zst() => OperandValue::ZeroSized,
(_, _, abi::BackendRepr::Memory { .. }) => {
bug!("Cannot `codegen_transmute_operand` to non-ZST memory-ABI output {cast:?}");
}
(OperandValue::Ref(source_place_val), abi::BackendRepr::Memory { .. }, _) => {
assert_eq!(source_place_val.llextra, None);
assert_matches!(operand_kind, OperandValueKind::Ref);
// The existing alignment is part of `source_place_val`,
// so that alignment will be used, not `cast`'s.
Some(bx.load_operand(source_place_val.with_type(cast)).val)
}
OperandValue::ZeroSized => {
let OperandValueKind::ZeroSized = operand_kind else {
bug!("Found {operand_kind:?} for operand {operand:?}");
};
if let OperandValueKind::ZeroSized = cast_kind {
Some(OperandValue::ZeroSized)
} else {
None
}
}
OperandValue::Immediate(imm) => {
let OperandValueKind::Immediate(from_scalar) = operand_kind else {
bug!("Found {operand_kind:?} for operand {operand:?}");
};
if let OperandValueKind::Immediate(to_scalar) = cast_kind
&& from_scalar.size(self.cx) == to_scalar.size(self.cx)
{
let from_backend_ty = bx.backend_type(operand.layout);
let to_backend_ty = bx.backend_type(cast);
Some(OperandValue::Immediate(transmute_immediate(
bx,
imm,
from_scalar,
from_backend_ty,
to_scalar,
to_backend_ty,
)))
} else {
None
}
}
OperandValue::Pair(imm_a, imm_b) => {
let OperandValueKind::Pair(in_a, in_b) = operand_kind else {
bug!("Found {operand_kind:?} for operand {operand:?}");
};
if let OperandValueKind::Pair(out_a, out_b) = cast_kind
&& in_a.size(self.cx) == out_a.size(self.cx)
&& in_b.size(self.cx) == out_b.size(self.cx)
{
let in_a_ibty = bx.scalar_pair_element_backend_type(operand.layout, 0, false);
let in_b_ibty = bx.scalar_pair_element_backend_type(operand.layout, 1, false);
let out_a_ibty = bx.scalar_pair_element_backend_type(cast, 0, false);
let out_b_ibty = bx.scalar_pair_element_backend_type(cast, 1, false);
Some(OperandValue::Pair(
transmute_immediate(bx, imm_a, in_a, in_a_ibty, out_a, out_a_ibty),
transmute_immediate(bx, imm_b, in_b, in_b_ibty, out_b, out_b_ibty),
))
} else {
None
}
bx.load_operand(source_place_val.with_type(cast)).val
}
(
OperandValue::Immediate(imm),
abi::BackendRepr::Scalar(from_scalar),
abi::BackendRepr::Scalar(to_scalar),
) => OperandValue::Immediate(transmute_scalar(bx, imm, from_scalar, to_scalar)),
(
OperandValue::Pair(imm_a, imm_b),
abi::BackendRepr::ScalarPair(in_a, in_b),
abi::BackendRepr::ScalarPair(out_a, out_b),
) => OperandValue::Pair(
transmute_scalar(bx, imm_a, in_a, out_a),
transmute_scalar(bx, imm_b, in_b, out_b),
),
_ => bug!("Cannot `codegen_transmute_operand` {operand:?} to {cast:?}"),
}
}
@ -479,9 +443,8 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
// path as the other integer-to-X casts.
| mir::CastKind::PointerWithExposedProvenance => {
let imm = operand.immediate();
let operand_kind = self.value_kind(operand.layout);
let OperandValueKind::Immediate(from_scalar) = operand_kind else {
bug!("Found {operand_kind:?} for operand {operand:?}");
let abi::BackendRepr::Scalar(from_scalar) = operand.layout.backend_repr else {
bug!("Found non-scalar for operand {operand:?}");
};
let from_backend_ty = bx.cx().immediate_backend_type(operand.layout);
@ -491,9 +454,8 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
let val = OperandValue::Immediate(bx.cx().const_poison(to_backend_ty));
return OperandRef { val, layout: cast };
}
let cast_kind = self.value_kind(cast);
let OperandValueKind::Immediate(to_scalar) = cast_kind else {
bug!("Found {cast_kind:?} for operand {cast:?}");
let abi::BackendRepr::Scalar(to_scalar) = cast.layout.backend_repr else {
bug!("Found non-scalar for cast {cast:?}");
};
self.cast_immediate(bx, imm, from_scalar, from_backend_ty, to_scalar, to_backend_ty)
@ -503,9 +465,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
})
}
mir::CastKind::Transmute => {
self.codegen_transmute_operand(bx, operand, cast).unwrap_or_else(|| {
bug!("Unsupported transmute-as-operand of {operand:?} to {cast:?}");
})
self.codegen_transmute_operand(bx, operand, cast)
}
};
OperandRef { val, layout: cast }
@ -1011,37 +971,46 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
OperandValue::Pair(val, of)
}
/// Returns `true` if the `rvalue` can be computed into an [`OperandRef`],
/// rather than needing a full `PlaceRef` for the assignment destination.
///
/// This is used by the [`super::analyze`] code to decide which MIR locals
/// can stay as SSA values (as opposed to generating `alloca` slots for them).
/// As such, some paths here return `true` even where the specific rvalue
/// will not actually take the operand path because the result type is such
/// that it always gets an `alloca`, but where it's not worth re-checking the
/// layout in this code when the right thing will happen anyway.
pub(crate) fn rvalue_creates_operand(&self, rvalue: &mir::Rvalue<'tcx>, span: Span) -> bool {
match *rvalue {
mir::Rvalue::Cast(mir::CastKind::Transmute, ref operand, cast_ty) => {
let operand_ty = operand.ty(self.mir, self.cx.tcx());
let cast_layout = self.cx.layout_of(self.monomorphize(cast_ty));
let operand_layout = self.cx.layout_of(self.monomorphize(operand_ty));
match (operand_layout.backend_repr, cast_layout.backend_repr) {
// When the output will be in memory anyway, just use its place
// (instead of the operand path) unless it's the trivial ZST case.
(_, abi::BackendRepr::Memory { .. }) => cast_layout.is_zst(),
match (self.value_kind(operand_layout), self.value_kind(cast_layout)) {
// Can always load from a pointer as needed
(OperandValueKind::Ref, _) => true,
// ZST-to-ZST is the easiest thing ever
(OperandValueKind::ZeroSized, OperandValueKind::ZeroSized) => true,
// But if only one of them is a ZST the sizes can't match
(OperandValueKind::ZeroSized, _) | (_, OperandValueKind::ZeroSized) => false,
// Need to generate an `alloc` to get a pointer from an immediate
(OperandValueKind::Immediate(..) | OperandValueKind::Pair(..), OperandValueKind::Ref) => false,
// Otherwise (for a non-memory output) if the input is memory
// then we can just read the value from the place.
(abi::BackendRepr::Memory { .. }, _) => true,
// When we have scalar immediates, we can only convert things
// where the sizes match, to avoid endianness questions.
(OperandValueKind::Immediate(a), OperandValueKind::Immediate(b)) =>
(abi::BackendRepr::Scalar(a), abi::BackendRepr::Scalar(b)) =>
a.size(self.cx) == b.size(self.cx),
(OperandValueKind::Pair(a0, a1), OperandValueKind::Pair(b0, b1)) =>
(abi::BackendRepr::ScalarPair(a0, a1), abi::BackendRepr::ScalarPair(b0, b1)) =>
a0.size(self.cx) == b0.size(self.cx) && a1.size(self.cx) == b1.size(self.cx),
// Send mixings between scalars and pairs through the memory route
// FIXME: Maybe this could use insertvalue/extractvalue instead?
(OperandValueKind::Immediate(..), OperandValueKind::Pair(..)) |
(OperandValueKind::Pair(..), OperandValueKind::Immediate(..)) => false,
// Mixing Scalars and ScalarPairs can get quite complicated when
// padding and undef get involved, so leave that to the memory path.
(abi::BackendRepr::Scalar(_), abi::BackendRepr::ScalarPair(_, _)) |
(abi::BackendRepr::ScalarPair(_, _), abi::BackendRepr::Scalar(_)) => false,
// SIMD vectors aren't worth the trouble of dealing with complex
// cases like from vectors of f32 to vectors of pointers or
// from fat pointers to vectors of u16. (See #143194 #110021 ...)
(abi::BackendRepr::SimdVector { .. }, _) | (_, abi::BackendRepr::SimdVector { .. }) => false,
}
}
mir::Rvalue::Ref(..) |
@ -1071,68 +1040,43 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
// (*) this is only true if the type is suitable
}
/// Gets which variant of [`OperandValue`] is expected for a particular type.
fn value_kind(&self, layout: TyAndLayout<'tcx>) -> OperandValueKind {
if layout.is_zst() {
OperandValueKind::ZeroSized
} else if self.cx.is_backend_immediate(layout) {
assert!(!self.cx.is_backend_scalar_pair(layout));
OperandValueKind::Immediate(match layout.backend_repr {
abi::BackendRepr::Scalar(s) => s,
abi::BackendRepr::SimdVector { element, .. } => element,
x => span_bug!(self.mir.span, "Couldn't translate {x:?} as backend immediate"),
})
} else if self.cx.is_backend_scalar_pair(layout) {
let abi::BackendRepr::ScalarPair(s1, s2) = layout.backend_repr else {
span_bug!(
self.mir.span,
"Couldn't translate {:?} as backend scalar pair",
layout.backend_repr,
);
};
OperandValueKind::Pair(s1, s2)
} else {
OperandValueKind::Ref
}
}
}
/// The variants of this match [`OperandValue`], giving details about the
/// backend values that will be held in that other type.
#[derive(Debug, Copy, Clone)]
enum OperandValueKind {
Ref,
Immediate(abi::Scalar),
Pair(abi::Scalar, abi::Scalar),
ZeroSized,
}
/// Transmutes one of the immediates from an [`OperandValue::Immediate`]
/// or an [`OperandValue::Pair`] to an immediate of the target type.
/// Transmutes a single scalar value `imm` from `from_scalar` to `to_scalar`.
///
/// `to_backend_ty` must be the *non*-immediate backend type (so it will be
/// `i8`, not `i1`, for `bool`-like types.)
pub(super) fn transmute_immediate<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
/// This is expected to be in *immediate* form, as seen in [`OperandValue::Immediate`]
/// or [`OperandValue::Pair`] (so `i1` for bools, not `i8`, for example).
///
/// ICEs if the passed-in `imm` is not a value of the expected type for
/// `from_scalar`, such as if it's a vector or a pair.
pub(super) fn transmute_scalar<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
bx: &mut Bx,
mut imm: Bx::Value,
from_scalar: abi::Scalar,
from_backend_ty: Bx::Type,
to_scalar: abi::Scalar,
to_backend_ty: Bx::Type,
) -> Bx::Value {
assert_eq!(from_scalar.size(bx.cx()), to_scalar.size(bx.cx()));
let imm_ty = bx.cx().val_ty(imm);
assert_ne!(
bx.cx().type_kind(imm_ty),
TypeKind::Vector,
"Vector type {imm_ty:?} not allowed in transmute_scalar {from_scalar:?} -> {to_scalar:?}"
);
// While optimizations will remove no-op transmutes, they might still be
// there in debug or things that aren't no-op in MIR because they change
// the Rust type but not the underlying layout/niche.
if from_scalar == to_scalar && from_backend_ty == to_backend_ty {
if from_scalar == to_scalar {
return imm;
}
use abi::Primitive::*;
imm = bx.from_immediate(imm);
let from_backend_ty = bx.cx().type_from_scalar(from_scalar);
debug_assert_eq!(bx.cx().val_ty(imm), from_backend_ty);
let to_backend_ty = bx.cx().type_from_scalar(to_scalar);
// If we have a scalar, we must already know its range. Either
//
// 1) It's a parameter with `range` parameter metadata,
@ -1163,6 +1107,8 @@ pub(super) fn transmute_immediate<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
}
};
debug_assert_eq!(bx.cx().val_ty(imm), to_backend_ty);
// This `assume` remains important for cases like (a conceptual)
// transmute::<u32, NonZeroU32>(x) == 0
// since it's never passed to something with parameter metadata (especially

View file

@ -1,9 +1,9 @@
// Not in interpret to make sure we do not use private implementation details
use rustc_abi::{FieldIdx, VariantIdx};
use rustc_middle::query::Key;
use rustc_middle::ty::{self, Ty, TyCtxt};
use rustc_middle::{bug, mir};
use rustc_span::DUMMY_SP;
use tracing::instrument;
use crate::interpret::InterpCx;
@ -71,8 +71,7 @@ pub fn tag_for_variant_provider<'tcx>(
let (ty, variant_index) = key.value;
assert!(ty.is_enum());
let ecx =
InterpCx::new(tcx, ty.default_span(tcx), key.typing_env, crate::const_eval::DummyMachine);
let ecx = InterpCx::new(tcx, DUMMY_SP, key.typing_env, crate::const_eval::DummyMachine);
let layout = ecx.layout_of(ty).unwrap();
ecx.tag_for_variant(layout, variant_index).unwrap().map(|(tag, _tag_field)| tag)

View file

@ -537,7 +537,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
#[inline]
fn is_offset_misaligned(offset: u64, align: Align) -> Option<Misalignment> {
if offset % align.bytes() == 0 {
if offset.is_multiple_of(align.bytes()) {
None
} else {
// The biggest power of two through which `offset` is divisible.
@ -1554,7 +1554,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
// If the allocation is N-aligned, and the offset is not divisible by N,
// then `base + offset` has a non-zero remainder after division by `N`,
// which means `base + offset` cannot be null.
if offset.bytes() % info.align.bytes() != 0 {
if !offset.bytes().is_multiple_of(info.align.bytes()) {
return interp_ok(false);
}
// We don't know enough, this might be null.

View file

@ -348,6 +348,10 @@ pub trait TTMacroExpander {
span: Span,
input: TokenStream,
) -> MacroExpanderResult<'cx>;
fn get_unused_rule(&self, _rule_i: usize) -> Option<(&Ident, Span)> {
None
}
}
pub type MacroExpanderResult<'cx> = ExpandResult<Box<dyn MacResult + 'cx>, ()>;

View file

@ -10,7 +10,7 @@ use rustc_span::source_map::SourceMap;
use rustc_span::{ErrorGuaranteed, Ident, Span};
use tracing::debug;
use super::macro_rules::{NoopTracker, parser_from_cx};
use super::macro_rules::{MacroRule, NoopTracker, parser_from_cx};
use crate::expand::{AstFragmentKind, parse_ast_fragment};
use crate::mbe::macro_parser::ParseResult::*;
use crate::mbe::macro_parser::{MatcherLoc, NamedParseResult, TtParser};
@ -22,14 +22,14 @@ pub(super) fn failed_to_match_macro(
def_span: Span,
name: Ident,
arg: TokenStream,
lhses: &[Vec<MatcherLoc>],
rules: &[MacroRule],
) -> (Span, ErrorGuaranteed) {
debug!("failed to match macro");
// An error occurred, try the expansion again, tracking the expansion closely for better
// diagnostics.
let mut tracker = CollectTrackerAndEmitter::new(psess.dcx(), sp);
let try_success_result = try_match_macro(psess, name, &arg, lhses, &mut tracker);
let try_success_result = try_match_macro(psess, name, &arg, rules, &mut tracker);
if try_success_result.is_ok() {
// Nonterminal parser recovery might turn failed matches into successful ones,
@ -80,12 +80,12 @@ pub(super) fn failed_to_match_macro(
// Check whether there's a missing comma in this macro call, like `println!("{}" a);`
if let Some((arg, comma_span)) = arg.add_comma() {
for lhs in lhses {
for rule in rules {
let parser = parser_from_cx(psess, arg.clone(), Recovery::Allowed);
let mut tt_parser = TtParser::new(name);
if let Success(_) =
tt_parser.parse_tt(&mut Cow::Borrowed(&parser), lhs, &mut NoopTracker)
tt_parser.parse_tt(&mut Cow::Borrowed(&parser), &rule.lhs, &mut NoopTracker)
{
if comma_span.is_dummy() {
err.note("you might be missing a comma");

View file

@ -36,6 +36,7 @@ use crate::base::{
};
use crate::expand::{AstFragment, AstFragmentKind, ensure_complete_parse, parse_ast_fragment};
use crate::mbe::macro_parser::{Error, ErrorReported, Failure, MatcherLoc, Success, TtParser};
use crate::mbe::quoted::{RulePart, parse_one_tt};
use crate::mbe::transcribe::transcribe;
use crate::mbe::{self, KleeneOp, macro_check};
@ -97,13 +98,18 @@ impl<'a> ParserAnyMacro<'a> {
}
}
pub(super) struct MacroRule {
pub(super) lhs: Vec<MatcherLoc>,
lhs_span: Span,
rhs: mbe::TokenTree,
}
struct MacroRulesMacroExpander {
node_id: NodeId,
name: Ident,
span: Span,
transparency: Transparency,
lhses: Vec<Vec<MatcherLoc>>,
rhses: Vec<mbe::TokenTree>,
rules: Vec<MacroRule>,
}
impl TTMacroExpander for MacroRulesMacroExpander {
@ -121,10 +127,15 @@ impl TTMacroExpander for MacroRulesMacroExpander {
self.name,
self.transparency,
input,
&self.lhses,
&self.rhses,
&self.rules,
))
}
fn get_unused_rule(&self, rule_i: usize) -> Option<(&Ident, Span)> {
// If the rhs contains an invocation like `compile_error!`, don't report it as unused.
let rule = &self.rules[rule_i];
if has_compile_error_macro(&rule.rhs) { None } else { Some((&self.name, rule.lhs_span)) }
}
}
struct DummyExpander(ErrorGuaranteed);
@ -183,9 +194,8 @@ impl<'matcher> Tracker<'matcher> for NoopTracker {
}
}
/// Expands the rules based macro defined by `lhses` and `rhses` for a given
/// input `arg`.
#[instrument(skip(cx, transparency, arg, lhses, rhses))]
/// Expands the rules based macro defined by `rules` for a given input `arg`.
#[instrument(skip(cx, transparency, arg, rules))]
fn expand_macro<'cx>(
cx: &'cx mut ExtCtxt<'_>,
sp: Span,
@ -194,8 +204,7 @@ fn expand_macro<'cx>(
name: Ident,
transparency: Transparency,
arg: TokenStream,
lhses: &[Vec<MatcherLoc>],
rhses: &[mbe::TokenTree],
rules: &[MacroRule],
) -> Box<dyn MacResult + 'cx> {
let psess = &cx.sess.psess;
// Macros defined in the current crate have a real node id,
@ -208,15 +217,14 @@ fn expand_macro<'cx>(
}
// Track nothing for the best performance.
let try_success_result = try_match_macro(psess, name, &arg, lhses, &mut NoopTracker);
let try_success_result = try_match_macro(psess, name, &arg, rules, &mut NoopTracker);
match try_success_result {
Ok((i, named_matches)) => {
let (rhs, rhs_span): (&mbe::Delimited, DelimSpan) = match &rhses[i] {
mbe::TokenTree::Delimited(span, _, delimited) => (&delimited, *span),
_ => cx.dcx().span_bug(sp, "malformed macro rhs"),
Ok((i, rule, named_matches)) => {
let mbe::TokenTree::Delimited(rhs_span, _, ref rhs) = rule.rhs else {
cx.dcx().span_bug(sp, "malformed macro rhs");
};
let arm_span = rhses[i].span();
let arm_span = rule.rhs.span();
// rhs has holes ( `$id` and `$(...)` that need filled)
let id = cx.current_expansion.id;
@ -262,7 +270,7 @@ fn expand_macro<'cx>(
Err(CanRetry::Yes) => {
// Retry and emit a better error.
let (span, guar) =
diagnostics::failed_to_match_macro(cx.psess(), sp, def_span, name, arg, lhses);
diagnostics::failed_to_match_macro(cx.psess(), sp, def_span, name, arg, rules);
cx.trace_macros_diag();
DummyResult::any(span, guar)
}
@ -278,14 +286,14 @@ pub(super) enum CanRetry {
/// Try expanding the macro. Returns the index of the successful arm and its named_matches if it was successful,
/// and nothing if it failed. On failure, it's the callers job to use `track` accordingly to record all errors
/// correctly.
#[instrument(level = "debug", skip(psess, arg, lhses, track), fields(tracking = %T::description()))]
#[instrument(level = "debug", skip(psess, arg, rules, track), fields(tracking = %T::description()))]
pub(super) fn try_match_macro<'matcher, T: Tracker<'matcher>>(
psess: &ParseSess,
name: Ident,
arg: &TokenStream,
lhses: &'matcher [Vec<MatcherLoc>],
rules: &'matcher [MacroRule],
track: &mut T,
) -> Result<(usize, NamedMatches), CanRetry> {
) -> Result<(usize, &'matcher MacroRule, NamedMatches), CanRetry> {
// We create a base parser that can be used for the "black box" parts.
// Every iteration needs a fresh copy of that parser. However, the parser
// is not mutated on many of the iterations, particularly when dealing with
@ -308,7 +316,7 @@ pub(super) fn try_match_macro<'matcher, T: Tracker<'matcher>>(
let parser = parser_from_cx(psess, arg.clone(), T::recovery());
// Try each arm's matchers.
let mut tt_parser = TtParser::new(name);
for (i, lhs) in lhses.iter().enumerate() {
for (i, rule) in rules.iter().enumerate() {
let _tracing_span = trace_span!("Matching arm", %i);
// Take a snapshot of the state of pre-expansion gating at this point.
@ -317,7 +325,7 @@ pub(super) fn try_match_macro<'matcher, T: Tracker<'matcher>>(
// are not recorded. On the first `Success(..)`ful matcher, the spans are merged.
let mut gated_spans_snapshot = mem::take(&mut *psess.gated_spans.spans.borrow_mut());
let result = tt_parser.parse_tt(&mut Cow::Borrowed(&parser), lhs, track);
let result = tt_parser.parse_tt(&mut Cow::Borrowed(&parser), &rule.lhs, track);
track.after_arm(&result);
@ -328,7 +336,7 @@ pub(super) fn try_match_macro<'matcher, T: Tracker<'matcher>>(
// Merge the gated spans from parsing the matcher with the preexisting ones.
psess.gated_spans.merge(gated_spans_snapshot);
return Ok((i, named_matches));
return Ok((i, rule, named_matches));
}
Failure(_) => {
trace!("Failed to match arm, trying the next one");
@ -364,7 +372,7 @@ pub fn compile_declarative_macro(
span: Span,
node_id: NodeId,
edition: Edition,
) -> (SyntaxExtension, Vec<(usize, Span)>) {
) -> (SyntaxExtension, usize) {
let mk_syn_ext = |expander| {
SyntaxExtension::new(
sess,
@ -377,7 +385,7 @@ pub fn compile_declarative_macro(
node_id != DUMMY_NODE_ID,
)
};
let dummy_syn_ext = |guar| (mk_syn_ext(Arc::new(DummyExpander(guar))), Vec::new());
let dummy_syn_ext = |guar| (mk_syn_ext(Arc::new(DummyExpander(guar))), 0);
let macro_rules = macro_def.macro_rules;
let exp_sep = if macro_rules { exp!(Semi) } else { exp!(Comma) };
@ -389,21 +397,11 @@ pub fn compile_declarative_macro(
let mut guar = None;
let mut check_emission = |ret: Result<(), ErrorGuaranteed>| guar = guar.or(ret.err());
let mut lhses = Vec::new();
let mut rhses = Vec::new();
let mut rules = Vec::new();
while p.token != token::Eof {
let lhs_tt = p.parse_token_tree();
let lhs_tt = mbe::quoted::parse(
&TokenStream::new(vec![lhs_tt]),
true, // LHS
sess,
node_id,
features,
edition,
)
.pop()
.unwrap();
let lhs_tt = parse_one_tt(lhs_tt, RulePart::Pattern, sess, node_id, features, edition);
// We don't handle errors here, the driver will abort after parsing/expansion. We can
// report every error in every macro this way.
check_emission(check_lhs_nt_follows(sess, node_id, &lhs_tt));
@ -421,20 +419,18 @@ pub fn compile_declarative_macro(
return dummy_syn_ext(guar);
}
let rhs_tt = p.parse_token_tree();
let rhs_tt = mbe::quoted::parse(
&TokenStream::new(vec![rhs_tt]),
false, // RHS
sess,
node_id,
features,
edition,
)
.pop()
.unwrap();
let rhs_tt = parse_one_tt(rhs_tt, RulePart::Body, sess, node_id, features, edition);
check_emission(check_rhs(sess, &rhs_tt));
check_emission(macro_check::check_meta_variables(&sess.psess, node_id, &lhs_tt, &rhs_tt));
lhses.push(lhs_tt);
rhses.push(rhs_tt);
let lhs_span = lhs_tt.span();
// Convert the lhs into `MatcherLoc` form, which is better for doing the
// actual matching.
let lhs = if let mbe::TokenTree::Delimited(.., delimited) = lhs_tt {
mbe::macro_parser::compute_locs(&delimited.tts)
} else {
return dummy_syn_ext(guar.unwrap());
};
rules.push(MacroRule { lhs, lhs_span, rhs: rhs_tt });
if p.token == token::Eof {
break;
}
@ -443,7 +439,7 @@ pub fn compile_declarative_macro(
}
}
if lhses.is_empty() {
if rules.is_empty() {
let guar = sess.dcx().span_err(span, "macros must contain at least one rule");
return dummy_syn_ext(guar);
}
@ -457,48 +453,12 @@ pub fn compile_declarative_macro(
return dummy_syn_ext(guar);
}
// Compute the spans of the macro rules for unused rule linting.
// Also, we are only interested in non-foreign macros.
let rule_spans = if node_id != DUMMY_NODE_ID {
lhses
.iter()
.zip(rhses.iter())
.enumerate()
// If the rhs contains an invocation like compile_error!,
// don't consider the rule for the unused rule lint.
.filter(|(_idx, (_lhs, rhs))| !has_compile_error_macro(rhs))
// We only take the span of the lhs here,
// so that the spans of created warnings are smaller.
.map(|(idx, (lhs, _rhs))| (idx, lhs.span()))
.collect::<Vec<_>>()
} else {
Vec::new()
};
// Return the number of rules for unused rule linting, if this is a local macro.
let nrules = if node_id != DUMMY_NODE_ID { rules.len() } else { 0 };
// Convert the lhses into `MatcherLoc` form, which is better for doing the
// actual matching.
let lhses = lhses
.iter()
.map(|lhs| {
// Ignore the delimiters around the matcher.
match lhs {
mbe::TokenTree::Delimited(.., delimited) => {
mbe::macro_parser::compute_locs(&delimited.tts)
}
_ => sess.dcx().span_bug(span, "malformed macro lhs"),
}
})
.collect();
let expander = Arc::new(MacroRulesMacroExpander {
name: ident,
span,
node_id,
transparency,
lhses,
rhses,
});
(mk_syn_ext(expander), rule_spans)
let expander =
Arc::new(MacroRulesMacroExpander { name: ident, span, node_id, transparency, rules });
(mk_syn_ext(expander), nrules)
}
fn check_lhs_nt_follows(

View file

@ -16,6 +16,27 @@ pub(crate) const VALID_FRAGMENT_NAMES_MSG: &str = "valid fragment specifiers are
`ident`, `block`, `stmt`, `expr`, `pat`, `ty`, `lifetime`, `literal`, `path`, \
`meta`, `tt`, `item` and `vis`, along with `expr_2021` and `pat_param` for edition compatibility";
/// Which part of a macro rule we're parsing
#[derive(Copy, Clone)]
pub(crate) enum RulePart {
/// The left-hand side, with patterns and metavar definitions with types
Pattern,
/// The right-hand side body, with metavar references and metavar expressions
Body,
}
impl RulePart {
#[inline(always)]
fn is_pattern(&self) -> bool {
matches!(self, Self::Pattern)
}
#[inline(always)]
fn is_body(&self) -> bool {
matches!(self, Self::Body)
}
}
/// Takes a `tokenstream::TokenStream` and returns a `Vec<self::TokenTree>`. Specifically, this
/// takes a generic `TokenStream`, such as is used in the rest of the compiler, and returns a
/// collection of `TokenTree` for use in parsing a macro.
@ -23,8 +44,8 @@ pub(crate) const VALID_FRAGMENT_NAMES_MSG: &str = "valid fragment specifiers are
/// # Parameters
///
/// - `input`: a token stream to read from, the contents of which we are parsing.
/// - `parsing_patterns`: `parse` can be used to parse either the "patterns" or the "body" of a
/// macro. Both take roughly the same form _except_ that:
/// - `part`: whether we're parsing the patterns or the body of a macro. Both take roughly the same
/// form _except_ that:
/// - In a pattern, metavars are declared with their "matcher" type. For example `$var:expr` or
/// `$id:ident`. In this example, `expr` and `ident` are "matchers". They are not present in the
/// body of a macro rule -- just in the pattern.
@ -36,9 +57,9 @@ pub(crate) const VALID_FRAGMENT_NAMES_MSG: &str = "valid fragment specifiers are
/// # Returns
///
/// A collection of `self::TokenTree`. There may also be some errors emitted to `sess`.
pub(super) fn parse(
fn parse(
input: &tokenstream::TokenStream,
parsing_patterns: bool,
part: RulePart,
sess: &Session,
node_id: NodeId,
features: &Features,
@ -53,9 +74,9 @@ pub(super) fn parse(
while let Some(tree) = iter.next() {
// Given the parsed tree, if there is a metavar and we are expecting matchers, actually
// parse out the matcher (i.e., in `$id:ident` this would parse the `:` and `ident`).
let tree = parse_tree(tree, &mut iter, parsing_patterns, sess, node_id, features, edition);
let tree = parse_tree(tree, &mut iter, part, sess, node_id, features, edition);
if !parsing_patterns {
if part.is_body() {
// No matchers allowed, nothing to process here
result.push(tree);
continue;
@ -131,6 +152,22 @@ pub(super) fn parse(
result
}
/// Takes a `tokenstream::TokenTree` and returns a `self::TokenTree`. Like `parse`, but for a
/// single token tree. Emits errors to `sess` if needed.
#[inline]
pub(super) fn parse_one_tt(
input: tokenstream::TokenTree,
part: RulePart,
sess: &Session,
node_id: NodeId,
features: &Features,
edition: Edition,
) -> TokenTree {
parse(&tokenstream::TokenStream::new(vec![input]), part, sess, node_id, features, edition)
.pop()
.unwrap()
}
/// Asks for the `macro_metavar_expr` feature if it is not enabled
fn maybe_emit_macro_metavar_expr_feature(features: &Features, sess: &Session, span: Span) {
if !features.macro_metavar_expr() {
@ -157,13 +194,13 @@ fn maybe_emit_macro_metavar_expr_concat_feature(features: &Features, sess: &Sess
/// - `tree`: the tree we wish to convert.
/// - `outer_iter`: an iterator over trees. We may need to read more tokens from it in order to finish
/// converting `tree`
/// - `parsing_patterns`: same as [parse].
/// - `part`: same as [parse].
/// - `sess`: the parsing session. Any errors will be emitted to this session.
/// - `features`: language features so we can do feature gating.
fn parse_tree<'a>(
tree: &'a tokenstream::TokenTree,
outer_iter: &mut TokenStreamIter<'a>,
parsing_patterns: bool,
part: RulePart,
sess: &Session,
node_id: NodeId,
features: &Features,
@ -189,7 +226,7 @@ fn parse_tree<'a>(
match next {
// `tree` is followed by a delimited set of token trees.
Some(&tokenstream::TokenTree::Delimited(delim_span, _, delim, ref tts)) => {
if parsing_patterns {
if part.is_pattern() {
if delim != Delimiter::Parenthesis {
span_dollar_dollar_or_metavar_in_the_lhs_err(
sess,
@ -244,13 +281,13 @@ fn parse_tree<'a>(
// If we didn't find a metavar expression above, then we must have a
// repetition sequence in the macro (e.g. `$(pat)*`). Parse the
// contents of the sequence itself
let sequence = parse(tts, parsing_patterns, sess, node_id, features, edition);
let sequence = parse(tts, part, sess, node_id, features, edition);
// Get the Kleene operator and optional separator
let (separator, kleene) =
parse_sep_and_kleene_op(&mut iter, delim_span.entire(), sess);
// Count the number of captured "names" (i.e., named metavars)
let num_captures =
if parsing_patterns { count_metavar_decls(&sequence) } else { 0 };
if part.is_pattern() { count_metavar_decls(&sequence) } else { 0 };
TokenTree::Sequence(
delim_span,
SequenceRepetition { tts: sequence, separator, kleene, num_captures },
@ -274,7 +311,7 @@ fn parse_tree<'a>(
Token { kind: token::Dollar, span: dollar_span2 },
_,
)) => {
if parsing_patterns {
if part.is_pattern() {
span_dollar_dollar_or_metavar_in_the_lhs_err(
sess,
&Token { kind: token::Dollar, span: dollar_span2 },
@ -306,10 +343,7 @@ fn parse_tree<'a>(
&tokenstream::TokenTree::Delimited(span, spacing, delim, ref tts) => TokenTree::Delimited(
span,
spacing,
Delimited {
delim,
tts: parse(tts, parsing_patterns, sess, node_id, features, edition),
},
Delimited { delim, tts: parse(tts, part, sess, node_id, features, edition) },
),
}
}

View file

@ -1303,6 +1303,7 @@ impl AttributeExt for Attribute {
Attribute::Parsed(AttributeKind::Deprecation { span, .. }) => *span,
Attribute::Parsed(AttributeKind::DocComment { span, .. }) => *span,
Attribute::Parsed(AttributeKind::MayDangle(span)) => *span,
Attribute::Parsed(AttributeKind::Ignore { span, .. }) => *span,
a => panic!("can't get the span of an arbitrary parsed attribute: {a:?}"),
}
}

View file

@ -1395,8 +1395,7 @@ fn check_simd(tcx: TyCtxt<'_>, sp: Span, def_id: LocalDefId) {
pub(super) fn check_packed(tcx: TyCtxt<'_>, sp: Span, def: ty::AdtDef<'_>) {
let repr = def.repr();
if repr.packed() {
if let Some(reprs) =
attrs::find_attr!(tcx.get_all_attrs(def.did()), attrs::AttributeKind::Repr(r) => r)
if let Some(reprs) = attrs::find_attr!(tcx.get_all_attrs(def.did()), attrs::AttributeKind::Repr { reprs, .. } => reprs)
{
for (r, _) in reprs {
if let ReprPacked(pack) = r
@ -1619,10 +1618,10 @@ fn check_enum(tcx: TyCtxt<'_>, def_id: LocalDefId) {
if def.variants().is_empty() {
attrs::find_attr!(
tcx.get_all_attrs(def_id),
attrs::AttributeKind::Repr(rs) => {
attrs::AttributeKind::Repr { reprs, first_span } => {
struct_span_code_err!(
tcx.dcx(),
rs.first().unwrap().1,
reprs.first().map(|repr| repr.1).unwrap_or(*first_span),
E0084,
"unsupported representation for zero-variant enum"
)

View file

@ -1239,7 +1239,7 @@ fn check_region_late_boundedness<'tcx>(
.unwrap_region_constraints()
.opportunistic_resolve_var(tcx, vid)
&& let ty::ReLateParam(ty::LateParamRegion {
kind: ty::LateParamRegionKind::Named(trait_param_def_id, _),
kind: ty::LateParamRegionKind::Named(trait_param_def_id),
..
}) = r.kind()
&& let ty::ReEarlyParam(ebr) = id_arg.expect_region().kind()
@ -1264,7 +1264,7 @@ fn check_region_late_boundedness<'tcx>(
.unwrap_region_constraints()
.opportunistic_resolve_var(tcx, vid)
&& let ty::ReLateParam(ty::LateParamRegion {
kind: ty::LateParamRegionKind::Named(impl_param_def_id, _),
kind: ty::LateParamRegionKind::Named(impl_param_def_id),
..
}) = r.kind()
&& let ty::ReEarlyParam(ebr) = id_arg.expect_region().kind()
@ -2468,7 +2468,7 @@ fn param_env_with_gat_bounds<'tcx>(
let normalize_impl_ty_args = ty::GenericArgs::identity_for_item(tcx, container_id)
.extend_to(tcx, impl_ty.def_id, |param, _| match param.kind {
GenericParamDefKind::Type { .. } => {
let kind = ty::BoundTyKind::Param(param.def_id, param.name);
let kind = ty::BoundTyKind::Param(param.def_id);
let bound_var = ty::BoundVariableKind::Ty(kind);
bound_vars.push(bound_var);
Ty::new_bound(
@ -2479,7 +2479,7 @@ fn param_env_with_gat_bounds<'tcx>(
.into()
}
GenericParamDefKind::Lifetime => {
let kind = ty::BoundRegionKind::Named(param.def_id, param.name);
let kind = ty::BoundRegionKind::Named(param.def_id);
let bound_var = ty::BoundVariableKind::Region(kind);
bound_vars.push(bound_var);
ty::Region::new_bound(

View file

@ -15,7 +15,6 @@ use rustc_hir::def_id::DefId;
use rustc_hir::intravisit::{self, Visitor};
use rustc_hir::{Arm, Block, Expr, LetStmt, Pat, PatKind, Stmt};
use rustc_index::Idx;
use rustc_middle::bug;
use rustc_middle::middle::region::*;
use rustc_middle::ty::TyCtxt;
use rustc_session::lint;
@ -34,14 +33,6 @@ struct Context {
struct ScopeResolutionVisitor<'tcx> {
tcx: TyCtxt<'tcx>,
// The number of expressions and patterns visited in the current body.
expr_and_pat_count: usize,
// When this is `true`, we record the `Scopes` we encounter
// when processing a Yield expression. This allows us to fix
// up their indices.
pessimistic_yield: bool,
// Stores scopes when `pessimistic_yield` is `true`.
fixup_scopes: Vec<Scope>,
// The generated scope tree.
scope_tree: ScopeTree,
@ -199,19 +190,14 @@ fn resolve_arm<'tcx>(visitor: &mut ScopeResolutionVisitor<'tcx>, arm: &'tcx hir:
visitor.cx = prev_cx;
}
#[tracing::instrument(level = "debug", skip(visitor))]
fn resolve_pat<'tcx>(visitor: &mut ScopeResolutionVisitor<'tcx>, pat: &'tcx hir::Pat<'tcx>) {
// If this is a binding then record the lifetime of that binding.
if let PatKind::Binding(..) = pat.kind {
record_var_lifetime(visitor, pat.hir_id.local_id);
}
debug!("resolve_pat - pre-increment {} pat = {:?}", visitor.expr_and_pat_count, pat);
intravisit::walk_pat(visitor, pat);
visitor.expr_and_pat_count += 1;
debug!("resolve_pat - post-increment {} pat = {:?}", visitor.expr_and_pat_count, pat);
}
fn resolve_stmt<'tcx>(visitor: &mut ScopeResolutionVisitor<'tcx>, stmt: &'tcx hir::Stmt<'tcx>) {
@ -243,68 +229,15 @@ fn resolve_stmt<'tcx>(visitor: &mut ScopeResolutionVisitor<'tcx>, stmt: &'tcx hi
}
}
#[tracing::instrument(level = "debug", skip(visitor))]
fn resolve_expr<'tcx>(
visitor: &mut ScopeResolutionVisitor<'tcx>,
expr: &'tcx hir::Expr<'tcx>,
terminating: bool,
) {
debug!("resolve_expr - pre-increment {} expr = {:?}", visitor.expr_and_pat_count, expr);
let prev_cx = visitor.cx;
visitor.enter_node_scope_with_dtor(expr.hir_id.local_id, terminating);
let prev_pessimistic = visitor.pessimistic_yield;
// Ordinarily, we can rely on the visit order of HIR intravisit
// to correspond to the actual execution order of statements.
// However, there's a weird corner case with compound assignment
// operators (e.g. `a += b`). The evaluation order depends on whether
// or not the operator is overloaded (e.g. whether or not a trait
// like AddAssign is implemented).
// For primitive types (which, despite having a trait impl, don't actually
// end up calling it), the evaluation order is right-to-left. For example,
// the following code snippet:
//
// let y = &mut 0;
// *{println!("LHS!"); y} += {println!("RHS!"); 1};
//
// will print:
//
// RHS!
// LHS!
//
// However, if the operator is used on a non-primitive type,
// the evaluation order will be left-to-right, since the operator
// actually get desugared to a method call. For example, this
// nearly identical code snippet:
//
// let y = &mut String::new();
// *{println!("LHS String"); y} += {println!("RHS String"); "hi"};
//
// will print:
// LHS String
// RHS String
//
// To determine the actual execution order, we need to perform
// trait resolution. Unfortunately, we need to be able to compute
// yield_in_scope before type checking is even done, as it gets
// used by AST borrowcheck.
//
// Fortunately, we don't need to know the actual execution order.
// It suffices to know the 'worst case' order with respect to yields.
// Specifically, we need to know the highest 'expr_and_pat_count'
// that we could assign to the yield expression. To do this,
// we pick the greater of the two values from the left-hand
// and right-hand expressions. This makes us overly conservative
// about what types could possibly live across yield points,
// but we will never fail to detect that a type does actually
// live across a yield point. The latter part is critical -
// we're already overly conservative about what types will live
// across yield points, as the generated MIR will determine
// when things are actually live. However, for typecheck to work
// properly, we can't miss any types.
match expr.kind {
// Conditional or repeating scopes are always terminating
// scopes, meaning that temporaries cannot outlive them.
@ -360,55 +293,42 @@ fn resolve_expr<'tcx>(
let body = visitor.tcx.hir_body(body);
visitor.visit_body(body);
}
// Ordinarily, we can rely on the visit order of HIR intravisit
// to correspond to the actual execution order of statements.
// However, there's a weird corner case with compound assignment
// operators (e.g. `a += b`). The evaluation order depends on whether
// or not the operator is overloaded (e.g. whether or not a trait
// like AddAssign is implemented).
//
// For primitive types (which, despite having a trait impl, don't actually
// end up calling it), the evaluation order is right-to-left. For example,
// the following code snippet:
//
// let y = &mut 0;
// *{println!("LHS!"); y} += {println!("RHS!"); 1};
//
// will print:
//
// RHS!
// LHS!
//
// However, if the operator is used on a non-primitive type,
// the evaluation order will be left-to-right, since the operator
// actually get desugared to a method call. For example, this
// nearly identical code snippet:
//
// let y = &mut String::new();
// *{println!("LHS String"); y} += {println!("RHS String"); "hi"};
//
// will print:
// LHS String
// RHS String
//
// To determine the actual execution order, we need to perform
// trait resolution. Fortunately, we don't need to know the actual execution order.
hir::ExprKind::AssignOp(_, left_expr, right_expr) => {
debug!(
"resolve_expr - enabling pessimistic_yield, was previously {}",
prev_pessimistic
);
let start_point = visitor.fixup_scopes.len();
visitor.pessimistic_yield = true;
// If the actual execution order turns out to be right-to-left,
// then we're fine. However, if the actual execution order is left-to-right,
// then we'll assign too low a count to any `yield` expressions
// we encounter in 'right_expression' - they should really occur after all of the
// expressions in 'left_expression'.
visitor.visit_expr(right_expr);
visitor.pessimistic_yield = prev_pessimistic;
debug!("resolve_expr - restoring pessimistic_yield to {}", prev_pessimistic);
visitor.visit_expr(left_expr);
debug!("resolve_expr - fixing up counts to {}", visitor.expr_and_pat_count);
// Remove and process any scopes pushed by the visitor
let target_scopes = visitor.fixup_scopes.drain(start_point..);
for scope in target_scopes {
let yield_data =
visitor.scope_tree.yield_in_scope.get_mut(&scope).unwrap().last_mut().unwrap();
let count = yield_data.expr_and_pat_count;
let span = yield_data.span;
// expr_and_pat_count never decreases. Since we recorded counts in yield_in_scope
// before walking the left-hand side, it should be impossible for the recorded
// count to be greater than the left-hand side count.
if count > visitor.expr_and_pat_count {
bug!(
"Encountered greater count {} at span {:?} - expected no greater than {}",
count,
span,
visitor.expr_and_pat_count
);
}
let new_count = visitor.expr_and_pat_count;
debug!(
"resolve_expr - increasing count for scope {:?} from {} to {} at span {:?}",
scope, count, new_count, span
);
yield_data.expr_and_pat_count = new_count;
}
}
hir::ExprKind::If(cond, then, Some(otherwise)) => {
@ -453,43 +373,6 @@ fn resolve_expr<'tcx>(
_ => intravisit::walk_expr(visitor, expr),
}
visitor.expr_and_pat_count += 1;
debug!("resolve_expr post-increment {}, expr = {:?}", visitor.expr_and_pat_count, expr);
if let hir::ExprKind::Yield(_, source) = &expr.kind {
// Mark this expr's scope and all parent scopes as containing `yield`.
let mut scope = Scope { local_id: expr.hir_id.local_id, data: ScopeData::Node };
loop {
let data = YieldData {
span: expr.span,
expr_and_pat_count: visitor.expr_and_pat_count,
source: *source,
};
match visitor.scope_tree.yield_in_scope.get_mut(&scope) {
Some(yields) => yields.push(data),
None => {
visitor.scope_tree.yield_in_scope.insert(scope, vec![data]);
}
}
if visitor.pessimistic_yield {
debug!("resolve_expr in pessimistic_yield - marking scope {:?} for fixup", scope);
visitor.fixup_scopes.push(scope);
}
// Keep traversing up while we can.
match visitor.scope_tree.parent_map.get(&scope) {
// Don't cross from closure bodies to their parent.
Some(&superscope) => match superscope.data {
ScopeData::CallSite => break,
_ => scope = superscope,
},
None => break,
}
}
}
visitor.cx = prev_cx;
}
@ -612,8 +495,8 @@ fn resolve_local<'tcx>(
}
}
// Make sure we visit the initializer first, so expr_and_pat_count remains correct.
// The correct order, as shared between coroutine_interior, drop_ranges and intravisitor,
// Make sure we visit the initializer first.
// The correct order, as shared between drop_ranges and intravisitor,
// is to walk initializer, followed by pattern bindings, finally followed by the `else` block.
if let Some(expr) = init {
visitor.visit_expr(expr);
@ -798,16 +681,7 @@ impl<'tcx> ScopeResolutionVisitor<'tcx> {
}
fn enter_body(&mut self, hir_id: hir::HirId, f: impl FnOnce(&mut Self)) {
// Save all state that is specific to the outer function
// body. These will be restored once down below, once we've
// visited the body.
let outer_ec = mem::replace(&mut self.expr_and_pat_count, 0);
let outer_cx = self.cx;
// The 'pessimistic yield' flag is set to true when we are
// processing a `+=` statement and have to make pessimistic
// control flow assumptions. This doesn't apply to nested
// bodies within the `+=` statements. See #69307.
let outer_pessimistic_yield = mem::replace(&mut self.pessimistic_yield, false);
self.enter_scope(Scope { local_id: hir_id.local_id, data: ScopeData::CallSite });
self.enter_scope(Scope { local_id: hir_id.local_id, data: ScopeData::Arguments });
@ -815,9 +689,7 @@ impl<'tcx> ScopeResolutionVisitor<'tcx> {
f(self);
// Restore context we had at the start.
self.expr_and_pat_count = outer_ec;
self.cx = outer_cx;
self.pessimistic_yield = outer_pessimistic_yield;
}
}
@ -919,10 +791,7 @@ pub(crate) fn region_scope_tree(tcx: TyCtxt<'_>, def_id: DefId) -> &ScopeTree {
let mut visitor = ScopeResolutionVisitor {
tcx,
scope_tree: ScopeTree::default(),
expr_and_pat_count: 0,
cx: Context { parent: None, var_parent: None },
pessimistic_yield: false,
fixup_scopes: vec![],
extended_super_lets: Default::default(),
};

View file

@ -2338,7 +2338,7 @@ fn lint_redundant_lifetimes<'tcx>(
lifetimes.push(ty::Region::new_late_param(tcx, owner_id.to_def_id(), kind));
}
}
lifetimes.retain(|candidate| candidate.has_name());
lifetimes.retain(|candidate| candidate.is_named(tcx));
// Keep track of lifetimes which have already been replaced with other lifetimes.
// This makes sure that if `'a = 'b = 'c`, we don't say `'c` should be replaced by

View file

@ -654,7 +654,7 @@ fn infringing_fields_error<'tcx>(
.or_default()
.push(origin.span());
if let ty::RegionKind::ReEarlyParam(ebr) = b.kind()
&& ebr.has_name()
&& ebr.is_named()
{
bounds.push((b.to_string(), a.to_string(), None));
}

View file

@ -578,13 +578,7 @@ fn get_new_lifetime_name<'tcx>(
let existing_lifetimes = tcx
.collect_referenced_late_bound_regions(poly_trait_ref)
.into_iter()
.filter_map(|lt| {
if let ty::BoundRegionKind::Named(_, name) = lt {
Some(name.as_str().to_string())
} else {
None
}
})
.filter_map(|lt| lt.get_name(tcx).map(|name| name.as_str().to_string()))
.chain(generics.params.iter().filter_map(|param| {
if let hir::GenericParamKind::Lifetime { .. } = &param.kind {
Some(param.name.ident().as_str().to_string())

View file

@ -279,19 +279,13 @@ fn resolve_bound_vars(tcx: TyCtxt<'_>, local_def_id: hir::OwnerId) -> ResolveBou
rbv
}
fn late_arg_as_bound_arg<'tcx>(
tcx: TyCtxt<'tcx>,
param: &GenericParam<'tcx>,
) -> ty::BoundVariableKind {
fn late_arg_as_bound_arg<'tcx>(param: &GenericParam<'tcx>) -> ty::BoundVariableKind {
let def_id = param.def_id.to_def_id();
let name = tcx.item_name(def_id);
match param.kind {
GenericParamKind::Lifetime { .. } => {
ty::BoundVariableKind::Region(ty::BoundRegionKind::Named(def_id, name))
}
GenericParamKind::Type { .. } => {
ty::BoundVariableKind::Ty(ty::BoundTyKind::Param(def_id, name))
ty::BoundVariableKind::Region(ty::BoundRegionKind::Named(def_id))
}
GenericParamKind::Type { .. } => ty::BoundVariableKind::Ty(ty::BoundTyKind::Param(def_id)),
GenericParamKind::Const { .. } => ty::BoundVariableKind::Const,
}
}
@ -302,10 +296,10 @@ fn late_arg_as_bound_arg<'tcx>(
fn generic_param_def_as_bound_arg(param: &ty::GenericParamDef) -> ty::BoundVariableKind {
match param.kind {
ty::GenericParamDefKind::Lifetime => {
ty::BoundVariableKind::Region(ty::BoundRegionKind::Named(param.def_id, param.name))
ty::BoundVariableKind::Region(ty::BoundRegionKind::Named(param.def_id))
}
ty::GenericParamDefKind::Type { .. } => {
ty::BoundVariableKind::Ty(ty::BoundTyKind::Param(param.def_id, param.name))
ty::BoundVariableKind::Ty(ty::BoundTyKind::Param(param.def_id))
}
ty::GenericParamDefKind::Const { .. } => ty::BoundVariableKind::Const,
}
@ -386,7 +380,7 @@ impl<'a, 'tcx> BoundVarContext<'a, 'tcx> {
trait_ref.bound_generic_params.iter().enumerate().map(|(late_bound_idx, param)| {
let arg = ResolvedArg::late(initial_bound_vars + late_bound_idx as u32, param);
bound_vars.insert(param.def_id, arg);
late_arg_as_bound_arg(self.tcx, param)
late_arg_as_bound_arg(param)
});
binders.extend(binders_iter);
@ -485,7 +479,7 @@ impl<'a, 'tcx> Visitor<'tcx> for BoundVarContext<'a, 'tcx> {
.map(|(late_bound_idx, param)| {
(
(param.def_id, ResolvedArg::late(late_bound_idx as u32, param)),
late_arg_as_bound_arg(self.tcx, param),
late_arg_as_bound_arg(param),
)
})
.unzip();
@ -718,7 +712,7 @@ impl<'a, 'tcx> Visitor<'tcx> for BoundVarContext<'a, 'tcx> {
.map(|(late_bound_idx, param)| {
(
(param.def_id, ResolvedArg::late(late_bound_idx as u32, param)),
late_arg_as_bound_arg(self.tcx, param),
late_arg_as_bound_arg(param),
)
})
.unzip();
@ -748,7 +742,7 @@ impl<'a, 'tcx> Visitor<'tcx> for BoundVarContext<'a, 'tcx> {
.map(|(late_bound_idx, param)| {
(
(param.def_id, ResolvedArg::late(late_bound_idx as u32, param)),
late_arg_as_bound_arg(self.tcx, param),
late_arg_as_bound_arg(param),
)
})
.unzip();
@ -957,7 +951,7 @@ impl<'a, 'tcx> Visitor<'tcx> for BoundVarContext<'a, 'tcx> {
.map(|(late_bound_idx, param)| {
(
(param.def_id, ResolvedArg::late(late_bound_idx as u32, param)),
late_arg_as_bound_arg(self.tcx, param),
late_arg_as_bound_arg(param),
)
})
.unzip();
@ -1171,7 +1165,7 @@ impl<'a, 'tcx> BoundVarContext<'a, 'tcx> {
matches!(param.kind, GenericParamKind::Lifetime { .. })
&& self.tcx.is_late_bound(param.hir_id)
})
.map(|param| late_arg_as_bound_arg(self.tcx, param))
.map(|param| late_arg_as_bound_arg(param))
.collect();
self.record_late_bound_vars(hir_id, binders);
let scope = Scope::Binder {

View file

@ -12,7 +12,7 @@ use rustc_middle::ty::{
self as ty, IsSuggestable, Ty, TyCtxt, TypeSuperVisitable, TypeVisitable, TypeVisitableExt,
TypeVisitor, Upcast,
};
use rustc_span::{ErrorGuaranteed, Ident, Span, Symbol, kw, sym};
use rustc_span::{ErrorGuaranteed, Ident, Span, kw, sym};
use rustc_trait_selection::traits;
use smallvec::SmallVec;
use tracing::{debug, instrument};
@ -888,7 +888,7 @@ impl<'tcx> dyn HirTyLowerer<'tcx> + '_ {
ty::INNERMOST,
ty::BoundRegion {
var: ty::BoundVar::from_usize(num_bound_vars),
kind: ty::BoundRegionKind::Named(param.def_id, param.name),
kind: ty::BoundRegionKind::Named(param.def_id),
},
)
.into(),
@ -1006,12 +1006,12 @@ fn check_assoc_const_binding_type<'tcx>(
ty_note,
}));
}
for (var_def_id, var_name) in collector.vars {
for var_def_id in collector.vars {
guar.get_or_insert(cx.dcx().emit_err(
crate::errors::EscapingBoundVarInTyOfAssocConstBinding {
span: assoc_const.span,
assoc_const,
var_name,
var_name: cx.tcx().item_name(var_def_id),
var_def_kind: tcx.def_descr(var_def_id),
var_defined_here_label: tcx.def_ident_span(var_def_id).unwrap(),
ty_note,
@ -1026,7 +1026,7 @@ fn check_assoc_const_binding_type<'tcx>(
struct GenericParamAndBoundVarCollector<'a, 'tcx> {
cx: &'a dyn HirTyLowerer<'tcx>,
params: FxIndexSet<u32>,
vars: FxIndexSet<(DefId, Symbol)>,
vars: FxIndexSet<DefId>,
depth: ty::DebruijnIndex,
}
@ -1050,7 +1050,7 @@ impl<'tcx> TypeVisitor<TyCtxt<'tcx>> for GenericParamAndBoundVarCollector<'_, 't
}
ty::Bound(db, bt) if *db >= self.depth => {
self.vars.insert(match bt.kind {
ty::BoundTyKind::Param(def_id, name) => (def_id, name),
ty::BoundTyKind::Param(def_id) => def_id,
ty::BoundTyKind::Anon => {
let reported = self
.cx
@ -1073,7 +1073,7 @@ impl<'tcx> TypeVisitor<TyCtxt<'tcx>> for GenericParamAndBoundVarCollector<'_, 't
}
ty::ReBound(db, br) if db >= self.depth => {
self.vars.insert(match br.kind {
ty::BoundRegionKind::Named(def_id, name) => (def_id, name),
ty::BoundRegionKind::Named(def_id) => def_id,
ty::BoundRegionKind::Anon | ty::BoundRegionKind::ClosureEnv => {
let guar = self
.cx
@ -1081,6 +1081,7 @@ impl<'tcx> TypeVisitor<TyCtxt<'tcx>> for GenericParamAndBoundVarCollector<'_, 't
.delayed_bug(format!("unexpected bound region kind: {:?}", br.kind));
return ControlFlow::Break(guar);
}
ty::BoundRegionKind::NamedAnon(_) => bug!("only used for pretty printing"),
});
}
_ => {}

View file

@ -392,16 +392,14 @@ impl<'tcx> dyn HirTyLowerer<'tcx> + '_ {
#[instrument(level = "debug", skip(self), ret)]
pub fn lower_resolved_lifetime(&self, resolved: rbv::ResolvedArg) -> ty::Region<'tcx> {
let tcx = self.tcx();
let lifetime_name = |def_id| tcx.hir_name(tcx.local_def_id_to_hir_id(def_id));
match resolved {
rbv::ResolvedArg::StaticLifetime => tcx.lifetimes.re_static,
rbv::ResolvedArg::LateBound(debruijn, index, def_id) => {
let name = lifetime_name(def_id);
let br = ty::BoundRegion {
var: ty::BoundVar::from_u32(index),
kind: ty::BoundRegionKind::Named(def_id.to_def_id(), name),
kind: ty::BoundRegionKind::Named(def_id.to_def_id()),
};
ty::Region::new_bound(tcx, debruijn, br)
}
@ -415,11 +413,10 @@ impl<'tcx> dyn HirTyLowerer<'tcx> + '_ {
}
rbv::ResolvedArg::Free(scope, id) => {
let name = lifetime_name(id);
ty::Region::new_late_param(
tcx,
scope.to_def_id(),
ty::LateParamRegionKind::Named(id.to_def_id(), name),
ty::LateParamRegionKind::Named(id.to_def_id()),
)
// (*) -- not late-bound, won't change
@ -2070,10 +2067,9 @@ impl<'tcx> dyn HirTyLowerer<'tcx> + '_ {
let tcx = self.tcx();
match tcx.named_bound_var(hir_id) {
Some(rbv::ResolvedArg::LateBound(debruijn, index, def_id)) => {
let name = tcx.item_name(def_id.to_def_id());
let br = ty::BoundTy {
var: ty::BoundVar::from_u32(index),
kind: ty::BoundTyKind::Param(def_id.to_def_id(), name),
kind: ty::BoundTyKind::Param(def_id.to_def_id()),
};
Ty::new_bound(tcx, debruijn, br)
}
@ -2749,18 +2745,15 @@ impl<'tcx> dyn HirTyLowerer<'tcx> + '_ {
generate_err: impl Fn(&str) -> Diag<'cx>,
) {
for br in referenced_regions.difference(&constrained_regions) {
let br_name = match *br {
ty::BoundRegionKind::Named(_, kw::UnderscoreLifetime)
| ty::BoundRegionKind::Anon
| ty::BoundRegionKind::ClosureEnv => "an anonymous lifetime".to_string(),
ty::BoundRegionKind::Named(_, name) => format!("lifetime `{name}`"),
let br_name = if let Some(name) = br.get_name(self.tcx()) {
format!("lifetime `{name}`")
} else {
"an anonymous lifetime".to_string()
};
let mut err = generate_err(&br_name);
if let ty::BoundRegionKind::Named(_, kw::UnderscoreLifetime)
| ty::BoundRegionKind::Anon = *br
{
if !br.is_named(self.tcx()) {
// The only way for an anonymous lifetime to wind up
// in the return type but **also** be unconstrained is
// if it only appears in "associated types" in the

View file

@ -1744,13 +1744,13 @@ impl<R: Idx, C: Idx> SparseBitMatrix<R, C> {
#[inline]
fn num_words<T: Idx>(domain_size: T) -> usize {
(domain_size.index() + WORD_BITS - 1) / WORD_BITS
domain_size.index().div_ceil(WORD_BITS)
}
#[inline]
fn num_chunks<T: Idx>(domain_size: T) -> usize {
assert!(domain_size.index() > 0);
(domain_size.index() + CHUNK_BITS - 1) / CHUNK_BITS
domain_size.index().div_ceil(CHUNK_BITS)
}
#[inline]

View file

@ -655,7 +655,7 @@ impl<'tcx> fmt::Display for GenericKind<'tcx> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match *self {
GenericKind::Param(ref p) => write!(f, "{p}"),
GenericKind::Placeholder(ref p) => write!(f, "{p:?}"),
GenericKind::Placeholder(ref p) => write!(f, "{p}"),
GenericKind::Alias(ref p) => write!(f, "{p}"),
}
}

View file

@ -136,7 +136,7 @@ enum ParamKind {
// Early-bound var.
Early(Symbol, u32),
// Late-bound var on function, not within a binder. We can capture these.
Free(DefId, Symbol),
Free(DefId),
// Late-bound var in a binder. We can't capture these yet.
Late,
}
@ -156,12 +156,11 @@ fn check_fn(tcx: TyCtxt<'_>, parent_def_id: LocalDefId) {
}
for bound_var in sig.bound_vars() {
let ty::BoundVariableKind::Region(ty::BoundRegionKind::Named(def_id, name)) = bound_var
else {
let ty::BoundVariableKind::Region(ty::BoundRegionKind::Named(def_id)) = bound_var else {
span_bug!(tcx.def_span(parent_def_id), "unexpected non-lifetime binder on fn sig");
};
in_scope_parameters.insert(def_id, ParamKind::Free(def_id, name));
in_scope_parameters.insert(def_id, ParamKind::Free(def_id));
}
let sig = tcx.liberate_late_bound_regions(parent_def_id.to_def_id(), sig);
@ -215,8 +214,8 @@ where
for arg in t.bound_vars() {
let arg: ty::BoundVariableKind = arg;
match arg {
ty::BoundVariableKind::Region(ty::BoundRegionKind::Named(def_id, ..))
| ty::BoundVariableKind::Ty(ty::BoundTyKind::Param(def_id, _)) => {
ty::BoundVariableKind::Region(ty::BoundRegionKind::Named(def_id))
| ty::BoundVariableKind::Ty(ty::BoundTyKind::Param(def_id)) => {
added.push(def_id);
let unique = self.in_scope_parameters.insert(def_id, ParamKind::Late);
assert_eq!(unique, None);
@ -316,10 +315,10 @@ where
self.tcx,
ty::EarlyParamRegion { name, index },
),
ParamKind::Free(def_id, name) => ty::Region::new_late_param(
ParamKind::Free(def_id) => ty::Region::new_late_param(
self.tcx,
self.parent_def_id.to_def_id(),
ty::LateParamRegionKind::Named(def_id, name),
ty::LateParamRegionKind::Named(def_id),
),
// Totally ignore late bound args from binders.
ParamKind::Late => return true,
@ -463,13 +462,10 @@ fn extract_def_id_from_arg<'tcx>(
match arg.kind() {
ty::GenericArgKind::Lifetime(re) => match re.kind() {
ty::ReEarlyParam(ebr) => generics.region_param(ebr, tcx).def_id,
ty::ReBound(
_,
ty::BoundRegion { kind: ty::BoundRegionKind::Named(def_id, ..), .. },
)
ty::ReBound(_, ty::BoundRegion { kind: ty::BoundRegionKind::Named(def_id), .. })
| ty::ReLateParam(ty::LateParamRegion {
scope: _,
kind: ty::LateParamRegionKind::Named(def_id, ..),
kind: ty::LateParamRegionKind::Named(def_id),
}) => def_id,
_ => unreachable!(),
},
@ -532,13 +528,10 @@ impl<'tcx> TypeRelation<TyCtxt<'tcx>> for FunctionalVariances<'tcx> {
) -> RelateResult<'tcx, ty::Region<'tcx>> {
let def_id = match a.kind() {
ty::ReEarlyParam(ebr) => self.generics.region_param(ebr, self.tcx).def_id,
ty::ReBound(
_,
ty::BoundRegion { kind: ty::BoundRegionKind::Named(def_id, ..), .. },
)
ty::ReBound(_, ty::BoundRegion { kind: ty::BoundRegionKind::Named(def_id), .. })
| ty::ReLateParam(ty::LateParamRegion {
scope: _,
kind: ty::LateParamRegionKind::Named(def_id, ..),
kind: ty::LateParamRegionKind::Named(def_id),
}) => def_id,
_ => {
return Ok(a);

View file

@ -1,5 +1,4 @@
use rustc_hir::{Expr, ExprKind, HirId, Stmt, StmtKind};
use rustc_middle::query::Key;
use rustc_middle::ty::{self, Ty};
use rustc_session::{declare_lint, declare_lint_pass};
@ -69,7 +68,7 @@ impl<'tcx> LateLintPass<'tcx> for MapUnitFn {
.span_of_impl(*id)
.unwrap_or(default_span),
argument_label: args[0].span,
map_label: arg_ty.default_span(cx.tcx),
map_label: span,
suggestion: path.ident.span,
replace: "for_each".to_string(),
},
@ -88,7 +87,7 @@ impl<'tcx> LateLintPass<'tcx> for MapUnitFn {
.span_of_impl(*id)
.unwrap_or(default_span),
argument_label: args[0].span,
map_label: arg_ty.default_span(cx.tcx),
map_label: span,
suggestion: path.ident.span,
replace: "for_each".to_string(),
},

View file

@ -168,7 +168,7 @@ impl EarlyLintPass for NonCamelCaseTypes {
fn check_item(&mut self, cx: &EarlyContext<'_>, it: &ast::Item) {
let has_repr_c = matches!(
AttributeParser::parse_limited(cx.sess(), &it.attrs, sym::repr, it.span, it.id),
Some(Attribute::Parsed(AttributeKind::Repr(r))) if r.iter().any(|(r, _)| r == &ReprAttr::ReprC)
Some(Attribute::Parsed(AttributeKind::Repr { reprs, ..})) if reprs.iter().any(|(r, _)| r == &ReprAttr::ReprC)
);
if has_repr_c {

View file

@ -21,7 +21,8 @@ fn print_fields(name: &Ident, fields: &Fields) -> (TokenStream, TokenStream, Tok
__p.word_space(",");
}
__p.word(#string_name);
__p.word_space(":");
__p.word(":");
__p.nbsp();
__printed_anything = true;
}
#name.print_attribute(__p);

View file

@ -7,7 +7,6 @@
//! [rustc dev guide]: https://rustc-dev-guide.rust-lang.org/borrow_check.html
use std::fmt;
use std::ops::Deref;
use rustc_data_structures::fx::FxIndexMap;
use rustc_data_structures::unord::UnordMap;
@ -228,82 +227,6 @@ pub struct ScopeTree {
/// This information is used later for linting to identify locals and
/// temporary values that will receive backwards-incompatible drop orders.
pub backwards_incompatible_scope: UnordMap<hir::ItemLocalId, Scope>,
/// If there are any `yield` nested within a scope, this map
/// stores the `Span` of the last one and its index in the
/// postorder of the Visitor traversal on the HIR.
///
/// HIR Visitor postorder indexes might seem like a peculiar
/// thing to care about. but it turns out that HIR bindings
/// and the temporary results of HIR expressions are never
/// storage-live at the end of HIR nodes with postorder indexes
/// lower than theirs, and therefore don't need to be suspended
/// at yield-points at these indexes.
///
/// For an example, suppose we have some code such as:
/// ```rust,ignore (example)
/// foo(f(), yield y, bar(g()))
/// ```
///
/// With the HIR tree (calls numbered for expository purposes)
///
/// ```text
/// Call#0(foo, [Call#1(f), Yield(y), Call#2(bar, Call#3(g))])
/// ```
///
/// Obviously, the result of `f()` was created before the yield
/// (and therefore needs to be kept valid over the yield) while
/// the result of `g()` occurs after the yield (and therefore
/// doesn't). If we want to infer that, we can look at the
/// postorder traversal:
/// ```plain,ignore
/// `foo` `f` Call#1 `y` Yield `bar` `g` Call#3 Call#2 Call#0
/// ```
///
/// In which we can easily see that `Call#1` occurs before the yield,
/// and `Call#3` after it.
///
/// To see that this method works, consider:
///
/// Let `D` be our binding/temporary and `U` be our other HIR node, with
/// `HIR-postorder(U) < HIR-postorder(D)`. Suppose, as in our example,
/// U is the yield and D is one of the calls.
/// Let's show that `D` is storage-dead at `U`.
///
/// Remember that storage-live/storage-dead refers to the state of
/// the *storage*, and does not consider moves/drop flags.
///
/// Then:
///
/// 1. From the ordering guarantee of HIR visitors (see
/// `rustc_hir::intravisit`), `D` does not dominate `U`.
///
/// 2. Therefore, `D` is *potentially* storage-dead at `U` (because
/// we might visit `U` without ever getting to `D`).
///
/// 3. However, we guarantee that at each HIR point, each
/// binding/temporary is always either always storage-live
/// or always storage-dead. This is what is being guaranteed
/// by `terminating_scopes` including all blocks where the
/// count of executions is not guaranteed.
///
/// 4. By `2.` and `3.`, `D` is *statically* storage-dead at `U`,
/// QED.
///
/// This property ought to not on (3) in an essential way -- it
/// is probably still correct even if we have "unrestricted" terminating
/// scopes. However, why use the complicated proof when a simple one
/// works?
///
/// A subtle thing: `box` expressions, such as `box (&x, yield 2, &y)`. It
/// might seem that a `box` expression creates a `Box<T>` temporary
/// when it *starts* executing, at `HIR-preorder(BOX-EXPR)`. That might
/// be true in the MIR desugaring, but it is not important in the semantics.
///
/// The reason is that semantically, until the `box` expression returns,
/// the values are still owned by their containing expressions. So
/// we'll see that `&x`.
pub yield_in_scope: UnordMap<Scope, Vec<YieldData>>,
}
/// See the `rvalue_candidates` field for more information on rvalue
@ -316,15 +239,6 @@ pub struct RvalueCandidate {
pub lifetime: Option<Scope>,
}
#[derive(Debug, Copy, Clone, HashStable)]
pub struct YieldData {
/// The `Span` of the yield.
pub span: Span,
/// The number of expressions and patterns appearing before the `yield` in the body, plus one.
pub expr_and_pat_count: usize,
pub source: hir::YieldSource,
}
impl ScopeTree {
pub fn record_scope_parent(&mut self, child: Scope, parent: Option<Scope>) {
debug!("{:?}.parent = {:?}", child, parent);
@ -380,10 +294,4 @@ impl ScopeTree {
true
}
/// Checks whether the given scope contains a `yield`. If so,
/// returns `Some(YieldData)`. If not, returns `None`.
pub fn yield_in_scope(&self, scope: Scope) -> Option<&[YieldData]> {
self.yield_in_scope.get(&scope).map(Deref::deref)
}
}

View file

@ -3279,10 +3279,7 @@ impl<'tcx> TyCtxt<'tcx> {
return ty::Region::new_late_param(
self,
new_parent.to_def_id(),
ty::LateParamRegionKind::Named(
lbv.to_def_id(),
self.item_name(lbv.to_def_id()),
),
ty::LateParamRegionKind::Named(lbv.to_def_id()),
);
}
resolve_bound_vars::ResolvedArg::Error(guar) => {

View file

@ -474,7 +474,7 @@ impl<'tcx> rustc_type_ir::Flags for Ty<'tcx> {
impl EarlyParamRegion {
/// Does this early bound region have a name? Early bound regions normally
/// always have names except when using anonymous lifetimes (`'_`).
pub fn has_name(&self) -> bool {
pub fn is_named(&self) -> bool {
self.name != kw::UnderscoreLifetime
}
}
@ -1525,7 +1525,8 @@ impl<'tcx> TyCtxt<'tcx> {
field_shuffle_seed ^= user_seed;
}
if let Some(reprs) = attr::find_attr!(self.get_all_attrs(did), AttributeKind::Repr(r) => r)
if let Some(reprs) =
attr::find_attr!(self.get_all_attrs(did), AttributeKind::Repr { reprs, .. } => reprs)
{
for (r, _) in reprs {
flags.insert(match *r {
@ -1566,10 +1567,6 @@ impl<'tcx> TyCtxt<'tcx> {
max_align = max_align.max(Some(align));
ReprFlags::empty()
}
attr::ReprEmpty => {
/* skip these, they're just for diagnostics */
ReprFlags::empty()
}
});
}
}

View file

@ -11,7 +11,7 @@ use rustc_data_structures::unord::UnordMap;
use rustc_hir as hir;
use rustc_hir::LangItem;
use rustc_hir::def::{self, CtorKind, DefKind, Namespace};
use rustc_hir::def_id::{CRATE_DEF_ID, DefIdMap, DefIdSet, LOCAL_CRATE, ModDefId};
use rustc_hir::def_id::{DefIdMap, DefIdSet, LOCAL_CRATE, ModDefId};
use rustc_hir::definitions::{DefKey, DefPathDataName};
use rustc_macros::{Lift, extension};
use rustc_session::Limit;
@ -795,9 +795,9 @@ pub trait PrettyPrinter<'tcx>: Printer<'tcx> + fmt::Write {
ty::BoundTyKind::Anon => {
rustc_type_ir::debug_bound_var(self, debruijn, bound_ty.var)?
}
ty::BoundTyKind::Param(_, s) => match self.should_print_verbose() {
ty::BoundTyKind::Param(def_id) => match self.should_print_verbose() {
true => p!(write("{:?}", ty.kind())),
false => p!(write("{s}")),
false => p!(write("{}", self.tcx().item_name(def_id))),
},
},
ty::Adt(def, args) => {
@ -822,13 +822,7 @@ pub trait PrettyPrinter<'tcx>: Printer<'tcx> + fmt::Write {
ty::Alias(ty::Projection | ty::Inherent | ty::Free, ref data) => {
p!(print(data))
}
ty::Placeholder(placeholder) => match placeholder.bound.kind {
ty::BoundTyKind::Anon => p!(write("{placeholder:?}")),
ty::BoundTyKind::Param(_, name) => match self.should_print_verbose() {
true => p!(write("{:?}", ty.kind())),
false => p!(write("{name}")),
},
},
ty::Placeholder(placeholder) => p!(print(placeholder)),
ty::Alias(ty::Opaque, ty::AliasTy { def_id, args, .. }) => {
// We use verbose printing in 'NO_QUERIES' mode, to
// avoid needing to call `predicates_of`. This should
@ -2551,14 +2545,14 @@ impl<'tcx> PrettyPrinter<'tcx> for FmtPrinter<'_, 'tcx> {
let identify_regions = self.tcx.sess.opts.unstable_opts.identify_regions;
match region.kind() {
ty::ReEarlyParam(ref data) => data.has_name(),
ty::ReEarlyParam(ref data) => data.is_named(),
ty::ReLateParam(ty::LateParamRegion { kind, .. }) => kind.is_named(),
ty::ReLateParam(ty::LateParamRegion { kind, .. }) => kind.is_named(self.tcx),
ty::ReBound(_, ty::BoundRegion { kind: br, .. })
| ty::RePlaceholder(ty::Placeholder {
bound: ty::BoundRegion { kind: br, .. }, ..
}) => {
if br.is_named() {
if br.is_named(self.tcx) {
return true;
}
@ -2626,7 +2620,7 @@ impl<'tcx> FmtPrinter<'_, 'tcx> {
return Ok(());
}
ty::ReLateParam(ty::LateParamRegion { kind, .. }) => {
if let Some(name) = kind.get_name() {
if let Some(name) = kind.get_name(self.tcx) {
p!(write("{}", name));
return Ok(());
}
@ -2635,9 +2629,7 @@ impl<'tcx> FmtPrinter<'_, 'tcx> {
| ty::RePlaceholder(ty::Placeholder {
bound: ty::BoundRegion { kind: br, .. }, ..
}) => {
if let ty::BoundRegionKind::Named(_, name) = br
&& br.is_named()
{
if let Some(name) = br.get_name(self.tcx) {
p!(write("{}", name));
return Ok(());
}
@ -2844,56 +2836,23 @@ impl<'tcx> FmtPrinter<'_, 'tcx> {
let mut name = |lifetime_idx: Option<ty::DebruijnIndex>,
binder_level_idx: ty::DebruijnIndex,
br: ty::BoundRegion| {
let (name, kind) = match br.kind {
ty::BoundRegionKind::Anon | ty::BoundRegionKind::ClosureEnv => {
let name = next_name(self);
if let Some(lt_idx) = lifetime_idx {
if lt_idx > binder_level_idx {
let kind =
ty::BoundRegionKind::Named(CRATE_DEF_ID.to_def_id(), name);
return ty::Region::new_bound(
tcx,
ty::INNERMOST,
ty::BoundRegion { var: br.var, kind },
);
}
}
(name, ty::BoundRegionKind::Named(CRATE_DEF_ID.to_def_id(), name))
}
ty::BoundRegionKind::Named(def_id, kw::UnderscoreLifetime) => {
let name = next_name(self);
if let Some(lt_idx) = lifetime_idx {
if lt_idx > binder_level_idx {
let kind = ty::BoundRegionKind::Named(def_id, name);
return ty::Region::new_bound(
tcx,
ty::INNERMOST,
ty::BoundRegion { var: br.var, kind },
);
}
}
(name, ty::BoundRegionKind::Named(def_id, name))
}
ty::BoundRegionKind::Named(_, name) => {
if let Some(lt_idx) = lifetime_idx {
if lt_idx > binder_level_idx {
let kind = br.kind;
return ty::Region::new_bound(
tcx,
ty::INNERMOST,
ty::BoundRegion { var: br.var, kind },
);
}
}
(name, br.kind)
}
let (name, kind) = if let Some(name) = br.kind.get_name(tcx) {
(name, br.kind)
} else {
let name = next_name(self);
(name, ty::BoundRegionKind::NamedAnon(name))
};
if let Some(lt_idx) = lifetime_idx {
if lt_idx > binder_level_idx {
return ty::Region::new_bound(
tcx,
ty::INNERMOST,
ty::BoundRegion { var: br.var, kind },
);
}
}
// Unconditionally render `unsafe<>`.
if !trim_path || mode == WrapBinderMode::Unsafe {
start_or_continue(self, mode.start_str(), ", ");
@ -2960,13 +2919,15 @@ impl<'tcx> FmtPrinter<'_, 'tcx> {
T: TypeFoldable<TyCtxt<'tcx>>,
{
struct RegionNameCollector<'tcx> {
tcx: TyCtxt<'tcx>,
used_region_names: FxHashSet<Symbol>,
type_collector: SsoHashSet<Ty<'tcx>>,
}
impl<'tcx> RegionNameCollector<'tcx> {
fn new() -> Self {
fn new(tcx: TyCtxt<'tcx>) -> Self {
RegionNameCollector {
tcx,
used_region_names: Default::default(),
type_collector: SsoHashSet::new(),
}
@ -2980,7 +2941,7 @@ impl<'tcx> FmtPrinter<'_, 'tcx> {
// Collect all named lifetimes. These allow us to prevent duplication
// of already existing lifetime names when introducing names for
// anonymous late-bound regions.
if let Some(name) = r.get_name() {
if let Some(name) = r.get_name(self.tcx) {
self.used_region_names.insert(name);
}
}
@ -2995,7 +2956,7 @@ impl<'tcx> FmtPrinter<'_, 'tcx> {
}
}
let mut collector = RegionNameCollector::new();
let mut collector = RegionNameCollector::new(self.tcx());
value.visit_with(&mut collector);
self.used_region_names = collector.used_region_names;
self.region_index = 0;
@ -3406,6 +3367,16 @@ define_print_and_forward_display! {
p!(write("{}", self.name))
}
ty::PlaceholderType {
match self.bound.kind {
ty::BoundTyKind::Anon => p!(write("{self:?}")),
ty::BoundTyKind::Param(def_id) => match cx.should_print_verbose() {
true => p!(write("{self:?}")),
false => p!(write("{}", cx.tcx().item_name(def_id))),
},
}
}
ty::ParamConst {
p!(write("{}", self.name))
}

View file

@ -163,37 +163,33 @@ impl<'tcx> Region<'tcx> {
*self.0.0
}
pub fn get_name(self) -> Option<Symbol> {
if self.has_name() {
match self.kind() {
ty::ReEarlyParam(ebr) => Some(ebr.name),
ty::ReBound(_, br) => br.kind.get_name(),
ty::ReLateParam(fr) => fr.kind.get_name(),
ty::ReStatic => Some(kw::StaticLifetime),
ty::RePlaceholder(placeholder) => placeholder.bound.kind.get_name(),
_ => None,
}
} else {
None
pub fn get_name(self, tcx: TyCtxt<'tcx>) -> Option<Symbol> {
match self.kind() {
ty::ReEarlyParam(ebr) => ebr.is_named().then_some(ebr.name),
ty::ReBound(_, br) => br.kind.get_name(tcx),
ty::ReLateParam(fr) => fr.kind.get_name(tcx),
ty::ReStatic => Some(kw::StaticLifetime),
ty::RePlaceholder(placeholder) => placeholder.bound.kind.get_name(tcx),
_ => None,
}
}
pub fn get_name_or_anon(self) -> Symbol {
match self.get_name() {
pub fn get_name_or_anon(self, tcx: TyCtxt<'tcx>) -> Symbol {
match self.get_name(tcx) {
Some(name) => name,
None => sym::anon,
}
}
/// Is this region named by the user?
pub fn has_name(self) -> bool {
pub fn is_named(self, tcx: TyCtxt<'tcx>) -> bool {
match self.kind() {
ty::ReEarlyParam(ebr) => ebr.has_name(),
ty::ReBound(_, br) => br.kind.is_named(),
ty::ReLateParam(fr) => fr.kind.is_named(),
ty::ReEarlyParam(ebr) => ebr.is_named(),
ty::ReBound(_, br) => br.kind.is_named(tcx),
ty::ReLateParam(fr) => fr.kind.is_named(tcx),
ty::ReStatic => true,
ty::ReVar(..) => false,
ty::RePlaceholder(placeholder) => placeholder.bound.kind.is_named(),
ty::RePlaceholder(placeholder) => placeholder.bound.kind.is_named(tcx),
ty::ReErased => false,
ty::ReError(_) => false,
}
@ -313,7 +309,7 @@ impl<'tcx> Region<'tcx> {
Some(tcx.generics_of(binding_item).region_param(ebr, tcx).def_id)
}
ty::ReLateParam(ty::LateParamRegion {
kind: ty::LateParamRegionKind::Named(def_id, _),
kind: ty::LateParamRegionKind::Named(def_id),
..
}) => Some(def_id),
_ => None,
@ -371,11 +367,13 @@ pub enum LateParamRegionKind {
/// sake of diagnostics in `FnCtxt::sig_of_closure_with_expectation`.
Anon(u32),
/// Named region parameters for functions (a in &'a T)
/// An anonymous region parameter with a `Symbol` name.
///
/// The `DefId` is needed to distinguish free regions in
/// the event of shadowing.
Named(DefId, Symbol),
/// Used to give late-bound regions names for things like pretty printing.
NamedAnon(u32, Symbol),
/// Late-bound regions that appear in the AST.
Named(DefId),
/// Anonymous region for the implicit env pointer parameter
/// to a closure
@ -386,32 +384,30 @@ impl LateParamRegionKind {
pub fn from_bound(var: BoundVar, br: BoundRegionKind) -> LateParamRegionKind {
match br {
BoundRegionKind::Anon => LateParamRegionKind::Anon(var.as_u32()),
BoundRegionKind::Named(def_id, name) => LateParamRegionKind::Named(def_id, name),
BoundRegionKind::Named(def_id) => LateParamRegionKind::Named(def_id),
BoundRegionKind::ClosureEnv => LateParamRegionKind::ClosureEnv,
BoundRegionKind::NamedAnon(name) => LateParamRegionKind::NamedAnon(var.as_u32(), name),
}
}
pub fn is_named(&self) -> bool {
pub fn is_named(&self, tcx: TyCtxt<'_>) -> bool {
self.get_name(tcx).is_some()
}
pub fn get_name(&self, tcx: TyCtxt<'_>) -> Option<Symbol> {
match *self {
LateParamRegionKind::Named(_, name) => name != kw::UnderscoreLifetime,
_ => false,
}
}
pub fn get_name(&self) -> Option<Symbol> {
if self.is_named() {
match *self {
LateParamRegionKind::Named(_, name) => return Some(name),
_ => unreachable!(),
LateParamRegionKind::Named(def_id) => {
let name = tcx.item_name(def_id);
if name != kw::UnderscoreLifetime { Some(name) } else { None }
}
LateParamRegionKind::NamedAnon(_, name) => Some(name),
_ => None,
}
None
}
pub fn get_id(&self) -> Option<DefId> {
match *self {
LateParamRegionKind::Named(id, _) => Some(id),
LateParamRegionKind::Named(id) => Some(id),
_ => None,
}
}
@ -423,11 +419,13 @@ pub enum BoundRegionKind {
/// An anonymous region parameter for a given fn (&T)
Anon,
/// Named region parameters for functions (a in &'a T)
/// An anonymous region parameter with a `Symbol` name.
///
/// The `DefId` is needed to distinguish free regions in
/// the event of shadowing.
Named(DefId, Symbol),
/// Used to give late-bound regions names for things like pretty printing.
NamedAnon(Symbol),
/// Late-bound regions that appear in the AST.
Named(DefId),
/// Anonymous region for the implicit env pointer parameter
/// to a closure
@ -456,35 +454,35 @@ impl core::fmt::Debug for BoundRegion {
match self.kind {
BoundRegionKind::Anon => write!(f, "{:?}", self.var),
BoundRegionKind::ClosureEnv => write!(f, "{:?}.Env", self.var),
BoundRegionKind::Named(def, symbol) => {
write!(f, "{:?}.Named({:?}, {:?})", self.var, def, symbol)
BoundRegionKind::Named(def) => {
write!(f, "{:?}.Named({:?})", self.var, def)
}
BoundRegionKind::NamedAnon(symbol) => {
write!(f, "{:?}.NamedAnon({:?})", self.var, symbol)
}
}
}
}
impl BoundRegionKind {
pub fn is_named(&self) -> bool {
match *self {
BoundRegionKind::Named(_, name) => name != kw::UnderscoreLifetime,
_ => false,
}
pub fn is_named(&self, tcx: TyCtxt<'_>) -> bool {
self.get_name(tcx).is_some()
}
pub fn get_name(&self) -> Option<Symbol> {
if self.is_named() {
match *self {
BoundRegionKind::Named(_, name) => return Some(name),
_ => unreachable!(),
pub fn get_name(&self, tcx: TyCtxt<'_>) -> Option<Symbol> {
match *self {
BoundRegionKind::Named(def_id) => {
let name = tcx.item_name(def_id);
if name != kw::UnderscoreLifetime { Some(name) } else { None }
}
BoundRegionKind::NamedAnon(name) => Some(name),
_ => None,
}
None
}
pub fn get_id(&self) -> Option<DefId> {
match *self {
BoundRegionKind::Named(id, _) => Some(id),
BoundRegionKind::Named(id) => Some(id),
_ => None,
}
}

View file

@ -69,12 +69,11 @@ impl fmt::Debug for ty::BoundRegionKind {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match *self {
ty::BoundRegionKind::Anon => write!(f, "BrAnon"),
ty::BoundRegionKind::Named(did, name) => {
if did.is_crate_root() {
write!(f, "BrNamed({name})")
} else {
write!(f, "BrNamed({did:?}, {name})")
}
ty::BoundRegionKind::NamedAnon(name) => {
write!(f, "BrNamedAnon({name})")
}
ty::BoundRegionKind::Named(did) => {
write!(f, "BrNamed({did:?})")
}
ty::BoundRegionKind::ClosureEnv => write!(f, "BrEnv"),
}
@ -91,12 +90,11 @@ impl fmt::Debug for ty::LateParamRegionKind {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match *self {
ty::LateParamRegionKind::Anon(idx) => write!(f, "LateAnon({idx})"),
ty::LateParamRegionKind::Named(did, name) => {
if did.is_crate_root() {
write!(f, "LateNamed({name})")
} else {
write!(f, "LateNamed({did:?}, {name})")
}
ty::LateParamRegionKind::NamedAnon(idx, name) => {
write!(f, "LateNamedAnon({idx:?}, {name})")
}
ty::LateParamRegionKind::Named(did) => {
write!(f, "LateNamed({did:?})")
}
ty::LateParamRegionKind::ClosureEnv => write!(f, "LateEnv"),
}
@ -185,7 +183,7 @@ impl fmt::Debug for ty::BoundTy {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self.kind {
ty::BoundTyKind::Anon => write!(f, "{:?}", self.var),
ty::BoundTyKind::Param(_, sym) => write!(f, "{sym:?}"),
ty::BoundTyKind::Param(def_id) => write!(f, "{def_id:?}"),
}
}
}
@ -274,7 +272,6 @@ TrivialTypeTraversalImpls! {
crate::ty::BoundVar,
crate::ty::InferConst,
crate::ty::Placeholder<crate::ty::BoundRegion>,
crate::ty::Placeholder<crate::ty::BoundTy>,
crate::ty::Placeholder<ty::BoundVar>,
crate::ty::UserTypeAnnotationIndex,
crate::ty::ValTree<'tcx>,
@ -305,6 +302,7 @@ TrivialTypeTraversalAndLiftImpls! {
// tidy-alphabetical-start
crate::ty::ParamConst,
crate::ty::ParamTy,
crate::ty::Placeholder<crate::ty::BoundTy>,
crate::ty::instance::ReifyReason,
rustc_hir::def_id::DefId,
// tidy-alphabetical-end

View file

@ -400,7 +400,7 @@ impl<'tcx> rustc_type_ir::inherent::BoundVarLike<TyCtxt<'tcx>> for BoundTy {
#[derive(HashStable)]
pub enum BoundTyKind {
Anon,
Param(DefId, Symbol),
Param(DefId),
}
impl From<BoundVar> for BoundTy {
@ -2032,7 +2032,7 @@ mod size_asserts {
use super::*;
// tidy-alphabetical-start
static_assert_size!(ty::RegionKind<'_>, 24);
static_assert_size!(ty::RegionKind<'_>, 20);
static_assert_size!(ty::TyKind<'_>, 24);
// tidy-alphabetical-end
}

View file

@ -171,9 +171,6 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
this.diverge_from(block);
block = success;
// The `Box<T>` temporary created here is not a part of the HIR,
// and therefore is not considered during coroutine auto-trait
// determination. See the comment about `box` at `yield_in_scope`.
let result = this.local_decls.push(LocalDecl::new(expr.ty, expr_span));
this.cfg
.push(block, Statement::new(source_info, StatementKind::StorageLive(result)));

View file

@ -322,8 +322,8 @@ fn compute_copy_classes(ssa: &mut SsaLocals, body: &Body<'_>) {
// visited before `local`, and we just have to copy the representing local.
let head = copies[rhs];
// Do not unify two borrowed locals.
if borrowed_classes.contains(local) && borrowed_classes.contains(head) {
// Do not unify borrowed locals.
if borrowed_classes.contains(local) || borrowed_classes.contains(head) {
continue;
}

View file

@ -1,5 +1,3 @@
use std::cmp::Ordering;
use rustc_type_ir::data_structures::{HashMap, ensure_sufficient_stack};
use rustc_type_ir::inherent::*;
use rustc_type_ir::solve::{Goal, QueryInput};
@ -266,11 +264,15 @@ impl<'a, D: SolverDelegate<Interner = I>, I: Interner> Canonicalizer<'a, D, I> {
// See the rustc-dev-guide section about how we deal with universes
// during canonicalization in the new solver.
match self.canonicalize_mode {
// We try to deduplicate as many query calls as possible and hide
// all information which should not matter for the solver.
//
// For this we compress universes as much as possible.
CanonicalizeMode::Input { .. } => {}
// All placeholders and vars are canonicalized in the root universe.
CanonicalizeMode::Input { .. } => {
debug_assert!(
var_kinds.iter().all(|var| var.universe() == ty::UniverseIndex::ROOT),
"expected all vars to be canonicalized in root universe: {var_kinds:#?}"
);
let var_kinds = self.delegate.cx().mk_canonical_var_kinds(&var_kinds);
(ty::UniverseIndex::ROOT, var_kinds)
}
// When canonicalizing a response we map a universes already entered
// by the caller to the root universe and only return useful universe
// information for placeholders and inference variables created inside
@ -288,113 +290,10 @@ impl<'a, D: SolverDelegate<Interner = I>, I: Interner> Canonicalizer<'a, D, I> {
.map(|kind| kind.universe())
.max()
.unwrap_or(ty::UniverseIndex::ROOT);
let var_kinds = self.delegate.cx().mk_canonical_var_kinds(&var_kinds);
return (max_universe, var_kinds);
(max_universe, var_kinds)
}
}
// Given a `var_kinds` with existentials `En` and universals `Un` in
// universes `n`, this algorithm compresses them in place so that:
//
// - the new universe indices are as small as possible
// - we create a new universe if we would otherwise
// 1. put existentials from a different universe into the same one
// 2. put a placeholder in the same universe as an existential which cannot name it
//
// Let's walk through an example:
// - var_kinds: [E0, U1, E5, U2, E2, E6, U6], curr_compressed_uv: 0, next_orig_uv: 0
// - var_kinds: [E0, U1, E5, U2, E2, E6, U6], curr_compressed_uv: 0, next_orig_uv: 1
// - var_kinds: [E0, U1, E5, U2, E2, E6, U6], curr_compressed_uv: 1, next_orig_uv: 2
// - var_kinds: [E0, U1, E5, U1, E1, E6, U6], curr_compressed_uv: 1, next_orig_uv: 5
// - var_kinds: [E0, U1, E2, U1, E1, E6, U6], curr_compressed_uv: 2, next_orig_uv: 6
// - var_kinds: [E0, U1, E1, U1, E1, E3, U3], curr_compressed_uv: 2, next_orig_uv: -
//
// This algorithm runs in `O(mn)` where `n` is the number of different universes and
// `m` the number of variables. This should be fine as both are expected to be small.
let mut curr_compressed_uv = ty::UniverseIndex::ROOT;
let mut existential_in_new_uv = None;
let mut next_orig_uv = Some(ty::UniverseIndex::ROOT);
while let Some(orig_uv) = next_orig_uv.take() {
let mut update_uv = |var: &mut CanonicalVarKind<I>, orig_uv, is_existential| {
let uv = var.universe();
match uv.cmp(&orig_uv) {
Ordering::Less => (), // Already updated
Ordering::Equal => {
if is_existential {
if existential_in_new_uv.is_some_and(|uv| uv < orig_uv) {
// Condition 1.
//
// We already put an existential from a outer universe
// into the current compressed universe, so we need to
// create a new one.
curr_compressed_uv = curr_compressed_uv.next_universe();
}
// `curr_compressed_uv` will now contain an existential from
// `orig_uv`. Trying to canonicalizing an existential from
// a higher universe has to therefore use a new compressed
// universe.
existential_in_new_uv = Some(orig_uv);
} else if existential_in_new_uv.is_some() {
// Condition 2.
//
// `var` is a placeholder from a universe which is not nameable
// by an existential which we already put into the compressed
// universe `curr_compressed_uv`. We therefore have to create a
// new universe for `var`.
curr_compressed_uv = curr_compressed_uv.next_universe();
existential_in_new_uv = None;
}
*var = var.with_updated_universe(curr_compressed_uv);
}
Ordering::Greater => {
// We can ignore this variable in this iteration. We only look at
// universes which actually occur in the input for performance.
//
// For this we set `next_orig_uv` to the next smallest, not yet compressed,
// universe of the input.
if next_orig_uv.is_none_or(|curr_next_uv| uv.cannot_name(curr_next_uv)) {
next_orig_uv = Some(uv);
}
}
}
};
// For each universe which occurs in the input, we first iterate over all
// placeholders and then over all inference variables.
//
// Whenever we compress the universe of a placeholder, no existential with
// an already compressed universe can name that placeholder.
for is_existential in [false, true] {
for var in var_kinds.iter_mut() {
// We simply put all regions from the input into the highest
// compressed universe, so we only deal with them at the end.
if !var.is_region() {
if is_existential == var.is_existential() {
update_uv(var, orig_uv, is_existential)
}
}
}
}
}
// We put all regions into a separate universe.
let mut first_region = true;
for var in var_kinds.iter_mut() {
if var.is_region() {
if first_region {
first_region = false;
curr_compressed_uv = curr_compressed_uv.next_universe();
}
debug_assert!(var.is_existential());
*var = var.with_updated_universe(curr_compressed_uv);
}
}
let var_kinds = self.delegate.cx().mk_canonical_var_kinds(&var_kinds);
(curr_compressed_uv, var_kinds)
}
fn cached_fold_ty(&mut self, t: I::Ty) -> I::Ty {
@ -407,11 +306,18 @@ impl<'a, D: SolverDelegate<Interner = I>, I: Interner> Canonicalizer<'a, D, I> {
"ty vid should have been resolved fully before canonicalization"
);
CanonicalVarKind::Ty(CanonicalTyVarKind::General(
self.delegate
.universe_of_ty(vid)
.unwrap_or_else(|| panic!("ty var should have been resolved: {t:?}")),
))
match self.canonicalize_mode {
CanonicalizeMode::Input { .. } => CanonicalVarKind::Ty(
CanonicalTyVarKind::General(ty::UniverseIndex::ROOT),
),
CanonicalizeMode::Response { .. } => {
CanonicalVarKind::Ty(CanonicalTyVarKind::General(
self.delegate.universe_of_ty(vid).unwrap_or_else(|| {
panic!("ty var should have been resolved: {t:?}")
}),
))
}
}
}
ty::IntVar(vid) => {
debug_assert_eq!(
@ -435,7 +341,7 @@ impl<'a, D: SolverDelegate<Interner = I>, I: Interner> Canonicalizer<'a, D, I> {
},
ty::Placeholder(placeholder) => match self.canonicalize_mode {
CanonicalizeMode::Input { .. } => CanonicalVarKind::PlaceholderTy(
PlaceholderLike::new_anon(placeholder.universe(), self.variables.len().into()),
PlaceholderLike::new_anon(ty::UniverseIndex::ROOT, self.variables.len().into()),
),
CanonicalizeMode::Response { .. } => CanonicalVarKind::PlaceholderTy(placeholder),
},
@ -588,13 +494,21 @@ impl<D: SolverDelegate<Interner = I>, I: Interner> TypeFolder<I> for Canonicaliz
c,
"const vid should have been resolved fully before canonicalization"
);
CanonicalVarKind::Const(self.delegate.universe_of_ct(vid).unwrap())
match self.canonicalize_mode {
CanonicalizeMode::Input { .. } => {
CanonicalVarKind::Const(ty::UniverseIndex::ROOT)
}
CanonicalizeMode::Response { .. } => {
CanonicalVarKind::Const(self.delegate.universe_of_ct(vid).unwrap())
}
}
}
ty::InferConst::Fresh(_) => todo!(),
},
ty::ConstKind::Placeholder(placeholder) => match self.canonicalize_mode {
CanonicalizeMode::Input { .. } => CanonicalVarKind::PlaceholderConst(
PlaceholderLike::new_anon(placeholder.universe(), self.variables.len().into()),
PlaceholderLike::new_anon(ty::UniverseIndex::ROOT, self.variables.len().into()),
),
CanonicalizeMode::Response { .. } => {
CanonicalVarKind::PlaceholderConst(placeholder)

View file

@ -304,6 +304,7 @@ fn emit_malformed_attribute(
| sym::naked
| sym::no_mangle
| sym::non_exhaustive
| sym::ignore
| sym::must_use
| sym::track_caller
| sym::link_name
@ -319,8 +320,7 @@ fn emit_malformed_attribute(
// Some of previously accepted forms were used in practice,
// report them as warnings for now.
let should_warn =
|name| matches!(name, sym::doc | sym::ignore | sym::link | sym::test | sym::bench);
let should_warn = |name| matches!(name, sym::doc | sym::link | sym::test | sym::bench);
let error_msg = format!("malformed `{name}` attribute input");
let mut suggestions = vec![];

View file

@ -160,7 +160,7 @@ impl<'tcx> CheckAttrVisitor<'tcx> {
}
Attribute::Parsed(AttributeKind::DocComment { .. }) => { /* `#[doc]` is actually a lot more than just doc comments, so is checked below*/
}
Attribute::Parsed(AttributeKind::Repr(_)) => { /* handled below this loop and elsewhere */
Attribute::Parsed(AttributeKind::Repr { .. }) => { /* handled below this loop and elsewhere */
}
Attribute::Parsed(AttributeKind::RustcObjectLifetimeDefault) => {
self.check_object_lifetime_default(hir_id);
@ -215,6 +215,9 @@ impl<'tcx> CheckAttrVisitor<'tcx> {
Attribute::Parsed(AttributeKind::MayDangle(attr_span)) => {
self.check_may_dangle(hir_id, *attr_span)
}
Attribute::Parsed(AttributeKind::Ignore { span, .. }) => {
self.check_generic_attr(hir_id, sym::ignore, *span, target, Target::Fn)
}
Attribute::Parsed(AttributeKind::MustUse { span, .. }) => {
self.check_must_use(hir_id, *span, target)
}
@ -303,7 +306,7 @@ impl<'tcx> CheckAttrVisitor<'tcx> {
}
[sym::path, ..] => self.check_generic_attr_unparsed(hir_id, attr, target, Target::Mod),
[sym::macro_export, ..] => self.check_macro_export(hir_id, attr, target),
[sym::ignore, ..] | [sym::should_panic, ..] => {
[sym::should_panic, ..] => {
self.check_generic_attr_unparsed(hir_id, attr, target, Target::Fn)
}
[sym::automatically_derived, ..] => {
@ -1945,7 +1948,7 @@ impl<'tcx> CheckAttrVisitor<'tcx> {
// #[repr(foo)]
// #[repr(bar, align(8))]
// ```
let reprs = find_attr!(attrs, AttributeKind::Repr(r) => r.as_slice()).unwrap_or(&[]);
let (reprs, first_attr_span) = find_attr!(attrs, AttributeKind::Repr { reprs, first_span } => (reprs.as_slice(), Some(*first_span))).unwrap_or((&[], None));
let mut int_reprs = 0;
let mut is_explicit_rust = false;
@ -2042,33 +2045,32 @@ impl<'tcx> CheckAttrVisitor<'tcx> {
continue;
}
}
// FIXME(jdonszelmann): move the diagnostic for unused repr attrs here, I think
// it's a better place for it.
ReprAttr::ReprEmpty => {
// catch `repr()` with no arguments, applied to an item (i.e. not `#![repr()]`)
if item.is_some() {
match target {
Target::Struct | Target::Union | Target::Enum => continue,
Target::Fn | Target::Method(_) => {
self.dcx().emit_err(errors::ReprAlignShouldBeAlign {
span: *repr_span,
item: target.name(),
});
}
_ => {
self.dcx().emit_err(errors::AttrApplication::StructEnumUnion {
hint_span: *repr_span,
span,
});
}
}
}
return;
}
};
}
// catch `repr()` with no arguments, applied to an item (i.e. not `#![repr()]`)
if let Some(first_attr_span) = first_attr_span
&& reprs.is_empty()
&& item.is_some()
{
match target {
Target::Struct | Target::Union | Target::Enum => {}
Target::Fn | Target::Method(_) => {
self.dcx().emit_err(errors::ReprAlignShouldBeAlign {
span: first_attr_span,
item: target.name(),
});
}
_ => {
self.dcx().emit_err(errors::AttrApplication::StructEnumUnion {
hint_span: first_attr_span,
span,
});
}
}
return;
}
// Just point at all repr hints if there are any incompatibilities.
// This is not ideal, but tracking precisely which ones are at fault is a huge hassle.
let hint_spans = reprs.iter().map(|(_, span)| *span);
@ -2321,43 +2323,8 @@ impl<'tcx> CheckAttrVisitor<'tcx> {
}
fn check_unused_attribute(&self, hir_id: HirId, attr: &Attribute, style: Option<AttrStyle>) {
// FIXME(jdonszelmann): deduplicate these checks after more attrs are parsed. This is very
// ugly now but can 100% be removed later.
if let Attribute::Parsed(p) = attr {
match p {
AttributeKind::Repr(reprs) => {
for (r, span) in reprs {
if let ReprAttr::ReprEmpty = r {
self.tcx.emit_node_span_lint(
UNUSED_ATTRIBUTES,
hir_id,
*span,
errors::Unused {
attr_span: *span,
note: errors::UnusedNote::EmptyList { name: sym::repr },
},
);
}
}
return;
}
AttributeKind::TargetFeature(features, span) if features.len() == 0 => {
self.tcx.emit_node_span_lint(
UNUSED_ATTRIBUTES,
hir_id,
*span,
errors::Unused {
attr_span: *span,
note: errors::UnusedNote::EmptyList { name: sym::target_feature },
},
);
return;
}
_ => {}
};
}
// Warn on useless empty attributes.
// FIXME(jdonszelmann): this lint should be moved to attribute parsing, see `AcceptContext::warn_empty_attribute`
let note = if attr.has_any_name(&[
sym::macro_use,
sym::allow,
@ -2573,7 +2540,7 @@ impl<'tcx> CheckAttrVisitor<'tcx> {
}
fn check_rustc_pub_transparent(&self, attr_span: Span, span: Span, attrs: &[Attribute]) {
if !find_attr!(attrs, AttributeKind::Repr(r) => r.iter().any(|(r, _)| r == &ReprAttr::ReprTransparent))
if !find_attr!(attrs, AttributeKind::Repr { reprs, .. } => reprs.iter().any(|(r, _)| r == &ReprAttr::ReprTransparent))
.unwrap_or(false)
{
self.dcx().emit_err(errors::RustcPubTransparent { span, attr_span });
@ -2849,8 +2816,12 @@ fn check_invalid_crate_level_attr(tcx: TyCtxt<'_>, attrs: &[Attribute]) {
ATTRS_TO_CHECK.iter().find(|attr_to_check| attr.has_name(**attr_to_check))
{
(attr.span(), *a)
} else if let Attribute::Parsed(AttributeKind::Repr(r)) = attr {
(r.first().unwrap().1, sym::repr)
} else if let Attribute::Parsed(AttributeKind::Repr {
reprs: _,
first_span: first_attr_span,
}) = attr
{
(*first_attr_span, sym::repr)
} else {
continue;
};

View file

@ -44,7 +44,7 @@ impl RWUTable {
const WORD_RWU_COUNT: usize = Self::WORD_BITS / Self::RWU_BITS;
pub(super) fn new(live_nodes: usize, vars: usize) -> RWUTable {
let live_node_words = (vars + Self::WORD_RWU_COUNT - 1) / Self::WORD_RWU_COUNT;
let live_node_words = vars.div_ceil(Self::WORD_RWU_COUNT);
Self { live_nodes, vars, live_node_words, words: vec![0u8; live_node_words * live_nodes] }
}

View file

@ -597,7 +597,7 @@ where
// from disk. Re-hashing results is fairly expensive, so we can't
// currently afford to verify every hash. This subset should still
// give us some coverage of potential bugs though.
let try_verify = prev_fingerprint.split().1.as_u64() % 32 == 0;
let try_verify = prev_fingerprint.split().1.as_u64().is_multiple_of(32);
if std::intrinsics::unlikely(
try_verify || qcx.dep_context().sess().opts.unstable_opts.incremental_verify_ich,
) {

View file

@ -1202,12 +1202,8 @@ impl<'a, 'ra, 'tcx> BuildReducedGraphVisitor<'a, 'ra, 'tcx> {
fn insert_unused_macro(&mut self, ident: Ident, def_id: LocalDefId, node_id: NodeId) {
if !ident.as_str().starts_with('_') {
self.r.unused_macros.insert(def_id, (node_id, ident));
for (rule_i, rule_span) in &self.r.macro_map[&def_id.to_def_id()].rule_spans {
self.r
.unused_macro_rules
.entry(node_id)
.or_default()
.insert(*rule_i, (ident, *rule_span));
for rule_i in 0..self.r.macro_map[&def_id.to_def_id()].nrules {
self.r.unused_macro_rules.entry(node_id).or_default().insert(rule_i);
}
}
}

View file

@ -1014,13 +1014,13 @@ struct DeriveData {
struct MacroData {
ext: Arc<SyntaxExtension>,
rule_spans: Vec<(usize, Span)>,
nrules: usize,
macro_rules: bool,
}
impl MacroData {
fn new(ext: Arc<SyntaxExtension>) -> MacroData {
MacroData { ext, rule_spans: Vec::new(), macro_rules: false }
MacroData { ext, nrules: 0, macro_rules: false }
}
}
@ -1135,7 +1135,7 @@ pub struct Resolver<'ra, 'tcx> {
ast_transform_scopes: FxHashMap<LocalExpnId, Module<'ra>>,
unused_macros: FxIndexMap<LocalDefId, (NodeId, Ident)>,
/// A map from the macro to all its potentially unused arms.
unused_macro_rules: FxIndexMap<NodeId, UnordMap<usize, (Ident, Span)>>,
unused_macro_rules: FxIndexMap<NodeId, UnordSet<usize>>,
proc_macro_stubs: FxHashSet<LocalDefId>,
/// Traces collected during macro resolution and validated when it's complete.
single_segment_macro_resolutions:

View file

@ -351,13 +351,23 @@ impl<'ra, 'tcx> ResolverExpand for Resolver<'ra, 'tcx> {
}
for (&node_id, unused_arms) in self.unused_macro_rules.iter() {
for (&arm_i, &(ident, rule_span)) in unused_arms.to_sorted_stable_ord() {
self.lint_buffer.buffer_lint(
UNUSED_MACRO_RULES,
node_id,
rule_span,
BuiltinLintDiag::MacroRuleNeverUsed(arm_i, ident.name),
);
if unused_arms.is_empty() {
continue;
}
let def_id = self.local_def_id(node_id).to_def_id();
let m = &self.macro_map[&def_id];
let SyntaxExtensionKind::LegacyBang(ref ext) = m.ext.kind else {
continue;
};
for &arm_i in unused_arms.to_sorted_stable_ord() {
if let Some((ident, rule_span)) = ext.get_unused_rule(arm_i) {
self.lint_buffer.buffer_lint(
UNUSED_MACRO_RULES,
node_id,
rule_span,
BuiltinLintDiag::MacroRuleNeverUsed(arm_i, ident.name),
);
}
}
}
}
@ -1146,7 +1156,7 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> {
node_id: NodeId,
edition: Edition,
) -> MacroData {
let (mut ext, mut rule_spans) = compile_declarative_macro(
let (mut ext, mut nrules) = compile_declarative_macro(
self.tcx.sess,
self.tcx.features(),
macro_def,
@ -1163,13 +1173,13 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> {
// The macro is a built-in, replace its expander function
// while still taking everything else from the source code.
ext.kind = builtin_ext_kind.clone();
rule_spans = Vec::new();
nrules = 0;
} else {
self.dcx().emit_err(errors::CannotFindBuiltinMacroWithName { span, ident });
}
}
MacroData { ext: Arc::new(ext), rule_spans, macro_rules: macro_def.macro_rules }
MacroData { ext: Arc::new(ext), nrules, macro_rules: macro_def.macro_rules }
}
fn path_accessible(

View file

@ -7,7 +7,7 @@ use crate::serialize::Decoder;
/// Returns the length of the longest LEB128 encoding for `T`, assuming `T` is an integer type
pub const fn max_leb128_len<T>() -> usize {
// The longest LEB128 encoding for an integer uses 7 bits per byte.
(size_of::<T>() * 8 + 6) / 7
(size_of::<T>() * 8).div_ceil(7)
}
/// Returns the length of the longest LEB128 encoding of all supported integer types.

View file

@ -7,7 +7,6 @@
use rustc_middle::ty::{self as rustc_ty, Const as InternalConst, Ty as InternalTy};
use rustc_smir::Tables;
use rustc_span::Symbol;
use stable_mir::abi::Layout;
use stable_mir::compiler_interface::BridgeTys;
use stable_mir::mir::alloc::AllocId;
@ -446,17 +445,15 @@ impl RustcInternal for BoundVariableKind {
match self {
BoundVariableKind::Ty(kind) => rustc_ty::BoundVariableKind::Ty(match kind {
BoundTyKind::Anon => rustc_ty::BoundTyKind::Anon,
BoundTyKind::Param(def, symbol) => rustc_ty::BoundTyKind::Param(
def.0.internal(tables, tcx),
Symbol::intern(symbol),
),
BoundTyKind::Param(def, _symbol) => {
rustc_ty::BoundTyKind::Param(def.0.internal(tables, tcx))
}
}),
BoundVariableKind::Region(kind) => rustc_ty::BoundVariableKind::Region(match kind {
BoundRegionKind::BrAnon => rustc_ty::BoundRegionKind::Anon,
BoundRegionKind::BrNamed(def, symbol) => rustc_ty::BoundRegionKind::Named(
def.0.internal(tables, tcx),
Symbol::intern(symbol),
),
BoundRegionKind::BrNamed(def, _symbol) => {
rustc_ty::BoundRegionKind::Named(def.0.internal(tables, tcx))
}
BoundRegionKind::BrEnv => rustc_ty::BoundRegionKind::ClosureEnv,
}),
BoundVariableKind::Const => rustc_ty::BoundVariableKind::Const,

View file

@ -1,7 +1,7 @@
//! Conversion of internal Rust compiler `ty` items to stable ones.
use rustc_middle::ty::Ty;
use rustc_middle::{mir, ty};
use rustc_middle::{bug, mir, ty};
use rustc_smir::Tables;
use rustc_smir::context::SmirCtxt;
use stable_mir::alloc;
@ -291,14 +291,14 @@ impl<'tcx> Stable<'tcx> for ty::BoundTyKind {
fn stable<'cx>(
&self,
tables: &mut Tables<'cx, BridgeTys>,
_: &SmirCtxt<'cx, BridgeTys>,
cx: &SmirCtxt<'cx, BridgeTys>,
) -> Self::T {
use stable_mir::ty::BoundTyKind;
match self {
ty::BoundTyKind::Anon => BoundTyKind::Anon,
ty::BoundTyKind::Param(def_id, symbol) => {
BoundTyKind::Param(tables.param_def(*def_id), symbol.to_string())
ty::BoundTyKind::Param(def_id) => {
BoundTyKind::Param(tables.param_def(*def_id), cx.tcx.item_name(*def_id).to_string())
}
}
}
@ -310,16 +310,18 @@ impl<'tcx> Stable<'tcx> for ty::BoundRegionKind {
fn stable<'cx>(
&self,
tables: &mut Tables<'cx, BridgeTys>,
_: &SmirCtxt<'cx, BridgeTys>,
cx: &SmirCtxt<'cx, BridgeTys>,
) -> Self::T {
use stable_mir::ty::BoundRegionKind;
match self {
ty::BoundRegionKind::Anon => BoundRegionKind::BrAnon,
ty::BoundRegionKind::Named(def_id, symbol) => {
BoundRegionKind::BrNamed(tables.br_named_def(*def_id), symbol.to_string())
}
ty::BoundRegionKind::Named(def_id) => BoundRegionKind::BrNamed(
tables.br_named_def(*def_id),
cx.tcx.item_name(*def_id).to_string(),
),
ty::BoundRegionKind::ClosureEnv => BoundRegionKind::BrEnv,
ty::BoundRegionKind::NamedAnon(_) => bug!("only used for pretty printing"),
}
}
}

View file

@ -130,7 +130,7 @@ pub fn edit_distance_with_substrings(a: &str, b: &str, limit: usize) -> Option<u
1 // Exact substring match, but not a total word match so return non-zero
} else if !big_len_diff {
// Not a big difference in length, discount cost of length difference
score + (len_diff + 1) / 2
score + len_diff.div_ceil(2)
} else {
// A big difference in length, add back the difference in length to the score
score + len_diff

View file

@ -90,7 +90,7 @@ where
_ => {}
}
if (offset.bytes() % 4) != 0
if !offset.bytes().is_multiple_of(4)
&& matches!(scalar2.primitive(), Primitive::Float(Float::F32 | Float::F64))
{
offset += Size::from_bytes(4 - (offset.bytes() % 4));
@ -181,7 +181,7 @@ where
// Structure { float, int, int } doesn't like to be handled like
// { float, long int }. Other way around it doesn't mind.
if data.last_offset < arg.layout.size
&& (data.last_offset.bytes() % 8) != 0
&& !data.last_offset.bytes().is_multiple_of(8)
&& data.prefix_index < data.prefix.len()
{
data.prefix[data.prefix_index] = Some(Reg::i32());
@ -190,7 +190,7 @@ where
}
let mut rest_size = arg.layout.size - data.last_offset;
if (rest_size.bytes() % 8) != 0 && data.prefix_index < data.prefix.len() {
if !rest_size.bytes().is_multiple_of(8) && data.prefix_index < data.prefix.len() {
data.prefix[data.prefix_index] = Some(Reg::i32());
rest_size = rest_size - Reg::i32().size;
}

View file

@ -171,7 +171,7 @@ pub(crate) fn fill_inregs<'a, Ty, C>(
continue;
}
let size_in_regs = (arg.layout.size.bits() + 31) / 32;
let size_in_regs = arg.layout.size.bits().div_ceil(32);
if size_in_regs == 0 {
continue;

View file

@ -95,7 +95,7 @@ where
Ok(())
}
let n = ((arg.layout.size.bytes() + 7) / 8) as usize;
let n = arg.layout.size.bytes().div_ceil(8) as usize;
if n > MAX_EIGHTBYTES {
return Err(Memory);
}

View file

@ -54,7 +54,7 @@ where
// Determine the number of GPRs needed to pass the current argument
// according to the ABI. 2*XLen-aligned varargs are passed in "aligned"
// register pairs, so may consume 3 registers.
let mut needed_arg_gprs = (size + 32 - 1) / 32;
let mut needed_arg_gprs = size.div_ceil(32);
if needed_align == 64 {
needed_arg_gprs += *arg_gprs_left % 2;
}

View file

@ -2,8 +2,6 @@
//! where one region is named and the other is anonymous.
use rustc_errors::Diag;
use rustc_middle::ty;
use rustc_span::kw;
use tracing::debug;
use crate::error_reporting::infer::nice_region_error::NiceRegionError;
@ -27,12 +25,12 @@ impl<'a, 'tcx> NiceRegionError<'a, 'tcx> {
// only introduced anonymous regions in parameters) as well as a
// version new_ty of its type where the anonymous region is replaced
// with the named one.
let (named, anon, anon_param_info, region_info) = if sub.has_name()
let (named, anon, anon_param_info, region_info) = if sub.is_named(self.tcx())
&& let Some(region_info) = self.tcx().is_suitable_region(self.generic_param_scope, sup)
&& let Some(anon_param_info) = self.find_param_with_region(sup, sub)
{
(sub, sup, anon_param_info, region_info)
} else if sup.has_name()
} else if sup.is_named(self.tcx())
&& let Some(region_info) = self.tcx().is_suitable_region(self.generic_param_scope, sub)
&& let Some(anon_param_info) = self.find_param_with_region(sub, sup)
{
@ -58,14 +56,10 @@ impl<'a, 'tcx> NiceRegionError<'a, 'tcx> {
let scope_def_id = region_info.scope;
let is_impl_item = region_info.is_impl_item;
match anon_param_info.kind {
ty::LateParamRegionKind::Named(_, kw::UnderscoreLifetime)
| ty::LateParamRegionKind::Anon(_) => {}
_ => {
/* not an anonymous region */
debug!("try_report_named_anon_conflict: not an anonymous region");
return None;
}
if anon_param_info.kind.is_named(self.tcx()) {
/* not an anonymous region */
debug!("try_report_named_anon_conflict: not an anonymous region");
return None;
}
if is_impl_item {

View file

@ -164,7 +164,7 @@ impl<'tcx> NiceRegionError<'_, 'tcx> {
sub_region @ Region(Interned(RePlaceholder(_), _)),
sup_region,
)) => self.try_report_trait_placeholder_mismatch(
(!sup_region.has_name()).then_some(*sup_region),
(!sup_region.is_named(self.tcx())).then_some(*sup_region),
cause,
Some(*sub_region),
None,
@ -176,7 +176,7 @@ impl<'tcx> NiceRegionError<'_, 'tcx> {
sub_region,
sup_region @ Region(Interned(RePlaceholder(_), _)),
)) => self.try_report_trait_placeholder_mismatch(
(!sub_region.has_name()).then_some(*sub_region),
(!sub_region.is_named(self.tcx())).then_some(*sub_region),
cause,
None,
Some(*sup_region),

View file

@ -1,5 +1,6 @@
use rustc_data_structures::intern::Interned;
use rustc_errors::Diag;
use rustc_middle::bug;
use rustc_middle::ty::{self, RePlaceholder, Region};
use crate::error_reporting::infer::nice_region_error::NiceRegionError;
@ -28,20 +29,22 @@ impl<'tcx> NiceRegionError<'_, 'tcx> {
)),
)) => {
let span = *span;
let (sub_span, sub_symbol) = match sub_name {
ty::BoundRegionKind::Named(def_id, symbol) => {
(Some(self.tcx().def_span(def_id)), Some(symbol))
let (sub_span, sub_symbol) = match *sub_name {
ty::BoundRegionKind::Named(def_id) => {
(Some(self.tcx().def_span(def_id)), Some(self.tcx().item_name(def_id)))
}
ty::BoundRegionKind::Anon | ty::BoundRegionKind::ClosureEnv => (None, None),
ty::BoundRegionKind::NamedAnon(_) => bug!("only used for pretty printing"),
};
let (sup_span, sup_symbol) = match sup_name {
ty::BoundRegionKind::Named(def_id, symbol) => {
(Some(self.tcx().def_span(def_id)), Some(symbol))
let (sup_span, sup_symbol) = match *sup_name {
ty::BoundRegionKind::Named(def_id) => {
(Some(self.tcx().def_span(def_id)), Some(self.tcx().item_name(def_id)))
}
ty::BoundRegionKind::Anon | ty::BoundRegionKind::ClosureEnv => (None, None),
ty::BoundRegionKind::NamedAnon(_) => bug!("only used for pretty printing"),
};
let diag = match (sub_span, sup_span, sub_symbol, sup_symbol) {
(Some(sub_span), Some(sup_span), Some(&sub_symbol), Some(&sup_symbol)) => {
(Some(sub_span), Some(sup_span), Some(sub_symbol), Some(sup_symbol)) => {
PlaceholderRelationLfNotSatisfied::HasBoth {
span,
sub_span,
@ -51,7 +54,7 @@ impl<'tcx> NiceRegionError<'_, 'tcx> {
note: (),
}
}
(Some(sub_span), Some(sup_span), _, Some(&sup_symbol)) => {
(Some(sub_span), Some(sup_span), _, Some(sup_symbol)) => {
PlaceholderRelationLfNotSatisfied::HasSup {
span,
sub_span,
@ -60,7 +63,7 @@ impl<'tcx> NiceRegionError<'_, 'tcx> {
note: (),
}
}
(Some(sub_span), Some(sup_span), Some(&sub_symbol), _) => {
(Some(sub_span), Some(sup_span), Some(sub_symbol), _) => {
PlaceholderRelationLfNotSatisfied::HasSub {
span,
sub_span,

View file

@ -45,7 +45,8 @@ impl<'a, 'tcx> NiceRegionError<'a, 'tcx> {
let return_sp = sub_origin.span();
let param = self.find_param_with_region(*sup_r, *sub_r)?;
let simple_ident = param.param.pat.simple_ident();
let lifetime_name = if sup_r.has_name() { sup_r.to_string() } else { "'_".to_owned() };
let lifetime_name =
if sup_r.is_named(self.tcx()) { sup_r.to_string() } else { "'_".to_owned() };
let (mention_influencer, influencer_point) =
if sup_origin.span().overlaps(param.param_ty_span) {
@ -99,7 +100,7 @@ impl<'a, 'tcx> NiceRegionError<'a, 'tcx> {
// We don't need a note, it's already at the end, it can be shown as a `span_label`.
require_span_as_label: (!require_as_note).then_some(require_span),
has_lifetime: sup_r.has_name(),
has_lifetime: sup_r.is_named(self.tcx()),
lifetime: lifetime_name.clone(),
has_param_name: simple_ident.is_some(),
param_name: simple_ident.map(|x| x.to_string()).unwrap_or_default(),

View file

@ -60,14 +60,15 @@ impl<'a, 'tcx> NiceRegionError<'a, 'tcx> {
// Mark all unnamed regions in the type with a number.
// This diagnostic is called in response to lifetime errors, so be informative.
struct HighlightBuilder<'tcx> {
tcx: TyCtxt<'tcx>,
highlight: RegionHighlightMode<'tcx>,
counter: usize,
}
impl<'tcx> HighlightBuilder<'tcx> {
fn build(sig: ty::PolyFnSig<'tcx>) -> RegionHighlightMode<'tcx> {
fn build(tcx: TyCtxt<'tcx>, sig: ty::PolyFnSig<'tcx>) -> RegionHighlightMode<'tcx> {
let mut builder =
HighlightBuilder { highlight: RegionHighlightMode::default(), counter: 1 };
HighlightBuilder { tcx, highlight: RegionHighlightMode::default(), counter: 1 };
sig.visit_with(&mut builder);
builder.highlight
}
@ -75,15 +76,15 @@ impl<'a, 'tcx> NiceRegionError<'a, 'tcx> {
impl<'tcx> ty::TypeVisitor<TyCtxt<'tcx>> for HighlightBuilder<'tcx> {
fn visit_region(&mut self, r: ty::Region<'tcx>) {
if !r.has_name() && self.counter <= 3 {
if !r.is_named(self.tcx) && self.counter <= 3 {
self.highlight.highlighting_region(r, self.counter);
self.counter += 1;
}
}
}
let expected_highlight = HighlightBuilder::build(expected);
let tcx = self.cx.tcx;
let expected_highlight = HighlightBuilder::build(tcx, expected);
let expected = Highlighted {
highlight: expected_highlight,
ns: Namespace::TypeNS,
@ -91,7 +92,7 @@ impl<'a, 'tcx> NiceRegionError<'a, 'tcx> {
value: expected,
}
.to_string();
let found_highlight = HighlightBuilder::build(found);
let found_highlight = HighlightBuilder::build(tcx, found);
let found =
Highlighted { highlight: found_highlight, ns: Namespace::TypeNS, tcx, value: found }
.to_string();

View file

@ -46,7 +46,7 @@ pub fn find_param_with_region<'tcx>(
ty::ReLateParam(late_param) => (late_param.scope, late_param.kind),
ty::ReEarlyParam(ebr) => {
let region_def = tcx.generics_of(generic_param_scope).region_param(ebr, tcx).def_id;
(tcx.parent(region_def), ty::LateParamRegionKind::Named(region_def, ebr.name))
(tcx.parent(region_def), ty::LateParamRegionKind::Named(region_def))
}
_ => return None, // not a free region
};
@ -144,7 +144,7 @@ impl<'a, 'tcx> NiceRegionError<'a, 'tcx> {
// We are only checking is any region meets the condition so order doesn't matter
#[allow(rustc::potential_query_instability)]
late_bound_regions.iter().any(|r| match *r {
ty::BoundRegionKind::Named(def_id, _) => def_id == region_def_id,
ty::BoundRegionKind::Named(def_id) => def_id == region_def_id,
_ => false,
})
}

View file

@ -713,14 +713,14 @@ impl<'a, 'tcx> TypeErrCtxt<'a, 'tcx> {
}
let labeled_user_string = match bound_kind {
GenericKind::Param(ref p) => format!("the parameter type `{p}`"),
GenericKind::Placeholder(ref p) => format!("the placeholder type `{p:?}`"),
GenericKind::Alias(ref p) => match p.kind(self.tcx) {
GenericKind::Param(_) => format!("the parameter type `{bound_kind}`"),
GenericKind::Placeholder(_) => format!("the placeholder type `{bound_kind}`"),
GenericKind::Alias(p) => match p.kind(self.tcx) {
ty::Projection | ty::Inherent => {
format!("the associated type `{p}`")
format!("the associated type `{bound_kind}`")
}
ty::Free => format!("the type alias `{p}`"),
ty::Opaque => format!("the opaque type `{p}`"),
ty::Free => format!("the type alias `{bound_kind}`"),
ty::Opaque => format!("the opaque type `{bound_kind}`"),
},
};
@ -729,7 +729,7 @@ impl<'a, 'tcx> TypeErrCtxt<'a, 'tcx> {
.dcx()
.struct_span_err(span, format!("{labeled_user_string} may not live long enough"));
err.code(match sub.kind() {
ty::ReEarlyParam(_) | ty::ReLateParam(_) if sub.has_name() => E0309,
ty::ReEarlyParam(_) | ty::ReLateParam(_) if sub.is_named(self.tcx) => E0309,
ty::ReStatic => E0310,
_ => E0311,
});
@ -755,7 +755,7 @@ impl<'a, 'tcx> TypeErrCtxt<'a, 'tcx> {
|| (bound_kind, sub).has_placeholders()
|| !bound_kind.is_suggestable(self.tcx, false)
{
let lt_name = sub.get_name_or_anon().to_string();
let lt_name = sub.get_name_or_anon(self.tcx).to_string();
err.help(format!("{msg} `{bound_kind}: {lt_name}`..."));
break 'suggestion;
}
@ -875,13 +875,13 @@ impl<'a, 'tcx> TypeErrCtxt<'a, 'tcx> {
}
}
let (lifetime_def_id, lifetime_scope) = match self
.tcx
.is_suitable_region(generic_param_scope, lifetime)
{
Some(info) if !lifetime.has_name() => (info.region_def_id.expect_local(), info.scope),
_ => return lifetime.get_name_or_anon().to_string(),
};
let (lifetime_def_id, lifetime_scope) =
match self.tcx.is_suitable_region(generic_param_scope, lifetime) {
Some(info) if !lifetime.is_named(self.tcx) => {
(info.region_def_id.expect_local(), info.scope)
}
_ => return lifetime.get_name_or_anon(self.tcx).to_string(),
};
let new_lt = {
let generics = self.tcx.generics_of(lifetime_scope);
@ -895,7 +895,7 @@ impl<'a, 'tcx> TypeErrCtxt<'a, 'tcx> {
// consider late-bound lifetimes ...
used_names.extend(self.tcx.late_bound_vars(hir_id).into_iter().filter_map(
|p| match p {
ty::BoundVariableKind::Region(lt) => lt.get_name(),
ty::BoundVariableKind::Region(lt) => lt.get_name(self.tcx),
_ => None,
},
));
@ -1006,7 +1006,7 @@ impl<'a, 'tcx> TypeErrCtxt<'a, 'tcx> {
fn report_inference_failure(&self, var_origin: RegionVariableOrigin) -> Diag<'_> {
let br_string = |br: ty::BoundRegionKind| {
let mut s = match br {
ty::BoundRegionKind::Named(_, name) => name.to_string(),
ty::BoundRegionKind::Named(def_id) => self.tcx.item_name(def_id).to_string(),
_ => String::new(),
};
if !s.is_empty() {
@ -1109,7 +1109,7 @@ fn msg_span_from_named_region<'tcx>(
ty::ReEarlyParam(br) => {
let param_def_id = tcx.generics_of(generic_param_scope).region_param(br, tcx).def_id;
let span = tcx.def_span(param_def_id);
let text = if br.has_name() {
let text = if br.is_named() {
format!("the lifetime `{}` as defined here", br.name)
} else {
"the anonymous lifetime as defined here".to_string()
@ -1117,13 +1117,14 @@ fn msg_span_from_named_region<'tcx>(
(text, Some(span))
}
ty::ReLateParam(ref fr) => {
if !fr.kind.is_named()
if !fr.kind.is_named(tcx)
&& let Some((ty, _)) = find_anon_type(tcx, generic_param_scope, region)
{
("the anonymous lifetime defined here".to_string(), Some(ty.span))
} else {
match fr.kind {
ty::LateParamRegionKind::Named(param_def_id, name) => {
ty::LateParamRegionKind::Named(param_def_id) => {
let name = tcx.item_name(param_def_id);
let span = tcx.def_span(param_def_id);
let text = if name == kw::UnderscoreLifetime {
"the anonymous lifetime as defined here".to_string()
@ -1145,9 +1146,12 @@ fn msg_span_from_named_region<'tcx>(
}
ty::ReStatic => ("the static lifetime".to_owned(), alt_span),
ty::RePlaceholder(ty::PlaceholderRegion {
bound: ty::BoundRegion { kind: ty::BoundRegionKind::Named(def_id, name), .. },
bound: ty::BoundRegion { kind: ty::BoundRegionKind::Named(def_id), .. },
..
}) => (format!("the lifetime `{name}` as defined here"), Some(tcx.def_span(def_id))),
}) => (
format!("the lifetime `{}` as defined here", tcx.item_name(def_id)),
Some(tcx.def_span(def_id)),
),
ty::RePlaceholder(ty::PlaceholderRegion {
bound: ty::BoundRegion { kind: ty::BoundRegionKind::Anon, .. },
..

View file

@ -32,21 +32,22 @@ impl<'a> DescriptionCtx<'a> {
} else {
tcx.def_span(scope)
};
if br.has_name() {
if br.is_named() {
(Some(span), "as_defined", br.name.to_string())
} else {
(Some(span), "as_defined_anon", String::new())
}
}
ty::ReLateParam(ref fr) => {
if !fr.kind.is_named()
if !fr.kind.is_named(tcx)
&& let Some((ty, _)) = find_anon_type(tcx, generic_param_scope, region)
{
(Some(ty.span), "defined_here", String::new())
} else {
let scope = fr.scope.expect_local();
match fr.kind {
ty::LateParamRegionKind::Named(_, name) => {
ty::LateParamRegionKind::Named(def_id) => {
let name = tcx.item_name(def_id);
let span = if let Some(param) = tcx
.hir_get_generics(scope)
.and_then(|generics| generics.get_named(name))

View file

@ -195,12 +195,39 @@ fn associated_types_for_impl_traits_in_associated_fn(
match tcx.def_kind(parent_def_id) {
DefKind::Trait => {
if let Some(output) = tcx.hir_get_fn_output(fn_def_id) {
let data = DefPathData::AnonAssocTy(tcx.item_name(fn_def_id.to_def_id()));
let def_path_id = |def_id: LocalDefId| tcx.item_name(def_id.to_def_id());
let def_path_data = def_path_id(fn_def_id);
let (.., trait_item_refs) = tcx.hir_expect_item(parent_def_id).expect_trait();
// The purpose of `disambiguator_idx` is to ensure there are
// no duplicate `def_id` in certain cases, such as:
// ```
// trait Foo {
// fn bar() -> impl Trait;
// fn bar() -> impl Trait;
// // ~~~~~~~~~~ It will generate the same ID if we dont disambiguate it.
// }
// ```
let disambiguator_idx = trait_item_refs
.iter()
.take_while(|item| item.id.owner_id.def_id != fn_def_id)
.fold(0, |acc, item| {
if !matches!(item.kind, hir::AssocItemKind::Fn { .. }) {
acc
} else if def_path_id(item.id.owner_id.def_id) == def_path_data {
tcx.def_key(item.id.owner_id.def_id).disambiguated_data.disambiguator
+ 1
} else {
acc
}
});
let data = DefPathData::AnonAssocTy(def_path_data);
let mut visitor = RPITVisitor {
tcx,
synthetics: vec![],
data,
disambiguator: DisambiguatorState::with(parent_def_id, data, 0),
disambiguator: DisambiguatorState::with(parent_def_id, data, disambiguator_idx),
};
visitor.visit_fn_ret_ty(output);
tcx.arena.alloc_from_iter(

View file

@ -8,7 +8,7 @@ use rustc_middle::ty::layout::{HasTyCtxt, LayoutCx, TyAndLayout};
pub(super) fn layout_sanity_check<'tcx>(cx: &LayoutCx<'tcx>, layout: &TyAndLayout<'tcx>) {
let tcx = cx.tcx();
if layout.size.bytes() % layout.align.abi.bytes() != 0 {
if !layout.size.bytes().is_multiple_of(layout.align.abi.bytes()) {
bug!("size is not a multiple of align, in the following layout:\n{layout:#?}");
}
if layout.size.bytes() >= tcx.data_layout.obj_size_bound() {

View file

@ -5,6 +5,7 @@
use crate::ffi::c_void;
#[allow(unused_imports)]
use crate::fmt;
use crate::intrinsics::{va_arg, va_copy, va_end};
use crate::marker::{PhantomData, PhantomInvariantLifetime};
use crate::ops::{Deref, DerefMut};
@ -280,20 +281,3 @@ impl<'f> Drop for VaListImpl<'f> {
// This works for now, since `va_end` is a no-op on all current LLVM targets.
}
}
/// Destroy the arglist `ap` after initialization with `va_start` or
/// `va_copy`.
#[rustc_intrinsic]
#[rustc_nounwind]
unsafe fn va_end(ap: &mut VaListImpl<'_>);
/// Copies the current location of arglist `src` to the arglist `dst`.
#[rustc_intrinsic]
#[rustc_nounwind]
unsafe fn va_copy<'f>(dest: *mut VaListImpl<'f>, src: &VaListImpl<'f>);
/// Loads an argument of type `T` from the `va_list` `ap` and increment the
/// argument `ap` points to.
#[rustc_intrinsic]
#[rustc_nounwind]
unsafe fn va_arg<T: VaArgSafe>(ap: &mut VaListImpl<'_>) -> T;

View file

@ -54,6 +54,7 @@
)]
#![allow(missing_docs)]
use crate::ffi::va_list::{VaArgSafe, VaListImpl};
use crate::marker::{ConstParamTy, DiscriminantKind, PointeeSized, Tuple};
use crate::ptr;
@ -3142,3 +3143,25 @@ pub(crate) const fn miri_promise_symbolic_alignment(ptr: *const (), align: usize
}
)
}
/// Copies the current location of arglist `src` to the arglist `dst`.
///
/// FIXME: document safety requirements
#[rustc_intrinsic]
#[rustc_nounwind]
pub unsafe fn va_copy<'f>(dest: *mut VaListImpl<'f>, src: &VaListImpl<'f>);
/// Loads an argument of type `T` from the `va_list` `ap` and increment the
/// argument `ap` points to.
///
/// FIXME: document safety requirements
#[rustc_intrinsic]
#[rustc_nounwind]
pub unsafe fn va_arg<T: VaArgSafe>(ap: &mut VaListImpl<'_>) -> T;
/// Destroy the arglist `ap` after initialization with `va_start` or `va_copy`.
///
/// FIXME: document safety requirements
#[rustc_intrinsic]
#[rustc_nounwind]
pub unsafe fn va_end(ap: &mut VaListImpl<'_>);

View file

@ -627,7 +627,7 @@ macro_rules! rem_impl_float {
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_unstable(feature = "const_ops", issue = "90080")]
impl Rem for $t {
impl const Rem for $t {
type Output = $t;
#[inline]

View file

@ -1316,7 +1316,7 @@ impl<T> [T] {
assert_unsafe_precondition!(
check_language_ub,
"slice::as_chunks_unchecked requires `N != 0` and the slice to split exactly into `N`-element chunks",
(n: usize = N, len: usize = self.len()) => n != 0 && len % n == 0,
(n: usize = N, len: usize = self.len()) => n != 0 && len.is_multiple_of(n),
);
// SAFETY: Caller must guarantee that `N` is nonzero and exactly divides the slice length
let new_len = unsafe { exact_div(self.len(), N) };
@ -1512,7 +1512,7 @@ impl<T> [T] {
assert_unsafe_precondition!(
check_language_ub,
"slice::as_chunks_unchecked requires `N != 0` and the slice to split exactly into `N`-element chunks",
(n: usize = N, len: usize = self.len()) => n != 0 && len % n == 0
(n: usize = N, len: usize = self.len()) => n != 0 && len.is_multiple_of(n)
);
// SAFETY: Caller must guarantee that `N` is nonzero and exactly divides the slice length
let new_len = unsafe { exact_div(self.len(), N) };
@ -4866,7 +4866,7 @@ impl<T> [T] {
let byte_offset = elem_start.wrapping_sub(self_start);
if byte_offset % size_of::<T>() != 0 {
if !byte_offset.is_multiple_of(size_of::<T>()) {
return None;
}
@ -4920,7 +4920,7 @@ impl<T> [T] {
let byte_start = subslice_start.wrapping_sub(self_start);
if byte_start % size_of::<T>() != 0 {
if !byte_start.is_multiple_of(size_of::<T>()) {
return None;
}

View file

@ -823,7 +823,7 @@ unsafe fn bidirectional_merge<T: FreezeMarker, F: FnMut(&T, &T) -> bool>(
let right_end = right_rev.wrapping_add(1);
// Odd length, so one element is left unconsumed in the input.
if len % 2 != 0 {
if !len.is_multiple_of(2) {
let left_nonempty = left < left_end;
let last_src = if left_nonempty { left } else { right };
ptr::copy_nonoverlapping(last_src, dst, 1);

View file

@ -158,7 +158,7 @@ fn merge_tree_scale_factor(n: usize) -> u64 {
panic!("Platform not supported");
}
((1 << 62) + n as u64 - 1) / n as u64
(1u64 << 62).div_ceil(n as u64)
}
// Note: merge_tree_depth output is < 64 when left < right as f*x and f*y must
@ -182,7 +182,7 @@ fn sqrt_approx(n: usize) -> usize {
// Finally we note that the exponentiation / division can be done directly
// with shifts. We OR with 1 to avoid zero-checks in the integer log.
let ilog = (n | 1).ilog2();
let shift = (1 + ilog) / 2;
let shift = ilog.div_ceil(2);
((1 << shift) + (n >> shift)) / 2
}

View file

@ -52,7 +52,7 @@ fn do_count_chars(s: &str) -> usize {
// Check the properties of `CHUNK_SIZE` and `UNROLL_INNER` that are required
// for correctness.
const _: () = assert!(CHUNK_SIZE < 256);
const _: () = assert!(CHUNK_SIZE % UNROLL_INNER == 0);
const _: () = assert!(CHUNK_SIZE.is_multiple_of(UNROLL_INNER));
// SAFETY: transmuting `[u8]` to `[usize]` is safe except for size
// differences which are handled by `align_to`.

View file

@ -102,7 +102,7 @@ impl<'a> Iterator for Chars<'a> {
// `(len + 3)` can't overflow, because we know that the `slice::Iter`
// belongs to a slice in memory which has a maximum length of
// `isize::MAX` (that's well below `usize::MAX`).
((len + 3) / 4, Some(len))
(len.div_ceil(4), Some(len))
}
#[inline]
@ -1532,11 +1532,11 @@ impl<'a> Iterator for EncodeUtf16<'a> {
// belongs to a slice in memory which has a maximum length of
// `isize::MAX` (that's well below `usize::MAX`)
if self.extra == 0 {
((len + 2) / 3, Some(len))
(len.div_ceil(3), Some(len))
} else {
// We're in the middle of a surrogate pair, so add the remaining
// surrogate to the bounds.
((len + 2) / 3 + 1, Some(len + 1))
(len.div_ceil(3) + 1, Some(len + 1))
}
}
}

View file

@ -219,7 +219,7 @@ pub(super) const fn run_utf8_validation(v: &[u8]) -> Result<(), Utf8Error> {
// Ascii case, try to skip forward quickly.
// When the pointer is aligned, read 2 words of data per iteration
// until we find a word containing a non-ascii byte.
if align != usize::MAX && align.wrapping_sub(index) % USIZE_BYTES == 0 {
if align != usize::MAX && align.wrapping_sub(index).is_multiple_of(USIZE_BYTES) {
let ptr = v.as_ptr();
while index < blocks_end {
// SAFETY: since `align - index` and `ascii_block_size` are

Some files were not shown because too many files have changed in this diff Show more