Merge pull request #4640 from RalfJung/rustup

Rustup
This commit is contained in:
Ralf Jung 2025-10-22 07:01:10 +00:00 committed by GitHub
commit 86bb8ebacb
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
1051 changed files with 17319 additions and 6797 deletions

View file

@ -645,6 +645,7 @@ version = "0.0.1"
dependencies = [
"clippy_config",
"clippy_utils",
"itertools",
"regex",
"rustc-semver",
]
@ -5262,9 +5263,9 @@ dependencies = [
[[package]]
name = "stringdex"
version = "0.0.1-alpha10"
version = "0.0.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0fa846a7d509d1828a4f90962dc09810e161abcada7fc6a921e92c168d0811d7"
checksum = "18b3bd4f10d15ef859c40291769f0d85209de6b0f1c30713ff9cdf45ac43ea36"
dependencies = [
"stacker",
]

View file

@ -869,11 +869,11 @@ pub enum PatKind {
Struct(Option<Box<QSelf>>, Path, ThinVec<PatField>, PatFieldsRest),
/// A tuple struct/variant pattern (`Variant(x, y, .., z)`).
TupleStruct(Option<Box<QSelf>>, Path, ThinVec<Box<Pat>>),
TupleStruct(Option<Box<QSelf>>, Path, ThinVec<Pat>),
/// An or-pattern `A | B | C`.
/// Invariant: `pats.len() >= 2`.
Or(ThinVec<Box<Pat>>),
Or(ThinVec<Pat>),
/// A possibly qualified path pattern.
/// Unqualified path patterns `A::B::C` can legally refer to variants, structs, constants
@ -882,7 +882,7 @@ pub enum PatKind {
Path(Option<Box<QSelf>>, Path),
/// A tuple pattern (`(a, b)`).
Tuple(ThinVec<Box<Pat>>),
Tuple(ThinVec<Pat>),
/// A `box` pattern.
Box(Box<Pat>),
@ -900,7 +900,7 @@ pub enum PatKind {
Range(Option<Box<Expr>>, Option<Box<Expr>>, Spanned<RangeEnd>),
/// A slice pattern `[a, b, c]`.
Slice(ThinVec<Box<Pat>>),
Slice(ThinVec<Pat>),
/// A rest pattern `..`.
///
@ -2579,7 +2579,7 @@ pub enum TyPatKind {
/// A range pattern (e.g., `1...2`, `1..2`, `1..`, `..2`, `1..=2`, `..=2`).
Range(Option<Box<AnonConst>>, Option<Box<AnonConst>>, Spanned<RangeEnd>),
Or(ThinVec<Box<TyPat>>),
Or(ThinVec<TyPat>),
/// Placeholder for a pattern that wasn't syntactically well formed in some way.
Err(ErrorGuaranteed),

View file

@ -389,9 +389,9 @@ macro_rules! common_visitor_and_walkers {
ThinVec<(NodeId, Path)>,
ThinVec<PathSegment>,
ThinVec<PreciseCapturingArg>,
ThinVec<Box<Pat>>,
ThinVec<Pat>,
ThinVec<Box<Ty>>,
ThinVec<Box<TyPat>>,
ThinVec<TyPat>,
);
// This macro generates `impl Visitable` and `impl MutVisitable` that forward to `Walkable`

View file

@ -0,0 +1,324 @@
use thin_vec::thin_vec;
use crate::LoweringContext;
impl<'a, 'hir> LoweringContext<'a, 'hir> {
    /// Lowered contracts are guarded with the `contract_checks` compiler flag,
    /// i.e. the flag turns into a boolean guard in the lowered HIR. The reason
    /// for not eliminating the contract code entirely when the `contract_checks`
    /// flag is disabled is so that contracts can be type checked, even when
    /// they are disabled, which avoids them becoming stale (i.e. out of sync
    /// with the codebase) over time.
    ///
    /// The optimiser should be able to eliminate all contract code guarded
    /// by `if false`, leaving the original body intact when runtime contract
    /// checks are disabled.
    pub(super) fn lower_contract(
        &mut self,
        body: impl FnOnce(&mut Self) -> rustc_hir::Expr<'hir>,
        contract: &rustc_ast::FnContract,
    ) -> rustc_hir::Expr<'hir> {
        // Dispatch on which of the two contract clauses are present; each arm
        // produces a different wrapping of `body`.
        match (&contract.requires, &contract.ensures) {
            (Some(req), Some(ens)) => {
                // Lower the fn contract, which turns:
                //
                // { body }
                //
                // into:
                //
                // let __postcond = if contract_checks {
                //     contract_check_requires(PRECOND);
                //     Some(|ret_val| POSTCOND)
                // } else {
                //     None
                // };
                // {
                //     let ret = { body };
                //
                //     if contract_checks {
                //         contract_check_ensures(__postcond, ret)
                //     } else {
                //         ret
                //     }
                // }
                let precond = self.lower_precond(req);
                let postcond_checker = self.lower_postcond_checker(ens);
                let contract_check =
                    self.lower_contract_check_with_postcond(Some(precond), postcond_checker);
                let wrapped_body =
                    self.wrap_body_with_contract_check(body, contract_check, postcond_checker.span);
                self.expr_block(wrapped_body)
            }
            (None, Some(ens)) => {
                // Lower the fn contract, which turns:
                //
                // { body }
                //
                // into:
                //
                // let __postcond = if contract_checks {
                //     Some(|ret_val| POSTCOND)
                // } else {
                //     None
                // };
                // {
                //     let ret = { body };
                //
                //     if contract_checks {
                //         contract_check_ensures(__postcond, ret)
                //     } else {
                //         ret
                //     }
                // }
                let postcond_checker = self.lower_postcond_checker(ens);
                // Same as the arm above, but with no precondition statement.
                let contract_check =
                    self.lower_contract_check_with_postcond(None, postcond_checker);
                let wrapped_body =
                    self.wrap_body_with_contract_check(body, contract_check, postcond_checker.span);
                self.expr_block(wrapped_body)
            }
            (Some(req), None) => {
                // Lower the fn contract, which turns:
                //
                // { body }
                //
                // into:
                //
                // {
                //     if contracts_checks {
                //         contract_requires(PRECOND);
                //     }
                //     body
                // }
                let precond = self.lower_precond(req);
                let precond_check = self.lower_contract_check_just_precond(precond);
                let body = self.arena.alloc(body(self));
                // Flatten the body into precond check, then body.
                let wrapped_body = self.block_all(
                    body.span,
                    self.arena.alloc_from_iter([precond_check].into_iter()),
                    Some(body),
                );
                self.expr_block(wrapped_body)
            }
            // No contract at all: lower the body untouched.
            (None, None) => body(self),
        }
    }
    /// Lower the precondition check intrinsic.
    ///
    /// Produces the statement `contract_check_requires(PRECOND);`, with its
    /// span marked as contract desugaring so diagnostics can tell it apart
    /// from user-written code.
    fn lower_precond(&mut self, req: &Box<rustc_ast::Expr>) -> rustc_hir::Stmt<'hir> {
        let lowered_req = self.lower_expr_mut(&req);
        let req_span = self.mark_span_with_reason(
            rustc_span::DesugaringKind::Contract,
            lowered_req.span,
            None,
        );
        // Call the `ContractCheckRequires` lang item with the lowered predicate.
        let precond = self.expr_call_lang_item_fn_mut(
            req_span,
            rustc_hir::LangItem::ContractCheckRequires,
            &*arena_vec![self; lowered_req],
        );
        self.stmt_expr(req.span, precond)
    }
    /// Lowers the `ensures` clause into a call to the
    /// `ContractBuildCheckEnsures` lang item, producing the expression that
    /// builds the postcondition checker from the lowered `ensures` closure.
    fn lower_postcond_checker(
        &mut self,
        ens: &Box<rustc_ast::Expr>,
    ) -> &'hir rustc_hir::Expr<'hir> {
        let ens_span = self.lower_span(ens.span);
        // Mark as contract desugaring, same as the precondition span.
        let ens_span =
            self.mark_span_with_reason(rustc_span::DesugaringKind::Contract, ens_span, None);
        let lowered_ens = self.lower_expr_mut(&ens);
        self.expr_call_lang_item_fn(
            ens_span,
            rustc_hir::LangItem::ContractBuildCheckEnsures,
            &*arena_vec![self; lowered_ens],
        )
    }
    /// Wraps the precondition statement in `if contract_checks { PRECOND; }`
    /// (no else branch) so the check only runs when contract checks are
    /// enabled, and turns the whole guard back into a statement.
    fn lower_contract_check_just_precond(
        &mut self,
        precond: rustc_hir::Stmt<'hir>,
    ) -> rustc_hir::Stmt<'hir> {
        let stmts = self.arena.alloc_from_iter([precond].into_iter());
        let then_block_stmts = self.block_all(precond.span, stmts, None);
        let then_block = self.arena.alloc(self.expr_block(&then_block_stmts));
        // The guard condition is a bool literal reflecting the
        // `contract_checks` session flag at compile time.
        let precond_check = rustc_hir::ExprKind::If(
            self.arena.alloc(self.expr_bool_literal(precond.span, self.tcx.sess.contract_checks())),
            then_block,
            None,
        );
        let precond_check = self.expr(precond.span, precond_check);
        self.stmt_expr(precond.span, precond_check)
    }
    /// Builds the guarded initializer for the postcondition checker binding:
    ///
    /// `if contract_checks { <optional PRECOND stmt>; Some(postcond_checker) } else { None }`
    fn lower_contract_check_with_postcond(
        &mut self,
        precond: Option<rustc_hir::Stmt<'hir>>,
        postcond_checker: &'hir rustc_hir::Expr<'hir>,
    ) -> &'hir rustc_hir::Expr<'hir> {
        // Zero or one statement, depending on whether a precondition exists.
        let stmts = self.arena.alloc_from_iter(precond.into_iter());
        let span = match precond {
            Some(precond) => precond.span,
            None => postcond_checker.span,
        };
        // `Some(postcond_checker)` — the then-branch tail expression.
        let postcond_checker = self.arena.alloc(self.expr_enum_variant_lang_item(
            postcond_checker.span,
            rustc_hir::lang_items::LangItem::OptionSome,
            &*arena_vec![self; *postcond_checker],
        ));
        let then_block_stmts = self.block_all(span, stmts, Some(postcond_checker));
        let then_block = self.arena.alloc(self.expr_block(&then_block_stmts));
        // `None` — the else-branch tail expression, used when checks are off.
        let none_expr = self.arena.alloc(self.expr_enum_variant_lang_item(
            postcond_checker.span,
            rustc_hir::lang_items::LangItem::OptionNone,
            Default::default(),
        ));
        let else_block = self.block_expr(none_expr);
        let else_block = self.arena.alloc(self.expr_block(else_block));
        let contract_check = rustc_hir::ExprKind::If(
            self.arena.alloc(self.expr_bool_literal(span, self.tcx.sess.contract_checks())),
            then_block,
            Some(else_block),
        );
        self.arena.alloc(self.expr(span, contract_check))
    }
    /// Wraps `body` with the postcondition machinery: declares the
    /// `__ensures_checker` binding initialized by `contract_check`, installs
    /// it in `self.contract_ensures` so explicit `return`s get intercepted
    /// (see `checked_return`), and injects the ensures check on the implicit
    /// return of the body.
    fn wrap_body_with_contract_check(
        &mut self,
        body: impl FnOnce(&mut Self) -> rustc_hir::Expr<'hir>,
        contract_check: &'hir rustc_hir::Expr<'hir>,
        postcond_span: rustc_span::Span,
    ) -> &'hir rustc_hir::Block<'hir> {
        let check_ident: rustc_span::Ident =
            rustc_span::Ident::from_str_and_span("__ensures_checker", postcond_span);
        let (check_hir_id, postcond_decl) = {
            // Set up the postcondition `let` statement.
            let (checker_pat, check_hir_id) = self.pat_ident_binding_mode_mut(
                postcond_span,
                check_ident,
                rustc_hir::BindingMode::NONE,
            );
            (
                check_hir_id,
                self.stmt_let_pat(
                    None,
                    postcond_span,
                    Some(contract_check),
                    self.arena.alloc(checker_pat),
                    rustc_hir::LocalSource::Contract,
                ),
            )
        };
        // Install contract_ensures so we will intercept `return` statements,
        // then lower the body.
        self.contract_ensures = Some((postcond_span, check_ident, check_hir_id));
        let body = self.arena.alloc(body(self));
        // Finally, inject an ensures check on the implicit return of the body.
        let body = self.inject_ensures_check(body, postcond_span, check_ident, check_hir_id);
        // Flatten the body into precond, then postcond, then wrapped body.
        let wrapped_body = self.block_all(
            body.span,
            self.arena.alloc_from_iter([postcond_decl].into_iter()),
            Some(body),
        );
        wrapped_body
    }
    /// Create an `ExprKind::Ret` that is optionally wrapped by a call to check
    /// a contract ensures clause, if it exists.
    pub(super) fn checked_return(
        &mut self,
        opt_expr: Option<&'hir rustc_hir::Expr<'hir>>,
    ) -> rustc_hir::ExprKind<'hir> {
        // `contract_ensures` is only `Some` while lowering a body that has an
        // `ensures` clause (set by `wrap_body_with_contract_check`).
        let checked_ret =
            if let Some((check_span, check_ident, check_hir_id)) = self.contract_ensures {
                // A bare `return;` returns `()`, so check the ensures on unit.
                let expr = opt_expr.unwrap_or_else(|| self.expr_unit(check_span));
                Some(self.inject_ensures_check(expr, check_span, check_ident, check_hir_id))
            } else {
                opt_expr
            };
        rustc_hir::ExprKind::Ret(checked_ret)
    }
    /// Wraps an expression with a call to the ensures check before it gets returned.
    pub(super) fn inject_ensures_check(
        &mut self,
        expr: &'hir rustc_hir::Expr<'hir>,
        span: rustc_span::Span,
        cond_ident: rustc_span::Ident,
        cond_hir_id: rustc_hir::HirId,
    ) -> &'hir rustc_hir::Expr<'hir> {
        // {
        //     let ret = { body };
        //
        //     if contract_checks {
        //         contract_check_ensures(__postcond, ret)
        //     } else {
        //         ret
        //     }
        // }
        let ret_ident: rustc_span::Ident = rustc_span::Ident::from_str_and_span("__ret", span);
        // Set up the return `let` statement.
        let (ret_pat, ret_hir_id) =
            self.pat_ident_binding_mode_mut(span, ret_ident, rustc_hir::BindingMode::NONE);
        let ret_stmt = self.stmt_let_pat(
            None,
            span,
            Some(expr),
            self.arena.alloc(ret_pat),
            rustc_hir::LocalSource::Contract,
        );
        let ret = self.expr_ident(span, ret_ident, ret_hir_id);
        let cond_fn = self.expr_ident(span, cond_ident, cond_hir_id);
        // `contract_check_ensures(__ensures_checker, __ret)`
        let contract_check = self.expr_call_lang_item_fn_mut(
            span,
            rustc_hir::LangItem::ContractCheckEnsures,
            arena_vec![self; *cond_fn, *ret],
        );
        let contract_check = self.arena.alloc(contract_check);
        let call_expr = self.block_expr_block(contract_check);
        // same ident can't be used in 2 places, so we create a new one for the
        // else branch
        let ret = self.expr_ident(span, ret_ident, ret_hir_id);
        let ret_block = self.block_expr_block(ret);
        let contracts_enabled: rustc_hir::Expr<'_> =
            self.expr_bool_literal(span, self.tcx.sess.contract_checks());
        let contract_check = self.arena.alloc(self.expr(
            span,
            rustc_hir::ExprKind::If(
                self.arena.alloc(contracts_enabled),
                call_expr,
                Some(ret_block),
            ),
        ));
        // `#[allow(unreachable_code)]` — one of the two branches is always
        // statically dead because the condition is a bool literal.
        let attrs: rustc_ast::AttrVec = thin_vec![self.unreachable_code_attr(span)];
        self.lower_attrs(contract_check.hir_id, &attrs, span, rustc_hir::Target::Expression);
        let ret_block = self.block_all(span, arena_vec![self; ret_stmt], Some(contract_check));
        self.arena.alloc(self.expr_block(self.arena.alloc(ret_block)))
    }
}

View file

@ -383,36 +383,6 @@ impl<'hir> LoweringContext<'_, 'hir> {
})
}
/// Create an `ExprKind::Ret` that is optionally wrapped by a call to check
/// a contract ensures clause, if it exists.
fn checked_return(&mut self, opt_expr: Option<&'hir hir::Expr<'hir>>) -> hir::ExprKind<'hir> {
let checked_ret =
if let Some((check_span, check_ident, check_hir_id)) = self.contract_ensures {
let expr = opt_expr.unwrap_or_else(|| self.expr_unit(check_span));
Some(self.inject_ensures_check(expr, check_span, check_ident, check_hir_id))
} else {
opt_expr
};
hir::ExprKind::Ret(checked_ret)
}
/// Wraps an expression with a call to the ensures check before it gets returned.
pub(crate) fn inject_ensures_check(
&mut self,
expr: &'hir hir::Expr<'hir>,
span: Span,
cond_ident: Ident,
cond_hir_id: HirId,
) -> &'hir hir::Expr<'hir> {
let cond_fn = self.expr_ident(span, cond_ident, cond_hir_id);
let call_expr = self.expr_call_lang_item_fn_mut(
span,
hir::LangItem::ContractCheckEnsures,
arena_vec![self; *cond_fn, *expr],
);
self.arena.alloc(call_expr)
}
pub(crate) fn lower_const_block(&mut self, c: &AnonConst) -> hir::ConstBlock {
self.with_new_scopes(c.value.span, |this| {
let def_id = this.local_def_id(c.id);
@ -1971,16 +1941,7 @@ impl<'hir> LoweringContext<'_, 'hir> {
)
};
// `#[allow(unreachable_code)]`
let attr = attr::mk_attr_nested_word(
&self.tcx.sess.psess.attr_id_generator,
AttrStyle::Outer,
Safety::Default,
sym::allow,
sym::unreachable_code,
try_span,
);
let attrs: AttrVec = thin_vec![attr];
let attrs: AttrVec = thin_vec![self.unreachable_code_attr(try_span)];
// `ControlFlow::Continue(val) => #[allow(unreachable_code)] val,`
let continue_arm = {
@ -2120,7 +2081,7 @@ impl<'hir> LoweringContext<'_, 'hir> {
self.expr(span, hir::ExprKind::AddrOf(hir::BorrowKind::Ref, hir::Mutability::Mut, e))
}
fn expr_unit(&mut self, sp: Span) -> &'hir hir::Expr<'hir> {
pub(super) fn expr_unit(&mut self, sp: Span) -> &'hir hir::Expr<'hir> {
self.arena.alloc(self.expr(sp, hir::ExprKind::Tup(&[])))
}
@ -2161,6 +2122,43 @@ impl<'hir> LoweringContext<'_, 'hir> {
self.expr(span, hir::ExprKind::Call(e, args))
}
pub(super) fn expr_struct(
&mut self,
span: Span,
path: &'hir hir::QPath<'hir>,
fields: &'hir [hir::ExprField<'hir>],
) -> hir::Expr<'hir> {
self.expr(span, hir::ExprKind::Struct(path, fields, rustc_hir::StructTailExpr::None))
}
pub(super) fn expr_enum_variant(
&mut self,
span: Span,
path: &'hir hir::QPath<'hir>,
fields: &'hir [hir::Expr<'hir>],
) -> hir::Expr<'hir> {
let fields = self.arena.alloc_from_iter(fields.into_iter().enumerate().map(|(i, f)| {
hir::ExprField {
hir_id: self.next_id(),
ident: Ident::from_str(&i.to_string()),
expr: f,
span: f.span,
is_shorthand: false,
}
}));
self.expr_struct(span, path, fields)
}
pub(super) fn expr_enum_variant_lang_item(
&mut self,
span: Span,
lang_item: hir::LangItem,
fields: &'hir [hir::Expr<'hir>],
) -> hir::Expr<'hir> {
let path = self.arena.alloc(self.lang_item_path(span, lang_item));
self.expr_enum_variant(span, path, fields)
}
pub(super) fn expr_call(
&mut self,
span: Span,
@ -2189,8 +2187,21 @@ impl<'hir> LoweringContext<'_, 'hir> {
self.arena.alloc(self.expr_call_lang_item_fn_mut(span, lang_item, args))
}
fn expr_lang_item_path(&mut self, span: Span, lang_item: hir::LangItem) -> hir::Expr<'hir> {
self.expr(span, hir::ExprKind::Path(hir::QPath::LangItem(lang_item, self.lower_span(span))))
pub(super) fn expr_lang_item_path(
&mut self,
span: Span,
lang_item: hir::LangItem,
) -> hir::Expr<'hir> {
let path = self.lang_item_path(span, lang_item);
self.expr(span, hir::ExprKind::Path(path))
}
pub(super) fn lang_item_path(
&mut self,
span: Span,
lang_item: hir::LangItem,
) -> hir::QPath<'hir> {
hir::QPath::LangItem(lang_item, self.lower_span(span))
}
/// `<LangItem>::name`
@ -2270,6 +2281,17 @@ impl<'hir> LoweringContext<'_, 'hir> {
self.expr(b.span, hir::ExprKind::Block(b, None))
}
/// Wrap an expression in a block, and wrap that block in an expression again.
/// Useful for constructing if-expressions, which require expressions of
/// kind block.
pub(super) fn block_expr_block(
&mut self,
expr: &'hir hir::Expr<'hir>,
) -> &'hir hir::Expr<'hir> {
let b = self.block_expr(expr);
self.arena.alloc(self.expr_block(b))
}
pub(super) fn expr_array_ref(
&mut self,
span: Span,
@ -2283,6 +2305,10 @@ impl<'hir> LoweringContext<'_, 'hir> {
self.expr(span, hir::ExprKind::AddrOf(hir::BorrowKind::Ref, hir::Mutability::Not, expr))
}
pub(super) fn expr_bool_literal(&mut self, span: Span, val: bool) -> hir::Expr<'hir> {
self.expr(span, hir::ExprKind::Lit(Spanned { node: LitKind::Bool(val), span }))
}
pub(super) fn expr(&mut self, span: Span, kind: hir::ExprKind<'hir>) -> hir::Expr<'hir> {
let hir_id = self.next_id();
hir::Expr { hir_id, kind, span: self.lower_span(span) }
@ -2316,6 +2342,19 @@ impl<'hir> LoweringContext<'_, 'hir> {
body: expr,
}
}
/// `#[allow(unreachable_code)]`
pub(super) fn unreachable_code_attr(&mut self, span: Span) -> Attribute {
let attr = attr::mk_attr_nested_word(
&self.tcx.sess.psess.attr_id_generator,
AttrStyle::Outer,
Safety::Default,
sym::allow,
sym::unreachable_code,
span,
);
attr
}
}
/// Used by [`LoweringContext::make_lowered_await`] to customize the desugaring based on what kind

View file

@ -426,7 +426,7 @@ impl<'hir> LoweringContext<'_, 'hir> {
|this| {
this.lower_param_bounds(
bounds,
RelaxedBoundPolicy::Allowed,
RelaxedBoundPolicy::Forbidden(RelaxedBoundForbiddenReason::TraitAlias),
ImplTraitContext::Disallowed(ImplTraitPosition::Bound),
)
},
@ -1214,76 +1214,9 @@ impl<'hir> LoweringContext<'_, 'hir> {
let params =
this.arena.alloc_from_iter(decl.inputs.iter().map(|x| this.lower_param(x)));
// Optionally lower the fn contract, which turns:
//
// { body }
//
// into:
//
// { contract_requires(PRECOND); let __postcond = |ret_val| POSTCOND; postcond({ body }) }
// Optionally lower the fn contract
if let Some(contract) = contract {
let precond = if let Some(req) = &contract.requires {
// Lower the precondition check intrinsic.
let lowered_req = this.lower_expr_mut(&req);
let req_span = this.mark_span_with_reason(
DesugaringKind::Contract,
lowered_req.span,
None,
);
let precond = this.expr_call_lang_item_fn_mut(
req_span,
hir::LangItem::ContractCheckRequires,
&*arena_vec![this; lowered_req],
);
Some(this.stmt_expr(req.span, precond))
} else {
None
};
let (postcond, body) = if let Some(ens) = &contract.ensures {
let ens_span = this.lower_span(ens.span);
let ens_span =
this.mark_span_with_reason(DesugaringKind::Contract, ens_span, None);
// Set up the postcondition `let` statement.
let check_ident: Ident =
Ident::from_str_and_span("__ensures_checker", ens_span);
let (checker_pat, check_hir_id) = this.pat_ident_binding_mode_mut(
ens_span,
check_ident,
hir::BindingMode::NONE,
);
let lowered_ens = this.lower_expr_mut(&ens);
let postcond_checker = this.expr_call_lang_item_fn(
ens_span,
hir::LangItem::ContractBuildCheckEnsures,
&*arena_vec![this; lowered_ens],
);
let postcond = this.stmt_let_pat(
None,
ens_span,
Some(postcond_checker),
this.arena.alloc(checker_pat),
hir::LocalSource::Contract,
);
// Install contract_ensures so we will intercept `return` statements,
// then lower the body.
this.contract_ensures = Some((ens_span, check_ident, check_hir_id));
let body = this.arena.alloc(body(this));
// Finally, inject an ensures check on the implicit return of the body.
let body = this.inject_ensures_check(body, ens_span, check_ident, check_hir_id);
(Some(postcond), body)
} else {
let body = &*this.arena.alloc(body(this));
(None, body)
};
// Flatten the body into precond, then postcond, then wrapped body.
let wrapped_body = this.block_all(
body.span,
this.arena.alloc_from_iter([precond, postcond].into_iter().flatten()),
Some(body),
);
(params, this.expr_block(wrapped_body))
(params, this.lower_contract(body, contract))
} else {
(params, body(this))
}

View file

@ -77,6 +77,7 @@ macro_rules! arena_vec {
mod asm;
mod block;
mod contract;
mod delegation;
mod errors;
mod expr;
@ -296,6 +297,7 @@ enum RelaxedBoundPolicy<'a> {
enum RelaxedBoundForbiddenReason {
TraitObjectTy,
SuperTrait,
TraitAlias,
AssocTyBounds,
LateBoundVarsInScope,
}
@ -2085,12 +2087,14 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
span: Span,
rbp: RelaxedBoundPolicy<'_>,
) {
// Even though feature `more_maybe_bounds` bypasses the given policy and (currently) enables
// relaxed bounds in every conceivable position[^1], we don't want to advertise it to the user
// (via a feature gate) since it's super internal. Besides this, it'd be quite distracting.
// Even though feature `more_maybe_bounds` enables the user to relax all default bounds
// other than `Sized` in a lot more positions (thereby bypassing the given policy), we don't
// want to advertise it to the user (via a feature gate error) since it's super internal.
//
// [^1]: Strictly speaking, this is incorrect (at the very least for `Sized`) because it's
// no longer fully consistent with default trait elaboration in HIR ty lowering.
// FIXME(more_maybe_bounds): Moreover, if we actually were to add proper default traits
// (like a hypothetical `Move` or `Leak`) we would want to validate the location according
// to default trait elaboration in HIR ty lowering (which depends on the specific trait in
// question: E.g., `?Sized` & `?Move` most likely won't be allowed in all the same places).
match rbp {
RelaxedBoundPolicy::Allowed => return,
@ -2103,33 +2107,41 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
}
}
RelaxedBoundPolicy::Forbidden(reason) => {
let gate = |context, subject| {
let extended = self.tcx.features().more_maybe_bounds();
let is_sized = trait_ref
.trait_def_id()
.is_some_and(|def_id| self.tcx.is_lang_item(def_id, hir::LangItem::Sized));
if extended && !is_sized {
return;
}
let prefix = if extended { "`Sized` " } else { "" };
let mut diag = self.dcx().struct_span_err(
span,
format!("relaxed {prefix}bounds are not permitted in {context}"),
);
if is_sized {
diag.note(format!(
"{subject} are not implicitly bounded by `Sized`, \
so there is nothing to relax"
));
}
diag.emit();
};
match reason {
RelaxedBoundForbiddenReason::TraitObjectTy => {
if self.tcx.features().more_maybe_bounds() {
return;
}
self.dcx().span_err(
span,
"relaxed bounds are not permitted in trait object types",
);
gate("trait object types", "trait object types");
return;
}
RelaxedBoundForbiddenReason::SuperTrait => {
if self.tcx.features().more_maybe_bounds() {
return;
}
let mut diag = self.dcx().struct_span_err(
span,
"relaxed bounds are not permitted in supertrait bounds",
);
if let Some(def_id) = trait_ref.trait_def_id()
&& self.tcx.is_lang_item(def_id, hir::LangItem::Sized)
{
diag.note("traits are `?Sized` by default");
}
diag.emit();
gate("supertrait bounds", "traits");
return;
}
RelaxedBoundForbiddenReason::TraitAlias => {
gate("trait alias bounds", "trait aliases");
return;
}
RelaxedBoundForbiddenReason::AssocTyBounds
@ -2142,7 +2154,7 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
.struct_span_err(span, "this relaxed bound is not permitted here")
.with_note(
"in this context, relaxed bounds are only allowed on \
type parameters defined by the closest item",
type parameters defined on the closest item",
)
.emit();
}

View file

@ -154,7 +154,7 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
fn lower_pat_tuple(
&mut self,
pats: &[Box<Pat>],
pats: &[Pat],
ctx: &str,
) -> (&'hir [hir::Pat<'hir>], hir::DotDotPos) {
let mut elems = Vec::with_capacity(pats.len());
@ -209,7 +209,7 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
/// When encountering `($binding_mode $ident @)? ..` (`slice`),
/// this is interpreted as a sub-slice pattern semantically.
/// Patterns that follow, which are not like `slice` -- or an error occurs, are in `after`.
fn lower_pat_slice(&mut self, pats: &[Box<Pat>]) -> hir::PatKind<'hir> {
fn lower_pat_slice(&mut self, pats: &[Pat]) -> hir::PatKind<'hir> {
let mut before = Vec::new();
let mut after = Vec::new();
let mut slice = None;

View file

@ -4,6 +4,8 @@ attr_parsing_as_needed_compatibility =
attr_parsing_bundle_needs_static =
linking modifier `bundle` is only compatible with `static` linking kind
attr_parsing_cfg_attr_bad_delim = wrong `cfg_attr` delimiters
attr_parsing_cfg_predicate_identifier =
`cfg` predicate key must be an identifier

View file

@ -1,19 +1,28 @@
use rustc_ast::{LitKind, NodeId};
use rustc_ast::token::Delimiter;
use rustc_ast::tokenstream::DelimSpan;
use rustc_ast::{AttrItem, Attribute, CRATE_NODE_ID, LitKind, NodeId, ast, token};
use rustc_errors::{Applicability, PResult};
use rustc_feature::{AttributeTemplate, Features, template};
use rustc_hir::RustcVersion;
use rustc_hir::attrs::CfgEntry;
use rustc_hir::{AttrPath, RustcVersion};
use rustc_parse::parser::{ForceCollect, Parser};
use rustc_parse::{exp, parse_in};
use rustc_session::Session;
use rustc_session::config::ExpectedValues;
use rustc_session::lint::BuiltinLintDiag;
use rustc_session::lint::builtin::UNEXPECTED_CFGS;
use rustc_session::parse::feature_err;
use rustc_session::parse::{ParseSess, feature_err};
use rustc_span::{Span, Symbol, sym};
use thin_vec::ThinVec;
use crate::context::{AcceptContext, ShouldEmit, Stage};
use crate::parser::{ArgParser, MetaItemListParser, MetaItemOrLitParser, NameValueParser};
use crate::session_diagnostics::{
AttributeParseError, AttributeParseErrorReason, CfgAttrBadDelim, MetaBadDelimSugg,
};
use crate::{
CfgMatchesLintEmitter, fluent_generated, parse_version, session_diagnostics, try_gate_cfg,
AttributeParser, CfgMatchesLintEmitter, fluent_generated, parse_version, session_diagnostics,
try_gate_cfg,
};
pub const CFG_TEMPLATE: AttributeTemplate = template!(
@ -21,7 +30,12 @@ pub const CFG_TEMPLATE: AttributeTemplate = template!(
"https://doc.rust-lang.org/reference/conditional-compilation.html#the-cfg-attribute"
);
pub fn parse_cfg_attr<'c, S: Stage>(
const CFG_ATTR_TEMPLATE: AttributeTemplate = template!(
List: &["predicate, attr1, attr2, ..."],
"https://doc.rust-lang.org/reference/conditional-compilation.html#the-cfg_attr-attribute"
);
pub fn parse_cfg<'c, S: Stage>(
cx: &'c mut AcceptContext<'_, '_, S>,
args: &'c ArgParser<'_>,
) -> Option<CfgEntry> {
@ -70,9 +84,7 @@ pub(crate) fn parse_cfg_entry<S: Stage>(
},
a @ (ArgParser::NoArgs | ArgParser::NameValue(_)) => {
let Some(name) = meta.path().word_sym() else {
cx.emit_err(session_diagnostics::CfgPredicateIdentifier {
span: meta.path().span(),
});
cx.expected_identifier(meta.path().span());
return None;
};
parse_name_value(name, meta.path().span(), a.name_value(), meta.span(), cx)?
@ -81,7 +93,7 @@ pub(crate) fn parse_cfg_entry<S: Stage>(
MetaItemOrLitParser::Lit(lit) => match lit.kind {
LitKind::Bool(b) => CfgEntry::Bool(b, lit.span),
_ => {
cx.emit_err(session_diagnostics::CfgPredicateIdentifier { span: lit.span });
cx.expected_identifier(lit.span);
return None;
}
},
@ -149,9 +161,7 @@ fn parse_cfg_entry_target<S: Stage>(
// Then, parse it as a name-value item
let Some(name) = sub_item.path().word_sym() else {
cx.emit_err(session_diagnostics::CfgPredicateIdentifier {
span: sub_item.path().span(),
});
cx.expected_identifier(sub_item.path().span());
return None;
};
let name = Symbol::intern(&format!("target_{name}"));
@ -300,3 +310,120 @@ impl EvalConfigResult {
}
}
}
/// Parses the arguments of a `#[cfg_attr(predicate, attr1, attr2, ...)]`
/// attribute.
///
/// On success, returns the parsed `cfg` predicate together with the (still
/// unexpanded) attribute items and their spans. On failure, emits a
/// diagnostic and returns `None`.
pub fn parse_cfg_attr(
    cfg_attr: &Attribute,
    sess: &Session,
    features: Option<&Features>,
) -> Option<(CfgEntry, Vec<(AttrItem, Span)>)> {
    match cfg_attr.get_normal_item().args {
        // The only well-formed shape: a delimited, non-empty argument list.
        ast::AttrArgs::Delimited(ast::DelimArgs { dspan, delim, ref tokens })
            if !tokens.is_empty() =>
        {
            // Wrong delimiters (e.g. `cfg_attr[...]`) get a dedicated error,
            // but parsing still proceeds on the token stream.
            check_cfg_attr_bad_delim(&sess.psess, dspan, delim);
            match parse_in(&sess.psess, tokens.clone(), "`cfg_attr` input", |p| {
                parse_cfg_attr_internal(p, sess, features, cfg_attr)
            }) {
                Ok(r) => return Some(r),
                Err(e) => {
                    // Augment the parse error with the expected attribute
                    // template and a link to the reference documentation.
                    let suggestions = CFG_ATTR_TEMPLATE.suggestions(cfg_attr.style, sym::cfg_attr);
                    e.with_span_suggestions(
                        cfg_attr.span,
                        "must be of the form",
                        suggestions,
                        Applicability::HasPlaceholders,
                    )
                    .with_note(format!(
                        "for more information, visit <{}>",
                        CFG_ATTR_TEMPLATE.docs.expect("cfg_attr has docs")
                    ))
                    .emit();
                }
            }
        }
        _ => {
            // Either an empty list (`#[cfg_attr()]`) or no list at all
            // (`#[cfg_attr]` / `#[cfg_attr = ...]`); pick the more precise
            // span and reason for the error accordingly.
            let (span, reason) = if let ast::AttrArgs::Delimited(ast::DelimArgs { dspan, .. }) =
                cfg_attr.get_normal_item().args
            {
                (dspan.entire(), AttributeParseErrorReason::ExpectedAtLeastOneArgument)
            } else {
                (cfg_attr.span, AttributeParseErrorReason::ExpectedList)
            };
            sess.dcx().emit_err(AttributeParseError {
                span,
                attr_span: cfg_attr.span,
                template: CFG_ATTR_TEMPLATE,
                attribute: AttrPath::from_ast(&cfg_attr.get_normal_item().path),
                reason,
                attr_style: cfg_attr.style,
            });
        }
    }
    None
}
/// Emits an error (with a fix-it suggestion) unless the `cfg_attr` argument
/// list uses parentheses as its delimiter.
fn check_cfg_attr_bad_delim(psess: &ParseSess, span: DelimSpan, delim: Delimiter) {
    if !matches!(delim, Delimiter::Parenthesis) {
        psess.dcx().emit_err(CfgAttrBadDelim {
            span: span.entire(),
            sugg: MetaBadDelimSugg { open: span.open, close: span.close },
        });
    }
}
/// Parses `cfg_attr(pred, attr_item_list)` where `attr_item_list` is comma-delimited.
///
/// The predicate is parsed through the shared attribute-parsing machinery
/// (`AttributeParser::parse_single_args` with `parse_cfg_entry`), so
/// `cfg_attr` accepts exactly the same predicates as `cfg`. Errors and lints
/// are emitted eagerly (`ShouldEmit::ErrorsAndLints`).
fn parse_cfg_attr_internal<'a>(
    parser: &mut Parser<'a>,
    sess: &'a Session,
    features: Option<&Features>,
    attribute: &Attribute,
) -> PResult<'a, (CfgEntry, Vec<(ast::AttrItem, Span)>)> {
    // Parse cfg predicate
    let pred_start = parser.token.span;
    let meta = MetaItemOrLitParser::parse_single(parser, ShouldEmit::ErrorsAndLints)?;
    // Span of the whole predicate: from its first token up to the current token.
    let pred_span = pred_start.with_hi(parser.token.span.hi());
    let cfg_predicate = AttributeParser::parse_single_args(
        sess,
        attribute.span,
        attribute.style,
        AttrPath {
            segments: attribute
                .ident_path()
                .expect("cfg_attr is not a doc comment")
                .into_boxed_slice(),
            span: attribute.span,
        },
        pred_span,
        CRATE_NODE_ID,
        features,
        ShouldEmit::ErrorsAndLints,
        &meta,
        parse_cfg_entry,
        &CFG_ATTR_TEMPLATE,
    )
    .ok_or_else(|| {
        // With `ShouldEmit::ErrorsAndLints`, a `None` result should already
        // have produced a diagnostic; downgrade to a delayed bug so we don't
        // double-report (and so we ICE if nothing was actually emitted).
        let mut diag = sess.dcx().struct_err(
            "cfg_entry parsing failing with `ShouldEmit::ErrorsAndLints` should emit an error.",
        );
        diag.downgrade_to_delayed_bug();
        diag
    })?;
    parser.expect(exp!(Comma))?;
    // Presumably, the majority of the time there will only be one attr.
    let mut expanded_attrs = Vec::with_capacity(1);
    while parser.token != token::Eof {
        let lo = parser.token.span;
        let item = parser.parse_attr_item(ForceCollect::Yes)?;
        expanded_attrs.push((item, lo.to(parser.prev_token.span)));
        // Trailing commas are allowed; stop at the first missing separator.
        if !parser.eat(exp!(Comma)) {
            break;
        }
    }
    Ok((cfg_predicate, expanded_attrs))
}

View file

@ -1,7 +1,7 @@
use std::borrow::Cow;
use rustc_ast as ast;
use rustc_ast::NodeId;
use rustc_ast::{AttrStyle, NodeId};
use rustc_errors::DiagCtxtHandle;
use rustc_feature::{AttributeTemplate, Features};
use rustc_hir::attrs::AttributeKind;
@ -62,7 +62,8 @@ impl<'sess> AttributeParser<'sess, Early> {
)
}
/// Usually you want `parse_limited`, which defaults to no errors.
/// This does the same as `parse_limited`, except it has a `should_emit` parameter which allows it to emit errors.
/// Usually you want `parse_limited`, which emits no errors.
pub fn parse_limited_should_emit(
sess: &'sess Session,
attrs: &[ast::Attribute],
@ -86,6 +87,13 @@ impl<'sess> AttributeParser<'sess, Early> {
parsed.pop()
}
/// This method allows you to parse a list of attributes *before* `rustc_ast_lowering`.
/// This can be used for attributes that would be removed before `rustc_ast_lowering`, such as attributes on macro calls.
///
/// Try to use this as little as possible. Attributes *should* be lowered during
/// `rustc_ast_lowering`. Some attributes require access to features to parse, which would
/// crash if you tried to do so through [`parse_limited_all`](Self::parse_limited_all).
/// Therefore, if `parse_only` is None, then features *must* be provided.
pub fn parse_limited_all(
sess: &'sess Session,
attrs: &[ast::Attribute],
@ -111,6 +119,8 @@ impl<'sess> AttributeParser<'sess, Early> {
)
}
/// This method parses a single attribute, using `parse_fn`.
/// This is useful if you already know what exact attribute this is, and want to parse it.
pub fn parse_single<T>(
sess: &'sess Session,
attr: &ast::Attribute,
@ -121,13 +131,6 @@ impl<'sess> AttributeParser<'sess, Early> {
parse_fn: fn(cx: &mut AcceptContext<'_, '_, Early>, item: &ArgParser<'_>) -> Option<T>,
template: &AttributeTemplate,
) -> Option<T> {
let mut parser = Self {
features,
tools: Vec::new(),
parse_only: None,
sess,
stage: Early { emit_errors },
};
let ast::AttrKind::Normal(normal_attr) = &attr.kind else {
panic!("parse_single called on a doc attr")
};
@ -136,6 +139,43 @@ impl<'sess> AttributeParser<'sess, Early> {
let meta_parser = MetaItemParser::from_attr(normal_attr, &parts, &sess.psess, emit_errors)?;
let path = meta_parser.path();
let args = meta_parser.args();
Self::parse_single_args(
sess,
attr.span,
attr.style,
path.get_attribute_path(),
target_span,
target_node_id,
features,
emit_errors,
args,
parse_fn,
template,
)
}
/// This method is equivalent to `parse_single`, but parses arguments using `parse_fn` using manually created `args`.
/// This is useful when you want to parse other things than attributes using attribute parsers.
pub fn parse_single_args<T, I>(
sess: &'sess Session,
attr_span: Span,
attr_style: AttrStyle,
attr_path: AttrPath,
target_span: Span,
target_node_id: NodeId,
features: Option<&'sess Features>,
emit_errors: ShouldEmit,
args: &I,
parse_fn: fn(cx: &mut AcceptContext<'_, '_, Early>, item: &I) -> Option<T>,
template: &AttributeTemplate,
) -> Option<T> {
let mut parser = Self {
features,
tools: Vec::new(),
parse_only: None,
sess,
stage: Early { emit_errors },
};
let mut cx: AcceptContext<'_, 'sess, Early> = AcceptContext {
shared: SharedContext {
cx: &mut parser,
@ -145,10 +185,10 @@ impl<'sess> AttributeParser<'sess, Early> {
crate::lints::emit_attribute_lint(&lint, sess);
},
},
attr_span: attr.span,
attr_style: attr.style,
attr_span,
attr_style,
template,
attr_path: path.get_attribute_path(),
attr_path,
};
parse_fn(&mut cx, args)
}

View file

@ -105,7 +105,9 @@ mod session_diagnostics;
mod target_checking;
pub mod validate_attr;
pub use attributes::cfg::{CFG_TEMPLATE, EvalConfigResult, eval_config_entry, parse_cfg_attr};
pub use attributes::cfg::{
CFG_TEMPLATE, EvalConfigResult, eval_config_entry, parse_cfg, parse_cfg_attr,
};
pub use attributes::cfg_old::*;
pub use attributes::util::{is_builtin_attr, is_doc_alias_attrs_contain_symbol, parse_version};
pub use context::{Early, Late, OmitDoc, ShouldEmit};

View file

@ -8,7 +8,7 @@ use std::fmt::{Debug, Display};
use rustc_ast::token::{self, Delimiter, MetaVarKind};
use rustc_ast::tokenstream::TokenStream;
use rustc_ast::{AttrArgs, DelimArgs, Expr, ExprKind, LitKind, MetaItemLit, NormalAttr, Path};
use rustc_ast::{AttrArgs, Expr, ExprKind, LitKind, MetaItemLit, NormalAttr, Path};
use rustc_ast_pretty::pprust;
use rustc_errors::{Diag, PResult};
use rustc_hir::{self as hir, AttrPath};
@ -124,7 +124,11 @@ impl<'a> ArgParser<'a> {
return None;
}
Self::List(MetaItemListParser::new(args, psess, should_emit)?)
Self::List(
MetaItemListParser::new(&args.tokens, args.dspan.entire(), psess, should_emit)
.map_err(|e| should_emit.emit_err(e))
.ok()?,
)
}
AttrArgs::Eq { eq_span, expr } => Self::NameValue(NameValueParser {
eq_span: *eq_span,
@ -186,7 +190,15 @@ pub enum MetaItemOrLitParser<'a> {
Err(Span, ErrorGuaranteed),
}
impl<'a> MetaItemOrLitParser<'a> {
impl<'sess> MetaItemOrLitParser<'sess> {
/// Parses exactly one meta item (or literal) from the given `parser`.
///
/// Wraps the parser in a `MetaItemListParserContext` together with
/// `should_emit` (which the context uses to decide whether errors are
/// reported) and delegates to `parse_meta_item_inner`. Returns the parsed
/// item or the parse error.
pub fn parse_single(
parser: &mut Parser<'sess>,
should_emit: ShouldEmit,
) -> PResult<'sess, MetaItemOrLitParser<'static>> {
let mut this = MetaItemListParserContext { parser, should_emit };
this.parse_meta_item_inner()
}
pub fn span(&self) -> Span {
match self {
MetaItemOrLitParser::MetaItemParser(generic_meta_item_parser) => {
@ -204,7 +216,7 @@ impl<'a> MetaItemOrLitParser<'a> {
}
}
pub fn meta_item(&self) -> Option<&MetaItemParser<'a>> {
pub fn meta_item(&self) -> Option<&MetaItemParser<'sess>> {
match self {
MetaItemOrLitParser::MetaItemParser(parser) => Some(parser),
_ => None,
@ -542,23 +554,13 @@ pub struct MetaItemListParser<'a> {
}
impl<'a> MetaItemListParser<'a> {
fn new<'sess>(
delim: &'a DelimArgs,
pub(crate) fn new<'sess>(
tokens: &'a TokenStream,
span: Span,
psess: &'sess ParseSess,
should_emit: ShouldEmit,
) -> Option<Self> {
match MetaItemListParserContext::parse(
delim.tokens.clone(),
psess,
delim.dspan.entire(),
should_emit,
) {
Ok(s) => Some(s),
Err(e) => {
should_emit.emit_err(e);
None
}
}
) -> Result<Self, Diag<'sess>> {
MetaItemListParserContext::parse(tokens.clone(), psess, span, should_emit)
}
/// Lets you pick and choose as what you want to parse each element in the list

View file

@ -971,3 +971,12 @@ pub(crate) struct LimitInvalid<'a> {
pub value_span: Span,
pub error_str: &'a str,
}
/// Diagnostic for a `cfg_attr` attribute written with the wrong delimiter
/// (slug: `attr_parsing_cfg_attr_bad_delim`); presumably non-parenthesis
/// delimiters — confirm against the Fluent message for this slug.
#[derive(Diagnostic)]
#[diag(attr_parsing_cfg_attr_bad_delim)]
pub(crate) struct CfgAttrBadDelim {
/// The span of the offending delimiters.
#[primary_span]
pub span: Span,
/// Structured suggestion for replacing the bad delimiters.
#[subdiagnostic]
pub sugg: MetaBadDelimSugg,
}

View file

@ -708,8 +708,7 @@ impl<'infcx, 'tcx> MirBorrowckCtxt<'_, 'infcx, 'tcx> {
return (false, false, None);
}
let my_def = self.body.source.def_id();
let Some(td) =
tcx.trait_impl_of_assoc(my_def).and_then(|id| self.infcx.tcx.trait_id_of_impl(id))
let Some(td) = tcx.trait_impl_of_assoc(my_def).map(|id| self.infcx.tcx.impl_trait_id(id))
else {
return (false, false, None);
};

View file

@ -21,10 +21,10 @@ pub(crate) fn renumber_mir<'tcx>(
let mut renumberer = RegionRenumberer { infcx };
for body in promoted.iter_mut() {
renumberer.visit_body(body);
renumberer.visit_body_preserves_cfg(body);
}
renumberer.visit_body(body);
renumberer.visit_body_preserves_cfg(body);
}
// The fields are used only for debugging output in `sccs_info`.

View file

@ -149,7 +149,7 @@ fn expand_requires_tts(
new_tts.push_tree(TokenTree::Delimited(
DelimSpan::from_single(attr_span),
DelimSpacing::new(Spacing::JointHidden, Spacing::JointHidden),
token::Delimiter::Parenthesis,
token::Delimiter::Brace,
annotation,
));
Ok(())
@ -171,7 +171,7 @@ fn expand_ensures_tts(
new_tts.push_tree(TokenTree::Delimited(
DelimSpan::from_single(attr_span),
DelimSpacing::new(Spacing::JointHidden, Spacing::JointHidden),
token::Delimiter::Parenthesis,
token::Delimiter::Brace,
annotation,
));
Ok(())

View file

@ -1507,7 +1507,7 @@ impl<'a> TraitDef<'a> {
struct_def: &'a VariantData,
prefixes: &[String],
by_ref: ByRef,
) -> ThinVec<Box<ast::Pat>> {
) -> ThinVec<ast::Pat> {
prefixes
.iter()
.map(|prefix| {
@ -1543,7 +1543,7 @@ impl<'a> TraitDef<'a> {
attrs: ast::AttrVec::new(),
id: ast::DUMMY_NODE_ID,
span: pat.span.with_ctxt(self.span.ctxt()),
pat,
pat: Box::new(pat),
is_placeholder: false,
}
})

View file

@ -32,7 +32,7 @@ fn parse_pat_ty<'a>(
let pat = pat_to_ty_pat(
cx,
*parser.parse_pat_no_top_guard(
parser.parse_pat_no_top_guard(
None,
RecoverComma::No,
RecoverColon::No,
@ -44,14 +44,14 @@ fn parse_pat_ty<'a>(
parser.unexpected()?;
}
Ok((ty, pat))
Ok((ty, Box::new(pat)))
}
fn ty_pat(kind: TyPatKind, span: Span) -> Box<TyPat> {
Box::new(TyPat { id: DUMMY_NODE_ID, kind, span, tokens: None })
fn ty_pat(kind: TyPatKind, span: Span) -> TyPat {
TyPat { id: DUMMY_NODE_ID, kind, span, tokens: None }
}
fn pat_to_ty_pat(cx: &mut ExtCtxt<'_>, pat: ast::Pat) -> Box<TyPat> {
fn pat_to_ty_pat(cx: &mut ExtCtxt<'_>, pat: ast::Pat) -> TyPat {
let kind = match pat.kind {
ast::PatKind::Range(start, end, include_end) => TyPatKind::Range(
start.map(|value| Box::new(AnonConst { id: DUMMY_NODE_ID, value })),
@ -59,7 +59,7 @@ fn pat_to_ty_pat(cx: &mut ExtCtxt<'_>, pat: ast::Pat) -> Box<TyPat> {
include_end,
),
ast::PatKind::Or(variants) => {
TyPatKind::Or(variants.into_iter().map(|pat| pat_to_ty_pat(cx, *pat)).collect())
TyPatKind::Or(variants.into_iter().map(|pat| pat_to_ty_pat(cx, pat)).collect())
}
ast::PatKind::Err(guar) => TyPatKind::Err(guar),
_ => TyPatKind::Err(cx.dcx().span_err(pat.span, "pattern not supported in pattern types")),

View file

@ -467,7 +467,7 @@ pub(crate) fn codegen_terminator_call<'tcx>(
true
} else {
instance.is_some_and(|inst| {
fx.tcx.codegen_fn_attrs(inst.def_id()).flags.contains(CodegenFnAttrFlags::COLD)
fx.tcx.codegen_instance_attrs(inst.def).flags.contains(CodegenFnAttrFlags::COLD)
})
};
if is_cold {

View file

@ -5,7 +5,9 @@ use std::cmp::Ordering;
use cranelift_module::*;
use rustc_data_structures::fx::FxHashSet;
use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrFlags;
use rustc_middle::mir::interpret::{AllocId, GlobalAlloc, Scalar, read_target_uint};
use rustc_middle::mir::interpret::{
AllocId, GlobalAlloc, PointerArithmetic, Scalar, read_target_uint,
};
use rustc_middle::ty::{ExistentialTraitRef, ScalarInt};
use crate::prelude::*;
@ -138,8 +140,11 @@ pub(crate) fn codegen_const_value<'tcx>(
let base_addr = match fx.tcx.global_alloc(alloc_id) {
GlobalAlloc::Memory(alloc) => {
if alloc.inner().len() == 0 {
assert_eq!(offset, Size::ZERO);
fx.bcx.ins().iconst(fx.pointer_type, alloc.inner().align.bytes() as i64)
let val = alloc.inner().align.bytes().wrapping_add(offset.bytes());
fx.bcx.ins().iconst(
fx.pointer_type,
fx.tcx.truncate_to_target_usize(val) as i64,
)
} else {
let data_id = data_id_for_alloc_id(
&mut fx.constants_cx,

View file

@ -5,7 +5,7 @@ use rustc_codegen_ssa::traits::{
BaseTypeCodegenMethods, ConstCodegenMethods, MiscCodegenMethods, StaticCodegenMethods,
};
use rustc_middle::mir::Mutability;
use rustc_middle::mir::interpret::{ConstAllocation, GlobalAlloc, Scalar};
use rustc_middle::mir::interpret::{ConstAllocation, GlobalAlloc, PointerArithmetic, Scalar};
use rustc_middle::ty::layout::LayoutOf;
use crate::context::CodegenCx;
@ -247,8 +247,8 @@ impl<'gcc, 'tcx> ConstCodegenMethods for CodegenCx<'gcc, 'tcx> {
// This avoids generating a zero-sized constant value and actually needing a
// real address at runtime.
if alloc.inner().len() == 0 {
assert_eq!(offset.bytes(), 0);
let val = self.const_usize(alloc.inner().align.bytes());
let val = alloc.inner().align.bytes().wrapping_add(offset.bytes());
let val = self.const_usize(self.tcx.truncate_to_target_usize(val));
return if matches!(layout.primitive(), Pointer(_)) {
self.context.new_cast(None, val, ty)
} else {

View file

@ -1,7 +1,5 @@
/*
* TODO(antoyo): implement equality in libgccjit based on https://zpz.github.io/blog/overloading-equality-operator-in-cpp-class-hierarchy/ (for type equality?)
* TODO(antoyo): support #[inline] attributes.
* TODO(antoyo): support LTO (gcc's equivalent to Full LTO is -flto -flto-partition=one https://documentation.suse.com/sbp/all/html/SBP-GCC-10/index.html).
* For Thin LTO, this might be helpful:
// cspell:disable-next-line
* In gcc 4.6 -fwhopr was removed and became default with -flto. The non-whopr path can still be executed via -flto-partition=none.

View file

@ -16,25 +16,42 @@ pub(crate) fn handle_gpu_code<'ll>(
cx: &'ll SimpleCx<'_>,
) {
// The offload memory transfer type for each kernel
let mut o_types = vec![];
let mut kernels = vec![];
let offload_entry_ty = add_tgt_offload_entry(&cx);
let mut memtransfer_types = vec![];
let mut region_ids = vec![];
let offload_entry_ty = TgtOffloadEntry::new_decl(&cx);
for num in 0..9 {
let kernel = cx.get_function(&format!("kernel_{num}"));
if let Some(kernel) = kernel {
o_types.push(gen_define_handling(&cx, kernel, offload_entry_ty, num));
kernels.push(kernel);
let (o, k) = gen_define_handling(&cx, kernel, offload_entry_ty, num);
memtransfer_types.push(o);
region_ids.push(k);
}
}
gen_call_handling(&cx, &kernels, &o_types);
gen_call_handling(&cx, &memtransfer_types, &region_ids);
}
// ; Function Attrs: nounwind
// declare i32 @__tgt_target_kernel(ptr, i64, i32, i32, ptr, ptr) #2
/// Declares the offload runtime's kernel-launch entry point and returns the
/// declaration together with its LLVM function type (needed later to emit
/// calls to it).
///
/// Per the comment above, the declared symbol is:
/// `declare i32 @__tgt_target_kernel(ptr, i64, i32, i32, ptr, ptr)` with the
/// `nounwind` attribute applied.
fn generate_launcher<'ll>(cx: &'ll SimpleCx<'_>) -> (&'ll llvm::Value, &'ll llvm::Type) {
    let ptr_ty = cx.type_ptr();
    let i64_ty = cx.type_i64();
    let i32_ty = cx.type_i32();
    // Parameter list: (ptr, i64, i32, i32, ptr, ptr) -> i32.
    let param_tys = vec![ptr_ty, i64_ty, i32_ty, i32_ty, ptr_ty, ptr_ty];
    let launcher_ty = cx.type_func(&param_tys, i32_ty);
    let launcher = declare_offload_fn(&cx, "__tgt_target_kernel", launcher_ty);
    // The runtime call never unwinds; mark the declaration accordingly.
    let nounwind_attr = llvm::AttributeKind::NoUnwind.create_attr(cx.llcx);
    attributes::apply_to_llfn(launcher, Function, &[nounwind_attr]);
    (launcher, launcher_ty)
}
// What is our @1 here? A magic global, used in our data_{begin/update/end}_mapper:
// @0 = private unnamed_addr constant [23 x i8] c";unknown;unknown;0;0;;\00", align 1
// @1 = private unnamed_addr constant %struct.ident_t { i32 0, i32 2, i32 0, i32 22, ptr @0 }, align 8
// FIXME(offload): @0 should include the file name (e.g. lib.rs) in which the function to be
// offloaded was defined.
fn generate_at_one<'ll>(cx: &'ll SimpleCx<'_>) -> &'ll llvm::Value {
// @0 = private unnamed_addr constant [23 x i8] c";unknown;unknown;0;0;;\00", align 1
let unknown_txt = ";unknown;unknown;0;0;;";
let c_entry_name = CString::new(unknown_txt).unwrap();
let c_val = c_entry_name.as_bytes_with_nul();
@ -59,15 +76,7 @@ fn generate_at_one<'ll>(cx: &'ll SimpleCx<'_>) -> &'ll llvm::Value {
at_one
}
pub(crate) fn add_tgt_offload_entry<'ll>(cx: &'ll SimpleCx<'_>) -> &'ll llvm::Type {
let offload_entry_ty = cx.type_named_struct("struct.__tgt_offload_entry");
let tptr = cx.type_ptr();
let ti64 = cx.type_i64();
let ti32 = cx.type_i32();
let ti16 = cx.type_i16();
// For each kernel to run on the gpu, we will later generate one entry of this type.
// copied from LLVM
// typedef struct {
struct TgtOffloadEntry {
// uint64_t Reserved;
// uint16_t Version;
// uint16_t Kind;
@ -77,21 +86,40 @@ pub(crate) fn add_tgt_offload_entry<'ll>(cx: &'ll SimpleCx<'_>) -> &'ll llvm::Ty
// uint64_t Size; Size of the entry info (0 if it is a function)
// uint64_t Data;
// void *AuxAddr;
// } __tgt_offload_entry;
let entry_elements = vec![ti64, ti16, ti16, ti32, tptr, tptr, ti64, ti64, tptr];
cx.set_struct_body(offload_entry_ty, &entry_elements, false);
offload_entry_ty
}
fn gen_tgt_kernel_global<'ll>(cx: &'ll SimpleCx<'_>) {
let kernel_arguments_ty = cx.type_named_struct("struct.__tgt_kernel_arguments");
let tptr = cx.type_ptr();
let ti64 = cx.type_i64();
let ti32 = cx.type_i32();
let tarr = cx.type_array(ti32, 3);
impl TgtOffloadEntry {
pub(crate) fn new_decl<'ll>(cx: &'ll SimpleCx<'_>) -> &'ll llvm::Type {
let offload_entry_ty = cx.type_named_struct("struct.__tgt_offload_entry");
let tptr = cx.type_ptr();
let ti64 = cx.type_i64();
let ti32 = cx.type_i32();
let ti16 = cx.type_i16();
// For each kernel to run on the gpu, we will later generate one entry of this type.
// copied from LLVM
let entry_elements = vec![ti64, ti16, ti16, ti32, tptr, tptr, ti64, ti64, tptr];
cx.set_struct_body(offload_entry_ty, &entry_elements, false);
offload_entry_ty
}
// Taken from the LLVM APITypes.h declaration:
//struct KernelArgsTy {
/// Builds the nine field initializers for one `__tgt_offload_entry` value,
/// in the same order as the struct body created by `new_decl`:
/// [Reserved, Version, Kind, Flags, Address, SymbolName, Size, Data, AuxAddr]
/// (field names per the C declaration quoted in the comments above).
///
/// `region_id` fills the Address slot and `llglobal` the SymbolName slot.
/// NOTE(review): Version and Kind are hardcoded to 1; presumably matching
/// LLVM's current entry ABI — confirm against LLVM's offloading headers.
fn new<'ll>(
cx: &'ll SimpleCx<'_>,
region_id: &'ll Value,
llglobal: &'ll Value,
) -> [&'ll Value; 9] {
let reserved = cx.get_const_i64(0);
let version = cx.get_const_i16(1);
let kind = cx.get_const_i16(1);
let flags = cx.get_const_i32(0);
// Size and Data are 0; the quoted declaration says size is 0 for functions.
let size = cx.get_const_i64(0);
let data = cx.get_const_i64(0);
let aux_addr = cx.const_null(cx.type_ptr());
[reserved, version, kind, flags, region_id, llglobal, size, data, aux_addr]
}
}
// Taken from the LLVM APITypes.h declaration:
struct KernelArgsTy {
// uint32_t Version = 0; // Version of this struct for ABI compatibility.
// uint32_t NumArgs = 0; // Number of arguments in each input pointer.
// void **ArgBasePtrs =
@ -102,25 +130,65 @@ fn gen_tgt_kernel_global<'ll>(cx: &'ll SimpleCx<'_>) {
// void **ArgNames = nullptr; // Name of the data for debugging, possibly null.
// void **ArgMappers = nullptr; // User-defined mappers, possibly null.
// uint64_t Tripcount =
// 0; // Tripcount for the teams / distribute loop, 0 otherwise.
// struct {
// 0; // Tripcount for the teams / distribute loop, 0 otherwise.
// struct {
// uint64_t NoWait : 1; // Was this kernel spawned with a `nowait` clause.
// uint64_t IsCUDA : 1; // Was this kernel spawned via CUDA.
// uint64_t Unused : 62;
// } Flags = {0, 0, 0};
// } Flags = {0, 0, 0}; // totals to 64 Bit, 8 Byte
// // The number of teams (for x,y,z dimension).
// uint32_t NumTeams[3] = {0, 0, 0};
// // The number of threads (for x,y,z dimension).
// uint32_t ThreadLimit[3] = {0, 0, 0};
// uint32_t DynCGroupMem = 0; // Amount of dynamic cgroup memory requested.
//};
let kernel_elements =
vec![ti32, ti32, tptr, tptr, tptr, tptr, tptr, tptr, ti64, ti64, tarr, tarr, ti32];
}
cx.set_struct_body(kernel_arguments_ty, &kernel_elements, false);
// For now we don't handle kernels, so for now we just add a global dummy
// to make sure that the __tgt_offload_entry is defined and handled correctly.
cx.declare_global("my_struct_global2", kernel_arguments_ty);
impl KernelArgsTy {
// Version of the kernel-arguments struct ABI written into the first field.
const OFFLOAD_VERSION: u64 = 3;
// NoWait/IsCUDA flag bits, all cleared (see the Flags bitfield quoted above).
const FLAGS: u64 = 0;
// Tripcount for the teams/distribute loop; 0 means "not applicable".
const TRIPCOUNT: u64 = 0;
/// Declares the named LLVM struct type `struct.__tgt_kernel_arguments`,
/// whose element list mirrors the `KernelArgsTy` C++ declaration quoted
/// in the comments above (13 elements, ending in two `[3 x i32]` arrays
/// for NumTeams/ThreadLimit and a trailing i32 for DynCGroupMem).
fn new_decl<'ll>(cx: &'ll SimpleCx<'_>) -> &'ll Type {
let kernel_arguments_ty = cx.type_named_struct("struct.__tgt_kernel_arguments");
let tptr = cx.type_ptr();
let ti64 = cx.type_i64();
let ti32 = cx.type_i32();
let tarr = cx.type_array(ti32, 3);
let kernel_elements =
vec![ti32, ti32, tptr, tptr, tptr, tptr, tptr, tptr, ti64, ti64, tarr, tarr, ti32];
cx.set_struct_body(kernel_arguments_ty, &kernel_elements, false);
kernel_arguments_ty
}
/// Builds the 13 `(alignment, value)` pairs used to initialize one
/// `struct.__tgt_kernel_arguments` value, in the exact field order of
/// `new_decl`. The alignment accompanies each value so the caller can emit
/// correctly-aligned stores field by field.
///
/// `geps` are the pointers to the offload base-pointer/pointer/size arrays;
/// `memtransfer_types[0]` is the map-types array for this kernel.
fn new<'ll>(
cx: &'ll SimpleCx<'_>,
num_args: u64,
memtransfer_types: &[&'ll Value],
geps: [&'ll Value; 3],
) -> [(Align, &'ll Value); 13] {
let four = Align::from_bytes(4).expect("4 Byte alignment should work");
let eight = Align::EIGHT;
let ti32 = cx.type_i32();
let ci32_0 = cx.get_const_i32(0);
[
(four, cx.get_const_i32(KernelArgsTy::OFFLOAD_VERSION)),
(four, cx.get_const_i32(num_args)),
(eight, geps[0]),
(eight, geps[1]),
(eight, geps[2]),
(eight, memtransfer_types[0]),
// The next two are debug infos. FIXME(offload): set them
(eight, cx.const_null(cx.type_ptr())), // dbg
(eight, cx.const_null(cx.type_ptr())), // dbg
(eight, cx.get_const_i64(KernelArgsTy::TRIPCOUNT)),
(eight, cx.get_const_i64(KernelArgsTy::FLAGS)),
// NumTeams / ThreadLimit (x,y,z). NOTE(review): 2097152 and 256 look like
// hardcoded launch bounds matching the call site below — confirm intent.
(four, cx.const_array(ti32, &[cx.get_const_i32(2097152), ci32_0, ci32_0])),
(four, cx.const_array(ti32, &[cx.get_const_i32(256), ci32_0, ci32_0])),
// DynCGroupMem: no dynamic cgroup memory requested.
(four, cx.get_const_i32(0)),
]
}
}
fn gen_tgt_data_mappers<'ll>(
@ -182,12 +250,15 @@ pub(crate) fn add_global<'ll>(
llglobal
}
// This function returns a memtransfer value which encodes how arguments to this kernel shall be
// mapped to/from the gpu. It also returns a region_id with the name of this kernel, to be
// concatenated into the list of region_ids.
fn gen_define_handling<'ll>(
cx: &'ll SimpleCx<'_>,
kernel: &'ll llvm::Value,
offload_entry_ty: &'ll llvm::Type,
num: i64,
) -> &'ll llvm::Value {
) -> (&'ll llvm::Value, &'ll llvm::Value) {
let types = cx.func_params_types(cx.get_type_of_global(kernel));
// It seems like non-pointer values are automatically mapped. So here, we focus on pointer (or
// reference) types.
@ -205,10 +276,14 @@ fn gen_define_handling<'ll>(
// or both to and from the gpu (=3). Other values shouldn't affect us for now.
// A non-mutable reference or pointer will be 1, an array that's not read, but fully overwritten
// will be 2. For now, everything is 3, until we have our frontend set up.
let o_types =
add_priv_unnamed_arr(&cx, &format!(".offload_maptypes.{num}"), &vec![3; num_ptr_types]);
// 1+2+32: 1 (MapTo), 2 (MapFrom), 32 (Add one extra input ptr per function, to be used later).
let memtransfer_types = add_priv_unnamed_arr(
&cx,
&format!(".offload_maptypes.{num}"),
&vec![1 + 2 + 32; num_ptr_types],
);
// Next: For each function, generate these three entries. A weak constant,
// the llvm.rodata entry name, and the omp_offloading_entries value
// the llvm.rodata entry name, and the llvm_offload_entries value
let name = format!(".kernel_{num}.region_id");
let initializer = cx.get_const_i8(0);
@ -222,19 +297,10 @@ fn gen_define_handling<'ll>(
let llglobal = add_unnamed_global(&cx, &offload_entry_name, initializer, InternalLinkage);
llvm::set_alignment(llglobal, Align::ONE);
llvm::set_section(llglobal, c".llvm.rodata.offloading");
// Not actively used yet, for calling real kernels
let name = format!(".offloading.entry.kernel_{num}");
// See the __tgt_offload_entry documentation above.
let reserved = cx.get_const_i64(0);
let version = cx.get_const_i16(1);
let kind = cx.get_const_i16(1);
let flags = cx.get_const_i32(0);
let size = cx.get_const_i64(0);
let data = cx.get_const_i64(0);
let aux_addr = cx.const_null(cx.type_ptr());
let elems = vec![reserved, version, kind, flags, region_id, llglobal, size, data, aux_addr];
let elems = TgtOffloadEntry::new(&cx, region_id, llglobal);
let initializer = crate::common::named_struct(offload_entry_ty, &elems);
let c_name = CString::new(name).unwrap();
@ -242,13 +308,13 @@ fn gen_define_handling<'ll>(
llvm::set_global_constant(llglobal, true);
llvm::set_linkage(llglobal, WeakAnyLinkage);
llvm::set_initializer(llglobal, initializer);
llvm::set_alignment(llglobal, Align::ONE);
let c_section_name = CString::new(".omp_offloading_entries").unwrap();
llvm::set_alignment(llglobal, Align::EIGHT);
let c_section_name = CString::new("llvm_offload_entries").unwrap();
llvm::set_section(llglobal, &c_section_name);
o_types
(memtransfer_types, region_id)
}
fn declare_offload_fn<'ll>(
pub(crate) fn declare_offload_fn<'ll>(
cx: &'ll SimpleCx<'_>,
name: &str,
ty: &'ll llvm::Type,
@ -285,9 +351,10 @@ fn declare_offload_fn<'ll>(
// 6. generate __tgt_target_data_end calls to move data from the GPU
fn gen_call_handling<'ll>(
cx: &'ll SimpleCx<'_>,
_kernels: &[&'ll llvm::Value],
o_types: &[&'ll llvm::Value],
memtransfer_types: &[&'ll llvm::Value],
region_ids: &[&'ll llvm::Value],
) {
let (tgt_decl, tgt_target_kernel_ty) = generate_launcher(&cx);
// %struct.__tgt_bin_desc = type { i32, ptr, ptr, ptr }
let tptr = cx.type_ptr();
let ti32 = cx.type_i32();
@ -295,7 +362,7 @@ fn gen_call_handling<'ll>(
let tgt_bin_desc = cx.type_named_struct("struct.__tgt_bin_desc");
cx.set_struct_body(tgt_bin_desc, &tgt_bin_desc_ty, false);
gen_tgt_kernel_global(&cx);
let tgt_kernel_decl = KernelArgsTy::new_decl(&cx);
let (begin_mapper_decl, _, end_mapper_decl, fn_ty) = gen_tgt_data_mappers(&cx);
let main_fn = cx.get_function("main");
@ -329,35 +396,32 @@ fn gen_call_handling<'ll>(
// These represent the sizes in bytes, e.g. the entry for `&[f64; 16]` will be 8*16.
let ty2 = cx.type_array(cx.type_i64(), num_args);
let a4 = builder.direct_alloca(ty2, Align::EIGHT, ".offload_sizes");
// Now we allocate once per function param, a copy to be passed to one of our maps.
let mut vals = vec![];
let mut geps = vec![];
let i32_0 = cx.get_const_i32(0);
for (index, in_ty) in types.iter().enumerate() {
// get function arg, store it into the alloca, and read it.
let p = llvm::get_param(called, index as u32);
let name = llvm::get_value_name(p);
let name = str::from_utf8(&name).unwrap();
let arg_name = format!("{name}.addr");
let alloca = builder.direct_alloca(in_ty, Align::EIGHT, &arg_name);
builder.store(p, alloca, Align::EIGHT);
let val = builder.load(in_ty, alloca, Align::EIGHT);
let gep = builder.inbounds_gep(cx.type_f32(), val, &[i32_0]);
vals.push(val);
geps.push(gep);
}
//%kernel_args = alloca %struct.__tgt_kernel_arguments, align 8
let a5 = builder.direct_alloca(tgt_kernel_decl, Align::EIGHT, "kernel_args");
// Step 1)
unsafe { llvm::LLVMRustPositionBefore(builder.llbuilder, kernel_call) };
builder.memset(tgt_bin_desc_alloca, cx.get_const_i8(0), cx.get_const_i64(32), Align::EIGHT);
// Now we allocate once per function param, a copy to be passed to one of our maps.
let mut vals = vec![];
let mut geps = vec![];
let i32_0 = cx.get_const_i32(0);
for index in 0..types.len() {
let v = unsafe { llvm::LLVMGetOperand(kernel_call, index as u32).unwrap() };
let gep = builder.inbounds_gep(cx.type_f32(), v, &[i32_0]);
vals.push(v);
geps.push(gep);
}
let mapper_fn_ty = cx.type_func(&[cx.type_ptr()], cx.type_void());
let register_lib_decl = declare_offload_fn(&cx, "__tgt_register_lib", mapper_fn_ty);
let unregister_lib_decl = declare_offload_fn(&cx, "__tgt_unregister_lib", mapper_fn_ty);
let init_ty = cx.type_func(&[], cx.type_void());
let init_rtls_decl = declare_offload_fn(cx, "__tgt_init_all_rtls", init_ty);
// FIXME(offload): Later we want to add them to the wrapper code, rather than our main function.
// call void @__tgt_register_lib(ptr noundef %6)
builder.call(mapper_fn_ty, register_lib_decl, &[tgt_bin_desc_alloca], None);
// call void @__tgt_init_all_rtls()
@ -386,19 +450,19 @@ fn gen_call_handling<'ll>(
a1: &'ll Value,
a2: &'ll Value,
a4: &'ll Value,
) -> (&'ll Value, &'ll Value, &'ll Value) {
) -> [&'ll Value; 3] {
let i32_0 = cx.get_const_i32(0);
let gep1 = builder.inbounds_gep(ty, a1, &[i32_0, i32_0]);
let gep2 = builder.inbounds_gep(ty, a2, &[i32_0, i32_0]);
let gep3 = builder.inbounds_gep(ty2, a4, &[i32_0, i32_0]);
(gep1, gep2, gep3)
[gep1, gep2, gep3]
}
fn generate_mapper_call<'a, 'll>(
builder: &mut SBuilder<'a, 'll>,
cx: &'ll SimpleCx<'ll>,
geps: (&'ll Value, &'ll Value, &'ll Value),
geps: [&'ll Value; 3],
o_type: &'ll Value,
fn_to_call: &'ll Value,
fn_ty: &'ll Type,
@ -409,31 +473,51 @@ fn gen_call_handling<'ll>(
let i64_max = cx.get_const_i64(u64::MAX);
let num_args = cx.get_const_i32(num_args);
let args =
vec![s_ident_t, i64_max, num_args, geps.0, geps.1, geps.2, o_type, nullptr, nullptr];
vec![s_ident_t, i64_max, num_args, geps[0], geps[1], geps[2], o_type, nullptr, nullptr];
builder.call(fn_ty, fn_to_call, &args, None);
}
// Step 2)
let s_ident_t = generate_at_one(&cx);
let o = o_types[0];
let o = memtransfer_types[0];
let geps = get_geps(&mut builder, &cx, ty, ty2, a1, a2, a4);
generate_mapper_call(&mut builder, &cx, geps, o, begin_mapper_decl, fn_ty, num_args, s_ident_t);
let values = KernelArgsTy::new(&cx, num_args, memtransfer_types, geps);
// Step 3)
// Here we will add code for the actual kernel launches in a follow-up PR.
// FIXME(offload): launch kernels
// Here we fill the KernelArgsTy, see the documentation above
for (i, value) in values.iter().enumerate() {
let ptr = builder.inbounds_gep(tgt_kernel_decl, a5, &[i32_0, cx.get_const_i32(i as u64)]);
builder.store(value.1, ptr, value.0);
}
let args = vec![
s_ident_t,
// FIXME(offload) give users a way to select which GPU to use.
cx.get_const_i64(u64::MAX), // MAX == -1.
// FIXME(offload): Don't hardcode the numbers of threads in the future.
cx.get_const_i32(2097152),
cx.get_const_i32(256),
region_ids[0],
a5,
];
let offload_success = builder.call(tgt_target_kernel_ty, tgt_decl, &args, None);
// %41 = call i32 @__tgt_target_kernel(ptr @1, i64 -1, i32 2097152, i32 256, ptr @.kernel_1.region_id, ptr %kernel_args)
unsafe {
let next = llvm::LLVMGetNextInstruction(offload_success).unwrap();
llvm::LLVMRustPositionAfter(builder.llbuilder, next);
llvm::LLVMInstructionEraseFromParent(next);
}
// Step 4)
unsafe { llvm::LLVMRustPositionAfter(builder.llbuilder, kernel_call) };
let geps = get_geps(&mut builder, &cx, ty, ty2, a1, a2, a4);
generate_mapper_call(&mut builder, &cx, geps, o, end_mapper_decl, fn_ty, num_args, s_ident_t);
builder.call(mapper_fn_ty, unregister_lib_decl, &[tgt_bin_desc_alloca], None);
// With this we generated the following begin and end mappers. We could easily generate the
// update mapper in an update.
// call void @__tgt_target_data_begin_mapper(ptr @1, i64 -1, i32 3, ptr %27, ptr %28, ptr %29, ptr @.offload_maptypes, ptr null, ptr null)
// call void @__tgt_target_data_update_mapper(ptr @1, i64 -1, i32 2, ptr %46, ptr %47, ptr %48, ptr @.offload_maptypes.1, ptr null, ptr null)
// call void @__tgt_target_data_end_mapper(ptr @1, i64 -1, i32 3, ptr %49, ptr %50, ptr %51, ptr @.offload_maptypes, ptr null, ptr null)
drop(builder);
// FIXME(offload) The issue is that we right now add a call to the gpu version of the function,
// and then delete the call to the CPU version. In the future, we should use an intrinsic which
// directly resolves to a call to the GPU version.
unsafe { llvm::LLVMDeleteFunction(called) };
}

View file

@ -12,7 +12,7 @@ use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
use rustc_hashes::Hash128;
use rustc_hir::def_id::DefId;
use rustc_middle::bug;
use rustc_middle::mir::interpret::{ConstAllocation, GlobalAlloc, Scalar};
use rustc_middle::mir::interpret::{ConstAllocation, GlobalAlloc, PointerArithmetic, Scalar};
use rustc_middle::ty::TyCtxt;
use rustc_session::cstore::DllImport;
use tracing::debug;
@ -281,8 +281,8 @@ impl<'ll, 'tcx> ConstCodegenMethods for CodegenCx<'ll, 'tcx> {
// This avoids generating a zero-sized constant value and actually needing a
// real address at runtime.
if alloc.inner().len() == 0 {
assert_eq!(offset.bytes(), 0);
let llval = self.const_usize(alloc.inner().align.bytes());
let val = alloc.inner().align.bytes().wrapping_add(offset.bytes());
let llval = self.const_usize(self.tcx.truncate_to_target_usize(val));
return if matches!(layout.primitive(), Pointer(_)) {
unsafe { llvm::LLVMConstIntToPtr(llval, llty) }
} else {

View file

@ -1117,6 +1117,7 @@ unsafe extern "C" {
// Operations on functions
pub(crate) fn LLVMSetFunctionCallConv(Fn: &Value, CC: c_uint);
pub(crate) fn LLVMDeleteFunction(Fn: &Value);
// Operations about llvm intrinsics
pub(crate) fn LLVMLookupIntrinsicID(Name: *const c_char, NameLen: size_t) -> c_uint;
@ -1146,6 +1147,8 @@ unsafe extern "C" {
pub(crate) fn LLVMIsAInstruction(Val: &Value) -> Option<&Value>;
pub(crate) fn LLVMGetFirstBasicBlock(Fn: &Value) -> &BasicBlock;
pub(crate) fn LLVMGetOperand(Val: &Value, Index: c_uint) -> Option<&Value>;
pub(crate) fn LLVMGetNextInstruction(Val: &Value) -> Option<&Value>;
pub(crate) fn LLVMInstructionEraseFromParent(Val: &Value);
// Operations on call sites
pub(crate) fn LLVMSetInstructionCallConv(Instr: &Value, CC: c_uint);

View file

@ -200,10 +200,11 @@ impl<'a, 'tcx> TerminatorCodegenHelper<'tcx> {
let fn_ty = bx.fn_decl_backend_type(fn_abi);
let fn_attrs = if bx.tcx().def_kind(fx.instance.def_id()).has_codegen_attrs() {
Some(bx.tcx().codegen_fn_attrs(fx.instance.def_id()))
Some(bx.tcx().codegen_instance_attrs(fx.instance.def))
} else {
None
};
let fn_attrs = fn_attrs.as_deref();
if !fn_abi.can_unwind {
unwind = mir::UnwindAction::Unreachable;

View file

@ -40,12 +40,12 @@ impl<'tcx, V> Locals<'tcx, V> {
impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
pub(super) fn initialize_locals(&mut self, values: Vec<LocalRef<'tcx, Bx::Value>>) {
assert!(self.locals.values.is_empty());
self.locals.values = IndexVec::from_raw(values);
// FIXME(#115215): After #115025 get's merged this might not be necessary
for (local, value) in values.into_iter().enumerate() {
for (local, value) in self.locals.values.iter_enumerated() {
match value {
LocalRef::Place(_) | LocalRef::UnsizedPlace(_) | LocalRef::PendingOperand => (),
LocalRef::Operand(op) => {
let local = mir::Local::from_usize(local);
let expected_ty = self.monomorphize(self.mir.local_decls[local].ty);
if expected_ty != op.layout.ty {
warn!(
@ -56,7 +56,6 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
}
}
}
self.locals.values.push(value);
}
}

View file

@ -180,6 +180,9 @@ pub fn codegen_mir<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
let llfn = cx.get_fn(instance);
let mut mir = tcx.instance_mir(instance.def);
// Note that the ABI logic has deduced facts about the functions' parameters based on the MIR we
// got here (`deduce_param_attrs`). That means we can *not* apply arbitrary further MIR
// transforms as that may invalidate those deduced facts!
let fn_abi = cx.fn_abi_of_instance(instance, ty::List::empty());
debug!("fn_abi: {:?}", fn_abi);
@ -317,6 +320,7 @@ pub fn codegen_mir<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
}
}
/// Replace `clone` calls that come from `use` statements with direct copies if possible.
// FIXME: Move this function to mir::transform when post-mono MIR passes land.
fn optimize_use_clone<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
cx: &'a Bx::CodegenCx,

View file

@ -234,7 +234,7 @@ where
Rvalue::Discriminant(place) => in_place::<Q, _>(cx, in_local, place.as_ref()),
Rvalue::CopyForDeref(_) => bug!("`CopyForDeref` in runtime MIR"),
Rvalue::CopyForDeref(place) => in_place::<Q, _>(cx, in_local, place.as_ref()),
Rvalue::Use(operand)
| Rvalue::Repeat(operand, _)

View file

@ -7,7 +7,7 @@ use rustc_middle::ty::TyCtxt;
fn parent_impl_or_trait_constness(tcx: TyCtxt<'_>, def_id: LocalDefId) -> hir::Constness {
let parent_id = tcx.local_parent(def_id);
match tcx.def_kind(parent_id) {
DefKind::Impl { of_trait: true } => tcx.impl_trait_header(parent_id).unwrap().constness,
DefKind::Impl { of_trait: true } => tcx.impl_trait_header(parent_id).constness,
DefKind::Trait => {
if tcx.is_const_trait(parent_id.into()) {
hir::Constness::Const

View file

@ -2154,11 +2154,11 @@ impl HumanEmitter {
assert!(!file_lines.lines.is_empty() || parts[0].span.is_dummy());
let line_start = sm.lookup_char_pos(parts[0].span.lo()).line;
let line_start = sm.lookup_char_pos(parts[0].original_span.lo()).line;
let mut lines = complete.lines();
if lines.clone().next().is_none() {
// Account for a suggestion to completely remove a line(s) with whitespace (#94192).
let line_end = sm.lookup_char_pos(parts[0].span.hi()).line;
let line_end = sm.lookup_char_pos(parts[0].original_span.hi()).line;
for line in line_start..=line_end {
self.draw_line_num(
&mut buffer,

View file

@ -224,6 +224,13 @@ pub struct SubstitutionPart {
pub snippet: String,
}
#[derive(Clone, Debug, PartialEq, Hash, Encodable, Decodable)]
pub struct TrimmedSubstitutionPart {
pub original_span: Span,
pub span: Span,
pub snippet: String,
}
/// Used to translate between `Span`s and byte positions within a single output line in highlighted
/// code of structured suggestions.
#[derive(Debug, Clone, Copy)]
@ -233,6 +240,35 @@ pub(crate) struct SubstitutionHighlight {
}
impl SubstitutionPart {
/// Try to turn a replacement into an addition when the span that is being
/// overwritten matches either the prefix or suffix of the replacement.
fn trim_trivial_replacements(self, sm: &SourceMap) -> TrimmedSubstitutionPart {
let mut trimmed_part = TrimmedSubstitutionPart {
original_span: self.span,
span: self.span,
snippet: self.snippet,
};
if trimmed_part.snippet.is_empty() {
return trimmed_part;
}
let Ok(snippet) = sm.span_to_snippet(trimmed_part.span) else {
return trimmed_part;
};
if let Some((prefix, substr, suffix)) = as_substr(&snippet, &trimmed_part.snippet) {
trimmed_part.span = Span::new(
trimmed_part.span.lo() + BytePos(prefix as u32),
trimmed_part.span.hi() - BytePos(suffix as u32),
trimmed_part.span.ctxt(),
trimmed_part.span.parent(),
);
trimmed_part.snippet = substr.to_string();
}
trimmed_part
}
}
impl TrimmedSubstitutionPart {
pub fn is_addition(&self, sm: &SourceMap) -> bool {
!self.snippet.is_empty() && !self.replaces_meaningful_content(sm)
}
@ -260,27 +296,6 @@ impl SubstitutionPart {
sm.span_to_snippet(self.span)
.map_or(!self.span.is_empty(), |snippet| !snippet.trim().is_empty())
}
/// Try to turn a replacement into an addition when the span that is being
/// overwritten matches either the prefix or suffix of the replacement.
fn trim_trivial_replacements(&mut self, sm: &SourceMap) {
if self.snippet.is_empty() {
return;
}
let Ok(snippet) = sm.span_to_snippet(self.span) else {
return;
};
if let Some((prefix, substr, suffix)) = as_substr(&snippet, &self.snippet) {
self.span = Span::new(
self.span.lo() + BytePos(prefix as u32),
self.span.hi() - BytePos(suffix as u32),
self.span.ctxt(),
self.span.parent(),
);
self.snippet = substr.to_string();
}
}
}
/// Given an original string like `AACC`, and a suggestion like `AABBCC`, try to detect
@ -310,7 +325,8 @@ impl CodeSuggestion {
pub(crate) fn splice_lines(
&self,
sm: &SourceMap,
) -> Vec<(String, Vec<SubstitutionPart>, Vec<Vec<SubstitutionHighlight>>, ConfusionType)> {
) -> Vec<(String, Vec<TrimmedSubstitutionPart>, Vec<Vec<SubstitutionHighlight>>, ConfusionType)>
{
// For the `Vec<Vec<SubstitutionHighlight>>` value, the first level of the vector
// corresponds to the output snippet's lines, while the second level corresponds to the
// substrings within that line that should be highlighted.
@ -428,12 +444,17 @@ impl CodeSuggestion {
// or deleted code in order to point at the correct column *after* substitution.
let mut acc = 0;
let mut confusion_type = ConfusionType::None;
for part in &mut substitution.parts {
let trimmed_parts = substitution
.parts
.into_iter()
// If this is a replacement of, e.g. `"a"` into `"ab"`, adjust the
// suggestion and snippet to look as if we just suggested to add
// `"b"`, which is typically much easier for the user to understand.
part.trim_trivial_replacements(sm);
.map(|part| part.trim_trivial_replacements(sm))
.collect::<Vec<_>>();
for part in &trimmed_parts {
let part_confusion = detect_confusion_type(sm, &part.snippet, part.span);
confusion_type = confusion_type.combine(part_confusion);
let cur_lo = sm.lookup_char_pos(part.span.lo());
@ -521,7 +542,7 @@ impl CodeSuggestion {
if highlights.iter().all(|parts| parts.is_empty()) {
None
} else {
Some((buf, substitution.parts, highlights, confusion_type))
Some((buf, trimmed_parts, highlights, confusion_type))
}
})
.collect()

View file

@ -49,6 +49,9 @@ expand_feature_removed =
.note = removed in {$removed_rustc_version}{$pull_note}
.reason = {$reason}
expand_file_modules_in_proc_macro_input_are_unstable =
file modules in proc macro input are unstable
expand_glob_delegation_outside_impls =
glob delegation is only supported in impls
@ -158,9 +161,6 @@ expand_mve_unrecognized_expr =
expand_mve_unrecognized_var =
variable `{$key}` is not recognized in meta-variable expression
expand_non_inline_modules_in_proc_macro_input_are_unstable =
non-inline modules in proc macro input are unstable
expand_or_patterns_back_compat = the meaning of the `pat` fragment specifier is changing in Rust 2021, which may affect this macro
.suggestion = use pat_param to preserve semantics

View file

@ -233,7 +233,7 @@ impl<'a> ExtCtxt<'a> {
};
let local = Box::new(ast::Local {
super_: None,
pat,
pat: Box::new(pat),
ty,
id: ast::DUMMY_NODE_ID,
kind: LocalKind::Init(ex),
@ -249,7 +249,7 @@ impl<'a> ExtCtxt<'a> {
pub fn stmt_let_type_only(&self, span: Span, ty: Box<ast::Ty>) -> ast::Stmt {
let local = Box::new(ast::Local {
super_: None,
pat: self.pat_wild(span),
pat: Box::new(self.pat_wild(span)),
ty: Some(ty),
id: ast::DUMMY_NODE_ID,
kind: LocalKind::Decl,
@ -528,16 +528,16 @@ impl<'a> ExtCtxt<'a> {
self.expr_match(sp, head, thin_vec![ok_arm, err_arm])
}
pub fn pat(&self, span: Span, kind: PatKind) -> Box<ast::Pat> {
Box::new(ast::Pat { id: ast::DUMMY_NODE_ID, kind, span, tokens: None })
pub fn pat(&self, span: Span, kind: PatKind) -> ast::Pat {
ast::Pat { id: ast::DUMMY_NODE_ID, kind, span, tokens: None }
}
pub fn pat_wild(&self, span: Span) -> Box<ast::Pat> {
pub fn pat_wild(&self, span: Span) -> ast::Pat {
self.pat(span, PatKind::Wild)
}
pub fn pat_lit(&self, span: Span, expr: Box<ast::Expr>) -> Box<ast::Pat> {
pub fn pat_lit(&self, span: Span, expr: Box<ast::Expr>) -> ast::Pat {
self.pat(span, PatKind::Expr(expr))
}
pub fn pat_ident(&self, span: Span, ident: Ident) -> Box<ast::Pat> {
pub fn pat_ident(&self, span: Span, ident: Ident) -> ast::Pat {
self.pat_ident_binding_mode(span, ident, ast::BindingMode::NONE)
}
@ -546,19 +546,19 @@ impl<'a> ExtCtxt<'a> {
span: Span,
ident: Ident,
ann: ast::BindingMode,
) -> Box<ast::Pat> {
) -> ast::Pat {
let pat = PatKind::Ident(ann, ident.with_span_pos(span), None);
self.pat(span, pat)
}
pub fn pat_path(&self, span: Span, path: ast::Path) -> Box<ast::Pat> {
pub fn pat_path(&self, span: Span, path: ast::Path) -> ast::Pat {
self.pat(span, PatKind::Path(None, path))
}
pub fn pat_tuple_struct(
&self,
span: Span,
path: ast::Path,
subpats: ThinVec<Box<ast::Pat>>,
) -> Box<ast::Pat> {
subpats: ThinVec<ast::Pat>,
) -> ast::Pat {
self.pat(span, PatKind::TupleStruct(None, path, subpats))
}
pub fn pat_struct(
@ -566,23 +566,23 @@ impl<'a> ExtCtxt<'a> {
span: Span,
path: ast::Path,
field_pats: ThinVec<ast::PatField>,
) -> Box<ast::Pat> {
) -> ast::Pat {
self.pat(span, PatKind::Struct(None, path, field_pats, ast::PatFieldsRest::None))
}
pub fn pat_tuple(&self, span: Span, pats: ThinVec<Box<ast::Pat>>) -> Box<ast::Pat> {
pub fn pat_tuple(&self, span: Span, pats: ThinVec<ast::Pat>) -> ast::Pat {
self.pat(span, PatKind::Tuple(pats))
}
pub fn pat_some(&self, span: Span, pat: Box<ast::Pat>) -> Box<ast::Pat> {
pub fn pat_some(&self, span: Span, pat: ast::Pat) -> ast::Pat {
let some = self.std_path(&[sym::option, sym::Option, sym::Some]);
let path = self.path_global(span, some);
self.pat_tuple_struct(span, path, thin_vec![pat])
}
pub fn arm(&self, span: Span, pat: Box<ast::Pat>, expr: Box<ast::Expr>) -> ast::Arm {
pub fn arm(&self, span: Span, pat: ast::Pat, expr: Box<ast::Expr>) -> ast::Arm {
ast::Arm {
attrs: AttrVec::new(),
pat,
pat: Box::new(pat),
guard: None,
body: Some(expr),
span,
@ -661,11 +661,11 @@ impl<'a> ExtCtxt<'a> {
}
pub fn param(&self, span: Span, ident: Ident, ty: Box<ast::Ty>) -> ast::Param {
let arg_pat = self.pat_ident(span, ident);
let pat = Box::new(self.pat_ident(span, ident));
ast::Param {
attrs: AttrVec::default(),
id: ast::DUMMY_NODE_ID,
pat: arg_pat,
pat,
span,
ty,
is_placeholder: false,

View file

@ -13,7 +13,7 @@ use rustc_ast::{
use rustc_attr_parsing as attr;
use rustc_attr_parsing::validate_attr::deny_builtin_meta_unsafety;
use rustc_attr_parsing::{
AttributeParser, CFG_TEMPLATE, EvalConfigResult, ShouldEmit, eval_config_entry, parse_cfg_attr,
AttributeParser, CFG_TEMPLATE, EvalConfigResult, ShouldEmit, eval_config_entry, parse_cfg,
validate_attr,
};
use rustc_data_structures::flat_map_in_place::FlatMapInPlace;
@ -303,7 +303,7 @@ impl<'a> StripUnconfigured<'a> {
let trace_attr = attr_into_trace(cfg_attr.clone(), sym::cfg_attr_trace);
let Some((cfg_predicate, expanded_attrs)) =
rustc_parse::parse_cfg_attr(cfg_attr, &self.sess.psess)
rustc_attr_parsing::parse_cfg_attr(cfg_attr, &self.sess, self.features)
else {
return vec![trace_attr];
};
@ -318,7 +318,15 @@ impl<'a> StripUnconfigured<'a> {
);
}
if !attr::cfg_matches(&cfg_predicate, &self.sess, self.lint_node_id, self.features) {
if !attr::eval_config_entry(
self.sess,
&cfg_predicate,
ast::CRATE_NODE_ID,
self.features,
ShouldEmit::ErrorsAndLints,
)
.as_bool()
{
return vec![trace_attr];
}
@ -428,7 +436,7 @@ impl<'a> StripUnconfigured<'a> {
node,
self.features,
emit_errors,
parse_cfg_attr,
parse_cfg,
&CFG_TEMPLATE,
) else {
// Cfg attribute was not parsable, give up
@ -488,7 +496,7 @@ impl<'a> StripUnconfigured<'a> {
}
/// FIXME: Still used by Rustdoc, should be removed after
pub fn parse_cfg<'a>(meta_item: &'a MetaItem, sess: &Session) -> Option<&'a MetaItemInner> {
pub fn parse_cfg_old<'a>(meta_item: &'a MetaItem, sess: &Session) -> Option<&'a MetaItemInner> {
let span = meta_item.span;
match meta_item.meta_item_list() {
None => {

View file

@ -1050,7 +1050,7 @@ impl<'a, 'b> MacroExpander<'a, 'b> {
self.sess,
sym::proc_macro_hygiene,
item.span,
fluent_generated::expand_non_inline_modules_in_proc_macro_input_are_unstable,
fluent_generated::expand_file_modules_in_proc_macro_input_are_unstable,
)
.emit();
}
@ -1152,12 +1152,12 @@ pub fn parse_ast_fragment<'a>(
}
}
AstFragmentKind::Ty => AstFragment::Ty(this.parse_ty()?),
AstFragmentKind::Pat => AstFragment::Pat(this.parse_pat_allow_top_guard(
AstFragmentKind::Pat => AstFragment::Pat(Box::new(this.parse_pat_allow_top_guard(
None,
RecoverComma::No,
RecoverColon::Yes,
CommaRecoveryMode::LikelyTuple,
)?),
)?)),
AstFragmentKind::Crate => AstFragment::Crate(this.parse_crate_mod()?),
AstFragmentKind::Arms
| AstFragmentKind::ExprFields

View file

@ -148,8 +148,7 @@ fn ensure_impl_params_and_item_params_correspond<'tcx>(
ty::ImplPolarity::Positive | ty::ImplPolarity::Reservation => "",
ty::ImplPolarity::Negative => "!",
};
let trait_name = tcx
.item_name(tcx.trait_id_of_impl(impl_def_id.to_def_id()).expect("expected impl of trait"));
let trait_name = tcx.item_name(tcx.impl_trait_id(impl_def_id.to_def_id()));
let mut err = struct_span_code_err!(
tcx.dcx(),
impl_span,
@ -187,8 +186,7 @@ fn ensure_impl_predicates_are_implied_by_item_defn<'tcx>(
let ocx = ObligationCtxt::new_with_diagnostics(&infcx);
let impl_span = tcx.def_span(impl_def_id.to_def_id());
let trait_name = tcx
.item_name(tcx.trait_id_of_impl(impl_def_id.to_def_id()).expect("expected impl of trait"));
let trait_name = tcx.item_name(tcx.impl_trait_id(impl_def_id.to_def_id()));
let polarity = match tcx.impl_polarity(impl_def_id) {
ty::ImplPolarity::Positive | ty::ImplPolarity::Reservation => "",
ty::ImplPolarity::Negative => "!",
@ -212,8 +210,7 @@ fn ensure_impl_predicates_are_implied_by_item_defn<'tcx>(
ty::EarlyBinder::bind(tcx.param_env(adt_def_id)).instantiate(tcx, adt_to_impl_args);
let fresh_impl_args = infcx.fresh_args_for_item(impl_span, impl_def_id.to_def_id());
let fresh_adt_ty =
tcx.impl_trait_ref(impl_def_id).unwrap().instantiate(tcx, fresh_impl_args).self_ty();
let fresh_adt_ty = tcx.impl_trait_ref(impl_def_id).instantiate(tcx, fresh_impl_args).self_ty();
ocx.eq(&ObligationCause::dummy_with_span(impl_span), adt_env, fresh_adt_ty, impl_adt_ty)
.expect("equating fully generic trait ref should never fail");

View file

@ -806,10 +806,10 @@ pub(crate) fn check_item_type(tcx: TyCtxt<'_>, def_id: LocalDefId) -> Result<(),
DefKind::Impl { of_trait } => {
tcx.ensure_ok().generics_of(def_id);
tcx.ensure_ok().type_of(def_id);
tcx.ensure_ok().impl_trait_header(def_id);
tcx.ensure_ok().predicates_of(def_id);
tcx.ensure_ok().associated_items(def_id);
if of_trait && let Some(impl_trait_header) = tcx.impl_trait_header(def_id) {
if of_trait {
let impl_trait_header = tcx.impl_trait_header(def_id);
res = res.and(
tcx.ensure_ok()
.coherent_trait(impl_trait_header.trait_ref.instantiate_identity().def_id),
@ -1191,9 +1191,7 @@ fn check_impl_items_against_trait<'tcx>(
tcx,
ty_impl_item,
ty_trait_item,
tcx.impl_trait_ref(ty_impl_item.container_id(tcx))
.unwrap()
.instantiate_identity(),
tcx.impl_trait_ref(ty_impl_item.container_id(tcx)).instantiate_identity(),
);
}
ty::AssocKind::Const { .. } => {}

View file

@ -38,8 +38,7 @@ pub(super) fn compare_impl_item(
) -> Result<(), ErrorGuaranteed> {
let impl_item = tcx.associated_item(impl_item_def_id);
let trait_item = tcx.associated_item(impl_item.expect_trait_impl()?);
let impl_trait_ref =
tcx.impl_trait_ref(impl_item.container_id(tcx)).unwrap().instantiate_identity();
let impl_trait_ref = tcx.impl_trait_ref(impl_item.container_id(tcx)).instantiate_identity();
debug!(?impl_trait_ref);
match impl_item.kind {
@ -443,7 +442,7 @@ pub(super) fn collect_return_position_impl_trait_in_trait_tys<'tcx>(
let impl_m = tcx.associated_item(impl_m_def_id.to_def_id());
let trait_m = tcx.associated_item(impl_m.expect_trait_impl()?);
let impl_trait_ref =
tcx.impl_trait_ref(tcx.parent(impl_m_def_id.to_def_id())).unwrap().instantiate_identity();
tcx.impl_trait_ref(tcx.parent(impl_m_def_id.to_def_id())).instantiate_identity();
// First, check a few of the same things as `compare_impl_method`,
// just so we don't ICE during instantiation later.
check_method_is_structurally_compatible(tcx, impl_m, trait_m, impl_trait_ref, true)?;

View file

@ -647,11 +647,11 @@ pub(crate) fn check_intrinsic_type(
sym::box_new => (1, 0, vec![param(0)], Ty::new_box(tcx, param(0))),
// contract_checks() -> bool
sym::contract_checks => (0, 0, Vec::new(), tcx.types.bool),
// contract_check_requires::<C>(C) -> bool, where C: impl Fn() -> bool
sym::contract_check_requires => (1, 0, vec![param(0)], tcx.types.unit),
sym::contract_check_ensures => (2, 0, vec![param(0), param(1)], param(1)),
sym::contract_check_ensures => {
(2, 0, vec![Ty::new_option(tcx, param(0)), param(1)], param(1))
}
sym::simd_eq | sym::simd_ne | sym::simd_lt | sym::simd_le | sym::simd_gt | sym::simd_ge => {
(2, 0, vec![param(0), param(0)], param(1))

View file

@ -244,7 +244,7 @@ fn missing_items_err(
let snippet = with_types_for_signature!(suggestion_signature(
tcx,
trait_item,
tcx.impl_trait_ref(impl_def_id).unwrap().instantiate_identity(),
tcx.impl_trait_ref(impl_def_id).instantiate_identity(),
));
let code = format!("{padding}{snippet}\n{padding}");
if let Some(span) = tcx.hir_span_if_local(trait_item.def_id) {

View file

@ -245,10 +245,10 @@ pub(super) fn check_item<'tcx>(
// won't be allowed unless there's an *explicit* implementation of `Send`
// for `T`
hir::ItemKind::Impl(ref impl_) => {
crate::impl_wf_check::check_impl_wf(tcx, def_id)?;
crate::impl_wf_check::check_impl_wf(tcx, def_id, impl_.of_trait.is_some())?;
let mut res = Ok(());
if let Some(of_trait) = impl_.of_trait {
let header = tcx.impl_trait_header(def_id).unwrap();
let header = tcx.impl_trait_header(def_id);
let is_auto = tcx.trait_is_auto(header.trait_ref.skip_binder().def_id);
if let (hir::Defaultness::Default { .. }, true) = (of_trait.defaultness, is_auto) {
let sp = of_trait.trait_ref.path.span;
@ -1258,7 +1258,7 @@ fn check_impl<'tcx>(
// `#[rustc_reservation_impl]` impls are not real impls and
// therefore don't need to be WF (the trait's `Self: Trait` predicate
// won't hold).
let trait_ref = tcx.impl_trait_ref(item.owner_id).unwrap().instantiate_identity();
let trait_ref = tcx.impl_trait_ref(item.owner_id).instantiate_identity();
// Avoid bogus "type annotations needed `Foo: Bar`" errors on `impl Bar for Foo` in case
// other `Foo` impls are incoherent.
tcx.ensure_ok().coherent_trait(trait_ref.def_id)?;

View file

@ -377,7 +377,7 @@ pub(crate) fn coerce_unsized_info<'tcx>(
let unsize_trait = tcx.require_lang_item(LangItem::Unsize, span);
let source = tcx.type_of(impl_did).instantiate_identity();
let trait_ref = tcx.impl_trait_ref(impl_did).unwrap().instantiate_identity();
let trait_ref = tcx.impl_trait_ref(impl_did).instantiate_identity();
assert_eq!(trait_ref.def_id, coerce_unsized_trait);
let target = trait_ref.args.type_at(1);
@ -707,7 +707,7 @@ fn visit_implementation_of_coerce_pointee_validity(
checker: &Checker<'_>,
) -> Result<(), ErrorGuaranteed> {
let tcx = checker.tcx;
let self_ty = tcx.impl_trait_ref(checker.impl_def_id).unwrap().instantiate_identity().self_ty();
let self_ty = tcx.impl_trait_ref(checker.impl_def_id).instantiate_identity().self_ty();
let span = tcx.def_span(checker.impl_def_id);
if !tcx.is_builtin_derived(checker.impl_def_id.into()) {
return Err(tcx.dcx().emit_err(errors::CoercePointeeNoUserValidityAssertion { span }));

View file

@ -163,7 +163,7 @@ fn coherent_trait(tcx: TyCtxt<'_>, def_id: DefId) -> Result<(), ErrorGuaranteed>
let mut res = tcx.ensure_ok().specialization_graph_of(def_id);
for &impl_def_id in impls {
let impl_header = tcx.impl_trait_header(impl_def_id).unwrap();
let impl_header = tcx.impl_trait_header(impl_def_id);
let trait_ref = impl_header.trait_ref.instantiate_identity();
let trait_def = tcx.trait_def(trait_ref.def_id);

View file

@ -22,7 +22,7 @@ pub(crate) fn orphan_check_impl(
tcx: TyCtxt<'_>,
impl_def_id: LocalDefId,
) -> Result<(), ErrorGuaranteed> {
let trait_ref = tcx.impl_trait_ref(impl_def_id).unwrap().instantiate_identity();
let trait_ref = tcx.impl_trait_ref(impl_def_id).instantiate_identity();
trait_ref.error_reported()?;
match orphan_check(tcx, impl_def_id, OrphanCheckMode::Proper) {
@ -294,7 +294,7 @@ fn orphan_check<'tcx>(
) -> Result<(), OrphanCheckErr<TyCtxt<'tcx>, FxIndexSet<DefId>>> {
// We only accept this routine to be invoked on implementations
// of a trait, not inherent implementations.
let trait_ref = tcx.impl_trait_ref(impl_def_id).unwrap();
let trait_ref = tcx.impl_trait_ref(impl_def_id);
debug!(trait_ref = ?trait_ref.skip_binder());
// If the *trait* is local to the crate, ok.

View file

@ -1291,28 +1291,26 @@ pub fn suggest_impl_trait<'tcx>(
None
}
fn impl_trait_header(tcx: TyCtxt<'_>, def_id: LocalDefId) -> Option<ty::ImplTraitHeader<'_>> {
fn impl_trait_header(tcx: TyCtxt<'_>, def_id: LocalDefId) -> ty::ImplTraitHeader<'_> {
let icx = ItemCtxt::new(tcx, def_id);
let item = tcx.hir_expect_item(def_id);
let impl_ = item.expect_impl();
let of_trait = impl_
.of_trait
.unwrap_or_else(|| panic!("expected impl trait, found inherent impl on {def_id:?}"));
let selfty = tcx.type_of(def_id).instantiate_identity();
let is_rustc_reservation = tcx.has_attr(def_id, sym::rustc_reservation_impl);
if is_rustc_reservation && impl_.of_trait.is_none() {
tcx.dcx().span_err(item.span, "reservation impls can't be inherent");
check_impl_constness(tcx, of_trait.constness, &of_trait.trait_ref);
let trait_ref = icx.lowerer().lower_impl_trait_ref(&of_trait.trait_ref, selfty);
ty::ImplTraitHeader {
trait_ref: ty::EarlyBinder::bind(trait_ref),
safety: of_trait.safety,
polarity: polarity_of_impl(tcx, of_trait, is_rustc_reservation),
constness: of_trait.constness,
}
impl_.of_trait.map(|of_trait| {
let selfty = tcx.type_of(def_id).instantiate_identity();
check_impl_constness(tcx, of_trait.constness, &of_trait.trait_ref);
let trait_ref = icx.lowerer().lower_impl_trait_ref(&of_trait.trait_ref, selfty);
ty::ImplTraitHeader {
trait_ref: ty::EarlyBinder::bind(trait_ref),
safety: of_trait.safety,
polarity: polarity_of_impl(tcx, of_trait, is_rustc_reservation),
constness: of_trait.constness,
}
})
}
fn check_impl_constness(

View file

@ -108,7 +108,7 @@ pub(crate) fn vtables<'tcx>(tcx: TyCtxt<'tcx>) {
let vtable_entries = match tcx.hir_item(id).kind {
hir::ItemKind::Impl(hir::Impl { of_trait: Some(_), .. }) => {
let trait_ref = tcx.impl_trait_ref(def_id).unwrap().instantiate_identity();
let trait_ref = tcx.impl_trait_ref(def_id).instantiate_identity();
if trait_ref.has_non_region_param() {
tcx.dcx().span_err(
attr.span(),

View file

@ -514,17 +514,15 @@ pub(super) fn impl_super_outlives(
tcx: TyCtxt<'_>,
def_id: DefId,
) -> ty::EarlyBinder<'_, ty::Clauses<'_>> {
tcx.impl_trait_header(def_id).expect("expected an impl of trait").trait_ref.map_bound(
|trait_ref| {
let clause: ty::Clause<'_> = trait_ref.upcast(tcx);
tcx.mk_clauses_from_iter(util::elaborate(tcx, [clause]).filter(|clause| {
matches!(
clause.kind().skip_binder(),
ty::ClauseKind::TypeOutlives(_) | ty::ClauseKind::RegionOutlives(_)
)
}))
},
)
tcx.impl_trait_header(def_id).trait_ref.map_bound(|trait_ref| {
let clause: ty::Clause<'_> = trait_ref.upcast(tcx);
tcx.mk_clauses_from_iter(util::elaborate(tcx, [clause]).filter(|clause| {
matches!(
clause.kind().skip_binder(),
ty::ClauseKind::TypeOutlives(_) | ty::ClauseKind::RegionOutlives(_)
)
}))
})
}
struct AssocTyToOpaque<'tcx> {

View file

@ -118,8 +118,7 @@ fn gather_explicit_predicates_of(tcx: TyCtxt<'_>, def_id: LocalDefId) -> ty::Gen
let impl_assoc_identity_args = ty::GenericArgs::identity_for_item(tcx, def_id);
let impl_def_id = tcx.parent(fn_def_id);
let impl_trait_ref_args =
tcx.impl_trait_ref(impl_def_id).unwrap().instantiate_identity().args;
let impl_trait_ref_args = tcx.impl_trait_ref(impl_def_id).instantiate_identity().args;
let impl_assoc_args =
impl_assoc_identity_args.rebase_onto(tcx, impl_def_id, impl_trait_ref_args);
@ -162,9 +161,8 @@ fn gather_explicit_predicates_of(tcx: TyCtxt<'_>, def_id: LocalDefId) -> ty::Gen
if let Some(of_trait) = impl_.of_trait
&& of_trait.defaultness.is_default()
{
is_default_impl_trait = tcx
.impl_trait_ref(def_id)
.map(|t| ty::Binder::dummy(t.instantiate_identity()));
is_default_impl_trait =
Some(ty::Binder::dummy(tcx.impl_trait_ref(def_id).instantiate_identity()));
}
}
ItemKind::Trait(_, _, _, _, _, self_bounds, ..)
@ -350,9 +348,10 @@ fn gather_explicit_predicates_of(tcx: TyCtxt<'_>, def_id: LocalDefId) -> ty::Gen
// before uses of `U`. This avoids false ambiguity errors
// in trait checking. See `setup_constraining_predicates`
// for details.
if let Node::Item(&Item { kind: ItemKind::Impl { .. }, .. }) = node {
if let Node::Item(&Item { kind: ItemKind::Impl(impl_), .. }) = node {
let self_ty = tcx.type_of(def_id).instantiate_identity();
let trait_ref = tcx.impl_trait_ref(def_id).map(ty::EarlyBinder::instantiate_identity);
let trait_ref =
impl_.of_trait.is_some().then(|| tcx.impl_trait_ref(def_id).instantiate_identity());
cgp::setup_constraining_predicates(
tcx,
&mut predicates,
@ -460,11 +459,12 @@ fn const_evaluatable_predicates_of<'tcx>(
}
if let hir::Node::Item(item) = node
&& let hir::ItemKind::Impl(_) = item.kind
&& let hir::ItemKind::Impl(impl_) = item.kind
{
if let Some(of_trait) = tcx.impl_trait_ref(def_id) {
if impl_.of_trait.is_some() {
debug!("visit impl trait_ref");
of_trait.instantiate_identity().visit_with(&mut collector);
let trait_ref = tcx.impl_trait_ref(def_id);
trait_ref.instantiate_identity().visit_with(&mut collector);
}
debug!("visit self_ty");

View file

@ -272,8 +272,7 @@ fn create_generic_args<'tcx>(
(FnKind::AssocTraitImpl, FnKind::AssocTrait) => {
let callee_generics = tcx.generics_of(sig_id);
let parent = tcx.parent(def_id.into());
let parent_args =
tcx.impl_trait_header(parent).unwrap().trait_ref.instantiate_identity().args;
let parent_args = tcx.impl_trait_header(parent).trait_ref.instantiate_identity().args;
let trait_args = ty::GenericArgs::identity_for_item(tcx, sig_id);
let method_args = tcx.mk_args(&trait_args[callee_generics.parent_count..]);

View file

@ -202,11 +202,9 @@ impl<'tcx> dyn HirTyLowerer<'tcx> + '_ {
// where we are guaranteed to catch *all* bounds like in
// `Self::lower_poly_trait_ref`. List of concrete issues:
// FIXME(more_maybe_bounds): We don't call this for trait object tys, supertrait
// bounds or associated type bounds (ATB)!
// FIXME(trait_alias, #143122): We don't call it for the RHS. Arguably however,
// AST lowering should reject them outright.
// bounds, trait alias bounds, assoc type bounds (ATB)!
let bounds = collect_relaxed_bounds(hir_bounds, self_ty_where_predicates);
self.check_and_report_invalid_relaxed_bounds(bounds);
self.reject_duplicate_relaxed_bounds(bounds);
}
let collected = collect_sizedness_bounds(tcx, hir_bounds, self_ty_where_predicates, span);
@ -310,6 +308,53 @@ impl<'tcx> dyn HirTyLowerer<'tcx> + '_ {
!self.tcx().has_attr(CRATE_DEF_ID, sym::rustc_no_implicit_bounds) && !collected.any()
}
fn reject_duplicate_relaxed_bounds(&self, relaxed_bounds: SmallVec<[&PolyTraitRef<'_>; 1]>) {
let tcx = self.tcx();
let mut grouped_bounds = FxIndexMap::<_, Vec<_>>::default();
for bound in &relaxed_bounds {
if let Res::Def(DefKind::Trait, trait_def_id) = bound.trait_ref.path.res {
grouped_bounds.entry(trait_def_id).or_default().push(bound.span);
}
}
for (trait_def_id, spans) in grouped_bounds {
if spans.len() > 1 {
let name = tcx.item_name(trait_def_id);
self.dcx()
.struct_span_err(spans, format!("duplicate relaxed `{name}` bounds"))
.with_code(E0203)
.emit();
}
}
}
pub(crate) fn require_bound_to_relax_default_trait(
&self,
trait_ref: hir::TraitRef<'_>,
span: Span,
) {
let tcx = self.tcx();
if let Res::Def(DefKind::Trait, def_id) = trait_ref.path.res
&& (tcx.is_lang_item(def_id, hir::LangItem::Sized) || tcx.is_default_trait(def_id))
{
return;
}
self.dcx().span_err(
span,
if tcx.sess.opts.unstable_opts.experimental_default_bounds
|| tcx.features().more_maybe_bounds()
{
"bound modifier `?` can only be applied to default traits"
} else {
"bound modifier `?` can only be applied to `Sized`"
},
);
}
/// Lower HIR bounds into `bounds` given the self type `param_ty` and the overarching late-bound vars if any.
///
/// ### Examples

View file

@ -8,7 +8,7 @@ use rustc_errors::{
};
use rustc_hir::def::{CtorOf, DefKind, Res};
use rustc_hir::def_id::DefId;
use rustc_hir::{self as hir, HirId, PolyTraitRef};
use rustc_hir::{self as hir, HirId};
use rustc_middle::bug;
use rustc_middle::ty::fast_reject::{TreatParams, simplify_type};
use rustc_middle::ty::print::{PrintPolyTraitRefExt as _, PrintTraitRefExt as _};
@ -35,52 +35,6 @@ use crate::fluent_generated as fluent;
use crate::hir_ty_lowering::{AssocItemQSelf, HirTyLowerer};
impl<'tcx> dyn HirTyLowerer<'tcx> + '_ {
/// Check for duplicate relaxed bounds and relaxed bounds of non-default traits.
pub(crate) fn check_and_report_invalid_relaxed_bounds(
&self,
relaxed_bounds: SmallVec<[&PolyTraitRef<'_>; 1]>,
) {
let tcx = self.tcx();
let mut grouped_bounds = FxIndexMap::<_, Vec<_>>::default();
for bound in &relaxed_bounds {
if let Res::Def(DefKind::Trait, trait_def_id) = bound.trait_ref.path.res {
grouped_bounds.entry(trait_def_id).or_default().push(bound.span);
}
}
for (trait_def_id, spans) in grouped_bounds {
if spans.len() > 1 {
let name = tcx.item_name(trait_def_id);
self.dcx()
.struct_span_err(spans, format!("duplicate relaxed `{name}` bounds"))
.with_code(E0203)
.emit();
}
}
let sized_def_id = tcx.require_lang_item(hir::LangItem::Sized, DUMMY_SP);
for bound in relaxed_bounds {
if let Res::Def(DefKind::Trait, def_id) = bound.trait_ref.path.res
&& (def_id == sized_def_id || tcx.is_default_trait(def_id))
{
continue;
}
self.dcx().span_err(
bound.span,
if tcx.sess.opts.unstable_opts.experimental_default_bounds
|| tcx.features().more_maybe_bounds()
{
"bound modifier `?` can only be applied to default traits like `Sized`"
} else {
"bound modifier `?` can only be applied to `Sized`"
},
);
}
}
/// On missing type parameters, emit an E0393 error and provide a structured suggestion using
/// the type parameter's name as a placeholder.
pub(crate) fn report_missing_type_params(
@ -473,7 +427,7 @@ impl<'tcx> dyn HirTyLowerer<'tcx> + '_ {
} else {
// Find all the types that have an `impl` for the trait.
tcx.all_impls(trait_def_id)
.filter_map(|impl_def_id| tcx.impl_trait_header(impl_def_id))
.map(|impl_def_id| tcx.impl_trait_header(impl_def_id))
.filter(|header| {
// Consider only accessible traits
tcx.visibility(trait_def_id).is_accessible_from(self.item_def_id(), tcx)

View file

@ -717,16 +717,15 @@ impl<'tcx> dyn HirTyLowerer<'tcx> + '_ {
trait_ref: &hir::TraitRef<'tcx>,
self_ty: Ty<'tcx>,
) -> ty::TraitRef<'tcx> {
let _ = self.prohibit_generic_args(
trait_ref.path.segments.split_last().unwrap().1.iter(),
GenericsArgsErrExtend::None,
);
let [leading_segments @ .., segment] = trait_ref.path.segments else { bug!() };
let _ = self.prohibit_generic_args(leading_segments.iter(), GenericsArgsErrExtend::None);
self.lower_mono_trait_ref(
trait_ref.path.span,
trait_ref.trait_def_id().unwrap_or_else(|| FatalError.raise()),
self_ty,
trait_ref.path.segments.last().unwrap(),
segment,
true,
)
}
@ -757,7 +756,12 @@ impl<'tcx> dyn HirTyLowerer<'tcx> + '_ {
#[instrument(level = "debug", skip(self, bounds))]
pub(crate) fn lower_poly_trait_ref(
&self,
poly_trait_ref: &hir::PolyTraitRef<'tcx>,
&hir::PolyTraitRef {
bound_generic_params,
modifiers: hir::TraitBoundModifiers { constness, polarity },
trait_ref,
span,
}: &hir::PolyTraitRef<'tcx>,
self_ty: Ty<'tcx>,
bounds: &mut Vec<(ty::Clause<'tcx>, Span)>,
predicate_filter: PredicateFilter,
@ -767,50 +771,67 @@ impl<'tcx> dyn HirTyLowerer<'tcx> + '_ {
// We use the *resolved* bound vars later instead of the HIR ones since the former
// also include the bound vars of the overarching predicate if applicable.
let hir::PolyTraitRef { bound_generic_params: _, modifiers, ref trait_ref, span } =
*poly_trait_ref;
let hir::TraitBoundModifiers { constness, polarity } = modifiers;
let _ = bound_generic_params;
let trait_def_id = trait_ref.trait_def_id().unwrap_or_else(|| FatalError.raise());
// Relaxed bounds `?Trait` and `PointeeSized` bounds aren't represented in the `middle::ty` IR
// Relaxed bounds `?Trait` and `PointeeSized` bounds aren't represented in the middle::ty IR
// as they denote the *absence* of a default bound. However, we can't bail out early here since
// we still need to perform several validation steps (see below). Instead, simply "pour" all
// resulting bounds "down the drain", i.e., into a new `Vec` that just gets dropped at the end.
let (polarity, bounds) = match polarity {
rustc_ast::BoundPolarity::Positive
if tcx.is_lang_item(trait_def_id, hir::LangItem::PointeeSized) =>
{
let transient = match polarity {
hir::BoundPolarity::Positive => {
// To elaborate on the comment directly above, regarding `PointeeSized` specifically,
// we don't "reify" such bounds to avoid trait system limitations -- namely,
// non-global where-clauses being preferred over item bounds (where `PointeeSized`
// bounds would be proven) -- which can result in errors when a `PointeeSized`
// supertrait / bound / predicate is added to some items.
(ty::PredicatePolarity::Positive, &mut Vec::new())
tcx.is_lang_item(trait_def_id, hir::LangItem::PointeeSized)
}
rustc_ast::BoundPolarity::Positive => (ty::PredicatePolarity::Positive, bounds),
rustc_ast::BoundPolarity::Negative(_) => (ty::PredicatePolarity::Negative, bounds),
rustc_ast::BoundPolarity::Maybe(_) => {
(ty::PredicatePolarity::Positive, &mut Vec::new())
hir::BoundPolarity::Negative(_) => false,
hir::BoundPolarity::Maybe(_) => {
self.require_bound_to_relax_default_trait(trait_ref, span);
true
}
};
let bounds = if transient { &mut Vec::new() } else { bounds };
let trait_segment = trait_ref.path.segments.last().unwrap();
let polarity = match polarity {
hir::BoundPolarity::Positive | hir::BoundPolarity::Maybe(_) => {
ty::PredicatePolarity::Positive
}
hir::BoundPolarity::Negative(_) => ty::PredicatePolarity::Negative,
};
let _ = self.prohibit_generic_args(
trait_ref.path.segments.split_last().unwrap().1.iter(),
GenericsArgsErrExtend::None,
);
self.report_internal_fn_trait(span, trait_def_id, trait_segment, false);
let [leading_segments @ .., segment] = trait_ref.path.segments else { bug!() };
let _ = self.prohibit_generic_args(leading_segments.iter(), GenericsArgsErrExtend::None);
self.report_internal_fn_trait(span, trait_def_id, segment, false);
let (generic_args, arg_count) = self.lower_generic_args_of_path(
trait_ref.path.span,
trait_def_id,
&[],
trait_segment,
segment,
Some(self_ty),
);
let constraints = segment.args().constraints;
if transient && (!generic_args[1..].is_empty() || !constraints.is_empty()) {
// Since the bound won't be present in the middle::ty IR as established above, any
// arguments or constraints won't be checked for well-formedness in later passes.
//
// This is only an issue if the trait ref is otherwise valid which can only happen if
// the corresponding default trait has generic parameters or associated items. Such a
// trait would be degenerate. We delay a bug to detect and guard us against these.
//
// E.g: Given `/*default*/ trait Bound<'a: 'static, T, const N: usize> {}`,
// `?Bound<Vec<str>, { panic!() }>` won't be wfchecked.
self.dcx()
.span_delayed_bug(span, "transient bound should not have args or constraints");
}
let bound_vars = tcx.late_bound_vars(trait_ref.hir_ref_id);
debug!(?bound_vars);
@ -922,7 +943,7 @@ impl<'tcx> dyn HirTyLowerer<'tcx> + '_ {
== OverlappingAsssocItemConstraints::Forbidden)
.then_some(FxIndexMap::default());
for constraint in trait_segment.args().constraints {
for constraint in constraints {
// Don't register any associated item constraints for negative bounds,
// since we should have emitted an error for them earlier, and they
// would not be well-formed!
@ -1387,10 +1408,7 @@ impl<'tcx> dyn HirTyLowerer<'tcx> + '_ {
(_, Res::SelfTyAlias { alias_to: impl_def_id, is_trait_impl: true, .. }) => {
// `Self` in an impl of a trait -- we have a concrete self type and a
// trait reference.
let Some(trait_ref) = tcx.impl_trait_ref(impl_def_id) else {
// A cycle error occurred, most likely.
self.dcx().span_bug(span, "expected cycle error");
};
let trait_ref = tcx.impl_trait_ref(impl_def_id);
self.probe_single_bound_for_assoc_item(
|| {
@ -1618,7 +1636,7 @@ impl<'tcx> dyn HirTyLowerer<'tcx> + '_ {
.is_accessible_from(self.item_def_id(), tcx)
&& tcx.all_impls(*trait_def_id)
.any(|impl_def_id| {
let header = tcx.impl_trait_header(impl_def_id).unwrap();
let header = tcx.impl_trait_header(impl_def_id);
let trait_ref = header.trait_ref.instantiate(
tcx,
infcx.fresh_args_for_item(DUMMY_SP, impl_def_id),
@ -1914,10 +1932,12 @@ impl<'tcx> dyn HirTyLowerer<'tcx> + '_ {
Res::Def(DefKind::OpaqueTy, did) => {
// Check for desugared `impl Trait`.
assert_matches!(tcx.opaque_ty_origin(did), hir::OpaqueTyOrigin::TyAlias { .. });
let item_segment = path.segments.split_last().unwrap();
let _ = self
.prohibit_generic_args(item_segment.1.iter(), GenericsArgsErrExtend::OpaqueTy);
let args = self.lower_generic_args_of_path_segment(span, did, item_segment.0);
let [leading_segments @ .., segment] = path.segments else { bug!() };
let _ = self.prohibit_generic_args(
leading_segments.iter(),
GenericsArgsErrExtend::OpaqueTy,
);
let args = self.lower_generic_args_of_path_segment(span, did, segment);
Ty::new_opaque(tcx, did, args)
}
Res::Def(
@ -1929,11 +1949,10 @@ impl<'tcx> dyn HirTyLowerer<'tcx> + '_ {
did,
) => {
assert_eq!(opt_self_ty, None);
let _ = self.prohibit_generic_args(
path.segments.split_last().unwrap().1.iter(),
GenericsArgsErrExtend::None,
);
self.lower_path_segment(span, did, path.segments.last().unwrap())
let [leading_segments @ .., segment] = path.segments else { bug!() };
let _ = self
.prohibit_generic_args(leading_segments.iter(), GenericsArgsErrExtend::None);
self.lower_path_segment(span, did, segment)
}
Res::Def(kind @ DefKind::Variant, def_id)
if let PermitVariants::Yes = permit_variants =>
@ -1953,8 +1972,8 @@ impl<'tcx> dyn HirTyLowerer<'tcx> + '_ {
GenericsArgsErrExtend::DefVariant(&path.segments),
);
let GenericPathSegment(def_id, index) = generic_segments.last().unwrap();
self.lower_path_segment(span, *def_id, &path.segments[*index])
let &GenericPathSegment(def_id, index) = generic_segments.last().unwrap();
self.lower_path_segment(span, def_id, &path.segments[index])
}
Res::Def(DefKind::TyParam, def_id) => {
assert_eq!(opt_self_ty, None);
@ -2240,15 +2259,10 @@ impl<'tcx> dyn HirTyLowerer<'tcx> + '_ {
}
Res::Def(DefKind::Const | DefKind::Ctor(_, CtorKind::Const), did) => {
assert_eq!(opt_self_ty, None);
let _ = self.prohibit_generic_args(
path.segments.split_last().unwrap().1.iter(),
GenericsArgsErrExtend::None,
);
let args = self.lower_generic_args_of_path_segment(
span,
did,
path.segments.last().unwrap(),
);
let [leading_segments @ .., segment] = path.segments else { bug!() };
let _ = self
.prohibit_generic_args(leading_segments.iter(), GenericsArgsErrExtend::None);
let args = self.lower_generic_args_of_path_segment(span, did, segment);
ty::Const::new_unevaluated(tcx, ty::UnevaluatedConst::new(did, args))
}
Res::Def(DefKind::AssocConst, did) => {

View file

@ -56,6 +56,7 @@ mod min_specialization;
pub(crate) fn check_impl_wf(
tcx: TyCtxt<'_>,
impl_def_id: LocalDefId,
of_trait: bool,
) -> Result<(), ErrorGuaranteed> {
debug_assert_matches!(tcx.def_kind(impl_def_id), DefKind::Impl { .. });
@ -63,9 +64,9 @@ pub(crate) fn check_impl_wf(
// since unconstrained type/const params cause ICEs in projection, so we want to
// detect those specifically and project those to `TyKind::Error`.
let mut res = tcx.ensure_ok().enforce_impl_non_lifetime_params_are_constrained(impl_def_id);
res = res.and(enforce_impl_lifetime_params_are_constrained(tcx, impl_def_id));
res = res.and(enforce_impl_lifetime_params_are_constrained(tcx, impl_def_id, of_trait));
if tcx.features().min_specialization() {
if of_trait && tcx.features().min_specialization() {
res = res.and(check_min_specialization(tcx, impl_def_id));
}
res
@ -74,6 +75,7 @@ pub(crate) fn check_impl_wf(
pub(crate) fn enforce_impl_lifetime_params_are_constrained(
tcx: TyCtxt<'_>,
impl_def_id: LocalDefId,
of_trait: bool,
) -> Result<(), ErrorGuaranteed> {
let impl_self_ty = tcx.type_of(impl_def_id).instantiate_identity();
@ -83,7 +85,7 @@ pub(crate) fn enforce_impl_lifetime_params_are_constrained(
let impl_generics = tcx.generics_of(impl_def_id);
let impl_predicates = tcx.predicates_of(impl_def_id);
let impl_trait_ref = tcx.impl_trait_ref(impl_def_id).map(ty::EarlyBinder::instantiate_identity);
let impl_trait_ref = of_trait.then(|| tcx.impl_trait_ref(impl_def_id).instantiate_identity());
impl_trait_ref.error_reported()?;
@ -171,7 +173,8 @@ pub(crate) fn enforce_impl_non_lifetime_params_are_constrained(
let impl_generics = tcx.generics_of(impl_def_id);
let impl_predicates = tcx.predicates_of(impl_def_id);
let impl_trait_ref = tcx.impl_trait_ref(impl_def_id).map(ty::EarlyBinder::instantiate_identity);
let impl_trait_ref =
tcx.impl_opt_trait_ref(impl_def_id).map(ty::EarlyBinder::instantiate_identity);
impl_trait_ref.error_reported()?;

View file

@ -93,7 +93,7 @@ pub(super) fn check_min_specialization(
}
fn parent_specialization_node(tcx: TyCtxt<'_>, impl1_def_id: LocalDefId) -> Option<Node> {
let trait_ref = tcx.impl_trait_ref(impl1_def_id)?;
let trait_ref = tcx.impl_trait_ref(impl1_def_id);
let trait_def = tcx.trait_def(trait_ref.skip_binder().def_id);
let impl2_node = trait_def.ancestors(tcx, impl1_def_id.to_def_id()).ok()?.nth(1)?;
@ -215,7 +215,7 @@ fn unconstrained_parent_impl_args<'tcx>(
let impl_generic_predicates = tcx.predicates_of(impl_def_id);
let mut unconstrained_parameters = FxHashSet::default();
let mut constrained_params = FxHashSet::default();
let impl_trait_ref = tcx.impl_trait_ref(impl_def_id).map(ty::EarlyBinder::instantiate_identity);
let impl_trait_ref = tcx.impl_trait_ref(impl_def_id).instantiate_identity();
// Unfortunately the functions in `constrained_generic_parameters` don't do
// what we want here. We want only a list of constrained parameters while
@ -224,7 +224,7 @@ fn unconstrained_parent_impl_args<'tcx>(
for (clause, _) in impl_generic_predicates.predicates.iter() {
if let ty::ClauseKind::Projection(proj) = clause.kind().skip_binder() {
let unbound_trait_ref = proj.projection_term.trait_ref(tcx);
if Some(unbound_trait_ref) == impl_trait_ref {
if unbound_trait_ref == impl_trait_ref {
continue;
}
@ -373,7 +373,7 @@ fn check_predicates<'tcx>(
.map(|(c, _span)| c.as_predicate());
// Include the well-formed predicates of the type parameters of the impl.
for arg in tcx.impl_trait_ref(impl1_def_id).unwrap().instantiate_identity().args {
for arg in tcx.impl_trait_ref(impl1_def_id).instantiate_identity().args {
let Some(term) = arg.as_term() else {
continue;
};

View file

@ -792,7 +792,9 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
hir::Node::Expr(hir::Expr { kind: hir::ExprKind::Binary(_, lhs, rhs), .. }),
Some(TypeError::Sorts(ExpectedFound { expected, .. })),
) if rhs.hir_id == expr.hir_id
&& self.typeck_results.borrow().expr_ty_adjusted_opt(lhs) == Some(expected) =>
&& self.typeck_results.borrow().expr_ty_adjusted_opt(lhs) == Some(expected)
// let expressions being marked as `bool` is confusing (see issue #147665)
&& !matches!(lhs.kind, hir::ExprKind::Let(..)) =>
{
err.span_label(lhs.span, format!("expected because this is `{expected}`"));
}

View file

@ -3629,7 +3629,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
let ocx = ObligationCtxt::new_with_diagnostics(self);
let impl_args = self.fresh_args_for_item(base_expr.span, impl_def_id);
let impl_trait_ref =
self.tcx.impl_trait_ref(impl_def_id).unwrap().instantiate(self.tcx, impl_args);
self.tcx.impl_trait_ref(impl_def_id).instantiate(self.tcx, impl_args);
let cause = self.misc(base_expr.span);
// Match the impl self type against the base ty. If this fails,

View file

@ -711,7 +711,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
)
} else {
self.tcx
.impl_trait_ref(obligation.impl_or_alias_def_id)
.impl_opt_trait_ref(obligation.impl_or_alias_def_id)
.map(|impl_def| impl_def.skip_binder())
// It is possible that this is absent. In this case, we make no progress.
.ok_or(expr)?

View file

@ -285,7 +285,7 @@ fn infer_type_if_missing<'tcx>(fcx: &FnCtxt<'_, 'tcx>, node: Node<'tcx>) -> Opti
&& let ty::AssocContainer::TraitImpl(Ok(trait_item_def_id)) = item.container
{
let impl_def_id = item.container_id(tcx);
let impl_trait_ref = tcx.impl_trait_ref(impl_def_id).unwrap().instantiate_identity();
let impl_trait_ref = tcx.impl_trait_ref(impl_def_id).instantiate_identity();
let args = ty::GenericArgs::identity_for_item(tcx, def_id).rebase_onto(
tcx,
impl_def_id,

View file

@ -18,8 +18,8 @@ use rustc_middle::ty::adjustment::{
Adjust, Adjustment, AllowTwoPhase, AutoBorrow, AutoBorrowMutability, PointerCoercion,
};
use rustc_middle::ty::{
self, GenericArgs, GenericArgsRef, GenericParamDefKind, Ty, TyCtxt, TypeFoldable,
TypeVisitableExt, UserArgs,
self, AssocContainer, GenericArgs, GenericArgsRef, GenericParamDefKind, Ty, TyCtxt,
TypeFoldable, TypeVisitableExt, UserArgs,
};
use rustc_middle::{bug, span_bug};
use rustc_span::{DUMMY_SP, Span};
@ -272,7 +272,7 @@ impl<'a, 'tcx> ConfirmContext<'a, 'tcx> {
probe::InherentImplPick => {
let impl_def_id = pick.item.container_id(self.tcx);
assert!(
self.tcx.impl_trait_ref(impl_def_id).is_none(),
matches!(pick.item.container, AssocContainer::InherentImpl),
"impl {impl_def_id:?} is not an inherent impl"
);
self.fresh_args_for_item(self.span, impl_def_id)

View file

@ -242,7 +242,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
match *source {
// Note: this cannot come from an inherent impl,
// because the first probing succeeded.
CandidateSource::Impl(def) => self.tcx.trait_id_of_impl(def),
CandidateSource::Impl(def) => Some(self.tcx.impl_trait_id(def)),
CandidateSource::Trait(_) => None,
}
})

View file

@ -1176,9 +1176,6 @@ impl<'a, 'tcx> ProbeContext<'a, 'tcx> {
// things failed, so lets look at all traits, for diagnostic purposes now:
self.reset();
let span = self.span;
let tcx = self.tcx;
self.assemble_extension_candidates_for_all_traits();
let out_of_scope_traits = match self.pick_core(&mut Vec::new()) {
@ -1187,10 +1184,7 @@ impl<'a, 'tcx> ProbeContext<'a, 'tcx> {
.into_iter()
.map(|source| match source {
CandidateSource::Trait(id) => id,
CandidateSource::Impl(impl_id) => match tcx.trait_id_of_impl(impl_id) {
Some(id) => id,
None => span_bug!(span, "found inherent method when looking at traits"),
},
CandidateSource::Impl(impl_id) => self.tcx.impl_trait_id(impl_id),
})
.collect(),
Some(Err(MethodError::NoMatch(NoMatchData {

View file

@ -1962,8 +1962,8 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
// Provide the best span we can. Use the item, if local to crate, else
// the impl, if local to crate (item may be defaulted), else nothing.
let Some(item) = self.associated_value(impl_did, item_name).or_else(|| {
let impl_trait_ref = self.tcx.impl_trait_ref(impl_did)?;
self.associated_value(impl_trait_ref.skip_binder().def_id, item_name)
let impl_trait_id = self.tcx.impl_opt_trait_id(impl_did)?;
self.associated_value(impl_trait_id, item_name)
}) else {
continue;
};
@ -1978,7 +1978,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
let impl_ty = self.tcx.at(span).type_of(impl_did).instantiate_identity();
let insertion = match self.tcx.impl_trait_ref(impl_did) {
let insertion = match self.tcx.impl_opt_trait_ref(impl_did) {
None => String::new(),
Some(trait_ref) => {
format!(
@ -2013,7 +2013,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
err.note(note_str);
}
if let Some(sugg_span) = sugg_span
&& let Some(trait_ref) = self.tcx.impl_trait_ref(impl_did)
&& let Some(trait_ref) = self.tcx.impl_opt_trait_ref(impl_did)
&& let Some(sugg) = print_disambiguation_help(
self.tcx,
err,
@ -2547,6 +2547,8 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
"you must specify a type for this binding, like `{concrete_type}`",
);
// FIXME: Maybe FileName::Anon should also be handled,
// otherwise there would be no suggestion if the source is STDIN for example.
match (filename, parent_node) {
(
FileName::Real(_),
@ -2568,6 +2570,44 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
Applicability::MaybeIncorrect,
);
}
// For closure parameters with reference patterns (e.g., |&v|), suggest the type annotation
// on the pattern itself, e.g., |&v: &i32|
(FileName::Real(_), Node::Pat(pat))
if let Node::Pat(binding_pat) = self.tcx.hir_node(hir_id)
&& let hir::PatKind::Binding(..) = binding_pat.kind
&& let Node::Pat(parent_pat) = parent_node
&& matches!(parent_pat.kind, hir::PatKind::Ref(..)) =>
{
err.span_label(span, "you must specify a type for this binding");
let mut ref_muts = Vec::new();
let mut current_node = parent_node;
while let Node::Pat(parent_pat) = current_node {
if let hir::PatKind::Ref(_, mutability) = parent_pat.kind {
ref_muts.push(mutability);
current_node = self.tcx.parent_hir_node(parent_pat.hir_id);
} else {
break;
}
}
let mut type_annotation = String::new();
for mutability in ref_muts.iter().rev() {
match mutability {
hir::Mutability::Mut => type_annotation.push_str("&mut "),
hir::Mutability::Not => type_annotation.push('&'),
}
}
type_annotation.push_str(&concrete_type);
err.span_suggestion_verbose(
pat.span.shrink_to_hi(),
"specify the type in the closure argument list",
format!(": {type_annotation}"),
Applicability::MaybeIncorrect,
);
}
_ => {
err.span_label(span, msg);
}
@ -3720,7 +3760,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
static_candidates.iter().all(|sc| match *sc {
CandidateSource::Trait(def_id) => def_id != info.def_id,
CandidateSource::Impl(def_id) => {
self.tcx.trait_id_of_impl(def_id) != Some(info.def_id)
self.tcx.impl_opt_trait_id(def_id) != Some(info.def_id)
}
})
})
@ -3980,11 +4020,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
if self
.tcx
.all_impls(candidate.def_id)
.map(|imp_did| {
self.tcx.impl_trait_header(imp_did).expect(
"inherent impls can't be candidates, only trait impls can be",
)
})
.map(|imp_did| self.tcx.impl_trait_header(imp_did))
.filter(|header| header.polarity != ty::ImplPolarity::Positive)
.any(|header| {
let imp = header.trait_ref.instantiate_identity();

View file

@ -721,11 +721,37 @@ pub(crate) fn garbage_collect_session_directories(sess: &Session) -> io::Result<
}
}
let current_session_directory_name =
session_directory.file_name().expect("session directory is not `..`");
// Now garbage collect the valid session directories.
let deletion_candidates =
lock_file_to_session_dir.items().filter_map(|(lock_file_name, directory_name)| {
debug!("garbage_collect_session_directories() - inspecting: {}", directory_name);
if directory_name.as_str() == current_session_directory_name {
// Skipping our own directory is, unfortunately, important for correctness.
//
// To summarize #147821: we will try to lock directories before deciding they can be
// garbage collected, but the ability of `flock::Lock` to detect a lock held *by the
// same process* varies across file locking APIs. Then, if our own session directory
// has become old enough to be eligible for GC, we are beholden to platform-specific
// details about detecting the our own lock on the session directory.
//
// POSIX `fcntl(F_SETLK)`-style file locks are maintained across a process. On
// systems where this is the mechanism for `flock::Lock`, there is no way to
// discover if an `flock::Lock` has been created in the same process on the same
// file. Attempting to set a lock on the lockfile again will succeed, even if the
// lock was set by another thread, on another file descriptor. Then we would
// garbage collect our own live directory, unable to tell it was locked perhaps by
// this same thread.
//
// It's not clear that `flock::Lock` can be fixed for this in general, and our own
// incremental session directory is the only one which this process may own, so skip
// it here and avoid the problem. We know it's not garbage anyway: we're using it.
return None;
}
let Ok(timestamp) = extract_timestamp_from_session_dir(directory_name) else {
debug!(
"found session-dir with malformed timestamp: {}",

View file

@ -8,7 +8,6 @@ use std::{fmt, iter, slice};
use Chunk::*;
#[cfg(feature = "nightly")]
use rustc_macros::{Decodable_NoContext, Encodable_NoContext};
use smallvec::{SmallVec, smallvec};
use crate::{Idx, IndexVec};
@ -118,7 +117,7 @@ macro_rules! bit_relations_inherent_impls {
#[derive(Eq, PartialEq, Hash)]
pub struct DenseBitSet<T> {
domain_size: usize,
words: SmallVec<[Word; 2]>,
words: Vec<Word>,
marker: PhantomData<T>,
}
@ -134,7 +133,7 @@ impl<T: Idx> DenseBitSet<T> {
#[inline]
pub fn new_empty(domain_size: usize) -> DenseBitSet<T> {
let num_words = num_words(domain_size);
DenseBitSet { domain_size, words: smallvec![0; num_words], marker: PhantomData }
DenseBitSet { domain_size, words: vec![0; num_words], marker: PhantomData }
}
/// Creates a new, filled bitset with a given `domain_size`.
@ -142,7 +141,7 @@ impl<T: Idx> DenseBitSet<T> {
pub fn new_filled(domain_size: usize) -> DenseBitSet<T> {
let num_words = num_words(domain_size);
let mut result =
DenseBitSet { domain_size, words: smallvec![!0; num_words], marker: PhantomData };
DenseBitSet { domain_size, words: vec![!0; num_words], marker: PhantomData };
result.clear_excess_bits();
result
}
@ -873,7 +872,7 @@ impl<T: Idx> BitRelations<ChunkedBitSet<T>> for ChunkedBitSet<T> {
let mut self_chunk_words = **other_chunk_words;
for word in self_chunk_words[0..num_words].iter_mut().rev() {
*word = !*word & tail_mask;
tail_mask = u64::MAX;
tail_mask = Word::MAX;
}
let self_chunk_count = chunk_domain_size - *other_chunk_count;
debug_assert_eq!(
@ -888,7 +887,7 @@ impl<T: Idx> BitRelations<ChunkedBitSet<T>> for ChunkedBitSet<T> {
) => {
// See `ChunkedBitSet::union` for details on what is happening here.
let num_words = num_words(chunk_domain_size as usize);
let op = |a: u64, b: u64| a & !b;
let op = |a: Word, b: Word| a & !b;
if !bitwise_changes(
&self_chunk_words[0..num_words],
&other_chunk_words[0..num_words],
@ -1384,7 +1383,7 @@ impl<T: Idx> From<DenseBitSet<T>> for GrowableBitSet<T> {
pub struct BitMatrix<R: Idx, C: Idx> {
num_rows: usize,
num_columns: usize,
words: SmallVec<[Word; 2]>,
words: Vec<Word>,
marker: PhantomData<(R, C)>,
}
@ -1397,7 +1396,7 @@ impl<R: Idx, C: Idx> BitMatrix<R, C> {
BitMatrix {
num_rows,
num_columns,
words: smallvec![0; num_rows * words_per_row],
words: vec![0; num_rows * words_per_row],
marker: PhantomData,
}
}

View file

@ -152,7 +152,10 @@ declare_lint_pass!(NonShorthandFieldPatterns => [NON_SHORTHAND_FIELD_PATTERNS]);
impl<'tcx> LateLintPass<'tcx> for NonShorthandFieldPatterns {
fn check_pat(&mut self, cx: &LateContext<'_>, pat: &hir::Pat<'_>) {
if let PatKind::Struct(ref qpath, field_pats, _) = pat.kind {
// The result shouldn't be tainted, otherwise it will cause ICE.
if let PatKind::Struct(ref qpath, field_pats, _) = pat.kind
&& cx.typeck_results().tainted_by_errors.is_none()
{
let variant = cx
.typeck_results()
.pat_ty(pat)

View file

@ -1,14 +1,12 @@
use rustc_data_structures::fx::FxHashMap;
use rustc_errors::{Applicability, Diag};
use rustc_hir as hir;
use rustc_hir::attrs::AttributeKind;
use rustc_hir::find_attr;
use rustc_middle::ty;
use rustc_middle::ty::TyCtxt;
use rustc_session::{declare_lint, impl_lint_pass};
use rustc_span::Symbol;
use rustc_session::{declare_lint, declare_lint_pass};
use rustc_span::def_id::DefId;
use rustc_span::symbol::sym;
use rustc_span::{Span, Symbol};
use crate::{LateContext, LateLintPass};
@ -52,27 +50,24 @@ declare_lint! {
@feature_gate = default_field_values;
}
#[derive(Default)]
pub(crate) struct DefaultCouldBeDerived;
impl_lint_pass!(DefaultCouldBeDerived => [DEFAULT_OVERRIDES_DEFAULT_FIELDS]);
declare_lint_pass!(DefaultCouldBeDerived => [DEFAULT_OVERRIDES_DEFAULT_FIELDS]);
impl<'tcx> LateLintPass<'tcx> for DefaultCouldBeDerived {
fn check_impl_item(&mut self, cx: &LateContext<'_>, impl_item: &hir::ImplItem<'_>) {
// Look for manual implementations of `Default`.
let Some(default_def_id) = cx.tcx.get_diagnostic_item(sym::Default) else { return };
let hir::ImplItemImplKind::Trait { trait_item_def_id, .. } = impl_item.impl_kind else {
return;
};
if !trait_item_def_id.is_ok_and(|id| cx.tcx.is_diagnostic_item(sym::default_fn, id)) {
return;
}
let hir::ImplItemKind::Fn(_sig, body_id) = impl_item.kind else { return };
let parent = cx.tcx.parent(impl_item.owner_id.to_def_id());
if find_attr!(cx.tcx.get_all_attrs(parent), AttributeKind::AutomaticallyDerived(..)) {
let impl_id = cx.tcx.local_parent(impl_item.owner_id.def_id);
if cx.tcx.is_automatically_derived(impl_id.to_def_id()) {
// We don't care about what `#[derive(Default)]` produces in this lint.
return;
}
let Some(trait_ref) = cx.tcx.impl_trait_ref(parent) else { return };
let trait_ref = trait_ref.instantiate_identity();
if trait_ref.def_id != default_def_id {
return;
}
let ty = trait_ref.self_ty();
let ty = cx.tcx.type_of(impl_id).instantiate_identity();
let ty::Adt(def, _) = ty.kind() else { return };
// We now know we have a manually written definition of a `<Type as Default>::default()`.
@ -150,11 +145,10 @@ impl<'tcx> LateLintPass<'tcx> for DefaultCouldBeDerived {
return;
}
let Some(local) = parent.as_local() else { return };
let hir_id = cx.tcx.local_def_id_to_hir_id(local);
let hir::Node::Item(item) = cx.tcx.hir_node(hir_id) else { return };
cx.tcx.node_span_lint(DEFAULT_OVERRIDES_DEFAULT_FIELDS, hir_id, item.span, |diag| {
mk_lint(cx.tcx, diag, type_def_id, parent, orig_fields, fields);
let hir_id = cx.tcx.local_def_id_to_hir_id(impl_id);
let span = cx.tcx.hir_span_with_body(hir_id);
cx.tcx.node_span_lint(DEFAULT_OVERRIDES_DEFAULT_FIELDS, hir_id, span, |diag| {
mk_lint(cx.tcx, diag, type_def_id, orig_fields, fields, span);
});
}
}
@ -163,9 +157,9 @@ fn mk_lint(
tcx: TyCtxt<'_>,
diag: &mut Diag<'_, ()>,
type_def_id: DefId,
impl_def_id: DefId,
orig_fields: FxHashMap<Symbol, &hir::FieldDef<'_>>,
fields: &[hir::ExprField<'_>],
impl_span: Span,
) {
diag.primary_message("`Default` impl doesn't use the declared default field values");
@ -186,18 +180,14 @@ fn mk_lint(
if removed_all_fields {
let msg = "to avoid divergence in behavior between `Struct { .. }` and \
`<Struct as Default>::default()`, derive the `Default`";
if let Some(hir::Node::Item(impl_)) = tcx.hir_get_if_local(impl_def_id) {
diag.multipart_suggestion_verbose(
msg,
vec![
(tcx.def_span(type_def_id).shrink_to_lo(), "#[derive(Default)] ".to_string()),
(impl_.span, String::new()),
],
Applicability::MachineApplicable,
);
} else {
diag.help(msg);
}
diag.multipart_suggestion_verbose(
msg,
vec![
(tcx.def_span(type_def_id).shrink_to_lo(), "#[derive(Default)] ".to_string()),
(impl_span, String::new()),
],
Applicability::MachineApplicable,
);
} else {
let msg = "use the default values in the `impl` with `Struct { mandatory_field, .. }` to \
avoid them diverging over time";

View file

@ -191,7 +191,7 @@ late_lint_methods!(
BuiltinCombinedModuleLateLintPass,
[
ForLoopsOverFallibles: ForLoopsOverFallibles,
DefaultCouldBeDerived: DefaultCouldBeDerived::default(),
DefaultCouldBeDerived: DefaultCouldBeDerived,
DerefIntoDynSupertrait: DerefIntoDynSupertrait,
DropForgetUseless: DropForgetUseless,
ImproperCTypesLint: ImproperCTypesLint,

View file

@ -3,6 +3,7 @@ use rustc_hir::intravisit::{self, Visitor};
use rustc_hir::{self as hir, LifetimeSource};
use rustc_session::{declare_lint, declare_lint_pass};
use rustc_span::Span;
use rustc_span::def_id::LocalDefId;
use tracing::instrument;
use crate::{LateContext, LateLintPass, LintContext, lints};
@ -78,11 +79,11 @@ impl<'tcx> LateLintPass<'tcx> for LifetimeSyntax {
fn check_fn(
&mut self,
cx: &LateContext<'tcx>,
_: hir::intravisit::FnKind<'tcx>,
_: intravisit::FnKind<'tcx>,
fd: &'tcx hir::FnDecl<'tcx>,
_: &'tcx hir::Body<'tcx>,
_: rustc_span::Span,
_: rustc_span::def_id::LocalDefId,
_: Span,
_: LocalDefId,
) {
check_fn_like(cx, fd);
}
@ -97,11 +98,7 @@ impl<'tcx> LateLintPass<'tcx> for LifetimeSyntax {
}
#[instrument(skip_all)]
fn check_foreign_item(
&mut self,
cx: &LateContext<'tcx>,
fi: &'tcx rustc_hir::ForeignItem<'tcx>,
) {
fn check_foreign_item(&mut self, cx: &LateContext<'tcx>, fi: &'tcx hir::ForeignItem<'tcx>) {
match fi.kind {
hir::ForeignItemKind::Fn(fn_sig, _idents, _generics) => check_fn_like(cx, fn_sig.decl),
hir::ForeignItemKind::Static(..) => {}
@ -111,35 +108,47 @@ impl<'tcx> LateLintPass<'tcx> for LifetimeSyntax {
}
fn check_fn_like<'tcx>(cx: &LateContext<'tcx>, fd: &'tcx hir::FnDecl<'tcx>) {
let mut input_map = Default::default();
let mut output_map = Default::default();
if fd.inputs.is_empty() {
return;
}
let hir::FnRetTy::Return(output) = fd.output else {
return;
};
let mut map: FxIndexMap<hir::LifetimeKind, LifetimeGroup<'_>> = FxIndexMap::default();
LifetimeInfoCollector::collect(output, |info| {
let group = map.entry(info.lifetime.kind).or_default();
group.outputs.push(info);
});
if map.is_empty() {
return;
}
for input in fd.inputs {
LifetimeInfoCollector::collect(input, &mut input_map);
}
if let hir::FnRetTy::Return(output) = fd.output {
LifetimeInfoCollector::collect(output, &mut output_map);
}
report_mismatches(cx, &input_map, &output_map);
}
#[instrument(skip_all)]
fn report_mismatches<'tcx>(
cx: &LateContext<'tcx>,
inputs: &LifetimeInfoMap<'tcx>,
outputs: &LifetimeInfoMap<'tcx>,
) {
for (resolved_lifetime, output_info) in outputs {
if let Some(input_info) = inputs.get(resolved_lifetime) {
if !lifetimes_use_matched_syntax(input_info, output_info) {
emit_mismatch_diagnostic(cx, input_info, output_info);
LifetimeInfoCollector::collect(input, |info| {
if let Some(group) = map.get_mut(&info.lifetime.kind) {
group.inputs.push(info);
}
});
}
for LifetimeGroup { ref inputs, ref outputs } in map.into_values() {
if inputs.is_empty() {
continue;
}
if !lifetimes_use_matched_syntax(inputs, outputs) {
emit_mismatch_diagnostic(cx, inputs, outputs);
}
}
}
#[derive(Default)]
struct LifetimeGroup<'tcx> {
inputs: Vec<Info<'tcx>>,
outputs: Vec<Info<'tcx>>,
}
#[derive(Debug, Copy, Clone, PartialEq)]
enum LifetimeSyntaxCategory {
Hidden,
@ -148,11 +157,11 @@ enum LifetimeSyntaxCategory {
}
impl LifetimeSyntaxCategory {
fn new(syntax_source: (hir::LifetimeSyntax, LifetimeSource)) -> Option<Self> {
fn new(lifetime: &hir::Lifetime) -> Option<Self> {
use LifetimeSource::*;
use hir::LifetimeSyntax::*;
match syntax_source {
match (lifetime.syntax, lifetime.source) {
// E.g. `&T`.
(Implicit, Reference) |
// E.g. `&'_ T`.
@ -216,7 +225,7 @@ impl<T> LifetimeSyntaxCategories<Vec<T>> {
pub fn iter_unnamed(&self) -> impl Iterator<Item = &T> {
let Self { hidden, elided, named: _ } = self;
[hidden.iter(), elided.iter()].into_iter().flatten()
std::iter::chain(hidden, elided)
}
}
@ -233,22 +242,8 @@ impl std::ops::Add for LifetimeSyntaxCategories<usize> {
}
fn lifetimes_use_matched_syntax(input_info: &[Info<'_>], output_info: &[Info<'_>]) -> bool {
let mut syntax_counts = LifetimeSyntaxCategories::<usize>::default();
for info in input_info.iter().chain(output_info) {
if let Some(category) = info.lifetime_syntax_category() {
*syntax_counts.select(category) += 1;
}
}
tracing::debug!(?syntax_counts);
matches!(
syntax_counts,
LifetimeSyntaxCategories { hidden: _, elided: 0, named: 0 }
| LifetimeSyntaxCategories { hidden: 0, elided: _, named: 0 }
| LifetimeSyntaxCategories { hidden: 0, elided: 0, named: _ }
)
let (first, inputs) = input_info.split_first().unwrap();
std::iter::chain(inputs, output_info).all(|info| info.syntax_category == first.syntax_category)
}
fn emit_mismatch_diagnostic<'tcx>(
@ -310,18 +305,13 @@ fn emit_mismatch_diagnostic<'tcx>(
use LifetimeSource::*;
use hir::LifetimeSyntax::*;
let syntax_source = info.syntax_source();
let lifetime = info.lifetime;
if let (_, Other) = syntax_source {
// Ignore any other kind of lifetime.
continue;
}
if let (ExplicitBound, _) = syntax_source {
if lifetime.syntax == ExplicitBound {
bound_lifetime = Some(info);
}
match syntax_source {
match (lifetime.syntax, lifetime.source) {
// E.g. `&T`.
(Implicit, Reference) => {
suggest_change_to_explicit_anonymous.push(info);
@ -341,8 +331,8 @@ fn emit_mismatch_diagnostic<'tcx>(
suggest_change_to_explicit_bound.push(info);
}
// E.g. `ContainsLifetime<'_>`.
(ExplicitAnonymous, Path { .. }) => {
// E.g. `ContainsLifetime<'_>`, `+ '_`, `+ use<'_>`.
(ExplicitAnonymous, Path { .. } | OutlivesBound | PreciseCapturing) => {
suggest_change_to_explicit_bound.push(info);
}
@ -353,8 +343,8 @@ fn emit_mismatch_diagnostic<'tcx>(
suggest_change_to_explicit_anonymous.push(info);
}
// E.g. `ContainsLifetime<'a>`.
(ExplicitBound, Path { .. }) => {
// E.g. `ContainsLifetime<'a>`, `+ 'a`, `+ use<'a>`.
(ExplicitBound, Path { .. } | OutlivesBound | PreciseCapturing) => {
suggest_change_to_mixed_explicit_anonymous.push(info);
suggest_change_to_explicit_anonymous.push(info);
}
@ -363,29 +353,18 @@ fn emit_mismatch_diagnostic<'tcx>(
panic!("This syntax / source combination is not possible");
}
// E.g. `+ '_`, `+ use<'_>`.
(ExplicitAnonymous, OutlivesBound | PreciseCapturing) => {
suggest_change_to_explicit_bound.push(info);
}
// E.g. `+ 'a`, `+ use<'a>`.
(ExplicitBound, OutlivesBound | PreciseCapturing) => {
suggest_change_to_mixed_explicit_anonymous.push(info);
suggest_change_to_explicit_anonymous.push(info);
}
(_, Other) => {
panic!("This syntax / source combination has already been skipped");
}
}
if matches!(syntax_source, (_, Path { .. } | OutlivesBound | PreciseCapturing)) {
if matches!(lifetime.source, Path { .. } | OutlivesBound | PreciseCapturing) {
allow_suggesting_implicit = false;
}
match syntax_source {
(_, Reference) => saw_a_reference = true,
(_, Path { .. }) => saw_a_path = true,
match lifetime.source {
Reference => saw_a_reference = true,
Path { .. } => saw_a_path = true,
_ => {}
}
}
@ -393,9 +372,7 @@ fn emit_mismatch_diagnostic<'tcx>(
let categorize = |infos: &[Info<'_>]| {
let mut categories = LifetimeSyntaxCategories::<Vec<_>>::default();
for info in infos {
if let Some(category) = info.lifetime_syntax_category() {
categories.select(category).push(info.reporting_span());
}
categories.select(info.syntax_category).push(info.reporting_span());
}
categories
};
@ -407,10 +384,10 @@ fn emit_mismatch_diagnostic<'tcx>(
|infos: &[&Info<'_>]| infos.iter().map(|i| i.removing_span()).collect::<Vec<_>>();
let explicit_bound_suggestion = bound_lifetime.map(|info| {
build_mismatch_suggestion(info.lifetime_name(), &suggest_change_to_explicit_bound)
build_mismatch_suggestion(info.lifetime.ident.as_str(), &suggest_change_to_explicit_bound)
});
let is_bound_static = bound_lifetime.is_some_and(|info| info.is_static());
let is_bound_static = bound_lifetime.is_some_and(|info| info.lifetime.is_static());
tracing::debug!(?bound_lifetime, ?explicit_bound_suggestion, ?is_bound_static);
@ -517,33 +494,17 @@ fn build_mismatch_suggestion(
#[derive(Debug)]
struct Info<'tcx> {
type_span: Span,
referenced_type_span: Option<Span>,
lifetime: &'tcx hir::Lifetime,
syntax_category: LifetimeSyntaxCategory,
ty: &'tcx hir::Ty<'tcx>,
}
impl<'tcx> Info<'tcx> {
fn syntax_source(&self) -> (hir::LifetimeSyntax, LifetimeSource) {
(self.lifetime.syntax, self.lifetime.source)
}
fn lifetime_syntax_category(&self) -> Option<LifetimeSyntaxCategory> {
LifetimeSyntaxCategory::new(self.syntax_source())
}
fn lifetime_name(&self) -> &str {
self.lifetime.ident.as_str()
}
fn is_static(&self) -> bool {
self.lifetime.is_static()
}
/// When reporting a lifetime that is implicit, we expand the span
/// to include the type. Otherwise we end up pointing at nothing,
/// which is a bit confusing.
fn reporting_span(&self) -> Span {
if self.lifetime.is_implicit() { self.type_span } else { self.lifetime.ident.span }
if self.lifetime.is_implicit() { self.ty.span } else { self.lifetime.ident.span }
}
/// When removing an explicit lifetime from a reference,
@ -560,12 +521,10 @@ impl<'tcx> Info<'tcx> {
/// ```
// FIXME: Ideally, we'd also remove the lifetime declaration.
fn removing_span(&self) -> Span {
let mut span = self.suggestion("'dummy").0;
if let Some(referenced_type_span) = self.referenced_type_span {
span = span.until(referenced_type_span);
let mut span = self.lifetime.ident.span;
if let hir::TyKind::Ref(_, mut_ty) = self.ty.kind {
span = span.until(mut_ty.ty.span);
}
span
}
@ -574,46 +533,38 @@ impl<'tcx> Info<'tcx> {
}
}
type LifetimeInfoMap<'tcx> = FxIndexMap<&'tcx hir::LifetimeKind, Vec<Info<'tcx>>>;
struct LifetimeInfoCollector<'a, 'tcx> {
type_span: Span,
referenced_type_span: Option<Span>,
map: &'a mut LifetimeInfoMap<'tcx>,
struct LifetimeInfoCollector<'tcx, F> {
info_func: F,
ty: &'tcx hir::Ty<'tcx>,
}
impl<'a, 'tcx> LifetimeInfoCollector<'a, 'tcx> {
fn collect(ty: &'tcx hir::Ty<'tcx>, map: &'a mut LifetimeInfoMap<'tcx>) {
let mut this = Self { type_span: ty.span, referenced_type_span: None, map };
impl<'tcx, F> LifetimeInfoCollector<'tcx, F>
where
F: FnMut(Info<'tcx>),
{
fn collect(ty: &'tcx hir::Ty<'tcx>, info_func: F) {
let mut this = Self { info_func, ty };
intravisit::walk_unambig_ty(&mut this, ty);
}
}
impl<'a, 'tcx> Visitor<'tcx> for LifetimeInfoCollector<'a, 'tcx> {
impl<'tcx, F> Visitor<'tcx> for LifetimeInfoCollector<'tcx, F>
where
F: FnMut(Info<'tcx>),
{
#[instrument(skip(self))]
fn visit_lifetime(&mut self, lifetime: &'tcx hir::Lifetime) {
let type_span = self.type_span;
let referenced_type_span = self.referenced_type_span;
let info = Info { type_span, referenced_type_span, lifetime };
self.map.entry(&lifetime.kind).or_default().push(info);
if let Some(syntax_category) = LifetimeSyntaxCategory::new(lifetime) {
let info = Info { lifetime, syntax_category, ty: self.ty };
(self.info_func)(info);
}
}
#[instrument(skip(self))]
fn visit_ty(&mut self, ty: &'tcx hir::Ty<'tcx, hir::AmbigArg>) -> Self::Result {
let old_type_span = self.type_span;
let old_referenced_type_span = self.referenced_type_span;
self.type_span = ty.span;
if let hir::TyKind::Ref(_, ty) = ty.kind {
self.referenced_type_span = Some(ty.ty.span);
}
let old_ty = std::mem::replace(&mut self.ty, ty.as_unambig_ty());
intravisit::walk_ty(self, ty);
self.type_span = old_type_span;
self.referenced_type_span = old_referenced_type_span;
self.ty = old_ty;
}
}

View file

@ -814,7 +814,7 @@ fn get_nullable_type<'tcx>(
/// A type is niche-optimization candidate iff:
/// - Is a zero-sized type with alignment 1 (a “1-ZST”).
/// - Has no fields.
/// - Is either a struct/tuple with no fields, or an enum with no variants.
/// - Does not have the `#[non_exhaustive]` attribute.
fn is_niche_optimization_candidate<'tcx>(
tcx: TyCtxt<'tcx>,
@ -828,7 +828,7 @@ fn is_niche_optimization_candidate<'tcx>(
match ty.kind() {
ty::Adt(ty_def, _) => {
let non_exhaustive = ty_def.is_variant_list_non_exhaustive();
let empty = (ty_def.is_struct() && ty_def.all_fields().next().is_none())
let empty = (ty_def.is_struct() && ty_def.non_enum_variant().fields.is_empty())
|| (ty_def.is_enum() && ty_def.variants().is_empty());
!non_exhaustive && empty

View file

@ -273,13 +273,13 @@ impl<'tcx> LateLintPass<'tcx> for UnusedResults {
expr: &hir::Expr<'_>,
span: Span,
) -> Option<MustUsePath> {
if ty.is_unit()
|| !ty.is_inhabited_from(
cx.tcx,
cx.tcx.parent_module(expr.hir_id).to_def_id(),
cx.typing_env(),
)
{
if ty.is_unit() {
return Some(MustUsePath::Suppressed);
}
let parent_mod_did = cx.tcx.parent_module(expr.hir_id).to_def_id();
let is_uninhabited =
|t: Ty<'tcx>| !t.is_inhabited_from(cx.tcx, parent_mod_did, cx.typing_env());
if is_uninhabited(ty) {
return Some(MustUsePath::Suppressed);
}
@ -293,6 +293,22 @@ impl<'tcx> LateLintPass<'tcx> for UnusedResults {
is_ty_must_use(cx, pinned_ty, expr, span)
.map(|inner| MustUsePath::Pinned(Box::new(inner)))
}
// Suppress warnings on `Result<(), Uninhabited>` (e.g. `Result<(), !>`).
ty::Adt(def, args)
if cx.tcx.is_diagnostic_item(sym::Result, def.did())
&& args.type_at(0).is_unit()
&& is_uninhabited(args.type_at(1)) =>
{
Some(MustUsePath::Suppressed)
}
// Suppress warnings on `ControlFlow<Uninhabited, ()>` (e.g. `ControlFlow<!, ()>`).
ty::Adt(def, args)
if cx.tcx.is_diagnostic_item(sym::ControlFlow, def.did())
&& args.type_at(1).is_unit()
&& is_uninhabited(args.type_at(0)) =>
{
Some(MustUsePath::Suppressed)
}
ty::Adt(def, _) => is_def_must_use(cx, def.did(), span),
ty::Alias(ty::Opaque | ty::Projection, ty::AliasTy { def_id: def, .. }) => {
elaborate(cx.tcx, cx.tcx.explicit_item_self_bounds(def).iter_identity_copied())

View file

@ -4065,7 +4065,6 @@ declare_lint! {
/// ### Example
///
/// ```rust,compile_fail
/// #![deny(never_type_fallback_flowing_into_unsafe)]
/// fn main() {
/// if true {
/// // return has type `!` which, in some cases, causes never type fallback
@ -4100,7 +4099,7 @@ declare_lint! {
/// [`!`]: https://doc.rust-lang.org/core/primitive.never.html
/// [`()`]: https://doc.rust-lang.org/core/primitive.unit.html
pub NEVER_TYPE_FALLBACK_FLOWING_INTO_UNSAFE,
Warn,
Deny,
"never type fallback affecting unsafe function calls",
@future_incompatible = FutureIncompatibleInfo {
reason: FutureIncompatibilityReason::EditionAndFutureReleaseSemanticsChange(Edition::Edition2024),
@ -4122,7 +4121,7 @@ declare_lint! {
/// ### Example
///
/// ```rust,compile_fail,edition2021
/// #![deny(dependency_on_unit_never_type_fallback)]
/// # #![deny(dependency_on_unit_never_type_fallback)]
/// fn main() {
/// if true {
/// // return has type `!` which, is some cases, causes never type fallback
@ -4155,7 +4154,7 @@ declare_lint! {
///
/// See [Tracking Issue for making `!` fall back to `!`](https://github.com/rust-lang/rust/issues/123748).
pub DEPENDENCY_ON_UNIT_NEVER_TYPE_FALLBACK,
Warn,
Deny,
"never type fallback affecting unsafe function calls",
@future_incompatible = FutureIncompatibleInfo {
reason: FutureIncompatibilityReason::EditionAndFutureReleaseError(Edition::Edition2024),

View file

@ -2115,7 +2115,8 @@ impl<'a, 'tcx> EncodeContext<'a, 'tcx> {
};
let def_id = id.owner_id.to_def_id();
if of_trait && let Some(header) = tcx.impl_trait_header(def_id) {
if of_trait {
let header = tcx.impl_trait_header(def_id);
record!(self.tables.impl_trait_header[def_id] <- header);
self.tables.defaultness.set_some(def_id.index, tcx.defaultness(def_id));

View file

@ -24,12 +24,13 @@ use rustc_macros::{
use rustc_middle::metadata::ModChild;
use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrs;
use rustc_middle::middle::debugger_visualizer::DebuggerVisualizerFile;
use rustc_middle::middle::deduced_param_attrs::DeducedParamAttrs;
use rustc_middle::middle::exported_symbols::{ExportedSymbol, SymbolExportInfo};
use rustc_middle::middle::lib_features::FeatureStability;
use rustc_middle::middle::resolve_bound_vars::ObjectLifetimeDefault;
use rustc_middle::mir;
use rustc_middle::ty::fast_reject::SimplifiedType;
use rustc_middle::ty::{self, DeducedParamAttrs, Ty, TyCtxt, UnusedGenericParams};
use rustc_middle::ty::{self, Ty, TyCtxt, UnusedGenericParams};
use rustc_middle::util::Providers;
use rustc_serialize::opaque::FileEncoder;
use rustc_session::config::{SymbolManglingVersion, TargetModifier};

View file

@ -97,6 +97,7 @@ trivially_parameterized_over_tcx! {
rustc_middle::metadata::ModChild,
rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrs,
rustc_middle::middle::debugger_visualizer::DebuggerVisualizerFile,
rustc_middle::middle::deduced_param_attrs::DeducedParamAttrs,
rustc_middle::middle::exported_symbols::SymbolExportInfo,
rustc_middle::middle::lib_features::FeatureStability,
rustc_middle::middle::resolve_bound_vars::ObjectLifetimeDefault,
@ -105,7 +106,6 @@ trivially_parameterized_over_tcx! {
rustc_middle::ty::AssocContainer,
rustc_middle::ty::AsyncDestructor,
rustc_middle::ty::Asyncness,
rustc_middle::ty::DeducedParamAttrs,
rustc_middle::ty::Destructor,
rustc_middle::ty::Generics,
rustc_middle::ty::ImplTraitInTraitData,

View file

@ -13,6 +13,8 @@ impl<'tcx> TyCtxt<'tcx> {
self,
instance_kind: InstanceKind<'_>,
) -> Cow<'tcx, CodegenFnAttrs> {
// NOTE: we try to not clone the `CodegenFnAttrs` when that is not needed.
// The `to_mut` method used below clones the inner value.
let mut attrs = Cow::Borrowed(self.codegen_fn_attrs(instance_kind.def_id()));
// Drop the `#[naked]` attribute on non-item `InstanceKind`s, like the shims that
@ -23,6 +25,28 @@ impl<'tcx> TyCtxt<'tcx> {
}
}
// A shim created by `#[track_caller]` should not inherit any attributes
// that modify the symbol name. Failing to remove these attributes from
// the shim leads to errors like `symbol `foo` is already defined`.
//
// A `ClosureOnceShim` with the track_caller attribute does not have a symbol,
// and therefore can be skipped here.
if let InstanceKind::ReifyShim(_, _) = instance_kind
&& attrs.flags.contains(CodegenFnAttrFlags::TRACK_CALLER)
{
if attrs.flags.contains(CodegenFnAttrFlags::NO_MANGLE) {
attrs.to_mut().flags.remove(CodegenFnAttrFlags::NO_MANGLE);
}
if attrs.flags.contains(CodegenFnAttrFlags::RUSTC_STD_INTERNAL_SYMBOL) {
attrs.to_mut().flags.remove(CodegenFnAttrFlags::RUSTC_STD_INTERNAL_SYMBOL);
}
if attrs.symbol_name.is_some() {
attrs.to_mut().symbol_name = None;
}
}
attrs
}
}

View file

@ -0,0 +1,68 @@
use rustc_macros::{Decodable, Encodable, HashStable};
use crate::ty::{Ty, TyCtxt, TypingEnv};
/// Flags that dictate how a parameter is mutated. If the flags are empty, the param is
/// read-only. If non-empty, it is read-only if *all* flags' conditions are met.
///
/// NOTE(review): this type derives `Encodable`/`Decodable`, so the bit assignments below
/// presumably travel through crate metadata — confirm before renumbering them.
#[derive(Clone, Copy, PartialEq, Debug, Decodable, Encodable, HashStable)]
pub struct DeducedReadOnlyParam(u8);

bitflags::bitflags! {
    impl DeducedReadOnlyParam: u8 {
        /// This parameter is dropped. It is read-only if `!needs_drop`.
        const IF_NO_DROP = 1 << 0;
        /// This parameter is borrowed. It is read-only if `Freeze`.
        const IF_FREEZE = 1 << 1;
        /// This parameter is mutated. It is never read-only.
        const MUTATED = 1 << 2;
    }
}
/// Parameter attributes that can only be determined by examining the body of a function instead
/// of just its signature.
///
/// These can be useful for optimization purposes when a function is directly called. We compute
/// them and store them into the crate metadata so that downstream crates can make use of them.
///
/// Right now, we only have `read_only`, but `no_capture` and `no_alias` might be useful in the
/// future.
#[derive(Clone, Copy, PartialEq, Debug, Decodable, Encodable, HashStable)]
pub struct DeducedParamAttrs {
    /// The parameter is marked immutable in the function.
    /// See [`DeducedReadOnlyParam`] for the conditions under which it is truly read-only.
    pub read_only: DeducedReadOnlyParam,
}

// By default, consider the parameters to be mutated: `MUTATED` is the conservative
// choice, since a mutated parameter is never treated as read-only.
impl Default for DeducedParamAttrs {
    #[inline]
    fn default() -> DeducedParamAttrs {
        DeducedParamAttrs { read_only: DeducedReadOnlyParam::MUTATED }
    }
}
impl DeducedParamAttrs {
    /// Whether these attributes carry no information beyond the conservative
    /// [`Default`] (parameter considered mutated, hence never read-only).
    #[inline]
    pub fn is_default(self) -> bool {
        self.read_only.contains(DeducedReadOnlyParam::MUTATED)
    }

    /// Decides whether a parameter of type `ty` is truly read-only by checking
    /// every condition recorded in the flags; all of them must hold.
    pub fn read_only<'tcx>(
        &self,
        tcx: TyCtxt<'tcx>,
        typing_env: TypingEnv<'tcx>,
        ty: Ty<'tcx>,
    ) -> bool {
        let flags = self.read_only;
        // `MUTATED` rules out read-only unconditionally; the other two flags are
        // conditional on properties of the parameter's type.
        !flags.contains(DeducedReadOnlyParam::MUTATED)
            && !(flags.contains(DeducedReadOnlyParam::IF_NO_DROP)
                && ty.needs_drop(tcx, typing_env))
            && !(flags.contains(DeducedReadOnlyParam::IF_FREEZE)
                && !ty.is_freeze(tcx, typing_env))
    }
}

View file

@ -1,5 +1,6 @@
pub mod codegen_fn_attrs;
pub mod debugger_visualizer;
pub mod deduced_param_attrs;
pub mod dependency_format;
pub mod exported_symbols;
pub mod lang_items;

View file

@ -1,4 +1,4 @@
use std::sync::OnceLock;
use std::sync::{Arc, OnceLock};
use rustc_data_structures::graph;
use rustc_data_structures::graph::dominators::{Dominators, dominators};
@ -14,7 +14,8 @@ use crate::mir::{BasicBlock, BasicBlockData, START_BLOCK};
#[derive(Clone, TyEncodable, TyDecodable, Debug, HashStable, TypeFoldable, TypeVisitable)]
pub struct BasicBlocks<'tcx> {
basic_blocks: IndexVec<BasicBlock, BasicBlockData<'tcx>>,
cache: Cache,
/// Use an `Arc` so we can share the cache when we clone the MIR body, as borrowck does.
cache: Arc<Cache>,
}
// Typically 95%+ of basic blocks have 4 or fewer predecessors.
@ -38,9 +39,10 @@ struct Cache {
impl<'tcx> BasicBlocks<'tcx> {
#[inline]
pub fn new(basic_blocks: IndexVec<BasicBlock, BasicBlockData<'tcx>>) -> Self {
BasicBlocks { basic_blocks, cache: Cache::default() }
BasicBlocks { basic_blocks, cache: Arc::new(Cache::default()) }
}
#[inline]
pub fn dominators(&self) -> &Dominators<BasicBlock> {
self.cache.dominators.get_or_init(|| dominators(self))
}
@ -104,7 +106,14 @@ impl<'tcx> BasicBlocks<'tcx> {
/// All other methods that allow you to mutate the basic blocks also call this method
/// themselves, thereby avoiding any risk of accidentally cache invalidation.
pub fn invalidate_cfg_cache(&mut self) {
self.cache = Cache::default();
if let Some(cache) = Arc::get_mut(&mut self.cache) {
// If we only have a single reference to this cache, clear it.
*cache = Cache::default();
} else {
// If we have several references to this cache, overwrite the pointer itself so other
// users can continue to use their (valid) cache.
self.cache = Arc::new(Cache::default());
}
}
}

View file

@ -137,6 +137,7 @@ pub enum RuntimePhase {
/// And the following variants are allowed:
/// * [`StatementKind::Retag`]
/// * [`StatementKind::SetDiscriminant`]
/// * [`PlaceElem::ConstantIndex`] / [`PlaceElem::Subslice`] after [`PlaceElem::Subslice`]
///
/// Furthermore, `Copy` operands are allowed for non-`Copy` types.
Initial = 0,
@ -1246,6 +1247,9 @@ pub enum ProjectionElem<V, T> {
///
/// If `from_end` is true `slice[from..slice.len() - to]`.
/// Otherwise `array[from..to]`.
///
/// This projection cannot have `ConstantIndex` or additional `Subslice` projections after it
/// before runtime MIR.
Subslice {
from: u64,
to: u64,

View file

@ -696,6 +696,28 @@ impl<'tcx> TerminatorKind<'tcx> {
_ => None,
}
}
/// Returns true if the terminator can write to memory.
///
/// The match is deliberately exhaustive so that adding a new terminator
/// variant forces a decision here.
pub fn can_write_to_memory(&self) -> bool {
    match self {
        // Pure control-flow terminators: classified as unable to write to memory.
        TerminatorKind::Goto { .. }
        | TerminatorKind::SwitchInt { .. }
        | TerminatorKind::UnwindResume
        | TerminatorKind::UnwindTerminate(_)
        | TerminatorKind::Return
        | TerminatorKind::Assert { .. }
        | TerminatorKind::CoroutineDrop
        | TerminatorKind::FalseEdge { .. }
        | TerminatorKind::FalseUnwind { .. }
        | TerminatorKind::Unreachable => false,
        // These run arbitrary code (a callee, drop glue, or inline assembly),
        // which may store to memory.
        TerminatorKind::Call { .. }
        | TerminatorKind::Drop { .. }
        | TerminatorKind::TailCall { .. }
        // Yield writes to the resume_arg place.
        | TerminatorKind::Yield { .. }
        | TerminatorKind::InlineAsm { .. } => true,
    }
}
}
#[derive(Copy, Clone, Debug)]

View file

@ -200,8 +200,8 @@ impl EraseType for Option<mir::DestructuredConstant<'_>> {
type Result = [u8; size_of::<Option<mir::DestructuredConstant<'static>>>()];
}
impl EraseType for Option<ty::ImplTraitHeader<'_>> {
type Result = [u8; size_of::<Option<ty::ImplTraitHeader<'static>>>()];
impl EraseType for ty::ImplTraitHeader<'_> {
type Result = [u8; size_of::<ty::ImplTraitHeader<'static>>()];
}
impl EraseType for Option<ty::EarlyBinder<'_, Ty<'_>>> {
@ -313,6 +313,7 @@ trivial! {
rustc_hir::Stability,
rustc_hir::Upvar,
rustc_index::bit_set::FiniteBitSet<u32>,
rustc_middle::middle::deduced_param_attrs::DeducedParamAttrs,
rustc_middle::middle::dependency_format::Linkage,
rustc_middle::middle::exported_symbols::SymbolExportInfo,
rustc_middle::middle::resolve_bound_vars::ObjectLifetimeDefault,
@ -336,7 +337,6 @@ trivial! {
rustc_middle::ty::AsyncDestructor,
rustc_middle::ty::BoundVariableKind,
rustc_middle::ty::AnonConstKind,
rustc_middle::ty::DeducedParamAttrs,
rustc_middle::ty::Destructor,
rustc_middle::ty::fast_reject::SimplifiedType,
rustc_middle::ty::ImplPolarity,

View file

@ -107,6 +107,7 @@ use crate::lint::LintExpectation;
use crate::metadata::ModChild;
use crate::middle::codegen_fn_attrs::CodegenFnAttrs;
use crate::middle::debugger_visualizer::DebuggerVisualizerFile;
use crate::middle::deduced_param_attrs::DeducedParamAttrs;
use crate::middle::exported_symbols::{ExportedSymbol, SymbolExportInfo};
use crate::middle::lib_features::LibFeatures;
use crate::middle::privacy::EffectiveVisibilities;
@ -1104,8 +1105,7 @@ rustc_queries! {
}
/// Given an `impl_id`, return the trait it implements along with some header information.
/// Return `None` if this is an inherent impl.
query impl_trait_header(impl_id: DefId) -> Option<ty::ImplTraitHeader<'tcx>> {
query impl_trait_header(impl_id: DefId) -> ty::ImplTraitHeader<'tcx> {
desc { |tcx| "computing trait implemented by `{}`", tcx.def_path_str(impl_id) }
cache_on_disk_if { impl_id.is_local() }
separate_provide_extern
@ -1202,10 +1202,10 @@ rustc_queries! {
/// Return the live symbols in the crate for dead code check.
///
/// The second return value maps from ADTs to ignored derived traits (e.g. Debug and Clone).
query live_symbols_and_ignored_derived_traits(_: ()) -> &'tcx (
query live_symbols_and_ignored_derived_traits(_: ()) -> &'tcx Result<(
LocalDefIdSet,
LocalDefIdMap<FxIndexSet<DefId>>,
) {
), ErrorGuaranteed> {
arena_cache
desc { "finding live symbols in crate" }
}
@ -2657,7 +2657,7 @@ rustc_queries! {
return_result_from_ensure_ok
}
query deduced_param_attrs(def_id: DefId) -> &'tcx [ty::DeducedParamAttrs] {
query deduced_param_attrs(def_id: DefId) -> &'tcx [DeducedParamAttrs] {
desc { |tcx| "deducing parameter attributes for {}", tcx.def_path_str(def_id) }
separate_provide_extern
}

View file

@ -799,7 +799,7 @@ impl_ref_decoder! {<'tcx>
rustc_span::def_id::DefId,
rustc_span::def_id::LocalDefId,
(rustc_middle::middle::exported_symbols::ExportedSymbol<'tcx>, rustc_middle::middle::exported_symbols::SymbolExportInfo),
ty::DeducedParamAttrs,
rustc_middle::middle::deduced_param_attrs::DeducedParamAttrs,
}
//- ENCODING -------------------------------------------------------------------

View file

@ -577,7 +577,7 @@ impl_arena_copy_decoder! {<'tcx>
rustc_span::def_id::DefId,
rustc_span::def_id::LocalDefId,
(rustc_middle::middle::exported_symbols::ExportedSymbol<'tcx>, rustc_middle::middle::exported_symbols::SymbolExportInfo),
ty::DeducedParamAttrs,
rustc_middle::middle::deduced_param_attrs::DeducedParamAttrs,
}
#[macro_export]

View file

@ -41,7 +41,6 @@ use rustc_hir::lang_items::LangItem;
use rustc_hir::limit::Limit;
use rustc_hir::{self as hir, Attribute, HirId, Node, TraitCandidate, find_attr};
use rustc_index::IndexVec;
use rustc_macros::{HashStable, TyDecodable, TyEncodable};
use rustc_query_system::cache::WithDepNode;
use rustc_query_system::dep_graph::DepNodeIndex;
use rustc_query_system::ich::StableHashingContext;
@ -102,6 +101,7 @@ impl<'tcx> Interner for TyCtxt<'tcx> {
type CoroutineId = DefId;
type AdtId = DefId;
type ImplId = DefId;
type UnevaluatedConstId = DefId;
type Span = Span;
type GenericArgs = ty::GenericArgsRef<'tcx>;
@ -679,7 +679,7 @@ impl<'tcx> Interner for TyCtxt<'tcx> {
}
fn impl_trait_ref(self, impl_def_id: DefId) -> ty::EarlyBinder<'tcx, ty::TraitRef<'tcx>> {
self.impl_trait_ref(impl_def_id).unwrap()
self.impl_trait_ref(impl_def_id)
}
fn impl_polarity(self, impl_def_id: DefId) -> ty::ImplPolarity {
@ -1786,9 +1786,7 @@ impl<'tcx> TyCtxt<'tcx> {
}
pub fn is_default_trait(self, def_id: DefId) -> bool {
self.default_traits()
.iter()
.any(|&default_trait| self.lang_items().get(default_trait) == Some(def_id))
self.default_traits().iter().any(|&default_trait| self.is_lang_item(def_id, default_trait))
}
pub fn is_sizedness_trait(self, def_id: DefId) -> bool {
@ -3472,7 +3470,7 @@ impl<'tcx> TyCtxt<'tcx> {
/// Whether the trait impl is marked const. This does not consider stability or feature gates.
pub fn is_const_trait_impl(self, def_id: DefId) -> bool {
self.def_kind(def_id) == DefKind::Impl { of_trait: true }
&& self.impl_trait_header(def_id).unwrap().constness == hir::Constness::Const
&& self.impl_trait_header(def_id).constness == hir::Constness::Const
}
pub fn is_sdylib_interface_build(self) -> bool {
@ -3530,19 +3528,6 @@ impl<'tcx> TyCtxt<'tcx> {
crate::dep_graph::make_metadata(self)
}
/// Given an `impl_id`, return the trait it implements.
/// Return `None` if this is an inherent impl.
pub fn impl_trait_ref(
self,
def_id: impl IntoQueryParam<DefId>,
) -> Option<ty::EarlyBinder<'tcx, ty::TraitRef<'tcx>>> {
Some(self.impl_trait_header(def_id)?.trait_ref)
}
pub fn impl_polarity(self, def_id: impl IntoQueryParam<DefId>) -> ty::ImplPolarity {
self.impl_trait_header(def_id).map_or(ty::ImplPolarity::Positive, |h| h.polarity)
}
pub fn needs_coroutine_by_move_body_def_id(self, def_id: DefId) -> bool {
if let Some(hir::CoroutineKind::Desugared(_, hir::CoroutineSource::Closure)) =
self.coroutine_kind(def_id)
@ -3575,21 +3560,6 @@ impl<'tcx> TyCtxt<'tcx> {
}
}
/// Parameter attributes that can only be determined by examining the body of a function instead
/// of just its signature.
///
/// These can be useful for optimization purposes when a function is directly called. We compute
/// them and store them into the crate metadata so that downstream crates can make use of them.
///
/// Right now, we only have `read_only`, but `no_capture` and `no_alias` might be useful in the
/// future.
#[derive(Clone, Copy, PartialEq, Debug, Default, TyDecodable, TyEncodable, HashStable)]
pub struct DeducedParamAttrs {
/// The parameter is marked immutable in the function and contains no `UnsafeCell` (i.e. its
/// type is freeze).
pub read_only: bool,
}
pub fn provide(providers: &mut Providers) {
providers.is_panic_runtime =
|tcx, LocalCrate| contains_name(tcx.hir_krate_attrs(), sym::panic_runtime);

View file

@ -78,8 +78,7 @@ pub use self::consts::{
ExprKind, ScalarInt, UnevaluatedConst, ValTree, ValTreeKind, Value,
};
pub use self::context::{
CtxtInterners, CurrentGcx, DeducedParamAttrs, Feed, FreeRegionInfo, GlobalCtxt, Lift, TyCtxt,
TyCtxtFeed, tls,
CtxtInterners, CurrentGcx, Feed, FreeRegionInfo, GlobalCtxt, Lift, TyCtxt, TyCtxtFeed, tls,
};
pub use self::fold::*;
pub use self::instance::{Instance, InstanceKind, ReifyReason, UnusedGenericParams};
@ -1614,8 +1613,8 @@ impl<'tcx> TyCtxt<'tcx> {
def_id1: DefId,
def_id2: DefId,
) -> Option<ImplOverlapKind> {
let impl1 = self.impl_trait_header(def_id1).unwrap();
let impl2 = self.impl_trait_header(def_id2).unwrap();
let impl1 = self.impl_trait_header(def_id1);
let impl2 = self.impl_trait_header(def_id2);
let trait_ref1 = impl1.trait_ref.skip_binder();
let trait_ref2 = impl2.trait_ref.skip_binder();
@ -1913,12 +1912,6 @@ impl<'tcx> TyCtxt<'tcx> {
}
}
/// Given the `DefId` of an impl, returns the `DefId` of the trait it implements.
/// If it implements no trait, returns `None`.
pub fn trait_id_of_impl(self, def_id: DefId) -> Option<DefId> {
self.impl_trait_ref(def_id).map(|tr| tr.skip_binder().def_id)
}
/// If the given `DefId` is an associated item, returns the `DefId` and `DefKind` of the parent trait or impl.
pub fn assoc_parent(self, def_id: DefId) -> Option<(DefId, DefKind)> {
if !self.def_kind(def_id).is_assoc() {
@ -1943,6 +1936,14 @@ impl<'tcx> TyCtxt<'tcx> {
}
}
/// Returns whether the given impl is a trait impl (as opposed to an inherent
/// impl). Panics when `def_id` is not an impl at all.
pub fn impl_is_of_trait(self, def_id: impl IntoQueryParam<DefId>) -> bool {
    let def_id = def_id.into_query_param();
    match self.def_kind(def_id) {
        DefKind::Impl { of_trait } => of_trait,
        _ => panic!("expected Impl for {def_id:?}"),
    }
}
/// If the given `DefId` is an associated item of an impl,
/// returns the `DefId` of the impl; otherwise returns `None`.
pub fn impl_of_assoc(self, def_id: DefId) -> Option<DefId> {
@ -1970,6 +1971,40 @@ impl<'tcx> TyCtxt<'tcx> {
}
}
/// Returns the polarity of the trait impl `def_id`, read from its
/// `impl_trait_header`.
pub fn impl_polarity(self, def_id: impl IntoQueryParam<DefId>) -> ty::ImplPolarity {
    self.impl_trait_header(def_id).polarity
}
/// Given an `impl_id`, return the trait it implements.
///
/// NOTE(review): this unconditionally queries `impl_trait_header`, so callers must
/// only pass trait impls; use `impl_opt_trait_ref` when the impl may be inherent.
pub fn impl_trait_ref(
    self,
    def_id: impl IntoQueryParam<DefId>,
) -> ty::EarlyBinder<'tcx, ty::TraitRef<'tcx>> {
    self.impl_trait_header(def_id).trait_ref
}
/// Given an `impl_id`, return the trait it implements.
/// Returns `None` if it is an inherent impl.
pub fn impl_opt_trait_ref(
    self,
    def_id: impl IntoQueryParam<DefId>,
) -> Option<ty::EarlyBinder<'tcx, ty::TraitRef<'tcx>>> {
    let def_id = def_id.into_query_param();
    if self.impl_is_of_trait(def_id) {
        Some(self.impl_trait_ref(def_id))
    } else {
        None
    }
}
/// Given the `DefId` of an impl, returns the `DefId` of the trait it implements.
/// Like `impl_trait_ref`, this is only valid on trait impls; see
/// `impl_opt_trait_id` for the variant that tolerates inherent impls.
pub fn impl_trait_id(self, def_id: impl IntoQueryParam<DefId>) -> DefId {
    self.impl_trait_ref(def_id).skip_binder().def_id
}
/// Given the `DefId` of an impl, returns the `DefId` of the trait it implements.
/// Returns `None` if it is an inherent impl.
pub fn impl_opt_trait_id(self, def_id: impl IntoQueryParam<DefId>) -> Option<DefId> {
    let def_id = def_id.into_query_param();
    if self.impl_is_of_trait(def_id) {
        Some(self.impl_trait_id(def_id))
    } else {
        None
    }
}
/// Whether `def_id` is among the exportable items of its defining crate
/// (membership test against the `exportable_items` set of `def_id.krate`).
pub fn is_exportable(self, def_id: DefId) -> bool {
    self.exportable_items(def_id.krate).contains(&def_id)
}
@ -2062,7 +2097,7 @@ impl<'tcx> TyCtxt<'tcx> {
let def_id: DefId = def_id.into();
match self.def_kind(def_id) {
DefKind::Impl { of_trait: true } => {
let header = self.impl_trait_header(def_id).unwrap();
let header = self.impl_trait_header(def_id);
header.constness == hir::Constness::Const
&& self.is_const_trait(header.trait_ref.skip_binder().def_id)
}

View file

@ -45,7 +45,7 @@ pub trait Printer<'tcx>: Sized {
) -> Result<(), PrintError> {
let tcx = self.tcx();
let self_ty = tcx.type_of(impl_def_id);
let impl_trait_ref = tcx.impl_trait_ref(impl_def_id);
let impl_trait_ref = tcx.impl_opt_trait_ref(impl_def_id);
let (self_ty, impl_trait_ref) = if tcx.generics_of(impl_def_id).count() <= args.len() {
(
self_ty.instantiate(tcx, args),

View file

@ -919,6 +919,12 @@ impl<'tcx> Ty<'tcx> {
Ty::new_generic_adt(tcx, def_id, ty)
}
/// Constructs the type `Option<T>` for the given element type `ty`, resolving
/// the `Option` lang item (`require_lang_item` fails if it is unavailable).
#[inline]
pub fn new_option(tcx: TyCtxt<'tcx>, ty: Ty<'tcx>) -> Ty<'tcx> {
    let def_id = tcx.require_lang_item(LangItem::Option, DUMMY_SP);
    Ty::new_generic_adt(tcx, def_id, ty)
}
#[inline]
pub fn new_maybe_uninit(tcx: TyCtxt<'tcx>, ty: Ty<'tcx>) -> Ty<'tcx> {
let def_id = tcx.require_lang_item(LangItem::MaybeUninit, DUMMY_SP);

View file

@ -271,9 +271,7 @@ pub(super) fn traits_provider(tcx: TyCtxt<'_>, _: LocalCrate) -> &[DefId] {
pub(super) fn trait_impls_in_crate_provider(tcx: TyCtxt<'_>, _: LocalCrate) -> &[DefId] {
let mut trait_impls = Vec::new();
for id in tcx.hir_free_items() {
if matches!(tcx.def_kind(id.owner_id), DefKind::Impl { .. })
&& tcx.impl_trait_ref(id.owner_id).is_some()
{
if tcx.def_kind(id.owner_id) == (DefKind::Impl { of_trait: true }) {
trait_impls.push(id.owner_id.to_def_id())
}
}

View file

@ -43,13 +43,23 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
) {
let tcx = self.tcx;
let (min_length, exact_size) = if let Some(place_resolved) = place.try_to_place(self) {
match place_resolved.ty(&self.local_decls, tcx).ty.kind() {
ty::Array(_, length) => (
length
.try_to_target_usize(tcx)
.expect("expected len of array pat to be definite"),
true,
),
let place_ty = place_resolved.ty(&self.local_decls, tcx).ty;
match place_ty.kind() {
ty::Array(_, length) => {
if let Some(length) = length.try_to_target_usize(tcx) {
(length, true)
} else {
// This can happen when the array length is a generic const
// expression that couldn't be evaluated (e.g., due to an error).
// Since there's already a compilation error, we use a fallback
// to avoid an ICE.
tcx.dcx().span_delayed_bug(
tcx.def_span(self.def_id),
"array length in pattern couldn't be evaluated",
);
((prefix.len() + suffix.len()).try_into().unwrap(), false)
}
}
_ => ((prefix.len() + suffix.len()).try_into().unwrap(), false),
}
} else {

View file

@ -9,7 +9,7 @@ use tracing::debug;
use super::{
Init, InitIndex, InitKind, InitLocation, LocationMap, LookupResult, MoveData, MoveOut,
MoveOutIndex, MovePath, MovePathIndex, MovePathLookup,
MoveOutIndex, MovePath, MovePathIndex, MovePathLookup, MoveSubPath, MoveSubPathResult,
};
struct MoveDataBuilder<'a, 'tcx, F> {
@ -94,26 +94,25 @@ fn new_move_path<'tcx>(
move_path
}
enum MovePathResult {
Path(MovePathIndex),
Union(MovePathIndex),
Error,
}
impl<'a, 'tcx, F: Fn(Ty<'tcx>) -> bool> MoveDataBuilder<'a, 'tcx, F> {
/// This creates a MovePath for a given place, returning an `MovePathError`
/// if that place can't be moved from.
/// This creates a MovePath for a given place, calling `on_move`
/// if it can be moved from. If there's a union in the path, its
/// move place will be given to `on_move`. If there's a subslice
/// projection, `on_move` will be called for each element.
///
/// NOTE: places behind references *do not* get a move path, which is
/// problematic for borrowck.
///
/// Maybe we should have separate "borrowck" and "moveck" modes.
fn move_path_for(&mut self, place: Place<'tcx>) -> MovePathResult {
fn move_path_for<G>(&mut self, place: Place<'tcx>, mut on_move: G)
where
G: FnMut(&mut Self, MovePathIndex),
{
let data = &mut self.data;
debug!("lookup({:?})", place);
let Some(mut base) = data.rev_lookup.find_local(place.local) else {
return MovePathResult::Error;
return;
};
// The move path index of the first union that we find. Once this is
@ -123,144 +122,186 @@ impl<'a, 'tcx, F: Fn(Ty<'tcx>) -> bool> MoveDataBuilder<'a, 'tcx, F> {
// from `*(u.f: &_)` isn't allowed.
let mut union_path = None;
for (place_ref, elem) in data.rev_lookup.un_derefer.iter_projections(place.as_ref()) {
let mut iter = data.rev_lookup.un_derefer.iter_projections(place.as_ref());
while let Some((place_ref, elem)) = iter.next() {
let body = self.body;
let tcx = self.tcx;
let place_ty = place_ref.ty(body, tcx).ty;
if place_ty.references_error() {
return MovePathResult::Error;
return;
}
match elem {
ProjectionElem::Deref => match place_ty.kind() {
ty::Ref(..) | ty::RawPtr(..) => {
return MovePathResult::Error;
}
ty::Adt(adt, _) => {
if !adt.is_box() {
bug!("Adt should be a box type when Place is deref");
}
}
ty::Bool
| ty::Char
| ty::Int(_)
| ty::Uint(_)
| ty::Float(_)
| ty::Foreign(_)
| ty::Str
| ty::Array(_, _)
| ty::Pat(_, _)
| ty::Slice(_)
| ty::FnDef(_, _)
| ty::FnPtr(..)
| ty::Dynamic(_, _)
| ty::Closure(..)
| ty::CoroutineClosure(..)
| ty::Coroutine(_, _)
| ty::CoroutineWitness(..)
| ty::Never
| ty::Tuple(_)
| ty::UnsafeBinder(_)
| ty::Alias(_, _)
| ty::Param(_)
| ty::Bound(_, _)
| ty::Infer(_)
| ty::Error(_)
| ty::Placeholder(_) => {
bug!("When Place is Deref it's type shouldn't be {place_ty:#?}")
}
},
ProjectionElem::Field(_, _) => match place_ty.kind() {
ty::Adt(adt, _) => {
if adt.has_dtor(tcx) {
return MovePathResult::Error;
}
if adt.is_union() {
union_path.get_or_insert(base);
}
}
ty::Closure(..)
| ty::CoroutineClosure(..)
| ty::Coroutine(_, _)
| ty::Tuple(_) => (),
ty::Bool
| ty::Char
| ty::Int(_)
| ty::Uint(_)
| ty::Float(_)
| ty::Foreign(_)
| ty::Str
| ty::Array(_, _)
| ty::Pat(_, _)
| ty::Slice(_)
| ty::RawPtr(_, _)
| ty::Ref(_, _, _)
| ty::FnDef(_, _)
| ty::FnPtr(..)
| ty::Dynamic(_, _)
| ty::CoroutineWitness(..)
| ty::Never
| ty::UnsafeBinder(_)
| ty::Alias(_, _)
| ty::Param(_)
| ty::Bound(_, _)
| ty::Infer(_)
| ty::Error(_)
| ty::Placeholder(_) => bug!(
"When Place contains ProjectionElem::Field its type shouldn't be {place_ty:#?}"
),
},
ProjectionElem::ConstantIndex { .. } | ProjectionElem::Subslice { .. } => {
match place_ty.kind() {
ty::Slice(_) => {
return MovePathResult::Error;
}
ty::Array(_, _) => (),
_ => bug!("Unexpected type {:#?}", place_ty.is_array()),
}
let res = MoveSubPath::of(elem.kind());
let move_elem = match res {
MoveSubPathResult::One(move_elem) => {
match move_elem {
MoveSubPath::Deref => match place_ty.kind() {
ty::Ref(..) | ty::RawPtr(..) => {
return;
}
ty::Adt(adt, _) => {
if !adt.is_box() {
bug!("Adt should be a box type when Place is deref");
}
}
ty::Bool
| ty::Char
| ty::Int(_)
| ty::Uint(_)
| ty::Float(_)
| ty::Foreign(_)
| ty::Str
| ty::Array(_, _)
| ty::Pat(_, _)
| ty::Slice(_)
| ty::FnDef(_, _)
| ty::FnPtr(..)
| ty::Dynamic(_, _)
| ty::Closure(..)
| ty::CoroutineClosure(..)
| ty::Coroutine(_, _)
| ty::CoroutineWitness(..)
| ty::Never
| ty::Tuple(_)
| ty::UnsafeBinder(_)
| ty::Alias(_, _)
| ty::Param(_)
| ty::Bound(_, _)
| ty::Infer(_)
| ty::Error(_)
| ty::Placeholder(_) => {
bug!("When Place is Deref it's type shouldn't be {place_ty:#?}")
}
},
MoveSubPath::Field(_) => match place_ty.kind() {
ty::Adt(adt, _) => {
if adt.has_dtor(tcx) {
return;
}
if adt.is_union() {
union_path.get_or_insert(base);
}
}
ty::Closure(..)
| ty::CoroutineClosure(..)
| ty::Coroutine(_, _)
| ty::Tuple(_) => (),
ty::Bool
| ty::Char
| ty::Int(_)
| ty::Uint(_)
| ty::Float(_)
| ty::Foreign(_)
| ty::Str
| ty::Array(_, _)
| ty::Pat(_, _)
| ty::Slice(_)
| ty::RawPtr(_, _)
| ty::Ref(_, _, _)
| ty::FnDef(_, _)
| ty::FnPtr(..)
| ty::Dynamic(_, _)
| ty::CoroutineWitness(..)
| ty::Never
| ty::UnsafeBinder(_)
| ty::Alias(_, _)
| ty::Param(_)
| ty::Bound(_, _)
| ty::Infer(_)
| ty::Error(_)
| ty::Placeholder(_) => bug!(
"When Place contains ProjectionElem::Field its type shouldn't be {place_ty:#?}"
),
},
MoveSubPath::ConstantIndex(_) => match place_ty.kind() {
ty::Slice(_) => {
return;
}
ty::Array(_, _) => (),
_ => bug!("Unexpected type {:#?}", place_ty.is_array()),
},
MoveSubPath::Downcast(_) => (),
MoveSubPath::UnwrapUnsafeBinder => (),
};
move_elem
}
ProjectionElem::Index(_) => match place_ty.kind() {
ty::Array(..) | ty::Slice(_) => {
return MovePathResult::Error;
// Split `Subslice` patterns into the corresponding list of
// `ConstIndex` patterns. This is done to ensure that all move paths
// are disjoint, which is expected by drop elaboration.
MoveSubPathResult::Subslice { from, to } => {
assert!(
iter.all(
|(_, elem)| MoveSubPath::of(elem.kind()) == MoveSubPathResult::Skip
)
);
drop(iter); // drop for borrowck
let (&elem_ty, len) = match place_ty.kind() {
ty::Array(ty, size) => (
ty,
size.try_to_target_usize(self.tcx)
.expect("expected subslice projection on fixed-size array"),
),
_ => bug!("from_end: false slice pattern of non-array type"),
};
if !(self.filter)(elem_ty) {
return;
}
_ => bug!("Unexpected type {place_ty:#?}"),
},
ProjectionElem::UnwrapUnsafeBinder(_) => {}
// `OpaqueCast`:Only transmutes the type, so no moves there.
// `Downcast` :Only changes information about a `Place` without moving.
// So it's safe to skip these.
ProjectionElem::OpaqueCast(_) | ProjectionElem::Downcast(_, _) => (),
}
for offset in from..to {
let place_elem =
PlaceElem::ConstantIndex { offset, min_length: len, from_end: false };
let subpath_elem = MoveSubPath::ConstantIndex(offset);
let mpi = self.add_move_path(base, subpath_elem, |tcx| {
place_ref.project_deeper(&[place_elem], tcx)
});
on_move(self, mpi);
}
return;
}
MoveSubPathResult::Skip => continue,
MoveSubPathResult::Stop => return,
};
let elem_ty = PlaceTy::from_ty(place_ty).projection_ty(tcx, elem).ty;
if !(self.filter)(elem_ty) {
return MovePathResult::Error;
return;
}
if union_path.is_none() {
// inlined from add_move_path because of a borrowck conflict with the iterator
base =
*data.rev_lookup.projections.entry((base, elem.kind())).or_insert_with(|| {
new_move_path(
&mut data.move_paths,
&mut data.path_map,
&mut data.init_path_map,
Some(base),
place_ref.project_deeper(&[elem], tcx),
)
})
base = *data.rev_lookup.projections.entry((base, move_elem)).or_insert_with(|| {
new_move_path(
&mut data.move_paths,
&mut data.path_map,
&mut data.init_path_map,
Some(base),
place_ref.project_deeper(&[elem], tcx),
)
})
}
}
drop(iter); // drop for borrowck
if let Some(base) = union_path {
// Move out of union - always move the entire union.
MovePathResult::Union(base)
on_move(self, base);
} else {
MovePathResult::Path(base)
on_move(self, base);
}
}
fn add_move_path(
&mut self,
base: MovePathIndex,
elem: PlaceElem<'tcx>,
elem: MoveSubPath,
mk_place: impl FnOnce(TyCtxt<'tcx>) -> Place<'tcx>,
) -> MovePathIndex {
let MoveDataBuilder {
@ -268,7 +309,7 @@ impl<'a, 'tcx, F: Fn(Ty<'tcx>) -> bool> MoveDataBuilder<'a, 'tcx, F> {
tcx,
..
} = self;
*rev_lookup.projections.entry((base, elem.kind())).or_insert_with(move || {
*rev_lookup.projections.entry((base, elem)).or_insert_with(move || {
new_move_path(move_paths, path_map, init_path_map, Some(base), mk_place(*tcx))
})
}
@ -276,7 +317,7 @@ impl<'a, 'tcx, F: Fn(Ty<'tcx>) -> bool> MoveDataBuilder<'a, 'tcx, F> {
fn create_move_path(&mut self, place: Place<'tcx>) {
// This is an non-moving access (such as an overwrite or
// drop), so this not being a valid move path is OK.
let _ = self.move_path_for(place);
self.move_path_for(place, |_, _| ());
}
fn finalize(self) -> MoveData<'tcx> {
@ -525,46 +566,7 @@ impl<'a, 'tcx, F: Fn(Ty<'tcx>) -> bool> MoveDataBuilder<'a, 'tcx, F> {
fn gather_move(&mut self, place: Place<'tcx>) {
debug!("gather_move({:?}, {:?})", self.loc, place);
if let [ref base @ .., ProjectionElem::Subslice { from, to, from_end: false }] =
**place.projection
{
// Split `Subslice` patterns into the corresponding list of
// `ConstIndex` patterns. This is done to ensure that all move paths
// are disjoint, which is expected by drop elaboration.
let base_place =
Place { local: place.local, projection: self.tcx.mk_place_elems(base) };
let base_path = match self.move_path_for(base_place) {
MovePathResult::Path(path) => path,
MovePathResult::Union(path) => {
self.record_move(place, path);
return;
}
MovePathResult::Error => {
return;
}
};
let base_ty = base_place.ty(self.body, self.tcx).ty;
let len: u64 = match base_ty.kind() {
ty::Array(_, size) => size
.try_to_target_usize(self.tcx)
.expect("expected subslice projection on fixed-size array"),
_ => bug!("from_end: false slice pattern of non-array type"),
};
for offset in from..to {
let elem =
ProjectionElem::ConstantIndex { offset, min_length: len, from_end: false };
let path =
self.add_move_path(base_path, elem, |tcx| tcx.mk_place_elem(base_place, elem));
self.record_move(place, path);
}
} else {
match self.move_path_for(place) {
MovePathResult::Path(path) | MovePathResult::Union(path) => {
self.record_move(place, path)
}
MovePathResult::Error => {}
};
}
self.move_path_for(place, |this, mpi| this.record_move(place, mpi));
}
fn record_move(&mut self, place: Place<'tcx>, path: MovePathIndex) {

View file

@ -1,18 +1,9 @@
//! The move-analysis portion of borrowck needs to work in an abstract domain of lifted `Place`s.
//! Most of the `Place` variants fall into a one-to-one mapping between the concrete and abstract
//! (e.g., a field projection on a local variable, `x.field`, has the same meaning in both
//! domains). In other words, all field projections for the same field on the same local do not
//! have meaningfully different types if ever. Indexed projections are the exception: `a[x]` needs
//! to be treated as mapping to the same move path as `a[y]` as well as `a[13]`, etc. So we map
//! these `x`/`y` values to `()`.
//!
//! (In theory, the analysis could be extended to work with sets of paths, so that `a[0]` and
//! `a[13]` could be kept distinct, while `a[x]` would still overlap them both. But that is not
//! what this representation does today.)
//! [`MovePath`]s track the initialization state of places and their sub-paths.
use std::fmt;
use std::ops::{Index, IndexMut};
use rustc_abi::{FieldIdx, VariantIdx};
use rustc_data_structures::fx::FxHashMap;
use rustc_index::{IndexSlice, IndexVec};
use rustc_middle::mir::*;
@ -309,7 +300,7 @@ pub struct MovePathLookup<'tcx> {
/// subsequent search so that it is solely relative to that
/// base-place). For the remaining lookup, we map the projection
/// elem to the associated MovePathIndex.
projections: FxHashMap<(MovePathIndex, ProjectionKind), MovePathIndex>,
projections: FxHashMap<(MovePathIndex, MoveSubPath), MovePathIndex>,
un_derefer: UnDerefer<'tcx>,
}
@ -333,7 +324,14 @@ impl<'tcx> MovePathLookup<'tcx> {
};
for (_, elem) in self.un_derefer.iter_projections(place) {
if let Some(&subpath) = self.projections.get(&(result, elem.kind())) {
let subpath = match MoveSubPath::of(elem.kind()) {
MoveSubPathResult::One(kind) => self.projections.get(&(result, kind)),
MoveSubPathResult::Subslice { .. } => None, // just use the parent MovePath
MoveSubPathResult::Skip => continue,
MoveSubPathResult::Stop => None,
};
if let Some(&subpath) = subpath {
result = subpath;
} else {
return LookupResult::Parent(Some(result));
@ -390,3 +388,58 @@ impl<'tcx> MoveData<'tcx> {
self.move_paths[root].find_descendant(&self.move_paths, pred)
}
}
/// A projection into a move path producing a child path
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
pub enum MoveSubPath {
    /// Dereference of a box (the only deref a move path can pass through).
    Deref,
    /// Projection to a named or positional field of an ADT, closure, coroutine, or tuple.
    Field(FieldIdx),
    /// A constant index counted from the front of an array (`from_end: false` only);
    /// subslices are split into a run of these so that move paths stay disjoint.
    ConstantIndex(u64),
    /// Downcast to a specific enum variant.
    Downcast(VariantIdx),
    /// Unwrapping of an unsafe binder type.
    UnwrapUnsafeBinder,
}
/// Result of classifying a `ProjectionKind` via [`MoveSubPath::of`].
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum MoveSubPathResult {
    /// The projection maps one-to-one onto a single `MoveSubPath`.
    One(MoveSubPath),
    /// A front-anchored subslice; must be expanded into per-element
    /// `ConstantIndex` sub-paths by the caller.
    Subslice { from: u64, to: u64 },
    /// The projection shares its parent's move path and can be skipped.
    Skip,
    /// Moves cannot pass through this projection; stop descending.
    Stop,
}
impl MoveSubPath {
pub fn of(elem: ProjectionKind) -> MoveSubPathResult {
let subpath = match elem {
// correspond to a MoveSubPath
ProjectionKind::Deref => MoveSubPath::Deref,
ProjectionKind::Field(idx, _) => MoveSubPath::Field(idx),
ProjectionKind::ConstantIndex { offset, min_length: _, from_end: false } => {
MoveSubPath::ConstantIndex(offset)
}
ProjectionKind::Downcast(_, idx) => MoveSubPath::Downcast(idx),
ProjectionKind::UnwrapUnsafeBinder(_) => MoveSubPath::UnwrapUnsafeBinder,
// this should be the same move path as its parent
// its fine to skip because it cannot have sibling move paths
// and it is not a user visible path
ProjectionKind::OpaqueCast(_) => {
return MoveSubPathResult::Skip;
}
// these cannot be moved through
ProjectionKind::Index(_)
| ProjectionKind::ConstantIndex { offset: _, min_length: _, from_end: true }
| ProjectionKind::Subslice { from: _, to: _, from_end: true } => {
return MoveSubPathResult::Stop;
}
// subslice is special.
// it needs to be split into individual move paths
ProjectionKind::Subslice { from, to, from_end: false } => {
return MoveSubPathResult::Subslice { from, to };
}
};
MoveSubPathResult::One(subpath)
}
}

View file

@ -44,7 +44,7 @@ impl<'tcx> MirLint<'tcx> for CheckDropRecursion {
// First check if `body` is an `fn drop()` of `Drop`
if let DefKind::AssocFn = tcx.def_kind(def_id)
&& let Some(impl_id) = tcx.trait_impl_of_assoc(def_id.to_def_id())
&& let trait_ref = tcx.impl_trait_ref(impl_id).unwrap()
&& let trait_ref = tcx.impl_trait_ref(impl_id)
&& tcx.is_lang_item(trait_ref.instantiate_identity().def_id, LangItem::Drop)
// avoid erroneous `Drop` impls from causing ICEs below
&& let sig = tcx.fn_sig(def_id).instantiate_identity()

View file

@ -17,7 +17,7 @@
//! [`SpanMarker`]: rustc_middle::mir::coverage::CoverageKind::SpanMarker
use rustc_middle::mir::coverage::CoverageKind;
use rustc_middle::mir::{Body, BorrowKind, CastKind, Rvalue, StatementKind, TerminatorKind};
use rustc_middle::mir::*;
use rustc_middle::ty::TyCtxt;
use rustc_middle::ty::adjustment::PointerCoercion;
@ -25,7 +25,9 @@ pub(super) struct CleanupPostBorrowck;
impl<'tcx> crate::MirPass<'tcx> for CleanupPostBorrowck {
fn run_pass(&self, _tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
for basic_block in body.basic_blocks.as_mut() {
// Manually invalidate CFG caches if we actually change a terminator's edges.
let mut invalidate_cfg = false;
for basic_block in body.basic_blocks.as_mut_preserves_cfg().iter_mut() {
for statement in basic_block.statements.iter_mut() {
match statement.kind {
StatementKind::AscribeUserType(..)
@ -59,16 +61,23 @@ impl<'tcx> crate::MirPass<'tcx> for CleanupPostBorrowck {
_ => (),
}
}
// If we change any terminator, we need to ensure that we invalidated the CFG cache.
let terminator = basic_block.terminator_mut();
match terminator.kind {
TerminatorKind::FalseEdge { real_target, .. }
| TerminatorKind::FalseUnwind { real_target, .. } => {
invalidate_cfg = true;
terminator.kind = TerminatorKind::Goto { target: real_target };
}
_ => {}
}
}
if invalidate_cfg {
body.basic_blocks.invalidate_cfg_cache();
}
body.user_type_annotations.raw.clear();
for decl in &mut body.local_decls {

View file

@ -4,57 +4,72 @@
//! body of the function instead of just the signature. These can be useful for optimization
//! purposes on a best-effort basis. We compute them here and store them into the crate metadata so
//! dependent crates can use them.
//!
//! Note that this *crucially* relies on codegen *not* doing any more MIR-level transformations
//! after `optimized_mir`! We check for things that are *not* guaranteed to be preserved by MIR
//! transforms, such as which local variables happen to be mutated.
use rustc_hir::def_id::LocalDefId;
use rustc_index::bit_set::DenseBitSet;
use rustc_middle::mir::visit::{NonMutatingUseContext, PlaceContext, Visitor};
use rustc_middle::mir::{Body, Location, Operand, Place, RETURN_PLACE, Terminator, TerminatorKind};
use rustc_middle::ty::{self, DeducedParamAttrs, Ty, TyCtxt};
use rustc_index::IndexVec;
use rustc_middle::middle::deduced_param_attrs::{DeducedParamAttrs, DeducedReadOnlyParam};
use rustc_middle::mir::visit::{MutatingUseContext, NonMutatingUseContext, PlaceContext, Visitor};
use rustc_middle::mir::*;
use rustc_middle::ty::{self, Ty, TyCtxt};
use rustc_session::config::OptLevel;
/// A visitor that determines which arguments have been mutated. We can't use the mutability field
/// on LocalDecl for this because it has no meaning post-optimization.
struct DeduceReadOnly {
/// Each bit is indexed by argument number, starting at zero (so 0 corresponds to local decl
/// 1). The bit is true if the argument may have been mutated or false if we know it hasn't
/// 1). The bit is false if the argument may have been mutated or true if we know it hasn't
/// been up to the point we're at.
mutable_args: DenseBitSet<usize>,
read_only: IndexVec<usize, DeducedReadOnlyParam>,
}
impl DeduceReadOnly {
/// Returns a new DeduceReadOnly instance.
fn new(arg_count: usize) -> Self {
Self { mutable_args: DenseBitSet::new_empty(arg_count) }
Self { read_only: IndexVec::from_elem_n(DeducedReadOnlyParam::empty(), arg_count) }
}
/// Returns whether the given local is a parameter and its index.
fn as_param(&self, local: Local) -> Option<usize> {
// Locals and parameters are shifted by `RETURN_PLACE`.
let param_index = local.as_usize().checked_sub(1)?;
if param_index < self.read_only.len() { Some(param_index) } else { None }
}
}
impl<'tcx> Visitor<'tcx> for DeduceReadOnly {
fn visit_place(&mut self, place: &Place<'tcx>, context: PlaceContext, _location: Location) {
// We're only interested in arguments.
if place.local == RETURN_PLACE || place.local.index() > self.mutable_args.domain_size() {
return;
}
let Some(param_index) = self.as_param(place.local) else { return };
let mark_as_mutable = match context {
PlaceContext::MutatingUse(..) => {
// This is a mutation, so mark it as such.
true
match context {
// Not mutating, so it's fine.
PlaceContext::NonUse(..) => {}
// Dereference is not a mutation.
_ if place.is_indirect_first_projection() => {}
// This is a `Drop`. It could disappear at monomorphization, so mark it specially.
PlaceContext::MutatingUse(MutatingUseContext::Drop)
// Projection changes the place's type, so `needs_drop(local.ty)` is not
// `needs_drop(place.ty)`.
if place.projection.is_empty() => {
self.read_only[param_index] |= DeducedReadOnlyParam::IF_NO_DROP;
}
PlaceContext::NonMutatingUse(NonMutatingUseContext::RawBorrow) => {
// Whether mutating though a `&raw const` is allowed is still undecided, so we
// disable any sketchy `readonly` optimizations for now. But we only need to do
// this if the pointer would point into the argument. IOW: for indirect places,
// like `&raw (*local).field`, this surely cannot mutate `local`.
!place.is_indirect()
// This is a mutation, so mark it as such.
PlaceContext::MutatingUse(..)
// Whether mutating though a `&raw const` is allowed is still undecided, so we
// disable any sketchy `readonly` optimizations for now.
| PlaceContext::NonMutatingUse(NonMutatingUseContext::RawBorrow) => {
self.read_only[param_index] |= DeducedReadOnlyParam::MUTATED;
}
PlaceContext::NonMutatingUse(..) | PlaceContext::NonUse(..) => {
// Not mutating, so it's fine.
false
// Not mutating if the parameter is `Freeze`.
PlaceContext::NonMutatingUse(NonMutatingUseContext::SharedBorrow) => {
self.read_only[param_index] |= DeducedReadOnlyParam::IF_FREEZE;
}
};
if mark_as_mutable {
self.mutable_args.insert(place.local.index() - 1);
// Not mutating, so it's fine.
PlaceContext::NonMutatingUse(..) => {}
}
}
@ -82,16 +97,12 @@ impl<'tcx> Visitor<'tcx> for DeduceReadOnly {
// from.
if let TerminatorKind::Call { ref args, .. } = terminator.kind {
for arg in args {
if let Operand::Move(place) = arg.node {
let local = place.local;
if place.is_indirect()
|| local == RETURN_PLACE
|| local.index() > self.mutable_args.domain_size()
{
continue;
}
self.mutable_args.insert(local.index() - 1);
if let Operand::Move(place) = arg.node
// We're only interested in arguments.
&& let Some(param_index) = self.as_param(place.local)
&& !place.is_indirect_first_projection()
{
self.read_only[param_index] |= DeducedReadOnlyParam::MUTATED;
}
}
};
@ -121,6 +132,7 @@ fn type_will_always_be_passed_directly(ty: Ty<'_>) -> bool {
/// body of the function instead of just the signature. These can be useful for optimization
/// purposes on a best-effort basis. We compute them here and store them into the crate metadata so
/// dependent crates can use them.
#[tracing::instrument(level = "trace", skip(tcx), ret)]
pub(super) fn deduced_param_attrs<'tcx>(
tcx: TyCtxt<'tcx>,
def_id: LocalDefId,
@ -160,36 +172,19 @@ pub(super) fn deduced_param_attrs<'tcx>(
let body: &Body<'tcx> = tcx.optimized_mir(def_id);
let mut deduce_read_only = DeduceReadOnly::new(body.arg_count);
deduce_read_only.visit_body(body);
tracing::trace!(?deduce_read_only.read_only);
// Set the `readonly` attribute for every argument that we concluded is immutable and that
// contains no UnsafeCells.
//
// FIXME: This is overly conservative around generic parameters: `is_freeze()` will always
// return false for them. For a description of alternatives that could do a better job here,
// see [1].
//
// [1]: https://github.com/rust-lang/rust/pull/103172#discussion_r999139997
let typing_env = body.typing_env(tcx);
let mut deduced_param_attrs = tcx.arena.alloc_from_iter(
body.local_decls.iter().skip(1).take(body.arg_count).enumerate().map(
|(arg_index, local_decl)| DeducedParamAttrs {
read_only: !deduce_read_only.mutable_args.contains(arg_index)
// We must normalize here to reveal opaques and normalize
// their generic parameters, otherwise we'll see exponential
// blow-up in compile times: #113372
&& tcx
.normalize_erasing_regions(typing_env, local_decl.ty)
.is_freeze(tcx, typing_env),
},
),
let mut deduced_param_attrs: &[_] = tcx.arena.alloc_from_iter(
deduce_read_only.read_only.into_iter().map(|read_only| DeducedParamAttrs { read_only }),
);
// Trailing parameters past the size of the `deduced_param_attrs` array are assumed to have the
// default set of attributes, so we don't have to store them explicitly. Pop them off to save a
// few bytes in metadata.
while deduced_param_attrs.last() == Some(&DeducedParamAttrs::default()) {
let last_index = deduced_param_attrs.len() - 1;
deduced_param_attrs = &mut deduced_param_attrs[0..last_index];
while let Some((last, rest)) = deduced_param_attrs.split_last()
&& last.is_default()
{
deduced_param_attrs = rest;
}
deduced_param_attrs

View file

@ -1926,13 +1926,8 @@ impl<'tcx> MutVisitor<'tcx> for VnState<'_, '_, 'tcx> {
self.assign(local, opaque);
}
}
// Function calls and ASM may invalidate (nested) derefs. We must handle them carefully.
// Currently, only preserving derefs for trivial terminators like SwitchInt and Goto.
let safe_to_preserve_derefs = matches!(
terminator.kind,
TerminatorKind::SwitchInt { .. } | TerminatorKind::Goto { .. }
);
if !safe_to_preserve_derefs {
// Terminators that can write to memory may invalidate (nested) derefs.
if terminator.kind.can_write_to_memory() {
self.invalidate_derefs();
}
self.super_terminator(terminator, location);

View file

@ -245,7 +245,7 @@ impl<'tcx> Inliner<'tcx> for ForceInliner<'tcx> {
fn on_inline_failure(&self, callsite: &CallSite<'tcx>, reason: &'static str) {
let tcx = self.tcx();
let InlineAttr::Force { attr_span, reason: justification } =
tcx.codegen_fn_attrs(callsite.callee.def_id()).inline
tcx.codegen_instance_attrs(callsite.callee.def).inline
else {
bug!("called on item without required inlining");
};
@ -603,7 +603,8 @@ fn try_inlining<'tcx, I: Inliner<'tcx>>(
let tcx = inliner.tcx();
check_mir_is_available(inliner, caller_body, callsite.callee)?;
let callee_attrs = tcx.codegen_fn_attrs(callsite.callee.def_id());
let callee_attrs = tcx.codegen_instance_attrs(callsite.callee.def);
let callee_attrs = callee_attrs.as_ref();
check_inline::is_inline_valid_on_fn(tcx, callsite.callee.def_id())?;
check_codegen_attributes(inliner, callsite, callee_attrs)?;
inliner.check_codegen_attributes_extra(callee_attrs)?;

View file

@ -189,6 +189,7 @@ declare_passes! {
Final
};
mod simplify_branches : SimplifyConstCondition {
AfterInstSimplify,
AfterConstProp,
Final
};
@ -708,6 +709,15 @@ pub(crate) fn run_optimization_passes<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'
// optimizations. This invalidates CFG caches, so avoid putting between
// `ReferencePropagation` and `GVN` which both use the dominator tree.
&instsimplify::InstSimplify::AfterSimplifyCfg,
// After `InstSimplify-after-simplifycfg` with `-Zub_checks=false`, simplify
// ```
// _13 = const false;
// assume(copy _13);
// Call(precondition_check);
// ```
// to unreachable to eliminate the call to help later passes.
// This invalidates CFG caches also.
&o1(simplify_branches::SimplifyConstCondition::AfterInstSimplify),
&ref_prop::ReferencePropagation,
&sroa::ScalarReplacementOfAggregates,
&simplify::SimplifyLocals::BeforeConstProp,

Some files were not shown because too many files have changed in this diff Show more