Merge from rustc

This commit is contained in:
Ralf Jung 2025-04-24 10:57:54 +02:00
commit 6496974882
906 changed files with 16696 additions and 9370 deletions

View file

@ -125,9 +125,6 @@ jobs:
# which then uses log commands to actually set them.
EXTRA_VARIABLES: ${{ toJson(matrix.env) }}
- name: setup upstream remote
run: src/ci/scripts/setup-upstream-remote.sh
- name: ensure the channel matches the target branch
run: src/ci/scripts/verify-channel.sh

View file

@ -546,12 +546,14 @@ checksum = "f46ad14479a25103f283c0f10005961cf086d8dc42205bb44c46ac563475dca6"
[[package]]
name = "clippy"
version = "0.1.87"
version = "0.1.88"
dependencies = [
"anstream",
"askama",
"cargo_metadata 0.18.1",
"clippy_config",
"clippy_lints",
"clippy_lints_internal",
"clippy_utils",
"color-print",
"filetime",
@ -562,7 +564,6 @@ dependencies = [
"pulldown-cmark 0.11.3",
"quote",
"regex",
"rinja",
"rustc_tools_util 0.4.2",
"serde",
"serde_json",
@ -577,7 +578,7 @@ dependencies = [
[[package]]
name = "clippy_config"
version = "0.1.87"
version = "0.1.88"
dependencies = [
"clippy_utils",
"itertools",
@ -602,7 +603,7 @@ dependencies = [
[[package]]
name = "clippy_lints"
version = "0.1.87"
version = "0.1.88"
dependencies = [
"arrayvec",
"cargo_metadata 0.18.1",
@ -610,12 +611,9 @@ dependencies = [
"clippy_utils",
"itertools",
"quine-mc_cluskey",
"regex",
"regex-syntax 0.8.5",
"semver",
"serde",
"serde_json",
"tempfile",
"toml 0.7.8",
"unicode-normalization",
"unicode-script",
@ -623,9 +621,19 @@ dependencies = [
"walkdir",
]
[[package]]
name = "clippy_lints_internal"
version = "0.0.1"
dependencies = [
"clippy_config",
"clippy_utils",
"regex",
"rustc-semver",
]
[[package]]
name = "clippy_utils"
version = "0.1.87"
version = "0.1.88"
dependencies = [
"arrayvec",
"itertools",
@ -2244,22 +2252,6 @@ dependencies = [
"libc",
]
[[package]]
name = "mime"
version = "0.3.17"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a"
[[package]]
name = "mime_guess"
version = "2.0.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f7c44f8e672c00fe5308fa235f821cb4198414e1c77935c1ab6948d3fd78550e"
dependencies = [
"mime",
"unicase",
]
[[package]]
name = "minifier"
version = "0.3.5"
@ -3084,45 +3076,6 @@ dependencies = [
"walkdir",
]
[[package]]
name = "rinja"
version = "0.3.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3dc4940d00595430b3d7d5a01f6222b5e5b51395d1120bdb28d854bb8abb17a5"
dependencies = [
"itoa",
"rinja_derive",
]
[[package]]
name = "rinja_derive"
version = "0.3.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "08d9ed0146aef6e2825f1b1515f074510549efba38d71f4554eec32eb36ba18b"
dependencies = [
"basic-toml",
"memchr",
"mime",
"mime_guess",
"proc-macro2",
"quote",
"rinja_parser",
"rustc-hash 2.1.1",
"serde",
"syn 2.0.100",
]
[[package]]
name = "rinja_parser"
version = "0.3.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "93f9a866e2e00a7a1fb27e46e9e324a6f7c0e7edc4543cae1d38f4e4a100c610"
dependencies = [
"memchr",
"nom",
"serde",
]
[[package]]
name = "run_make_support"
version = "0.2.0"
@ -3196,6 +3149,12 @@ dependencies = [
"crossbeam-utils",
]
[[package]]
name = "rustc-semver"
version = "1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5be1bdc7edf596692617627bbfeaba522131b18e06ca4df2b6b689e3c5d5ce84"
[[package]]
name = "rustc-stable-hash"
version = "0.1.2"
@ -4562,6 +4521,7 @@ dependencies = [
"rustc_hir",
"rustc_middle",
"rustc_span",
"smallvec",
"tracing",
]

View file

@ -5,7 +5,7 @@ use rustc_hir as hir;
use rustc_hir::GenericArg;
use rustc_hir::def::{DefKind, PartialRes, Res};
use rustc_hir::def_id::DefId;
use rustc_middle::span_bug;
use rustc_middle::{span_bug, ty};
use rustc_session::parse::add_feature_diagnostics;
use rustc_span::{BytePos, DUMMY_SP, DesugaringKind, Ident, Span, Symbol, sym};
use smallvec::{SmallVec, smallvec};
@ -590,14 +590,10 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
/// lowering of `async Fn()` bounds to desugar to another trait like `LendingFn`.
fn map_trait_to_async_trait(&self, def_id: DefId) -> Option<DefId> {
let lang_items = self.tcx.lang_items();
if Some(def_id) == lang_items.fn_trait() {
lang_items.async_fn_trait()
} else if Some(def_id) == lang_items.fn_mut_trait() {
lang_items.async_fn_mut_trait()
} else if Some(def_id) == lang_items.fn_once_trait() {
lang_items.async_fn_once_trait()
} else {
None
match self.tcx.fn_trait_kind_from_def_id(def_id)? {
ty::ClosureKind::Fn => lang_items.async_fn_trait(),
ty::ClosureKind::FnMut => lang_items.async_fn_mut_trait(),
ty::ClosureKind::FnOnce => lang_items.async_fn_once_trait(),
}
}
}

View file

@ -1265,12 +1265,7 @@ impl<'infcx, 'tcx> MirBorrowckCtxt<'_, 'infcx, 'tcx> {
&& let CallKind::FnCall { fn_trait_id, self_ty } = kind
&& let ty::Param(_) = self_ty.kind()
&& ty == self_ty
&& [
self.infcx.tcx.lang_items().fn_once_trait(),
self.infcx.tcx.lang_items().fn_mut_trait(),
self.infcx.tcx.lang_items().fn_trait(),
]
.contains(&Some(fn_trait_id))
&& self.infcx.tcx.fn_trait_kind_from_def_id(fn_trait_id).is_some()
{
// Do not suggest `F: FnOnce() + Clone`.
false

View file

@ -702,12 +702,12 @@ struct MirBorrowckCtxt<'a, 'infcx, 'tcx> {
// 2. loans made in overlapping scopes do not conflict
// 3. assignments do not affect things loaned out as immutable
// 4. moves do not affect things loaned out in any way
impl<'a, 'tcx> ResultsVisitor<'a, 'tcx, Borrowck<'a, 'tcx>> for MirBorrowckCtxt<'a, '_, 'tcx> {
impl<'a, 'tcx> ResultsVisitor<'tcx, Borrowck<'a, 'tcx>> for MirBorrowckCtxt<'a, '_, 'tcx> {
fn visit_after_early_statement_effect(
&mut self,
_results: &mut Results<'tcx, Borrowck<'a, 'tcx>>,
state: &BorrowckDomain,
stmt: &'a Statement<'tcx>,
stmt: &Statement<'tcx>,
location: Location,
) {
debug!("MirBorrowckCtxt::process_statement({:?}, {:?}): {:?}", location, stmt, state);
@ -783,7 +783,7 @@ impl<'a, 'tcx> ResultsVisitor<'a, 'tcx, Borrowck<'a, 'tcx>> for MirBorrowckCtxt<
&mut self,
_results: &mut Results<'tcx, Borrowck<'a, 'tcx>>,
state: &BorrowckDomain,
term: &'a Terminator<'tcx>,
term: &Terminator<'tcx>,
loc: Location,
) {
debug!("MirBorrowckCtxt::process_terminator({:?}, {:?}): {:?}", loc, term, state);
@ -896,7 +896,7 @@ impl<'a, 'tcx> ResultsVisitor<'a, 'tcx, Borrowck<'a, 'tcx>> for MirBorrowckCtxt<
&mut self,
_results: &mut Results<'tcx, Borrowck<'a, 'tcx>>,
state: &BorrowckDomain,
term: &'a Terminator<'tcx>,
term: &Terminator<'tcx>,
loc: Location,
) {
let span = term.source_info.span;
@ -1363,7 +1363,7 @@ impl<'a, 'tcx> MirBorrowckCtxt<'a, '_, 'tcx> {
fn consume_rvalue(
&mut self,
location: Location,
(rvalue, span): (&'a Rvalue<'tcx>, Span),
(rvalue, span): (&Rvalue<'tcx>, Span),
state: &BorrowckDomain,
) {
match rvalue {
@ -1636,7 +1636,7 @@ impl<'a, 'tcx> MirBorrowckCtxt<'a, '_, 'tcx> {
fn consume_operand(
&mut self,
location: Location,
(operand, span): (&'a Operand<'tcx>, Span),
(operand, span): (&Operand<'tcx>, Span),
state: &BorrowckDomain,
) {
match *operand {

View file

@ -8,9 +8,6 @@
unboxed_closures
)]
#![allow(internal_features)]
// FIXME once abi_unsupported_vector_types is a hard error disable the foo test when the respective
// target feature is not enabled.
#![allow(abi_unsupported_vector_types)]
#[cfg(target_arch = "x86_64")]
use std::arch::x86_64::*;

View file

@ -56,7 +56,7 @@ impl<'tcx> Printer<'tcx> for AbsolutePathPrinter<'tcx> {
| ty::Coroutine(def_id, args) => self.print_def_path(def_id, args),
ty::Foreign(def_id) => self.print_def_path(def_id, &[]),
ty::Alias(ty::Weak, _) => bug!("type_name: unexpected weak projection"),
ty::Alias(ty::Free, _) => bug!("type_name: unexpected free alias"),
ty::Alias(ty::Inherent, _) => bug!("type_name: unexpected inherent projection"),
ty::CoroutineWitness(..) => bug!("type_name: unexpected `CoroutineWitness`"),
}

View file

@ -2,8 +2,7 @@
use rustc_abi::ExternAbi;
use rustc_errors::DiagMessage;
use rustc_hir::{self as hir};
use rustc_middle::bug;
use rustc_hir::{self as hir, LangItem};
use rustc_middle::traits::{ObligationCause, ObligationCauseCode};
use rustc_middle::ty::{self, Ty, TyCtxt};
use rustc_span::def_id::LocalDefId;
@ -173,23 +172,22 @@ pub(crate) fn check_intrinsic_type(
ty::BoundVariableKind::Region(ty::BoundRegionKind::ClosureEnv),
]);
let mk_va_list_ty = |mutbl| {
tcx.lang_items().va_list().map(|did| {
let region = ty::Region::new_bound(
tcx,
ty::INNERMOST,
ty::BoundRegion { var: ty::BoundVar::ZERO, kind: ty::BoundRegionKind::Anon },
);
let env_region = ty::Region::new_bound(
tcx,
ty::INNERMOST,
ty::BoundRegion {
var: ty::BoundVar::from_u32(2),
kind: ty::BoundRegionKind::ClosureEnv,
},
);
let va_list_ty = tcx.type_of(did).instantiate(tcx, &[region.into()]);
(Ty::new_ref(tcx, env_region, va_list_ty, mutbl), va_list_ty)
})
let did = tcx.require_lang_item(LangItem::VaList, Some(span));
let region = ty::Region::new_bound(
tcx,
ty::INNERMOST,
ty::BoundRegion { var: ty::BoundVar::ZERO, kind: ty::BoundRegionKind::Anon },
);
let env_region = ty::Region::new_bound(
tcx,
ty::INNERMOST,
ty::BoundRegion {
var: ty::BoundVar::from_u32(2),
kind: ty::BoundRegionKind::ClosureEnv,
},
);
let va_list_ty = tcx.type_of(did).instantiate(tcx, &[region.into()]);
(Ty::new_ref(tcx, env_region, va_list_ty, mutbl), va_list_ty)
};
let (n_tps, n_lts, n_cts, inputs, output, safety) = if name_str.starts_with("atomic_") {
@ -548,23 +546,17 @@ pub(crate) fn check_intrinsic_type(
)
}
sym::va_start | sym::va_end => match mk_va_list_ty(hir::Mutability::Mut) {
Some((va_list_ref_ty, _)) => (0, 0, vec![va_list_ref_ty], tcx.types.unit),
None => bug!("`va_list` lang item needed for C-variadic intrinsics"),
},
sym::va_start | sym::va_end => {
(0, 0, vec![mk_va_list_ty(hir::Mutability::Mut).0], tcx.types.unit)
}
sym::va_copy => match mk_va_list_ty(hir::Mutability::Not) {
Some((va_list_ref_ty, va_list_ty)) => {
let va_list_ptr_ty = Ty::new_mut_ptr(tcx, va_list_ty);
(0, 0, vec![va_list_ptr_ty, va_list_ref_ty], tcx.types.unit)
}
None => bug!("`va_list` lang item needed for C-variadic intrinsics"),
},
sym::va_copy => {
let (va_list_ref_ty, va_list_ty) = mk_va_list_ty(hir::Mutability::Not);
let va_list_ptr_ty = Ty::new_mut_ptr(tcx, va_list_ty);
(0, 0, vec![va_list_ptr_ty, va_list_ref_ty], tcx.types.unit)
}
sym::va_arg => match mk_va_list_ty(hir::Mutability::Mut) {
Some((va_list_ref_ty, _)) => (1, 0, vec![va_list_ref_ty], param(0)),
None => bug!("`va_list` lang item needed for C-variadic intrinsics"),
},
sym::va_arg => (1, 0, vec![mk_va_list_ty(hir::Mutability::Mut).0], param(0)),
sym::nontemporal_store => {
(1, 0, vec![Ty::new_mut_ptr(tcx, param(0)), param(0)], tcx.types.unit)

View file

@ -1991,7 +1991,7 @@ fn check_variances_for_type_defn<'tcx>(
ItemKind::TyAlias(..) => {
assert!(
tcx.type_alias_is_lazy(item.owner_id),
"should not be computing variance of non-weak type alias"
"should not be computing variance of non-free type alias"
);
}
kind => span_bug!(item.span, "cannot compute the variances of {kind:?}"),
@ -2223,7 +2223,7 @@ impl<'tcx> TypeVisitor<TyCtxt<'tcx>> for IsProbablyCyclical<'tcx> {
fn visit_ty(&mut self, ty: Ty<'tcx>) -> ControlFlow<(), ()> {
let def_id = match ty.kind() {
ty::Adt(adt_def, _) => Some(adt_def.did()),
ty::Alias(ty::Weak, alias_ty) => Some(alias_ty.def_id),
ty::Alias(ty::Free, alias_ty) => Some(alias_ty.def_id),
_ => None,
};
if let Some(def_id) = def_id {

View file

@ -750,7 +750,7 @@ fn visit_implementation_of_pointer_like(checker: &Checker<'_>) -> Result<(), Err
ObligationCause::misc(impl_span, checker.impl_def_id),
param_env,
nontrivial_field_ty,
tcx.lang_items().pointer_like().unwrap(),
tcx.require_lang_item(LangItem::PointerLike, Some(impl_span)),
);
// FIXME(dyn-star): We should regionck this implementation.
if ocx.select_all_or_error().is_empty() {

View file

@ -150,7 +150,7 @@ impl<'tcx> InherentCollect<'tcx> {
let id = id.owner_id.def_id;
let item_span = self.tcx.def_span(id);
let self_ty = self.tcx.type_of(id).instantiate_identity();
let mut self_ty = self.tcx.peel_off_weak_alias_tys(self_ty);
let mut self_ty = self.tcx.peel_off_free_alias_tys(self_ty);
// We allow impls on pattern types exactly when we allow impls on the base type.
// FIXME(pattern_types): Figure out the exact coherence rules we want here.
while let ty::Pat(base, _) = *self_ty.kind() {
@ -188,7 +188,7 @@ impl<'tcx> InherentCollect<'tcx> {
| ty::CoroutineClosure(..)
| ty::Coroutine(..)
| ty::CoroutineWitness(..)
| ty::Alias(ty::Weak, _)
| ty::Alias(ty::Free, _)
| ty::Bound(..)
| ty::Placeholder(_)
| ty::Infer(_) => {

View file

@ -189,7 +189,7 @@ pub(crate) fn orphan_check_impl(
ty::Projection => "associated type",
// type Foo = (impl Sized, bool)
// impl AutoTrait for Foo {}
ty::Weak => "type alias",
ty::Free => "type alias",
// type Opaque = impl Trait;
// impl AutoTrait for Opaque {}
ty::Opaque => "opaque type",

View file

@ -49,7 +49,7 @@ pub(crate) fn parameters_for<'tcx>(
include_nonconstraining: bool,
) -> Vec<Parameter> {
let mut collector = ParameterCollector { parameters: vec![], include_nonconstraining };
let value = if !include_nonconstraining { tcx.expand_weak_alias_tys(value) } else { value };
let value = if !include_nonconstraining { tcx.expand_free_alias_tys(value) } else { value };
value.visit_with(&mut collector);
collector.parameters
}
@ -68,9 +68,9 @@ impl<'tcx> TypeVisitor<TyCtxt<'tcx>> for ParameterCollector {
{
return;
}
// All weak alias types should've been expanded beforehand.
ty::Alias(ty::Weak, _) if !self.include_nonconstraining => {
bug!("unexpected weak alias type")
// All free alias types should've been expanded beforehand.
ty::Alias(ty::Free, _) if !self.include_nonconstraining => {
bug!("unexpected free alias type")
}
ty::Param(param) => self.parameters.push(Parameter::from(param)),
_ => {}

View file

@ -958,7 +958,7 @@ impl<'tcx> dyn HirTyLowerer<'tcx> + '_ {
// feature `lazy_type_alias` enabled get encoded as a type alias that normalization will
// then actually instantiate the where bounds of.
let alias_ty = ty::AliasTy::new_from_args(tcx, did, args);
Ty::new_alias(tcx, ty::Weak, alias_ty)
Ty::new_alias(tcx, ty::Free, alias_ty)
} else {
tcx.at(span).type_of(did).instantiate(tcx, args)
}

View file

@ -157,10 +157,10 @@ fn insert_required_predicates_to_be_wf<'tcx>(
);
}
ty::Alias(ty::Weak, alias) => {
ty::Alias(ty::Free, alias) => {
// This corresponds to a type like `Type<'a, T>`.
// We check inferred and explicit predicates.
debug!("Weak");
debug!("Free");
check_inferred_predicates(
tcx,
alias.def_id,

View file

@ -107,7 +107,7 @@ impl<'a, 'tcx> ConstraintContext<'a, 'tcx> {
let current_item = &CurrentItem { inferred_start };
let ty = tcx.type_of(def_id).instantiate_identity();
// The type as returned by `type_of` is the underlying type and generally not a weak projection.
// The type as returned by `type_of` is the underlying type and generally not a free alias.
// Therefore we need to check the `DefKind` first.
if let DefKind::TyAlias = tcx.def_kind(def_id)
&& tcx.type_alias_is_lazy(def_id)
@ -282,7 +282,7 @@ impl<'a, 'tcx> ConstraintContext<'a, 'tcx> {
self.add_constraints_from_invariant_args(current, data.args, variance);
}
ty::Alias(ty::Weak, ref data) => {
ty::Alias(ty::Free, ref data) => {
self.add_constraints_from_args(current, data.def_id, data.args, variance);
}

View file

@ -37,7 +37,7 @@ pub(crate) fn check_legal_trait_for_method_call(
body_id: DefId,
) -> Result<(), ErrorGuaranteed> {
if tcx.is_lang_item(trait_id, LangItem::Drop)
&& tcx.lang_items().fallback_surface_drop_fn() != Some(body_id)
&& !tcx.is_lang_item(body_id, LangItem::FallbackSurfaceDrop)
{
let sugg = if let Some(receiver) = receiver.filter(|s| !s.is_empty()) {
errors::ExplicitDestructorCallSugg::Snippet {

View file

@ -163,7 +163,15 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
// Resume type defaults to `()` if the coroutine has no argument.
let resume_ty = liberated_sig.inputs().get(0).copied().unwrap_or(tcx.types.unit);
let interior = self.next_ty_var(expr_span);
// In the new solver, we can just instantiate this eagerly
// with the witness. This will ensure that goals that don't need
// to stall on interior types will get processed eagerly.
let interior = if self.next_trait_solver() {
Ty::new_coroutine_witness(tcx, expr_def_id.to_def_id(), parent_args)
} else {
self.next_ty_var(expr_span)
};
self.deferred_coroutine_interiors.borrow_mut().push((expr_def_id, interior));
// Coroutines that come from coroutine closures have not yet determined

View file

@ -635,34 +635,39 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
let mut obligations = vec![];
for &(coroutine_def_id, interior) in coroutines.iter() {
debug!(?coroutine_def_id);
if !self.next_trait_solver() {
for &(coroutine_def_id, interior) in coroutines.iter() {
debug!(?coroutine_def_id);
// Create the `CoroutineWitness` type that we will unify with `interior`.
let args = ty::GenericArgs::identity_for_item(
self.tcx,
self.tcx.typeck_root_def_id(coroutine_def_id.to_def_id()),
);
let witness = Ty::new_coroutine_witness(self.tcx, coroutine_def_id.to_def_id(), args);
// Create the `CoroutineWitness` type that we will unify with `interior`.
let args = ty::GenericArgs::identity_for_item(
self.tcx,
self.tcx.typeck_root_def_id(coroutine_def_id.to_def_id()),
);
let witness =
Ty::new_coroutine_witness(self.tcx, coroutine_def_id.to_def_id(), args);
// Unify `interior` with `witness` and collect all the resulting obligations.
let span = self.tcx.hir_body_owned_by(coroutine_def_id).value.span;
let ty::Infer(ty::InferTy::TyVar(_)) = interior.kind() else {
span_bug!(span, "coroutine interior witness not infer: {:?}", interior.kind())
};
let ok = self
.at(&self.misc(span), self.param_env)
// Will never define opaque types, as all we do is instantiate a type variable.
.eq(DefineOpaqueTypes::Yes, interior, witness)
.expect("Failed to unify coroutine interior type");
// Unify `interior` with `witness` and collect all the resulting obligations.
let span = self.tcx.hir_body_owned_by(coroutine_def_id).value.span;
let ty::Infer(ty::InferTy::TyVar(_)) = interior.kind() else {
span_bug!(span, "coroutine interior witness not infer: {:?}", interior.kind())
};
let ok = self
.at(&self.misc(span), self.param_env)
// Will never define opaque types, as all we do is instantiate a type variable.
.eq(DefineOpaqueTypes::Yes, interior, witness)
.expect("Failed to unify coroutine interior type");
obligations.extend(ok.obligations);
obligations.extend(ok.obligations);
}
}
// FIXME: Use a real visitor for unstalled obligations in the new solver.
if !coroutines.is_empty() {
obligations
.extend(self.fulfillment_cx.borrow_mut().drain_unstalled_obligations(&self.infcx));
obligations.extend(
self.fulfillment_cx
.borrow_mut()
.drain_stalled_obligations_for_coroutines(&self.infcx),
);
}
self.typeck_results

View file

@ -925,7 +925,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
let detect_dotdot = |err: &mut Diag<'_>, ty: Ty<'_>, expr: &hir::Expr<'_>| {
if let ty::Adt(adt, _) = ty.kind()
&& self.tcx().lang_items().get(hir::LangItem::RangeFull) == Some(adt.did())
&& self.tcx().is_lang_item(adt.did(), hir::LangItem::RangeFull)
&& let hir::ExprKind::Struct(
hir::QPath::LangItem(hir::LangItem::RangeFull, _),
[],

View file

@ -337,7 +337,7 @@ impl<'tcx> HirTyLowerer<'tcx> for FnCtxt<'_, 'tcx> {
match ty.kind() {
ty::Adt(adt_def, _) => Some(*adt_def),
// FIXME(#104767): Should we handle bound regions here?
ty::Alias(ty::Projection | ty::Inherent | ty::Weak, _)
ty::Alias(ty::Projection | ty::Inherent | ty::Free, _)
if !ty.has_escaping_bound_vars() =>
{
if self.next_trait_solver() {
@ -357,7 +357,7 @@ impl<'tcx> HirTyLowerer<'tcx> for FnCtxt<'_, 'tcx> {
// WF obligations that are registered elsewhere, but they have a
// better cause code assigned to them in `add_required_obligations_for_hir`.
// This means that they should shadow obligations with worse spans.
if let ty::Alias(ty::Projection | ty::Weak, ty::AliasTy { args, def_id, .. }) =
if let ty::Alias(ty::Projection | ty::Free, ty::AliasTy { args, def_id, .. }) =
ty.kind()
{
self.add_required_obligations_for_hir(span, *def_id, args, hir_id);

View file

@ -1457,15 +1457,18 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
_ => (None, None),
};
let ranges = &[
self.tcx.lang_items().range_struct(),
self.tcx.lang_items().range_from_struct(),
self.tcx.lang_items().range_to_struct(),
self.tcx.lang_items().range_full_struct(),
self.tcx.lang_items().range_inclusive_struct(),
self.tcx.lang_items().range_to_inclusive_struct(),
];
if type_def_id != None && ranges.contains(&type_def_id) {
let is_range = match type_def_id.and_then(|id| self.tcx.as_lang_item(id)) {
Some(
LangItem::Range
| LangItem::RangeFrom
| LangItem::RangeTo
| LangItem::RangeFull
| LangItem::RangeInclusiveStruct
| LangItem::RangeToInclusive,
) => true,
_ => false,
};
if is_range {
if !self.maybe_suggest_range_literal(&mut e, item_def_id, *ident) {
let msg = "constants only support matching by type, \
if you meant to match against a range of values, \

View file

@ -2,9 +2,8 @@ use std::cell::RefCell;
use std::ops::Deref;
use rustc_data_structures::unord::{UnordMap, UnordSet};
use rustc_hir as hir;
use rustc_hir::def_id::LocalDefId;
use rustc_hir::{HirId, HirIdMap};
use rustc_hir::{self as hir, HirId, HirIdMap, LangItem};
use rustc_infer::infer::{InferCtxt, InferOk, TyCtxtInferExt};
use rustc_middle::span_bug;
use rustc_middle::ty::{self, Ty, TyCtxt, TypeVisitableExt, TypingMode};
@ -84,7 +83,7 @@ impl<'tcx> TypeckRootCtxt<'tcx> {
let hir_owner = tcx.local_def_id_to_hir_id(def_id).owner;
let infcx =
tcx.infer_ctxt().ignoring_regions().build(TypingMode::analysis_in_body(tcx, def_id));
tcx.infer_ctxt().ignoring_regions().build(TypingMode::typeck_for_body(tcx, def_id));
let typeck_results = RefCell::new(ty::TypeckResults::new(hir_owner));
TypeckRootCtxt {
@ -137,7 +136,7 @@ impl<'tcx> TypeckRootCtxt<'tcx> {
obligation.predicate.kind().skip_binder()
&& let Some(ty) =
self.shallow_resolve(tpred.self_ty()).ty_vid().map(|t| self.root_var(t))
&& self.tcx.lang_items().sized_trait().is_some_and(|st| st != tpred.trait_ref.def_id)
&& !self.tcx.is_lang_item(tpred.trait_ref.def_id, LangItem::Sized)
{
let new_self_ty = self.tcx.types.unit;

View file

@ -8,6 +8,7 @@ use rustc_data_structures::unord::ExtendUnord;
use rustc_errors::ErrorGuaranteed;
use rustc_hir::intravisit::{self, InferKind, Visitor};
use rustc_hir::{self as hir, AmbigArg, HirId};
use rustc_infer::traits::solve::Goal;
use rustc_middle::span_bug;
use rustc_middle::traits::ObligationCause;
use rustc_middle::ty::adjustment::{Adjust, Adjustment, PointerCoercion};
@ -763,7 +764,32 @@ impl<'cx, 'tcx> WritebackCx<'cx, 'tcx> {
T: TypeFoldable<TyCtxt<'tcx>>,
{
let value = self.fcx.resolve_vars_if_possible(value);
let value = value.fold_with(&mut Resolver::new(self.fcx, span, self.body, true));
let mut goals = vec![];
let value =
value.fold_with(&mut Resolver::new(self.fcx, span, self.body, true, &mut goals));
// Ensure that we resolve goals we get from normalizing coroutine interiors,
// but we shouldn't expect those goals to need normalizing (or else we'd get
// into a somewhat awkward fixpoint situation, and we don't need it anyways).
let mut unexpected_goals = vec![];
self.typeck_results.coroutine_stalled_predicates.extend(
goals
.into_iter()
.map(|pred| {
self.fcx.resolve_vars_if_possible(pred).fold_with(&mut Resolver::new(
self.fcx,
span,
self.body,
false,
&mut unexpected_goals,
))
})
// FIXME: throwing away the param-env :(
.map(|goal| (goal.predicate, self.fcx.misc(span.to_span(self.fcx.tcx)))),
);
assert_eq!(unexpected_goals, vec![]);
assert!(!value.has_infer());
// We may have introduced e.g. `ty::Error`, if inference failed, make sure
@ -781,7 +807,12 @@ impl<'cx, 'tcx> WritebackCx<'cx, 'tcx> {
T: TypeFoldable<TyCtxt<'tcx>>,
{
let value = self.fcx.resolve_vars_if_possible(value);
let value = value.fold_with(&mut Resolver::new(self.fcx, span, self.body, false));
let mut goals = vec![];
let value =
value.fold_with(&mut Resolver::new(self.fcx, span, self.body, false, &mut goals));
assert_eq!(goals, vec![]);
assert!(!value.has_infer());
// We may have introduced e.g. `ty::Error`, if inference failed, make sure
@ -818,6 +849,7 @@ struct Resolver<'cx, 'tcx> {
/// Whether we should normalize using the new solver, disabled
/// both when using the old solver and when resolving predicates.
should_normalize: bool,
nested_goals: &'cx mut Vec<Goal<'tcx, ty::Predicate<'tcx>>>,
}
impl<'cx, 'tcx> Resolver<'cx, 'tcx> {
@ -826,8 +858,9 @@ impl<'cx, 'tcx> Resolver<'cx, 'tcx> {
span: &'cx dyn Locatable,
body: &'tcx hir::Body<'tcx>,
should_normalize: bool,
nested_goals: &'cx mut Vec<Goal<'tcx, ty::Predicate<'tcx>>>,
) -> Resolver<'cx, 'tcx> {
Resolver { fcx, span, body, should_normalize }
Resolver { fcx, span, body, nested_goals, should_normalize }
}
fn report_error(&self, p: impl Into<ty::GenericArg<'tcx>>) -> ErrorGuaranteed {
@ -864,12 +897,18 @@ impl<'cx, 'tcx> Resolver<'cx, 'tcx> {
let cause = ObligationCause::misc(self.span.to_span(tcx), body_id);
let at = self.fcx.at(&cause, self.fcx.param_env);
let universes = vec![None; outer_exclusive_binder(value).as_usize()];
solve::deeply_normalize_with_skipped_universes(at, value, universes).unwrap_or_else(
|errors| {
match solve::deeply_normalize_with_skipped_universes_and_ambiguous_goals(
at, value, universes,
) {
Ok((value, goals)) => {
self.nested_goals.extend(goals);
value
}
Err(errors) => {
let guar = self.fcx.err_ctxt().report_fulfillment_errors(errors);
new_err(tcx, guar)
},
)
}
}
} else {
value
};

View file

@ -967,7 +967,9 @@ impl<'tcx> InferCtxt<'tcx> {
pub fn can_define_opaque_ty(&self, id: impl Into<DefId>) -> bool {
debug_assert!(!self.next_trait_solver());
match self.typing_mode() {
TypingMode::Analysis { defining_opaque_types }
TypingMode::Analysis {
defining_opaque_types_and_generators: defining_opaque_types,
}
| TypingMode::Borrowck { defining_opaque_types } => {
id.into().as_local().is_some_and(|def_id| defining_opaque_types.contains(&def_id))
}
@ -1262,7 +1264,7 @@ impl<'tcx> InferCtxt<'tcx> {
// to handle them without proper canonicalization. This means we may cause cycle
// errors and fail to reveal opaques while inside of bodies. We should rename this
// function and require explicit comments on all use-sites in the future.
ty::TypingMode::Analysis { defining_opaque_types: _ }
ty::TypingMode::Analysis { defining_opaque_types_and_generators: _ }
| ty::TypingMode::Borrowck { defining_opaque_types: _ } => {
TypingMode::non_body_analysis()
}

View file

@ -113,7 +113,7 @@ impl<'tcx> InferCtxt<'tcx> {
}]);
}
// The old solver only accepts projection predicates for associated types.
ty::Alias(ty::Inherent | ty::Weak | ty::Opaque, _) => {
ty::Alias(ty::Inherent | ty::Free | ty::Opaque, _) => {
return Err(TypeError::CyclicTy(source_ty));
}
_ => bug!("generalized `{source_ty:?} to infer, not an alias"),

View file

@ -94,7 +94,7 @@ pub trait TraitEngine<'tcx, E: 'tcx>: 'tcx {
/// Among all pending obligations, collect those that are stalled on an inference variable which has
/// changed since the last call to `select_where_possible`. Those obligations are marked as
/// successful and returned.
fn drain_unstalled_obligations(
fn drain_stalled_obligations_for_coroutines(
&mut self,
infcx: &InferCtxt<'tcx>,
) -> PredicateObligations<'tcx>;

View file

@ -611,6 +611,11 @@ fn register_builtins(store: &mut LintStore) {
"converted into hard error, see PR #139001 \
<https://github.com/rust-lang/rust/issues/139001> for more information",
);
store.register_removed(
"abi_unsupported_vector_types",
"converted into hard error, \
see <https://github.com/rust-lang/rust/issues/116558> for more information",
);
}
fn register_internals(store: &mut LintStore) {

View file

@ -1,4 +1,4 @@
use rustc_hir as hir;
use rustc_hir::{self as hir, LangItem};
use rustc_middle::ty::{self, Ty};
use rustc_session::lint::FutureIncompatibilityReason;
use rustc_session::{declare_lint, impl_lint_pass};
@ -81,7 +81,7 @@ impl<'tcx> LateLintPass<'tcx> for ShadowedIntoIter {
let Some(method_def_id) = cx.typeck_results().type_dependent_def_id(expr.hir_id) else {
return;
};
if Some(method_def_id) != cx.tcx.lang_items().into_iter_fn() {
if !cx.tcx.is_lang_item(method_def_id, LangItem::IntoIterIntoIter) {
return;
}

View file

@ -1371,7 +1371,7 @@ impl<'a, 'tcx> ImproperCTypesVisitor<'a, 'tcx> {
ty::UnsafeBinder(_) => todo!("FIXME(unsafe_binder)"),
ty::Param(..)
| ty::Alias(ty::Projection | ty::Inherent | ty::Weak, ..)
| ty::Alias(ty::Projection | ty::Inherent | ty::Free, ..)
| ty::Infer(..)
| ty::Bound(..)
| ty::Error(_)

View file

@ -16,7 +16,6 @@ declare_lint_pass! {
/// that are used by other parts of the compiler.
HardwiredLints => [
// tidy-alphabetical-start
ABI_UNSUPPORTED_VECTOR_TYPES,
ABSOLUTE_PATHS_NOT_STARTING_WITH_CRATE,
AMBIGUOUS_ASSOCIATED_ITEMS,
AMBIGUOUS_GLOB_IMPORTS,
@ -5027,74 +5026,6 @@ declare_lint! {
crate_level_only
}
declare_lint! {
/// The `abi_unsupported_vector_types` lint detects function definitions and calls
/// whose ABI depends on enabling certain target features, but those features are not enabled.
///
/// ### Example
///
/// ```rust,ignore (fails on non-x86_64)
/// extern "C" fn missing_target_feature(_: std::arch::x86_64::__m256) {
/// todo!()
/// }
///
/// #[target_feature(enable = "avx")]
/// unsafe extern "C" fn with_target_feature(_: std::arch::x86_64::__m256) {
/// todo!()
/// }
///
/// fn main() {
/// let v = unsafe { std::mem::zeroed() };
/// unsafe { with_target_feature(v); }
/// }
/// ```
///
/// This will produce:
///
/// ```text
/// warning: ABI error: this function call uses a avx vector type, which is not enabled in the caller
/// --> lint_example.rs:18:12
/// |
/// | unsafe { with_target_feature(v); }
/// | ^^^^^^^^^^^^^^^^^^^^^^ function called here
/// |
/// = warning: this was previously accepted by the compiler but is being phased out; it will become a hard error in a future release!
/// = note: for more information, see issue #116558 <https://github.com/rust-lang/rust/issues/116558>
/// = help: consider enabling it globally (-C target-feature=+avx) or locally (#[target_feature(enable="avx")])
/// = note: `#[warn(abi_unsupported_vector_types)]` on by default
///
///
/// warning: ABI error: this function definition uses a avx vector type, which is not enabled
/// --> lint_example.rs:3:1
/// |
/// | pub extern "C" fn with_target_feature(_: std::arch::x86_64::__m256) {
/// | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ function defined here
/// |
/// = warning: this was previously accepted by the compiler but is being phased out; it will become a hard error in a future release!
/// = note: for more information, see issue #116558 <https://github.com/rust-lang/rust/issues/116558>
/// = help: consider enabling it globally (-C target-feature=+avx) or locally (#[target_feature(enable="avx")])
/// ```
///
///
///
/// ### Explanation
///
/// The C ABI for `__m256` requires the value to be passed in an AVX register,
/// which is only possible when the `avx` target feature is enabled.
/// Therefore, `missing_target_feature` cannot be compiled without that target feature.
/// A similar (but complementary) message is triggered when `with_target_feature` is called
/// by a function that does not enable the `avx` target feature.
///
/// Note that this lint is very similar to the `-Wpsabi` warning in `gcc`/`clang`.
pub ABI_UNSUPPORTED_VECTOR_TYPES,
Warn,
"this function call or definition uses a vector type which is not enabled",
@future_incompatible = FutureIncompatibleInfo {
reason: FutureIncompatibilityReason::FutureReleaseErrorReportInDeps,
reference: "issue #116558 <https://github.com/rust-lang/rust/issues/116558>",
};
}
declare_lint! {
/// The `wasm_c_abi` lint detects usage of the `extern "C"` ABI of wasm that is affected
/// by a planned ABI change that has the goal of aligning Rust with the standard C ABI

View file

@ -35,11 +35,10 @@ impl<'tcx> TyCtxt<'tcx> {
/// returns a corresponding [`ty::ClosureKind`].
/// For any other [`DefId`] return `None`.
pub fn fn_trait_kind_from_def_id(self, id: DefId) -> Option<ty::ClosureKind> {
let items = self.lang_items();
match Some(id) {
x if x == items.fn_trait() => Some(ty::ClosureKind::Fn),
x if x == items.fn_mut_trait() => Some(ty::ClosureKind::FnMut),
x if x == items.fn_once_trait() => Some(ty::ClosureKind::FnOnce),
match self.as_lang_item(id)? {
LangItem::Fn => Some(ty::ClosureKind::Fn),
LangItem::FnMut => Some(ty::ClosureKind::FnMut),
LangItem::FnOnce => Some(ty::ClosureKind::FnOnce),
_ => None,
}
}
@ -48,11 +47,10 @@ impl<'tcx> TyCtxt<'tcx> {
/// returns a corresponding [`ty::ClosureKind`].
/// For any other [`DefId`] return `None`.
pub fn async_fn_trait_kind_from_def_id(self, id: DefId) -> Option<ty::ClosureKind> {
let items = self.lang_items();
match Some(id) {
x if x == items.async_fn_trait() => Some(ty::ClosureKind::Fn),
x if x == items.async_fn_mut_trait() => Some(ty::ClosureKind::FnMut),
x if x == items.async_fn_once_trait() => Some(ty::ClosureKind::FnOnce),
match self.as_lang_item(id)? {
LangItem::AsyncFn => Some(ty::ClosureKind::Fn),
LangItem::AsyncFnMut => Some(ty::ClosureKind::FnMut),
LangItem::AsyncFnOnce => Some(ty::ClosureKind::FnOnce),
_ => None,
}
}

View file

@ -289,7 +289,7 @@ rustc_queries! {
/// Returns whether the type alias given by `DefId` is lazy.
///
/// I.e., if the type alias expands / ought to expand to a [weak] [alias type]
/// I.e., if the type alias expands / ought to expand to a [free] [alias type]
/// instead of the underyling aliased type.
///
/// Relevant for features `lazy_type_alias` and `type_alias_impl_trait`.
@ -298,7 +298,7 @@ rustc_queries! {
///
/// This query *may* panic if the given definition is not a type alias.
///
/// [weak]: rustc_middle::ty::Weak
/// [free]: rustc_middle::ty::Free
/// [alias type]: rustc_middle::ty::AliasTy
query type_alias_is_lazy(key: DefId) -> bool {
desc { |tcx|
@ -387,6 +387,15 @@ rustc_queries! {
}
}
query stalled_generators_within(
key: LocalDefId
) -> &'tcx ty::List<LocalDefId> {
desc {
|tcx| "computing the coroutines defined within `{}`",
tcx.def_path_str(key.to_def_id())
}
}
/// Returns the explicitly user-written *bounds* on the associated or opaque type given by `DefId`
/// that must be proven true at definition site (and which can be assumed at usage sites).
///
@ -2271,7 +2280,7 @@ rustc_queries! {
/// Do not call this query directly: Invoke `normalize` instead.
///
/// </div>
query normalize_canonicalized_weak_ty(
query normalize_canonicalized_free_alias(
goal: CanonicalAliasGoal<'tcx>
) -> Result<
&'tcx Canonical<'tcx, canonical::QueryResponse<'tcx, NormalizationResult<'tcx>>>,

View file

@ -366,7 +366,7 @@ macro_rules! define_callbacks {
pub type Storage<'tcx> = <$($K)* as keys::Key>::Cache<Erase<$V>>;
// Ensure that keys grow no larger than 80 bytes by accident.
// Ensure that keys grow no larger than 88 bytes by accident.
// Increase this limit if necessary, but do try to keep the size low if possible
#[cfg(target_pointer_width = "64")]
const _: () = {

View file

@ -404,7 +404,7 @@ pub enum ObligationCauseCode<'tcx> {
/// Requirement for a `const N: Ty` to implement `Ty: ConstParamTy`
ConstParam(Ty<'tcx>),
/// Obligations emitted during the normalization of a weak type alias.
/// Obligations emitted during the normalization of a free type alias.
TypeAlias(ObligationCauseCodeHandle<'tcx>, Span, DefId),
}

View file

@ -181,7 +181,7 @@ pub struct MethodAutoderefBadTy<'tcx> {
pub ty: Canonical<'tcx, QueryResponse<'tcx, Ty<'tcx>>>,
}
/// Result of the `normalize_canonicalized_{{,inherent_}projection,weak}_ty` queries.
/// Result of the `normalize_canonicalized_{{,inherent_}projection,free}_ty` queries.
#[derive(Clone, Debug, HashStable, TypeFoldable, TypeVisitable)]
pub struct NormalizationResult<'tcx> {
/// Result of the normalization.

View file

@ -106,7 +106,7 @@ impl<'tcx> Interner for TyCtxt<'tcx> {
) -> Self::PredefinedOpaques {
self.mk_predefined_opaques_in_body(data)
}
type DefiningOpaqueTypes = &'tcx ty::List<LocalDefId>;
type LocalDefIds = &'tcx ty::List<LocalDefId>;
type CanonicalVars = CanonicalVarInfos<'tcx>;
fn mk_canonical_var_infos(self, infos: &[ty::CanonicalVarInfo<Self>]) -> Self::CanonicalVars {
self.mk_canonical_var_infos(infos)
@ -226,7 +226,7 @@ impl<'tcx> Interner for TyCtxt<'tcx> {
}
}
DefKind::OpaqueTy => ty::Opaque,
DefKind::TyAlias => ty::Weak,
DefKind::TyAlias => ty::Free,
kind => bug!("unexpected DefKind in AliasTy: {kind:?}"),
}
}
@ -242,7 +242,7 @@ impl<'tcx> Interner for TyCtxt<'tcx> {
}
}
DefKind::OpaqueTy => ty::AliasTermKind::OpaqueTy,
DefKind::TyAlias => ty::AliasTermKind::WeakTy,
DefKind::TyAlias => ty::AliasTermKind::FreeTy,
DefKind::AssocConst => ty::AliasTermKind::ProjectionConst,
DefKind::AnonConst | DefKind::Const | DefKind::Ctor(_, CtorKind::Const) => {
ty::AliasTermKind::UnevaluatedConst
@ -674,9 +674,24 @@ impl<'tcx> Interner for TyCtxt<'tcx> {
self.anonymize_bound_vars(binder)
}
fn opaque_types_defined_by(self, defining_anchor: LocalDefId) -> Self::DefiningOpaqueTypes {
fn opaque_types_defined_by(self, defining_anchor: LocalDefId) -> Self::LocalDefIds {
self.opaque_types_defined_by(defining_anchor)
}
fn opaque_types_and_generators_defined_by(
self,
defining_anchor: Self::LocalDefId,
) -> Self::LocalDefIds {
if self.next_trait_solver_globally() {
self.mk_local_def_ids_from_iter(
self.opaque_types_defined_by(defining_anchor)
.iter()
.chain(self.stalled_generators_within(defining_anchor)),
)
} else {
self.opaque_types_defined_by(defining_anchor)
}
}
}
macro_rules! bidirectional_lang_item_map {
@ -2906,11 +2921,11 @@ impl<'tcx> TyCtxt<'tcx> {
self.interners.intern_clauses(clauses)
}
pub fn mk_local_def_ids(self, clauses: &[LocalDefId]) -> &'tcx List<LocalDefId> {
pub fn mk_local_def_ids(self, def_ids: &[LocalDefId]) -> &'tcx List<LocalDefId> {
// FIXME consider asking the input slice to be sorted to avoid
// re-interning permutations, in which case that would be asserted
// here.
self.intern_local_def_ids(clauses)
self.intern_local_def_ids(def_ids)
}
pub fn mk_local_def_ids_from_iter<I, T>(self, iter: I) -> T::Output

View file

@ -205,7 +205,7 @@ impl<'tcx> Ty<'tcx> {
ty::Placeholder(..) => "higher-ranked type".into(),
ty::Bound(..) => "bound type variable".into(),
ty::Alias(ty::Projection | ty::Inherent, _) => "associated type".into(),
ty::Alias(ty::Weak, _) => "type alias".into(),
ty::Alias(ty::Free, _) => "type alias".into(),
ty::Param(_) => "type parameter".into(),
ty::Alias(ty::Opaque, ..) => "opaque type".into(),
}

View file

@ -127,7 +127,7 @@ impl<'tcx> Ty<'tcx> {
InhabitedPredicate::True
}
Never => InhabitedPredicate::False,
Param(_) | Alias(ty::Projection | ty::Weak, _) => InhabitedPredicate::GenericType(self),
Param(_) | Alias(ty::Projection | ty::Free, _) => InhabitedPredicate::GenericType(self),
Alias(ty::Opaque, alias_ty) => {
match alias_ty.def_id.as_local() {
// Foreign opaque is considered inhabited.

View file

@ -820,7 +820,7 @@ pub trait PrettyPrinter<'tcx>: Printer<'tcx> + fmt::Write {
ty::Foreign(def_id) => {
p!(print_def_path(def_id, &[]));
}
ty::Alias(ty::Projection | ty::Inherent | ty::Weak, ref data) => {
ty::Alias(ty::Projection | ty::Inherent | ty::Free, ref data) => {
p!(print(data))
}
ty::Placeholder(placeholder) => match placeholder.bound.kind {
@ -3205,7 +3205,7 @@ define_print! {
p!(print_def_path(self.def_id, self.args));
}
}
| ty::AliasTermKind::WeakTy
| ty::AliasTermKind::FreeTy
| ty::AliasTermKind::OpaqueTy
| ty::AliasTermKind::UnevaluatedConst
| ty::AliasTermKind::ProjectionConst => {

View file

@ -489,7 +489,7 @@ impl<'tcx> Ty<'tcx> {
(kind, tcx.def_kind(alias_ty.def_id)),
(ty::Opaque, DefKind::OpaqueTy)
| (ty::Projection | ty::Inherent, DefKind::AssocTy)
| (ty::Weak, DefKind::TyAlias)
| (ty::Free, DefKind::TyAlias)
);
Ty::new(tcx, Alias(kind, alias_ty))
}
@ -1774,9 +1774,7 @@ impl<'tcx> Ty<'tcx> {
match pointee_ty.ptr_metadata_ty_or_tail(tcx, |x| x) {
Ok(metadata_ty) => metadata_ty,
Err(tail_ty) => {
let Some(metadata_def_id) = tcx.lang_items().metadata_type() else {
bug!("No metadata_type lang item while looking at {self:?}")
};
let metadata_def_id = tcx.require_lang_item(LangItem::Metadata, None);
Ty::new_projection(tcx, metadata_def_id, [tail_ty])
}
}

View file

@ -911,7 +911,7 @@ impl<'tcx> TyCtxt<'tcx> {
|| self.extern_crate(key).is_some_and(|e| e.is_direct())
}
/// Expand any [weak alias types][weak] contained within the given `value`.
/// Expand any [free alias types][free] contained within the given `value`.
///
/// This should be used over other normalization routines in situations where
/// it's important not to normalize other alias types and where the predicates
@ -926,19 +926,19 @@ impl<'tcx> TyCtxt<'tcx> {
/// <div class="warning">
/// This delays a bug on overflow! Therefore you need to be certain that the
/// contained types get fully normalized at a later stage. Note that even on
/// overflow all well-behaved weak alias types get expanded correctly, so the
/// overflow all well-behaved free alias types get expanded correctly, so the
/// result is still useful.
/// </div>
///
/// [weak]: ty::Weak
pub fn expand_weak_alias_tys<T: TypeFoldable<TyCtxt<'tcx>>>(self, value: T) -> T {
value.fold_with(&mut WeakAliasTypeExpander { tcx: self, depth: 0 })
/// [free]: ty::Free
pub fn expand_free_alias_tys<T: TypeFoldable<TyCtxt<'tcx>>>(self, value: T) -> T {
value.fold_with(&mut FreeAliasTypeExpander { tcx: self, depth: 0 })
}
/// Peel off all [weak alias types] in this type until there are none left.
/// Peel off all [free alias types] in this type until there are none left.
///
/// This only expands weak alias types in “head” / outermost positions. It can
/// be used over [expand_weak_alias_tys] as an optimization in situations where
/// This only expands free alias types in “head” / outermost positions. It can
/// be used over [expand_free_alias_tys] as an optimization in situations where
/// one only really cares about the *kind* of the final aliased type but not
/// the types the other constituent types alias.
///
@ -947,17 +947,17 @@ impl<'tcx> TyCtxt<'tcx> {
/// type gets fully normalized at a later stage.
/// </div>
///
/// [weak]: ty::Weak
/// [expand_weak_alias_tys]: Self::expand_weak_alias_tys
pub fn peel_off_weak_alias_tys(self, mut ty: Ty<'tcx>) -> Ty<'tcx> {
let ty::Alias(ty::Weak, _) = ty.kind() else { return ty };
/// [free]: ty::Free
/// [expand_free_alias_tys]: Self::expand_free_alias_tys
pub fn peel_off_free_alias_tys(self, mut ty: Ty<'tcx>) -> Ty<'tcx> {
let ty::Alias(ty::Free, _) = ty.kind() else { return ty };
let limit = self.recursion_limit();
let mut depth = 0;
while let ty::Alias(ty::Weak, alias) = ty.kind() {
while let ty::Alias(ty::Free, alias) = ty.kind() {
if !limit.value_within_limit(depth) {
let guar = self.dcx().delayed_bug("overflow expanding weak alias type");
let guar = self.dcx().delayed_bug("overflow expanding free alias type");
return Ty::new_error(self, guar);
}
@ -985,7 +985,7 @@ impl<'tcx> TyCtxt<'tcx> {
}
ty::AliasTermKind::OpaqueTy => Some(self.variances_of(def_id)),
ty::AliasTermKind::InherentTy
| ty::AliasTermKind::WeakTy
| ty::AliasTermKind::FreeTy
| ty::AliasTermKind::UnevaluatedConst
| ty::AliasTermKind::ProjectionConst => None,
}
@ -1078,25 +1078,25 @@ impl<'tcx> TypeFolder<TyCtxt<'tcx>> for OpaqueTypeExpander<'tcx> {
}
}
struct WeakAliasTypeExpander<'tcx> {
struct FreeAliasTypeExpander<'tcx> {
tcx: TyCtxt<'tcx>,
depth: usize,
}
impl<'tcx> TypeFolder<TyCtxt<'tcx>> for WeakAliasTypeExpander<'tcx> {
impl<'tcx> TypeFolder<TyCtxt<'tcx>> for FreeAliasTypeExpander<'tcx> {
fn cx(&self) -> TyCtxt<'tcx> {
self.tcx
}
fn fold_ty(&mut self, ty: Ty<'tcx>) -> Ty<'tcx> {
if !ty.has_type_flags(ty::TypeFlags::HAS_TY_WEAK) {
if !ty.has_type_flags(ty::TypeFlags::HAS_TY_FREE_ALIAS) {
return ty;
}
let ty::Alias(ty::Weak, alias) = ty.kind() else {
let ty::Alias(ty::Free, alias) = ty.kind() else {
return ty.super_fold_with(self);
};
if !self.tcx.recursion_limit().value_within_limit(self.depth) {
let guar = self.tcx.dcx().delayed_bug("overflow expanding weak alias type");
let guar = self.tcx.dcx().delayed_bug("overflow expanding free alias type");
return Ty::new_error(self.tcx, guar);
}
@ -1107,7 +1107,7 @@ impl<'tcx> TypeFolder<TyCtxt<'tcx>> for WeakAliasTypeExpander<'tcx> {
}
fn fold_const(&mut self, ct: ty::Const<'tcx>) -> ty::Const<'tcx> {
if !ct.has_type_flags(ty::TypeFlags::HAS_TY_WEAK) {
if !ct.has_type_flags(ty::TypeFlags::HAS_TY_FREE_ALIAS) {
return ct;
}
ct.super_fold_with(self)

View file

@ -139,7 +139,7 @@ impl<'tcx> TyCtxt<'tcx> {
{
let mut collector = LateBoundRegionsCollector::new(just_constrained);
let value = value.skip_binder();
let value = if just_constrained { self.expand_weak_alias_tys(value) } else { value };
let value = if just_constrained { self.expand_free_alias_tys(value) } else { value };
value.visit_with(&mut collector);
collector.regions
}
@ -182,8 +182,8 @@ impl<'tcx> TypeVisitor<TyCtxt<'tcx>> for LateBoundRegionsCollector {
ty::Alias(ty::Projection | ty::Inherent | ty::Opaque, _) => {
return;
}
// All weak alias types should've been expanded beforehand.
ty::Alias(ty::Weak, _) => bug!("unexpected weak alias type"),
// All free alias types should've been expanded beforehand.
ty::Alias(ty::Free, _) => bug!("unexpected free alias type"),
_ => {}
}
}

View file

@ -114,26 +114,11 @@ where
self.reachable_blocks.insert_all()
}
/// Returns the underlying `Results`.
pub fn results(&self) -> &Results<'tcx, A> {
&self.results
}
/// Returns the underlying `Results`.
pub fn mut_results(&mut self) -> &mut Results<'tcx, A> {
&mut self.results
}
/// Returns the `Analysis` used to generate the underlying `Results`.
pub fn analysis(&self) -> &A {
&self.results.analysis
}
/// Returns the `Analysis` used to generate the underlying `Results`.
pub fn mut_analysis(&mut self) -> &mut A {
&mut self.results.analysis
}
/// Resets the cursor to hold the entry set for the given basic block.
///
/// For forward dataflow analyses, this is the dataflow state prior to the first statement.

View file

@ -43,7 +43,7 @@ pub trait Direction {
block: BasicBlock,
block_data: &'mir mir::BasicBlockData<'tcx>,
results: &mut Results<'tcx, A>,
vis: &mut impl ResultsVisitor<'mir, 'tcx, A>,
vis: &mut impl ResultsVisitor<'tcx, A>,
) where
A: Analysis<'tcx>;
}
@ -212,7 +212,7 @@ impl Direction for Backward {
block: BasicBlock,
block_data: &'mir mir::BasicBlockData<'tcx>,
results: &mut Results<'tcx, A>,
vis: &mut impl ResultsVisitor<'mir, 'tcx, A>,
vis: &mut impl ResultsVisitor<'tcx, A>,
) where
A: Analysis<'tcx>,
{
@ -394,7 +394,7 @@ impl Direction for Forward {
block: BasicBlock,
block_data: &'mir mir::BasicBlockData<'tcx>,
results: &mut Results<'tcx, A>,
vis: &mut impl ResultsVisitor<'mir, 'tcx, A>,
vis: &mut impl ResultsVisitor<'tcx, A>,
) where
A: Analysis<'tcx>,
{

View file

@ -201,11 +201,12 @@ struct Formatter<'mir, 'tcx, A>
where
A: Analysis<'tcx>,
{
body: &'mir Body<'tcx>,
// The `RefCell` is used because `<Formatter as Labeller>::node_label`
// takes `&self`, but it needs to modify the cursor. This is also the
// takes `&self`, but it needs to modify the results. This is also the
// reason for the `Formatter`/`BlockFormatter` split; `BlockFormatter` has
// the operations that involve the mutation, i.e. within the `borrow_mut`.
cursor: RefCell<ResultsCursor<'mir, 'tcx, A>>,
results: RefCell<&'mir mut Results<'tcx, A>>,
style: OutputStyle,
reachable: DenseBitSet<BasicBlock>,
}
@ -220,11 +221,7 @@ where
style: OutputStyle,
) -> Self {
let reachable = traversal::reachable_as_bitset(body);
Formatter { cursor: results.as_results_cursor(body).into(), style, reachable }
}
fn body(&self) -> &'mir Body<'tcx> {
self.cursor.borrow().body()
Formatter { body, results: results.into(), style, reachable }
}
}
@ -253,7 +250,7 @@ where
type Edge = CfgEdge;
fn graph_id(&self) -> dot::Id<'_> {
let name = graphviz_safe_def_name(self.body().source.def_id());
let name = graphviz_safe_def_name(self.body.source.def_id());
dot::Id::new(format!("graph_for_def_id_{name}")).unwrap()
}
@ -262,10 +259,16 @@ where
}
fn node_label(&self, block: &Self::Node) -> dot::LabelText<'_> {
let mut cursor = self.cursor.borrow_mut();
let mut fmt =
BlockFormatter { cursor: &mut cursor, style: self.style, bg: Background::Light };
let label = fmt.write_node_label(*block).unwrap();
let mut results = self.results.borrow_mut();
let diffs = StateDiffCollector::run(self.body, *block, *results, self.style);
let mut fmt = BlockFormatter {
cursor: results.as_results_cursor(self.body),
style: self.style,
bg: Background::Light,
};
let label = fmt.write_node_label(*block, diffs).unwrap();
dot::LabelText::html(String::from_utf8(label).unwrap())
}
@ -275,7 +278,7 @@ where
}
fn edge_label(&self, e: &Self::Edge) -> dot::LabelText<'_> {
let label = &self.body()[e.source].terminator().kind.fmt_successor_labels()[e.index];
let label = &self.body[e.source].terminator().kind.fmt_successor_labels()[e.index];
dot::LabelText::label(label.clone())
}
}
@ -288,7 +291,7 @@ where
type Edge = CfgEdge;
fn nodes(&self) -> dot::Nodes<'_, Self::Node> {
self.body()
self.body
.basic_blocks
.indices()
.filter(|&idx| self.reachable.contains(idx))
@ -297,10 +300,10 @@ where
}
fn edges(&self) -> dot::Edges<'_, Self::Edge> {
let body = self.body();
body.basic_blocks
self.body
.basic_blocks
.indices()
.flat_map(|bb| dataflow_successors(body, bb))
.flat_map(|bb| dataflow_successors(self.body, bb))
.collect::<Vec<_>>()
.into()
}
@ -310,20 +313,20 @@ where
}
fn target(&self, edge: &Self::Edge) -> Self::Node {
self.body()[edge.source].terminator().successors().nth(edge.index).unwrap()
self.body[edge.source].terminator().successors().nth(edge.index).unwrap()
}
}
struct BlockFormatter<'a, 'mir, 'tcx, A>
struct BlockFormatter<'mir, 'tcx, A>
where
A: Analysis<'tcx>,
{
cursor: &'a mut ResultsCursor<'mir, 'tcx, A>,
cursor: ResultsCursor<'mir, 'tcx, A>,
bg: Background,
style: OutputStyle,
}
impl<'tcx, A> BlockFormatter<'_, '_, 'tcx, A>
impl<'tcx, A> BlockFormatter<'_, 'tcx, A>
where
A: Analysis<'tcx>,
A::Domain: DebugWithContext<A>,
@ -336,7 +339,11 @@ where
bg
}
fn write_node_label(&mut self, block: BasicBlock) -> io::Result<Vec<u8>> {
fn write_node_label(
&mut self,
block: BasicBlock,
diffs: StateDiffCollector<A::Domain>,
) -> io::Result<Vec<u8>> {
use std::io::Write;
// Sample output:
@ -392,7 +399,7 @@ where
self.write_row_with_full_state(w, "", "(on start)")?;
// D + E: Statement and terminator transfer functions
self.write_statements_and_terminator(w, block)?;
self.write_statements_and_terminator(w, block, diffs)?;
// F: State at end of block
@ -575,14 +582,8 @@ where
&mut self,
w: &mut impl io::Write,
block: BasicBlock,
diffs: StateDiffCollector<A::Domain>,
) -> io::Result<()> {
let diffs = StateDiffCollector::run(
self.cursor.body(),
block,
self.cursor.mut_results(),
self.style,
);
let mut diffs_before = diffs.before.map(|v| v.into_iter());
let mut diffs_after = diffs.after.into_iter();
@ -709,7 +710,7 @@ impl<D> StateDiffCollector<D> {
}
}
impl<'tcx, A> ResultsVisitor<'_, 'tcx, A> for StateDiffCollector<A::Domain>
impl<'tcx, A> ResultsVisitor<'tcx, A> for StateDiffCollector<A::Domain>
where
A: Analysis<'tcx>,
A::Domain: DebugWithContext<A>,

View file

@ -47,7 +47,7 @@ where
&mut self,
body: &'mir Body<'tcx>,
blocks: impl IntoIterator<Item = BasicBlock>,
vis: &mut impl ResultsVisitor<'mir, 'tcx, A>,
vis: &mut impl ResultsVisitor<'tcx, A>,
) {
visit_results(body, blocks, self, vis)
}
@ -55,7 +55,7 @@ where
pub fn visit_reachable_with<'mir>(
&mut self,
body: &'mir Body<'tcx>,
vis: &mut impl ResultsVisitor<'mir, 'tcx, A>,
vis: &mut impl ResultsVisitor<'tcx, A>,
) {
let blocks = traversal::reachable(body);
visit_results(body, blocks.map(|(bb, _)| bb), self, vis)

View file

@ -8,7 +8,7 @@ pub fn visit_results<'mir, 'tcx, A>(
body: &'mir mir::Body<'tcx>,
blocks: impl IntoIterator<Item = BasicBlock>,
results: &mut Results<'tcx, A>,
vis: &mut impl ResultsVisitor<'mir, 'tcx, A>,
vis: &mut impl ResultsVisitor<'tcx, A>,
) where
A: Analysis<'tcx>,
{
@ -29,7 +29,7 @@ pub fn visit_results<'mir, 'tcx, A>(
/// A visitor over the results of an `Analysis`. Use this when you want to inspect domain values in
/// many or all locations; use `ResultsCursor` if you want to inspect domain values only in certain
/// locations.
pub trait ResultsVisitor<'mir, 'tcx, A>
pub trait ResultsVisitor<'tcx, A>
where
A: Analysis<'tcx>,
{
@ -40,7 +40,7 @@ where
&mut self,
_results: &mut Results<'tcx, A>,
_state: &A::Domain,
_statement: &'mir mir::Statement<'tcx>,
_statement: &mir::Statement<'tcx>,
_location: Location,
) {
}
@ -50,7 +50,7 @@ where
&mut self,
_results: &mut Results<'tcx, A>,
_state: &A::Domain,
_statement: &'mir mir::Statement<'tcx>,
_statement: &mir::Statement<'tcx>,
_location: Location,
) {
}
@ -60,7 +60,7 @@ where
&mut self,
_results: &mut Results<'tcx, A>,
_state: &A::Domain,
_terminator: &'mir mir::Terminator<'tcx>,
_terminator: &mir::Terminator<'tcx>,
_location: Location,
) {
}
@ -72,7 +72,7 @@ where
&mut self,
_results: &mut Results<'tcx, A>,
_state: &A::Domain,
_terminator: &'mir mir::Terminator<'tcx>,
_terminator: &mir::Terminator<'tcx>,
_location: Location,
) {
}

View file

@ -120,12 +120,12 @@ struct Visitor<'a, N: Idx> {
values: SparseIntervalMatrix<N, PointIndex>,
}
impl<'mir, 'tcx, A, N> ResultsVisitor<'mir, 'tcx, A> for Visitor<'_, N>
impl<'tcx, A, N> ResultsVisitor<'tcx, A> for Visitor<'_, N>
where
A: Analysis<'tcx, Domain = DenseBitSet<N>>,
N: Idx,
{
fn visit_after_primary_statement_effect(
fn visit_after_primary_statement_effect<'mir>(
&mut self,
_results: &mut Results<'tcx, A>,
state: &A::Domain,
@ -139,7 +139,7 @@ where
});
}
fn visit_after_primary_terminator_effect(
fn visit_after_primary_terminator_effect<'mir>(
&mut self,
_results: &mut Results<'tcx, A>,
state: &A::Domain,

View file

@ -3,6 +3,7 @@ use std::ops::ControlFlow;
use rustc_data_structures::graph::iterate::{
NodeStatus, TriColorDepthFirstSearch, TriColorVisitor,
};
use rustc_hir::LangItem;
use rustc_hir::def::DefKind;
use rustc_middle::mir::{self, BasicBlock, BasicBlocks, Body, Terminator, TerminatorKind};
use rustc_middle::ty::{self, GenericArg, GenericArgs, Instance, Ty, TyCtxt};
@ -44,8 +45,7 @@ impl<'tcx> MirLint<'tcx> for CheckDropRecursion {
if let DefKind::AssocFn = tcx.def_kind(def_id)
&& let Some(trait_ref) =
tcx.impl_of_method(def_id.to_def_id()).and_then(|def_id| tcx.impl_trait_ref(def_id))
&& let Some(drop_trait) = tcx.lang_items().drop_trait()
&& drop_trait == trait_ref.instantiate_identity().def_id
&& tcx.is_lang_item(trait_ref.instantiate_identity().def_id, LangItem::Drop)
// avoid erroneous `Drop` impls from causing ICEs below
&& let sig = tcx.fn_sig(def_id).instantiate_identity()
&& sig.inputs().skip_binder().len() == 1

View file

@ -875,14 +875,14 @@ struct StorageConflictVisitor<'a, 'tcx> {
eligible_storage_live: DenseBitSet<Local>,
}
impl<'a, 'tcx> ResultsVisitor<'a, 'tcx, MaybeRequiresStorage<'a, 'tcx>>
impl<'a, 'tcx> ResultsVisitor<'tcx, MaybeRequiresStorage<'a, 'tcx>>
for StorageConflictVisitor<'a, 'tcx>
{
fn visit_after_early_statement_effect(
&mut self,
_results: &mut Results<'tcx, MaybeRequiresStorage<'a, 'tcx>>,
state: &DenseBitSet<Local>,
_statement: &'a Statement<'tcx>,
_statement: &Statement<'tcx>,
loc: Location,
) {
self.apply_state(state, loc);
@ -892,7 +892,7 @@ impl<'a, 'tcx> ResultsVisitor<'a, 'tcx, MaybeRequiresStorage<'a, 'tcx>>
&mut self,
_results: &mut Results<'tcx, MaybeRequiresStorage<'a, 'tcx>>,
state: &DenseBitSet<Local>,
_terminator: &'a Terminator<'tcx>,
_terminator: &Terminator<'tcx>,
loc: Location,
) {
self.apply_state(state, loc);

View file

@ -958,13 +958,13 @@ fn try_write_constant<'tcx>(
interp_ok(())
}
impl<'mir, 'tcx> ResultsVisitor<'mir, 'tcx, ConstAnalysis<'_, 'tcx>> for Collector<'_, 'tcx> {
impl<'tcx> ResultsVisitor<'tcx, ConstAnalysis<'_, 'tcx>> for Collector<'_, 'tcx> {
#[instrument(level = "trace", skip(self, results, statement))]
fn visit_after_early_statement_effect(
&mut self,
results: &mut Results<'tcx, ConstAnalysis<'_, 'tcx>>,
state: &State<FlatSet<Scalar>>,
statement: &'mir Statement<'tcx>,
statement: &Statement<'tcx>,
location: Location,
) {
match &statement.kind {
@ -986,7 +986,7 @@ impl<'mir, 'tcx> ResultsVisitor<'mir, 'tcx, ConstAnalysis<'_, 'tcx>> for Collect
&mut self,
results: &mut Results<'tcx, ConstAnalysis<'_, 'tcx>>,
state: &State<FlatSet<Scalar>>,
statement: &'mir Statement<'tcx>,
statement: &Statement<'tcx>,
location: Location,
) {
match statement.kind {
@ -1011,7 +1011,7 @@ impl<'mir, 'tcx> ResultsVisitor<'mir, 'tcx, ConstAnalysis<'_, 'tcx>> for Collect
&mut self,
results: &mut Results<'tcx, ConstAnalysis<'_, 'tcx>>,
state: &State<FlatSet<Scalar>>,
terminator: &'mir Terminator<'tcx>,
terminator: &Terminator<'tcx>,
location: Location,
) {
OperandCollector {

View file

@ -70,10 +70,11 @@ pub(crate) struct UnknownCguCollectionMode<'a> {
pub mode: &'a str,
}
#[derive(LintDiagnostic)]
#[derive(Diagnostic)]
#[diag(monomorphize_abi_error_disabled_vector_type)]
#[help]
pub(crate) struct AbiErrorDisabledVectorType<'a> {
#[primary_span]
#[label]
pub span: Span,
pub required_feature: &'a str,
@ -82,9 +83,10 @@ pub(crate) struct AbiErrorDisabledVectorType<'a> {
pub is_call: bool,
}
#[derive(LintDiagnostic)]
#[derive(Diagnostic)]
#[diag(monomorphize_abi_error_unsupported_vector_type)]
pub(crate) struct AbiErrorUnsupportedVectorType<'a> {
#[primary_span]
#[label]
pub span: Span,
pub ty: Ty<'a>,

View file

@ -5,7 +5,7 @@ use rustc_hir::{CRATE_HIR_ID, HirId};
use rustc_middle::mir::{self, Location, traversal};
use rustc_middle::ty::layout::LayoutCx;
use rustc_middle::ty::{self, Instance, InstanceKind, Ty, TyCtxt, TypingEnv};
use rustc_session::lint::builtin::{ABI_UNSUPPORTED_VECTOR_TYPES, WASM_C_ABI};
use rustc_session::lint::builtin::WASM_C_ABI;
use rustc_span::def_id::DefId;
use rustc_span::{DUMMY_SP, Span, Symbol, sym};
use rustc_target::callconv::{ArgAbi, Conv, FnAbi, PassMode};
@ -50,34 +50,24 @@ fn do_check_simd_vector_abi<'tcx>(
let feature = match feature_def.iter().find(|(bits, _)| size.bits() <= *bits) {
Some((_, feature)) => feature,
None => {
let (span, hir_id) = loc();
tcx.emit_node_span_lint(
ABI_UNSUPPORTED_VECTOR_TYPES,
hir_id,
let (span, _hir_id) = loc();
tcx.dcx().emit_err(errors::AbiErrorUnsupportedVectorType {
span,
errors::AbiErrorUnsupportedVectorType {
span,
ty: arg_abi.layout.ty,
is_call,
},
);
ty: arg_abi.layout.ty,
is_call,
});
continue;
}
};
if !have_feature(Symbol::intern(feature)) {
// Emit error.
let (span, hir_id) = loc();
tcx.emit_node_span_lint(
ABI_UNSUPPORTED_VECTOR_TYPES,
hir_id,
let (span, _hir_id) = loc();
tcx.dcx().emit_err(errors::AbiErrorDisabledVectorType {
span,
errors::AbiErrorDisabledVectorType {
span,
required_feature: feature,
ty: arg_abi.layout.ty,
is_call,
},
);
required_feature: feature,
ty: arg_abi.layout.ty,
is_call,
});
}
}
}

View file

@ -223,7 +223,7 @@ where
match mono_item.instantiation_mode(cx.tcx) {
InstantiationMode::GloballyShared { .. } => {}
InstantiationMode::LocalCopy => {
if Some(mono_item.def_id()) != cx.tcx.lang_items().start_fn() {
if !cx.tcx.is_lang_item(mono_item.def_id(), LangItem::Start) {
continue;
}
}

View file

@ -595,7 +595,7 @@ where
}
ty::Alias(kind @ (ty::Projection | ty::Opaque), alias_ty) => (kind, alias_ty),
ty::Alias(ty::Inherent | ty::Weak, _) => {
ty::Alias(ty::Inherent | ty::Free, _) => {
self.cx().delay_bug(format!("could not normalize {self_ty:?}, it is not WF"));
return;
}

View file

@ -48,7 +48,7 @@ where
ty::Dynamic(..)
| ty::Param(..)
| ty::Alias(ty::Projection | ty::Inherent | ty::Weak, ..)
| ty::Alias(ty::Projection | ty::Inherent | ty::Free, ..)
| ty::Placeholder(..)
| ty::Bound(..)
| ty::Infer(_) => {

View file

@ -329,7 +329,7 @@ where
TypingMode::Coherence | TypingMode::PostAnalysis => false,
// During analysis, opaques are rigid unless they may be defined by
// the current body.
TypingMode::Analysis { defining_opaque_types: non_rigid_opaques }
TypingMode::Analysis { defining_opaque_types_and_generators: non_rigid_opaques }
| TypingMode::Borrowck { defining_opaque_types: non_rigid_opaques }
| TypingMode::PostBorrowckAnalysis { defined_opaque_types: non_rigid_opaques } => {
!def_id.as_local().is_some_and(|def_id| non_rigid_opaques.contains(&def_id))

View file

@ -1,7 +1,7 @@
//! Computes a normalizes-to (projection) goal for inherent associated types,
//! `#![feature(lazy_type_alias)]` and `#![feature(type_alias_impl_trait)]`.
//!
//! Since a weak alias is never ambiguous, this just computes the `type_of` of
//! Since a free alias is never ambiguous, this just computes the `type_of` of
//! the alias and registers the where-clauses of the type alias.
use rustc_type_ir::{self as ty, Interner};
@ -14,22 +14,22 @@ where
D: SolverDelegate<Interner = I>,
I: Interner,
{
pub(super) fn normalize_weak_type(
pub(super) fn normalize_free_alias(
&mut self,
goal: Goal<I, ty::NormalizesTo<I>>,
) -> QueryResult<I> {
let cx = self.cx();
let weak_ty = goal.predicate.alias;
let free_ty = goal.predicate.alias;
// Check where clauses
self.add_goals(
GoalSource::Misc,
cx.predicates_of(weak_ty.def_id)
.iter_instantiated(cx, weak_ty.args)
cx.predicates_of(free_ty.def_id)
.iter_instantiated(cx, free_ty.args)
.map(|pred| goal.with(cx, pred)),
);
let actual = cx.type_of(weak_ty.def_id).instantiate(cx, weak_ty.args);
let actual = cx.type_of(free_ty.def_id).instantiate(cx, free_ty.args);
self.instantiate_normalizes_to_term(goal, actual.into());
self.evaluate_added_goals_and_make_canonical_response(Certainty::Yes)

View file

@ -1,7 +1,7 @@
mod anon_const;
mod free_alias;
mod inherent;
mod opaque_types;
mod weak_types;
use rustc_type_ir::fast_reject::DeepRejectCtxt;
use rustc_type_ir::inherent::*;
@ -50,7 +50,7 @@ where
}
ty::AliasTermKind::InherentTy => self.normalize_inherent_associated_type(goal),
ty::AliasTermKind::OpaqueTy => self.normalize_opaque_type(goal),
ty::AliasTermKind::WeakTy => self.normalize_weak_type(goal),
ty::AliasTermKind::FreeTy => self.normalize_free_alias(goal),
ty::AliasTermKind::UnevaluatedConst => self.normalize_anon_const(goal),
}
}

View file

@ -33,11 +33,11 @@ where
);
self.evaluate_added_goals_and_make_canonical_response(Certainty::AMBIGUOUS)
}
TypingMode::Analysis { defining_opaque_types } => {
TypingMode::Analysis { defining_opaque_types_and_generators } => {
let Some(def_id) = opaque_ty
.def_id
.as_local()
.filter(|&def_id| defining_opaque_types.contains(&def_id))
.filter(|&def_id| defining_opaque_types_and_generators.contains(&def_id))
else {
self.structurally_instantiate_normalizes_to_term(goal, goal.predicate.alias);
return self.evaluate_added_goals_and_make_canonical_response(Certainty::Yes);

View file

@ -208,6 +208,11 @@ where
}
}
// We need to make sure to stall any coroutines we are inferring to avoid query cycles.
if let Some(cand) = ecx.try_stall_coroutine_witness(goal.predicate.self_ty()) {
return cand;
}
ecx.probe_and_evaluate_goal_for_constituent_tys(
CandidateSource::BuiltinImpl(BuiltinImplSource::Misc),
goal,
@ -259,6 +264,11 @@ where
return Err(NoSolution);
}
// We need to make sure to stall any coroutines we are inferring to avoid query cycles.
if let Some(cand) = ecx.try_stall_coroutine_witness(goal.predicate.self_ty()) {
return cand;
}
ecx.probe_and_evaluate_goal_for_constituent_tys(
CandidateSource::BuiltinImpl(BuiltinImplSource::Misc),
goal,
@ -1143,7 +1153,7 @@ where
ty::Dynamic(..)
| ty::Param(..)
| ty::Foreign(..)
| ty::Alias(ty::Projection | ty::Weak | ty::Inherent, ..)
| ty::Alias(ty::Projection | ty::Free | ty::Inherent, ..)
| ty::Placeholder(..) => Some(Err(NoSolution)),
ty::Infer(_) | ty::Bound(_, _) => panic!("unexpected type `{self_ty:?}`"),
@ -1368,4 +1378,28 @@ where
let candidates = self.assemble_and_evaluate_candidates(goal, AssembleCandidatesFrom::All);
self.merge_trait_candidates(goal, candidates)
}
/// If `self_ty` is the witness of a coroutine that is still being inferred in
/// the current typeck root (i.e. its `LocalDefId` is listed in
/// `TypingMode::Analysis::defining_opaque_types_and_generators`), force the
/// goal to be ambiguous instead of evaluating it. Per the callers' comments,
/// stalling such coroutines is needed to avoid query cycles.
///
/// Returns `None` when the type is not a stalled coroutine witness and the
/// goal should be handled normally.
fn try_stall_coroutine_witness(
    &mut self,
    self_ty: I::Ty,
) -> Option<Result<Candidate<I>, NoSolution>> {
    if let ty::CoroutineWitness(def_id, _) = self_ty.kind() {
        match self.typing_mode() {
            TypingMode::Analysis {
                defining_opaque_types_and_generators: stalled_generators,
            } => {
                // Only coroutines local to the current typeck root can be
                // stalled; foreign or already-checked coroutines fall through.
                if def_id.as_local().is_some_and(|def_id| stalled_generators.contains(&def_id))
                {
                    return Some(self.forced_ambiguity(MaybeCause::Ambiguity));
                }
            }
            // In every other typing mode coroutine witnesses are already fully
            // defined, so there is nothing to stall on.
            TypingMode::Coherence
            | TypingMode::PostAnalysis
            | TypingMode::Borrowck { defining_opaque_types: _ }
            | TypingMode::PostBorrowckAnalysis { defined_opaque_types: _ } => {}
        }
    }
    None
}
}

View file

@ -893,6 +893,7 @@ parse_unknown_prefix = prefix `{$prefix}` is unknown
.label = unknown prefix
.note = prefixed identifiers and literals are reserved since Rust 2021
.suggestion_br = use `br` for a raw byte string
.suggestion_cr = use `cr` for a raw C-string
.suggestion_str = if you meant to write a string literal, use double quotes
.suggestion_whitespace = consider inserting whitespace here

View file

@ -2140,6 +2140,13 @@ pub(crate) enum UnknownPrefixSugg {
style = "verbose"
)]
UseBr(#[primary_span] Span),
#[suggestion(
parse_suggestion_cr,
code = "cr",
applicability = "maybe-incorrect",
style = "verbose"
)]
UseCr(#[primary_span] Span),
#[suggestion(
parse_suggestion_whitespace,
code = " ",

View file

@ -1,14 +1,17 @@
use rustc_ast::token::Delimiter;
use rustc_errors::Diag;
use rustc_session::parse::ParseSess;
use rustc_span::Span;
use rustc_span::source_map::SourceMap;
use super::UnmatchedDelim;
use crate::errors::MismatchedClosingDelimiter;
use crate::pprust;
#[derive(Default)]
pub(super) struct TokenTreeDiagInfo {
/// Stack of open delimiters and their spans. Used for error message.
pub open_braces: Vec<(Delimiter, Span)>,
pub open_delimiters: Vec<(Delimiter, Span)>,
pub unmatched_delims: Vec<UnmatchedDelim>,
/// Used only for error recovery when arriving to EOF with mismatched braces.
@ -108,7 +111,7 @@ pub(super) fn report_suspicious_mismatch_block(
} else {
// If there is no suspicious span, give the last properly closed block may help
if let Some(parent) = diag_info.matching_block_spans.last()
&& diag_info.open_braces.last().is_none()
&& diag_info.open_delimiters.last().is_none()
&& diag_info.empty_block_spans.iter().all(|&sp| sp != parent.0.to(parent.1))
{
err.span_label(parent.0, "this opening brace...");
@ -116,3 +119,24 @@ pub(super) fn report_suspicious_mismatch_block(
}
}
}
/// Build a `MismatchedClosingDelimiter` diagnostic for an unmatched delimiter.
///
/// Returns `None` when no closing delimiter was actually found: that means an
/// `Eof` was hit. We already emit those errors elsewhere; such entries are
/// added to `unmatched_delims` only for error recovery in the `Parser`.
pub(crate) fn make_unclosed_delims_error(
    unmatched: UnmatchedDelim,
    psess: &ParseSess,
) -> Option<Diag<'_>> {
    let found_delim = unmatched.found_delim?;

    // Point at the found closing delimiter, plus the unclosed opening span if known.
    let spans: Vec<_> =
        std::iter::once(unmatched.found_span).chain(unmatched.unclosed_span).collect();

    Some(psess.dcx().create_err(MismatchedClosingDelimiter {
        spans,
        delimiter: pprust::token_kind_to_string(&found_delim.as_close_token_kind()).to_string(),
        unmatched: unmatched.found_span,
        opening_candidate: unmatched.candidate_span,
        unclosed: unmatched.unclosed_span,
    }))
}

View file

@ -1,5 +1,6 @@
use std::ops::Range;
use diagnostics::make_unclosed_delims_error;
use rustc_ast::ast::{self, AttrStyle};
use rustc_ast::token::{self, CommentKind, Delimiter, IdentIsRaw, Token, TokenKind};
use rustc_ast::tokenstream::TokenStream;
@ -17,9 +18,9 @@ use rustc_session::parse::ParseSess;
use rustc_span::{BytePos, Pos, Span, Symbol};
use tracing::debug;
use crate::errors;
use crate::lexer::diagnostics::TokenTreeDiagInfo;
use crate::lexer::unicode_chars::UNICODE_ARRAY;
use crate::{errors, make_unclosed_delims_error};
mod diagnostics;
mod tokentrees;
@ -256,7 +257,6 @@ impl<'psess, 'src> Lexer<'psess, 'src> {
let lit_start = start + BytePos(prefix_len);
self.pos = lit_start;
self.cursor = Cursor::new(&str_before[prefix_len as usize..]);
self.report_unknown_prefix(start);
let prefix_span = self.mk_sp(start, lit_start);
return (Token::new(self.ident(start), prefix_span), preceded_by_whitespace);
@ -789,13 +789,14 @@ impl<'psess, 'src> Lexer<'psess, 'src> {
fn report_unknown_prefix(&self, start: BytePos) {
let prefix_span = self.mk_sp(start, self.pos);
let prefix = self.str_from_to(start, self.pos);
let expn_data = prefix_span.ctxt().outer_expn_data();
if expn_data.edition.at_least_rust_2021() {
// In Rust 2021, this is a hard error.
let sugg = if prefix == "rb" {
Some(errors::UnknownPrefixSugg::UseBr(prefix_span))
} else if prefix == "rc" {
Some(errors::UnknownPrefixSugg::UseCr(prefix_span))
} else if expn_data.is_root() {
if self.cursor.first() == '\''
&& let Some(start) = self.last_lifetime

View file

@ -54,8 +54,8 @@ impl<'psess, 'src> Lexer<'psess, 'src> {
let mut err = self.dcx().struct_span_err(self.token.span, msg);
let unclosed_delimiter_show_limit = 5;
let len = usize::min(unclosed_delimiter_show_limit, self.diag_info.open_braces.len());
for &(_, span) in &self.diag_info.open_braces[..len] {
let len = usize::min(unclosed_delimiter_show_limit, self.diag_info.open_delimiters.len());
for &(_, span) in &self.diag_info.open_delimiters[..len] {
err.span_label(span, "unclosed delimiter");
self.diag_info.unmatched_delims.push(UnmatchedDelim {
found_delim: None,
@ -65,19 +65,19 @@ impl<'psess, 'src> Lexer<'psess, 'src> {
});
}
if let Some((_, span)) = self.diag_info.open_braces.get(unclosed_delimiter_show_limit)
&& self.diag_info.open_braces.len() >= unclosed_delimiter_show_limit + 2
if let Some((_, span)) = self.diag_info.open_delimiters.get(unclosed_delimiter_show_limit)
&& self.diag_info.open_delimiters.len() >= unclosed_delimiter_show_limit + 2
{
err.span_label(
*span,
format!(
"another {} unclosed delimiters begin from here",
self.diag_info.open_braces.len() - unclosed_delimiter_show_limit
self.diag_info.open_delimiters.len() - unclosed_delimiter_show_limit
),
);
}
if let Some((delim, _)) = self.diag_info.open_braces.last() {
if let Some((delim, _)) = self.diag_info.open_delimiters.last() {
report_suspicious_mismatch_block(
&mut err,
&self.diag_info,
@ -95,7 +95,7 @@ impl<'psess, 'src> Lexer<'psess, 'src> {
// The span for beginning of the delimited section.
let pre_span = self.token.span;
self.diag_info.open_braces.push((open_delim, self.token.span));
self.diag_info.open_delimiters.push((open_delim, self.token.span));
// Lex the token trees within the delimiters.
// We stop at any delimiter so we can try to recover if the user
@ -109,11 +109,12 @@ impl<'psess, 'src> Lexer<'psess, 'src> {
let close_spacing = if let Some(close_delim) = self.token.kind.close_delim() {
if close_delim == open_delim {
// Correct delimiter.
let (open_brace, open_brace_span) = self.diag_info.open_braces.pop().unwrap();
let close_brace_span = self.token.span;
let (open_delimiter, open_delimiter_span) =
self.diag_info.open_delimiters.pop().unwrap();
let close_delimiter_span = self.token.span;
if tts.is_empty() && close_delim == Delimiter::Brace {
let empty_block_span = open_brace_span.to(close_brace_span);
let empty_block_span = open_delimiter_span.to(close_delimiter_span);
if !sm.is_multiline(empty_block_span) {
// Only track if the block is in the form of `{}`, otherwise it is
// likely that it was written on purpose.
@ -122,9 +123,11 @@ impl<'psess, 'src> Lexer<'psess, 'src> {
}
// only add braces
if let (Delimiter::Brace, Delimiter::Brace) = (open_brace, open_delim) {
if let (Delimiter::Brace, Delimiter::Brace) = (open_delimiter, open_delim) {
// Add all the matching spans, we will sort by span later
self.diag_info.matching_block_spans.push((open_brace_span, close_brace_span));
self.diag_info
.matching_block_spans
.push((open_delimiter_span, close_delimiter_span));
}
// Move past the closing delimiter.
@ -140,18 +143,18 @@ impl<'psess, 'src> Lexer<'psess, 'src> {
// This is a conservative error: only report the last unclosed
// delimiter. The previous unclosed delimiters could actually be
// closed! The lexer just hasn't gotten to them yet.
if let Some(&(_, sp)) = self.diag_info.open_braces.last() {
if let Some(&(_, sp)) = self.diag_info.open_delimiters.last() {
unclosed_delimiter = Some(sp);
};
for (brace, brace_span) in &self.diag_info.open_braces {
if same_indentation_level(sm, self.token.span, *brace_span)
&& brace == &close_delim
for (delimiter, delimiter_span) in &self.diag_info.open_delimiters {
if same_indentation_level(sm, self.token.span, *delimiter_span)
&& delimiter == &close_delim
{
// high likelihood of these two corresponding
candidate = Some(*brace_span);
candidate = Some(*delimiter_span);
}
}
let (_, _) = self.diag_info.open_braces.pop().unwrap();
let (_, _) = self.diag_info.open_delimiters.pop().unwrap();
self.diag_info.unmatched_delims.push(UnmatchedDelim {
found_delim: Some(close_delim),
found_span: self.token.span,
@ -159,7 +162,7 @@ impl<'psess, 'src> Lexer<'psess, 'src> {
candidate_span: candidate,
});
} else {
self.diag_info.open_braces.pop();
self.diag_info.open_delimiters.pop();
}
// If the incorrect delimiter matches an earlier opening
@ -169,7 +172,7 @@ impl<'psess, 'src> Lexer<'psess, 'src> {
// fn foo() {
// bar(baz(
// } // Incorrect delimiter but matches the earlier `{`
if !self.diag_info.open_braces.iter().any(|&(b, _)| b == close_delim) {
if !self.diag_info.open_delimiters.iter().any(|&(d, _)| d == close_delim) {
self.bump_minimal()
} else {
// The choice of value here doesn't matter.
@ -180,7 +183,7 @@ impl<'psess, 'src> Lexer<'psess, 'src> {
assert_eq!(self.token.kind, token::Eof);
// Silently recover, the EOF token will be seen again
// and an error emitted then. Thus we don't pop from
// self.open_braces here. The choice of spacing value here
// self.open_delimiters here. The choice of spacing value here
// doesn't matter.
Spacing::Alone
};

View file

@ -32,7 +32,7 @@ pub const MACRO_ARGUMENTS: Option<&str> = Some("macro arguments");
#[macro_use]
pub mod parser;
use parser::{Parser, make_unclosed_delims_error};
use parser::Parser;
pub mod lexer;
pub mod validate_attr;

View file

@ -43,11 +43,8 @@ use token_type::TokenTypeSet;
pub use token_type::{ExpKeywordPair, ExpTokenPair, TokenType};
use tracing::debug;
use crate::errors::{
self, IncorrectVisibilityRestriction, MismatchedClosingDelimiter, NonStringAbiLiteral,
};
use crate::errors::{self, IncorrectVisibilityRestriction, NonStringAbiLiteral};
use crate::exp;
use crate::lexer::UnmatchedDelim;
#[cfg(test)]
mod tests;
@ -1745,27 +1742,6 @@ impl<'a> Parser<'a> {
}
}
pub(crate) fn make_unclosed_delims_error(
unmatched: UnmatchedDelim,
psess: &ParseSess,
) -> Option<Diag<'_>> {
// `None` here means an `Eof` was found. We already emit those errors elsewhere, we add them to
// `unmatched_delims` only for error recovery in the `Parser`.
let found_delim = unmatched.found_delim?;
let mut spans = vec![unmatched.found_span];
if let Some(sp) = unmatched.unclosed_span {
spans.push(sp);
};
let err = psess.dcx().create_err(MismatchedClosingDelimiter {
spans,
delimiter: pprust::token_kind_to_string(&found_delim.as_close_token_kind()).to_string(),
unmatched: unmatched.found_span,
opening_candidate: unmatched.candidate_span,
unclosed: unmatched.unclosed_span,
});
Some(err)
}
/// A helper struct used when building an `AttrTokenStream` from
/// a `LazyAttrTokenStream`. Both delimiter and non-delimited tokens
/// are stored as `FlatToken::Token`. A vector of `FlatToken`s

View file

@ -213,7 +213,7 @@ where
}
}
}
ty::Alias(kind @ (ty::Inherent | ty::Weak | ty::Projection), data) => {
ty::Alias(kind @ (ty::Inherent | ty::Free | ty::Projection), data) => {
if self.def_id_visitor.skip_assoc_tys() {
// Visitors searching for minimal visibility/reachability want to
// conservatively approximate associated types like `Type::Alias`
@ -227,7 +227,7 @@ where
data.def_id,
match kind {
ty::Inherent | ty::Projection => "associated type",
ty::Weak => "type alias",
ty::Free => "type alias",
ty::Opaque => unreachable!(),
},
&LazyDefPathStr { def_id: data.def_id, tcx },

View file

@ -16,7 +16,7 @@ impl<'tcx> Stable<'tcx> for ty::AliasTyKind {
ty::Projection => stable_mir::ty::AliasKind::Projection,
ty::Inherent => stable_mir::ty::AliasKind::Inherent,
ty::Opaque => stable_mir::ty::AliasKind::Opaque,
ty::Weak => stable_mir::ty::AliasKind::Weak,
ty::Free => stable_mir::ty::AliasKind::Free,
}
}
}

View file

@ -1026,7 +1026,7 @@ pub enum AliasKind {
Projection,
Inherent,
Opaque,
Weak,
Free,
}
#[derive(Clone, Debug, Eq, PartialEq, Serialize)]

View file

@ -775,7 +775,7 @@ const RISCV_FEATURES_FOR_CORRECT_VECTOR_ABI: &'static [(u64, &'static str)] = &[
(32768, "zvl32768b"),
(65536, "zvl65536b"),
];
// Always warn on SPARC, as the necessary target features cannot be enabled in Rust at the moment.
// Always error on SPARC, as the necessary target features cannot be enabled in Rust at the moment.
const SPARC_FEATURES_FOR_CORRECT_VECTOR_ABI: &'static [(u64, &'static str)] = &[/*(64, "vis")*/];
const HEXAGON_FEATURES_FOR_CORRECT_VECTOR_ABI: &'static [(u64, &'static str)] =

View file

@ -707,7 +707,7 @@ impl<'a, 'tcx> TypeErrCtxt<'a, 'tcx> {
ty::Projection | ty::Inherent => {
format!("the associated type `{p}`")
}
ty::Weak => format!("the type alias `{p}`"),
ty::Free => format!("the type alias `{p}`"),
ty::Opaque => format!("the opaque type `{p}`"),
},
};

View file

@ -81,9 +81,7 @@ pub fn call_kind<'tcx>(
}
});
let fn_call = parent.and_then(|p| {
lang_items::FN_TRAITS.iter().filter_map(|&l| tcx.lang_items().get(l)).find(|&id| id == p)
});
let fn_call = parent.filter(|&p| tcx.fn_trait_kind_from_def_id(p).is_some());
let operator = if !from_hir_call && let Some(p) = parent {
lang_items::OPERATORS.iter().filter_map(|&l| tcx.lang_items().get(l)).find(|&id| id == p)

View file

@ -146,7 +146,7 @@ impl<'a, 'tcx> TypeErrCtxt<'a, 'tcx> {
&& leaf_trait_predicate.def_id() != root_pred.def_id()
// The root trait is not `Unsize`, as to avoid talking about it in
// `tests/ui/coercion/coerce-issue-49593-box-never.rs`.
&& Some(root_pred.def_id()) != self.tcx.lang_items().unsize_trait()
&& !self.tcx.is_lang_item(root_pred.def_id(), LangItem::Unsize)
{
(
self.resolve_vars_if_possible(
@ -1682,7 +1682,7 @@ impl<'a, 'tcx> TypeErrCtxt<'a, 'tcx> {
ty::Alias(ty::Projection, ..) => Some(12),
ty::Alias(ty::Inherent, ..) => Some(13),
ty::Alias(ty::Opaque, ..) => Some(14),
ty::Alias(ty::Weak, ..) => Some(15),
ty::Alias(ty::Free, ..) => Some(15),
ty::Never => Some(16),
ty::Adt(..) => Some(17),
ty::Coroutine(..) => Some(18),
@ -2274,10 +2274,7 @@ impl<'a, 'tcx> TypeErrCtxt<'a, 'tcx> {
// auto-traits or fundamental traits that might not be exactly what
// the user might expect to be presented with. Instead this is
// useful for less general traits.
if peeled
&& !self.tcx.trait_is_auto(def_id)
&& !self.tcx.lang_items().iter().any(|(_, id)| id == def_id)
{
if peeled && !self.tcx.trait_is_auto(def_id) && self.tcx.as_lang_item(def_id).is_none() {
let impl_candidates = self.find_similar_impl_candidates(trait_pred);
self.report_similar_impl_candidates(
&impl_candidates,
@ -3013,8 +3010,7 @@ impl<'a, 'tcx> TypeErrCtxt<'a, 'tcx> {
// This shouldn't be common unless manually implementing one of the
// traits manually, but don't make it more confusing when it does
// happen.
if Some(expected_trait_ref.def_id) != self.tcx.lang_items().coroutine_trait() && not_tupled
{
if !self.tcx.is_lang_item(expected_trait_ref.def_id, LangItem::Coroutine) && not_tupled {
return Ok(self.report_and_explain_type_error(
TypeTrace::trait_refs(&obligation.cause, expected_trait_ref, found_trait_ref),
obligation.param_env,

View file

@ -3844,12 +3844,7 @@ impl<'a, 'tcx> TypeErrCtxt<'a, 'tcx> {
);
if let ty::PredicateKind::Clause(clause) = failed_pred.kind().skip_binder()
&& let ty::ClauseKind::Trait(pred) = clause
&& [
tcx.lang_items().fn_once_trait(),
tcx.lang_items().fn_mut_trait(),
tcx.lang_items().fn_trait(),
]
.contains(&Some(pred.def_id()))
&& tcx.fn_trait_kind_from_def_id(pred.def_id()).is_some()
{
if let [stmt, ..] = block.stmts
&& let hir::StmtKind::Semi(value) = stmt.kind

View file

@ -34,7 +34,7 @@ impl<'tcx> InferCtxt<'tcx> {
// FIXME(#132279): This should be removed as it causes us to incorrectly
// handle opaques in their defining scope.
if !(param_env, ty).has_infer() {
if !self.next_trait_solver() && !(param_env, ty).has_infer() {
return self.tcx.type_is_copy_modulo_regions(self.typing_env(param_env), ty);
}

View file

@ -9,5 +9,8 @@ mod select;
pub(crate) use delegate::SolverDelegate;
pub use fulfill::{FulfillmentCtxt, NextSolverError};
pub(crate) use normalize::deeply_normalize_for_diagnostics;
pub use normalize::{deeply_normalize, deeply_normalize_with_skipped_universes};
pub use normalize::{
deeply_normalize, deeply_normalize_with_skipped_universes,
deeply_normalize_with_skipped_universes_and_ambiguous_goals,
};
pub use select::InferCtxtSelectExt;

View file

@ -1,18 +1,26 @@
use std::marker::PhantomData;
use std::mem;
use std::ops::ControlFlow;
use rustc_data_structures::thinvec::ExtractIf;
use rustc_hir::def_id::LocalDefId;
use rustc_infer::infer::InferCtxt;
use rustc_infer::traits::query::NoSolution;
use rustc_infer::traits::{
FromSolverError, PredicateObligation, PredicateObligations, TraitEngine,
};
use rustc_middle::ty::{
self, Ty, TyCtxt, TypeSuperVisitable, TypeVisitable, TypeVisitor, TypingMode,
};
use rustc_next_trait_solver::solve::{GenerateProofTree, HasChanged, SolverDelegateEvalExt as _};
use rustc_span::Span;
use rustc_type_ir::data_structures::DelayedSet;
use tracing::instrument;
use self::derive_errors::*;
use super::Certainty;
use super::delegate::SolverDelegate;
use super::inspect::{self, ProofTreeInferCtxtExt};
use crate::traits::{FulfillmentError, ScrubbedTraitError};
mod derive_errors;
@ -39,7 +47,7 @@ pub struct FulfillmentCtxt<'tcx, E: 'tcx> {
_errors: PhantomData<E>,
}
#[derive(Default)]
#[derive(Default, Debug)]
struct ObligationStorage<'tcx> {
/// Obligations which resulted in an overflow in fulfillment itself.
///
@ -55,20 +63,23 @@ impl<'tcx> ObligationStorage<'tcx> {
self.pending.push(obligation);
}
fn has_pending_obligations(&self) -> bool {
!self.pending.is_empty() || !self.overflowed.is_empty()
}
fn clone_pending(&self) -> PredicateObligations<'tcx> {
let mut obligations = self.pending.clone();
obligations.extend(self.overflowed.iter().cloned());
obligations
}
fn take_pending(&mut self) -> PredicateObligations<'tcx> {
let mut obligations = mem::take(&mut self.pending);
obligations.append(&mut self.overflowed);
obligations
}
fn unstalled_for_select(&mut self) -> impl Iterator<Item = PredicateObligation<'tcx>> + 'tcx {
mem::take(&mut self.pending).into_iter()
fn drain_pending(
&mut self,
cond: impl Fn(&PredicateObligation<'tcx>) -> bool,
) -> PredicateObligations<'tcx> {
let (unstalled, pending) = mem::take(&mut self.pending).into_iter().partition(cond);
self.pending = pending;
unstalled
}
fn on_fulfillment_overflow(&mut self, infcx: &InferCtxt<'tcx>) {
@ -160,7 +171,7 @@ where
}
let mut has_changed = false;
for obligation in self.obligations.unstalled_for_select() {
for obligation in self.obligations.drain_pending(|_| true) {
let goal = obligation.as_goal();
let result = <&SolverDelegate<'tcx>>::from(infcx)
.evaluate_root_goal(goal, GenerateProofTree::No, obligation.cause.span)
@ -196,15 +207,95 @@ where
}
fn has_pending_obligations(&self) -> bool {
!self.obligations.pending.is_empty() || !self.obligations.overflowed.is_empty()
self.obligations.has_pending_obligations()
}
fn pending_obligations(&self) -> PredicateObligations<'tcx> {
self.obligations.clone_pending()
}
fn drain_unstalled_obligations(&mut self, _: &InferCtxt<'tcx>) -> PredicateObligations<'tcx> {
self.obligations.take_pending()
/// Removes and returns every pending obligation whose proof tree is stalled
/// on a coroutine owned by the current typeck root (as detected by the
/// `StalledOnCoroutines` visitor).
///
/// Outside of `TypingMode::Analysis`, or when there are no stalled
/// generators, nothing is drained and an empty list is returned.
fn drain_stalled_obligations_for_coroutines(
    &mut self,
    infcx: &InferCtxt<'tcx>,
) -> PredicateObligations<'tcx> {
    // Only `TypingMode::Analysis` carries a list of still-inferring
    // coroutines; in all other modes there can be nothing to drain.
    let stalled_generators = match infcx.typing_mode() {
        TypingMode::Analysis { defining_opaque_types_and_generators } => {
            defining_opaque_types_and_generators
        }
        TypingMode::Coherence
        | TypingMode::Borrowck { defining_opaque_types: _ }
        | TypingMode::PostBorrowckAnalysis { defined_opaque_types: _ }
        | TypingMode::PostAnalysis => return Default::default(),
    };

    // Fast path: no coroutines being inferred, so no obligation can stall.
    if stalled_generators.is_empty() {
        return Default::default();
    }

    // Keep an obligation in `pending` unless its proof tree breaks on a
    // stalled coroutine witness. The `probe` ensures that visiting the proof
    // tree leaves no inference side-effects behind.
    self.obligations.drain_pending(|obl| {
        infcx.probe(|_| {
            infcx
                .visit_proof_tree(
                    obl.as_goal(),
                    &mut StalledOnCoroutines {
                        stalled_generators,
                        span: obl.cause.span,
                        cache: Default::default(),
                    },
                )
                .is_break()
        })
    })
}
}
/// Detect if a goal is stalled on a coroutine that is owned by the current typeck root.
///
/// This visitor can (erroneously) fail to detect a predicate, i.e. it doesn't need to
/// be complete. However, this will lead to ambiguity errors, so we want to make it
/// accurate.
///
/// This visitor can also return false positives, which will lead to poor diagnostics,
/// so we want to keep it *precise* too.
struct StalledOnCoroutines<'tcx> {
    /// Coroutines of the current typeck root that are still being inferred;
    /// hitting one of their witnesses means the goal is stalled.
    stalled_generators: &'tcx ty::List<LocalDefId>,
    /// Span reported via `ProofTreeVisitor::span` while walking proof trees.
    span: Span,
    /// Types already visited, so shared types are only walked once.
    cache: DelayedSet<Ty<'tcx>>,
}
impl<'tcx> inspect::ProofTreeVisitor<'tcx> for StalledOnCoroutines<'tcx> {
    type Result = ControlFlow<()>;

    fn span(&self) -> rustc_span::Span {
        self.span
    }

    /// Breaks (via the `TypeVisitor` impl below) as soon as a stalled
    /// coroutine witness appears either in this goal's predicate or in the
    /// nested goals of its single applicable candidate.
    fn visit_goal(&mut self, inspect_goal: &super::inspect::InspectGoal<'_, 'tcx>) -> Self::Result {
        // Check the types mentioned by the predicate itself first.
        inspect_goal.goal().predicate.visit_with(self)?;

        // Only recurse when exactly one candidate applies; otherwise we
        // conservatively treat the goal as not stalled (`Continue`).
        if let Some(candidate) = inspect_goal.unique_applicable_candidate() {
            candidate.visit_nested_no_probe(self)
        } else {
            ControlFlow::Continue(())
        }
    }
}
impl<'tcx> TypeVisitor<TyCtxt<'tcx>> for StalledOnCoroutines<'tcx> {
    type Result = ControlFlow<()>;

    fn visit_ty(&mut self, ty: Ty<'tcx>) -> Self::Result {
        // Don't re-walk types we've already seen; `insert` returns `false`
        // for duplicates.
        if !self.cache.insert(ty) {
            return ControlFlow::Continue(());
        }

        // `Break` signals that `ty` mentions the witness of a coroutine which
        // is still being inferred in the current typeck root.
        if let ty::CoroutineWitness(def_id, _) = *ty.kind()
            && def_id.as_local().is_some_and(|def_id| self.stalled_generators.contains(&def_id))
        {
            return ControlFlow::Break(());
        }

        ty.super_visit_with(self)
    }
}

View file

@ -1,5 +1,6 @@
use std::ops::ControlFlow;
use rustc_hir::LangItem;
use rustc_infer::infer::InferCtxt;
use rustc_infer::traits::solve::{CandidateSource, GoalSource, MaybeCause};
use rustc_infer::traits::{
@ -109,10 +110,16 @@ pub(super) fn fulfillment_error_for_stalled<'tcx>(
false,
),
Ok((_, Certainty::Yes)) => {
bug!("did not expect successful goal when collecting ambiguity errors")
bug!(
"did not expect successful goal when collecting ambiguity errors for `{:?}`",
infcx.resolve_vars_if_possible(root_obligation.predicate),
)
}
Err(_) => {
bug!("did not expect selection error when collecting ambiguity errors")
bug!(
"did not expect selection error when collecting ambiguity errors for `{:?}`",
infcx.resolve_vars_if_possible(root_obligation.predicate),
)
}
}
});
@ -452,9 +459,8 @@ impl<'tcx> ProofTreeVisitor<'tcx> for BestObligation<'tcx> {
// We do this as a separate loop so that we do not choose to tell the user about some nested
// goal before we encounter a `T: FnPtr` nested goal.
for nested_goal in &nested_goals {
if let Some(fn_ptr_trait) = tcx.lang_items().fn_ptr_trait()
&& let Some(poly_trait_pred) = nested_goal.goal().predicate.as_trait_clause()
&& poly_trait_pred.def_id() == fn_ptr_trait
if let Some(poly_trait_pred) = nested_goal.goal().predicate.as_trait_clause()
&& tcx.is_lang_item(poly_trait_pred.def_id(), LangItem::FnPtrTrait)
&& let Err(NoSolution) = nested_goal.result()
{
return ControlFlow::Break(self.obligation.clone());

View file

@ -1,10 +1,10 @@
use std::assert_matches::assert_matches;
use std::fmt::Debug;
use std::marker::PhantomData;
use rustc_data_structures::stack::ensure_sufficient_stack;
use rustc_infer::infer::InferCtxt;
use rustc_infer::infer::at::At;
use rustc_infer::traits::solve::Goal;
use rustc_infer::traits::{FromSolverError, Obligation, TraitEngine};
use rustc_middle::traits::ObligationCause;
use rustc_middle::ty::{
@ -41,15 +41,41 @@ pub fn deeply_normalize_with_skipped_universes<'tcx, T, E>(
value: T,
universes: Vec<Option<UniverseIndex>>,
) -> Result<T, Vec<E>>
where
T: TypeFoldable<TyCtxt<'tcx>>,
E: FromSolverError<'tcx, NextSolverError<'tcx>>,
{
let (value, goals) =
deeply_normalize_with_skipped_universes_and_ambiguous_goals(at, value, universes)?;
assert_eq!(goals, vec![]);
Ok(value)
}
/// Deeply normalize all aliases in `value`. This does not handle inference and expects
/// its input to be already fully resolved.
///
/// Additionally takes a list of universes which represents the binders which have been
/// entered before passing `value` to the function. This is currently needed for
/// `normalize_erasing_regions`, which skips binders as it walks through a type.
///
/// This returns a set of stalled obligations if the typing mode of the underlying infcx
/// has any stalled coroutine def ids.
pub fn deeply_normalize_with_skipped_universes_and_ambiguous_goals<'tcx, T, E>(
at: At<'_, 'tcx>,
value: T,
universes: Vec<Option<UniverseIndex>>,
) -> Result<(T, Vec<Goal<'tcx, ty::Predicate<'tcx>>>), Vec<E>>
where
T: TypeFoldable<TyCtxt<'tcx>>,
E: FromSolverError<'tcx, NextSolverError<'tcx>>,
{
let fulfill_cx = FulfillmentCtxt::new(at.infcx);
let mut folder =
NormalizationFolder { at, fulfill_cx, depth: 0, universes, _errors: PhantomData };
value.try_fold_with(&mut folder)
NormalizationFolder { at, fulfill_cx, depth: 0, universes, stalled_goals: vec![] };
let value = value.try_fold_with(&mut folder)?;
let errors = folder.fulfill_cx.select_all_or_error(at.infcx);
if errors.is_empty() { Ok((value, folder.stalled_goals)) } else { Err(errors) }
}
struct NormalizationFolder<'me, 'tcx, E> {
@ -57,7 +83,7 @@ struct NormalizationFolder<'me, 'tcx, E> {
fulfill_cx: FulfillmentCtxt<'tcx, E>,
depth: usize,
universes: Vec<Option<UniverseIndex>>,
_errors: PhantomData<E>,
stalled_goals: Vec<Goal<'tcx, ty::Predicate<'tcx>>>,
}
impl<'tcx, E> NormalizationFolder<'_, 'tcx, E>
@ -98,10 +124,7 @@ where
);
self.fulfill_cx.register_predicate_obligation(infcx, obligation);
let errors = self.fulfill_cx.select_all_or_error(infcx);
if !errors.is_empty() {
return Err(errors);
}
self.select_all_and_stall_coroutine_predicates()?;
// Alias is guaranteed to be fully structurally resolved,
// so we can super fold here.
@ -139,7 +162,7 @@ where
let result = if infcx.predicate_may_hold(&obligation) {
self.fulfill_cx.register_predicate_obligation(infcx, obligation);
let errors = self.fulfill_cx.select_all_or_error(infcx);
let errors = self.fulfill_cx.select_where_possible(infcx);
if !errors.is_empty() {
return Err(errors);
}
@ -152,6 +175,27 @@ where
self.depth -= 1;
Ok(result)
}
/// Drive `self.fulfill_cx` to completion: select everything that can make
/// progress, move obligations that are stalled on a not-yet-inferred
/// coroutine into `self.stalled_goals`, and surface any remaining errors.
fn select_all_and_stall_coroutine_predicates(&mut self) -> Result<(), Vec<E>> {
    let errors = self.fulfill_cx.select_where_possible(self.at.infcx);
    if !errors.is_empty() {
        return Err(errors);
    }

    // Obligations stalled on coroutines are not errors here; hand them back
    // to the caller as goals instead of reporting them.
    self.stalled_goals.extend(
        self.fulfill_cx
            .drain_stalled_obligations_for_coroutines(self.at.infcx)
            .into_iter()
            .map(|obl| obl.as_goal()),
    );

    // Anything still pending after draining the stalled obligations is a
    // genuine error.
    let errors = self.fulfill_cx.collect_remaining_errors(self.at.infcx);
    if !errors.is_empty() {
        return Err(errors);
    }

    Ok(())
}
}
impl<'tcx, E> FallibleTypeFolder<TyCtxt<'tcx>> for NormalizationFolder<'_, 'tcx, E>
@ -254,27 +298,31 @@ impl<'tcx> TypeFolder<TyCtxt<'tcx>> for DeeplyNormalizeForDiagnosticsFolder<'_,
fn fold_ty(&mut self, ty: Ty<'tcx>) -> Ty<'tcx> {
let infcx = self.at.infcx;
infcx
.commit_if_ok(|_| {
deeply_normalize_with_skipped_universes(
self.at,
ty,
vec![None; ty.outer_exclusive_binder().as_usize()],
)
})
.unwrap_or_else(|_: Vec<ScrubbedTraitError<'tcx>>| ty.super_fold_with(self))
let result =
infcx.commit_if_ok(|_| {
deeply_normalize_with_skipped_universes_and_ambiguous_goals::<
_,
ScrubbedTraitError<'tcx>,
>(self.at, ty, vec![None; ty.outer_exclusive_binder().as_usize()])
});
match result {
Ok((ty, _)) => ty,
Err(_) => ty.super_fold_with(self),
}
}
fn fold_const(&mut self, ct: ty::Const<'tcx>) -> ty::Const<'tcx> {
let infcx = self.at.infcx;
infcx
.commit_if_ok(|_| {
deeply_normalize_with_skipped_universes(
self.at,
ct,
vec![None; ct.outer_exclusive_binder().as_usize()],
)
})
.unwrap_or_else(|_: Vec<ScrubbedTraitError<'tcx>>| ct.super_fold_with(self))
let result =
infcx.commit_if_ok(|_| {
deeply_normalize_with_skipped_universes_and_ambiguous_goals::<
_,
ScrubbedTraitError<'tcx>,
>(self.at, ct, vec![None; ct.outer_exclusive_binder().as_usize()])
});
match result {
Ok((ct, _)) => ct,
Err(_) => ct.super_fold_with(self),
}
}
}

View file

@ -162,7 +162,7 @@ where
self.select(selcx)
}
fn drain_unstalled_obligations(
fn drain_stalled_obligations_for_coroutines(
&mut self,
infcx: &InferCtxt<'tcx>,
) -> PredicateObligations<'tcx> {

View file

@ -333,7 +333,7 @@ impl<'a, 'b, 'tcx> TypeFolder<TyCtxt<'tcx>> for AssocTypeNormalizer<'a, 'b, 'tcx
);
normalized_ty
}
ty::Weak => {
ty::Free => {
let recursion_limit = self.cx().recursion_limit();
if !recursion_limit.value_within_limit(self.depth) {
self.selcx.infcx.err_ctxt().report_overflow_error(

View file

@ -468,7 +468,7 @@ fn normalize_to_error<'a, 'tcx>(
ty::AliasTermKind::ProjectionTy
| ty::AliasTermKind::InherentTy
| ty::AliasTermKind::OpaqueTy
| ty::AliasTermKind::WeakTy => selcx.infcx.next_ty_var(cause.span).into(),
| ty::AliasTermKind::FreeTy => selcx.infcx.next_ty_var(cause.span).into(),
ty::AliasTermKind::UnevaluatedConst | ty::AliasTermKind::ProjectionConst => {
selcx.infcx.next_const_var(cause.span).into()
}
@ -965,36 +965,38 @@ fn assemble_candidates_from_impls<'cx, 'tcx>(
let self_ty = selcx.infcx.shallow_resolve(obligation.predicate.self_ty());
let tcx = selcx.tcx();
let lang_items = selcx.tcx().lang_items();
if [
lang_items.coroutine_trait(),
lang_items.future_trait(),
lang_items.iterator_trait(),
lang_items.async_iterator_trait(),
lang_items.fn_trait(),
lang_items.fn_mut_trait(),
lang_items.fn_once_trait(),
lang_items.async_fn_trait(),
lang_items.async_fn_mut_trait(),
lang_items.async_fn_once_trait(),
]
.contains(&Some(trait_ref.def_id))
{
true
} else if tcx.is_lang_item(trait_ref.def_id, LangItem::AsyncFnKindHelper) {
// FIXME(async_closures): Validity constraints here could be cleaned up.
if obligation.predicate.args.type_at(0).is_ty_var()
|| obligation.predicate.args.type_at(4).is_ty_var()
|| obligation.predicate.args.type_at(5).is_ty_var()
{
candidate_set.mark_ambiguous();
true
} else {
obligation.predicate.args.type_at(0).to_opt_closure_kind().is_some()
&& obligation.predicate.args.type_at(1).to_opt_closure_kind().is_some()
match selcx.tcx().as_lang_item(trait_ref.def_id) {
Some(
LangItem::Coroutine
| LangItem::Future
| LangItem::Iterator
| LangItem::AsyncIterator
| LangItem::Fn
| LangItem::FnMut
| LangItem::FnOnce
| LangItem::AsyncFn
| LangItem::AsyncFnMut
| LangItem::AsyncFnOnce,
) => true,
Some(LangItem::AsyncFnKindHelper) => {
// FIXME(async_closures): Validity constraints here could be cleaned up.
if obligation.predicate.args.type_at(0).is_ty_var()
|| obligation.predicate.args.type_at(4).is_ty_var()
|| obligation.predicate.args.type_at(5).is_ty_var()
{
candidate_set.mark_ambiguous();
true
} else {
obligation.predicate.args.type_at(0).to_opt_closure_kind().is_some()
&& obligation
.predicate
.args
.type_at(1)
.to_opt_closure_kind()
.is_some()
}
}
} else if tcx.is_lang_item(trait_ref.def_id, LangItem::DiscriminantKind) {
match self_ty.kind() {
Some(LangItem::DiscriminantKind) => match self_ty.kind() {
ty::Bool
| ty::Char
| ty::Int(_)
@ -1031,9 +1033,8 @@ fn assemble_candidates_from_impls<'cx, 'tcx>(
| ty::Placeholder(..)
| ty::Infer(..)
| ty::Error(_) => false,
}
} else if tcx.is_lang_item(trait_ref.def_id, LangItem::AsyncDestruct) {
match self_ty.kind() {
},
Some(LangItem::AsyncDestruct) => match self_ty.kind() {
ty::Bool
| ty::Char
| ty::Int(_)
@ -1068,101 +1069,104 @@ fn assemble_candidates_from_impls<'cx, 'tcx>(
| ty::Placeholder(..)
| ty::Infer(_)
| ty::Error(_) => false,
}
} else if tcx.is_lang_item(trait_ref.def_id, LangItem::PointeeTrait) {
let tail = selcx.tcx().struct_tail_raw(
self_ty,
|ty| {
// We throw away any obligations we get from this, since we normalize
// and confirm these obligations once again during confirmation
normalize_with_depth(
selcx,
obligation.param_env,
obligation.cause.clone(),
obligation.recursion_depth + 1,
ty,
)
.value
},
|| {},
);
},
Some(LangItem::PointeeTrait) => {
let tail = selcx.tcx().struct_tail_raw(
self_ty,
|ty| {
// We throw away any obligations we get from this, since we normalize
// and confirm these obligations once again during confirmation
normalize_with_depth(
selcx,
obligation.param_env,
obligation.cause.clone(),
obligation.recursion_depth + 1,
ty,
)
.value
},
|| {},
);
match tail.kind() {
ty::Bool
| ty::Char
| ty::Int(_)
| ty::Uint(_)
| ty::Float(_)
| ty::Str
| ty::Array(..)
| ty::Pat(..)
| ty::Slice(_)
| ty::RawPtr(..)
| ty::Ref(..)
| ty::FnDef(..)
| ty::FnPtr(..)
| ty::Dynamic(..)
| ty::Closure(..)
| ty::CoroutineClosure(..)
| ty::Coroutine(..)
| ty::CoroutineWitness(..)
| ty::Never
// Extern types have unit metadata, according to RFC 2850
| ty::Foreign(_)
// If returned by `struct_tail` this is a unit struct
// without any fields, or not a struct, and therefore is Sized.
| ty::Adt(..)
// If returned by `struct_tail` this is the empty tuple.
| ty::Tuple(..)
// Integers and floats are always Sized, and so have unit type metadata.
| ty::Infer(ty::InferTy::IntVar(_) | ty::InferTy::FloatVar(..))
// This happens if we reach the recursion limit when finding the struct tail.
| ty::Error(..) => true,
match tail.kind() {
ty::Bool
| ty::Char
| ty::Int(_)
| ty::Uint(_)
| ty::Float(_)
| ty::Str
| ty::Array(..)
| ty::Pat(..)
| ty::Slice(_)
| ty::RawPtr(..)
| ty::Ref(..)
| ty::FnDef(..)
| ty::FnPtr(..)
| ty::Dynamic(..)
| ty::Closure(..)
| ty::CoroutineClosure(..)
| ty::Coroutine(..)
| ty::CoroutineWitness(..)
| ty::Never
// Extern types have unit metadata, according to RFC 2850
| ty::Foreign(_)
// If returned by `struct_tail` this is a unit struct
// without any fields, or not a struct, and therefore is Sized.
| ty::Adt(..)
// If returned by `struct_tail` this is the empty tuple.
| ty::Tuple(..)
// Integers and floats are always Sized, and so have unit type metadata.
| ty::Infer(ty::InferTy::IntVar(_) | ty::InferTy::FloatVar(..))
// This happens if we reach the recursion limit when finding the struct tail.
| ty::Error(..) => true,
// We normalize from `Wrapper<Tail>::Metadata` to `Tail::Metadata` if able.
// Otherwise, type parameters, opaques, and unnormalized projections have
// unit metadata if they're known (e.g. by the param_env) to be sized.
ty::Param(_) | ty::Alias(..)
if self_ty != tail
|| selcx.infcx.predicate_must_hold_modulo_regions(
&obligation.with(
selcx.tcx(),
ty::TraitRef::new(
// We normalize from `Wrapper<Tail>::Metadata` to `Tail::Metadata` if able.
// Otherwise, type parameters, opaques, and unnormalized projections have
// unit metadata if they're known (e.g. by the param_env) to be sized.
ty::Param(_) | ty::Alias(..)
if self_ty != tail
|| selcx.infcx.predicate_must_hold_modulo_regions(
&obligation.with(
selcx.tcx(),
selcx.tcx().require_lang_item(
LangItem::Sized,
Some(obligation.cause.span),
ty::TraitRef::new(
selcx.tcx(),
selcx.tcx().require_lang_item(
LangItem::Sized,
Some(obligation.cause.span),
),
[self_ty],
),
[self_ty],
),
),
) =>
{
true
}
ty::UnsafeBinder(_) => todo!("FIXME(unsafe_binder)"),
// FIXME(compiler-errors): are Bound and Placeholder types ever known sized?
ty::Param(_)
| ty::Alias(..)
| ty::Bound(..)
| ty::Placeholder(..)
| ty::Infer(..) => {
if tail.has_infer_types() {
candidate_set.mark_ambiguous();
) =>
{
true
}
ty::UnsafeBinder(_) => todo!("FIXME(unsafe_binder)"),
// FIXME(compiler-errors): are Bound and Placeholder types ever known sized?
ty::Param(_)
| ty::Alias(..)
| ty::Bound(..)
| ty::Placeholder(..)
| ty::Infer(..) => {
if tail.has_infer_types() {
candidate_set.mark_ambiguous();
}
false
}
false
}
}
} else if tcx.trait_is_auto(trait_ref.def_id) {
tcx.dcx().span_delayed_bug(
tcx.def_span(obligation.predicate.def_id),
"associated types not allowed on auto traits",
);
false
} else {
bug!("unexpected builtin trait with associated type: {trait_ref:?}")
_ if tcx.trait_is_auto(trait_ref.def_id) => {
tcx.dcx().span_delayed_bug(
tcx.def_span(obligation.predicate.def_id),
"associated types not allowed on auto traits",
);
false
}
_ => {
bug!("unexpected builtin trait with associated type: {trait_ref:?}")
}
}
}
ImplSource::Param(..) => {

View file

@ -253,7 +253,7 @@ impl<'a, 'tcx> FallibleTypeFolder<TyCtxt<'tcx>> for QueryNormalizer<'a, 'tcx> {
}
}
ty::Projection | ty::Inherent | ty::Weak => {
ty::Projection | ty::Inherent | ty::Free => {
// See note in `rustc_trait_selection::traits::project`
let infcx = self.infcx;
@ -275,7 +275,7 @@ impl<'a, 'tcx> FallibleTypeFolder<TyCtxt<'tcx>> for QueryNormalizer<'a, 'tcx> {
debug!("QueryNormalizer: orig_values = {:#?}", orig_values);
let result = match kind {
ty::Projection => tcx.normalize_canonicalized_projection_ty(c_data),
ty::Weak => tcx.normalize_canonicalized_weak_ty(c_data),
ty::Free => tcx.normalize_canonicalized_free_alias(c_data),
ty::Inherent => tcx.normalize_canonicalized_inherent_projection_ty(c_data),
kind => unreachable!("did not expect {kind:?} due to match arm above"),
}?;
@ -313,10 +313,10 @@ impl<'a, 'tcx> FallibleTypeFolder<TyCtxt<'tcx>> for QueryNormalizer<'a, 'tcx> {
};
// `tcx.normalize_canonicalized_projection_ty` may normalize to a type that
// still has unevaluated consts, so keep normalizing here if that's the case.
// Similarly, `tcx.normalize_canonicalized_weak_ty` will only unwrap one layer
// Similarly, `tcx.normalize_canonicalized_free_alias` will only unwrap one layer
// of type and we need to continue folding it to reveal the TAIT behind it.
if res != ty
&& (res.has_type_flags(ty::TypeFlags::HAS_CT_PROJECTION) || kind == ty::Weak)
&& (res.has_type_flags(ty::TypeFlags::HAS_CT_PROJECTION) || kind == ty::Free)
{
res.try_fold_with(self)?
} else {

View file

@ -730,7 +730,7 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
}
}
ty::Param(..)
| ty::Alias(ty::Projection | ty::Inherent | ty::Weak, ..)
| ty::Alias(ty::Projection | ty::Inherent | ty::Free, ..)
| ty::Placeholder(..)
| ty::Bound(..) => {
// In these cases, we don't know what the actual

View file

@ -1498,7 +1498,9 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
// However, if we disqualify *all* goals from being cached, perf suffers.
// This is likely fixed by better caching in general in the new solver.
// See: <https://github.com/rust-lang/rust/issues/132064>.
TypingMode::Analysis { defining_opaque_types }
TypingMode::Analysis {
defining_opaque_types_and_generators: defining_opaque_types,
}
| TypingMode::Borrowck { defining_opaque_types } => {
defining_opaque_types.is_empty() || !pred.has_opaque_types()
}
@ -2321,7 +2323,7 @@ impl<'tcx> SelectionContext<'_, 'tcx> {
ty::Placeholder(..)
| ty::Dynamic(..)
| ty::Param(..)
| ty::Alias(ty::Projection | ty::Inherent | ty::Weak, ..)
| ty::Alias(ty::Projection | ty::Inherent | ty::Free, ..)
| ty::Bound(..)
| ty::Infer(ty::TyVar(_) | ty::FreshTy(_) | ty::FreshIntTy(_) | ty::FreshFloatTy(_)) => {
bug!("asked to assemble constituent types of unexpected type: {:?}", t);

View file

@ -756,7 +756,7 @@ impl<'a, 'tcx> TypeVisitor<TyCtxt<'tcx>> for WfPredicates<'a, 'tcx> {
// Simple cases that are WF if their type args are WF.
}
ty::Alias(ty::Projection | ty::Opaque | ty::Weak, data) => {
ty::Alias(ty::Projection | ty::Opaque | ty::Free, data) => {
let obligations = self.nominal_obligations(data.def_id, data.args);
self.out.extend(obligations);
}

View file

@ -13,7 +13,7 @@ use tracing::debug;
pub(crate) fn provide(p: &mut Providers) {
*p = Providers {
normalize_canonicalized_projection_ty,
normalize_canonicalized_weak_ty,
normalize_canonicalized_free_alias,
normalize_canonicalized_inherent_projection_ty,
..*p
};
@ -63,11 +63,11 @@ fn normalize_canonicalized_projection_ty<'tcx>(
)
}
fn normalize_canonicalized_weak_ty<'tcx>(
fn normalize_canonicalized_free_alias<'tcx>(
tcx: TyCtxt<'tcx>,
goal: CanonicalAliasGoal<'tcx>,
) -> Result<&'tcx Canonical<'tcx, QueryResponse<'tcx, NormalizationResult<'tcx>>>, NoSolution> {
debug!("normalize_canonicalized_weak_ty(goal={:#?})", goal);
debug!("normalize_canonicalized_free_alias(goal={:#?})", goal);
tcx.infer_ctxt().enter_canonical_trait_query(
&goal,

View file

@ -5,11 +5,13 @@ edition = "2024"
[dependencies]
# tidy-alphabetical-start
itertools = "0.12"
rustc_abi = { path = "../rustc_abi", optional = true }
rustc_data_structures = { path = "../rustc_data_structures" }
rustc_hir = { path = "../rustc_hir", optional = true }
rustc_middle = { path = "../rustc_middle", optional = true }
rustc_span = { path = "../rustc_span", optional = true }
smallvec = "1.8.1"
tracing = "0.1"
# tidy-alphabetical-end
@ -20,8 +22,3 @@ rustc = [
"dep:rustc_middle",
"dep:rustc_span",
]
[dev-dependencies]
# tidy-alphabetical-start
itertools = "0.12"
# tidy-alphabetical-end

View file

@ -1,8 +1,9 @@
use std::fmt;
use std::ops::RangeInclusive;
use std::sync::atomic::{AtomicU32, Ordering};
use super::{Byte, Ref, Tree, Uninhabited};
use crate::Map;
use crate::{Map, Set};
#[derive(PartialEq)]
#[cfg_attr(test, derive(Clone))]
@ -20,7 +21,7 @@ pub(crate) struct Transitions<R>
where
R: Ref,
{
byte_transitions: Map<Byte, State>,
byte_transitions: EdgeSet<State>,
ref_transitions: Map<R, State>,
}
@ -29,7 +30,7 @@ where
R: Ref,
{
fn default() -> Self {
Self { byte_transitions: Map::default(), ref_transitions: Map::default() }
Self { byte_transitions: EdgeSet::empty(), ref_transitions: Map::default() }
}
}
@ -56,15 +57,10 @@ where
{
#[cfg(test)]
pub(crate) fn bool() -> Self {
let mut transitions: Map<State, Transitions<R>> = Map::default();
let start = State::new();
let accept = State::new();
transitions.entry(start).or_default().byte_transitions.insert(Byte::Init(0x00), accept);
transitions.entry(start).or_default().byte_transitions.insert(Byte::Init(0x01), accept);
Self { transitions, start, accept }
Self::from_transitions(|accept| Transitions {
byte_transitions: EdgeSet::new(Byte::new(0x00..=0x01), accept),
ref_transitions: Map::default(),
})
}
pub(crate) fn unit() -> Self {
@ -76,23 +72,24 @@ where
}
pub(crate) fn from_byte(byte: Byte) -> Self {
let mut transitions: Map<State, Transitions<R>> = Map::default();
let start = State::new();
let accept = State::new();
transitions.entry(start).or_default().byte_transitions.insert(byte, accept);
Self { transitions, start, accept }
Self::from_transitions(|accept| Transitions {
byte_transitions: EdgeSet::new(byte, accept),
ref_transitions: Map::default(),
})
}
pub(crate) fn from_ref(r: R) -> Self {
let mut transitions: Map<State, Transitions<R>> = Map::default();
Self::from_transitions(|accept| Transitions {
byte_transitions: EdgeSet::empty(),
ref_transitions: [(r, accept)].into_iter().collect(),
})
}
fn from_transitions(f: impl FnOnce(State) -> Transitions<R>) -> Self {
let start = State::new();
let accept = State::new();
transitions.entry(start).or_default().ref_transitions.insert(r, accept);
Self { transitions, start, accept }
Self { transitions: [(start, f(accept))].into_iter().collect(), start, accept }
}
pub(crate) fn from_tree(tree: Tree<!, R>) -> Result<Self, Uninhabited> {
@ -132,13 +129,16 @@ where
for (source, transition) in other.transitions {
let fix_state = |state| if state == other.start { self.accept } else { state };
let entry = transitions.entry(fix_state(source)).or_default();
for (edge, destination) in transition.byte_transitions {
entry.byte_transitions.insert(edge, fix_state(destination));
}
for (edge, destination) in transition.ref_transitions {
entry.ref_transitions.insert(edge, fix_state(destination));
}
let byte_transitions = transition.byte_transitions.map_states(&fix_state);
let ref_transitions = transition
.ref_transitions
.into_iter()
.map(|(r, state)| (r, fix_state(state)))
.collect();
let old = transitions
.insert(fix_state(source), Transitions { byte_transitions, ref_transitions });
assert!(old.is_none());
}
Self { transitions, start, accept }
@ -170,67 +170,111 @@ where
let start = mapped((Some(a.start), Some(b.start)));
let mut transitions: Map<State, Transitions<R>> = Map::default();
let mut queue = vec![(Some(a.start), Some(b.start))];
let empty_transitions = Transitions::default();
while let Some((a_src, b_src)) = queue.pop() {
struct WorkQueue {
queue: Vec<(Option<State>, Option<State>)>,
// Track all entries ever enqueued to avoid duplicating work. This
// gives us a guarantee that a given (a_state, b_state) pair will
// only ever be visited once.
enqueued: Set<(Option<State>, Option<State>)>,
}
impl WorkQueue {
fn enqueue(&mut self, a_state: Option<State>, b_state: Option<State>) {
if self.enqueued.insert((a_state, b_state)) {
self.queue.push((a_state, b_state));
}
}
}
let mut queue = WorkQueue { queue: Vec::new(), enqueued: Set::default() };
queue.enqueue(Some(a.start), Some(b.start));
while let Some((a_src, b_src)) = queue.queue.pop() {
let src = mapped((a_src, b_src));
if src == accept {
// While it's possible to have a DFA whose accept state has
// out-edges, these do not affect the semantics of the DFA, and
// so there's no point in processing them. Continuing here also
// has the advantage of guaranteeing that we only ever process a
// given node in the output DFA once. In particular, with the
// exception of the accept state, we ensure that we only push a
// given node to the `queue` once. This allows the following
// code to assume that we're processing a node we've never
// processed before, which means we never need to merge two edge
// sets - we only ever need to construct a new edge set from
// whole cloth.
continue;
}
let a_transitions =
a_src.and_then(|a_src| a.transitions.get(&a_src)).unwrap_or(&empty_transitions);
let b_transitions =
b_src.and_then(|b_src| b.transitions.get(&b_src)).unwrap_or(&empty_transitions);
let byte_transitions =
a_transitions.byte_transitions.keys().chain(b_transitions.byte_transitions.keys());
for byte_transition in byte_transitions {
let a_dst = a_transitions.byte_transitions.get(byte_transition).copied();
let b_dst = b_transitions.byte_transitions.get(byte_transition).copied();
a_transitions.byte_transitions.union(&b_transitions.byte_transitions);
let byte_transitions = byte_transitions.map_states(|(a_dst, b_dst)| {
assert!(a_dst.is_some() || b_dst.is_some());
let src = mapped((a_src, b_src));
let dst = mapped((a_dst, b_dst));
transitions.entry(src).or_default().byte_transitions.insert(*byte_transition, dst);
if !transitions.contains_key(&dst) {
queue.push((a_dst, b_dst))
}
}
queue.enqueue(a_dst, b_dst);
mapped((a_dst, b_dst))
});
let ref_transitions =
a_transitions.ref_transitions.keys().chain(b_transitions.ref_transitions.keys());
for ref_transition in ref_transitions {
let a_dst = a_transitions.ref_transitions.get(ref_transition).copied();
let b_dst = b_transitions.ref_transitions.get(ref_transition).copied();
let ref_transitions = ref_transitions
.map(|ref_transition| {
let a_dst = a_transitions.ref_transitions.get(ref_transition).copied();
let b_dst = b_transitions.ref_transitions.get(ref_transition).copied();
assert!(a_dst.is_some() || b_dst.is_some());
assert!(a_dst.is_some() || b_dst.is_some());
let src = mapped((a_src, b_src));
let dst = mapped((a_dst, b_dst));
queue.enqueue(a_dst, b_dst);
(*ref_transition, mapped((a_dst, b_dst)))
})
.collect();
transitions.entry(src).or_default().ref_transitions.insert(*ref_transition, dst);
if !transitions.contains_key(&dst) {
queue.push((a_dst, b_dst))
}
}
let old = transitions.insert(src, Transitions { byte_transitions, ref_transitions });
// See `if src == accept { ... }` above. The comment there explains
// why this assert is valid.
assert_eq!(old, None);
}
Self { transitions, start, accept }
}
pub(crate) fn bytes_from(&self, start: State) -> Option<&Map<Byte, State>> {
Some(&self.transitions.get(&start)?.byte_transitions)
pub(crate) fn states_from(
&self,
state: State,
src_validity: RangeInclusive<u8>,
) -> impl Iterator<Item = (Byte, State)> {
self.transitions
.get(&state)
.map(move |t| t.byte_transitions.states_from(src_validity))
.into_iter()
.flatten()
}
pub(crate) fn byte_from(&self, start: State, byte: Byte) -> Option<State> {
self.transitions.get(&start)?.byte_transitions.get(&byte).copied()
pub(crate) fn get_uninit_edge_dst(&self, state: State) -> Option<State> {
let transitions = self.transitions.get(&state)?;
transitions.byte_transitions.get_uninit_edge_dst()
}
pub(crate) fn refs_from(&self, start: State) -> Option<&Map<R, State>> {
Some(&self.transitions.get(&start)?.ref_transitions)
pub(crate) fn bytes_from(&self, start: State) -> impl Iterator<Item = (Byte, State)> {
self.transitions
.get(&start)
.into_iter()
.flat_map(|transitions| transitions.byte_transitions.iter())
}
pub(crate) fn refs_from(&self, start: State) -> impl Iterator<Item = (R, State)> {
self.transitions
.get(&start)
.into_iter()
.flat_map(|transitions| transitions.ref_transitions.iter())
.map(|(r, s)| (*r, *s))
}
#[cfg(test)]
@ -241,15 +285,25 @@ where
) -> Self {
let start = State(start);
let accept = State(accept);
let mut transitions: Map<State, Transitions<R>> = Map::default();
let mut transitions: Map<State, Vec<(Byte, State)>> = Map::default();
for &(src, edge, dst) in edges {
let src = State(src);
let dst = State(dst);
let old = transitions.entry(src).or_default().byte_transitions.insert(edge.into(), dst);
assert!(old.is_none());
for (src, edge, dst) in edges.iter().copied() {
transitions.entry(State(src)).or_default().push((edge.into(), State(dst)));
}
let transitions = transitions
.into_iter()
.map(|(src, edges)| {
(
src,
Transitions {
byte_transitions: EdgeSet::from_edges(edges),
ref_transitions: Map::default(),
},
)
})
.collect();
Self { start, accept, transitions }
}
}
@ -277,3 +331,242 @@ where
writeln!(f, "}}")
}
}
use edge_set::EdgeSet;
mod edge_set {
use std::cmp;
use run::*;
use smallvec::{SmallVec, smallvec};
use super::*;
mod run {
use std::ops::{Range, RangeInclusive};
use super::*;
use crate::layout::Byte;
/// A logical set of edges.
///
/// A `Run` encodes one edge for every byte value in `start..=end`
/// pointing to `dst`.
#[derive(Eq, PartialEq, Copy, Clone, Debug)]
pub(super) struct Run<S> {
// `start` and `end` are both inclusive (ie, closed) bounds, as this
// is required in order to be able to store 0..=255. We provide
// setters and getters which operate on closed/open ranges, which
// are more intuitive and easier for performing offset math.
start: u8,
end: u8,
pub(super) dst: S,
}
impl<S> Run<S> {
pub(super) fn new(range: RangeInclusive<u8>, dst: S) -> Self {
Self { start: *range.start(), end: *range.end(), dst }
}
pub(super) fn from_inclusive_exclusive(range: Range<u16>, dst: S) -> Self {
Self {
start: range.start.try_into().unwrap(),
end: (range.end - 1).try_into().unwrap(),
dst,
}
}
pub(super) fn contains(&self, idx: u16) -> bool {
idx >= u16::from(self.start) && idx <= u16::from(self.end)
}
pub(super) fn as_inclusive_exclusive(&self) -> (u16, u16) {
(u16::from(self.start), u16::from(self.end) + 1)
}
pub(super) fn as_byte(&self) -> Byte {
Byte::new(self.start..=self.end)
}
pub(super) fn map_state<SS>(self, f: impl FnOnce(S) -> SS) -> Run<SS> {
let Run { start, end, dst } = self;
Run { start, end, dst: f(dst) }
}
/// Produces a new `Run` whose lower bound is the greater of
/// `self`'s existing lower bound and `lower_bound`.
pub(super) fn clamp_lower(self, lower_bound: u8) -> Self {
let Run { start, end, dst } = self;
Run { start: cmp::max(start, lower_bound), end, dst }
}
}
}
/// The set of outbound byte edges associated with a DFA node (not including
/// reference edges).
#[derive(Eq, PartialEq, Clone, Debug)]
pub(super) struct EdgeSet<S = State> {
// A sequence of runs stored in ascending order. Since the graph is a
// DFA, these must be non-overlapping with one another.
runs: SmallVec<[Run<S>; 1]>,
// The edge labeled with the uninit byte, if any.
//
// FIXME(@joshlf): Make `State` a `NonZero` so that this is NPO'd.
uninit: Option<S>,
}
impl<S> EdgeSet<S> {
pub(crate) fn new(byte: Byte, dst: S) -> Self {
match byte.range() {
Some(range) => Self { runs: smallvec![Run::new(range, dst)], uninit: None },
None => Self { runs: SmallVec::new(), uninit: Some(dst) },
}
}
pub(crate) fn empty() -> Self {
Self { runs: SmallVec::new(), uninit: None }
}
#[cfg(test)]
pub(crate) fn from_edges(mut edges: Vec<(Byte, S)>) -> Self
where
S: Ord,
{
edges.sort();
Self {
runs: edges
.into_iter()
.map(|(byte, state)| Run::new(byte.range().unwrap(), state))
.collect(),
uninit: None,
}
}
pub(crate) fn iter(&self) -> impl Iterator<Item = (Byte, S)>
where
S: Copy,
{
self.uninit
.map(|dst| (Byte::uninit(), dst))
.into_iter()
.chain(self.runs.iter().map(|run| (run.as_byte(), run.dst)))
}
pub(crate) fn states_from(
&self,
byte: RangeInclusive<u8>,
) -> impl Iterator<Item = (Byte, S)>
where
S: Copy,
{
// FIXME(@joshlf): Optimize this. A manual scan over `self.runs` may
// permit us to more efficiently discard runs which will not be
// produced by this iterator.
self.iter().filter(move |(o, _)| Byte::new(byte.clone()).transmutable_into(&o))
}
pub(crate) fn get_uninit_edge_dst(&self) -> Option<S>
where
S: Copy,
{
self.uninit
}
pub(crate) fn map_states<SS>(self, mut f: impl FnMut(S) -> SS) -> EdgeSet<SS> {
EdgeSet {
// NOTE: It appears as through `<Vec<_> as
// IntoIterator>::IntoIter` and `std::iter::Map` both implement
// `TrustedLen`, which in turn means that this `.collect()`
// allocates the correct number of elements once up-front [1].
//
// [1] https://doc.rust-lang.org/1.85.0/src/alloc/vec/spec_from_iter_nested.rs.html#47
runs: self.runs.into_iter().map(|run| run.map_state(&mut f)).collect(),
uninit: self.uninit.map(f),
}
}
/// Unions two edge sets together.
///
/// If `u = a.union(b)`, then for each byte value, `u` will have an edge
/// with that byte value and with the destination `(Some(_), None)`,
/// `(None, Some(_))`, or `(Some(_), Some(_))` depending on whether `a`,
/// `b`, or both have an edge with that byte value.
///
/// If neither `a` nor `b` have an edge with a particular byte value,
/// then no edge with that value will be present in `u`.
pub(crate) fn union(&self, other: &Self) -> EdgeSet<(Option<S>, Option<S>)>
where
S: Copy,
{
let uninit = match (self.uninit, other.uninit) {
(None, None) => None,
(s, o) => Some((s, o)),
};
let mut runs = SmallVec::new();
// Iterate over `self.runs` and `other.runs` simultaneously,
// advancing `idx` as we go. At each step, we advance `idx` as far
// as we can without crossing a run boundary in either `self.runs`
// or `other.runs`.
// INVARIANT: `idx < s[0].end && idx < o[0].end`.
let (mut s, mut o) = (self.runs.as_slice(), other.runs.as_slice());
let mut idx = 0u16;
while let (Some((s_run, s_rest)), Some((o_run, o_rest))) =
(s.split_first(), o.split_first())
{
let (s_start, s_end) = s_run.as_inclusive_exclusive();
let (o_start, o_end) = o_run.as_inclusive_exclusive();
// Compute `end` as the end of the current run (which starts
// with `idx`).
let (end, dst) = match (s_run.contains(idx), o_run.contains(idx)) {
// `idx` is in an existing run in both `s` and `o`, so `end`
// is equal to the smallest of the two ends of those runs.
(true, true) => (cmp::min(s_end, o_end), (Some(s_run.dst), Some(o_run.dst))),
// `idx` is in an existing run in `s`, but not in any run in
// `o`. `end` is either the end of the `s` run or the
// beginning of the next `o` run, whichever comes first.
(true, false) => (cmp::min(s_end, o_start), (Some(s_run.dst), None)),
// The inverse of the previous case.
(false, true) => (cmp::min(s_start, o_end), (None, Some(o_run.dst))),
// `idx` is not in a run in either `s` or `o`, so advance it
// to the beginning of the next run.
(false, false) => {
idx = cmp::min(s_start, o_start);
continue;
}
};
// FIXME(@joshlf): If this is contiguous with the previous run
// and has the same `dst`, just merge it into that run rather
// than adding a new one.
runs.push(Run::from_inclusive_exclusive(idx..end, dst));
idx = end;
if idx >= s_end {
s = s_rest;
}
if idx >= o_end {
o = o_rest;
}
}
// At this point, either `s` or `o` have been exhausted, so the
// remaining elements in the other slice are guaranteed to be
// non-overlapping. We can add all remaining runs to `runs` with no
// further processing.
if let Ok(idx) = u8::try_from(idx) {
let (slc, map) = if !s.is_empty() {
let map: fn(_) -> _ = |st| (Some(st), None);
(s, map)
} else {
let map: fn(_) -> _ = |st| (None, Some(st));
(o, map)
};
runs.extend(slc.iter().map(|run| run.clamp_lower(idx).map_state(map)));
}
EdgeSet { runs, uninit }
}
}
}

View file

@ -1,5 +1,6 @@
use std::fmt::{self, Debug};
use std::hash::Hash;
use std::ops::RangeInclusive;
pub(crate) mod tree;
pub(crate) use tree::Tree;
@ -10,18 +11,56 @@ pub(crate) use dfa::Dfa;
#[derive(Debug)]
pub(crate) struct Uninhabited;
/// An instance of a byte is either initialized to a particular value, or uninitialized.
#[derive(Hash, Eq, PartialEq, Clone, Copy)]
pub(crate) enum Byte {
Uninit,
Init(u8),
/// A range of byte values, or the uninit byte.
#[derive(Hash, Eq, PartialEq, Ord, PartialOrd, Clone, Copy)]
pub(crate) struct Byte {
// An inclusive-inclusive range. We use this instead of `RangeInclusive`
// because `RangeInclusive: !Copy`.
//
// `None` means uninit.
//
// FIXME(@joshlf): Optimize this representation. Some pairs of values (where
// `lo > hi`) are illegal, and we could use these to represent `None`.
range: Option<(u8, u8)>,
}
impl Byte {
fn new(range: RangeInclusive<u8>) -> Self {
Self { range: Some((*range.start(), *range.end())) }
}
fn from_val(val: u8) -> Self {
Self { range: Some((val, val)) }
}
pub(crate) fn uninit() -> Byte {
Byte { range: None }
}
/// Returns `None` if `self` is the uninit byte.
pub(crate) fn range(&self) -> Option<RangeInclusive<u8>> {
self.range.map(|(lo, hi)| lo..=hi)
}
/// Are any of the values in `self` transmutable into `other`?
///
/// Note two special cases: An uninit byte is only transmutable into another
/// uninit byte. Any byte is transmutable into an uninit byte.
pub(crate) fn transmutable_into(&self, other: &Byte) -> bool {
match (self.range, other.range) {
(None, None) => true,
(None, Some(_)) => false,
(Some(_), None) => true,
(Some((slo, shi)), Some((olo, ohi))) => slo <= ohi && olo <= shi,
}
}
}
impl fmt::Debug for Byte {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match &self {
Self::Uninit => f.write_str("??u8"),
Self::Init(b) => write!(f, "{b:#04x}u8"),
match self.range {
None => write!(f, "uninit"),
Some((lo, hi)) => write!(f, "{lo}..={hi}"),
}
}
}
@ -29,7 +68,7 @@ impl fmt::Debug for Byte {
#[cfg(test)]
impl From<u8> for Byte {
fn from(src: u8) -> Self {
Self::Init(src)
Self::from_val(src)
}
}
@ -62,6 +101,21 @@ impl Ref for ! {
}
}
#[cfg(test)]
impl<const N: usize> Ref for [(); N] {
fn min_align(&self) -> usize {
N
}
fn size(&self) -> usize {
N
}
fn is_mutable(&self) -> bool {
false
}
}
#[cfg(feature = "rustc")]
pub mod rustc {
use std::fmt::{self, Write};

View file

@ -54,22 +54,22 @@ where
/// A `Tree` containing a single, uninitialized byte.
pub(crate) fn uninit() -> Self {
Self::Byte(Byte::Uninit)
Self::Byte(Byte::uninit())
}
/// A `Tree` representing the layout of `bool`.
pub(crate) fn bool() -> Self {
Self::from_bits(0x00).or(Self::from_bits(0x01))
Self::Byte(Byte::new(0x00..=0x01))
}
/// A `Tree` whose layout matches that of a `u8`.
pub(crate) fn u8() -> Self {
Self::Alt((0u8..=255).map(Self::from_bits).collect())
Self::Byte(Byte::new(0x00..=0xFF))
}
/// A `Tree` whose layout accepts exactly the given bit pattern.
pub(crate) fn from_bits(bits: u8) -> Self {
Self::Byte(Byte::Init(bits))
Self::Byte(Byte::from_val(bits))
}
/// A `Tree` whose layout is a number of the given width.

View file

@ -1,8 +1,9 @@
// tidy-alphabetical-start
#![cfg_attr(test, feature(test))]
#![feature(never_type)]
// tidy-alphabetical-end
pub(crate) use rustc_data_structures::fx::FxIndexMap as Map;
pub(crate) use rustc_data_structures::fx::{FxIndexMap as Map, FxIndexSet as Set};
pub mod layout;
mod maybe_transmutable;

View file

@ -1,10 +1,14 @@
use std::rc::Rc;
use std::{cmp, iter};
use itertools::Either;
use tracing::{debug, instrument, trace};
pub(crate) mod query_context;
#[cfg(test)]
mod tests;
use crate::layout::{self, Byte, Def, Dfa, Ref, Tree, Uninhabited, dfa};
use crate::layout::{self, Byte, Def, Dfa, Ref, Tree, dfa};
use crate::maybe_transmutable::query_context::QueryContext;
use crate::{Answer, Condition, Map, Reason};
@ -111,7 +115,7 @@ where
// the `src` type do not exist.
let src = match Dfa::from_tree(src) {
Ok(src) => src,
Err(Uninhabited) => return Answer::Yes,
Err(layout::Uninhabited) => return Answer::Yes,
};
// Convert `dst` from a tree-based representation to an DFA-based
@ -122,7 +126,7 @@ where
// free of safety invariants.
let dst = match Dfa::from_tree(dst) {
Ok(dst) => dst,
Err(Uninhabited) => return Answer::No(Reason::DstMayHaveSafetyInvariants),
Err(layout::Uninhabited) => return Answer::No(Reason::DstMayHaveSafetyInvariants),
};
MaybeTransmutableQuery { src, dst, assume, context }.answer()
@ -174,8 +178,8 @@ where
// are able to safely transmute, even with truncation.
Answer::Yes
} else if src_state == self.src.accept {
// extension: `size_of(Src) >= size_of(Dst)`
if let Some(dst_state_prime) = self.dst.byte_from(dst_state, Byte::Uninit) {
// extension: `size_of(Src) <= size_of(Dst)`
if let Some(dst_state_prime) = self.dst.get_uninit_edge_dst(dst_state) {
self.answer_memo(cache, src_state, dst_state_prime)
} else {
Answer::No(Reason::DstIsTooBig)
@ -193,26 +197,120 @@ where
Quantifier::ForAll
};
let c = &core::cell::RefCell::new(&mut *cache);
let bytes_answer = src_quantifier.apply(
// for each of the byte transitions out of the `src_state`...
self.src.bytes_from(src_state).unwrap_or(&Map::default()).into_iter().map(
|(&src_validity, &src_state_prime)| {
// ...try to find a matching transition out of `dst_state`.
if let Some(dst_state_prime) =
self.dst.byte_from(dst_state, src_validity)
{
self.answer_memo(cache, src_state_prime, dst_state_prime)
} else if let Some(dst_state_prime) =
// otherwise, see if `dst_state` has any outgoing `Uninit` transitions
// (any init byte is a valid uninit byte)
self.dst.byte_from(dst_state, Byte::Uninit)
{
self.answer_memo(cache, src_state_prime, dst_state_prime)
} else {
// otherwise, we've exhausted our options.
// the DFAs, from this point onwards, are bit-incompatible.
Answer::No(Reason::DstIsBitIncompatible)
// for each of the byte set transitions out of the `src_state`...
self.src.bytes_from(src_state).flat_map(
move |(src_validity, src_state_prime)| {
// ...find all matching transitions out of `dst_state`.
let Some(src_validity) = src_validity.range() else {
// NOTE: We construct an iterator here rather
// than just computing the value directly (via
// `self.answer_memo`) so that, if the iterator
// we produce from this branch is
// short-circuited, we don't waste time
// computing `self.answer_memo` unnecessarily.
// That will specifically happen if
// `src_quantifier == Quantifier::ThereExists`,
// since we emit `Answer::Yes` first (before
// chaining `answer_iter`).
let answer_iter = if let Some(dst_state_prime) =
self.dst.get_uninit_edge_dst(dst_state)
{
Either::Left(iter::once_with(move || {
let mut c = c.borrow_mut();
self.answer_memo(&mut *c, src_state_prime, dst_state_prime)
}))
} else {
Either::Right(iter::once(Answer::No(
Reason::DstIsBitIncompatible,
)))
};
// When `answer == Answer::No(...)`, there are
// two cases to consider:
// - If `assume.validity`, then we should
// succeed because the user is responsible for
// ensuring that the *specific* byte value
// appearing at runtime is valid for the
// destination type. When `assume.validity`,
// `src_quantifier ==
// Quantifier::ThereExists`, so adding an
// `Answer::Yes` has the effect of ensuring
// that the "there exists" is always
// satisfied.
// - If `!assume.validity`, then we should fail.
// In this case, `src_quantifier ==
// Quantifier::ForAll`, so adding an
// `Answer::Yes` has no effect.
return Either::Left(iter::once(Answer::Yes).chain(answer_iter));
};
#[derive(Copy, Clone, Debug)]
struct Accum {
// The number of matching byte edges that we
// have found in the destination so far.
sum: usize,
found_uninit: bool,
}
let accum1 = Rc::new(std::cell::Cell::new(Accum {
sum: 0,
found_uninit: false,
}));
let accum2 = Rc::clone(&accum1);
let sv = src_validity.clone();
let update_accum = move |mut accum: Accum, dst_validity: Byte| {
if let Some(dst_validity) = dst_validity.range() {
// Only add the part of `dst_validity` that
// overlaps with `src_validity`.
let start = cmp::max(*sv.start(), *dst_validity.start());
let end = cmp::min(*sv.end(), *dst_validity.end());
// We add 1 here to account for the fact
// that `end` is an inclusive bound.
accum.sum += 1 + usize::from(end.saturating_sub(start));
} else {
accum.found_uninit = true;
}
accum
};
let answers = self
.dst
.states_from(dst_state, src_validity.clone())
.map(move |(dst_validity, dst_state_prime)| {
let mut c = c.borrow_mut();
accum1.set(update_accum(accum1.get(), dst_validity));
let answer =
self.answer_memo(&mut *c, src_state_prime, dst_state_prime);
answer
})
.chain(
iter::once_with(move || {
let src_validity_len = usize::from(*src_validity.end())
- usize::from(*src_validity.start())
+ 1;
let accum = accum2.get();
// If this condition is false, then
// there are some byte values in the
// source which have no corresponding
// transition in the destination DFA. In
// that case, we add a `No` to our list
// of answers. When
// `!self.assume.validity`, this will
// cause the query to fail.
if accum.found_uninit || accum.sum == src_validity_len {
None
} else {
Some(Answer::No(Reason::DstIsBitIncompatible))
}
})
.flatten(),
);
Either::Right(answers)
},
),
);
@ -235,48 +333,38 @@ where
let refs_answer = src_quantifier.apply(
// for each reference transition out of `src_state`...
self.src.refs_from(src_state).unwrap_or(&Map::default()).into_iter().map(
|(&src_ref, &src_state_prime)| {
// ...there exists a reference transition out of `dst_state`...
Quantifier::ThereExists.apply(
self.dst
.refs_from(dst_state)
.unwrap_or(&Map::default())
.into_iter()
.map(|(&dst_ref, &dst_state_prime)| {
if !src_ref.is_mutable() && dst_ref.is_mutable() {
Answer::No(Reason::DstIsMoreUnique)
} else if !self.assume.alignment
&& src_ref.min_align() < dst_ref.min_align()
{
Answer::No(Reason::DstHasStricterAlignment {
src_min_align: src_ref.min_align(),
dst_min_align: dst_ref.min_align(),
})
} else if dst_ref.size() > src_ref.size() {
Answer::No(Reason::DstRefIsTooBig {
src: src_ref,
dst: dst_ref,
})
} else {
// ...such that `src` is transmutable into `dst`, if
// `src_ref` is transmutable into `dst_ref`.
and(
Answer::If(Condition::IfTransmutable {
src: src_ref,
dst: dst_ref,
}),
self.answer_memo(
cache,
src_state_prime,
dst_state_prime,
),
)
}
}),
)
},
),
self.src.refs_from(src_state).map(|(src_ref, src_state_prime)| {
// ...there exists a reference transition out of `dst_state`...
Quantifier::ThereExists.apply(self.dst.refs_from(dst_state).map(
|(dst_ref, dst_state_prime)| {
if !src_ref.is_mutable() && dst_ref.is_mutable() {
Answer::No(Reason::DstIsMoreUnique)
} else if !self.assume.alignment
&& src_ref.min_align() < dst_ref.min_align()
{
Answer::No(Reason::DstHasStricterAlignment {
src_min_align: src_ref.min_align(),
dst_min_align: dst_ref.min_align(),
})
} else if dst_ref.size() > src_ref.size() {
Answer::No(Reason::DstRefIsTooBig {
src: src_ref,
dst: dst_ref,
})
} else {
// ...such that `src` is transmutable into `dst`, if
// `src_ref` is transmutable into `dst_ref`.
and(
Answer::If(Condition::IfTransmutable {
src: src_ref,
dst: dst_ref,
}),
self.answer_memo(cache, src_state_prime, dst_state_prime),
)
}
},
))
}),
);
if self.assume.validity {

View file

@ -8,9 +8,17 @@ pub(crate) trait QueryContext {
#[cfg(test)]
pub(crate) mod test {
use std::marker::PhantomData;
use super::QueryContext;
pub(crate) struct UltraMinimal;
// A minimal test `QueryContext`; `R` is the reference type used by
// the context (it defaults to `!`, i.e. "no references occur").
pub(crate) struct UltraMinimal<R = !>(PhantomData<R>);

// Implemented by hand: `#[derive(Default)]` would add an `R: Default`
// bound, which the default `R = !` cannot satisfy.
impl<R> Default for UltraMinimal<R> {
    fn default() -> Self {
        Self(PhantomData)
    }
}
#[derive(Debug, Hash, Eq, PartialEq, Clone, Copy)]
pub(crate) enum Def {
@ -24,9 +32,9 @@ pub(crate) mod test {
}
}
impl QueryContext for UltraMinimal {
impl<R: crate::layout::Ref> QueryContext for UltraMinimal<R> {
type Def = Def;
type Ref = !;
type Ref = R;
}
}

View file

@ -1,3 +1,5 @@
extern crate test;
use itertools::Itertools;
use super::query_context::test::{Def, UltraMinimal};
@ -12,15 +14,25 @@ trait Representation {
impl Representation for Tree {
fn is_transmutable(src: Self, dst: Self, assume: Assume) -> Answer<!> {
crate::maybe_transmutable::MaybeTransmutableQuery::new(src, dst, assume, UltraMinimal)
.answer()
crate::maybe_transmutable::MaybeTransmutableQuery::new(
src,
dst,
assume,
UltraMinimal::default(),
)
.answer()
}
}
impl Representation for Dfa {
fn is_transmutable(src: Self, dst: Self, assume: Assume) -> Answer<!> {
crate::maybe_transmutable::MaybeTransmutableQuery::new(src, dst, assume, UltraMinimal)
.answer()
crate::maybe_transmutable::MaybeTransmutableQuery::new(
src,
dst,
assume,
UltraMinimal::default(),
)
.answer()
}
}
@ -89,6 +101,36 @@ mod safety {
}
}
mod size {
    use super::*;

    /// Growing a value must be rejected with `DstIsTooBig`, and shrinking
    /// must be accepted, regardless of which checks the caller assumes away.
    #[test]
    fn size() {
        let one_byte = Tree::number(1);
        let two_bytes = Tree::number(2);
        for &alignment in &[false, true] {
            for &lifetimes in &[false, true] {
                for &safety in &[false, true] {
                    for &validity in &[false, true] {
                        let assume = Assume { alignment, lifetimes, safety, validity };
                        assert_eq!(
                            is_transmutable(&one_byte, &two_bytes, assume),
                            Answer::No(Reason::DstIsTooBig),
                            "assume: {assume:?}"
                        );
                        assert_eq!(
                            is_transmutable(&two_bytes, &one_byte, assume),
                            Answer::Yes,
                            "assume: {assume:?}"
                        );
                    }
                }
            }
        }
    }
}
mod bool {
use super::*;
@ -112,6 +154,27 @@ mod bool {
);
}
/// `bool -> u8` always holds; `u8 -> bool` holds only when the caller
/// assumes bit validity.
#[test]
fn transmute_u8() {
    let bool = &Tree::bool();
    let u8 = &Tree::u8();
    let cases = [
        (bool, u8, false, Answer::Yes),
        (bool, u8, true, Answer::Yes),
        (u8, bool, false, Answer::No(Reason::DstIsBitIncompatible)),
        (u8, bool, true, Answer::Yes),
    ];
    for (src, dst, validity, expected) in cases {
        let assume = Assume { validity, ..Assume::default() };
        assert_eq!(is_transmutable(src, dst, assume), expected);
    }
}
#[test]
fn should_permit_validity_expansion_and_reject_contraction() {
let b0 = layout::Tree::<Def, !>::from_bits(0);
@ -175,6 +238,62 @@ mod bool {
}
}
mod uninit {
    use super::*;

    /// An uninit byte becomes a `u8` only under `assume.validity`;
    /// a `u8` always becomes an uninit byte.
    #[test]
    fn size() {
        let uninit = Tree::uninit();
        let init = Tree::u8();
        for &alignment in &[false, true] {
            for &lifetimes in &[false, true] {
                for &safety in &[false, true] {
                    for &validity in &[false, true] {
                        let assume = Assume { alignment, lifetimes, safety, validity };
                        let want = match validity {
                            true => Answer::Yes,
                            false => Answer::No(Reason::DstIsBitIncompatible),
                        };
                        assert_eq!(
                            is_transmutable(&uninit, &init, assume),
                            want,
                            "assume: {assume:?}"
                        );
                        assert_eq!(
                            is_transmutable(&init, &uninit, assume),
                            Answer::Yes,
                            "assume: {assume:?}"
                        );
                    }
                }
            }
        }
    }
}
mod alt {
    use super::*;
    use crate::Answer;

    /// An `Alt` layout must be transmutable into itself.
    #[test]
    fn should_permit_identity_transmutation() {
        type Tree = layout::Tree<Def, !>;

        let first = Tree::Seq(vec![Tree::from_bits(0), Tree::from_bits(0)]);
        let second = Tree::Seq(vec![Tree::bool(), Tree::from_bits(1)]);
        let layout = Tree::Alt(vec![first, second]);
        let query = crate::maybe_transmutable::MaybeTransmutableQuery::new(
            layout.clone(),
            layout.clone(),
            crate::Assume::default(),
            UltraMinimal::default(),
        );
        let answer = query.answer();
        assert_eq!(answer, Answer::Yes, "layout:{:#?}", layout);
    }
}
mod union {
use super::*;
@ -203,3 +322,59 @@ mod union {
assert_eq!(is_transmutable(&t, &u, Assume::default()), Answer::Yes);
}
}
mod r#ref {
    use super::*;

    /// A layout containing a reference is transmutable into itself,
    /// conditioned on the referents being mutually transmutable.
    #[test]
    fn should_permit_identity_transmutation() {
        type Tree = crate::layout::Tree<Def, [(); 1]>;

        let layout = Tree::Seq(vec![Tree::from_bits(0), Tree::Ref([()])]);

        let query = crate::maybe_transmutable::MaybeTransmutableQuery::new(
            layout.clone(),
            layout,
            Assume::default(),
            UltraMinimal::default(),
        );
        let answer = query.answer();
        assert_eq!(answer, Answer::If(crate::Condition::IfTransmutable { src: [()], dst: [()] }));
    }
}
mod benches {
    use std::hint::black_box;

    use test::Bencher;

    use super::*;

    /// Measures DFA construction from a pruned 8-byte number tree.
    #[bench]
    fn bench_dfa_from_tree(b: &mut Bencher) {
        let tree = black_box(Tree::number(8).prune(&|_| false));
        b.iter(|| {
            let _ = black_box(Dfa::from_tree(tree.clone()));
        })
    }

    /// Measures answering an identity transmutation query over a DFA.
    #[bench]
    fn bench_transmute(b: &mut Bencher) {
        let dfa = black_box(Dfa::from_tree(Tree::number(8).prune(&|_| false)).unwrap());
        b.iter(|| {
            let query = crate::maybe_transmutable::MaybeTransmutableQuery::new(
                dfa.clone(),
                dfa.clone(),
                Assume::default(),
                UltraMinimal::default(),
            );
            let answer = black_box(query.answer());
            assert_eq!(answer, Answer::Yes);
        })
    }
}

View file

@ -32,6 +32,7 @@ mod needs_drop;
mod opaque_types;
mod representability;
pub mod sig_types;
mod stalled_generators;
mod structural_match;
mod ty;
@ -50,4 +51,5 @@ pub fn provide(providers: &mut Providers) {
ty::provide(providers);
instance::provide(providers);
structural_match::provide(providers);
stalled_generators::provide(providers);
}

View file

@ -223,7 +223,7 @@ impl<'tcx> TypeVisitor<TyCtxt<'tcx>> for OpaqueTypeCollector<'tcx> {
}
// Skips type aliases, as they are meant to be transparent.
// FIXME(type_alias_impl_trait): can we require mentioning nested type aliases explicitly?
ty::Alias(ty::Weak, alias_ty) if alias_ty.def_id.is_local() => {
ty::Alias(ty::Free, alias_ty) if alias_ty.def_id.is_local() => {
self.tcx
.type_of(alias_ty.def_id)
.instantiate(self.tcx, alias_ty.args)

Some files were not shown because too many files have changed in this diff Show more