Auto merge of #137790 - jieyouxu:rollup-jrib20h, r=jieyouxu

Rollup of 9 pull requests

Successful merges:

 - #136424 (fix: overflowing bin hex)
 - #136824 (solver cycles are coinductive once they have one coinductive step)
 - #137220 (Support `rust.channel = "auto-detect"`)
 - #137712 (Clean up TypeckResults::extract_binding_mode)
 - #137713 (Fix enzyme build errors)
 - #137748 (Fix method name in `TyCtxt::hir_crate()` documentation)
 - #137778 (update enzyme to handle range metadata)
 - #137780 (Fix typo in query expansion documentation)
 - #137788 (Bump `rustc_{codegen_ssa,llvm}` `cc` to 1.2.16 to fix `x86` Windows jobs on newest Windows SDK)

r? `@ghost`
`@rustbot` modify labels: rollup
This commit is contained in:
bors 2025-02-28 14:45:20 +00:00
commit 60493b8973
60 changed files with 792 additions and 552 deletions

View file

@ -407,9 +407,9 @@ version = "0.1.0"
[[package]]
name = "cc"
version = "1.2.13"
version = "1.2.16"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c7777341816418c02e033934a09f20dc0ccaf65a5201ef8a450ae0105a573fda"
checksum = "be714c154be609ec7f5dad223a33bf1482fff90472de28f7362806e6d4832b8c"
dependencies = [
"shlex",
]

View file

@ -3,7 +3,6 @@
//! configs (autodiff enabled or disabled), so we have to add cfg's to each import.
//! FIXME(ZuseZ4): Remove this once we have a smarter linter.
#[cfg(llvm_enzyme)]
mod llvm_enzyme {
use std::str::FromStr;
use std::string::String;
@ -130,10 +129,14 @@ mod llvm_enzyme {
meta_item: &ast::MetaItem,
mut item: Annotatable,
) -> Vec<Annotatable> {
if cfg!(not(llvm_enzyme)) {
ecx.sess.dcx().emit_err(errors::AutoDiffSupportNotBuild { span: meta_item.span });
return vec![item];
}
let dcx = ecx.sess.dcx();
// first get the annotable item:
let (sig, is_impl): (FnSig, bool) = match &item {
Annotatable::Item(ref iitem) => {
Annotatable::Item(iitem) => {
let sig = match &iitem.kind {
ItemKind::Fn(box ast::Fn { sig, .. }) => sig,
_ => {
@ -143,7 +146,7 @@ mod llvm_enzyme {
};
(sig.clone(), false)
}
Annotatable::AssocItem(ref assoc_item, _) => {
Annotatable::AssocItem(assoc_item, _) => {
let sig = match &assoc_item.kind {
ast::AssocItemKind::Fn(box ast::Fn { sig, .. }) => sig,
_ => {
@ -171,8 +174,8 @@ mod llvm_enzyme {
let sig_span = ecx.with_call_site_ctxt(sig.span);
let (vis, primal) = match &item {
Annotatable::Item(ref iitem) => (iitem.vis.clone(), iitem.ident.clone()),
Annotatable::AssocItem(ref assoc_item, _) => {
Annotatable::Item(iitem) => (iitem.vis.clone(), iitem.ident.clone()),
Annotatable::AssocItem(assoc_item, _) => {
(assoc_item.vis.clone(), assoc_item.ident.clone())
}
_ => {
@ -801,25 +804,4 @@ mod llvm_enzyme {
}
}
#[cfg(not(llvm_enzyme))]
mod ad_fallback {
use rustc_ast::ast;
use rustc_expand::base::{Annotatable, ExtCtxt};
use rustc_span::Span;
use crate::errors;
pub(crate) fn expand(
ecx: &mut ExtCtxt<'_>,
_expand_span: Span,
meta_item: &ast::MetaItem,
item: Annotatable,
) -> Vec<Annotatable> {
ecx.sess.dcx().emit_err(errors::AutoDiffSupportNotBuild { span: meta_item.span });
return vec![item];
}
}
#[cfg(not(llvm_enzyme))]
pub(crate) use ad_fallback::expand;
#[cfg(llvm_enzyme)]
pub(crate) use llvm_enzyme::expand;

View file

@ -144,10 +144,8 @@ pub(crate) struct AllocMustStatics {
pub(crate) span: Span,
}
#[cfg(llvm_enzyme)]
pub(crate) use autodiff::*;
#[cfg(llvm_enzyme)]
mod autodiff {
use super::*;
#[derive(Diagnostic)]
@ -203,9 +201,7 @@ mod autodiff {
}
}
#[cfg(not(llvm_enzyme))]
pub(crate) use ad_fallback::*;
#[cfg(not(llvm_enzyme))]
mod ad_fallback {
use super::*;
#[derive(Diagnostic)]

View file

@ -42,10 +42,10 @@ pub use self::Enzyme_AD::*;
#[cfg(llvm_enzyme)]
pub mod Enzyme_AD {
use libc::c_void;
extern "C" {
unsafe extern "C" {
pub fn EnzymeSetCLBool(arg1: *mut ::std::os::raw::c_void, arg2: u8);
}
extern "C" {
unsafe extern "C" {
static mut EnzymePrintPerf: c_void;
static mut EnzymePrintActivity: c_void;
static mut EnzymePrintType: c_void;

View file

@ -11,7 +11,7 @@ bitflags = "2.4.1"
bstr = "1.11.3"
# Pinned so `cargo update` bumps don't cause breakage. Please also update the
# `cc` in `rustc_llvm` if you update the `cc` here.
cc = "=1.2.13"
cc = "=1.2.16"
either = "1.5.0"
itertools = "0.12"
pathdiff = "0.2.0"

View file

@ -1683,6 +1683,12 @@ pub enum PatKind<'hir> {
/// The `HirId` is the canonical ID for the variable being bound,
/// (e.g., in `Ok(x) | Err(x)`, both `x` use the same canonical ID),
/// which is the pattern ID of the first `x`.
///
/// The `BindingMode` is what's provided by the user, before match
/// ergonomics are applied. For the binding mode actually in use,
/// see [`TypeckResults::extract_binding_mode`].
///
/// [`TypeckResults::extract_binding_mode`]: ../../rustc_middle/ty/struct.TypeckResults.html#method.extract_binding_mode
Binding(BindingMode, HirId, Ident, Option<&'hir Pat<'hir>>),
/// A struct or struct variant pattern (e.g., `Variant {x, y, ..}`).

View file

@ -895,48 +895,44 @@ impl<'tcx, Cx: TypeInformationCtxt<'tcx>, D: Delegate<'tcx>> ExprUseVisitor<'tcx
match pat.kind {
PatKind::Binding(_, canonical_id, ..) => {
debug!("walk_pat: binding place={:?} pat={:?}", place, pat);
if let Some(bm) = self
let bm = self
.cx
.typeck_results()
.extract_binding_mode(tcx.sess, pat.hir_id, pat.span)
{
debug!("walk_pat: pat.hir_id={:?} bm={:?}", pat.hir_id, bm);
.extract_binding_mode(tcx.sess, pat.hir_id, pat.span);
debug!("walk_pat: pat.hir_id={:?} bm={:?}", pat.hir_id, bm);
// pat_ty: the type of the binding being produced.
let pat_ty = self.node_ty(pat.hir_id)?;
debug!("walk_pat: pat_ty={:?}", pat_ty);
// pat_ty: the type of the binding being produced.
let pat_ty = self.node_ty(pat.hir_id)?;
debug!("walk_pat: pat_ty={:?}", pat_ty);
let def = Res::Local(canonical_id);
if let Ok(ref binding_place) =
self.cat_res(pat.hir_id, pat.span, pat_ty, def)
{
self.delegate.borrow_mut().bind(binding_place, binding_place.hir_id);
let def = Res::Local(canonical_id);
if let Ok(ref binding_place) = self.cat_res(pat.hir_id, pat.span, pat_ty, def) {
self.delegate.borrow_mut().bind(binding_place, binding_place.hir_id);
}
// Subtle: MIR desugaring introduces immutable borrows for each pattern
// binding when lowering pattern guards to ensure that the guard does not
// modify the scrutinee.
if has_guard {
self.delegate.borrow_mut().borrow(
place,
discr_place.hir_id,
BorrowKind::Immutable,
);
}
// It is also a borrow or copy/move of the value being matched.
// In cases of patterns like `let pat = upvar`, don't use the span
// of the pattern, as this just looks confusing, instead use the span
// of the discriminant.
match bm.0 {
hir::ByRef::Yes(m) => {
let bk = ty::BorrowKind::from_mutbl(m);
self.delegate.borrow_mut().borrow(place, discr_place.hir_id, bk);
}
// Subtle: MIR desugaring introduces immutable borrows for each pattern
// binding when lowering pattern guards to ensure that the guard does not
// modify the scrutinee.
if has_guard {
self.delegate.borrow_mut().borrow(
place,
discr_place.hir_id,
BorrowKind::Immutable,
);
}
// It is also a borrow or copy/move of the value being matched.
// In cases of patterns like `let pat = upvar`, don't use the span
// of the pattern, as this just looks confusing, instead use the span
// of the discriminant.
match bm.0 {
hir::ByRef::Yes(m) => {
let bk = ty::BorrowKind::from_mutbl(m);
self.delegate.borrow_mut().borrow(place, discr_place.hir_id, bk);
}
hir::ByRef::No => {
debug!("walk_pat binding consuming pat");
self.consume_or_copy(place, discr_place.hir_id);
}
hir::ByRef::No => {
debug!("walk_pat binding consuming pat");
self.consume_or_copy(place, discr_place.hir_id);
}
}
}

View file

@ -319,11 +319,8 @@ impl<'cx, 'tcx> Visitor<'tcx> for WritebackCx<'cx, 'tcx> {
match p.kind {
hir::PatKind::Binding(..) => {
let typeck_results = self.fcx.typeck_results.borrow();
if let Some(bm) =
typeck_results.extract_binding_mode(self.tcx().sess, p.hir_id, p.span)
{
self.typeck_results.pat_binding_modes_mut().insert(p.hir_id, bm);
}
let bm = typeck_results.extract_binding_mode(self.tcx().sess, p.hir_id, p.span);
self.typeck_results.pat_binding_modes_mut().insert(p.hir_id, bm);
}
hir::PatKind::Struct(_, fields, _) => {
for field in fields {

View file

@ -186,7 +186,7 @@ fn report_bin_hex_error(
lit_no_suffix,
negative_val: actually.clone(),
int_ty: int_ty.name_str(),
uint_ty: int_ty.to_unsigned().name_str(),
uint_ty: Integer::fit_unsigned(val).uint_ty_str(),
})
})
.flatten();

View file

@ -12,5 +12,5 @@ libc = "0.2.73"
# tidy-alphabetical-start
# Pinned so `cargo update` bumps don't cause breakage. Please also update the
# pinned `cc` in `rustc_codegen_ssa` if you update `cc` here.
cc = "=1.2.13"
cc = "=1.2.16"
# tidy-alphabetical-end

View file

@ -143,11 +143,11 @@ rustc_queries! {
/// Represents crate as a whole (as distinct from the top-level crate module).
///
/// If you call `hir_crate` (e.g., indirectly by calling `tcx.hir().krate()`),
/// If you call `hir_crate` (e.g., indirectly by calling `tcx.hir_crate()`),
/// we will have to assume that any change means that you need to be recompiled.
/// This is because the `hir_crate` query gives you access to all other items.
/// To avoid this fate, do not call `tcx.hir().krate()`; instead,
/// prefer wrappers like `tcx.visit_all_items_in_krate()`.
/// To avoid this fate, do not call `tcx.hir_crate()`; instead,
/// prefer wrappers like [`TyCtxt::hir_visit_all_item_likes_in_crate`].
query hir_crate(key: ()) -> &'tcx Crate<'tcx> {
arena_cache
eval_always

View file

@ -258,7 +258,7 @@ macro_rules! query_if_arena {
};
}
/// If `separate_provide_if_extern`, then the key can be projected to its
/// If `separate_provide_extern`, then the key can be projected to its
/// local key via `<$K as AsLocalKey>::LocalKey`.
macro_rules! local_key_if_separate_extern {
([] $($K:tt)*) => {

View file

@ -594,6 +594,10 @@ impl<'tcx> Interner for TyCtxt<'tcx> {
self.trait_is_auto(trait_def_id)
}
fn trait_is_coinductive(self, trait_def_id: DefId) -> bool {
self.trait_is_coinductive(trait_def_id)
}
fn trait_is_alias(self, trait_def_id: DefId) -> bool {
self.trait_is_alias(trait_def_id)
}

View file

@ -4,7 +4,6 @@ use rustc_data_structures::intern::Interned;
use rustc_hir::def_id::DefId;
use rustc_macros::{HashStable, extension};
use rustc_type_ir as ir;
use tracing::instrument;
use crate::ty::{
self, DebruijnIndex, EarlyBinder, PredicatePolarity, Ty, TyCtxt, TypeFlags, Upcast, UpcastFrom,
@ -51,10 +50,6 @@ impl<'tcx> rustc_type_ir::inherent::Predicate<TyCtxt<'tcx>> for Predicate<'tcx>
self.as_clause()
}
fn is_coinductive(self, interner: TyCtxt<'tcx>) -> bool {
self.is_coinductive(interner)
}
fn allow_normalization(self) -> bool {
self.allow_normalization()
}
@ -119,17 +114,6 @@ impl<'tcx> Predicate<'tcx> {
Some(tcx.mk_predicate(kind))
}
#[instrument(level = "debug", skip(tcx), ret)]
pub fn is_coinductive(self, tcx: TyCtxt<'tcx>) -> bool {
match self.kind().skip_binder() {
ty::PredicateKind::Clause(ty::ClauseKind::Trait(data)) => {
tcx.trait_is_coinductive(data.def_id())
}
ty::PredicateKind::Clause(ty::ClauseKind::WellFormed(_)) => true,
_ => false,
}
}
/// Whether this projection can be soundly normalized.
///
/// Wf predicates must not be normalized, as normalization

View file

@ -394,8 +394,10 @@ impl<'tcx> TypeckResults<'tcx> {
matches!(self.type_dependent_defs().get(expr.hir_id), Some(Ok((DefKind::AssocFn, _))))
}
pub fn extract_binding_mode(&self, s: &Session, id: HirId, sp: Span) -> Option<BindingMode> {
self.pat_binding_modes().get(id).copied().or_else(|| {
/// Returns the computed binding mode for a `PatKind::Binding` pattern
/// (after match ergonomics adjustments).
pub fn extract_binding_mode(&self, s: &Session, id: HirId, sp: Span) -> BindingMode {
self.pat_binding_modes().get(id).copied().unwrap_or_else(|| {
s.dcx().span_bug(sp, "missing binding mode");
})
}

View file

@ -21,7 +21,7 @@ use tracing::{debug, instrument, trace};
use crate::canonicalizer::Canonicalizer;
use crate::delegate::SolverDelegate;
use crate::resolve::EagerResolver;
use crate::solve::eval_ctxt::NestedGoals;
use crate::solve::eval_ctxt::{CurrentGoalKind, NestedGoals};
use crate::solve::{
CanonicalInput, CanonicalResponse, Certainty, EvalCtxt, ExternalConstraintsData, Goal,
MaybeCause, NestedNormalizationGoals, NoSolution, PredefinedOpaquesData, QueryInput,
@ -109,18 +109,22 @@ where
//
// As we return all ambiguous nested goals, we can ignore the certainty returned
// by `try_evaluate_added_goals()`.
let (certainty, normalization_nested_goals) = if self.is_normalizes_to_goal {
let NestedGoals { normalizes_to_goals, goals } = std::mem::take(&mut self.nested_goals);
if cfg!(debug_assertions) {
assert!(normalizes_to_goals.is_empty());
if goals.is_empty() {
assert!(matches!(goals_certainty, Certainty::Yes));
let (certainty, normalization_nested_goals) = match self.current_goal_kind {
CurrentGoalKind::NormalizesTo => {
let NestedGoals { normalizes_to_goals, goals } =
std::mem::take(&mut self.nested_goals);
if cfg!(debug_assertions) {
assert!(normalizes_to_goals.is_empty());
if goals.is_empty() {
assert!(matches!(goals_certainty, Certainty::Yes));
}
}
(certainty, NestedNormalizationGoals(goals))
}
CurrentGoalKind::Misc | CurrentGoalKind::CoinductiveTrait => {
let certainty = certainty.unify_with(goals_certainty);
(certainty, NestedNormalizationGoals::empty())
}
(certainty, NestedNormalizationGoals(goals))
} else {
let certainty = certainty.unify_with(goals_certainty);
(certainty, NestedNormalizationGoals::empty())
};
if let Certainty::Maybe(cause @ MaybeCause::Overflow { .. }) = certainty {
@ -163,19 +167,24 @@ where
// ambiguous alias types which get replaced with fresh inference variables
// during generalization. This prevents hangs caused by an exponential blowup,
// see tests/ui/traits/next-solver/coherence-alias-hang.rs.
//
// We don't do so for `NormalizesTo` goals as we erased the expected term and
// bailing with overflow here would prevent us from detecting a type-mismatch,
// causing a coherence error in diesel, see #131969. We still bail with overflow
// when later returning from the parent AliasRelate goal.
if !self.is_normalizes_to_goal {
let num_non_region_vars =
canonical.variables.iter().filter(|c| !c.is_region() && c.is_existential()).count();
if num_non_region_vars > self.cx().recursion_limit() {
debug!(?num_non_region_vars, "too many inference variables -> overflow");
return Ok(self.make_ambiguous_response_no_constraints(MaybeCause::Overflow {
suggest_increasing_limit: true,
}));
match self.current_goal_kind {
// We don't do so for `NormalizesTo` goals as we erased the expected term and
// bailing with overflow here would prevent us from detecting a type-mismatch,
// causing a coherence error in diesel, see #131969. We still bail with overflow
// when later returning from the parent AliasRelate goal.
CurrentGoalKind::NormalizesTo => {}
CurrentGoalKind::Misc | CurrentGoalKind::CoinductiveTrait => {
let num_non_region_vars = canonical
.variables
.iter()
.filter(|c| !c.is_region() && c.is_existential())
.count();
if num_non_region_vars > self.cx().recursion_limit() {
debug!(?num_non_region_vars, "too many inference variables -> overflow");
return Ok(self.make_ambiguous_response_no_constraints(MaybeCause::Overflow {
suggest_increasing_limit: true,
}));
}
}
}

View file

@ -9,6 +9,7 @@ use rustc_type_ir::fold::{TypeFoldable, TypeFolder, TypeSuperFoldable};
use rustc_type_ir::inherent::*;
use rustc_type_ir::relate::Relate;
use rustc_type_ir::relate::solver_relating::RelateExt;
use rustc_type_ir::search_graph::PathKind;
use rustc_type_ir::visit::{TypeSuperVisitable, TypeVisitable, TypeVisitableExt, TypeVisitor};
use rustc_type_ir::{self as ty, CanonicalVarValues, InferCtxtLike, Interner, TypingMode};
use rustc_type_ir_macros::{Lift_Generic, TypeFoldable_Generic, TypeVisitable_Generic};
@ -20,12 +21,51 @@ use crate::solve::inspect::{self, ProofTreeBuilder};
use crate::solve::search_graph::SearchGraph;
use crate::solve::{
CanonicalInput, Certainty, FIXPOINT_STEP_LIMIT, Goal, GoalEvaluationKind, GoalSource,
HasChanged, NestedNormalizationGoals, NoSolution, PredefinedOpaquesData, QueryResult,
HasChanged, NestedNormalizationGoals, NoSolution, PredefinedOpaquesData, QueryInput,
QueryResult,
};
pub(super) mod canonical;
mod probe;
/// The kind of goal we're currently proving.
///
/// This has effects on cycle handling and on how we compute
/// query responses, see the variant descriptions for more info.
#[derive(Debug, Copy, Clone)]
enum CurrentGoalKind {
Misc,
/// We're proving a trait goal for a coinductive trait, either an auto trait or `Sized`.
///
/// These are currently the only goals whose impl where-clauses are considered to be
/// productive steps.
CoinductiveTrait,
/// Unlike other goals, `NormalizesTo` goals act like functions with the expected term
/// always being fully unconstrained. This would weaken inference however, as the nested
/// goals never get the inference constraints from the actual normalized-to type.
///
/// Because of this we return any ambiguous nested goals from `NormalizesTo` to the
/// caller, which then adds these to its own context. The caller is always an `AliasRelate`
/// goal so this never leaks out of the solver.
NormalizesTo,
}
impl CurrentGoalKind {
fn from_query_input<I: Interner>(cx: I, input: QueryInput<I, I::Predicate>) -> CurrentGoalKind {
match input.goal.predicate.kind().skip_binder() {
ty::PredicateKind::Clause(ty::ClauseKind::Trait(pred)) => {
if cx.trait_is_coinductive(pred.trait_ref.def_id) {
CurrentGoalKind::CoinductiveTrait
} else {
CurrentGoalKind::Misc
}
}
ty::PredicateKind::NormalizesTo(_) => CurrentGoalKind::NormalizesTo,
_ => CurrentGoalKind::Misc,
}
}
}
pub struct EvalCtxt<'a, D, I = <D as SolverDelegate>::Interner>
where
D: SolverDelegate<Interner = I>,
@ -51,14 +91,10 @@ where
/// The variable info for the `var_values`, only used to make an ambiguous response
/// with no constraints.
variables: I::CanonicalVars,
/// Whether we're currently computing a `NormalizesTo` goal. Unlike other goals,
/// `NormalizesTo` goals act like functions with the expected term always being
/// fully unconstrained. This would weaken inference however, as the nested goals
/// never get the inference constraints from the actual normalized-to type. Because
/// of this we return any ambiguous nested goals from `NormalizesTo` to the caller
/// when then adds these to its own context. The caller is always an `AliasRelate`
/// goal so this never leaks out of the solver.
is_normalizes_to_goal: bool,
/// What kind of goal we're currently computing, see the enum definition
/// for more info.
current_goal_kind: CurrentGoalKind,
pub(super) var_values: CanonicalVarValues<I>,
predefined_opaques_in_body: I::PredefinedOpaques,
@ -226,8 +262,22 @@ where
self.delegate.typing_mode()
}
pub(super) fn set_is_normalizes_to_goal(&mut self) {
self.is_normalizes_to_goal = true;
/// Computes the `PathKind` for the step from the current goal to the
/// nested goal required due to `source`.
///
/// See #136824 for a more detailed reasoning for this behavior. We
/// consider cycles to be coinductive if they 'step into' a where-clause
/// of a coinductive trait. We will likely extend this function in the future
/// and will need to clearly document it in the rustc-dev-guide before
/// stabilization.
pub(super) fn step_kind_for_source(&self, source: GoalSource) -> PathKind {
match (self.current_goal_kind, source) {
(_, GoalSource::NormalizeGoal(step_kind)) => step_kind,
(CurrentGoalKind::CoinductiveTrait, GoalSource::ImplWhereBound) => {
PathKind::Coinductive
}
_ => PathKind::Inductive,
}
}
/// Creates a root evaluation context and search graph. This should only be
@ -256,7 +306,7 @@ where
max_input_universe: ty::UniverseIndex::ROOT,
variables: Default::default(),
var_values: CanonicalVarValues::dummy(),
is_normalizes_to_goal: false,
current_goal_kind: CurrentGoalKind::Misc,
origin_span,
tainted: Ok(()),
};
@ -294,7 +344,7 @@ where
delegate,
variables: canonical_input.canonical.variables,
var_values,
is_normalizes_to_goal: false,
current_goal_kind: CurrentGoalKind::from_query_input(cx, input),
predefined_opaques_in_body: input.predefined_opaques_in_body,
max_input_universe: canonical_input.canonical.max_universe,
search_graph,
@ -340,6 +390,7 @@ where
cx: I,
search_graph: &'a mut SearchGraph<D>,
canonical_input: CanonicalInput<I>,
step_kind_from_parent: PathKind,
goal_evaluation: &mut ProofTreeBuilder<D>,
) -> QueryResult<I> {
let mut canonical_goal_evaluation =
@ -352,6 +403,7 @@ where
search_graph.with_new_goal(
cx,
canonical_input,
step_kind_from_parent,
&mut canonical_goal_evaluation,
|search_graph, canonical_goal_evaluation| {
EvalCtxt::enter_canonical(
@ -395,12 +447,10 @@ where
/// `NormalizesTo` is only used by `AliasRelate`, all other callsites
/// should use [`EvalCtxt::evaluate_goal`] which discards that empty
/// storage.
// FIXME(-Znext-solver=coinduction): `_source` is currently unused but will
// be necessary once we implement the new coinduction approach.
pub(super) fn evaluate_goal_raw(
&mut self,
goal_evaluation_kind: GoalEvaluationKind,
_source: GoalSource,
source: GoalSource,
goal: Goal<I, I::Predicate>,
) -> Result<(NestedNormalizationGoals<I>, HasChanged, Certainty), NoSolution> {
let (orig_values, canonical_goal) = self.canonicalize_goal(goal);
@ -410,6 +460,7 @@ where
self.cx(),
self.search_graph,
canonical_goal,
self.step_kind_for_source(source),
&mut goal_evaluation,
);
let response = match canonical_response {
@ -630,8 +681,11 @@ where
#[instrument(level = "trace", skip(self))]
pub(super) fn add_normalizes_to_goal(&mut self, mut goal: Goal<I, ty::NormalizesTo<I>>) {
goal.predicate =
goal.predicate.fold_with(&mut ReplaceAliasWithInfer::new(self, goal.param_env));
goal.predicate = goal.predicate.fold_with(&mut ReplaceAliasWithInfer::new(
self,
GoalSource::Misc,
goal.param_env,
));
self.inspect.add_normalizes_to_goal(self.delegate, self.max_input_universe, goal);
self.nested_goals.normalizes_to_goals.push(goal);
}
@ -639,7 +693,7 @@ where
#[instrument(level = "debug", skip(self))]
pub(super) fn add_goal(&mut self, source: GoalSource, mut goal: Goal<I, I::Predicate>) {
goal.predicate =
goal.predicate.fold_with(&mut ReplaceAliasWithInfer::new(self, goal.param_env));
goal.predicate.fold_with(&mut ReplaceAliasWithInfer::new(self, source, goal.param_env));
self.inspect.add_goal(self.delegate, self.max_input_universe, source, goal);
self.nested_goals.goals.push((source, goal));
}
@ -1053,6 +1107,13 @@ where
///
/// This is a performance optimization to more eagerly detect cycles during trait
/// solving. See tests/ui/traits/next-solver/cycles/cycle-modulo-ambig-aliases.rs.
///
/// The emitted goals get evaluated in the context of the parent goal; by
/// replacing aliases in nested goals we essentially pull the normalization out of
/// the nested goal. We want to treat the goal as if the normalization still happens
/// inside of the nested goal by inheriting the `step_kind` of the nested goal and
/// storing it in the `GoalSource` of the emitted `AliasRelate` goals.
/// This is necessary for tests/ui/sized/coinductive-1.rs to compile.
struct ReplaceAliasWithInfer<'me, 'a, D, I>
where
D: SolverDelegate<Interner = I>,
@ -1060,6 +1121,7 @@ where
{
ecx: &'me mut EvalCtxt<'a, D>,
param_env: I::ParamEnv,
normalization_goal_source: GoalSource,
cache: HashMap<I::Ty, I::Ty>,
}
@ -1068,8 +1130,18 @@ where
D: SolverDelegate<Interner = I>,
I: Interner,
{
fn new(ecx: &'me mut EvalCtxt<'a, D>, param_env: I::ParamEnv) -> Self {
ReplaceAliasWithInfer { ecx, param_env, cache: Default::default() }
fn new(
ecx: &'me mut EvalCtxt<'a, D>,
for_goal_source: GoalSource,
param_env: I::ParamEnv,
) -> Self {
let step_kind = ecx.step_kind_for_source(for_goal_source);
ReplaceAliasWithInfer {
ecx,
param_env,
normalization_goal_source: GoalSource::NormalizeGoal(step_kind),
cache: Default::default(),
}
}
}
@ -1092,7 +1164,7 @@ where
ty::AliasRelationDirection::Equate,
);
self.ecx.add_goal(
GoalSource::Misc,
self.normalization_goal_source,
Goal::new(self.cx(), self.param_env, normalizes_to),
);
infer_ty
@ -1121,7 +1193,7 @@ where
ty::AliasRelationDirection::Equate,
);
self.ecx.add_goal(
GoalSource::Misc,
self.normalization_goal_source,
Goal::new(self.cx(), self.param_env, normalizes_to),
);
infer_ct

View file

@ -34,7 +34,7 @@ where
delegate,
variables: outer_ecx.variables,
var_values: outer_ecx.var_values,
is_normalizes_to_goal: outer_ecx.is_normalizes_to_goal,
current_goal_kind: outer_ecx.current_goal_kind,
predefined_opaques_in_body: outer_ecx.predefined_opaques_in_body,
max_input_universe,
search_graph: outer_ecx.search_graph,

View file

@ -39,7 +39,7 @@ where
//
// FIXME(-Znext-solver=coinductive): I think this should be split
// and we tag the impl bounds with `GoalSource::ImplWhereBound`?
// Right not this includes both the impl and the assoc item where bounds,
// Right now this includes both the impl and the assoc item where bounds,
// and I don't think the assoc item where-bounds are allowed to be coinductive.
self.add_goals(
GoalSource::Misc,

View file

@ -28,7 +28,6 @@ where
&mut self,
goal: Goal<I, NormalizesTo<I>>,
) -> QueryResult<I> {
self.set_is_normalizes_to_goal();
debug_assert!(self.term_is_fully_unconstrained(goal));
let cx = self.cx();
match goal.predicate.alias.kind(cx) {

View file

@ -2,7 +2,6 @@ use std::convert::Infallible;
use std::marker::PhantomData;
use rustc_type_ir::Interner;
use rustc_type_ir::inherent::*;
use rustc_type_ir::search_graph::{self, PathKind};
use rustc_type_ir::solve::{CanonicalInput, Certainty, QueryResult};
@ -94,10 +93,6 @@ where
let certainty = from_result.unwrap().value.certainty;
response_no_constraints(cx, for_input, certainty)
}
fn step_is_coinductive(cx: I, input: CanonicalInput<I>) -> bool {
input.canonical.value.goal.predicate.is_coinductive(cx)
}
}
fn response_no_constraints<I: Interner>(

View file

@ -438,7 +438,10 @@ impl<'tcx> ProofTreeVisitor<'tcx> for BestObligation<'tcx> {
let obligation;
match (child_mode, nested_goal.source()) {
(ChildMode::Trait(_) | ChildMode::Host(_), GoalSource::Misc) => {
(
ChildMode::Trait(_) | ChildMode::Host(_),
GoalSource::Misc | GoalSource::NormalizeGoal(_),
) => {
continue;
}
(ChildMode::Trait(parent_trait_pred), GoalSource::ImplWhereBound) => {

View file

@ -1225,15 +1225,21 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
/// that recursion is ok. This routine returns `true` if the top of the
/// stack (`cycle[0]`):
///
/// - is a defaulted trait,
/// - is a coinductive trait: an auto-trait or `Sized`,
/// - it also appears in the backtrace at some position `X`,
/// - all the predicates at positions `X..` between `X` and the top are
/// also defaulted traits.
/// also coinductive traits.
pub(crate) fn coinductive_match<I>(&mut self, mut cycle: I) -> bool
where
I: Iterator<Item = ty::Predicate<'tcx>>,
{
cycle.all(|predicate| predicate.is_coinductive(self.tcx()))
cycle.all(|p| match p.kind().skip_binder() {
ty::PredicateKind::Clause(ty::ClauseKind::Trait(data)) => {
self.infcx.tcx.trait_is_coinductive(data.def_id())
}
ty::PredicateKind::Clause(ty::ClauseKind::WellFormed(_)) => true,
_ => false,
})
}
/// Further evaluates `candidate` to decide whether all type parameters match and whether nested

View file

@ -16,7 +16,7 @@ rustc_macros = { path = "../rustc_macros", optional = true }
rustc_serialize = { path = "../rustc_serialize", optional = true }
rustc_span = { path = "../rustc_span", optional = true }
rustc_type_ir_macros = { path = "../rustc_type_ir_macros" }
smallvec = { version = "1.8.1", default-features = false }
smallvec = { version = "1.8.1", default-features = false, features = ["const_generics"] }
thin-vec = "0.2.12"
tracing = "0.1"
# tidy-alphabetical-end

View file

@ -462,8 +462,6 @@ pub trait Predicate<I: Interner<Predicate = Self>>:
{
fn as_clause(self) -> Option<I::Clause>;
fn is_coinductive(self, interner: I) -> bool;
// FIXME: Eventually uplift the impl out of rustc and make this defaulted.
fn allow_normalization(self) -> bool;
}

View file

@ -279,6 +279,8 @@ pub trait Interner:
fn trait_is_auto(self, trait_def_id: Self::DefId) -> bool;
fn trait_is_coinductive(self, trait_def_id: Self::DefId) -> bool;
fn trait_is_alias(self, trait_def_id: Self::DefId) -> bool;
fn trait_is_dyn_compatible(self, trait_def_id: Self::DefId) -> bool;

View file

@ -12,13 +12,15 @@
/// The global cache has to be completely unobservable, while the per-cycle cache may impact
/// behavior as long as the resulting behavior is still correct.
use std::cmp::Ordering;
use std::collections::BTreeSet;
use std::collections::BTreeMap;
use std::fmt::Debug;
use std::hash::Hash;
use std::marker::PhantomData;
use derive_where::derive_where;
use rustc_index::{Idx, IndexVec};
#[cfg(feature = "nightly")]
use rustc_macros::HashStable_NoContext;
use tracing::debug;
use crate::data_structures::HashMap;
@ -104,27 +106,49 @@ pub trait Delegate {
for_input: <Self::Cx as Cx>::Input,
from_result: <Self::Cx as Cx>::Result,
) -> <Self::Cx as Cx>::Result;
fn step_is_coinductive(cx: Self::Cx, input: <Self::Cx as Cx>::Input) -> bool;
}
/// In the initial iteration of a cycle, we do not yet have a provisional
/// result. In that case we return an initial provisional result depending
/// on the kind of cycle.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
#[cfg_attr(feature = "nightly", derive(HashStable_NoContext))]
pub enum PathKind {
Coinductive,
Inductive,
}
impl PathKind {
/// Returns the path kind when merging `self` with `rest`.
///
/// Given an inductive path `self` and a coinductive path `rest`,
/// the path `self -> rest` would be coinductive.
fn extend(self, rest: PathKind) -> PathKind {
match self {
PathKind::Coinductive => PathKind::Coinductive,
PathKind::Inductive => rest,
}
}
}
/// The kinds of cycles a cycle head was involved in.
///
/// This is used to avoid rerunning a cycle if there's
/// just a single usage kind and the final result matches
/// its provisional result.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum UsageKind {
Single(PathKind),
Mixed,
}
impl From<PathKind> for UsageKind {
fn from(path: PathKind) -> UsageKind {
UsageKind::Single(path)
}
}
impl UsageKind {
fn merge(self, other: Self) -> Self {
match (self, other) {
#[must_use]
fn merge(self, other: impl Into<Self>) -> Self {
match (self, other.into()) {
(UsageKind::Mixed, _) | (_, UsageKind::Mixed) => UsageKind::Mixed,
(UsageKind::Single(lhs), UsageKind::Single(rhs)) => {
if lhs == rhs {
@ -135,7 +159,42 @@ impl UsageKind {
}
}
}
fn and_merge(&mut self, other: Self) {
fn and_merge(&mut self, other: impl Into<Self>) {
*self = self.merge(other);
}
}
/// For each goal we track whether the paths from this goal
/// to its cycle heads are coinductive.
///
/// This is a necessary condition to rebase provisional cache
/// entries.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum AllPathsToHeadCoinductive {
Yes,
No,
}
impl From<PathKind> for AllPathsToHeadCoinductive {
fn from(path: PathKind) -> AllPathsToHeadCoinductive {
match path {
PathKind::Coinductive => AllPathsToHeadCoinductive::Yes,
_ => AllPathsToHeadCoinductive::No,
}
}
}
impl AllPathsToHeadCoinductive {
#[must_use]
fn merge(self, other: impl Into<Self>) -> Self {
match (self, other.into()) {
(AllPathsToHeadCoinductive::Yes, AllPathsToHeadCoinductive::Yes) => {
AllPathsToHeadCoinductive::Yes
}
(AllPathsToHeadCoinductive::No, _) | (_, AllPathsToHeadCoinductive::No) => {
AllPathsToHeadCoinductive::No
}
}
}
fn and_merge(&mut self, other: impl Into<Self>) {
*self = self.merge(other);
}
}
@ -177,10 +236,11 @@ impl AvailableDepth {
/// All cycle heads a given goal depends on, ordered by their stack depth.
///
/// We therefore pop the cycle heads from highest to lowest.
/// We also track all paths from this goal to that head. This is necessary
/// when rebasing provisional cache results.
#[derive(Clone, Debug, PartialEq, Eq, Default)]
struct CycleHeads {
heads: BTreeSet<StackDepth>,
heads: BTreeMap<StackDepth, AllPathsToHeadCoinductive>,
}
impl CycleHeads {
@ -189,15 +249,15 @@ impl CycleHeads {
}
fn highest_cycle_head(&self) -> StackDepth {
*self.heads.last().unwrap()
self.opt_highest_cycle_head().unwrap()
}
fn opt_highest_cycle_head(&self) -> Option<StackDepth> {
self.heads.last().copied()
self.heads.last_key_value().map(|(k, _)| *k)
}
fn opt_lowest_cycle_head(&self) -> Option<StackDepth> {
self.heads.first().copied()
self.heads.first_key_value().map(|(k, _)| *k)
}
fn remove_highest_cycle_head(&mut self) {
@ -205,28 +265,42 @@ impl CycleHeads {
debug_assert_ne!(last, None);
}
fn insert(&mut self, head: StackDepth) {
self.heads.insert(head);
fn insert(
&mut self,
head: StackDepth,
path_from_entry: impl Into<AllPathsToHeadCoinductive> + Copy,
) {
self.heads.entry(head).or_insert(path_from_entry.into()).and_merge(path_from_entry);
}
fn merge(&mut self, heads: &CycleHeads) {
for &head in heads.heads.iter() {
self.insert(head);
for (&head, &path_from_entry) in heads.heads.iter() {
self.insert(head, path_from_entry);
debug_assert!(matches!(self.heads[&head], AllPathsToHeadCoinductive::Yes));
}
}
fn iter(&self) -> impl Iterator<Item = (StackDepth, AllPathsToHeadCoinductive)> + '_ {
self.heads.iter().map(|(k, v)| (*k, *v))
}
/// Update the cycle heads of a goal at depth `this` given the cycle heads
/// of a nested goal. This merges the heads after filtering the parent goal
/// itself.
fn extend_from_child(&mut self, this: StackDepth, child: &CycleHeads) {
for &head in child.heads.iter() {
fn extend_from_child(&mut self, this: StackDepth, step_kind: PathKind, child: &CycleHeads) {
for (&head, &path_from_entry) in child.heads.iter() {
match head.cmp(&this) {
Ordering::Less => {}
Ordering::Equal => continue,
Ordering::Greater => unreachable!(),
}
self.insert(head);
let path_from_entry = match step_kind {
PathKind::Coinductive => AllPathsToHeadCoinductive::Yes,
PathKind::Inductive => path_from_entry,
};
self.insert(head, path_from_entry);
}
}
}
@ -246,7 +320,7 @@ impl CycleHeads {
/// We need to disable the global cache if using it would hide a cycle, as
/// cycles can impact behavior. The cycle ABA may have different final
/// results from the cycle BAB depending on the cycle root.
#[derive_where(Debug, Default; X: Cx)]
#[derive_where(Debug, Default, Clone; X: Cx)]
struct NestedGoals<X: Cx> {
nested_goals: HashMap<X::Input, UsageKind>,
}
@ -259,13 +333,6 @@ impl<X: Cx> NestedGoals<X> {
self.nested_goals.entry(input).or_insert(path_from_entry).and_merge(path_from_entry);
}
fn merge(&mut self, nested_goals: &NestedGoals<X>) {
#[allow(rustc::potential_query_instability)]
for (input, path_from_entry) in nested_goals.iter() {
self.insert(input, path_from_entry);
}
}
/// Adds the nested goals of a nested goal, given that the path from this goal
/// to the parent goal is `step_kind`.
///
@ -276,8 +343,8 @@ impl<X: Cx> NestedGoals<X> {
#[allow(rustc::potential_query_instability)]
for (input, path_from_entry) in nested_goals.iter() {
let path_from_entry = match step_kind {
PathKind::Coinductive => path_from_entry,
PathKind::Inductive => UsageKind::Single(PathKind::Inductive),
PathKind::Coinductive => UsageKind::Single(PathKind::Coinductive),
PathKind::Inductive => path_from_entry,
};
self.insert(input, path_from_entry);
}
@ -289,10 +356,6 @@ impl<X: Cx> NestedGoals<X> {
self.nested_goals.iter().map(|(i, p)| (*i, *p))
}
fn get(&self, input: X::Input) -> Option<UsageKind> {
self.nested_goals.get(&input).copied()
}
fn contains(&self, input: X::Input) -> bool {
self.nested_goals.contains_key(&input)
}
@ -310,6 +373,12 @@ rustc_index::newtype_index! {
struct StackEntry<X: Cx> {
input: X::Input,
/// Whether proving this goal is a coinductive step.
///
/// This is used when encountering a trait solver cycle to
/// decide the initial provisional result of the cycle.
step_kind_from_parent: PathKind,
/// The available depth of a given goal, immutable.
available_depth: AvailableDepth,
@ -346,9 +415,9 @@ struct ProvisionalCacheEntry<X: Cx> {
encountered_overflow: bool,
/// All cycle heads this cache entry depends on.
heads: CycleHeads,
/// The path from the highest cycle head to this goal.
/// The path from the highest cycle head to this goal. This differs from
/// `heads` which tracks the path to the cycle head *from* this goal.
path_from_head: PathKind,
nested_goals: NestedGoals<X>,
result: X::Result,
}
@ -367,6 +436,20 @@ pub struct SearchGraph<D: Delegate<Cx = X>, X: Cx = <D as Delegate>::Cx> {
_marker: PhantomData<D>,
}
/// While [`SearchGraph::update_parent_goal`] can be mostly shared between
/// ordinary nested goals/global cache hits and provisional cache hits,
/// using the provisional cache should not add any nested goals.
///
/// `nested_goals` are only used when checking whether global cache entries
/// are applicable. This only cares about whether a goal is actually accessed.
/// Given that the usage of the provisional cache is fully deterministic, we
/// don't need to track the nested goals used while computing a provisional
/// cache entry.
enum UpdateParentGoalCtxt<'a, X: Cx> {
Ordinary(&'a NestedGoals<X>),
ProvisionalCacheHit,
}
impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
pub fn new(root_depth: usize) -> SearchGraph<D> {
Self {
@ -382,27 +465,32 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
/// and using existing global cache entries to make sure they
/// have the same impact on the remaining evaluation.
fn update_parent_goal(
cx: X,
stack: &mut IndexVec<StackDepth, StackEntry<X>>,
step_kind_from_parent: PathKind,
reached_depth: StackDepth,
heads: &CycleHeads,
encountered_overflow: bool,
nested_goals: &NestedGoals<X>,
context: UpdateParentGoalCtxt<'_, X>,
) {
if let Some(parent_index) = stack.last_index() {
let parent = &mut stack[parent_index];
parent.reached_depth = parent.reached_depth.max(reached_depth);
parent.encountered_overflow |= encountered_overflow;
parent.heads.extend_from_child(parent_index, heads);
let step_kind = Self::step_kind(cx, parent.input);
parent.nested_goals.extend_from_child(step_kind, nested_goals);
parent.heads.extend_from_child(parent_index, step_kind_from_parent, heads);
let parent_depends_on_cycle = match context {
UpdateParentGoalCtxt::Ordinary(nested_goals) => {
parent.nested_goals.extend_from_child(step_kind_from_parent, nested_goals);
!nested_goals.is_empty()
}
UpdateParentGoalCtxt::ProvisionalCacheHit => true,
};
// Once we've got goals which encountered overflow or a cycle,
// we track all goals whose behavior may depend on these
// goals as this change may cause them to now depend on additional
// goals, resulting in new cycles. See the dev-guide for examples.
if !nested_goals.is_empty() {
parent.nested_goals.insert(parent.input, UsageKind::Single(PathKind::Coinductive))
if parent_depends_on_cycle {
parent.nested_goals.insert(parent.input, UsageKind::Single(PathKind::Inductive))
}
}
}
@ -422,21 +510,19 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
self.stack.len()
}
fn step_kind(cx: X, input: X::Input) -> PathKind {
if D::step_is_coinductive(cx, input) { PathKind::Coinductive } else { PathKind::Inductive }
}
/// Whether the path from `head` to the current stack entry is inductive or coinductive.
fn stack_path_kind(
cx: X,
///
/// The `step_kind_to_head` is used to add a single additional path segment to the path on
/// the stack which completes the cycle. Given an inductive step AB which then cycles
/// coinductively with A, we need to treat this cycle as coinductive.
fn cycle_path_kind(
stack: &IndexVec<StackDepth, StackEntry<X>>,
step_kind_to_head: PathKind,
head: StackDepth,
) -> PathKind {
if stack.raw[head.index()..].iter().all(|entry| D::step_is_coinductive(cx, entry.input)) {
PathKind::Coinductive
} else {
PathKind::Inductive
}
stack.raw[head.index() + 1..]
.iter()
.fold(step_kind_to_head, |curr, entry| curr.extend(entry.step_kind_from_parent))
}
/// Probably the most involved method of the whole solver.
@ -447,6 +533,7 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
&mut self,
cx: X,
input: X::Input,
step_kind_from_parent: PathKind,
inspect: &mut D::ProofTreeBuilder,
mut evaluate_goal: impl FnMut(&mut Self, &mut D::ProofTreeBuilder) -> X::Result,
) -> X::Result {
@ -464,7 +551,7 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
// - A
// - BA cycle
// - CB :x:
if let Some(result) = self.lookup_provisional_cache(cx, input) {
if let Some(result) = self.lookup_provisional_cache(input, step_kind_from_parent) {
return result;
}
@ -477,10 +564,12 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
// global cache has been disabled as it may otherwise change the result for
// cyclic goals. We don't care about goals which are not on the current stack
// so it's fine to drop their scope eagerly.
self.lookup_global_cache_untracked(cx, input, available_depth)
self.lookup_global_cache_untracked(cx, input, step_kind_from_parent, available_depth)
.inspect(|expected| debug!(?expected, "validate cache entry"))
.map(|r| (scope, r))
} else if let Some(result) = self.lookup_global_cache(cx, input, available_depth) {
} else if let Some(result) =
self.lookup_global_cache(cx, input, step_kind_from_parent, available_depth)
{
return result;
} else {
None
@ -490,8 +579,8 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
// avoid iterating over the stack in case a goal has already been computed.
// This may not have an actual performance impact and we could reorder them
// as it may reduce the number of `nested_goals` we need to track.
if let Some(result) = self.check_cycle_on_stack(cx, input) {
debug_assert!(validate_cache.is_none(), "global cache and cycle on stack");
if let Some(result) = self.check_cycle_on_stack(cx, input, step_kind_from_parent) {
debug_assert!(validate_cache.is_none(), "global cache and cycle on stack: {input:?}");
return result;
}
@ -499,6 +588,7 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
let depth = self.stack.next_index();
let entry = StackEntry {
input,
step_kind_from_parent,
available_depth,
reached_depth: depth,
heads: Default::default(),
@ -522,12 +612,12 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
// We've finished computing the goal and have popped it from the stack,
// lazily update its parent goal.
Self::update_parent_goal(
cx,
&mut self.stack,
final_entry.step_kind_from_parent,
final_entry.reached_depth,
&final_entry.heads,
final_entry.encountered_overflow,
&final_entry.nested_goals,
UpdateParentGoalCtxt::Ordinary(&final_entry.nested_goals),
);
// We're now done with this goal. We only add the root of cycles to the global cache.
@ -541,19 +631,20 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
self.insert_global_cache(cx, input, final_entry, result, dep_node)
}
} else if D::ENABLE_PROVISIONAL_CACHE {
debug_assert!(validate_cache.is_none());
debug_assert!(validate_cache.is_none(), "unexpected non-root: {input:?}");
let entry = self.provisional_cache.entry(input).or_default();
let StackEntry { heads, nested_goals, encountered_overflow, .. } = final_entry;
let path_from_head = Self::stack_path_kind(cx, &self.stack, heads.highest_cycle_head());
entry.push(ProvisionalCacheEntry {
encountered_overflow,
heads,
path_from_head,
nested_goals,
result,
});
let StackEntry { heads, encountered_overflow, .. } = final_entry;
let path_from_head = Self::cycle_path_kind(
&self.stack,
step_kind_from_parent,
heads.highest_cycle_head(),
);
let provisional_cache_entry =
ProvisionalCacheEntry { encountered_overflow, heads, path_from_head, result };
debug!(?provisional_cache_entry);
entry.push(provisional_cache_entry);
} else {
debug_assert!(validate_cache.is_none());
debug_assert!(validate_cache.is_none(), "unexpected non-root: {input:?}");
}
result
@ -575,7 +666,7 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
//
// We must therefore not use the global cache entry for `B` in that case.
// See tests/ui/traits/next-solver/cycles/hidden-by-overflow.rs
last.nested_goals.insert(last.input, UsageKind::Single(PathKind::Coinductive));
last.nested_goals.insert(last.input, UsageKind::Single(PathKind::Inductive));
}
debug!("encountered stack overflow");
@ -607,7 +698,6 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
/// This can be thought of rotating the sub-tree of this provisional result and changing
/// its entry point while making sure that all paths through this sub-tree stay the same.
///
///
/// In case the popped cycle head failed to reach a fixpoint anything which depends on
/// its provisional result is invalid. Actually discarding provisional cache entries in
/// this case would cause hangs, so we instead change the result of dependant provisional
@ -616,7 +706,6 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
/// to me.
fn rebase_provisional_cache_entries(
&mut self,
cx: X,
stack_entry: &StackEntry<X>,
mut mutate_result: impl FnMut(X::Input, X::Result) -> X::Result,
) {
@ -628,25 +717,24 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
encountered_overflow: _,
heads,
path_from_head,
nested_goals,
result,
} = entry;
if heads.highest_cycle_head() != head {
if heads.highest_cycle_head() == head {
heads.remove_highest_cycle_head()
} else {
return true;
}
// We don't try rebasing if the path from the current head
// to the cache entry is not coinductive or if the path from
// the cache entry to the current head is not coinductive.
//
// Both of these constraints could be weakened, but by only
// accepting coinductive paths we don't have to worry about
// changing the cycle kind of the remaining cycles. We can
// extend this in the future once there's a known issue
// caused by it.
if *path_from_head != PathKind::Coinductive
|| nested_goals.get(stack_entry.input).unwrap()
!= UsageKind::Single(PathKind::Coinductive)
// We only try to rebase if all paths from the cache entry
// to its heads are coinductive. In this case these cycle
// kinds won't change, no matter the goals between these
// heads and the provisional cache entry.
if heads.iter().any(|(_, p)| matches!(p, AllPathsToHeadCoinductive::No)) {
return false;
}
// The same for nested goals of the cycle head.
if stack_entry.heads.iter().any(|(_, p)| matches!(p, AllPathsToHeadCoinductive::No))
{
return false;
}
@ -654,20 +742,23 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
// Merge the cycle heads of the provisional cache entry and the
// popped head. If the popped cycle head was a root, discard all
// provisional cache entries which depend on it.
heads.remove_highest_cycle_head();
heads.merge(&stack_entry.heads);
let Some(head) = heads.opt_highest_cycle_head() else {
return false;
};
// As we've made sure that the path from the new highest cycle
// head to the uses of the popped cycle head are fully coinductive,
// we can be sure that the paths to all nested goals of the popped
// cycle head remain the same. We can simply merge them.
nested_goals.merge(&stack_entry.nested_goals);
// We now care about the path from the next highest cycle head to the
// provisional cache entry.
*path_from_head = Self::stack_path_kind(cx, &self.stack, head);
match path_from_head {
PathKind::Coinductive => {}
PathKind::Inductive => {
*path_from_head = Self::cycle_path_kind(
&self.stack,
stack_entry.step_kind_from_parent,
head,
)
}
}
// Mutate the result of the provisional cache entry in case we did
// not reach a fixpoint.
*result = mutate_result(input, *result);
@ -677,19 +768,18 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
});
}
fn lookup_provisional_cache(&mut self, cx: X, input: X::Input) -> Option<X::Result> {
fn lookup_provisional_cache(
&mut self,
input: X::Input,
step_kind_from_parent: PathKind,
) -> Option<X::Result> {
if !D::ENABLE_PROVISIONAL_CACHE {
return None;
}
let entries = self.provisional_cache.get(&input)?;
for &ProvisionalCacheEntry {
encountered_overflow,
ref heads,
path_from_head,
ref nested_goals,
result,
} in entries
for &ProvisionalCacheEntry { encountered_overflow, ref heads, path_from_head, result } in
entries
{
let head = heads.highest_cycle_head();
if encountered_overflow {
@ -710,22 +800,18 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
// A provisional cache entry is only valid if the current path from its
// highest cycle head to the goal is the same.
if path_from_head == Self::stack_path_kind(cx, &self.stack, head) {
if path_from_head == Self::cycle_path_kind(&self.stack, step_kind_from_parent, head) {
// While we don't have to track the full depth of the provisional cache entry,
// we do have to increment the required depth by one as we'd have already failed
// with overflow otherwise
let next_index = self.stack.next_index();
let last = &mut self.stack.raw.last_mut().unwrap();
let path_from_entry = Self::step_kind(cx, last.input);
last.nested_goals.insert(input, UsageKind::Single(path_from_entry));
Self::update_parent_goal(
cx,
&mut self.stack,
step_kind_from_parent,
next_index,
heads,
false,
nested_goals,
encountered_overflow,
UpdateParentGoalCtxt::ProvisionalCacheHit,
);
debug_assert!(self.stack[head].has_been_used.is_some());
debug!(?head, ?path_from_head, "provisional cache hit");
@ -740,8 +826,8 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
/// evaluating this entry would not have ended up depending on either a goal
/// already on the stack or a provisional cache entry.
fn candidate_is_applicable(
cx: X,
stack: &IndexVec<StackDepth, StackEntry<X>>,
step_kind_from_parent: PathKind,
provisional_cache: &HashMap<X::Input, Vec<ProvisionalCacheEntry<X>>>,
nested_goals: &NestedGoals<X>,
) -> bool {
@ -773,7 +859,6 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
encountered_overflow,
ref heads,
path_from_head,
nested_goals: _,
result: _,
} in entries.iter()
{
@ -786,9 +871,9 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
// A provisional cache entry only applies if the path from its highest head
// matches the path when encountering the goal.
let head = heads.highest_cycle_head();
let full_path = match Self::stack_path_kind(cx, stack, head) {
PathKind::Coinductive => path_from_global_entry,
PathKind::Inductive => UsageKind::Single(PathKind::Inductive),
let full_path = match Self::cycle_path_kind(stack, step_kind_from_parent, head) {
PathKind::Coinductive => UsageKind::Single(PathKind::Coinductive),
PathKind::Inductive => path_from_global_entry,
};
match (full_path, path_from_head) {
@ -816,14 +901,15 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
&self,
cx: X,
input: X::Input,
step_kind_from_parent: PathKind,
available_depth: AvailableDepth,
) -> Option<X::Result> {
cx.with_global_cache(|cache| {
cache
.get(cx, input, available_depth, |nested_goals| {
Self::candidate_is_applicable(
cx,
&self.stack,
step_kind_from_parent,
&self.provisional_cache,
nested_goals,
)
@ -839,14 +925,15 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
&mut self,
cx: X,
input: X::Input,
step_kind_from_parent: PathKind,
available_depth: AvailableDepth,
) -> Option<X::Result> {
cx.with_global_cache(|cache| {
let CacheData { result, additional_depth, encountered_overflow, nested_goals } = cache
.get(cx, input, available_depth, |nested_goals| {
Self::candidate_is_applicable(
cx,
&self.stack,
step_kind_from_parent,
&self.provisional_cache,
nested_goals,
)
@ -860,12 +947,12 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
// cycle heads are always empty.
let heads = Default::default();
Self::update_parent_goal(
cx,
&mut self.stack,
step_kind_from_parent,
reached_depth,
&heads,
encountered_overflow,
nested_goals,
UpdateParentGoalCtxt::Ordinary(nested_goals),
);
debug!(?additional_depth, "global cache hit");
@ -873,16 +960,21 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
})
}
fn check_cycle_on_stack(&mut self, cx: X, input: X::Input) -> Option<X::Result> {
fn check_cycle_on_stack(
&mut self,
cx: X,
input: X::Input,
step_kind_from_parent: PathKind,
) -> Option<X::Result> {
let (head, _stack_entry) = self.stack.iter_enumerated().find(|(_, e)| e.input == input)?;
debug!("encountered cycle with depth {head:?}");
// We have a nested goal which directly relies on a goal deeper in the stack.
//
// We start by tagging all cycle participants, as that's necessary for caching.
//
// Finally we can return either the provisional response or the initial response
// in case we're in the first fixpoint iteration for this goal.
let path_kind = Self::stack_path_kind(cx, &self.stack, head);
let path_kind = Self::cycle_path_kind(&self.stack, step_kind_from_parent, head);
debug!(?path_kind, "encountered cycle with depth {head:?}");
let usage_kind = UsageKind::Single(path_kind);
self.stack[head].has_been_used =
Some(self.stack[head].has_been_used.map_or(usage_kind, |prev| prev.merge(usage_kind)));
@ -894,11 +986,10 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
let last = &mut self.stack[last_index];
last.reached_depth = last.reached_depth.max(next_index);
let path_from_entry = Self::step_kind(cx, last.input);
last.nested_goals.insert(input, UsageKind::Single(path_from_entry));
last.nested_goals.insert(last.input, UsageKind::Single(PathKind::Coinductive));
last.nested_goals.insert(input, UsageKind::Single(step_kind_from_parent));
last.nested_goals.insert(last.input, UsageKind::Single(PathKind::Inductive));
if last_index != head {
last.heads.insert(head);
last.heads.insert(head, step_kind_from_parent);
}
// Return the provisional result or, if we're in the first iteration,
@ -964,7 +1055,7 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
// this was only the root of either coinductive or inductive cycles, and the
// final result is equal to the initial response for that case.
if self.reached_fixpoint(cx, &stack_entry, usage_kind, result) {
self.rebase_provisional_cache_entries(cx, &stack_entry, |_, result| result);
self.rebase_provisional_cache_entries(&stack_entry, |_, result| result);
return (stack_entry, result);
}
@ -981,7 +1072,7 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
// we also taint all provisional cache entries which depend on the
// current goal.
if D::is_ambiguous_result(result) {
self.rebase_provisional_cache_entries(cx, &stack_entry, |input, _| {
self.rebase_provisional_cache_entries(&stack_entry, |input, _| {
D::propagate_ambiguity(cx, input, result)
});
return (stack_entry, result);
@ -993,7 +1084,7 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
if i >= D::FIXPOINT_STEP_LIMIT {
debug!("canonical cycle overflow");
let result = D::on_fixpoint_overflow(cx, input);
self.rebase_provisional_cache_entries(cx, &stack_entry, |input, _| {
self.rebase_provisional_cache_entries(&stack_entry, |input, _| {
D::on_fixpoint_overflow(cx, input)
});
return (stack_entry, result);

View file

@ -8,6 +8,7 @@ use derive_where::derive_where;
use rustc_macros::{HashStable_NoContext, TyDecodable, TyEncodable};
use rustc_type_ir_macros::{Lift_Generic, TypeFoldable_Generic, TypeVisitable_Generic};
use crate::search_graph::PathKind;
use crate::{self as ty, Canonical, CanonicalVarValues, Interner, Upcast};
pub type CanonicalInput<I, T = <I as Interner>::Predicate> =
@ -78,6 +79,13 @@ pub enum GoalSource {
/// This is used in two places: projecting to an opaque whose hidden type
/// is already registered in the opaque type storage, and for rigid projections.
AliasWellFormed,
/// In case normalizing aliases in nested goals cycles, eagerly normalizing these
/// aliases in the context of the parent may incorrectly change the cycle kind.
/// Normalizing aliases in goals therefore tracks the original path kind for this
/// nested goal. See the comment of the `ReplaceAliasWithInfer` visitor for more
/// details.
NormalizeGoal(PathKind),
}
#[derive_where(Clone; I: Interner, Goal<I, P>: Clone)]

View file

@ -608,11 +608,12 @@
# The "channel" for the Rust build to produce. The stable/beta channels only
# allow using stable features, whereas the nightly and dev channels allow using
# nightly features
# nightly features.
#
# If using tarball sources, default value for `channel` is taken from the `src/ci/channel` file;
# otherwise, it's "dev".
#channel = if "is a tarball source" { content of `src/ci/channel` file } else { "dev" }
# You can set the channel to "auto-detect" to load the channel name from `src/ci/channel`.
#
# If using tarball sources, default value is "auto-detect", otherwise, it's "dev".
#channel = if "is a tarball source" { "auto-detect" } else { "dev" }
# A descriptive string to be appended to `rustc --version` output, which is
# also used in places like debuginfo `DW_AT_producer`. This may be useful for

View file

@ -1777,7 +1777,11 @@ impl Config {
let is_user_configured_rust_channel =
if let Some(channel) = toml.rust.as_ref().and_then(|r| r.channel.clone()) {
config.channel = channel;
if channel == "auto-detect" {
config.channel = ci_channel.into();
} else {
config.channel = channel;
}
true
} else {
false

View file

@ -360,4 +360,9 @@ pub const CONFIG_CHANGE_HISTORY: &[ChangeInfo] = &[
severity: ChangeSeverity::Info,
summary: "Added `build.test-stage = 2` to 'tools' profile defaults",
},
ChangeInfo {
change_id: 137220,
severity: ChangeSeverity::Info,
summary: "`rust.channel` now supports \"auto-detect\" to load the channel from `src/ci/channel`",
},
];

View file

@ -1094,7 +1094,6 @@ pub fn capture_local_usage(cx: &LateContext<'_>, e: &Expr<'_>) -> CaptureKind {
pat.each_binding_or_first(&mut |_, id, span, _| match cx
.typeck_results()
.extract_binding_mode(cx.sess(), id, span)
.unwrap()
.0
{
ByRef::No if !is_copy(cx, cx.typeck_results().node_type(id)) => {

@ -1 +1 @@
Subproject commit 5004a8f6f5d8468b64fae457afb7d96e1784c783
Subproject commit a35f4f773118ccfbd8d05102eb12a34097b1ee55

View file

@ -6,30 +6,90 @@ LL | #![feature(const_trait_impl, generic_const_exprs)]
|
= help: remove one of these features
error[E0284]: type annotations needed: cannot satisfy `the constant `name_len::<T>()` can be evaluated`
error[E0275]: overflow evaluating the requirement `&T: ~const ConstName`
--> $DIR/issue-88119.rs:19:49
|
LL | impl<T: ?Sized + ConstName> const ConstName for &T
| ^^
error[E0275]: overflow evaluating the requirement `&T: ConstName`
--> $DIR/issue-88119.rs:19:49
|
LL | impl<T: ?Sized + ConstName> const ConstName for &T
| ^^
error[E0275]: overflow evaluating the requirement `[(); name_len::<T>()] well-formed`
--> $DIR/issue-88119.rs:21:5
|
LL | [(); name_len::<T>()]:,
| ^^^^^^^^^^^^^^^^^^^^^ cannot satisfy `the constant `name_len::<T>()` can be evaluated`
| ^^^^^^^^^^^^^^^^^^^^^
|
note: required by a bound in `<&T as ConstName>`
--> $DIR/issue-88119.rs:21:5
|
LL | [(); name_len::<T>()]:,
| ^^^^^^^^^^^^^^^^^^^^^ required by this bound in `<&T as ConstName>`
error[E0275]: overflow evaluating the requirement `[(); name_len::<T>()] well-formed`
--> $DIR/issue-88119.rs:21:10
|
LL | [(); name_len::<T>()]:,
| ^^^^^^^^^^^^^^^ required by this bound in `<&T as ConstName>`
| ^^^^^^^^^^^^^^^
|
note: required by a bound in `<&T as ConstName>`
--> $DIR/issue-88119.rs:21:5
|
LL | [(); name_len::<T>()]:,
| ^^^^^^^^^^^^^^^^^^^^^ required by this bound in `<&T as ConstName>`
error[E0284]: type annotations needed: cannot satisfy `the constant `name_len::<T>()` can be evaluated`
error[E0275]: overflow evaluating the requirement `&mut T: ~const ConstName`
--> $DIR/issue-88119.rs:26:49
|
LL | impl<T: ?Sized + ConstName> const ConstName for &mut T
| ^^^^^^
error[E0275]: overflow evaluating the requirement `&mut T: ConstName`
--> $DIR/issue-88119.rs:26:49
|
LL | impl<T: ?Sized + ConstName> const ConstName for &mut T
| ^^^^^^
error[E0275]: overflow evaluating the requirement `[(); name_len::<T>()] well-formed`
--> $DIR/issue-88119.rs:28:5
|
LL | [(); name_len::<T>()]:,
| ^^^^^^^^^^^^^^^^^^^^^ cannot satisfy `the constant `name_len::<T>()` can be evaluated`
| ^^^^^^^^^^^^^^^^^^^^^
|
note: required by a bound in `<&mut T as ConstName>`
--> $DIR/issue-88119.rs:28:5
|
LL | [(); name_len::<T>()]:,
| ^^^^^^^^^^^^^^^^^^^^^ required by this bound in `<&mut T as ConstName>`
error[E0275]: overflow evaluating the requirement `[(); name_len::<T>()] well-formed`
--> $DIR/issue-88119.rs:28:10
|
LL | [(); name_len::<T>()]:,
| ^^^^^^^^^^^^^^^ required by this bound in `<&mut T as ConstName>`
| ^^^^^^^^^^^^^^^
|
note: required by a bound in `<&mut T as ConstName>`
--> $DIR/issue-88119.rs:28:5
|
LL | [(); name_len::<T>()]:,
| ^^^^^^^^^^^^^^^^^^^^^ required by this bound in `<&mut T as ConstName>`
error: aborting due to 3 previous errors
error[E0275]: overflow evaluating the requirement `&&mut u8: ConstName`
--> $DIR/issue-88119.rs:33:35
|
LL | pub const ICE_1: &'static [u8] = <&&mut u8 as ConstName>::NAME_BYTES;
| ^^^^^^^^
For more information about this error, try `rustc --explain E0284`.
error[E0275]: overflow evaluating the requirement `&mut &u8: ConstName`
--> $DIR/issue-88119.rs:34:35
|
LL | pub const ICE_2: &'static [u8] = <&mut &u8 as ConstName>::NAME_BYTES;
| ^^^^^^^^
error: aborting due to 11 previous errors
For more information about this error, try `rustc --explain E0275`.

View file

@ -41,8 +41,12 @@ fn main() {
let fail = 0x8FFF_FFFF_FFFF_FFFE; //~WARNING literal out of range for `i32`
//~| HELP consider using the type `u64` instead
//~| HELP
//~| HELP consider using the type `u64` for the literal and cast it to `i32`
let fail = -0b1111_1111i8; //~WARNING literal out of range for `i8`
//~| HELP consider using the type `i16` instead
let fail = 0x8000_0000_0000_0000_0000_0000_FFFF_FFFE; //~WARNING literal out of range for `i32`
//~| HELP consider using the type `u128` instead
//~| HELP consider using the type `u128` for the literal and cast it to `i32`
}

View file

@ -112,9 +112,9 @@ LL | let fail = 0x8FFF_FFFF_FFFF_FFFE;
|
= note: the literal `0x8FFF_FFFF_FFFF_FFFE` (decimal `10376293541461622782`) does not fit into the type `i32` and will become `-2i32`
= help: consider using the type `u64` instead
help: to use as a negative number (decimal `-2`), consider using the type `u32` for the literal and cast it to `i32`
help: to use as a negative number (decimal `-2`), consider using the type `u64` for the literal and cast it to `i32`
|
LL | let fail = 0x8FFF_FFFF_FFFF_FFFEu32 as i32;
LL | let fail = 0x8FFF_FFFF_FFFF_FFFEu64 as i32;
| ++++++++++
warning: literal out of range for `i8`
@ -126,5 +126,18 @@ LL | let fail = -0b1111_1111i8;
= note: the literal `0b1111_1111i8` (decimal `255`) does not fit into the type `i8`
= note: and the value `-0b1111_1111i8` will become `1i8`
warning: 11 warnings emitted
warning: literal out of range for `i32`
--> $DIR/type-overflow.rs:49:16
|
LL | let fail = 0x8000_0000_0000_0000_0000_0000_FFFF_FFFE;
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
= note: the literal `0x8000_0000_0000_0000_0000_0000_FFFF_FFFE` (decimal `170141183460469231731687303720179073022`) does not fit into the type `i32` and will become `-2i32`
= help: consider using the type `u128` instead
help: to use as a negative number (decimal `-2`), consider using the type `u128` for the literal and cast it to `i32`
|
LL | let fail = 0x8000_0000_0000_0000_0000_0000_FFFF_FFFEu128 as i32;
| +++++++++++
warning: 12 warnings emitted

View file

@ -1,4 +1,7 @@
//@ check-pass
//@ revisions: current next
//@ ignore-compare-mode-next-solver (explicit revisions)
//@[next] compile-flags: -Znext-solver
struct Node<C: Trait<Self>>(C::Assoc);
trait Trait<T> {

View file

@ -0,0 +1,23 @@
error[E0275]: overflow evaluating the requirement `Vec<u8>: Trait<String>`
--> $DIR/item-bound-via-impl-where-clause.rs:31:21
|
LL | let s: String = transmute::<_, String>(vec![65_u8, 66, 67]);
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
note: required for `Vec<u8>` to implement `Trait<String>`
--> $DIR/item-bound-via-impl-where-clause.rs:22:12
|
LL | impl<L, R> Trait<R> for L
| ^^^^^^^^ ^
LL | where
LL | L: Trait<R>,
| -------- unsatisfied trait bound introduced here
note: required by a bound in `transmute`
--> $DIR/item-bound-via-impl-where-clause.rs:29:17
|
LL | fn transmute<L: Trait<R>, R>(r: L) -> <L::Proof as Trait<R>>::Proof { r }
| ^^^^^^^^ required by this bound in `transmute`
error: aborting due to 1 previous error
For more information about this error, try `rustc --explain E0275`.

View file

@ -0,0 +1,49 @@
error[E0275]: overflow evaluating the requirement `Vec<u8>: Trait<String>`
--> $DIR/item-bound-via-impl-where-clause.rs:31:33
|
LL | let s: String = transmute::<_, String>(vec![65_u8, 66, 67]);
| ^
|
note: required by a bound in `transmute`
--> $DIR/item-bound-via-impl-where-clause.rs:29:17
|
LL | fn transmute<L: Trait<R>, R>(r: L) -> <L::Proof as Trait<R>>::Proof { r }
| ^^^^^^^^ required by this bound in `transmute`
error[E0275]: overflow evaluating the requirement `<<Vec<u8> as Trait<String>>::Proof as Trait<String>>::Proof == _`
--> $DIR/item-bound-via-impl-where-clause.rs:31:21
|
LL | let s: String = transmute::<_, String>(vec![65_u8, 66, 67]);
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
error[E0275]: overflow evaluating the requirement `<<Vec<u8> as Trait<String>>::Proof as Trait<String>>::Proof == String`
--> $DIR/item-bound-via-impl-where-clause.rs:31:21
|
LL | let s: String = transmute::<_, String>(vec![65_u8, 66, 67]);
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
error[E0275]: overflow evaluating the requirement `<<Vec<u8> as Trait<String>>::Proof as Trait<String>>::Proof: Sized`
--> $DIR/item-bound-via-impl-where-clause.rs:31:21
|
LL | let s: String = transmute::<_, String>(vec![65_u8, 66, 67]);
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
= note: the return type of a function must have a statically known size
error[E0275]: overflow evaluating the requirement `<<Vec<u8> as Trait<String>>::Proof as Trait<String>>::Proof well-formed`
--> $DIR/item-bound-via-impl-where-clause.rs:31:21
|
LL | let s: String = transmute::<_, String>(vec![65_u8, 66, 67]);
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
error[E0275]: overflow evaluating the requirement `<<Vec<u8> as Trait<String>>::Proof as Trait<String>>::Proof == _`
--> $DIR/item-bound-via-impl-where-clause.rs:31:21
|
LL | let s: String = transmute::<_, String>(vec![65_u8, 66, 67]);
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
= note: duplicate diagnostic emitted due to `-Z deduplicate-diagnostics=no`
error: aborting due to 6 previous errors
For more information about this error, try `rustc --explain E0275`.

View file

@ -0,0 +1,39 @@
//@ revisions: current next
//@ ignore-compare-mode-next-solver (explicit revisions)
//@[next] compile-flags: -Znext-solver
// A variation of #135246 where the cyclic bounds are part of
// the impl instead of the impl associated item.
trait Trait<R>: Sized {
type Proof: Trait<R, Proof = Self>;
}
// We need to use indirection here as we otherwise normalize
// `<L::Proof as Trait<R>>::Proof` before recursing into
// `R: Trait<R, Proof = <L::Proof as Trait<R>>::Proof>`.
trait Indir<L: Trait<R>, R>: Trait<R, Proof = <L::Proof as Trait<R>>::Proof> {}
impl<L, R> Indir<L, R> for R
where
L: Trait<R>,
R: Trait<R, Proof = <L::Proof as Trait<R>>::Proof>,
{}
impl<L, R> Trait<R> for L
where
L: Trait<R>,
R: Indir<L, R>,
{
type Proof = R;
}
fn transmute<L: Trait<R>, R>(r: L) -> <L::Proof as Trait<R>>::Proof { r }
fn main() {
let s: String = transmute::<_, String>(vec![65_u8, 66, 67]);
//~^ ERROR overflow evaluating the requirement `Vec<u8>: Trait<String>`
//[next]~| ERROR overflow evaluating the requirement `<<Vec<u8> as Trait<String>>::Proof as Trait<String>>::Proof == _`
//[next]~| ERROR overflow evaluating the requirement `<<Vec<u8> as Trait<String>>::Proof as Trait<String>>::Proof == String`
//[next]~| ERROR overflow evaluating the requirement `<<Vec<u8> as Trait<String>>::Proof as Trait<String>>::Proof: Sized`
//[next]~| ERROR overflow evaluating the requirement `<<Vec<u8> as Trait<String>>::Proof as Trait<String>>::Proof well-formed`
//[next]~| ERROR overflow evaluating the requirement `<<Vec<u8> as Trait<String>>::Proof as Trait<String>>::Proof == _`
println!("{}", s); // ABC
}

View file

@ -0,0 +1,22 @@
error[E0275]: overflow evaluating the requirement `u32: SendIndir<Foo<u32>>`
--> $DIR/only-one-coinductive-step-needed-trait.rs:24:5
|
LL | is_send::<Foo<u32>>();
| ^^^^^^^^^^^^^^^^^^^^^
|
note: required for `Foo<u32>` to implement `Send`
--> $DIR/only-one-coinductive-step-needed-trait.rs:17:35
|
LL | unsafe impl<T: SendIndir<Foo<T>>> Send for Foo<T> {}
| ----------------- ^^^^ ^^^^^^
| |
| unsatisfied trait bound introduced here
note: required by a bound in `is_send`
--> $DIR/only-one-coinductive-step-needed-trait.rs:22:15
|
LL | fn is_send<T: Send>() {}
| ^^^^ required by this bound in `is_send`
error: aborting due to 1 previous error
For more information about this error, try `rustc --explain E0275`.

View file

@ -0,0 +1,26 @@
//@ revisions: current next
//@ ignore-compare-mode-next-solver (explicit revisions)
//@[next] compile-flags: -Znext-solver
//@[next] check-pass
// #136824 changed cycles to be coinductive if they have at least
// one productive step, causing this test to pass with the new solver.
//
// The cycle in the test is the following:
// - `Foo<T>: Send`, impl requires
// - `T: SendIndir<Foo<T>>`, impl requires
// - `Foo<T>: Send` cycle
//
// The old solver treats this cycle as inductive due to the `T: SendIndir` step.
struct Foo<T>(T);
unsafe impl<T: SendIndir<Foo<T>>> Send for Foo<T> {}
trait SendIndir<T> {}
impl<T, U: Send> SendIndir<U> for T {}
fn is_send<T: Send>() {}
fn main() {
is_send::<Foo<u32>>();
//[current]~^ ERROR overflow evaluating the requirement `u32: SendIndir<Foo<u32>>`
}

View file

@ -0,0 +1,17 @@
error[E0275]: overflow evaluating the requirement `Foo<T>: SendIndir`
--> $DIR/only-one-coinductive-step-needed.rs:17:15
|
LL | struct Foo<T>(<Foo<T> as Trait>::Assoc);
| ^^^^^^^^^^^^^^^^^^^^^^^^
|
note: required for `Foo<T>` to implement `Trait`
--> $DIR/only-one-coinductive-step-needed.rs:26:20
|
LL | impl<T: SendIndir> Trait for T {
| --------- ^^^^^ ^
| |
| unsatisfied trait bound introduced here
error: aborting due to 1 previous error
For more information about this error, try `rustc --explain E0275`.

View file

@ -0,0 +1,34 @@
//@ revisions: current next
//@ ignore-compare-mode-next-solver (explicit revisions)
//@[next] compile-flags: -Znext-solver
//@[next] check-pass
// #136824 changed cycles to be coinductive if they have at least
// one productive step, causing this test to pass with the new solver.
//
// The cycle in the test is the following:
// - `Foo<T>: Send`, builtin auto-trait impl requires
// - `<Foo<T> as Trait>::Assoc: Send`, requires normalizing self type via impl, requires
// - `Foo<T>: SendIndir`, via impl requires
// - `Foo<T>: Send` cycle
//
// The old solver treats this cycle as inductive due to the `Foo<T>: SendIndir` step.
struct Foo<T>(<Foo<T> as Trait>::Assoc);
//[current]~^ ERROR overflow evaluating the requirement `Foo<T>: SendIndir`
trait SendIndir {}
impl<T: Send> SendIndir for T {}
trait Trait {
type Assoc;
}
impl<T: SendIndir> Trait for T {
type Assoc = ();
}
fn is_send<T: Send>() {}
fn main() {
is_send::<Foo<u32>>();
}

View file

@ -1,37 +0,0 @@
//@ compile-flags: -Znext-solver
#![feature(rustc_attrs)]
// Test that having both an inductive and a coinductive cycle
// is handled correctly.
#[rustc_coinductive]
trait Trait {}
impl<T: Inductive + Coinductive> Trait for T {}
trait Inductive {}
impl<T: Trait> Inductive for T {}
#[rustc_coinductive]
trait Coinductive {}
impl<T: Trait> Coinductive for T {}
fn impls_trait<T: Trait>() {}
#[rustc_coinductive]
trait TraitRev {}
impl<T: CoinductiveRev + InductiveRev> TraitRev for T {}
trait InductiveRev {}
impl<T: TraitRev> InductiveRev for T {}
#[rustc_coinductive]
trait CoinductiveRev {}
impl<T: TraitRev> CoinductiveRev for T {}
fn impls_trait_rev<T: TraitRev>() {}
fn main() {
impls_trait::<()>();
//~^ ERROR overflow evaluating the requirement
impls_trait_rev::<()>();
//~^ ERROR overflow evaluating the requirement
}

View file

@ -1,27 +0,0 @@
error[E0275]: overflow evaluating the requirement `(): Trait`
--> $DIR/double-cycle-inductive-coinductive.rs:32:19
|
LL | impls_trait::<()>();
| ^^
|
note: required by a bound in `impls_trait`
--> $DIR/double-cycle-inductive-coinductive.rs:17:19
|
LL | fn impls_trait<T: Trait>() {}
| ^^^^^ required by this bound in `impls_trait`
error[E0275]: overflow evaluating the requirement `(): TraitRev`
--> $DIR/double-cycle-inductive-coinductive.rs:35:23
|
LL | impls_trait_rev::<()>();
| ^^
|
note: required by a bound in `impls_trait_rev`
--> $DIR/double-cycle-inductive-coinductive.rs:29:23
|
LL | fn impls_trait_rev<T: TraitRev>() {}
| ^^^^^^^^ required by this bound in `impls_trait_rev`
error: aborting due to 2 previous errors
For more information about this error, try `rustc --explain E0275`.

View file

@ -1,46 +0,0 @@
//@ compile-flags: -Znext-solver
#![feature(rustc_attrs, trivial_bounds)]
// We have to be careful here:
//
// We either have the provisional result of `A -> B -> A` on the
// stack, which is a fully coinductive cycle. Accessing the
// provisional result for `B` as part of the `A -> C -> B -> A` cycle
// has to make sure we don't just use the result of `A -> B -> A` as the
// new cycle is inductive.
//
// Alternatively, if we have `A -> C -> A` first, then `A -> B -> A` has
// a purely inductive stack, so something could also go wrong here.
#[rustc_coinductive]
trait A {}
#[rustc_coinductive]
trait B {}
trait C {}
impl<T: B + C> A for T {}
impl<T: A> B for T {}
impl<T: B> C for T {}
fn impls_a<T: A>() {}
// The same test with reordered where clauses to make sure we're actually testing anything.
#[rustc_coinductive]
trait AR {}
#[rustc_coinductive]
trait BR {}
trait CR {}
impl<T: CR + BR> AR for T {}
impl<T: AR> BR for T {}
impl<T: BR> CR for T {}
fn impls_ar<T: AR>() {}
fn main() {
impls_a::<()>();
//~^ ERROR overflow evaluating the requirement `(): A`
impls_ar::<()>();
//~^ ERROR overflow evaluating the requirement `(): AR`
}

View file

@ -1,27 +0,0 @@
error[E0275]: overflow evaluating the requirement `(): A`
--> $DIR/inductive-not-on-stack.rs:41:15
|
LL | impls_a::<()>();
| ^^
|
note: required by a bound in `impls_a`
--> $DIR/inductive-not-on-stack.rs:25:15
|
LL | fn impls_a<T: A>() {}
| ^ required by this bound in `impls_a`
error[E0275]: overflow evaluating the requirement `(): AR`
--> $DIR/inductive-not-on-stack.rs:44:16
|
LL | impls_ar::<()>();
| ^^
|
note: required by a bound in `impls_ar`
--> $DIR/inductive-not-on-stack.rs:38:16
|
LL | fn impls_ar<T: AR>() {}
| ^^ required by this bound in `impls_ar`
error: aborting due to 2 previous errors
For more information about this error, try `rustc --explain E0275`.

View file

@ -1,39 +0,0 @@
//@ compile-flags: -Znext-solver
#![feature(rustc_attrs)]
// A test intended to check how we handle provisional results
// for a goal computed with an inductive and a coinductive stack.
//
// Unfortunately this doesn't really detect whether we've done
// something wrong but instead only showcases that we thought of
// this.
//
// FIXME(-Znext-solver=coinductive): With the new coinduction approach
// the same goal stack can be both inductive and coinductive, depending
// on why we're proving a specific nested goal. Rewrite this test
// at that point instead of relying on `BInd`.
#[rustc_coinductive]
trait A {}
#[rustc_coinductive]
trait B {}
trait BInd {}
impl<T: ?Sized + B> BInd for T {}
#[rustc_coinductive]
trait C {}
trait CInd {}
impl<T: ?Sized + C> CInd for T {}
impl<T: ?Sized + BInd + C> A for T {}
impl<T: ?Sized + CInd + C> B for T {}
impl<T: ?Sized + B + A> C for T {}
fn impls_a<T: A>() {}
fn main() {
impls_a::<()>();
//~^ ERROR overflow evaluating the requirement `(): A`
}

View file

@ -1,15 +0,0 @@
error[E0275]: overflow evaluating the requirement `(): A`
--> $DIR/mixed-cycles-1.rs:37:15
|
LL | impls_a::<()>();
| ^^
|
note: required by a bound in `impls_a`
--> $DIR/mixed-cycles-1.rs:34:15
|
LL | fn impls_a<T: A>() {}
| ^ required by this bound in `impls_a`
error: aborting due to 1 previous error
For more information about this error, try `rustc --explain E0275`.

View file

@ -1,32 +0,0 @@
//@ compile-flags: -Znext-solver
#![feature(rustc_attrs)]
// A test showcasing that the solver may need to
// compute a goal which is already in the provisional
// cache.
//
// However, given that `(): BInd` and `(): B` are currently distinct
// goals, this is actually not possible right now.
//
// FIXME(-Znext-solver=coinductive): With the new coinduction approach
// the same goal stack can be both inductive and coinductive, depending
// on why we're proving a specific nested goal. Rewrite this test
// at that point.
#[rustc_coinductive]
trait A {}
#[rustc_coinductive]
trait B {}
trait BInd {}
impl<T: ?Sized + B> BInd for T {}
impl<T: ?Sized + BInd + B> A for T {}
impl<T: ?Sized + BInd> B for T {}
fn impls_a<T: A>() {}
fn main() {
impls_a::<()>();
//~^ ERROR overflow evaluating the requirement `(): A`
}

View file

@ -1,15 +0,0 @@
error[E0275]: overflow evaluating the requirement `(): A`
--> $DIR/mixed-cycles-2.rs:30:15
|
LL | impls_a::<()>();
| ^^
|
note: required by a bound in `impls_a`
--> $DIR/mixed-cycles-2.rs:27:15
|
LL | fn impls_a<T: A>() {}
| ^ required by this bound in `impls_a`
error: aborting due to 1 previous error
For more information about this error, try `rustc --explain E0275`.

View file

@ -0,0 +1,10 @@
error[E0391]: cycle detected when computing layout of `<[Hello] as Normalize>::Assoc`
|
= note: ...which requires computing layout of `Hello`...
= note: ...which again requires computing layout of `<[Hello] as Normalize>::Assoc`, completing the cycle
= note: cycle used when computing layout of `Hello`
= note: see https://rustc-dev-guide.rust-lang.org/overview.html#queries and https://rustc-dev-guide.rust-lang.org/query.html for more information
error: aborting due to 1 previous error
For more information about this error, try `rustc --explain E0391`.

View file

@ -1,6 +1,10 @@
// Regression test for #129541
//~^ ERROR cycle detected when computing layout of `<[Hello] as Normalize>::Assoc` [E0391]
//@ revisions: current next
//@ ignore-compare-mode-next-solver (explicit revisions)
//@[next] compile-flags: -Znext-solver
trait Bound {}
trait Normalize {
type Assoc;

View file

@ -1,6 +1,9 @@
// Regression test for #129541
//@ revisions: unique multiple
//@ revisions: unique_curr unique_next multiple_curr multiple_next
//@ ignore-compare-mode-next-solver (explicit revisions)
//@[unique_next] compile-flags: -Znext-solver
//@[multiple_next] compile-flags: -Znext-solver
//@ error-pattern: reached the recursion limit finding the struct tail for `<[Hello] as Normalize>::Assoc`
trait Bound {}
@ -8,7 +11,7 @@ trait Normalize {
type Assoc;
}
#[cfg(multiple)]
#[cfg(any(multiple_curr, multiple_next))]
impl<T: Bound> Normalize for T {
type Assoc = T;
}

View file

@ -0,0 +1,6 @@
error: reached the recursion limit finding the struct tail for `<[Hello] as Normalize>::Assoc`
|
= help: consider increasing the recursion limit by adding a `#![recursion_limit = "256"]`
error: aborting due to 1 previous error

View file

@ -0,0 +1,6 @@
error: reached the recursion limit finding the struct tail for `<[Hello] as Normalize>::Assoc`
|
= help: consider increasing the recursion limit by adding a `#![recursion_limit = "256"]`
error: aborting due to 1 previous error