Auto merge of #152755 - jdonszelmann:rollup-hcFNB2j, r=jdonszelmann

Rollup of 6 pull requests

Successful merges:

 - rust-lang/rust#152609 (Install LLVM DLL in the right place on Windows)
 - rust-lang/rust#149904 (`-Znext-solver` Remove the forced ambiguity hack from search graph)
 - rust-lang/rust#152704 (Remove `QueryCtxt` and trait `HasDepContext`)
 - rust-lang/rust#152746 (remove `#![allow(stable_features)]` from most tests)
 - rust-lang/rust#152675 (Improve `VaList` stdlib docs)
 - rust-lang/rust#152748 (Update `sysinfo` version to `0.38.2`)
bors 2026-02-17 13:26:15 +00:00
commit dfbfbf785f
58 changed files with 370 additions and 452 deletions


@ -2652,9 +2652,9 @@ dependencies = [
[[package]]
name = "objc2-core-foundation"
version = "0.3.2"
version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2a180dd8642fa45cdb7dd721cd4c11b1cadd4929ce112ebd8b9f5803cc79d536"
checksum = "1c10c2894a6fed806ade6027bcd50662746363a9589d3ec9d9bef30a4e4bc166"
dependencies = [
"bitflags",
]
@ -2667,9 +2667,9 @@ checksum = "ef25abbcd74fb2609453eb695bd2f860d389e457f67dc17cafc8b8cbc89d0c33"
[[package]]
name = "objc2-io-kit"
version = "0.3.2"
version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "33fafba39597d6dc1fb709123dfa8289d39406734be322956a69f0931c73bb15"
checksum = "71c1c64d6120e51cd86033f67176b1cb66780c2efe34dec55176f77befd93c0a"
dependencies = [
"libc",
"objc2-core-foundation",
@ -5354,9 +5354,9 @@ dependencies = [
[[package]]
name = "sysinfo"
version = "0.38.0"
version = "0.38.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fe840c5b1afe259a5657392a4dbb74473a14c8db999c3ec2f4ae812e028a94da"
checksum = "1efc19935b4b66baa6f654ac7924c192f55b175c00a7ab72410fc24284dacda8"
dependencies = [
"libc",
"objc2-core-foundation",


@ -25,9 +25,6 @@ declare_features! (
// feature-group-start: for testing purposes
// -------------------------------------------------------------------------
/// A temporary feature gate used to enable parser extensions needed
/// to bootstrap fix for #5723.
(accepted, issue_5723_bootstrap, "1.0.0", None),
/// These are used to test this portion of the compiler,
/// they don't actually mean anything.
(accepted, test_accepted_feature, "1.0.0", None),


@ -172,6 +172,9 @@ declare_features! (
/// Allow anonymous constants from an inline `const` block in pattern position
(removed, inline_const_pat, "1.88.0", Some(76001),
Some("removed due to implementation concerns as it requires significant refactorings"), 138492),
/// A temporary feature gate used to enable parser extensions needed
/// to bootstrap fix for #5723.
(removed, issue_5723_bootstrap, "CURRENT_RUSTC_VERSION", None, None),
/// Lazily evaluate constants. This allows constants to depend on type parameters.
(removed, lazy_normalization_consts, "1.56.0", Some(72219), Some("superseded by `generic_const_exprs`"), 88369),
/// Changes `impl Trait` to capture all lifetimes in scope.


@ -16,7 +16,7 @@ use rustc_parse::lexer::StripTokens;
use rustc_parse::new_parser_from_source_str;
use rustc_parse::parser::Recovery;
use rustc_parse::parser::attr::AllowLeadingUnsafe;
use rustc_query_impl::{QueryCtxt, print_query_stack};
use rustc_query_impl::print_query_stack;
use rustc_session::config::{self, Cfg, CheckCfg, ExpectedValues, Input, OutFileName};
use rustc_session::parse::ParseSess;
use rustc_session::{CompilerIO, EarlyDiagCtxt, Session, lint};
@ -556,7 +556,7 @@ pub fn try_print_query_stack(
let all_frames = ty::tls::with_context_opt(|icx| {
if let Some(icx) = icx {
ty::print::with_no_queries!(print_query_stack(
QueryCtxt::new(icx.tcx),
icx.tcx,
icx.query,
dcx,
limit_frames,


@ -18,6 +18,7 @@ use rustc_data_structures::sync;
use rustc_metadata::{DylibError, EncodedMetadata, load_symbol_from_dylib};
use rustc_middle::dep_graph::{WorkProduct, WorkProductId};
use rustc_middle::ty::{CurrentGcx, TyCtxt};
use rustc_query_impl::collect_active_jobs_from_all_queries;
use rustc_session::config::{
Cfg, CrateType, OutFileName, OutputFilenames, OutputTypes, Sysroot, host_tuple,
};
@ -184,7 +185,7 @@ pub(crate) fn run_in_thread_pool_with_globals<
use rustc_data_structures::defer;
use rustc_data_structures::sync::FromDyn;
use rustc_middle::ty::tls;
use rustc_query_impl::{QueryCtxt, break_query_cycles};
use rustc_query_impl::break_query_cycles;
let thread_stack_size = init_stack_size(thread_builder_diag);
@ -253,7 +254,7 @@ internal compiler error: query cycle handler thread panicked, aborting process";
|| {
// Ensure there were no errors collecting all active jobs.
// We need the complete map to ensure we find a cycle to break.
QueryCtxt::new(tcx).collect_active_jobs_from_all_queries(false).expect(
collect_active_jobs_from_all_queries(tcx, false).expect(
"failed to collect active queries in deadlock handler",
)
},


@ -61,6 +61,8 @@ pub fn walk_native_lib_search_dirs<R>(
// library directory instead of the self-contained directories.
// Sanitizer libraries have the same issue and are also linked by name on Apple targets.
// The targets here should be in sync with `copy_third_party_objects` in bootstrap.
// Finally, there is the shared LLVM library which, unlike the compiler
// libraries, is linked by name and therefore requires a search path for
// the linker.
// FIXME: implement `-Clink-self-contained=+/-unwind,+/-sanitizers`, move the shipped libunwind
// and sanitizers to self-contained directory, and stop adding this search path.
// FIXME: On AIX this also has the side-effect of making the list of library search paths
@ -71,6 +73,9 @@ pub fn walk_native_lib_search_dirs<R>(
|| sess.target.os == Os::Fuchsia
|| sess.target.is_like_aix
|| sess.target.is_like_darwin && !sess.sanitizers().is_empty()
|| sess.target.os == Os::Windows
&& sess.target.env == Env::Gnu
&& sess.target.abi == Abi::Llvm
{
f(&sess.target_tlib_path.dir, false)?;
}


@ -23,7 +23,7 @@ use {super::debug::EdgeFilter, std::env};
use super::query::DepGraphQuery;
use super::serialized::{GraphEncoder, SerializedDepGraph, SerializedDepNodeIndex};
use super::{DepKind, DepNode, HasDepContext, WorkProductId, read_deps, with_deps};
use super::{DepKind, DepNode, WorkProductId, read_deps, with_deps};
use crate::dep_graph::edges::EdgesVec;
use crate::ich::StableHashingContext;
use crate::ty::TyCtxt;
@ -268,17 +268,17 @@ impl DepGraph {
}
#[inline(always)]
pub fn with_task<'tcx, Ctxt: HasDepContext<'tcx>, A: Debug, R>(
pub fn with_task<'tcx, A: Debug, R>(
&self,
key: DepNode,
cx: Ctxt,
arg: A,
task: fn(Ctxt, A) -> R,
dep_node: DepNode,
tcx: TyCtxt<'tcx>,
task_arg: A,
task_fn: fn(tcx: TyCtxt<'tcx>, task_arg: A) -> R,
hash_result: Option<fn(&mut StableHashingContext<'_>, &R) -> Fingerprint>,
) -> (R, DepNodeIndex) {
match self.data() {
Some(data) => data.with_task(key, cx, arg, task, hash_result),
None => (task(cx, arg), self.next_virtual_depnode_index()),
Some(data) => data.with_task(dep_node, tcx, task_arg, task_fn, hash_result),
None => (task_fn(tcx, task_arg), self.next_virtual_depnode_index()),
}
}
@ -310,33 +310,21 @@ impl DepGraphData {
/// prevent implicit 'leaks' of tracked state into the task (which
/// could then be read without generating correct edges in the
/// dep-graph -- see the [rustc dev guide] for more details on
/// the dep-graph). To this end, the task function gets exactly two
/// pieces of state: the context `cx` and an argument `arg`. Both
/// of these bits of state must be of some type that implements
/// `DepGraphSafe` and hence does not leak.
/// the dep-graph).
///
/// The choice of two arguments is not fundamental. One argument
/// would work just as well, since multiple values can be
/// collected using tuples. However, using two arguments works out
/// to be quite convenient, since it is common to need a context
/// (`cx`) and some argument (e.g., a `DefId` identifying what
/// item to process).
///
/// For cases where you need some other number of arguments:
///
/// - If you only need one argument, just use `()` for the `arg`
/// parameter.
/// - If you need 3+ arguments, use a tuple for the
/// `arg` parameter.
/// Therefore, the task function takes a `TyCtxt`, plus exactly one
/// additional argument, `task_arg`. The additional argument type can be
/// `()` if no argument is needed, or a tuple if multiple arguments are
/// needed.
///
/// [rustc dev guide]: https://rustc-dev-guide.rust-lang.org/queries/incremental-compilation.html
#[inline(always)]
pub fn with_task<'tcx, Ctxt: HasDepContext<'tcx>, A: Debug, R>(
pub fn with_task<'tcx, A: Debug, R>(
&self,
key: DepNode,
cx: Ctxt,
arg: A,
task: fn(Ctxt, A) -> R,
dep_node: DepNode,
tcx: TyCtxt<'tcx>,
task_arg: A,
task_fn: fn(tcx: TyCtxt<'tcx>, task_arg: A) -> R,
hash_result: Option<fn(&mut StableHashingContext<'_>, &R) -> Fingerprint>,
) -> (R, DepNodeIndex) {
// If the following assertion triggers, there can be two reasons:
@ -344,32 +332,28 @@ impl DepGraphData {
// in `DepGraph::try_mark_green()`.
// 2. Two distinct query keys get mapped to the same `DepNode`
// (see for example #48923).
self.assert_dep_node_not_yet_allocated_in_current_session(
cx.dep_context().sess,
&key,
|| {
format!(
"forcing query with already existing `DepNode`\n\
- query-key: {arg:?}\n\
- dep-node: {key:?}"
)
},
);
self.assert_dep_node_not_yet_allocated_in_current_session(tcx.sess, &dep_node, || {
format!(
"forcing query with already existing `DepNode`\n\
- query-key: {task_arg:?}\n\
- dep-node: {dep_node:?}"
)
});
let with_deps = |task_deps| with_deps(task_deps, || task(cx, arg));
let (result, edges) = if cx.dep_context().is_eval_always(key.kind) {
let with_deps = |task_deps| with_deps(task_deps, || task_fn(tcx, task_arg));
let (result, edges) = if tcx.is_eval_always(dep_node.kind) {
(with_deps(TaskDepsRef::EvalAlways), EdgesVec::new())
} else {
let task_deps = Lock::new(TaskDeps::new(
#[cfg(debug_assertions)]
Some(key),
Some(dep_node),
0,
));
(with_deps(TaskDepsRef::Allow(&task_deps)), task_deps.into_inner().reads)
};
let dep_node_index =
self.hash_result_and_alloc_node(cx.dep_context(), key, edges, &result, hash_result);
self.hash_result_and_alloc_node(tcx, dep_node, edges, &result, hash_result);
(result, dep_node_index)
}
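
To make the new `with_task` convention concrete, here is a hedged standalone sketch; `run_task` and `Ctx` are illustrative stand-ins for `DepGraph::with_task` and `TyCtxt`, not rustc APIs. Because the task is a plain `fn` pointer, it cannot capture untracked state: everything it reads must flow through the context or the single argument, just as `execute_job_incr` later in this diff bundles `(query, key)` into a tuple.

```rust
// Illustrative stand-ins for `TyCtxt` and `DepGraph::with_task`.
#[derive(Clone, Copy)]
struct Ctx;

// The task is a plain `fn` pointer, so it cannot capture untracked state.
fn run_task<A: std::fmt::Debug, R>(ctx: Ctx, arg: A, task: fn(Ctx, A) -> R) -> R {
    task(ctx, arg)
}

fn main() {
    // No extra argument needed: pass `()`.
    let one = run_task(Ctx, (), |_, ()| 1);
    // Several values: bundle them into a tuple and destructure in the task.
    let answer = run_task(Ctx, (6, 7), |_, (a, b): (i32, i32)| a * b);
    assert_eq!((one, answer), (1, 42));
}
```
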
@ -954,7 +938,7 @@ impl DepGraphData {
// We failed to mark it green, so we try to force the query.
debug!("trying to force dependency {dep_dep_node:?}");
if !tcx.dep_context().try_force_from_dep_node(*dep_dep_node, parent_dep_node_index, frame) {
if !tcx.try_force_from_dep_node(*dep_dep_node, parent_dep_node_index, frame) {
// The DepNode could not be forced.
debug!("dependency {dep_dep_node:?} could not be forced");
return None;
@ -1001,10 +985,7 @@ impl DepGraphData {
let frame = MarkFrame { index: prev_dep_node_index, parent: frame };
// We never try to mark eval_always nodes as green
debug_assert!(
!tcx.dep_context()
.is_eval_always(self.previous.index_to_node(prev_dep_node_index).kind)
);
debug_assert!(!tcx.is_eval_always(self.previous.index_to_node(prev_dep_node_index).kind));
let prev_deps = self.previous.edge_targets_from(prev_dep_node_index);


@ -25,22 +25,6 @@ mod graph;
mod query;
mod serialized;
pub trait HasDepContext<'tcx>: Copy {
fn dep_context(&self) -> TyCtxt<'tcx>;
}
impl<'tcx> HasDepContext<'tcx> for TyCtxt<'tcx> {
fn dep_context(&self) -> TyCtxt<'tcx> {
*self
}
}
impl<'tcx, T: HasDepContext<'tcx>, Q: Copy> HasDepContext<'tcx> for (T, Q) {
fn dep_context(&self) -> TyCtxt<'tcx> {
self.0.dep_context()
}
}
/// Describes the contents of the fingerprint generated by a given query.
///
/// This is mainly for determining whether and how we can reconstruct a key


@ -5,7 +5,7 @@ use rustc_data_structures::hash_table::{Entry, HashTable};
use rustc_data_structures::stack::ensure_sufficient_stack;
use rustc_data_structures::{outline, sharded, sync};
use rustc_errors::{Diag, FatalError, StashKey};
use rustc_middle::dep_graph::{DepGraphData, DepNodeKey, HasDepContext};
use rustc_middle::dep_graph::{DepGraphData, DepNodeKey};
use rustc_middle::query::{
ActiveKeyStatus, CycleError, CycleErrorHandling, QueryCache, QueryJob, QueryJobId, QueryLatch,
QueryMode, QueryStackDeferred, QueryStackFrame, QueryState,
@ -16,7 +16,10 @@ use rustc_span::{DUMMY_SP, Span};
use crate::dep_graph::{DepNode, DepNodeIndex};
use crate::job::{QueryJobInfo, QueryJobMap, find_cycle_in_stack, report_cycle};
use crate::{QueryCtxt, QueryFlags, SemiDynamicQueryDispatcher};
use crate::plumbing::{
collect_active_jobs_from_all_queries, current_query_job, next_job_id, start_query,
};
use crate::{QueryFlags, SemiDynamicQueryDispatcher};
#[inline]
fn equivalent_key<K: Eq, V>(k: &K) -> impl Fn(&(K, V)) -> bool + '_ {
@ -101,32 +104,32 @@ where
#[inline(never)]
fn mk_cycle<'tcx, C: QueryCache, const FLAGS: QueryFlags>(
query: SemiDynamicQueryDispatcher<'tcx, C, FLAGS>,
qcx: QueryCtxt<'tcx>,
tcx: TyCtxt<'tcx>,
cycle_error: CycleError,
) -> C::Value {
let error = report_cycle(qcx.tcx.sess, &cycle_error);
handle_cycle_error(query, qcx, &cycle_error, error)
let error = report_cycle(tcx.sess, &cycle_error);
handle_cycle_error(query, tcx, &cycle_error, error)
}
fn handle_cycle_error<'tcx, C: QueryCache, const FLAGS: QueryFlags>(
query: SemiDynamicQueryDispatcher<'tcx, C, FLAGS>,
qcx: QueryCtxt<'tcx>,
tcx: TyCtxt<'tcx>,
cycle_error: &CycleError,
error: Diag<'_>,
) -> C::Value {
match query.cycle_error_handling() {
CycleErrorHandling::Error => {
let guar = error.emit();
query.value_from_cycle_error(qcx.tcx, cycle_error, guar)
query.value_from_cycle_error(tcx, cycle_error, guar)
}
CycleErrorHandling::Fatal => {
error.emit();
qcx.tcx.dcx().abort_if_errors();
tcx.dcx().abort_if_errors();
unreachable!()
}
CycleErrorHandling::DelayBug => {
let guar = error.delay_as_bug();
query.value_from_cycle_error(qcx.tcx, cycle_error, guar)
query.value_from_cycle_error(tcx, cycle_error, guar)
}
CycleErrorHandling::Stash => {
let guar = if let Some(root) = cycle_error.cycle.first()
@ -136,7 +139,7 @@ fn handle_cycle_error<'tcx, C: QueryCache, const FLAGS: QueryFlags>(
} else {
error.emit()
};
query.value_from_cycle_error(qcx.tcx, cycle_error, guar)
query.value_from_cycle_error(tcx, cycle_error, guar)
}
}
}
@ -207,25 +210,24 @@ where
#[inline(never)]
fn cycle_error<'tcx, C: QueryCache, const FLAGS: QueryFlags>(
query: SemiDynamicQueryDispatcher<'tcx, C, FLAGS>,
qcx: QueryCtxt<'tcx>,
tcx: TyCtxt<'tcx>,
try_execute: QueryJobId,
span: Span,
) -> (C::Value, Option<DepNodeIndex>) {
// Ensure there were no errors collecting all active jobs.
// We need the complete map to ensure we find a cycle to break.
let job_map = qcx
.collect_active_jobs_from_all_queries(false)
let job_map = collect_active_jobs_from_all_queries(tcx, false)
.ok()
.expect("failed to collect active queries");
let error = find_cycle_in_stack(try_execute, job_map, &qcx.current_query_job(), span);
(mk_cycle(query, qcx, error.lift()), None)
let error = find_cycle_in_stack(try_execute, job_map, &current_query_job(tcx), span);
(mk_cycle(query, tcx, error.lift()), None)
}
#[inline(always)]
fn wait_for_query<'tcx, C: QueryCache, const FLAGS: QueryFlags>(
query: SemiDynamicQueryDispatcher<'tcx, C, FLAGS>,
qcx: QueryCtxt<'tcx>,
tcx: TyCtxt<'tcx>,
span: Span,
key: C::Key,
latch: QueryLatch<'tcx>,
@ -234,20 +236,20 @@ fn wait_for_query<'tcx, C: QueryCache, const FLAGS: QueryFlags>(
// For parallel queries, we'll block and wait until the query running
// in another thread has completed. Record how long we wait in the
// self-profiler.
let query_blocked_prof_timer = qcx.tcx.prof.query_blocked();
let query_blocked_prof_timer = tcx.prof.query_blocked();
// With parallel queries we might just have to wait on some other
// thread.
let result = latch.wait_on(qcx.tcx, current, span);
let result = latch.wait_on(tcx, current, span);
match result {
Ok(()) => {
let Some((v, index)) = query.query_cache(qcx).lookup(&key) else {
let Some((v, index)) = query.query_cache(tcx).lookup(&key) else {
outline(|| {
// We didn't find the query result in the query cache. Check if it was
// poisoned due to a panic instead.
let key_hash = sharded::make_hash(&key);
let shard = query.query_state(qcx).active.lock_shard_by_hash(key_hash);
let shard = query.query_state(tcx).active.lock_shard_by_hash(key_hash);
match shard.find(key_hash, equivalent_key(&key)) {
// The query we waited on panicked. Continue unwinding here.
Some((_, ActiveKeyStatus::Poisoned)) => FatalError.raise(),
@ -259,24 +261,24 @@ fn wait_for_query<'tcx, C: QueryCache, const FLAGS: QueryFlags>(
})
};
qcx.tcx.prof.query_cache_hit(index.into());
tcx.prof.query_cache_hit(index.into());
query_blocked_prof_timer.finish_with_query_invocation_id(index.into());
(v, Some(index))
}
Err(cycle) => (mk_cycle(query, qcx, cycle.lift()), None),
Err(cycle) => (mk_cycle(query, tcx, cycle.lift()), None),
}
}
#[inline(never)]
fn try_execute_query<'tcx, C: QueryCache, const FLAGS: QueryFlags, const INCR: bool>(
query: SemiDynamicQueryDispatcher<'tcx, C, FLAGS>,
qcx: QueryCtxt<'tcx>,
tcx: TyCtxt<'tcx>,
span: Span,
key: C::Key,
dep_node: Option<DepNode>,
) -> (C::Value, Option<DepNodeIndex>) {
let state = query.query_state(qcx);
let state = query.query_state(tcx);
let key_hash = sharded::make_hash(&key);
let mut state_lock = state.active.lock_shard_by_hash(key_hash);
@ -286,27 +288,27 @@ fn try_execute_query<'tcx, C: QueryCache, const FLAGS: QueryFlags, const INCR: b
// re-executing the query since `try_start` only checks that the query is not currently
executing, but another thread may have already completed the query and stored its result
// in the query cache.
if qcx.tcx.sess.threads() > 1 {
if let Some((value, index)) = query.query_cache(qcx).lookup(&key) {
qcx.tcx.prof.query_cache_hit(index.into());
if tcx.sess.threads() > 1 {
if let Some((value, index)) = query.query_cache(tcx).lookup(&key) {
tcx.prof.query_cache_hit(index.into());
return (value, Some(index));
}
}
let current_job_id = qcx.current_query_job();
let current_job_id = current_query_job(tcx);
match state_lock.entry(key_hash, equivalent_key(&key), |(k, _)| sharded::make_hash(k)) {
Entry::Vacant(entry) => {
// Nothing has computed or is computing the query, so we start a new job and insert it in the
// state map.
let id = qcx.next_job_id();
let id = next_job_id(tcx);
let job = QueryJob::new(id, span, current_job_id);
entry.insert((key, ActiveKeyStatus::Started(job)));
// Drop the lock before we start executing the query
drop(state_lock);
execute_job::<C, FLAGS, INCR>(query, qcx, state, key, key_hash, id, dep_node)
execute_job::<C, FLAGS, INCR>(query, tcx, state, key, key_hash, id, dep_node)
}
Entry::Occupied(mut entry) => {
match &mut entry.get_mut().1 {
@ -318,7 +320,7 @@ fn try_execute_query<'tcx, C: QueryCache, const FLAGS: QueryFlags, const INCR: b
// Only call `wait_for_query` if we're using a Rayon thread pool
// as it will attempt to mark the worker thread as blocked.
return wait_for_query(query, qcx, span, key, latch, current_job_id);
return wait_for_query(query, tcx, span, key, latch, current_job_id);
}
let id = job.id;
@ -326,7 +328,7 @@ fn try_execute_query<'tcx, C: QueryCache, const FLAGS: QueryFlags, const INCR: b
// If we are single-threaded we know that we have a cycle error,
// so we just return the error.
cycle_error(query, qcx, id, span)
cycle_error(query, tcx, id, span)
}
ActiveKeyStatus::Poisoned => FatalError.raise(),
}
@ -337,7 +339,7 @@ fn try_execute_query<'tcx, C: QueryCache, const FLAGS: QueryFlags, const INCR: b
#[inline(always)]
fn execute_job<'tcx, C: QueryCache, const FLAGS: QueryFlags, const INCR: bool>(
query: SemiDynamicQueryDispatcher<'tcx, C, FLAGS>,
qcx: QueryCtxt<'tcx>,
tcx: TyCtxt<'tcx>,
state: &'tcx QueryState<'tcx, C::Key>,
key: C::Key,
key_hash: u64,
@ -348,16 +350,16 @@ fn execute_job<'tcx, C: QueryCache, const FLAGS: QueryFlags, const INCR: bool>(
// panic occurs while executing the query (or any intermediate plumbing).
let job_guard = ActiveJobGuard { state, key, key_hash };
debug_assert_eq!(qcx.tcx.dep_graph.is_fully_enabled(), INCR);
debug_assert_eq!(tcx.dep_graph.is_fully_enabled(), INCR);
// Delegate to another function to actually execute the query job.
let (result, dep_node_index) = if INCR {
execute_job_incr(query, qcx, qcx.tcx.dep_graph.data().unwrap(), key, dep_node, id)
execute_job_incr(query, tcx, key, dep_node, id)
} else {
execute_job_non_incr(query, qcx, key, id)
execute_job_non_incr(query, tcx, key, id)
};
let cache = query.query_cache(qcx);
let cache = query.query_cache(tcx);
if query.feedable() {
// We should not compute queries that also got a value via feeding.
// This can't happen, as query feeding adds the very dependencies to the fed query
@ -373,7 +375,7 @@ fn execute_job<'tcx, C: QueryCache, const FLAGS: QueryFlags, const INCR: bool>(
);
};
let (old_hash, new_hash) = qcx.dep_context().with_stable_hashing_context(|mut hcx| {
let (old_hash, new_hash) = tcx.with_stable_hashing_context(|mut hcx| {
(hasher(&mut hcx, &cached_result), hasher(&mut hcx, &result))
});
let formatter = query.format_value();
@ -381,7 +383,7 @@ fn execute_job<'tcx, C: QueryCache, const FLAGS: QueryFlags, const INCR: bool>(
// We have an inconsistency. This can happen if one of the two
// results is tainted by errors.
assert!(
qcx.tcx.dcx().has_errors().is_some(),
tcx.dcx().has_errors().is_some(),
"Computed query value for {:?}({:?}) is inconsistent with fed value,\n\
computed={:#?}\nfed={:#?}",
query.dep_kind(),
@ -403,22 +405,22 @@ fn execute_job<'tcx, C: QueryCache, const FLAGS: QueryFlags, const INCR: bool>(
#[inline(always)]
fn execute_job_non_incr<'tcx, C: QueryCache, const FLAGS: QueryFlags>(
query: SemiDynamicQueryDispatcher<'tcx, C, FLAGS>,
qcx: QueryCtxt<'tcx>,
tcx: TyCtxt<'tcx>,
key: C::Key,
job_id: QueryJobId,
) -> (C::Value, DepNodeIndex) {
debug_assert!(!qcx.tcx.dep_graph.is_fully_enabled());
debug_assert!(!tcx.dep_graph.is_fully_enabled());
// Fingerprint the key, just to assert that it doesn't
// have anything we don't consider hashable
if cfg!(debug_assertions) {
let _ = key.to_fingerprint(qcx.tcx);
let _ = key.to_fingerprint(tcx);
}
let prof_timer = qcx.tcx.prof.query_provider();
let prof_timer = tcx.prof.query_provider();
// Call the query provider.
let result = qcx.start_query(job_id, query.depth_limit(), || query.invoke_provider(qcx, key));
let dep_node_index = qcx.tcx.dep_graph.next_virtual_depnode_index();
let result = start_query(tcx, job_id, query.depth_limit(), || query.invoke_provider(tcx, key));
let dep_node_index = tcx.dep_graph.next_virtual_depnode_index();
prof_timer.finish_with_query_invocation_id(dep_node_index.into());
// Similarly, fingerprint the result to assert that
@ -426,7 +428,7 @@ fn execute_job_non_incr<'tcx, C: QueryCache, const FLAGS: QueryFlags>(
if cfg!(debug_assertions)
&& let Some(hash_result) = query.hash_result()
{
qcx.dep_context().with_stable_hashing_context(|mut hcx| {
tcx.with_stable_hashing_context(|mut hcx| {
hash_result(&mut hcx, &result);
});
}
@ -437,44 +439,45 @@ fn execute_job_non_incr<'tcx, C: QueryCache, const FLAGS: QueryFlags>(
#[inline(always)]
fn execute_job_incr<'tcx, C: QueryCache, const FLAGS: QueryFlags>(
query: SemiDynamicQueryDispatcher<'tcx, C, FLAGS>,
qcx: QueryCtxt<'tcx>,
dep_graph_data: &DepGraphData,
tcx: TyCtxt<'tcx>,
key: C::Key,
mut dep_node_opt: Option<DepNode>,
job_id: QueryJobId,
) -> (C::Value, DepNodeIndex) {
let dep_graph_data =
tcx.dep_graph.data().expect("should always be present in incremental mode");
if !query.anon() && !query.eval_always() {
// `to_dep_node` is expensive for some `DepKind`s.
let dep_node = dep_node_opt.get_or_insert_with(|| query.construct_dep_node(qcx.tcx, &key));
let dep_node = dep_node_opt.get_or_insert_with(|| query.construct_dep_node(tcx, &key));
// The diagnostics for this query will be promoted to the current session during
// `try_mark_green()`, so we can ignore them here.
if let Some(ret) = qcx.start_query(job_id, false, || {
try_load_from_disk_and_cache_in_memory(query, dep_graph_data, qcx, &key, dep_node)
if let Some(ret) = start_query(tcx, job_id, false, || {
try_load_from_disk_and_cache_in_memory(query, dep_graph_data, tcx, &key, dep_node)
}) {
return ret;
}
}
let prof_timer = qcx.tcx.prof.query_provider();
let prof_timer = tcx.prof.query_provider();
let (result, dep_node_index) = qcx.start_query(job_id, query.depth_limit(), || {
let (result, dep_node_index) = start_query(tcx, job_id, query.depth_limit(), || {
if query.anon() {
// Call the query provider inside an anon task.
return dep_graph_data.with_anon_task_inner(qcx.tcx, query.dep_kind(), || {
query.invoke_provider(qcx, key)
});
return dep_graph_data
.with_anon_task_inner(tcx, query.dep_kind(), || query.invoke_provider(tcx, key));
}
// `to_dep_node` is expensive for some `DepKind`s.
let dep_node = dep_node_opt.unwrap_or_else(|| query.construct_dep_node(qcx.tcx, &key));
let dep_node = dep_node_opt.unwrap_or_else(|| query.construct_dep_node(tcx, &key));
// Call the query provider.
dep_graph_data.with_task(
dep_node,
(qcx, query),
key,
|(qcx, query), key| query.invoke_provider(qcx, key),
tcx,
(query, key),
|tcx, (query, key)| query.invoke_provider(tcx, key),
query.hash_result(),
)
});
@ -488,21 +491,21 @@ fn execute_job_incr<'tcx, C: QueryCache, const FLAGS: QueryFlags>(
fn try_load_from_disk_and_cache_in_memory<'tcx, C: QueryCache, const FLAGS: QueryFlags>(
query: SemiDynamicQueryDispatcher<'tcx, C, FLAGS>,
dep_graph_data: &DepGraphData,
qcx: QueryCtxt<'tcx>,
tcx: TyCtxt<'tcx>,
key: &C::Key,
dep_node: &DepNode,
) -> Option<(C::Value, DepNodeIndex)> {
// Note: this function can be called concurrently for the same query.
// We must ensure that this is handled correctly.
let (prev_dep_node_index, dep_node_index) = dep_graph_data.try_mark_green(qcx.tcx, dep_node)?;
let (prev_dep_node_index, dep_node_index) = dep_graph_data.try_mark_green(tcx, dep_node)?;
debug_assert!(dep_graph_data.is_index_green(prev_dep_node_index));
// First we try to load the result from the on-disk cache.
// Some things are never cached on disk.
if let Some(result) = query.try_load_from_disk(qcx, key, prev_dep_node_index, dep_node_index) {
if std::intrinsics::unlikely(qcx.tcx.sess.opts.unstable_opts.query_dep_graph) {
if let Some(result) = query.try_load_from_disk(tcx, key, prev_dep_node_index, dep_node_index) {
if std::intrinsics::unlikely(tcx.sess.opts.unstable_opts.query_dep_graph) {
dep_graph_data.mark_debug_loaded_from_disk(*dep_node)
}
@ -516,10 +519,10 @@ fn try_load_from_disk_and_cache_in_memory<'tcx, C: QueryCache, const FLAGS: Quer
// give us some coverage of potential bugs though.
let try_verify = prev_fingerprint.split().1.as_u64().is_multiple_of(32);
if std::intrinsics::unlikely(
try_verify || qcx.tcx.sess.opts.unstable_opts.incremental_verify_ich,
try_verify || tcx.sess.opts.unstable_opts.incremental_verify_ich,
) {
incremental_verify_ich(
qcx.tcx,
tcx,
dep_graph_data,
&result,
prev_dep_node_index,
@ -534,25 +537,25 @@ fn try_load_from_disk_and_cache_in_memory<'tcx, C: QueryCache, const FLAGS: Quer
// We always expect to find a cached result for things that
// can be forced from `DepNode`.
debug_assert!(
!query.will_cache_on_disk_for_key(qcx.tcx, key)
|| !qcx.tcx.fingerprint_style(dep_node.kind).reconstructible(),
!query.will_cache_on_disk_for_key(tcx, key)
|| !tcx.fingerprint_style(dep_node.kind).reconstructible(),
"missing on-disk cache entry for {dep_node:?}"
);
// Sanity check for the logic in `ensure`: if the node is green and the result loadable,
// we should actually be able to load it.
debug_assert!(
!query.is_loadable_from_disk(qcx, key, prev_dep_node_index),
!query.is_loadable_from_disk(tcx, key, prev_dep_node_index),
"missing on-disk cache entry for loadable {dep_node:?}"
);
// We could not load a result from the on-disk cache, so
// recompute.
let prof_timer = qcx.tcx.prof.query_provider();
let prof_timer = tcx.prof.query_provider();
// The dep-graph for this computation is already in-place.
// Call the query provider.
let result = qcx.tcx.dep_graph.with_ignore(|| query.invoke_provider(qcx, *key));
let result = tcx.dep_graph.with_ignore(|| query.invoke_provider(tcx, *key));
prof_timer.finish_with_query_invocation_id(dep_node_index.into());
@ -566,7 +569,7 @@ fn try_load_from_disk_and_cache_in_memory<'tcx, C: QueryCache, const FLAGS: Quer
// See issue #82920 for an example of a miscompilation that would get turned into
// an ICE by this check
incremental_verify_ich(
qcx.tcx,
tcx,
dep_graph_data,
&result,
prev_dep_node_index,
@ -588,7 +591,7 @@ fn try_load_from_disk_and_cache_in_memory<'tcx, C: QueryCache, const FLAGS: Quer
#[inline(never)]
fn ensure_must_run<'tcx, C: QueryCache, const FLAGS: QueryFlags>(
query: SemiDynamicQueryDispatcher<'tcx, C, FLAGS>,
qcx: QueryCtxt<'tcx>,
tcx: TyCtxt<'tcx>,
key: &C::Key,
check_cache: bool,
) -> (bool, Option<DepNode>) {
@ -599,10 +602,10 @@ fn ensure_must_run<'tcx, C: QueryCache, const FLAGS: QueryFlags>(
// Ensuring an anonymous query makes no sense
assert!(!query.anon());
let dep_node = query.construct_dep_node(qcx.tcx, key);
let dep_node = query.construct_dep_node(tcx, key);
let dep_graph = &qcx.tcx.dep_graph;
let serialized_dep_node_index = match dep_graph.try_mark_green(qcx.tcx, &dep_node) {
let dep_graph = &tcx.dep_graph;
let serialized_dep_node_index = match dep_graph.try_mark_green(tcx, &dep_node) {
None => {
// A None return from `try_mark_green` means that this is either
// a new dep node or that the dep node has already been marked red.
@ -614,7 +617,7 @@ fn ensure_must_run<'tcx, C: QueryCache, const FLAGS: QueryFlags>(
}
Some((serialized_dep_node_index, dep_node_index)) => {
dep_graph.read_index(dep_node_index);
qcx.tcx.prof.query_cache_hit(dep_node_index.into());
tcx.prof.query_cache_hit(dep_node_index.into());
serialized_dep_node_index
}
};
@ -624,34 +627,34 @@ fn ensure_must_run<'tcx, C: QueryCache, const FLAGS: QueryFlags>(
return (false, None);
}
let loadable = query.is_loadable_from_disk(qcx, key, serialized_dep_node_index);
let loadable = query.is_loadable_from_disk(tcx, key, serialized_dep_node_index);
(!loadable, Some(dep_node))
}
#[inline(always)]
pub(super) fn get_query_non_incr<'tcx, C: QueryCache, const FLAGS: QueryFlags>(
query: SemiDynamicQueryDispatcher<'tcx, C, FLAGS>,
qcx: QueryCtxt<'tcx>,
tcx: TyCtxt<'tcx>,
span: Span,
key: C::Key,
) -> C::Value {
debug_assert!(!qcx.tcx.dep_graph.is_fully_enabled());
debug_assert!(!tcx.dep_graph.is_fully_enabled());
ensure_sufficient_stack(|| try_execute_query::<C, FLAGS, false>(query, qcx, span, key, None).0)
ensure_sufficient_stack(|| try_execute_query::<C, FLAGS, false>(query, tcx, span, key, None).0)
}
#[inline(always)]
pub(super) fn get_query_incr<'tcx, C: QueryCache, const FLAGS: QueryFlags>(
query: SemiDynamicQueryDispatcher<'tcx, C, FLAGS>,
qcx: QueryCtxt<'tcx>,
tcx: TyCtxt<'tcx>,
span: Span,
key: C::Key,
mode: QueryMode,
) -> Option<C::Value> {
debug_assert!(qcx.tcx.dep_graph.is_fully_enabled());
debug_assert!(tcx.dep_graph.is_fully_enabled());
let dep_node = if let QueryMode::Ensure { check_cache } = mode {
let (must_run, dep_node) = ensure_must_run(query, qcx, &key, check_cache);
let (must_run, dep_node) = ensure_must_run(query, tcx, &key, check_cache);
if !must_run {
return None;
}
@ -661,30 +664,30 @@ pub(super) fn get_query_incr<'tcx, C: QueryCache, const FLAGS: QueryFlags>(
};
let (result, dep_node_index) = ensure_sufficient_stack(|| {
try_execute_query::<C, FLAGS, true>(query, qcx, span, key, dep_node)
try_execute_query::<C, FLAGS, true>(query, tcx, span, key, dep_node)
});
if let Some(dep_node_index) = dep_node_index {
qcx.tcx.dep_graph.read_index(dep_node_index)
tcx.dep_graph.read_index(dep_node_index)
}
Some(result)
}
pub(crate) fn force_query<'tcx, C: QueryCache, const FLAGS: QueryFlags>(
query: SemiDynamicQueryDispatcher<'tcx, C, FLAGS>,
qcx: QueryCtxt<'tcx>,
tcx: TyCtxt<'tcx>,
key: C::Key,
dep_node: DepNode,
) {
// We may be concurrently trying to both execute and force a query.
// Ensure that only one of them runs the query.
if let Some((_, index)) = query.query_cache(qcx).lookup(&key) {
qcx.tcx.prof.query_cache_hit(index.into());
if let Some((_, index)) = query.query_cache(tcx).lookup(&key) {
tcx.prof.query_cache_hit(index.into());
return;
}
debug_assert!(!query.anon());
ensure_sufficient_stack(|| {
try_execute_query::<C, FLAGS, true>(query, qcx, DUMMY_SP, key, Some(dep_node))
try_execute_query::<C, FLAGS, true>(query, tcx, DUMMY_SP, key, Some(dep_node))
});
}


@ -10,10 +10,11 @@ use rustc_middle::query::{
CycleError, QueryInfo, QueryJob, QueryJobId, QueryLatch, QueryStackDeferred, QueryStackFrame,
QueryWaiter,
};
use rustc_middle::ty::TyCtxt;
use rustc_session::Session;
use rustc_span::{DUMMY_SP, Span};
use crate::QueryCtxt;
use crate::plumbing::collect_active_jobs_from_all_queries;
/// Map from query job IDs to job information collected by
/// `collect_active_jobs_from_all_queries`.
@ -384,7 +385,7 @@ pub fn break_query_cycles<'tcx>(
}
pub fn print_query_stack<'tcx>(
qcx: QueryCtxt<'tcx>,
tcx: TyCtxt<'tcx>,
mut current_query: Option<QueryJobId>,
dcx: DiagCtxtHandle<'_>,
limit_frames: Option<usize>,
@ -397,8 +398,7 @@ pub fn print_query_stack<'tcx>(
let mut count_total = 0;
// Make use of a partial query job map if we fail to take locks collecting active queries.
let job_map: QueryJobMap<'_> = qcx
.collect_active_jobs_from_all_queries(false)
let job_map: QueryJobMap<'_> = collect_active_jobs_from_all_queries(tcx, false)
.unwrap_or_else(|partial_job_map| partial_job_map);
if let Some(ref mut file) = file {
@ -425,7 +425,7 @@ pub fn print_query_stack<'tcx>(
file,
"#{} [{}] {}",
count_total,
qcx.tcx.dep_kind_vtable(query_info.frame.dep_kind).name,
tcx.dep_kind_vtable(query_info.frame.dep_kind).name,
query_extra.description
);
}


@ -9,6 +9,7 @@
#![feature(try_blocks)]
// tidy-alphabetical-end
use std::fmt;
use std::marker::ConstParamTy;
use rustc_data_structures::sync::AtomicU64;
@ -26,7 +27,7 @@ use rustc_span::{ErrorGuaranteed, Span};
pub use crate::dep_kind_vtables::make_dep_kind_vtables;
pub use crate::job::{QueryJobMap, break_query_cycles, print_query_stack};
pub use crate::plumbing::{QueryCtxt, query_key_hash_verify_all};
pub use crate::plumbing::{collect_active_jobs_from_all_queries, query_key_hash_verify_all};
use crate::plumbing::{encode_all_query_results, try_mark_green};
use crate::profiling_support::QueryKeyStringCache;
pub use crate::profiling_support::alloc_self_profile_query_strings;
@ -76,6 +77,20 @@ impl<'tcx, C: QueryCache, const FLAGS: QueryFlags> Clone
}
}
impl<'tcx, C: QueryCache, const FLAGS: QueryFlags> fmt::Debug
for SemiDynamicQueryDispatcher<'tcx, C, FLAGS>
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
// When debug-printing a query dispatcher (e.g. for ICE or tracing),
// just print the query name to know what query we're dealing with.
// The other fields and flags are probably just unhelpful noise.
//
// If there is need for a more detailed dump of all flags and fields,
// consider writing a separate dump method and calling it explicitly.
f.write_str(self.name())
}
}
impl<'tcx, C: QueryCache, const FLAGS: QueryFlags> SemiDynamicQueryDispatcher<'tcx, C, FLAGS> {
#[inline(always)]
fn name(self) -> &'static str {
@ -89,11 +104,11 @@ impl<'tcx, C: QueryCache, const FLAGS: QueryFlags> SemiDynamicQueryDispatcher<'t
// Don't use this method to access query results; instead, use the methods on TyCtxt.
#[inline(always)]
fn query_state(self, qcx: QueryCtxt<'tcx>) -> &'tcx QueryState<'tcx, C::Key> {
fn query_state(self, tcx: TyCtxt<'tcx>) -> &'tcx QueryState<'tcx, C::Key> {
// Safety:
// This is just manually doing the subfield referencing through pointer math.
unsafe {
&*(&qcx.tcx.query_system.states as *const QueryStates<'tcx>)
&*(&tcx.query_system.states as *const QueryStates<'tcx>)
.byte_add(self.vtable.query_state)
.cast::<QueryState<'tcx, C::Key>>()
}
@ -101,11 +116,11 @@ impl<'tcx, C: QueryCache, const FLAGS: QueryFlags> SemiDynamicQueryDispatcher<'t
// Don't use this method to access query results; instead, use the methods on TyCtxt.
#[inline(always)]
fn query_cache(self, qcx: QueryCtxt<'tcx>) -> &'tcx C {
fn query_cache(self, tcx: TyCtxt<'tcx>) -> &'tcx C {
// Safety:
// This is just manually doing the subfield referencing through pointer math.
unsafe {
&*(&qcx.tcx.query_system.caches as *const QueryCaches<'tcx>)
&*(&tcx.query_system.caches as *const QueryCaches<'tcx>)
.byte_add(self.vtable.query_cache)
.cast::<C>()
}
@ -121,30 +136,30 @@ impl<'tcx, C: QueryCache, const FLAGS: QueryFlags> SemiDynamicQueryDispatcher<'t
/// Calls the actual provider function for this query.
/// See [`QueryVTable::invoke_provider_fn`] for more details.
#[inline(always)]
fn invoke_provider(self, qcx: QueryCtxt<'tcx>, key: C::Key) -> C::Value {
(self.vtable.invoke_provider_fn)(qcx.tcx, key)
fn invoke_provider(self, tcx: TyCtxt<'tcx>, key: C::Key) -> C::Value {
(self.vtable.invoke_provider_fn)(tcx, key)
}
#[inline(always)]
fn try_load_from_disk(
self,
qcx: QueryCtxt<'tcx>,
tcx: TyCtxt<'tcx>,
key: &C::Key,
prev_index: SerializedDepNodeIndex,
index: DepNodeIndex,
) -> Option<C::Value> {
// `?` will return None immediately for queries that never cache to disk.
self.vtable.try_load_from_disk_fn?(qcx.tcx, key, prev_index, index)
self.vtable.try_load_from_disk_fn?(tcx, key, prev_index, index)
}
#[inline]
fn is_loadable_from_disk(
self,
qcx: QueryCtxt<'tcx>,
tcx: TyCtxt<'tcx>,
key: &C::Key,
index: SerializedDepNodeIndex,
) -> bool {
self.vtable.is_loadable_from_disk_fn.map_or(false, |f| f(qcx.tcx, key, index))
self.vtable.is_loadable_from_disk_fn.map_or(false, |f| f(tcx, key, index))
}
/// Synthesize an error value to let compilation continue after a cycle.


@ -15,7 +15,7 @@ use rustc_middle::bug;
#[expect(unused_imports, reason = "used by doc comments")]
use rustc_middle::dep_graph::DepKindVTable;
use rustc_middle::dep_graph::{
self, DepNode, DepNodeIndex, DepNodeKey, HasDepContext, SerializedDepNodeIndex, dep_kinds,
self, DepNode, DepNodeIndex, DepNodeKey, SerializedDepNodeIndex, dep_kinds,
};
use rustc_middle::query::on_disk_cache::{
AbsoluteBytePos, CacheDecoder, CacheEncoder, EncodedDepNodeIndex,
@ -36,115 +36,91 @@ use crate::execution::{all_inactive, force_query};
use crate::job::{QueryJobMap, find_dep_kind_root};
use crate::{QueryDispatcherUnerased, QueryFlags, SemiDynamicQueryDispatcher};
#[derive(Copy, Clone)]
pub struct QueryCtxt<'tcx> {
pub tcx: TyCtxt<'tcx>,
fn depth_limit_error<'tcx>(tcx: TyCtxt<'tcx>, job: QueryJobId) {
let job_map =
collect_active_jobs_from_all_queries(tcx, true).expect("failed to collect active queries");
let (info, depth) = find_dep_kind_root(job, job_map);
let suggested_limit = match tcx.recursion_limit() {
Limit(0) => Limit(2),
limit => limit * 2,
};
tcx.sess.dcx().emit_fatal(QueryOverflow {
span: info.job.span,
note: QueryOverflowNote { desc: info.frame.info.extract().description, depth },
suggested_limit,
crate_name: tcx.crate_name(LOCAL_CRATE),
});
}
impl<'tcx> QueryCtxt<'tcx> {
#[inline]
pub fn new(tcx: TyCtxt<'tcx>) -> Self {
QueryCtxt { tcx }
}
fn depth_limit_error(self, job: QueryJobId) {
let job_map = self
.collect_active_jobs_from_all_queries(true)
.expect("failed to collect active queries");
let (info, depth) = find_dep_kind_root(job, job_map);
let suggested_limit = match self.tcx.recursion_limit() {
Limit(0) => Limit(2),
limit => limit * 2,
};
self.tcx.sess.dcx().emit_fatal(QueryOverflow {
span: info.job.span,
note: QueryOverflowNote { desc: info.frame.info.extract().description, depth },
suggested_limit,
crate_name: self.tcx.crate_name(LOCAL_CRATE),
});
}
#[inline]
pub(crate) fn next_job_id(self) -> QueryJobId {
QueryJobId(
NonZero::new(
self.tcx.query_system.jobs.fetch_add(1, std::sync::atomic::Ordering::Relaxed),
)
#[inline]
pub(crate) fn next_job_id<'tcx>(tcx: TyCtxt<'tcx>) -> QueryJobId {
QueryJobId(
NonZero::new(tcx.query_system.jobs.fetch_add(1, std::sync::atomic::Ordering::Relaxed))
.unwrap(),
)
}
)
}
#[inline]
pub(crate) fn current_query_job(self) -> Option<QueryJobId> {
tls::with_related_context(self.tcx, |icx| icx.query)
}
#[inline]
pub(crate) fn current_query_job<'tcx>(tcx: TyCtxt<'tcx>) -> Option<QueryJobId> {
tls::with_related_context(tcx, |icx| icx.query)
}
/// Executes a job by changing the `ImplicitCtxt` to point to the
/// new query job while it executes.
#[inline(always)]
pub(crate) fn start_query<R>(
self,
token: QueryJobId,
depth_limit: bool,
compute: impl FnOnce() -> R,
) -> R {
// The `TyCtxt` stored in TLS has the same global interner lifetime
// as `self`, so we use `with_related_context` to relate the 'tcx lifetimes
// when accessing the `ImplicitCtxt`.
tls::with_related_context(self.tcx, move |current_icx| {
if depth_limit
&& !self.tcx.recursion_limit().value_within_limit(current_icx.query_depth)
{
self.depth_limit_error(token);
}
// Update the `ImplicitCtxt` to point to our new query job.
let new_icx = ImplicitCtxt {
tcx: self.tcx,
query: Some(token),
query_depth: current_icx.query_depth + depth_limit as usize,
task_deps: current_icx.task_deps,
};
// Use the `ImplicitCtxt` while we execute the query.
tls::enter_context(&new_icx, compute)
})
}
/// Returns a map of currently active query jobs, collected from all queries.
///
/// If `require_complete` is `true`, this function locks all shards of the
/// query results to produce a complete map, which always returns `Ok`.
/// Otherwise, it may return an incomplete map as an error if any shard
/// lock cannot be acquired.
///
/// Prefer passing `false` to `require_complete` to avoid potential deadlocks,
/// especially when called from within a deadlock handler, unless a
/// complete map is needed and no deadlock is possible at this call site.
pub fn collect_active_jobs_from_all_queries(
self,
require_complete: bool,
) -> Result<QueryJobMap<'tcx>, QueryJobMap<'tcx>> {
let mut job_map_out = QueryJobMap::default();
let mut complete = true;
for gather_fn in crate::PER_QUERY_GATHER_ACTIVE_JOBS_FNS.iter() {
if gather_fn(self.tcx, require_complete, &mut job_map_out).is_none() {
complete = false;
}
/// Executes a job by changing the `ImplicitCtxt` to point to the
/// new query job while it executes.
#[inline(always)]
pub(crate) fn start_query<'tcx, R>(
tcx: TyCtxt<'tcx>,
token: QueryJobId,
depth_limit: bool,
compute: impl FnOnce() -> R,
) -> R {
// The `TyCtxt` stored in TLS has the same global interner lifetime
// as `tcx`, so we use `with_related_context` to relate the 'tcx lifetimes
// when accessing the `ImplicitCtxt`.
tls::with_related_context(tcx, move |current_icx| {
if depth_limit && !tcx.recursion_limit().value_within_limit(current_icx.query_depth) {
depth_limit_error(tcx, token);
}
if complete { Ok(job_map_out) } else { Err(job_map_out) }
}
// Update the `ImplicitCtxt` to point to our new query job.
let new_icx = ImplicitCtxt {
tcx,
query: Some(token),
query_depth: current_icx.query_depth + depth_limit as usize,
task_deps: current_icx.task_deps,
};
// Use the `ImplicitCtxt` while we execute the query.
tls::enter_context(&new_icx, compute)
})
}
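
The context-swapping pattern that `start_query` relies on can be sketched standalone. This is a hedged analogue with illustrative names, not rustc's actual TLS machinery, and it omits the unwind safety a real implementation would get from an RAII guard:

```rust
use std::cell::Cell;

thread_local! {
    // Analogue of the `ImplicitCtxt` slot that `start_query` updates.
    static CURRENT_JOB: Cell<Option<u64>> = const { Cell::new(None) };
}

// Install `job` as the current one for the duration of `compute`, then
// restore the previous value (not panic-safe; a guard would fix that).
fn with_job<R>(job: u64, compute: impl FnOnce() -> R) -> R {
    CURRENT_JOB.with(|slot| {
        let prev = slot.replace(Some(job));
        let result = compute();
        slot.set(prev);
        result
    })
}

fn main() {
    let seen = with_job(1, || CURRENT_JOB.with(|slot| slot.get()));
    assert_eq!(seen, Some(1));
}
```
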
impl<'tcx> HasDepContext<'tcx> for QueryCtxt<'tcx> {
#[inline]
fn dep_context(&self) -> TyCtxt<'tcx> {
self.tcx
/// Returns a map of currently active query jobs, collected from all queries.
///
/// If `require_complete` is `true`, this function locks all shards of the
/// query results to produce a complete map, which always returns `Ok`.
/// Otherwise, it may return an incomplete map as an error if any shard
/// lock cannot be acquired.
///
/// Prefer passing `false` to `require_complete` to avoid potential deadlocks,
/// especially when called from within a deadlock handler, unless a
/// complete map is needed and no deadlock is possible at this call site.
pub fn collect_active_jobs_from_all_queries<'tcx>(
tcx: TyCtxt<'tcx>,
require_complete: bool,
) -> Result<QueryJobMap<'tcx>, QueryJobMap<'tcx>> {
let mut job_map_out = QueryJobMap::default();
let mut complete = true;
for gather_fn in crate::PER_QUERY_GATHER_ACTIVE_JOBS_FNS.iter() {
if gather_fn(tcx, require_complete, &mut job_map_out).is_none() {
complete = false;
}
}
if complete { Ok(job_map_out) } else { Err(job_map_out) }
}
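
In practice, the two calling modes of `collect_active_jobs_from_all_queries` look like this. A hedged sketch, assuming a compiler-internal `tcx` in scope inside some function; both patterns mirror call sites elsewhere in this diff:

```rust
// Query-stack printing: tolerate a partial map if a shard lock is held.
let job_map = collect_active_jobs_from_all_queries(tcx, false)
    .unwrap_or_else(|partial_job_map| partial_job_map);

// Depth-limit error reporting: no deadlock risk at this call site, so
// lock all shards and insist on the complete map.
let job_map = collect_active_jobs_from_all_queries(tcx, true)
    .expect("failed to collect active queries");
```
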
pub(super) fn try_mark_green<'tcx>(tcx: TyCtxt<'tcx>, dep_node: &dep_graph::DepNode) -> bool {
@ -361,19 +337,19 @@ where
pub(crate) fn encode_query_results<'a, 'tcx, Q, C: QueryCache, const FLAGS: QueryFlags>(
query: SemiDynamicQueryDispatcher<'tcx, C, FLAGS>,
qcx: QueryCtxt<'tcx>,
tcx: TyCtxt<'tcx>,
encoder: &mut CacheEncoder<'a, 'tcx>,
query_result_index: &mut EncodedDepNodeIndex,
) where
Q: QueryDispatcherUnerased<'tcx, C, FLAGS>,
Q::UnerasedValue: Encodable<CacheEncoder<'a, 'tcx>>,
{
let _timer = qcx.tcx.prof.generic_activity_with_arg("encode_query_results_for", query.name());
let _timer = tcx.prof.generic_activity_with_arg("encode_query_results_for", query.name());
assert!(all_inactive(query.query_state(qcx)));
let cache = query.query_cache(qcx);
assert!(all_inactive(query.query_state(tcx)));
let cache = query.query_cache(tcx);
cache.iter(&mut |key, value, dep_node| {
if query.will_cache_on_disk_for_key(qcx.tcx, key) {
if query.will_cache_on_disk_for_key(tcx, key) {
let dep_node = SerializedDepNodeIndex::new(dep_node.index());
// Record position of the cache entry.
@ -388,14 +364,14 @@ pub(crate) fn encode_query_results<'a, 'tcx, Q, C: QueryCache, const FLAGS: Quer
pub(crate) fn query_key_hash_verify<'tcx, C: QueryCache, const FLAGS: QueryFlags>(
query: SemiDynamicQueryDispatcher<'tcx, C, FLAGS>,
qcx: QueryCtxt<'tcx>,
tcx: TyCtxt<'tcx>,
) {
let _timer = qcx.tcx.prof.generic_activity_with_arg("query_key_hash_verify_for", query.name());
let _timer = tcx.prof.generic_activity_with_arg("query_key_hash_verify_for", query.name());
let cache = query.query_cache(qcx);
let cache = query.query_cache(tcx);
let mut map = UnordMap::with_capacity(cache.len());
cache.iter(&mut |key, _, _| {
let node = DepNode::construct(qcx.tcx, query.dep_kind(), key);
let node = DepNode::construct(tcx, query.dep_kind(), key);
if let Some(other_key) = map.insert(node, *key) {
bug!(
"query key:\n\
@ -487,7 +463,7 @@ pub(crate) fn force_from_dep_node_inner<'tcx, C: QueryCache, const FLAGS: QueryF
);
if let Some(key) = C::Key::recover(tcx, &dep_node) {
force_query(query, QueryCtxt::new(tcx), key, dep_node);
force_query(query, tcx, key, dep_node);
true
} else {
false
@ -525,7 +501,7 @@ macro_rules! define_queries {
let _guard = tracing::span!(tracing::Level::TRACE, stringify!($name), ?key).entered();
execution::get_query_incr(
QueryType::query_dispatcher(tcx),
QueryCtxt::new(tcx),
tcx,
span,
key,
mode
@ -545,7 +521,7 @@ macro_rules! define_queries {
) -> Option<Erased<queries::$name::Value<'tcx>>> {
Some(execution::get_query_non_incr(
QueryType::query_dispatcher(tcx),
QueryCtxt::new(tcx),
tcx,
span,
key,
))
@ -729,7 +705,7 @@ macro_rules! define_queries {
_
> (
query_impl::$name::QueryType::query_dispatcher(tcx),
QueryCtxt::new(tcx),
tcx,
encoder,
query_result_index,
)
@ -739,7 +715,7 @@ macro_rules! define_queries {
pub(crate) fn query_key_hash_verify<'tcx>(tcx: TyCtxt<'tcx>) {
$crate::plumbing::query_key_hash_verify(
query_impl::$name::QueryType::query_dispatcher(tcx),
QueryCtxt::new(tcx),
tcx,
)
}
})*}


@ -916,10 +916,9 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
/// heads from the stack. This may not necessarily mean that we've actually
/// reached a fixpoint for that cycle head, which impacts the way we rebase
/// provisional cache entries.
#[derive_where(Debug; X: Cx)]
enum RebaseReason<X: Cx> {
#[derive(Debug)]
enum RebaseReason {
NoCycleUsages,
Ambiguity(X::AmbiguityInfo),
Overflow,
/// We've actually reached a fixpoint.
///
@ -956,7 +955,7 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D, X> {
&mut self,
cx: X,
stack_entry: &StackEntry<X>,
rebase_reason: RebaseReason<X>,
rebase_reason: RebaseReason,
) {
let popped_head_index = self.stack.next_index();
#[allow(rustc::potential_query_instability)]
@ -1035,9 +1034,6 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D, X> {
// is not actually equal to the final provisional result. We
// need to discard the provisional cache entry in this case.
RebaseReason::NoCycleUsages => return false,
RebaseReason::Ambiguity(info) => {
*result = D::propagate_ambiguity(cx, input, info);
}
RebaseReason::Overflow => *result = D::fixpoint_overflow_result(cx, input),
RebaseReason::ReachedFixpoint(None) => {}
RebaseReason::ReachedFixpoint(Some(path_kind)) => {
@ -1352,27 +1348,6 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D, X> {
return EvaluationResult::finalize(stack_entry, encountered_overflow, result);
}
// If computing this goal results in ambiguity with no constraints,
// we do not rerun it. It's incredibly difficult to get a different
// response in the next iteration in this case. These changes would
// likely either be caused by incompleteness or can change the maybe
// cause from ambiguity to overflow. Returning ambiguity always
// preserves soundness and completeness even if the goal is known
// to succeed or fail.
//
// This prevents exponential blowup affecting multiple major crates.
// As we only get to this branch if we haven't yet reached a fixpoint,
// we also taint all provisional cache entries which depend on the
// current goal.
if let Some(info) = D::is_ambiguous_result(result) {
self.rebase_provisional_cache_entries(
cx,
&stack_entry,
RebaseReason::Ambiguity(info),
);
return EvaluationResult::finalize(stack_entry, encountered_overflow, result);
};
// If we've reached the fixpoint step limit, we bail with overflow and taint all
// provisional cache entries which depend on the current goal.
i += 1;


@ -183,7 +183,44 @@ crate::cfg_select! {
}
}
/// A variable argument list, equivalent to `va_list` in C.
/// A variable argument list, ABI-compatible with `va_list` in C.
///
/// This type is created in c-variadic functions when `...` is desugared. A `VaList`
/// is automatically initialized (equivalent to calling `va_start` in C).
///
/// ```
/// #![feature(c_variadic)]
///
/// use std::ffi::VaList;
///
/// /// # Safety
/// /// Must be passed at least `count` arguments of type `i32`.
/// unsafe extern "C" fn my_func(count: u32, ap: ...) -> i32 {
/// unsafe { vmy_func(count, ap) }
/// }
///
/// /// # Safety
/// /// Must be passed at least `count` arguments of type `i32`.
/// unsafe fn vmy_func(count: u32, mut ap: VaList<'_>) -> i32 {
/// let mut sum = 0;
/// for _ in 0..count {
/// sum += unsafe { ap.arg::<i32>() };
/// }
/// sum
/// }
///
/// assert_eq!(unsafe { my_func(1, 42i32) }, 42);
/// assert_eq!(unsafe { my_func(3, 42i32, -7i32, 20i32) }, 55);
/// ```
///
/// The [`VaList::arg`] method can be used to read an argument from the list. This method
/// automatically advances the `VaList` to the next argument. The C equivalent is `va_arg`.
///
/// Cloning a `VaList` performs the equivalent of C `va_copy`, producing an independent cursor
/// that arguments can be read from without affecting the original. Dropping a `VaList` performs
/// the equivalent of C `va_end`.
///
/// This can be used across an FFI boundary, and fully matches the platform's `va_list`.
#[repr(transparent)]
#[lang = "va_list"]
pub struct VaList<'a> {
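
As a hedged sketch of the cloning behavior described above (same nightly `c_variadic` assumptions as the doc example; `peek_then_read` is an illustrative name, not a std API):

```rust
#![feature(c_variadic)]
use std::ffi::VaList;

/// # Safety
/// Must be passed at least one `i32` argument.
unsafe fn peek_then_read(mut ap: VaList<'_>) -> (i32, i32) {
    let mut copy = ap.clone();                  // the equivalent of C's `va_copy`
    let peeked = unsafe { copy.arg::<i32>() };  // advances only the copy
    let read = unsafe { ap.arg::<i32>() };      // the original cursor is unaffected,
    (peeked, read)                              // so both reads see the same value
}
```
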
@ -278,20 +315,17 @@ unsafe impl<T> VaArgSafe for *mut T {}
unsafe impl<T> VaArgSafe for *const T {}
impl<'f> VaList<'f> {
/// Advance to and read the next variable argument.
/// Read an argument from the variable argument list, and advance to the next argument.
///
/// Only types that implement [`VaArgSafe`] can be read from a variable argument list.
///
/// # Safety
///
/// This function is only sound to call when:
///
/// - there is a next variable argument available.
/// - the next argument's type must be ABI-compatible with the type `T`.
/// - the next argument must have a properly initialized value of type `T`.
/// This function is only sound to call when there is another argument to read, and that
/// argument is a properly initialized value of the type `T`.
///
/// Calling this function with an incompatible type, an invalid value, or when there
/// are no more variable arguments, is unsound.
///
/// [valid]: https://doc.rust-lang.org/nightly/nomicon/what-unsafe-does.html
#[inline]
#[rustc_const_unstable(feature = "const_c_variadic", issue = "151787")]
pub const unsafe fn arg<T: VaArgSafe>(&mut self) -> T {


@ -1,5 +1,3 @@
#![allow(stable_features)]
use std::ptr::{read_volatile, write_volatile};
#[test]


@ -472,18 +472,18 @@ dependencies = [
[[package]]
name = "objc2-core-foundation"
version = "0.3.2"
version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2a180dd8642fa45cdb7dd721cd4c11b1cadd4929ce112ebd8b9f5803cc79d536"
checksum = "1c10c2894a6fed806ade6027bcd50662746363a9589d3ec9d9bef30a4e4bc166"
dependencies = [
"bitflags",
]
[[package]]
name = "objc2-io-kit"
version = "0.3.2"
version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "33fafba39597d6dc1fb709123dfa8289d39406734be322956a69f0931c73bb15"
checksum = "71c1c64d6120e51cd86033f67176b1cb66780c2efe34dec55176f77befd93c0a"
dependencies = [
"libc",
"objc2-core-foundation",
@ -743,9 +743,9 @@ dependencies = [
[[package]]
name = "sysinfo"
version = "0.38.0"
version = "0.38.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fe840c5b1afe259a5657392a4dbb74473a14c8db999c3ec2f4ae812e028a94da"
checksum = "1efc19935b4b66baa6f654ac7924c192f55b175c00a7ab72410fc24284dacda8"
dependencies = [
"libc",
"memchr",


@ -57,7 +57,7 @@ walkdir = "2.4"
xz2 = "0.1"
# Dependencies needed by the build-metrics feature
sysinfo = { version = "0.38.0", default-features = false, optional = true, features = ["system"] }
sysinfo = { version = "0.38.2", default-features = false, optional = true, features = ["system"] }
# Dependencies needed by the `tracing` feature
chrono = { version = "0.4", default-features = false, optional = true, features = ["now", "std"] }


@ -2557,7 +2557,7 @@ pub fn maybe_install_llvm_target(builder: &Builder<'_>, target: TargetSelection,
),
)]
pub fn maybe_install_llvm_runtime(builder: &Builder<'_>, target: TargetSelection, sysroot: &Path) {
let dst_libdir = sysroot.join(builder.sysroot_libdir_relative(Compiler::new(1, target)));
let dst_libdir = sysroot.join(builder.libdir_relative(Compiler::new(1, target)));
// We do not need to copy LLVM files into the sysroot if it is not
// dynamically linked; it is already included into librustc_llvm
// statically.


@ -712,6 +712,7 @@ auto:
--target=aarch64-pc-windows-gnullvm,i686-pc-windows-gnullvm
--enable-full-tools
--enable-profiler
--enable-llvm-link-shared
DIST_REQUIRE_ALL_TOOLS: 1
CODEGEN_BACKENDS: llvm,cranelift
CC_i686_pc_windows_gnullvm: i686-w64-mingw32-clang
@ -724,6 +725,7 @@ auto:
--build=x86_64-pc-windows-gnullvm
--enable-full-tools
--enable-profiler
--enable-llvm-link-shared
DIST_REQUIRE_ALL_TOOLS: 1
CODEGEN_BACKENDS: llvm,cranelift
<<: *job-windows


@ -10,7 +10,7 @@ log = "0.4"
anyhow = "1"
humantime = "2"
humansize = "2"
sysinfo = { version = "0.38.0", default-features = false, features = ["disk"] }
sysinfo = { version = "0.38.2", default-features = false, features = ["disk"] }
fs_extra = "1"
camino = "1"
tar = "0.4"


@ -1,8 +1,5 @@
//@ run-pass
#![allow(dead_code)]
#![allow(stable_features)]
#![feature(const_indexing)]
fn main() {
const ARR: [i32; 6] = [42, 43, 44, 45, 46, 47];


@ -1,10 +1,6 @@
//@ run-pass
#![allow(stable_features)]
//@ compile-flags: -C debug-assertions
#![feature(iter_to_slice)]
use std::slice;
fn foo<T>(v: &[T]) -> Option<&[T]> {


@ -1,11 +1,9 @@
//@ run-pass
#![allow(non_camel_case_types)]
#![allow(stable_features)]
// Issue 4691: Ensure that functional-struct-updates operates
// correctly and moves rather than copy when appropriate.
#![feature(core)]
struct ncint { v: isize }
fn ncint(v: isize) -> ncint { ncint { v: v } }


@ -1,10 +1,6 @@
//@ run-pass
#![allow(stable_features)]
//@ compile-flags:-C target-feature=-crt-static -Z unstable-options
//@ ignore-musl - requires changing the linker which is hard
#![feature(cfg_target_feature)]
#[cfg(not(target_feature = "crt-static"))]
fn main() {}


@ -1,10 +1,6 @@
//@ run-pass
#![allow(stable_features)]
// A very basic test of const fn functionality.
#![feature(const_indexing)]
const fn add(x: u32, y: u32) -> u32 {
x + y
}


@ -1,8 +1,4 @@
//@ run-pass
#![allow(stable_features)]
#![feature(const_indexing)]
const ARR: [usize; 5] = [5, 4, 3, 2, 1];
fn main() {


@ -1,13 +1,9 @@
//@ run-pass
#![allow(stable_features)]
#![allow(unused_imports)]
// Test that cleanup scope for temporaries created in a match
// arm is confined to the match arm itself.
#![feature(os)]
use std::os;
struct Test { x: isize }


@ -1,10 +1,7 @@
//@ run-pass
#![allow(unused_variables)]
#![allow(stable_features)]
// Test a very simple custom DST coercion.
#![feature(core, rc_weak)]
use std::cell::RefCell;
use std::rc::{Rc, Weak};


@ -1,6 +1,5 @@
//@ run-pass
#![allow(stable_features)]
#![feature(core, core_intrinsics)]
#![feature(core_intrinsics)]
extern crate core;
use core::intrinsics::discriminant_value;
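
This test keeps the perma-unstable `core_intrinsics` gate because it exercises the intrinsic directly; ordinary code reaches the same information through the stable `std::mem::discriminant`. A small illustrative example of the stable counterpart:

use std::mem;

enum Op { Add(i32), Neg }

fn main() {
    // Same variant, different payloads: discriminants compare equal.
    assert_eq!(mem::discriminant(&Op::Add(1)), mem::discriminant(&Op::Add(2)));
    // Different variants: discriminants differ.
    assert_ne!(mem::discriminant(&Op::Neg), mem::discriminant(&Op::Add(0)));
}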

View file

@ -1,15 +1,12 @@
//@ run-pass
#![allow(dead_code)]
#![allow(unused_variables)]
#![allow(stable_features)]
// Tests parallel codegen; this can fail if the symbol for the anonymous
// closure in `sum` pollutes the second codegen unit from the first.
//@ compile-flags: -C codegen_units=2
#![feature(iter_arith)]
mod a {
fn foo() {
let x = ["a", "bob", "c"];

View file

@ -1,9 +1,6 @@
//! Regression test for https://github.com/rust-lang/rust/issues/15673
//@ run-pass
#![allow(stable_features)]
#![feature(iter_arith)]
fn main() {
let x: [u64; 3] = [1, 2, 3];

View file

@ -1,14 +1,10 @@
//@ build-pass
#![allow(dead_code)]
#![allow(unused_imports)]
#![allow(stable_features)]
// A reduced version of the rustbook ICE. The problem it encountered
// had to do with codegen ignoring binders.
#![feature(os)]
use std::iter;
use std::os;
use std::fs::File;

View file

@ -1,7 +1,4 @@
//@ run-pass
#![allow(stable_features)]
#![feature(cfg_target_feature)]
#[cfg(any(not(target_arch = "x86"), target_feature = "sse2"))]
fn main() {

View file

@ -1,8 +1,6 @@
//@ run-pass
#![allow(stable_features)]
// write_volatile causes an LLVM assert with composite types
#![feature(volatile)]
use std::ptr::{read_volatile, write_volatile};
#[derive(Debug, Eq, PartialEq)]
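
The `volatile` gate dropped here was stabilized long ago as `std::ptr::read_volatile` and `write_volatile`. A minimal sketch of the stable API with a composite type like the one this test covers:

use std::ptr::{read_volatile, write_volatile};

#[derive(Clone, Copy, Debug, Eq, PartialEq)]
struct Pair { a: u32, b: u32 }

fn main() {
    let mut p = Pair { a: 0, b: 0 };
    unsafe {
        // Volatile accesses are performed exactly as written and are
        // never elided or reordered by the optimizer.
        write_volatile(&mut p, Pair { a: 1, b: 2 });
        assert_eq!(read_volatile(&p), Pair { a: 1, b: 2 });
    }
}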

View file

@ -1,6 +1,4 @@
//@ check-pass
#![allow(stable_features)]
#![feature(associated_consts)]
use std::marker::PhantomData;

View file

@ -1,7 +1,5 @@
//@ check-pass
#![allow(dead_code)]
#![allow(stable_features)]
#![feature(associated_consts)]
impl A for i32 {
type Foo = u32;

View file

@ -1,11 +1,7 @@
//@ run-pass
#![allow(stable_features)]
// Test that the element type of `.cloned()` can be inferred properly.
// Previously this would fail to deduce the type of `sum`.
#![feature(iter_arith)]
fn square_sum(v: &[i64]) -> i64 {
let sum: i64 = v.iter().cloned().sum();
sum * sum

View file

@ -1,8 +1,5 @@
// https://github.com/rust-lang/rust/issues/15673
//@ run-pass
#![allow(stable_features)]
#![feature(iter_arith)]
fn main() {
let x: [u64; 3] = [1, 2, 3];

View file

@ -1,5 +1,4 @@
//@ run-pass
#![allow(stable_features)]
#![allow(unused_labels)]
#![allow(unreachable_code)]

View file

@ -3,14 +3,10 @@
#![allow(dead_code)]
#![allow(unused_assignments)]
#![allow(unused_variables)]
#![allow(stable_features)]
#![allow(dropping_copy_types)]
// Test parsing binary operators after macro invocations.
#![feature(macro_rules)]
macro_rules! id {
($e: expr) => { $e }
}

View file

@ -1,7 +1,6 @@
//@ run-pass
#![allow(dead_code)]
#![allow(unused_variables)]
#![allow(stable_features)]
// Test that we handle projection types which wind up important for
// resolving methods. This test was reduced from a larger example; the
@ -10,8 +9,6 @@
// type projection.
#![feature(associated_types)]
trait Hasher {
type Output;
fn finish(&self) -> Self::Output;

View file

@ -1,10 +1,7 @@
//@ run-pass
#![allow(unused_variables)]
#![allow(stable_features)]
// Test that ordinary fat pointer operations work.
#![feature(braced_empty_structs)]
#![feature(rustc_attrs)]
use std::sync::atomic;

View file

@ -6,8 +6,7 @@
//@ run-pass
#![allow(stable_features)]
#![feature(no_core, core)]
#![feature(no_core)]
#![no_core]
extern crate core;

View file

@ -1,6 +1,5 @@
//@ run-pass
#![allow(unused_variables)]
#![allow(stable_features)]
use std::cell::RefCell;
use std::rc::Rc;

View file

@ -1,6 +1,4 @@
//@ run-pass
#![allow(stable_features)]
// Test overloaded indexing combined with autoderef.
use std::ops::{Index, IndexMut};

View file

@ -1,11 +1,8 @@
//@ run-pass
//@ needs-unwind
#![allow(stable_features)]
//@ needs-threads
//@ ignore-backends: gcc
#![feature(std_panic)]
#![feature(panic_update_hook)]
use std::sync::atomic::{AtomicUsize, Ordering};

View file

@ -2,9 +2,6 @@
//@ needs-unwind
//@ needs-threads
//@ ignore-backends: gcc
#![allow(stable_features)]
#![feature(std_panic)]
use std::sync::atomic::{AtomicUsize, Ordering};
use std::panic;

View file

@ -1,14 +1,11 @@
//@ run-pass
//@ needs-unwind
#![allow(stable_features)]
#![allow(unused_must_use)]
//@ needs-threads
//@ ignore-backends: gcc
#![feature(std_panic)]
use std::panic;
use std::thread;

View file

@ -1,9 +1,6 @@
//@ run-pass
//@ needs-unwind
#![allow(unused_variables)]
#![allow(stable_features)]
#![feature(std_panic)]
//@ needs-threads
//@ ignore-backends: gcc
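
The `std_panic` gate stripped from these four tests covered `std::panic::catch_unwind` and friends, stable since Rust 1.9 (`panic_update_hook`, kept in the first hunk, is still unstable, so that gate stays). A minimal sketch of the stable surface:

use std::panic;

fn main() {
    // catch_unwind turns a panic inside the closure into an Err
    // carrying the panic payload.
    let result = panic::catch_unwind(|| panic!("boom"));
    assert!(result.is_err());

    // A non-panicking closure passes its value through as Ok.
    let ok = panic::catch_unwind(|| 7);
    assert_eq!(ok.unwrap(), 7);
}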

View file

@ -1,8 +1,5 @@
//@ run-pass
#![allow(dead_code)]
#![allow(stable_features)]
#![feature(issue_5723_bootstrap)]
trait Foo {
fn dummy(&self) { }

View file

@ -1,9 +1,6 @@
//@ run-pass
#![allow(dead_code)]
#![allow(unused_variables)]
#![allow(stable_features)]
#![feature(issue_5723_bootstrap)]
trait Foo {
fn dummy(&self) { }

View file

@ -1,12 +1,9 @@
//@ run-pass
#![allow(stable_features)]
#![allow(unused_variables)]
// #45662
#![feature(repr_align)]
#[repr(align(16))]
pub struct A(#[allow(dead_code)] i64);
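
`repr_align` (the #45662 gate above) has long been stable; the attribute raises the type's minimum alignment, and its size is rounded up to match. A quick illustrative check:

use std::mem;

#[repr(align(16))]
struct A(#[allow(dead_code)] i64);

fn main() {
    assert_eq!(mem::align_of::<A>(), 16);
    // Size is rounded up to a multiple of the alignment.
    assert_eq!(mem::size_of::<A>(), 16);
}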

View file

@ -1,13 +1,12 @@
//@ run-pass
#![allow(unused_variables)]
#![allow(stable_features)]
#![allow(overflowing_literals)]
//@ needs-subprocess
//@ ignore-fuchsia must translate zircon signal to SIGILL, FIXME (#58590)
//@ ignore-backends: gcc
#![feature(repr_simd, target_feature, cfg_target_feature)]
#![feature(repr_simd)]
#[path = "../../auxiliary/minisimd.rs"]
mod minisimd;

View file

@ -4,9 +4,6 @@
//@ run-pass
//@ ignore-i586 (no SSE2)
#![allow(stable_features)]
#![feature(cfg_target_feature)]
fn main() {
if cfg!(any(target_arch = "x86", target_arch = "x86_64")) {
assert!(

View file

@ -1,7 +1,5 @@
//@ run-pass
#![allow(stable_features)]
//@ needs-threads
#![feature(thread_local_try_with)]
use std::sync::atomic::{AtomicUsize, Ordering};
use std::thread;

View file

@ -1,7 +1,5 @@
//@ run-pass
#![allow(stable_features)]
//@ needs-threads
#![feature(thread_local_try_with)]
use std::thread;
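
`thread_local_try_with` was stabilized as `LocalKey::try_with`; unlike `with`, it returns `Err(AccessError)` instead of panicking when the value has already been torn down (for example, during TLS destructors). A minimal sketch of the stable API:

use std::cell::Cell;
use std::thread;

thread_local!(static COUNTER: Cell<u32> = Cell::new(0));

fn main() {
    // On a live thread the closure runs and its result comes back as Ok.
    let value = COUNTER
        .try_with(|c| {
            c.set(c.get() + 1);
            c.get()
        })
        .expect("thread-local still alive");
    assert_eq!(value, 1);

    // Each thread observes its own fresh copy of the thread-local.
    thread::spawn(|| {
        assert_eq!(COUNTER.try_with(|c| c.get()).unwrap(), 0);
    })
    .join()
    .unwrap();
}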

View file

@ -0,0 +1,45 @@
//@ check-pass
//@ compile-flags: -Znext-solver
// Regression test for https://github.com/rust-lang/trait-system-refactor-initiative/issues/257.
#![feature(rustc_attrs)]
#![expect(internal_features)]
#![rustc_no_implicit_bounds]
pub trait Bound {}
impl Bound for u8 {}
pub trait Proj {
type Assoc;
}
impl<U: Bound> Proj for U {
type Assoc = U;
}
impl Proj for MyField {
type Assoc = u8;
}
// While wf-checking the global bounds of `fn foo`, elaborating this outlives predicate triggered a
// cycle in the search graph along a probe path that did not correspond to an actual solution.
// That cycle then resulted in a forced false-positive ambiguity, due to a performance hack in the
// search graph, and ended up floundering the root goal evaluation.
pub trait Field: Proj<Assoc: Bound + 'static> {}
struct MyField;
impl Field for MyField {}
trait IdReqField {
type This;
}
impl<F: Field> IdReqField for F {
type This = F;
}
fn foo()
where
<MyField as IdReqField>::This: Field,
{
}
fn main() {}

View file

@ -1,9 +1,7 @@
//@ run-pass
#![allow(stable_features)]
#![allow(unused_imports)]
#![feature(no_core, core)]
#![feature(no_core)]
#![no_core]
extern crate std;