Rollup merge of #151976 - Zalathar:collect-active-jobs, r=nnethercote
Rename `collect_active_jobs` to several distinct names Key renames: - Function `collect_active_jobs` → `collect_active_jobs_from_all_queries` (method in trait `QueryContext`) - Constant `COLLECT_ACTIVE_JOBS` → `PER_QUERY_GATHER_ACTIVE_JOBS_FNS` (list of per-query function pointers) - Function `collect_active_jobs` → `gather_active_jobs` (per-query function in `query_impl::$name`) - Function `collect_active_jobs` → `gather_active_jobs_inner` (method in `QueryState`) - Giving these four things distinct names makes it a lot easier to tell them apart! - The switch from “collect” (all queries) to “gather” (single query) is intended to make the different parts a bit more memorable and searchable; I couldn't think of a more natural way to express this distinction, so I settled for two different synonyms. There should be no change to compiler behaviour. r? nnethercote (or compiler)
This commit is contained in:
commit
f69082d0e7
5 changed files with 52 additions and 21 deletions
|
|
@ -254,7 +254,7 @@ internal compiler error: query cycle handler thread panicked, aborting process";
|
|||
|| {
|
||||
// Ensure there were no errors collecting all active jobs.
|
||||
// We need the complete map to ensure we find a cycle to break.
|
||||
QueryCtxt::new(tcx).collect_active_jobs(false).expect(
|
||||
QueryCtxt::new(tcx).collect_active_jobs_from_all_queries(false).expect(
|
||||
"failed to collect active queries in deadlock handler",
|
||||
)
|
||||
},
|
||||
|
|
|
|||
|
|
@ -50,7 +50,9 @@ impl<'tcx> QueryCtxt<'tcx> {
|
|||
}
|
||||
|
||||
fn depth_limit_error(self, job: QueryJobId) {
|
||||
let query_map = self.collect_active_jobs(true).expect("failed to collect active queries");
|
||||
let query_map = self
|
||||
.collect_active_jobs_from_all_queries(true)
|
||||
.expect("failed to collect active queries");
|
||||
let (info, depth) = job.find_dep_kind_root(query_map);
|
||||
|
||||
let suggested_limit = match self.tcx.recursion_limit() {
|
||||
|
|
@ -98,7 +100,7 @@ impl<'tcx> QueryContext<'tcx> for QueryCtxt<'tcx> {
|
|||
tls::with_related_context(self.tcx, |icx| icx.query)
|
||||
}
|
||||
|
||||
/// Returns a map of currently active query jobs.
|
||||
/// Returns a map of currently active query jobs, collected from all queries.
|
||||
///
|
||||
/// If `require_complete` is `true`, this function locks all shards of the
|
||||
/// query results to produce a complete map, which always returns `Ok`.
|
||||
|
|
@ -108,12 +110,15 @@ impl<'tcx> QueryContext<'tcx> for QueryCtxt<'tcx> {
|
|||
/// Prefer passing `false` to `require_complete` to avoid potential deadlocks,
|
||||
/// especially when called from within a deadlock handler, unless a
|
||||
/// complete map is needed and no deadlock is possible at this call site.
|
||||
fn collect_active_jobs(self, require_complete: bool) -> Result<QueryMap<'tcx>, QueryMap<'tcx>> {
|
||||
fn collect_active_jobs_from_all_queries(
|
||||
self,
|
||||
require_complete: bool,
|
||||
) -> Result<QueryMap<'tcx>, QueryMap<'tcx>> {
|
||||
let mut jobs = QueryMap::default();
|
||||
let mut complete = true;
|
||||
|
||||
for collect in super::COLLECT_ACTIVE_JOBS.iter() {
|
||||
if collect(self.tcx, &mut jobs, require_complete).is_none() {
|
||||
for gather_fn in crate::PER_QUERY_GATHER_ACTIVE_JOBS_FNS.iter() {
|
||||
if gather_fn(self.tcx, &mut jobs, require_complete).is_none() {
|
||||
complete = false;
|
||||
}
|
||||
}
|
||||
|
|
@ -731,7 +736,10 @@ macro_rules! define_queries {
|
|||
}
|
||||
}
|
||||
|
||||
pub(crate) fn collect_active_jobs<'tcx>(
|
||||
/// Internal per-query plumbing for collecting the set of active jobs for this query.
|
||||
///
|
||||
/// Should only be called through `PER_QUERY_GATHER_ACTIVE_JOBS_FNS`.
|
||||
pub(crate) fn gather_active_jobs<'tcx>(
|
||||
tcx: TyCtxt<'tcx>,
|
||||
qmap: &mut QueryMap<'tcx>,
|
||||
require_complete: bool,
|
||||
|
|
@ -741,12 +749,15 @@ macro_rules! define_queries {
|
|||
let name = stringify!($name);
|
||||
$crate::plumbing::create_query_frame(tcx, rustc_middle::query::descs::$name, key, kind, name)
|
||||
};
|
||||
let res = tcx.query_system.states.$name.collect_active_jobs(
|
||||
|
||||
// Call `gather_active_jobs_inner` to do the actual work.
|
||||
let res = tcx.query_system.states.$name.gather_active_jobs_inner(
|
||||
tcx,
|
||||
make_frame,
|
||||
qmap,
|
||||
require_complete,
|
||||
);
|
||||
|
||||
// this can be called during unwinding, and the function has a `try_`-prefix, so
|
||||
// don't `unwrap()` here, just manually check for `None` and do best-effort error
|
||||
// reporting.
|
||||
|
|
@ -816,10 +827,17 @@ macro_rules! define_queries {
|
|||
|
||||
// These arrays are used for iteration and can't be indexed by `DepKind`.
|
||||
|
||||
const COLLECT_ACTIVE_JOBS: &[
|
||||
for<'tcx> fn(TyCtxt<'tcx>, &mut QueryMap<'tcx>, bool) -> Option<()>
|
||||
] =
|
||||
&[$(query_impl::$name::collect_active_jobs),*];
|
||||
/// Used by `collect_active_jobs_from_all_queries` to iterate over all
|
||||
/// queries, and gather the active jobs for each query.
|
||||
///
|
||||
/// (We arbitrarily use the word "gather" when collecting the jobs for
|
||||
/// each individual query, so that we have distinct function names to
|
||||
/// grep for.)
|
||||
const PER_QUERY_GATHER_ACTIVE_JOBS_FNS: &[
|
||||
for<'tcx> fn(TyCtxt<'tcx>, &mut QueryMap<'tcx>, require_complete: bool) -> Option<()>
|
||||
] = &[
|
||||
$(query_impl::$name::gather_active_jobs),*
|
||||
];
|
||||
|
||||
const ALLOC_SELF_PROFILE_QUERY_STRINGS: &[
|
||||
for<'tcx> fn(TyCtxt<'tcx>, &mut QueryKeyStringCache)
|
||||
|
|
|
|||
|
|
@ -32,6 +32,8 @@ impl<'tcx> QueryInfo<QueryStackDeferred<'tcx>> {
|
|||
}
|
||||
}
|
||||
|
||||
/// Map from query job IDs to job information collected by
|
||||
/// [`QueryContext::collect_active_jobs_from_all_queries`].
|
||||
pub type QueryMap<'tcx> = FxHashMap<QueryJobId, QueryJobInfo<'tcx>>;
|
||||
|
||||
/// A value uniquely identifying an active query job.
|
||||
|
|
@ -613,7 +615,7 @@ pub fn print_query_stack<'tcx, Qcx: QueryContext<'tcx>>(
|
|||
let mut count_total = 0;
|
||||
|
||||
// Make use of a partial query map if we fail to take locks collecting active queries.
|
||||
let query_map = match qcx.collect_active_jobs(false) {
|
||||
let query_map = match qcx.collect_active_jobs_from_all_queries(false) {
|
||||
Ok(query_map) => query_map,
|
||||
Err(query_map) => query_map,
|
||||
};
|
||||
|
|
|
|||
|
|
@ -166,7 +166,10 @@ pub trait QueryContext<'tcx>: HasDepContext {
|
|||
/// Get the query information from the TLS context.
|
||||
fn current_query_job(self) -> Option<QueryJobId>;
|
||||
|
||||
fn collect_active_jobs(self, require_complete: bool) -> Result<QueryMap<'tcx>, QueryMap<'tcx>>;
|
||||
fn collect_active_jobs_from_all_queries(
|
||||
self,
|
||||
require_complete: bool,
|
||||
) -> Result<QueryMap<'tcx>, QueryMap<'tcx>>;
|
||||
|
||||
/// Load a side effect associated to the node in the previous session.
|
||||
fn load_side_effect(
|
||||
|
|
|
|||
|
|
@ -11,7 +11,6 @@ use rustc_data_structures::fingerprint::Fingerprint;
|
|||
use rustc_data_structures::hash_table::{self, Entry, HashTable};
|
||||
use rustc_data_structures::sharded::{self, Sharded};
|
||||
use rustc_data_structures::stack::ensure_sufficient_stack;
|
||||
use rustc_data_structures::sync::LockGuard;
|
||||
use rustc_data_structures::{outline, sync};
|
||||
use rustc_errors::{Diag, FatalError, StashKey};
|
||||
use rustc_span::{DUMMY_SP, Span};
|
||||
|
|
@ -79,7 +78,10 @@ where
|
|||
self.active.lock_shards().all(|shard| shard.is_empty())
|
||||
}
|
||||
|
||||
pub fn collect_active_jobs<Qcx: Copy>(
|
||||
/// Internal plumbing for collecting the set of active jobs for this query.
|
||||
///
|
||||
/// Should only be called from `gather_active_jobs`.
|
||||
pub fn gather_active_jobs_inner<Qcx: Copy>(
|
||||
&self,
|
||||
qcx: Qcx,
|
||||
make_frame: fn(Qcx, K) -> QueryStackFrame<QueryStackDeferred<'tcx>>,
|
||||
|
|
@ -88,23 +90,26 @@ where
|
|||
) -> Option<()> {
|
||||
let mut active = Vec::new();
|
||||
|
||||
let mut collect = |iter: LockGuard<'_, HashTable<(K, ActiveKeyStatus<'tcx>)>>| {
|
||||
for (k, v) in iter.iter() {
|
||||
// Helper to gather active jobs from a single shard.
|
||||
let mut gather_shard_jobs = |shard: &HashTable<(K, ActiveKeyStatus<'tcx>)>| {
|
||||
for (k, v) in shard.iter() {
|
||||
if let ActiveKeyStatus::Started(ref job) = *v {
|
||||
active.push((*k, job.clone()));
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
// Lock shards and gather jobs from each shard.
|
||||
if require_complete {
|
||||
for shard in self.active.lock_shards() {
|
||||
collect(shard);
|
||||
gather_shard_jobs(&shard);
|
||||
}
|
||||
} else {
|
||||
// We use try_lock_shards here since we are called from the
|
||||
// deadlock handler, and this shouldn't be locked.
|
||||
for shard in self.active.try_lock_shards() {
|
||||
collect(shard?);
|
||||
let shard = shard?;
|
||||
gather_shard_jobs(&shard);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -294,7 +299,10 @@ where
|
|||
{
|
||||
// Ensure there were no errors collecting all active jobs.
|
||||
// We need the complete map to ensure we find a cycle to break.
|
||||
let query_map = qcx.collect_active_jobs(false).ok().expect("failed to collect active queries");
|
||||
let query_map = qcx
|
||||
.collect_active_jobs_from_all_queries(false)
|
||||
.ok()
|
||||
.expect("failed to collect active queries");
|
||||
|
||||
let error = try_execute.find_cycle_in_stack(query_map, &qcx.current_query_job(), span);
|
||||
(mk_cycle(query, qcx, error.lift()), None)
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue