Remove now-unnecessary indirection.
The previous commit moved some code from `rustc_query_system`, which doesn't have access to `TyCtxt` and `QueryCtxt`, to `rustc_query_impl`, which does. We can now remove quite a bit of indirection.

- Three methods in `trait QueryContext` are no longer needed (`next_job_id`, `current_query_job`, `start_query`). As a result, `QueryCtxt`'s trait impls of these methods are changed to inherent methods.
- `qcx: Q::Qcx` parameters are simplified to `qcx: QueryCtxt<'tcx>`.
- `*qcx.dep_context()` occurrences are simplified to `qcx.tcx`, and things like `qcx.dep_context().profiler()` become `qcx.tcx.prof`.
- `DepGraphData<<Q::Qcx as HasDepContext>::Deps>` becomes `DepGraphData<DepsType>`.

In short, various layers of indirection and abstraction are cut away. The resulting code is simpler, more concrete, and easier to understand. It's a good demonstration of the benefits of eliminating `rustc_query_system`, and there will be more to come.
This commit is contained in:
parent
563a2f04c7
commit
a6dd4d870a
3 changed files with 120 additions and 138 deletions
|
|
@ -5,6 +5,8 @@ use rustc_data_structures::hash_table::{Entry, HashTable};
|
|||
use rustc_data_structures::stack::ensure_sufficient_stack;
|
||||
use rustc_data_structures::{outline, sharded, sync};
|
||||
use rustc_errors::{Diag, FatalError, StashKey};
|
||||
use rustc_middle::dep_graph::DepsType;
|
||||
use rustc_middle::ty::TyCtxt;
|
||||
use rustc_query_system::dep_graph::{DepGraphData, DepNodeKey, HasDepContext};
|
||||
use rustc_query_system::query::{
|
||||
ActiveKeyStatus, CycleError, CycleErrorHandling, QueryCache, QueryContext, QueryDispatcher,
|
||||
|
|
@ -14,6 +16,7 @@ use rustc_query_system::query::{
|
|||
use rustc_span::{DUMMY_SP, Span};
|
||||
|
||||
use crate::dep_graph::{DepContext, DepNode, DepNodeIndex};
|
||||
use crate::plumbing::QueryCtxt;
|
||||
|
||||
#[inline]
|
||||
fn equivalent_key<K: Eq, V>(k: &K) -> impl Fn(&(K, V)) -> bool + '_ {
|
||||
|
|
@ -38,10 +41,10 @@ pub(crate) fn all_inactive<'tcx, K>(state: &QueryState<'tcx, K>) -> bool {
|
|||
/// Internal plumbing for collecting the set of active jobs for this query.
|
||||
///
|
||||
/// Should only be called from `gather_active_jobs`.
|
||||
pub(crate) fn gather_active_jobs_inner<'tcx, Qcx: Copy, K: Copy>(
|
||||
pub(crate) fn gather_active_jobs_inner<'tcx, K: Copy>(
|
||||
state: &QueryState<'tcx, K>,
|
||||
qcx: Qcx,
|
||||
make_frame: fn(Qcx, K) -> QueryStackFrame<QueryStackDeferred<'tcx>>,
|
||||
tcx: TyCtxt<'tcx>,
|
||||
make_frame: fn(TyCtxt<'tcx>, K) -> QueryStackFrame<QueryStackDeferred<'tcx>>,
|
||||
jobs: &mut QueryMap<'tcx>,
|
||||
require_complete: bool,
|
||||
) -> Option<()> {
|
||||
|
|
@ -73,7 +76,7 @@ pub(crate) fn gather_active_jobs_inner<'tcx, Qcx: Copy, K: Copy>(
|
|||
// Call `make_frame` while we're not holding a `state.active` lock as `make_frame` may call
|
||||
// queries leading to a deadlock.
|
||||
for (key, job) in active {
|
||||
let frame = make_frame(qcx, key);
|
||||
let frame = make_frame(tcx, key);
|
||||
jobs.insert(job.id, QueryJobInfo { frame, job });
|
||||
}
|
||||
|
||||
|
|
@ -92,36 +95,36 @@ where
|
|||
|
||||
#[cold]
|
||||
#[inline(never)]
|
||||
fn mk_cycle<'tcx, Q>(query: Q, qcx: Q::Qcx, cycle_error: CycleError) -> Q::Value
|
||||
fn mk_cycle<'tcx, Q>(query: Q, qcx: QueryCtxt<'tcx>, cycle_error: CycleError) -> Q::Value
|
||||
where
|
||||
Q: QueryDispatcher<'tcx>,
|
||||
Q: QueryDispatcher<'tcx, Qcx = QueryCtxt<'tcx>>,
|
||||
{
|
||||
let error = report_cycle(qcx.dep_context().sess(), &cycle_error);
|
||||
let error = report_cycle(qcx.tcx.sess, &cycle_error);
|
||||
handle_cycle_error(query, qcx, &cycle_error, error)
|
||||
}
|
||||
|
||||
fn handle_cycle_error<'tcx, Q>(
|
||||
query: Q,
|
||||
qcx: Q::Qcx,
|
||||
qcx: QueryCtxt<'tcx>,
|
||||
cycle_error: &CycleError,
|
||||
error: Diag<'_>,
|
||||
) -> Q::Value
|
||||
where
|
||||
Q: QueryDispatcher<'tcx>,
|
||||
Q: QueryDispatcher<'tcx, Qcx = QueryCtxt<'tcx>>,
|
||||
{
|
||||
match query.cycle_error_handling() {
|
||||
CycleErrorHandling::Error => {
|
||||
let guar = error.emit();
|
||||
query.value_from_cycle_error(*qcx.dep_context(), cycle_error, guar)
|
||||
query.value_from_cycle_error(qcx.tcx, cycle_error, guar)
|
||||
}
|
||||
CycleErrorHandling::Fatal => {
|
||||
error.emit();
|
||||
qcx.dep_context().sess().dcx().abort_if_errors();
|
||||
qcx.tcx.dcx().abort_if_errors();
|
||||
unreachable!()
|
||||
}
|
||||
CycleErrorHandling::DelayBug => {
|
||||
let guar = error.delay_as_bug();
|
||||
query.value_from_cycle_error(*qcx.dep_context(), cycle_error, guar)
|
||||
query.value_from_cycle_error(qcx.tcx, cycle_error, guar)
|
||||
}
|
||||
CycleErrorHandling::Stash => {
|
||||
let guar = if let Some(root) = cycle_error.cycle.first()
|
||||
|
|
@ -131,7 +134,7 @@ where
|
|||
} else {
|
||||
error.emit()
|
||||
};
|
||||
query.value_from_cycle_error(*qcx.dep_context(), cycle_error, guar)
|
||||
query.value_from_cycle_error(qcx.tcx, cycle_error, guar)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -204,12 +207,12 @@ where
|
|||
#[inline(never)]
|
||||
fn cycle_error<'tcx, Q>(
|
||||
query: Q,
|
||||
qcx: Q::Qcx,
|
||||
qcx: QueryCtxt<'tcx>,
|
||||
try_execute: QueryJobId,
|
||||
span: Span,
|
||||
) -> (Q::Value, Option<DepNodeIndex>)
|
||||
where
|
||||
Q: QueryDispatcher<'tcx>,
|
||||
Q: QueryDispatcher<'tcx, Qcx = QueryCtxt<'tcx>>,
|
||||
{
|
||||
// Ensure there was no errors collecting all active jobs.
|
||||
// We need the complete map to ensure we find a cycle to break.
|
||||
|
|
@ -225,19 +228,19 @@ where
|
|||
#[inline(always)]
|
||||
fn wait_for_query<'tcx, Q>(
|
||||
query: Q,
|
||||
qcx: Q::Qcx,
|
||||
qcx: QueryCtxt<'tcx>,
|
||||
span: Span,
|
||||
key: Q::Key,
|
||||
latch: QueryLatch<'tcx>,
|
||||
current: Option<QueryJobId>,
|
||||
) -> (Q::Value, Option<DepNodeIndex>)
|
||||
where
|
||||
Q: QueryDispatcher<'tcx>,
|
||||
Q: QueryDispatcher<'tcx, Qcx = QueryCtxt<'tcx>>,
|
||||
{
|
||||
// For parallel queries, we'll block and wait until the query running
|
||||
// in another thread has completed. Record how long we wait in the
|
||||
// self-profiler.
|
||||
let query_blocked_prof_timer = qcx.dep_context().profiler().query_blocked();
|
||||
let query_blocked_prof_timer = qcx.tcx.prof.query_blocked();
|
||||
|
||||
// With parallel queries we might just have to wait on some other
|
||||
// thread.
|
||||
|
|
@ -262,7 +265,7 @@ where
|
|||
})
|
||||
};
|
||||
|
||||
qcx.dep_context().profiler().query_cache_hit(index.into());
|
||||
qcx.tcx.prof.query_cache_hit(index.into());
|
||||
query_blocked_prof_timer.finish_with_query_invocation_id(index.into());
|
||||
|
||||
(v, Some(index))
|
||||
|
|
@ -274,13 +277,13 @@ where
|
|||
#[inline(never)]
|
||||
fn try_execute_query<'tcx, Q, const INCR: bool>(
|
||||
query: Q,
|
||||
qcx: Q::Qcx,
|
||||
qcx: QueryCtxt<'tcx>,
|
||||
span: Span,
|
||||
key: Q::Key,
|
||||
dep_node: Option<DepNode>,
|
||||
) -> (Q::Value, Option<DepNodeIndex>)
|
||||
where
|
||||
Q: QueryDispatcher<'tcx>,
|
||||
Q: QueryDispatcher<'tcx, Qcx = QueryCtxt<'tcx>>,
|
||||
{
|
||||
let state = query.query_state(qcx);
|
||||
let key_hash = sharded::make_hash(&key);
|
||||
|
|
@ -292,9 +295,9 @@ where
|
|||
// re-executing the query since `try_start` only checks that the query is not currently
|
||||
// executing, but another thread may have already completed the query and stores it result
|
||||
// in the query cache.
|
||||
if qcx.dep_context().sess().threads() > 1 {
|
||||
if qcx.tcx.sess.threads() > 1 {
|
||||
if let Some((value, index)) = query.query_cache(qcx).lookup(&key) {
|
||||
qcx.dep_context().profiler().query_cache_hit(index.into());
|
||||
qcx.tcx.prof.query_cache_hit(index.into());
|
||||
return (value, Some(index));
|
||||
}
|
||||
}
|
||||
|
|
@ -343,7 +346,7 @@ where
|
|||
#[inline(always)]
|
||||
fn execute_job<'tcx, Q, const INCR: bool>(
|
||||
query: Q,
|
||||
qcx: Q::Qcx,
|
||||
qcx: QueryCtxt<'tcx>,
|
||||
state: &'tcx QueryState<'tcx, Q::Key>,
|
||||
key: Q::Key,
|
||||
key_hash: u64,
|
||||
|
|
@ -351,22 +354,15 @@ fn execute_job<'tcx, Q, const INCR: bool>(
|
|||
dep_node: Option<DepNode>,
|
||||
) -> (Q::Value, Option<DepNodeIndex>)
|
||||
where
|
||||
Q: QueryDispatcher<'tcx>,
|
||||
Q: QueryDispatcher<'tcx, Qcx = QueryCtxt<'tcx>>,
|
||||
{
|
||||
// Use `JobOwner` so the query will be poisoned if executing it panics.
|
||||
let job_owner = JobOwner { state, key };
|
||||
|
||||
debug_assert_eq!(qcx.dep_context().dep_graph().is_fully_enabled(), INCR);
|
||||
debug_assert_eq!(qcx.tcx.dep_graph.is_fully_enabled(), INCR);
|
||||
|
||||
let (result, dep_node_index) = if INCR {
|
||||
execute_job_incr(
|
||||
query,
|
||||
qcx,
|
||||
qcx.dep_context().dep_graph().data().unwrap(),
|
||||
key,
|
||||
dep_node,
|
||||
id,
|
||||
)
|
||||
execute_job_incr(query, qcx, qcx.tcx.dep_graph.data().unwrap(), key, dep_node, id)
|
||||
} else {
|
||||
execute_job_non_incr(query, qcx, key, id)
|
||||
};
|
||||
|
|
@ -395,7 +391,7 @@ where
|
|||
// We have an inconsistency. This can happen if one of the two
|
||||
// results is tainted by errors.
|
||||
assert!(
|
||||
qcx.dep_context().sess().dcx().has_errors().is_some(),
|
||||
qcx.tcx.dcx().has_errors().is_some(),
|
||||
"Computed query value for {:?}({:?}) is inconsistent with fed value,\n\
|
||||
computed={:#?}\nfed={:#?}",
|
||||
query.dep_kind(),
|
||||
|
|
@ -415,24 +411,24 @@ where
|
|||
#[inline(always)]
|
||||
fn execute_job_non_incr<'tcx, Q>(
|
||||
query: Q,
|
||||
qcx: Q::Qcx,
|
||||
qcx: QueryCtxt<'tcx>,
|
||||
key: Q::Key,
|
||||
job_id: QueryJobId,
|
||||
) -> (Q::Value, DepNodeIndex)
|
||||
where
|
||||
Q: QueryDispatcher<'tcx>,
|
||||
Q: QueryDispatcher<'tcx, Qcx = QueryCtxt<'tcx>>,
|
||||
{
|
||||
debug_assert!(!qcx.dep_context().dep_graph().is_fully_enabled());
|
||||
debug_assert!(!qcx.tcx.dep_graph.is_fully_enabled());
|
||||
|
||||
// Fingerprint the key, just to assert that it doesn't
|
||||
// have anything we don't consider hashable
|
||||
if cfg!(debug_assertions) {
|
||||
let _ = key.to_fingerprint(*qcx.dep_context());
|
||||
let _ = key.to_fingerprint(qcx.tcx);
|
||||
}
|
||||
|
||||
let prof_timer = qcx.dep_context().profiler().query_provider();
|
||||
let prof_timer = qcx.tcx.prof.query_provider();
|
||||
let result = qcx.start_query(job_id, query.depth_limit(), || query.compute(qcx, key));
|
||||
let dep_node_index = qcx.dep_context().dep_graph().next_virtual_depnode_index();
|
||||
let dep_node_index = qcx.tcx.dep_graph.next_virtual_depnode_index();
|
||||
prof_timer.finish_with_query_invocation_id(dep_node_index.into());
|
||||
|
||||
// Similarly, fingerprint the result to assert that
|
||||
|
|
@ -451,19 +447,18 @@ where
|
|||
#[inline(always)]
|
||||
fn execute_job_incr<'tcx, Q>(
|
||||
query: Q,
|
||||
qcx: Q::Qcx,
|
||||
dep_graph_data: &DepGraphData<<Q::Qcx as HasDepContext>::Deps>,
|
||||
qcx: QueryCtxt<'tcx>,
|
||||
dep_graph_data: &DepGraphData<DepsType>,
|
||||
key: Q::Key,
|
||||
mut dep_node_opt: Option<DepNode>,
|
||||
job_id: QueryJobId,
|
||||
) -> (Q::Value, DepNodeIndex)
|
||||
where
|
||||
Q: QueryDispatcher<'tcx>,
|
||||
Q: QueryDispatcher<'tcx, Qcx = QueryCtxt<'tcx>>,
|
||||
{
|
||||
if !query.anon() && !query.eval_always() {
|
||||
// `to_dep_node` is expensive for some `DepKind`s.
|
||||
let dep_node =
|
||||
dep_node_opt.get_or_insert_with(|| query.construct_dep_node(*qcx.dep_context(), &key));
|
||||
let dep_node = dep_node_opt.get_or_insert_with(|| query.construct_dep_node(qcx.tcx, &key));
|
||||
|
||||
// The diagnostics for this query will be promoted to the current session during
|
||||
// `try_mark_green()`, so we can ignore them here.
|
||||
|
|
@ -474,20 +469,16 @@ where
|
|||
}
|
||||
}
|
||||
|
||||
let prof_timer = qcx.dep_context().profiler().query_provider();
|
||||
let prof_timer = qcx.tcx.prof.query_provider();
|
||||
|
||||
let (result, dep_node_index) = qcx.start_query(job_id, query.depth_limit(), || {
|
||||
if query.anon() {
|
||||
return dep_graph_data.with_anon_task_inner(
|
||||
*qcx.dep_context(),
|
||||
query.dep_kind(),
|
||||
|| query.compute(qcx, key),
|
||||
);
|
||||
return dep_graph_data
|
||||
.with_anon_task_inner(qcx.tcx, query.dep_kind(), || query.compute(qcx, key));
|
||||
}
|
||||
|
||||
// `to_dep_node` is expensive for some `DepKind`s.
|
||||
let dep_node =
|
||||
dep_node_opt.unwrap_or_else(|| query.construct_dep_node(*qcx.dep_context(), &key));
|
||||
let dep_node = dep_node_opt.unwrap_or_else(|| query.construct_dep_node(qcx.tcx, &key));
|
||||
|
||||
dep_graph_data.with_task(
|
||||
dep_node,
|
||||
|
|
@ -506,13 +497,13 @@ where
|
|||
#[inline(always)]
|
||||
fn try_load_from_disk_and_cache_in_memory<'tcx, Q>(
|
||||
query: Q,
|
||||
dep_graph_data: &DepGraphData<<Q::Qcx as HasDepContext>::Deps>,
|
||||
qcx: Q::Qcx,
|
||||
dep_graph_data: &DepGraphData<DepsType>,
|
||||
qcx: QueryCtxt<'tcx>,
|
||||
key: &Q::Key,
|
||||
dep_node: &DepNode,
|
||||
) -> Option<(Q::Value, DepNodeIndex)>
|
||||
where
|
||||
Q: QueryDispatcher<'tcx>,
|
||||
Q: QueryDispatcher<'tcx, Qcx = QueryCtxt<'tcx>>,
|
||||
{
|
||||
// Note this function can be called concurrently from the same query
|
||||
// We must ensure that this is handled correctly.
|
||||
|
|
@ -524,7 +515,7 @@ where
|
|||
// First we try to load the result from the on-disk cache.
|
||||
// Some things are never cached on disk.
|
||||
if let Some(result) = query.try_load_from_disk(qcx, key, prev_dep_node_index, dep_node_index) {
|
||||
if std::intrinsics::unlikely(qcx.dep_context().sess().opts.unstable_opts.query_dep_graph) {
|
||||
if std::intrinsics::unlikely(qcx.tcx.sess.opts.unstable_opts.query_dep_graph) {
|
||||
dep_graph_data.mark_debug_loaded_from_disk(*dep_node)
|
||||
}
|
||||
|
||||
|
|
@ -538,10 +529,10 @@ where
|
|||
// give us some coverage of potential bugs though.
|
||||
let try_verify = prev_fingerprint.split().1.as_u64().is_multiple_of(32);
|
||||
if std::intrinsics::unlikely(
|
||||
try_verify || qcx.dep_context().sess().opts.unstable_opts.incremental_verify_ich,
|
||||
try_verify || qcx.tcx.sess.opts.unstable_opts.incremental_verify_ich,
|
||||
) {
|
||||
incremental_verify_ich(
|
||||
*qcx.dep_context(),
|
||||
qcx.tcx,
|
||||
dep_graph_data,
|
||||
&result,
|
||||
prev_dep_node_index,
|
||||
|
|
@ -556,7 +547,7 @@ where
|
|||
// We always expect to find a cached result for things that
|
||||
// can be forced from `DepNode`.
|
||||
debug_assert!(
|
||||
!query.will_cache_on_disk_for_key(*qcx.dep_context(), key)
|
||||
!query.will_cache_on_disk_for_key(qcx.tcx, key)
|
||||
|| !qcx.dep_context().fingerprint_style(dep_node.kind).reconstructible(),
|
||||
"missing on-disk cache entry for {dep_node:?}"
|
||||
);
|
||||
|
|
@ -570,10 +561,10 @@ where
|
|||
|
||||
// We could not load a result from the on-disk cache, so
|
||||
// recompute.
|
||||
let prof_timer = qcx.dep_context().profiler().query_provider();
|
||||
let prof_timer = qcx.tcx.prof.query_provider();
|
||||
|
||||
// The dep-graph for this computation is already in-place.
|
||||
let result = qcx.dep_context().dep_graph().with_ignore(|| query.compute(qcx, *key));
|
||||
let result = qcx.tcx.dep_graph.with_ignore(|| query.compute(qcx, *key));
|
||||
|
||||
prof_timer.finish_with_query_invocation_id(dep_node_index.into());
|
||||
|
||||
|
|
@ -587,7 +578,7 @@ where
|
|||
// See issue #82920 for an example of a miscompilation that would get turned into
|
||||
// an ICE by this check
|
||||
incremental_verify_ich(
|
||||
*qcx.dep_context(),
|
||||
qcx.tcx,
|
||||
dep_graph_data,
|
||||
&result,
|
||||
prev_dep_node_index,
|
||||
|
|
@ -609,12 +600,12 @@ where
|
|||
#[inline(never)]
|
||||
fn ensure_must_run<'tcx, Q>(
|
||||
query: Q,
|
||||
qcx: Q::Qcx,
|
||||
qcx: QueryCtxt<'tcx>,
|
||||
key: &Q::Key,
|
||||
check_cache: bool,
|
||||
) -> (bool, Option<DepNode>)
|
||||
where
|
||||
Q: QueryDispatcher<'tcx>,
|
||||
Q: QueryDispatcher<'tcx, Qcx = QueryCtxt<'tcx>>,
|
||||
{
|
||||
if query.eval_always() {
|
||||
return (true, None);
|
||||
|
|
@ -623,9 +614,9 @@ where
|
|||
// Ensuring an anonymous query makes no sense
|
||||
assert!(!query.anon());
|
||||
|
||||
let dep_node = query.construct_dep_node(*qcx.dep_context(), key);
|
||||
let dep_node = query.construct_dep_node(qcx.tcx, key);
|
||||
|
||||
let dep_graph = qcx.dep_context().dep_graph();
|
||||
let dep_graph = &qcx.tcx.dep_graph;
|
||||
let serialized_dep_node_index = match dep_graph.try_mark_green(qcx, &dep_node) {
|
||||
None => {
|
||||
// A None return from `try_mark_green` means that this is either
|
||||
|
|
@ -638,7 +629,7 @@ where
|
|||
}
|
||||
Some((serialized_dep_node_index, dep_node_index)) => {
|
||||
dep_graph.read_index(dep_node_index);
|
||||
qcx.dep_context().profiler().query_cache_hit(dep_node_index.into());
|
||||
qcx.tcx.prof.query_cache_hit(dep_node_index.into());
|
||||
serialized_dep_node_index
|
||||
}
|
||||
};
|
||||
|
|
@ -655,14 +646,14 @@ where
|
|||
#[inline(always)]
|
||||
pub(super) fn get_query_non_incr<'tcx, Q>(
|
||||
query: Q,
|
||||
qcx: Q::Qcx,
|
||||
qcx: QueryCtxt<'tcx>,
|
||||
span: Span,
|
||||
key: Q::Key,
|
||||
) -> Q::Value
|
||||
where
|
||||
Q: QueryDispatcher<'tcx>,
|
||||
Q: QueryDispatcher<'tcx, Qcx = QueryCtxt<'tcx>>,
|
||||
{
|
||||
debug_assert!(!qcx.dep_context().dep_graph().is_fully_enabled());
|
||||
debug_assert!(!qcx.tcx.dep_graph.is_fully_enabled());
|
||||
|
||||
ensure_sufficient_stack(|| try_execute_query::<Q, false>(query, qcx, span, key, None).0)
|
||||
}
|
||||
|
|
@ -670,15 +661,15 @@ where
|
|||
#[inline(always)]
|
||||
pub(super) fn get_query_incr<'tcx, Q>(
|
||||
query: Q,
|
||||
qcx: Q::Qcx,
|
||||
qcx: QueryCtxt<'tcx>,
|
||||
span: Span,
|
||||
key: Q::Key,
|
||||
mode: QueryMode,
|
||||
) -> Option<Q::Value>
|
||||
where
|
||||
Q: QueryDispatcher<'tcx>,
|
||||
Q: QueryDispatcher<'tcx, Qcx = QueryCtxt<'tcx>>,
|
||||
{
|
||||
debug_assert!(qcx.dep_context().dep_graph().is_fully_enabled());
|
||||
debug_assert!(qcx.tcx.dep_graph.is_fully_enabled());
|
||||
|
||||
let dep_node = if let QueryMode::Ensure { check_cache } = mode {
|
||||
let (must_run, dep_node) = ensure_must_run(query, qcx, &key, check_cache);
|
||||
|
|
@ -693,19 +684,19 @@ where
|
|||
let (result, dep_node_index) =
|
||||
ensure_sufficient_stack(|| try_execute_query::<Q, true>(query, qcx, span, key, dep_node));
|
||||
if let Some(dep_node_index) = dep_node_index {
|
||||
qcx.dep_context().dep_graph().read_index(dep_node_index)
|
||||
qcx.tcx.dep_graph.read_index(dep_node_index)
|
||||
}
|
||||
Some(result)
|
||||
}
|
||||
|
||||
pub(super) fn force_query<'tcx, Q>(query: Q, qcx: Q::Qcx, key: Q::Key, dep_node: DepNode)
|
||||
pub(super) fn force_query<'tcx, Q>(query: Q, qcx: QueryCtxt<'tcx>, key: Q::Key, dep_node: DepNode)
|
||||
where
|
||||
Q: QueryDispatcher<'tcx>,
|
||||
Q: QueryDispatcher<'tcx, Qcx = QueryCtxt<'tcx>>,
|
||||
{
|
||||
// We may be concurrently trying both execute and force a query.
|
||||
// Ensure that only one of them runs the query.
|
||||
if let Some((_, index)) = query.query_cache(qcx).lookup(&key) {
|
||||
qcx.dep_context().profiler().query_cache_hit(index.into());
|
||||
qcx.tcx.prof.query_cache_hit(index.into());
|
||||
return;
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -14,7 +14,8 @@ use rustc_hir::limit::Limit;
|
|||
use rustc_index::Idx;
|
||||
use rustc_middle::bug;
|
||||
use rustc_middle::dep_graph::{
|
||||
self, DepContext, DepKindVTable, DepNode, DepNodeIndex, SerializedDepNodeIndex, dep_kinds,
|
||||
self, DepContext, DepKindVTable, DepNode, DepNodeIndex, DepsType, SerializedDepNodeIndex,
|
||||
dep_kinds,
|
||||
};
|
||||
use rustc_middle::query::Key;
|
||||
use rustc_middle::query::on_disk_cache::{
|
||||
|
|
@ -69,10 +70,57 @@ impl<'tcx> QueryCtxt<'tcx> {
|
|||
crate_name: self.tcx.crate_name(LOCAL_CRATE),
|
||||
});
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub(crate) fn next_job_id(self) -> QueryJobId {
|
||||
QueryJobId(
|
||||
NonZero::new(
|
||||
self.tcx.query_system.jobs.fetch_add(1, std::sync::atomic::Ordering::Relaxed),
|
||||
)
|
||||
.unwrap(),
|
||||
)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub(crate) fn current_query_job(self) -> Option<QueryJobId> {
|
||||
tls::with_related_context(self.tcx, |icx| icx.query)
|
||||
}
|
||||
|
||||
/// Executes a job by changing the `ImplicitCtxt` to point to the
|
||||
/// new query job while it executes.
|
||||
#[inline(always)]
|
||||
pub(crate) fn start_query<R>(
|
||||
self,
|
||||
token: QueryJobId,
|
||||
depth_limit: bool,
|
||||
compute: impl FnOnce() -> R,
|
||||
) -> R {
|
||||
// The `TyCtxt` stored in TLS has the same global interner lifetime
|
||||
// as `self`, so we use `with_related_context` to relate the 'tcx lifetimes
|
||||
// when accessing the `ImplicitCtxt`.
|
||||
tls::with_related_context(self.tcx, move |current_icx| {
|
||||
if depth_limit
|
||||
&& !self.tcx.recursion_limit().value_within_limit(current_icx.query_depth)
|
||||
{
|
||||
self.depth_limit_error(token);
|
||||
}
|
||||
|
||||
// Update the `ImplicitCtxt` to point to our new query job.
|
||||
let new_icx = ImplicitCtxt {
|
||||
tcx: self.tcx,
|
||||
query: Some(token),
|
||||
query_depth: current_icx.query_depth + depth_limit as usize,
|
||||
task_deps: current_icx.task_deps,
|
||||
};
|
||||
|
||||
// Use the `ImplicitCtxt` while we execute the query.
|
||||
tls::enter_context(&new_icx, compute)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
impl<'tcx> HasDepContext for QueryCtxt<'tcx> {
|
||||
type Deps = rustc_middle::dep_graph::DepsType;
|
||||
type Deps = DepsType;
|
||||
type DepContext = TyCtxt<'tcx>;
|
||||
|
||||
#[inline]
|
||||
|
|
@ -87,21 +135,6 @@ impl<'tcx> QueryContext<'tcx> for QueryCtxt<'tcx> {
|
|||
&self.tcx.jobserver_proxy
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn next_job_id(self) -> QueryJobId {
|
||||
QueryJobId(
|
||||
NonZero::new(
|
||||
self.tcx.query_system.jobs.fetch_add(1, std::sync::atomic::Ordering::Relaxed),
|
||||
)
|
||||
.unwrap(),
|
||||
)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn current_query_job(self) -> Option<QueryJobId> {
|
||||
tls::with_related_context(self.tcx, |icx| icx.query)
|
||||
}
|
||||
|
||||
/// Returns a map of currently active query jobs, collected from all queries.
|
||||
///
|
||||
/// If `require_complete` is `true`, this function locks all shards of the
|
||||
|
|
@ -147,38 +180,6 @@ impl<'tcx> QueryContext<'tcx> for QueryCtxt<'tcx> {
|
|||
c.store_side_effect(dep_node_index, side_effect)
|
||||
}
|
||||
}
|
||||
|
||||
/// Executes a job by changing the `ImplicitCtxt` to point to the
|
||||
/// new query job while it executes.
|
||||
#[inline(always)]
|
||||
fn start_query<R>(
|
||||
self,
|
||||
token: QueryJobId,
|
||||
depth_limit: bool,
|
||||
compute: impl FnOnce() -> R,
|
||||
) -> R {
|
||||
// The `TyCtxt` stored in TLS has the same global interner lifetime
|
||||
// as `self`, so we use `with_related_context` to relate the 'tcx lifetimes
|
||||
// when accessing the `ImplicitCtxt`.
|
||||
tls::with_related_context(self.tcx, move |current_icx| {
|
||||
if depth_limit
|
||||
&& !self.tcx.recursion_limit().value_within_limit(current_icx.query_depth)
|
||||
{
|
||||
self.depth_limit_error(token);
|
||||
}
|
||||
|
||||
// Update the `ImplicitCtxt` to point to our new query job.
|
||||
let new_icx = ImplicitCtxt {
|
||||
tcx: self.tcx,
|
||||
query: Some(token),
|
||||
query_depth: current_icx.query_depth + depth_limit as usize,
|
||||
task_deps: current_icx.task_deps,
|
||||
};
|
||||
|
||||
// Use the `ImplicitCtxt` while we execute the query.
|
||||
tls::enter_context(&new_icx, compute)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
pub(super) fn try_mark_green<'tcx>(tcx: TyCtxt<'tcx>, dep_node: &dep_graph::DepNode) -> bool {
|
||||
|
|
|
|||
|
|
@ -161,11 +161,6 @@ pub trait QueryContext<'tcx>: HasDepContext {
|
|||
/// a token while waiting on a query.
|
||||
fn jobserver_proxy(&self) -> &Proxy;
|
||||
|
||||
fn next_job_id(self) -> QueryJobId;
|
||||
|
||||
/// Get the query information from the TLS context.
|
||||
fn current_query_job(self) -> Option<QueryJobId>;
|
||||
|
||||
fn collect_active_jobs_from_all_queries(
|
||||
self,
|
||||
require_complete: bool,
|
||||
|
|
@ -179,9 +174,4 @@ pub trait QueryContext<'tcx>: HasDepContext {
|
|||
|
||||
/// Register a side effect for the given node, for use in next session.
|
||||
fn store_side_effect(self, dep_node_index: DepNodeIndex, side_effect: QuerySideEffect);
|
||||
|
||||
/// Executes a job by changing the `ImplicitCtxt` to point to the
|
||||
/// new query job while it executes.
|
||||
fn start_query<R>(self, token: QueryJobId, depth_limit: bool, compute: impl FnOnce() -> R)
|
||||
-> R;
|
||||
}
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue