Auto merge of #151937 - Zalathar:rollup-WdVeaxy, r=Zalathar

Rollup of 3 pull requests

Successful merges:

 - rust-lang/rust#151927 (typeck_root_def_id: improve doc comment)
 - rust-lang/rust#151907 (Rename `QueryResult` to `ActiveKeyStatus`)
 - rust-lang/rust#151928 (ty::context: clean some code a little)

This commit is contained in:
bors 2026-02-01 03:27:21 +00:00
commit 878374e07f
3 changed files with 29 additions and 22 deletions

View file

@@ -3470,10 +3470,9 @@ impl<'tcx> TyCtxt<'tcx> {
pub fn intrinsic(self, def_id: impl IntoQueryParam<DefId> + Copy) -> Option<ty::IntrinsicDef> {
match self.def_kind(def_id) {
DefKind::Fn | DefKind::AssocFn => {}
_ => return None,
DefKind::Fn | DefKind::AssocFn => self.intrinsic_raw(def_id),
_ => None,
}
self.intrinsic_raw(def_id)
}
pub fn next_trait_solver_globally(self) -> bool {

View file

@@ -642,12 +642,8 @@ impl<'tcx> TyCtxt<'tcx> {
/// has its own type-checking context or "inference environment".
///
/// For example, a closure has its own `DefId`, but it is type-checked
/// with the containing item. Similarly, an inline const block has its
/// own `DefId` but it is type-checked together with the containing item.
///
/// Therefore, when we fetch the
/// `typeck` the closure, for example, we really wind up
/// fetching the `typeck` the enclosing fn item.
/// with the containing item. Therefore, when we fetch the `typeck` of the closure,
/// for example, we really wind up fetching the `typeck` of the enclosing fn item.
pub fn typeck_root_def_id(self, def_id: DefId) -> DefId {
let mut def_id = def_id;
while self.is_typeck_child(def_id) {

View file

@@ -33,13 +33,24 @@ fn equivalent_key<K: Eq, V>(k: &K) -> impl Fn(&(K, V)) -> bool + '_ {
move |x| x.0 == *k
}
/// For a particular query, keeps track of "active" keys, i.e. keys whose
/// evaluation has started but has not yet finished successfully.
///
/// (Successful query evaluation for a key is represented by an entry in the
/// query's in-memory cache.)
pub struct QueryState<'tcx, K> {
active: Sharded<hash_table::HashTable<(K, QueryResult<'tcx>)>>,
active: Sharded<hash_table::HashTable<(K, ActiveKeyStatus<'tcx>)>>,
}
/// Indicates the state of a query for a given key in a query map.
enum QueryResult<'tcx> {
/// An already executing query. The query job can be used to await for its completion.
/// For a particular query and key, tracks the status of a query evaluation
/// that has started, but has not yet finished successfully.
///
/// (Successful query evaluation for a key is represented by an entry in the
/// query's in-memory cache.)
enum ActiveKeyStatus<'tcx> {
/// Some thread is already evaluating the query for this key.
///
/// The enclosed [`QueryJob`] can be used to wait for it to finish.
Started(QueryJob<'tcx>),
/// The query panicked. Queries trying to wait on this will raise a fatal error which will
@@ -47,8 +58,9 @@ enum QueryResult<'tcx> {
Poisoned,
}
impl<'tcx> QueryResult<'tcx> {
/// Unwraps the query job expecting that it has started.
impl<'tcx> ActiveKeyStatus<'tcx> {
/// Obtains the enclosed [`QueryJob`], or panics if this query evaluation
/// was poisoned by a panic.
fn expect_job(self) -> QueryJob<'tcx> {
match self {
Self::Started(job) => job,
@@ -76,9 +88,9 @@ where
) -> Option<()> {
let mut active = Vec::new();
let mut collect = |iter: LockGuard<'_, HashTable<(K, QueryResult<'tcx>)>>| {
let mut collect = |iter: LockGuard<'_, HashTable<(K, ActiveKeyStatus<'tcx>)>>| {
for (k, v) in iter.iter() {
if let QueryResult::Started(ref job) = *v {
if let ActiveKeyStatus::Started(ref job) = *v {
active.push((*k, job.clone()));
}
}
@@ -222,7 +234,7 @@ where
Err(_) => panic!(),
Ok(occupied) => {
let ((key, value), vacant) = occupied.remove();
vacant.insert((key, QueryResult::Poisoned));
vacant.insert((key, ActiveKeyStatus::Poisoned));
value.expect_job()
}
}
@@ -319,7 +331,7 @@ where
let shard = query.query_state(qcx).active.lock_shard_by_hash(key_hash);
match shard.find(key_hash, equivalent_key(&key)) {
// The query we waited on panicked. Continue unwinding here.
Some((_, QueryResult::Poisoned)) => FatalError.raise(),
Some((_, ActiveKeyStatus::Poisoned)) => FatalError.raise(),
_ => panic!(
"query '{}' result must be in the cache or the query must be poisoned after a wait",
query.name()
@@ -373,7 +385,7 @@ where
// state map.
let id = qcx.next_job_id();
let job = QueryJob::new(id, span, current_job_id);
entry.insert((key, QueryResult::Started(job)));
entry.insert((key, ActiveKeyStatus::Started(job)));
// Drop the lock before we start executing the query
drop(state_lock);
@@ -382,7 +394,7 @@ where
}
Entry::Occupied(mut entry) => {
match &mut entry.get_mut().1 {
QueryResult::Started(job) => {
ActiveKeyStatus::Started(job) => {
if sync::is_dyn_thread_safe() {
// Get the latch out
let latch = job.latch();
@ -400,7 +412,7 @@ where
// so we just return the error.
cycle_error(query, qcx, id, span)
}
QueryResult::Poisoned => FatalError.raise(),
ActiveKeyStatus::Poisoned => FatalError.raise(),
}
}
}