Auto merge of #151210 - matthiaskrgr:rollup-JwDAUuv, r=matthiaskrgr

Rollup of 6 pull requests

Successful merges:

 - rust-lang/rust#145354 (Cache derive proc macro expansion with incremental query)
 - rust-lang/rust#151123 (Support primitives in type info reflection)
 - rust-lang/rust#151178 (simplify words initialization using Rc::new_zeroed)
 - rust-lang/rust#151187 (Use `default_field_values` more in `Resolver`)
 - rust-lang/rust#151197 (remove lcnr from compiler review rotation)
 - rust-lang/rust#151203 (Revert `QueryStackFrame` split)

r? @ghost
This commit is contained in:
bors 2026-01-16 16:44:08 +00:00
commit 503745e917
54 changed files with 1161 additions and 574 deletions

View file

@ -3887,11 +3887,13 @@ dependencies = [
"rustc_lexer",
"rustc_lint_defs",
"rustc_macros",
"rustc_middle",
"rustc_parse",
"rustc_proc_macro",
"rustc_serialize",
"rustc_session",
"rustc_span",
"scoped-tls",
"smallvec",
"thin-vec",
"tracing",

View file

@ -3348,7 +3348,8 @@ impl UseTree {
/// Distinguishes between `Attribute`s that decorate items and Attributes that
/// are contained as statements within items. These two cases need to be
/// distinguished for pretty-printing.
#[derive(Clone, PartialEq, Encodable, Decodable, Debug, Copy, HashStable_Generic, Walkable)]
#[derive(Clone, PartialEq, Eq, Hash, Debug, Copy)]
#[derive(Encodable, Decodable, HashStable_Generic, Walkable)]
pub enum AttrStyle {
Outer,
Inner,

View file

@ -40,13 +40,13 @@ impl DocFragmentKind {
}
}
#[derive(Clone, Copy, PartialEq, Eq, Encodable, Decodable, Debug, HashStable_Generic)]
#[derive(Clone, Copy, PartialEq, Eq, Hash, Encodable, Decodable, Debug, HashStable_Generic)]
pub enum CommentKind {
Line,
Block,
}
#[derive(Copy, Clone, PartialEq, Debug, Encodable, Decodable, HashStable_Generic)]
#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug, Encodable, Decodable, HashStable_Generic)]
pub enum InvisibleOrigin {
// From the expansion of a metavariable in a declarative macro.
MetaVar(MetaVarKind),
@ -123,7 +123,7 @@ impl fmt::Display for MetaVarKind {
/// Describes how a sequence of token trees is delimited.
/// Cannot use `proc_macro::Delimiter` directly because this
/// structure should implement some additional traits.
#[derive(Copy, Clone, Debug, PartialEq, Encodable, Decodable, HashStable_Generic)]
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, Encodable, Decodable, HashStable_Generic)]
pub enum Delimiter {
/// `( ... )`
Parenthesis,
@ -186,7 +186,7 @@ impl Delimiter {
// type. This means that float literals like `1f32` are classified by this type
// as `Int`. Only upon conversion to `ast::LitKind` will such a literal be
// given the `Float` kind.
#[derive(Clone, Copy, PartialEq, Encodable, Decodable, Debug, HashStable_Generic)]
#[derive(Clone, Copy, PartialEq, Eq, Hash, Encodable, Decodable, Debug, HashStable_Generic)]
pub enum LitKind {
Bool, // AST only, must never appear in a `Token`
Byte,
@ -203,7 +203,7 @@ pub enum LitKind {
}
/// A literal token.
#[derive(Clone, Copy, PartialEq, Encodable, Decodable, Debug, HashStable_Generic)]
#[derive(Clone, Copy, PartialEq, Eq, Hash, Encodable, Decodable, Debug, HashStable_Generic)]
pub struct Lit {
pub kind: LitKind,
pub symbol: Symbol,
@ -349,7 +349,7 @@ fn ident_can_begin_type(name: Symbol, span: Span, is_raw: IdentIsRaw) -> bool {
.contains(&name)
}
#[derive(PartialEq, Encodable, Decodable, Debug, Copy, Clone, HashStable_Generic)]
#[derive(PartialEq, Eq, Encodable, Decodable, Hash, Debug, Copy, Clone, HashStable_Generic)]
pub enum IdentIsRaw {
No,
Yes,
@ -376,7 +376,7 @@ impl From<bool> for IdentIsRaw {
}
}
#[derive(Clone, Copy, PartialEq, Encodable, Decodable, Debug, HashStable_Generic)]
#[derive(Clone, Copy, PartialEq, Eq, Hash, Encodable, Decodable, Debug, HashStable_Generic)]
pub enum TokenKind {
/* Expression-operator symbols. */
/// `=`
@ -526,7 +526,7 @@ pub enum TokenKind {
Eof,
}
#[derive(Clone, Copy, PartialEq, Encodable, Decodable, Debug, HashStable_Generic)]
#[derive(Clone, Copy, PartialEq, Eq, Hash, Encodable, Decodable, Debug, HashStable_Generic)]
pub struct Token {
pub kind: TokenKind,
pub span: Span,

View file

@ -5,6 +5,7 @@
//! which are themselves a single [`Token`] or a `Delimited` subsequence of tokens.
use std::borrow::Cow;
use std::hash::Hash;
use std::ops::Range;
use std::sync::Arc;
use std::{cmp, fmt, iter, mem};
@ -22,7 +23,7 @@ use crate::token::{self, Delimiter, Token, TokenKind};
use crate::{AttrVec, Attribute};
/// Part of a `TokenStream`.
#[derive(Debug, Clone, PartialEq, Encodable, Decodable, HashStable_Generic)]
#[derive(Debug, Clone, PartialEq, Eq, Hash, Encodable, Decodable, HashStable_Generic)]
pub enum TokenTree {
/// A single token. Should never be `OpenDelim` or `CloseDelim`, because
/// delimiters are implicitly represented by `Delimited`.
@ -538,7 +539,7 @@ pub struct AttrsTarget {
/// compound token. Used for conversions to `proc_macro::Spacing`. Also used to
/// guide pretty-printing, which is where the `JointHidden` value (which isn't
/// part of `proc_macro::Spacing`) comes in useful.
#[derive(Clone, Copy, Debug, PartialEq, Encodable, Decodable, HashStable_Generic)]
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, Encodable, Decodable, HashStable_Generic)]
pub enum Spacing {
/// The token cannot join with the following token to form a compound
/// token.
@ -595,7 +596,7 @@ pub enum Spacing {
}
/// A `TokenStream` is an abstract sequence of tokens, organized into [`TokenTree`]s.
#[derive(Clone, Debug, Default, Encodable, Decodable)]
#[derive(Clone, Debug, Default, PartialEq, Eq, Hash, Encodable, Decodable)]
pub struct TokenStream(pub(crate) Arc<Vec<TokenTree>>);
impl TokenStream {
@ -811,14 +812,6 @@ impl TokenStream {
}
}
impl PartialEq<TokenStream> for TokenStream {
fn eq(&self, other: &TokenStream) -> bool {
self.iter().eq(other.iter())
}
}
impl Eq for TokenStream {}
impl FromIterator<TokenTree> for TokenStream {
fn from_iter<I: IntoIterator<Item = TokenTree>>(iter: I) -> Self {
TokenStream::new(iter.into_iter().collect::<Vec<TokenTree>>())
@ -970,7 +963,8 @@ impl TokenCursor {
}
}
#[derive(Debug, Copy, Clone, PartialEq, Encodable, Decodable, HashStable_Generic, Walkable)]
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
#[derive(Encodable, Decodable, HashStable_Generic, Walkable)]
pub struct DelimSpan {
pub open: Span,
pub close: Span,
@ -994,7 +988,7 @@ impl DelimSpan {
}
}
#[derive(Copy, Clone, Debug, PartialEq, Encodable, Decodable, HashStable_Generic)]
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, Encodable, Decodable, HashStable_Generic)]
pub struct DelimSpacing {
pub open: Spacing,
pub close: Spacing,

View file

@ -1,6 +1,6 @@
use rustc_abi::FieldIdx;
use rustc_hir::LangItem;
use rustc_middle::mir::interpret::CtfeProvenance;
use rustc_middle::mir::interpret::{CtfeProvenance, Scalar};
use rustc_middle::span_bug;
use rustc_middle::ty::layout::TyAndLayout;
use rustc_middle::ty::{self, Const, ScalarInt, Ty};
@ -35,6 +35,7 @@ impl<'tcx> InterpCx<'tcx, CompileTimeMachine<'tcx>> {
interp_ok((variant_id, self.project_downcast(&field_dest, variant_id)?))
};
let ptr_bit_width = || self.tcx.data_layout.pointer_size().bits();
match field.name {
sym::kind => {
let variant_index = match ty.kind() {
@ -64,13 +65,46 @@ impl<'tcx> InterpCx<'tcx, CompileTimeMachine<'tcx>> {
variant
}
// For now just merge all primitives into one `Leaf` variant with no data
ty::Uint(_) | ty::Int(_) | ty::Float(_) | ty::Char | ty::Bool => {
downcast(sym::Leaf)?.0
ty::Bool => {
let (variant, _variant_place) = downcast(sym::Bool)?;
variant
}
ty::Char => {
let (variant, _variant_place) = downcast(sym::Char)?;
variant
}
ty::Int(int_ty) => {
let (variant, variant_place) = downcast(sym::Int)?;
let place = self.project_field(&variant_place, FieldIdx::ZERO)?;
self.write_int_type_info(
place,
int_ty.bit_width().unwrap_or_else(/* isize */ ptr_bit_width),
true,
)?;
variant
}
ty::Uint(uint_ty) => {
let (variant, variant_place) = downcast(sym::Int)?;
let place = self.project_field(&variant_place, FieldIdx::ZERO)?;
self.write_int_type_info(
place,
uint_ty.bit_width().unwrap_or_else(/* usize */ ptr_bit_width),
false,
)?;
variant
}
ty::Float(float_ty) => {
let (variant, variant_place) = downcast(sym::Float)?;
let place = self.project_field(&variant_place, FieldIdx::ZERO)?;
self.write_float_type_info(place, float_ty.bit_width())?;
variant
}
ty::Str => {
let (variant, _variant_place) = downcast(sym::Str)?;
variant
}
ty::Adt(_, _)
| ty::Foreign(_)
| ty::Str
| ty::Pat(_, _)
| ty::Slice(_)
| ty::RawPtr(..)
@ -203,4 +237,46 @@ impl<'tcx> InterpCx<'tcx, CompileTimeMachine<'tcx>> {
interp_ok(())
}
/// Fills in the fields of the integer type-info struct stored at `place`.
///
/// Iterates every field of the ADT behind `place` and writes:
/// - `bit_width`: the integer's width in bits, as a target-`usize` scalar;
/// - `signed`: whether the integer type is signed, as a bool scalar.
/// Any other field name is a bug (the struct layout and this writer must stay in sync).
fn write_int_type_info(
&mut self,
place: impl Writeable<'tcx, CtfeProvenance>,
bit_width: u64,
signed: bool,
) -> InterpResult<'tcx> {
// NOTE(review): `non_enum_variant()` assumes `place`'s type is a struct
// (single-variant ADT) — confirmed by `unwrap()` on `ty_adt_def()`.
for (field_idx, field) in
place.layout().ty.ty_adt_def().unwrap().non_enum_variant().fields.iter_enumerated()
{
let field_place = self.project_field(&place, field_idx)?;
match field.name {
// Width is encoded as a target-usize; `unwrap` is fine since bit
// widths are tiny relative to usize's range.
sym::bit_width => self.write_scalar(
ScalarInt::try_from_target_usize(bit_width, self.tcx.tcx).unwrap(),
&field_place,
)?,
sym::signed => self.write_scalar(Scalar::from_bool(signed), &field_place)?,
// Unknown field: the struct definition changed without updating this writer.
other => span_bug!(self.tcx.def_span(field.did), "unimplemented field {other}"),
}
}
interp_ok(())
}
/// Fills in the fields of the float type-info struct stored at `place`.
///
/// Like `write_int_type_info`, but floats only carry a `bit_width` field
/// (there is no signedness flag to record).
fn write_float_type_info(
&mut self,
place: impl Writeable<'tcx, CtfeProvenance>,
bit_width: u64,
) -> InterpResult<'tcx> {
// NOTE(review): assumes `place`'s type is a single-variant ADT (struct);
// `unwrap()` on `ty_adt_def()` enforces that it is an ADT at all.
for (field_idx, field) in
place.layout().ty.ty_adt_def().unwrap().non_enum_variant().fields.iter_enumerated()
{
let field_place = self.project_field(&place, field_idx)?;
match field.name {
// Width in bits, encoded as a target-usize scalar.
sym::bit_width => self.write_scalar(
ScalarInt::try_from_target_usize(bit_width, self.tcx.tcx).unwrap(),
&field_place,
)?,
// Any other field means this writer is out of sync with the struct.
other => span_bug!(self.tcx.def_span(field.did), "unimplemented field {other}"),
}
}
interp_ok(())
}
}

View file

@ -28,3 +28,18 @@ macro_rules! define_stable_id_collections {
pub type $entry_name<'a, T> = $crate::fx::IndexEntry<'a, $key, T>;
};
}
/// Const-evaluable constructors for the `Fx` hash collections, usable in
/// `const` contexts where `Default::default()` is not (yet) available.
pub mod default {
use super::{FxBuildHasher, FxHashMap, FxHashSet};
// FIXME: These two functions will become unnecessary after
// <https://github.com/rust-lang/rustc-hash/pull/63> lands and we start using the corresponding
// `rustc-hash` version. After that we can use `Default::default()` instead.
/// Creates an empty `FxHashMap` in a `const` context.
pub const fn fx_hash_map<K, V>() -> FxHashMap<K, V> {
FxHashMap::with_hasher(FxBuildHasher)
}
/// Creates an empty `FxHashSet` in a `const` context.
pub const fn fx_hash_set<V>() -> FxHashSet<V> {
FxHashSet::with_hasher(FxBuildHasher)
}
}

View file

@ -18,6 +18,8 @@
#![feature(assert_matches)]
#![feature(auto_traits)]
#![feature(cfg_select)]
#![feature(const_default)]
#![feature(const_trait_impl)]
#![feature(core_intrinsics)]
#![feature(dropck_eyepatch)]
#![feature(extend_one)]

View file

@ -8,7 +8,7 @@ use std::hash::Hash;
use std::iter::{Product, Sum};
use std::ops::Index;
use rustc_hash::{FxHashMap, FxHashSet};
use rustc_hash::{FxBuildHasher, FxHashMap, FxHashSet};
use rustc_macros::{Decodable_NoContext, Encodable_NoContext};
use crate::fingerprint::Fingerprint;
@ -241,10 +241,10 @@ pub struct UnordSet<V: Eq + Hash> {
impl<V: Eq + Hash> UnordCollection for UnordSet<V> {}
impl<V: Eq + Hash> Default for UnordSet<V> {
impl<V: Eq + Hash> const Default for UnordSet<V> {
#[inline]
fn default() -> Self {
Self { inner: FxHashSet::default() }
Self { inner: FxHashSet::with_hasher(FxBuildHasher) }
}
}
@ -438,10 +438,10 @@ pub struct UnordMap<K: Eq + Hash, V> {
impl<K: Eq + Hash, V> UnordCollection for UnordMap<K, V> {}
impl<K: Eq + Hash, V> Default for UnordMap<K, V> {
impl<K: Eq + Hash, V> const Default for UnordMap<K, V> {
#[inline]
fn default() -> Self {
Self { inner: FxHashMap::default() }
Self { inner: FxHashMap::with_hasher(FxBuildHasher) }
}
}

View file

@ -21,6 +21,7 @@ rustc_hir = { path = "../rustc_hir" }
rustc_lexer = { path = "../rustc_lexer" }
rustc_lint_defs = { path = "../rustc_lint_defs" }
rustc_macros = { path = "../rustc_macros" }
rustc_middle = { path = "../rustc_middle" }
rustc_parse = { path = "../rustc_parse" }
# We must use the proc_macro version that we will compile proc-macros against,
# not the one from our own sysroot.
@ -28,6 +29,7 @@ rustc_proc_macro = { path = "../rustc_proc_macro" }
rustc_serialize = { path = "../rustc_serialize" }
rustc_session = { path = "../rustc_session" }
rustc_span = { path = "../rustc_span" }
scoped-tls = "1.0"
smallvec = { version = "1.8.1", features = ["union", "may_dangle"] }
thin-vec = "0.2.12"
tracing = "0.1"

View file

@ -29,4 +29,8 @@ pub mod module;
#[allow(rustc::untranslatable_diagnostic)]
pub mod proc_macro;
/// Registers this crate's query providers: wires the `derive_macro_expansion`
/// query to the proc-macro expansion implementation.
pub fn provide(providers: &mut rustc_middle::query::Providers) {
providers.derive_macro_expansion = proc_macro::provide_derive_macro_expansion;
}
rustc_fluent_macro::fluent_messages! { "../messages.ftl" }

View file

@ -1,9 +1,11 @@
use rustc_ast::tokenstream::TokenStream;
use rustc_errors::ErrorGuaranteed;
use rustc_middle::ty::{self, TyCtxt};
use rustc_parse::parser::{ForceCollect, Parser};
use rustc_session::Session;
use rustc_session::config::ProcMacroExecutionStrategy;
use rustc_span::Span;
use rustc_span::profiling::SpannedEventArgRecorder;
use rustc_span::{LocalExpnId, Span};
use {rustc_ast as ast, rustc_proc_macro as pm};
use crate::base::{self, *};
@ -30,9 +32,9 @@ impl<T> pm::bridge::server::MessagePipe<T> for MessagePipe<T> {
}
}
fn exec_strategy(ecx: &ExtCtxt<'_>) -> impl pm::bridge::server::ExecutionStrategy + 'static {
/// Chooses how proc-macro client code is executed: same-thread or cross-thread,
/// driven by the `-Z proc-macro-execution-strategy` unstable session option.
fn exec_strategy(sess: &Session) -> impl pm::bridge::server::ExecutionStrategy + 'static {
pm::bridge::server::MaybeCrossThread::<MessagePipe<_>>::new(
sess.opts.unstable_opts.proc_macro_execution_strategy
== ProcMacroExecutionStrategy::CrossThread,
)
}
@ -54,7 +56,7 @@ impl base::BangProcMacro for BangProcMacro {
});
let proc_macro_backtrace = ecx.ecfg.proc_macro_backtrace;
let strategy = exec_strategy(ecx);
let strategy = exec_strategy(ecx.sess);
let server = proc_macro_server::Rustc::new(ecx);
self.client.run(&strategy, server, input, proc_macro_backtrace).map_err(|e| {
ecx.dcx().emit_err(errors::ProcMacroPanicked {
@ -85,7 +87,7 @@ impl base::AttrProcMacro for AttrProcMacro {
});
let proc_macro_backtrace = ecx.ecfg.proc_macro_backtrace;
let strategy = exec_strategy(ecx);
let strategy = exec_strategy(ecx.sess);
let server = proc_macro_server::Rustc::new(ecx);
self.client.run(&strategy, server, annotation, annotated, proc_macro_backtrace).map_err(
|e| {
@ -101,7 +103,7 @@ impl base::AttrProcMacro for AttrProcMacro {
}
pub struct DeriveProcMacro {
pub client: pm::bridge::client::Client<pm::TokenStream, pm::TokenStream>,
pub client: DeriveClient,
}
impl MultiItemModifier for DeriveProcMacro {
@ -113,6 +115,13 @@ impl MultiItemModifier for DeriveProcMacro {
item: Annotatable,
_is_derive_const: bool,
) -> ExpandResult<Vec<Annotatable>, Annotatable> {
let _timer = ecx.sess.prof.generic_activity_with_arg_recorder(
"expand_derive_proc_macro_outer",
|recorder| {
recorder.record_arg_with_span(ecx.sess.source_map(), ecx.expansion_descr(), span);
},
);
// We need special handling for statement items
// (e.g. `fn foo() { #[derive(Debug)] struct Bar; }`)
let is_stmt = matches!(item, Annotatable::Stmt(..));
@ -123,36 +132,31 @@ impl MultiItemModifier for DeriveProcMacro {
// altogether. See #73345.
crate::base::ann_pretty_printing_compatibility_hack(&item, &ecx.sess.psess);
let input = item.to_tokens();
let stream = {
let _timer =
ecx.sess.prof.generic_activity_with_arg_recorder("expand_proc_macro", |recorder| {
recorder.record_arg_with_span(
ecx.sess.source_map(),
ecx.expansion_descr(),
span,
);
});
let proc_macro_backtrace = ecx.ecfg.proc_macro_backtrace;
let strategy = exec_strategy(ecx);
let server = proc_macro_server::Rustc::new(ecx);
match self.client.run(&strategy, server, input, proc_macro_backtrace) {
Ok(stream) => stream,
Err(e) => {
ecx.dcx().emit_err({
errors::ProcMacroDerivePanicked {
span,
message: e.as_str().map(|message| {
errors::ProcMacroDerivePanickedHelp { message: message.into() }
}),
}
});
return ExpandResult::Ready(vec![]);
}
}
let invoc_id = ecx.current_expansion.id;
let res = if ecx.sess.opts.incremental.is_some()
&& ecx.sess.opts.unstable_opts.cache_proc_macros
{
ty::tls::with(|tcx| {
let input = &*tcx.arena.alloc(input);
let key: (LocalExpnId, &TokenStream) = (invoc_id, input);
QueryDeriveExpandCtx::enter(ecx, self.client, move || {
tcx.derive_macro_expansion(key).cloned()
})
})
} else {
expand_derive_macro(invoc_id, input, ecx, self.client)
};
let Ok(output) = res else {
// error will already have been emitted
return ExpandResult::Ready(vec![]);
};
let error_count_before = ecx.dcx().err_count();
let mut parser = Parser::new(&ecx.sess.psess, stream, Some("proc-macro derive"));
let mut parser = Parser::new(&ecx.sess.psess, output, Some("proc-macro derive"));
let mut items = vec![];
loop {
@ -180,3 +184,101 @@ impl MultiItemModifier for DeriveProcMacro {
ExpandResult::Ready(items)
}
}
/// Provide a query for computing the output of a derive macro.
///
/// The key is the invocation's `LocalExpnId` plus the input `TokenStream`;
/// the expansion context itself is recovered from a scoped thread-local
/// (see `QueryDeriveExpandCtx`), since it cannot be part of the query key.
pub(super) fn provide_derive_macro_expansion<'tcx>(
tcx: TyCtxt<'tcx>,
key: (LocalExpnId, &'tcx TokenStream),
) -> Result<&'tcx TokenStream, ()> {
let (invoc_id, input) = key;
// Make sure that we invalidate the query when the crate defining the proc macro changes
let _ = tcx.crate_hash(invoc_id.expn_data().macro_def_id.unwrap().krate);
// The successful output is arena-allocated so the query can hand out a
// `&'tcx TokenStream`.
QueryDeriveExpandCtx::with(|ecx, client| {
expand_derive_macro(invoc_id, input.clone(), ecx, client).map(|ts| &*tcx.arena.alloc(ts))
})
}
/// The bridge client type used for derive proc macros: `TokenStream` in,
/// `TokenStream` out.
type DeriveClient = pm::bridge::client::Client<pm::TokenStream, pm::TokenStream>;
/// Runs the derive proc macro `client` over `input`, returning the expanded
/// token stream.
///
/// On panic inside the macro, emits a `ProcMacroDerivePanicked` diagnostic at
/// the invocation's call site and returns `Err(())` (the error has already
/// been reported to the caller's diagnostics context).
fn expand_derive_macro(
invoc_id: LocalExpnId,
input: TokenStream,
ecx: &mut ExtCtxt<'_>,
client: DeriveClient,
) -> Result<TokenStream, ()> {
// Self-profiler event covering the actual macro execution, tagged with the
// invocation's description and call-site span.
let _timer =
ecx.sess.prof.generic_activity_with_arg_recorder("expand_proc_macro", |recorder| {
let invoc_expn_data = invoc_id.expn_data();
let span = invoc_expn_data.call_site;
let event_arg = invoc_expn_data.kind.descr();
recorder.record_arg_with_span(ecx.sess.source_map(), event_arg.clone(), span);
});
let proc_macro_backtrace = ecx.ecfg.proc_macro_backtrace;
let strategy = exec_strategy(ecx.sess);
let server = proc_macro_server::Rustc::new(ecx);
match client.run(&strategy, server, input, proc_macro_backtrace) {
Ok(stream) => Ok(stream),
Err(e) => {
let invoc_expn_data = invoc_id.expn_data();
let span = invoc_expn_data.call_site;
ecx.dcx().emit_err({
errors::ProcMacroDerivePanicked {
span,
// Attach the panic message as a help note when one is available.
message: e.as_str().map(|message| errors::ProcMacroDerivePanickedHelp {
message: message.into(),
}),
}
});
Err(())
}
}
}
/// Stores the context necessary to expand a derive proc macro via a query.
///
/// The query system cannot carry an `&mut ExtCtxt` in its key, so the context
/// is smuggled through a scoped thread-local for the duration of the query.
struct QueryDeriveExpandCtx {
/// Type-erased version of `&mut ExtCtxt`
// Erased to `*mut ()` to drop the lifetimes; only re-cast inside `with`,
// which is guaranteed to run within `enter`'s borrow.
expansion_ctx: *mut (),
client: DeriveClient,
}
impl QueryDeriveExpandCtx {
/// Store the extension context and the client into the thread local value.
/// It will be accessible via the `with` method while `f` is active.
fn enter<F, R>(ecx: &mut ExtCtxt<'_>, client: DeriveClient, f: F) -> R
where
F: FnOnce() -> R,
{
// We need erasure to get rid of the lifetime
let ctx = Self { expansion_ctx: ecx as *mut _ as *mut (), client };
DERIVE_EXPAND_CTX.set(&ctx, || f())
}
/// Accesses the thread local value of the derive expansion context.
/// Must be called while the `enter` function is active.
fn with<F, R>(f: F) -> R
where
F: for<'a, 'b> FnOnce(&'b mut ExtCtxt<'a>, DeriveClient) -> R,
{
DERIVE_EXPAND_CTX.with(|ctx| {
let ectx = {
let casted = ctx.expansion_ctx.cast::<ExtCtxt<'_>>();
// SAFETY: We can only get the value from `with` while the `enter` function
// is active (on the callstack), and that function's signature ensures that the
// lifetime is valid.
// If `with` is called at some other time, it will panic due to usage of
// `scoped_tls::with`.
unsafe { casted.as_mut().unwrap() }
};
f(ectx, ctx.client)
})
}
}
// When we invoke a query to expand a derive proc macro, we need to provide it with the expansion
// context and derive Client. We do that using a thread-local.
scoped_tls::scoped_thread_local!(static DERIVE_EXPAND_CTX: QueryDeriveExpandCtx);

View file

@ -103,7 +103,7 @@ pub struct DisambiguatorState {
}
impl DisambiguatorState {
pub fn new() -> Self {
pub const fn new() -> Self {
Self { next: Default::default() }
}

View file

@ -26,7 +26,7 @@ use rustc_hir::def_id::LocalDefId;
use rustc_hir::{
Attribute, ImplItemKind, ItemKind as HirItem, Node as HirNode, TraitItemKind, intravisit,
};
use rustc_middle::dep_graph::{DepNode, DepNodeExt, label_strs};
use rustc_middle::dep_graph::{DepNode, DepNodeExt, dep_kind_from_label, label_strs};
use rustc_middle::hir::nested_filter;
use rustc_middle::ty::TyCtxt;
use rustc_span::{Span, Symbol, sym};
@ -357,17 +357,6 @@ impl<'tcx> DirtyCleanVisitor<'tcx> {
}
}
fn assert_loaded_from_disk(&self, item_span: Span, dep_node: DepNode) {
debug!("assert_loaded_from_disk({:?})", dep_node);
if !self.tcx.dep_graph.debug_was_loaded_from_disk(dep_node) {
let dep_node_str = self.dep_node_str(&dep_node);
self.tcx
.dcx()
.emit_err(errors::NotLoaded { span: item_span, dep_node_str: &dep_node_str });
}
}
fn check_item(&mut self, item_id: LocalDefId) {
let item_span = self.tcx.def_span(item_id.to_def_id());
let def_path_hash = self.tcx.def_path_hash(item_id.to_def_id());
@ -385,8 +374,27 @@ impl<'tcx> DirtyCleanVisitor<'tcx> {
self.assert_dirty(item_span, dep_node);
}
for label in assertion.loaded_from_disk.items().into_sorted_stable_ord() {
let dep_node = DepNode::from_label_string(self.tcx, label, def_path_hash).unwrap();
self.assert_loaded_from_disk(item_span, dep_node);
match DepNode::from_label_string(self.tcx, label, def_path_hash) {
Ok(dep_node) => {
if !self.tcx.dep_graph.debug_was_loaded_from_disk(dep_node) {
let dep_node_str = self.dep_node_str(&dep_node);
self.tcx.dcx().emit_err(errors::NotLoaded {
span: item_span,
dep_node_str: &dep_node_str,
});
}
}
// Opaque/unit hash, we only know the dep kind
Err(()) => {
let dep_kind = dep_kind_from_label(label);
if !self.tcx.dep_graph.debug_dep_kind_was_loaded_from_disk(dep_kind) {
self.tcx.dcx().emit_err(errors::NotLoaded {
span: item_span,
dep_node_str: &label,
});
}
}
}
}
}
}

View file

@ -634,22 +634,12 @@ impl<T: Idx> ChunkedBitSet<T> {
match *chunk {
Zeros => {
if chunk_domain_size > 1 {
#[cfg(feature = "nightly")]
let mut words = {
// We take some effort to avoid copying the words.
let words = Rc::<[Word; CHUNK_WORDS]>::new_zeroed();
// SAFETY: `words` can safely be all zeroes.
unsafe { words.assume_init() }
};
#[cfg(not(feature = "nightly"))]
let mut words = {
// FIXME: unconditionally use `Rc::new_zeroed` once it is stable (#129396).
let words = mem::MaybeUninit::<[Word; CHUNK_WORDS]>::zeroed();
// SAFETY: `words` can safely be all zeroes.
let words = unsafe { words.assume_init() };
// Unfortunate possibly-large copy
Rc::new(words)
};
let words_ref = Rc::get_mut(&mut words).unwrap();
let (word_index, mask) = chunk_word_index_and_mask(elem);
@ -695,22 +685,12 @@ impl<T: Idx> ChunkedBitSet<T> {
Zeros => false,
Ones => {
if chunk_domain_size > 1 {
#[cfg(feature = "nightly")]
let mut words = {
// We take some effort to avoid copying the words.
let words = Rc::<[Word; CHUNK_WORDS]>::new_zeroed();
// SAFETY: `words` can safely be all zeroes.
unsafe { words.assume_init() }
};
#[cfg(not(feature = "nightly"))]
let mut words = {
// FIXME: unconditionally use `Rc::new_zeroed` once it is stable (#129396).
let words = mem::MaybeUninit::<[Word; CHUNK_WORDS]>::zeroed();
// SAFETY: `words` can safely be all zeroes.
let words = unsafe { words.assume_init() };
// Unfortunate possibly-large copy
Rc::new(words)
};
let words_ref = Rc::get_mut(&mut words).unwrap();
// Set only the bits in use.

View file

@ -889,6 +889,7 @@ pub static DEFAULT_QUERY_PROVIDERS: LazyLock<Providers> = LazyLock::new(|| {
providers.queries.env_var_os = env_var_os;
limits::provide(&mut providers.queries);
proc_macro_decls::provide(&mut providers.queries);
rustc_expand::provide(&mut providers.queries);
rustc_const_eval::provide(providers);
rustc_middle::hir::provide(&mut providers.queries);
rustc_borrowck::provide(&mut providers.queries);

View file

@ -93,7 +93,7 @@ struct QueryModifiers {
cache: Option<(Option<Pat>, Block)>,
/// A cycle error for this query aborting the compilation with a fatal error.
fatal_cycle: Option<Ident>,
cycle_fatal: Option<Ident>,
/// A cycle error results in a delay_bug call
cycle_delay_bug: Option<Ident>,
@ -136,7 +136,7 @@ fn parse_query_modifiers(input: ParseStream<'_>) -> Result<QueryModifiers> {
let mut arena_cache = None;
let mut cache = None;
let mut desc = None;
let mut fatal_cycle = None;
let mut cycle_fatal = None;
let mut cycle_delay_bug = None;
let mut cycle_stash = None;
let mut no_hash = None;
@ -189,8 +189,8 @@ fn parse_query_modifiers(input: ParseStream<'_>) -> Result<QueryModifiers> {
try_insert!(cache = (args, block));
} else if modifier == "arena_cache" {
try_insert!(arena_cache = modifier);
} else if modifier == "fatal_cycle" {
try_insert!(fatal_cycle = modifier);
} else if modifier == "cycle_fatal" {
try_insert!(cycle_fatal = modifier);
} else if modifier == "cycle_delay_bug" {
try_insert!(cycle_delay_bug = modifier);
} else if modifier == "cycle_stash" {
@ -220,7 +220,7 @@ fn parse_query_modifiers(input: ParseStream<'_>) -> Result<QueryModifiers> {
arena_cache,
cache,
desc,
fatal_cycle,
cycle_fatal,
cycle_delay_bug,
cycle_stash,
no_hash,
@ -366,8 +366,8 @@ pub(super) fn rustc_queries(input: TokenStream) -> TokenStream {
}
passthrough!(
fatal_cycle,
arena_cache,
cycle_fatal,
cycle_delay_bug,
cycle_stash,
no_hash,

View file

@ -119,6 +119,7 @@ macro_rules! arena_types {
[decode] specialization_graph: rustc_middle::traits::specialization_graph::Graph,
[] crate_inherent_impls: rustc_middle::ty::CrateInherentImpls,
[] hir_owner_nodes: rustc_hir::OwnerNodes<'tcx>,
[decode] token_stream: rustc_ast::tokenstream::TokenStream,
]);
)
}

View file

@ -176,6 +176,12 @@ impl DepNodeExt for DepNode {
}
}
/// Maps a query label to its DepKind. Panics if a query with the given label does not exist.
pub fn dep_kind_from_label(label: &str) -> DepKind {
dep_kind_from_label_string(label)
.unwrap_or_else(|_| panic!("Query label {label} does not exist"))
}
impl<'tcx> DepNodeParams<TyCtxt<'tcx>> for () {
#[inline(always)]
fn fingerprint_style() -> FingerprintStyle {

View file

@ -8,7 +8,7 @@ use crate::ty::{self, TyCtxt};
#[macro_use]
mod dep_node;
pub use dep_node::{DepKind, DepNode, DepNodeExt, dep_kinds, label_strs};
pub use dep_node::{DepKind, DepNode, DepNodeExt, dep_kind_from_label, dep_kinds, label_strs};
pub(crate) use dep_node::{make_compile_codegen_unit, make_compile_mono_item, make_metadata};
pub use rustc_query_system::dep_graph::debug::{DepNodeFilter, EdgeFilter};
pub use rustc_query_system::dep_graph::{

View file

@ -2,6 +2,7 @@ use std::ffi::OsStr;
use std::intrinsics::transmute_unchecked;
use std::mem::MaybeUninit;
use rustc_ast::tokenstream::TokenStream;
use rustc_span::ErrorGuaranteed;
use rustc_span::source_map::Spanned;
@ -188,6 +189,10 @@ impl EraseType
>()];
}
// Erasure for the `derive_macro_expansion` query result: stored as the raw
// bytes of `Result<&TokenStream, ()>` with the lifetime erased to 'static.
impl EraseType for Result<&'_ TokenStream, ()> {
type Result = [u8; size_of::<Result<&'static TokenStream, ()>>()];
}
impl<T> EraseType for Option<&'_ T> {
type Result = [u8; size_of::<Option<&'static ()>>()];
}

View file

@ -2,11 +2,12 @@
use std::ffi::OsStr;
use rustc_ast::tokenstream::TokenStream;
use rustc_hir::def_id::{CrateNum, DefId, LOCAL_CRATE, LocalDefId, LocalModDefId, ModDefId};
use rustc_hir::hir_id::{HirId, OwnerId};
use rustc_query_system::dep_graph::DepNodeIndex;
use rustc_query_system::query::{DefIdCache, DefaultCache, SingleCache, VecCache};
use rustc_span::{DUMMY_SP, Ident, Span, Symbol};
use rustc_span::{DUMMY_SP, Ident, LocalExpnId, Span, Symbol};
use crate::infer::canonical::CanonicalQueryInput;
use crate::mir::mono::CollectionMode;
@ -616,6 +617,19 @@ impl Key for (LocalDefId, HirId) {
}
}
// Query-key impl for the `derive_macro_expansion` query:
// (macro invocation id, macro input token stream).
impl<'tcx> Key for (LocalExpnId, &'tcx TokenStream) {
type Cache<V> = DefaultCache<Self, V>;
// Diagnostics for this key point at the macro invocation's call site.
fn default_span(&self, _tcx: TyCtxt<'_>) -> Span {
self.0.expn_data().call_site
}
#[inline(always)]
fn key_as_def_id(&self) -> Option<DefId> {
None
}
}
impl<'tcx> Key for (ValidityRequirement, ty::PseudoCanonicalInput<'tcx, Ty<'tcx>>) {
type Cache<V> = DefaultCache<Self, V>;

View file

@ -28,7 +28,7 @@
//! - `desc { ... }`: Sets the human-readable description for diagnostics and profiling. Required for every query.
//! - `arena_cache`: Use an arena for in-memory caching of the query result.
//! - `cache_on_disk_if { ... }`: Cache the query result to disk if the provided block evaluates to true.
//! - `fatal_cycle`: If a dependency cycle is detected, abort compilation with a fatal error.
//! - `cycle_fatal`: If a dependency cycle is detected, abort compilation with a fatal error.
//! - `cycle_delay_bug`: If a dependency cycle is detected, emit a delayed bug instead of aborting immediately.
//! - `cycle_stash`: If a dependency cycle is detected, stash the error for later handling.
//! - `no_hash`: Do not hash the query result for incremental compilation; just mark as dirty if recomputed.
@ -70,6 +70,7 @@ use std::sync::Arc;
use rustc_abi::Align;
use rustc_arena::TypedArena;
use rustc_ast::expand::allocator::AllocatorKind;
use rustc_ast::tokenstream::TokenStream;
use rustc_data_structures::fx::{FxIndexMap, FxIndexSet};
use rustc_data_structures::sorted_map::SortedMap;
use rustc_data_structures::steal::Steal;
@ -87,7 +88,7 @@ use rustc_index::IndexVec;
use rustc_lint_defs::LintId;
use rustc_macros::rustc_queries;
use rustc_query_system::ich::StableHashingContext;
use rustc_query_system::query::{QueryMode, QueryStackDeferred, QueryState};
use rustc_query_system::query::{QueryMode, QueryState};
use rustc_session::Limits;
use rustc_session::config::{EntryFnType, OptLevel, OutputFilenames, SymbolManglingVersion};
use rustc_session::cstore::{
@ -96,7 +97,7 @@ use rustc_session::cstore::{
use rustc_session::lint::LintExpectationId;
use rustc_span::def_id::LOCAL_CRATE;
use rustc_span::source_map::Spanned;
use rustc_span::{DUMMY_SP, Span, Symbol};
use rustc_span::{DUMMY_SP, LocalExpnId, Span, Symbol};
use rustc_target::spec::PanicStrategy;
use {rustc_abi as abi, rustc_ast as ast, rustc_hir as hir};
@ -160,9 +161,20 @@ pub mod plumbing;
// The result type of each query must implement `Clone`, and additionally
// `ty::query::values::Value`, which produces an appropriate placeholder
// (error) value if the query resulted in a query cycle.
// Queries marked with `fatal_cycle` do not need the latter implementation,
// Queries marked with `cycle_fatal` do not need the latter implementation,
// as they will raise an fatal error on query cycles instead.
rustc_queries! {
/// Caches the expansion of a derive proc macro, e.g. `#[derive(Serialize)]`.
/// The key is:
/// - A unique key corresponding to the invocation of a macro.
/// - Token stream which serves as an input to the macro.
///
/// The output is the token stream generated by the proc macro.
query derive_macro_expansion(key: (LocalExpnId, &'tcx TokenStream)) -> Result<&'tcx TokenStream, ()> {
desc { "expanding a derive (proc) macro" }
cache_on_disk_if { true }
}
/// This exists purely for testing the interactions between delayed bugs and incremental.
query trigger_delayed_bug(key: DefId) {
desc { "triggering a delayed bug for testing incremental" }
@ -584,7 +596,7 @@ rustc_queries! {
}
query is_panic_runtime(_: CrateNum) -> bool {
fatal_cycle
cycle_fatal
desc { "checking if the crate is_panic_runtime" }
separate_provide_extern
}
@ -1315,7 +1327,7 @@ rustc_queries! {
/// Return the set of (transitive) callees that may result in a recursive call to `key`,
/// if we were able to walk all callees.
query mir_callgraph_cyclic(key: LocalDefId) -> &'tcx Option<UnordSet<LocalDefId>> {
fatal_cycle
cycle_fatal
arena_cache
desc { |tcx|
"computing (transitive) callees of `{}` that may recurse",
@ -1326,7 +1338,7 @@ rustc_queries! {
/// Obtain all the calls into other local functions
query mir_inliner_callees(key: ty::InstanceKind<'tcx>) -> &'tcx [(DefId, GenericArgsRef<'tcx>)] {
fatal_cycle
cycle_fatal
desc { |tcx|
"computing all local function calls in `{}`",
tcx.def_path_str(key.def_id()),
@ -1822,31 +1834,31 @@ rustc_queries! {
}
query is_compiler_builtins(_: CrateNum) -> bool {
fatal_cycle
cycle_fatal
desc { "checking if the crate is_compiler_builtins" }
separate_provide_extern
}
query has_global_allocator(_: CrateNum) -> bool {
// This query depends on untracked global state in CStore
eval_always
fatal_cycle
cycle_fatal
desc { "checking if the crate has_global_allocator" }
separate_provide_extern
}
query has_alloc_error_handler(_: CrateNum) -> bool {
// This query depends on untracked global state in CStore
eval_always
fatal_cycle
cycle_fatal
desc { "checking if the crate has_alloc_error_handler" }
separate_provide_extern
}
query has_panic_handler(_: CrateNum) -> bool {
fatal_cycle
cycle_fatal
desc { "checking if the crate has_panic_handler" }
separate_provide_extern
}
query is_profiler_runtime(_: CrateNum) -> bool {
fatal_cycle
cycle_fatal
desc { "checking if a crate is `#![profiler_runtime]`" }
separate_provide_extern
}
@ -1855,22 +1867,22 @@ rustc_queries! {
cache_on_disk_if { true }
}
query required_panic_strategy(_: CrateNum) -> Option<PanicStrategy> {
fatal_cycle
cycle_fatal
desc { "getting a crate's required panic strategy" }
separate_provide_extern
}
query panic_in_drop_strategy(_: CrateNum) -> PanicStrategy {
fatal_cycle
cycle_fatal
desc { "getting a crate's configured panic-in-drop strategy" }
separate_provide_extern
}
query is_no_builtins(_: CrateNum) -> bool {
fatal_cycle
cycle_fatal
desc { "getting whether a crate has `#![no_builtins]`" }
separate_provide_extern
}
query symbol_mangling_version(_: CrateNum) -> SymbolManglingVersion {
fatal_cycle
cycle_fatal
desc { "getting a crate's symbol mangling version" }
separate_provide_extern
}

View file

@ -785,6 +785,13 @@ impl<'a, 'tcx> Decodable<CacheDecoder<'a, 'tcx>>
}
}
impl<'a, 'tcx> Decodable<CacheDecoder<'a, 'tcx>> for &'tcx rustc_ast::tokenstream::TokenStream {
#[inline]
fn decode(d: &mut CacheDecoder<'a, 'tcx>) -> Self {
RefDecodable::decode(d)
}
}
macro_rules! impl_ref_decoder {
(<$tcx:tt> $($ty:ty,)*) => {
$(impl<'a, $tcx> Decodable<CacheDecoder<'a, $tcx>> for &$tcx [$ty] {

View file

@ -278,7 +278,7 @@ macro_rules! define_callbacks {
($V)
);
/// This function takes `ProvidedValue` and coverts it to an erased `Value` by
/// This function takes `ProvidedValue` and converts it to an erased `Value` by
/// allocating it on an arena if the query has the `arena_cache` modifier. The
/// value is then erased and returned. This will happen when computing the query
/// using a provider or decoding a stored result.
@ -431,7 +431,7 @@ macro_rules! define_callbacks {
#[derive(Default)]
pub struct QueryStates<'tcx> {
$(
pub $name: QueryState<$($K)*, QueryStackDeferred<'tcx>>,
pub $name: QueryState<$($K)*>,
)*
}
@ -532,7 +532,7 @@ macro_rules! define_feedable {
// The result type of each query must implement `Clone`, and additionally
// `ty::query::values::Value`, which produces an appropriate placeholder
// (error) value if the query resulted in a query cycle.
// Queries marked with `fatal_cycle` do not need the latter implementation,
// Queries marked with `cycle_fatal` do not need the latter implementation,
// as they will raise a fatal error on query cycles instead.
mod sealed {

View file

@ -88,7 +88,7 @@ impl<'tcx> Value<TyCtxt<'tcx>> for Representability {
if info.query.dep_kind == dep_kinds::representability
&& let Some(field_id) = info.query.def_id
&& let Some(field_id) = field_id.as_local()
&& let Some(DefKind::Field) = info.query.info.def_kind
&& let Some(DefKind::Field) = info.query.def_kind
{
let parent_id = tcx.parent(field_id.to_def_id());
let item_id = match tcx.def_kind(parent_id) {
@ -224,7 +224,7 @@ impl<'tcx, T> Value<TyCtxt<'tcx>> for Result<T, &'_ ty::layout::LayoutError<'_>>
continue;
};
let frame_span =
frame.query.info.default_span(cycle[(i + 1) % cycle.len()].span);
frame.query.default_span(cycle[(i + 1) % cycle.len()].span);
if frame_span.is_dummy() {
continue;
}

View file

@ -21,8 +21,8 @@ use rustc_middle::ty::TyCtxt;
use rustc_query_system::dep_graph::SerializedDepNodeIndex;
use rustc_query_system::ich::StableHashingContext;
use rustc_query_system::query::{
CycleError, HashResult, QueryCache, QueryConfig, QueryMap, QueryMode, QueryStackDeferred,
QueryState, get_query_incr, get_query_non_incr,
CycleError, HashResult, QueryCache, QueryConfig, QueryMap, QueryMode, QueryState,
get_query_incr, get_query_non_incr,
};
use rustc_query_system::{HandleCycleError, Value};
use rustc_span::{ErrorGuaranteed, Span};
@ -79,10 +79,7 @@ where
}
#[inline(always)]
fn query_state<'a>(
self,
qcx: QueryCtxt<'tcx>,
) -> &'a QueryState<Self::Key, QueryStackDeferred<'tcx>>
fn query_state<'a>(self, qcx: QueryCtxt<'tcx>) -> &'a QueryState<Self::Key>
where
QueryCtxt<'tcx>: 'a,
{
@ -91,7 +88,7 @@ where
unsafe {
&*(&qcx.tcx.query_system.states as *const QueryStates<'tcx>)
.byte_add(self.dynamic.query_state)
.cast::<QueryState<Self::Key, QueryStackDeferred<'tcx>>>()
.cast::<QueryState<Self::Key>>()
}
}

View file

@ -6,7 +6,6 @@ use std::num::NonZero;
use rustc_data_structures::jobserver::Proxy;
use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
use rustc_data_structures::sync::{DynSend, DynSync};
use rustc_data_structures::unord::UnordMap;
use rustc_hashes::Hash64;
use rustc_hir::limit::Limit;
@ -27,8 +26,8 @@ use rustc_middle::ty::{self, TyCtxt};
use rustc_query_system::dep_graph::{DepNodeParams, HasDepContext};
use rustc_query_system::ich::StableHashingContext;
use rustc_query_system::query::{
QueryCache, QueryConfig, QueryContext, QueryJobId, QueryMap, QuerySideEffect,
QueryStackDeferred, QueryStackFrame, QueryStackFrameExtra, force_query,
QueryCache, QueryConfig, QueryContext, QueryJobId, QueryMap, QuerySideEffect, QueryStackFrame,
force_query,
};
use rustc_query_system::{QueryOverflow, QueryOverflowNote};
use rustc_serialize::{Decodable, Encodable};
@ -67,9 +66,7 @@ impl<'tcx> HasDepContext for QueryCtxt<'tcx> {
}
}
impl<'tcx> QueryContext for QueryCtxt<'tcx> {
type QueryInfo = QueryStackDeferred<'tcx>;
impl QueryContext for QueryCtxt<'_> {
#[inline]
fn jobserver_proxy(&self) -> &Proxy {
&*self.jobserver_proxy
@ -98,10 +95,7 @@ impl<'tcx> QueryContext for QueryCtxt<'tcx> {
/// Prefer passing `false` to `require_complete` to avoid potential deadlocks,
/// especially when called from within a deadlock handler, unless a
/// complete map is needed and no deadlock is possible at this call site.
fn collect_active_jobs(
self,
require_complete: bool,
) -> Result<QueryMap<QueryStackDeferred<'tcx>>, QueryMap<QueryStackDeferred<'tcx>>> {
fn collect_active_jobs(self, require_complete: bool) -> Result<QueryMap, QueryMap> {
let mut jobs = QueryMap::default();
let mut complete = true;
@ -114,13 +108,6 @@ impl<'tcx> QueryContext for QueryCtxt<'tcx> {
if complete { Ok(jobs) } else { Err(jobs) }
}
fn lift_query_info(
self,
info: &QueryStackDeferred<'tcx>,
) -> rustc_query_system::query::QueryStackFrameExtra {
info.extract()
}
// Interactions with on_disk_cache
fn load_side_effect(
self,
@ -181,10 +168,7 @@ impl<'tcx> QueryContext for QueryCtxt<'tcx> {
self.sess.dcx().emit_fatal(QueryOverflow {
span: info.job.span,
note: QueryOverflowNote {
desc: self.lift_query_info(&info.query.info).description,
depth,
},
note: QueryOverflowNote { desc: info.query.description, depth },
suggested_limit,
crate_name: self.crate_name(LOCAL_CRATE),
});
@ -219,7 +203,7 @@ macro_rules! handle_cycle_error {
([]) => {{
rustc_query_system::HandleCycleError::Error
}};
([(fatal_cycle) $($rest:tt)*]) => {{
([(cycle_fatal) $($rest:tt)*]) => {{
rustc_query_system::HandleCycleError::Fatal
}};
([(cycle_stash) $($rest:tt)*]) => {{
@ -321,17 +305,16 @@ macro_rules! should_ever_cache_on_disk {
};
}
fn create_query_frame_extra<'tcx, K: Key + Copy + 'tcx>(
(tcx, key, kind, name, do_describe): (
TyCtxt<'tcx>,
K,
DepKind,
&'static str,
fn(TyCtxt<'tcx>, K) -> String,
),
) -> QueryStackFrameExtra {
let def_id = key.key_as_def_id();
pub(crate) fn create_query_frame<
'tcx,
K: Copy + Key + for<'a> HashStable<StableHashingContext<'a>>,
>(
tcx: TyCtxt<'tcx>,
do_describe: fn(TyCtxt<'tcx>, K) -> String,
key: K,
kind: DepKind,
name: &'static str,
) -> QueryStackFrame {
// If reduced queries are requested, we may be printing a query stack due
// to a panic. Avoid using `default_span` and `def_kind` in that case.
let reduce_queries = with_reduced_queries();
@ -343,49 +326,36 @@ fn create_query_frame_extra<'tcx, K: Key + Copy + 'tcx>(
} else {
description
};
let span = if kind == dep_graph::dep_kinds::def_span || reduce_queries {
let span = if reduce_queries {
// The `def_span` query is used to calculate `default_span`,
// so exit to avoid infinite recursion.
None
} else {
Some(key.default_span(tcx))
Some(tcx.with_reduced_queries(|| key.default_span(tcx)))
};
let def_kind = if kind == dep_graph::dep_kinds::def_kind || reduce_queries {
let def_id = key.key_as_def_id();
let def_kind = if reduce_queries {
// Try to avoid infinite recursion.
None
} else {
def_id.and_then(|def_id| def_id.as_local()).map(|def_id| tcx.def_kind(def_id))
def_id
.and_then(|def_id| def_id.as_local())
.map(|def_id| tcx.with_reduced_queries(|| tcx.def_kind(def_id)))
};
QueryStackFrameExtra::new(description, span, def_kind)
}
pub(crate) fn create_query_frame<
'tcx,
K: Copy + DynSend + DynSync + Key + for<'a> HashStable<StableHashingContext<'a>> + 'tcx,
>(
tcx: TyCtxt<'tcx>,
do_describe: fn(TyCtxt<'tcx>, K) -> String,
key: K,
kind: DepKind,
name: &'static str,
) -> QueryStackFrame<QueryStackDeferred<'tcx>> {
let def_id = key.key_as_def_id();
let hash = || {
tcx.with_stable_hashing_context(|mut hcx| {
let mut hasher = StableHasher::new();
kind.as_usize().hash_stable(&mut hcx, &mut hasher);
key.hash_stable(&mut hcx, &mut hasher);
hasher.finish::<Hash64>()
})
};
let def_id_for_ty_in_cycle = key.def_id_for_ty_in_cycle();
let info =
QueryStackDeferred::new((tcx, key, kind, name, do_describe), create_query_frame_extra);
let hash = tcx.with_stable_hashing_context(|mut hcx| {
let mut hasher = StableHasher::new();
kind.as_usize().hash_stable(&mut hcx, &mut hasher);
key.hash_stable(&mut hcx, &mut hasher);
hasher.finish::<Hash64>()
});
QueryStackFrame::new(info, kind, hash, def_id, def_id_for_ty_in_cycle)
QueryStackFrame::new(description, span, def_id, def_kind, kind, def_id_for_ty_in_cycle, hash)
}
pub(crate) fn encode_query_results<'a, 'tcx, Q>(
@ -737,7 +707,7 @@ macro_rules! define_queries {
pub(crate) fn collect_active_jobs<'tcx>(
tcx: TyCtxt<'tcx>,
qmap: &mut QueryMap<QueryStackDeferred<'tcx>>,
qmap: &mut QueryMap,
require_complete: bool,
) -> Option<()> {
let make_query = |tcx, key| {
@ -821,7 +791,7 @@ macro_rules! define_queries {
// These arrays are used for iteration and can't be indexed by `DepKind`.
const COLLECT_ACTIVE_JOBS: &[
for<'tcx> fn(TyCtxt<'tcx>, &mut QueryMap<QueryStackDeferred<'tcx>>, bool) -> Option<()>
for<'tcx> fn(TyCtxt<'tcx>, &mut QueryMap, bool) -> Option<()>
] =
&[$(query_impl::$name::collect_active_jobs),*];

View file

@ -795,6 +795,18 @@ impl<D: Deps> DepGraph<D> {
self.data.as_ref().unwrap().debug_loaded_from_disk.lock().contains(&dep_node)
}
pub fn debug_dep_kind_was_loaded_from_disk(&self, dep_kind: DepKind) -> bool {
// We only check if we have a dep node corresponding to the given dep kind.
#[allow(rustc::potential_query_instability)]
self.data
.as_ref()
.unwrap()
.debug_loaded_from_disk
.lock()
.iter()
.any(|node| node.kind == dep_kind)
}
#[cfg(debug_assertions)]
#[inline(always)]
pub(crate) fn register_dep_node_debug_str<F>(&self, dep_node: DepNode, debug_str_gen: F)

View file

@ -6,7 +6,6 @@ use std::hash::Hash;
use rustc_data_structures::fingerprint::Fingerprint;
use rustc_span::ErrorGuaranteed;
use super::QueryStackFrameExtra;
use crate::dep_graph::{DepKind, DepNode, DepNodeParams, SerializedDepNodeIndex};
use crate::error::HandleCycleError;
use crate::ich::StableHashingContext;
@ -28,7 +27,7 @@ pub trait QueryConfig<Qcx: QueryContext>: Copy {
fn format_value(self) -> fn(&Self::Value) -> String;
// Don't use this method to access query results, instead use the methods on TyCtxt
fn query_state<'a>(self, tcx: Qcx) -> &'a QueryState<Self::Key, Qcx::QueryInfo>
fn query_state<'a>(self, tcx: Qcx) -> &'a QueryState<Self::Key>
where
Qcx: 'a;
@ -58,7 +57,7 @@ pub trait QueryConfig<Qcx: QueryContext>: Copy {
fn value_from_cycle_error(
self,
tcx: Qcx::DepContext,
cycle_error: &CycleError<QueryStackFrameExtra>,
cycle_error: &CycleError,
guar: ErrorGuaranteed,
) -> Self::Value;

View file

@ -1,4 +1,3 @@
use std::fmt::Debug;
use std::hash::Hash;
use std::io::Write;
use std::iter;
@ -12,7 +11,6 @@ use rustc_hir::def::DefKind;
use rustc_session::Session;
use rustc_span::{DUMMY_SP, Span};
use super::QueryStackFrameExtra;
use crate::dep_graph::DepContext;
use crate::error::CycleStack;
use crate::query::plumbing::CycleError;
@ -20,54 +18,45 @@ use crate::query::{QueryContext, QueryStackFrame};
/// Represents a span and a query key.
#[derive(Clone, Debug)]
pub struct QueryInfo<I> {
pub struct QueryInfo {
/// The span corresponding to the reason for which this query was required.
pub span: Span,
pub query: QueryStackFrame<I>,
pub query: QueryStackFrame,
}
impl<I> QueryInfo<I> {
pub(crate) fn lift<Qcx: QueryContext<QueryInfo = I>>(
&self,
qcx: Qcx,
) -> QueryInfo<QueryStackFrameExtra> {
QueryInfo { span: self.span, query: self.query.lift(qcx) }
}
}
pub type QueryMap<I> = FxHashMap<QueryJobId, QueryJobInfo<I>>;
pub type QueryMap = FxHashMap<QueryJobId, QueryJobInfo>;
/// A value uniquely identifying an active query job.
#[derive(Copy, Clone, Eq, PartialEq, Hash, Debug)]
pub struct QueryJobId(pub NonZero<u64>);
impl QueryJobId {
fn query<I: Clone>(self, map: &QueryMap<I>) -> QueryStackFrame<I> {
fn query(self, map: &QueryMap) -> QueryStackFrame {
map.get(&self).unwrap().query.clone()
}
fn span<I>(self, map: &QueryMap<I>) -> Span {
fn span(self, map: &QueryMap) -> Span {
map.get(&self).unwrap().job.span
}
fn parent<I>(self, map: &QueryMap<I>) -> Option<QueryJobId> {
fn parent(self, map: &QueryMap) -> Option<QueryJobId> {
map.get(&self).unwrap().job.parent
}
fn latch<I>(self, map: &QueryMap<I>) -> Option<&QueryLatch<I>> {
fn latch(self, map: &QueryMap) -> Option<&QueryLatch> {
map.get(&self).unwrap().job.latch.as_ref()
}
}
#[derive(Clone, Debug)]
pub struct QueryJobInfo<I> {
pub query: QueryStackFrame<I>,
pub job: QueryJob<I>,
pub struct QueryJobInfo {
pub query: QueryStackFrame,
pub job: QueryJob,
}
/// Represents an active query job.
#[derive(Debug)]
pub struct QueryJob<I> {
pub struct QueryJob {
pub id: QueryJobId,
/// The span corresponding to the reason for which this query was required.
@ -77,23 +66,23 @@ pub struct QueryJob<I> {
pub parent: Option<QueryJobId>,
/// The latch that is used to wait on this job.
latch: Option<QueryLatch<I>>,
latch: Option<QueryLatch>,
}
impl<I> Clone for QueryJob<I> {
impl Clone for QueryJob {
fn clone(&self) -> Self {
Self { id: self.id, span: self.span, parent: self.parent, latch: self.latch.clone() }
}
}
impl<I> QueryJob<I> {
impl QueryJob {
/// Creates a new query job.
#[inline]
pub fn new(id: QueryJobId, span: Span, parent: Option<QueryJobId>) -> Self {
QueryJob { id, span, parent, latch: None }
}
pub(super) fn latch(&mut self) -> QueryLatch<I> {
pub(super) fn latch(&mut self) -> QueryLatch {
if self.latch.is_none() {
self.latch = Some(QueryLatch::new());
}
@ -113,12 +102,12 @@ impl<I> QueryJob<I> {
}
impl QueryJobId {
pub(super) fn find_cycle_in_stack<I: Clone>(
pub(super) fn find_cycle_in_stack(
&self,
query_map: QueryMap<I>,
query_map: QueryMap,
current_job: &Option<QueryJobId>,
span: Span,
) -> CycleError<I> {
) -> CycleError {
// Find the waitee amongst `current_job` parents
let mut cycle = Vec::new();
let mut current_job = Option::clone(current_job);
@ -152,7 +141,7 @@ impl QueryJobId {
#[cold]
#[inline(never)]
pub fn find_dep_kind_root<I: Clone>(&self, query_map: QueryMap<I>) -> (QueryJobInfo<I>, usize) {
pub fn find_dep_kind_root(&self, query_map: QueryMap) -> (QueryJobInfo, usize) {
let mut depth = 1;
let info = query_map.get(&self).unwrap();
let dep_kind = info.query.dep_kind;
@ -172,31 +161,31 @@ impl QueryJobId {
}
#[derive(Debug)]
struct QueryWaiter<I> {
struct QueryWaiter {
query: Option<QueryJobId>,
condvar: Condvar,
span: Span,
cycle: Mutex<Option<CycleError<I>>>,
cycle: Mutex<Option<CycleError>>,
}
#[derive(Debug)]
struct QueryLatchInfo<I> {
struct QueryLatchInfo {
complete: bool,
waiters: Vec<Arc<QueryWaiter<I>>>,
waiters: Vec<Arc<QueryWaiter>>,
}
#[derive(Debug)]
pub(super) struct QueryLatch<I> {
info: Arc<Mutex<QueryLatchInfo<I>>>,
pub(super) struct QueryLatch {
info: Arc<Mutex<QueryLatchInfo>>,
}
impl<I> Clone for QueryLatch<I> {
impl Clone for QueryLatch {
fn clone(&self) -> Self {
Self { info: Arc::clone(&self.info) }
}
}
impl<I> QueryLatch<I> {
impl QueryLatch {
fn new() -> Self {
QueryLatch {
info: Arc::new(Mutex::new(QueryLatchInfo { complete: false, waiters: Vec::new() })),
@ -209,7 +198,7 @@ impl<I> QueryLatch<I> {
qcx: impl QueryContext,
query: Option<QueryJobId>,
span: Span,
) -> Result<(), CycleError<I>> {
) -> Result<(), CycleError> {
let waiter =
Arc::new(QueryWaiter { query, span, cycle: Mutex::new(None), condvar: Condvar::new() });
self.wait_on_inner(qcx, &waiter);
@ -224,7 +213,7 @@ impl<I> QueryLatch<I> {
}
/// Awaits the caller on this latch by blocking the current thread.
fn wait_on_inner(&self, qcx: impl QueryContext, waiter: &Arc<QueryWaiter<I>>) {
fn wait_on_inner(&self, qcx: impl QueryContext, waiter: &Arc<QueryWaiter>) {
let mut info = self.info.lock();
if !info.complete {
// We push the waiter on to the `waiters` list. It can be accessed inside
@ -260,7 +249,7 @@ impl<I> QueryLatch<I> {
/// Removes a single waiter from the list of waiters.
/// This is used to break query cycles.
fn extract_waiter(&self, waiter: usize) -> Arc<QueryWaiter<I>> {
fn extract_waiter(&self, waiter: usize) -> Arc<QueryWaiter> {
let mut info = self.info.lock();
debug_assert!(!info.complete);
// Remove the waiter from the list of waiters
@ -280,11 +269,7 @@ type Waiter = (QueryJobId, usize);
/// For visits of resumable waiters it returns Some(Some(Waiter)) which has the
/// required information to resume the waiter.
/// If all `visit` calls returns None, this function also returns None.
fn visit_waiters<I, F>(
query_map: &QueryMap<I>,
query: QueryJobId,
mut visit: F,
) -> Option<Option<Waiter>>
fn visit_waiters<F>(query_map: &QueryMap, query: QueryJobId, mut visit: F) -> Option<Option<Waiter>>
where
F: FnMut(Span, QueryJobId) -> Option<Option<Waiter>>,
{
@ -314,8 +299,8 @@ where
/// `span` is the reason for the `query` to execute. This is initially DUMMY_SP.
/// If a cycle is detected, this initial value is replaced with the span causing
/// the cycle.
fn cycle_check<I>(
query_map: &QueryMap<I>,
fn cycle_check(
query_map: &QueryMap,
query: QueryJobId,
span: Span,
stack: &mut Vec<(Span, QueryJobId)>,
@ -354,8 +339,8 @@ fn cycle_check<I>(
/// Finds out if there's a path to the compiler root (aka. code which isn't in a query)
/// from `query` without going through any of the queries in `visited`.
/// This is achieved with a depth first search.
fn connected_to_root<I>(
query_map: &QueryMap<I>,
fn connected_to_root(
query_map: &QueryMap,
query: QueryJobId,
visited: &mut FxHashSet<QueryJobId>,
) -> bool {
@ -376,7 +361,7 @@ fn connected_to_root<I>(
}
// Deterministically pick a query from a list
fn pick_query<'a, I: Clone, T, F>(query_map: &QueryMap<I>, queries: &'a [T], f: F) -> &'a T
fn pick_query<'a, T, F>(query_map: &QueryMap, queries: &'a [T], f: F) -> &'a T
where
F: Fn(&T) -> (Span, QueryJobId),
{
@ -401,10 +386,10 @@ where
/// the function return true.
/// If a cycle was not found, the starting query is removed from `jobs` and
/// the function returns false.
fn remove_cycle<I: Clone>(
query_map: &QueryMap<I>,
fn remove_cycle(
query_map: &QueryMap,
jobs: &mut Vec<QueryJobId>,
wakelist: &mut Vec<Arc<QueryWaiter<I>>>,
wakelist: &mut Vec<Arc<QueryWaiter>>,
) -> bool {
let mut visited = FxHashSet::default();
let mut stack = Vec::new();
@ -505,10 +490,7 @@ fn remove_cycle<I: Clone>(
/// uses a query latch and then resuming that waiter.
/// There may be multiple cycles involved in a deadlock, so this searches
/// all active queries for cycles before finally resuming all the waiters at once.
pub fn break_query_cycles<I: Clone + Debug>(
query_map: QueryMap<I>,
registry: &rustc_thread_pool::Registry,
) {
pub fn break_query_cycles(query_map: QueryMap, registry: &rustc_thread_pool::Registry) {
let mut wakelist = Vec::new();
// It is OK per the comments:
// - https://github.com/rust-lang/rust/pull/131200#issuecomment-2798854932
@ -559,7 +541,7 @@ pub fn report_cycle<'a>(
) -> Diag<'a> {
assert!(!stack.is_empty());
let span = stack[0].query.info.default_span(stack[1 % stack.len()].span);
let span = stack[0].query.default_span(stack[1 % stack.len()].span);
let mut cycle_stack = Vec::new();
@ -568,31 +550,31 @@ pub fn report_cycle<'a>(
for i in 1..stack.len() {
let query = &stack[i].query;
let span = query.info.default_span(stack[(i + 1) % stack.len()].span);
cycle_stack.push(CycleStack { span, desc: query.info.description.to_owned() });
let span = query.default_span(stack[(i + 1) % stack.len()].span);
cycle_stack.push(CycleStack { span, desc: query.description.to_owned() });
}
let mut cycle_usage = None;
if let Some((span, ref query)) = *usage {
cycle_usage = Some(crate::error::CycleUsage {
span: query.info.default_span(span),
usage: query.info.description.to_string(),
span: query.default_span(span),
usage: query.description.to_string(),
});
}
let alias =
if stack.iter().all(|entry| matches!(entry.query.info.def_kind, Some(DefKind::TyAlias))) {
Some(crate::error::Alias::Ty)
} else if stack.iter().all(|entry| entry.query.info.def_kind == Some(DefKind::TraitAlias)) {
Some(crate::error::Alias::Trait)
} else {
None
};
let alias = if stack.iter().all(|entry| matches!(entry.query.def_kind, Some(DefKind::TyAlias)))
{
Some(crate::error::Alias::Ty)
} else if stack.iter().all(|entry| entry.query.def_kind == Some(DefKind::TraitAlias)) {
Some(crate::error::Alias::Trait)
} else {
None
};
let cycle_diag = crate::error::Cycle {
span,
cycle_stack,
stack_bottom: stack[0].query.info.description.to_owned(),
stack_bottom: stack[0].query.description.to_owned(),
alias,
cycle_usage,
stack_count,
@ -628,7 +610,6 @@ pub fn print_query_stack<Qcx: QueryContext>(
let Some(query_info) = query_map.get(&query) else {
break;
};
let query_extra = qcx.lift_query_info(&query_info.query.info);
if Some(count_printed) < limit_frames || limit_frames.is_none() {
// Only print to stderr as many stack frames as `num_frames` when present.
// FIXME: needs translation
@ -636,7 +617,7 @@ pub fn print_query_stack<Qcx: QueryContext>(
#[allow(rustc::untranslatable_diagnostic)]
dcx.struct_failure_note(format!(
"#{} [{:?}] {}",
count_printed, query_info.query.dep_kind, query_extra.description
count_printed, query_info.query.dep_kind, query_info.query.description
))
.with_span(query_info.job.span)
.emit();
@ -649,7 +630,7 @@ pub fn print_query_stack<Qcx: QueryContext>(
"#{} [{}] {}",
count_total,
qcx.dep_context().dep_kind_info(query_info.query.dep_kind).name,
query_extra.description
query_info.query.description
);
}

View file

@ -1,23 +1,4 @@
mod plumbing;
use std::fmt::Debug;
use std::marker::PhantomData;
use std::mem::transmute;
use std::sync::Arc;
pub use self::plumbing::*;
mod job;
pub use self::job::{
QueryInfo, QueryJob, QueryJobId, QueryJobInfo, QueryMap, break_query_cycles, print_query_stack,
report_cycle,
};
mod caches;
pub use self::caches::{DefIdCache, DefaultCache, QueryCache, SingleCache, VecCache};
mod config;
use rustc_data_structures::jobserver::Proxy;
use rustc_data_structures::sync::{DynSend, DynSync};
use rustc_errors::DiagInner;
use rustc_hashes::Hash64;
use rustc_hir::def::DefKind;
@ -25,66 +6,49 @@ use rustc_macros::{Decodable, Encodable};
use rustc_span::Span;
use rustc_span::def_id::DefId;
pub use self::caches::{DefIdCache, DefaultCache, QueryCache, SingleCache, VecCache};
pub use self::config::{HashResult, QueryConfig};
pub use self::job::{
QueryInfo, QueryJob, QueryJobId, QueryJobInfo, QueryMap, break_query_cycles, print_query_stack,
report_cycle,
};
pub use self::plumbing::*;
use crate::dep_graph::{DepKind, DepNodeIndex, HasDepContext, SerializedDepNodeIndex};
mod caches;
mod config;
mod job;
mod plumbing;
/// Description of a frame in the query stack.
///
/// This is mostly used in case of cycles for error reporting.
#[derive(Clone, Debug)]
pub struct QueryStackFrame<I> {
/// This field initially stores a `QueryStackDeferred` during collection,
/// but can later be changed to `QueryStackFrameExtra` containing concrete information
/// by calling `lift`. This is done so that collecting query does not need to invoke
/// queries, instead `lift` will call queries in a more appropriate location.
pub info: I,
pub struct QueryStackFrame {
pub description: String,
span: Option<Span>,
pub def_id: Option<DefId>,
pub def_kind: Option<DefKind>,
/// A def-id that is extracted from a `Ty` in a query key
pub def_id_for_ty_in_cycle: Option<DefId>,
pub dep_kind: DepKind,
/// This hash is used to deterministically pick
/// a query to remove cycles in the parallel compiler.
hash: Hash64,
pub def_id: Option<DefId>,
/// A def-id that is extracted from a `Ty` in a query key
pub def_id_for_ty_in_cycle: Option<DefId>,
}
impl<I> QueryStackFrame<I> {
impl QueryStackFrame {
#[inline]
pub fn new(
info: I,
dep_kind: DepKind,
hash: impl FnOnce() -> Hash64,
description: String,
span: Option<Span>,
def_id: Option<DefId>,
def_kind: Option<DefKind>,
dep_kind: DepKind,
def_id_for_ty_in_cycle: Option<DefId>,
hash: Hash64,
) -> Self {
Self { info, def_id, dep_kind, hash: hash(), def_id_for_ty_in_cycle }
}
fn lift<Qcx: QueryContext<QueryInfo = I>>(
&self,
qcx: Qcx,
) -> QueryStackFrame<QueryStackFrameExtra> {
QueryStackFrame {
info: qcx.lift_query_info(&self.info),
dep_kind: self.dep_kind,
hash: self.hash,
def_id: self.def_id,
def_id_for_ty_in_cycle: self.def_id_for_ty_in_cycle,
}
}
}
#[derive(Clone, Debug)]
pub struct QueryStackFrameExtra {
pub description: String,
span: Option<Span>,
pub def_kind: Option<DefKind>,
}
impl QueryStackFrameExtra {
#[inline]
pub fn new(description: String, span: Option<Span>, def_kind: Option<DefKind>) -> Self {
Self { description, span, def_kind }
Self { description, span, def_id, def_kind, def_id_for_ty_in_cycle, dep_kind, hash }
}
// FIXME(eddyb) Get more valid `Span`s on queries.
@ -97,40 +61,6 @@ impl QueryStackFrameExtra {
}
}
/// Track a 'side effect' for a particular query.
/// This is used to hold a closure which can create `QueryStackFrameExtra`.
#[derive(Clone)]
pub struct QueryStackDeferred<'tcx> {
_dummy: PhantomData<&'tcx ()>,
// `extract` may contain references to 'tcx, but we can't tell drop checking that it won't
// access it in the destructor.
extract: Arc<dyn Fn() -> QueryStackFrameExtra + DynSync + DynSend>,
}
impl<'tcx> QueryStackDeferred<'tcx> {
pub fn new<C: Copy + DynSync + DynSend + 'tcx>(
context: C,
extract: fn(C) -> QueryStackFrameExtra,
) -> Self {
let extract: Arc<dyn Fn() -> QueryStackFrameExtra + DynSync + DynSend + 'tcx> =
Arc::new(move || extract(context));
// SAFETY: The `extract` closure does not access 'tcx in its destructor as the only
// captured variable is `context` which is Copy and cannot have a destructor.
Self { _dummy: PhantomData, extract: unsafe { transmute(extract) } }
}
pub fn extract(&self) -> QueryStackFrameExtra {
(self.extract)()
}
}
impl<'tcx> Debug for QueryStackDeferred<'tcx> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.write_str("QueryStackDeferred")
}
}
/// Tracks 'side effects' for a particular query.
/// This struct is saved to disk along with the query result,
/// and loaded from disk if we mark the query as green.
@ -150,8 +80,6 @@ pub enum QuerySideEffect {
}
pub trait QueryContext: HasDepContext {
type QueryInfo: Clone;
/// Gets a jobserver reference which is used to release then acquire
/// a token while waiting on a query.
fn jobserver_proxy(&self) -> &Proxy;
@ -161,12 +89,7 @@ pub trait QueryContext: HasDepContext {
/// Get the query information from the TLS context.
fn current_query_job(self) -> Option<QueryJobId>;
fn collect_active_jobs(
self,
require_complete: bool,
) -> Result<QueryMap<Self::QueryInfo>, QueryMap<Self::QueryInfo>>;
fn lift_query_info(self, info: &Self::QueryInfo) -> QueryStackFrameExtra;
fn collect_active_jobs(self, require_complete: bool) -> Result<QueryMap, QueryMap>;
/// Load a side effect associated to the node in the previous session.
fn load_side_effect(

View file

@ -18,7 +18,7 @@ use rustc_errors::{Diag, FatalError, StashKey};
use rustc_span::{DUMMY_SP, Span};
use tracing::instrument;
use super::{QueryConfig, QueryStackFrameExtra};
use super::QueryConfig;
use crate::HandleCycleError;
use crate::dep_graph::{DepContext, DepGraphData, DepNode, DepNodeIndex, DepNodeParams};
use crate::ich::StableHashingContext;
@ -31,23 +31,23 @@ fn equivalent_key<K: Eq, V>(k: &K) -> impl Fn(&(K, V)) -> bool + '_ {
move |x| x.0 == *k
}
pub struct QueryState<K, I> {
active: Sharded<hashbrown::HashTable<(K, QueryResult<I>)>>,
pub struct QueryState<K> {
active: Sharded<hashbrown::HashTable<(K, QueryResult)>>,
}
/// Indicates the state of a query for a given key in a query map.
enum QueryResult<I> {
enum QueryResult {
/// An already executing query. The query job can be used to await for its completion.
Started(QueryJob<I>),
Started(QueryJob),
/// The query panicked. Queries trying to wait on this will raise a fatal error which will
/// silently panic.
Poisoned,
}
impl<I> QueryResult<I> {
impl QueryResult {
/// Unwraps the query job expecting that it has started.
fn expect_job(self) -> QueryJob<I> {
fn expect_job(self) -> QueryJob {
match self {
Self::Started(job) => job,
Self::Poisoned => {
@ -57,7 +57,7 @@ impl<I> QueryResult<I> {
}
}
impl<K, I> QueryState<K, I>
impl<K> QueryState<K>
where
K: Eq + Hash + Copy + Debug,
{
@ -68,13 +68,13 @@ where
pub fn collect_active_jobs<Qcx: Copy>(
&self,
qcx: Qcx,
make_query: fn(Qcx, K) -> QueryStackFrame<I>,
jobs: &mut QueryMap<I>,
make_query: fn(Qcx, K) -> QueryStackFrame,
jobs: &mut QueryMap,
require_complete: bool,
) -> Option<()> {
let mut active = Vec::new();
let mut collect = |iter: LockGuard<'_, HashTable<(K, QueryResult<I>)>>| {
let mut collect = |iter: LockGuard<'_, HashTable<(K, QueryResult)>>| {
for (k, v) in iter.iter() {
if let QueryResult::Started(ref job) = *v {
active.push((*k, job.clone()));
@ -105,19 +105,19 @@ where
}
}
impl<K, I> Default for QueryState<K, I> {
fn default() -> QueryState<K, I> {
impl<K> Default for QueryState<K> {
fn default() -> QueryState<K> {
QueryState { active: Default::default() }
}
}
/// A type representing the responsibility to execute the job in the `job` field.
/// This will poison the relevant query if dropped.
struct JobOwner<'tcx, K, I>
struct JobOwner<'tcx, K>
where
K: Eq + Hash + Copy,
{
state: &'tcx QueryState<K, I>,
state: &'tcx QueryState<K>,
key: K,
}
@ -159,7 +159,7 @@ where
}
Stash => {
let guar = if let Some(root) = cycle_error.cycle.first()
&& let Some(span) = root.query.info.span
&& let Some(span) = root.query.span
{
error.stash(span, StashKey::Cycle).unwrap()
} else {
@ -170,7 +170,7 @@ where
}
}
impl<'tcx, K, I> JobOwner<'tcx, K, I>
impl<'tcx, K> JobOwner<'tcx, K>
where
K: Eq + Hash + Copy,
{
@ -207,7 +207,7 @@ where
}
}
impl<'tcx, K, I> Drop for JobOwner<'tcx, K, I>
impl<'tcx, K> Drop for JobOwner<'tcx, K>
where
K: Eq + Hash + Copy,
{
@ -235,19 +235,10 @@ where
}
#[derive(Clone, Debug)]
pub struct CycleError<I = QueryStackFrameExtra> {
pub struct CycleError {
/// The query and related span that uses the cycle.
pub usage: Option<(Span, QueryStackFrame<I>)>,
pub cycle: Vec<QueryInfo<I>>,
}
impl<I> CycleError<I> {
fn lift<Qcx: QueryContext<QueryInfo = I>>(&self, qcx: Qcx) -> CycleError<QueryStackFrameExtra> {
CycleError {
usage: self.usage.as_ref().map(|(span, frame)| (*span, frame.lift(qcx))),
cycle: self.cycle.iter().map(|info| info.lift(qcx)).collect(),
}
}
pub usage: Option<(Span, QueryStackFrame)>,
pub cycle: Vec<QueryInfo>,
}
/// Checks whether there is already a value for this key in the in-memory
@ -284,10 +275,10 @@ where
{
// Ensure there was no errors collecting all active jobs.
// We need the complete map to ensure we find a cycle to break.
let query_map = qcx.collect_active_jobs(false).ok().expect("failed to collect active queries");
let query_map = qcx.collect_active_jobs(false).expect("failed to collect active queries");
let error = try_execute.find_cycle_in_stack(query_map, &qcx.current_query_job(), span);
(mk_cycle(query, qcx, error.lift(qcx)), None)
(mk_cycle(query, qcx, error), None)
}
#[inline(always)]
@ -296,7 +287,7 @@ fn wait_for_query<Q, Qcx>(
qcx: Qcx,
span: Span,
key: Q::Key,
latch: QueryLatch<Qcx::QueryInfo>,
latch: QueryLatch,
current: Option<QueryJobId>,
) -> (Q::Value, Option<DepNodeIndex>)
where
@ -336,7 +327,7 @@ where
(v, Some(index))
}
Err(cycle) => (mk_cycle(query, qcx, cycle.lift(qcx)), None),
Err(cycle) => (mk_cycle(query, qcx, cycle), None),
}
}
@ -414,7 +405,7 @@ where
fn execute_job<Q, Qcx, const INCR: bool>(
query: Q,
qcx: Qcx,
state: &QueryState<Q::Key, Qcx::QueryInfo>,
state: &QueryState<Q::Key>,
key: Q::Key,
key_hash: u64,
id: QueryJobId,

View file

@ -13,6 +13,8 @@
#![feature(arbitrary_self_types)]
#![feature(assert_matches)]
#![feature(box_patterns)]
#![feature(const_default)]
#![feature(const_trait_impl)]
#![feature(control_flow_into_value)]
#![feature(decl_macro)]
#![feature(default_field_values)]
@ -45,7 +47,7 @@ use rustc_ast::{
self as ast, AngleBracketedArg, CRATE_NODE_ID, Crate, Expr, ExprKind, GenericArg, GenericArgs,
NodeId, Path, attr,
};
use rustc_data_structures::fx::{FxHashMap, FxHashSet, FxIndexMap, FxIndexSet};
use rustc_data_structures::fx::{FxHashMap, FxHashSet, FxIndexMap, FxIndexSet, default};
use rustc_data_structures::intern::Interned;
use rustc_data_structures::steal::Steal;
use rustc_data_structures::sync::{FreezeReadGuard, FreezeWriteGuard};
@ -1113,7 +1115,7 @@ pub struct Resolver<'ra, 'tcx> {
tcx: TyCtxt<'tcx>,
/// Item with a given `LocalDefId` was defined during macro expansion with ID `ExpnId`.
expn_that_defined: UnordMap<LocalDefId, ExpnId>,
expn_that_defined: UnordMap<LocalDefId, ExpnId> = Default::default(),
graph_root: Module<'ra>,
@ -1124,12 +1126,12 @@ pub struct Resolver<'ra, 'tcx> {
extern_prelude: FxIndexMap<Macros20NormalizedIdent, ExternPreludeEntry<'ra>>,
/// N.B., this is used only for better diagnostics, not name resolution itself.
field_names: LocalDefIdMap<Vec<Ident>>,
field_defaults: LocalDefIdMap<Vec<Symbol>>,
field_names: LocalDefIdMap<Vec<Ident>> = Default::default(),
field_defaults: LocalDefIdMap<Vec<Symbol>> = Default::default(),
/// Span of the privacy modifier in fields of an item `DefId` accessible with dot syntax.
/// Used for hints during error reporting.
field_visibility_spans: FxHashMap<DefId, Vec<Span>>,
field_visibility_spans: FxHashMap<DefId, Vec<Span>> = default::fx_hash_map(),
/// All imports known to succeed or fail.
determined_imports: Vec<Import<'ra>> = Vec::new(),
@ -1139,26 +1141,26 @@ pub struct Resolver<'ra, 'tcx> {
// Spans for local variables found during pattern resolution.
// Used for suggestions during error reporting.
pat_span_map: NodeMap<Span>,
pat_span_map: NodeMap<Span> = Default::default(),
/// Resolutions for nodes that have a single resolution.
partial_res_map: NodeMap<PartialRes>,
partial_res_map: NodeMap<PartialRes> = Default::default(),
/// Resolutions for import nodes, which have multiple resolutions in different namespaces.
import_res_map: NodeMap<PerNS<Option<Res>>>,
import_res_map: NodeMap<PerNS<Option<Res>>> = Default::default(),
/// An import will be inserted into this map if it has been used.
import_use_map: FxHashMap<Import<'ra>, Used>,
import_use_map: FxHashMap<Import<'ra>, Used> = default::fx_hash_map(),
/// Resolutions for labels (node IDs of their corresponding blocks or loops).
label_res_map: NodeMap<NodeId>,
label_res_map: NodeMap<NodeId> = Default::default(),
/// Resolutions for lifetimes.
lifetimes_res_map: NodeMap<LifetimeRes>,
lifetimes_res_map: NodeMap<LifetimeRes> = Default::default(),
/// Lifetime parameters that lowering will have to introduce.
extra_lifetime_params_map: NodeMap<Vec<(Ident, NodeId, LifetimeRes)>>,
extra_lifetime_params_map: NodeMap<Vec<(Ident, NodeId, LifetimeRes)>> = Default::default(),
/// `CrateNum` resolutions of `extern crate` items.
extern_crate_map: UnordMap<LocalDefId, CrateNum>,
module_children: LocalDefIdMap<Vec<ModChild>>,
ambig_module_children: LocalDefIdMap<Vec<AmbigModChild>>,
trait_map: NodeMap<Vec<TraitCandidate>>,
extern_crate_map: UnordMap<LocalDefId, CrateNum> = Default::default(),
module_children: LocalDefIdMap<Vec<ModChild>> = Default::default(),
ambig_module_children: LocalDefIdMap<Vec<AmbigModChild>> = Default::default(),
trait_map: NodeMap<Vec<TraitCandidate>> = Default::default(),
/// A map from nodes to anonymous modules.
/// Anonymous modules are pseudo-modules that are implicitly created around items
@ -1174,7 +1176,7 @@ pub struct Resolver<'ra, 'tcx> {
///
/// There will be an anonymous module created around `g` with the ID of the
/// entry block for `f`.
block_map: NodeMap<Module<'ra>>,
block_map: NodeMap<Module<'ra>> = Default::default(),
/// A fake module that contains no definition and no prelude. Used so that
/// some AST passes can generate identifiers that only resolve to local or
/// lang items.
@ -1190,7 +1192,7 @@ pub struct Resolver<'ra, 'tcx> {
glob_map: FxIndexMap<LocalDefId, FxIndexSet<Symbol>>,
glob_error: Option<ErrorGuaranteed> = None,
visibilities_for_hashing: Vec<(LocalDefId, Visibility)> = Vec::new(),
used_imports: FxHashSet<NodeId>,
used_imports: FxHashSet<NodeId> = default::fx_hash_set(),
maybe_unused_trait_imports: FxIndexSet<LocalDefId>,
/// Privacy errors are delayed until the end in order to deduplicate them.
@ -1206,56 +1208,56 @@ pub struct Resolver<'ra, 'tcx> {
/// When a type is re-exported that has an inaccessible constructor because it has fields that
/// are inaccessible from the import's scope, we mark that as the type won't be able to be built
/// through the re-export. We use this information to extend the existing diagnostic.
inaccessible_ctor_reexport: FxHashMap<Span, Span>,
inaccessible_ctor_reexport: FxHashMap<Span, Span> = default::fx_hash_map(),
arenas: &'ra ResolverArenas<'ra>,
dummy_decl: Decl<'ra>,
builtin_type_decls: FxHashMap<Symbol, Decl<'ra>>,
builtin_attr_decls: FxHashMap<Symbol, Decl<'ra>>,
registered_tool_decls: FxHashMap<Ident, Decl<'ra>>,
macro_names: FxHashSet<Ident>,
builtin_macros: FxHashMap<Symbol, SyntaxExtensionKind>,
macro_names: FxHashSet<Ident> = default::fx_hash_set(),
builtin_macros: FxHashMap<Symbol, SyntaxExtensionKind> = default::fx_hash_map(),
registered_tools: &'tcx RegisteredTools,
macro_use_prelude: FxIndexMap<Symbol, Decl<'ra>>,
/// Eagerly populated map of all local macro definitions.
local_macro_map: FxHashMap<LocalDefId, &'ra MacroData>,
local_macro_map: FxHashMap<LocalDefId, &'ra MacroData> = default::fx_hash_map(),
/// Lazily populated cache of macro definitions loaded from external crates.
extern_macro_map: CacheRefCell<FxHashMap<DefId, &'ra MacroData>>,
dummy_ext_bang: Arc<SyntaxExtension>,
dummy_ext_derive: Arc<SyntaxExtension>,
non_macro_attr: &'ra MacroData,
local_macro_def_scopes: FxHashMap<LocalDefId, Module<'ra>>,
ast_transform_scopes: FxHashMap<LocalExpnId, Module<'ra>>,
local_macro_def_scopes: FxHashMap<LocalDefId, Module<'ra>> = default::fx_hash_map(),
ast_transform_scopes: FxHashMap<LocalExpnId, Module<'ra>> = default::fx_hash_map(),
unused_macros: FxIndexMap<LocalDefId, (NodeId, Ident)>,
/// A map from the macro to all its potentially unused arms.
unused_macro_rules: FxIndexMap<NodeId, DenseBitSet<usize>>,
proc_macro_stubs: FxHashSet<LocalDefId>,
proc_macro_stubs: FxHashSet<LocalDefId> = default::fx_hash_set(),
/// Traces collected during macro resolution and validated when it's complete.
single_segment_macro_resolutions:
CmRefCell<Vec<(Ident, MacroKind, ParentScope<'ra>, Option<Decl<'ra>>, Option<Span>)>>,
multi_segment_macro_resolutions:
CmRefCell<Vec<(Vec<Segment>, Span, MacroKind, ParentScope<'ra>, Option<Res>, Namespace)>>,
builtin_attrs: Vec<(Ident, ParentScope<'ra>)>,
builtin_attrs: Vec<(Ident, ParentScope<'ra>)> = Vec::new(),
/// `derive(Copy)` marks items they are applied to so they are treated specially later.
/// Derive macros cannot modify the item themselves and have to store the markers in the global
/// context, so they attach the markers to derive container IDs using this resolver table.
containers_deriving_copy: FxHashSet<LocalExpnId>,
containers_deriving_copy: FxHashSet<LocalExpnId> = default::fx_hash_set(),
/// Parent scopes in which the macros were invoked.
/// FIXME: `derives` are missing in these parent scopes and need to be taken from elsewhere.
invocation_parent_scopes: FxHashMap<LocalExpnId, ParentScope<'ra>>,
invocation_parent_scopes: FxHashMap<LocalExpnId, ParentScope<'ra>> = default::fx_hash_map(),
/// `macro_rules` scopes *produced* by expanding the macro invocations,
/// include all the `macro_rules` items and other invocations generated by them.
output_macro_rules_scopes: FxHashMap<LocalExpnId, MacroRulesScopeRef<'ra>>,
output_macro_rules_scopes: FxHashMap<LocalExpnId, MacroRulesScopeRef<'ra>> = default::fx_hash_map(),
/// `macro_rules` scopes produced by `macro_rules` item definitions.
macro_rules_scopes: FxHashMap<LocalDefId, MacroRulesScopeRef<'ra>>,
macro_rules_scopes: FxHashMap<LocalDefId, MacroRulesScopeRef<'ra>> = default::fx_hash_map(),
/// Helper attributes that are in scope for the given expansion.
helper_attrs: FxHashMap<LocalExpnId, Vec<(Macros20NormalizedIdent, Decl<'ra>)>>,
helper_attrs: FxHashMap<LocalExpnId, Vec<(Macros20NormalizedIdent, Decl<'ra>)>> = default::fx_hash_map(),
/// Ready or in-progress results of resolving paths inside the `#[derive(...)]` attribute
/// with the given `ExpnId`.
derive_data: FxHashMap<LocalExpnId, DeriveData>,
derive_data: FxHashMap<LocalExpnId, DeriveData> = default::fx_hash_map(),
/// Avoid duplicated errors for "name already defined".
name_already_seen: FxHashMap<Symbol, Span>,
name_already_seen: FxHashMap<Symbol, Span> = default::fx_hash_map(),
potentially_unused_imports: Vec<Import<'ra>> = Vec::new(),
@ -1264,7 +1266,7 @@ pub struct Resolver<'ra, 'tcx> {
/// Table for mapping struct IDs into struct constructor IDs,
/// it's not used during normal resolution, only for better error reporting.
/// Also includes of list of each fields visibility
struct_constructors: LocalDefIdMap<(Res, Visibility<DefId>, Vec<Visibility<DefId>>)>,
struct_constructors: LocalDefIdMap<(Res, Visibility<DefId>, Vec<Visibility<DefId>>)> = Default::default(),
lint_buffer: LintBuffer,
@ -1272,19 +1274,19 @@ pub struct Resolver<'ra, 'tcx> {
node_id_to_def_id: NodeMap<Feed<'tcx, LocalDefId>>,
disambiguator: DisambiguatorState,
disambiguator: DisambiguatorState = DisambiguatorState::new(),
/// Indices of unnamed struct or variant fields with unresolved attributes.
placeholder_field_indices: FxHashMap<NodeId, usize>,
placeholder_field_indices: FxHashMap<NodeId, usize> = default::fx_hash_map(),
/// When collecting definitions from an AST fragment produced by a macro invocation `ExpnId`
/// we know what parent node that fragment should be attached to thanks to this table,
/// and how the `impl Trait` fragments were introduced.
invocation_parents: FxHashMap<LocalExpnId, InvocationParent>,
/// Amount of lifetime parameters for each item in the crate.
item_generics_num_lifetimes: FxHashMap<LocalDefId, usize>,
delegation_fn_sigs: LocalDefIdMap<DelegationFnSig>,
delegation_infos: LocalDefIdMap<DelegationInfo>,
item_generics_num_lifetimes: FxHashMap<LocalDefId, usize> = default::fx_hash_map(),
delegation_fn_sigs: LocalDefIdMap<DelegationFnSig> = Default::default(),
delegation_infos: LocalDefIdMap<DelegationInfo> = Default::default(),
main_def: Option<MainDefinition> = None,
trait_impls: FxIndexMap<DefId, Vec<LocalDefId>>,
@ -1293,7 +1295,7 @@ pub struct Resolver<'ra, 'tcx> {
proc_macros: Vec<LocalDefId> = Vec::new(),
confused_type_with_std_module: FxIndexMap<Span, Span>,
/// Whether lifetime elision was successful.
lifetime_elision_allowed: FxHashSet<NodeId>,
lifetime_elision_allowed: FxHashSet<NodeId> = default::fx_hash_set(),
/// Names of items that were stripped out via cfg with their corresponding cfg meta item.
stripped_cfg_items: Vec<StrippedCfgItem<NodeId>> = Vec::new(),
@ -1301,22 +1303,22 @@ pub struct Resolver<'ra, 'tcx> {
effective_visibilities: EffectiveVisibilities,
doc_link_resolutions: FxIndexMap<LocalDefId, DocLinkResMap>,
doc_link_traits_in_scope: FxIndexMap<LocalDefId, Vec<DefId>>,
all_macro_rules: UnordSet<Symbol>,
all_macro_rules: UnordSet<Symbol> = Default::default(),
/// Invocation ids of all glob delegations.
glob_delegation_invoc_ids: FxHashSet<LocalExpnId>,
glob_delegation_invoc_ids: FxHashSet<LocalExpnId> = default::fx_hash_set(),
/// Analogue of module `unexpanded_invocations` but in trait impls, excluding glob delegations.
/// Needed because glob delegations wait for all other neighboring macros to expand.
impl_unexpanded_invocations: FxHashMap<LocalDefId, FxHashSet<LocalExpnId>>,
impl_unexpanded_invocations: FxHashMap<LocalDefId, FxHashSet<LocalExpnId>> = default::fx_hash_map(),
/// Simplified analogue of module `resolutions` but in trait impls, excluding glob delegations.
/// Needed because glob delegations exclude explicitly defined names.
impl_binding_keys: FxHashMap<LocalDefId, FxHashSet<BindingKey>>,
impl_binding_keys: FxHashMap<LocalDefId, FxHashSet<BindingKey>> = default::fx_hash_map(),
/// This is the `Span` where an `extern crate foo;` suggestion would be inserted, if `foo`
/// could be a crate that wasn't imported. For diagnostics use only.
current_crate_outer_attr_insert_span: Span,
mods_with_parse_errors: FxHashSet<DefId>,
mods_with_parse_errors: FxHashSet<DefId> = default::fx_hash_set(),
/// Whether `Resolver::register_macros_for_all_crates` has been called once already, as we
/// don't need to run it more than once.
@ -1325,7 +1327,7 @@ pub struct Resolver<'ra, 'tcx> {
// Stores pre-expansion and pre-placeholder-fragment-insertion names for `impl Trait` types
// that were encountered during resolution. These names are used to generate item names
// for APITs, so we don't want to leak details of resolution into these names.
impl_trait_names: FxHashMap<NodeId, Symbol>,
impl_trait_names: FxHashMap<NodeId, Symbol> = default::fx_hash_map(),
}
/// This provides memory for the rest of the crate. The `'ra` lifetime that is
@ -1587,41 +1589,19 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> {
let mut resolver = Resolver {
tcx,
expn_that_defined: Default::default(),
// The outermost module has def ID 0; this is not reflected in the
// AST.
graph_root,
assert_speculative: false, // Only set/cleared in Resolver::resolve_imports for now
prelude: None,
extern_prelude,
field_names: Default::default(),
field_defaults: Default::default(),
field_visibility_spans: FxHashMap::default(),
pat_span_map: Default::default(),
partial_res_map: Default::default(),
import_res_map: Default::default(),
import_use_map: Default::default(),
label_res_map: Default::default(),
lifetimes_res_map: Default::default(),
extra_lifetime_params_map: Default::default(),
extern_crate_map: Default::default(),
module_children: Default::default(),
ambig_module_children: Default::default(),
trait_map: NodeMap::default(),
empty_module,
local_modules,
local_module_map,
extern_module_map: Default::default(),
block_map: Default::default(),
ast_transform_scopes: FxHashMap::default(),
glob_map: Default::default(),
used_imports: FxHashSet::default(),
maybe_unused_trait_imports: Default::default(),
inaccessible_ctor_reexport: Default::default(),
arenas,
dummy_decl: arenas.new_pub_def_decl(Res::Err, DUMMY_SP, LocalExpnId::ROOT),
@ -1649,53 +1629,27 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> {
(*ident, decl)
})
.collect(),
macro_names: FxHashSet::default(),
builtin_macros: Default::default(),
registered_tools,
macro_use_prelude: Default::default(),
local_macro_map: Default::default(),
extern_macro_map: Default::default(),
dummy_ext_bang: Arc::new(SyntaxExtension::dummy_bang(edition)),
dummy_ext_derive: Arc::new(SyntaxExtension::dummy_derive(edition)),
non_macro_attr: arenas
.alloc_macro(MacroData::new(Arc::new(SyntaxExtension::non_macro_attr(edition)))),
invocation_parent_scopes: Default::default(),
output_macro_rules_scopes: Default::default(),
macro_rules_scopes: Default::default(),
helper_attrs: Default::default(),
derive_data: Default::default(),
local_macro_def_scopes: FxHashMap::default(),
name_already_seen: FxHashMap::default(),
struct_constructors: Default::default(),
unused_macros: Default::default(),
unused_macro_rules: Default::default(),
proc_macro_stubs: Default::default(),
single_segment_macro_resolutions: Default::default(),
multi_segment_macro_resolutions: Default::default(),
builtin_attrs: Default::default(),
containers_deriving_copy: Default::default(),
lint_buffer: LintBuffer::default(),
node_id_to_def_id,
disambiguator: DisambiguatorState::new(),
placeholder_field_indices: Default::default(),
invocation_parents,
item_generics_num_lifetimes: Default::default(),
trait_impls: Default::default(),
confused_type_with_std_module: Default::default(),
lifetime_elision_allowed: Default::default(),
stripped_cfg_items: Default::default(),
effective_visibilities: Default::default(),
doc_link_resolutions: Default::default(),
doc_link_traits_in_scope: Default::default(),
all_macro_rules: Default::default(),
delegation_fn_sigs: Default::default(),
glob_delegation_invoc_ids: Default::default(),
impl_unexpanded_invocations: Default::default(),
impl_binding_keys: Default::default(),
current_crate_outer_attr_insert_span,
mods_with_parse_errors: Default::default(),
impl_trait_names: Default::default(),
delegation_infos: Default::default(),
..
};

View file

@ -2278,6 +2278,8 @@ options! {
"set options for branch target identification and pointer authentication on AArch64"),
build_sdylib_interface: bool = (false, parse_bool, [UNTRACKED],
"whether the stable interface is being built"),
cache_proc_macros: bool = (false, parse_bool, [TRACKED],
"cache the results of derive proc macro invocations (potentially unsound!) (default: no)"),
cf_protection: CFProtection = (CFProtection::None, parse_cfprotection, [TRACKED],
"instrument control-flow architecture protection"),
check_cfg_all_expected: bool = (false, parse_bool, [UNTRACKED],

View file

@ -1571,3 +1571,9 @@ impl<CTX: HashStableContext> HashStable<CTX> for ExpnId {
hash.hash_stable(ctx, hasher);
}
}
impl<CTX: HashStableContext> HashStable<CTX> for LocalExpnId {
    fn hash_stable(&self, hcx: &mut CTX, hasher: &mut StableHasher) {
        // Convert to the crate-global `ExpnId` representation and hash that,
        // so a `LocalExpnId` hashes identically to its `ExpnId` counterpart.
        let global_id = self.to_expn_id();
        global_id.hash_stable(hcx, hasher);
    }
}

View file

@ -190,6 +190,7 @@ symbols! {
BTreeMap,
BTreeSet,
BinaryHeap,
Bool,
Borrow,
BorrowMut,
Break,
@ -202,6 +203,7 @@ symbols! {
Capture,
Cell,
Center,
Char,
Child,
Cleanup,
Clone,
@ -238,6 +240,7 @@ symbols! {
Error,
File,
FileType,
Float,
FmtArgumentsNew,
FmtWrite,
Fn,
@ -263,6 +266,7 @@ symbols! {
IndexOutput,
Input,
Instant,
Int,
Into,
IntoFuture,
IntoIterator,
@ -285,7 +289,6 @@ symbols! {
IteratorItem,
IteratorMap,
Layout,
Leaf,
Left,
LinkedList,
LintDiagnostic,
@ -363,6 +366,7 @@ symbols! {
Some,
SpanCtxt,
Stdin,
Str,
String,
StructuralPartialEq,
SubdiagMessage,
@ -585,6 +589,7 @@ symbols! {
binaryheap_iter,
bind_by_move_pattern_guards,
bindings_after_at,
bit_width,
bitand,
bitand_assign,
bitor,
@ -2060,6 +2065,7 @@ symbols! {
shr_assign,
sig_dfl,
sig_ign,
signed,
simd,
simd_add,
simd_and,

View file

@ -45,9 +45,16 @@ pub enum TypeKind {
Tuple(Tuple),
/// Arrays.
Array(Array),
/// Primitives
/// FIXME(#146922): disambiguate further
Leaf,
/// Primitive boolean type.
Bool(Bool),
/// Primitive character type.
Char(Char),
/// Primitive signed and unsigned integer type.
Int(Int),
/// Primitive floating-point type.
Float(Float),
/// String slice type.
Str(Str),
/// FIXME(#146922): add all the common types
Other,
}
@ -82,3 +89,47 @@ pub struct Array {
/// The length of the array.
pub len: usize,
}
/// Compile-time type information about the primitive `bool` type.
#[derive(Debug)]
#[non_exhaustive]
#[unstable(feature = "type_info", issue = "146922")]
pub struct Bool {
    // No additional information to provide for now.
    // `#[non_exhaustive]` leaves room to add fields later without breaking users.
}
/// Compile-time type information about the primitive `char` type.
#[derive(Debug)]
#[non_exhaustive]
#[unstable(feature = "type_info", issue = "146922")]
pub struct Char {
    // No additional information to provide for now.
    // `#[non_exhaustive]` leaves room to add fields later without breaking users.
}
/// Compile-time type information about signed and unsigned integer types.
#[derive(Debug)]
#[non_exhaustive]
#[unstable(feature = "type_info", issue = "146922")]
pub struct Int {
    /// The bit width of the integer type, e.g. 32 for `i32`/`u32`
    /// (for `isize`/`usize` this is the target pointer width).
    pub bit_width: usize,
    /// Whether the integer type is signed: `true` for `iN`, `false` for `uN`.
    pub signed: bool,
}
/// Compile-time type information about floating-point types.
#[derive(Debug)]
#[non_exhaustive]
#[unstable(feature = "type_info", issue = "146922")]
pub struct Float {
    /// The bit width of the floating-point type, e.g. 32 for `f32`.
    pub bit_width: usize,
}
/// Compile-time type information about the string slice type `str`.
#[derive(Debug)]
#[non_exhaustive]
#[unstable(feature = "type_info", issue = "146922")]
pub struct Str {
    // No additional information to provide for now.
    // `#[non_exhaustive]` leaves room to add fields later without breaking users.
}

View file

@ -38,7 +38,7 @@ fn test_tuples() {
assert_tuple_arity::<(u8, u8), 2>();
const {
match Type::of::<(u8, u8)>().kind {
match Type::of::<(i8, u8)>().kind {
TypeKind::Tuple(tup) => {
let [a, b] = tup.fields else { unreachable!() };
@ -46,7 +46,10 @@ fn test_tuples() {
assert!(b.offset == 1);
match (a.ty.info().kind, b.ty.info().kind) {
(TypeKind::Leaf, TypeKind::Leaf) => {}
(TypeKind::Int(a), TypeKind::Int(b)) => {
assert!(a.bit_width == 8 && a.signed);
assert!(b.bit_width == 8 && !b.signed);
}
_ => unreachable!(),
}
}
@ -54,3 +57,41 @@ fn test_tuples() {
}
}
}
#[test]
fn test_primitives() {
    use TypeKind::*;

    // `bool` carries no extra metadata but has a known 1-byte size.
    let Type { kind: Bool(_info), size, .. } = (const { Type::of::<bool>() }) else { panic!() };
    assert_eq!(size, Some(1));

    // `char` is a 4-byte Unicode scalar value.
    let Type { kind: Char(_info), size, .. } = (const { Type::of::<char>() }) else { panic!() };
    assert_eq!(size, Some(4));

    // Signed integers report their exact bit width and `signed == true`.
    let Type { kind: Int(int_info), size, .. } = (const { Type::of::<i32>() }) else { panic!() };
    assert_eq!(size, Some(4));
    assert!(int_info.bit_width == 32 && int_info.signed);

    // `isize` mirrors the target pointer width.
    let Type { kind: Int(int_info), size, .. } = (const { Type::of::<isize>() }) else { panic!() };
    assert_eq!(size, Some(size_of::<isize>()));
    assert!(int_info.bit_width == size_of::<isize>() * 8 && int_info.signed);

    // Unsigned integers are distinguished only by `signed == false`.
    let Type { kind: Int(int_info), size, .. } = (const { Type::of::<u32>() }) else { panic!() };
    assert_eq!(size, Some(4));
    assert!(int_info.bit_width == 32 && !int_info.signed);

    // `usize` mirrors the target pointer width, unsigned.
    let Type { kind: Int(int_info), size, .. } = (const { Type::of::<usize>() }) else { panic!() };
    assert_eq!(size, Some(size_of::<usize>()));
    assert!(int_info.bit_width == size_of::<usize>() * 8 && !int_info.signed);

    // Floats report only their bit width.
    let Type { kind: Float(float_info), size, .. } = (const { Type::of::<f32>() }) else { panic!() };
    assert_eq!(size, Some(4));
    assert_eq!(float_info.bit_width, 32);

    // `str` is unsized, so it has no statically known size.
    let Type { kind: Str(_info), size, .. } = (const { Type::of::<str>() }) else { panic!() };
    assert_eq!(size, None);
}

View file

@ -0,0 +1,15 @@
extern crate proc_macro;
use proc_macro::TokenStream;
/// Derive macro used by incremental-compilation tests: it ignores its input
/// entirely and always emits the same fixed module, so repeated expansions
/// produce byte-identical output.
#[proc_macro_derive(Nothing)]
pub fn derive(_input: TokenStream) -> TokenStream {
    // The emitted item is a constant source snippet; parse it into tokens.
    let generated = r#"
pub mod nothing_mod {
    pub fn nothing() {
        eprintln!("nothing");
    }
}
"#;
    generated.parse().unwrap()
}

View file

@ -0,0 +1,18 @@
// This test tests that derive proc macro execution is cached.
//@ proc-macro:derive_nothing.rs
//@ revisions:rpass1 rpass2
//@ compile-flags: -Zquery-dep-graph -Zcache-proc-macros
//@ ignore-backends: gcc
#![feature(rustc_attrs)]
#[macro_use]
extern crate derive_nothing;
#[cfg(any(rpass1, rpass2))]
#[rustc_clean(cfg = "rpass2", loaded_from_disk = "derive_macro_expansion")]
#[derive(Nothing)]
pub struct Foo;
fn main() {}

View file

@ -19,7 +19,7 @@
debug _enum_without_variants => const [ZeroSized: Empty];
let _9: main::Str<"<22><><EFBFBD>">;
scope 4 {
debug _non_utf8_str => const Str::<"<22><><EFBFBD>">;
debug _non_utf8_str => const main::Str::<"<22><><EFBFBD>">;
}
}
}

View file

@ -21,7 +21,7 @@
let _9: main::Str<"<22><><EFBFBD>">;
scope 4 {
- debug _non_utf8_str => _9;
+ debug _non_utf8_str => const Str::<"<22><><EFBFBD>">;
+ debug _non_utf8_str => const main::Str::<"<22><><EFBFBD>">;
}
}
}

View file

@ -1,3 +1,5 @@
//~vv HELP consider importing this struct
type Real = double;
//~^ ERROR cannot find type `double` in this scope
//~| HELP perhaps you intended to use this type

View file

@ -1,5 +1,5 @@
error[E0425]: cannot find type `double` in this scope
--> $DIR/recommend-literal.rs:1:13
--> $DIR/recommend-literal.rs:3:13
|
LL | type Real = double;
| ^^^^^^
@ -8,7 +8,7 @@ LL | type Real = double;
| help: perhaps you intended to use this type: `f64`
error[E0425]: cannot find type `long` in this scope
--> $DIR/recommend-literal.rs:7:12
--> $DIR/recommend-literal.rs:9:12
|
LL | let y: long = 74802374902374923;
| ^^^^
@ -17,7 +17,7 @@ LL | let y: long = 74802374902374923;
| help: perhaps you intended to use this type: `i64`
error[E0425]: cannot find type `Boolean` in this scope
--> $DIR/recommend-literal.rs:10:13
--> $DIR/recommend-literal.rs:12:13
|
LL | let v1: Boolean = true;
| ^^^^^^^
@ -26,7 +26,7 @@ LL | let v1: Boolean = true;
| help: perhaps you intended to use this type: `bool`
error[E0425]: cannot find type `Bool` in this scope
--> $DIR/recommend-literal.rs:13:13
--> $DIR/recommend-literal.rs:15:13
|
LL | let v2: Bool = true;
| ^^^^
@ -41,9 +41,13 @@ help: perhaps you intended to use this type
LL - let v2: Bool = true;
LL + let v2: bool = true;
|
help: consider importing this struct
|
LL + use std::mem::type_info::Bool;
|
error[E0425]: cannot find type `boolean` in this scope
--> $DIR/recommend-literal.rs:19:9
--> $DIR/recommend-literal.rs:21:9
|
LL | fn z(a: boolean) {
| ^^^^^^^
@ -52,7 +56,7 @@ LL | fn z(a: boolean) {
| help: perhaps you intended to use this type: `bool`
error[E0425]: cannot find type `byte` in this scope
--> $DIR/recommend-literal.rs:24:11
--> $DIR/recommend-literal.rs:26:11
|
LL | fn a() -> byte {
| ^^^^
@ -61,7 +65,7 @@ LL | fn a() -> byte {
| help: perhaps you intended to use this type: `u8`
error[E0425]: cannot find type `float` in this scope
--> $DIR/recommend-literal.rs:31:12
--> $DIR/recommend-literal.rs:33:12
|
LL | width: float,
| ^^^^^
@ -70,7 +74,7 @@ LL | width: float,
| help: perhaps you intended to use this type: `f32`
error[E0425]: cannot find type `int` in this scope
--> $DIR/recommend-literal.rs:34:19
--> $DIR/recommend-literal.rs:36:19
|
LL | depth: Option<int>,
| ^^^ not found in this scope
@ -86,7 +90,7 @@ LL | struct Data<int> {
| +++++
error[E0425]: cannot find type `short` in this scope
--> $DIR/recommend-literal.rs:40:16
--> $DIR/recommend-literal.rs:42:16
|
LL | impl Stuff for short {}
| ^^^^^

View file

@ -0,0 +1,184 @@
Type {
kind: Tuple(
Tuple {
fields: [
Field {
ty: TypeId(0x0596b48cc04376e64d5c788c2aa46bdb),
offset: 0,
},
Field {
ty: TypeId(0x0596b48cc04376e64d5c788c2aa46bdb),
offset: 1,
},
Field {
ty: TypeId(0x41223169ff28813ba79b7268a2a968d9),
offset: 2,
},
],
},
),
size: Some(
2,
),
}
Type {
kind: Array(
Array {
element_ty: TypeId(0x0596b48cc04376e64d5c788c2aa46bdb),
len: 2,
},
),
size: Some(
2,
),
}
Type {
kind: Int(
Int {
bit_width: 8,
signed: true,
},
),
size: Some(
1,
),
}
Type {
kind: Int(
Int {
bit_width: 32,
signed: true,
},
),
size: Some(
4,
),
}
Type {
kind: Int(
Int {
bit_width: 64,
signed: true,
},
),
size: Some(
8,
),
}
Type {
kind: Int(
Int {
bit_width: 128,
signed: true,
},
),
size: Some(
16,
),
}
Type {
kind: Int(
Int {
bit_width: 32,
signed: true,
},
),
size: Some(
4,
),
}
Type {
kind: Int(
Int {
bit_width: 8,
signed: false,
},
),
size: Some(
1,
),
}
Type {
kind: Int(
Int {
bit_width: 32,
signed: false,
},
),
size: Some(
4,
),
}
Type {
kind: Int(
Int {
bit_width: 64,
signed: false,
},
),
size: Some(
8,
),
}
Type {
kind: Int(
Int {
bit_width: 128,
signed: false,
},
),
size: Some(
16,
),
}
Type {
kind: Int(
Int {
bit_width: 32,
signed: false,
},
),
size: Some(
4,
),
}
Type {
kind: Other,
size: Some(
4,
),
}
Type {
kind: Other,
size: Some(
12,
),
}
Type {
kind: Other,
size: Some(
8,
),
}
Type {
kind: Other,
size: Some(
8,
),
}
Type {
kind: Other,
size: Some(
8,
),
}
Type {
kind: Str(
Str,
),
size: None,
}
Type {
kind: Other,
size: None,
}

View file

@ -0,0 +1,184 @@
Type {
kind: Tuple(
Tuple {
fields: [
Field {
ty: TypeId(0x0596b48cc04376e64d5c788c2aa46bdb),
offset: 0,
},
Field {
ty: TypeId(0x0596b48cc04376e64d5c788c2aa46bdb),
offset: 1,
},
Field {
ty: TypeId(0x41223169ff28813ba79b7268a2a968d9),
offset: 2,
},
],
},
),
size: Some(
2,
),
}
Type {
kind: Array(
Array {
element_ty: TypeId(0x0596b48cc04376e64d5c788c2aa46bdb),
len: 2,
},
),
size: Some(
2,
),
}
Type {
kind: Int(
Int {
bit_width: 8,
signed: true,
},
),
size: Some(
1,
),
}
Type {
kind: Int(
Int {
bit_width: 32,
signed: true,
},
),
size: Some(
4,
),
}
Type {
kind: Int(
Int {
bit_width: 64,
signed: true,
},
),
size: Some(
8,
),
}
Type {
kind: Int(
Int {
bit_width: 128,
signed: true,
},
),
size: Some(
16,
),
}
Type {
kind: Int(
Int {
bit_width: 64,
signed: true,
},
),
size: Some(
8,
),
}
Type {
kind: Int(
Int {
bit_width: 8,
signed: false,
},
),
size: Some(
1,
),
}
Type {
kind: Int(
Int {
bit_width: 32,
signed: false,
},
),
size: Some(
4,
),
}
Type {
kind: Int(
Int {
bit_width: 64,
signed: false,
},
),
size: Some(
8,
),
}
Type {
kind: Int(
Int {
bit_width: 128,
signed: false,
},
),
size: Some(
16,
),
}
Type {
kind: Int(
Int {
bit_width: 64,
signed: false,
},
),
size: Some(
8,
),
}
Type {
kind: Other,
size: Some(
4,
),
}
Type {
kind: Other,
size: Some(
24,
),
}
Type {
kind: Other,
size: Some(
16,
),
}
Type {
kind: Other,
size: Some(
16,
),
}
Type {
kind: Other,
size: Some(
16,
),
}
Type {
kind: Str(
Str,
),
size: None,
}
Type {
kind: Other,
size: None,
}

View file

@ -1,6 +1,11 @@
#![feature(type_info)]
// Some types whose length depends on the target pointer length will be dumped.
//@ revisions: bit32 bit64
//@[bit32] only-32bit
//@[bit64] only-64bit
//@ run-pass
//@ check-run-results
#![feature(type_info)]
#![allow(dead_code)]
use std::mem::type_info::Type;
@ -20,14 +25,20 @@ struct Unsized {
s: str,
}
fn main() {
println!("{:#?}", const { Type::of::<(u8, u8, ())>() }.kind);
println!("{:#?}", const { Type::of::<[u8; 2]>() }.kind);
println!("{:#?}", const { Type::of::<Foo>() }.kind);
println!("{:#?}", const { Type::of::<Bar>() }.kind);
println!("{:#?}", const { Type::of::<&Unsized>() }.kind);
println!("{:#?}", const { Type::of::<&str>() }.kind);
println!("{:#?}", const { Type::of::<&[u8]>() }.kind);
println!("{:#?}", const { Type::of::<str>() }.kind);
println!("{:#?}", const { Type::of::<[u8]>() }.kind);
macro_rules! dump_types {
($($ty:ty),+ $(,)?) => {
$(println!("{:#?}", const { Type::of::<$ty>() });)+
};
}
fn main() {
dump_types! {
(u8, u8, ()),
[u8; 2],
i8, i32, i64, i128, isize,
u8, u32, u64, u128, usize,
Foo, Bar,
&Unsized, &str, &[u8],
str, [u8],
}
}

View file

@ -1,31 +0,0 @@
Tuple(
Tuple {
fields: [
Field {
ty: TypeId(0x0596b48cc04376e64d5c788c2aa46bdb),
offset: 0,
},
Field {
ty: TypeId(0x0596b48cc04376e64d5c788c2aa46bdb),
offset: 1,
},
Field {
ty: TypeId(0x41223169ff28813ba79b7268a2a968d9),
offset: 2,
},
],
},
)
Array(
Array {
element_ty: TypeId(0x0596b48cc04376e64d5c788c2aa46bdb),
len: 2,
},
)
Other
Other
Other
Other
Other
Other
Other

View file

@ -0,0 +1,24 @@
//~ ERROR: cycle detected when getting HIR ID of `Default`
trait Default {
type Id;
fn intu(&self) -> &Self::Id;
}
impl<T: Default<Id = U>, U: Copy> Default for U {
default type Id = T;
fn intu(&self) -> &Self::Id {
self
}
}
fn specialization<T>(t: T) -> U {
*t.intu()
}
use std::num::NonZero;
fn main() {
let assert_eq = NonZero::<u8, Option<NonZero<u8>>>(0);
assert_eq!(specialization, None);
}

View file

@ -0,0 +1,12 @@
error[E0391]: cycle detected when getting HIR ID of `Default`
|
= note: ...which requires getting the crate HIR...
= note: ...which requires perform lints prior to AST lowering...
= note: ...which requires looking up span for `Default`...
= note: ...which again requires getting HIR ID of `Default`, completing the cycle
= note: cycle used when getting the resolver for lowering
= note: see https://rustc-dev-guide.rust-lang.org/overview.html#queries and https://rustc-dev-guide.rust-lang.org/query.html for more information
error: aborting due to 1 previous error
For more information about this error, try `rustc --explain E0391`.

View file

@ -21,14 +21,14 @@ LL | .collect::<String>();
|
= help: the trait `FromIterator<()>` is not implemented for `String`
= help: the following other types implement trait `FromIterator<A>`:
`String` implements `FromIterator<&Char>`
`String` implements `FromIterator<&char>`
`String` implements `FromIterator<&std::ascii::Char>`
`String` implements `FromIterator<&str>`
`String` implements `FromIterator<Box<str, A>>`
`String` implements `FromIterator<Char>`
`String` implements `FromIterator<Cow<'_, str>>`
`String` implements `FromIterator<String>`
`String` implements `FromIterator<char>`
`String` implements `FromIterator<std::ascii::Char>`
note: the method call chain might not have had the expected associated types
--> $DIR/semi-suggestion-when-stmt-and-expr-span-equal.rs:20:10
|

View file

@ -44,10 +44,10 @@ LL | let ips: Vec<_> = (0..100_000).map(|_| u32::from(0u32.into())).collect(
| type must be known at this point
|
= note: multiple `impl`s satisfying `u32: From<_>` found in the `core` crate:
- impl From<Char> for u32;
- impl From<Ipv4Addr> for u32;
- impl From<bool> for u32;
- impl From<char> for u32;
- impl From<std::ascii::Char> for u32;
- impl From<u16> for u32;
- impl From<u8> for u32;
help: try using a fully qualified path to specify the expected types

View file

@ -18,7 +18,7 @@ help: the following other types implement trait `From<T>`
= note: in this macro invocation
--> $SRC_DIR/core/src/ascii/ascii_char.rs:LL:COL
|
= note: `u8` implements `From<Char>`
= note: `u8` implements `From<std::ascii::Char>`
::: $SRC_DIR/core/src/ascii/ascii_char.rs:LL:COL
|
= note: in this macro invocation

View file

@ -1458,7 +1458,6 @@ compiler = [
"@jieyouxu",
"@jdonszelmann",
"@JonathanBrouwer",
"@lcnr",
"@madsmtm",
"@mati865",
"@Nadrieril",