De-genericize the dep graph.

By removing the generic `D` parameter from `DepGraph`, `DepGraphData`,
`CurrentDepGraph`, `SerializedDepGraph`, `SerializedNodeHeader`, and
`EncoderState`.
This commit is contained in:
Nicholas Nethercote 2026-02-12 09:37:42 +11:00
parent 32e6a1a0ab
commit 1d83208683
6 changed files with 88 additions and 100 deletions

View file

@ -25,13 +25,14 @@ use {super::debug::EdgeFilter, std::env};
use super::query::DepGraphQuery;
use super::serialized::{GraphEncoder, SerializedDepGraph, SerializedDepNodeIndex};
use super::{DepContext, DepKind, DepNode, Deps, HasDepContext, WorkProductId};
use super::{DepContext, DepKind, DepNode, Deps, DepsType, HasDepContext, WorkProductId};
use crate::dep_graph::edges::EdgesVec;
use crate::ty::TyCtxt;
use crate::verify_ich::incremental_verify_ich;
pub struct DepGraph<D: Deps> {
data: Option<Arc<DepGraphData<D>>>,
#[derive(Clone)]
pub struct DepGraph {
data: Option<Arc<DepGraphData>>,
/// This field is used for assigning DepNodeIndices when running in
/// non-incremental mode. Even in non-incremental mode we make sure that
@ -40,17 +41,6 @@ pub struct DepGraph<D: Deps> {
virtual_dep_node_index: Arc<AtomicU32>,
}
/// Manual clone impl that does not require `D: Clone`.
impl<D: Deps> Clone for DepGraph<D> {
fn clone(&self) -> Self {
let Self { data, virtual_dep_node_index } = self;
Self {
data: Option::<Arc<_>>::clone(data),
virtual_dep_node_index: Arc::clone(virtual_dep_node_index),
}
}
}
rustc_index::newtype_index! {
pub struct DepNodeIndex {}
}
@ -84,12 +74,12 @@ pub(super) enum DepNodeColor {
Unknown,
}
pub struct DepGraphData<D: Deps> {
pub struct DepGraphData {
/// The new encoding of the dependency graph, optimized for red/green
/// tracking. The `current` field is the dependency graph of only the
/// current compilation session: We don't merge the previous dep-graph into
/// current one anymore, but we do reference shared data to save space.
current: CurrentDepGraph<D>,
current: CurrentDepGraph,
/// The dep-graph from the previous compilation session. It contains all
/// nodes and edges as well as all fingerprints of nodes that have them.
@ -120,13 +110,13 @@ where
stable_hasher.finish()
}
impl<D: Deps> DepGraph<D> {
impl DepGraph {
pub fn new(
session: &Session,
prev_graph: Arc<SerializedDepGraph>,
prev_work_products: WorkProductMap,
encoder: FileEncoder,
) -> DepGraph<D> {
) -> DepGraph {
let prev_graph_node_count = prev_graph.node_count();
let current =
@ -136,7 +126,7 @@ impl<D: Deps> DepGraph<D> {
// Instantiate a node with zero dependencies only once for anonymous queries.
let _green_node_index = current.alloc_new_node(
DepNode { kind: D::DEP_KIND_ANON_ZERO_DEPS, hash: current.anon_id_seed.into() },
DepNode { kind: DepsType::DEP_KIND_ANON_ZERO_DEPS, hash: current.anon_id_seed.into() },
EdgesVec::new(),
Fingerprint::ZERO,
);
@ -144,7 +134,7 @@ impl<D: Deps> DepGraph<D> {
// Instantiate a dependency-less red node only once for anonymous queries.
let red_node_index = current.alloc_new_node(
DepNode { kind: D::DEP_KIND_RED, hash: Fingerprint::ZERO.into() },
DepNode { kind: DepsType::DEP_KIND_RED, hash: Fingerprint::ZERO.into() },
EdgesVec::new(),
Fingerprint::ZERO,
);
@ -168,12 +158,12 @@ impl<D: Deps> DepGraph<D> {
}
}
pub fn new_disabled() -> DepGraph<D> {
pub fn new_disabled() -> DepGraph {
DepGraph { data: None, virtual_dep_node_index: Arc::new(AtomicU32::new(0)) }
}
#[inline]
pub fn data(&self) -> Option<&DepGraphData<D>> {
pub fn data(&self) -> Option<&DepGraphData> {
self.data.as_deref()
}
@ -191,7 +181,7 @@ impl<D: Deps> DepGraph<D> {
pub fn assert_ignored(&self) {
if let Some(..) = self.data {
D::read_deps(|task_deps| {
DepsType::read_deps(|task_deps| {
assert_matches!(
task_deps,
TaskDepsRef::Ignore,
@ -205,7 +195,7 @@ impl<D: Deps> DepGraph<D> {
where
OP: FnOnce() -> R,
{
D::with_deps(TaskDepsRef::Ignore, op)
DepsType::with_deps(TaskDepsRef::Ignore, op)
}
/// Used to wrap the deserialization of a query result from disk,
@ -258,11 +248,11 @@ impl<D: Deps> DepGraph<D> {
where
OP: FnOnce() -> R,
{
D::with_deps(TaskDepsRef::Forbid, op)
DepsType::with_deps(TaskDepsRef::Forbid, op)
}
#[inline(always)]
pub fn with_task<Ctxt: HasDepContext<Deps = D>, A: Debug, R>(
pub fn with_task<Ctxt: HasDepContext<Deps = DepsType>, A: Debug, R>(
&self,
key: DepNode,
cx: Ctxt,
@ -276,7 +266,7 @@ impl<D: Deps> DepGraph<D> {
}
}
pub fn with_anon_task<Tcx: DepContext<Deps = D>, OP, R>(
pub fn with_anon_task<Tcx: DepContext<Deps = DepsType>, OP, R>(
&self,
cx: Tcx,
dep_kind: DepKind,
@ -296,7 +286,7 @@ impl<D: Deps> DepGraph<D> {
}
}
impl<D: Deps> DepGraphData<D> {
impl DepGraphData {
/// Starts a new dep-graph task. Dep-graph tasks are specified
/// using a free function (`task`) and **not** a closure -- this
/// is intentional because we want to exercise tight control over
@ -325,7 +315,7 @@ impl<D: Deps> DepGraphData<D> {
///
/// [rustc dev guide]: https://rustc-dev-guide.rust-lang.org/queries/incremental-compilation.html
#[inline(always)]
pub fn with_task<Ctxt: HasDepContext<Deps = D>, A: Debug, R>(
pub fn with_task<Ctxt: HasDepContext<Deps = DepsType>, A: Debug, R>(
&self,
key: DepNode,
cx: Ctxt,
@ -350,7 +340,7 @@ impl<D: Deps> DepGraphData<D> {
},
);
let with_deps = |task_deps| D::with_deps(task_deps, || task(cx, arg));
let with_deps = |task_deps| DepsType::with_deps(task_deps, || task(cx, arg));
let (result, edges) = if cx.dep_context().is_eval_always(key.kind) {
(with_deps(TaskDepsRef::EvalAlways), EdgesVec::new())
} else {
@ -379,7 +369,7 @@ impl<D: Deps> DepGraphData<D> {
/// FIXME: This could perhaps return a `WithDepNode` to ensure that the
/// user of this function actually performs the read; we'll have to see
/// how to make that work with `anon` in `execute_job_incr`, though.
pub fn with_anon_task_inner<Tcx: DepContext<Deps = D>, OP, R>(
pub fn with_anon_task_inner<Tcx: DepContext<Deps = DepsType>, OP, R>(
&self,
cx: Tcx,
dep_kind: DepKind,
@ -397,7 +387,7 @@ impl<D: Deps> DepGraphData<D> {
None,
128,
));
let result = D::with_deps(TaskDepsRef::Allow(&task_deps), op);
let result = DepsType::with_deps(TaskDepsRef::Allow(&task_deps), op);
let task_deps = task_deps.into_inner();
let reads = task_deps.reads;
@ -448,7 +438,7 @@ impl<D: Deps> DepGraphData<D> {
}
/// Intern the new `DepNode` with the dependencies up-to-now.
fn hash_result_and_alloc_node<Ctxt: DepContext<Deps = D>, R>(
fn hash_result_and_alloc_node<Ctxt: DepContext<Deps = DepsType>, R>(
&self,
cx: &Ctxt,
node: DepNode,
@ -466,11 +456,11 @@ impl<D: Deps> DepGraphData<D> {
}
}
impl<D: Deps> DepGraph<D> {
impl DepGraph {
#[inline]
pub fn read_index(&self, dep_node_index: DepNodeIndex) {
if let Some(ref data) = self.data {
D::read_deps(|task_deps| {
DepsType::read_deps(|task_deps| {
let mut task_deps = match task_deps {
TaskDepsRef::Allow(deps) => deps.lock(),
TaskDepsRef::EvalAlways => {
@ -527,7 +517,7 @@ impl<D: Deps> DepGraph<D> {
#[inline]
pub fn record_diagnostic<'tcx>(&self, tcx: TyCtxt<'tcx>, diagnostic: &DiagInner) {
if let Some(ref data) = self.data {
D::read_deps(|task_deps| match task_deps {
DepsType::read_deps(|task_deps| match task_deps {
TaskDepsRef::EvalAlways | TaskDepsRef::Ignore => return,
TaskDepsRef::Forbid | TaskDepsRef::Allow(..) => {
self.read_index(data.encode_diagnostic(tcx, diagnostic));
@ -563,7 +553,7 @@ impl<D: Deps> DepGraph<D> {
/// FIXME: If the code is changed enough for this node to be marked before requiring the
/// caller's node, we suppose that those changes will be enough to mark this node red and
/// force a recomputation using the "normal" way.
pub fn with_feed_task<Ctxt: DepContext<Deps = D>, R>(
pub fn with_feed_task<Ctxt: DepContext<Deps = DepsType>, R>(
&self,
node: DepNode,
cx: Ctxt,
@ -604,7 +594,7 @@ impl<D: Deps> DepGraph<D> {
}
let mut edges = EdgesVec::new();
D::read_deps(|task_deps| match task_deps {
DepsType::read_deps(|task_deps| match task_deps {
TaskDepsRef::Allow(deps) => edges.extend(deps.lock().reads.iter().copied()),
TaskDepsRef::EvalAlways => {
edges.push(DepNodeIndex::FOREVER_RED_NODE);
@ -626,7 +616,7 @@ impl<D: Deps> DepGraph<D> {
}
}
impl<D: Deps> DepGraphData<D> {
impl DepGraphData {
fn assert_dep_node_not_yet_allocated_in_current_session<S: std::fmt::Display>(
&self,
sess: &Session,
@ -688,7 +678,7 @@ impl<D: Deps> DepGraphData<D> {
// Use `send_new` so we get an unique index, even though the dep node is not.
let dep_node_index = self.current.encoder.send_new(
DepNode {
kind: D::DEP_KIND_SIDE_EFFECT,
kind: DepsType::DEP_KIND_SIDE_EFFECT,
hash: PackedFingerprint::from(Fingerprint::ZERO),
},
Fingerprint::ZERO,
@ -705,7 +695,7 @@ impl<D: Deps> DepGraphData<D> {
/// refer to a node created using `encode_diagnostic` in the previous session.
#[inline]
fn force_diagnostic_node<'tcx>(&self, tcx: TyCtxt<'tcx>, prev_index: SerializedDepNodeIndex) {
D::with_deps(TaskDepsRef::Ignore, || {
DepsType::with_deps(TaskDepsRef::Ignore, || {
let side_effect = tcx.load_side_effect(prev_index).unwrap();
match &side_effect {
@ -721,7 +711,7 @@ impl<D: Deps> DepGraphData<D> {
prev_index,
&self.colors,
DepNode {
kind: D::DEP_KIND_SIDE_EFFECT,
kind: DepsType::DEP_KIND_SIDE_EFFECT,
hash: PackedFingerprint::from(Fingerprint::ZERO),
},
Fingerprint::ZERO,
@ -799,7 +789,7 @@ impl<D: Deps> DepGraphData<D> {
}
}
impl<D: Deps> DepGraph<D> {
impl DepGraph {
/// Checks whether a previous work product exists for `v` and, if
/// so, return the path that leads to it. Used to skip doing work.
pub fn previous_work_product(&self, v: &WorkProductId) -> Option<WorkProduct> {
@ -864,7 +854,7 @@ impl<D: Deps> DepGraph<D> {
}
}
impl<D: Deps> DepGraphData<D> {
impl DepGraphData {
/// Try to mark a node index for the node dep_node.
///
/// A node will have an index, when it's already been marked green, or when we can mark it
@ -1029,7 +1019,7 @@ impl<D: Deps> DepGraphData<D> {
}
}
impl<D: Deps> DepGraph<D> {
impl DepGraph {
/// Returns true if the given node has been marked as red during the
/// current compilation session. Used in various assertions
pub fn is_red(&self, dep_node: &DepNode) -> bool {
@ -1163,8 +1153,8 @@ rustc_index::newtype_index! {
/// `anon_node_to_index` and `data`, or `prev_index_to_index` and `data`. When
/// manipulating both, we acquire `anon_node_to_index` or `prev_index_to_index`
/// first, and `data` second.
pub(super) struct CurrentDepGraph<D: Deps> {
encoder: GraphEncoder<D>,
pub(super) struct CurrentDepGraph {
encoder: GraphEncoder,
anon_node_to_index: ShardedHashMap<DepNode, DepNodeIndex>,
/// This is used to verify that fingerprints do not change between the creation of a node
@ -1202,7 +1192,7 @@ pub(super) struct CurrentDepGraph<D: Deps> {
pub(super) total_duplicate_read_count: AtomicU64,
}
impl<D: Deps> CurrentDepGraph<D> {
impl CurrentDepGraph {
fn new(
session: &Session,
prev_graph_node_count: usize,
@ -1436,7 +1426,7 @@ impl DepNodeColorMap {
#[inline(never)]
#[cold]
pub(crate) fn print_markframe_trace<D: Deps>(graph: &DepGraph<D>, frame: &MarkFrame<'_>) {
pub(crate) fn print_markframe_trace(graph: &DepGraph, frame: &MarkFrame<'_>) {
let data = graph.data.as_ref().unwrap();
eprintln!("there was a panic while trying to force a dep node");
@ -1456,7 +1446,7 @@ pub(crate) fn print_markframe_trace<D: Deps>(graph: &DepGraph<D>, frame: &MarkFr
#[cold]
#[inline(never)]
fn panic_on_forbidden_read<D: Deps>(data: &DepGraphData<D>, dep_node_index: DepNodeIndex) -> ! {
fn panic_on_forbidden_read(data: &DepGraphData, dep_node_index: DepNodeIndex) -> ! {
// We have to do an expensive reverse-lookup of the DepNode that
// corresponds to `dep_node_index`, but that's OK since we are about
// to ICE anyway.

View file

@ -10,7 +10,7 @@ pub use self::dep_node::{
DepKind, DepNode, DepNodeKey, WorkProductId, dep_kind_from_label, dep_kinds, label_strs,
};
pub use self::graph::{
DepGraphData, DepNodeIndex, TaskDepsRef, WorkProduct, WorkProductMap, hash_result,
DepGraph, DepGraphData, DepNodeIndex, TaskDepsRef, WorkProduct, WorkProductMap, hash_result,
};
use self::graph::{MarkFrame, print_markframe_trace};
pub use self::query::DepGraphQuery;
@ -34,7 +34,7 @@ pub trait DepContext: Copy {
fn with_stable_hashing_context<R>(self, f: impl FnOnce(StableHashingContext<'_>) -> R) -> R;
/// Access the DepGraph.
fn dep_graph(&self) -> &graph::DepGraph<Self::Deps>;
fn dep_graph(&self) -> &DepGraph;
/// Access the profiler.
fn profiler(&self) -> &SelfProfilerRef;
@ -179,8 +179,6 @@ impl FingerprintStyle {
}
}
pub type DepGraph = graph::DepGraph<DepsType>;
pub type DepKindVTable<'tcx> = dep_node::DepKindVTable<TyCtxt<'tcx>>;
pub struct DepsType;

View file

@ -41,7 +41,6 @@
use std::cell::RefCell;
use std::cmp::max;
use std::marker::PhantomData;
use std::sync::Arc;
use std::sync::atomic::Ordering;
use std::{iter, mem, u64};
@ -62,6 +61,7 @@ use tracing::{debug, instrument};
use super::graph::{CurrentDepGraph, DepNodeColorMap};
use super::query::DepGraphQuery;
use super::{DepKind, DepNode, DepNodeIndex, Deps};
use crate::dep_graph::DepsType;
use crate::dep_graph::edges::EdgesVec;
// The maximum value of `SerializedDepNodeIndex` leaves the upper two bits
@ -192,7 +192,7 @@ fn mask(bits: usize) -> usize {
impl SerializedDepGraph {
#[instrument(level = "debug", skip(d))]
pub fn decode<D: Deps>(d: &mut MemDecoder<'_>) -> Arc<SerializedDepGraph> {
pub fn decode(d: &mut MemDecoder<'_>) -> Arc<SerializedDepGraph> {
// The last 16 bytes are the node count and edge count.
debug!("position: {:?}", d.position());
@ -213,7 +213,10 @@ impl SerializedDepGraph {
let graph_bytes = d.len() - (3 * IntEncodedWithFixedSize::ENCODED_SIZE) - d.position();
let mut nodes = IndexVec::from_elem_n(
DepNode { kind: D::DEP_KIND_NULL, hash: PackedFingerprint::from(Fingerprint::ZERO) },
DepNode {
kind: DepsType::DEP_KIND_NULL,
hash: PackedFingerprint::from(Fingerprint::ZERO),
},
node_max,
);
let mut fingerprints = IndexVec::from_elem_n(Fingerprint::ZERO, node_max);
@ -230,19 +233,21 @@ impl SerializedDepGraph {
// least (34 byte header + 1 byte len + 64 bytes edge data), which is ~1%. A 2-byte leb128
// length is about the same fractional overhead and it amortizes for yet greater lengths.
let mut edge_list_data =
Vec::with_capacity(graph_bytes - node_count * size_of::<SerializedNodeHeader<D>>());
Vec::with_capacity(graph_bytes - node_count * size_of::<SerializedNodeHeader>());
for _ in 0..node_count {
// Decode the header for this edge; the header packs together as many of the fixed-size
// fields as possible to limit the number of times we update decoder state.
let node_header =
SerializedNodeHeader::<D> { bytes: d.read_array(), _marker: PhantomData };
let node_header = SerializedNodeHeader { bytes: d.read_array() };
let index = node_header.index();
let node = &mut nodes[index];
// Make sure there's no duplicate indices in the dep graph.
assert!(node_header.node().kind != D::DEP_KIND_NULL && node.kind == D::DEP_KIND_NULL);
assert!(
node_header.node().kind != DepsType::DEP_KIND_NULL
&& node.kind == DepsType::DEP_KIND_NULL
);
*node = node_header.node();
fingerprints[index] = node_header.fingerprint();
@ -270,7 +275,7 @@ impl SerializedDepGraph {
edge_list_data.extend(&[0u8; DEP_NODE_PAD]);
// Read the number of each dep kind and use it to create a hash map with a suitable size.
let mut index: Vec<_> = (0..(D::DEP_KIND_MAX + 1))
let mut index: Vec<_> = (0..(DepsType::DEP_KIND_MAX + 1))
.map(|_| UnhashMap::with_capacity_and_hasher(d.read_u32() as usize, Default::default()))
.collect();
@ -279,8 +284,10 @@ impl SerializedDepGraph {
for (idx, node) in nodes.iter_enumerated() {
if index[node.kind.as_usize()].insert(node.hash, idx).is_some() {
// Empty nodes and side effect nodes can have duplicates
if node.kind != D::DEP_KIND_NULL && node.kind != D::DEP_KIND_SIDE_EFFECT {
let name = D::name(node.kind);
if node.kind != DepsType::DEP_KIND_NULL
&& node.kind != DepsType::DEP_KIND_SIDE_EFFECT
{
let name = DepsType::name(node.kind);
panic!(
"Error: A dep graph node ({name}) does not have an unique index. \
Running a clean build on a nightly compiler with `-Z incremental-verify-ich` \
@ -310,13 +317,12 @@ impl SerializedDepGraph {
/// * The `DepKind`'s discriminant (a u16, but not all bits are used...)
/// * The byte width of the encoded edges for this node
/// * In whatever bits remain, the length of the edge list for this node, if it fits
struct SerializedNodeHeader<D> {
struct SerializedNodeHeader {
// 2 bytes for the DepNode
// 4 bytes for the index
// 16 for Fingerprint in DepNode
// 16 for Fingerprint in NodeInfo
bytes: [u8; 38],
_marker: PhantomData<D>,
}
// The fields of a `SerializedNodeHeader`, this struct is an implementation detail and exists only
@ -337,11 +343,11 @@ struct Unpacked {
// 0..M length of the edge
// M..M+N bytes per index
// M+N..16 kind
impl<D: Deps> SerializedNodeHeader<D> {
impl SerializedNodeHeader {
const TOTAL_BITS: usize = size_of::<DepKind>() * 8;
const LEN_BITS: usize = Self::TOTAL_BITS - Self::KIND_BITS - Self::WIDTH_BITS;
const WIDTH_BITS: usize = DEP_NODE_WIDTH_BITS;
const KIND_BITS: usize = Self::TOTAL_BITS - D::DEP_KIND_MAX.leading_zeros() as usize;
const KIND_BITS: usize = Self::TOTAL_BITS - DepsType::DEP_KIND_MAX.leading_zeros() as usize;
const MAX_INLINE_LEN: usize = (u16::MAX as usize >> (Self::TOTAL_BITS - Self::LEN_BITS)) - 1;
#[inline]
@ -377,14 +383,14 @@ impl<D: Deps> SerializedNodeHeader<D> {
#[cfg(debug_assertions)]
{
let res = Self { bytes, _marker: PhantomData };
let res = Self { bytes };
assert_eq!(fingerprint, res.fingerprint());
assert_eq!(*node, res.node());
if let Some(len) = res.len() {
assert_eq!(edge_count, len as usize);
}
}
Self { bytes, _marker: PhantomData }
Self { bytes }
}
#[inline]
@ -451,15 +457,10 @@ struct NodeInfo {
}
impl NodeInfo {
fn encode<D: Deps>(&self, e: &mut MemEncoder, index: DepNodeIndex) {
fn encode(&self, e: &mut MemEncoder, index: DepNodeIndex) {
let NodeInfo { ref node, fingerprint, ref edges } = *self;
let header = SerializedNodeHeader::<D>::new(
node,
index,
fingerprint,
edges.max_index(),
edges.len(),
);
let header =
SerializedNodeHeader::new(node, index, fingerprint, edges.max_index(), edges.len());
e.write_array(header.bytes);
if header.len().is_none() {
@ -480,7 +481,7 @@ impl NodeInfo {
/// the previous dep graph and expects all edges to already have a new dep node index assigned.
/// This avoids the overhead of constructing `EdgesVec`, which would be needed to call `encode`.
#[inline]
fn encode_promoted<D: Deps>(
fn encode_promoted(
e: &mut MemEncoder,
node: &DepNode,
index: DepNodeIndex,
@ -496,7 +497,7 @@ impl NodeInfo {
let edge_max =
edges.clone().map(|i| colors.current(i).unwrap().as_u32()).max().unwrap_or(0);
let header = SerializedNodeHeader::<D>::new(node, index, fingerprint, edge_max, edge_count);
let header = SerializedNodeHeader::new(node, index, fingerprint, edge_max, edge_count);
e.write_array(header.bytes);
if header.len().is_none() {
@ -543,16 +544,15 @@ struct LocalEncoderResult {
kind_stats: Vec<u32>,
}
struct EncoderState<D: Deps> {
struct EncoderState {
next_node_index: AtomicU64,
previous: Arc<SerializedDepGraph>,
file: Lock<Option<FileEncoder>>,
local: WorkerLocal<RefCell<LocalEncoderState>>,
stats: Option<Lock<FxHashMap<DepKind, Stat>>>,
marker: PhantomData<D>,
}
impl<D: Deps> EncoderState<D> {
impl EncoderState {
fn new(encoder: FileEncoder, record_stats: bool, previous: Arc<SerializedDepGraph>) -> Self {
Self {
previous,
@ -566,10 +566,9 @@ impl<D: Deps> EncoderState<D> {
edge_count: 0,
node_count: 0,
encoder: MemEncoder::new(),
kind_stats: iter::repeat_n(0, D::DEP_KIND_MAX as usize + 1).collect(),
kind_stats: iter::repeat_n(0, DepsType::DEP_KIND_MAX as usize + 1).collect(),
})
}),
marker: PhantomData,
}
}
@ -658,7 +657,7 @@ impl<D: Deps> EncoderState<D> {
record_graph: &Option<Lock<DepGraphQuery>>,
local: &mut LocalEncoderState,
) {
node.encode::<D>(&mut local.encoder, index);
node.encode(&mut local.encoder, index);
self.flush_mem_encoder(&mut *local);
self.record(
&node.node,
@ -687,7 +686,7 @@ impl<D: Deps> EncoderState<D> {
) {
let node = self.previous.index_to_node(prev_index);
let fingerprint = self.previous.fingerprint_by_index(prev_index);
let edge_count = NodeInfo::encode_promoted::<D>(
let edge_count = NodeInfo::encode_promoted(
&mut local.encoder,
node,
index,
@ -712,7 +711,7 @@ impl<D: Deps> EncoderState<D> {
);
}
fn finish(&self, profiler: &SelfProfilerRef, current: &CurrentDepGraph<D>) -> FileEncodeResult {
fn finish(&self, profiler: &SelfProfilerRef, current: &CurrentDepGraph) -> FileEncodeResult {
// Prevent more indices from being allocated.
self.next_node_index.store(u32::MAX as u64 + 1, Ordering::SeqCst);
@ -735,7 +734,8 @@ impl<D: Deps> EncoderState<D> {
let mut encoder = self.file.lock().take().unwrap();
let mut kind_stats: Vec<u32> = iter::repeat_n(0, D::DEP_KIND_MAX as usize + 1).collect();
let mut kind_stats: Vec<u32> =
iter::repeat_n(0, DepsType::DEP_KIND_MAX as usize + 1).collect();
let mut node_max = 0;
let mut node_count = 0;
@ -778,7 +778,7 @@ impl<D: Deps> EncoderState<D> {
fn print_incremental_info(
&self,
current: &CurrentDepGraph<D>,
current: &CurrentDepGraph,
total_node_count: usize,
total_edge_count: usize,
) {
@ -835,13 +835,13 @@ impl<D: Deps> EncoderState<D> {
}
}
pub(crate) struct GraphEncoder<D: Deps> {
pub(crate) struct GraphEncoder {
profiler: SelfProfilerRef,
status: EncoderState<D>,
status: EncoderState,
record_graph: Option<Lock<DepGraphQuery>>,
}
impl<D: Deps> GraphEncoder<D> {
impl GraphEncoder {
pub(crate) fn new(
sess: &Session,
encoder: FileEncoder,
@ -945,7 +945,7 @@ impl<D: Deps> GraphEncoder<D> {
}
}
pub(crate) fn finish(&self, current: &CurrentDepGraph<D>) -> FileEncodeResult {
pub(crate) fn finish(&self, current: &CurrentDepGraph) -> FileEncodeResult {
let _prof_timer = self.profiler.generic_activity("incr_comp_encode_dep_graph_finish");
self.status.finish(&self.profiler, current)

View file

@ -10,7 +10,7 @@ use crate::dep_graph::{DepContext, DepGraphData, SerializedDepNodeIndex};
#[instrument(skip(tcx, dep_graph_data, result, hash_result, format_value), level = "debug")]
pub fn incremental_verify_ich<Tcx, V>(
tcx: Tcx,
dep_graph_data: &DepGraphData<Tcx::Deps>,
dep_graph_data: &DepGraphData,
result: &V,
prev_index: SerializedDepNodeIndex,
hash_result: Option<fn(&mut StableHashingContext<'_>, &V) -> Fingerprint>,