incr.comp.: Cache TypeckTables and add -Zincremental-queries flag.

This commit is contained in:
Michael Woerister 2017-11-14 19:52:49 +01:00
parent 0b1438307e
commit 2c1aeddf27
8 changed files with 95 additions and 18 deletions

View file

@ -327,6 +327,7 @@ impl DepGraph {
}
}
#[inline]
pub fn fingerprint_of(&self, dep_node: &DepNode) -> Fingerprint {
match self.fingerprints.borrow().get(dep_node) {
Some(&fingerprint) => fingerprint,
@ -340,6 +341,11 @@ impl DepGraph {
self.data.as_ref().unwrap().previous.fingerprint_of(dep_node)
}
#[inline]
pub fn prev_dep_node_index_of(&self, dep_node: &DepNode) -> SerializedDepNodeIndex {
self.data.as_ref().unwrap().previous.node_to_index(dep_node)
}
/// Indicates that a previous work product exists for `v`. This is
/// invoked during initial start-up based on what nodes are clean
/// (and what files exist in the incr. directory).

View file

@ -44,6 +44,11 @@ impl PreviousDepGraph {
self.data.nodes[dep_node_index].0
}
#[inline]
pub fn node_to_index(&self, dep_node: &DepNode) -> SerializedDepNodeIndex {
self.index[dep_node]
}
#[inline]
pub fn fingerprint_of(&self, dep_node: &DepNode) -> Option<Fingerprint> {
self.index

View file

@ -184,7 +184,7 @@ impl DefIndexAddressSpace {
/// A DefId identifies a particular *definition*, by combining a crate
/// index and a def index.
#[derive(Clone, Eq, Ord, PartialOrd, PartialEq, RustcEncodable, RustcDecodable, Hash, Copy)]
#[derive(Clone, Eq, Ord, PartialOrd, PartialEq, Hash, Copy)]
pub struct DefId {
pub krate: CrateNum,
pub index: DefIndex,

View file

@ -1042,6 +1042,8 @@ options! {DebuggingOptions, DebuggingSetter, basic_debugging_options,
"enable incremental compilation (experimental)"),
incremental_cc: bool = (false, parse_bool, [UNTRACKED],
"enable cross-crate incremental compilation (even more experimental)"),
incremental_queries: bool = (false, parse_bool, [UNTRACKED],
"enable incremental compilation support for queries (experimental)"),
incremental_info: bool = (false, parse_bool, [UNTRACKED],
"print high-level information about incremental reuse (or the lack thereof)"),
incremental_dump_hash: bool = (false, parse_bool, [UNTRACKED],

View file

@ -8,6 +8,7 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use dep_graph::SerializedDepNodeIndex;
use hir::def_id::{CrateNum, DefId, DefIndex};
use ty::{self, Ty, TyCtxt};
use ty::maps::queries;
@ -25,6 +26,16 @@ pub trait QueryConfig {
pub(super) trait QueryDescription<'tcx>: QueryConfig {
fn describe(tcx: TyCtxt, key: Self::Key) -> String;
fn cache_on_disk(_: Self::Key) -> bool {
false
}
fn load_from_disk<'a>(_: TyCtxt<'a, 'tcx, 'tcx>,
_: SerializedDepNodeIndex)
-> Self::Value {
bug!("QueryDescription::load_from_disk() called for unsupported query.")
}
}
impl<'tcx, M: QueryConfig<Key=DefId>> QueryDescription<'tcx> for M {
@ -538,3 +549,19 @@ impl<'tcx> QueryDescription<'tcx> for queries::fully_normalize_monormophic_ty<'t
format!("normalizing types")
}
}
impl<'tcx> QueryDescription<'tcx> for queries::typeck_tables_of<'tcx> {
#[inline]
fn cache_on_disk(def_id: Self::Key) -> bool {
def_id.is_local()
}
fn load_from_disk<'a>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
id: SerializedDepNodeIndex)
-> Self::Value {
let typeck_tables: ty::TypeckTables<'tcx> = tcx.on_disk_query_result_cache
.load_query_result(tcx, id);
tcx.alloc_tables(typeck_tables)
}
}

View file

@ -32,6 +32,7 @@ use syntax_pos::{BytePos, Span, NO_EXPANSION, DUMMY_SP};
use ty;
use ty::codec::{self as ty_codec, TyDecoder, TyEncoder};
use ty::context::TyCtxt;
use ty::maps::config::QueryDescription;
use ty::subst::Substs;
// Some magic values used for verifying that encoding and decoding are working correctly.
@ -229,9 +230,22 @@ impl<'sess> OnDiskCache<'sess> {
// Encode query results
let query_result_index = EncodedQueryResultIndex::new();
// ... we don't encode anything yet, actually
let mut query_result_index = EncodedQueryResultIndex::new();
// Encode TypeckTables
for (def_id, entry) in tcx.maps.typeck_tables_of.borrow().map.iter() {
if ty::maps::queries::typeck_tables_of::cache_on_disk(*def_id) {
let dep_node = SerializedDepNodeIndex::new(entry.index.index());
// Record position of the cache entry
query_result_index.push((dep_node, encoder.position()));
// Encode the type check tables with the SerializedDepNodeIndex
// as tag.
let typeck_tables: &ty::TypeckTables<'gcx> = &entry.value;
encoder.encode_tagged(dep_node, typeck_tables)?;
}
}
// Encode query result index
let query_result_index_pos = encoder.position() as u64;
@ -522,9 +536,7 @@ impl<'a, 'tcx, 'x> SpecializedDecoder<Span> for CacheDecoder<'a, 'tcx, 'x> {
impl<'a, 'tcx, 'x> SpecializedDecoder<CrateNum> for CacheDecoder<'a, 'tcx, 'x> {
fn specialized_decode(&mut self) -> Result<CrateNum, Self::Error> {
let cnum = CrateNum::from_u32(u32::decode(self)?);
let mapped = self.map_encoded_cnum_to_current(cnum);
Ok(mapped)
ty_codec::decode_cnum(self)
}
}
@ -576,6 +588,8 @@ impl<'a, 'tcx, 'x> SpecializedDecoder<hir::HirId> for CacheDecoder<'a, 'tcx, 'x>
.as_ref()
.unwrap()[&def_path_hash];
debug_assert!(def_id.is_local());
// The ItemLocalId needs no remapping.
let local_id = hir::ItemLocalId::decode(self)?;
@ -721,6 +735,20 @@ impl<'enc, 'tcx, E> SpecializedEncoder<ty::GenericPredicates<'tcx>>
}
}
impl<'enc, 'tcx, E> SpecializedEncoder<hir::HirId> for CacheEncoder<'enc, 'tcx, E>
where E: 'enc + ty_codec::TyEncoder
{
fn specialized_encode(&mut self, id: &hir::HirId) -> Result<(), Self::Error> {
let hir::HirId {
owner,
local_id,
} = *id;
owner.encode(self)?;
local_id.encode(self)
}
}
// NodeIds are not stable across compilation sessions, so we store them in their
// HirId representation. This allows us to map them to the current NodeId.
impl<'enc, 'tcx, E> SpecializedEncoder<NodeId> for CacheEncoder<'enc, 'tcx, E>

View file

@ -379,18 +379,26 @@ macro_rules! define_maps {
{
debug_assert!(tcx.dep_graph.is_green(dep_node_index));
// We don't do any caching yet, so recompute.
// The diagnostics for this query have already been promoted to
// the current session during try_mark_green(), so we can ignore
// them here.
let (result, _) = tcx.cycle_check(span, Query::$name(key), || {
tcx.sess.diagnostic().track_diagnostics(|| {
// The dep-graph for this computation is already in place
tcx.dep_graph.with_ignore(|| {
Self::compute_result(tcx, key)
let result = if tcx.sess.opts.debugging_opts.incremental_queries &&
Self::cache_on_disk(key) {
let prev_dep_node_index =
tcx.dep_graph.prev_dep_node_index_of(dep_node);
Self::load_from_disk(tcx.global_tcx(), prev_dep_node_index)
} else {
let (result, _ ) = tcx.cycle_check(span, Query::$name(key), || {
// The diagnostics for this query have already been
// promoted to the current session during
// try_mark_green(), so we can ignore them here.
tcx.sess.diagnostic().track_diagnostics(|| {
// The dep-graph for this computation is already in
// place
tcx.dep_graph.with_ignore(|| {
Self::compute_result(tcx, key)
})
})
})
})?;
})?;
result
};
// If -Zincremental-verify-ich is specified, re-hash results from
// the cache and make sure that they have the expected fingerprint.