// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

use rbml::opaque::Encoder;
use rustc::dep_graph::DepNode;
use rustc::hir::def_id::DefId;
use rustc::hir::svh::Svh;
use rustc::session::Session;
use rustc::ty::TyCtxt;
use rustc_data_structures::fnv::FnvHashMap;
use rustc_serialize::Encodable as RustcEncodable;
use std::hash::{Hash, Hasher, SipHasher};
use std::io::{self, Cursor, Write};
use std::fs::{self, File};
use std::path::PathBuf;

use IncrementalHashesMap;
use super::data::*;
use super::directory::*;
use super::hash::*;
use super::preds::*;
use super::fs::*;

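/// Saves the dep-graph and the per-item metadata hashes to disk. Does
/// nothing if incremental compilation is disabled. The work happens with
/// the dep-graph in "ignore" mode, so that the act of saving is not
/// itself recorded as a dependency.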
pub fn save_dep_graph<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
                                incremental_hashes_map: &IncrementalHashesMap,
                                svh: Svh) {
    debug!("save_dep_graph()");
    let _ignore = tcx.dep_graph.in_ignore();
    let sess = tcx.sess;
    if sess.opts.incremental.is_none() {
        return;
    }
    let mut hcx = HashContext::new(tcx, incremental_hashes_map);
    let mut builder = DefIdDirectoryBuilder::new(tcx);
    let query = tcx.dep_graph.query();
    let preds = Predecessors::new(&query, &mut hcx);
    save_in(sess,
            dep_graph_path(sess),
            |e| encode_dep_graph(&preds, &mut builder, e));
    save_in(sess,
            metadata_hash_export_path(sess),
            |e| encode_metadata_hashes(tcx, svh, &preds, &mut builder, e));
}

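/// Saves the session's work products (reusable build artifacts, e.g.
/// compiled object files) to disk. Does nothing if incremental
/// compilation is disabled.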
pub fn save_work_products(sess: &Session) {
    if sess.opts.incremental.is_none() {
        return;
    }

    debug!("save_work_products()");
    let _ignore = sess.dep_graph.in_ignore();
    let path = work_products_path(sess);
    save_in(sess, path, |e| encode_work_products(sess, e));
}

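/// Runs `encode` against an in-memory buffer and then writes the result
/// to `path_buf`, deleting any pre-existing file at that path first.
/// I/O and encoding errors are reported via `sess` rather than returned.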
fn save_in<F>(sess: &Session, path_buf: PathBuf, encode: F)
    where F: FnOnce(&mut Encoder) -> io::Result<()>
{
    // Delete the old dep-graph, if any.
    // Note: It's important that we actually delete the old file and not just
    // truncate and overwrite it, since it might be a shared hard-link, the
    // underlying data of which we don't want to modify.
    if path_buf.exists() {
        match fs::remove_file(&path_buf) {
            Ok(()) => {}
            Err(err) => {
                sess.err(&format!("unable to delete old dep-graph at `{}`: {}",
                                  path_buf.display(),
                                  err));
                return;
            }
        }
    }

    // Generate the data in a memory buffer.
    let mut wr = Cursor::new(Vec::new());
    match encode(&mut Encoder::new(&mut wr)) {
        Ok(()) => {}
        Err(err) => {
            sess.err(&format!("could not encode dep-graph to `{}`: {}",
                              path_buf.display(),
                              err));
            return;
        }
    }

    // Write the data out.
    let data = wr.into_inner();
    match File::create(&path_buf).and_then(|mut file| file.write_all(&data)) {
        Ok(_) => {}
        Err(err) => {
            sess.err(&format!("failed to write dep-graph to `{}`: {}",
                              path_buf.display(),
                              err));
            return;
        }
    }
}

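/// Encodes, in order: the hash of the dependency-tracked commandline
/// arguments, the def-id directory, and the serialized dep-graph itself
/// (its edges plus the hashes of its input nodes).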
pub fn encode_dep_graph(preds: &Predecessors,
                        builder: &mut DefIdDirectoryBuilder,
                        encoder: &mut Encoder)
                        -> io::Result<()> {
    // First encode the commandline arguments hash.
    let tcx = builder.tcx();
    try!(tcx.sess.opts.dep_tracking_hash().encode(encoder));

    // Create a flat list of (Input, WorkProduct) edges for
    // serialization.
    let mut edges = vec![];
    for (&target, sources) in &preds.inputs {
        match *target {
            DepNode::MetaData(ref def_id) => {
                // Metadata *targets* are always local metadata nodes. We handle
                // those in `encode_metadata_hashes`, which comes later.
                assert!(def_id.is_local());
                continue;
            }
            _ => (),
        }
        let target = builder.map(target);
        for &source in sources {
            let source = builder.map(source);
            edges.push((source, target.clone()));
        }
    }

    // Create the serialized dep-graph.
    let graph = SerializedDepGraph {
        edges: edges,
        hashes: preds.hashes
            .iter()
            .map(|(&dep_node, &hash)| {
                SerializedHash {
                    dep_node: builder.map(dep_node),
                    hash: hash,
                }
            })
            .collect(),
    };

    debug!("graph = {:#?}", graph);

    // Encode the directory and then the graph data.
    try!(builder.directory().encode(encoder));
    try!(graph.encode(encoder));

    Ok(())
}

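/// Encodes the crate SVH followed by one stable hash per exported
/// metadata item. The hash for an item `X` is computed from the hashes
/// of all inputs that `MetaData(X)` depends on, so downstream crates
/// can cheaply check whether our metadata for `X` may have changed
/// since they were last compiled.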
pub fn encode_metadata_hashes(tcx: TyCtxt,
                              svh: Svh,
                              preds: &Predecessors,
                              builder: &mut DefIdDirectoryBuilder,
                              encoder: &mut Encoder)
                              -> io::Result<()> {
    let mut def_id_hashes = FnvHashMap();
    let mut def_id_hash = |def_id: DefId| -> u64 {
        *def_id_hashes.entry(def_id)
            .or_insert_with(|| {
                let index = builder.add(def_id);
                let path = builder.lookup_def_path(index);
                path.deterministic_hash(tcx)
            })
    };

    // For each `MetaData(X)` node where `X` is local, accumulate a
    // hash. These are the metadata items we export. Downstream
    // crates will want to see a hash that tells them whether we might
    // have changed the metadata for a given item since they last
    // compiled.
    //
    // (I initially wrote this with an iterator, but it seemed harder to read.)
    let mut serialized_hashes = SerializedMetadataHashes { hashes: vec![] };
    for (&target, sources) in &preds.inputs {
        let def_id = match *target {
            DepNode::MetaData(def_id) => {
                assert!(def_id.is_local());
                def_id
            }
            _ => continue,
        };

        // To create the hash for each item `X`, we don't hash the raw
        // bytes of the metadata (though in principle we
        // could). Instead, we walk the predecessors of `MetaData(X)`
        // from the dep-graph. This corresponds to all the inputs that
        // were read to construct the metadata. To create the hash for
        // the metadata, we hash (the hash of) all of those inputs.
        debug!("save: computing metadata hash for {:?}", def_id);

        // Create a vector containing a pair of (source-id, hash).
        // The source-id is stored as a `DepNode<u64>`, where the u64
        // is the deterministic hash of the def-path. This is convenient
        // because we can sort this to get a stable ordering across
        // compilations, even if the def-ids themselves have changed.
        let mut hashes: Vec<(DepNode<u64>, u64)> = sources.iter()
            .map(|dep_node| {
                let hash_dep_node = dep_node.map_def(|&def_id| Some(def_id_hash(def_id)))
                    .unwrap();
                let hash = preds.hashes[dep_node];
                (hash_dep_node, hash)
            })
            .collect();

        hashes.sort();
        let mut state = SipHasher::new();
        hashes.hash(&mut state);
        let hash = state.finish();

        debug!("save: metadata hash for {:?} is {}", def_id, hash);
        serialized_hashes.hashes.push(SerializedMetadataHash {
            def_index: def_id.index,
            hash: hash,
        });
    }

    // Encode everything.
    try!(svh.encode(encoder));
    try!(serialized_hashes.encode(encoder));

    Ok(())
}

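/// Encodes the session's work products as a flat vector of
/// `SerializedWorkProduct`s.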
pub fn encode_work_products(sess: &Session, encoder: &mut Encoder) -> io::Result<()> {
    let work_products: Vec<_> = sess.dep_graph
        .work_products()
        .iter()
        .map(|(id, work_product)| {
            SerializedWorkProduct {
                id: id.clone(),
                work_product: work_product.clone(),
            }
        })
        .collect();

    work_products.encode(encoder)
}