nits from pnkfelix

Niko Matsakis 2015-08-21 14:40:07 -04:00
parent 10b8941bce
commit b247402666
3 changed files with 76 additions and 45 deletions


@ -16,6 +16,7 @@ use rustc_data_structures::transitive_relation::TransitiveRelation;
#[derive(Clone)]
pub struct FreeRegionMap {
+ // Stores the relation `a < b`, where `a` and `b` are regions.
relation: TransitiveRelation<Region>
}


@ -52,7 +52,7 @@ impl BitVector {
/// A "bit matrix" is basically a square matrix of booleans
/// represented as one gigantic bitvector. In other words, it is as if
- /// you have N bitvectors, each of length N.
+ /// you have N bitvectors, each of length N. Note that `elements` here is `N`.
#[derive(Clone)]
pub struct BitMatrix {
elements: usize,
@ -60,6 +60,7 @@ pub struct BitMatrix {
}
impl BitMatrix {
+ // Create a new `elements x elements` matrix, initially empty.
pub fn new(elements: usize) -> BitMatrix {
// For every element, we need one bit for every other
// element. Round up to an even number of u64s.
@ -88,15 +89,17 @@ impl BitMatrix {
}
/// Do the bits from `source` contain `target`?
- /// Put another way, can `source` reach `target`?
+ ///
+ /// Put another way, if the matrix represents (transitive)
+ /// reachability, can `source` reach `target`?
pub fn contains(&self, source: usize, target: usize) -> bool {
let (start, _) = self.range(source);
let (word, mask) = word_mask(target);
(self.vector[start+word] & mask) != 0
}
- /// Returns those indices that are reachable from both source and
- /// target. This is an O(n) operation where `n` is the number of
+ /// Returns those indices that are reachable from both `a` and
+ /// `b`. This is an O(n) operation where `n` is the number of
/// elements (somewhat independent from the actual size of the
/// intersection, in particular).
pub fn intersection(&self, a: usize, b: usize) -> Vec<usize> {
@ -114,24 +117,24 @@ impl BitMatrix {
result
}
- /// Add the bits from source to the bits from destination,
+ /// Add the bits from `read` to the bits from `write`,
/// return true if anything changed.
///
- /// This is used when computing reachability because if you have
- /// an edge `destination -> source`, because in that case
- /// `destination` can reach everything that `source` can (and
+ /// This is used when computing transitive reachability because if
+ /// you have an edge `write -> read`, then in that case
+ /// `write` can reach everything that `read` can (and
/// potentially more).
- pub fn merge(&mut self, source: usize, destination: usize) -> bool {
- let (source_start, source_end) = self.range(source);
- let (destination_start, destination_end) = self.range(destination);
+ pub fn merge(&mut self, read: usize, write: usize) -> bool {
+ let (read_start, read_end) = self.range(read);
+ let (write_start, write_end) = self.range(write);
let vector = &mut self.vector[..];
let mut changed = false;
- for (source_index, destination_index) in
- (source_start..source_end).zip(destination_start..destination_end)
+ for (read_index, write_index) in
+ (read_start..read_end).zip(write_start..write_end)
{
- let v1 = vector[destination_index];
- let v2 = v1 | vector[source_index];
- vector[destination_index] = v2;
+ let v1 = vector[write_index];
+ let v2 = v1 | vector[read_index];
+ vector[write_index] = v2;
changed = changed | (v1 != v2);
}
changed
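
The doc comment above is the whole trick behind transitive reachability: seed the matrix with the direct edges, then repeatedly `merge` along each edge until nothing changes. Below is a minimal sketch of that fixed-point loop; the `reachability` helper and its edge-list input are illustrative rather than code from this commit, while `BitMatrix::new`, `add`, and `merge` are the methods shown above. This is roughly the kind of caller that the `TransitiveRelation` type in the third file is.

```
// Hedged sketch: build a reachability matrix over `elements` nodes
// from a list of directed edges, iterating `merge` to a fixed point.
fn reachability(elements: usize, edges: &[(usize, usize)]) -> BitMatrix {
    let mut matrix = BitMatrix::new(elements);
    for &(source, target) in edges {
        matrix.add(source, target); // direct edges
    }
    let mut changed = true;
    while changed {
        changed = false;
        for &(source, target) in edges {
            // edge `source -> target`: the `source` ("write") row picks up
            // everything the `target` ("read") row can reach
            changed |= matrix.merge(target, source);
        }
    }
    matrix
}
```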
@ -183,25 +186,29 @@ fn grow() {
fn matrix_intersection() {
let mut vec1 = BitMatrix::new(200);
+ // (*) Elements reachable from both 2 and 65.
vec1.add(2, 3);
vec1.add(2, 6);
- vec1.add(2, 10);
- vec1.add(2, 64);
+ vec1.add(2, 10); // (*)
+ vec1.add(2, 64); // (*)
vec1.add(2, 65);
vec1.add(2, 130);
- vec1.add(2, 160);
+ vec1.add(2, 160); // (*)
vec1.add(64, 133);
vec1.add(65, 2);
vec1.add(65, 8);
- vec1.add(65, 10); // X
- vec1.add(65, 64); // X
+ vec1.add(65, 10); // (*)
+ vec1.add(65, 64); // (*)
vec1.add(65, 68);
vec1.add(65, 133);
- vec1.add(65, 160); // X
+ vec1.add(65, 160); // (*)
let intersection = vec1.intersection(2, 64);
assert!(intersection.is_empty());
let intersection = vec1.intersection(2, 65);
- assert_eq!(intersection, vec![10, 64, 160]);
+ assert_eq!(intersection, &[10, 64, 160]);
}
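
For reference, the `intersection` call exercised by this test boils down to AND-ing the two rows word by word and collecting the set bit positions. A hedged sketch follows; the `words_per_row` layout parameter and the function name are assumptions standing in for the `range`/`word_mask` helpers used above.

```
// Illustrative sketch of row intersection over the flat Vec<u64> storage.
fn intersection_sketch(vector: &[u64], words_per_row: usize, a: usize, b: usize) -> Vec<usize> {
    let (a_start, b_start) = (a * words_per_row, b * words_per_row);
    let mut result = Vec::new();
    for word in 0..words_per_row {
        let w = vector[a_start + word] & vector[b_start + word];
        for bit in 0..64 {
            if w & (1u64 << bit) != 0 {
                result.push(word * 64 + bit); // element reachable from both rows
            }
        }
    }
    result
}
```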


@ -61,6 +61,10 @@ impl<T:Debug+PartialEq> TransitiveRelation<T> {
Some(i) => i,
None => {
self.elements.push(a);
+ // if we changed the dimensions, clear the cache
+ *self.closure.borrow_mut() = None;
Index(self.elements.len() - 1)
}
}
@ -73,17 +77,17 @@ impl<T:Debug+PartialEq> TransitiveRelation<T> {
let edge = Edge { source: a, target: b };
if !self.edges.contains(&edge) {
self.edges.push(edge);
- }
- // clear cached closure, if any
- *self.closure.borrow_mut() = None;
+ // added an edge, clear the cache
+ *self.closure.borrow_mut() = None;
+ }
}
/// Check whether `a < b` (transitively)
pub fn contains(&self, a: &T, b: &T) -> bool {
match (self.index(a), self.index(b)) {
(Some(a), Some(b)) =>
- self.take_closure(|closure| closure.contains(a.0, b.0)),
+ self.with_closure(|closure| closure.contains(a.0, b.0)),
(None, _) | (_, None) =>
false,
}
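
For orientation, this is how the type is exercised (mirroring the inline tests further down in the file): `add` records `a < b` edges and clears the cached closure, and `contains` answers queries against the lazily rebuilt closure. A hypothetical test in the same style:

```
#[test]
fn usage_sketch() {
    // Hypothetical example, not one of the tests in this commit.
    let mut relation = TransitiveRelation::new();
    relation.add("a", "b");
    relation.add("b", "c");
    assert!(relation.contains(&"a", &"c")); // holds transitively via "b"

    // Adding an edge clears the cached closure, so a later query
    // sees the new reachability.
    relation.add("c", "d");
    assert!(relation.contains(&"a", &"d"));
}
```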
@ -102,7 +106,8 @@ impl<T:Debug+PartialEq> TransitiveRelation<T> {
/// itself is not fully sufficient).
///
/// Examples are probably clearer than any prose I could write
- /// (there are corresponding tests below, btw):
+ /// (there are corresponding tests below, btw). In each case,
+ /// the query is `best_upper_bound(a, b)`:
///
/// ```
/// // returns Some(x), which is also LUB
@ -140,7 +145,11 @@ impl<T:Debug+PartialEq> TransitiveRelation<T> {
/// Returns the set of bounds `X` such that:
///
/// - `a < X` and `b < X`
- /// - there is no `Y` such that `a < Y` and `Y < X`
+ /// - there is no `Y != X` such that `a < Y` and `Y < X`
+ ///   - except for the case where `X < a` (i.e., a strongly connected
+ ///     component in the graph). In that case, the smallest
+ ///     representative of the SCC is returned (as determined by the
+ ///     internal indices).
///
/// Note that this set can, in principle, have any size.
pub fn minimal_upper_bounds(&self, a: &T, b: &T) -> Vec<&T> {
@ -157,7 +166,7 @@ impl<T:Debug+PartialEq> TransitiveRelation<T> {
mem::swap(&mut a, &mut b);
}
- let lub_indices = self.take_closure(|closure| {
+ let lub_indices = self.with_closure(|closure| {
// Easy case is when either a < b or b < a:
if closure.contains(a.0, b.0) {
return vec![b.0];
@ -178,18 +187,28 @@ impl<T:Debug+PartialEq> TransitiveRelation<T> {
// to the steps below.
// - This vector contains upper bounds, but they are
// not minimal upper bounds. So you may have e.g.
- // `[a, b, tcx, x]` where `a < tcx` and `b < tcx` and
- // `x < a` and `x < b`. This could be reduced to
- // just `[x]`.
+ // `[x, y, tcx, z]` where `x < tcx` and `y < tcx` and
+ // `z < x` and `z < y`:
+ //
+ //     z --+---> x ----+----> tcx
+ //         |           |
+ //         |           |
+ //         +---> y ----+
+ //
+ // In this case, we really want to return just `[z]`.
+ // The following steps below achieve this by gradually
+ // reducing the list.
// 2. Pare down the vector using `pare_down`. This will
// remove elements from the vector that can be reached
// by an earlier element.
- // - In the example above, this would convert
- // `[a, b, tcx, x]` to `[a, b, x]`. Note that `x`
- // remains because `x < a` but not `a < x.`
+ // - In the example above, this would convert `[x, y,
+ // tcx, z]` to `[x, y, z]`. Note that `x` and `y` are
+ // still in the vector; this is because while `z < x`
+ // (and `z < y`) holds, `z` comes after them in the
+ // vector.
// 3. Reverse the vector and repeat the pare down process.
// - In the example above, we would reverse to
- // `[x, b, a]` and then pare down to `[x]`.
+ // `[z, y, x]` and then pare down to `[z]`.
// 4. Reverse once more just so that we yield a vector in
// increasing order of index. Maybe this is silly.
//
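
Condensed into code, the four steps described above read roughly as follows. This is a sketch of the surrounding (mostly elided) method body as a free function, using the `intersection` and `pare_down` operations that appear elsewhere in this diff; `a_index`/`b_index` stand in for the `a.0`/`b.0` indices used inside the real closure.

```
// Hedged sketch of steps 1-4 over the closure matrix.
fn minimal_upper_bounds_sketch(closure: &BitMatrix, a_index: usize, b_index: usize) -> Vec<usize> {
    let mut candidates = closure.intersection(a_index, b_index); // step 1: common upper bounds
    pare_down(&mut candidates, closure);                         // step 2: drop what earlier elements reach
    candidates.reverse();                                        // step 3: repeat from the other end
    pare_down(&mut candidates, closure);
    candidates.reverse();                                        // step 4: back to increasing index order
    candidates
}
```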
@ -213,7 +232,7 @@ impl<T:Debug+PartialEq> TransitiveRelation<T> {
.collect()
}
- fn take_closure<OP,R>(&self, op: OP) -> R
+ fn with_closure<OP,R>(&self, op: OP) -> R
where OP: FnOnce(&BitMatrix) -> R
{
let mut closure_cell = self.closure.borrow_mut();
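
The rest of `with_closure` is cut off by this hunk, but it follows the usual lazily-computed-cache shape. A hedged sketch, where `compute_closure` is an assumed name for the routine that folds the edge list into a `BitMatrix` (for instance by the fixed-point `merge` loop sketched earlier):

```
fn with_closure<OP, R>(&self, op: OP) -> R
    where OP: FnOnce(&BitMatrix) -> R
{
    let mut closure_cell = self.closure.borrow_mut();
    if closure_cell.is_none() {
        // rebuild only when `add` has invalidated the cache
        *closure_cell = Some(self.compute_closure()); // assumed helper name
    }
    op(closure_cell.as_ref().unwrap())
}
```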
@ -248,7 +267,8 @@ impl<T:Debug+PartialEq> TransitiveRelation<T> {
/// there exists an earlier element i<j such that i -> j. That is,
/// after you run `pare_down`, you know that for all elements that
/// remain in candidates, they cannot reach any of the elements that
- /// come after them.
+ /// come after them. (Note that it may permute the ordering in some
+ /// cases.)
///
/// Examples follow. Assume that a -> b -> c and x -> y -> z.
///
@ -264,9 +284,13 @@ fn pare_down(candidates: &mut Vec<usize>, closure: &BitMatrix) {
let mut j = i;
while j < candidates.len() {
if closure.contains(candidate, candidates[j]) {
- // if i can reach j, then we can remove j
- println!("pare_down: candidates[{:?}]={:?} candidates[{:?}] = {:?}",
- i-1, candidate, j, candidates[j]);
+ // If `i` can reach `j`, then we can remove `j`. Given
+ // how careful this algorithm is about ordering, it
+ // may seem odd to use swap-remove. The reason it is
+ // ok is that we are swapping two elements (`j` and
+ // `max`) that are both to the right of our cursor
+ // `i`, and the invariant that we are establishing
+ // continues to hold for everything left of `i`.
candidates.swap_remove(j);
} else {
j += 1;
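
To make the invariant concrete, here is a small hypothetical trace in the spirit of the `a -> b -> c`, `x -> y -> z` examples mentioned above; the graph and candidate order are made up for illustration, not taken from the elided doc examples.

```
#[test]
fn pare_down_sketch() {
    // Encode a -> b as 0 -> 1 and x -> y as 3 -> 4.
    let mut closure = BitMatrix::new(6);
    closure.add(0, 1);
    closure.add(3, 4);

    // Candidates [a, x, b, y]: `b` is reachable from the earlier `a`,
    // and `y` from the earlier `x`, so both get swap-removed.
    let mut candidates = vec![0, 3, 1, 4];
    pare_down(&mut candidates, &closure);
    assert_eq!(candidates, vec![0, 3]); // only a and x remain
}
```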
@ -371,9 +395,8 @@ fn mubs_best_choice2() {
#[test]
fn mubs_no_best_choice() {
- // in this case, the intersection yields [1, 2],
- // and we need the first "pare down" call to narrow
- // this down to [2]
+ // in this case, the intersection yields [1, 2], and the "pare
+ // down" calls find nothing to remove.
let mut relation = TransitiveRelation::new();
relation.add("0", "1");
relation.add("0", "2");