//! This file builds up the `ScopeTree`, which describes
//! the parent links in the region hierarchy.
//!
//! For more information about how MIR-based region-checking works,
//! see the [rustc dev guide].
//!
//! [rustc dev guide]: https://rustc-dev-guide.rust-lang.org/borrow_check.html
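//!
//! As a rough illustration (a sketch for exposition only, not something this
//! pass emits; the function and variable names are made up), the scopes of a
//! small function nest like this:
//!
//! ```ignore
//! fn f() {            // the fn body is the root of this function's scope tree
//!     let x = 1;      // the `let` starts a "remainder" scope covering the rest of the block
//!     {               // the inner block's scope is a child of that remainder scope
//!         let y = &x; // `y` gets its own remainder scope inside the inner block
//!     }               // `y` goes out of scope here, before `x` does
//! }
//! ```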

use rustc_ast::walk_list;
use rustc_data_structures::fx::FxHashSet;
use rustc_hir as hir;
use rustc_hir::def_id::DefId;
use rustc_hir::intravisit::{self, NestedVisitorMap, Visitor};
use rustc_hir::{Arm, Block, Expr, Local, Node, Pat, PatKind, Stmt};
use rustc_index::vec::Idx;
use rustc_middle::middle::region::*;
use rustc_middle::ty::query::Providers;
use rustc_middle::ty::TyCtxt;
use rustc_span::source_map;
use rustc_span::Span;

use std::mem;

#[derive(Debug, Copy, Clone)]
pub struct Context {
    /// The root of the current region tree. This is typically the id
    /// of the innermost fn body. Each fn forms its own disjoint tree
    /// in the region hierarchy. These fn bodies are themselves
    /// arranged into a tree. See the "Modeling closures" section of
    /// the README in `rustc_trait_selection::infer::region_constraints`
    /// for more details.
    root_id: Option<hir::ItemLocalId>,

    /// The scope that contains any new variables declared, plus its depth in
    /// the scope tree.
    var_parent: Option<(Scope, ScopeDepth)>,

    /// Region parent of expressions, etc., plus its depth in the scope tree.
    parent: Option<(Scope, ScopeDepth)>,
}

struct RegionResolutionVisitor<'tcx> {
    tcx: TyCtxt<'tcx>,

    // The number of expressions and patterns visited in the current body.
    expr_and_pat_count: usize,

    // When this is `true`, we record the `Scopes` we encounter
    // when processing a Yield expression. This allows us to fix
    // up their indices.
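    // (One case this covers: a `yield` in the right-hand side of a compound
    // assignment such as `lhs += rhs`, whose evaluation order depends on the
    // type of `lhs`; the yield's index is pessimistically raised so that both
    // possible evaluation orders are accounted for.)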
    pessimistic_yield: bool,

    // Stores scopes when `pessimistic_yield` is `true`.
    fixup_scopes: Vec<Scope>,

    // The generated scope tree.
    scope_tree: ScopeTree,

    cx: Context,

    /// `terminating_scopes` is a set containing the ids of each
    /// statement and each conditional/repeating expression. These scopes
    /// are called "terminating scopes" because, when attempting to
    /// find the scope of a temporary, by default we search up the
    /// enclosing scopes until we encounter the terminating scope. A
    /// conditional/repeating expression is one which is not
    /// guaranteed to execute exactly once upon entering the parent
    /// scope. This could be because the expression only executes
    /// conditionally, such as the expression `b` in `a && b`, or
    /// because the expression may execute many times, such as a loop
    /// body. The reason that we distinguish such expressions is that,
    /// upon exiting the parent scope, we cannot statically know how
    /// many times the expression executed, and thus if the expression
    /// creates temporaries we cannot know statically how many such
    /// temporaries we would have to clean up. Therefore, we ensure that
    /// the temporaries never outlast the conditional/repeating
    /// expression, preventing the need for dynamic checks and/or
    /// arbitrary amounts of stack space. Terminating scopes end
    /// up being contained in a `DestructionScope` that contains the
    /// destructor's execution.
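    ///
    /// A minimal sketch (illustrative only; the names are made up):
    ///
    /// ```ignore
    /// while cond() {
    ///     use_it(&make_temp()) // the loop body is a terminating scope, so the
    ///                          // temporary from `make_temp()` is dropped at the
    ///                          // end of each iteration rather than piling up
    ///                          // for an unknown number of iterations
    /// }
    /// ```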
    terminating_scopes: FxHashSet<hir::ItemLocalId>,
}

/// Records the lifetime of a local variable as `cx.var_parent`
fn record_var_lifetime(
    visitor: &mut RegionResolutionVisitor<'_>,
    var_id: hir::ItemLocalId,
    _sp: Span,
) {
    match visitor.cx.var_parent {
        None => {
            // this can happen in extern fn declarations like
            //
            //    extern fn isalnum(c: c_int) -> c_int
        }
        Some((parent_scope, _)) => visitor.scope_tree.record_var_scope(var_id, parent_scope),
    }
}

fn resolve_block<'tcx>(visitor: &mut RegionResolutionVisitor<'tcx>, blk: &'tcx hir::Block<'tcx>) {
    debug!("resolve_block(blk.hir_id={:?})", blk.hir_id);

    let prev_cx = visitor.cx;

    // We treat the tail expression in the block (if any) somewhat
    // differently from the statements. The issue has to do with
    // temporary lifetimes. Consider the following:
    //
    //    quux({
    //        let inner = ... (&bar()) ...;
    //
    //        (... (&foo()) ...) // (the tail expression)
    //    }, other_argument());
    //
    // Each of the statements within the block is a terminating
    // scope, and thus a temporary (e.g., the result of calling
    // `bar()` in the initializer expression for `let inner = ...;`)
    // will be cleaned up immediately after its corresponding
    // statement (i.e., `let inner = ...;`) executes.
    //
    // On the other hand, temporaries associated with evaluating the
    // tail expression for the block are assigned lifetimes so that
    // they will be cleaned up as part of the terminating scope
    // *surrounding* the block expression. Here, the terminating
    // scope for the block expression is the `quux(..)` call; so
    // those temporaries will only be cleaned up *after* both
    // `other_argument()` has run and also the call to `quux(..)`
    // itself has returned.

    visitor.enter_node_scope_with_dtor(blk.hir_id.local_id);
    visitor.cx.var_parent = visitor.cx.parent;

    {
        // This block should be kept approximately in sync with
        // `intravisit::walk_block`. (We manually walk the block, rather
        // than call `walk_block`, in order to maintain precise
        // index information.)

        for (i, statement) in blk.stmts.iter().enumerate() {
            match statement.kind {
                hir::StmtKind::Local(..) | hir::StmtKind::Item(..) => {
                    // Each declaration introduces a subscope for bindings
                    // introduced by the declaration; this subscope covers a
                    // suffix of the block. Each subscope in a block has the
                    // previous subscope in the block as a parent, except for
                    // the first such subscope, which has the block itself as a
                    // parent.
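                    //
                    // A small sketch of the resulting nesting (illustrative only):
                    //
                    //    {                // the block's own scope
                    //        let a = ...; // remainder scope for the suffix starting here
                    //        let b = ...; // remainder scope for the next suffix, whose
                    //                     // parent is the previous remainder scope
                    //    }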
                    visitor.enter_scope(Scope {
                        id: blk.hir_id.local_id,
                        data: ScopeData::Remainder(FirstStatementIndex::new(i)),
                    });
                    visitor.cx.var_parent = visitor.cx.parent;
                }
                hir::StmtKind::Expr(..) | hir::StmtKind::Semi(..) => {}
            }
            visitor.visit_stmt(statement)
        }
        walk_list!(visitor, visit_expr, &blk.expr);
}
|
2014-01-15 14:39:08 -05:00
|
|
|
|
2014-09-12 13:10:30 +03:00
|
|
|
visitor.cx = prev_cx;
|
2012-03-09 16:39:54 -08:00
|
|
|
}
|
|
|
|
|
2019-11-29 13:43:03 +01:00
|
|
|
fn resolve_arm<'tcx>(visitor: &mut RegionResolutionVisitor<'tcx>, arm: &'tcx hir::Arm<'tcx>) {
|
2019-04-03 19:21:51 +01:00
|
|
|
let prev_cx = visitor.cx;
|
|
|
|
|
2019-12-22 17:42:04 -05:00
|
|
|
visitor.enter_scope(Scope { id: arm.hir_id.local_id, data: ScopeData::Node });
|
2019-04-03 19:21:51 +01:00
|
|
|
visitor.cx.var_parent = visitor.cx.parent;
|
|
|
|
|
2017-08-29 19:24:49 +03:00
|
|
|
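// The arm body is a terminating scope: temporaries created while evaluating
// it are dropped when the body finishes rather than surviving to the end of
// the enclosing `match`.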
visitor.terminating_scopes.insert(arm.body.hir_id.local_id);
|
2014-01-15 14:39:08 -05:00
|
|
|
|
2018-10-02 18:05:06 +02:00
|
|
|
if let Some(hir::Guard::If(ref expr)) = arm.guard {
|
|
|
|
visitor.terminating_scopes.insert(expr.hir_id.local_id);
|
2014-01-15 14:39:08 -05:00
|
|
|
}
|
|
|
|
|
2015-11-17 17:51:44 -05:00
|
|
|
intravisit::walk_arm(visitor, arm);
|
2019-04-03 19:21:51 +01:00
|
|
|
|
|
|
|
visitor.cx = prev_cx;
|
2012-03-11 12:05:17 -07:00
|
|
|
}
|
|
|
|
|
2019-11-29 13:43:03 +01:00
|
|
|
fn resolve_pat<'tcx>(visitor: &mut RegionResolutionVisitor<'tcx>, pat: &'tcx hir::Pat<'tcx>) {
|
2018-09-15 13:10:29 -04:00
|
|
|
visitor.record_child_scope(Scope { id: pat.hir_id.local_id, data: ScopeData::Node });
|
2014-01-15 14:39:08 -05:00
|
|
|
|
2016-03-06 15:54:44 +03:00
|
|
|
// If this is a binding then record the lifetime of that binding.
|
2019-09-26 16:18:31 +01:00
|
|
|
if let PatKind::Binding(..) = pat.kind {
|
2017-08-29 19:24:49 +03:00
|
|
|
record_var_lifetime(visitor, pat.hir_id.local_id, pat.span);
|
2014-01-15 14:39:08 -05:00
|
|
|
}
|
|
|
|
|
2017-10-07 16:36:28 +02:00
|
|
|
debug!("resolve_pat - pre-increment {} pat = {:?}", visitor.expr_and_pat_count, pat);
|
|
|
|
|
2015-11-17 17:51:44 -05:00
|
|
|
intravisit::walk_pat(visitor, pat);
|
2017-09-20 16:36:20 +03:00
|
|
|
|
|
|
|
visitor.expr_and_pat_count += 1;
|
2017-10-07 16:36:28 +02:00
|
|
|
|
|
|
|
debug!("resolve_pat - post-increment {} pat = {:?}", visitor.expr_and_pat_count, pat);
|
2012-03-11 12:05:17 -07:00
|
|
|
}
|
|
|
|
|
2019-11-29 13:43:03 +01:00
|
|
|
fn resolve_stmt<'tcx>(visitor: &mut RegionResolutionVisitor<'tcx>, stmt: &'tcx hir::Stmt<'tcx>) {
|
2019-02-03 08:51:50 +01:00
|
|
|
let stmt_id = stmt.hir_id.local_id;
|
2014-12-20 00:09:35 -08:00
|
|
|
debug!("resolve_stmt(stmt.id={:?})", stmt_id);
|
2014-01-15 14:39:08 -05:00
|
|
|
|
2015-01-26 12:48:19 +01:00
|
|
|
// Every statement will clean up the temporaries created during
|
|
|
|
// execution of that statement. Therefore each statement has an
|
2017-08-31 21:37:38 +03:00
|
|
|
// associated destruction scope that represents the scope of the
|
|
|
|
// statement plus its destructors, and thus the scope for which
|
2015-01-26 12:48:19 +01:00
|
|
|
// regions referenced by the destructors need to survive.
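// For example, in the statement `f(&String::from("hello"));` the `String`
// temporary is kept alive for the duration of the call and is then dropped
// as part of this destruction scope, once the whole statement has finished.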
|
2015-08-20 01:46:28 +03:00
|
|
|
visitor.terminating_scopes.insert(stmt_id);
|
2014-01-15 14:39:08 -05:00
|
|
|
|
2014-09-12 13:10:30 +03:00
|
|
|
let prev_parent = visitor.cx.parent;
|
2017-08-31 21:37:38 +03:00
|
|
|
visitor.enter_node_scope_with_dtor(stmt_id);
|
2017-05-11 16:10:47 +03:00
|
|
|
|
2015-11-17 17:51:44 -05:00
|
|
|
intravisit::walk_stmt(visitor, stmt);
|
2017-05-11 16:10:47 +03:00
|
|
|
|
2014-09-12 13:10:30 +03:00
|
|
|
visitor.cx.parent = prev_parent;
|
2012-08-17 14:09:20 -07:00
|
|
|
}
|
|
|
|
|
2019-11-29 13:43:03 +01:00
|
|
|
fn resolve_expr<'tcx>(visitor: &mut RegionResolutionVisitor<'tcx>, expr: &'tcx hir::Expr<'tcx>) {
|
2017-10-07 16:36:28 +02:00
|
|
|
debug!("resolve_expr - pre-increment {} expr = {:?}", visitor.expr_and_pat_count, expr);
|
2014-01-15 14:39:08 -05:00
|
|
|
|
2014-09-12 13:10:30 +03:00
|
|
|
let prev_cx = visitor.cx;
|
2017-08-31 21:37:38 +03:00
|
|
|
visitor.enter_node_scope_with_dtor(expr.hir_id.local_id);
|
2014-12-19 12:44:24 +13:00
|
|
|
|
2014-11-18 14:22:59 +01:00
|
|
|
{
|
2015-08-20 01:46:28 +03:00
|
|
|
let terminating_scopes = &mut visitor.terminating_scopes;
|
2017-08-29 19:24:49 +03:00
|
|
|
let mut terminating = |id: hir::ItemLocalId| {
|
2015-08-20 01:46:28 +03:00
|
|
|
terminating_scopes.insert(id);
|
2014-11-18 14:22:59 +01:00
|
|
|
};
|
2019-09-26 14:39:48 +01:00
|
|
|
match expr.kind {
|
2014-11-18 14:22:59 +01:00
|
|
|
// Conditional or repeating scopes are always terminating
|
|
|
|
// scopes, meaning that temporaries cannot outlive them.
|
|
|
|
// This ensures fixed size stacks.
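// For example, a temporary created in the body of `loop { .. }` is dropped
// at the end of each iteration instead of accumulating across iterations,
// which is what keeps the stack frame a fixed size.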
|
2018-08-18 12:14:03 +02:00
|
|
|
hir::ExprKind::Binary(
|
2019-12-22 17:42:04 -05:00
|
|
|
source_map::Spanned { node: hir::BinOpKind::And, .. },
|
|
|
|
_,
|
|
|
|
ref r,
|
|
|
|
)
|
|
|
|
| hir::ExprKind::Binary(
|
|
|
|
source_map::Spanned { node: hir::BinOpKind::Or, .. },
|
|
|
|
_,
|
|
|
|
ref r,
|
|
|
|
) => {
|
|
|
|
// For shortcircuiting operators, mark the RHS as a terminating
|
|
|
|
// scope since it only executes conditionally.
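// For example, in `a() || f(&make_temp())`, any temporary created while
// evaluating the right-hand operand is dropped as soon as that operand
// finishes, rather than living to the end of the enclosing statement.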
|
|
|
|
terminating(r.hir_id.local_id);
|
2014-11-18 14:22:59 +01:00
|
|
|
}
|
2014-01-15 14:39:08 -05:00
|
|
|
|
2018-07-11 20:05:29 +08:00
|
|
|
hir::ExprKind::Loop(ref body, _, _) => {
|
2017-08-29 19:24:49 +03:00
|
|
|
terminating(body.hir_id.local_id);
|
2014-11-18 14:22:59 +01:00
|
|
|
}
|
2014-02-09 13:44:10 +01:00
|
|
|
|
2019-04-30 17:46:59 +02:00
|
|
|
hir::ExprKind::DropTemps(ref expr) => {
|
|
|
|
// `DropTemps(expr)` does not denote a conditional scope.
|
2019-04-24 06:39:40 +02:00
|
|
|
// Rather, we want to achieve the same behavior as `{ let _t = expr; _t }`.
|
|
|
|
terminating(expr.hir_id.local_id);
|
|
|
|
}
|
|
|
|
|
2019-12-22 17:42:04 -05:00
|
|
|
hir::ExprKind::AssignOp(..)
|
|
|
|
| hir::ExprKind::Index(..)
|
|
|
|
| hir::ExprKind::Unary(..)
|
|
|
|
| hir::ExprKind::Call(..)
|
|
|
|
| hir::ExprKind::MethodCall(..) => {
|
2017-10-07 13:19:34 +03:00
|
|
|
// FIXME(https://github.com/rust-lang/rfcs/issues/811) Nested method calls
|
2014-11-18 14:22:59 +01:00
|
|
|
//
|
|
|
|
// The lifetimes for a call or method call look as follows:
|
|
|
|
//
|
|
|
|
// call.id
|
|
|
|
// - arg0.id
|
|
|
|
// - ...
|
|
|
|
// - argN.id
|
|
|
|
// - call.callee_id
|
|
|
|
//
|
|
|
|
// The idea is that call.callee_id represents *the time when
|
|
|
|
// the invoked function is actually running* and call.id
|
|
|
|
// represents *the time to prepare the arguments and make the
|
2015-02-18 19:34:55 -08:00
|
|
|
// call*. See the section "Borrows in Calls" in borrowck/README.md
|
2014-11-18 14:22:59 +01:00
|
|
|
// for an extended explanation of why this distinction is
|
|
|
|
// important.
|
|
|
|
//
|
|
|
|
// record_superlifetime(new_cx, expr.callee_id);
|
|
|
|
}
|
2013-03-15 15:24:24 -04:00
|
|
|
|
2014-11-18 14:22:59 +01:00
|
|
|
_ => {}
|
|
|
|
}
|
|
|
|
}
|
2012-07-26 08:51:57 -07:00
|
|
|
|
Change how we compute yield_in_scope
Compound operators (e.g. 'a += b') have two different possible
evaluation orders. When the left-hand side is a primitive type, the
expression is evaluated right-to-left. However, when the left-hand side
is a non-primitive type, the expression is evaluated left-to-right.
This causes problems when we try to determine if a type is live across a
yield point. Since we need to perform this computation before typecheck
has run, we can't simply check the types of the operands.
This commit calculates the most 'pessimistic' scenario - that is,
erring on the side of treating more types as live, rather than fewer.
This is perfectly safe - in fact, this initial liveness computation is
already overly conservative (e.g. issue #57478). The important thing is
that we compute a superset of the types that are actually live across
yield points. When we generate MIR, we'll determine which types actually
need to stay live across a given yield point, and which ones can
actually be dropped.
Concretely, we force the computed HIR traversal index for
right-hand-side yield expression to be equal to the maximum index for
the left-hand side. This covers both possible execution orders:
* If the expression is evaluated right-to-left, our 'pessimistic' index
is unnecessary, but safe. We visit the expressions in an
ExprKind::AssignOp from right to left, so it actually would have been
safe to do nothing. However, while increasing the index of a yield point
might cause the compiler to reject code that could actually compile, it
will never cause incorrect code to be accepted.
* If the expression is evaluated left-to-right, our 'pessimistic' index
correctly ensures that types in the left-hand side are seen as occurring
before the yield - which is exactly what we want.
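As a concrete shape of the problem, here is a minimal, nightly-only sketch (not part of this file, and assuming the `generators` feature of this era) of a compound assignment whose right-hand side yields:
```rust
#![feature(generators)]

fn main() {
    let _gen = || {
        let mut x = 0u32;
        // For the primitive `+=` the right-hand side is evaluated first, so
        // the yield happens before `x` is touched; if `+=` were overloaded,
        // `x` would be evaluated first instead. The pessimistic index covers
        // both orders by treating `x` as if it were already in use when the
        // generator suspends here.
        x += {
            yield ();
            1
        };
        let _ = x;
    };
}
```
The fixup pass below then raises the count recorded for such a yield to the count reached after the left-hand side has been visited.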
2019-06-06 22:23:28 -04:00
|
|
|
let prev_pessimistic = visitor.pessimistic_yield;
|
|
|
|
|
|
|
|
// Ordinarily, we can rely on the visit order of HIR intravisit
|
2019-06-23 15:50:59 -04:00
|
|
|
// to correspond to the actual execution order of statements.
|
2020-03-06 12:13:55 +01:00
|
|
|
// However, there's a weird corner case with compound assignment
|
2019-06-23 15:50:59 -04:00
|
|
|
// operators (e.g. `a += b`). The evaluation order depends on whether
|
2019-06-06 22:23:28 -04:00
|
|
|
// or not the operator is overloaded (e.g. whether or not a trait
|
|
|
|
// like AddAssign is implemented).
|
|
|
|
|
|
|
|
// For primitive types (which, despite having a trait impl, don't actually
|
|
|
|
// end up calling it), the evaluation order is right-to-left. For example,
|
|
|
|
// the following code snippet:
|
|
|
|
//
|
|
|
|
// let y = &mut 0;
|
|
|
|
// *{println!("LHS!"); y} += {println!("RHS!"); 1};
|
|
|
|
//
|
|
|
|
// will print:
|
|
|
|
//
|
|
|
|
// RHS!
|
|
|
|
// LHS!
|
|
|
|
//
|
|
|
|
// However, if the operator is used on a non-primitive type,
|
|
|
|
// the evaluation order will be left-to-right, since the operator
|
|
|
|
// actually gets desugared to a method call. For example, this
|
|
|
|
// nearly identical code snippet:
|
|
|
|
//
|
|
|
|
// let y = &mut String::new();
|
|
|
|
// *{println!("LHS String"); y} += {println!("RHS String"); "hi"};
|
|
|
|
//
|
|
|
|
// will print:
|
|
|
|
// LHS String
|
|
|
|
// RHS String
|
|
|
|
//
|
|
|
|
// To determine the actual execution order, we need to perform
|
|
|
|
// trait resolution. Unfortunately, we need to be able to compute
|
|
|
|
// yield_in_scope before type checking is even done, as it gets
|
2019-06-23 15:50:59 -04:00
|
|
|
// used by AST borrowcheck.
|
2019-06-06 22:23:28 -04:00
|
|
|
//
|
|
|
|
// Fortunately, we don't need to know the actual execution order.
|
2019-06-23 15:50:59 -04:00
|
|
|
// It suffices to know the 'worst case' order with respect to yields.
|
2019-06-06 22:23:28 -04:00
|
|
|
// Specifically, we need to know the highest 'expr_and_pat_count'
|
|
|
|
// that we could assign to the yield expression. To do this,
|
|
|
|
// we pick the greater of the two values from the left-hand
|
|
|
|
// and right-hand expressions. This makes us overly conservative
|
|
|
|
// about what types could possibly live across yield points,
|
|
|
|
// but we will never fail to detect that a type does actually
|
|
|
|
// live across a yield point. The latter part is critical -
|
|
|
|
// we're already overly conservative about what types will live
|
|
|
|
// across yield points, as the generated MIR will determine
|
|
|
|
// when things are actually live. However, for typecheck to work
|
|
|
|
// properly, we can't miss any types.
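// For example, if a yield in the right-hand side is first recorded with
// count 7 and visiting the left-hand side raises expr_and_pat_count to 10,
// the fixup below rewrites that yield's recorded count to 10 (the numbers
// are purely illustrative).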
|
|
|
|
|
2019-09-26 14:39:48 +01:00
|
|
|
match expr.kind {
|
2017-04-24 22:03:47 +03:00
|
|
|
// Manually recurse over closures, because they are the only
|
|
|
|
// case of nested bodies that share the parent environment.
|
2018-07-11 20:05:29 +08:00
|
|
|
hir::ExprKind::Closure(.., body, _, _) => {
|
2018-12-04 13:45:36 +01:00
|
|
|
let body = visitor.tcx.hir().body(body);
|
2017-04-24 22:03:47 +03:00
|
|
|
visitor.visit_body(body);
|
2019-12-22 17:42:04 -05:00
|
|
|
}
|
2019-06-23 20:22:02 -04:00
|
|
|
hir::ExprKind::AssignOp(_, ref left_expr, ref right_expr) => {
|
2019-12-22 17:42:04 -05:00
|
|
|
debug!(
|
|
|
|
"resolve_expr - enabling pessimistic_yield, was previously {}",
|
|
|
|
prev_pessimistic
|
|
|
|
);
|
2019-06-06 22:23:28 -04:00
|
|
|
|
2019-06-23 20:22:02 -04:00
|
|
|
let start_point = visitor.fixup_scopes.len();
|
2019-06-06 22:23:28 -04:00
|
|
|
visitor.pessimistic_yield = true;
|
|
|
|
|
|
|
|
// If the actual execution order turns out to be right-to-left,
|
|
|
|
// then we're fine. However, if the actual execution order is left-to-right,
|
2019-06-23 15:50:59 -04:00
|
|
|
// then we'll assign too low a count to any `yield` expressions
|
2019-06-06 22:23:28 -04:00
|
|
|
// we encounter in 'right_expression' - they should really occur after all of the
|
|
|
|
// expressions in 'left_expression'.
|
2019-06-23 20:22:02 -04:00
|
|
|
visitor.visit_expr(&right_expr);
|
2019-06-06 22:23:28 -04:00
|
|
|
visitor.pessimistic_yield = prev_pessimistic;
|
|
|
|
|
|
|
|
debug!("resolve_expr - restoring pessimistic_yield to {}", prev_pessimistic);
|
2019-06-23 20:22:02 -04:00
|
|
|
visitor.visit_expr(&left_expr);
|
2019-06-06 22:23:28 -04:00
|
|
|
debug!("resolve_expr - fixing up counts to {}", visitor.expr_and_pat_count);
|
|
|
|
|
2019-06-23 20:22:02 -04:00
|
|
|
// Remove and process any scopes pushed by the visitor
|
|
|
|
let target_scopes = visitor.fixup_scopes.drain(start_point..);
|
|
|
|
|
2019-06-06 22:23:28 -04:00
|
|
|
for scope in target_scopes {
|
2019-06-22 17:30:56 -04:00
|
|
|
let mut yield_data = visitor.scope_tree.yield_in_scope.get_mut(&scope).unwrap();
|
|
|
|
let count = yield_data.expr_and_pat_count;
|
|
|
|
let span = yield_data.span;
|
2019-06-06 22:23:28 -04:00
|
|
|
|
|
|
|
// expr_and_pat_count never decreases. Since we recorded counts in yield_in_scope
|
|
|
|
// before walking the left-hand side, it should be impossible for the recorded
|
|
|
|
// count to be greater than the left-hand side count.
|
|
|
|
if count > visitor.expr_and_pat_count {
|
2019-12-22 17:42:04 -05:00
|
|
|
bug!(
|
|
|
|
"Encountered greater count {} at span {:?} - expected no greater than {}",
|
|
|
|
count,
|
|
|
|
span,
|
|
|
|
visitor.expr_and_pat_count
|
|
|
|
);
|
2019-06-06 22:23:28 -04:00
|
|
|
}
|
|
|
|
let new_count = visitor.expr_and_pat_count;
|
2019-12-22 17:42:04 -05:00
|
|
|
debug!(
|
|
|
|
"resolve_expr - increasing count for scope {:?} from {} to {} at span {:?}",
|
|
|
|
scope, count, new_count, span
|
|
|
|
);
|
2019-06-06 22:23:28 -04:00
|
|
|
|
2019-06-22 17:30:56 -04:00
|
|
|
yield_data.expr_and_pat_count = new_count;
|
2019-06-06 22:23:28 -04:00
|
|
|
}
|
2017-04-24 22:03:47 +03:00
|
|
|
}
|
|
|
|
|
2019-12-22 17:42:04 -05:00
|
|
|
_ => intravisit::walk_expr(visitor, expr),
|
2017-04-24 22:03:47 +03:00
|
|
|
}
|
|
|
|
|
2017-09-20 16:36:20 +03:00
|
|
|
visitor.expr_and_pat_count += 1;
|
2017-09-13 01:09:56 +03:00
|
|
|
|
2017-10-07 16:36:28 +02:00
|
|
|
debug!("resolve_expr post-increment {}, expr = {:?}", visitor.expr_and_pat_count, expr);
|
|
|
|
|
2019-09-26 14:39:48 +01:00
|
|
|
if let hir::ExprKind::Yield(_, source) = &expr.kind {
|
2017-09-08 08:52:03 +02:00
|
|
|
// Mark this expr's scope and all parent scopes as containing `yield`.
|
2018-09-15 13:10:29 -04:00
|
|
|
let mut scope = Scope { id: expr.hir_id.local_id, data: ScopeData::Node };
|
2017-09-08 08:52:03 +02:00
|
|
|
loop {
|
2019-06-18 14:34:51 -07:00
|
|
|
let data = YieldData {
|
|
|
|
span: expr.span,
|
|
|
|
expr_and_pat_count: visitor.expr_and_pat_count,
|
|
|
|
source: *source,
|
|
|
|
};
|
|
|
|
visitor.scope_tree.yield_in_scope.insert(scope, data);
|
2019-06-06 22:23:28 -04:00
|
|
|
if visitor.pessimistic_yield {
|
|
|
|
debug!("resolve_expr in pessimistic_yield - marking scope {:?} for fixup", scope);
|
2019-06-23 20:22:02 -04:00
|
|
|
visitor.fixup_scopes.push(scope);
|
2019-06-06 22:23:28 -04:00
|
|
|
}
|
2017-09-08 08:52:03 +02:00
|
|
|
|
|
|
|
// Keep traversing up while we can.
|
|
|
|
match visitor.scope_tree.parent_map.get(&scope) {
|
|
|
|
// Don't cross from closure bodies to their parent.
|
2018-09-15 13:10:29 -04:00
|
|
|
Some(&(superscope, _)) => match superscope.data {
|
2018-09-10 14:40:12 +02:00
|
|
|
ScopeData::CallSite => break,
|
2019-12-22 17:42:04 -05:00
|
|
|
_ => scope = superscope,
|
2017-09-24 16:13:54 +03:00
|
|
|
},
|
2019-12-22 17:42:04 -05:00
|
|
|
None => break,
|
2017-09-08 08:52:03 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-09-12 13:10:30 +03:00
|
|
|
visitor.cx = prev_cx;
|
2012-03-11 17:01:28 -07:00
|
|
|
}
|
|
|
|
|
2019-06-12 00:11:55 +03:00
|
|
|
fn resolve_local<'tcx>(
|
|
|
|
visitor: &mut RegionResolutionVisitor<'tcx>,
|
2019-11-29 13:43:03 +01:00
|
|
|
pat: Option<&'tcx hir::Pat<'tcx>>,
|
|
|
|
init: Option<&'tcx hir::Expr<'tcx>>,
|
2019-06-12 00:11:55 +03:00
|
|
|
) {
|
2017-07-21 14:01:22 +03:00
|
|
|
debug!("resolve_local(pat={:?}, init={:?})", pat, init);
|
2014-01-15 14:39:08 -05:00
|
|
|
|
2018-06-06 12:44:05 +10:00
|
|
|
let blk_scope = visitor.cx.var_parent.map(|(p, _)| p);
|
2014-01-15 14:39:08 -05:00
|
|
|
|
|
|
|
// As an exception to the normal rules governing temporary
|
|
|
|
// lifetimes, initializers in a let have a temporary lifetime
|
2018-11-27 02:59:49 +00:00
|
|
|
// of the enclosing block. This means that e.g., a program
|
2014-01-15 14:39:08 -05:00
|
|
|
// like the following is legal:
|
|
|
|
//
|
|
|
|
// let ref x = HashMap::new();
|
|
|
|
//
|
|
|
|
// Because the hash map will be freed in the enclosing block.
|
|
|
|
//
|
|
|
|
// We express the rules more formally based on 3 grammars (defined
|
|
|
|
// fully in the helpers below that implement them):
|
|
|
|
//
|
|
|
|
// 1. `E&`, which matches expressions like `&<rvalue>` that
|
|
|
|
// own a pointer into the stack.
|
|
|
|
//
|
|
|
|
// 2. `P&`, which matches patterns like `ref x` or `(ref x, ref
|
|
|
|
// y)` that produce ref bindings into the value they are
|
|
|
|
// matched against or something (at least partially) owned by
|
|
|
|
// the value they are matched against. (By partially owned,
|
|
|
|
// I mean that creating a binding into a ref-counted or managed value
|
|
|
|
// would still count.)
|
|
|
|
//
|
2018-01-29 01:49:29 +02:00
|
|
|
// 3. `ET`, which matches both rvalues like `foo()` as well as places
|
2014-01-15 14:39:08 -05:00
|
|
|
// based on rvalues like `foo().x[2].y`.
|
|
|
|
//
|
|
|
|
// A subexpression `<rvalue>` that appears in a let initializer
|
|
|
|
// `let pat [: ty] = expr` has an extended temporary lifetime if
|
|
|
|
// any of the following conditions are met:
|
|
|
|
//
|
|
|
|
// A. `pat` matches `P&` and `expr` matches `ET`
|
|
|
|
// (covers cases where `pat` creates ref bindings into an rvalue
|
|
|
|
// produced by `expr`)
|
|
|
|
// B. `ty` is a borrowed pointer and `expr` matches `ET`
|
|
|
|
// (covers cases where coercion creates a borrow)
|
|
|
|
// C. `expr` matches `E&`
|
|
|
|
// (covers cases where `expr` borrows an rvalue that is then assigned
|
|
|
|
// to memory (at least partially) owned by the binding)
|
|
|
|
//
|
2014-04-21 00:49:39 -04:00
|
|
|
// Here are some examples hopefully giving an intuition where each
|
2014-01-15 14:39:08 -05:00
|
|
|
// rule comes into play and why:
|
|
|
|
//
|
|
|
|
// Rule A. `let (ref x, ref y) = (foo().x, 44)`. The rvalue `(foo().x, 44)`
|
|
|
|
// would have an extended lifetime, but not `foo()`.
|
|
|
|
//
|
2019-09-18 23:17:36 -06:00
|
|
|
// Rule C. `let x = &foo().x`. The rvalue `foo()` would have extended
|
2014-01-15 14:39:08 -05:00
|
|
|
// lifetime.
|
|
|
|
    //
    // In some cases, multiple rules may apply (though not to the same
    // rvalue). For example:
    //
    //     let ref x = [&a(), &b()];
    //
    // Here, the expression `[...]` has an extended lifetime due to rule
    // A, but the inner rvalues `a()` and `b()` have an extended lifetime
    // due to rule C.

    if let Some(expr) = init {
        record_rvalue_scope_if_borrow_expr(visitor, &expr, blk_scope);

        if let Some(pat) = pat {
            if is_binding_pat(pat) {
                record_rvalue_scope(visitor, &expr, blk_scope);
            }
        }
    }

    // Make sure we visit the initializer first, so `expr_and_pat_count` remains correct.
    if let Some(expr) = init {
        visitor.visit_expr(expr);
    }
    if let Some(pat) = pat {
        visitor.visit_pat(pat);
    }

    /// Returns `true` if `pat` matches the `P&` non-terminal.
    ///
    /// ```text
    ///     P& = ref X
    ///        | StructName { ..., P&, ... }
    ///        | VariantName(..., P&, ...)
    ///        | [ ..., P&, ... ]
    ///        | ( ..., P&, ... )
    ///        | ... "|" P& "|" ...
    ///        | box P&
    /// ```
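    ///
    /// For example, `(ref x, _)` and `Some(ref y)` match `P&`, while `x`,
    /// `&x`, and `_` do not.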
    fn is_binding_pat(pat: &hir::Pat<'_>) -> bool {
        // Note that the code below looks for *explicit* refs only, that is, it won't
        // know about *implicit* refs as introduced in #42640.
        //
        // This is not a problem. For example, consider
        //
        //     let (ref x, ref y) = (Foo { .. }, Bar { .. });
        //
        // Due to the explicit refs on the left hand side, the below code would signal
        // that the temporary value on the right hand side should live until the end of
        // the enclosing block (as opposed to being dropped after the let is complete).
        //
        // To create an implicit ref, however, you must have a borrowed value on the RHS
        // already, as in this example (which won't compile before #42640):
        //
        //     let Foo { x, .. } = &Foo { x: ..., ... };
        //
        // in place of
        //
        //     let Foo { ref x, .. } = Foo { ... };
        //
        // In the former case (the implicit ref version), the temporary is created by the
        // & expression, and its lifetime would be extended to the end of the block (due
        // to a different rule, not the below code).
        match pat.kind {
            PatKind::Binding(hir::BindingAnnotation::Ref, ..)
            | PatKind::Binding(hir::BindingAnnotation::RefMut, ..) => true,

            PatKind::Struct(_, ref field_pats, _) => {
                field_pats.iter().any(|fp| is_binding_pat(&fp.pat))
            }

            PatKind::Slice(ref pats1, ref pats2, ref pats3) => {
                pats1.iter().any(|p| is_binding_pat(&p))
                    || pats2.iter().any(|p| is_binding_pat(&p))
                    || pats3.iter().any(|p| is_binding_pat(&p))
            }

            PatKind::Or(ref subpats)
            | PatKind::TupleStruct(_, ref subpats, _)
            | PatKind::Tuple(ref subpats, _) => subpats.iter().any(|p| is_binding_pat(&p)),

            PatKind::Box(ref subpat) => is_binding_pat(&subpat),

            PatKind::Ref(_, _)
            | PatKind::Binding(
                hir::BindingAnnotation::Unannotated | hir::BindingAnnotation::Mutable,
                ..,
            )
            | PatKind::Wild
            | PatKind::Path(_)
            | PatKind::Lit(_)
            | PatKind::Range(_, _, _) => false,
        }
    }

    /// If `expr` matches the `E&` grammar, then records an extended rvalue scope as appropriate:
    ///
    /// ```text
    ///     E& = & ET
    ///        | StructName { ..., f: E&, ... }
    ///        | [ ..., E&, ... ]
    ///        | ( ..., E&, ... )
    ///        | {...; E&}
    ///        | box E&
    ///        | E& as ...
    ///        | ( E& )
    /// ```
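    ///
    /// For example, in `let x = (&a(), &b());` the initializer matches
    /// `( ..., E&, ... )`, so the rvalues `a()` and `b()` both get the
    /// extended temporary lifetime of the enclosing block.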
    fn record_rvalue_scope_if_borrow_expr<'tcx>(
        visitor: &mut RegionResolutionVisitor<'tcx>,
        expr: &hir::Expr<'_>,
        blk_id: Option<Scope>,
    ) {
        match expr.kind {
            hir::ExprKind::AddrOf(_, _, ref subexpr) => {
                record_rvalue_scope_if_borrow_expr(visitor, &subexpr, blk_id);
                record_rvalue_scope(visitor, &subexpr, blk_id);
            }
            hir::ExprKind::Struct(_, fields, _) => {
                for field in fields {
                    record_rvalue_scope_if_borrow_expr(visitor, &field.expr, blk_id);
                }
            }
            hir::ExprKind::Array(subexprs) | hir::ExprKind::Tup(subexprs) => {
                for subexpr in subexprs {
                    record_rvalue_scope_if_borrow_expr(visitor, &subexpr, blk_id);
                }
            }
            hir::ExprKind::Cast(ref subexpr, _) => {
                record_rvalue_scope_if_borrow_expr(visitor, &subexpr, blk_id)
            }
            hir::ExprKind::Block(ref block, _) => {
                if let Some(ref subexpr) = block.expr {
                    record_rvalue_scope_if_borrow_expr(visitor, &subexpr, blk_id);
                }
            }
            _ => {}
        }
    }

    /// Applied to an expression `expr` if `expr` -- or something owned or partially owned by
    /// `expr` -- is going to be indirectly referenced by a variable in a let statement. In that
    /// case, the "temporary lifetime" of `expr` is extended to be the block enclosing the `let`
    /// statement.
    ///
    /// More formally, if `expr` matches the grammar `ET`, record the rvalue scope of the matching
    /// `<rvalue>` as `blk_scope`:
    ///
    /// ```text
    ///     ET = *ET
    ///        | ET[...]
    ///        | ET.f
    ///        | (ET)
    ///        | <rvalue>
    /// ```
    ///
    /// Note: ET is intended to match "rvalues or places based on rvalues".
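    ///
    /// For example, given `let x = &foo().x[2].y;`, this is applied to
    /// `foo().x[2].y`; walking down through the field and index projections
    /// bottoms out at the `<rvalue>` `foo()`, and every expression along the
    /// way is recorded with the enclosing block as its temporary scope.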
    fn record_rvalue_scope<'tcx>(
        visitor: &mut RegionResolutionVisitor<'tcx>,
        expr: &hir::Expr<'_>,
        blk_scope: Option<Scope>,
    ) {
        let mut expr = expr;
        loop {
            // Note: give all the expressions matching `ET` the
            // extended temporary lifetime, not just the innermost rvalue,
            // because in codegen if we must compile e.g., `*rvalue()`
            // into a temporary, we request the temporary scope of the
            // outer expression.
            visitor.scope_tree.record_rvalue_scope(expr.hir_id.local_id, blk_scope);

            match expr.kind {
                hir::ExprKind::AddrOf(_, _, ref subexpr)
                | hir::ExprKind::Unary(hir::UnOp::UnDeref, ref subexpr)
                | hir::ExprKind::Field(ref subexpr, _)
                | hir::ExprKind::Index(ref subexpr, _) => {
                    expr = &subexpr;
                }
                _ => {
                    return;
                }
            }
        }
    }
}

impl<'tcx> RegionResolutionVisitor<'tcx> {
    /// Records the current parent (if any) as the parent of `child_scope`.
    /// Returns the depth of `child_scope`.
    fn record_child_scope(&mut self, child_scope: Scope) -> ScopeDepth {
        let parent = self.cx.parent;
        self.scope_tree.record_scope_parent(child_scope, parent);
        // If `child_scope` has no parent, it must be the root node, and so has
        // a depth of 1. Otherwise, its depth is one more than its parent's.
        parent.map_or(1, |(_p, d)| d + 1)
    }

    /// Records the current parent (if any) as the parent of `child_scope`,
    /// and sets `child_scope` as the new current parent.
    fn enter_scope(&mut self, child_scope: Scope) {
        let child_depth = self.record_child_scope(child_scope);
        self.cx.parent = Some((child_scope, child_depth));
    }

    fn enter_node_scope_with_dtor(&mut self, id: hir::ItemLocalId) {
        // If node was previously marked as a terminating scope during the
        // recursive visit of its parent node in the AST, then we need to
        // account for the destruction scope representing the scope of
        // the destructors that run immediately after it completes.
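        //
        // Entering the destruction scope before the node scope below makes
        // `Destruction(id)` the parent of `Node(id)` in the scope tree.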
        if self.terminating_scopes.contains(&id) {
            self.enter_scope(Scope { id, data: ScopeData::Destruction });
        }
        self.enter_scope(Scope { id, data: ScopeData::Node });
    }
}

impl<'tcx> Visitor<'tcx> for RegionResolutionVisitor<'tcx> {
    type Map = intravisit::ErasedMap<'tcx>;

    fn nested_visit_map(&mut self) -> NestedVisitorMap<Self::Map> {
        NestedVisitorMap::None
    }

    fn visit_block(&mut self, b: &'tcx Block<'tcx>) {
        resolve_block(self, b);
    }

    fn visit_body(&mut self, body: &'tcx hir::Body<'tcx>) {
        let body_id = body.id();
        let owner_id = self.tcx.hir().body_owner(body_id);

        debug!(
            "visit_body(id={:?}, span={:?}, body.id={:?}, cx.parent={:?})",
            owner_id,
            self.tcx.sess.source_map().span_to_string(body.value.span),
            body_id,
            self.cx.parent
        );

        // Save all state that is specific to the outer function body.
        // It will be restored below, once we have visited the body.
        let outer_ec = mem::replace(&mut self.expr_and_pat_count, 0);
        let outer_cx = self.cx;
        let outer_ts = mem::take(&mut self.terminating_scopes);
        // The 'pessimistic yield' flag is set to true when we are
        // processing a `+=` statement and have to make pessimistic
        // control flow assumptions. This doesn't apply to nested
        // bodies within the `+=` statements. See #69307.
        let outer_pessimistic_yield = mem::replace(&mut self.pessimistic_yield, false);
        self.terminating_scopes.insert(body.value.hir_id.local_id);

        if let Some(root_id) = self.cx.root_id {
            self.scope_tree.record_closure_parent(body.value.hir_id.local_id, root_id);
        }
        self.cx.root_id = Some(body.value.hir_id.local_id);
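
        // Entering the call-site scope before the arguments scope makes the
        // call-site scope the parent of the arguments scope; both are keyed
        // by the body expression's id.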
        self.enter_scope(Scope { id: body.value.hir_id.local_id, data: ScopeData::CallSite });
        self.enter_scope(Scope { id: body.value.hir_id.local_id, data: ScopeData::Arguments });

        // The arguments and `self` are parented to the fn.
        self.cx.var_parent = self.cx.parent.take();
        for param in body.params {
            self.visit_pat(&param.pat);
        }

        // The body of every fn is a root scope.
        self.cx.parent = self.cx.var_parent;
        if self.tcx.hir().body_owner_kind(owner_id).is_fn_or_closure() {
            self.visit_expr(&body.value)
        } else {
            // Only functions have an outer terminating (drop) scope, while
            // temporaries in constant initializers may be 'static, but only
            // according to rvalue lifetime semantics, using the same
            // syntactical rules used for let initializers.
            //
            // e.g., in `let x = &f();`, the temporary holding the result from
            // the `f()` call lives for the entirety of the surrounding block.
            //
            // Similarly, `const X: ... = &f();` would have the result of `f()`
            // live for `'static`, implying (if Drop restrictions on constants
            // ever get lifted) that the value *could* have a destructor, but
            // it'd get leaked instead of the destructor running during the
            // evaluation of `X` (if at all allowed by CTFE).
            //
            // However, `const Y: ... = g(&f());`, like `let y = g(&f());`,
            // would *not* let the `f()` temporary escape into an outer scope
            // (i.e., `'static`), which means that after `g` returns, it drops,
            // and all the associated destruction scope rules apply.
            self.cx.var_parent = None;
            resolve_local(self, None, Some(&body.value));
        }

        if body.generator_kind.is_some() {
            self.scope_tree.body_expr_count.insert(body_id, self.expr_and_pat_count);
        }

        // Restore context we had at the start.
        self.expr_and_pat_count = outer_ec;
        self.cx = outer_cx;
        self.terminating_scopes = outer_ts;
        self.pessimistic_yield = outer_pessimistic_yield;
    }

    fn visit_arm(&mut self, a: &'tcx Arm<'tcx>) {
        resolve_arm(self, a);
    }
    fn visit_pat(&mut self, p: &'tcx Pat<'tcx>) {
        resolve_pat(self, p);
    }
    fn visit_stmt(&mut self, s: &'tcx Stmt<'tcx>) {
        resolve_stmt(self, s);
    }
    fn visit_expr(&mut self, ex: &'tcx Expr<'tcx>) {
        resolve_expr(self, ex);
    }
    fn visit_local(&mut self, l: &'tcx Local<'tcx>) {
        resolve_local(self, Some(&l.pat), l.init.as_deref());
    }
}

fn region_scope_tree(tcx: TyCtxt<'_>, def_id: DefId) -> &ScopeTree {
    let closure_base_def_id = tcx.closure_base_def_id(def_id);
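    // Scopes for a closure body are recorded in the scope tree of the item
    // that encloses it, so forward the query to that enclosing item.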
    if closure_base_def_id != def_id {
        return tcx.region_scope_tree(closure_base_def_id);
    }

    let id = tcx.hir().local_def_id_to_hir_id(def_id.expect_local());
    let scope_tree = if let Some(body_id) = tcx.hir().maybe_body_owned_by(id) {
        let mut visitor = RegionResolutionVisitor {
            tcx,
            scope_tree: ScopeTree::default(),
            expr_and_pat_count: 0,
            cx: Context { root_id: None, parent: None, var_parent: None },
            terminating_scopes: Default::default(),
            pessimistic_yield: false,
            fixup_scopes: vec![],
        };

        let body = tcx.hir().body(body_id);
        visitor.scope_tree.root_body = Some(body.value.hir_id);

        // If the item is an associated const or a method,
        // record its impl/trait parent, as it can also have
        // lifetime parameters free in this body.
        match tcx.hir().get(id) {
            Node::ImplItem(_) | Node::TraitItem(_) => {
                visitor.scope_tree.root_parent = Some(tcx.hir().get_parent_item(id));
            }
            _ => {}
        }

        visitor.visit_body(body);

        visitor.scope_tree
    } else {
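        // The item does not own a body (e.g. it is only a signature), so
        // there is nothing to visit and the tree stays empty.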
        ScopeTree::default()
    };

    tcx.arena.alloc(scope_tree)
}

pub fn provide(providers: &mut Providers) {
    *providers = Providers { region_scope_tree, ..*providers };
}