// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

//! # Translation of Expressions
//!
//! The expr module handles translation of expressions. The most general
//! translation routine is `trans()`, which will translate an expression
//! into a datum. `trans_into()` is also available, which will translate
//! an expression and write the result directly into memory, sometimes
//! avoiding the need for a temporary stack slot. Finally,
//! `trans_to_lvalue()` is available if you'd like to ensure that the
//! result has cleanup scheduled.
//!
//! Internally, each of these functions dispatches to various other
//! expression functions depending on the kind of expression. We divide
//! up expressions into:
//!
//! - **Datum expressions:** Those that most naturally yield values.
//!   Examples would be `22`, `box x`, or `a + b` (when not overloaded).
//! - **DPS expressions:** Those that most naturally write into a location
//!   in memory. Examples would be `foo()` or `Point { x: 3, y: 4 }`.
//! - **Statement expressions:** Those that do not generate a meaningful
//!   result. Examples would be `while { ... }` or `return 44`.
//!
//! Public entry points:
//!
//! - `trans_into(bcx, expr, dest) -> bcx`: evaluates an expression,
//!   storing the result into `dest`. This is the preferred form, if you
//!   can manage it.
//!
//! - `trans(bcx, expr) -> DatumBlock`: evaluates an expression, yielding
//!   `Datum` with the result. You can then store the datum, inspect
//!   the value, etc. This may introduce temporaries if the datum is a
//!   structural type.
//!
//! - `trans_to_lvalue(bcx, expr, "...") -> DatumBlock`: evaluates an
//!   expression and ensures that the result has a cleanup associated with it,
//!   creating a temporary stack slot if necessary.
//!
//! - `trans_local_var -> Datum`: looks up a local variable or upvar.
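//!
//! As a rough illustration of how callers typically choose between these
//! entry points (a hedged sketch, not code taken from a real caller;
//! `lldest` and `arg_expr` are hypothetical names):
//!
//! ```ignore
//! // Preferred: evaluate the expression directly into an existing slot.
//! bcx = trans_into(bcx, &arg_expr, SaveIn(lldest));
//!
//! // Otherwise: produce a datum first and decide what to do with it later.
//! let datum = unpack_datum!(bcx, trans(bcx, &arg_expr));
//! ```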

#![allow(non_camel_case_types)]

pub use self::Dest::*;
use self::lazy_binop_ty::*;

use back::abi;
use llvm::{self, ValueRef, TypeKind};
use middle::const_qualif::ConstQualif;
use middle::def::Def;
use middle::subst::Substs;
use trans::{_match, adt, asm, base, closure, consts, controlflow};
use trans::base::*;
use trans::build::*;
use trans::callee::{Callee, ArgExprs, ArgOverloadedCall, ArgOverloadedOp};
use trans::cleanup::{self, CleanupMethods, DropHintMethods};
use trans::common::*;
use trans::datum::*;
use trans::debuginfo::{self, DebugLoc, ToDebugLoc};
use trans::declare;
use trans::glue;
use trans::machine;
use trans::tvec;
use trans::type_of;
use trans::Disr;
use middle::ty::adjustment::{AdjustDerefRef, AdjustReifyFnPointer};
use middle::ty::adjustment::{AdjustUnsafeFnPointer, AdjustMutToConstPointer};
use middle::ty::adjustment::CustomCoerceUnsized;
use middle::ty::{self, Ty, TyCtxt};
use middle::ty::MethodCall;
use middle::ty::cast::{CastKind, CastTy};
use util::common::indenter;
use trans::machine::{llsize_of, llsize_of_alloc};
use trans::type_::Type;

use rustc_front;
use rustc_front::hir;

use syntax::{ast, codemap};
use syntax::parse::token::InternedString;
use std::mem;

// Destinations

// These are passed around by the code generating functions to track the
// destination of a computation's value.

#[derive(Copy, Clone, PartialEq)]
pub enum Dest {
    SaveIn(ValueRef),
    Ignore,
}

impl Dest {
    pub fn to_string(&self, ccx: &CrateContext) -> String {
        match *self {
            SaveIn(v) => format!("SaveIn({})", ccx.tn().val_to_string(v)),
            Ignore => "Ignore".to_string()
        }
    }
}
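
// A hedged usage sketch (not taken from a particular caller; `lldest` is a
// placeholder name): code that has already allocated a destination slot passes
// `SaveIn(lldest)` so the value is written in place, while code that only needs
// the expression's side effects passes `Ignore`:
//
//     bcx = trans_into(bcx, &expr, SaveIn(lldest));
//     bcx = trans_into(bcx, &stmt_expr, Ignore);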

/// This function is equivalent to `trans(bcx, expr).store_to_dest(dest)` but it may generate
/// better optimized LLVM code.
pub fn trans_into<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                              expr: &hir::Expr,
                              dest: Dest)
                              -> Block<'blk, 'tcx> {
    let mut bcx = bcx;

    debuginfo::set_source_location(bcx.fcx, expr.id, expr.span);

    if adjustment_required(bcx, expr) {
        // use trans, which may be less efficient but
        // which will perform the adjustments:
        let datum = unpack_datum!(bcx, trans(bcx, expr));
        return datum.store_to_dest(bcx, dest, expr.id);
    }
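
    // Check the const qualification of the expression: if it is known to be
    // const-evaluable and to never need dropping, try to emit it from a
    // precomputed global below instead of translating it piece by piece.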
    let qualif = *bcx.tcx().const_qualif_map.borrow().get(&expr.id).unwrap();
    if !qualif.intersects(ConstQualif::NOT_CONST | ConstQualif::NEEDS_DROP) {
        if !qualif.intersects(ConstQualif::PREFER_IN_PLACE) {
            if let SaveIn(lldest) = dest {
                match consts::get_const_expr_as_global(bcx.ccx(), expr, qualif,
                                                       bcx.fcx.param_substs,
                                                       consts::TrueConst::No) {
                    Ok(global) => {
                        // Cast pointer to destination, because constants
                        // have different types.
                        let lldest = PointerCast(bcx, lldest, val_ty(global));
                        memcpy_ty(bcx, lldest, global, expr_ty_adjusted(bcx, expr));
                        return bcx;
                    },
                    Err(consts::ConstEvalFailure::Runtime(_)) => {
                        // in case const evaluation errors, translate normally
                        // debug assertions catch the same errors
                        // see RFC 1229
                    },
                    Err(consts::ConstEvalFailure::Compiletime(_)) => {
                        return bcx;
                    },
                }
            }

            // If we see a const here, that's because it evaluates to a type with zero size. We
            // should be able to just discard it, since const expressions are guaranteed not to
            // have side effects. This seems to be reached through tuple struct constructors being
            // passed zero-size constants.
            if let hir::ExprPath(..) = expr.node {
                match bcx.def(expr.id) {
                    Def::Const(_) | Def::AssociatedConst(_) => {
                        assert!(type_is_zero_size(bcx.ccx(), bcx.tcx().node_id_to_type(expr.id)));
                        return bcx;
                    }
                    _ => {}
                }
            }

            // Even if we don't have a value to emit, and the expression
            // doesn't have any side-effects, we still have to translate the
            // body of any closures.
            // FIXME: Find a better way of handling this case.
        } else {
            // The only way we're going to see a `const` at this point is if
            // it prefers in-place instantiation, likely because it contains
            // `[x; N]` somewhere within.
            match expr.node {
                hir::ExprPath(..) => {
                    match bcx.def(expr.id) {
                        Def::Const(did) | Def::AssociatedConst(did) => {
                            let empty_substs = bcx.tcx().mk_substs(Substs::trans_empty());
                            let const_expr = consts::get_const_expr(bcx.ccx(), did, expr,
                                                                    empty_substs);
                            // Temporarily get cleanup scopes out of the way,
                            // as they require sub-expressions to be contained
                            // inside the current AST scope.
                            // These should record no cleanups anyways, `const`
                            // can't have destructors.
                            let scopes = mem::replace(&mut *bcx.fcx.scopes.borrow_mut(),
                                                      vec![]);
                            // Lock emitted debug locations to the location of
                            // the constant reference expression.
                            debuginfo::with_source_location_override(bcx.fcx,
                                                                     expr.debug_loc(),
                                                                     || {
                                bcx = trans_into(bcx, const_expr, dest)
                            });
                            let scopes = mem::replace(&mut *bcx.fcx.scopes.borrow_mut(),
                                                      scopes);
                            assert!(scopes.is_empty());
                            return bcx;
                        }
                        _ => {}
                    }
                }
                _ => {}
            }
        }
    }

    debug!("trans_into() expr={:?}", expr);

    let cleanup_debug_loc = debuginfo::get_cleanup_debug_loc_for_ast_node(bcx.ccx(),
                                                                          expr.id,
                                                                          expr.span,
                                                                          false);
    bcx.fcx.push_ast_cleanup_scope(cleanup_debug_loc);
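
    // Dispatch on the general kind of the expression: lvalue and rvalue-datum
    // expressions are translated to a datum and then stored into `dest`, DPS
    // expressions write straight into `dest`, and statement expressions
    // produce no meaningful value at all.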
    let kind = expr_kind(bcx.tcx(), expr);
    bcx = match kind {
        ExprKind::Lvalue | ExprKind::RvalueDatum => {
            trans_unadjusted(bcx, expr).store_to_dest(dest, expr.id)
        }
        ExprKind::RvalueDps => {
            trans_rvalue_dps_unadjusted(bcx, expr, dest)
        }
        ExprKind::RvalueStmt => {
            trans_rvalue_stmt_unadjusted(bcx, expr)
        }
    };

    bcx.fcx.pop_and_trans_ast_cleanup_scope(bcx, expr.id)
}

/// Translates an expression, returning a datum (and new block) encapsulating the result. When
/// possible, it is preferred to use `trans_into`, as that may avoid creating a temporary on the
/// stack.
pub fn trans<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                         expr: &hir::Expr)
                         -> DatumBlock<'blk, 'tcx, Expr> {
    debug!("trans(expr={:?})", expr);

    let mut bcx = bcx;
    let fcx = bcx.fcx;
    let qualif = *bcx.tcx().const_qualif_map.borrow().get(&expr.id).unwrap();
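
    // As in `trans_into`, expressions that are const-evaluable and need no
    // destructor are emitted as (or loaded/copied from) a precomputed global
    // instead of being translated piece by piece.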
    let adjusted_global = !qualif.intersects(ConstQualif::NON_STATIC_BORROWS);
    let global = if !qualif.intersects(ConstQualif::NOT_CONST | ConstQualif::NEEDS_DROP) {
        match consts::get_const_expr_as_global(bcx.ccx(), expr, qualif,
                                               bcx.fcx.param_substs,
                                               consts::TrueConst::No) {
            Ok(global) => {
                if qualif.intersects(ConstQualif::HAS_STATIC_BORROWS) {
                    // Is borrowed as 'static, must return lvalue.

                    // Cast pointer to global, because constants have different types.
                    let const_ty = expr_ty_adjusted(bcx, expr);
                    let llty = type_of::type_of(bcx.ccx(), const_ty);
                    let global = PointerCast(bcx, global, llty.ptr_to());
                    let datum = Datum::new(global, const_ty, Lvalue::new("expr::trans"));
                    return DatumBlock::new(bcx, datum.to_expr_datum());
                }

                // Otherwise, keep around and perform adjustments, if needed.
                let const_ty = if adjusted_global {
                    expr_ty_adjusted(bcx, expr)
                } else {
                    expr_ty(bcx, expr)
                };

                // This could use a better heuristic.
                Some(if type_is_immediate(bcx.ccx(), const_ty) {
                    // Cast pointer to global, because constants have different types.
                    let llty = type_of::type_of(bcx.ccx(), const_ty);
                    let global = PointerCast(bcx, global, llty.ptr_to());
                    // Maybe just get the value directly, instead of loading it?
                    immediate_rvalue(load_ty(bcx, global, const_ty), const_ty)
                } else {
                    let scratch = alloc_ty(bcx, const_ty, "const");
                    call_lifetime_start(bcx, scratch);
                    let lldest = if !const_ty.is_structural() {
                        // Cast pointer to slot, because constants have different types.
                        PointerCast(bcx, scratch, val_ty(global))
                    } else {
                        // In this case, memcpy_ty calls llvm.memcpy after casting both
                        // source and destination to i8*, so we don't need any casts.
                        scratch
                    };
                    memcpy_ty(bcx, lldest, global, const_ty);
                    Datum::new(scratch, const_ty, Rvalue::new(ByRef))
                })
            },
            Err(consts::ConstEvalFailure::Runtime(_)) => {
                // in case const evaluation errors, translate normally
                // debug assertions catch the same errors
                // see RFC 1229
                None
            },
            Err(consts::ConstEvalFailure::Compiletime(_)) => {
                // generate a dummy llvm value
                let const_ty = expr_ty(bcx, expr);
                let llty = type_of::type_of(bcx.ccx(), const_ty);
                let dummy = C_undef(llty.ptr_to());
                Some(Datum::new(dummy, const_ty, Rvalue::new(ByRef)))
            },
        }
    } else {
        None
    };

    let cleanup_debug_loc = debuginfo::get_cleanup_debug_loc_for_ast_node(bcx.ccx(),
                                                                          expr.id,
                                                                          expr.span,
                                                                          false);
    fcx.push_ast_cleanup_scope(cleanup_debug_loc);
    let datum = match global {
        Some(rvalue) => rvalue.to_expr_datum(),
        None => unpack_datum!(bcx, trans_unadjusted(bcx, expr))
    };
    let datum = if adjusted_global {
        datum // trans::consts already performed adjustments.
    } else {
        unpack_datum!(bcx, apply_adjustments(bcx, expr, datum))
    };
    bcx = fcx.pop_and_trans_ast_cleanup_scope(bcx, expr.id);
    return DatumBlock::new(bcx, datum);
}
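
// Accessors for the two words of a fat pointer: `get_dataptr` returns the
// address of the data pointer field and `get_meta` the address of the extra
// word (the length for slices, the vtable for trait objects).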
pub fn get_meta(bcx: Block, fat_ptr: ValueRef) -> ValueRef {
    StructGEP(bcx, fat_ptr, abi::FAT_PTR_EXTRA)
}

pub fn get_dataptr(bcx: Block, fat_ptr: ValueRef) -> ValueRef {
    StructGEP(bcx, fat_ptr, abi::FAT_PTR_ADDR)
}

pub fn copy_fat_ptr(bcx: Block, src_ptr: ValueRef, dst_ptr: ValueRef) {
    Store(bcx, Load(bcx, get_dataptr(bcx, src_ptr)), get_dataptr(bcx, dst_ptr));
    Store(bcx, Load(bcx, get_meta(bcx, src_ptr)), get_meta(bcx, dst_ptr));
}
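
/// Returns true if the adjustments recorded for `expr` require any code to be
/// emitted, as opposed to being purely type-level changes that translation
/// can ignore.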
fn adjustment_required<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                                   expr: &hir::Expr) -> bool {
    let adjustment = match bcx.tcx().tables.borrow().adjustments.get(&expr.id).cloned() {
        None => { return false; }
        Some(adj) => adj
    };

    // Don't skip a conversion from Box<T> to &T, etc.
    if bcx.tcx().is_overloaded_autoderef(expr.id, 0) {
        return true;
    }

    match adjustment {
        AdjustReifyFnPointer => true,
        AdjustUnsafeFnPointer | AdjustMutToConstPointer => {
            // purely a type-level thing
            false
        }
        AdjustDerefRef(ref adj) => {
            // We are a bit paranoid about adjustments and thus might have a re-
            // borrow here which merely derefs and then refs again (it might have
            // a different region or mutability, but we don't care here).
            !(adj.autoderefs == 1 && adj.autoref.is_some() && adj.unsize.is_none())
        }
    }
}

/// Helper for trans that applies adjustments from `expr` to `datum`, which should be the
/// unadjusted translation of `expr`.
fn apply_adjustments<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                                 expr: &hir::Expr,
                                 datum: Datum<'tcx, Expr>)
                                 -> DatumBlock<'blk, 'tcx, Expr>
{
    let mut bcx = bcx;
    let mut datum = datum;
    let adjustment = match bcx.tcx().tables.borrow().adjustments.get(&expr.id).cloned() {
        None => {
            return DatumBlock::new(bcx, datum);
        }
        Some(adj) => { adj }
    };
    debug!("unadjusted datum for expr {:?}: {} adjustment={:?}",
           expr,
           datum.to_string(bcx.ccx()),
           adjustment);
    match adjustment {
        AdjustReifyFnPointer => {
            match datum.ty.sty {
                ty::TyFnDef(def_id, substs, _) => {
                    datum = Callee::def(bcx.ccx(), def_id, substs, datum.ty)
                                   .reify(bcx.ccx()).to_expr_datum();
                }
                _ => {
                    unreachable!("{} cannot be reified to a fn ptr", datum.ty)
                }
            }
        }
        AdjustUnsafeFnPointer | AdjustMutToConstPointer => {
            // purely a type-level thing
        }
        AdjustDerefRef(ref adj) => {
            let skip_reborrows = if adj.autoderefs == 1 && adj.autoref.is_some() {
                // We are a bit paranoid about adjustments and thus might have a re-
                // borrow here which merely derefs and then refs again (it might have
                // a different region or mutability, but we don't care here).
                match datum.ty.sty {
                    // Don't skip a conversion from Box<T> to &T, etc.
                    ty::TyRef(..) => {
                        if bcx.tcx().is_overloaded_autoderef(expr.id, 0) {
                            // Don't skip an overloaded deref.
                            0
                        } else {
                            1
                        }
                    }
                    _ => 0
                }
            } else {
                0
            };

            if adj.autoderefs > skip_reborrows {
                // Schedule cleanup.
                let lval = unpack_datum!(bcx, datum.to_lvalue_datum(bcx, "auto_deref", expr.id));
                datum = unpack_datum!(bcx, deref_multiple(bcx, expr,
                                                          lval.to_expr_datum(),
                                                          adj.autoderefs - skip_reborrows));
            }

            // (You might think there is a more elegant way to do this than a
            // skip_reborrows bool, but then you remember that the borrow checker exists).
            if skip_reborrows == 0 && adj.autoref.is_some() {
                datum = unpack_datum!(bcx, auto_ref(bcx, datum, expr));
            }

            if let Some(target) = adj.unsize {
                // We do not arrange cleanup ourselves; if we already are an
                // L-value, then cleanup will have already been scheduled (and
                // the `datum.to_rvalue_datum` call below will emit code to zero
                // the drop flag when moving out of the L-value). If we are an
                // R-value, then we do not need to schedule cleanup.
                let source_datum = unpack_datum!(bcx,
                    datum.to_rvalue_datum(bcx, "__coerce_source"));

                let target = bcx.monomorphize(&target);

                let scratch = alloc_ty(bcx, target, "__coerce_target");
                call_lifetime_start(bcx, scratch);
                let target_datum = Datum::new(scratch, target,
                                              Rvalue::new(ByRef));
                bcx = coerce_unsized(bcx, expr.span, source_datum, target_datum);
                datum = Datum::new(scratch, target,
                                   RvalueExpr(Rvalue::new(ByRef)));
            }
        }
    }
    debug!("after adjustments, datum={}", datum.to_string(bcx.ccx()));
    DatumBlock::new(bcx, datum)
}
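
/// Performs an unsizing coercion from `source` to `target`, writing the result
/// through `target.val`: the pointer cases build a fat pointer (data pointer
/// plus length or vtable), and the struct case copies fields across, recursing
/// on the single field that actually needs coercing.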
fn coerce_unsized<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                              span: codemap::Span,
                              source: Datum<'tcx, Rvalue>,
                              target: Datum<'tcx, Rvalue>)
                              -> Block<'blk, 'tcx> {
    let mut bcx = bcx;
    debug!("coerce_unsized({} -> {})",
           source.to_string(bcx.ccx()),
           target.to_string(bcx.ccx()));

    match (&source.ty.sty, &target.ty.sty) {
        (&ty::TyBox(a), &ty::TyBox(b)) |
        (&ty::TyRef(_, ty::TypeAndMut { ty: a, .. }),
         &ty::TyRef(_, ty::TypeAndMut { ty: b, .. })) |
        (&ty::TyRef(_, ty::TypeAndMut { ty: a, .. }),
         &ty::TyRawPtr(ty::TypeAndMut { ty: b, .. })) |
        (&ty::TyRawPtr(ty::TypeAndMut { ty: a, .. }),
         &ty::TyRawPtr(ty::TypeAndMut { ty: b, .. })) => {
            let (inner_source, inner_target) = (a, b);

            let (base, old_info) = if !type_is_sized(bcx.tcx(), inner_source) {
                // Normally, the source is a thin pointer and we are
                // adding extra info to make a fat pointer. The exception
                // is when we are upcasting an existing object fat pointer
                // to use a different vtable. In that case, we want to
                // load out the original data pointer so we can repackage
                // it.
                (Load(bcx, get_dataptr(bcx, source.val)),
                 Some(Load(bcx, get_meta(bcx, source.val))))
            } else {
                let val = if source.kind.is_by_ref() {
                    load_ty(bcx, source.val, source.ty)
                } else {
                    source.val
                };
                (val, None)
            };

            let info = unsized_info(bcx.ccx(), inner_source, inner_target, old_info);

            // Compute the base pointer. This doesn't change the pointer value,
            // but merely its type.
            let ptr_ty = type_of::in_memory_type_of(bcx.ccx(), inner_target).ptr_to();
            let base = PointerCast(bcx, base, ptr_ty);

            Store(bcx, base, get_dataptr(bcx, target.val));
            Store(bcx, info, get_meta(bcx, target.val));
        }

        // This can be extended to enums and tuples in the future.
        // (&ty::TyEnum(def_id_a, _), &ty::TyEnum(def_id_b, _)) |
        (&ty::TyStruct(def_id_a, _), &ty::TyStruct(def_id_b, _)) => {
            assert_eq!(def_id_a, def_id_b);

            // The target is already by-ref because it's to be written to.
            let source = unpack_datum!(bcx, source.to_ref_datum(bcx));
            assert!(target.kind.is_by_ref());

            let kind = custom_coerce_unsize_info(bcx.ccx(), source.ty, target.ty);

            let repr_source = adt::represent_type(bcx.ccx(), source.ty);
            let src_fields = match &*repr_source {
                &adt::Repr::Univariant(ref s, _) => &s.fields,
                _ => bcx.sess().span_bug(span,
                                         &format!("Non univariant struct? (repr_source: {:?})",
                                                  repr_source)),
            };
            let repr_target = adt::represent_type(bcx.ccx(), target.ty);
            let target_fields = match &*repr_target {
                &adt::Repr::Univariant(ref s, _) => &s.fields,
                _ => bcx.sess().span_bug(span,
                                         &format!("Non univariant struct? (repr_target: {:?})",
                                                  repr_target)),
            };

            let coerce_index = match kind {
                CustomCoerceUnsized::Struct(i) => i
            };
            assert!(coerce_index < src_fields.len() && src_fields.len() == target_fields.len());

            let source_val = adt::MaybeSizedValue::sized(source.val);
            let target_val = adt::MaybeSizedValue::sized(target.val);

            let iter = src_fields.iter().zip(target_fields).enumerate();
            for (i, (src_ty, target_ty)) in iter {
                let ll_source = adt::trans_field_ptr(bcx, &repr_source, source_val, Disr(0), i);
                let ll_target = adt::trans_field_ptr(bcx, &repr_target, target_val, Disr(0), i);

                // If this is the field we need to coerce, recurse on it.
                if i == coerce_index {
                    coerce_unsized(bcx, span,
                                   Datum::new(ll_source, src_ty,
                                              Rvalue::new(ByRef)),
                                   Datum::new(ll_target, target_ty,
                                              Rvalue::new(ByRef)));
                } else {
                    // Otherwise, simply copy the data from the source.
                    assert!(src_ty.is_phantom_data() || src_ty == target_ty);
                    memcpy_ty(bcx, ll_target, ll_source, src_ty);
                }
            }
        }
        _ => bcx.sess().bug(&format!("coerce_unsized: invalid coercion {:?} -> {:?}",
                                     source.ty,
                                     target.ty))
    }
    bcx
}
|
|
|
|
|
2014-11-25 20:17:11 -06:00
|
|
|
/// Translates an expression in "lvalue" mode -- meaning that it returns a reference to the memory
|
|
|
|
/// that the expr represents.
|
|
|
|
///
|
|
|
|
/// If this expression is an rvalue, this implies introducing a temporary. In other words,
|
|
|
|
/// something like `x().f` is translated into roughly the equivalent of
|
|
|
|
///
|
|
|
|
/// { tmp = x(); tmp.f }
|
2014-09-06 11:13:04 -05:00
|
|
|
pub fn trans_to_lvalue<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
|
2015-07-31 02:04:06 -05:00
|
|
|
expr: &hir::Expr,
|
2014-09-06 11:13:04 -05:00
|
|
|
name: &str)
|
|
|
|
-> DatumBlock<'blk, 'tcx, Lvalue> {
|
2014-01-15 13:39:08 -06:00
|
|
|
let mut bcx = bcx;
|
|
|
|
let datum = unpack_datum!(bcx, trans(bcx, expr));
|
|
|
|
return datum.to_lvalue_datum(bcx, name, expr.id);
|
2012-09-11 23:25:01 -05:00
|
|
|
}
|
|
|
|
|
2014-11-25 20:17:11 -06:00
|
|
|
/// A version of `trans` that ignores adjustments. You almost certainly do not want to call this
|
|
|
|
/// directly.
|
2014-09-06 11:13:04 -05:00
|
|
|
fn trans_unadjusted<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
|
2015-07-31 02:04:06 -05:00
|
|
|
expr: &hir::Expr)
|
2014-09-06 11:13:04 -05:00
|
|
|
-> DatumBlock<'blk, 'tcx, Expr> {
|
2012-08-28 17:54:45 -05:00
|
|
|
let mut bcx = bcx;
|
|
|
|
|
2015-06-18 12:25:05 -05:00
|
|
|
debug!("trans_unadjusted(expr={:?})", expr);
|
2012-08-28 17:54:45 -05:00
|
|
|
let _indenter = indenter();
|
|
|
|
|
2013-08-19 11:23:43 -05:00
|
|
|
debuginfo::set_source_location(bcx.fcx, expr.id, expr.span);
    return match expr_kind(bcx.tcx(), expr) {
        ExprKind::Lvalue | ExprKind::RvalueDatum => {
            let datum = unpack_datum!(bcx, {
                trans_datum_unadjusted(bcx, expr)
            });

            DatumBlock {bcx: bcx, datum: datum}
        }

        ExprKind::RvalueStmt => {
            bcx = trans_rvalue_stmt_unadjusted(bcx, expr);
            nil(bcx, expr_ty(bcx, expr))
        }

        ExprKind::RvalueDps => {
            let ty = expr_ty(bcx, expr);
            if type_is_zero_size(bcx.ccx(), ty) {
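                // Zero-sized results (e.g. `()`) have no bytes to store, so
                // translate the expression purely for its side effects and
                // hand back an undef immediate from `nil`.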
                bcx = trans_rvalue_dps_unadjusted(bcx, expr, Ignore);
                nil(bcx, ty)
            } else {
                let scratch = rvalue_scratch_datum(bcx, ty, "");
                bcx = trans_rvalue_dps_unadjusted(
                    bcx, expr, SaveIn(scratch.val));

                // Note: this is not obviously a good idea. It causes
                // immediate values to be loaded immediately after a
                // return from a call or other similar expression,
                // which in turn leads to alloca's having shorter
                // lifetimes and hence larger stack frames. However,
                // in turn it can lead to more register pressure.
                // Still, in practice it seems to increase
                // performance, since we have fewer problems with
                // morestack churn.
                let scratch = unpack_datum!(
                    bcx, scratch.to_appropriate_datum(bcx));

                DatumBlock::new(bcx, scratch.to_expr_datum())
            }
        }
    };

    fn nil<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, ty: Ty<'tcx>)
                       -> DatumBlock<'blk, 'tcx, Expr> {
        let llval = C_undef(type_of::type_of(bcx.ccx(), ty));
        let datum = immediate_rvalue(llval, ty);
        DatumBlock::new(bcx, datum.to_expr_datum())
    }
}

fn trans_datum_unadjusted<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                                      expr: &hir::Expr)
                                      -> DatumBlock<'blk, 'tcx, Expr> {
    let mut bcx = bcx;
    let fcx = bcx.fcx;
    let _icx = push_ctxt("trans_datum_unadjusted");

    match expr.node {
        hir::ExprType(ref e, _) => {
            trans(bcx, &e)
        }
        hir::ExprPath(..) => {
            trans_def(bcx, expr, bcx.def(expr.id))
        }
        hir::ExprField(ref base, name) => {
            trans_rec_field(bcx, &base, name.node)
        }
        hir::ExprTupField(ref base, idx) => {
            trans_rec_tup_field(bcx, &base, idx.node)
        }
        hir::ExprIndex(ref base, ref idx) => {
            trans_index(bcx, expr, &base, &idx, MethodCall::expr(expr.id))
        }
        hir::ExprBox(ref contents) => {
            // Special case for `Box<T>`
            let box_ty = expr_ty(bcx, expr);
            let contents_ty = expr_ty(bcx, &contents);
            match box_ty.sty {
                ty::TyBox(..) => {
                    trans_uniq_expr(bcx, expr, box_ty, &contents, contents_ty)
                }
                _ => bcx.sess().span_bug(expr.span,
                                         "expected unique box")
            }
        }
        hir::ExprLit(ref lit) => trans_immediate_lit(bcx, expr, &lit),
        hir::ExprBinary(op, ref lhs, ref rhs) => {
            trans_binary(bcx, expr, op, &lhs, &rhs)
        }
        hir::ExprUnary(op, ref x) => {
            trans_unary(bcx, expr, op, &x)
        }
        hir::ExprAddrOf(_, ref x) => {
            match x.node {
                hir::ExprRepeat(..) | hir::ExprVec(..) => {
                    // Special case for slices.
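                    // Roughly, `&[a, b, c]` is translated by pushing a cleanup
                    // scope for the borrowed vector, writing the elements into
                    // temporary backing storage, and yielding a fat `&[T]`
                    // datum (see `tvec::trans_slice_vec`).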
                    let cleanup_debug_loc =
                        debuginfo::get_cleanup_debug_loc_for_ast_node(bcx.ccx(),
                                                                      x.id,
                                                                      x.span,
                                                                      false);
                    fcx.push_ast_cleanup_scope(cleanup_debug_loc);
                    let datum = unpack_datum!(
                        bcx, tvec::trans_slice_vec(bcx, expr, &x));
                    bcx = fcx.pop_and_trans_ast_cleanup_scope(bcx, x.id);
                    DatumBlock::new(bcx, datum)
                }
                _ => {
                    trans_addr_of(bcx, expr, &x)
                }
            }
        }
        hir::ExprCast(ref val, _) => {
            // Datum output mode means this is a scalar cast:
            trans_imm_cast(bcx, &val, expr.id)
        }
        _ => {
            bcx.tcx().sess.span_bug(
                expr.span,
                &format!("trans_rvalue_datum_unadjusted reached \
                          fall-through case: {:?}",
                         expr.node));
        }
    }
}

fn trans_field<'blk, 'tcx, F>(bcx: Block<'blk, 'tcx>,
                              base: &hir::Expr,
                              get_idx: F)
                              -> DatumBlock<'blk, 'tcx, Expr> where
    F: FnOnce(&'blk TyCtxt<'tcx>, &VariantInfo<'tcx>) -> usize,
{
    let mut bcx = bcx;
    let _icx = push_ctxt("trans_rec_field");

    let base_datum = unpack_datum!(bcx, trans_to_lvalue(bcx, base, "field"));
    let bare_ty = base_datum.ty;
    let repr = adt::represent_type(bcx.ccx(), bare_ty);
    let vinfo = VariantInfo::from_ty(bcx.tcx(), bare_ty, None);

    let ix = get_idx(bcx.tcx(), &vinfo);
    let d = base_datum.get_element(
        bcx,
        vinfo.fields[ix].1,
        |srcval| {
            adt::trans_field_ptr(bcx, &repr, srcval, vinfo.discr, ix)
        });
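
    // If the field's type is sized, the element pointer is the whole answer.
    // Otherwise the field is a DST (e.g. a trailing `[T]` or `str`), so build
    // a scratch fat pointer: the data pointer is the field itself and the
    // metadata is copied over from the base fat pointer.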
    if type_is_sized(bcx.tcx(), d.ty) {
        DatumBlock { datum: d.to_expr_datum(), bcx: bcx }
    } else {
        let scratch = rvalue_scratch_datum(bcx, d.ty, "");
        Store(bcx, d.val, get_dataptr(bcx, scratch.val));
        let info = Load(bcx, get_meta(bcx, base_datum.val));
        Store(bcx, info, get_meta(bcx, scratch.val));

        // Always generate an lvalue datum, because this pointer doesn't own
        // the data and cleanup is scheduled elsewhere.
        DatumBlock::new(bcx, Datum::new(scratch.val, scratch.ty, LvalueExpr(d.kind)))
    }
}

/// Translates `base.field`.
fn trans_rec_field<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                               base: &hir::Expr,
                               field: ast::Name)
                               -> DatumBlock<'blk, 'tcx, Expr> {
    trans_field(bcx, base, |_, vinfo| vinfo.field_index(field))
}

/// Translates `base.<idx>`.
fn trans_rec_tup_field<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                                   base: &hir::Expr,
                                   idx: usize)
                                   -> DatumBlock<'blk, 'tcx, Expr> {
    trans_field(bcx, base, |_, _| idx)
}

fn trans_index<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                           index_expr: &hir::Expr,
                           base: &hir::Expr,
                           idx: &hir::Expr,
                           method_call: MethodCall)
                           -> DatumBlock<'blk, 'tcx, Expr> {
    //! Translates `base[idx]`.

    let _icx = push_ctxt("trans_index");
    let ccx = bcx.ccx();
    let mut bcx = bcx;

    let index_expr_debug_loc = index_expr.debug_loc();

    // Check for overloaded index.
    let method = ccx.tcx().tables.borrow().method_map.get(&method_call).cloned();
    let elt_datum = match method {
        Some(method) => {
            let method_ty = monomorphize_type(bcx, method.ty);

            let base_datum = unpack_datum!(bcx, trans(bcx, base));

            // Translate index expression.
            let ix_datum = unpack_datum!(bcx, trans(bcx, idx));

            let ref_ty = // invoked methods have LB regions instantiated:
                bcx.tcx().no_late_bound_regions(&method_ty.fn_ret()).unwrap().unwrap();
            let elt_ty = match ref_ty.builtin_deref(true, ty::NoPreference) {
                None => {
                    bcx.tcx().sess.span_bug(index_expr.span,
                                            "index method didn't return a \
                                             dereferenceable type?!")
                }
                Some(elt_tm) => elt_tm.ty,
            };

            // Overloaded. Invoke the index() method, which basically
            // yields a `&T` pointer.  We can then proceed down the
            // normal path (below) to dereference that `&T`.
            let scratch = rvalue_scratch_datum(bcx, ref_ty, "overloaded_index_elt");

            bcx = Callee::method(bcx, method)
                .call(bcx, index_expr_debug_loc,
                      ArgOverloadedOp(base_datum, Some(ix_datum)),
                      Some(SaveIn(scratch.val))).bcx;

            let datum = scratch.to_expr_datum();
            let lval = Lvalue::new("expr::trans_index overload");
            if type_is_sized(bcx.tcx(), elt_ty) {
                Datum::new(datum.to_llscalarish(bcx), elt_ty, LvalueExpr(lval))
            } else {
                Datum::new(datum.val, elt_ty, LvalueExpr(lval))
            }
        }
        None => {
            let base_datum = unpack_datum!(bcx, trans_to_lvalue(bcx,
                                                                base,
                                                                "index"));

            // Translate index expression and cast to a suitable LLVM integer.
            // Rust is less strict than LLVM in this regard.
            let ix_datum = unpack_datum!(bcx, trans(bcx, idx));
            let ix_val = ix_datum.to_llscalarish(bcx);
            let ix_size = machine::llbitsize_of_real(bcx.ccx(),
                                                     val_ty(ix_val));
            let int_size = machine::llbitsize_of_real(bcx.ccx(),
                                                      ccx.int_type());
            let ix_val = {
                if ix_size < int_size {
                    if expr_ty(bcx, idx).is_signed() {
                        SExt(bcx, ix_val, ccx.int_type())
                    } else { ZExt(bcx, ix_val, ccx.int_type()) }
                } else if ix_size > int_size {
                    Trunc(bcx, ix_val, ccx.int_type())
                } else {
                    ix_val
                }
            };

            let unit_ty = base_datum.ty.sequence_element_type(bcx.tcx());

            let (base, len) = base_datum.get_vec_base_and_len(bcx);

            debug!("trans_index: base {}", bcx.val_to_string(base));
            debug!("trans_index: len {}", bcx.val_to_string(len));
            let bounds_check = ICmp(bcx,
                                    llvm::IntUGE,
                                    ix_val,
                                    len,
                                    index_expr_debug_loc);
            let expect = ccx.get_intrinsic(&("llvm.expect.i1"));
            let expected = Call(bcx,
                                expect,
                                &[bounds_check, C_bool(ccx, false)],
                                None,
                                index_expr_debug_loc);
            bcx = with_cond(bcx, expected, |bcx| {
                controlflow::trans_fail_bounds_check(bcx,
                                                     expr_info(index_expr),
                                                     ix_val,
                                                     len)
            });
            let elt = InBoundsGEP(bcx, base, &[ix_val]);
            let elt = PointerCast(bcx, elt, type_of::type_of(ccx, unit_ty).ptr_to());
            let lval = Lvalue::new("expr::trans_index fallback");
            Datum::new(elt, unit_ty, LvalueExpr(lval))
        }
    };

    DatumBlock::new(bcx, elt_datum)
}

fn trans_def<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                         ref_expr: &hir::Expr,
                         def: Def)
                         -> DatumBlock<'blk, 'tcx, Expr> {
    //! Translates a reference to a path.

    let _icx = push_ctxt("trans_def_lvalue");
    match def {
        Def::Static(did, _) => {
            let const_ty = expr_ty(bcx, ref_expr);
            let val = get_static_val(bcx.ccx(), did, const_ty);
            let lval = Lvalue::new("expr::trans_def");
            DatumBlock::new(bcx, Datum::new(val, const_ty, LvalueExpr(lval)))
        }
        Def::Local(..) | Def::Upvar(..) => {
            DatumBlock::new(bcx, trans_local_var(bcx, def).to_expr_datum())
        }
        _ => {
            bcx.sess().span_bug(ref_expr.span,
                                &format!("{:?} should not reach expr::trans_def", def))
        }
    }
}

fn trans_rvalue_stmt_unadjusted<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                                            expr: &hir::Expr)
                                            -> Block<'blk, 'tcx> {
    let mut bcx = bcx;
    let _icx = push_ctxt("trans_rvalue_stmt");

    if bcx.unreachable.get() {
        return bcx;
    }

    debuginfo::set_source_location(bcx.fcx, expr.id, expr.span);

    match expr.node {
        hir::ExprBreak(label_opt) => {
            controlflow::trans_break(bcx, expr, label_opt.map(|l| l.node.name))
        }
        hir::ExprType(ref e, _) => {
            trans_into(bcx, &e, Ignore)
        }
        hir::ExprAgain(label_opt) => {
            controlflow::trans_cont(bcx, expr, label_opt.map(|l| l.node.name))
        }
        hir::ExprRet(ref ex) => {
            // Check to see if the return expression itself is reachable.
            // This can occur when the inner expression contains a return.
            let reachable = if let Some(ref cfg) = bcx.fcx.cfg {
                cfg.node_is_reachable(expr.id)
            } else {
                true
            };

            if reachable {
                controlflow::trans_ret(bcx, expr, ex.as_ref().map(|e| &**e))
            } else {
                // If it's not reachable, just translate the inner expression
                // directly. This avoids having to manage a return slot when
                // it won't actually be used anyway.
                if let &Some(ref x) = ex {
                    bcx = trans_into(bcx, &x, Ignore);
                }
                // Mark the end of the block as unreachable. Once we get to
                // a return expression, there's no more we should be doing
                // after this.
                Unreachable(bcx);
                bcx
            }
        }
        hir::ExprWhile(ref cond, ref body, _) => {
            controlflow::trans_while(bcx, expr, &cond, &body)
        }
        hir::ExprLoop(ref body, _) => {
            controlflow::trans_loop(bcx, expr, &body)
        }
        hir::ExprAssign(ref dst, ref src) => {
            let src_datum = unpack_datum!(bcx, trans(bcx, &src));
            let dst_datum = unpack_datum!(bcx, trans_to_lvalue(bcx, &dst, "assign"));

            if bcx.fcx.type_needs_drop(dst_datum.ty) {
                // If there are destructors involved, make sure we
                // are copying from an rvalue, since that cannot possibly
                // alias an lvalue. We are concerned about code like:
                //
                //   a = a
                //
                // but also
                //
                //   a = a.b
                //
                // where e.g. a : Option<Foo> and a.b :
                // Option<Foo>. In that case, freeing `a` before the
                // assignment may also free `a.b`!
                //
                // We could avoid this intermediary with some analysis
                // to determine whether `dst` may possibly own `src`.
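                //
                // Concretely, the sequence below is roughly:
                //
                //   let tmp = <src, by value>;
                //   drop(<dst>);       // guarded by the drop-flag hint, if any
                //   <dst> = tmp;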
|
debuginfo: Make sure that all calls to drop glue are associated with debug locations.
This commit makes rustc emit debug locations for all call
and invoke statements in LLVM IR, if they are contained
within a function that debuginfo is enabled for. This is
important because LLVM does not handle the case where a
function body containing debuginfo is inlined into another
function with debuginfo, but the inlined call statement
does not have a debug location. In this case, LLVM will
not know where (in terms of source code coordinates) the
function was inlined to and we end up with some statements
still linked to the source locations in there original,
non-inlined function without any indication that they are
indeed an inline-copy. Later, when generating DWARF from
the IR, LLVM will interpret this as corrupt IR and abort.
Unfortunately, the undesirable case described above can
still occur when using LTO. If there is a crate compiled
without debuginfo calling into a crate compiled with
debuginfo, we again end up with the conditions triggering
the error. This is why some LTO tests still fail with the
dreaded assertion, if the standard library was built with
debuginfo enabled.
That is, `RUSTFLAGS_STAGE2=-g make rustc-stage2` will
succeed but `RUSTFLAGS_STAGE2=-g make check` will still
fail after this commit has been merged. This is a problem
that has to be dealt with separately.
Fixes #17201
Fixes #15816
Fixes #15156
2014-09-24 01:49:38 -05:00
|
|
|
debuginfo::set_source_location(bcx.fcx, expr.id, expr.span);
|
2014-01-15 13:39:08 -06:00
|
|
|
let src_datum = unpack_datum!(
|
|
|
|
bcx, src_datum.to_rvalue_datum(bcx, "ExprAssign"));
|
2015-06-10 03:40:32 -05:00
|
|
|
let opt_hint_datum = dst_datum.kind.drop_flag_info.hint_datum(bcx);
|
|
|
|
let opt_hint_val = opt_hint_datum.map(|d|d.to_value());
|
|
|
|
|
|
|
|
// 1. Drop the data at the destination, passing the
|
|
|
|
// drop-hint in case the lvalue has already been
|
|
|
|
// dropped or moved.
|
|
|
|
bcx = glue::drop_ty_core(bcx,
|
|
|
|
dst_datum.val,
|
|
|
|
dst_datum.ty,
|
|
|
|
expr.debug_loc(),
|
|
|
|
false,
|
|
|
|
opt_hint_val);
|
|
|
|
|
|
|
|
// 2. We are overwriting the destination; ensure that
|
|
|
|
// its drop-hint (if any) says "initialized."
|
|
|
|
if let Some(hint_val) = opt_hint_val {
|
Add dropflag hints (stack-local booleans) for unfragmented paths in trans.
Added code to maintain these hints at runtime, and to conditionalize
drop-filling and calls to destructors.
In this early stage, we are using hints, so we are always free to
leave out a flag for a path -- then we just pass `None` as the
dropflag hint in the corresponding schedule cleanup call. But, once a
path has a hint, we must at least maintain it: i.e. if the hint
exists, we must ensure it is never set to "moved" if the data in
question might actually have been initialized. It remains sound to
conservatively set the hint to "initialized" as long as the true
drop-flag embedded in the value itself is up-to-date.
----
Here are some high-level details I want to point out:
* We maintain the hint in Lvalue::post_store, marking the lvalue as
moved. (But also continue drop-filling if necessary.)
* We update the hint on ExprAssign.
* We pass along the hint in once closures that capture-by-move.
* You only call `drop_ty` for state that does not have an associated hint.
If you have a hint, you must call `drop_ty_core` instead.
(Originally I passed the hint into `drop_ty` as well, to make the
connection to a hint more apparent, but the vast majority of
current calls to `drop_ty` are in contexts where no hint is
available, so it just seemed like noise in the resulting diff.)
2015-06-07 02:25:14 -05:00
|
|
|
let hint_llval = hint_val.value();
|
2015-08-05 02:46:59 -05:00
|
|
|
let drop_needed = C_u8(bcx.fcx.ccx, adt::DTOR_NEEDED_HINT);
|
Add dropflag hints (stack-local booleans) for unfragmented paths in trans.
Added code to maintain these hints at runtime, and to conditionalize
drop-filling and calls to destructors.
In this early stage, we are using hints, so we are always free to
leave out a flag for a path -- then we just pass `None` as the
dropflag hint in the corresponding schedule cleanup call. But, once a
path has a hint, we must at least maintain it: i.e. if the hint
exists, we must ensure it is never set to "moved" if the data in
question might actually have been initialized. It remains sound to
conservatively set the hint to "initialized" as long as the true
drop-flag embedded in the value itself is up-to-date.
----
Here are some high-level details I want to point out:
* We maintain the hint in Lvalue::post_store, marking the lvalue as
moved. (But also continue drop-filling if necessary.)
* We update the hint on ExprAssign.
* We pass along the hint in once closures that capture-by-move.
* You only call `drop_ty` for state that does not have an associated hint.
If you have a hint, you must call `drop_ty_core` instead.
(Originally I passed the hint into `drop_ty` as well, to make the
connection to a hint more apparent, but the vast majority of
current calls to `drop_ty` are in contexts where no hint is
available, so it just seemed like noise in the resulting diff.)
2015-06-07 02:25:14 -05:00
|
|
|
Store(bcx, drop_needed, hint_llval);
|
|
|
|
}
|
2014-01-15 13:39:08 -06:00
|
|
|
src_datum.store_to(bcx, dst_datum.val)
|
|
|
|
} else {
|
2014-10-22 14:23:26 -05:00
|
|
|
src_datum.store_to(bcx, dst_datum.val)
|
2014-01-15 13:39:08 -06:00
|
|
|
}
|
2012-08-28 17:54:45 -05:00
|
|
|
}
|
2015-07-31 02:04:06 -05:00
|
|
|
hir::ExprAssignOp(op, ref dst, ref src) => {
|
2016-03-06 09:32:47 -06:00
|
|
|
let method = bcx.tcx().tables
|
|
|
|
.borrow()
|
|
|
|
.method_map
|
|
|
|
.get(&MethodCall::expr(expr.id)).cloned();
|
2015-09-10 19:16:57 -05:00
|
|
|
|
2016-03-06 09:32:47 -06:00
|
|
|
if let Some(method) = method {
|
2016-02-09 14:24:11 -06:00
|
|
|
let dst = unpack_datum!(bcx, trans(bcx, &dst));
|
|
|
|
let src_datum = unpack_datum!(bcx, trans(bcx, &src));
|
2016-03-06 09:32:47 -06:00
|
|
|
|
|
|
|
Callee::method(bcx, method)
|
|
|
|
.call(bcx, expr.debug_loc(),
|
|
|
|
ArgOverloadedOp(dst, Some(src_datum)), None).bcx
|
2015-09-10 19:16:57 -05:00
|
|
|
} else {
|
2016-02-09 14:24:11 -06:00
|
|
|
trans_assign_op(bcx, expr, op, &dst, &src)
|
2015-09-10 19:16:57 -05:00
|
|
|
}
|
2012-10-27 19:14:09 -05:00
|
|
|
}
|
2015-07-31 02:04:06 -05:00
|
|
|
hir::ExprInlineAsm(ref a) => {
|
2014-01-15 13:39:08 -06:00
|
|
|
asm::trans_inline_asm(bcx, a)
|
2013-03-12 19:53:25 -05:00
|
|
|
}
|
2012-08-28 17:54:45 -05:00
|
|
|
_ => {
|
|
|
|
bcx.tcx().sess.span_bug(
|
|
|
|
expr.span,
|
2015-01-07 10:58:31 -06:00
|
|
|
&format!("trans_rvalue_stmt_unadjusted reached \
|
2014-12-20 02:09:35 -06:00
|
|
|
fall-through case: {:?}",
|
2015-02-20 13:08:14 -06:00
|
|
|
expr.node));
|
2012-08-28 17:54:45 -05:00
|
|
|
}
|
2014-01-15 13:39:08 -06:00
|
|
|
}
|
2012-08-28 17:54:45 -05:00
|
|
|
}
|
|
|
|
|
2014-09-06 11:13:04 -05:00
|
|
|
fn trans_rvalue_dps_unadjusted<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
|
2015-07-31 02:04:06 -05:00
|
|
|
expr: &hir::Expr,
|
2014-09-06 11:13:04 -05:00
|
|
|
dest: Dest)
|
|
|
|
-> Block<'blk, 'tcx> {
|
2013-06-16 23:23:24 -05:00
|
|
|
let _icx = push_ctxt("trans_rvalue_dps_unadjusted");
|
2014-01-15 13:39:08 -06:00
|
|
|
let mut bcx = bcx;
|
2012-08-28 17:54:45 -05:00
|
|
|
|
debuginfo: Make sure that all calls to drop glue are associated with debug locations.
This commit makes rustc emit debug locations for all call
and invoke statements in LLVM IR, if they are contained
within a function that debuginfo is enabled for. This is
important because LLVM does not handle the case where a
function body containing debuginfo is inlined into another
function with debuginfo, but the inlined call statement
does not have a debug location. In this case, LLVM will
not know where (in terms of source code coordinates) the
function was inlined to and we end up with some statements
still linked to the source locations in there original,
non-inlined function without any indication that they are
indeed an inline-copy. Later, when generating DWARF from
the IR, LLVM will interpret this as corrupt IR and abort.
Unfortunately, the undesirable case described above can
still occur when using LTO. If there is a crate compiled
without debuginfo calling into a crate compiled with
debuginfo, we again end up with the conditions triggering
the error. This is why some LTO tests still fail with the
dreaded assertion, if the standard library was built with
debuginfo enabled.
That is, `RUSTFLAGS_STAGE2=-g make rustc-stage2` will
succeed but `RUSTFLAGS_STAGE2=-g make check` will still
fail after this commit has been merged. This is a problem
that has to be dealt with separately.
Fixes #17201
Fixes #15816
Fixes #15156
2014-09-24 01:49:38 -05:00
|
|
|
debuginfo::set_source_location(bcx.fcx, expr.id, expr.span);
|
|
|
|
|
2016-03-06 09:32:47 -06:00
|
|
|
// Entry into the method table if this is an overloaded call/op.
|
|
|
|
let method_call = MethodCall::expr(expr.id);
|
|
|
|
|
2013-01-10 12:59:58 -06:00
|
|
|
match expr.node {
|
2015-02-01 01:59:46 -06:00
|
|
|
hir::ExprType(ref e, _) => {
|
2016-02-09 14:24:11 -06:00
|
|
|
trans_into(bcx, &e, dest)
|
2015-02-01 01:59:46 -06:00
|
|
|
}
|
2015-07-31 02:04:06 -05:00
|
|
|
hir::ExprPath(..) => {
|
2014-01-27 06:18:36 -06:00
|
|
|
trans_def_dps_unadjusted(bcx, expr, bcx.def(expr.id), dest)
|
2012-08-28 17:54:45 -05:00
|
|
|
}
|
2015-07-31 02:04:06 -05:00
|
|
|
hir::ExprIf(ref cond, ref thn, ref els) => {
|
2016-02-09 14:24:11 -06:00
|
|
|
controlflow::trans_if(bcx, expr.id, &cond, &thn, els.as_ref().map(|e| &**e), dest)
|
2012-08-28 17:54:45 -05:00
|
|
|
}
|
2015-07-31 02:04:06 -05:00
|
|
|
hir::ExprMatch(ref discr, ref arms, _) => {
|
2016-02-09 14:24:11 -06:00
|
|
|
_match::trans_match(bcx, expr, &discr, &arms[..], dest)
|
2012-08-28 17:54:45 -05:00
|
|
|
}
|
2015-07-31 02:04:06 -05:00
|
|
|
hir::ExprBlock(ref blk) => {
|
2016-02-09 14:24:11 -06:00
|
|
|
controlflow::trans_block(bcx, &blk, dest)
|
2012-08-28 17:54:45 -05:00
|
|
|
}
|
2015-07-31 02:04:06 -05:00
|
|
|
hir::ExprStruct(_, ref fields, ref base) => {
|
2014-08-14 21:16:35 -05:00
|
|
|
trans_struct(bcx,
|
2015-02-18 13:48:57 -06:00
|
|
|
&fields[..],
|
2014-09-07 12:09:06 -05:00
|
|
|
base.as_ref().map(|e| &**e),
|
2014-08-14 21:16:35 -05:00
|
|
|
expr.span,
|
|
|
|
expr.id,
|
2014-12-14 18:17:11 -06:00
|
|
|
node_id_type(bcx, expr.id),
|
2014-08-14 21:16:35 -05:00
|
|
|
dest)
|
2012-08-28 17:54:45 -05:00
|
|
|
}
|
2015-07-31 02:04:06 -05:00
|
|
|
hir::ExprTup(ref args) => {
|
|
|
|
let numbered_fields: Vec<(usize, &hir::Expr)> =
|
2014-09-07 12:09:06 -05:00
|
|
|
args.iter().enumerate().map(|(i, arg)| (i, &**arg)).collect();
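
            // Tuples are laid out like a univariant ADT, so reuse `trans_adt`
            // with discriminant 0 and the elements as positionally numbered
            // fields.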
            trans_adt(bcx,
                      expr_ty(bcx, expr),
                      Disr(0),
                      &numbered_fields[..],
                      None,
                      dest,
                      expr.debug_loc())
        }
        hir::ExprLit(ref lit) => {
            match lit.node {
                ast::LitKind::Str(ref s, _) => {
                    tvec::trans_lit_str(bcx, expr, (*s).clone(), dest)
                }
                _ => {
                    bcx.tcx()
                       .sess
                       .span_bug(expr.span,
                                 "trans_rvalue_dps_unadjusted shouldn't be \
                                  translating this type of literal")
                }
            }
        }
        hir::ExprVec(..) | hir::ExprRepeat(..) => {
            tvec::trans_fixed_vstore(bcx, expr, dest)
        }
        hir::ExprClosure(_, ref decl, ref body) => {
            let dest = match dest {
                SaveIn(lldest) => closure::Dest::SaveIn(bcx, lldest),
                Ignore => closure::Dest::Ignore(bcx.ccx())
            };

            // NB. To get the id of the closure, we don't use
            // `local_def_id(id)`, but rather we extract the closure
            // def-id from the expr's type. This is because this may
            // be an inlined expression from another crate, and we
            // want to get the ORIGINAL closure def-id, since that is
            // the key we need to find the closure-kind and
            // closure-type etc.
            let (def_id, substs) = match expr_ty(bcx, expr).sty {
                ty::TyClosure(def_id, ref substs) => (def_id, substs),
                ref t =>
                    bcx.tcx().sess.span_bug(
                        expr.span,
                        &format!("closure expr without closure type: {:?}", t)),
            };

            closure::trans_closure_expr(dest,
                                        decl,
                                        body,
                                        expr.id,
                                        def_id,
                                        substs,
                                        &expr.attrs).unwrap_or(bcx)
        }
        hir::ExprCall(ref f, ref args) => {
            let method = bcx.tcx().tables.borrow().method_map.get(&method_call).cloned();
|
|
|
|
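// An overloaded call (through the `Fn`/`FnMut`/`FnOnce` traits) has an entry in
// the method map; in that case the callee expression itself is prepended to the
// argument list and dispatched through the trait method. Otherwise the callee is
// a plain fn item or fn pointer.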
let (callee, args) = if let Some(method) = method {
|
|
|
|
let mut all_args = vec![&**f];
|
|
|
|
all_args.extend(args.iter().map(|e| &**e));
|
|
|
|
|
|
|
|
(Callee::method(bcx, method), ArgOverloadedCall(all_args))
|
2014-06-01 18:35:01 -05:00
|
|
|
} else {
|
2016-03-06 09:32:47 -06:00
|
|
|
let f = unpack_datum!(bcx, trans(bcx, f));
|
|
|
|
(match f.ty.sty {
|
|
|
|
ty::TyFnDef(def_id, substs, _) => {
|
|
|
|
Callee::def(bcx.ccx(), def_id, substs, f.ty)
|
|
|
|
}
|
|
|
|
ty::TyFnPtr(_) => {
|
|
|
|
let f = unpack_datum!(bcx,
|
|
|
|
f.to_rvalue_datum(bcx, "callee"));
|
|
|
|
Callee::ptr(f)
|
|
|
|
}
|
|
|
|
_ => {
|
|
|
|
bcx.tcx().sess.span_bug(expr.span,
|
|
|
|
&format!("type of callee is not a fn: {}", f.ty));
|
|
|
|
}
|
|
|
|
}, ArgExprs(&args))
|
|
|
|
};
|
|
|
|
callee.call(bcx, expr.debug_loc(), args, Some(dest)).bcx
|
2012-08-28 17:54:45 -05:00
|
|
|
}
|
2015-07-31 02:04:06 -05:00
|
|
|
hir::ExprMethodCall(_, _, ref args) => {
|
2016-03-06 09:32:47 -06:00
|
|
|
Callee::method_call(bcx, method_call)
|
|
|
|
.call(bcx, expr.debug_loc(), ArgExprs(&args), Some(dest)).bcx
|
2012-11-30 13:18:25 -06:00
|
|
|
}
|
2016-03-06 09:32:47 -06:00
|
|
|
hir::ExprBinary(op, ref lhs, ref rhs_expr) => {
|
2012-08-28 17:54:45 -05:00
|
|
|
// if not overloaded, would be RvalueDatumExpr
|
2016-02-09 14:24:11 -06:00
|
|
|
let lhs = unpack_datum!(bcx, trans(bcx, &lhs));
|
2016-03-06 09:32:47 -06:00
|
|
|
let mut rhs = unpack_datum!(bcx, trans(bcx, &rhs_expr));
|
|
|
|
if !rustc_front::util::is_by_value_binop(op.node) {
|
|
|
|
rhs = unpack_datum!(bcx, auto_ref(bcx, rhs, rhs_expr));
|
|
|
|
}
|
|
|
|
|
|
|
|
Callee::method_call(bcx, method_call)
|
|
|
|
.call(bcx, expr.debug_loc(),
|
|
|
|
ArgOverloadedOp(lhs, Some(rhs)), Some(dest)).bcx
|
2012-08-28 17:54:45 -05:00
|
|
|
}
|
2016-03-06 09:32:47 -06:00
|
|
|
hir::ExprUnary(_, ref subexpr) => {
|
2012-08-28 17:54:45 -05:00
|
|
|
// if not overloaded, would be RvalueDatumExpr
|
2016-02-09 14:24:11 -06:00
|
|
|
let arg = unpack_datum!(bcx, trans(bcx, &subexpr));
|
2016-03-06 09:32:47 -06:00
|
|
|
|
|
|
|
Callee::method_call(bcx, method_call)
|
|
|
|
.call(bcx, expr.debug_loc(),
|
|
|
|
ArgOverloadedOp(arg, None), Some(dest)).bcx
|
2012-08-28 17:54:45 -05:00
|
|
|
}
|
2015-07-31 02:04:06 -05:00
|
|
|
hir::ExprCast(..) => {
|
2015-03-19 23:15:27 -05:00
|
|
|
// Trait casts used to come this way, now they should be coercions.
|
|
|
|
bcx.tcx().sess.span_bug(expr.span, "DPS expr_cast (residual trait cast?)")
|
2012-08-28 17:54:45 -05:00
|
|
|
}
|
2015-09-10 19:16:57 -05:00
|
|
|
hir::ExprAssignOp(op, _, _) => {
|
|
|
|
bcx.tcx().sess.span_bug(
|
|
|
|
expr.span,
|
|
|
|
&format!("augmented assignment `{}=` should always be a rvalue_stmt",
|
|
|
|
rustc_front::util::binop_to_string(op.node)))
|
2012-08-28 17:54:45 -05:00
|
|
|
}
|
|
|
|
_ => {
|
|
|
|
bcx.tcx().sess.span_bug(
|
|
|
|
expr.span,
|
2015-01-07 10:58:31 -06:00
|
|
|
&format!("trans_rvalue_dps_unadjusted reached fall-through \
|
2014-12-20 02:09:35 -06:00
|
|
|
case: {:?}",
|
2015-02-20 13:08:14 -06:00
|
|
|
expr.node));
|
2012-08-28 17:54:45 -05:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-09-06 11:13:04 -05:00
|
|
|
fn trans_def_dps_unadjusted<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
|
2015-07-31 02:04:06 -05:00
|
|
|
ref_expr: &hir::Expr,
|
2016-01-20 13:31:10 -06:00
|
|
|
def: Def,
|
2014-09-06 11:13:04 -05:00
|
|
|
dest: Dest)
|
|
|
|
-> Block<'blk, 'tcx> {
|
2013-06-16 23:23:24 -05:00
|
|
|
let _icx = push_ctxt("trans_def_dps_unadjusted");
|
2012-08-28 17:54:45 -05:00
|
|
|
|
|
|
|
let lldest = match dest {
|
|
|
|
SaveIn(lldest) => lldest,
|
|
|
|
Ignore => { return bcx; }
|
|
|
|
};
|
|
|
|
|
2016-03-06 09:32:47 -06:00
|
|
|
let ty = expr_ty(bcx, ref_expr);
|
|
|
|
if let ty::TyFnDef(..) = ty.sty {
|
|
|
|
// Zero-sized function or ctor.
|
|
|
|
return bcx;
|
|
|
|
}
|
|
|
|
|
2012-08-28 17:54:45 -05:00
|
|
|
match def {
|
2016-01-20 13:31:10 -06:00
|
|
|
Def::Variant(tid, vid) => {
|
2015-08-02 14:52:50 -05:00
|
|
|
let variant = bcx.tcx().lookup_adt_def(tid).variant_with_id(vid);
|
2016-03-06 09:32:47 -06:00
|
|
|
// Nullary variant.
|
|
|
|
let ty = expr_ty(bcx, ref_expr);
|
|
|
|
let repr = adt::represent_type(bcx.ccx(), ty);
|
|
|
|
adt::trans_set_discr(bcx, &repr, lldest, Disr::from(variant.disr_val));
|
|
|
|
bcx
|
2012-08-28 17:54:45 -05:00
|
|
|
}
|
2016-01-20 13:31:10 -06:00
|
|
|
Def::Struct(..) => {
|
2014-10-31 03:51:16 -05:00
|
|
|
match ty.sty {
|
2015-08-25 13:52:15 -05:00
|
|
|
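// A struct with a destructor embeds a drop flag in its representation,
// so set the discriminant/flag here to mark the value as initialized.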
ty::TyStruct(def, _) if def.has_dtor() => {
|
2014-03-15 15:29:34 -05:00
|
|
|
let repr = adt::represent_type(bcx.ccx(), ty);
|
2016-02-09 14:24:11 -06:00
|
|
|
adt::trans_set_discr(bcx, &repr, lldest, Disr(0));
|
2013-06-01 16:39:39 -05:00
|
|
|
}
|
2014-01-27 06:18:36 -06:00
|
|
|
_ => {}
|
2013-06-01 16:39:39 -05:00
|
|
|
}
|
2014-01-27 06:18:36 -06:00
|
|
|
bcx
|
2012-10-30 17:53:06 -05:00
|
|
|
}
|
2012-08-28 17:54:45 -05:00
|
|
|
_ => {
|
2015-01-07 10:58:31 -06:00
|
|
|
bcx.tcx().sess.span_bug(ref_expr.span, &format!(
|
2014-12-20 02:09:35 -06:00
|
|
|
"Non-DPS def {:?} referened by {}",
|
2015-02-20 13:08:14 -06:00
|
|
|
def, bcx.node_id_to_string(ref_expr.id)));
|
2012-08-28 17:54:45 -05:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-11-25 20:17:11 -06:00
|
|
|
/// Translates a reference to a local variable or argument. This always results in an lvalue datum.
|
2014-09-06 11:13:04 -05:00
|
|
|
pub fn trans_local_var<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
|
2016-01-20 13:31:10 -06:00
|
|
|
def: Def)
|
2014-09-29 14:11:30 -05:00
|
|
|
-> Datum<'tcx, Lvalue> {
|
2013-06-16 23:23:24 -05:00
|
|
|
let _icx = push_ctxt("trans_local_var");
|
2012-08-28 17:54:45 -05:00
|
|
|
|
2014-09-17 09:28:19 -05:00
|
|
|
match def {
|
2016-01-20 13:31:10 -06:00
|
|
|
Def::Upvar(_, nid, _, _) => {
|
2013-01-10 12:59:58 -06:00
|
|
|
// Can't move upvars, so this is never a ZeroMemLastUse.
|
2012-08-28 17:54:45 -05:00
|
|
|
let local_ty = node_id_type(bcx, nid);
|
2015-06-10 04:42:48 -05:00
|
|
|
let lval = Lvalue::new_with_hint("expr::trans_local_var (upvar)",
|
|
|
|
bcx, nid, HintKind::ZeroAndMaintain);
|
2014-11-06 11:25:16 -06:00
|
|
|
match bcx.fcx.llupvars.borrow().get(&nid) {
|
2015-06-05 14:34:03 -05:00
|
|
|
Some(&val) => Datum::new(val, local_ty, lval),
|
2012-08-28 17:54:45 -05:00
|
|
|
None => {
|
2015-01-07 10:58:31 -06:00
|
|
|
bcx.sess().bug(&format!(
|
2014-10-15 01:25:34 -05:00
|
|
|
"trans_local_var: no llval for upvar {} found",
|
2015-02-20 13:08:14 -06:00
|
|
|
nid));
|
2012-08-28 17:54:45 -05:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2016-01-20 13:31:10 -06:00
|
|
|
Def::Local(_, nid) => {
|
2014-11-06 11:25:16 -06:00
|
|
|
let datum = match bcx.fcx.lllocals.borrow().get(&nid) {
|
2014-09-17 09:28:19 -05:00
|
|
|
Some(&v) => v,
|
|
|
|
None => {
|
2015-01-07 10:58:31 -06:00
|
|
|
bcx.sess().bug(&format!(
|
2014-10-15 01:25:34 -05:00
|
|
|
"trans_local_var: no datum for local/arg {} found",
|
2015-02-20 13:08:14 -06:00
|
|
|
nid));
|
2014-09-17 09:28:19 -05:00
|
|
|
}
|
|
|
|
};
|
2014-10-15 01:25:34 -05:00
|
|
|
debug!("take_local(nid={}, v={}, ty={})",
|
2015-06-18 12:25:05 -05:00
|
|
|
nid, bcx.val_to_string(datum.val), datum.ty);
|
2014-09-17 09:28:19 -05:00
|
|
|
datum
|
2012-08-28 17:54:45 -05:00
|
|
|
}
|
|
|
|
_ => {
|
2015-01-07 10:58:31 -06:00
|
|
|
bcx.sess().unimpl(&format!(
|
2014-12-20 02:09:35 -06:00
|
|
|
"unsupported def type in trans_local_var: {:?}",
|
2015-02-20 13:08:14 -06:00
|
|
|
def));
|
2012-08-28 17:54:45 -05:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-09-06 11:13:04 -05:00
|
|
|
fn trans_struct<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
|
2015-07-31 02:04:06 -05:00
|
|
|
fields: &[hir::Field],
|
|
|
|
base: Option<&hir::Expr>,
|
2014-09-06 11:13:04 -05:00
|
|
|
expr_span: codemap::Span,
|
2014-09-24 01:49:38 -05:00
|
|
|
expr_id: ast::NodeId,
|
2014-12-14 18:17:11 -06:00
|
|
|
ty: Ty<'tcx>,
|
2014-09-06 11:13:04 -05:00
|
|
|
dest: Dest) -> Block<'blk, 'tcx> {
|
2013-06-16 23:23:24 -05:00
|
|
|
let _icx = push_ctxt("trans_rec");
|
2012-08-28 17:54:45 -05:00
|
|
|
|
|
|
|
let tcx = bcx.tcx();
|
2015-08-02 14:52:50 -05:00
|
|
|
let vinfo = VariantInfo::of_node(tcx, ty, expr_id);
|
|
|
|
|
|
|
|
let mut need_base = vec![true; vinfo.fields.len()];
|
|
|
|
|
|
|
|
let numbered_fields = fields.iter().map(|field| {
|
2015-09-20 06:00:18 -05:00
|
|
|
let pos = vinfo.field_index(field.name.node);
|
2015-08-02 14:52:50 -05:00
|
|
|
need_base[pos] = false;
|
|
|
|
(pos, &*field.expr)
|
|
|
|
}).collect::<Vec<_>>();
|
|
|
|
|
|
|
|
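// Any field not mentioned in the literal must be copied from the base
// expression (functional record update); if there is no base, every field
// must be present explicitly.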
let optbase = match base {
|
|
|
|
Some(base_expr) => {
|
|
|
|
let mut leftovers = Vec::new();
|
|
|
|
for (i, b) in need_base.iter().enumerate() {
|
|
|
|
if *b {
|
|
|
|
leftovers.push((i, vinfo.fields[i].1));
|
2012-10-23 17:56:40 -05:00
|
|
|
}
|
2012-08-28 17:54:45 -05:00
|
|
|
}
|
2015-08-02 14:52:50 -05:00
|
|
|
Some(StructBaseInfo {expr: base_expr,
|
|
|
|
fields: leftovers })
|
|
|
|
}
|
|
|
|
None => {
|
|
|
|
if need_base.iter().any(|b| *b) {
|
|
|
|
tcx.sess.span_bug(expr_span, "missing fields and no base expr")
|
2013-02-24 00:53:40 -06:00
|
|
|
}
|
2015-08-02 14:52:50 -05:00
|
|
|
None
|
|
|
|
}
|
|
|
|
};
|
2012-08-28 17:54:45 -05:00
|
|
|
|
2015-08-02 14:52:50 -05:00
|
|
|
trans_adt(bcx,
|
|
|
|
ty,
|
|
|
|
vinfo.discr,
|
|
|
|
&numbered_fields,
|
|
|
|
optbase,
|
|
|
|
dest,
|
|
|
|
DebugLoc::At(expr_id, expr_span))
|
2012-08-28 17:54:45 -05:00
|
|
|
}
|
|
|
|
|
2014-11-24 19:06:06 -06:00
|
|
|
/// Information that `trans_adt` needs in order to fill in the fields
|
|
|
|
/// of a struct copied from a base struct (e.g., from an expression
|
|
|
|
/// like `Foo { a: b, ..base }`).
|
|
|
|
///
|
|
|
|
/// Note that `fields` may be empty; the base expression must always be
|
|
|
|
/// evaluated for side-effects.
|
2014-09-29 14:11:30 -05:00
|
|
|
pub struct StructBaseInfo<'a, 'tcx> {
|
2013-03-02 16:03:41 -06:00
|
|
|
/// The base expression; will be evaluated after all explicit fields.
|
2015-07-31 02:04:06 -05:00
|
|
|
expr: &'a hir::Expr,
|
2013-03-02 16:03:41 -06:00
|
|
|
/// The indices of fields to copy paired with their types.
|
2015-03-25 19:06:52 -05:00
|
|
|
fields: Vec<(usize, Ty<'tcx>)>
|
2014-09-07 12:09:06 -05:00
|
|
|
}
|
2013-02-24 00:53:40 -06:00
|
|
|
|
2014-11-24 19:06:06 -06:00
|
|
|
/// Constructs an ADT instance:
|
|
|
|
///
|
|
|
|
/// - `fields` should be a list of field indices paired with the
|
|
|
|
/// expression to store into that field. The initializers will be
|
|
|
|
/// evaluated in the order specified by `fields`.
|
|
|
|
///
|
|
|
|
/// - `optbase` contains information on the base struct (if any) from
|
|
|
|
/// which remaining fields are copied; see comments on `StructBaseInfo`.
|
2014-09-29 14:11:30 -05:00
|
|
|
pub fn trans_adt<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
|
|
|
|
ty: Ty<'tcx>,
|
2016-01-16 09:03:09 -06:00
|
|
|
discr: Disr,
|
2015-07-31 02:04:06 -05:00
|
|
|
fields: &[(usize, &hir::Expr)],
|
2014-09-29 14:11:30 -05:00
|
|
|
optbase: Option<StructBaseInfo<'a, 'tcx>>,
|
|
|
|
dest: Dest,
|
2014-12-11 06:53:30 -06:00
|
|
|
debug_location: DebugLoc)
|
2014-09-29 14:11:30 -05:00
|
|
|
-> Block<'blk, 'tcx> {
|
2013-06-16 23:23:24 -05:00
|
|
|
let _icx = push_ctxt("trans_adt");
|
2014-01-15 13:39:08 -06:00
|
|
|
let fcx = bcx.fcx;
|
2014-08-14 21:45:57 -05:00
|
|
|
let repr = adt::represent_type(bcx.ccx(), ty);
|
|
|
|
|
2014-12-11 06:53:30 -06:00
|
|
|
debug_location.apply(bcx.fcx);
|
2014-09-24 01:49:38 -05:00
|
|
|
|
2014-08-14 21:45:57 -05:00
|
|
|
// If we don't care about the result, just make a
|
|
|
|
// temporary stack slot
|
2012-08-28 17:54:45 -05:00
|
|
|
let addr = match dest {
|
2014-08-14 21:45:57 -05:00
|
|
|
SaveIn(pos) => pos,
|
2015-08-25 11:24:16 -05:00
|
|
|
Ignore => {
|
|
|
|
let llresult = alloc_ty(bcx, ty, "temp");
|
|
|
|
call_lifetime_start(bcx, llresult);
|
|
|
|
llresult
|
|
|
|
}
|
2012-08-28 17:54:45 -05:00
|
|
|
};
|
2014-01-15 13:39:08 -06:00
|
|
|
|
2016-01-08 13:40:13 -06:00
|
|
|
debug!("trans_adt");
|
|
|
|
|
2014-01-15 13:39:08 -06:00
|
|
|
// This scope holds intermediates that must be cleaned should
|
2014-10-09 14:17:22 -05:00
|
|
|
// a panic occur before the ADT as a whole is ready.
|
2014-01-15 13:39:08 -06:00
|
|
|
let custom_cleanup_scope = fcx.push_custom_cleanup_scope();
|
|
|
|
|
2015-08-06 10:25:15 -05:00
|
|
|
if ty.is_simd() {
|
2015-03-08 14:46:04 -05:00
|
|
|
// Issue 23112: The original logic appeared vulnerable to the same
|
|
|
|
// order-of-eval bug. But, SIMD values are tuple-structs;
|
|
|
|
// i.e. functional record update (FRU) syntax is unavailable.
|
|
|
|
//
|
|
|
|
// To be safe, double-check that we did not get here via FRU.
|
|
|
|
assert!(optbase.is_none());
|
|
|
|
|
|
|
|
// This is the constructor of a SIMD type, such types are
|
|
|
|
// always primitive machine types and so do not have a
|
|
|
|
// destructor or require any clean-up.
|
|
|
|
let llty = type_of::type_of(bcx.ccx(), ty);
|
|
|
|
|
|
|
|
// keep the vector in a register, running through the fields and
|
|
|
|
// `insertelement`ing them directly into that register
|
|
|
|
// (i.e. avoid GEPi and `store`s to an alloca).
|
|
|
|
let mut vec_val = C_undef(llty);
|
|
|
|
|
|
|
|
for &(i, ref e) in fields {
|
2016-02-09 14:24:11 -06:00
|
|
|
let block_datum = trans(bcx, &e);
|
2015-03-08 14:46:04 -05:00
|
|
|
bcx = block_datum.bcx;
|
|
|
|
let position = C_uint(bcx.ccx(), i);
|
|
|
|
let value = block_datum.datum.to_llscalarish(bcx);
|
|
|
|
vec_val = InsertElement(bcx, vec_val, value, position);
|
|
|
|
}
|
|
|
|
Store(bcx, vec_val, addr);
|
|
|
|
} else if let Some(base) = optbase {
|
|
|
|
// Issue 23112: If there is a base, then order-of-eval
|
|
|
|
// requires field expressions eval'ed before base expression.
|
|
|
|
|
|
|
|
// First, trans field expressions to temporary scratch values.
|
|
|
|
let scratch_vals: Vec<_> = fields.iter().map(|&(i, ref e)| {
|
2016-02-09 14:24:11 -06:00
|
|
|
let datum = unpack_datum!(bcx, trans(bcx, &e));
|
2015-03-08 14:46:04 -05:00
|
|
|
(i, datum)
|
|
|
|
}).collect();
|
|
|
|
|
|
|
|
debug_location.apply(bcx.fcx);
|
|
|
|
|
|
|
|
// Second, trans the base to the dest.
|
2016-01-16 09:03:09 -06:00
|
|
|
assert_eq!(discr, Disr(0));
|
2014-08-14 21:16:35 -05:00
|
|
|
|
2015-12-06 07:38:29 -06:00
|
|
|
let addr = adt::MaybeSizedValue::sized(addr);
|
2016-02-09 14:24:11 -06:00
|
|
|
match expr_kind(bcx.tcx(), &base.expr) {
|
2015-06-21 14:29:13 -05:00
|
|
|
ExprKind::RvalueDps | ExprKind::RvalueDatum if !bcx.fcx.type_needs_drop(ty) => {
|
2016-02-09 14:24:11 -06:00
|
|
|
bcx = trans_into(bcx, &base.expr, SaveIn(addr.value));
|
2014-09-16 13:11:16 -05:00
|
|
|
},
|
2015-06-21 14:29:13 -05:00
|
|
|
ExprKind::RvalueStmt => {
|
|
|
|
bcx.tcx().sess.bug("unexpected expr kind for struct base expr")
|
|
|
|
}
|
2014-09-16 13:11:16 -05:00
|
|
|
_ => {
|
2016-02-09 14:24:11 -06:00
|
|
|
let base_datum = unpack_datum!(bcx, trans_to_lvalue(bcx, &base.expr, "base"));
|
2015-01-31 11:20:46 -06:00
|
|
|
for &(i, t) in &base.fields {
|
2014-08-14 21:16:35 -05:00
|
|
|
let datum = base_datum.get_element(
|
2016-02-09 14:24:11 -06:00
|
|
|
bcx, t, |srcval| adt::trans_field_ptr(bcx, &repr, srcval, discr, i));
|
2014-12-18 08:26:10 -06:00
|
|
|
assert!(type_is_sized(bcx.tcx(), datum.ty));
|
2016-02-09 14:24:11 -06:00
|
|
|
let dest = adt::trans_field_ptr(bcx, &repr, addr, discr, i);
|
2014-08-14 21:16:35 -05:00
|
|
|
bcx = datum.store_to(bcx, dest);
|
|
|
|
}
|
2014-09-16 13:11:16 -05:00
|
|
|
}
|
2014-08-14 21:16:35 -05:00
|
|
|
}
|
2014-11-04 03:59:00 -06:00
|
|
|
|
2015-03-08 14:46:04 -05:00
|
|
|
// Finally, move scratch field values into actual field locations
|
2015-06-10 11:22:20 -05:00
|
|
|
for (i, datum) in scratch_vals {
|
2016-02-09 14:24:11 -06:00
|
|
|
let dest = adt::trans_field_ptr(bcx, &repr, addr, discr, i);
|
2015-03-08 14:46:04 -05:00
|
|
|
bcx = datum.store_to(bcx, dest);
|
2014-11-04 03:59:00 -06:00
|
|
|
}
|
|
|
|
} else {
|
2015-03-08 14:46:04 -05:00
|
|
|
// No base means we can write all fields directly in place.
|
2015-12-06 07:38:29 -06:00
|
|
|
let addr = adt::MaybeSizedValue::sized(addr);
|
2015-01-31 11:20:46 -06:00
|
|
|
for &(i, ref e) in fields {
|
2016-02-09 14:24:11 -06:00
|
|
|
let dest = adt::trans_field_ptr(bcx, &repr, addr, discr, i);
|
|
|
|
let e_ty = expr_ty_adjusted(bcx, &e);
|
|
|
|
bcx = trans_into(bcx, &e, SaveIn(dest));
|
2014-11-04 03:59:00 -06:00
|
|
|
let scope = cleanup::CustomScope(custom_cleanup_scope);
|
|
|
|
fcx.schedule_lifetime_end(scope, dest);
|
2015-06-07 02:25:14 -05:00
|
|
|
// FIXME: nonzeroing move should generalize to fields
|
|
|
|
fcx.schedule_drop_mem(scope, dest, e_ty, None);
|
2014-11-04 03:59:00 -06:00
|
|
|
}
|
2012-08-28 17:54:45 -05:00
|
|
|
}
|
2014-01-15 13:39:08 -06:00
|
|
|
|
2016-02-09 14:24:11 -06:00
|
|
|
adt::trans_set_discr(bcx, &repr, addr, discr);
|
2014-07-10 17:09:21 -05:00
|
|
|
|
2014-01-15 13:39:08 -06:00
|
|
|
fcx.pop_custom_cleanup_scope(custom_cleanup_scope);
|
|
|
|
|
2014-08-14 21:45:57 -05:00
|
|
|
// If we don't care about the result, drop the temporary we made
|
|
|
|
match dest {
|
|
|
|
SaveIn(_) => bcx,
|
|
|
|
Ignore => {
|
2014-12-11 06:53:30 -06:00
|
|
|
bcx = glue::drop_ty(bcx, addr, ty, debug_location);
|
2014-08-14 21:45:57 -05:00
|
|
|
base::call_lifetime_end(bcx, addr);
|
|
|
|
bcx
|
|
|
|
}
|
|
|
|
}
|
2012-08-28 17:54:45 -05:00
|
|
|
}
|
|
|
|
|
2013-02-24 00:53:40 -06:00
|
|
|
|
2014-09-06 11:13:04 -05:00
|
|
|
fn trans_immediate_lit<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
|
2015-07-31 02:04:06 -05:00
|
|
|
expr: &hir::Expr,
|
2015-09-14 04:58:20 -05:00
|
|
|
lit: &ast::Lit)
|
2014-09-06 11:13:04 -05:00
|
|
|
-> DatumBlock<'blk, 'tcx, Expr> {
|
2012-08-28 17:54:45 -05:00
|
|
|
// must not be a string constant; that is an RvalueDpsExpr
|
2013-06-16 23:23:24 -05:00
|
|
|
let _icx = push_ctxt("trans_immediate_lit");
|
2012-08-28 17:54:45 -05:00
|
|
|
let ty = expr_ty(bcx, expr);
|
2014-01-15 13:39:08 -06:00
|
|
|
let v = consts::const_lit(bcx.ccx(), expr, lit);
|
|
|
|
immediate_rvalue_bcx(bcx, v, ty).to_expr_datumblock()
|
2012-08-28 17:54:45 -05:00
|
|
|
}
|
|
|
|
|
2014-09-06 11:13:04 -05:00
|
|
|
fn trans_unary<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
|
2015-07-31 02:04:06 -05:00
|
|
|
expr: &hir::Expr,
|
|
|
|
op: hir::UnOp,
|
|
|
|
sub_expr: &hir::Expr)
|
2014-09-06 11:13:04 -05:00
|
|
|
-> DatumBlock<'blk, 'tcx, Expr> {
|
2014-03-15 15:29:34 -05:00
|
|
|
let ccx = bcx.ccx();
|
2014-01-15 13:39:08 -06:00
|
|
|
let mut bcx = bcx;
|
2013-06-16 23:23:24 -05:00
|
|
|
let _icx = push_ctxt("trans_unary_datum");
|
2012-08-28 17:54:45 -05:00
|
|
|
|
2014-03-06 11:24:11 -06:00
|
|
|
let method_call = MethodCall::expr(expr.id);
|
2014-03-08 10:33:39 -06:00
|
|
|
|
|
|
|
// The only overloaded operator that is translated to a datum
|
|
|
|
// is an overloaded deref, since it always yields a `&T`.
|
|
|
|
// Otherwise, we should be in the RvalueDpsExpr path.
|
2015-07-31 02:04:06 -05:00
|
|
|
assert!(op == hir::UnDeref || !ccx.tcx().is_method_call(expr.id));
|
2012-08-28 17:54:45 -05:00
|
|
|
|
2014-03-06 11:24:11 -06:00
|
|
|
let un_ty = expr_ty(bcx, expr);
|
2012-08-28 17:54:45 -05:00
|
|
|
|
2014-12-11 06:53:30 -06:00
|
|
|
let debug_loc = expr.debug_loc();
|
|
|
|
|
2014-03-06 11:24:11 -06:00
|
|
|
match op {
|
2015-07-31 02:04:06 -05:00
|
|
|
hir::UnNot => {
|
2014-01-15 13:39:08 -06:00
|
|
|
let datum = unpack_datum!(bcx, trans(bcx, sub_expr));
|
2014-12-11 06:53:30 -06:00
|
|
|
let llresult = Not(bcx, datum.to_llscalarish(bcx), debug_loc);
|
2014-01-15 13:39:08 -06:00
|
|
|
immediate_rvalue_bcx(bcx, llresult, un_ty).to_expr_datumblock()
|
2012-08-28 17:54:45 -05:00
|
|
|
}
|
2015-07-31 02:04:06 -05:00
|
|
|
hir::UnNeg => {
|
2014-01-15 13:39:08 -06:00
|
|
|
let datum = unpack_datum!(bcx, trans(bcx, sub_expr));
|
|
|
|
let val = datum.to_llscalarish(bcx);
|
2015-04-14 19:19:52 -05:00
|
|
|
let (bcx, llneg) = {
|
2015-06-24 00:24:13 -05:00
|
|
|
if un_ty.is_fp() {
|
2015-04-14 19:19:52 -05:00
|
|
|
let result = FNeg(bcx, val, debug_loc);
|
|
|
|
(bcx, result)
|
2012-08-28 17:54:45 -05:00
|
|
|
} else {
|
2015-06-24 00:24:13 -05:00
|
|
|
let is_signed = un_ty.is_signed();
|
2015-04-14 19:19:52 -05:00
|
|
|
let result = Neg(bcx, val, debug_loc);
|
|
|
|
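// Negating the minimum value of a signed integer type overflows, so when
// overflow checks are enabled we compare the operand against MIN and panic
// on a match.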
let bcx = if bcx.ccx().check_overflow() && is_signed {
|
|
|
|
let (llty, min) = base::llty_and_min_for_signed_ty(bcx, un_ty);
|
|
|
|
let is_min = ICmp(bcx, llvm::IntEQ, val,
|
|
|
|
C_integral(llty, min, true), debug_loc);
|
|
|
|
with_cond(bcx, is_min, |bcx| {
|
|
|
|
let msg = InternedString::new(
|
|
|
|
"attempted to negate with overflow");
|
|
|
|
controlflow::trans_fail(bcx, expr_info(expr), msg)
|
|
|
|
})
|
|
|
|
} else {
|
|
|
|
bcx
|
|
|
|
};
|
|
|
|
(bcx, result)
|
2012-08-28 17:54:45 -05:00
|
|
|
}
|
|
|
|
};
|
2014-01-15 13:39:08 -06:00
|
|
|
immediate_rvalue_bcx(bcx, llneg, un_ty).to_expr_datumblock()
|
2012-08-28 17:54:45 -05:00
|
|
|
}
|
2015-07-31 02:04:06 -05:00
|
|
|
hir::UnDeref => {
|
2014-03-06 11:24:11 -06:00
|
|
|
let datum = unpack_datum!(bcx, trans(bcx, sub_expr));
|
2014-06-11 17:01:48 -05:00
|
|
|
deref_once(bcx, expr, datum, method_call)
|
2012-08-28 17:54:45 -05:00
|
|
|
}
|
2014-03-06 11:24:11 -06:00
|
|
|
}
|
2013-12-17 18:46:18 -06:00
|
|
|
}
|
2012-08-28 17:54:45 -05:00
|
|
|
|
2014-09-06 11:13:04 -05:00
|
|
|
fn trans_uniq_expr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
|
2015-07-31 02:04:06 -05:00
|
|
|
box_expr: &hir::Expr,
|
2014-09-29 14:11:30 -05:00
|
|
|
box_ty: Ty<'tcx>,
|
2015-07-31 02:04:06 -05:00
|
|
|
contents: &hir::Expr,
|
2014-09-29 14:11:30 -05:00
|
|
|
contents_ty: Ty<'tcx>)
|
2014-09-06 11:13:04 -05:00
|
|
|
-> DatumBlock<'blk, 'tcx, Expr> {
|
2014-04-06 05:54:41 -05:00
|
|
|
let _icx = push_ctxt("trans_uniq_expr");
|
2014-01-15 13:39:08 -06:00
|
|
|
let fcx = bcx.fcx;
|
2014-12-18 08:26:10 -06:00
|
|
|
assert!(type_is_sized(bcx.tcx(), contents_ty));
|
2014-04-06 05:54:41 -05:00
|
|
|
let llty = type_of::type_of(bcx.ccx(), contents_ty);
|
|
|
|
let size = llsize_of(bcx.ccx(), llty);
|
2014-10-14 15:36:11 -05:00
|
|
|
let align = C_uint(bcx.ccx(), type_of::align_of(bcx.ccx(), contents_ty));
|
2014-04-24 22:14:52 -05:00
|
|
|
let llty_ptr = llty.ptr_to();
|
2015-02-04 10:16:59 -06:00
|
|
|
let Result { bcx, val } = malloc_raw_dyn(bcx,
|
|
|
|
llty_ptr,
|
|
|
|
box_ty,
|
|
|
|
size,
|
|
|
|
align,
|
|
|
|
box_expr.debug_loc());
|
2014-05-05 20:56:44 -05:00
|
|
|
// Unique boxes do not allocate for zero-size types. The standard library
|
|
|
|
// may assume that `free` is never called on the pointer returned for
|
|
|
|
// `Box<ZeroSizeType>`.
|
2014-04-06 05:54:41 -05:00
|
|
|
let bcx = if llsize_of_alloc(bcx.ccx(), llty) == 0 {
|
|
|
|
trans_into(bcx, contents, SaveIn(val))
|
2013-12-17 18:46:18 -06:00
|
|
|
} else {
|
2014-01-15 13:39:08 -06:00
|
|
|
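// Schedule the fresh allocation to be freed if translating the contents
// unwinds; the cleanup is popped again once the box is fully initialized.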
let custom_cleanup_scope = fcx.push_custom_cleanup_scope();
|
|
|
|
fcx.schedule_free_value(cleanup::CustomScope(custom_cleanup_scope),
|
2014-05-20 23:18:10 -05:00
|
|
|
val, cleanup::HeapExchange, contents_ty);
|
2014-04-06 05:54:41 -05:00
|
|
|
let bcx = trans_into(bcx, contents, SaveIn(val));
|
2014-01-15 13:39:08 -06:00
|
|
|
fcx.pop_custom_cleanup_scope(custom_cleanup_scope);
|
2014-04-06 05:54:41 -05:00
|
|
|
bcx
|
|
|
|
};
|
|
|
|
immediate_rvalue_bcx(bcx, val, box_ty).to_expr_datumblock()
|
|
|
|
}
|
|
|
|
|
2014-09-06 11:13:04 -05:00
|
|
|
fn trans_addr_of<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
|
2015-07-31 02:04:06 -05:00
|
|
|
expr: &hir::Expr,
|
|
|
|
subexpr: &hir::Expr)
|
2014-09-06 11:13:04 -05:00
|
|
|
-> DatumBlock<'blk, 'tcx, Expr> {
|
2013-06-16 23:23:24 -05:00
|
|
|
let _icx = push_ctxt("trans_addr_of");
|
2012-08-28 17:54:45 -05:00
|
|
|
let mut bcx = bcx;
|
2014-01-15 13:39:08 -06:00
|
|
|
let sub_datum = unpack_datum!(bcx, trans_to_lvalue(bcx, subexpr, "addr_of"));
|
2015-10-01 04:55:52 -05:00
|
|
|
let ty = expr_ty(bcx, expr);
|
2015-02-17 12:27:01 -06:00
|
|
|
if !type_is_sized(bcx.tcx(), sub_datum.ty) {
|
2015-10-01 04:55:52 -05:00
|
|
|
// Always generate an lvalue datum, because this pointer doesn't own
|
|
|
|
// the data and cleanup is scheduled elsewhere.
|
|
|
|
DatumBlock::new(bcx, Datum::new(sub_datum.val, ty, LvalueExpr(sub_datum.kind)))
|
2015-02-17 12:27:01 -06:00
|
|
|
} else {
|
|
|
|
// Sized value, ref to a thin pointer
|
|
|
|
immediate_rvalue_bcx(bcx, sub_datum.val, ty).to_expr_datumblock()
|
2014-08-04 07:20:11 -05:00
|
|
|
}
|
2012-08-28 17:54:45 -05:00
|
|
|
}
|
|
|
|
|
2015-09-05 13:27:43 -05:00
|
|
|
fn trans_scalar_binop<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
|
|
|
|
binop_expr: &hir::Expr,
|
|
|
|
binop_ty: Ty<'tcx>,
|
|
|
|
op: hir::BinOp,
|
|
|
|
lhs: Datum<'tcx, Rvalue>,
|
|
|
|
rhs: Datum<'tcx, Rvalue>)
|
|
|
|
-> DatumBlock<'blk, 'tcx, Expr>
|
|
|
|
{
|
|
|
|
let _icx = push_ctxt("trans_scalar_binop");
|
2012-08-28 17:54:45 -05:00
|
|
|
|
2014-05-02 13:04:46 -05:00
|
|
|
let tcx = bcx.tcx();
|
2015-09-05 13:27:43 -05:00
|
|
|
let lhs_t = lhs.ty;
|
2015-08-31 17:06:00 -05:00
|
|
|
assert!(!lhs_t.is_simd());
|
|
|
|
let is_float = lhs_t.is_fp();
|
|
|
|
let is_signed = lhs_t.is_signed();
|
2015-01-05 23:56:30 -06:00
|
|
|
let info = expr_info(binop_expr);
|
2012-08-28 17:54:45 -05:00
|
|
|
|
2014-12-11 06:53:30 -06:00
|
|
|
let binop_debug_loc = binop_expr.debug_loc();
|
|
|
|
|
2012-08-28 17:54:45 -05:00
|
|
|
let mut bcx = bcx;
|
2015-09-05 13:27:43 -05:00
|
|
|
let lhs = lhs.to_llscalarish(bcx);
|
|
|
|
let rhs = rhs.to_llscalarish(bcx);
|
2015-01-12 21:24:37 -06:00
|
|
|
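// Floating-point ops map straight to the LLVM float instructions. Integer
// add/sub/mul and the shifts go through `with_overflow_check`, which panics
// on overflow when overflow checks are enabled; div/rem additionally guard
// against division by zero (and signed MIN / -1) via
// `fail_if_zero_or_overflows`.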
let val = match op.node {
|
2015-07-31 02:04:06 -05:00
|
|
|
hir::BiAdd => {
|
2014-12-11 06:53:30 -06:00
|
|
|
if is_float {
|
|
|
|
FAdd(bcx, lhs, rhs, binop_debug_loc)
|
|
|
|
} else {
|
2015-01-05 23:56:30 -06:00
|
|
|
let (newbcx, res) = with_overflow_check(
|
|
|
|
bcx, OverflowOp::Add, info, lhs_t, lhs, rhs, binop_debug_loc);
|
|
|
|
bcx = newbcx;
|
|
|
|
res
|
2014-12-11 06:53:30 -06:00
|
|
|
}
|
2012-08-28 17:54:45 -05:00
|
|
|
}
|
2015-07-31 02:04:06 -05:00
|
|
|
hir::BiSub => {
|
2014-12-11 06:53:30 -06:00
|
|
|
if is_float {
|
|
|
|
FSub(bcx, lhs, rhs, binop_debug_loc)
|
|
|
|
} else {
|
2015-01-05 23:56:30 -06:00
|
|
|
let (newbcx, res) = with_overflow_check(
|
|
|
|
bcx, OverflowOp::Sub, info, lhs_t, lhs, rhs, binop_debug_loc);
|
|
|
|
bcx = newbcx;
|
|
|
|
res
|
2014-12-11 06:53:30 -06:00
|
|
|
}
|
2012-08-28 17:54:45 -05:00
|
|
|
}
|
2015-07-31 02:04:06 -05:00
|
|
|
hir::BiMul => {
|
2014-12-11 06:53:30 -06:00
|
|
|
if is_float {
|
|
|
|
FMul(bcx, lhs, rhs, binop_debug_loc)
|
|
|
|
} else {
|
2015-01-05 23:56:30 -06:00
|
|
|
let (newbcx, res) = with_overflow_check(
|
|
|
|
bcx, OverflowOp::Mul, info, lhs_t, lhs, rhs, binop_debug_loc);
|
|
|
|
bcx = newbcx;
|
|
|
|
res
|
2014-12-11 06:53:30 -06:00
|
|
|
}
|
2012-08-28 17:54:45 -05:00
|
|
|
}
|
2015-07-31 02:04:06 -05:00
|
|
|
hir::BiDiv => {
|
2012-08-28 17:54:45 -05:00
|
|
|
if is_float {
|
2014-12-11 06:53:30 -06:00
|
|
|
FDiv(bcx, lhs, rhs, binop_debug_loc)
|
2012-08-28 17:54:45 -05:00
|
|
|
} else {
|
|
|
|
// Only zero-check integers; fp division by zero is well-defined (inf or NaN)
|
2015-02-04 10:16:59 -06:00
|
|
|
bcx = base::fail_if_zero_or_overflows(bcx,
|
|
|
|
expr_info(binop_expr),
|
|
|
|
op,
|
|
|
|
lhs,
|
|
|
|
rhs,
|
2015-09-05 13:27:43 -05:00
|
|
|
lhs_t);
|
2014-05-02 13:04:46 -05:00
|
|
|
if is_signed {
|
2014-12-11 06:53:30 -06:00
|
|
|
SDiv(bcx, lhs, rhs, binop_debug_loc)
|
2012-08-28 17:54:45 -05:00
|
|
|
} else {
|
2014-12-11 06:53:30 -06:00
|
|
|
UDiv(bcx, lhs, rhs, binop_debug_loc)
|
2012-08-28 17:54:45 -05:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2015-07-31 02:04:06 -05:00
|
|
|
hir::BiRem => {
|
2012-08-28 17:54:45 -05:00
|
|
|
if is_float {
|
2015-08-17 19:00:45 -05:00
|
|
|
// LLVM currently always lowers the `frem` instructions to the appropriate
|
|
|
|
// library calls typically found in libm. Notably f64 gets wired up
|
|
|
|
// to `fmod` and f32 gets wired up to `fmodf`. Inconveniently for
|
|
|
|
// us, 32-bit MSVC does not actually have a `fmodf` symbol, it's
|
|
|
|
// instead just an inline function in a header that goes up to a
|
|
|
|
// f64, uses `fmod`, and then comes back down to a f32.
|
|
|
|
//
|
|
|
|
// Although LLVM knows that `fmodf` doesn't exist on MSVC, it will
|
|
|
|
// still unconditionally lower frem instructions over 32-bit floats
|
|
|
|
// to a call to `fmodf`. To work around this we special case MSVC
|
|
|
|
// 32-bit float rem instructions and instead do the call out to
|
|
|
|
// `fmod` ourselves.
|
|
|
|
//
|
|
|
|
// Note that this is currently duplicated with src/libcore/ops.rs
|
|
|
|
// which does the same thing, and it would be nice to perhaps unify
|
|
|
|
// these two implementations one day! Also note that we call `fmod`
|
|
|
|
// for both 32 and 64-bit floats because if we emit any FRem
|
|
|
|
// instruction at all then LLVM is capable of optimizing it into a
|
|
|
|
// 32-bit FRem (which we're trying to avoid).
|
|
|
|
let use_fmod = tcx.sess.target.target.options.is_like_msvc &&
|
|
|
|
tcx.sess.target.target.arch == "x86";
|
|
|
|
if use_fmod {
|
|
|
|
let f64t = Type::f64(bcx.ccx());
|
|
|
|
let fty = Type::func(&[f64t, f64t], &f64t);
|
|
|
|
let llfn = declare::declare_cfn(bcx.ccx(), "fmod", fty,
|
|
|
|
tcx.types.f64);
|
|
|
|
if lhs_t == tcx.types.f32 {
|
|
|
|
let lhs = FPExt(bcx, lhs, f64t);
|
|
|
|
let rhs = FPExt(bcx, rhs, f64t);
|
|
|
|
let res = Call(bcx, llfn, &[lhs, rhs], None, binop_debug_loc);
|
|
|
|
FPTrunc(bcx, res, Type::f32(bcx.ccx()))
|
|
|
|
} else {
|
|
|
|
Call(bcx, llfn, &[lhs, rhs], None, binop_debug_loc)
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
FRem(bcx, lhs, rhs, binop_debug_loc)
|
|
|
|
}
|
2012-08-28 17:54:45 -05:00
|
|
|
} else {
|
|
|
|
// Only zero-check integers; fp %0 is NaN
|
2015-02-04 10:16:59 -06:00
|
|
|
bcx = base::fail_if_zero_or_overflows(bcx,
|
|
|
|
expr_info(binop_expr),
|
2015-09-05 13:27:43 -05:00
|
|
|
op, lhs, rhs, lhs_t);
|
2014-05-02 13:04:46 -05:00
|
|
|
if is_signed {
|
2014-12-11 06:53:30 -06:00
|
|
|
SRem(bcx, lhs, rhs, binop_debug_loc)
|
2012-08-28 17:54:45 -05:00
|
|
|
} else {
|
2014-12-11 06:53:30 -06:00
|
|
|
URem(bcx, lhs, rhs, binop_debug_loc)
|
2012-08-28 17:54:45 -05:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2015-07-31 02:04:06 -05:00
|
|
|
hir::BiBitOr => Or(bcx, lhs, rhs, binop_debug_loc),
|
|
|
|
hir::BiBitAnd => And(bcx, lhs, rhs, binop_debug_loc),
|
|
|
|
hir::BiBitXor => Xor(bcx, lhs, rhs, binop_debug_loc),
|
|
|
|
hir::BiShl => {
|
2015-03-19 13:52:08 -05:00
|
|
|
let (newbcx, res) = with_overflow_check(
|
|
|
|
bcx, OverflowOp::Shl, info, lhs_t, lhs, rhs, binop_debug_loc);
|
|
|
|
bcx = newbcx;
|
|
|
|
res
|
|
|
|
}
|
2015-07-31 02:04:06 -05:00
|
|
|
hir::BiShr => {
|
2015-03-19 13:52:08 -05:00
|
|
|
let (newbcx, res) = with_overflow_check(
|
|
|
|
bcx, OverflowOp::Shr, info, lhs_t, lhs, rhs, binop_debug_loc);
|
|
|
|
bcx = newbcx;
|
|
|
|
res
|
2012-08-28 17:54:45 -05:00
|
|
|
}
|
2015-07-31 02:04:06 -05:00
|
|
|
hir::BiEq | hir::BiNe | hir::BiLt | hir::BiGe | hir::BiLe | hir::BiGt => {
|
2015-08-31 17:06:00 -05:00
|
|
|
base::compare_scalar_types(bcx, lhs, rhs, lhs_t, op.node, binop_debug_loc)
|
2012-09-07 20:53:14 -05:00
|
|
|
}
|
2012-08-28 17:54:45 -05:00
|
|
|
_ => {
|
2013-05-02 11:28:53 -05:00
|
|
|
bcx.tcx().sess.span_bug(binop_expr.span, "unexpected binop");
|
2012-08-28 17:54:45 -05:00
|
|
|
}
|
|
|
|
};
|
|
|
|
|
2014-01-15 13:39:08 -06:00
|
|
|
immediate_rvalue_bcx(bcx, val, binop_ty).to_expr_datumblock()
|
2012-08-28 17:54:45 -05:00
|
|
|
}
|
|
|
|
|
|
|
|
// refinement types would obviate the need for this
|
2014-01-07 10:54:58 -06:00
|
|
|
enum lazy_binop_ty {
|
|
|
|
lazy_and,
|
|
|
|
lazy_or,
|
|
|
|
}
|
2012-08-28 17:54:45 -05:00
|
|
|
|
2014-09-06 11:13:04 -05:00
|
|
|
fn trans_lazy_binop<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
|
2015-07-31 02:04:06 -05:00
|
|
|
binop_expr: &hir::Expr,
|
2014-09-06 11:13:04 -05:00
|
|
|
op: lazy_binop_ty,
|
2015-07-31 02:04:06 -05:00
|
|
|
a: &hir::Expr,
|
|
|
|
b: &hir::Expr)
|
2014-09-06 11:13:04 -05:00
|
|
|
-> DatumBlock<'blk, 'tcx, Expr> {
|
2013-06-16 23:23:24 -05:00
|
|
|
let _icx = push_ctxt("trans_lazy_binop");
|
2012-08-28 17:54:45 -05:00
|
|
|
let binop_ty = expr_ty(bcx, binop_expr);
|
2014-01-15 13:39:08 -06:00
|
|
|
let fcx = bcx.fcx;
|
2012-08-28 17:54:45 -05:00
|
|
|
|
2014-01-15 13:39:08 -06:00
|
|
|
let DatumBlock {bcx: past_lhs, datum: lhs} = trans(bcx, a);
|
|
|
|
let lhs = lhs.to_llscalarish(past_lhs);
|
2012-08-28 17:54:45 -05:00
|
|
|
|
2013-12-18 16:54:42 -06:00
|
|
|
if past_lhs.unreachable.get() {
|
2014-01-15 13:39:08 -06:00
|
|
|
return immediate_rvalue_bcx(past_lhs, lhs, binop_ty).to_expr_datumblock();
|
2012-08-28 17:54:45 -05:00
|
|
|
}
|
|
|
|
|
2014-01-15 13:39:08 -06:00
|
|
|
let join = fcx.new_id_block("join", binop_expr.id);
|
|
|
|
let before_rhs = fcx.new_id_block("before_rhs", b.id);
|
2012-08-28 17:54:45 -05:00
|
|
|
|
|
|
|
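// Short-circuit evaluation: `&&` only branches into the RHS block when the
// LHS is true, `||` only when it is false; both paths merge at `join`, where
// a phi node selects the final value.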
match op {
|
2014-12-11 06:53:30 -06:00
|
|
|
lazy_and => CondBr(past_lhs, lhs, before_rhs.llbb, join.llbb, DebugLoc::None),
|
|
|
|
lazy_or => CondBr(past_lhs, lhs, join.llbb, before_rhs.llbb, DebugLoc::None)
|
2012-08-28 17:54:45 -05:00
|
|
|
}
|
2013-02-06 16:28:02 -06:00
|
|
|
|
2014-01-15 13:39:08 -06:00
|
|
|
let DatumBlock {bcx: past_rhs, datum: rhs} = trans(before_rhs, b);
|
|
|
|
let rhs = rhs.to_llscalarish(past_rhs);
|
2012-08-28 17:54:45 -05:00
|
|
|
|
2013-12-18 16:54:42 -06:00
|
|
|
if past_rhs.unreachable.get() {
|
2014-01-15 13:39:08 -06:00
|
|
|
return immediate_rvalue_bcx(join, lhs, binop_ty).to_expr_datumblock();
|
2012-08-28 17:54:45 -05:00
|
|
|
}
|
|
|
|
|
2014-12-11 06:53:30 -06:00
|
|
|
Br(past_rhs, join.llbb, DebugLoc::None);
|
2014-11-17 02:39:01 -06:00
|
|
|
let phi = Phi(join, Type::i1(bcx.ccx()), &[lhs, rhs],
|
|
|
|
&[past_lhs.llbb, past_rhs.llbb]);
|
2012-08-28 17:54:45 -05:00
|
|
|
|
2014-01-15 13:39:08 -06:00
|
|
|
return immediate_rvalue_bcx(join, phi, binop_ty).to_expr_datumblock();
|
2012-08-28 17:54:45 -05:00
|
|
|
}
|
|
|
|
|
2014-09-06 11:13:04 -05:00
|
|
|
fn trans_binary<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
|
2015-07-31 02:04:06 -05:00
|
|
|
expr: &hir::Expr,
|
|
|
|
op: hir::BinOp,
|
|
|
|
lhs: &hir::Expr,
|
|
|
|
rhs: &hir::Expr)
|
2014-09-06 11:13:04 -05:00
|
|
|
-> DatumBlock<'blk, 'tcx, Expr> {
|
2013-06-16 23:23:24 -05:00
|
|
|
let _icx = push_ctxt("trans_binary");
|
2014-01-15 13:39:08 -06:00
|
|
|
let ccx = bcx.ccx();
|
2012-08-28 17:54:45 -05:00
|
|
|
|
2014-03-06 11:24:11 -06:00
|
|
|
// if overloaded, would be RvalueDpsExpr
|
2015-07-07 10:45:21 -05:00
|
|
|
assert!(!ccx.tcx().is_method_call(expr.id));
|
2014-03-06 11:24:11 -06:00
|
|
|
|
2015-01-12 21:24:37 -06:00
|
|
|
match op.node {
|
2015-07-31 02:04:06 -05:00
|
|
|
hir::BiAnd => {
|
2014-03-06 11:24:11 -06:00
|
|
|
trans_lazy_binop(bcx, expr, lazy_and, lhs, rhs)
|
2012-08-28 17:54:45 -05:00
|
|
|
}
|
2015-07-31 02:04:06 -05:00
|
|
|
hir::BiOr => {
|
2014-03-06 11:24:11 -06:00
|
|
|
trans_lazy_binop(bcx, expr, lazy_or, lhs, rhs)
|
2012-08-28 17:54:45 -05:00
|
|
|
}
|
|
|
|
_ => {
|
|
|
|
let mut bcx = bcx;
|
2014-03-06 11:24:11 -06:00
|
|
|
let binop_ty = expr_ty(bcx, expr);
|
2014-01-15 13:39:08 -06:00
|
|
|
|
2015-09-05 13:27:43 -05:00
|
|
|
let lhs = unpack_datum!(bcx, trans(bcx, lhs));
|
|
|
|
let lhs = unpack_datum!(bcx, lhs.to_rvalue_datum(bcx, "binop_lhs"));
|
|
|
|
debug!("trans_binary (expr {}): lhs={}",
|
|
|
|
expr.id, lhs.to_string(ccx));
|
|
|
|
let rhs = unpack_datum!(bcx, trans(bcx, rhs));
|
|
|
|
let rhs = unpack_datum!(bcx, rhs.to_rvalue_datum(bcx, "binop_rhs"));
|
|
|
|
debug!("trans_binary (expr {}): rhs={}",
|
|
|
|
expr.id, rhs.to_string(ccx));
|
|
|
|
|
|
|
|
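// Built-in binops on fat pointers (slices, trait objects) can only be the
// comparison operators and are handled separately; everything else falls
// through to trans_scalar_binop.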
if type_is_fat_ptr(ccx.tcx(), lhs.ty) {
|
|
|
|
assert!(type_is_fat_ptr(ccx.tcx(), rhs.ty),
|
|
|
|
"built-in binary operators on fat pointers are homogeneous");
|
2015-11-08 18:16:19 -06:00
|
|
|
assert_eq!(binop_ty, bcx.tcx().types.bool);
|
|
|
|
let val = base::compare_scalar_types(
|
|
|
|
bcx,
|
|
|
|
lhs.val,
|
|
|
|
rhs.val,
|
|
|
|
lhs.ty,
|
|
|
|
op.node,
|
|
|
|
expr.debug_loc());
|
|
|
|
immediate_rvalue_bcx(bcx, val, binop_ty).to_expr_datumblock()
|
2015-09-05 13:27:43 -05:00
|
|
|
} else {
|
|
|
|
assert!(!type_is_fat_ptr(ccx.tcx(), rhs.ty),
|
|
|
|
"built-in binary operators on fat pointers are homogeneous");
|
|
|
|
trans_scalar_binop(bcx, expr, binop_ty, op, lhs, rhs)
|
|
|
|
}
|
2012-08-28 17:54:45 -05:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-02-29 17:36:51 -06:00
|
|
|
pub fn cast_is_noop<'tcx>(tcx: &TyCtxt<'tcx>,
|
2015-07-31 02:04:06 -05:00
|
|
|
expr: &hir::Expr,
|
2015-05-05 11:36:47 -05:00
|
|
|
t_in: Ty<'tcx>,
|
|
|
|
t_out: Ty<'tcx>)
|
|
|
|
-> bool {
|
|
|
|
if let Some(&CastKind::CoercionCast) = tcx.cast_kinds.borrow().get(&expr.id) {
|
|
|
|
return true;
|
2012-08-28 17:54:45 -05:00
|
|
|
}
|
|
|
|
|
2015-09-03 12:10:34 -05:00
|
|
|
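// Otherwise the cast is a no-op when the types are identical; for
// pointers/references we compare the pointee types (ignoring mutability).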
match (t_in.builtin_deref(true, ty::NoPreference),
|
|
|
|
t_out.builtin_deref(true, ty::NoPreference)) {
|
2015-07-10 20:27:06 -05:00
|
|
|
(Some(ty::TypeAndMut{ ty: t_in, .. }), Some(ty::TypeAndMut{ ty: t_out, .. })) => {
|
2014-09-02 21:09:07 -05:00
|
|
|
t_in == t_out
|
|
|
|
}
|
2015-05-05 11:36:47 -05:00
|
|
|
_ => {
|
|
|
|
// This condition isn't redundant with the check for CoercionCast:
|
|
|
|
// different types can be substituted into the same type, and
|
|
|
|
// == equality can be overconservative if there are regions.
|
|
|
|
t_in == t_out
|
|
|
|
}
|
2014-09-02 21:09:07 -05:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-09-06 11:13:04 -05:00
|
|
|
fn trans_imm_cast<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
|
2015-07-31 02:04:06 -05:00
|
|
|
expr: &hir::Expr,
|
2014-09-06 11:13:04 -05:00
|
|
|
id: ast::NodeId)
|
2015-05-05 11:36:47 -05:00
|
|
|
-> DatumBlock<'blk, 'tcx, Expr>
|
|
|
|
{
|
2015-09-06 10:32:34 -05:00
|
|
|
use middle::ty::cast::CastTy::*;
|
|
|
|
use middle::ty::cast::IntTy::*;
|
2015-05-05 11:36:47 -05:00
|
|
|
|
|
|
|
fn int_cast(bcx: Block,
|
|
|
|
lldsttype: Type,
|
|
|
|
llsrctype: Type,
|
|
|
|
llsrc: ValueRef,
|
|
|
|
signed: bool)
|
|
|
|
-> ValueRef
|
|
|
|
{
|
|
|
|
let _icx = push_ctxt("int_cast");
|
|
|
|
let srcsz = llsrctype.int_width();
|
|
|
|
let dstsz = lldsttype.int_width();
|
|
|
|
return if dstsz == srcsz {
|
|
|
|
BitCast(bcx, llsrc, lldsttype)
|
|
|
|
} else if srcsz > dstsz {
|
|
|
|
TruncOrBitCast(bcx, llsrc, lldsttype)
|
|
|
|
} else if signed {
|
|
|
|
SExtOrBitCast(bcx, llsrc, lldsttype)
|
|
|
|
} else {
|
|
|
|
ZExtOrBitCast(bcx, llsrc, lldsttype)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
fn float_cast(bcx: Block,
|
|
|
|
lldsttype: Type,
|
|
|
|
llsrctype: Type,
|
|
|
|
llsrc: ValueRef)
|
|
|
|
-> ValueRef
|
|
|
|
{
|
|
|
|
let _icx = push_ctxt("float_cast");
|
|
|
|
let srcsz = llsrctype.float_width();
|
|
|
|
let dstsz = lldsttype.float_width();
|
|
|
|
return if dstsz > srcsz {
|
|
|
|
FPExt(bcx, llsrc, lldsttype)
|
|
|
|
} else if srcsz > dstsz {
|
|
|
|
FPTrunc(bcx, llsrc, lldsttype)
|
|
|
|
} else { llsrc };
|
|
|
|
}
|
|
|
|
|
2015-05-19 14:54:44 -05:00
|
|
|
let _icx = push_ctxt("trans_cast");
|
|
|
|
let mut bcx = bcx;
|
|
|
|
let ccx = bcx.ccx();
|
2012-08-28 17:54:45 -05:00
|
|
|
|
2015-03-19 23:15:27 -05:00
|
|
|
let t_in = expr_ty_adjusted(bcx, expr);
|
2014-01-15 13:39:08 -06:00
|
|
|
let t_out = node_id_type(bcx, id);
|
2012-08-28 17:54:45 -05:00
|
|
|
|
2015-06-18 12:25:05 -05:00
|
|
|
debug!("trans_cast({:?} as {:?})", t_in, t_out);
|
2015-05-05 11:36:47 -05:00
|
|
|
let mut ll_t_in = type_of::arg_type_of(ccx, t_in);
|
|
|
|
let ll_t_out = type_of::arg_type_of(ccx, t_out);
|
2014-01-15 13:39:08 -06:00
|
|
|
// Convert the value to be cast into a ValueRef, either by-ref or
|
|
|
|
// by-value as appropriate given its type:
|
2014-09-02 21:09:07 -05:00
|
|
|
let mut datum = unpack_datum!(bcx, trans(bcx, expr));
|
|
|
|
|
2015-03-19 23:15:27 -05:00
|
|
|
let datum_ty = monomorphize_type(bcx, datum.ty);
|
2015-05-05 11:36:47 -05:00
|
|
|
|
|
|
|
if cast_is_noop(bcx.tcx(), expr, datum_ty, t_out) {
|
2014-09-02 21:09:07 -05:00
|
|
|
datum.ty = t_out;
|
|
|
|
return DatumBlock::new(bcx, datum);
|
|
|
|
}
|
|
|
|
|
2015-05-05 11:36:47 -05:00
|
|
|
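// Fat pointer casts: fat -> fat just reinterprets the by-ref datum as a
// pointer to the target fat-pointer type, while fat -> thin loads only the
// data pointer and drops the extra word (length or vtable).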
if type_is_fat_ptr(bcx.tcx(), t_in) {
|
|
|
|
assert!(datum.kind.is_by_ref());
|
|
|
|
if type_is_fat_ptr(bcx.tcx(), t_out) {
|
|
|
|
return DatumBlock::new(bcx, Datum::new(
|
|
|
|
PointerCast(bcx, datum.val, ll_t_out.ptr_to()),
|
|
|
|
t_out,
|
|
|
|
Rvalue::new(ByRef)
|
|
|
|
)).to_expr_datumblock();
|
|
|
|
} else {
|
|
|
|
// Return the address
|
|
|
|
return immediate_rvalue_bcx(bcx,
|
2015-05-13 13:58:26 -05:00
|
|
|
PointerCast(bcx,
|
|
|
|
Load(bcx, get_dataptr(bcx, datum.val)),
|
|
|
|
ll_t_out),
|
2015-05-05 11:36:47 -05:00
|
|
|
t_out).to_expr_datumblock();
|
2014-01-15 13:39:08 -06:00
|
|
|
}
|
2015-05-05 11:36:47 -05:00
|
|
|
}
|
|
|
|
|
2015-08-02 14:52:50 -05:00
|
|
|
let r_t_in = CastTy::from_ty(t_in).expect("bad input type for cast");
|
|
|
|
let r_t_out = CastTy::from_ty(t_out).expect("bad output type for cast");
|
2015-05-05 11:36:47 -05:00
|
|
|
|
|
|
|
let (llexpr, signed) = if let Int(CEnum) = r_t_in {
|
|
|
|
let repr = adt::represent_type(ccx, t_in);
|
|
|
|
let datum = unpack_datum!(
|
|
|
|
bcx, datum.to_lvalue_datum(bcx, "trans_imm_cast", expr.id));
|
|
|
|
let llexpr_ptr = datum.to_llref();
|
2016-02-09 14:24:11 -06:00
|
|
|
let discr = adt::trans_get_discr(bcx, &repr, llexpr_ptr,
|
2015-10-23 00:07:19 -05:00
|
|
|
Some(Type::i64(ccx)), true);
|
2015-05-05 11:36:47 -05:00
|
|
|
ll_t_in = val_ty(discr);
|
2016-02-09 14:24:11 -06:00
|
|
|
(discr, adt::is_discr_signed(&repr))
|
2015-05-05 11:36:47 -05:00
|
|
|
} else {
|
2015-06-24 00:24:13 -05:00
|
|
|
(datum.to_llscalarish(bcx), t_in.is_signed())
|
2015-05-05 11:36:47 -05:00
|
|
|
};
|
|
|
|
|
|
|
|
let newval = match (r_t_in, r_t_out) {
|
2015-05-14 07:04:49 -05:00
|
|
|
(Ptr(_), Ptr(_)) | (FnPtr, Ptr(_)) | (RPtr(_), Ptr(_)) => {
|
|
|
|
PointerCast(bcx, llexpr, ll_t_out)
|
|
|
|
}
|
|
|
|
(Ptr(_), Int(_)) | (FnPtr, Int(_)) => PtrToInt(bcx, llexpr, ll_t_out),
|
2015-05-05 11:36:47 -05:00
|
|
|
(Int(_), Ptr(_)) => IntToPtr(bcx, llexpr, ll_t_out),
|
|
|
|
|
|
|
|
(Int(_), Int(_)) => int_cast(bcx, ll_t_out, ll_t_in, llexpr, signed),
|
|
|
|
(Float, Float) => float_cast(bcx, ll_t_out, ll_t_in, llexpr),
|
|
|
|
(Int(_), Float) if signed => SIToFP(bcx, llexpr, ll_t_out),
|
|
|
|
(Int(_), Float) => UIToFP(bcx, llexpr, ll_t_out),
|
|
|
|
(Float, Int(I)) => FPToSI(bcx, llexpr, ll_t_out),
|
|
|
|
(Float, Int(_)) => FPToUI(bcx, llexpr, ll_t_out),
|
|
|
|
|
|
|
|
_ => ccx.sess().span_bug(expr.span,
|
|
|
|
&format!("translating unsupported cast: \
|
2015-06-18 12:25:05 -05:00
|
|
|
{:?} -> {:?}",
|
|
|
|
t_in,
|
|
|
|
t_out)
|
2015-05-05 11:36:47 -05:00
|
|
|
)
|
2014-01-15 13:39:08 -06:00
|
|
|
};
|
|
|
|
return immediate_rvalue_bcx(bcx, newval, t_out).to_expr_datumblock();
|
2012-08-28 17:54:45 -05:00
|
|
|
}
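// Illustration (not part of the compiler): a hedged sketch of the source-level
// casts that reach the match in trans_imm_cast above, annotated with the LLVM
// instruction each arm selects. The enum, function, and bindings below are
// hypothetical and exist only to make the mapping concrete.
#[allow(dead_code)]
enum Tag { A, B }

#[allow(dead_code)]
fn cast_examples(p: *const u32, x: i32, f: f64) {
    let _ptr_ptr = p as *const u8;       // (Ptr, Ptr)           -> PointerCast
    let _ptr_int = p as usize;           // (Ptr, Int)           -> PtrToInt
    let _int_ptr = 8usize as *const u8;  // (Int, Ptr)           -> IntToPtr
    let _int_int = x as i64;             // (Int, Int)           -> int_cast (sext/zext/trunc)
    let _int_flt = x as f32;             // (Int, Float), signed -> SIToFP
    let _flt_uint = f as u32;            // (Float, Int(_))      -> FPToUI
    let _flt_sint = f as i32;            // (Float, Int(I))      -> FPToSI
    let _flt_flt = f as f32;             // (Float, Float)       -> float_cast (fptrunc/fpext)
    let _enum_int = Tag::A as i64;       // Int(CEnum) source    -> discriminant load, then int cast
}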
|
|
|
|
|
2014-09-06 11:13:04 -05:00
|
|
|
fn trans_assign_op<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
|
2015-07-31 02:04:06 -05:00
|
|
|
expr: &hir::Expr,
|
|
|
|
op: hir::BinOp,
|
|
|
|
dst: &hir::Expr,
|
|
|
|
src: &hir::Expr)
|
2014-09-06 11:13:04 -05:00
|
|
|
-> Block<'blk, 'tcx> {
|
2013-06-16 23:23:24 -05:00
|
|
|
let _icx = push_ctxt("trans_assign_op");
|
2012-08-28 17:54:45 -05:00
|
|
|
let mut bcx = bcx;
|
|
|
|
|
2015-06-18 12:25:05 -05:00
|
|
|
debug!("trans_assign_op(expr={:?})", expr);
|
2012-08-28 17:54:45 -05:00
|
|
|
|
2014-01-15 13:39:08 -06:00
|
|
|
// User-defined operator methods cannot be used with `+=` etc right now
|
2015-07-07 10:45:21 -05:00
|
|
|
assert!(!bcx.tcx().is_method_call(expr.id));
|
2012-08-28 17:54:45 -05:00
|
|
|
|
2014-01-15 13:39:08 -06:00
|
|
|
// Evaluate LHS (destination), which should be an lvalue
|
2015-09-05 13:27:43 -05:00
|
|
|
let dst = unpack_datum!(bcx, trans_to_lvalue(bcx, dst, "assign_op"));
|
|
|
|
assert!(!bcx.fcx.type_needs_drop(dst.ty));
|
|
|
|
let lhs = load_ty(bcx, dst.val, dst.ty);
|
|
|
|
let lhs = immediate_rvalue(lhs, dst.ty);
|
2012-08-28 17:54:45 -05:00
|
|
|
|
2015-09-05 13:27:43 -05:00
|
|
|
// Evaluate RHS - FIXME(#28160) this sucks
|
2016-02-09 14:24:11 -06:00
|
|
|
let rhs = unpack_datum!(bcx, trans(bcx, &src));
|
2015-09-05 13:27:43 -05:00
|
|
|
let rhs = unpack_datum!(bcx, rhs.to_rvalue_datum(bcx, "assign_op_rhs"));
|
2012-08-28 17:54:45 -05:00
|
|
|
|
|
|
|
// Perform computation and store the result
|
2014-01-15 13:39:08 -06:00
|
|
|
let result_datum = unpack_datum!(
|
2015-09-05 13:27:43 -05:00
|
|
|
bcx, trans_scalar_binop(bcx, expr, dst.ty, op, lhs, rhs));
|
|
|
|
return result_datum.store_to(bcx, dst.val);
|
2012-08-28 17:54:45 -05:00
|
|
|
}
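// Illustration only: the shape of expression handled by trans_assign_op above.
// For built-in numeric types, `x += 2` evaluates `x` once as an lvalue, loads it,
// performs the scalar binop, and stores the result back; user-defined operator
// methods are not involved on this path. The function below is a hypothetical
// usage example, not compiler code.
#[allow(dead_code)]
fn assign_op_example() {
    let mut x = 40i32;
    x += 2;                       // lowered as: load x, add 2, store the result back into x
    debug_assert_eq!(x, 42);
}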
|
|
|
|
|
2014-09-06 11:13:04 -05:00
|
|
|
fn auto_ref<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
|
2014-09-29 14:11:30 -05:00
|
|
|
datum: Datum<'tcx, Expr>,
|
2015-07-31 02:04:06 -05:00
|
|
|
expr: &hir::Expr)
|
2014-09-06 11:13:04 -05:00
|
|
|
-> DatumBlock<'blk, 'tcx, Expr> {
|
2014-03-06 11:24:11 -06:00
|
|
|
let mut bcx = bcx;
|
|
|
|
|
|
|
|
// Ensure cleanup of `datum` if not already scheduled and obtain
|
|
|
|
// a "by ref" pointer.
|
|
|
|
let lv_datum = unpack_datum!(bcx, datum.to_lvalue_datum(bcx, "autoref", expr.id));
|
|
|
|
|
|
|
|
// Compute final type. Note that we are loose with the region and
|
|
|
|
// mutability, since those things don't matter in trans.
|
|
|
|
let referent_ty = lv_datum.ty;
|
2015-06-24 20:09:46 -05:00
|
|
|
let ptr_ty = bcx.tcx().mk_imm_ref(bcx.tcx().mk_region(ty::ReStatic), referent_ty);
|
2014-03-06 11:24:11 -06:00
|
|
|
|
2015-12-27 10:56:15 -06:00
|
|
|
// Construct the resulting datum. The right datum to return here would be an Lvalue datum,
|
|
|
|
// because there is cleanup scheduled and the datum doesn't own the data, but for thin pointers
|
|
|
|
// we micro-optimize it to be an Rvalue datum to avoid the extra alloca and level of
|
|
|
|
// indirection, which has no ill effects since the pointer is thin.
|
|
|
|
let kind = if type_is_sized(bcx.tcx(), referent_ty) {
|
|
|
|
RvalueExpr(Rvalue::new(ByValue))
|
|
|
|
} else {
|
|
|
|
LvalueExpr(lv_datum.kind)
|
|
|
|
};
|
|
|
|
|
2014-03-06 11:24:11 -06:00
|
|
|
// Get the pointer.
|
|
|
|
let llref = lv_datum.to_llref();
|
2015-12-27 10:56:15 -06:00
|
|
|
DatumBlock::new(bcx, Datum::new(llref, ptr_ty, kind))
|
2014-03-06 11:24:11 -06:00
|
|
|
}
|
|
|
|
|
2014-09-06 11:13:04 -05:00
|
|
|
fn deref_multiple<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
|
2015-07-31 02:04:06 -05:00
|
|
|
expr: &hir::Expr,
|
2014-09-29 14:11:30 -05:00
|
|
|
datum: Datum<'tcx, Expr>,
|
2015-03-25 19:06:52 -05:00
|
|
|
times: usize)
|
2014-09-06 11:13:04 -05:00
|
|
|
-> DatumBlock<'blk, 'tcx, Expr> {
|
2014-01-15 13:39:08 -06:00
|
|
|
let mut bcx = bcx;
|
|
|
|
let mut datum = datum;
|
2015-01-26 14:46:12 -06:00
|
|
|
for i in 0..times {
|
2015-03-16 11:45:01 -05:00
|
|
|
let method_call = MethodCall::autoderef(expr.id, i as u32);
|
2014-06-11 17:01:48 -05:00
|
|
|
datum = unpack_datum!(bcx, deref_once(bcx, expr, datum, method_call));
|
2014-01-15 13:39:08 -06:00
|
|
|
}
|
|
|
|
DatumBlock { bcx: bcx, datum: datum }
|
|
|
|
}
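// Illustration only: deref_multiple lowers the autoderef chain that typeck
// recorded for an expression, one deref_once call per step. A hypothetical
// surface-level example of such a chain:
#[allow(dead_code)]
fn autoderef_example() {
    let s = String::from("hi");
    let r: &&String = &&s;
    // Method resolution inserts two autoderefs here (&&String -> &String -> String),
    // and each step is translated by one call to deref_once.
    let n = r.len();
    debug_assert_eq!(n, 2);
}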
|
|
|
|
|
2014-09-06 11:13:04 -05:00
|
|
|
fn deref_once<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
|
2015-07-31 02:04:06 -05:00
|
|
|
expr: &hir::Expr,
|
2014-09-29 14:11:30 -05:00
|
|
|
datum: Datum<'tcx, Expr>,
|
2014-09-06 11:13:04 -05:00
|
|
|
method_call: MethodCall)
|
|
|
|
-> DatumBlock<'blk, 'tcx, Expr> {
|
2014-01-15 13:39:08 -06:00
|
|
|
let ccx = bcx.ccx();
|
|
|
|
|
2015-06-18 12:25:05 -05:00
|
|
|
debug!("deref_once(expr={:?}, datum={}, method_call={:?})",
|
|
|
|
expr,
|
2014-06-21 05:39:03 -05:00
|
|
|
datum.to_string(ccx),
|
2014-06-11 17:01:48 -05:00
|
|
|
method_call);
|
2014-01-15 13:39:08 -06:00
|
|
|
|
|
|
|
let mut bcx = bcx;
|
|
|
|
|
2014-03-08 10:33:39 -06:00
|
|
|
// Check for overloaded deref.
|
2016-03-06 09:32:47 -06:00
|
|
|
let method = ccx.tcx().tables.borrow().method_map.get(&method_call).cloned();
|
|
|
|
let datum = match method {
|
|
|
|
Some(method) => {
|
|
|
|
let method_ty = monomorphize_type(bcx, method.ty);
|
|
|
|
|
|
|
|
// Overloaded. Invoke the deref() method, which basically
|
2014-07-03 16:32:41 -05:00
|
|
|
// converts from the `Smaht<T>` pointer that we have into
|
2014-03-08 10:33:39 -06:00
|
|
|
// a `&T` pointer. We can then proceed down the normal
|
|
|
|
// path (below) to dereference that `&T`.
|
2015-03-16 11:45:01 -05:00
|
|
|
let datum = if method_call.autoderef == 0 {
|
|
|
|
datum
|
|
|
|
} else {
|
2014-06-11 17:01:48 -05:00
|
|
|
// Always perform an AutoPtr when applying an overloaded auto-deref
|
2015-03-16 11:45:01 -05:00
|
|
|
unpack_datum!(bcx, auto_ref(bcx, datum, expr))
|
2014-03-06 11:24:11 -06:00
|
|
|
};
|
2014-09-02 22:31:36 -05:00
|
|
|
|
2015-01-06 04:03:42 -06:00
|
|
|
let ref_ty = // invoked methods have their LB regions instantiated
|
2015-06-25 15:42:17 -05:00
|
|
|
ccx.tcx().no_late_bound_regions(&method_ty.fn_ret()).unwrap().unwrap();
|
2014-09-02 22:31:36 -05:00
|
|
|
let scratch = rvalue_scratch_datum(bcx, ref_ty, "overloaded_deref");
|
|
|
|
|
2016-03-06 09:32:47 -06:00
|
|
|
bcx = Callee::method(bcx, method)
|
|
|
|
.call(bcx, expr.debug_loc(),
|
|
|
|
ArgOverloadedOp(datum, None),
|
|
|
|
Some(SaveIn(scratch.val))).bcx;
|
2014-09-02 22:31:36 -05:00
|
|
|
scratch.to_expr_datum()
|
2014-03-06 11:24:11 -06:00
|
|
|
}
|
2014-03-08 10:33:39 -06:00
|
|
|
None => {
|
|
|
|
// Not overloaded. We already have a pointer we know how to deref.
|
|
|
|
datum
|
|
|
|
}
|
2014-03-06 11:24:11 -06:00
|
|
|
};
|
|
|
|
|
2014-10-31 03:51:16 -05:00
|
|
|
let r = match datum.ty.sty {
|
2015-06-11 18:21:46 -05:00
|
|
|
ty::TyBox(content_ty) => {
|
2015-05-20 04:06:38 -05:00
|
|
|
// Make sure we have an lvalue datum here to get the
|
|
|
|
// proper cleanups scheduled
|
|
|
|
let datum = unpack_datum!(
|
|
|
|
bcx, datum.to_lvalue_datum(bcx, "deref", expr.id));
|
|
|
|
|
2014-12-18 08:26:10 -06:00
|
|
|
if type_is_sized(bcx.tcx(), content_ty) {
|
2015-05-20 04:06:38 -05:00
|
|
|
let ptr = load_ty(bcx, datum.val, datum.ty);
|
2015-06-05 14:34:03 -05:00
|
|
|
DatumBlock::new(bcx, Datum::new(ptr, content_ty, LvalueExpr(datum.kind)))
|
DST coercions and DST structs
[breaking-change]
1. The internal layout for trait objects has changed from (vtable, data) to (data, vtable). If you were relying on this in unsafe transmutes, you might get some very weird and apparently unrelated errors. You should not be doing this at all, but if you must, use raw::TraitObject rather than hardcoding rustc's internal representation into your code.
2. The minimal type of reference-to-vec literals (e.g., `&[1, 2, 3]`) is now a fixed-size vec (e.g., `&[int, ..3]`) where it used to be an unsized vec (e.g., `&[int]`). If you want the unsized type, you must give the type explicitly (e.g., `let x: &[_] = &[1, 2, 3]`). Note in particular that where multiple blocks must have the same type (e.g., if and else clauses, vec elements), the compiler will not coerce to the unsized type without a hint. E.g., `[&[1], &[1, 2]]` used to be a valid expression of type `[&[int]]`. It no longer type checks, since the first element now has type `&[int, ..1]` and the second has type `&[int, ..2]`, which are incompatible.
3. The type of blocks (including function bodies) must be coercible to the expected type (it used to need only to be a subtype). Mostly this makes things more flexible, not less (in particular when coercing function bodies to the return type). However, in some rare cases it is less flexible. To be honest, I'm not sure of the exact effects; I think the change causes us to resolve inferred type variables slightly earlier, which might make us slightly more restrictive. Possibly it only affects blocks with unreachable code. E.g., `if ... { fail!(); "Hello" }` used to type check; it no longer does. The fix is to add a semicolon after the string.
2014-08-04 07:20:11 -05:00
|
|
|
} else {
|
2015-02-17 12:27:01 -06:00
|
|
|
// A fat pointer and a DST lvalue have the same representation
|
|
|
|
// just different types. Since there is no temporary for `*e`
|
|
|
|
// here (because it is unsized), we cannot emulate the sized
|
|
|
|
// object code path for running drop glue and free. Instead,
|
|
|
|
// we schedule cleanup for `e`, turning it into an lvalue.
|
2014-08-31 23:14:56 -05:00
|
|
|
|
2015-06-05 14:34:03 -05:00
|
|
|
let lval = Lvalue::new("expr::deref_once ty_uniq");
|
|
|
|
let datum = Datum::new(datum.val, content_ty, LvalueExpr(lval));
|
2014-08-31 23:14:56 -05:00
|
|
|
DatumBlock::new(bcx, datum)
|
2014-04-09 02:15:31 -05:00
|
|
|
}
|
2014-01-15 13:39:08 -06:00
|
|
|
}
|
|
|
|
|
2015-07-10 20:27:06 -05:00
|
|
|
ty::TyRawPtr(ty::TypeAndMut { ty: content_ty, .. }) |
|
|
|
|
ty::TyRef(_, ty::TypeAndMut { ty: content_ty, .. }) => {
|
2015-06-05 14:34:03 -05:00
|
|
|
let lval = Lvalue::new("expr::deref_once ptr");
|
2014-12-18 08:26:10 -06:00
|
|
|
if type_is_sized(bcx.tcx(), content_ty) {
|
2014-08-04 07:20:11 -05:00
|
|
|
let ptr = datum.to_llscalarish(bcx);
|
|
|
|
|
|
|
|
// Always generate an lvalue datum, even if datum.mode is
|
|
|
|
// an rvalue. This is because datum.mode is only an
|
|
|
|
// rvalue for non-owning pointers like &T or *T, in which
|
|
|
|
// case cleanup *is* scheduled elsewhere, by the true
|
|
|
|
// owner (or, in the case of *T, by the user).
|
2015-06-05 14:34:03 -05:00
|
|
|
DatumBlock::new(bcx, Datum::new(ptr, content_ty, LvalueExpr(lval)))
|
2014-08-04 07:20:11 -05:00
|
|
|
} else {
|
2015-02-17 12:27:01 -06:00
|
|
|
// A fat pointer and a DST lvalue have the same representation
|
2014-08-04 07:20:11 -05:00
|
|
|
// just different types.
|
2015-06-05 14:34:03 -05:00
|
|
|
DatumBlock::new(bcx, Datum::new(datum.val, content_ty, LvalueExpr(lval)))
|
2014-04-09 02:15:31 -05:00
|
|
|
}
|
2014-01-15 13:39:08 -06:00
|
|
|
}
|
|
|
|
|
|
|
|
_ => {
|
|
|
|
bcx.tcx().sess.span_bug(
|
|
|
|
expr.span,
|
2015-07-26 19:49:38 -05:00
|
|
|
&format!("deref invoked on expr of invalid type {:?}",
|
2015-06-18 12:25:05 -05:00
|
|
|
datum.ty));
|
2014-01-15 13:39:08 -06:00
|
|
|
}
|
|
|
|
};
|
|
|
|
|
2014-12-20 02:09:35 -06:00
|
|
|
debug!("deref_once(expr={}, method_call={:?}, result={})",
|
2014-06-21 05:39:03 -05:00
|
|
|
expr.id, method_call, r.datum.to_string(ccx));
|
2014-01-15 13:39:08 -06:00
|
|
|
|
|
|
|
return r;
|
2013-08-28 01:12:05 -05:00
|
|
|
}
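// Illustration only: the overloaded-deref path above rewrites `*p` on a smart
// pointer as a call to `Deref::deref(&p)`, yielding a `&T` that is then
// dereferenced like an ordinary reference. The `Smaht` type below is a
// hypothetical stand-in matching the comment in deref_once, not compiler code.
use std::ops::Deref;

#[allow(dead_code)]
struct Smaht<T>(T);

impl<T> Deref for Smaht<T> {
    type Target = T;
    fn deref(&self) -> &T { &self.0 }
}

#[allow(dead_code)]
fn overloaded_deref_example() {
    let p = Smaht(7u32);
    // `*p` is not a builtin deref: it becomes `*Deref::deref(&p)`, i.e. the
    // auto_ref plus overloaded method call handled in deref_once.
    let x: u32 = *p;
    debug_assert_eq!(x, 7);
}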
|
2015-01-05 23:56:30 -06:00
|
|
|
|
2015-03-24 14:34:59 -05:00
|
|
|
#[derive(Debug)]
|
2015-01-05 23:56:30 -06:00
|
|
|
enum OverflowOp {
|
|
|
|
Add,
|
|
|
|
Sub,
|
|
|
|
Mul,
|
2015-03-19 13:52:08 -05:00
|
|
|
Shl,
|
|
|
|
Shr,
|
2015-01-05 23:56:30 -06:00
|
|
|
}
|
|
|
|
|
|
|
|
impl OverflowOp {
|
2015-03-19 13:52:08 -05:00
|
|
|
fn codegen_strategy(&self) -> OverflowCodegen {
|
|
|
|
use self::OverflowCodegen::{ViaIntrinsic, ViaInputCheck};
|
|
|
|
match *self {
|
|
|
|
OverflowOp::Add => ViaIntrinsic(OverflowOpViaIntrinsic::Add),
|
|
|
|
OverflowOp::Sub => ViaIntrinsic(OverflowOpViaIntrinsic::Sub),
|
|
|
|
OverflowOp::Mul => ViaIntrinsic(OverflowOpViaIntrinsic::Mul),
|
|
|
|
|
|
|
|
OverflowOp::Shl => ViaInputCheck(OverflowOpViaInputCheck::Shl),
|
|
|
|
OverflowOp::Shr => ViaInputCheck(OverflowOpViaInputCheck::Shr),
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
enum OverflowCodegen {
|
|
|
|
ViaIntrinsic(OverflowOpViaIntrinsic),
|
|
|
|
ViaInputCheck(OverflowOpViaInputCheck),
|
|
|
|
}
|
|
|
|
|
|
|
|
enum OverflowOpViaInputCheck { Shl, Shr, }
|
|
|
|
|
2015-03-24 14:34:59 -05:00
|
|
|
#[derive(Debug)]
|
2015-03-19 13:52:08 -05:00
|
|
|
enum OverflowOpViaIntrinsic { Add, Sub, Mul, }
|
|
|
|
|
|
|
|
impl OverflowOpViaIntrinsic {
|
|
|
|
fn to_intrinsic<'blk, 'tcx>(&self, bcx: Block<'blk, 'tcx>, lhs_ty: Ty) -> ValueRef {
|
|
|
|
let name = self.to_intrinsic_name(bcx.tcx(), lhs_ty);
|
|
|
|
bcx.ccx().get_intrinsic(&name)
|
|
|
|
}
|
2016-02-29 17:36:51 -06:00
|
|
|
fn to_intrinsic_name(&self, tcx: &TyCtxt, ty: Ty) -> &'static str {
|
2015-09-14 04:58:20 -05:00
|
|
|
use syntax::ast::IntTy::*;
|
|
|
|
use syntax::ast::UintTy::*;
|
2015-06-11 18:21:46 -05:00
|
|
|
use middle::ty::{TyInt, TyUint};
|
2015-01-05 23:56:30 -06:00
|
|
|
|
|
|
|
let new_sty = match ty.sty {
|
2016-02-08 09:20:57 -06:00
|
|
|
TyInt(Is) => match &tcx.sess.target.target.target_pointer_width[..] {
|
|
|
|
"32" => TyInt(I32),
|
|
|
|
"64" => TyInt(I64),
|
2015-01-05 23:56:30 -06:00
|
|
|
_ => panic!("unsupported target word size")
|
|
|
|
},
|
2016-02-08 09:20:57 -06:00
|
|
|
TyUint(Us) => match &tcx.sess.target.target.target_pointer_width[..] {
|
|
|
|
"32" => TyUint(U32),
|
|
|
|
"64" => TyUint(U64),
|
2015-01-05 23:56:30 -06:00
|
|
|
_ => panic!("unsupported target word size")
|
|
|
|
},
|
2015-06-11 18:21:46 -05:00
|
|
|
ref t @ TyUint(_) | ref t @ TyInt(_) => t.clone(),
|
2015-03-24 14:34:59 -05:00
|
|
|
_ => panic!("tried to get overflow intrinsic for {:?} applied to non-int type",
|
|
|
|
*self)
|
2015-01-05 23:56:30 -06:00
|
|
|
};
|
|
|
|
|
|
|
|
match *self {
|
2015-03-19 13:52:08 -05:00
|
|
|
OverflowOpViaIntrinsic::Add => match new_sty {
|
2016-02-08 09:20:57 -06:00
|
|
|
TyInt(I8) => "llvm.sadd.with.overflow.i8",
|
|
|
|
TyInt(I16) => "llvm.sadd.with.overflow.i16",
|
|
|
|
TyInt(I32) => "llvm.sadd.with.overflow.i32",
|
|
|
|
TyInt(I64) => "llvm.sadd.with.overflow.i64",
|
2015-01-05 23:56:30 -06:00
|
|
|
|
2016-02-08 09:20:57 -06:00
|
|
|
TyUint(U8) => "llvm.uadd.with.overflow.i8",
|
|
|
|
TyUint(U16) => "llvm.uadd.with.overflow.i16",
|
|
|
|
TyUint(U32) => "llvm.uadd.with.overflow.i32",
|
|
|
|
TyUint(U64) => "llvm.uadd.with.overflow.i64",
|
2015-01-05 23:56:30 -06:00
|
|
|
|
|
|
|
_ => unreachable!(),
|
|
|
|
},
|
2015-03-19 13:52:08 -05:00
|
|
|
OverflowOpViaIntrinsic::Sub => match new_sty {
|
2016-02-08 09:20:57 -06:00
|
|
|
TyInt(I8) => "llvm.ssub.with.overflow.i8",
|
|
|
|
TyInt(I16) => "llvm.ssub.with.overflow.i16",
|
|
|
|
TyInt(I32) => "llvm.ssub.with.overflow.i32",
|
|
|
|
TyInt(I64) => "llvm.ssub.with.overflow.i64",
|
2015-01-05 23:56:30 -06:00
|
|
|
|
2016-02-08 09:20:57 -06:00
|
|
|
TyUint(U8) => "llvm.usub.with.overflow.i8",
|
|
|
|
TyUint(U16) => "llvm.usub.with.overflow.i16",
|
|
|
|
TyUint(U32) => "llvm.usub.with.overflow.i32",
|
|
|
|
TyUint(U64) => "llvm.usub.with.overflow.i64",
|
2015-01-05 23:56:30 -06:00
|
|
|
|
|
|
|
_ => unreachable!(),
|
|
|
|
},
|
2015-03-19 13:52:08 -05:00
|
|
|
OverflowOpViaIntrinsic::Mul => match new_sty {
|
2016-02-08 09:20:57 -06:00
|
|
|
TyInt(I8) => "llvm.smul.with.overflow.i8",
|
|
|
|
TyInt(I16) => "llvm.smul.with.overflow.i16",
|
|
|
|
TyInt(I32) => "llvm.smul.with.overflow.i32",
|
|
|
|
TyInt(I64) => "llvm.smul.with.overflow.i64",
|
|
|
|
|
|
|
|
TyUint(U8) => "llvm.umul.with.overflow.i8",
|
|
|
|
TyUint(U16) => "llvm.umul.with.overflow.i16",
|
|
|
|
TyUint(U32) => "llvm.umul.with.overflow.i32",
|
|
|
|
TyUint(U64) => "llvm.umul.with.overflow.i64",
|
2015-01-05 23:56:30 -06:00
|
|
|
|
|
|
|
_ => unreachable!(),
|
|
|
|
},
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-03-19 13:52:08 -05:00
|
|
|
fn build_intrinsic_call<'blk, 'tcx>(&self, bcx: Block<'blk, 'tcx>,
|
|
|
|
info: NodeIdAndSpan,
|
|
|
|
lhs_t: Ty<'tcx>, lhs: ValueRef,
|
|
|
|
rhs: ValueRef,
|
|
|
|
binop_debug_loc: DebugLoc)
|
|
|
|
-> (Block<'blk, 'tcx>, ValueRef) {
|
|
|
|
let llfn = self.to_intrinsic(bcx, lhs_t);
|
2015-01-05 23:56:30 -06:00
|
|
|
|
|
|
|
let val = Call(bcx, llfn, &[lhs, rhs], None, binop_debug_loc);
|
|
|
|
let result = ExtractValue(bcx, val, 0); // iN operation result
|
|
|
|
let overflow = ExtractValue(bcx, val, 1); // i1 "did it overflow?"
|
|
|
|
|
|
|
|
let cond = ICmp(bcx, llvm::IntEQ, overflow, C_integral(Type::i1(bcx.ccx()), 1, false),
|
|
|
|
binop_debug_loc);
|
|
|
|
|
|
|
|
let expect = bcx.ccx().get_intrinsic(&"llvm.expect.i1");
|
|
|
|
Call(bcx, expect, &[cond, C_integral(Type::i1(bcx.ccx()), 0, false)],
|
|
|
|
None, binop_debug_loc);
|
|
|
|
|
|
|
|
let bcx =
|
|
|
|
base::with_cond(bcx, cond, |bcx|
|
|
|
|
controlflow::trans_fail(bcx, info,
|
|
|
|
InternedString::new("arithmetic operation overflowed")));
|
|
|
|
|
|
|
|
(bcx, result)
|
2015-03-19 13:52:08 -05:00
|
|
|
}
|
|
|
|
}
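// Hedged sketch: each `llvm.*.with.overflow.*` intrinsic selected above returns
// a pair (result, overflowed), which is the same contract `overflowing_add` and
// friends expose at the language level. Illustrative only, not compiler code.
#[allow(dead_code)]
fn intrinsic_pair_model(a: u8, b: u8) -> (u8, bool) {
    a.overflowing_add(b)   // corresponds to llvm.uadd.with.overflow.i8 for u8
}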
|
|
|
|
|
|
|
|
impl OverflowOpViaInputCheck {
|
|
|
|
fn build_with_input_check<'blk, 'tcx>(&self,
|
|
|
|
bcx: Block<'blk, 'tcx>,
|
|
|
|
info: NodeIdAndSpan,
|
|
|
|
lhs_t: Ty<'tcx>,
|
|
|
|
lhs: ValueRef,
|
|
|
|
rhs: ValueRef,
|
|
|
|
binop_debug_loc: DebugLoc)
|
|
|
|
-> (Block<'blk, 'tcx>, ValueRef)
|
|
|
|
{
|
|
|
|
let lhs_llty = val_ty(lhs);
|
|
|
|
let rhs_llty = val_ty(rhs);
|
|
|
|
|
|
|
|
// Panic if any bits are set outside of bits that we always
|
|
|
|
// mask in.
|
|
|
|
//
|
|
|
|
// Note that the mask's value is derived from the LHS type
|
|
|
|
// (since that is where the 32/64 distinction is relevant) but
|
|
|
|
// the mask's type must match the RHS type (since they will
|
2015-10-07 17:11:25 -05:00
|
|
|
// both be fed into an and-binop)
|
2015-06-09 16:45:45 -05:00
|
|
|
let invert_mask = shift_mask_val(bcx, lhs_llty, rhs_llty, true);
|
2015-03-19 13:52:08 -05:00
|
|
|
|
|
|
|
let outer_bits = And(bcx, rhs, invert_mask, binop_debug_loc);
|
2015-06-09 16:45:45 -05:00
|
|
|
let cond = build_nonzero_check(bcx, outer_bits, binop_debug_loc);
|
2015-03-19 13:52:08 -05:00
|
|
|
let result = match *self {
|
|
|
|
OverflowOpViaInputCheck::Shl =>
|
|
|
|
build_unchecked_lshift(bcx, lhs, rhs, binop_debug_loc),
|
|
|
|
OverflowOpViaInputCheck::Shr =>
|
|
|
|
build_unchecked_rshift(bcx, lhs_t, lhs, rhs, binop_debug_loc),
|
|
|
|
};
|
|
|
|
let bcx =
|
|
|
|
base::with_cond(bcx, cond, |bcx|
|
|
|
|
controlflow::trans_fail(bcx, info,
|
|
|
|
InternedString::new("shift operation overflowed")));
|
|
|
|
|
|
|
|
(bcx, result)
|
|
|
|
}
|
|
|
|
}
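// Hedged sketch of the input check above: a shift overflows when the RHS has
// any bit set outside the mask derived from the LHS width (i.e. rhs >= bits).
// The helper below is a hypothetical model of that rule, not compiler code.
#[allow(dead_code)]
fn shift_amount_overflows(lhs_bits: u32, rhs: u32) -> bool {
    let invert_mask = !(lhs_bits - 1);   // e.g. !0b11111 for a 32-bit LHS
    (rhs & invert_mask) != 0             // nonzero "outer bits" => the panic path
}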
|
|
|
|
|
2015-06-09 16:45:45 -05:00
|
|
|
// Check if an integer or vector contains a nonzero element.
|
|
|
|
fn build_nonzero_check<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
|
|
|
|
value: ValueRef,
|
|
|
|
binop_debug_loc: DebugLoc) -> ValueRef {
|
|
|
|
let llty = val_ty(value);
|
|
|
|
let kind = llty.kind();
|
|
|
|
match kind {
|
|
|
|
TypeKind::Integer => ICmp(bcx, llvm::IntNE, value, C_null(llty), binop_debug_loc),
|
|
|
|
TypeKind::Vector => {
|
|
|
|
// Check if any elements of the vector are nonzero by treating
|
|
|
|
// it as a wide integer and checking if the integer is nonzero.
|
|
|
|
let width = llty.vector_length() as u64 * llty.element_type().int_width();
|
|
|
|
let int_value = BitCast(bcx, value, Type::ix(bcx.ccx(), width));
|
|
|
|
build_nonzero_check(bcx, int_value, binop_debug_loc)
|
|
|
|
},
|
|
|
|
_ => panic!("build_nonzero_check: expected Integer or Vector, found {:?}", kind),
|
|
|
|
}
|
2015-03-19 13:52:08 -05:00
|
|
|
}
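// Hedged sketch of the Vector arm above: a vector is nonzero exactly when
// reinterpreting its lanes as one wide integer yields a nonzero value. The
// scalar helper below models the same predicate without LLVM types.
#[allow(dead_code)]
fn any_nonzero_lane(lanes: &[u32]) -> bool {
    // Equivalent to BitCast-ing a <N x i32> vector to an iN*32 and comparing with 0.
    lanes.iter().any(|&x| x != 0)
}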
|
|
|
|
|
|
|
|
fn with_overflow_check<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, oop: OverflowOp, info: NodeIdAndSpan,
|
|
|
|
lhs_t: Ty<'tcx>, lhs: ValueRef,
|
|
|
|
rhs: ValueRef,
|
|
|
|
binop_debug_loc: DebugLoc)
|
|
|
|
-> (Block<'blk, 'tcx>, ValueRef) {
|
|
|
|
if bcx.unreachable.get() { return (bcx, _Undef(lhs)); }
|
|
|
|
if bcx.ccx().check_overflow() {
|
|
|
|
|
|
|
|
match oop.codegen_strategy() {
|
|
|
|
OverflowCodegen::ViaIntrinsic(oop) =>
|
|
|
|
oop.build_intrinsic_call(bcx, info, lhs_t, lhs, rhs, binop_debug_loc),
|
|
|
|
OverflowCodegen::ViaInputCheck(oop) =>
|
|
|
|
oop.build_with_input_check(bcx, info, lhs_t, lhs, rhs, binop_debug_loc),
|
|
|
|
}
|
2015-01-05 23:56:30 -06:00
|
|
|
} else {
|
|
|
|
let res = match oop {
|
|
|
|
OverflowOp::Add => Add(bcx, lhs, rhs, binop_debug_loc),
|
|
|
|
OverflowOp::Sub => Sub(bcx, lhs, rhs, binop_debug_loc),
|
|
|
|
OverflowOp::Mul => Mul(bcx, lhs, rhs, binop_debug_loc),
|
2015-03-19 13:52:08 -05:00
|
|
|
|
|
|
|
OverflowOp::Shl =>
|
|
|
|
build_unchecked_lshift(bcx, lhs, rhs, binop_debug_loc),
|
|
|
|
OverflowOp::Shr =>
|
|
|
|
build_unchecked_rshift(bcx, lhs_t, lhs, rhs, binop_debug_loc),
|
2015-01-05 23:56:30 -06:00
|
|
|
};
|
|
|
|
(bcx, res)
|
|
|
|
}
|
|
|
|
}
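// Illustration only: the observable contract produced by with_overflow_check.
// With overflow checks enabled, the failing branch calls trans_fail with the
// "arithmetic operation overflowed" message; with checks disabled, the plain
// LLVM instruction (wrapping semantics) is emitted. A hypothetical model:
#[allow(dead_code)]
fn checked_add_model(a: i32, b: i32, check_overflow: bool) -> i32 {
    match a.checked_add(b) {
        Some(v) => v,
        None if check_overflow => panic!("arithmetic operation overflowed"),
        None => a.wrapping_add(b),
    }
}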
|
2015-06-21 14:29:13 -05:00
|
|
|
|
|
|
|
/// We categorize expressions into three kinds. The distinction between
|
|
|
|
/// lvalue/rvalue is fundamental to the language. The distinction between the
|
|
|
|
/// two kinds of rvalues is an artifact of trans which reflects how we will
|
|
|
|
/// generate code for that kind of expression. See trans/expr.rs for more
|
|
|
|
/// information.
|
|
|
|
#[derive(Copy, Clone)]
|
|
|
|
enum ExprKind {
|
|
|
|
Lvalue,
|
|
|
|
RvalueDps,
|
|
|
|
RvalueDatum,
|
|
|
|
RvalueStmt
|
|
|
|
}
|
|
|
|
|
2016-02-29 17:36:51 -06:00
|
|
|
fn expr_kind(tcx: &TyCtxt, expr: &hir::Expr) -> ExprKind {
|
2015-07-07 10:45:21 -05:00
|
|
|
if tcx.is_method_call(expr.id) {
|
2015-06-21 14:29:13 -05:00
|
|
|
// Overloaded operations are generally calls, and hence they are
|
|
|
|
// generated via DPS, but there are a few exceptions:
|
|
|
|
return match expr.node {
|
|
|
|
// `a += b` has a unit result.
|
2015-07-31 02:04:06 -05:00
|
|
|
hir::ExprAssignOp(..) => ExprKind::RvalueStmt,
|
2015-06-21 14:29:13 -05:00
|
|
|
|
|
|
|
// the deref method invoked for `*a` always yields an `&T`
|
2015-07-31 02:04:06 -05:00
|
|
|
hir::ExprUnary(hir::UnDeref, _) => ExprKind::Lvalue,
|
2015-06-21 14:29:13 -05:00
|
|
|
|
|
|
|
// the index method invoked for `a[i]` always yields an `&T`
|
2015-07-31 02:04:06 -05:00
|
|
|
hir::ExprIndex(..) => ExprKind::Lvalue,
|
2015-06-21 14:29:13 -05:00
|
|
|
|
|
|
|
// in the general case, result could be any type, use DPS
|
|
|
|
_ => ExprKind::RvalueDps
|
|
|
|
};
|
|
|
|
}
|
|
|
|
|
|
|
|
match expr.node {
|
2015-07-31 02:04:06 -05:00
|
|
|
hir::ExprPath(..) => {
|
2015-06-25 15:42:17 -05:00
|
|
|
match tcx.resolve_expr(expr) {
|
2016-03-06 09:32:47 -06:00
|
|
|
// Put functions and ctors with the ADTs, as they
|
|
|
|
// are zero-sized, so DPS is the cheapest option.
|
|
|
|
Def::Struct(..) | Def::Variant(..) |
|
|
|
|
Def::Fn(..) | Def::Method(..) => {
|
|
|
|
ExprKind::RvalueDps
|
2015-06-21 14:29:13 -05:00
|
|
|
}
|
|
|
|
|
|
|
|
// Note: there is actually a good case to be made that
|
|
|
|
// DefArg's, particularly those of immediate type, ought to
|
|
|
|
// be considered rvalues.
|
2016-01-20 13:31:10 -06:00
|
|
|
Def::Static(..) |
|
|
|
|
Def::Upvar(..) |
|
|
|
|
Def::Local(..) => ExprKind::Lvalue,
|
2015-06-21 14:29:13 -05:00
|
|
|
|
2016-01-20 13:31:10 -06:00
|
|
|
Def::Const(..) |
|
|
|
|
Def::AssociatedConst(..) => ExprKind::RvalueDatum,
|
2015-06-21 14:29:13 -05:00
|
|
|
|
|
|
|
def => {
|
|
|
|
tcx.sess.span_bug(
|
|
|
|
expr.span,
|
|
|
|
&format!("uncategorized def for expr {}: {:?}",
|
|
|
|
expr.id,
|
|
|
|
def));
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-12-02 20:37:48 -06:00
|
|
|
hir::ExprType(ref expr, _) => {
|
|
|
|
expr_kind(tcx, expr)
|
|
|
|
}
|
|
|
|
|
2015-07-31 02:04:06 -05:00
|
|
|
hir::ExprUnary(hir::UnDeref, _) |
|
|
|
|
hir::ExprField(..) |
|
|
|
|
hir::ExprTupField(..) |
|
|
|
|
hir::ExprIndex(..) => {
|
2015-06-21 14:29:13 -05:00
|
|
|
ExprKind::Lvalue
|
|
|
|
}
|
|
|
|
|
2015-07-31 02:04:06 -05:00
|
|
|
hir::ExprCall(..) |
|
|
|
|
hir::ExprMethodCall(..) |
|
|
|
|
hir::ExprStruct(..) |
|
|
|
|
hir::ExprTup(..) |
|
|
|
|
hir::ExprIf(..) |
|
|
|
|
hir::ExprMatch(..) |
|
|
|
|
hir::ExprClosure(..) |
|
|
|
|
hir::ExprBlock(..) |
|
|
|
|
hir::ExprRepeat(..) |
|
|
|
|
hir::ExprVec(..) => {
|
2015-06-21 14:29:13 -05:00
|
|
|
ExprKind::RvalueDps
|
|
|
|
}
|
|
|
|
|
2015-11-28 13:02:07 -06:00
|
|
|
hir::ExprLit(ref lit) if lit.node.is_str() => {
|
2015-06-21 14:29:13 -05:00
|
|
|
ExprKind::RvalueDps
|
|
|
|
}
|
|
|
|
|
2015-07-31 02:04:06 -05:00
|
|
|
hir::ExprBreak(..) |
|
|
|
|
hir::ExprAgain(..) |
|
|
|
|
hir::ExprRet(..) |
|
|
|
|
hir::ExprWhile(..) |
|
|
|
|
hir::ExprLoop(..) |
|
|
|
|
hir::ExprAssign(..) |
|
|
|
|
hir::ExprInlineAsm(..) |
|
|
|
|
hir::ExprAssignOp(..) => {
|
2015-06-21 14:29:13 -05:00
|
|
|
ExprKind::RvalueStmt
|
|
|
|
}
|
|
|
|
|
2015-07-31 02:04:06 -05:00
|
|
|
hir::ExprLit(_) | // Note: LitStr is carved out above
|
|
|
|
hir::ExprUnary(..) |
|
2015-09-24 10:00:08 -05:00
|
|
|
hir::ExprBox(_) |
|
2015-07-31 02:04:06 -05:00
|
|
|
hir::ExprAddrOf(..) |
|
|
|
|
hir::ExprBinary(..) |
|
|
|
|
hir::ExprCast(..) => {
|
2015-06-21 14:29:13 -05:00
|
|
|
ExprKind::RvalueDatum
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
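// Illustration only: a few surface expressions and the ExprKind the function
// above assigns to them (assuming no operator overloading is involved). The
// function and bindings below are hypothetical, not compiler code.
#[allow(dead_code, unused_variables, unused_assignments)]
fn expr_kind_examples(mut x: i32, p: &i32) {
    let lv = *p;               // ExprUnary(UnDeref, ..), ExprField, ExprIndex -> Lvalue
    let datum = x as i64 + 1;  // ExprCast, ExprBinary, ExprLit                -> RvalueDatum
    let dps = (x, x);          // ExprTup, ExprCall, ExprStruct, ExprIf        -> RvalueDps
    x = 0;                     // ExprAssign, ExprAssignOp, ExprWhile, ExprRet -> RvalueStmt
}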
|