diff --git a/src/interpreter/terminator/mod.rs b/src/interpreter/terminator/mod.rs index 6c400a040fb..1a0edb0b91a 100644 --- a/src/interpreter/terminator/mod.rs +++ b/src/interpreter/terminator/mod.rs @@ -3,7 +3,7 @@ use rustc::mir; use rustc::traits::{self, Reveal}; use rustc::ty::fold::TypeFoldable; use rustc::ty::layout::Layout; -use rustc::ty::subst::Substs; +use rustc::ty::subst::{Substs, Kind}; use rustc::ty::{self, Ty, TyCtxt, BareFnTy}; use std::rc::Rc; use syntax::codemap::{DUMMY_SP, Span}; @@ -12,7 +12,7 @@ use syntax::{ast, attr}; use error::{EvalError, EvalResult}; use memory::Pointer; use primval::PrimVal; -use super::{EvalContext, Lvalue, IntegerExt, StackPopCleanup}; +use super::{EvalContext, Lvalue, IntegerExt, StackPopCleanup, LvalueExtra, monomorphize_field_ty}; use super::value::Value; mod intrinsics; @@ -146,9 +146,11 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> { Ok(()) } - fn eval_drop_impls(&mut self, drops: Vec<(DefId, Pointer, &'tcx Substs<'tcx>)>) -> EvalResult<'tcx, ()> { + fn eval_drop_impls(&mut self, drops: Vec<(DefId, Value, &'tcx Substs<'tcx>)>) -> EvalResult<'tcx, ()> { let span = self.frame().span; - for (drop_def_id, adt_ptr, substs) in drops { + // add them to the stack in reverse order, because the impl that needs to run the last + // is the one that needs to be at the bottom of the stack + for (drop_def_id, self_arg, substs) in drops.into_iter().rev() { // FIXME: supply a real span let mir = self.load_mir(drop_def_id)?; trace!("substs for drop glue: {:?}", substs); @@ -165,7 +167,7 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> { assert!(arg_locals.next().is_none(), "drop impl should have only one arg"); let dest = self.eval_lvalue(&mir::Lvalue::Local(first))?; let ty = self.frame().mir.local_decls[first].ty; - self.write_value(Value::ByVal(PrimVal::from_ptr(adt_ptr)), dest, ty)?; + self.write_value(self_arg, dest, ty)?; } Ok(()) } @@ -513,7 +515,7 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> { &mut self, lval: Lvalue<'tcx>, ty: Ty<'tcx>, - 
drop: &mut Vec<(DefId, Pointer, &'tcx Substs<'tcx>)>, + drop: &mut Vec<(DefId, Value, &'tcx Substs<'tcx>)>, ) -> EvalResult<'tcx, ()> { if !self.type_needs_drop(ty) { debug!("no need to drop {:?}", ty); return Ok(()); } trace!("-need to drop {:?} at {:?}", ty, lval); match ty.sty { // special case `Box` to deallocate the inner allocation ty::TyBox(contents_ty) => { let val = self.read_lvalue(lval)?; - let contents_ptr = val.read_ptr(&self.memory)?; - self.drop(Lvalue::from_ptr(contents_ptr), contents_ty, drop)?; - trace!("-deallocating box"); - self.memory.deallocate(contents_ptr)?; + // we are going through the read_value path, because that already does all the + // checks for the trait object types. We'd only be repeating ourselves here. + let val = self.follow_by_ref_value(val, ty)?; + trace!("box dealloc on {:?}", val); + match val { + Value::ByRef(_) => bug!("follow_by_ref_value can't result in ByRef"), + Value::ByVal(ptr) => { + assert!(self.type_is_sized(contents_ty)); + let contents_ptr = ptr.expect_ptr("value of Box type must be a pointer"); + self.drop(Lvalue::from_ptr(contents_ptr), contents_ty, drop)?; + }, + Value::ByValPair(prim_ptr, extra) => { + let ptr = prim_ptr.expect_ptr("value of Box type must be a pointer"); + let extra = match extra.try_as_ptr() { + Some(vtable) => LvalueExtra::Vtable(vtable), + None => LvalueExtra::Length(extra.expect_uint("slice length")), + }; + self.drop( + Lvalue::Ptr { + ptr: ptr, + extra: extra, + }, + contents_ty, + drop, + )?; + }, + } + let box_free_fn = self.tcx.lang_items.box_free_fn().expect("no box_free lang item"); + let substs = self.tcx.intern_substs(&[Kind::from(contents_ty)]); + // this is somewhat hacky, but hey, there's no representation difference between + // pointers and references, so + // #[lang = "box_free"] unsafe fn box_free<T>(ptr: *mut T) + // is the same as + // fn drop(&mut self) if Self is Box<T> + drop.push((box_free_fn, val, substs)); }, ty::TyAdt(adt_def, substs) => { // FIXME: some structs are represented as
ByValPair - let adt_ptr = self.force_allocation(lval)?.to_ptr(); + let lval = self.force_allocation(lval)?; + let adt_ptr = match lval { + Lvalue::Ptr { ptr, .. } => ptr, + _ => bug!("force allocation can only yield Lvalue::Ptr"), + }; // run drop impl before the fields' drop impls if let Some(drop_def_id) = adt_def.destructor() { - drop.push((drop_def_id, adt_ptr, substs)); + drop.push((drop_def_id, Value::ByVal(PrimVal::from_ptr(adt_ptr)), substs)); } let layout = self.type_layout(ty); let fields = match *layout { @@ -565,10 +602,10 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> { if discr == nndiscr { assert_eq!(adt_def.variants[discr as usize].fields.len(), 1); let field_ty = &adt_def.variants[discr as usize].fields[0]; - let field_ty = self.monomorphize_field_ty(field_ty, substs); + let field_ty = monomorphize_field_ty(self.tcx, field_ty, substs); // FIXME: once read_discriminant_value works with lvalue, don't force // alloc in the RawNullablePointer case - self.drop(Lvalue::from_ptr(adt_ptr), field_ty, drop)?; + self.drop(lval, field_ty, drop)?; return Ok(()); } else { // FIXME: the zst variant might contain zst types that impl Drop @@ -577,18 +614,19 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> { }, _ => bug!("{:?} is not an adt layout", layout), }; - for (field_ty, offset) in fields { - let field_ty = self.monomorphize_field_ty(field_ty, substs); - self.drop(Lvalue::from_ptr(adt_ptr.offset(offset.bytes() as isize)), field_ty, drop)?; - } + let tcx = self.tcx; + self.drop_fields( + fields.map(|(ty, &offset)| (monomorphize_field_ty(tcx, ty, substs), offset)), + lval, + drop, + )?; }, ty::TyTuple(fields) => { - // FIXME: some tuples are represented as ByValPair - let ptr = self.force_allocation(lval)?.to_ptr(); - for (i, field_ty) in fields.iter().enumerate() { - let offset = self.get_field_offset(ty, i)?.bytes() as isize; - self.drop(Lvalue::from_ptr(ptr.offset(offset)), field_ty, drop)?; - } + let offsets = match *self.type_layout(ty) { + Layout::Univariant { ref 
variant, .. } => &variant.offsets, + _ => bug!("tuples must be univariant"), + }; + self.drop_fields(fields.iter().cloned().zip(offsets.iter().cloned()), lval, drop)?; }, // other types do not need to process drop _ => {}, @@ -596,6 +634,37 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> { Ok(()) } + + fn drop_fields< + I: Iterator<Item=(Ty<'tcx>, ty::layout::Size)>, + >( + &mut self, + mut fields: I, + lval: Lvalue<'tcx>, + drop: &mut Vec<(DefId, Value, &'tcx Substs<'tcx>)>, + ) -> EvalResult<'tcx, ()> { + // FIXME: some aggregates may be represented by PrimVal::Pair + let (adt_ptr, extra) = match self.force_allocation(lval)? { + Lvalue::Ptr { ptr, extra } => (ptr, extra), + _ => bug!("force allocation must yield Lvalue::Ptr"), + }; + // manual iteration, because we need to be careful about the last field if it is unsized + while let Some((field_ty, offset)) = fields.next() { + let ptr = adt_ptr.offset(offset.bytes() as isize); + if self.type_is_sized(field_ty) { + self.drop(Lvalue::from_ptr(ptr), field_ty, drop)?; + } else { + let lvalue = Lvalue::Ptr { + ptr: ptr, + extra: extra, + }; + self.drop(lvalue, field_ty, drop)?; + break; // if it is not sized, then this is the last field anyway + } + } + assert!(fields.next().is_none()); + Ok(()) + } } #[derive(Debug)] diff --git a/tests/run-pass/move-arg-2-unique.rs b/tests/run-pass/move-arg-2-unique.rs new file mode 100644 index 00000000000..d44c83763b7 --- /dev/null +++ b/tests/run-pass/move-arg-2-unique.rs @@ -0,0 +1,20 @@ +// Copyright 2012 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or +// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license +// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your +// option. This file may not be copied, modified, or distributed +// except according to those terms.
+ +#![allow(unused_features, unused_variables)] +#![feature(box_syntax)] + +fn test(foo: Box<Vec<isize>> ) { assert_eq!((*foo)[0], 10); } + +pub fn main() { + let x = box vec![10]; + // Test forgetting a local by move-in + test(x); +}