diff --git a/src/liballoc/arc.rs b/src/liballoc/arc.rs index b5d16d29272..e107d19a87c 100644 --- a/src/liballoc/arc.rs +++ b/src/liballoc/arc.rs @@ -354,7 +354,8 @@ impl<T: Sync + Send> Drop for Arc<T> { // more than once (but it is guaranteed to be zeroed after the first if // it's run more than once) let ptr = *self._ptr; - if ptr.is_null() { return } + // if ptr.is_null() { return } + if ptr.is_null() || ptr as usize == mem::POST_DROP_USIZE { return } // Because `fetch_sub` is already atomic, we do not need to synchronize // with other threads unless we are going to delete the object. This @@ -485,7 +486,7 @@ impl<T: Sync + Send> Drop for Weak<T> { let ptr = *self._ptr; // see comments above for why this check is here - if ptr.is_null() { return } + if ptr.is_null() || ptr as usize == mem::POST_DROP_USIZE { return } // If we find out that we were the last weak pointer, then it's time to // deallocate the data entirely. See the discussion in Arc::drop() about diff --git a/src/liballoc/rc.rs b/src/liballoc/rc.rs index eb3c5c16726..e0d7e32ecf5 100644 --- a/src/liballoc/rc.rs +++ b/src/liballoc/rc.rs @@ -160,7 +160,7 @@ use core::default::Default; use core::fmt; use core::hash::{Hasher, Hash}; use core::marker; -use core::mem::{min_align_of, size_of, forget}; +use core::mem::{self, min_align_of, size_of, forget}; use core::nonzero::NonZero; use core::ops::{Deref, Drop}; use core::option::Option; @@ -407,7 +407,7 @@ impl<T> Drop for Rc<T> { fn drop(&mut self) { unsafe { let ptr = *self._ptr; - if !ptr.is_null() { + if !ptr.is_null() && ptr as usize != mem::POST_DROP_USIZE { self.dec_strong(); if self.strong() == 0 { ptr::read(&**self); // destroy the contained object @@ -718,7 +718,7 @@ impl<T> Drop for Weak<T> { fn drop(&mut self) { unsafe { let ptr = *self._ptr; - if !ptr.is_null() { + if !ptr.is_null() && ptr as usize != mem::POST_DROP_USIZE { self.dec_weak(); // the weak count starts at 1, and will only go to zero if all // the strong pointers have disappeared. diff --git a/src/libcollections/btree/node.rs b/src/libcollections/btree/node.rs index 23eafa41d8a..bf57d745a0d 100644 --- a/src/libcollections/btree/node.rs +++ b/src/libcollections/btree/node.rs @@ -280,9 +280,11 @@ impl<T> Drop for RawItems<T> { #[unsafe_destructor] impl<K, V> Drop for Node<K, V> { fn drop(&mut self) { - if self.keys.is_null() { + if self.keys.is_null() || + (unsafe { self.keys.get() as *const K as usize == mem::POST_DROP_USIZE }) + { // Since we have #[unsafe_no_drop_flag], we have to watch - // out for a null value being stored in self.keys. (Using + // out for the sentinel value being stored in self.keys. (Using // null is technically a violation of the `Unique` // requirements, though.) return; diff --git a/src/libcollections/vec.rs b/src/libcollections/vec.rs index e71077c96c7..cf0163f3ef4 100644 --- a/src/libcollections/vec.rs +++ b/src/libcollections/vec.rs @@ -1694,7 +1694,7 @@ impl<T> Drop for Vec<T> { fn drop(&mut self) { // This is (and should always remain) a no-op if the fields are // zeroed (when moving out, because of #[unsafe_no_drop_flag]). - if self.cap != 0 { + if self.cap != 0 && self.cap != mem::POST_DROP_USIZE { unsafe { for x in &*self { ptr::read(x); @@ -1977,7 +1977,7 @@ impl<'a, T> ExactSizeIterator for Drain<'a, T> {} #[stable(feature = "rust1", since = "1.0.0")] impl<'a, T> Drop for Drain<'a, T> { fn drop(&mut self) { - // self.ptr == self.end == null if drop has already been called, + // self.ptr == self.end == mem::POST_DROP_USIZE if drop has already been called, // so we can use #[unsafe_no_drop_flag]. // destroy the remaining elements
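All of the `Drop` impls above follow the same convention imposed by `#[unsafe_no_drop_flag]`: the type carries no hidden drop flag, so its destructor can run more than once, and after the first run the value has been filled with the `POST_DROP_U8` byte pattern rather than zeroed. A user-written type built against this scheme would guard its destructor the same way. The sketch below assumes the unstable `unsafe_no_drop_flag` and `filling_drop` features of this era, and `deallocate` is a hypothetical stand-in for whatever cleanup the real type would perform:

    #![feature(unsafe_no_drop_flag, filling_drop)]

    use std::mem;

    unsafe fn deallocate(_p: *mut u32) {
        // hypothetical cleanup routine; a real type would free its allocation here
    }

    #[unsafe_no_drop_flag]
    struct MyPtr {
        ptr: *mut u32,
    }

    impl Drop for MyPtr {
        fn drop(&mut self) {
            // drop() may run more than once; after the first run the whole
            // struct has been overwritten with POST_DROP_U8 bytes, so the
            // pointer field compares equal to POST_DROP_USIZE, not to null.
            if self.ptr.is_null() || self.ptr as usize == mem::POST_DROP_USIZE {
                return;
            }
            unsafe { deallocate(self.ptr); }
        }
    }

    fn main() {
        // A real instance would wrap a live allocation; null exercises the guard.
        let _p = MyPtr { ptr: std::ptr::null_mut() };
    }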
diff --git a/src/libcore/intrinsics.rs b/src/libcore/intrinsics.rs index 1f1044b0b21..eb74a41db55 100644 --- a/src/libcore/intrinsics.rs +++ b/src/libcore/intrinsics.rs @@ -191,13 +191,35 @@ extern "rust-intrinsic" { /// crate it is invoked in. pub fn type_id<T: ?Sized + 'static>() -> u64; + /// Create a value initialized so that its drop flag, + /// if any, says that it has been dropped. + /// + /// `init_dropped` is unsafe because it returns a datum with all + /// of its bytes set to the drop flag, which generally does not + /// correspond to a valid value. + /// + /// This intrinsic is likely to be deprecated in the future when + /// Rust moves to non-zeroing dynamic drop (and thus removes the + /// embedded drop flags that are being established by this + /// intrinsic). + #[cfg(not(stage0))] + pub fn init_dropped<T>() -> T; + /// Create a value initialized to zero. /// /// `init` is unsafe because it returns a zeroed-out datum, - /// which is unsafe unless T is Copy. + /// which is unsafe unless T is `Copy`. Also, even if T is + /// `Copy`, an all-zero value may not correspond to any legitimate + /// state for the type in question. pub fn init<T>() -> T; /// Create an uninitialized value. + /// + /// `uninit` is unsafe because there is no guarantee of what its + /// contents are. In particular its drop-flag may be set to any + /// state, which means it may claim to be either dropped or + /// undropped. In the general case one must use `ptr::write` to + /// initialize memory previously set to the result of `uninit`. pub fn uninit<T>() -> T; /// Move a value out of scope without running drop glue.
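The expanded doc comment on `uninit` is worth restating with code: after `mem::uninitialized()` the embedded drop flag of a `Drop` type holds garbage, so the only sound way to install a value is `ptr::write`, which stores the new value (flag included) without reading or dropping the old bytes. A small sketch of that documented contract, with `Guard` as a stand-in type:

    use std::mem;
    use std::ptr;

    struct Guard(u32);
    impl Drop for Guard {
        fn drop(&mut self) { println!("guard {} dropped", self.0); }
    }

    fn main() {
        unsafe {
            let mut g: Guard = mem::uninitialized();
            // The drop flag inside `g` is garbage here: it may claim the value
            // is already dropped, or not. `ptr::write` overwrites the whole
            // slot, flag and all, without inspecting the old contents.
            ptr::write(&mut g, Guard(1));
        } // `g` is now fully initialized and is dropped normally here.
    }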
diff --git a/src/libcore/mem.rs b/src/libcore/mem.rs index 1e6fb51a8a5..e5b6c3f3472 100644 --- a/src/libcore/mem.rs +++ b/src/libcore/mem.rs @@ -158,6 +158,29 @@ pub unsafe fn zeroed<T>() -> T { intrinsics::init() } +/// Create a value initialized to an unspecified series of bytes. +/// +/// The byte sequence usually indicates that the value at the memory +/// in question has been dropped. Thus, *if* T carries a drop flag, +/// any associated destructor will not be run when the value falls out +/// of scope. +/// +/// Some code at one time used the `zeroed` function above to +/// accomplish this goal. +/// +/// This function is expected to be deprecated with the transition +/// to non-zeroing drop. +#[inline] +#[unstable(feature = "filling_drop")] +pub unsafe fn dropped<T>() -> T { + let mut x: T = uninitialized(); + let p: *mut u8 = transmute(&mut x as *mut T); + for i in 0..size_of::<T>() { + *p.offset(i as isize) = POST_DROP_U8; + } + x +} + /// Create an uninitialized value. /// /// Care must be taken when using this function, if the type `T` has a destructor and the value @@ -291,6 +314,40 @@ pub fn replace<T>(dest: &mut T, mut src: T) -> T { #[stable(feature = "rust1", since = "1.0.0")] pub fn drop<T>(_x: T) { } +macro_rules! repeat_u8_as_u32 { + ($name:expr) => { (($name as u32) << 24 | + ($name as u32) << 16 | + ($name as u32) << 8 | + ($name as u32)) } +} +macro_rules! repeat_u8_as_u64 { + ($name:expr) => { ((repeat_u8_as_u32!($name) as u64) << 32 | + (repeat_u8_as_u32!($name) as u64)) } +} + +// NOTE: Keep synchronized with values used in librustc_trans::trans::adt. +// +// In particular, the POST_DROP_U8 marker must never equal the +// DTOR_NEEDED_U8 marker. +// +// For a while pnkfelix was using 0xc1 here. +// But having the sign bit set is a pain, so 0x1d is probably better. +// +// And of course, 0x00 brings back the old world of zero'ing on drop. +#[cfg(not(stage0))] pub const POST_DROP_U8: u8 = 0x1d; +#[cfg(not(stage0))] pub const POST_DROP_U32: u32 = repeat_u8_as_u32!(POST_DROP_U8); +#[cfg(not(stage0))] pub const POST_DROP_U64: u64 = repeat_u8_as_u64!(POST_DROP_U8); + +#[cfg(target_pointer_width = "32")] +#[cfg(not(stage0))] pub const POST_DROP_USIZE: usize = POST_DROP_U32 as usize; +#[cfg(target_pointer_width = "64")] +#[cfg(not(stage0))] pub const POST_DROP_USIZE: usize = POST_DROP_U64 as usize; + +#[cfg(stage0)] pub const POST_DROP_U8: u8 = 0; +#[cfg(stage0)] pub const POST_DROP_U32: u32 = 0; +#[cfg(stage0)] pub const POST_DROP_U64: u64 = 0; +#[cfg(stage0)] pub const POST_DROP_USIZE: usize = 0; + /// Interprets `src` as `&U`, and then reads `src` without moving the contained value. /// /// This function will unsafely assume the pointer `src` is valid for `sizeof(U)` bytes by diff --git a/src/libcore/ptr.rs b/src/libcore/ptr.rs index 9b3ee3ef5e0..7a9c9274f3b 100644 --- a/src/libcore/ptr.rs +++ b/src/libcore/ptr.rs @@ -230,6 +230,21 @@ pub unsafe fn read_and_zero<T>(dest: *mut T) -> T { tmp } +/// Variant of read_and_zero that writes the specific drop-flag byte +/// (which may be more appropriate than zero). +#[inline(always)] +#[unstable(feature = "core", + reason = "may play a larger role in std::ptr future extensions")] +pub unsafe fn read_and_drop<T>(dest: *mut T) -> T { + // Copy the data out from `dest`: + let tmp = read(&*dest); + + // Now mark `dest` as dropped: + write_bytes(dest, mem::POST_DROP_U8, 1); + + tmp +} + /// Overwrites a memory location with the given value without reading or /// dropping the old value. ///
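Taken together, `mem::dropped`, the `POST_DROP_*` constants, and `ptr::read_and_drop` give library code a uniform way to move a value out of a slot and stamp the slot as dead. A minimal demonstration under the unstable `core` and `filling_drop` features of this era, using `usize` so the filled slot can be inspected directly:

    #![feature(core, filling_drop)]

    use std::mem;
    use std::ptr;

    fn main() {
        unsafe {
            let mut slot: usize = 0xbeef;

            // Move the value out and fill the slot with the drop-flag byte.
            let v: usize = ptr::read_and_drop(&mut slot);
            assert_eq!(v, 0xbeef);

            // Every byte of the slot now holds POST_DROP_U8 (0x1d), so a
            // word-sized slot compares equal to POST_DROP_USIZE, which is
            // just the byte repeated via repeat_u8_as_u32!/repeat_u8_as_u64!.
            assert_eq!(slot, mem::POST_DROP_USIZE);
            assert_eq!(mem::POST_DROP_USIZE & 0xff, mem::POST_DROP_U8 as usize);
        }
    }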
diff --git a/src/librustc_trans/trans/_match.rs b/src/librustc_trans/trans/_match.rs index c48b63cdcb6..d583eef0e0f 100644 --- a/src/librustc_trans/trans/_match.rs +++ b/src/librustc_trans/trans/_match.rs @@ -1528,7 +1528,7 @@ pub fn store_local<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, let scope = cleanup::var_scope(tcx, p_id); bcx = mk_binding_alloca( bcx, p_id, &path1.node, scope, (), - |(), bcx, llval, ty| { zero_mem(bcx, llval, ty); bcx }); + |(), bcx, llval, ty| { drop_done_fill_mem(bcx, llval, ty); bcx }); }); bcx } diff --git a/src/librustc_trans/trans/adt.rs b/src/librustc_trans/trans/adt.rs index 61214f65c87..9f90b5cea5f 100644 --- a/src/librustc_trans/trans/adt.rs +++ b/src/librustc_trans/trans/adt.rs @@ -81,14 +81,18 @@ pub enum Repr<'tcx> { /// Structs with destructors need a dynamic destroyedness flag to /// avoid running the destructor too many times; this is included /// in the `Struct` if present. - Univariant(Struct<'tcx>, bool), + /// (The flag, if nonzero, represents the initialization value to use; + /// if zero, then use no flag at all.) + Univariant(Struct<'tcx>, u8), /// General-case enums: for each case there is a struct, and they /// all start with a field for the discriminant. /// /// Types with destructors need a dynamic destroyedness flag to /// avoid running the destructor too many times; the last argument /// indicates whether such a flag is present. - General(IntType, Vec<Struct<'tcx>>, bool), + /// (The flag, if nonzero, represents the initialization value to use; + /// if zero, then use no flag at all.) + General(IntType, Vec<Struct<'tcx>>, u8), /// Two cases distinguished by a nullable pointer: the case with discriminant /// `nndiscr` must have single field which is known to be nonnull due to its type. /// The other case is known to be zero sized. Hence we represent the enum @@ -151,11 +155,59 @@ pub fn represent_type<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, repr } +macro_rules! repeat_u8_as_u32 { + ($name:expr) => { (($name as u32) << 24 | + ($name as u32) << 16 | + ($name as u32) << 8 | + ($name as u32)) } +} +macro_rules! repeat_u8_as_u64 { + ($name:expr) => { ((repeat_u8_as_u32!($name) as u64) << 32 | + (repeat_u8_as_u32!($name) as u64)) } +} + +pub const DTOR_NEEDED: u8 = 0x1; +pub const DTOR_NEEDED_U32: u32 = repeat_u8_as_u32!(DTOR_NEEDED); +pub const DTOR_NEEDED_U64: u64 = repeat_u8_as_u64!(DTOR_NEEDED); +#[allow(dead_code)] +pub fn dtor_needed_usize(ccx: &CrateContext) -> usize { + match &ccx.tcx().sess.target.target.target_pointer_width[..] { + "32" => DTOR_NEEDED_U32 as usize, + "64" => DTOR_NEEDED_U64 as usize, + tws => panic!("Unsupported target word size for int: {}", tws), + } +} + +pub const DTOR_DONE: u8 = 0x1d; +pub const DTOR_DONE_U32: u32 = repeat_u8_as_u32!(DTOR_DONE); +pub const DTOR_DONE_U64: u64 = repeat_u8_as_u64!(DTOR_DONE); +#[allow(dead_code)] +pub fn dtor_done_usize(ccx: &CrateContext) -> usize { + match &ccx.tcx().sess.target.target.target_pointer_width[..] { + "32" => DTOR_DONE_U32 as usize, + "64" => DTOR_DONE_U64 as usize, + tws => panic!("Unsupported target word size for int: {}", tws), + } +} + +fn dtor_to_init_u8(dtor: bool) -> u8 { + if dtor { DTOR_NEEDED } else { 0 } +} + +pub trait GetDtorType<'tcx> { fn dtor_type(&self) -> Ty<'tcx>; } +impl<'tcx> GetDtorType<'tcx> for ty::ctxt<'tcx> { + fn dtor_type(&self) -> Ty<'tcx> { self.types.u8 } +} + +fn dtor_active(flag: u8) -> bool { + flag != 0 +} + fn represent_type_uncached<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>) -> Repr<'tcx> { match t.sty { ty::ty_tup(ref elems) => { - Univariant(mk_struct(cx, &elems[..], false, t), false) + Univariant(mk_struct(cx, &elems[..], false, t), 0) } ty::ty_struct(def_id, substs) => { let fields = ty::lookup_struct_fields(cx.tcx(), def_id); @@ -165,15 +217,15 @@ fn represent_type_uncached<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, }).collect::<Vec<_>>(); let packed = ty::lookup_packed(cx.tcx(), def_id); let dtor = ty::ty_dtor(cx.tcx(), def_id).has_drop_flag(); - if dtor { ftys.push(cx.tcx().types.bool); } + if dtor { ftys.push(cx.tcx().dtor_type()); } - Univariant(mk_struct(cx, &ftys[..], packed, t), dtor) + Univariant(mk_struct(cx, &ftys[..], packed, t), dtor_to_init_u8(dtor)) } ty::ty_closure(def_id, substs) => { let typer = NormalizingClosureTyper::new(cx.tcx()); let upvars = typer.closure_upvars(def_id, substs).unwrap(); let upvar_types = upvars.iter().map(|u| u.ty).collect::<Vec<_>>(); - Univariant(mk_struct(cx, &upvar_types[..], false, t), false) + Univariant(mk_struct(cx, &upvar_types[..], false, t), 0) } ty::ty_enum(def_id, substs) => { let cases = get_cases(cx.tcx(), def_id, substs); @@ -186,9 +238,9 @@ fn represent_type_uncached<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, // Uninhabitable; represent as unit // (Typechecking will reject discriminant-sizing attrs.) assert_eq!(hint, attr::ReprAny); - let ftys = if dtor { vec!(cx.tcx().types.bool) } else { vec!() }; + let ftys = if dtor { vec!(cx.tcx().dtor_type()) } else { vec!() }; return Univariant(mk_struct(cx, &ftys[..], false, t), - dtor); + dtor_to_init_u8(dtor)); } if !dtor && cases.iter().all(|c| c.tys.len() == 0) { @@ -218,9 +270,9 @@ fn represent_type_uncached<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, // (Typechecking will reject discriminant-sizing attrs.)
assert_eq!(hint, attr::ReprAny); let mut ftys = cases[0].tys.clone(); - if dtor { ftys.push(cx.tcx().types.bool); } + if dtor { ftys.push(cx.tcx().dtor_type()); } return Univariant(mk_struct(cx, &ftys[..], false, t), - dtor); + dtor_to_init_u8(dtor)); } if !dtor && cases.len() == 2 && hint == attr::ReprAny { @@ -266,7 +318,7 @@ fn represent_type_uncached<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, let fields : Vec<_> = cases.iter().map(|c| { let mut ftys = vec!(ty_of_inttype(cx.tcx(), min_ity)); ftys.push_all(&c.tys); - if dtor { ftys.push(cx.tcx().types.bool); } + if dtor { ftys.push(cx.tcx().dtor_type()); } mk_struct(cx, &ftys, false, t) }).collect(); @@ -319,13 +371,13 @@ fn represent_type_uncached<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, let fields : Vec<_> = cases.iter().map(|c| { let mut ftys = vec!(ty_of_inttype(cx.tcx(), ity)); ftys.push_all(&c.tys); - if dtor { ftys.push(cx.tcx().types.bool); } + if dtor { ftys.push(cx.tcx().dtor_type()); } mk_struct(cx, &ftys[..], false, t) }).collect(); ensure_enum_fits_in_address_space(cx, &fields[..], t); - General(ity, fields, dtor) + General(ity, fields, dtor_to_init_u8(dtor)) } _ => cx.sess().bug(&format!("adt::represent_type called on non-ADT type: {}", ty_to_string(cx.tcx(), t))) @@ -830,18 +882,18 @@ pub fn trans_set_discr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, r: &Repr<'tcx>, val) } General(ity, ref cases, dtor) => { - if dtor { + if dtor_active(dtor) { let ptr = trans_field_ptr(bcx, r, val, discr, cases[discr as uint].fields.len() - 2); - Store(bcx, C_u8(bcx.ccx(), 1), ptr); + Store(bcx, C_u8(bcx.ccx(), DTOR_NEEDED as usize), ptr); } Store(bcx, C_integral(ll_inttype(bcx.ccx(), ity), discr as u64, true), GEPi(bcx, val, &[0, 0])) } Univariant(ref st, dtor) => { assert_eq!(discr, 0); - if dtor { - Store(bcx, C_u8(bcx.ccx(), 1), + if dtor_active(dtor) { + Store(bcx, C_u8(bcx.ccx(), DTOR_NEEDED as usize), GEPi(bcx, val, &[0, st.fields.len() - 1])); } } @@ -875,10 +927,10 @@ pub fn num_args(r: &Repr, discr: Disr) -> uint { CEnum(..) => 0, Univariant(ref st, dtor) => { assert_eq!(discr, 0); - st.fields.len() - (if dtor { 1 } else { 0 }) + st.fields.len() - (if dtor_active(dtor) { 1 } else { 0 }) } General(_, ref cases, dtor) => { - cases[discr as uint].fields.len() - 1 - (if dtor { 1 } else { 0 }) + cases[discr as uint].fields.len() - 1 - (if dtor_active(dtor) { 1 } else { 0 }) } RawNullablePointer { nndiscr, ref nullfields, .. 
} => { if discr == nndiscr { 1 } else { nullfields.len() } @@ -992,17 +1044,17 @@ pub fn trans_drop_flag_ptr<'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>, r: &Repr<'tcx>, val: ValueRef) -> datum::DatumBlock<'blk, 'tcx, datum::Expr> { let tcx = bcx.tcx(); - let ptr_ty = ty::mk_imm_ptr(bcx.tcx(), tcx.types.bool); + let ptr_ty = ty::mk_imm_ptr(bcx.tcx(), tcx.dtor_type()); match *r { - Univariant(ref st, true) => { + Univariant(ref st, dtor) if dtor_active(dtor) => { let flag_ptr = GEPi(bcx, val, &[0, st.fields.len() - 1]); datum::immediate_rvalue_bcx(bcx, flag_ptr, ptr_ty).to_expr_datumblock() } - General(_, _, true) => { + General(_, _, dtor) if dtor_active(dtor) => { let fcx = bcx.fcx; let custom_cleanup_scope = fcx.push_custom_cleanup_scope(); let scratch = unpack_datum!(bcx, datum::lvalue_scratch_datum( - bcx, tcx.types.bool, "drop_flag", + bcx, tcx.dtor_type(), "drop_flag", cleanup::CustomScope(custom_cleanup_scope), (), |_, bcx, _| bcx )); bcx = fold_variants(bcx, r, val, |variant_cx, st, value| {
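In layout terms, the switch from `bool` to `dtor_type()` means a struct with a destructor is represented as its ordinary fields plus one trailing `u8` that trans initializes to `DTOR_NEEDED` and that the drop glue later tests. The hand-written analogue below sketches the logic the compiler emits, not actual compiler code; the real glue then drop-fills the whole value rather than touching only the flag, and the constants mirror the ones defined above:

    // Values as fixed in this patch.
    const DTOR_NEEDED: u8 = 0x1;
    const DTOR_DONE: u8 = 0x1d;

    // What Univariant(mk_struct(.., [fields.., u8], ..), DTOR_NEEDED)
    // describes for a struct `S { a: u64 }` that implements Drop:
    struct SWithFlag {
        a: u64,
        drop_flag: u8, // appended because S has a destructor
    }

    fn drop_glue(v: &mut SWithFlag) {
        if v.drop_flag == DTOR_NEEDED {
            // ... run S's destructor and the fields' drop glue here ...
            v.drop_flag = DTOR_DONE;
        }
    }

    fn main() {
        let mut s = SWithFlag { a: 7, drop_flag: DTOR_NEEDED };
        drop_glue(&mut s);
        assert_eq!(s.drop_flag, DTOR_DONE);
        drop_glue(&mut s); // second invocation is a no-op
    }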
diff --git a/src/librustc_trans/trans/base.rs b/src/librustc_trans/trans/base.rs index 2f944e49b15..aee163973fd 100644 --- a/src/librustc_trans/trans/base.rs +++ b/src/librustc_trans/trans/base.rs @@ -1146,20 +1146,27 @@ pub fn memcpy_ty<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, } } -pub fn zero_mem<'blk, 'tcx>(cx: Block<'blk, 'tcx>, llptr: ValueRef, t: Ty<'tcx>) { +pub fn drop_done_fill_mem<'blk, 'tcx>(cx: Block<'blk, 'tcx>, llptr: ValueRef, t: Ty<'tcx>) { if cx.unreachable.get() { return; } - let _icx = push_ctxt("zero_mem"); + let _icx = push_ctxt("drop_done_fill_mem"); let bcx = cx; - memzero(&B(bcx), llptr, t); + memfill(&B(bcx), llptr, t, adt::DTOR_DONE); } -// Always use this function instead of storing a zero constant to the memory -// in question. If you store a zero constant, LLVM will drown in vreg +pub fn init_zero_mem<'blk, 'tcx>(cx: Block<'blk, 'tcx>, llptr: ValueRef, t: Ty<'tcx>) { + if cx.unreachable.get() { return; } + let _icx = push_ctxt("init_zero_mem"); + let bcx = cx; + memfill(&B(bcx), llptr, t, 0); +} + +// Always use this function instead of storing a constant byte to the memory +// in question. e.g., if you store a zero constant, LLVM will drown in vreg // allocation for large data structures, and the generated code will be // awful. (A telltale sign of this is large quantities of // `mov [byte ptr foo],0` in the generated code.) -fn memzero<'a, 'tcx>(b: &Builder<'a, 'tcx>, llptr: ValueRef, ty: Ty<'tcx>) { - let _icx = push_ctxt("memzero"); +fn memfill<'a, 'tcx>(b: &Builder<'a, 'tcx>, llptr: ValueRef, ty: Ty<'tcx>, byte: u8) { + let _icx = push_ctxt("memfill"); let ccx = b.ccx; let llty = type_of::type_of(ccx, ty); @@ -1172,7 +1179,7 @@ fn memzero<'a, 'tcx>(b: &Builder<'a, 'tcx>, llptr: ValueRef, ty: Ty<'tcx>) { let llintrinsicfn = ccx.get_intrinsic(&intrinsic_key); let llptr = b.pointercast(llptr, Type::i8(ccx).ptr_to()); - let llzeroval = C_u8(ccx, 0); + let llzeroval = C_u8(ccx, byte as usize); let size = machine::llsize_of(ccx, llty); let align = C_i32(ccx, type_of::align_of(ccx, ty) as i32); let volatile = C_bool(ccx, false); diff --git a/src/librustc_trans/trans/cleanup.rs b/src/librustc_trans/trans/cleanup.rs index ad07f3953cc..c7897f9b62e 100644 --- a/src/librustc_trans/trans/cleanup.rs +++ b/src/librustc_trans/trans/cleanup.rs @@ -1015,7 +1015,7 @@ impl<'tcx> Cleanup<'tcx> for DropValue<'tcx> { glue::drop_ty(bcx, self.val, self.ty, debug_loc) }; if self.zero { - base::zero_mem(bcx, self.val, self.ty); + base::drop_done_fill_mem(bcx, self.val, self.ty); } bcx } diff --git a/src/librustc_trans/trans/datum.rs b/src/librustc_trans/trans/datum.rs index 15738d1e61a..399b7eb102e 100644 --- a/src/librustc_trans/trans/datum.rs +++ b/src/librustc_trans/trans/datum.rs @@ -307,8 +307,8 @@ impl KindOps for Lvalue { -> Block<'blk, 'tcx> { let _icx = push_ctxt("<Lvalue as KindOps>::post_store"); if bcx.fcx.type_needs_drop(ty) { - // cancel cleanup of affine values by zeroing out - let () = zero_mem(bcx, val, ty); + // cancel cleanup of affine values by drop-filling the memory + let () = drop_done_fill_mem(bcx, val, ty); bcx } else { bcx diff --git a/src/librustc_trans/trans/glue.rs b/src/librustc_trans/trans/glue.rs index b2de8435f64..7eb4c6f8ca5 100644 --- a/src/librustc_trans/trans/glue.rs +++ b/src/librustc_trans/trans/glue.rs @@ -21,6 +21,7 @@ use middle::lang_items::ExchangeFreeFnLangItem; use middle::subst; use middle::subst::{Subst, Substs}; use trans::adt; +use trans::adt::GetDtorType; // for tcx.dtor_type() use trans::base::*; use trans::build::*; use trans::callee; @@ -39,6 +40,7 @@ use util::ppaux; use arena::TypedArena; use libc::c_uint; +use session::config::NoDebugInfo; use syntax::ast; pub fn trans_exchange_free_dyn<'blk, 'tcx>(cx: Block<'blk, 'tcx>, @@ -231,9 +233,31 @@ fn trans_struct_drop_flag<'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>, Load(bcx, llval) }; let drop_flag = unpack_datum!(bcx, adt::trans_drop_flag_ptr(bcx, &*repr, struct_data)); - with_cond(bcx, load_ty(bcx, drop_flag.val, bcx.tcx().types.bool), |cx| { + let loaded = load_ty(bcx, drop_flag.val, bcx.tcx().dtor_type()); + let drop_flag_llty = type_of(bcx.fcx.ccx, bcx.tcx().dtor_type()); + let init_val = C_integral(drop_flag_llty, adt::DTOR_NEEDED as u64, false); + + let bcx = if bcx.tcx().sess.opts.debuginfo == NoDebugInfo { + bcx + } else { + let drop_flag_llty = type_of(bcx.fcx.ccx, bcx.tcx().dtor_type()); + let done_val = C_integral(drop_flag_llty, adt::DTOR_DONE as u64, false); + let not_init = ICmp(bcx, llvm::IntNE, loaded, init_val, DebugLoc::None); + let not_done = ICmp(bcx, llvm::IntNE, loaded, done_val, DebugLoc::None); + let drop_flag_neither_initialized_nor_cleared = + And(bcx, not_init, not_done, DebugLoc::None); + with_cond(bcx, drop_flag_neither_initialized_nor_cleared, |cx| { + let llfn = cx.ccx().get_intrinsic(&("llvm.debugtrap")); + Call(cx, llfn, &[], None, DebugLoc::None); + cx + }) + }; + + let drop_flag_dtor_needed = ICmp(bcx, llvm::IntEQ, loaded, init_val, DebugLoc::None); + with_cond(bcx, drop_flag_dtor_needed, |cx| { trans_struct_drop(cx, t, v0, dtor_did, class_did, substs) }) + } fn trans_struct_drop<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, @@ -395,13 +419,24 @@ fn make_drop_glue<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, v0: ValueRef, t: Ty<'tcx>) -> Block<'blk, 'tcx> { // NB: v0 is an *alias* of type t here, not a direct value. let _icx = push_ctxt("make_drop_glue"); + + // Only drop the value when it ... well, we used to check for + // non-null (and maybe we need to continue doing so), but we now + // must definitely check for special bit-patterns corresponding to + // the special dtor markings. + + let inttype = Type::int(bcx.ccx()); + let dropped_pattern = C_integral(inttype, adt::dtor_done_usize(bcx.fcx.ccx) as u64, false); + match t.sty { ty::ty_uniq(content_ty) => { if !type_is_sized(bcx.tcx(), content_ty) { let llval = GEPi(bcx, v0, &[0, abi::FAT_PTR_ADDR]); let llbox = Load(bcx, llval); - let not_null = IsNotNull(bcx, llbox); - with_cond(bcx, not_null, |bcx| { + let llbox_as_usize = PtrToInt(bcx, llbox, Type::int(bcx.ccx())); + let drop_flag_not_dropped_already = + ICmp(bcx, llvm::IntNE, llbox_as_usize, dropped_pattern, DebugLoc::None); + with_cond(bcx, drop_flag_not_dropped_already, |bcx| { let bcx = drop_ty(bcx, v0, content_ty, DebugLoc::None); let info = GEPi(bcx, v0, &[0, abi::FAT_PTR_EXTRA]); let info = Load(bcx, info); @@ -420,8 +455,10 @@ fn make_drop_glue<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, v0: ValueRef, t: Ty<'tcx>) } else { let llval = v0; let llbox = Load(bcx, llval); - let not_null = IsNotNull(bcx, llbox); - with_cond(bcx, not_null, |bcx| { + let llbox_as_usize = PtrToInt(bcx, llbox, inttype); + let drop_flag_not_dropped_already = + ICmp(bcx, llvm::IntNE, llbox_as_usize, dropped_pattern, DebugLoc::None); + with_cond(bcx, drop_flag_not_dropped_already, |bcx| { let bcx = drop_ty(bcx, llbox, content_ty, DebugLoc::None); trans_exchange_free_ty(bcx, llbox, content_ty, DebugLoc::None) })
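The debug-info-gated block added to `trans_struct_drop_flag` is a sanity check: a drop flag must hold either `DTOR_NEEDED` or `DTOR_DONE`, and anything else means the value was corrupted or moved without being drop-filled, so the generated code traps. In pseudo-Rust, with `debugtrap` standing in for the `llvm.debugtrap` intrinsic the compiled code calls:

    const DTOR_NEEDED: u8 = 0x1;
    const DTOR_DONE: u8 = 0x1d;

    fn debugtrap() {
        // stand-in for llvm.debugtrap: stop in the debugger / abort
        panic!("drop flag is neither initialized nor cleared");
    }

    fn drop_with_sanity_check(flag: u8, run_dtor: fn()) {
        let not_init = flag != DTOR_NEEDED;
        let not_done = flag != DTOR_DONE;
        if not_init && not_done {
            debugtrap(); // drop_flag_neither_initialized_nor_cleared
        }
        if flag == DTOR_NEEDED {
            run_dtor(); // corresponds to the trans_struct_drop call
        }
    }

    fn the_dtor() { println!("dtor runs"); }

    fn main() {
        drop_with_sanity_check(DTOR_NEEDED, the_dtor); // runs the destructor
        drop_with_sanity_check(DTOR_DONE, the_dtor);   // already dropped: no-op
    }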
diff --git a/src/librustc_trans/trans/intrinsic.rs b/src/librustc_trans/trans/intrinsic.rs index f714c5800c5..ee0274b1827 100644 --- a/src/librustc_trans/trans/intrinsic.rs +++ b/src/librustc_trans/trans/intrinsic.rs @@ -359,11 +359,18 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>, &ccx.link_meta().crate_hash); C_u64(ccx, hash) } + (_, "init_dropped") => { + let tp_ty = *substs.types.get(FnSpace, 0); + if !return_type_is_void(ccx, tp_ty) { + drop_done_fill_mem(bcx, llresult, tp_ty); + } + C_nil(ccx) + } (_, "init") => { let tp_ty = *substs.types.get(FnSpace, 0); if !return_type_is_void(ccx, tp_ty) { // Just zero out the stack slot.
(See comment on base::memfill for explanation) - zero_mem(bcx, llresult, tp_ty); + init_zero_mem(bcx, llresult, tp_ty); } C_nil(ccx) } diff --git a/src/librustc_typeck/check/mod.rs b/src/librustc_typeck/check/mod.rs index 1e38a7d2d9f..d24ec16cc21 100644 --- a/src/librustc_typeck/check/mod.rs +++ b/src/librustc_typeck/check/mod.rs @@ -5384,7 +5384,7 @@ pub fn check_intrinsic_type(ccx: &CrateCtxt, it: &ast::ForeignItem) { "breakpoint" => (0, Vec::new(), ty::mk_nil(tcx)), "size_of" | "pref_align_of" | "min_align_of" => (1, Vec::new(), ccx.tcx.types.uint), - "init" => (1, Vec::new(), param(ccx, 0)), + "init" | "init_dropped" => (1, Vec::new(), param(ccx, 0)), "uninit" => (1, Vec::new(), param(ccx, 0)), "forget" => (1, vec!( param(ccx, 0) ), ty::mk_nil(tcx)), "transmute" => (2, vec!( param(ccx, 0) ), param(ccx, 1)), diff --git a/src/libstd/collections/hash/table.rs b/src/libstd/collections/hash/table.rs index 052bcfd7e16..710f0fe19db 100644 --- a/src/libstd/collections/hash/table.rs +++ b/src/libstd/collections/hash/table.rs @@ -985,7 +985,7 @@ impl<K: Clone, V: Clone> Clone for RawTable<K, V> { #[unsafe_destructor] impl<K, V> Drop for RawTable<K, V> { fn drop(&mut self) { - if self.capacity == 0 { + if self.capacity == 0 || self.capacity == mem::POST_DROP_USIZE { return; } diff --git a/src/libsyntax/fold.rs b/src/libsyntax/fold.rs index 105a61d0857..d4451cc7b71 100644 --- a/src/libsyntax/fold.rs +++ b/src/libsyntax/fold.rs @@ -40,7 +40,7 @@ impl<T> MoveMap<T> for Vec<T> { for p in &mut self { unsafe { - // FIXME(#5016) this shouldn't need to zero to be safe. - ptr::write(p, f(ptr::read_and_zero(p))); + // FIXME(#5016) this shouldn't need to drop-fill to be safe. + ptr::write(p, f(ptr::read_and_drop(p))); } } self diff --git a/src/libsyntax/ptr.rs b/src/libsyntax/ptr.rs index ca3a1848c3a..7e0bcd3e1dc 100644 --- a/src/libsyntax/ptr.rs +++ b/src/libsyntax/ptr.rs @@ -71,8 +71,8 @@ impl<T: 'static> P<T> { { unsafe { let p = &mut *self.ptr; - // FIXME(#5016) this shouldn't need to zero to be safe. - ptr::write(p, f(ptr::read_and_zero(p))); + // FIXME(#5016) this shouldn't need to drop-fill to be safe. + ptr::write(p, f(ptr::read_and_drop(p))); } self } diff --git a/src/test/run-pass/intrinsic-move-val.rs b/src/test/run-pass/intrinsic-move-val.rs index 89aea93e7b3..cd517bcad30 100644 --- a/src/test/run-pass/intrinsic-move-val.rs +++ b/src/test/run-pass/intrinsic-move-val.rs @@ -14,7 +14,7 @@ #![feature(box_syntax)] #![feature(intrinsics)] -use std::mem::transmute; +use std::mem::{self, transmute}; mod rusti { extern "rust-intrinsic" { @@ -30,6 +30,6 @@ pub fn main() { let mut z: *const uint = transmute(&x); rusti::move_val_init(&mut y, x); assert_eq!(*y, 1); - assert_eq!(*z, 0); // `x` is nulled out, not directly visible + assert_eq!(*z, mem::POST_DROP_USIZE); // `x` is drop-filled, not directly visible } }
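The updated test pins down the user-visible consequence of the whole series: moving a value out of a local no longer zeroes the old slot, it drop-fills it, and that sentinel is exactly what the new comparisons in the drop glue look for. The same observation should be possible without `move_val_init`, using a plain move of a droppable value; a sketch against the unstable `filling_drop` constants of this era, on the assumption that an ordinary move compiles to the same `post_store` drop-fill:

    #![feature(filling_drop)]

    use std::mem;

    fn main() {
        let x: Box<usize> = Box::new(1);
        let z: *const usize = unsafe { mem::transmute(&x) };

        let y = x; // moving out of `x` drop-fills its stack slot

        unsafe { assert_eq!(*z, mem::POST_DROP_USIZE); }
        assert_eq!(*y, 1);
    }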