Rollup merge of #23535: pnkfelix/fsk-filling-drop

Replace zeroing-on-drop with filling-on-drop.

This is meant to set the stage for removing *all* zeroing and filling (on drop) in the future.

Note that the code is meant to be entirely abstract with respect to the particular values used for the drop flags: the final commit demonstrates how to go from zeroing-on-drop to filling-on-drop by changing the value of three constants (in two files).

See further discussion on the internals thread:
  http://internals.rust-lang.org/t/attention-hackers-filling-drop/1715/11

[breaking-change] especially for structs / enums using `#[unsafe_no_drop_flag]`.
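For authors of such types, the practical upshot is that a destructor can no longer treat "moved-from" as "all zero bytes". A minimal sketch of the new idiom, using a hypothetical `MyBox` type on a nightly with the features from this PR:

#![feature(unsafe_no_drop_flag, filling_drop)]

use std::mem;

#[unsafe_no_drop_flag]
struct MyBox {
    ptr: *mut u8, // hypothetical raw allocation owned by MyBox
}

impl Drop for MyBox {
    fn drop(&mut self) {
        // A moved-from value used to be zeroed; it is now filled with
        // the POST_DROP byte pattern, so guard on both sentinels.
        if self.ptr.is_null() || self.ptr as usize == mem::POST_DROP_USIZE {
            return;
        }
        // ... release the allocation here ...
    }
}

fn main() {}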
This commit is contained in:
Alex Crichton 2015-03-27 10:07:41 -07:00
commit e42521aa58
31 changed files with 637 additions and 63 deletions

View File

@ -354,7 +354,8 @@ impl<T> Drop for Arc<T> {
// more than once (but it is guaranteed to be zeroed after the first if
// it's run more than once)
let ptr = *self._ptr;
if ptr.is_null() { return }
// if ptr.is_null() { return }
if ptr.is_null() || ptr as usize == mem::POST_DROP_USIZE { return }
// Because `fetch_sub` is already atomic, we do not need to synchronize
// with other threads unless we are going to delete the object. This
@ -485,7 +486,7 @@ impl<T> Drop for Weak<T> {
let ptr = *self._ptr;
// see comments above for why this check is here
if ptr.is_null() { return }
if ptr.is_null() || ptr as usize == mem::POST_DROP_USIZE { return }
// If we find out that we were the last weak pointer, then it's time to
// deallocate the data entirely. See the discussion in Arc::drop() about

View File

@ -75,7 +75,7 @@
#![feature(box_syntax)]
#![feature(optin_builtin_traits)]
#![feature(unboxed_closures)]
#![feature(unsafe_no_drop_flag)]
#![feature(unsafe_no_drop_flag, filling_drop)]
#![feature(core)]
#![feature(unique)]
#![cfg_attr(test, feature(test, alloc, rustc_private))]

View File

@ -160,7 +160,7 @@ use core::default::Default;
use core::fmt;
use core::hash::{Hasher, Hash};
use core::marker;
use core::mem::{min_align_of, size_of, forget};
use core::mem::{self, min_align_of, size_of, forget};
use core::nonzero::NonZero;
use core::ops::{Deref, Drop};
use core::option::Option;
@ -407,7 +407,7 @@ impl<T> Drop for Rc<T> {
fn drop(&mut self) {
unsafe {
let ptr = *self._ptr;
if !ptr.is_null() {
if !ptr.is_null() && ptr as usize != mem::POST_DROP_USIZE {
self.dec_strong();
if self.strong() == 0 {
ptr::read(&**self); // destroy the contained object
@ -718,7 +718,7 @@ impl<T> Drop for Weak<T> {
fn drop(&mut self) {
unsafe {
let ptr = *self._ptr;
if !ptr.is_null() {
if !ptr.is_null() && ptr as usize != mem::POST_DROP_USIZE {
self.dec_weak();
// the weak count starts at 1, and will only go to zero if all
// the strong pointers have disappeared.

View File

@ -280,9 +280,11 @@ impl<T> Drop for RawItems<T> {
#[unsafe_destructor]
impl<K, V> Drop for Node<K, V> {
fn drop(&mut self) {
if self.keys.is_null() {
if self.keys.is_null() ||
(unsafe { self.keys.get() as *const K as usize == mem::POST_DROP_USIZE })
{
// Since we have #[unsafe_no_drop_flag], we have to watch
// out for a null value being stored in self.keys. (Using
// out for the sentinel value being stored in self.keys. (Using
// null is technically a violation of the `Unique`
// requirements, though.)
return;

View File

@ -36,7 +36,7 @@
#![feature(unicode)]
#![feature(unsafe_destructor)]
#![feature(unique)]
#![feature(unsafe_no_drop_flag)]
#![feature(unsafe_no_drop_flag, filling_drop)]
#![feature(step_by)]
#![feature(str_char)]
#![feature(convert)]

View File

@ -1694,7 +1694,7 @@ impl<T> Drop for Vec<T> {
fn drop(&mut self) {
// This is (and should always remain) a no-op if the fields are
// zeroed (when moving out, because of #[unsafe_no_drop_flag]).
if self.cap != 0 {
if self.cap != 0 && self.cap != mem::POST_DROP_USIZE {
unsafe {
for x in &*self {
ptr::read(x);
@ -1977,7 +1977,7 @@ impl<'a, T> ExactSizeIterator for Drain<'a, T> {}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T> Drop for Drain<'a, T> {
fn drop(&mut self) {
// self.ptr == self.end == null if drop has already been called,
// self.ptr == self.end == mem::POST_DROP_USIZE if drop has already been called,
// so we can use #[unsafe_no_drop_flag].
// destroy the remaining elements
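(Worth spelling out why `cap` can double as the flag here: with `#[unsafe_no_drop_flag]` there is no separate flag byte, so moving the vector out fills every byte of the old copy with `POST_DROP_U8`; `cap`, being a plain `usize`, then reads back as exactly `POST_DROP_USIZE`.)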

View File

@ -191,13 +191,35 @@ extern "rust-intrinsic" {
/// crate it is invoked in.
pub fn type_id<T: ?Sized + 'static>() -> u64;
/// Create a value initialized so that its drop flag,
/// if any, says that it has been dropped.
///
/// `init_dropped` is unsafe because it returns a datum with all
/// of its bytes set to the drop flag, which generally does not
/// correspond to a valid value.
///
/// This intrinsic is likely to be deprecated in the future when
/// Rust moves to non-zeroing dynamic drop (and thus removes the
/// embedded drop flags that are being established by this
/// intrinsic).
#[cfg(not(stage0))]
pub fn init_dropped<T>() -> T;
/// Create a value initialized to zero.
///
/// `init` is unsafe because it returns a zeroed-out datum,
/// which is unsafe unless T is Copy.
/// which is unsafe unless T is `Copy`. Also, even if T is
/// `Copy`, an all-zero value may not correspond to any legitimate
/// state for the type in question.
pub fn init<T>() -> T;
/// Create an uninitialized value.
///
/// `uninit` is unsafe because there is no guarantee of what its
/// contents are. In particular, its drop flag may be set to any
/// state, which means it may claim to be either dropped or
/// undropped. In the general case one must use `ptr::write` to
/// initialize memory previously set to the result of `uninit`.
pub fn uninit<T>() -> T;
/// Move a value out of scope without running drop glue.
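To make the three-way distinction concrete, here is a hedged sketch that declares the intrinsics directly (the `intrinsics` feature is required, and `init_dropped` only exists in compilers built past stage0):

#![feature(intrinsics, filling_drop)]

mod rusti {
    extern "rust-intrinsic" {
        pub fn init<T>() -> T;         // all bytes zero
        pub fn init_dropped<T>() -> T; // all bytes set to the drop-fill value
        pub fn uninit<T>() -> T;       // contents unspecified
    }
}

fn main() {
    unsafe {
        let a: usize = rusti::init();
        assert_eq!(a, 0);
        let b: usize = rusti::init_dropped();
        assert_eq!(b, std::mem::POST_DROP_USIZE);
        // rusti::uninit() results must be overwritten (e.g. with
        // ptr::write) before being read.
    }
}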

View File

@ -158,6 +158,32 @@ pub unsafe fn zeroed<T>() -> T {
intrinsics::init()
}
/// Create a value initialized to an unspecified series of bytes.
///
/// The byte sequence usually indicates that the value at the memory
/// in question has been dropped. Thus, *if* T carries a drop flag,
/// any associated destructor will not be run when the value falls out
/// of scope.
///
/// Some code at one time used the `zeroed` function above to
/// accomplish this goal.
///
/// This function is expected to be deprecated with the transition
/// to non-zeroing drop.
#[inline]
#[unstable(feature = "filling_drop")]
pub unsafe fn dropped<T>() -> T {
#[cfg(stage0)]
#[inline(always)]
unsafe fn dropped_impl<T>() -> T { zeroed() }
#[cfg(not(stage0))]
#[inline(always)]
unsafe fn dropped_impl<T>() -> T { intrinsics::init_dropped() }
dropped_impl()
}
/// Create an uninitialized value.
///
/// Care must be taken when using this function, if the type `T` has a destructor and the value
@ -291,6 +317,49 @@ pub fn replace<T>(dest: &mut T, mut src: T) -> T {
#[stable(feature = "rust1", since = "1.0.0")]
pub fn drop<T>(_x: T) { }
macro_rules! repeat_u8_as_u32 {
($name:expr) => { (($name as u32) << 24 |
($name as u32) << 16 |
($name as u32) << 8 |
($name as u32)) }
}
macro_rules! repeat_u8_as_u64 {
($name:expr) => { ((repeat_u8_as_u32!($name) as u64) << 32 |
(repeat_u8_as_u32!($name) as u64)) }
}
// NOTE: Keep synchronized with values used in librustc_trans::trans::adt.
//
// In particular, the POST_DROP_U8 marker must never equal the
// DTOR_NEEDED_U8 marker.
//
// For a while pnkfelix was using 0xc1 here.
// But having the sign bit set is a pain, so 0x1d is probably better.
//
// And of course, 0x00 brings back the old world of zero'ing on drop.
#[cfg(not(stage0))] #[unstable(feature = "filling_drop")]
pub const POST_DROP_U8: u8 = 0x1d;
#[cfg(not(stage0))] #[unstable(feature = "filling_drop")]
pub const POST_DROP_U32: u32 = repeat_u8_as_u32!(POST_DROP_U8);
#[cfg(not(stage0))] #[unstable(feature = "filling_drop")]
pub const POST_DROP_U64: u64 = repeat_u8_as_u64!(POST_DROP_U8);
#[cfg(target_pointer_width = "32")]
#[cfg(not(stage0))] #[unstable(feature = "filling_drop")]
pub const POST_DROP_USIZE: usize = POST_DROP_U32 as usize;
#[cfg(target_pointer_width = "64")]
#[cfg(not(stage0))] #[unstable(feature = "filling_drop")]
pub const POST_DROP_USIZE: usize = POST_DROP_U64 as usize;
#[cfg(stage0)] #[unstable(feature = "filling_drop")]
pub const POST_DROP_U8: u8 = 0;
#[cfg(stage0)] #[unstable(feature = "filling_drop")]
pub const POST_DROP_U32: u32 = 0;
#[cfg(stage0)] #[unstable(feature = "filling_drop")]
pub const POST_DROP_U64: u64 = 0;
#[cfg(stage0)] #[unstable(feature = "filling_drop")]
pub const POST_DROP_USIZE: usize = 0;
/// Interprets `src` as `&U`, and then reads `src` without moving the contained value.
///
/// This function will unsafely assume the pointer `src` is valid for `sizeof(U)` bytes by
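Putting the two halves of this file together: the repeat macros splat the fill byte across wider integers, and `dropped` returns a value whose every byte is that fill. A small sanity check, assuming a post-stage0 nightly with `filling_drop`:

#![feature(filling_drop)]

use std::mem;

fn main() {
    // 0x1d repeated across each width.
    assert_eq!(mem::POST_DROP_U32, 0x1d1d1d1d);
    assert_eq!(mem::POST_DROP_U64, 0x1d1d1d1d1d1d1d1d);

    unsafe {
        // Every byte of the result is the fill byte, so a plain
        // usize comes back equal to POST_DROP_USIZE.
        let v: usize = mem::dropped();
        assert_eq!(v, mem::POST_DROP_USIZE);
    }
}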

View File

@ -230,6 +230,21 @@ pub unsafe fn read_and_zero<T>(dest: *mut T) -> T {
tmp
}
/// Variant of `read_and_zero` that writes the specific drop-flag byte
/// (which may be more appropriate than zero).
#[inline(always)]
#[unstable(feature = "core",
reason = "may play a larger role in std::ptr future extensions")]
pub unsafe fn read_and_drop<T>(dest: *mut T) -> T {
// Copy the data out from `dest`:
let tmp = read(&*dest);
// Now mark `dest` as dropped:
write_bytes(dest, mem::POST_DROP_U8, 1);
tmp
}
/// Overwrites a memory location with the given value without reading or
/// dropping the old value.
///
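The expected client is an in-place map over a slot: read the old value out, let `read_and_drop` mark the slot as dropped, then write the replacement back without dropping the marked bytes. A sketch (unstable `core` feature assumed for `read_and_drop`):

#![feature(core)]

use std::ptr;

fn main() {
    let mut s = "hello".to_string();
    unsafe {
        let p: *mut String = &mut s;
        // Read the value out; the slot's bytes become POST_DROP_U8,
        // so any embedded drop flag reads as already dropped.
        let old = ptr::read_and_drop(p);
        // Overwrite the slot without dropping the marked contents.
        ptr::write(p, old + " world");
    }
    assert_eq!(s, "hello world");
}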

View File

@ -604,6 +604,8 @@ options! {DebuggingOptions, DebuggingSetter, basic_debugging_options,
"Print the size of enums and their variants"),
force_overflow_checks: Option<bool> = (None, parse_opt_bool,
"Force overflow checks on or off"),
force_dropflag_checks: Option<bool> = (None, parse_opt_bool,
"Force drop flag checks on or off"),
}
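Like the other `-Z` debugging options this is driven from the command line, e.g. `rustc -Z force-dropflag-checks=on foo.rs` (as the new run-pass tests below do via `compile-flags`); when left unset, the checks default to following `debug_assertions`, per the `trans_crate` change further down.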
pub fn default_lib_output() -> CrateType {

View File

@ -1528,7 +1528,7 @@ pub fn store_local<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
let scope = cleanup::var_scope(tcx, p_id);
bcx = mk_binding_alloca(
bcx, p_id, &path1.node, scope, (),
|(), bcx, llval, ty| { zero_mem(bcx, llval, ty); bcx });
|(), bcx, llval, ty| { drop_done_fill_mem(bcx, llval, ty); bcx });
});
bcx
}

View File

@ -81,14 +81,18 @@ pub enum Repr<'tcx> {
/// Structs with destructors need a dynamic destroyedness flag to
/// avoid running the destructor too many times; this is included
/// in the `Struct` if present.
Univariant(Struct<'tcx>, bool),
/// (The flag, if nonzero, represents the initialization value to use;
/// if zero, then use no flag at all.)
Univariant(Struct<'tcx>, u8),
/// General-case enums: for each case there is a struct, and they
/// all start with a field for the discriminant.
///
/// Types with destructors need a dynamic destroyedness flag to
/// avoid running the destructor too many times; the last argument
/// indicates whether such a flag is present.
General(IntType, Vec<Struct<'tcx>>, bool),
/// (The flag, if nonzero, represents the initialization value to use;
/// if zero, then use no flag at all.)
General(IntType, Vec<Struct<'tcx>>, u8),
/// Two cases distinguished by a nullable pointer: the case with discriminant
/// `nndiscr` must have single field which is known to be nonnull due to its type.
/// The other case is known to be zero sized. Hence we represent the enum
@ -151,11 +155,59 @@ pub fn represent_type<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
repr
}
macro_rules! repeat_u8_as_u32 {
($name:expr) => { (($name as u32) << 24 |
($name as u32) << 16 |
($name as u32) << 8 |
($name as u32)) }
}
macro_rules! repeat_u8_as_u64 {
($name:expr) => { ((repeat_u8_as_u32!($name) as u64) << 32 |
(repeat_u8_as_u32!($name) as u64)) }
}
pub const DTOR_NEEDED: u8 = 0xd4;
pub const DTOR_NEEDED_U32: u32 = repeat_u8_as_u32!(DTOR_NEEDED);
pub const DTOR_NEEDED_U64: u64 = repeat_u8_as_u64!(DTOR_NEEDED);
#[allow(dead_code)]
pub fn dtor_needed_usize(ccx: &CrateContext) -> usize {
match &ccx.tcx().sess.target.target.target_pointer_width[..] {
"32" => DTOR_NEEDED_U32 as usize,
"64" => DTOR_NEEDED_U64 as usize,
tws => panic!("Unsupported target word size for int: {}", tws),
}
}
pub const DTOR_DONE: u8 = 0x1d;
pub const DTOR_DONE_U32: u32 = repeat_u8_as_u32!(DTOR_DONE);
pub const DTOR_DONE_U64: u64 = repeat_u8_as_u64!(DTOR_DONE);
#[allow(dead_code)]
pub fn dtor_done_usize(ccx: &CrateContext) -> usize {
match &ccx.tcx().sess.target.target.target_pointer_width[..] {
"32" => DTOR_DONE_U32 as usize,
"64" => DTOR_DONE_U64 as usize,
tws => panic!("Unsupported target word size for int: {}", tws),
}
}
fn dtor_to_init_u8(dtor: bool) -> u8 {
if dtor { DTOR_NEEDED } else { 0 }
}
pub trait GetDtorType<'tcx> { fn dtor_type(&self) -> Ty<'tcx>; }
impl<'tcx> GetDtorType<'tcx> for ty::ctxt<'tcx> {
fn dtor_type(&self) -> Ty<'tcx> { self.types.u8 }
}
fn dtor_active(flag: u8) -> bool {
flag != 0
}
fn represent_type_uncached<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
t: Ty<'tcx>) -> Repr<'tcx> {
match t.sty {
ty::ty_tup(ref elems) => {
Univariant(mk_struct(cx, &elems[..], false, t), false)
Univariant(mk_struct(cx, &elems[..], false, t), 0)
}
ty::ty_struct(def_id, substs) => {
let fields = ty::lookup_struct_fields(cx.tcx(), def_id);
@ -165,15 +217,15 @@ fn represent_type_uncached<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
}).collect::<Vec<_>>();
let packed = ty::lookup_packed(cx.tcx(), def_id);
let dtor = ty::ty_dtor(cx.tcx(), def_id).has_drop_flag();
if dtor { ftys.push(cx.tcx().types.bool); }
if dtor { ftys.push(cx.tcx().dtor_type()); }
Univariant(mk_struct(cx, &ftys[..], packed, t), dtor)
Univariant(mk_struct(cx, &ftys[..], packed, t), dtor_to_init_u8(dtor))
}
ty::ty_closure(def_id, substs) => {
let typer = NormalizingClosureTyper::new(cx.tcx());
let upvars = typer.closure_upvars(def_id, substs).unwrap();
let upvar_types = upvars.iter().map(|u| u.ty).collect::<Vec<_>>();
Univariant(mk_struct(cx, &upvar_types[..], false, t), false)
Univariant(mk_struct(cx, &upvar_types[..], false, t), 0)
}
ty::ty_enum(def_id, substs) => {
let cases = get_cases(cx.tcx(), def_id, substs);
@ -186,9 +238,9 @@ fn represent_type_uncached<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
// Uninhabitable; represent as unit
// (Typechecking will reject discriminant-sizing attrs.)
assert_eq!(hint, attr::ReprAny);
let ftys = if dtor { vec!(cx.tcx().types.bool) } else { vec!() };
let ftys = if dtor { vec!(cx.tcx().dtor_type()) } else { vec!() };
return Univariant(mk_struct(cx, &ftys[..], false, t),
dtor);
dtor_to_init_u8(dtor));
}
if !dtor && cases.iter().all(|c| c.tys.len() == 0) {
@ -218,9 +270,9 @@ fn represent_type_uncached<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
// (Typechecking will reject discriminant-sizing attrs.)
assert_eq!(hint, attr::ReprAny);
let mut ftys = cases[0].tys.clone();
if dtor { ftys.push(cx.tcx().types.bool); }
if dtor { ftys.push(cx.tcx().dtor_type()); }
return Univariant(mk_struct(cx, &ftys[..], false, t),
dtor);
dtor_to_init_u8(dtor));
}
if !dtor && cases.len() == 2 && hint == attr::ReprAny {
@ -266,7 +318,7 @@ fn represent_type_uncached<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
let fields : Vec<_> = cases.iter().map(|c| {
let mut ftys = vec!(ty_of_inttype(cx.tcx(), min_ity));
ftys.push_all(&c.tys);
if dtor { ftys.push(cx.tcx().types.bool); }
if dtor { ftys.push(cx.tcx().dtor_type()); }
mk_struct(cx, &ftys, false, t)
}).collect();
@ -319,13 +371,13 @@ fn represent_type_uncached<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
let fields : Vec<_> = cases.iter().map(|c| {
let mut ftys = vec!(ty_of_inttype(cx.tcx(), ity));
ftys.push_all(&c.tys);
if dtor { ftys.push(cx.tcx().types.bool); }
if dtor { ftys.push(cx.tcx().dtor_type()); }
mk_struct(cx, &ftys[..], false, t)
}).collect();
ensure_enum_fits_in_address_space(cx, &fields[..], t);
General(ity, fields, dtor)
General(ity, fields, dtor_to_init_u8(dtor))
}
_ => cx.sess().bug(&format!("adt::represent_type called on non-ADT type: {}",
ty_to_string(cx.tcx(), t)))
@ -830,18 +882,18 @@ pub fn trans_set_discr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, r: &Repr<'tcx>,
val)
}
General(ity, ref cases, dtor) => {
if dtor {
if dtor_active(dtor) {
let ptr = trans_field_ptr(bcx, r, val, discr,
cases[discr as uint].fields.len() - 2);
Store(bcx, C_u8(bcx.ccx(), 1), ptr);
Store(bcx, C_u8(bcx.ccx(), DTOR_NEEDED as usize), ptr);
}
Store(bcx, C_integral(ll_inttype(bcx.ccx(), ity), discr as u64, true),
GEPi(bcx, val, &[0, 0]))
}
Univariant(ref st, dtor) => {
assert_eq!(discr, 0);
if dtor {
Store(bcx, C_u8(bcx.ccx(), 1),
if dtor_active(dtor) {
Store(bcx, C_u8(bcx.ccx(), DTOR_NEEDED as usize),
GEPi(bcx, val, &[0, st.fields.len() - 1]));
}
}
@ -875,10 +927,10 @@ pub fn num_args(r: &Repr, discr: Disr) -> uint {
CEnum(..) => 0,
Univariant(ref st, dtor) => {
assert_eq!(discr, 0);
st.fields.len() - (if dtor { 1 } else { 0 })
st.fields.len() - (if dtor_active(dtor) { 1 } else { 0 })
}
General(_, ref cases, dtor) => {
cases[discr as uint].fields.len() - 1 - (if dtor { 1 } else { 0 })
cases[discr as uint].fields.len() - 1 - (if dtor_active(dtor) { 1 } else { 0 })
}
RawNullablePointer { nndiscr, ref nullfields, .. } => {
if discr == nndiscr { 1 } else { nullfields.len() }
@ -992,17 +1044,17 @@ pub fn trans_drop_flag_ptr<'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>, r: &Repr<'tcx
-> datum::DatumBlock<'blk, 'tcx, datum::Expr>
{
let tcx = bcx.tcx();
let ptr_ty = ty::mk_imm_ptr(bcx.tcx(), tcx.types.bool);
let ptr_ty = ty::mk_imm_ptr(bcx.tcx(), tcx.dtor_type());
match *r {
Univariant(ref st, true) => {
Univariant(ref st, dtor) if dtor_active(dtor) => {
let flag_ptr = GEPi(bcx, val, &[0, st.fields.len() - 1]);
datum::immediate_rvalue_bcx(bcx, flag_ptr, ptr_ty).to_expr_datumblock()
}
General(_, _, true) => {
General(_, _, dtor) if dtor_active(dtor) => {
let fcx = bcx.fcx;
let custom_cleanup_scope = fcx.push_custom_cleanup_scope();
let scratch = unpack_datum!(bcx, datum::lvalue_scratch_datum(
bcx, tcx.types.bool, "drop_flag",
bcx, tcx.dtor_type(), "drop_flag",
cleanup::CustomScope(custom_cleanup_scope), (), |_, bcx, _| bcx
));
bcx = fold_variants(bcx, r, val, |variant_cx, st, value| {

View File

@ -1146,20 +1146,27 @@ pub fn memcpy_ty<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
}
}
pub fn zero_mem<'blk, 'tcx>(cx: Block<'blk, 'tcx>, llptr: ValueRef, t: Ty<'tcx>) {
pub fn drop_done_fill_mem<'blk, 'tcx>(cx: Block<'blk, 'tcx>, llptr: ValueRef, t: Ty<'tcx>) {
if cx.unreachable.get() { return; }
let _icx = push_ctxt("zero_mem");
let _icx = push_ctxt("drop_done_fill_mem");
let bcx = cx;
memzero(&B(bcx), llptr, t);
memfill(&B(bcx), llptr, t, adt::DTOR_DONE);
}
// Always use this function instead of storing a zero constant to the memory
// in question. If you store a zero constant, LLVM will drown in vreg
pub fn init_zero_mem<'blk, 'tcx>(cx: Block<'blk, 'tcx>, llptr: ValueRef, t: Ty<'tcx>) {
if cx.unreachable.get() { return; }
let _icx = push_ctxt("init_zero_mem");
let bcx = cx;
memfill(&B(bcx), llptr, t, 0);
}
// Always use this function instead of storing a constant byte to the memory
// in question. For example, if you store a zero constant, LLVM will drown in vreg
// allocation for large data structures, and the generated code will be
// awful. (A telltale sign of this is large quantities of
// `mov [byte ptr foo],0` in the generated code.)
fn memzero<'a, 'tcx>(b: &Builder<'a, 'tcx>, llptr: ValueRef, ty: Ty<'tcx>) {
let _icx = push_ctxt("memzero");
fn memfill<'a, 'tcx>(b: &Builder<'a, 'tcx>, llptr: ValueRef, ty: Ty<'tcx>, byte: u8) {
let _icx = push_ctxt("memfill");
let ccx = b.ccx;
let llty = type_of::type_of(ccx, ty);
@ -1172,7 +1179,7 @@ fn memzero<'a, 'tcx>(b: &Builder<'a, 'tcx>, llptr: ValueRef, ty: Ty<'tcx>) {
let llintrinsicfn = ccx.get_intrinsic(&intrinsic_key);
let llptr = b.pointercast(llptr, Type::i8(ccx).ptr_to());
let llzeroval = C_u8(ccx, 0);
let llzeroval = C_u8(ccx, byte as usize);
let size = machine::llsize_of(ccx, llty);
let align = C_i32(ccx, type_of::align_of(ccx, ty) as i32);
let volatile = C_bool(ccx, false);
@ -3022,6 +3029,12 @@ pub fn trans_crate<'tcx>(analysis: ty::CrateAnalysis<'tcx>)
tcx.sess.opts.debug_assertions
};
let check_dropflag = if let Some(v) = tcx.sess.opts.debugging_opts.force_dropflag_checks {
v
} else {
tcx.sess.opts.debug_assertions
};
// Before we touch LLVM, make sure that multithreading is enabled.
unsafe {
use std::sync::{Once, ONCE_INIT};
@ -3050,7 +3063,8 @@ pub fn trans_crate<'tcx>(analysis: ty::CrateAnalysis<'tcx>)
Sha256::new(),
link_meta.clone(),
reachable,
check_overflow);
check_overflow,
check_dropflag);
{
let ccx = shared_ccx.get_ccx(0);

View File

@ -1015,7 +1015,7 @@ impl<'tcx> Cleanup<'tcx> for DropValue<'tcx> {
glue::drop_ty(bcx, self.val, self.ty, debug_loc)
};
if self.zero {
base::zero_mem(bcx, self.val, self.ty);
base::drop_done_fill_mem(bcx, self.val, self.ty);
}
bcx
}

View File

@ -69,6 +69,7 @@ pub struct SharedCrateContext<'tcx> {
tcx: ty::ctxt<'tcx>,
stats: Stats,
check_overflow: bool,
check_drop_flag_for_sanity: bool,
available_monomorphizations: RefCell<FnvHashSet<String>>,
available_drop_glues: RefCell<FnvHashMap<Ty<'tcx>, String>>,
@ -242,7 +243,8 @@ impl<'tcx> SharedCrateContext<'tcx> {
symbol_hasher: Sha256,
link_meta: LinkMeta,
reachable: NodeSet,
check_overflow: bool)
check_overflow: bool,
check_drop_flag_for_sanity: bool)
-> SharedCrateContext<'tcx> {
let (metadata_llcx, metadata_llmod) = unsafe {
create_context_and_module(&tcx.sess, "metadata")
@ -271,6 +273,7 @@ impl<'tcx> SharedCrateContext<'tcx> {
fn_stats: RefCell::new(Vec::new()),
},
check_overflow: check_overflow,
check_drop_flag_for_sanity: check_drop_flag_for_sanity,
available_monomorphizations: RefCell::new(FnvHashSet()),
available_drop_glues: RefCell::new(FnvHashMap()),
};
@ -727,6 +730,13 @@ impl<'b, 'tcx> CrateContext<'b, 'tcx> {
pub fn check_overflow(&self) -> bool {
self.shared.check_overflow
}
pub fn check_drop_flag_for_sanity(&self) -> bool {
// This controls whether we emit a conditional llvm.debugtrap
// guarded on whether the dropflag is one of its (two) valid
// values.
self.shared.check_drop_flag_for_sanity
}
}
fn declare_intrinsic(ccx: &CrateContext, key: & &'static str) -> Option<ValueRef> {

View File

@ -307,8 +307,8 @@ impl KindOps for Lvalue {
-> Block<'blk, 'tcx> {
let _icx = push_ctxt("<Lvalue as KindOps>::post_store");
if bcx.fcx.type_needs_drop(ty) {
// cancel cleanup of affine values by zeroing out
let () = zero_mem(bcx, val, ty);
// cancel cleanup of affine values by drop-filling the memory
let () = drop_done_fill_mem(bcx, val, ty);
bcx
} else {
bcx

View File

@ -21,6 +21,7 @@ use middle::lang_items::ExchangeFreeFnLangItem;
use middle::subst;
use middle::subst::{Subst, Substs};
use trans::adt;
use trans::adt::GetDtorType; // for tcx.dtor_type()
use trans::base::*;
use trans::build::*;
use trans::callee;
@ -231,9 +232,31 @@ fn trans_struct_drop_flag<'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
Load(bcx, llval)
};
let drop_flag = unpack_datum!(bcx, adt::trans_drop_flag_ptr(bcx, &*repr, struct_data));
with_cond(bcx, load_ty(bcx, drop_flag.val, bcx.tcx().types.bool), |cx| {
let loaded = load_ty(bcx, drop_flag.val, bcx.tcx().dtor_type());
let drop_flag_llty = type_of(bcx.fcx.ccx, bcx.tcx().dtor_type());
let init_val = C_integral(drop_flag_llty, adt::DTOR_NEEDED as u64, false);
let bcx = if !bcx.ccx().check_drop_flag_for_sanity() {
bcx
} else {
let drop_flag_llty = type_of(bcx.fcx.ccx, bcx.tcx().dtor_type());
let done_val = C_integral(drop_flag_llty, adt::DTOR_DONE as u64, false);
let not_init = ICmp(bcx, llvm::IntNE, loaded, init_val, DebugLoc::None);
let not_done = ICmp(bcx, llvm::IntNE, loaded, done_val, DebugLoc::None);
let drop_flag_neither_initialized_nor_cleared =
And(bcx, not_init, not_done, DebugLoc::None);
with_cond(bcx, drop_flag_neither_initialized_nor_cleared, |cx| {
let llfn = cx.ccx().get_intrinsic(&("llvm.debugtrap"));
Call(cx, llfn, &[], None, DebugLoc::None);
cx
})
};
let drop_flag_dtor_needed = ICmp(bcx, llvm::IntEQ, loaded, init_val, DebugLoc::None);
with_cond(bcx, drop_flag_dtor_needed, |cx| {
trans_struct_drop(cx, t, v0, dtor_did, class_did, substs)
})
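Restated as ordinary Rust rather than IR-builder calls, the glue emitted above behaves roughly like the following model (hypothetical `run_dtor` stand-in for the real destructor call; the `llvm.debugtrap` is modeled as a panic):

const DTOR_NEEDED: u8 = 0xd4;
const DTOR_DONE: u8 = 0x1d;

fn drop_glue_model(flag: u8, sanity_checks: bool, run_dtor: fn()) {
    if sanity_checks && flag != DTOR_NEEDED && flag != DTOR_DONE {
        // Models the conditional llvm.debugtrap: the flag holds
        // neither valid value, so memory must have been corrupted.
        panic!("corrupted drop flag: {:#x}", flag);
    }
    if flag == DTOR_NEEDED {
        // Only run the destructor when the flag says it is pending.
        run_dtor();
    }
}

fn main() {
    fn noop() {}
    drop_glue_model(DTOR_NEEDED, true, noop); // destructor runs
    drop_glue_model(DTOR_DONE, true, noop);   // already dropped: no-op
}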
}
fn trans_struct_drop<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
@ -395,13 +418,24 @@ fn make_drop_glue<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, v0: ValueRef, t: Ty<'tcx>)
-> Block<'blk, 'tcx> {
// NB: v0 is an *alias* of type t here, not a direct value.
let _icx = push_ctxt("make_drop_glue");
// Only drop the value when it ... well, we used to check for
// non-null (and maybe we need to continue doing so), but we now
// must definitely check for special bit-patterns corresponding to
// the special dtor markings.
let inttype = Type::int(bcx.ccx());
let dropped_pattern = C_integral(inttype, adt::dtor_done_usize(bcx.fcx.ccx) as u64, false);
match t.sty {
ty::ty_uniq(content_ty) => {
if !type_is_sized(bcx.tcx(), content_ty) {
let llval = GEPi(bcx, v0, &[0, abi::FAT_PTR_ADDR]);
let llbox = Load(bcx, llval);
let not_null = IsNotNull(bcx, llbox);
with_cond(bcx, not_null, |bcx| {
let llbox_as_usize = PtrToInt(bcx, llbox, Type::int(bcx.ccx()));
let drop_flag_not_dropped_already =
ICmp(bcx, llvm::IntNE, llbox_as_usize, dropped_pattern, DebugLoc::None);
with_cond(bcx, drop_flag_not_dropped_already, |bcx| {
let bcx = drop_ty(bcx, v0, content_ty, DebugLoc::None);
let info = GEPi(bcx, v0, &[0, abi::FAT_PTR_EXTRA]);
let info = Load(bcx, info);
@ -420,8 +454,10 @@ fn make_drop_glue<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, v0: ValueRef, t: Ty<'tcx>)
} else {
let llval = v0;
let llbox = Load(bcx, llval);
let not_null = IsNotNull(bcx, llbox);
with_cond(bcx, not_null, |bcx| {
let llbox_as_usize = PtrToInt(bcx, llbox, inttype);
let drop_flag_not_dropped_already =
ICmp(bcx, llvm::IntNE, llbox_as_usize, dropped_pattern, DebugLoc::None);
with_cond(bcx, drop_flag_not_dropped_already, |bcx| {
let bcx = drop_ty(bcx, llbox, content_ty, DebugLoc::None);
trans_exchange_free_ty(bcx, llbox, content_ty, DebugLoc::None)
})

View File

@ -359,11 +359,18 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
&ccx.link_meta().crate_hash);
C_u64(ccx, hash)
}
(_, "init_dropped") => {
let tp_ty = *substs.types.get(FnSpace, 0);
if !return_type_is_void(ccx, tp_ty) {
drop_done_fill_mem(bcx, llresult, tp_ty);
}
C_nil(ccx)
}
(_, "init") => {
let tp_ty = *substs.types.get(FnSpace, 0);
if !return_type_is_void(ccx, tp_ty) {
// Just zero out the stack slot. (See comment on base::memfill for explanation)
zero_mem(bcx, llresult, tp_ty);
init_zero_mem(bcx, llresult, tp_ty);
}
C_nil(ccx)
}

View File

@ -5379,7 +5379,7 @@ pub fn check_intrinsic_type(ccx: &CrateCtxt, it: &ast::ForeignItem) {
"breakpoint" => (0, Vec::new(), ty::mk_nil(tcx)),
"size_of" |
"pref_align_of" | "min_align_of" => (1, Vec::new(), ccx.tcx.types.uint),
"init" => (1, Vec::new(), param(ccx, 0)),
"init" | "init_dropped" => (1, Vec::new(), param(ccx, 0)),
"uninit" => (1, Vec::new(), param(ccx, 0)),
"forget" => (1, vec!( param(ccx, 0) ), ty::mk_nil(tcx)),
"transmute" => (2, vec!( param(ccx, 0) ), param(ccx, 1)),

View File

@ -985,7 +985,7 @@ impl<K: Clone, V: Clone> Clone for RawTable<K, V> {
#[unsafe_destructor]
impl<K, V> Drop for RawTable<K, V> {
fn drop(&mut self) {
if self.capacity == 0 {
if self.capacity == 0 || self.capacity == mem::POST_DROP_USIZE {
return;
}

View File

@ -121,7 +121,7 @@
#![feature(unboxed_closures)]
#![feature(unicode)]
#![feature(unsafe_destructor)]
#![feature(unsafe_no_drop_flag)]
#![feature(unsafe_no_drop_flag, filling_drop)]
#![feature(macro_reexport)]
#![feature(int_uint)]
#![feature(unique)]

View File

@ -40,7 +40,7 @@ impl<T> MoveMap<T> for Vec<T> {
for p in &mut self {
unsafe {
// FIXME(#5016) this shouldn't need to zero to be safe.
ptr::write(p, f(ptr::read_and_zero(p)));
ptr::write(p, f(ptr::read_and_drop(p)));
}
}
self

View File

@ -71,8 +71,8 @@ impl<T: 'static> P<T> {
{
unsafe {
let p = &mut *self.ptr;
// FIXME(#5016) this shouldn't need to zero to be safe.
ptr::write(p, f(ptr::read_and_zero(p)));
// FIXME(#5016) this shouldn't need to drop-fill to be safe.
ptr::write(p, f(ptr::read_and_drop(p)));
}
self
}

View File

@ -0,0 +1,52 @@
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Test that we clean up a fixed-size Box<[D; k]> properly when D has a
// destructor.
use std::thread;
use std::sync::atomic::{AtomicUsize, ATOMIC_USIZE_INIT, Ordering};
static LOG: AtomicUsize = ATOMIC_USIZE_INIT;
struct D(u8);
impl Drop for D {
fn drop(&mut self) {
println!("Dropping {}", self.0);
let old = LOG.load(Ordering::SeqCst);
LOG.compare_and_swap(old, old << 4 | self.0 as usize, Ordering::SeqCst);
}
}
fn main() {
fn die() -> D { panic!("Oh no"); }
let g = thread::spawn(|| {
let _b1: Box<[D; 4]> = Box::new([D( 1), D( 2), D( 3), D( 4)]);
let _b2: Box<[D; 4]> = Box::new([D( 5), D( 6), D( 7), D( 8)]);
let _b3: Box<[D; 4]> = Box::new([D( 9), D(10), die(), D(12)]);
let _b4: Box<[D; 4]> = Box::new([D(13), D(14), D(15), D(16)]);
});
assert!(g.join().is_err());
// When the panic occurs, we will be in the midst of constructing
// the input to `_b3`. Therefore, we drop the elements of the
// partially filled array first, before we get around to dropping
// the elements of `_b1` and `_b2`.
// Issue 23222: The order in which the elements actually get
// dropped is a little funky. See similar notes in nested-vec-3;
// in essence, I would not be surprised if we change the ordering
// given in `expect` in the future.
let expect = 0x__A_9__5_6_7_8__1_2_3_4;
let actual = LOG.load(Ordering::SeqCst);
assert!(actual == expect, "expect: 0x{:x} actual: 0x{:x}", expect, actual);
}
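The `LOG` value used by these tests packs each drop into one hex nibble, oldest drop in the highest position; a standalone sketch of the same encoding (a 64-bit target is assumed, since ten nibbles need 40 bits):

fn main() {
    let drop_order: [usize; 10] = [0xA, 0x9, 0x5, 0x6, 0x7, 0x8, 0x1, 0x2, 0x3, 0x4];
    let mut log: usize = 0;
    for &id in drop_order.iter() {
        // Shift the history over one nibble and append this drop.
        log = log << 4 | id;
    }
    assert_eq!(log, 0x__A_9__5_6_7_8__1_2_3_4);
}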

View File

@ -0,0 +1,52 @@
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Test that we clean up a dynamically sized Box<[D]> properly when D has a
// destructor.
use std::thread;
use std::sync::atomic::{AtomicUsize, ATOMIC_USIZE_INIT, Ordering};
static LOG: AtomicUsize = ATOMIC_USIZE_INIT;
struct D(u8);
impl Drop for D {
fn drop(&mut self) {
println!("Dropping {}", self.0);
let old = LOG.load(Ordering::SeqCst);
LOG.compare_and_swap(old, old << 4 | self.0 as usize, Ordering::SeqCst);
}
}
fn main() {
fn die() -> D { panic!("Oh no"); }
let g = thread::spawn(|| {
let _b1: Box<[D]> = Box::new([D( 1), D( 2), D( 3), D( 4)]);
let _b2: Box<[D]> = Box::new([D( 5), D( 6), D( 7), D( 8)]);
let _b3: Box<[D]> = Box::new([D( 9), D(10), die(), D(12)]);
let _b4: Box<[D]> = Box::new([D(13), D(14), D(15), D(16)]);
});
assert!(g.join().is_err());
// When the panic occurs, we will be in the midst of constructing
// the input to `_b3`. Therefore, we drop the elements of the
// partially filled array first, before we get around to dropping
// the elements of `_b1` and `_b2`.
// Issue 23222: The order in which the elements actually get
// dropped is a little funky. See similar notes in nested-vec-3;
// in essence, I would not be surprised if we change the ordering
// given in `expect` in the future.
let expect = 0x__A_9__5_6_7_8__1_2_3_4;
let actual = LOG.load(Ordering::SeqCst);
assert!(actual == expect, "expect: 0x{:x} actual: 0x{:x}", expect, actual);
}

View File

@ -0,0 +1,69 @@
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// compile-flags: -Z force-dropflag-checks=on
// Quick-and-dirty test to ensure -Z force-dropflag-checks=on works as
// expected. Note that the inlined drop-flag is slated for removal
// (RFC 320); when that happens, the -Z flag and this test should
// simply be removed.
//
// See also drop-flag-skip-sanity-check.rs.
#![feature(old_io)]
use std::env;
use std::old_io::process::{Command, ExitSignal, ExitStatus};
fn main() {
let args: Vec<String> = env::args().collect();
if args.len() > 1 && args[1] == "test" {
return test();
}
let mut p = Command::new(&args[0]).arg("test").spawn().unwrap();
// The invocation should fail due to the drop-flag sanity check.
assert!(!p.wait().unwrap().success());
}
#[derive(Debug)]
struct Corrupted {
x: u8
}
impl Drop for Corrupted {
fn drop(&mut self) { println!("dropping"); }
}
fn test() {
{
let mut c1 = Corrupted { x: 1 };
let mut c2 = Corrupted { x: 2 };
unsafe {
let p1 = &mut c1 as *mut Corrupted as *mut u8;
let p2 = &mut c2 as *mut Corrupted as *mut u8;
for i in 0..std::mem::size_of::<Corrupted>() {
// corrupt everything, *including* the drop flag.
//
// (We corrupt via two different means to safeguard
// against the hypothetical assignment of the
// dtor_needed/dtor_done values to v and v+k that
// happen to match one of the corruption values
// below.)
*p1.offset(i as isize) += 2;
*p2.offset(i as isize) += 3;
}
}
// Here, at the end of the scope of `c1` and `c2`, the
// drop-glue should detect the corruption of (at least one of)
// the drop-flags.
}
println!("We should never get here.");
}

View File

@ -0,0 +1,69 @@
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// compile-flags: -Z force-dropflag-checks=off
// Quick-and-dirty test to ensure -Z force-dropflag-checks=off works as
// expected. Note that the inlined drop-flag is slated for removal
// (RFC 320); when that happens, the -Z flag and this test should
// simply be removed.
//
// See also drop-flag-sanity-check.rs.
#![feature(old_io)]
use std::env;
use std::old_io::process::{Command, ExitSignal, ExitStatus};
fn main() {
let args: Vec<String> = env::args().collect();
if args.len() > 1 && args[1] == "test" {
return test();
}
let mut p = Command::new(&args[0]).arg("test").spawn().unwrap();
// Invocation should succeed, as the drop-flag sanity check is skipped.
assert!(p.wait().unwrap().success());
}
#[derive(Debug)]
struct Corrupted {
x: u8
}
impl Drop for Corrupted {
fn drop(&mut self) { println!("dropping"); }
}
fn test() {
{
let mut c1 = Corrupted { x: 1 };
let mut c2 = Corrupted { x: 2 };
unsafe {
let p1 = &mut c1 as *mut Corrupted as *mut u8;
let p2 = &mut c2 as *mut Corrupted as *mut u8;
for i in 0..std::mem::size_of::<Corrupted>() {
// corrupt everything, *including* the drop flag.
//
// (We corrupt via two different means to safeguard
// against the hypothetical assignment of the
// dtor_needed/dtor_done values to v and v+k that
// happen to match one of the corruption values
// below.)
*p1.offset(i as isize) += 2;
*p2.offset(i as isize) += 3;
}
}
// Here, at the end of the scope of `c1` and `c2`, the
// drop-glue should detect the corruption of (at least one of)
// the drop-flags.
}
println!("We should never get here.");
}

View File

@ -13,8 +13,10 @@
#![allow(unknown_features)]
#![feature(box_syntax)]
#![feature(intrinsics)]
// needed to check for the drop-fill word.
#![feature(filling_drop)]
use std::mem::transmute;
use std::mem::{self, transmute};
mod rusti {
extern "rust-intrinsic" {
@ -30,6 +32,7 @@ pub fn main() {
let mut z: *const uint = transmute(&x);
rusti::move_val_init(&mut y, x);
assert_eq!(*y, 1);
assert_eq!(*z, 0); // `x` is nulled out, not directly visible
// `x` is nulled out, not directly visible
assert_eq!(*z, mem::POST_DROP_USIZE);
}
}

View File

@ -0,0 +1,16 @@
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Test that using the `vec!` macro nested within itself works
fn main() {
let nested = vec![vec![1u32, 2u32, 3u32]];
assert_eq!(nested[0][1], 2);
}

View File

@ -0,0 +1,23 @@
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Test that using the `vec!` macro nested within itself works
// when the contents implement Drop
struct D(u32);
impl Drop for D {
fn drop(&mut self) { println!("Dropping {}", self.0); }
}
fn main() {
let nested = vec![vec![D(1u32), D(2u32), D(3u32)]];
assert_eq!(nested[0][1].0, 2);
}

View File

@ -0,0 +1,60 @@
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Test that using the `vec!` macro nested within itself works when
// the contents implement Drop and we hit a panic in the middle of
// construction.
use std::thread;
use std::sync::atomic::{AtomicUsize, ATOMIC_USIZE_INIT, Ordering};
static LOG: AtomicUsize = ATOMIC_USIZE_INIT;
struct D(u8);
impl Drop for D {
fn drop(&mut self) {
println!("Dropping {}", self.0);
let old = LOG.load(Ordering::SeqCst);
LOG.compare_and_swap(old, old << 4 | self.0 as usize, Ordering::SeqCst);
}
}
fn main() {
fn die() -> D { panic!("Oh no"); }
let g = thread::spawn(|| {
let _nested = vec![vec![D( 1), D( 2), D( 3), D( 4)],
vec![D( 5), D( 6), D( 7), D( 8)],
vec![D( 9), D(10), die(), D(12)],
vec![D(13), D(14), D(15), D(16)]];
});
assert!(g.join().is_err());
// When the panic occurs, we will be in the midst of constructing the
// second inner vector. Therefore, we drop the elements of the
// partially filled vector first, before we get around to dropping
// the elements of the filled vector.
// Issue 23222: The order in which the elements actually get
// dropped is a little funky: as noted above, we'll drop the 9+10
// first, but due to #23222, they get dropped in reverse
// order. Likewise, again due to #23222, we will drop the second
// filled vec before the first filled vec.
//
// If Issue 23222 is "fixed", then presumably the corrected
// expected order of events will be 0x__9_A__1_2_3_4__5_6_7_8;
// that is, we would still drop 9+10 first, since they belong to
// the more deeply nested expression when the panic occurs.
let expect = 0x__A_9__5_6_7_8__1_2_3_4;
let actual = LOG.load(Ordering::SeqCst);
assert!(actual == expect, "expect: 0x{:x} actual: 0x{:x}", expect, actual);
}