auto merge of #14322 : thestinger/rust/secret_santa_heap, r=alexcrichton

bors 2014-05-22 01:06:25 -07:00
commit 022a7b3cfb
8 changed files with 67 additions and 45 deletions
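In short: this commit begins the port of the exchange (unique-pointer) heap to a sized deallocation API. The `exchange_free` lang item now receives the size and alignment of the allocation, the libcore shims `rust_malloc`/`rust_free` become `rust_allocate`/`rust_deallocate`, and trans threads the boxed content type (`content_ty`) through cleanup scheduling so drop glue can compute both values at compile time. Call sites that cannot know the size yet (the old `~[T]`/`~str` representations and closures) pass a `0, 8` placeholder, tracked by FIXME #13994.

As orientation only (this is not part of the diff), stable Rust later settled on the same API shape: deallocation takes the same `Layout` the allocation was made with, so the allocator never has to look the size up:

    use std::alloc::{alloc, dealloc, Layout};

    fn main() {
        // Sized deallocation: `dealloc` must be given the same size and
        // alignment (`Layout`) that was passed to `alloc`, mirroring the
        // `exchange_free(ptr, size, align)` signature introduced below.
        let layout = Layout::from_size_align(64, 8).unwrap();
        unsafe {
            let p = alloc(layout);
            assert!(!p.is_null());
            dealloc(p, layout);
        }
    }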

View File

@@ -9,7 +9,7 @@
 // except according to those terms.

 // FIXME: #13994: port to the sized deallocation API when available
-// FIXME: #13996: need a way to mark the `allocate` and `reallocate` return values as `noalias`
+// FIXME: #13996: mark the `allocate` and `reallocate` return value as `noalias` and `nonnull`

 use core::intrinsics::{abort, cttz32};
 use core::option::{None, Option};
@@ -119,14 +119,8 @@ pub fn stats_print() {
 /// The allocator for unique pointers.
 #[cfg(not(test))]
 #[lang="exchange_malloc"]
-#[inline(always)]
-pub unsafe fn exchange_malloc_(size: uint, align: uint) -> *mut u8 {
-    exchange_malloc(size, align)
-}
-
-/// The allocator for unique pointers.
 #[inline]
-pub unsafe fn exchange_malloc(size: uint, align: uint) -> *mut u8 {
+unsafe fn exchange_malloc(size: uint, align: uint) -> *mut u8 {
     // The compiler never calls `exchange_free` on ~ZeroSizeType, so zero-size
     // allocations can point to this `static`. It would be incorrect to use a null
     // pointer, due to enums assuming types like unique pointers are never null.
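That comment is load-bearing: `Box` participates in the non-null layout optimization, so a zero-size allocation must hand back some non-null pointer; null would collide with `None`. A one-line illustration in present-day Rust (not this commit's code):

    use std::mem::size_of;

    fn main() {
        // `None::<Box<u8>>` is represented as the null pointer, so the
        // `Option` costs nothing -- and a null zero-size allocation would
        // be indistinguishable from `None`.
        assert_eq!(size_of::<Option<Box<u8>>>(), size_of::<Box<u8>>());
    }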
@@ -139,14 +133,20 @@ pub unsafe fn exchange_malloc(size: uint, align: uint) -> *mut u8 {
     }
 }

-#[cfg(not(test))]
+#[cfg(not(test), stage0)]
 #[lang="exchange_free"]
 #[inline]
 // FIXME: #13994 (rustc should pass align and size here)
 unsafe fn exchange_free(ptr: *mut u8) {
     deallocate(ptr, 0, 8);
 }

+#[cfg(not(test), not(stage0))]
+#[lang="exchange_free"]
+#[inline]
+unsafe fn exchange_free(ptr: *mut u8, size: uint, align: uint) {
+    deallocate(ptr, size, align);
+}
+
 // FIXME: #7496
 #[cfg(not(test))]
 #[lang="closure_exchange_malloc"]
@@ -167,8 +167,8 @@ unsafe fn closure_exchange_malloc(drop_glue: fn(*mut u8), size: uint, align: uint)
 #[doc(hidden)]
 #[deprecated]
 #[cfg(not(test))]
-pub unsafe extern "C" fn rust_malloc(size: uint, align: uint) -> *mut u8 {
-    exchange_malloc(size, align)
+pub unsafe extern "C" fn rust_allocate(size: uint, align: uint) -> *mut u8 {
+    allocate(size, align)
 }

 // hack for libcore
@@ -176,7 +176,7 @@ pub unsafe extern "C" fn rust_malloc(size: uint, align: uint) -> *mut u8 {
 #[doc(hidden)]
 #[deprecated]
 #[cfg(not(test))]
-pub unsafe extern "C" fn rust_free(ptr: *mut u8, size: uint, align: uint) {
+pub unsafe extern "C" fn rust_deallocate(ptr: *mut u8, size: uint, align: uint) {
     deallocate(ptr, size, align)
 }

View File

@@ -38,7 +38,7 @@ use std::mem;
 use std::num;
 use std::ptr::read;
 use std::rc::Rc;
-use std::rt::heap::exchange_malloc;
+use std::rt::heap::allocate;

 // The way arena uses arrays is really deeply awful. The arrays are
 // allocated, and have capacities reserved, but the fill for the array
@@ -358,8 +358,7 @@ impl<T> TypedArenaChunk<T> {
         size = size.checked_add(&elems_size).unwrap();

         let mut chunk = unsafe {
-            let chunk = exchange_malloc(size,
-                                        mem::min_align_of::<TypedArenaChunk<T>>());
+            let chunk = allocate(size, mem::min_align_of::<TypedArenaChunk<T>>());
             let mut chunk: Box<TypedArenaChunk<T>> = mem::transmute(chunk);
             mem::overwrite(&mut chunk.next, next);
             chunk

View File

@@ -44,14 +44,14 @@ use str::StrSlice;
 #[allow(ctypes)]
 extern {
-    fn rust_malloc(size: uint, align: uint) -> *u8;
-    fn rust_free(ptr: *u8, size: uint, align: uint);
+    fn rust_allocate(size: uint, align: uint) -> *u8;
+    fn rust_deallocate(ptr: *u8, size: uint, align: uint);
 }

 unsafe fn alloc(cap: uint) -> *mut Vec<()> {
     let cap = cap.checked_add(&mem::size_of::<Vec<()>>()).unwrap();
     // this should use the real alignment, but the new representation will take care of that
-    let ret = rust_malloc(cap, 8) as *mut Vec<()>;
+    let ret = rust_allocate(cap, 8) as *mut Vec<()>;
     if ret.is_null() {
         intrinsics::abort();
     }
@@ -119,7 +119,7 @@ impl FromIterator<char> for ~str {
                 &(*ptr).data,
                 len);
             // FIXME: #13994: port to the sized deallocation API when available
-            rust_free(ptr as *u8, 0, 8);
+            rust_deallocate(ptr as *u8, 0, 8);
             mem::forget(ret);
             ret = mem::transmute(ptr2);
             ptr = ptr2;
@@ -191,7 +191,7 @@ impl<A: Clone> Clone for ~[A] {
                 for j in range(0, *i as int) {
                     ptr::read(&*p.offset(j));
                 }
-                rust_free(ret as *u8, 0, 8);
+                rust_deallocate(ret as *u8, 0, 8);
             });
             mem::transmute(ret)
         }

View File

@@ -278,13 +278,14 @@ impl<'a> CleanupMethods<'a> for FunctionContext<'a> {
     fn schedule_free_value(&self,
                            cleanup_scope: ScopeId,
                            val: ValueRef,
-                           heap: Heap) {
+                           heap: Heap,
+                           content_ty: ty::t) {
         /*!
          * Schedules a call to `free(val)`. Note that this is a shallow
         * operation.
         */

-        let drop = box FreeValue { ptr: val, heap: heap };
+        let drop = box FreeValue { ptr: val, heap: heap, content_ty: content_ty };

         debug!("schedule_free_value({:?}, val={}, heap={:?})",
                cleanup_scope,
@@ -847,6 +848,7 @@ pub enum Heap {
 pub struct FreeValue {
     ptr: ValueRef,
     heap: Heap,
+    content_ty: ty::t
 }

 impl Cleanup for FreeValue {
@@ -860,7 +862,7 @@ impl Cleanup for FreeValue {
                     glue::trans_free(bcx, self.ptr)
                 }
                 HeapExchange => {
-                    glue::trans_exchange_free(bcx, self.ptr)
+                    glue::trans_exchange_free_ty(bcx, self.ptr, self.content_ty)
                 }
             }
         }
@@ -931,7 +933,8 @@ pub trait CleanupMethods<'a> {
     fn schedule_free_value(&self,
                            cleanup_scope: ScopeId,
                            val: ValueRef,
-                           heap: Heap);
+                           heap: Heap,
+                           content_ty: ty::t);
     fn schedule_clean(&self,
                       cleanup_scope: ScopeId,
                       cleanup: Box<Cleanup>);

View File

@@ -1183,7 +1183,7 @@ fn trans_uniq_expr<'a>(bcx: &'a Block<'a>,
     } else {
         let custom_cleanup_scope = fcx.push_custom_cleanup_scope();
         fcx.schedule_free_value(cleanup::CustomScope(custom_cleanup_scope),
-                                val, cleanup::HeapExchange);
+                                val, cleanup::HeapExchange, contents_ty);
         let bcx = trans_into(bcx, contents, SaveIn(val));
         fcx.pop_custom_cleanup_scope(custom_cleanup_scope);
         bcx
@@ -1205,7 +1205,7 @@ fn trans_managed_expr<'a>(bcx: &'a Block<'a>,
     let custom_cleanup_scope = fcx.push_custom_cleanup_scope();
     fcx.schedule_free_value(cleanup::CustomScope(custom_cleanup_scope),
-                            bx, cleanup::HeapManaged);
+                            bx, cleanup::HeapManaged, contents_ty);
     let bcx = trans_into(bcx, contents, SaveIn(body));
     fcx.pop_custom_cleanup_scope(custom_cleanup_scope);
     immediate_rvalue_bcx(bcx, bx, box_ty).to_expr_datumblock()
@@ -1789,13 +1789,14 @@ fn deref_once<'a>(bcx: &'a Block<'a>,
             let scope = cleanup::temporary_scope(bcx.tcx(), expr.id);
             let ptr = Load(bcx, datum.val);
             if !type_is_zero_size(bcx.ccx(), content_ty) {
-                bcx.fcx.schedule_free_value(scope, ptr, cleanup::HeapExchange);
+                bcx.fcx.schedule_free_value(scope, ptr, cleanup::HeapExchange, content_ty);
             }
         }
         RvalueExpr(Rvalue { mode: ByValue }) => {
             let scope = cleanup::temporary_scope(bcx.tcx(), expr.id);
             if !type_is_zero_size(bcx.ccx(), content_ty) {
-                bcx.fcx.schedule_free_value(scope, datum.val, cleanup::HeapExchange);
+                bcx.fcx.schedule_free_value(scope, datum.val, cleanup::HeapExchange,
+                                            content_ty);
             }
         }
         LvalueExpr => { }

View File

@@ -50,15 +50,30 @@ pub fn trans_free<'a>(cx: &'a Block<'a>, v: ValueRef) -> &'a Block<'a> {
                            Some(expr::Ignore)).bcx
 }

-pub fn trans_exchange_free<'a>(cx: &'a Block<'a>, v: ValueRef)
-                           -> &'a Block<'a> {
+fn trans_exchange_free<'a>(cx: &'a Block<'a>, v: ValueRef, size: u64,
+                           align: u64) -> &'a Block<'a> {
     let _icx = push_ctxt("trans_exchange_free");
+    let ccx = cx.ccx();
     callee::trans_lang_call(cx,
                             langcall(cx, None, "", ExchangeFreeFnLangItem),
-                            [PointerCast(cx, v, Type::i8p(cx.ccx()))],
+                            [PointerCast(cx, v, Type::i8p(ccx)), C_uint(ccx, size as uint), C_uint(ccx, align as uint)],
                             Some(expr::Ignore)).bcx
 }

+pub fn trans_exchange_free_ty<'a>(bcx: &'a Block<'a>, ptr: ValueRef,
+                                  content_ty: ty::t) -> &'a Block<'a> {
+    let sizing_type = sizing_type_of(bcx.ccx(), content_ty);
+    let content_size = llsize_of_alloc(bcx.ccx(), sizing_type);
+
+    // `Box<ZeroSizeType>` does not allocate.
+    if content_size != 0 {
+        let content_align = llalign_of_min(bcx.ccx(), sizing_type);
+        trans_exchange_free(bcx, ptr, content_size, content_align)
+    } else {
+        bcx
+    }
+}
+
 pub fn take_ty<'a>(bcx: &'a Block<'a>, v: ValueRef, t: ty::t)
                -> &'a Block<'a> {
     // NB: v is an *alias* of type t here, not a direct value.
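`trans_exchange_free_ty` is the piece that makes the sized API usable from drop glue: size and alignment are derived from the content type at translation time, and no free call is emitted at all for zero-size content. A hypothetical mirror of that logic as plain runtime Rust (illustrative names and modern `std::alloc`, not rustc's API):

    use std::alloc::{alloc, dealloc, Layout};

    // Free a `Box<T>`-style allocation with size/align computed from the
    // type, skipping zero-size content entirely, as the emitted glue does.
    unsafe fn exchange_free_ty<T>(ptr: *mut u8) {
        let layout = Layout::new::<T>();
        if layout.size() != 0 {
            dealloc(ptr, layout);
        }
    }

    fn main() {
        unsafe {
            let p = alloc(Layout::new::<u64>());
            assert!(!p.is_null());
            exchange_free_ty::<u64>(p);
        }
    }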
@@ -87,17 +102,15 @@ fn get_drop_glue_type(ccx: &CrateContext, t: ty::t) -> ty::t {
                 ty::ty_vec(_, None) | ty::ty_str => t,
                 _ => {
                     let llty = sizing_type_of(ccx, typ);
-                    // Unique boxes do not allocate for zero-size types. The standard
-                    // library may assume that `free` is never called on the pointer
-                    // returned for `Box<ZeroSizeType>`.
+                    // `Box<ZeroSizeType>` does not allocate.
                     if llsize_of_alloc(ccx, llty) == 0 {
                         ty::mk_i8()
                     } else {
                         ty::mk_uniq(tcx, ty::mk_i8())
                     }
                 }
             }
         }
         _ => t
     }
 }
@@ -285,20 +298,22 @@ fn make_drop_glue<'a>(bcx: &'a Block<'a>, v0: ValueRef, t: ty::t) -> &'a Block<'a> {
                 ty::ty_vec(mt, None) => {
                     with_cond(bcx, not_null, |bcx| {
                         let bcx = tvec::make_drop_glue_unboxed(bcx, llbox, mt.ty);
-                        trans_exchange_free(bcx, llbox)
+                        // FIXME: #13994: the old `Box<[T]>` will not support sized deallocation
+                        trans_exchange_free(bcx, llbox, 0, 8)
                     })
                 }
                 ty::ty_str => {
                     with_cond(bcx, not_null, |bcx| {
                         let unit_ty = ty::sequence_element_type(bcx.tcx(), t);
                         let bcx = tvec::make_drop_glue_unboxed(bcx, llbox, unit_ty);
-                        trans_exchange_free(bcx, llbox)
+                        // FIXME: #13994: the old `Box<str>` will not support sized deallocation
+                        trans_exchange_free(bcx, llbox, 0, 8)
                     })
                 }
                 _ => {
                     with_cond(bcx, not_null, |bcx| {
                         let bcx = drop_ty(bcx, llbox, content_ty);
-                        trans_exchange_free(bcx, llbox)
+                        trans_exchange_free_ty(bcx, llbox, content_ty)
                     })
                 }
             }
@@ -340,7 +355,8 @@ fn make_drop_glue<'a>(bcx: &'a Block<'a>, v0: ValueRef, t: ty::t) -> &'a Block<'a> {
                 Call(bcx, dtor, [PointerCast(bcx, cdata, Type::i8p(bcx.ccx()))], []);

                 // Free the environment itself
-                trans_exchange_free(bcx, env)
+                // FIXME: #13994: pass align and size here
+                trans_exchange_free(bcx, env, 0, 8)
             })
         }
         _ => {

View File

@@ -287,8 +287,11 @@ pub fn trans_uniq_vstore<'a>(bcx: &'a Block<'a>,

     // Create a temporary scope lest execution should fail while
     // constructing the vector.
     let temp_scope = fcx.push_custom_cleanup_scope();
+
+    // FIXME: #13994: the old `Box<[T]>` will not support sized deallocation, this is a placeholder
+    let content_ty = vt.unit_ty;
     fcx.schedule_free_value(cleanup::CustomScope(temp_scope),
-                            val, cleanup::HeapExchange);
+                            val, cleanup::HeapExchange, content_ty);

     let dataptr = get_dataptr(bcx, val);

View File

@@ -109,7 +109,7 @@ use ops::Drop;
 use option::{None, Option, Some};
 use ptr::RawPtr;
 use ptr;
-use rt::heap::{exchange_malloc, deallocate};
+use rt::heap::{allocate, deallocate};
 use unstable::finally::try_finally;
 use vec::Vec;
@@ -304,7 +304,7 @@ impl<'a, T: Clone> CloneableVector<T> for &'a [T] {
         unsafe {
             // this should pass the real required alignment
-            let ret = exchange_malloc(size, 8) as *mut RawVec<()>;
+            let ret = allocate(size, 8) as *mut RawVec<()>;
             let a_size = mem::size_of::<T>();
             let a_size = if a_size == 0 {1} else {a_size};
@@ -968,7 +968,7 @@ mod tests {
         assert_eq!(v_b[0], 2);
         assert_eq!(v_b[1], 3);

-        // Test on exchange heap.
+        // Test `Box<[T]>`
         let vec_unique = box [1, 2, 3, 4, 5, 6];
         let v_d = vec_unique.slice(1u, 6u).to_owned();
         assert_eq!(v_d.len(), 5u);