// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

#[doc(hidden)];

use libc::c_void;
use ptr;
use unstable::intrinsics::TyDesc;
use unstable::raw;

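// Shape of a box's drop glue: a borrowed closure taking the box's type
// descriptor and an opaque pointer to the data to destroy.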
type DropGlue<'self> = 'self |**TyDesc, *c_void|;

/*
 * Box annihilation
 *
 * This runs at task death to free all boxes.
 */
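//
// Annihilation proceeds in three passes (see `annihilate` below): managed
// boxes are first made immortal, so that a destructor running in pass 2
// cannot free a box that another destructor still reaches; then every
// destructor is run; only then is the memory itself released.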
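/// Tallies gathered while annihilating, reported when `debug_mem()` is on.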
struct AnnihilateStats {
    n_total_boxes: uint,
    n_unique_boxes: uint,
    n_bytes_freed: uint
}

unsafe fn each_live_alloc(read_next_before: bool,
                          f: |box: *mut raw::Box<()>, uniq: bool| -> bool)
                          -> bool {
    //! Walks the internal list of allocations
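    //!
    //! Calls `f` on each live box, passing a flag that marks whether the
    //! box is unique-managed. When `read_next_before` is true, the `next`
    //! pointer is read before invoking `f`, so `f` may safely free the
    //! current box.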

    use managed;
    use rt::local_heap;

    let mut box = local_heap::live_allocs();
    while box != ptr::mut_null() {
        let next_before = (*box).next;
        let uniq = (*box).ref_count == managed::RC_MANAGED_UNIQUE;

        if !f(box as *mut raw::Box<()>, uniq) {
            return false;
        }

        if read_next_before {
            box = next_before;
        } else {
            box = (*box).next;
        }
    }
    return true;
}

#[cfg(unix)]
fn debug_mem() -> bool {
    // XXX: Need to port the environment struct to newsched
    false
}

#[cfg(windows)]
fn debug_mem() -> bool {
    false
}

/// Destroys all managed memory (i.e. @ boxes) held by the current task.
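///
/// Runs at task death (see the module comment above), walking the task's
/// local heap via `each_live_alloc` in three passes: immortalize, drop, free.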
pub unsafe fn annihilate() {
    use rt::local_heap::local_free;
    use mem;
    use managed;

    let mut stats = AnnihilateStats {
        n_total_boxes: 0,
        n_unique_boxes: 0,
        n_bytes_freed: 0
    };

    // Pass 1: Make all boxes immortal.
    //
    // In this pass, nothing gets freed, so it does not matter whether
    // we read the next field before or after the callback.
    do each_live_alloc(true) |box, uniq| {
        stats.n_total_boxes += 1;
        if uniq {
            stats.n_unique_boxes += 1;
        } else {
            (*box).ref_count = managed::RC_IMMORTAL;
        }
        true
    };

    // Pass 2: Drop all boxes.
    //
    // In this pass, unique-managed boxes may get freed, but not
    // managed boxes, so we must read the `next` field *after* the
    // callback, as the original value may have been freed.
    do each_live_alloc(false) |box, uniq| {
        if !uniq {
            let tydesc = (*box).type_desc;
            let data = &(*box).data as *();
            ((*tydesc).drop_glue)(data as *i8);
        }
        true
    };

    // Pass 3: Free all boxes.
    //
    // In this pass, managed boxes may get freed (but not
    // unique-managed boxes, though I think that none of those are
    // left), so we must read the `next` field before, since it will
    // not be valid after.
    do each_live_alloc(true) |box, uniq| {
        if !uniq {
            stats.n_bytes_freed +=
                (*((*box).type_desc)).size
                + mem::size_of::<raw::Box<()>>();
            local_free(box as *i8);
        }
        true
    };

    if debug_mem() {
        // We do logging here w/o allocation.
        debug!("annihilator stats:\n \
                total boxes: {}\n \
                unique boxes: {}\n \
                bytes freed: {}",
               stats.n_total_boxes, stats.n_unique_boxes, stats.n_bytes_freed);
    }
}
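
// A minimal sketch of the call site (hypothetical; the real caller lives in
// the runtime's task-teardown path, not in this file):
//
//     unsafe { cleanup::annihilate(); }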