Removing more unnecessary unsafe blocks throughout

Alex Crichton 2013-04-23 19:33:33 -04:00
parent c089a17854
commit 4c08a8d6c3
8 changed files with 82 additions and 96 deletions
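The pattern in every hunk below is the same: under the rules of the time (and in all editions before Rust 2024's unsafe_op_in_unsafe_fn lint), the body of an unsafe fn is already an unsafe context, so a nested unsafe { ... } block adds nothing. A minimal sketch in today's syntax:

    // Inside an `unsafe fn`, unsafe operations need no extra block
    // (pre-2024-edition rules); the commented-out wrapper is the kind
    // of redundancy this commit strips out.
    unsafe fn read_word(p: *const usize) -> usize {
        // unsafe { *p }  // redundant wrapper
        *p                // already in an unsafe context
    }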

View File

@@ -231,66 +231,64 @@ unsafe fn walk_gc_roots(mem: Memory, sentinel: **Word, visitor: Visitor) {
     // the stack.
     let mut reached_sentinel = ptr::is_null(sentinel);
     for stackwalk::walk_stack |frame| {
-        unsafe {
-            let pc = last_ret;
-            let Segment {segment: next_segment, boundary: boundary} =
-                find_segment_for_frame(frame.fp, segment);
-            segment = next_segment;
-            // Each stack segment is bounded by a morestack frame. The
-            // morestack frame includes two return addresses, one for
-            // morestack itself, at the normal offset from the frame
-            // pointer, and then a second return address for the
-            // function prologue (which called morestack after
-            // determining that it had hit the end of the stack).
-            // Since morestack itself takes two parameters, the offset
-            // for this second return address is 3 greater than the
-            // return address for morestack.
-            let ret_offset = if boundary { 4 } else { 1 };
-            last_ret = *ptr::offset(frame.fp, ret_offset) as *Word;
+        let pc = last_ret;
+        let Segment {segment: next_segment, boundary: boundary} =
+            find_segment_for_frame(frame.fp, segment);
+        segment = next_segment;
+        // Each stack segment is bounded by a morestack frame. The
+        // morestack frame includes two return addresses, one for
+        // morestack itself, at the normal offset from the frame
+        // pointer, and then a second return address for the
+        // function prologue (which called morestack after
+        // determining that it had hit the end of the stack).
+        // Since morestack itself takes two parameters, the offset
+        // for this second return address is 3 greater than the
+        // return address for morestack.
+        let ret_offset = if boundary { 4 } else { 1 };
+        last_ret = *ptr::offset(frame.fp, ret_offset) as *Word;

-            if ptr::is_null(pc) {
-                loop;
-            }
+        if ptr::is_null(pc) {
+            loop;
+        }

-            let mut delay_reached_sentinel = reached_sentinel;
-            let sp = is_safe_point(pc);
-            match sp {
-                Some(sp_info) => {
-                    for walk_safe_point(frame.fp, sp_info) |root, tydesc| {
-                        // Skip roots until we see the sentinel.
-                        if !reached_sentinel {
-                            if root == sentinel {
-                                delay_reached_sentinel = true;
-                            }
-                            loop;
-                        }
+        let mut delay_reached_sentinel = reached_sentinel;
+        let sp = is_safe_point(pc);
+        match sp {
+            Some(sp_info) => {
+                for walk_safe_point(frame.fp, sp_info) |root, tydesc| {
+                    // Skip roots until we see the sentinel.
+                    if !reached_sentinel {
+                        if root == sentinel {
+                            delay_reached_sentinel = true;
+                        }
+                        loop;
+                    }

-                        // Skip null pointers, which can occur when a
-                        // unique pointer has already been freed.
-                        if ptr::is_null(*root) {
-                            loop;
-                        }
+                    // Skip null pointers, which can occur when a
+                    // unique pointer has already been freed.
+                    if ptr::is_null(*root) {
+                        loop;
+                    }

-                        if ptr::is_null(tydesc) {
-                            // Root is a generic box.
-                            let refcount = **root;
-                            if mem | task_local_heap != 0 && refcount != -1 {
-                                if !visitor(root, tydesc) { return; }
-                            } else if mem | exchange_heap != 0 && refcount == -1 {
-                                if !visitor(root, tydesc) { return; }
-                            }
-                        } else {
-                            // Root is a non-immediate.
-                            if mem | stack != 0 {
-                                if !visitor(root, tydesc) { return; }
-                            }
-                        }
-                    }
-                }
-                None => ()
-            }
-            reached_sentinel = delay_reached_sentinel;
-        }
+                    if ptr::is_null(tydesc) {
+                        // Root is a generic box.
+                        let refcount = **root;
+                        if mem | task_local_heap != 0 && refcount != -1 {
+                            if !visitor(root, tydesc) { return; }
+                        } else if mem | exchange_heap != 0 && refcount == -1 {
+                            if !visitor(root, tydesc) { return; }
+                        }
+                    } else {
+                        // Root is a non-immediate.
+                        if mem | stack != 0 {
+                            if !visitor(root, tydesc) { return; }
+                        }
+                    }
+                }
+            }
+            None => ()
+        }
+        reached_sentinel = delay_reached_sentinel;
     }
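The comment above holds the only subtle detail in this hunk: at a segment boundary the interesting return address lives three words past the normal slot, hence offset 4 instead of 1. A hedged sketch of that lookup with frame.fp treated as a *const usize (illustrative, not the 2013 API):

    // Mirrors `*ptr::offset(frame.fp, ret_offset) as *Word` above.
    unsafe fn return_addr(fp: *const usize, at_boundary: bool) -> *const usize {
        // 1 = the normal return-address slot; 4 = that slot plus
        // morestack's two arguments and its own return address.
        let ret_offset: isize = if at_boundary { 4 } else { 1 };
        *fp.offset(ret_offset) as *const usize
    }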

View File

@@ -156,9 +156,7 @@ pub impl PacketHeader {
     unsafe fn unblock(&self) {
         let old_task = swap_task(&mut self.blocked_task, ptr::null());
         if !old_task.is_null() {
-            unsafe {
-                rustrt::rust_task_deref(old_task)
-            }
+            rustrt::rust_task_deref(old_task)
         }
         match swap_state_acq(&mut self.state, Empty) {
             Empty | Blocked => (),
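unblock is a swap-then-release: atomically take whatever task pointer is in the header, and drop a reference only if one was actually stored. The same shape in today's Rust, with AtomicPtr standing in for swap_task and Box::from_raw for rust_task_deref (names illustrative):

    use std::ptr;
    use std::sync::atomic::{AtomicPtr, Ordering};

    struct Task;

    fn unblock(blocked_task: &AtomicPtr<Task>) {
        // Take ownership of the stored pointer, leaving null behind.
        let old_task = blocked_task.swap(ptr::null_mut(), Ordering::AcqRel);
        if !old_task.is_null() {
            // Release the reference we now own.
            unsafe { drop(Box::from_raw(old_task)) };
        }
    }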

View File

@@ -80,10 +80,8 @@ pub unsafe fn unsafe_borrow() -> &mut Scheduler {
 }

 pub unsafe fn unsafe_borrow_io() -> &mut IoFactoryObject {
-    unsafe {
-        let sched = unsafe_borrow();
-        return sched.event_loop.io().unwrap();
-    }
+    let sched = unsafe_borrow();
+    return sched.event_loop.io().unwrap();
 }

 fn tls_key() -> tls::Key {

View File

@@ -98,7 +98,7 @@ pub enum uv_req_type {
 pub unsafe fn malloc_handle(handle: uv_handle_type) -> *c_void {
     assert!(handle != UV_UNKNOWN_HANDLE && handle != UV_HANDLE_TYPE_MAX);
-    let size = unsafe { rust_uv_handle_size(handle as uint) };
+    let size = rust_uv_handle_size(handle as uint);
     let p = malloc(size);
     assert!(p.is_not_null());
     return p;
 }
@@ -110,7 +110,7 @@ pub unsafe fn free_handle(v: *c_void) {
 pub unsafe fn malloc_req(req: uv_req_type) -> *c_void {
     assert!(req != UV_UNKNOWN_REQ && req != UV_REQ_TYPE_MAX);
-    let size = unsafe { rust_uv_req_size(req as uint) };
+    let size = rust_uv_req_size(req as uint);
     let p = malloc(size);
     assert!(p.is_not_null());
     return p;
 }
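Both helpers follow one recipe: ask the C side how large the libuv struct is, allocate that many bytes, and assert the allocation succeeded before handing out the raw pointer. A sketch of the recipe with std::alloc standing in for malloc, and the size passed in rather than fetched from rust_uv_handle_size:

    use std::alloc::{alloc, Layout};

    // Hypothetical analogue of malloc_handle/malloc_req.
    unsafe fn malloc_uv_struct(size: usize) -> *mut u8 {
        let layout = Layout::from_size_align(size, 8).expect("bad layout");
        let p = alloc(layout);
        assert!(!p.is_null()); // mirrors assert!(p.is_not_null())
        p
    }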

View File

@@ -262,18 +262,16 @@ pub impl<T:Owned> Exclusive<T> {
     // the exclusive. Supporting that is a work in progress.
     #[inline(always)]
     unsafe fn with<U>(&self, f: &fn(x: &mut T) -> U) -> U {
-        unsafe {
-            let rec = get_shared_mutable_state(&self.x);
-            do (*rec).lock.lock {
-                if (*rec).failed {
-                    fail!(
-                        ~"Poisoned exclusive - another task failed inside!");
-                }
-                (*rec).failed = true;
-                let result = f(&mut (*rec).data);
-                (*rec).failed = false;
-                result
-            }
+        let rec = get_shared_mutable_state(&self.x);
+        do (*rec).lock.lock {
+            if (*rec).failed {
+                fail!(
+                    ~"Poisoned exclusive - another task failed inside!");
+            }
+            (*rec).failed = true;
+            let result = f(&mut (*rec).data);
+            (*rec).failed = false;
+            result
         }
     }
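The body of with is a poison protocol: failed is raised before the user closure runs and lowered afterwards, so a closure that fails leaves the flag set and the next caller hits the fail!. The protocol isolated as a sketch (ExclusiveState is a stand-in for the shared record):

    struct ExclusiveState { failed: bool /* plus lock and data */ }

    fn with<T>(rec: &mut ExclusiveState, f: impl FnOnce() -> T) -> T {
        if rec.failed {
            panic!("Poisoned exclusive - another task failed inside!");
        }
        rec.failed = true;
        let result = f(); // a panic here leaves failed == true
        rec.failed = false;
        result
    }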

View File

@@ -43,11 +43,11 @@ pub unsafe fn weaken_task(f: &fn(Port<ShutdownMsg>)) {
     let task = get_task_id();
     // Expect the weak task service to be alive
     assert!(service.try_send(RegisterWeakTask(task, shutdown_chan)));
-    unsafe { rust_dec_kernel_live_count(); }
+    rust_dec_kernel_live_count();
     do (|| {
         f(shutdown_port.take())
     }).finally || {
-        unsafe { rust_inc_kernel_live_count(); }
+        rust_inc_kernel_live_count();
         // Service my have already exited
         service.send(UnregisterWeakTask(task));
     }
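The finally block restores the kernel live count even when f fails, which is what lets the runtime begin shutdown while the weak task is still running. The modern spelling of that finally is a Drop guard; a sketch with stand-in counter functions:

    fn dec_kernel_live_count() { /* stand-in for rust_dec_kernel_live_count */ }
    fn inc_kernel_live_count() { /* stand-in for rust_inc_kernel_live_count */ }

    struct LiveCountGuard;
    impl Drop for LiveCountGuard {
        // Runs on normal return and during unwinding, like `finally`.
        fn drop(&mut self) { inc_kernel_live_count(); }
    }

    fn weaken<T>(f: impl FnOnce() -> T) -> T {
        dec_kernel_live_count();
        let _guard = LiveCountGuard;
        f()
    }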

View File

@@ -2628,13 +2628,11 @@ pub fn get_item_val(ccx: @CrateContext, id: ast::node_id) -> ValueRef {
         let class_ty = ty::lookup_item_type(tcx, parent_id).ty;
         // This code shouldn't be reached if the class is generic
         assert!(!ty::type_has_params(class_ty));
-        let lldty = unsafe {
-            T_fn(~[
+        let lldty = T_fn(~[
                 T_ptr(T_i8()),
                 T_ptr(type_of(ccx, class_ty))
             ],
-            T_nil())
-        };
+            T_nil());
         let s = get_dtor_symbol(ccx, /*bad*/copy *pt, dt.node.id, None);

         /* Make the declaration for the dtor */
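For reference, the T_fn(...) call builds the LLVM type of the destructor: a function taking an i8* environment pointer and a pointer to the class, returning nothing (T_nil). Written as a Rust function-pointer type, with ClassTy as a stand-in for the monomorphic class type:

    struct ClassTy;
    type DtorTy = unsafe extern "C" fn(*mut i8, *mut ClassTy);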

View File

@@ -177,15 +177,13 @@ pub impl<T:Owned> MutexARC<T> {
      */
     #[inline(always)]
     unsafe fn access<U>(&self, blk: &fn(x: &mut T) -> U) -> U {
-        unsafe {
-            let state = get_shared_mutable_state(&self.x);
-            // Borrowck would complain about this if the function were
-            // not already unsafe. See borrow_rwlock, far below.
-            do (&(*state).lock).lock {
-                check_poison(true, (*state).failed);
-                let _z = PoisonOnFail(&mut (*state).failed);
-                blk(&mut (*state).data)
-            }
+        let state = get_shared_mutable_state(&self.x);
+        // Borrowck would complain about this if the function were
+        // not already unsafe. See borrow_rwlock, far below.
+        do (&(*state).lock).lock {
+            check_poison(true, (*state).failed);
+            let _z = PoisonOnFail(&mut (*state).failed);
+            blk(&mut (*state).data)
         }
     }
@@ -195,16 +193,14 @@ pub impl<T:Owned> MutexARC<T> {
         &self,
         blk: &fn(x: &'x mut T, c: &'c Condvar) -> U) -> U
     {
-        unsafe {
-            let state = get_shared_mutable_state(&self.x);
-            do (&(*state).lock).lock_cond |cond| {
-                check_poison(true, (*state).failed);
-                let _z = PoisonOnFail(&mut (*state).failed);
-                blk(&mut (*state).data,
-                    &Condvar {is_mutex: true,
-                              failed: &mut (*state).failed,
-                              cond: cond })
-            }
+        let state = get_shared_mutable_state(&self.x);
+        do (&(*state).lock).lock_cond |cond| {
+            check_poison(true, (*state).failed);
+            let _z = PoisonOnFail(&mut (*state).failed);
+            blk(&mut (*state).data,
+                &Condvar {is_mutex: true,
+                          failed: &mut (*state).failed,
+                          cond: cond })
         }
     }
 }
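PoisonOnFail is the RAII version of the flag dance in Exclusive::with above: its destructor runs even when blk fails, and marks the state poisoned only in that case. A sketch of such a guard in today's Rust, assuming the 2013 original keyed off task::failing() the way std::thread::panicking() works now:

    struct PoisonOnFail<'a> { failed: &'a mut bool }

    impl Drop for PoisonOnFail<'_> {
        fn drop(&mut self) {
            if std::thread::panicking() {
                // A panic unwound through the critical section: poison it.
                *self.failed = true;
            }
        }
    }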