rust/src/libcore/unsafe.rs

//! Unsafe operations
export reinterpret_cast, forget, bump_box_refcount, transmute;
export transmute_mut, transmute_immut, transmute_region, transmute_mut_region;
export SharedMutableState, shared_mutable_state, clone_shared_mutable_state;
export get_shared_mutable_state, get_shared_immutable_state;
export unwrap_shared_mutable_state;
export Exclusive, exclusive, unwrap_exclusive;
export copy_lifetime;
use task::atomically;
#[abi = "rust-intrinsic"]
extern mod rusti {
fn forget<T>(-x: T);
fn reinterpret_cast<T, U>(e: T) -> U;
}
/// Casts the value at `src` to `U`. The two types must have the same size.
#[inline(always)]
unsafe fn reinterpret_cast<T, U>(src: &T) -> U {
rusti::reinterpret_cast(*src)
}
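// A minimal usage sketch (mirroring test_reinterpret_cast below), assuming an
// int and a uint have the same size on the target: bit-copy an int out through
// a borrowed pointer. The source is *not* consumed, so pair this with `forget`
// or `bump_box_refcount` when the copy aliases an owned or managed value.
//
//     let n = 1;
//     let m: uint = unsafe { reinterpret_cast(&n) };
//     assert m == 1u;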
/**
* Move a thing into the void
*
* The forget function will take ownership of the provided value but neglect
* to run any required cleanup or memory-management operations on it. This
* can be used for various acts of magick, particularly when using
* reinterpret_cast on managed pointer types.
*/
#[inline(always)]
unsafe fn forget<T>(-thing: T) { rusti::forget(move thing); }
/**
* Force-increment the reference count on a shared box. If used
* carelessly, this can leak the box. Use this in conjunction with transmute
* and/or reinterpret_cast when such calls would otherwise scramble a box's
* reference count.
*/
unsafe fn bump_box_refcount<T>(+t: @T) { forget(move t); }
/**
* Transform a value of one type into a value of another type.
* Both types must have the same size and alignment.
*
* # Example
*
* assert transmute(~"L") == ~[76u8, 0u8];
*/
unsafe fn transmute<L, G>(-thing: L) -> G {
debug!(">>> in transmute! <<<");
debug!("transmute 1: %?", &thing);
let newthing: G = reinterpret_cast(&thing);
forget(move thing);
debug!("transmute 2: %?", &newthing);
move newthing
}
/// Coerce an immutable reference to be mutable.
unsafe fn transmute_mut<T>(+ptr: &a/T) -> &a/mut T { transmute(move ptr) }
/// Coerce a mutable reference to be immutable.
unsafe fn transmute_immut<T>(+ptr: &a/mut T) -> &a/T { transmute(move ptr) }
/// Coerce a borrowed pointer to have an arbitrary associated region.
unsafe fn transmute_region<T>(+ptr: &a/T) -> &b/T { transmute(move ptr) }
/// Coerce a borrowed mutable pointer to have an arbitrary associated region.
unsafe fn transmute_mut_region<T>(+ptr: &a/mut T) -> &b/mut T {
transmute(move ptr)
}
/// Transforms the lifetime of the second pointer to match that of the first.
unsafe fn copy_lifetime<S,T>(_ptr: &a/S, ptr: &T) -> &a/T {
transmute_region(ptr)
}
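// Hypothetical sketch: given `parent: &a/Foo` and an `elem: &Bar` carved out
// of it via raw pointers, `copy_lifetime(parent, elem)` yields an `&a/Bar`
// whose region is tied back to `parent` rather than to the local borrow.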
/****************************************************************************
* Shared state & exclusive ARC
****************************************************************************/
// An unwrapper uses this protocol to communicate with the "other" task that
// drops the last refcount on an arc. Unfortunately this can't be a proper
// pipe protocol because the unwrapper has to access both stages at once.
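// Handshake sketch, as implemented by ArcDestruct's destructor and
// unwrap_shared_mutable_state below: the unwrapper stashes a
// (ChanOne<()>, PortOne<bool>) pair in the unwrapper slot and blocks on its
// own PortOne<()>. Whichever task later drops the count to zero sends () on
// that ChanOne to wake the unwrapper, then waits on the PortOne<bool>:
// `true` means the unwrapper took the data (so the waker just forgets the
// box); `false` means the unwrapper was killed and drop glue should run.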
type UnwrapProto = ~mut Option<(pipes::ChanOne<()>, pipes::PortOne<bool>)>;
struct ArcData<T> {
mut count: libc::intptr_t,
mut unwrapper: libc::uintptr_t, // either an UnwrapProto or 0
// FIXME(#3224) should be able to make this non-option to save memory, and
// in unwrap() use "let ~ArcData { data: result, _ } = thing" to unwrap it
mut data: Option<T>,
}
struct ArcDestruct<T> {
mut data: *libc::c_void,
drop unsafe {
if self.data.is_null() {
return; // Happens when destructing an unwrapper's handle.
}
do task::unkillable {
let data: ~ArcData<T> = unsafe::reinterpret_cast(&self.data);
let new_count = rustrt::rust_atomic_decrement(&mut data.count);
assert new_count >= 0;
if new_count == 0 {
// Were we really last, or should we hand off to an unwrapper?
// It's safe to not xchg because the unwrapper will set the
// unwrap lock *before* dropping his/her reference. In effect,
// being here means we're the only *awake* task with the data.
if data.unwrapper != 0 {
let p: UnwrapProto =
unsafe::reinterpret_cast(&data.unwrapper);
let (message, response) = option::swap_unwrap(p);
// Send 'ready' and wait for a response.
pipes::send_one(move message, ());
// Unkillable wait. Message guaranteed to come.
if pipes::recv_one(move response) {
// Other task got the data.
unsafe::forget(move data);
} else {
// Other task was killed. drop glue takes over.
}
} else {
// drop glue takes over.
}
} else {
unsafe::forget(move data);
}
}
}
}
fn ArcDestruct<T>(data: *libc::c_void) -> ArcDestruct<T> {
ArcDestruct {
data: data
}
}
unsafe fn unwrap_shared_mutable_state<T: Send>(+rc: SharedMutableState<T>)
-> T {
struct DeathThroes<T> {
mut ptr: Option<~ArcData<T>>,
mut response: Option<pipes::ChanOne<bool>>,
drop unsafe {
let response = option::swap_unwrap(&mut self.response);
// In case we get killed early, we need to tell the person who
// tried to wake us whether they should hand-off the data to us.
if task::failing() {
pipes::send_one(move response, false);
// Either this swap_unwrap or the one below (at "Got here")
// ought to run.
unsafe::forget(option::swap_unwrap(&mut self.ptr));
} else {
assert self.ptr.is_none();
pipes::send_one(move response, true);
}
}
}
do task::unkillable {
let ptr: ~ArcData<T> = unsafe::reinterpret_cast(&rc.data);
let (c1,p1) = pipes::oneshot(); // ()
let (c2,p2) = pipes::oneshot(); // bool
let server: UnwrapProto = ~mut Some((move c1,move p2));
let serverp: libc::uintptr_t = unsafe::transmute(move server);
// Try to put our server end in the unwrapper slot.
if rustrt::rust_compare_and_swap_ptr(&mut ptr.unwrapper, 0, serverp) {
// Got in. Step 0: Tell destructor not to run. We are now it.
rc.data = ptr::null();
// Step 1 - drop our own reference.
let new_count = rustrt::rust_atomic_decrement(&mut ptr.count);
assert new_count >= 0;
if new_count == 0 {
// We were the last owner. Can unwrap immediately.
// Also we have to free the server endpoints.
let _server: UnwrapProto = unsafe::transmute(move serverp);
option::swap_unwrap(&mut ptr.data)
// drop glue takes over.
} else {
// The *next* person who sees the refcount hit 0 will wake us.
let end_result =
DeathThroes { ptr: Some(move ptr),
response: Some(move c2) };
let mut p1 = Some(move p1); // argh
do task::rekillable {
pipes::recv_one(option::swap_unwrap(&mut p1));
}
// Got here. Back in the 'unkillable' without getting killed.
// Recover ownership of ptr, then take the data out.
let ptr = option::swap_unwrap(&mut end_result.ptr);
option::swap_unwrap(&mut ptr.data)
// drop glue takes over.
}
} else {
// Somebody else was trying to unwrap. Avoid guaranteed deadlock.
unsafe::forget(move ptr);
// Also we have to free the (rejected) server endpoints.
let _server: UnwrapProto = unsafe::transmute(move serverp);
fail ~"Another task is already unwrapping this ARC!";
}
}
}
/**
* COMPLETELY UNSAFE. Used as a primitive for the safe versions in std::arc.
*
* Data races between tasks can result in crashes and, with sufficient
* cleverness, arbitrary type coercion.
*/
type SharedMutableState<T: Send> = ArcDestruct<T>;
unsafe fn shared_mutable_state<T: Send>(+data: T) -> SharedMutableState<T> {
let data = ~ArcData { count: 1, unwrapper: 0, data: Some(move data) };
unsafe {
let ptr = unsafe::transmute(move data);
ArcDestruct(ptr)
}
}
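// Note on the idiom used by the accessors below: `reinterpret_cast(&rc.data)`
// conjures an owned ~ArcData<T> aliasing a box this function does not own, so
// each accessor must `forget` it again before returning; dropping it instead
// would free the shared box out from under every other handle.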
#[inline(always)]
unsafe fn get_shared_mutable_state<T: Send>(rc: &a/SharedMutableState<T>)
-> &a/mut T {
unsafe {
let ptr: ~ArcData<T> = unsafe::reinterpret_cast(&(*rc).data);
assert ptr.count > 0;
// Cast us back into the correct region
let r = unsafe::transmute_region(option::get_ref(&ptr.data));
unsafe::forget(move ptr);
return unsafe::transmute_mut(r);
}
}
#[inline(always)]
unsafe fn get_shared_immutable_state<T: Send>(rc: &a/SharedMutableState<T>)
-> &a/T {
unsafe {
let ptr: ~ArcData<T> = unsafe::reinterpret_cast(&(*rc).data);
assert ptr.count > 0;
// Cast us back into the correct region
let r = unsafe::transmute_region(option::get_ref(&ptr.data));
unsafe::forget(move ptr);
return r;
}
}
unsafe fn clone_shared_mutable_state<T: Send>(rc: &SharedMutableState<T>)
-> SharedMutableState<T> {
unsafe {
let ptr: ~ArcData<T> = unsafe::reinterpret_cast(&(*rc).data);
let new_count = rustrt::rust_atomic_increment(&mut ptr.count);
assert new_count >= 2;
unsafe::forget(move ptr);
}
ArcDestruct((*rc).data)
}
/****************************************************************************/
#[allow(non_camel_case_types)] // runtime type
type rust_little_lock = *libc::c_void;
#[abi = "cdecl"]
extern mod rustrt {
#[rust_stack]
fn rust_atomic_increment(p: &mut libc::intptr_t)
-> libc::intptr_t;
#[rust_stack]
fn rust_atomic_decrement(p: &mut libc::intptr_t)
-> libc::intptr_t;
#[rust_stack]
fn rust_compare_and_swap_ptr(address: &mut libc::uintptr_t,
oldval: libc::uintptr_t,
newval: libc::uintptr_t) -> bool;
fn rust_create_little_lock() -> rust_little_lock;
fn rust_destroy_little_lock(lock: rust_little_lock);
fn rust_lock_little_lock(lock: rust_little_lock);
fn rust_unlock_little_lock(lock: rust_little_lock);
}
struct LittleLock {
l: rust_little_lock,
drop { rustrt::rust_destroy_little_lock(self.l); }
}
fn LittleLock() -> LittleLock {
LittleLock {
l: rustrt::rust_create_little_lock()
}
}
impl LittleLock {
#[inline(always)]
unsafe fn lock<T>(f: fn() -> T) -> T {
struct Unlock {
l: rust_little_lock,
drop { rustrt::rust_unlock_little_lock(self.l); }
}
fn Unlock(l: rust_little_lock) -> Unlock {
Unlock {
l: l
}
}
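// The Unlock value is an RAII guard: its destructor releases the lock even if
// f() fails. Wrapping the critical section in `atomically` is meant to keep
// the task from being descheduled while it holds the lock.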
do atomically {
rustrt::rust_lock_little_lock(self.l);
let _r = Unlock(self.l);
f()
}
}
}
struct ExData<T: Send> { lock: LittleLock, mut failed: bool, mut data: T, }
/**
* An arc over mutable data that is protected by a lock. For library use only.
*/
struct Exclusive<T: Send> { x: SharedMutableState<ExData<T>> }
fn exclusive<T: Send>(+user_data: T) -> Exclusive<T> {
let data = ExData {
lock: LittleLock(), mut failed: false, mut data: user_data
};
Exclusive { x: unsafe { shared_mutable_state(move data) } }
}
impl<T: Send> Exclusive<T> {
// Duplicate an exclusive ARC, like std::arc::clone.
fn clone() -> Exclusive<T> {
Exclusive { x: unsafe { clone_shared_mutable_state(&self.x) } }
}
// Exactly like std::arc::mutex_arc.access(), but with the little_lock
// instead of a proper mutex. Same reason for being unsafe.
//
// Currently, scheduling operations (i.e., yielding, receiving on a pipe,
// accessing the provided condition variable) are prohibited while inside
// the exclusive. Supporting that is a work in progress.
#[inline(always)]
unsafe fn with<U>(f: fn(x: &mut T) -> U) -> U {
let rec = unsafe { get_shared_mutable_state(&self.x) };
do rec.lock.lock {
if rec.failed {
fail ~"Poisoned exclusive - another task failed inside!";
}
rec.failed = true;
let result = f(&mut rec.data);
rec.failed = false;
move result
}
}
#[inline(always)]
unsafe fn with_imm<U>(f: fn(x: &T) -> U) -> U {
do self.with |x| {
f(unsafe::transmute_immut(x))
}
}
}
// FIXME(#2585) make this a by-move method on the exclusive
fn unwrap_exclusive<T: Send>(+arc: Exclusive<T>) -> T {
let Exclusive { x: x } <- arc;
let inner = unsafe { unwrap_shared_mutable_state(move x) };
let ExData { data: data, _ } <- inner;
move data
}
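// Minimal usage sketch (see the tests below for fuller examples): clone a
// handle per task, mutate under the lock with `with`, drop the extra handles,
// then recover the data with `unwrap_exclusive`.
//
//     let total = exclusive(0u);
//     let total2 = total.clone();
//     do total2.with |n| { *n += 1u; }
//     { let _total2 = move total2; } // drop the extra handle before unwrapping
//     assert unwrap_exclusive(total) == 1u;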
/****************************************************************************
* Tests
****************************************************************************/
#[cfg(test)]
mod tests {
#[test]
fn test_reinterpret_cast() {
assert unsafe { reinterpret_cast(&1) } == 1u;
}
#[test]
fn test_bump_box_refcount() {
unsafe {
let box = @~"box box box"; // refcount 1
bump_box_refcount(box); // refcount 2
let ptr: *int = transmute(box); // refcount 2
let _box1: @~str = reinterpret_cast(&ptr);
let _box2: @~str = reinterpret_cast(&ptr);
assert *_box1 == ~"box box box";
assert *_box2 == ~"box box box";
// Will destroy _box1 and _box2. Without the bump, this would
// use-after-free. With too many bumps, it would leak.
}
}
#[test]
fn test_transmute() {
unsafe {
let x = @1;
let x: *int = transmute(x);
assert *x == 1;
let _x: @int = transmute(x);
}
}
#[test]
fn test_transmute2() {
unsafe {
assert transmute(~"L") == ~[76u8, 0u8];
}
}
#[test]
fn exclusive_arc() {
let mut futures = ~[];
let num_tasks = 10u;
let count = 10u;
let total = exclusive(~mut 0u);
for uint::range(0u, num_tasks) |_i| {
let total = total.clone();
vec::push(futures, future::spawn(|| {
for uint::range(0u, count) |_i| {
do total.with |count| {
**count += 1u;
}
}
}));
};
for futures.each |f| { f.get() }
do total.with |total| {
assert **total == num_tasks * count
};
}
#[test] #[should_fail] #[ignore(cfg(windows))]
fn exclusive_poison() {
// Tests that if one task fails inside of an exclusive, subsequent
// accesses will also fail.
let x = exclusive(1);
let x2 = x.clone();
do task::try {
do x2.with |one| {
assert *one == 2;
}
};
do x.with |one| {
assert *one == 1;
}
}
#[test]
fn exclusive_unwrap_basic() {
let x = exclusive(~~"hello");
assert unwrap_exclusive(x) == ~~"hello";
}
#[test]
fn exclusive_unwrap_contended() {
let x = exclusive(~~"hello");
let x2 = ~mut Some(x.clone());
do task::spawn {
let x2 = option::swap_unwrap(x2);
do x2.with |_hello| { }
task::yield();
}
assert unwrap_exclusive(x) == ~~"hello";
// Now try the same thing, but with the child task blocking.
let x = exclusive(~~"hello");
let x2 = ~mut Some(x.clone());
let mut res = None;
do task::task().future_result(|+r| res = Some(r)).spawn {
let x2 = option::swap_unwrap(x2);
assert unwrap_exclusive(x2) == ~~"hello";
}
// Have to get rid of our reference before blocking.
{ let _x = move x; } // FIXME(#3161) util::ignore doesn't work here
let res = option::swap_unwrap(&mut res);
future::get(&res);
}
#[test] #[should_fail] #[ignore(cfg(windows))]
fn exclusive_unwrap_conflict() {
let x = exclusive(~~"hello");
let x2 = ~mut Some(x.clone());
let mut res = None;
do task::task().future_result(|+r| res = Some(r)).spawn {
let x2 = option::swap_unwrap(x2);
assert unwrap_exclusive(x2) == ~~"hello";
}
assert unwrap_exclusive(x) == ~~"hello";
let res = option::swap_unwrap(&mut res);
future::get(&res);
}
#[test] #[ignore(cfg(windows))]
fn exclusive_unwrap_deadlock() {
// This is not guaranteed to get to the deadlock before being killed,
// but it will show up sometimes, and if the deadlock were not there,
// the test would nondeterministically fail.
let result = do task::try {
// a task that has two references to the same exclusive will
// deadlock when it unwraps. nothing to be done about that.
let x = exclusive(~~"hello");
let x2 = x.clone();
do task::spawn {
for 10.times { task::yield(); } // try to let the unwrapper go
fail; // punt it awake from its deadlock
}
let _z = unwrap_exclusive(x);
do x2.with |_hello| { }
};
assert result.is_err();
}
}