// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

use cast;
use comm;
use ptr;
use option::{Option,Some,None};
use task;
use unstable::atomics::{AtomicOption,AtomicUint,Acquire,Release,Relaxed,SeqCst};
use unstable::mutex::Mutex;
use ops::Drop;
use clone::Clone;
use kinds::Send;
use vec;

/// An atomically reference counted pointer.
///
/// Enforces no shared-memory safety.
//#[unsafe_no_drop_flag] FIXME: #9758
pub struct UnsafeArc<T> {
    data: *mut ArcData<T>,
}
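
// A minimal usage sketch (illustrative only, not part of the API): every
// handle aliases the same allocation, and `get` hands back a raw pointer
// with no synchronization at all, so callers must provide their own.
//
//     let (a, b) = UnsafeArc::new2(5);
//     unsafe { *a.get() = 6; }           // mutate through one handle
//     unsafe { assert!(*b.get() == 6); } // observe through the other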

pub enum UnsafeArcUnwrap<T> {
    UnsafeArcSelf(UnsafeArc<T>),
    UnsafeArcT(T)
}

#[cfg(test)]
impl<T> UnsafeArcUnwrap<T> {
    fn expect_t(self, msg: &'static str) -> T {
        match self {
            UnsafeArcSelf(_) => fail!(msg),
            UnsafeArcT(t) => t
        }
    }

    fn is_self(&self) -> bool {
        match *self {
            UnsafeArcSelf(_) => true,
            UnsafeArcT(_) => false
        }
    }
}

struct ArcData<T> {
    count: AtomicUint,
    // An unwrapper uses this protocol to communicate with the "other" task that
    // drops the last refcount on an arc. Unfortunately this can't be a proper
    // pipe protocol because the unwrapper has to access both stages at once.
    // FIXME(#7544): Maybe use AtomicPtr instead (to avoid xchg in take() later)?
    unwrapper: AtomicOption<(comm::ChanOne<()>, comm::PortOne<bool>)>,
    // FIXME(#3224) should be able to make this non-option to save memory
    data: Option<T>,
}
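
// The unwrap handshake, in order (see `UnsafeArc::unwrap` and the Drop impl
// below): the unwrapping task stores its (ChanOne<()>, PortOne<bool>) pair
// in `unwrapper` and drops its own reference; whichever task later drops the
// refcount to zero sends () on the first channel and blocks on the second;
// the woken unwrapper then replies true ("I took the data") or, if it was
// killed while waiting, its guard replies false ("free the data yourself").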

unsafe fn new_inner<T: Send>(data: T, refcount: uint) -> *mut ArcData<T> {
    let data = ~ArcData { count: AtomicUint::new(refcount),
                          unwrapper: AtomicOption::empty(),
                          data: Some(data) };
    cast::transmute(data)
}

/// A helper object used by `UnsafeArc::unwrap`.
struct ChannelAndDataGuard<T> {
    channel: Option<comm::ChanOne<bool>>,
    data: Option<~ArcData<T>>,
}

#[unsafe_destructor]
impl<T> Drop for ChannelAndDataGuard<T> {
    fn drop(&mut self) {
        if task::failing() {
            // Killed during wait. Because this might happen while
            // someone else still holds a reference, we can't free
            // the data now; the "other" last refcount will free it.
            unsafe {
                let channel = self.channel.take_unwrap();
                let data = self.data.take_unwrap();
                channel.send(false);
                cast::forget(data);
            }
        }
    }
}

impl<T> ChannelAndDataGuard<T> {
    fn unwrap(mut self) -> (comm::ChanOne<bool>, ~ArcData<T>) {
        (self.channel.take_unwrap(), self.data.take_unwrap())
    }
}

impl<T: Send> UnsafeArc<T> {
    pub fn new(data: T) -> UnsafeArc<T> {
        unsafe { UnsafeArc { data: new_inner(data, 1) } }
    }

    /// As new(), but returns an extra pre-cloned handle.
    pub fn new2(data: T) -> (UnsafeArc<T>, UnsafeArc<T>) {
        unsafe {
            let ptr = new_inner(data, 2);
            (UnsafeArc { data: ptr }, UnsafeArc { data: ptr })
        }
    }

    /// As new(), but returns a vector of as many pre-cloned handles as requested.
    pub fn newN(data: T, num_handles: uint) -> ~[UnsafeArc<T>] {
        unsafe {
            if num_handles == 0 {
                ~[] // No handles requested; `data` was never shared and is simply dropped here.
            } else {
                let ptr = new_inner(data, num_handles);
                vec::from_fn(num_handles, |_| UnsafeArc { data: ptr })
            }
        }
    }

    /// As newN(), but from an already-existing handle. Uses one xadd.
    pub fn cloneN(self, num_handles: uint) -> ~[UnsafeArc<T>] {
        if num_handles == 0 {
            ~[] // The "num_handles - 1" trick (below) fails in the 0 case.
        } else {
            unsafe {
                // Minus one because we are recycling the given handle's refcount.
                let old_count = (*self.data).count.fetch_add(num_handles - 1, Acquire);
                assert!(old_count >= 1);
                let ptr = self.data;
                cast::forget(self); // Don't run the destructor on this handle.
                vec::from_fn(num_handles, |_| UnsafeArc { data: ptr })
            }
        }
    }

    #[inline]
    pub fn get(&self) -> *mut T {
        unsafe {
            assert!((*self.data).count.load(Relaxed) > 0);
            let r: *mut T = (*self.data).data.get_mut_ref();
            return r;
        }
    }

    #[inline]
    pub fn get_immut(&self) -> *T {
        unsafe {
            assert!((*self.data).count.load(Relaxed) > 0);
            let r: *T = (*self.data).data.get_ref();
            return r;
        }
    }

    /// Wait until all other handles are dropped, then retrieve the enclosed
    /// data. See extra::arc::Arc for specific semantics documentation.
    /// If called when the task is already unkillable, unwrap will unkillably
    /// block; otherwise, an unwrapping task can be killed by linked failure.
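    ///
    /// A minimal sketch (mirrors `arclike_unwrap_basic` in the tests below):
    ///
    ///     let x = UnsafeArc::new(~"hello");
    ///     assert!(x.unwrap() == ~"hello"); // sole handle, so no blocking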
    pub fn unwrap(self) -> T {
        unsafe {
            let mut this = self;
            // The ~ dtor needs to run if this code succeeds.
            let mut data: ~ArcData<T> = cast::transmute(this.data);
            // Set up the unwrap protocol.
            let (p1,c1) = comm::oneshot(); // ()
            let (p2,c2) = comm::oneshot(); // bool
            // Try to put our server end in the unwrapper slot.
            // This needs no barrier -- it's protected by the release barrier on
            // the xadd, and the acquire+release barrier in the destructor's xadd.
            if data.unwrapper.fill(~(c1,p2), Relaxed).is_none() {
                // Got in. Tell this handle's destructor not to run (we are now it).
                this.data = ptr::mut_null();
                // Drop our own reference.
                let old_count = data.count.fetch_sub(1, Release);
                assert!(old_count >= 1);
                if old_count == 1 {
                    // We were the last owner. Can unwrap immediately.
                    // AtomicOption's destructor will free the server endpoint.
                    // FIXME(#3224): it should be like this
                    // let ~ArcData { data: user_data, _ } = data;
                    // user_data
                    data.data.take_unwrap()
                } else {
                    // The *next* person who sees the refcount hit 0 will wake us.
                    let c2_and_data = ChannelAndDataGuard {
                        channel: Some(c2),
                        data: Some(data),
                    };
                    p1.recv();
                    // Got here. Back in the 'unkillable' without getting killed.
                    let (c2, data) = c2_and_data.unwrap();
                    c2.send(true);
                    // FIXME(#3224): it should be like this
                    // let ~ArcData { data: user_data, _ } = data;
                    // user_data
                    let mut data = data;
                    data.data.take_unwrap()
                }
            } else {
                // If fill() returns the server end back to us, we were rejected;
                // someone else was trying to unwrap. Avoid guaranteed deadlock.
                cast::forget(data);
                fail!("Another task is already unwrapping this Arc!");
            }
        }
    }

    /// As unwrap above, but without blocking. Returns 'UnsafeArcSelf(self)' if this is
    /// not the last reference; 'UnsafeArcT(unwrapped_data)' if so.
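    ///
    /// A sketch (mirrors the tests below; note `is_self` is test-only):
    ///
    ///     let x = UnsafeArc::new(5);
    ///     let x2 = x.clone();
    ///     assert!(x.try_unwrap().is_self()); // x2 still holds a reference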
    pub fn try_unwrap(mut self) -> UnsafeArcUnwrap<T> {
        unsafe {
            // The ~ dtor needs to run if this code succeeds.
            let mut data: ~ArcData<T> = cast::transmute(self.data);
            // This can of course race with anybody else who has a handle, but in
            // such a case, the returned count will always be at least 2. If we
            // see 1, no race was possible. All that matters is 1 or not-1.
            let count = data.count.load(Acquire);
            assert!(count >= 1);
            // The more interesting race is one with an unwrapper. They may have
            // already dropped their count -- but if so, the unwrapper pointer
            // will have been set first, which the barriers ensure we will see.
            // (Note: using is_empty(), not take(), to not free the unwrapper.)
            if count == 1 && data.unwrapper.is_empty(Acquire) {
                // Tell this handle's destructor not to run (we are now it).
                self.data = ptr::mut_null();
                // FIXME(#3224) as above
                UnsafeArcT(data.data.take_unwrap())
            } else {
                cast::forget(data);
                UnsafeArcSelf(self)
            }
        }
    }
}

impl<T: Send> Clone for UnsafeArc<T> {
    fn clone(&self) -> UnsafeArc<T> {
        unsafe {
            // This barrier might be unnecessary, but I'm not sure...
            let old_count = (*self.data).count.fetch_add(1, Acquire);
            assert!(old_count >= 1);
            return UnsafeArc { data: self.data };
        }
    }
}

#[unsafe_destructor]
impl<T> Drop for UnsafeArc<T> {
    fn drop(&mut self) {
        unsafe {
            // Happens when destructing an unwrapper's handle and from `#[unsafe_no_drop_flag]`
            if self.data.is_null() {
                return
            }
            let mut data: ~ArcData<T> = cast::transmute(self.data);
            // Must be acquire+release, not just release, to make sure this
            // doesn't get reordered to after the unwrapper pointer load.
            let old_count = data.count.fetch_sub(1, SeqCst);
            assert!(old_count >= 1);
            if old_count == 1 {
                // Were we really last, or should we hand off to an
                // unwrapper? It's safe to not xchg because the unwrapper
                // will set the unwrap lock *before* dropping his/her
                // reference. In effect, being here means we're the only
                // *awake* task with the data.
                match data.unwrapper.take(Acquire) {
                    Some(~(message,response)) => {
                        // Send 'ready' and wait for a response.
                        message.send(());
                        // Unkillable wait. Message guaranteed to come.
                        if response.recv() {
                            // Other task got the data.
                            cast::forget(data);
                        } else {
                            // Other task was killed. drop glue takes over.
                        }
                    }
                    None => {
                        // drop glue takes over.
                    }
                }
            } else {
                cast::forget(data);
            }
        }
    }
}

/****************************************************************************/

pub struct AtomicGuard {
    on: bool,
}

impl Drop for AtomicGuard {
    fn drop(&mut self) {
        use rt::task::{Task, GreenTask, SchedTask};
        use rt::local::Local;

        if self.on {
            unsafe {
                let task_opt: Option<*mut Task> = Local::try_unsafe_borrow();
                match task_opt {
                    Some(t) => {
                        match (*t).task_type {
                            GreenTask(_) => (*t).death.allow_deschedule(),
                            SchedTask => {}
                        }
                    }
                    None => {}
                }
            }
        }
    }
}

/**
 * Enables a runtime assertion that no operation while the returned guard is
 * live uses scheduler operations (deschedule, recv, spawn, etc). This is for
 * use with pthread mutexes, which may block the entire scheduler thread,
 * rather than just one task, and is hence prone to deadlocks if mixed with
 * descheduling.
 *
 * NOTE: THIS DOES NOT PROVIDE LOCKING, or any sort of critical-section
 * synchronization whatsoever. It only makes sense to use for CPU-local issues.
 */
// FIXME(#8140) should not be pub
pub unsafe fn atomic() -> AtomicGuard {
    use rt::task::{Task, GreenTask, SchedTask};
    use rt::local::Local;

    let task_opt: Option<*mut Task> = Local::try_unsafe_borrow();
    match task_opt {
        Some(t) => {
            match (*t).task_type {
                GreenTask(_) => {
                    (*t).death.inhibit_deschedule();
                    return AtomicGuard {
                        on: true,
                    };
                }
                SchedTask => {}
            }
        }
        None => {}
    }

    AtomicGuard {
        on: false,
    }
}
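
// A usage sketch (illustrative): hold the guard across a pthread-mutex
// critical section so that an accidental deschedule aborts loudly instead
// of silently deadlocking the scheduler thread.
//
//     unsafe {
//         let _guard = atomic();
//         // ... CPU-local work; no send, recv, spawn, or deschedule ...
//     } // guard drops here, re-allowing descheduling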

pub struct LittleLock {
    priv l: Mutex,
}

pub struct LittleGuard<'a> {
    priv l: &'a mut Mutex,
}

impl Drop for LittleLock {
    fn drop(&mut self) {
        unsafe { self.l.destroy(); }
    }
}

#[unsafe_destructor]
impl<'a> Drop for LittleGuard<'a> {
    fn drop(&mut self) {
        unsafe { self.l.unlock(); }
    }
}

impl LittleLock {
    pub fn new() -> LittleLock {
        unsafe { LittleLock { l: Mutex::new() } }
    }

    pub unsafe fn lock<'a>(&'a mut self) -> LittleGuard<'a> {
        self.l.lock();
        LittleGuard { l: &mut self.l }
    }

    pub unsafe fn try_lock<'a>(&'a mut self) -> Option<LittleGuard<'a>> {
        if self.l.trylock() {
            Some(LittleGuard { l: &mut self.l })
        } else {
            None
        }
    }

    pub unsafe fn signal(&mut self) {
        self.l.signal();
    }
}

impl<'a> LittleGuard<'a> {
    pub unsafe fn wait(&mut self) {
        self.l.wait();
    }
}
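
// A usage sketch (illustrative): unlocking happens in LittleGuard's
// destructor, so the lock is released when the guard falls out of scope.
//
//     let mut lock = LittleLock::new();
//     unsafe {
//         let _guard = lock.lock();
//         // ... short, non-descheduling critical section ...
//     } // unlocked here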

struct ExData<T> {
    lock: LittleLock,
    failed: bool,
    data: T,
}

/**
 * An arc over mutable data that is protected by a lock. For library use only.
 *
 * # Safety note
 *
 * This uses a pthread mutex, not one that's aware of the userspace scheduler.
 * The user of an Exclusive must be careful not to invoke any functions that may
 * reschedule the task while holding the lock, or deadlock may result. If you
 * need to block or deschedule while accessing shared state, use extra::sync::RWArc.
 */
pub struct Exclusive<T> {
    priv x: UnsafeArc<ExData<T>>
}

impl<T:Send> Clone for Exclusive<T> {
    // Duplicate an Exclusive Arc, as std::arc::clone.
    fn clone(&self) -> Exclusive<T> {
        Exclusive { x: self.x.clone() }
    }
}

impl<T:Send> Exclusive<T> {
    pub fn new(user_data: T) -> Exclusive<T> {
        let data = ExData {
            lock: LittleLock::new(),
            failed: false,
            data: user_data
        };
        Exclusive {
            x: UnsafeArc::new(data)
        }
    }

    // Exactly like std::arc::MutexArc::access(), but with the LittleLock
    // instead of a proper mutex. Same reason for being unsafe.
    //
    // Currently, scheduling operations (i.e., descheduling, receiving on a pipe,
    // accessing the provided condition variable) are prohibited while inside
    // the Exclusive. Supporting that is a work in progress.
    #[inline]
    pub unsafe fn with<U>(&self, f: |x: &mut T| -> U) -> U {
        let rec = self.x.get();
        let _l = (*rec).lock.lock();
        if (*rec).failed {
            fail!("Poisoned Exclusive::new - another task failed inside!");
        }
        (*rec).failed = true;
        let result = f(&mut (*rec).data);
        (*rec).failed = false;
        result
    }
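
    // A minimal sketch of `with` (mirrors the tests below): the closure runs
    // under the lock, and `failed` poisons the Exclusive if the closure fails.
    //
    //     let total = Exclusive::new(0);
    //     unsafe { total.with(|i| *i += 1); }
    //     unsafe { assert!(total.with(|i| *i) == 1); }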

    #[inline]
    pub unsafe fn with_imm<U>(&self, f: |x: &T| -> U) -> U {
        self.with(|x| f(x))
    }

    #[inline]
    pub unsafe fn hold_and_signal(&self, f: |x: &mut T|) {
        let rec = self.x.get();
        let _l = (*rec).lock.lock();
        if (*rec).failed {
            fail!("Poisoned Exclusive::new - another task failed inside!");
        }
        (*rec).failed = true;
        f(&mut (*rec).data);
        (*rec).failed = false;
        (*rec).lock.signal();
    }

    #[inline]
    pub unsafe fn hold_and_wait(&self, f: |x: &T| -> bool) {
        let rec = self.x.get();
        let mut l = (*rec).lock.lock();
        if (*rec).failed {
            fail!("Poisoned Exclusive::new - another task failed inside!");
        }
        (*rec).failed = true;
        let result = f(&(*rec).data);
        (*rec).failed = false;
        if result {
            l.wait();
        }
    }
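
    // A sketch of the wait/signal pairing (illustrative; assumes a
    // hypothetical `ready` flag on the shared data):
    //
    //     // waiter:   x.hold_and_wait(|state| !state.ready);
    //     // signaler: x.hold_and_signal(|state| state.ready = true);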

    pub fn unwrap(self) -> T {
        let Exclusive { x: x } = self;
        // Someday we might need to unkillably unwrap an Exclusive, but not today.
        let inner = x.unwrap();
        let ExData { data: user_data, .. } = inner; // will destroy the LittleLock
        user_data
    }
}

#[cfg(test)]
mod tests {
    use comm;
    use option::*;
    use prelude::*;
    use super::{Exclusive, UnsafeArc, atomic};
    use task;
    use mem::size_of;

    //#[unsafe_no_drop_flag] FIXME: #9758
    #[ignore]
    #[test]
    fn test_size() {
        assert_eq!(size_of::<UnsafeArc<[int, ..10]>>(), size_of::<*[int, ..10]>());
    }

    #[test]
    fn test_atomic() {
        // NB. The whole runtime will abort on an 'atomic-sleep' violation,
        // so we can't really test for the converse behaviour.
        unsafe { let _ = atomic(); } // oughtn't fail
    }

    #[test]
    fn exclusive_new_arc() {
        unsafe {
            let mut futures = ~[];

            let num_tasks = 10;
            let count = 10;

            let total = Exclusive::new(~0);

            for _ in range(0u, num_tasks) {
                let total = total.clone();
                let (port, chan) = comm::stream();
                futures.push(port);

                do task::spawn || {
                    for _ in range(0u, count) {
                        total.with(|count| **count += 1);
                    }
                    chan.send(());
                }
            };

            for f in futures.iter() { f.recv() }

            total.with(|total| assert!(**total == num_tasks * count));
        }
    }

    #[test] #[should_fail]
    fn exclusive_new_poison() {
        unsafe {
            // Tests that if one task fails inside of an Exclusive::new, subsequent
            // accesses will also fail.
            let x = Exclusive::new(1);
            let x2 = x.clone();
            do task::try || {
                x2.with(|one| assert_eq!(*one, 2))
            };
            x.with(|one| assert_eq!(*one, 1));
        }
    }

    #[test]
    fn arclike_newN() {
        // Tests that the many-refcounts-at-once constructors don't leak.
        let _ = UnsafeArc::new2(~~"hello");
        let x = UnsafeArc::newN(~~"hello", 0);
        assert_eq!(x.len(), 0);
        let x = UnsafeArc::newN(~~"hello", 1);
        assert_eq!(x.len(), 1);
        let x = UnsafeArc::newN(~~"hello", 10);
        assert_eq!(x.len(), 10);
    }

    #[test]
    fn arclike_cloneN() {
        // Tests that the many-refcounts-at-once special-clone doesn't leak.
        let x = UnsafeArc::new(~~"hello");
        let x = x.cloneN(0);
        assert_eq!(x.len(), 0);
        let x = UnsafeArc::new(~~"hello");
        let x = x.cloneN(1);
        assert_eq!(x.len(), 1);
        let x = UnsafeArc::new(~~"hello");
        let x = x.cloneN(10);
        assert_eq!(x.len(), 10);
    }

    #[test]
    fn arclike_unwrap_basic() {
        let x = UnsafeArc::new(~~"hello");
        assert!(x.unwrap() == ~~"hello");
    }

    #[test]
    fn arclike_try_unwrap() {
        let x = UnsafeArc::new(~~"hello");
        assert!(x.try_unwrap().expect_t("try_unwrap failed") == ~~"hello");
    }

    #[test]
    fn arclike_try_unwrap_fail() {
        let x = UnsafeArc::new(~~"hello");
        let x2 = x.clone();
        let left_x = x.try_unwrap();
        assert!(left_x.is_self());
        drop(left_x);
        assert!(x2.try_unwrap().expect_t("try_unwrap none") == ~~"hello");
    }

    #[test]
    fn arclike_try_unwrap_unwrap_race() {
        // When an unwrap and a try_unwrap race, the unwrapper should always win.
        let x = UnsafeArc::new(~~"hello");
        let x2 = x.clone();
        let (p,c) = comm::stream();
        do task::spawn {
            c.send(());
            assert!(x2.unwrap() == ~~"hello");
            c.send(());
        }
        p.recv();
        task::deschedule(); // Try to make the unwrapper get blocked first.
        let left_x = x.try_unwrap();
        assert!(left_x.is_self());
        drop(left_x);
        p.recv();
    }

    #[test]
    fn exclusive_new_unwrap_basic() {
        // Unlike the above, also tests no double-freeing of the LittleLock.
        let x = Exclusive::new(~~"hello");
        assert!(x.unwrap() == ~~"hello");
    }

    #[test]
    fn exclusive_new_unwrap_contended() {
        let x = Exclusive::new(~~"hello");
        let x2 = x.clone();
        do task::spawn {
            unsafe { x2.with(|_hello| ()); }
            task::deschedule();
        }
        assert!(x.unwrap() == ~~"hello");

        // Now try the same thing, but with the child task blocking.
        let x = Exclusive::new(~~"hello");
        let x2 = x.clone();
        let mut builder = task::task();
        let res = builder.future_result();
        do builder.spawn {
            assert!(x2.unwrap() == ~~"hello");
        }
        // Have to get rid of our reference before blocking.
        drop(x);
        res.recv();
    }

    #[test] #[should_fail]
    fn exclusive_new_unwrap_conflict() {
        let x = Exclusive::new(~~"hello");
        let x2 = x.clone();
        let mut builder = task::task();
        let res = builder.future_result();
        do builder.spawn {
            assert!(x2.unwrap() == ~~"hello");
        }
        assert!(x.unwrap() == ~~"hello");
        assert!(res.recv().is_ok());
    }
}