// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

/*!
 * Concurrency-enabled mechanisms for sharing mutable and/or immutable state
 * between tasks.
 *
 * # Example
 *
 * In this example, a large vector of floats is shared between several tasks.
 * With simple pipes, without Arc, a copy would have to be made for each task.
 *
 * ```rust
 * extern crate sync;
 * extern crate rand;
 *
 * use std::vec;
 * use sync::Arc;
 *
 * fn main() {
 *     let numbers = vec::from_fn(100, |i| (i as f32) * rand::random());
 *     let shared_numbers = Arc::new(numbers);
 *
 *     for _ in range(0, 10) {
 *         let (tx, rx) = channel();
 *         tx.send(shared_numbers.clone());
 *
 *         spawn(proc() {
 *             let shared_numbers = rx.recv();
 *             let local_numbers = shared_numbers.get();
 *
 *             // Work with the local numbers
 *         });
 *     }
 * }
 * ```
 */

#[allow(missing_doc, dead_code)];

use sync;
use sync::{Mutex, RWLock};

use std::cast;
use std::kinds::marker;
use std::sync::arc::UnsafeArc;
use std::task;

/// As sync::condvar, a mechanism for unlock-and-descheduling and
/// signaling, for use with the Arc types.
pub struct ArcCondvar<'a> {
    priv is_mutex: bool,
    priv failed: &'a bool,
    priv cond: &'a sync::Condvar<'a>
}

impl<'a> ArcCondvar<'a> {
    /// Atomically exit the associated Arc and block until a signal is sent.
    #[inline]
    pub fn wait(&self) { self.wait_on(0) }

    /**
     * Atomically exit the associated Arc and block on a specified condvar
     * until a signal is sent on that same condvar (as sync::cond.wait_on).
     *
     * wait() is equivalent to wait_on(0).
     */
    #[inline]
    pub fn wait_on(&self, condvar_id: uint) {
        assert!(!*self.failed);
        self.cond.wait_on(condvar_id);
        // This is why we need to wrap sync::condvar.
        check_poison(self.is_mutex, *self.failed);
    }

    /// Wake up a blocked task. Returns false if there was no blocked task.
    #[inline]
    pub fn signal(&self) -> bool { self.signal_on(0) }

    /**
     * Wake up a blocked task on a specified condvar (as
     * sync::cond.signal_on). Returns false if there was no blocked task.
     */
    #[inline]
    pub fn signal_on(&self, condvar_id: uint) -> bool {
        assert!(!*self.failed);
        self.cond.signal_on(condvar_id)
    }

    /// Wake up all blocked tasks. Returns the number of tasks woken.
    #[inline]
    pub fn broadcast(&self) -> uint { self.broadcast_on(0) }

    /**
     * Wake up all blocked tasks on a specified condvar (as
     * sync::cond.broadcast_on). Returns the number of tasks woken.
     */
    #[inline]
    pub fn broadcast_on(&self, condvar_id: uint) -> uint {
        assert!(!*self.failed);
        self.cond.broadcast_on(condvar_id)
    }
}
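
// Illustrative sketch of the ArcCondvar protocol (mirroring the
// test_mutex_arc_condvar test below): signal while holding the lock via
// access_cond (defined on MutexArc further down), and re-check the guarded
// predicate in a loop around wait(). The `flag`/`done` names are placeholders.
//
//     let flag = MutexArc::new(false);
//     let flag2 = flag.clone();
//     spawn(proc() {
//         flag2.access_cond(|done, cond| {
//             *done = true;
//             cond.signal();                  // wake one waiter on condvar 0
//         })
//     });
//     flag.access_cond(|done, cond| {
//         while !*done { cond.wait(); }       // only proceed once the predicate holds
//     });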

/****************************************************************************
 * Immutable Arc
 ****************************************************************************/

/// An atomically reference counted wrapper for shared immutable state.
pub struct Arc<T> { priv x: UnsafeArc<T> }

/**
 * Access the underlying data in an atomically reference counted
 * wrapper.
 */
impl<T:Freeze+Send> Arc<T> {
    /// Create an atomically reference counted wrapper.
    #[inline]
    pub fn new(data: T) -> Arc<T> {
        Arc { x: UnsafeArc::new(data) }
    }

    #[inline]
    pub fn get<'a>(&'a self) -> &'a T {
        unsafe { &*self.x.get_immut() }
    }
}

impl<T:Freeze + Send> Clone for Arc<T> {
    /**
     * Duplicate an atomically reference counted wrapper.
     *
     * The resulting two `arc` objects will point to the same underlying data
     * object. However, one of the `arc` objects can be sent to another task,
     * allowing them to share the underlying data.
     */
    #[inline]
    fn clone(&self) -> Arc<T> {
        Arc { x: self.x.clone() }
    }
}
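
// Usage sketch (condensed from the module example and the manually_share_arc
// test below): clone() bumps the reference count instead of copying the data,
// and the clone can be sent to another task while both handles read via get().
//
//     let numbers = Arc::new(~[1, 2, 3]);
//     let (tx, rx) = channel();
//     tx.send(numbers.clone());              // cheap: shares the same allocation
//     spawn(proc() {
//         let numbers: Arc<~[int]> = rx.recv();
//         assert_eq!(numbers.get()[0], 1);   // immutable access
//     });
//     assert_eq!(numbers.get()[2], 3);       // original handle still usable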

/****************************************************************************
 * Mutex protected Arc (unsafe)
 ****************************************************************************/

#[doc(hidden)]
struct MutexArcInner<T> { lock: Mutex, failed: bool, data: T }

/// An Arc with mutable data protected by a blocking mutex.
pub struct MutexArc<T> {
    priv x: UnsafeArc<MutexArcInner<T>>,
    priv marker: marker::NoFreeze,
}

impl<T:Send> Clone for MutexArc<T> {
    /// Duplicate a mutex-protected Arc. See arc::clone for more details.
    #[inline]
    fn clone(&self) -> MutexArc<T> {
        // NB: Cloning the underlying mutex is not necessary. Its reference
        // count would be exactly the same as the shared state's.
        MutexArc { x: self.x.clone(),
                   marker: marker::NoFreeze, }
    }
}

impl<T:Send> MutexArc<T> {
    /// Create a mutex-protected Arc with the supplied data.
    pub fn new(user_data: T) -> MutexArc<T> {
        MutexArc::new_with_condvars(user_data, 1)
    }

    /**
     * Create a mutex-protected Arc with the supplied data and a specified number
     * of condvars (as sync::Mutex::new_with_condvars).
     */
    pub fn new_with_condvars(user_data: T, num_condvars: uint) -> MutexArc<T> {
        let data = MutexArcInner {
            lock: Mutex::new_with_condvars(num_condvars),
            failed: false, data: user_data
        };
        MutexArc { x: UnsafeArc::new(data),
                   marker: marker::NoFreeze, }
    }

    /**
     * Access the underlying mutable data with mutual exclusion from other
     * tasks. The argument closure will be run with the mutex locked; all
     * other tasks wishing to access the data will block until the closure
     * finishes running.
     *
     * If you wish to nest MutexArcs, one strategy for ensuring safety at
     * runtime is to add a "nesting level counter" inside the stored data, and
     * when traversing the arcs, assert that they monotonically decrease.
     *
     * # Failure
     *
     * Failing while inside the Arc will unlock the Arc while unwinding, so
     * that other tasks won't block forever. It will also poison the Arc:
     * any tasks that subsequently try to access it (including those already
     * blocked on the mutex) will also fail immediately.
     */
    #[inline]
    pub fn access<U>(&self, blk: |x: &mut T| -> U) -> U {
        let state = self.x.get();
        unsafe {
            // Borrowck would complain about this if the code were
            // not already unsafe. See borrow_rwlock, far below.
            (&(*state).lock).lock(|| {
                check_poison(true, (*state).failed);
                let _z = PoisonOnFail::new(&mut (*state).failed);
                blk(&mut (*state).data)
            })
        }
    }

    /// As access(), but with a condvar, as sync::mutex.lock_cond().
    #[inline]
    pub fn access_cond<U>(&self, blk: |x: &mut T, c: &ArcCondvar| -> U) -> U {
        let state = self.x.get();
        unsafe {
            (&(*state).lock).lock_cond(|cond| {
                check_poison(true, (*state).failed);
                let _z = PoisonOnFail::new(&mut (*state).failed);
                blk(&mut (*state).data,
                    &ArcCondvar {is_mutex: true,
                                 failed: &(*state).failed,
                                 cond: cond })
            })
        }
    }
}
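
// Usage sketch (illustrative): each task keeps its own clone of the MutexArc
// and only touches the shared value inside access(), which holds the mutex
// for the duration of the closure. The `counter` name is a placeholder.
//
//     let counter = MutexArc::new(0);
//     let counter2 = counter.clone();
//     spawn(proc() {
//         counter2.access(|n| *n += 1);              // exclusive, blocking access
//     });
//     counter.access(|n| assert!(*n == 0 || *n == 1));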

// Common code for {mutex.access,rwlock.write}{,_cond}.
#[inline]
#[doc(hidden)]
fn check_poison(is_mutex: bool, failed: bool) {
    if failed {
        if is_mutex {
            fail!("Poisoned MutexArc - another task failed inside!");
        } else {
            fail!("Poisoned rw_arc - another task failed inside!");
        }
    }
}

#[doc(hidden)]
struct PoisonOnFail {
    flag: *mut bool,
    failed: bool,
}

impl Drop for PoisonOnFail {
    fn drop(&mut self) {
        unsafe {
            /* assert!(!*self.failed);
               -- might be false in case of cond.wait() */
            if !self.failed && task::failing() {
                *self.flag = true;
            }
        }
    }
}

impl PoisonOnFail {
    fn new<'a>(flag: &'a mut bool) -> PoisonOnFail {
        PoisonOnFail {
            flag: flag,
            failed: task::failing()
        }
    }
}
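
// Poisoning in practice (condensed from the test_mutex_arc_poison test below):
// if a task fails inside access(), PoisonOnFail sets the `failed` flag during
// unwinding, and the next check_poison() makes the following accessor fail too.
//
//     let arc = MutexArc::new(1);
//     let arc2 = arc.clone();
//     let _ = task::try(proc() {
//         arc2.access(|one| assert_eq!(*one, 2));  // fails inside, poisoning the arc
//     });
//     arc.access(|one| assert_eq!(*one, 1));       // fails: the arc is poisoned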

/****************************************************************************
 * R/W lock protected Arc
 ****************************************************************************/

#[doc(hidden)]
struct RWArcInner<T> { lock: RWLock, failed: bool, data: T }
/**
 * A dual-mode Arc protected by a reader-writer lock. The data can be accessed
 * mutably or immutably, and immutably-accessing tasks may run concurrently.
 *
 * Unlike mutex_arcs, rw_arcs are safe, because they cannot be nested.
 */
pub struct RWArc<T> {
    priv x: UnsafeArc<RWArcInner<T>>,
    priv marker: marker::NoFreeze,
}

impl<T:Freeze + Send> Clone for RWArc<T> {
    /// Duplicate a rwlock-protected Arc. See arc::clone for more details.
    #[inline]
    fn clone(&self) -> RWArc<T> {
        RWArc { x: self.x.clone(),
                marker: marker::NoFreeze, }
    }
}

impl<T:Freeze + Send> RWArc<T> {
    /// Create a reader/writer Arc with the supplied data.
    pub fn new(user_data: T) -> RWArc<T> {
        RWArc::new_with_condvars(user_data, 1)
    }

    /**
     * Create a reader/writer Arc with the supplied data and a specified number
     * of condvars (as sync::RWLock::new_with_condvars).
     */
    pub fn new_with_condvars(user_data: T, num_condvars: uint) -> RWArc<T> {
        let data = RWArcInner {
            lock: RWLock::new_with_condvars(num_condvars),
            failed: false, data: user_data
        };
        RWArc { x: UnsafeArc::new(data),
                marker: marker::NoFreeze, }
    }

    /**
     * Access the underlying data mutably. Locks the rwlock in write mode;
     * other readers and writers will block.
     *
     * # Failure
     *
     * Failing while inside the Arc will unlock the Arc while unwinding, so
     * that other tasks won't block forever. As MutexArc.access, it will also
     * poison the Arc, so subsequent readers and writers will both also fail.
     */
    #[inline]
    pub fn write<U>(&self, blk: |x: &mut T| -> U) -> U {
        unsafe {
            let state = self.x.get();
            (*borrow_rwlock(state)).write(|| {
                check_poison(false, (*state).failed);
                let _z = PoisonOnFail::new(&mut (*state).failed);
                blk(&mut (*state).data)
            })
        }
    }

    /// As write(), but with a condvar, as sync::rwlock.write_cond().
    #[inline]
    pub fn write_cond<U>(&self,
                         blk: |x: &mut T, c: &ArcCondvar| -> U)
                         -> U {
        unsafe {
            let state = self.x.get();
            (*borrow_rwlock(state)).write_cond(|cond| {
                check_poison(false, (*state).failed);
                let _z = PoisonOnFail::new(&mut (*state).failed);
                blk(&mut (*state).data,
                    &ArcCondvar {is_mutex: false,
                                 failed: &(*state).failed,
                                 cond: cond})
            })
        }
    }

    /**
     * Access the underlying data immutably. May run concurrently with other
     * reading tasks.
     *
     * # Failure
     *
     * Failing will unlock the Arc while unwinding. However, unlike all other
     * access modes, this will not poison the Arc.
     */
    pub fn read<U>(&self, blk: |x: &T| -> U) -> U {
        unsafe {
            let state = self.x.get();
            (*state).lock.read(|| {
                check_poison(false, (*state).failed);
                blk(&(*state).data)
            })
        }
    }

    /**
     * As write(), but with the ability to atomically 'downgrade' the lock.
     * See sync::rwlock.write_downgrade(). The RWWriteMode token must be used
     * to obtain the &mut T, and can be transformed into a RWReadMode token by
     * calling downgrade(), after which a &T can be obtained instead.
     *
     * # Example
     *
     * ```rust
     * use sync::RWArc;
     *
     * let arc = RWArc::new(1);
     * arc.write_downgrade(|mut write_token| {
     *     write_token.write_cond(|state, condvar| {
     *         // ... exclusive access with mutable state ...
     *     });
     *     let read_token = arc.downgrade(write_token);
     *     read_token.read(|state| {
     *         // ... shared access with immutable state ...
     *     });
     * })
     * ```
     */
    pub fn write_downgrade<U>(&self, blk: |v: RWWriteMode<T>| -> U) -> U {
        unsafe {
            let state = self.x.get();
            (*borrow_rwlock(state)).write_downgrade(|write_mode| {
                check_poison(false, (*state).failed);
                blk(RWWriteMode {
                    data: &mut (*state).data,
                    token: write_mode,
                    poison: PoisonOnFail::new(&mut (*state).failed)
                })
            })
        }
    }

    /// To be called inside of the write_downgrade block.
    pub fn downgrade<'a>(&self, token: RWWriteMode<'a, T>)
                         -> RWReadMode<'a, T> {
        unsafe {
            // The rwlock should assert that the token belongs to us for us.
            let state = self.x.get();
            let RWWriteMode {
                data: data,
                token: t,
                poison: _poison
            } = token;
            // Let readers in
            let new_token = (*state).lock.downgrade(t);
            // Whatever region the input reference had, it will be safe to use
            // the same region for the output reference. (The only 'unsafe' part
            // of this cast is removing the mutability.)
            let new_data = data;
            // Downgrade ensured the token belonged to us. Just a sanity check.
            assert!((&(*state).data as *T as uint) == (new_data as *mut T as uint));
            // Produce new token
            RWReadMode {
                data: new_data,
                token: new_token,
            }
        }
    }
}
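
// Usage sketch (condensed from the tests below): write() takes the rwlock
// exclusively, while any number of read() calls may run concurrently.
//
//     let arc = RWArc::new(0);
//     let arc2 = arc.clone();
//     spawn(proc() {
//         arc2.write(|num| *num += 1);                   // blocks readers and writers
//     });
//     arc.read(|num| assert!(*num == 0 || *num == 1));   // shared access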

// Borrowck rightly complains about immutably aliasing the rwlock in order to
// lock it. This wraps the unsafety, with the justification that the 'lock'
// field is never overwritten; only 'failed' and 'data'.
#[doc(hidden)]
fn borrow_rwlock<T:Freeze + Send>(state: *mut RWArcInner<T>) -> *RWLock {
    unsafe { cast::transmute(&(*state).lock) }
}

/// The "write permission" token used for RWArc.write_downgrade().
pub struct RWWriteMode<'a, T> {
    priv data: &'a mut T,
    priv token: sync::RWLockWriteMode<'a>,
    priv poison: PoisonOnFail,
}

/// The "read permission" token used for RWArc.write_downgrade().
pub struct RWReadMode<'a, T> {
    priv data: &'a T,
    priv token: sync::RWLockReadMode<'a>,
}

impl<'a, T:Freeze + Send> RWWriteMode<'a, T> {
    /// Access the pre-downgrade RWArc in write mode.
    pub fn write<U>(&mut self, blk: |x: &mut T| -> U) -> U {
        match *self {
            RWWriteMode {
                data: &ref mut data,
                token: ref token,
                poison: _
            } => {
                token.write(|| blk(data))
            }
        }
    }

    /// Access the pre-downgrade RWArc in write mode with a condvar.
    pub fn write_cond<U>(&mut self,
                         blk: |x: &mut T, c: &ArcCondvar| -> U)
                         -> U {
        match *self {
            RWWriteMode {
                data: &ref mut data,
                token: ref token,
                poison: ref poison
            } => {
                token.write_cond(|cond| {
                    unsafe {
                        let cvar = ArcCondvar {
                            is_mutex: false,
                            failed: &*poison.flag,
                            cond: cond
                        };
                        blk(data, &cvar)
                    }
                })
            }
        }
    }
}

impl<'a, T:Freeze + Send> RWReadMode<'a, T> {
    /// Access the post-downgrade rwlock in read mode.
    pub fn read<U>(&self, blk: |x: &T| -> U) -> U {
        match *self {
            RWReadMode {
                data: data,
                token: ref token
            } => {
                token.read(|| blk(data))
            }
        }
    }
}

/****************************************************************************
 * Copy-on-write Arc
 ****************************************************************************/

pub struct CowArc<T> { priv x: UnsafeArc<T> }

/// A Copy-on-write Arc functions the same way as an `Arc` except it allows
/// mutation of the contents if there is only a single reference to
/// the data. If there are multiple references, the data is automatically
/// cloned and the task modifies the cloned data in place of the shared data.
impl<T:Clone+Send+Freeze> CowArc<T> {
    /// Create a copy-on-write atomically reference counted wrapper
    #[inline]
    pub fn new(data: T) -> CowArc<T> {
        CowArc { x: UnsafeArc::new(data) }
    }

    #[inline]
    pub fn get<'a>(&'a self) -> &'a T {
        unsafe { &*self.x.get_immut() }
    }

    /// Get a mutable reference to the contents. If there is more than one
    /// reference to the contents, the contents of the `CowArc` will be cloned
    /// and this reference updated to point to the cloned data.
    #[inline]
    pub fn get_mut<'a>(&'a mut self) -> &'a mut T {
        if !self.x.is_owned() {
            *self = CowArc::new(self.get().clone())
        }
        unsafe { &mut *self.x.get() }
    }
}
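
// Copy-on-write in practice (condensed from the test_cowarc_clone_get_mut2
// test below): get_mut() only clones the contents while the data is shared,
// so mutation through one handle never disturbs the others.
//
//     let mut cow0 = CowArc::new(75u);
//     let cow1 = cow0.clone();       // both handles share the same data
//     *cow0.get_mut() += 1;          // shared, so cow0 clones before mutating
//     assert!(76 == *cow0.get());
//     assert!(75 == *cow1.get());    // cow1 still sees the original value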

impl<T:Clone+Send+Freeze> Clone for CowArc<T> {
    /// Duplicate a Copy-on-write Arc. See arc::clone for more details.
    fn clone(&self) -> CowArc<T> {
        CowArc { x: self.x.clone() }
    }
}


/****************************************************************************
 * Tests
 ****************************************************************************/

#[cfg(test)]
mod tests {

    use super::{Arc, RWArc, MutexArc, CowArc};

    use std::task;

    #[test]
    fn manually_share_arc() {
        let v = ~[1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
        let arc_v = Arc::new(v);

        let (tx, rx) = channel();

        task::spawn(proc() {
            let arc_v: Arc<~[int]> = rx.recv();

            let v = arc_v.get().clone();
            assert_eq!(v[3], 4);
        });

        tx.send(arc_v.clone());

        assert_eq!(arc_v.get()[2], 3);
        assert_eq!(arc_v.get()[4], 5);

        info!("{:?}", arc_v);
    }

    #[test]
    fn test_mutex_arc_condvar() {
        let arc = ~MutexArc::new(false);
        let arc2 = ~arc.clone();
        let (tx, rx) = channel();
        task::spawn(proc() {
            // wait until parent gets in
            rx.recv();
            arc2.access_cond(|state, cond| {
                *state = true;
                cond.signal();
            })
        });

        arc.access_cond(|state, cond| {
            tx.send(());
            assert!(!*state);
            while !*state {
                cond.wait();
            }
        })
    }

    #[test] #[should_fail]
    fn test_arc_condvar_poison() {
        let arc = ~MutexArc::new(1);
        let arc2 = ~arc.clone();
        let (tx, rx) = channel();

        spawn(proc() {
            let _ = rx.recv();
            arc2.access_cond(|one, cond| {
                cond.signal();
                // Parent should fail when it wakes up.
                assert_eq!(*one, 0);
            })
        });

        arc.access_cond(|one, cond| {
            tx.send(());
            while *one == 1 {
                cond.wait();
            }
        })
    }

    #[test] #[should_fail]
    fn test_mutex_arc_poison() {
        let arc = ~MutexArc::new(1);
        let arc2 = ~arc.clone();
        let _ = task::try(proc() {
            arc2.access(|one| {
                assert_eq!(*one, 2);
            })
        });
        arc.access(|one| {
            assert_eq!(*one, 1);
        })
    }

    #[test]
    fn test_mutex_arc_nested() {
        // Tests nested mutexes and access
        // to underlying data.
        let arc = ~MutexArc::new(1);
        let arc2 = ~MutexArc::new(*arc);
        task::spawn(proc() {
            (*arc2).access(|mutex| {
                (*mutex).access(|one| {
                    assert!(*one == 1);
                })
            })
        });
    }

    #[test]
    fn test_mutex_arc_access_in_unwind() {
        let arc = MutexArc::new(1i);
        let arc2 = arc.clone();
        let _ = task::try::<()>(proc() {
            struct Unwinder {
                i: MutexArc<int>
            }
            impl Drop for Unwinder {
                fn drop(&mut self) {
                    self.i.access(|num| *num += 1);
                }
            }
            let _u = Unwinder { i: arc2 };
            fail!();
        });
        assert_eq!(2, arc.access(|n| *n));
    }

    #[test] #[should_fail]
    fn test_rw_arc_poison_wr() {
        let arc = RWArc::new(1);
        let arc2 = arc.clone();
        let _ = task::try(proc() {
            arc2.write(|one| {
                assert_eq!(*one, 2);
            })
        });
        arc.read(|one| {
            assert_eq!(*one, 1);
        })
    }

    #[test] #[should_fail]
    fn test_rw_arc_poison_ww() {
        let arc = RWArc::new(1);
        let arc2 = arc.clone();
        let _ = task::try(proc() {
            arc2.write(|one| {
                assert_eq!(*one, 2);
            })
        });
        arc.write(|one| {
            assert_eq!(*one, 1);
        })
    }

    #[test] #[should_fail]
    fn test_rw_arc_poison_dw() {
        let arc = RWArc::new(1);
        let arc2 = arc.clone();
        let _ = task::try(proc() {
            arc2.write_downgrade(|mut write_mode| {
                write_mode.write(|one| {
                    assert_eq!(*one, 2);
                })
            })
        });
        arc.write(|one| {
            assert_eq!(*one, 1);
        })
    }

    #[test]
    fn test_rw_arc_no_poison_rr() {
        let arc = RWArc::new(1);
        let arc2 = arc.clone();
        let _ = task::try(proc() {
            arc2.read(|one| {
                assert_eq!(*one, 2);
            })
        });
        arc.read(|one| {
            assert_eq!(*one, 1);
        })
    }

    #[test]
    fn test_rw_arc_no_poison_rw() {
        let arc = RWArc::new(1);
        let arc2 = arc.clone();
        let _ = task::try(proc() {
            arc2.read(|one| {
                assert_eq!(*one, 2);
            })
        });
        arc.write(|one| {
            assert_eq!(*one, 1);
        })
    }

    #[test]
    fn test_rw_arc_no_poison_dr() {
        let arc = RWArc::new(1);
        let arc2 = arc.clone();
        let _ = task::try(proc() {
            arc2.write_downgrade(|write_mode| {
                let read_mode = arc2.downgrade(write_mode);
                read_mode.read(|one| {
                    assert_eq!(*one, 2);
                })
            })
        });
        arc.write(|one| {
            assert_eq!(*one, 1);
        })
    }

    #[test]
    fn test_rw_arc() {
        let arc = RWArc::new(0);
        let arc2 = arc.clone();
        let (tx, rx) = channel();

        task::spawn(proc() {
            arc2.write(|num| {
                for _ in range(0, 10) {
                    let tmp = *num;
                    *num = -1;
                    task::deschedule();
                    *num = tmp + 1;
                }
                tx.send(());
            })
        });

        // Readers try to catch the writer in the act
        let mut children = ~[];
        for _ in range(0, 5) {
            let arc3 = arc.clone();
            let mut builder = task::task();
            children.push(builder.future_result());
            builder.spawn(proc() {
                arc3.read(|num| {
                    assert!(*num >= 0);
                })
            });
        }

        // Wait for children to pass their asserts
        for r in children.mut_iter() {
            let _ = r.recv();
        }

        // Wait for writer to finish
        rx.recv();
        arc.read(|num| {
            assert_eq!(*num, 10);
        })
    }

    #[test]
    fn test_rw_arc_access_in_unwind() {
        let arc = RWArc::new(1i);
        let arc2 = arc.clone();
        let _ = task::try::<()>(proc() {
            struct Unwinder {
                i: RWArc<int>
            }
            impl Drop for Unwinder {
                fn drop(&mut self) {
                    self.i.write(|num| *num += 1);
                }
            }
            let _u = Unwinder { i: arc2 };
            fail!();
        });
        assert_eq!(2, arc.read(|n| *n));
    }

    #[test]
    fn test_rw_downgrade() {
        // (1) A downgrader gets in write mode and does cond.wait.
        // (2) A writer gets in write mode, sets state to 42, and does signal.
        // (3) Downgrader wakes, sets state to 31337.
        // (4) tells writer and all other readers to contend as it downgrades.
        // (5) Writer attempts to set state back to 42, while downgraded task
        // and all reader tasks assert that it's 31337.
        let arc = RWArc::new(0);

        // Reader tasks
        let mut reader_convos = ~[];
        for _ in range(0, 10) {
            let ((tx1, rx1), (tx2, rx2)) = (channel(), channel());
            reader_convos.push((tx1, rx2));
            let arcn = arc.clone();
            task::spawn(proc() {
                rx1.recv(); // wait for downgrader to give go-ahead
                arcn.read(|state| {
                    assert_eq!(*state, 31337);
                    tx2.send(());
                })
            });
        }

        // Writer task
        let arc2 = arc.clone();
        let ((tx1, rx1), (tx2, rx2)) = (channel(), channel());
        task::spawn(proc() {
            rx1.recv();
            arc2.write_cond(|state, cond| {
                assert_eq!(*state, 0);
                *state = 42;
                cond.signal();
            });
            rx1.recv();
            arc2.write(|state| {
                // This shouldn't happen until after the downgrade read
                // section, and all other readers, finish.
                assert_eq!(*state, 31337);
                *state = 42;
            });
            tx2.send(());
        });

        // Downgrader (us)
        arc.write_downgrade(|mut write_mode| {
            write_mode.write_cond(|state, cond| {
                tx1.send(()); // send to another writer who will wake us up
                while *state == 0 {
                    cond.wait();
                }
                assert_eq!(*state, 42);
                *state = 31337;
                // send to other readers
                for &(ref mut rc, _) in reader_convos.mut_iter() {
                    rc.send(())
                }
            });
            let read_mode = arc.downgrade(write_mode);
            read_mode.read(|state| {
                // complete handshake with other readers
                for &(_, ref mut rp) in reader_convos.mut_iter() {
                    rp.recv()
                }
                tx1.send(()); // tell writer to try again
                assert_eq!(*state, 31337);
            });
        });

        rx2.recv(); // complete handshake with writer
    }

    #[cfg(test)]
    fn test_rw_write_cond_downgrade_read_race_helper() {
        // Tests that when a downgrader hands off the "reader cloud" lock
        // because of a contending reader, a writer can't race to get it
        // instead, which would result in readers_and_writers. This tests
        // the sync module rather than this one, but it's here because an
        // rwarc gives us extra shared state to help check for the race.
        // If you want to see this test fail, go to sync.rs and replace the
        // line in RWLock::write_cond() that looks like:
        // "blk(&ArcCondvar { order: opt_lock, ..*cond })"
        // with just "blk(cond)".
        let x = RWArc::new(true);
        let (tx, rx) = channel();

        // writer task
        let xw = x.clone();
        task::spawn(proc() {
            xw.write_cond(|state, c| {
                tx.send(()); // tell downgrader it's ok to go
                c.wait();
                // The core of the test is here: the condvar reacquire path
                // must involve order_lock, so that it cannot race with a reader
                // trying to receive the "reader cloud lock hand-off".
                *state = false;
            })
        });

        rx.recv(); // wait for writer to get in

        x.write_downgrade(|mut write_mode| {
            write_mode.write_cond(|state, c| {
                assert!(*state);
                // make writer contend in the cond-reacquire path
                c.signal();
            });
            // make a reader task to trigger the "reader cloud lock" handoff
            let xr = x.clone();
            let (tx, rx) = channel();
            task::spawn(proc() {
                tx.send(());
                xr.read(|_state| { })
            });
            rx.recv(); // wait for reader task to exist

            let read_mode = x.downgrade(write_mode);
            read_mode.read(|state| {
                // if writer mistakenly got in, make sure it mutates state
                // before we assert on it
                for _ in range(0, 5) { task::deschedule(); }
                // make sure writer didn't get in.
                assert!(*state);
            })
        });
    }

    #[test]
    fn test_rw_write_cond_downgrade_read_race() {
        // Ideally the above test case would have deschedule statements in it that
        // helped to expose the race nearly 100% of the time... but adding
        // deschedules in the intuitively-right locations made it even less likely,
        // and I wasn't sure why :( . This is a mediocre "next best" option.
        for _ in range(0, 8) { test_rw_write_cond_downgrade_read_race_helper(); }
    }

    #[test]
    fn test_cowarc_clone()
    {
        let cow0 = CowArc::new(75u);
        let cow1 = cow0.clone();
        let cow2 = cow1.clone();

        assert!(75 == *cow0.get());
        assert!(75 == *cow1.get());
        assert!(75 == *cow2.get());

        assert!(cow0.get() == cow1.get());
        assert!(cow0.get() == cow2.get());
    }

    #[test]
    fn test_cowarc_clone_get_mut()
    {
        let mut cow0 = CowArc::new(75u);
        let mut cow1 = cow0.clone();
        let mut cow2 = cow1.clone();

        assert!(75 == *cow0.get_mut());
        assert!(75 == *cow1.get_mut());
        assert!(75 == *cow2.get_mut());

        *cow0.get_mut() += 1;
        *cow1.get_mut() += 2;
        *cow2.get_mut() += 3;

        assert!(76 == *cow0.get());
        assert!(77 == *cow1.get());
        assert!(78 == *cow2.get());

        // none should point to the same backing memory
        assert!(cow0.get() != cow1.get());
        assert!(cow0.get() != cow2.get());
        assert!(cow1.get() != cow2.get());
    }

    #[test]
    fn test_cowarc_clone_get_mut2()
    {
        let mut cow0 = CowArc::new(75u);
        let cow1 = cow0.clone();
        let cow2 = cow1.clone();

        assert!(75 == *cow0.get());
        assert!(75 == *cow1.get());
        assert!(75 == *cow2.get());

        *cow0.get_mut() += 1;

        assert!(76 == *cow0.get());
        assert!(75 == *cow1.get());
        assert!(75 == *cow2.get());

        // cow1 and cow2 should share the same contents
        // cow0 should have a unique reference
        assert!(cow0.get() != cow1.get());
        assert!(cow0.get() != cow2.get());
        assert!(cow1.get() == cow2.get());
    }
}