rust/src/libextra/arc.rs


// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/*!
* Concurrency-enabled mechanisms for sharing mutable and/or immutable state
* between tasks.
*
* # Example
*
* In this example, a large vector of floats is shared between several tasks.
* With simple pipes, without ARC, a copy would have to be made for each task.
*
* ~~~
* extern mod std;
* use std::arc;
* let numbers = vec::from_fn(100, |ind| (ind as float) * rand::random());
* let shared_numbers = arc::ARC(numbers);
*
* for 10.times {
* let (port, chan) = stream();
* chan.send(shared_numbers.clone());
*
* do spawn {
* let shared_numbers = port.recv();
* let local_numbers = shared_numbers.get();
*
* // Work with the local numbers
* }
* }
* ~~~
*/
use core::prelude::*;
use sync;
use sync::{Mutex, mutex_with_condvars, RWlock, rwlock_with_condvars};
use core::cast;
use core::unstable::sync::UnsafeAtomicRcBox;
use core::ptr;
use core::task;
/// As sync::condvar, a mechanism for unlock-and-descheduling and signaling.
pub struct Condvar<'self> {
is_mutex: bool,
failed: &'self mut bool,
cond: &'self sync::Condvar<'self>
}
pub impl<'self> Condvar<'self> {
/// Atomically exit the associated ARC and block until a signal is sent.
#[inline(always)]
fn wait(&self) { self.wait_on(0) }
/**
* Atomically exit the associated ARC and block on a specified condvar
* until a signal is sent on that same condvar (as sync::cond.wait_on).
*
* wait() is equivalent to wait_on(0).
*/
#[inline(always)]
fn wait_on(&self, condvar_id: uint) {
assert!(!*self.failed);
self.cond.wait_on(condvar_id);
// This is why we need to wrap sync::condvar.
check_poison(self.is_mutex, *self.failed);
}
/// Wake up a blocked task. Returns false if there was no blocked task.
#[inline(always)]
fn signal(&self) -> bool { self.signal_on(0) }
/**
* Wake up a blocked task on a specified condvar (as
* sync::cond.signal_on). Returns false if there was no blocked task.
*/
#[inline(always)]
fn signal_on(&self, condvar_id: uint) -> bool {
assert!(!*self.failed);
self.cond.signal_on(condvar_id)
}
/// Wake up all blocked tasks. Returns the number of tasks woken.
#[inline(always)]
fn broadcast(&self) -> uint { self.broadcast_on(0) }
/**
* Wake up all blocked tasks on a specified condvar (as
* sync::cond.broadcast_on). Returns the number of tasks woken.
*/
#[inline(always)]
fn broadcast_on(&self, condvar_id: uint) -> uint {
assert!(!*self.failed);
self.cond.broadcast_on(condvar_id)
}
}
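// Illustrative sketch (comment only, not compiled): a Condvar is only handed
// out by MutexARC::access_cond() / RWARC::write_cond(), so a typical
// wait/signal handshake looks like the pattern below, mirroring
// test_mutex_arc_condvar at the bottom of this file.
//
//     let arc = ~MutexARC(false);
//     let arc2 = ~arc.clone();
//     do task::spawn {
//         do arc2.access_cond |done, cond| {
//             *done = true;
//             cond.signal();              // wake one blocked waiter, if any
//         }
//     }
//     do arc.access_cond |done, cond| {
//         while !*done { cond.wait() }    // atomically unlock and block
//     }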
/****************************************************************************
* Immutable ARC
****************************************************************************/
/// An atomically reference counted wrapper for shared immutable state.
struct ARC<T> { x: UnsafeAtomicRcBox<T> }
/// Create an atomically reference counted wrapper.
pub fn ARC<T:Const + Owned>(data: T) -> ARC<T> {
ARC { x: UnsafeAtomicRcBox::new(data) }
}
/**
* Access the underlying data in an atomically reference counted
* wrapper.
*/
pub fn get<'a, T:Const + Owned>(rc: &'a ARC<T>) -> &'a T {
rc.get()
}
impl<T:Const+Owned> ARC<T> {
pub fn get<'a>(&'a self) -> &'a T {
unsafe { &*self.x.get_immut() }
}
}
/**
* Duplicate an atomically reference counted wrapper.
*
* The resulting two `arc` objects will point to the same underlying data
* object. However, one of the `arc` objects can be sent to another task,
* allowing them to share the underlying data.
*/
pub fn clone<T:Const + Owned>(rc: &ARC<T>) -> ARC<T> {
ARC { x: rc.x.clone() }
}
impl<T:Const + Owned> Clone for ARC<T> {
fn clone(&self) -> ARC<T> {
clone(self)
}
}
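// Illustrative sketch (comment only): cloned handles share a single
// allocation, so get() on either handle sees the same underlying value.
//
//     let left = ARC(~[1, 2, 3]);
//     let right = left.clone();
//     assert!(ptr::ref_eq(left.get(), right.get()));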
/****************************************************************************
* Mutex protected ARC (unsafe)
****************************************************************************/
#[doc(hidden)]
struct MutexARCInner<T> { lock: Mutex, failed: bool, data: T }
/// An ARC with mutable data protected by a blocking mutex.
struct MutexARC<T> { x: UnsafeAtomicRcBox<MutexARCInner<T>> }
/// Create a mutex-protected ARC with the supplied data.
pub fn MutexARC<T:Owned>(user_data: T) -> MutexARC<T> {
mutex_arc_with_condvars(user_data, 1)
}
/**
* Create a mutex-protected ARC with the supplied data and a specified number
* of condvars (as sync::mutex_with_condvars).
*/
pub fn mutex_arc_with_condvars<T:Owned>(user_data: T,
num_condvars: uint) -> MutexARC<T> {
let data =
MutexARCInner { lock: mutex_with_condvars(num_condvars),
failed: false, data: user_data };
MutexARC { x: UnsafeAtomicRcBox::new(data) }
}
impl<T:Owned> Clone for MutexARC<T> {
/// Duplicate a mutex-protected ARC, as arc::clone.
fn clone(&self) -> MutexARC<T> {
// NB: Cloning the underlying mutex is not necessary. Its reference
// count would be exactly the same as the shared state's.
MutexARC { x: self.x.clone() }
}
}
pub impl<T:Owned> MutexARC<T> {
/**
* Access the underlying mutable data with mutual exclusion from other
* tasks. The argument closure will be run with the mutex locked; all
* other tasks wishing to access the data will block until the closure
* finishes running.
*
* The reason this function is 'unsafe' is that it is possible to
* construct a circular reference among multiple ARCs by mutating the
* underlying data. This creates potential for deadlock, but worse, this
* will guarantee a memory leak of all involved ARCs. Using mutex ARCs
* inside other ARCs is safe in the absence of circular references.
*
* If you wish to nest mutex_arcs, one strategy for ensuring safety at
* runtime is to add a "nesting level counter" inside the stored data, and
* when traversing the arcs, assert that they monotonically decrease.
*
* # Failure
*
* Failing while inside the ARC will unlock the ARC while unwinding, so
* that other tasks won't block forever. It will also poison the ARC:
* any tasks that subsequently try to access it (including those already
* blocked on the mutex) will also fail immediately.
*/
#[inline(always)]
unsafe fn access<U>(&self, blk: &fn(x: &mut T) -> U) -> U {
let state = self.x.get();
// Borrowck would complain about this if the function were
// not already unsafe. See borrow_rwlock, far below.
do (&(*state).lock).lock {
check_poison(true, (*state).failed);
let _z = PoisonOnFail(&mut (*state).failed);
blk(&mut (*state).data)
}
}
/// As access(), but with a condvar, as sync::mutex.lock_cond().
#[inline(always)]
unsafe fn access_cond<'x, 'c, U>(
&self,
blk: &fn(x: &'x mut T, c: &'c Condvar) -> U) -> U
{
let state = self.x.get();
do (&(*state).lock).lock_cond |cond| {
check_poison(true, (*state).failed);
let _z = PoisonOnFail(&mut (*state).failed);
blk(&mut (*state).data,
&Condvar {is_mutex: true,
failed: &mut (*state).failed,
cond: cond })
}
}
}
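// Illustrative sketch (comment only): exclusive access to shared mutable
// state, mirroring the tests at the bottom of this file. Each call to
// access() holds the mutex for the duration of the closure.
//
//     let counter = ~MutexARC(0);
//     let counter2 = ~counter.clone();
//     do task::spawn {
//         do counter2.access |n| { *n += 1 }
//     }
//     do counter.access |n| {
//         // No other task can touch *n until this closure returns.
//         let _snapshot = *n;
//     }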
// Common code for {mutex.access,rwlock.write}{,_cond}.
#[inline(always)]
#[doc(hidden)]
fn check_poison(is_mutex: bool, failed: bool) {
if failed {
if is_mutex {
fail!("Poisoned MutexARC - another task failed inside!");
} else {
fail!("Poisoned rw_arc - another task failed inside!");
}
}
}
#[doc(hidden)]
struct PoisonOnFail {
failed: *mut bool,
}
impl Drop for PoisonOnFail {
fn finalize(&self) {
unsafe {
/* assert!(!*self.failed);
-- might be false in case of cond.wait() */
if task::failing() {
*self.failed = true;
}
}
}
}
fn PoisonOnFail<'r>(failed: &'r mut bool) -> PoisonOnFail {
PoisonOnFail {
failed: ptr::to_mut_unsafe_ptr(failed)
}
}
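// Illustrative sketch of poisoning (comment only, mirroring
// test_mutex_arc_poison below): a task that fails inside access() runs
// PoisonOnFail's destructor, and every later access from another task then
// fails fast instead of observing half-updated state.
//
//     let arc = ~MutexARC(1);
//     let arc2 = ~arc.clone();
//     do task::try {
//         do arc2.access |one| { assert!(*one == 2) }  // fails, poisons the ARC
//     };
//     do arc.access |one| { ... }  // now fails: "Poisoned MutexARC - ..."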
/****************************************************************************
* R/W lock protected ARC
****************************************************************************/
#[doc(hidden)]
struct RWARCInner<T> { lock: RWlock, failed: bool, data: T }
/**
* A dual-mode ARC protected by a reader-writer lock. The data can be accessed
* mutably or immutably, and immutably-accessing tasks may run concurrently.
*
* Unlike mutex_arcs, rw_arcs are safe, because they cannot be nested.
*/
#[mutable]
struct RWARC<T> {
x: UnsafeAtomicRcBox<RWARCInner<T>>,
cant_nest: ()
}
/// Create a reader/writer ARC with the supplied data.
pub fn RWARC<T:Const + Owned>(user_data: T) -> RWARC<T> {
rw_arc_with_condvars(user_data, 1)
}
/**
* Create a reader/writer ARC with the supplied data and a specified number
* of condvars (as sync::rwlock_with_condvars).
*/
pub fn rw_arc_with_condvars<T:Const + Owned>(
user_data: T,
num_condvars: uint) -> RWARC<T>
{
let data =
RWARCInner { lock: rwlock_with_condvars(num_condvars),
failed: false, data: user_data };
RWARC { x: UnsafeAtomicRcBox::new(data), cant_nest: () }
}
pub impl<T:Const + Owned> RWARC<T> {
/// Duplicate a rwlock-protected ARC, as arc::clone.
fn clone(&self) -> RWARC<T> {
RWARC { x: self.x.clone(),
cant_nest: () }
}
}
pub impl<T:Const + Owned> RWARC<T> {
/**
* Access the underlying data mutably. Locks the rwlock in write mode;
* other readers and writers will block.
*
* # Failure
*
* Failing while inside the ARC will unlock the ARC while unwinding, so
* that other tasks won't block forever. As MutexARC.access, it will also
* poison the ARC, so subsequent readers and writers will both also fail.
*/
#[inline(always)]
fn write<U>(&self, blk: &fn(x: &mut T) -> U) -> U {
unsafe {
let state = self.x.get();
do (*borrow_rwlock(state)).write {
check_poison(false, (*state).failed);
let _z = PoisonOnFail(&mut (*state).failed);
blk(&mut (*state).data)
}
}
}
/// As write(), but with a condvar, as sync::rwlock.write_cond().
#[inline(always)]
fn write_cond<'x, 'c, U>(&self,
blk: &fn(x: &'x mut T, c: &'c Condvar) -> U)
-> U {
unsafe {
let state = self.x.get();
do (*borrow_rwlock(state)).write_cond |cond| {
check_poison(false, (*state).failed);
let _z = PoisonOnFail(&mut (*state).failed);
blk(&mut (*state).data,
&Condvar {is_mutex: false,
failed: &mut (*state).failed,
cond: cond})
}
}
}
/**
* Access the underlying data immutably. May run concurrently with other
* reading tasks.
*
* # Failure
*
* Failing will unlock the ARC while unwinding. However, unlike all other
* access modes, this will not poison the ARC.
*/
fn read<U>(&self, blk: &fn(x: &T) -> U) -> U {
let state = self.x.get();
unsafe {
do (*state).lock.read {
check_poison(false, (*state).failed);
blk(&(*state).data)
}
}
}
/**
* As write(), but with the ability to atomically 'downgrade' the lock.
* See sync::rwlock.write_downgrade(). The RWWriteMode token must be used
* to obtain the &mut T, and can be transformed into a RWReadMode token by
* calling downgrade(), after which a &T can be obtained instead.
* ~~~
* do arc.write_downgrade |write_mode| {
* do (&write_mode).write_cond |state, condvar| {
* ... exclusive access with mutable state ...
* }
* let read_mode = arc.downgrade(write_mode);
* do (&read_mode).read |state| {
* ... shared access with immutable state ...
* }
* }
* ~~~
*/
fn write_downgrade<U>(&self, blk: &fn(v: RWWriteMode<T>) -> U) -> U {
unsafe {
let state = self.x.get();
do (*borrow_rwlock(state)).write_downgrade |write_mode| {
check_poison(false, (*state).failed);
blk(RWWriteMode {
data: &mut (*state).data,
token: write_mode,
poison: PoisonOnFail(&mut (*state).failed)
})
}
}
}
/// To be called inside of the write_downgrade block.
fn downgrade<'a>(&self, token: RWWriteMode<'a, T>) -> RWReadMode<'a, T> {
unsafe {
// The rwlock should assert, on our behalf, that the token belongs to us.
let state = self.x.get();
let RWWriteMode {
data: data,
token: t,
poison: _poison
} = token;
// Let readers in
let new_token = (*state).lock.downgrade(t);
// Whatever region the input reference had, it will be safe to use
// the same region for the output reference. (The only 'unsafe' part
// of this cast is removing the mutability.)
let new_data = cast::transmute_immut(data);
// Downgrade ensured the token belonged to us. Just a sanity check.
assert!(ptr::ref_eq(&(*state).data, new_data));
// Produce new token
RWReadMode {
data: new_data,
token: new_token,
}
}
}
}
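// Illustrative sketch (comment only, compare test_rw_arc below): writers get
// exclusive access while readers may run concurrently with one another.
//
//     let arc = RWARC(0);
//     let arc2 = arc.clone();
//     do task::spawn {
//         do arc2.write |n| { *n += 1 }   // excludes all readers and writers
//     }
//     do arc.read |n| {                   // may overlap with other readers
//         assert!(*n == 0 || *n == 1);
//     }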
// Borrowck rightly complains about immutably aliasing the rwlock in order to
// lock it. This wraps the unsafety, with the justification that the 'lock'
// field is never overwritten; only 'failed' and 'data'.
#[doc(hidden)]
fn borrow_rwlock<T:Const + Owned>(state: *const RWARCInner<T>) -> *RWlock {
unsafe { cast::transmute(&const (*state).lock) }
}
/// The "write permission" token used for RWARC.write_downgrade().
pub struct RWWriteMode<'self, T> {
data: &'self mut T,
token: sync::RWlockWriteMode<'self>,
poison: PoisonOnFail,
}
/// The "read permission" token used for RWARC.write_downgrade().
pub struct RWReadMode<'self, T> {
data: &'self T,
token: sync::RWlockReadMode<'self>,
}
pub impl<'self, T:Const + Owned> RWWriteMode<'self, T> {
/// Access the pre-downgrade RWARC in write mode.
fn write<U>(&mut self, blk: &fn(x: &mut T) -> U) -> U {
match *self {
RWWriteMode {
data: &ref mut data,
token: ref token,
poison: _
} => {
do token.write {
blk(data)
}
}
}
}
/// Access the pre-downgrade RWARC in write mode with a condvar.
fn write_cond<'x, 'c, U>(&mut self,
blk: &fn(x: &'x mut T, c: &'c Condvar) -> U)
-> U {
match *self {
RWWriteMode {
data: &ref mut data,
token: ref token,
poison: ref poison
} => {
do token.write_cond |cond| {
unsafe {
let cvar = Condvar {
is_mutex: false,
failed: &mut *poison.failed,
cond: cond
};
blk(data, &cvar)
}
}
}
}
}
}
pub impl<'self, T:Const + Owned> RWReadMode<'self, T> {
/// Access the post-downgrade rwlock in read mode.
fn read<U>(&self, blk: &fn(x: &T) -> U) -> U {
match *self {
RWReadMode {
data: data,
token: ref token
} => {
do token.read { blk(data) }
}
}
}
}
/****************************************************************************
* Tests
****************************************************************************/
#[cfg(test)]
mod tests {
use arc::*;
use arc;
use core::cell::Cell;
use core::task;
#[test]
fn manually_share_arc() {
let v = ~[1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
let arc_v = arc::ARC(v);
let (p, c) = comm::stream();
do task::spawn() || {
let p = comm::PortSet::new();
c.send(p.chan());
let arc_v = p.recv();
let v = copy *arc::get::<~[int]>(&arc_v);
assert_eq!(v[3], 4);
};
let c = p.recv();
c.send(arc::clone(&arc_v));
assert_eq!((*arc::get(&arc_v))[2], 3);
assert_eq!(arc_v.get()[4], 5);
info!(arc_v);
}
#[test]
fn test_mutex_arc_condvar() {
let arc = ~MutexARC(false);
let arc2 = ~arc.clone();
let (p,c) = comm::oneshot();
let (c,p) = (Cell(c), Cell(p));
do task::spawn || {
// wait until parent gets in
comm::recv_one(p.take());
do arc2.access_cond |state, cond| {
*state = true;
cond.signal();
}
}
do arc.access_cond |state, cond| {
comm::send_one(c.take(), ());
assert!(!*state);
while !*state {
cond.wait();
}
}
}
#[test] #[should_fail] #[ignore(cfg(windows))]
fn test_arc_condvar_poison() {
let arc = ~MutexARC(1);
let arc2 = ~arc.clone();
let (p, c) = comm::stream();
do task::spawn_unlinked || {
let _ = p.recv();
do arc2.access_cond |one, cond| {
cond.signal();
// Parent should fail when it wakes up.
assert_eq!(*one, 0);
}
}
do arc.access_cond |one, cond| {
c.send(());
while *one == 1 {
cond.wait();
}
}
}
#[test] #[should_fail] #[ignore(cfg(windows))]
fn test_mutex_arc_poison() {
let arc = ~MutexARC(1);
let arc2 = ~arc.clone();
do task::try || {
do arc2.access |one| {
assert_eq!(*one, 2);
}
};
do arc.access |one| {
assert_eq!(*one, 1);
}
}
#[test] #[should_fail] #[ignore(cfg(windows))]
fn test_rw_arc_poison_wr() {
let arc = ~RWARC(1);
let arc2 = (*arc).clone();
do task::try || {
do arc2.write |one| {
assert_eq!(*one, 2);
}
};
do arc.read |one| {
assert_eq!(*one, 1);
}
}
#[test] #[should_fail] #[ignore(cfg(windows))]
fn test_rw_arc_poison_ww() {
let arc = ~RWARC(1);
let arc2 = (*arc).clone();
do task::try || {
do arc2.write |one| {
assert_eq!(*one, 2);
}
};
do arc.write |one| {
assert_eq!(*one, 1);
}
}
#[test] #[should_fail] #[ignore(cfg(windows))]
fn test_rw_arc_poison_dw() {
let arc = ~RWARC(1);
let arc2 = (*arc).clone();
do task::try || {
do arc2.write_downgrade |mut write_mode| {
do write_mode.write |one| {
assert_eq!(*one, 2);
}
}
};
do arc.write |one| {
assert_eq!(*one, 1);
}
}
#[test] #[ignore(cfg(windows))]
fn test_rw_arc_no_poison_rr() {
let arc = ~RWARC(1);
let arc2 = (*arc).clone();
do task::try || {
do arc2.read |one| {
assert_eq!(*one, 2);
}
};
do arc.read |one| {
assert_eq!(*one, 1);
}
}
#[test] #[ignore(cfg(windows))]
fn test_rw_arc_no_poison_rw() {
let arc = ~RWARC(1);
let arc2 = (*arc).clone();
do task::try || {
do arc2.read |one| {
assert_eq!(*one, 2);
}
};
do arc.write |one| {
assert_eq!(*one, 1);
}
}
#[test] #[ignore(cfg(windows))]
fn test_rw_arc_no_poison_dr() {
let arc = ~RWARC(1);
let arc2 = (*arc).clone();
do task::try || {
do arc2.write_downgrade |write_mode| {
let read_mode = arc2.downgrade(write_mode);
do (&read_mode).read |one| {
assert_eq!(*one, 2);
}
}
};
do arc.write |one| {
assert_eq!(*one, 1);
}
}
#[test]
fn test_rw_arc() {
let arc = ~RWARC(0);
let arc2 = (*arc).clone();
let (p,c) = comm::stream();
do task::spawn || {
do arc2.write |num| {
for 10.times {
let tmp = *num;
*num = -1;
task::yield();
*num = tmp + 1;
}
c.send(());
}
}
// Readers try to catch the writer in the act
let mut children = ~[];
for 5.times {
let arc3 = (*arc).clone();
let mut builder = task::task();
builder.future_result(|r| children.push(r));
do builder.spawn {
do arc3.read |num| {
assert!(*num >= 0);
}
}
}
// Wait for children to pass their asserts
for children.each |r| {
r.recv();
}
// Wait for writer to finish
p.recv();
do arc.read |num| {
assert_eq!(*num, 10);
}
}
#[test]
fn test_rw_downgrade() {
// (1) A downgrader gets in write mode and does cond.wait.
// (2) A writer gets in write mode, sets state to 42, and does signal.
// (3) Downgrader wakes, sets state to 31337.
// (4) Downgrader tells writer and all other readers to contend as it downgrades.
// (5) Writer attempts to set state back to 42, while downgraded task
// and all reader tasks assert that it's 31337.
let arc = ~RWARC(0);
// Reader tasks
let mut reader_convos = ~[];
for 10.times {
let ((rp1,rc1),(rp2,rc2)) = (comm::stream(),comm::stream());
reader_convos.push((rc1, rp2));
let arcn = (*arc).clone();
do task::spawn || {
rp1.recv(); // wait for downgrader to give go-ahead
do arcn.read |state| {
assert_eq!(*state, 31337);
rc2.send(());
}
}
}
// Writer task
let arc2 = (*arc).clone();
let ((wp1,wc1),(wp2,wc2)) = (comm::stream(),comm::stream());
do task::spawn || {
wp1.recv();
do arc2.write_cond |state, cond| {
assert_eq!(*state, 0);
*state = 42;
cond.signal();
}
wp1.recv();
do arc2.write |state| {
// This shouldn't happen until after the downgrade read
// section, and all other readers, finish.
assert_eq!(*state, 31337);
*state = 42;
}
wc2.send(());
}
// Downgrader (us)
do arc.write_downgrade |mut write_mode| {
do write_mode.write_cond |state, cond| {
wc1.send(()); // send to another writer who will wake us up
while *state == 0 {
cond.wait();
}
assert_eq!(*state, 42);
*state = 31337;
// send to other readers
for reader_convos.each |x| {
match *x {
(ref rc, _) => rc.send(()),
}
}
}
let read_mode = arc.downgrade(write_mode);
do (&read_mode).read |state| {
// complete handshake with other readers
for reader_convos.each |x| {
match *x {
(_, ref rp) => rp.recv(),
}
}
wc1.send(()); // tell writer to try again
assert_eq!(*state, 31337);
}
}
wp2.recv(); // complete handshake with writer
}
}