2013-03-01 21:57:05 -06:00
|
|
|
// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
|
2012-12-03 18:48:01 -06:00
|
|
|
// file at the top-level directory of this distribution and at
|
|
|
|
// http://rust-lang.org/COPYRIGHT.
|
|
|
|
//
|
|
|
|
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
|
|
|
|
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
|
|
|
|
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
|
|
|
|
// option. This file may not be copied, modified, or distributed
|
|
|
|
// except according to those terms.
|
|
|
|
|
2012-08-07 17:03:30 -05:00
|
|
|
/**
|
|
|
|
* The concurrency primitives you know and love.
|
|
|
|
*
|
|
|
|
* Maybe once we have a "core exports x only to std" mechanism, these can be
|
|
|
|
* in std.
|
|
|
|
*/
|
|
|
|
|
2013-05-17 17:28:44 -05:00
|
|
|
use core::prelude::*;
|
|
|
|
|
2013-06-02 18:16:40 -05:00
|
|
|
use core::borrow;
|
2013-05-24 21:35:29 -05:00
|
|
|
use core::comm;
|
2012-12-23 16:41:37 -06:00
|
|
|
use core::task;
|
2013-06-10 21:13:56 -05:00
|
|
|
use core::unstable::sync::{Exclusive, exclusive, UnsafeAtomicRcBox};
|
|
|
|
use core::unstable::atomics;
|
2012-12-23 16:41:37 -06:00
|
|
|
use core::util;
|
2012-08-07 17:03:30 -05:00
|
|
|
|
2012-08-07 19:09:46 -05:00
|
|
|
/****************************************************************************
|
|
|
|
* Internals
|
|
|
|
****************************************************************************/
|
|
|
|
|
2012-08-15 12:55:20 -05:00
|
|
|
// Each waiting task receives on one of these.
// (The receiving half of a oneshot pipe; a blocked task parks by recv'ing.)
#[doc(hidden)]
type WaitEnd = comm::PortOne<()>;
// The sending half of the same oneshot; a signaller wakes the task by
// sending the unit value into it.
#[doc(hidden)]
type SignalEnd = comm::ChanOne<()>;
|
2012-08-07 17:03:30 -05:00
|
|
|
// A doubly-ended queue of waiting tasks. Each waiting task pushes the
// SignalEnd of its personal oneshot into `tail`; wakers pop from `head`.
#[doc(hidden)]
struct Waitqueue { head: comm::Port<SignalEnd>,
                   tail: comm::Chan<SignalEnd> }
|
2012-08-07 17:03:30 -05:00
|
|
|
|
2013-06-10 21:13:56 -05:00
|
|
|
#[doc(hidden)]
|
2012-08-26 20:14:39 -05:00
|
|
|
fn new_waitqueue() -> Waitqueue {
|
2013-02-02 05:10:12 -06:00
|
|
|
let (block_head, block_tail) = comm::stream();
|
2013-02-15 01:30:30 -06:00
|
|
|
Waitqueue { head: block_head, tail: block_tail }
|
2012-08-15 13:11:39 -05:00
|
|
|
}
|
|
|
|
|
2012-08-08 21:49:22 -05:00
|
|
|
// Signals one live task from the queue. Returns true if a task was woken,
// false if the queue was empty.
#[doc(hidden)]
fn signal_waitqueue(q: &Waitqueue) -> bool {
    // The peek is mandatory to make sure recv doesn't block.
    if q.head.peek() {
        // Pop and send a wakeup signal. If the waiter was killed, its port
        // will have closed. Keep trying until we get a live task.
        if comm::try_send_one(q.head.recv(), ()) {
            true
        } else {
            // Dead waiter; recurse to try the next one in the queue.
            signal_waitqueue(q)
        }
    } else {
        false
    }
}
|
|
|
|
|
2012-08-15 12:55:20 -05:00
|
|
|
// Wakes every task currently enqueued. Returns the number of *live* tasks
// actually woken; waiters whose ports have closed (killed tasks) are
// drained but not counted.
#[doc(hidden)]
fn broadcast_waitqueue(q: &Waitqueue) -> uint {
    let mut count = 0;
    while q.head.peek() {
        if comm::try_send_one(q.head.recv(), ()) {
            count += 1;
        }
    }
    count
}
|
|
|
|
|
|
|
|
// The building-block used to make semaphores, mutexes, and rwlocks.
#[doc(hidden)]
struct SemInner<Q> {
    // Semaphore count. When negative, its magnitude is the number of tasks
    // blocked in acquire() (see the acquire/release protocol below).
    count: int,
    // Tasks blocked on the semaphore itself.
    waiters: Waitqueue,
    // Can be either unit or another waitqueue. Some sems shouldn't come with
    // a condition variable attached, others should.
    blocked: Q
}
|
2013-03-07 19:23:14 -06:00
|
|
|
|
2012-08-15 12:55:20 -05:00
|
|
|
// Newtype over the little-lock-protected inner state; all access goes
// through the Exclusive's `with` closure.
#[doc(hidden)]
struct Sem<Q>(Exclusive<SemInner<Q>>);
|
2012-08-07 17:03:30 -05:00
|
|
|
|
2012-08-15 12:55:20 -05:00
|
|
|
#[doc(hidden)]
|
2013-06-05 19:56:24 -05:00
|
|
|
fn new_sem<Q:Send>(count: int, q: Q) -> Sem<Q> {
|
2012-08-26 20:14:39 -05:00
|
|
|
Sem(exclusive(SemInner {
|
2013-03-01 21:57:05 -06:00
|
|
|
count: count, waiters: new_waitqueue(), blocked: q }))
|
2012-08-09 19:22:43 -05:00
|
|
|
}
|
2012-08-15 12:55:20 -05:00
|
|
|
// Build a semaphore that also carries `num_condvars` condition-variable
// waitqueues in its `blocked` field.
#[doc(hidden)]
fn new_sem_and_signal(count: int, num_condvars: uint)
        -> Sem<~[Waitqueue]> {
    let mut queues = ~[];
    // One waitqueue per condvar id (0..num_condvars-1).
    for num_condvars.times {
        queues.push(new_waitqueue());
    }
    new_sem(count, queues)
}
|
|
|
|
|
2012-08-15 12:55:20 -05:00
|
|
|
#[doc(hidden)]
impl<Q:Send> Sem<Q> {
    // Decrement the count; if it goes negative, enqueue ourselves and block
    // until a release() sends us a wakeup through our oneshot.
    pub fn acquire(&self) {
        unsafe {
            let mut waiter_nobe = None;
            do (**self).with |state| {
                state.count -= 1;
                // count < 0 means contended: |count| tasks are waiting.
                if state.count < 0 {
                    // Create waiter nobe.
                    let (WaitEnd, SignalEnd) = comm::oneshot();
                    // Tell outer scope we need to block.
                    waiter_nobe = Some(WaitEnd);
                    // Enqueue ourself.
                    state.waiters.tail.send(SignalEnd);
                }
            }
            // Uncomment if you wish to test for sem races. Not valgrind-friendly.
            /* for 1000.times { task::yield(); } */
            // Need to wait outside the exclusive.
            if waiter_nobe.is_some() {
                let _ = comm::recv_one(waiter_nobe.unwrap());
            }
        }
    }

    // Increment the count; if it was negative (i.e. tasks are blocked in
    // acquire()), hand the slot directly to one of them.
    pub fn release(&self) {
        unsafe {
            do (**self).with |state| {
                state.count += 1;
                if state.count <= 0 {
                    signal_waitqueue(&state.waiters);
                }
            }
        }
    }
}
|
2012-08-26 20:14:39 -05:00
|
|
|
// FIXME(#3154) move both copies of this into Sem<Q>, and unify the 2 structs
#[doc(hidden)]
impl Sem<()> {
    // Acquire, run `blk`, and release on scope exit. The SemRelease guard's
    // destructor performs the release, so the semaphore is released even if
    // `blk` fails and unwinds.
    pub fn access<U>(&self, blk: &fn() -> U) -> U {
        let mut release = None;
        unsafe {
            // Acquire unkillably so a kill signal can't leave the count
            // decremented without a matching release guard in place.
            do task::unkillable {
                self.acquire();
                release = Some(SemRelease(self));
            }
        }
        blk()
    }
}
|
2013-05-31 17:17:22 -05:00
|
|
|
|
2012-08-15 12:55:20 -05:00
|
|
|
// Same shape as Sem<()>::access, but builds the condvar-capable release
// guard instead. (See FIXME(#3154) above about unifying the two.)
#[doc(hidden)]
impl Sem<~[Waitqueue]> {
    // Acquire, run `blk`, release on scope exit (even on unwinding).
    pub fn access<U>(&self, blk: &fn() -> U) -> U {
        let mut release = None;
        unsafe {
            do task::unkillable {
                self.acquire();
                release = Some(SemAndSignalRelease(self));
            }
        }
        blk()
    }
}
|
2012-08-07 17:03:30 -05:00
|
|
|
|
2012-10-11 17:32:09 -05:00
|
|
|
// FIXME(#3588) should go inside of access()
// RAII-style guards whose destructors release the borrowed semaphore.
#[doc(hidden)]
type SemRelease<'self> = SemReleaseGeneric<'self, ()>;
#[doc(hidden)]
type SemAndSignalRelease<'self> = SemReleaseGeneric<'self, ~[Waitqueue]>;
#[doc(hidden)]
struct SemReleaseGeneric<'self, Q> { sem: &'self Sem<Q> }
|
2012-11-13 20:38:18 -06:00
|
|
|
|
2013-06-10 21:13:56 -05:00
|
|
|
// Destructor releases the semaphore; this is what unlocks on normal exit
// and during unwinding alike.
#[doc(hidden)]
#[unsafe_destructor]
impl<'self, Q:Send> Drop for SemReleaseGeneric<'self, Q> {
    fn drop(&self) {
        self.sem.release();
    }
}
|
2012-09-04 19:22:09 -05:00
|
|
|
|
2013-06-10 21:13:56 -05:00
|
|
|
#[doc(hidden)]
|
2013-03-25 15:21:04 -05:00
|
|
|
fn SemRelease<'r>(sem: &'r Sem<()>) -> SemRelease<'r> {
|
2012-12-06 21:27:44 -06:00
|
|
|
SemReleaseGeneric {
|
2012-09-04 19:22:09 -05:00
|
|
|
sem: sem
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2013-06-10 21:13:56 -05:00
|
|
|
#[doc(hidden)]
|
2013-03-25 15:21:04 -05:00
|
|
|
fn SemAndSignalRelease<'r>(sem: &'r Sem<~[Waitqueue]>)
|
|
|
|
-> SemAndSignalRelease<'r> {
|
2012-12-06 21:27:44 -06:00
|
|
|
SemReleaseGeneric {
|
2012-09-04 19:22:09 -05:00
|
|
|
sem: sem
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2013-06-12 19:50:16 -05:00
|
|
|
// FIXME(#3598): Want to use an Option down below, but we need a custom enum
// that's not polymorphic to get around the fact that lifetimes are invariant
// inside of type parameters.
// Whether a condvar must hold an extra order lock while reacquiring its
// semaphore (used by rwlocks; see the Condvar struct comments).
enum ReacquireOrderLock<'self> {
    Nothing, // c.c
    Just(&'self Semaphore),
}
|
|
|
|
|
2012-08-07 19:09:46 -05:00
|
|
|
/// A mechanism for atomic-unlock-and-deschedule blocking and signalling.
pub struct Condvar<'self> {
    // The 'Sem' object associated with this condvar. This is the one that's
    // atomically-unlocked-and-descheduled upon and reacquired during wakeup.
    priv sem: &'self Sem<~[Waitqueue]>,
    // This is (can be) an extra semaphore which is held around the reacquire
    // operation on the first one. This is only used in cvars associated with
    // rwlocks, and is needed to ensure that, when a downgrader is trying to
    // hand off the access lock (which would be the first field, here), a 2nd
    // writer waking up from a cvar wait can't race with a reader to steal it,
    // See the comment in write_cond for more detail.
    priv order: ReacquireOrderLock<'self>,
}
|
2012-11-13 20:38:18 -06:00
|
|
|
|
2013-03-20 20:18:57 -05:00
|
|
|
// No-op destructor. NOTE(review): presumably present so Condvar gets
// destructor semantics (non-copyable) — confirm against callers.
#[unsafe_destructor]
impl<'self> Drop for Condvar<'self> { fn drop(&self) {} }
|
2012-08-07 19:09:46 -05:00
|
|
|
|
2013-05-31 17:17:22 -05:00
|
|
|
impl<'self> Condvar<'self> {
|
2012-08-15 12:55:20 -05:00
|
|
|
/**
|
|
|
|
* Atomically drop the associated lock, and block until a signal is sent.
|
|
|
|
*
|
|
|
|
* # Failure
|
|
|
|
* A task which is killed (i.e., by linked failure with another task)
|
|
|
|
* while waiting on a condition variable will wake up, fail, and unlock
|
|
|
|
* the associated lock as it unwinds.
|
|
|
|
*/
|
2013-05-31 17:17:22 -05:00
|
|
|
pub fn wait(&self) { self.wait_on(0) }
|
2013-02-26 13:34:00 -06:00
|
|
|
|
2012-08-14 22:21:39 -05:00
|
|
|
/**
|
|
|
|
* As wait(), but can specify which of multiple condition variables to
|
|
|
|
* wait on. Only a signal_on() or broadcast_on() with the same condvar_id
|
|
|
|
* will wake this thread.
|
|
|
|
*
|
|
|
|
* The associated lock must have been initialised with an appropriate
|
|
|
|
* number of condvars. The condvar_id must be between 0 and num_condvars-1
|
|
|
|
* or else this call will fail.
|
|
|
|
*
|
|
|
|
* wait() is equivalent to wait_on(0).
|
|
|
|
*/
|
2013-05-31 17:17:22 -05:00
|
|
|
pub fn wait_on(&self, condvar_id: uint) {
|
2012-08-08 21:49:22 -05:00
|
|
|
// Create waiter nobe.
|
2013-02-02 05:10:12 -06:00
|
|
|
let (WaitEnd, SignalEnd) = comm::oneshot();
|
2013-02-15 01:30:30 -06:00
|
|
|
let mut WaitEnd = Some(WaitEnd);
|
|
|
|
let mut SignalEnd = Some(SignalEnd);
|
2012-08-20 14:23:37 -05:00
|
|
|
let mut reacquire = None;
|
|
|
|
let mut out_of_bounds = None;
|
2012-08-07 19:09:46 -05:00
|
|
|
unsafe {
|
2012-08-08 21:49:22 -05:00
|
|
|
do task::unkillable {
|
2012-08-14 22:21:39 -05:00
|
|
|
// Release lock, 'atomically' enqueuing ourselves in so doing.
|
|
|
|
do (**self.sem).with |state| {
|
2013-05-14 04:52:12 -05:00
|
|
|
if condvar_id < state.blocked.len() {
|
2012-08-14 22:21:39 -05:00
|
|
|
// Drop the lock.
|
|
|
|
state.count += 1;
|
|
|
|
if state.count <= 0 {
|
|
|
|
signal_waitqueue(&state.waiters);
|
|
|
|
}
|
|
|
|
// Enqueue ourself to be woken up by a signaller.
|
2013-03-16 14:49:12 -05:00
|
|
|
let SignalEnd = SignalEnd.swap_unwrap();
|
2013-02-15 01:30:30 -06:00
|
|
|
state.blocked[condvar_id].tail.send(SignalEnd);
|
2012-08-14 22:21:39 -05:00
|
|
|
} else {
|
2013-05-14 04:52:12 -05:00
|
|
|
out_of_bounds = Some(state.blocked.len());
|
2012-08-14 22:21:39 -05:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2012-08-08 21:49:22 -05:00
|
|
|
// If yield checks start getting inserted anywhere, we can be
|
|
|
|
// killed before or after enqueueing. Deciding whether to
|
|
|
|
// unkillably reacquire the lock needs to happen atomically
|
|
|
|
// wrt enqueuing.
|
2012-08-14 22:21:39 -05:00
|
|
|
if out_of_bounds.is_none() {
|
2013-06-12 19:50:16 -05:00
|
|
|
reacquire = Some(CondvarReacquire { sem: self.sem,
|
|
|
|
order: self.order });
|
2012-08-07 19:09:46 -05:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2012-08-14 22:21:39 -05:00
|
|
|
do check_cvar_bounds(out_of_bounds, condvar_id, "cond.wait_on()") {
|
|
|
|
// Unconditionally "block". (Might not actually block if a
|
|
|
|
// signaller already sent -- I mean 'unconditionally' in contrast
|
|
|
|
// with acquire().)
|
2013-03-16 14:49:12 -05:00
|
|
|
let _ = comm::recv_one(WaitEnd.swap_unwrap());
|
2012-08-14 22:21:39 -05:00
|
|
|
}
|
2012-08-09 21:07:37 -05:00
|
|
|
|
|
|
|
// This is needed for a failing condition variable to reacquire the
|
|
|
|
// mutex during unwinding. As long as the wrapper (mutex, etc) is
|
|
|
|
// bounded in when it gets released, this shouldn't hang forever.
|
2013-06-12 19:50:16 -05:00
|
|
|
struct CondvarReacquire<'self> {
|
2013-03-14 13:22:51 -05:00
|
|
|
sem: &'self Sem<~[Waitqueue]>,
|
2013-06-12 19:50:16 -05:00
|
|
|
order: ReacquireOrderLock<'self>,
|
2012-11-13 20:38:18 -06:00
|
|
|
}
|
|
|
|
|
2013-03-20 20:18:57 -05:00
|
|
|
#[unsafe_destructor]
|
2013-06-12 19:50:16 -05:00
|
|
|
impl<'self> Drop for CondvarReacquire<'self> {
|
2013-06-20 20:06:13 -05:00
|
|
|
fn drop(&self) {
|
2012-11-13 20:38:18 -06:00
|
|
|
unsafe {
|
|
|
|
// Needs to succeed, instead of itself dying.
|
|
|
|
do task::unkillable {
|
2013-06-12 19:50:16 -05:00
|
|
|
match self.order {
|
|
|
|
Just(lock) => do lock.access {
|
|
|
|
self.sem.acquire();
|
|
|
|
},
|
|
|
|
Nothing => {
|
|
|
|
self.sem.acquire();
|
|
|
|
},
|
|
|
|
}
|
2012-11-13 20:38:18 -06:00
|
|
|
}
|
2012-08-09 21:07:37 -05:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2012-08-07 19:09:46 -05:00
|
|
|
}
|
|
|
|
|
|
|
|
/// Wake up a blocked task. Returns false if there was no blocked task.
|
2013-05-31 17:17:22 -05:00
|
|
|
pub fn signal(&self) -> bool { self.signal_on(0) }
|
2013-02-26 13:34:00 -06:00
|
|
|
|
2012-08-14 22:21:39 -05:00
|
|
|
/// As signal, but with a specified condvar_id. See wait_on.
|
2013-05-31 17:17:22 -05:00
|
|
|
pub fn signal_on(&self, condvar_id: uint) -> bool {
|
2013-05-23 21:12:16 -05:00
|
|
|
unsafe {
|
|
|
|
let mut out_of_bounds = None;
|
|
|
|
let mut result = false;
|
|
|
|
do (**self.sem).with |state| {
|
|
|
|
if condvar_id < state.blocked.len() {
|
|
|
|
result = signal_waitqueue(&state.blocked[condvar_id]);
|
|
|
|
} else {
|
|
|
|
out_of_bounds = Some(state.blocked.len());
|
|
|
|
}
|
|
|
|
}
|
|
|
|
do check_cvar_bounds(out_of_bounds, condvar_id, "cond.signal_on()") {
|
|
|
|
result
|
2012-08-07 19:09:46 -05:00
|
|
|
}
|
2012-08-14 22:21:39 -05:00
|
|
|
}
|
2012-08-07 19:09:46 -05:00
|
|
|
}
|
|
|
|
|
|
|
|
/// Wake up all blocked tasks. Returns the number of tasks woken.
|
2013-05-31 17:17:22 -05:00
|
|
|
pub fn broadcast(&self) -> uint { self.broadcast_on(0) }
|
2013-02-26 13:34:00 -06:00
|
|
|
|
2012-08-14 22:21:39 -05:00
|
|
|
/// As broadcast, but with a specified condvar_id. See wait_on.
|
2013-05-31 17:17:22 -05:00
|
|
|
pub fn broadcast_on(&self, condvar_id: uint) -> uint {
|
2012-08-20 14:23:37 -05:00
|
|
|
let mut out_of_bounds = None;
|
|
|
|
let mut queue = None;
|
2013-05-23 21:12:16 -05:00
|
|
|
unsafe {
|
|
|
|
do (**self.sem).with |state| {
|
|
|
|
if condvar_id < state.blocked.len() {
|
|
|
|
// To avoid :broadcast_heavy, we make a new waitqueue,
|
|
|
|
// swap it out with the old one, and broadcast on the
|
|
|
|
// old one outside of the little-lock.
|
|
|
|
queue = Some(util::replace(&mut state.blocked[condvar_id],
|
|
|
|
new_waitqueue()));
|
|
|
|
} else {
|
|
|
|
out_of_bounds = Some(state.blocked.len());
|
|
|
|
}
|
|
|
|
}
|
|
|
|
do check_cvar_bounds(out_of_bounds, condvar_id, "cond.signal_on()") {
|
|
|
|
let queue = queue.swap_unwrap();
|
|
|
|
broadcast_waitqueue(&queue)
|
2012-08-07 19:09:46 -05:00
|
|
|
}
|
2012-08-14 22:21:39 -05:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Checks whether a condvar ID was out of bounds, and fails if so, or does
|
|
|
|
// something else next on success.
|
2013-06-18 16:45:18 -05:00
|
|
|
#[inline]
|
2012-08-15 12:55:20 -05:00
|
|
|
#[doc(hidden)]
|
2012-08-20 14:23:37 -05:00
|
|
|
fn check_cvar_bounds<U>(out_of_bounds: Option<uint>, id: uint, act: &str,
|
2013-03-07 16:38:38 -06:00
|
|
|
blk: &fn() -> U) -> U {
|
2012-08-14 22:21:39 -05:00
|
|
|
match out_of_bounds {
|
2012-08-20 14:23:37 -05:00
|
|
|
Some(0) =>
|
2013-05-09 06:52:07 -05:00
|
|
|
fail!("%s with illegal ID %u - this lock has no condvars!", act, id),
|
2012-08-20 14:23:37 -05:00
|
|
|
Some(length) =>
|
2013-05-09 06:52:07 -05:00
|
|
|
fail!("%s with illegal ID %u - ID must be less than %u", act, id, length),
|
2012-08-20 14:23:37 -05:00
|
|
|
None => blk()
|
2012-08-07 19:09:46 -05:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2012-08-15 12:55:20 -05:00
|
|
|
#[doc(hidden)]
impl Sem<~[Waitqueue]> {
    // The only other places that condvars get built are rwlock.write_cond()
    // and rwlock_write_mode.
    // Acquire the sem, run `blk` with a condvar handle bound to this sem
    // (no extra reacquire-order lock), releasing on scope exit.
    pub fn access_cond<U>(&self, blk: &fn(c: &Condvar) -> U) -> U {
        do self.access {
            blk(&Condvar { sem: self, order: Nothing })
        }
    }
}
|
|
|
|
|
|
|
|
/****************************************************************************
|
|
|
|
* Semaphores
|
|
|
|
****************************************************************************/
|
|
|
|
|
|
|
|
/// A counting, blocking, bounded-waiting semaphore.
// Thin wrapper around a Sem with no condvar waitqueues attached.
struct Semaphore { priv sem: Sem<()> }
|
2012-08-07 19:09:46 -05:00
|
|
|
|
|
|
|
/// Create a new semaphore with the specified count.
|
2013-01-11 05:51:16 -06:00
|
|
|
pub fn semaphore(count: int) -> Semaphore {
|
2012-08-26 20:14:39 -05:00
|
|
|
Semaphore { sem: new_sem(count, ()) }
|
2012-08-09 21:31:05 -05:00
|
|
|
}
|
2012-08-07 19:09:46 -05:00
|
|
|
|
2013-02-14 13:47:00 -06:00
|
|
|
impl Clone for Semaphore {
    /// Create a new handle to the semaphore.
    // Clones the inner Exclusive, so both handles share one semaphore state.
    fn clone(&self) -> Semaphore {
        Semaphore { sem: Sem((*self.sem).clone()) }
    }
}
|
2012-08-07 19:09:46 -05:00
|
|
|
|
2013-05-31 17:17:22 -05:00
|
|
|
impl Semaphore {
    /**
     * Acquire a resource represented by the semaphore. Blocks if necessary
     * until resource(s) become available.
     */
    pub fn acquire(&self) { (&self.sem).acquire() }

    /**
     * Release a held resource represented by the semaphore. Wakes a blocked
     * contending task, if any exist. Won't block the caller.
     */
    pub fn release(&self) { (&self.sem).release() }

    /// Run a function with ownership of one of the semaphore's resources.
    /// The resource is released when `blk` returns (or unwinds).
    pub fn access<U>(&self, blk: &fn() -> U) -> U { (&self.sem).access(blk) }
}
|
|
|
|
|
|
|
|
/****************************************************************************
|
|
|
|
* Mutexes
|
|
|
|
****************************************************************************/
|
|
|
|
|
|
|
|
/**
 * A blocking, bounded-waiting, mutual exclusion lock with an associated
 * FIFO condition variable.
 *
 * # Failure
 * A task which fails while holding a mutex will unlock the mutex as it
 * unwinds.
 */
// Implemented as a binary semaphore (count 1) carrying condvar waitqueues.
pub struct Mutex { priv sem: Sem<~[Waitqueue]> }
|
2012-08-07 19:09:46 -05:00
|
|
|
|
2012-08-14 22:21:39 -05:00
|
|
|
/// Create a new mutex, with one associated condvar.
// Equivalent to mutex_with_condvars(1).
pub fn Mutex() -> Mutex { mutex_with_condvars(1) }
|
2012-08-14 22:21:39 -05:00
|
|
|
/**
 * Create a new mutex, with a specified number of associated condvars. This
 * will allow calling wait_on/signal_on/broadcast_on with condvar IDs between
 * 0 and num_condvars-1. (If num_condvars is 0, lock_cond will be allowed but
 * any operations on the condvar will fail.)
 */
pub fn mutex_with_condvars(num_condvars: uint) -> Mutex {
    // A mutex is a semaphore initialised with count 1.
    Mutex { sem: new_sem_and_signal(1, num_condvars) }
}
|
2012-08-07 19:09:46 -05:00
|
|
|
|
2013-02-14 13:47:00 -06:00
|
|
|
impl Clone for Mutex {
    /// Create a new handle to the mutex.
    // Clones the inner Exclusive, so both handles guard the same lock.
    fn clone(&self) -> Mutex { Mutex { sem: Sem((*self.sem).clone()) } }
}
|
2012-08-07 19:09:46 -05:00
|
|
|
|
2013-05-31 17:17:22 -05:00
|
|
|
impl Mutex {
    /// Run a function with ownership of the mutex.
    pub fn lock<U>(&self, blk: &fn() -> U) -> U { (&self.sem).access(blk) }

    /// Run a function with ownership of the mutex and a handle to a condvar.
    pub fn lock_cond<U>(&self, blk: &fn(c: &Condvar) -> U) -> U {
        (&self.sem).access_cond(blk)
    }
}
|
|
|
|
|
|
|
|
/****************************************************************************
|
|
|
|
* Reader-writer locks
|
|
|
|
****************************************************************************/
|
|
|
|
|
2012-08-09 19:22:43 -05:00
|
|
|
// NB: Wikipedia - Readers-writers_problem#The_third_readers-writers_problem
|
|
|
|
|
2012-08-15 12:55:20 -05:00
|
|
|
#[doc(hidden)]
struct RWlockInner {
    // You might ask, "Why don't you need to use an atomic for the mode flag?"
    // This flag affects the behaviour of readers (for plain readers, they
    // assert on it; for downgraders, they use it to decide which mode to
    // unlock for). Consider that the flag is only unset when the very last
    // reader exits; therefore, it can never be unset during a reader/reader
    // (or reader/downgrader) race.
    // By the way, if we didn't care about the assert in the read unlock path,
    // we could instead store the mode flag in write_downgrade's stack frame,
    // and have the downgrade tokens store a borrowed pointer to it.
    read_mode: bool,
    // The only way the count flag is ever accessed is with xadd. Since it is
    // a read-modify-write operation, multiple xadds on different cores will
    // always be consistent with respect to each other, so a monotonic/relaxed
    // consistency ordering suffices (i.e., no extra barriers are needed).
    // FIXME(#6598): The atomics module has no relaxed ordering flag, so I use
    // acquire/release orderings superfluously. Change these someday.
    read_count: atomics::AtomicUint,
}
|
|
|
|
|
2012-08-15 12:55:20 -05:00
|
|
|
/**
 * A blocking, no-starvation, reader-writer lock with an associated condvar.
 *
 * # Failure
 * A task which fails while holding an rwlock will unlock the rwlock as it
 * unwinds.
 */
pub struct RWlock {
    // Serialises entry so readers can't starve writers (and vice versa).
    priv order_lock: Semaphore,
    // The lock that actually protects the data; held by a writer, or by
    // the "reader cloud" collectively while any readers are in.
    priv access_lock: Sem<~[Waitqueue]>,
    // Shared read_mode flag and xadd'd reader count.
    priv state: UnsafeAtomicRcBox<RWlockInner>,
}
|
|
|
|
|
2012-08-14 22:21:39 -05:00
|
|
|
/// Create a new rwlock, with one associated condvar.
// Equivalent to rwlock_with_condvars(1).
pub fn RWlock() -> RWlock { rwlock_with_condvars(1) }
|
2012-08-14 22:21:39 -05:00
|
|
|
|
|
|
|
/**
 * Create a new rwlock, with a specified number of associated condvars.
 * Similar to mutex_with_condvars.
 */
pub fn rwlock_with_condvars(num_condvars: uint) -> RWlock {
    // Starts in write mode (read_mode false) with no readers.
    let state = UnsafeAtomicRcBox::new(RWlockInner {
        read_mode: false,
        read_count: atomics::AtomicUint::new(0),
    });
    RWlock { order_lock: semaphore(1),
             access_lock: new_sem_and_signal(1, num_condvars),
             state: state, }
}
|
|
|
|
|
2013-05-31 17:17:22 -05:00
|
|
|
impl RWlock {
|
2012-08-09 21:07:37 -05:00
|
|
|
/// Create a new handle to the rwlock.
|
2013-05-31 17:17:22 -05:00
|
|
|
pub fn clone(&self) -> RWlock {
|
2012-08-26 20:14:39 -05:00
|
|
|
RWlock { order_lock: (&(self.order_lock)).clone(),
|
|
|
|
access_lock: Sem((*self.access_lock).clone()),
|
2012-08-09 19:22:43 -05:00
|
|
|
state: self.state.clone() }
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Run a function with the rwlock in read mode. Calls to 'read' from other
|
|
|
|
* tasks may run concurrently with this one.
|
|
|
|
*/
|
2013-05-31 17:17:22 -05:00
|
|
|
pub fn read<U>(&self, blk: &fn() -> U) -> U {
|
2012-08-20 14:23:37 -05:00
|
|
|
let mut release = None;
|
2012-08-09 21:07:37 -05:00
|
|
|
unsafe {
|
|
|
|
do task::unkillable {
|
|
|
|
do (&self.order_lock).access {
|
2013-06-10 21:13:56 -05:00
|
|
|
let state = &mut *self.state.get();
|
|
|
|
let old_count = state.read_count.fetch_add(1, atomics::Acquire);
|
|
|
|
if old_count == 0 {
|
2012-08-09 21:07:37 -05:00
|
|
|
(&self.access_lock).acquire();
|
2013-06-10 21:13:56 -05:00
|
|
|
state.read_mode = true;
|
2012-08-09 21:07:37 -05:00
|
|
|
}
|
|
|
|
}
|
2012-08-26 20:14:39 -05:00
|
|
|
release = Some(RWlockReleaseRead(self));
|
2012-08-09 19:22:43 -05:00
|
|
|
}
|
|
|
|
}
|
2012-08-09 21:07:37 -05:00
|
|
|
blk()
|
2012-08-09 19:22:43 -05:00
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Run a function with the rwlock in write mode. No calls to 'read' or
|
|
|
|
* 'write' from other tasks will run concurrently with this one.
|
|
|
|
*/
|
2013-05-31 17:17:22 -05:00
|
|
|
pub fn write<U>(&self, blk: &fn() -> U) -> U {
|
2012-08-09 21:07:37 -05:00
|
|
|
unsafe {
|
|
|
|
do task::unkillable {
|
|
|
|
(&self.order_lock).acquire();
|
2012-08-09 22:15:30 -05:00
|
|
|
do (&self.access_lock).access {
|
|
|
|
(&self.order_lock).release();
|
|
|
|
task::rekillable(blk)
|
|
|
|
}
|
2012-08-09 21:07:37 -05:00
|
|
|
}
|
2012-08-09 19:22:43 -05:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* As write(), but also with a handle to a condvar. Waiting on this
|
|
|
|
* condvar will allow readers and writers alike to take the rwlock before
|
2012-08-09 22:15:30 -05:00
|
|
|
* the waiting task is signalled. (Note: a writer that waited and then
|
|
|
|
* was signalled might reacquire the lock before other waiting writers.)
|
2012-08-09 19:22:43 -05:00
|
|
|
*/
|
2013-05-31 17:17:22 -05:00
|
|
|
pub fn write_cond<U>(&self, blk: &fn(c: &Condvar) -> U) -> U {
|
2013-06-12 19:50:16 -05:00
|
|
|
// It's important to thread our order lock into the condvar, so that
|
|
|
|
// when a cond.wait() wakes up, it uses it while reacquiring the
|
|
|
|
// access lock. If we permitted a waking-up writer to "cut in line",
|
|
|
|
// there could arise a subtle race when a downgrader attempts to hand
|
|
|
|
// off the reader cloud lock to a waiting reader. This race is tested
|
|
|
|
// in arc.rs (test_rw_write_cond_downgrade_read_race) and looks like:
|
|
|
|
// T1 (writer) T2 (downgrader) T3 (reader)
|
|
|
|
// [in cond.wait()]
|
|
|
|
// [locks for writing]
|
|
|
|
// [holds access_lock]
|
|
|
|
// [is signalled, perhaps by
|
|
|
|
// downgrader or a 4th thread]
|
|
|
|
// tries to lock access(!)
|
|
|
|
// lock order_lock
|
|
|
|
// xadd read_count[0->1]
|
|
|
|
// tries to lock access
|
|
|
|
// [downgrade]
|
|
|
|
// xadd read_count[1->2]
|
|
|
|
// unlock access
|
|
|
|
// Since T1 contended on the access lock before T3 did, it will steal
|
|
|
|
// the lock handoff. Adding order_lock in the condvar reacquire path
|
|
|
|
// solves this because T1 will hold order_lock while waiting on access,
|
|
|
|
// which will cause T3 to have to wait until T1 finishes its write,
|
|
|
|
// which can't happen until T2 finishes the downgrade-read entirely.
|
2013-06-13 14:20:38 -05:00
|
|
|
// The astute reader will also note that making waking writers use the
|
|
|
|
// order_lock is better for not starving readers.
|
2012-08-09 22:15:30 -05:00
|
|
|
unsafe {
|
|
|
|
do task::unkillable {
|
|
|
|
(&self.order_lock).acquire();
|
|
|
|
do (&self.access_lock).access_cond |cond| {
|
|
|
|
(&self.order_lock).release();
|
2013-06-12 19:50:16 -05:00
|
|
|
do task::rekillable {
|
|
|
|
let opt_lock = Just(&self.order_lock);
|
|
|
|
blk(&Condvar { order: opt_lock, ..*cond })
|
|
|
|
}
|
2012-08-09 22:15:30 -05:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2012-08-09 19:22:43 -05:00
|
|
|
}
|
|
|
|
|
2012-08-13 14:22:32 -05:00
|
|
|
/**
|
|
|
|
* As write(), but with the ability to atomically 'downgrade' the lock;
|
|
|
|
* i.e., to become a reader without letting other writers get the lock in
|
|
|
|
* the meantime (such as unlocking and then re-locking as a reader would
|
|
|
|
* do). The block takes a "write mode token" argument, which can be
|
|
|
|
* transformed into a "read mode token" by calling downgrade(). Example:
|
2013-05-27 08:49:54 -05:00
|
|
|
*
|
|
|
|
* # Example
|
|
|
|
*
|
|
|
|
* ~~~ {.rust}
|
2013-06-13 14:20:38 -05:00
|
|
|
* do lock.write_downgrade |mut write_token| {
|
|
|
|
* do write_token.write_cond |condvar| {
|
2012-08-14 12:32:41 -05:00
|
|
|
* ... exclusive access ...
|
2012-08-13 14:22:32 -05:00
|
|
|
* }
|
2013-06-10 21:13:56 -05:00
|
|
|
* let read_token = lock.downgrade(write_token);
|
2013-06-13 14:20:38 -05:00
|
|
|
* do read_token.read {
|
2012-08-14 12:32:41 -05:00
|
|
|
* ... shared access ...
|
|
|
|
* }
|
|
|
|
* }
|
|
|
|
* ~~~
|
2012-08-13 14:22:32 -05:00
|
|
|
*/
|
2013-05-31 17:17:22 -05:00
|
|
|
pub fn write_downgrade<U>(&self, blk: &fn(v: RWlockWriteMode) -> U) -> U {
|
2012-08-13 14:22:32 -05:00
|
|
|
// Implementation slightly different from the slicker 'write's above.
|
|
|
|
// The exit path is conditional on whether the caller downgrades.
|
2012-08-20 14:23:37 -05:00
|
|
|
let mut _release = None;
|
2012-08-13 14:22:32 -05:00
|
|
|
unsafe {
|
|
|
|
do task::unkillable {
|
|
|
|
(&self.order_lock).acquire();
|
|
|
|
(&self.access_lock).acquire();
|
|
|
|
(&self.order_lock).release();
|
|
|
|
}
|
2012-08-26 20:14:39 -05:00
|
|
|
_release = Some(RWlockReleaseDowngrade(self));
|
2012-08-13 14:22:32 -05:00
|
|
|
}
|
2012-08-26 20:14:39 -05:00
|
|
|
blk(RWlockWriteMode { lock: self })
|
2012-08-13 14:22:32 -05:00
|
|
|
}
|
|
|
|
|
2012-08-14 12:32:41 -05:00
|
|
|
/// To be called inside of the write_downgrade block.
///
/// Consumes the write-mode token (obtained from `write_downgrade`) and
/// returns a read-mode token for the same lock, without ever releasing
/// exclusive access to another writer in between.
pub fn downgrade<'a>(&self, token: RWlockWriteMode<'a>)
                     -> RWlockReadMode<'a> {
    // The token must belong to this very lock; compare by address.
    if !borrow::ref_eq(self, token.lock) {
        fail!("Can't downgrade() with a different rwlock's write_mode!");
    }
    unsafe {
        // Unkillable: the read_mode flag and read_count must be updated
        // together, or the release guard's bookkeeping would be corrupted.
        do task::unkillable {
            let state = &mut *self.state.get();
            assert!(!state.read_mode);
            state.read_mode = true;
            // If a reader attempts to enter at this point, both the
            // downgrader and reader will set the mode flag. This is fine.
            let old_count = state.read_count.fetch_add(1, atomics::Release);
            // If another reader was already blocking, we need to hand-off
            // the "reader cloud" access lock to them.
            if old_count != 0 {
                // Guaranteed not to let another writer in, because
                // another reader was holding the order_lock. Hence they
                // must be the one to get the access_lock (because all
                // access_locks are acquired with order_lock held). See
                // the comment in write_cond for more justification.
                (&self.access_lock).release();
            }
        }
    }
    RWlockReadMode { lock: token.lock }
}
|
2012-08-09 19:22:43 -05:00
|
|
|
}
|
2012-08-07 19:09:46 -05:00
|
|
|
|
2012-10-11 17:32:09 -05:00
|
|
|
// FIXME(#3588) should go inside of read()
#[doc(hidden)]
// Drop-guard created once a reader has entered the rwlock; its destructor
// (the Drop impl below) decrements the reader count and, for the last
// reader, releases the access_lock.
struct RWlockReleaseRead<'self> {
    lock: &'self RWlock,
}
|
|
|
|
|
2013-06-10 21:13:56 -05:00
|
|
|
#[doc(hidden)]
#[unsafe_destructor]
impl<'self> Drop for RWlockReleaseRead<'self> {
    fn drop(&self) {
        unsafe {
            // Unkillable so a task killed mid-release can't leave the
            // reader count and access_lock in an inconsistent state.
            do task::unkillable {
                let state = &mut *self.lock.state.get();
                assert!(state.read_mode);
                // Atomically leave the "reader cloud".
                let old_count = state.read_count.fetch_sub(1, atomics::Release);
                assert!(old_count > 0);
                if old_count == 1 {
                    // We were the last reader: exit read mode and hand the
                    // access_lock back so a writer may enter.
                    state.read_mode = false;
                    // Note: this release used to be outside of a locked access
                    // to exclusive-protected state. If this code is ever
                    // converted back to such (instead of using atomic ops),
                    // this access MUST NOT go inside the exclusive access.
                    (&self.lock.access_lock).release();
                }
            }
        }
    }
}
|
|
|
|
|
2013-06-10 21:13:56 -05:00
|
|
|
#[doc(hidden)]
|
2013-03-22 17:52:50 -05:00
|
|
|
fn RWlockReleaseRead<'r>(lock: &'r RWlock) -> RWlockReleaseRead<'r> {
|
2012-09-04 19:22:09 -05:00
|
|
|
RWlockReleaseRead {
|
|
|
|
lock: lock
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2012-10-11 17:32:09 -05:00
|
|
|
// FIXME(#3588) should go inside of downgrade()
#[doc(hidden)]
#[unsafe_destructor]
// Drop-guard created by write_downgrade(); its destructor (below) must
// handle both exits: the caller stayed a writer, or downgraded to reader.
struct RWlockReleaseDowngrade<'self> {
    lock: &'self RWlock,
}
|
|
|
|
|
2013-06-10 21:13:56 -05:00
|
|
|
#[doc(hidden)]
#[unsafe_destructor]
impl<'self> Drop for RWlockReleaseDowngrade<'self> {
    fn drop(&self) {
        unsafe {
            // Unkillable for the same reason as the read-release guard:
            // the mode flag, reader count and access_lock must stay in sync.
            do task::unkillable {
                let writer_or_last_reader;
                // Check if we're releasing from read mode or from write mode.
                // (read_mode is set by downgrade(); see that method above.)
                let state = &mut *self.lock.state.get();
                if state.read_mode {
                    // Releasing from read mode.
                    let old_count = state.read_count.fetch_sub(1, atomics::Release);
                    assert!(old_count > 0);
                    // Check if other readers remain.
                    if old_count == 1 {
                        // Case 1: Writer downgraded & was the last reader
                        writer_or_last_reader = true;
                        state.read_mode = false;
                    } else {
                        // Case 2: Writer downgraded & was not the last reader
                        writer_or_last_reader = false;
                    }
                } else {
                    // Case 3: Writer did not downgrade
                    writer_or_last_reader = true;
                }
                if writer_or_last_reader {
                    // Nobody left inside; release the "reader cloud" lock.
                    (&self.lock.access_lock).release();
                }
            }
        }
    }
}
|
|
|
|
|
2013-06-10 21:13:56 -05:00
|
|
|
#[doc(hidden)]
|
2013-03-22 17:52:50 -05:00
|
|
|
fn RWlockReleaseDowngrade<'r>(lock: &'r RWlock)
|
|
|
|
-> RWlockReleaseDowngrade<'r> {
|
2012-09-04 19:22:09 -05:00
|
|
|
RWlockReleaseDowngrade {
|
|
|
|
lock: lock
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2012-08-13 14:22:32 -05:00
|
|
|
/// The "write permission" token used for rwlock.write_downgrade().
pub struct RWlockWriteMode<'self> { priv lock: &'self RWlock }
#[unsafe_destructor]
// Empty destructor: presumably present so the token is non-implicitly-
// copyable and cannot be duplicated/downgraded twice.
// NOTE(review): rationale inferred — confirm against era rustc semantics.
impl<'self> Drop for RWlockWriteMode<'self> { fn drop(&self) {} }
|
2013-02-26 13:34:00 -06:00
|
|
|
|
2012-08-13 14:22:32 -05:00
|
|
|
/// The "read permission" token used for rwlock.write_downgrade().
pub struct RWlockReadMode<'self> { priv lock: &'self RWlock }
#[unsafe_destructor]
// Empty destructor, mirroring RWlockWriteMode's — presumably to keep the
// token non-implicitly-copyable. NOTE(review): rationale inferred.
impl<'self> Drop for RWlockReadMode<'self> { fn drop(&self) {} }
|
2012-08-13 14:22:32 -05:00
|
|
|
|
2013-05-31 17:17:22 -05:00
|
|
|
impl<'self> RWlockWriteMode<'self> {
    /// Access the pre-downgrade rwlock in write mode.
    // Holding the token is proof that the write lock is held, so this
    // just runs the closure.
    pub fn write<U>(&self, blk: &fn() -> U) -> U { blk() }
    /// Access the pre-downgrade rwlock in write mode with a condvar.
    pub fn write_cond<U>(&self, blk: &fn(c: &Condvar) -> U) -> U {
        // Need to make the condvar use the order lock when reacquiring the
        // access lock. See comment in RWlock::write_cond for why.
        // (`Just` here is this module's own optional-order-lock variant,
        // not core's Option — presumed; confirm with the enum definition.)
        blk(&Condvar { sem: &self.lock.access_lock,
                       order: Just(&self.lock.order_lock), })
    }
}
|
2013-02-26 13:34:00 -06:00
|
|
|
|
2013-05-31 17:17:22 -05:00
|
|
|
impl<'self> RWlockReadMode<'self> {
    /// Access the post-downgrade rwlock in read mode.
    // The token itself witnesses that read access is held (downgrade()
    // keeps the access_lock appropriately), so this just runs the closure.
    pub fn read<U>(&self, blk: &fn() -> U) -> U { blk() }
}
|
|
|
|
|
2012-08-07 19:09:46 -05:00
|
|
|
/****************************************************************************
|
|
|
|
* Tests
|
|
|
|
****************************************************************************/
|
2012-08-07 17:03:30 -05:00
|
|
|
|
|
|
|
#[cfg(test)]
|
|
|
|
mod tests {
|
2013-05-21 19:24:31 -05:00
|
|
|
use core::prelude::*;
|
2013-01-08 21:37:25 -06:00
|
|
|
|
|
|
|
use sync::*;
|
|
|
|
|
2012-12-28 14:46:08 -06:00
|
|
|
use core::cast;
|
2013-02-25 13:11:21 -06:00
|
|
|
use core::cell::Cell;
|
2013-05-24 21:35:29 -05:00
|
|
|
use core::comm;
|
2012-12-27 20:24:18 -06:00
|
|
|
use core::result;
|
|
|
|
use core::task;
|
|
|
|
|
2012-08-09 21:07:37 -05:00
|
|
|
/************************************************************************
|
|
|
|
* Semaphore tests
|
|
|
|
************************************************************************/
|
2012-08-08 21:49:22 -05:00
|
|
|
#[test]
|
2013-04-15 10:08:52 -05:00
|
|
|
fn test_sem_acquire_release() {
|
2012-08-09 21:31:05 -05:00
|
|
|
let s = ~semaphore(1);
|
2012-08-08 21:49:22 -05:00
|
|
|
s.acquire();
|
|
|
|
s.release();
|
|
|
|
s.acquire();
|
|
|
|
}
|
|
|
|
#[test]
|
2013-04-15 10:08:52 -05:00
|
|
|
fn test_sem_basic() {
|
2012-08-09 21:31:05 -05:00
|
|
|
let s = ~semaphore(1);
|
2012-08-08 21:49:22 -05:00
|
|
|
do s.access { }
|
|
|
|
}
|
2012-08-07 17:03:30 -05:00
|
|
|
#[test]
|
2013-04-15 10:08:52 -05:00
|
|
|
fn test_sem_as_mutex() {
|
2012-08-09 21:31:05 -05:00
|
|
|
let s = ~semaphore(1);
|
2012-08-07 17:03:30 -05:00
|
|
|
let s2 = ~s.clone();
|
2013-02-15 01:30:30 -06:00
|
|
|
do task::spawn || {
|
2012-08-07 17:03:30 -05:00
|
|
|
do s2.access {
|
2012-08-08 21:49:22 -05:00
|
|
|
for 5.times { task::yield(); }
|
2012-08-07 17:03:30 -05:00
|
|
|
}
|
|
|
|
}
|
|
|
|
do s.access {
|
2012-08-08 21:49:22 -05:00
|
|
|
for 5.times { task::yield(); }
|
2012-08-07 17:03:30 -05:00
|
|
|
}
|
|
|
|
}
|
|
|
|
#[test]
|
2013-04-15 10:08:52 -05:00
|
|
|
fn test_sem_as_cvar() {
|
2012-08-07 17:03:30 -05:00
|
|
|
/* Child waits and parent signals */
|
2013-02-02 05:10:12 -06:00
|
|
|
let (p,c) = comm::stream();
|
2012-08-09 21:31:05 -05:00
|
|
|
let s = ~semaphore(0);
|
2012-08-07 17:03:30 -05:00
|
|
|
let s2 = ~s.clone();
|
2013-02-15 01:30:30 -06:00
|
|
|
do task::spawn || {
|
2012-08-07 17:42:33 -05:00
|
|
|
s2.acquire();
|
2012-08-07 17:03:30 -05:00
|
|
|
c.send(());
|
|
|
|
}
|
2012-08-08 21:49:22 -05:00
|
|
|
for 5.times { task::yield(); }
|
2012-08-07 17:42:33 -05:00
|
|
|
s.release();
|
2012-08-07 17:03:30 -05:00
|
|
|
let _ = p.recv();
|
2012-08-07 17:20:02 -05:00
|
|
|
|
2012-08-07 17:03:30 -05:00
|
|
|
/* Parent waits and child signals */
|
2013-02-02 05:10:12 -06:00
|
|
|
let (p,c) = comm::stream();
|
2012-08-09 21:31:05 -05:00
|
|
|
let s = ~semaphore(0);
|
2012-08-07 17:03:30 -05:00
|
|
|
let s2 = ~s.clone();
|
2013-02-15 01:30:30 -06:00
|
|
|
do task::spawn || {
|
2012-08-08 21:49:22 -05:00
|
|
|
for 5.times { task::yield(); }
|
2012-08-07 17:42:33 -05:00
|
|
|
s2.release();
|
2012-08-07 17:03:30 -05:00
|
|
|
let _ = p.recv();
|
|
|
|
}
|
2012-08-07 17:42:33 -05:00
|
|
|
s.acquire();
|
2012-08-07 17:03:30 -05:00
|
|
|
c.send(());
|
|
|
|
}
|
|
|
|
#[test]
|
2013-04-15 10:08:52 -05:00
|
|
|
fn test_sem_multi_resource() {
|
2012-08-07 17:42:33 -05:00
|
|
|
// Parent and child both get in the critical section at the same
|
|
|
|
// time, and shake hands.
|
2012-08-09 21:31:05 -05:00
|
|
|
let s = ~semaphore(2);
|
2012-08-07 17:42:33 -05:00
|
|
|
let s2 = ~s.clone();
|
2013-02-02 05:10:12 -06:00
|
|
|
let (p1,c1) = comm::stream();
|
|
|
|
let (p2,c2) = comm::stream();
|
2013-02-15 01:30:30 -06:00
|
|
|
do task::spawn || {
|
2012-08-07 17:42:33 -05:00
|
|
|
do s2.access {
|
|
|
|
let _ = p2.recv();
|
|
|
|
c1.send(());
|
|
|
|
}
|
|
|
|
}
|
|
|
|
do s.access {
|
|
|
|
c2.send(());
|
|
|
|
let _ = p1.recv();
|
|
|
|
}
|
|
|
|
}
|
|
|
|
#[test]
|
2013-04-15 10:08:52 -05:00
|
|
|
fn test_sem_runtime_friendly_blocking() {
|
2012-08-07 17:42:33 -05:00
|
|
|
// Force the runtime to schedule two threads on the same sched_loop.
|
|
|
|
// When one blocks, it should schedule the other one.
|
2012-08-15 16:10:46 -05:00
|
|
|
do task::spawn_sched(task::ManualThreads(1)) {
|
2012-08-09 21:31:05 -05:00
|
|
|
let s = ~semaphore(1);
|
2012-08-07 17:03:30 -05:00
|
|
|
let s2 = ~s.clone();
|
2013-02-02 05:10:12 -06:00
|
|
|
let (p,c) = comm::stream();
|
2013-06-04 05:03:58 -05:00
|
|
|
let child_data = Cell::new((s2, c));
|
2012-08-07 17:03:30 -05:00
|
|
|
do s.access {
|
2013-02-26 13:32:00 -06:00
|
|
|
let (s2, c) = child_data.take();
|
2013-02-15 01:30:30 -06:00
|
|
|
do task::spawn || {
|
2012-08-07 17:03:30 -05:00
|
|
|
c.send(());
|
|
|
|
do s2.access { }
|
|
|
|
c.send(());
|
|
|
|
}
|
|
|
|
let _ = p.recv(); // wait for child to come alive
|
|
|
|
for 5.times { task::yield(); } // let the child contend
|
|
|
|
}
|
|
|
|
let _ = p.recv(); // wait for child to be done
|
|
|
|
}
|
|
|
|
}
|
2012-08-09 21:07:37 -05:00
|
|
|
/************************************************************************
|
|
|
|
* Mutex tests
|
|
|
|
************************************************************************/
|
2012-08-07 19:09:46 -05:00
|
|
|
#[test]
|
2013-04-15 10:08:52 -05:00
|
|
|
fn test_mutex_lock() {
|
2012-08-07 19:09:46 -05:00
|
|
|
// Unsafely achieve shared state, and do the textbook
|
2012-10-23 13:11:23 -05:00
|
|
|
// "load tmp = move ptr; inc tmp; store ptr <- tmp" dance.
|
2013-02-02 05:10:12 -06:00
|
|
|
let (p,c) = comm::stream();
|
2012-08-29 16:45:25 -05:00
|
|
|
let m = ~Mutex();
|
2013-03-15 17:26:59 -05:00
|
|
|
let m2 = m.clone();
|
2012-09-11 23:25:01 -05:00
|
|
|
let mut sharedstate = ~0;
|
2013-04-26 16:04:39 -05:00
|
|
|
{
|
|
|
|
let ptr: *int = &*sharedstate;
|
|
|
|
do task::spawn || {
|
|
|
|
let sharedstate: &mut int =
|
|
|
|
unsafe { cast::transmute(ptr) };
|
|
|
|
access_shared(sharedstate, m2, 10);
|
|
|
|
c.send(());
|
2012-08-13 14:22:32 -05:00
|
|
|
|
2013-04-26 16:04:39 -05:00
|
|
|
}
|
2012-08-07 19:09:46 -05:00
|
|
|
}
|
2013-04-26 16:04:39 -05:00
|
|
|
{
|
|
|
|
access_shared(sharedstate, m, 10);
|
|
|
|
let _ = p.recv();
|
2012-08-07 19:09:46 -05:00
|
|
|
|
2013-05-18 21:02:45 -05:00
|
|
|
assert_eq!(*sharedstate, 20);
|
2013-04-26 16:04:39 -05:00
|
|
|
}
|
2012-08-07 19:09:46 -05:00
|
|
|
|
2012-08-26 20:14:39 -05:00
|
|
|
fn access_shared(sharedstate: &mut int, m: &Mutex, n: uint) {
|
2012-08-07 19:09:46 -05:00
|
|
|
for n.times {
|
2012-08-08 21:49:22 -05:00
|
|
|
do m.lock {
|
2012-08-07 19:09:46 -05:00
|
|
|
let oldval = *sharedstate;
|
|
|
|
task::yield();
|
|
|
|
*sharedstate = oldval + 1;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
#[test]
|
2013-04-15 10:08:52 -05:00
|
|
|
fn test_mutex_cond_wait() {
|
2012-08-29 16:45:25 -05:00
|
|
|
let m = ~Mutex();
|
2012-08-07 19:09:46 -05:00
|
|
|
|
|
|
|
// Child wakes up parent
|
|
|
|
do m.lock_cond |cond| {
|
2012-08-08 21:49:22 -05:00
|
|
|
let m2 = ~m.clone();
|
2013-02-15 01:30:30 -06:00
|
|
|
do task::spawn || {
|
2012-08-08 21:49:22 -05:00
|
|
|
do m2.lock_cond |cond| {
|
|
|
|
let woken = cond.signal();
|
2013-03-28 20:39:09 -05:00
|
|
|
assert!(woken);
|
2012-08-08 21:49:22 -05:00
|
|
|
}
|
2012-08-07 19:09:46 -05:00
|
|
|
}
|
|
|
|
cond.wait();
|
|
|
|
}
|
|
|
|
// Parent wakes up child
|
2013-02-02 05:10:12 -06:00
|
|
|
let (port,chan) = comm::stream();
|
2012-08-07 19:09:46 -05:00
|
|
|
let m3 = ~m.clone();
|
2013-02-15 01:30:30 -06:00
|
|
|
do task::spawn || {
|
2012-08-07 19:09:46 -05:00
|
|
|
do m3.lock_cond |cond| {
|
|
|
|
chan.send(());
|
|
|
|
cond.wait();
|
|
|
|
chan.send(());
|
|
|
|
}
|
|
|
|
}
|
|
|
|
let _ = port.recv(); // Wait until child gets in the mutex
|
|
|
|
do m.lock_cond |cond| {
|
2012-08-08 21:49:22 -05:00
|
|
|
let woken = cond.signal();
|
2013-03-28 20:39:09 -05:00
|
|
|
assert!(woken);
|
2012-08-07 19:09:46 -05:00
|
|
|
}
|
|
|
|
let _ = port.recv(); // Wait until child wakes up
|
|
|
|
}
|
2012-08-09 19:22:43 -05:00
|
|
|
#[cfg(test)]
|
2013-04-15 10:08:52 -05:00
|
|
|
fn test_mutex_cond_broadcast_helper(num_waiters: uint) {
|
2012-08-29 16:45:25 -05:00
|
|
|
let m = ~Mutex();
|
2012-08-07 19:09:46 -05:00
|
|
|
let mut ports = ~[];
|
|
|
|
|
|
|
|
for num_waiters.times {
|
|
|
|
let mi = ~m.clone();
|
2013-02-02 05:10:12 -06:00
|
|
|
let (port, chan) = comm::stream();
|
2013-02-15 01:30:30 -06:00
|
|
|
ports.push(port);
|
|
|
|
do task::spawn || {
|
2012-08-07 19:09:46 -05:00
|
|
|
do mi.lock_cond |cond| {
|
|
|
|
chan.send(());
|
|
|
|
cond.wait();
|
|
|
|
chan.send(());
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// wait until all children get in the mutex
|
2013-06-21 07:29:53 -05:00
|
|
|
for ports.iter().advance |port| { let _ = port.recv(); }
|
2012-08-07 19:09:46 -05:00
|
|
|
do m.lock_cond |cond| {
|
|
|
|
let num_woken = cond.broadcast();
|
2013-05-18 21:02:45 -05:00
|
|
|
assert_eq!(num_woken, num_waiters);
|
2012-08-07 19:09:46 -05:00
|
|
|
}
|
|
|
|
// wait until all children wake up
|
2013-06-21 07:29:53 -05:00
|
|
|
for ports.iter().advance |port| { let _ = port.recv(); }
|
2012-08-07 19:09:46 -05:00
|
|
|
}
|
2012-08-09 19:22:43 -05:00
|
|
|
#[test]
|
2013-04-15 10:08:52 -05:00
|
|
|
fn test_mutex_cond_broadcast() {
|
2012-08-09 19:22:43 -05:00
|
|
|
test_mutex_cond_broadcast_helper(12);
|
|
|
|
}
|
|
|
|
#[test]
|
2013-04-15 10:08:52 -05:00
|
|
|
fn test_mutex_cond_broadcast_none() {
|
2012-08-09 19:22:43 -05:00
|
|
|
test_mutex_cond_broadcast_helper(0);
|
|
|
|
}
|
|
|
|
#[test]
|
2013-04-15 10:08:52 -05:00
|
|
|
fn test_mutex_cond_no_waiter() {
|
2012-08-29 16:45:25 -05:00
|
|
|
let m = ~Mutex();
|
2012-08-09 19:22:43 -05:00
|
|
|
let m2 = ~m.clone();
|
2013-02-15 01:30:30 -06:00
|
|
|
do task::try || {
|
2012-08-09 19:22:43 -05:00
|
|
|
do m.lock_cond |_x| { }
|
|
|
|
};
|
|
|
|
do m2.lock_cond |cond| {
|
2013-03-28 20:39:09 -05:00
|
|
|
assert!(!cond.signal());
|
2012-08-09 19:22:43 -05:00
|
|
|
}
|
|
|
|
}
|
2012-08-08 21:49:22 -05:00
|
|
|
#[test] #[ignore(cfg(windows))]
|
2013-04-15 10:08:52 -05:00
|
|
|
fn test_mutex_killed_simple() {
|
2012-08-08 21:49:22 -05:00
|
|
|
// Mutex must get automatically unlocked if failed/killed within.
|
2012-08-29 16:45:25 -05:00
|
|
|
let m = ~Mutex();
|
2012-08-08 21:49:22 -05:00
|
|
|
let m2 = ~m.clone();
|
|
|
|
|
2013-02-15 01:30:30 -06:00
|
|
|
let result: result::Result<(),()> = do task::try || {
|
2012-08-08 21:49:22 -05:00
|
|
|
do m2.lock {
|
2013-02-11 21:26:38 -06:00
|
|
|
fail!();
|
2012-08-08 21:49:22 -05:00
|
|
|
}
|
|
|
|
};
|
2013-03-28 20:39:09 -05:00
|
|
|
assert!(result.is_err());
|
2012-08-08 21:49:22 -05:00
|
|
|
// child task must have finished by the time try returns
|
|
|
|
do m.lock { }
|
|
|
|
}
|
|
|
|
#[test] #[ignore(cfg(windows))]
|
2013-04-15 10:08:52 -05:00
|
|
|
fn test_mutex_killed_cond() {
|
2012-08-08 21:49:22 -05:00
|
|
|
// Getting killed during cond wait must not corrupt the mutex while
|
|
|
|
// unwinding (e.g. double unlock).
|
2012-08-29 16:45:25 -05:00
|
|
|
let m = ~Mutex();
|
2012-08-08 21:49:22 -05:00
|
|
|
let m2 = ~m.clone();
|
|
|
|
|
2013-02-15 01:30:30 -06:00
|
|
|
let result: result::Result<(),()> = do task::try || {
|
2013-02-02 05:10:12 -06:00
|
|
|
let (p,c) = comm::stream();
|
2013-02-15 01:30:30 -06:00
|
|
|
do task::spawn || { // linked
|
2012-08-08 21:49:22 -05:00
|
|
|
let _ = p.recv(); // wait for sibling to get in the mutex
|
|
|
|
task::yield();
|
2013-02-11 21:26:38 -06:00
|
|
|
fail!();
|
2012-08-08 21:49:22 -05:00
|
|
|
}
|
|
|
|
do m2.lock_cond |cond| {
|
|
|
|
c.send(()); // tell sibling go ahead
|
|
|
|
cond.wait(); // block forever
|
|
|
|
}
|
|
|
|
};
|
2013-03-28 20:39:09 -05:00
|
|
|
assert!(result.is_err());
|
2012-08-08 21:49:22 -05:00
|
|
|
// child task must have finished by the time try returns
|
|
|
|
do m.lock_cond |cond| {
|
2012-08-14 17:44:31 -05:00
|
|
|
let woken = cond.signal();
|
2013-03-28 20:39:09 -05:00
|
|
|
assert!(!woken);
|
2012-08-14 17:44:31 -05:00
|
|
|
}
|
|
|
|
}
|
|
|
|
#[test] #[ignore(cfg(windows))]
|
2013-04-15 10:08:52 -05:00
|
|
|
fn test_mutex_killed_broadcast() {
|
2012-08-29 16:45:25 -05:00
|
|
|
let m = ~Mutex();
|
2012-08-14 17:44:31 -05:00
|
|
|
let m2 = ~m.clone();
|
2013-02-02 05:10:12 -06:00
|
|
|
let (p,c) = comm::stream();
|
2012-08-14 17:44:31 -05:00
|
|
|
|
2013-02-15 01:30:30 -06:00
|
|
|
let result: result::Result<(),()> = do task::try || {
|
2012-08-14 17:44:31 -05:00
|
|
|
let mut sibling_convos = ~[];
|
|
|
|
for 2.times {
|
2013-02-02 05:10:12 -06:00
|
|
|
let (p,c) = comm::stream();
|
2013-06-04 05:03:58 -05:00
|
|
|
let c = Cell::new(c);
|
2013-02-15 01:30:30 -06:00
|
|
|
sibling_convos.push(p);
|
2012-08-14 17:44:31 -05:00
|
|
|
let mi = ~m2.clone();
|
|
|
|
// spawn sibling task
|
2013-02-26 13:32:00 -06:00
|
|
|
do task::spawn { // linked
|
2012-08-14 17:44:31 -05:00
|
|
|
do mi.lock_cond |cond| {
|
2013-02-26 13:32:00 -06:00
|
|
|
let c = c.take();
|
2012-08-14 17:44:31 -05:00
|
|
|
c.send(()); // tell sibling to go ahead
|
2013-02-15 01:30:30 -06:00
|
|
|
let _z = SendOnFailure(c);
|
2012-08-14 17:44:31 -05:00
|
|
|
cond.wait(); // block forever
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2013-06-21 07:29:53 -05:00
|
|
|
for sibling_convos.iter().advance |p| {
|
2012-08-14 17:44:31 -05:00
|
|
|
let _ = p.recv(); // wait for sibling to get in the mutex
|
|
|
|
}
|
|
|
|
do m2.lock { }
|
2013-02-15 01:30:30 -06:00
|
|
|
c.send(sibling_convos); // let parent wait on all children
|
2013-02-11 21:26:38 -06:00
|
|
|
fail!();
|
2012-08-14 17:44:31 -05:00
|
|
|
};
|
2013-03-28 20:39:09 -05:00
|
|
|
assert!(result.is_err());
|
2012-08-14 17:44:31 -05:00
|
|
|
// child task must have finished by the time try returns
|
2013-06-24 17:34:20 -05:00
|
|
|
let r = p.recv();
|
|
|
|
for r.iter().advance |p| { p.recv(); } // wait on all its siblings
|
2012-08-14 17:44:31 -05:00
|
|
|
do m.lock_cond |cond| {
|
|
|
|
let woken = cond.broadcast();
|
2013-05-18 21:02:45 -05:00
|
|
|
assert_eq!(woken, 0);
|
2012-08-14 17:44:31 -05:00
|
|
|
}
|
2012-08-26 20:14:39 -05:00
|
|
|
struct SendOnFailure {
|
2013-02-02 05:10:12 -06:00
|
|
|
c: comm::Chan<()>,
|
2012-11-13 20:38:18 -06:00
|
|
|
}
|
|
|
|
|
2013-02-14 13:47:00 -06:00
|
|
|
impl Drop for SendOnFailure {
|
2013-06-20 20:06:13 -05:00
|
|
|
fn drop(&self) {
|
2012-11-13 20:38:18 -06:00
|
|
|
self.c.send(());
|
|
|
|
}
|
2012-08-08 21:49:22 -05:00
|
|
|
}
|
2012-09-04 19:22:09 -05:00
|
|
|
|
2013-02-02 05:10:12 -06:00
|
|
|
fn SendOnFailure(c: comm::Chan<()>) -> SendOnFailure {
|
2012-09-04 19:22:09 -05:00
|
|
|
SendOnFailure {
|
2013-02-15 01:30:30 -06:00
|
|
|
c: c
|
2012-09-04 19:22:09 -05:00
|
|
|
}
|
|
|
|
}
|
2012-08-08 21:49:22 -05:00
|
|
|
}
|
2012-08-14 22:21:39 -05:00
|
|
|
#[test]
|
2013-04-15 10:08:52 -05:00
|
|
|
fn test_mutex_cond_signal_on_0() {
|
2012-08-14 22:21:39 -05:00
|
|
|
// Tests that signal_on(0) is equivalent to signal().
|
2012-08-29 16:45:25 -05:00
|
|
|
let m = ~Mutex();
|
2012-08-14 22:21:39 -05:00
|
|
|
do m.lock_cond |cond| {
|
|
|
|
let m2 = ~m.clone();
|
2013-02-15 01:30:30 -06:00
|
|
|
do task::spawn || {
|
2012-08-14 22:21:39 -05:00
|
|
|
do m2.lock_cond |cond| {
|
|
|
|
cond.signal_on(0);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
cond.wait();
|
|
|
|
}
|
|
|
|
}
|
|
|
|
#[test] #[ignore(cfg(windows))]
|
2013-04-15 10:08:52 -05:00
|
|
|
fn test_mutex_different_conds() {
|
2012-08-14 22:21:39 -05:00
|
|
|
let result = do task::try {
|
|
|
|
let m = ~mutex_with_condvars(2);
|
|
|
|
let m2 = ~m.clone();
|
2013-02-02 05:10:12 -06:00
|
|
|
let (p,c) = comm::stream();
|
2013-02-15 01:30:30 -06:00
|
|
|
do task::spawn || {
|
2012-08-14 22:21:39 -05:00
|
|
|
do m2.lock_cond |cond| {
|
|
|
|
c.send(());
|
|
|
|
cond.wait_on(1);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
let _ = p.recv();
|
|
|
|
do m.lock_cond |cond| {
|
|
|
|
if !cond.signal_on(0) {
|
2013-02-11 21:26:38 -06:00
|
|
|
fail!(); // success; punt sibling awake.
|
2012-08-14 22:21:39 -05:00
|
|
|
}
|
|
|
|
}
|
|
|
|
};
|
2013-03-28 20:39:09 -05:00
|
|
|
assert!(result.is_err());
|
2012-08-14 22:21:39 -05:00
|
|
|
}
|
|
|
|
#[test] #[ignore(cfg(windows))]
|
2013-04-15 10:08:52 -05:00
|
|
|
fn test_mutex_no_condvars() {
|
2012-08-14 22:21:39 -05:00
|
|
|
let result = do task::try {
|
|
|
|
let m = ~mutex_with_condvars(0);
|
|
|
|
do m.lock_cond |cond| { cond.wait(); }
|
|
|
|
};
|
2013-03-28 20:39:09 -05:00
|
|
|
assert!(result.is_err());
|
2012-08-14 22:21:39 -05:00
|
|
|
let result = do task::try {
|
|
|
|
let m = ~mutex_with_condvars(0);
|
|
|
|
do m.lock_cond |cond| { cond.signal(); }
|
|
|
|
};
|
2013-03-28 20:39:09 -05:00
|
|
|
assert!(result.is_err());
|
2012-08-14 22:21:39 -05:00
|
|
|
let result = do task::try {
|
|
|
|
let m = ~mutex_with_condvars(0);
|
|
|
|
do m.lock_cond |cond| { cond.broadcast(); }
|
|
|
|
};
|
2013-03-28 20:39:09 -05:00
|
|
|
assert!(result.is_err());
|
2012-08-14 22:21:39 -05:00
|
|
|
}
|
2012-08-09 21:07:37 -05:00
|
|
|
/************************************************************************
|
|
|
|
* Reader/writer lock tests
|
|
|
|
************************************************************************/
|
|
|
|
#[cfg(test)]
|
2013-01-29 14:06:09 -06:00
|
|
|
pub enum RWlockMode { Read, Write, Downgrade, DowngradeRead }
|
2012-08-13 14:22:32 -05:00
|
|
|
#[cfg(test)]
|
2013-04-15 10:08:52 -05:00
|
|
|
fn lock_rwlock_in_mode(x: &RWlock, mode: RWlockMode, blk: &fn()) {
|
2012-08-13 14:22:32 -05:00
|
|
|
match mode {
|
2012-08-26 20:14:39 -05:00
|
|
|
Read => x.read(blk),
|
|
|
|
Write => x.write(blk),
|
|
|
|
Downgrade =>
|
2012-08-14 12:32:41 -05:00
|
|
|
do x.write_downgrade |mode| {
|
2012-08-23 15:51:53 -05:00
|
|
|
(&mode).write(blk);
|
2012-08-14 12:32:41 -05:00
|
|
|
},
|
2012-08-26 20:14:39 -05:00
|
|
|
DowngradeRead =>
|
2012-08-14 12:32:41 -05:00
|
|
|
do x.write_downgrade |mode| {
|
2013-02-15 01:30:30 -06:00
|
|
|
let mode = x.downgrade(mode);
|
2012-08-23 15:51:53 -05:00
|
|
|
(&mode).read(blk);
|
2012-08-14 12:32:41 -05:00
|
|
|
},
|
2012-08-13 14:22:32 -05:00
|
|
|
}
|
2012-08-09 21:07:37 -05:00
|
|
|
}
|
2012-08-09 19:22:43 -05:00
|
|
|
#[cfg(test)]
|
2013-04-15 10:08:52 -05:00
|
|
|
fn test_rwlock_exclusion(x: ~RWlock,
|
2013-01-29 14:06:09 -06:00
|
|
|
mode1: RWlockMode,
|
|
|
|
mode2: RWlockMode) {
|
2012-08-09 19:22:43 -05:00
|
|
|
// Test mutual exclusion between readers and writers. Just like the
|
|
|
|
// mutex mutual exclusion test, a ways above.
|
2013-02-02 05:10:12 -06:00
|
|
|
let (p,c) = comm::stream();
|
2013-03-15 17:26:59 -05:00
|
|
|
let x2 = (*x).clone();
|
2012-09-11 23:25:01 -05:00
|
|
|
let mut sharedstate = ~0;
|
2013-04-26 16:04:39 -05:00
|
|
|
{
|
|
|
|
let ptr: *int = &*sharedstate;
|
|
|
|
do task::spawn || {
|
|
|
|
let sharedstate: &mut int =
|
|
|
|
unsafe { cast::transmute(ptr) };
|
|
|
|
access_shared(sharedstate, &x2, mode1, 10);
|
|
|
|
c.send(());
|
|
|
|
}
|
2012-08-09 19:22:43 -05:00
|
|
|
}
|
2013-04-26 16:04:39 -05:00
|
|
|
{
|
|
|
|
access_shared(sharedstate, x, mode2, 10);
|
|
|
|
let _ = p.recv();
|
2012-08-09 19:22:43 -05:00
|
|
|
|
2013-05-18 21:02:45 -05:00
|
|
|
assert_eq!(*sharedstate, 20);
|
2013-04-26 16:04:39 -05:00
|
|
|
}
|
2012-08-09 19:22:43 -05:00
|
|
|
|
2012-08-26 20:14:39 -05:00
|
|
|
fn access_shared(sharedstate: &mut int, x: &RWlock, mode: RWlockMode,
|
2012-08-09 19:22:43 -05:00
|
|
|
n: uint) {
|
|
|
|
for n.times {
|
2012-08-13 14:22:32 -05:00
|
|
|
do lock_rwlock_in_mode(x, mode) {
|
2012-08-09 19:22:43 -05:00
|
|
|
let oldval = *sharedstate;
|
|
|
|
task::yield();
|
|
|
|
*sharedstate = oldval + 1;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
#[test]
|
2013-04-15 10:08:52 -05:00
|
|
|
fn test_rwlock_readers_wont_modify_the_data() {
|
2012-08-29 16:45:25 -05:00
|
|
|
test_rwlock_exclusion(~RWlock(), Read, Write);
|
|
|
|
test_rwlock_exclusion(~RWlock(), Write, Read);
|
|
|
|
test_rwlock_exclusion(~RWlock(), Read, Downgrade);
|
|
|
|
test_rwlock_exclusion(~RWlock(), Downgrade, Read);
|
2012-08-09 19:22:43 -05:00
|
|
|
}
|
|
|
|
#[test]
|
2013-04-15 10:08:52 -05:00
|
|
|
fn test_rwlock_writers_and_writers() {
|
2012-08-29 16:45:25 -05:00
|
|
|
test_rwlock_exclusion(~RWlock(), Write, Write);
|
|
|
|
test_rwlock_exclusion(~RWlock(), Write, Downgrade);
|
|
|
|
test_rwlock_exclusion(~RWlock(), Downgrade, Write);
|
|
|
|
test_rwlock_exclusion(~RWlock(), Downgrade, Downgrade);
|
2012-08-09 19:22:43 -05:00
|
|
|
}
|
2012-08-13 14:22:32 -05:00
|
|
|
#[cfg(test)]
|
2013-04-15 10:08:52 -05:00
|
|
|
fn test_rwlock_handshake(x: ~RWlock,
|
2013-01-29 14:06:09 -06:00
|
|
|
mode1: RWlockMode,
|
|
|
|
mode2: RWlockMode,
|
|
|
|
make_mode2_go_first: bool) {
|
2012-08-09 19:22:43 -05:00
|
|
|
// Much like sem_multi_resource.
|
2013-03-15 17:26:59 -05:00
|
|
|
let x2 = (*x).clone();
|
2013-02-02 05:10:12 -06:00
|
|
|
let (p1,c1) = comm::stream();
|
|
|
|
let (p2,c2) = comm::stream();
|
2013-02-15 01:30:30 -06:00
|
|
|
do task::spawn || {
|
2012-08-13 14:22:32 -05:00
|
|
|
if !make_mode2_go_first {
|
|
|
|
let _ = p2.recv(); // parent sends to us once it locks, or ...
|
|
|
|
}
|
2013-03-15 17:26:59 -05:00
|
|
|
do lock_rwlock_in_mode(&x2, mode2) {
|
2012-08-13 14:22:32 -05:00
|
|
|
if make_mode2_go_first {
|
|
|
|
c1.send(()); // ... we send to it once we lock
|
|
|
|
}
|
2012-08-09 19:22:43 -05:00
|
|
|
let _ = p2.recv();
|
|
|
|
c1.send(());
|
|
|
|
}
|
|
|
|
}
|
2012-08-13 14:22:32 -05:00
|
|
|
if make_mode2_go_first {
|
|
|
|
let _ = p1.recv(); // child sends to us once it locks, or ...
|
|
|
|
}
|
|
|
|
do lock_rwlock_in_mode(x, mode1) {
|
|
|
|
if !make_mode2_go_first {
|
|
|
|
c2.send(()); // ... we send to it once we lock
|
|
|
|
}
|
2012-08-09 19:22:43 -05:00
|
|
|
c2.send(());
|
|
|
|
let _ = p1.recv();
|
|
|
|
}
|
2012-08-09 21:07:37 -05:00
|
|
|
}
|
2012-08-09 22:15:30 -05:00
|
|
|
#[test]
|
2013-04-15 10:08:52 -05:00
|
|
|
fn test_rwlock_readers_and_readers() {
|
2012-08-29 16:45:25 -05:00
|
|
|
test_rwlock_handshake(~RWlock(), Read, Read, false);
|
2012-08-13 14:22:32 -05:00
|
|
|
// The downgrader needs to get in before the reader gets in, otherwise
|
|
|
|
// they cannot end up reading at the same time.
|
2012-08-29 16:45:25 -05:00
|
|
|
test_rwlock_handshake(~RWlock(), DowngradeRead, Read, false);
|
|
|
|
test_rwlock_handshake(~RWlock(), Read, DowngradeRead, true);
|
2012-08-13 14:22:32 -05:00
|
|
|
// Two downgrade_reads can never both end up reading at the same time.
|
|
|
|
}
|
|
|
|
#[test]
|
2013-04-15 10:08:52 -05:00
|
|
|
fn test_rwlock_downgrade_unlock() {
|
2012-08-13 15:47:35 -05:00
|
|
|
// Tests that downgrade can unlock the lock in both modes
|
2012-08-29 16:45:25 -05:00
|
|
|
let x = ~RWlock();
|
2012-08-26 20:14:39 -05:00
|
|
|
do lock_rwlock_in_mode(x, Downgrade) { }
|
2013-02-15 01:30:30 -06:00
|
|
|
test_rwlock_handshake(x, Read, Read, false);
|
2012-08-29 16:45:25 -05:00
|
|
|
let y = ~RWlock();
|
2012-08-26 20:14:39 -05:00
|
|
|
do lock_rwlock_in_mode(y, DowngradeRead) { }
|
2013-02-15 01:30:30 -06:00
|
|
|
test_rwlock_exclusion(y, Write, Write);
|
2012-08-13 15:47:35 -05:00
|
|
|
}
|
|
|
|
#[test]
|
2013-04-15 10:08:52 -05:00
|
|
|
fn test_rwlock_read_recursive() {
|
2012-08-29 16:45:25 -05:00
|
|
|
let x = ~RWlock();
|
2012-08-13 15:47:35 -05:00
|
|
|
do x.read { do x.read { } }
|
|
|
|
}
|
|
|
|
#[test]
|
2013-04-15 10:08:52 -05:00
|
|
|
fn test_rwlock_cond_wait() {
|
2012-08-09 22:15:30 -05:00
|
|
|
// As test_mutex_cond_wait above.
|
2012-08-29 16:45:25 -05:00
|
|
|
let x = ~RWlock();
|
2012-08-09 22:15:30 -05:00
|
|
|
|
|
|
|
// Child wakes up parent
|
|
|
|
do x.write_cond |cond| {
|
2013-03-15 17:26:59 -05:00
|
|
|
let x2 = (*x).clone();
|
2013-02-15 01:30:30 -06:00
|
|
|
do task::spawn || {
|
2012-08-09 22:15:30 -05:00
|
|
|
do x2.write_cond |cond| {
|
|
|
|
let woken = cond.signal();
|
2013-03-28 20:39:09 -05:00
|
|
|
assert!(woken);
|
2012-08-09 22:15:30 -05:00
|
|
|
}
|
|
|
|
}
|
|
|
|
cond.wait();
|
|
|
|
}
|
|
|
|
// Parent wakes up child
|
2013-02-02 05:10:12 -06:00
|
|
|
let (port,chan) = comm::stream();
|
2013-03-15 17:26:59 -05:00
|
|
|
let x3 = (*x).clone();
|
2013-02-15 01:30:30 -06:00
|
|
|
do task::spawn || {
|
2012-08-09 22:15:30 -05:00
|
|
|
do x3.write_cond |cond| {
|
|
|
|
chan.send(());
|
|
|
|
cond.wait();
|
|
|
|
chan.send(());
|
|
|
|
}
|
|
|
|
}
|
|
|
|
let _ = port.recv(); // Wait until child gets in the rwlock
|
|
|
|
do x.read { } // Must be able to get in as a reader in the meantime
|
|
|
|
do x.write_cond |cond| { // Or as another writer
|
|
|
|
let woken = cond.signal();
|
2013-03-28 20:39:09 -05:00
|
|
|
assert!(woken);
|
2012-08-09 22:15:30 -05:00
|
|
|
}
|
|
|
|
let _ = port.recv(); // Wait until child wakes up
|
|
|
|
do x.read { } // Just for good measure
|
|
|
|
}
|
2012-08-13 15:47:35 -05:00
|
|
|
#[cfg(test)]
fn test_rwlock_cond_broadcast_helper(num_waiters: uint,
                                     dg1: bool,
                                     dg2: bool) {
    // Much like the mutex broadcast test. Downgrade-enabled.
    // dg1/dg2 select whether the waiters / the broadcaster take the
    // write lock through write_downgrade or plain write_cond.
    fn lock_cond(x: &RWlock, downgrade: bool, blk: &fn(c: &Condvar)) {
        if downgrade {
            do x.write_downgrade |mode| {
                (&mode).write_cond(blk)
            }
        } else {
            x.write_cond(blk)
        }
    }
    let x = ~RWlock();
    let mut ports = ~[];

    for num_waiters.times {
        let xi = (*x).clone();
        let (port, chan) = comm::stream();
        ports.push(port);
        do task::spawn || {
            do lock_cond(&xi, dg1) |cond| {
                chan.send(());   // checked in: parent may broadcast now
                cond.wait();
                chan.send(());   // confirm the broadcast woke us
            }
        }
    }

    // wait until all children get in the mutex
    for ports.iter().advance |port| { let _ = port.recv(); }
    do lock_cond(x, dg2) |cond| {
        // Every waiter must be blocked on the condvar here, so the
        // broadcast count must equal the number of spawned tasks.
        let num_woken = cond.broadcast();
        assert_eq!(num_woken, num_waiters);
    }
    // wait until all children wake up
    for ports.iter().advance |port| { let _ = port.recv(); }
}
|
|
|
|
#[test]
fn test_rwlock_cond_broadcast() {
    // Exercise every downgrade-mode combination for both the waiters
    // (second arg) and the broadcaster (third arg), first with zero
    // waiters (degenerate broadcast) and then with a dozen.
    test_rwlock_cond_broadcast_helper(0,  true,  true);
    test_rwlock_cond_broadcast_helper(12, true,  true);
    test_rwlock_cond_broadcast_helper(0,  true,  false);
    test_rwlock_cond_broadcast_helper(12, true,  false);
    test_rwlock_cond_broadcast_helper(0,  false, true);
    test_rwlock_cond_broadcast_helper(12, false, true);
    test_rwlock_cond_broadcast_helper(0,  false, false);
    test_rwlock_cond_broadcast_helper(12, false, false);
}
|
2012-08-09 21:07:37 -05:00
|
|
|
#[cfg(test)] #[ignore(cfg(windows))]
fn rwlock_kill_helper(mode1: RWlockMode, mode2: RWlockMode) {
    // Mutex must get automatically unlocked if failed/killed within.
    // A child acquires the lock in mode1 and fails while holding it;
    // the parent then re-acquires in mode2 to prove it was released.
    let x = ~RWlock();
    let x2 = (*x).clone();

    let result: result::Result<(),()> = do task::try || {
        do lock_rwlock_in_mode(&x2, mode1) {
            fail!();
        }
    };
    assert!(result.is_err());
    // child task must have finished by the time try returns
    do lock_rwlock_in_mode(x, mode2) { }
}
|
2012-08-09 21:07:37 -05:00
|
|
|
#[test] #[ignore(cfg(windows))]
fn test_rwlock_reader_killed_writer() {
    // A reader that fails mid-lock must not block a later writer.
    rwlock_kill_helper(Read, Write);
}
|
2012-08-09 21:07:37 -05:00
|
|
|
#[test] #[ignore(cfg(windows))]
fn test_rwlock_writer_killed_reader() {
    // A writer that fails mid-lock must not block a later reader.
    rwlock_kill_helper(Write, Read);
}
|
2012-08-09 21:07:37 -05:00
|
|
|
#[test] #[ignore(cfg(windows))]
fn test_rwlock_reader_killed_reader() {
    // A reader that fails mid-lock must not block a later reader.
    rwlock_kill_helper(Read, Read);
}
|
2012-08-09 21:07:37 -05:00
|
|
|
#[test] #[ignore(cfg(windows))]
fn test_rwlock_writer_killed_writer() {
    // A writer that fails mid-lock must not block a later writer.
    rwlock_kill_helper(Write, Write);
}
|
2012-08-13 15:47:35 -05:00
|
|
|
#[test] #[ignore(cfg(windows))]
fn test_rwlock_kill_downgrader() {
    // Failure inside a downgrade (or downgrade-read) critical section
    // must still release the lock cleanly, in every pairing with the
    // other modes.
    rwlock_kill_helper(Downgrade, Read);
    rwlock_kill_helper(Read, Downgrade);
    rwlock_kill_helper(Downgrade, Write);
    rwlock_kill_helper(Write, Downgrade);
    rwlock_kill_helper(DowngradeRead, Read);
    rwlock_kill_helper(Read, DowngradeRead);
    rwlock_kill_helper(DowngradeRead, Write);
    rwlock_kill_helper(Write, DowngradeRead);
    // NOTE(review): the next two pairs are each invoked twice —
    // presumably deliberate repetition to widen the window for
    // scheduler-dependent races, but possibly a copy-paste duplicate;
    // confirm before deduplicating.
    rwlock_kill_helper(DowngradeRead, Downgrade);
    rwlock_kill_helper(DowngradeRead, Downgrade);
    rwlock_kill_helper(Downgrade, DowngradeRead);
    rwlock_kill_helper(Downgrade, DowngradeRead);
}
|
2012-08-13 14:22:32 -05:00
|
|
|
#[test] #[should_fail] #[ignore(cfg(windows))]
|
2013-04-15 10:08:52 -05:00
|
|
|
fn test_rwlock_downgrade_cant_swap() {
|
2012-08-13 14:22:32 -05:00
|
|
|
// Tests that you can't downgrade with a different rwlock's token.
|
2012-08-29 16:45:25 -05:00
|
|
|
let x = ~RWlock();
|
|
|
|
let y = ~RWlock();
|
2012-08-13 14:22:32 -05:00
|
|
|
do x.write_downgrade |xwrite| {
|
2013-02-15 01:30:30 -06:00
|
|
|
let mut xopt = Some(xwrite);
|
2012-08-13 14:22:32 -05:00
|
|
|
do y.write_downgrade |_ywrite| {
|
2013-03-16 14:49:12 -05:00
|
|
|
y.downgrade(xopt.swap_unwrap());
|
2012-08-14 12:32:41 -05:00
|
|
|
error!("oops, y.downgrade(x) should have failed!");
|
2012-08-13 14:22:32 -05:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2012-08-07 17:03:30 -05:00
|
|
|
}
|