// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

//! Ports and channels.
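//!
//! One-shot and stream communication primitives for the new runtime. The
//! following is a sketch adapted from the tests at the bottom of this file;
//! it assumes it runs inside a new-runtime task (the tests use
//! `run_in_newsched_task`):
//!
//!     let (port, chan) = stream::<int>();
//!     chan.send(10);
//!     assert!(port.recv() == 10);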

use option::*;
use cast;
use util;
use ops::Drop;
use rt::kill::BlockedTask;
use kinds::Send;
use rt::sched::Scheduler;
use rt::local::Local;
use unstable::atomics::{AtomicUint, AtomicOption, Acquire, SeqCst};
use unstable::sync::UnsafeAtomicRcBox;
use util::Void;
use comm::{GenericChan, GenericSmartChan, GenericPort, Peekable};
use cell::Cell;
use clone::Clone;

/// A combined refcount / BlockedTask-as-uint pointer.
///
/// Can be equal to the following values:
///
/// * 2 - both endpoints are alive
/// * 1 - either the sender or the receiver is dead, determined by context
/// * <ptr> - A pointer to a blocked Task (see BlockedTask::cast_{to,from}_uint)
type State = uint;

static STATE_BOTH: State = 2;
static STATE_ONE: State = 1;
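
// A sketch of the state transitions implied by the swap calls in `try_send`,
// `try_recv` and the destructors below (illustrative, not exhaustive):
//
//     STATE_BOTH --(receiver blocks in try_recv)------> <blocked task pointer>
//     STATE_BOTH --(one endpoint finishes or drops)---> STATE_ONE
//     <task ptr> --(sender sends or drops)------------> receiver woken, STATE_ONE
//     STATE_ONE  --(other endpoint finishes or drops)-> packet freed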

/// The heap-allocated structure shared between two endpoints.
struct Packet<T> {
    state: AtomicUint,
    payload: Option<T>,
}

/// A one-shot channel.
pub struct ChanOne<T> {
    void_packet: *mut Void,
    suppress_finalize: bool
}

/// A one-shot port.
pub struct PortOne<T> {
    void_packet: *mut Void,
    suppress_finalize: bool
}

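/// Creates a one-shot port/channel pair. Each endpoint is consumed by its
/// single send or receive. A sketch adapted from the tests in this file
/// (which run inside `run_in_newsched_task`):
///
///     let (port, chan) = oneshot::<int>();
///     chan.send(10);
///     assert!(port.recv() == 10);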
pub fn oneshot<T: Send>() -> (PortOne<T>, ChanOne<T>) {
    let packet: ~Packet<T> = ~Packet {
        state: AtomicUint::new(STATE_BOTH),
        payload: None
    };

    unsafe {
        let packet: *mut Void = cast::transmute(packet);
        let port = PortOne {
            void_packet: packet,
            suppress_finalize: false
        };
        let chan = ChanOne {
            void_packet: packet,
            suppress_finalize: false
        };
        return (port, chan);
    }
}

impl<T> ChanOne<T> {
    fn packet(&self) -> *mut Packet<T> {
        unsafe {
            let p: *mut ~Packet<T> = cast::transmute(&self.void_packet);
            let p: *mut Packet<T> = &mut **p;
            return p;
        }
    }

    pub fn send(self, val: T) {
        self.try_send(val);
    }

    pub fn try_send(self, val: T) -> bool {
        let mut this = self;
        let mut recvr_active = true;
        let packet = this.packet();

        unsafe {

            // Install the payload
            assert!((*packet).payload.is_none());
            (*packet).payload = Some(val);

            // Atomically swap out the old state to figure out what
            // the port's up to, issuing a release barrier to prevent
            // reordering of the payload write. This also issues an
            // acquire barrier that keeps the subsequent access of the
            // ~Task pointer from being reordered.
            let oldstate = (*packet).state.swap(STATE_ONE, SeqCst);
            match oldstate {
                STATE_BOTH => {
                    // Port is not waiting yet. Nothing to do
                    do Local::borrow::<Scheduler, ()> |sched| {
                        rtdebug!("non-rendezvous send");
                        sched.metrics.non_rendezvous_sends += 1;
                    }
                }
                STATE_ONE => {
                    do Local::borrow::<Scheduler, ()> |sched| {
                        rtdebug!("rendezvous send");
                        sched.metrics.rendezvous_sends += 1;
                    }
                    // Port has closed. Need to clean up.
                    let _packet: ~Packet<T> = cast::transmute(this.void_packet);
                    recvr_active = false;
                }
                task_as_state => {
                    // Port is blocked. Wake it up.
                    let recvr = BlockedTask::cast_from_uint(task_as_state);
                    do recvr.wake().map_consume |woken_task| {
                        let mut sched = Local::take::<Scheduler>();
                        rtdebug!("rendezvous send");
                        sched.metrics.rendezvous_sends += 1;
                        sched.schedule_task(woken_task);
                    };
                }
            }
        }

        // Suppress the synchronizing actions in the finalizer. We're done with the packet.
        this.suppress_finalize = true;
        return recvr_active;
    }
}

impl<T> PortOne<T> {
    fn packet(&self) -> *mut Packet<T> {
        unsafe {
            let p: *mut ~Packet<T> = cast::transmute(&self.void_packet);
            let p: *mut Packet<T> = &mut **p;
            return p;
        }
    }

    pub fn recv(self) -> T {
        match self.try_recv() {
            Some(val) => val,
            None => {
                fail!("receiving on closed channel");
            }
        }
    }

    pub fn try_recv(self) -> Option<T> {
        let mut this = self;
        let packet = this.packet();

        // Optimistic check. If data was sent already, we don't even need to block.
        // No release barrier needed here; we're not handing off our task pointer yet.
        if unsafe { (*packet).state.load(Acquire) } != STATE_ONE {
            // No data available yet.
            // Switch to the scheduler to put the ~Task into the Packet state.
            let sched = Local::take::<Scheduler>();
            do sched.deschedule_running_task_and_then |sched, task| {
                unsafe {
                    // Atomically swap the task pointer into the Packet state, issuing
                    // an acquire barrier to prevent reordering of the subsequent read
                    // of the payload. Also issues a release barrier to prevent
                    // reordering of any previous writes to the task structure.
                    let task_as_state = task.cast_to_uint();
                    let oldstate = (*packet).state.swap(task_as_state, SeqCst);
                    match oldstate {
                        STATE_BOTH => {
                            // Data has not been sent. Now we're blocked.
                            rtdebug!("non-rendezvous recv");
                            sched.metrics.non_rendezvous_recvs += 1;
                        }
                        STATE_ONE => {
                            rtdebug!("rendezvous recv");
                            sched.metrics.rendezvous_recvs += 1;

                            // Channel is closed. Switch back and check the data.
                            // NB: We have to drop back into the scheduler event loop here
                            // instead of switching immediately back or we could end up
                            // triggering infinite recursion on the scheduler's stack.
                            let recvr = BlockedTask::cast_from_uint(task_as_state);
                            sched.enqueue_blocked_task(recvr);
                        }
                        _ => util::unreachable()
                    }
                }
            }
        }

        // Task resumes.

        // No further memory barrier is needed here to access the
        // payload. Some scenarios:
        //
        // 1) We encountered STATE_ONE above - the atomic_xchg was the acq barrier. We're fine.
        // 2) We encountered STATE_BOTH above and blocked. The sending task then ran us
        //    and ran on its thread. The sending task issued a read barrier when taking the
        //    pointer to the receiving task.
        // 3) We encountered STATE_BOTH above and blocked, but the receiving task (this task)
        //    is pinned to some other scheduler, so the sending task had to give us to
        //    a different scheduler for resuming. That send synchronized memory.

        unsafe {
            let payload = (*packet).payload.take();

            // The sender has closed up shop. Drop the packet.
            let _packet: ~Packet<T> = cast::transmute(this.void_packet);
            // Suppress the synchronizing actions in the finalizer. We're done with the packet.
            this.suppress_finalize = true;
            return payload;
        }
    }
}

impl<T> Peekable<T> for PortOne<T> {
    fn peek(&self) -> bool {
        unsafe {
            let packet: *mut Packet<T> = self.packet();
            let oldstate = (*packet).state.load(SeqCst);
            match oldstate {
                STATE_BOTH => false,
                STATE_ONE => (*packet).payload.is_some(),
                _ => util::unreachable()
            }
        }
    }
}

#[unsafe_destructor]
impl<T> Drop for ChanOne<T> {
    fn drop(&self) {
        if self.suppress_finalize { return }

        unsafe {
            let this = cast::transmute_mut(self);
            let oldstate = (*this.packet()).state.swap(STATE_ONE, SeqCst);
            match oldstate {
                STATE_BOTH => {
                    // Port still active. It will destroy the Packet.
                },
                STATE_ONE => {
                    let _packet: ~Packet<T> = cast::transmute(this.void_packet);
                },
                task_as_state => {
                    // The port is blocked waiting for a message we will never send. Wake it.
                    assert!((*this.packet()).payload.is_none());
                    let recvr = BlockedTask::cast_from_uint(task_as_state);
                    do recvr.wake().map_consume |woken_task| {
                        let sched = Local::take::<Scheduler>();
                        sched.schedule_task(woken_task);
                    };
                }
            }
        }
    }
}

#[unsafe_destructor]
impl<T> Drop for PortOne<T> {
    fn drop(&self) {
        if self.suppress_finalize { return }

        unsafe {
            let this = cast::transmute_mut(self);
            let oldstate = (*this.packet()).state.swap(STATE_ONE, SeqCst);
            match oldstate {
                STATE_BOTH => {
                    // Chan still active. It will destroy the packet.
                },
                STATE_ONE => {
                    let _packet: ~Packet<T> = cast::transmute(this.void_packet);
                }
                task_as_state => {
                    // This case occurs during unwinding, when the blocked
                    // receiver was killed awake. The task can't still be
                    // blocked (we are it), but we need to free the handle.
                    let recvr = BlockedTask::cast_from_uint(task_as_state);
                    // FIXME(#7554)(bblum): Make this cfg(test) dependent in a later commit.
                    assert!(recvr.wake().is_none());
                }
            }
        }
    }
}

struct StreamPayload<T> {
    val: T,
    next: PortOne<StreamPayload<T>>
}

type StreamChanOne<T> = ChanOne<StreamPayload<T>>;
type StreamPortOne<T> = PortOne<StreamPayload<T>>;

/// A channel with unbounded size.
pub struct Chan<T> {
    // FIXME #5372. Using Cell because we don't take &mut self
    next: Cell<StreamChanOne<T>>
}

/// A port with unbounded size.
pub struct Port<T> {
    // FIXME #5372. Using Cell because we don't take &mut self
    next: Cell<StreamPortOne<T>>
}

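/// Creates a (Port, Chan) pair that can carry any number of messages. A
/// sketch adapted from the tests in this file (which run inside
/// `run_in_newsched_task`):
///
///     let (port, chan) = stream::<int>();
///     chan.send(1);
///     chan.send(2);
///     assert!(port.recv() == 1);
///     assert!(port.recv() == 2);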
pub fn stream<T: Send>() -> (Port<T>, Chan<T>) {
    let (pone, cone) = oneshot();
    let port = Port { next: Cell::new(pone) };
    let chan = Chan { next: Cell::new(cone) };
    return (port, chan);
}

impl<T: Send> GenericChan<T> for Chan<T> {
    fn send(&self, val: T) {
        self.try_send(val);
    }
}

impl<T: Send> GenericSmartChan<T> for Chan<T> {
    fn try_send(&self, val: T) -> bool {
        let (next_pone, next_cone) = oneshot();
        let cone = self.next.take();
        self.next.put_back(next_cone);
        cone.try_send(StreamPayload { val: val, next: next_pone })
    }
}

impl<T> GenericPort<T> for Port<T> {
    fn recv(&self) -> T {
        match self.try_recv() {
            Some(val) => val,
            None => {
                fail!("receiving on closed channel");
            }
        }
    }

    fn try_recv(&self) -> Option<T> {
        let pone = self.next.take();
        match pone.try_recv() {
            Some(StreamPayload { val, next }) => {
                self.next.put_back(next);
                Some(val)
            }
            None => None
        }
    }
}

impl<T> Peekable<T> for Port<T> {
    fn peek(&self) -> bool {
        self.next.with_mut_ref(|p| p.peek())
    }
}

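/// A channel that can be cloned and shared among many sending tasks. A
/// sketch adapted from `shared_chan_stress` below (which runs inside
/// `run_in_mt_newsched_task`):
///
///     let (port, chan) = stream::<()>();
///     let chan = SharedChan::new(chan);
///     let chan2 = chan.clone();
///     chan2.send(());
///     port.recv();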
pub struct SharedChan<T> {
    // Just like Chan, but a shared AtomicOption instead of Cell
    priv next: UnsafeAtomicRcBox<AtomicOption<StreamChanOne<T>>>
}

impl<T> SharedChan<T> {
    pub fn new(chan: Chan<T>) -> SharedChan<T> {
        let next = chan.next.take();
        let next = AtomicOption::new(~next);
        SharedChan { next: UnsafeAtomicRcBox::new(next) }
    }
}

impl<T: Send> GenericChan<T> for SharedChan<T> {
    fn send(&self, val: T) {
        self.try_send(val);
    }
}

impl<T: Send> GenericSmartChan<T> for SharedChan<T> {
    fn try_send(&self, val: T) -> bool {
        unsafe {
            let (next_pone, next_cone) = oneshot();
            let cone = (*self.next.get()).swap(~next_cone, SeqCst);
            cone.unwrap().try_send(StreamPayload { val: val, next: next_pone })
        }
    }
}

impl<T> Clone for SharedChan<T> {
    fn clone(&self) -> SharedChan<T> {
        SharedChan {
            next: self.next.clone()
        }
    }
}

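/// A port that can be cloned and shared among many receiving tasks. A
/// sketch adapted from `shared_port_stress` below (which runs inside
/// `run_in_mt_newsched_task`):
///
///     let (port, chan) = stream::<()>();
///     let port = SharedPort::new(port);
///     let port2 = port.clone();
///     chan.send(());
///     port2.recv();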
pub struct SharedPort<T> {
    // The next port on which we will receive the next port on which we will receive T
    priv next_link: UnsafeAtomicRcBox<AtomicOption<PortOne<StreamPortOne<T>>>>
}

impl<T> SharedPort<T> {
    pub fn new(port: Port<T>) -> SharedPort<T> {
        // Put the data port into a new link pipe
        let next_data_port = port.next.take();
        let (next_link_port, next_link_chan) = oneshot();
        next_link_chan.send(next_data_port);
        let next_link = AtomicOption::new(~next_link_port);
        SharedPort { next_link: UnsafeAtomicRcBox::new(next_link) }
    }
}

impl<T: Send> GenericPort<T> for SharedPort<T> {
    fn recv(&self) -> T {
        match self.try_recv() {
            Some(val) => val,
            None => {
                fail!("receiving on a closed channel");
            }
        }
    }

    fn try_recv(&self) -> Option<T> {
        unsafe {
            let (next_link_port, next_link_chan) = oneshot();
            let link_port = (*self.next_link.get()).swap(~next_link_port, SeqCst);
            let link_port = link_port.unwrap();
            let data_port = link_port.recv();
            let (next_data_port, res) = match data_port.try_recv() {
                Some(StreamPayload { val, next }) => {
                    (next, Some(val))
                }
                None => {
                    let (next_data_port, _) = oneshot();
                    (next_data_port, None)
                }
            };
            next_link_chan.send(next_data_port);
            return res;
        }
    }
}

impl<T> Clone for SharedPort<T> {
    fn clone(&self) -> SharedPort<T> {
        SharedPort {
            next_link: self.next_link.clone()
        }
    }
}

// XXX: Need better name
type MegaPipe<T> = (SharedPort<T>, SharedChan<T>);

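/// Creates a combined shared port and shared channel. A sketch adapted from
/// `megapipe_stress` below (which runs inside `run_in_mt_newsched_task`):
///
///     let pipe = megapipe::<()>();
///     let pipe2 = pipe.clone();
///     pipe2.send(());
///     pipe.recv();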
pub fn megapipe<T: Send>() -> MegaPipe<T> {
    let (port, chan) = stream();
    (SharedPort::new(port), SharedChan::new(chan))
}

impl<T: Send> GenericChan<T> for MegaPipe<T> {
    fn send(&self, val: T) {
        match *self {
            (_, ref c) => c.send(val)
        }
    }
}

impl<T: Send> GenericSmartChan<T> for MegaPipe<T> {
    fn try_send(&self, val: T) -> bool {
        match *self {
            (_, ref c) => c.try_send(val)
        }
    }
}

impl<T: Send> GenericPort<T> for MegaPipe<T> {
    fn recv(&self) -> T {
        match *self {
            (ref p, _) => p.recv()
        }
    }

    fn try_recv(&self) -> Option<T> {
        match *self {
            (ref p, _) => p.try_recv()
        }
    }
}

#[cfg(test)]
mod test {
    use super::*;
    use option::*;
    use rt::test::*;
    use cell::Cell;
    use iter::Times;

    #[test]
    fn oneshot_single_thread_close_port_first() {
        // Simple test of closing without sending
        do run_in_newsched_task {
            let (port, _chan) = oneshot::<int>();
            { let _p = port; }
        }
    }

    #[test]
    fn oneshot_single_thread_close_chan_first() {
        // Simple test of closing without sending
        do run_in_newsched_task {
            let (_port, chan) = oneshot::<int>();
            { let _c = chan; }
        }
    }

    #[test]
    fn oneshot_single_thread_send_port_close() {
        // Testing that the sender cleans up the payload if receiver is closed
        do run_in_newsched_task {
            let (port, chan) = oneshot::<~int>();
            { let _p = port; }
            chan.send(~0);
        }
    }

    #[test]
    fn oneshot_single_thread_recv_chan_close() {
        // Receiving on a closed chan will fail
        do run_in_newsched_task {
            let res = do spawntask_try {
                let (port, chan) = oneshot::<~int>();
                { let _c = chan; }
                port.recv();
            };
            // What is our res?
            rtdebug!("res is: %?", res.is_err());
            assert!(res.is_err());
        }
    }

    #[test]
    fn oneshot_single_thread_send_then_recv() {
        do run_in_newsched_task {
            let (port, chan) = oneshot::<~int>();
            chan.send(~10);
            assert!(port.recv() == ~10);
        }
    }

    #[test]
    fn oneshot_single_thread_try_send_open() {
        do run_in_newsched_task {
            let (port, chan) = oneshot::<int>();
            assert!(chan.try_send(10));
            assert!(port.recv() == 10);
        }
    }

    #[test]
    fn oneshot_single_thread_try_send_closed() {
        do run_in_newsched_task {
            let (port, chan) = oneshot::<int>();
            { let _p = port; }
            assert!(!chan.try_send(10));
        }
    }

    #[test]
    fn oneshot_single_thread_try_recv_open() {
        do run_in_newsched_task {
            let (port, chan) = oneshot::<int>();
            chan.send(10);
            assert!(port.try_recv() == Some(10));
        }
    }

    #[test]
    fn oneshot_single_thread_try_recv_closed() {
        do run_in_newsched_task {
            let (port, chan) = oneshot::<int>();
            { let _c = chan; }
            assert!(port.try_recv() == None);
        }
    }

    #[test]
    fn oneshot_single_thread_peek_data() {
        do run_in_newsched_task {
            let (port, chan) = oneshot::<int>();
            assert!(!port.peek());
            chan.send(10);
            assert!(port.peek());
        }
    }

    #[test]
    fn oneshot_single_thread_peek_close() {
        do run_in_newsched_task {
            let (port, chan) = oneshot::<int>();
            { let _c = chan; }
            assert!(!port.peek());
            assert!(!port.peek());
        }
    }

    #[test]
    fn oneshot_single_thread_peek_open() {
        do run_in_newsched_task {
            let (port, _) = oneshot::<int>();
            assert!(!port.peek());
        }
    }

    #[test]
    fn oneshot_multi_task_recv_then_send() {
        do run_in_newsched_task {
            let (port, chan) = oneshot::<~int>();
            let port_cell = Cell::new(port);
            do spawntask_immediately {
                assert!(port_cell.take().recv() == ~10);
            }

            chan.send(~10);
        }
    }

    #[test]
    fn oneshot_multi_task_recv_then_close() {
        do run_in_newsched_task {
            let (port, chan) = oneshot::<~int>();
            let port_cell = Cell::new(port);
            let chan_cell = Cell::new(chan);
            do spawntask_later {
                let _cell = chan_cell.take();
            }
            let res = do spawntask_try {
                assert!(port_cell.take().recv() == ~10);
            };
            assert!(res.is_err());
        }
    }

    #[test]
    fn oneshot_multi_thread_close_stress() {
        for stress_factor().times {
            do run_in_newsched_task {
                let (port, chan) = oneshot::<int>();
                let port_cell = Cell::new(port);
                let _thread = do spawntask_thread {
                    let _p = port_cell.take();
                };
                let _chan = chan;
            }
        }
    }

    #[test]
    fn oneshot_multi_thread_send_close_stress() {
        for stress_factor().times {
            do run_in_newsched_task {
                let (port, chan) = oneshot::<int>();
                let chan_cell = Cell::new(chan);
                let port_cell = Cell::new(port);
                let _thread1 = do spawntask_thread {
                    let _p = port_cell.take();
                };
                let _thread2 = do spawntask_thread {
                    let c = chan_cell.take();
                    c.send(1);
                };
            }
        }
    }

    #[test]
    fn oneshot_multi_thread_recv_close_stress() {
        for stress_factor().times {
            do run_in_newsched_task {
                let (port, chan) = oneshot::<int>();
                let chan_cell = Cell::new(chan);
                let port_cell = Cell::new(port);
                let _thread1 = do spawntask_thread {
                    let port_cell = Cell::new(port_cell.take());
                    let res = do spawntask_try {
                        port_cell.take().recv();
                    };
                    assert!(res.is_err());
                };
                let _thread2 = do spawntask_thread {
                    let chan_cell = Cell::new(chan_cell.take());
                    do spawntask {
                        chan_cell.take();
                    }
                };
            }
        }
    }

    #[test]
    fn oneshot_multi_thread_send_recv_stress() {
        for stress_factor().times {
            do run_in_newsched_task {
                let (port, chan) = oneshot::<~int>();
                let chan_cell = Cell::new(chan);
                let port_cell = Cell::new(port);
                let _thread1 = do spawntask_thread {
                    chan_cell.take().send(~10);
                };
                let _thread2 = do spawntask_thread {
                    assert!(port_cell.take().recv() == ~10);
                };
            }
        }
    }

    #[test]
    fn stream_send_recv_stress() {
        for stress_factor().times {
            do run_in_mt_newsched_task {
                let (port, chan) = stream::<~int>();

                send(chan, 0);
                recv(port, 0);

                fn send(chan: Chan<~int>, i: int) {
                    if i == 10 { return }

                    let chan_cell = Cell::new(chan);
                    do spawntask_random {
                        let chan = chan_cell.take();
                        chan.send(~i);
                        send(chan, i + 1);
                    }
                }

                fn recv(port: Port<~int>, i: int) {
                    if i == 10 { return }

                    let port_cell = Cell::new(port);
                    do spawntask_random {
                        let port = port_cell.take();
                        assert!(port.recv() == ~i);
                        recv(port, i + 1);
                    };
                }
            }
        }
    }

    #[test]
    fn recv_a_lot() {
        // Regression test that we don't run out of stack in scheduler context
        do run_in_newsched_task {
            let (port, chan) = stream();
            for 10000.times { chan.send(()) }
            for 10000.times { port.recv() }
        }
    }

    #[test]
    fn shared_chan_stress() {
        do run_in_mt_newsched_task {
            let (port, chan) = stream();
            let chan = SharedChan::new(chan);
            let total = stress_factor() + 100;
            for total.times {
                let chan_clone = chan.clone();
                do spawntask_random {
                    chan_clone.send(());
                }
            }

            for total.times {
                port.recv();
            }
        }
    }

    #[test]
    fn shared_port_stress() {
        do run_in_mt_newsched_task {
            // XXX: Removing these type annotations causes an ICE
            let (end_port, end_chan) = stream::<()>();
            let (port, chan) = stream::<()>();
            let end_chan = SharedChan::new(end_chan);
            let port = SharedPort::new(port);
            let total = stress_factor() + 100;
            for total.times {
                let end_chan_clone = end_chan.clone();
                let port_clone = port.clone();
                do spawntask_random {
                    port_clone.recv();
                    end_chan_clone.send(());
                }
            }

            for total.times {
                chan.send(());
            }

            for total.times {
                end_port.recv();
            }
        }
    }

    #[test]
    fn shared_port_close_simple() {
        do run_in_mt_newsched_task {
            let (port, chan) = stream::<()>();
            let port = SharedPort::new(port);
            { let _chan = chan; }
            assert!(port.try_recv().is_none());
        }
    }

    #[test]
    fn shared_port_close() {
        do run_in_mt_newsched_task {
            let (end_port, end_chan) = stream::<bool>();
            let (port, chan) = stream::<()>();
            let end_chan = SharedChan::new(end_chan);
            let port = SharedPort::new(port);
            let chan = SharedChan::new(chan);
            let send_total = 10;
            let recv_total = 20;
            do spawntask_random {
                for send_total.times {
                    let chan_clone = chan.clone();
                    do spawntask_random {
                        chan_clone.send(());
                    }
                }
            }
            let end_chan_clone = end_chan.clone();
            do spawntask_random {
                for recv_total.times {
                    let port_clone = port.clone();
                    let end_chan_clone = end_chan_clone.clone();
                    do spawntask_random {
                        let recvd = port_clone.try_recv().is_some();
                        end_chan_clone.send(recvd);
                    }
                }
            }

            let mut recvd = 0;
            for recv_total.times {
                recvd += if end_port.recv() { 1 } else { 0 };
            }

            assert!(recvd == send_total);
        }
    }

    #[test]
    fn megapipe_stress() {
        use rand;
        use rand::RngUtil;

        do run_in_mt_newsched_task {
            let (end_port, end_chan) = stream::<()>();
            let end_chan = SharedChan::new(end_chan);
            let pipe = megapipe();
            let total = stress_factor() + 10;
            let mut rng = rand::rng();
            for total.times {
                let msgs = rng.gen_uint_range(0, 10);
                let pipe_clone = pipe.clone();
                let end_chan_clone = end_chan.clone();
                do spawntask_random {
                    for msgs.times {
                        pipe_clone.send(());
                    }
                    for msgs.times {
                        pipe_clone.recv();
                    }
                }

                end_chan_clone.send(());
            }

            for total.times {
                end_port.recv();
            }
        }
    }

}
|