// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

//! A concurrent queue used to signal remote event loops
//!
//! This queue implementation is used to send tasks among event loops. It is
//! backed by a multi-producer/single-consumer queue from libstd and by
//! uv_async_t handles (to wake up a remote event loop).
//!
//! The uv_async_t is stored next to the event loop, so in order to not keep
//! the event loop alive we use uv_ref and uv_unref to control whether the
//! async handle is active.
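//!
//! A minimal usage sketch (illustrative only; `uv_loop` and `blocked_task`
//! stand in for the scheduler's `Loop` and a `BlockedTask`, neither of which
//! is constructed here):
//!
//! ```ignore
//! let mut pool = QueuePool::new(&mut uv_loop);
//! // Hand a `Queue` to another scheduler so it can wake this loop up later.
//! let mut queue = pool.queue();
//! // On the remote end: enqueue the task and signal this loop's async handle.
//! queue.push(blocked_task);
//! ```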

#[allow(dead_code)];

use std::cast;
use std::libc::{c_void, c_int};
use std::rt::task::BlockedTask;
use std::unstable::sync::LittleLock;
use std::sync::arc::UnsafeArc;
use mpsc = std::sync::mpsc_queue;

use async::AsyncWatcher;
use super::{Loop, UvHandle};
use uvll;

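/// Messages sent over the MPSC queue to the consumer's event loop: either a
/// blocked task to wake up, or an adjustment to the count of live `Queue`
/// handles (see `async_cb` below).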
enum Message {
    Task(BlockedTask),
    Increment,
    Decrement,
}

struct State {
    handle: *uvll::uv_async_t,
    lock: LittleLock, // see comments in async_cb for why this is needed
    queue: mpsc::Queue<Message>,
}

/// This structure is intended to be stored next to the event loop, and it is
/// used to create new `Queue` structures.
pub struct QueuePool {
    priv queue: UnsafeArc<State>,
    priv refcnt: uint,
}

/// This type is used to send messages back to the original event loop.
pub struct Queue {
    priv queue: UnsafeArc<State>,
}

extern fn async_cb(handle: *uvll::uv_async_t, status: c_int) {
    assert_eq!(status, 0);
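    // QueuePool::new stashed a pointer to the pool in this handle's data
    // field; recover the pool (and the shared State) from there.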
    let pool: &mut QueuePool = unsafe {
        cast::transmute(uvll::get_data_for_uv_handle(handle))
    };
    let state: &mut State = unsafe { cast::transmute(pool.queue.get()) };

    // Remember that there is no guarantee about how many times an async
    // callback is called in relation to the number of sends, so process the
    // entire queue in a loop.
    loop {
        match state.queue.pop() {
            mpsc::Data(Task(task)) => {
                let _ = task.wake().map(|t| t.reawaken());
            }
            mpsc::Data(Increment) => unsafe {
                if pool.refcnt == 0 {
                    uvll::uv_ref(state.handle);
                }
                pool.refcnt += 1;
            },
            mpsc::Data(Decrement) => unsafe {
                pool.refcnt -= 1;
                if pool.refcnt == 0 {
                    uvll::uv_unref(state.handle);
                }
            },
            mpsc::Empty | mpsc::Inconsistent => break
        };
    }

    // If the refcount is now zero after processing the queue, then there is
    // no longer a reference on the async handle and it is possible that this
    // event loop can exit. What we're not guaranteed, however, is that a
    // producer in the middle of dropping itself is done with the handle yet.
    // It could be that we saw their Decrement message but they have yet to
    // signal on the async handle. If we were to return immediately, the
    // entire uv loop could be destroyed, meaning the call to uv_async_send
    // would abort().
    //
    // In order to fix this, an OS mutex is used to wait for the other end to
    // finish before we continue. The drop block on a handle will acquire the
    // mutex and then drop it after both the push and the send have been
    // completed. If we acquire the mutex here, then we are guaranteed that
    // there are no longer any senders which are holding on to their handles,
    // so we can safely allow the event loop to exit.
    if pool.refcnt == 0 {
        unsafe {
            let _l = state.lock.lock();
        }
    }
}

impl QueuePool {
    pub fn new(loop_: &mut Loop) -> ~QueuePool {
        let handle = UvHandle::alloc(None::<AsyncWatcher>, uvll::UV_ASYNC);
        let state = UnsafeArc::new(State {
            handle: handle,
            lock: LittleLock::new(),
            queue: mpsc::Queue::new(),
        });
        let q = ~QueuePool {
            refcnt: 0,
            queue: state,
        };
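
        // Initialize the async handle on this loop, then uv_unref it so an
        // idle handle does not keep the event loop alive (see module docs).
        // A pointer back to the pool is stashed in the handle's data field
        // so that async_cb can recover it.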
        unsafe {
            assert_eq!(uvll::uv_async_init(loop_.handle, handle, async_cb), 0);
            uvll::uv_unref(handle);
            let data: *c_void = *cast::transmute::<&~QueuePool, &*c_void>(&q);
            uvll::set_data_for_uv_handle(handle, data);
        }

        return q;
    }
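
    /// Creates a new `Queue` that can send messages to this pool's event
    /// loop. Handing out the first `Queue` uv_refs the async handle so the
    /// loop stays alive while producers exist.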
    pub fn queue(&mut self) -> Queue {
        unsafe {
            if self.refcnt == 0 {
                uvll::uv_ref((*self.queue.get()).handle);
            }
            self.refcnt += 1;
        }
        Queue { queue: self.queue.clone() }
    }
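
    /// Returns the raw uv_async_t handle backing this pool.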
    pub fn handle(&self) -> *uvll::uv_async_t {
        unsafe { (*self.queue.get()).handle }
    }
}

impl Queue {
    pub fn push(&mut self, task: BlockedTask) {
        unsafe {
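            // Enqueue the task first, then signal: uv_async_send is what
            // wakes up the consumer's event loop to run async_cb.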
            (*self.queue.get()).queue.push(Task(task));
            uvll::uv_async_send((*self.queue.get()).handle);
        }
    }
}

impl Clone for Queue {
    fn clone(&self) -> Queue {
        // Push a request to increment on the queue, but there's no need to
        // signal the event loop to process it at this time. We're guaranteed
        // that the count is at least one (because we have a queue right
        // here), and if this queue is dropped later on the event loop will
        // see the Increment before the corresponding Decrement anyway.
        unsafe {
            (*self.queue.get()).queue.push(Increment);
        }
        Queue { queue: self.queue.clone() }
    }
}

impl Drop for Queue {
    fn drop(&mut self) {
        // See the comments in the async_cb function for why there is a lock
        // that is acquired only on a drop.
        unsafe {
            let state = self.queue.get();
            let _l = (*state).lock.lock();
            (*state).queue.push(Decrement);
            uvll::uv_async_send((*state).handle);
        }
    }
}

impl Drop for State {
    fn drop(&mut self) {
        unsafe {
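            // Close the handle, passing a null close callback (transmute(0)).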
            uvll::uv_close(self.handle, cast::transmute(0));
            // Note that this does *not* free the handle, that is the
            // responsibility of the caller because the uv loop must be closed
            // before we deallocate this uv handle.
        }
    }
}