// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

use std::cast;
use std::rand::{XorShiftRng, Rng, Rand};
use std::rt::local::Local;
use std::rt::rtio::{RemoteCallback, PausableIdleCallback, Callback, EventLoop};
use std::rt::task::BlockedTask;
use std::rt::task::Task;
use std::sync::deque;
use std::unstable::mutex::Mutex;
use std::unstable::raw;
use mpsc = std::sync::mpsc_queue;

use context::Context;
use coroutine::Coroutine;
use sleeper_list::SleeperList;
use stack::StackPool;
use task::{TypeSched, GreenTask, HomeSched, AnySched};

/// A scheduler is responsible for coordinating the execution of Tasks
/// on a single thread. The scheduler runs inside a slightly modified
/// Rust Task. When not running, this task is stored in the scheduler
/// struct. The scheduler struct acts like a baton: all scheduling
/// actions are transfers of the baton.
///
/// XXX: This creates too many callbacks to run_sched_once, resulting
/// in too much allocation and too many events.
pub struct Scheduler {
    /// ID number of the pool that this scheduler is a member of. When
    /// reawakening green tasks, this is used to ensure that tasks aren't
    /// reawoken on the wrong pool of schedulers.
    pool_id: uint,
    /// There are N work queues, one per scheduler.
    work_queue: deque::Worker<~GreenTask>,
    /// Work queues for the other schedulers. These are created by
    /// cloning the core work queues.
    work_queues: ~[deque::Stealer<~GreenTask>],
    /// The queue of incoming messages from other schedulers.
    /// These are enqueued by SchedHandles, after which a remote callback
    /// is triggered to handle the message.
    message_queue: mpsc::Consumer<SchedMessage, ()>,
    /// Producer used to clone sched handles from
    message_producer: mpsc::Producer<SchedMessage, ()>,
    /// A shared list of sleeping schedulers. We'll use this to wake
    /// up schedulers when pushing work onto the work queue.
    sleeper_list: SleeperList,
    /// Indicates that we have previously pushed a handle onto the
    /// SleeperList but have not yet received the Wake message.
    /// Being `true` does not necessarily mean that the scheduler is
    /// not active, since there are multiple event sources that may
    /// wake the scheduler. It just prevents the scheduler from pushing
    /// multiple handles onto the sleeper list.
    sleepy: bool,
    /// A flag to indicate we've received the shutdown message and should
    /// no longer try to go to sleep, but exit instead.
    no_sleep: bool,
    stack_pool: StackPool,
    /// The scheduler runs on a special task. When it is not running
    /// it is stored here instead of the work queue.
    sched_task: Option<~GreenTask>,
    /// An action performed after a context switch on behalf of the
    /// code running before the context switch.
    cleanup_job: Option<CleanupJob>,
    /// If the scheduler shouldn't run some tasks, a friend to send
    /// them to.
    friend_handle: Option<SchedHandle>,
    /// Should this scheduler run any task, or only pinned tasks?
    run_anything: bool,
    /// A fast XorShift rng for scheduler use.
    rng: XorShiftRng,
    /// A togglable idle callback.
    idle_callback: Option<~PausableIdleCallback>,
    /// A countdown that starts at a random value and is decremented
    /// every time a yield check is performed. When it hits 0 a task
    /// will yield.
    yield_check_count: uint,
    /// A flag to tell the scheduler loop it needs to do some stealing
    /// in order to introduce randomness as part of a yield.
    steal_for_yield: bool,

    // n.b. currently destructors of an object are run in top-to-bottom order
    // of field declaration. Due to its nature, the pausable idle callback
    // must have some sort of handle to the event loop, so it needs to get
    // destroyed before the event loop itself. For this reason, we destroy
    // the event loop last to ensure that any unsafe references to it are
    // destroyed before it's actually destroyed.

    /// The event loop used to drive the scheduler and perform I/O.
    event_loop: ~EventLoop,
}

/// An indication of how hard to work on a given operation, the difference
/// mainly being whether memory is synchronized or not.
#[deriving(Eq)]
enum EffortLevel {
    DontTryTooHard,
    GiveItYourBest
}

static MAX_YIELD_CHECKS: uint = 20000;

fn reset_yield_check(rng: &mut XorShiftRng) -> uint {
    let r: uint = Rand::rand(rng);
    r % MAX_YIELD_CHECKS + 1
}
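
// For example: reset_yield_check returns a value in [1, MAX_YIELD_CHECKS],
// so with MAX_YIELD_CHECKS = 20000 a green task passes through at most
// 20000 calls to maybe_yield before yield_check_count reaches zero and the
// task is forced to yield (see maybe_yield below).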

impl Scheduler {

    // * Initialization Functions

    pub fn new(pool_id: uint,
               event_loop: ~EventLoop,
               work_queue: deque::Worker<~GreenTask>,
               work_queues: ~[deque::Stealer<~GreenTask>],
               sleeper_list: SleeperList)
               -> Scheduler {

        Scheduler::new_special(pool_id, event_loop, work_queue, work_queues,
                               sleeper_list, true, None)
    }
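
    // A minimal construction sketch (hypothetical wiring; a real pool
    // creates one Worker per scheduler and hands every scheduler the full
    // set of Stealers):
    //
    //     let mut buf_pool = deque::BufferPool::new();
    //     let (worker, stealer) = buf_pool.deque();
    //     let sched = ~Scheduler::new(0, event_loop, worker, ~[stealer],
    //                                 SleeperList::new());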

    pub fn new_special(pool_id: uint,
                       event_loop: ~EventLoop,
                       work_queue: deque::Worker<~GreenTask>,
                       work_queues: ~[deque::Stealer<~GreenTask>],
                       sleeper_list: SleeperList,
                       run_anything: bool,
                       friend: Option<SchedHandle>)
                       -> Scheduler {

        let (consumer, producer) = mpsc::queue(());
        let mut sched = Scheduler {
            pool_id: pool_id,
            sleeper_list: sleeper_list,
            message_queue: consumer,
            message_producer: producer,
            sleepy: false,
            no_sleep: false,
            event_loop: event_loop,
            work_queue: work_queue,
            work_queues: work_queues,
            stack_pool: StackPool::new(),
            sched_task: None,
            cleanup_job: None,
            run_anything: run_anything,
            friend_handle: friend,
            rng: new_sched_rng(),
            idle_callback: None,
            yield_check_count: 0,
            steal_for_yield: false
        };

        sched.yield_check_count = reset_yield_check(&mut sched.rng);

        return sched;
    }

    // XXX: This may eventually need to be refactored so that
    // the scheduler itself doesn't have to call event_loop.run.
    // That will be important for embedding the runtime into external
    // event loops.

    // Take a main task to run, and a scheduler to run it in. Create a
    // scheduler task and bootstrap into it.
    pub fn bootstrap(mut ~self, task: ~GreenTask) {

        // Build an Idle callback.
        let cb = ~SchedRunner as ~Callback;
        self.idle_callback = Some(self.event_loop.pausable_idle_callback(cb));

        // Create a task for the scheduler with an empty context.
        let sched_task = GreenTask::new_typed(Some(Coroutine::empty()),
                                              TypeSched);

        // Before starting our first task, make sure the idle callback
        // is active. As we do not start in the sleep state this is
        // important.
        self.idle_callback.get_mut_ref().resume();

        // Now, as far as all the scheduler state is concerned, we are inside
        // the "scheduler" context. So we can act like the scheduler and resume
        // the provided task. Let it think that the currently running task is
        // actually the sched_task so it knows where to squirrel it away.
        let mut sched_task = self.resume_task_immediately(sched_task, task);

        // Now we are back in the scheduler context, having
        // successfully run the input task. Start by running the
        // scheduler. Grab it out of TLS - performing the scheduler
        // action will have given it away.
        let sched = sched_task.sched.take_unwrap();
        rtdebug!("starting scheduler {}", sched.sched_id());
        let mut sched_task = sched.run(sched_task);

        // Close the idle callback.
        let mut sched = sched_task.sched.take_unwrap();
        sched.idle_callback.take();
        // Make one go through the loop to run the close callback.
        let mut stask = sched.run(sched_task);

        // Now that we are done with the scheduler, clean up the
        // scheduler task. Do so by removing it from TLS and manually
        // cleaning up the memory it uses. As we didn't actually call
        // task.run() on the scheduler task we never get through all
        // the cleanup code it runs.
        rtdebug!("stopping scheduler {}", stask.sched.get_ref().sched_id());

        // Should not have any messages
        let message = stask.sched.get_mut_ref().message_queue.pop();
        rtassert!(match message { mpsc::Empty => true, _ => false });

        stask.task.get_mut_ref().destroyed = true;
    }
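
    // Lifecycle note: bootstrap runs the whole scheduler from birth to
    // death in one call -- resume the first task, loop in run() until a
    // Shutdown message releases the last event loop reference, run() once
    // more to flush the close callback, then tear the scheduler task down
    // by hand.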

    // This does not return a scheduler, as the scheduler is placed
    // inside the task.
    pub fn run(mut ~self, stask: ~GreenTask) -> ~GreenTask {

        // This is unsafe because we need to place the scheduler, with
        // the event_loop inside, inside our task. But we still need a
        // mutable reference to the event_loop to give it the "run"
        // command.
        unsafe {
            let event_loop: *mut ~EventLoop = &mut self.event_loop;
            // Our scheduler must be in the task before the event loop
            // is started.
            stask.put_with_sched(self);
            (*event_loop).run();
        }

        // This is a serious code smell, but this function could be done away
        // with if necessary. The ownership of `stask` was transferred into
        // local storage just before the event loop ran, so it would be
        // possible to transmute `stask` to a uint across the running of the
        // event loop and re-acquire ownership here.
        //
        // That would involve removing the Task from TLS, removing the runtime,
        // forgetting the runtime, and then putting the task into `stask`. For
        // now, because we have `GreenTask::convert`, I chose this method for
        // cleanliness. This usage is *not* a fundamental reason for this
        // function to exist.
        GreenTask::convert(Local::take())
    }

    // * Execution Functions - Core Loop Logic

    // The model for this function is that you continue through it
    // until you either use the scheduler while performing a schedule
    // action, in which case you give it away and return early, or
    // you reach the end and sleep. In the case that a scheduler
    // action is performed, the event loop is notified so that this
    // function is called again.
    fn run_sched_once(mut ~self, stask: ~GreenTask) {
        // Make sure that we're not lying in that the `stask` argument is indeed
        // the scheduler task for this scheduler.
        assert!(self.sched_task.is_none());

        // Assume that we need to continue idling unless we reach the
        // end of this function without performing an action.
        self.idle_callback.get_mut_ref().resume();

        // First we check for scheduler messages; these are higher
        // priority than regular tasks.
        let (sched, stask) =
            match self.interpret_message_queue(stask, DontTryTooHard) {
                Some(pair) => pair,
                None => return
            };

        // This helper will use a randomized work-stealing algorithm
        // to find work.
        let (sched, stask) = match sched.do_work(stask) {
            Some(pair) => pair,
            None => return
        };

        // Now, before sleeping we need to find out if there really
        // were any messages. Give it your best!
        let (mut sched, stask) =
            match sched.interpret_message_queue(stask, GiveItYourBest) {
                Some(pair) => pair,
                None => return
            };

        // If we got here then there was no work to do.
        // Generate a SchedHandle and push it to the sleeper list so
        // somebody can wake us up later.
        if !sched.sleepy && !sched.no_sleep {
            rtdebug!("scheduler has no work to do, going to sleep");
            sched.sleepy = true;
            let handle = sched.make_handle();
            sched.sleeper_list.push(handle);
            // Since we are sleeping, deactivate the idle callback.
            sched.idle_callback.get_mut_ref().pause();
        } else {
            rtdebug!("not sleeping, already doing so or no_sleep set");
            // We may not be sleeping, but we still need to deactivate
            // the idle callback.
            sched.idle_callback.get_mut_ref().pause();
        }

        // Finished a cycle without using the Scheduler. Place it back
        // in TLS.
        stask.put_with_sched(sched);
    }
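
    // In effect each turn of the loop is: (1) drain a control message,
    // (2) run one local or stolen task, (3) re-check messages, and only
    // then consider sleeping -- so scheduler messages always take priority
    // over queued work.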

    // This function returns None if the scheduler is "used", or it
    // returns the still-available scheduler. At this point all
    // message-handling will count as a turn of work, and as a result
    // return None.
    fn interpret_message_queue(mut ~self, stask: ~GreenTask,
                               effort: EffortLevel)
                               -> Option<(~Scheduler, ~GreenTask)>
    {

        let msg = if effort == DontTryTooHard {
            self.message_queue.casual_pop()
        } else {
            // When popping our message queue, we could see an "inconsistent"
            // state which means that we *should* be able to pop data, but we
            // are unable to at this time. Our options are:
            //
            //  1. Spin waiting for data
            //  2. Ignore this and pretend we didn't find a message
            //
            // If we choose route 1, then if the pusher in question is currently
            // pre-empted, we're going to take up our entire time slice just
            // spinning on this queue. If we choose route 2, then the pusher in
            // question is still guaranteed to make a send() on its async
            // handle, so we are guaranteed to wake up and see its message at
            // some point.
            //
            // I have chosen to take route #2.
            match self.message_queue.pop() {
                mpsc::Data(t) => Some(t),
                mpsc::Empty | mpsc::Inconsistent => None
            }
        };

        match msg {
            Some(PinnedTask(task)) => {
                let mut task = task;
                task.give_home(HomeSched(self.make_handle()));
                self.resume_task_immediately(stask, task).put();
                return None;
            }
            Some(TaskFromFriend(task)) => {
                rtdebug!("got a task from a friend. lovely!");
                self.process_task(stask, task,
                                  Scheduler::resume_task_immediately_cl);
                return None;
            }
            Some(RunOnce(task)) => {
                // Bypass the process_task logic to force running this task
                // once on its home scheduler. This is often used for I/O
                // (homing).
                self.resume_task_immediately(stask, task).put();
                return None;
            }
            Some(Wake) => {
                self.sleepy = false;
                stask.put_with_sched(self);
                return None;
            }
            Some(Shutdown) => {
                rtdebug!("shutting down");
                if self.sleepy {
                    // There may be outstanding handles on the
                    // sleeper list. Pop them all to make sure that's
                    // not the case.
                    loop {
                        match self.sleeper_list.pop() {
                            Some(handle) => {
                                let mut handle = handle;
                                handle.send(Wake);
                            }
                            None => break
                        }
                    }
                }
                // No more sleeping. After there are no outstanding
                // event loop references we will shut down.
                self.no_sleep = true;
                self.sleepy = false;
                stask.put_with_sched(self);
                return None;
            }
            Some(NewNeighbor(neighbor)) => {
                self.work_queues.push(neighbor);
                return Some((self, stask));
            }
            None => {
                return Some((self, stask));
            }
        }
    }

    fn do_work(mut ~self, stask: ~GreenTask) -> Option<(~Scheduler, ~GreenTask)> {
        rtdebug!("scheduler calling do work");
        match self.find_work() {
            Some(task) => {
                rtdebug!("found some work! running the task");
                self.process_task(stask, task,
                                  Scheduler::resume_task_immediately_cl);
                return None;
            }
            None => {
                rtdebug!("no work was found, returning the scheduler struct");
                return Some((self, stask));
            }
        }
    }

    // Workstealing: In this iteration of the runtime each scheduler
    // thread has a distinct work queue. When no work is available
    // locally, make a few attempts to steal work from the queues of
    // other scheduler threads. If a few steals fail we end up in the
    // old "no work" path which is fine.

    // First step in the process is to find a task. This function does
    // that by first checking the local queue, and if there is no work
    // there, trying to steal from the remote work queues.
    fn find_work(&mut self) -> Option<~GreenTask> {
        rtdebug!("scheduler looking for work");
        if !self.steal_for_yield {
            match self.work_queue.pop() {
                Some(task) => {
                    rtdebug!("found a task locally");
                    return Some(task)
                }
                None => {
                    rtdebug!("scheduler trying to steal");
                    return self.try_steals();
                }
            }
        } else {
            // During execution of the last task, it performed a 'yield',
            // so we're doing some work stealing in order to introduce some
            // scheduling randomness. Otherwise we would just end up popping
            // that same task again. This is pretty lame and is to work around
            // the problem that work stealing is not designed for 'non-strict'
            // (non-fork-join) task parallelism.
            self.steal_for_yield = false;
            match self.try_steals() {
                Some(task) => {
                    rtdebug!("stole a task after yielding");
                    return Some(task);
                }
                None => {
                    rtdebug!("did not steal a task after yielding");
                    // Back to business
                    return self.find_work();
                }
            }
        }
    }

    // Try stealing from all queues the scheduler knows about. This
    // naive implementation can steal from our own queue or from other
    // special schedulers.
    fn try_steals(&mut self) -> Option<~GreenTask> {
        let work_queues = &mut self.work_queues;
        let len = work_queues.len();
        let start_index = self.rng.gen_range(0, len);
        for index in range(0, len).map(|i| (i + start_index) % len) {
            match work_queues[index].steal() {
                deque::Data(task) => {
                    rtdebug!("found task by stealing");
                    return Some(task)
                }
                _ => ()
            }
        };
        rtdebug!("giving up on stealing");
        return None;
    }
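
    // For example, with 4 queues and start_index = 2 the steal order is
    // queues 2, 3, 0, 1 -- a random rotation over all known queues, which
    // may include this scheduler's own queue.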

    // * Task Routing Functions - Make sure tasks end up in the right
    // place.

    fn process_task(mut ~self, cur: ~GreenTask,
                    mut next: ~GreenTask, schedule_fn: SchedulingFn) {
        rtdebug!("processing a task");

        match next.take_unwrap_home() {
            HomeSched(home_handle) => {
                if home_handle.sched_id != self.sched_id() {
                    rtdebug!("sending task home");
                    next.give_home(HomeSched(home_handle));
                    Scheduler::send_task_home(next);
                    cur.put_with_sched(self);
                } else {
                    rtdebug!("running task here");
                    next.give_home(HomeSched(home_handle));
                    schedule_fn(self, cur, next);
                }
            }
            AnySched if self.run_anything => {
                rtdebug!("running anysched task here");
                next.give_home(AnySched);
                schedule_fn(self, cur, next);
            }
            AnySched => {
                rtdebug!("sending task to friend");
                next.give_home(AnySched);
                self.send_to_friend(next);
                cur.put_with_sched(self);
            }
        }
    }
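
    // Routing summary: a task pinned to this scheduler runs here; a task
    // pinned elsewhere is mailed to its home via PinnedTask; an unpinned
    // task runs here if run_anything is set, and is otherwise forwarded to
    // the friend scheduler via TaskFromFriend.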

    fn send_task_home(task: ~GreenTask) {
        let mut task = task;
        match task.take_unwrap_home() {
            HomeSched(mut home_handle) => home_handle.send(PinnedTask(task)),
            AnySched => rtabort!("error: cannot send anysched task home"),
        }
    }

    /// Take a non-homed task we aren't allowed to run here and send
    /// it to the designated friend scheduler to execute.
    fn send_to_friend(&mut self, task: ~GreenTask) {
        rtdebug!("sending a task to friend");
        match self.friend_handle {
            Some(ref mut handle) => {
                handle.send(TaskFromFriend(task));
            }
            None => {
                rtabort!("tried to send task to a friend but scheduler has no friends");
            }
        }
    }

    /// Schedule a task to be executed later.
    ///
    /// Pushes the task onto the work stealing queue and tells the
    /// event loop to run it later. Always use this instead of pushing
    /// to the work queue directly.
    pub fn enqueue_task(&mut self, task: ~GreenTask) {

        // We push the task onto our local queue clone.
        assert!(!task.is_sched());
        self.work_queue.push(task);
        self.idle_callback.get_mut_ref().resume();

        // We've made work available. Notify a
        // sleeping scheduler.
        match self.sleeper_list.casual_pop() {
            Some(handle) => {
                let mut handle = handle;
                handle.send(Wake)
            }
            None => { (/* pass */) }
        };
    }

    // * Core Context Switching Functions

    // The primary function for changing contexts. In the current
    // design the scheduler is just a slightly modified GreenTask, so
    // all context swaps are from GreenTask to GreenTask. The only difference
    // between the various cases is where the inputs come from, and
    // what is done with the resulting task. That is specified by the
    // cleanup function f, which takes the scheduler and the
    // old task as inputs.

    pub fn change_task_context(mut ~self,
                               current_task: ~GreenTask,
                               mut next_task: ~GreenTask,
                               f: |&mut Scheduler, ~GreenTask|) -> ~GreenTask {
        let f_opaque = ClosureConverter::from_fn(f);

        let current_task_dupe = unsafe {
            *cast::transmute::<&~GreenTask, &uint>(&current_task)
        };

        // The current task is placed inside an enum with the cleanup
        // function. This enum is then placed inside the scheduler.
        self.cleanup_job = Some(CleanupJob::new(current_task, f_opaque));

        // The scheduler is then placed inside the next task.
        next_task.sched = Some(self);

        // However we still need an internal mutable pointer to the
        // original task. The strategy here was "arrange memory, then
        // get pointers", so we crawl back up the chain using
        // transmute to eliminate borrowck errors.
        unsafe {

            let sched: &mut Scheduler =
                cast::transmute_mut_region(*next_task.sched.get_mut_ref());

            let current_task: &mut GreenTask = match sched.cleanup_job {
                Some(CleanupJob { task: ref task, .. }) => {
                    let task_ptr: *~GreenTask = task;
                    cast::transmute_mut_region(*cast::transmute_mut_unsafe(task_ptr))
                }
                None => {
                    rtabort!("no cleanup job");
                }
            };

            let (current_task_context, next_task_context) =
                Scheduler::get_contexts(current_task, next_task);

            // Done with everything - put the next task in TLS. This
            // works because due to transmute the borrow checker
            // believes that we have no internal pointers to
            // next_task.
            cast::forget(next_task);

            // The raw context swap operation. The next action taken
            // will be running the cleanup job from the context of the
            // next task.
            Context::swap(current_task_context, next_task_context);
        }

        // When the context swaps back to this task we immediately
        // run the cleanup job, as expected by the previously called
        // Context::swap function.
        let mut current_task: ~GreenTask = unsafe {
            cast::transmute(current_task_dupe)
        };
        current_task.sched.get_mut_ref().run_cleanup_job();

        // See the comments in switch_running_tasks_and_then for why a lock
        // is acquired here. This is the resumption point and the "bounce"
        // that those comments refer to.
        unsafe {
            current_task.nasty_deschedule_lock.lock();
            current_task.nasty_deschedule_lock.unlock();
        }
        return current_task;
    }
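
    // In short, a context switch proceeds as: stash (old task, cleanup fn)
    // in self.cleanup_job -> stash self in next_task.sched -> move
    // next_task into TLS -> swap stacks. When control later swaps back,
    // the first thing the resumed task does is run the pending cleanup job.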

    // Returns a mutable reference to both contexts involved in this
    // swap. This is unsafe - we are getting mutable internal
    // references to keep even when we don't own the tasks. It looks
    // kinda safe because we are doing transmutes before passing in
    // the arguments.
    pub fn get_contexts<'a>(current_task: &mut GreenTask, next_task: &mut GreenTask) ->
        (&'a mut Context, &'a mut Context) {
        let current_task_context =
            &mut current_task.coroutine.get_mut_ref().saved_context;
        let next_task_context =
            &mut next_task.coroutine.get_mut_ref().saved_context;
        unsafe {
            (cast::transmute_mut_region(current_task_context),
             cast::transmute_mut_region(next_task_context))
        }
    }

    // * Context Swapping Helpers - Here be ugliness!

    pub fn resume_task_immediately(~self, cur: ~GreenTask,
                                   next: ~GreenTask) -> ~GreenTask {
        assert!(cur.is_sched());
        self.change_task_context(cur, next, |sched, stask| {
            assert!(sched.sched_task.is_none());
            sched.sched_task = Some(stask);
        })
    }

    fn resume_task_immediately_cl(sched: ~Scheduler,
                                  cur: ~GreenTask,
                                  next: ~GreenTask) {
        sched.resume_task_immediately(cur, next).put()
    }

    /// Block a running task, context switch to the scheduler, then pass the
    /// blocked task to a closure.
    ///
    /// # Safety note
    ///
    /// The closure here is a *stack* closure that lives in the
    /// running task. It gets transmuted to the scheduler's lifetime
    /// and called while the task is blocked.
    ///
    /// This passes a Scheduler pointer to the fn after the context switch
    /// in order to prevent that fn from performing further scheduling operations.
    /// Doing further scheduling could easily result in infinite recursion.
    ///
    /// Note that if the closure provided relinquishes ownership of the
    /// BlockedTask, then it is possible for the task to resume execution before
    /// the closure has finished executing. This would naturally introduce a
    /// race if the closure and task shared portions of the environment.
    ///
    /// This situation is currently prevented, or in other words it is
    /// guaranteed that this function will not return before the given closure
    /// has returned.
    pub fn deschedule_running_task_and_then(mut ~self,
                                            cur: ~GreenTask,
                                            f: |&mut Scheduler, BlockedTask|) {
        // Trickier - we need to get the scheduler task out of self
        // and use it as the destination.
        let stask = self.sched_task.take_unwrap();
        // Otherwise this is the same as below.
        self.switch_running_tasks_and_then(cur, stask, f)
    }

    pub fn switch_running_tasks_and_then(~self,
                                         cur: ~GreenTask,
                                         next: ~GreenTask,
                                         f: |&mut Scheduler, BlockedTask|) {
        // And here comes one of the sad moments in which a lock is used in a
        // core portion of the rust runtime. As always, this is highly
        // undesirable, so there's a good reason behind it.
        //
        // There is an excellent outline of the problem in issue #8132, and it's
        // summarized in that `f` is executed on a sched task, but its
        // environment is on the previous task. If `f` relinquishes ownership of
        // the BlockedTask, then it may introduce a race where `f` is using the
        // environment as well as the code after the 'deschedule' block.
        //
        // The solution we have chosen to adopt for now is to acquire a
        // task-local lock around this block. The resumption of the task in
        // context switching will bounce on the lock, thereby waiting for this
        // block to finish, eliminating the race mentioned above.
        //
        // To actually maintain a handle to the lock, we use an unsafe pointer
        // to it, but we're guaranteed that the task won't exit until we've
        // unlocked the lock so there's no worry of this memory going away.
        let cur = self.change_task_context(cur, next, |sched, mut task| {
            let lock: *mut Mutex = &mut task.nasty_deschedule_lock;
            unsafe { (*lock).lock() }
            f(sched, BlockedTask::block(task.swap()));
            unsafe { (*lock).unlock() }
        });
        cur.put();
    }
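
    // Typical use, sketched: a blocking primitive calls
    // deschedule_running_task_and_then and, inside `f`, hands the
    // BlockedTask to whatever event will eventually wake it; the task
    // cannot run again until that BlockedTask is woken and the task is
    // rescheduled.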

    fn switch_task(sched: ~Scheduler, cur: ~GreenTask, next: ~GreenTask) {
        sched.change_task_context(cur, next, |sched, last_task| {
            if last_task.is_sched() {
                assert!(sched.sched_task.is_none());
                sched.sched_task = Some(last_task);
            } else {
                sched.enqueue_task(last_task);
            }
        }).put()
    }

    // * Task Context Helpers

    /// Called by a running task to end execution, after which it will
    /// be recycled by the scheduler for reuse in a new task.
    pub fn terminate_current_task(mut ~self, cur: ~GreenTask) {
        // Similar to deschedule running task and then, but cannot go through
        // the task-blocking path. The task is already dying.
        let stask = self.sched_task.take_unwrap();
        let _cur = self.change_task_context(cur, stask, |sched, mut dead_task| {
            let coroutine = dead_task.coroutine.take_unwrap();
            coroutine.recycle(&mut sched.stack_pool);
        });
        fail!("should never return!");
    }

    pub fn run_task(~self, cur: ~GreenTask, next: ~GreenTask) {
        self.process_task(cur, next, Scheduler::switch_task);
    }

    pub fn run_task_later(mut cur: ~GreenTask, next: ~GreenTask) {
        let mut sched = cur.sched.take_unwrap();
        sched.enqueue_task(next);
        cur.put_with_sched(sched);
    }

    /// Yield control to the scheduler, executing another task. This is guaranteed
    /// to introduce some amount of randomness to the scheduler. Currently the
    /// randomness is a result of performing a round of work stealing (which
    /// may end up stealing from the current scheduler).
    pub fn yield_now(mut ~self, cur: ~GreenTask) {
        if cur.is_sched() {
            assert!(self.sched_task.is_none());
            self.run_sched_once(cur);
        } else {
            self.yield_check_count = reset_yield_check(&mut self.rng);
            // Tell the scheduler to start stealing on the next iteration
            self.steal_for_yield = true;
            let stask = self.sched_task.take_unwrap();
            let cur = self.change_task_context(cur, stask, |sched, task| {
                sched.enqueue_task(task);
            });
            cur.put()
        }
    }

    pub fn maybe_yield(mut ~self, cur: ~GreenTask) {
        // The number of times to do the yield check before yielding is
        // MAX_YIELD_CHECKS, chosen arbitrarily.
        rtassert!(self.yield_check_count > 0);
        self.yield_check_count -= 1;
        if self.yield_check_count == 0 {
            self.yield_now(cur);
        } else {
            cur.put_with_sched(self);
        }
    }

    // * Utility Functions

    pub fn sched_id(&self) -> uint { unsafe { cast::transmute(self) } }
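
    // Note: sched_id is simply the address of the Scheduler struct
    // reinterpreted as a uint, which is why the tests below can compare it
    // against borrow::to_uint of the owning pointer.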

    pub fn run_cleanup_job(&mut self) {
        let cleanup_job = self.cleanup_job.take_unwrap();
        cleanup_job.run(self)
    }

    pub fn make_handle(&mut self) -> SchedHandle {
        let remote = self.event_loop.remote_callback(~SchedRunner as ~Callback);

        return SchedHandle {
            remote: remote,
            queue: self.message_producer.clone(),
            sched_id: self.sched_id()
        }
    }
}

// Supporting types

type SchedulingFn = extern "Rust" fn (~Scheduler, ~GreenTask, ~GreenTask);

pub enum SchedMessage {
    Wake,
    Shutdown,
    NewNeighbor(deque::Stealer<~GreenTask>),
    PinnedTask(~GreenTask),
    TaskFromFriend(~GreenTask),
    RunOnce(~GreenTask),
}

pub struct SchedHandle {
    priv remote: ~RemoteCallback,
    priv queue: mpsc::Producer<SchedMessage, ()>,
    sched_id: uint
}

impl SchedHandle {
    pub fn send(&mut self, msg: SchedMessage) {
        self.queue.push(msg);
        self.remote.fire();
    }
}
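
// A handle is the only way to talk to a scheduler from another thread: push
// the message onto the lock-free queue, then fire the remote callback so the
// target scheduler's event loop wakes up and drains it. For example, a clean
// shutdown is just `handle.send(Shutdown)` (see the tests below).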

struct SchedRunner;

impl Callback for SchedRunner {
    fn call(&mut self) {
        // In theory, this function needs to invoke the `run_sched_once`
        // function on the scheduler. Sadly, we have no context here, except for
        // knowledge of the local `Task`. In order to avoid a call to
        // `GreenTask::convert`, we just call `yield_now` and the scheduler will
        // detect when a sched task performs a yield vs a green task performing
        // a yield (and act accordingly).
        //
        // This function could be converted to `GreenTask::convert` if
        // absolutely necessary, but for cleanliness it is much better to not
        // use the conversion function.
        let task: ~Task = Local::take();
        task.yield_now();
    }
}

struct CleanupJob {
    task: ~GreenTask,
    f: UnsafeTaskReceiver
}

impl CleanupJob {
    pub fn new(task: ~GreenTask, f: UnsafeTaskReceiver) -> CleanupJob {
        CleanupJob {
            task: task,
            f: f
        }
    }

    pub fn run(self, sched: &mut Scheduler) {
        let CleanupJob { task: task, f: f } = self;
        f.to_fn()(sched, task)
    }
}

// XXX: Some hacks to put a || closure in Scheduler without borrowck
// complaining
type UnsafeTaskReceiver = raw::Closure;
trait ClosureConverter {
    fn from_fn(|&mut Scheduler, ~GreenTask|) -> Self;
    fn to_fn(self) -> |&mut Scheduler, ~GreenTask|;
}
impl ClosureConverter for UnsafeTaskReceiver {
    fn from_fn(f: |&mut Scheduler, ~GreenTask|) -> UnsafeTaskReceiver {
        unsafe { cast::transmute(f) }
    }
    fn to_fn(self) -> |&mut Scheduler, ~GreenTask| {
        unsafe { cast::transmute(self) }
    }
}
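
// The conversion above erases the closure's lifetime by round-tripping it
// through a raw::Closure (a bare code pointer plus environment pointer),
// which is what lets a stack closure be stored in the scheduler's CleanupJob
// across a context switch. It relies on change_task_context running the
// cleanup job before the originating stack frame can go away.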

// On unix, we read randomness straight from /dev/urandom, but the
// default constructor of an XorShiftRng does this via io::fs, which
// relies on the scheduler existing, so we have to manually load
// randomness. Windows has its own C API for this, so we don't need to
// worry there.
#[cfg(windows)]
fn new_sched_rng() -> XorShiftRng {
    XorShiftRng::new()
}
#[cfg(unix)]
fn new_sched_rng() -> XorShiftRng {
    use std::libc;
    use std::mem;
    use std::rand::SeedableRng;

    let fd = "/dev/urandom".with_c_str(|name| {
        unsafe { libc::open(name, libc::O_RDONLY, 0) }
    });
    if fd == -1 {
        rtabort!("could not open /dev/urandom for reading.")
    }

    let mut seeds = [0u32, .. 4];
    let size = mem::size_of_val(&seeds);
    loop {
        let nbytes = unsafe {
            libc::read(fd,
                       seeds.as_mut_ptr() as *mut libc::c_void,
                       size as libc::size_t)
        };
        rtassert!(nbytes as uint == size);

        if !seeds.iter().all(|x| *x == 0) {
            break;
        }
    }

    unsafe { libc::close(fd); }

    SeedableRng::from_seed(seeds)
}
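
// The retry loop above guards against an all-zero seed: an XorShift
// generator whose entire state is zero would emit zeros forever, so we keep
// reading from /dev/urandom until at least one seed word is nonzero.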

#[cfg(test)]
mod test {
    use borrow::to_uint;
    use rt::deque::BufferPool;
    use rt::basic;
    use rt::sched::{Scheduler};
    use rt::task::{GreenTask, Sched};
    use rt::thread::Thread;
    use rt::util;
    use task::TaskResult;
    use unstable::run_in_bare_thread;

    #[test]
    fn trivial_run_in_newsched_task_test() {
        let mut task_ran = false;
        let task_ran_ptr: *mut bool = &mut task_ran;
        do run_in_newsched_task || {
            unsafe { *task_ran_ptr = true };
            rtdebug!("executed from the new scheduler")
        }
        assert!(task_ran);
    }

    #[test]
    fn multiple_task_test() {
        let total = 10;
        let mut task_run_count = 0;
        let task_run_count_ptr: *mut uint = &mut task_run_count;
        do run_in_newsched_task || {
            for _ in range(0u, total) {
                do spawntask || {
                    unsafe { *task_run_count_ptr = *task_run_count_ptr + 1 };
                }
            }
        }
        assert!(task_run_count == total);
    }

    #[test]
    fn multiple_task_nested_test() {
        let mut task_run_count = 0;
        let task_run_count_ptr: *mut uint = &mut task_run_count;
        do run_in_newsched_task || {
            do spawntask || {
                unsafe { *task_run_count_ptr = *task_run_count_ptr + 1 };
                do spawntask || {
                    unsafe { *task_run_count_ptr = *task_run_count_ptr + 1 };
                    do spawntask || {
                        unsafe { *task_run_count_ptr = *task_run_count_ptr + 1 };
                    }
                }
            }
        }
        assert!(task_run_count == 3);
    }

    // Confirm that a sched_id actually is the uint form of the
    // pointer to the scheduler struct.
    #[test]
    fn simple_sched_id_test() {
        do run_in_bare_thread {
            let sched = ~new_test_uv_sched();
            assert!(to_uint(sched) == sched.sched_id());
        }
    }

    // Compare two scheduler ids that are different; this should never
    // fail but may catch a mistake someday.
    #[test]
    fn compare_sched_id_test() {
        do run_in_bare_thread {
            let sched_one = ~new_test_uv_sched();
            let sched_two = ~new_test_uv_sched();
            assert!(sched_one.sched_id() != sched_two.sched_id());
        }
    }

    // A very simple test that confirms that a task executing on the
    // home scheduler notices that it is home.
    #[test]
    fn test_home_sched() {
        do run_in_bare_thread {
            let mut task_ran = false;
            let task_ran_ptr: *mut bool = &mut task_ran;

            let mut sched = ~new_test_uv_sched();
            let sched_handle = sched.make_handle();

            let mut task = ~do GreenTask::new_root_homed(&mut sched.stack_pool, None,
                                                         Sched(sched_handle)) {
                unsafe { *task_ran_ptr = true };
                assert!(GreenTask::on_appropriate_sched());
            };

            let on_exit: proc(TaskResult) = proc(exit_status) {
                rtassert!(exit_status.is_ok())
            };
            task.death.on_exit = Some(on_exit);

            sched.bootstrap(task);
        }
    }
|
|
|
|
|
2013-07-19 14:25:05 -07:00
|
|
|
    // An advanced test that checks all four possible states that a
    // (task, sched) pair can be in regarding homes.
    #[test]
    fn test_schedule_home_states() {
        use rt::sleeper_list::SleeperList;
        use rt::sched::Shutdown;
        use borrow;

        do run_in_bare_thread {
            let sleepers = SleeperList::new();
            let mut pool = BufferPool::new();
            let (normal_worker, normal_stealer) = pool.deque();
            let (special_worker, special_stealer) = pool.deque();
            let queues = ~[normal_stealer, special_stealer];

            // Our normal scheduler
            let mut normal_sched = ~Scheduler::new(
                basic::event_loop(),
                normal_worker,
                queues.clone(),
                sleepers.clone());

            let normal_handle = normal_sched.make_handle();
            // The special scheduler routes work it refuses to run through
            // this "friend" handle, i.e. back to the normal scheduler.
            let friend_handle = normal_sched.make_handle();

            // Our special scheduler
            let mut special_sched = ~Scheduler::new_special(
                basic::event_loop(),
                special_worker,
                queues.clone(),
                sleepers.clone(),
                false,
                Some(friend_handle));

            let special_handle = special_sched.make_handle();

            let t1_handle = special_sched.make_handle();
            let t4_handle = special_sched.make_handle();

            // Four test tasks:
            //   1) task is home on special
            //   2) task not homed, sched doesn't care
            //   3) task not homed, sched requeues
            //   4) task not home, send home
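            // How the four cases should resolve (assuming the `false`
            // argument to `new_special` means "do not run anonymous work"
            // and the friend handle points at the normal scheduler):
            // task1 runs where it is submitted (its home); task2 may run
            // anywhere; task3 is submitted on special, which requeues it via
            // the friend handle; task4 is submitted on normal, which sends
            // it home to special.
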
            let task1 = ~do GreenTask::new_root_homed(&mut special_sched.stack_pool, None,
                                                      HomeSched(t1_handle)) {
                rtassert!(GreenTask::on_appropriate_sched());
            };
            rtdebug!("task1 id: **{}**", borrow::to_uint(task1));

            let task2 = ~do GreenTask::new_root(&mut normal_sched.stack_pool, None) {
                rtassert!(GreenTask::on_appropriate_sched());
            };

            let task3 = ~do GreenTask::new_root(&mut normal_sched.stack_pool, None) {
                rtassert!(GreenTask::on_appropriate_sched());
            };

            let task4 = ~do GreenTask::new_root_homed(&mut special_sched.stack_pool, None,
                                                      HomeSched(t4_handle)) {
                rtassert!(GreenTask::on_appropriate_sched());
            };
            rtdebug!("task4 id: **{}**", borrow::to_uint(task4));

            // Signal from the special task that we are done.
            let (port, chan) = Chan::<()>::new();

            let normal_task = ~do GreenTask::new_root(&mut normal_sched.stack_pool, None) {
                rtdebug!("*about to submit task2*");
                Scheduler::run_task(task2);
                rtdebug!("*about to submit task4*");
                Scheduler::run_task(task4);
                rtdebug!("*normal_task done*");
                port.recv();
                let mut nh = normal_handle;
                nh.send(Shutdown);
                let mut sh = special_handle;
                sh.send(Shutdown);
            };

            rtdebug!("normal task: {}", borrow::to_uint(normal_task));

            let special_task = ~do GreenTask::new_root(&mut special_sched.stack_pool, None) {
                rtdebug!("*about to submit task1*");
                Scheduler::run_task(task1);
                rtdebug!("*about to submit task3*");
                Scheduler::run_task(task3);
                rtdebug!("*done with special_task*");
                chan.send(());
            };

            rtdebug!("special task: {}", borrow::to_uint(special_task));

            let normal_sched = normal_sched;
            let normal_thread = do Thread::start {
                normal_sched.bootstrap(normal_task);
                rtdebug!("finished with normal_thread");
            };

            let special_sched = special_sched;
            let special_thread = do Thread::start {
                special_sched.bootstrap(special_task);
                rtdebug!("finished with special_sched");
            };

            normal_thread.join();
            special_thread.join();
        }
    }

    #[test]
    fn test_stress_schedule_task_states() {
        if util::limit_thread_creation_due_to_osx_and_valgrind() { return; }
        let n = stress_factor() * 120;
        for _ in range(0, n as int) {
            test_schedule_home_states();
        }
    }

    #[test]
    fn test_io_callback() {
        use io::timer;

        // Regression test: when there are no schedulable tasks in the work
        // queue but I/O is still pending, the scheduler must pick up work
        // pushed onto the queue later rather than exiting early.
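        // `timer::sleep(10)` parks the spawned task on a uv timer for 10ms;
        // during that window the work queue is empty, so a scheduler that
        // exited on "empty queue" alone would never run the wakeup.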
        do run_in_uv_task {
            do spawntask {
                timer::sleep(10);
            }
        }
    }

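    // Two single-task schedulers on separate threads, joined by a channel.
    // The test name refers to the SchedHandle machinery this exercises:
    // waking a scheduler that lives on another thread goes through a remote
    // callback on its handle.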
    #[test]
    fn handle() {
        do run_in_bare_thread {
            let (port, chan) = Chan::new();

            let thread_one = do Thread::start {
                let chan = chan;
                do run_in_newsched_task_core {
                    chan.send(());
                }
            };

            let thread_two = do Thread::start {
                let port = port;
                do run_in_newsched_task_core {
                    port.recv();
                }
            };

            thread_two.join();
            thread_one.join();
        }
    }

    // A regression test that the final message is always handled.
    // Used to deadlock because Shutdown was never received.
    #[test]
    fn no_missed_messages() {
        use rt::sleeper_list::SleeperList;
        use rt::stack::StackPool;
        use rt::sched::{Shutdown, TaskFromFriend};

        do run_in_bare_thread {
            stress_factor().times(|| {
                let sleepers = SleeperList::new();
                let mut pool = BufferPool::new();
                let (worker, stealer) = pool.deque();

                let mut sched = ~Scheduler::new(
                    basic::event_loop(),
                    worker,
                    ~[stealer],
                    sleepers.clone());

                let mut handle = sched.make_handle();

                let sched = sched;
                let thread = do Thread::start {
                    let mut sched = sched;
                    let bootstrap_task =
                        ~GreenTask::new_root(&mut sched.stack_pool,
                                             None,
                                             proc()());
                    sched.bootstrap(bootstrap_task);
                };

                let mut stack_pool = StackPool::new();
                let task = ~GreenTask::new_root(&mut stack_pool, None, proc()());
                handle.send(TaskFromFriend(task));

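                // The regression scenario: Shutdown is the last message in
                // the queue and the handle is dropped immediately after the
                // send, yet the scheduler must still wake up and process it.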
                handle.send(Shutdown);
                drop(handle);

                thread.join();
            })
        }
    }

    #[test]
    fn multithreading() {
        use num::Times;
        use vec::OwnedVector;
        use container::Container;

        do run_in_mt_newsched_task {
            let mut ports = ~[];
            10.times(|| {
                let (port, chan) = Chan::new();
                do spawntask_later {
                    chan.send(());
                }
                ports.push(port);
            });

            while !ports.is_empty() {
                ports.pop().recv();
            }
        }
    }

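    // Token-passing ring: n_tasks tasks are linked in a cycle of channels
    // and a counter is forwarded around the ring until it counts down to 1.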
    #[test]
    fn thread_ring() {
        do run_in_mt_newsched_task {
            let (end_port, end_chan) = Chan::new();

            let n_tasks = 10;
            let token = 2000;

            let (mut p, ch1) = Chan::new();
            ch1.send((token, end_chan));
            let mut i = 2;
            while i <= n_tasks {
                let (next_p, ch) = Chan::new();
                let imm_i = i;
                let imm_p = p;
                do spawntask_random {
                    roundtrip(imm_i, n_tasks, &imm_p, &ch);
                };
                p = next_p;
                i += 1;
            }
            let p = p;
            do spawntask_random {
                roundtrip(1, n_tasks, &p, &ch1);
            }

            end_port.recv();
        }

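        // Worked example: with n_tasks = 10 and token = 2000 the token makes
        // 1999 hops. Values decrement by one per hop, so the final tokens
        // n_tasks..1 land on n_tasks consecutive, distinct tasks: a task
        // that forwards a token of n_tasks or less can safely retire, and
        // the task that receives 1 signals end_chan instead of forwarding.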
        fn roundtrip(id: int, n_tasks: int,
                     p: &Port<(int, Chan<()>)>,
                     ch: &Chan<(int, Chan<()>)>) {
            loop {
                match p.recv() {
                    (1, end_chan) => {
                        debug!("{}\n", id);
                        end_chan.send(());
                        return;
                    }
                    (token, end_chan) => {
                        debug!("thread: {} got token: {}", id, token);
                        ch.send((token - 1, end_chan));
                        if token <= n_tasks {
                            return;
                        }
                    }
                }
            }
        }
    }

    #[test]
    fn start_closure_dtor() {
        use ops::Drop;

        // Regression test that the `start` task entrypoint can
        // contain dtors that use task resources
        do run_in_newsched_task {
            struct S { field: () }

            impl Drop for S {
                fn drop(&mut self) {
                    let _foo = @0;
                }
            }

            let s = S { field: () };

            do spawntask {
                let _ss = &s;
            }
        }
    }

    // FIXME: #9407: xfail-test
    #[ignore]
    #[test]
    fn dont_starve_1() {
        stress_factor().times(|| {
            do run_in_mt_newsched_task {
                let (port, chan) = Chan::new();

                // This task should not be able to starve the sender;
                // the sender should get stolen to another thread.
                do spawntask {
                    while port.try_recv().is_none() { }
                }

                chan.send(());
            }
        })
    }

    #[test]
    fn dont_starve_2() {
        stress_factor().times(|| {
            do run_in_newsched_task {
                let (port, chan) = Chan::new();
                let (_port2, chan2) = Chan::new();

                // This task should not be able to starve the other task;
                // the sends should eventually yield.
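                // ("Eventually yield" because the runtime arranges for busy
                // channel senders to deschedule from time to time, which is
                // what lets the main task run even on a single scheduler.)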
                do spawntask {
                    while port.try_recv().is_none() {
                        chan2.send(());
                    }
                }

                chan.send(());
            }
        })
    }

    // Regression test for a logic bug that would cause single-threaded schedulers
    // to sleep forever after yielding and stealing another task.
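    // (`deschedule` yields the running task back to the queue; the bug was
    // that the scheduler, after stealing other work, could go to sleep
    // without ever rescheduling the yielded task.)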
    #[test]
    fn single_threaded_yield() {
        use task::{spawn, spawn_sched, SingleThreaded, deschedule};
        use num::Times;

        do spawn_sched(SingleThreaded) {
            5.times(|| { deschedule(); })
        }
        do spawn { }
        do spawn { }
    }
}