2010-06-23 23:03:09 -05:00
|
|
|
|
|
|
|
#include <stdarg.h>
|
|
|
|
#include "rust_internal.h"
|
2011-05-31 19:44:54 -05:00
|
|
|
#include "globals.h"
|
2010-06-23 23:03:09 -05:00
|
|
|
|
2011-06-28 14:15:41 -05:00
|
|
|
rust_scheduler::rust_scheduler(rust_kernel *kernel,
                               rust_message_queue *message_queue, rust_srv *srv,
                               const char *name) :
    interrupt_flag(0),
    _log(srv, this),
    log_lvl(log_note),
    srv(srv),
    name(name),
    newborn_tasks(this, "newborn"),
    running_tasks(this, "running"),
    blocked_tasks(this, "blocked"),
    dead_tasks(this, "dead"),
    cache(this),
    root_task(NULL),
    curr_task(NULL),
    rval(0),
    kernel(kernel),
    message_queue(message_queue)
{
    LOGPTR(this, "new dom", (uintptr_t)this);
    // Seed this domain's ISAAC PRNG; schedule_task() uses it to pick a
    // random starting point when scanning for a runnable task.
    isaac_init(this, &rctx);
#ifndef __WIN32__
    pthread_attr_init(&attr);
    pthread_attr_setstacksize(&attr, 1024 * 1024);
    // Use the named POSIX constant rather than `true`: the detach-state
    // argument is PTHREAD_CREATE_DETACHED / PTHREAD_CREATE_JOINABLE, and
    // relying on `true` mapping to "detached" is non-portable.
    pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
#endif
    // The root task carries the domain's name; it is created last so that
    // all task lists and the RNG above are already initialized.
    root_task = create_task(NULL, name);
}
|
|
|
|
|
2011-06-28 14:15:41 -05:00
|
|
|
rust_scheduler::~rust_scheduler() {
    DLOG(this, dom, "~rust_scheduler %s @0x%" PRIxPTR, name, (uintptr_t)this);

    // Destroy every task still parked in any list; by the time the domain
    // dies nothing should be running.
    newborn_tasks.delete_all();
    running_tasks.delete_all();
    blocked_tasks.delete_all();
    dead_tasks.delete_all();

#ifndef __WIN32__
    // Release the pthread attributes initialized in the constructor.
    pthread_attr_destroy(&attr);
#endif
}
|
|
|
|
|
|
|
|
void
|
2011-06-28 14:15:41 -05:00
|
|
|
rust_scheduler::activate(rust_task *task) {
|
2011-05-31 19:44:54 -05:00
|
|
|
context ctx;
|
|
|
|
|
|
|
|
task->ctx.next = &ctx;
|
|
|
|
DLOG(this, task, "descheduling...");
|
2011-06-24 18:50:06 -05:00
|
|
|
kernel->scheduler_lock.unlock();
|
2011-05-31 19:44:54 -05:00
|
|
|
task->ctx.swap(ctx);
|
2011-06-24 18:50:06 -05:00
|
|
|
kernel->scheduler_lock.lock();
|
2011-05-31 19:44:54 -05:00
|
|
|
DLOG(this, task, "task has returned");
|
2010-06-23 23:03:09 -05:00
|
|
|
}
|
|
|
|
|
2010-07-19 16:05:18 -05:00
|
|
|
void
|
2011-06-28 14:15:41 -05:00
|
|
|
rust_scheduler::log(rust_task* task, uint32_t level, char const *fmt, ...) {
|
2011-01-14 15:41:39 -06:00
|
|
|
char buf[BUF_BYTES];
|
2011-04-19 05:21:57 -05:00
|
|
|
va_list args;
|
|
|
|
va_start(args, fmt);
|
|
|
|
vsnprintf(buf, sizeof(buf), fmt, args);
|
|
|
|
_log.trace_ln(task, level, buf);
|
|
|
|
va_end(args);
|
2010-07-19 16:05:18 -05:00
|
|
|
}
|
|
|
|
|
2010-06-23 23:03:09 -05:00
|
|
|
void
|
2011-06-28 14:15:41 -05:00
|
|
|
rust_scheduler::fail() {
|
2011-04-19 05:21:57 -05:00
|
|
|
log(NULL, log_err, "domain %s @0x%" PRIxPTR " root task failed",
|
2010-08-08 21:24:35 -05:00
|
|
|
name, this);
|
2010-06-23 23:03:09 -05:00
|
|
|
I(this, rval == 0);
|
|
|
|
rval = 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
size_t
|
2011-06-28 14:15:41 -05:00
|
|
|
rust_scheduler::number_of_live_tasks() {
|
2010-06-23 23:03:09 -05:00
|
|
|
return running_tasks.length() + blocked_tasks.length();
|
|
|
|
}
|
|
|
|
|
2010-07-19 16:05:18 -05:00
|
|
|
/**
|
|
|
|
* Delete any dead tasks.
|
|
|
|
*/
|
2010-06-23 23:03:09 -05:00
|
|
|
void
|
2011-06-29 20:47:47 -05:00
|
|
|
rust_scheduler::reap_dead_tasks(int id) {
|
2011-06-24 18:50:06 -05:00
|
|
|
I(this, kernel->scheduler_lock.lock_held_by_current_thread());
|
2010-06-23 23:03:09 -05:00
|
|
|
for (size_t i = 0; i < dead_tasks.length(); ) {
|
2010-07-19 16:05:18 -05:00
|
|
|
rust_task *task = dead_tasks[i];
|
2011-06-21 20:08:34 -05:00
|
|
|
// Make sure this task isn't still running somewhere else...
|
2011-06-29 20:47:47 -05:00
|
|
|
if (task->ref_count == 0 && task->can_schedule(id)) {
|
2010-07-28 18:24:50 -05:00
|
|
|
I(this, task->tasks_waiting_to_join.is_empty());
|
2010-09-10 03:21:29 -05:00
|
|
|
dead_tasks.remove(task);
|
2011-04-19 05:21:57 -05:00
|
|
|
DLOG(this, task,
|
2010-08-08 21:24:35 -05:00
|
|
|
"deleting unreferenced dead task %s @0x%" PRIxPTR,
|
|
|
|
task->name, task);
|
2010-07-19 16:05:18 -05:00
|
|
|
delete task;
|
2010-06-23 23:03:09 -05:00
|
|
|
continue;
|
|
|
|
}
|
|
|
|
++i;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2010-07-19 16:05:18 -05:00
|
|
|
/**
|
|
|
|
* Drains and processes incoming pending messages.
|
|
|
|
*/
|
2011-06-28 14:15:41 -05:00
|
|
|
void rust_scheduler::drain_incoming_message_queue(bool process) {
|
2010-07-19 16:05:18 -05:00
|
|
|
rust_message *message;
|
2010-09-07 20:39:07 -05:00
|
|
|
while (message_queue->dequeue(&message)) {
|
2011-04-19 05:21:57 -05:00
|
|
|
DLOG(this, comm, "<== receiving \"%s\" " PTR,
|
2010-09-07 20:39:07 -05:00
|
|
|
message->label, message);
|
2010-08-24 23:06:56 -05:00
|
|
|
if (process) {
|
|
|
|
message->process();
|
|
|
|
}
|
2010-09-07 20:39:07 -05:00
|
|
|
delete message;
|
2010-07-28 18:24:50 -05:00
|
|
|
}
|
2010-07-28 18:46:13 -05:00
|
|
|
}
|
|
|
|
|
2010-07-19 16:05:18 -05:00
|
|
|
/**
|
|
|
|
* Schedules a running task for execution. Only running tasks can be
|
|
|
|
* activated. Blocked tasks have to be unblocked before they can be
|
|
|
|
* activated.
|
|
|
|
*
|
|
|
|
* Returns NULL if no tasks can be scheduled.
|
|
|
|
*/
|
2010-06-23 23:03:09 -05:00
|
|
|
rust_task *
|
2011-06-29 20:47:47 -05:00
|
|
|
rust_scheduler::schedule_task(int id) {
|
2010-06-23 23:03:09 -05:00
|
|
|
I(this, this);
|
|
|
|
// FIXME: in the face of failing tasks, this is not always right.
|
|
|
|
// I(this, n_live_tasks() > 0);
|
|
|
|
if (running_tasks.length() > 0) {
|
2011-06-20 19:19:50 -05:00
|
|
|
size_t k = rand(&rctx);
|
|
|
|
// Look around for a runnable task, starting at k.
|
|
|
|
for(size_t j = 0; j < running_tasks.length(); ++j) {
|
|
|
|
size_t i = (j + k) % running_tasks.length();
|
2011-06-29 20:47:47 -05:00
|
|
|
if (running_tasks[i]->can_schedule(id)) {
|
2011-06-20 19:19:50 -05:00
|
|
|
return (rust_task *)running_tasks[i];
|
|
|
|
}
|
2010-08-11 23:23:34 -05:00
|
|
|
}
|
2010-06-23 23:03:09 -05:00
|
|
|
}
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2010-07-28 16:53:08 -05:00
|
|
|
void
|
2011-06-28 14:15:41 -05:00
|
|
|
rust_scheduler::log_state() {
|
2011-04-19 05:21:57 -05:00
|
|
|
if (log_rt_task < log_note) return;
|
|
|
|
|
2010-07-28 16:53:08 -05:00
|
|
|
if (!running_tasks.is_empty()) {
|
2011-04-19 05:21:57 -05:00
|
|
|
log(NULL, log_note, "running tasks:");
|
2010-07-28 16:53:08 -05:00
|
|
|
for (size_t i = 0; i < running_tasks.length(); i++) {
|
2011-04-19 05:21:57 -05:00
|
|
|
log(NULL, log_note, "\t task: %s @0x%" PRIxPTR " timeout: %d",
|
2010-08-11 23:23:34 -05:00
|
|
|
running_tasks[i]->name,
|
|
|
|
running_tasks[i],
|
|
|
|
running_tasks[i]->yield_timer.get_timeout());
|
2010-07-28 16:53:08 -05:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!blocked_tasks.is_empty()) {
|
2011-04-19 05:21:57 -05:00
|
|
|
log(NULL, log_note, "blocked tasks:");
|
2010-07-28 16:53:08 -05:00
|
|
|
for (size_t i = 0; i < blocked_tasks.length(); i++) {
|
2011-04-19 05:21:57 -05:00
|
|
|
log(NULL, log_note, "\t task: %s @0x%" PRIxPTR ", blocked on: 0x%"
|
|
|
|
PRIxPTR " '%s'",
|
2010-08-08 21:24:35 -05:00
|
|
|
blocked_tasks[i]->name, blocked_tasks[i],
|
2010-08-18 01:26:43 -05:00
|
|
|
blocked_tasks[i]->cond, blocked_tasks[i]->cond_name);
|
2010-07-28 16:53:08 -05:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!dead_tasks.is_empty()) {
|
2011-04-19 05:21:57 -05:00
|
|
|
log(NULL, log_note, "dead tasks:");
|
2010-07-28 16:53:08 -05:00
|
|
|
for (size_t i = 0; i < dead_tasks.length(); i++) {
|
2011-04-19 05:21:57 -05:00
|
|
|
log(NULL, log_note, "\t task: %s 0x%" PRIxPTR ", ref_count: %d",
|
2010-08-18 01:26:43 -05:00
|
|
|
dead_tasks[i]->name, dead_tasks[i],
|
2010-08-10 15:26:00 -05:00
|
|
|
dead_tasks[i]->ref_count);
|
2010-07-28 16:53:08 -05:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2010-07-19 16:05:18 -05:00
|
|
|
/**
|
|
|
|
* Starts the main scheduler loop which performs task scheduling for this
|
|
|
|
* domain.
|
|
|
|
*
|
2010-07-28 18:49:16 -05:00
|
|
|
* Returns once no more tasks can be scheduled and all task ref_counts
|
|
|
|
* drop to zero.
|
2010-07-19 16:05:18 -05:00
|
|
|
*/
|
|
|
|
int
|
2011-06-28 14:15:41 -05:00
|
|
|
rust_scheduler::start_main_loop(int id) {
|
2011-06-24 18:50:06 -05:00
|
|
|
kernel->scheduler_lock.lock();
|
2011-06-20 19:19:50 -05:00
|
|
|
|
2010-07-19 16:05:18 -05:00
|
|
|
// Make sure someone is watching, to pull us out of infinite loops.
|
2011-06-27 12:08:57 -05:00
|
|
|
//
|
|
|
|
// FIXME: time-based interruption is not presently working; worked
|
|
|
|
// in rustboot and has been completely broken in rustc.
|
|
|
|
//
|
|
|
|
// rust_timer timer(this);
|
2010-07-19 16:05:18 -05:00
|
|
|
|
2011-06-20 19:19:50 -05:00
|
|
|
DLOG(this, dom, "started domain loop %d", id);
|
2010-07-19 16:05:18 -05:00
|
|
|
|
2010-09-10 03:21:29 -05:00
|
|
|
while (number_of_live_tasks() > 0) {
|
2010-09-07 20:39:07 -05:00
|
|
|
A(this, kernel->is_deadlocked() == false, "deadlock");
|
2010-08-18 01:47:11 -05:00
|
|
|
|
2011-06-20 19:19:50 -05:00
|
|
|
DLOG(this, dom, "worker %d, number_of_live_tasks = %d",
|
|
|
|
id, number_of_live_tasks());
|
|
|
|
|
2010-08-24 23:06:56 -05:00
|
|
|
drain_incoming_message_queue(true);
|
2010-07-28 18:24:50 -05:00
|
|
|
|
2011-06-29 20:47:47 -05:00
|
|
|
rust_task *scheduled_task = schedule_task(id);
|
2010-07-19 16:05:18 -05:00
|
|
|
|
2010-08-16 20:44:26 -05:00
|
|
|
// The scheduler busy waits until a task is available for scheduling.
|
|
|
|
// Eventually we'll want a smarter way to do this, perhaps sleep
|
|
|
|
// for a minimum amount of time.
|
2010-07-19 16:05:18 -05:00
|
|
|
|
|
|
|
if (scheduled_task == NULL) {
|
2011-04-19 05:21:57 -05:00
|
|
|
log_state();
|
|
|
|
DLOG(this, task,
|
2011-06-20 19:19:50 -05:00
|
|
|
"all tasks are blocked, scheduler id %d yielding ...",
|
|
|
|
id);
|
2011-06-24 18:50:06 -05:00
|
|
|
kernel->scheduler_lock.unlock();
|
2010-09-15 13:56:45 -05:00
|
|
|
sync::sleep(100);
|
2011-06-24 18:50:06 -05:00
|
|
|
kernel->scheduler_lock.lock();
|
2011-04-19 05:21:57 -05:00
|
|
|
DLOG(this, task,
|
2010-08-09 10:06:08 -05:00
|
|
|
"scheduler resuming ...");
|
2010-07-19 16:05:18 -05:00
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
|
|
|
I(this, scheduled_task->running());
|
|
|
|
|
2011-04-19 05:21:57 -05:00
|
|
|
DLOG(this, task,
|
2010-08-10 15:26:00 -05:00
|
|
|
"activating task %s 0x%" PRIxPTR
|
|
|
|
", sp=0x%" PRIxPTR
|
|
|
|
", ref_count=%d"
|
|
|
|
", state: %s",
|
|
|
|
scheduled_task->name,
|
|
|
|
(uintptr_t)scheduled_task,
|
|
|
|
scheduled_task->rust_sp,
|
|
|
|
scheduled_task->ref_count,
|
2010-09-10 03:21:29 -05:00
|
|
|
scheduled_task->state->name);
|
2010-07-19 16:05:18 -05:00
|
|
|
|
|
|
|
interrupt_flag = 0;
|
|
|
|
|
2011-06-20 19:19:50 -05:00
|
|
|
DLOG(this, task,
|
|
|
|
"Running task %p on worker %d",
|
|
|
|
scheduled_task, id);
|
2011-06-29 20:47:47 -05:00
|
|
|
scheduled_task->running_on = id;
|
2010-07-19 16:05:18 -05:00
|
|
|
activate(scheduled_task);
|
2011-06-29 20:47:47 -05:00
|
|
|
scheduled_task->running_on = -1;
|
2010-07-19 16:05:18 -05:00
|
|
|
|
2011-04-19 05:21:57 -05:00
|
|
|
DLOG(this, task,
|
2011-06-20 19:19:50 -05:00
|
|
|
"returned from task %s @0x%" PRIxPTR
|
2011-06-21 20:08:34 -05:00
|
|
|
" in state '%s', sp=0x%x, worker id=%d" PRIxPTR,
|
2011-06-20 19:19:50 -05:00
|
|
|
scheduled_task->name,
|
|
|
|
(uintptr_t)scheduled_task,
|
|
|
|
scheduled_task->state->name,
|
|
|
|
scheduled_task->rust_sp,
|
|
|
|
id);
|
2010-07-19 16:05:18 -05:00
|
|
|
|
2011-06-29 20:47:47 -05:00
|
|
|
reap_dead_tasks(id);
|
2010-07-19 16:05:18 -05:00
|
|
|
}
|
|
|
|
|
2011-04-19 05:21:57 -05:00
|
|
|
DLOG(this, dom,
|
2011-04-07 15:05:45 -05:00
|
|
|
"terminated scheduler loop, reaping dead tasks ...");
|
2010-07-19 16:05:18 -05:00
|
|
|
|
|
|
|
while (dead_tasks.length() > 0) {
|
2010-09-07 20:39:07 -05:00
|
|
|
if (message_queue->is_empty()) {
|
2011-04-19 05:21:57 -05:00
|
|
|
DLOG(this, dom,
|
2010-08-09 10:06:08 -05:00
|
|
|
"waiting for %d dead tasks to become dereferenced, "
|
|
|
|
"scheduler yielding ...",
|
|
|
|
dead_tasks.length());
|
2011-04-19 05:21:57 -05:00
|
|
|
log_state();
|
2011-06-24 18:50:06 -05:00
|
|
|
kernel->scheduler_lock.unlock();
|
2010-08-09 10:06:08 -05:00
|
|
|
sync::yield();
|
2011-06-24 18:50:06 -05:00
|
|
|
kernel->scheduler_lock.lock();
|
2010-07-19 16:05:18 -05:00
|
|
|
} else {
|
2010-08-24 23:06:56 -05:00
|
|
|
drain_incoming_message_queue(true);
|
2010-07-19 16:05:18 -05:00
|
|
|
}
|
2011-06-29 20:47:47 -05:00
|
|
|
reap_dead_tasks(id);
|
2010-07-19 16:05:18 -05:00
|
|
|
}
|
|
|
|
|
2011-06-20 19:19:50 -05:00
|
|
|
DLOG(this, dom, "finished main-loop %d (dom.rval = %d)", id, rval);
|
|
|
|
|
2011-06-24 18:50:06 -05:00
|
|
|
kernel->scheduler_lock.unlock();
|
2011-06-20 19:19:50 -05:00
|
|
|
return rval;
|
|
|
|
}
|
2010-07-19 16:05:18 -05:00
|
|
|
|
2010-06-23 23:03:09 -05:00
|
|
|
// Accessor for this domain's crate cache (owned by the scheduler).
rust_crate_cache *
rust_scheduler::get_cache() {
    return &cache;
}
|
|
|
|
|
2010-09-10 03:21:29 -05:00
|
|
|
// Allocate a new task (via the kernel's placement allocator), inherit the
// spawner's pinning if any, and park it on the newborn list.
// `spawner` may be NULL for the root task.
rust_task *
rust_scheduler::create_task(rust_task *spawner, const char *name) {
    rust_task *task =
        new (this->kernel) rust_task (this, &newborn_tasks, spawner, name);
    DLOG(this, task, "created task: " PTR ", spawner: %s, name: %s",
         task, spawner ? spawner->name : "null", name);
    if(spawner)
        task->pin(spawner->pinned_on);
    newborn_tasks.append(task);
    return task;
}
|
2010-06-23 23:03:09 -05:00
|
|
|
|
|
|
|
//
|
|
|
|
// Local Variables:
|
|
|
|
// mode: C++
|
|
|
|
// fill-column: 70;
|
|
|
|
// indent-tabs-mode: nil
|
|
|
|
// c-basic-offset: 4
|
|
|
|
// buffer-file-coding-system: utf-8-unix
|
|
|
|
// compile-command: "make -k -C .. 2>&1 | sed -e 's/\\/x\\//x:\\//g'";
|
|
|
|
// End:
|
|
|
|
//
|