rt: Clean up the way the kernel tracks tasks

Brian Anderson 2012-02-03 17:26:54 -08:00
parent e7f00b6493
commit 12fa90888e
6 changed files with 49 additions and 42 deletions

src/rt/rust_kernel.cpp

@@ -11,9 +11,9 @@ rust_kernel::rust_kernel(rust_srv *srv, size_t num_threads) :
     _region(srv, true),
     _log(srv, NULL),
     srv(srv),
-    max_id(0),
-    rval(0),
     live_tasks(0),
+    max_task_id(0),
+    rval(0),
     env(srv->env)
 {
     sched = new (this, "rust_scheduler")
@@ -84,14 +84,35 @@ rust_kernel::fail() {
 void
 rust_kernel::register_task(rust_task *task) {
-    scoped_lock with(_kernel_lock);
-    task->user.id = max_id++;
-    task_table.put(task->user.id, task);
+    {
+        scoped_lock with(task_lock);
+        task->user.id = max_task_id++;
+        task_table.put(task->user.id, task);
+    }
     KLOG_("Registered task %" PRIdPTR, task->user.id);
+    int new_live_tasks = sync::increment(live_tasks);
+    KLOG_("Total outstanding tasks: %d", new_live_tasks);
+}
+
+void
+rust_kernel::release_task_id(rust_task_id id) {
+    KLOG_("Releasing task %" PRIdPTR, id);
+    {
+        scoped_lock with(task_lock);
+        task_table.remove(id);
+    }
+    int new_live_tasks = sync::decrement(live_tasks);
+    KLOG_("Total outstanding tasks: %d", new_live_tasks);
+    if (new_live_tasks == 0) {
+        // There are no more tasks and there never will be.
+        // Tell all the schedulers to exit.
+        sched->exit();
+    }
 }

 rust_task *
 rust_kernel::get_task_by_id(rust_task_id id) {
-    scoped_lock with(_kernel_lock);
+    scoped_lock with(task_lock);
     rust_task *task = NULL;
     // get leaves task unchanged if not found.
     task_table.get(id, &task);
@@ -109,16 +130,6 @@ rust_kernel::get_task_by_id(rust_task_id id) {
     return task;
 }

-void
-rust_kernel::release_task_id(rust_task_id id) {
-    scoped_lock with(_kernel_lock);
-    task_table.remove(id);
-}
-
-void rust_kernel::exit_schedulers() {
-    sched->exit();
-}
-
 #ifdef __WIN32__
 void
 rust_kernel::win32_require(LPCTSTR fn, BOOL ok) {
@@ -140,7 +151,7 @@ rust_kernel::win32_require(LPCTSTR fn, BOOL ok) {
 void
 rust_kernel::set_exit_status(int code) {
-    scoped_lock with(_kernel_lock);
+    scoped_lock with(rval_lock);
     // If we've already failed then that's the code we're going to use
     if (rval != PROC_FAIL_CODE) {
         rval = code;
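
The new register/release pair is the classic narrow-critical-section shape: task_lock is held only while the id table is mutated, the live count is maintained by the atomic sync::increment/sync::decrement wrappers after the lock is dropped, and whichever release drives the count to zero tells the scheduler to exit. A minimal sketch of the same pattern in modern C++ (std::mutex, std::atomic, and std::unordered_map stand in for the runtime's lock_and_signal, sync::, and hash_map; TaskRegistry and on_all_done are illustrative names, not part of this runtime):

#include <atomic>
#include <cstdint>
#include <functional>
#include <mutex>
#include <unordered_map>

typedef uintptr_t task_id;

class TaskRegistry {
    std::mutex table_lock;                    // protects next_id and table only
    task_id next_id;
    std::unordered_map<task_id, void*> table;
    std::atomic<int> live_tasks;              // maintained outside the lock
    std::function<void()> on_all_done;        // e.g. tell schedulers to exit

public:
    explicit TaskRegistry(std::function<void()> done)
        : next_id(0), live_tasks(0), on_all_done(done) {}

    task_id register_task(void *task) {
        task_id id;
        {   // hold the lock only for the table mutation, as in the diff above
            std::lock_guard<std::mutex> with(table_lock);
            id = next_id++;
            table[id] = task;
        }
        live_tasks.fetch_add(1, std::memory_order_relaxed);
        return id;
    }

    void release_task_id(task_id id) {
        {
            std::lock_guard<std::mutex> with(table_lock);
            table.erase(id);
        }
        // fetch_sub returns the previous value, so the releaser that
        // takes the count to zero is the unique caller seeing old == 1.
        if (live_tasks.fetch_sub(1, std::memory_order_acq_rel) == 1)
            on_all_done();
    }
};

The extra braces around scoped_lock in the diff serve the same purpose as the inner blocks here: the lock is released before the logging and the counter update run.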

src/rt/rust_kernel.h

@@ -20,25 +20,29 @@ class rust_kernel {
 public:
     rust_srv *srv;
 private:
-    lock_and_signal _kernel_lock;
     rust_scheduler *sched;
-    rust_task_id max_id;
+    // Tracks the number of tasks that are being managed by
+    // schedulers. When this hits 0 we will tell all schedulers
+    // to exit.
+    volatile int live_tasks;
+    // Protects max_task_id and task_table
+    lock_and_signal task_lock;
+    rust_task_id max_task_id;
     hash_map<rust_task_id, rust_task *> task_table;
+    lock_and_signal rval_lock;
     int rval;
 public:
-    volatile int live_tasks;
     struct rust_env *env;

     rust_kernel(rust_srv *srv, size_t num_threads);
-    void exit_schedulers();
-    ~rust_kernel();
     void log(uint32_t level, char const *fmt, ...);
     void fatal(char const *fmt, ...);
+    virtual ~rust_kernel();
     void *malloc(size_t size, const char *tag);
     void *realloc(void *mem, size_t size);
src/rt/rust_scheduler.cpp

@@ -87,9 +87,7 @@ rust_scheduler::create_task(rust_task *spawner, const char *name,
         thread_no = isaac_rand(&rctx) % num_threads;
     }
     rust_task_thread *thread = threads[thread_no];
-    rust_task *t = thread->create_task(spawner, name, init_stack_sz);
-    kernel->register_task(t);
-    return t->user.id;
+    return thread->create_task(spawner, name, init_stack_sz);
 }

 rust_task_id
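
With registration moved out of the scheduler, rust_scheduler::create_task never sees the raw task pointer; it simply forwards the rust_task_id that rust_task_thread::create_task now returns (see the last two files of this commit). Callers holding an id go through rust_kernel::get_task_by_id, which resolves it under task_lock and yields NULL once the id has been released. A sketch of that handle-instead-of-pointer lookup (lookup_task is an illustrative stand-in, with std::unordered_map in place of hash_map):

#include <mutex>
#include <unordered_map>

typedef unsigned long task_id;

// Resolve an id to a pointer under the table lock. A stale id is not
// an error: find() simply misses and we return null, just as
// hash_map::get leaves the out-parameter unchanged (NULL) above.
void *lookup_task(std::mutex &task_lock,
                  std::unordered_map<task_id, void*> &table,
                  task_id id) {
    std::lock_guard<std::mutex> with(task_lock);
    auto it = table.find(id);
    return it == table.end() ? nullptr : it->second;
}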

src/rt/rust_task.cpp

@@ -275,12 +275,11 @@ rust_task::~rust_task()
     DLOG(thread, task, "~rust_task %s @0x%" PRIxPTR ", refcnt=%d",
          name, (uintptr_t)this, ref_count);

+    // FIXME: We should do this when the task exits, not in the destructor
     if (supervisor) {
         supervisor->deref();
     }
+    kernel->release_task_id(user.id);

     /* FIXME: tighten this up, there are some more
        assertions that hold at task-lifecycle events. */
     I(thread, ref_count == 0); // ||
@@ -288,6 +287,7 @@ rust_task::~rust_task()
     // Delete all the stacks. There may be more than one if the task failed
     // and no landing pads stopped to clean up.
+    // FIXME: We should do this when the task exits, not in the destructor
     while (stk != NULL) {
         del_stk(this, stk);
     }
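
Releasing the id in ~rust_task ties deregistration to the task's reference count: when the last reference is dropped (in reap_dead_tasks, next file), the destructor runs and release_task_id does the table removal, the decrement, and, for the final task, the scheduler shutdown. Both FIXMEs flag that task exit, not destruction, is where this really belongs. A tiny sketch of that deref-drives-release shape (Registry and Task are illustrative, not the runtime's types):

#include <atomic>

struct Registry {
    void release_task_id(unsigned long id) {
        (void)id;
        // remove from the table, decrement the live count
        // (see rust_kernel.cpp above)
    }
};

// A refcounted task whose destructor deregisters it, so the last
// deref() anywhere in the system performs the release.
struct Task {
    Registry *kernel;
    unsigned long id;
    std::atomic<int> ref_count;

    Task(Registry *k, unsigned long i) : kernel(k), id(i), ref_count(1) {}
    ~Task() { kernel->release_task_id(id); }

    void deref() {
        if (ref_count.fetch_sub(1, std::memory_order_acq_rel) == 1)
            delete this;
    }
};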

src/rt/rust_task_thread.cpp

@@ -137,13 +137,8 @@ rust_task_thread::reap_dead_tasks() {
     for (size_t i = 0; i < dead_tasks_len; ++i) {
         rust_task *task = dead_tasks_copy[i];
         if (task) {
-            kernel->release_task_id(task->user.id);
             task->deref();
-            int live_tasks = sync::decrement(kernel->live_tasks);
-            if (live_tasks == 0) {
-                // There are no more tasks and there never will be.
-                // Tell all the schedulers to exit.
-                kernel->exit_schedulers();
-            }
         }
     }
     srv->free(dead_tasks_copy);
@@ -219,8 +214,8 @@ rust_task_thread::start_main_loop() {
     DLOG(this, dom, "started domain loop %d", id);

     while (!should_exit) {
-        DLOG(this, dom, "worker %d, number_of_live_tasks = %d, total = %d",
-             id, number_of_live_tasks(), kernel->live_tasks);
+        DLOG(this, dom, "worker %d, number_of_live_tasks = %d",
+             id, number_of_live_tasks());

         rust_task *scheduled_task = schedule_task();
@@ -281,7 +276,7 @@ rust_task_thread::get_cache() {
     return &cache;
 }

-rust_task *
+rust_task_id
 rust_task_thread::create_task(rust_task *spawner, const char *name,
                               size_t init_stack_sz) {
     rust_task *task =
@@ -295,9 +290,8 @@ rust_task_thread::create_task(rust_task *spawner, const char *name,
         newborn_tasks.append(task);
     }
-    sync::increment(kernel->live_tasks);
-    return task;
+    kernel->register_task(task);
+    return task->user.id;
 }

 void rust_task_thread::run() {
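
Worth noting how the zero test moved: reap_dead_tasks used to decrement and check here, but the check now lives in rust_kernel::release_task_id and is keyed off the value sync::decrement returns. Testing that returned value, rather than re-reading live_tasks afterwards, is what lets exactly one releaser observe the transition to zero; with a separate load, two racing releasers could both see zero, or (with a concurrent registration in flight) neither could. A compact sketch of the difference (std::atomic standing in for the sync:: wrappers):

#include <atomic>

std::atomic<int> live_tasks(2);

// Correct: the atomic op itself reports the new value, so the
// transition to zero is observed by exactly one thread.
bool release_checked() {
    return live_tasks.fetch_sub(1) - 1 == 0;
}

// Racy: another thread can decrement (or increment) between our
// fetch_sub and the load, so zero may be seen twice or not at all.
bool release_racy() {
    live_tasks.fetch_sub(1);
    return live_tasks.load() == 0;
}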

src/rt/rust_task_thread.h

@@ -112,8 +112,8 @@ struct rust_task_thread : public kernel_owned<rust_task_thread>,
     void kill_all_tasks();
-    rust_task *create_task(rust_task *spawner, const char *name,
-                           size_t init_stack_sz);
+    rust_task_id create_task(rust_task *spawner, const char *name,
+                             size_t init_stack_sz);

     virtual void run();