// rust/src/rt/rust_task.cpp

#include "rust_internal.h"
#include "rust_cc.h"
#include "vg/valgrind.h"
#include "vg/memcheck.h"
#ifndef __WIN32__
#include <execinfo.h>
#endif
#include <iostream>
#include <cassert>
#include <cstring>
#include "globals.h"
// The amount of extra space at the end of each stack segment, available
// to the rt, compiler and dynamic linker for running small functions
// FIXME: We want this to be 128 but need to slim the red zone calls down
#ifdef __i386__
#define RED_ZONE_SIZE 2048
#endif
#ifdef __x86_64__
#define RED_ZONE_SIZE 2048
#endif
// Stack size
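// A nonzero g_custom_min_stack_size overrides the scheduler's default
// minimum stack size; it is presumably set once at startup (e.g. from a
// runtime flag or environment variable), since nothing here synchronizes
// access to it.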
size_t g_custom_min_stack_size = 0;

static size_t
get_min_stk_size(size_t default_size) {
    if (g_custom_min_stack_size != 0) {
        return g_custom_min_stack_size;
    } else {
        return default_size;
    }
}

// Task stack segments. Heap allocated and chained together.
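// A sketch of the layout implied by new_stk below: each segment is a
// stk_seg header followed by the usable bytes, with `limit` pointing one
// past the last usable byte:
//
//   [ header | data[0] ... data[minsz + RED_ZONE_SIZE - 1] ] <- limit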
static stk_seg*
new_stk(rust_scheduler *sched, rust_task *task, size_t minsz)
{
    size_t min_stk_bytes = get_min_stk_size(sched->min_stack_size);
    if (minsz < min_stk_bytes)
        minsz = min_stk_bytes;
    size_t sz = sizeof(stk_seg) + minsz + RED_ZONE_SIZE;
    stk_seg *stk = (stk_seg *)task->malloc(sz, "stack");
    LOGPTR(task->sched, "new stk", (uintptr_t)stk);
    memset(stk, 0, sizeof(stk_seg));
    stk->next = task->stk;
    stk->limit = (uintptr_t) &stk->data[minsz + RED_ZONE_SIZE];
    LOGPTR(task->sched, "stk limit", stk->limit);
    stk->valgrind_id =
        VALGRIND_STACK_REGISTER(&stk->data[0],
                                &stk->data[minsz + RED_ZONE_SIZE]);
    task->stk = stk;
    return stk;
}
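
// Free the most recently pushed stack segment. Segments must be freed
// in LIFO order; the assertion below enforces this.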
static void
del_stk(rust_task *task, stk_seg *stk)
{
    assert(stk == task->stk && "Freeing stack segments out of order!");
    task->stk = stk->next;
    VALGRIND_STACK_DEREGISTER(stk->valgrind_id);
    LOGPTR(task->sched, "freeing stk segment", (uintptr_t)stk);
    task->free(stk);
}

// Tasks
rust_task::rust_task(rust_scheduler *sched, rust_task_list *state,
                     rust_task *spawner, const char *name) :
    ref_count(1),
    stk(NULL),
    runtime_sp(0),
    sched(sched),
    cache(NULL),
    kernel(sched->kernel),
    name(name),
    state(state),
    cond(NULL),
    cond_name("none"),
    supervisor(spawner),
    list_index(-1),
    next_port_id(0),
    rendezvous_ptr(0),
    running_on(-1),
    pinned_on(-1),
    local_region(&sched->srv->local_region),
    failed(false),
    killed(false),
    propagate_failure(true),
    dynastack(this),
    cc_counter(0)
{
    LOGPTR(sched, "new task", (uintptr_t)this);
    DLOG(sched, task, "sizeof(task) = %d (0x%x)", sizeof *this, sizeof *this);
    assert((void*)this == (void*)&user);
    user.notify_enabled = 0;
    stk = new_stk(sched, this, 0);
    user.rust_sp = stk->limit;
    if (supervisor) {
        supervisor->ref();
    }
}

rust_task::~rust_task()
{
    I(sched, !sched->lock.lock_held_by_current_thread());
    I(sched, port_table.is_empty());
    DLOG(sched, task, "~rust_task %s @0x%" PRIxPTR ", refcnt=%d",
         name, (uintptr_t)this, ref_count);

    if (supervisor) {
        supervisor->deref();
    }

    kernel->release_task_id(user.id);

    /* FIXME: tighten this up, there are some more
       assertions that hold at task-lifecycle events. */
    I(sched, ref_count == 0); // ||
    //   (ref_count == 1 && this == sched->root_task));

    // Delete all the stacks. There may be more than one if the task failed.
    // FIXME: This is not correct. During unwinding we need to delete
    // the stacks and record the stack limit, otherwise the stack
    // is corrupted when destructors are running.
    while (stk != NULL) {
        del_stk(this, stk);
    }
}
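
// Arguments handed to a freshly spawned task. rust_task::start writes
// one of these records directly onto the new task's stack;
// task_start_wrapper then unpacks it on the task's first resume.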
struct spawn_args {
    rust_task *task;
    uintptr_t a3;
    uintptr_t a4;
    void (*CDECL f)(int *, uintptr_t, uintptr_t);
};

struct rust_closure_env {
    intptr_t ref_count;
    type_desc *td;
};

// This runs on the Rust stack
extern "C" CDECL
void task_start_wrapper(spawn_args *a)
{
    rust_task *task = a->task;
    int rval = 42;

    bool failed = false;
    try {
        a->f(&rval, a->a3, a->a4);
    } catch (rust_task *ex) {
        A(task->sched, ex == task,
          "Expected this task to be thrown for unwinding");
        failed = true;
    }

    cc::do_cc(task);

    rust_closure_env* env = (rust_closure_env*)a->a3;
    if(env) {
        // free the environment.
        I(task->sched, 1 == env->ref_count); // the ref count better be 1
        //env->td->drop_glue(NULL, task, NULL, env->td->first_param, env);
        //env->td->free_glue(NULL, task, NULL, env->td->first_param, env);
        task->free(env);
    }

    task->die();

    if (task->killed && !failed) {
        LOG(task, task, "Task killed during termination");
        failed = true;
    }

    task->notify(!failed);

    if (failed) {
#ifndef __WIN32__
        task->conclude_failure();
#else
        A(task->sched, false, "Shouldn't happen");
#endif
    }

    task->ctx.next->swap(task->ctx);
}
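
// Set up the task's initial frame: a spawn_args record is placed just
// below the top of the new stack and the context is aimed at
// task_start_wrapper, so the first resume runs the spawnee function.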
void
rust_task::start(uintptr_t spawnee_fn,
                 uintptr_t args,
                 uintptr_t env)
{
    LOG(this, task, "starting task from fn 0x%" PRIxPTR
        " with args 0x%" PRIxPTR, spawnee_fn, args);

    I(sched, stk->data != NULL);

    char *sp = (char *)user.rust_sp;
    sp -= sizeof(spawn_args);

    spawn_args *a = (spawn_args *)sp;
    a->task = this;
    a->a3 = env;
    a->a4 = args;
    void **f = (void **)&a->f;
    *f = (void *)spawnee_fn;

    ctx.call((void *)task_start_wrapper, a, sp);

    this->start();
}

void
rust_task::start(uintptr_t spawnee_fn,
                 uintptr_t args)
{
    start(spawnee_fn, args, 0);
}

void rust_task::start()
{
    yield_timer.reset_us(0);
    transition(&sched->newborn_tasks, &sched->running_tasks);
    sched->lock.signal();
}

void
rust_task::grow(size_t n_frame_bytes)
{
    // FIXME (issue #151): Just fail rather than almost certainly crashing
    // mysteriously later. The commented-out logic below won't work at all in
    // the presence of non-word-aligned pointers.
    abort();
}
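
// Yield to the scheduler for at least time_in_us microseconds; *killed
// is set if the task was killed before or during the yield.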
// Only run this on the rust stack
void
rust_task::yield(size_t time_in_us, bool *killed) {
    if (this->killed) {
        *killed = true;
    }

    yield_timer.reset_us(time_in_us);

    // Return to the scheduler.
    ctx.next->swap(ctx);

    if (this->killed) {
        *killed = true;
    }
}

void
rust_task::kill() {
    if (dead()) {
        // Task is already dead, can't kill what's already dead.
        fail_parent();
        return;
    }

    // Note the distinction here: kill() is when you're in an upcall
    // from task A and want to force-fail task B, you do B->kill().
    // If you want to fail yourself you do self->fail().
    LOG(this, task, "killing task %s @0x%" PRIxPTR, name, this);
    // When the task next goes to yield or resume it will fail
    killed = true;
    // Unblock the task so it can unwind.
    unblock();

    sched->lock.signal();

    LOG(this, task, "preparing to unwind task: 0x%" PRIxPTR, this);
    // run_on_resume(rust_unwind_glue);
}

void
rust_task::fail() {
    // See note in ::kill() regarding who should call this.
    DLOG(sched, task, "task %s @0x%" PRIxPTR " failing", name, this);
    backtrace();
#ifndef __WIN32__
    throw this;
#else
    die();
    conclude_failure();
#endif
}

void
rust_task::conclude_failure() {
    fail_parent();
    failed = true;
}

void
rust_task::fail_parent() {
    if (supervisor) {
        DLOG(sched, task,
             "task %s @0x%" PRIxPTR
             " propagating failure to supervisor %s @0x%" PRIxPTR,
             name, this, supervisor->name, supervisor);
        supervisor->kill();
    }
    // FIXME: implement unwinding again.
    if (NULL == supervisor && propagate_failure)
        sched->fail();
}

void
rust_task::unsupervise()
{
    if (supervisor) {
        // Only log (and touch supervisor->name) after the NULL check;
        // the log line is meaningless with no supervisor anyway.
        DLOG(sched, task,
             "task %s @0x%" PRIxPTR
             " disconnecting from supervisor %s @0x%" PRIxPTR,
             name, this, supervisor->name, supervisor);
        supervisor->deref();
    }
    supervisor = NULL;
    propagate_failure = false;
}
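
// Fetch the frame glue functions recorded in the word just below the
// given frame pointer.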
frame_glue_fns*
rust_task::get_frame_glue_fns(uintptr_t fp) {
    fp -= sizeof(uintptr_t);
    return *((frame_glue_fns**) fp);
}

bool
rust_task::running()
{
    return state == &sched->running_tasks;
}

bool
rust_task::blocked()
{
    return state == &sched->blocked_tasks;
}

bool
rust_task::blocked_on(rust_cond *on)
{
    return blocked() && cond == on;
}

bool
rust_task::dead()
{
    return state == &sched->dead_tasks;
}

void *
rust_task::malloc(size_t sz, const char *tag, type_desc *td)
{
    return local_region.malloc(sz, tag);
}

void *
rust_task::realloc(void *data, size_t sz, bool is_gc)
{
    return local_region.realloc(data, sz);
}

void
rust_task::free(void *p, bool is_gc)
{
    local_region.free(p);
}
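
// Move this task from one scheduler state list to another, taking the
// scheduler lock unless the caller already holds it.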
void
rust_task::transition(rust_task_list *src, rust_task_list *dst) {
    bool unlock = false;
    if(!sched->lock.lock_held_by_current_thread()) {
        unlock = true;
        sched->lock.lock();
    }
    DLOG(sched, task,
         "task %s " PTR " state change '%s' -> '%s' while in '%s'",
         name, (uintptr_t)this, src->name, dst->name, state->name);
    I(sched, state == src);
    src->remove(this);
    dst->append(this);
    state = dst;
    if(unlock)
        sched->lock.unlock();
}

void
rust_task::block(rust_cond *on, const char* name) {
    I(sched, !lock.lock_held_by_current_thread());
    scoped_lock with(lock);
    LOG(this, task, "Blocking on 0x%" PRIxPTR ", cond: 0x%" PRIxPTR,
        (uintptr_t) on, (uintptr_t) cond);
    A(sched, cond == NULL, "Cannot block an already blocked task.");
    A(sched, on != NULL, "Cannot block on a NULL object.");
    transition(&sched->running_tasks, &sched->blocked_tasks);
    cond = on;
    cond_name = name;
}

void
rust_task::wakeup(rust_cond *from) {
    I(sched, !lock.lock_held_by_current_thread());
    scoped_lock with(lock);
    A(sched, cond != NULL, "Cannot wake up unblocked task.");
    LOG(this, task, "Blocked on 0x%" PRIxPTR " woken up on 0x%" PRIxPTR,
        (uintptr_t) cond, (uintptr_t) from);
    A(sched, cond == from, "Cannot wake up blocked task on wrong condition.");
    transition(&sched->blocked_tasks, &sched->running_tasks);
    I(sched, cond == from);
    cond = NULL;
    cond_name = "none";
    sched->lock.signal();
}

void
rust_task::die() {
    I(sched, !lock.lock_held_by_current_thread());
    scoped_lock with(lock);
    transition(&sched->running_tasks, &sched->dead_tasks);
    sched->lock.signal();
}

void
rust_task::unblock() {
    if (blocked()) {
        // FIXME: What if another thread unblocks the task between when
        // we checked and here?
        wakeup(cond);
    }
}
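
// Lazily fetch the crate cache from the scheduler on first use.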
rust_crate_cache *
rust_task::get_crate_cache()
{
    if (!cache) {
        DLOG(sched, task, "fetching cache for current crate");
        cache = sched->get_cache();
    }
    return cache;
}

void
rust_task::backtrace() {
    if (!log_rt_backtrace) return;
#ifndef __WIN32__
    void *call_stack[256];
    int nframes = ::backtrace(call_stack, 256);
    backtrace_symbols_fd(call_stack + 1, nframes - 1, 2);
#endif
}
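
// A task may be scheduled on thread `id` once its yield timer has
// expired, it is not running on another thread, and it is either
// unpinned or pinned to that thread.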
bool rust_task::can_schedule(int id)
{
    return yield_timer.has_timed_out() &&
        running_on == -1 &&
        (pinned_on == -1 || pinned_on == id);
}

void *
rust_task::calloc(size_t size, const char *tag) {
    return local_region.calloc(size, tag);
}

void rust_task::pin() {
    I(this->sched, running_on != -1);
    pinned_on = running_on;
}

void rust_task::pin(int id) {
    I(this->sched, running_on == -1);
    pinned_on = id;
}

void rust_task::unpin() {
    pinned_on = -1;
}
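
// Port registry. register_port and get_port_by_id take the task lock
// themselves; release_port expects the caller to already hold it (see
// the assertions below).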
rust_port_id rust_task::register_port(rust_port *port) {
    I(sched, !lock.lock_held_by_current_thread());
    scoped_lock with(lock);

    rust_port_id id = next_port_id++;
    port_table.put(id, port);
    return id;
}

void rust_task::release_port(rust_port_id id) {
    I(sched, lock.lock_held_by_current_thread());
    port_table.remove(id);
}

rust_port *rust_task::get_port_by_id(rust_port_id id) {
    I(sched, !lock.lock_held_by_current_thread());
    scoped_lock with(lock);
    rust_port *port = NULL;
    port_table.get(id, &port);
    if (port) {
        port->ref();
    }
    return port;
}

// Temporary routine to allow boxes on one task's shared heap to be reparented
// to another.
const type_desc *
rust_task::release_alloc(void *alloc) {
    I(sched, !lock.lock_held_by_current_thread());
    lock.lock();

    assert(local_allocs.find(alloc) != local_allocs.end());
    const type_desc *tydesc = local_allocs[alloc];
    local_allocs.erase(alloc);

    local_region.release_alloc(alloc);
    lock.unlock();
    return tydesc;
}

// Temporary routine to allow boxes from one task's shared heap to be
// reparented to this one.
void
rust_task::claim_alloc(void *alloc, const type_desc *tydesc) {
    I(sched, !lock.lock_held_by_current_thread());
    lock.lock();

    assert(local_allocs.find(alloc) == local_allocs.end());
    local_allocs[alloc] = tydesc;
    local_region.claim_alloc(alloc);
    lock.unlock();
}
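
// If the spawner requested notification, send a task_notification
// carrying this task's id and whether it succeeded or failed.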

void
rust_task::notify(bool success) {
    // FIXME (1078) Do this in rust code
    if(user.notify_enabled) {
        rust_task *target_task = kernel->get_task_by_id(user.notify_chan.task);
        if (target_task) {
            rust_port *target_port =
                target_task->get_port_by_id(user.notify_chan.port);
            if(target_port) {
                task_notification msg;
                msg.id = user.id;
                msg.result = !success ? tr_failure : tr_success;

                target_port->send(&msg);
                scoped_lock with(target_task->lock);
                target_port->deref();
            }
            target_task->deref();
        }
    }
}
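
// Stack switching helpers. record_sp is defined elsewhere in the
// runtime; judging by record_stack_limit below, it publishes the stack
// limit that compiled function prologs compare against.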
extern "C" CDECL void
record_sp(void *limit);

void *
rust_task::new_stack(size_t stk_sz, void *args_addr, size_t args_sz) {
    stk_seg *stk_seg = new_stk(sched, this, stk_sz + args_sz);
    uint8_t *new_sp = (uint8_t*)stk_seg->limit;
    size_t sizeof_retaddr = sizeof(void*);
    // Make enough room on the new stack to hold the old stack pointer
    // in addition to the function arguments.
    new_sp = align_down(new_sp - (args_sz + sizeof_retaddr));
    new_sp += sizeof_retaddr;
    memcpy(new_sp, args_addr, args_sz);
    record_stack_limit();
    return new_sp;
}

void
rust_task::del_stack() {
    del_stk(this, stk);
    record_stack_limit();
}
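
// Worked example (a sketch, assuming RED_ZONE_SIZE = 2048 as defined
// above): if a segment's data starts at address D, the limit recorded
// below is D + 2048 + 256, so a prolog that compares %esp against it
// leaves the 256-byte small-frame window and the red zone untouched.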
void
rust_task::record_stack_limit() {
    // The function prolog compares the amount of stack needed to the end of
    // the stack. As an optimization, when the frame size is less than 256
    // bytes, it will simply compare %esp to the stack limit instead of
    // subtracting the frame size. As a result we need our stack limit to
    // account for those 256 bytes.
    const unsigned LIMIT_OFFSET = 256;
    A(sched,
      (uintptr_t)stk->limit - RED_ZONE_SIZE
        - (uintptr_t)stk->data >= LIMIT_OFFSET,
      "Stack size must be greater than LIMIT_OFFSET");
    record_sp(stk->data + LIMIT_OFFSET + RED_ZONE_SIZE);
}
//
// Local Variables:
// mode: C++
// fill-column: 78;
// indent-tabs-mode: nil
// c-basic-offset: 4
// buffer-file-coding-system: utf-8-unix
// End:
//