#include "rust_internal.h"
#include "rust_cc.h"

#include "vg/valgrind.h"
#include "vg/memcheck.h"

#ifndef __WIN32__
#include <execinfo.h>
#endif
#include <iostream>
#include <cassert>
#include <cstring>
#include <algorithm>

#include "globals.h"

// The amount of extra space at the end of each stack segment, available
// to the rt, compiler and dynamic linker for running small functions
// FIXME: We want this to be 128 but need to slim the red zone calls down
#define RZ_LINUX_32 1024*20
#define RZ_LINUX_64 1024*20
#define RZ_MAC_32 1024*20
#define RZ_MAC_64 1024*20
#define RZ_WIN_32 1024*20
// RZ_WIN_64 is referenced by the __WIN32__/__x86_64__ branch below but was
// previously left undefined; defined here to match the other platforms.
#define RZ_WIN_64 1024*20

#ifdef __linux__
#ifdef __i386__
#define RED_ZONE_SIZE RZ_LINUX_32
#endif
#ifdef __x86_64__
#define RED_ZONE_SIZE RZ_LINUX_64
#endif
#endif
#ifdef __APPLE__
#ifdef __i386__
#define RED_ZONE_SIZE RZ_MAC_32
#endif
#ifdef __x86_64__
#define RED_ZONE_SIZE RZ_MAC_64
#endif
#endif
#ifdef __WIN32__
#ifdef __i386__
#define RED_ZONE_SIZE RZ_WIN_32
#endif
#ifdef __x86_64__
#define RED_ZONE_SIZE RZ_WIN_64
#endif
#endif
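
// All platforms currently use the same 20KB red zone (1024*20 bytes); an
// i386 Linux build, for example, resolves RED_ZONE_SIZE to RZ_LINUX_32.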

// Stack size
size_t g_custom_min_stack_size = 0;

static size_t
get_min_stk_size(size_t default_size) {
    if (g_custom_min_stack_size != 0) {
        return g_custom_min_stack_size;
    } else {
        return default_size;
    }
}

static size_t
get_next_stk_size(rust_scheduler *sched, rust_task *task,
                  size_t min, size_t current, size_t requested) {
    LOG(task, mem, "calculating new stack size for 0x%" PRIxPTR, task);
    LOG(task, mem,
        "min: %" PRIdPTR " current: %" PRIdPTR " requested: %" PRIdPTR,
        min, current, requested);

    // Allocate at least enough to accommodate the next frame
    size_t sz = std::max(min, requested);

    // And double the stack size each allocation
    const size_t max = 1024 * 1024;
    size_t next = std::min(max, current * 2);

    sz = std::max(sz, next);

    LOG(task, mem, "next stack size: %" PRIdPTR, sz);
    I(sched, requested <= sz);
    return sz;
}
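
// A worked example of the sizing policy above (illustrative numbers, not
// taken from the runtime): with min = 4096, current = 8192 and
// requested = 2048, sz starts as max(4096, 2048) = 4096, the doubling step
// gives next = min(1MB, 16384) = 16384, and the final size is
// max(4096, 16384) = 16384. The doubling step is capped at 1MB; beyond
// that, a segment only exceeds 1MB when the requested size itself does.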

// Task stack segments. Heap allocated and chained together.

static void
config_valgrind_stack(stk_seg *stk) {
    stk->valgrind_id =
        VALGRIND_STACK_REGISTER(&stk->data[0],
                                stk->end);
#ifndef NVALGRIND
    // Establish that the stack is accessible. This must be done when reusing
    // old stack segments, since the act of popping the stack previously
    // caused valgrind to consider the whole thing inaccessible.
    size_t sz = stk->end - (uintptr_t)&stk->data[0];
    VALGRIND_MAKE_MEM_UNDEFINED(stk->data, sz);
#endif
}

static void
unconfig_valgrind_stack(stk_seg *stk) {
    VALGRIND_STACK_DEREGISTER(stk->valgrind_id);
}

static void
free_stk(rust_task *task, stk_seg *stk) {
    LOGPTR(task->sched, "freeing stk segment", (uintptr_t)stk);
    task->free(stk);
}

static stk_seg*
new_stk(rust_scheduler *sched, rust_task *task, size_t requested_sz)
{
    LOG(task, mem, "creating new stack for task %" PRIxPTR, task);

    // The minimum stack size, in bytes, of a Rust stack, excluding red zone
    size_t min_sz = get_min_stk_size(sched->min_stack_size);

    // Try to reuse an existing stack segment
    if (task->stk != NULL && task->stk->prev != NULL) {
        size_t prev_sz = (size_t)(task->stk->prev->end
                                  - (uintptr_t)&task->stk->prev->data[0]
                                  - RED_ZONE_SIZE);
        if (min_sz <= prev_sz && requested_sz <= prev_sz) {
            LOG(task, mem, "reusing existing stack");
            task->stk = task->stk->prev;
            A(sched, task->stk->prev == NULL, "Bogus stack ptr");
            config_valgrind_stack(task->stk);
            return task->stk;
        } else {
            LOG(task, mem, "existing stack is not big enough");
            free_stk(task, task->stk->prev);
            task->stk->prev = NULL;
        }
    }

    // The size of the current stack segment, excluding red zone
    size_t current_sz = 0;
    if (task->stk != NULL) {
        current_sz = (size_t)(task->stk->end
                              - (uintptr_t)&task->stk->data[0]
                              - RED_ZONE_SIZE);
    }
    // The calculated size of the new stack, excluding red zone
    size_t rust_stk_sz = get_next_stk_size(sched, task, min_sz,
                                           current_sz, requested_sz);

    size_t sz = sizeof(stk_seg) + rust_stk_sz + RED_ZONE_SIZE;
    stk_seg *stk = (stk_seg *)task->malloc(sz, "stack");
    LOGPTR(task->sched, "new stk", (uintptr_t)stk);
    memset(stk, 0, sizeof(stk_seg));
    stk->prev = NULL;
    stk->next = task->stk;
    stk->end = (uintptr_t) &stk->data[rust_stk_sz + RED_ZONE_SIZE];
    LOGPTR(task->sched, "stk end", stk->end);

    task->stk = stk;
    config_valgrind_stack(task->stk);
    return stk;
}
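
// A sketch of the resulting segment (the stk_seg header layout itself is
// declared elsewhere; data[] is its trailing array):
//
//   stk -> [ header | red zone | space for Rust frames ]
//           data[0]-^                                   ^- stk->end
//
// The stack grows downward from stk->end; the red zone occupies the low
// RED_ZONE_SIZE bytes of data[], below the limit recorded by
// record_stack_limit() further down in this file.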

static void
del_stk(rust_task *task, stk_seg *stk)
{
    assert(stk == task->stk && "Freeing stack segments out of order!");

    task->stk = stk->next;

    bool delete_stack = false;
    if (task->stk != NULL) {
        // Don't actually delete this stack. Save it to reuse later,
        // preventing the pathological case where we repeatedly reallocate
        // the stack for the next frame.
        task->stk->prev = stk;
    } else {
        // This is the last stack, delete it.
        delete_stack = true;
    }

    // Delete the previous previous stack
    if (stk->prev != NULL) {
        free_stk(task, stk->prev);
        stk->prev = NULL;
    }

    unconfig_valgrind_stack(stk);
    if (delete_stack) {
        free_stk(task, stk);
    }
}
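
// Together, new_stk and del_stk implement a one-segment cache: a popped
// segment is parked on the new top segment's prev pointer, where the next
// new_stk call either reuses it (if large enough) or frees it. At most one
// retired segment is kept per task at a time.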

// Tasks
rust_task::rust_task(rust_scheduler *sched, rust_task_list *state,
                     rust_task *spawner, const char *name) :
    ref_count(1),
    stk(NULL),
    runtime_sp(0),
    sched(sched),
    cache(NULL),
    kernel(sched->kernel),
    name(name),
    state(state),
    cond(NULL),
    cond_name("none"),
    supervisor(spawner),
    list_index(-1),
    next_port_id(0),
    rendezvous_ptr(0),
    running_on(-1),
    pinned_on(-1),
    local_region(&sched->srv->local_region),
    failed(false),
    killed(false),
    propagate_failure(true),
    dynastack(this),
    cc_counter(0)
{
    LOGPTR(sched, "new task", (uintptr_t)this);
    DLOG(sched, task, "sizeof(task) = %d (0x%x)", sizeof *this, sizeof *this);

    assert((void*)this == (void*)&user);

    user.notify_enabled = 0;

    stk = new_stk(sched, this, 0);
    user.rust_sp = stk->end;
    if (supervisor) {
        supervisor->ref();
    }
}

rust_task::~rust_task()
{
    I(sched, !sched->lock.lock_held_by_current_thread());
    I(sched, port_table.is_empty());
    DLOG(sched, task, "~rust_task %s @0x%" PRIxPTR ", refcnt=%d",
         name, (uintptr_t)this, ref_count);

    if (supervisor) {
        supervisor->deref();
    }

    kernel->release_task_id(user.id);

    /* FIXME: tighten this up, there are some more
       assertions that hold at task-lifecycle events. */
    I(sched, ref_count == 0); // ||
    // (ref_count == 1 && this == sched->root_task));

    // Delete all the stacks. There may be more than one if the task failed
    // and no landing pads stopped to clean up.
    while (stk != NULL) {
        del_stk(this, stk);
    }
}

struct spawn_args {
    rust_task *task;
    uintptr_t a3;
    uintptr_t a4;
    void (*CDECL f)(int *, uintptr_t, uintptr_t);
};

struct rust_closure_env {
    intptr_t ref_count;
    type_desc *td;
};

struct cleanup_args {
    spawn_args *spargs;
    bool failed;
};

void
cleanup_task(cleanup_args *args) {
    spawn_args *a = args->spargs;
    bool failed = args->failed;
    rust_task *task = a->task;

    cc::do_cc(task);

    rust_closure_env* env = (rust_closure_env*)a->a3;
    if(env) {
        // free the environment.
        I(task->sched, 1 == env->ref_count); // the ref count better be 1
        //env->td->drop_glue(NULL, task, NULL, env->td->first_param, env);
        //env->td->free_glue(NULL, task, NULL, env->td->first_param, env);
        task->free(env);
    }

    task->die();

    if (task->killed && !failed) {
        LOG(task, task, "Task killed during termination");
        failed = true;
    }

    task->notify(!failed);

    if (failed) {
#ifndef __WIN32__
        task->conclude_failure();
#else
        A(task->sched, false, "Shouldn't happen");
#endif
    }
}

// This runs on the Rust stack
extern "C" CDECL
void task_start_wrapper(spawn_args *a)
{
    rust_task *task = a->task;
    int rval = 42;

    bool failed = false;
    try {
        a->f(&rval, a->a3, a->a4);
    } catch (rust_task *ex) {
        A(task->sched, ex == task,
          "Expected this task to be thrown for unwinding");
        failed = true;
    }

    cleanup_args ca = {a, failed};

    // The cleanup work needs lots of stack
    task->sched->c_context.call_shim_on_c_stack(&ca, (void*)cleanup_task);

    task->ctx.next->swap(task->ctx);
}
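
// Task startup in summary: rust_task::start (below) pushes a spawn_args
// frame onto the new task's Rust stack and aims its context at
// task_start_wrapper; the task body then runs on the Rust stack; failure
// unwinds by throwing the task pointer itself; cleanup_task runs on the
// scheduler's C stack because the remaining Rust segment may be small; and
// the final swap hands control back to the scheduler context.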

void
rust_task::start(uintptr_t spawnee_fn,
                 uintptr_t args,
                 uintptr_t env)
{
    LOG(this, task, "starting task from fn 0x%" PRIxPTR
        " with args 0x%" PRIxPTR, spawnee_fn, args);

    I(sched, stk->data != NULL);

    char *sp = (char *)user.rust_sp;

    sp -= sizeof(spawn_args);

    spawn_args *a = (spawn_args *)sp;

    a->task = this;
    a->a3 = env;
    a->a4 = args;
    void **f = (void **)&a->f;
    *f = (void *)spawnee_fn;

    ctx.call((void *)task_start_wrapper, a, sp);

    this->start();
}

void
rust_task::start(uintptr_t spawnee_fn,
                 uintptr_t args)
{
    start(spawnee_fn, args, 0);
}

void rust_task::start()
{
    yield_timer.reset_us(0);
    transition(&sched->newborn_tasks, &sched->running_tasks);
    sched->lock.signal();
}

void
rust_task::grow(size_t n_frame_bytes)
{
    // FIXME (issue #151): Just fail rather than almost certainly crashing
    // mysteriously later. The commented-out logic below won't work at all in
    // the presence of non-word-aligned pointers.
    abort();
}

// Only run this on the rust stack
void
rust_task::yield(size_t time_in_us, bool *killed) {
    if (this->killed) {
        *killed = true;
    }

    yield_timer.reset_us(time_in_us);

    // Return to the scheduler.
    ctx.next->swap(ctx);

    if (this->killed) {
        *killed = true;
    }
}
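
// The killed flag is checked on both sides of the swap: once for a kill
// delivered while the task was running, and again for one delivered while
// it was suspended in the scheduler.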

void
rust_task::kill() {
    if (dead()) {
        // Task is already dead, can't kill what's already dead.
        fail_parent();
        return;
    }

    // Note the distinction here: to force-fail another task B from an
    // upcall in task A, you do B->kill(); to fail yourself you do
    // self->fail().
    LOG(this, task, "killing task %s @0x%" PRIxPTR, name, this);
    // When the task next goes to yield or resume it will fail
    killed = true;
    // Unblock the task so it can unwind.
    unblock();

    sched->lock.signal();

    LOG(this, task, "preparing to unwind task: 0x%" PRIxPTR, this);
    // run_on_resume(rust_unwind_glue);
}

void
rust_task::fail() {
    // See note in ::kill() regarding who should call this.
    DLOG(sched, task, "task %s @0x%" PRIxPTR " failing", name, this);
    backtrace();
#ifndef __WIN32__
    throw this;
#else
    die();
    conclude_failure();
#endif
}

void
rust_task::conclude_failure() {
    fail_parent();
    failed = true;
}

void
rust_task::fail_parent() {
    if (supervisor) {
        DLOG(sched, task,
             "task %s @0x%" PRIxPTR
             " propagating failure to supervisor %s @0x%" PRIxPTR,
             name, this, supervisor->name, supervisor);
        supervisor->kill();
    }
    // FIXME: implement unwinding again.
    if (NULL == supervisor && propagate_failure)
        sched->fail();
}

void
rust_task::unsupervise()
{
    // The log message must only be emitted when there actually is a
    // supervisor; it dereferences supervisor->name.
    if (supervisor) {
        DLOG(sched, task,
             "task %s @0x%" PRIxPTR
             " disconnecting from supervisor %s @0x%" PRIxPTR,
             name, this, supervisor->name, supervisor);
        supervisor->deref();
    }
    supervisor = NULL;
    propagate_failure = false;
}

frame_glue_fns*
rust_task::get_frame_glue_fns(uintptr_t fp) {
    fp -= sizeof(uintptr_t);
    return *((frame_glue_fns**) fp);
}

bool
rust_task::running()
{
    return state == &sched->running_tasks;
}

bool
rust_task::blocked()
{
    return state == &sched->blocked_tasks;
}

bool
rust_task::blocked_on(rust_cond *on)
{
    return blocked() && cond == on;
}

bool
rust_task::dead()
{
    return state == &sched->dead_tasks;
}

void *
rust_task::malloc(size_t sz, const char *tag, type_desc *td)
{
    return local_region.malloc(sz, tag);
}

void *
rust_task::realloc(void *data, size_t sz, bool is_gc)
{
    return local_region.realloc(data, sz);
}

void
rust_task::free(void *p, bool is_gc)
{
    local_region.free(p);
}

void
rust_task::transition(rust_task_list *src, rust_task_list *dst) {
    bool unlock = false;
    if(!sched->lock.lock_held_by_current_thread()) {
        unlock = true;
        sched->lock.lock();
    }
    DLOG(sched, task,
         "task %s " PTR " state change '%s' -> '%s' while in '%s'",
         name, (uintptr_t)this, src->name, dst->name, state->name);
    I(sched, state == src);
    src->remove(this);
    dst->append(this);
    state = dst;
    if(unlock)
        sched->lock.unlock();
}
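
// transition() is reached both from paths that already hold the scheduler
// lock and from paths that do not, hence the conditional acquire/release
// above instead of an unconditional scoped_lock.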

void
rust_task::block(rust_cond *on, const char* name) {
    I(sched, !lock.lock_held_by_current_thread());
    scoped_lock with(lock);
    LOG(this, task, "Blocking on 0x%" PRIxPTR ", cond: 0x%" PRIxPTR,
        (uintptr_t) on, (uintptr_t) cond);
    A(sched, cond == NULL, "Cannot block an already blocked task.");
    A(sched, on != NULL, "Cannot block on a NULL object.");

    transition(&sched->running_tasks, &sched->blocked_tasks);
    cond = on;
    cond_name = name;
}

void
rust_task::wakeup(rust_cond *from) {
    I(sched, !lock.lock_held_by_current_thread());
    scoped_lock with(lock);
    A(sched, cond != NULL, "Cannot wake up unblocked task.");
    LOG(this, task, "Blocked on 0x%" PRIxPTR " woken up on 0x%" PRIxPTR,
        (uintptr_t) cond, (uintptr_t) from);
    A(sched, cond == from, "Cannot wake up blocked task on wrong condition.");

    transition(&sched->blocked_tasks, &sched->running_tasks);
    I(sched, cond == from);
    cond = NULL;
    cond_name = "none";

    sched->lock.signal();
}

void
rust_task::die() {
    I(sched, !lock.lock_held_by_current_thread());
    scoped_lock with(lock);
    transition(&sched->running_tasks, &sched->dead_tasks);
    sched->lock.signal();
}

void
rust_task::unblock() {
    if (blocked()) {
        // FIXME: What if another thread unblocks the task between when
        // we checked and here?
        wakeup(cond);
    }
}

rust_crate_cache *
rust_task::get_crate_cache()
{
    if (!cache) {
        DLOG(sched, task, "fetching cache for current crate");
        cache = sched->get_cache();
    }
    return cache;
}

void
rust_task::backtrace() {
    if (!log_rt_backtrace) return;
#ifndef __WIN32__
    void *call_stack[256];
    int nframes = ::backtrace(call_stack, 256);
    backtrace_symbols_fd(call_stack + 1, nframes - 1, 2);
#endif
}
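
// call_stack + 1 and nframes - 1 drop the innermost frame (this function
// itself) from the report; file descriptor 2 is stderr.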

bool rust_task::can_schedule(int id)
{
    return yield_timer.has_timed_out() &&
        running_on == -1 &&
        (pinned_on == -1 || pinned_on == id);
}

void *
rust_task::calloc(size_t size, const char *tag) {
    return local_region.calloc(size, tag);
}

void rust_task::pin() {
    I(this->sched, running_on != -1);
    pinned_on = running_on;
}

void rust_task::pin(int id) {
    I(this->sched, running_on == -1);
    pinned_on = id;
}

void rust_task::unpin() {
    pinned_on = -1;
}

rust_port_id rust_task::register_port(rust_port *port) {
    I(sched, !lock.lock_held_by_current_thread());
    scoped_lock with(lock);

    rust_port_id id = next_port_id++;
    port_table.put(id, port);
    return id;
}

void rust_task::release_port(rust_port_id id) {
    I(sched, lock.lock_held_by_current_thread());
    port_table.remove(id);
}

rust_port *rust_task::get_port_by_id(rust_port_id id) {
    I(sched, !lock.lock_held_by_current_thread());
    scoped_lock with(lock);
    rust_port *port = NULL;
    port_table.get(id, &port);
    if (port) {
        port->ref();
    }
    return port;
}
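
// get_port_by_id returns the port with an extra reference; callers must
// drop it when done, as notify() below does by deref'ing the port under
// the owning task's lock.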

// Temporary routine to allow boxes on one task's shared heap to be reparented
// to another.
const type_desc *
rust_task::release_alloc(void *alloc) {
    I(sched, !lock.lock_held_by_current_thread());
    lock.lock();

    assert(local_allocs.find(alloc) != local_allocs.end());
    const type_desc *tydesc = local_allocs[alloc];
    local_allocs.erase(alloc);

    local_region.release_alloc(alloc);

    lock.unlock();
    return tydesc;
}

// Temporary routine to allow boxes from one task's shared heap to be
// reparented to this one.
void
rust_task::claim_alloc(void *alloc, const type_desc *tydesc) {
    I(sched, !lock.lock_held_by_current_thread());
    lock.lock();

    assert(local_allocs.find(alloc) == local_allocs.end());
    local_allocs[alloc] = tydesc;
    local_region.claim_alloc(alloc);

    lock.unlock();
}
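
// release_alloc and claim_alloc are the two halves of a handoff: the
// sending task detaches the box and recovers its type_desc, which the
// receiving task then passes back in when claiming the same allocation.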

void
rust_task::notify(bool success) {
    // FIXME (1078) Do this in rust code
    if(user.notify_enabled) {
        rust_task *target_task = kernel->get_task_by_id(user.notify_chan.task);
        if (target_task) {
            rust_port *target_port =
                target_task->get_port_by_id(user.notify_chan.port);
            if(target_port) {
                task_notification msg;
                msg.id = user.id;
                msg.result = !success ? tr_failure : tr_success;

                target_port->send(&msg);
                scoped_lock with(target_task->lock);
                target_port->deref();
            }
            target_task->deref();
        }
    }
}

extern "C" CDECL void
record_sp(void *limit);

void *
rust_task::new_stack(size_t stk_sz, void *args_addr, size_t args_sz) {
    stk_seg *stk_seg = new_stk(sched, this, stk_sz + args_sz);
    A(sched, stk_seg->end - (uintptr_t)stk_seg->data >= stk_sz + args_sz,
      "Did not receive enough stack");
    uint8_t *new_sp = (uint8_t*)stk_seg->end;
    // Push the function arguments to the new stack
    new_sp = align_down(new_sp - args_sz);
    memcpy(new_sp, args_addr, args_sz);
    record_stack_limit();
    return new_sp;
}

void
rust_task::del_stack() {
    del_stk(this, stk);
    record_stack_limit();
}

void
rust_task::record_stack_limit() {
    // The function prolog compares the amount of stack needed to the end of
    // the stack. As an optimization, when the frame size is less than 256
    // bytes, it will simply compare %esp to the stack limit instead of
    // subtracting the frame size. As a result we need our stack limit to
    // account for those 256 bytes.
    const unsigned LIMIT_OFFSET = 256;
    A(sched,
      (uintptr_t)stk->end - RED_ZONE_SIZE
      - (uintptr_t)stk->data >= LIMIT_OFFSET,
      "Stack size must be greater than LIMIT_OFFSET");
    record_sp(stk->data + LIMIT_OFFSET + RED_ZONE_SIZE);
}
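
// The recorded limit therefore sits LIMIT_OFFSET bytes above the red zone:
//
//   data[0] ...<red zone>... limit ...<stack for Rust frames>... stk->end
//            RED_ZONE_SIZE     ^- data + RED_ZONE_SIZE + LIMIT_OFFSET
//
// so even a small frame that compares %esp directly against the limit
// without subtracting its size cannot descend into the red zone.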

extern "C" uintptr_t get_sp();

static bool
sp_in_stk_seg(uintptr_t sp, stk_seg *stk) {
    // Not positive these bounds for sp are correct. I think that the first
    // possible value for esp on a new stack is stk->end, which points to the
    // address before the first value to be pushed onto a new stack. The last
    // possible address we can push data to is stk->data. Regardless, there's
    // so much slop at either end that we should never hit one of these
    // boundaries.
    return (uintptr_t)stk->data <= sp && sp <= stk->end;
}

/*
Called by landing pads during unwinding to figure out which
stack segment we are currently running on, delete the others,
and record the stack limit (which was not restored when unwinding
through __morestack).
 */
void
rust_task::reset_stack_limit() {
    uintptr_t sp = get_sp();
    while (!sp_in_stk_seg(sp, stk)) {
        del_stk(this, stk);
        A(sched, stk != NULL, "Failed to find the current stack");
    }
    record_stack_limit();
}

/*
Returns true if we're currently running on the Rust stack
 */
bool
rust_task::on_rust_stack() {
    return sp_in_stk_seg(get_sp(), stk);
}

//
// Local Variables:
// mode: C++
// fill-column: 78;
// indent-tabs-mode: nil
// c-basic-offset: 4
// buffer-file-coding-system: utf-8-unix
// End:
//