#ifndef __WIN32__
#include <execinfo.h>
#endif

#include <iostream>
#include <algorithm>

#include "rust_task.h"
#include "rust_cc.h"
#include "rust_env.h"
#include "rust_port.h"

// Tasks
rust_task::rust_task(rust_sched_loop *sched_loop, rust_task_state state,
                     const char *name, size_t init_stack_sz) :
    ref_count(1),
    id(0),
    stk(NULL),
    runtime_sp(0),
    sched(sched_loop->sched),
    sched_loop(sched_loop),
    kernel(sched_loop->kernel),
    name(name),
    list_index(-1),
    rendezvous_ptr(0),
    boxed(sched_loop->kernel->env, &local_region),
    local_region(&sched_loop->local_region),
    unwinding(false),
    cc_counter(0),
    total_stack_sz(0),
    task_local_data(NULL),
    task_local_data_cleanup(NULL),
    state(state),
    cond(NULL),
    cond_name("none"),
    event_reject(false),
    event(NULL),
    killed(false),
    reentered_rust_stack(false),
    disallow_kill(0),
    disallow_yield(0),
    c_stack(NULL),
    next_c_sp(0),
    next_rust_sp(0)
{
    LOGPTR(sched_loop, "new task", (uintptr_t)this);
    DLOG(sched_loop, task, "sizeof(task) = %d (0x%x)",
         sizeof *this, sizeof *this);

    new_stack(init_stack_sz);
}

// NB: This does not always run on the task's scheduler thread
void
rust_task::delete_this()
{
    DLOG(sched_loop, task, "~rust_task %s @0x%" PRIxPTR ", refcnt=%d",
         name, (uintptr_t)this, ref_count);

    /* FIXME (#2677): tighten this up, there are some more
       assertions that hold at task-lifecycle events. */
    assert(ref_count == 0); //
    // (ref_count == 1 && this == sched->root_task));

    sched_loop->release_task(this);
}

// All failure goes through me. Put your breakpoints here!
extern "C" void
rust_task_fail(rust_task *task,
               char const *expr,
               char const *file,
               size_t line) {
    assert(task != NULL);
    task->begin_failure(expr, file, line);
}

struct spawn_args {
    rust_task *task;
    spawn_fn f;
    rust_opaque_box *envptr;
    void *argptr;
};

struct cleanup_args {
    spawn_args *spargs;
    bool threw_exception;
};

void
annihilate_boxes(rust_task *task);
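
// Runs the end-of-life work for a task after its body has returned or
// unwound: tears down TLS, collects/annihilates the task's boxes, and
// moves the task to the dead state.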
void
cleanup_task(cleanup_args *args) {
    spawn_args *a = args->spargs;
    bool threw_exception = args->threw_exception;
    rust_task *task = a->task;

    {
        scoped_lock with(task->lifecycle_lock);
        if (task->killed && !threw_exception) {
            LOG(task, task, "Task killed during termination");
            threw_exception = true;
        }
    }

    // Clean up TLS. This will only be set if TLS was used to begin with.
    // Because this is a crust function, it must be called from the C stack.
    if (task->task_local_data_cleanup != NULL) {
        // This assert should hold but it's not our job to ensure it (and
        // the condition might change). Handled in libcore/task.rs.
        // assert(task->task_local_data != NULL);
        task->task_local_data_cleanup(task->task_local_data);
        task->task_local_data = NULL;
    } else if (threw_exception && task->id == INIT_TASK_ID) {
        // Edge case: If main never spawns any tasks, but fails anyway, TLS
        // won't be around to take down the kernel (task.rs:kill_taskgroup,
        // rust_task_kill_all). Do it here instead.
        // (Note that child tasks cannot init their TLS if they were
        // killed too early, so we need to check main's task id too.)
        task->fail_sched_loop();
        // This must not happen twice.
        static bool main_task_failed_without_spawning = false;
        assert(!main_task_failed_without_spawning);
        main_task_failed_without_spawning = true;
    }

    // FIXME (#2676): For performance we should do the annihilator
    // instead of the cycle collector even under normal termination, but
    // since that would hide memory management errors (like not derefing
    // boxes), it needs to be disableable in debug builds.
    if (threw_exception) {
        // FIXME (#2676): When the annihilator is more powerful and
        // successfully runs resource destructors, etc. we can get rid
        // of this cc
        cc::do_cc(task);
        annihilate_boxes(task);
    }
    cc::do_final_cc(task);

    task->die();

#ifdef __WIN32__
    assert(!threw_exception && "No exception-handling yet on windows builds");
#endif
}

extern "C" CDECL void upcall_exchange_free(void *ptr);

// This runs on the Rust stack
void task_start_wrapper(spawn_args *a)
{
    rust_task *task = a->task;

    bool threw_exception = false;
    try {
        // The first argument is the return pointer; as the task fn
        // must have void return type, we can safely pass 0.
        a->f(0, a->envptr, a->argptr);
    } catch (rust_task *ex) {
        assert(ex == task && "Expected this task to be thrown for unwinding");
        threw_exception = true;

        if (task->c_stack) {
            task->return_c_stack();
        }

        // Since we call glue code below we need to make sure we
        // have the stack limit set up correctly
        task->reset_stack_limit();
    }

    // We should have returned any C stack by now
    assert(task->c_stack == NULL);

    rust_opaque_box* env = a->envptr;
    if (env) {
        // free the environment (which should be a unique closure).
        const type_desc *td = env->td;
        td->drop_glue(NULL, NULL, NULL, box_body(env));
        upcall_exchange_free(env);
    }

    // The cleanup work needs lots of stack
    cleanup_args ca = {a, threw_exception};
    task->call_on_c_stack(&ca, (void*)cleanup_task);

    task->ctx.next->swap(task->ctx);
}

void
rust_task::start(spawn_fn spawnee_fn,
                 rust_opaque_box *envptr,
                 void *argptr)
{
    LOG(this, task, "starting task from fn 0x%" PRIxPTR
        " with env 0x%" PRIxPTR " and arg 0x%" PRIxPTR,
        spawnee_fn, envptr, argptr);

    assert(stk->data != NULL);

    char *sp = (char *)stk->end;

    sp -= sizeof(spawn_args);

    spawn_args *a = (spawn_args *)sp;

    a->task = this;
    a->envptr = envptr;
    a->argptr = argptr;
    a->f = spawnee_fn;

    ctx.call((void *)task_start_wrapper, a, sp);

    this->start();
}

void rust_task::start()
{
    transition(task_state_newborn, task_state_running, NULL, "none");
}
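
// True if this task should unwind now: it has been killed, it is not
// re-entered on the Rust stack, and kills are not currently inhibited.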
bool
rust_task::must_fail_from_being_killed() {
    scoped_lock with(lifecycle_lock);
    return must_fail_from_being_killed_inner();
}

bool
rust_task::must_fail_from_being_killed_inner() {
    lifecycle_lock.must_have_lock();
    return killed && !reentered_rust_stack && disallow_kill == 0;
}

void rust_task_yield_fail(rust_task *task) {
    LOG_ERR(task, task, "task %" PRIxPTR " yielded in an atomic section",
            task);
    task->fail();
}

// Only run this on the rust stack
MUST_CHECK bool rust_task::yield() {
    bool killed = false;

    if (disallow_yield > 0) {
        call_on_c_stack(this, (void *)rust_task_yield_fail);
    }

    // This check is largely superfluous; it's the one after the context swap
    // that really matters. This one allows us to assert a useful invariant.

    // NB: This takes lifecycle_lock three times, and I believe that none of
    // them are actually necessary, as per #3213. Removing the locks here may
    // cause *harmless* races with a killer... but I didn't observe any
    // substantial performance improvement from removing them, even with
    // msgsend-ring-pipes, and also it's my last day, so I'm not about to
    // remove them. -- bblum
    if (must_fail_from_being_killed()) {
        {
            scoped_lock with(lifecycle_lock);
            assert(!(state == task_state_blocked));
        }
        killed = true;
    }

    // Return to the scheduler.
    ctx.next->swap(ctx);

    if (must_fail_from_being_killed()) {
        killed = true;
    }
    return killed;
}

void
rust_task::kill() {
    scoped_lock with(lifecycle_lock);
    kill_inner();
}

void rust_task::kill_inner() {
    lifecycle_lock.must_have_lock();

    // Multiple kills should be able to safely race, but check anyway.
    if (killed) {
        LOG(this, task, "task %s @0x%" PRIxPTR " already killed", name, this);
        return;
    }

    // Note the distinction here: kill() is for when you're in an upcall
    // from task A and want to force-fail task B; you do B->kill().
    // If you want to fail yourself you do self->fail().
    LOG(this, task, "killing task %s @0x%" PRIxPTR, name, this);
    // When the task next goes to yield or resume it will fail
    killed = true;
    // Unblock the task so it can unwind.

    if (state == task_state_blocked &&
        must_fail_from_being_killed_inner()) {
        wakeup_inner(cond);
    }

    LOG(this, task, "preparing to unwind task: 0x%" PRIxPTR, this);
}

void
rust_task::fail() {
    // See note in ::kill() regarding who should call this.
    fail(NULL, NULL, 0);
}

void
rust_task::fail(char const *expr, char const *file, size_t line) {
    rust_task_fail(this, expr, file, line);
}

// Called only by rust_task_fail
void
rust_task::begin_failure(char const *expr, char const *file, size_t line) {

    if (expr) {
        LOG_ERR(this, task, "task failed at '%s', %s:%" PRIdPTR,
                expr, file, line);
    }

    DLOG(sched_loop, task, "task %s @0x%" PRIxPTR " failing", name, this);
    backtrace();
    unwinding = true;
#ifndef __WIN32__
    throw this;
#else
    die();
    // FIXME (#908): Need unwinding on windows. This will end up aborting
    fail_sched_loop();
#endif
}

void rust_task::fail_sched_loop() {
    sched_loop->fail();
}
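
// Reads the frame glue table pointer stored one word below the given
// frame pointer.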
frame_glue_fns*
rust_task::get_frame_glue_fns(uintptr_t fp) {
    fp -= sizeof(uintptr_t);
    return *((frame_glue_fns**) fp);
}

void rust_task::assert_is_running()
{
    scoped_lock with(lifecycle_lock);
    assert(state == task_state_running);
}

// FIXME (#2851) Remove this code when rust_port goes away?
bool
rust_task::blocked_on(rust_cond *on)
{
    lifecycle_lock.must_have_lock();
    return cond == on;
}
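
// Task-local allocation: these simply forward to the task's local
// memory region.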
void *
rust_task::malloc(size_t sz, const char *tag, type_desc *td)
{
    return local_region.malloc(sz, tag);
}

void *
rust_task::realloc(void *data, size_t sz)
{
    return local_region.realloc(data, sz);
}

void
rust_task::free(void *p)
{
    local_region.free(p);
}
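
// State transitions are delegated to the scheduler loop. transition()
// acquires lifecycle_lock itself; transition_inner() requires the caller
// to already hold it.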
void
rust_task::transition(rust_task_state src, rust_task_state dst,
                      rust_cond *cond, const char* cond_name) {
    scoped_lock with(lifecycle_lock);
    transition_inner(src, dst, cond, cond_name);
}

void rust_task::transition_inner(rust_task_state src, rust_task_state dst,
                                 rust_cond *cond, const char* cond_name) {
    lifecycle_lock.must_have_lock();
    sched_loop->transition(this, src, dst, cond, cond_name);
}

void
rust_task::set_state(rust_task_state state,
                     rust_cond *cond, const char* cond_name) {
    lifecycle_lock.must_have_lock();
    this->state = state;
    this->cond = cond;
    this->cond_name = cond_name;
}

bool
rust_task::block(rust_cond *on, const char* name) {
    scoped_lock with(lifecycle_lock);
    return block_inner(on, name);
}

bool
rust_task::block_inner(rust_cond *on, const char* name) {
    if (must_fail_from_being_killed_inner()) {
        // We're already going to die. Don't block; tell the caller the
        // block was rejected so the task fails instead.
        return false;
    }

    LOG(this, task, "Blocking on 0x%" PRIxPTR ", cond: 0x%" PRIxPTR,
        (uintptr_t) on, (uintptr_t) cond);
    assert(cond == NULL && "Cannot block an already blocked task.");
    assert(on != NULL && "Cannot block on a NULL object.");

    transition_inner(task_state_running, task_state_blocked, on, name);

    return true;
}

void
rust_task::wakeup(rust_cond *from) {
    scoped_lock with(lifecycle_lock);
    wakeup_inner(from);
}

void
rust_task::wakeup_inner(rust_cond *from) {
    assert(cond != NULL && "Cannot wake up unblocked task.");
    LOG(this, task, "Blocked on 0x%" PRIxPTR " woken up on 0x%" PRIxPTR,
        (uintptr_t) cond, (uintptr_t) from);
    assert(cond == from && "Cannot wake up blocked task on wrong condition.");

    transition_inner(task_state_blocked, task_state_running, NULL, "none");
}

void
rust_task::die() {
    transition(task_state_running, task_state_dead, NULL, "none");
}
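
// Dumps a native backtrace to stderr when runtime backtrace logging is
// enabled; not available on Windows.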
void
rust_task::backtrace() {
    if (!log_rt_backtrace) return;
#ifndef __WIN32__
    void *call_stack[256];
    int nframes = ::backtrace(call_stack, 256);
    backtrace_symbols_fd(call_stack + 1, nframes - 1, 2);
#endif
}

void *
rust_task::calloc(size_t size, const char *tag) {
    return local_region.calloc(size, tag);
}
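
// Stack growth policy: allocate at least the minimum segment size and
// enough for the requested frame, otherwise double the current segment,
// capping the doubling (but not an explicit request) at 1MB.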
size_t
rust_task::get_next_stack_size(size_t min, size_t current, size_t requested) {
    LOG(this, mem, "calculating new stack size for 0x%" PRIxPTR, this);
    LOG(this, mem,
        "min: %" PRIdPTR " current: %" PRIdPTR " requested: %" PRIdPTR,
        min, current, requested);

    // Allocate at least enough to accommodate the next frame
    size_t sz = std::max(min, requested);

    // And double the stack size each allocation
    const size_t max = 1024 * 1024;
    size_t next = std::min(max, current * 2);

    sz = std::max(sz, next);

    LOG(this, mem, "next stack size: %" PRIdPTR, sz);
    assert(requested <= sz);
    return sz;
}

void
rust_task::free_stack(stk_seg *stk) {
    LOGPTR(sched_loop, "freeing stk segment", (uintptr_t)stk);
    total_stack_sz -= user_stack_size(stk);
    destroy_stack(&local_region, stk);
}
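
// Out-of-line entry point so stack growth can be requested through a
// plain function pointer; presumably used by the stack-growth upcall
// (__morestack) path.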
void
new_stack_slow(new_stack_args *args) {
    args->task->new_stack(args->requested_sz);
}
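
// Grow the segmented stack: reuse a cached next segment when it is big
// enough, otherwise allocate a new segment and link it into the chain.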
void
rust_task::new_stack(size_t requested_sz) {
    LOG(this, mem, "creating new stack for task %" PRIxPTR, this);
    if (stk) {
        ::check_stack_canary(stk);
    }

    // The minimum stack size, in bytes, of a Rust stack, excluding red zone
    size_t min_sz = sched_loop->min_stack_size;

    // Try to reuse an existing stack segment
    while (stk != NULL && stk->next != NULL) {
        size_t next_sz = user_stack_size(stk->next);
        if (min_sz <= next_sz && requested_sz <= next_sz) {
            LOG(this, mem, "reusing existing stack");
            stk = stk->next;
            return;
        } else {
            LOG(this, mem, "existing stack is not big enough");
            stk_seg *new_next = stk->next->next;
            free_stack(stk->next);
            stk->next = new_next;
            if (new_next) {
                new_next->prev = stk;
            }
        }
    }

    // The size of the current stack segment, excluding red zone
    size_t current_sz = 0;
    if (stk != NULL) {
        current_sz = user_stack_size(stk);
    }
    // The calculated size of the new stack, excluding red zone
    size_t rust_stk_sz = get_next_stack_size(min_sz,
                                             current_sz, requested_sz);

    size_t max_stack = kernel->env->max_stack_size;
    size_t used_stack = total_stack_sz + rust_stk_sz;

    // Don't allow stacks to grow forever. During unwinding we have to allow
    // for more stack than normal in order to allow destructors room to run,
    // arbitrarily selected as 2x the maximum stack size.
    if (!unwinding && used_stack > max_stack) {
        LOG_ERR(this, task, "task %" PRIxPTR " ran out of stack", this);
        fail();
    } else if (unwinding && used_stack > max_stack * 2) {
        LOG_ERR(this, task,
                "task %" PRIxPTR " ran out of stack during unwinding", this);
        fail();
    }

    size_t sz = rust_stk_sz + RED_ZONE_SIZE;
    stk_seg *new_stk = create_stack(&local_region, sz);
    LOGPTR(sched_loop, "new stk", (uintptr_t)new_stk);
    new_stk->task = this;
    new_stk->next = NULL;
    new_stk->prev = stk;
    if (stk) {
        stk->next = new_stk;
    }
    LOGPTR(sched_loop, "stk end", new_stk->end);

    stk = new_stk;
    total_stack_sz += user_stack_size(new_stk);
}

void
rust_task::cleanup_after_turn() {
    // Delete any spare stack segments that were left
    // behind by calls to prev_stack
    assert(stk);
    while (stk->next) {
        stk_seg *new_next = stk->next->next;
        free_stack(stk->next);
        stk->next = new_next;
    }
}

static bool
sp_in_stk_seg(uintptr_t sp, stk_seg *stk) {
    // Not positive these bounds for sp are correct. I think that the first
    // possible value for esp on a new stack is stk->end, which points to the
    // address before the first value to be pushed onto a new stack. The last
    // possible address we can push data to is stk->data. Regardless, there's
    // so much slop at either end that we should never hit one of these
    // boundaries.
    return (uintptr_t)stk->data <= sp && sp <= stk->end;
}

/*
Called by landing pads during unwinding to figure out which stack segment we
are currently running on and record the stack limit (which was not restored
when unwinding through __morestack).
*/
void
rust_task::reset_stack_limit() {
    uintptr_t sp = get_sp();
    while (!sp_in_stk_seg(sp, stk)) {
        stk = stk->prev;
        assert(stk != NULL && "Failed to find the current stack");
    }
    record_stack_limit();
}

void
rust_task::check_stack_canary() {
    ::check_stack_canary(stk);
}

void
rust_task::delete_all_stacks() {
    assert(!on_rust_stack());
    // Delete all the stacks. There may be more than one if the task failed
    // and no landing pads stopped to clean up.
    assert(stk->next == NULL);
    while (stk != NULL) {
        stk_seg *prev = stk->prev;
        free_stack(stk);
        stk = prev;
    }
}

/*
Returns true if we're currently running on the Rust stack
*/
bool
rust_task::on_rust_stack() {
    if (stk == NULL) {
        // This only happens during construction
        return false;
    }

    uintptr_t sp = get_sp();
    bool in_first_segment = sp_in_stk_seg(sp, stk);
    if (in_first_segment) {
        return true;
    } else if (stk->prev != NULL) {
        // This happens only when calling the upcall to delete
        // a stack segment
        bool in_second_segment = sp_in_stk_seg(sp, stk->prev);
        return in_second_segment;
    } else {
        return false;
    }
}

// NB: In inhibit_kill and allow_kill, helgrind would complain that we need to
// hold lifecycle_lock while accessing disallow_kill. Even though another
// killing task may access disallow_kill concurrently, this is not racy
// because the killer only cares if this task is blocking, and block() already
// uses proper locking. See https://github.com/mozilla/rust/issues/3213 .

void
rust_task::inhibit_kill() {
    // Here might be good, though not mandatory, to check if we have to die.
    disallow_kill++;
}

void
rust_task::allow_kill() {
    assert(disallow_kill > 0 && "Illegal allow_kill(): already killable!");
    disallow_kill--;
}

void rust_task::inhibit_yield() {
    disallow_yield++;
}

void rust_task::allow_yield() {
    assert(disallow_yield > 0 && "Illegal allow_yield(): already yieldable!");
    disallow_yield--;
}
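
// Blocks until signal_event() delivers a value, writing it to *result;
// returns true if the task was killed while waiting.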
MUST_CHECK bool rust_task::wait_event(void **result) {
    bool killed = false;
    scoped_lock with(lifecycle_lock);

    if (!event_reject) {
        block_inner(&event_cond, "waiting on event");
        lifecycle_lock.unlock();
        killed = yield();
        lifecycle_lock.lock();
    } else if (must_fail_from_being_killed_inner()) {
        // If the deschedule was rejected, yield won't do our killed check for
        // us. For thoroughness, do it here. FIXME (#524)
        killed = true;
    }

    event_reject = false;
    *result = event;
    return killed;
}
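
// Delivers a value to a task waiting in wait_event(), waking it if it is
// blocked; setting event_reject first keeps a racing wait_event() from
// blocking after the signal has already been sent.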
void
rust_task::signal_event(void *event) {
    scoped_lock with(lifecycle_lock);

    this->event = event;
    event_reject = true;
    if (task_state_blocked == state) {
        wakeup_inner(&event_cond);
    }
}

//
// Local Variables:
// mode: C++
// fill-column: 78;
// indent-tabs-mode: nil
// c-basic-offset: 4
// buffer-file-coding-system: utf-8-unix
// End:
//