#include "rust_internal.h"
#include "rust_cc.h"

#ifndef __WIN32__
#include <execinfo.h>
#endif

#include <iostream>
#include <cassert>
#include <cstring>
#include <algorithm>

#include "globals.h"
#include "rust_upcall.h"

// The amount of extra space at the end of each stack segment, available
// to the rt, compiler and dynamic linker for running small functions
// FIXME: We want this to be 128 but need to slim the red zone calls down
#define RZ_LINUX_32 (1024*2)
#define RZ_LINUX_64 (1024*2)
#define RZ_MAC_32 (1024*20)
#define RZ_MAC_64 (1024*20)
#define RZ_WIN_32 (1024*20)
// NB: RZ_WIN_64 is referenced below but was never defined here; assume the
// same 20 KB used by the other large red zones so a win64 build can compile.
#define RZ_WIN_64 (1024*20)
#define RZ_BSD_32 (1024*20)
#define RZ_BSD_64 (1024*20)

#ifdef __linux__
#ifdef __i386__
#define RED_ZONE_SIZE RZ_LINUX_32
#endif
#ifdef __x86_64__
#define RED_ZONE_SIZE RZ_LINUX_64
#endif
#endif
#ifdef __APPLE__
#ifdef __i386__
#define RED_ZONE_SIZE RZ_MAC_32
#endif
#ifdef __x86_64__
#define RED_ZONE_SIZE RZ_MAC_64
#endif
#endif
#ifdef __WIN32__
#ifdef __i386__
#define RED_ZONE_SIZE RZ_WIN_32
#endif
#ifdef __x86_64__
#define RED_ZONE_SIZE RZ_WIN_64
#endif
#endif
#ifdef __FreeBSD__
#ifdef __i386__
#define RED_ZONE_SIZE RZ_BSD_32
#endif
#ifdef __x86_64__
#define RED_ZONE_SIZE RZ_BSD_64
#endif
#endif
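
// RED_ZONE_SIZE is added on top of every requested segment size in
// new_stack() below and skipped over when computing the limit in
// record_stack_limit(), so Rust code never runs inside the red zone.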

extern "C" CDECL void
record_sp(void *limit);
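
// record_sp is implemented in arch-specific assembly elsewhere in the
// runtime; it presumably stashes `limit` somewhere the compiled function
// prologs can reach (a TLS slot or segment register, depending on the
// platform). A sketch of the check each prolog performs against it:
//
//     if (sp - frame_size < stack_limit)
//         __morestack();  // grow onto a new segment, then retry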

// Tasks
rust_task::rust_task(rust_task_thread *thread, rust_task_list *state,
                     rust_task *spawner, const char *name,
                     size_t init_stack_sz) :
    ref_count(1),
    id(0),
    notify_enabled(false),
    stk(NULL),
    runtime_sp(0),
    sched(thread->sched),
    thread(thread),
    cache(NULL),
    kernel(thread->kernel),
    name(name),
    list_index(-1),
    next_port_id(0),
    rendezvous_ptr(0),
    local_region(&thread->srv->local_region),
    boxed(&local_region),
    unwinding(false),
    propagate_failure(true),
    dynastack(this),
    cc_counter(0),
    total_stack_sz(0),
    state(state),
    cond(NULL),
    cond_name("none"),
    killed(false),
    reentered_rust_stack(false),
    c_stack(NULL),
    next_c_sp(0),
    next_rust_sp(0),
    supervisor(spawner)
{
    LOGPTR(thread, "new task", (uintptr_t)this);
    DLOG(thread, task, "sizeof(task) = %d (0x%x)", sizeof *this, sizeof *this);

    new_stack(init_stack_sz);
    if (supervisor) {
        supervisor->ref();
    }
}
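
// Note on ownership: a task is created with ref_count 1 and also takes a
// reference on its supervisor above; the matching derefs happen in
// delete_this() and unsupervise() below.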

// NB: This does not always run on the task's scheduler thread
void
rust_task::delete_this()
{
    {
        scoped_lock with(port_lock);
        I(thread, port_table.is_empty());
    }

    DLOG(thread, task, "~rust_task %s @0x%" PRIxPTR ", refcnt=%d",
         name, (uintptr_t)this, ref_count);

    // FIXME: We should do this when the task exits, not in the destructor
    {
        scoped_lock with(supervisor_lock);
        if (supervisor) {
            supervisor->deref();
        }
    }

    /* FIXME: tighten this up, there are some more
       assertions that hold at task-lifecycle events. */
    I(thread, ref_count == 0); // ||
    // (ref_count == 1 && this == sched->root_task));

    thread->release_task(this);
}

struct spawn_args {
    rust_task *task;
    spawn_fn f;
    rust_opaque_box *envptr;
    void *argptr;
};
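
// A spawn_args is carved out of the top of a new task's stack by
// rust_task::start() below and handed to task_start_wrapper as the first
// frame's argument once the scheduler swaps the task in.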

struct cleanup_args {
    spawn_args *spargs;
    bool threw_exception;
};

void
cleanup_task(cleanup_args *args) {
    spawn_args *a = args->spargs;
    bool threw_exception = args->threw_exception;
    rust_task *task = a->task;

    cc::do_final_cc(task);

    task->die();

    {
        scoped_lock with(task->kill_lock);
        if (task->killed && !threw_exception) {
            LOG(task, task, "Task killed during termination");
            threw_exception = true;
        }
    }

    task->notify(!threw_exception);

    if (threw_exception) {
#ifndef __WIN32__
        task->conclude_failure();
#else
        A(task->thread, false, "Shouldn't happen");
#endif
    }
}

// This runs on the Rust stack
void task_start_wrapper(spawn_args *a)
{
    rust_task *task = a->task;

    bool threw_exception = false;
    try {
        // The first argument is the return pointer; as the task fn
        // must have void return type, we can safely pass 0.
        a->f(0, a->envptr, a->argptr);
    } catch (rust_task *ex) {
        A(task->thread, ex == task,
          "Expected this task to be thrown for unwinding");
        threw_exception = true;

        if (task->c_stack) {
            task->return_c_stack();
        }
    }

    // We should have returned any C stack by now
    I(task->thread, task->c_stack == NULL);

    rust_opaque_box* env = a->envptr;
    if (env) {
        // free the environment (which should be a unique closure).
        const type_desc *td = env->td;
        td->drop_glue(NULL, NULL, td->first_param, box_body(env));
        upcall_free_shared_type_desc(env->td);
        upcall_shared_free(env);
    }

    // The cleanup work needs lots of stack
    cleanup_args ca = {a, threw_exception};
    task->call_on_c_stack(&ca, (void*)cleanup_task);

    task->ctx.next->swap(task->ctx);
}
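
// Lifecycle summary: start() stages a spawn_args at the top of the new
// stack and points the initial context at task_start_wrapper; the wrapper
// runs the task body on the Rust stack, does the heavy cleanup on the C
// stack via cleanup_task, and the final swap above returns control to the
// scheduler for good.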

void
rust_task::start(spawn_fn spawnee_fn,
                 rust_opaque_box *envptr,
                 void *argptr)
{
    LOG(this, task, "starting task from fn 0x%" PRIxPTR
        " with env 0x%" PRIxPTR " and arg 0x%" PRIxPTR,
        spawnee_fn, envptr, argptr);

    I(thread, stk->data != NULL);

    char *sp = (char *)stk->end;

    sp -= sizeof(spawn_args);

    spawn_args *a = (spawn_args *)sp;

    a->task = this;
    a->envptr = envptr;
    a->argptr = argptr;
    a->f = spawnee_fn;

    ctx.call((void *)task_start_wrapper, a, sp);

    this->start();
}

void rust_task::start()
{
    transition(&thread->newborn_tasks, &thread->running_tasks, NULL, "none");
}

bool
rust_task::must_fail_from_being_killed() {
    scoped_lock with(kill_lock);
    return killed && !reentered_rust_stack;
}

// Only run this on the rust stack
void
rust_task::yield(bool *killed) {
    if (must_fail_from_being_killed()) {
        *killed = true;
    }

    // Return to the scheduler.
    ctx.next->swap(ctx);

    if (must_fail_from_being_killed()) {
        *killed = true;
    }
}

void
rust_task::kill() {
    if (dead()) {
        // Task is already dead, can't kill what's already dead.
        fail_parent();
        return;
    }

    // Note the distinction here: kill() is when you're in an upcall
    // from task A and want to force-fail task B, you do B->kill().
    // If you want to fail yourself you do self->fail().
    LOG(this, task, "killing task %s @0x%" PRIxPTR, name, this);
    // When the task next goes to yield or resume it will fail
    {
        scoped_lock with(kill_lock);
        killed = true;
    }
    // Unblock the task so it can unwind.
    unblock();

    LOG(this, task, "preparing to unwind task: 0x%" PRIxPTR, this);
    // run_on_resume(rust_unwind_glue);
}
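
// The kill protocol, in brief: kill() only sets the `killed` flag under
// kill_lock and unblocks the victim; the victim notices the flag at its
// next yield() via must_fail_from_being_killed() and reports it to its
// caller, which is then expected to fail the task, so unwinding happens on
// the victim's own stack rather than the killer's.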

extern "C" CDECL
bool rust_task_is_unwinding(rust_task *rt) {
    return rt->unwinding;
}

void
rust_task::fail() {
    // See note in ::kill() regarding who should call this.
    DLOG(thread, task, "task %s @0x%" PRIxPTR " failing", name, this);
    backtrace();
    unwinding = true;
#ifndef __WIN32__
    throw this;
#else
    die();
    conclude_failure();
    // FIXME: Need unwinding on windows. This will end up aborting
    thread->fail();
#endif
}

void
rust_task::conclude_failure() {
    fail_parent();
}

void
rust_task::fail_parent() {
    scoped_lock with(supervisor_lock);
    if (supervisor) {
        DLOG(thread, task,
             "task %s @0x%" PRIxPTR
             " propagating failure to supervisor %s @0x%" PRIxPTR,
             name, this, supervisor->name, supervisor);
        supervisor->kill();
    }
    // FIXME: implement unwinding again.
    if (NULL == supervisor && propagate_failure)
        thread->fail();
}

void
rust_task::unsupervise()
{
    scoped_lock with(supervisor_lock);
    if (supervisor) {
        DLOG(thread, task,
             "task %s @0x%" PRIxPTR
             " disconnecting from supervisor %s @0x%" PRIxPTR,
             name, this, supervisor->name, supervisor);
        supervisor->deref();
    }
    supervisor = NULL;
    propagate_failure = false;
}

frame_glue_fns*
rust_task::get_frame_glue_fns(uintptr_t fp) {
    fp -= sizeof(uintptr_t);
    return *((frame_glue_fns**) fp);
}

bool
rust_task::running()
{
    scoped_lock with(state_lock);
    return state == &thread->running_tasks;
}

bool
rust_task::blocked()
{
    scoped_lock with(state_lock);
    return state == &thread->blocked_tasks;
}

bool
rust_task::blocked_on(rust_cond *on)
{
    scoped_lock with(state_lock);
    return cond == on;
}

bool
rust_task::dead()
{
    scoped_lock with(state_lock);
    return state == &thread->dead_tasks;
}
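
// These predicates take state_lock because the fields they read can be
// written from another thread (kill(), for one, is not confined to the
// task's scheduler thread); note the answer may be stale the moment the
// lock is released.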

void *
rust_task::malloc(size_t sz, const char *tag, type_desc *td)
{
    return local_region.malloc(sz, tag);
}

void *
rust_task::realloc(void *data, size_t sz)
{
    return local_region.realloc(data, sz);
}

void
rust_task::free(void *p)
{
    local_region.free(p);
}

void
rust_task::transition(rust_task_list *src, rust_task_list *dst,
                      rust_cond *cond, const char* cond_name) {
    thread->transition(this, src, dst, cond, cond_name);
}

void
rust_task::set_state(rust_task_list *state,
                     rust_cond *cond, const char* cond_name) {
    scoped_lock with(state_lock);
    this->state = state;
    this->cond = cond;
    this->cond_name = cond_name;
}
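
// transition() delegates to the owning rust_task_thread, which moves the
// task between its run lists and presumably calls set_state() back on the
// task; set_state() itself only updates the task-local snapshot under
// state_lock.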

void
rust_task::block(rust_cond *on, const char* name) {
    LOG(this, task, "Blocking on 0x%" PRIxPTR ", cond: 0x%" PRIxPTR,
        (uintptr_t) on, (uintptr_t) cond);
    A(thread, cond == NULL, "Cannot block an already blocked task.");
    A(thread, on != NULL, "Cannot block on a NULL object.");

    transition(&thread->running_tasks, &thread->blocked_tasks, on, name);
}

void
rust_task::wakeup(rust_cond *from) {
    A(thread, cond != NULL, "Cannot wake up unblocked task.");
    LOG(this, task, "Blocked on 0x%" PRIxPTR " woken up on 0x%" PRIxPTR,
        (uintptr_t) cond, (uintptr_t) from);
    A(thread, cond == from, "Cannot wake up blocked task on wrong condition.");

    transition(&thread->blocked_tasks, &thread->running_tasks, NULL, "none");
}

void
rust_task::die() {
    transition(&thread->running_tasks, &thread->dead_tasks, NULL, "none");
}

void
rust_task::unblock() {
    if (blocked()) {
        // FIXME: What if another thread unblocks the task between when
        // we checked and here?
        wakeup(cond);
    }
}

rust_crate_cache *
rust_task::get_crate_cache()
{
    if (!cache) {
        DLOG(thread, task, "fetching cache for current crate");
        cache = thread->get_cache();
    }
    return cache;
}

void
rust_task::backtrace() {
    if (!log_rt_backtrace) return;
#ifndef __WIN32__
    void *call_stack[256];
    int nframes = ::backtrace(call_stack, 256);
    // Skip our own frame and write the symbolized trace to stderr (fd 2).
    backtrace_symbols_fd(call_stack + 1, nframes - 1, 2);
#endif
}

void *
rust_task::calloc(size_t size, const char *tag) {
    return local_region.calloc(size, tag);
}

rust_port_id rust_task::register_port(rust_port *port) {
    I(thread, !port_lock.lock_held_by_current_thread());
    scoped_lock with(port_lock);

    rust_port_id id = next_port_id++;
    A(thread, id != INTPTR_MAX, "Hit the maximum port id");
    port_table.put(id, port);
    return id;
}

void rust_task::release_port(rust_port_id id) {
    scoped_lock with(port_lock);
    port_table.remove(id);
}

rust_port *rust_task::get_port_by_id(rust_port_id id) {
    I(thread, !port_lock.lock_held_by_current_thread());
    scoped_lock with(port_lock);
    rust_port *port = NULL;
    port_table.get(id, &port);
    if (port) {
        port->ref();
    }
    return port;
}
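
// get_port_by_id() hands back a referenced port; callers must deref it when
// done (see notify() below, which derefs under the owner's port_lock).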

void
rust_task::notify(bool success) {
    // FIXME (1078) Do this in rust code
    if (notify_enabled) {
        rust_task *target_task = kernel->get_task_by_id(notify_chan.task);
        if (target_task) {
            rust_port *target_port =
                target_task->get_port_by_id(notify_chan.port);
            if (target_port) {
                task_notification msg;
                msg.id = id;
                msg.result = !success ? tr_failure : tr_success;

                target_port->send(&msg);
                scoped_lock with(target_task->port_lock);
                target_port->deref();
            }
            target_task->deref();
        }
    }
}

size_t
rust_task::get_next_stack_size(size_t min, size_t current, size_t requested) {
    LOG(this, mem, "calculating new stack size for 0x%" PRIxPTR, this);
    LOG(this, mem,
        "min: %" PRIdPTR " current: %" PRIdPTR " requested: %" PRIdPTR,
        min, current, requested);

    // Allocate at least enough to accommodate the next frame
    size_t sz = std::max(min, requested);

    // And double the stack size each allocation
    const size_t max = 1024 * 1024;
    size_t next = std::min(max, current * 2);

    sz = std::max(sz, next);

    LOG(this, mem, "next stack size: %" PRIdPTR, sz);
    I(thread, requested <= sz);
    return sz;
}
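
// Worked example (values are illustrative): with min = 0x1000, current =
// 0x1000 and requested = 0x3000, sz = max(0x1000, 0x3000) = 0x3000 and
// next = min(1 MB, 0x2000) = 0x2000, so the new segment gets 0x3000 bytes;
// a later call with requested = 0 yields next = 0x6000, doubling the stack
// on each growth until the 1 MB cap is reached.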

// The amount of stack in a segment available to Rust code
static size_t
user_stack_size(stk_seg *stk) {
    return (size_t)(stk->end
                    - (uintptr_t)&stk->data[0]
                    - RED_ZONE_SIZE);
}
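
// Segment layout implied by the arithmetic above (the stack grows down
// from `end` toward `data`):
//
//     data -> [ red zone (RED_ZONE_SIZE) | usable Rust stack ] <- end
//
// so the space Rust code may use is end - data - RED_ZONE_SIZE bytes.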

void
rust_task::free_stack(stk_seg *stk) {
    LOGPTR(thread, "freeing stk segment", (uintptr_t)stk);
    total_stack_sz -= user_stack_size(stk);
    destroy_stack(&local_region, stk);
}

void
rust_task::new_stack(size_t requested_sz) {
    LOG(this, mem, "creating new stack for task %" PRIxPTR, this);
    if (stk) {
        ::check_stack_canary(stk);
    }

    // The minimum stack size, in bytes, of a Rust stack, excluding red zone
    size_t min_sz = thread->min_stack_size;

    // Try to reuse an existing stack segment
    if (stk != NULL && stk->prev != NULL) {
        size_t prev_sz = user_stack_size(stk->prev);
        if (min_sz <= prev_sz && requested_sz <= prev_sz) {
            LOG(this, mem, "reusing existing stack");
            stk = stk->prev;
            A(thread, stk->prev == NULL, "Bogus stack ptr");
            return;
        } else {
            LOG(this, mem, "existing stack is not big enough");
            free_stack(stk->prev);
            stk->prev = NULL;
        }
    }

    // The size of the current stack segment, excluding red zone
    size_t current_sz = 0;
    if (stk != NULL) {
        current_sz = user_stack_size(stk);
    }
    // The calculated size of the new stack, excluding red zone
    size_t rust_stk_sz = get_next_stack_size(min_sz,
                                             current_sz, requested_sz);

    if (total_stack_sz + rust_stk_sz > thread->env->max_stack_size) {
        LOG_ERR(this, task, "task %" PRIxPTR " ran out of stack", this);
        fail();
    }

    size_t sz = rust_stk_sz + RED_ZONE_SIZE;
    stk_seg *new_stk = create_stack(&local_region, sz);
    LOGPTR(thread, "new stk", (uintptr_t)new_stk);
    new_stk->prev = NULL;
    new_stk->next = stk;
    LOGPTR(thread, "stk end", new_stk->end);

    stk = new_stk;
    total_stack_sz += user_stack_size(new_stk);
}

void
rust_task::del_stack() {
    stk_seg *old_stk = stk;
    ::check_stack_canary(old_stk);

    stk = old_stk->next;

    bool delete_stack = false;
    if (stk != NULL) {
        // Don't actually delete this stack. Save it to reuse later,
        // preventing the pathological case where we repeatedly reallocate
        // the stack for the next frame.
        stk->prev = old_stk;
    } else {
        // This is the last stack, delete it.
        delete_stack = true;
    }

    // Delete the previous previous stack
    if (old_stk->prev != NULL) {
        free_stack(old_stk->prev);
        old_stk->prev = NULL;
    }

    if (delete_stack) {
        free_stack(old_stk);
        A(thread, total_stack_sz == 0, "Stack size should be 0");
    }
}

void *
rust_task::next_stack(size_t stk_sz, void *args_addr, size_t args_sz) {
    stk_seg *maybe_next_stack = NULL;
    if (stk != NULL) {
        maybe_next_stack = stk->prev;
    }

    new_stack(stk_sz + args_sz);
    A(thread, stk->end - (uintptr_t)stk->data >= stk_sz + args_sz,
      "Did not receive enough stack");
    uint8_t *new_sp = (uint8_t*)stk->end;
    // Push the function arguments to the new stack
    new_sp = align_down(new_sp - args_sz);

    // When reusing a stack segment we need to tell valgrind that this area of
    // memory is accessible before writing to it, because the act of popping
    // the stack previously made all of the stack inaccessible.
    if (maybe_next_stack == stk) {
        // I don't know exactly where the region ends that valgrind needs us
        // to mark accessible. On x86_64 these extra bytes aren't needed, but
        // on i386 we get errors without.
        int fudge_bytes = 16;
        reuse_valgrind_stack(stk, new_sp - fudge_bytes);
    }

    memcpy(new_sp, args_addr, args_sz);
    A(thread, rust_task_thread::get_task() == this,
      "Recording the stack limit for the wrong thread");
    record_stack_limit();
    return new_sp;
}

void
rust_task::prev_stack() {
    del_stack();
    A(thread, rust_task_thread::get_task() == this,
      "Recording the stack limit for the wrong thread");
    record_stack_limit();
}

void
rust_task::record_stack_limit() {
    I(thread, stk);
    // The function prolog compares the amount of stack needed to the end of
    // the stack. As an optimization, when the frame size is less than 256
    // bytes, it will simply compare %esp to the stack limit instead of
    // subtracting the frame size. As a result we need our stack limit to
    // account for those 256 bytes.
    const unsigned LIMIT_OFFSET = 256;
    A(thread,
      (uintptr_t)stk->end - RED_ZONE_SIZE
      - (uintptr_t)stk->data >= LIMIT_OFFSET,
      "Stack size must be greater than LIMIT_OFFSET");
    record_sp(stk->data + LIMIT_OFFSET + RED_ZONE_SIZE);
}
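
// Worked example (addresses are illustrative): on 32-bit Linux, where
// RED_ZONE_SIZE is 2 KB, a segment with stk->data at 0x1000 records a limit
// of 0x1000 + 256 + 0x800 = 0x1900; a frame needing fewer than 256 bytes
// can then compare %esp directly against 0x1900 and still be guaranteed
// room above the red zone.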

static bool
sp_in_stk_seg(uintptr_t sp, stk_seg *stk) {
    // Not positive these bounds for sp are correct. I think that the first
    // possible value for esp on a new stack is stk->end, which points to the
    // address before the first value to be pushed onto a new stack. The last
    // possible address we can push data to is stk->data. Regardless, there's
    // so much slop at either end that we should never hit one of these
    // boundaries.
    return (uintptr_t)stk->data <= sp && sp <= stk->end;
}

struct reset_args {
    rust_task *task;
    uintptr_t sp;
};

void
reset_stack_limit_on_c_stack(reset_args *args) {
    rust_task *task = args->task;
    uintptr_t sp = args->sp;
    while (!sp_in_stk_seg(sp, task->stk)) {
        task->del_stack();
        A(task->thread, task->stk != NULL,
          "Failed to find the current stack");
    }
    task->record_stack_limit();
}

/*
Called by landing pads during unwinding to figure out which
stack segment we are currently running on, delete the others,
and record the stack limit (which was not restored when unwinding
through __morestack).
 */
void
rust_task::reset_stack_limit() {
    I(thread, on_rust_stack());
    uintptr_t sp = get_sp();
    // Have to do the rest on the C stack because it involves
    // freeing stack segments, logging, etc.
    reset_args ra = {this, sp};
    call_on_c_stack(&ra, (void*)reset_stack_limit_on_c_stack);
}

void
rust_task::check_stack_canary() {
    ::check_stack_canary(stk);
}

void
rust_task::delete_all_stacks() {
    I(thread, !on_rust_stack());
    // Delete all the stacks. There may be more than one if the task failed
    // and no landing pads stopped to clean up.
    while (stk != NULL) {
        del_stack();
    }
}

void
rust_task::config_notify(chan_handle chan) {
    notify_enabled = true;
    notify_chan = chan;
}

/*
Returns true if we're currently running on the Rust stack
 */
bool
rust_task::on_rust_stack() {
    if (stk == NULL) {
        // This only happens during construction
        return false;
    }

    uintptr_t sp = get_sp();
    bool in_first_segment = sp_in_stk_seg(sp, stk);
    if (in_first_segment) {
        return true;
    } else if (stk->next != NULL) {
        // This happens only when calling the upcall to delete
        // a stack segment
        bool in_second_segment = sp_in_stk_seg(sp, stk->next);
        return in_second_segment;
    } else {
        return false;
    }
}

//
// Local Variables:
// mode: C++
// fill-column: 78;
// indent-tabs-mode: nil
// c-basic-offset: 4
// buffer-file-coding-system: utf-8-unix
// End:
//