2010-07-19 16:05:18 -05:00
|
|
|
/*
|
|
|
|
*
|
|
|
|
*/
|
|
|
|
|
|
|
|
#ifndef RUST_TASK_H
|
|
|
|
#define RUST_TASK_H
|
2010-07-28 18:24:50 -05:00
|
|
|
|
2011-09-20 17:35:14 -05:00
|
|
|
#include <map>
|
|
|
|
|
2010-07-28 18:24:50 -05:00
|
|
|
#include "util/array_list.h"
|
|
|
|
|
2011-05-31 19:44:54 -05:00
|
|
|
#include "context.h"
|
2011-09-23 13:42:20 -05:00
|
|
|
#include "rust_debug.h"
|
|
|
|
#include "rust_internal.h"
|
|
|
|
#include "rust_kernel.h"
|
2012-02-01 20:52:08 -06:00
|
|
|
#include "boxed_region.h"
|
2012-02-08 17:28:25 -06:00
|
|
|
#include "rust_stack.h"
|
2012-02-15 00:23:16 -06:00
|
|
|
#include "rust_port_selector.h"
|
2011-05-31 19:44:54 -05:00
|
|
|
|
2012-03-21 16:00:37 -05:00
|
|
|
// The amount of extra space at the end of each stack segment, available
// to the rt, compiler and dynamic linker for running small functions
// FIXME: We want this to be 128 but need to slim the red zone calls down
#define RZ_LINUX_32 (1024*2)
#define RZ_LINUX_64 (1024*2)
#define RZ_MAC_32   (1024*20)
#define RZ_MAC_64   (1024*20)
#define RZ_WIN_32   (1024*20)
// NOTE: RZ_WIN_64 was previously missing although the __WIN32__/__x86_64__
// branch below references it, which broke 64-bit Windows builds.
#define RZ_WIN_64   (1024*20)
#define RZ_BSD_32   (1024*20)
#define RZ_BSD_64   (1024*20)

// Select the platform-specific red zone size.
#ifdef __linux__
#ifdef __i386__
#define RED_ZONE_SIZE RZ_LINUX_32
#endif
#ifdef __x86_64__
#define RED_ZONE_SIZE RZ_LINUX_64
#endif
#endif
#ifdef __APPLE__
#ifdef __i386__
#define RED_ZONE_SIZE RZ_MAC_32
#endif
#ifdef __x86_64__
#define RED_ZONE_SIZE RZ_MAC_64
#endif
#endif
#ifdef __WIN32__
#ifdef __i386__
#define RED_ZONE_SIZE RZ_WIN_32
#endif
#ifdef __x86_64__
#define RED_ZONE_SIZE RZ_WIN_64
#endif
#endif
#ifdef __FreeBSD__
#ifdef __i386__
#define RED_ZONE_SIZE RZ_BSD_32
#endif
#ifdef __x86_64__
#define RED_ZONE_SIZE RZ_BSD_64
#endif
#endif
|
|
|
|
|
2011-09-19 12:34:27 -05:00
|
|
|
struct rust_box;
|
|
|
|
|
2011-06-27 21:15:03 -05:00
|
|
|
// Per-frame glue-function offsets. NOTE(review): presumably offsets into a
// compiler-emitted table for mark/drop/reloc glue — confirm against the
// compiler's frame layout before relying on this description.
struct frame_glue_fns {
    uintptr_t mark_glue_off;
    uintptr_t drop_glue_off;
    uintptr_t reloc_glue_off;
};
|
|
|
|
|
2011-08-16 18:39:47 -05:00
|
|
|
// std::lib::task::task_result
// Mirrors the library-side result enum: 0 = success, 1 = failure.
typedef unsigned long task_result;
#define tr_success 0
#define tr_failure 1
|
2011-08-16 18:39:47 -05:00
|
|
|
|
2012-02-09 03:13:32 -06:00
|
|
|
struct spawn_args;
|
2012-02-14 02:43:45 -06:00
|
|
|
struct cleanup_args;
|
2012-02-23 01:50:27 -06:00
|
|
|
struct reset_args;
|
2012-03-21 15:17:24 -05:00
|
|
|
struct new_stack_args;
|
2012-02-09 03:13:32 -06:00
|
|
|
|
2011-08-16 18:39:47 -05:00
|
|
|
// std::lib::task::task_notification
|
|
|
|
//
|
|
|
|
// since it's currently a unary tag, we only add the fields.
|
|
|
|
struct task_notification {
|
|
|
|
rust_task_id id;
|
|
|
|
task_result result; // task_result
|
|
|
|
};
|
|
|
|
|
2010-07-19 16:05:18 -05:00
|
|
|
struct
|
2011-07-27 16:51:25 -05:00
|
|
|
rust_task : public kernel_owned<rust_task>, rust_cond
|
2010-07-19 16:05:18 -05:00
|
|
|
{
|
2011-08-05 17:16:48 -05:00
|
|
|
RUST_ATOMIC_REFCOUNT();
|
2011-07-27 16:51:25 -05:00
|
|
|
|
2012-02-08 19:46:12 -06:00
|
|
|
rust_task_id id;
|
|
|
|
bool notify_enabled;
|
2012-03-15 01:08:47 -05:00
|
|
|
rust_port_id notify_port;
|
2012-02-08 19:46:12 -06:00
|
|
|
|
2011-09-06 16:03:20 -05:00
|
|
|
context ctx;
|
2010-07-19 16:05:18 -05:00
|
|
|
stk_seg *stk;
|
|
|
|
uintptr_t runtime_sp; // Runtime sp while task running.
|
2012-02-03 17:18:58 -06:00
|
|
|
rust_scheduler *sched;
|
2012-02-03 14:47:01 -06:00
|
|
|
rust_task_thread *thread;
|
2010-07-19 16:05:18 -05:00
|
|
|
|
|
|
|
// Fields known only to the runtime.
|
2011-06-24 18:50:06 -05:00
|
|
|
rust_kernel *kernel;
|
2010-08-08 21:24:35 -05:00
|
|
|
const char *const name;
|
2010-09-10 03:21:29 -05:00
|
|
|
int32_t list_index;
|
2010-07-19 16:05:18 -05:00
|
|
|
|
|
|
|
// Rendezvous pointer for receiving data when blocked on a port. If we're
|
|
|
|
// trying to read data and no data is available on any incoming channel,
|
|
|
|
// we block on the port, and yield control to the scheduler. Since, we
|
2010-07-28 14:36:59 -05:00
|
|
|
// were not able to read anything, we remember the location where the
|
2010-07-19 16:05:18 -05:00
|
|
|
// result should go in the rendezvous_ptr, and let the sender write to
|
|
|
|
// that location before waking us up.
|
|
|
|
uintptr_t* rendezvous_ptr;
|
|
|
|
|
2011-06-27 21:15:03 -05:00
|
|
|
memory_region local_region;
|
2012-02-01 20:52:08 -06:00
|
|
|
boxed_region boxed;
|
2011-06-27 21:15:03 -05:00
|
|
|
|
2012-01-09 15:47:37 -06:00
|
|
|
// Indicates that fail() has been called and we are cleaning up.
|
|
|
|
// We use this to suppress the "killed" flag during calls to yield.
|
|
|
|
bool unwinding;
|
|
|
|
|
2011-07-23 21:03:02 -05:00
|
|
|
bool propagate_failure;
|
2011-07-14 21:39:53 -05:00
|
|
|
|
2011-09-23 20:30:22 -05:00
|
|
|
uint32_t cc_counter;
|
2011-09-20 17:35:14 -05:00
|
|
|
|
2011-09-23 13:42:20 -05:00
|
|
|
debug::task_debug_info debug;
|
|
|
|
|
2012-01-11 14:37:09 -06:00
|
|
|
// The amount of stack we're using, excluding red zones
|
|
|
|
size_t total_stack_sz;
|
|
|
|
|
2012-02-07 01:38:22 -06:00
|
|
|
private:
|
2012-02-09 03:13:32 -06:00
|
|
|
|
2012-03-03 01:40:27 -06:00
|
|
|
// Protects state, cond, cond_name
|
2012-03-02 23:22:42 -06:00
|
|
|
lock_and_signal state_lock;
|
2012-03-17 20:12:15 -05:00
|
|
|
rust_task_state state;
|
2012-03-03 01:40:27 -06:00
|
|
|
rust_cond *cond;
|
|
|
|
const char *cond_name;
|
2012-03-02 23:22:42 -06:00
|
|
|
|
2012-03-02 22:55:40 -06:00
|
|
|
// Protects the killed flag
|
|
|
|
lock_and_signal kill_lock;
|
2012-02-14 02:43:45 -06:00
|
|
|
// Indicates that the task was killed and needs to unwind
|
|
|
|
bool killed;
|
|
|
|
// Indicates that we've called back into Rust from C
|
|
|
|
bool reentered_rust_stack;
|
|
|
|
|
2012-02-09 03:13:32 -06:00
|
|
|
// The stack used for running C code, borrowed from the scheduler thread
|
|
|
|
stk_seg *c_stack;
|
|
|
|
uintptr_t next_c_sp;
|
2012-02-09 13:51:34 -06:00
|
|
|
uintptr_t next_rust_sp;
|
2012-02-09 03:13:32 -06:00
|
|
|
|
2012-02-15 00:23:16 -06:00
|
|
|
rust_port_selector port_selector;
|
|
|
|
|
2012-03-02 18:33:24 -06:00
|
|
|
lock_and_signal supervisor_lock;
|
|
|
|
rust_task *supervisor; // Parent-link for failure propagation.
|
|
|
|
|
2012-02-07 01:38:22 -06:00
|
|
|
// Called when the atomic refcount reaches zero
|
|
|
|
void delete_this();
|
2012-02-08 20:29:15 -06:00
|
|
|
|
2012-03-21 15:17:24 -05:00
|
|
|
void new_stack_fast(size_t requested_sz);
|
|
|
|
void new_stack(size_t requested_sz);
|
2012-02-08 20:29:15 -06:00
|
|
|
void free_stack(stk_seg *stk);
|
|
|
|
size_t get_next_stack_size(size_t min, size_t current, size_t requested);
|
|
|
|
|
2012-02-09 03:13:32 -06:00
|
|
|
void return_c_stack();
|
|
|
|
|
2012-03-17 20:12:15 -05:00
|
|
|
void transition(rust_task_state src, rust_task_state dst,
|
2012-03-03 04:36:53 -06:00
|
|
|
rust_cond *cond, const char* cond_name);
|
|
|
|
|
2012-03-03 22:50:11 -06:00
|
|
|
bool must_fail_from_being_killed_unlocked();
|
|
|
|
|
2012-02-09 03:13:32 -06:00
|
|
|
friend void task_start_wrapper(spawn_args *a);
|
2012-02-14 02:43:45 -06:00
|
|
|
friend void cleanup_task(cleanup_args *a);
|
2012-02-23 01:50:27 -06:00
|
|
|
friend void reset_stack_limit_on_c_stack(reset_args *a);
|
2012-03-21 15:17:24 -05:00
|
|
|
friend void new_stack_slow(new_stack_args *a);
|
2012-02-09 03:13:32 -06:00
|
|
|
|
2012-02-07 01:38:22 -06:00
|
|
|
public:
|
|
|
|
|
2010-08-08 21:24:35 -05:00
|
|
|
// Only a pointer to 'name' is kept, so it must live as long as this task.
|
2012-02-03 14:47:01 -06:00
|
|
|
rust_task(rust_task_thread *thread,
|
2012-03-17 20:12:15 -05:00
|
|
|
rust_task_state state,
|
2010-08-08 21:24:35 -05:00
|
|
|
rust_task *spawner,
|
2012-01-29 23:20:36 -06:00
|
|
|
const char *name,
|
|
|
|
size_t init_stack_sz);
|
2010-09-07 20:39:07 -05:00
|
|
|
|
2011-12-30 22:46:08 -06:00
|
|
|
void start(spawn_fn spawnee_fn,
|
2012-02-01 20:52:08 -06:00
|
|
|
rust_opaque_box *env,
|
2012-01-04 22:11:39 -06:00
|
|
|
void *args);
|
2011-08-10 20:48:57 -05:00
|
|
|
void start();
|
2010-07-19 16:05:18 -05:00
|
|
|
bool running();
|
|
|
|
bool blocked();
|
|
|
|
bool blocked_on(rust_cond *cond);
|
|
|
|
bool dead();
|
|
|
|
|
2011-07-18 14:02:26 -05:00
|
|
|
void *malloc(size_t sz, const char *tag, type_desc *td=0);
|
2012-02-03 02:34:42 -06:00
|
|
|
void *realloc(void *data, size_t sz);
|
|
|
|
void free(void *p);
|
2010-07-19 16:05:18 -05:00
|
|
|
|
2012-03-17 20:12:15 -05:00
|
|
|
void set_state(rust_task_state state,
|
2012-03-03 04:36:53 -06:00
|
|
|
rust_cond *cond, const char* cond_name);
|
2010-07-19 16:05:18 -05:00
|
|
|
|
2012-03-03 22:50:11 -06:00
|
|
|
bool block(rust_cond *on, const char* name);
|
2010-07-19 16:05:18 -05:00
|
|
|
void wakeup(rust_cond *from);
|
|
|
|
void die();
|
|
|
|
|
2010-10-11 18:40:18 -05:00
|
|
|
// Print a backtrace, if the "bt" logging option is on.
|
|
|
|
void backtrace();
|
|
|
|
|
2012-02-02 17:48:08 -06:00
|
|
|
// Yields control to the scheduler. Called from the Rust stack
|
|
|
|
void yield(bool *killed);
|
2010-08-11 23:23:34 -05:00
|
|
|
|
2010-07-19 16:05:18 -05:00
|
|
|
// Fail this task (assuming caller-on-stack is different task).
|
|
|
|
void kill();
|
|
|
|
|
2012-02-14 02:43:45 -06:00
|
|
|
// Indicates that we've been killed and now is an apropriate
|
|
|
|
// time to fail as a result
|
|
|
|
bool must_fail_from_being_killed();
|
|
|
|
|
2010-07-19 16:05:18 -05:00
|
|
|
// Fail self, assuming caller-on-stack is this task.
|
2011-07-13 15:43:35 -05:00
|
|
|
void fail();
|
2011-09-06 20:31:41 -05:00
|
|
|
void conclude_failure();
|
2011-09-14 17:26:59 -05:00
|
|
|
void fail_parent();
|
2010-07-19 16:05:18 -05:00
|
|
|
|
|
|
|
// Disconnect from our supervisor.
|
|
|
|
void unsupervise();
|
|
|
|
|
|
|
|
frame_glue_fns *get_frame_glue_fns(uintptr_t fp);
|
2011-06-20 19:19:50 -05:00
|
|
|
|
2011-07-18 14:02:26 -05:00
|
|
|
void *calloc(size_t size, const char *tag);
|
2011-06-29 20:47:47 -05:00
|
|
|
|
2011-08-15 13:34:12 -05:00
|
|
|
// Use this function sparingly. Depending on the ref count is generally
|
|
|
|
// not at all safe.
|
|
|
|
intptr_t get_ref_count() const { return ref_count; }
|
2011-08-15 18:54:02 -05:00
|
|
|
|
2011-11-13 18:36:47 -06:00
|
|
|
void notify(bool success);
|
2011-11-30 19:54:11 -06:00
|
|
|
|
2012-02-08 20:04:14 -06:00
|
|
|
void *next_stack(size_t stk_sz, void *args_addr, size_t args_sz);
|
|
|
|
void prev_stack();
|
2011-12-01 17:26:42 -06:00
|
|
|
void record_stack_limit();
|
2011-12-06 18:26:47 -06:00
|
|
|
void reset_stack_limit();
|
2011-12-18 18:59:49 -06:00
|
|
|
bool on_rust_stack();
|
2011-12-20 13:20:54 -06:00
|
|
|
void check_stack_canary();
|
2012-03-02 17:14:52 -06:00
|
|
|
void delete_all_stacks();
|
2012-02-08 18:22:38 -06:00
|
|
|
|
2012-03-15 01:08:47 -05:00
|
|
|
void config_notify(rust_port_id port);
|
2012-02-08 23:42:04 -06:00
|
|
|
|
|
|
|
void call_on_c_stack(void *args, void *fn_ptr);
|
2012-02-09 13:51:34 -06:00
|
|
|
void call_on_rust_stack(void *args, void *fn_ptr);
|
2012-02-14 13:58:27 -06:00
|
|
|
bool have_c_stack() { return c_stack != NULL; }
|
2012-02-15 00:23:16 -06:00
|
|
|
|
|
|
|
rust_port_selector *get_port_selector() { return &port_selector; }
|
2012-03-02 23:22:42 -06:00
|
|
|
|
2012-03-17 20:12:15 -05:00
|
|
|
rust_task_state get_state() { return state; }
|
2012-03-03 01:40:27 -06:00
|
|
|
rust_cond *get_cond() { return cond; }
|
|
|
|
const char *get_cond_name() { return cond_name; }
|
2012-03-21 02:31:40 -05:00
|
|
|
|
|
|
|
void cleanup_after_turn();
|
2010-07-19 16:05:18 -05:00
|
|
|
};
|
|
|
|
|
2012-02-10 00:28:52 -06:00
|
|
|
// This stuff is on the stack-switching fast path
|
|
|
|
|
2012-02-10 00:15:15 -06:00
|
|
|
// Get a rough approximation of the current stack pointer
|
|
|
|
extern "C" uintptr_t get_sp();
|
|
|
|
|
|
|
|
// This is the function that switches stacks by calling another function with
|
|
|
|
// a single void* argument while changing the stack pointer. It has a funny
|
|
|
|
// name because gdb doesn't normally like to backtrace through split stacks
|
|
|
|
// (thinks it indicates a bug), but has a special case to allow functions
|
|
|
|
// named __morestack to move the stack pointer around.
|
|
|
|
extern "C" void __morestack(void *args, void *fn_ptr, uintptr_t stack_ptr);
|
|
|
|
|
|
|
|
inline static uintptr_t
|
|
|
|
sanitize_next_sp(uintptr_t next_sp) {
|
|
|
|
|
|
|
|
// Since I'm not precisely sure where the next stack pointer sits in
|
|
|
|
// relation to where the context switch actually happened, nor in relation
|
|
|
|
// to the amount of stack needed for calling __morestack I've added some
|
|
|
|
// extra bytes here.
|
|
|
|
|
|
|
|
// FIXME: On the rust stack this potentially puts is quite far into the
|
|
|
|
// red zone. Might want to just allocate a new rust stack every time we
|
|
|
|
// switch back to rust.
|
|
|
|
const uintptr_t padding = 16;
|
|
|
|
|
|
|
|
return align_down(next_sp - padding);
|
|
|
|
}
|
|
|
|
|
|
|
|
inline void
|
|
|
|
rust_task::call_on_c_stack(void *args, void *fn_ptr) {
|
2012-02-10 00:28:52 -06:00
|
|
|
// Too expensive to check
|
|
|
|
// I(thread, on_rust_stack());
|
2012-02-10 00:15:15 -06:00
|
|
|
|
2012-02-14 01:18:21 -06:00
|
|
|
uintptr_t prev_rust_sp = next_rust_sp;
|
2012-02-10 00:15:15 -06:00
|
|
|
next_rust_sp = get_sp();
|
|
|
|
|
|
|
|
bool borrowed_a_c_stack = false;
|
|
|
|
uintptr_t sp;
|
|
|
|
if (c_stack == NULL) {
|
|
|
|
c_stack = thread->borrow_c_stack();
|
|
|
|
next_c_sp = align_down(c_stack->end);
|
|
|
|
sp = next_c_sp;
|
|
|
|
borrowed_a_c_stack = true;
|
|
|
|
} else {
|
|
|
|
sp = sanitize_next_sp(next_c_sp);
|
|
|
|
}
|
|
|
|
|
|
|
|
__morestack(args, fn_ptr, sp);
|
|
|
|
|
|
|
|
// Note that we may not actually get here if we threw an exception,
|
|
|
|
// in which case we will return the c stack when the exception is caught.
|
|
|
|
if (borrowed_a_c_stack) {
|
|
|
|
return_c_stack();
|
|
|
|
}
|
2012-02-14 01:18:21 -06:00
|
|
|
|
|
|
|
next_rust_sp = prev_rust_sp;
|
2012-02-10 00:15:15 -06:00
|
|
|
}
|
|
|
|
|
|
|
|
inline void
|
|
|
|
rust_task::call_on_rust_stack(void *args, void *fn_ptr) {
|
2012-02-10 00:28:52 -06:00
|
|
|
// Too expensive to check
|
|
|
|
// I(thread, !on_rust_stack());
|
2012-02-10 00:15:15 -06:00
|
|
|
I(thread, next_rust_sp);
|
|
|
|
|
2012-02-14 02:43:45 -06:00
|
|
|
bool had_reentered_rust_stack = reentered_rust_stack;
|
|
|
|
reentered_rust_stack = true;
|
|
|
|
|
2012-02-14 01:18:21 -06:00
|
|
|
uintptr_t prev_c_sp = next_c_sp;
|
2012-02-10 00:15:15 -06:00
|
|
|
next_c_sp = get_sp();
|
|
|
|
|
|
|
|
uintptr_t sp = sanitize_next_sp(next_rust_sp);
|
|
|
|
|
|
|
|
__morestack(args, fn_ptr, sp);
|
2012-02-14 01:18:21 -06:00
|
|
|
|
|
|
|
next_c_sp = prev_c_sp;
|
2012-02-14 02:43:45 -06:00
|
|
|
reentered_rust_stack = had_reentered_rust_stack;
|
2012-02-10 00:15:15 -06:00
|
|
|
}
|
|
|
|
|
|
|
|
inline void
|
|
|
|
rust_task::return_c_stack() {
|
2012-02-10 00:28:52 -06:00
|
|
|
// Too expensive to check
|
|
|
|
// I(thread, on_rust_stack());
|
2012-02-10 00:15:15 -06:00
|
|
|
I(thread, c_stack != NULL);
|
|
|
|
thread->return_c_stack(c_stack);
|
|
|
|
c_stack = NULL;
|
|
|
|
next_c_sp = 0;
|
|
|
|
}
|
|
|
|
|
2012-03-21 16:00:37 -05:00
|
|
|
// NB: This runs on the Rust stack
|
|
|
|
inline void *
|
|
|
|
rust_task::next_stack(size_t stk_sz, void *args_addr, size_t args_sz) {
|
|
|
|
stk_seg *maybe_next_stack = NULL;
|
|
|
|
if (stk != NULL) {
|
|
|
|
maybe_next_stack = stk->prev;
|
|
|
|
}
|
|
|
|
|
|
|
|
new_stack_fast(stk_sz + args_sz);
|
|
|
|
A(thread, stk->end - (uintptr_t)stk->data >= stk_sz + args_sz,
|
|
|
|
"Did not receive enough stack");
|
|
|
|
uint8_t *new_sp = (uint8_t*)stk->end;
|
|
|
|
// Push the function arguments to the new stack
|
|
|
|
new_sp = align_down(new_sp - args_sz);
|
|
|
|
|
|
|
|
// When reusing a stack segment we need to tell valgrind that this area of
|
|
|
|
// memory is accessible before writing to it, because the act of popping
|
|
|
|
// the stack previously made all of the stack inaccessible.
|
|
|
|
if (maybe_next_stack == stk) {
|
|
|
|
// I don't know exactly where the region ends that valgrind needs us
|
|
|
|
// to mark accessible. On x86_64 these extra bytes aren't needed, but
|
|
|
|
// on i386 we get errors without.
|
|
|
|
int fudge_bytes = 16;
|
|
|
|
reuse_valgrind_stack(stk, new_sp - fudge_bytes);
|
|
|
|
}
|
|
|
|
|
|
|
|
memcpy(new_sp, args_addr, args_sz);
|
|
|
|
record_stack_limit();
|
|
|
|
return new_sp;
|
|
|
|
}
|
|
|
|
|
|
|
|
// The amount of stack in a segment available to Rust code
|
|
|
|
inline size_t
|
|
|
|
user_stack_size(stk_seg *stk) {
|
|
|
|
return (size_t)(stk->end
|
|
|
|
- (uintptr_t)&stk->data[0]
|
|
|
|
- RED_ZONE_SIZE);
|
|
|
|
}
|
|
|
|
|
|
|
|
struct new_stack_args {
|
|
|
|
rust_task *task;
|
|
|
|
size_t requested_sz;
|
|
|
|
};
|
|
|
|
|
|
|
|
void
|
|
|
|
new_stack_slow(new_stack_args *args);
|
|
|
|
|
|
|
|
// NB: This runs on the Rust stack
|
|
|
|
// This is the new stack fast path, in which we
|
|
|
|
// reuse the next cached stack segment
|
|
|
|
inline void
|
|
|
|
rust_task::new_stack_fast(size_t requested_sz) {
|
|
|
|
// The minimum stack size, in bytes, of a Rust stack, excluding red zone
|
|
|
|
size_t min_sz = thread->min_stack_size;
|
|
|
|
|
|
|
|
// Try to reuse an existing stack segment
|
|
|
|
if (stk != NULL && stk->next != NULL) {
|
|
|
|
size_t next_sz = user_stack_size(stk->next);
|
|
|
|
if (min_sz <= next_sz && requested_sz <= next_sz) {
|
|
|
|
stk = stk->next;
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
new_stack_args args = {this, requested_sz};
|
|
|
|
call_on_c_stack(&args, (void*)new_stack_slow);
|
|
|
|
}
|
|
|
|
|
|
|
|
// NB: This runs on the Rust stack
|
|
|
|
inline void
|
|
|
|
rust_task::prev_stack() {
|
|
|
|
// We're not going to actually delete anything now because that would
|
|
|
|
// require switching to the C stack and be costly. Instead we'll just move
|
|
|
|
// up the link list and clean up later, either in new_stack or after our
|
|
|
|
// turn ends on the scheduler.
|
|
|
|
stk = stk->prev;
|
|
|
|
record_stack_limit();
|
|
|
|
}
|
|
|
|
|
|
|
|
extern "C" CDECL void
|
|
|
|
record_sp(void *limit);
|
|
|
|
|
|
|
|
inline void
|
|
|
|
rust_task::record_stack_limit() {
|
|
|
|
I(thread, stk);
|
|
|
|
// The function prolog compares the amount of stack needed to the end of
|
|
|
|
// the stack. As an optimization, when the frame size is less than 256
|
|
|
|
// bytes, it will simply compare %esp to to the stack limit instead of
|
|
|
|
// subtracting the frame size. As a result we need our stack limit to
|
|
|
|
// account for those 256 bytes.
|
|
|
|
const unsigned LIMIT_OFFSET = 256;
|
|
|
|
A(thread,
|
|
|
|
(uintptr_t)stk->end - RED_ZONE_SIZE
|
|
|
|
- (uintptr_t)stk->data >= LIMIT_OFFSET,
|
|
|
|
"Stack size must be greater than LIMIT_OFFSET");
|
|
|
|
record_sp(stk->data + LIMIT_OFFSET + RED_ZONE_SIZE);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2010-07-28 02:36:35 -05:00
|
|
|
//
// Local Variables:
// mode: C++
// fill-column: 78;
// indent-tabs-mode: nil
// c-basic-offset: 4
// buffer-file-coding-system: utf-8-unix
// End:
//
|
2010-07-19 16:05:18 -05:00
|
|
|
|
|
|
|
#endif /* RUST_TASK_H */
|