// rust/src/rt/rust_task.h
/*
*
*/
#ifndef RUST_TASK_H
#define RUST_TASK_H
#include <map>
#include "util/array_list.h"
#include "context.h"
#include "rust_debug.h"
#include "rust_internal.h"
#include "rust_kernel.h"
#include "rust_obstack.h"
#include "boxed_region.h"
#include "rust_stack.h"
// Corresponds to the rust chan (currently _chan) type.
// Names a port by the id of the task that owns it plus the port's own id.
struct chan_handle {
    rust_task_id task;
    rust_port_id port;
};
struct rust_box;

// Per-frame table of glue-function offsets (mark / drop / reloc), as laid
// down by the compiler for a stack frame.
struct frame_glue_fns {
    uintptr_t mark_glue_off;
    uintptr_t drop_glue_off;
    uintptr_t reloc_glue_off;
};
// Result code of a finished task; mirrors std::lib::task::task_result.
// std::lib::task::task_result
typedef unsigned long task_result;
#define tr_success 0 // task ran to completion
#define tr_failure 1 // task failed / unwound
// Opaque here; used by the task_start_wrapper friend declared in rust_task.
struct spawn_args;
// std::lib::task::task_notification
//
// The Rust-side type is currently a unary tag, so only the payload fields
// appear here.
struct task_notification {
    rust_task_id id;
    task_result result; // task_result
};
struct
2011-07-27 16:51:25 -05:00
rust_task : public kernel_owned<rust_task>, rust_cond
{
2011-08-05 17:16:48 -05:00
RUST_ATOMIC_REFCOUNT();
2011-07-27 16:51:25 -05:00
2012-02-08 19:46:12 -06:00
rust_task_id id;
bool notify_enabled;
chan_handle notify_chan;
context ctx;
stk_seg *stk;
uintptr_t runtime_sp; // Runtime sp while task running.
rust_scheduler *sched;
rust_task_thread *thread;
rust_crate_cache *cache;
// Fields known only to the runtime.
rust_kernel *kernel;
const char *const name;
rust_task_list *state;
rust_cond *cond;
2010-08-18 01:26:43 -05:00
const char *cond_name;
rust_task *supervisor; // Parent-link for failure propagation.
int32_t list_index;
2011-08-08 20:09:42 -05:00
rust_port_id next_port_id;
// Rendezvous pointer for receiving data when blocked on a port. If we're
// trying to read data and no data is available on any incoming channel,
// we block on the port, and yield control to the scheduler. Since, we
// were not able to read anything, we remember the location where the
// result should go in the rendezvous_ptr, and let the sender write to
// that location before waking us up.
uintptr_t* rendezvous_ptr;
memory_region local_region;
boxed_region boxed;
// Indicates that fail() has been called and we are cleaning up.
// We use this to suppress the "killed" flag during calls to yield.
bool unwinding;
2011-09-14 16:20:13 -05:00
// Indicates that the task was killed and needs to unwind
bool killed;
bool propagate_failure;
lock_and_signal lock;
2011-08-08 20:09:42 -05:00
hash_map<rust_port_id, rust_port *> port_table;
rust_obstack dynastack;
uint32_t cc_counter;
2011-09-20 17:35:14 -05:00
debug::task_debug_info debug;
// The amount of stack we're using, excluding red zones
size_t total_stack_sz;
private:
// The stack used for running C code, borrowed from the scheduler thread
stk_seg *c_stack;
uintptr_t next_c_sp;
2012-02-09 13:51:34 -06:00
uintptr_t next_rust_sp;
// Called when the atomic refcount reaches zero
void delete_this();
void new_stack(size_t sz);
void del_stack();
void free_stack(stk_seg *stk);
size_t get_next_stack_size(size_t min, size_t current, size_t requested);
void return_c_stack();
friend void task_start_wrapper(spawn_args *a);
public:
// Only a pointer to 'name' is kept, so it must live as long as this task.
rust_task(rust_task_thread *thread,
rust_task_list *state,
rust_task *spawner,
const char *name,
size_t init_stack_sz);
2011-12-30 22:46:08 -06:00
void start(spawn_fn spawnee_fn,
rust_opaque_box *env,
2012-01-04 22:11:39 -06:00
void *args);
void start();
bool running();
bool blocked();
bool blocked_on(rust_cond *cond);
bool dead();
void *malloc(size_t sz, const char *tag, type_desc *td=0);
void *realloc(void *data, size_t sz);
void free(void *p);
void transition(rust_task_list *src, rust_task_list *dst);
2010-08-18 01:26:43 -05:00
void block(rust_cond *on, const char* name);
void wakeup(rust_cond *from);
void die();
void unblock();
2010-10-11 18:40:18 -05:00
// Print a backtrace, if the "bt" logging option is on.
void backtrace();
// Yields control to the scheduler. Called from the Rust stack
void yield(bool *killed);
// Fail this task (assuming caller-on-stack is different task).
void kill();
// Fail self, assuming caller-on-stack is this task.
void fail();
void conclude_failure();
void fail_parent();
// Disconnect from our supervisor.
void unsupervise();
frame_glue_fns *get_frame_glue_fns(uintptr_t fp);
rust_crate_cache * get_crate_cache();
void *calloc(size_t size, const char *tag);
2011-08-08 20:09:42 -05:00
rust_port_id register_port(rust_port *port);
void release_port(rust_port_id id);
rust_port *get_port_by_id(rust_port_id id);
// Use this function sparingly. Depending on the ref count is generally
// not at all safe.
intptr_t get_ref_count() const { return ref_count; }
void notify(bool success);
2011-11-30 19:54:11 -06:00
void *next_stack(size_t stk_sz, void *args_addr, size_t args_sz);
void prev_stack();
void record_stack_limit();
void reset_stack_limit();
bool on_rust_stack();
void check_stack_canary();
void config_notify(chan_handle chan);
2012-02-08 23:42:04 -06:00
void call_on_c_stack(void *args, void *fn_ptr);
2012-02-09 13:51:34 -06:00
void call_on_rust_stack(void *args, void *fn_ptr);
};
2012-02-10 00:28:52 -06:00
// This stuff is on the stack-switching fast path
// Get a rough approximation of the current stack pointer
// NOTE(review): implemented outside this file (presumably per-platform
// assembly) — confirm.
extern "C" uintptr_t get_sp();
// This is the function that switches stacks by calling another function with
// a single void* argument while changing the stack pointer. It has a funny
// name because gdb doesn't normally like to backtrace through split stacks
// (thinks it indicates a bug), but has a special case to allow functions
// named __morestack to move the stack pointer around.
extern "C" void __morestack(void *args, void *fn_ptr, uintptr_t stack_ptr);
// Clamp a saved stack pointer down to a safe, aligned value before
// resuming on that stack.
inline static uintptr_t
sanitize_next_sp(uintptr_t next_sp) {
    // Since I'm not precisely sure where the next stack pointer sits in
    // relation to where the context switch actually happened, nor in relation
    // to the amount of stack needed for calling __morestack, some extra
    // slack bytes are subtracted here.
    // FIXME: On the rust stack this potentially puts us quite far into the
    // red zone. Might want to just allocate a new rust stack every time we
    // switch back to rust.
    const uintptr_t slack = 16;
    return align_down(next_sp - slack);
}
inline void
rust_task::call_on_c_stack(void *args, void *fn_ptr) {
2012-02-10 00:28:52 -06:00
// Too expensive to check
// I(thread, on_rust_stack());
next_rust_sp = get_sp();
bool borrowed_a_c_stack = false;
uintptr_t sp;
if (c_stack == NULL) {
c_stack = thread->borrow_c_stack();
next_c_sp = align_down(c_stack->end);
sp = next_c_sp;
borrowed_a_c_stack = true;
} else {
sp = sanitize_next_sp(next_c_sp);
}
__morestack(args, fn_ptr, sp);
// Note that we may not actually get here if we threw an exception,
// in which case we will return the c stack when the exception is caught.
if (borrowed_a_c_stack) {
return_c_stack();
}
}
inline void
rust_task::call_on_rust_stack(void *args, void *fn_ptr) {
2012-02-10 00:28:52 -06:00
// Too expensive to check
// I(thread, !on_rust_stack());
I(thread, next_rust_sp);
next_c_sp = get_sp();
uintptr_t sp = sanitize_next_sp(next_rust_sp);
__morestack(args, fn_ptr, sp);
}
inline void
rust_task::return_c_stack() {
2012-02-10 00:28:52 -06:00
// Too expensive to check
// I(thread, on_rust_stack());
I(thread, c_stack != NULL);
thread->return_c_stack(c_stack);
c_stack = NULL;
next_c_sp = 0;
}
//
// Local Variables:
// mode: C++
// fill-column: 78;
// indent-tabs-mode: nil
// c-basic-offset: 4
// buffer-file-coding-system: utf-8-unix
// End:
//
#endif /* RUST_TASK_H */