/*
 * rust_task.h - the task structure and related types for the Rust runtime.
 */
|
#ifndef RUST_TASK_H
#define RUST_TASK_H

#include <map>

#include "util/array_list.h"

#include "context.h"
#include "rust_debug.h"
#include "rust_internal.h"
#include "rust_kernel.h"
#include "rust_obstack.h"
#include "boxed_region.h"
// Corresponds to the rust chan (currently _chan) type.
// Identifies a port within a specific task; layout must match the
// Rust-side chan representation, so do not reorder the fields.
struct chan_handle {
    rust_task_id task;   // the task that owns the port
    rust_port_id port;   // the port id within that task
};

struct rust_box;

// One segment of a task's (segmented) stack. Segments are kept on a
// doubly-linked list via prev/next so the stack can grow and shrink.
struct stk_seg {
    stk_seg *prev;             // older segment, or NULL at the bottom
    stk_seg *next;             // newer segment, or NULL at the top
    uintptr_t end;             // address one past the end of 'data'
    unsigned int valgrind_id;  // presumably the id from VALGRIND_STACK_REGISTER
                               // (used to deregister) — TODO confirm
#ifndef _LP64
    uint32_t pad;              // 32-bit only: padding, presumably to keep
                               // 'data' 8-byte aligned — TODO confirm
#endif

    uint8_t data[];            // the stack memory itself (flexible array member)
};
// Per-frame glue-function offsets. NOTE(review): the names suggest these
// are offsets to mark (GC), drop (destructor), and relocation glue for a
// stack frame — verify against the code that consumes
// get_frame_glue_fns() before relying on that.
struct frame_glue_fns {
    uintptr_t mark_glue_off;
    uintptr_t drop_glue_off;
    uintptr_t reloc_glue_off;
};
// Portions of the task structure that are accessible from the standard
// library. This struct must agree with the std::task::rust_task record,
// so field order and sizes are ABI: do not reorder or resize.
struct rust_task_user {
    rust_task_id id;              // kernel-assigned task id
    intptr_t notify_enabled;      // this is way more bits than necessary, but it
                                  // simplifies the alignment.
    chan_handle notify_chan;      // channel that receives a task_notification
                                  // when this task ends (see notify(), below)
    uintptr_t rust_sp;            // Saved sp when not running.
};
// std::lib::task::task_result
// C-side mirror of the library's task_result type; values must match.
typedef unsigned long task_result;
#define tr_success 0
#define tr_failure 1

// std::lib::task::task_notification
//
// since it's currently a unary tag, we only add the fields.
// Sent on rust_task_user::notify_chan when a task ends.
struct task_notification {
    rust_task_id id;      // the task this notification is about
    task_result result;   // task_result
};
// A Rust task: kernel-owned, reference-counted, and usable as a
// rust_cond so other tasks can block on it (e.g. for join).
// The leading fields are layout-sensitive: 'user' must come first so the
// struct agrees with std::task::rust_task, and the "Fields known to the
// compiler" group is accessed by generated code — do not reorder.
struct
rust_task : public kernel_owned<rust_task>, rust_cond
{
    // Library-visible portion; must stay the first member (see
    // rust_task_user).
    rust_task_user user;

    RUST_ATOMIC_REFCOUNT();

    // Fields known to the compiler.
    context ctx;               // presumably the saved CPU context while not
                               // running — confirm against context.h
    stk_seg *stk;              // current (innermost) stack segment
    uintptr_t runtime_sp;      // Runtime sp while task running.
    rust_scheduler *sched;     // scheduler this task belongs to
    rust_crate_cache *cache;   // see get_crate_cache()

    // Fields known only to the runtime.
    rust_kernel *kernel;
    const char *const name;    // not owned; see constructor note below
    rust_task_list *state;     // scheduler list this task is currently on
                               // (see transition())
    rust_cond *cond;           // condition this task is blocked on, or NULL
    const char *cond_name;     // human-readable name for 'cond' (debugging)
    rust_task *supervisor;     // Parent-link for failure propagation.
    int32_t list_index;        // index within 'state' list

    rust_port_id next_port_id; // next id handed out by register_port()

    // Keeps track of the last time this task yielded.
    timer yield_timer;

    // Rendezvous pointer for receiving data when blocked on a port. If we're
    // trying to read data and no data is available on any incoming channel,
    // we block on the port, and yield control to the scheduler. Since, we
    // were not able to read anything, we remember the location where the
    // result should go in the rendezvous_ptr, and let the sender write to
    // that location before waking us up.
    uintptr_t* rendezvous_ptr;

    // This flag indicates that a worker is either currently running the task
    // or is about to run this task.
    int running_on;
    int pinned_on;             // worker this task is pinned to (see pin())

    memory_region local_region;
    boxed_region boxed;

    // Indicates that fail() has been called and we are cleaning up.
    // We use this to suppress the "killed" flag during calls to yield.
    bool unwinding;

    // Indicates that the task was killed and needs to unwind
    bool killed;
    bool propagate_failure;    // presumably: whether our death should fail
                               // the supervisor (see fail_parent) — confirm

    lock_and_signal lock;      // guards cross-task-mutable state; exact
                               // coverage is defined in rust_task.cpp

    hash_map<rust_port_id, rust_port *> port_table; // ports owned by this
                                                    // task, keyed by id

    rust_obstack dynastack;    // obstack-style dynamic stack allocations

    uint32_t cc_counter;       // NOTE(review): name suggests cycle-collection
                               // bookkeeping — confirm against users

    debug::task_debug_info debug;

    // The amount of stack we're using, excluding red zones
    size_t total_stack_sz;

    // Only a pointer to 'name' is kept, so it must live as long as this task.
    rust_task(rust_scheduler *sched,
              rust_task_list *state,
              rust_task *spawner,
              const char *name,
              size_t init_stack_sz);

    ~rust_task();

    // Begin execution: run spawnee_fn with the given environment and args.
    void start(spawn_fn spawnee_fn,
               rust_opaque_box *env,
               void *args);
    void start();

    // State predicates.
    bool running();
    bool blocked();
    bool blocked_on(rust_cond *cond);
    bool dead();

    // Task-local memory management.
    void *malloc(size_t sz, const char *tag, type_desc *td=0);
    void *realloc(void *data, size_t sz, bool gc_mem=false);
    void free(void *p, bool gc_mem=false);

    // Move this task from scheduler list 'src' to 'dst'.
    void transition(rust_task_list *src, rust_task_list *dst);

    // Blocking and wakeup on a condition; 'name' is for diagnostics.
    void block(rust_cond *on, const char* name);
    void wakeup(rust_cond *from);
    void die();
    void unblock();

    // Print a backtrace, if the "bt" logging option is on.
    void backtrace();

    // Yields for a specified duration of time.
    void yield(size_t time_in_ms, bool *killed);

    // Fail this task (assuming caller-on-stack is different task).
    void kill();

    // Fail self, assuming caller-on-stack is this task.
    void fail();
    void conclude_failure();
    void fail_parent();

    // Disconnect from our supervisor.
    void unsupervise();

    frame_glue_fns *get_frame_glue_fns(uintptr_t fp);
    rust_crate_cache * get_crate_cache();

    bool can_schedule(int worker);

    void *calloc(size_t size, const char *tag);

    // Pin this task to a scheduler worker (see running_on/pinned_on).
    void pin();
    void pin(int id);
    void unpin();

    // Port lifecycle; ids index into port_table.
    rust_port_id register_port(rust_port *port);
    void release_port(rust_port_id id);
    rust_port *get_port_by_id(rust_port_id id);

    // Use this function sparingly. Depending on the ref count is generally
    // not at all safe.
    intptr_t get_ref_count() const { return ref_count; }

    // Send a task_notification for this task (success/failure) — presumably
    // on user.notify_chan when user.notify_enabled is set; confirm in .cpp.
    void notify(bool success);

    // Segmented-stack management.
    void *new_stack(size_t stk_sz, void *args_addr, size_t args_sz);
    void del_stack();
    void record_stack_limit();
    void reset_stack_limit();
    bool on_rust_stack();
    void check_stack_canary();
};

//
// Local Variables:
// mode: C++
// fill-column: 78;
// indent-tabs-mode: nil
// c-basic-offset: 4
// buffer-file-coding-system: utf-8-unix
// End:
//

#endif /* RUST_TASK_H */