#ifndef RUST_SCHEDULER_H
#define RUST_SCHEDULER_H

#include "context.h"

#ifndef _WIN32
#include <pthread.h>
#else
#include <windows.h>
#endif

struct rust_scheduler;

struct rust_hashable_dict {
    UT_hash_handle hh;
    void* fields[0];
};
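
// Sizing sketch (illustrative; 'n_fields' is just a stand-in name):
// 'fields' is a flexible array member, so a dict with n_fields entries
// is allocated with room for
//
//   sizeof(rust_hashable_dict) + n_fields * sizeof(void*)
//
// bytes, and the UT_hash_handle lets the crate cache keep interned
// dicts in a uthash table.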

class rust_crate_cache {
public:
    type_desc *get_type_desc(size_t size,
                             size_t align,
                             size_t n_descs,
                             type_desc const **descs,
                             uintptr_t n_obj_params);
    void** get_dict(size_t n_fields, void** dict);

private:

    type_desc *type_descs;
    rust_hashable_dict *dicts;

public:

    rust_scheduler *sched;
    size_t idx;

    rust_crate_cache(rust_scheduler *sched);
    ~rust_crate_cache();

    void flush();
};
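
// Usage sketch (hypothetical call site; 'fields' and 'canon' are
// illustrative names): a caller holding the owning scheduler can intern
// a dictionary of n_fields pointers through its cache, e.g.
//
//   void **canon = sched->get_cache()->get_dict(n_fields, fields);
//
// so structurally identical dicts end up sharing one cached copy.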

struct rust_scheduler : public kernel_owned<rust_scheduler>,
                        rust_thread
{
    RUST_REFCOUNTED(rust_scheduler)

    // Fields known only by the runtime:
    rust_log _log;

    // NB: this is used to filter *runtime-originating* debug
    // logging, on a per-scheduler basis. It's not likely what
    // you want to expose to the user in terms of per-task
    // or per-module logging control. By default all schedulers
    // are set to debug-level logging here, and filtered by
    // runtime category using the pseudo-modules ::rt::foo.
    uint32_t log_lvl;
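
    // Illustrative example (the level constant here is hypothetical):
    // a runtime component can route a message through the scheduler via
    //
    //   sched->log(task, some_debug_level, "reaped %d dead tasks", n);
    //
    // and the call is filtered out unless log_lvl admits the level and
    // the originating ::rt:: pseudo-module is enabled.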

    rust_srv *srv;
    const char *const name;

    rust_task_list newborn_tasks;
    rust_task_list running_tasks;
    rust_task_list blocked_tasks;
    rust_task_list dead_tasks;

    rust_crate_cache cache;

    randctx rctx;

    rust_kernel *kernel;
    int32_t list_index;

    const int id;

    lock_and_signal lock;
    size_t min_stack_size;

#ifndef __WIN32__
    pthread_attr_t attr;
    static pthread_key_t task_key;
#else
    static DWORD task_key;
#endif

    static bool tls_initialized;

    rust_env *env;
    context c_context;

    bool should_exit;

    // Only a pointer to 'name' is kept, so it must live as long as this
    // domain.
    rust_scheduler(rust_kernel *kernel, rust_srv *srv, int id);
    ~rust_scheduler();

    void activate(rust_task *task);
    void log(rust_task *task, uint32_t level, char const *fmt, ...);
    rust_log & get_log();
    void fail();

    rust_crate_cache *get_cache();
    size_t number_of_live_tasks();

    void reap_dead_tasks(int id);
    rust_task *schedule_task(int id);

    void start_main_loop();

    void log_state();

    void kill_all_tasks();

    rust_task *create_task(rust_task *spawner, const char *name,
                           size_t init_stack_sz);

    virtual void run();

#ifdef __WIN32__
    inline void win32_require(LPCTSTR fn, BOOL ok) {
        kernel->win32_require(fn, ok);
    }
#endif

    void init_tls();
    void place_task_in_tls(rust_task *task);

    static rust_task *get_task();
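
    // Hedged reading of the TLS members above: init_tls() sets up
    // task_key once (guarded by tls_initialized), place_task_in_tls()
    // records the task currently running on this scheduler thread, and
    // get_task() retrieves it so runtime code can reach "the current
    // task" without threading it through every call.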

    // Tells the scheduler to exit its scheduling loop and thread.
    void exit();
};
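
// Lifecycle sketch (hypothetical sequence, for orientation; in practice
// rust_kernel drives these steps):
//
//   1. The kernel creates a rust_scheduler and starts its thread; the
//      thread body, run(), enters start_main_loop().
//   2. Tasks arrive via create_task() and move through the
//      newborn/running/blocked/dead lists as they execute, with
//      reap_dead_tasks() reclaiming finished ones.
//   3. exit() asks the loop to stop (see should_exit above), letting
//      start_main_loop() return and the thread terminate.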

inline rust_log &
rust_scheduler::get_log() {
    return _log;
}

//
// Local Variables:
// mode: C++
// fill-column: 78;
// indent-tabs-mode: nil
// c-basic-offset: 4
// buffer-file-coding-system: utf-8-unix
// compile-command: "make -k -C $RBUILD 2>&1 | sed -e 's/\\/x\\//x:\\//g'";
// End:
//

#endif /* RUST_SCHEDULER_H */