rt: Rename rust_scheduler to rust_task_thread
parent f3343b3571
commit f94339cc1c
mk/rt.mk | 4
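The patch below is a mechanical rename of the per-thread scheduler type: code that previously reached it through `rust_scheduler::get_task()` and `task->sched` now uses `rust_task_thread::get_task()` and `task->thread`. A minimal caller-side sketch of that pattern (the `log_startup` helper is hypothetical, not part of this commit):

```cpp
// Sketch only: illustrates the renamed accessors used throughout this patch.
#include <cstdio>

struct rust_task_thread;              // renamed from rust_scheduler
struct rust_task {
    rust_task_thread *thread;         // renamed from the old `sched` field
};

static void log_startup(rust_task *task) {
    // Before this commit: rust_scheduler *sched = task->sched;
    rust_task_thread *thread = task->thread;   // after the rename
    std::printf("task %p runs on thread %p\n", (void *)task, (void *)thread);
}
```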
@@ -42,7 +42,7 @@ RUNTIME_CS_$(1) := \
 rt/rust_run_program.cpp \
 rt/rust_crate_cache.cpp \
 rt/rust_env.cpp \
-rt/rust_scheduler.cpp \
+rt/rust_task_thread.cpp \
 rt/rust_task.cpp \
 rt/rust_task_list.cpp \
 rt/rust_port.cpp \
@@ -79,7 +79,7 @@ RUNTIME_HDR_$(1) := rt/globals.h \
 rt/rust_unwind.h \
 rt/rust_upcall.h \
 rt/rust_port.h \
-rt/rust_scheduler.h \
+rt/rust_task_thread.h \
 rt/rust_shape.h \
 rt/rust_task.h \
 rt/rust_task_list.h \
@@ -5,7 +5,6 @@

 #include "../rust_internal.h"
 #include "../rust_util.h"
-#include "../rust_scheduler.h"
 #include <cstdlib>
 #include <cstring>

@@ -10,15 +10,15 @@ target triple = "@CFG_TARGET_TRIPLE@"
 %struct.rust_vec = type { i32, i32, [0 x i8] }
 %struct.rust_fn = type { i32*, %struct.rust_box* }
 %struct.rust_box = type opaque
-%struct.rust_task = type { %struct.rust_task_user, i32, [8 x i8], %class.context, %struct.stk_seg*, i32, %struct.rust_scheduler*, %class.rust_crate_cache*, %class.rust_kernel*, i8*, %class.rust_task_list*, %struct.rust_cond*, i8*, %struct.rust_task*, i32, i32, i32*, i32, i32, %class.memory_region, %class.boxed_region, i8, i8, i8, %class.lock_and_signal, %class.hash_map.4, %class.rust_obstack, i32, %"class.debug::task_debug_info", i32, [12 x i8] }
+%struct.rust_task = type { %struct.rust_task_user, i32, [8 x i8], %class.context, %struct.stk_seg*, i32, %struct.rust_task_thread*, %class.rust_crate_cache*, %class.rust_kernel*, i8*, %class.rust_task_list*, %struct.rust_cond*, i8*, %struct.rust_task*, i32, i32, i32*, %class.memory_region, %class.boxed_region, i8, i8, i8, %class.lock_and_signal, %class.hash_map.4, %class.rust_obstack, i32, %"class.debug::task_debug_info", i32, [4 x i8] }
 %struct.rust_task_user = type { i32, i32, %struct.chan_handle, i32 }
 %struct.chan_handle = type { i32, i32 }
 %class.context = type { %struct.registers_t, %class.context*, [12 x i8] }
 %struct.registers_t = type { i32, i32, i32, i32, i32, i32, i32, i32, i16, i16, i16, i16, i16, i16, i32, i32, [12 x i8] }
 %struct.stk_seg = type { %struct.stk_seg*, %struct.stk_seg*, i32, i32, i32, [0 x i8] }
-%struct.rust_scheduler = type { %class.rust_thread, i32, i32, %class.rust_log, i32, %class.rust_srv*, i8*, %class.rust_task_list, %class.rust_task_list, %class.rust_task_list, %class.rust_task_list, %class.rust_crate_cache, %struct.randctx, %class.rust_kernel*, i32, i32, %class.lock_and_signal, i32, %union.pthread_attr_t, %struct.rust_env*, [8 x i8], %class.context }
+%struct.rust_task_thread = type { %class.rust_thread, i32, %class.rust_log, i32, %class.rust_srv*, i8*, %class.rust_task_list, %class.rust_task_list, %class.rust_task_list, %class.rust_task_list, %class.rust_crate_cache, %struct.randctx, %class.rust_kernel*, i32, i32, %class.lock_and_signal, i32, %union.pthread_attr_t, %struct.rust_env*, [12 x i8], %class.context, i8, [15 x i8] }
 %class.rust_thread = type { i32 (...)**, i8, i32 }
-%class.rust_log = type { i32 (...)**, %class.rust_srv*, %struct.rust_scheduler*, i8 }
+%class.rust_log = type { i32 (...)**, %class.rust_srv*, %struct.rust_task_thread*, i8 }
 %class.rust_srv = type { i32 (...)**, %struct.rust_env*, %class.memory_region }
 %struct.rust_env = type { i32, i32, i32, i8*, i8, i8, i8* }
 %class.memory_region = type { i32 (...)**, %class.rust_srv*, %class.memory_region*, i32, %class.array_list, i8, i8, %class.lock_and_signal }
@@ -30,14 +30,14 @@ target triple = "@CFG_TARGET_TRIPLE@"
 %union.pthread_mutex_t = type { %"struct.<anonymous union>::__pthread_mutex_s" }
 %"struct.<anonymous union>::__pthread_mutex_s" = type { i32, i32, i32, i32, i32, %union.anon }
 %union.anon = type { i32 }
-%class.rust_task_list = type { %class.indexed_list, %struct.rust_scheduler*, i8* }
+%class.rust_task_list = type { %class.indexed_list, %struct.rust_task_thread*, i8* }
 %class.indexed_list = type { i32 (...)**, %class.array_list.1 }
 %class.array_list.1 = type { i32, %struct.rust_task**, i32 }
-%class.rust_crate_cache = type { %struct.type_desc*, %struct.rust_hashable_dict*, %struct.rust_scheduler*, i32 }
+%class.rust_crate_cache = type { %struct.type_desc*, %struct.rust_hashable_dict*, %struct.rust_task_thread*, i32 }
 %struct.rust_hashable_dict = type { %struct.UT_hash_handle, [0 x i8*] }
 %struct.randctx = type { i32, [256 x i32], [256 x i32], i32, i32, i32 }
 %class.rust_kernel = type { i32 (...)**, %class.memory_region, %class.rust_log, %class.rust_srv*, %class.lock_and_signal, %class.array_list.3, %struct.randctx, i32, %class.hash_map, i32, i32, i32, %struct.rust_env* }
-%class.array_list.3 = type { i32, %struct.rust_scheduler**, i32 }
+%class.array_list.3 = type { i32, %struct.rust_task_thread**, i32 }
 %class.hash_map = type { %"struct.hash_map<int, rust_task *>::map_entry"* }
 %"struct.hash_map<int, rust_task *>::map_entry" = type opaque
 %union.pthread_attr_t = type { i32, [32 x i8] }
@@ -10,15 +10,15 @@ target triple = "@CFG_TARGET_TRIPLE@"
 %struct.rust_vec = type { i64, i64, [0 x i8] }
 %struct.rust_fn = type { i64*, %struct.rust_box* }
 %struct.rust_box = type opaque
-%struct.rust_task = type { %struct.rust_task_user, i64, %class.context, %struct.stk_seg*, i64, %struct.rust_scheduler*, %class.rust_crate_cache*, %class.rust_kernel*, i8*, %class.rust_task_list*, %struct.rust_cond*, i8*, %struct.rust_task*, i32, i64, i64*, i32, i32, %class.memory_region, %class.boxed_region, i8, i8, i8, %class.lock_and_signal, %class.hash_map.4, %class.rust_obstack, i32, %"class.debug::task_debug_info", i64 }
+%struct.rust_task = type { %struct.rust_task_user, i64, %class.context, %struct.stk_seg*, i64, %struct.rust_task_thread*, %class.rust_crate_cache*, %class.rust_kernel*, i8*, %class.rust_task_list*, %struct.rust_cond*, i8*, %struct.rust_task*, i32, i64, i64*, %class.memory_region, %class.boxed_region, i8, i8, i8, %class.lock_and_signal, %class.hash_map.4, %class.rust_obstack, i32, %"class.debug::task_debug_info", i64, [8 x i8] }
 %struct.rust_task_user = type { i64, i64, %struct.chan_handle, i64 }
 %struct.chan_handle = type { i64, i64 }
 %class.context = type { %struct.registers_t, %class.context*, [8 x i8] }
 %struct.registers_t = type { [22 x i64] }
 %struct.stk_seg = type { %struct.stk_seg*, %struct.stk_seg*, i64, i32, [0 x i8] }
-%struct.rust_scheduler = type { %class.rust_thread, i64, i64, %class.rust_log, i32, %class.rust_srv*, i8*, %class.rust_task_list, %class.rust_task_list, %class.rust_task_list, %class.rust_task_list, %class.rust_crate_cache, %struct.randctx, %class.rust_kernel*, i32, i32, %class.lock_and_signal, i64, %union.pthread_attr_t, %struct.rust_env*, %class.context }
+%struct.rust_task_thread = type { %class.rust_thread, i64, %class.rust_log, i32, %class.rust_srv*, i8*, %class.rust_task_list, %class.rust_task_list, %class.rust_task_list, %class.rust_task_list, %class.rust_crate_cache, %struct.randctx, %class.rust_kernel*, i32, i32, %class.lock_and_signal, i64, %union.pthread_attr_t, %struct.rust_env*, [8 x i8], %class.context, i8, [15 x i8] }
 %class.rust_thread = type { i32 (...)**, i8, i64 }
-%class.rust_log = type { i32 (...)**, %class.rust_srv*, %struct.rust_scheduler*, i8 }
+%class.rust_log = type { i32 (...)**, %class.rust_srv*, %struct.rust_task_thread*, i8 }
 %class.rust_srv = type { i32 (...)**, %struct.rust_env*, %class.memory_region }
 %struct.rust_env = type { i64, i64, i64, i8*, i8, i8, i8* }
 %class.memory_region = type { i32 (...)**, %class.rust_srv*, %class.memory_region*, i32, %class.array_list, i8, i8, %class.lock_and_signal }
@@ -30,14 +30,14 @@ target triple = "@CFG_TARGET_TRIPLE@"
 %union.pthread_mutex_t = type { %"struct.<anonymous union>::__pthread_mutex_s" }
 %"struct.<anonymous union>::__pthread_mutex_s" = type { i32, i32, i32, i32, i32, i32, %struct.__pthread_internal_list }
 %struct.__pthread_internal_list = type { %struct.__pthread_internal_list*, %struct.__pthread_internal_list* }
-%class.rust_task_list = type { %class.indexed_list, %struct.rust_scheduler*, i8* }
+%class.rust_task_list = type { %class.indexed_list, %struct.rust_task_thread*, i8* }
 %class.indexed_list = type { i32 (...)**, %class.array_list.1 }
 %class.array_list.1 = type { i64, %struct.rust_task**, i64 }
-%class.rust_crate_cache = type { %struct.type_desc*, %struct.rust_hashable_dict*, %struct.rust_scheduler*, i64 }
+%class.rust_crate_cache = type { %struct.type_desc*, %struct.rust_hashable_dict*, %struct.rust_task_thread*, i64 }
 %struct.rust_hashable_dict = type { %struct.UT_hash_handle, [0 x i8*] }
 %struct.randctx = type { i64, [256 x i64], [256 x i64], i64, i64, i64 }
 %class.rust_kernel = type { i32 (...)**, %class.memory_region, %class.rust_log, %class.rust_srv*, %class.lock_and_signal, %class.array_list.3, %struct.randctx, i64, %class.hash_map, i32, i64, i32, %struct.rust_env* }
-%class.array_list.3 = type { i64, %struct.rust_scheduler**, i64 }
+%class.array_list.3 = type { i64, %struct.rust_task_thread**, i64 }
 %class.hash_map = type { %"struct.hash_map<long, rust_task *>::map_entry"* }
 %"struct.hash_map<long, rust_task *>::map_entry" = type opaque
 %union.pthread_attr_t = type { i64, [48 x i8] }
@@ -90,15 +90,15 @@ rust_start(uintptr_t main_fn, int argc, char **argv, void* crate_map) {
 rust_task_id root_id = kernel->create_task(NULL, "main", MAIN_STACK_SIZE);
 rust_task *root_task = kernel->get_task_by_id(root_id);
 I(kernel, root_task != NULL);
-rust_scheduler *sched = root_task->sched;
+rust_task_thread *thread = root_task->thread;
 command_line_args *args
 = new (kernel, "main command line args")
 command_line_args(root_task, argc, argv);

-DLOG(sched, dom, "startup: %d args in 0x%" PRIxPTR,
+DLOG(thread, dom, "startup: %d args in 0x%" PRIxPTR,
 args->argc, (uintptr_t)args->args);
 for (int i = 0; i < args->argc; i++) {
-DLOG(sched, dom, "startup: arg[%d] = '%s'", i, args->argv[i]);
+DLOG(thread, dom, "startup: arg[%d] = '%s'", i, args->argv[i]);
 }

 root_task->start((spawn_fn)main_fn, NULL, args->args);
@@ -1,7 +1,7 @@
 /* Native builtins. */

 #include "rust_internal.h"
-#include "rust_scheduler.h"
+#include "rust_task_thread.h"
 #include "rust_task.h"
 #include "rust_util.h"
 #include "sync/timer.h"
@@ -12,7 +12,7 @@

 extern "C" CDECL rust_str*
 last_os_error() {
-rust_task *task = rust_scheduler::get_task();
+rust_task *task = rust_task_thread::get_task();

 LOG(task, task, "last_os_error()");

@@ -55,7 +55,7 @@ last_os_error() {

 extern "C" CDECL rust_str *
 rust_getcwd() {
-rust_task *task = rust_scheduler::get_task();
+rust_task *task = rust_task_thread::get_task();
 LOG(task, task, "rust_getcwd()");

 char cbuf[BUF_BYTES];
@@ -93,14 +93,14 @@ refcount(intptr_t *v) {

 extern "C" CDECL void
 unsupervise() {
-rust_task *task = rust_scheduler::get_task();
+rust_task *task = rust_task_thread::get_task();
 task->unsupervise();
 }

 extern "C" CDECL void
 vec_reserve_shared(type_desc* ty, rust_vec** vp,
 size_t n_elts) {
-rust_task *task = rust_scheduler::get_task();
+rust_task *task = rust_task_thread::get_task();
 reserve_vec(task, vp, n_elts * ty->size);
 }

@@ -110,7 +110,7 @@ vec_reserve_shared(type_desc* ty, rust_vec** vp,
 */
 extern "C" CDECL rust_vec*
 vec_from_buf_shared(type_desc *ty, void *ptr, size_t count) {
-rust_task *task = rust_scheduler::get_task();
+rust_task *task = rust_task_thread::get_task();
 size_t fill = ty->size * count;
 rust_vec* v = (rust_vec*)task->kernel->malloc(fill + sizeof(rust_vec),
 "vec_from_buf");
@@ -121,7 +121,7 @@ vec_from_buf_shared(type_desc *ty, void *ptr, size_t count) {

 extern "C" CDECL void
 rust_str_push(rust_vec** sp, uint8_t byte) {
-rust_task *task = rust_scheduler::get_task();
+rust_task *task = rust_task_thread::get_task();
 size_t fill = (*sp)->fill;
 reserve_vec(task, sp, fill + 1);
 (*sp)->data[fill-1] = byte;
@@ -131,14 +131,14 @@ rust_str_push(rust_vec** sp, uint8_t byte) {

 extern "C" CDECL void *
 rand_new() {
-rust_task *task = rust_scheduler::get_task();
-rust_scheduler *sched = task->sched;
+rust_task *task = rust_task_thread::get_task();
+rust_task_thread *thread = task->thread;
 randctx *rctx = (randctx *) task->malloc(sizeof(randctx), "randctx");
 if (!rctx) {
 task->fail();
 return NULL;
 }
-isaac_init(sched, rctx);
+isaac_init(thread, rctx);
 return rctx;
 }

@@ -149,7 +149,7 @@ rand_next(randctx *rctx) {

 extern "C" CDECL void
 rand_free(randctx *rctx) {
-rust_task *task = rust_scheduler::get_task();
+rust_task *task = rust_task_thread::get_task();
 task->free(rctx);
 }

@@ -158,7 +158,7 @@ rand_free(randctx *rctx) {
 static void
 debug_tydesc_helper(type_desc *t)
 {
-rust_task *task = rust_scheduler::get_task();
+rust_task *task = rust_task_thread::get_task();
 LOG(task, stdlib, " size %" PRIdPTR ", align %" PRIdPTR
 ", first_param 0x%" PRIxPTR,
 t->size, t->align, t->first_param);
@@ -166,14 +166,14 @@ debug_tydesc_helper(type_desc *t)

 extern "C" CDECL void
 debug_tydesc(type_desc *t) {
-rust_task *task = rust_scheduler::get_task();
+rust_task *task = rust_task_thread::get_task();
 LOG(task, stdlib, "debug_tydesc");
 debug_tydesc_helper(t);
 }

 extern "C" CDECL void
 debug_opaque(type_desc *t, uint8_t *front) {
-rust_task *task = rust_scheduler::get_task();
+rust_task *task = rust_task_thread::get_task();
 LOG(task, stdlib, "debug_opaque");
 debug_tydesc_helper(t);
 // FIXME may want to actually account for alignment. `front` may not
@@ -192,7 +192,7 @@ struct rust_box {

 extern "C" CDECL void
 debug_box(type_desc *t, rust_box *box) {
-rust_task *task = rust_scheduler::get_task();
+rust_task *task = rust_task_thread::get_task();
 LOG(task, stdlib, "debug_box(0x%" PRIxPTR ")", box);
 debug_tydesc_helper(t);
 LOG(task, stdlib, " refcount %" PRIdPTR,
@@ -209,7 +209,7 @@ struct rust_tag {

 extern "C" CDECL void
 debug_tag(type_desc *t, rust_tag *tag) {
-rust_task *task = rust_scheduler::get_task();
+rust_task *task = rust_task_thread::get_task();

 LOG(task, stdlib, "debug_tag");
 debug_tydesc_helper(t);
@@ -227,7 +227,7 @@ struct rust_obj {

 extern "C" CDECL void
 debug_obj(type_desc *t, rust_obj *obj, size_t nmethods, size_t nbytes) {
-rust_task *task = rust_scheduler::get_task();
+rust_task *task = rust_task_thread::get_task();

 LOG(task, stdlib, "debug_obj with %" PRIdPTR " methods", nmethods);
 debug_tydesc_helper(t);
@@ -249,7 +249,7 @@ struct rust_fn {

 extern "C" CDECL void
 debug_fn(type_desc *t, rust_fn *fn) {
-rust_task *task = rust_scheduler::get_task();
+rust_task *task = rust_task_thread::get_task();
 LOG(task, stdlib, "debug_fn");
 debug_tydesc_helper(t);
 LOG(task, stdlib, " thunk at 0x%" PRIxPTR, fn->thunk);
@@ -263,7 +263,7 @@ extern "C" CDECL void *
 debug_ptrcast(type_desc *from_ty,
 type_desc *to_ty,
 void *ptr) {
-rust_task *task = rust_scheduler::get_task();
+rust_task *task = rust_task_thread::get_task();
 LOG(task, stdlib, "debug_ptrcast from");
 debug_tydesc_helper(from_ty);
 LOG(task, stdlib, "to");
@@ -273,13 +273,13 @@ debug_ptrcast(type_desc *from_ty,

 extern "C" CDECL void *
 debug_get_stk_seg() {
-rust_task *task = rust_scheduler::get_task();
+rust_task *task = rust_task_thread::get_task();
 return task->stk;
 }

 extern "C" CDECL rust_vec*
 rust_list_files(rust_str *path) {
-rust_task *task = rust_scheduler::get_task();
+rust_task *task = rust_task_thread::get_task();
 array_list<rust_str*> strings;
 #if defined(__WIN32__)
 WIN32_FIND_DATA FindFileData;
@@ -346,7 +346,7 @@ rust_ptr_eq(type_desc *t, rust_box *a, rust_box *b) {
 #if defined(__WIN32__)
 extern "C" CDECL void
 get_time(uint32_t *sec, uint32_t *usec) {
-rust_task *task = rust_scheduler::get_task();
+rust_task *task = rust_task_thread::get_task();
 SYSTEMTIME systemTime;
 FILETIME fileTime;
 GetSystemTime(&systemTime);
@@ -377,13 +377,13 @@ nano_time(uint64_t *ns) {

 extern "C" CDECL rust_task_id
 get_task_id() {
-rust_task *task = rust_scheduler::get_task();
+rust_task *task = rust_task_thread::get_task();
 return task->user.id;
 }

 extern "C" CDECL rust_task_id
 new_task() {
-rust_task *task = rust_scheduler::get_task();
+rust_task *task = rust_task_thread::get_task();
 return task->kernel->create_task(task, NULL);
 }

@@ -396,18 +396,18 @@ drop_task(rust_task *target) {

 extern "C" CDECL rust_task *
 get_task_pointer(rust_task_id id) {
-rust_task *task = rust_scheduler::get_task();
+rust_task *task = rust_task_thread::get_task();
 return task->kernel->get_task_by_id(id);
 }

 extern "C" rust_task *
 rust_get_task() {
-return rust_scheduler::get_task();
+return rust_task_thread::get_task();
 }

 extern "C" CDECL void
 start_task(rust_task_id id, fn_env_pair *f) {
-rust_task *task = rust_scheduler::get_task();
+rust_task *task = rust_task_thread::get_task();
 rust_task *target = task->kernel->get_task_by_id(id);
 target->start(f->f, f->env, NULL);
 target->deref();
@@ -415,13 +415,13 @@ start_task(rust_task_id id, fn_env_pair *f) {

 extern "C" CDECL int
 sched_threads() {
-rust_task *task = rust_scheduler::get_task();
+rust_task *task = rust_task_thread::get_task();
 return task->kernel->num_threads;
 }

 extern "C" CDECL rust_port*
 new_port(size_t unit_sz) {
-rust_task *task = rust_scheduler::get_task();
+rust_task *task = rust_task_thread::get_task();
 LOG(task, comm, "new_port(task=0x%" PRIxPTR " (%s), unit_sz=%d)",
 (uintptr_t) task, task->name, unit_sz);
 // port starts with refcount == 1
@@ -430,7 +430,7 @@ new_port(size_t unit_sz) {

 extern "C" CDECL void
 rust_port_detach(rust_port *port) {
-rust_task *task = rust_scheduler::get_task();
+rust_task *task = rust_task_thread::get_task();
 LOG(task, comm, "rust_port_detach(0x%" PRIxPTR ")", (uintptr_t) port);
 port->detach();
 // FIXME: Busy waiting until we're the only ref
@@ -443,9 +443,9 @@ rust_port_detach(rust_port *port) {

 extern "C" CDECL void
 del_port(rust_port *port) {
-rust_task *task = rust_scheduler::get_task();
+rust_task *task = rust_task_thread::get_task();
 LOG(task, comm, "del_port(0x%" PRIxPTR ")", (uintptr_t) port);
-A(task->sched, port->ref_count == 1, "Expected port ref_count == 1");
+A(task->thread, port->ref_count == 1, "Expected port ref_count == 1");
 port->deref();
 }

@@ -464,7 +464,7 @@ chan_id_send(type_desc *t, rust_task_id target_task_id,
 rust_port_id target_port_id, void *sptr) {
 // FIXME: make sure this is thread-safe
 bool sent = false;
-rust_task *task = rust_scheduler::get_task();
+rust_task *task = rust_task_thread::get_task();
 rust_task *target_task = task->kernel->get_task_by_id(target_task_id);
 if(target_task) {
 rust_port *port = target_task->get_port_by_id(target_port_id);
@@ -491,7 +491,7 @@ port_recv(uintptr_t *dptr, rust_port *port,
 uintptr_t *yield, uintptr_t *killed) {
 *yield = false;
 *killed = false;
-rust_task *task = rust_scheduler::get_task();
+rust_task *task = rust_task_thread::get_task();
 {
 scoped_lock with(port->lock);

@@ -524,7 +524,7 @@ port_recv(uintptr_t *dptr, rust_port *port,

 extern "C" CDECL void
 rust_set_exit_status(intptr_t code) {
-rust_task *task = rust_scheduler::get_task();
+rust_task *task = rust_task_thread::get_task();
 task->kernel->set_exit_status((int)code);
 }

@@ -539,7 +539,7 @@ extern void log_console_off(rust_env *env);

 extern "C" CDECL void
 rust_log_console_off() {
-rust_task *task = rust_scheduler::get_task();
+rust_task *task = rust_task_thread::get_task();
 log_console_off(task->kernel->env);
 }

@@ -9,20 +9,20 @@ rust_crate_cache::get_type_desc(size_t size,
 type_desc const **descs,
 uintptr_t n_obj_params)
 {
-I(sched, n_descs > 1);
+I(thread, n_descs > 1);
 type_desc *td = NULL;
 size_t keysz = n_descs * sizeof(type_desc*);
 HASH_FIND(hh, this->type_descs, descs, keysz, td);
 if (td) {
-DLOG(sched, cache, "rust_crate_cache::get_type_desc hit");
+DLOG(thread, cache, "rust_crate_cache::get_type_desc hit");

 // FIXME: This is a gross hack.
 td->n_obj_params = std::max(td->n_obj_params, n_obj_params);

 return td;
 }
-DLOG(sched, cache, "rust_crate_cache::get_type_desc miss");
-td = (type_desc*) sched->kernel->malloc(sizeof(type_desc) + keysz,
+DLOG(thread, cache, "rust_crate_cache::get_type_desc miss");
+td = (type_desc*) thread->kernel->malloc(sizeof(type_desc) + keysz,
 "crate cache typedesc");
 if (!td)
 return NULL;
@@ -34,7 +34,7 @@ rust_crate_cache::get_type_desc(size_t size,
 td->size = size;
 td->align = align;
 for (size_t i = 0; i < n_descs; ++i) {
-DLOG(sched, cache,
+DLOG(thread, cache,
 "rust_crate_cache::descs[%" PRIdPTR "] = 0x%" PRIxPTR,
 i, descs[i]);
 td->descs[i] = descs[i];
@@ -52,7 +52,7 @@ rust_crate_cache::get_dict(size_t n_fields, void** dict) {
 HASH_FIND(hh, this->dicts, dict, dictsz, found);
 if (found) return &(found->fields[0]);
 found = (rust_hashable_dict*)
-sched->kernel->malloc(sizeof(UT_hash_handle) + dictsz,
+thread->kernel->malloc(sizeof(UT_hash_handle) + dictsz,
 "crate cache dict");
 if (!found) return NULL;
 void** retptr = &(found->fields[0]);
@@ -61,28 +61,28 @@ rust_crate_cache::get_dict(size_t n_fields, void** dict) {
 return retptr;
 }

-rust_crate_cache::rust_crate_cache(rust_scheduler *sched)
+rust_crate_cache::rust_crate_cache(rust_task_thread *thread)
 : type_descs(NULL),
 dicts(NULL),
-sched(sched),
+thread(thread),
 idx(0)
 {
 }

 void
 rust_crate_cache::flush() {
-DLOG(sched, cache, "rust_crate_cache::flush()");
+DLOG(thread, cache, "rust_crate_cache::flush()");

 while (type_descs) {
 type_desc *d = type_descs;
 HASH_DEL(type_descs, d);
-DLOG(sched, mem, "rust_crate_cache::flush() tydesc %" PRIxPTR, d);
-sched->kernel->free(d);
+DLOG(thread, mem, "rust_crate_cache::flush() tydesc %" PRIxPTR, d);
+thread->kernel->free(d);
 }
 while (dicts) {
 rust_hashable_dict *d = dicts;
 HASH_DEL(dicts, d);
-sched->kernel->free(d);
+thread->kernel->free(d);
 }
 }

@@ -50,7 +50,7 @@ extern "C" {
 #include "sync/lock_and_signal.h"
 #include "sync/lock_free_queue.h"

-struct rust_scheduler;
+struct rust_task_thread;
 struct rust_task;
 class rust_log;
 class rust_port;
@@ -217,7 +217,7 @@ public:
 #include "rust_srv.h"
 #include "rust_log.h"
 #include "rust_kernel.h"
-#include "rust_scheduler.h"
+#include "rust_task_thread.h"

 typedef void CDECL (glue_fn)(void *, void *,
 const type_desc **, void *);
@@ -20,25 +20,25 @@ rust_kernel::rust_kernel(rust_srv *srv, size_t num_threads) :
 create_schedulers();
 }

-rust_scheduler *
+rust_task_thread *
 rust_kernel::create_scheduler(int id) {
 _kernel_lock.lock();
 rust_srv *srv = this->srv->clone();
-rust_scheduler *sched =
-new (this, "rust_scheduler") rust_scheduler(this, srv, id);
+rust_task_thread *thread =
+new (this, "rust_task_thread") rust_task_thread(this, srv, id);
 KLOG_("created scheduler: " PTR ", id: %d, index: %d",
-sched, id, sched->list_index);
+thread, id, thread->list_index);
 _kernel_lock.unlock();
-return sched;
+return thread;
 }

 void
-rust_kernel::destroy_scheduler(rust_scheduler *sched) {
+rust_kernel::destroy_scheduler(rust_task_thread *thread) {
 _kernel_lock.lock();
 KLOG_("deleting scheduler: " PTR ", name: %s, index: %d",
-sched, sched->name, sched->list_index);
-rust_srv *srv = sched->srv;
-delete sched;
+thread, thread->name, thread->list_index);
+rust_srv *srv = thread->srv;
+delete thread;
 delete srv;
 _kernel_lock.unlock();
 }
@@ -120,12 +120,12 @@ rust_kernel::signal_kernel_lock() {
 int rust_kernel::start_task_threads()
 {
 for(size_t i = 0; i < num_threads; ++i) {
-rust_scheduler *thread = threads[i];
+rust_task_thread *thread = threads[i];
 thread->start();
 }

 for(size_t i = 0; i < num_threads; ++i) {
-rust_scheduler *thread = threads[i];
+rust_task_thread *thread = threads[i];
 thread->join();
 }

@@ -142,7 +142,7 @@ rust_kernel::fail() {
 exit(rval);
 #endif
 for(size_t i = 0; i < num_threads; ++i) {
-rust_scheduler *thread = threads[i];
+rust_task_thread *thread = threads[i];
 thread->kill_all_tasks();
 }
 }
@@ -151,7 +151,7 @@ rust_task_id
 rust_kernel::create_task(rust_task *spawner, const char *name,
 size_t init_stack_sz) {
 scoped_lock with(_kernel_lock);
-rust_scheduler *thread = threads[isaac_rand(&rctx) % num_threads];
+rust_task_thread *thread = threads[isaac_rand(&rctx) % num_threads];
 rust_task *t = thread->create_task(spawner, name, init_stack_sz);
 t->user.id = max_id++;
 task_table.put(t->user.id, t);
@@ -5,7 +5,7 @@
 #include "memory_region.h"
 #include "rust_log.h"

-struct rust_scheduler;
+struct rust_task_thread;

 /**
 * A global object shared by all thread domains. Most of the data structures
@@ -21,12 +21,12 @@ public:
 private:
 lock_and_signal _kernel_lock;

-array_list<rust_scheduler *> threads;
+array_list<rust_task_thread *> threads;

 randctx rctx;

-rust_scheduler *create_scheduler(int id);
-void destroy_scheduler(rust_scheduler *sched);
+rust_task_thread *create_scheduler(int id);
+void destroy_scheduler(rust_task_thread *thread);

 void create_schedulers();
 void destroy_schedulers();
@@ -40,9 +40,9 @@ log_console_off(rust_env *env) {
 }
 }

-rust_log::rust_log(rust_srv *srv, rust_scheduler *sched) :
+rust_log::rust_log(rust_srv *srv, rust_task_thread *thread) :
 _srv(srv),
-_sched(sched) {
+_thread(thread) {
 }

 rust_log::~rust_log() {
@@ -118,12 +118,12 @@ rust_log::trace_ln(rust_task *task, uint32_t level, char *message) {
 #endif

 char prefix[BUF_BYTES] = "";
-if (_sched && _sched->name) {
+if (_thread && _thread->name) {
 append_string(prefix, "%04" PRIxPTR ":%.10s:",
-thread_id, _sched->name);
+thread_id, _thread->name);
 } else {
 append_string(prefix, "%04" PRIxPTR ":0x%08" PRIxPTR ":",
-thread_id, (uintptr_t) _sched);
+thread_id, (uintptr_t) _thread);
 }
 if (task) {
 if (task->name) {
@@ -8,18 +8,18 @@ const uint32_t log_info = 2;
 const uint32_t log_debug = 3;

 #define LOG(task, field, ...) \
-DLOG_LVL(log_debug, task, task->sched, field, __VA_ARGS__)
+DLOG_LVL(log_debug, task, task->thread, field, __VA_ARGS__)
 #define LOG_ERR(task, field, ...) \
-DLOG_LVL(log_err, task, task->sched, field, __VA_ARGS__)
-#define DLOG(sched, field, ...) \
-DLOG_LVL(log_debug, NULL, sched, field, __VA_ARGS__)
-#define DLOG_ERR(sched, field, ...) \
-DLOG_LVL(log_err, NULL, sched, field, __VA_ARGS__)
-#define LOGPTR(sched, msg, ptrval) \
-DLOG_LVL(log_debug, NULL, sched, mem, "%s 0x%" PRIxPTR, msg, ptrval)
-#define DLOG_LVL(lvl, task, sched, field, ...) \
+DLOG_LVL(log_err, task, task->thread, field, __VA_ARGS__)
+#define DLOG(thread, field, ...) \
+DLOG_LVL(log_debug, NULL, thread, field, __VA_ARGS__)
+#define DLOG_ERR(thread, field, ...) \
+DLOG_LVL(log_err, NULL, thread, field, __VA_ARGS__)
+#define LOGPTR(thread, msg, ptrval) \
+DLOG_LVL(log_debug, NULL, thread, mem, "%s 0x%" PRIxPTR, msg, ptrval)
+#define DLOG_LVL(lvl, task, thread, field, ...) \
 do { \
-rust_scheduler* _d_ = sched; \
+rust_task_thread* _d_ = thread; \
 if (log_rt_##field >= lvl && _d_->log_lvl >= lvl) { \
 _d_->log(task, lvl, __VA_ARGS__); \
 } \
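The logging macros above only swap the `sched` parameter for `thread`; the level-gating logic is unchanged. A stripped-down, self-contained illustration of the same pattern (simplified stand-in types, not the runtime's actual definitions):

```cpp
// Simplified illustration of the DLOG/DLOG_LVL pattern after the rename.
#include <cstdarg>
#include <cstdint>
#include <cstdio>

struct rust_task_thread {
    uint32_t log_lvl;                         // runtime log threshold
    void log(void *task, uint32_t lvl, const char *fmt, ...) {
        (void)task; (void)lvl;
        va_list args; va_start(args, fmt);
        std::vfprintf(stderr, fmt, args);
        std::fputc('\n', stderr);
        va_end(args);
    }
};

const uint32_t log_debug = 3;
const uint32_t log_rt_task = 3;               // per-field level, as in rust_log.h

#define DLOG_LVL(lvl, task, thread, field, ...)              \
    do {                                                      \
        rust_task_thread *_d_ = thread;                       \
        if (log_rt_##field >= lvl && _d_->log_lvl >= lvl) {   \
            _d_->log(task, lvl, __VA_ARGS__);                 \
        }                                                     \
    } while (0)

#define DLOG(thread, field, ...) \
    DLOG_LVL(log_debug, NULL, thread, field, __VA_ARGS__)

int main() {
    rust_task_thread thread = { log_debug };
    DLOG(&thread, task, "sizeof(thread) = %zu", sizeof thread);
    return 0;
}
```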
@@ -34,13 +34,13 @@ const uint32_t log_debug = 3;
 } \
 } while (0)

-struct rust_scheduler;
+struct rust_task_thread;
 struct rust_task;

 class rust_log {

 public:
-rust_log(rust_srv *srv, rust_scheduler *sched);
+rust_log(rust_srv *srv, rust_task_thread *thread);
 virtual ~rust_log();

 void trace_ln(rust_task *task, uint32_t level, char *message);
@@ -49,7 +49,7 @@ public:

 private:
 rust_srv *_srv;
-rust_scheduler *_sched;
+rust_task_thread *_thread;
 bool _use_labels;
 void trace_ln(rust_task *task, char *message);
 };
@@ -21,7 +21,7 @@ rust_port::~rust_port() {
 }

 void rust_port::detach() {
-I(task->sched, !task->lock.lock_held_by_current_thread());
+I(task->thread, !task->lock.lock_held_by_current_thread());
 scoped_lock with(task->lock);
 {
 task->release_port(id);
@@ -29,7 +29,7 @@ void rust_port::detach() {
 }

 void rust_port::send(void *sptr) {
-I(task->sched, !lock.lock_held_by_current_thread());
+I(task->thread, !lock.lock_held_by_current_thread());
 scoped_lock with(lock);

 buffer.enqueue(sptr);
@@ -46,7 +46,7 @@ void rust_port::send(void *sptr) {
 }

 bool rust_port::receive(void *dptr) {
-I(task->sched, lock.lock_held_by_current_thread());
+I(task->thread, lock.lock_held_by_current_thread());
 if (buffer.is_empty() == false) {
 buffer.dequeue(dptr);
 LOG(task, comm, "<=== read data ===");
@@ -56,7 +56,7 @@ bool rust_port::receive(void *dptr) {
 }

 size_t rust_port::size() {
-I(task->sched, !lock.lock_held_by_current_thread());
+I(task->thread, !lock.lock_held_by_current_thread());
 scoped_lock with(lock);
 return buffer.size();
 }
@@ -532,7 +532,7 @@ extern "C" void
 shape_cmp_type(int8_t *result, const type_desc *tydesc,
 const type_desc **subtydescs, uint8_t *data_0,
 uint8_t *data_1, uint8_t cmp_type) {
-rust_task *task = rust_scheduler::get_task();
+rust_task *task = rust_task_thread::get_task();
 shape::arena arena;

 // FIXME: This may well be broken when comparing two closures or objects
@@ -553,7 +553,7 @@ shape_cmp_type(int8_t *result, const type_desc *tydesc,

 extern "C" rust_str *
 shape_log_str(const type_desc *tydesc, uint8_t *data) {
-rust_task *task = rust_scheduler::get_task();
+rust_task *task = rust_task_thread::get_task();

 shape::arena arena;
 shape::type_param *params =
@@ -571,7 +571,7 @@ shape_log_str(const type_desc *tydesc, uint8_t *data) {

 extern "C" void
 shape_log_type(const type_desc *tydesc, uint8_t *data, uint32_t level) {
-rust_task *task = rust_scheduler::get_task();
+rust_task *task = rust_task_thread::get_task();

 shape::arena arena;
 shape::type_param *params =
@@ -583,6 +583,6 @@ shape_log_type(const type_desc *tydesc, uint8_t *data, uint32_t level) {

 log.walk();

-task->sched->log(task, level, "%s", ss.str().c_str());
+task->thread->log(task, level, "%s", ss.str().c_str());
 }

@@ -67,7 +67,7 @@ const uint8_t stack_canary[] = {0xAB, 0xCD, 0xAB, 0xCD,
 0xAB, 0xCD, 0xAB, 0xCD};

 static size_t
-get_next_stk_size(rust_scheduler *sched, rust_task *task,
+get_next_stk_size(rust_task_thread *thread, rust_task *task,
 size_t min, size_t current, size_t requested) {
 LOG(task, mem, "calculating new stack size for 0x%" PRIxPTR, task);
 LOG(task, mem,
@@ -84,7 +84,7 @@ get_next_stk_size(rust_scheduler *sched, rust_task *task,
 sz = std::max(sz, next);

 LOG(task, mem, "next stack size: %" PRIdPTR, sz);
-I(sched, requested <= sz);
+I(thread, requested <= sz);
 return sz;
 }

@@ -132,13 +132,13 @@ user_stack_size(stk_seg *stk) {

 static void
 free_stk(rust_task *task, stk_seg *stk) {
-LOGPTR(task->sched, "freeing stk segment", (uintptr_t)stk);
+LOGPTR(task->thread, "freeing stk segment", (uintptr_t)stk);
 task->total_stack_sz -= user_stack_size(stk);
 task->free(stk);
 }

 static stk_seg*
-new_stk(rust_scheduler *sched, rust_task *task, size_t requested_sz)
+new_stk(rust_task_thread *thread, rust_task *task, size_t requested_sz)
 {
 LOG(task, mem, "creating new stack for task %" PRIxPTR, task);
 if (task->stk) {
@@ -146,7 +146,7 @@ new_stk(rust_scheduler *sched, rust_task *task, size_t requested_sz)
 }

 // The minimum stack size, in bytes, of a Rust stack, excluding red zone
-size_t min_sz = sched->min_stack_size;
+size_t min_sz = thread->min_stack_size;

 // Try to reuse an existing stack segment
 if (task->stk != NULL && task->stk->prev != NULL) {
@@ -154,7 +154,7 @@ new_stk(rust_scheduler *sched, rust_task *task, size_t requested_sz)
 if (min_sz <= prev_sz && requested_sz <= prev_sz) {
 LOG(task, mem, "reusing existing stack");
 task->stk = task->stk->prev;
-A(sched, task->stk->prev == NULL, "Bogus stack ptr");
+A(thread, task->stk->prev == NULL, "Bogus stack ptr");
 config_valgrind_stack(task->stk);
 return task->stk;
 } else {
@@ -170,23 +170,23 @@ new_stk(rust_scheduler *sched, rust_task *task, size_t requested_sz)
 current_sz = user_stack_size(task->stk);
 }
 // The calculated size of the new stack, excluding red zone
-size_t rust_stk_sz = get_next_stk_size(sched, task, min_sz,
+size_t rust_stk_sz = get_next_stk_size(thread, task, min_sz,
 current_sz, requested_sz);

-if (task->total_stack_sz + rust_stk_sz > sched->env->max_stack_size) {
+if (task->total_stack_sz + rust_stk_sz > thread->env->max_stack_size) {
 LOG_ERR(task, task, "task %" PRIxPTR " ran out of stack", task);
 task->fail();
 }

 size_t sz = sizeof(stk_seg) + rust_stk_sz + RED_ZONE_SIZE;
 stk_seg *stk = (stk_seg *)task->malloc(sz, "stack");
-LOGPTR(task->sched, "new stk", (uintptr_t)stk);
+LOGPTR(task->thread, "new stk", (uintptr_t)stk);
 memset(stk, 0, sizeof(stk_seg));
 add_stack_canary(stk);
 stk->prev = NULL;
 stk->next = task->stk;
 stk->end = (uintptr_t) &stk->data[rust_stk_sz + RED_ZONE_SIZE];
-LOGPTR(task->sched, "stk end", stk->end);
+LOGPTR(task->thread, "stk end", stk->end);

 task->stk = stk;
 config_valgrind_stack(task->stk);
@@ -222,20 +222,20 @@ del_stk(rust_task *task, stk_seg *stk)
 unconfig_valgrind_stack(stk);
 if (delete_stack) {
 free_stk(task, stk);
-A(task->sched, task->total_stack_sz == 0, "Stack size should be 0");
+A(task->thread, task->total_stack_sz == 0, "Stack size should be 0");
 }
 }

 // Tasks
-rust_task::rust_task(rust_scheduler *sched, rust_task_list *state,
+rust_task::rust_task(rust_task_thread *thread, rust_task_list *state,
 rust_task *spawner, const char *name,
 size_t init_stack_sz) :
 ref_count(1),
 stk(NULL),
 runtime_sp(0),
-sched(sched),
+thread(thread),
 cache(NULL),
-kernel(sched->kernel),
+kernel(thread->kernel),
 name(name),
 state(state),
 cond(NULL),
@@ -244,7 +244,7 @@ rust_task::rust_task(rust_scheduler *sched, rust_task_list *state,
 list_index(-1),
 next_port_id(0),
 rendezvous_ptr(0),
-local_region(&sched->srv->local_region),
+local_region(&thread->srv->local_region),
 boxed(&local_region),
 unwinding(false),
 killed(false),
@@ -253,14 +253,14 @@ rust_task::rust_task(rust_scheduler *sched, rust_task_list *state,
 cc_counter(0),
 total_stack_sz(0)
 {
-LOGPTR(sched, "new task", (uintptr_t)this);
-DLOG(sched, task, "sizeof(task) = %d (0x%x)", sizeof *this, sizeof *this);
+LOGPTR(thread, "new task", (uintptr_t)this);
+DLOG(thread, task, "sizeof(task) = %d (0x%x)", sizeof *this, sizeof *this);

 assert((void*)this == (void*)&user);

 user.notify_enabled = 0;

-stk = new_stk(sched, this, init_stack_sz);
+stk = new_stk(thread, this, init_stack_sz);
 user.rust_sp = stk->end;
 if (supervisor) {
 supervisor->ref();
@@ -269,9 +269,9 @@ rust_task::rust_task(rust_scheduler *sched, rust_task_list *state,

 rust_task::~rust_task()
 {
-I(sched, !sched->lock.lock_held_by_current_thread());
-I(sched, port_table.is_empty());
-DLOG(sched, task, "~rust_task %s @0x%" PRIxPTR ", refcnt=%d",
+I(thread, !thread->lock.lock_held_by_current_thread());
+I(thread, port_table.is_empty());
+DLOG(thread, task, "~rust_task %s @0x%" PRIxPTR ", refcnt=%d",
 name, (uintptr_t)this, ref_count);

 if (supervisor) {
@@ -282,7 +282,7 @@ rust_task::~rust_task()

 /* FIXME: tighten this up, there are some more
 assertions that hold at task-lifecycle events. */
-I(sched, ref_count == 0); // ||
+I(thread, ref_count == 0); // ||
 // (ref_count == 1 && this == sched->root_task));

 // Delete all the stacks. There may be more than one if the task failed
@@ -325,7 +325,7 @@ cleanup_task(cleanup_args *args) {
 #ifndef __WIN32__
 task->conclude_failure();
 #else
-A(task->sched, false, "Shouldn't happen");
+A(task->thread, false, "Shouldn't happen");
 #endif
 }
 }
@@ -342,7 +342,7 @@ void task_start_wrapper(spawn_args *a)
 // must have void return type, we can safely pass 0.
 a->f(0, a->envptr, a->argptr);
 } catch (rust_task *ex) {
-A(task->sched, ex == task,
+A(task->thread, ex == task,
 "Expected this task to be thrown for unwinding");
 threw_exception = true;
 }
@@ -359,7 +359,7 @@ void task_start_wrapper(spawn_args *a)

 // The cleanup work needs lots of stack
 cleanup_args ca = {a, threw_exception};
-task->sched->c_context.call_shim_on_c_stack(&ca, (void*)cleanup_task);
+task->thread->c_context.call_shim_on_c_stack(&ca, (void*)cleanup_task);

 task->ctx.next->swap(task->ctx);
 }
@@ -373,7 +373,7 @@ rust_task::start(spawn_fn spawnee_fn,
 " with env 0x%" PRIxPTR " and arg 0x%" PRIxPTR,
 spawnee_fn, envptr, argptr);

-I(sched, stk->data != NULL);
+I(thread, stk->data != NULL);

 char *sp = (char *)user.rust_sp;

@@ -393,7 +393,7 @@ rust_task::start(spawn_fn spawnee_fn,

 void rust_task::start()
 {
-transition(&sched->newborn_tasks, &sched->running_tasks);
+transition(&thread->newborn_tasks, &thread->running_tasks);
 }

 // Only run this on the rust stack
@@ -440,7 +440,7 @@ bool rust_task_is_unwinding(rust_task *rt) {
 void
 rust_task::fail() {
 // See note in ::kill() regarding who should call this.
-DLOG(sched, task, "task %s @0x%" PRIxPTR " failing", name, this);
+DLOG(thread, task, "task %s @0x%" PRIxPTR " failing", name, this);
 backtrace();
 unwinding = true;
 #ifndef __WIN32__
@@ -449,7 +449,7 @@ rust_task::fail() {
 die();
 conclude_failure();
 // FIXME: Need unwinding on windows. This will end up aborting
-sched->fail();
+thread->fail();
 #endif
 }

@@ -461,7 +461,7 @@ rust_task::conclude_failure() {
 void
 rust_task::fail_parent() {
 if (supervisor) {
-DLOG(sched, task,
+DLOG(thread, task,
 "task %s @0x%" PRIxPTR
 " propagating failure to supervisor %s @0x%" PRIxPTR,
 name, this, supervisor->name, supervisor);
@@ -469,14 +469,14 @@ rust_task::fail_parent() {
 }
 // FIXME: implement unwinding again.
 if (NULL == supervisor && propagate_failure)
-sched->fail();
+thread->fail();
 }

 void
 rust_task::unsupervise()
 {
 if (supervisor) {
-DLOG(sched, task,
+DLOG(thread, task,
 "task %s @0x%" PRIxPTR
 " disconnecting from supervisor %s @0x%" PRIxPTR,
 name, this, supervisor->name, supervisor);
@@ -495,13 +495,13 @@ rust_task::get_frame_glue_fns(uintptr_t fp) {
 bool
 rust_task::running()
 {
-return state == &sched->running_tasks;
+return state == &thread->running_tasks;
 }

 bool
 rust_task::blocked()
 {
-return state == &sched->blocked_tasks;
+return state == &thread->blocked_tasks;
 }

 bool
@@ -513,7 +513,7 @@ rust_task::blocked_on(rust_cond *on)
 bool
 rust_task::dead()
 {
-return state == &sched->dead_tasks;
+return state == &thread->dead_tasks;
 }

 void *
@@ -537,55 +537,55 @@ rust_task::free(void *p)
 void
 rust_task::transition(rust_task_list *src, rust_task_list *dst) {
 bool unlock = false;
-if(!sched->lock.lock_held_by_current_thread()) {
+if(!thread->lock.lock_held_by_current_thread()) {
 unlock = true;
-sched->lock.lock();
+thread->lock.lock();
 }
-DLOG(sched, task,
+DLOG(thread, task,
 "task %s " PTR " state change '%s' -> '%s' while in '%s'",
 name, (uintptr_t)this, src->name, dst->name, state->name);
-I(sched, state == src);
+I(thread, state == src);
 src->remove(this);
 dst->append(this);
 state = dst;
-sched->lock.signal();
+thread->lock.signal();
 if(unlock)
-sched->lock.unlock();
+thread->lock.unlock();
 }

 void
 rust_task::block(rust_cond *on, const char* name) {
-I(sched, !lock.lock_held_by_current_thread());
+I(thread, !lock.lock_held_by_current_thread());
 scoped_lock with(lock);
 LOG(this, task, "Blocking on 0x%" PRIxPTR ", cond: 0x%" PRIxPTR,
 (uintptr_t) on, (uintptr_t) cond);
-A(sched, cond == NULL, "Cannot block an already blocked task.");
-A(sched, on != NULL, "Cannot block on a NULL object.");
+A(thread, cond == NULL, "Cannot block an already blocked task.");
+A(thread, on != NULL, "Cannot block on a NULL object.");

-transition(&sched->running_tasks, &sched->blocked_tasks);
+transition(&thread->running_tasks, &thread->blocked_tasks);
 cond = on;
 cond_name = name;
 }

 void
 rust_task::wakeup(rust_cond *from) {
-I(sched, !lock.lock_held_by_current_thread());
+I(thread, !lock.lock_held_by_current_thread());
 scoped_lock with(lock);
-A(sched, cond != NULL, "Cannot wake up unblocked task.");
+A(thread, cond != NULL, "Cannot wake up unblocked task.");
 LOG(this, task, "Blocked on 0x%" PRIxPTR " woken up on 0x%" PRIxPTR,
 (uintptr_t) cond, (uintptr_t) from);
-A(sched, cond == from, "Cannot wake up blocked task on wrong condition.");
+A(thread, cond == from, "Cannot wake up blocked task on wrong condition.");

 cond = NULL;
 cond_name = "none";
-transition(&sched->blocked_tasks, &sched->running_tasks);
+transition(&thread->blocked_tasks, &thread->running_tasks);
 }

 void
 rust_task::die() {
-I(sched, !lock.lock_held_by_current_thread());
+I(thread, !lock.lock_held_by_current_thread());
 scoped_lock with(lock);
-transition(&sched->running_tasks, &sched->dead_tasks);
+transition(&thread->running_tasks, &thread->dead_tasks);
 }

 void
@@ -601,8 +601,8 @@ rust_crate_cache *
 rust_task::get_crate_cache()
 {
 if (!cache) {
-DLOG(sched, task, "fetching cache for current crate");
-cache = sched->get_cache();
+DLOG(thread, task, "fetching cache for current crate");
+cache = thread->get_cache();
 }
 return cache;
 }
@@ -623,7 +623,7 @@ rust_task::calloc(size_t size, const char *tag) {
 }

 rust_port_id rust_task::register_port(rust_port *port) {
-I(sched, !lock.lock_held_by_current_thread());
+I(thread, !lock.lock_held_by_current_thread());
 scoped_lock with(lock);

 rust_port_id id = next_port_id++;
@@ -632,12 +632,12 @@ rust_port_id rust_task::register_port(rust_port *port) {
 }

 void rust_task::release_port(rust_port_id id) {
-I(sched, lock.lock_held_by_current_thread());
+I(thread, lock.lock_held_by_current_thread());
 port_table.remove(id);
 }

 rust_port *rust_task::get_port_by_id(rust_port_id id) {
-I(sched, !lock.lock_held_by_current_thread());
+I(thread, !lock.lock_held_by_current_thread());
 scoped_lock with(lock);
 rust_port *port = NULL;
 port_table.get(id, &port);
@@ -675,8 +675,8 @@ record_sp(void *limit);
 void *
 rust_task::new_stack(size_t stk_sz, void *args_addr, size_t args_sz) {

-stk_seg *stk_seg = new_stk(sched, this, stk_sz + args_sz);
-A(sched, stk_seg->end - (uintptr_t)stk_seg->data >= stk_sz + args_sz,
+stk_seg *stk_seg = new_stk(thread, this, stk_sz + args_sz);
+A(thread, stk_seg->end - (uintptr_t)stk_seg->data >= stk_sz + args_sz,
 "Did not receive enough stack");
 uint8_t *new_sp = (uint8_t*)stk_seg->end;
 // Push the function arguments to the new stack
@@ -700,7 +700,7 @@ rust_task::record_stack_limit() {
 // subtracting the frame size. As a result we need our stack limit to
 // account for those 256 bytes.
 const unsigned LIMIT_OFFSET = 256;
-A(sched,
+A(thread,
 (uintptr_t)stk->end - RED_ZONE_SIZE
 - (uintptr_t)stk->data >= LIMIT_OFFSET,
 "Stack size must be greater than LIMIT_OFFSET");
@@ -731,7 +731,7 @@ rust_task::reset_stack_limit() {
 uintptr_t sp = get_sp();
 while (!sp_in_stk_seg(sp, stk)) {
 del_stk(this, stk);
-A(sched, stk != NULL, "Failed to find the current stack");
+A(thread, stk != NULL, "Failed to find the current stack");
 }
 record_stack_limit();
 }
@@ -76,7 +76,7 @@ rust_task : public kernel_owned<rust_task>, rust_cond
 context ctx;
 stk_seg *stk;
 uintptr_t runtime_sp; // Runtime sp while task running.
-rust_scheduler *sched;
+rust_task_thread *thread;
 rust_crate_cache *cache;

 // Fields known only to the runtime.
@@ -123,7 +123,7 @@ rust_task : public kernel_owned<rust_task>, rust_cond
 size_t total_stack_sz;

 // Only a pointer to 'name' is kept, so it must live as long as this task.
-rust_task(rust_scheduler *sched,
+rust_task(rust_task_thread *thread,
 rust_task_list *state,
 rust_task *spawner,
 const char *name,
@@ -1,15 +1,15 @@
 #include "rust_internal.h"

-rust_task_list::rust_task_list (rust_scheduler *sched, const char* name) :
-sched(sched), name(name) {
+rust_task_list::rust_task_list (rust_task_thread *thread, const char* name) :
+thread(thread), name(name) {
 }

 void
 rust_task_list::delete_all() {
-DLOG(sched, task, "deleting all %s tasks", name);
+DLOG(thread, task, "deleting all %s tasks", name);
 while (is_empty() == false) {
 rust_task *task = pop_value();
-DLOG(sched, task, "deleting task " PTR, task);
+DLOG(thread, task, "deleting task " PTR, task);
 delete task;
 }
 }
@@ -8,9 +8,9 @@
 class rust_task_list : public indexed_list<rust_task>,
 public kernel_owned<rust_task_list> {
 public:
-rust_scheduler *sched;
+rust_task_thread *thread;
 const char* name;
-rust_task_list (rust_scheduler *sched, const char* name);
+rust_task_list (rust_task_thread *thread, const char* name);
 void delete_all();
 };

@@ -7,14 +7,14 @@
 #include "globals.h"

 #ifndef _WIN32
-pthread_key_t rust_scheduler::task_key;
+pthread_key_t rust_task_thread::task_key;
 #else
-DWORD rust_scheduler::task_key;
+DWORD rust_task_thread::task_key;
 #endif

-bool rust_scheduler::tls_initialized = false;
+bool rust_task_thread::tls_initialized = false;

-rust_scheduler::rust_scheduler(rust_kernel *kernel,
+rust_task_thread::rust_task_thread(rust_kernel *kernel,
 rust_srv *srv,
 int id) :
 ref_count(1),
@@ -46,8 +46,8 @@ rust_scheduler::rust_scheduler(rust_kernel *kernel,
 init_tls();
 }

-rust_scheduler::~rust_scheduler() {
-DLOG(this, dom, "~rust_scheduler %s @0x%" PRIxPTR, name, (uintptr_t)this);
+rust_task_thread::~rust_task_thread() {
+DLOG(this, dom, "~rust_task_thread %s @0x%" PRIxPTR, name, (uintptr_t)this);

 newborn_tasks.delete_all();
 running_tasks.delete_all();
@@ -59,7 +59,7 @@ rust_scheduler::~rust_scheduler() {
 }

 void
-rust_scheduler::activate(rust_task *task) {
+rust_task_thread::activate(rust_task *task) {
 task->ctx.next = &c_context;
 DLOG(this, task, "descheduling...");
 lock.unlock();
@@ -69,7 +69,7 @@ rust_scheduler::activate(rust_task *task) {
 }

 void
-rust_scheduler::log(rust_task* task, uint32_t level, char const *fmt, ...) {
+rust_task_thread::log(rust_task* task, uint32_t level, char const *fmt, ...) {
 char buf[BUF_BYTES];
 va_list args;
 va_start(args, fmt);
@@ -79,14 +79,14 @@ rust_scheduler::log(rust_task* task, uint32_t level, char const *fmt, ...) {
 }

 void
-rust_scheduler::fail() {
+rust_task_thread::fail() {
 log(NULL, log_err, "domain %s @0x%" PRIxPTR " root task failed",
 name, this);
 kernel->fail();
 }

 void
-rust_scheduler::kill_all_tasks() {
+rust_task_thread::kill_all_tasks() {
 I(this, !lock.lock_held_by_current_thread());
 scoped_lock with(lock);

@@ -104,7 +104,7 @@ rust_scheduler::kill_all_tasks() {
 }

 size_t
-rust_scheduler::number_of_live_tasks() {
+rust_task_thread::number_of_live_tasks() {
 return running_tasks.length() + blocked_tasks.length();
 }

@@ -112,7 +112,7 @@ rust_scheduler::number_of_live_tasks() {
 * Delete any dead tasks.
 */
 void
-rust_scheduler::reap_dead_tasks() {
+rust_task_thread::reap_dead_tasks() {
 I(this, lock.lock_held_by_current_thread());
 if (dead_tasks.length() == 0) {
 return;
@@ -157,7 +157,7 @@ rust_scheduler::reap_dead_tasks() {
 * Returns NULL if no tasks can be scheduled.
 */
 rust_task *
-rust_scheduler::schedule_task() {
+rust_task_thread::schedule_task() {
 I(this, this);
 // FIXME: in the face of failing tasks, this is not always right.
 // I(this, n_live_tasks() > 0);
@@ -173,7 +173,7 @@ rust_scheduler::schedule_task() {
 }

 void
-rust_scheduler::log_state() {
+rust_task_thread::log_state() {
 if (log_rt_task < log_debug) return;

 if (!running_tasks.is_empty()) {
@@ -211,7 +211,7 @@ rust_scheduler::log_state() {
 * drop to zero.
 */
 void
-rust_scheduler::start_main_loop() {
+rust_task_thread::start_main_loop() {
 lock.lock();

 DLOG(this, dom, "started domain loop %d", id);
@@ -277,12 +277,12 @@ rust_scheduler::start_main_loop() {
 }

 rust_crate_cache *
-rust_scheduler::get_cache() {
+rust_task_thread::get_cache() {
 return &cache;
 }

 rust_task *
-rust_scheduler::create_task(rust_task *spawner, const char *name,
+rust_task_thread::create_task(rust_task *spawner, const char *name,
 size_t init_stack_sz) {
 rust_task *task =
 new (this->kernel, "rust_task")
@@ -300,27 +300,27 @@ rust_scheduler::create_task(rust_task *spawner, const char *name,
 return task;
 }

-void rust_scheduler::run() {
+void rust_task_thread::run() {
 this->start_main_loop();
 }

 #ifndef _WIN32
 void
-rust_scheduler::init_tls() {
+rust_task_thread::init_tls() {
 int result = pthread_key_create(&task_key, NULL);
 assert(!result && "Couldn't create the TLS key!");
 tls_initialized = true;
 }

 void
-rust_scheduler::place_task_in_tls(rust_task *task) {
+rust_task_thread::place_task_in_tls(rust_task *task) {
 int result = pthread_setspecific(task_key, task);
 assert(!result && "Couldn't place the task in TLS!");
 task->record_stack_limit();
 }

 rust_task *
-rust_scheduler::get_task() {
+rust_task_thread::get_task() {
 if (!tls_initialized)
 return NULL;
 rust_task *task = reinterpret_cast<rust_task *>
@ -330,21 +330,21 @@ rust_scheduler::get_task() {
|
||||
}
|
||||
#else
|
||||
void
|
||||
rust_scheduler::init_tls() {
|
||||
rust_task_thread::init_tls() {
|
||||
task_key = TlsAlloc();
|
||||
assert(task_key != TLS_OUT_OF_INDEXES && "Couldn't create the TLS key!");
|
||||
tls_initialized = true;
|
||||
}
|
||||
|
||||
void
|
||||
rust_scheduler::place_task_in_tls(rust_task *task) {
|
||||
rust_task_thread::place_task_in_tls(rust_task *task) {
|
||||
BOOL result = TlsSetValue(task_key, task);
|
||||
assert(result && "Couldn't place the task in TLS!");
|
||||
task->record_stack_limit();
|
||||
}
|
||||
|
||||
rust_task *
|
||||
rust_scheduler::get_task() {
|
||||
rust_task_thread::get_task() {
|
||||
if (!tls_initialized)
|
||||
return NULL;
|
||||
rust_task *task = reinterpret_cast<rust_task *>(TlsGetValue(task_key));
|
||||
@ -354,7 +354,7 @@ rust_scheduler::get_task() {
|
||||
#endif
|
||||
|
||||
void
|
||||
rust_scheduler::exit() {
|
||||
rust_task_thread::exit() {
|
||||
A(this, !lock.lock_held_by_current_thread(), "Shouldn't have lock");
|
||||
scoped_lock with(lock);
|
||||
should_exit = true;
|
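
The TLS plumbing renamed above keeps its shape: a key is created once per thread, the current task pointer is stashed in it, and get_task() reads it back. A minimal standalone sketch of that pattern, assuming plain pthreads and a stand-in Task struct rather than the runtime's rust_task:

// Sketch only: mirrors init_tls / place_task_in_tls / get_task above,
// outside the runtime. Task is a stand-in for rust_task.
#include <cassert>
#include <cstdio>
#include <pthread.h>

struct Task { const char *name; };

static pthread_key_t task_key;
static bool tls_initialized = false;

static void init_tls() {
    int result = pthread_key_create(&task_key, NULL);
    assert(!result && "Couldn't create the TLS key!");
    tls_initialized = true;
}

static void place_task_in_tls(Task *task) {
    int result = pthread_setspecific(task_key, task);
    assert(!result && "Couldn't place the task in TLS!");
}

static Task *get_task() {
    if (!tls_initialized)
        return NULL;
    return reinterpret_cast<Task *>(pthread_getspecific(task_key));
}

int main() {
    init_tls();
    Task t = { "main" };
    place_task_in_tls(&t);
    printf("current task: %s\n", get_task()->name);
    return 0;
}

On Windows the same idea uses TlsAlloc/TlsSetValue/TlsGetValue, as the #else branch in the diff shows.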
@ -1,5 +1,5 @@
#ifndef RUST_SCHEDULER_H
#define RUST_SCHEDULER_H
#ifndef RUST_TASK_THREAD_H
#define RUST_TASK_THREAD_H

#include "context.h"

@ -9,7 +9,7 @@
#include <windows.h>
#endif

struct rust_scheduler;
struct rust_task_thread;

struct rust_hashable_dict {
UT_hash_handle hh;
@ -32,18 +32,18 @@ private:

public:

rust_scheduler *sched;
rust_task_thread *thread;
size_t idx;

rust_crate_cache(rust_scheduler *sched);
rust_crate_cache(rust_task_thread *thread);
~rust_crate_cache();
void flush();
};

struct rust_scheduler : public kernel_owned<rust_scheduler>,
struct rust_task_thread : public kernel_owned<rust_task_thread>,
rust_thread
{
RUST_REFCOUNTED(rust_scheduler)
RUST_REFCOUNTED(rust_task_thread)

// Fields known only by the runtime:
rust_log _log;
@ -92,8 +92,8 @@ struct rust_scheduler : public kernel_owned<rust_scheduler>,

// Only a pointer to 'name' is kept, so it must live as long as this
// domain.
rust_scheduler(rust_kernel *kernel, rust_srv *srv, int id);
~rust_scheduler();
rust_task_thread(rust_kernel *kernel, rust_srv *srv, int id);
~rust_task_thread();
void activate(rust_task *task);
void log(rust_task *task, uint32_t level, char const *fmt, ...);
rust_log & get_log();
@ -132,7 +132,7 @@ struct rust_scheduler : public kernel_owned<rust_scheduler>,
};

inline rust_log &
rust_scheduler::get_log() {
rust_task_thread::get_log() {
return _log;
}

@ -147,4 +147,4 @@ rust_scheduler::get_log() {
// End:
//

#endif /* RUST_SCHEDULER_H */
#endif /* RUST_TASK_THREAD_H */
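
The renamed struct in the header above keeps its kernel_owned<rust_task_thread> base. In general terms that owned-by mixin is a CRTP template carrying a back-pointer to the owner; a rough illustration of the idiom under assumed names (Kernel, kernel_owned_sketch, TaskThread), not the runtime's actual definitions:

// Hypothetical owner type standing in for rust_kernel.
#include <cstdio>

struct Kernel { const char *name; };

// CRTP-style mixin: every owned object carries a pointer to its owner,
// analogous to kernel_owned<T> giving rust_task_thread its kernel.
template <typename T>
struct kernel_owned_sketch {
    Kernel *kernel;
    explicit kernel_owned_sketch(Kernel *k) : kernel(k) {}
};

struct TaskThread : public kernel_owned_sketch<TaskThread> {
    explicit TaskThread(Kernel *k) : kernel_owned_sketch<TaskThread>(k) {}
};

int main() {
    Kernel k = { "kernel" };
    TaskThread t(&k);
    printf("owned by: %s\n", t.kernel->name);
    return 0;
}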
@ -8,7 +8,7 @@

#include "rust_cc.h"
#include "rust_internal.h"
#include "rust_scheduler.h"
#include "rust_task_thread.h"
#include "rust_unwind.h"
#include "rust_upcall.h"
#include "rust_util.h"
@ -46,9 +46,9 @@ static void check_stack_alignment() { }
inline void
call_upcall_on_c_stack(void *args, void *fn_ptr) {
check_stack_alignment();
rust_task *task = rust_scheduler::get_task();
rust_scheduler *sched = task->sched;
sched->c_context.call_shim_on_c_stack(args, fn_ptr);
rust_task *task = rust_task_thread::get_task();
rust_task_thread *thread = task->thread;
thread->c_context.call_shim_on_c_stack(args, fn_ptr);
}

extern "C" void record_sp(void *limit);
@ -62,21 +62,21 @@ extern "C" void record_sp(void *limit);
*/
extern "C" CDECL void
upcall_call_shim_on_c_stack(void *args, void *fn_ptr) {
rust_task *task = rust_scheduler::get_task();
rust_task *task = rust_task_thread::get_task();

// FIXME (1226) - The shim functions generated by rustc contain the
// morestack prologue, so we need to let them know they have enough
// stack.
record_sp(0);

rust_scheduler *sched = task->sched;
rust_task_thread *thread = task->thread;
try {
sched->c_context.call_shim_on_c_stack(args, fn_ptr);
thread->c_context.call_shim_on_c_stack(args, fn_ptr);
} catch (...) {
A(sched, false, "Native code threw an exception");
A(thread, false, "Native code threw an exception");
}

task = rust_scheduler::get_task();
task = rust_task_thread::get_task();
task->record_stack_limit();
}

@ -90,7 +90,7 @@ struct s_fail_args {

extern "C" CDECL void
upcall_s_fail(s_fail_args *args) {
rust_task *task = rust_scheduler::get_task();
rust_task *task = rust_task_thread::get_task();
LOG_UPCALL_ENTRY(task);
LOG_ERR(task, upcall, "upcall fail '%s', %s:%" PRIdPTR,
args->expr, args->file, args->line);
@ -116,7 +116,7 @@ struct s_malloc_args {

extern "C" CDECL void
upcall_s_malloc(s_malloc_args *args) {
rust_task *task = rust_scheduler::get_task();
rust_task *task = rust_task_thread::get_task();
LOG_UPCALL_ENTRY(task);

LOG(task, mem, "upcall malloc(0x%" PRIxPTR ")", args->td);
@ -153,11 +153,11 @@ struct s_free_args {

extern "C" CDECL void
upcall_s_free(s_free_args *args) {
rust_task *task = rust_scheduler::get_task();
rust_task *task = rust_task_thread::get_task();
LOG_UPCALL_ENTRY(task);

rust_scheduler *sched = task->sched;
DLOG(sched, mem,
rust_task_thread *thread = task->thread;
DLOG(thread, mem,
"upcall free(0x%" PRIxPTR ", is_gc=%" PRIdPTR ")",
(uintptr_t)args->ptr);

@ -200,7 +200,7 @@ struct s_shared_malloc_args {

extern "C" CDECL void
upcall_s_shared_malloc(s_shared_malloc_args *args) {
rust_task *task = rust_scheduler::get_task();
rust_task *task = rust_task_thread::get_task();
LOG_UPCALL_ENTRY(task);

LOG(task, mem,
@ -232,11 +232,11 @@ struct s_shared_free_args {

extern "C" CDECL void
upcall_s_shared_free(s_shared_free_args *args) {
rust_task *task = rust_scheduler::get_task();
rust_task *task = rust_task_thread::get_task();
LOG_UPCALL_ENTRY(task);

rust_scheduler *sched = task->sched;
DLOG(sched, mem,
rust_task_thread *thread = task->thread;
DLOG(thread, mem,
"upcall shared_free(0x%" PRIxPTR")",
(uintptr_t)args->ptr);
task->kernel->free(args->ptr);
@ -262,7 +262,7 @@ struct s_create_shared_type_desc_args {

void upcall_s_create_shared_type_desc(s_create_shared_type_desc_args *args)
{
rust_task *task = rust_scheduler::get_task();
rust_task *task = rust_task_thread::get_task();
LOG_UPCALL_ENTRY(task);

// Copy the main part of the type descriptor:
@ -301,7 +301,7 @@ upcall_create_shared_type_desc(type_desc *td) {

void upcall_s_free_shared_type_desc(type_desc *td)
{ // n.b.: invoked from rust_cc.cpp as well as generated code
rust_task *task = rust_scheduler::get_task();
rust_task *task = rust_task_thread::get_task();
LOG_UPCALL_ENTRY(task);

if (td) {
@ -337,7 +337,7 @@ struct s_get_type_desc_args {

extern "C" CDECL void
upcall_s_get_type_desc(s_get_type_desc_args *args) {
rust_task *task = rust_scheduler::get_task();
rust_task *task = rust_task_thread::get_task();
LOG_UPCALL_ENTRY(task);

LOG(task, cache, "upcall get_type_desc with size=%" PRIdPTR
@ -375,7 +375,7 @@ struct s_intern_dict_args {

extern "C" CDECL void
upcall_s_intern_dict(s_intern_dict_args *args) {
rust_task *task = rust_scheduler::get_task();
rust_task *task = rust_task_thread::get_task();
LOG_UPCALL_ENTRY(task);
rust_crate_cache *cache = task->get_crate_cache();
args->res = cache->get_dict(args->n_fields, args->dict);
@ -397,7 +397,7 @@ struct s_vec_grow_args {

extern "C" CDECL void
upcall_s_vec_grow(s_vec_grow_args *args) {
rust_task *task = rust_scheduler::get_task();
rust_task *task = rust_task_thread::get_task();
LOG_UPCALL_ENTRY(task);
reserve_vec(task, args->vp, args->new_sz);
(*args->vp)->fill = args->new_sz;
@ -438,7 +438,7 @@ struct s_vec_push_args {

extern "C" CDECL void
upcall_s_vec_push(s_vec_push_args *args) {
rust_task *task = rust_scheduler::get_task();
rust_task *task = rust_task_thread::get_task();
LOG_UPCALL_ENTRY(task);
size_t new_sz = (*args->vp)->fill + args->elt_ty->size;
reserve_vec(task, args->vp, new_sz);
@ -456,7 +456,7 @@ upcall_vec_push(rust_vec** vp, type_desc* elt_ty, void* elt) {
upcall_s_vec_push(&args);

// Do the stack check to make sure this op, on the Rust stack, is behaving
rust_task *task = rust_scheduler::get_task();
rust_task *task = rust_task_thread::get_task();
task->check_stack_canary();
}

@ -471,7 +471,7 @@ struct s_dynastack_mark_args {

extern "C" CDECL void
upcall_s_dynastack_mark(s_dynastack_mark_args *args) {
args->retval = rust_scheduler::get_task()->dynastack.mark();
args->retval = rust_task_thread::get_task()->dynastack.mark();
}

extern "C" CDECL void *
@ -496,7 +496,7 @@ extern "C" CDECL void
upcall_s_dynastack_alloc(s_dynastack_alloc_args *args) {
size_t sz = args->sz;
args->retval = sz ?
rust_scheduler::get_task()->dynastack.alloc(sz, NULL) : NULL;
rust_task_thread::get_task()->dynastack.alloc(sz, NULL) : NULL;
}

extern "C" CDECL void *
@ -522,7 +522,7 @@ upcall_s_dynastack_alloc_2(s_dynastack_alloc_2_args *args) {
size_t sz = args->sz;
type_desc *ty = args->ty;
args->retval = sz ?
rust_scheduler::get_task()->dynastack.alloc(sz, ty) : NULL;
rust_task_thread::get_task()->dynastack.alloc(sz, ty) : NULL;
}

extern "C" CDECL void *
@ -538,7 +538,7 @@ struct s_dynastack_free_args {

extern "C" CDECL void
upcall_s_dynastack_free(s_dynastack_free_args *args) {
return rust_scheduler::get_task()->dynastack.free(args->ptr);
return rust_task_thread::get_task()->dynastack.free(args->ptr);
}

/** Frees space in the dynamic stack. */
@ -587,7 +587,7 @@ upcall_rust_personality(int version,
s_rust_personality_args args = {(_Unwind_Reason_Code)0,
version, actions, exception_class,
ue_header, context};
rust_task *task = rust_scheduler::get_task();
rust_task *task = rust_task_thread::get_task();

// The personality function is run on the stack of the
// last function that threw or landed, which is going
@ -659,7 +659,7 @@ struct s_new_stack_args {

extern "C" CDECL void
upcall_s_new_stack(struct s_new_stack_args *args) {
rust_task *task = rust_scheduler::get_task();
rust_task *task = rust_task_thread::get_task();
args->result = task->new_stack(args->stk_sz,
args->args_addr,
args->args_sz);
@ -674,7 +674,7 @@ upcall_new_stack(size_t stk_sz, void *args_addr, size_t args_sz) {

extern "C" CDECL void
upcall_s_del_stack() {
rust_task *task = rust_scheduler::get_task();
rust_task *task = rust_task_thread::get_task();
task->del_stack();
}

@ -689,7 +689,7 @@ upcall_del_stack() {
// needs to acquire the value of the stack pointer
extern "C" CDECL void
upcall_reset_stack_limit() {
rust_task *task = rust_scheduler::get_task();
rust_task *task = rust_task_thread::get_task();
task->reset_stack_limit();
}
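
One pattern worth noting in upcall_call_shim_on_c_stack above is the catch-all around the shim call, so a C++ exception thrown by native code cannot unwind across the upcall boundary. A minimal sketch of just that guard, using a hypothetical run_native helper in place of the runtime's shim machinery:

// Sketch only: mirrors the try / catch (...) in the diff, where the
// runtime asserts rather than letting the exception escape the boundary.
#include <cassert>
#include <cstdio>

// Hypothetical native callback type; the real shim receives args and fn_ptr.
typedef void (*native_fn)(void *);

static void run_native(native_fn fn, void *args) {
    try {
        fn(args);
    } catch (...) {
        assert(false && "Native code threw an exception");
    }
}

static void well_behaved(void *msg) {
    printf("%s\n", static_cast<const char *>(msg));
}

int main() {
    char msg[] = "native call completed";
    run_native(well_behaved, msg);
    return 0;
}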
@ -13,24 +13,24 @@ ptr_vec<T>::ptr_vec(rust_task *task) :
fill(0),
data(new (task, "ptr_vec<T>") T*[alloc])
{
I(task->sched, data);
DLOG(task->sched, mem, "new ptr_vec(data=0x%" PRIxPTR ") -> 0x%" PRIxPTR,
I(task->thread, data);
DLOG(task->thread, mem, "new ptr_vec(data=0x%" PRIxPTR ") -> 0x%" PRIxPTR,
(uintptr_t)data, (uintptr_t)this);
}

template <typename T>
ptr_vec<T>::~ptr_vec()
{
I(task->sched, data);
DLOG(task->sched, mem, "~ptr_vec 0x%" PRIxPTR ", data=0x%" PRIxPTR,
I(task->thread, data);
DLOG(task->thread, mem, "~ptr_vec 0x%" PRIxPTR ", data=0x%" PRIxPTR,
(uintptr_t)this, (uintptr_t)data);
I(task->sched, fill == 0);
I(task->thread, fill == 0);
task->free(data);
}

template <typename T> T *&
ptr_vec<T>::operator[](size_t offset) {
I(task->sched, data[offset]->idx == offset);
I(task->thread, data[offset]->idx == offset);
return data[offset];
}

@ -38,14 +38,14 @@ template <typename T>
void
ptr_vec<T>::push(T *p)
{
I(task->sched, data);
I(task->sched, fill <= alloc);
I(task->thread, data);
I(task->thread, fill <= alloc);
if (fill == alloc) {
alloc *= 2;
data = (T **)task->realloc(data, alloc * sizeof(T*));
I(task->sched, data);
I(task->thread, data);
}
I(task->sched, fill < alloc);
I(task->thread, fill < alloc);
p->idx = fill;
data[fill++] = p;
}
@ -68,13 +68,13 @@ template <typename T>
void
ptr_vec<T>::trim(size_t sz)
{
I(task->sched, data);
I(task->thread, data);
if (sz <= (alloc / 4) &&
(alloc / 2) >= INIT_SIZE) {
alloc /= 2;
I(task->sched, alloc >= fill);
I(task->thread, alloc >= fill);
data = (T **)task->realloc(data, alloc * sizeof(T*));
I(task->sched, data);
I(task->thread, data);
}
}

@ -83,9 +83,9 @@ void
ptr_vec<T>::swap_delete(T *item)
{
/* Swap the endpoint into i and decr fill. */
I(task->sched, data);
I(task->sched, fill > 0);
I(task->sched, item->idx < fill);
I(task->thread, data);
I(task->thread, fill > 0);
I(task->thread, item->idx < fill);
fill--;
if (fill > 0) {
T *subst = data[fill];
@ -124,13 +124,13 @@ align_to(T size, size_t alignment) {

// Initialization helper for ISAAC RNG

template <typename sched_or_kernel>
template <typename thread_or_kernel>
static inline void
isaac_init(sched_or_kernel *sched, randctx *rctx)
isaac_init(thread_or_kernel *thread, randctx *rctx)
{
memset(rctx, 0, sizeof(randctx));

char *rust_seed = sched->env->rust_seed;
char *rust_seed = thread->env->rust_seed;
if (rust_seed != NULL) {
ub4 seed = (ub4) atoi(rust_seed);
for (size_t i = 0; i < RANDSIZ; i ++) {
@ -140,24 +140,24 @@ isaac_init(sched_or_kernel *sched, randctx *rctx)
} else {
#ifdef __WIN32__
HCRYPTPROV hProv;
sched->win32_require
thread->win32_require
(_T("CryptAcquireContext"),
CryptAcquireContext(&hProv, NULL, NULL, PROV_RSA_FULL,
CRYPT_VERIFYCONTEXT|CRYPT_SILENT));
sched->win32_require
thread->win32_require
(_T("CryptGenRandom"),
CryptGenRandom(hProv, sizeof(rctx->randrsl),
(BYTE*)(&rctx->randrsl)));
sched->win32_require
thread->win32_require
(_T("CryptReleaseContext"),
CryptReleaseContext(hProv, 0));
#else
int fd = open("/dev/urandom", O_RDONLY);
I(sched, fd > 0);
I(sched,
I(thread, fd > 0);
I(thread,
read(fd, (void*) &rctx->randrsl, sizeof(rctx->randrsl))
== sizeof(rctx->randrsl));
I(sched, close(fd) == 0);
I(thread, close(fd) == 0);
#endif
}
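
The non-Windows branch of isaac_init above seeds the RNG state directly from /dev/urandom, asserting on every syscall. A standalone sketch of just that seeding step, assuming a plain byte buffer in place of the randctx seed array:

// Sketch only: fill a seed buffer from /dev/urandom, checking each step
// the way isaac_init's invariant macros do.
#include <cassert>
#include <cstdio>
#include <fcntl.h>
#include <unistd.h>

int main() {
    unsigned char seed[32];  // stand-in for rctx->randrsl
    int fd = open("/dev/urandom", O_RDONLY);
    assert(fd > 0);
    assert(read(fd, seed, sizeof(seed)) == (ssize_t)sizeof(seed));
    assert(close(fd) == 0);
    printf("seeded %zu bytes, first byte %u\n", sizeof(seed), seed[0]);
    return 0;
}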
@ -87,7 +87,7 @@ private:
public:

rust_uvtmp_thread() {
task = rust_scheduler::get_task();
task = rust_task_thread::get_task();
stop_flag = false;
loop = uv_loop_new();
uv_idle_init(loop, &idle);