2012-02-07 18:11:57 -06:00
|
|
|
#include <vector>
|
2010-08-27 20:26:36 -05:00
|
|
|
#include "rust_internal.h"
|
2012-01-06 14:06:35 -06:00
|
|
|
#include "rust_util.h"
|
2012-02-03 17:12:18 -06:00
|
|
|
#include "rust_scheduler.h"
|
2010-08-27 20:26:36 -05:00
|
|
|
|
2011-07-29 13:00:44 -05:00
|
|
|
// Kernel-level logging helpers. KLOG_ logs through the kernel's default
// "kern" channel; KLOG_ERR_ logs at log_err severity for the given
// module field. Both expand against `this`, so they are only usable
// inside rust_kernel member functions.
#define KLOG_(...) \
    KLOG(this, kern, __VA_ARGS__)
#define KLOG_ERR_(field, ...) \
    KLOG_LVL(this, field, log_err, __VA_ARGS__)
|
2011-04-07 15:05:45 -05:00
|
|
|
|
2012-02-06 23:06:12 -06:00
|
|
|
// Construct the kernel. The kernel owns a memory region (_region) used
// for kernel-level allocations and a logger (_log), both backed by the
// given service object. All task/scheduler counters and id generators
// start at zero; rval (the process exit status) defaults to success.
rust_kernel::rust_kernel(rust_srv *srv) :
    _region(srv, true),   // second arg presumably selects a synchronized region -- TODO confirm
    _log(srv, NULL),
    srv(srv),
    live_tasks(0),
    max_task_id(0),
    rval(0),
    live_schedulers(0),
    max_sched_id(0),
    env(srv->env)
{
}
|
|
|
|
|
2010-08-27 20:26:36 -05:00
|
|
|
void
|
2011-04-19 05:21:57 -05:00
|
|
|
rust_kernel::log(uint32_t level, char const *fmt, ...) {
|
2011-01-14 18:01:43 -06:00
|
|
|
char buf[BUF_BYTES];
|
2011-04-19 05:21:57 -05:00
|
|
|
va_list args;
|
|
|
|
va_start(args, fmt);
|
|
|
|
vsnprintf(buf, sizeof(buf), fmt, args);
|
|
|
|
_log.trace_ln(NULL, level, buf);
|
|
|
|
va_end(args);
|
2010-08-27 20:26:36 -05:00
|
|
|
}
|
2010-09-07 20:39:07 -05:00
|
|
|
|
2011-07-06 17:06:30 -05:00
|
|
|
void
|
|
|
|
rust_kernel::fatal(char const *fmt, ...) {
|
|
|
|
char buf[BUF_BYTES];
|
|
|
|
va_list args;
|
|
|
|
va_start(args, fmt);
|
|
|
|
vsnprintf(buf, sizeof(buf), fmt, args);
|
|
|
|
_log.trace_ln(NULL, (uint32_t)0, buf);
|
|
|
|
exit(1);
|
|
|
|
va_end(args);
|
|
|
|
}
|
|
|
|
|
2010-09-07 20:39:07 -05:00
|
|
|
void *
|
2011-07-18 14:02:26 -05:00
|
|
|
rust_kernel::malloc(size_t size, const char *tag) {
|
|
|
|
return _region.malloc(size, tag);
|
2010-09-07 20:39:07 -05:00
|
|
|
}
|
|
|
|
|
2011-07-06 00:44:22 -05:00
|
|
|
void *
|
|
|
|
rust_kernel::realloc(void *mem, size_t size) {
|
2011-07-18 14:02:26 -05:00
|
|
|
return _region.realloc(mem, size);
|
2011-07-06 00:44:22 -05:00
|
|
|
}
|
|
|
|
|
2010-09-07 20:39:07 -05:00
|
|
|
void rust_kernel::free(void *mem) {
|
2011-07-18 14:02:26 -05:00
|
|
|
_region.free(mem);
|
2010-09-07 20:39:07 -05:00
|
|
|
}
|
|
|
|
|
2012-02-06 23:06:12 -06:00
|
|
|
// Create a new scheduler running `num_threads` task threads, register
// it in sched_table under a freshly allocated id, and start its
// threads. Returns the new scheduler's id.
rust_sched_id
rust_kernel::create_scheduler(size_t num_threads) {
    // sched_lock is non-reentrant; caller must not hold it.
    I(this, !sched_lock.lock_held_by_current_thread());
    rust_sched_id id;
    rust_scheduler *sched;
    {
        scoped_lock with(sched_lock);
        // Ids are allocated monotonically and never reused.
        id = max_sched_id++;
        K(srv, id != INTPTR_MAX, "Hit the maximum scheduler id");
        // Allocated through a project-specific placement new taking the
        // kernel and an allocation tag -- presumably routed to _region.
        sched = new (this, "rust_scheduler")
            rust_scheduler(this, srv, num_threads, id);
        bool is_new = sched_table
            .insert(std::pair<rust_sched_id, rust_scheduler*>(id, sched)).second;
        A(this, is_new, "Reusing a sched id?");
        live_schedulers++;
    }
    // Start the threads outside the lock; the scheduler is already
    // published via sched_table at this point.
    sched->start_task_threads();
    return id;
}
|
|
|
|
|
2012-02-03 17:45:59 -06:00
|
|
|
// Look up a scheduler by id under sched_lock. Returns NULL when no
// scheduler with that id is registered.
rust_scheduler *
rust_kernel::get_scheduler_by_id(rust_sched_id id) {
    I(this, !sched_lock.lock_held_by_current_thread());
    scoped_lock with(sched_lock);
    sched_map::iterator it = sched_table.find(id);
    if (it == sched_table.end()) {
        return NULL;
    }
    return it->second;
}
|
|
|
|
|
2012-02-04 16:54:10 -06:00
|
|
|
// Remove the scheduler with the given id from sched_table and destroy
// it. When the last live scheduler is released, signals sched_lock so
// wait_for_schedulers() can return.
void
rust_kernel::release_scheduler_id(rust_sched_id id) {
    // sched_lock is non-reentrant; caller must not hold it.
    I(this, !sched_lock.lock_held_by_current_thread());
    scoped_lock with(sched_lock);
    sched_map::iterator iter = sched_table.find(id);
    // Releasing an unregistered id is a bug.
    I(this, iter != sched_table.end());
    rust_scheduler *sched = iter->second;
    // Unpublish before deleting so no one can look it up mid-destruction.
    sched_table.erase(iter);
    delete sched;
    live_schedulers--;
    if (live_schedulers == 0) {
        // We're all done. Tell the main thread to continue
        sched_lock.signal();
    }
}
|
|
|
|
|
2012-02-06 23:06:12 -06:00
|
|
|
int
|
|
|
|
rust_kernel::wait_for_schedulers()
|
|
|
|
{
|
|
|
|
I(this, !sched_lock.lock_held_by_current_thread());
|
2012-02-06 23:12:59 -06:00
|
|
|
scoped_lock with(sched_lock);
|
|
|
|
// Schedulers could possibly have already exited
|
|
|
|
if (live_schedulers != 0) {
|
|
|
|
sched_lock.wait();
|
2012-02-06 23:06:12 -06:00
|
|
|
}
|
2012-02-06 23:12:59 -06:00
|
|
|
return rval;
|
2012-02-06 23:06:12 -06:00
|
|
|
}
|
|
|
|
|
2012-02-07 18:11:57 -06:00
|
|
|
// FIXME: Fix all these FIXMEs
// Record a failing exit status and kill every task in every scheduler,
// driving the runtime toward shutdown.
void
rust_kernel::fail() {
    // FIXME: On windows we're getting "Application has requested the
    // Runtime to terminate it in an unusual way" when trying to shutdown
    // cleanly.
    set_exit_status(PROC_FAIL_CODE);
#if defined(__WIN32__)
    // NOTE(review): rval is read here without rval_lock -- racy in
    // principle, though we are about to exit anyway. Confirm intent.
    exit(rval);
#endif
    // Copy the list of schedulers so that we don't hold the lock while
    // running kill_all_tasks.
    // FIXME: There's a lot that happens under kill_all_tasks, and I don't
    // know that holding sched_lock here is ok, but we need to hold the
    // sched lock to prevent the scheduler from being destroyed while
    // we are using it. Probably we need to make rust_scheduler atomicly
    // reference counted.
    std::vector<rust_scheduler*> scheds;
    {
        scoped_lock with(sched_lock);
        for (sched_map::iterator iter = sched_table.begin();
             iter != sched_table.end(); iter++) {
            scheds.push_back(iter->second);
        }
    }

    // FIXME: This is not a foolproof way to kill all tasks while ensuring
    // that no new tasks or schedulers are created in the meantime that
    // keep the scheduler alive.
    for (std::vector<rust_scheduler*>::iterator iter = scheds.begin();
         iter != scheds.end(); iter++) {
        (*iter)->kill_all_tasks();
    }
}
|
|
|
|
|
2012-02-03 17:12:18 -06:00
|
|
|
void
|
|
|
|
rust_kernel::register_task(rust_task *task) {
|
2012-02-04 02:31:29 -06:00
|
|
|
uintptr_t new_live_tasks;
|
2012-02-03 19:26:54 -06:00
|
|
|
{
|
|
|
|
scoped_lock with(task_lock);
|
|
|
|
task->user.id = max_task_id++;
|
|
|
|
task_table.put(task->user.id, task);
|
2012-02-04 02:03:45 -06:00
|
|
|
new_live_tasks = ++live_tasks;
|
2012-02-03 19:26:54 -06:00
|
|
|
}
|
2012-02-03 19:31:00 -06:00
|
|
|
K(srv, task->user.id != INTPTR_MAX, "Hit the maximum task id");
|
2012-02-03 19:26:54 -06:00
|
|
|
KLOG_("Registered task %" PRIdPTR, task->user.id);
|
|
|
|
KLOG_("Total outstanding tasks: %d", new_live_tasks);
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
rust_kernel::release_task_id(rust_task_id id) {
|
|
|
|
KLOG_("Releasing task %" PRIdPTR, id);
|
2012-02-04 02:31:29 -06:00
|
|
|
uintptr_t new_live_tasks;
|
2012-02-03 19:26:54 -06:00
|
|
|
{
|
|
|
|
scoped_lock with(task_lock);
|
|
|
|
task_table.remove(id);
|
2012-02-04 02:03:45 -06:00
|
|
|
new_live_tasks = --live_tasks;
|
2012-02-03 19:26:54 -06:00
|
|
|
}
|
|
|
|
KLOG_("Total outstanding tasks: %d", new_live_tasks);
|
2012-02-03 17:12:18 -06:00
|
|
|
}
|
|
|
|
|
2011-08-08 15:38:20 -05:00
|
|
|
// Look up a live task by id and take a reference on it. Returns NULL if
// the id is unknown or the task is mid-destruction. The caller owns the
// returned reference and must release it.
rust_task *
rust_kernel::get_task_by_id(rust_task_id id) {
    scoped_lock with(task_lock);
    rust_task *task = NULL;
    // get leaves task unchanged if not found.
    task_table.get(id, &task);
    if(task) {
        if(task->get_ref_count() == 0) {
            // this means the destructor is running, since the destructor
            // grabs the kernel lock to unregister the task. Pretend this
            // doesn't actually exist.
            return NULL;
        }
        else {
            // Still alive: take a reference while we hold task_lock so
            // the task can't be destroyed out from under the caller.
            task->ref();
        }
    }
    return task;
}
|
|
|
|
|
2011-06-28 13:34:20 -05:00
|
|
|
#ifdef __WIN32__
|
|
|
|
void
|
|
|
|
rust_kernel::win32_require(LPCTSTR fn, BOOL ok) {
|
|
|
|
if (!ok) {
|
|
|
|
LPTSTR buf;
|
|
|
|
DWORD err = GetLastError();
|
|
|
|
FormatMessage(FORMAT_MESSAGE_ALLOCATE_BUFFER |
|
|
|
|
FORMAT_MESSAGE_FROM_SYSTEM |
|
|
|
|
FORMAT_MESSAGE_IGNORE_INSERTS,
|
|
|
|
NULL, err,
|
|
|
|
MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT),
|
|
|
|
(LPTSTR) &buf, 0, NULL );
|
2011-07-28 12:41:48 -05:00
|
|
|
KLOG_ERR_(dom, "%s failed with error %ld: %s", fn, err, buf);
|
2011-06-28 13:34:20 -05:00
|
|
|
LocalFree((HLOCAL)buf);
|
2011-07-28 12:41:48 -05:00
|
|
|
I(this, ok);
|
2011-06-28 13:34:20 -05:00
|
|
|
}
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
2012-01-13 00:17:21 -06:00
|
|
|
void
|
|
|
|
rust_kernel::set_exit_status(int code) {
|
2012-02-03 19:26:54 -06:00
|
|
|
scoped_lock with(rval_lock);
|
2012-01-13 00:17:21 -06:00
|
|
|
// If we've already failed then that's the code we're going to use
|
|
|
|
if (rval != PROC_FAIL_CODE) {
|
|
|
|
rval = code;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2010-09-08 17:48:10 -05:00
|
|
|
//
|
|
|
|
// Local Variables:
|
|
|
|
// mode: C++
|
|
|
|
// fill-column: 78;
|
|
|
|
// indent-tabs-mode: nil
|
|
|
|
// c-basic-offset: 4
|
|
|
|
// buffer-file-coding-system: utf-8-unix
|
|
|
|
// End:
|
|
|
|
//
|