rt: Only wake up all schedulers when no tasks are left
At the moment there's not really any reason to be raising this signal, since the schedulers wake up periodically anyway, but once we remove the timer this will be how the schedulers know to exit.
parent 5c3c8d454d
commit aee83d2ff1
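The mechanism the diff introduces is a small shutdown handshake: each scheduler carries a should_exit flag that its main loop now checks, rust_scheduler::exit() sets that flag under the scheduler's lock and broadcasts on it so a sleeping worker wakes up, and rust_kernel::exit_schedulers() simply calls exit() on every scheduler. Below is a minimal standalone C++ sketch of that pattern, assuming std::mutex and std::condition_variable as stand-ins for the runtime's own lock type; toy_scheduler and its members are illustrative names, not the real runtime API.

// Sketch only: illustrates the should_exit flag + broadcast pattern from this
// commit using standard-library primitives, not the actual runtime types.
#include <condition_variable>
#include <iostream>
#include <mutex>
#include <thread>

struct toy_scheduler {
    std::mutex lock;
    std::condition_variable cond;
    bool should_exit = false;

    // Scheduling loop: instead of polling on a timer, sleep until signaled
    // and leave only once told to exit.
    void main_loop() {
        std::unique_lock<std::mutex> with(lock);
        while (!should_exit) {
            cond.wait(with);  // woken by exit() (or, in the real runtime,
                              // by new work arriving)
        }
        std::cout << "scheduler exiting\n";
    }

    // Counterpart of rust_scheduler::exit(): flip the flag under the lock
    // and wake every waiter.
    void exit() {
        std::lock_guard<std::mutex> with(lock);
        should_exit = true;
        cond.notify_all();
    }
};

int main() {
    toy_scheduler sched;
    std::thread worker(&toy_scheduler::main_loop, &sched);
    sched.exit();  // kernel-side "exit_schedulers" for a single scheduler
    worker.join();
}

Setting the flag and broadcasting while holding the lock is what makes the wakeup reliable: a worker either sees should_exit before it goes to sleep, or it is already waiting and receives the broadcast.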
@@ -189,11 +189,9 @@ rust_kernel::release_task_id(rust_task_id id) {
     task_table.remove(id);
 }
 
-void rust_kernel::wakeup_schedulers() {
+void rust_kernel::exit_schedulers() {
     for(size_t i = 0; i < num_threads; ++i) {
-        rust_scheduler *sched = threads[i];
-        scoped_lock with(sched->lock);
-        sched->lock.signal_all();
+        threads[i]->exit();
     }
 }
 
@@ -47,7 +47,7 @@ public:
     bool is_deadlocked();
 
     void signal_kernel_lock();
-    void wakeup_schedulers();
+    void exit_schedulers();
 
     void log_all_scheduler_state();
     void log(uint32_t level, char const *fmt, ...);
@@ -32,7 +32,8 @@ rust_scheduler::rust_scheduler(rust_kernel *kernel,
     kernel(kernel),
     id(id),
     min_stack_size(kernel->env->min_stack_size),
-    env(kernel->env)
+    env(kernel->env),
+    should_exit(false)
 {
     LOGPTR(this, "new dom", (uintptr_t)this);
     isaac_init(this, &rctx);
@@ -160,8 +161,12 @@ rust_scheduler::reap_dead_tasks(int id) {
         rust_task *task = dead_tasks_copy[i];
         if (task) {
             task->deref();
-            sync::decrement(kernel->live_tasks);
-            kernel->wakeup_schedulers();
+            int live_tasks = sync::decrement(kernel->live_tasks);
+            if (live_tasks == 0) {
+                // There are no more tasks and there never will be.
+                // Tell all the schedulers to exit.
+                kernel->exit_schedulers();
+            }
         }
     }
     srv->free(dead_tasks_copy);
@@ -236,7 +241,7 @@ rust_scheduler::start_main_loop() {
 
     DLOG(this, dom, "started domain loop %d", id);
 
-    while (kernel->live_tasks > 0) {
+    while (!should_exit) {
         A(this, kernel->is_deadlocked() == false, "deadlock");
 
         DLOG(this, dom, "worker %d, number_of_live_tasks = %d, total = %d",
@@ -375,6 +380,14 @@ rust_scheduler::get_task() {
 }
 #endif
 
+void
+rust_scheduler::exit() {
+    A(this, !lock.lock_held_by_current_thread(), "Shouldn't have lock");
+    scoped_lock with(lock);
+    should_exit = true;
+    lock.signal_all();
+}
+
 //
 // Local Variables:
 // mode: C++
@@ -91,6 +91,8 @@ struct rust_scheduler : public kernel_owned<rust_scheduler>,
     rust_env *env;
     context c_context;
 
+    bool should_exit;
+
     // Only a pointer to 'name' is kept, so it must live as long as this
     // domain.
     rust_scheduler(rust_kernel *kernel, rust_srv *srv, int id);
@@ -127,6 +129,9 @@ struct rust_scheduler : public kernel_owned<rust_scheduler>,
     void place_task_in_tls(rust_task *task);
 
     static rust_task *get_task();
+
+    // Tells the scheduler to exit its scheduling loop and thread
+    void exit();
 };
 
 inline rust_log &
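One detail worth calling out in the reap_dead_tasks hunk: the code now keeps the value returned by sync::decrement, so exactly one reaper observes the live-task count reaching zero and becomes the thread that calls exit_schedulers(). A small sketch of that decrement-and-check idiom, assuming std::atomic as a stand-in for the runtime's sync::decrement (names illustrative):

// Sketch only: shows why reading the decremented value matters. fetch_sub
// returns the previous count, so exactly one thread sees the drop to zero
// and triggers shutdown exactly once.
#include <atomic>
#include <cstdio>
#include <thread>
#include <vector>

std::atomic<int> live_tasks(4);

void reap_one_task() {
    int remaining = live_tasks.fetch_sub(1) - 1;  // like sync::decrement
    if (remaining == 0) {
        // There are no more tasks and there never will be: only one reaper
        // reaches this branch, so the "exit all schedulers" step runs once.
        std::printf("last task reaped, telling schedulers to exit\n");
    }
}

int main() {
    std::vector<std::thread> reapers;
    for (int i = 0; i < 4; ++i)
        reapers.emplace_back(reap_one_task);
    for (auto &t : reapers)
        t.join();
}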