From aee83d2ff11139db92d37d2afc4e1dba3d198012 Mon Sep 17 00:00:00 2001 From: Brian Anderson Date: Thu, 2 Feb 2012 17:02:50 -0800 Subject: [PATCH] rt: Only wake up all schedulers when no tasks are left At the moment there's not really any reason to be raising this signal, since the schedulers wake up periodically anyway, but once we remove the timer this will be how the schedulers know to exit. --- src/rt/rust_kernel.cpp | 6 ++---- src/rt/rust_kernel.h | 2 +- src/rt/rust_scheduler.cpp | 21 +++++++++++++++++---- src/rt/rust_scheduler.h | 5 +++++ 4 files changed, 25 insertions(+), 9 deletions(-) diff --git a/src/rt/rust_kernel.cpp b/src/rt/rust_kernel.cpp index a52c2d48eed6..aaf19f240c8f 100644 --- a/src/rt/rust_kernel.cpp +++ b/src/rt/rust_kernel.cpp @@ -189,11 +189,9 @@ rust_kernel::release_task_id(rust_task_id id) { task_table.remove(id); } -void rust_kernel::wakeup_schedulers() { +void rust_kernel::exit_schedulers() { for(size_t i = 0; i < num_threads; ++i) { - rust_scheduler *sched = threads[i]; - scoped_lock with(sched->lock); - sched->lock.signal_all(); + threads[i]->exit(); } } diff --git a/src/rt/rust_kernel.h b/src/rt/rust_kernel.h index 3d64e57d9f9d..b82119cc5adb 100644 --- a/src/rt/rust_kernel.h +++ b/src/rt/rust_kernel.h @@ -47,7 +47,7 @@ public: bool is_deadlocked(); void signal_kernel_lock(); - void wakeup_schedulers(); + void exit_schedulers(); void log_all_scheduler_state(); void log(uint32_t level, char const *fmt, ...); diff --git a/src/rt/rust_scheduler.cpp b/src/rt/rust_scheduler.cpp index 5a6cbd241aec..9005680dcc66 100644 --- a/src/rt/rust_scheduler.cpp +++ b/src/rt/rust_scheduler.cpp @@ -32,7 +32,8 @@ rust_scheduler::rust_scheduler(rust_kernel *kernel, kernel(kernel), id(id), min_stack_size(kernel->env->min_stack_size), - env(kernel->env) + env(kernel->env), + should_exit(false) { LOGPTR(this, "new dom", (uintptr_t)this); isaac_init(this, &rctx); @@ -160,8 +161,12 @@ rust_scheduler::reap_dead_tasks(int id) { rust_task *task = 
dead_tasks_copy[i]; if (task) { task->deref(); - sync::decrement(kernel->live_tasks); - kernel->wakeup_schedulers(); + int live_tasks = sync::decrement(kernel->live_tasks); + if (live_tasks == 0) { + // There are no more tasks and there never will be. + // Tell all the schedulers to exit. + kernel->exit_schedulers(); + } } } srv->free(dead_tasks_copy); @@ -236,7 +241,7 @@ rust_scheduler::start_main_loop() { DLOG(this, dom, "started domain loop %d", id); - while (kernel->live_tasks > 0) { + while (!should_exit) { A(this, kernel->is_deadlocked() == false, "deadlock"); DLOG(this, dom, "worker %d, number_of_live_tasks = %d, total = %d", @@ -375,6 +380,14 @@ rust_scheduler::get_task() { } #endif +void +rust_scheduler::exit() { + A(this, !lock.lock_held_by_current_thread(), "Shouldn't have lock"); + scoped_lock with(lock); + should_exit = true; + lock.signal_all(); +} + // // Local Variables: // mode: C++ diff --git a/src/rt/rust_scheduler.h b/src/rt/rust_scheduler.h index b5bd92efaaa1..ee69af5479e8 100644 --- a/src/rt/rust_scheduler.h +++ b/src/rt/rust_scheduler.h @@ -91,6 +91,8 @@ struct rust_scheduler : public kernel_owned, rust_env *env; context c_context; + bool should_exit; + // Only a pointer to 'name' is kept, so it must live as long as this // domain. rust_scheduler(rust_kernel *kernel, rust_srv *srv, int id); @@ -127,6 +129,9 @@ struct rust_scheduler : public kernel_owned, void place_task_in_tls(rust_task *task); static rust_task *get_task(); + + // Tells the scheduler to exit its scheduling loop and thread + void exit(); }; inline rust_log &