Adding support for pinning tasks to the currently running thread. Closes #598.

This commit is contained in:
Eric Holk 2011-06-29 18:47:47 -07:00
parent afabde19dc
commit 63dcd325b9
8 changed files with 67 additions and 17 deletions

View file

@ -2,6 +2,8 @@ native "rust" mod rustrt {
fn task_sleep(uint time_in_us);
fn task_yield();
fn task_join(task t);
fn pin_task();
fn unpin_task();
}
/**
@ -21,6 +23,14 @@ fn join(task t) {
ret rustrt::task_join(t);
}
/**
 * Pins the calling task to the worker thread it is currently running on,
 * so the scheduler will not migrate it to another thread until unpin()
 * is called. Thin wrapper over the native runtime upcall.
 */
fn pin() {
rustrt::pin_task();
}
/**
 * Removes any thread pinning from the calling task, allowing the
 * scheduler to run it on any worker thread again. Thin wrapper over the
 * native runtime upcall.
 */
fn unpin() {
rustrt::unpin_task();
}
// Local Variables:
// mode: rust;
// fill-column: 78;

View file

@ -700,6 +700,15 @@ ivec_copy_from_buf(rust_task *task, type_desc *ty, rust_ivec *v, void *ptr,
v->payload.ptr->fill = new_size;
}
// Native upcall backing std::task::pin(): pins `task` to the worker
// thread it is currently running on by delegating to rust_task::pin().
// The task pointer is supplied implicitly by the native-call convention.
extern "C" void
pin_task(rust_task *task) {
task->pin();
}
// Native upcall backing std::task::unpin(): clears `task`'s thread pin
// by delegating to rust_task::unpin(), making it schedulable on any
// worker thread again.
extern "C" void
unpin_task(rust_task *task) {
task->unpin();
}
//
// Local Variables:

View file

@ -83,12 +83,12 @@ rust_scheduler::number_of_live_tasks() {
* Delete any dead tasks.
*/
void
rust_scheduler::reap_dead_tasks() {
rust_scheduler::reap_dead_tasks(int id) {
I(this, kernel->scheduler_lock.lock_held_by_current_thread());
for (size_t i = 0; i < dead_tasks.length(); ) {
rust_task *task = dead_tasks[i];
// Make sure this task isn't still running somewhere else...
if (task->ref_count == 0 && task->can_schedule()) {
if (task->ref_count == 0 && task->can_schedule(id)) {
I(this, task->tasks_waiting_to_join.is_empty());
dead_tasks.remove(task);
DLOG(this, task,
@ -124,7 +124,7 @@ void rust_scheduler::drain_incoming_message_queue(bool process) {
* Returns NULL if no tasks can be scheduled.
*/
rust_task *
rust_scheduler::schedule_task() {
rust_scheduler::schedule_task(int id) {
I(this, this);
// FIXME: in the face of failing tasks, this is not always right.
// I(this, n_live_tasks() > 0);
@ -133,7 +133,7 @@ rust_scheduler::schedule_task() {
// Look around for a runnable task, starting at k.
for(size_t j = 0; j < running_tasks.length(); ++j) {
size_t i = (j + k) % running_tasks.length();
if (running_tasks[i]->can_schedule()) {
if (running_tasks[i]->can_schedule(id)) {
return (rust_task *)running_tasks[i];
}
}
@ -202,7 +202,7 @@ rust_scheduler::start_main_loop(int id) {
drain_incoming_message_queue(true);
rust_task *scheduled_task = schedule_task();
rust_task *scheduled_task = schedule_task(id);
// The scheduler busy waits until a task is available for scheduling.
// Eventually we'll want a smarter way to do this, perhaps sleep
@ -239,10 +239,9 @@ rust_scheduler::start_main_loop(int id) {
DLOG(this, task,
"Running task %p on worker %d",
scheduled_task, id);
I(this, !scheduled_task->active);
scheduled_task->active = true;
scheduled_task->running_on = id;
activate(scheduled_task);
scheduled_task->active = false;
scheduled_task->running_on = -1;
DLOG(this, task,
"returned from task %s @0x%" PRIxPTR
@ -253,7 +252,7 @@ rust_scheduler::start_main_loop(int id) {
scheduled_task->rust_sp,
id);
reap_dead_tasks();
reap_dead_tasks(id);
}
DLOG(this, dom,
@ -272,7 +271,7 @@ rust_scheduler::start_main_loop(int id) {
} else {
drain_incoming_message_queue(true);
}
reap_dead_tasks();
reap_dead_tasks(id);
}
DLOG(this, dom, "finished main-loop %d (dom.rval = %d)", id, rval);

View file

@ -79,8 +79,8 @@ struct rust_scheduler : public kernel_owned<rust_scheduler>,
rust_crate_cache *get_cache();
size_t number_of_live_tasks();
void reap_dead_tasks();
rust_task *schedule_task();
void reap_dead_tasks(int id);
rust_task *schedule_task(int id);
int start_main_loop(int id);

View file

@ -70,7 +70,8 @@ rust_task::rust_task(rust_scheduler *sched, rust_task_list *state,
list_index(-1),
rendezvous_ptr(0),
handle(NULL),
active(false),
running_on(-1),
pinned_on(-1),
local_region(&sched->srv->local_region),
synchronized_region(&sched->srv->synchronized_region)
{
@ -471,9 +472,11 @@ rust_task::get_handle() {
return handle;
}
bool rust_task::can_schedule()
bool rust_task::can_schedule(int id)
{
return yield_timer.has_timed_out() && !active;
return yield_timer.has_timed_out() &&
running_on == -1 &&
(pinned_on == -1 || pinned_on == id);
}
void *
@ -524,6 +527,14 @@ rust_task::free(void *mem, memory_region::memory_region_type type) {
return;
}
// Pin this task to the worker thread it is currently running on by
// recording that worker's id; can_schedule(id) then only admits that
// worker.
// NOTE(review): if this is ever called while the task is not running
// (running_on == -1), the assignment leaves the task effectively
// unpinned, since -1 is the "not pinned" sentinel — confirm callers
// only invoke pin() from within the running task itself.
void rust_task::pin() {
pinned_on = running_on;
}
// Clear the thread pin (-1 is the "not pinned" sentinel), so
// can_schedule() will again accept any worker id for this task.
void rust_task::unpin() {
pinned_on = -1;
}
//
// Local Variables:
// mode: C++

View file

@ -77,7 +77,8 @@ rust_task : public maybe_proxy<rust_task>,
// This flag indicates that a worker is either currently running the task
// or is about to run this task.
volatile bool active;
int running_on;
int pinned_on;
memory_region local_region;
memory_region synchronized_region;
@ -143,7 +144,7 @@ rust_task : public maybe_proxy<rust_task>,
frame_glue_fns *get_frame_glue_fns(uintptr_t fp);
rust_crate_cache * get_crate_cache();
bool can_schedule();
bool can_schedule(int worker);
void *malloc(size_t size, memory_region::memory_region_type type);
void *calloc(size_t size);
@ -151,6 +152,9 @@ rust_task : public maybe_proxy<rust_task>,
void *realloc(void *mem, size_t size,
memory_region::memory_region_type type);
void free(void *mem, memory_region::memory_region_type type);
void pin();
void unpin();
};
//

View file

@ -15,6 +15,8 @@ ivec_on_heap
ivec_reserve
ivec_to_ptr
last_os_error
pin_task
unpin_task
rand_free
rand_new
rand_next

View file

@ -0,0 +1,15 @@
// xfail-stage0
/**
Exercises task pinning and unpinning. Doesn't really ensure it
works, just makes sure it runs.
*/
use std;
import std::task;
// Smoke test only: pin the main task to its current thread, then unpin
// it. Verifies the calls run without error, not that pinning actually
// constrains scheduling (as noted in the file comment above).
fn main() {
task::pin();
task::unpin();
}