Reserve 'yield' keyword

Rename task::yield() to task::deschedule().

Fixes #8494.
This commit is contained in:
Kevin Ballard 2013-08-16 12:49:40 -07:00
parent 680eb71564
commit 418e1ebae6
23 changed files with 75 additions and 72 deletions

View file

@@ -141,7 +141,7 @@ pub struct Death {
on_exit: Option<~fn(bool)>,
// nesting level counter for task::unkillable calls (0 == killable).
unkillable: int,
// nesting level counter for unstable::atomically calls (0 == can yield).
// nesting level counter for unstable::atomically calls (0 == can deschedule).
wont_sleep: int,
// A "spare" handle to the kill flag inside the kill handle. Used during
// blocking/waking as an optimization to avoid two xadds on the refcount.
@@ -572,16 +572,16 @@ impl Death {
}
/// Enter a possibly-nested "atomic" section of code. Just for assertions.
/// All calls must be paired with a subsequent call to allow_yield.
/// All calls must be paired with a subsequent call to allow_deschedule.
#[inline]
pub fn inhibit_yield(&mut self) {
pub fn inhibit_deschedule(&mut self) {
self.wont_sleep += 1;
}
/// Exit a possibly-nested "atomic" section of code. Just for assertions.
/// All calls must be paired with a preceding call to inhibit_yield.
/// All calls must be paired with a preceding call to inhibit_deschedule.
#[inline]
pub fn allow_yield(&mut self) {
pub fn allow_deschedule(&mut self) {
rtassert!(self.wont_sleep != 0);
self.wont_sleep -= 1;
}

View file

@@ -250,7 +250,7 @@ mod test {
let (c2, p3, c4) = x.take();
p3.recv(); // handshake parent
c4.send(()); // normal receive
task::yield();
task::deschedule();
c2.send(()); // select receive
}
@@ -294,7 +294,7 @@ mod test {
if send_on_chans.contains(&i) {
let c = Cell::new(c);
do spawntask_random {
task::yield();
task::deschedule();
c.take().send(());
}
}

View file

@@ -537,7 +537,7 @@ pub fn with_task_name<U>(blk: &fn(Option<&str>) -> U) -> U {
}
}
pub fn yield() {
pub fn deschedule() {
//! Yield control to the task scheduler
use rt::local::Local;
@@ -568,10 +568,10 @@ pub fn failing() -> bool {
*
* ~~~
* do task::unkillable {
* // detach / yield / destroy must all be called together
* // detach / deschedule / destroy must all be called together
* rustrt::rust_port_detach(po);
* // This must not result in the current task being killed
* task::yield();
* task::deschedule();
* rustrt::rust_port_destroy(po);
* }
* ~~~
@@ -689,7 +689,7 @@ fn test_spawn_unlinked_unsup_no_fail_down() { // grandchild sends on a port
let ch = ch.clone();
do spawn_unlinked {
// Give middle task a chance to fail-but-not-kill-us.
do 16.times { task::yield(); }
do 16.times { task::deschedule(); }
ch.send(()); // If killed first, grandparent hangs.
}
fail!(); // Shouldn't kill either (grand)parent or (grand)child.
@@ -712,7 +712,7 @@ fn test_spawn_unlinked_sup_no_fail_up() { // child unlinked fails
do run_in_newsched_task {
do spawn_supervised { fail!(); }
// Give child a chance to fail-but-not-kill-us.
do 16.times { task::yield(); }
do 16.times { task::deschedule(); }
}
}
#[ignore(reason = "linked failure")]
@@ -821,7 +821,7 @@ fn test_spawn_failure_propagate_grandchild() {
do spawn_supervised {
do spawn_supervised { block_forever(); }
}
do 16.times { task::yield(); }
do 16.times { task::deschedule(); }
fail!();
};
assert!(result.is_err());
@@ -838,7 +838,7 @@ fn test_spawn_failure_propagate_secondborn() {
do spawn_supervised {
do spawn { block_forever(); } // linked
}
do 16.times { task::yield(); }
do 16.times { task::deschedule(); }
fail!();
};
assert!(result.is_err());
@@ -855,7 +855,7 @@ fn test_spawn_failure_propagate_nephew_or_niece() {
do spawn { // linked
do spawn_supervised { block_forever(); }
}
do 16.times { task::yield(); }
do 16.times { task::deschedule(); }
fail!();
};
assert!(result.is_err());
@@ -872,7 +872,7 @@ fn test_spawn_linked_sup_propagate_sibling() {
do spawn { // linked
do spawn { block_forever(); } // linked
}
do 16.times { task::yield(); }
do 16.times { task::deschedule(); }
fail!();
};
assert!(result.is_err());
@@ -1169,12 +1169,12 @@ fn test_unkillable() {
// We want to do this after failing
do spawn_unlinked {
do 10.times { yield() }
do 10.times { deschedule() }
ch.send(());
}
do spawn {
yield();
deschedule();
// We want to fail after the unkillable task
// blocks on recv
fail!();
@@ -1205,12 +1205,12 @@ fn test_unkillable_nested() {
// We want to do this after failing
do spawn_unlinked || {
do 10.times { yield() }
do 10.times { deschedule() }
ch.send(());
}
do spawn {
yield();
deschedule();
// We want to fail after the unkillable task
// blocks on recv
fail!();
@@ -1277,7 +1277,7 @@ fn test_spawn_watched() {
t.unlinked();
t.watched();
do t.spawn {
task::yield();
task::deschedule();
fail!();
}
}
@@ -1313,7 +1313,7 @@ fn test_indestructible() {
t.unwatched();
do t.spawn {
p3.recv();
task::yield();
task::deschedule();
fail!();
}
c3.send(());

View file

@@ -272,9 +272,9 @@ impl<T> Drop for UnsafeAtomicRcBox<T>{
/**
* Enables a runtime assertion that no operation in the argument closure shall
* use scheduler operations (yield, recv, spawn, etc). This is for use with
* use scheduler operations (deschedule, recv, spawn, etc). This is for use with
* pthread mutexes, which may block the entire scheduler thread, rather than
* just one task, and is hence prone to deadlocks if mixed with yielding.
* just one task, and is hence prone to deadlocks if mixed with descheduling.
*
* NOTE: THIS DOES NOT PROVIDE LOCKING, or any sort of critical-section
* synchronization whatsoever. It only makes sense to use for CPU-local issues.
@@ -288,10 +288,10 @@ pub unsafe fn atomically<U>(f: &fn() -> U) -> U {
if in_green_task_context() {
let t = Local::unsafe_borrow::<Task>();
do (|| {
(*t).death.inhibit_yield();
(*t).death.inhibit_deschedule();
f()
}).finally {
(*t).death.allow_yield();
(*t).death.allow_deschedule();
}
} else {
f()
@@ -349,7 +349,7 @@ struct ExData<T> {
* This uses a pthread mutex, not one that's aware of the userspace scheduler.
* The user of an Exclusive must be careful not to invoke any functions that may
* reschedule the task while holding the lock, or deadlock may result. If you
* need to block or yield while accessing shared state, use extra::sync::RWArc.
* need to block or deschedule while accessing shared state, use extra::sync::RWArc.
*/
pub struct Exclusive<T> {
x: UnsafeAtomicRcBox<ExData<T>>
@@ -377,7 +377,7 @@ impl<T:Send> Exclusive<T> {
// Exactly like std::arc::MutexArc::access(), but with the LittleLock
// instead of a proper mutex. Same reason for being unsafe.
//
// Currently, scheduling operations (i.e., yielding, receiving on a pipe,
// Currently, scheduling operations (i.e., descheduling, receiving on a pipe,
// accessing the provided condition variable) are prohibited while inside
// the Exclusive. Supporting that is a work in progress.
#[inline]
@@ -431,7 +431,7 @@ mod tests {
fn test_atomically() {
// NB. The whole runtime will abort on an 'atomic-sleep' violation,
// so we can't really test for the converse behaviour.
unsafe { do atomically { } } task::yield(); // oughtn't fail
unsafe { do atomically { } } task::deschedule(); // oughtn't fail
}
#[test]
@@ -545,7 +545,7 @@ mod tests {
c.send(());
}
p.recv();
task::yield(); // Try to make the unwrapper get blocked first.
task::deschedule(); // Try to make the unwrapper get blocked first.
let left_x = x.try_unwrap();
assert!(left_x.is_left());
util::ignore(left_x);
@@ -566,7 +566,7 @@ mod tests {
do task::spawn {
let x2 = x2.take();
unsafe { do x2.with |_hello| { } }
task::yield();
task::deschedule();
}
assert!(x.unwrap() == ~~"hello");
@@ -612,7 +612,7 @@ mod tests {
let x = Exclusive::new(~~"hello");
let x2 = x.clone();
do task::spawn {
do 10.times { task::yield(); } // try to let the unwrapper go
do 10.times { task::deschedule(); } // try to let the unwrapper go
fail!(); // punt it awake from its deadlock
}
let _z = x.unwrap();