auto merge of #6387 : brson/rust/unstable, r=brson
r? @pcwalton

* Move `SharedMutableState`, `LittleLock`, and `Exclusive` from `core::unstable` to `core::unstable::sync`
* Modernize the `SharedMutableState` interface with methods
* Rename `SharedMutableState` to `UnsafeAtomicRcBox` to match `RcBox`.
commit ad5bfd600d

13 changed files with 434 additions and 424 deletions
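The substance of the change is visible in the API: the free functions shared_mutable_state / get_shared_mutable_state / clone_shared_mutable_state become methods on the renamed type. A minimal before/after sketch, using only names from the hunks below (the fn wrapper itself is hypothetical, not part of the patch):

    fn demo() {
        // Before: let rc = unsafe { shared_mutable_state(5) };
        //         let p  = unsafe { get_shared_mutable_state(&rc) };
        // After:
        let rc = UnsafeAtomicRcBox::new(5);
        let rc2 = rc.clone();              // bumps the atomic refcount
        unsafe {
            assert!(*rc2.get() == 5);      // *mut int into the shared box
        }
    }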
@@ -19,8 +19,8 @@ use option::{Option, Some, None};
 use uint;
 use unstable;
 use vec;
-use unstable::Exclusive;
 use util::replace;
+use unstable::sync::{Exclusive, exclusive};

 use pipes::{recv, try_recv, wait_many, peek, PacketHeader};

@@ -304,7 +304,7 @@ pub struct SharedChan<T> {
 impl<T: Owned> SharedChan<T> {
     /// Converts a `chan` into a `shared_chan`.
     pub fn new(c: Chan<T>) -> SharedChan<T> {
-        SharedChan { ch: unstable::exclusive(c) }
+        SharedChan { ch: exclusive(c) }
     }
 }

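Callers of SharedChan are untouched; only the internals now take `exclusive` from `unstable::sync`. A hedged usage sketch, assuming this era's comm API (stream/send/recv and a cloneable SharedChan):

    fn shared_chan_demo() {
        let (port, chan) = comm::stream();
        let chan = SharedChan::new(chan);   // wrap the endpoint for sharing
        let chan2 = chan.clone();           // each task can hold its own copy
        chan2.send(1);
        assert!(port.recv() == 1);
    }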
@@ -238,6 +238,7 @@ pub mod util;
 /* Unsupported interfaces */

 // Private APIs
+#[path = "unstable/mod.rs"]
 pub mod unstable;

 /* For internal use, not exported */

@@ -152,7 +152,7 @@ FIXME #4726: It would probably be appropriate to make this a real global
 */
 fn with_env_lock<T>(f: &fn() -> T) -> T {
     use unstable::global::global_data_clone_create;
-    use unstable::{Exclusive, exclusive};
+    use unstable::sync::{Exclusive, exclusive};

     struct SharedValue(());
     type ValueMutex = Exclusive<SharedValue>;

@@ -855,7 +855,7 @@ pub fn change_dir(p: &Path) -> bool {
 /// is otherwise unsuccessful.
 pub fn change_dir_locked(p: &Path, action: &fn()) -> bool {
     use unstable::global::global_data_clone_create;
-    use unstable::{Exclusive, exclusive};
+    use unstable::sync::{Exclusive, exclusive};

     fn key(_: Exclusive<()>) { }

@@ -90,6 +90,7 @@ use task::{ExistingScheduler, SchedulerHandle};
 use task::unkillable;
 use uint;
 use util;
+use unstable::sync::{Exclusive, exclusive};

 #[cfg(test)] use task::default_task_opts;

@@ -128,7 +129,7 @@ struct TaskGroupData {
     // tasks in this group.
     descendants: TaskSet,
 }
-type TaskGroupArc = unstable::Exclusive<Option<TaskGroupData>>;
+type TaskGroupArc = Exclusive<Option<TaskGroupData>>;

 type TaskGroupInner<'self> = &'self mut Option<TaskGroupData>;

@@ -158,7 +159,7 @@ struct AncestorNode {
     ancestors: AncestorList,
 }

-struct AncestorList(Option<unstable::Exclusive<AncestorNode>>);
+struct AncestorList(Option<Exclusive<AncestorNode>>);

 // Accessors for taskgroup arcs and ancestor arcs that wrap the unsafety.
 #[inline(always)]

@@ -167,7 +168,7 @@ fn access_group<U>(x: &TaskGroupArc, blk: &fn(TaskGroupInner) -> U) -> U {
 }

 #[inline(always)]
-fn access_ancestors<U>(x: &unstable::Exclusive<AncestorNode>,
+fn access_ancestors<U>(x: &Exclusive<AncestorNode>,
                        blk: &fn(x: &mut AncestorNode) -> U) -> U {
     x.with(blk)
 }

@@ -479,7 +480,7 @@ fn gen_child_taskgroup(linked: bool, supervised: bool)
         // here.
         let mut members = new_taskset();
         taskset_insert(&mut members, spawner);
-        let tasks = unstable::exclusive(Some(TaskGroupData {
+        let tasks = exclusive(Some(TaskGroupData {
             members: members,
             descendants: new_taskset(),
         }));

@@ -508,7 +509,7 @@ fn gen_child_taskgroup(linked: bool, supervised: bool)
             (g, a, spawner_group.is_main)
         } else {
             // Child is in a separate group from spawner.
-            let g = unstable::exclusive(Some(TaskGroupData {
+            let g = exclusive(Some(TaskGroupData {
                 members: new_taskset(),
                 descendants: new_taskset(),
             }));

@@ -528,7 +529,7 @@ fn gen_child_taskgroup(linked: bool, supervised: bool)
         };
         assert!(new_generation < uint::max_value);
         // Build a new node in the ancestor list.
-        AncestorList(Some(unstable::exclusive(AncestorNode {
+        AncestorList(Some(exclusive(AncestorNode {
             generation: new_generation,
             parent_group: Some(spawner_group.tasks.clone()),
             ancestors: old_ancestors,

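All three spawn.rs call sites build the same shape: an Exclusive around an Option of group data, later opened with .with(). A minimal sketch of that pattern (hypothetical, not part of the patch):

    fn taskgroup_pattern() {
        let arc: Exclusive<Option<int>> = exclusive(Some(0));
        unsafe {
            do arc.with |inner| {
                // inner is &mut Option<int>; None marks a dead group
                *inner = None;
            }
        }
    }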
@@ -1,354 +0,0 @@
-// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-#[doc(hidden)];
-
-use cast;
-use libc;
-use comm::{GenericChan, GenericPort};
-use prelude::*;
-use task;
-use task::atomically;
-use self::finally::Finally;
-
-#[path = "unstable/at_exit.rs"]
-pub mod at_exit;
-#[path = "unstable/global.rs"]
-pub mod global;
-#[path = "unstable/finally.rs"]
-pub mod finally;
-#[path = "unstable/weak_task.rs"]
-pub mod weak_task;
-#[path = "unstable/exchange_alloc.rs"]
-pub mod exchange_alloc;
-#[path = "unstable/intrinsics.rs"]
-pub mod intrinsics;
-#[path = "unstable/simd.rs"]
-pub mod simd;
-#[path = "unstable/extfmt.rs"]
-pub mod extfmt;
-#[path = "unstable/lang.rs"]
-#[cfg(not(test))]
-pub mod lang;
-
-mod rustrt {
-    use unstable::{raw_thread, rust_little_lock};
-
-    pub extern {
-        pub unsafe fn rust_create_little_lock() -> rust_little_lock;
-        pub unsafe fn rust_destroy_little_lock(lock: rust_little_lock);
-        pub unsafe fn rust_lock_little_lock(lock: rust_little_lock);
-        pub unsafe fn rust_unlock_little_lock(lock: rust_little_lock);
-
-        pub unsafe fn rust_raw_thread_start(f: &(&fn())) -> *raw_thread;
-        pub unsafe fn rust_raw_thread_join_delete(thread: *raw_thread);
-    }
-}
-
-#[allow(non_camel_case_types)] // runtime type
-pub type raw_thread = libc::c_void;
-
-/**
-
-Start a new thread outside of the current runtime context and wait
-for it to terminate.
-
-The executing thread has no access to a task pointer and will be using
-a normal large stack.
-*/
-pub fn run_in_bare_thread(f: ~fn()) {
-    let (port, chan) = comm::stream();
-    // FIXME #4525: Unfortunate that this creates an extra scheduler but it's
-    // necessary since rust_raw_thread_join_delete is blocking
-    do task::spawn_sched(task::SingleThreaded) {
-        unsafe {
-            let closure: &fn() = || {
-                f()
-            };
-            let thread = rustrt::rust_raw_thread_start(&closure);
-            rustrt::rust_raw_thread_join_delete(thread);
-            chan.send(());
-        }
-    }
-    port.recv();
-}
-
-#[test]
-fn test_run_in_bare_thread() {
-    let i = 100;
-    do run_in_bare_thread {
-        assert!(i == 100);
-    }
-}
-
-#[test]
-fn test_run_in_bare_thread_exchange() {
-    // Does the exchange heap work without the runtime?
-    let i = ~100;
-    do run_in_bare_thread {
-        assert!(i == ~100);
-    }
-}
-
-fn compare_and_swap(address: &mut int, oldval: int, newval: int) -> bool {
-    unsafe {
-        let old = intrinsics::atomic_cxchg(address, oldval, newval);
-        old == oldval
-    }
-}
-
-/****************************************************************************
- * Shared state & exclusive ARC
- ****************************************************************************/
-
-struct ArcData<T> {
-    count: libc::intptr_t,
-    // FIXME(#3224) should be able to make this non-option to save memory
-    data: Option<T>,
-}
-
-struct ArcDestruct<T> {
-    data: *libc::c_void,
-}
-
-#[unsafe_destructor]
-impl<T> Drop for ArcDestruct<T>{
-    fn finalize(&self) {
-        unsafe {
-            do task::unkillable {
-                let mut data: ~ArcData<T> = cast::transmute(self.data);
-                let new_count =
-                    intrinsics::atomic_xsub(&mut data.count, 1) - 1;
-                assert!(new_count >= 0);
-                if new_count == 0 {
-                    // drop glue takes over.
-                } else {
-                    cast::forget(data);
-                }
-            }
-        }
-    }
-}
-
-fn ArcDestruct<T>(data: *libc::c_void) -> ArcDestruct<T> {
-    ArcDestruct {
-        data: data
-    }
-}
-
-/**
- * COMPLETELY UNSAFE. Used as a primitive for the safe versions in std::arc.
- *
- * Data races between tasks can result in crashes and, with sufficient
- * cleverness, arbitrary type coercion.
- */
-pub type SharedMutableState<T> = ArcDestruct<T>;
-
-pub unsafe fn shared_mutable_state<T:Owned>(data: T) ->
-        SharedMutableState<T> {
-    let data = ~ArcData { count: 1, data: Some(data) };
-    let ptr = cast::transmute(data);
-    ArcDestruct(ptr)
-}
-
-#[inline(always)]
-pub unsafe fn get_shared_mutable_state<T:Owned>(
-    rc: *SharedMutableState<T>) -> *mut T
-{
-    let ptr: ~ArcData<T> = cast::transmute((*rc).data);
-    assert!(ptr.count > 0);
-    let r = cast::transmute(ptr.data.get_ref());
-    cast::forget(ptr);
-    return r;
-}
-#[inline(always)]
-pub unsafe fn get_shared_immutable_state<'a,T:Owned>(
-        rc: &'a SharedMutableState<T>) -> &'a T {
-    let ptr: ~ArcData<T> = cast::transmute((*rc).data);
-    assert!(ptr.count > 0);
-    // Cast us back into the correct region
-    let r = cast::transmute_region(ptr.data.get_ref());
-    cast::forget(ptr);
-    return r;
-}
-
-pub unsafe fn clone_shared_mutable_state<T:Owned>(rc: &SharedMutableState<T>)
-        -> SharedMutableState<T> {
-    let mut ptr: ~ArcData<T> = cast::transmute((*rc).data);
-    let new_count = intrinsics::atomic_xadd(&mut ptr.count, 1) + 1;
-    assert!(new_count >= 2);
-    cast::forget(ptr);
-    ArcDestruct((*rc).data)
-}
-
-impl<T:Owned> Clone for SharedMutableState<T> {
-    fn clone(&self) -> SharedMutableState<T> {
-        unsafe {
-            clone_shared_mutable_state(self)
-        }
-    }
-}
-
-/****************************************************************************/
-
-#[allow(non_camel_case_types)] // runtime type
-pub type rust_little_lock = *libc::c_void;
-
-struct LittleLock {
-    l: rust_little_lock,
-}
-
-impl Drop for LittleLock {
-    fn finalize(&self) {
-        unsafe {
-            rustrt::rust_destroy_little_lock(self.l);
-        }
-    }
-}
-
-fn LittleLock() -> LittleLock {
-    unsafe {
-        LittleLock {
-            l: rustrt::rust_create_little_lock()
-        }
-    }
-}
-
-pub impl LittleLock {
-    #[inline(always)]
-    unsafe fn lock<T>(&self, f: &fn() -> T) -> T {
-        do atomically {
-            rustrt::rust_lock_little_lock(self.l);
-            do (|| {
-                f()
-            }).finally {
-                rustrt::rust_unlock_little_lock(self.l);
-            }
-        }
-    }
-}
-
-struct ExData<T> {
-    lock: LittleLock,
-    failed: bool,
-    data: T,
-}
-
-/**
- * An arc over mutable data that is protected by a lock. For library use only.
- */
-pub struct Exclusive<T> {
-    x: SharedMutableState<ExData<T>>
-}
-
-pub fn exclusive<T:Owned>(user_data: T) -> Exclusive<T> {
-    let data = ExData {
-        lock: LittleLock(),
-        failed: false,
-        data: user_data
-    };
-    Exclusive {
-        x: unsafe {
-            shared_mutable_state(data)
-        }
-    }
-}
-
-impl<T:Owned> Clone for Exclusive<T> {
-    // Duplicate an exclusive ARC, as std::arc::clone.
-    fn clone(&self) -> Exclusive<T> {
-        Exclusive { x: unsafe { clone_shared_mutable_state(&self.x) } }
-    }
-}
-
-pub impl<T:Owned> Exclusive<T> {
-    // Exactly like std::arc::mutex_arc,access(), but with the little_lock
-    // instead of a proper mutex. Same reason for being unsafe.
-    //
-    // Currently, scheduling operations (i.e., yielding, receiving on a pipe,
-    // accessing the provided condition variable) are prohibited while inside
-    // the exclusive. Supporting that is a work in progress.
-    #[inline(always)]
-    unsafe fn with<U>(&self, f: &fn(x: &mut T) -> U) -> U {
-        let rec = get_shared_mutable_state(&self.x);
-        do (*rec).lock.lock {
-            if (*rec).failed {
-                fail!(
-                    ~"Poisoned exclusive - another task failed inside!");
-            }
-            (*rec).failed = true;
-            let result = f(&mut (*rec).data);
-            (*rec).failed = false;
-            result
-        }
-    }
-
-    #[inline(always)]
-    unsafe fn with_imm<U>(&self, f: &fn(x: &T) -> U) -> U {
-        do self.with |x| {
-            f(cast::transmute_immut(x))
-        }
-    }
-}
-
-#[cfg(test)]
-mod tests {
-    use comm;
-    use super::exclusive;
-    use task;
-    use uint;
-
-    #[test]
-    fn exclusive_arc() {
-        let mut futures = ~[];
-
-        let num_tasks = 10;
-        let count = 10;
-
-        let total = exclusive(~0);
-
-        for uint::range(0, num_tasks) |_i| {
-            let total = total.clone();
-            let (port, chan) = comm::stream();
-            futures.push(port);
-
-            do task::spawn || {
-                for uint::range(0, count) |_i| {
-                    do total.with |count| {
-                        **count += 1;
-                    }
-                }
-                chan.send(());
-            }
-        };
-
-        for futures.each |f| { f.recv() }
-
-        do total.with |total| {
-            assert!(**total == num_tasks * count)
-        };
-    }
-
-    #[test] #[should_fail] #[ignore(cfg(windows))]
-    fn exclusive_poison() {
-        // Tests that if one task fails inside of an exclusive, subsequent
-        // accesses will also fail.
-        let x = exclusive(1);
-        let x2 = x.clone();
-        do task::try || {
-            do x2.with |one| {
-                assert!(*one == 2);
-            }
-        };
-        do x.with |one| {
-            assert!(*one == 1);
-        }
-    }
-}

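One detail worth noting while reading the clone/finalize bodies above (they reappear in the new sync.rs below): atomic_xadd and atomic_xsub return the value the counter held before the update, so the new count must be derived by hand. Roughly:

    let old = intrinsics::atomic_xadd(&mut data.count, 1); // count before add
    let new_count = old + 1;                               // count after add

The cast::transmute / cast::forget pairing is the companion trick: it briefly views the raw pointer as an owned ~ArcData<T> box, then must forget it so the box is not freed on every access.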
@@ -31,14 +31,13 @@ use kinds::Owned;
 use libc::{c_void};
 use option::{Option, Some, None};
 use ops::Drop;
-use unstable::{Exclusive, exclusive};
+use unstable::sync::{Exclusive, exclusive};
 use unstable::at_exit::at_exit;
 use unstable::intrinsics::atomic_cxchg;
 use hashmap::HashMap;
 use sys::Closure;

-#[cfg(test)] use unstable::{SharedMutableState, shared_mutable_state};
-#[cfg(test)] use unstable::get_shared_immutable_state;
+#[cfg(test)] use unstable::sync::{UnsafeAtomicRcBox};
 #[cfg(test)] use task::spawn;
 #[cfg(test)] use uint;

@@ -234,18 +233,16 @@ extern {

 #[test]
 fn test_clone_rc() {
-    type MyType = SharedMutableState<int>;
-
-    fn key(_v: SharedMutableState<int>) { }
+    fn key(_v: UnsafeAtomicRcBox<int>) { }

     for uint::range(0, 100) |_| {
         do spawn {
             unsafe {
                 let val = do global_data_clone_create(key) {
-                    ~shared_mutable_state(10)
+                    ~UnsafeAtomicRcBox::new(10)
                 };

-                assert!(get_shared_immutable_state(&val) == &10);
+                assert!(val.get() == &10);
             }
         }
     }

@@ -253,16 +250,14 @@ fn test_clone_rc() {

 #[test]
 fn test_modify() {
-    type MyType = SharedMutableState<int>;
-
-    fn key(_v: SharedMutableState<int>) { }
+    fn key(_v: UnsafeAtomicRcBox<int>) { }

     unsafe {
         do global_data_modify(key) |v| {
             match v {
                 None => {
                     unsafe {
-                        Some(~shared_mutable_state(10))
+                        Some(~UnsafeAtomicRcBox::new(10))
                     }
                 }
                 _ => fail!()

@@ -272,7 +267,7 @@ fn test_modify() {
         do global_data_modify(key) |v| {
             match v {
                 Some(sms) => {
-                    let v = get_shared_immutable_state(sms);
+                    let v = sms.get();
                     assert!(*v == 10);
                     None
                 },

@@ -284,7 +279,7 @@ fn test_modify() {
             match v {
                 None => {
                     unsafe {
-                        Some(~shared_mutable_state(10))
+                        Some(~UnsafeAtomicRcBox::new(10))
                     }
                 }
                 _ => fail!()

src/libcore/unstable/mod.rs (new file, 78 lines)

@@ -0,0 +1,78 @@
+// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#[doc(hidden)];
+
+use libc;
+use comm::{GenericChan, GenericPort};
+use prelude::*;
+use task;
+
+pub mod at_exit;
+pub mod global;
+pub mod finally;
+pub mod weak_task;
+pub mod exchange_alloc;
+pub mod intrinsics;
+pub mod simd;
+pub mod extfmt;
+#[cfg(not(test))]
+pub mod lang;
+pub mod sync;
+
+/**
+
+Start a new thread outside of the current runtime context and wait
+for it to terminate.
+
+The executing thread has no access to a task pointer and will be using
+a normal large stack.
+*/
+pub fn run_in_bare_thread(f: ~fn()) {
+    let (port, chan) = comm::stream();
+    // FIXME #4525: Unfortunate that this creates an extra scheduler but it's
+    // necessary since rust_raw_thread_join_delete is blocking
+    do task::spawn_sched(task::SingleThreaded) {
+        unsafe {
+            let closure: &fn() = || {
+                f()
+            };
+            let thread = rust_raw_thread_start(&closure);
+            rust_raw_thread_join_delete(thread);
+            chan.send(());
+        }
+    }
+    port.recv();
+}
+
+#[test]
+fn test_run_in_bare_thread() {
+    let i = 100;
+    do run_in_bare_thread {
+        assert!(i == 100);
+    }
+}
+
+#[test]
+fn test_run_in_bare_thread_exchange() {
+    // Does the exchange heap work without the runtime?
+    let i = ~100;
+    do run_in_bare_thread {
+        assert!(i == ~100);
+    }
+}
+
+#[allow(non_camel_case_types)] // runtime type
+pub type raw_thread = libc::c_void;
+
+extern {
+    fn rust_raw_thread_start(f: &(&fn())) -> *raw_thread;
+    fn rust_raw_thread_join_delete(thread: *raw_thread);
+}

src/libcore/unstable/sync.rs (new file, 286 lines)

@@ -0,0 +1,286 @@
+// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use cast;
+use libc;
+use option::*;
+use task;
+use task::atomically;
+use unstable::finally::Finally;
+use unstable::intrinsics;
+use ops::Drop;
+use clone::Clone;
+use kinds::Owned;
+
+/// An atomically reference counted pointer.
+///
+/// Enforces no shared-memory safety.
+pub struct UnsafeAtomicRcBox<T> {
+    data: *mut libc::c_void,
+}
+
+struct AtomicRcBoxData<T> {
+    count: int,
+    data: Option<T>,
+}
+
+impl<T: Owned> UnsafeAtomicRcBox<T> {
+    pub fn new(data: T) -> UnsafeAtomicRcBox<T> {
+        unsafe {
+            let data = ~AtomicRcBoxData { count: 1, data: Some(data) };
+            let ptr = cast::transmute(data);
+            return UnsafeAtomicRcBox { data: ptr };
+        }
+    }
+
+    #[inline(always)]
+    #[cfg(stage0)]
+    pub unsafe fn get(&self) -> *mut T
+    {
+        let mut data: ~AtomicRcBoxData<T> = cast::transmute(self.data);
+        assert!(data.count > 0);
+        let r: *mut T = cast::transmute(data.data.get_mut_ref());
+        cast::forget(data);
+        return r;
+    }
+
+    #[inline(always)]
+    #[cfg(not(stage0))]
+    pub unsafe fn get(&self) -> *mut T
+    {
+        let mut data: ~AtomicRcBoxData<T> = cast::transmute(self.data);
+        assert!(data.count > 0);
+        let r: *mut T = data.data.get_mut_ref();
+        cast::forget(data);
+        return r;
+    }
+
+    #[inline(always)]
+    #[cfg(stage0)]
+    pub unsafe fn get_immut(&self) -> *T
+    {
+        let mut data: ~AtomicRcBoxData<T> = cast::transmute(self.data);
+        assert!(data.count > 0);
+        let r: *T = cast::transmute(data.data.get_mut_ref());
+        cast::forget(data);
+        return r;
+    }
+
+    #[inline(always)]
+    #[cfg(not(stage0))]
+    pub unsafe fn get_immut(&self) -> *T
+    {
+        let mut data: ~AtomicRcBoxData<T> = cast::transmute(self.data);
+        assert!(data.count > 0);
+        let r: *T = cast::transmute_immut(data.data.get_mut_ref());
+        cast::forget(data);
+        return r;
+    }
+}
+
+impl<T: Owned> Clone for UnsafeAtomicRcBox<T> {
+    fn clone(&self) -> UnsafeAtomicRcBox<T> {
+        unsafe {
+            let mut data: ~AtomicRcBoxData<T> = cast::transmute(self.data);
+            let new_count = intrinsics::atomic_xadd(&mut data.count, 1) + 1;
+            assert!(new_count >= 2);
+            cast::forget(data);
+            return UnsafeAtomicRcBox { data: self.data };
+        }
+    }
+}
+
+#[unsafe_destructor]
+impl<T> Drop for UnsafeAtomicRcBox<T>{
+    fn finalize(&self) {
+        unsafe {
+            do task::unkillable {
+                let mut data: ~AtomicRcBoxData<T> = cast::transmute(self.data);
+                let new_count = intrinsics::atomic_xsub(&mut data.count, 1) - 1;
+                assert!(new_count >= 0);
+                if new_count == 0 {
+                    // drop glue takes over.
+                } else {
+                    cast::forget(data);
+                }
+            }
+        }
+    }
+}
+
+
+/****************************************************************************/
+
+#[allow(non_camel_case_types)] // runtime type
+pub type rust_little_lock = *libc::c_void;
+
+struct LittleLock {
+    l: rust_little_lock,
+}
+
+impl Drop for LittleLock {
+    fn finalize(&self) {
+        unsafe {
+            rust_destroy_little_lock(self.l);
+        }
+    }
+}
+
+fn LittleLock() -> LittleLock {
+    unsafe {
+        LittleLock {
+            l: rust_create_little_lock()
+        }
+    }
+}
+
+pub impl LittleLock {
+    #[inline(always)]
+    unsafe fn lock<T>(&self, f: &fn() -> T) -> T {
+        do atomically {
+            rust_lock_little_lock(self.l);
+            do (|| {
+                f()
+            }).finally {
+                rust_unlock_little_lock(self.l);
+            }
+        }
+    }
+}
+
+struct ExData<T> {
+    lock: LittleLock,
+    failed: bool,
+    data: T,
+}
+
+/**
+ * An arc over mutable data that is protected by a lock. For library use only.
+ */
+pub struct Exclusive<T> {
+    x: UnsafeAtomicRcBox<ExData<T>>
+}
+
+pub fn exclusive<T:Owned>(user_data: T) -> Exclusive<T> {
+    let data = ExData {
+        lock: LittleLock(),
+        failed: false,
+        data: user_data
+    };
+    Exclusive {
+        x: UnsafeAtomicRcBox::new(data)
+    }
+}
+
+impl<T:Owned> Clone for Exclusive<T> {
+    // Duplicate an exclusive ARC, as std::arc::clone.
+    fn clone(&self) -> Exclusive<T> {
+        Exclusive { x: self.x.clone() }
+    }
+}
+
+pub impl<T:Owned> Exclusive<T> {
+    // Exactly like std::arc::mutex_arc,access(), but with the little_lock
+    // instead of a proper mutex. Same reason for being unsafe.
+    //
+    // Currently, scheduling operations (i.e., yielding, receiving on a pipe,
+    // accessing the provided condition variable) are prohibited while inside
+    // the exclusive. Supporting that is a work in progress.
+    #[inline(always)]
+    unsafe fn with<U>(&self, f: &fn(x: &mut T) -> U) -> U {
+        let rec = self.x.get();
+        do (*rec).lock.lock {
+            if (*rec).failed {
+                fail!(
+                    ~"Poisoned exclusive - another task failed inside!");
+            }
+            (*rec).failed = true;
+            let result = f(&mut (*rec).data);
+            (*rec).failed = false;
+            result
+        }
+    }
+
+    #[inline(always)]
+    unsafe fn with_imm<U>(&self, f: &fn(x: &T) -> U) -> U {
+        do self.with |x| {
+            f(cast::transmute_immut(x))
+        }
+    }
+}
+
+fn compare_and_swap(address: &mut int, oldval: int, newval: int) -> bool {
+    unsafe {
+        let old = intrinsics::atomic_cxchg(address, oldval, newval);
+        old == oldval
+    }
+}
+
+extern {
+    fn rust_create_little_lock() -> rust_little_lock;
+    fn rust_destroy_little_lock(lock: rust_little_lock);
+    fn rust_lock_little_lock(lock: rust_little_lock);
+    fn rust_unlock_little_lock(lock: rust_little_lock);
+}
+
+#[cfg(test)]
+mod tests {
+    use comm;
+    use super::exclusive;
+    use task;
+    use uint;
+
+    #[test]
+    fn exclusive_arc() {
+        let mut futures = ~[];
+
+        let num_tasks = 10;
+        let count = 10;
+
+        let total = exclusive(~0);
+
+        for uint::range(0, num_tasks) |_i| {
+            let total = total.clone();
+            let (port, chan) = comm::stream();
+            futures.push(port);
+
+            do task::spawn || {
+                for uint::range(0, count) |_i| {
+                    do total.with |count| {
+                        **count += 1;
+                    }
+                }
+                chan.send(());
+            }
+        };
+
+        for futures.each |f| { f.recv() }
+
+        do total.with |total| {
+            assert!(**total == num_tasks * count)
+        };
+    }
+
+    #[test] #[should_fail] #[ignore(cfg(windows))]
+    fn exclusive_poison() {
+        // Tests that if one task fails inside of an exclusive, subsequent
+        // accesses will also fail.
+        let x = exclusive(1);
+        let x2 = x.clone();
+        do task::try || {
+            do x2.with |one| {
+                assert!(*one == 2);
+            }
+        };
+        do x.with |one| {
+            assert!(*one == 1);
+        }
+    }
+}

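Taken together, the new type gives the std ARCs in the following hunks a small method surface: new, clone, get, get_immut. A hedged usage sketch (the fn name is hypothetical; synchronization stays the caller's job):

    fn rc_box_demo() {
        let b = UnsafeAtomicRcBox::new(10);
        let c = b.clone();                  // atomic count: 1 -> 2
        unsafe {
            assert!(*c.get_immut() == 10);  // *T view of the one shared box
            *b.get() = 11;                  // *mut T view through any handle
            assert!(*c.get_immut() == 11);
        }
    }   // both handles drop; the count hits 0 and the box is freed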
@@ -3298,8 +3298,9 @@ mod tests {
     #[test]
     fn test_swap_remove_noncopyable() {
         // Tests that we don't accidentally run destructors twice.
-        let mut v = ~[::unstable::exclusive(()), ::unstable::exclusive(()),
-                      ::unstable::exclusive(())];
+        let mut v = ~[::unstable::sync::exclusive(()),
+                      ::unstable::sync::exclusive(()),
+                      ::unstable::sync::exclusive(())];
         let mut _e = v.swap_remove(0);
         assert!(v.len() == 2);
         _e = v.swap_remove(1);

@@ -17,9 +17,7 @@ use sync;
 use sync::{Mutex, mutex_with_condvars, RWlock, rwlock_with_condvars};

 use core::cast;
-use core::unstable::{SharedMutableState, shared_mutable_state};
-use core::unstable::{clone_shared_mutable_state};
-use core::unstable::{get_shared_mutable_state, get_shared_immutable_state};
+use core::unstable::sync::UnsafeAtomicRcBox;
 use core::ptr;
 use core::task;

@@ -83,11 +81,11 @@ pub impl<'self> Condvar<'self> {
 ****************************************************************************/

 /// An atomically reference counted wrapper for shared immutable state.
-struct ARC<T> { x: SharedMutableState<T> }
+struct ARC<T> { x: UnsafeAtomicRcBox<T> }

 /// Create an atomically reference counted wrapper.
 pub fn ARC<T:Const + Owned>(data: T) -> ARC<T> {
-    ARC { x: unsafe { shared_mutable_state(data) } }
+    ARC { x: UnsafeAtomicRcBox::new(data) }
 }

 /**

@@ -95,7 +93,7 @@ pub fn ARC<T:Const + Owned>(data: T) -> ARC<T> {
 * wrapper.
 */
 pub fn get<'a, T:Const + Owned>(rc: &'a ARC<T>) -> &'a T {
-    unsafe { get_shared_immutable_state(&rc.x) }
+    unsafe { &*rc.x.get_immut() }
 }

 /**

@@ -106,7 +104,7 @@ pub fn get<'a, T:Const + Owned>(rc: &'a ARC<T>) -> &'a T {
 * allowing them to share the underlying data.
 */
 pub fn clone<T:Const + Owned>(rc: &ARC<T>) -> ARC<T> {
-    ARC { x: unsafe { clone_shared_mutable_state(&rc.x) } }
+    ARC { x: rc.x.clone() }
 }

 impl<T:Const + Owned> Clone for ARC<T> {

@@ -122,7 +120,7 @@ impl<T:Const + Owned> Clone for ARC<T> {
 #[doc(hidden)]
 struct MutexARCInner<T> { lock: Mutex, failed: bool, data: T }
 /// An ARC with mutable data protected by a blocking mutex.
-struct MutexARC<T> { x: SharedMutableState<MutexARCInner<T>> }
+struct MutexARC<T> { x: UnsafeAtomicRcBox<MutexARCInner<T>> }

 /// Create a mutex-protected ARC with the supplied data.
 pub fn MutexARC<T:Owned>(user_data: T) -> MutexARC<T> {

@@ -137,7 +135,7 @@ pub fn mutex_arc_with_condvars<T:Owned>(user_data: T,
     let data =
         MutexARCInner { lock: mutex_with_condvars(num_condvars),
                         failed: false, data: user_data };
-    MutexARC { x: unsafe { shared_mutable_state(data) } }
+    MutexARC { x: UnsafeAtomicRcBox::new(data) }
 }

 impl<T:Owned> Clone for MutexARC<T> {

@@ -145,7 +143,7 @@ impl<T:Owned> Clone for MutexARC<T> {
     fn clone(&self) -> MutexARC<T> {
         // NB: Cloning the underlying mutex is not necessary. Its reference
         // count would be exactly the same as the shared state's.
-        MutexARC { x: unsafe { clone_shared_mutable_state(&self.x) } }
+        MutexARC { x: self.x.clone() }
     }
 }

@@ -176,7 +174,7 @@ pub impl<T:Owned> MutexARC<T> {
     */
     #[inline(always)]
     unsafe fn access<U>(&self, blk: &fn(x: &mut T) -> U) -> U {
-        let state = get_shared_mutable_state(&self.x);
+        let state = self.x.get();
         // Borrowck would complain about this if the function were
         // not already unsafe. See borrow_rwlock, far below.
         do (&(*state).lock).lock {

@@ -192,7 +190,7 @@ pub impl<T:Owned> MutexARC<T> {
         &self,
         blk: &fn(x: &'x mut T, c: &'c Condvar) -> U) -> U
     {
-        let state = get_shared_mutable_state(&self.x);
+        let state = self.x.get();
         do (&(*state).lock).lock_cond |cond| {
             check_poison(true, (*state).failed);
             let _z = PoisonOnFail(&mut (*state).failed);

@@ -254,7 +252,7 @@ struct RWARCInner<T> { lock: RWlock, failed: bool, data: T }
 */
 #[mutable]
 struct RWARC<T> {
-    x: SharedMutableState<RWARCInner<T>>,
+    x: UnsafeAtomicRcBox<RWARCInner<T>>,
     cant_nest: ()
 }

@@ -273,13 +271,13 @@ pub fn rw_arc_with_condvars<T:Const + Owned>(
     let data =
         RWARCInner { lock: rwlock_with_condvars(num_condvars),
                      failed: false, data: user_data };
-    RWARC { x: unsafe { shared_mutable_state(data) }, cant_nest: () }
+    RWARC { x: UnsafeAtomicRcBox::new(data), cant_nest: () }
 }

 pub impl<T:Const + Owned> RWARC<T> {
     /// Duplicate a rwlock-protected ARC, as arc::clone.
     fn clone(&self) -> RWARC<T> {
-        RWARC { x: unsafe { clone_shared_mutable_state(&self.x) },
+        RWARC { x: self.x.clone(),
                 cant_nest: () }
     }

@@ -299,7 +297,7 @@ pub impl<T:Const + Owned> RWARC<T> {
     #[inline(always)]
     fn write<U>(&self, blk: &fn(x: &mut T) -> U) -> U {
         unsafe {
-            let state = get_shared_mutable_state(&self.x);
+            let state = self.x.get();
             do (*borrow_rwlock(state)).write {
                 check_poison(false, (*state).failed);
                 let _z = PoisonOnFail(&mut (*state).failed);

@@ -313,7 +311,7 @@ pub impl<T:Const + Owned> RWARC<T> {
                   blk: &fn(x: &'x mut T, c: &'c Condvar) -> U)
                   -> U {
         unsafe {
-            let state = get_shared_mutable_state(&self.x);
+            let state = self.x.get();
             do (*borrow_rwlock(state)).write_cond |cond| {
                 check_poison(false, (*state).failed);
                 let _z = PoisonOnFail(&mut (*state).failed);

@@ -334,10 +332,12 @@ pub impl<T:Const + Owned> RWARC<T> {
     * access modes, this will not poison the ARC.
     */
     fn read<U>(&self, blk: &fn(x: &T) -> U) -> U {
-        let state = unsafe { get_shared_immutable_state(&self.x) };
-        do (&state.lock).read {
-            check_poison(false, state.failed);
-            blk(&state.data)
+        let state = self.x.get();
+        unsafe {
+            do (*state).lock.read {
+                check_poison(false, (*state).failed);
+                blk(&(*state).data)
+            }
         }
     }

@@ -360,7 +360,7 @@ pub impl<T:Const + Owned> RWARC<T> {
     */
     fn write_downgrade<U>(&self, blk: &fn(v: RWWriteMode<T>) -> U) -> U {
         unsafe {
-            let state = get_shared_mutable_state(&self.x);
+            let state = self.x.get();
             do (*borrow_rwlock(state)).write_downgrade |write_mode| {
                 check_poison(false, (*state).failed);
                 blk(RWWriteMode {

@@ -374,25 +374,27 @@ pub impl<T:Const + Owned> RWARC<T> {

     /// To be called inside of the write_downgrade block.
     fn downgrade<'a>(&self, token: RWWriteMode<'a, T>) -> RWReadMode<'a, T> {
-        // The rwlock should assert that the token belongs to us for us.
-        let state = unsafe { get_shared_immutable_state(&self.x) };
-        let RWWriteMode {
-            data: data,
-            token: t,
-            poison: _poison
-        } = token;
-        // Let readers in
-        let new_token = (&state.lock).downgrade(t);
-        // Whatever region the input reference had, it will be safe to use
-        // the same region for the output reference. (The only 'unsafe' part
-        // of this cast is removing the mutability.)
-        let new_data = unsafe { cast::transmute_immut(data) };
-        // Downgrade ensured the token belonged to us. Just a sanity check.
-        assert!(ptr::ref_eq(&state.data, new_data));
-        // Produce new token
-        RWReadMode {
-            data: new_data,
-            token: new_token,
+        unsafe {
+            // The rwlock should assert that the token belongs to us for us.
+            let state = self.x.get();
+            let RWWriteMode {
+                data: data,
+                token: t,
+                poison: _poison
+            } = token;
+            // Let readers in
+            let new_token = (*state).lock.downgrade(t);
+            // Whatever region the input reference had, it will be safe to use
+            // the same region for the output reference. (The only 'unsafe' part
+            // of this cast is removing the mutability.)
+            let new_data = cast::transmute_immut(data);
+            // Downgrade ensured the token belonged to us. Just a sanity check.
+            assert!(ptr::ref_eq(&(*state).data, new_data));
+            // Produce new token
+            RWReadMode {
+                data: new_data,
+                token: new_token,
+            }
         }
     }
 }

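The public ARC surface is deliberately unchanged by this swap; only the bodies above differ. A sketch of that unchanged surface over the new internals (mirrors the functions in the hunks above):

    fn arc_demo() {
        let a = ARC(~"hello");             // UnsafeAtomicRcBox::new inside
        let b = clone(&a);                 // rc.x.clone() inside
        assert!(*get(&b) == ~"hello");     // &*rc.x.get_immut() inside
    }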
@@ -15,7 +15,7 @@
 * in std.
 */

-use core::unstable::{Exclusive, exclusive};
+use core::unstable::sync::{Exclusive, exclusive};
 use core::ptr;
 use core::task;
 use core::util;

@@ -9,7 +9,7 @@
 // except according to those terms.

 fn main() {
-    let x = Some(unstable::exclusive(false));
+    let x = Some(unstable::sync::exclusive(false));
     match x {
         Some(copy z) => { //~ ERROR copying a value of non-copyable type
             do z.with |b| { assert!(!*b); }

@@ -9,7 +9,7 @@
 // except according to those terms.

 pub fn main() {
-    let x = Some(unstable::exclusive(true));
+    let x = Some(unstable::sync::exclusive(true));
     match x {
         Some(ref z) if z.with(|b| *b) => {
             do z.with |b| { assert!(*b); }