diff --git a/src/libgreen/basic.rs b/src/libgreen/basic.rs index 62b6f71ae9c6..2877768dd8bf 100644 --- a/src/libgreen/basic.rs +++ b/src/libgreen/basic.rs @@ -27,11 +27,11 @@ pub fn event_loop() -> ~EventLoop:Send { } struct BasicLoop { - work: ~[proc():Send], // pending work + work: Vec<proc():Send>, // pending work idle: Option<*mut BasicPausable>, // only one is allowed - remotes: ~[(uint, ~Callback:Send)], + remotes: Vec<(uint, ~Callback:Send)>, next_remote: uint, - messages: Exclusive<~[Message]>, + messages: Exclusive<Vec<Message>>, } enum Message { RunRemote(uint), RemoveRemote(uint) } @@ -39,18 +39,18 @@ enum Message { RunRemote(uint), RemoveRemote(uint) } impl BasicLoop { fn new() -> BasicLoop { BasicLoop { - work: ~[], + work: vec![], idle: None, next_remote: 0, - remotes: ~[], - messages: Exclusive::new(~[]), + remotes: vec![], + messages: Exclusive::new(vec![]), } } /// Process everything in the work queue (continually) fn work(&mut self) { while self.work.len() > 0 { - for work in replace(&mut self.work, ~[]).move_iter() { + for work in replace(&mut self.work, vec![]).move_iter() { work(); } } @@ -60,7 +60,7 @@ impl BasicLoop { let messages = unsafe { self.messages.with(|messages| { if messages.len() > 0 { - Some(replace(messages, ~[])) + Some(replace(messages, vec![])) } else { None } @@ -165,12 +165,12 @@ impl EventLoop for BasicLoop { } struct BasicRemote { - queue: Exclusive<~[Message]>, + queue: Exclusive<Vec<Message>>, id: uint, } impl BasicRemote { - fn new(queue: Exclusive<~[Message]>, id: uint) -> BasicRemote { + fn new(queue: Exclusive<Vec<Message>>, id: uint) -> BasicRemote { BasicRemote { queue: queue, id: id } } } diff --git a/src/libgreen/lib.rs b/src/libgreen/lib.rs index e4a9641efd1b..820627b6b7d1 100644 --- a/src/libgreen/lib.rs +++ b/src/libgreen/lib.rs @@ -195,6 +195,7 @@ // NB this does *not* include globs, please keep it that way.
#![feature(macro_rules, phase)] #![allow(visible_private_types)] +#![deny(deprecated_owned_vector)] #[cfg(test)] #[phase(syntax, link)] extern crate log; #[cfg(test)] extern crate rustuv; @@ -209,7 +210,6 @@ use std::rt; use std::sync::atomics::{SeqCst, AtomicUint, INIT_ATOMIC_UINT}; use std::sync::deque; use std::task::TaskOpts; -use std::slice; use std::sync::arc::UnsafeArc; use sched::{Shutdown, Scheduler, SchedHandle, TaskFromFriend, NewNeighbor}; @@ -318,9 +318,9 @@ impl PoolConfig { /// used to keep the pool alive and also reap the status from the pool. pub struct SchedPool { id: uint, - threads: ~[Thread<()>], - handles: ~[SchedHandle], - stealers: ~[deque::Stealer<~task::GreenTask>], + threads: Vec<Thread<()>>, + handles: Vec<SchedHandle>, + stealers: Vec<deque::Stealer<~task::GreenTask>>, next_friend: uint, stack_pool: StackPool, deque_pool: deque::BufferPool<~task::GreenTask>, @@ -356,9 +356,9 @@ impl SchedPool { // The pool of schedulers that will be returned from this function let (p, state) = TaskState::new(); let mut pool = SchedPool { - threads: ~[], - handles: ~[], - stealers: ~[], + threads: vec![], + handles: vec![], + stealers: vec![], id: unsafe { POOL_ID.fetch_add(1, SeqCst) }, sleepers: SleeperList::new(), stack_pool: StackPool::new(), @@ -371,8 +371,14 @@ impl SchedPool { // Create a work queue for each scheduler, ntimes. Create an extra // for the main thread if that flag is set. We won't steal from it. - let arr = slice::from_fn(nscheds, |_| pool.deque_pool.deque()); - let (workers, stealers) = slice::unzip(arr.move_iter()); + let mut workers = Vec::with_capacity(nscheds); + let mut stealers = Vec::with_capacity(nscheds); + + for _ in range(0, nscheds) { + let (w, s) = pool.deque_pool.deque(); + workers.push(w); + stealers.push(s); + } pool.stealers = stealers; // Now that we've got all our work queues, create one scheduler per @@ -420,7 +426,7 @@ impl SchedPool { } // Jettison the task away!
- self.handles[idx].send(TaskFromFriend(task)); + self.handles.get_mut(idx).send(TaskFromFriend(task)); } /// Spawns a new scheduler into this M:N pool. A handle is returned to the @@ -466,7 +472,7 @@ impl SchedPool { /// This only waits for all tasks in *this pool* of schedulers to exit, any /// native tasks or extern pools will not be waited on pub fn shutdown(mut self) { - self.stealers = ~[]; + self.stealers = vec![]; // Wait for everyone to exit. We may have reached a 0-task count // multiple times in the past, meaning there could be several buffered @@ -478,10 +484,10 @@ impl SchedPool { } // Now that everyone's gone, tell everything to shut down. - for mut handle in replace(&mut self.handles, ~[]).move_iter() { + for mut handle in replace(&mut self.handles, vec![]).move_iter() { handle.send(Shutdown); } - for thread in replace(&mut self.threads, ~[]).move_iter() { + for thread in replace(&mut self.threads, vec![]).move_iter() { thread.join(); } } diff --git a/src/libgreen/sched.rs b/src/libgreen/sched.rs index 036d02655f9f..9971dfee8281 100644 --- a/src/libgreen/sched.rs +++ b/src/libgreen/sched.rs @@ -49,7 +49,7 @@ pub struct Scheduler { work_queue: deque::Worker<~GreenTask>, /// Work queues for the other schedulers. These are created by /// cloning the core work queues. - work_queues: ~[deque::Stealer<~GreenTask>], + work_queues: Vec<deque::Stealer<~GreenTask>>, /// The queue of incoming messages from other schedulers. /// These are enqueued by SchedHandles after which a remote callback /// is triggered to handle the message.
@@ -125,7 +125,7 @@ impl Scheduler { pub fn new(pool_id: uint, event_loop: ~EventLoop:Send, work_queue: deque::Worker<~GreenTask>, - work_queues: ~[deque::Stealer<~GreenTask>], + work_queues: Vec<deque::Stealer<~GreenTask>>, sleeper_list: SleeperList, state: TaskState) -> Scheduler { @@ -138,7 +138,7 @@ impl Scheduler { pub fn new_special(pool_id: uint, event_loop: ~EventLoop:Send, work_queue: deque::Worker<~GreenTask>, - work_queues: ~[deque::Stealer<~GreenTask>], + work_queues: Vec<deque::Stealer<~GreenTask>>, sleeper_list: SleeperList, run_anything: bool, friend: Option<SchedHandle>, @@ -502,7 +502,7 @@ impl Scheduler { let len = work_queues.len(); let start_index = self.rng.gen_range(0, len); for index in range(0, len).map(|i| (i + start_index) % len) { - match work_queues[index].steal() { + match work_queues.get_mut(index).steal() { deque::Data(task) => { rtdebug!("found task by stealing"); return Some(task) @@ -1137,7 +1137,7 @@ mod test { let mut pool = BufferPool::new(); let (normal_worker, normal_stealer) = pool.deque(); let (special_worker, special_stealer) = pool.deque(); - let queues = ~[normal_stealer, special_stealer]; + let queues = vec![normal_stealer, special_stealer]; let (_p, state) = TaskState::new(); // Our normal scheduler @@ -1326,7 +1326,7 @@ mod test { #[test] fn multithreading() { run(proc() { - let mut rxs = ~[]; + let mut rxs = vec![]; for _ in range(0, 10) { let (tx, rx) = channel(); spawn(proc() { diff --git a/src/libgreen/stack.rs b/src/libgreen/stack.rs index b8ab4d5f8c1f..1f06ba379f0f 100644 --- a/src/libgreen/stack.rs +++ b/src/libgreen/stack.rs @@ -126,13 +126,13 @@ impl Drop for Stack { pub struct StackPool { // Ideally this would be some datastructure that preserved ordering on // Stack.min_size. - stacks: ~[Stack], + stacks: Vec<Stack>, } impl StackPool { pub fn new() -> StackPool { StackPool { - stacks: ~[], + stacks: vec![], } }