diff --git a/src/libstd/sync/deque.rs b/src/libstd/sync/deque.rs
index 30b95ffb34f..42a8bd88652 100644
--- a/src/libstd/sync/deque.rs
+++ b/src/libstd/sync/deque.rs
@@ -48,6 +48,8 @@
 // FIXME: all atomic operations in this module use a SeqCst ordering. That is
 // probably overkill
 
+use alloc::arc::Arc;
+
 use clone::Clone;
 use iter::{range, Iterator};
 use kinds::Send;
@@ -58,7 +60,6 @@ use owned::Box;
 use ptr::RawPtr;
 use ptr;
 use slice::ImmutableVector;
-use sync::arc::UnsafeArc;
 use sync::atomics::{AtomicInt, AtomicPtr, SeqCst};
 use unstable::sync::Exclusive;
 use rt::heap::{allocate, deallocate};
@@ -87,14 +88,14 @@ struct Deque<T> {
 ///
 /// There may only be one worker per deque.
 pub struct Worker<T> {
-    deque: UnsafeArc<Deque<T>>,
+    deque: Arc<Deque<T>>,
 }
 
 /// The stealing half of the work-stealing deque. Stealers have access to the
 /// opposite end of the deque from the worker, and they only have access to the
 /// `steal` method.
 pub struct Stealer<T> {
-    deque: UnsafeArc<Deque<T>>,
+    deque: Arc<Deque<T>>,
 }
 
 /// When stealing some data, this is an enumeration of the possible outcomes.
@@ -149,12 +150,13 @@ impl<T: Send> BufferPool<T> {
 
     /// Allocates a new work-stealing deque which will send/receiving memory to
     /// and from this buffer pool.
-    pub fn deque(&mut self) -> (Worker<T>, Stealer<T>) {
-        let (a, b) = UnsafeArc::new2(Deque::new(self.clone()));
+    pub fn deque(&self) -> (Worker<T>, Stealer<T>) {
+        let a = Arc::new(Deque::new(self.clone()));
+        let b = a.clone();
         (Worker { deque: a }, Stealer { deque: b })
     }
 
-    fn alloc(&mut self, bits: int) -> Box<Buffer<T>> {
+    fn alloc(&self, bits: int) -> Box<Buffer<T>> {
         unsafe {
             self.pool.with(|pool| {
                 match pool.iter().position(|x| x.size() >= (1 << bits)) {
@@ -165,7 +167,7 @@ impl<T: Send> BufferPool<T> {
         }
     }
 
-    fn free(&mut self, buf: Box<Buffer<T>>) {
+    fn free(&self, buf: Box<Buffer<T>>) {
         unsafe {
             let mut buf = Some(buf);
             self.pool.with(|pool| {
@@ -185,34 +187,34 @@ impl<T: Send> Clone for BufferPool<T> {
 
 impl<T: Send> Worker<T> {
     /// Pushes data onto the front of this work queue.
-    pub fn push(&mut self, t: T) {
-        unsafe { (*self.deque.get()).push(t) }
+    pub fn push(&self, t: T) {
+        unsafe { self.deque.push(t) }
     }
     /// Pops data off the front of the work queue, returning `None` on an empty
    /// queue.
-    pub fn pop(&mut self) -> Option<T> {
-        unsafe { (*self.deque.get()).pop() }
+    pub fn pop(&self) -> Option<T> {
+        unsafe { self.deque.pop() }
     }
 
     /// Gets access to the buffer pool that this worker is attached to. This can
     /// be used to create more deques which share the same buffer pool as this
     /// deque.
-    pub fn pool<'a>(&'a mut self) -> &'a mut BufferPool<T> {
-        unsafe { &mut (*self.deque.get()).pool }
+    pub fn pool<'a>(&'a self) -> &'a BufferPool<T> {
+        &self.deque.pool
     }
 }
 
 impl<T: Send> Stealer<T> {
     /// Steals work off the end of the queue (opposite of the worker's end)
-    pub fn steal(&mut self) -> Stolen<T> {
-        unsafe { (*self.deque.get()).steal() }
+    pub fn steal(&self) -> Stolen<T> {
+        unsafe { self.deque.steal() }
     }
 
     /// Gets access to the buffer pool that this stealer is attached to. This
     /// can be used to create more deques which share the same buffer pool as
     /// this deque.
-    pub fn pool<'a>(&'a mut self) -> &'a mut BufferPool<T> {
-        unsafe { &mut (*self.deque.get()).pool }
+    pub fn pool<'a>(&'a self) -> &'a BufferPool<T> {
+        &self.deque.pool
     }
 }
 
@@ -224,7 +226,7 @@ impl<T: Send> Clone for Stealer<T> {
 // personally going to heavily comment what's going on here.
 
 impl<T: Send> Deque<T> {
-    fn new(mut pool: BufferPool<T>) -> Deque<T> {
+    fn new(pool: BufferPool<T>) -> Deque<T> {
         let buf = pool.alloc(MIN_BITS);
         Deque {
             bottom: AtomicInt::new(0),
@@ -234,7 +236,7 @@ impl<T: Send> Deque<T> {
         }
     }
 
-    unsafe fn push(&mut self, data: T) {
+    unsafe fn push(&self, data: T) {
         let mut b = self.bottom.load(SeqCst);
         let t = self.top.load(SeqCst);
         let mut a = self.array.load(SeqCst);
@@ -250,7 +252,7 @@ impl<T: Send> Deque<T> {
         self.bottom.store(b + 1, SeqCst);
     }
 
-    unsafe fn pop(&mut self) -> Option<T> {
+    unsafe fn pop(&self) -> Option<T> {
         let b = self.bottom.load(SeqCst);
         let a = self.array.load(SeqCst);
         let b = b - 1;
@@ -276,7 +278,7 @@ impl<T: Send> Deque<T> {
         }
     }
 
-    unsafe fn steal(&mut self) -> Stolen<T> {
+    unsafe fn steal(&self) -> Stolen<T> {
         let t = self.top.load(SeqCst);
         let old = self.array.load(SeqCst);
         let b = self.bottom.load(SeqCst);
@@ -298,7 +300,7 @@ impl<T: Send> Deque<T> {
         }
     }
 
-    unsafe fn maybe_shrink(&mut self, b: int, t: int) {
+    unsafe fn maybe_shrink(&self, b: int, t: int) {
         let a = self.array.load(SeqCst);
         if b - t < (*a).size() / K && b - t > (1 << MIN_BITS) {
             self.swap_buffer(b, a, (*a).resize(b, t, -1));
@@ -312,7 +314,7 @@ impl<T: Send> Deque<T> {
     // after this method has called 'free' on it. The continued usage is simply
     // a read followed by a forget, but we must make sure that the memory can
     // continue to be read after we flag this buffer for reclamation.
-    unsafe fn swap_buffer(&mut self, b: int, old: *mut Buffer<T>,
+    unsafe fn swap_buffer(&self, b: int, old: *mut Buffer<T>,
                           buf: Buffer<T>) -> *mut Buffer<T> {
         let newbuf: *mut Buffer<T> = transmute(box buf);
         self.array.store(newbuf, SeqCst);
@@ -373,7 +375,7 @@ impl<T: Send> Buffer<T> {
 
     // Unsafe because this unsafely overwrites possibly uninitialized or
     // initialized data.
-    unsafe fn put(&mut self, i: int, t: T) {
+    unsafe fn put(&self, i: int, t: T) {
         let ptr = self.storage.offset(i & self.mask());
         ptr::copy_nonoverlapping_memory(ptr as *mut T, &t as *T, 1);
         forget(t);
@@ -382,7 +384,7 @@ impl<T: Send> Buffer<T> {
     // Again, unsafe because this has incredibly dubious ownership violations.
     // It is assumed that this buffer is immediately dropped.
     unsafe fn resize(&self, b: int, t: int, delta: int) -> Buffer<T> {
-        let mut buf = Buffer::new(self.log_size + delta);
+        let buf = Buffer::new(self.log_size + delta);
         for i in range(t, b) {
             buf.put(i, self.get(i));
         }
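
A minimal usage sketch of the `&self`-based API after this change, not part of the patch itself. It assumes the module is still reachable as `std::sync::deque` in this era of libstd, that a `BufferPool::new()` constructor exists, and that `Stolen` carries `Data`/`Empty`/`Abort` variants; treat the names below as illustrative, not as the exact public surface.

// Usage sketch (not part of the diff): the Arc-backed handles no longer
// require &mut. BufferPool::new(), the Data/Empty/Abort variants, and the
// std::sync::deque path are assumptions about this era's API.
use std::sync::deque::{BufferPool, Data, Empty, Abort};

fn main() {
    // One pool can back many deques; deque() now takes &self.
    let pool: BufferPool<int> = BufferPool::new();
    let (worker, stealer) = pool.deque();

    // The worker pushes and pops its own end through a shared reference.
    worker.push(1);
    worker.push(2);
    assert_eq!(worker.pop(), Some(2));

    // Stealers clone the shared Arc and take from the opposite end.
    let stealer2 = stealer.clone();
    match stealer2.steal() {
        Data(t) => println!("stole {}", t),
        Empty => println!("deque was empty"),
        Abort => println!("lost a race with another stealer"),
    }
}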