// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

#![stable(feature = "rust1", since = "1.0.0")]

//! Threadsafe reference-counted boxes (the `Arc<T>` type).
//!
//! The `Arc<T>` type provides shared ownership of an immutable value.
//! Destruction is deterministic, and will occur as soon as the last owner is
//! gone. It is marked as `Send` because it uses atomic reference counting.
//!
//! If you do not need thread-safety, and just need shared ownership, consider
//! the [`Rc<T>` type](../rc/struct.Rc.html). It is the same as `Arc<T>`, but
//! does not use atomics, making it both thread-unsafe as well as significantly
//! faster when updating the reference count.
//!
//! The `downgrade` method can be used to create a non-owning `Weak<T>` pointer
//! to the box. A `Weak<T>` pointer can be upgraded to an `Arc<T>` pointer, but
//! will return `None` if the value has already been dropped.
//!
//! For example, a tree with parent pointers can be represented by putting the
//! nodes behind strong `Arc<T>` pointers, and then storing the parent pointers
//! as `Weak<T>` pointers.
//!
//! # Examples
//!
//! Sharing some immutable data between threads:
//!
//! ```no_run
//! use std::sync::Arc;
//! use std::thread;
//!
//! let five = Arc::new(5);
//!
//! for _ in 0..10 {
//!     let five = five.clone();
//!
//!     thread::spawn(move || {
//!         println!("{:?}", five);
//!     });
//! }
//! ```
//!
//! Sharing mutable data safely between threads with a `Mutex`:
//!
//! ```no_run
//! use std::sync::{Arc, Mutex};
//! use std::thread;
//!
//! let five = Arc::new(Mutex::new(5));
//!
//! for _ in 0..10 {
//!     let five = five.clone();
//!
//!     thread::spawn(move || {
//!         let mut number = five.lock().unwrap();
//!
//!         *number += 1;
//!
//!         println!("{}", *number); // prints 6
//!     });
//! }
//! ```

use boxed::Box;

use core::atomic;
use core::atomic::Ordering::{Relaxed, Release, Acquire, SeqCst};
use core::fmt;
use core::cmp::Ordering;
use core::mem::{align_of_val, size_of_val};
use core::intrinsics::{drop_in_place, abort};
use core::mem;
use core::nonzero::NonZero;
use core::ops::{Deref, CoerceUnsized};
use core::ptr;
use core::marker::Unsize;
use core::hash::{Hash, Hasher};
use core::{usize, isize};
use heap::deallocate;

const MAX_REFCOUNT: usize = (isize::MAX) as usize;
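
// A minimal sketch of the parent-pointer pattern mentioned in the module docs
// above: children hold `Weak` pointers back to their parent, so the link does
// not keep the parent alive. The `Node` type here is hypothetical.
#[cfg(test)]
mod parent_pointer_sketch {
    use std::mem::drop;
    use std::option::Option;
    use std::option::Option::{Some, None};
    use std::sync::Mutex;

    use super::{Arc, Weak};

    struct Node {
        parent: Mutex<Option<Weak<Node>>>,
        value: u32,
    }

    #[test]
    fn weak_parent_pointer_does_not_own() {
        let parent = Arc::new(Node { parent: Mutex::new(None), value: 0 });
        let child = Arc::new(Node { parent: Mutex::new(None), value: 1 });
        *child.parent.lock().unwrap() = Some(parent.downgrade());

        // the child can still reach its parent through the weak pointer...
        assert_eq!(child.parent.lock().unwrap().as_ref().unwrap()
                                               .upgrade().unwrap().value, 0);

        // ...but the weak pointer alone does not keep the parent alive
        drop(parent);
        assert!(child.parent.lock().unwrap().as_ref().unwrap()
                                            .upgrade().is_none());
    }
}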

/// An atomically reference counted wrapper for shared state.
///
/// # Examples
///
/// In this example, a large vector of numbers is shared between several
/// threads. With simple pipes, without `Arc`, a copy would have to be made
/// for each thread.
///
/// When you clone an `Arc<T>`, it will create another pointer to the data and
/// increase the reference counter.
///
/// ```
/// use std::sync::Arc;
/// use std::thread;
///
/// fn main() {
///     let numbers: Vec<_> = (0..100u32).collect();
///     let shared_numbers = Arc::new(numbers);
///
///     for _ in 0..10 {
///         let child_numbers = shared_numbers.clone();
///
///         thread::spawn(move || {
///             let local_numbers = &child_numbers[..];
///
///             // Work with the local numbers
///         });
///     }
/// }
/// ```
#[unsafe_no_drop_flag]
#[stable(feature = "rust1", since = "1.0.0")]
pub struct Arc<T: ?Sized> {
    // FIXME #12808: strange name to try to avoid interfering with
    // field accesses of the contained type via Deref
    _ptr: NonZero<*mut ArcInner<T>>,
}

unsafe impl<T: ?Sized + Sync + Send> Send for Arc<T> { }
unsafe impl<T: ?Sized + Sync + Send> Sync for Arc<T> { }

impl<T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<Arc<U>> for Arc<T> {}

/// A weak pointer to an `Arc`.
///
/// Weak pointers will not keep the data inside of the `Arc` alive, and can be
/// used to break cycles between `Arc` pointers.
#[unsafe_no_drop_flag]
#[unstable(feature = "arc_weak",
           reason = "Weak pointers may not belong in this module.",
           issue = "27718")]
pub struct Weak<T: ?Sized> {
    // FIXME #12808: strange name to try to avoid interfering with
    // field accesses of the contained type via Deref
    _ptr: NonZero<*mut ArcInner<T>>,
}

unsafe impl<T: ?Sized + Sync + Send> Send for Weak<T> { }
unsafe impl<T: ?Sized + Sync + Send> Sync for Weak<T> { }

impl<T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<Weak<U>> for Weak<T> {}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized + fmt::Debug> fmt::Debug for Weak<T> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "(Weak)")
    }
}

struct ArcInner<T: ?Sized> {
    strong: atomic::AtomicUsize,

    // the value usize::MAX acts as a sentinel for temporarily "locking" the
    // ability to upgrade weak pointers or downgrade strong ones; this is used
    // to avoid races in `make_unique` and `get_mut`.
    weak: atomic::AtomicUsize,

    data: T,
}

unsafe impl<T: ?Sized + Sync + Send> Send for ArcInner<T> {}
unsafe impl<T: ?Sized + Sync + Send> Sync for ArcInner<T> {}

impl<T> Arc<T> {
    /// Constructs a new `Arc<T>`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn new(data: T) -> Arc<T> {
        // Start the weak pointer count as 1 which is the weak pointer that's
        // held by all the strong pointers (kinda), see std/rc.rs for more info
        let x: Box<_> = box ArcInner {
            strong: atomic::AtomicUsize::new(1),
            weak: atomic::AtomicUsize::new(1),
            data: data,
        };
        Arc { _ptr: unsafe { NonZero::new(Box::into_raw(x)) } }
    }
}
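
// A standalone sketch of the "locking" convention used by the weak count
// below: `usize::MAX` acts as a sentinel meaning the count is locked, and a
// CAS-based increment must observe an unlocked value to succeed. The
// `try_increment` helper is illustrative only.
#[cfg(test)]
mod weak_lock_sketch {
    use std::sync::atomic::{AtomicUsize, Ordering};
    use std::usize;

    fn try_increment(count: &AtomicUsize) -> bool {
        let cur = count.load(Ordering::Relaxed);
        // a locked count must not be incremented; `downgrade` spins here
        if cur == usize::MAX { return false }
        count.compare_and_swap(cur, cur + 1, Ordering::Acquire) == cur
    }

    #[test]
    fn locked_count_is_never_incremented() {
        let count = AtomicUsize::new(1);
        assert!(try_increment(&count));
        assert_eq!(count.load(Ordering::Relaxed), 2);

        count.store(usize::MAX, Ordering::Relaxed); // "lock" the count
        assert!(!try_increment(&count));
        assert_eq!(count.load(Ordering::Relaxed), usize::MAX);
    }
}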

impl<T: ?Sized> Arc<T> {
    /// Downgrades the `Arc<T>` to a `Weak<T>` reference.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(arc_weak)]
    ///
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5);
    ///
    /// let weak_five = five.downgrade();
    /// ```
    #[unstable(feature = "arc_weak",
               reason = "Weak pointers may not belong in this module.",
               issue = "27718")]
    pub fn downgrade(&self) -> Weak<T> {
        loop {
            // This Relaxed is OK because we're checking the value in the CAS
            // below.
            let cur = self.inner().weak.load(Relaxed);

            // check if the weak counter is currently "locked"; if so, spin.
            if cur == usize::MAX { continue }

            // NOTE: this code currently ignores the possibility of overflow
            // into usize::MAX; in general both Rc and Arc need to be adjusted
            // to deal with overflow.

            // Unlike with Clone(), we need this to be an Acquire read to
            // synchronize with the write coming from `is_unique`, so that the
            // events prior to that write happen before this read.
            if self.inner().weak.compare_and_swap(cur, cur + 1, Acquire) == cur {
                return Weak { _ptr: self._ptr }
            }
        }
    }

    /// Get the number of weak references to this value.
    #[inline]
    #[unstable(feature = "arc_counts", issue = "27718")]
    pub fn weak_count(this: &Arc<T>) -> usize {
        this.inner().weak.load(SeqCst) - 1
    }

    /// Get the number of strong references to this value.
    #[inline]
    #[unstable(feature = "arc_counts", issue = "27718")]
    pub fn strong_count(this: &Arc<T>) -> usize {
        this.inner().strong.load(SeqCst)
    }

    #[inline]
    fn inner(&self) -> &ArcInner<T> {
        // This unsafety is ok because while this arc is alive we're guaranteed
        // that the inner pointer is valid. Furthermore, we know that the
        // `ArcInner` structure itself is `Sync` because the inner data is
        // `Sync` as well, so we're ok loaning out an immutable pointer to these
        // contents.
        unsafe { &**self._ptr }
    }

    // Non-inlined part of `drop`.
    #[inline(never)]
    unsafe fn drop_slow(&mut self) {
        let ptr = *self._ptr;

        // Destroy the data at this time, even though we may not free the box
        // allocation itself (there may still be weak pointers lying around).
        drop_in_place(&mut (*ptr).data);

        if self.inner().weak.fetch_sub(1, Release) == 1 {
            atomic::fence(Acquire);
            deallocate(ptr as *mut u8, size_of_val(&*ptr), align_of_val(&*ptr))
        }
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized> Clone for Arc<T> {
    /// Makes a clone of the `Arc<T>`.
    ///
    /// This increases the strong reference count.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5);
    ///
    /// five.clone();
    /// ```
    #[inline]
    fn clone(&self) -> Arc<T> {
        // Using a relaxed ordering is alright here, as knowledge of the
        // original reference prevents other threads from erroneously deleting
        // the object.
        //
        // As explained in the [Boost documentation][1], increasing the
        // reference counter can always be done with memory_order_relaxed: New
        // references to an object can only be formed from an existing
        // reference, and passing an existing reference from one thread to
        // another must already provide any required synchronization.
        //
        // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html)
        let old_size = self.inner().strong.fetch_add(1, Relaxed);

        // However we need to guard against massive refcounts in case someone
        // is `mem::forget`ing Arcs. If we don't do this the count can overflow
        // and users will use-after free. We racily saturate to `isize::MAX` on
        // the assumption that there aren't ~2 billion threads incrementing
        // the reference count at once. This branch will never be taken in
        // any realistic program.
        //
        // We abort because such a program is incredibly degenerate, and we
        // don't care to support it.
        if old_size > MAX_REFCOUNT {
            unsafe { abort(); }
        }

        Arc { _ptr: self._ptr }
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized> Deref for Arc<T> {
    type Target = T;

    #[inline]
    fn deref(&self) -> &T {
        &self.inner().data
    }
}
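
// A standalone sketch of the saturation guard in `Clone` above: the Relaxed
// `fetch_add` returns the previous count, and any value past `MAX_REFCOUNT`
// causes the real code to abort (modelled here as a boolean result).
#[cfg(test)]
mod refcount_guard_sketch {
    use std::sync::atomic::{AtomicUsize, Ordering};

    use super::MAX_REFCOUNT;

    fn checked_increment(count: &AtomicUsize) -> bool {
        let old_size = count.fetch_add(1, Ordering::Relaxed);
        // `Arc::clone` calls `abort()` on this branch instead
        old_size <= MAX_REFCOUNT
    }

    #[test]
    fn guard_trips_past_max_refcount() {
        let count = AtomicUsize::new(1);
        assert!(checked_increment(&count));

        let saturated = AtomicUsize::new(MAX_REFCOUNT + 1);
        assert!(!checked_increment(&saturated));
    }
}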

impl<T: Clone> Arc<T> {
    /// Make a mutable reference from the given `Arc<T>`.
    ///
    /// This is also referred to as a copy-on-write operation because the inner
    /// data is cloned if the (strong) reference count is greater than one. If
    /// we hold the only strong reference, any existing weak references will no
    /// longer be upgradeable.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(arc_unique)]
    ///
    /// use std::sync::Arc;
    ///
    /// let mut five = Arc::new(5);
    ///
    /// let mut_five = Arc::make_unique(&mut five);
    /// ```
    #[inline]
    #[unstable(feature = "arc_unique", issue = "27718")]
    pub fn make_unique(this: &mut Arc<T>) -> &mut T {
        // Note that we hold both a strong reference and a weak reference.
        // Thus, releasing our strong reference only will not, by itself, cause
        // the memory to be deallocated.
        //
        // Use Acquire to ensure that we see any writes to `weak` that happen
        // before release writes (i.e., decrements) to `strong`. Since we hold a
        // weak count, there's no chance the ArcInner itself could be
        // deallocated.
        if this.inner().strong.compare_and_swap(1, 0, Acquire) != 1 {
            // Another strong pointer exists; clone
            *this = Arc::new((**this).clone());
        } else if this.inner().weak.load(Relaxed) != 1 {
            // Relaxed suffices in the above because this is fundamentally an
            // optimization: we are always racing with weak pointers being
            // dropped. Worst case, we end up allocating a new Arc unnecessarily.

            // We removed the last strong ref, but there are additional weak
            // refs remaining. We'll move the contents to a new Arc, and
            // invalidate the other weak refs.

            // Note that it is not possible for the read of `weak` to yield
            // usize::MAX (i.e., locked), since the weak count can only be
            // locked by a thread with a strong reference.

            // Materialize our own implicit weak pointer, so that it can clean
            // up the ArcInner as needed.
            let weak = Weak { _ptr: this._ptr };

            // mark the data itself as already deallocated
            unsafe {
                // there is no data race in the implicit write caused by `read`
                // here (due to zeroing) because data is no longer accessed by
                // other threads (due to there being no more strong refs at this
                // point).
                let mut swap = Arc::new(ptr::read(&(**weak._ptr).data));
                mem::swap(this, &mut swap);
                mem::forget(swap);
            }
        } else {
            // We were the sole reference of either kind; bump back up the
            // strong ref count.
            this.inner().strong.store(1, Release);
        }

        // As with `get_mut()`, the unsafety is ok because our reference was
        // either unique to begin with, or became one upon cloning the contents.
        unsafe {
            let inner = &mut **this._ptr;
            &mut inner.data
        }
    }
}
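
// A small added check of the copy-on-write behaviour documented above, using
// a hypothetical `CountsClones` wrapper: the inner value is cloned only when
// the strong count is greater than one.
#[cfg(test)]
mod cow_sketch {
    use std::clone::Clone;
    use std::sync::atomic::{AtomicUsize, Ordering};

    use super::Arc;

    struct CountsClones(Arc<AtomicUsize>);

    impl Clone for CountsClones {
        fn clone(&self) -> CountsClones {
            self.0.fetch_add(1, Ordering::SeqCst);
            CountsClones(self.0.clone())
        }
    }

    #[test]
    fn make_unique_clones_only_when_shared() {
        let clones = Arc::new(AtomicUsize::new(0));
        let mut x = Arc::new(CountsClones(clones.clone()));

        // sole owner: no clone of the inner value is needed
        Arc::make_unique(&mut x);
        assert_eq!(clones.load(Ordering::SeqCst), 0);

        // shared: the inner value must be cloned before handing out `&mut`
        let _y = x.clone();
        Arc::make_unique(&mut x);
        assert_eq!(clones.load(Ordering::SeqCst), 1);
    }
}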

impl<T: ?Sized> Arc<T> {
    /// Returns a mutable reference to the contained value if the `Arc<T>` is
    /// unique.
    ///
    /// Returns `None` if the `Arc<T>` is not unique.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(arc_unique, alloc)]
    ///
    /// extern crate alloc;
    /// # fn main() {
    /// use alloc::arc::Arc;
    ///
    /// let mut x = Arc::new(3);
    /// *Arc::get_mut(&mut x).unwrap() = 4;
    /// assert_eq!(*x, 4);
    ///
    /// let _y = x.clone();
    /// assert!(Arc::get_mut(&mut x).is_none());
    /// # }
    /// ```
    #[inline]
    #[unstable(feature = "arc_unique", issue = "27718")]
    pub fn get_mut(this: &mut Arc<T>) -> Option<&mut T> {
        if this.is_unique() {
            // This unsafety is ok because we're guaranteed that the pointer
            // returned is the *only* pointer that will ever be returned to T.
            // Our reference count is guaranteed to be 1 at this point, and we
            // required the Arc itself to be `mut`, so we're returning the only
            // possible reference to the inner data.
            unsafe {
                let inner = &mut **this._ptr;
                Some(&mut inner.data)
            }
        } else {
            None
        }
    }

    /// Determine whether this is the unique reference (including weak refs) to
    /// the underlying data.
    ///
    /// Note that this requires locking the weak ref count.
    fn is_unique(&mut self) -> bool {
        // lock the weak pointer count if we appear to be the sole weak pointer
        // holder.
        //
        // The acquire label here ensures a happens-before relationship with any
        // writes to `strong` prior to decrements of the `weak` count (via drop,
        // which uses Release).
        if self.inner().weak.compare_and_swap(1, usize::MAX, Acquire) == 1 {
            // Due to the previous acquire read, this will observe any writes to
            // `strong` that were due to upgrading weak pointers; only strong
            // clones remain, which require that the strong count is > 1 anyway.
            let unique = self.inner().strong.load(Relaxed) == 1;

            // The release write here synchronizes with a read in `downgrade`,
            // effectively preventing the above read of `strong` from happening
            // after the write.
            self.inner().weak.store(1, Release); // release the lock
            unique
        } else {
            false
        }
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized> Drop for Arc<T> {
    /// Drops the `Arc<T>`.
    ///
    /// This will decrement the strong reference count. If the strong reference
    /// count becomes zero and the only other references are `Weak<T>` ones,
    /// `drop`s the inner value.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// {
    ///     let five = Arc::new(5);
    ///
    ///     // stuff
    ///
    ///     drop(five); // explicit drop
    /// }
    /// {
    ///     let five = Arc::new(5);
    ///
    ///     // stuff
    ///
    /// } // implicit drop
    /// ```
    #[inline]
    fn drop(&mut self) {
        // This structure has #[unsafe_no_drop_flag], so this drop glue may run
        // more than once (but it is guaranteed to be zeroed after the first if
        // it's run more than once)
        let ptr = *self._ptr;
        // if ptr.is_null() { return }
        if ptr as *mut u8 as usize == 0 || ptr as *mut u8 as usize == mem::POST_DROP_USIZE {
            return
        }

        // Because `fetch_sub` is already atomic, we do not need to synchronize
        // with other threads unless we are going to delete the object. This
        // same logic applies to the below `fetch_sub` to the `weak` count.
        if self.inner().strong.fetch_sub(1, Release) != 1 { return }

        // This fence is needed to prevent reordering of use of the data and
        // deletion of the data. Because it is marked `Release`, the decreasing
        // of the reference count synchronizes with this `Acquire` fence. This
        // means that use of the data happens before decreasing the reference
        // count, which happens before this fence, which happens before the
        // deletion of the data.
        //
        // As explained in the [Boost documentation][1],
        //
        // > It is important to enforce any possible access to the object in one
        // > thread (through an existing reference) to *happen before* deleting
        // > the object in a different thread. This is achieved by a "release"
        // > operation after dropping a reference (any access to the object
        // > through this reference must obviously happen before), and an
        // > "acquire" operation before deleting the object.
        //
        // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html)
        atomic::fence(Acquire);

        unsafe {
            self.drop_slow()
        }
    }
}
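
// A standalone sketch of the drop protocol above: every owner decrements with
// Release, and only the thread that observes the count hit zero issues the
// Acquire fence before freeing (modelled here by the boolean result).
#[cfg(test)]
mod drop_protocol_sketch {
    use std::sync::atomic::{fence, AtomicUsize, Ordering};

    fn release(count: &AtomicUsize) -> bool {
        if count.fetch_sub(1, Ordering::Release) != 1 {
            return false
        }
        // synchronizes with the Release decrements of the other owners, so
        // their accesses to the data happen-before the deallocation
        fence(Ordering::Acquire);
        true
    }

    #[test]
    fn only_the_last_owner_deallocates() {
        let count = AtomicUsize::new(2);
        assert!(!release(&count));
        assert!(release(&count));
    }
}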

#[unstable(feature = "arc_weak",
           reason = "Weak pointers may not belong in this module.",
           issue = "27718")]
impl<T: ?Sized> Weak<T> {
    /// Upgrades a weak reference to a strong reference.
    ///
    /// Upgrades the `Weak<T>` reference to an `Arc<T>`, if possible.
    ///
    /// Returns `None` if there were no strong references and the data was
    /// destroyed.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(arc_weak)]
    ///
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5);
    ///
    /// let weak_five = five.downgrade();
    ///
    /// let strong_five: Option<Arc<_>> = weak_five.upgrade();
    /// ```
    pub fn upgrade(&self) -> Option<Arc<T>> {
        // We use a CAS loop to increment the strong count instead of a
        // fetch_add because once the count hits 0 it must never be above 0.
        let inner = self.inner();
        loop {
            // Relaxed load because any write of 0 that we can observe
            // leaves the field in a permanently zero state (so a
            // "stale" read of 0 is fine), and any other value is
            // confirmed via the CAS below.
            let n = inner.strong.load(Relaxed);
            if n == 0 { return None }

            // Relaxed is valid for the same reason it is on Arc's Clone impl
            let old = inner.strong.compare_and_swap(n, n + 1, Relaxed);
            if old == n { return Some(Arc { _ptr: self._ptr }) }
        }
    }

    #[inline]
    fn inner(&self) -> &ArcInner<T> {
        // See comments above for why this is "safe"
        unsafe { &**self._ptr }
    }
}

#[unstable(feature = "arc_weak",
           reason = "Weak pointers may not belong in this module.",
           issue = "27718")]
impl<T: ?Sized> Clone for Weak<T> {
    /// Makes a clone of the `Weak<T>`.
    ///
    /// This increases the weak reference count.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(arc_weak)]
    ///
    /// use std::sync::Arc;
    ///
    /// let weak_five = Arc::new(5).downgrade();
    ///
    /// weak_five.clone();
    /// ```
    #[inline]
    fn clone(&self) -> Weak<T> {
        // See comments in Arc::clone() for why this is relaxed. This can use a
        // fetch_add (ignoring the lock) because the weak count is only locked
        // when there are *no other* weak pointers in existence. (So we can't
        // be running this code in that case).
        let old_size = self.inner().weak.fetch_add(1, Relaxed);

        // See comments in Arc::clone() for why we do this (for mem::forget).
        if old_size > MAX_REFCOUNT {
            unsafe { abort(); }
        }

        return Weak { _ptr: self._ptr }
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized> Drop for Weak<T> {
    /// Drops the `Weak<T>`.
    ///
    /// This will decrement the weak reference count.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(arc_weak)]
    ///
    /// use std::sync::Arc;
    ///
    /// {
    ///     let five = Arc::new(5);
    ///     let weak_five = five.downgrade();
    ///
    ///     // stuff
    ///
    ///     drop(weak_five); // explicit drop
    /// }
    /// {
    ///     let five = Arc::new(5);
    ///     let weak_five = five.downgrade();
    ///
    ///     // stuff
    ///
    /// } // implicit drop
    /// ```
    fn drop(&mut self) {
        let ptr = *self._ptr;

        // see comments above for why this check is here
        if ptr as *mut u8 as usize == 0 || ptr as *mut u8 as usize == mem::POST_DROP_USIZE {
            return
        }

        // If we find out that we were the last weak pointer, then it's time to
        // deallocate the data entirely. See the discussion in Arc::drop() about
        // the memory orderings
        //
        // It's not necessary to check for the locked state here, because the
        // weak count can only be locked if there was precisely one weak ref,
        // meaning that drop could only subsequently run ON that remaining weak
        // ref, which can only happen after the lock is released.
        if self.inner().weak.fetch_sub(1, Release) == 1 {
            atomic::fence(Acquire);
            unsafe { deallocate(ptr as *mut u8,
                                size_of_val(&*ptr),
                                align_of_val(&*ptr)) }
        }
    }
}
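
// A standalone sketch of the CAS loop in `Weak::upgrade` above: the strong
// count is only incremented if it is still non-zero, so a destroyed value is
// never revived.
#[cfg(test)]
mod upgrade_sketch {
    use std::sync::atomic::{AtomicUsize, Ordering};

    fn try_upgrade(strong: &AtomicUsize) -> bool {
        loop {
            let n = strong.load(Ordering::Relaxed);
            if n == 0 { return false }
            if strong.compare_and_swap(n, n + 1, Ordering::Relaxed) == n {
                return true
            }
        }
    }

    #[test]
    fn zero_count_stays_zero() {
        let strong = AtomicUsize::new(1);
        assert!(try_upgrade(&strong));       // 1 -> 2
        strong.store(0, Ordering::Relaxed);  // the last owner went away
        assert!(!try_upgrade(&strong));      // never goes 0 -> 1
    }
}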

#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized + PartialEq> PartialEq for Arc<T> {
    /// Equality for two `Arc<T>`s.
    ///
    /// Two `Arc<T>`s are equal if their inner values are equal.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5);
    ///
    /// five == Arc::new(5);
    /// ```
    fn eq(&self, other: &Arc<T>) -> bool { *(*self) == *(*other) }

    /// Inequality for two `Arc<T>`s.
    ///
    /// Two `Arc<T>`s are unequal if their inner values are unequal.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5);
    ///
    /// five != Arc::new(5);
    /// ```
    fn ne(&self, other: &Arc<T>) -> bool { *(*self) != *(*other) }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized + PartialOrd> PartialOrd for Arc<T> {
    /// Partial comparison for two `Arc<T>`s.
    ///
    /// The two are compared by calling `partial_cmp()` on their inner values.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5);
    ///
    /// five.partial_cmp(&Arc::new(5));
    /// ```
    fn partial_cmp(&self, other: &Arc<T>) -> Option<Ordering> {
        (**self).partial_cmp(&**other)
    }

    /// Less-than comparison for two `Arc<T>`s.
    ///
    /// The two are compared by calling `<` on their inner values.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5);
    ///
    /// five < Arc::new(5);
    /// ```
    fn lt(&self, other: &Arc<T>) -> bool { *(*self) < *(*other) }

    /// 'Less-than or equal to' comparison for two `Arc<T>`s.
    ///
    /// The two are compared by calling `<=` on their inner values.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5);
    ///
    /// five <= Arc::new(5);
    /// ```
    fn le(&self, other: &Arc<T>) -> bool { *(*self) <= *(*other) }

    /// Greater-than comparison for two `Arc<T>`s.
    ///
    /// The two are compared by calling `>` on their inner values.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5);
    ///
    /// five > Arc::new(5);
    /// ```
    fn gt(&self, other: &Arc<T>) -> bool { *(*self) > *(*other) }

    /// 'Greater-than or equal to' comparison for two `Arc<T>`s.
    ///
    /// The two are compared by calling `>=` on their inner values.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5);
    ///
    /// five >= Arc::new(5);
    /// ```
    fn ge(&self, other: &Arc<T>) -> bool { *(*self) >= *(*other) }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized + Ord> Ord for Arc<T> {
    fn cmp(&self, other: &Arc<T>) -> Ordering { (**self).cmp(&**other) }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized + Eq> Eq for Arc<T> {}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized + fmt::Display> fmt::Display for Arc<T> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        fmt::Display::fmt(&**self, f)
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized + fmt::Debug> fmt::Debug for Arc<T> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        fmt::Debug::fmt(&**self, f)
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T> fmt::Pointer for Arc<T> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        fmt::Pointer::fmt(&*self._ptr, f)
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T: Default> Default for Arc<T> {
    #[stable(feature = "rust1", since = "1.0.0")]
    fn default() -> Arc<T> { Arc::new(Default::default()) }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized + Hash> Hash for Arc<T> {
    fn hash<H: Hasher>(&self, state: &mut H) {
        (**self).hash(state)
    }
}

#[cfg(test)]
mod tests {
    use std::clone::Clone;
    use std::sync::mpsc::channel;
    use std::mem::drop;
    use std::ops::Drop;
    use std::option::Option;
    use std::option::Option::{Some, None};
    use std::sync::atomic;
    use std::sync::atomic::Ordering::{Acquire, SeqCst};
    use std::thread;
    use std::vec::Vec;

    use super::{Arc, Weak};
    use std::sync::Mutex;

    struct Canary(*mut atomic::AtomicUsize);

    impl Drop for Canary {
        fn drop(&mut self) {
            unsafe {
                match *self {
                    Canary(c) => {
                        (*c).fetch_add(1, SeqCst);
                    }
                }
            }
        }
    }

    #[test]
    fn manually_share_arc() {
        let v = vec!(1, 2, 3, 4, 5, 6, 7, 8, 9, 10);
        let arc_v = Arc::new(v);

        let (tx, rx) = channel();

        let _t = thread::spawn(move || {
            let arc_v: Arc<Vec<i32>> = rx.recv().unwrap();
            assert_eq!((*arc_v)[3], 4);
        });

        tx.send(arc_v.clone()).unwrap();

        assert_eq!((*arc_v)[2], 3);
        assert_eq!((*arc_v)[4], 5);
    }

    #[test]
    fn test_arc_get_mut() {
        let mut x = Arc::new(3);
        *Arc::get_mut(&mut x).unwrap() = 4;
        assert_eq!(*x, 4);
        let y = x.clone();
        assert!(Arc::get_mut(&mut x).is_none());
        drop(y);
        assert!(Arc::get_mut(&mut x).is_some());
        let _w = x.downgrade();
        assert!(Arc::get_mut(&mut x).is_none());
    }
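
    // An added sanity check that the comparison impls above delegate to the
    // inner values rather than comparing pointers.
    #[test]
    fn eq_and_ord_use_inner_value() {
        let a = Arc::new(5);
        let b = Arc::new(5);

        // distinct allocations with equal contents compare equal
        assert!(a == b);
        assert!(a <= b && a >= b);
        assert!(Arc::new(4) < Arc::new(5));
        assert!(Arc::new(6) > Arc::new(5));
    }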

    #[test]
    fn test_cowarc_clone_make_unique() {
        let mut cow0 = Arc::new(75);
        let mut cow1 = cow0.clone();
        let mut cow2 = cow1.clone();

        assert!(75 == *Arc::make_unique(&mut cow0));
        assert!(75 == *Arc::make_unique(&mut cow1));
        assert!(75 == *Arc::make_unique(&mut cow2));

        *Arc::make_unique(&mut cow0) += 1;
        *Arc::make_unique(&mut cow1) += 2;
        *Arc::make_unique(&mut cow2) += 3;

        assert!(76 == *cow0);
        assert!(77 == *cow1);
        assert!(78 == *cow2);

        // none should point to the same backing memory
        assert!(*cow0 != *cow1);
        assert!(*cow0 != *cow2);
        assert!(*cow1 != *cow2);
    }

    #[test]
    fn test_cowarc_clone_unique2() {
        let mut cow0 = Arc::new(75);
        let cow1 = cow0.clone();
        let cow2 = cow1.clone();

        assert!(75 == *cow0);
        assert!(75 == *cow1);
        assert!(75 == *cow2);

        *Arc::make_unique(&mut cow0) += 1;

        assert!(76 == *cow0);
        assert!(75 == *cow1);
        assert!(75 == *cow2);

        // cow1 and cow2 should share the same contents
        // cow0 should have a unique reference
        assert!(*cow0 != *cow1);
        assert!(*cow0 != *cow2);
        assert!(*cow1 == *cow2);
    }

    #[test]
    fn test_cowarc_clone_weak() {
        let mut cow0 = Arc::new(75);
        let cow1_weak = cow0.downgrade();

        assert!(75 == *cow0);
        assert!(75 == *cow1_weak.upgrade().unwrap());

        *Arc::make_unique(&mut cow0) += 1;

        assert!(76 == *cow0);
        assert!(cow1_weak.upgrade().is_none());
    }

    #[test]
    fn test_live() {
        let x = Arc::new(5);
        let y = x.downgrade();
        assert!(y.upgrade().is_some());
    }

    #[test]
    fn test_dead() {
        let x = Arc::new(5);
        let y = x.downgrade();
        drop(x);
        assert!(y.upgrade().is_none());
    }

    #[test]
    fn weak_self_cyclic() {
        struct Cycle {
            x: Mutex<Option<Weak<Cycle>>>,
        }

        let a = Arc::new(Cycle { x: Mutex::new(None) });
        let b = a.clone().downgrade();
        *a.x.lock().unwrap() = Some(b);

        // hopefully we don't double-free (or leak)...
    }

    #[test]
    fn drop_arc() {
        let mut canary = atomic::AtomicUsize::new(0);
        let x = Arc::new(Canary(&mut canary as *mut atomic::AtomicUsize));
        drop(x);
        assert!(canary.load(Acquire) == 1);
    }

    #[test]
    fn drop_arc_weak() {
        let mut canary = atomic::AtomicUsize::new(0);
        let arc = Arc::new(Canary(&mut canary as *mut atomic::AtomicUsize));
        let arc_weak = arc.downgrade();
        assert!(canary.load(Acquire) == 0);
        drop(arc);
        assert!(canary.load(Acquire) == 1);
        drop(arc_weak);
    }

    #[test]
    fn test_strong_count() {
        let a = Arc::new(0u32);
        assert!(Arc::strong_count(&a) == 1);
        let w = a.downgrade();
        assert!(Arc::strong_count(&a) == 1);
        let b = w.upgrade().expect("");
        assert!(Arc::strong_count(&b) == 2);
        assert!(Arc::strong_count(&a) == 2);
        drop(w);
        drop(a);
        assert!(Arc::strong_count(&b) == 1);
        let c = b.clone();
        assert!(Arc::strong_count(&b) == 2);
        assert!(Arc::strong_count(&c) == 2);
    }

    #[test]
    fn test_weak_count() {
        let a = Arc::new(0u32);
        assert!(Arc::strong_count(&a) == 1);
        assert!(Arc::weak_count(&a) == 0);
        let w = a.downgrade();
        assert!(Arc::strong_count(&a) == 1);
        assert!(Arc::weak_count(&a) == 1);
        let x = w.clone();
        assert!(Arc::weak_count(&a) == 2);
        drop(w);
        drop(x);
        assert!(Arc::strong_count(&a) == 1);
        assert!(Arc::weak_count(&a) == 0);
        let c = a.clone();
        assert!(Arc::strong_count(&a) == 2);
        assert!(Arc::weak_count(&a) == 0);
        let d = c.downgrade();
        assert!(Arc::weak_count(&c) == 1);
        assert!(Arc::strong_count(&c) == 2);

        drop(a);
        drop(c);
        drop(d);
    }

    #[test]
    fn show_arc() {
        let a = Arc::new(5u32);
        assert_eq!(format!("{:?}", a), "5");
    }

    // Make sure deriving works with Arc<T>
    #[derive(Eq, Ord, PartialEq, PartialOrd, Clone, Debug, Default)]
    struct Foo {
        inner: Arc<i32>,
    }

    #[test]
    fn test_unsized() {
        let x: Arc<[i32]> = Arc::new([1, 2, 3]);
        assert_eq!(format!("{:?}", x), "[1, 2, 3]");
        let y = x.clone().downgrade();
        drop(x);
        assert!(y.upgrade().is_none());
    }
}