From 6875eb574802c4ea7da5b83bfc690bd1118be364 Mon Sep 17 00:00:00 2001 From: Steve Klabnik <steve@steveklabnik.com> Date: Tue, 16 Dec 2014 20:51:55 -0500 Subject: [PATCH] Improve Arc<T> documentation, and Rc<T> docs a bit Take the docs from Rc<T>, apply them to Arc<T>, and fix some line lengths. --- src/liballoc/arc.rs | 386 ++++++++++++++++++++++++++++++++++++-------- src/liballoc/rc.rs | 31 ++-- 2 files changed, 329 insertions(+), 88 deletions(-) diff --git a/src/liballoc/arc.rs b/src/liballoc/arc.rs index 1f1909fd33c..4d2d545aec0 100644 --- a/src/liballoc/arc.rs +++ b/src/liballoc/arc.rs @@ -10,8 +10,61 @@ #![stable] -//! Concurrency-enabled mechanisms for sharing mutable and/or immutable state -//! between tasks. +//! Thread-safe reference-counted boxes (the `Arc<T>` type). +//! +//! The `Arc<T>` type provides shared ownership of an immutable value. Destruction is +//! deterministic, and will occur as soon as the last owner is gone. It is marked as `Send` because +//! it uses atomic reference counting. +//! +//! If you do not need thread-safety, and just need shared ownership, consider the [`Rc<T>` +//! type](../rc/struct.Rc.html). It is the same as `Arc<T>`, but does not use atomics, making it +//! both thread-unsafe and significantly faster when updating the reference count. +//! +//! The `downgrade` method can be used to create a non-owning `Weak<T>` pointer to the box. A +//! `Weak<T>` pointer can be upgraded to an `Arc<T>` pointer, but will return `None` if the value +//! has already been dropped. +//! +//! For example, a tree with parent pointers can be represented by putting the nodes behind strong +//! `Arc<T>` pointers, and then storing the parent pointers as `Weak<T>` pointers. +//! +//! # Examples +//! +//! Sharing some immutable data between tasks: +//! +//! ``` +//! use std::sync::Arc; +//! +//! let five = Arc::new(5i); +//! +//! for _ in range(0u, 10) { +//! let five = five.clone(); +//! +//! spawn(move || { +//! println!("{}", five); +//! }); +//! } +//! ``` +//! +//! Sharing mutable data safely between tasks with a `Mutex`: +//! +//! ``` +//! use std::sync::Arc; +//! use std::sync::Mutex; +//! +//! let five = Arc::new(Mutex::new(5i)); +//! +//! for _ in range(0u, 10) { +//! let five = five.clone(); +//! +//! spawn(move || { +//! let mut number = five.lock(); +//! +//! *number += 1; +//! +//! println!("{}", *number); // prints the updated value +//! }); +//! } +//! ``` use core::atomic; use core::clone::Clone; @@ -32,9 +85,8 @@ use heap::deallocate; /// /// # Example /// -/// In this example, a large vector of floats is shared between several tasks. -/// With simple pipes, without `Arc`, a copy would have to be made for each -/// task. +/// In this example, a large vector of floats is shared between several tasks. With simple pipes, +/// without `Arc`, a copy would have to be made for each task. /// /// ```rust /// use std::sync::Arc; @@ -64,8 +116,8 @@ pub struct Arc<T> { /// A weak pointer to an `Arc`. /// -/// Weak pointers will not keep the data inside of the `Arc` alive, and can be -/// used to break cycles between `Arc` pointers. +/// Weak pointers will not keep the data inside of the `Arc` alive, and can be used to break cycles +/// between `Arc` pointers. #[unsafe_no_drop_flag] #[experimental = "Weak pointers may not belong in this module."] pub struct Weak<T> { @@ -81,7 +133,15 @@ struct ArcInner<T> { } impl<T: Sync + Send> Arc<T> { - /// Creates an atomically reference counted wrapper. + /// Constructs a new `Arc<T>`.
+ /// + /// # Examples + /// + /// ``` + /// use std::sync::Arc; + /// + /// let five = Arc::new(5i); + /// ``` #[inline] #[stable] pub fn new(data: T) -> Arc<T> { @@ -95,11 +155,17 @@ impl<T: Sync + Send> Arc<T> { Arc { _ptr: unsafe { mem::transmute(x) } } } - /// Downgrades a strong pointer to a weak pointer. + /// Downgrades the `Arc<T>` to a `Weak<T>` reference. /// - /// Weak pointers will not keep the data alive. Once all strong references - /// to the underlying data have been dropped, the data itself will be - /// destroyed. + /// # Examples + /// + /// ``` + /// use std::sync::Arc; + /// + /// let five = Arc::new(5i); + /// + /// let weak_five = five.downgrade(); + /// ``` #[experimental = "Weak pointers may not belong in this module."] pub fn downgrade(&self) -> Weak<T> { // See the clone() impl for why this is relaxed @@ -111,11 +177,10 @@ impl<T: Sync + Send> Arc<T> { impl<T> Arc<T> { #[inline] fn inner(&self) -> &ArcInner<T> { - // This unsafety is ok because while this arc is alive we're guaranteed - // that the inner pointer is valid. Furthermore, we know that the - // `ArcInner` structure itself is `Sync` because the inner data is - // `Sync` as well, so we're ok loaning out an immutable pointer to - // these contents. + // This unsafety is ok because while this arc is alive we're guaranteed that the inner + // pointer is valid. Furthermore, we know that the `ArcInner` structure itself is `Sync` + // because the inner data is `Sync` as well, so we're ok loaning out an immutable pointer + // to these contents. unsafe { &*self._ptr } } } @@ -132,22 +197,28 @@ pub fn strong_count<T>(this: &Arc<T>) -> uint { this.inner().strong.load(atomic: #[unstable = "waiting on stability of Clone"] impl<T> Clone for Arc<T> { - /// Duplicate an atomically reference counted wrapper. + /// Makes a clone of the `Arc<T>`. /// - /// The resulting two `Arc` objects will point to the same underlying data - /// object. However, one of the `Arc` objects can be sent to another task, - /// allowing them to share the underlying data. + /// This increases the strong reference count. + /// + /// # Examples + /// + /// ``` + /// use std::sync::Arc; + /// + /// let five = Arc::new(5i); + /// + /// five.clone(); + /// ``` #[inline] fn clone(&self) -> Arc<T> { - // Using a relaxed ordering is alright here, as knowledge of the - // original reference prevents other threads from erroneously deleting - // the object. + // Using a relaxed ordering is alright here, as knowledge of the original reference + // prevents other threads from erroneously deleting the object. // - // As explained in the [Boost documentation][1], Increasing the - // reference counter can always be done with memory_order_relaxed: New - // references to an object can only be formed from an existing - // reference, and passing an existing reference from one thread to - // another must already provide any required synchronization. + // As explained in the [Boost documentation][1], Increasing the reference counter can + // always be done with memory_order_relaxed: New references to an object can only be formed + // from an existing reference, and passing an existing reference from one thread to another + // must already provide any required synchronization. 
// // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html) self.inner().strong.fetch_add(1, atomic::Relaxed); @@ -164,26 +235,33 @@ impl<T> Deref<T> for Arc<T> { } impl<T: Send + Sync + Clone> Arc<T> { - /// Acquires a mutable pointer to the inner contents by guaranteeing that - /// the reference count is one (no sharing is possible). + /// Makes a mutable reference from the given `Arc<T>`. /// - /// This is also referred to as a copy-on-write operation because the inner - /// data is cloned if the reference count is greater than one. + /// This is also referred to as a copy-on-write operation because the inner data is cloned if + /// the reference count is greater than one. + /// + /// # Examples + /// + /// ``` + /// use std::sync::Arc; + /// + /// let mut five = Arc::new(5i); + /// + /// let mut_five = five.make_unique(); + /// ``` #[inline] #[experimental] pub fn make_unique(&mut self) -> &mut T { - // Note that we hold a strong reference, which also counts as - // a weak reference, so we only clone if there is an - // additional reference of either kind. + // Note that we hold a strong reference, which also counts as a weak reference, so we only + // clone if there is an additional reference of either kind. if self.inner().strong.load(atomic::SeqCst) != 1 || self.inner().weak.load(atomic::SeqCst) != 1 { *self = Arc::new((**self).clone()) } - // This unsafety is ok because we're guaranteed that the pointer - // returned is the *only* pointer that will ever be returned to T. Our - // reference count is guaranteed to be 1 at this point, and we required - // the Arc itself to be `mut`, so we're returning the only possible - // reference to the inner data. + // This unsafety is ok because we're guaranteed that the pointer returned is the *only* + // pointer that will ever be returned to T. Our reference count is guaranteed to be 1 at + // this point, and we required the Arc itself to be `mut`, so we're returning the only + // possible reference to the inner data. let inner = unsafe { &mut *self._ptr }; &mut inner.data } @@ -192,38 +270,59 @@ impl<T: Send + Sync + Clone> Arc<T> { #[unsafe_destructor] #[experimental = "waiting on stability of Drop"] impl<T: Sync + Send> Drop for Arc<T> { + /// Drops the `Arc<T>`. + /// + /// This will decrement the strong reference count. If the strong reference count becomes zero + /// and the only other references are `Weak<T>` ones, the inner value is dropped. + /// + /// # Examples + /// + /// ``` + /// use std::sync::Arc; + /// + /// { + /// let five = Arc::new(5i); + /// + /// // stuff + /// + /// drop(five); // explicit drop + /// } + /// { + /// let five = Arc::new(5i); + /// + /// // stuff + /// + /// } // implicit drop + /// ``` fn drop(&mut self) { - // This structure has #[unsafe_no_drop_flag], so this drop glue may run - // more than once (but it is guaranteed to be zeroed after the first if - // it's run more than once) + // This structure has #[unsafe_no_drop_flag], so this drop glue may run more than once (but + // it is guaranteed to be zeroed after the first if it's run more than once) if self._ptr.is_null() { return } - // Because `fetch_sub` is already atomic, we do not need to synchronize - // with other threads unless we are going to delete the object. This - // same logic applies to the below `fetch_sub` to the `weak` count. + // Because `fetch_sub` is already atomic, we do not need to synchronize with other threads + // unless we are going to delete the object.
This same logic applies to the below + // `fetch_sub` to the `weak` count. if self.inner().strong.fetch_sub(1, atomic::Release) != 1 { return } - // This fence is needed to prevent reordering of use of the data and - // deletion of the data. Because it is marked `Release`, the - // decreasing of the reference count synchronizes with this `Acquire` - // fence. This means that use of the data happens before decreasing - // the reference count, which happens before this fence, which - // happens before the deletion of the data. + // This fence is needed to prevent reordering of use of the data and deletion of the data. + // Because it is marked `Release`, the decreasing of the reference count synchronizes with + // this `Acquire` fence. This means that use of the data happens before decreasing the + // reference count, which happens before this fence, which happens before the deletion of + // the data. // // As explained in the [Boost documentation][1], // - // It is important to enforce any possible access to the object in - // one thread (through an existing reference) to *happen before* - // deleting the object in a different thread. This is achieved by a - // "release" operation after dropping a reference (any access to the - // object through this reference must obviously happened before), - // and an "acquire" operation before deleting the object. + // > It is important to enforce any possible access to the object in one thread (through an + // > existing reference) to *happen before* deleting the object in a different thread. This + // > is achieved by a "release" operation after dropping a reference (any access to the + // > object through this reference must obviously happened before), and an "acquire" + // > operation before deleting the object. // // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html) atomic::fence(atomic::Acquire); - // Destroy the data at this time, even though we may not free the box - // allocation itself (there may still be weak pointers lying around). + // Destroy the data at this time, even though we may not free the box allocation itself + // (there may still be weak pointers lying around). unsafe { drop(ptr::read(&self.inner().data)); } if self.inner().weak.fetch_sub(1, atomic::Release) == 1 { @@ -236,14 +335,26 @@ impl<T: Sync + Send> Drop for Arc<T> { #[experimental = "Weak pointers may not belong in this module."] impl<T: Sync + Send> Weak<T> { - /// Attempts to upgrade this weak reference to a strong reference. + /// Upgrades a weak reference to a strong reference. /// - /// This method will not upgrade this reference if the strong reference count has already - /// reached 0, but if there are still other active strong references this function will return - /// a new strong reference to the data. + /// Upgrades the `Weak<T>` reference to an `Arc<T>`, if possible. + /// + /// Returns `None` if there were no strong references and the data was destroyed. + /// + /// # Examples + /// + /// ``` + /// use std::sync::Arc; + /// + /// let five = Arc::new(5i); + /// + /// let weak_five = five.downgrade(); + /// + /// let strong_five: Option<Arc<_>> = weak_five.upgrade(); + /// ``` pub fn upgrade(&self) -> Option<Arc<T>> { - // We use a CAS loop to increment the strong count instead of a - // fetch_add because once the count hits 0 is must never be above 0. + // We use a CAS loop to increment the strong count instead of a fetch_add because once the + // count hits 0 it must never be above 0.
let inner = self.inner(); loop { let n = inner.strong.load(atomic::SeqCst); @@ -262,6 +373,19 @@ impl<T: Sync + Send> Weak<T> { #[experimental = "Weak pointers may not belong in this module."] impl<T: Sync + Send> Clone for Weak<T> { + /// Makes a clone of the `Weak<T>`. + /// + /// This increases the weak reference count. + /// + /// # Examples + /// + /// ``` + /// use std::sync::Arc; + /// + /// let weak_five = Arc::new(5i).downgrade(); + /// + /// weak_five.clone(); + /// ``` #[inline] fn clone(&self) -> Weak<T> { // See comments in Arc::clone() for why this is relaxed @@ -273,13 +397,37 @@ impl<T: Sync + Send> Clone for Weak<T> { #[unsafe_destructor] #[experimental = "Weak pointers may not belong in this module."] impl<T: Sync + Send> Drop for Weak<T> { + /// Drops the `Weak<T>`. + /// + /// This will decrement the weak reference count. + /// + /// # Examples + /// + /// ``` + /// use std::sync::Arc; + /// + /// { + /// let five = Arc::new(5i); + /// let weak_five = five.downgrade(); + /// + /// // stuff + /// + /// drop(weak_five); // explicit drop + /// } + /// { + /// let five = Arc::new(5i); + /// let weak_five = five.downgrade(); + /// + /// // stuff + /// + /// } // implicit drop + /// ``` fn drop(&mut self) { // see comments above for why this check is here if self._ptr.is_null() { return } - // If we find out that we were the last weak pointer, then its time to - // deallocate the data entirely. See the discussion in Arc::drop() about - // the memory orderings + // If we find out that we were the last weak pointer, then it's time to deallocate the data + // entirely. See the discussion in Arc::drop() about the memory orderings if self.inner().weak.fetch_sub(1, atomic::Release) == 1 { atomic::fence(atomic::Acquire); unsafe { deallocate(self._ptr as *mut u8, size_of::<ArcInner<T>>(), @@ -290,18 +438,114 @@ impl<T: Sync + Send> Drop for Weak<T> { #[unstable = "waiting on PartialEq"] impl<T: PartialEq> PartialEq for Arc<T> { + /// Equality for two `Arc<T>`s. + /// + /// Two `Arc<T>`s are equal if their inner values are equal. + /// + /// # Examples + /// + /// ``` + /// use std::sync::Arc; + /// + /// let five = Arc::new(5i); + /// + /// five == Arc::new(5i); + /// ``` fn eq(&self, other: &Arc<T>) -> bool { *(*self) == *(*other) } + + /// Inequality for two `Arc<T>`s. + /// + /// Two `Arc<T>`s are unequal if their inner values are unequal. + /// + /// # Examples + /// + /// ``` + /// use std::sync::Arc; + /// + /// let five = Arc::new(5i); + /// + /// five != Arc::new(5i); + /// ``` fn ne(&self, other: &Arc<T>) -> bool { *(*self) != *(*other) } } #[unstable = "waiting on PartialOrd"] impl<T: PartialOrd> PartialOrd for Arc<T> { + /// Partial comparison for two `Arc<T>`s. + /// + /// The two are compared by calling `partial_cmp()` on their inner values. + /// + /// # Examples + /// + /// ``` + /// use std::sync::Arc; + /// + /// let five = Arc::new(5i); + /// + /// five.partial_cmp(&Arc::new(5i)); + /// ``` fn partial_cmp(&self, other: &Arc<T>) -> Option<Ordering> { (**self).partial_cmp(&**other) } + + /// Less-than comparison for two `Arc<T>`s. + /// + /// The two are compared by calling `<` on their inner values. + /// + /// # Examples + /// + /// ``` + /// use std::sync::Arc; + /// + /// let five = Arc::new(5i); + /// + /// five < Arc::new(5i); + /// ``` fn lt(&self, other: &Arc<T>) -> bool { *(*self) < *(*other) } + + /// 'Less-than or equal to' comparison for two `Arc<T>`s. + /// + /// The two are compared by calling `<=` on their inner values.
+ /// + /// # Examples + /// + /// ``` + /// use std::sync::Arc; + /// + /// let five = Arc::new(5i); + /// + /// five <= Arc::new(5i); + /// ``` fn le(&self, other: &Arc<T>) -> bool { *(*self) <= *(*other) } - fn ge(&self, other: &Arc<T>) -> bool { *(*self) >= *(*other) } + + /// Greater-than comparison for two `Arc<T>`s. + /// + /// The two are compared by calling `>` on their inner values. + /// + /// # Examples + /// + /// ``` + /// use std::sync::Arc; + /// + /// let five = Arc::new(5i); + /// + /// five > Arc::new(5i); + /// ``` fn gt(&self, other: &Arc<T>) -> bool { *(*self) > *(*other) } + + /// 'Greater-than or equal to' comparison for two `Arc<T>`s. + /// + /// The two are compared by calling `>=` on their inner values. + /// + /// # Examples + /// + /// ``` + /// use std::sync::Arc; + /// + /// let five = Arc::new(5i); + /// + /// five >= Arc::new(5i); + /// ``` + fn ge(&self, other: &Arc<T>) -> bool { *(*self) >= *(*other) } } #[unstable = "waiting on Ord"] impl<T: Ord> Ord for Arc<T> { diff --git a/src/liballoc/rc.rs b/src/liballoc/rc.rs index 217c898e661..4e2fbc82b4a 100644 --- a/src/liballoc/rc.rs +++ b/src/liballoc/rc.rs @@ -167,12 +167,12 @@ struct RcBox<T> { /// An immutable reference-counted pointer type. /// -/// See the [module level documentation](../index.html) for more. +/// See the [module level documentation](../index.html) for more details. #[unsafe_no_drop_flag] #[stable] pub struct Rc<T> { - // FIXME #12808: strange names to try to avoid interfering with - // field accesses of the contained type via Deref + // FIXME #12808: strange names to try to avoid interfering with field accesses of the contained + // type via Deref _ptr: *mut RcBox<T>, _nosend: marker::NoSend, _noshare: marker::NoSync @@ -192,11 +192,9 @@ impl<T> Rc<T> { pub fn new(value: T) -> Rc<T> { unsafe { Rc { - // there is an implicit weak pointer owned by all the - // strong pointers, which ensures that the weak - // destructor never frees the allocation while the - // strong destructor is running, even if the weak - // pointer is stored inside the strong one. + // there is an implicit weak pointer owned by all the strong pointers, which + // ensures that the weak destructor never frees the allocation while the strong + // destructor is running, even if the weak pointer is stored inside the strong one. _ptr: transmute(box RcBox { value: value, strong: Cell::new(1), @@ -340,11 +338,10 @@ impl<T: Clone> Rc<T> { if !is_unique(self) { *self = Rc::new((**self).clone()) } - // This unsafety is ok because we're guaranteed that the pointer - // returned is the *only* pointer that will ever be returned to T. Our - // reference count is guaranteed to be 1 at this point, and we required - // the `Rc<T>` itself to be `mut`, so we're returning the only possible - // reference to the inner value. + // This unsafety is ok because we're guaranteed that the pointer returned is the *only* + // pointer that will ever be returned to T. Our reference count is guaranteed to be 1 at + // this point, and we required the `Rc<T>` itself to be `mut`, so we're returning the only + // possible reference to the inner value. let inner = unsafe { &mut *self._ptr }; &mut inner.value } @@ -398,8 +395,8 @@ impl<T> Drop for Rc<T> { if self.strong() == 0 { ptr::read(&**self); // destroy the contained object - // remove the implicit "strong weak" pointer now - // that we've destroyed the contents. + // remove the implicit "strong weak" pointer now that we've destroyed the + // contents. 
self.dec_weak(); if self.weak() == 0 { @@ -677,8 +674,8 @@ impl<T> Drop for Weak<T> { unsafe { if !self._ptr.is_null() { self.dec_weak(); - // the weak count starts at 1, and will only go to - // zero if all the strong pointers have disappeared. + // the weak count starts at 1, and will only go to zero if all the strong pointers + // have disappeared. if self.weak() == 0 { deallocate(self._ptr as *mut u8, size_of::<RcBox<T>>(), min_align_of::<RcBox<T>>())
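The module docs added in this patch state that a `Weak<T>` pointer can be upgraded to an `Arc<T>`, but that the upgrade returns `None` once the value has been dropped. A minimal stand-alone sketch of that behaviour, written against the same 2014-era API surface the patch documents (`downgrade()` and `upgrade()`); the variable names are illustrative only:

```rust
use std::sync::Arc;

let five = Arc::new(5i);
let weak_five = five.downgrade();

// While a strong reference still exists, upgrading succeeds.
assert!(weak_five.upgrade().is_some());

// Dropping the last `Arc<T>` destroys the inner value, so the weak
// pointer can no longer be upgraded.
drop(five);
assert!(weak_five.upgrade().is_none());
```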
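Similarly, the `make_unique()` docs above describe copy-on-write behaviour: the inner data is cloned when the reference count is greater than one. A small sketch of what that means for two handles to the same value, again assuming the era-specific `make_unique()` shown in the diff:

```rust
use std::sync::Arc;

let mut five = Arc::new(5i);
let other_five = five.clone(); // strong count is now 2

// The value is shared, so `make_unique` clones the inner data first;
// the mutation is therefore only visible through `five`.
*five.make_unique() += 1;

assert_eq!(*five, 6);
assert_eq!(*other_five, 5); // the other handle still sees the original value
```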