// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

/*!
 * Concurrency-enabled mechanisms for sharing mutable and/or immutable state
 * between tasks.
 */

use std::mem;
use std::ptr;
use std::rt::heap::exchange_free;
use std::sync::atomics;
use std::mem::{min_align_of, size_of};

/// An atomically reference counted wrapper for shared state.
///
/// # Example
///
/// In this example, a large vector of floats is shared between several tasks.
/// With simple pipes, without `Arc`, a copy would have to be made for each
/// task.
///
/// ```rust
/// use sync::Arc;
///
/// fn main() {
///     let numbers = Vec::from_fn(100, |i| i as f32);
///     let shared_numbers = Arc::new(numbers);
///
///     for _ in range(0, 10) {
///         let child_numbers = shared_numbers.clone();
///
///         spawn(proc() {
///             let local_numbers = child_numbers.as_slice();
///
///             // Work with the local numbers
///         });
///     }
/// }
/// ```
#[unsafe_no_drop_flag]
pub struct Arc<T> {
    x: *mut ArcInner<T>,
}

/// A weak pointer to an `Arc`.
///
/// Weak pointers will not keep the data inside of the `Arc` alive, and can be
/// used to break cycles between `Arc` pointers.
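///
/// # Example
///
/// A minimal sketch of the intended usage (illustrative, not exhaustive):
///
/// ```rust
/// use sync::Arc;
///
/// let strong = Arc::new(5);
/// let weak = strong.downgrade();
/// // A `Weak` cannot access the data directly; it must be upgraded first.
/// assert_eq!(*weak.upgrade().unwrap(), 5);
/// ```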
#[unsafe_no_drop_flag]
pub struct Weak<T> {
    x: *mut ArcInner<T>,
}

struct ArcInner<T> {
    strong: atomics::AtomicUint,
    weak: atomics::AtomicUint,
    data: T,
}

impl<T: Share + Send> Arc<T> {
    /// Create an atomically reference counted wrapper.
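    ///
    /// # Example
    ///
    /// A minimal sketch:
    ///
    /// ```rust
    /// use sync::Arc;
    ///
    /// let five = Arc::new(5);
    /// ```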
    #[inline]
    pub fn new(data: T) -> Arc<T> {
        // Start the weak pointer count as 1 which is the weak pointer that's
        // held by all the strong pointers (kinda), see std/rc.rs for more info
        let x = box ArcInner {
            strong: atomics::AtomicUint::new(1),
            weak: atomics::AtomicUint::new(1),
            data: data,
        };
        Arc { x: unsafe { mem::transmute(x) } }
    }

    #[inline]
    fn inner<'a>(&'a self) -> &'a ArcInner<T> {
        // This unsafety is ok because while this arc is alive we're guaranteed
        // that the inner pointer is valid. Furthermore, we know that the
        // `ArcInner` structure itself is `Share` because the inner data is
        // `Share` as well, so we're ok loaning out an immutable pointer to
        // these contents.
        unsafe { &*self.x }
    }

    /// Downgrades a strong pointer to a weak pointer.
    ///
    /// Weak pointers will not keep the data alive. Once all strong references
    /// to the underlying data have been dropped, the data itself will be
    /// destroyed.
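    ///
    /// # Example
    ///
    /// An illustrative sketch:
    ///
    /// ```rust
    /// use sync::Arc;
    ///
    /// let strong = Arc::new(vec!(1, 2, 3));
    /// let weak = strong.downgrade();
    /// // Upgrading succeeds while a strong pointer is still alive.
    /// assert_eq!(*weak.upgrade().unwrap().get(0), 1);
    /// ```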
    pub fn downgrade(&self) -> Weak<T> {
        // See the clone() impl for why this is relaxed
        self.inner().weak.fetch_add(1, atomics::Relaxed);
        Weak { x: self.x }
    }
}

impl<T: Share + Send> Clone for Arc<T> {
    /// Duplicate an atomically reference counted wrapper.
    ///
    /// The resulting two `Arc` objects will point to the same underlying data
    /// object. However, one of the `Arc` objects can be sent to another task,
    /// allowing them to share the underlying data.
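    ///
    /// # Example
    ///
    /// An illustrative sketch of sharing data with another task:
    ///
    /// ```rust
    /// use sync::Arc;
    ///
    /// let data = Arc::new(vec!(1f32, 2f32, 3f32));
    /// let handle = data.clone();
    /// spawn(proc() {
    ///     // `handle` points at the same vector as `data`.
    ///     assert_eq!(*handle.get(0), 1f32);
    /// });
    /// ```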
    #[inline]
    fn clone(&self) -> Arc<T> {
        // Using a relaxed ordering is alright here, as knowledge of the
        // original reference prevents other threads from erroneously deleting
        // the object.
        //
        // As explained in the [Boost documentation][1], increasing the
        // reference counter can always be done with memory_order_relaxed: new
        // references to an object can only be formed from an existing
        // reference, and passing an existing reference from one thread to
        // another must already provide any required synchronization.
        //
        // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html)
        self.inner().strong.fetch_add(1, atomics::Relaxed);
        Arc { x: self.x }
    }
}

impl<T: Send + Share> Deref<T> for Arc<T> {
    #[inline]
    fn deref<'a>(&'a self) -> &'a T {
        &self.inner().data
    }
}

impl<T: Send + Share + Clone> Arc<T> {
    /// Acquires a mutable pointer to the inner contents by guaranteeing that
    /// the reference count is one (no sharing is possible).
    ///
    /// This is also referred to as a copy-on-write operation because the inner
    /// data is cloned if the reference count is greater than one.
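    ///
    /// # Example
    ///
    /// An illustrative sketch:
    ///
    /// ```rust
    /// use sync::Arc;
    ///
    /// let mut a = Arc::new(5u);
    /// let b = a.clone();          // `a` and `b` share the same data
    /// *a.make_unique() += 1;      // the data is cloned before mutation
    /// assert_eq!(*a, 6);
    /// assert_eq!(*b, 5);
    /// ```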
    #[inline]
    #[experimental]
    pub fn make_unique<'a>(&'a mut self) -> &'a mut T {
        if self.inner().strong.load(atomics::SeqCst) != 1 {
            *self = Arc::new(self.deref().clone())
        }
        // This unsafety is ok because we're guaranteed that the pointer
        // returned is the *only* pointer that will ever be returned to T. Our
        // reference count is guaranteed to be 1 at this point, and we required
        // the Arc itself to be `mut`, so we're returning the only possible
        // reference to the inner data.
        unsafe { mem::transmute::<&_, &mut _>(self.deref()) }
    }
}

#[unsafe_destructor]
impl<T: Share + Send> Drop for Arc<T> {
    fn drop(&mut self) {
        // This structure has #[unsafe_no_drop_flag], so this drop glue may run
        // more than once (but it is guaranteed to be zeroed after the first if
        // it's run more than once)
        if self.x.is_null() { return }

        // Because `fetch_sub` is already atomic, we do not need to synchronize
        // with other threads unless we are going to delete the object. This
        // same logic applies to the below `fetch_sub` to the `weak` count.
        if self.inner().strong.fetch_sub(1, atomics::Release) != 1 { return }

        // This fence is needed to prevent reordering of use of the data and
        // deletion of the data. Because it is marked `Release`, the
        // decreasing of the reference count synchronizes with this `Acquire`
        // fence. This means that use of the data happens before decreasing
        // the reference count, which happens before this fence, which
        // happens before the deletion of the data.
        //
        // As explained in the [Boost documentation][1],
        //
        // It is important to enforce any possible access to the object in
        // one thread (through an existing reference) to *happen before*
        // deleting the object in a different thread. This is achieved by a
        // "release" operation after dropping a reference (any access to the
        // object through this reference must obviously have happened before),
        // and an "acquire" operation before deleting the object.
        //
        // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html)
        atomics::fence(atomics::Acquire);

        // Destroy the data at this time, even though we may not free the box
        // allocation itself (there may still be weak pointers lying around).
        unsafe { drop(ptr::read(&self.inner().data)); }

        if self.inner().weak.fetch_sub(1, atomics::Release) == 1 {
            atomics::fence(atomics::Acquire);
            unsafe { exchange_free(self.x as *mut u8, size_of::<ArcInner<T>>(),
                                   min_align_of::<ArcInner<T>>()) }
        }
    }
}

impl<T: Share + Send> Weak<T> {
    /// Attempts to upgrade this weak reference to a strong reference.
    ///
    /// This method will fail to upgrade this reference if the strong reference
    /// count has already reached 0, but if there are still other active strong
    /// references this function will return a new strong reference to the data.
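    ///
    /// # Example
    ///
    /// An illustrative sketch:
    ///
    /// ```rust
    /// use sync::Arc;
    ///
    /// let strong = Arc::new(5);
    /// let weak = strong.downgrade();
    /// assert!(weak.upgrade().is_some()); // a strong reference still exists
    /// drop(strong);
    /// assert!(weak.upgrade().is_none()); // the data has been destroyed
    /// ```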
    pub fn upgrade(&self) -> Option<Arc<T>> {
        // We use a CAS loop to increment the strong count instead of a
        // fetch_add because once the count hits 0 it must never be above 0.
        let inner = self.inner();
        loop {
            let n = inner.strong.load(atomics::SeqCst);
            if n == 0 { return None }
            let old = inner.strong.compare_and_swap(n, n + 1, atomics::SeqCst);
            if old == n { return Some(Arc { x: self.x }) }
        }
    }

    #[inline]
    fn inner<'a>(&'a self) -> &'a ArcInner<T> {
        // See comments above for why this is "safe"
        unsafe { &*self.x }
    }
}

impl<T: Share + Send> Clone for Weak<T> {
    #[inline]
    fn clone(&self) -> Weak<T> {
        // See comments in Arc::clone() for why this is relaxed
        self.inner().weak.fetch_add(1, atomics::Relaxed);
        Weak { x: self.x }
    }
}

#[unsafe_destructor]
impl<T: Share + Send> Drop for Weak<T> {
    fn drop(&mut self) {
        // See comments above for why this check is here
        if self.x.is_null() { return }

        // If we find out that we were the last weak pointer, then it's time to
        // deallocate the data entirely. See the discussion in Arc::drop() about
        // the memory orderings
        if self.inner().weak.fetch_sub(1, atomics::Release) == 1 {
            atomics::fence(atomics::Acquire);
            unsafe { exchange_free(self.x as *mut u8, size_of::<ArcInner<T>>(),
                                   min_align_of::<ArcInner<T>>()) }
        }
    }
}

#[cfg(test)]
#[allow(experimental)]
mod tests {
    use super::{Arc, Weak};
    use std::sync::atomics;
    use std::task;
    use Mutex;

    struct Canary(*mut atomics::AtomicUint);

    impl Drop for Canary {
        fn drop(&mut self) {
            unsafe {
                match *self {
                    Canary(c) => {
                        (*c).fetch_add(1, atomics::SeqCst);
                    }
                }
            }
        }
    }

    #[test]
    fn manually_share_arc() {
        let v = vec!(1, 2, 3, 4, 5, 6, 7, 8, 9, 10);
        let arc_v = Arc::new(v);

        let (tx, rx) = channel();

        task::spawn(proc() {
            let arc_v: Arc<Vec<int>> = rx.recv();
            assert_eq!(*arc_v.get(3), 4);
        });

        tx.send(arc_v.clone());

        assert_eq!(*arc_v.get(2), 3);
        assert_eq!(*arc_v.get(4), 5);

        info!("{:?}", arc_v);
    }

    #[test]
    fn test_cowarc_clone_make_unique() {
        let mut cow0 = Arc::new(75u);
        let mut cow1 = cow0.clone();
        let mut cow2 = cow1.clone();

        assert!(75 == *cow0.make_unique());
        assert!(75 == *cow1.make_unique());
        assert!(75 == *cow2.make_unique());

        *cow0.make_unique() += 1;
        *cow1.make_unique() += 2;
        *cow2.make_unique() += 3;

        assert!(76 == *cow0);
        assert!(77 == *cow1);
        assert!(78 == *cow2);

        // none should point to the same backing memory
        assert!(*cow0 != *cow1);
        assert!(*cow0 != *cow2);
        assert!(*cow1 != *cow2);
    }

    #[test]
    fn test_cowarc_clone_unique2() {
        let mut cow0 = Arc::new(75u);
        let cow1 = cow0.clone();
        let cow2 = cow1.clone();

        assert!(75 == *cow0);
        assert!(75 == *cow1);
        assert!(75 == *cow2);

        *cow0.make_unique() += 1;

        assert!(76 == *cow0);
        assert!(75 == *cow1);
        assert!(75 == *cow2);

        // cow1 and cow2 should share the same contents
        // cow0 should have a unique reference
        assert!(*cow0 != *cow1);
        assert!(*cow0 != *cow2);
        assert!(*cow1 == *cow2);
    }

    #[test]
    fn test_live() {
        let x = Arc::new(5);
        let y = x.downgrade();
        assert!(y.upgrade().is_some());
    }

    #[test]
    fn test_dead() {
        let x = Arc::new(5);
        let y = x.downgrade();
        drop(x);
        assert!(y.upgrade().is_none());
    }

    #[test]
    fn weak_self_cyclic() {
        struct Cycle {
            x: Mutex<Option<Weak<Cycle>>>
        }

        let a = Arc::new(Cycle { x: Mutex::new(None) });
        let b = a.clone().downgrade();
        *a.deref().x.lock().deref_mut() = Some(b);

        // hopefully we don't double-free (or leak)...
    }

    #[test]
    fn drop_arc() {
        let mut canary = atomics::AtomicUint::new(0);
        let x = Arc::new(Canary(&mut canary as *mut atomics::AtomicUint));
        drop(x);
        assert!(canary.load(atomics::Acquire) == 1);
    }

    #[test]
    fn drop_arc_weak() {
        let mut canary = atomics::AtomicUint::new(0);
        let arc = Arc::new(Canary(&mut canary as *mut atomics::AtomicUint));
        let arc_weak = arc.downgrade();
        assert!(canary.load(atomics::Acquire) == 0);
        drop(arc);
        assert!(canary.load(atomics::Acquire) == 1);
        drop(arc_weak);
    }
}