Auto merge of - eddyb:oh-snap-ctfe-arrived, r=alexcrichton

bors 2015-05-27 08:47:53 +00:00
commit eb16ad6e71
94 changed files with 441 additions and 1878 deletions

@ -1,686 +0,0 @@
// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![stable(feature = "rust1", since = "1.0.0")]
//! Thread-safe reference-counted boxes (the `Arc<T>` type).
//!
//! The `Arc<T>` type provides shared ownership of an immutable value.
//! Destruction is deterministic, and will occur as soon as the last owner is
//! gone. It is marked as `Send` because it uses atomic reference counting.
//!
//! If you do not need thread-safety, and just need shared ownership, consider
//! the [`Rc<T>` type](../rc/struct.Rc.html). It is the same as `Arc<T>`, but
//! does not use atomics, making it thread-unsafe but significantly faster
//! when updating the reference count.
//!
//! The `downgrade` method can be used to create a non-owning `Weak<T>` pointer
//! to the box. A `Weak<T>` pointer can be upgraded to an `Arc<T>` pointer, but
//! will return `None` if the value has already been dropped.
//!
//! For example, a tree with parent pointers can be represented by putting the
//! nodes behind strong `Arc<T>` pointers, and then storing the parent pointers
//! as `Weak<T>` pointers.
//!
//! # Examples
//!
//! Sharing some immutable data between threads:
//!
//! ```no_run
//! use std::sync::Arc;
//! use std::thread;
//!
//! let five = Arc::new(5);
//!
//! for _ in 0..10 {
//! let five = five.clone();
//!
//! thread::spawn(move || {
//! println!("{:?}", five);
//! });
//! }
//! ```
//!
//! Sharing mutable data safely between threads with a `Mutex`:
//!
//! ```no_run
//! use std::sync::{Arc, Mutex};
//! use std::thread;
//!
//! let five = Arc::new(Mutex::new(5));
//!
//! for _ in 0..10 {
//! let five = five.clone();
//!
//! thread::spawn(move || {
//! let mut number = five.lock().unwrap();
//!
//! *number += 1;
//!
//! println!("{}", *number); // prints a value between 6 and 15
//! });
//! }
//! ```
use boxed::Box;
use core::prelude::*;
use core::atomic;
use core::atomic::Ordering::{Relaxed, Release, Acquire, SeqCst};
use core::fmt;
use core::cmp::Ordering;
use core::mem::{min_align_of, size_of};
use core::mem;
use core::nonzero::NonZero;
use core::ops::Deref;
use core::ptr;
use core::hash::{Hash, Hasher};
use heap::deallocate;
/// An atomically reference counted wrapper for shared state.
///
/// # Examples
///
/// In this example, a large vector of floats is shared between several threads.
/// With simple pipes, without `Arc`, a copy would have to be made for each
/// thread.
///
/// When you clone an `Arc<T>`, it will create another pointer to the data and
/// increase the reference counter.
///
/// ```
/// # #![feature(alloc, core)]
/// use std::sync::Arc;
/// use std::thread;
///
/// fn main() {
/// let numbers: Vec<_> = (0..100u32).collect();
/// let shared_numbers = Arc::new(numbers);
///
/// for _ in 0..10 {
/// let child_numbers = shared_numbers.clone();
///
/// thread::spawn(move || {
/// let local_numbers = &child_numbers[..];
///
/// // Work with the local numbers
/// });
/// }
/// }
/// ```
#[unsafe_no_drop_flag]
#[stable(feature = "rust1", since = "1.0.0")]
pub struct Arc<T> {
// FIXME #12808: strange name to try to avoid interfering with
// field accesses of the contained type via Deref
_ptr: NonZero<*mut ArcInner<T>>,
}
unsafe impl<T: Sync + Send> Send for Arc<T> { }
unsafe impl<T: Sync + Send> Sync for Arc<T> { }
/// A weak pointer to an `Arc`.
///
/// Weak pointers will not keep the data inside of the `Arc` alive, and can be
/// used to break cycles between `Arc` pointers.
#[unsafe_no_drop_flag]
#[unstable(feature = "alloc",
reason = "Weak pointers may not belong in this module.")]
pub struct Weak<T> {
// FIXME #12808: strange name to try to avoid interfering with
// field accesses of the contained type via Deref
_ptr: NonZero<*mut ArcInner<T>>,
}
unsafe impl<T: Sync + Send> Send for Weak<T> { }
unsafe impl<T: Sync + Send> Sync for Weak<T> { }
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: fmt::Debug> fmt::Debug for Weak<T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "(Weak)")
}
}
struct ArcInner<T> {
strong: atomic::AtomicUsize,
weak: atomic::AtomicUsize,
data: T,
}
unsafe impl<T: Sync + Send> Send for ArcInner<T> {}
unsafe impl<T: Sync + Send> Sync for ArcInner<T> {}
impl<T> Arc<T> {
/// Constructs a new `Arc<T>`.
///
/// # Examples
///
/// ```
/// use std::sync::Arc;
///
/// let five = Arc::new(5);
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn new(data: T) -> Arc<T> {
// Start the weak pointer count at 1; this is the implicit weak pointer
// collectively held by all the strong pointers (see std/rc.rs for more info).
let x: Box<_> = box ArcInner {
strong: atomic::AtomicUsize::new(1),
weak: atomic::AtomicUsize::new(1),
data: data,
};
Arc { _ptr: unsafe { NonZero::new(mem::transmute(x)) } }
}
/// Downgrades the `Arc<T>` to a `Weak<T>` reference.
///
/// # Examples
///
/// ```
/// # #![feature(alloc)]
/// use std::sync::Arc;
///
/// let five = Arc::new(5);
///
/// let weak_five = five.downgrade();
/// ```
#[unstable(feature = "alloc",
reason = "Weak pointers may not belong in this module.")]
pub fn downgrade(&self) -> Weak<T> {
// See the clone() impl for why this is relaxed
self.inner().weak.fetch_add(1, Relaxed);
Weak { _ptr: self._ptr }
}
}
impl<T> Arc<T> {
#[inline]
fn inner(&self) -> &ArcInner<T> {
// This unsafety is ok because while this arc is alive we're guaranteed
// that the inner pointer is valid. Furthermore, we know that the
// `ArcInner` structure itself is `Sync` because the inner data is
// `Sync` as well, so we're ok loaning out an immutable pointer to these
// contents.
unsafe { &**self._ptr }
}
// Non-inlined part of `drop`.
#[inline(never)]
unsafe fn drop_slow(&mut self) {
let ptr = *self._ptr;
// Destroy the data at this time, even though we may not free the box
// allocation itself (there may still be weak pointers lying around).
drop(ptr::read(&self.inner().data));
if self.inner().weak.fetch_sub(1, Release) == 1 {
atomic::fence(Acquire);
deallocate(ptr as *mut u8, size_of::<ArcInner<T>>(), min_align_of::<ArcInner<T>>())
}
}
}
/// Get the number of weak references to this value.
#[inline]
#[unstable(feature = "alloc")]
pub fn weak_count<T>(this: &Arc<T>) -> usize { this.inner().weak.load(SeqCst) - 1 }
/// Get the number of strong references to this value.
#[inline]
#[unstable(feature = "alloc")]
pub fn strong_count<T>(this: &Arc<T>) -> usize { this.inner().strong.load(SeqCst) }
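For illustration, a hedged usage sketch of these two counters, mirroring the unstable `alloc::arc` imports used by the `get_mut` example below (not part of this commit):

```rust
#![feature(alloc)]
extern crate alloc;

use alloc::arc::{Arc, strong_count, weak_count};

fn main() {
    let five = Arc::new(5);
    let _also_five = five.clone();      // a second strong reference
    let _weak_five = five.downgrade();  // one weak reference

    assert_eq!(strong_count(&five), 2);
    assert_eq!(weak_count(&five), 1);   // the implicit weak reference is not counted
}
```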
/// Returns a mutable reference to the contained value if the `Arc<T>` is unique.
///
/// Returns `None` if the `Arc<T>` is not unique.
///
/// # Examples
///
/// ```
/// # #![feature(alloc)]
/// extern crate alloc;
/// # fn main() {
/// use alloc::arc::{Arc, get_mut};
///
/// let mut x = Arc::new(3);
/// *get_mut(&mut x).unwrap() = 4;
/// assert_eq!(*x, 4);
///
/// let _y = x.clone();
/// assert!(get_mut(&mut x).is_none());
/// # }
/// ```
#[inline]
#[unstable(feature = "alloc")]
pub fn get_mut<T>(this: &mut Arc<T>) -> Option<&mut T> {
if strong_count(this) == 1 && weak_count(this) == 0 {
// This unsafety is ok because we're guaranteed that the pointer
// returned is the *only* pointer that will ever be returned to T. Our
// reference count is guaranteed to be 1 at this point, and we required
// the Arc itself to be `mut`, so we're returning the only possible
// reference to the inner data.
let inner = unsafe { &mut **this._ptr };
Some(&mut inner.data)
} else {
None
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> Clone for Arc<T> {
/// Makes a clone of the `Arc<T>`.
///
/// This increases the strong reference count.
///
/// # Examples
///
/// ```
/// # #![feature(alloc)]
/// use std::sync::Arc;
///
/// let five = Arc::new(5);
///
/// five.clone();
/// ```
#[inline]
fn clone(&self) -> Arc<T> {
// Using a relaxed ordering is alright here, as knowledge of the
// original reference prevents other threads from erroneously deleting
// the object.
//
// As explained in the [Boost documentation][1], Increasing the
// reference counter can always be done with memory_order_relaxed: New
// references to an object can only be formed from an existing
// reference, and passing an existing reference from one thread to
// another must already provide any required synchronization.
//
// [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html)
self.inner().strong.fetch_add(1, Relaxed);
Arc { _ptr: self._ptr }
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> Deref for Arc<T> {
type Target = T;
#[inline]
fn deref(&self) -> &T {
&self.inner().data
}
}
impl<T: Clone> Arc<T> {
/// Make a mutable reference from the given `Arc<T>`.
///
/// This is also referred to as a copy-on-write operation because the inner
/// data is cloned if the reference count is greater than one.
///
/// # Examples
///
/// ```
/// # #![feature(alloc)]
/// use std::sync::Arc;
///
/// let mut five = Arc::new(5);
///
/// let mut_five = five.make_unique();
/// ```
#[inline]
#[unstable(feature = "alloc")]
pub fn make_unique(&mut self) -> &mut T {
// Note that we hold a strong reference, which also counts as a weak
// reference, so we only clone if there is an additional reference of
// either kind.
if self.inner().strong.load(SeqCst) != 1 ||
self.inner().weak.load(SeqCst) != 1 {
*self = Arc::new((**self).clone())
}
// As with `get_mut()`, the unsafety is ok because our reference was
// either unique to begin with, or became one upon cloning the contents.
let inner = unsafe { &mut **self._ptr };
&mut inner.data
}
}
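A short usage sketch of the copy-on-write behaviour described above, assuming the unstable `alloc` feature exactly as in the doc example (illustrative, not from this commit):

```rust
#![feature(alloc)]

use std::sync::Arc;

fn main() {
    let mut data = Arc::new(5);
    let other = data.clone();   // the allocation is now shared

    *data.make_unique() += 1;   // shared, so the inner value is cloned first

    assert_eq!(*data, 6);       // `data` now owns its own copy
    assert_eq!(*other, 5);      // `other` still sees the original value
}
```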
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> Drop for Arc<T> {
/// Drops the `Arc<T>`.
///
/// This will decrement the strong reference count. If the strong reference
/// count reaches zero and the only remaining references are `Weak<T>` ones,
/// the inner value is dropped.
///
/// # Examples
///
/// ```
/// # #![feature(alloc)]
/// use std::sync::Arc;
///
/// {
/// let five = Arc::new(5);
///
/// // stuff
///
/// drop(five); // explicit drop
/// }
/// {
/// let five = Arc::new(5);
///
/// // stuff
///
/// } // implicit drop
/// ```
#[inline]
fn drop(&mut self) {
// This structure has #[unsafe_no_drop_flag], so this drop glue may run
// more than once (but it is guaranteed to be zeroed after the first if
// it's run more than once)
let ptr = *self._ptr;
// if ptr.is_null() { return }
if ptr.is_null() || ptr as usize == mem::POST_DROP_USIZE { return }
// Because `fetch_sub` is already atomic, we do not need to synchronize
// with other threads unless we are going to delete the object. This
// same logic applies to the below `fetch_sub` to the `weak` count.
if self.inner().strong.fetch_sub(1, Release) != 1 { return }
// This fence is needed to prevent reordering of use of the data and
// deletion of the data. Because it is marked `Release`, the decreasing
// of the reference count synchronizes with this `Acquire` fence. This
// means that use of the data happens before decreasing the reference
// count, which happens before this fence, which happens before the
// deletion of the data.
//
// As explained in the [Boost documentation][1],
//
// > It is important to enforce any possible access to the object in one
// > thread (through an existing reference) to *happen before* deleting
// > the object in a different thread. This is achieved by a "release"
// > operation after dropping a reference (any access to the object
// > through this reference must obviously happened before), and an
// > "acquire" operation before deleting the object.
//
// [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html)
atomic::fence(Acquire);
unsafe {
self.drop_slow()
}
}
}
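The comments above describe the release/acquire protocol in terms of `Arc`'s internals; the following standalone sketch shows the same shape of last-owner teardown using only the public atomics API (illustrative, not from this commit):

```rust
use std::sync::atomic::{fence, AtomicUsize, Ordering};

// Every owner decrements with `Release`; only the owner that sees the count
// hit zero runs an `Acquire` fence before tearing the data down, so all
// earlier uses of the data happen-before the teardown.
struct Shared {
    refcount: AtomicUsize,
    // the shared payload would live alongside the count
}

fn release(shared: &Shared) {
    if shared.refcount.fetch_sub(1, Ordering::Release) != 1 {
        return; // other owners remain; nothing to tear down
    }
    fence(Ordering::Acquire); // pairs with the Release decrements above
    // ...only now would it be safe to destroy and deallocate the payload...
}

fn main() {
    let s = Shared { refcount: AtomicUsize::new(2) };
    release(&s); // first owner gone
    release(&s); // last owner gone; the teardown branch runs
}
```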
#[unstable(feature = "alloc",
reason = "Weak pointers may not belong in this module.")]
impl<T> Weak<T> {
/// Upgrades a weak reference to a strong reference.
///
/// Upgrades the `Weak<T>` reference to an `Arc<T>`, if possible.
///
/// Returns `None` if there were no strong references and the data was
/// destroyed.
///
/// # Examples
///
/// ```
/// # #![feature(alloc)]
/// use std::sync::Arc;
///
/// let five = Arc::new(5);
///
/// let weak_five = five.downgrade();
///
/// let strong_five: Option<Arc<_>> = weak_five.upgrade();
/// ```
pub fn upgrade(&self) -> Option<Arc<T>> {
// We use a CAS loop to increment the strong count instead of a
// fetch_add because once the count hits 0 it must never be above 0.
let inner = self.inner();
loop {
let n = inner.strong.load(SeqCst);
if n == 0 { return None }
let old = inner.strong.compare_and_swap(n, n + 1, SeqCst);
if old == n { return Some(Arc { _ptr: self._ptr }) }
}
}
#[inline]
fn inner(&self) -> &ArcInner<T> {
// See comments above for why this is "safe"
unsafe { &**self._ptr }
}
}
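For illustration, the same "increment only if still non-zero" CAS loop on a bare counter (a hedged sketch, not part of this commit):

```rust
use std::sync::atomic::{AtomicUsize, Ordering};

// A plain `fetch_add` could resurrect a count that has already reached zero,
// so the increment is attempted with a compare-and-swap and retried on races.
fn try_acquire(strong: &AtomicUsize) -> bool {
    loop {
        let n = strong.load(Ordering::SeqCst);
        if n == 0 {
            return false; // the value has already been destroyed
        }
        if strong.compare_and_swap(n, n + 1, Ordering::SeqCst) == n {
            return true; // we won the race and now hold a strong reference
        }
        // the count changed underneath us; reload and try again
    }
}

fn main() {
    assert!(try_acquire(&AtomicUsize::new(1)));
    assert!(!try_acquire(&AtomicUsize::new(0)));
}
```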
#[unstable(feature = "alloc",
reason = "Weak pointers may not belong in this module.")]
impl<T> Clone for Weak<T> {
/// Makes a clone of the `Weak<T>`.
///
/// This increases the weak reference count.
///
/// # Examples
///
/// ```
/// # #![feature(alloc)]
/// use std::sync::Arc;
///
/// let weak_five = Arc::new(5).downgrade();
///
/// weak_five.clone();
/// ```
#[inline]
fn clone(&self) -> Weak<T> {
// See comments in Arc::clone() for why this is relaxed
self.inner().weak.fetch_add(1, Relaxed);
Weak { _ptr: self._ptr }
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> Drop for Weak<T> {
/// Drops the `Weak<T>`.
///
/// This will decrement the weak reference count.
///
/// # Examples
///
/// ```
/// # #![feature(alloc)]
/// use std::sync::Arc;
///
/// {
/// let five = Arc::new(5);
/// let weak_five = five.downgrade();
///
/// // stuff
///
/// drop(weak_five); // explicit drop
/// }
/// {
/// let five = Arc::new(5);
/// let weak_five = five.downgrade();
///
/// // stuff
///
/// } // implicit drop
/// ```
fn drop(&mut self) {
let ptr = *self._ptr;
// see comments above for why this check is here
if ptr.is_null() || ptr as usize == mem::POST_DROP_USIZE { return }
// If we find out that we were the last weak pointer, then it's time to
// deallocate the data entirely. See the discussion in Arc::drop() about
// the memory orderings
if self.inner().weak.fetch_sub(1, Release) == 1 {
atomic::fence(Acquire);
unsafe { deallocate(ptr as *mut u8, size_of::<ArcInner<T>>(),
min_align_of::<ArcInner<T>>()) }
}
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: PartialEq> PartialEq for Arc<T> {
/// Equality for two `Arc<T>`s.
///
/// Two `Arc<T>`s are equal if their inner values are equal.
///
/// # Examples
///
/// ```
/// use std::sync::Arc;
///
/// let five = Arc::new(5);
///
/// five == Arc::new(5);
/// ```
fn eq(&self, other: &Arc<T>) -> bool { *(*self) == *(*other) }
/// Inequality for two `Arc<T>`s.
///
/// Two `Arc<T>`s are unequal if their inner values are unequal.
///
/// # Examples
///
/// ```
/// use std::sync::Arc;
///
/// let five = Arc::new(5);
///
/// five != Arc::new(5);
/// ```
fn ne(&self, other: &Arc<T>) -> bool { *(*self) != *(*other) }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: PartialOrd> PartialOrd for Arc<T> {
/// Partial comparison for two `Arc<T>`s.
///
/// The two are compared by calling `partial_cmp()` on their inner values.
///
/// # Examples
///
/// ```
/// use std::sync::Arc;
///
/// let five = Arc::new(5);
///
/// five.partial_cmp(&Arc::new(5));
/// ```
fn partial_cmp(&self, other: &Arc<T>) -> Option<Ordering> {
(**self).partial_cmp(&**other)
}
/// Less-than comparison for two `Arc<T>`s.
///
/// The two are compared by calling `<` on their inner values.
///
/// # Examples
///
/// ```
/// use std::sync::Arc;
///
/// let five = Arc::new(5);
///
/// five < Arc::new(5);
/// ```
fn lt(&self, other: &Arc<T>) -> bool { *(*self) < *(*other) }
/// 'Less-than or equal to' comparison for two `Arc<T>`s.
///
/// The two are compared by calling `<=` on their inner values.
///
/// # Examples
///
/// ```
/// use std::sync::Arc;
///
/// let five = Arc::new(5);
///
/// five <= Arc::new(5);
/// ```
fn le(&self, other: &Arc<T>) -> bool { *(*self) <= *(*other) }
/// Greater-than comparison for two `Arc<T>`s.
///
/// The two are compared by calling `>` on their inner values.
///
/// # Examples
///
/// ```
/// use std::sync::Arc;
///
/// let five = Arc::new(5);
///
/// five > Arc::new(5);
/// ```
fn gt(&self, other: &Arc<T>) -> bool { *(*self) > *(*other) }
/// 'Greater-than or equal to' comparison for two `Arc<T>`s.
///
/// The two are compared by calling `>=` on their inner values.
///
/// # Examples
///
/// ```
/// use std::sync::Arc;
///
/// let five = Arc::new(5);
///
/// five >= Arc::new(5);
/// ```
fn ge(&self, other: &Arc<T>) -> bool { *(*self) >= *(*other) }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: Ord> Ord for Arc<T> {
fn cmp(&self, other: &Arc<T>) -> Ordering { (**self).cmp(&**other) }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: Eq> Eq for Arc<T> {}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: fmt::Display> fmt::Display for Arc<T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fmt::Display::fmt(&**self, f)
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: fmt::Debug> fmt::Debug for Arc<T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fmt::Debug::fmt(&**self, f)
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> fmt::Pointer for Arc<T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fmt::Pointer::fmt(&*self._ptr, f)
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: Default> Default for Arc<T> {
#[stable(feature = "rust1", since = "1.0.0")]
fn default() -> Arc<T> { Arc::new(Default::default()) }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: Hash> Hash for Arc<T> {
fn hash<H: Hasher>(&self, state: &mut H) {
(**self).hash(state)
}
}

@ -57,16 +57,12 @@ use core::any::Any;
use core::cmp::Ordering;
use core::fmt;
use core::hash::{self, Hash};
use core::marker::Unsize;
use core::mem;
use core::ops::{Deref, DerefMut};
use core::ops::{CoerceUnsized, Deref, DerefMut};
use core::ptr::{Unique};
use core::raw::{TraitObject};
#[cfg(not(stage0))]
use core::marker::Unsize;
#[cfg(not(stage0))]
use core::ops::CoerceUnsized;
/// A value that represents the heap. This is the default place that the `box`
/// keyword allocates into when no place is supplied.
///
@ -392,5 +388,4 @@ impl<'a,A,R> FnOnce<A> for Box<FnBox<A,Output=R>+Send+'a> {
}
}
#[cfg(not(stage0))]
impl<T: ?Sized+Unsize<U>, U: ?Sized> CoerceUnsized<Box<U>> for Box<T> {}

@ -112,14 +112,7 @@ pub mod boxed;
mod boxed { pub use std::boxed::{Box, HEAP}; }
#[cfg(test)]
mod boxed_test;
#[cfg(not(stage0))]
pub mod arc;
#[cfg(stage0)]
mod arc_stage0;
#[cfg(stage0)]
pub mod arc {
pub use arc_stage0::*;
}
pub mod rc;
/// Common out-of-memory routine

@ -159,36 +159,19 @@ use core::cmp::{PartialEq, PartialOrd, Eq, Ord, Ordering};
use core::default::Default;
use core::fmt;
use core::hash::{Hasher, Hash};
use core::marker::{self, Sized};
use core::mem::{self, min_align_of, size_of, forget};
use core::intrinsics::{assume, drop_in_place};
use core::marker::{self, Sized, Unsize};
use core::mem::{self, min_align_of, size_of, min_align_of_val, size_of_val, forget};
use core::nonzero::NonZero;
use core::ops::{Deref, Drop};
use core::ops::{CoerceUnsized, Deref, Drop};
use core::option::Option;
use core::option::Option::{Some, None};
use core::ptr;
use core::result::Result;
use core::result::Result::{Ok, Err};
use core::intrinsics::assume;
#[cfg(not(stage0))]
use core::intrinsics::drop_in_place;
#[cfg(not(stage0))]
use core::marker::Unsize;
#[cfg(not(stage0))]
use core::mem::{min_align_of_val, size_of_val};
#[cfg(not(stage0))]
use core::ops::CoerceUnsized;
use heap::deallocate;
#[cfg(stage0)]
struct RcBox<T> {
strong: Cell<usize>,
weak: Cell<usize>,
value: T,
}
#[cfg(not(stage0))]
struct RcBox<T: ?Sized> {
strong: Cell<usize>,
weak: Cell<usize>,
@ -199,15 +182,6 @@ struct RcBox<T: ?Sized> {
/// A reference-counted pointer type over an immutable value.
///
/// See the [module level documentation](./index.html) for more details.
#[cfg(stage0)]
#[unsafe_no_drop_flag]
#[stable(feature = "rust1", since = "1.0.0")]
pub struct Rc<T> {
// FIXME #12808: strange names to try to avoid interfering with field
// accesses of the contained type via Deref
_ptr: NonZero<*mut RcBox<T>>,
}
#[cfg(not(stage0))]
#[unsafe_no_drop_flag]
#[stable(feature = "rust1", since = "1.0.0")]
pub struct Rc<T: ?Sized> {
@ -216,19 +190,9 @@ pub struct Rc<T: ?Sized> {
_ptr: NonZero<*mut RcBox<T>>,
}
#[cfg(stage0)]
impl<T> !marker::Send for Rc<T> {}
#[cfg(not(stage0))]
impl<T: ?Sized> !marker::Send for Rc<T> {}
#[cfg(stage0)]
impl<T> !marker::Sync for Rc<T> {}
#[cfg(not(stage0))]
impl<T: ?Sized> !marker::Sync for Rc<T> {}
#[cfg(not(stage0))]
impl<T: ?Sized+Unsize<U>, U: ?Sized> CoerceUnsized<Rc<U>> for Rc<T> {}
impl<T> Rc<T> {
@ -259,7 +223,6 @@ impl<T> Rc<T> {
}
}
#[cfg(not(stage0))]
impl<T: ?Sized> Rc<T> {
/// Downgrades the `Rc<T>` to a `Weak<T>` reference.
///
@ -281,44 +244,12 @@ impl<T: ?Sized> Rc<T> {
}
}
#[cfg(stage0)]
impl<T> Rc<T> {
/// Downgrades the `Rc<T>` to a `Weak<T>` reference.
///
/// # Examples
///
/// ```
/// # #![feature(alloc)]
/// use std::rc::Rc;
///
/// let five = Rc::new(5);
///
/// let weak_five = five.downgrade();
/// ```
#[unstable(feature = "alloc",
reason = "Weak pointers may not belong in this module")]
pub fn downgrade(&self) -> Weak<T> {
self.inc_weak();
Weak { _ptr: self._ptr }
}
}
/// Get the number of weak references to this value.
#[cfg(stage0)]
#[inline]
#[unstable(feature = "alloc")]
pub fn weak_count<T>(this: &Rc<T>) -> usize { this.weak() - 1 }
#[cfg(not(stage0))]
#[inline]
#[unstable(feature = "alloc")]
pub fn weak_count<T: ?Sized>(this: &Rc<T>) -> usize { this.weak() - 1 }
/// Get the number of strong references to this value.
#[cfg(stage0)]
#[inline]
#[unstable(feature = "alloc")]
pub fn strong_count<T>(this: &Rc<T>) -> usize { this.strong() }
#[cfg(not(stage0))]
#[inline]
#[unstable(feature = "alloc")]
pub fn strong_count<T: ?Sized>(this: &Rc<T>) -> usize { this.strong() }
@ -438,17 +369,6 @@ impl<T: Clone> Rc<T> {
}
}
#[cfg(stage0)]
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> Deref for Rc<T> {
type Target = T;
#[inline(always)]
fn deref(&self) -> &T {
&self.inner().value
}
}
#[cfg(not(stage0))]
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized> Deref for Rc<T> {
type Target = T;
@ -459,58 +379,6 @@ impl<T: ?Sized> Deref for Rc<T> {
}
}
#[cfg(stage0)]
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> Drop for Rc<T> {
/// Drops the `Rc<T>`.
///
/// This will decrement the strong reference count. If the strong reference
/// count reaches zero and the only remaining references are `Weak<T>` ones,
/// the inner value is dropped.
///
/// # Examples
///
/// ```
/// # #![feature(alloc)]
/// use std::rc::Rc;
///
/// {
/// let five = Rc::new(5);
///
/// // stuff
///
/// drop(five); // explicit drop
/// }
/// {
/// let five = Rc::new(5);
///
/// // stuff
///
/// } // implicit drop
/// ```
fn drop(&mut self) {
unsafe {
let ptr = *self._ptr;
if !ptr.is_null() && ptr as usize != mem::POST_DROP_USIZE {
self.dec_strong();
if self.strong() == 0 {
ptr::read(&**self); // destroy the contained object
// remove the implicit "strong weak" pointer now that we've
// destroyed the contents.
self.dec_weak();
if self.weak() == 0 {
deallocate(ptr as *mut u8, size_of::<RcBox<T>>(),
min_align_of::<RcBox<T>>())
}
}
}
}
}
}
#[cfg(not(stage0))]
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized> Drop for Rc<T> {
/// Drops the `Rc<T>`.
@ -564,32 +432,6 @@ impl<T: ?Sized> Drop for Rc<T> {
}
}
#[cfg(stage0)]
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> Clone for Rc<T> {
/// Makes a clone of the `Rc<T>`.
///
/// When you clone an `Rc<T>`, it will create another pointer to the data and
/// increase the strong reference counter.
///
/// # Examples
///
/// ```
/// # #![feature(alloc)]
/// use std::rc::Rc;
///
/// let five = Rc::new(5);
///
/// five.clone();
/// ```
#[inline]
fn clone(&self) -> Rc<T> {
self.inc_strong();
Rc { _ptr: self._ptr }
}
}
#[cfg(not(stage0))]
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized> Clone for Rc<T> {
@ -634,17 +476,6 @@ impl<T: Default> Default for Rc<T> {
}
#[stable(feature = "rust1", since = "1.0.0")]
#[cfg(stage0)]
impl<T: PartialEq> PartialEq for Rc<T> {
#[inline(always)]
fn eq(&self, other: &Rc<T>) -> bool { **self == **other }
#[inline(always)]
fn ne(&self, other: &Rc<T>) -> bool { **self != **other }
}
#[stable(feature = "rust1", since = "1.0.0")]
#[cfg(not(stage0))]
impl<T: ?Sized + PartialEq> PartialEq for Rc<T> {
/// Equality for two `Rc<T>`s.
///
@ -680,34 +511,9 @@ impl<T: ?Sized + PartialEq> PartialEq for Rc<T> {
}
#[stable(feature = "rust1", since = "1.0.0")]
#[cfg(stage0)]
impl<T: Eq> Eq for Rc<T> {}
#[stable(feature = "rust1", since = "1.0.0")]
#[cfg(not(stage0))]
impl<T: ?Sized + Eq> Eq for Rc<T> {}
#[stable(feature = "rust1", since = "1.0.0")]
#[cfg(stage0)]
impl<T: PartialOrd> PartialOrd for Rc<T> {
#[inline(always)]
fn partial_cmp(&self, other: &Rc<T>) -> Option<Ordering> {
(**self).partial_cmp(&**other)
}
#[inline(always)]
fn lt(&self, other: &Rc<T>) -> bool { **self < **other }
#[inline(always)]
fn le(&self, other: &Rc<T>) -> bool { **self <= **other }
#[inline(always)]
fn gt(&self, other: &Rc<T>) -> bool { **self > **other }
#[inline(always)]
fn ge(&self, other: &Rc<T>) -> bool { **self >= **other }
}
#[stable(feature = "rust1", since = "1.0.0")]
#[cfg(not(stage0))]
impl<T: ?Sized + PartialOrd> PartialOrd for Rc<T> {
/// Partial comparison for two `Rc<T>`s.
///
@ -793,13 +599,6 @@ impl<T: ?Sized + PartialOrd> PartialOrd for Rc<T> {
}
#[stable(feature = "rust1", since = "1.0.0")]
#[cfg(stage0)]
impl<T: Ord> Ord for Rc<T> {
#[inline]
fn cmp(&self, other: &Rc<T>) -> Ordering { (**self).cmp(&**other) }
}
#[stable(feature = "rust1", since = "1.0.0")]
#[cfg(not(stage0))]
impl<T: ?Sized + Ord> Ord for Rc<T> {
/// Comparison for two `Rc<T>`s.
///
@ -818,14 +617,6 @@ impl<T: ?Sized + Ord> Ord for Rc<T> {
fn cmp(&self, other: &Rc<T>) -> Ordering { (**self).cmp(&**other) }
}
#[cfg(stage0)]
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: Hash> Hash for Rc<T> {
fn hash<H: Hasher>(&self, state: &mut H) {
(**self).hash(state);
}
}
#[cfg(not(stage0))]
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized+Hash> Hash for Rc<T> {
fn hash<H: Hasher>(&self, state: &mut H) {
@ -833,14 +624,6 @@ impl<T: ?Sized+Hash> Hash for Rc<T> {
}
}
#[cfg(stage0)]
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: fmt::Display> fmt::Display for Rc<T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fmt::Display::fmt(&**self, f)
}
}
#[cfg(not(stage0))]
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized+fmt::Display> fmt::Display for Rc<T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
@ -848,14 +631,6 @@ impl<T: ?Sized+fmt::Display> fmt::Display for Rc<T> {
}
}
#[cfg(stage0)]
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: fmt::Debug> fmt::Debug for Rc<T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fmt::Debug::fmt(&**self, f)
}
}
#[cfg(not(stage0))]
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized+fmt::Debug> fmt::Debug for Rc<T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
@ -876,16 +651,6 @@ impl<T> fmt::Pointer for Rc<T> {
/// dropped.
///
/// See the [module level documentation](./index.html) for more.
#[cfg(stage0)]
#[unsafe_no_drop_flag]
#[unstable(feature = "alloc",
reason = "Weak pointers may not belong in this module.")]
pub struct Weak<T> {
// FIXME #12808: strange names to try to avoid interfering with
// field accesses of the contained type via Deref
_ptr: NonZero<*mut RcBox<T>>,
}
#[cfg(not(stage0))]
#[unsafe_no_drop_flag]
#[unstable(feature = "alloc",
reason = "Weak pointers may not belong in this module.")]
@ -895,51 +660,9 @@ pub struct Weak<T: ?Sized> {
_ptr: NonZero<*mut RcBox<T>>,
}
#[cfg(stage0)]
impl<T> !marker::Send for Weak<T> {}
#[cfg(not(stage0))]
impl<T: ?Sized> !marker::Send for Weak<T> {}
#[cfg(stage0)]
impl<T> !marker::Sync for Weak<T> {}
#[cfg(not(stage0))]
impl<T: ?Sized> !marker::Sync for Weak<T> {}
#[cfg(stage0)]
#[unstable(feature = "alloc",
reason = "Weak pointers may not belong in this module.")]
impl<T> Weak<T> {
/// Upgrades a weak reference to a strong reference.
///
/// Upgrades the `Weak<T>` reference to an `Rc<T>`, if possible.
///
/// Returns `None` if there were no strong references and the data was
/// destroyed.
///
/// # Examples
///
/// ```
/// # #![feature(alloc)]
/// use std::rc::Rc;
///
/// let five = Rc::new(5);
///
/// let weak_five = five.downgrade();
///
/// let strong_five: Option<Rc<_>> = weak_five.upgrade();
/// ```
pub fn upgrade(&self) -> Option<Rc<T>> {
if self.strong() == 0 {
None
} else {
self.inc_strong();
Some(Rc { _ptr: self._ptr })
}
}
}
#[cfg(not(stage0))]
#[unstable(feature = "alloc",
reason = "Weak pointers may not belong in this module.")]
impl<T: ?Sized> Weak<T> {
@ -973,52 +696,6 @@ impl<T: ?Sized> Weak<T> {
}
}
#[cfg(stage0)]
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> Drop for Weak<T> {
/// Drops the `Weak<T>`.
///
/// This will decrement the weak reference count.
///
/// # Examples
///
/// ```
/// # #![feature(alloc)]
/// use std::rc::Rc;
///
/// {
/// let five = Rc::new(5);
/// let weak_five = five.downgrade();
///
/// // stuff
///
/// drop(weak_five); // explicit drop
/// }
/// {
/// let five = Rc::new(5);
/// let weak_five = five.downgrade();
///
/// // stuff
///
/// } // implicit drop
/// ```
fn drop(&mut self) {
unsafe {
let ptr = *self._ptr;
if !ptr.is_null() && ptr as usize != mem::POST_DROP_USIZE {
self.dec_weak();
// the weak count starts at 1, and will only go to zero if all
// the strong pointers have disappeared.
if self.weak() == 0 {
deallocate(ptr as *mut u8, size_of::<RcBox<T>>(),
min_align_of::<RcBox<T>>())
}
}
}
}
}
#[cfg(not(stage0))]
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized> Drop for Weak<T> {
/// Drops the `Weak<T>`.
@ -1064,32 +741,6 @@ impl<T: ?Sized> Drop for Weak<T> {
}
}
#[cfg(stage0)]
#[unstable(feature = "alloc",
reason = "Weak pointers may not belong in this module.")]
impl<T> Clone for Weak<T> {
/// Makes a clone of the `Weak<T>`.
///
/// This increases the weak reference count.
///
/// # Examples
///
/// ```
/// # #![feature(alloc)]
/// use std::rc::Rc;
///
/// let weak_five = Rc::new(5).downgrade();
///
/// weak_five.clone();
/// ```
#[inline]
fn clone(&self) -> Weak<T> {
self.inc_weak();
Weak { _ptr: self._ptr }
}
}
#[cfg(not(stage0))]
#[unstable(feature = "alloc",
reason = "Weak pointers may not belong in this module.")]
impl<T: ?Sized> Clone for Weak<T> {
@ -1115,14 +766,6 @@ impl<T: ?Sized> Clone for Weak<T> {
}
}
#[cfg(stage0)]
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: fmt::Debug> fmt::Debug for Weak<T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "(Weak)")
}
}
#[cfg(not(stage0))]
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized+fmt::Debug> fmt::Debug for Weak<T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
@ -1130,30 +773,6 @@ impl<T: ?Sized+fmt::Debug> fmt::Debug for Weak<T> {
}
}
#[cfg(stage0)]
#[doc(hidden)]
trait RcBoxPtr<T> {
fn inner(&self) -> &RcBox<T>;
#[inline]
fn strong(&self) -> usize { self.inner().strong.get() }
#[inline]
fn inc_strong(&self) { self.inner().strong.set(self.strong() + 1); }
#[inline]
fn dec_strong(&self) { self.inner().strong.set(self.strong() - 1); }
#[inline]
fn weak(&self) -> usize { self.inner().weak.get() }
#[inline]
fn inc_weak(&self) { self.inner().weak.set(self.weak() + 1); }
#[inline]
fn dec_weak(&self) { self.inner().weak.set(self.weak() - 1); }
}
#[cfg(not(stage0))]
#[doc(hidden)]
trait RcBoxPtr<T: ?Sized> {
fn inner(&self) -> &RcBox<T>;
@ -1177,21 +796,6 @@ trait RcBoxPtr<T: ?Sized> {
fn dec_weak(&self) { self.inner().weak.set(self.weak() - 1); }
}
#[cfg(stage0)]
impl<T> RcBoxPtr<T> for Rc<T> {
#[inline(always)]
fn inner(&self) -> &RcBox<T> {
unsafe {
// Safe to assume this here, as if it weren't true, we'd be breaking
// the contract anyway.
// This allows the null check to be elided in the destructor if we
// manipulated the reference count in the same function.
assume(!(*(&self._ptr as *const _ as *const *const ())).is_null());
&(**self._ptr)
}
}
}
#[cfg(not(stage0))]
impl<T: ?Sized> RcBoxPtr<T> for Rc<T> {
#[inline(always)]
fn inner(&self) -> &RcBox<T> {
@ -1206,21 +810,6 @@ impl<T: ?Sized> RcBoxPtr<T> for Rc<T> {
}
}
#[cfg(stage0)]
impl<T> RcBoxPtr<T> for Weak<T> {
#[inline(always)]
fn inner(&self) -> &RcBox<T> {
unsafe {
// Safe to assume this here, as if it weren't true, we'd be breaking
// the contract anyway.
// This allows the null check to be elided in the destructor if we
// manipulated the reference count in the same function.
assume(!(*(&self._ptr as *const _ as *const *const ())).is_null());
&(**self._ptr)
}
}
}
#[cfg(not(stage0))]
impl<T: ?Sized> RcBoxPtr<T> for Weak<T> {
#[inline(always)]
fn inner(&self) -> &RcBox<T> {

@ -116,17 +116,11 @@ impl<'a, T: ?Sized> BorrowMut<T> for &'a mut T {
fn borrow_mut(&mut self) -> &mut T { &mut **self }
}
#[cfg(stage0)]
impl<T> Borrow<T> for rc::Rc<T> {
fn borrow(&self) -> &T { &**self }
}
#[cfg(not(stage0))]
impl<T: ?Sized> Borrow<T> for rc::Rc<T> {
fn borrow(&self) -> &T { &**self }
}
impl<T> Borrow<T> for arc::Arc<T> {
impl<T: ?Sized> Borrow<T> for arc::Arc<T> {
fn borrow(&self) -> &T { &**self }
}

@ -19,7 +19,6 @@ pub use self::TraversalItem::*;
use core::prelude::*;
use core::cmp::Ordering::{Greater, Less, Equal};
#[cfg(not(stage0))]
use core::intrinsics::arith_offset;
use core::iter::Zip;
use core::marker::PhantomData;
@ -207,22 +206,6 @@ impl<T> RawItems<T> {
RawItems::from_parts(slice.as_ptr(), slice.len())
}
#[cfg(stage0)]
unsafe fn from_parts(ptr: *const T, len: usize) -> RawItems<T> {
if mem::size_of::<T>() == 0 {
RawItems {
head: ptr,
tail: (ptr as usize + len) as *const T,
}
} else {
RawItems {
head: ptr,
tail: ptr.offset(len as isize),
}
}
}
#[cfg(not(stage0))]
unsafe fn from_parts(ptr: *const T, len: usize) -> RawItems<T> {
if mem::size_of::<T>() == 0 {
RawItems {
@ -237,18 +220,6 @@ impl<T> RawItems<T> {
}
}
#[cfg(stage0)]
unsafe fn push(&mut self, val: T) {
ptr::write(self.tail as *mut T, val);
if mem::size_of::<T>() == 0 {
self.tail = (self.tail as usize + 1) as *const T;
} else {
self.tail = self.tail.offset(1);
}
}
#[cfg(not(stage0))]
unsafe fn push(&mut self, val: T) {
ptr::write(self.tail as *mut T, val);
@ -263,26 +234,6 @@ impl<T> RawItems<T> {
impl<T> Iterator for RawItems<T> {
type Item = T;
#[cfg(stage0)]
fn next(&mut self) -> Option<T> {
if self.head == self.tail {
None
} else {
unsafe {
let ret = Some(ptr::read(self.head));
if mem::size_of::<T>() == 0 {
self.head = (self.head as usize + 1) as *const T;
} else {
self.head = self.head.offset(1);
}
ret
}
}
}
#[cfg(not(stage0))]
fn next(&mut self) -> Option<T> {
if self.head == self.tail {
None
@ -303,24 +254,6 @@ impl<T> Iterator for RawItems<T> {
}
impl<T> DoubleEndedIterator for RawItems<T> {
#[cfg(stage0)]
fn next_back(&mut self) -> Option<T> {
if self.head == self.tail {
None
} else {
unsafe {
if mem::size_of::<T>() == 0 {
self.tail = (self.tail as usize - 1) as *const T;
} else {
self.tail = self.tail.offset(-1);
}
Some(ptr::read(self.tail))
}
}
}
#[cfg(not(stage0))]
fn next_back(&mut self) -> Option<T> {
if self.head == self.tail {
None

@ -66,9 +66,7 @@ use core::cmp::max;
use core::cmp::Ordering;
use core::fmt;
use core::hash::{self, Hash};
use core::intrinsics::assume;
#[cfg(not(stage0))]
use core::intrinsics::arith_offset;
use core::intrinsics::{arith_offset, assume};
use core::iter::{repeat, FromIterator};
use core::marker::PhantomData;
use core::mem;
@ -1526,25 +1524,6 @@ impl<T> IntoIterator for Vec<T> {
/// }
/// ```
#[inline]
#[cfg(stage0)]
fn into_iter(self) -> IntoIter<T> {
unsafe {
let ptr = *self.ptr;
assume(!ptr.is_null());
let cap = self.cap;
let begin = ptr as *const T;
let end = if mem::size_of::<T>() == 0 {
(ptr as usize + self.len()) as *const T
} else {
ptr.offset(self.len() as isize) as *const T
};
mem::forget(self);
IntoIter { allocation: ptr, cap: cap, ptr: begin, end: end }
}
}
#[inline]
#[cfg(not(stage0))]
fn into_iter(self) -> IntoIter<T> {
unsafe {
let ptr = *self.ptr;
@ -1764,32 +1743,6 @@ impl<T> Iterator for IntoIter<T> {
type Item = T;
#[inline]
#[cfg(stage0)]
fn next(&mut self) -> Option<T> {
unsafe {
if self.ptr == self.end {
None
} else {
if mem::size_of::<T>() == 0 {
// purposefully don't use 'ptr.offset' because for
// vectors with 0-size elements this would return the
// same pointer.
self.ptr = mem::transmute(self.ptr as usize + 1);
// Use a non-null pointer value
Some(ptr::read(EMPTY as *mut T))
} else {
let old = self.ptr;
self.ptr = self.ptr.offset(1);
Some(ptr::read(old))
}
}
}
}
#[inline]
#[cfg(not(stage0))]
fn next(&mut self) -> Option<T> {
unsafe {
if self.ptr == self.end {
@ -1830,29 +1783,6 @@ impl<T> Iterator for IntoIter<T> {
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> DoubleEndedIterator for IntoIter<T> {
#[inline]
#[cfg(stage0)]
fn next_back(&mut self) -> Option<T> {
unsafe {
if self.end == self.ptr {
None
} else {
if mem::size_of::<T>() == 0 {
// See above for why 'ptr.offset' isn't used
self.end = mem::transmute(self.end as usize - 1);
// Use a non-null pointer value
Some(ptr::read(EMPTY as *mut T))
} else {
self.end = self.end.offset(-1);
Some(ptr::read(mem::transmute(self.end)))
}
}
}
}
#[inline]
#[cfg(not(stage0))]
fn next_back(&mut self) -> Option<T> {
unsafe {
if self.end == self.ptr {

@ -399,7 +399,7 @@ fn test_map_in_place_zero_sized() {
#[test]
fn test_map_in_place_zero_drop_count() {
use std::sync::atomic::{AtomicUsize, Ordering, ATOMIC_USIZE_INIT};
use std::sync::atomic::{AtomicUsize, Ordering};
#[derive(Clone, PartialEq, Debug)]
struct Nothing;
@ -413,7 +413,7 @@ fn test_map_in_place_zero_drop_count() {
}
}
const NUM_ELEMENTS: usize = 2;
static DROP_COUNTER: AtomicUsize = ATOMIC_USIZE_INIT;
static DROP_COUNTER: AtomicUsize = AtomicUsize::new(0);
let v = repeat(Nothing).take(NUM_ELEMENTS).collect::<Vec<_>>();

@ -76,7 +76,6 @@ use marker::Sync;
use intrinsics;
use cell::UnsafeCell;
use marker::PhantomData;
use default::Default;
@ -87,8 +86,8 @@ pub struct AtomicBool {
}
impl Default for AtomicBool {
fn default() -> AtomicBool {
ATOMIC_BOOL_INIT
fn default() -> Self {
Self::new(Default::default())
}
}
@ -101,8 +100,8 @@ pub struct AtomicIsize {
}
impl Default for AtomicIsize {
fn default() -> AtomicIsize {
ATOMIC_ISIZE_INIT
fn default() -> Self {
Self::new(Default::default())
}
}
@ -115,8 +114,8 @@ pub struct AtomicUsize {
}
impl Default for AtomicUsize {
fn default() -> AtomicUsize {
ATOMIC_USIZE_INIT
fn default() -> Self {
Self::new(Default::default())
}
}
@ -125,8 +124,7 @@ unsafe impl Sync for AtomicUsize {}
/// A raw pointer type which can be safely shared between threads.
#[stable(feature = "rust1", since = "1.0.0")]
pub struct AtomicPtr<T> {
p: UnsafeCell<usize>,
_marker: PhantomData<*mut T>,
p: UnsafeCell<*mut T>,
}
impl<T> Default for AtomicPtr<T> {
@ -175,16 +173,13 @@ pub enum Ordering {
/// An `AtomicBool` initialized to `false`.
#[stable(feature = "rust1", since = "1.0.0")]
pub const ATOMIC_BOOL_INIT: AtomicBool =
AtomicBool { v: UnsafeCell { value: 0 } };
pub const ATOMIC_BOOL_INIT: AtomicBool = AtomicBool::new(false);
/// An `AtomicIsize` initialized to `0`.
#[stable(feature = "rust1", since = "1.0.0")]
pub const ATOMIC_ISIZE_INIT: AtomicIsize =
AtomicIsize { v: UnsafeCell { value: 0 } };
pub const ATOMIC_ISIZE_INIT: AtomicIsize = AtomicIsize::new(0);
/// An `AtomicUsize` initialized to `0`.
#[stable(feature = "rust1", since = "1.0.0")]
pub const ATOMIC_USIZE_INIT: AtomicUsize =
AtomicUsize { v: UnsafeCell { value: 0, } };
pub const ATOMIC_USIZE_INIT: AtomicUsize = AtomicUsize::new(0);
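These constants are now plain `const fn` calls, which is what lets the rest of this commit replace `ATOMIC_*_INIT` (and `MUTEX_INIT`, `ONCE_INIT`) with direct initializers in statics. A minimal sketch of the new pattern; on a nightly of this era the `const_fn` gate may be required, as this commit enables in libcore and libstd:

```rust
#![feature(const_fn)] // needed on nightlies of this era; later compilers do not require it

use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};

// With `const fn new`, statics no longer need the ATOMIC_*_INIT constants.
static READY: AtomicBool = AtomicBool::new(false);
static COUNTER: AtomicUsize = AtomicUsize::new(0);

fn main() {
    COUNTER.fetch_add(1, Ordering::SeqCst);
    READY.store(true, Ordering::SeqCst);
    assert_eq!(COUNTER.load(Ordering::SeqCst), 1);
    assert!(READY.load(Ordering::SeqCst));
}
```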
// NB: Needs to be -1 (0b11111111...) to make fetch_nand work correctly
const UINT_TRUE: usize = !0;
@ -202,9 +197,8 @@ impl AtomicBool {
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn new(v: bool) -> AtomicBool {
let val = if v { UINT_TRUE } else { 0 };
AtomicBool { v: UnsafeCell::new(val) }
pub const fn new(v: bool) -> AtomicBool {
AtomicBool { v: UnsafeCell::new(-(v as isize) as usize) }
}
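The branchless `-(v as isize) as usize` above encodes `false` as 0 and `true` as all ones (the `UINT_TRUE` value), presumably because early `const fn` bodies could not contain an `if`. A quick worked check (illustrative, not from this commit):

```rust
fn main() {
    // true  -> 1isize -> -1 -> all ones when reinterpreted as usize
    assert_eq!(-(true as isize) as usize, !0usize);
    // false -> 0isize -> 0 -> 0
    assert_eq!(-(false as isize) as usize, 0usize);
}
```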
/// Loads a value from the bool.
@ -445,7 +439,7 @@ impl AtomicIsize {
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn new(v: isize) -> AtomicIsize {
pub const fn new(v: isize) -> AtomicIsize {
AtomicIsize {v: UnsafeCell::new(v)}
}
@ -633,7 +627,7 @@ impl AtomicUsize {
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn new(v: usize) -> AtomicUsize {
pub const fn new(v: usize) -> AtomicUsize {
AtomicUsize { v: UnsafeCell::new(v) }
}
@ -821,9 +815,8 @@ impl<T> AtomicPtr<T> {
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn new(p: *mut T) -> AtomicPtr<T> {
AtomicPtr { p: UnsafeCell::new(p as usize),
_marker: PhantomData }
pub const fn new(p: *mut T) -> AtomicPtr<T> {
AtomicPtr { p: UnsafeCell::new(p) }
}
/// Loads a value from the pointer.
@ -848,7 +841,7 @@ impl<T> AtomicPtr<T> {
#[stable(feature = "rust1", since = "1.0.0")]
pub fn load(&self, order: Ordering) -> *mut T {
unsafe {
atomic_load(self.p.get(), order) as *mut T
atomic_load(self.p.get() as *mut usize, order) as *mut T
}
}
@ -875,7 +868,7 @@ impl<T> AtomicPtr<T> {
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn store(&self, ptr: *mut T, order: Ordering) {
unsafe { atomic_store(self.p.get(), ptr as usize, order); }
unsafe { atomic_store(self.p.get() as *mut usize, ptr as usize, order); }
}
/// Stores a value into the pointer, returning the old value.
@ -897,7 +890,7 @@ impl<T> AtomicPtr<T> {
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn swap(&self, ptr: *mut T, order: Ordering) -> *mut T {
unsafe { atomic_swap(self.p.get(), ptr as usize, order) as *mut T }
unsafe { atomic_swap(self.p.get() as *mut usize, ptr as usize, order) as *mut T }
}
/// Stores a value into the pointer if the current value is the same as the expected value.
@ -925,7 +918,7 @@ impl<T> AtomicPtr<T> {
#[stable(feature = "rust1", since = "1.0.0")]
pub fn compare_and_swap(&self, old: *mut T, new: *mut T, order: Ordering) -> *mut T {
unsafe {
atomic_compare_and_swap(self.p.get(), old as usize,
atomic_compare_and_swap(self.p.get() as *mut usize, old as usize,
new as usize, order) as *mut T
}
}

@ -170,7 +170,7 @@ impl<T:Copy> Cell<T> {
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn new(value: T) -> Cell<T> {
pub const fn new(value: T) -> Cell<T> {
Cell {
value: UnsafeCell::new(value),
}
@ -302,7 +302,7 @@ impl<T> RefCell<T> {
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn new(value: T) -> RefCell<T> {
pub const fn new(value: T) -> RefCell<T> {
RefCell {
value: UnsafeCell::new(value),
borrow: Cell::new(UNUSED),
@ -663,7 +663,7 @@ impl<T> UnsafeCell<T> {
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn new(value: T) -> UnsafeCell<T> {
pub const fn new(value: T) -> UnsafeCell<T> {
UnsafeCell { value: value }
}
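With `Cell::new`, `RefCell::new`, and `UnsafeCell::new` now `const fn`, types built on top of them can expose `const fn` constructors and live directly in statics; this is the pattern the `StaticMutex`, `Once`, and `Lazy` changes later in this commit rely on. A hedged sketch with an invented type (`Slot` is hypothetical):

```rust
#![feature(const_fn)] // needed to define a const fn on nightlies of this era

use std::cell::UnsafeCell;

struct Slot {
    value: UnsafeCell<u32>,
}

// Illustration only: sound here because nothing in this sketch ever writes.
unsafe impl Sync for Slot {}

impl Slot {
    const fn new(v: u32) -> Slot {
        Slot { value: UnsafeCell::new(v) }
    }
}

// A static can now be initialized through the const constructor.
static SLOT: Slot = Slot::new(7);

fn main() {
    let v = unsafe { *SLOT.value.get() };
    assert_eq!(v, 7);
}
```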

@ -145,13 +145,9 @@ extern "rust-intrinsic" {
/// but no instructions will be emitted for it. This is appropriate for operations
/// on the same thread that may be preempted, such as when interacting with signal
/// handlers.
#[cfg(not(stage0))] // SNAP 857ef6e
pub fn atomic_singlethreadfence();
#[cfg(not(stage0))] // SNAP 857ef6e
pub fn atomic_singlethreadfence_acq();
#[cfg(not(stage0))] // SNAP 857ef6e
pub fn atomic_singlethreadfence_rel();
#[cfg(not(stage0))] // SNAP 857ef6e
pub fn atomic_singlethreadfence_acqrel();
/// Aborts the execution of the process.
@ -193,11 +189,8 @@ extern "rust-intrinsic" {
pub fn min_align_of<T>() -> usize;
pub fn pref_align_of<T>() -> usize;
#[cfg(not(stage0))]
pub fn size_of_val<T: ?Sized>(_: &T) -> usize;
#[cfg(not(stage0))]
pub fn min_align_of_val<T: ?Sized>(_: &T) -> usize;
#[cfg(not(stage0))]
pub fn drop_in_place<T: ?Sized>(_: *mut T);
/// Gets a static string slice containing the name of a type.
@ -294,7 +287,6 @@ extern "rust-intrinsic" {
/// resulting pointer to point into or one byte past the end of an allocated
/// object, and it wraps with two's complement arithmetic. The resulting
/// value is not necessarily valid to be used to actually access memory.
#[cfg(not(stage0))]
pub fn arith_offset<T>(dst: *const T, offset: isize) -> *const T;
/// Copies `count * size_of<T>` bytes from `src` to `dst`. The source
@ -592,13 +584,6 @@ extern "rust-intrinsic" {
/// Returns (a * b) mod 2^N, where N is the width of T in bits.
pub fn overflowing_mul<T>(a: T, b: T) -> T;
/// Returns the value of the discriminant for the variant in 'v',
/// cast to a `u64`; if `T` has no discriminant, returns 0.
pub fn discriminant_value<T>(v: &T) -> u64;
}
#[cfg(not(stage0))]
extern "rust-intrinsic" {
/// Performs an unchecked signed division, which results in undefined behavior,
/// in cases where y == 0, or x == int::MIN and y == -1
pub fn unchecked_sdiv<T>(x: T, y: T) -> T;
@ -612,4 +597,8 @@ extern "rust-intrinsic" {
/// Returns the remainder of an unchecked signed division, which results in
/// undefined behavior, in cases where y == 0
pub fn unchecked_srem<T>(x: T, y: T) -> T;
/// Returns the value of the discriminant for the variant in 'v',
/// cast to a `u64`; if `T` has no discriminant, returns 0.
pub fn discriminant_value<T>(v: &T) -> u64;
}

@ -74,6 +74,7 @@
#![feature(concat_idents)]
#![feature(reflect)]
#![feature(custom_attribute)]
#![feature(const_fn)]
#[macro_use]
mod macros;

@ -55,7 +55,6 @@ pub trait Sized {
/// Types that can be "unsized" to a dynamically sized type.
#[unstable(feature = "core")]
#[cfg(not(stage0))]
#[lang="unsize"]
pub trait Unsize<T> {
// Empty.

@ -95,29 +95,12 @@ pub fn size_of<T>() -> usize {
///
/// assert_eq!(4, mem::size_of_val(&5i32));
/// ```
#[cfg(not(stage0))]
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn size_of_val<T: ?Sized>(val: &T) -> usize {
unsafe { intrinsics::size_of_val(val) }
}
/// Returns the size of the type that `_val` points to in bytes.
///
/// # Examples
///
/// ```
/// use std::mem;
///
/// assert_eq!(4, mem::size_of_val(&5i32));
/// ```
#[cfg(stage0)]
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn size_of_val<T>(_val: &T) -> usize {
size_of::<T>()
}
/// Returns the ABI-required minimum alignment of a type
///
/// This is the alignment used for struct fields. It may be smaller than the preferred alignment.
@ -144,29 +127,12 @@ pub fn min_align_of<T>() -> usize {
///
/// assert_eq!(4, mem::min_align_of_val(&5i32));
/// ```
#[cfg(not(stage0))]
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn min_align_of_val<T: ?Sized>(val: &T) -> usize {
unsafe { intrinsics::min_align_of_val(val) }
}
/// Returns the ABI-required minimum alignment of the type of the value that `_val` points to
///
/// # Examples
///
/// ```
/// use std::mem;
///
/// assert_eq!(4, mem::min_align_of_val(&5i32));
/// ```
#[cfg(stage0)]
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn min_align_of_val<T>(_val: &T) -> usize {
min_align_of::<T>()
}
/// Returns the alignment in memory for a type.
///
/// This function will return the alignment, in bytes, of a type in memory. If the alignment

@ -11,9 +11,7 @@
//! Exposes the NonZero lang item which provides optimization hints.
use marker::Sized;
use ops::Deref;
#[cfg(not(stage0))]
use ops::CoerceUnsized;
use ops::{CoerceUnsized, Deref};
/// Unsafe trait to indicate what types are usable with the NonZero struct
pub unsafe trait Zeroable {}
@ -57,5 +55,4 @@ impl<T: Zeroable> Deref for NonZero<T> {
}
}
#[cfg(not(stage0))]
impl<T: Zeroable+CoerceUnsized<U>, U: Zeroable> CoerceUnsized<NonZero<U>> for NonZero<T> {}

@ -67,12 +67,9 @@
#![stable(feature = "rust1", since = "1.0.0")]
use marker::Sized;
use marker::{Sized, Unsize};
use fmt;
#[cfg(not(stage0))]
use marker::Unsize;
/// The `Drop` trait is used to run some code when a value goes out of scope. This
/// is sometimes called a 'destructor'.
///
@ -1214,39 +1211,29 @@ mod impls {
/// Trait that indicates that this is a pointer or a wrapper for one,
/// where unsizing can be performed on the pointee.
#[unstable(feature = "core")]
#[cfg(not(stage0))]
#[lang="coerce_unsized"]
pub trait CoerceUnsized<T> {
// Empty.
}
// &mut T -> &mut U
#[cfg(not(stage0))]
impl<'a, T: ?Sized+Unsize<U>, U: ?Sized> CoerceUnsized<&'a mut U> for &'a mut T {}
// &mut T -> &U
#[cfg(not(stage0))]
impl<'a, 'b: 'a, T: ?Sized+Unsize<U>, U: ?Sized> CoerceUnsized<&'a U> for &'b mut T {}
// &mut T -> *mut U
#[cfg(not(stage0))]
impl<'a, T: ?Sized+Unsize<U>, U: ?Sized> CoerceUnsized<*mut U> for &'a mut T {}
// &mut T -> *const U
#[cfg(not(stage0))]
impl<'a, T: ?Sized+Unsize<U>, U: ?Sized> CoerceUnsized<*const U> for &'a mut T {}
// &T -> &U
#[cfg(not(stage0))]
impl<'a, 'b: 'a, T: ?Sized+Unsize<U>, U: ?Sized> CoerceUnsized<&'a U> for &'b T {}
// &T -> *const U
#[cfg(not(stage0))]
impl<'a, T: ?Sized+Unsize<U>, U: ?Sized> CoerceUnsized<*const U> for &'a T {}
// *mut T -> *mut U
#[cfg(not(stage0))]
impl<T: ?Sized+Unsize<U>, U: ?Sized> CoerceUnsized<*mut U> for *mut T {}
// *mut T -> *const U
#[cfg(not(stage0))]
impl<T: ?Sized+Unsize<U>, U: ?Sized> CoerceUnsized<*const U> for *mut T {}
// *const T -> *const U
#[cfg(not(stage0))]
impl<T: ?Sized+Unsize<U>, U: ?Sized> CoerceUnsized<*const U> for *const T {}
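For illustration, the user-visible coercions these impls enable, together with the `Box` impl earlier in this diff (a hedged sketch, not part of this commit):

```rust
use std::fmt::Display;

fn main() {
    let array: &[i32; 3] = &[1, 2, 3];
    let slice: &[i32] = array;              // &[i32; 3] -> &[i32]

    let five = 5i32;
    let shown: &Display = &five;            // &i32 -> &Display (trait object)

    let boxed: Box<[i32; 3]> = Box::new([4, 5, 6]);
    let boxed_slice: Box<[i32]> = boxed;    // Box<[i32; 3]> -> Box<[i32]>

    assert_eq!(slice.len(), 3);
    assert_eq!(boxed_slice.len(), 3);
    println!("{}", shown);
}
```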

@ -125,19 +125,6 @@ pub trait SliceExt {
}
// Use macros to be generic over const/mut
#[cfg(stage0)]
macro_rules! slice_offset {
($ptr:expr, $by:expr) => {{
let ptr = $ptr;
if size_from_ptr(ptr) == 0 {
transmute((ptr as isize).wrapping_add($by))
} else {
ptr.offset($by)
}
}};
}
#[cfg(not(stage0))]
macro_rules! slice_offset {
($ptr:expr, $by:expr) => {{
let ptr = $ptr;

@ -70,13 +70,15 @@ fn int_xor() {
assert_eq!(x.load(SeqCst), 0xf731 ^ 0x137f);
}
static S_BOOL : AtomicBool = ATOMIC_BOOL_INIT;
static S_INT : AtomicIsize = ATOMIC_ISIZE_INIT;
static S_UINT : AtomicUsize = ATOMIC_USIZE_INIT;
static S_FALSE: AtomicBool = AtomicBool::new(false);
static S_TRUE: AtomicBool = AtomicBool::new(true);
static S_INT: AtomicIsize = AtomicIsize::new(0);
static S_UINT: AtomicUsize = AtomicUsize::new(0);
#[test]
fn static_init() {
assert!(!S_BOOL.load(SeqCst));
assert!(!S_FALSE.load(SeqCst));
assert!(S_TRUE.load(SeqCst));
assert!(S_INT.load(SeqCst) == 0);
assert!(S_UINT.load(SeqCst) == 0);
}

@ -184,7 +184,7 @@ use std::mem;
use std::env;
use std::rt;
use std::slice;
use std::sync::{Once, ONCE_INIT, StaticMutex, MUTEX_INIT};
use std::sync::{Once, StaticMutex};
use directive::LOG_LEVEL_NAMES;
@ -200,7 +200,7 @@ pub const MAX_LOG_LEVEL: u32 = 255;
/// The default logging level of a crate if no other is specified.
const DEFAULT_LOG_LEVEL: u32 = 1;
static LOCK: StaticMutex = MUTEX_INIT;
static LOCK: StaticMutex = StaticMutex::new();
/// An unsafe constant that is the maximum logging level of any module
/// specified. This is the first line of defense to determining whether a
@ -367,7 +367,7 @@ pub struct LogLocation {
/// module's log statement should be emitted or not.
#[doc(hidden)]
pub fn mod_enabled(level: u32, module: &str) -> bool {
static INIT: Once = ONCE_INIT;
static INIT: Once = Once::new();
INIT.call_once(init);
// It's possible for many threads to be in this function at once; only one of them

@ -167,7 +167,4 @@ mod rustc {
}
// Build the diagnostics array at the end so that the metadata includes error use sites.
#[cfg(stage0)]
__build_diagnostic_array! { DIAGNOSTICS }
#[cfg(not(stage0))]
__build_diagnostic_array! { librustc, DIAGNOSTICS }

@ -32,7 +32,7 @@ use std::env;
use std::fs::File;
use std::io;
use std::io::prelude::*;
use std::sync::atomic::{AtomicBool, Ordering, ATOMIC_BOOL_INIT};
use std::sync::atomic::{AtomicBool, Ordering};
use syntax::ast;
fn print_help_message() {
@ -76,7 +76,7 @@ pub fn maybe_print_constraints_for<'a, 'tcx>(region_vars: &RegionVarBindings<'a,
let output_path = {
let output_template = match requested_output {
Ok(ref s) if &**s == "help" => {
static PRINTED_YET: AtomicBool = ATOMIC_BOOL_INIT;
static PRINTED_YET: AtomicBool = AtomicBool::new(false);
if !PRINTED_YET.load(Ordering::SeqCst) {
print_help_message();
PRINTED_YET.store(true, Ordering::SeqCst);

@ -47,7 +47,4 @@ mod borrowck;
pub mod graphviz;
#[cfg(stage0)]
__build_diagnostic_array! { DIAGNOSTICS }
#[cfg(not(stage0))]
__build_diagnostic_array! { librustc_borrowck, DIAGNOSTICS }

@ -3723,7 +3723,4 @@ pub fn resolve_crate<'a, 'tcx>(session: &'a Session,
}
}
#[cfg(stage0)]
__build_diagnostic_array! { DIAGNOSTICS }
#[cfg(not(stage0))]
__build_diagnostic_array! { librustc_resolve, DIAGNOSTICS }

@ -1005,8 +1005,8 @@ pub fn run_assembler(sess: &Session, outputs: &OutputFilenames) {
}
unsafe fn configure_llvm(sess: &Session) {
use std::sync::{Once, ONCE_INIT};
static INIT: Once = ONCE_INIT;
use std::sync::Once;
static INIT: Once = Once::new();
// Copy what clang does by turning on loop vectorization at O2 and
// slp vectorization at O3

@ -39,6 +39,7 @@
#![feature(path_ext)]
#![feature(fs)]
#![feature(path_relative_from)]
#![feature(std_misc)]
#![allow(trivial_casts)]

@ -2653,8 +2653,8 @@ pub fn trans_crate<'tcx>(analysis: ty::CrateAnalysis<'tcx>)
// Before we touch LLVM, make sure that multithreading is enabled.
unsafe {
use std::sync::{Once, ONCE_INIT};
static INIT: Once = ONCE_INIT;
use std::sync::Once;
static INIT: Once = Once::new();
static mut POISONED: bool = false;
INIT.call_once(|| {
if llvm::LLVMStartMultithreaded() != 1 {

@ -344,7 +344,4 @@ pub fn check_crate(tcx: &ty::ctxt, trait_map: ty::TraitMap) {
tcx.sess.abort_if_errors();
}
#[cfg(stage0)]
__build_diagnostic_array! { DIAGNOSTICS }
#[cfg(not(stage0))]
__build_diagnostic_array! { librustc_typeck, DIAGNOSTICS }

@ -211,8 +211,8 @@ mod dl {
pub fn check_for_errors_in<T, F>(f: F) -> Result<T, String> where
F: FnOnce() -> T,
{
use sync::{StaticMutex, MUTEX_INIT};
static LOCK: StaticMutex = MUTEX_INIT;
use sync::StaticMutex;
static LOCK: StaticMutex = StaticMutex::new();
unsafe {
// dlerror isn't thread safe, so we need to lock around this entire
// sequence

@ -23,8 +23,8 @@ use ffi::{OsStr, OsString};
use fmt;
use io;
use path::{Path, PathBuf};
use sync::atomic::{AtomicIsize, ATOMIC_ISIZE_INIT, Ordering};
use sync::{StaticMutex, MUTEX_INIT};
use sync::atomic::{AtomicIsize, Ordering};
use sync::StaticMutex;
use sys::os as os_imp;
/// Returns the current working directory as a `PathBuf`.
@ -70,7 +70,7 @@ pub fn set_current_dir<P: AsRef<Path>>(p: P) -> io::Result<()> {
os_imp::chdir(p.as_ref())
}
static ENV_LOCK: StaticMutex = MUTEX_INIT;
static ENV_LOCK: StaticMutex = StaticMutex::new();
/// An iterator over a snapshot of the environment variables of this process.
///
@ -475,7 +475,7 @@ pub fn current_exe() -> io::Result<PathBuf> {
os_imp::current_exe()
}
static EXIT_STATUS: AtomicIsize = ATOMIC_ISIZE_INIT;
static EXIT_STATUS: AtomicIsize = AtomicIsize::new(0);
/// Sets the process exit code
///

@ -11,31 +11,31 @@
use prelude::v1::*;
use boxed;
use cell::UnsafeCell;
use cell::Cell;
use rt;
use sync::{StaticMutex, Arc};
pub struct Lazy<T> {
pub lock: StaticMutex,
pub ptr: UnsafeCell<*mut Arc<T>>,
pub init: fn() -> Arc<T>,
lock: StaticMutex,
ptr: Cell<*mut Arc<T>>,
init: fn() -> Arc<T>,
}
unsafe impl<T> Sync for Lazy<T> {}
macro_rules! lazy_init {
($init:expr) => (::io::lazy::Lazy {
lock: ::sync::MUTEX_INIT,
ptr: ::cell::UnsafeCell { value: 0 as *mut _ },
init: $init,
})
}
impl<T: Send + Sync + 'static> Lazy<T> {
pub const fn new(init: fn() -> Arc<T>) -> Lazy<T> {
Lazy {
lock: StaticMutex::new(),
ptr: Cell::new(0 as *mut _),
init: init
}
}
pub fn get(&'static self) -> Option<Arc<T>> {
let _g = self.lock.lock();
let ptr = self.ptr.get();
unsafe {
let ptr = *self.ptr.get();
if ptr.is_null() {
Some(self.init())
} else if ptr as usize == 1 {
@ -53,14 +53,14 @@ impl<T: Send + Sync + 'static> Lazy<T> {
// `Arc`.
let registered = rt::at_exit(move || {
let g = self.lock.lock();
let ptr = *self.ptr.get();
*self.ptr.get() = 1 as *mut _;
let ptr = self.ptr.get();
self.ptr.set(1 as *mut _);
drop(g);
drop(Box::from_raw(ptr))
});
let ret = (self.init)();
if registered.is_ok() {
*self.ptr.get() = boxed::into_raw(Box::new(ret.clone()));
self.ptr.set(boxed::into_raw(Box::new(ret.clone())));
}
return ret
}

@ -36,13 +36,12 @@ pub use self::stdio::{StdoutLock, StderrLock, StdinLock};
#[doc(no_inline, hidden)]
pub use self::stdio::{set_panic, set_print};
#[macro_use] mod lazy;
pub mod prelude;
mod buffered;
mod cursor;
mod error;
mod impls;
mod lazy;
mod util;
mod stdio;

@ -122,7 +122,7 @@ pub struct StdinLock<'a> {
/// locked version, `StdinLock`, implements both `Read` and `BufRead`, however.
#[stable(feature = "rust1", since = "1.0.0")]
pub fn stdin() -> Stdin {
static INSTANCE: Lazy<Mutex<BufReader<StdinRaw>>> = lazy_init!(stdin_init);
static INSTANCE: Lazy<Mutex<BufReader<StdinRaw>>> = Lazy::new(stdin_init);
return Stdin {
inner: INSTANCE.get().expect("cannot access stdin during shutdown"),
};
@ -236,7 +236,7 @@ pub struct StdoutLock<'a> {
/// The returned handle implements the `Write` trait.
#[stable(feature = "rust1", since = "1.0.0")]
pub fn stdout() -> Stdout {
static INSTANCE: Lazy<ReentrantMutex<RefCell<LineWriter<StdoutRaw>>>> = lazy_init!(stdout_init);
static INSTANCE: Lazy<ReentrantMutex<RefCell<LineWriter<StdoutRaw>>>> = Lazy::new(stdout_init);
return Stdout {
inner: INSTANCE.get().expect("cannot access stdout during shutdown"),
};
@ -308,7 +308,7 @@ pub struct StderrLock<'a> {
/// The returned handle implements the `Write` trait.
#[stable(feature = "rust1", since = "1.0.0")]
pub fn stderr() -> Stderr {
static INSTANCE: Lazy<ReentrantMutex<RefCell<StderrRaw>>> = lazy_init!(stderr_init);
static INSTANCE: Lazy<ReentrantMutex<RefCell<StderrRaw>>> = Lazy::new(stderr_init);
return Stderr {
inner: INSTANCE.get().expect("cannot access stderr during shutdown"),
};

@ -109,6 +109,7 @@
#![feature(box_syntax)]
#![feature(collections)]
#![feature(core)]
#![feature(const_fn)]
#![feature(into_cow)]
#![feature(lang_items)]
#![feature(libc)]

@ -12,9 +12,9 @@ use prelude::v1::*;
use env;
use net::{SocketAddr, SocketAddrV4, SocketAddrV6, Ipv4Addr, Ipv6Addr, ToSocketAddrs};
use sync::atomic::{AtomicUsize, ATOMIC_USIZE_INIT, Ordering};
use sync::atomic::{AtomicUsize, Ordering};
static PORT: AtomicUsize = ATOMIC_USIZE_INIT;
static PORT: AtomicUsize = AtomicUsize::new(0);
pub fn next_test_ip4() -> SocketAddr {
let port = PORT.fetch_add(1, Ordering::SeqCst) as u16 + base_port();

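A minimal standalone sketch of the post-change spelling of a static atomic counter, for reference. Only `AtomicUsize::new` and `fetch_add` come from the hunk above; the `next_id` helper is hypothetical and simply mirrors how the test-port allocator hands out port numbers.

```rust
use std::sync::atomic::{AtomicUsize, Ordering};

// `AtomicUsize::new` is a `const fn`, so the static no longer needs
// the ATOMIC_USIZE_INIT constant.
static COUNTER: AtomicUsize = AtomicUsize::new(0);

// Hypothetical helper: hand out increasing ids.
fn next_id() -> usize {
    COUNTER.fetch_add(1, Ordering::SeqCst)
}

fn main() {
    assert_eq!(next_id(), 0);
    assert_eq!(next_id(), 1);
}
```
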
@ -96,11 +96,11 @@ mod imp {
target_arch = "aarch64",
target_arch = "powerpc")))]
fn is_getrandom_available() -> bool {
use sync::atomic::{AtomicBool, ATOMIC_BOOL_INIT, Ordering};
use sync::{Once, ONCE_INIT};
use sync::atomic::{AtomicBool, Ordering};
use sync::Once;
static CHECKER: Once = ONCE_INIT;
static AVAILABLE: AtomicBool = ATOMIC_BOOL_INIT;
static CHECKER: Once = Once::new();
static AVAILABLE: AtomicBool = AtomicBool::new(false);
CHECKER.call_once(|| {
let mut buf: [u8; 0] = [];

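The `is_getrandom_available` change above combines both new constructors. A hedged standalone sketch of the same "probe once, cache the answer" pattern follows; `probe` is a stand-in for the real syscall check, and on this snapshot `Once::new` is still behind the unstable `std_misc` gate outside libstd.

```rust
use std::sync::Once;
use std::sync::atomic::{AtomicBool, Ordering};

// Stand-in for the real availability check (e.g. issuing a syscall once).
fn probe() -> bool { true }

fn is_available() -> bool {
    // Function-local statics work the same way as module-level ones.
    static CHECKER: Once = Once::new();
    static AVAILABLE: AtomicBool = AtomicBool::new(false);

    CHECKER.call_once(|| {
        AVAILABLE.store(probe(), Ordering::Relaxed);
    });
    AVAILABLE.load(Ordering::Relaxed)
}

fn main() {
    assert!(is_available());
    assert!(is_available()); // probe() ran only once
}
```
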
@ -52,10 +52,10 @@ mod imp {
use mem;
use ffi::CStr;
use sync::{StaticMutex, MUTEX_INIT};
use sync::StaticMutex;
static mut GLOBAL_ARGS_PTR: usize = 0;
static LOCK: StaticMutex = MUTEX_INIT;
static LOCK: StaticMutex = StaticMutex::new();
pub unsafe fn init(argc: isize, argv: *const *const u8) {
let args = load_argc_and_argv(argc, argv);

@ -20,7 +20,7 @@ use boxed;
use boxed::Box;
use vec::Vec;
use thunk::Thunk;
use sys_common::mutex::{Mutex, MUTEX_INIT};
use sys_common::mutex::Mutex;
type Queue = Vec<Thunk<'static>>;
@ -28,7 +28,7 @@ type Queue = Vec<Thunk<'static>>;
// on poisoning and this module needs to operate at a lower level than requiring
// the thread infrastructure to be in place (useful on the borders of
// initialization/destruction).
static LOCK: Mutex = MUTEX_INIT;
static LOCK: Mutex = Mutex::new();
static mut QUEUE: *mut Queue = 0 as *mut Queue;
// The maximum number of times the cleanup routines will be run. While running

@ -22,7 +22,7 @@ pub use sys::backtrace::write;
// For now logging is turned off by default, and this function checks to see
// whether the magical environment variable is present to see if it's turned on.
pub fn log_enabled() -> bool {
static ENABLED: atomic::AtomicIsize = atomic::ATOMIC_ISIZE_INIT;
static ENABLED: atomic::AtomicIsize = atomic::AtomicIsize::new(0);
match ENABLED.load(Ordering::SeqCst) {
1 => return false,
2 => return true,

@ -72,7 +72,7 @@ use intrinsics;
use libc::c_void;
use mem;
use sync::atomic::{self, Ordering};
use sys_common::mutex::{Mutex, MUTEX_INIT};
use sys_common::mutex::Mutex;
// The actual unwinding implementation is cfg'd here, and we've got two current
// implementations. One goes through SEH on Windows and the other goes through
@ -89,15 +89,15 @@ pub type Callback = fn(msg: &(Any + Send), file: &'static str, line: u32);
// For more information, see below.
const MAX_CALLBACKS: usize = 16;
static CALLBACKS: [atomic::AtomicUsize; MAX_CALLBACKS] =
[atomic::ATOMIC_USIZE_INIT, atomic::ATOMIC_USIZE_INIT,
atomic::ATOMIC_USIZE_INIT, atomic::ATOMIC_USIZE_INIT,
atomic::ATOMIC_USIZE_INIT, atomic::ATOMIC_USIZE_INIT,
atomic::ATOMIC_USIZE_INIT, atomic::ATOMIC_USIZE_INIT,
atomic::ATOMIC_USIZE_INIT, atomic::ATOMIC_USIZE_INIT,
atomic::ATOMIC_USIZE_INIT, atomic::ATOMIC_USIZE_INIT,
atomic::ATOMIC_USIZE_INIT, atomic::ATOMIC_USIZE_INIT,
atomic::ATOMIC_USIZE_INIT, atomic::ATOMIC_USIZE_INIT];
static CALLBACK_CNT: atomic::AtomicUsize = atomic::ATOMIC_USIZE_INIT;
[atomic::AtomicUsize::new(0), atomic::AtomicUsize::new(0),
atomic::AtomicUsize::new(0), atomic::AtomicUsize::new(0),
atomic::AtomicUsize::new(0), atomic::AtomicUsize::new(0),
atomic::AtomicUsize::new(0), atomic::AtomicUsize::new(0),
atomic::AtomicUsize::new(0), atomic::AtomicUsize::new(0),
atomic::AtomicUsize::new(0), atomic::AtomicUsize::new(0),
atomic::AtomicUsize::new(0), atomic::AtomicUsize::new(0),
atomic::AtomicUsize::new(0), atomic::AtomicUsize::new(0)];
static CALLBACK_CNT: atomic::AtomicUsize = atomic::AtomicUsize::new(0);
thread_local! { static PANICKING: Cell<bool> = Cell::new(false) }
@ -243,7 +243,7 @@ fn begin_unwind_inner(msg: Box<Any + Send>,
// `std::sync` one as accessing TLS can cause weird recursive problems (and
// we don't need poison checking).
unsafe {
static LOCK: Mutex = MUTEX_INIT;
static LOCK: Mutex = Mutex::new();
static mut INIT: bool = false;
LOCK.lock();
if !INIT {

@ -42,7 +42,7 @@ pub fn limit_thread_creation_due_to_osx_and_valgrind() -> bool {
}
pub fn min_stack() -> usize {
static MIN: atomic::AtomicUsize = atomic::ATOMIC_USIZE_INIT;
static MIN: atomic::AtomicUsize = atomic::AtomicUsize::new(0);
match MIN.load(Ordering::SeqCst) {
0 => {}
n => return n - 1,

@ -10,7 +10,7 @@
use prelude::v1::*;
use sync::atomic::{AtomicUsize, Ordering, ATOMIC_USIZE_INIT};
use sync::atomic::{AtomicUsize, Ordering};
use sync::{mutex, MutexGuard, PoisonError};
use sys_common::condvar as sys;
use sys_common::mutex as sys_mutex;
@ -84,10 +84,7 @@ pub struct StaticCondvar {
/// Constant initializer for a statically allocated condition variable.
#[unstable(feature = "static_condvar",
reason = "may be merged with Condvar in the future")]
pub const CONDVAR_INIT: StaticCondvar = StaticCondvar {
inner: sys::CONDVAR_INIT,
mutex: ATOMIC_USIZE_INIT,
};
pub const CONDVAR_INIT: StaticCondvar = StaticCondvar::new();
impl Condvar {
/// Creates a new condition variable which is ready to be waited on and
@ -96,7 +93,7 @@ impl Condvar {
pub fn new() -> Condvar {
Condvar {
inner: box StaticCondvar {
inner: unsafe { sys::Condvar::new() },
inner: sys::Condvar::new(),
mutex: AtomicUsize::new(0),
}
}
@ -234,6 +231,16 @@ impl Drop for Condvar {
}
impl StaticCondvar {
/// Creates a new condition variable
#[unstable(feature = "static_condvar",
reason = "may be merged with Condvar in the future")]
pub const fn new() -> StaticCondvar {
StaticCondvar {
inner: sys::Condvar::new(),
mutex: AtomicUsize::new(0),
}
}
/// Blocks the current thread until this condition variable receives a
/// notification.
///
@ -388,10 +395,10 @@ impl StaticCondvar {
mod tests {
use prelude::v1::*;
use super::{StaticCondvar, CONDVAR_INIT};
use super::StaticCondvar;
use sync::mpsc::channel;
use sync::{StaticMutex, MUTEX_INIT, Condvar, Mutex, Arc};
use sync::atomic::{AtomicUsize, ATOMIC_USIZE_INIT, Ordering};
use sync::{StaticMutex, Condvar, Mutex, Arc};
use sync::atomic::{AtomicUsize, Ordering};
use thread;
use time::Duration;
use u32;
@ -405,7 +412,7 @@ mod tests {
#[test]
fn static_smoke() {
static C: StaticCondvar = CONDVAR_INIT;
static C: StaticCondvar = StaticCondvar::new();
C.notify_one();
C.notify_all();
unsafe { C.destroy(); }
@ -413,8 +420,8 @@ mod tests {
#[test]
fn notify_one() {
static C: StaticCondvar = CONDVAR_INIT;
static M: StaticMutex = MUTEX_INIT;
static C: StaticCondvar = StaticCondvar::new();
static M: StaticMutex = StaticMutex::new();
let g = M.lock().unwrap();
let _t = thread::spawn(move|| {
@ -464,8 +471,8 @@ mod tests {
#[test]
fn wait_timeout_ms() {
static C: StaticCondvar = CONDVAR_INIT;
static M: StaticMutex = MUTEX_INIT;
static C: StaticCondvar = StaticCondvar::new();
static M: StaticMutex = StaticMutex::new();
let g = M.lock().unwrap();
let (g, _no_timeout) = C.wait_timeout_ms(g, 1).unwrap();
@ -483,9 +490,9 @@ mod tests {
#[test]
fn wait_timeout_with() {
static C: StaticCondvar = CONDVAR_INIT;
static M: StaticMutex = MUTEX_INIT;
static S: AtomicUsize = ATOMIC_USIZE_INIT;
static C: StaticCondvar = StaticCondvar::new();
static M: StaticMutex = StaticMutex::new();
static S: AtomicUsize = AtomicUsize::new(0);
let g = M.lock().unwrap();
let (g, success) = C.wait_timeout_with(g, Duration::new(0, 1000), |_| {
@ -530,9 +537,9 @@ mod tests {
#[test]
#[should_panic]
fn two_mutexes() {
static M1: StaticMutex = MUTEX_INIT;
static M2: StaticMutex = MUTEX_INIT;
static C: StaticCondvar = CONDVAR_INIT;
static M1: StaticMutex = StaticMutex::new();
static M2: StaticMutex = StaticMutex::new();
static C: StaticCondvar = StaticCondvar::new();
let mut g = M1.lock().unwrap();
let _t = thread::spawn(move|| {

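Mirroring the `notify_one` test above, a sketch of the constructor-based spelling outside libstd. Both types and their `const fn new` are unstable here (`std_misc` and `static_condvar`) and were later removed, so this applies only to nightlies of this era.

```rust
#![feature(std_misc, static_condvar)]

use std::sync::{StaticCondvar, StaticMutex};
use std::thread;

static C: StaticCondvar = StaticCondvar::new();
static M: StaticMutex = StaticMutex::new();

fn main() {
    let g = M.lock().unwrap();
    let _t = thread::spawn(move || {
        // Take and drop the lock, then wake the waiter.
        let _g = M.lock().unwrap();
        C.notify_one();
    });
    // `wait` re-acquires the lock before handing the guard back.
    let g = C.wait(g).unwrap();
    drop(g);
}
```
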
@ -11,7 +11,7 @@
//! Generic support for building blocking abstractions.
use thread::{self, Thread};
use sync::atomic::{AtomicBool, ATOMIC_BOOL_INIT, Ordering};
use sync::atomic::{AtomicBool, Ordering};
use sync::Arc;
use marker::{Sync, Send};
use mem;
@ -41,7 +41,7 @@ impl !Sync for WaitToken {}
pub fn tokens() -> (WaitToken, SignalToken) {
let inner = Arc::new(Inner {
thread: thread::current(),
woken: ATOMIC_BOOL_INIT,
woken: AtomicBool::new(false),
});
let wait_token = WaitToken {
inner: inner.clone(),

@ -178,17 +178,14 @@ impl<'a, T: ?Sized> !marker::Send for MutexGuard<'a, T> {}
/// other mutex constants.
#[unstable(feature = "std_misc",
reason = "may be merged with Mutex in the future")]
pub const MUTEX_INIT: StaticMutex = StaticMutex {
lock: sys::MUTEX_INIT,
poison: poison::FLAG_INIT,
};
pub const MUTEX_INIT: StaticMutex = StaticMutex::new();
impl<T> Mutex<T> {
/// Creates a new mutex in an unlocked state ready for use.
#[stable(feature = "rust1", since = "1.0.0")]
pub fn new(t: T) -> Mutex<T> {
Mutex {
inner: box MUTEX_INIT,
inner: box StaticMutex::new(),
data: UnsafeCell::new(t),
}
}
@ -271,9 +268,19 @@ impl<T: ?Sized + fmt::Debug + 'static> fmt::Debug for Mutex<T> {
struct Dummy(UnsafeCell<()>);
unsafe impl Sync for Dummy {}
static DUMMY: Dummy = Dummy(UnsafeCell { value: () });
static DUMMY: Dummy = Dummy(UnsafeCell::new(()));
impl StaticMutex {
/// Creates a new mutex in an unlocked state ready for use.
#[unstable(feature = "std_misc",
reason = "may be merged with Mutex in the future")]
pub const fn new() -> StaticMutex {
StaticMutex {
lock: sys::Mutex::new(),
poison: poison::Flag::new(),
}
}
/// Acquires this lock, see `Mutex::lock`
#[inline]
#[unstable(feature = "std_misc",
@ -365,7 +372,7 @@ mod tests {
use prelude::v1::*;
use sync::mpsc::channel;
use sync::{Arc, Mutex, StaticMutex, MUTEX_INIT, Condvar};
use sync::{Arc, Mutex, StaticMutex, Condvar};
use thread;
struct Packet<T: Send>(Arc<(Mutex<T>, Condvar)>);
@ -382,7 +389,7 @@ mod tests {
#[test]
fn smoke_static() {
static M: StaticMutex = MUTEX_INIT;
static M: StaticMutex = StaticMutex::new();
unsafe {
drop(M.lock().unwrap());
drop(M.lock().unwrap());
@ -392,7 +399,7 @@ mod tests {
#[test]
fn lots_and_lots() {
static M: StaticMutex = MUTEX_INIT;
static M: StaticMutex = StaticMutex::new();
static mut CNT: u32 = 0;
const J: u32 = 1000;
const K: u32 = 3;

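Several call sites elsewhere in this patch (dlerror, dbghelp, `CreateProcess`) use the same idiom: a file-local `StaticMutex` guarding an API that is not thread safe on its own. A hedged sketch of the idiom with the new constructor; `do_racy_thing` is hypothetical, and `StaticMutex` itself is `std_misc`-unstable and later removed.

```rust
#![feature(std_misc)]

use std::sync::StaticMutex;

// Guards a hypothetical API that must not be called concurrently.
static LOCK: StaticMutex = StaticMutex::new();

fn do_racy_thing() { /* not thread safe on its own */ }

fn call_safely() {
    // The guard is released when `_g` goes out of scope.
    let _g = LOCK.lock().unwrap();
    do_racy_thing();
}

fn main() {
    call_safely();
}
```
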
@ -16,8 +16,8 @@
use prelude::v1::*;
use isize;
use sync::atomic::{AtomicIsize, Ordering, ATOMIC_ISIZE_INIT};
use sync::{StaticMutex, MUTEX_INIT};
use sync::atomic::{AtomicIsize, Ordering};
use sync::StaticMutex;
/// A synchronization primitive which can be used to run a one-time global
/// initialization. Useful for one-time initialization for FFI or related
@ -44,13 +44,19 @@ pub struct Once {
/// Initialization value for static `Once` values.
#[stable(feature = "rust1", since = "1.0.0")]
pub const ONCE_INIT: Once = Once {
mutex: MUTEX_INIT,
cnt: ATOMIC_ISIZE_INIT,
lock_cnt: ATOMIC_ISIZE_INIT,
};
pub const ONCE_INIT: Once = Once::new();
impl Once {
/// Creates a new `Once` value.
#[unstable(feature = "std_misc")]
pub const fn new() -> Once {
Once {
mutex: StaticMutex::new(),
cnt: AtomicIsize::new(0),
lock_cnt: AtomicIsize::new(0),
}
}
/// Performs an initialization routine once and only once. The given closure
/// will be executed if this is the first time `call_once` has been called,
/// and otherwise the routine will *not* be invoked.
@ -129,12 +135,12 @@ mod tests {
use prelude::v1::*;
use thread;
use super::{ONCE_INIT, Once};
use super::Once;
use sync::mpsc::channel;
#[test]
fn smoke_once() {
static O: Once = ONCE_INIT;
static O: Once = Once::new();
let mut a = 0;
O.call_once(|| a += 1);
assert_eq!(a, 1);
@ -144,7 +150,7 @@ mod tests {
#[test]
fn stampede_once() {
static O: Once = ONCE_INIT;
static O: Once = Once::new();
static mut run: bool = false;
let (tx, rx) = channel();

@ -102,10 +102,7 @@ pub struct StaticRwLock {
/// Constant initialization for a statically-initialized rwlock.
#[unstable(feature = "std_misc",
reason = "may be merged with RwLock in the future")]
pub const RW_LOCK_INIT: StaticRwLock = StaticRwLock {
lock: sys::RWLOCK_INIT,
poison: poison::FLAG_INIT,
};
pub const RW_LOCK_INIT: StaticRwLock = StaticRwLock::new();
/// RAII structure used to release the shared read access of a lock when
/// dropped.
@ -142,7 +139,7 @@ impl<T> RwLock<T> {
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn new(t: T) -> RwLock<T> {
RwLock { inner: box RW_LOCK_INIT, data: UnsafeCell::new(t) }
RwLock { inner: box StaticRwLock::new(), data: UnsafeCell::new(t) }
}
}
@ -280,9 +277,19 @@ impl<T: ?Sized + fmt::Debug> fmt::Debug for RwLock<T> {
struct Dummy(UnsafeCell<()>);
unsafe impl Sync for Dummy {}
static DUMMY: Dummy = Dummy(UnsafeCell { value: () });
static DUMMY: Dummy = Dummy(UnsafeCell::new(()));
impl StaticRwLock {
/// Creates a new rwlock.
#[unstable(feature = "std_misc",
reason = "may be merged with RwLock in the future")]
pub const fn new() -> StaticRwLock {
StaticRwLock {
lock: sys::RWLock::new(),
poison: poison::Flag::new(),
}
}
/// Locks this rwlock with shared read access, blocking the current thread
/// until it can be acquired.
///
@ -420,7 +427,7 @@ mod tests {
use rand::{self, Rng};
use sync::mpsc::channel;
use thread;
use sync::{Arc, RwLock, StaticRwLock, TryLockError, RW_LOCK_INIT};
use sync::{Arc, RwLock, StaticRwLock, TryLockError};
#[test]
fn smoke() {
@ -433,7 +440,7 @@ mod tests {
#[test]
fn static_smoke() {
static R: StaticRwLock = RW_LOCK_INIT;
static R: StaticRwLock = StaticRwLock::new();
drop(R.read().unwrap());
drop(R.write().unwrap());
drop((R.read().unwrap(), R.read().unwrap()));
@ -443,7 +450,7 @@ mod tests {
#[test]
fn frob() {
static R: StaticRwLock = RW_LOCK_INIT;
static R: StaticRwLock = StaticRwLock::new();
const N: usize = 10;
const M: usize = 1000;

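The rwlock counterpart, mirroring the `static_smoke` test above (same caveats: `std_misc`-unstable on this nightly, later removed).

```rust
#![feature(std_misc)]

use std::sync::StaticRwLock;

static R: StaticRwLock = StaticRwLock::new();

fn main() {
    drop(R.read().unwrap());
    drop(R.write().unwrap());
    // Multiple concurrent readers are fine.
    drop((R.read().unwrap(), R.read().unwrap()));
    unsafe { R.destroy(); }
}
```
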
@ -20,16 +20,12 @@ use sys::condvar as imp;
/// this type.
pub struct Condvar(imp::Condvar);
/// Static initializer for condition variables.
pub const CONDVAR_INIT: Condvar = Condvar(imp::CONDVAR_INIT);
impl Condvar {
/// Creates a new condition variable for use.
///
/// Behavior is undefined if the condition variable is moved after it is
/// first used with any of the functions below.
#[inline]
pub unsafe fn new() -> Condvar { Condvar(imp::Condvar::new()) }
pub const fn new() -> Condvar { Condvar(imp::Condvar::new()) }
/// Signals one waiter on this condition variable to wake up.
#[inline]

@ -20,10 +20,13 @@ pub struct Mutex(imp::Mutex);
unsafe impl Sync for Mutex {}
/// Constant initializer for statically allocated mutexes.
pub const MUTEX_INIT: Mutex = Mutex(imp::MUTEX_INIT);
impl Mutex {
/// Creates a new mutex for use.
///
/// Behavior is undefined if the mutex is moved after it is
/// first used with any of the functions below.
pub const fn new() -> Mutex { Mutex(imp::Mutex::new()) }
/// Locks the mutex blocking the current thread until it is available.
///
/// Behavior is undefined if the mutex has been moved between this and any

@ -10,26 +10,28 @@
use prelude::v1::*;
use marker::Reflect;
use cell::UnsafeCell;
use cell::Cell;
use error::{Error};
use fmt;
use marker::Reflect;
use thread;
pub struct Flag { failed: UnsafeCell<bool> }
pub struct Flag { failed: Cell<bool> }
// This flag is only ever accessed with a lock previously held. Note that this
// is a totally private structure.
unsafe impl Send for Flag {}
unsafe impl Sync for Flag {}
pub const FLAG_INIT: Flag = Flag { failed: UnsafeCell { value: false } };
impl Flag {
pub const fn new() -> Flag {
Flag { failed: Cell::new(false) }
}
#[inline]
pub fn borrow(&self) -> LockResult<Guard> {
let ret = Guard { panicking: thread::panicking() };
if unsafe { *self.failed.get() } {
if self.get() {
Err(PoisonError::new(ret))
} else {
Ok(ret)
@ -39,13 +41,13 @@ impl Flag {
#[inline]
pub fn done(&self, guard: &Guard) {
if !guard.panicking && thread::panicking() {
unsafe { *self.failed.get() = true; }
self.failed.set(true);
}
}
#[inline]
pub fn get(&self) -> bool {
unsafe { *self.failed.get() }
self.failed.get()
}
}

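The `Flag` rewrite above is the general recipe this PR applies everywhere: replace a public `*_INIT` struct-literal constant with private fields plus a `const fn new`. A sketch of the same recipe in a user crate, which on this nightly needs the `const_fn` gate to *define* such a function (the `Flag` shown here is a simplified stand-in, not the libstd type).

```rust
#![feature(const_fn)]

use std::cell::Cell;

pub struct Flag { failed: Cell<bool> }

// As in libstd's poison flag, promising `Sync` is on us: this is only
// sound while access stays protected by an external lock (or, as in
// `main` below, stays on one thread).
unsafe impl Sync for Flag {}

impl Flag {
    // Interior mutability keeps `&self` methods, while `const fn new`
    // lets the value live in a `static` without a *_INIT constant.
    pub const fn new() -> Flag {
        Flag { failed: Cell::new(false) }
    }
    pub fn set(&self) { self.failed.set(true); }
    pub fn get(&self) -> bool { self.failed.get() }
}

static POISON: Flag = Flag::new();

fn main() {
    assert!(!POISON.get());
    POISON.set();
    assert!(POISON.get());
}
```
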
@ -54,7 +54,7 @@ impl<T> ReentrantMutex<T> {
unsafe {
let mut mutex = ReentrantMutex {
inner: box sys::ReentrantMutex::uninitialized(),
poison: poison::FLAG_INIT,
poison: poison::Flag::new(),
data: t,
};
mutex.inner.init();

@ -17,10 +17,13 @@ use sys::rwlock as imp;
/// safer types at the top level of this crate instead of this type.
pub struct RWLock(imp::RWLock);
/// Constant initializer for static RWLocks.
pub const RWLOCK_INIT: RWLock = RWLock(imp::RWLOCK_INIT);
impl RWLock {
/// Creates a new reader-writer lock for use.
///
/// Behavior is undefined if the reader-writer lock is moved after it is
/// first used with any of the functions below.
pub const fn new() -> RWLock { RWLock(imp::RWLock::new()) }
/// Acquires shared access to the underlying lock, blocking the current
/// thread to do so.
///

@ -86,19 +86,13 @@ use sys::thread_local as imp;
/// }
/// ```
pub struct StaticKey {
/// Inner static TLS key (internals), created with by `INIT_INNER` in this
/// module.
pub inner: StaticKeyInner,
/// Inner static TLS key (internals).
key: AtomicUsize,
/// Destructor for the TLS value.
///
/// See `Key::new` for information about when the destructor runs and how
/// it runs.
pub dtor: Option<unsafe extern fn(*mut u8)>,
}
/// Inner contents of `StaticKey`, created by the `INIT_INNER` constant.
pub struct StaticKeyInner {
key: AtomicUsize,
dtor: Option<unsafe extern fn(*mut u8)>,
}
/// A type for a safely managed OS-based TLS slot.
@ -129,19 +123,16 @@ pub struct Key {
/// Constant initialization value for static TLS keys.
///
/// This value specifies no destructor by default.
pub const INIT: StaticKey = StaticKey {
inner: INIT_INNER,
dtor: None,
};
/// Constant initialization value for the inner part of static TLS keys.
///
/// This value allows specific configuration of the destructor for a TLS key.
pub const INIT_INNER: StaticKeyInner = StaticKeyInner {
key: atomic::ATOMIC_USIZE_INIT,
};
pub const INIT: StaticKey = StaticKey::new(None);
impl StaticKey {
pub const fn new(dtor: Option<unsafe extern fn(*mut u8)>) -> StaticKey {
StaticKey {
key: atomic::AtomicUsize::new(0),
dtor: dtor
}
}
/// Gets the value associated with this TLS key
///
/// This will lazily allocate a TLS key from the OS if one has not already
@ -164,7 +155,7 @@ impl StaticKey {
/// Note that this does *not* run the user-provided destructor if one was
/// specified at definition time. Doing so must be done manually.
pub unsafe fn destroy(&self) {
match self.inner.key.swap(0, Ordering::SeqCst) {
match self.key.swap(0, Ordering::SeqCst) {
0 => {}
n => { imp::destroy(n as imp::Key) }
}
@ -172,7 +163,7 @@ impl StaticKey {
#[inline]
unsafe fn key(&self) -> imp::Key {
match self.inner.key.load(Ordering::Relaxed) {
match self.key.load(Ordering::Relaxed) {
0 => self.lazy_init() as imp::Key,
n => n as imp::Key
}
@ -197,7 +188,7 @@ impl StaticKey {
key2
};
assert!(key != 0);
match self.inner.key.compare_and_swap(0, key as usize, Ordering::SeqCst) {
match self.key.compare_and_swap(0, key as usize, Ordering::SeqCst) {
// The CAS succeeded, so we've created the actual key
0 => key as usize,
// If someone beat us to the punch, use their key instead
@ -245,7 +236,7 @@ impl Drop for Key {
#[cfg(test)]
mod tests {
use prelude::v1::*;
use super::{Key, StaticKey, INIT_INNER};
use super::{Key, StaticKey};
fn assert_sync<T: Sync>() {}
fn assert_send<T: Send>() {}
@ -267,8 +258,8 @@ mod tests {
#[test]
fn statik() {
static K1: StaticKey = StaticKey { inner: INIT_INNER, dtor: None };
static K2: StaticKey = StaticKey { inner: INIT_INNER, dtor: None };
static K1: StaticKey = StaticKey::new(None);
static K2: StaticKey = StaticKey::new(None);
unsafe {
assert!(K1.get().is_null());

@ -91,7 +91,7 @@ use io;
use libc;
use mem;
use str;
use sync::{StaticMutex, MUTEX_INIT};
use sync::StaticMutex;
use sys_common::backtrace::*;
@ -117,7 +117,7 @@ pub fn write(w: &mut Write) -> io::Result<()> {
// while it doesn't require a lock to work, as everything is
// local, it still displays much nicer backtraces when a
// couple of threads panic simultaneously
static LOCK: StaticMutex = MUTEX_INIT;
static LOCK: StaticMutex = StaticMutex::new();
let _g = LOCK.lock();
try!(writeln!(w, "stack backtrace:"));
@ -148,7 +148,7 @@ pub fn write(w: &mut Write) -> io::Result<()> {
// is semi-reasonable in terms of printing anyway, and we know that all
// I/O done here is blocking I/O, not green I/O, so we don't have to
// worry about this being a native vs green mutex.
static LOCK: StaticMutex = MUTEX_INIT;
static LOCK: StaticMutex = StaticMutex::new();
let _g = LOCK.lock();
try!(writeln!(w, "stack backtrace:"));

@ -23,13 +23,8 @@ pub struct Condvar { inner: UnsafeCell<ffi::pthread_cond_t> }
unsafe impl Send for Condvar {}
unsafe impl Sync for Condvar {}
pub const CONDVAR_INIT: Condvar = Condvar {
inner: UnsafeCell { value: ffi::PTHREAD_COND_INITIALIZER },
};
impl Condvar {
#[inline]
pub unsafe fn new() -> Condvar {
pub const fn new() -> Condvar {
// Might be moved, and since its address would change it is better to avoid
// initialization of potentially opaque OS data before it has landed
Condvar { inner: UnsafeCell::new(ffi::PTHREAD_COND_INITIALIZER) }

@ -21,20 +21,15 @@ pub unsafe fn raw(m: &Mutex) -> *mut ffi::pthread_mutex_t {
m.inner.get()
}
pub const MUTEX_INIT: Mutex = Mutex {
inner: UnsafeCell { value: ffi::PTHREAD_MUTEX_INITIALIZER },
};
unsafe impl Send for Mutex {}
unsafe impl Sync for Mutex {}
#[allow(dead_code)] // sys isn't exported yet
impl Mutex {
#[inline]
pub unsafe fn new() -> Mutex {
pub const fn new() -> Mutex {
// Might be moved, and since its address would change it is better to avoid
// initialization of potentially opaque OS data before it has landed
MUTEX_INIT
Mutex { inner: UnsafeCell::new(ffi::PTHREAD_MUTEX_INITIALIZER) }
}
#[inline]
pub unsafe fn lock(&self) {

@ -216,8 +216,8 @@ pub fn current_exe() -> io::Result<PathBuf> {
#[cfg(any(target_os = "bitrig", target_os = "openbsd"))]
pub fn current_exe() -> io::Result<PathBuf> {
use sync::{StaticMutex, MUTEX_INIT};
static LOCK: StaticMutex = MUTEX_INIT;
use sync::StaticMutex;
static LOCK: StaticMutex = StaticMutex::new();
extern {
fn rust_current_exe() -> *const c_char;

@ -16,14 +16,13 @@ use sys::sync as ffi;
pub struct RWLock { inner: UnsafeCell<ffi::pthread_rwlock_t> }
pub const RWLOCK_INIT: RWLock = RWLock {
inner: UnsafeCell { value: ffi::PTHREAD_RWLOCK_INITIALIZER },
};
unsafe impl Send for RWLock {}
unsafe impl Sync for RWLock {}
impl RWLock {
pub const fn new() -> RWLock {
RWLock { inner: UnsafeCell::new(ffi::PTHREAD_RWLOCK_INITIALIZER) }
}
#[inline]
pub unsafe fn read(&self) {
let r = ffi::pthread_rwlock_rdlock(self.inner.get());

@ -330,10 +330,10 @@ pub mod guard {
#[cfg(target_os = "linux")]
fn min_stack_size(attr: *const libc::pthread_attr_t) -> usize {
use dynamic_lib::DynamicLibrary;
use sync::{Once, ONCE_INIT};
use sync::Once;
type F = unsafe extern "C" fn(*const libc::pthread_attr_t) -> libc::size_t;
static INIT: Once = ONCE_INIT;
static INIT: Once = Once::new();
static mut __pthread_get_minstack: Option<F> = None;
INIT.call_once(|| {

@ -8,6 +8,8 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![allow(dead_code)] // sys isn't exported yet
use prelude::v1::*;
use libc::c_int;

@ -17,7 +17,7 @@ mod inner {
use libc;
use time::Duration;
use ops::Sub;
use sync::{Once, ONCE_INIT};
use sync::Once;
use super::NSEC_PER_SEC;
pub struct SteadyTime {
@ -42,7 +42,7 @@ mod inner {
numer: 0,
denom: 0,
};
static ONCE: Once = ONCE_INIT;
static ONCE: Once = Once::new();
unsafe {
ONCE.call_once(|| {

@ -36,7 +36,7 @@ use mem;
use path::Path;
use ptr;
use str;
use sync::{StaticMutex, MUTEX_INIT};
use sync::StaticMutex;
use sys_common::backtrace::*;
@ -295,7 +295,7 @@ impl Drop for Cleanup {
pub fn write(w: &mut Write) -> io::Result<()> {
// According to windows documentation, all dbghelp functions are
// single-threaded.
static LOCK: StaticMutex = MUTEX_INIT;
static LOCK: StaticMutex = StaticMutex::new();
let _g = LOCK.lock();
// Open up dbghelp.dll, we don't link to it explicitly because it can't

@ -340,10 +340,10 @@ pub mod compat {
-> $rettype:ty { $fallback:expr }) => (
#[inline(always)]
pub unsafe fn $symbol($($argname: $argtype),*) -> $rettype {
use sync::atomic::{AtomicUsize, ATOMIC_USIZE_INIT, Ordering};
use sync::atomic::{AtomicUsize, Ordering};
use mem;
static PTR: AtomicUsize = ATOMIC_USIZE_INIT;
static PTR: AtomicUsize = AtomicUsize::new(0);
fn load() -> usize {
::sys::c::compat::store_func(&PTR,

@ -22,13 +22,10 @@ pub struct Condvar { inner: UnsafeCell<ffi::CONDITION_VARIABLE> }
unsafe impl Send for Condvar {}
unsafe impl Sync for Condvar {}
pub const CONDVAR_INIT: Condvar = Condvar {
inner: UnsafeCell { value: ffi::CONDITION_VARIABLE_INIT }
};
impl Condvar {
#[inline]
pub unsafe fn new() -> Condvar { CONDVAR_INIT }
pub const fn new() -> Condvar {
Condvar { inner: UnsafeCell::new(ffi::CONDITION_VARIABLE_INIT) }
}
#[inline]
pub unsafe fn wait(&self, mutex: &Mutex) {

@ -16,10 +16,6 @@ use mem;
pub struct Mutex { inner: UnsafeCell<ffi::SRWLOCK> }
pub const MUTEX_INIT: Mutex = Mutex {
inner: UnsafeCell { value: ffi::SRWLOCK_INIT }
};
unsafe impl Send for Mutex {}
unsafe impl Sync for Mutex {}
@ -41,6 +37,9 @@ pub unsafe fn raw(m: &Mutex) -> ffi::PSRWLOCK {
// is that there are no guarantees of fairness.
impl Mutex {
pub const fn new() -> Mutex {
Mutex { inner: UnsafeCell::new(ffi::SRWLOCK_INIT) }
}
#[inline]
pub unsafe fn lock(&self) {
ffi::AcquireSRWLockExclusive(self.inner.get())

@ -18,7 +18,7 @@ use net::SocketAddr;
use num::One;
use ops::Neg;
use rt;
use sync::{Once, ONCE_INIT};
use sync::Once;
use sys::c;
use sys_common::{AsInner, FromInner};
@ -29,7 +29,7 @@ pub struct Socket(libc::SOCKET);
/// Checks whether the Windows socket interface has been started already, and
/// if not, starts it.
pub fn init() {
static START: Once = ONCE_INIT;
static START: Once = Once::new();
START.call_once(|| unsafe {
let mut data: c::WSADATA = mem::zeroed();

@ -24,7 +24,7 @@ use mem;
use os::windows::ffi::OsStrExt;
use path::Path;
use ptr;
use sync::{StaticMutex, MUTEX_INIT};
use sync::StaticMutex;
use sys::c;
use sys::fs::{OpenOptions, File};
use sys::handle::Handle;
@ -169,7 +169,7 @@ impl Process {
try!(unsafe {
// `CreateProcess` is racy!
// http://support.microsoft.com/kb/315939
static CREATE_PROCESS_LOCK: StaticMutex = MUTEX_INIT;
static CREATE_PROCESS_LOCK: StaticMutex = StaticMutex::new();
let _lock = CREATE_PROCESS_LOCK.lock();
cvt(CreateProcessW(ptr::null(),

@ -15,14 +15,13 @@ use sys::sync as ffi;
pub struct RWLock { inner: UnsafeCell<ffi::SRWLOCK> }
pub const RWLOCK_INIT: RWLock = RWLock {
inner: UnsafeCell { value: ffi::SRWLOCK_INIT }
};
unsafe impl Send for RWLock {}
unsafe impl Sync for RWLock {}
impl RWLock {
pub const fn new() -> RWLock {
RWLock { inner: UnsafeCell::new(ffi::SRWLOCK_INIT) }
}
#[inline]
pub unsafe fn read(&self) {
ffi::AcquireSRWLockShared(self.inner.get())

@ -15,7 +15,7 @@ use libc::types::os::arch::extra::{DWORD, LPVOID, BOOL};
use boxed;
use ptr;
use rt;
use sys_common::mutex::{MUTEX_INIT, Mutex};
use sys_common::mutex::Mutex;
pub type Key = DWORD;
pub type Dtor = unsafe extern fn(*mut u8);
@ -58,7 +58,7 @@ pub type Dtor = unsafe extern fn(*mut u8);
// on poisoning and this module needs to operate at a lower level than requiring
// the thread infrastructure to be in place (useful on the borders of
// initialization/destruction).
static DTOR_LOCK: Mutex = MUTEX_INIT;
static DTOR_LOCK: Mutex = Mutex::new();
static mut DTORS: *mut Vec<(Key, Dtor)> = 0 as *mut _;
// -------------------------------------------------------------------------

@ -10,7 +10,7 @@
use libc;
use ops::Sub;
use time::Duration;
use sync::{Once, ONCE_INIT};
use sync::Once;
const NANOS_PER_SEC: u64 = 1_000_000_000;
@ -28,7 +28,7 @@ impl SteadyTime {
fn frequency() -> libc::LARGE_INTEGER {
static mut FREQUENCY: libc::LARGE_INTEGER = 0;
static ONCE: Once = ONCE_INIT;
static ONCE: Once = Once::new();
unsafe {
ONCE.call_once(|| {

@ -18,12 +18,7 @@ use cell::UnsafeCell;
// Sure wish we had macro hygiene, no?
#[doc(hidden)]
pub mod __impl {
pub use super::imp::Key as KeyInner;
pub use super::imp::destroy_value;
pub use sys_common::thread_local::INIT_INNER as OS_INIT_INNER;
pub use sys_common::thread_local::StaticKey as OsStaticKey;
}
pub use self::imp::Key as __KeyInner;
/// A thread local storage key which owns its contents.
///
@ -76,55 +71,10 @@ pub struct LocalKey<T> {
//
// This is trivially devirtualizable by LLVM because we never store anything
// to this field and rustc can declare the `static` as constant as well.
#[doc(hidden)]
pub inner: fn() -> &'static __impl::KeyInner<UnsafeCell<Option<T>>>,
inner: fn() -> &'static __KeyInner<T>,
// initialization routine to invoke to create a value
#[doc(hidden)]
pub init: fn() -> T,
}
/// Declare a new thread local storage key of type `std::thread::LocalKey`.
///
/// See [LocalKey documentation](thread/struct.LocalKey.html) for more information.
#[macro_export]
#[stable(feature = "rust1", since = "1.0.0")]
#[allow_internal_unstable]
macro_rules! thread_local {
(static $name:ident: $t:ty = $init:expr) => (
static $name: ::std::thread::LocalKey<$t> = {
use std::cell::UnsafeCell as __UnsafeCell;
use std::thread::__local::KeyInner as __KeyInner;
use std::option::Option as __Option;
use std::option::Option::None as __None;
__thread_local_inner!(static __KEY: __UnsafeCell<__Option<$t>> = {
__UnsafeCell { value: __None }
});
fn __init() -> $t { $init }
fn __getit() -> &'static __KeyInner<__UnsafeCell<__Option<$t>>> {
&__KEY
}
::std::thread::LocalKey { inner: __getit, init: __init }
};
);
(pub static $name:ident: $t:ty = $init:expr) => (
pub static $name: ::std::thread::LocalKey<$t> = {
use std::cell::UnsafeCell as __UnsafeCell;
use std::thread::__local::KeyInner as __KeyInner;
use std::option::Option as __Option;
use std::option::Option::None as __None;
__thread_local_inner!(static __KEY: __UnsafeCell<__Option<$t>> = {
__UnsafeCell { value: __None }
});
fn __init() -> $t { $init }
fn __getit() -> &'static __KeyInner<__UnsafeCell<__Option<$t>>> {
&__KEY
}
::std::thread::LocalKey { inner: __getit, init: __init }
};
);
init: fn() -> T,
}
// Macro pain #4586:
@ -147,50 +97,37 @@ macro_rules! thread_local {
// To get around this, we're forced to inject the #[cfg] logic into the macro
// itself. Woohoo.
/// Declare a new thread local storage key of type `std::thread::LocalKey`.
///
/// See [LocalKey documentation](thread/struct.LocalKey.html) for more information.
#[macro_export]
#[doc(hidden)]
#[stable(feature = "rust1", since = "1.0.0")]
#[allow_internal_unstable]
macro_rules! __thread_local_inner {
macro_rules! thread_local {
(static $name:ident: $t:ty = $init:expr) => (
#[cfg_attr(all(any(target_os = "macos", target_os = "linux"),
not(target_arch = "aarch64")),
thread_local)]
static $name: ::std::thread::__local::KeyInner<$t> =
__thread_local_inner!($init, $t);
static $name: ::std::thread::LocalKey<$t> = {
#[cfg_attr(all(any(target_os = "macos", target_os = "linux"),
not(target_arch = "aarch64")),
thread_local)]
static __KEY: ::std::thread::__LocalKeyInner<$t> =
::std::thread::__LocalKeyInner::new();
fn __init() -> $t { $init }
fn __getit() -> &'static ::std::thread::__LocalKeyInner<$t> { &__KEY }
::std::thread::LocalKey::new(__getit, __init)
};
);
(pub static $name:ident: $t:ty = $init:expr) => (
#[cfg_attr(all(any(target_os = "macos", target_os = "linux"),
not(target_arch = "aarch64")),
thread_local)]
pub static $name: ::std::thread::__local::KeyInner<$t> =
__thread_local_inner!($init, $t);
pub static $name: ::std::thread::LocalKey<$t> = {
#[cfg_attr(all(any(target_os = "macos", target_os = "linux"),
not(target_arch = "aarch64")),
thread_local)]
static __KEY: ::std::thread::__LocalKeyInner<$t> =
::std::thread::__LocalKeyInner::new();
fn __init() -> $t { $init }
fn __getit() -> &'static ::std::thread::__LocalKeyInner<$t> { &__KEY }
::std::thread::LocalKey::new(__getit, __init)
};
);
($init:expr, $t:ty) => ({
#[cfg(all(any(target_os = "macos", target_os = "linux"), not(target_arch = "aarch64")))]
const _INIT: ::std::thread::__local::KeyInner<$t> = {
::std::thread::__local::KeyInner {
inner: ::std::cell::UnsafeCell { value: $init },
dtor_registered: ::std::cell::UnsafeCell { value: false },
dtor_running: ::std::cell::UnsafeCell { value: false },
}
};
#[allow(trivial_casts)]
#[cfg(any(not(any(target_os = "macos", target_os = "linux")), target_arch = "aarch64"))]
const _INIT: ::std::thread::__local::KeyInner<$t> = {
::std::thread::__local::KeyInner {
inner: ::std::cell::UnsafeCell { value: $init },
os: ::std::thread::__local::OsStaticKey {
inner: ::std::thread::__local::OS_INIT_INNER,
dtor: ::std::option::Option::Some(
::std::thread::__local::destroy_value::<$t>
),
},
}
};
_INIT
});
}
/// Indicator of the state of a thread local storage key.
@ -225,6 +162,14 @@ pub enum LocalKeyState {
}
impl<T: 'static> LocalKey<T> {
#[doc(hidden)]
pub const fn new(inner: fn() -> &'static __KeyInner<T>, init: fn() -> T) -> LocalKey<T> {
LocalKey {
inner: inner,
init: init
}
}
/// Acquires a reference to the value in this TLS key.
///
/// This will lazily initialize the value if this thread has not referenced
@ -300,44 +245,45 @@ impl<T: 'static> LocalKey<T> {
mod imp {
use prelude::v1::*;
use cell::UnsafeCell;
use cell::{Cell, UnsafeCell};
use intrinsics;
use ptr;
pub struct Key<T> {
// Place the inner bits in an `UnsafeCell` to currently get around the
// "only Sync statics" restriction. This allows any type to be placed in
// the cell.
//
// Note that all access requires `T: 'static` so it can't be a type with
// any borrowed pointers still.
pub inner: UnsafeCell<T>,
inner: UnsafeCell<Option<T>>,
// Metadata to keep track of the state of the destructor. Remember that
// these variables are thread-local, not global.
pub dtor_registered: UnsafeCell<bool>, // should be Cell
pub dtor_running: UnsafeCell<bool>, // should be Cell
dtor_registered: Cell<bool>,
dtor_running: Cell<bool>,
}
unsafe impl<T> ::marker::Sync for Key<T> { }
impl<T> Key<T> {
pub unsafe fn get(&'static self) -> Option<&'static T> {
if intrinsics::needs_drop::<T>() && *self.dtor_running.get() {
pub const fn new() -> Key<T> {
Key {
inner: UnsafeCell::new(None),
dtor_registered: Cell::new(false),
dtor_running: Cell::new(false)
}
}
pub unsafe fn get(&'static self) -> Option<&'static UnsafeCell<Option<T>>> {
if intrinsics::needs_drop::<T>() && self.dtor_running.get() {
return None
}
self.register_dtor();
Some(&*self.inner.get())
Some(&self.inner)
}
unsafe fn register_dtor(&self) {
if !intrinsics::needs_drop::<T>() || *self.dtor_registered.get() {
if !intrinsics::needs_drop::<T>() || self.dtor_registered.get() {
return
}
register_dtor(self as *const _ as *mut u8,
destroy_value::<T>);
*self.dtor_registered.get() = true;
self.dtor_registered.set(true);
}
}
@ -354,6 +300,7 @@ mod imp {
unsafe fn register_dtor(t: *mut u8, dtor: unsafe extern fn(*mut u8)) {
use boxed;
use mem;
use ptr;
use libc;
use sys_common::thread_local as os;
@ -381,10 +328,7 @@ mod imp {
// *should* be the case that this loop always terminates because we
// provide the guarantee that a TLS key cannot be set after it is
// flagged for destruction.
static DTORS: os::StaticKey = os::StaticKey {
inner: os::INIT_INNER,
dtor: Some(run_dtors as unsafe extern "C" fn(*mut u8)),
};
static DTORS: os::StaticKey = os::StaticKey::new(Some(run_dtors));
type List = Vec<(*mut u8, unsafe extern fn(*mut u8))>;
if DTORS.get().is_null() {
let v: Box<List> = box Vec::new();
@ -422,8 +366,8 @@ mod imp {
// Right before we run the user destructor be sure to flag the
// destructor as running for this thread so calls to `get` will return
// `None`.
*(*ptr).dtor_running.get() = true;
ptr::read((*ptr).inner.get());
(*ptr).dtor_running.set(true);
intrinsics::drop_in_place((*ptr).inner.get());
}
}
@ -433,54 +377,50 @@ mod imp {
use prelude::v1::*;
use alloc::boxed;
use cell::UnsafeCell;
use mem;
use cell::{Cell, UnsafeCell};
use marker;
use ptr;
use sys_common::thread_local::StaticKey as OsStaticKey;
pub struct Key<T> {
// Statically allocated initialization expression, using an `UnsafeCell`
// for the same reasons as above.
pub inner: UnsafeCell<T>,
// OS-TLS key that we'll use to key off.
pub os: OsStaticKey,
os: OsStaticKey,
marker: marker::PhantomData<Cell<T>>,
}
unsafe impl<T> ::marker::Sync for Key<T> { }
struct Value<T: 'static> {
key: &'static Key<T>,
value: T,
value: UnsafeCell<Option<T>>,
}
impl<T> Key<T> {
pub unsafe fn get(&'static self) -> Option<&'static T> {
self.ptr().map(|p| &*p)
impl<T: 'static> Key<T> {
pub const fn new() -> Key<T> {
Key {
os: OsStaticKey::new(Some(destroy_value::<T>)),
marker: marker::PhantomData
}
}
unsafe fn ptr(&'static self) -> Option<*mut T> {
pub unsafe fn get(&'static self) -> Option<&'static UnsafeCell<Option<T>>> {
let ptr = self.os.get() as *mut Value<T>;
if !ptr.is_null() {
if ptr as usize == 1 {
return None
}
return Some(&mut (*ptr).value as *mut T);
return Some(&(*ptr).value);
}
// If the lookup returned null, we haven't initialized our own local
// copy, so do that now.
//
// Also note that this transmute_copy should be ok because the value
// `inner` is already validated to be a valid `static` value, so we
// should be able to freely copy the bits.
let ptr: Box<Value<T>> = box Value {
key: self,
value: mem::transmute_copy(&self.inner),
value: UnsafeCell::new(None),
};
let ptr = boxed::into_raw(ptr);
self.os.set(ptr as *mut u8);
Some(&mut (*ptr).value as *mut T)
Some(&(*ptr).value)
}
}
@ -505,7 +445,7 @@ mod tests {
use prelude::v1::*;
use sync::mpsc::{channel, Sender};
use cell::UnsafeCell;
use cell::{Cell, UnsafeCell};
use super::LocalKeyState;
use thread;
@ -520,23 +460,23 @@ mod tests {
#[test]
fn smoke_no_dtor() {
thread_local!(static FOO: UnsafeCell<i32> = UnsafeCell { value: 1 });
thread_local!(static FOO: Cell<i32> = Cell::new(1));
FOO.with(|f| unsafe {
assert_eq!(*f.get(), 1);
*f.get() = 2;
FOO.with(|f| {
assert_eq!(f.get(), 1);
f.set(2);
});
let (tx, rx) = channel();
let _t = thread::spawn(move|| {
FOO.with(|f| unsafe {
assert_eq!(*f.get(), 1);
FOO.with(|f| {
assert_eq!(f.get(), 1);
});
tx.send(()).unwrap();
});
rx.recv().unwrap();
FOO.with(|f| unsafe {
assert_eq!(*f.get(), 2);
FOO.with(|f| {
assert_eq!(f.get(), 2);
});
}
@ -565,9 +505,7 @@ mod tests {
#[test]
fn smoke_dtor() {
thread_local!(static FOO: UnsafeCell<Option<Foo>> = UnsafeCell {
value: None
});
thread_local!(static FOO: UnsafeCell<Option<Foo>> = UnsafeCell::new(None));
let (tx, rx) = channel();
let _t = thread::spawn(move|| unsafe {
@ -583,12 +521,8 @@ mod tests {
fn circular() {
struct S1;
struct S2;
thread_local!(static K1: UnsafeCell<Option<S1>> = UnsafeCell {
value: None
});
thread_local!(static K2: UnsafeCell<Option<S2>> = UnsafeCell {
value: None
});
thread_local!(static K1: UnsafeCell<Option<S1>> = UnsafeCell::new(None));
thread_local!(static K2: UnsafeCell<Option<S2>> = UnsafeCell::new(None));
static mut HITS: u32 = 0;
impl Drop for S1 {
@ -626,9 +560,7 @@ mod tests {
#[test]
fn self_referential() {
struct S1;
thread_local!(static K1: UnsafeCell<Option<S1>> = UnsafeCell {
value: None
});
thread_local!(static K1: UnsafeCell<Option<S1>> = UnsafeCell::new(None));
impl Drop for S1 {
fn drop(&mut self) {
@ -644,12 +576,8 @@ mod tests {
#[test]
fn dtors_in_dtors_in_dtors() {
struct S1(Sender<()>);
thread_local!(static K1: UnsafeCell<Option<S1>> = UnsafeCell {
value: None
});
thread_local!(static K2: UnsafeCell<Option<Foo>> = UnsafeCell {
value: None
});
thread_local!(static K1: UnsafeCell<Option<S1>> = UnsafeCell::new(None));
thread_local!(static K2: UnsafeCell<Option<Foo>> = UnsafeCell::new(None));
impl Drop for S1 {
fn drop(&mut self) {

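Outside the macro internals, the user-facing behaviour of `thread_local!` is unchanged; a minimal sketch using the `Cell` form exercised by the `smoke_no_dtor` test above.

```rust
use std::cell::Cell;
use std::thread;

thread_local!(static FOO: Cell<u32> = Cell::new(1));

fn main() {
    FOO.with(|f| {
        assert_eq!(f.get(), 1);
        f.set(2);
    });
    // A new thread sees a fresh copy, initialized back to 1.
    thread::spawn(|| {
        FOO.with(|f| assert_eq!(f.get(), 1));
    }).join().unwrap();
    FOO.with(|f| assert_eq!(f.get(), 2));
}
```
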
@ -216,8 +216,7 @@ pub use self::local::{LocalKey, LocalKeyState};
consider stabilizing its interface")]
pub use self::scoped_tls::ScopedKey;
#[doc(hidden)] pub use self::local::__impl as __local;
#[doc(hidden)] pub use self::scoped_tls::__impl as __scoped;
#[doc(hidden)] pub use self::local::__KeyInner as __LocalKeyInner;
////////////////////////////////////////////////////////////////////////////////
// Builder

@ -43,13 +43,6 @@
use prelude::v1::*;
// macro hygiene sure would be nice, wouldn't it?
#[doc(hidden)]
pub mod __impl {
pub use super::imp::KeyInner;
pub use sys_common::thread_local::INIT as OS_INIT;
}
/// Type representing a thread local storage key corresponding to a reference
/// to the type parameter `T`.
///
@ -60,7 +53,7 @@ pub mod __impl {
#[unstable(feature = "scoped_tls",
reason = "scoped TLS has yet to have wide enough use to fully consider \
stabilizing its interface")]
pub struct ScopedKey<T> { #[doc(hidden)] pub inner: __impl::KeyInner<T> }
pub struct ScopedKey<T> { inner: imp::KeyInner<T> }
/// Declare a new scoped thread local storage key.
///
@ -71,18 +64,6 @@ pub struct ScopedKey<T> { #[doc(hidden)] pub inner: __impl::KeyInner<T> }
#[macro_export]
#[allow_internal_unstable]
macro_rules! scoped_thread_local {
(static $name:ident: $t:ty) => (
__scoped_thread_local_inner!(static $name: $t);
);
(pub static $name:ident: $t:ty) => (
__scoped_thread_local_inner!(pub static $name: $t);
);
}
#[macro_export]
#[doc(hidden)]
#[allow_internal_unstable]
macro_rules! __scoped_thread_local_inner {
(static $name:ident: $t:ty) => (
#[cfg_attr(not(any(windows,
target_os = "android",
@ -91,7 +72,7 @@ macro_rules! __scoped_thread_local_inner {
target_arch = "aarch64")),
thread_local)]
static $name: ::std::thread::ScopedKey<$t> =
__scoped_thread_local_inner!($t);
::std::thread::ScopedKey::new();
);
(pub static $name:ident: $t:ty) => (
#[cfg_attr(not(any(windows,
@ -101,42 +82,19 @@ macro_rules! __scoped_thread_local_inner {
target_arch = "aarch64")),
thread_local)]
pub static $name: ::std::thread::ScopedKey<$t> =
__scoped_thread_local_inner!($t);
::std::thread::ScopedKey::new();
);
($t:ty) => ({
use std::thread::ScopedKey as __Key;
#[cfg(not(any(windows,
target_os = "android",
target_os = "ios",
target_os = "openbsd",
target_arch = "aarch64")))]
const _INIT: __Key<$t> = __Key {
inner: ::std::thread::__scoped::KeyInner {
inner: ::std::cell::UnsafeCell { value: 0 as *mut _ },
}
};
#[cfg(any(windows,
target_os = "android",
target_os = "ios",
target_os = "openbsd",
target_arch = "aarch64"))]
const _INIT: __Key<$t> = __Key {
inner: ::std::thread::__scoped::KeyInner {
inner: ::std::thread::__scoped::OS_INIT,
marker: ::std::marker::PhantomData::<::std::cell::Cell<$t>>,
}
};
_INIT
})
}
#[unstable(feature = "scoped_tls",
reason = "scoped TLS has yet to have wide enough use to fully consider \
stabilizing its interface")]
impl<T> ScopedKey<T> {
#[doc(hidden)]
pub const fn new() -> ScopedKey<T> {
ScopedKey { inner: imp::KeyInner::new() }
}
/// Inserts a value into this scoped thread local storage slot for a
/// duration of a closure.
///
@ -170,7 +128,7 @@ impl<T> ScopedKey<T> {
F: FnOnce() -> R,
{
struct Reset<'a, T: 'a> {
key: &'a __impl::KeyInner<T>,
key: &'a imp::KeyInner<T>,
val: *mut T,
}
impl<'a, T> Drop for Reset<'a, T> {
@ -231,19 +189,18 @@ impl<T> ScopedKey<T> {
target_os = "openbsd",
target_arch = "aarch64")))]
mod imp {
use std::cell::UnsafeCell;
use std::cell::Cell;
#[doc(hidden)]
pub struct KeyInner<T> { pub inner: UnsafeCell<*mut T> }
pub struct KeyInner<T> { inner: Cell<*mut T> }
unsafe impl<T> ::marker::Sync for KeyInner<T> { }
#[doc(hidden)]
impl<T> KeyInner<T> {
#[doc(hidden)]
pub unsafe fn set(&self, ptr: *mut T) { *self.inner.get() = ptr; }
#[doc(hidden)]
pub unsafe fn get(&self) -> *mut T { *self.inner.get() }
pub const fn new() -> KeyInner<T> {
KeyInner { inner: Cell::new(0 as *mut _) }
}
pub unsafe fn set(&self, ptr: *mut T) { self.inner.set(ptr); }
pub unsafe fn get(&self) -> *mut T { self.inner.get() }
}
}
@ -253,23 +210,27 @@ mod imp {
target_os = "openbsd",
target_arch = "aarch64"))]
mod imp {
use prelude::v1::*;
use cell::Cell;
use marker;
use std::cell::Cell;
use sys_common::thread_local::StaticKey as OsStaticKey;
#[doc(hidden)]
pub struct KeyInner<T> {
pub inner: OsStaticKey,
pub marker: marker::PhantomData<Cell<T>>,
}
unsafe impl<T> ::marker::Sync for KeyInner<T> { }
unsafe impl<T> marker::Sync for KeyInner<T> { }
#[doc(hidden)]
impl<T> KeyInner<T> {
#[doc(hidden)]
pub const fn new() -> KeyInner<T> {
KeyInner {
inner: OsStaticKey::new(None),
marker: marker::PhantomData
}
}
pub unsafe fn set(&self, ptr: *mut T) { self.inner.set(ptr as *mut _) }
#[doc(hidden)]
pub unsafe fn get(&self) -> *mut T { self.inner.get() as *mut _ }
}
}

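A hedged sketch of the scoped key's public API after the change; the crate-side spelling is unchanged, and `scoped_tls` is still an unstable feature on this nightly.

```rust
#![feature(scoped_tls)]

scoped_thread_local!(static COUNT: u32);

fn main() {
    // The value is only visible inside the closure passed to `set`.
    COUNT.set(&5, || {
        COUNT.with(|v| assert_eq!(*v, 5));
    });
}
```
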
@ -1,3 +1,13 @@
S 2015-05-24 ba0e1cd
bitrig-x86_64 2a710e16e3e3ef3760df1f724d66b3af34c1ef3f
freebsd-x86_64 370db40613f5c08563ed7e38357826dd42d4e0f8
linux-i386 a6f22e481eabf098cc65bda97bf7e434a1fcc20b
linux-x86_64 5fd8698fdfe953e6c4d86cf4fa1d5f3a0053248c
macos-i386 9a273324a6b63a40f67a553029c0a9fb692ffd1f
macos-x86_64 e5b12cb7c179fc98fa905a3c84803645d946a6ae
winnt-i386 18d8d76c5380ee2247dd534bfb2c4ed1b3d83461
winnt-x86_64 ef27ce42af4941be24a2f6097d969ffc845a31ee
S 2015-04-27 857ef6e
bitrig-x86_64 d28e2a5f8b478e69720703e751774f5e728a8edd
freebsd-x86_64 18925db56f6298cc190d1f41615ab5871de1dda0

@ -11,12 +11,12 @@
use std::sync::atomic;
pub const C1: usize = 1;
pub const C2: atomic::AtomicUsize = atomic::ATOMIC_USIZE_INIT;
pub const C2: atomic::AtomicUsize = atomic::AtomicUsize::new(0);
pub const C3: fn() = foo;
pub const C4: usize = C1 * C1 + C1 / C1;
pub const C5: &'static usize = &C4;
pub static S1: usize = 3;
pub static S2: atomic::AtomicUsize = atomic::ATOMIC_USIZE_INIT;
pub static S2: atomic::AtomicUsize = atomic::AtomicUsize::new(0);
fn foo() {}

@ -18,9 +18,9 @@ use id::Id;
mod s {
#![allow(unstable)]
use std::sync::atomic::{AtomicUsize, ATOMIC_USIZE_INIT, Ordering};
use std::sync::atomic::{AtomicUsize, Ordering};
static S_COUNT: AtomicUsize = ATOMIC_USIZE_INIT;
static S_COUNT: AtomicUsize = AtomicUsize::new(0);
pub fn next_count() -> usize {
S_COUNT.fetch_add(1, Ordering::SeqCst) + 1

@ -26,9 +26,9 @@ use id::Id;
mod s {
#![allow(unstable)]
use std::sync::atomic::{AtomicUsize, ATOMIC_USIZE_INIT, Ordering};
use std::sync::atomic::{AtomicUsize, Ordering};
static S_COUNT: AtomicUsize = ATOMIC_USIZE_INIT;
static S_COUNT: AtomicUsize = AtomicUsize::new(0);
pub fn next_count() -> usize {
S_COUNT.fetch_add(1, Ordering::SeqCst) + 1

@ -17,9 +17,9 @@ use std::cell::Cell;
use id::Id;
mod s {
use std::sync::atomic::{AtomicUsize, ATOMIC_USIZE_INIT, Ordering};
use std::sync::atomic::{AtomicUsize, Ordering};
static S_COUNT: AtomicUsize = ATOMIC_USIZE_INIT;
static S_COUNT: AtomicUsize = AtomicUsize::new(0);
pub fn next_count() -> usize {
S_COUNT.fetch_add(1, Ordering::SeqCst) + 1

@ -17,9 +17,9 @@ use id::Id;
mod s {
#![allow(unstable)]
use std::sync::atomic::{AtomicUsize, ATOMIC_USIZE_INIT, Ordering};
use std::sync::atomic::{AtomicUsize, Ordering};
static S_COUNT: AtomicUsize = ATOMIC_USIZE_INIT;
static S_COUNT: AtomicUsize = AtomicUsize::new(0);
pub fn next_count() -> usize {
S_COUNT.fetch_add(1, Ordering::SeqCst) + 1

@ -16,7 +16,7 @@ use self::foo::S;
mod foo {
use std::cell::{UnsafeCell};
static mut count : UnsafeCell<u64> = UnsafeCell { value: 1 };
static mut count : UnsafeCell<u64> = UnsafeCell::new(1);
pub struct S { pub a: u8, pub b: String, secret_uid: u64 }

@ -10,12 +10,12 @@
use std::cell::UnsafeCell;
const A: UnsafeCell<usize> = UnsafeCell { value: 1 };
const A: UnsafeCell<usize> = UnsafeCell::new(1);
const B: &'static UnsafeCell<usize> = &A;
//~^ ERROR: cannot borrow a constant which contains interior mutability
struct C { a: UnsafeCell<usize> }
const D: C = C { a: UnsafeCell { value: 1 } };
const D: C = C { a: UnsafeCell::new(1) };
const E: &'static UnsafeCell<usize> = &D.a;
//~^ ERROR: cannot borrow a constant which contains interior mutability
const F: &'static C = &D;

@ -17,6 +17,5 @@ static boxed: Box<RefCell<isize>> = box RefCell::new(0);
//~^ ERROR allocations are not allowed in statics
//~| ERROR the trait `core::marker::Sync` is not implemented for the type
//~| ERROR the trait `core::marker::Sync` is not implemented for the type
//~| ERROR E0015
fn main() { }

@ -15,11 +15,11 @@ use std::sync::atomic::*;
use std::ptr;
fn main() {
let x = ATOMIC_BOOL_INIT;
let x = AtomicBool::new(false);
let x = *&x; //~ ERROR: cannot move out of borrowed content
let x = ATOMIC_ISIZE_INIT;
let x = AtomicIsize::new(0);
let x = *&x; //~ ERROR: cannot move out of borrowed content
let x = ATOMIC_USIZE_INIT;
let x = AtomicUsize::new(0);
let x = *&x; //~ ERROR: cannot move out of borrowed content
let x: AtomicPtr<usize> = AtomicPtr::new(ptr::null_mut());
let x = *&x; //~ ERROR: cannot move out of borrowed content

@ -28,9 +28,9 @@ use id::Id;
mod s {
#![allow(unstable)]
use std::sync::atomic::{AtomicUsize, ATOMIC_USIZE_INIT, Ordering};
use std::sync::atomic::{AtomicUsize, Ordering};
static S_COUNT: AtomicUsize = ATOMIC_USIZE_INIT;
static S_COUNT: AtomicUsize = AtomicUsize::new(0);
/// generates globally unique count (global across the current
/// process, that is)

@ -19,7 +19,7 @@
// This test makes sure that the compiler doesn't crash when trying to assign
// debug locations to const-expressions.
use std::sync::MUTEX_INIT;
use std::sync::StaticMutex;
use std::cell::UnsafeCell;
const CONSTANT: u64 = 3 + 4;
@ -49,7 +49,7 @@ const VEC: [u32; 8] = [0; 8];
const NESTED: (Struct, TupleStruct) = (STRUCT, TUPLE_STRUCT);
const UNSAFE_CELL: UnsafeCell<bool> = UnsafeCell { value: false };
const UNSAFE_CELL: UnsafeCell<bool> = UnsafeCell::new(false);
fn main() {
let mut _constant = CONSTANT;
@ -61,6 +61,6 @@ fn main() {
let mut _string = STRING;
let mut _vec = VEC;
let mut _nested = NESTED;
let mut _extern = MUTEX_INIT;
let mut _extern = StaticMutex::new();
let mut _unsafe_cell = UNSAFE_CELL;
}

@ -22,7 +22,7 @@ enum E {
C = 2
}
static FLAG: atomic::AtomicUsize = atomic::ATOMIC_USIZE_INIT;
static FLAG: atomic::AtomicUsize = atomic::AtomicUsize::new(0);
impl Drop for E {
fn drop(&mut self) {

@ -13,10 +13,10 @@
// `T`. Issue #20300.
use std::marker::{PhantomData};
use std::sync::atomic::{AtomicUsize, ATOMIC_USIZE_INIT};
use std::sync::atomic::{AtomicUsize};
use std::sync::atomic::Ordering::SeqCst;
static COUNTER: AtomicUsize = ATOMIC_USIZE_INIT;
static COUNTER: AtomicUsize = AtomicUsize::new(0);
// Preamble.
trait Trait { type Item; }

@ -12,9 +12,9 @@
// destructor.
use std::thread;
use std::sync::atomic::{AtomicUsize, ATOMIC_USIZE_INIT, Ordering};
use std::sync::atomic::{AtomicUsize, Ordering};
static LOG: AtomicUsize = ATOMIC_USIZE_INIT;
static LOG: AtomicUsize = AtomicUsize::new(0);
struct D(u8);

@ -12,9 +12,9 @@
// destructor.
use std::thread;
use std::sync::atomic::{AtomicUsize, ATOMIC_USIZE_INIT, Ordering};
use std::sync::atomic::{AtomicUsize, Ordering};
static LOG: AtomicUsize = ATOMIC_USIZE_INIT;
static LOG: AtomicUsize = AtomicUsize::new(0);
struct D(u8);

@ -38,8 +38,8 @@ unsafe impl<T: Send> Sync for UnsafeEnum<T> {}
static STATIC1: UnsafeEnum<isize> = UnsafeEnum::VariantSafe;
static STATIC2: MyUnsafePack<isize> = MyUnsafePack(UnsafeCell { value: 1 });
const CONST: MyUnsafePack<isize> = MyUnsafePack(UnsafeCell { value: 1 });
static STATIC2: MyUnsafePack<isize> = MyUnsafePack(UnsafeCell::new(1));
const CONST: MyUnsafePack<isize> = MyUnsafePack(UnsafeCell::new(1));
static STATIC3: MyUnsafe<isize> = MyUnsafe{value: CONST};
static STATIC4: &'static MyUnsafePack<isize> = &STATIC2;
@ -50,7 +50,7 @@ struct Wrap<T> {
unsafe impl<T: Send> Sync for Wrap<T> {}
static UNSAFE: MyUnsafePack<isize> = MyUnsafePack(UnsafeCell{value: 2});
static UNSAFE: MyUnsafePack<isize> = MyUnsafePack(UnsafeCell::new(2));
static WRAPPED_UNSAFE: Wrap<&'static MyUnsafePack<isize>> = Wrap { value: &UNSAFE };
fn main() {

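The test above relies on `UnsafeCell::new` being a `const fn`; a standalone sketch of that piece alone. The `Racy` wrapper is hypothetical and exists only to provide the `Sync` impl a static requires.

```rust
use std::cell::UnsafeCell;

struct Racy(UnsafeCell<usize>);

// As in the test above, promising `Sync` is on us; here the static is
// only touched from the main thread.
unsafe impl Sync for Racy {}

static COUNTER: Racy = Racy(UnsafeCell::new(0));

fn main() {
    unsafe {
        *COUNTER.0.get() += 1;
        assert_eq!(*COUNTER.0.get(), 1);
    }
}
```
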
@ -15,10 +15,10 @@
extern crate issue_17718 as other;
use std::sync::atomic::{AtomicUsize, ATOMIC_USIZE_INIT, Ordering};
use std::sync::atomic::{AtomicUsize, Ordering};
const C1: usize = 1;
const C2: AtomicUsize = ATOMIC_USIZE_INIT;
const C2: AtomicUsize = AtomicUsize::new(0);
const C3: fn() = foo;
const C4: usize = C1 * C1 + C1 / C1;
const C5: &'static usize = &C4;
@ -28,7 +28,7 @@ const C6: usize = {
};
static S1: usize = 3;
static S2: AtomicUsize = ATOMIC_USIZE_INIT;
static S2: AtomicUsize = AtomicUsize::new(0);
mod test {
static A: usize = 4;

@ -13,7 +13,7 @@
// construction.
use std::sync::atomic::{Ordering, AtomicUsize, ATOMIC_USIZE_INIT};
use std::sync::atomic::{Ordering, AtomicUsize};
#[derive(Debug)]
struct Noisy(u8);
@ -69,7 +69,7 @@ pub fn main() {
assert_eq!(0x03_04, event_log());
}
static LOG: AtomicUsize = ATOMIC_USIZE_INIT;
static LOG: AtomicUsize = AtomicUsize::new(0);
fn reset_log() {
LOG.store(0, Ordering::SeqCst);

@ -14,9 +14,9 @@
use std::thread;
use std::sync::atomic::{AtomicUsize, ATOMIC_USIZE_INIT, Ordering};
use std::sync::atomic::{AtomicUsize, Ordering};
static LOG: AtomicUsize = ATOMIC_USIZE_INIT;
static LOG: AtomicUsize = AtomicUsize::new(0);
struct D(u8);

@ -12,7 +12,7 @@
// even when no Drop-implementations are involved.
use std::sync::atomic::{Ordering, AtomicUsize, ATOMIC_USIZE_INIT};
use std::sync::atomic::{Ordering, AtomicUsize};
struct W { wrapped: u32 }
struct S { f0: W, _f1: i32 }
@ -34,7 +34,7 @@ pub fn main() {
"expect: 0x{:x} actual: 0x{:x}", expect, actual);
}
static LOG: AtomicUsize = ATOMIC_USIZE_INIT;
static LOG: AtomicUsize = AtomicUsize::new(0);
fn event_log() -> usize {
LOG.load(Ordering::SeqCst)

@ -12,7 +12,7 @@
// even when no Drop-implementations are involved.
use std::sync::atomic::{Ordering, AtomicUsize, ATOMIC_USIZE_INIT};
use std::sync::atomic::{Ordering, AtomicUsize};
struct W { wrapped: u32 }
struct S { f0: W, _f1: i32 }
@ -31,7 +31,7 @@ pub fn main() {
"expect: 0x{:x} actual: 0x{:x}", expect, actual);
}
static LOG: AtomicUsize = ATOMIC_USIZE_INIT;
static LOG: AtomicUsize = AtomicUsize::new(0);
fn event_log() -> usize {
LOG.load(Ordering::SeqCst)

@ -11,7 +11,7 @@
#![feature(rand, core)]
use std::sync::atomic::{AtomicUsize, ATOMIC_USIZE_INIT, Ordering};
use std::sync::atomic::{AtomicUsize, Ordering};
use std::__rand::{thread_rng, Rng};
use std::thread;
@ -20,20 +20,20 @@ const MAX_LEN: usize = 32;
static drop_counts: [AtomicUsize; MAX_LEN] =
// FIXME #5244: AtomicUsize is not Copy.
[
ATOMIC_USIZE_INIT, ATOMIC_USIZE_INIT, ATOMIC_USIZE_INIT,
ATOMIC_USIZE_INIT, ATOMIC_USIZE_INIT, ATOMIC_USIZE_INIT,
ATOMIC_USIZE_INIT, ATOMIC_USIZE_INIT, ATOMIC_USIZE_INIT,
ATOMIC_USIZE_INIT, ATOMIC_USIZE_INIT, ATOMIC_USIZE_INIT,
ATOMIC_USIZE_INIT, ATOMIC_USIZE_INIT, ATOMIC_USIZE_INIT,
ATOMIC_USIZE_INIT, ATOMIC_USIZE_INIT, ATOMIC_USIZE_INIT,
ATOMIC_USIZE_INIT, ATOMIC_USIZE_INIT, ATOMIC_USIZE_INIT,
ATOMIC_USIZE_INIT, ATOMIC_USIZE_INIT, ATOMIC_USIZE_INIT,
ATOMIC_USIZE_INIT, ATOMIC_USIZE_INIT, ATOMIC_USIZE_INIT,
ATOMIC_USIZE_INIT, ATOMIC_USIZE_INIT, ATOMIC_USIZE_INIT,
ATOMIC_USIZE_INIT, ATOMIC_USIZE_INIT,
AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0),
AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0),
AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0),
AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0),
AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0),
AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0),
AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0),
AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0),
AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0),
AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0),
AtomicUsize::new(0), AtomicUsize::new(0),
];
static creation_count: AtomicUsize = ATOMIC_USIZE_INIT;
static creation_count: AtomicUsize = AtomicUsize::new(0);
#[derive(Clone, PartialEq, PartialOrd, Eq, Ord)]
struct DropCounter { x: u32, creation_id: usize }