From ddb77185d36985aeac85094cb4742da93a7e821d Mon Sep 17 00:00:00 2001 From: Jorge Aparicio Date: Thu, 27 Nov 2014 09:04:20 -0500 Subject: [PATCH 01/40] impl Clone for Cow --- src/libcore/borrow.rs | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/src/libcore/borrow.rs b/src/libcore/borrow.rs index 06fda8d6092..0bfa3dac1b0 100644 --- a/src/libcore/borrow.rs +++ b/src/libcore/borrow.rs @@ -137,6 +137,18 @@ pub enum Cow<'a, T, Sized? B: 'a> where B: ToOwned { Owned(T) } +impl<'a, T, Sized? B> Clone for Cow<'a, T, B> where B: ToOwned { + fn clone(&self) -> Cow<'a, T, B> { + match *self { + Borrowed(b) => Borrowed(b), + Owned(ref o) => { + let b: &B = BorrowFrom::borrow_from(o); + Owned(b.to_owned()) + }, + } + } +} + impl<'a, T, Sized? B> Cow<'a, T, B> where B: ToOwned { /// Acquire a mutable reference to the owned form of the data. /// From 44abe92d66bdfe3a2154834d6a593c7a93c4ce32 Mon Sep 17 00:00:00 2001 From: Steve Klabnik Date: Thu, 27 Nov 2014 14:31:54 -0500 Subject: [PATCH 02/40] small doc fixes We don't need this &mut, and vec could use []s --- src/libstd/io/mem.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/libstd/io/mem.rs b/src/libstd/io/mem.rs index f27951f263d..4f18f09d352 100644 --- a/src/libstd/io/mem.rs +++ b/src/libstd/io/mem.rs @@ -280,10 +280,10 @@ impl<'a> Seek for BufWriter<'a> { /// # #![allow(unused_must_use)] /// use std::io::BufReader; /// -/// let mut buf = [0, 1, 2, 3]; -/// let mut r = BufReader::new(&mut buf); +/// let buf = [0, 1, 2, 3]; +/// let mut r = BufReader::new(&buf); /// -/// assert_eq!(r.read_to_end().unwrap(), vec!(0, 1, 2, 3)); +/// assert_eq!(r.read_to_end().unwrap(), vec![0, 1, 2, 3]); /// ``` pub struct BufReader<'a> { buf: &'a [u8], From a74b492763a212399276133287b053616dad0306 Mon Sep 17 00:00:00 2001 From: Jauhien Piatlicki Date: Sat, 29 Nov 2014 01:36:34 +0100 Subject: [PATCH 03/40] fix expand_quote_ty function as parse_ty was changed and needs no arguments now --- src/libsyntax/ext/quote.rs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/libsyntax/ext/quote.rs b/src/libsyntax/ext/quote.rs index 3fca110a881..45752499ad5 100644 --- a/src/libsyntax/ext/quote.rs +++ b/src/libsyntax/ext/quote.rs @@ -450,9 +450,8 @@ pub fn expand_quote_ty(cx: &mut ExtCtxt, sp: Span, tts: &[ast::TokenTree]) -> Box { - let e_param_colons = cx.expr_lit(sp, ast::LitBool(false)); let expanded = expand_parse_call(cx, sp, "parse_ty", - vec!(e_param_colons), tts); + vec![], tts); base::MacExpr::new(expanded) } From 9460d1744f812dd0c162ad9299959cae009671d2 Mon Sep 17 00:00:00 2001 From: kulakowski Date: Sat, 29 Nov 2014 01:38:32 -0600 Subject: [PATCH 04/40] Fix typo in reference.md --- src/doc/reference.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/doc/reference.md b/src/doc/reference.md index 1d27ac096df..47c25548af0 100644 --- a/src/doc/reference.md +++ b/src/doc/reference.md @@ -522,7 +522,7 @@ The two values of the boolean type are written `true` and `false`. 
### Symbols ```{.ebnf .gram} -symbol : "::" "->" +symbol : "::" | "->" | '#' | '[' | ']' | '(' | ')' | '{' | '}' | ',' | ';' ; ``` From f5715f7867ab7e13fd3304d85861b1dcb1375a89 Mon Sep 17 00:00:00 2001 From: P1start Date: Sun, 30 Nov 2014 17:39:50 +1300 Subject: [PATCH 05/40] Allow trailing commas in array patterns and attributes --- src/libsyntax/parse/attr.rs | 2 +- src/libsyntax/parse/common.rs | 7 +------ src/libsyntax/parse/parser.rs | 5 +++++ .../compile-fail/trailing-comma-array-repeat.rs | 13 +++++++++++++ src/test/run-pass/trailing-comma.rs | 6 ++++++ 5 files changed, 26 insertions(+), 7 deletions(-) create mode 100644 src/test/compile-fail/trailing-comma-array-repeat.rs diff --git a/src/libsyntax/parse/attr.rs b/src/libsyntax/parse/attr.rs index 0c919daa8ed..40703049cc3 100644 --- a/src/libsyntax/parse/attr.rs +++ b/src/libsyntax/parse/attr.rs @@ -212,7 +212,7 @@ impl<'a> ParserAttr for Parser<'a> { fn parse_meta_seq(&mut self) -> Vec> { self.parse_seq(&token::OpenDelim(token::Paren), &token::CloseDelim(token::Paren), - seq_sep_trailing_disallowed(token::Comma), + seq_sep_trailing_allowed(token::Comma), |p| p.parse_meta_item()).node } diff --git a/src/libsyntax/parse/common.rs b/src/libsyntax/parse/common.rs index 3842170d677..a96bf1ce10b 100644 --- a/src/libsyntax/parse/common.rs +++ b/src/libsyntax/parse/common.rs @@ -19,18 +19,13 @@ pub struct SeqSep { pub trailing_sep_allowed: bool } -pub fn seq_sep_trailing_disallowed(t: token::Token) -> SeqSep { - SeqSep { - sep: Some(t), - trailing_sep_allowed: false, - } -} pub fn seq_sep_trailing_allowed(t: token::Token) -> SeqSep { SeqSep { sep: Some(t), trailing_sep_allowed: true, } } + pub fn seq_sep_none() -> SeqSep { SeqSep { sep: None, diff --git a/src/libsyntax/parse/parser.rs b/src/libsyntax/parse/parser.rs index 447f2a376e1..9623a1b75b5 100644 --- a/src/libsyntax/parse/parser.rs +++ b/src/libsyntax/parse/parser.rs @@ -3129,6 +3129,11 @@ impl<'a> Parser<'a> { first = false; } else { self.expect(&token::Comma); + + if self.token == token::CloseDelim(token::Bracket) + && (before_slice || after.len() != 0) { + break + } } if before_slice { diff --git a/src/test/compile-fail/trailing-comma-array-repeat.rs b/src/test/compile-fail/trailing-comma-array-repeat.rs new file mode 100644 index 00000000000..dadd6571583 --- /dev/null +++ b/src/test/compile-fail/trailing-comma-array-repeat.rs @@ -0,0 +1,13 @@ +// Copyright 2014 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +fn main() { + let [_, ..,] = [(), ()]; //~ ERROR unexpected token: `]` +} diff --git a/src/test/run-pass/trailing-comma.rs b/src/test/run-pass/trailing-comma.rs index 5e93f8eedb7..00e05064080 100644 --- a/src/test/run-pass/trailing-comma.rs +++ b/src/test/run-pass/trailing-comma.rs @@ -8,6 +8,8 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
+#![feature(advanced_slice_patterns,)] + fn f(_: T,) {} struct Foo; @@ -24,9 +26,13 @@ enum Baz { Qux(int,), } +#[allow(unused,)] pub fn main() { f::(0i,); let (_, _,) = (1i, 1i,); + let [_, _,] = [1i, 1,]; + let [_, _, .., _,] = [1i, 1, 1, 1,]; + let [_, _, _.., _,] = [1i, 1, 1, 1,]; let x: Foo = Foo::; From 5048953debe9eb44c672f3c453e894bd64a1301a Mon Sep 17 00:00:00 2001 From: Tobias Bucher Date: Sat, 29 Nov 2014 01:20:14 +0100 Subject: [PATCH 06/40] Simplify `RefCell` code a bit, make `deref` a no-op. --- src/libcore/cell.rs | 122 ++++++++++++++++++++++++++++---------------- 1 file changed, 77 insertions(+), 45 deletions(-) diff --git a/src/libcore/cell.rs b/src/libcore/cell.rs index 587bb4cb110..ed4df101202 100644 --- a/src/libcore/cell.rs +++ b/src/libcore/cell.rs @@ -277,12 +277,9 @@ impl RefCell { /// Returns `None` if the value is currently mutably borrowed. #[unstable = "may be renamed, depending on global conventions"] pub fn try_borrow<'a>(&'a self) -> Option> { - match self.borrow.get() { - WRITING => None, - borrow => { - self.borrow.set(borrow + 1); - Some(Ref { _parent: self }) - } + match BorrowRef::new(&self.borrow) { + Some(b) => Some(Ref { _value: unsafe { &*self.value.get() }, _borrow: b }), + None => None, } } @@ -310,12 +307,9 @@ impl RefCell { /// Returns `None` if the value is currently borrowed. #[unstable = "may be renamed, depending on global conventions"] pub fn try_borrow_mut<'a>(&'a self) -> Option> { - match self.borrow.get() { - UNUSED => { - self.borrow.set(WRITING); - Some(RefMut { _parent: self }) - }, - _ => None + match BorrowRefMut::new(&self.borrow) { + Some(b) => Some(RefMut { _value: unsafe { &mut *self.value.get() }, _borrow: b }), + None => None, } } @@ -368,29 +362,56 @@ impl PartialEq for RefCell { } } +struct BorrowRef<'b> { + _borrow: &'b Cell, +} + +impl<'b> BorrowRef<'b> { + fn new(borrow: &'b Cell) -> Option> { + match borrow.get() { + WRITING => None, + b => { + borrow.set(b + 1); + Some(BorrowRef { _borrow: borrow }) + }, + } + } +} + +#[unsafe_destructor] +impl<'b> Drop for BorrowRef<'b> { + fn drop(&mut self) { + let borrow = self._borrow.get(); + debug_assert!(borrow != WRITING && borrow != UNUSED); + self._borrow.set(borrow - 1); + } +} + +impl<'b> Clone for BorrowRef<'b> { + fn clone(&self) -> BorrowRef<'b> { + // Since this Ref exists, we know the borrow flag + // is not set to WRITING. + let borrow = self._borrow.get(); + debug_assert!(borrow != WRITING && borrow != UNUSED); + self._borrow.set(borrow + 1); + BorrowRef { _borrow: self._borrow } + } +} + /// Wraps a borrowed reference to a value in a `RefCell` box. #[unstable] pub struct Ref<'b, T:'b> { // FIXME #12808: strange name to try to avoid interfering with // field accesses of the contained type via Deref - _parent: &'b RefCell -} - -#[unsafe_destructor] -#[unstable] -impl<'b, T> Drop for Ref<'b, T> { - fn drop(&mut self) { - let borrow = self._parent.borrow.get(); - debug_assert!(borrow != WRITING && borrow != UNUSED); - self._parent.borrow.set(borrow - 1); - } + _value: &'b T, + _borrow: BorrowRef<'b>, } #[unstable = "waiting for `Deref` to become stable"] impl<'b, T> Deref for Ref<'b, T> { #[inline] fn deref<'a>(&'a self) -> &'a T { - unsafe { &*self._parent.value.get() } + self._value } } @@ -401,15 +422,35 @@ impl<'b, T> Deref for Ref<'b, T> { /// A `Clone` implementation would interfere with the widespread /// use of `r.borrow().clone()` to clone the contents of a `RefCell`. 
#[experimental = "likely to be moved to a method, pending language changes"] -pub fn clone_ref<'b, T>(orig: &Ref<'b, T>) -> Ref<'b, T> { - // Since this Ref exists, we know the borrow flag - // is not set to WRITING. - let borrow = orig._parent.borrow.get(); - debug_assert!(borrow != WRITING && borrow != UNUSED); - orig._parent.borrow.set(borrow + 1); - +pub fn clone_ref<'b, T:Clone>(orig: &Ref<'b, T>) -> Ref<'b, T> { Ref { - _parent: orig._parent, + _value: orig._value, + _borrow: orig._borrow.clone(), + } +} + +struct BorrowRefMut<'b> { + _borrow: &'b Cell, +} + +#[unsafe_destructor] +impl<'b> Drop for BorrowRefMut<'b> { + fn drop(&mut self) { + let borrow = self._borrow.get(); + debug_assert!(borrow == WRITING); + self._borrow.set(UNUSED); + } +} + +impl<'b> BorrowRefMut<'b> { + fn new(borrow: &'b Cell) -> Option> { + match borrow.get() { + UNUSED => { + borrow.set(WRITING); + Some(BorrowRefMut { _borrow: borrow }) + }, + _ => None, + } } } @@ -418,24 +459,15 @@ pub fn clone_ref<'b, T>(orig: &Ref<'b, T>) -> Ref<'b, T> { pub struct RefMut<'b, T:'b> { // FIXME #12808: strange name to try to avoid interfering with // field accesses of the contained type via Deref - _parent: &'b RefCell -} - -#[unsafe_destructor] -#[unstable] -impl<'b, T> Drop for RefMut<'b, T> { - fn drop(&mut self) { - let borrow = self._parent.borrow.get(); - debug_assert!(borrow == WRITING); - self._parent.borrow.set(UNUSED); - } + _value: &'b mut T, + _borrow: BorrowRefMut<'b>, } #[unstable = "waiting for `Deref` to become stable"] impl<'b, T> Deref for RefMut<'b, T> { #[inline] fn deref<'a>(&'a self) -> &'a T { - unsafe { &*self._parent.value.get() } + self._value } } @@ -443,7 +475,7 @@ impl<'b, T> Deref for RefMut<'b, T> { impl<'b, T> DerefMut for RefMut<'b, T> { #[inline] fn deref_mut<'a>(&'a mut self) -> &'a mut T { - unsafe { &mut *self._parent.value.get() } + self._value } } From e407472f90247bd23485191b9ecaab89a90326c8 Mon Sep 17 00:00:00 2001 From: Paul Collier Date: Mon, 1 Dec 2014 20:43:50 -0800 Subject: [PATCH 07/40] rustdoc: Check for href when prepending rootPath Fixes #18354. --- src/librustdoc/html/static/main.js | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/librustdoc/html/static/main.js b/src/librustdoc/html/static/main.js index 7c6f7ed3fe2..8943d942cef 100644 --- a/src/librustdoc/html/static/main.js +++ b/src/librustdoc/html/static/main.js @@ -707,8 +707,8 @@ var code = $('').append(structs[j]); $.each(code.find('a'), function(idx, a) { var href = $(a).attr('href'); - if (!href.startsWith('http')) { - $(a).attr('href', rootPath + $(a).attr('href')); + if (href && !href.startsWith('http')) { + $(a).attr('href', rootPath + href); } }); var li = $('
  • ').append(code); From 5bbe5d6d931519e424aee21664fd37d33152817e Mon Sep 17 00:00:00 2001 From: Matej Lach Date: Tue, 2 Dec 2014 13:40:18 +0000 Subject: [PATCH 08/40] better wording --- src/doc/guide.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/doc/guide.md b/src/doc/guide.md index c2d43a20ec4..ce5730ad9ce 100644 --- a/src/doc/guide.md +++ b/src/doc/guide.md @@ -2064,8 +2064,8 @@ Great! Next up: let's compare our guess to the secret guess. ## Comparing guesses If you remember, earlier in the guide, we made a `cmp` function that compared -two numbers. Let's add that in, along with a `match` statement to compare the -guess to the secret guess: +two numbers. Let's add that in, along with a `match` statement to compare our +guess to the secret number: ```{rust,ignore} use std::io; From ebf22cbf6a1821a808ca3b11708cf2f4dc60c0a8 Mon Sep 17 00:00:00 2001 From: Matej Lach Date: Tue, 2 Dec 2014 16:10:40 +0000 Subject: [PATCH 09/40] replace 'but' with 'and' --- src/doc/guide.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/doc/guide.md b/src/doc/guide.md index c2d43a20ec4..afc0811300d 100644 --- a/src/doc/guide.md +++ b/src/doc/guide.md @@ -2861,7 +2861,7 @@ parts of your library. The six levels are: * experimental: This item was only recently introduced or is otherwise in a state of flux. It may change significantly, or even be removed. No guarantee of backwards-compatibility. -* unstable: This item is still under development, but requires more testing to +* unstable: This item is still under development and requires more testing to be considered stable. No guarantee of backwards-compatibility. * stable: This item is considered stable, and will not change significantly. Guarantee of backwards-compatibility. From 6182084e58bf3fde2e700424bf7d6ea708641f74 Mon Sep 17 00:00:00 2001 From: Victor van den Elzen Date: Tue, 2 Dec 2014 20:42:09 +0100 Subject: [PATCH 10/40] Improve documentation of checked_* functions --- src/libcore/num/mod.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/libcore/num/mod.rs b/src/libcore/num/mod.rs index ce61bd97e13..748e8942a3f 100644 --- a/src/libcore/num/mod.rs +++ b/src/libcore/num/mod.rs @@ -284,7 +284,7 @@ pub trait Int /// ``` fn checked_add(self, other: Self) -> Option; - /// Checked integer subtraction. Computes `self + other`, returning `None` + /// Checked integer subtraction. Computes `self - other`, returning `None` /// if underflow occurred. /// /// # Example @@ -297,7 +297,7 @@ pub trait Int /// ``` fn checked_sub(self, other: Self) -> Option; - /// Checked integer multiplication. Computes `self + other`, returning + /// Checked integer multiplication. Computes `self * other`, returning /// `None` if underflow or overflow occurred. /// /// # Example @@ -310,8 +310,8 @@ pub trait Int /// ``` fn checked_mul(self, other: Self) -> Option; - /// Checked integer division. Computes `self + other` returning `None` if - /// `self == 0` or the operation results in underflow or overflow. + /// Checked integer division. Computes `self / other`, returning `None` if + /// `other == 0` or the operation results in underflow or overflow. 
/// /// # Example /// From d8c5269dd2eb14e3d1fefa4a2a26f3ff6a9f1ba8 Mon Sep 17 00:00:00 2001 From: Victor van den Elzen Date: Tue, 2 Dec 2014 20:43:47 +0100 Subject: [PATCH 11/40] Update comment to current file path for intrinsics --- src/libcore/intrinsics.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/libcore/intrinsics.rs b/src/libcore/intrinsics.rs index 78c74075d48..11a1073343e 100644 --- a/src/libcore/intrinsics.rs +++ b/src/libcore/intrinsics.rs @@ -10,7 +10,7 @@ //! rustc compiler intrinsics. //! -//! The corresponding definitions are in librustc/middle/trans/foreign.rs. +//! The corresponding definitions are in librustc_trans/trans/intrinsic.rs. //! //! # Volatiles //! From 886ff4f3c3d05d4dda13390f045a6eb577f1e509 Mon Sep 17 00:00:00 2001 From: Luqman Aden Date: Tue, 2 Dec 2014 17:33:52 -0500 Subject: [PATCH 12/40] lldb: Fix pretty printer for nullable-opt enums with fat pointers. --- src/etc/lldb_rust_formatters.py | 9 +++++++-- src/test/debuginfo/option-like-enum.rs | 9 +++++++++ 2 files changed, 16 insertions(+), 2 deletions(-) diff --git a/src/etc/lldb_rust_formatters.py b/src/etc/lldb_rust_formatters.py index 7924d63c8e0..f4f1a5121d1 100644 --- a/src/etc/lldb_rust_formatters.py +++ b/src/etc/lldb_rust_formatters.py @@ -138,9 +138,14 @@ def print_enum_val(val, internal_dict): return "" % first_variant_name # Read the discriminant - disr_val = val.GetChildAtIndex(0).GetChildAtIndex(disr_field_index).GetValueAsUnsigned() + disr_val = val.GetChildAtIndex(0).GetChildAtIndex(disr_field_index) - if disr_val == 0: + # If the discriminant field is a fat pointer we have to consider the + # first word as the true discriminant + if disr_val.GetType().GetTypeClass() == lldb.eTypeClassStruct: + disr_val = disr_val.GetChildAtIndex(0) + + if disr_val.GetValueAsUnsigned() == 0: # Null case: Print the name of the null-variant null_variant_name = first_variant_name[last_separator_index + 1:] return null_variant_name diff --git a/src/test/debuginfo/option-like-enum.rs b/src/test/debuginfo/option-like-enum.rs index 11c594bac59..333a430e351 100644 --- a/src/test/debuginfo/option-like-enum.rs +++ b/src/test/debuginfo/option-like-enum.rs @@ -61,6 +61,12 @@ // lldb-command:print void_droid // lldb-check:[...]$5 = Void +// lldb-command:print some_str +// lldb-check:[...]$6 = Some(&str { data_ptr: [...], length: 3 }) + +// lldb-command:print none_str +// lldb-check:[...]$7 = None + // If a struct has exactly two variants, one of them is empty, and the other one // contains a non-nullable pointer, then this value is used as the discriminator. @@ -96,6 +102,9 @@ struct NamedFieldsRepr<'a> { fn main() { + let some_str: Option<&'static str> = Some("abc"); + let none_str: Option<&'static str> = None; + let some: Option<&u32> = Some(unsafe { std::mem::transmute(0x12345678u) }); let none: Option<&u32> = None; From 89d09953733e72d61876c633dc0658180aefc4d6 Mon Sep 17 00:00:00 2001 From: Luqman Aden Date: Tue, 2 Dec 2014 18:23:35 -0500 Subject: [PATCH 13/40] gdb: Fix pretty printer for nullable-opt enums with fat pointers. 
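For context, the kind of value this affects is the one exercised by the test added below; the snippet here is only an illustrative sketch, not part of the patch itself:

    // `Option<&str>` is a space-optimized ("nullable") enum: there is no
    // separate discriminant, and `&str` is a fat pointer (data pointer plus
    // length). Only the first word -- the data pointer -- distinguishes
    // `None` (null) from `Some` (non-null), so the pretty printers must read
    // that first field rather than treating the whole fat-pointer struct as
    // the discriminant.
    fn main() {
        let some_str: Option<&'static str> = Some("abc");
        let none_str: Option<&'static str> = None;
        assert!(some_str.is_some());
        assert!(none_str.is_none());
    }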
--- src/etc/gdb_rust_pretty_printing.py | 16 +++++++++++----- .../debuginfo/gdb-pretty-struct-and-enums.rs | 12 ++++++++++-- 2 files changed, 21 insertions(+), 7 deletions(-) diff --git a/src/etc/gdb_rust_pretty_printing.py b/src/etc/gdb_rust_pretty_printing.py index 1af649f0731..7e5918ea39e 100644 --- a/src/etc/gdb_rust_pretty_printing.py +++ b/src/etc/gdb_rust_pretty_printing.py @@ -54,13 +54,14 @@ def rust_pretty_printer_lookup_function(val): return RustStructPrinter(val, false) if enum_member_count == 1: - if enum_members[0].name == None: + first_variant_name = enum_members[0].name + if first_variant_name == None: # This is a singleton enum return rust_pretty_printer_lookup_function(val[enum_members[0]]) else: - assert enum_members[0].name.startswith("RUST$ENCODED$ENUM$") + assert first_variant_name.startswith("RUST$ENCODED$ENUM$") # This is a space-optimized enum - last_separator_index = enum_members[0].name.rfind("$") + last_separator_index = first_variant_name.rfind("$") second_last_separator_index = first_variant_name.rfind("$", 0, last_separator_index) disr_field_index = first_variant_name[second_last_separator_index + 1 : last_separator_index] @@ -68,7 +69,12 @@ def rust_pretty_printer_lookup_function(val): sole_variant_val = val[enum_members[0]] disr_field = get_field_at_index(sole_variant_val, disr_field_index) - discriminant = int(sole_variant_val[disr_field]) + discriminant = sole_variant_val[disr_field] + + # If the discriminant field is a fat pointer we have to consider the + # first word as the true discriminant + if discriminant.type.code == gdb.TYPE_CODE_STRUCT: + discriminant = discriminant[get_field_at_index(discriminant, 0)] if discriminant == 0: null_variant_name = first_variant_name[last_separator_index + 1:] @@ -173,7 +179,7 @@ class RustCStyleEnumPrinter: class IdentityPrinter: def __init__(self, string): - self.string + self.string = string def to_string(self): return self.string diff --git a/src/test/debuginfo/gdb-pretty-struct-and-enums.rs b/src/test/debuginfo/gdb-pretty-struct-and-enums.rs index 9a42cd92fdc..76cf3c1149d 100644 --- a/src/test/debuginfo/gdb-pretty-struct-and-enums.rs +++ b/src/test/debuginfo/gdb-pretty-struct-and-enums.rs @@ -58,11 +58,17 @@ // gdb-command: print none // gdb-check:$12 = None +// gdb-command: print some_fat +// gdb-check:$13 = Some = {"abc"} + +// gdb-command: print none_fat +// gdb-check:$14 = None + // gdb-command: print nested_variant1 -// gdb-check:$13 = NestedVariant1 = {NestedStruct = {regular_struct = RegularStruct = {the_first_field = 111, the_second_field = 112.5, the_third_field = true, the_fourth_field = "NestedStructString1"}, tuple_struct = TupleStruct = {113.5, 114}, empty_struct = EmptyStruct, c_style_enum = CStyleEnumVar2, mixed_enum = MixedEnumTupleVar = {115, 116, false}}} +// gdb-check:$15 = NestedVariant1 = {NestedStruct = {regular_struct = RegularStruct = {the_first_field = 111, the_second_field = 112.5, the_third_field = true, the_fourth_field = "NestedStructString1"}, tuple_struct = TupleStruct = {113.5, 114}, empty_struct = EmptyStruct, c_style_enum = CStyleEnumVar2, mixed_enum = MixedEnumTupleVar = {115, 116, false}}} // gdb-command: print nested_variant2 -// gdb-check:$14 = NestedVariant2 = {abc = NestedStruct = {regular_struct = RegularStruct = {the_first_field = 117, the_second_field = 118.5, the_third_field = false, the_fourth_field = "NestedStructString10"}, tuple_struct = TupleStruct = {119.5, 120}, empty_struct = EmptyStruct, c_style_enum = CStyleEnumVar3, mixed_enum = MixedEnumStructVar = {field1 
= 121.5, field2 = -122}}} +// gdb-check:$16 = NestedVariant2 = {abc = NestedStruct = {regular_struct = RegularStruct = {the_first_field = 117, the_second_field = 118.5, the_third_field = false, the_fourth_field = "NestedStructString10"}, tuple_struct = TupleStruct = {119.5, 120}, empty_struct = EmptyStruct, c_style_enum = CStyleEnumVar3, mixed_enum = MixedEnumStructVar = {field1 = 121.5, field2 = -122}}} use self::CStyleEnum::{CStyleEnumVar1, CStyleEnumVar2, CStyleEnumVar3}; use self::MixedEnum::{MixedEnumCStyleVar, MixedEnumTupleVar, MixedEnumStructVar}; @@ -129,6 +135,8 @@ fn main() { let some = Some(110u); let none: Option = None; + let some_fat = Some("abc"); + let none_fat: Option<&'static str> = None; let nested_variant1 = NestedVariant1( NestedStruct { From 851c7b5e0f76e32e5a1316befc7465bdc573ac43 Mon Sep 17 00:00:00 2001 From: Clark Gaebel Date: Tue, 2 Dec 2014 17:32:37 -0800 Subject: [PATCH 14/40] Fixed out of date comment on `copy_memory` --- src/libcore/slice.rs | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/libcore/slice.rs b/src/libcore/slice.rs index 950f04a5d97..906cbd72104 100644 --- a/src/libcore/slice.rs +++ b/src/libcore/slice.rs @@ -1781,12 +1781,13 @@ pub mod bytes { /// Copies data from `src` to `dst` /// - /// `src` and `dst` must not overlap. Panics if the length of `dst` - /// is less than the length of `src`. + /// Panics if the length of `dst` is less than the length of `src`. #[inline] pub fn copy_memory(dst: &mut [u8], src: &[u8]) { let len_src = src.len(); assert!(dst.len() >= len_src); + // `dst` is unaliasable, so we know statically it doesn't overlap + // with `src`. unsafe { ptr::copy_nonoverlapping_memory(dst.as_mut_ptr(), src.as_ptr(), From ea6f628709a468dc70344cc8c59efb7aa1f70821 Mon Sep 17 00:00:00 2001 From: Oliver Schneider Date: Wed, 3 Dec 2014 09:16:00 +0100 Subject: [PATCH 15/40] libtest: get rid of dependency to ToJson deriving encodable + using json::PrettyEncoder removes the only ToJson trait implementation in the rust repository outside of libserialize --- src/libtest/lib.rs | 24 +++--------------------- 1 file changed, 3 insertions(+), 21 deletions(-) diff --git a/src/libtest/lib.rs b/src/libtest/lib.rs index fbc60c9b342..049f2cc5603 100644 --- a/src/libtest/lib.rs +++ b/src/libtest/lib.rs @@ -51,8 +51,7 @@ use std::collections::TreeMap; use stats::Stats; use getopts::{OptGroup, optflag, optopt}; use regex::Regex; -use serialize::{json, Decodable}; -use serialize::json::{Json, ToJson}; +use serialize::{json, Decodable, Encodable}; use term::Terminal; use term::color::{Color, RED, YELLOW, GREEN, CYAN}; @@ -1100,17 +1099,6 @@ fn calc_result(desc: &TestDesc, task_succeeded: bool) -> TestResult { } } - -impl ToJson for Metric { - fn to_json(&self) -> json::Json { - let mut map = TreeMap::new(); - map.insert("value".to_string(), json::Json::F64(self.value)); - map.insert("noise".to_string(), json::Json::F64(self.noise)); - json::Json::Object(map) - } -} - - impl MetricMap { pub fn new() -> MetricMap { @@ -1138,14 +1126,8 @@ impl MetricMap { pub fn save(&self, p: &Path) -> io::IoResult<()> { let mut file = try!(File::create(p)); let MetricMap(ref map) = *self; - - // FIXME(pcwalton): Yuck. - let mut new_map = TreeMap::new(); - for (ref key, ref value) in map.iter() { - new_map.insert(key.to_string(), (*value).clone()); - } - - new_map.to_json().to_pretty_writer(&mut file) + let mut enc = json::PrettyEncoder::new(&mut file); + map.encode(&mut enc) } /// Compare against another MetricMap. 
Optionally compare all From 861e11ceebf862f15917c4c36a0d2a43c88680d3 Mon Sep 17 00:00:00 2001 From: Steve Klabnik Date: Wed, 3 Dec 2014 03:29:57 -0500 Subject: [PATCH 16/40] Remove outdated comment. https://github.com/rust-lang/rust/pull/19472#issuecomment-65370278 --- src/libsyntax/feature_gate.rs | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/src/libsyntax/feature_gate.rs b/src/libsyntax/feature_gate.rs index 7453da6374e..5d984678b7f 100644 --- a/src/libsyntax/feature_gate.rs +++ b/src/libsyntax/feature_gate.rs @@ -32,9 +32,7 @@ use parse::token; use std::slice; -/// This is a list of all known features since the beginning of time. This list -/// can never shrink, it may only be expanded (in order to prevent old programs -/// from failing to compile). The status of each feature may change, however. +// if you change this list without updating src/doc/reference.md, @cmr will be sad static KNOWN_FEATURES: &'static [(&'static str, Status)] = &[ ("globs", Active), ("macro_rules", Active), @@ -73,8 +71,6 @@ static KNOWN_FEATURES: &'static [(&'static str, Status)] = &[ ("if_let", Active), ("while_let", Active), - // if you change this list without updating src/doc/reference.md, cmr will be sad - // A temporary feature gate used to enable parser extensions needed // to bootstrap fix for #5723. ("issue_5723_bootstrap", Accepted), From 131d4ed018f43a07324aebdd68a997b731945b0b Mon Sep 17 00:00:00 2001 From: Kang Seonghoon Date: Thu, 4 Dec 2014 00:56:45 +0900 Subject: [PATCH 17/40] rustdoc: Fixed a missing rendering of ForeignStaticItems. --- src/librustdoc/html/render.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/librustdoc/html/render.rs b/src/librustdoc/html/render.rs index 2be703e2458..8d7522af027 100644 --- a/src/librustdoc/html/render.rs +++ b/src/librustdoc/html/render.rs @@ -1435,7 +1435,8 @@ impl<'a> fmt::Show for Item<'a> { clean::TypedefItem(ref t) => item_typedef(fmt, self.item, t), clean::MacroItem(ref m) => item_macro(fmt, self.item, m), clean::PrimitiveItem(ref p) => item_primitive(fmt, self.item, p), - clean::StaticItem(ref i) => item_static(fmt, self.item, i), + clean::StaticItem(ref i) | clean::ForeignStaticItem(ref i) => + item_static(fmt, self.item, i), clean::ConstantItem(ref c) => item_constant(fmt, self.item, c), _ => Ok(()) } From a3bb8585e89f1eade039111733a2f1efa331e6ff Mon Sep 17 00:00:00 2001 From: Kang Seonghoon Date: Wed, 3 Dec 2014 23:57:45 +0900 Subject: [PATCH 18/40] rustdoc: Refactored various uses of ItemType. In particular, ItemType variants are no longer reexported. Since we already do namespace them via `item_type` mod, it's fine. 
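As a sketch of the resulting call-site style (an illustration of the pattern only, with a cut-down hypothetical enum, not rustdoc's actual definitions, which follow in the diff):

    // Variants stay namespaced under the enum instead of being reexported,
    // so callers spell `ItemType::Module` rather than a bare `Module`.
    mod item_type {
        pub enum ItemType { Module, Struct, Function }

        impl ItemType {
            pub fn to_static_str(&self) -> &'static str {
                match *self {
                    ItemType::Module => "mod",
                    ItemType::Struct => "struct",
                    ItemType::Function => "fn",
                }
            }
        }
    }

    fn main() {
        use item_type::ItemType;
        assert_eq!(ItemType::Module.to_static_str(), "mod");
    }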
--- src/librustdoc/html/format.rs | 3 +- src/librustdoc/html/item_type.rs | 100 +++++++++++---------- src/librustdoc/html/render.rs | 146 +++++++++++++++---------------- 3 files changed, 129 insertions(+), 120 deletions(-) diff --git a/src/librustdoc/html/format.rs b/src/librustdoc/html/format.rs index 6e0b76d441b..9861d18ce51 100644 --- a/src/librustdoc/html/format.rs +++ b/src/librustdoc/html/format.rs @@ -23,7 +23,6 @@ use syntax::ast_util; use clean; use stability_summary::ModuleSummary; -use html::item_type; use html::item_type::ItemType; use html::render; use html::render::{cache, CURRENT_LOCATION_KEY}; @@ -283,7 +282,7 @@ fn path(w: &mut fmt::Formatter, path: &clean::Path, print_all: bool, url.push_str("/"); } match shortty { - item_type::Module => { + ItemType::Module => { url.push_str(fqp.last().unwrap().as_slice()); url.push_str("/index.html"); } diff --git a/src/librustdoc/html/item_type.rs b/src/librustdoc/html/item_type.rs index cb3ad9d063f..1917184c5a9 100644 --- a/src/librustdoc/html/item_type.rs +++ b/src/librustdoc/html/item_type.rs @@ -9,7 +9,6 @@ // except according to those terms. //! Item types. -pub use self::ItemType::*; use std::fmt; use clean; @@ -44,27 +43,64 @@ pub enum ItemType { } impl ItemType { + pub fn from_item(item: &clean::Item) -> ItemType { + match item.inner { + clean::ModuleItem(..) => ItemType::Module, + clean::StructItem(..) => ItemType::Struct, + clean::EnumItem(..) => ItemType::Enum, + clean::FunctionItem(..) => ItemType::Function, + clean::TypedefItem(..) => ItemType::Typedef, + clean::StaticItem(..) => ItemType::Static, + clean::ConstantItem(..) => ItemType::Constant, + clean::TraitItem(..) => ItemType::Trait, + clean::ImplItem(..) => ItemType::Impl, + clean::ViewItemItem(..) => ItemType::ViewItem, + clean::TyMethodItem(..) => ItemType::TyMethod, + clean::MethodItem(..) => ItemType::Method, + clean::StructFieldItem(..) => ItemType::StructField, + clean::VariantItem(..) => ItemType::Variant, + clean::ForeignFunctionItem(..) => ItemType::ForeignFunction, + clean::ForeignStaticItem(..) => ItemType::ForeignStatic, + clean::MacroItem(..) => ItemType::Macro, + clean::PrimitiveItem(..) => ItemType::Primitive, + clean::AssociatedTypeItem(..) 
=> ItemType::AssociatedType, + } + } + + pub fn from_type_kind(kind: clean::TypeKind) -> ItemType { + match kind { + clean::TypeStruct => ItemType::Struct, + clean::TypeEnum => ItemType::Enum, + clean::TypeFunction => ItemType::Function, + clean::TypeTrait => ItemType::Trait, + clean::TypeModule => ItemType::Module, + clean::TypeStatic => ItemType::Static, + clean::TypeVariant => ItemType::Variant, + clean::TypeTypedef => ItemType::Typedef, + } + } + pub fn to_static_str(&self) -> &'static str { match *self { - Module => "mod", - Struct => "struct", - Enum => "enum", - Function => "fn", - Typedef => "type", - Static => "static", - Trait => "trait", - Impl => "impl", - ViewItem => "viewitem", - TyMethod => "tymethod", - Method => "method", - StructField => "structfield", - Variant => "variant", - ForeignFunction => "ffi", - ForeignStatic => "ffs", - Macro => "macro", - Primitive => "primitive", - AssociatedType => "associatedtype", - Constant => "constant", + ItemType::Module => "mod", + ItemType::Struct => "struct", + ItemType::Enum => "enum", + ItemType::Function => "fn", + ItemType::Typedef => "type", + ItemType::Static => "static", + ItemType::Trait => "trait", + ItemType::Impl => "impl", + ItemType::ViewItem => "viewitem", + ItemType::TyMethod => "tymethod", + ItemType::Method => "method", + ItemType::StructField => "structfield", + ItemType::Variant => "variant", + ItemType::ForeignFunction => "ffi", + ItemType::ForeignStatic => "ffs", + ItemType::Macro => "macro", + ItemType::Primitive => "primitive", + ItemType::AssociatedType => "associatedtype", + ItemType::Constant => "constant", } } } @@ -75,27 +111,3 @@ impl fmt::Show for ItemType { } } -pub fn shortty(item: &clean::Item) -> ItemType { - match item.inner { - clean::ModuleItem(..) => Module, - clean::StructItem(..) => Struct, - clean::EnumItem(..) => Enum, - clean::FunctionItem(..) => Function, - clean::TypedefItem(..) => Typedef, - clean::StaticItem(..) => Static, - clean::ConstantItem(..) => Constant, - clean::TraitItem(..) => Trait, - clean::ImplItem(..) => Impl, - clean::ViewItemItem(..) => ViewItem, - clean::TyMethodItem(..) => TyMethod, - clean::MethodItem(..) => Method, - clean::StructFieldItem(..) => StructField, - clean::VariantItem(..) => Variant, - clean::ForeignFunctionItem(..) => ForeignFunction, - clean::ForeignStaticItem(..) => ForeignStatic, - clean::MacroItem(..) => Macro, - clean::PrimitiveItem(..) => Primitive, - clean::AssociatedTypeItem(..) 
=> AssociatedType, - } -} - diff --git a/src/librustdoc/html/render.rs b/src/librustdoc/html/render.rs index 8d7522af027..8c2be47d9c0 100644 --- a/src/librustdoc/html/render.rs +++ b/src/librustdoc/html/render.rs @@ -61,8 +61,7 @@ use fold::DocFolder; use html::format::{VisSpace, Method, FnStyleSpace, MutableSpace, Stability}; use html::format::{ConciseStability, TyParamBounds, WhereClause}; use html::highlight; -use html::item_type::{ItemType, shortty}; -use html::item_type; +use html::item_type::ItemType; use html::layout; use html::markdown::Markdown; use html::markdown; @@ -314,19 +313,8 @@ pub fn run(mut krate: clean::Crate, let paths: HashMap, ItemType)> = analysis.as_ref().map(|a| { let paths = a.external_paths.borrow_mut().take().unwrap(); - paths.into_iter().map(|(k, (v, t))| { - (k, (v, match t { - clean::TypeStruct => item_type::Struct, - clean::TypeEnum => item_type::Enum, - clean::TypeFunction => item_type::Function, - clean::TypeTrait => item_type::Trait, - clean::TypeModule => item_type::Module, - clean::TypeStatic => item_type::Static, - clean::TypeVariant => item_type::Variant, - clean::TypeTypedef => item_type::Typedef, - })) - }).collect() - }).unwrap_or(HashMap::new()); + paths.into_iter().map(|(k, (v, t))| (k, (v, ItemType::from_type_kind(t)))).collect() + }).unwrap_or(HashMap::new()); let mut cache = Cache { impls: HashMap::new(), external_paths: paths.iter().map(|(&k, v)| (k, v.ref0().clone())) @@ -359,7 +347,7 @@ pub fn run(mut krate: clean::Crate, for &(n, ref e) in krate.externs.iter() { cache.extern_locations.insert(n, extern_location(e, &cx.dst)); let did = ast::DefId { krate: n, node: ast::CRATE_NODE_ID }; - cache.paths.insert(did, (vec![e.name.to_string()], item_type::Module)); + cache.paths.insert(did, (vec![e.name.to_string()], ItemType::Module)); } // Cache where all known primitives have their documentation located. @@ -642,6 +630,11 @@ fn mkdir(path: &Path) -> io::IoResult<()> { } } +/// Returns a documentation-level item type from the item. +fn shortty(item: &clean::Item) -> ItemType { + ItemType::from_item(item) +} + /// Takes a path to a source file and cleans the path to it. This canonicalizes /// things like ".." to components which preserve the "top down" hierarchy of a /// static HTML tree. @@ -855,13 +848,13 @@ impl DocFolder for Cache { let last = self.parent_stack.last().unwrap(); let did = *last; let path = match self.paths.get(&did) { - Some(&(_, item_type::Trait)) => + Some(&(_, ItemType::Trait)) => Some(self.stack[..self.stack.len() - 1]), // The current stack not necessarily has correlation for // where the type was defined. On the other hand, // `paths` always has the right information if present. - Some(&(ref fqp, item_type::Struct)) | - Some(&(ref fqp, item_type::Enum)) => + Some(&(ref fqp, ItemType::Struct)) | + Some(&(ref fqp, ItemType::Enum)) => Some(fqp[..fqp.len() - 1]), Some(..) => Some(self.stack.as_slice()), None => None @@ -929,7 +922,7 @@ impl DocFolder for Cache { clean::VariantItem(..) if !self.privmod => { let mut stack = self.stack.clone(); stack.pop(); - self.paths.insert(item.def_id, (stack, item_type::Enum)); + self.paths.insert(item.def_id, (stack, ItemType::Enum)); } clean::PrimitiveItem(..) 
if item.visibility.is_some() => { @@ -1491,45 +1484,50 @@ fn item_module(w: &mut fmt::Formatter, cx: &Context, !cx.ignore_private_item(&items[*i]) }).collect::>(); + // the order of item types in the listing + fn reorder(ty: ItemType) -> u8 { + match ty { + ItemType::ViewItem => 0, + ItemType::Primitive => 1, + ItemType::Module => 2, + ItemType::Macro => 3, + ItemType::Struct => 4, + ItemType::Enum => 5, + ItemType::Constant => 6, + ItemType::Static => 7, + ItemType::ForeignFunction => 8, + ItemType::ForeignStatic => 9, + ItemType::Trait => 10, + ItemType::Function => 11, + ItemType::Typedef => 12, + _ => 13 + ty as u8, + } + } + fn cmp(i1: &clean::Item, i2: &clean::Item, idx1: uint, idx2: uint) -> Ordering { - if shortty(i1) == shortty(i2) { + let ty1 = shortty(i1); + let ty2 = shortty(i2); + if ty1 == ty2 { return i1.name.cmp(&i2.name); } - match (&i1.inner, &i2.inner) { - (&clean::ViewItemItem(ref a), &clean::ViewItemItem(ref b)) => { - match (&a.inner, &b.inner) { - (&clean::ExternCrate(..), _) => Less, - (_, &clean::ExternCrate(..)) => Greater, - _ => idx1.cmp(&idx2), + + let tycmp = reorder(ty1).cmp(&reorder(ty2)); + if let Equal = tycmp { + // for reexports, `extern crate` takes precedence. + match (&i1.inner, &i2.inner) { + (&clean::ViewItemItem(ref a), &clean::ViewItemItem(ref b)) => { + match (&a.inner, &b.inner) { + (&clean::ExternCrate(..), _) => return Less, + (_, &clean::ExternCrate(..)) => return Greater, + _ => {} + } } + (_, _) => {} } - (&clean::ViewItemItem(..), _) => Less, - (_, &clean::ViewItemItem(..)) => Greater, - (&clean::PrimitiveItem(..), _) => Less, - (_, &clean::PrimitiveItem(..)) => Greater, - (&clean::ModuleItem(..), _) => Less, - (_, &clean::ModuleItem(..)) => Greater, - (&clean::MacroItem(..), _) => Less, - (_, &clean::MacroItem(..)) => Greater, - (&clean::StructItem(..), _) => Less, - (_, &clean::StructItem(..)) => Greater, - (&clean::EnumItem(..), _) => Less, - (_, &clean::EnumItem(..)) => Greater, - (&clean::ConstantItem(..), _) => Less, - (_, &clean::ConstantItem(..)) => Greater, - (&clean::StaticItem(..), _) => Less, - (_, &clean::StaticItem(..)) => Greater, - (&clean::ForeignFunctionItem(..), _) => Less, - (_, &clean::ForeignFunctionItem(..)) => Greater, - (&clean::ForeignStaticItem(..), _) => Less, - (_, &clean::ForeignStaticItem(..)) => Greater, - (&clean::TraitItem(..), _) => Less, - (_, &clean::TraitItem(..)) => Greater, - (&clean::FunctionItem(..), _) => Less, - (_, &clean::FunctionItem(..)) => Greater, - (&clean::TypedefItem(..), _) => Less, - (_, &clean::TypedefItem(..)) => Greater, - _ => idx1.cmp(&idx2), + + idx1.cmp(&idx2) + } else { + tycmp } } @@ -1546,26 +1544,26 @@ fn item_module(w: &mut fmt::Formatter, cx: &Context, try!(write!(w, "")); } curty = myty; - let (short, name) = match myitem.inner { - clean::ModuleItem(..) => ("modules", "Modules"), - clean::StructItem(..) => ("structs", "Structs"), - clean::EnumItem(..) => ("enums", "Enums"), - clean::FunctionItem(..) => ("functions", "Functions"), - clean::TypedefItem(..) => ("types", "Type Definitions"), - clean::StaticItem(..) => ("statics", "Statics"), - clean::ConstantItem(..) => ("constants", "Constants"), - clean::TraitItem(..) => ("traits", "Traits"), - clean::ImplItem(..) => ("impls", "Implementations"), - clean::ViewItemItem(..) => ("reexports", "Reexports"), - clean::TyMethodItem(..) => ("tymethods", "Type Methods"), - clean::MethodItem(..) => ("methods", "Methods"), - clean::StructFieldItem(..) => ("fields", "Struct Fields"), - clean::VariantItem(..) 
=> ("variants", "Variants"), - clean::ForeignFunctionItem(..) => ("ffi-fns", "Foreign Functions"), - clean::ForeignStaticItem(..) => ("ffi-statics", "Foreign Statics"), - clean::MacroItem(..) => ("macros", "Macros"), - clean::PrimitiveItem(..) => ("primitives", "Primitive Types"), - clean::AssociatedTypeItem(..) => ("associated-types", "Associated Types"), + let (short, name) = match myty.unwrap() { + ItemType::Module => ("modules", "Modules"), + ItemType::Struct => ("structs", "Structs"), + ItemType::Enum => ("enums", "Enums"), + ItemType::Function => ("functions", "Functions"), + ItemType::Typedef => ("types", "Type Definitions"), + ItemType::Static => ("statics", "Statics"), + ItemType::Constant => ("constants", "Constants"), + ItemType::Trait => ("traits", "Traits"), + ItemType::Impl => ("impls", "Implementations"), + ItemType::ViewItem => ("reexports", "Reexports"), + ItemType::TyMethod => ("tymethods", "Type Methods"), + ItemType::Method => ("methods", "Methods"), + ItemType::StructField => ("fields", "Struct Fields"), + ItemType::Variant => ("variants", "Variants"), + ItemType::ForeignFunction => ("ffi-fns", "Foreign Functions"), + ItemType::ForeignStatic => ("ffi-statics", "Foreign Statics"), + ItemType::Macro => ("macros", "Macros"), + ItemType::Primitive => ("primitives", "Primitive Types"), + ItemType::AssociatedType => ("associated-types", "Associated Types"), }; try!(write!(w, "

    \ From 1cb1f00d40f000ac7633b62a603db4fcea835ca6 Mon Sep 17 00:00:00 2001 From: Kang Seonghoon Date: Thu, 4 Dec 2014 00:29:21 +0900 Subject: [PATCH 19/40] rustdoc: Removed Foreign{Function,Static} item types. They are just (unsafe) functions and static items to most users and even compilers! The metadata doesn't distinguish them, so Rustdoc ended up producing broken links (generated `ffi.*.html`, links to `fn.*.html`). It would be best to avoid this pitfall at all. --- src/librustdoc/html/item_type.rs | 9 +++------ src/librustdoc/html/render.rs | 12 ++++-------- src/librustdoc/html/static/main.js | 4 ++-- 3 files changed, 9 insertions(+), 16 deletions(-) diff --git a/src/librustdoc/html/item_type.rs b/src/librustdoc/html/item_type.rs index 1917184c5a9..0ad12b957ba 100644 --- a/src/librustdoc/html/item_type.rs +++ b/src/librustdoc/html/item_type.rs @@ -34,8 +34,7 @@ pub enum ItemType { Method = 10, StructField = 11, Variant = 12, - ForeignFunction = 13, - ForeignStatic = 14, + // we used to have ForeignFunction and ForeignStatic. they are retired now. Macro = 15, Primitive = 16, AssociatedType = 17, @@ -59,8 +58,8 @@ impl ItemType { clean::MethodItem(..) => ItemType::Method, clean::StructFieldItem(..) => ItemType::StructField, clean::VariantItem(..) => ItemType::Variant, - clean::ForeignFunctionItem(..) => ItemType::ForeignFunction, - clean::ForeignStaticItem(..) => ItemType::ForeignStatic, + clean::ForeignFunctionItem(..) => ItemType::Function, // no ForeignFunction + clean::ForeignStaticItem(..) => ItemType::Static, // no ForeignStatic clean::MacroItem(..) => ItemType::Macro, clean::PrimitiveItem(..) => ItemType::Primitive, clean::AssociatedTypeItem(..) => ItemType::AssociatedType, @@ -95,8 +94,6 @@ impl ItemType { ItemType::Method => "method", ItemType::StructField => "structfield", ItemType::Variant => "variant", - ItemType::ForeignFunction => "ffi", - ItemType::ForeignStatic => "ffs", ItemType::Macro => "macro", ItemType::Primitive => "primitive", ItemType::AssociatedType => "associatedtype", diff --git a/src/librustdoc/html/render.rs b/src/librustdoc/html/render.rs index 8c2be47d9c0..36ce2796faf 100644 --- a/src/librustdoc/html/render.rs +++ b/src/librustdoc/html/render.rs @@ -1495,12 +1495,10 @@ fn item_module(w: &mut fmt::Formatter, cx: &Context, ItemType::Enum => 5, ItemType::Constant => 6, ItemType::Static => 7, - ItemType::ForeignFunction => 8, - ItemType::ForeignStatic => 9, - ItemType::Trait => 10, - ItemType::Function => 11, - ItemType::Typedef => 12, - _ => 13 + ty as u8, + ItemType::Trait => 8, + ItemType::Function => 9, + ItemType::Typedef => 10, + _ => 11 + ty as u8, } } @@ -1559,8 +1557,6 @@ fn item_module(w: &mut fmt::Formatter, cx: &Context, ItemType::Method => ("methods", "Methods"), ItemType::StructField => ("fields", "Struct Fields"), ItemType::Variant => ("variants", "Variants"), - ItemType::ForeignFunction => ("ffi-fns", "Foreign Functions"), - ItemType::ForeignStatic => ("ffi-statics", "Foreign Statics"), ItemType::Macro => ("macros", "Macros"), ItemType::Primitive => ("primitives", "Primitive Types"), ItemType::AssociatedType => ("associated-types", "Associated Types"), diff --git a/src/librustdoc/html/static/main.js b/src/librustdoc/html/static/main.js index 7c6f7ed3fe2..0fc6677cd9f 100644 --- a/src/librustdoc/html/static/main.js +++ b/src/librustdoc/html/static/main.js @@ -566,8 +566,8 @@ "method", "structfield", "variant", - "ffi", - "ffs", + "ffi", // retained for backward compatibility + "ffs", // retained for backward compatibility "macro", "primitive", 
"associatedtype", From 1068855925581d7f6d4a177c5f55ac3d5da8afd7 Mon Sep 17 00:00:00 2001 From: Kang Seonghoon Date: Thu, 4 Dec 2014 00:51:19 +0900 Subject: [PATCH 20/40] rustdoc: Avoid rendering foreign items to the sidebar. Otherwise the generated documentation is 30% larger. The sidebar renders an entry for each item to all items, so large modules have O(n^2) items rendered in the sidebars. Not a correct solution, but at least it works. --- src/librustdoc/html/render.rs | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/librustdoc/html/render.rs b/src/librustdoc/html/render.rs index 36ce2796faf..9eee8e04f0c 100644 --- a/src/librustdoc/html/render.rs +++ b/src/librustdoc/html/render.rs @@ -1244,6 +1244,10 @@ impl Context { for item in m.items.iter() { if self.ignore_private_item(item) { continue } + // avoid putting foreign items to the sidebar. + if let &clean::ForeignFunctionItem(..) = &item.inner { continue } + if let &clean::ForeignStaticItem(..) = &item.inner { continue } + let short = shortty(item).to_static_str(); let myname = match item.name { None => continue, From c200ae5a8a1ab70cfea39ef184e1fd0e87b33181 Mon Sep 17 00:00:00 2001 From: Nick Cameron Date: Tue, 2 Dec 2014 14:13:12 -0800 Subject: [PATCH 21/40] Remove feature gates for `if let`, `while let`, and tuple indexing Closes #19469 --- src/libsyntax/feature_gate.rs | 19 +++---------------- src/test/compile-fail/borrow-tuple-fields.rs | 2 -- src/test/compile-fail/if-let.rs | 2 +- src/test/compile-fail/issue-18566.rs | 2 -- src/test/compile-fail/issue-19244-1.rs | 2 -- .../compile-fail/lint-unnecessary-parens.rs | 1 - src/test/compile-fail/move-fragments-1.rs | 2 -- .../compile-fail/move-out-of-tuple-field.rs | 2 -- .../compile-fail/tuple-index-not-tuple.rs | 2 -- .../compile-fail/tuple-index-out-of-bounds.rs | 2 -- src/test/compile-fail/while-let.rs | 2 +- 11 files changed, 5 insertions(+), 33 deletions(-) diff --git a/src/libsyntax/feature_gate.rs b/src/libsyntax/feature_gate.rs index 7453da6374e..c798b70551f 100644 --- a/src/libsyntax/feature_gate.rs +++ b/src/libsyntax/feature_gate.rs @@ -65,13 +65,13 @@ static KNOWN_FEATURES: &'static [(&'static str, Status)] = &[ ("unboxed_closures", Active), ("import_shadowing", Active), ("advanced_slice_patterns", Active), - ("tuple_indexing", Active), + ("tuple_indexing", Accepted), ("associated_types", Active), ("visible_private_types", Active), ("slicing_syntax", Active), - ("if_let", Active), - ("while_let", Active), + ("if_let", Accepted), + ("while_let", Accepted), // if you change this list without updating src/doc/reference.md, cmr will be sad @@ -309,24 +309,11 @@ impl<'a, 'v> Visitor<'v> for Context<'a> { "unboxed closures are a work-in-progress \ feature with known bugs"); } - ast::ExprTupField(..) => { - self.gate_feature("tuple_indexing", - e.span, - "tuple indexing is experimental"); - } - ast::ExprIfLet(..) => { - self.gate_feature("if_let", e.span, - "`if let` syntax is experimental"); - } ast::ExprSlice(..) => { self.gate_feature("slicing_syntax", e.span, "slicing syntax is experimental"); } - ast::ExprWhileLet(..) => { - self.gate_feature("while_let", e.span, - "`while let` syntax is experimental"); - } _ => {} } visit::walk_expr(self, e); diff --git a/src/test/compile-fail/borrow-tuple-fields.rs b/src/test/compile-fail/borrow-tuple-fields.rs index 519bad4e627..1d09143c24d 100644 --- a/src/test/compile-fail/borrow-tuple-fields.rs +++ b/src/test/compile-fail/borrow-tuple-fields.rs @@ -8,8 +8,6 @@ // option. 
This file may not be copied, modified, or distributed // except according to those terms. -#![feature(tuple_indexing)] - struct Foo(Box, int); struct Bar(int, int); diff --git a/src/test/compile-fail/if-let.rs b/src/test/compile-fail/if-let.rs index b82fb7a94c9..88b6854bb1d 100644 --- a/src/test/compile-fail/if-let.rs +++ b/src/test/compile-fail/if-let.rs @@ -8,7 +8,7 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -#![feature(macro_rules,if_let)] +#![feature(macro_rules)] fn macros() { macro_rules! foo{ diff --git a/src/test/compile-fail/issue-18566.rs b/src/test/compile-fail/issue-18566.rs index 89b1d8540d8..f64d8fee2d8 100644 --- a/src/test/compile-fail/issue-18566.rs +++ b/src/test/compile-fail/issue-18566.rs @@ -8,8 +8,6 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -#![feature(tuple_indexing)] - struct MyPtr<'a>(&'a mut uint); impl<'a> Deref for MyPtr<'a> { fn deref<'b>(&'b self) -> &'b uint { self.0 } diff --git a/src/test/compile-fail/issue-19244-1.rs b/src/test/compile-fail/issue-19244-1.rs index 4fcbb878890..7ca83f21305 100644 --- a/src/test/compile-fail/issue-19244-1.rs +++ b/src/test/compile-fail/issue-19244-1.rs @@ -8,8 +8,6 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -#![feature(tuple_indexing)] - const TUP: (uint,) = (42,); fn main() { diff --git a/src/test/compile-fail/lint-unnecessary-parens.rs b/src/test/compile-fail/lint-unnecessary-parens.rs index 826a4ea5a80..b71effa6f86 100644 --- a/src/test/compile-fail/lint-unnecessary-parens.rs +++ b/src/test/compile-fail/lint-unnecessary-parens.rs @@ -9,7 +9,6 @@ // except according to those terms. #![deny(unused_parens)] -#![feature(if_let,while_let)] #[deriving(Eq, PartialEq)] struct X { y: bool } diff --git a/src/test/compile-fail/move-fragments-1.rs b/src/test/compile-fail/move-fragments-1.rs index ccf12cf79e1..e45862a7fc6 100644 --- a/src/test/compile-fail/move-fragments-1.rs +++ b/src/test/compile-fail/move-fragments-1.rs @@ -8,8 +8,6 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -#![feature(tuple_indexing)] - // Test that we correctly compute the move fragments for a fn. // // Note that the code below is not actually incorrect; the diff --git a/src/test/compile-fail/move-out-of-tuple-field.rs b/src/test/compile-fail/move-out-of-tuple-field.rs index 7f55a78e8b7..7fcb54e0467 100644 --- a/src/test/compile-fail/move-out-of-tuple-field.rs +++ b/src/test/compile-fail/move-out-of-tuple-field.rs @@ -8,8 +8,6 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -#![feature(tuple_indexing)] - struct Foo(Box); fn main() { diff --git a/src/test/compile-fail/tuple-index-not-tuple.rs b/src/test/compile-fail/tuple-index-not-tuple.rs index d4ef0e20b26..33aeebb3691 100644 --- a/src/test/compile-fail/tuple-index-not-tuple.rs +++ b/src/test/compile-fail/tuple-index-not-tuple.rs @@ -8,8 +8,6 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -#![feature(tuple_indexing)] - struct Point { x: int, y: int } struct Empty; diff --git a/src/test/compile-fail/tuple-index-out-of-bounds.rs b/src/test/compile-fail/tuple-index-out-of-bounds.rs index f9bf2746794..609e34f2274 100644 --- a/src/test/compile-fail/tuple-index-out-of-bounds.rs +++ b/src/test/compile-fail/tuple-index-out-of-bounds.rs @@ -8,8 +8,6 @@ // option. 
This file may not be copied, modified, or distributed // except according to those terms. -#![feature(tuple_indexing)] - struct Point(int, int); fn main() { diff --git a/src/test/compile-fail/while-let.rs b/src/test/compile-fail/while-let.rs index 0dd442ec3f6..ccf3d2dd750 100644 --- a/src/test/compile-fail/while-let.rs +++ b/src/test/compile-fail/while-let.rs @@ -8,7 +8,7 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -#![feature(macro_rules,while_let)] +#![feature(macro_rules)] fn macros() { macro_rules! foo{ From e621116b867c7e4b2d7082aa9460f635443cd1b8 Mon Sep 17 00:00:00 2001 From: Alexander Light Date: Sun, 30 Nov 2014 08:19:29 -0500 Subject: [PATCH 22/40] make fmt_macros and rustdoc have standard doc attributes --- src/libfmt_macros/lib.rs | 5 +++++ src/librustdoc/lib.rs | 4 ++++ 2 files changed, 9 insertions(+) diff --git a/src/libfmt_macros/lib.rs b/src/libfmt_macros/lib.rs index 6b8fafbed5d..03b65b3f71c 100644 --- a/src/libfmt_macros/lib.rs +++ b/src/libfmt_macros/lib.rs @@ -18,6 +18,11 @@ #![experimental] #![crate_type = "rlib"] #![crate_type = "dylib"] +#![doc(html_logo_url = "http://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png", + html_favicon_url = "http://www.rust-lang.org/favicon.ico", + html_root_url = "http://doc.rust-lang.org/nightly/", + html_playground_url = "http://play.rust-lang.org/")] + #![feature(macro_rules, globs, import_shadowing)] pub use self::Piece::*; pub use self::Position::*; diff --git a/src/librustdoc/lib.rs b/src/librustdoc/lib.rs index c592474b057..bc02d8c4c30 100644 --- a/src/librustdoc/lib.rs +++ b/src/librustdoc/lib.rs @@ -12,6 +12,10 @@ #![experimental] #![crate_type = "dylib"] #![crate_type = "rlib"] +#![doc(html_logo_url = "http://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png", + html_favicon_url = "http://www.rust-lang.org/favicon.ico", + html_root_url = "http://doc.rust-lang.org/nightly/", + html_playground_url = "http://play.rust-lang.org/")] #![allow(unknown_features)] #![feature(globs, if_let, macro_rules, phase, slicing_syntax, tuple_indexing)] From 2e1a50121ef265214c5e2a7d82fe40b4928575ab Mon Sep 17 00:00:00 2001 From: Corey Richardson Date: Tue, 2 Dec 2014 16:48:48 -0800 Subject: [PATCH 23/40] syntax: support ES6-style unicode escapes First half of bootstrapping https://github.com/rust-lang/rfcs/pull/446 --- src/libsyntax/parse/lexer/mod.rs | 81 ++++++++++++++++++- src/libsyntax/parse/mod.rs | 22 +++-- .../compile-fail/new-unicode-escapes-1.rs | 13 +++ .../compile-fail/new-unicode-escapes-2.rs | 13 +++ .../compile-fail/new-unicode-escapes-3.rs | 13 +++ .../compile-fail/new-unicode-escapes-4.rs | 13 +++ src/test/run-pass/new-unicode-escapes.rs | 22 +++++ 7 files changed, 169 insertions(+), 8 deletions(-) create mode 100644 src/test/compile-fail/new-unicode-escapes-1.rs create mode 100644 src/test/compile-fail/new-unicode-escapes-2.rs create mode 100644 src/test/compile-fail/new-unicode-escapes-3.rs create mode 100644 src/test/compile-fail/new-unicode-escapes-4.rs create mode 100644 src/test/run-pass/new-unicode-escapes.rs diff --git a/src/libsyntax/parse/lexer/mod.rs b/src/libsyntax/parse/lexer/mod.rs index 57983a6dee6..27b65e0f527 100644 --- a/src/libsyntax/parse/lexer/mod.rs +++ b/src/libsyntax/parse/lexer/mod.rs @@ -764,6 +764,15 @@ impl<'a> StringReader<'a> { } } + // SNAP c9f6d69 + #[allow(unused)] + fn old_escape_warning(&mut self, sp: Span) { + self.span_diagnostic + .span_warn(sp, "\\U00ABCD12 and \\uABCD escapes are deprecated"); + self.span_diagnostic + 
.span_help(sp, "use \\u{ABCD12} escapes instead"); + } + /// Scan for a single (possibly escaped) byte or char /// in a byte, (non-raw) byte string, char, or (non-raw) string literal. /// `start` is the position of `first_source_char`, which is already consumed. @@ -782,12 +791,24 @@ impl<'a> StringReader<'a> { Some(e) => { return match e { 'n' | 'r' | 't' | '\\' | '\'' | '"' | '0' => true, - 'x' => self.scan_hex_digits(2u, delim, !ascii_only), + 'x' => self.scan_byte_escape(delim, !ascii_only), 'u' if !ascii_only => { - self.scan_hex_digits(4u, delim, false) + if self.curr == Some('{') { + self.scan_unicode_escape(delim) + } else { + let res = self.scan_hex_digits(4u, delim, false); + // SNAP c9f6d69 + //let sp = codemap::mk_sp(escaped_pos, self.last_pos); + //self.old_escape_warning(sp); + res + } } 'U' if !ascii_only => { - self.scan_hex_digits(8u, delim, false) + let res = self.scan_hex_digits(8u, delim, false); + // SNAP c9f6d69 + //let sp = codemap::mk_sp(escaped_pos, self.last_pos); + //self.old_escape_warning(sp); + res } '\n' if delim == '"' => { self.consume_whitespace(); @@ -848,6 +869,56 @@ impl<'a> StringReader<'a> { true } + /// Scan over a \u{...} escape + /// + /// At this point, we have already seen the \ and the u, the { is the current character. We + /// will read at least one digit, and up to 6, and pass over the }. + fn scan_unicode_escape(&mut self, delim: char) -> bool { + self.bump(); // past the { + let start_bpos = self.last_pos; + let mut count: uint = 0; + let mut accum_int = 0; + + while !self.curr_is('}') && count <= 6 { + let c = match self.curr { + Some(c) => c, + None => { + self.fatal_span_(start_bpos, self.last_pos, + "unterminated unicode escape (found EOF)"); + } + }; + accum_int *= 16; + accum_int += c.to_digit(16).unwrap_or_else(|| { + if c == delim { + self.fatal_span_(self.last_pos, self.pos, + "unterminated unicode escape (needed a `}`)"); + } else { + self.fatal_span_char(self.last_pos, self.pos, + "illegal character in unicode escape", c); + } + }) as u32; + self.bump(); + count += 1; + } + + if count > 6 { + self.fatal_span_(start_bpos, self.last_pos, + "overlong unicode escape (can have at most 6 hex digits)"); + } + + self.bump(); // past the ending } + + let mut valid = count >= 1 && count <= 6; + if char::from_u32(accum_int).is_none() { + valid = false; + } + + if !valid { + self.fatal_span_(start_bpos, self.last_pos, "illegal unicode character escape"); + } + valid + } + /// Scan over a float exponent. 
fn scan_float_exponent(&mut self) { if self.curr_is('e') || self.curr_is('E') { @@ -1273,6 +1344,10 @@ impl<'a> StringReader<'a> { return token::Byte(id); } + fn scan_byte_escape(&mut self, delim: char, below_0x7f_only: bool) -> bool { + self.scan_hex_digits(2, delim, below_0x7f_only) + } + fn scan_byte_string(&mut self) -> token::Lit { self.bump(); let start = self.last_pos; diff --git a/src/libsyntax/parse/mod.rs b/src/libsyntax/parse/mod.rs index b46f7cdfe22..8d0c2de048a 100644 --- a/src/libsyntax/parse/mod.rs +++ b/src/libsyntax/parse/mod.rs @@ -393,16 +393,28 @@ pub fn char_lit(lit: &str) -> (char, int) { let msg = format!("lexer should have rejected a bad character escape {}", lit); let msg2 = msg.as_slice(); - let esc: |uint| -> Option<(char, int)> = |len| + fn esc(len: uint, lit: &str) -> Option<(char, int)> { num::from_str_radix(lit.slice(2, len), 16) .and_then(char::from_u32) - .map(|x| (x, len as int)); + .map(|x| (x, len as int)) + } + + let unicode_escape: || -> Option<(char, int)> = || + if lit.as_bytes()[2] == b'{' { + let idx = lit.find('}').expect(msg2); + let subslice = lit.slice(3, idx); + num::from_str_radix(subslice, 16) + .and_then(char::from_u32) + .map(|x| (x, subslice.char_len() as int + 4)) + } else { + esc(6, lit) + }; // Unicode escapes return match lit.as_bytes()[1] as char { - 'x' | 'X' => esc(4), - 'u' => esc(6), - 'U' => esc(10), + 'x' | 'X' => esc(4, lit), + 'u' => unicode_escape(), + 'U' => esc(10, lit), _ => None, }.expect(msg2); } diff --git a/src/test/compile-fail/new-unicode-escapes-1.rs b/src/test/compile-fail/new-unicode-escapes-1.rs new file mode 100644 index 00000000000..f2422830a21 --- /dev/null +++ b/src/test/compile-fail/new-unicode-escapes-1.rs @@ -0,0 +1,13 @@ +// Copyright 2014 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +pub fn main() { + let s = "\u{2603"; //~ ERROR unterminated unicode escape (needed a `}`) +} diff --git a/src/test/compile-fail/new-unicode-escapes-2.rs b/src/test/compile-fail/new-unicode-escapes-2.rs new file mode 100644 index 00000000000..5da8674c37e --- /dev/null +++ b/src/test/compile-fail/new-unicode-escapes-2.rs @@ -0,0 +1,13 @@ +// Copyright 2014 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +pub fn main() { + let s = "\u{260311111111}"; //~ ERROR overlong unicode escape (can have at most 6 hex digits) +} diff --git a/src/test/compile-fail/new-unicode-escapes-3.rs b/src/test/compile-fail/new-unicode-escapes-3.rs new file mode 100644 index 00000000000..7c64d02efd7 --- /dev/null +++ b/src/test/compile-fail/new-unicode-escapes-3.rs @@ -0,0 +1,13 @@ +// Copyright 2014 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +pub fn main() { + let s = "\u{d805}"; //~ ERROR illegal unicode character escape +} diff --git a/src/test/compile-fail/new-unicode-escapes-4.rs b/src/test/compile-fail/new-unicode-escapes-4.rs new file mode 100644 index 00000000000..ffc2b11e0c1 --- /dev/null +++ b/src/test/compile-fail/new-unicode-escapes-4.rs @@ -0,0 +1,13 @@ +// Copyright 2014 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +pub fn main() { + let s = "\u{lol}"; //~ ERROR illegal character in unicode escape +} diff --git a/src/test/run-pass/new-unicode-escapes.rs b/src/test/run-pass/new-unicode-escapes.rs new file mode 100644 index 00000000000..2888389bcce --- /dev/null +++ b/src/test/run-pass/new-unicode-escapes.rs @@ -0,0 +1,22 @@ +// Copyright 2014 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +pub fn main() { + let s = "\u{2603}"; + assert_eq!(s, "☃"); + + let s = "\u{2a10}\u{2A01}\u{2Aa0}"; + assert_eq!(s, "⨐⨁⪠"); + + let s = "\\{20}"; + let mut correct_s = String::from_str("\\"); + correct_s.push_str("{20}"); + assert_eq!(s, correct_s.as_slice()); +} From 108bca53f04342a4626b34ac1d5b8236d170a12a Mon Sep 17 00:00:00 2001 From: P1start Date: Wed, 3 Dec 2014 22:47:53 +1300 Subject: [PATCH 24/40] =?UTF-8?q?Make=20the=20parser=E2=80=99s=20=E2=80=98?= =?UTF-8?q?expected=20,=20found=20=E2=80=99=20errors=20more=20ac?= =?UTF-8?q?curate?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit As an example of what this changes, the following code: let x: [int ..4]; Currently spits out ‘expected `]`, found `..`’. However, a comma would also be valid there, as would a number of other tokens. This change adjusts the parser to produce more accurate errors, so that that example now produces ‘expected one of `(`, `+`, `,`, `::`, or `]`, found `..`’. 
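The machinery behind this, added in the parser diff below, is an `expected_tokens` list on the parser: every `check` that fails records the token it tested for, `bump` clears the list, and the error message joins whatever has accumulated. A stripped-down sketch of that idea in present-day Rust (illustrative only, not the actual libsyntax code):

```rust
// Toy model of the bookkeeping this patch adds: failed `check`s remember
// what they looked for, and the error reports the accumulated set.
struct Parser {
    token: &'static str,
    expected_tokens: Vec<&'static str>,
}

impl Parser {
    // Mirrors the patch's `Parser::check`: test for `tok`, remember it on failure.
    fn check(&mut self, tok: &'static str) -> bool {
        let is_present = self.token == tok;
        if !is_present {
            self.expected_tokens.push(tok);
        }
        is_present
    }

    // Mirrors the "expected one of ..., found ..." message construction.
    fn unexpected(&self) -> String {
        let expect = self.expected_tokens
            .iter()
            .map(|t| format!("`{}`", t))
            .collect::<Vec<_>>()
            .join(", ");
        format!("expected one of {}, found `{}`", expect, self.token)
    }
}

fn main() {
    // Roughly what happens for `let x: [int ..4]`: the type parser tries
    // several continuations before giving up on `..`.
    let mut p = Parser { token: "..", expected_tokens: Vec::new() };
    let _ = p.check(",") || p.check("]") || p.check("+");
    println!("{}", p.unexpected());
    // prints: expected one of `,`, `]`, `+`, found `..`
}
```

In the real parser the set is also sorted and deduplicated before printing, which is why the updated test expectations below list the candidate tokens in a stable order.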
--- src/libsyntax/parse/parser.rs | 183 +++++++++++------- src/test/compile-fail/better-expected.rs | 13 ++ .../compile-fail/column-offset-1-based.rs | 2 +- src/test/compile-fail/empty-impl-semicolon.rs | 2 +- src/test/compile-fail/issue-1655.rs | 2 +- src/test/compile-fail/issue-19096.rs | 2 +- src/test/compile-fail/issue-3036.rs | 2 +- src/test/compile-fail/match-vec-invalid.rs | 2 +- src/test/compile-fail/multitrait.rs | 2 +- src/test/compile-fail/mut-patterns.rs | 2 +- .../compile-fail/omitted-arg-in-item-fn.rs | 2 +- src/test/compile-fail/pat-range-bad-dots.rs | 2 +- src/test/compile-fail/raw-str-unbalanced.rs | 2 +- .../removed-syntax-closure-lifetime.rs | 2 +- .../removed-syntax-enum-newtype.rs | 2 +- .../compile-fail/removed-syntax-fixed-vec.rs | 2 +- .../removed-syntax-larrow-init.rs | 2 +- .../removed-syntax-larrow-move.rs | 2 +- .../removed-syntax-mut-vec-expr.rs | 2 +- .../compile-fail/removed-syntax-mut-vec-ty.rs | 2 +- .../removed-syntax-ptr-lifetime.rs | 2 +- .../compile-fail/removed-syntax-record.rs | 2 +- .../removed-syntax-uniq-mut-expr.rs | 2 +- .../removed-syntax-uniq-mut-ty.rs | 2 +- .../compile-fail/removed-syntax-with-1.rs | 2 +- .../compile-fail/struct-literal-in-for.rs | 2 +- src/test/compile-fail/struct-literal-in-if.rs | 2 +- .../struct-literal-in-match-discriminant.rs | 2 +- .../compile-fail/struct-literal-in-while.rs | 2 +- 29 files changed, 155 insertions(+), 95 deletions(-) create mode 100644 src/test/compile-fail/better-expected.rs diff --git a/src/libsyntax/parse/parser.rs b/src/libsyntax/parse/parser.rs index 920bcc3a951..c9d78eccdc7 100644 --- a/src/libsyntax/parse/parser.rs +++ b/src/libsyntax/parse/parser.rs @@ -87,6 +87,7 @@ use std::mem; use std::num::Float; use std::rc::Rc; use std::iter; +use std::slice; bitflags! { flags Restrictions: u8 { @@ -303,6 +304,22 @@ pub struct Parser<'a> { /// name is not known. This does not change while the parser is descending /// into modules, and sub-parsers have new values for this name. pub root_module_name: Option, + pub expected_tokens: Vec, +} + +#[deriving(PartialEq, Eq, Clone)] +pub enum TokenType { + Token(token::Token), + Operator, +} + +impl TokenType { + fn to_string(&self) -> String { + match *self { + TokenType::Token(ref t) => format!("`{}`", Parser::token_to_string(t)), + TokenType::Operator => "an operator".into_string(), + } + } } fn is_plain_ident_or_underscore(t: &token::Token) -> bool { @@ -347,6 +364,7 @@ impl<'a> Parser<'a> { open_braces: Vec::new(), owns_directory: true, root_module_name: None, + expected_tokens: Vec::new(), } } @@ -375,14 +393,18 @@ impl<'a> Parser<'a> { /// Expect and consume the token t. Signal an error if /// the next token is not t. 
pub fn expect(&mut self, t: &token::Token) { - if self.token == *t { - self.bump(); + if self.expected_tokens.is_empty() { + if self.token == *t { + self.bump(); + } else { + let token_str = Parser::token_to_string(t); + let this_token_str = self.this_token_to_string(); + self.fatal(format!("expected `{}`, found `{}`", + token_str, + this_token_str).as_slice()) + } } else { - let token_str = Parser::token_to_string(t); - let this_token_str = self.this_token_to_string(); - self.fatal(format!("expected `{}`, found `{}`", - token_str, - this_token_str).as_slice()) + self.expect_one_of(slice::ref_slice(t), &[]); } } @@ -392,15 +414,20 @@ impl<'a> Parser<'a> { pub fn expect_one_of(&mut self, edible: &[token::Token], inedible: &[token::Token]) { - fn tokens_to_string(tokens: &[token::Token]) -> String { + fn tokens_to_string(tokens: &[TokenType]) -> String { let mut i = tokens.iter(); // This might be a sign we need a connect method on Iterator. let b = i.next() - .map_or("".to_string(), |t| Parser::token_to_string(t)); - i.fold(b, |b,a| { - let mut b = b; - b.push_str("`, `"); - b.push_str(Parser::token_to_string(a).as_slice()); + .map_or("".into_string(), |t| t.to_string()); + i.enumerate().fold(b, |mut b, (i, ref a)| { + if tokens.len() > 2 && i == tokens.len() - 2 { + b.push_str(", or "); + } else if tokens.len() == 2 && i == tokens.len() - 2 { + b.push_str(" or "); + } else { + b.push_str(", "); + } + b.push_str(&*a.to_string()); b }) } @@ -409,17 +436,21 @@ impl<'a> Parser<'a> { } else if inedible.contains(&self.token) { // leave it in the input } else { - let mut expected = edible.iter().map(|x| x.clone()).collect::>(); - expected.push_all(inedible); + let mut expected = edible.iter().map(|x| TokenType::Token(x.clone())) + .collect::>(); + expected.extend(inedible.iter().map(|x| TokenType::Token(x.clone()))); + expected.push_all(&*self.expected_tokens); + expected.sort_by(|a, b| a.to_string().cmp(&b.to_string())); + expected.dedup(); let expect = tokens_to_string(expected.as_slice()); let actual = self.this_token_to_string(); self.fatal( (if expected.len() != 1 { - (format!("expected one of `{}`, found `{}`", + (format!("expected one of {}, found `{}`", expect, actual)) } else { - (format!("expected `{}`, found `{}`", + (format!("expected {}, found `{}`", expect, actual)) }).as_slice() @@ -514,10 +545,20 @@ impl<'a> Parser<'a> { spanned(lo, hi, node) } + /// Check if the next token is `tok`, and return `true` if so. + /// + /// This method is will automatically add `tok` to `expected_tokens` if `tok` is not + /// encountered. + pub fn check(&mut self, tok: &token::Token) -> bool { + let is_present = self.token == *tok; + if !is_present { self.expected_tokens.push(TokenType::Token(tok.clone())); } + is_present + } + /// Consume token 'tok' if it exists. Returns true if the given /// token was present, false otherwise. pub fn eat(&mut self, tok: &token::Token) -> bool { - let is_present = self.token == *tok; + let is_present = self.check(tok); if is_present { self.bump() } is_present } @@ -739,7 +780,7 @@ impl<'a> Parser<'a> { // commas in generic parameters, because it can stop either after // parsing a type or after parsing a comma. 
for i in iter::count(0u, 1) { - if self.token == token::Gt + if self.check(&token::Gt) || self.token == token::BinOp(token::Shr) || self.token == token::Ge || self.token == token::BinOpEq(token::Shr) { @@ -798,7 +839,7 @@ impl<'a> Parser<'a> { } _ => () } - if sep.trailing_sep_allowed && self.token == *ket { break; } + if sep.trailing_sep_allowed && self.check(ket) { break; } v.push(f(self)); } return v; @@ -881,6 +922,7 @@ impl<'a> Parser<'a> { self.span = next.sp; self.token = next.tok; self.tokens_consumed += 1u; + self.expected_tokens.clear(); } /// Advance the parser by one token and return the bumped token. @@ -999,7 +1041,7 @@ impl<'a> Parser<'a> { self.parse_proc_type(lifetime_defs) } else if self.token_is_bare_fn_keyword() || self.token_is_closure_keyword() { self.parse_ty_bare_fn_or_ty_closure(lifetime_defs) - } else if self.token == token::ModSep || + } else if self.check(&token::ModSep) || self.token.is_ident() || self.token.is_path() { @@ -1101,7 +1143,7 @@ impl<'a> Parser<'a> { /// Parses an optional unboxed closure kind (`&:`, `&mut:`, or `:`). pub fn parse_optional_unboxed_closure_kind(&mut self) -> Option { - if self.token == token::BinOp(token::And) && + if self.check(&token::BinOp(token::And)) && self.look_ahead(1, |t| t.is_keyword(keywords::Mut)) && self.look_ahead(2, |t| *t == token::Colon) { self.bump(); @@ -1211,7 +1253,8 @@ impl<'a> Parser<'a> { lifetime_defs: Vec) -> Vec { - if self.eat(&token::Lt) { + if self.token == token::Lt { + self.bump(); if lifetime_defs.is_empty() { self.warn("deprecated syntax; use the `for` keyword now \ (e.g. change `fn<'a>` to `for<'a> fn`)"); @@ -1430,7 +1473,7 @@ impl<'a> Parser<'a> { let lo = self.span.lo; - let t = if self.token == token::OpenDelim(token::Paren) { + let t = if self.check(&token::OpenDelim(token::Paren)) { self.bump(); // (t) is a parenthesized ty @@ -1440,7 +1483,7 @@ impl<'a> Parser<'a> { let mut last_comma = false; while self.token != token::CloseDelim(token::Paren) { ts.push(self.parse_ty_sum()); - if self.token == token::Comma { + if self.check(&token::Comma) { last_comma = true; self.bump(); } else { @@ -1464,11 +1507,11 @@ impl<'a> Parser<'a> { _ => self.obsolete(last_span, ObsoleteOwnedType) } TyTup(vec![self.parse_ty()]) - } else if self.token == token::BinOp(token::Star) { + } else if self.check(&token::BinOp(token::Star)) { // STAR POINTER (bare pointer?) 
self.bump(); TyPtr(self.parse_ptr()) - } else if self.token == token::OpenDelim(token::Bracket) { + } else if self.check(&token::OpenDelim(token::Bracket)) { // VECTOR self.expect(&token::OpenDelim(token::Bracket)); let t = self.parse_ty_sum(); @@ -1481,7 +1524,7 @@ impl<'a> Parser<'a> { }; self.expect(&token::CloseDelim(token::Bracket)); t - } else if self.token == token::BinOp(token::And) || + } else if self.check(&token::BinOp(token::And)) || self.token == token::AndAnd { // BORROWED POINTER self.expect_and(); @@ -1492,7 +1535,7 @@ impl<'a> Parser<'a> { self.token_is_closure_keyword() { // BARE FUNCTION OR CLOSURE self.parse_ty_bare_fn_or_ty_closure(Vec::new()) - } else if self.token == token::BinOp(token::Or) || + } else if self.check(&token::BinOp(token::Or)) || self.token == token::OrOr || (self.token == token::Lt && self.look_ahead(1, |t| { @@ -1509,7 +1552,7 @@ impl<'a> Parser<'a> { TyTypeof(e) } else if self.eat_keyword(keywords::Proc) { self.parse_proc_type(Vec::new()) - } else if self.token == token::Lt { + } else if self.check(&token::Lt) { // QUALIFIED PATH `::item` self.bump(); let self_type = self.parse_ty_sum(); @@ -1523,7 +1566,7 @@ impl<'a> Parser<'a> { trait_ref: P(trait_ref), item_name: item_name, })) - } else if self.token == token::ModSep || + } else if self.check(&token::ModSep) || self.token.is_ident() || self.token.is_path() { // NAMED TYPE @@ -1532,7 +1575,8 @@ impl<'a> Parser<'a> { // TYPE TO BE INFERRED TyInfer } else { - let msg = format!("expected type, found token {}", self.token); + let this_token_str = self.this_token_to_string(); + let msg = format!("expected type, found `{}`", this_token_str); self.fatal(msg.as_slice()); }; @@ -1635,7 +1679,7 @@ impl<'a> Parser<'a> { } pub fn maybe_parse_fixed_vstore(&mut self) -> Option> { - if self.token == token::Comma && + if self.check(&token::Comma) && self.look_ahead(1, |t| *t == token::DotDot) { self.bump(); self.bump(); @@ -1959,9 +2003,10 @@ impl<'a> Parser<'a> { token::Gt => { return res; } token::BinOp(token::Shr) => { return res; } _ => { + let this_token_str = self.this_token_to_string(); let msg = format!("expected `,` or `>` after lifetime \ - name, got: {}", - self.token); + name, found `{}`", + this_token_str); self.fatal(msg.as_slice()); } } @@ -2126,7 +2171,7 @@ impl<'a> Parser<'a> { es.push(self.parse_expr()); self.commit_expr(&**es.last().unwrap(), &[], &[token::Comma, token::CloseDelim(token::Paren)]); - if self.token == token::Comma { + if self.check(&token::Comma) { trailing_comma = true; self.bump(); @@ -2167,14 +2212,14 @@ impl<'a> Parser<'a> { token::OpenDelim(token::Bracket) => { self.bump(); - if self.token == token::CloseDelim(token::Bracket) { + if self.check(&token::CloseDelim(token::Bracket)) { // Empty vector. self.bump(); ex = ExprVec(Vec::new()); } else { // Nonempty vector. let first_expr = self.parse_expr(); - if self.token == token::Comma && + if self.check(&token::Comma) && self.look_ahead(1, |t| *t == token::DotDot) { // Repeating vector syntax: [ 0, ..512 ] self.bump(); @@ -2182,7 +2227,7 @@ impl<'a> Parser<'a> { let count = self.parse_expr(); self.expect(&token::CloseDelim(token::Bracket)); ex = ExprRepeat(first_expr, count); - } else if self.token == token::Comma { + } else if self.check(&token::Comma) { // Vector with two or more elements. 
self.bump(); let remaining_exprs = self.parse_seq_to_end( @@ -2284,7 +2329,7 @@ impl<'a> Parser<'a> { ex = ExprBreak(None); } hi = self.span.hi; - } else if self.token == token::ModSep || + } else if self.check(&token::ModSep) || self.token.is_ident() && !self.token.is_keyword(keywords::True) && !self.token.is_keyword(keywords::False) { @@ -2292,7 +2337,7 @@ impl<'a> Parser<'a> { self.parse_path(LifetimeAndTypesWithColons); // `!`, as an operator, is prefix, so we know this isn't that - if self.token == token::Not { + if self.check(&token::Not) { // MACRO INVOCATION expression self.bump(); @@ -2309,7 +2354,7 @@ impl<'a> Parser<'a> { tts, EMPTY_CTXT)); } - if self.token == token::OpenDelim(token::Brace) { + if self.check(&token::OpenDelim(token::Brace)) { // This is a struct literal, unless we're prohibited // from parsing struct literals here. if !self.restrictions.contains(RESTRICTION_NO_STRUCT_LITERAL) { @@ -2840,6 +2885,7 @@ impl<'a> Parser<'a> { self.restrictions.contains(RESTRICTION_NO_BAR_OP) { return lhs; } + self.expected_tokens.push(TokenType::Operator); let cur_opt = self.token.to_binop(); match cur_opt { @@ -3079,7 +3125,7 @@ impl<'a> Parser<'a> { /// Parse the RHS of a local variable declaration (e.g. '= 14;') fn parse_initializer(&mut self) -> Option> { - if self.token == token::Eq { + if self.check(&token::Eq) { self.bump(); Some(self.parse_expr()) } else { @@ -3092,7 +3138,7 @@ impl<'a> Parser<'a> { let mut pats = Vec::new(); loop { pats.push(self.parse_pat()); - if self.token == token::BinOp(token::Or) { self.bump(); } + if self.check(&token::BinOp(token::Or)) { self.bump(); } else { return pats; } }; } @@ -3114,11 +3160,11 @@ impl<'a> Parser<'a> { } if before_slice { - if self.token == token::DotDot { + if self.check(&token::DotDot) { self.bump(); - if self.token == token::Comma || - self.token == token::CloseDelim(token::Bracket) { + if self.check(&token::Comma) || + self.check(&token::CloseDelim(token::Bracket)) { slice = Some(P(ast::Pat { id: ast::DUMMY_NODE_ID, node: PatWild(PatWildMulti), @@ -3135,7 +3181,7 @@ impl<'a> Parser<'a> { } let subpat = self.parse_pat(); - if before_slice && self.token == token::DotDot { + if before_slice && self.check(&token::DotDot) { self.bump(); slice = Some(subpat); before_slice = false; @@ -3160,13 +3206,13 @@ impl<'a> Parser<'a> { } else { self.expect(&token::Comma); // accept trailing commas - if self.token == token::CloseDelim(token::Brace) { break } + if self.check(&token::CloseDelim(token::Brace)) { break } } let lo = self.span.lo; let hi; - if self.token == token::DotDot { + if self.check(&token::DotDot) { self.bump(); if self.token != token::CloseDelim(token::Brace) { let token_str = self.this_token_to_string(); @@ -3187,7 +3233,7 @@ impl<'a> Parser<'a> { let fieldname = self.parse_ident(); - let (subpat, is_shorthand) = if self.token == token::Colon { + let (subpat, is_shorthand) = if self.check(&token::Colon) { match bind_type { BindByRef(..) | BindByValue(MutMutable) => { let token_str = self.this_token_to_string(); @@ -3267,15 +3313,15 @@ impl<'a> Parser<'a> { token::OpenDelim(token::Paren) => { // parse (pat,pat,pat,...) 
as tuple self.bump(); - if self.token == token::CloseDelim(token::Paren) { + if self.check(&token::CloseDelim(token::Paren)) { self.bump(); pat = PatTup(vec![]); } else { let mut fields = vec!(self.parse_pat()); if self.look_ahead(1, |t| *t != token::CloseDelim(token::Paren)) { - while self.token == token::Comma { + while self.check(&token::Comma) { self.bump(); - if self.token == token::CloseDelim(token::Paren) { break; } + if self.check(&token::CloseDelim(token::Paren)) { break; } fields.push(self.parse_pat()); } } @@ -3318,7 +3364,7 @@ impl<'a> Parser<'a> { // These expressions are limited to literals (possibly // preceded by unary-minus) or identifiers. let val = self.parse_literal_maybe_minus(); - if (self.token == token::DotDotDot) && + if (self.check(&token::DotDotDot)) && self.look_ahead(1, |t| { *t != token::Comma && *t != token::CloseDelim(token::Bracket) }) { @@ -3621,7 +3667,7 @@ impl<'a> Parser<'a> { let hi = self.span.hi; if id.name == token::special_idents::invalid.name { - if self.token == token::Dot { + if self.check(&token::Dot) { let span = self.span; let token_string = self.this_token_to_string(); self.span_err(span, @@ -3934,7 +3980,7 @@ impl<'a> Parser<'a> { let bounds = self.parse_colon_then_ty_param_bounds(); - let default = if self.token == token::Eq { + let default = if self.check(&token::Eq) { self.bump(); Some(self.parse_ty_sum()) } @@ -4334,7 +4380,7 @@ impl<'a> Parser<'a> { (optional_unboxed_closure_kind, args) } }; - let output = if self.token == token::RArrow { + let output = if self.check(&token::RArrow) { self.parse_ret_ty() } else { Return(P(Ty { @@ -4359,7 +4405,7 @@ impl<'a> Parser<'a> { seq_sep_trailing_allowed(token::Comma), |p| p.parse_fn_block_arg()); - let output = if self.token == token::RArrow { + let output = if self.check(&token::RArrow) { self.parse_ret_ty() } else { Return(P(Ty { @@ -4616,7 +4662,7 @@ impl<'a> Parser<'a> { token::get_ident(class_name)).as_slice()); } self.bump(); - } else if self.token == token::OpenDelim(token::Paren) { + } else if self.check(&token::OpenDelim(token::Paren)) { // It's a tuple-like struct. is_tuple_like = true; fields = self.parse_unspanned_seq( @@ -4801,7 +4847,7 @@ impl<'a> Parser<'a> { fn parse_item_mod(&mut self, outer_attrs: &[Attribute]) -> ItemInfo { let id_span = self.span; let id = self.parse_ident(); - if self.token == token::Semi { + if self.check(&token::Semi) { self.bump(); // This mod is in an external file. Let's go get it! let (m, attrs) = self.eval_src_mod(id, outer_attrs, id_span); @@ -5044,7 +5090,8 @@ impl<'a> Parser<'a> { let (maybe_path, ident) = match self.token { token::Ident(..) 
=> { let the_ident = self.parse_ident(); - let path = if self.eat(&token::Eq) { + let path = if self.token == token::Eq { + self.bump(); let path = self.parse_str(); let span = self.span; self.obsolete(span, ObsoleteExternCrateRenaming); @@ -5184,7 +5231,7 @@ impl<'a> Parser<'a> { token::get_ident(ident)).as_slice()); } kind = StructVariantKind(struct_def); - } else if self.token == token::OpenDelim(token::Paren) { + } else if self.check(&token::OpenDelim(token::Paren)) { all_nullary = false; let arg_tys = self.parse_enum_variant_seq( &token::OpenDelim(token::Paren), @@ -5348,7 +5395,7 @@ impl<'a> Parser<'a> { visibility, maybe_append(attrs, extra_attrs)); return IoviItem(item); - } else if self.token == token::OpenDelim(token::Brace) { + } else if self.check(&token::OpenDelim(token::Brace)) { return self.parse_item_foreign_mod(lo, opt_abi, visibility, attrs); } @@ -5629,7 +5676,7 @@ impl<'a> Parser<'a> { fn parse_view_path(&mut self) -> P { let lo = self.span.lo; - if self.token == token::OpenDelim(token::Brace) { + if self.check(&token::OpenDelim(token::Brace)) { // use {foo,bar} let idents = self.parse_unspanned_seq( &token::OpenDelim(token::Brace), @@ -5653,7 +5700,7 @@ impl<'a> Parser<'a> { self.bump(); let path_lo = self.span.lo; path = vec!(self.parse_ident()); - while self.token == token::ModSep { + while self.check(&token::ModSep) { self.bump(); let id = self.parse_ident(); path.push(id); @@ -5677,7 +5724,7 @@ impl<'a> Parser<'a> { token::ModSep => { // foo::bar or foo::{a,b,c} or foo::* - while self.token == token::ModSep { + while self.check(&token::ModSep) { self.bump(); match self.token { @@ -5846,7 +5893,7 @@ impl<'a> Parser<'a> { loop { match self.parse_foreign_item(attrs, macros_allowed) { IoviNone(returned_attrs) => { - if self.token == token::CloseDelim(token::Brace) { + if self.check(&token::CloseDelim(token::Brace)) { attrs = returned_attrs; break } diff --git a/src/test/compile-fail/better-expected.rs b/src/test/compile-fail/better-expected.rs new file mode 100644 index 00000000000..489f892726a --- /dev/null +++ b/src/test/compile-fail/better-expected.rs @@ -0,0 +1,13 @@ +// Copyright 2014 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +fn main() { + let x: [int ..3]; //~ ERROR expected one of `(`, `+`, `,`, `::`, or `]`, found `..` +} diff --git a/src/test/compile-fail/column-offset-1-based.rs b/src/test/compile-fail/column-offset-1-based.rs index a00ded61758..621b480fe77 100644 --- a/src/test/compile-fail/column-offset-1-based.rs +++ b/src/test/compile-fail/column-offset-1-based.rs @@ -8,4 +8,4 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -# //~ ERROR 11:1: 11:2 error: expected `[`, found `` +# //~ ERROR 11:1: 11:2 error: expected one of `!` or `[`, found `` diff --git a/src/test/compile-fail/empty-impl-semicolon.rs b/src/test/compile-fail/empty-impl-semicolon.rs index b5f17eef886..a598252f1b6 100644 --- a/src/test/compile-fail/empty-impl-semicolon.rs +++ b/src/test/compile-fail/empty-impl-semicolon.rs @@ -8,4 +8,4 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-impl Foo; //~ ERROR expected `{`, found `;` +impl Foo; //~ ERROR expected one of `(`, `+`, `::`, or `{`, found `;` diff --git a/src/test/compile-fail/issue-1655.rs b/src/test/compile-fail/issue-1655.rs index 6bdcf5c5edc..a8704f7545f 100644 --- a/src/test/compile-fail/issue-1655.rs +++ b/src/test/compile-fail/issue-1655.rs @@ -8,7 +8,7 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -// error-pattern:expected `[`, found `vec` +// error-pattern:expected one of `!` or `[`, found `vec` mod blade_runner { #vec[doc( brief = "Blade Runner is probably the best movie ever", diff --git a/src/test/compile-fail/issue-19096.rs b/src/test/compile-fail/issue-19096.rs index 7f42abb3acc..6b67814aab3 100644 --- a/src/test/compile-fail/issue-19096.rs +++ b/src/test/compile-fail/issue-19096.rs @@ -12,5 +12,5 @@ fn main() { let t = (42i, 42i); - t.0::; //~ ERROR expected one of `;`, `}`, found `::` + t.0::; //~ ERROR expected one of `.`, `;`, `}`, or an operator, found `::` } diff --git a/src/test/compile-fail/issue-3036.rs b/src/test/compile-fail/issue-3036.rs index 5f56f6b8b6b..16834f49165 100644 --- a/src/test/compile-fail/issue-3036.rs +++ b/src/test/compile-fail/issue-3036.rs @@ -13,4 +13,4 @@ fn main() { let x = 3 -} //~ ERROR: expected `;`, found `}` +} //~ ERROR: expected one of `.`, `;`, or an operator, found `}` diff --git a/src/test/compile-fail/match-vec-invalid.rs b/src/test/compile-fail/match-vec-invalid.rs index 51e83c14aa0..3e073d34f32 100644 --- a/src/test/compile-fail/match-vec-invalid.rs +++ b/src/test/compile-fail/match-vec-invalid.rs @@ -11,7 +11,7 @@ fn main() { let a = Vec::new(); match a { - [1, tail.., tail..] => {}, //~ ERROR: expected `,`, found `..` + [1, tail.., tail..] => {}, //~ ERROR: expected one of `!`, `,`, or `@`, found `..` _ => () } } diff --git a/src/test/compile-fail/multitrait.rs b/src/test/compile-fail/multitrait.rs index 795e3807d5e..7add747fbfa 100644 --- a/src/test/compile-fail/multitrait.rs +++ b/src/test/compile-fail/multitrait.rs @@ -12,7 +12,7 @@ struct S { y: int } -impl Cmp, ToString for S { //~ ERROR: expected `{`, found `,` +impl Cmp, ToString for S { //~ ERROR: expected one of `(`, `+`, `::`, or `{`, found `,` fn eq(&&other: S) { false } fn to_string(&self) -> String { "hi".to_string() } } diff --git a/src/test/compile-fail/mut-patterns.rs b/src/test/compile-fail/mut-patterns.rs index a33a603f7f5..a78e82bb73c 100644 --- a/src/test/compile-fail/mut-patterns.rs +++ b/src/test/compile-fail/mut-patterns.rs @@ -12,5 +12,5 @@ pub fn main() { struct Foo { x: int } - let mut Foo { x: x } = Foo { x: 3 }; //~ ERROR: expected `;`, found `{` + let mut Foo { x: x } = Foo { x: 3 }; //~ ERROR: expected one of `:`, `;`, `=`, or `@`, found `{` } diff --git a/src/test/compile-fail/omitted-arg-in-item-fn.rs b/src/test/compile-fail/omitted-arg-in-item-fn.rs index c5ff885997b..729b45df8b4 100644 --- a/src/test/compile-fail/omitted-arg-in-item-fn.rs +++ b/src/test/compile-fail/omitted-arg-in-item-fn.rs @@ -8,5 +8,5 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -fn foo(x) { //~ ERROR expected `:`, found `)` +fn foo(x) { //~ ERROR expected one of `!`, `:`, or `@`, found `)` } diff --git a/src/test/compile-fail/pat-range-bad-dots.rs b/src/test/compile-fail/pat-range-bad-dots.rs index 5605caaeeed..7fe073a4c3d 100644 --- a/src/test/compile-fail/pat-range-bad-dots.rs +++ b/src/test/compile-fail/pat-range-bad-dots.rs @@ -10,7 +10,7 @@ pub fn main() { match 22i { - 0 .. 
3 => {} //~ ERROR expected `=>`, found `..` + 0 .. 3 => {} //~ ERROR expected one of `...`, `=>`, or `|`, found `..` _ => {} } } diff --git a/src/test/compile-fail/raw-str-unbalanced.rs b/src/test/compile-fail/raw-str-unbalanced.rs index 4f3fb7d5b8a..3403b28fdc9 100644 --- a/src/test/compile-fail/raw-str-unbalanced.rs +++ b/src/test/compile-fail/raw-str-unbalanced.rs @@ -10,5 +10,5 @@ static s: &'static str = r#" - "## //~ ERROR expected `;`, found `#` + "## //~ ERROR expected one of `.`, `;`, or an operator, found `#` ; diff --git a/src/test/compile-fail/removed-syntax-closure-lifetime.rs b/src/test/compile-fail/removed-syntax-closure-lifetime.rs index a726e30b1de..a07832d5bb7 100644 --- a/src/test/compile-fail/removed-syntax-closure-lifetime.rs +++ b/src/test/compile-fail/removed-syntax-closure-lifetime.rs @@ -8,4 +8,4 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -type closure = Box; //~ ERROR expected `,`, found `/` +type closure = Box; //~ ERROR expected one of `(`, `+`, `,`, `::`, or `>`, found `/` diff --git a/src/test/compile-fail/removed-syntax-enum-newtype.rs b/src/test/compile-fail/removed-syntax-enum-newtype.rs index b9c9c5f0a53..ba1b5a616df 100644 --- a/src/test/compile-fail/removed-syntax-enum-newtype.rs +++ b/src/test/compile-fail/removed-syntax-enum-newtype.rs @@ -8,4 +8,4 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -enum e = int; //~ ERROR expected `{`, found `=` +enum e = int; //~ ERROR expected one of `<` or `{`, found `=` diff --git a/src/test/compile-fail/removed-syntax-fixed-vec.rs b/src/test/compile-fail/removed-syntax-fixed-vec.rs index 917b4e03ad0..fe49d1f4a8d 100644 --- a/src/test/compile-fail/removed-syntax-fixed-vec.rs +++ b/src/test/compile-fail/removed-syntax-fixed-vec.rs @@ -8,4 +8,4 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-type v = [int * 3]; //~ ERROR expected `]`, found `*` +type v = [int * 3]; //~ ERROR expected one of `(`, `+`, `,`, `::`, or `]`, found `*` diff --git a/src/test/compile-fail/removed-syntax-larrow-init.rs b/src/test/compile-fail/removed-syntax-larrow-init.rs index b2e856750df..1474cc9dd39 100644 --- a/src/test/compile-fail/removed-syntax-larrow-init.rs +++ b/src/test/compile-fail/removed-syntax-larrow-init.rs @@ -11,5 +11,5 @@ fn removed_moves() { let mut x = 0; let y <- x; - //~^ ERROR expected `;`, found `<-` + //~^ ERROR expected one of `!`, `:`, `;`, `=`, or `@`, found `<-` } diff --git a/src/test/compile-fail/removed-syntax-larrow-move.rs b/src/test/compile-fail/removed-syntax-larrow-move.rs index e39fbe0f950..552c9f2efa2 100644 --- a/src/test/compile-fail/removed-syntax-larrow-move.rs +++ b/src/test/compile-fail/removed-syntax-larrow-move.rs @@ -12,5 +12,5 @@ fn removed_moves() { let mut x = 0; let y = 0; y <- x; - //~^ ERROR expected one of `;`, `}`, found `<-` + //~^ ERROR expected one of `!`, `.`, `::`, `;`, `{`, `}`, or an operator, found `<-` } diff --git a/src/test/compile-fail/removed-syntax-mut-vec-expr.rs b/src/test/compile-fail/removed-syntax-mut-vec-expr.rs index b20da6346f7..437f871f8ea 100644 --- a/src/test/compile-fail/removed-syntax-mut-vec-expr.rs +++ b/src/test/compile-fail/removed-syntax-mut-vec-expr.rs @@ -11,5 +11,5 @@ fn f() { let v = [mut 1, 2, 3, 4]; //~^ ERROR expected identifier, found keyword `mut` - //~^^ ERROR expected `]`, found `1` + //~^^ ERROR expected one of `!`, `,`, `.`, `::`, `]`, `{`, or an operator, found `1` } diff --git a/src/test/compile-fail/removed-syntax-mut-vec-ty.rs b/src/test/compile-fail/removed-syntax-mut-vec-ty.rs index c5eec2ef6e1..af469fadf98 100644 --- a/src/test/compile-fail/removed-syntax-mut-vec-ty.rs +++ b/src/test/compile-fail/removed-syntax-mut-vec-ty.rs @@ -10,4 +10,4 @@ type v = [mut int]; //~^ ERROR expected identifier, found keyword `mut` - //~^^ ERROR expected `]`, found `int` + //~^^ ERROR expected one of `(`, `+`, `,`, `::`, or `]`, found `int` diff --git a/src/test/compile-fail/removed-syntax-ptr-lifetime.rs b/src/test/compile-fail/removed-syntax-ptr-lifetime.rs index 0468ddd389a..1a1c4c9b40a 100644 --- a/src/test/compile-fail/removed-syntax-ptr-lifetime.rs +++ b/src/test/compile-fail/removed-syntax-ptr-lifetime.rs @@ -8,4 +8,4 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -type bptr = &lifetime/int; //~ ERROR expected `;`, found `/` +type bptr = &lifetime/int; //~ ERROR expected one of `(`, `+`, `::`, or `;`, found `/` diff --git a/src/test/compile-fail/removed-syntax-record.rs b/src/test/compile-fail/removed-syntax-record.rs index b31e2538ab9..ae5a68575f7 100644 --- a/src/test/compile-fail/removed-syntax-record.rs +++ b/src/test/compile-fail/removed-syntax-record.rs @@ -8,4 +8,4 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-type t = { f: () }; //~ ERROR expected type, found token OpenDelim(Brace) +type t = { f: () }; //~ ERROR expected type, found `{` diff --git a/src/test/compile-fail/removed-syntax-uniq-mut-expr.rs b/src/test/compile-fail/removed-syntax-uniq-mut-expr.rs index 124b3738fab..c5559c4ea96 100644 --- a/src/test/compile-fail/removed-syntax-uniq-mut-expr.rs +++ b/src/test/compile-fail/removed-syntax-uniq-mut-expr.rs @@ -11,5 +11,5 @@ fn f() { let a_box = box mut 42; //~^ ERROR expected identifier, found keyword `mut` - //~^^ ERROR expected `;`, found `42` + //~^^ ERROR expected one of `!`, `.`, `::`, `;`, `{`, or an operator, found `42` } diff --git a/src/test/compile-fail/removed-syntax-uniq-mut-ty.rs b/src/test/compile-fail/removed-syntax-uniq-mut-ty.rs index 579bfed1331..8c3db89bad2 100644 --- a/src/test/compile-fail/removed-syntax-uniq-mut-ty.rs +++ b/src/test/compile-fail/removed-syntax-uniq-mut-ty.rs @@ -10,4 +10,4 @@ type mut_box = Box; //~^ ERROR expected identifier, found keyword `mut` - //~^^ ERROR expected `,`, found `int` + //~^^ ERROR expected one of `(`, `+`, `,`, `::`, or `>`, found `int` diff --git a/src/test/compile-fail/removed-syntax-with-1.rs b/src/test/compile-fail/removed-syntax-with-1.rs index fd8cdb7b10e..c7f31045cb6 100644 --- a/src/test/compile-fail/removed-syntax-with-1.rs +++ b/src/test/compile-fail/removed-syntax-with-1.rs @@ -16,5 +16,5 @@ fn removed_with() { let a = S { foo: (), bar: () }; let b = S { foo: () with a }; - //~^ ERROR expected one of `,`, `}`, found `with` + //~^ ERROR expected one of `,`, `.`, `}`, or an operator, found `with` } diff --git a/src/test/compile-fail/struct-literal-in-for.rs b/src/test/compile-fail/struct-literal-in-for.rs index ccd711d8375..a37197b889d 100644 --- a/src/test/compile-fail/struct-literal-in-for.rs +++ b/src/test/compile-fail/struct-literal-in-for.rs @@ -20,7 +20,7 @@ impl Foo { fn main() { for x in Foo { - x: 3 //~ ERROR expected one of `;`, `}` + x: 3 //~ ERROR expected one of `!`, `.`, `::`, `;`, `{`, `}`, or an operator, found `:` }.hi() { println!("yo"); } diff --git a/src/test/compile-fail/struct-literal-in-if.rs b/src/test/compile-fail/struct-literal-in-if.rs index d63c216c3be..9759e4f7bda 100644 --- a/src/test/compile-fail/struct-literal-in-if.rs +++ b/src/test/compile-fail/struct-literal-in-if.rs @@ -20,7 +20,7 @@ impl Foo { fn main() { if Foo { - x: 3 //~ ERROR expected one of `;`, `}` + x: 3 //~ ERROR expected one of `!`, `.`, `::`, `;`, `{`, `}`, or an operator, found `:` }.hi() { println!("yo"); } diff --git a/src/test/compile-fail/struct-literal-in-match-discriminant.rs b/src/test/compile-fail/struct-literal-in-match-discriminant.rs index c740ba02062..297d3f7347f 100644 --- a/src/test/compile-fail/struct-literal-in-match-discriminant.rs +++ b/src/test/compile-fail/struct-literal-in-match-discriminant.rs @@ -14,7 +14,7 @@ struct Foo { fn main() { match Foo { - x: 3 //~ ERROR expected `=>` + x: 3 //~ ERROR expected one of `!`, `=>`, `@`, or `|`, found `:` } { Foo { x: x diff --git a/src/test/compile-fail/struct-literal-in-while.rs b/src/test/compile-fail/struct-literal-in-while.rs index 7b2c11e2597..5b1679cf9a1 100644 --- a/src/test/compile-fail/struct-literal-in-while.rs +++ b/src/test/compile-fail/struct-literal-in-while.rs @@ -20,7 +20,7 @@ impl Foo { fn main() { while Foo { - x: 3 //~ ERROR expected one of `;`, `}` + x: 3 //~ ERROR expected one of `!`, `.`, `::`, `;`, `{`, `}`, or an operator, found `:` }.hi() { println!("yo"); } From 4b271f3f64ce334767b8ff10d19abc402a1838b3 Mon Sep 17 00:00:00 2001 From: Kang 
Seonghoon Date: Thu, 4 Dec 2014 12:48:16 +0900 Subject: [PATCH 25/40] rustdoc: Preserve query/fragment in redirects whenever possible. We heavily rely on queries and fragments in the URL structure, so it is desired to preserve them even in the redirects. The generated redirect pages try to preserve them with scripts, which take precedence over the original `Refresh` metadata. Non-scripting browsers would continue to work (with no queries and fragments). --- src/librustdoc/html/layout.rs | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/librustdoc/html/layout.rs b/src/librustdoc/html/layout.rs index 896d070c155..23f31580619 100644 --- a/src/librustdoc/html/layout.rs +++ b/src/librustdoc/html/layout.rs @@ -160,6 +160,7 @@ r##" } pub fn redirect(dst: &mut io::Writer, url: &str) -> io::IoResult<()> { + // "##, url = url, From a12b83996ee48ea149e69cb8600d61998d205c1b Mon Sep 17 00:00:00 2001 From: Kang Seonghoon Date: Thu, 4 Dec 2014 13:52:23 +0900 Subject: [PATCH 26/40] rustdoc: Do not deduplicate items when their parents differ. Fixes #17332. --- src/librustdoc/html/static/main.js | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/librustdoc/html/static/main.js b/src/librustdoc/html/static/main.js index 7c6f7ed3fe2..069cc100a96 100644 --- a/src/librustdoc/html/static/main.js +++ b/src/librustdoc/html/static/main.js @@ -313,7 +313,8 @@ for (var i = results.length - 1; i > 0; i -= 1) { if (results[i].word === results[i - 1].word && results[i].item.ty === results[i - 1].item.ty && - results[i].item.path === results[i - 1].item.path) + results[i].item.path === results[i - 1].item.path && + (results[i].item.parent || {}).name === (results[i - 1].item.parent || {}).name) { results[i].id = -1; } From e7c1f57d6c8781cfb3e746eac5f13f760fcde2b4 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Sat, 29 Nov 2014 23:07:43 -0800 Subject: [PATCH 27/40] Back io::stdin with a global singleton BufferedReader io::stdin returns a new `BufferedReader` each time it's called, which results in some very confusing behavior with disappearing output. It now returns a `StdinReader`, which wraps a global singleton `Arc>`. `Reader` is implemented directly on `StdinReader`. However, `Buffer` is not, as the `fill_buf` method is fundamentaly un-thread safe. A `lock` method is defined on `StdinReader` which returns a smart pointer wrapping the underlying `BufferedReader` while guaranteeing mutual exclusion. Code that treats the return value of io::stdin as implementing `Buffer` will break. Add a call to `lock`: ```rust io::stdin().lines() // => io::stdin().lock().lines() ``` Closes #14434 [breaking-change] --- src/libstd/io/mod.rs | 8 +- src/libstd/io/stdio.rs | 152 +++++++++++++++--- src/test/bench/shootout-k-nucleotide.rs | 2 +- src/test/bench/sudoku.rs | 4 +- .../cannot-mutate-captured-non-mut-var.rs | 2 +- src/test/run-pass/issue-13304.rs | 2 +- src/test/run-pass/issue-14456.rs | 2 +- src/test/run-pass/issue-16671.rs | 2 +- 8 files changed, 143 insertions(+), 31 deletions(-) diff --git a/src/libstd/io/mod.rs b/src/libstd/io/mod.rs index 5ed10eab15b..b2e4fc75cf2 100644 --- a/src/libstd/io/mod.rs +++ b/src/libstd/io/mod.rs @@ -32,7 +32,7 @@ //! ```rust //! use std::io; //! -//! for line in io::stdin().lines() { +//! for line in io::stdin().lock().lines() { //! print!("{}", line.unwrap()); //! } //! 
``` @@ -1413,10 +1413,10 @@ pub trait Buffer: Reader { /// # Example /// /// ```rust - /// use std::io; + /// use std::io::BufReader; /// - /// let mut reader = io::stdin(); - /// let input = reader.read_line().ok().unwrap_or("nothing".to_string()); + /// let mut reader = BufReader::new(b"hello\nworld"); + /// assert_eq!("hello\n", &*reader.read_line().unwrap()); /// ``` /// /// # Error diff --git a/src/libstd/io/stdio.rs b/src/libstd/io/stdio.rs index 665000eae88..ad5dcf71df7 100644 --- a/src/libstd/io/stdio.rs +++ b/src/libstd/io/stdio.rs @@ -29,22 +29,27 @@ use self::StdSource::*; use boxed::Box; use cell::RefCell; +use clone::Clone; use failure::LOCAL_STDERR; use fmt; -use io::{Reader, Writer, IoResult, IoError, OtherIoError, +use io::{Reader, Writer, IoResult, IoError, OtherIoError, Buffer, standard_error, EndOfFile, LineBufferedWriter, BufferedReader}; use kinds::Send; use libc; use mem; use option::{Option, Some, None}; +use ops::{Deref, DerefMut}; use result::{Ok, Err}; use rustrt; use rustrt::local::Local; use rustrt::task::Task; use slice::SlicePrelude; use str::StrPrelude; +use string::String; use sys::{fs, tty}; +use sync::{Arc, Mutex, MutexGuard, Once, ONCE_INIT}; use uint; +use vec::Vec; // And so begins the tale of acquiring a uv handle to a stdio stream on all // platforms in all situations. Our story begins by splitting the world into two @@ -90,28 +95,135 @@ thread_local!(static LOCAL_STDOUT: RefCell>> = { RefCell::new(None) }) -/// Creates a new non-blocking handle to the stdin of the current process. +/// A synchronized wrapper around a buffered reader from stdin +#[deriving(Clone)] +pub struct StdinReader { + inner: Arc>>, +} + +/// A guard for exlusive access to `StdinReader`'s internal `BufferedReader`. +pub struct StdinReaderGuard<'a> { + inner: MutexGuard<'a, BufferedReader>, +} + +impl<'a> Deref> for StdinReaderGuard<'a> { + fn deref(&self) -> &BufferedReader { + &*self.inner + } +} + +impl<'a> DerefMut> for StdinReaderGuard<'a> { + fn deref_mut(&mut self) -> &mut BufferedReader { + &mut *self.inner + } +} + +impl StdinReader { + /// Locks the `StdinReader`, granting the calling thread exclusive access + /// to the underlying `BufferedReader`. + /// + /// This provides access to methods like `chars` and `lines`. + /// + /// ## Example + /// + /// ```rust + /// use std::io; + /// + /// for line in io::stdin().lock().lines() { + /// println!("{}", line.unwrap()); + /// } + /// ``` + pub fn lock<'a>(&'a mut self) -> StdinReaderGuard<'a> { + StdinReaderGuard { + inner: self.inner.lock() + } + } + + /// Like `Buffer::read_line`. + /// + /// The read is performed atomically - concurrent read calls in other + /// threads will not interleave with this one. + pub fn read_line(&mut self) -> IoResult { + self.inner.lock().read_line() + } + + /// Like `Buffer::read_until`. + /// + /// The read is performed atomically - concurrent read calls in other + /// threads will not interleave with this one. + pub fn read_until(&mut self, byte: u8) -> IoResult> { + self.inner.lock().read_until(byte) + } + + /// Like `Buffer::read_char`. + /// + /// The read is performed atomically - concurrent read calls in other + /// threads will not interleave with this one. 
+ pub fn read_char(&mut self) -> IoResult { + self.inner.lock().read_char() + } +} + +impl Reader for StdinReader { + fn read(&mut self, buf: &mut [u8]) -> IoResult { + self.inner.lock().read(buf) + } + + // We have to manually delegate all of these because the default impls call + // read more than once and we don't want those calls to interleave (or + // incur the costs of repeated locking). + + fn read_at_least(&mut self, min: uint, buf: &mut [u8]) -> IoResult { + self.inner.lock().read_at_least(min, buf) + } + + fn push_at_least(&mut self, min: uint, len: uint, buf: &mut Vec) -> IoResult { + self.inner.lock().push_at_least(min, len, buf) + } + + fn read_to_end(&mut self) -> IoResult> { + self.inner.lock().read_to_end() + } + + fn read_le_uint_n(&mut self, nbytes: uint) -> IoResult { + self.inner.lock().read_le_uint_n(nbytes) + } + + fn read_be_uint_n(&mut self, nbytes: uint) -> IoResult { + self.inner.lock().read_be_uint_n(nbytes) + } +} + +/// Creates a new handle to the stdin of the current process. /// -/// The returned handled is buffered by default with a `BufferedReader`. If -/// buffered access is not desired, the `stdin_raw` function is provided to -/// provided unbuffered access to stdin. -/// -/// Care should be taken when creating multiple handles to the stdin of a -/// process. Because this is a buffered reader by default, it's possible for -/// pending input to be unconsumed in one reader and unavailable to other -/// readers. It is recommended that only one handle at a time is created for the -/// stdin of a process. +/// The returned handle is a wrapper around a global `BufferedReader` shared +/// by all threads. If buffered access is not desired, the `stdin_raw` function +/// is provided to provided unbuffered access to stdin. /// /// See `stdout()` for more notes about this function. -pub fn stdin() -> BufferedReader { - // The default buffer capacity is 64k, but apparently windows doesn't like - // 64k reads on stdin. See #13304 for details, but the idea is that on - // windows we use a slightly smaller buffer that's been seen to be - // acceptable. - if cfg!(windows) { - BufferedReader::with_capacity(8 * 1024, stdin_raw()) - } else { - BufferedReader::new(stdin_raw()) +pub fn stdin() -> StdinReader { + // We're following the same strategy as kimundi's lazy_static library + static mut STDIN: *const StdinReader = 0 as *const StdinReader; + static ONCE: Once = ONCE_INIT; + + unsafe { + ONCE.doit(|| { + // The default buffer capacity is 64k, but apparently windows doesn't like + // 64k reads on stdin. See #13304 for details, but the idea is that on + // windows we use a slightly smaller buffer that's been seen to be + // acceptable. 
+ let stdin = if cfg!(windows) { + BufferedReader::with_capacity(8 * 1024, stdin_raw()) + } else { + BufferedReader::new(stdin_raw()) + }; + let stdin = StdinReader { + inner: Arc::new(Mutex::new(stdin)) + }; + STDIN = mem::transmute(box stdin); + }); + + (*STDIN).clone() } } diff --git a/src/test/bench/shootout-k-nucleotide.rs b/src/test/bench/shootout-k-nucleotide.rs index b030e7bb93e..8ed041513c4 100644 --- a/src/test/bench/shootout-k-nucleotide.rs +++ b/src/test/bench/shootout-k-nucleotide.rs @@ -295,7 +295,7 @@ fn main() { let fd = std::io::File::open(&Path::new("shootout-k-nucleotide.data")); get_sequence(&mut std::io::BufferedReader::new(fd), ">THREE") } else { - get_sequence(&mut std::io::stdin(), ">THREE") + get_sequence(&mut *std::io::stdin().lock(), ">THREE") }; let input = Arc::new(input); diff --git a/src/test/bench/sudoku.rs b/src/test/bench/sudoku.rs index 6664eeecd5d..c55f85f40e8 100644 --- a/src/test/bench/sudoku.rs +++ b/src/test/bench/sudoku.rs @@ -65,7 +65,7 @@ impl Sudoku { return true; } - pub fn read(mut reader: BufferedReader) -> Sudoku { + pub fn read(mut reader: &mut BufferedReader) -> Sudoku { /* assert first line is exactly "9,9" */ assert!(reader.read_line().unwrap() == "9,9".to_string()); @@ -284,7 +284,7 @@ fn main() { let mut sudoku = if use_default { Sudoku::from_vec(&DEFAULT_SUDOKU) } else { - Sudoku::read(io::stdin()) + Sudoku::read(&mut *io::stdin().lock()) }; sudoku.solve(); sudoku.write(&mut io::stdout()); diff --git a/src/test/compile-fail/cannot-mutate-captured-non-mut-var.rs b/src/test/compile-fail/cannot-mutate-captured-non-mut-var.rs index fcb09c20000..daad1afedaa 100644 --- a/src/test/compile-fail/cannot-mutate-captured-non-mut-var.rs +++ b/src/test/compile-fail/cannot-mutate-captured-non-mut-var.rs @@ -14,6 +14,6 @@ fn main() { //~^ ERROR: cannot assign to immutable captured outer variable in a proc `x` let s = std::io::stdin(); - proc() { s.lines(); }; + proc() { s.read_to_end(); }; //~^ ERROR: cannot borrow immutable captured outer variable in a proc `s` as mutable } diff --git a/src/test/run-pass/issue-13304.rs b/src/test/run-pass/issue-13304.rs index 047ff74035b..11003c6fc52 100644 --- a/src/test/run-pass/issue-13304.rs +++ b/src/test/run-pass/issue-13304.rs @@ -37,7 +37,7 @@ fn parent() { } fn child() { - for line in io::stdin().lines() { + for line in io::stdin().lock().lines() { println!("{}", line.unwrap()); } } diff --git a/src/test/run-pass/issue-14456.rs b/src/test/run-pass/issue-14456.rs index 2339e3f6302..f5fdf8704ed 100644 --- a/src/test/run-pass/issue-14456.rs +++ b/src/test/run-pass/issue-14456.rs @@ -27,7 +27,7 @@ fn main() { fn child() { io::stdout().write_line("foo").unwrap(); io::stderr().write_line("bar").unwrap(); - assert_eq!(io::stdin().read_line().err().unwrap().kind, io::EndOfFile); + assert_eq!(io::stdin().lock().read_line().err().unwrap().kind, io::EndOfFile); } fn test() { diff --git a/src/test/run-pass/issue-16671.rs b/src/test/run-pass/issue-16671.rs index a0d384418f9..27a97e1f172 100644 --- a/src/test/run-pass/issue-16671.rs +++ b/src/test/run-pass/issue-16671.rs @@ -19,6 +19,6 @@ pub fn main() { let mut stdin = std::io::stdin(); spawn(proc() { - let _ = stdin.lines(); + let _ = stdin.read_to_end(); }); } From cddbb6a75bd4618e6e136e7ec5140b95899ad70b Mon Sep 17 00:00:00 2001 From: Chase Southwood Date: Thu, 4 Dec 2014 03:10:58 -0600 Subject: [PATCH 28/40] `DerefMut` should be `for Sized?` --- src/libcore/ops.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/libcore/ops.rs 
b/src/libcore/ops.rs index d85481098e4..7de89e2bc50 100644 --- a/src/libcore/ops.rs +++ b/src/libcore/ops.rs @@ -787,7 +787,7 @@ impl<'a, Sized? T> Deref for &'a mut T { /// } /// ``` #[lang="deref_mut"] -pub trait DerefMut: Deref { +pub trait DerefMut for Sized? : Deref { /// The method called to mutably dereference a value fn deref_mut<'a>(&'a mut self) -> &'a mut Result; } From 2e1911b47ae5a4fd8cf4c4a98e739666f99daf82 Mon Sep 17 00:00:00 2001 From: Austin Bonander Date: Wed, 3 Dec 2014 16:31:21 -0800 Subject: [PATCH 29/40] core::iter::Unfold: reword docs and add example Remove note about core --- src/libcore/iter.rs | 37 ++++++++++++++++++++++++++++++++++--- 1 file changed, 34 insertions(+), 3 deletions(-) diff --git a/src/libcore/iter.rs b/src/libcore/iter.rs index 2d488a4b155..6c43d4dbc30 100644 --- a/src/libcore/iter.rs +++ b/src/libcore/iter.rs @@ -2036,18 +2036,49 @@ for Inspect<'a, A, T> { } } -/// An iterator which just modifies the contained state throughout iteration. +/// An iterator which passes mutable state to a closure and yields the result. +/// +/// # Example: The Fibonacci Sequence +/// +/// An iterator that yields sequential Fibonacci numbers, and stops on overflow. +/// +/// ```rust +/// use std::iter::Unfold; +/// use std::num::Int; // For `.checked_add()` +/// +/// // This iterator will yield up to the last Fibonacci number before the max value of `u32`. +/// // You can simply change `u32` to `u64` in this line if you want higher values than that. +/// let mut fibonacci = Unfold::new((Some(0u32), Some(1u32)), |&(ref mut x2, ref mut x1)| { +/// // Attempt to get the next Fibonacci number +/// // `x1` will be `None` if previously overflowed. +/// let next = match (*x2, *x1) { +/// (Some(x2), Some(x1)) => x2.checked_add(x1), +/// _ => None, +/// }; +/// +/// // Shift left: ret <- x2 <- x1 <- next +/// let ret = *x2; +/// *x2 = *x1; +/// *x1 = next; +/// +/// ret +/// }); +/// +/// for i in fibonacci { +/// println!("{}", i); +/// } +/// ``` #[experimental] pub struct Unfold<'a, A, St> { f: |&mut St|: 'a -> Option, - /// Internal state that will be yielded on the next iteration + /// Internal state that will be passed to the closure on the next iteration pub state: St, } #[experimental] impl<'a, A, St> Unfold<'a, A, St> { /// Creates a new iterator with the specified closure as the "iterator - /// function" and an initial state to eventually pass to the iterator + /// function" and an initial state to eventually pass to the closure #[inline] pub fn new<'a>(initial_state: St, f: |&mut St|: 'a -> Option) -> Unfold<'a, A, St> { From f0f7a9006853902882f7475b400fc9075c798c29 Mon Sep 17 00:00:00 2001 From: Steve Klabnik Date: Thu, 4 Dec 2014 11:36:08 -0500 Subject: [PATCH 30/40] Some small copy edits to the guide. Fixes #19335. --- src/doc/guide.md | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/src/doc/guide.md b/src/doc/guide.md index c2d43a20ec4..f019a114855 100644 --- a/src/doc/guide.md +++ b/src/doc/guide.md @@ -140,7 +140,7 @@ $ editor main.rs ``` Rust files always end in a `.rs` extension. If you're using more than one word -in your file name, use an underscore. `hello_world.rs` rather than +in your filename, use an underscore. `hello_world.rs` rather than `helloworld.rs`. Now that you've got your file open, type this in: @@ -200,7 +200,7 @@ about this difference. Just know that sometimes, you'll see a `!`, and that means that you're calling a macro instead of a normal function. 
Rust implements `println!` as a macro rather than a function for good reasons, but that's a very advanced topic. You'll learn more when we talk about macros later. One -last thing to mention: Rust's macros are significantly different than C macros, +last thing to mention: Rust's macros are significantly different from C macros, if you've used those. Don't be scared of using macros. We'll get to the details eventually, you'll just have to trust us for now. @@ -595,8 +595,8 @@ let y = if x == 5i { 10i } else { 15i }; ``` This reveals two interesting things about Rust: it is an expression-based -language, and semicolons are different than in other 'curly brace and -semicolon'-based languages. These two things are related. +language, and semicolons are different from semicolons in other 'curly brace +and semicolon'-based languages. These two things are related. ## Expressions vs. Statements @@ -1454,7 +1454,7 @@ Both `continue` and `break` are valid in both kinds of loops. # Strings Strings are an important concept for any programmer to master. Rust's string -handling system is a bit different than in other languages, due to its systems +handling system is a bit different from other languages, due to its systems focus. Any time you have a data structure of variable size, things can get tricky, and strings are a re-sizable data structure. That said, Rust's strings also work differently than in some other systems languages, such as C. From 010cbd011a644512afdca2378c5788181703cac3 Mon Sep 17 00:00:00 2001 From: Steve Klabnik Date: Thu, 4 Dec 2014 11:43:22 -0500 Subject: [PATCH 31/40] Tasks aren't actually lightweight :frown: Fixes #19402. --- src/doc/guide.md | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/src/doc/guide.md b/src/doc/guide.md index c2d43a20ec4..0280fb9e97c 100644 --- a/src/doc/guide.md +++ b/src/doc/guide.md @@ -5174,12 +5174,12 @@ processor. Rust's semantics lend themselves very nicely to solving a number of issues that programmers have with concurrency. Many concurrency errors that are runtime errors in other languages are compile-time errors in Rust. -Rust's concurrency primitive is called a **task**. Tasks are lightweight, and -do not share memory in an unsafe manner, preferring message passing to -communicate. It's worth noting that tasks are implemented as a library, and -not part of the language. This means that in the future, other concurrency -libraries can be written for Rust to help in specific scenarios. Here's an -example of creating a task: +Rust's concurrency primitive is called a **task**. Tasks are similar to +threads, and do not share memory in an unsafe manner, preferring message +passing to communicate. It's worth noting that tasks are implemented as a +library, and not part of the language. This means that in the future, other +concurrency libraries can be written for Rust to help in specific scenarios. 
+Here's an example of creating a task: ```{rust} spawn(proc() { From 0d3c41561751bbcbc8f2fccf7369280e35dbb68f Mon Sep 17 00:00:00 2001 From: Aaron Liblong Date: Thu, 4 Dec 2014 12:14:06 -0500 Subject: [PATCH 32/40] Add capacity() to VecMap Changed capacity() tag to unstable and fixed doc assert --- src/libcollections/vec_map.rs | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/src/libcollections/vec_map.rs b/src/libcollections/vec_map.rs index 36e66ed27f3..986e7ef5bc2 100644 --- a/src/libcollections/vec_map.rs +++ b/src/libcollections/vec_map.rs @@ -115,6 +115,22 @@ impl VecMap { VecMap { v: Vec::with_capacity(capacity) } } + /// Returns the number of elements the `VecMap` can hold without + /// reallocating. + /// + /// # Example + /// + /// ``` + /// use std::collections::VecMap; + /// let map: VecMap = VecMap::with_capacity(10); + /// assert!(map.capacity() >= 10); + /// ``` + #[inline] + #[unstable = "matches collection reform specification, waiting for dust to settle"] + pub fn capacity(&self) -> uint { + self.v.capacity() + } + /// Returns an iterator visiting all keys in ascending order by the keys. /// The iterator's element type is `uint`. #[unstable = "matches collection reform specification, waiting for dust to settle"] From fdb0d9026e0189141dfdbe08926252efeb21e4d6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Adolfo=20Ochagav=C3=ADa?= Date: Thu, 4 Dec 2014 20:56:44 +0100 Subject: [PATCH 33/40] Remove redundant compile-fail test Fixes https://github.com/rust-lang/rust/issues/19510 --- src/test/compile-fail/issue-17718-extern-const.rs | 15 --------------- 1 file changed, 15 deletions(-) delete mode 100644 src/test/compile-fail/issue-17718-extern-const.rs diff --git a/src/test/compile-fail/issue-17718-extern-const.rs b/src/test/compile-fail/issue-17718-extern-const.rs deleted file mode 100644 index 235d1222d81..00000000000 --- a/src/test/compile-fail/issue-17718-extern-const.rs +++ /dev/null @@ -1,15 +0,0 @@ -// Copyright 2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -extern { - const FOO: uint; //~ ERROR: unexpected token: `const` -} - -fn main() {} From 87235687a13f3fca2e300674997880f0f9ba12a7 Mon Sep 17 00:00:00 2001 From: Alexander Light Date: Thu, 4 Dec 2014 15:02:59 -0500 Subject: [PATCH 34/40] Add ability to use custom alloc::heap::imp Adds the ability to use a custom allocator heap by passing either --cfg external_crate and --extern external= or --cfg external_funcs and defining the allocator functions prefixed by 'rust_' somewhere. This is useful for many reasons including OS/embedded development, and allocator development and testing.
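For illustration only, a hypothetical `external_funcs` provider might forward the `rust_`-prefixed entry points straight to libc. This sketch is not part of the patch; the names and signatures simply mirror the extern block introduced below, and forwarding to malloc/free ignores the requested alignment, so a real provider would need to handle over-aligned requests itself:

    // Hypothetical sketch of an allocator provider for --cfg external_funcs.
    // Not part of this patch; period Rust syntax (uint, pub extern fn) assumed.
    extern crate libc;

    #[no_mangle]
    pub extern fn rust_allocate(size: uint, _align: uint) -> *mut u8 {
        unsafe { libc::malloc(size as libc::size_t) as *mut u8 }
    }

    #[no_mangle]
    pub extern fn rust_deallocate(ptr: *mut u8, _old_size: uint, _align: uint) {
        unsafe { libc::free(ptr as *mut libc::c_void) }
    }

    #[no_mangle]
    pub extern fn rust_reallocate(ptr: *mut u8, _old_size: uint, size: uint,
                                  _align: uint) -> *mut u8 {
        unsafe { libc::realloc(ptr as *mut libc::c_void, size as libc::size_t) as *mut u8 }
    }

    #[no_mangle]
    pub extern fn rust_reallocate_inplace(_ptr: *mut u8, old_size: uint, _size: uint,
                                          _align: uint) -> uint {
        // No in-place growth in this sketch; report the old usable size unchanged.
        old_size
    }

    #[no_mangle]
    pub extern fn rust_usable_size(size: uint, _align: uint) -> uint { size }

    #[no_mangle]
    pub extern fn rust_stats_print() {}

Such a crate would then be built into the program alongside --cfg external_funcs.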
--- src/liballoc/heap.rs | 58 +++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 55 insertions(+), 3 deletions(-) diff --git a/src/liballoc/heap.rs b/src/liballoc/heap.rs index 579f47ee874..067c235c9ae 100644 --- a/src/liballoc/heap.rs +++ b/src/liballoc/heap.rs @@ -123,7 +123,59 @@ const MIN_ALIGN: uint = 8; target_arch = "x86_64"))] const MIN_ALIGN: uint = 16; -#[cfg(jemalloc)] +#[cfg(external_funcs)] +mod imp { + extern { + fn rust_allocate(size: uint, align: uint) -> *mut u8; + fn rust_deallocate(ptr: *mut u8, old_size: uint, align: uint); + fn rust_reallocate(ptr: *mut u8, old_size: uint, size: uint, align: uint) -> *mut u8; + fn rust_reallocate_inplace(ptr: *mut u8, old_size: uint, size: uint, + align: uint) -> uint; + fn rust_usable_size(size: uint, align: uint) -> uint; + fn rust_stats_print(); + } + + #[inline] + pub unsafe fn allocate(size: uint, align: uint) -> *mut u8 { + rust_allocate(size, align) + } + + #[inline] + pub unsafe fn reallocate(ptr: *mut u8, old_size: uint, size: uint, + align: uint) -> *mut u8 { + rust_reallocate(ptr, old_size, size, align) + } + + #[inline] + pub unsafe fn deallocate(ptr: *mut u8, old_size: uint, align: uint) { + rust_deallocate(ptr, old_size, align) + } + + #[inline] + pub unsafe fn reallocate_inplace(ptr: *mut u8, old_size: uint, size: uint, + align: uint) -> uint { + rust_reallocate_inplace(ptr, old_size, size, align) + } + + #[inline] + pub fn usable_size(size: uint, align: uint) -> uint { + unsafe { rust_usable_size(size, align) } + } + + #[inline] + pub fn stats_print() { + unsafe { rust_stats_print() } + } +} + +#[cfg(external_crate)] +mod imp { + extern crate external; + pub use self::external::{allocate, deallocate, reallocate_inplace, reallocate}; + pub use self::external::{usable_size, stats_print}; +} + +#[cfg(all(not(external_funcs), not(external_crate), jemalloc))] mod imp { use core::option::{None, Option}; use core::ptr::{null_mut, null}; @@ -199,7 +251,7 @@ mod imp { } } -#[cfg(all(not(jemalloc), unix))] +#[cfg(all(not(external_funcs), not(external_crate), not(jemalloc), unix))] mod imp { use core::cmp; use core::ptr; @@ -260,7 +312,7 @@ mod imp { pub fn stats_print() {} } -#[cfg(all(not(jemalloc), windows))] +#[cfg(all(not(external_funcs), not(external_crate), not(jemalloc), windows))] mod imp { use libc::{c_void, size_t}; use libc; From d424af480fac92849fe9ac99bd606024865a8fc5 Mon Sep 17 00:00:00 2001 From: Chase Southwood Date: Thu, 4 Dec 2014 00:35:38 -0600 Subject: [PATCH 35/40] Implement BitOps for TrieSet --- src/libcollections/trie/set.rs | 66 +++++++++++++++++++++++++++++++++- 1 file changed, 65 insertions(+), 1 deletion(-) diff --git a/src/libcollections/trie/set.rs b/src/libcollections/trie/set.rs index dd884b6ee41..9ddb8dd8798 100644 --- a/src/libcollections/trie/set.rs +++ b/src/libcollections/trie/set.rs @@ -9,7 +9,6 @@ // except according to those terms.
// FIXME(conventions): implement bounded iterators -// FIXME(conventions): implement BitOr, BitAnd, BitXor, and Sub -// FIXME(conventions): replace each_reverse by making iter DoubleEnded -// FIXME(conventions): implement iter_mut and into_iter @@ -463,6 +462,30 @@ impl Extend for TrieSet { } } +impl BitOr for TrieSet { + fn bitor(&self, rhs: &TrieSet) -> TrieSet { + self.union(rhs).collect() + } +} + +impl BitAnd for TrieSet { + fn bitand(&self, rhs: &TrieSet) -> TrieSet { + self.intersection(rhs).collect() + } +} + +impl BitXor for TrieSet { + fn bitxor(&self, rhs: &TrieSet) -> TrieSet { + self.symmetric_difference(rhs).collect() + } +} + +impl Sub for TrieSet { + fn sub(&self, rhs: &TrieSet) -> TrieSet { + self.difference(rhs).collect() + } +} + /// A forward iterator over a set. pub struct SetItems<'a> { iter: Entries<'a, ()> @@ -569,6 +592,7 @@ impl<'a> Iterator for UnionItems<'a> { mod test { use std::prelude::*; use std::uint; + use vec::Vec; use super::TrieSet; @@ -738,4 +762,44 @@ mod test { &[1, 5, 9, 13, 19], &[1, 3, 5, 9, 11, 13, 16, 19, 24]); } + + #[test] + fn test_bit_or() { + let a: TrieSet = vec![1, 2, 3].into_iter().collect(); + let b: TrieSet = vec![3, 4, 5].into_iter().collect(); + + let set: TrieSet = a | b; + let v: Vec = set.iter().collect(); + assert_eq!(v, vec![1u, 2, 3, 4, 5]); + } + + #[test] + fn test_bit_and() { + let a: TrieSet = vec![1, 2, 3].into_iter().collect(); + let b: TrieSet = vec![2, 3, 4].into_iter().collect(); + + let set: TrieSet = a & b; + let v: Vec = set.iter().collect(); + assert_eq!(v, vec![2u, 3]); + } + + #[test] + fn test_bit_xor() { + let a: TrieSet = vec![1, 2, 3].into_iter().collect(); + let b: TrieSet = vec![3, 4, 5].into_iter().collect(); + + let set: TrieSet = a ^ b; + let v: Vec = set.iter().collect(); + assert_eq!(v, vec![1u, 2, 4, 5]); + } + + #[test] + fn test_sub() { + let a: TrieSet = vec![1, 2, 3].into_iter().collect(); + let b: TrieSet = vec![3, 4, 5].into_iter().collect(); + + let set: TrieSet = a - b; + let v: Vec = set.iter().collect(); + assert_eq!(v, vec![1u, 2]); + } } From 74fb798a200dc82cf5b4a18065e3ea565229adc3 Mon Sep 17 00:00:00 2001 From: "NODA, Kai" Date: Fri, 5 Dec 2014 02:05:57 +0800 Subject: [PATCH 36/40] libstd/sys/unix/process.rs: reap a zombie that didn't get through to exec(2). After the library successfully calls fork(2), the child performs several setup steps, such as setting the UID, GID, and current directory, before it calls exec(2). When one of those steps failed, the child would exit, but the parent never called waitpid(2), leaving it behind as a zombie. This patch also adds several sanity checks. They shouldn't have any noticeable impact on runtime performance. The new test case run-pass/wait-forked-but-failed-child.rs calls the ps command to check whether the new code can really reap a zombie. When I intentionally create many zombies with my test program ./spawn-failure, the output of "ps -A -o pid,sid,command" should look like this: PID SID COMMAND 1 1 /sbin/init 2 0 [kthreadd] 3 0 [ksoftirqd/0] ... 12562 9237 ./spawn-failure 12563 9237 [spawn-failure] 12564 9237 [spawn-failure] ... 12592 9237 [spawn-failure] 12593 9237 ps -A -o pid,sid,command 12884 12884 /bin/zsh 12922 12922 /bin/zsh ... Filtering the output by the "SID" (session ID) column is a quick way to tell whether a process (zombie) was spawned by my own test program. The number of "defunct" lines is then the number of zombie children.
Signed-off-by: NODA, Kai --- src/libstd/sys/unix/process.rs | 43 +++++++--- .../run-pass/wait-forked-but-failed-child.rs | 79 +++++++++++++++++++ 2 files changed, 112 insertions(+), 10 deletions(-) create mode 100644 src/test/run-pass/wait-forked-but-failed-child.rs diff --git a/src/libstd/sys/unix/process.rs b/src/libstd/sys/unix/process.rs index 76c316076f9..7dde19a6476 100644 --- a/src/libstd/sys/unix/process.rs +++ b/src/libstd/sys/unix/process.rs @@ -11,7 +11,7 @@ use self::Req::*; use libc::{mod, pid_t, c_void, c_int}; use c_str::CString; -use io::{mod, IoResult, IoError}; +use io::{mod, IoResult, IoError, EndOfFile}; use mem; use os; use ptr; @@ -39,6 +39,8 @@ enum Req { NewChild(libc::pid_t, Sender, u64), } +const CLOEXEC_MSG_FOOTER: &'static [u8] = b"NOEX"; + impl Process { pub fn id(&self) -> pid_t { self.pid @@ -106,18 +108,36 @@ impl Process { if pid < 0 { return Err(super::last_error()) } else if pid > 0 { + #[inline] + fn combine(arr: &[u8]) -> i32 { + let a = arr[0] as u32; + let b = arr[1] as u32; + let c = arr[2] as u32; + let d = arr[3] as u32; + + ((a << 24) | (b << 16) | (c << 8) | (d << 0)) as i32 + } + + let p = Process{ pid: pid }; drop(output); - let mut bytes = [0, ..4]; + let mut bytes = [0, ..8]; return match input.read(&mut bytes) { - Ok(4) => { - let errno = (bytes[0] as i32 << 24) | - (bytes[1] as i32 << 16) | - (bytes[2] as i32 << 8) | - (bytes[3] as i32 << 0); + Ok(8) => { + assert!(combine(CLOEXEC_MSG_FOOTER) == combine(bytes.slice(4, 8)), + "Validation on the CLOEXEC pipe failed: {}", bytes); + let errno = combine(bytes.slice(0, 4)); + assert!(p.wait(0).is_ok(), "wait(0) should either return Ok or panic"); Err(super::decode_error(errno)) } - Err(..) => Ok(Process { pid: pid }), - Ok(..) => panic!("short read on the cloexec pipe"), + Err(ref e) if e.kind == EndOfFile => Ok(p), + Err(e) => { + assert!(p.wait(0).is_ok(), "wait(0) should either return Ok or panic"); + panic!("the CLOEXEC pipe failed: {}", e) + }, + Ok(..) => { // pipe I/O up to PIPE_BUF bytes should be atomic + assert!(p.wait(0).is_ok(), "wait(0) should either return Ok or panic"); + panic!("short read on the CLOEXEC pipe") + } }; } @@ -154,13 +174,16 @@ impl Process { let _ = libc::close(input.fd()); fn fail(output: &mut FileDesc) -> ! { - let errno = sys::os::errno(); + let errno = sys::os::errno() as u32; let bytes = [ (errno >> 24) as u8, (errno >> 16) as u8, (errno >> 8) as u8, (errno >> 0) as u8, + CLOEXEC_MSG_FOOTER[0], CLOEXEC_MSG_FOOTER[1], + CLOEXEC_MSG_FOOTER[2], CLOEXEC_MSG_FOOTER[3] ]; + // pipe I/O up to PIPE_BUF bytes should be atomic assert!(output.write(&bytes).is_ok()); unsafe { libc::_exit(1) } } diff --git a/src/test/run-pass/wait-forked-but-failed-child.rs b/src/test/run-pass/wait-forked-but-failed-child.rs new file mode 100644 index 00000000000..17dfb9e3319 --- /dev/null +++ b/src/test/run-pass/wait-forked-but-failed-child.rs @@ -0,0 +1,79 @@ +// Copyright 2014 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ + +extern crate libc; + +use std::io::process::Command; +use std::iter::IteratorExt; + +use libc::funcs::posix88::unistd; + + +// "ps -A -o pid,sid,command" with GNU ps should output something like this: +// PID SID COMMAND +// 1 1 /sbin/init +// 2 0 [kthreadd] +// 3 0 [ksoftirqd/0] +// ... +// 12562 9237 ./spawn-failure +// 12563 9237 [spawn-failure] +// 12564 9237 [spawn-failure] +// ... +// 12592 9237 [spawn-failure] +// 12593 9237 ps -A -o pid,sid,command +// 12884 12884 /bin/zsh +// 12922 12922 /bin/zsh +// ... + +#[cfg(unix)] +fn find_zombies() { + // http://man.freebsd.org/ps(1) + // http://man7.org/linux/man-pages/man1/ps.1.html + #[cfg(not(target_os = "macos"))] + const FIELDS: &'static str = "pid,sid,command"; + + // https://developer.apple.com/library/mac/documentation/Darwin/ + // Reference/ManPages/man1/ps.1.html + #[cfg(target_os = "macos")] + const FIELDS: &'static str = "pid,sess,command"; + + let my_sid = unsafe { unistd::getsid(0) }; + + let ps_cmd_output = Command::new("ps").args(&["-A", "-o", FIELDS]).output().unwrap(); + let ps_output = String::from_utf8_lossy(ps_cmd_output.output.as_slice()); + + let found = ps_output.split('\n').enumerate().any(|(line_no, line)| + 0 < line_no && 0 < line.len() && + my_sid == from_str(line.split(' ').filter(|w| 0 < w.len()).nth(1) + .expect("1st column should be Session ID") + ).expect("Session ID string into integer") && + line.contains("defunct") && { + println!("Zombie child {}", line); + true + } + ); + + assert!( ! found, "Found at least one zombie child"); +} + +#[cfg(windows)] +fn find_zombies() { } + +fn main() { + let too_long = format!("/NoSuchCommand{:0300}", 0u8); + + for _ in range(0u32, 100) { + let invalid = Command::new(too_long.as_slice()).spawn(); + assert!(invalid.is_err()); + } + + find_zombies(); +} From 714ce7919719e6a70719c873dec506765c00686f Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Thu, 4 Dec 2014 20:20:09 -0800 Subject: [PATCH 37/40] Make missing_doc lint check typedefs Closes #19543 --- src/libcore/fmt/mod.rs | 1 + src/librustc/lint/builtin.rs | 1 + src/test/compile-fail/lint-missing-doc.rs | 3 +++ 3 files changed, 5 insertions(+) diff --git a/src/libcore/fmt/mod.rs b/src/libcore/fmt/mod.rs index 1d6906c13a8..7b9dd70c58f 100644 --- a/src/libcore/fmt/mod.rs +++ b/src/libcore/fmt/mod.rs @@ -34,6 +34,7 @@ mod float; pub mod rt; #[experimental = "core and I/O reconciliation may alter this definition"] +/// The type returned by formatter methods. pub type Result = result::Result<(), Error>; /// The error type which is returned from formatting a message into a stream. diff --git a/src/librustc/lint/builtin.rs b/src/librustc/lint/builtin.rs index 9a214d531d1..884615c7aae 100644 --- a/src/librustc/lint/builtin.rs +++ b/src/librustc/lint/builtin.rs @@ -1430,6 +1430,7 @@ impl LintPass for MissingDoc { ast::ItemEnum(..) => "an enum", ast::ItemStruct(..) => "a struct", ast::ItemTrait(..) => "a trait", + ast::ItemTy(..) => "a type alias", _ => return }; self.check_missing_docs_attrs(cx, Some(it.id), it.attrs.as_slice(), diff --git a/src/test/compile-fail/lint-missing-doc.rs b/src/test/compile-fail/lint-missing-doc.rs index 365081aee1a..8d4ecde692d 100644 --- a/src/test/compile-fail/lint-missing-doc.rs +++ b/src/test/compile-fail/lint-missing-doc.rs @@ -17,6 +17,9 @@ //! 
Some garbage docs for the crate here #![doc="More garbage"] +type Typedef = String; +pub type PubTypedef = String; //~ ERROR: missing documentation + struct Foo { a: int, b: int, From 71d4e77db8ad4b6d821da7e5d5300134ac95974e Mon Sep 17 00:00:00 2001 From: Alex Crichton Date: Mon, 24 Nov 2014 11:16:40 -0800 Subject: [PATCH 38/40] std: Rewrite the `sync` module This commit is a reimplementation of `std::sync` to be based on the system-provided primitives wherever possible. The previous implementation was fundamentally built on top of channels, and as part of the runtime reform it has become clear that this is not the level of abstraction that the standard library should be providing. This rewrite aims to provide as thin a shim as possible on top of the system primitives in order to make them safe. The overall interface of the `std::sync` module has in general not changed, but there are a few important distinctions, highlighted below: * The condition variable type, `Condvar`, has been separated out of a `Mutex`. A condition variable is now an entirely separate type. This separation benefits users who only use one mutex, and provides a clearer distinction of who's responsible for managing condition variables (the application). * All of `Condvar`, `Mutex`, and `RWLock` are now directly built on top of system primitives rather than using a custom implementation. The `Once`, `Barrier`, and `Semaphore` types are still built upon these abstractions of the system primitives. * The `Condvar`, `Mutex`, and `RWLock` types all have a new static type and constant initializer corresponding to them. These are provided primarily for C FFI interoperation, but are often useful when all that's needed is a global lock. The types, however, will leak memory unless `destroy()` is called on them, which is clearly documented. * The `Condvar` implementation for an `RWLock` write lock has been removed. This may be added back in the future with a userspace implementation, but this commit is focused on exposing the system primitives first. * The fundamental architecture of this design is to provide two separate layers. The first layer is that exposed by `sys_common` which is a cross-platform bare-metal abstraction of the system synchronization primitives. No attempt is made at making this layer safe, and it is quite unsafe to use! It is currently not exported as part of the API of the standard library, but the stabilization of the `sys` module will ensure that these will be exposed in time. The purpose of this layer is to provide the core cross-platform abstractions to implementors where necessary. The second layer is the layer provided by `std::sync` which is intended to be the thinnest possible layer on top of `sys_common` which is entirely safe to use. There are a few concerns which need to be addressed when making these system primitives safe: * Once used, the OS primitives can never be **moved**. This means that they essentially need to have a stable address. The static primitives use `&'static self` to enforce this, and the non-static primitives all use a `Box` to provide this guarantee. * Poisoning is leveraged to ensure that invalid data is not accessible from other tasks after one has panicked. In addition to these overall blanket safety limitations, each primitive has a few restrictions of its own: * Mutexes and rwlocks can only be unlocked from the same thread that they were locked by. This is achieved through RAII lock guards which cannot be sent across threads.
* Mutexes and rwlocks can only be unlocked if they were previously locked. This is achieved by not exposing an unlocking method. * A condition variable can only be waited on with a locked mutex. This is achieved by requiring a `MutexGuard` in the `wait()` method. * A condition variable cannot be used concurrently with more than one mutex. This is guaranteed by dynamically binding a condition variable to precisely one mutex for its entire lifecycle. This restriction may be able to be relaxed in the future (a mutex is unbound when no threads are waiting on the condvar), but for now it is sufficient to guarantee safety. * Condvars now support timeouts for their blocking operations. The implementation for these operations is provided by the system. Due to the modification of the `Condvar` API, removal of the `std::sync::mutex` API, and reimplementation, this is a breaking change. Most code should be fairly easy to port using the examples in the documentation of these primitives. [breaking-change] Closes #17094 Closes #18003 --- src/libstd/{sync => comm}/mpsc_queue.rs | 9 - src/libstd/{sync => comm}/spsc_queue.rs | 160 ++-- src/libstd/sync/barrier.rs | 116 +++ src/libstd/sync/condvar.rs | 358 +++++++ src/libstd/sync/deque.rs | 663 ------------- src/libstd/sync/future.rs | 2 +- src/libstd/sync/lock.rs | 805 ---------------- src/libstd/sync/mod.rs | 44 +- src/libstd/sync/mpmc_bounded_queue.rs | 219 ----- src/libstd/sync/mutex.rs | 370 ++++++-- src/libstd/sync/{one.rs => once.rs} | 17 +- src/libstd/sync/poison.rs | 48 + src/libstd/sync/raw.rs | 1132 ----------------------- src/libstd/sync/rwlock.rs | 514 ++++++++++ src/libstd/sync/semaphore.rs | 195 ++++ src/libstd/sys/common/condvar.rs | 67 ++ src/libstd/sys/common/mod.rs | 5 +- src/libstd/sys/common/mutex.rs | 64 ++ src/libstd/sys/common/rwlock.rs | 86 ++ src/libstd/sys/unix/condvar.rs | 83 ++ src/libstd/sys/unix/mod.rs | 6 +- src/libstd/sys/unix/mutex.rs | 52 ++ src/libstd/sys/unix/rwlock.rs | 57 ++ src/libstd/sys/unix/sync.rs | 208 +++++ src/libstd/sys/windows/condvar.rs | 63 ++ src/libstd/sys/windows/mod.rs | 4 + src/libstd/sys/windows/mutex.rs | 76 ++ src/libstd/sys/windows/rwlock.rs | 53 ++ src/libstd/sys/windows/sync.rs | 58 ++ 29 files changed, 2483 insertions(+), 3051 deletions(-) rename src/libstd/{sync => comm}/mpsc_queue.rs (95%) rename src/libstd/{sync => comm}/spsc_queue.rs (74%) create mode 100644 src/libstd/sync/barrier.rs create mode 100644 src/libstd/sync/condvar.rs delete mode 100644 src/libstd/sync/deque.rs delete mode 100644 src/libstd/sync/lock.rs delete mode 100644 src/libstd/sync/mpmc_bounded_queue.rs rename src/libstd/sync/{one.rs => once.rs} (96%) create mode 100644 src/libstd/sync/poison.rs delete mode 100644 src/libstd/sync/raw.rs create mode 100644 src/libstd/sync/rwlock.rs create mode 100644 src/libstd/sync/semaphore.rs create mode 100644 src/libstd/sys/common/condvar.rs create mode 100644 src/libstd/sys/common/mutex.rs create mode 100644 src/libstd/sys/common/rwlock.rs create mode 100644 src/libstd/sys/unix/condvar.rs create mode 100644 src/libstd/sys/unix/mutex.rs create mode 100644 src/libstd/sys/unix/rwlock.rs create mode 100644 src/libstd/sys/unix/sync.rs create mode 100644 src/libstd/sys/windows/condvar.rs create mode 100644 src/libstd/sys/windows/mutex.rs create mode 100644 src/libstd/sys/windows/rwlock.rs create mode 100644 src/libstd/sys/windows/sync.rs diff --git a/src/libstd/sync/mpsc_queue.rs b/src/libstd/comm/mpsc_queue.rs similarity index 95% rename from src/libstd/sync/mpsc_queue.rs rename to 
src/libstd/comm/mpsc_queue.rs index 09212e4dfb6..d4249abc3dd 100644 --- a/src/libstd/sync/mpsc_queue.rs +++ b/src/libstd/comm/mpsc_queue.rs @@ -132,15 +132,6 @@ impl Queue { if self.head.load(Acquire) == tail {Empty} else {Inconsistent} } } - - /// Attempts to pop data from this queue, but doesn't attempt too hard. This - /// will canonicalize inconsistent states to a `None` value. - pub fn casual_pop(&self) -> Option { - match self.pop() { - Data(t) => Some(t), - Empty | Inconsistent => None, - } - } } #[unsafe_destructor] diff --git a/src/libstd/sync/spsc_queue.rs b/src/libstd/comm/spsc_queue.rs similarity index 74% rename from src/libstd/sync/spsc_queue.rs rename to src/libstd/comm/spsc_queue.rs index f0eabe61737..a6b4ab71bac 100644 --- a/src/libstd/sync/spsc_queue.rs +++ b/src/libstd/comm/spsc_queue.rs @@ -40,7 +40,6 @@ use core::prelude::*; use alloc::boxed::Box; use core::mem; use core::cell::UnsafeCell; -use alloc::arc::Arc; use sync::atomic::{AtomicPtr, Relaxed, AtomicUint, Acquire, Release}; @@ -74,39 +73,6 @@ pub struct Queue { cache_subtractions: AtomicUint, } -/// A safe abstraction for the consumer in a single-producer single-consumer -/// queue. -pub struct Consumer { - inner: Arc> -} - -impl Consumer { - /// Attempts to pop the value from the head of the queue, returning `None` - /// if the queue is empty. - pub fn pop(&mut self) -> Option { - self.inner.pop() - } - - /// Attempts to peek at the head of the queue, returning `None` if the queue - /// is empty. - pub fn peek<'a>(&'a mut self) -> Option<&'a mut T> { - self.inner.peek() - } -} - -/// A safe abstraction for the producer in a single-producer single-consumer -/// queue. -pub struct Producer { - inner: Arc> -} - -impl Producer { - /// Pushes a new value onto the queue. - pub fn push(&mut self, t: T) { - self.inner.push(t) - } -} - impl Node { fn new() -> *mut Node { unsafe { @@ -118,30 +84,6 @@ impl Node { } } -/// Creates a new queue with a consumer-producer pair. -/// -/// The producer returned is connected to the consumer to push all data to -/// the consumer. -/// -/// # Arguments -/// -/// * `bound` - This queue implementation is implemented with a linked -/// list, and this means that a push is always a malloc. In -/// order to amortize this cost, an internal cache of nodes is -/// maintained to prevent a malloc from always being -/// necessary. This bound is the limit on the size of the -/// cache (if desired). If the value is 0, then the cache has -/// no bound. Otherwise, the cache will never grow larger than -/// `bound` (although the queue itself could be much larger. -pub fn queue(bound: uint) -> (Consumer, Producer) { - let q = unsafe { Queue::new(bound) }; - let arc = Arc::new(q); - let consumer = Consumer { inner: arc.clone() }; - let producer = Producer { inner: arc }; - - (consumer, producer) -} - impl Queue { /// Creates a new queue. 
/// @@ -296,78 +238,88 @@ impl Drop for Queue { mod test { use prelude::*; - use super::{queue}; + use sync::Arc; + use super::Queue; #[test] fn smoke() { - let (mut consumer, mut producer) = queue(0); - producer.push(1i); - producer.push(2); - assert_eq!(consumer.pop(), Some(1i)); - assert_eq!(consumer.pop(), Some(2)); - assert_eq!(consumer.pop(), None); - producer.push(3); - producer.push(4); - assert_eq!(consumer.pop(), Some(3)); - assert_eq!(consumer.pop(), Some(4)); - assert_eq!(consumer.pop(), None); + unsafe { + let queue = Queue::new(0); + queue.push(1i); + queue.push(2); + assert_eq!(queue.pop(), Some(1i)); + assert_eq!(queue.pop(), Some(2)); + assert_eq!(queue.pop(), None); + queue.push(3); + queue.push(4); + assert_eq!(queue.pop(), Some(3)); + assert_eq!(queue.pop(), Some(4)); + assert_eq!(queue.pop(), None); + } } #[test] fn peek() { - let (mut consumer, mut producer) = queue(0); - producer.push(vec![1i]); + unsafe { + let queue = Queue::new(0); + queue.push(vec![1i]); - // Ensure the borrowchecker works - match consumer.peek() { - Some(vec) => match vec.as_slice() { - // Note that `pop` is not allowed here due to borrow - [1] => {} - _ => return - }, - None => unreachable!() + // Ensure the borrowchecker works + match queue.peek() { + Some(vec) => match vec.as_slice() { + // Note that `pop` is not allowed here due to borrow + [1] => {} + _ => return + }, + None => unreachable!() + } + + queue.pop(); } - - consumer.pop(); } #[test] fn drop_full() { - let (_, mut producer) = queue(0); - producer.push(box 1i); - producer.push(box 2i); + unsafe { + let q = Queue::new(0); + q.push(box 1i); + q.push(box 2i); + } } #[test] fn smoke_bound() { - let (mut consumer, mut producer) = queue(1); - producer.push(1i); - producer.push(2); - assert_eq!(consumer.pop(), Some(1)); - assert_eq!(consumer.pop(), Some(2)); - assert_eq!(consumer.pop(), None); - producer.push(3); - producer.push(4); - assert_eq!(consumer.pop(), Some(3)); - assert_eq!(consumer.pop(), Some(4)); - assert_eq!(consumer.pop(), None); + unsafe { + let q = Queue::new(0); + q.push(1i); + q.push(2); + assert_eq!(q.pop(), Some(1)); + assert_eq!(q.pop(), Some(2)); + assert_eq!(q.pop(), None); + q.push(3); + q.push(4); + assert_eq!(q.pop(), Some(3)); + assert_eq!(q.pop(), Some(4)); + assert_eq!(q.pop(), None); + } } #[test] fn stress() { - stress_bound(0); - stress_bound(1); + unsafe { + stress_bound(0); + stress_bound(1); + } - fn stress_bound(bound: uint) { - let (consumer, mut producer) = queue(bound); + unsafe fn stress_bound(bound: uint) { + let q = Arc::new(Queue::new(bound)); let (tx, rx) = channel(); + let q2 = q.clone(); spawn(proc() { - // Move the consumer to a local mutable slot - let mut consumer = consumer; for _ in range(0u, 100000) { loop { - match consumer.pop() { + match q2.pop() { Some(1i) => break, Some(_) => panic!(), None => {} @@ -377,7 +329,7 @@ mod test { tx.send(()); }); for _ in range(0i, 100000) { - producer.push(1); + q.push(1); } rx.recv(); } diff --git a/src/libstd/sync/barrier.rs b/src/libstd/sync/barrier.rs new file mode 100644 index 00000000000..5e6dc6ec650 --- /dev/null +++ b/src/libstd/sync/barrier.rs @@ -0,0 +1,116 @@ +// Copyright 2014 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +use sync::{Mutex, Condvar}; + +/// A barrier enables multiple tasks to synchronize the beginning +/// of some computation. +/// +/// ```rust +/// use std::sync::{Arc, Barrier}; +/// +/// let barrier = Arc::new(Barrier::new(10)); +/// for _ in range(0u, 10) { +/// let c = barrier.clone(); +/// // The same messages will be printed together. +/// // You will NOT see any interleaving. +/// spawn(proc() { +/// println!("before wait"); +/// c.wait(); +/// println!("after wait"); +/// }); +/// } +/// ``` +pub struct Barrier { + lock: Mutex, + cvar: Condvar, + num_threads: uint, +} + +// The inner state of a double barrier +struct BarrierState { + count: uint, + generation_id: uint, +} + +impl Barrier { + /// Create a new barrier that can block a given number of threads. + /// + /// A barrier will block `n`-1 threads which call `wait` and then wake up + /// all threads at once when the `n`th thread calls `wait`. + pub fn new(n: uint) -> Barrier { + Barrier { + lock: Mutex::new(BarrierState { + count: 0, + generation_id: 0, + }), + cvar: Condvar::new(), + num_threads: n, + } + } + + /// Block the current thread until all threads has rendezvoused here. + /// + /// Barriers are re-usable after all threads have rendezvoused once, and can + /// be used continuously. + pub fn wait(&self) { + let mut lock = self.lock.lock(); + let local_gen = lock.generation_id; + lock.count += 1; + if lock.count < self.num_threads { + // We need a while loop to guard against spurious wakeups. + // http://en.wikipedia.org/wiki/Spurious_wakeup + while local_gen == lock.generation_id && + lock.count < self.num_threads { + self.cvar.wait(&lock); + } + } else { + lock.count = 0; + lock.generation_id += 1; + self.cvar.notify_all(); + } + } +} + +#[cfg(test)] +mod tests { + use prelude::*; + + use sync::{Arc, Barrier}; + use comm::Empty; + + #[test] + fn test_barrier() { + let barrier = Arc::new(Barrier::new(10)); + let (tx, rx) = channel(); + + for _ in range(0u, 9) { + let c = barrier.clone(); + let tx = tx.clone(); + spawn(proc() { + c.wait(); + tx.send(true); + }); + } + + // At this point, all spawned tasks should be blocked, + // so we shouldn't get anything from the port + assert!(match rx.try_recv() { + Err(Empty) => true, + _ => false, + }); + + barrier.wait(); + // Now, the barrier is cleared and we should get data. + for _ in range(0u, 9) { + rx.recv(); + } + } +} diff --git a/src/libstd/sync/condvar.rs b/src/libstd/sync/condvar.rs new file mode 100644 index 00000000000..581b6b4e412 --- /dev/null +++ b/src/libstd/sync/condvar.rs @@ -0,0 +1,358 @@ +// Copyright 2014 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use prelude::*; + +use sync::atomic::{mod, AtomicUint}; +use sync::{mutex, StaticMutexGuard}; +use sys_common::condvar as sys; +use sys_common::mutex as sys_mutex; +use time::Duration; + +/// A Condition Variable +/// +/// Condition variables represent the ability to block a thread such that it +/// consumes no CPU time while waiting for an event to occur. Condition +/// variables are typically associated with a boolean predicate (a condition) +/// and a mutex. The predicate is always verified inside of the mutex before +/// determining that thread must block. 
+/// +/// Functions in this module will block the current **thread** of execution and +/// are bindings to system-provided condition variables where possible. Note +/// that this module places one additional restriction over the system condition +/// variables: each condvar can be used with precisely one mutex at runtime. Any +/// attempt to use multiple mutexes on the same condition variable will result +/// in a runtime panic. If this is not desired, then the unsafe primitives in +/// `sys` do not have this restriction but may result in undefined behavior. +/// +/// # Example +/// +/// ``` +/// use std::sync::{Arc, Mutex, Condvar}; +/// +/// let pair = Arc::new((Mutex::new(false), Condvar::new())); +/// let pair2 = pair.clone(); +/// +/// // Inside of our lock, spawn a new thread, and then wait for it to start +/// spawn(proc() { +/// let &(ref lock, ref cvar) = &*pair2; +/// let mut started = lock.lock(); +/// *started = true; +/// cvar.notify_one(); +/// }); +/// +/// // wait for the thread to start up +/// let &(ref lock, ref cvar) = &*pair; +/// let started = lock.lock(); +/// while !*started { +/// cvar.wait(&started); +/// } +/// ``` +pub struct Condvar { inner: Box } + +/// Statically allocated condition variables. +/// +/// This structure is identical to `Condvar` except that it is suitable for use +/// in static initializers for other structures. +/// +/// # Example +/// +/// ``` +/// use std::sync::{StaticCondvar, CONDVAR_INIT}; +/// +/// static CVAR: StaticCondvar = CONDVAR_INIT; +/// ``` +pub struct StaticCondvar { + inner: sys::Condvar, + mutex: AtomicUint, +} + +/// Constant initializer for a statically allocated condition variable. +pub const CONDVAR_INIT: StaticCondvar = StaticCondvar { + inner: sys::CONDVAR_INIT, + mutex: atomic::INIT_ATOMIC_UINT, +}; + +/// A trait for values which can be passed to the waiting methods of condition +/// variables. This is implemented by the mutex guards in this module. +/// +/// Note that this trait should likely not be implemented manually unless you +/// really know what you're doing. +pub trait AsMutexGuard { + #[allow(missing_docs)] + unsafe fn as_mutex_guard(&self) -> &StaticMutexGuard; +} + +impl Condvar { + /// Creates a new condition variable which is ready to be waited on and + /// notified. + pub fn new() -> Condvar { + Condvar { + inner: box StaticCondvar { + inner: unsafe { sys::Condvar::new() }, + mutex: AtomicUint::new(0), + } + } + } + + /// Block the current thread until this condition variable receives a + /// notification. + /// + /// This function will atomically unlock the mutex specified (represented by + /// `guard`) and block the current thread. This means that any calls to + /// `notify_*()` which happen logically after the mutex is unlocked are + /// candidates to wake this thread up. When this function call returns, the + /// lock specified will have been re-acquired. + /// + /// Note that this function is susceptible to spurious wakeups. Condition + /// variables normally have a boolean predicate associated with them, and + /// the predicate must always be checked each time this function returns to + /// protect against spurious wakeups. + /// + /// # Panics + /// + /// This function will `panic!()` if it is used with more than one mutex + /// over time. Each condition variable is dynamically bound to exactly one + /// mutex to ensure defined behavior across platforms. If this functionality + /// is not desired, then unsafe primitives in `sys` are provided.
+ pub fn wait(&self, mutex_guard: &T) { + unsafe { + let me: &'static Condvar = &*(self as *const _); + me.inner.wait(mutex_guard) + } + } + + /// Wait on this condition variable for a notification, timing out after a + /// specified duration. + /// + /// The semantics of this function are equivalent to `wait()` except that + /// the thread will be blocked for roughly no longer than `dur`. This method + /// should not be used for precise timing due to anomalies such as + /// preemption or platform differences that may not cause the maximum amount + /// of time waited to be precisely `dur`. + /// + /// If the wait timed out, then `false` will be returned. Otherwise if a + /// notification was received then `true` will be returned. + /// + /// Like `wait`, the lock specified will be re-acquired when this function + /// returns, regardless of whether the timeout elapsed or not. + pub fn wait_timeout(&self, mutex_guard: &T, + dur: Duration) -> bool { + unsafe { + let me: &'static Condvar = &*(self as *const _); + me.inner.wait_timeout(mutex_guard, dur) + } + } + + /// Wake up one blocked thread on this condvar. + /// + /// If there is a blocked thread on this condition variable, then it will + /// be woken up from its call to `wait` or `wait_timeout`. Calls to + /// `notify_one` are not buffered in any way. + /// + /// To wake up all threads, see `notify_one()`. + pub fn notify_one(&self) { unsafe { self.inner.inner.notify_one() } } + + /// Wake up all blocked threads on this condvar. + /// + /// This method will ensure that any current waiters on the condition + /// variable are awoken. Calls to `notify_all()` are not buffered in any + /// way. + /// + /// To wake up only one thread, see `notify_one()`. + pub fn notify_all(&self) { unsafe { self.inner.inner.notify_all() } } +} + +impl Drop for Condvar { + fn drop(&mut self) { + unsafe { self.inner.inner.destroy() } + } +} + +impl StaticCondvar { + /// Block the current thread until this condition variable receives a + /// notification. + /// + /// See `Condvar::wait`. + pub fn wait(&'static self, mutex_guard: &T) { + unsafe { + let lock = mutex_guard.as_mutex_guard(); + let sys = mutex::guard_lock(lock); + self.verify(sys); + self.inner.wait(sys); + (*mutex::guard_poison(lock)).check("mutex"); + } + } + + /// Wait on this condition variable for a notification, timing out after a + /// specified duration. + /// + /// See `Condvar::wait_timeout`. + pub fn wait_timeout(&'static self, mutex_guard: &T, + dur: Duration) -> bool { + unsafe { + let lock = mutex_guard.as_mutex_guard(); + let sys = mutex::guard_lock(lock); + self.verify(sys); + let ret = self.inner.wait_timeout(sys, dur); + (*mutex::guard_poison(lock)).check("mutex"); + return ret; + } + } + + /// Wake up one blocked thread on this condvar. + /// + /// See `Condvar::notify_one`. + pub fn notify_one(&'static self) { unsafe { self.inner.notify_one() } } + + /// Wake up all blocked threads on this condvar. + /// + /// See `Condvar::notify_all`. + pub fn notify_all(&'static self) { unsafe { self.inner.notify_all() } } + + /// Deallocate all resources associated with this static condvar. + /// + /// This method is unsafe to call as there is no guarantee that there are no + /// active users of the condvar, and this also doesn't prevent any future + /// users of the condvar. This method is required to be called to not leak + /// memory on all platforms. 
+ pub unsafe fn destroy(&'static self) { + self.inner.destroy() + } + + fn verify(&self, mutex: &sys_mutex::Mutex) { + let addr = mutex as *const _ as uint; + match self.mutex.compare_and_swap(0, addr, atomic::SeqCst) { + // If we got out 0, then we have successfully bound the mutex to + // this cvar. + 0 => {} + + // If we get out a value that's the same as `addr`, then someone + // already beat us to the punch. + n if n == addr => {} + + // Anything else and we're using more than one mutex on this cvar, + // which is currently disallowed. + _ => panic!("attempted to use a condition variable with two \ + mutexes"), + } + } +} + +#[cfg(test)] +mod tests { + use prelude::*; + + use time::Duration; + use super::{StaticCondvar, CONDVAR_INIT}; + use sync::{StaticMutex, MUTEX_INIT, Condvar, Mutex, Arc}; + + #[test] + fn smoke() { + let c = Condvar::new(); + c.notify_one(); + c.notify_all(); + } + + #[test] + fn static_smoke() { + static C: StaticCondvar = CONDVAR_INIT; + C.notify_one(); + C.notify_all(); + unsafe { C.destroy(); } + } + + #[test] + fn notify_one() { + static C: StaticCondvar = CONDVAR_INIT; + static M: StaticMutex = MUTEX_INIT; + + let g = M.lock(); + spawn(proc() { + let _g = M.lock(); + C.notify_one(); + }); + C.wait(&g); + drop(g); + unsafe { C.destroy(); M.destroy(); } + } + + #[test] + fn notify_all() { + const N: uint = 10; + + let data = Arc::new((Mutex::new(0), Condvar::new())); + let (tx, rx) = channel(); + for _ in range(0, N) { + let data = data.clone(); + let tx = tx.clone(); + spawn(proc() { + let &(ref lock, ref cond) = &*data; + let mut cnt = lock.lock(); + *cnt += 1; + if *cnt == N { + tx.send(()); + } + while *cnt != 0 { + cond.wait(&cnt); + } + tx.send(()); + }); + } + drop(tx); + + let &(ref lock, ref cond) = &*data; + rx.recv(); + let mut cnt = lock.lock(); + *cnt = 0; + cond.notify_all(); + drop(cnt); + + for _ in range(0, N) { + rx.recv(); + } + } + + #[test] + fn wait_timeout() { + static C: StaticCondvar = CONDVAR_INIT; + static M: StaticMutex = MUTEX_INIT; + + let g = M.lock(); + assert!(!C.wait_timeout(&g, Duration::nanoseconds(1000))); + spawn(proc() { + let _g = M.lock(); + C.notify_one(); + }); + assert!(C.wait_timeout(&g, Duration::days(1))); + drop(g); + unsafe { C.destroy(); M.destroy(); } + } + + #[test] + #[should_fail] + fn two_mutexes() { + static M1: StaticMutex = MUTEX_INIT; + static M2: StaticMutex = MUTEX_INIT; + static C: StaticCondvar = CONDVAR_INIT; + + let g = M1.lock(); + spawn(proc() { + let _g = M1.lock(); + C.notify_one(); + }); + C.wait(&g); + drop(g); + + C.wait(&M2.lock()); + + } +} + diff --git a/src/libstd/sync/deque.rs b/src/libstd/sync/deque.rs deleted file mode 100644 index 33f6f77eb62..00000000000 --- a/src/libstd/sync/deque.rs +++ /dev/null @@ -1,663 +0,0 @@ -// Copyright 2013 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -//! A (mostly) lock-free concurrent work-stealing deque -//! -//! This module contains an implementation of the Chase-Lev work stealing deque -//! described in "Dynamic Circular Work-Stealing Deque". The implementation is -//! heavily based on the pseudocode found in the paper. -//! -//! This implementation does not want to have the restriction of a garbage -//! 
collector for reclamation of buffers, and instead it uses a shared pool of -//! buffers. This shared pool is required for correctness in this -//! implementation. -//! -//! The only lock-synchronized portions of this deque are the buffer allocation -//! and deallocation portions. Otherwise all operations are lock-free. -//! -//! # Example -//! -//! use std::sync::deque::BufferPool; -//! -//! let mut pool = BufferPool::new(); -//! let (mut worker, mut stealer) = pool.deque(); -//! -//! // Only the worker may push/pop -//! worker.push(1i); -//! worker.pop(); -//! -//! // Stealers take data from the other end of the deque -//! worker.push(1i); -//! stealer.steal(); -//! -//! // Stealers can be cloned to have many stealers stealing in parallel -//! worker.push(1i); -//! let mut stealer2 = stealer.clone(); -//! stealer2.steal(); - -#![experimental] - -// NB: the "buffer pool" strategy is not done for speed, but rather for -// correctness. For more info, see the comment on `swap_buffer` - -// FIXME: all atomic operations in this module use a SeqCst ordering. That is -// probably overkill - -pub use self::Stolen::*; - -use core::prelude::*; - -use alloc::arc::Arc; -use alloc::heap::{allocate, deallocate}; -use alloc::boxed::Box; -use vec::Vec; -use core::kinds::marker; -use core::mem::{forget, min_align_of, size_of, transmute}; -use core::ptr; -use rustrt::exclusive::Exclusive; - -use sync::atomic::{AtomicInt, AtomicPtr, SeqCst}; - -// Once the queue is less than 1/K full, then it will be downsized. Note that -// the deque requires that this number be less than 2. -static K: int = 4; - -// Minimum number of bits that a buffer size should be. No buffer will resize to -// under this value, and all deques will initially contain a buffer of this -// size. -// -// The size in question is 1 << MIN_BITS -static MIN_BITS: uint = 7; - -struct Deque { - bottom: AtomicInt, - top: AtomicInt, - array: AtomicPtr>, - pool: BufferPool, -} - -/// Worker half of the work-stealing deque. This worker has exclusive access to -/// one side of the deque, and uses `push` and `pop` method to manipulate it. -/// -/// There may only be one worker per deque. -pub struct Worker { - deque: Arc>, - _noshare: marker::NoSync, -} - -/// The stealing half of the work-stealing deque. Stealers have access to the -/// opposite end of the deque from the worker, and they only have access to the -/// `steal` method. -pub struct Stealer { - deque: Arc>, - _noshare: marker::NoSync, -} - -/// When stealing some data, this is an enumeration of the possible outcomes. -#[deriving(PartialEq, Show)] -pub enum Stolen { - /// The deque was empty at the time of stealing - Empty, - /// The stealer lost the race for stealing data, and a retry may return more - /// data. - Abort, - /// The stealer has successfully stolen some data. - Data(T), -} - -/// The allocation pool for buffers used by work-stealing deques. Right now this -/// structure is used for reclamation of memory after it is no longer in use by -/// deques. -/// -/// This data structure is protected by a mutex, but it is rarely used. Deques -/// will only use this structure when allocating a new buffer or deallocating a -/// previous one. -pub struct BufferPool { - pool: Arc>>>>, -} - -/// An internal buffer used by the chase-lev deque. This structure is actually -/// implemented as a circular buffer, and is used as the intermediate storage of -/// the data in the deque. -/// -/// This type is implemented with *T instead of Vec for two reasons: -/// -/// 1. 
There is nothing safe about using this buffer. This easily allows the -/// same value to be read twice in to rust, and there is nothing to -/// prevent this. The usage by the deque must ensure that one of the -/// values is forgotten. Furthermore, we only ever want to manually run -/// destructors for values in this buffer (on drop) because the bounds -/// are defined by the deque it's owned by. -/// -/// 2. We can certainly avoid bounds checks using *T instead of Vec, although -/// LLVM is probably pretty good at doing this already. -struct Buffer { - storage: *const T, - log_size: uint, -} - -impl BufferPool { - /// Allocates a new buffer pool which in turn can be used to allocate new - /// deques. - pub fn new() -> BufferPool { - BufferPool { pool: Arc::new(Exclusive::new(Vec::new())) } - } - - /// Allocates a new work-stealing deque which will send/receiving memory to - /// and from this buffer pool. - pub fn deque(&self) -> (Worker, Stealer) { - let a = Arc::new(Deque::new(self.clone())); - let b = a.clone(); - (Worker { deque: a, _noshare: marker::NoSync }, - Stealer { deque: b, _noshare: marker::NoSync }) - } - - fn alloc(&mut self, bits: uint) -> Box> { - unsafe { - let mut pool = self.pool.lock(); - match pool.iter().position(|x| x.size() >= (1 << bits)) { - Some(i) => pool.remove(i).unwrap(), - None => box Buffer::new(bits) - } - } - } - - fn free(&self, buf: Box>) { - unsafe { - let mut pool = self.pool.lock(); - match pool.iter().position(|v| v.size() > buf.size()) { - Some(i) => pool.insert(i, buf), - None => pool.push(buf), - } - } - } -} - -impl Clone for BufferPool { - fn clone(&self) -> BufferPool { BufferPool { pool: self.pool.clone() } } -} - -impl Worker { - /// Pushes data onto the front of this work queue. - pub fn push(&self, t: T) { - unsafe { self.deque.push(t) } - } - /// Pops data off the front of the work queue, returning `None` on an empty - /// queue. - pub fn pop(&self) -> Option { - unsafe { self.deque.pop() } - } - - /// Gets access to the buffer pool that this worker is attached to. This can - /// be used to create more deques which share the same buffer pool as this - /// deque. - pub fn pool<'a>(&'a self) -> &'a BufferPool { - &self.deque.pool - } -} - -impl Stealer { - /// Steals work off the end of the queue (opposite of the worker's end) - pub fn steal(&self) -> Stolen { - unsafe { self.deque.steal() } - } - - /// Gets access to the buffer pool that this stealer is attached to. This - /// can be used to create more deques which share the same buffer pool as - /// this deque. - pub fn pool<'a>(&'a self) -> &'a BufferPool { - &self.deque.pool - } -} - -impl Clone for Stealer { - fn clone(&self) -> Stealer { - Stealer { deque: self.deque.clone(), _noshare: marker::NoSync } - } -} - -// Almost all of this code can be found directly in the paper so I'm not -// personally going to heavily comment what's going on here. - -impl Deque { - fn new(mut pool: BufferPool) -> Deque { - let buf = pool.alloc(MIN_BITS); - Deque { - bottom: AtomicInt::new(0), - top: AtomicInt::new(0), - array: AtomicPtr::new(unsafe { transmute(buf) }), - pool: pool, - } - } - - unsafe fn push(&self, data: T) { - let mut b = self.bottom.load(SeqCst); - let t = self.top.load(SeqCst); - let mut a = self.array.load(SeqCst); - let size = b - t; - if size >= (*a).size() - 1 { - // You won't find this code in the chase-lev deque paper. This is - // alluded to in a small footnote, however. We always free a buffer - // when growing in order to prevent leaks. 
- a = self.swap_buffer(b, a, (*a).resize(b, t, 1)); - b = self.bottom.load(SeqCst); - } - (*a).put(b, data); - self.bottom.store(b + 1, SeqCst); - } - - unsafe fn pop(&self) -> Option { - let b = self.bottom.load(SeqCst); - let a = self.array.load(SeqCst); - let b = b - 1; - self.bottom.store(b, SeqCst); - let t = self.top.load(SeqCst); - let size = b - t; - if size < 0 { - self.bottom.store(t, SeqCst); - return None; - } - let data = (*a).get(b); - if size > 0 { - self.maybe_shrink(b, t); - return Some(data); - } - if self.top.compare_and_swap(t, t + 1, SeqCst) == t { - self.bottom.store(t + 1, SeqCst); - return Some(data); - } else { - self.bottom.store(t + 1, SeqCst); - forget(data); // someone else stole this value - return None; - } - } - - unsafe fn steal(&self) -> Stolen { - let t = self.top.load(SeqCst); - let old = self.array.load(SeqCst); - let b = self.bottom.load(SeqCst); - let a = self.array.load(SeqCst); - let size = b - t; - if size <= 0 { return Empty } - if size % (*a).size() == 0 { - if a == old && t == self.top.load(SeqCst) { - return Empty - } - return Abort - } - let data = (*a).get(t); - if self.top.compare_and_swap(t, t + 1, SeqCst) == t { - Data(data) - } else { - forget(data); // someone else stole this value - Abort - } - } - - unsafe fn maybe_shrink(&self, b: int, t: int) { - let a = self.array.load(SeqCst); - if b - t < (*a).size() / K && b - t > (1 << MIN_BITS) { - self.swap_buffer(b, a, (*a).resize(b, t, -1)); - } - } - - // Helper routine not mentioned in the paper which is used in growing and - // shrinking buffers to swap in a new buffer into place. As a bit of a - // recap, the whole point that we need a buffer pool rather than just - // calling malloc/free directly is that stealers can continue using buffers - // after this method has called 'free' on it. The continued usage is simply - // a read followed by a forget, but we must make sure that the memory can - // continue to be read after we flag this buffer for reclamation. - unsafe fn swap_buffer(&self, b: int, old: *mut Buffer, - buf: Buffer) -> *mut Buffer { - let newbuf: *mut Buffer = transmute(box buf); - self.array.store(newbuf, SeqCst); - let ss = (*newbuf).size(); - self.bottom.store(b + ss, SeqCst); - let t = self.top.load(SeqCst); - if self.top.compare_and_swap(t, t + ss, SeqCst) != t { - self.bottom.store(b, SeqCst); - } - self.pool.free(transmute(old)); - return newbuf; - } -} - - -#[unsafe_destructor] -impl Drop for Deque { - fn drop(&mut self) { - let t = self.top.load(SeqCst); - let b = self.bottom.load(SeqCst); - let a = self.array.load(SeqCst); - // Free whatever is leftover in the dequeue, and then move the buffer - // back into the pool. 
- for i in range(t, b) { - let _: T = unsafe { (*a).get(i) }; - } - self.pool.free(unsafe { transmute(a) }); - } -} - -#[inline] -fn buffer_alloc_size(log_size: uint) -> uint { - (1 << log_size) * size_of::() -} - -impl Buffer { - unsafe fn new(log_size: uint) -> Buffer { - let size = buffer_alloc_size::(log_size); - let buffer = allocate(size, min_align_of::()); - if buffer.is_null() { ::alloc::oom() } - Buffer { - storage: buffer as *const T, - log_size: log_size, - } - } - - fn size(&self) -> int { 1 << self.log_size } - - // Apparently LLVM cannot optimize (foo % (1 << bar)) into this implicitly - fn mask(&self) -> int { (1 << self.log_size) - 1 } - - unsafe fn elem(&self, i: int) -> *const T { - self.storage.offset(i & self.mask()) - } - - // This does not protect against loading duplicate values of the same cell, - // nor does this clear out the contents contained within. Hence, this is a - // very unsafe method which the caller needs to treat specially in case a - // race is lost. - unsafe fn get(&self, i: int) -> T { - ptr::read(self.elem(i)) - } - - // Unsafe because this unsafely overwrites possibly uninitialized or - // initialized data. - unsafe fn put(&self, i: int, t: T) { - ptr::write(self.elem(i) as *mut T, t); - } - - // Again, unsafe because this has incredibly dubious ownership violations. - // It is assumed that this buffer is immediately dropped. - unsafe fn resize(&self, b: int, t: int, delta: int) -> Buffer { - // NB: not entirely obvious, but thanks to 2's complement, - // casting delta to uint and then adding gives the desired - // effect. - let buf = Buffer::new(self.log_size + delta as uint); - for i in range(t, b) { - buf.put(i, self.get(i)); - } - return buf; - } -} - -#[unsafe_destructor] -impl Drop for Buffer { - fn drop(&mut self) { - // It is assumed that all buffers are empty on drop. - let size = buffer_alloc_size::(self.log_size); - unsafe { deallocate(self.storage as *mut u8, size, min_align_of::()) } - } -} - -#[cfg(test)] -mod tests { - use prelude::*; - use super::{Data, BufferPool, Abort, Empty, Worker, Stealer}; - - use mem; - use rustrt::thread::Thread; - use rand; - use rand::Rng; - use sync::atomic::{AtomicBool, INIT_ATOMIC_BOOL, SeqCst, - AtomicUint, INIT_ATOMIC_UINT}; - use vec; - - #[test] - fn smoke() { - let pool = BufferPool::new(); - let (w, s) = pool.deque(); - assert_eq!(w.pop(), None); - assert_eq!(s.steal(), Empty); - w.push(1i); - assert_eq!(w.pop(), Some(1)); - w.push(1); - assert_eq!(s.steal(), Data(1)); - w.push(1); - assert_eq!(s.clone().steal(), Data(1)); - } - - #[test] - fn stealpush() { - static AMT: int = 100000; - let pool = BufferPool::::new(); - let (w, s) = pool.deque(); - let t = Thread::start(proc() { - let mut left = AMT; - while left > 0 { - match s.steal() { - Data(i) => { - assert_eq!(i, 1); - left -= 1; - } - Abort | Empty => {} - } - } - }); - - for _ in range(0, AMT) { - w.push(1); - } - - t.join(); - } - - #[test] - fn stealpush_large() { - static AMT: int = 100000; - let pool = BufferPool::<(int, int)>::new(); - let (w, s) = pool.deque(); - let t = Thread::start(proc() { - let mut left = AMT; - while left > 0 { - match s.steal() { - Data((1, 10)) => { left -= 1; } - Data(..) 
=> panic!(), - Abort | Empty => {} - } - } - }); - - for _ in range(0, AMT) { - w.push((1, 10)); - } - - t.join(); - } - - fn stampede(w: Worker>, s: Stealer>, - nthreads: int, amt: uint) { - for _ in range(0, amt) { - w.push(box 20); - } - let mut remaining = AtomicUint::new(amt); - let unsafe_remaining: *mut AtomicUint = &mut remaining; - - let threads = range(0, nthreads).map(|_| { - let s = s.clone(); - Thread::start(proc() { - unsafe { - while (*unsafe_remaining).load(SeqCst) > 0 { - match s.steal() { - Data(box 20) => { - (*unsafe_remaining).fetch_sub(1, SeqCst); - } - Data(..) => panic!(), - Abort | Empty => {} - } - } - } - }) - }).collect::>>(); - - while remaining.load(SeqCst) > 0 { - match w.pop() { - Some(box 20) => { remaining.fetch_sub(1, SeqCst); } - Some(..) => panic!(), - None => {} - } - } - - for thread in threads.into_iter() { - thread.join(); - } - } - - #[test] - fn run_stampede() { - let pool = BufferPool::>::new(); - let (w, s) = pool.deque(); - stampede(w, s, 8, 10000); - } - - #[test] - fn many_stampede() { - static AMT: uint = 4; - let pool = BufferPool::>::new(); - let threads = range(0, AMT).map(|_| { - let (w, s) = pool.deque(); - Thread::start(proc() { - stampede(w, s, 4, 10000); - }) - }).collect::>>(); - - for thread in threads.into_iter() { - thread.join(); - } - } - - #[test] - fn stress() { - static AMT: int = 100000; - static NTHREADS: int = 8; - static DONE: AtomicBool = INIT_ATOMIC_BOOL; - static HITS: AtomicUint = INIT_ATOMIC_UINT; - let pool = BufferPool::::new(); - let (w, s) = pool.deque(); - - let threads = range(0, NTHREADS).map(|_| { - let s = s.clone(); - Thread::start(proc() { - loop { - match s.steal() { - Data(2) => { HITS.fetch_add(1, SeqCst); } - Data(..) => panic!(), - _ if DONE.load(SeqCst) => break, - _ => {} - } - } - }) - }).collect::>>(); - - let mut rng = rand::task_rng(); - let mut expected = 0; - while expected < AMT { - if rng.gen_range(0i, 3) == 2 { - match w.pop() { - None => {} - Some(2) => { HITS.fetch_add(1, SeqCst); }, - Some(_) => panic!(), - } - } else { - expected += 1; - w.push(2); - } - } - - while HITS.load(SeqCst) < AMT as uint { - match w.pop() { - None => {} - Some(2) => { HITS.fetch_add(1, SeqCst); }, - Some(_) => panic!(), - } - } - DONE.store(true, SeqCst); - - for thread in threads.into_iter() { - thread.join(); - } - - assert_eq!(HITS.load(SeqCst), expected as uint); - } - - #[test] - #[cfg_attr(windows, ignore)] // apparently windows scheduling is weird? - fn no_starvation() { - static AMT: int = 10000; - static NTHREADS: int = 4; - static DONE: AtomicBool = INIT_ATOMIC_BOOL; - let pool = BufferPool::<(int, uint)>::new(); - let (w, s) = pool.deque(); - - let (threads, hits) = vec::unzip(range(0, NTHREADS).map(|_| { - let s = s.clone(); - let unique_box = box AtomicUint::new(0); - let thread_box = unsafe { - *mem::transmute::<&Box, - *const *mut AtomicUint>(&unique_box) - }; - (Thread::start(proc() { - unsafe { - loop { - match s.steal() { - Data((1, 2)) => { - (*thread_box).fetch_add(1, SeqCst); - } - Data(..) 
=> panic!(), - _ if DONE.load(SeqCst) => break, - _ => {} - } - } - } - }), unique_box) - })); - - let mut rng = rand::task_rng(); - let mut myhit = false; - 'outer: loop { - for _ in range(0, rng.gen_range(0, AMT)) { - if !myhit && rng.gen_range(0i, 3) == 2 { - match w.pop() { - None => {} - Some((1, 2)) => myhit = true, - Some(_) => panic!(), - } - } else { - w.push((1, 2)); - } - } - - for slot in hits.iter() { - let amt = slot.load(SeqCst); - if amt == 0 { continue 'outer; } - } - if myhit { - break - } - } - - DONE.store(true, SeqCst); - - for thread in threads.into_iter() { - thread.join(); - } - } -} diff --git a/src/libstd/sync/future.rs b/src/libstd/sync/future.rs index f2f9351fd0d..79e0d487cad 100644 --- a/src/libstd/sync/future.rs +++ b/src/libstd/sync/future.rs @@ -148,7 +148,7 @@ mod test { use prelude::*; use sync::Future; use task; - use comm::{channel, Sender}; + use comm::channel; #[test] fn test_from_value() { diff --git a/src/libstd/sync/lock.rs b/src/libstd/sync/lock.rs deleted file mode 100644 index 77f5b013519..00000000000 --- a/src/libstd/sync/lock.rs +++ /dev/null @@ -1,805 +0,0 @@ -// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -//! Wrappers for safe, shared, mutable memory between tasks -//! -//! The wrappers in this module build on the primitives from `sync::raw` to -//! provide safe interfaces around using the primitive locks. These primitives -//! implement a technique called "poisoning" where when a task panicked with a -//! held lock, all future attempts to use the lock will panic. -//! -//! For example, if two tasks are contending on a mutex and one of them panics -//! after grabbing the lock, the second task will immediately panic because the -//! lock is now poisoned. - -use core::prelude::*; - -use self::Inner::*; - -use core::cell::UnsafeCell; -use rustrt::local::Local; -use rustrt::task::Task; - -use super::raw; - -// Poisoning helpers - -struct PoisonOnFail<'a> { - flag: &'a mut bool, - failed: bool, -} - -fn failing() -> bool { - Local::borrow(None::).unwinder.unwinding() -} - -impl<'a> PoisonOnFail<'a> { - fn check(flag: bool, name: &str) { - if flag { - panic!("Poisoned {} - another task failed inside!", name); - } - } - - fn new<'a>(flag: &'a mut bool, name: &str) -> PoisonOnFail<'a> { - PoisonOnFail::check(*flag, name); - PoisonOnFail { - flag: flag, - failed: failing() - } - } -} - -#[unsafe_destructor] -impl<'a> Drop for PoisonOnFail<'a> { - fn drop(&mut self) { - if !self.failed && failing() { - *self.flag = true; - } - } -} - -// Condvar - -enum Inner<'a> { - InnerMutex(raw::MutexGuard<'a>), - InnerRWLock(raw::RWLockWriteGuard<'a>), -} - -impl<'b> Inner<'b> { - fn cond<'a>(&'a self) -> &'a raw::Condvar<'b> { - match *self { - InnerMutex(ref m) => &m.cond, - InnerRWLock(ref m) => &m.cond, - } - } -} - -/// A condition variable, a mechanism for unlock-and-descheduling and -/// signaling, for use with the lock types. -pub struct Condvar<'a> { - name: &'static str, - // n.b. Inner must be after PoisonOnFail because we must set the poison flag - // *inside* the mutex, and struct fields are destroyed top-to-bottom - // (destroy the lock guard last). 
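The field-order argument in the comment just above (the poison updater is declared before the lock guard because struct fields are destroyed top-to-bottom) can be made concrete with a tiny standalone sketch. The type names below are invented for illustration and are not part of this patch:

```rust
// Struct fields are dropped in declaration order, so the poison update
// runs while the (conceptual) lock guard declared after it is still alive.
struct PoisonFlag;
struct LockGuard;

impl Drop for PoisonFlag {
    fn drop(&mut self) { println!("poison flag updated (lock still held)"); }
}
impl Drop for LockGuard {
    fn drop(&mut self) { println!("lock released"); }
}

struct Guard {
    poison: PoisonFlag, // declared first, dropped first
    inner: LockGuard,   // declared last, dropped last
}

fn main() {
    let _g = Guard { poison: PoisonFlag, inner: LockGuard };
    // prints "poison flag updated (lock still held)", then "lock released"
}
```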
- poison: PoisonOnFail<'a>, - inner: Inner<'a>, -} - -impl<'a> Condvar<'a> { - /// Atomically exit the associated lock and block until a signal is sent. - /// - /// wait() is equivalent to wait_on(0). - /// - /// # Panics - /// - /// A task which is killed while waiting on a condition variable will wake - /// up, panic, and unlock the associated lock as it unwinds. - #[inline] - pub fn wait(&self) { self.wait_on(0) } - - /// Atomically exit the associated lock and block on a specified condvar - /// until a signal is sent on that same condvar. - /// - /// The associated lock must have been initialised with an appropriate - /// number of condvars. The condvar_id must be between 0 and num_condvars-1 - /// or else this call will fail. - #[inline] - pub fn wait_on(&self, condvar_id: uint) { - assert!(!*self.poison.flag); - self.inner.cond().wait_on(condvar_id); - // This is why we need to wrap sync::condvar. - PoisonOnFail::check(*self.poison.flag, self.name); - } - - /// Wake up a blocked task. Returns false if there was no blocked task. - #[inline] - pub fn signal(&self) -> bool { self.signal_on(0) } - - /// Wake up a blocked task on a specified condvar (as - /// sync::cond.signal_on). Returns false if there was no blocked task. - #[inline] - pub fn signal_on(&self, condvar_id: uint) -> bool { - assert!(!*self.poison.flag); - self.inner.cond().signal_on(condvar_id) - } - - /// Wake up all blocked tasks. Returns the number of tasks woken. - #[inline] - pub fn broadcast(&self) -> uint { self.broadcast_on(0) } - - /// Wake up all blocked tasks on a specified condvar (as - /// sync::cond.broadcast_on). Returns the number of tasks woken. - #[inline] - pub fn broadcast_on(&self, condvar_id: uint) -> uint { - assert!(!*self.poison.flag); - self.inner.cond().broadcast_on(condvar_id) - } -} - -/// A wrapper type which provides synchronized access to the underlying data, of -/// type `T`. A mutex always provides exclusive access, and concurrent requests -/// will block while the mutex is already locked. -/// -/// # Example -/// -/// ``` -/// use std::sync::{Mutex, Arc}; -/// -/// let mutex = Arc::new(Mutex::new(1i)); -/// let mutex2 = mutex.clone(); -/// -/// spawn(proc() { -/// let mut val = mutex2.lock(); -/// *val += 1; -/// val.cond.signal(); -/// }); -/// -/// let value = mutex.lock(); -/// while *value != 2 { -/// value.cond.wait(); -/// } -/// ``` -pub struct Mutex { - lock: raw::Mutex, - failed: UnsafeCell, - data: UnsafeCell, -} - -/// An guard which is created by locking a mutex. Through this guard the -/// underlying data can be accessed. -pub struct MutexGuard<'a, T:'a> { - // FIXME #12808: strange name to try to avoid interfering with - // field accesses of the contained type via Deref - _data: &'a mut T, - /// Inner condition variable connected to the locked mutex that this guard - /// was created from. This can be used for atomic-unlock-and-deschedule. - pub cond: Condvar<'a>, -} - -impl Mutex { - /// Creates a new mutex to protect the user-supplied data. - pub fn new(user_data: T) -> Mutex { - Mutex::new_with_condvars(user_data, 1) - } - - /// Create a new mutex, with a specified number of associated condvars. - /// - /// This will allow calling wait_on/signal_on/broadcast_on with condvar IDs - /// between 0 and num_condvars-1. (If num_condvars is 0, lock_cond will be - /// allowed but any operations on the condvar will fail.) 
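The `wait_on`/`signal_on` variants described here have no doc example of their own; the following is a minimal sketch in the style of the `Mutex` example above, assuming the wrapper API as declared in this file (the choice of condvar id 0 and the data values are arbitrary):

```rust
use std::sync::{Arc, Mutex};

fn main() {
    // A mutex created with two condvars; only id 0 is used below.
    let m = Arc::new(Mutex::new_with_condvars(0i, 2));
    let m2 = m.clone();

    spawn(proc() {
        let mut guard = m2.lock();
        *guard = 1;
        guard.cond.signal_on(0);   // wake one task blocked on condvar 0
    });

    let guard = m.lock();
    while *guard == 0 {
        guard.cond.wait_on(0);     // atomically unlock, block, then re-lock
    }
}
```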
- pub fn new_with_condvars(user_data: T, num_condvars: uint) -> Mutex { - Mutex { - lock: raw::Mutex::new_with_condvars(num_condvars), - failed: UnsafeCell::new(false), - data: UnsafeCell::new(user_data), - } - } - - /// Access the underlying mutable data with mutual exclusion from other - /// tasks. The returned value is an RAII guard which will unlock the mutex - /// when dropped. All concurrent tasks attempting to lock the mutex will - /// block while the returned value is still alive. - /// - /// # Panics - /// - /// Panicking while inside the Mutex will unlock the Mutex while unwinding, so - /// that other tasks won't block forever. It will also poison the Mutex: - /// any tasks that subsequently try to access it (including those already - /// blocked on the mutex) will also panic immediately. - #[inline] - pub fn lock<'a>(&'a self) -> MutexGuard<'a, T> { - let guard = self.lock.lock(); - - // These two accesses are safe because we're guaranteed at this point - // that we have exclusive access to this mutex. We are indeed able to - // promote ourselves from &Mutex to `&mut T` - let poison = unsafe { &mut *self.failed.get() }; - let data = unsafe { &mut *self.data.get() }; - - MutexGuard { - _data: data, - cond: Condvar { - name: "Mutex", - poison: PoisonOnFail::new(poison, "Mutex"), - inner: InnerMutex(guard), - }, - } - } -} - -impl<'a, T: Send> Deref for MutexGuard<'a, T> { - fn deref<'a>(&'a self) -> &'a T { &*self._data } -} -impl<'a, T: Send> DerefMut for MutexGuard<'a, T> { - fn deref_mut<'a>(&'a mut self) -> &'a mut T { &mut *self._data } -} - -/// A dual-mode reader-writer lock. The data can be accessed mutably or -/// immutably, and immutably-accessing tasks may run concurrently. -/// -/// # Example -/// -/// ``` -/// use std::sync::{RWLock, Arc}; -/// -/// let lock1 = Arc::new(RWLock::new(1i)); -/// let lock2 = lock1.clone(); -/// -/// spawn(proc() { -/// let mut val = lock2.write(); -/// *val = 3; -/// let val = val.downgrade(); -/// println!("{}", *val); -/// }); -/// -/// let val = lock1.read(); -/// println!("{}", *val); -/// ``` -pub struct RWLock { - lock: raw::RWLock, - failed: UnsafeCell, - data: UnsafeCell, -} - -/// A guard which is created by locking an rwlock in write mode. Through this -/// guard the underlying data can be accessed. -pub struct RWLockWriteGuard<'a, T:'a> { - // FIXME #12808: strange name to try to avoid interfering with - // field accesses of the contained type via Deref - _data: &'a mut T, - /// Inner condition variable that can be used to sleep on the write mode of - /// this rwlock. - pub cond: Condvar<'a>, -} - -/// A guard which is created by locking an rwlock in read mode. Through this -/// guard the underlying data can be accessed. -pub struct RWLockReadGuard<'a, T:'a> { - // FIXME #12808: strange names to try to avoid interfering with - // field accesses of the contained type via Deref - _data: &'a T, - _guard: raw::RWLockReadGuard<'a>, -} - -impl RWLock { - /// Create a reader/writer lock with the supplied data. - pub fn new(user_data: T) -> RWLock { - RWLock::new_with_condvars(user_data, 1) - } - - /// Create a reader/writer lock with the supplied data and a specified number - /// of condvars (as sync::RWLock::new_with_condvars). - pub fn new_with_condvars(user_data: T, num_condvars: uint) -> RWLock { - RWLock { - lock: raw::RWLock::new_with_condvars(num_condvars), - failed: UnsafeCell::new(false), - data: UnsafeCell::new(user_data), - } - } - - /// Access the underlying data mutably. 
Locks the rwlock in write mode; - /// other readers and writers will block. - /// - /// # Panics - /// - /// Panicking while inside the lock will unlock the lock while unwinding, so - /// that other tasks won't block forever. As Mutex.lock, it will also poison - /// the lock, so subsequent readers and writers will both also panic. - #[inline] - pub fn write<'a>(&'a self) -> RWLockWriteGuard<'a, T> { - let guard = self.lock.write(); - - // These two accesses are safe because we're guaranteed at this point - // that we have exclusive access to this rwlock. We are indeed able to - // promote ourselves from &RWLock to `&mut T` - let poison = unsafe { &mut *self.failed.get() }; - let data = unsafe { &mut *self.data.get() }; - - RWLockWriteGuard { - _data: data, - cond: Condvar { - name: "RWLock", - poison: PoisonOnFail::new(poison, "RWLock"), - inner: InnerRWLock(guard), - }, - } - } - - /// Access the underlying data immutably. May run concurrently with other - /// reading tasks. - /// - /// # Panics - /// - /// Panicking will unlock the lock while unwinding. However, unlike all other - /// access modes, this will not poison the lock. - pub fn read<'a>(&'a self) -> RWLockReadGuard<'a, T> { - let guard = self.lock.read(); - PoisonOnFail::check(unsafe { *self.failed.get() }, "RWLock"); - RWLockReadGuard { - _guard: guard, - _data: unsafe { &*self.data.get() }, - } - } -} - -impl<'a, T: Send + Sync> RWLockWriteGuard<'a, T> { - /// Consumes this write lock token, returning a new read lock token. - /// - /// This will allow pending readers to come into the lock. - pub fn downgrade(self) -> RWLockReadGuard<'a, T> { - let RWLockWriteGuard { _data, cond } = self; - // convert the data to read-only explicitly - let data = &*_data; - let guard = match cond.inner { - InnerMutex(..) => unreachable!(), - InnerRWLock(guard) => guard.downgrade() - }; - RWLockReadGuard { _guard: guard, _data: data } - } -} - -impl<'a, T: Send + Sync> Deref for RWLockReadGuard<'a, T> { - fn deref<'a>(&'a self) -> &'a T { self._data } -} -impl<'a, T: Send + Sync> Deref for RWLockWriteGuard<'a, T> { - fn deref<'a>(&'a self) -> &'a T { &*self._data } -} -impl<'a, T: Send + Sync> DerefMut for RWLockWriteGuard<'a, T> { - fn deref_mut<'a>(&'a mut self) -> &'a mut T { &mut *self._data } -} - -/// A barrier enables multiple tasks to synchronize the beginning -/// of some computation. -/// -/// ```rust -/// use std::sync::{Arc, Barrier}; -/// -/// let barrier = Arc::new(Barrier::new(10)); -/// for _ in range(0u, 10) { -/// let c = barrier.clone(); -/// // The same messages will be printed together. -/// // You will NOT see any interleaving. -/// spawn(proc() { -/// println!("before wait"); -/// c.wait(); -/// println!("after wait"); -/// }); -/// } -/// ``` -pub struct Barrier { - lock: Mutex, - num_tasks: uint, -} - -// The inner state of a double barrier -struct BarrierState { - count: uint, - generation_id: uint, -} - -impl Barrier { - /// Create a new barrier that can block a given number of tasks. - pub fn new(num_tasks: uint) -> Barrier { - Barrier { - lock: Mutex::new(BarrierState { - count: 0, - generation_id: 0, - }), - num_tasks: num_tasks, - } - } - - /// Block the current task until a certain number of tasks is waiting. - pub fn wait(&self) { - let mut lock = self.lock.lock(); - let local_gen = lock.generation_id; - lock.count += 1; - if lock.count < self.num_tasks { - // We need a while loop to guard against spurious wakeups. 
- // http://en.wikipedia.org/wiki/Spurious_wakeup - while local_gen == lock.generation_id && - lock.count < self.num_tasks { - lock.cond.wait(); - } - } else { - lock.count = 0; - lock.generation_id += 1; - lock.cond.broadcast(); - } - } -} - -#[cfg(test)] -mod tests { - use prelude::*; - use comm::Empty; - use task; - use task::try_future; - use sync::Arc; - - use super::{Mutex, Barrier, RWLock}; - - #[test] - fn test_mutex_arc_condvar() { - let arc = Arc::new(Mutex::new(false)); - let arc2 = arc.clone(); - let (tx, rx) = channel(); - task::spawn(proc() { - // wait until parent gets in - rx.recv(); - let mut lock = arc2.lock(); - *lock = true; - lock.cond.signal(); - }); - - let lock = arc.lock(); - tx.send(()); - assert!(!*lock); - while !*lock { - lock.cond.wait(); - } - } - - #[test] #[should_fail] - fn test_arc_condvar_poison() { - let arc = Arc::new(Mutex::new(1i)); - let arc2 = arc.clone(); - let (tx, rx) = channel(); - - spawn(proc() { - rx.recv(); - let lock = arc2.lock(); - lock.cond.signal(); - // Parent should fail when it wakes up. - panic!(); - }); - - let lock = arc.lock(); - tx.send(()); - while *lock == 1 { - lock.cond.wait(); - } - } - - #[test] #[should_fail] - fn test_mutex_arc_poison() { - let arc = Arc::new(Mutex::new(1i)); - let arc2 = arc.clone(); - let _ = task::try(proc() { - let lock = arc2.lock(); - assert_eq!(*lock, 2); - }); - let lock = arc.lock(); - assert_eq!(*lock, 1); - } - - #[test] - fn test_mutex_arc_nested() { - // Tests nested mutexes and access - // to underlying data. - let arc = Arc::new(Mutex::new(1i)); - let arc2 = Arc::new(Mutex::new(arc)); - task::spawn(proc() { - let lock = arc2.lock(); - let lock2 = lock.deref().lock(); - assert_eq!(*lock2, 1); - }); - } - - #[test] - fn test_mutex_arc_access_in_unwind() { - let arc = Arc::new(Mutex::new(1i)); - let arc2 = arc.clone(); - let _ = task::try::<()>(proc() { - struct Unwinder { - i: Arc>, - } - impl Drop for Unwinder { - fn drop(&mut self) { - let mut lock = self.i.lock(); - *lock += 1; - } - } - let _u = Unwinder { i: arc2 }; - panic!(); - }); - let lock = arc.lock(); - assert_eq!(*lock, 2); - } - - #[test] #[should_fail] - fn test_rw_arc_poison_wr() { - let arc = Arc::new(RWLock::new(1i)); - let arc2 = arc.clone(); - let _ = task::try(proc() { - let lock = arc2.write(); - assert_eq!(*lock, 2); - }); - let lock = arc.read(); - assert_eq!(*lock, 1); - } - #[test] #[should_fail] - fn test_rw_arc_poison_ww() { - let arc = Arc::new(RWLock::new(1i)); - let arc2 = arc.clone(); - let _ = task::try(proc() { - let lock = arc2.write(); - assert_eq!(*lock, 2); - }); - let lock = arc.write(); - assert_eq!(*lock, 1); - } - #[test] - fn test_rw_arc_no_poison_rr() { - let arc = Arc::new(RWLock::new(1i)); - let arc2 = arc.clone(); - let _ = task::try(proc() { - let lock = arc2.read(); - assert_eq!(*lock, 2); - }); - let lock = arc.read(); - assert_eq!(*lock, 1); - } - #[test] - fn test_rw_arc_no_poison_rw() { - let arc = Arc::new(RWLock::new(1i)); - let arc2 = arc.clone(); - let _ = task::try(proc() { - let lock = arc2.read(); - assert_eq!(*lock, 2); - }); - let lock = arc.write(); - assert_eq!(*lock, 1); - } - #[test] - fn test_rw_arc_no_poison_dr() { - let arc = Arc::new(RWLock::new(1i)); - let arc2 = arc.clone(); - let _ = task::try(proc() { - let lock = arc2.write().downgrade(); - assert_eq!(*lock, 2); - }); - let lock = arc.write(); - assert_eq!(*lock, 1); - } - - #[test] - fn test_rw_arc() { - let arc = Arc::new(RWLock::new(0i)); - let arc2 = arc.clone(); - let (tx, rx) = channel(); - - task::spawn(proc() 
{ - let mut lock = arc2.write(); - for _ in range(0u, 10) { - let tmp = *lock; - *lock = -1; - task::deschedule(); - *lock = tmp + 1; - } - tx.send(()); - }); - - // Readers try to catch the writer in the act - let mut children = Vec::new(); - for _ in range(0u, 5) { - let arc3 = arc.clone(); - children.push(try_future(proc() { - let lock = arc3.read(); - assert!(*lock >= 0); - })); - } - - // Wait for children to pass their asserts - for r in children.iter_mut() { - assert!(r.get_ref().is_ok()); - } - - // Wait for writer to finish - rx.recv(); - let lock = arc.read(); - assert_eq!(*lock, 10); - } - - #[test] - fn test_rw_arc_access_in_unwind() { - let arc = Arc::new(RWLock::new(1i)); - let arc2 = arc.clone(); - let _ = task::try::<()>(proc() { - struct Unwinder { - i: Arc>, - } - impl Drop for Unwinder { - fn drop(&mut self) { - let mut lock = self.i.write(); - *lock += 1; - } - } - let _u = Unwinder { i: arc2 }; - panic!(); - }); - let lock = arc.read(); - assert_eq!(*lock, 2); - } - - #[test] - fn test_rw_downgrade() { - // (1) A downgrader gets in write mode and does cond.wait. - // (2) A writer gets in write mode, sets state to 42, and does signal. - // (3) Downgrader wakes, sets state to 31337. - // (4) tells writer and all other readers to contend as it downgrades. - // (5) Writer attempts to set state back to 42, while downgraded task - // and all reader tasks assert that it's 31337. - let arc = Arc::new(RWLock::new(0i)); - - // Reader tasks - let mut reader_convos = Vec::new(); - for _ in range(0u, 10) { - let ((tx1, rx1), (tx2, rx2)) = (channel(), channel()); - reader_convos.push((tx1, rx2)); - let arcn = arc.clone(); - task::spawn(proc() { - rx1.recv(); // wait for downgrader to give go-ahead - let lock = arcn.read(); - assert_eq!(*lock, 31337); - tx2.send(()); - }); - } - - // Writer task - let arc2 = arc.clone(); - let ((tx1, rx1), (tx2, rx2)) = (channel(), channel()); - task::spawn(proc() { - rx1.recv(); - { - let mut lock = arc2.write(); - assert_eq!(*lock, 0); - *lock = 42; - lock.cond.signal(); - } - rx1.recv(); - { - let mut lock = arc2.write(); - // This shouldn't happen until after the downgrade read - // section, and all other readers, finish. - assert_eq!(*lock, 31337); - *lock = 42; - } - tx2.send(()); - }); - - // Downgrader (us) - let mut lock = arc.write(); - tx1.send(()); // send to another writer who will wake us up - while *lock == 0 { - lock.cond.wait(); - } - assert_eq!(*lock, 42); - *lock = 31337; - // send to other readers - for &(ref mut rc, _) in reader_convos.iter_mut() { - rc.send(()) - } - let lock = lock.downgrade(); - // complete handshake with other readers - for &(_, ref mut rp) in reader_convos.iter_mut() { - rp.recv() - } - tx1.send(()); // tell writer to try again - assert_eq!(*lock, 31337); - drop(lock); - - rx2.recv(); // complete handshake with writer - } - - #[cfg(test)] - fn test_rw_write_cond_downgrade_read_race_helper() { - // Tests that when a downgrader hands off the "reader cloud" lock - // because of a contending reader, a writer can't race to get it - // instead, which would result in readers_and_writers. This tests - // the raw module rather than this one, but it's here because an - // rwarc gives us extra shared state to help check for the race. 
- let x = Arc::new(RWLock::new(true)); - let (tx, rx) = channel(); - - // writer task - let xw = x.clone(); - task::spawn(proc() { - let mut lock = xw.write(); - tx.send(()); // tell downgrader it's ok to go - lock.cond.wait(); - // The core of the test is here: the condvar reacquire path - // must involve order_lock, so that it cannot race with a reader - // trying to receive the "reader cloud lock hand-off". - *lock = false; - }); - - rx.recv(); // wait for writer to get in - - let lock = x.write(); - assert!(*lock); - // make writer contend in the cond-reacquire path - lock.cond.signal(); - // make a reader task to trigger the "reader cloud lock" handoff - let xr = x.clone(); - let (tx, rx) = channel(); - task::spawn(proc() { - tx.send(()); - drop(xr.read()); - }); - rx.recv(); // wait for reader task to exist - - let lock = lock.downgrade(); - // if writer mistakenly got in, make sure it mutates state - // before we assert on it - for _ in range(0u, 5) { task::deschedule(); } - // make sure writer didn't get in. - assert!(*lock); - } - #[test] - fn test_rw_write_cond_downgrade_read_race() { - // Ideally the above test case would have deschedule statements in it - // that helped to expose the race nearly 100% of the time... but adding - // deschedules in the intuitively-right locations made it even less - // likely, and I wasn't sure why :( . This is a mediocre "next best" - // option. - for _ in range(0u, 8) { - test_rw_write_cond_downgrade_read_race_helper(); - } - } - - #[test] - fn test_barrier() { - let barrier = Arc::new(Barrier::new(10)); - let (tx, rx) = channel(); - - for _ in range(0u, 9) { - let c = barrier.clone(); - let tx = tx.clone(); - spawn(proc() { - c.wait(); - tx.send(true); - }); - } - - // At this point, all spawned tasks should be blocked, - // so we shouldn't get anything from the port - assert!(match rx.try_recv() { - Err(Empty) => true, - _ => false, - }); - - barrier.wait(); - // Now, the barrier is cleared and we should get data. 
- for _ in range(0u, 9) { - rx.recv(); - } - } -} diff --git a/src/libstd/sync/mod.rs b/src/libstd/sync/mod.rs index 944b852db35..7605a6a96a0 100644 --- a/src/libstd/sync/mod.rs +++ b/src/libstd/sync/mod.rs @@ -17,41 +17,27 @@ #![experimental] -pub use self::one::{Once, ONCE_INIT}; - pub use alloc::arc::{Arc, Weak}; -pub use self::lock::{Mutex, MutexGuard, Condvar, Barrier, - RWLock, RWLockReadGuard, RWLockWriteGuard}; -// The mutex/rwlock in this module are not meant for reexport -pub use self::raw::{Semaphore, SemaphoreGuard}; +pub use self::mutex::{Mutex, MutexGuard, StaticMutex, StaticMutexGuard, MUTEX_INIT}; +pub use self::rwlock::{RWLock, StaticRWLock, RWLOCK_INIT}; +pub use self::rwlock::{RWLockReadGuard, RWLockWriteGuard}; +pub use self::rwlock::{StaticRWLockReadGuard, StaticRWLockWriteGuard}; +pub use self::condvar::{Condvar, StaticCondvar, CONDVAR_INIT, AsMutexGuard}; +pub use self::once::{Once, ONCE_INIT}; +pub use self::semaphore::{Semaphore, SemaphoreGuard}; +pub use self::barrier::Barrier; pub use self::future::Future; pub use self::task_pool::TaskPool; -// Core building blocks for all primitives in this crate - -#[stable] pub mod atomic; - -// Concurrent data structures - -pub mod spsc_queue; -pub mod mpsc_queue; -pub mod mpmc_bounded_queue; -pub mod deque; - -// Low-level concurrency primitives - -mod raw; -mod mutex; -mod one; - -// Higher level primitives based on those above - -mod lock; - -// Task management - +mod barrier; +mod condvar; mod future; +mod mutex; +mod once; +mod poison; +mod rwlock; +mod semaphore; mod task_pool; diff --git a/src/libstd/sync/mpmc_bounded_queue.rs b/src/libstd/sync/mpmc_bounded_queue.rs deleted file mode 100644 index dca2d4098c6..00000000000 --- a/src/libstd/sync/mpmc_bounded_queue.rs +++ /dev/null @@ -1,219 +0,0 @@ -/* Copyright (c) 2010-2011 Dmitry Vyukov. All rights reserved. - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * 1. Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. - * - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY DMITRY VYUKOV "AS IS" AND ANY EXPRESS OR IMPLIED - * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT - * SHALL DMITRY VYUKOV OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR - * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF - * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE - * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF - * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - * The views and conclusions contained in the software and documentation are - * those of the authors and should not be interpreted as representing official - * policies, either expressed or implied, of Dmitry Vyukov. 
- */ - -#![experimental] -#![allow(missing_docs, dead_code)] - -// http://www.1024cores.net/home/lock-free-algorithms/queues/bounded-mpmc-queue - -use core::prelude::*; - -use alloc::arc::Arc; -use vec::Vec; -use core::num::UnsignedInt; -use core::cell::UnsafeCell; - -use sync::atomic::{AtomicUint,Relaxed,Release,Acquire}; - -struct Node { - sequence: AtomicUint, - value: Option, -} - -struct State { - pad0: [u8, ..64], - buffer: Vec>>, - mask: uint, - pad1: [u8, ..64], - enqueue_pos: AtomicUint, - pad2: [u8, ..64], - dequeue_pos: AtomicUint, - pad3: [u8, ..64], -} - -pub struct Queue { - state: Arc>, -} - -impl State { - fn with_capacity(capacity: uint) -> State { - let capacity = if capacity < 2 || (capacity & (capacity - 1)) != 0 { - if capacity < 2 { - 2u - } else { - // use next power of 2 as capacity - capacity.next_power_of_two() - } - } else { - capacity - }; - let buffer = Vec::from_fn(capacity, |i| { - UnsafeCell::new(Node { sequence:AtomicUint::new(i), value: None }) - }); - State{ - pad0: [0, ..64], - buffer: buffer, - mask: capacity-1, - pad1: [0, ..64], - enqueue_pos: AtomicUint::new(0), - pad2: [0, ..64], - dequeue_pos: AtomicUint::new(0), - pad3: [0, ..64], - } - } - - fn push(&self, value: T) -> bool { - let mask = self.mask; - let mut pos = self.enqueue_pos.load(Relaxed); - loop { - let node = &self.buffer[pos & mask]; - let seq = unsafe { (*node.get()).sequence.load(Acquire) }; - let diff: int = seq as int - pos as int; - - if diff == 0 { - let enqueue_pos = self.enqueue_pos.compare_and_swap(pos, pos+1, Relaxed); - if enqueue_pos == pos { - unsafe { - (*node.get()).value = Some(value); - (*node.get()).sequence.store(pos+1, Release); - } - break - } else { - pos = enqueue_pos; - } - } else if diff < 0 { - return false - } else { - pos = self.enqueue_pos.load(Relaxed); - } - } - true - } - - fn pop(&self) -> Option { - let mask = self.mask; - let mut pos = self.dequeue_pos.load(Relaxed); - loop { - let node = &self.buffer[pos & mask]; - let seq = unsafe { (*node.get()).sequence.load(Acquire) }; - let diff: int = seq as int - (pos + 1) as int; - if diff == 0 { - let dequeue_pos = self.dequeue_pos.compare_and_swap(pos, pos+1, Relaxed); - if dequeue_pos == pos { - unsafe { - let value = (*node.get()).value.take(); - (*node.get()).sequence.store(pos + mask + 1, Release); - return value - } - } else { - pos = dequeue_pos; - } - } else if diff < 0 { - return None - } else { - pos = self.dequeue_pos.load(Relaxed); - } - } - } -} - -impl Queue { - pub fn with_capacity(capacity: uint) -> Queue { - Queue{ - state: Arc::new(State::with_capacity(capacity)) - } - } - - pub fn push(&self, value: T) -> bool { - self.state.push(value) - } - - pub fn pop(&self) -> Option { - self.state.pop() - } -} - -impl Clone for Queue { - fn clone(&self) -> Queue { - Queue { state: self.state.clone() } - } -} - -#[cfg(test)] -mod tests { - use prelude::*; - use super::Queue; - - #[test] - fn test() { - let nthreads = 8u; - let nmsgs = 1000u; - let q = Queue::with_capacity(nthreads*nmsgs); - assert_eq!(None, q.pop()); - let (tx, rx) = channel(); - - for _ in range(0, nthreads) { - let q = q.clone(); - let tx = tx.clone(); - spawn(proc() { - let q = q; - for i in range(0, nmsgs) { - assert!(q.push(i)); - } - tx.send(()); - }); - } - - let mut completion_rxs = vec![]; - for _ in range(0, nthreads) { - let (tx, rx) = channel(); - completion_rxs.push(rx); - let q = q.clone(); - spawn(proc() { - let q = q; - let mut i = 0u; - loop { - match q.pop() { - None => {}, - Some(_) => { - i += 1; - if i == nmsgs { 
break } - } - } - } - tx.send(i); - }); - } - - for rx in completion_rxs.iter_mut() { - assert_eq!(nmsgs, rx.recv()); - } - for _ in range(0, nthreads) { - rx.recv(); - } - } -} diff --git a/src/libstd/sync/mutex.rs b/src/libstd/sync/mutex.rs index c9e90210c30..3d17f2bc64b 100644 --- a/src/libstd/sync/mutex.rs +++ b/src/libstd/sync/mutex.rs @@ -8,43 +8,68 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -//! A simple native mutex implementation. Warning: this API is likely -//! to change soon. +use prelude::*; -#![allow(dead_code)] - -use core::prelude::*; -use alloc::boxed::Box; -use rustrt::mutex; - -pub const LOCKED: uint = 1 << 0; -pub const BLOCKED: uint = 1 << 1; +use cell::UnsafeCell; +use kinds::marker; +use sync::{poison, AsMutexGuard}; +use sys_common::mutex as sys; /// A mutual exclusion primitive useful for protecting shared data /// -/// This mutex will properly block tasks waiting for the lock to become -/// available. The mutex can also be statically initialized or created via a -/// `new` constructor. +/// This mutex will block threads waiting for the lock to become available. The +/// mutex can also be statically initialized or created via a `new` +/// constructor. Each mutex has a type parameter which represents the data that +/// it is protecting. The data can only be accessed through the RAII guards +/// returned from `lock` and `try_lock`, which guarantees that the data is only +/// ever accessed when the mutex is locked. +/// +/// # Poisoning +/// +/// In order to prevent access to otherwise invalid data, each mutex will +/// propagate any panics which occur while the lock is held. Once a thread has +/// panicked while holding the lock, then all other threads will immediately +/// panic as well once they hold the lock. /// /// # Example /// -/// ```rust,ignore -/// use std::sync::mutex::Mutex; +/// ```rust +/// use std::sync::{Arc, Mutex}; +/// const N: uint = 10; /// -/// let m = Mutex::new(); -/// let guard = m.lock(); -/// // do some work -/// drop(guard); // unlock the lock +/// // Spawn a few threads to increment a shared variable (non-atomically), and +/// // let the main thread know once all increments are done. +/// // +/// // Here we're using an Arc to share memory among tasks, and the data inside +/// // the Arc is protected with a mutex. +/// let data = Arc::new(Mutex::new(0)); +/// +/// let (tx, rx) = channel(); +/// for _ in range(0, 10) { +/// let (data, tx) = (data.clone(), tx.clone()); +/// spawn(proc() { +/// // The shared static can only be accessed once the lock is held. +/// // Our non-atomic increment is safe because we're the only thread +/// // which can access the shared state when the lock is held. +/// let mut data = data.lock(); +/// *data += 1; +/// if *data == N { +/// tx.send(()); +/// } +/// // the lock is unlocked here when `data` goes out of scope. +/// }); +/// } +/// +/// rx.recv(); /// ``` -pub struct Mutex { +pub struct Mutex { // Note that this static mutex is in a *box*, not inlined into the struct - // itself. This is done for memory safety reasons with the usage of a - // StaticNativeMutex inside the static mutex above. Once a native mutex has - // been used once, its address can never change (it can't be moved). This - // mutex type can be safely moved at any time, so to ensure that the native - // mutex is used correctly we box the inner lock to give it a constant - // address. - lock: Box, + // itself. 
Once a native mutex has been used once, its address can never + // change (it can't be moved). This mutex type can be safely moved at any + // time, so to ensure that the native mutex is used correctly we box the + // inner lock to give it a constant address. + inner: Box, + data: UnsafeCell, } /// The static mutex type is provided to allow for static allocation of mutexes. @@ -57,8 +82,8 @@ pub struct Mutex { /// /// # Example /// -/// ```rust,ignore -/// use std::sync::mutex::{StaticMutex, MUTEX_INIT}; +/// ```rust +/// use std::sync::{StaticMutex, MUTEX_INIT}; /// /// static LOCK: StaticMutex = MUTEX_INIT; /// @@ -69,35 +94,113 @@ pub struct Mutex { /// // lock is unlocked here. /// ``` pub struct StaticMutex { - lock: mutex::StaticNativeMutex, + lock: sys::Mutex, + poison: UnsafeCell, } /// An RAII implementation of a "scoped lock" of a mutex. When this structure is /// dropped (falls out of scope), the lock will be unlocked. +/// +/// The data protected by the mutex can be access through this guard via its +/// Deref and DerefMut implementations #[must_use] -pub struct Guard<'a> { - guard: mutex::LockGuard<'a>, +pub struct MutexGuard<'a, T: 'a> { + // funny underscores due to how Deref/DerefMut currently work (they + // disregard field privacy). + __lock: &'a Mutex, + __guard: StaticMutexGuard, } -fn lift_guard(guard: mutex::LockGuard) -> Guard { - Guard { guard: guard } +/// An RAII implementation of a "scoped lock" of a static mutex. When this +/// structure is dropped (falls out of scope), the lock will be unlocked. +#[must_use] +pub struct StaticMutexGuard { + lock: &'static sys::Mutex, + marker: marker::NoSend, + poison: poison::Guard<'static>, } /// Static initialization of a mutex. This constant can be used to initialize /// other mutex constants. pub const MUTEX_INIT: StaticMutex = StaticMutex { - lock: mutex::NATIVE_MUTEX_INIT + lock: sys::MUTEX_INIT, + poison: UnsafeCell { value: poison::Flag { failed: false } }, }; -impl StaticMutex { - /// Attempts to grab this lock, see `Mutex::try_lock` - pub fn try_lock<'a>(&'a self) -> Option> { - unsafe { self.lock.trylock().map(lift_guard) } +impl Mutex { + /// Creates a new mutex in an unlocked state ready for use. + pub fn new(t: T) -> Mutex { + Mutex { + inner: box MUTEX_INIT, + data: UnsafeCell::new(t), + } } + /// Acquires a mutex, blocking the current task until it is able to do so. + /// + /// This function will block the local task until it is available to acquire + /// the mutex. Upon returning, the task is the only task with the mutex + /// held. An RAII guard is returned to allow scoped unlock of the lock. When + /// the guard goes out of scope, the mutex will be unlocked. + /// + /// # Panics + /// + /// If another user of this mutex panicked while holding the mutex, then + /// this call will immediately panic once the mutex is acquired. + pub fn lock(&self) -> MutexGuard { + unsafe { + let lock: &'static StaticMutex = &*(&*self.inner as *const _); + MutexGuard::new(self, lock.lock()) + } + } + + /// Attempts to acquire this lock. + /// + /// If the lock could not be acquired at this time, then `None` is returned. + /// Otherwise, an RAII guard is returned. The lock will be unlocked when the + /// guard is dropped. + /// + /// This function does not block. + /// + /// # Panics + /// + /// If another user of this mutex panicked while holding the mutex, then + /// this call will immediately panic if the mutex would otherwise be + /// acquired. 
+ pub fn try_lock(&self) -> Option> { + unsafe { + let lock: &'static StaticMutex = &*(&*self.inner as *const _); + lock.try_lock().map(|guard| { + MutexGuard::new(self, guard) + }) + } + } +} + +#[unsafe_destructor] +impl Drop for Mutex { + fn drop(&mut self) { + // This is actually safe b/c we know that there is no further usage of + // this mutex (it's up to the user to arrange for a mutex to get + // dropped, that's not our job) + unsafe { self.inner.lock.destroy() } + } +} + +impl StaticMutex { /// Acquires this lock, see `Mutex::lock` - pub fn lock<'a>(&'a self) -> Guard<'a> { - lift_guard(unsafe { self.lock.lock() }) + pub fn lock(&'static self) -> StaticMutexGuard { + unsafe { self.lock.lock() } + StaticMutexGuard::new(self) + } + + /// Attempts to grab this lock, see `Mutex::try_lock` + pub fn try_lock(&'static self) -> Option { + if unsafe { self.lock.try_lock() } { + Some(StaticMutexGuard::new(self)) + } else { + None + } } /// Deallocates resources associated with this static mutex. @@ -110,58 +213,73 @@ impl StaticMutex { /// *all* platforms. It may be the case that some platforms do not leak /// memory if this method is not called, but this is not guaranteed to be /// true on all platforms. - pub unsafe fn destroy(&self) { + pub unsafe fn destroy(&'static self) { self.lock.destroy() } } -impl Mutex { - /// Creates a new mutex in an unlocked state ready for use. - pub fn new() -> Mutex { - Mutex { - lock: box StaticMutex { - lock: unsafe { mutex::StaticNativeMutex::new() }, - } - } +impl<'mutex, T> MutexGuard<'mutex, T> { + fn new(lock: &Mutex, guard: StaticMutexGuard) -> MutexGuard { + MutexGuard { __lock: lock, __guard: guard } } - - /// Attempts to acquire this lock. - /// - /// If the lock could not be acquired at this time, then `None` is returned. - /// Otherwise, an RAII guard is returned. The lock will be unlocked when the - /// guard is dropped. - /// - /// This function does not block. - pub fn try_lock<'a>(&'a self) -> Option> { - self.lock.try_lock() - } - - /// Acquires a mutex, blocking the current task until it is able to do so. - /// - /// This function will block the local task until it is available to acquire - /// the mutex. Upon returning, the task is the only task with the mutex - /// held. An RAII guard is returned to allow scoped unlock of the lock. When - /// the guard goes out of scope, the mutex will be unlocked. 
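A short usage sketch for the blocking/non-blocking pair added here, using only the `Mutex` API introduced by this patch (single-threaded, so the `try_lock` below is guaranteed to succeed):

```rust
use std::sync::Mutex;

fn main() {
    let m = Mutex::new(0i);

    // Non-blocking attempt; `None` would mean another thread holds the lock.
    match m.try_lock() {
        Some(mut guard) => { *guard += 1; }
        None => { /* skip the update instead of blocking */ }
    }

    // Blocking acquisition; panics only if the mutex was poisoned.
    assert_eq!(*m.lock(), 1);
}
```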
- pub fn lock<'a>(&'a self) -> Guard<'a> { self.lock.lock() } } -impl Drop for Mutex { +impl<'mutex, T> AsMutexGuard for MutexGuard<'mutex, T> { + unsafe fn as_mutex_guard(&self) -> &StaticMutexGuard { &self.__guard } +} + +impl<'mutex, T> Deref for MutexGuard<'mutex, T> { + fn deref<'a>(&'a self) -> &'a T { unsafe { &*self.__lock.data.get() } } +} +impl<'mutex, T> DerefMut for MutexGuard<'mutex, T> { + fn deref_mut<'a>(&'a mut self) -> &'a mut T { + unsafe { &mut *self.__lock.data.get() } + } +} + +impl StaticMutexGuard { + fn new(lock: &'static StaticMutex) -> StaticMutexGuard { + unsafe { + let guard = StaticMutexGuard { + lock: &lock.lock, + marker: marker::NoSend, + poison: (*lock.poison.get()).borrow(), + }; + guard.poison.check("mutex"); + return guard; + } + } +} + +pub fn guard_lock(guard: &StaticMutexGuard) -> &sys::Mutex { guard.lock } +pub fn guard_poison(guard: &StaticMutexGuard) -> &poison::Guard { + &guard.poison +} + +impl AsMutexGuard for StaticMutexGuard { + unsafe fn as_mutex_guard(&self) -> &StaticMutexGuard { self } +} + +#[unsafe_destructor] +impl Drop for StaticMutexGuard { fn drop(&mut self) { - // This is actually safe b/c we know that there is no further usage of - // this mutex (it's up to the user to arrange for a mutex to get - // dropped, that's not our job) - unsafe { self.lock.destroy() } + unsafe { + self.poison.done(); + self.lock.unlock(); + } } } #[cfg(test)] mod test { use prelude::*; - use super::{Mutex, StaticMutex, MUTEX_INIT}; + + use task; + use sync::{Arc, Mutex, StaticMutex, MUTEX_INIT, Condvar}; #[test] fn smoke() { - let m = Mutex::new(); + let m = Mutex::new(()); drop(m.lock()); drop(m.lock()); } @@ -211,8 +329,104 @@ mod test { } #[test] - fn trylock() { - let m = Mutex::new(); + fn try_lock() { + let m = Mutex::new(()); assert!(m.try_lock().is_some()); } + + #[test] + fn test_mutex_arc_condvar() { + let arc = Arc::new((Mutex::new(false), Condvar::new())); + let arc2 = arc.clone(); + let (tx, rx) = channel(); + spawn(proc() { + // wait until parent gets in + rx.recv(); + let &(ref lock, ref cvar) = &*arc2; + let mut lock = lock.lock(); + *lock = true; + cvar.notify_one(); + }); + + let &(ref lock, ref cvar) = &*arc; + let lock = lock.lock(); + tx.send(()); + assert!(!*lock); + while !*lock { + cvar.wait(&lock); + } + } + + #[test] + #[should_fail] + fn test_arc_condvar_poison() { + let arc = Arc::new((Mutex::new(1i), Condvar::new())); + let arc2 = arc.clone(); + let (tx, rx) = channel(); + + spawn(proc() { + rx.recv(); + let &(ref lock, ref cvar) = &*arc2; + let _g = lock.lock(); + cvar.notify_one(); + // Parent should fail when it wakes up. + panic!(); + }); + + let &(ref lock, ref cvar) = &*arc; + let lock = lock.lock(); + tx.send(()); + while *lock == 1 { + cvar.wait(&lock); + } + } + + #[test] + #[should_fail] + fn test_mutex_arc_poison() { + let arc = Arc::new(Mutex::new(1i)); + let arc2 = arc.clone(); + let _ = task::try(proc() { + let lock = arc2.lock(); + assert_eq!(*lock, 2); + }); + let lock = arc.lock(); + assert_eq!(*lock, 1); + } + + #[test] + fn test_mutex_arc_nested() { + // Tests nested mutexes and access + // to underlying data. 
+ let arc = Arc::new(Mutex::new(1i)); + let arc2 = Arc::new(Mutex::new(arc)); + let (tx, rx) = channel(); + spawn(proc() { + let lock = arc2.lock(); + let lock2 = lock.deref().lock(); + assert_eq!(*lock2, 1); + tx.send(()); + }); + rx.recv(); + } + + #[test] + fn test_mutex_arc_access_in_unwind() { + let arc = Arc::new(Mutex::new(1i)); + let arc2 = arc.clone(); + let _ = task::try::<()>(proc() { + struct Unwinder { + i: Arc>, + } + impl Drop for Unwinder { + fn drop(&mut self) { + *self.i.lock() += 1; + } + } + let _u = Unwinder { i: arc2 }; + panic!(); + }); + let lock = arc.lock(); + assert_eq!(*lock, 2); + } } diff --git a/src/libstd/sync/one.rs b/src/libstd/sync/once.rs similarity index 96% rename from src/libstd/sync/one.rs rename to src/libstd/sync/once.rs index f710a6da59b..a75088120f8 100644 --- a/src/libstd/sync/one.rs +++ b/src/libstd/sync/once.rs @@ -13,12 +13,10 @@ //! This primitive is meant to be used to run one-time initialization. An //! example use case would be for initializing an FFI library. -use core::prelude::*; - -use core::int; -use core::atomic; - -use super::mutex::{StaticMutex, MUTEX_INIT}; +use int; +use mem::drop; +use sync::atomic; +use sync::{StaticMutex, MUTEX_INIT}; /// A synchronization primitive which can be used to run a one-time global /// initialization. Useful for one-time initialization for FFI or related @@ -27,8 +25,8 @@ use super::mutex::{StaticMutex, MUTEX_INIT}; /// /// # Example /// -/// ```rust,ignore -/// use std::sync::one::{Once, ONCE_INIT}; +/// ```rust +/// use std::sync::{Once, ONCE_INIT}; /// /// static START: Once = ONCE_INIT; /// @@ -59,7 +57,7 @@ impl Once { /// /// When this function returns, it is guaranteed that some initialization /// has run and completed (it may not be the closure specified). - pub fn doit(&self, f: ||) { + pub fn doit(&'static self, f: ||) { // Optimize common path: load is much cheaper than fetch_add. if self.cnt.load(atomic::SeqCst) < 0 { return @@ -121,6 +119,7 @@ impl Once { #[cfg(test)] mod test { use prelude::*; + use task; use super::{ONCE_INIT, Once}; diff --git a/src/libstd/sync/poison.rs b/src/libstd/sync/poison.rs new file mode 100644 index 00000000000..eb46fd77147 --- /dev/null +++ b/src/libstd/sync/poison.rs @@ -0,0 +1,48 @@ +// Copyright 2014 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use option::None; +use rustrt::task::Task; +use rustrt::local::Local; + +pub struct Flag { pub failed: bool } + +impl Flag { + pub fn borrow(&mut self) -> Guard { + Guard { flag: &mut self.failed, failing: failing() } + } +} + +pub struct Guard<'a> { + flag: &'a mut bool, + failing: bool, +} + +impl<'a> Guard<'a> { + pub fn check(&self, name: &str) { + if *self.flag { + panic!("poisoned {} - another task failed inside", name); + } + } + + pub fn done(&mut self) { + if !self.failing && failing() { + *self.flag = true; + } + } +} + +fn failing() -> bool { + if Local::exists(None::) { + Local::borrow(None::).unwinder.unwinding() + } else { + false + } +} diff --git a/src/libstd/sync/raw.rs b/src/libstd/sync/raw.rs deleted file mode 100644 index 47580a11513..00000000000 --- a/src/libstd/sync/raw.rs +++ /dev/null @@ -1,1132 +0,0 @@ -// Copyright 2012-2014 The Rust Project Developers. 
See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -//! Raw concurrency primitives you know and love. -//! -//! These primitives are not recommended for general use, but are provided for -//! flavorful use-cases. It is recommended to use the types at the top of the -//! `sync` crate which wrap values directly and provide safer abstractions for -//! containing data. - -// A side-effect of merging libsync into libstd; will go away once -// libsync rewrite lands -#![allow(dead_code)] - -use core::prelude::*; -use self::ReacquireOrderLock::*; - -use core::atomic; -use core::finally::Finally; -use core::kinds::marker; -use core::mem; -use core::cell::UnsafeCell; -use vec::Vec; - -use super::mutex; -use comm::{Receiver, Sender, channel}; - -// Each waiting task receives on one of these. -type WaitEnd = Receiver<()>; -type SignalEnd = Sender<()>; -// A doubly-ended queue of waiting tasks. -struct WaitQueue { - head: Receiver, - tail: Sender, -} - -impl WaitQueue { - fn new() -> WaitQueue { - let (block_tail, block_head) = channel(); - WaitQueue { head: block_head, tail: block_tail } - } - - // Signals one live task from the queue. - fn signal(&self) -> bool { - match self.head.try_recv() { - Ok(ch) => { - // Send a wakeup signal. If the waiter was killed, its port will - // have closed. Keep trying until we get a live task. - if ch.send_opt(()).is_ok() { - true - } else { - self.signal() - } - } - _ => false - } - } - - fn broadcast(&self) -> uint { - let mut count = 0; - loop { - match self.head.try_recv() { - Ok(ch) => { - if ch.send_opt(()).is_ok() { - count += 1; - } - } - _ => break - } - } - count - } - - fn wait_end(&self) -> WaitEnd { - let (signal_end, wait_end) = channel(); - self.tail.send(signal_end); - wait_end - } -} - -// The building-block used to make semaphores, mutexes, and rwlocks. -struct Sem { - lock: mutex::Mutex, - // n.b, we need Sem to be `Sync`, but the WaitQueue type is not send/share - // (for good reason). We have an internal invariant on this semaphore, - // however, that the queue is never accessed outside of a locked - // context. - inner: UnsafeCell> -} - -struct SemInner { - count: int, - waiters: WaitQueue, - // Can be either unit or another waitqueue. Some sems shouldn't come with - // a condition variable attached, others should. - blocked: Q, -} - -#[must_use] -struct SemGuard<'a, Q:'a> { - sem: &'a Sem, -} - -impl Sem { - fn new(count: int, q: Q) -> Sem { - assert!(count >= 0, - "semaphores cannot be initialized with negative values"); - Sem { - lock: mutex::Mutex::new(), - inner: UnsafeCell::new(SemInner { - waiters: WaitQueue::new(), - count: count, - blocked: q, - }) - } - } - - unsafe fn with(&self, f: |&mut SemInner|) { - let _g = self.lock.lock(); - // This &mut is safe because, due to the lock, we are the only one who can touch the data - f(&mut *self.inner.get()) - } - - pub fn acquire(&self) { - unsafe { - let mut waiter_nobe = None; - self.with(|state| { - state.count -= 1; - if state.count < 0 { - // Create waiter nobe, enqueue ourself, and tell - // outer scope we need to block. - waiter_nobe = Some(state.waiters.wait_end()); - } - }); - // Uncomment if you wish to test for sem races. Not - // valgrind-friendly. 
- /* for _ in range(0u, 1000) { task::deschedule(); } */ - // Need to wait outside the exclusive. - if waiter_nobe.is_some() { - let _ = waiter_nobe.unwrap().recv(); - } - } - } - - pub fn release(&self) { - unsafe { - self.with(|state| { - state.count += 1; - if state.count <= 0 { - state.waiters.signal(); - } - }) - } - } - - pub fn access<'a>(&'a self) -> SemGuard<'a, Q> { - self.acquire(); - SemGuard { sem: self } - } -} - -#[unsafe_destructor] -impl<'a, Q: Send> Drop for SemGuard<'a, Q> { - fn drop(&mut self) { - self.sem.release(); - } -} - -impl Sem> { - fn new_and_signal(count: int, num_condvars: uint) -> Sem> { - let mut queues = Vec::new(); - for _ in range(0, num_condvars) { queues.push(WaitQueue::new()); } - Sem::new(count, queues) - } - - // The only other places that condvars get built are rwlock.write_cond() - // and rwlock_write_mode. - pub fn access_cond<'a>(&'a self) -> SemCondGuard<'a> { - SemCondGuard { - guard: self.access(), - cvar: Condvar { sem: self, order: Nothing, nocopy: marker::NoCopy }, - } - } -} - -// FIXME(#3598): Want to use an Option down below, but we need a custom enum -// that's not polymorphic to get around the fact that lifetimes are invariant -// inside of type parameters. -enum ReacquireOrderLock<'a> { - Nothing, // c.c - Just(&'a Semaphore), -} - -/// A mechanism for atomic-unlock-and-deschedule blocking and signalling. -pub struct Condvar<'a> { - // The 'Sem' object associated with this condvar. This is the one that's - // atomically-unlocked-and-descheduled upon and reacquired during wakeup. - sem: &'a Sem >, - // This is (can be) an extra semaphore which is held around the reacquire - // operation on the first one. This is only used in cvars associated with - // rwlocks, and is needed to ensure that, when a downgrader is trying to - // hand off the access lock (which would be the first field, here), a 2nd - // writer waking up from a cvar wait can't race with a reader to steal it, - // See the comment in write_cond for more detail. - order: ReacquireOrderLock<'a>, - // Make sure condvars are non-copyable. - nocopy: marker::NoCopy, -} - -impl<'a> Condvar<'a> { - /// Atomically drop the associated lock, and block until a signal is sent. - /// - /// # Panics - /// - /// A task which is killed while waiting on a condition variable will wake - /// up, panic, and unlock the associated lock as it unwinds. - pub fn wait(&self) { self.wait_on(0) } - - /// As wait(), but can specify which of multiple condition variables to - /// wait on. Only a signal_on() or broadcast_on() with the same condvar_id - /// will wake this thread. - /// - /// The associated lock must have been initialised with an appropriate - /// number of condvars. The condvar_id must be between 0 and num_condvars-1 - /// or else this call will panic. - /// - /// wait() is equivalent to wait_on(0). - pub fn wait_on(&self, condvar_id: uint) { - let mut wait_end = None; - let mut out_of_bounds = None; - // Release lock, 'atomically' enqueuing ourselves in so doing. - unsafe { - self.sem.with(|state| { - if condvar_id < state.blocked.len() { - // Drop the lock. - state.count += 1; - if state.count <= 0 { - state.waiters.signal(); - } - // Create waiter nobe, and enqueue ourself to - // be woken up by a signaller. - wait_end = Some(state.blocked[condvar_id].wait_end()); - } else { - out_of_bounds = Some(state.blocked.len()); - } - }) - } - - // If deschedule checks start getting inserted anywhere, we can be - // killed before or after enqueueing. 
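The counting rule used by `acquire`/`release` above is easy to lose in the closure plumbing; the following is a toy, single-threaded trace of the arithmetic (not code from this module), where a negative count means "that many tasks are queued in `waiters`":

```rust
fn main() {
    let mut count = 1i;     // semaphore created with a count of one

    count -= 1;             // task A acquires: count == 0, A proceeds
    count -= 1;             // task B acquires: count == -1, B must block
    assert_eq!(count, -1);  // one waiter is now queued

    count += 1;             // A releases: count <= 0, so one waiter is signalled
    assert_eq!(count, 0);   // B now owns the semaphore
}
```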
- check_cvar_bounds(out_of_bounds, condvar_id, "cond.wait_on()", || { - // Unconditionally "block". (Might not actually block if a - // signaller already sent -- I mean 'unconditionally' in contrast - // with acquire().) - (|| { - let _ = wait_end.take().unwrap().recv(); - }).finally(|| { - // Reacquire the condvar. - match self.order { - Just(lock) => { - let _g = lock.access(); - self.sem.acquire(); - } - Nothing => self.sem.acquire(), - } - }) - }) - } - - /// Wake up a blocked task. Returns false if there was no blocked task. - pub fn signal(&self) -> bool { self.signal_on(0) } - - /// As signal, but with a specified condvar_id. See wait_on. - pub fn signal_on(&self, condvar_id: uint) -> bool { - unsafe { - let mut out_of_bounds = None; - let mut result = false; - self.sem.with(|state| { - if condvar_id < state.blocked.len() { - result = state.blocked[condvar_id].signal(); - } else { - out_of_bounds = Some(state.blocked.len()); - } - }); - check_cvar_bounds(out_of_bounds, - condvar_id, - "cond.signal_on()", - || result) - } - } - - /// Wake up all blocked tasks. Returns the number of tasks woken. - pub fn broadcast(&self) -> uint { self.broadcast_on(0) } - - /// As broadcast, but with a specified condvar_id. See wait_on. - pub fn broadcast_on(&self, condvar_id: uint) -> uint { - let mut out_of_bounds = None; - let mut queue = None; - unsafe { - self.sem.with(|state| { - if condvar_id < state.blocked.len() { - // To avoid :broadcast_heavy, we make a new waitqueue, - // swap it out with the old one, and broadcast on the - // old one outside of the little-lock. - queue = Some(mem::replace(&mut state.blocked[condvar_id], - WaitQueue::new())); - } else { - out_of_bounds = Some(state.blocked.len()); - } - }); - check_cvar_bounds(out_of_bounds, - condvar_id, - "cond.signal_on()", - || { - queue.take().unwrap().broadcast() - }) - } - } -} - -// Checks whether a condvar ID was out of bounds, and panics if so, or does -// something else next on success. -#[inline] -fn check_cvar_bounds( - out_of_bounds: Option, - id: uint, - act: &str, - blk: || -> U) - -> U { - match out_of_bounds { - Some(0) => - panic!("{} with illegal ID {} - this lock has no condvars!", act, id), - Some(length) => - panic!("{} with illegal ID {} - ID must be less than {}", act, id, length), - None => blk() - } -} - -#[must_use] -struct SemCondGuard<'a> { - guard: SemGuard<'a, Vec>, - cvar: Condvar<'a>, -} - -/// A counting, blocking, bounded-waiting semaphore. -pub struct Semaphore { - sem: Sem<()>, -} - -/// An RAII guard used to represent an acquired resource to a semaphore. When -/// dropped, this value will release the resource back to the semaphore. -#[must_use] -pub struct SemaphoreGuard<'a> { - _guard: SemGuard<'a, ()>, -} - -impl Semaphore { - /// Create a new semaphore with the specified count. - /// - /// # Panics - /// - /// This function will panic if `count` is negative. - pub fn new(count: int) -> Semaphore { - Semaphore { sem: Sem::new(count, ()) } - } - - /// Acquire a resource represented by the semaphore. Blocks if necessary - /// until resource(s) become available. - pub fn acquire(&self) { self.sem.acquire() } - - /// Release a held resource represented by the semaphore. Wakes a blocked - /// contending task, if any exist. Won't block the caller. - pub fn release(&self) { self.sem.release() } - - /// Acquire a resource of this semaphore, returning an RAII guard which will - /// release the resource when dropped. 
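A brief usage sketch for the RAII `access` path documented above, assuming the `Semaphore` re-export that the old `sync::mod` provided; the count and loop bounds are arbitrary:

```rust
use std::sync::{Arc, Semaphore};

fn main() {
    // Allow at most two tasks inside the guarded region at a time.
    let sem = Arc::new(Semaphore::new(2));

    for _ in range(0u, 5) {
        let sem = sem.clone();
        spawn(proc() {
            let _guard = sem.access();  // count decremented; restored on drop
            // ... rate-limited work goes here ...
        });
    }
}
```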
- pub fn access<'a>(&'a self) -> SemaphoreGuard<'a> { - SemaphoreGuard { _guard: self.sem.access() } - } -} - -/// A blocking, bounded-waiting, mutual exclusion lock with an associated -/// FIFO condition variable. -/// -/// # Panics -/// -/// A task which panicks while holding a mutex will unlock the mutex as it -/// unwinds. -pub struct Mutex { - sem: Sem>, -} - -/// An RAII structure which is used to gain access to a mutex's condition -/// variable. Additionally, when a value of this type is dropped, the -/// corresponding mutex is also unlocked. -#[must_use] -pub struct MutexGuard<'a> { - _guard: SemGuard<'a, Vec>, - /// Inner condition variable which is connected to the outer mutex, and can - /// be used for atomic-unlock-and-deschedule. - pub cond: Condvar<'a>, -} - -impl Mutex { - /// Create a new mutex, with one associated condvar. - pub fn new() -> Mutex { Mutex::new_with_condvars(1) } - - /// Create a new mutex, with a specified number of associated condvars. This - /// will allow calling wait_on/signal_on/broadcast_on with condvar IDs - /// between 0 and num_condvars-1. (If num_condvars is 0, lock_cond will be - /// allowed but any operations on the condvar will panic.) - pub fn new_with_condvars(num_condvars: uint) -> Mutex { - Mutex { sem: Sem::new_and_signal(1, num_condvars) } - } - - /// Acquires ownership of this mutex, returning an RAII guard which will - /// unlock the mutex when dropped. The associated condition variable can - /// also be accessed through the returned guard. - pub fn lock<'a>(&'a self) -> MutexGuard<'a> { - let SemCondGuard { guard, cvar } = self.sem.access_cond(); - MutexGuard { _guard: guard, cond: cvar } - } -} - -// NB: Wikipedia - Readers-writers_problem#The_third_readers-writers_problem - -/// A blocking, no-starvation, reader-writer lock with an associated condvar. -/// -/// # Panics -/// -/// A task which panics while holding an rwlock will unlock the rwlock as it -/// unwinds. -pub struct RWLock { - order_lock: Semaphore, - access_lock: Sem>, - - // The only way the count flag is ever accessed is with xadd. Since it is - // a read-modify-write operation, multiple xadds on different cores will - // always be consistent with respect to each other, so a monotonic/relaxed - // consistency ordering suffices (i.e., no extra barriers are needed). - // - // FIXME(#6598): The atomics module has no relaxed ordering flag, so I use - // acquire/release orderings superfluously. Change these someday. - read_count: atomic::AtomicUint, -} - -/// An RAII helper which is created by acquiring a read lock on an RWLock. When -/// dropped, this will unlock the RWLock. -#[must_use] -pub struct RWLockReadGuard<'a> { - lock: &'a RWLock, -} - -/// An RAII helper which is created by acquiring a write lock on an RWLock. When -/// dropped, this will unlock the RWLock. -/// -/// A value of this type can also be consumed to downgrade to a read-only lock. -#[must_use] -pub struct RWLockWriteGuard<'a> { - lock: &'a RWLock, - /// Inner condition variable that is connected to the write-mode of the - /// outer rwlock. - pub cond: Condvar<'a>, -} - -impl RWLock { - /// Create a new rwlock, with one associated condvar. - pub fn new() -> RWLock { RWLock::new_with_condvars(1) } - - /// Create a new rwlock, with a specified number of associated condvars. - /// Similar to mutex_with_condvars. 
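The `read_count` field above is only ever touched with xadd; the scheme it supports (the first reader in takes the shared access lock, and presumably the read guard's destructor does the mirror-image decrement so the last reader out releases it) reduces to a pair of atomic read-modify-writes. The sketch below is illustrative and single-threaded only, and uses `SeqCst` rather than the weaker orderings the real code gets away with:

```rust
use std::sync::atomic::{AtomicUint, SeqCst};

fn main() {
    let readers = AtomicUint::new(0);

    // A reader entering: only the first one (previous count 0) would take
    // the shared access lock in the scheme above.
    let prev = readers.fetch_add(1, SeqCst);
    if prev == 0 { /* acquire access_lock here */ }

    // A reader leaving: only the last one out (previous count 1) releases it.
    let prev = readers.fetch_sub(1, SeqCst);
    if prev == 1 { /* release access_lock here */ }
}
```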
- pub fn new_with_condvars(num_condvars: uint) -> RWLock { - RWLock { - order_lock: Semaphore::new(1), - access_lock: Sem::new_and_signal(1, num_condvars), - read_count: atomic::AtomicUint::new(0), - } - } - - /// Acquires a read-lock, returning an RAII guard that will unlock the lock - /// when dropped. Calls to 'read' from other tasks may run concurrently with - /// this one. - pub fn read<'a>(&'a self) -> RWLockReadGuard<'a> { - let _guard = self.order_lock.access(); - let old_count = self.read_count.fetch_add(1, atomic::Acquire); - if old_count == 0 { - self.access_lock.acquire(); - } - RWLockReadGuard { lock: self } - } - - /// Acquire a write-lock, returning an RAII guard that will unlock the lock - /// when dropped. No calls to 'read' or 'write' from other tasks will run - /// concurrently with this one. - /// - /// You can also downgrade a write to a read by calling the `downgrade` - /// method on the returned guard. Additionally, the guard will contain a - /// `Condvar` attached to this lock. - /// - /// # Example - /// - /// ```{rust,ignore} - /// use std::sync::raw::RWLock; - /// - /// let lock = RWLock::new(); - /// let write = lock.write(); - /// // ... exclusive access ... - /// let read = write.downgrade(); - /// // ... shared access ... - /// drop(read); - /// ``` - pub fn write<'a>(&'a self) -> RWLockWriteGuard<'a> { - let _g = self.order_lock.access(); - self.access_lock.acquire(); - - // It's important to thread our order lock into the condvar, so that - // when a cond.wait() wakes up, it uses it while reacquiring the - // access lock. If we permitted a waking-up writer to "cut in line", - // there could arise a subtle race when a downgrader attempts to hand - // off the reader cloud lock to a waiting reader. This race is tested - // in arc.rs (test_rw_write_cond_downgrade_read_race) and looks like: - // T1 (writer) T2 (downgrader) T3 (reader) - // [in cond.wait()] - // [locks for writing] - // [holds access_lock] - // [is signalled, perhaps by - // downgrader or a 4th thread] - // tries to lock access(!) - // lock order_lock - // xadd read_count[0->1] - // tries to lock access - // [downgrade] - // xadd read_count[1->2] - // unlock access - // Since T1 contended on the access lock before T3 did, it will steal - // the lock handoff. Adding order_lock in the condvar reacquire path - // solves this because T1 will hold order_lock while waiting on access, - // which will cause T3 to have to wait until T1 finishes its write, - // which can't happen until T2 finishes the downgrade-read entirely. - // The astute reader will also note that making waking writers use the - // order_lock is better for not starving readers. - RWLockWriteGuard { - lock: self, - cond: Condvar { - sem: &self.access_lock, - order: Just(&self.order_lock), - nocopy: marker::NoCopy, - } - } - } -} - -impl<'a> RWLockWriteGuard<'a> { - /// Consumes this write lock and converts it into a read lock. - pub fn downgrade(self) -> RWLockReadGuard<'a> { - let lock = self.lock; - // Don't run the destructor of the write guard, we're in charge of - // things from now on - unsafe { mem::forget(self) } - - let old_count = lock.read_count.fetch_add(1, atomic::Release); - // If another reader was already blocking, we need to hand-off - // the "reader cloud" access lock to them. - if old_count != 0 { - // Guaranteed not to let another writer in, because - // another reader was holding the order_lock. Hence they - // must be the one to get the access_lock (because all - // access_locks are acquired with order_lock held). 
See - // the comment in write_cond for more justification. - lock.access_lock.release(); - } - RWLockReadGuard { lock: lock } - } -} - -#[unsafe_destructor] -impl<'a> Drop for RWLockWriteGuard<'a> { - fn drop(&mut self) { - self.lock.access_lock.release(); - } -} - -#[unsafe_destructor] -impl<'a> Drop for RWLockReadGuard<'a> { - fn drop(&mut self) { - let old_count = self.lock.read_count.fetch_sub(1, atomic::Release); - assert!(old_count > 0); - if old_count == 1 { - // Note: this release used to be outside of a locked access - // to exclusive-protected state. If this code is ever - // converted back to such (instead of using atomic ops), - // this access MUST NOT go inside the exclusive access. - self.lock.access_lock.release(); - } - } -} - -#[cfg(test)] -mod tests { - pub use self::RWLockMode::*; - - use sync::Arc; - use prelude::*; - use super::{Semaphore, Mutex, RWLock, Condvar}; - - use mem; - use result; - use task; - - #[test] - fn test_sem_acquire_release() { - let s = Semaphore::new(1); - s.acquire(); - s.release(); - s.acquire(); - } - - #[test] - fn test_sem_basic() { - let s = Semaphore::new(1); - let _g = s.access(); - } - - #[test] - #[should_fail] - fn test_sem_basic2() { - Semaphore::new(-1); - } - - #[test] - fn test_sem_as_mutex() { - let s = Arc::new(Semaphore::new(1)); - let s2 = s.clone(); - task::spawn(proc() { - let _g = s2.access(); - for _ in range(0u, 5) { task::deschedule(); } - }); - let _g = s.access(); - for _ in range(0u, 5) { task::deschedule(); } - } - - #[test] - fn test_sem_as_cvar() { - /* Child waits and parent signals */ - let (tx, rx) = channel(); - let s = Arc::new(Semaphore::new(0)); - let s2 = s.clone(); - task::spawn(proc() { - s2.acquire(); - tx.send(()); - }); - for _ in range(0u, 5) { task::deschedule(); } - s.release(); - let _ = rx.recv(); - - /* Parent waits and child signals */ - let (tx, rx) = channel(); - let s = Arc::new(Semaphore::new(0)); - let s2 = s.clone(); - task::spawn(proc() { - for _ in range(0u, 5) { task::deschedule(); } - s2.release(); - let _ = rx.recv(); - }); - s.acquire(); - tx.send(()); - } - - #[test] - fn test_sem_multi_resource() { - // Parent and child both get in the critical section at the same - // time, and shake hands. - let s = Arc::new(Semaphore::new(2)); - let s2 = s.clone(); - let (tx1, rx1) = channel(); - let (tx2, rx2) = channel(); - task::spawn(proc() { - let _g = s2.access(); - let _ = rx2.recv(); - tx1.send(()); - }); - let _g = s.access(); - tx2.send(()); - let _ = rx1.recv(); - } - - #[test] - fn test_sem_runtime_friendly_blocking() { - // Force the runtime to schedule two threads on the same sched_loop. - // When one blocks, it should schedule the other one. - let s = Arc::new(Semaphore::new(1)); - let s2 = s.clone(); - let (tx, rx) = channel(); - { - let _g = s.access(); - task::spawn(proc() { - tx.send(()); - drop(s2.access()); - tx.send(()); - }); - rx.recv(); // wait for child to come alive - for _ in range(0u, 5) { task::deschedule(); } // let the child contend - } - rx.recv(); // wait for child to be done - } - - #[test] - fn test_mutex_lock() { - // Unsafely achieve shared state, and do the textbook - // "load tmp = move ptr; inc tmp; store ptr <- tmp" dance. 
- let (tx, rx) = channel(); - let m = Arc::new(Mutex::new()); - let m2 = m.clone(); - let mut sharedstate = box 0; - { - let ptr: *mut int = &mut *sharedstate; - task::spawn(proc() { - access_shared(ptr, &m2, 10); - tx.send(()); - }); - } - { - access_shared(&mut *sharedstate, &m, 10); - let _ = rx.recv(); - - assert_eq!(*sharedstate, 20); - } - - fn access_shared(sharedstate: *mut int, m: &Arc, n: uint) { - for _ in range(0u, n) { - let _g = m.lock(); - let oldval = unsafe { *sharedstate }; - task::deschedule(); - unsafe { *sharedstate = oldval + 1; } - } - } - } - - #[test] - fn test_mutex_cond_wait() { - let m = Arc::new(Mutex::new()); - - // Child wakes up parent - { - let lock = m.lock(); - let m2 = m.clone(); - task::spawn(proc() { - let lock = m2.lock(); - let woken = lock.cond.signal(); - assert!(woken); - }); - lock.cond.wait(); - } - // Parent wakes up child - let (tx, rx) = channel(); - let m3 = m.clone(); - task::spawn(proc() { - let lock = m3.lock(); - tx.send(()); - lock.cond.wait(); - tx.send(()); - }); - rx.recv(); // Wait until child gets in the mutex - { - let lock = m.lock(); - let woken = lock.cond.signal(); - assert!(woken); - } - rx.recv(); // Wait until child wakes up - } - - fn test_mutex_cond_broadcast_helper(num_waiters: uint) { - let m = Arc::new(Mutex::new()); - let mut rxs = Vec::new(); - - for _ in range(0u, num_waiters) { - let mi = m.clone(); - let (tx, rx) = channel(); - rxs.push(rx); - task::spawn(proc() { - let lock = mi.lock(); - tx.send(()); - lock.cond.wait(); - tx.send(()); - }); - } - - // wait until all children get in the mutex - for rx in rxs.iter_mut() { rx.recv(); } - { - let lock = m.lock(); - let num_woken = lock.cond.broadcast(); - assert_eq!(num_woken, num_waiters); - } - // wait until all children wake up - for rx in rxs.iter_mut() { rx.recv(); } - } - - #[test] - fn test_mutex_cond_broadcast() { - test_mutex_cond_broadcast_helper(12); - } - - #[test] - fn test_mutex_cond_broadcast_none() { - test_mutex_cond_broadcast_helper(0); - } - - #[test] - fn test_mutex_cond_no_waiter() { - let m = Arc::new(Mutex::new()); - let m2 = m.clone(); - let _ = task::try(proc() { - drop(m.lock()); - }); - let lock = m2.lock(); - assert!(!lock.cond.signal()); - } - - #[test] - fn test_mutex_killed_simple() { - use any::Any; - - // Mutex must get automatically unlocked if panicked/killed within. - let m = Arc::new(Mutex::new()); - let m2 = m.clone(); - - let result: result::Result<(), Box> = task::try(proc() { - let _lock = m2.lock(); - panic!(); - }); - assert!(result.is_err()); - // child task must have finished by the time try returns - drop(m.lock()); - } - - #[test] - fn test_mutex_cond_signal_on_0() { - // Tests that signal_on(0) is equivalent to signal(). 
- let m = Arc::new(Mutex::new()); - let lock = m.lock(); - let m2 = m.clone(); - task::spawn(proc() { - let lock = m2.lock(); - lock.cond.signal_on(0); - }); - lock.cond.wait(); - } - - #[test] - fn test_mutex_no_condvars() { - let result = task::try(proc() { - let m = Mutex::new_with_condvars(0); - m.lock().cond.wait(); - }); - assert!(result.is_err()); - let result = task::try(proc() { - let m = Mutex::new_with_condvars(0); - m.lock().cond.signal(); - }); - assert!(result.is_err()); - let result = task::try(proc() { - let m = Mutex::new_with_condvars(0); - m.lock().cond.broadcast(); - }); - assert!(result.is_err()); - } - - #[cfg(test)] - pub enum RWLockMode { Read, Write, Downgrade, DowngradeRead } - - #[cfg(test)] - fn lock_rwlock_in_mode(x: &Arc, mode: RWLockMode, blk: ||) { - match mode { - Read => { let _g = x.read(); blk() } - Write => { let _g = x.write(); blk() } - Downgrade => { let _g = x.write(); blk() } - DowngradeRead => { let _g = x.write().downgrade(); blk() } - } - } - - #[cfg(test)] - fn test_rwlock_exclusion(x: Arc, - mode1: RWLockMode, - mode2: RWLockMode) { - // Test mutual exclusion between readers and writers. Just like the - // mutex mutual exclusion test, a ways above. - let (tx, rx) = channel(); - let x2 = x.clone(); - let mut sharedstate = box 0; - { - let ptr: *const int = &*sharedstate; - task::spawn(proc() { - let sharedstate: &mut int = - unsafe { mem::transmute(ptr) }; - access_shared(sharedstate, &x2, mode1, 10); - tx.send(()); - }); - } - { - access_shared(&mut *sharedstate, &x, mode2, 10); - let _ = rx.recv(); - - assert_eq!(*sharedstate, 20); - } - - fn access_shared(sharedstate: &mut int, x: &Arc, - mode: RWLockMode, n: uint) { - for _ in range(0u, n) { - lock_rwlock_in_mode(x, mode, || { - let oldval = *sharedstate; - task::deschedule(); - *sharedstate = oldval + 1; - }) - } - } - } - - #[test] - fn test_rwlock_readers_wont_modify_the_data() { - test_rwlock_exclusion(Arc::new(RWLock::new()), Read, Write); - test_rwlock_exclusion(Arc::new(RWLock::new()), Write, Read); - test_rwlock_exclusion(Arc::new(RWLock::new()), Read, Downgrade); - test_rwlock_exclusion(Arc::new(RWLock::new()), Downgrade, Read); - test_rwlock_exclusion(Arc::new(RWLock::new()), Write, DowngradeRead); - test_rwlock_exclusion(Arc::new(RWLock::new()), DowngradeRead, Write); - } - - #[test] - fn test_rwlock_writers_and_writers() { - test_rwlock_exclusion(Arc::new(RWLock::new()), Write, Write); - test_rwlock_exclusion(Arc::new(RWLock::new()), Write, Downgrade); - test_rwlock_exclusion(Arc::new(RWLock::new()), Downgrade, Write); - test_rwlock_exclusion(Arc::new(RWLock::new()), Downgrade, Downgrade); - } - - #[cfg(test)] - fn test_rwlock_handshake(x: Arc, - mode1: RWLockMode, - mode2: RWLockMode, - make_mode2_go_first: bool) { - // Much like sem_multi_resource. - let x2 = x.clone(); - let (tx1, rx1) = channel(); - let (tx2, rx2) = channel(); - task::spawn(proc() { - if !make_mode2_go_first { - rx2.recv(); // parent sends to us once it locks, or ... - } - lock_rwlock_in_mode(&x2, mode2, || { - if make_mode2_go_first { - tx1.send(()); // ... we send to it once we lock - } - rx2.recv(); - tx1.send(()); - }) - }); - if make_mode2_go_first { - rx1.recv(); // child sends to us once it locks, or ... - } - lock_rwlock_in_mode(&x, mode1, || { - if !make_mode2_go_first { - tx2.send(()); // ... 
we send to it once we lock - } - tx2.send(()); - rx1.recv(); - }) - } - - #[test] - fn test_rwlock_readers_and_readers() { - test_rwlock_handshake(Arc::new(RWLock::new()), Read, Read, false); - // The downgrader needs to get in before the reader gets in, otherwise - // they cannot end up reading at the same time. - test_rwlock_handshake(Arc::new(RWLock::new()), DowngradeRead, Read, false); - test_rwlock_handshake(Arc::new(RWLock::new()), Read, DowngradeRead, true); - // Two downgrade_reads can never both end up reading at the same time. - } - - #[test] - fn test_rwlock_downgrade_unlock() { - // Tests that downgrade can unlock the lock in both modes - let x = Arc::new(RWLock::new()); - lock_rwlock_in_mode(&x, Downgrade, || { }); - test_rwlock_handshake(x, Read, Read, false); - let y = Arc::new(RWLock::new()); - lock_rwlock_in_mode(&y, DowngradeRead, || { }); - test_rwlock_exclusion(y, Write, Write); - } - - #[test] - fn test_rwlock_read_recursive() { - let x = RWLock::new(); - let _g1 = x.read(); - let _g2 = x.read(); - } - - #[test] - fn test_rwlock_cond_wait() { - // As test_mutex_cond_wait above. - let x = Arc::new(RWLock::new()); - - // Child wakes up parent - { - let lock = x.write(); - let x2 = x.clone(); - task::spawn(proc() { - let lock = x2.write(); - assert!(lock.cond.signal()); - }); - lock.cond.wait(); - } - // Parent wakes up child - let (tx, rx) = channel(); - let x3 = x.clone(); - task::spawn(proc() { - let lock = x3.write(); - tx.send(()); - lock.cond.wait(); - tx.send(()); - }); - rx.recv(); // Wait until child gets in the rwlock - drop(x.read()); // Must be able to get in as a reader - { - let x = x.write(); - assert!(x.cond.signal()); - } - rx.recv(); // Wait until child wakes up - drop(x.read()); // Just for good measure - } - - #[cfg(test)] - fn test_rwlock_cond_broadcast_helper(num_waiters: uint) { - // Much like the mutex broadcast test. Downgrade-enabled. - fn lock_cond(x: &Arc, blk: |c: &Condvar|) { - let lock = x.write(); - blk(&lock.cond); - } - - let x = Arc::new(RWLock::new()); - let mut rxs = Vec::new(); - - for _ in range(0u, num_waiters) { - let xi = x.clone(); - let (tx, rx) = channel(); - rxs.push(rx); - task::spawn(proc() { - lock_cond(&xi, |cond| { - tx.send(()); - cond.wait(); - tx.send(()); - }) - }); - } - - // wait until all children get in the mutex - for rx in rxs.iter_mut() { let _ = rx.recv(); } - lock_cond(&x, |cond| { - let num_woken = cond.broadcast(); - assert_eq!(num_woken, num_waiters); - }); - // wait until all children wake up - for rx in rxs.iter_mut() { let _ = rx.recv(); } - } - - #[test] - fn test_rwlock_cond_broadcast() { - test_rwlock_cond_broadcast_helper(0); - test_rwlock_cond_broadcast_helper(12); - } - - #[cfg(test)] - fn rwlock_kill_helper(mode1: RWLockMode, mode2: RWLockMode) { - use any::Any; - - // Mutex must get automatically unlocked if panicked/killed within. 
- let x = Arc::new(RWLock::new()); - let x2 = x.clone(); - - let result: result::Result<(), Box> = task::try(proc() { - lock_rwlock_in_mode(&x2, mode1, || { - panic!(); - }) - }); - assert!(result.is_err()); - // child task must have finished by the time try returns - lock_rwlock_in_mode(&x, mode2, || { }) - } - - #[test] - fn test_rwlock_reader_killed_writer() { - rwlock_kill_helper(Read, Write); - } - - #[test] - fn test_rwlock_writer_killed_reader() { - rwlock_kill_helper(Write, Read); - } - - #[test] - fn test_rwlock_reader_killed_reader() { - rwlock_kill_helper(Read, Read); - } - - #[test] - fn test_rwlock_writer_killed_writer() { - rwlock_kill_helper(Write, Write); - } - - #[test] - fn test_rwlock_kill_downgrader() { - rwlock_kill_helper(Downgrade, Read); - rwlock_kill_helper(Read, Downgrade); - rwlock_kill_helper(Downgrade, Write); - rwlock_kill_helper(Write, Downgrade); - rwlock_kill_helper(DowngradeRead, Read); - rwlock_kill_helper(Read, DowngradeRead); - rwlock_kill_helper(DowngradeRead, Write); - rwlock_kill_helper(Write, DowngradeRead); - rwlock_kill_helper(DowngradeRead, Downgrade); - rwlock_kill_helper(DowngradeRead, Downgrade); - rwlock_kill_helper(Downgrade, DowngradeRead); - rwlock_kill_helper(Downgrade, DowngradeRead); - } -} diff --git a/src/libstd/sync/rwlock.rs b/src/libstd/sync/rwlock.rs new file mode 100644 index 00000000000..a4f8b1df6af --- /dev/null +++ b/src/libstd/sync/rwlock.rs @@ -0,0 +1,514 @@ +// Copyright 2014 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use prelude::*; + +use kinds::marker; +use cell::UnsafeCell; +use sys_common::rwlock as sys; +use sync::poison; + +/// A reader-writer lock +/// +/// This type of lock allows a number of readers or at most one writer at any +/// point in time. The write portion of this lock typically allows modification +/// of the underlying data (exclusive access) and the read portion of this lock +/// typically allows for read-only access (shared access). +/// +/// The type parameter `T` represents the data that this lock protects. It is +/// required that `T` satisfies `Send` to be shared across tasks and `Sync` to +/// allow concurrent access through readers. The RAII guards returned from the +/// locking methods implement `Deref` (and `DerefMut` for the `write` methods) +/// to allow access to the contained of the lock. +/// +/// RWLocks, like Mutexes, will become poisoned on panics. Note, however, that +/// an RWLock may only be poisoned if a panic occurs while it is locked +/// exclusively (write mode). If a panic occurs in any reader, then the lock +/// will not be poisoned. +/// +/// # Example +/// +/// ``` +/// use std::sync::RWLock; +/// +/// let lock = RWLock::new(5i); +/// +/// // many reader locks can be held at once +/// { +/// let r1 = lock.read(); +/// let r2 = lock.read(); +/// assert_eq!(*r1, 5); +/// assert_eq!(*r2, 5); +/// } // read locks are dropped at this point +/// +/// // only one write lock may be held, however +/// { +/// let mut w = lock.write(); +/// *w += 1; +/// assert_eq!(*w, 6); +/// } // write lock is dropped here +/// ``` +pub struct RWLock { + inner: Box, + data: UnsafeCell, +} + +/// Structure representing a staticaly allocated RWLock. 
+/// +/// This structure is intended to be used inside of a `static` and will provide +/// automatic global access as well as lazy initialization. The internal +/// resources of this RWLock, however, must be manually deallocated. +/// +/// # Example +/// +/// ``` +/// use std::sync::{StaticRWLock, RWLOCK_INIT}; +/// +/// static LOCK: StaticRWLock = RWLOCK_INIT; +/// +/// { +/// let _g = LOCK.read(); +/// // ... shared read access +/// } +/// { +/// let _g = LOCK.write(); +/// // ... exclusive write access +/// } +/// unsafe { LOCK.destroy() } // free all resources +/// ``` +pub struct StaticRWLock { + inner: sys::RWLock, + poison: UnsafeCell, +} + +/// Constant initialization for a statically-initialized rwlock. +pub const RWLOCK_INIT: StaticRWLock = StaticRWLock { + inner: sys::RWLOCK_INIT, + poison: UnsafeCell { value: poison::Flag { failed: false } }, +}; + +/// RAII structure used to release the shared read access of a lock when +/// dropped. +#[must_use] +pub struct RWLockReadGuard<'a, T: 'a> { + __lock: &'a RWLock, + __guard: StaticRWLockReadGuard, +} + +/// RAII structure used to release the exclusive write access of a lock when +/// dropped. +#[must_use] +pub struct RWLockWriteGuard<'a, T: 'a> { + __lock: &'a RWLock, + __guard: StaticRWLockWriteGuard, +} + +/// RAII structure used to release the shared read access of a lock when +/// dropped. +#[must_use] +pub struct StaticRWLockReadGuard { + lock: &'static sys::RWLock, + marker: marker::NoSend, +} + +/// RAII structure used to release the exclusive write access of a lock when +/// dropped. +#[must_use] +pub struct StaticRWLockWriteGuard { + lock: &'static sys::RWLock, + marker: marker::NoSend, + poison: poison::Guard<'static>, +} + +impl RWLock { + /// Creates a new instance of an RWLock which is unlocked and read to go. + pub fn new(t: T) -> RWLock { + RWLock { inner: box RWLOCK_INIT, data: UnsafeCell::new(t) } + } + + /// Locks this rwlock with shared read access, blocking the current thread + /// until it can be acquired. + /// + /// The calling thread will be blocked until there are no more writers which + /// hold the lock. There may be other readers currently inside the lock when + /// this method returns. This method does not provide any guarantees with + /// respect to the ordering of whether contentious readers or writers will + /// acquire the lock first. + /// + /// Returns an RAII guard which will release this thread's shared access + /// once it is dropped. + /// + /// # Panics + /// + /// This function will panic if the RWLock is poisoned. An RWLock is + /// poisoned whenever a writer panics while holding an exclusive lock. The + /// panic will occur immediately after the lock has been acquired. + #[inline] + pub fn read(&self) -> RWLockReadGuard { + unsafe { + let lock: &'static StaticRWLock = &*(&*self.inner as *const _); + RWLockReadGuard::new(self, lock.read()) + } + } + + /// Attempt to acquire this lock with shared read access. + /// + /// This function will never block and will return immediately if `read` + /// would otherwise succeed. Returns `Some` of an RAII guard which will + /// release the shared access of this thread when dropped, or `None` if the + /// access could not be granted. This method does not provide any + /// guarantees with respect to the ordering of whether contentious readers + /// or writers will acquire the lock first. + /// + /// # Panics + /// + /// This function will panic if the RWLock is poisoned. An RWLock is + /// poisoned whenever a writer panics while holding an exclusive lock. 
A + /// panic will only occur if the lock is acquired. + #[inline] + pub fn try_read(&self) -> Option> { + unsafe { + let lock: &'static StaticRWLock = &*(&*self.inner as *const _); + lock.try_read().map(|guard| { + RWLockReadGuard::new(self, guard) + }) + } + } + + /// Lock this rwlock with exclusive write access, blocking the current + /// thread until it can be acquired. + /// + /// This function will not return while other writers or other readers + /// currently have access to the lock. + /// + /// Returns an RAII guard which will drop the write access of this rwlock + /// when dropped. + /// + /// # Panics + /// + /// This function will panic if the RWLock is poisoned. An RWLock is + /// poisoned whenever a writer panics while holding an exclusive lock. The + /// panic will occur when the lock is acquired. + #[inline] + pub fn write(&self) -> RWLockWriteGuard { + unsafe { + let lock: &'static StaticRWLock = &*(&*self.inner as *const _); + RWLockWriteGuard::new(self, lock.write()) + } + } + + /// Attempt to lock this rwlock with exclusive write access. + /// + /// This function does not ever block, and it will return `None` if a call + /// to `write` would otherwise block. If successful, an RAII guard is + /// returned. + /// + /// # Panics + /// + /// This function will panic if the RWLock is poisoned. An RWLock is + /// poisoned whenever a writer panics while holding an exclusive lock. A + /// panic will only occur if the lock is acquired. + #[inline] + pub fn try_write(&self) -> Option> { + unsafe { + let lock: &'static StaticRWLock = &*(&*self.inner as *const _); + lock.try_write().map(|guard| { + RWLockWriteGuard::new(self, guard) + }) + } + } +} + +#[unsafe_destructor] +impl Drop for RWLock { + fn drop(&mut self) { + unsafe { self.inner.inner.destroy() } + } +} + +impl StaticRWLock { + /// Locks this rwlock with shared read access, blocking the current thread + /// until it can be acquired. + /// + /// See `RWLock::read`. + #[inline] + pub fn read(&'static self) -> StaticRWLockReadGuard { + unsafe { self.inner.read() } + StaticRWLockReadGuard::new(self) + } + + /// Attempt to acquire this lock with shared read access. + /// + /// See `RWLock::try_read`. + #[inline] + pub fn try_read(&'static self) -> Option { + if unsafe { self.inner.try_read() } { + Some(StaticRWLockReadGuard::new(self)) + } else { + None + } + } + + /// Lock this rwlock with exclusive write access, blocking the current + /// thread until it can be acquired. + /// + /// See `RWLock::write`. + #[inline] + pub fn write(&'static self) -> StaticRWLockWriteGuard { + unsafe { self.inner.write() } + StaticRWLockWriteGuard::new(self) + } + + /// Attempt to lock this rwlock with exclusive write access. + /// + /// See `RWLock::try_write`. + #[inline] + pub fn try_write(&'static self) -> Option { + if unsafe { self.inner.try_write() } { + Some(StaticRWLockWriteGuard::new(self)) + } else { + None + } + } + + /// Deallocate all resources associated with this static lock. + /// + /// This method is unsafe to call as there is no guarantee that there are no + /// active users of the lock, and this also doesn't prevent any future users + /// of this lock. This method is required to be called to not leak memory on + /// all platforms. 
+ pub unsafe fn destroy(&'static self) { + self.inner.destroy() + } +} + +impl<'rwlock, T> RWLockReadGuard<'rwlock, T> { + fn new(lock: &RWLock, guard: StaticRWLockReadGuard) + -> RWLockReadGuard { + RWLockReadGuard { __lock: lock, __guard: guard } + } +} +impl<'rwlock, T> RWLockWriteGuard<'rwlock, T> { + fn new(lock: &RWLock, guard: StaticRWLockWriteGuard) + -> RWLockWriteGuard { + RWLockWriteGuard { __lock: lock, __guard: guard } + } +} + +impl<'rwlock, T> Deref for RWLockReadGuard<'rwlock, T> { + fn deref(&self) -> &T { unsafe { &*self.__lock.data.get() } } +} +impl<'rwlock, T> Deref for RWLockWriteGuard<'rwlock, T> { + fn deref(&self) -> &T { unsafe { &*self.__lock.data.get() } } +} +impl<'rwlock, T> DerefMut for RWLockWriteGuard<'rwlock, T> { + fn deref_mut(&mut self) -> &mut T { unsafe { &mut *self.__lock.data.get() } } +} + +impl StaticRWLockReadGuard { + fn new(lock: &'static StaticRWLock) -> StaticRWLockReadGuard { + let guard = StaticRWLockReadGuard { + lock: &lock.inner, + marker: marker::NoSend, + }; + unsafe { (*lock.poison.get()).borrow().check("rwlock"); } + return guard; + } +} +impl StaticRWLockWriteGuard { + fn new(lock: &'static StaticRWLock) -> StaticRWLockWriteGuard { + unsafe { + let guard = StaticRWLockWriteGuard { + lock: &lock.inner, + marker: marker::NoSend, + poison: (*lock.poison.get()).borrow(), + }; + guard.poison.check("rwlock"); + return guard; + } + } +} + +#[unsafe_destructor] +impl Drop for StaticRWLockReadGuard { + fn drop(&mut self) { + unsafe { self.lock.read_unlock(); } + } +} + +#[unsafe_destructor] +impl Drop for StaticRWLockWriteGuard { + fn drop(&mut self) { + self.poison.done(); + unsafe { self.lock.write_unlock(); } + } +} + +#[cfg(test)] +mod tests { + use prelude::*; + + use rand::{mod, Rng}; + use task; + use sync::{Arc, RWLock, StaticRWLock, RWLOCK_INIT}; + + #[test] + fn smoke() { + let l = RWLock::new(()); + drop(l.read()); + drop(l.write()); + drop((l.read(), l.read())); + drop(l.write()); + } + + #[test] + fn static_smoke() { + static R: StaticRWLock = RWLOCK_INIT; + drop(R.read()); + drop(R.write()); + drop((R.read(), R.read())); + drop(R.write()); + unsafe { R.destroy(); } + } + + #[test] + fn frob() { + static R: StaticRWLock = RWLOCK_INIT; + static N: uint = 10; + static M: uint = 1000; + + let (tx, rx) = channel::<()>(); + for _ in range(0, N) { + let tx = tx.clone(); + spawn(proc() { + let mut rng = rand::task_rng(); + for _ in range(0, M) { + if rng.gen_weighted_bool(N) { + drop(R.write()); + } else { + drop(R.read()); + } + } + drop(tx); + }); + } + drop(tx); + let _ = rx.recv_opt(); + unsafe { R.destroy(); } + } + + #[test] + #[should_fail] + fn test_rw_arc_poison_wr() { + let arc = Arc::new(RWLock::new(1i)); + let arc2 = arc.clone(); + let _ = task::try(proc() { + let lock = arc2.write(); + assert_eq!(*lock, 2); + }); + let lock = arc.read(); + assert_eq!(*lock, 1); + } + + #[test] + #[should_fail] + fn test_rw_arc_poison_ww() { + let arc = Arc::new(RWLock::new(1i)); + let arc2 = arc.clone(); + let _ = task::try(proc() { + let lock = arc2.write(); + assert_eq!(*lock, 2); + }); + let lock = arc.write(); + assert_eq!(*lock, 1); + } + + #[test] + fn test_rw_arc_no_poison_rr() { + let arc = Arc::new(RWLock::new(1i)); + let arc2 = arc.clone(); + let _ = task::try(proc() { + let lock = arc2.read(); + assert_eq!(*lock, 2); + }); + let lock = arc.read(); + assert_eq!(*lock, 1); + } + #[test] + fn test_rw_arc_no_poison_rw() { + let arc = Arc::new(RWLock::new(1i)); + let arc2 = arc.clone(); + let _ = task::try(proc() { + let lock = 
arc2.read(); + assert_eq!(*lock, 2); + }); + let lock = arc.write(); + assert_eq!(*lock, 1); + } + + #[test] + fn test_rw_arc() { + let arc = Arc::new(RWLock::new(0i)); + let arc2 = arc.clone(); + let (tx, rx) = channel(); + + task::spawn(proc() { + let mut lock = arc2.write(); + for _ in range(0u, 10) { + let tmp = *lock; + *lock = -1; + task::deschedule(); + *lock = tmp + 1; + } + tx.send(()); + }); + + // Readers try to catch the writer in the act + let mut children = Vec::new(); + for _ in range(0u, 5) { + let arc3 = arc.clone(); + children.push(task::try_future(proc() { + let lock = arc3.read(); + assert!(*lock >= 0); + })); + } + + // Wait for children to pass their asserts + for r in children.iter_mut() { + assert!(r.get_ref().is_ok()); + } + + // Wait for writer to finish + rx.recv(); + let lock = arc.read(); + assert_eq!(*lock, 10); + } + + #[test] + fn test_rw_arc_access_in_unwind() { + let arc = Arc::new(RWLock::new(1i)); + let arc2 = arc.clone(); + let _ = task::try::<()>(proc() { + struct Unwinder { + i: Arc>, + } + impl Drop for Unwinder { + fn drop(&mut self) { + let mut lock = self.i.write(); + *lock += 1; + } + } + let _u = Unwinder { i: arc2 }; + panic!(); + }); + let lock = arc.read(); + assert_eq!(*lock, 2); + } +} diff --git a/src/libstd/sync/semaphore.rs b/src/libstd/sync/semaphore.rs new file mode 100644 index 00000000000..03fb84c38d4 --- /dev/null +++ b/src/libstd/sync/semaphore.rs @@ -0,0 +1,195 @@ +// Copyright 2014 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use ops::Drop; +use sync::{Mutex, Condvar}; + +/// A counting, blocking, semaphore. +/// +/// Semaphores are a form of atomic counter where access is only granted if the +/// counter is a positive value. Each acquisition will block the calling thread +/// until the counter is positive, and each release will increment the counter +/// and unblock any threads if necessary. +/// +/// # Example +/// +/// ``` +/// use std::sync::Semaphore; +/// +/// // Create a semaphore that represents 5 resources +/// let sem = Semaphore::new(5); +/// +/// // Acquire one of the resources +/// sem.acquire(); +/// +/// // Acquire one of the resources for a limited period of time +/// { +/// let _guard = sem.access(); +/// // ... +/// } // resources is released here +/// +/// // Release our initially acquired resource +/// sem.release(); +/// ``` +pub struct Semaphore { + lock: Mutex, + cvar: Condvar, +} + +/// An RAII guard which will release a resource acquired from a semaphore when +/// dropped. +pub struct SemaphoreGuard<'a> { + sem: &'a Semaphore, +} + +impl Semaphore { + /// Creates a new semaphore with the initial count specified. + /// + /// The count specified can be thought of as a number of resources, and a + /// call to `acquire` or `access` will block until at least one resource is + /// available. It is valid to initialize a semaphore with a negative count. + pub fn new(count: int) -> Semaphore { + Semaphore { + lock: Mutex::new(count), + cvar: Condvar::new(), + } + } + + /// Acquires a resource of this semaphore, blocking the current thread until + /// it can do so. + /// + /// This method will block until the internal count of the semaphore is at + /// least 1. 
+ pub fn acquire(&self) { + let mut count = self.lock.lock(); + while *count <= 0 { + self.cvar.wait(&count); + } + *count -= 1; + } + + /// Release a resource from this semaphore. + /// + /// This will increment the number of resources in this semaphore by 1 and + /// will notify any pending waiters in `acquire` or `access` if necessary. + pub fn release(&self) { + *self.lock.lock() += 1; + self.cvar.notify_one(); + } + + /// Acquires a resource of this semaphore, returning an RAII guard to + /// release the semaphore when dropped. + /// + /// This function is semantically equivalent to an `acquire` followed by a + /// `release` when the guard returned is dropped. + pub fn access(&self) -> SemaphoreGuard { + self.acquire(); + SemaphoreGuard { sem: self } + } +} + +#[unsafe_destructor] +impl<'a> Drop for SemaphoreGuard<'a> { + fn drop(&mut self) { + self.sem.release(); + } +} + +#[cfg(test)] +mod tests { + use prelude::*; + + use sync::Arc; + use super::Semaphore; + + #[test] + fn test_sem_acquire_release() { + let s = Semaphore::new(1); + s.acquire(); + s.release(); + s.acquire(); + } + + #[test] + fn test_sem_basic() { + let s = Semaphore::new(1); + let _g = s.access(); + } + + #[test] + fn test_sem_as_mutex() { + let s = Arc::new(Semaphore::new(1)); + let s2 = s.clone(); + spawn(proc() { + let _g = s2.access(); + }); + let _g = s.access(); + } + + #[test] + fn test_sem_as_cvar() { + /* Child waits and parent signals */ + let (tx, rx) = channel(); + let s = Arc::new(Semaphore::new(0)); + let s2 = s.clone(); + spawn(proc() { + s2.acquire(); + tx.send(()); + }); + s.release(); + let _ = rx.recv(); + + /* Parent waits and child signals */ + let (tx, rx) = channel(); + let s = Arc::new(Semaphore::new(0)); + let s2 = s.clone(); + spawn(proc() { + s2.release(); + let _ = rx.recv(); + }); + s.acquire(); + tx.send(()); + } + + #[test] + fn test_sem_multi_resource() { + // Parent and child both get in the critical section at the same + // time, and shake hands. + let s = Arc::new(Semaphore::new(2)); + let s2 = s.clone(); + let (tx1, rx1) = channel(); + let (tx2, rx2) = channel(); + spawn(proc() { + let _g = s2.access(); + let _ = rx2.recv(); + tx1.send(()); + }); + let _g = s.access(); + tx2.send(()); + let _ = rx1.recv(); + } + + #[test] + fn test_sem_runtime_friendly_blocking() { + let s = Arc::new(Semaphore::new(1)); + let s2 = s.clone(); + let (tx, rx) = channel(); + { + let _g = s.access(); + spawn(proc() { + tx.send(()); + drop(s2.access()); + tx.send(()); + }); + rx.recv(); // wait for child to come alive + } + rx.recv(); // wait for child to be done + } +} diff --git a/src/libstd/sys/common/condvar.rs b/src/libstd/sys/common/condvar.rs new file mode 100644 index 00000000000..e09d9704029 --- /dev/null +++ b/src/libstd/sys/common/condvar.rs @@ -0,0 +1,67 @@ +// Copyright 2014 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use time::Duration; +use sys_common::mutex::{mod, Mutex}; +use sys::condvar as imp; + +/// An OS-based condition variable. +/// +/// This structure is the lowest layer possible on top of the OS-provided +/// condition variables. It is consequently entirely unsafe to use. It is +/// recommended to use the safer types at the top level of this crate instead of +/// this type. 
+pub struct Condvar(imp::Condvar); + +/// Static initializer for condition variables. +pub const CONDVAR_INIT: Condvar = Condvar(imp::CONDVAR_INIT); + +impl Condvar { + /// Creates a new condition variable for use. + /// + /// Behavior is undefined if the condition variable is moved after it is + /// first used with any of the functions below. + #[inline] + pub unsafe fn new() -> Condvar { Condvar(imp::Condvar::new()) } + + /// Signal one waiter on this condition variable to wake up. + #[inline] + pub unsafe fn notify_one(&self) { self.0.notify_one() } + + /// Awaken all current waiters on this condition variable. + #[inline] + pub unsafe fn notify_all(&self) { self.0.notify_all() } + + /// Wait for a signal on the specified mutex. + /// + /// Behavior is undefined if the mutex is not locked by the current thread. + /// Behavior is also undefined if more than one mutex is used concurrently + /// on this condition variable. + #[inline] + pub unsafe fn wait(&self, mutex: &Mutex) { self.0.wait(mutex::raw(mutex)) } + + /// Wait for a signal on the specified mutex with a timeout duration + /// specified by `dur` (a relative time into the future). + /// + /// Behavior is undefined if the mutex is not locked by the current thread. + /// Behavior is also undefined if more than one mutex is used concurrently + /// on this condition variable. + #[inline] + pub unsafe fn wait_timeout(&self, mutex: &Mutex, dur: Duration) -> bool { + self.0.wait_timeout(mutex::raw(mutex), dur) + } + + /// Deallocate all resources associated with this condition variable. + /// + /// Behavior is undefined if there are current or will be future users of + /// this condition variable. + #[inline] + pub unsafe fn destroy(&self) { self.0.destroy() } +} diff --git a/src/libstd/sys/common/mod.rs b/src/libstd/sys/common/mod.rs index e382ec261a0..f8861c20464 100644 --- a/src/libstd/sys/common/mod.rs +++ b/src/libstd/sys/common/mod.rs @@ -19,8 +19,11 @@ use num::Int; use path::BytesContainer; use collections; -pub mod net; +pub mod condvar; pub mod helper_thread; +pub mod mutex; +pub mod net; +pub mod rwlock; pub mod thread_local; // common error constructors diff --git a/src/libstd/sys/common/mutex.rs b/src/libstd/sys/common/mutex.rs new file mode 100644 index 00000000000..117d33db328 --- /dev/null +++ b/src/libstd/sys/common/mutex.rs @@ -0,0 +1,64 @@ +// Copyright 2014 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +pub use sys::mutex::raw; + +use sys::mutex as imp; + +/// An OS-based mutual exclusion lock. +/// +/// This is the thinnest cross-platform wrapper around OS mutexes. All usage of +/// this mutex is unsafe and it is recommended to instead use the safe wrapper +/// at the top level of the crate instead of this type. +pub struct Mutex(imp::Mutex); + +/// Constant initializer for statically allocated mutexes. +pub const MUTEX_INIT: Mutex = Mutex(imp::MUTEX_INIT); + +impl Mutex { + /// Creates a newly initialized mutex. + /// + /// Behavior is undefined if the mutex is moved after the first method is + /// called on the mutex. + #[inline] + pub unsafe fn new() -> Mutex { Mutex(imp::Mutex::new()) } + + /// Lock the mutex blocking the current thread until it is available. 
+ /// + /// Behavior is undefined if the mutex has been moved between this and any + /// previous function call. + #[inline] + pub unsafe fn lock(&self) { self.0.lock() } + + /// Attempt to lock the mutex without blocking, returning whether it was + /// successfully acquired or not. + /// + /// Behavior is undefined if the mutex has been moved between this and any + /// previous function call. + #[inline] + pub unsafe fn try_lock(&self) -> bool { self.0.try_lock() } + + /// Unlock the mutex. + /// + /// Behavior is undefined if the current thread does not actually hold the + /// mutex. + #[inline] + pub unsafe fn unlock(&self) { self.0.unlock() } + + /// Deallocate all resources associated with this mutex. + /// + /// Behavior is undefined if there are current or will be future users of + /// this mutex. + #[inline] + pub unsafe fn destroy(&self) { self.0.destroy() } +} + +// not meant to be exported to the outside world, just the containing module +pub fn raw(mutex: &Mutex) -> &imp::Mutex { &mutex.0 } diff --git a/src/libstd/sys/common/rwlock.rs b/src/libstd/sys/common/rwlock.rs new file mode 100644 index 00000000000..df016b9e293 --- /dev/null +++ b/src/libstd/sys/common/rwlock.rs @@ -0,0 +1,86 @@ +// Copyright 2014 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use sys::rwlock as imp; + +/// An OS-based reader-writer lock. +/// +/// This structure is entirely unsafe and serves as the lowest layer of a +/// cross-platform binding of system rwlocks. It is recommended to use the +/// safer types at the top level of this crate instead of this type. +pub struct RWLock(imp::RWLock); + +/// Constant initializer for static RWLocks. +pub const RWLOCK_INIT: RWLock = RWLock(imp::RWLOCK_INIT); + +impl RWLock { + /// Creates a new instance of an RWLock. + /// + /// Usage of an RWLock is undefined if it is moved after its first use (any + /// function calls below). + #[inline] + pub unsafe fn new() -> RWLock { RWLock(imp::RWLock::new()) } + + /// Acquire shared access to the underlying lock, blocking the current + /// thread to do so. + /// + /// Behavior is undefined if the rwlock has been moved between this and any + /// previous methodo call. + #[inline] + pub unsafe fn read(&self) { self.0.read() } + + /// Attempt to acquire shared access to this lock, returning whether it + /// succeeded or not. + /// + /// This function does not block the current thread. + /// + /// Behavior is undefined if the rwlock has been moved between this and any + /// previous methodo call. + #[inline] + pub unsafe fn try_read(&self) -> bool { self.0.try_read() } + + /// Acquire write access to the underlying lock, blocking the current thread + /// to do so. + /// + /// Behavior is undefined if the rwlock has been moved between this and any + /// previous methodo call. + #[inline] + pub unsafe fn write(&self) { self.0.write() } + + /// Attempt to acquire exclusive access to this lock, returning whether it + /// succeeded or not. + /// + /// This function does not block the current thread. + /// + /// Behavior is undefined if the rwlock has been moved between this and any + /// previous methodo call. 
+ #[inline] + pub unsafe fn try_write(&self) -> bool { self.0.try_write() } + + /// Unlock previously acquired shared access to this lock. + /// + /// Behavior is undefined if the current thread does not have shared access. + #[inline] + pub unsafe fn read_unlock(&self) { self.0.read_unlock() } + + /// Unlock previously acquired exclusive access to this lock. + /// + /// Behavior is undefined if the current thread does not currently have + /// exclusive access. + #[inline] + pub unsafe fn write_unlock(&self) { self.0.write_unlock() } + + /// Destroy OS-related resources with this RWLock. + /// + /// Behavior is undefined if there are any currently active users of this + /// lock. + #[inline] + pub unsafe fn destroy(&self) { self.0.destroy() } +} diff --git a/src/libstd/sys/unix/condvar.rs b/src/libstd/sys/unix/condvar.rs new file mode 100644 index 00000000000..f64718539ef --- /dev/null +++ b/src/libstd/sys/unix/condvar.rs @@ -0,0 +1,83 @@ +// Copyright 2014 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use cell::UnsafeCell; +use libc; +use sys::mutex::{mod, Mutex}; +use sys::sync as ffi; +use time::Duration; + +pub struct Condvar { inner: UnsafeCell } + +pub const CONDVAR_INIT: Condvar = Condvar { + inner: UnsafeCell { value: ffi::PTHREAD_COND_INITIALIZER }, +}; + +impl Condvar { + #[inline] + pub unsafe fn new() -> Condvar { + // Might be moved and address is changing it is better to avoid + // initialization of potentially opaque OS data before it landed + Condvar { inner: UnsafeCell::new(ffi::PTHREAD_COND_INITIALIZER) } + } + + #[inline] + pub unsafe fn notify_one(&self) { + let r = ffi::pthread_cond_signal(self.inner.get()); + debug_assert_eq!(r, 0); + } + + #[inline] + pub unsafe fn notify_all(&self) { + let r = ffi::pthread_cond_broadcast(self.inner.get()); + debug_assert_eq!(r, 0); + } + + #[inline] + pub unsafe fn wait(&self, mutex: &Mutex) { + let r = ffi::pthread_cond_wait(self.inner.get(), mutex::raw(mutex)); + debug_assert_eq!(r, 0); + } + + pub unsafe fn wait_timeout(&self, mutex: &Mutex, dur: Duration) -> bool { + assert!(dur >= Duration::nanoseconds(0)); + + // First, figure out what time it currently is + let mut tv = libc::timeval { tv_sec: 0, tv_usec: 0 }; + let r = ffi::gettimeofday(&mut tv, 0 as *mut _); + debug_assert_eq!(r, 0); + + // Offset that time with the specified duration + let abs = Duration::seconds(tv.tv_sec as i64) + + Duration::microseconds(tv.tv_usec as i64) + + dur; + let ns = abs.num_nanoseconds().unwrap() as u64; + let timeout = libc::timespec { + tv_sec: (ns / 1000000000) as libc::time_t, + tv_nsec: (ns % 1000000000) as libc::c_long, + }; + + // And wait! + let r = ffi::pthread_cond_timedwait(self.inner.get(), mutex::raw(mutex), + &timeout); + if r != 0 { + debug_assert_eq!(r as int, libc::ETIMEDOUT as int); + false + } else { + true + } + } + + #[inline] + pub unsafe fn destroy(&self) { + let r = ffi::pthread_cond_destroy(self.inner.get()); + debug_assert_eq!(r, 0); + } +} diff --git a/src/libstd/sys/unix/mod.rs b/src/libstd/sys/unix/mod.rs index af238905119..7b37fb3fb0f 100644 --- a/src/libstd/sys/unix/mod.rs +++ b/src/libstd/sys/unix/mod.rs @@ -34,14 +34,18 @@ macro_rules! 
helper_init( (static $name:ident: Helper<$m:ty>) => ( pub mod c; pub mod ext; +pub mod condvar; pub mod fs; pub mod helper_signal; +pub mod mutex; pub mod os; pub mod pipe; pub mod process; +pub mod rwlock; +pub mod sync; pub mod tcp; -pub mod timer; pub mod thread_local; +pub mod timer; pub mod tty; pub mod udp; diff --git a/src/libstd/sys/unix/mutex.rs b/src/libstd/sys/unix/mutex.rs new file mode 100644 index 00000000000..2f01c53cb2c --- /dev/null +++ b/src/libstd/sys/unix/mutex.rs @@ -0,0 +1,52 @@ +// Copyright 2014 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use cell::UnsafeCell; +use sys::sync as ffi; +use sys_common::mutex; + +pub struct Mutex { inner: UnsafeCell } + +#[inline] +pub unsafe fn raw(m: &Mutex) -> *mut ffi::pthread_mutex_t { + m.inner.get() +} + +pub const MUTEX_INIT: Mutex = Mutex { + inner: UnsafeCell { value: ffi::PTHREAD_MUTEX_INITIALIZER }, +}; + +impl Mutex { + #[inline] + pub unsafe fn new() -> Mutex { + // Might be moved and address is changing it is better to avoid + // initialization of potentially opaque OS data before it landed + MUTEX_INIT + } + #[inline] + pub unsafe fn lock(&self) { + let r = ffi::pthread_mutex_lock(self.inner.get()); + debug_assert_eq!(r, 0); + } + #[inline] + pub unsafe fn unlock(&self) { + let r = ffi::pthread_mutex_unlock(self.inner.get()); + debug_assert_eq!(r, 0); + } + #[inline] + pub unsafe fn try_lock(&self) -> bool { + ffi::pthread_mutex_trylock(self.inner.get()) == 0 + } + #[inline] + pub unsafe fn destroy(&self) { + let r = ffi::pthread_mutex_destroy(self.inner.get()); + debug_assert_eq!(r, 0); + } +} diff --git a/src/libstd/sys/unix/rwlock.rs b/src/libstd/sys/unix/rwlock.rs new file mode 100644 index 00000000000..0d63ff14ff2 --- /dev/null +++ b/src/libstd/sys/unix/rwlock.rs @@ -0,0 +1,57 @@ +// Copyright 2014 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
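A minimal usage sketch of the raw pthread mutex wrapper above, using only the calls introduced in this patch (`new`, `lock`, `try_lock`, `unlock`, `destroy`); the function name is illustrative and the snippet is not itself part of the diff. Every call is `unsafe` because locking must be balanced by hand and `destroy` must run exactly once with no remaining users, which is why only the safe `std::sync` types are expected to drive this layer.

```rust
// Sketch only: assumes the unix Mutex defined above is in scope.
unsafe fn smoke_test_raw_mutex() {
    let m = Mutex::new();   // just PTHREAD_MUTEX_INITIALIZER; no OS call yet
    m.lock();               // first real use; the mutex must not move after this
    // ... critical section ...
    m.unlock();
    assert!(m.try_lock());  // uncontended, so the non-blocking path succeeds
    m.unlock();
    m.destroy();            // exactly once, after all users are done
}
```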
+ +use cell::UnsafeCell; +use sys::sync as ffi; + +pub struct RWLock { inner: UnsafeCell } + +pub const RWLOCK_INIT: RWLock = RWLock { + inner: UnsafeCell { value: ffi::PTHREAD_RWLOCK_INITIALIZER }, +}; + +impl RWLock { + #[inline] + pub unsafe fn new() -> RWLock { + // Might be moved and address is changing it is better to avoid + // initialization of potentially opaque OS data before it landed + RWLOCK_INIT + } + #[inline] + pub unsafe fn read(&self) { + let r = ffi::pthread_rwlock_rdlock(self.inner.get()); + debug_assert_eq!(r, 0); + } + #[inline] + pub unsafe fn try_read(&self) -> bool { + ffi::pthread_rwlock_tryrdlock(self.inner.get()) == 0 + } + #[inline] + pub unsafe fn write(&self) { + let r = ffi::pthread_rwlock_wrlock(self.inner.get()); + debug_assert_eq!(r, 0); + } + #[inline] + pub unsafe fn try_write(&self) -> bool { + ffi::pthread_rwlock_trywrlock(self.inner.get()) == 0 + } + #[inline] + pub unsafe fn read_unlock(&self) { + let r = ffi::pthread_rwlock_unlock(self.inner.get()); + debug_assert_eq!(r, 0); + } + #[inline] + pub unsafe fn write_unlock(&self) { self.read_unlock() } + #[inline] + pub unsafe fn destroy(&self) { + let r = ffi::pthread_rwlock_destroy(self.inner.get()); + debug_assert_eq!(r, 0); + } +} diff --git a/src/libstd/sys/unix/sync.rs b/src/libstd/sys/unix/sync.rs new file mode 100644 index 00000000000..007826b4b9d --- /dev/null +++ b/src/libstd/sys/unix/sync.rs @@ -0,0 +1,208 @@ +// Copyright 2014 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
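A matching sketch for the pthread rwlock wrapper above (again illustrative, not part of the diff). Note that `write_unlock` simply forwards to `read_unlock`, since a single `pthread_rwlock_unlock` call releases either kind of access.

```rust
// Sketch only: assumes the unix RWLock defined above is in scope.
unsafe fn smoke_test_raw_rwlock() {
    let l = RWLock::new();   // PTHREAD_RWLOCK_INITIALIZER; no OS call yet
    l.read();
    assert!(l.try_read());   // a thread may hold several read locks at once
    l.read_unlock();
    l.read_unlock();
    l.write();               // exclusive; would block if any reader remained
    l.write_unlock();        // same pthread_rwlock_unlock as read_unlock
    l.destroy();
}
```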
+ +#![allow(bad_style)] + +use libc; + +pub use self::os::{PTHREAD_MUTEX_INITIALIZER, pthread_mutex_t}; +pub use self::os::{PTHREAD_COND_INITIALIZER, pthread_cond_t}; +pub use self::os::{PTHREAD_RWLOCK_INITIALIZER, pthread_rwlock_t}; + +extern { + // mutexes + pub fn pthread_mutex_destroy(lock: *mut pthread_mutex_t) -> libc::c_int; + pub fn pthread_mutex_lock(lock: *mut pthread_mutex_t) -> libc::c_int; + pub fn pthread_mutex_trylock(lock: *mut pthread_mutex_t) -> libc::c_int; + pub fn pthread_mutex_unlock(lock: *mut pthread_mutex_t) -> libc::c_int; + + // cvars + pub fn pthread_cond_wait(cond: *mut pthread_cond_t, + lock: *mut pthread_mutex_t) -> libc::c_int; + pub fn pthread_cond_timedwait(cond: *mut pthread_cond_t, + lock: *mut pthread_mutex_t, + abstime: *const libc::timespec) -> libc::c_int; + pub fn pthread_cond_signal(cond: *mut pthread_cond_t) -> libc::c_int; + pub fn pthread_cond_broadcast(cond: *mut pthread_cond_t) -> libc::c_int; + pub fn pthread_cond_destroy(cond: *mut pthread_cond_t) -> libc::c_int; + pub fn gettimeofday(tp: *mut libc::timeval, + tz: *mut libc::c_void) -> libc::c_int; + + // rwlocks + pub fn pthread_rwlock_destroy(lock: *mut pthread_rwlock_t) -> libc::c_int; + pub fn pthread_rwlock_rdlock(lock: *mut pthread_rwlock_t) -> libc::c_int; + pub fn pthread_rwlock_tryrdlock(lock: *mut pthread_rwlock_t) -> libc::c_int; + pub fn pthread_rwlock_wrlock(lock: *mut pthread_rwlock_t) -> libc::c_int; + pub fn pthread_rwlock_trywrlock(lock: *mut pthread_rwlock_t) -> libc::c_int; + pub fn pthread_rwlock_unlock(lock: *mut pthread_rwlock_t) -> libc::c_int; +} + +#[cfg(any(target_os = "freebsd", target_os = "dragonfly"))] +mod os { + use libc; + + pub type pthread_mutex_t = *mut libc::c_void; + pub type pthread_cond_t = *mut libc::c_void; + pub type pthread_rwlock_t = *mut libc::c_void; + + pub const PTHREAD_MUTEX_INITIALIZER: pthread_mutex_t = 0 as *mut _; + pub const PTHREAD_COND_INITIALIZER: pthread_cond_t = 0 as *mut _; + pub const PTHREAD_RWLOCK_INITIALIZER: pthread_rwlock_t = 0 as *mut _; +} + +#[cfg(any(target_os = "macos", target_os = "ios"))] +mod os { + use libc; + + #[cfg(target_arch = "x86_64")] + const __PTHREAD_MUTEX_SIZE__: uint = 56; + #[cfg(any(target_arch = "x86", + target_arch = "arm"))] + const __PTHREAD_MUTEX_SIZE__: uint = 40; + + #[cfg(target_arch = "x86_64")] + const __PTHREAD_COND_SIZE__: uint = 40; + #[cfg(any(target_arch = "x86", + target_arch = "arm"))] + const __PTHREAD_COND_SIZE__: uint = 24; + + #[cfg(target_arch = "x86_64")] + const __PTHREAD_RWLOCK_SIZE__: uint = 192; + #[cfg(any(target_arch = "x86", + target_arch = "arm"))] + const __PTHREAD_RWLOCK_SIZE__: uint = 124; + + const _PTHREAD_MUTEX_SIG_INIT: libc::c_long = 0x32AAABA7; + const _PTHREAD_COND_SIG_INIT: libc::c_long = 0x3CB0B1BB; + const _PTHREAD_RWLOCK_SIG_INIT: libc::c_long = 0x2DA8B3B4; + + #[repr(C)] + pub struct pthread_mutex_t { + __sig: libc::c_long, + __opaque: [u8, ..__PTHREAD_MUTEX_SIZE__], + } + #[repr(C)] + pub struct pthread_cond_t { + __sig: libc::c_long, + __opaque: [u8, ..__PTHREAD_COND_SIZE__], + } + #[repr(C)] + pub struct pthread_rwlock_t { + __sig: libc::c_long, + __opaque: [u8, ..__PTHREAD_RWLOCK_SIZE__], + } + + pub const PTHREAD_MUTEX_INITIALIZER: pthread_mutex_t = pthread_mutex_t { + __sig: _PTHREAD_MUTEX_SIG_INIT, + __opaque: [0, ..__PTHREAD_MUTEX_SIZE__], + }; + pub const PTHREAD_COND_INITIALIZER: pthread_cond_t = pthread_cond_t { + __sig: _PTHREAD_COND_SIG_INIT, + __opaque: [0, ..__PTHREAD_COND_SIZE__], + }; + pub const PTHREAD_RWLOCK_INITIALIZER: pthread_rwlock_t 
= pthread_rwlock_t { + __sig: _PTHREAD_RWLOCK_SIG_INIT, + __opaque: [0, ..__PTHREAD_RWLOCK_SIZE__], + }; +} + +#[cfg(target_os = "linux")] +mod os { + use libc; + + // minus 8 because we have an 'align' field + #[cfg(target_arch = "x86_64")] + const __SIZEOF_PTHREAD_MUTEX_T: uint = 40 - 8; + #[cfg(any(target_arch = "x86", + target_arch = "arm", + target_arch = "mips", + target_arch = "mipsel"))] + const __SIZEOF_PTHREAD_MUTEX_T: uint = 24 - 8; + + #[cfg(any(target_arch = "x86_64", + target_arch = "x86", + target_arch = "arm", + target_arch = "mips", + target_arch = "mipsel"))] + const __SIZEOF_PTHREAD_COND_T: uint = 48 - 8; + + #[cfg(target_arch = "x86_64")] + const __SIZEOF_PTHREAD_RWLOCK_T: uint = 56 - 8; + + #[cfg(any(target_arch = "x86", + target_arch = "arm", + target_arch = "mips", + target_arch = "mipsel"))] + const __SIZEOF_PTHREAD_RWLOCK_T: uint = 32 - 8; + + #[repr(C)] + pub struct pthread_mutex_t { + __align: libc::c_longlong, + size: [u8, ..__SIZEOF_PTHREAD_MUTEX_T], + } + #[repr(C)] + pub struct pthread_cond_t { + __align: libc::c_longlong, + size: [u8, ..__SIZEOF_PTHREAD_COND_T], + } + #[repr(C)] + pub struct pthread_rwlock_t { + __align: libc::c_longlong, + size: [u8, ..__SIZEOF_PTHREAD_RWLOCK_T], + } + + pub const PTHREAD_MUTEX_INITIALIZER: pthread_mutex_t = pthread_mutex_t { + __align: 0, + size: [0, ..__SIZEOF_PTHREAD_MUTEX_T], + }; + pub const PTHREAD_COND_INITIALIZER: pthread_cond_t = pthread_cond_t { + __align: 0, + size: [0, ..__SIZEOF_PTHREAD_COND_T], + }; + pub const PTHREAD_RWLOCK_INITIALIZER: pthread_rwlock_t = pthread_rwlock_t { + __align: 0, + size: [0, ..__SIZEOF_PTHREAD_RWLOCK_T], + }; +} +#[cfg(target_os = "android")] +mod os { + use libc; + + #[repr(C)] + pub struct pthread_mutex_t { value: libc::c_int } + #[repr(C)] + pub struct pthread_cond_t { value: libc::c_int } + #[repr(C)] + pub struct pthread_rwlock_t { + lock: pthread_mutex_t, + cond: pthread_cond_t, + numLocks: libc::c_int, + writerThreadId: libc::c_int, + pendingReaders: libc::c_int, + pendingWriters: libc::c_int, + reserved: [*mut libc::c_void, ..4], + } + + pub const PTHREAD_MUTEX_INITIALIZER: pthread_mutex_t = pthread_mutex_t { + value: 0, + }; + pub const PTHREAD_COND_INITIALIZER: pthread_cond_t = pthread_cond_t { + value: 0, + }; + pub const PTHREAD_RWLOCK_INITIALIZER: pthread_rwlock_t = pthread_rwlock_t { + lock: PTHREAD_MUTEX_INITIALIZER, + cond: PTHREAD_COND_INITIALIZER, + numLocks: 0, + writerThreadId: 0, + pendingReaders: 0, + pendingWriters: 0, + reserved: [0 as *mut _, ..4], + }; +} diff --git a/src/libstd/sys/windows/condvar.rs b/src/libstd/sys/windows/condvar.rs new file mode 100644 index 00000000000..3cabf3a6319 --- /dev/null +++ b/src/libstd/sys/windows/condvar.rs @@ -0,0 +1,63 @@ +// Copyright 2014 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
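The `- 8` arithmetic in the Linux layouts above works because each struct splits off an explicit 8-byte alignment field: on x86_64, for example, glibc's `pthread_mutex_t`, `pthread_cond_t` and `pthread_rwlock_t` are 40, 48 and 56 bytes, so the byte arrays are 32, 40 and 48 bytes and the Rust totals still match C. A sanity check along these lines could look as follows (the test name is illustrative and the check is not part of this patch):

```rust
#[cfg(all(target_os = "linux", target_arch = "x86_64"))]
#[test]
fn pthread_layouts_match_c_sizes() {
    use std::mem::size_of;
    // Each Rust struct above is an 8-byte align field plus a (size - 8)-byte
    // array, so the totals should agree with the glibc C definitions.
    assert_eq!(size_of::<pthread_mutex_t>(), 40);
    assert_eq!(size_of::<pthread_cond_t>(), 48);
    assert_eq!(size_of::<pthread_rwlock_t>(), 56);
}
```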
+ +use cell::UnsafeCell; +use libc::{mod, DWORD}; +use libc; +use os; +use sys::mutex::{mod, Mutex}; +use sys::sync as ffi; +use time::Duration; + +pub struct Condvar { inner: UnsafeCell } + +pub const CONDVAR_INIT: Condvar = Condvar { + inner: UnsafeCell { value: ffi::CONDITION_VARIABLE_INIT } +}; + +impl Condvar { + #[inline] + pub unsafe fn new() -> Condvar { CONDVAR_INIT } + + #[inline] + pub unsafe fn wait(&self, mutex: &Mutex) { + let r = ffi::SleepConditionVariableCS(self.inner.get(), + mutex::raw(mutex), + libc::INFINITE); + debug_assert!(r != 0); + } + + pub unsafe fn wait_timeout(&self, mutex: &Mutex, dur: Duration) -> bool { + let r = ffi::SleepConditionVariableCS(self.inner.get(), + mutex::raw(mutex), + dur.num_milliseconds() as DWORD); + if r == 0 { + const ERROR_TIMEOUT: DWORD = 0x5B4; + debug_assert_eq!(os::errno() as uint, ERROR_TIMEOUT as uint); + false + } else { + true + } + } + + #[inline] + pub unsafe fn notify_one(&self) { + ffi::WakeConditionVariable(self.inner.get()) + } + + #[inline] + pub unsafe fn notify_all(&self) { + ffi::WakeAllConditionVariable(self.inner.get()) + } + + pub unsafe fn destroy(&self) { + // ... + } +} diff --git a/src/libstd/sys/windows/mod.rs b/src/libstd/sys/windows/mod.rs index 6b9555c52ce..e9243c5040c 100644 --- a/src/libstd/sys/windows/mod.rs +++ b/src/libstd/sys/windows/mod.rs @@ -35,11 +35,15 @@ macro_rules! helper_init( (static $name:ident: Helper<$m:ty>) => ( pub mod c; pub mod ext; +pub mod condvar; pub mod fs; pub mod helper_signal; +pub mod mutex; pub mod os; pub mod pipe; pub mod process; +pub mod rwlock; +pub mod sync; pub mod tcp; pub mod thread_local; pub mod timer; diff --git a/src/libstd/sys/windows/mutex.rs b/src/libstd/sys/windows/mutex.rs new file mode 100644 index 00000000000..10ebcf4bd09 --- /dev/null +++ b/src/libstd/sys/windows/mutex.rs @@ -0,0 +1,76 @@ +// Copyright 2014 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +use sync::atomic; +use alloc::{mod, heap}; + +use libc::DWORD; +use sys::sync as ffi; + +const SPIN_COUNT: DWORD = 4000; + +pub struct Mutex { inner: atomic::AtomicUint } + +pub const MUTEX_INIT: Mutex = Mutex { inner: atomic::INIT_ATOMIC_UINT }; + +#[inline] +pub unsafe fn raw(m: &super::Mutex) -> ffi::LPCRITICAL_SECTION { + m.0.get() +} + +impl Mutex { + #[inline] + pub unsafe fn new() -> Mutex { + Mutex { inner: atomic::AtomicUint::new(init_lock() as uint) } + } + #[inline] + pub unsafe fn lock(&self) { + ffi::EnterCriticalSection(self.get()) + } + #[inline] + pub unsafe fn try_lock(&self) -> bool { + ffi::TryEnterCriticalSection(self.get()) != 0 + } + #[inline] + pub unsafe fn unlock(&self) { + ffi::LeaveCriticalSection(self.get()) + } + pub unsafe fn destroy(&self) { + let lock = self.inner.swap(0, atomic::SeqCst); + if lock != 0 { free_lock(lock as ffi::LPCRITICAL_SECTION) } + } + + unsafe fn get(&self) -> ffi::LPCRITICAL_SECTION { + match self.inner.load(atomic::SeqCst) { + 0 => {} + n => return n as ffi::LPCRITICAL_SECTION + } + let lock = init_lock(); + match self.inner.compare_and_swap(0, lock as uint, atomic::SeqCst) { + 0 => return lock as ffi::LPCRITICAL_SECTION, + _ => {} + } + free_lock(lock); + return self.inner.load(atomic::SeqCst) as ffi::LPCRITICAL_SECTION; + } +} + +unsafe fn init_lock() -> ffi::LPCRITICAL_SECTION { + let block = heap::allocate(ffi::CRITICAL_SECTION_SIZE, 8) + as ffi::LPCRITICAL_SECTION; + if block.is_null() { alloc::oom() } + ffi::InitializeCriticalSectionAndSpinCount(block, SPIN_COUNT); + return block; +} + +unsafe fn free_lock(h: ffi::LPCRITICAL_SECTION) { + ffi::DeleteCriticalSection(h); + heap::deallocate(h as *mut _, ffi::CRITICAL_SECTION_SIZE, 8); +} diff --git a/src/libstd/sys/windows/rwlock.rs b/src/libstd/sys/windows/rwlock.rs new file mode 100644 index 00000000000..88ce85c39f6 --- /dev/null +++ b/src/libstd/sys/windows/rwlock.rs @@ -0,0 +1,53 @@ +// Copyright 2014 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use cell::UnsafeCell; +use sys::sync as ffi; + +pub struct RWLock { inner: UnsafeCell } + +pub const RWLOCK_INIT: RWLock = RWLock { + inner: UnsafeCell { value: ffi::SRWLOCK_INIT } +}; + +impl RWLock { + #[inline] + pub unsafe fn new() -> RWLock { RWLOCK_INIT } + + #[inline] + pub unsafe fn read(&self) { + ffi::AcquireSRWLockShared(self.inner.get()) + } + #[inline] + pub unsafe fn try_read(&self) -> bool { + ffi::TryAcquireSRWLockShared(self.inner.get()) != 0 + } + #[inline] + pub unsafe fn write(&self) { + ffi::AcquireSRWLockExclusive(self.inner.get()) + } + #[inline] + pub unsafe fn try_write(&self) -> bool { + ffi::TryAcquireSRWLockExclusive(self.inner.get()) != 0 + } + #[inline] + pub unsafe fn read_unlock(&self) { + ffi::ReleaseSRWLockShared(self.inner.get()) + } + #[inline] + pub unsafe fn write_unlock(&self) { + ffi::ReleaseSRWLockExclusive(self.inner.get()) + } + + #[inline] + pub unsafe fn destroy(&self) { + // ... + } +} diff --git a/src/libstd/sys/windows/sync.rs b/src/libstd/sys/windows/sync.rs new file mode 100644 index 00000000000..cbca47912b5 --- /dev/null +++ b/src/libstd/sys/windows/sync.rs @@ -0,0 +1,58 @@ +// Copyright 2014 The Rust Project Developers. 
See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use libc::{BOOL, DWORD, c_void, LPVOID}; +use libc::types::os::arch::extra::BOOLEAN; + +pub type LPCRITICAL_SECTION = *mut c_void; +pub type LPCONDITION_VARIABLE = *mut CONDITION_VARIABLE; +pub type LPSRWLOCK = *mut SRWLOCK; + +#[cfg(target_arch = "x86")] +pub const CRITICAL_SECTION_SIZE: uint = 24; +#[cfg(target_arch = "x86_64")] +pub const CRITICAL_SECTION_SIZE: uint = 40; + +#[repr(C)] +pub struct CONDITION_VARIABLE { pub ptr: LPVOID } +#[repr(C)] +pub struct SRWLOCK { pub ptr: LPVOID } + +pub const CONDITION_VARIABLE_INIT: CONDITION_VARIABLE = CONDITION_VARIABLE { + ptr: 0 as *mut _, +}; +pub const SRWLOCK_INIT: SRWLOCK = SRWLOCK { ptr: 0 as *mut _ }; + +extern "system" { + // critical sections + pub fn InitializeCriticalSectionAndSpinCount( + lpCriticalSection: LPCRITICAL_SECTION, + dwSpinCount: DWORD) -> BOOL; + pub fn DeleteCriticalSection(lpCriticalSection: LPCRITICAL_SECTION); + pub fn EnterCriticalSection(lpCriticalSection: LPCRITICAL_SECTION); + pub fn LeaveCriticalSection(lpCriticalSection: LPCRITICAL_SECTION); + pub fn TryEnterCriticalSection(lpCriticalSection: LPCRITICAL_SECTION) -> BOOL; + + // condition variables + pub fn SleepConditionVariableCS(ConditionVariable: LPCONDITION_VARIABLE, + CriticalSection: LPCRITICAL_SECTION, + dwMilliseconds: DWORD) -> BOOL; + pub fn WakeConditionVariable(ConditionVariable: LPCONDITION_VARIABLE); + pub fn WakeAllConditionVariable(ConditionVariable: LPCONDITION_VARIABLE); + + // slim rwlocks + pub fn AcquireSRWLockExclusive(SRWLock: LPSRWLOCK); + pub fn AcquireSRWLockShared(SRWLock: LPSRWLOCK); + pub fn ReleaseSRWLockExclusive(SRWLock: LPSRWLOCK); + pub fn ReleaseSRWLockShared(SRWLock: LPSRWLOCK); + pub fn TryAcquireSRWLockExclusive(SRWLock: LPSRWLOCK) -> BOOLEAN; + pub fn TryAcquireSRWLockShared(SRWLock: LPSRWLOCK) -> BOOLEAN; +} + From c3adbd34c4e637d20a184eb03f09b30c69de8b6e Mon Sep 17 00:00:00 2001 From: Alex Crichton Date: Mon, 24 Nov 2014 11:16:40 -0800 Subject: [PATCH 39/40] Fall out of the std::sync rewrite --- src/etc/licenseck.py | 5 +- src/libstd/comm/mod.rs | 20 ++-- src/libstd/comm/shared.rs | 21 ++-- src/libstd/comm/stream.rs | 2 +- src/libstd/dynamic_lib.rs | 4 +- src/libstd/lib.rs | 2 +- src/libstd/os.rs | 10 +- src/libstd/rt/backtrace.rs | 16 +-- src/libstd/sync/condvar.rs | 15 ++- src/libstd/sync/mutex.rs | 2 +- src/libstd/sys/common/helper_thread.rs | 21 ++-- src/libstd/sys/common/net.rs | 12 +-- src/libstd/sys/unix/mod.rs | 4 +- src/libstd/sys/unix/pipe.rs | 7 +- src/libstd/sys/windows/mod.rs | 4 +- src/libstd/sys/windows/mutex.rs | 6 +- src/libstd/sys/windows/pipe.rs | 7 +- src/test/bench/msgsend-ring-mutex-arcs.rs | 16 +-- src/test/bench/msgsend-ring-rw-arcs.rs | 113 ---------------------- 19 files changed, 99 insertions(+), 188 deletions(-) delete mode 100644 src/test/bench/msgsend-ring-rw-arcs.rs diff --git a/src/etc/licenseck.py b/src/etc/licenseck.py index 9162edcb530..7669df36b04 100644 --- a/src/etc/licenseck.py +++ b/src/etc/licenseck.py @@ -38,9 +38,8 @@ exceptions = [ "rt/isaac/randport.cpp", # public domain "rt/isaac/rand.h", # public domain "rt/isaac/standard.h", # public domain - "libstd/sync/mpsc_queue.rs", # BSD - "libstd/sync/spsc_queue.rs", # BSD - "libstd/sync/mpmc_bounded_queue.rs", # BSD + 
"libstd/comm/mpsc_queue.rs", # BSD + "libstd/comm/spsc_queue.rs", # BSD "test/bench/shootout-binarytrees.rs", # BSD "test/bench/shootout-chameneos-redux.rs", # BSD "test/bench/shootout-fannkuch-redux.rs", # BSD diff --git a/src/libstd/comm/mod.rs b/src/libstd/comm/mod.rs index 2b66e91c00d..d291ed72567 100644 --- a/src/libstd/comm/mod.rs +++ b/src/libstd/comm/mod.rs @@ -354,6 +354,8 @@ mod select; mod shared; mod stream; mod sync; +mod mpsc_queue; +mod spsc_queue; /// The receiving-half of Rust's channel type. This half can only be owned by /// one task @@ -628,24 +630,26 @@ impl Sender { #[unstable] impl Clone for Sender { fn clone(&self) -> Sender { - let (packet, sleeper) = match *unsafe { self.inner() } { + let (packet, sleeper, guard) = match *unsafe { self.inner() } { Oneshot(ref p) => { let a = Arc::new(UnsafeCell::new(shared::Packet::new())); unsafe { - (*a.get()).postinit_lock(); + let guard = (*a.get()).postinit_lock(); match (*p.get()).upgrade(Receiver::new(Shared(a.clone()))) { - oneshot::UpSuccess | oneshot::UpDisconnected => (a, None), - oneshot::UpWoke(task) => (a, Some(task)) + oneshot::UpSuccess | + oneshot::UpDisconnected => (a, None, guard), + oneshot::UpWoke(task) => (a, Some(task), guard) } } } Stream(ref p) => { let a = Arc::new(UnsafeCell::new(shared::Packet::new())); unsafe { - (*a.get()).postinit_lock(); + let guard = (*a.get()).postinit_lock(); match (*p.get()).upgrade(Receiver::new(Shared(a.clone()))) { - stream::UpSuccess | stream::UpDisconnected => (a, None), - stream::UpWoke(task) => (a, Some(task)), + stream::UpSuccess | + stream::UpDisconnected => (a, None, guard), + stream::UpWoke(task) => (a, Some(task), guard), } } } @@ -657,7 +661,7 @@ impl Clone for Sender { }; unsafe { - (*packet.get()).inherit_blocker(sleeper); + (*packet.get()).inherit_blocker(sleeper, guard); let tmp = Sender::new(Shared(packet.clone())); mem::swap(self.inner_mut(), tmp.inner_mut()); diff --git a/src/libstd/comm/shared.rs b/src/libstd/comm/shared.rs index 6396edbdbd1..13b5e10fcd3 100644 --- a/src/libstd/comm/shared.rs +++ b/src/libstd/comm/shared.rs @@ -26,12 +26,11 @@ use alloc::boxed::Box; use core::cmp; use core::int; use rustrt::local::Local; -use rustrt::mutex::NativeMutex; use rustrt::task::{Task, BlockedTask}; use rustrt::thread::Thread; -use sync::atomic; -use sync::mpsc_queue as mpsc; +use sync::{atomic, Mutex, MutexGuard}; +use comm::mpsc_queue as mpsc; const DISCONNECTED: int = int::MIN; const FUDGE: int = 1024; @@ -56,7 +55,7 @@ pub struct Packet { // this lock protects various portions of this implementation during // select() - select_lock: NativeMutex, + select_lock: Mutex<()>, } pub enum Failure { @@ -76,7 +75,7 @@ impl Packet { channels: atomic::AtomicInt::new(2), port_dropped: atomic::AtomicBool::new(false), sender_drain: atomic::AtomicInt::new(0), - select_lock: unsafe { NativeMutex::new() }, + select_lock: Mutex::new(()), }; return p; } @@ -86,8 +85,8 @@ impl Packet { // In other case mutex data will be duplicated while cloning // and that could cause problems on platforms where it is // represented by opaque data structure - pub fn postinit_lock(&mut self) { - unsafe { self.select_lock.lock_noguard() } + pub fn postinit_lock(&self) -> MutexGuard<()> { + self.select_lock.lock() } // This function is used at the creation of a shared packet to inherit a @@ -95,7 +94,9 @@ impl Packet { // tasks in select(). 
// // This can only be called at channel-creation time - pub fn inherit_blocker(&mut self, task: Option) { + pub fn inherit_blocker(&mut self, + task: Option, + guard: MutexGuard<()>) { match task { Some(task) => { assert_eq!(self.cnt.load(atomic::SeqCst), 0); @@ -135,7 +136,7 @@ impl Packet { // interfere with this method. After we unlock this lock, we're // signifying that we're done modifying self.cnt and self.to_wake and // the port is ready for the world to continue using it. - unsafe { self.select_lock.unlock_noguard() } + drop(guard); } pub fn send(&mut self, t: T) -> Result<(), T> { @@ -441,7 +442,7 @@ impl Packet { // done with. Without this bounce, we can race with inherit_blocker // about looking at and dealing with to_wake. Once we have acquired the // lock, we are guaranteed that inherit_blocker is done. - unsafe { + { let _guard = self.select_lock.lock(); } diff --git a/src/libstd/comm/stream.rs b/src/libstd/comm/stream.rs index 23d042960b1..06ab4f4427a 100644 --- a/src/libstd/comm/stream.rs +++ b/src/libstd/comm/stream.rs @@ -32,7 +32,7 @@ use rustrt::task::{Task, BlockedTask}; use rustrt::thread::Thread; use sync::atomic; -use sync::spsc_queue as spsc; +use comm::spsc_queue as spsc; use comm::Receiver; const DISCONNECTED: int = int::MIN; diff --git a/src/libstd/dynamic_lib.rs b/src/libstd/dynamic_lib.rs index 3cd0c0eeaf2..160365dac36 100644 --- a/src/libstd/dynamic_lib.rs +++ b/src/libstd/dynamic_lib.rs @@ -225,8 +225,8 @@ pub mod dl { } pub fn check_for_errors_in(f: || -> T) -> Result { - use rustrt::mutex::{StaticNativeMutex, NATIVE_MUTEX_INIT}; - static LOCK: StaticNativeMutex = NATIVE_MUTEX_INIT; + use sync::{StaticMutex, MUTEX_INIT}; + static LOCK: StaticMutex = MUTEX_INIT; unsafe { // dlerror isn't thread safe, so we need to lock around this entire // sequence diff --git a/src/libstd/lib.rs b/src/libstd/lib.rs index f6b73f037f2..d4274d7e401 100644 --- a/src/libstd/lib.rs +++ b/src/libstd/lib.rs @@ -106,7 +106,7 @@ #![allow(unknown_features)] #![feature(macro_rules, globs, linkage)] #![feature(default_type_params, phase, lang_items, unsafe_destructor)] -#![feature(import_shadowing, slicing_syntax)] +#![feature(import_shadowing, slicing_syntax, tuple_indexing)] // Don't link to std. We are std. #![no_std] diff --git a/src/libstd/os.rs b/src/libstd/os.rs index 0abd030a163..a8adfec34ed 100644 --- a/src/libstd/os.rs +++ b/src/libstd/os.rs @@ -209,14 +209,12 @@ Accessing environment variables is not generally threadsafe. Serialize access through a global lock. */ fn with_env_lock(f: || -> T) -> T { - use rustrt::mutex::{StaticNativeMutex, NATIVE_MUTEX_INIT}; + use sync::{StaticMutex, MUTEX_INIT}; - static LOCK: StaticNativeMutex = NATIVE_MUTEX_INIT; + static LOCK: StaticMutex = MUTEX_INIT; - unsafe { - let _guard = LOCK.lock(); - f() - } + let _guard = LOCK.lock(); + f() } /// Returns a vector of (variable, value) pairs, for all the environment diff --git a/src/libstd/rt/backtrace.rs b/src/libstd/rt/backtrace.rs index 0103fe670e7..159fc3080e8 100644 --- a/src/libstd/rt/backtrace.rs +++ b/src/libstd/rt/backtrace.rs @@ -238,7 +238,7 @@ mod imp { use mem; use option::{Some, None, Option}; use result::{Ok, Err}; - use rustrt::mutex::{StaticNativeMutex, NATIVE_MUTEX_INIT}; + use sync::{StaticMutex, MUTEX_INIT}; /// As always - iOS on arm uses SjLj exceptions and /// _Unwind_Backtrace is even not available there. 
Still, @@ -264,8 +264,8 @@ mod imp { // while it doesn't requires lock for work as everything is // local, it still displays much nicer backtraces when a // couple of tasks panic simultaneously - static LOCK: StaticNativeMutex = NATIVE_MUTEX_INIT; - let _g = unsafe { LOCK.lock() }; + static LOCK: StaticMutex = MUTEX_INIT; + let _g = LOCK.lock(); try!(writeln!(w, "stack backtrace:")); // 100 lines should be enough @@ -297,8 +297,8 @@ mod imp { // is semi-reasonable in terms of printing anyway, and we know that all // I/O done here is blocking I/O, not green I/O, so we don't have to // worry about this being a native vs green mutex. - static LOCK: StaticNativeMutex = NATIVE_MUTEX_INIT; - let _g = unsafe { LOCK.lock() }; + static LOCK: StaticMutex = MUTEX_INIT; + let _g = LOCK.lock(); try!(writeln!(w, "stack backtrace:")); @@ -667,7 +667,7 @@ mod imp { use option::{Some, None}; use path::Path; use result::{Ok, Err}; - use rustrt::mutex::{StaticNativeMutex, NATIVE_MUTEX_INIT}; + use sync::{StaticMutex, MUTEX_INIT}; use slice::SlicePrelude; use str::StrPrelude; use dynamic_lib::DynamicLibrary; @@ -928,8 +928,8 @@ mod imp { pub fn write(w: &mut Writer) -> IoResult<()> { // According to windows documentation, all dbghelp functions are // single-threaded. - static LOCK: StaticNativeMutex = NATIVE_MUTEX_INIT; - let _g = unsafe { LOCK.lock() }; + static LOCK: StaticMutex = MUTEX_INIT; + let _g = LOCK.lock(); // Open up dbghelp.dll, we don't link to it explicitly because it can't // always be found. Additionally, it's nice having fewer dependencies. diff --git a/src/libstd/sync/condvar.rs b/src/libstd/sync/condvar.rs index 581b6b4e412..0fdd57b2792 100644 --- a/src/libstd/sync/condvar.rs +++ b/src/libstd/sync/condvar.rs @@ -143,8 +143,14 @@ impl Condvar { /// /// Like `wait`, the lock specified will be re-acquired when this function /// returns, regardless of whether the timeout elapsed or not. - pub fn wait_timeout(&self, mutex_guard: &T, - dur: Duration) -> bool { + // Note that this method is *not* public, and this is quite intentional + // because we're not quite sure about the semantics of relative vs absolute + // durations or how the timing guarantees play into what the system APIs + // provide. There are also additional concerns about the unix-specific + // implementation which may need to be addressed. + #[allow(dead_code)] + fn wait_timeout(&self, mutex_guard: &T, + dur: Duration) -> bool { unsafe { let me: &'static Condvar = &*(self as *const _); me.inner.wait_timeout(mutex_guard, dur) @@ -195,8 +201,9 @@ impl StaticCondvar { /// specified duration. /// /// See `Condvar::wait_timeout`. - pub fn wait_timeout(&'static self, mutex_guard: &T, - dur: Duration) -> bool { + #[allow(dead_code)] // may want to stabilize this later, see wait_timeout above + fn wait_timeout(&'static self, mutex_guard: &T, + dur: Duration) -> bool { unsafe { let lock = mutex_guard.as_mutex_guard(); let sys = mutex::guard_lock(lock); diff --git a/src/libstd/sync/mutex.rs b/src/libstd/sync/mutex.rs index 3d17f2bc64b..4e07d54c57e 100644 --- a/src/libstd/sync/mutex.rs +++ b/src/libstd/sync/mutex.rs @@ -45,7 +45,7 @@ use sys_common::mutex as sys; /// let data = Arc::new(Mutex::new(0)); /// /// let (tx, rx) = channel(); -/// for _ in range(0, 10) { +/// for _ in range(0u, 10) { /// let (data, tx) = (data.clone(), tx.clone()); /// spawn(proc() { /// // The shared static can only be accessed once the lock is held. 
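The hunks above in dynamic_lib.rs, os.rs and rt/backtrace.rs all apply the same idiom: a single process-wide static mutex serializes calls into C facilities that are not thread-safe (`dlerror`, the environment, the dbghelp functions), and because the new `lock()` hands back an RAII guard, the `unsafe` block around the old locking call disappears. The `StaticMutex`/`MUTEX_INIT` pair used in the patch predates Rust 1.0; as a rough sketch of the same idiom on current Rust, where a plain `std::sync::Mutex` can be built directly in a `static`, with `not_thread_safe_c_call` as a hypothetical stand-in for the guarded C call:

```rust
use std::sync::Mutex;

// Hypothetical stand-in for a C call that is not thread-safe (e.g. dlerror).
fn not_thread_safe_c_call() -> i32 {
    0
}

// One process-wide lock; `Mutex::new` is const on recent Rust, so no lazy
// initialization or special static-mutex type is needed.
static LOCK: Mutex<()> = Mutex::new(());

fn check_for_errors() -> i32 {
    // The guard is an RAII value: the lock is released when `_guard` drops,
    // so no `unsafe` block is needed around the critical section.
    let _guard = LOCK.lock().unwrap();
    not_thread_safe_c_call()
}

fn main() {
    assert_eq!(check_for_errors(), 0);
}
```
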
diff --git a/src/libstd/sys/common/helper_thread.rs b/src/libstd/sys/common/helper_thread.rs index 9508d8d9232..c0018c5d970 100644 --- a/src/libstd/sys/common/helper_thread.rs +++ b/src/libstd/sys/common/helper_thread.rs @@ -20,13 +20,14 @@ //! can be created in the future and there must be no active timers at that //! time. +use prelude::*; + +use cell::UnsafeCell; use mem; use rustrt::bookkeeping; -use rustrt::mutex::StaticNativeMutex; use rustrt; -use cell::UnsafeCell; +use sync::{StaticMutex, StaticCondvar}; use sys::helper_signal; -use prelude::*; use task; @@ -39,7 +40,8 @@ use task; /// is for static initialization. pub struct Helper { /// Internal lock which protects the remaining fields - pub lock: StaticNativeMutex, + pub lock: StaticMutex, + pub cond: StaticCondvar, // You'll notice that the remaining fields are UnsafeCell, and this is // because all helper thread operations are done through &self, but we need @@ -53,6 +55,9 @@ pub struct Helper { /// Flag if this helper thread has booted and been initialized yet. pub initialized: UnsafeCell, + + /// Flag if this helper thread has shut down + pub shutdown: UnsafeCell, } impl Helper { @@ -80,7 +85,9 @@ impl Helper { task::spawn(proc() { bookkeeping::decrement(); helper(receive, rx, t); - self.lock.lock().signal() + let _g = self.lock.lock(); + *self.shutdown.get() = true; + self.cond.notify_one() }); rustrt::at_exit(proc() { self.shutdown() }); @@ -119,7 +126,9 @@ impl Helper { helper_signal::signal(*self.signal.get() as helper_signal::signal); // Wait for the child to exit - guard.wait(); + while !*self.shutdown.get() { + self.cond.wait(&guard); + } drop(guard); // Clean up after ourselves diff --git a/src/libstd/sys/common/net.rs b/src/libstd/sys/common/net.rs index 029fc852742..ddc6dd021c3 100644 --- a/src/libstd/sys/common/net.rs +++ b/src/libstd/sys/common/net.rs @@ -16,13 +16,13 @@ use libc::{mod, c_char, c_int}; use mem; use num::Int; use ptr::{mod, null, null_mut}; -use rustrt::mutex; use io::net::ip::{SocketAddr, IpAddr, Ipv4Addr, Ipv6Addr}; use io::net::addrinfo; use io::{IoResult, IoError}; use sys::{mod, retry, c, sock_t, last_error, last_net_error, last_gai_error, close_sock, wrlen, msglen_t, os, wouldblock, set_nonblocking, timer, ms_to_timeval, decode_error_detailed}; +use sync::{Mutex, MutexGuard}; use sys_common::{mod, keep_going, short_write, timeout}; use prelude::*; use cmp; @@ -557,12 +557,12 @@ struct Inner { // Unused on Linux, where this lock is not necessary. 
#[allow(dead_code)] - lock: mutex::NativeMutex + lock: Mutex<()>, } impl Inner { fn new(fd: sock_t) -> Inner { - Inner { fd: fd, lock: unsafe { mutex::NativeMutex::new() } } + Inner { fd: fd, lock: Mutex::new(()) } } } @@ -572,7 +572,7 @@ impl Drop for Inner { pub struct Guard<'a> { pub fd: sock_t, - pub guard: mutex::LockGuard<'a>, + pub guard: MutexGuard<'a, ()>, } #[unsafe_destructor] @@ -666,7 +666,7 @@ impl TcpStream { fn lock_nonblocking<'a>(&'a self) -> Guard<'a> { let ret = Guard { fd: self.fd(), - guard: unsafe { self.inner.lock.lock() }, + guard: self.inner.lock.lock(), }; assert!(set_nonblocking(self.fd(), true).is_ok()); ret @@ -805,7 +805,7 @@ impl UdpSocket { fn lock_nonblocking<'a>(&'a self) -> Guard<'a> { let ret = Guard { fd: self.fd(), - guard: unsafe { self.inner.lock.lock() }, + guard: self.inner.lock.lock(), }; assert!(set_nonblocking(self.fd(), true).is_ok()); ret diff --git a/src/libstd/sys/unix/mod.rs b/src/libstd/sys/unix/mod.rs index 7b37fb3fb0f..4effedbe3ab 100644 --- a/src/libstd/sys/unix/mod.rs +++ b/src/libstd/sys/unix/mod.rs @@ -25,10 +25,12 @@ use sys_common::mkerr_libc; macro_rules! helper_init( (static $name:ident: Helper<$m:ty>) => ( static $name: Helper<$m> = Helper { - lock: ::rustrt::mutex::NATIVE_MUTEX_INIT, + lock: ::sync::MUTEX_INIT, + cond: ::sync::CONDVAR_INIT, chan: ::cell::UnsafeCell { value: 0 as *mut Sender<$m> }, signal: ::cell::UnsafeCell { value: 0 }, initialized: ::cell::UnsafeCell { value: false }, + shutdown: ::cell::UnsafeCell { value: false }, }; ) ) diff --git a/src/libstd/sys/unix/pipe.rs b/src/libstd/sys/unix/pipe.rs index 3f70fb5c1a5..08e6f7059d8 100644 --- a/src/libstd/sys/unix/pipe.rs +++ b/src/libstd/sys/unix/pipe.rs @@ -12,8 +12,7 @@ use alloc::arc::Arc; use libc; use c_str::CString; use mem; -use rustrt::mutex; -use sync::atomic; +use sync::{atomic, Mutex}; use io::{mod, IoResult, IoError}; use prelude::*; @@ -60,12 +59,12 @@ struct Inner { // Unused on Linux, where this lock is not necessary. #[allow(dead_code)] - lock: mutex::NativeMutex + lock: Mutex<()>, } impl Inner { fn new(fd: fd_t) -> Inner { - Inner { fd: fd, lock: unsafe { mutex::NativeMutex::new() } } + Inner { fd: fd, lock: Mutex::new(()) } } } diff --git a/src/libstd/sys/windows/mod.rs b/src/libstd/sys/windows/mod.rs index e9243c5040c..9fce308cb94 100644 --- a/src/libstd/sys/windows/mod.rs +++ b/src/libstd/sys/windows/mod.rs @@ -26,10 +26,12 @@ use sync::{Once, ONCE_INIT}; macro_rules! helper_init( (static $name:ident: Helper<$m:ty>) => ( static $name: Helper<$m> = Helper { - lock: ::rustrt::mutex::NATIVE_MUTEX_INIT, + lock: ::sync::MUTEX_INIT, + cond: ::sync::CONDVAR_INIT, chan: ::cell::UnsafeCell { value: 0 as *mut Sender<$m> }, signal: ::cell::UnsafeCell { value: 0 }, initialized: ::cell::UnsafeCell { value: false }, + shutdown: ::cell::UnsafeCell { value: false }, }; ) ) diff --git a/src/libstd/sys/windows/mutex.rs b/src/libstd/sys/windows/mutex.rs index 10ebcf4bd09..ddd89070ed5 100644 --- a/src/libstd/sys/windows/mutex.rs +++ b/src/libstd/sys/windows/mutex.rs @@ -8,6 +8,8 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
+use prelude::*; + use sync::atomic; use alloc::{mod, heap}; @@ -21,8 +23,8 @@ pub struct Mutex { inner: atomic::AtomicUint } pub const MUTEX_INIT: Mutex = Mutex { inner: atomic::INIT_ATOMIC_UINT }; #[inline] -pub unsafe fn raw(m: &super::Mutex) -> ffi::LPCRITICAL_SECTION { - m.0.get() +pub unsafe fn raw(m: &Mutex) -> ffi::LPCRITICAL_SECTION { + m.get() } impl Mutex { diff --git a/src/libstd/sys/windows/pipe.rs b/src/libstd/sys/windows/pipe.rs index ca7985aa35b..bf658d0efd0 100644 --- a/src/libstd/sys/windows/pipe.rs +++ b/src/libstd/sys/windows/pipe.rs @@ -89,8 +89,7 @@ use libc; use c_str::CString; use mem; use ptr; -use sync::atomic; -use rustrt::mutex; +use sync::{atomic, Mutex}; use io::{mod, IoError, IoResult}; use prelude::*; @@ -126,7 +125,7 @@ impl Drop for Event { struct Inner { handle: libc::HANDLE, - lock: mutex::NativeMutex, + lock: Mutex<()>, read_closed: atomic::AtomicBool, write_closed: atomic::AtomicBool, } @@ -135,7 +134,7 @@ impl Inner { fn new(handle: libc::HANDLE) -> Inner { Inner { handle: handle, - lock: unsafe { mutex::NativeMutex::new() }, + lock: Mutex::new(()), read_closed: atomic::AtomicBool::new(false), write_closed: atomic::AtomicBool::new(false), } diff --git a/src/test/bench/msgsend-ring-mutex-arcs.rs b/src/test/bench/msgsend-ring-mutex-arcs.rs index d06e6c8cd19..863c3c879a7 100644 --- a/src/test/bench/msgsend-ring-mutex-arcs.rs +++ b/src/test/bench/msgsend-ring-mutex-arcs.rs @@ -19,28 +19,30 @@ // ignore-lexer-test FIXME #15679 use std::os; -use std::sync::{Arc, Future, Mutex}; +use std::sync::{Arc, Future, Mutex, Condvar}; use std::time::Duration; use std::uint; // A poor man's pipe. -type pipe = Arc>>; +type pipe = Arc<(Mutex>, Condvar)>; fn send(p: &pipe, msg: uint) { - let mut arr = p.lock(); + let &(ref lock, ref cond) = &**p; + let mut arr = lock.lock(); arr.push(msg); - arr.cond.signal(); + cond.notify_one(); } fn recv(p: &pipe) -> uint { - let mut arr = p.lock(); + let &(ref lock, ref cond) = &**p; + let mut arr = lock.lock(); while arr.is_empty() { - arr.cond.wait(); + cond.wait(&arr); } arr.pop().unwrap() } fn init() -> (pipe,pipe) { - let m = Arc::new(Mutex::new(Vec::new())); + let m = Arc::new((Mutex::new(Vec::new()), Condvar::new())); ((&m).clone(), m) } diff --git a/src/test/bench/msgsend-ring-rw-arcs.rs b/src/test/bench/msgsend-ring-rw-arcs.rs deleted file mode 100644 index 03066d40512..00000000000 --- a/src/test/bench/msgsend-ring-rw-arcs.rs +++ /dev/null @@ -1,113 +0,0 @@ -// Copyright 2012 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -// This test creates a bunch of tasks that simultaneously send to each -// other in a ring. The messages should all be basically -// independent. -// This is like msgsend-ring-pipes but adapted to use Arcs. - -// This also serves as a pipes test, because Arcs are implemented with pipes. - -// no-pretty-expanded FIXME #15189 -// ignore-lexer-test FIXME #15679 - -use std::os; -use std::sync::{RWLock, Arc, Future}; -use std::time::Duration; -use std::uint; - -// A poor man's pipe. 
-type pipe = Arc>>; - -fn send(p: &pipe, msg: uint) { - let mut arr = p.write(); - arr.push(msg); - arr.cond.signal(); -} -fn recv(p: &pipe) -> uint { - let mut arr = p.write(); - while arr.is_empty() { - arr.cond.wait(); - } - arr.pop().unwrap() -} - -fn init() -> (pipe,pipe) { - let x = Arc::new(RWLock::new(Vec::new())); - ((&x).clone(), x) -} - - -fn thread_ring(i: uint, count: uint, num_chan: pipe, num_port: pipe) { - let mut num_chan = Some(num_chan); - let mut num_port = Some(num_port); - // Send/Receive lots of messages. - for j in range(0u, count) { - //println!("task %?, iter %?", i, j); - let num_chan2 = num_chan.take().unwrap(); - let num_port2 = num_port.take().unwrap(); - send(&num_chan2, i * j); - num_chan = Some(num_chan2); - let _n = recv(&num_port2); - //log(error, _n); - num_port = Some(num_port2); - }; -} - -fn main() { - let args = os::args(); - let args = if os::getenv("RUST_BENCH").is_some() { - vec!("".to_string(), "100".to_string(), "10000".to_string()) - } else if args.len() <= 1u { - vec!("".to_string(), "10".to_string(), "100".to_string()) - } else { - args.clone().into_iter().collect() - }; - - let num_tasks = from_str::(args[1].as_slice()).unwrap(); - let msg_per_task = from_str::(args[2].as_slice()).unwrap(); - - let (mut num_chan, num_port) = init(); - - let mut p = Some((num_chan, num_port)); - let dur = Duration::span(|| { - let (mut num_chan, num_port) = p.take().unwrap(); - - // create the ring - let mut futures = Vec::new(); - - for i in range(1u, num_tasks) { - //println!("spawning %?", i); - let (new_chan, num_port) = init(); - let num_chan_2 = num_chan.clone(); - let new_future = Future::spawn(proc() { - thread_ring(i, msg_per_task, num_chan_2, num_port) - }); - futures.push(new_future); - num_chan = new_chan; - }; - - // do our iteration - thread_ring(0, msg_per_task, num_chan, num_port); - - // synchronize - for f in futures.iter_mut() { - let _ = f.get(); - } - }); - - // all done, report stats. - let num_msgs = num_tasks * msg_per_task; - let rate = (num_msgs as f64) / (dur.num_milliseconds() as f64); - - println!("Sent {} messages in {} ms", num_msgs, dur.num_milliseconds()); - println!(" {} messages / second", rate / 1000.0); - println!(" {} μs / message", 1000000. / rate / 1000.0); -} From 309ab34b7ad80c43f619d4ed7faa8e3905eae530 Mon Sep 17 00:00:00 2001 From: Chase Southwood Date: Thu, 4 Dec 2014 00:35:38 -0600 Subject: [PATCH 40/40] Implement BitOps for TrieSet --- src/libcollections/trie/set.rs | 126 ++++++++++++++++++++++++++++++++- 1 file changed, 125 insertions(+), 1 deletion(-) diff --git a/src/libcollections/trie/set.rs b/src/libcollections/trie/set.rs index dd884b6ee41..1b3657943da 100644 --- a/src/libcollections/trie/set.rs +++ b/src/libcollections/trie/set.rs @@ -9,7 +9,6 @@ // except according to those terms. // FIXME(conventions): implement bounded iterators -// FIXME(conventions): implement BitOr, BitAnd, BitXor, and Sub // FIXME(conventions): replace each_reverse by making iter DoubleEnded // FIXME(conventions): implement iter_mut and into_iter @@ -463,6 +462,90 @@ impl Extend for TrieSet { } } +#[unstable = "matches collection reform specification, waiting for dust to settle"] +impl BitOr for TrieSet { + /// Returns the union of `self` and `rhs` as a new `TrieSet`. 
+ /// + /// # Example + /// + /// ``` + /// use std::collections::TrieSet; + /// + /// let a: TrieSet = vec![1, 2, 3].into_iter().collect(); + /// let b: TrieSet = vec![3, 4, 5].into_iter().collect(); + /// + /// let set: TrieSet = a | b; + /// let v: Vec = set.iter().collect(); + /// assert_eq!(v, vec![1u, 2, 3, 4, 5]); + /// ``` + fn bitor(&self, rhs: &TrieSet) -> TrieSet { + self.union(rhs).collect() + } +} + +#[unstable = "matches collection reform specification, waiting for dust to settle"] +impl BitAnd for TrieSet { + /// Returns the intersection of `self` and `rhs` as a new `TrieSet`. + /// + /// # Example + /// + /// ``` + /// use std::collections::TrieSet; + /// + /// let a: TrieSet = vec![1, 2, 3].into_iter().collect(); + /// let b: TrieSet = vec![2, 3, 4].into_iter().collect(); + /// + /// let set: TrieSet = a & b; + /// let v: Vec = set.iter().collect(); + /// assert_eq!(v, vec![2u, 3]); + /// ``` + fn bitand(&self, rhs: &TrieSet) -> TrieSet { + self.intersection(rhs).collect() + } +} + +#[unstable = "matches collection reform specification, waiting for dust to settle"] +impl BitXor for TrieSet { + /// Returns the symmetric difference of `self` and `rhs` as a new `TrieSet`. + /// + /// # Example + /// + /// ``` + /// use std::collections::TrieSet; + /// + /// let a: TrieSet = vec![1, 2, 3].into_iter().collect(); + /// let b: TrieSet = vec![3, 4, 5].into_iter().collect(); + /// + /// let set: TrieSet = a ^ b; + /// let v: Vec = set.iter().collect(); + /// assert_eq!(v, vec![1u, 2, 4, 5]); + /// ``` + fn bitxor(&self, rhs: &TrieSet) -> TrieSet { + self.symmetric_difference(rhs).collect() + } +} + +#[unstable = "matches collection reform specification, waiting for dust to settle"] +impl Sub for TrieSet { + /// Returns the difference of `self` and `rhs` as a new `TrieSet`. + /// + /// # Example + /// + /// ``` + /// use std::collections::TrieSet; + /// + /// let a: TrieSet = vec![1, 2, 3].into_iter().collect(); + /// let b: TrieSet = vec![3, 4, 5].into_iter().collect(); + /// + /// let set: TrieSet = a - b; + /// let v: Vec = set.iter().collect(); + /// assert_eq!(v, vec![1u, 2]); + /// ``` + fn sub(&self, rhs: &TrieSet) -> TrieSet { + self.difference(rhs).collect() + } +} + /// A forward iterator over a set. pub struct SetItems<'a> { iter: Entries<'a, ()> @@ -569,6 +652,7 @@ impl<'a> Iterator for UnionItems<'a> { mod test { use std::prelude::*; use std::uint; + use vec::Vec; use super::TrieSet; @@ -738,4 +822,44 @@ mod test { &[1, 5, 9, 13, 19], &[1, 3, 5, 9, 11, 13, 16, 19, 24]); } + + #[test] + fn test_bit_or() { + let a: TrieSet = vec![1, 2, 3].into_iter().collect(); + let b: TrieSet = vec![3, 4, 5].into_iter().collect(); + + let set: TrieSet = a | b; + let v: Vec = set.iter().collect(); + assert_eq!(v, vec![1u, 2, 3, 4, 5]); + } + + #[test] + fn test_bit_and() { + let a: TrieSet = vec![1, 2, 3].into_iter().collect(); + let b: TrieSet = vec![2, 3, 4].into_iter().collect(); + + let set: TrieSet = a & b; + let v: Vec = set.iter().collect(); + assert_eq!(v, vec![2u, 3]); + } + + #[test] + fn test_bit_xor() { + let a: TrieSet = vec![1, 2, 3].into_iter().collect(); + let b: TrieSet = vec![3, 4, 5].into_iter().collect(); + + let set: TrieSet = a ^ b; + let v: Vec = set.iter().collect(); + assert_eq!(v, vec![1u, 2, 4, 5]); + } + + #[test] + fn test_sub() { + let a: TrieSet = vec![1, 2, 3].into_iter().collect(); + let b: TrieSet = vec![3, 4, 5].into_iter().collect(); + + let set: TrieSet = a - b; + let v: Vec = set.iter().collect(); + assert_eq!(v, vec![1u, 2]); + } }
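
Each operator in the patch above is implemented by chaining the set's existing iterator adaptor with `collect()` (`bitor` is `self.union(rhs).collect()`, and so on). `TrieSet` was later removed from the standard library, so purely as a rough modern equivalent, here is the same pattern sketched against today's `std::collections::BTreeSet`, which also provides the operator impls for borrowed sets:

```rust
use std::collections::BTreeSet;

fn main() {
    let a: BTreeSet<u32> = [1, 2, 3].into_iter().collect();
    let b: BTreeSet<u32> = [3, 4, 5].into_iter().collect();

    // Same shape as the TrieSet impls above: reuse the existing set
    // iterator and collect the result into a fresh set.
    let union: BTreeSet<u32> = a.union(&b).copied().collect();
    let expected: BTreeSet<u32> = [1, 2, 3, 4, 5].into_iter().collect();
    assert_eq!(union, expected);

    // Today's std provides the operator form for references, so the
    // operands are borrowed rather than moved.
    let sym_diff: BTreeSet<u32> = &a ^ &b;
    let expected: BTreeSet<u32> = [1, 2, 4, 5].into_iter().collect();
    assert_eq!(sym_diff, expected);
}
```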