Per RFC 459.
cc https://github.com/rust-lang/rust/issues/19390
One question is: should we start with a warning and only switch to a hard error later? I think we discussed something like this in the meeting.
r? @alexcrichton
- The following operator traits now take their arguments by value: `Add`, `Sub`, `Mul`, `Div`, `Rem`, `BitAnd`, `BitOr`, `BitXor`, `Shl`, `Shr`. This breaks all existing implementations of these traits.
- The binary operation `a OP b` now "desugars" to `OpTrait::op_method(a, b)` and consumes both arguments.
- `String` and `Vec` addition have been changed to reuse the LHS owned value and to avoid internal cloning. Only the following asymmetric operations are available: `String + &str` and `Vec<T> + &[T]`, which are now shorthand for the "append" operation (see the sketch below).
[breaking-change]
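To make the new shape concrete, here is a minimal sketch against the trait as it stands after this change (the `Point` type is hypothetical; the trait still carries explicit RHS/result type parameters at this point):

```rust
use std::ops::Add;

struct Point { x: int, y: int }

// Both operands are now taken by value and consumed.
impl Add<Point, Point> for Point {
    fn add(self, rhs: Point) -> Point {
        Point { x: self.x + rhs.x, y: self.y + rhs.y }
    }
}

fn main() {
    let a = Point { x: 1, y: 2 };
    let b = Point { x: 3, y: 4 };
    let c = a + b; // desugars to `Add::add(a, b)`; `a` and `b` are moved
    // `String + &str` reuses the LHS buffer: an in-place append.
    let s = "foo".to_string() + "bar";
    println!("({}, {}) {}", c.x, c.y, s);
}
```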
---
This passes `make check` locally. I haven't touched the unary operators in this PR, but converting them to by-value should be very similar. I can work on them after this gets the thumbs up.
@nikomatsakis r? the compiler changes
@aturon r? the library changes. I think the only controversial bit is the semantic change of the `Vec`/`String` `Add` implementation.
cc #19148
This patch does not itself enable generalized where clauses, but it lays the groundwork. Rather than storing a list of bounds per type parameter, the trait selection and other logic is now driven by a unified list of predicates. All predicate handling is now driven through a common interface. This also fixes a number of bugs where region predicates were being dropped on the floor. As a drive-by, this patch also fixes some bugs in the opt-out-copy feature flag.
That said, this patch does not change the parser or AST in any way, so we still *generate* the list of predicates by walking a list of bounds (and we still *store* the bounds on the `TypeParameterDef` and so on). Those will get patched in a follow-up.
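For a sense of what "generalized" means here, a hedged, hypothetical illustration: a predicate whose self type is a concrete type rather than a bare type parameter (this patch itself does not enable such clauses):

```rust
// Hypothetical: the bound constrains `Vec<T>`, not `T` itself.
fn contains_group<T>(groups: &[Vec<T>], needle: &Vec<T>) -> bool
    where Vec<T>: PartialEq
{
    groups.iter().any(|g| g == needle)
}
```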
The commits in this PR are standalone; the first few are simple refactorings.
r? @nick29581
cc @aturon
In most cases, just the error message changed, but in some cases we are reporting new errors that OUGHT to have been reported before but were overlooked (mostly involving the `'static` bound on `Send`). These probably happened during the merge of the commit that made `Copy` opt-in.
Also, convert the last occurrence of `/**` to `///` in `src/libstd/num/strconv.rs`.
This detects (a subset of) the cases when `transmute::<T, U>(x)` can be
lowered to a direct `bitcast T x to U` in LLVM. This assists with
efficiently handling a SIMD vector as multiple different types,
e.g. swapping bytes/words/double words around inside some larger vector
type.
C compilers like GCC and Clang handle integer vector types as `__m128i`
for all widths, and implicitly insert bitcasts as required. This patch
allows Rust to express this, even if it takes a bit of `unsafe`, whereas
previously it was impossible to do at all without inline assembly.
Example:
```rust
pub fn reverse_u32s(u: u64x2) -> u64x2 {
    unsafe {
        let tmp = mem::transmute::<_, u32x4>(u);
        let swapped = u32x4(tmp.3, tmp.2, tmp.1, tmp.0);
        mem::transmute::<_, u64x2>(swapped)
    }
}
```
Compiling with `--opt-level=3` gives:
Before:

```llvm
define <2 x i64> @_ZN12reverse_u32s20hbdb206aba18a03d8tbaE(<2 x i64>) unnamed_addr #0 {
entry-block:
  %1 = bitcast <2 x i64> %0 to i128
  %u.0.extract.trunc = trunc i128 %1 to i32
  %u.4.extract.shift = lshr i128 %1, 32
  %u.4.extract.trunc = trunc i128 %u.4.extract.shift to i32
  %u.8.extract.shift = lshr i128 %1, 64
  %u.8.extract.trunc = trunc i128 %u.8.extract.shift to i32
  %u.12.extract.shift = lshr i128 %1, 96
  %u.12.extract.trunc = trunc i128 %u.12.extract.shift to i32
  %2 = insertelement <4 x i32> undef, i32 %u.12.extract.trunc, i64 0
  %3 = insertelement <4 x i32> %2, i32 %u.8.extract.trunc, i64 1
  %4 = insertelement <4 x i32> %3, i32 %u.4.extract.trunc, i64 2
  %5 = insertelement <4 x i32> %4, i32 %u.0.extract.trunc, i64 3
  %6 = bitcast <4 x i32> %5 to <2 x i64>
  ret <2 x i64> %6
}
```

```asm
_ZN12reverse_u32s20hbdb206aba18a03d8tbaE:
    .cfi_startproc
    movd    %xmm0, %rax
    punpckhqdq  %xmm0, %xmm0
    movd    %xmm0, %rcx
    movq    %rcx, %rdx
    shrq    $32, %rdx
    movq    %rax, %rsi
    shrq    $32, %rsi
    movd    %eax, %xmm0
    movd    %ecx, %xmm1
    punpckldq   %xmm0, %xmm1
    movd    %esi, %xmm2
    movd    %edx, %xmm0
    punpckldq   %xmm2, %xmm0
    punpckldq   %xmm1, %xmm0
    retq
```

After:

```llvm
define <2 x i64> @_ZN12reverse_u32s20hbdb206aba18a03d8tbaE(<2 x i64>) unnamed_addr #0 {
entry-block:
  %1 = bitcast <2 x i64> %0 to <4 x i32>
  %2 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
  %3 = bitcast <4 x i32> %2 to <2 x i64>
  ret <2 x i64> %3
}
```

```asm
_ZN12reverse_u32s20hbdb206aba18a03d8tbaE:
    .cfi_startproc
    pshufd  $27, %xmm0, %xmm0
    retq
```
This change makes the compiler no longer infer whether types (structures
and enumerations) implement the `Copy` trait (and thus are implicitly
copyable). Rather, you must implement `Copy` yourself via `impl Copy for
MyType {}`.
A new warning has been added, `missing_copy_implementations`, to warn
you if a non-generic public type has been added that could have
implemented `Copy` but didn't.
For convenience, you may *temporarily* opt out of this behavior by using
`#![feature(opt_out_copy)]`. Note, though, that this feature gate will never be
accepted into the language and will be removed by the time 1.0 is released, so
you should transition your code away from using it.
This breaks code like:
```rust
#[deriving(Show)]
struct Point2D {
    x: int,
    y: int,
}

fn main() {
    let mypoint = Point2D {
        x: 1,
        y: 1,
    };
    let otherpoint = mypoint;
    println!("{}{}", mypoint, otherpoint);
}
```
Change this code to:
```rust
#[deriving(Show)]
struct Point2D {
    x: int,
    y: int,
}

impl Copy for Point2D {}

fn main() {
    let mypoint = Point2D {
        x: 1,
        y: 1,
    };
    let otherpoint = mypoint;
    println!("{}{}", mypoint, otherpoint);
}
```
This is the backwards-incompatible part of #13231.
Part of RFC #3.
[breaking-change]
Now that we have an overloaded comparison operator (`==`), and `Vec`/`String` deref to `[T]`/`str` on method calls, many `as_slice()`/`as_mut_slice()`/`to_string()` calls have become redundant. This patch removes them. These were the most common patterns (a sketch follows the list):
- `assert_eq!(test_output.as_slice(), "ground truth")` -> `assert_eq!(test_output, "ground truth")`
- `assert_eq!(test_output, "ground truth".to_string())` -> `assert_eq!(test_output, "ground truth")`
- `vec.as_mut_slice().sort()` -> `vec.sort()`
- `vec.as_slice().slice(from, to)` -> `vec.slice(from, to)`
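As a quick, hedged sketch of the last two patterns (the variable names are made up):

```rust
fn main() {
    let mut v = vec![3i, 1, 2];
    // Previously: v.as_mut_slice().sort()
    v.sort(); // `Vec<T>` derefs to `[T]`, so slice methods apply directly
    // Previously: v.as_slice().slice(0, 2)
    let first_two = v.slice(0, 2);
    println!("{}", first_two); // [1, 2]
}
```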
---
Note that e.g. `a_string.push_str(b_string.as_slice())` has been left untouched in this PR, since we first need to settle on whether we want to favor the `&*b_string` or the `b_string[]` notation.
This is rebased on top of #19167
cc @alexcrichton @aturon
In regard to:
https://github.com/rust-lang/rust/issues/19253#issuecomment-64836729
This commit:
* Changes the `#[deriving]` code so that it generates code that relies on fewer
  reexports (in particular `Option::*` and `Result::*`), which is necessary to
  remove those reexports in the future (see the sketch below)
* Changes other areas of the codebase so that fewer reexports are utilized
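As a hedged sketch of the style this moves toward (the function is hypothetical): variants are named through their enum rather than through the `Option::*` reexport.

```rust
fn checked_div(x: int, y: int) -> Option<int> {
    if y == 0 {
        // Spelled `Option::None` rather than relying on the reexport.
        Option::None
    } else {
        Option::Some(x / y)
    }
}

fn main() {
    println!("{}", checked_div(10, 2)); // Some(5)
    println!("{}", checked_div(10, 0)); // None
}
```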
Comparison traits have gained an `Rhs` input parameter that defaults to `Self`, so the comparison operators can now be overloaded to work between different types. In particular, this PR allows the following operations (and their commutative versions):
- `&str` == `String` == `CowString`
- `&[A]` == `&mut [B]` == `Vec<C>` == `CowVec<D>` == `[E, ..N]` (for `N` up to 32)
- `&mut A` == `&B` (for `Sized` `A` and `B`)
Where `A`, `B`, `C`, `D`, `E` may be different types that implement `PartialEq`. For example, these comparisons are now valid: `string == "foo"`, and `vec_of_strings == ["Hello", "world"]`.
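For instance, a minimal sketch of comparisons that now type-check (the values are made up):

```rust
fn main() {
    let string = "foo".to_string();
    assert!(string == "foo");   // String == &str
    assert!("foo" == string);   // ... and the commutative version

    let vec_of_strings = vec!["Hello".to_string(), "world".to_string()];
    assert!(vec_of_strings == ["Hello", "world"]); // Vec<String> == [&str, ..2]
}
```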
[breaking-change]s
Since `==` may now work on different types, operations that relied on the old same-type restriction to drive type inference will need to be type annotated. These are the most common fallout cases (a sketch follows the list):
- `some_vec == some_iter.collect()`: `collect` needs a type annotation: `collect::<Vec<_>>()`
- `slice == &[a, b, c]`: the RHS doesn't get coerced to a slice; use an array instead: `[a, b, c]`
- `lhs == []`: change the expression to `lhs.is_empty()`
- `lhs == some_generic_function()`: type annotate the RHS as necessary
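A hedged sketch of the first fallout case (hypothetical values):

```rust
fn main() {
    let v = vec![1i, 2, 3];
    // The collection type was previously inferred from `v`;
    // now `collect` needs an explicit annotation:
    let doubled = v.iter().map(|&x| x * 2).collect::<Vec<int>>();
    assert!(doubled == vec![2i, 4, 6]);
}
```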
cc #19148
r? @aturon
One negative side-effect of this change is that there might be quite a bit of copying of strings out of the codemap, i.e. one copy for every block that gets translated, just to look at the last character of the block. If this turns out to cause a performance problem, `CodeMap::span_to_snippet()` could be changed to return `Option<&str>` instead of `Option<String>`.
Fixes #18791