#![feature(prelude_import)]
// check-pass
// compile-flags: -Zunpretty=expanded
// edition:2021
//
// This test checks the code generated for all[*] the builtin derivable traits
// on a variety of structs and enums. It protects against accidental changes to
// the generated code, and makes deliberate changes to the generated code
// easier to review.
//
// [*] It excludes `Copy` in some cases, because that changes the code
// generated for `Clone`.
//
// [*] It excludes `RustcEncodable` and `RustcDecodable`, which are obsolete and
// also require the `rustc_serialize` crate.

#![crate_type = "lib"]
#![allow(dead_code)]
#![allow(deprecated)]
#[prelude_import]
use std::prelude::rust_2021::*;
#[macro_use]
extern crate std;

// Empty struct.
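// For context, the pre-expansion source is presumably something like the
// following (inferred from the impls generated below; not part of the
// expanded output):
//
//     #[derive(Clone, Copy, Debug, Default, Hash, PartialEq, Eq, PartialOrd, Ord)]
//     struct Empty;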
struct Empty;
#[automatically_derived]
#[allow(unused_qualifications)]
impl ::core::clone::Clone for Empty {
    #[inline]
    fn clone(&self) -> Empty { *self }
}
#[automatically_derived]
#[allow(unused_qualifications)]
impl ::core::marker::Copy for Empty { }
#[automatically_derived]
#[allow(unused_qualifications)]
impl ::core::fmt::Debug for Empty {
    fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
        ::core::fmt::Formatter::write_str(f, "Empty")
    }
}
#[automatically_derived]
#[allow(unused_qualifications)]
impl ::core::default::Default for Empty {
    #[inline]
    fn default() -> Empty { Empty {} }
}
#[automatically_derived]
#[allow(unused_qualifications)]
impl ::core::hash::Hash for Empty {
    fn hash<__H: ::core::hash::Hasher>(&self, state: &mut __H) -> () {}
}
impl ::core::marker::StructuralPartialEq for Empty {}
#[automatically_derived]
#[allow(unused_qualifications)]
impl ::core::cmp::PartialEq for Empty {
    #[inline]
    fn eq(&self, other: &Empty) -> bool { true }
}
impl ::core::marker::StructuralEq for Empty {}
#[automatically_derived]
#[allow(unused_qualifications)]
impl ::core::cmp::Eq for Empty {
    #[inline]
    #[doc(hidden)]
    #[no_coverage]
    fn assert_receiver_is_total_eq(&self) -> () {}
}
#[automatically_derived]
#[allow(unused_qualifications)]
impl ::core::cmp::PartialOrd for Empty {
    #[inline]
    fn partial_cmp(&self, other: &Empty)
        -> ::core::option::Option<::core::cmp::Ordering> {
        ::core::option::Option::Some(::core::cmp::Ordering::Equal)
    }
}
#[automatically_derived]
#[allow(unused_qualifications)]
impl ::core::cmp::Ord for Empty {
    #[inline]
    fn cmp(&self, other: &Empty) -> ::core::cmp::Ordering {
        ::core::cmp::Ordering::Equal
    }
}

// A basic struct.
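// Judging from the impls generated below (Clone, Copy, Debug, Default, Hash,
// PartialEq, Eq, PartialOrd, Ord), the pre-expansion source is presumably:
//
//     #[derive(Clone, Copy, Debug, Default, Hash, PartialEq, Eq, PartialOrd, Ord)]
//     struct Point { x: u32, y: u32 }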
struct Point {
    x: u32,
    y: u32,
}
#[automatically_derived]
#[allow(unused_qualifications)]
impl ::core::clone::Clone for Point {
    #[inline]
    fn clone(&self) -> Point {
        let _: ::core::clone::AssertParamIsClone<u32>;
        *self
    }
}
#[automatically_derived]
#[allow(unused_qualifications)]
impl ::core::marker::Copy for Point { }
#[automatically_derived]
#[allow(unused_qualifications)]
impl ::core::fmt::Debug for Point {
    fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
        ::core::fmt::Formatter::debug_struct_field2_finish(f, "Point", "x",
            &&self.x, "y", &&self.y)
    }
}
#[automatically_derived]
#[allow(unused_qualifications)]
impl ::core::default::Default for Point {
    #[inline]
    fn default() -> Point {
        Point {
            x: ::core::default::Default::default(),
            y: ::core::default::Default::default(),
        }
    }
}
#[automatically_derived]
#[allow(unused_qualifications)]
impl ::core::hash::Hash for Point {
    fn hash<__H: ::core::hash::Hasher>(&self, state: &mut __H) -> () {
        ::core::hash::Hash::hash(&self.x, state);
        ::core::hash::Hash::hash(&self.y, state)
    }
}
impl ::core::marker::StructuralPartialEq for Point {}
#[automatically_derived]
#[allow(unused_qualifications)]
impl ::core::cmp::PartialEq for Point {
    #[inline]
    fn eq(&self, other: &Point) -> bool {
        self.x == other.x && self.y == other.y
    }
    #[inline]
    fn ne(&self, other: &Point) -> bool {
        self.x != other.x || self.y != other.y
    }
}
impl ::core::marker::StructuralEq for Point {}
#[automatically_derived]
#[allow(unused_qualifications)]
impl ::core::cmp::Eq for Point {
    #[inline]
    #[doc(hidden)]
    #[no_coverage]
    fn assert_receiver_is_total_eq(&self) -> () {
        let _: ::core::cmp::AssertParamIsEq<u32>;
    }
}
#[automatically_derived]
#[allow(unused_qualifications)]
impl ::core::cmp::PartialOrd for Point {
    #[inline]
    fn partial_cmp(&self, other: &Point)
        -> ::core::option::Option<::core::cmp::Ordering> {
        match ::core::cmp::PartialOrd::partial_cmp(&self.x, &other.x) {
            ::core::option::Option::Some(::core::cmp::Ordering::Equal) =>
                ::core::cmp::PartialOrd::partial_cmp(&self.y, &other.y),
            cmp => cmp,
        }
    }
}
#[automatically_derived]
#[allow(unused_qualifications)]
impl ::core::cmp::Ord for Point {
    #[inline]
    fn cmp(&self, other: &Point) -> ::core::cmp::Ordering {
        match ::core::cmp::Ord::cmp(&self.x, &other.x) {
            ::core::cmp::Ordering::Equal =>
                ::core::cmp::Ord::cmp(&self.y, &other.y),
            cmp => cmp,
        }
    }
}

// A large struct.
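// No `Copy` impl is generated below, so `Copy` is presumably omitted here
// (see the header note: deriving `Copy` changes the code generated for
// `Clone`). The pre-expansion source is presumably something like:
//
//     #[derive(Clone, Debug, Default, Hash, PartialEq, Eq, PartialOrd, Ord)]
//     struct Big { b1: u32, b2: u32, b3: u32, b4: u32, b5: u32, b6: u32, b7: u32, b8: u32 }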
struct Big {
    b1: u32,
    b2: u32,
    b3: u32,
    b4: u32,
    b5: u32,
    b6: u32,
    b7: u32,
    b8: u32,
}
#[automatically_derived]
#[allow(unused_qualifications)]
impl ::core::clone::Clone for Big {
    #[inline]
    fn clone(&self) -> Big {
        Big {
            b1: ::core::clone::Clone::clone(&self.b1),
            b2: ::core::clone::Clone::clone(&self.b2),
            b3: ::core::clone::Clone::clone(&self.b3),
            b4: ::core::clone::Clone::clone(&self.b4),
            b5: ::core::clone::Clone::clone(&self.b5),
            b6: ::core::clone::Clone::clone(&self.b6),
            b7: ::core::clone::Clone::clone(&self.b7),
            b8: ::core::clone::Clone::clone(&self.b8),
        }
    }
}
#[automatically_derived]
#[allow(unused_qualifications)]
impl ::core::fmt::Debug for Big {
    fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
        let names: &'static _ =
            &["b1", "b2", "b3", "b4", "b5", "b6", "b7", "b8"];
        let values: &[&dyn ::core::fmt::Debug] =
            &[&&self.b1, &&self.b2, &&self.b3, &&self.b4, &&self.b5,
              &&self.b6, &&self.b7, &&self.b8];
        ::core::fmt::Formatter::debug_struct_fields_finish(f, "Big", names,
            values)
    }
}
#[automatically_derived]
#[allow(unused_qualifications)]
impl ::core::default::Default for Big {
    #[inline]
    fn default() -> Big {
        Big {
            b1: ::core::default::Default::default(),
            b2: ::core::default::Default::default(),
            b3: ::core::default::Default::default(),
            b4: ::core::default::Default::default(),
            b5: ::core::default::Default::default(),
            b6: ::core::default::Default::default(),
            b7: ::core::default::Default::default(),
            b8: ::core::default::Default::default(),
        }
    }
}
#[automatically_derived]
#[allow(unused_qualifications)]
impl ::core::hash::Hash for Big {
    fn hash<__H: ::core::hash::Hasher>(&self, state: &mut __H) -> () {
        ::core::hash::Hash::hash(&self.b1, state);
        ::core::hash::Hash::hash(&self.b2, state);
        ::core::hash::Hash::hash(&self.b3, state);
        ::core::hash::Hash::hash(&self.b4, state);
        ::core::hash::Hash::hash(&self.b5, state);
        ::core::hash::Hash::hash(&self.b6, state);
        ::core::hash::Hash::hash(&self.b7, state);
        ::core::hash::Hash::hash(&self.b8, state)
    }
}
impl ::core::marker::StructuralPartialEq for Big {}
#[automatically_derived]
#[allow(unused_qualifications)]
impl ::core::cmp::PartialEq for Big {
    #[inline]
    fn eq(&self, other: &Big) -> bool {
        self.b1 == other.b1 && self.b2 == other.b2 && self.b3 == other.b3 &&
            self.b4 == other.b4 && self.b5 == other.b5 &&
            self.b6 == other.b6 && self.b7 == other.b7 &&
            self.b8 == other.b8
    }
    #[inline]
    fn ne(&self, other: &Big) -> bool {
        self.b1 != other.b1 || self.b2 != other.b2 || self.b3 != other.b3 ||
            self.b4 != other.b4 || self.b5 != other.b5 ||
            self.b6 != other.b6 || self.b7 != other.b7 ||
            self.b8 != other.b8
    }
}
impl ::core::marker::StructuralEq for Big {}
#[automatically_derived]
#[allow(unused_qualifications)]
impl ::core::cmp::Eq for Big {
    #[inline]
    #[doc(hidden)]
    #[no_coverage]
    fn assert_receiver_is_total_eq(&self) -> () {
        let _: ::core::cmp::AssertParamIsEq<u32>;
    }
}
#[automatically_derived]
#[allow(unused_qualifications)]
impl ::core::cmp::PartialOrd for Big {
    #[inline]
    fn partial_cmp(&self, other: &Big)
        -> ::core::option::Option<::core::cmp::Ordering> {
        match ::core::cmp::PartialOrd::partial_cmp(&self.b1, &other.b1) {
            ::core::option::Option::Some(::core::cmp::Ordering::Equal) =>
                match ::core::cmp::PartialOrd::partial_cmp(&self.b2, &other.b2) {
                    ::core::option::Option::Some(::core::cmp::Ordering::Equal) =>
                        match ::core::cmp::PartialOrd::partial_cmp(&self.b3, &other.b3) {
                            ::core::option::Option::Some(::core::cmp::Ordering::Equal) =>
                                match ::core::cmp::PartialOrd::partial_cmp(&self.b4, &other.b4) {
                                    ::core::option::Option::Some(::core::cmp::Ordering::Equal) =>
                                        match ::core::cmp::PartialOrd::partial_cmp(&self.b5, &other.b5) {
                                            ::core::option::Option::Some(::core::cmp::Ordering::Equal) =>
                                                match ::core::cmp::PartialOrd::partial_cmp(&self.b6, &other.b6) {
                                                    ::core::option::Option::Some(::core::cmp::Ordering::Equal) =>
                                                        match ::core::cmp::PartialOrd::partial_cmp(&self.b7, &other.b7) {
                                                            ::core::option::Option::Some(::core::cmp::Ordering::Equal) =>
                                                                ::core::cmp::PartialOrd::partial_cmp(&self.b8, &other.b8),
                                                            cmp => cmp,
                                                        },
                                                    cmp => cmp,
                                                },
                                            cmp => cmp,
                                        },
                                    cmp => cmp,
                                },
                            cmp => cmp,
                        },
                    cmp => cmp,
                },
            cmp => cmp,
        }
    }
}
#[automatically_derived]
#[allow(unused_qualifications)]
impl ::core::cmp::Ord for Big {
    #[inline]
    fn cmp(&self, other: &Big) -> ::core::cmp::Ordering {
        match ::core::cmp::Ord::cmp(&self.b1, &other.b1) {
            ::core::cmp::Ordering::Equal =>
                match ::core::cmp::Ord::cmp(&self.b2, &other.b2) {
                    ::core::cmp::Ordering::Equal =>
                        match ::core::cmp::Ord::cmp(&self.b3, &other.b3) {
                            ::core::cmp::Ordering::Equal =>
                                match ::core::cmp::Ord::cmp(&self.b4, &other.b4) {
                                    ::core::cmp::Ordering::Equal =>
                                        match ::core::cmp::Ord::cmp(&self.b5, &other.b5) {
                                            ::core::cmp::Ordering::Equal =>
                                                match ::core::cmp::Ord::cmp(&self.b6, &other.b6) {
                                                    ::core::cmp::Ordering::Equal =>
                                                        match ::core::cmp::Ord::cmp(&self.b7, &other.b7) {
                                                            ::core::cmp::Ordering::Equal =>
                                                                ::core::cmp::Ord::cmp(&self.b8, &other.b8),
                                                            cmp => cmp,
                                                        },
                                                    cmp => cmp,
                                                },
                                            cmp => cmp,
                                        },
                                    cmp => cmp,
                                },
                            cmp => cmp,
                        },
                    cmp => cmp,
                },
            cmp => cmp,
        }
    }
}

// A packed tuple struct.
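// The pre-expansion source is presumably something like (inferred from the
// impls generated below):
//
//     #[derive(Clone, Copy, Debug, Default, Hash, PartialEq, Eq, PartialOrd, Ord)]
//     #[repr(packed)]
//     struct Packed(u32);
//
// Because taking a reference such as `&self.0` to a field of a `repr(packed)`
// struct is not allowed, the derived ops below first copy the field out with a
// `let Self(..) = *self;` destructuring instead of using direct field access.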
#[repr(packed)]
struct Packed(u32);
#[automatically_derived]
#[allow(unused_qualifications)]
impl ::core::clone::Clone for Packed {
    #[inline]
    fn clone(&self) -> Packed {
        let _: ::core::clone::AssertParamIsClone<u32>;
        *self
    }
}
#[automatically_derived]
#[allow(unused_qualifications)]
impl ::core::marker::Copy for Packed { }
#[automatically_derived]
#[allow(unused_qualifications)]
impl ::core::fmt::Debug for Packed {
    fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
        let Self(__self_0_0) = *self;
        ::core::fmt::Formatter::debug_tuple_field1_finish(f, "Packed",
            &&__self_0_0)
    }
}
#[automatically_derived]
#[allow(unused_qualifications)]
impl ::core::default::Default for Packed {
    #[inline]
    fn default() -> Packed { Packed(::core::default::Default::default()) }
}
#[automatically_derived]
#[allow(unused_qualifications)]
impl ::core::hash::Hash for Packed {
    fn hash<__H: ::core::hash::Hasher>(&self, state: &mut __H) -> () {
        let Self(__self_0_0) = *self;
        ::core::hash::Hash::hash(&__self_0_0, state)
    }
}
impl ::core::marker::StructuralPartialEq for Packed {}
#[automatically_derived]
#[allow(unused_qualifications)]
impl ::core::cmp::PartialEq for Packed {
    #[inline]
    fn eq(&self, other: &Packed) -> bool {
        let Self(__self_0_0) = *self;
        let Self(__self_1_0) = *other;
        __self_0_0 == __self_1_0
    }
    #[inline]
    fn ne(&self, other: &Packed) -> bool {
        let Self(__self_0_0) = *self;
        let Self(__self_1_0) = *other;
        __self_0_0 != __self_1_0
    }
}
impl ::core::marker::StructuralEq for Packed {}
#[automatically_derived]
#[allow(unused_qualifications)]
impl ::core::cmp::Eq for Packed {
    #[inline]
    #[doc(hidden)]
    #[no_coverage]
    fn assert_receiver_is_total_eq(&self) -> () {
        let _: ::core::cmp::AssertParamIsEq<u32>;
    }
}
#[automatically_derived]
#[allow(unused_qualifications)]
impl ::core::cmp::PartialOrd for Packed {
    #[inline]
    fn partial_cmp(&self, other: &Packed)
        -> ::core::option::Option<::core::cmp::Ordering> {
        let Self(__self_0_0) = *self;
        let Self(__self_1_0) = *other;
        ::core::cmp::PartialOrd::partial_cmp(&__self_0_0, &__self_1_0)
    }
}
#[automatically_derived]
#[allow(unused_qualifications)]
impl ::core::cmp::Ord for Packed {
    #[inline]
    fn cmp(&self, other: &Packed) -> ::core::cmp::Ordering {
        let Self(__self_0_0) = *self;
        let Self(__self_1_0) = *other;
        ::core::cmp::Ord::cmp(&__self_0_0, &__self_1_0)
    }
}

// An empty enum.
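// The pre-expansion source is presumably (inferred from the impls generated
// below; note there is no `Default` impl):
//
//     #[derive(Clone, Copy, Debug, Hash, PartialEq, Eq, PartialOrd, Ord)]
//     enum Enum0 {}
//
// An empty enum has no values, so the derived method bodies below are simply
// `unsafe { ::core::intrinsics::unreachable() }`.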
|
|
|
|
enum Enum0 {}
|
|
|
|
#[automatically_derived]
|
|
|
|
#[allow(unused_qualifications)]
|
|
|
|
impl ::core::clone::Clone for Enum0 {
|
|
|
|
#[inline]
|
2022-06-27 22:10:36 -05:00
|
|
|
fn clone(&self) -> Enum0 { *self }
|
2022-07-01 02:35:42 -05:00
|
|
|
}
|
|
|
|
#[automatically_derived]
|
|
|
|
#[allow(unused_qualifications)]
|
|
|
|
impl ::core::marker::Copy for Enum0 { }
|
|
|
|
#[automatically_derived]
|
|
|
|
#[allow(unused_qualifications)]
|
|
|
|
impl ::core::fmt::Debug for Enum0 {
|
|
|
|
fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
|
|
|
|
unsafe { ::core::intrinsics::unreachable() }
|
|
|
|
}
|
|
|
|
}
|
|
|
|
#[automatically_derived]
|
|
|
|
#[allow(unused_qualifications)]
|
|
|
|
impl ::core::hash::Hash for Enum0 {
|
|
|
|
fn hash<__H: ::core::hash::Hasher>(&self, state: &mut __H) -> () {
|
|
|
|
unsafe { ::core::intrinsics::unreachable() }
|
|
|
|
}
|
|
|
|
}
|
|
|
|
impl ::core::marker::StructuralPartialEq for Enum0 {}
|
|
|
|
#[automatically_derived]
|
|
|
|
#[allow(unused_qualifications)]
|
|
|
|
impl ::core::cmp::PartialEq for Enum0 {
|
|
|
|
#[inline]
|
|
|
|
fn eq(&self, other: &Enum0) -> bool {
|
|
|
|
unsafe { ::core::intrinsics::unreachable() }
|
|
|
|
}
|
|
|
|
}
|
|
|
|
impl ::core::marker::StructuralEq for Enum0 {}
|
|
|
|
#[automatically_derived]
|
|
|
|
#[allow(unused_qualifications)]
|
|
|
|
impl ::core::cmp::Eq for Enum0 {
|
|
|
|
#[inline]
|
|
|
|
#[doc(hidden)]
|
|
|
|
#[no_coverage]
|
2022-06-27 22:10:36 -05:00
|
|
|
fn assert_receiver_is_total_eq(&self) -> () {}
|
2022-07-01 02:35:42 -05:00
|
|
|
}
|
|
|
|
#[automatically_derived]
|
|
|
|
#[allow(unused_qualifications)]
|
|
|
|
impl ::core::cmp::PartialOrd for Enum0 {
|
|
|
|
#[inline]
|
|
|
|
fn partial_cmp(&self, other: &Enum0)
|
|
|
|
-> ::core::option::Option<::core::cmp::Ordering> {
|
|
|
|
unsafe { ::core::intrinsics::unreachable() }
|
|
|
|
}
|
|
|
|
}
|
|
|
|
#[automatically_derived]
|
|
|
|
#[allow(unused_qualifications)]
|
|
|
|
impl ::core::cmp::Ord for Enum0 {
|
|
|
|
#[inline]
|
|
|
|
fn cmp(&self, other: &Enum0) -> ::core::cmp::Ordering {
|
|
|
|
unsafe { ::core::intrinsics::unreachable() }
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
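// Hand-written illustration (not derive-generated output): `Enum0` has no
// variants, so no value of it can ever be constructed, which is why the
// derived bodies above can be `unsafe { ::core::intrinsics::unreachable() }`.
// The illustrative helper name below is made up for this sketch.
fn _enum0_is_uninhabited(never: Enum0) -> ! { match never {} }
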
// A single-variant enum.
enum Enum1 {
    Single {
        x: u32,
    },
}
#[automatically_derived]
#[allow(unused_qualifications)]
impl ::core::clone::Clone for Enum1 {
    #[inline]
    fn clone(&self) -> Enum1 {
        match &*self {
            &Enum1::Single { x: ref __self_0 } =>
                Enum1::Single { x: ::core::clone::Clone::clone(&*__self_0) },
        }
    }
}
#[automatically_derived]
#[allow(unused_qualifications)]
impl ::core::fmt::Debug for Enum1 {
    fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
        match &*self {
            &Enum1::Single { x: ref __self_0 } =>
                ::core::fmt::Formatter::debug_struct_field1_finish(f,
                    "Single", "x", &&*__self_0),
        }
    }
}
#[automatically_derived]
#[allow(unused_qualifications)]
impl ::core::hash::Hash for Enum1 {
    fn hash<__H: ::core::hash::Hasher>(&self, state: &mut __H) -> () {
        match &*self {
            &Enum1::Single { x: ref __self_0 } => {
                ::core::hash::Hash::hash(&*__self_0, state)
            }
        }
    }
}
impl ::core::marker::StructuralPartialEq for Enum1 {}
#[automatically_derived]
#[allow(unused_qualifications)]
impl ::core::cmp::PartialEq for Enum1 {
    #[inline]
    fn eq(&self, other: &Enum1) -> bool {
        match (&*self, &*other) {
            (&Enum1::Single { x: ref __self_0 }, &Enum1::Single {
             x: ref __arg_1_0 }) => *__self_0 == *__arg_1_0,
        }
    }
    #[inline]
    fn ne(&self, other: &Enum1) -> bool {
        match (&*self, &*other) {
            (&Enum1::Single { x: ref __self_0 }, &Enum1::Single {
             x: ref __arg_1_0 }) => *__self_0 != *__arg_1_0,
        }
    }
}
impl ::core::marker::StructuralEq for Enum1 {}
#[automatically_derived]
#[allow(unused_qualifications)]
impl ::core::cmp::Eq for Enum1 {
    #[inline]
    #[doc(hidden)]
    #[no_coverage]
    fn assert_receiver_is_total_eq(&self) -> () {
        let _: ::core::cmp::AssertParamIsEq<u32>;
    }
}
#[automatically_derived]
#[allow(unused_qualifications)]
impl ::core::cmp::PartialOrd for Enum1 {
    #[inline]
    fn partial_cmp(&self, other: &Enum1)
        -> ::core::option::Option<::core::cmp::Ordering> {
        match (&*self, &*other) {
            (&Enum1::Single { x: ref __self_0 }, &Enum1::Single {
             x: ref __arg_1_0 }) =>
                ::core::cmp::PartialOrd::partial_cmp(&*__self_0, &*__arg_1_0),
        }
    }
}
#[automatically_derived]
#[allow(unused_qualifications)]
impl ::core::cmp::Ord for Enum1 {
    #[inline]
    fn cmp(&self, other: &Enum1) -> ::core::cmp::Ordering {
        match (&*self, &*other) {
            (&Enum1::Single { x: ref __self_0 }, &Enum1::Single {
             x: ref __arg_1_0 }) =>
                ::core::cmp::Ord::cmp(&*__self_0, &*__arg_1_0),
        }
    }
}

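// Hand-written illustration (not derive-generated output): the `Debug` impl
// above formats the variant as a struct with a single field, so the string
// below is what that impl is expected to produce. The helper name is made up
// for this sketch.
fn _enum1_debug_demo() {
    let v = Enum1::Single { x: 3 };
    assert_eq!(format!("{:?}", v), "Single { x: 3 }");
}
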
// A C-like, fieldless enum.
enum Fieldless {

    #[default]
    A,
    B,
    C,
}
#[automatically_derived]
#[allow(unused_qualifications)]
impl ::core::clone::Clone for Fieldless {
    #[inline]
    fn clone(&self) -> Fieldless { *self }
}
#[automatically_derived]
#[allow(unused_qualifications)]
impl ::core::marker::Copy for Fieldless { }
#[automatically_derived]
#[allow(unused_qualifications)]
impl ::core::fmt::Debug for Fieldless {
    fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
        match &*self {
            &Fieldless::A => ::core::fmt::Formatter::write_str(f, "A"),
            &Fieldless::B => ::core::fmt::Formatter::write_str(f, "B"),
            &Fieldless::C => ::core::fmt::Formatter::write_str(f, "C"),
        }
    }
}
#[automatically_derived]
#[allow(unused_qualifications)]
impl ::core::default::Default for Fieldless {
    #[inline]
    fn default() -> Fieldless { Self::A }
}
#[automatically_derived]
#[allow(unused_qualifications)]
impl ::core::hash::Hash for Fieldless {
    fn hash<__H: ::core::hash::Hasher>(&self, state: &mut __H) -> () {
        match &*self {
            _ => {
                ::core::hash::Hash::hash(&::core::intrinsics::discriminant_value(self),
                    state)
            }
        }
    }
}
impl ::core::marker::StructuralPartialEq for Fieldless {}
#[automatically_derived]
#[allow(unused_qualifications)]
impl ::core::cmp::PartialEq for Fieldless {
    #[inline]
    fn eq(&self, other: &Fieldless) -> bool {
        let __self_vi = ::core::intrinsics::discriminant_value(&*self);
        let __arg_1_vi = ::core::intrinsics::discriminant_value(&*other);
        if __self_vi == __arg_1_vi {
            match (&*self, &*other) { _ => true, }
        } else { false }
    }
}
impl ::core::marker::StructuralEq for Fieldless {}
#[automatically_derived]
#[allow(unused_qualifications)]
impl ::core::cmp::Eq for Fieldless {
    #[inline]
    #[doc(hidden)]
    #[no_coverage]
    fn assert_receiver_is_total_eq(&self) -> () {}
}
#[automatically_derived]
#[allow(unused_qualifications)]
impl ::core::cmp::PartialOrd for Fieldless {
    #[inline]
    fn partial_cmp(&self, other: &Fieldless)
        -> ::core::option::Option<::core::cmp::Ordering> {
        let __self_vi = ::core::intrinsics::discriminant_value(&*self);
        let __arg_1_vi = ::core::intrinsics::discriminant_value(&*other);
        if __self_vi == __arg_1_vi {
            match (&*self, &*other) {
                _ =>
                    ::core::option::Option::Some(::core::cmp::Ordering::Equal),
            }
        } else {
            ::core::cmp::PartialOrd::partial_cmp(&__self_vi, &__arg_1_vi)
        }
    }
}
#[automatically_derived]
#[allow(unused_qualifications)]
impl ::core::cmp::Ord for Fieldless {
    #[inline]
    fn cmp(&self, other: &Fieldless) -> ::core::cmp::Ordering {
        let __self_vi = ::core::intrinsics::discriminant_value(&*self);
        let __arg_1_vi = ::core::intrinsics::discriminant_value(&*other);
        if __self_vi == __arg_1_vi {
            match (&*self, &*other) { _ => ::core::cmp::Ordering::Equal, }
        } else { ::core::cmp::Ord::cmp(&__self_vi, &__arg_1_vi) }
    }
}

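// Hand-written illustration (not derive-generated output): a minimal sketch
// of the impls above. Equality and ordering for `Fieldless` reduce to
// comparing discriminant values, and `#[default]` makes `A` the `Default`
// value. The helper name is made up for this sketch.
fn _fieldless_demo() {
    assert!(Fieldless::default() == Fieldless::A);
    assert!(Fieldless::A < Fieldless::B && Fieldless::B < Fieldless::C);
}
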
// An enum with multiple fieldless and fielded variants.
enum Mixed {

    #[default]
    P,
    Q,
    R(u32),
    S {
        d1: u32,
        d2: u32,
    },
}
#[automatically_derived]
#[allow(unused_qualifications)]
impl ::core::clone::Clone for Mixed {
    #[inline]
    fn clone(&self) -> Mixed {
        let _: ::core::clone::AssertParamIsClone<u32>;
        *self
    }
}
#[automatically_derived]
#[allow(unused_qualifications)]
impl ::core::marker::Copy for Mixed { }
#[automatically_derived]
#[allow(unused_qualifications)]
impl ::core::fmt::Debug for Mixed {
    fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
        match &*self {
            &Mixed::P => ::core::fmt::Formatter::write_str(f, "P"),
            &Mixed::Q => ::core::fmt::Formatter::write_str(f, "Q"),
            &Mixed::R(ref __self_0) =>
                ::core::fmt::Formatter::debug_tuple_field1_finish(f, "R",
                    &&*__self_0),
            &Mixed::S { d1: ref __self_0, d2: ref __self_1 } =>
                ::core::fmt::Formatter::debug_struct_field2_finish(f, "S",
                    "d1", &&*__self_0, "d2", &&*__self_1),
        }
    }
}
#[automatically_derived]
#[allow(unused_qualifications)]
impl ::core::default::Default for Mixed {
    #[inline]
    fn default() -> Mixed { Self::P }
}
#[automatically_derived]
#[allow(unused_qualifications)]
impl ::core::hash::Hash for Mixed {
    fn hash<__H: ::core::hash::Hasher>(&self, state: &mut __H) -> () {
        match &*self {
            &Mixed::R(ref __self_0) => {
                ::core::hash::Hash::hash(&::core::intrinsics::discriminant_value(self),
                    state);
                ::core::hash::Hash::hash(&*__self_0, state)
            }
            &Mixed::S { d1: ref __self_0, d2: ref __self_1 } => {
                ::core::hash::Hash::hash(&::core::intrinsics::discriminant_value(self),
                    state);
                ::core::hash::Hash::hash(&*__self_0, state);
                ::core::hash::Hash::hash(&*__self_1, state)
            }
            _ => {
                ::core::hash::Hash::hash(&::core::intrinsics::discriminant_value(self),
                    state)
            }
        }
    }
}
impl ::core::marker::StructuralPartialEq for Mixed {}
#[automatically_derived]
#[allow(unused_qualifications)]
impl ::core::cmp::PartialEq for Mixed {
    #[inline]
    fn eq(&self, other: &Mixed) -> bool {
        let __self_vi = ::core::intrinsics::discriminant_value(&*self);
        let __arg_1_vi = ::core::intrinsics::discriminant_value(&*other);
        if __self_vi == __arg_1_vi {
            match (&*self, &*other) {
                (&Mixed::R(ref __self_0), &Mixed::R(ref __arg_1_0)) =>
                    *__self_0 == *__arg_1_0,
                (&Mixed::S { d1: ref __self_0, d2: ref __self_1 },
                 &Mixed::S { d1: ref __arg_1_0, d2: ref __arg_1_1 }) =>
                    *__self_0 == *__arg_1_0 && *__self_1 == *__arg_1_1,
                _ => true,
            }
        } else { false }
    }
    #[inline]
    fn ne(&self, other: &Mixed) -> bool {
        let __self_vi = ::core::intrinsics::discriminant_value(&*self);
        let __arg_1_vi = ::core::intrinsics::discriminant_value(&*other);
        if __self_vi == __arg_1_vi {
            match (&*self, &*other) {
                (&Mixed::R(ref __self_0), &Mixed::R(ref __arg_1_0)) =>
                    *__self_0 != *__arg_1_0,
                (&Mixed::S { d1: ref __self_0, d2: ref __self_1 },
                 &Mixed::S { d1: ref __arg_1_0, d2: ref __arg_1_1 }) =>
                    *__self_0 != *__arg_1_0 || *__self_1 != *__arg_1_1,
                _ => false,
            }
        } else { true }
    }
}
impl ::core::marker::StructuralEq for Mixed {}
#[automatically_derived]
#[allow(unused_qualifications)]
impl ::core::cmp::Eq for Mixed {
    #[inline]
    #[doc(hidden)]
    #[no_coverage]
    fn assert_receiver_is_total_eq(&self) -> () {
        let _: ::core::cmp::AssertParamIsEq<u32>;
    }
}
#[automatically_derived]
#[allow(unused_qualifications)]
impl ::core::cmp::PartialOrd for Mixed {
    #[inline]
    fn partial_cmp(&self, other: &Mixed)
        -> ::core::option::Option<::core::cmp::Ordering> {
        let __self_vi = ::core::intrinsics::discriminant_value(&*self);
        let __arg_1_vi = ::core::intrinsics::discriminant_value(&*other);
        if __self_vi == __arg_1_vi {
            match (&*self, &*other) {
                (&Mixed::R(ref __self_0), &Mixed::R(ref __arg_1_0)) =>
                    ::core::cmp::PartialOrd::partial_cmp(&*__self_0,
                        &*__arg_1_0),
                (&Mixed::S { d1: ref __self_0, d2: ref __self_1 },
                 &Mixed::S { d1: ref __arg_1_0, d2: ref __arg_1_1 }) =>
                    match ::core::cmp::PartialOrd::partial_cmp(&*__self_0,
                            &*__arg_1_0) {
                        ::core::option::Option::Some(::core::cmp::Ordering::Equal)
                            =>
                            ::core::cmp::PartialOrd::partial_cmp(&*__self_1,
                                &*__arg_1_1),
                        cmp => cmp,
                    },
                _ =>
                    ::core::option::Option::Some(::core::cmp::Ordering::Equal),
            }
        } else {
            ::core::cmp::PartialOrd::partial_cmp(&__self_vi, &__arg_1_vi)
        }
    }
}
#[automatically_derived]
#[allow(unused_qualifications)]
impl ::core::cmp::Ord for Mixed {
    #[inline]
    fn cmp(&self, other: &Mixed) -> ::core::cmp::Ordering {
        let __self_vi = ::core::intrinsics::discriminant_value(&*self);
        let __arg_1_vi = ::core::intrinsics::discriminant_value(&*other);
        if __self_vi == __arg_1_vi {
            match (&*self, &*other) {
                (&Mixed::R(ref __self_0), &Mixed::R(ref __arg_1_0)) =>
                    ::core::cmp::Ord::cmp(&*__self_0, &*__arg_1_0),
                (&Mixed::S { d1: ref __self_0, d2: ref __self_1 },
                 &Mixed::S { d1: ref __arg_1_0, d2: ref __arg_1_1 }) =>
                    match ::core::cmp::Ord::cmp(&*__self_0, &*__arg_1_0) {
                        ::core::cmp::Ordering::Equal =>
                            ::core::cmp::Ord::cmp(&*__self_1, &*__arg_1_1),
                        cmp => cmp,
                    },
                _ => ::core::cmp::Ordering::Equal,
            }
        } else { ::core::cmp::Ord::cmp(&__self_vi, &__arg_1_vi) }
    }
}

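// Hand-written illustration (not derive-generated output): the comparison
// impls above compare discriminants first, so any `R(..)` value sorts before
// any `S { .. }` value, and fields are only consulted when both sides are the
// same variant. The helper name is made up for this sketch.
fn _mixed_demo() {
    assert!(Mixed::R(10) < Mixed::S { d1: 0, d2: 0 });
    assert!(Mixed::S { d1: 1, d2: 2 } < Mixed::S { d1: 1, d2: 3 });
}
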
// An enum with no fieldless variants. Note that `Default` cannot be derived
// for this enum.
enum Fielded { X(u32), Y(bool), Z(Option<i32>), }
#[automatically_derived]
#[allow(unused_qualifications)]
impl ::core::clone::Clone for Fielded {
    #[inline]
    fn clone(&self) -> Fielded {
        match &*self {
            &Fielded::X(ref __self_0) =>
                Fielded::X(::core::clone::Clone::clone(&*__self_0)),
            &Fielded::Y(ref __self_0) =>
                Fielded::Y(::core::clone::Clone::clone(&*__self_0)),
            &Fielded::Z(ref __self_0) =>
                Fielded::Z(::core::clone::Clone::clone(&*__self_0)),
        }
    }
}
#[automatically_derived]
#[allow(unused_qualifications)]
impl ::core::fmt::Debug for Fielded {
    fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
        match &*self {
            &Fielded::X(ref __self_0) =>
                ::core::fmt::Formatter::debug_tuple_field1_finish(f, "X",
                    &&*__self_0),
            &Fielded::Y(ref __self_0) =>
                ::core::fmt::Formatter::debug_tuple_field1_finish(f, "Y",
                    &&*__self_0),
            &Fielded::Z(ref __self_0) =>
                ::core::fmt::Formatter::debug_tuple_field1_finish(f, "Z",
                    &&*__self_0),
        }
    }
}
#[automatically_derived]
#[allow(unused_qualifications)]
impl ::core::hash::Hash for Fielded {
    fn hash<__H: ::core::hash::Hasher>(&self, state: &mut __H) -> () {
        match &*self {
            &Fielded::X(ref __self_0) => {
                ::core::hash::Hash::hash(&::core::intrinsics::discriminant_value(self),
                    state);
                ::core::hash::Hash::hash(&*__self_0, state)
            }
            &Fielded::Y(ref __self_0) => {
                ::core::hash::Hash::hash(&::core::intrinsics::discriminant_value(self),
                    state);
                ::core::hash::Hash::hash(&*__self_0, state)
            }
            &Fielded::Z(ref __self_0) => {
                ::core::hash::Hash::hash(&::core::intrinsics::discriminant_value(self),
                    state);
                ::core::hash::Hash::hash(&*__self_0, state)
            }
        }
    }
}
impl ::core::marker::StructuralPartialEq for Fielded {}
#[automatically_derived]
#[allow(unused_qualifications)]
impl ::core::cmp::PartialEq for Fielded {
    #[inline]
    fn eq(&self, other: &Fielded) -> bool {
        let __self_vi = ::core::intrinsics::discriminant_value(&*self);
        let __arg_1_vi = ::core::intrinsics::discriminant_value(&*other);
        if __self_vi == __arg_1_vi {
            match (&*self, &*other) {
                (&Fielded::X(ref __self_0), &Fielded::X(ref __arg_1_0)) =>
                    *__self_0 == *__arg_1_0,
                (&Fielded::Y(ref __self_0), &Fielded::Y(ref __arg_1_0)) =>
                    *__self_0 == *__arg_1_0,
                (&Fielded::Z(ref __self_0), &Fielded::Z(ref __arg_1_0)) =>
                    *__self_0 == *__arg_1_0,
                _ => unsafe { ::core::intrinsics::unreachable() }
            }
        } else { false }
    }
    #[inline]
    fn ne(&self, other: &Fielded) -> bool {
        let __self_vi = ::core::intrinsics::discriminant_value(&*self);
        let __arg_1_vi = ::core::intrinsics::discriminant_value(&*other);
        if __self_vi == __arg_1_vi {
            match (&*self, &*other) {
                (&Fielded::X(ref __self_0), &Fielded::X(ref __arg_1_0)) =>
                    *__self_0 != *__arg_1_0,
                (&Fielded::Y(ref __self_0), &Fielded::Y(ref __arg_1_0)) =>
                    *__self_0 != *__arg_1_0,
                (&Fielded::Z(ref __self_0), &Fielded::Z(ref __arg_1_0)) =>
                    *__self_0 != *__arg_1_0,
                _ => unsafe { ::core::intrinsics::unreachable() }
            }
        } else { true }
    }
}
impl ::core::marker::StructuralEq for Fielded {}
#[automatically_derived]
#[allow(unused_qualifications)]
impl ::core::cmp::Eq for Fielded {
    #[inline]
    #[doc(hidden)]
    #[no_coverage]
    fn assert_receiver_is_total_eq(&self) -> () {
        let _: ::core::cmp::AssertParamIsEq<u32>;
        let _: ::core::cmp::AssertParamIsEq<bool>;
        let _: ::core::cmp::AssertParamIsEq<Option<i32>>;
    }
}
#[automatically_derived]
#[allow(unused_qualifications)]
impl ::core::cmp::PartialOrd for Fielded {
    #[inline]
    fn partial_cmp(&self, other: &Fielded)
        -> ::core::option::Option<::core::cmp::Ordering> {
        let __self_vi = ::core::intrinsics::discriminant_value(&*self);
        let __arg_1_vi = ::core::intrinsics::discriminant_value(&*other);
        if __self_vi == __arg_1_vi {
            match (&*self, &*other) {
                (&Fielded::X(ref __self_0), &Fielded::X(ref __arg_1_0)) =>
                    ::core::cmp::PartialOrd::partial_cmp(&*__self_0,
                        &*__arg_1_0),
                (&Fielded::Y(ref __self_0), &Fielded::Y(ref __arg_1_0)) =>
                    ::core::cmp::PartialOrd::partial_cmp(&*__self_0,
                        &*__arg_1_0),
                (&Fielded::Z(ref __self_0), &Fielded::Z(ref __arg_1_0)) =>
                    ::core::cmp::PartialOrd::partial_cmp(&*__self_0,
                        &*__arg_1_0),
                _ => unsafe { ::core::intrinsics::unreachable() }
            }
        } else {
            ::core::cmp::PartialOrd::partial_cmp(&__self_vi, &__arg_1_vi)
        }
    }
}
#[automatically_derived]
#[allow(unused_qualifications)]
impl ::core::cmp::Ord for Fielded {
    #[inline]
    fn cmp(&self, other: &Fielded) -> ::core::cmp::Ordering {
        let __self_vi = ::core::intrinsics::discriminant_value(&*self);
        let __arg_1_vi = ::core::intrinsics::discriminant_value(&*other);
        if __self_vi == __arg_1_vi {
            match (&*self, &*other) {
                (&Fielded::X(ref __self_0), &Fielded::X(ref __arg_1_0)) =>
                    ::core::cmp::Ord::cmp(&*__self_0, &*__arg_1_0),
                (&Fielded::Y(ref __self_0), &Fielded::Y(ref __arg_1_0)) =>
                    ::core::cmp::Ord::cmp(&*__self_0, &*__arg_1_0),
                (&Fielded::Z(ref __self_0), &Fielded::Z(ref __arg_1_0)) =>
                    ::core::cmp::Ord::cmp(&*__self_0, &*__arg_1_0),
                _ => unsafe { ::core::intrinsics::unreachable() }
            }
        } else { ::core::cmp::Ord::cmp(&__self_vi, &__arg_1_vi) }
    }
}

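// Hand-written illustration (not derive-generated output): even with no
// fieldless variants the derived order is total, because the impls above fall
// back to comparing discriminant values across variants; fields only decide
// the result within a single variant. The helper name is made up for this
// sketch.
fn _fielded_demo() {
    assert!(Fielded::X(5) != Fielded::Y(false));
    assert!(Fielded::X(u32::MAX) < Fielded::Y(false));
    assert!(Fielded::Z(None) < Fielded::Z(Some(0)));
}
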
// A union. Most builtin traits are not derivable for unions.
pub union Union {
    pub b: bool,
    pub u: u32,
    pub i: i32,
}
#[automatically_derived]
#[allow(unused_qualifications)]
impl ::core::clone::Clone for Union {
    #[inline]
    fn clone(&self) -> Union {
        let _: ::core::clone::AssertParamIsCopy<Self>;
        *self
    }
}
#[automatically_derived]
#[allow(unused_qualifications)]
impl ::core::marker::Copy for Union { }

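// Hand-written illustration (not derive-generated output): only `Clone` and
// `Copy` are derived for `Union` above, and `Clone::clone` simply copies the
// whole value; reading a field back out of either copy still requires
// `unsafe`. The helper name is made up for this sketch.
fn _union_demo() {
    let a = Union { u: 7 };
    let b = a.clone();
    assert!(unsafe { a.u == b.u });
}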