Auto merge of #71264 - Dylan-DPC:rollup-njgbey7, r=Dylan-DPC

Rollup of 6 pull requests

Successful merges:

 - #70467 (Use `call` instead of `invoke` for functions that cannot unwind)
 - #71070 (rustbuild: Remove stage 0 LLD flavor workaround for MSVC)
 - #71167 (big-O notation: parenthesis for function calls, explicit multiplication)
 - #71238 (Miri: fix typo)
 - #71242 (Format Mailmap To Work With GitHub)
 - #71243 (Account for use of `try!()` in 2018 edition and guide users in the right direction)

Failed merges:

r? @ghost
bors 2020-04-17 21:57:06 +00:00
commit cff9a758ae
20 changed files with 160 additions and 81 deletions

View File

@ -133,7 +133,7 @@ João Oliveira <hello@jxs.pt> joaoxsouls <joaoxsouls@gmail.com>
Johann Hofmann <git@johann-hofmann.com> Johann <git@johann-hofmann.com>
John Clements <clements@racket-lang.org> <clements@brinckerhoff.org>
John Hodge <acessdev@gmail.com> John Hodge <tpg@mutabah.net>
John Kre Alsaker <john.kare.alsaker@gmail.com>
John Kåre Alsaker <john.kare.alsaker@gmail.com>
John Talling <inrustwetrust@users.noreply.github.com>
Jonathan Bailey <jbailey@mozilla.com> <jbailey@jbailey-20809.local>
Jonathan S <gereeter@gmail.com> Jonathan S <gereeter+code@gmail.com>
@ -153,7 +153,7 @@ Laurențiu Nicola <lnicola@dend.ro>
Lee Jeffery <leejeffery@gmail.com> Lee Jeffery <lee@leejeffery.co.uk>
Lee Wondong <wdlee91@gmail.com>
Lennart Kudling <github@kudling.de>
Lo Testard <leo.testard@gmail.com>
Léo Testard <leo.testard@gmail.com>
Lindsey Kuper <lindsey@composition.al> <lindsey@rockstargirl.org>
Lindsey Kuper <lindsey@composition.al> <lkuper@mozilla.com>
Luke Metz <luke.metz@students.olin.edu>

View File

@ -134,11 +134,6 @@ fn main() {
cmd.arg(format!("-Clinker={}", host_linker));
}
// Override linker flavor if necessary.
if let Ok(host_linker_flavor) = env::var("RUSTC_HOST_LINKER_FLAVOR") {
cmd.arg(format!("-Clinker-flavor={}", host_linker_flavor));
}
if let Ok(s) = env::var("RUSTC_HOST_CRT_STATIC") {
if s == "true" {
cmd.arg("-C").arg("target-feature=+crt-static");

View File

@ -969,27 +969,11 @@ impl<'a> Builder<'a> {
// See https://github.com/rust-lang/rust/issues/68647.
let can_use_lld = mode != Mode::Std;
// FIXME: The beta compiler doesn't pick the `lld-link` flavor for `*-pc-windows-msvc`
// Remove `RUSTC_HOST_LINKER_FLAVOR` when this is fixed
let lld_linker_flavor = |linker: &Path, target: Interned<String>| {
compiler.stage == 0
&& linker.file_name() == Some(OsStr::new("rust-lld"))
&& target.contains("pc-windows-msvc")
};
if let Some(host_linker) = self.linker(compiler.host, can_use_lld) {
if lld_linker_flavor(host_linker, compiler.host) {
cargo.env("RUSTC_HOST_LINKER_FLAVOR", "lld-link");
}
cargo.env("RUSTC_HOST_LINKER", host_linker);
}
if let Some(target_linker) = self.linker(target, can_use_lld) {
if lld_linker_flavor(target_linker, target) {
rustflags.arg("-Clinker-flavor=lld-link");
}
let target = crate::envify(&target);
cargo.env(&format!("CARGO_TARGET_{}_LINKER", target), target_linker);
}

View File

@ -1,10 +1,10 @@
//! A priority queue implemented with a binary heap.
//!
//! Insertion and popping the largest element have `O(log n)` time complexity.
//! Insertion and popping the largest element have `O(log(n))` time complexity.
//! Checking the largest element is `O(1)`. Converting a vector to a binary heap
//! can be done in-place, and has `O(n)` complexity. A binary heap can also be
//! converted to a sorted vector in-place, allowing it to be used for an `O(n
//! log n)` in-place heapsort.
//! converted to a sorted vector in-place, allowing it to be used for an `O(n * log(n))`
//! in-place heapsort.
//!
//! # Examples
//!
@ -233,9 +233,9 @@ use super::SpecExtend;
///
/// # Time complexity
///
/// | [push] | [pop] | [peek]/[peek\_mut] |
/// |--------|----------|--------------------|
/// | O(1)~ | O(log n) | O(1) |
/// | [push] | [pop] | [peek]/[peek\_mut] |
/// |--------|-----------|--------------------|
/// | O(1)~ | O(log(n)) | O(1) |
///
/// The value for `push` is an expected cost; the method documentation gives a
/// more detailed analysis.
@ -398,7 +398,7 @@ impl<T: Ord> BinaryHeap<T> {
///
/// # Time complexity
///
/// Cost is O(1) in the worst case.
/// Cost is `O(1)` in the worst case.
#[stable(feature = "binary_heap_peek_mut", since = "1.12.0")]
pub fn peek_mut(&mut self) -> Option<PeekMut<'_, T>> {
if self.is_empty() { None } else { Some(PeekMut { heap: self, sift: true }) }
@ -422,8 +422,7 @@ impl<T: Ord> BinaryHeap<T> {
///
/// # Time complexity
///
/// The worst case cost of `pop` on a heap containing *n* elements is O(log
/// n).
/// The worst case cost of `pop` on a heap containing *n* elements is `O(log(n))`.
#[stable(feature = "rust1", since = "1.0.0")]
pub fn pop(&mut self) -> Option<T> {
self.data.pop().map(|mut item| {
@ -456,15 +455,15 @@ impl<T: Ord> BinaryHeap<T> {
///
/// The expected cost of `push`, averaged over every possible ordering of
/// the elements being pushed, and over a sufficiently large number of
/// pushes, is O(1). This is the most meaningful cost metric when pushing
/// pushes, is `O(1)`. This is the most meaningful cost metric when pushing
/// elements that are *not* already in any sorted pattern.
///
/// The time complexity degrades if elements are pushed in predominantly
/// ascending order. In the worst case, elements are pushed in ascending
/// sorted order and the amortized cost per push is O(log n) against a heap
/// sorted order and the amortized cost per push is `O(log(n))` against a heap
/// containing *n* elements.
///
/// The worst case cost of a *single* call to `push` is O(n). The worst case
/// The worst case cost of a *single* call to `push` is `O(n)`. The worst case
/// occurs when capacity is exhausted and needs a resize. The resize cost
/// has been amortized in the previous figures.
#[stable(feature = "rust1", since = "1.0.0")]
@ -623,7 +622,7 @@ impl<T: Ord> BinaryHeap<T> {
// `rebuild` takes O(len1 + len2) operations
// and about 2 * (len1 + len2) comparisons in the worst case
// while `extend` takes O(len2 * log_2(len1)) operations
// while `extend` takes O(len2 * log(len1)) operations
// and about 1 * len2 * log_2(len1) comparisons in the worst case,
// assuming len1 >= len2.
#[inline]
@ -644,7 +643,7 @@ impl<T: Ord> BinaryHeap<T> {
/// The remaining elements will be removed on drop in heap order.
///
/// Note:
/// * `.drain_sorted()` is O(n lg n); much slower than `.drain()`.
/// * `.drain_sorted()` is `O(n * log(n))`; much slower than `.drain()`.
/// You should use the latter for most cases.
///
/// # Examples
@ -729,7 +728,7 @@ impl<T> BinaryHeap<T> {
///
/// # Time complexity
///
/// Cost is O(1) in the worst case.
/// Cost is `O(1)` in the worst case.
#[stable(feature = "rust1", since = "1.0.0")]
pub fn peek(&self) -> Option<&T> {
self.data.get(0)
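The complexity figures documented above fit together in one short program: repeated `push` is expected `O(1)` per element, `peek` is `O(1)`, `pop` is `O(log(n))`, and draining a heap back into a sorted vector is the `O(n * log(n))` in-place heapsort the module docs mention. A minimal sketch, not part of this diff:

```rust
use std::collections::BinaryHeap;

fn main() {
    // Building the heap by repeated `push` has expected O(1) cost per element.
    let mut heap = BinaryHeap::new();
    for &x in &[3, 1, 4, 1, 5, 9, 2, 6] {
        heap.push(x);
    }

    // `peek` is O(1); `pop` removes the maximum in O(log(n)).
    assert_eq!(heap.peek(), Some(&9));
    assert_eq!(heap.pop(), Some(9));

    // Converting a vector into a heap is O(n); turning it back into a sorted
    // vector is the O(n * log(n)) in-place heapsort described above.
    let sorted = BinaryHeap::from(vec![3, 1, 4, 1, 5]).into_sorted_vec();
    assert_eq!(sorted, vec![1, 1, 3, 4, 5]);
}
```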

View File

@ -40,7 +40,7 @@ use UnderflowResult::*;
/// performance on *small* nodes of elements which are cheap to compare. However in the future we
/// would like to further explore choosing the optimal search strategy based on the choice of B,
/// and possibly other factors. Using linear search, searching for a random element is expected
/// to take O(B log<sub>B</sub>n) comparisons, which is generally worse than a BST. In practice,
/// to take O(B * log(n)) comparisons, which is generally worse than a BST. In practice,
/// however, performance is excellent.
///
/// It is a logic error for a key to be modified in such a way that the key's ordering relative to

View File

@ -390,7 +390,7 @@ impl<T> LinkedList<T> {
/// This reuses all the nodes from `other` and moves them into `self`. After
/// this operation, `other` becomes empty.
///
/// This operation should compute in O(1) time and O(1) memory.
/// This operation should compute in `O(1)` time and `O(1)` memory.
///
/// # Examples
///
@ -547,7 +547,7 @@ impl<T> LinkedList<T> {
/// Returns `true` if the `LinkedList` is empty.
///
/// This operation should compute in O(1) time.
/// This operation should compute in `O(1)` time.
///
/// # Examples
///
@ -568,7 +568,7 @@ impl<T> LinkedList<T> {
/// Returns the length of the `LinkedList`.
///
/// This operation should compute in O(1) time.
/// This operation should compute in `O(1)` time.
///
/// # Examples
///
@ -594,7 +594,7 @@ impl<T> LinkedList<T> {
/// Removes all elements from the `LinkedList`.
///
/// This operation should compute in O(n) time.
/// This operation should compute in `O(n)` time.
///
/// # Examples
///
@ -737,7 +737,7 @@ impl<T> LinkedList<T> {
/// Adds an element first in the list.
///
/// This operation should compute in O(1) time.
/// This operation should compute in `O(1)` time.
///
/// # Examples
///
@ -760,7 +760,7 @@ impl<T> LinkedList<T> {
/// Removes the first element and returns it, or `None` if the list is
/// empty.
///
/// This operation should compute in O(1) time.
/// This operation should compute in `O(1)` time.
///
/// # Examples
///
@ -783,7 +783,7 @@ impl<T> LinkedList<T> {
/// Appends an element to the back of a list.
///
/// This operation should compute in O(1) time.
/// This operation should compute in `O(1)` time.
///
/// # Examples
///
@ -803,7 +803,7 @@ impl<T> LinkedList<T> {
/// Removes the last element from a list and returns it, or `None` if
/// it is empty.
///
/// This operation should compute in O(1) time.
/// This operation should compute in `O(1)` time.
///
/// # Examples
///
@ -824,7 +824,7 @@ impl<T> LinkedList<T> {
/// Splits the list into two at the given index. Returns everything after the given index,
/// including the index.
///
/// This operation should compute in O(n) time.
/// This operation should compute in `O(n)` time.
///
/// # Panics
///
@ -880,7 +880,7 @@ impl<T> LinkedList<T> {
/// Removes the element at the given index and returns it.
///
/// This operation should compute in O(n) time.
/// This operation should compute in `O(n)` time.
///
/// # Panics
/// Panics if at >= len
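The `O(1)` versus `O(n)` split in the docs above comes down to which operations only touch the ends of the list (`push_front`, `push_back`, `pop_front`, `pop_back`, `append`) and which must walk to an index (`split_off`, `remove`). A minimal sketch, not part of this diff:

```rust
use std::collections::LinkedList;

fn main() {
    let mut list: LinkedList<u32> = (1..=5).collect();

    // O(1): only the head/tail pointers are touched.
    list.push_front(0);
    list.push_back(6);
    assert_eq!(list.pop_front(), Some(0));

    // O(1): `append` relinks `other`'s nodes onto `self` and leaves `other` empty.
    let mut other: LinkedList<u32> = (7..=9).collect();
    list.append(&mut other);
    assert!(other.is_empty());

    // O(n): `split_off` must walk to index 4 before cutting the list in two.
    let tail = list.split_off(4);
    assert_eq!(list.len(), 4);
    assert_eq!(tail.front(), Some(&5));
}
```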

View File

@ -1391,7 +1391,7 @@ impl<T> VecDeque<T> {
/// Removes an element from anywhere in the `VecDeque` and returns it,
/// replacing it with the first element.
///
/// This does not preserve ordering, but is O(1).
/// This does not preserve ordering, but is `O(1)`.
///
/// Returns `None` if `index` is out of bounds.
///
@ -1426,7 +1426,7 @@ impl<T> VecDeque<T> {
/// Removes an element from anywhere in the `VecDeque` and returns it, replacing it with the
/// last element.
///
/// This does not preserve ordering, but is O(1).
/// This does not preserve ordering, but is `O(1)`.
///
/// Returns `None` if `index` is out of bounds.
///
@ -2927,7 +2927,7 @@ impl<T> From<VecDeque<T>> for Vec<T> {
/// [`Vec<T>`]: crate::vec::Vec
/// [`VecDeque<T>`]: crate::collections::VecDeque
///
/// This never needs to re-allocate, but does need to do O(n) data movement if
/// This never needs to re-allocate, but does need to do `O(n)` data movement if
/// the circular buffer doesn't happen to be at the beginning of the allocation.
///
/// # Examples
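Both `swap_remove_front` and `swap_remove_back` trade ordering for constant time: instead of shifting elements, they swap the target with one end of the deque and pop it. A minimal sketch of that tradeoff and of the `O(n)`-data-movement `Vec` conversion, not part of this diff:

```rust
use std::collections::VecDeque;

fn main() {
    let mut deque = VecDeque::from(vec!['a', 'b', 'c', 'd', 'e']);

    // O(1): the element at index 1 is swapped with the back element and popped,
    // so the relative order of the remaining elements changes.
    assert_eq!(deque.swap_remove_back(1), Some('b'));
    assert_eq!(deque.iter().copied().collect::<Vec<_>>(), vec!['a', 'e', 'c', 'd']);

    // Out-of-bounds indices simply return None.
    assert_eq!(deque.swap_remove_back(10), None);

    // Converting to a Vec never reallocates, but may need O(n) data movement
    // to make the ring buffer contiguous.
    let v: Vec<char> = Vec::from(deque);
    assert_eq!(v, vec!['a', 'e', 'c', 'd']);
}
```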

View File

@ -165,7 +165,7 @@ mod hack {
impl<T> [T] {
/// Sorts the slice.
///
/// This sort is stable (i.e., does not reorder equal elements) and `O(n log n)` worst-case.
/// This sort is stable (i.e., does not reorder equal elements) and `O(n * log(n))` worst-case.
///
/// When applicable, unstable sorting is preferred because it is generally faster than stable
/// sorting and it doesn't allocate auxiliary memory.
@ -200,7 +200,7 @@ impl<T> [T] {
/// Sorts the slice with a comparator function.
///
/// This sort is stable (i.e., does not reorder equal elements) and `O(n log n)` worst-case.
/// This sort is stable (i.e., does not reorder equal elements) and `O(n * log(n))` worst-case.
///
/// The comparator function must define a total ordering for the elements in the slice. If
/// the ordering is not total, the order of the elements is unspecified. An order is a
@ -254,7 +254,7 @@ impl<T> [T] {
/// Sorts the slice with a key extraction function.
///
/// This sort is stable (i.e., does not reorder equal elements) and `O(m n log n)`
/// This sort is stable (i.e., does not reorder equal elements) and `O(m * n * log(n))`
/// worst-case, where the key function is `O(m)`.
///
/// For expensive key functions (e.g. functions that are not simple property accesses or
@ -297,7 +297,7 @@ impl<T> [T] {
///
/// During sorting, the key function is called only once per element.
///
/// This sort is stable (i.e., does not reorder equal elements) and `O(m n + n log n)`
/// This sort is stable (i.e., does not reorder equal elements) and `O(m * n + n * log(n))`
/// worst-case, where the key function is `O(m)`.
///
/// For simple key functions (e.g., functions that are property accesses or
@ -935,7 +935,7 @@ where
/// 1. for every `i` in `1..runs.len()`: `runs[i - 1].len > runs[i].len`
/// 2. for every `i` in `2..runs.len()`: `runs[i - 2].len > runs[i - 1].len + runs[i].len`
///
/// The invariants ensure that the total running time is `O(n log n)` worst-case.
/// The invariants ensure that the total running time is `O(n * log(n))` worst-case.
fn merge_sort<T, F>(v: &mut [T], mut is_less: F)
where
F: FnMut(&T, &T) -> bool,
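The `O(m)` key-function factor in the docs above is the reason `sort_by_cached_key` exists alongside `sort_by_key`: the former computes each key once, giving `O(m * n + n * log(n))`, while the latter may recompute the key on every comparison, giving `O(m * n * log(n))`. A minimal sketch, not part of this diff:

```rust
fn main() {
    // Plain stable sort: O(n * log(n)) worst-case, does not reorder equal elements.
    let mut v = vec![5, 4, 1, 3, 2];
    v.sort();
    assert_eq!(v, [1, 2, 3, 4, 5]);

    // Cheap key (a field access): sort_by_key is fine even though the key may be
    // recomputed O(log(n)) times per element.
    let mut pairs = vec![(3, 'c'), (1, 'a'), (2, 'b')];
    pairs.sort_by_key(|&(n, _)| n);
    assert_eq!(pairs, [(1, 'a'), (2, 'b'), (3, 'c')]);

    // Expensive key (allocates a String): sort_by_cached_key computes it once per
    // element, giving O(m * n + n * log(n)) instead of O(m * n * log(n)).
    let mut words = vec!["apple", "banana", "cherry"];
    words.sort_by_cached_key(|w| w.chars().rev().collect::<String>());
    assert_eq!(words, ["banana", "apple", "cherry"]);
}
```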

View File

@ -1606,7 +1606,7 @@ impl<T> [T] {
/// Sorts the slice, but may not preserve the order of equal elements.
///
/// This sort is unstable (i.e., may reorder equal elements), in-place
/// (i.e., does not allocate), and `O(n log n)` worst-case.
/// (i.e., does not allocate), and `O(n * log(n))` worst-case.
///
/// # Current implementation
///
@ -1642,7 +1642,7 @@ impl<T> [T] {
/// elements.
///
/// This sort is unstable (i.e., may reorder equal elements), in-place
/// (i.e., does not allocate), and `O(n log n)` worst-case.
/// (i.e., does not allocate), and `O(n * log(n))` worst-case.
///
/// The comparator function must define a total ordering for the elements in the slice. If
/// the ordering is not total, the order of the elements is unspecified. An order is a
@ -1697,7 +1697,7 @@ impl<T> [T] {
/// elements.
///
/// This sort is unstable (i.e., may reorder equal elements), in-place
/// (i.e., does not allocate), and `O(m n log n)` worst-case, where the key function is
/// (i.e., does not allocate), and `O(m * n * log(n))` worst-case, where the key function is
/// `O(m)`.
///
/// # Current implementation
@ -1957,7 +1957,7 @@ impl<T> [T] {
// over all the elements, swapping as we go so that at the end
// the elements we wish to keep are in the front, and those we
// wish to reject are at the back. We can then split the slice.
// This operation is still O(n).
// This operation is still `O(n)`.
//
// Example: We start in this state, where `r` represents "next
// read" and `w` represents "next_write`.

View File

@ -143,7 +143,7 @@ where
}
}
/// Sorts `v` using heapsort, which guarantees `O(n log n)` worst-case.
/// Sorts `v` using heapsort, which guarantees `O(n * log(n))` worst-case.
#[cold]
pub fn heapsort<T, F>(v: &mut [T], is_less: &mut F)
where
@ -621,7 +621,7 @@ where
}
// If too many bad pivot choices were made, simply fall back to heapsort in order to
// guarantee `O(n log n)` worst-case.
// guarantee `O(n * log(n))` worst-case.
if limit == 0 {
heapsort(v, is_less);
return;
@ -684,7 +684,7 @@ where
}
}
/// Sorts `v` using pattern-defeating quicksort, which is `O(n log n)` worst-case.
/// Sorts `v` using pattern-defeating quicksort, which is `O(n * log(n))` worst-case.
pub fn quicksort<T, F>(v: &mut [T], mut is_less: F)
where
F: FnMut(&T, &T) -> bool,
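From the caller's side, the heapsort fallback above is invisible: once the pivot-quality limit is exhausted, pattern-defeating quicksort switches to heapsort, so the `sort_unstable*` family simply guarantees `O(n * log(n))` worst-case while staying in place and allocation-free. A minimal usage sketch, not part of this diff:

```rust
fn main() {
    // Unstable, in-place sort: pattern-defeating quicksort with a heapsort
    // fallback when too many bad pivots are chosen, keeping the worst case
    // at O(n * log(n)).
    let mut v = vec![-5i32, 4, 1, -3, 2];
    v.sort_unstable();
    assert_eq!(v, [-5, -3, 1, 2, 4]);

    // The same guarantee holds with a caller-supplied comparator.
    v.sort_unstable_by(|a, b| b.cmp(a));
    assert_eq!(v, [4, 2, 1, -3, -5]);
}
```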

View File

@ -111,7 +111,9 @@ impl<'a, 'tcx> TerminatorCodegenHelper<'tcx> {
destination: Option<(ReturnDest<'tcx, Bx::Value>, mir::BasicBlock)>,
cleanup: Option<mir::BasicBlock>,
) {
if let Some(cleanup) = cleanup {
// If there is a cleanup block and the function we're calling can unwind, then
// do an invoke, otherwise do a call.
if let Some(cleanup) = cleanup.filter(|_| fn_abi.can_unwind) {
let ret_bx = if let Some((_, target)) = destination {
fx.blocks[target]
} else {

View File

@ -26,7 +26,7 @@ pub enum MemPlaceMeta<Tag = (), Id = AllocId> {
/// `Sized` types or unsized `extern type`
None,
/// The address of this place may not be taken. This protects the `MemPlace` from coming from
/// a ZST Operand with a backing allocation and being converted to an integer address. This
/// a ZST Operand without a backing allocation and being converted to an integer address. This
/// should be impossible, because you can't take the address of an operand, but this is a second
/// protection layer ensuring that we don't mess up.
Poison,

View File

@ -1054,6 +1054,39 @@ impl<'a> Parser<'a> {
}
}
pub(super) fn try_macro_suggestion(&mut self) -> PResult<'a, P<Expr>> {
let is_try = self.token.is_keyword(kw::Try);
let is_questionmark = self.look_ahead(1, |t| t == &token::Not); //check for !
let is_open = self.look_ahead(2, |t| t == &token::OpenDelim(token::Paren)); //check for (
if is_try && is_questionmark && is_open {
let lo = self.token.span;
self.bump(); //remove try
self.bump(); //remove !
let try_span = lo.to(self.token.span); //we take the try!( span
self.bump(); //remove (
let is_empty = self.token == token::CloseDelim(token::Paren); //check if the block is empty
self.consume_block(token::Paren, ConsumeClosingDelim::No); //eat the block
let hi = self.token.span;
self.bump(); //remove )
let mut err = self.struct_span_err(lo.to(hi), "use of deprecated `try` macro");
err.note("in the 2018 edition `try` is a reserved keyword, and the `try!()` macro is deprecated");
let prefix = if is_empty { "" } else { "alternatively, " };
if !is_empty {
err.multipart_suggestion(
"you can use the `?` operator instead",
vec![(try_span, "".to_owned()), (hi, "?".to_owned())],
Applicability::MachineApplicable,
);
}
err.span_suggestion(lo.shrink_to_lo(), &format!("{}you can still access the deprecated `try!()` macro using the \"raw identifier\" syntax", prefix), "r#".to_string(), Applicability::MachineApplicable);
err.emit();
Ok(self.mk_expr_err(lo.to(hi)))
} else {
Err(self.expected_expression_found()) // The user isn't trying to invoke the try! macro
}
}
/// Recovers a situation like `for ( $pat in $expr )`
/// and suggest writing `for $pat in $expr` instead.
///
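The `try_macro_suggestion` recovery above turns what used to be a bare parse error into the two fixes shown in its suggestions: rewrite `try!(expr)` with the `?` operator, or keep the deprecated macro via raw-identifier syntax. A minimal before/after sketch on the 2018 edition (the function names here are illustrative, not from this diff):

```rust
// On the 2018 edition `try` is a reserved keyword, so `try!(expr)` no longer
// parses; these are the two forms the new diagnostic suggests instead.

fn parse_with_question_mark(s: &str) -> Result<i32, std::num::ParseIntError> {
    // "you can use the `?` operator instead": try!(s.parse()) becomes s.parse()?
    let n = s.parse::<i32>()?;
    Ok(n)
}

fn parse_with_raw_identifier(s: &str) -> Result<i32, std::num::ParseIntError> {
    // "you can still access the deprecated `try!()` macro using the
    // raw identifier syntax": prefix the macro name with r#.
    let n = r#try!(s.parse::<i32>());
    Ok(n)
}

fn main() {
    assert_eq!(parse_with_question_mark("42"), Ok(42));
    assert_eq!(parse_with_raw_identifier("42"), Ok(42));
}
```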

View File

@ -1006,7 +1006,7 @@ impl<'a> Parser<'a> {
let expr = self.mk_expr(lo.to(self.prev_token.span), ExprKind::Lit(literal), attrs);
self.maybe_recover_from_bad_qpath(expr, true)
}
None => Err(self.expected_expression_found()),
None => self.try_macro_suggestion(),
}
}

View File

@ -110,10 +110,10 @@
//!
//! For Sets, all operations have the cost of the equivalent Map operation.
//!
//! | | get | insert | remove | predecessor | append |
//! |--------------|-----------|----------|----------|-------------|--------|
//! | [`HashMap`] | O(1)~ | O(1)~* | O(1)~ | N/A | N/A |
//! | [`BTreeMap`] | O(log n) | O(log n) | O(log n) | O(log n) | O(n+m) |
//! | | get | insert | remove | predecessor | append |
//! |--------------|-----------|-----------|-----------|-------------|--------|
//! | [`HashMap`] | O(1)~ | O(1)~* | O(1)~ | N/A | N/A |
//! | [`BTreeMap`] | O(log(n)) | O(log(n)) | O(log(n)) | O(log(n)) | O(n+m) |
//!
//! # Correct and Efficient Usage of Collections
//!
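One practical consequence of the table above: only `BTreeMap` supports ordered queries such as a predecessor lookup, at `O(log(n))`; `HashMap` has no notion of key order at all. A minimal sketch expressing the predecessor lookup with `range` (the `predecessor` helper is illustrative, not from this diff):

```rust
use std::collections::BTreeMap;

/// Largest key strictly less than `key`, i.e. the "predecessor" column above.
/// The `range(..key).next_back()` walk is O(log(n)) in a B-tree.
fn predecessor<'a, V>(map: &'a BTreeMap<u32, V>, key: u32) -> Option<(&'a u32, &'a V)> {
    map.range(..key).next_back()
}

fn main() {
    let mut map = BTreeMap::new();
    for &k in &[10u32, 20, 30, 40] {
        map.insert(k, k * k);
    }

    assert_eq!(predecessor(&map, 25), Some((&20, &400)));
    assert_eq!(predecessor(&map, 10), None); // nothing smaller than the first key
}
```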

View File

@ -43,8 +43,8 @@
//! terminator, so the buffer length is really `len+1` characters.
//! Rust strings don't have a nul terminator; their length is always
//! stored and does not need to be calculated. While in Rust
//! accessing a string's length is a O(1) operation (because the
//! length is stored); in C it is an O(length) operation because the
//! accessing a string's length is a `O(1)` operation (because the
//! length is stored); in C it is an `O(length)` operation because the
//! length needs to be computed by scanning the string for the nul
//! terminator.
//!
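A small illustration of the `O(1)` versus `O(length)` point above: a Rust `String` or `CString` already stores its length, whereas C's `strlen` must scan the buffer for the nul terminator. A minimal sketch, not part of this diff (`strlen_like` is an illustrative stand-in for the C behaviour):

```rust
use std::ffi::CString;

/// What a C `strlen` has to do: walk the buffer until the nul terminator,
/// an O(length) scan.
fn strlen_like(bytes_with_nul: &[u8]) -> usize {
    bytes_with_nul.iter().take_while(|&&b| b != 0).count()
}

fn main() {
    let s = String::from("hello");
    // O(1): the length is stored alongside the data.
    assert_eq!(s.len(), 5);

    let c = CString::new("hello").expect("no interior nul bytes");
    // `as_bytes_with_nul` exposes the terminator that C code would scan for.
    assert_eq!(c.as_bytes_with_nul(), b"hello\0");
    assert_eq!(strlen_like(c.as_bytes_with_nul()), 5);
}
```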

View File

@ -16,13 +16,13 @@ extern "C" {
#[unwind(aborts)] // FIXME(#58794)
pub unsafe extern "C" fn use_foreign_c_variadic_0() {
// Ensure that we correctly call foreign C-variadic functions.
// CHECK: invoke void (i32, ...) @foreign_c_variadic_0([[PARAM:i32( signext)?]] 0)
// CHECK: call void (i32, ...) @foreign_c_variadic_0([[PARAM:i32( signext)?]] 0)
foreign_c_variadic_0(0);
// CHECK: invoke void (i32, ...) @foreign_c_variadic_0([[PARAM]] 0, [[PARAM]] 42)
// CHECK: call void (i32, ...) @foreign_c_variadic_0([[PARAM]] 0, [[PARAM]] 42)
foreign_c_variadic_0(0, 42i32);
// CHECK: invoke void (i32, ...) @foreign_c_variadic_0([[PARAM]] 0, [[PARAM]] 42, [[PARAM]] 1024)
// CHECK: call void (i32, ...) @foreign_c_variadic_0([[PARAM]] 0, [[PARAM]] 42, [[PARAM]] 1024)
foreign_c_variadic_0(0, 42i32, 1024i32);
// CHECK: invoke void (i32, ...) @foreign_c_variadic_0([[PARAM]] 0, [[PARAM]] 42, [[PARAM]] 1024, [[PARAM]] 0)
// CHECK: call void (i32, ...) @foreign_c_variadic_0([[PARAM]] 0, [[PARAM]] 42, [[PARAM]] 1024, [[PARAM]] 0)
foreign_c_variadic_0(0, 42i32, 1024i32, 0i32);
}
@ -30,24 +30,24 @@ pub unsafe extern "C" fn use_foreign_c_variadic_0() {
// removing the "spoofed" `VaListImpl` that is used by Rust defined C-variadics.
#[unwind(aborts)] // FIXME(#58794)
pub unsafe extern "C" fn use_foreign_c_variadic_1_0(ap: VaList) {
// CHECK: invoke void ({{.*}}*, ...) @foreign_c_variadic_1({{.*}} %ap)
// CHECK: call void ({{.*}}*, ...) @foreign_c_variadic_1({{.*}} %ap)
foreign_c_variadic_1(ap);
}
#[unwind(aborts)] // FIXME(#58794)
pub unsafe extern "C" fn use_foreign_c_variadic_1_1(ap: VaList) {
// CHECK: invoke void ({{.*}}*, ...) @foreign_c_variadic_1({{.*}} %ap, [[PARAM]] 42)
// CHECK: call void ({{.*}}*, ...) @foreign_c_variadic_1({{.*}} %ap, [[PARAM]] 42)
foreign_c_variadic_1(ap, 42i32);
}
#[unwind(aborts)] // FIXME(#58794)
pub unsafe extern "C" fn use_foreign_c_variadic_1_2(ap: VaList) {
// CHECK: invoke void ({{.*}}*, ...) @foreign_c_variadic_1({{.*}} %ap, [[PARAM]] 2, [[PARAM]] 42)
// CHECK: call void ({{.*}}*, ...) @foreign_c_variadic_1({{.*}} %ap, [[PARAM]] 2, [[PARAM]] 42)
foreign_c_variadic_1(ap, 2i32, 42i32);
}
#[unwind(aborts)] // FIXME(#58794)
pub unsafe extern "C" fn use_foreign_c_variadic_1_3(ap: VaList) {
// CHECK: invoke void ({{.*}}*, ...) @foreign_c_variadic_1({{.*}} %ap, [[PARAM]] 2, [[PARAM]] 42, [[PARAM]] 0)
// CHECK: call void ({{.*}}*, ...) @foreign_c_variadic_1({{.*}} %ap, [[PARAM]] 2, [[PARAM]] 42, [[PARAM]] 0)
foreign_c_variadic_1(ap, 2i32, 42i32, 0i32);
}

View File

@ -0,0 +1,27 @@
// compile-flags: -C no-prepopulate-passes
#![feature(link_llvm_intrinsics)]
#![crate_type = "lib"]
struct A;
impl Drop for A {
fn drop(&mut self) {
println!("A");
}
}
extern {
#[link_name = "llvm.sqrt.f32"]
fn sqrt(x: f32) -> f32;
}
pub fn do_call() {
let _a = A;
unsafe {
// Ensure that we `call` LLVM intrinsics instead of trying to `invoke` them
// CHECK: call float @llvm.sqrt.f32(float 4.000000e+00
sqrt(4.0);
}
}

View File

@ -0,0 +1,9 @@
// compile-flags: --edition 2018
fn foo() -> Result<(), ()> {
Ok(try!()); //~ ERROR use of deprecated `try` macro
Ok(try!(Ok(()))) //~ ERROR use of deprecated `try` macro
}
fn main() {
let _ = foo();
}

View File

@ -0,0 +1,30 @@
error: use of deprecated `try` macro
--> $DIR/try-macro-suggestion.rs:3:8
|
LL | Ok(try!());
| ^^^^^^
|
= note: in the 2018 edition `try` is a reserved keyword, and the `try!()` macro is deprecated
help: you can still access the deprecated `try!()` macro using the "raw identifier" syntax
|
LL | Ok(r#try!());
| ^^
error: use of deprecated `try` macro
--> $DIR/try-macro-suggestion.rs:4:8
|
LL | Ok(try!(Ok(())))
| ^^^^^^^^^^^^
|
= note: in the 2018 edition `try` is a reserved keyword, and the `try!()` macro is deprecated
help: you can use the `?` operator instead
|
LL | Ok(Ok(())?)
| -- ^
help: alternatively, you can still access the deprecated `try!()` macro using the "raw identifier" syntax
|
LL | Ok(r#try!(Ok(())))
| ^^
error: aborting due to 2 previous errors