Rollup merge of #58509 - phansch:add_myself_to_clippy_toolstate_maintainers, r=oli-obk

Notify myself when Clippy toolstate changes
kennytm 2019-02-16 23:46:32 +08:00
commit 4fcee62736
20 changed files with 219 additions and 180 deletions


@ -112,6 +112,8 @@ ENV TARGETS=$TARGETS,thumbv7em-none-eabihf
ENV TARGETS=$TARGETS,thumbv8m.main-none-eabi
ENV TARGETS=$TARGETS,riscv32imc-unknown-none-elf
ENV TARGETS=$TARGETS,riscv32imac-unknown-none-elf
ENV TARGETS=$TARGETS,riscv64imac-unknown-none-elf
ENV TARGETS=$TARGETS,riscv64gc-unknown-none-elf
ENV TARGETS=$TARGETS,armebv7r-none-eabi
ENV TARGETS=$TARGETS,armebv7r-none-eabihf
ENV TARGETS=$TARGETS,armv7r-none-eabi


@ -206,6 +206,10 @@ impl<'gcx> HashStable<StableHashingContext<'gcx>> for ty::adjustment::AutoBorrow
}
}
impl_stable_hash_for!(tuple_struct ty::util::NeedsDrop { value });
impl_stable_hash_for!(tuple_struct ty::AdtSizedConstraint<'tcx> { list });
impl_stable_hash_for!(struct ty::UpvarPath { hir_id });
impl_stable_hash_for!(struct ty::UpvarId { var_path, closure_expr_id });


@ -43,7 +43,7 @@ use syntax::ast::{self, Name, Ident, NodeId};
use syntax::attr;
use syntax::ext::hygiene::Mark;
use syntax::symbol::{keywords, Symbol, LocalInternedString, InternedString};
use syntax_pos::{DUMMY_SP, Span};
use syntax_pos::Span;
use smallvec;
use rustc_data_structures::indexed_vec::{Idx, IndexVec};
@ -2379,20 +2379,7 @@ impl<'a, 'gcx, 'tcx> AdtDef {
/// Due to normalization being eager, this applies even if
/// the associated type is behind a pointer (e.g., issue #31299).
pub fn sized_constraint(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>) -> &'tcx [Ty<'tcx>] {
match tcx.try_adt_sized_constraint(DUMMY_SP, self.did) {
Ok(tys) => tys,
Err(mut bug) => {
debug!("adt_sized_constraint: {:?} is recursive", self);
// This should be reported as an error by `check_representable`.
//
// Consider the type as Sized in the meanwhile to avoid
// further errors. Delay our `bug` diagnostic here to get
// emitted later as well in case we accidentally otherwise don't
// emit an error.
bug.delay_as_bug();
tcx.intern_type_list(&[tcx.types.err])
}
}
tcx.adt_sized_constraint(self.did).0
}
fn sized_constraint_for_ty(&self,
@ -3083,6 +3070,9 @@ fn associated_item<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId) -> Asso
parent_item.node)
}
#[derive(Clone)]
pub struct AdtSizedConstraint<'tcx>(pub &'tcx [Ty<'tcx>]);
/// Calculates the `Sized` constraint.
///
/// In fact, there are only a few options for the types in the constraint:
@ -3094,7 +3084,7 @@ fn associated_item<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId) -> Asso
/// check should catch this case.
fn adt_sized_constraint<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
def_id: DefId)
-> &'tcx [Ty<'tcx>] {
-> AdtSizedConstraint<'tcx> {
let def = tcx.adt_def(def_id);
let result = tcx.mk_type_list(def.variants.iter().flat_map(|v| {
@ -3105,7 +3095,7 @@ fn adt_sized_constraint<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
debug!("adt_sized_constraint: {:?} => {:?}", def, result);
result
AdtSizedConstraint(result)
}
fn associated_item_def_ids<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
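
The comment deleted above already states the key fact: `adt_sized_constraint` can only cycle for a type that `check_representable` rejects anyway, which is why the new `cycle_delay_bug` handling (below, in the query definition) can delay the diagnostic and substitute a fallback value. As a standalone illustration, not part of the diff, this is the shape of type that produces such a cycle; it is expected to be rejected with E0072 ("recursive type has infinite size"):

// Not rustc code: a minimal program whose ADT makes the sized-constraint
// computation recurse into itself. `check_representable` reports E0072,
// so the delayed cycle bug never needs to surface.
struct List {
    value: u32,
    next: List, // no Box or reference, so the type contains itself directly
}

fn main() {}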


@ -13,6 +13,7 @@ use crate::ty::subst::Substs;
use crate::ty::query::queries;
use crate::ty::query::Query;
use crate::ty::query::QueryCache;
use crate::ty::query::plumbing::CycleError;
use crate::util::profiling::ProfileCategory;
use std::borrow::Cow;
@ -49,7 +50,7 @@ pub(super) trait QueryAccessors<'tcx>: QueryConfig<'tcx> {
result: &Self::Value
) -> Option<Fingerprint>;
fn handle_cycle_error(tcx: TyCtxt<'_, 'tcx, '_>) -> Self::Value;
fn handle_cycle_error(tcx: TyCtxt<'_, 'tcx, '_>, error: CycleError<'tcx>) -> Self::Value;
}
pub(super) trait QueryDescription<'tcx>: QueryAccessors<'tcx> {


@ -73,30 +73,12 @@ impl<'tcx> QueryJob<'tcx> {
}
/// Awaits for the query job to complete.
///
/// For single threaded rustc there's no concurrent jobs running, so if we are waiting for any
/// query that means that there is a query cycle, thus this always running a cycle error.
#[cfg(not(parallel_compiler))]
#[inline(never)]
#[cold]
pub(super) fn cycle_error<'lcx, 'a, D: QueryDescription<'tcx>>(
&self,
tcx: TyCtxt<'_, 'tcx, 'lcx>,
span: Span,
) -> TryGetJob<'a, 'tcx, D> {
TryGetJob::JobCompleted(Err(Box::new(self.find_cycle_in_stack(tcx, span))))
}
/// Awaits for the query job to complete.
///
/// For single threaded rustc there's no concurrent jobs running, so if we are waiting for any
/// query that means that there is a query cycle, thus this always running a cycle error.
#[cfg(parallel_compiler)]
pub(super) fn r#await<'lcx>(
&self,
tcx: TyCtxt<'_, 'tcx, 'lcx>,
span: Span,
) -> Result<(), Box<CycleError<'tcx>>> {
) -> Result<(), CycleError<'tcx>> {
tls::with_related_context(tcx, move |icx| {
let mut waiter = Lrc::new(QueryWaiter {
query: icx.query.clone(),
@ -111,13 +93,13 @@ impl<'tcx> QueryJob<'tcx> {
let mut cycle = waiter.cycle.lock();
match cycle.take() {
None => Ok(()),
Some(cycle) => Err(Box::new(cycle))
Some(cycle) => Err(cycle)
}
})
}
#[cfg(not(parallel_compiler))]
fn find_cycle_in_stack<'lcx>(
pub(super) fn find_cycle_in_stack<'lcx>(
&self,
tcx: TyCtxt<'_, 'tcx, 'lcx>,
span: Span,


@ -34,15 +34,15 @@ use crate::traits::query::normalize::NormalizationResult;
use crate::traits::query::outlives_bounds::OutlivesBound;
use crate::traits::specialization_graph;
use crate::traits::Clauses;
use crate::ty::{self, CrateInherentImpls, ParamEnvAnd, Ty, TyCtxt};
use crate::ty::{self, CrateInherentImpls, ParamEnvAnd, Ty, TyCtxt, AdtSizedConstraint};
use crate::ty::steal::Steal;
use crate::ty::subst::Substs;
use crate::ty::util::NeedsDrop;
use crate::util::nodemap::{DefIdSet, DefIdMap, ItemLocalSet};
use crate::util::common::{ErrorReported};
use crate::util::profiling::ProfileCategory::*;
use crate::session::Session;
use errors::DiagnosticBuilder;
use rustc_data_structures::svh::Svh;
use rustc_data_structures::bit_set::BitSet;
use rustc_data_structures::indexed_vec::IndexVec;
@ -154,7 +154,16 @@ define_queries! { <'tcx>
[] fn trait_def: TraitDefOfItem(DefId) -> &'tcx ty::TraitDef,
[] fn adt_def: AdtDefOfItem(DefId) -> &'tcx ty::AdtDef,
[] fn adt_destructor: AdtDestructor(DefId) -> Option<ty::Destructor>,
[] fn adt_sized_constraint: SizedConstraint(DefId) -> &'tcx [Ty<'tcx>],
// The cycle error here should be reported as an error by `check_representable`.
// We consider the type as Sized in the meanwhile to avoid
// further errors (done in impl Value for AdtSizedConstraint).
// Use `cycle_delay_bug` to delay the cycle error here to be emitted later
// in case we accidentally otherwise don't emit an error.
[cycle_delay_bug] fn adt_sized_constraint: SizedConstraint(
DefId
) -> AdtSizedConstraint<'tcx>,
[] fn adt_dtorck_constraint: DtorckConstraint(
DefId
) -> Result<DtorckConstraint<'tcx>, NoSolution>,
@ -411,7 +420,16 @@ define_queries! { <'tcx>
[] fn is_copy_raw: is_copy_dep_node(ty::ParamEnvAnd<'tcx, Ty<'tcx>>) -> bool,
[] fn is_sized_raw: is_sized_dep_node(ty::ParamEnvAnd<'tcx, Ty<'tcx>>) -> bool,
[] fn is_freeze_raw: is_freeze_dep_node(ty::ParamEnvAnd<'tcx, Ty<'tcx>>) -> bool,
[] fn needs_drop_raw: needs_drop_dep_node(ty::ParamEnvAnd<'tcx, Ty<'tcx>>) -> bool,
// The cycle error here should be reported as an error by `check_representable`.
// We consider the type as not needing drop in the meanwhile to avoid
// further errors (done in impl Value for NeedsDrop).
// Use `cycle_delay_bug` to delay the cycle error here to be emitted later
// in case we accidentally otherwise don't emit an error.
[cycle_delay_bug] fn needs_drop_raw: needs_drop_dep_node(
ty::ParamEnvAnd<'tcx, Ty<'tcx>>
) -> NeedsDrop,
[] fn layout_raw: layout_dep_node(ty::ParamEnvAnd<'tcx, Ty<'tcx>>)
-> Result<&'tcx ty::layout::LayoutDetails,
ty::layout::LayoutError<'tcx>>,
@ -731,32 +749,6 @@ define_queries! { <'tcx>
},
}
// `try_get_query` can't be public because it uses the private query
// implementation traits, so we provide access to it selectively.
impl<'a, 'tcx, 'lcx> TyCtxt<'a, 'tcx, 'lcx> {
pub fn try_adt_sized_constraint(
self,
span: Span,
key: DefId,
) -> Result<&'tcx [Ty<'tcx>], Box<DiagnosticBuilder<'a>>> {
self.try_get_query::<queries::adt_sized_constraint<'_>>(span, key)
}
pub fn try_needs_drop_raw(
self,
span: Span,
key: ty::ParamEnvAnd<'tcx, Ty<'tcx>>,
) -> Result<bool, Box<DiagnosticBuilder<'a>>> {
self.try_get_query::<queries::needs_drop_raw<'_>>(span, key)
}
pub fn try_optimized_mir(
self,
span: Span,
key: DefId,
) -> Result<&'tcx mir::Mir<'tcx>, Box<DiagnosticBuilder<'a>>> {
self.try_get_query::<queries::optimized_mir<'_>>(span, key)
}
}
//////////////////////////////////////////////////////////////////////
// These functions are little shims used to find the dep-node for a
// given query when there is not a *direct* mapping:


@ -19,6 +19,8 @@ use errors::FatalError;
use rustc_data_structures::fx::{FxHashMap};
use rustc_data_structures::sync::{Lrc, Lock};
use rustc_data_structures::thin_vec::ThinVec;
#[cfg(not(parallel_compiler))]
use rustc_data_structures::cold_path;
use std::mem;
use std::ptr;
use std::collections::hash_map::Entry;
@ -114,7 +116,7 @@ impl<'a, 'tcx, Q: QueryDescription<'tcx>> JobOwner<'a, 'tcx, Q> {
if let Some(value) = lock.results.get(key) {
profq_msg!(tcx, ProfileQueriesMsg::CacheHit);
tcx.sess.profiler(|p| p.record_query_hit(Q::NAME, Q::CATEGORY));
let result = Ok((value.value.clone(), value.index));
let result = (value.value.clone(), value.index);
#[cfg(debug_assertions)]
{
lock.cache_hits += 1;
@ -160,9 +162,11 @@ impl<'a, 'tcx, Q: QueryDescription<'tcx>> JobOwner<'a, 'tcx, Q> {
mem::drop(lock);
// If we are single-threaded we know that we have cycle error,
// so we just turn the errror
// so we just return the error
#[cfg(not(parallel_compiler))]
return job.cycle_error(tcx, span);
return TryGetJob::Cycle(cold_path(|| {
Q::handle_cycle_error(tcx, job.find_cycle_in_stack(tcx, span))
}));
// With parallel queries we might just have to wait on some other
// thread
@ -172,7 +176,7 @@ impl<'a, 'tcx, Q: QueryDescription<'tcx>> JobOwner<'a, 'tcx, Q> {
tcx.sess.profiler(|p| p.query_blocked_end(Q::NAME, Q::CATEGORY));
if let Err(cycle) = result {
return TryGetJob::JobCompleted(Err(cycle));
return TryGetJob::Cycle(Q::handle_cycle_error(tcx, cycle));
}
}
}
@ -238,7 +242,10 @@ pub(super) enum TryGetJob<'a, 'tcx: 'a, D: QueryDescription<'tcx> + 'a> {
/// The query was already completed.
/// Returns the result of the query and its dep node index
/// if it succeeded or a cycle error if it failed
JobCompleted(Result<(D::Value, DepNodeIndex), Box<CycleError<'tcx>>>),
JobCompleted((D::Value, DepNodeIndex)),
/// Trying to execute the query resulted in a cycle.
Cycle(D::Value),
}
impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> {
@ -279,8 +286,8 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> {
#[cold]
pub(super) fn report_cycle(
self,
box CycleError { usage, cycle: stack }: Box<CycleError<'gcx>>
) -> Box<DiagnosticBuilder<'a>>
CycleError { usage, cycle: stack }: CycleError<'gcx>
) -> DiagnosticBuilder<'a>
{
assert!(!stack.is_empty());
@ -314,7 +321,7 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> {
&format!("cycle used when {}", query.describe(self)));
}
return Box::new(err)
err
})
}
@ -346,13 +353,12 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> {
}
#[inline(never)]
fn try_get_with<Q: QueryDescription<'gcx>>(
pub(super) fn get_query<Q: QueryDescription<'gcx>>(
self,
span: Span,
key: Q::Key)
-> Result<Q::Value, Box<CycleError<'gcx>>>
{
debug!("ty::queries::{}::try_get_with(key={:?}, span={:?})",
-> Q::Value {
debug!("ty::query::get_query<{}>(key={:?}, span={:?})",
Q::NAME,
key,
span);
@ -366,11 +372,10 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> {
let job = match JobOwner::try_get(self, span, &key) {
TryGetJob::NotYetStarted(job) => job,
TryGetJob::JobCompleted(result) => {
return result.map(|(v, index)| {
self.dep_graph.read_index(index);
v
})
TryGetJob::Cycle(result) => return result,
TryGetJob::JobCompleted((v, index)) => {
self.dep_graph.read_index(index);
return v
}
};
@ -378,7 +383,7 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> {
// expensive for some DepKinds.
if !self.dep_graph.is_fully_enabled() {
let null_dep_node = DepNode::new_no_params(crate::dep_graph::DepKind::Null);
return Ok(self.force_query_with_job::<Q>(key, job, null_dep_node).0);
return self.force_query_with_job::<Q>(key, job, null_dep_node).0;
}
let dep_node = Q::to_dep_node(self, &key);
@ -407,7 +412,7 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> {
job.complete(&result, dep_node_index);
return Ok(result);
return result;
}
if !dep_node.kind.is_input() {
@ -427,13 +432,13 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> {
});
if let Some((result, dep_node_index)) = loaded {
job.complete(&result, dep_node_index);
return Ok(result);
return result;
}
}
let (result, dep_node_index) = self.force_query_with_job::<Q>(key, job, dep_node);
self.dep_graph.read_index(dep_node_index);
Ok(result)
result
}
fn load_from_disk_and_cache_in_memory<Q: QueryDescription<'gcx>>(
@ -631,57 +636,28 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> {
// Ensure that only one of them runs the query
let job = match JobOwner::try_get(self, span, &key) {
TryGetJob::NotYetStarted(job) => job,
TryGetJob::JobCompleted(result) => {
if let Err(e) = result {
self.report_cycle(e).emit();
}
TryGetJob::Cycle(_) |
TryGetJob::JobCompleted(_) => {
return
}
};
self.force_query_with_job::<Q>(key, job, dep_node);
}
pub(super) fn try_get_query<Q: QueryDescription<'gcx>>(
self,
span: Span,
key: Q::Key,
) -> Result<Q::Value, Box<DiagnosticBuilder<'a>>> {
match self.try_get_with::<Q>(span, key) {
Ok(e) => Ok(e),
Err(e) => Err(self.report_cycle(e)),
}
}
// FIXME: Try uninlining this
#[inline(always)]
pub(super) fn get_query<Q: QueryDescription<'gcx>>(
self,
span: Span,
key: Q::Key,
) -> Q::Value {
self.try_get_with::<Q>(span, key).unwrap_or_else(|e| {
self.emit_error::<Q>(e)
})
}
#[inline(never)]
#[cold]
fn emit_error<Q: QueryDescription<'gcx>>(
self,
e: Box<CycleError<'gcx>>,
) -> Q::Value {
self.report_cycle(e).emit();
Q::handle_cycle_error(self)
}
}
macro_rules! handle_cycle_error {
([][$this: expr]) => {{
Value::from_cycle_error($this.global_tcx())
([][$tcx: expr, $error:expr]) => {{
$tcx.report_cycle($error).emit();
Value::from_cycle_error($tcx.global_tcx())
}};
([fatal_cycle$(, $modifiers:ident)*][$this:expr]) => {{
$this.sess.abort_if_errors();
unreachable!();
([fatal_cycle$(, $modifiers:ident)*][$tcx:expr, $error:expr]) => {{
$tcx.report_cycle($error).emit();
$tcx.sess.abort_if_errors();
unreachable!()
}};
([cycle_delay_bug$(, $modifiers:ident)*][$tcx:expr, $error:expr]) => {{
$tcx.report_cycle($error).delay_as_bug();
Value::from_cycle_error($tcx.global_tcx())
}};
([$other:ident$(, $modifiers:ident)*][$($args:tt)*]) => {
handle_cycle_error!([$($modifiers),*][$($args)*])
@ -995,8 +971,11 @@ macro_rules! define_queries_inner {
hash_result!([$($modifiers)*][_hcx, _result])
}
fn handle_cycle_error(tcx: TyCtxt<'_, 'tcx, '_>) -> Self::Value {
handle_cycle_error!([$($modifiers)*][tcx])
fn handle_cycle_error(
tcx: TyCtxt<'_, 'tcx, '_>,
error: CycleError<'tcx>
) -> Self::Value {
handle_cycle_error!([$($modifiers)*][tcx, error])
}
})*
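
To summarize the refactor in this file: cycle errors are no longer threaded back to callers as `Result`s; `JobOwner::try_get` converts a detected cycle into `TryGetJob::Cycle(Q::handle_cycle_error(..))`, and the per-query modifier decides whether the diagnostic is emitted, treated as fatal, or delayed as a bug. The following standalone sketch is not part of the diff, and every name except `from_cycle_error` is invented for illustration; it only models the "fallback value instead of error" pattern:

// Simplified model of the new control flow: a cycle is turned into a
// per-type fallback value at the lookup site, so callers always receive
// a plain value rather than a Result.
trait Value {
    fn from_cycle_error() -> Self;
}

// Like NeedsDrop(false): assume "no drop glue needed" and keep compiling.
struct NeedsDrop(bool);

impl Value for NeedsDrop {
    fn from_cycle_error() -> Self {
        NeedsDrop(false)
    }
}

enum QueryOutcome<V> {
    // The query ran (or was cached) and produced a real result.
    Completed(V),
    // A cycle was detected; the error was reported or delayed and a
    // fallback value lets compilation continue.
    Cycle(V),
}

fn run_query<V: Value>(cycle_detected: bool, compute: impl FnOnce() -> V) -> V {
    let outcome = if cycle_detected {
        // In rustc this is where `report_cycle(..).emit()` or
        // `.delay_as_bug()` happens, depending on the query modifier.
        QueryOutcome::Cycle(V::from_cycle_error())
    } else {
        QueryOutcome::Completed(compute())
    };
    match outcome {
        QueryOutcome::Completed(v) | QueryOutcome::Cycle(v) => v,
    }
}

fn main() {
    let normal = run_query(false, || NeedsDrop(true));
    let cyclic = run_query(true, || NeedsDrop(true));
    println!("normal: {}, cyclic fallback: {}", normal.0, cyclic.0);
}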


@ -1,4 +1,5 @@
use crate::ty::{self, Ty, TyCtxt};
use crate::ty::{self, Ty, TyCtxt, AdtSizedConstraint};
use crate::ty::util::NeedsDrop;
use syntax::symbol::Symbol;
@ -31,3 +32,14 @@ impl<'tcx> Value<'tcx> for ty::SymbolName {
}
}
impl<'tcx> Value<'tcx> for NeedsDrop {
fn from_cycle_error(_: TyCtxt<'_, 'tcx, 'tcx>) -> Self {
NeedsDrop(false)
}
}
impl<'tcx> Value<'tcx> for AdtSizedConstraint<'tcx> {
fn from_cycle_error(tcx: TyCtxt<'_, 'tcx, 'tcx>) -> Self {
AdtSizedConstraint(tcx.intern_type_list(&[tcx.types.err]))
}
}


@ -755,7 +755,7 @@ impl<'a, 'tcx> ty::TyS<'tcx> {
tcx: TyCtxt<'a, 'tcx, 'tcx>,
param_env: ty::ParamEnv<'tcx>)
-> bool {
tcx.needs_drop_raw(param_env.and(self))
tcx.needs_drop_raw(param_env.and(self)).0
}
pub fn same_type(a: Ty<'tcx>, b: Ty<'tcx>) -> bool {
@ -992,29 +992,22 @@ fn is_freeze_raw<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
))
}
#[derive(Clone)]
pub struct NeedsDrop(pub bool);
fn needs_drop_raw<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
query: ty::ParamEnvAnd<'tcx, Ty<'tcx>>)
-> bool
-> NeedsDrop
{
let (param_env, ty) = query.into_parts();
let needs_drop = |ty: Ty<'tcx>| -> bool {
tcx.try_needs_drop_raw(DUMMY_SP, param_env.and(ty)).unwrap_or_else(|mut bug| {
// Cycles should be reported as an error by `check_representable`.
//
// Consider the type as not needing drop in the meanwhile to
// avoid further errors.
//
// In case we forgot to emit a bug elsewhere, delay our
// diagnostic to get emitted as a compiler bug.
bug.delay_as_bug();
false
})
tcx.needs_drop_raw(param_env.and(ty)).0
};
assert!(!ty.needs_infer());
match ty.sty {
NeedsDrop(match ty.sty {
// Fast-path for primitive types
ty::Infer(ty::FreshIntTy(_)) | ty::Infer(ty::FreshFloatTy(_)) |
ty::Bool | ty::Int(_) | ty::Uint(_) | ty::Float(_) | ty::Never |
@ -1072,7 +1065,7 @@ fn needs_drop_raw<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
def.variants.iter().any(
|variant| variant.fields.iter().any(
|field| needs_drop(field.ty(tcx, substs)))),
}
})
}
pub enum ExplicitSelf<'tcx> {
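
The `NeedsDrop` newtype introduced above wraps the same boolean that the compiler's drop-glue analysis exposes to user code through `std::mem::needs_drop`. As a standalone check, not part of the diff and using only stable std, the distinction the query computes can be observed like this:

// Not rustc code: `needs_drop` surfaces the drop-glue analysis that the
// `needs_drop_raw` query shown above implements internally.
#[allow(dead_code)]
struct Plain(u32);     // nothing in the type has a destructor
#[allow(dead_code)]
struct Owns(String);   // contains a String, which has drop glue

fn main() {
    assert!(!std::mem::needs_drop::<Plain>());
    assert!(std::mem::needs_drop::<Owns>());
    println!("needs_drop checks passed");
}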


@ -41,6 +41,12 @@ extern crate rustc_cratesio_shim;
pub use rustc_serialize::hex::ToHex;
#[inline(never)]
#[cold]
pub fn cold_path<F: FnOnce() -> R, R>(f: F) -> R {
f()
}
#[macro_export]
macro_rules! likely {
($e:expr) => {
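
`cold_path`, added above, is just an identity call whose `#[inline(never)]` and `#[cold]` attributes tell the optimizer that whatever runs inside the closure is unlikely; the query plumbing change earlier in the diff uses it for the single-threaded cycle branch. A standalone usage sketch, not part of the diff (the cache-miss scenario is invented for illustration):

use std::collections::HashMap;

// Same definition as in the diff: an identity function that only exists to
// carry the #[cold]/#[inline(never)] hints for the closure's code.
#[inline(never)]
#[cold]
pub fn cold_path<F: FnOnce() -> R, R>(f: F) -> R {
    f()
}

fn lookup(cache: &HashMap<u32, String>, key: u32) -> String {
    match cache.get(&key) {
        Some(v) => v.clone(),
        // The miss is expected to be rare, so its code is kept off the hot path.
        None => cold_path(|| format!("<missing {}>", key)),
    }
}

fn main() {
    let mut cache = HashMap::new();
    cache.insert(1, "one".to_string());
    println!("{}", lookup(&cache, 1));
    println!("{}", lookup(&cache, 2));
}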


@ -98,22 +98,34 @@ impl<'a, 'tcx> Inliner<'a, 'tcx> {
continue;
}
let callee_mir = match self.tcx.try_optimized_mir(callsite.location.span,
callsite.callee) {
Ok(callee_mir) if self.consider_optimizing(callsite, callee_mir) => {
self.tcx.subst_and_normalize_erasing_regions(
&callsite.substs,
param_env,
callee_mir,
)
}
Ok(_) => continue,
let self_node_id = self.tcx.hir().as_local_node_id(self.source.def_id()).unwrap();
let callee_node_id = self.tcx.hir().as_local_node_id(callsite.callee);
Err(mut bug) => {
// FIXME(#43542) shouldn't have to cancel an error
bug.cancel();
continue
let callee_mir = if let Some(callee_node_id) = callee_node_id {
// Avoid a cycle here by only using `optimized_mir` only if we have
// a lower node id than the callee. This ensures that the callee will
// not inline us. This trick only works without incremental compilation.
// So don't do it if that is enabled.
if !self.tcx.dep_graph.is_fully_enabled()
&& self_node_id.as_u32() < callee_node_id.as_u32() {
self.tcx.optimized_mir(callsite.callee)
} else {
continue;
}
} else {
// This cannot result in a cycle since the callee MIR is from another crate
// and is already optimized.
self.tcx.optimized_mir(callsite.callee)
};
let callee_mir = if self.consider_optimizing(callsite, callee_mir) {
self.tcx.subst_and_normalize_erasing_regions(
&callsite.substs,
param_env,
callee_mir,
)
} else {
continue;
};
let start = caller_mir.basic_blocks().len();
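
The comment added above explains the cycle-avoidance rule: a local caller only requests the callee's `optimized_mir` when its own node id is lower, so the callee cannot simultaneously try to inline the caller, and the trick is skipped when incremental compilation is enabled. A standalone illustration, not part of the diff, of the shape of code that creates such mutual `optimized_mir` dependencies (this program compiles and runs fine; the ordering only decides which direction gets inlined):

// Not rustc code: two mutually recursive #[inline(always)] functions.
// Inlining `is_even` into `is_odd` needs `is_even`'s optimized MIR and
// vice versa, which is exactly the cycle the node-id comparison breaks.
#[inline(always)]
fn is_even(n: u32) -> bool {
    if n == 0 { true } else { is_odd(n - 1) }
}

#[inline(always)]
fn is_odd(n: u32) -> bool {
    if n == 0 { false } else { is_even(n - 1) }
}

fn main() {
    println!("10 is even: {}", is_even(10));
}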


@ -451,6 +451,8 @@ supported_targets! {
("riscv32imc-unknown-none-elf", riscv32imc_unknown_none_elf),
("riscv32imac-unknown-none-elf", riscv32imac_unknown_none_elf),
("riscv64imac-unknown-none-elf", riscv64imac_unknown_none_elf),
("riscv64gc-unknown-none-elf", riscv64gc_unknown_none_elf),
("aarch64-unknown-none", aarch64_unknown_none),


@ -0,0 +1,31 @@
use crate::spec::{LinkerFlavor, LldFlavor, PanicStrategy,
Target, TargetOptions, TargetResult};
pub fn target() -> TargetResult {
Ok(Target {
data_layout: "e-m:e-p:64:64-i64:64-i128:128-n64-S128".to_string(),
llvm_target: "riscv64".to_string(),
target_endian: "little".to_string(),
target_pointer_width: "64".to_string(),
target_c_int_width: "32".to_string(),
target_os: "none".to_string(),
target_env: String::new(),
target_vendor: "unknown".to_string(),
arch: "riscv64".to_string(),
linker_flavor: LinkerFlavor::Lld(LldFlavor::Ld),
options: TargetOptions {
linker: Some("rust-lld".to_string()),
cpu: "generic-rv64".to_string(),
max_atomic_width: Some(64),
atomic_cas: true,
features: "+m,+a,+f,+d,+c".to_string(),
executables: true,
panic_strategy: PanicStrategy::Abort,
relocation_model: "static".to_string(),
emit_debug_gdb_scripts: false,
abi_blacklist: super::riscv_base::abi_blacklist(),
.. Default::default()
},
})
}


@ -0,0 +1,31 @@
use crate::spec::{LinkerFlavor, LldFlavor, PanicStrategy,
Target, TargetOptions, TargetResult};
pub fn target() -> TargetResult {
Ok(Target {
data_layout: "e-m:e-p:64:64-i64:64-i128:128-n64-S128".to_string(),
llvm_target: "riscv64".to_string(),
target_endian: "little".to_string(),
target_pointer_width: "64".to_string(),
target_c_int_width: "32".to_string(),
target_os: "none".to_string(),
target_env: String::new(),
target_vendor: "unknown".to_string(),
arch: "riscv64".to_string(),
linker_flavor: LinkerFlavor::Lld(LldFlavor::Ld),
options: TargetOptions {
linker: Some("rust-lld".to_string()),
cpu: "generic-rv64".to_string(),
max_atomic_width: Some(64),
atomic_cas: true,
features: "+m,+a,+c".to_string(),
executables: true,
panic_strategy: PanicStrategy::Abort,
relocation_model: "static".to_string(),
emit_debug_gdb_scripts: false,
abi_blacklist: super::riscv_base::abi_blacklist(),
.. Default::default()
},
})
}

@ -1 +1 @@
Subproject commit 683d3522690b7a9d0163e7e7e6586f2b1364ed02
Subproject commit 73a75d35b9776d56160fa3200aca4a970ae49b60


@ -6,16 +6,16 @@ fn main() {
println!("{}", bar());
}
#[inline(always)]
fn foo(x: i32, y: i32) -> bool {
x == y
}
fn bar() -> bool {
let f = foo;
f(1, -1)
}
#[inline(always)]
fn foo(x: i32, y: i32) -> bool {
x == y
}
// END RUST SOURCE
// START rustc.bar.Inline.after.mir
// ...


@ -6,16 +6,16 @@ fn main() {
println!("{}", bar());
}
#[inline(always)]
fn foo(x: &i32, y: &i32) -> bool {
*x == *y
}
fn bar() -> bool {
let f = foo;
f(&1, &-1)
}
#[inline(always)]
fn foo(x: &i32, y: &i32) -> bool {
*x == *y
}
// END RUST SOURCE
// START rustc.bar.Inline.after.mir
// ...


@ -1,14 +1,14 @@
// compile-flags: -Z span_free_formats -Z mir-opt-level=3
fn test2(x: &dyn X) -> bool {
test(x)
}
#[inline]
fn test(x: &dyn X) -> bool {
x.y()
}
fn test2(x: &dyn X) -> bool {
test(x)
}
trait X {
fn y(&self) -> bool {
false


@ -85,6 +85,8 @@ static TARGETS: &'static [&'static str] = &[
"powerpc64le-unknown-linux-gnu",
"riscv32imc-unknown-none-elf",
"riscv32imac-unknown-none-elf",
"riscv64imac-unknown-none-elf",
"riscv64gc-unknown-none-elf",
"s390x-unknown-linux-gnu",
"sparc64-unknown-linux-gnu",
"sparcv9-sun-solaris",


@ -15,7 +15,7 @@ except ImportError:
# List of people to ping when the status of a tool changed.
MAINTAINERS = {
'miri': '@oli-obk @RalfJung @eddyb',
'clippy-driver': '@Manishearth @llogiq @mcarton @oli-obk',
'clippy-driver': '@Manishearth @llogiq @mcarton @oli-obk @phansch',
'rls': '@nrc @Xanewok',
'rustfmt': '@nrc @topecongiro',
'book': '@carols10cents @steveklabnik',