2020-03-20 22:30:30 -05:00
|
|
|
use crate::cgu_reuse_tracker::CguReuseTracker;
|
2019-11-29 15:05:28 -06:00
|
|
|
use crate::code_stats::CodeStats;
|
2019-12-22 16:42:04 -06:00
|
|
|
pub use crate::code_stats::{DataTypeKind, FieldInfo, SizeKind, VariantInfo};
|
2021-07-19 10:14:58 -05:00
|
|
|
use crate::config::{self, CrateType, OutputType, SwitchWithOptPath};
|
2020-03-20 22:30:30 -05:00
|
|
|
use crate::parse::ParseSess;
|
2019-11-29 15:05:28 -06:00
|
|
|
use crate::search_paths::{PathKind, SearchPath};
|
2021-09-28 03:53:33 -05:00
|
|
|
use crate::{filesearch, lint};
|
2012-12-13 15:05:22 -06:00
|
|
|
|
2020-07-29 20:27:50 -05:00
|
|
|
pub use rustc_ast::attr::MarkedAttrs;
|
2020-04-27 12:56:11 -05:00
|
|
|
pub use rustc_ast::Attribute;
|
2020-03-20 22:30:30 -05:00
|
|
|
use rustc_data_structures::flock;
|
|
|
|
use rustc_data_structures::fx::{FxHashMap, FxHashSet};
|
|
|
|
use rustc_data_structures::jobserver::{self, Client};
|
|
|
|
use rustc_data_structures::profiling::{duration_to_secs_str, SelfProfiler, SelfProfilerRef};
|
2018-12-05 09:51:58 -06:00
|
|
|
use rustc_data_structures::sync::{
|
2020-05-15 23:44:28 -05:00
|
|
|
self, AtomicU64, AtomicUsize, Lock, Lrc, OnceCell, OneThread, Ordering, Ordering::SeqCst,
|
2018-12-05 09:51:58 -06:00
|
|
|
};
|
2019-12-22 16:42:04 -06:00
|
|
|
use rustc_errors::annotate_snippet_emitter_writer::AnnotateSnippetEmitterWriter;
|
2020-03-20 22:30:30 -05:00
|
|
|
use rustc_errors::emitter::{Emitter, EmitterWriter, HumanReadableErrorType};
|
2019-11-29 15:05:28 -06:00
|
|
|
use rustc_errors::json::JsonEmitter;
|
2020-05-02 07:39:19 -05:00
|
|
|
use rustc_errors::registry::Registry;
|
2021-07-11 15:08:58 -05:00
|
|
|
use rustc_errors::{DiagnosticBuilder, DiagnosticId, ErrorReported};
|
2021-06-25 18:48:26 -05:00
|
|
|
use rustc_macros::HashStable_Generic;
|
2021-06-08 11:36:30 -05:00
|
|
|
pub use rustc_span::def_id::StableCrateId;
|
2021-08-12 15:30:40 -05:00
|
|
|
use rustc_span::edition::Edition;
|
2020-05-27 13:34:17 -05:00
|
|
|
use rustc_span::source_map::{FileLoader, MultiSpan, RealFileLoader, SourceMap, Span};
|
2020-07-29 20:27:50 -05:00
|
|
|
use rustc_span::{sym, SourceFileHashAlgorithm, Symbol};
|
2020-02-12 09:48:03 -06:00
|
|
|
use rustc_target::asm::InlineAsmArch;
|
2020-05-06 19:34:27 -05:00
|
|
|
use rustc_target::spec::{CodeModel, PanicStrategy, RelocModel, RelroLevel};
|
add rustc option for using LLVM stack smash protection
LLVM has built-in heuristics for adding stack canaries to functions. These
heuristics can be selected with LLVM function attributes. This patch adds a
rustc option `-Z stack-protector={none,basic,strong,all}` which controls the use
of these attributes. This gives rustc the same stack smash protection support as
clang offers through options `-fno-stack-protector`, `-fstack-protector`,
`-fstack-protector-strong`, and `-fstack-protector-all`. The protection this can
offer is demonstrated in test/ui/abi/stack-protector.rs. This fills a gap in the
current list of rustc exploit
mitigations (https://doc.rust-lang.org/rustc/exploit-mitigations.html),
originally discussed in #15179.
Stack smash protection adds runtime overhead and is therefore still off by
default, but now users have the option to trade performance for security as they
see fit. An example use case is adding Rust code in an existing C/C++ code base
compiled with stack smash protection. Without the ability to add stack smash
protection to the Rust code, the code base artifacts could be exploitable in
ways not possible if the code base remained pure C/C++.
Stack smash protection support is present in LLVM for almost all the current
tier 1/tier 2 targets: see
test/assembly/stack-protector/stack-protector-target-support.rs. The one
exception is nvptx64-nvidia-cuda. This patch follows clang's example, and adds a
warning message printed if stack smash protection is used with this target (see
test/ui/stack-protector/warn-stack-protector-unsupported.rs). Support for tier 3
targets has not been checked.
Since the heuristics are applied at the LLVM level, the heuristics are expected
to add stack smash protection to a fraction of functions comparable to C/C++.
Some experiments demonstrating how Rust code is affected by the different
heuristics can be found in
test/assembly/stack-protector/stack-protector-heuristics-effect.rs. There is
potential for better heuristics using Rust-specific safety information. For
example it might be reasonable to skip stack smash protection in functions which
transitively only use safe Rust code, or which uses only a subset of functions
the user declares safe (such as anything under `std.*`). Such alternative
heuristics could be added at a later point.
LLVM also offers a "safestack" sanitizer as an alternative way to guard against
stack smashing (see #26612). This could possibly also be included as a
stack-protection heuristic. An alternative is to add it as a sanitizer (#39699).
This is what clang does: safestack is exposed with option
`-fsanitize=safe-stack`.
The options are only supported by the LLVM backend, but as with other codegen
options it is visible in the main codegen option help menu. The heuristic names
"basic", "strong", and "all" are hopefully sufficiently generic to be usable in
other backends as well.
Reviewed-by: Nikita Popov <nikic@php.net>
Extra commits during review:
- [address-review] make the stack-protector option unstable
- [address-review] reduce detail level of stack-protector option help text
- [address-review] correct grammar in comment
- [address-review] use compiler flag to avoid merging functions in test
- [address-review] specify min LLVM version in fortanix stack-protector test
Only for Fortanix test, since this target specifically requests the
`--x86-experimental-lvi-inline-asm-hardening` flag.
- [address-review] specify required LLVM components in stack-protector tests
- move stack protector option enum closer to other similar option enums
- rustc_interface/tests: sort debug option list in tracking hash test
- add an explicit `none` stack-protector option
Revert "set LLVM requirements for all stack protector support test revisions"
This reverts commit a49b74f92a4e7d701d6f6cf63d207a8aff2e0f68.
2021-04-06 14:37:49 -05:00
|
|
|
use rustc_target::spec::{
|
|
|
|
SanitizerSet, SplitDebuginfo, StackProtector, Target, TargetTriple, TlsModel,
|
|
|
|
};
|
2015-01-08 19:14:10 -06:00
|
|
|
|
2019-11-03 16:38:02 -06:00
|
|
|
use std::cell::{self, RefCell};
|
2015-02-26 23:00:43 -06:00
|
|
|
use std::env;
|
2020-05-26 13:48:08 -05:00
|
|
|
use std::fmt;
|
2016-09-26 17:45:50 -05:00
|
|
|
use std::io::Write;
|
2019-11-11 11:33:30 -06:00
|
|
|
use std::num::NonZeroU32;
|
2020-05-26 13:48:08 -05:00
|
|
|
use std::ops::{Div, Mul};
|
2021-08-31 11:36:25 -05:00
|
|
|
use std::path::{Path, PathBuf};
|
2020-02-12 09:48:03 -06:00
|
|
|
use std::str::FromStr;
|
2019-10-03 20:29:25 -05:00
|
|
|
use std::sync::Arc;
|
2019-12-22 16:42:04 -06:00
|
|
|
use std::time::Duration;
|
2019-02-10 16:23:00 -06:00
|
|
|
|
2018-12-29 06:03:33 -06:00
|
|
|
/// Mutable state backing the `-Zfuel=crate=n` optimization-fuel mechanism.
pub struct OptimizationFuel {
    /// If `-zfuel=crate=n` is specified, initially set to `n`, otherwise `0`.
    remaining: u64,
    /// We're rejecting all further optimizations.
    out_of_fuel: bool,
}
|
|
|
|
|
2020-02-16 19:32:25 -06:00
|
|
|
/// The behavior of the CTFE engine when an error occurs with regards to backtraces.
#[derive(Clone, Copy)]
pub enum CtfeBacktrace {
    /// Do nothing special, return the error as usual without a backtrace.
    Disabled,
    /// Capture a backtrace at the point the error is created and return it in the error
    /// (to be printed later if/when the error ever actually gets shown to the user).
    Capture,
    /// Capture a backtrace at the point the error is created and immediately print it out.
    Immediate,
}
|
|
|
|
|
2020-05-26 13:48:08 -05:00
|
|
|
/// New-type wrapper around `usize` for representing limits. Ensures that comparisons against
/// limits are consistent throughout the compiler.
#[derive(Clone, Copy, Debug, HashStable_Generic)]
pub struct Limit(pub usize);
|
|
|
|
|
|
|
|
impl Limit {
|
|
|
|
/// Create a new limit from a `usize`.
|
|
|
|
pub fn new(value: usize) -> Self {
|
|
|
|
Limit(value)
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Check that `value` is within the limit. Ensures that the same comparisons are used
|
|
|
|
/// throughout the compiler, as mismatches can cause ICEs, see #72540.
|
2021-02-25 18:00:00 -06:00
|
|
|
#[inline]
|
2020-05-26 13:48:08 -05:00
|
|
|
pub fn value_within_limit(&self, value: usize) -> bool {
|
|
|
|
value <= self.0
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-03-26 11:28:52 -05:00
|
|
|
impl From<usize> for Limit {
|
|
|
|
fn from(value: usize) -> Self {
|
|
|
|
Self::new(value)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-05-26 13:48:08 -05:00
|
|
|
impl fmt::Display for Limit {
|
|
|
|
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
|
|
|
write!(f, "{}", self.0)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
impl Div<usize> for Limit {
|
|
|
|
type Output = Limit;
|
|
|
|
|
|
|
|
fn div(self, rhs: usize) -> Self::Output {
|
|
|
|
Limit::new(self.0 / rhs)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
impl Mul<usize> for Limit {
|
|
|
|
type Output = Limit;
|
|
|
|
|
|
|
|
fn mul(self, rhs: usize) -> Self::Output {
|
|
|
|
Limit::new(self.0 * rhs)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-07-04 13:02:51 -05:00
|
|
|
/// The set of per-crate limits that bound potentially unbounded compiler operations.
#[derive(Clone, Copy, Debug, HashStable_Generic)]
pub struct Limits {
    /// The maximum recursion limit for potentially infinitely recursive
    /// operations such as auto-dereference and monomorphization.
    pub recursion_limit: Limit,
    /// The size at which the `large_assignments` lint starts
    /// being emitted.
    pub move_size_limit: Limit,
    /// The maximum length of types during monomorphization.
    pub type_length_limit: Limit,
    /// The maximum blocks a const expression can evaluate.
    pub const_eval_limit: Limit,
}
|
|
|
|
|
2017-10-10 09:12:11 -05:00
|
|
|
/// Represents the data associated with a compilation
/// session for a single crate.
pub struct Session {
    /// The target being compiled for.
    pub target: Target,
    /// The target the compiler itself runs on.
    pub host: Target,
    /// The parsed command-line options for this session.
    pub opts: config::Options,
    /// Search path for the host's target libraries.
    pub host_tlib_path: Lrc<SearchPath>,
    /// Search path for the compilation target's libraries.
    pub target_tlib_path: Lrc<SearchPath>,
    /// Parsing-related state, including the span diagnostic `Handler`.
    pub parse_sess: ParseSess,
    /// Path to the sysroot in use for this session.
    pub sysroot: PathBuf,
    /// The name of the root source file of the crate, in the local file system.
    /// `None` means that there is no source file.
    pub local_crate_source_file: Option<PathBuf>,

    /// Set of `(DiagnosticId, Option<Span>, message)` tuples tracking
    /// (sub)diagnostics that have been set once, but should not be set again,
    /// in order to avoid redundantly verbose output (Issue #24690, #44953).
    pub one_time_diagnostics: Lock<FxHashSet<(DiagnosticMessageId, Option<Span>, String)>>,
    /// The crate types requested for this compilation; set once via `init_crate_types`.
    crate_types: OnceCell<Vec<CrateType>>,
    /// The `stable_crate_id` is constructed out of the crate name and all the
    /// `-C metadata` arguments passed to the compiler. Its value forms a unique
    /// global identifier for the crate. It is used to allow multiple crates
    /// with the same name to coexist. See the
    /// `rustc_codegen_llvm::back::symbol_names` module for more information.
    pub stable_crate_id: OnceCell<StableCrateId>,

    /// Enabled language features; set once during early compilation.
    features: OnceCell<rustc_feature::Features>,

    /// State of the current incremental-compilation session, if any.
    incr_comp_session: OneThread<RefCell<IncrCompSession>>,
    /// Used for incremental compilation tests. Will only be populated if
    /// `-Zquery-dep-graph` is specified.
    pub cgu_reuse_tracker: CguReuseTracker,

    /// Used by `-Z self-profile`.
    pub prof: SelfProfilerRef,

    /// Some measurements that are being gathered during compilation.
    pub perf_stats: PerfStats,

    /// Data about code being compiled, gathered during compilation.
    pub code_stats: CodeStats,

    /// Tracks fuel info if `-zfuel=crate=n` is specified.
    optimization_fuel: Lock<OptimizationFuel>,

    /// Always set to zero and incremented so that we can print fuel expended by a crate.
    pub print_fuel: AtomicU64,

    /// Loaded up early on in the initialization of this `Session` to avoid
    /// false positives about a job server in our environment.
    pub jobserver: Client,

    /// Cap lint level specified by a driver specifically.
    pub driver_lint_caps: FxHashMap<lint::LintId, lint::Level>,

    /// Tracks the current behavior of the CTFE engine when an error occurs.
    /// Options range from returning the error without a backtrace to returning an error
    /// and immediately printing the backtrace to stderr.
    /// The `Lock` is only used by miri to allow setting `ctfe_backtrace` after analysis when
    /// `MIRI_BACKTRACE` is set. This makes it only apply to miri's errors and not to all CTFE
    /// errors.
    pub ctfe_backtrace: Lock<CtfeBacktrace>,

    /// This tracks where `-Zunleash-the-miri-inside-of-you` was used to get around a
    /// const check, optionally with the relevant feature gate. We use this to
    /// warn about unleashing, but with a single diagnostic instead of dozens that
    /// drown everything else in noise.
    miri_unleashed_features: Lock<Vec<(Span, Option<Symbol>)>>,

    /// Architecture to use for interpreting asm!.
    pub asm_arch: Option<InlineAsmArch>,

    /// Set of enabled features for the current target.
    pub target_features: FxHashSet<Symbol>,
}
|
|
|
|
|
2016-08-23 12:23:58 -05:00
|
|
|
/// Assorted performance counters gathered during compilation.
pub struct PerfStats {
    /// The accumulated time spent on computing symbol hashes.
    pub symbol_hash_time: Lock<Duration>,
    /// Total number of values canonicalized queries constructed.
    pub queries_canonicalized: AtomicUsize,
    /// Number of times this query is invoked.
    pub normalize_generic_arg_after_erasing_regions: AtomicUsize,
    /// Number of times this query is invoked.
    pub normalize_projection_ty: AtomicUsize,
}
|
|
|
|
|
2019-09-05 21:57:44 -05:00
|
|
|
/// Enum to support dispatch of one-time diagnostics (in `Session.diag_once`).
enum DiagnosticBuilderMethod {
    /// Attach a plain note (no span) to the diagnostic.
    Note,
    /// Attach a note anchored to a span; the span is required by `diag_once`.
    SpanNote,
    // Add more variants as needed to support one-time diagnostics.
}
|
|
|
|
|
2020-08-27 05:00:21 -05:00
|
|
|
/// Trait implemented by error types. This should not be implemented manually. Instead, use
/// `#[derive(SessionDiagnostic)]` -- see [rustc_macros::SessionDiagnostic].
pub trait SessionDiagnostic<'a> {
    /// Write out as a diagnostic out of `sess`.
    #[must_use]
    fn into_diagnostic(self, sess: &'a Session) -> DiagnosticBuilder<'a>;
}
|
|
|
|
|
2019-09-05 21:57:44 -05:00
|
|
|
/// Diagnostic message ID, used by `Session.one_time_diagnostics` to avoid
/// emitting the same message more than once.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
pub enum DiagnosticMessageId {
    /// EXXXX error code as integer.
    ErrorId(u16),
    /// Identity of a registered lint.
    LintId(lint::LintId),
    /// Stability attribute tracking issue number, if any.
    StabilityId(Option<NonZeroU32>),
}
|
|
|
|
|
2017-11-24 12:26:42 -06:00
|
|
|
impl From<&'static lint::Lint> for DiagnosticMessageId {
    /// Builds the message ID that deduplicates diagnostics for this lint.
    fn from(lint: &'static lint::Lint) -> Self {
        Self::LintId(lint::LintId::of(lint))
    }
}
|
|
|
|
|
2014-03-05 08:36:01 -06:00
|
|
|
impl Session {
|
2020-05-03 07:23:08 -05:00
|
|
|
/// Records that `-Zunleash-the-miri-inside-of-you` was used to bypass a const
/// check at `span`, with the feature gate that was skipped (if any). The
/// collected entries are reported in one batch by `check_miri_unleashed_features`.
pub fn miri_unleashed_feature(&self, span: Span, feature_gate: Option<Symbol>) {
    self.miri_unleashed_features.lock().push((span, feature_gate));
}
|
|
|
|
|
|
|
|
/// Emits a single warning (and possibly an error) summarizing every const check
/// that was skipped via `-Zunleash-the-miri-inside-of-you` during this session.
/// Uses of the flag to bypass an actual feature gate are upgraded to a hard error.
fn check_miri_unleashed_features(&self) {
    let unleashed_features = self.miri_unleashed_features.lock();
    if !unleashed_features.is_empty() {
        let mut must_err = false;
        // Create a diagnostic pointing at where things got unleashed.
        let mut diag = self.struct_warn("skipping const checks");
        for &(span, feature_gate) in unleashed_features.iter() {
            // FIXME: `span_label` doesn't do anything, so we use "help" as a hack.
            if let Some(feature_gate) = feature_gate {
                diag.span_help(span, &format!("skipping check for `{}` feature", feature_gate));
                // The unleash flag must *not* be used to just "hack around" feature gates.
                must_err = true;
            } else {
                diag.span_help(span, "skipping check that does not even have a feature gate");
            }
        }
        diag.emit();
        // If we should err, make sure we did.
        if must_err && !self.has_errors() {
            // We have skipped a feature gate, and not run into other errors... reject.
            self.err(
                "`-Zunleash-the-miri-inside-of-you` may not be used to circumvent feature \
                gates, except when testing error paths in the CTFE engine",
            );
        }
    }
}
|
|
|
|
|
2020-05-02 07:39:19 -05:00
|
|
|
/// Invoked all the way at the end to finish off diagnostics printing.
pub fn finish_diagnostics(&self, registry: &Registry) {
    // Report any collected `-Zunleash-the-miri-inside-of-you` uses first so they
    // are counted in the error total printed below.
    self.check_miri_unleashed_features();
    self.diagnostic().print_error_count(registry);
    self.emit_future_breakage();
}
|
|
|
|
|
|
|
|
fn emit_future_breakage(&self) {
|
2021-12-04 13:34:20 -06:00
|
|
|
if !self.opts.json_future_incompat {
|
2020-08-13 14:41:52 -05:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
let diags = self.diagnostic().take_future_breakage_diagnostics();
|
|
|
|
if diags.is_empty() {
|
|
|
|
return;
|
|
|
|
}
|
2021-07-11 15:08:58 -05:00
|
|
|
self.parse_sess.span_diagnostic.emit_future_breakage_report(diags);
|
2020-05-02 06:19:24 -05:00
|
|
|
}
|
|
|
|
|
2021-06-08 11:36:30 -05:00
|
|
|
/// Returns the `StableCrateId` of the crate being compiled. Panics if it has
/// not been initialized yet.
pub fn local_stable_crate_id(&self) -> StableCrateId {
    // `StableCrateId` is `Copy`, so dereferencing the cell's reference is enough.
    *self.stable_crate_id.get().unwrap()
}
|
|
|
|
|
|
|
|
/// Returns the crate types requested for this compilation. Panics if they have
/// not been initialized via `init_crate_types` yet.
pub fn crate_types(&self) -> &[CrateType] {
    let types: &Vec<CrateType> = self.crate_types.get().unwrap();
    &types[..]
}
|
|
|
|
|
|
|
|
/// Records the crate types for this compilation; may only be called once per session.
pub fn init_crate_types(&self, crate_types: Vec<CrateType>) {
    let result = self.crate_types.set(crate_types);
    result.expect("`crate_types` was initialized twice")
}
|
|
|
|
|
2019-12-22 16:42:04 -06:00
|
|
|
/// Builds a warning diagnostic at the given span. The caller must `emit` it.
pub fn struct_span_warn<S: Into<MultiSpan>>(&self, sp: S, msg: &str) -> DiagnosticBuilder<'_> {
    self.diagnostic().struct_span_warn(sp, msg)
}
/// Builds a warning at the given span that is emitted even under `--cap-lints allow`.
pub fn struct_span_force_warn<S: Into<MultiSpan>>(
    &self,
    sp: S,
    msg: &str,
) -> DiagnosticBuilder<'_> {
    self.diagnostic().struct_span_force_warn(sp, msg)
}
/// Builds a warning diagnostic at the given span, tagged with an error code.
pub fn struct_span_warn_with_code<S: Into<MultiSpan>>(
    &self,
    sp: S,
    msg: &str,
    code: DiagnosticId,
) -> DiagnosticBuilder<'_> {
    self.diagnostic().struct_span_warn_with_code(sp, msg, code)
}
/// Builds a warning diagnostic with no primary span.
pub fn struct_warn(&self, msg: &str) -> DiagnosticBuilder<'_> {
    self.diagnostic().struct_warn(msg)
}
/// Builds a span-less warning that is emitted even under `--cap-lints allow`.
pub fn struct_force_warn(&self, msg: &str) -> DiagnosticBuilder<'_> {
    self.diagnostic().struct_force_warn(msg)
}
/// Builds an allow-level diagnostic at the given span.
pub fn struct_span_allow<S: Into<MultiSpan>>(&self, sp: S, msg: &str) -> DiagnosticBuilder<'_> {
    self.diagnostic().struct_span_allow(sp, msg)
}
/// Builds an allow-level diagnostic with no primary span.
pub fn struct_allow(&self, msg: &str) -> DiagnosticBuilder<'_> {
    self.diagnostic().struct_allow(msg)
}
/// Builds an error diagnostic at the given span. The caller must `emit` it.
pub fn struct_span_err<S: Into<MultiSpan>>(&self, sp: S, msg: &str) -> DiagnosticBuilder<'_> {
    self.diagnostic().struct_span_err(sp, msg)
}
/// Builds an error diagnostic at the given span, tagged with an error code.
pub fn struct_span_err_with_code<S: Into<MultiSpan>>(
    &self,
    sp: S,
    msg: &str,
    code: DiagnosticId,
) -> DiagnosticBuilder<'_> {
    self.diagnostic().struct_span_err_with_code(sp, msg, code)
}
// FIXME: This method should be removed (every error should have an associated error code).
/// Builds an error diagnostic with no primary span.
pub fn struct_err(&self, msg: &str) -> DiagnosticBuilder<'_> {
    self.diagnostic().struct_err(msg)
}
/// Builds a span-less error diagnostic, tagged with an error code.
pub fn struct_err_with_code(&self, msg: &str, code: DiagnosticId) -> DiagnosticBuilder<'_> {
    self.diagnostic().struct_err_with_code(msg, code)
}
/// Builds a fatal diagnostic at the given span. The caller must `emit` it.
pub fn struct_span_fatal<S: Into<MultiSpan>>(&self, sp: S, msg: &str) -> DiagnosticBuilder<'_> {
    self.diagnostic().struct_span_fatal(sp, msg)
}
/// Builds a fatal diagnostic at the given span, tagged with an error code.
pub fn struct_span_fatal_with_code<S: Into<MultiSpan>>(
    &self,
    sp: S,
    msg: &str,
    code: DiagnosticId,
) -> DiagnosticBuilder<'_> {
    self.diagnostic().struct_span_fatal_with_code(sp, msg, code)
}
/// Builds a fatal diagnostic with no primary span.
pub fn struct_fatal(&self, msg: &str) -> DiagnosticBuilder<'_> {
    self.diagnostic().struct_fatal(msg)
}
|
2015-12-17 21:15:53 -06:00
|
|
|
|
2015-12-13 06:12:47 -06:00
|
|
|
/// Emits a fatal error at the given span and aborts compilation (never returns).
pub fn span_fatal<S: Into<MultiSpan>>(&self, sp: S, msg: &str) -> ! {
    self.diagnostic().span_fatal(sp, msg)
}
/// Emits a fatal error with an error code at the given span and aborts (never returns).
pub fn span_fatal_with_code<S: Into<MultiSpan>>(
    &self,
    sp: S,
    msg: &str,
    code: DiagnosticId,
) -> ! {
    self.diagnostic().span_fatal_with_code(sp, msg, code)
}
/// Emits a span-less fatal error and aborts compilation (never returns).
pub fn fatal(&self, msg: &str) -> ! {
    self.diagnostic().fatal(msg).raise()
}
|
2021-03-27 21:16:17 -05:00
|
|
|
/// Emits either a warning or an error at `sp`, selected by the `is_warning` flag.
pub fn span_err_or_warn<S: Into<MultiSpan>>(&self, is_warning: bool, sp: S, msg: &str) {
    match is_warning {
        true => self.span_warn(sp, msg),
        false => self.span_err(sp, msg),
    }
}
|
2015-12-13 06:12:47 -06:00
|
|
|
/// Emits an error diagnostic at the given span.
pub fn span_err<S: Into<MultiSpan>>(&self, sp: S, msg: &str) {
    self.diagnostic().span_err(sp, msg)
}
|
2021-03-27 21:16:17 -05:00
|
|
|
/// Emits an error diagnostic with the given error code at the given span.
pub fn span_err_with_code<S: Into<MultiSpan>>(&self, sp: S, msg: &str, code: DiagnosticId) {
    // `msg` is already a `&str`; the previous `&msg` produced a `&&str` that only
    // compiled via deref coercion (clippy: needless_borrow).
    self.diagnostic().span_err_with_code(sp, msg, code)
}
|
2013-09-06 21:11:55 -05:00
|
|
|
/// Emits a span-less error diagnostic.
pub fn err(&self, msg: &str) {
    self.diagnostic().err(msg)
}
/// Converts a `SessionDiagnostic` value into a diagnostic and emits it.
pub fn emit_err<'a>(&'a self, err: impl SessionDiagnostic<'a>) {
    err.into_diagnostic(self).emit()
}
|
2021-05-31 19:00:00 -05:00
|
|
|
/// Returns the number of errors emitted so far in this session.
#[inline]
pub fn err_count(&self) -> usize {
    self.diagnostic().err_count()
}
/// Returns `true` if any errors have been emitted.
pub fn has_errors(&self) -> bool {
    self.diagnostic().has_errors()
}
/// Returns `true` if any errors or delayed span bugs have been recorded.
pub fn has_errors_or_delayed_span_bugs(&self) -> bool {
    self.diagnostic().has_errors_or_delayed_span_bugs()
}
/// Aborts compilation if any errors have been emitted.
pub fn abort_if_errors(&self) {
    self.diagnostic().abort_if_errors();
}
|
2018-12-08 13:30:23 -06:00
|
|
|
pub fn compile_status(&self) -> Result<(), ErrorReported> {
|
2021-07-22 18:47:46 -05:00
|
|
|
if self.diagnostic().has_errors_or_lint_errors() {
|
2019-09-22 21:45:21 -05:00
|
|
|
self.diagnostic().emit_stashed_diagnostics();
|
2019-06-22 06:44:03 -05:00
|
|
|
Err(ErrorReported)
|
|
|
|
} else {
|
|
|
|
Ok(())
|
|
|
|
}
|
2017-07-02 08:09:09 -05:00
|
|
|
}
|
2019-06-22 06:44:03 -05:00
|
|
|
// FIXME(matthewjasper) Remove this method, it should never be needed.
|
2017-07-02 08:09:09 -05:00
|
|
|
pub fn track_errors<F, T>(&self, f: F) -> Result<T, ErrorReported>
|
2018-03-05 23:29:03 -06:00
|
|
|
where
|
|
|
|
F: FnOnce() -> T,
|
2015-12-11 01:59:11 -06:00
|
|
|
{
|
2016-01-31 13:43:43 -06:00
|
|
|
let old_count = self.err_count();
|
2016-01-20 03:07:33 -06:00
|
|
|
let result = f();
|
2021-08-04 09:25:45 -05:00
|
|
|
if self.err_count() == old_count { Ok(result) } else { Err(ErrorReported) }
|
2016-01-20 18:19:20 -06:00
|
|
|
}
|
2015-12-13 06:12:47 -06:00
|
|
|
/// Emits a warning diagnostic at the given span.
pub fn span_warn<S: Into<MultiSpan>>(&self, sp: S, msg: &str) {
    self.diagnostic().span_warn(sp, msg)
}
/// Emits a warning diagnostic with an error code at the given span.
pub fn span_warn_with_code<S: Into<MultiSpan>>(&self, sp: S, msg: &str, code: DiagnosticId) {
    self.diagnostic().span_warn_with_code(sp, msg, code)
}
/// Emits a span-less warning diagnostic.
pub fn warn(&self, msg: &str) {
    self.diagnostic().warn(msg)
}
|
2015-04-13 02:13:09 -05:00
|
|
|
/// Delay a span_bug() call until abort_if_errors()
#[track_caller]
pub fn delay_span_bug<S: Into<MultiSpan>>(&self, sp: S, msg: &str) {
    self.diagnostic().delay_span_bug(sp, msg)
}
|
2020-08-22 14:24:48 -05:00
|
|
|
|
|
|
|
/// Used for code paths of expensive computations that should only take place when
/// warnings or errors are emitted. If no messages are emitted ("good path"), then
/// it's likely a bug.
pub fn delay_good_path_bug(&self, msg: &str) {
    // Skip the check under these debugging flags / output modes: they legitimately
    // run the expensive computations even on clean compilations, so the "good path"
    // heuristic would produce false positives.
    if self.opts.debugging_opts.print_type_sizes
        || self.opts.debugging_opts.query_dep_graph
        || self.opts.debugging_opts.dump_mir.is_some()
        || self.opts.debugging_opts.unpretty.is_some()
        || self.opts.output_types.contains_key(&OutputType::Mir)
        || std::env::var_os("RUSTC_LOG").is_some()
    {
        return;
    }

    self.diagnostic().delay_good_path_bug(msg)
}
|
|
|
|
|
2015-12-20 15:00:43 -06:00
|
|
|
/// Emits a span-less note that does not count as an error.
pub fn note_without_error(&self, msg: &str) {
    self.diagnostic().note_without_error(msg)
}
/// Emits a note at the given span that does not count as an error.
pub fn span_note_without_error<S: Into<MultiSpan>>(&self, sp: S, msg: &str) {
    self.diagnostic().span_note_without_error(sp, msg)
}
/// Builds (without emitting) a note diagnostic that does not count as an error.
pub fn struct_note_without_error(&self, msg: &str) -> DiagnosticBuilder<'_> {
    self.diagnostic().struct_note_without_error(msg)
}
|
2017-01-28 06:01:45 -06:00
|
|
|
|
2021-05-31 19:00:00 -05:00
|
|
|
/// Returns the diagnostic `Handler` for this session (shared with `parse_sess`).
#[inline]
pub fn diagnostic(&self) -> &rustc_errors::Handler {
    &self.parse_sess.span_diagnostic
}
|
2016-10-15 12:28:12 -05:00
|
|
|
|
2021-07-21 07:53:13 -05:00
|
|
|
/// Runs `f` with diagnostic emission disabled on the session's handler,
/// returning `f`'s result.
pub fn with_disabled_diagnostic<T, F: FnOnce() -> T>(&self, f: F) -> T {
    self.parse_sess.span_diagnostic.with_disabled_diagnostic(f)
}
|
|
|
|
|
2017-06-26 15:30:21 -05:00
|
|
|
/// Analogous to calling methods on the given `DiagnosticBuilder`, but
/// deduplicates on lint ID, span (if any), and message for this `Session`
fn diag_once<'a, 'b>(
    &'a self,
    diag_builder: &'b mut DiagnosticBuilder<'a>,
    method: DiagnosticBuilderMethod,
    msg_id: DiagnosticMessageId,
    message: &str,
    span_maybe: Option<Span>,
) {
    // Deduplication key: the same (id, span, message) triple is only ever
    // attached to a diagnostic once per session.
    let id_span_message = (msg_id, span_maybe, message.to_owned());
    // `insert` returns `true` only on first insertion, i.e. when this
    // combination has not been seen before.
    let fresh = self.one_time_diagnostics.borrow_mut().insert(id_span_message);
    if fresh {
        match method {
            DiagnosticBuilderMethod::Note => {
                diag_builder.note(message);
            }
            DiagnosticBuilderMethod::SpanNote => {
                // A span variant without a span is a caller bug.
                let span = span_maybe.expect("`span_note` needs a span");
                diag_builder.span_note(span, message);
            }
        }
    }
}
|
|
|
|
|
2018-03-05 23:29:03 -06:00
|
|
|
/// Attaches a span note to `diag_builder`, at most once per
/// (`msg_id`, `span`, `message`) triple for this session (see `diag_once`).
pub fn diag_span_note_once<'a, 'b>(
    &'a self,
    diag_builder: &'b mut DiagnosticBuilder<'a>,
    msg_id: DiagnosticMessageId,
    span: Span,
    message: &str,
) {
    self.diag_once(
        diag_builder,
        DiagnosticBuilderMethod::SpanNote,
        msg_id,
        message,
        Some(span),
    );
}
|
|
|
|
|
|
|
|
/// Attaches a (span-less) note to `diag_builder`, at most once per
/// (`msg_id`, `message`) pair for this session (see `diag_once`).
pub fn diag_note_once<'a, 'b>(
    &'a self,
    diag_builder: &'b mut DiagnosticBuilder<'a>,
    msg_id: DiagnosticMessageId,
    message: &str,
) {
    self.diag_once(diag_builder, DiagnosticBuilderMethod::Note, msg_id, message, None);
}
|
|
|
|
|
2020-02-17 11:38:30 -06:00
|
|
|
/// Returns the `SourceMap` for this session (shared with the parse session).
#[inline]
pub fn source_map(&self) -> &SourceMap {
    self.parse_sess.source_map()
}
|
2018-03-05 23:29:03 -06:00
|
|
|
/// Whether the `verbose` debugging option (`-Z verbose`) is enabled.
pub fn verbose(&self) -> bool {
    self.opts.debugging_opts.verbose
}
|
|
|
|
/// Whether pass timing should be reported: enabled by either the
/// `time_passes` or the `time` debugging option.
pub fn time_passes(&self) -> bool {
    self.opts.debugging_opts.time_passes || self.opts.debugging_opts.time
}
|
2018-12-30 13:59:03 -06:00
|
|
|
/// Whether the `instrument_mcount` debugging option is enabled.
pub fn instrument_mcount(&self) -> bool {
    self.opts.debugging_opts.instrument_mcount
}
|
2013-09-06 21:11:55 -05:00
|
|
|
/// Whether LLVM pass timing should be reported (`time_llvm_passes` debugging option).
pub fn time_llvm_passes(&self) -> bool {
    self.opts.debugging_opts.time_llvm_passes
}
|
2018-03-05 23:29:03 -06:00
|
|
|
/// Whether crate-metadata statistics should be reported (`meta_stats` debugging option).
pub fn meta_stats(&self) -> bool {
    self.opts.debugging_opts.meta_stats
}
|
|
|
|
/// Whether comments should be included in emitted assembly (`asm_comments` debugging option).
pub fn asm_comments(&self) -> bool {
    self.opts.debugging_opts.asm_comments
}
|
2018-06-12 14:05:37 -05:00
|
|
|
/// Whether the LLVM IR verifier should run. Enabled by the `verify_llvm_ir`
/// debugging option, or unconditionally when the compiler itself was built
/// with the `RUSTC_VERIFY_LLVM_IR` environment variable set (a compile-time
/// check via `option_env!`, not a runtime one).
pub fn verify_llvm_ir(&self) -> bool {
    self.opts.debugging_opts.verify_llvm_ir || option_env!("RUSTC_VERIFY_LLVM_IR").is_some()
}
|
2013-09-06 21:11:55 -05:00
|
|
|
/// Whether LLVM should print the passes it runs (`print_llvm_passes` debugging option).
pub fn print_llvm_passes(&self) -> bool {
    self.opts.debugging_opts.print_llvm_passes
}
|
2019-07-24 10:00:09 -05:00
|
|
|
/// Whether binary dependencies should be listed in dep-info output
/// (`binary_dep_depinfo` debugging option).
pub fn binary_dep_depinfo(&self) -> bool {
    self.opts.debugging_opts.binary_dep_depinfo
}
|
2021-03-03 15:19:15 -06:00
|
|
|
/// The effective MIR optimization level for this session
/// (delegates to `Options::mir_opt_level`).
pub fn mir_opt_level(&self) -> usize {
    self.opts.mir_opt_level()
}
|
2017-12-20 03:26:54 -06:00
|
|
|
|
2019-02-08 07:53:55 -06:00
|
|
|
/// Gets the features enabled for the current compilation session.
/// DO NOT USE THIS METHOD if there is a TyCtxt available, as it circumvents
/// dependency tracking. Use tcx.features() instead.
///
/// # Panics
/// Panics if `init_features` has not been called yet.
#[inline]
pub fn features_untracked(&self) -> &rustc_feature::Features {
    self.features.get().unwrap()
}
|
2017-12-20 03:26:54 -06:00
|
|
|
|
2019-11-29 17:23:38 -06:00
|
|
|
/// Initializes the session's feature set.
///
/// # Panics
/// Panics if the features were already initialized.
pub fn init_features(&self, features: rustc_feature::Features) {
    // `OnceCell::set` fails iff a value was already stored.
    if self.features.set(features).is_err() {
        panic!("`features` was initialized twice");
    }
}
|
2017-12-20 03:26:54 -06:00
|
|
|
|
2018-01-16 17:02:31 -06:00
|
|
|
/// Calculates the flavor of LTO to use for this compilation.
///
/// Precedence: target requirements, then explicit `-C lto=...`, then the
/// deprecated `-Z thinlto`, then session defaults (ThinLTO for optimized
/// multi-CGU builds).
pub fn lto(&self) -> config::Lto {
    // If our target has codegen requirements ignore the command line
    if self.target.requires_lto {
        return config::Lto::Fat;
    }

    // If the user specified something, return that. If they only said `-C
    // lto` and we've for whatever reason forced off ThinLTO via the CLI,
    // then ensure we can't use a ThinLTO.
    match self.opts.cg.lto {
        config::LtoCli::Unspecified => {
            // The compiler was invoked without the `-Clto` flag. Fall
            // through to the default handling
        }
        config::LtoCli::No => {
            // The user explicitly opted out of any kind of LTO
            return config::Lto::No;
        }
        config::LtoCli::Yes | config::LtoCli::Fat | config::LtoCli::NoParam => {
            // All of these mean fat LTO
            return config::Lto::Fat;
        }
        config::LtoCli::Thin => {
            return if self.opts.cli_forced_thinlto_off {
                config::Lto::Fat
            } else {
                config::Lto::Thin
            };
        }
    }

    // Ok at this point the target doesn't require anything and the user
    // hasn't asked for anything. Our next decision is whether or not
    // we enable "auto" ThinLTO where we use multiple codegen units and
    // then do ThinLTO over those codegen units. The logic below will
    // either return `No` or `ThinLocal`.

    // If processing command line options determined that we're incompatible
    // with ThinLTO (e.g., `-C lto --emit llvm-ir`) then return that option.
    if self.opts.cli_forced_thinlto_off {
        return config::Lto::No;
    }

    // If `-Z thinlto` specified process that, but note that this is mostly
    // a deprecated option now that `-C lto=thin` exists.
    if let Some(enabled) = self.opts.debugging_opts.thinlto {
        if enabled {
            return config::Lto::ThinLocal;
        } else {
            return config::Lto::No;
        }
    }

    // If there's only one codegen unit and LTO isn't enabled then there's
    // no need for ThinLTO so just return false.
    if self.codegen_units() == 1 {
        return config::Lto::No;
    }

    // Now we're in "defaults" territory. By default we enable ThinLTO for
    // optimized compiles (anything greater than O0).
    match self.opts.optimize {
        config::OptLevel::No => config::Lto::No,
        _ => config::Lto::ThinLocal,
    }
}
|
2018-01-16 17:02:31 -06:00
|
|
|
|
2016-09-27 21:26:08 -05:00
|
|
|
/// Returns the panic strategy for this compile session. If the user explicitly selected one
/// using '-C panic', use that, otherwise use the panic strategy defined by the target.
pub fn panic_strategy(&self) -> PanicStrategy {
    self.opts.cg.panic.unwrap_or(self.target.panic_strategy)
}
|
2018-01-04 14:19:23 -06:00
|
|
|
pub fn fewer_names(&self) -> bool {
|
2020-11-22 18:00:00 -06:00
|
|
|
if let Some(fewer_names) = self.opts.debugging_opts.fewer_names {
|
|
|
|
fewer_names
|
|
|
|
} else {
|
|
|
|
let more_names = self.opts.output_types.contains_key(&OutputType::LlvmAssembly)
|
|
|
|
|| self.opts.output_types.contains_key(&OutputType::Bitcode)
|
|
|
|
// AddressSanitizer and MemorySanitizer use alloca name when reporting an issue.
|
|
|
|
|| self.opts.debugging_opts.sanitizer.intersects(SanitizerSet::ADDRESS | SanitizerSet::MEMORY);
|
|
|
|
!more_names
|
|
|
|
}
|
2018-01-04 14:19:23 -06:00
|
|
|
}
|
|
|
|
|
2014-12-27 03:19:27 -06:00
|
|
|
/// Whether unstable options (`-Z unstable-options`) are enabled.
pub fn unstable_options(&self) -> bool {
    self.opts.debugging_opts.unstable_options
}
|
2020-10-10 13:27:52 -05:00
|
|
|
/// Whether this compiler build permits unstable features (nightly builds).
pub fn is_nightly_build(&self) -> bool {
    self.opts.unstable_features.is_nightly_build()
}
|
2021-10-07 17:33:13 -05:00
|
|
|
/// Whether the CFI sanitizer is enabled for this session.
pub fn is_sanitizer_cfi_enabled(&self) -> bool {
    self.opts.debugging_opts.sanitizer.contains(SanitizerSet::CFI)
}
|
2017-02-17 15:00:08 -06:00
|
|
|
/// Whether arithmetic overflow checks are enabled: the explicit
/// `-C overflow-checks` setting if given, otherwise follows debug assertions.
pub fn overflow_checks(&self) -> bool {
    self.opts.cg.overflow_checks.unwrap_or(self.opts.debug_assertions)
}
|
2016-05-10 15:21:18 -05:00
|
|
|
|
2020-03-03 02:17:15 -06:00
|
|
|
/// Check whether this compile session and crate type use static crt.
|
2020-05-01 17:30:23 -05:00
|
|
|
pub fn crt_static(&self, crate_type: Option<CrateType>) -> bool {
|
2020-11-08 05:27:51 -06:00
|
|
|
if !self.target.crt_static_respected {
|
2020-05-01 15:32:00 -05:00
|
|
|
// If the target does not opt in to crt-static support, use its default.
|
2020-11-08 05:27:51 -06:00
|
|
|
return self.target.crt_static_default;
|
2017-08-22 16:24:29 -05:00
|
|
|
}
|
|
|
|
|
2017-08-22 16:24:29 -05:00
|
|
|
let requested_features = self.opts.cg.target_feature.split(',');
|
|
|
|
let found_negative = requested_features.clone().any(|r| r == "-crt-static");
|
|
|
|
let found_positive = requested_features.clone().any(|r| r == "+crt-static");
|
|
|
|
|
2020-03-08 00:24:02 -06:00
|
|
|
if found_positive || found_negative {
|
2020-03-03 02:17:15 -06:00
|
|
|
found_positive
|
2020-05-01 17:30:23 -05:00
|
|
|
} else if crate_type == Some(CrateType::ProcMacro)
|
|
|
|
|| crate_type == None && self.opts.crate_types.contains(&CrateType::ProcMacro)
|
2020-03-08 00:24:02 -06:00
|
|
|
{
|
|
|
|
// FIXME: When crate_type is not available,
|
|
|
|
// we use compiler options to determine the crate_type.
|
|
|
|
// We can't check `#![crate_type = "proc-macro"]` here.
|
|
|
|
false
|
|
|
|
} else {
|
2020-11-08 05:27:51 -06:00
|
|
|
self.target.crt_static_default
|
2020-03-03 02:17:15 -06:00
|
|
|
}
|
2017-08-22 16:24:29 -05:00
|
|
|
}
|
|
|
|
|
2020-04-22 16:46:45 -05:00
|
|
|
/// The relocation model: the explicit `-C relocation-model` value if given,
/// otherwise the target's default.
pub fn relocation_model(&self) -> RelocModel {
    self.opts.cg.relocation_model.unwrap_or(self.target.relocation_model)
}
|
|
|
|
|
2020-05-06 19:34:27 -05:00
|
|
|
/// The code model, if any: the explicit `-C code-model` value if given,
/// otherwise the target's default (which may itself be `None`).
pub fn code_model(&self) -> Option<CodeModel> {
    self.opts.cg.code_model.or(self.target.code_model)
}
|
|
|
|
|
2020-04-25 13:45:21 -05:00
|
|
|
/// The thread-local storage model: the explicit `tls_model` debugging
/// option if given, otherwise the target's default.
pub fn tls_model(&self) -> TlsModel {
    self.opts.debugging_opts.tls_model.unwrap_or(self.target.tls_model)
}
|
|
|
|
|
2020-12-12 21:38:23 -06:00
|
|
|
pub fn is_wasi_reactor(&self) -> bool {
|
|
|
|
self.target.options.os == "wasi"
|
|
|
|
&& matches!(
|
|
|
|
self.opts.debugging_opts.wasi_exec_model,
|
|
|
|
Some(config::WasiExecModel::Reactor)
|
|
|
|
)
|
|
|
|
}
|
|
|
|
|
2020-11-30 10:39:08 -06:00
|
|
|
/// The split-debuginfo mode: the explicit `-C split-debuginfo` value if
/// given, otherwise the target's default.
pub fn split_debuginfo(&self) -> SplitDebuginfo {
    self.opts.cg.split_debuginfo.unwrap_or(self.target.split_debuginfo)
}
|
|
|
|
|
add rustc option for using LLVM stack smash protection
LLVM has built-in heuristics for adding stack canaries to functions. These
heuristics can be selected with LLVM function attributes. This patch adds a
rustc option `-Z stack-protector={none,basic,strong,all}` which controls the use
of these attributes. This gives rustc the same stack smash protection support as
clang offers through options `-fno-stack-protector`, `-fstack-protector`,
`-fstack-protector-strong`, and `-fstack-protector-all`. The protection this can
offer is demonstrated in test/ui/abi/stack-protector.rs. This fills a gap in the
current list of rustc exploit
mitigations (https://doc.rust-lang.org/rustc/exploit-mitigations.html),
originally discussed in #15179.
Stack smash protection adds runtime overhead and is therefore still off by
default, but now users have the option to trade performance for security as they
see fit. An example use case is adding Rust code in an existing C/C++ code base
compiled with stack smash protection. Without the ability to add stack smash
protection to the Rust code, the code base artifacts could be exploitable in
ways not possible if the code base remained pure C/C++.
Stack smash protection support is present in LLVM for almost all the current
tier 1/tier 2 targets: see
test/assembly/stack-protector/stack-protector-target-support.rs. The one
exception is nvptx64-nvidia-cuda. This patch follows clang's example, and adds a
warning message printed if stack smash protection is used with this target (see
test/ui/stack-protector/warn-stack-protector-unsupported.rs). Support for tier 3
targets has not been checked.
Since the heuristics are applied at the LLVM level, the heuristics are expected
to add stack smash protection to a fraction of functions comparable to C/C++.
Some experiments demonstrating how Rust code is affected by the different
heuristics can be found in
test/assembly/stack-protector/stack-protector-heuristics-effect.rs. There is
potential for better heuristics using Rust-specific safety information. For
example it might be reasonable to skip stack smash protection in functions which
transitively only use safe Rust code, or which uses only a subset of functions
the user declares safe (such as anything under `std.*`). Such alternative
heuristics could be added at a later point.
LLVM also offers a "safestack" sanitizer as an alternative way to guard against
stack smashing (see #26612). This could possibly also be included as a
stack-protection heuristic. An alternative is to add it as a sanitizer (#39699).
This is what clang does: safestack is exposed with option
`-fsanitize=safe-stack`.
The options are only supported by the LLVM backend, but as with other codegen
options it is visible in the main codegen option help menu. The heuristic names
"basic", "strong", and "all" are hopefully sufficiently generic to be usable in
other backends as well.
Reviewed-by: Nikita Popov <nikic@php.net>
Extra commits during review:
- [address-review] make the stack-protector option unstable
- [address-review] reduce detail level of stack-protector option help text
- [address-review] correct grammar in comment
- [address-review] use compiler flag to avoid merging functions in test
- [address-review] specify min LLVM version in fortanix stack-protector test
Only for Fortanix test, since this target specifically requests the
`--x86-experimental-lvi-inline-asm-hardening` flag.
- [address-review] specify required LLVM components in stack-protector tests
- move stack protector option enum closer to other similar option enums
- rustc_interface/tests: sort debug option list in tracking hash test
- add an explicit `none` stack-protector option
Revert "set LLVM requirements for all stack protector support test revisions"
This reverts commit a49b74f92a4e7d701d6f6cf63d207a8aff2e0f68.
2021-04-06 14:37:49 -05:00
|
|
|
pub fn stack_protector(&self) -> StackProtector {
|
|
|
|
if self.target.options.supports_stack_protector {
|
|
|
|
self.opts.debugging_opts.stack_protector
|
|
|
|
} else {
|
|
|
|
StackProtector::None
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-11-30 10:39:08 -06:00
|
|
|
/// Whether the target supports split DWARF: everything except
/// Windows-like and macOS-like targets.
pub fn target_can_use_split_dwarf(&self) -> bool {
    !self.target.is_like_windows && !self.target.is_like_osx
}
|
|
|
|
|
Add Option to Force Unwind Tables
When panic != unwind, `nounwind` is added to all functions for a target.
This can cause issues when a panic happens with RUST_BACKTRACE=1, as
there needs to be a way to reconstruct the backtrace. There are three
possible sources of this information: forcing frame pointers (for which
an option exists already), debug info (for which an option exists), or
unwind tables.
Especially for embedded devices, forcing frame pointers can have code
size overheads (RISC-V sees ~10% overheads, ARM sees ~2-3% overheads).
In code, it can be the case that debug info is not kept, so it is useful
to provide this third option, unwind tables, that users can use to
reconstruct the call stack. Reconstructing this stack is harder than
with frame pointers, but it is still possible.
This commit adds a compiler option which allows a user to force the
addition of unwind tables. Unwind tables cannot be disabled on targets
that require them for correctness, or when using `-C panic=unwind`.
2020-05-04 06:08:35 -05:00
|
|
|
/// Whether the `uwtable` LLVM attribute must be emitted for functions.
pub fn must_emit_unwind_tables(&self) -> bool {
    // This is used to control the emission of the `uwtable` attribute on
    // LLVM functions.
    //
    // Unwind tables are needed when compiling with `-C panic=unwind`, but
    // LLVM won't omit unwind tables unless the function is also marked as
    // `nounwind`, so users are allowed to disable `uwtable` emission.
    // Historically rustc always emits `uwtable` attributes by default, so
    // even they can be disabled, they're still emitted by default.
    //
    // On some targets (including windows), however, exceptions include
    // other events such as illegal instructions, segfaults, etc. This means
    // that on Windows we end up still needing unwind tables even if the `-C
    // panic=abort` flag is passed.
    //
    // You can also find more info on why Windows needs unwind tables in:
    // https://bugzilla.mozilla.org/show_bug.cgi?id=1302078
    //
    // If a target requires unwind tables, then they must be emitted.
    // Otherwise, we can defer to the `-C force-unwind-tables=<yes/no>`
    // value, if it is provided, or disable them, if not.
    self.target.requires_uwtable
        || self.opts.cg.force_unwind_tables.unwrap_or(
            self.panic_strategy() == PanicStrategy::Unwind || self.target.default_uwtable,
        )
}
|
|
|
|
|
2021-06-08 11:36:30 -05:00
|
|
|
/// Builds the exported symbol name under which a proc-macro crate's
/// declarations are found, derived from the crate's stable crate id.
pub fn generate_proc_macro_decls_symbol(&self, stable_crate_id: StableCrateId) -> String {
    format!("__rustc_proc_macro_decls_{:08x}__", stable_crate_id.to_u64())
}
|
|
|
|
|
2018-08-30 00:02:42 -05:00
|
|
|
/// Returns a `FileSearch` for locating files of kind `kind` for the
/// *target* triple being compiled for.
pub fn target_filesearch(&self, kind: PathKind) -> filesearch::FileSearch<'_> {
    filesearch::FileSearch::new(
        &self.sysroot,
        self.opts.target_triple.triple(),
        &self.opts.search_paths,
        // `target_tlib_path` is used for target-specific tool/lib lookups.
        &self.target_tlib_path,
        kind,
    )
}
|
2018-08-30 00:02:42 -05:00
|
|
|
/// Returns a `FileSearch` for locating files of kind `kind` for the
/// *host* triple (the platform the compiler itself runs on).
pub fn host_filesearch(&self, kind: PathKind) -> filesearch::FileSearch<'_> {
    filesearch::FileSearch::new(
        &self.sysroot,
        config::host_triple(),
        &self.opts.search_paths,
        &self.host_tlib_path,
        kind,
    )
}
|
2016-08-11 18:02:39 -05:00
|
|
|
|
2021-08-31 11:36:25 -05:00
|
|
|
/// Returns a list of directories where target-specific tool binaries are located.
///
/// The base directory is `<sysroot>/<rustlib-path>/bin`; when
/// `self_contained` is set, its `self-contained` subdirectory is searched too.
pub fn get_tools_search_paths(&self, self_contained: bool) -> Vec<PathBuf> {
    let rustlib_path = rustc_target::target_rustlib_path(&self.sysroot, &config::host_triple());
    let p = PathBuf::from_iter([
        Path::new(&self.sysroot),
        Path::new(&rustlib_path),
        Path::new("bin"),
    ]);
    if self_contained { vec![p.clone(), p.join("self-contained")] } else { vec![p] }
}
|
|
|
|
|
2018-03-05 23:29:03 -06:00
|
|
|
/// Transitions the incremental-compilation session from `NotInitialized`
/// to `Active` with the given session directory and directory lock.
///
/// # Panics
/// Panics if the session was already initialized.
pub fn init_incr_comp_session(
    &self,
    session_dir: PathBuf,
    lock_file: flock::Lock,
    load_dep_graph: bool,
) {
    let mut incr_comp_session = self.incr_comp_session.borrow_mut();

    // Only the `NotInitialized` state may transition to `Active`.
    if let IncrCompSession::NotInitialized = *incr_comp_session {
    } else {
        panic!("Trying to initialize IncrCompSession `{:?}`", *incr_comp_session)
    }

    *incr_comp_session =
        IncrCompSession::Active { session_directory: session_dir, lock_file, load_dep_graph };
}
|
|
|
|
|
|
|
|
/// Transitions the incremental-compilation session from `Active` to
/// `Finalized`, recording the directory's final (renamed) path.
///
/// # Panics
/// Panics unless the session is currently `Active`.
pub fn finalize_incr_comp_session(&self, new_directory_path: PathBuf) {
    let mut incr_comp_session = self.incr_comp_session.borrow_mut();

    if let IncrCompSession::Active { .. } = *incr_comp_session {
    } else {
        panic!("trying to finalize `IncrCompSession` `{:?}`", *incr_comp_session);
    }

    // Note: this will also drop the lock file, thus unlocking the directory.
    *incr_comp_session = IncrCompSession::Finalized { session_directory: new_directory_path };
}
|
|
|
|
|
|
|
|
/// Transitions an `Active` incremental-compilation session to
/// `InvalidBecauseOfErrors`. A no-op if the session is already invalid.
///
/// # Panics
/// Panics if the session is in any other state (e.g. not initialized).
pub fn mark_incr_comp_session_as_invalid(&self) {
    let mut incr_comp_session = self.incr_comp_session.borrow_mut();

    let session_directory = match *incr_comp_session {
        IncrCompSession::Active { ref session_directory, .. } => session_directory.clone(),
        // Already marked invalid: nothing more to do.
        IncrCompSession::InvalidBecauseOfErrors { .. } => return,
        _ => panic!("trying to invalidate `IncrCompSession` `{:?}`", *incr_comp_session),
    };

    // Note: this will also drop the lock file, thus unlocking the directory.
    *incr_comp_session = IncrCompSession::InvalidBecauseOfErrors { session_directory };
}
|
|
|
|
|
2018-08-30 00:02:42 -05:00
|
|
|
/// Returns a borrow of the incremental-compilation session directory.
///
/// # Panics
/// Panics if the incremental session was never initialized.
pub fn incr_comp_session_dir(&self) -> cell::Ref<'_, PathBuf> {
    let incr_comp_session = self.incr_comp_session.borrow();
    cell::Ref::map(incr_comp_session, |incr_comp_session| match *incr_comp_session {
        IncrCompSession::NotInitialized => panic!(
            "trying to get session directory from `IncrCompSession`: {:?}",
            *incr_comp_session,
        ),
        // Every initialized state carries a session directory.
        IncrCompSession::Active { ref session_directory, .. }
        | IncrCompSession::Finalized { ref session_directory }
        | IncrCompSession::InvalidBecauseOfErrors { ref session_directory } => {
            session_directory
        }
    })
}
|
|
|
|
|
2018-08-30 00:02:42 -05:00
|
|
|
/// Like `incr_comp_session_dir`, but returns `None` when incremental
/// compilation is not enabled for this session.
pub fn incr_comp_session_dir_opt(&self) -> Option<cell::Ref<'_, PathBuf>> {
    self.opts.incremental.as_ref().map(|_| self.incr_comp_session_dir())
}
|
2016-08-23 12:23:58 -05:00
|
|
|
|
|
|
|
/// Prints the session's accumulated performance counters to stderr.
pub fn print_perf_stats(&self) {
    eprintln!(
        "Total time spent computing symbol hashes: {}",
        duration_to_secs_str(*self.perf_stats.symbol_hash_time.lock())
    );
    eprintln!(
        "Total queries canonicalized: {}",
        self.perf_stats.queries_canonicalized.load(Ordering::Relaxed)
    );
    eprintln!(
        "normalize_generic_arg_after_erasing_regions: {}",
        self.perf_stats.normalize_generic_arg_after_erasing_regions.load(Ordering::Relaxed)
    );
    eprintln!(
        "normalize_projection_ty: {}",
        self.perf_stats.normalize_projection_ty.load(Ordering::Relaxed)
    );
}
|
2017-03-08 15:28:47 -06:00
|
|
|
|
2017-03-08 21:20:07 -06:00
|
|
|
/// We want to know if we're allowed to do an optimization for crate foo from -z fuel=foo=n.
/// This expends fuel if applicable, and records fuel if applicable.
///
/// `msg` is a closure so that building the warning message is only paid for
/// when the message is actually emitted.
pub fn consider_optimizing<T: Fn() -> String>(&self, crate_name: &str, msg: T) -> bool {
    let mut ret = true;
    if let Some((ref c, _)) = self.opts.debugging_opts.fuel {
        if c == crate_name {
            // Fuel accounting is not thread-safe: it requires single-threaded compilation.
            assert_eq!(self.threads(), 1);
            let mut fuel = self.optimization_fuel.lock();
            ret = fuel.remaining != 0;
            if fuel.remaining == 0 && !fuel.out_of_fuel {
                if self.diagnostic().can_emit_warnings() {
                    // We only call `msg` in case we can actually emit warnings.
                    // Otherwise, this could cause a `delay_good_path_bug` to
                    // trigger (issue #79546).
                    self.warn(&format!("optimization-fuel-exhausted: {}", msg()));
                }
                // Warn only once per session.
                fuel.out_of_fuel = true;
            } else if fuel.remaining > 0 {
                fuel.remaining -= 1;
            }
        }
    }
    if let Some(ref c) = self.opts.debugging_opts.print_fuel {
        if c == crate_name {
            assert_eq!(self.threads(), 1);
            // Count how much fuel this crate would have consumed.
            self.print_fuel.fetch_add(1, SeqCst);
        }
    }
    ret
}
|
2017-10-04 16:38:52 -05:00
|
|
|
|
2017-12-03 07:16:19 -06:00
|
|
|
/// Returns the number of query threads that should be used for this
|
|
|
|
/// compilation
|
2019-01-28 08:51:47 -06:00
|
|
|
pub fn threads(&self) -> usize {
|
2019-09-30 15:27:28 -05:00
|
|
|
self.opts.debugging_opts.threads
|
2017-12-03 07:16:19 -06:00
|
|
|
}
|
|
|
|
|
2017-10-04 16:38:52 -05:00
|
|
|
/// Returns the number of codegen units that should be used for this
|
|
|
|
/// compilation
|
|
|
|
pub fn codegen_units(&self) -> usize {
|
|
|
|
if let Some(n) = self.opts.cli_forced_codegen_units {
|
2018-03-05 23:29:03 -06:00
|
|
|
return n;
|
2017-10-04 16:38:52 -05:00
|
|
|
}
|
2020-11-08 05:27:51 -06:00
|
|
|
if let Some(n) = self.target.default_codegen_units {
|
2018-03-05 23:29:03 -06:00
|
|
|
return n as usize;
|
2017-10-04 16:38:52 -05:00
|
|
|
}
|
|
|
|
|
2020-03-19 11:14:02 -05:00
|
|
|
// If incremental compilation is turned on, we default to a high number
|
|
|
|
// codegen units in order to reduce the "collateral damage" small
|
|
|
|
// changes cause.
|
|
|
|
if self.opts.incremental.is_some() {
|
|
|
|
return 256;
|
|
|
|
}
|
|
|
|
|
2017-11-25 13:13:58 -06:00
|
|
|
// Why is 16 codegen units the default all the time?
|
|
|
|
//
|
|
|
|
// The main reason for enabling multiple codegen units by default is to
|
2018-05-08 08:10:16 -05:00
|
|
|
// leverage the ability for the codegen backend to do codegen and
|
|
|
|
// optimization in parallel. This allows us, especially for large crates, to
|
2017-11-25 13:13:58 -06:00
|
|
|
// make good use of all available resources on the machine once we've
|
|
|
|
// hit that stage of compilation. Large crates especially then often
|
2018-05-08 08:10:16 -05:00
|
|
|
// take a long time in codegen/optimization and this helps us amortize that
|
2017-11-25 13:13:58 -06:00
|
|
|
// cost.
|
|
|
|
//
|
|
|
|
// Note that a high number here doesn't mean that we'll be spawning a
|
|
|
|
// large number of threads in parallel. The backend of rustc contains
|
|
|
|
// global rate limiting through the `jobserver` crate so we'll never
|
|
|
|
// overload the system with too much work, but rather we'll only be
|
|
|
|
// optimizing when we're otherwise cooperating with other instances of
|
|
|
|
// rustc.
|
|
|
|
//
|
|
|
|
// Rather a high number here means that we should be able to keep a lot
|
|
|
|
// of idle cpus busy. By ensuring that no codegen unit takes *too* long
|
|
|
|
// to build we'll be guaranteed that all cpus will finish pretty closely
|
|
|
|
// to one another and we should make relatively optimal use of system
|
|
|
|
// resources
|
|
|
|
//
|
|
|
|
// Note that the main cost of codegen units is that it prevents LLVM
|
|
|
|
// from inlining across codegen units. Users in general don't have a lot
|
|
|
|
// of control over how codegen units are split up so it's our job in the
|
|
|
|
// compiler to ensure that undue performance isn't lost when using
|
|
|
|
// codegen units (aka we can't require everyone to slap `#[inline]` on
|
|
|
|
// everything).
|
|
|
|
//
|
|
|
|
// If we're compiling at `-O0` then the number doesn't really matter too
|
|
|
|
// much because performance doesn't matter and inlining is ok to lose.
|
|
|
|
// In debug mode we just want to try to guarantee that no cpu is stuck
|
|
|
|
// doing work that could otherwise be farmed to others.
|
|
|
|
//
|
|
|
|
// In release mode, however (O1 and above) performance does indeed
|
|
|
|
// matter! To recover the loss in performance due to inlining we'll be
|
|
|
|
// enabling ThinLTO by default (the function for which is just below).
|
|
|
|
// This will ensure that we recover any inlining wins we otherwise lost
|
|
|
|
// through codegen unit partitioning.
|
|
|
|
//
|
|
|
|
// ---
|
|
|
|
//
|
|
|
|
// Ok that's a lot of words but the basic tl;dr; is that we want a high
|
|
|
|
// number here -- but not too high. Additionally we're "safe" to have it
|
|
|
|
// always at the same number at all optimization levels.
|
|
|
|
//
|
|
|
|
// As a result 16 was chosen here! Mostly because it was a power of 2
|
|
|
|
// and most benchmarks agreed it was roughly a local optimum. Not very
|
|
|
|
// scientific.
|
2017-12-21 09:03:16 -06:00
|
|
|
16
|
2017-11-25 13:13:58 -06:00
|
|
|
}
|
|
|
|
|
2018-01-23 13:34:57 -06:00
|
|
|
pub fn teach(&self, code: &DiagnosticId) -> bool {
|
2018-07-26 17:18:06 -05:00
|
|
|
self.opts.debugging_opts.teach && self.diagnostic().must_teach(code)
|
2018-01-22 20:07:35 -06:00
|
|
|
}
|
2018-02-04 10:52:26 -06:00
|
|
|
|
2018-11-02 16:07:56 -05:00
|
|
|
pub fn rust_2015(&self) -> bool {
|
|
|
|
self.opts.edition == Edition::Edition2015
|
|
|
|
}
|
|
|
|
|
2018-03-14 22:30:06 -05:00
|
|
|
/// Are we allowed to use features from the Rust 2018 edition?
|
2018-02-04 10:52:26 -06:00
|
|
|
pub fn rust_2018(&self) -> bool {
|
2018-04-19 15:56:26 -05:00
|
|
|
self.opts.edition >= Edition::Edition2018
|
2018-02-04 10:52:26 -06:00
|
|
|
}
|
2018-02-22 18:51:42 -06:00
|
|
|
|
2020-12-30 07:33:46 -06:00
|
|
|
/// Are we allowed to use features from the Rust 2021 edition?
|
|
|
|
pub fn rust_2021(&self) -> bool {
|
|
|
|
self.opts.edition >= Edition::Edition2021
|
|
|
|
}
|
|
|
|
|
2018-03-14 22:30:06 -05:00
|
|
|
pub fn edition(&self) -> Edition {
|
2018-04-19 15:56:26 -05:00
|
|
|
self.opts.edition
|
2018-02-22 18:51:42 -06:00
|
|
|
}
|
2018-09-26 11:19:55 -05:00
|
|
|
|
2019-02-08 07:53:55 -06:00
|
|
|
/// Returns `true` if we cannot skip the PLT for shared library calls.
|
2018-09-26 11:19:55 -05:00
|
|
|
pub fn needs_plt(&self) -> bool {
|
|
|
|
// Check if the current target usually needs PLT to be enabled.
|
|
|
|
// The user can use the command line flag to override it.
|
2020-11-08 05:27:51 -06:00
|
|
|
let needs_plt = self.target.needs_plt;
|
2018-09-26 11:19:55 -05:00
|
|
|
|
|
|
|
let dbg_opts = &self.opts.debugging_opts;
|
|
|
|
|
2020-11-08 05:27:51 -06:00
|
|
|
let relro_level = dbg_opts.relro_level.unwrap_or(self.target.relro_level);
|
2018-09-26 11:19:55 -05:00
|
|
|
|
|
|
|
// Only enable this optimization by default if full relro is also enabled.
|
|
|
|
// In this case, lazy binding was already unavailable, so nothing is lost.
|
|
|
|
// This also ensures `-Wl,-z,now` is supported by the linker.
|
|
|
|
let full_relro = RelroLevel::Full == relro_level;
|
|
|
|
|
|
|
|
// If user didn't explicitly forced us to use / skip the PLT,
|
|
|
|
// then try to skip it where possible.
|
|
|
|
dbg_opts.plt.unwrap_or(needs_plt || !full_relro)
|
|
|
|
}
|
2020-05-12 19:00:00 -05:00
|
|
|
|
|
|
|
/// Checks if LLVM lifetime markers should be emitted.
|
|
|
|
pub fn emit_lifetime_markers(&self) -> bool {
|
2020-06-13 19:00:00 -05:00
|
|
|
self.opts.optimize != config::OptLevel::No
|
|
|
|
// AddressSanitizer uses lifetimes to detect use after scope bugs.
|
|
|
|
// MemorySanitizer uses lifetimes to detect use of uninitialized stack variables.
|
2021-01-22 20:32:38 -06:00
|
|
|
// HWAddressSanitizer will use lifetimes to detect use after scope bugs in the future.
|
|
|
|
|| self.opts.debugging_opts.sanitizer.intersects(SanitizerSet::ADDRESS | SanitizerSet::MEMORY | SanitizerSet::HWADDRESS)
|
2020-05-12 19:00:00 -05:00
|
|
|
}
|
2020-07-29 20:27:50 -05:00
|
|
|
|
2020-08-27 14:53:43 -05:00
|
|
|
pub fn link_dead_code(&self) -> bool {
|
2020-12-11 11:08:05 -06:00
|
|
|
self.opts.cg.link_dead_code.unwrap_or(false)
|
2020-08-27 14:53:43 -05:00
|
|
|
}
|
|
|
|
|
coverage bug fixes and optimization support
Adjusted LLVM codegen for code compiled with `-Zinstrument-coverage` to
address multiple, somewhat related issues.
Fixed a significant flaw in prior coverage solution: Every counter
generated a new counter variable, but there should have only been one
counter variable per function. This appears to have bloated .profraw
files significantly. (For a small program, it increased the size by
about 40%. I have not tested large programs, but there is anecdotal
evidence that profraw files were way too large. This is a good fix,
regardless, but hopefully it also addresses related issues.
Fixes: #82144
Invalid LLVM coverage data produced when compiled with -C opt-level=1
Existing tests now work up to at least `opt-level=3`. This required a
detailed analysis of the LLVM IR, comparisons with Clang C++ LLVM IR
when compiled with coverage, and a lot of trial and error with codegen
adjustments.
The biggest hurdle was figuring out how to continue to support coverage
results for unused functions and generics. Rust's coverage results have
three advantages over Clang's coverage results:
1. Rust's coverage map does not include any overlapping code regions,
making coverage counting unambiguous.
2. Rust generates coverage results (showing zero counts) for all unused
functions, including generics. (Clang does not generate coverage for
uninstantiated template functions.)
3. Rust's unused functions produce minimal stubbed functions in LLVM IR,
sufficient for including in the coverage results; while Clang must
generate the complete LLVM IR for each unused function, even though
it will never be called.
This PR removes the previous hack of attempting to inject coverage into
some other existing function instance, and generates dedicated instances
for each unused function. This change, and a few other adjustments
(similar to what is required for `-C link-dead-code`, but with lower
impact), makes it possible to support LLVM optimizations.
Fixes: #79651
Coverage report: "Unexecuted instantiation:..." for a generic function
from multiple crates
Fixed by removing the aforementioned hack. Some "Unexecuted
instantiation" notices are unavoidable, as explained in the
`used_crate.rs` test, but `-Zinstrument-coverage` has new options to
back off support for either unused generics, or all unused functions,
which avoids the notice, at the cost of less coverage of unused
functions.
Fixes: #82875
Invalid LLVM coverage data produced with crate brotli_decompressor
Fixed by disabling the LLVM function attribute that forces inlining, if
`-Z instrument-coverage` is enabled. This attribute is applied to
Rust functions with `#[inline(always)], and in some cases, the forced
inlining breaks coverage instrumentation and reports.
2021-03-15 18:32:45 -05:00
|
|
|
pub fn instrument_coverage(&self) -> bool {
|
2021-11-29 21:09:01 -06:00
|
|
|
self.opts.instrument_coverage()
|
coverage bug fixes and optimization support
Adjusted LLVM codegen for code compiled with `-Zinstrument-coverage` to
address multiple, somewhat related issues.
Fixed a significant flaw in prior coverage solution: Every counter
generated a new counter variable, but there should have only been one
counter variable per function. This appears to have bloated .profraw
files significantly. (For a small program, it increased the size by
about 40%. I have not tested large programs, but there is anecdotal
evidence that profraw files were way too large. This is a good fix,
regardless, but hopefully it also addresses related issues.
Fixes: #82144
Invalid LLVM coverage data produced when compiled with -C opt-level=1
Existing tests now work up to at least `opt-level=3`. This required a
detailed analysis of the LLVM IR, comparisons with Clang C++ LLVM IR
when compiled with coverage, and a lot of trial and error with codegen
adjustments.
The biggest hurdle was figuring out how to continue to support coverage
results for unused functions and generics. Rust's coverage results have
three advantages over Clang's coverage results:
1. Rust's coverage map does not include any overlapping code regions,
making coverage counting unambiguous.
2. Rust generates coverage results (showing zero counts) for all unused
functions, including generics. (Clang does not generate coverage for
uninstantiated template functions.)
3. Rust's unused functions produce minimal stubbed functions in LLVM IR,
sufficient for including in the coverage results; while Clang must
generate the complete LLVM IR for each unused function, even though
it will never be called.
This PR removes the previous hack of attempting to inject coverage into
some other existing function instance, and generates dedicated instances
for each unused function. This change, and a few other adjustments
(similar to what is required for `-C link-dead-code`, but with lower
impact), makes it possible to support LLVM optimizations.
Fixes: #79651
Coverage report: "Unexecuted instantiation:..." for a generic function
from multiple crates
Fixed by removing the aforementioned hack. Some "Unexecuted
instantiation" notices are unavoidable, as explained in the
`used_crate.rs` test, but `-Zinstrument-coverage` has new options to
back off support for either unused generics, or all unused functions,
which avoids the notice, at the cost of less coverage of unused
functions.
Fixes: #82875
Invalid LLVM coverage data produced with crate brotli_decompressor
Fixed by disabling the LLVM function attribute that forces inlining, if
`-Z instrument-coverage` is enabled. This attribute is applied to
Rust functions with `#[inline(always)], and in some cases, the forced
inlining breaks coverage instrumentation and reports.
2021-03-15 18:32:45 -05:00
|
|
|
}
|
|
|
|
|
|
|
|
pub fn instrument_coverage_except_unused_generics(&self) -> bool {
|
2021-11-29 21:09:01 -06:00
|
|
|
self.opts.instrument_coverage_except_unused_generics()
|
coverage bug fixes and optimization support
Adjusted LLVM codegen for code compiled with `-Zinstrument-coverage` to
address multiple, somewhat related issues.
Fixed a significant flaw in prior coverage solution: Every counter
generated a new counter variable, but there should have only been one
counter variable per function. This appears to have bloated .profraw
files significantly. (For a small program, it increased the size by
about 40%. I have not tested large programs, but there is anecdotal
evidence that profraw files were way too large. This is a good fix,
regardless, but hopefully it also addresses related issues.
Fixes: #82144
Invalid LLVM coverage data produced when compiled with -C opt-level=1
Existing tests now work up to at least `opt-level=3`. This required a
detailed analysis of the LLVM IR, comparisons with Clang C++ LLVM IR
when compiled with coverage, and a lot of trial and error with codegen
adjustments.
The biggest hurdle was figuring out how to continue to support coverage
results for unused functions and generics. Rust's coverage results have
three advantages over Clang's coverage results:
1. Rust's coverage map does not include any overlapping code regions,
making coverage counting unambiguous.
2. Rust generates coverage results (showing zero counts) for all unused
functions, including generics. (Clang does not generate coverage for
uninstantiated template functions.)
3. Rust's unused functions produce minimal stubbed functions in LLVM IR,
sufficient for including in the coverage results; while Clang must
generate the complete LLVM IR for each unused function, even though
it will never be called.
This PR removes the previous hack of attempting to inject coverage into
some other existing function instance, and generates dedicated instances
for each unused function. This change, and a few other adjustments
(similar to what is required for `-C link-dead-code`, but with lower
impact), makes it possible to support LLVM optimizations.
Fixes: #79651
Coverage report: "Unexecuted instantiation:..." for a generic function
from multiple crates
Fixed by removing the aforementioned hack. Some "Unexecuted
instantiation" notices are unavoidable, as explained in the
`used_crate.rs` test, but `-Zinstrument-coverage` has new options to
back off support for either unused generics, or all unused functions,
which avoids the notice, at the cost of less coverage of unused
functions.
Fixes: #82875
Invalid LLVM coverage data produced with crate brotli_decompressor
Fixed by disabling the LLVM function attribute that forces inlining, if
`-Z instrument-coverage` is enabled. This attribute is applied to
Rust functions with `#[inline(always)], and in some cases, the forced
inlining breaks coverage instrumentation and reports.
2021-03-15 18:32:45 -05:00
|
|
|
}
|
|
|
|
|
|
|
|
pub fn instrument_coverage_except_unused_functions(&self) -> bool {
|
2021-11-29 21:09:01 -06:00
|
|
|
self.opts.instrument_coverage_except_unused_functions()
|
coverage bug fixes and optimization support
Adjusted LLVM codegen for code compiled with `-Zinstrument-coverage` to
address multiple, somewhat related issues.
Fixed a significant flaw in prior coverage solution: Every counter
generated a new counter variable, but there should have only been one
counter variable per function. This appears to have bloated .profraw
files significantly. (For a small program, it increased the size by
about 40%. I have not tested large programs, but there is anecdotal
evidence that profraw files were way too large. This is a good fix,
regardless, but hopefully it also addresses related issues.
Fixes: #82144
Invalid LLVM coverage data produced when compiled with -C opt-level=1
Existing tests now work up to at least `opt-level=3`. This required a
detailed analysis of the LLVM IR, comparisons with Clang C++ LLVM IR
when compiled with coverage, and a lot of trial and error with codegen
adjustments.
The biggest hurdle was figuring out how to continue to support coverage
results for unused functions and generics. Rust's coverage results have
three advantages over Clang's coverage results:
1. Rust's coverage map does not include any overlapping code regions,
making coverage counting unambiguous.
2. Rust generates coverage results (showing zero counts) for all unused
functions, including generics. (Clang does not generate coverage for
uninstantiated template functions.)
3. Rust's unused functions produce minimal stubbed functions in LLVM IR,
sufficient for including in the coverage results; while Clang must
generate the complete LLVM IR for each unused function, even though
it will never be called.
This PR removes the previous hack of attempting to inject coverage into
some other existing function instance, and generates dedicated instances
for each unused function. This change, and a few other adjustments
(similar to what is required for `-C link-dead-code`, but with lower
impact), makes it possible to support LLVM optimizations.
Fixes: #79651
Coverage report: "Unexecuted instantiation:..." for a generic function
from multiple crates
Fixed by removing the aforementioned hack. Some "Unexecuted
instantiation" notices are unavoidable, as explained in the
`used_crate.rs` test, but `-Zinstrument-coverage` has new options to
back off support for either unused generics, or all unused functions,
which avoids the notice, at the cost of less coverage of unused
functions.
Fixes: #82875
Invalid LLVM coverage data produced with crate brotli_decompressor
Fixed by disabling the LLVM function attribute that forces inlining, if
`-Z instrument-coverage` is enabled. This attribute is applied to
Rust functions with `#[inline(always)], and in some cases, the forced
inlining breaks coverage instrumentation and reports.
2021-03-15 18:32:45 -05:00
|
|
|
}
|
|
|
|
|
2020-07-29 20:27:50 -05:00
|
|
|
pub fn is_proc_macro_attr(&self, attr: &Attribute) -> bool {
|
|
|
|
[sym::proc_macro, sym::proc_macro_attribute, sym::proc_macro_derive]
|
|
|
|
.iter()
|
2021-07-29 12:00:41 -05:00
|
|
|
.any(|kind| attr.has_name(*kind))
|
2020-07-29 20:27:50 -05:00
|
|
|
}
|
|
|
|
|
|
|
|
pub fn contains_name(&self, attrs: &[Attribute], name: Symbol) -> bool {
|
2021-07-29 12:00:41 -05:00
|
|
|
attrs.iter().any(|item| item.has_name(name))
|
2020-07-29 20:27:50 -05:00
|
|
|
}
|
|
|
|
|
|
|
|
pub fn find_by_name<'a>(
|
|
|
|
&'a self,
|
|
|
|
attrs: &'a [Attribute],
|
|
|
|
name: Symbol,
|
|
|
|
) -> Option<&'a Attribute> {
|
2021-07-29 12:00:41 -05:00
|
|
|
attrs.iter().find(|attr| attr.has_name(name))
|
2020-07-29 20:27:50 -05:00
|
|
|
}
|
|
|
|
|
|
|
|
pub fn filter_by_name<'a>(
|
|
|
|
&'a self,
|
|
|
|
attrs: &'a [Attribute],
|
|
|
|
name: Symbol,
|
|
|
|
) -> impl Iterator<Item = &'a Attribute> {
|
2021-07-29 12:00:41 -05:00
|
|
|
attrs.iter().filter(move |attr| attr.has_name(name))
|
2020-07-29 20:27:50 -05:00
|
|
|
}
|
|
|
|
|
|
|
|
pub fn first_attr_value_str_by_name(
|
|
|
|
&self,
|
|
|
|
attrs: &[Attribute],
|
|
|
|
name: Symbol,
|
|
|
|
) -> Option<Symbol> {
|
2021-07-29 12:00:41 -05:00
|
|
|
attrs.iter().find(|at| at.has_name(name)).and_then(|at| at.value_str())
|
2020-07-29 20:27:50 -05:00
|
|
|
}
|
2010-09-01 15:24:14 -05:00
|
|
|
}
|
2011-12-08 23:05:44 -06:00
|
|
|
|
2018-12-08 13:30:23 -06:00
|
|
|
/// Constructs the diagnostic emitter for a session, chosen by
/// `sopts.error_format`.
///
/// * `registry` — diagnostic-code registry, forwarded to the JSON emitters.
/// * `source_map` — used by the emitters to render source snippets.
/// * `emitter_dest` — when `Some`, output goes to this writer instead of
///   stderr (not consulted by the annotate-snippet emitter; see below).
fn default_emitter(
    sopts: &config::Options,
    registry: rustc_errors::registry::Registry,
    source_map: Lrc<SourceMap>,
    emitter_dest: Option<Box<dyn Write + Send>>,
) -> Box<dyn Emitter + sync::Send> {
    let macro_backtrace = sopts.debugging_opts.macro_backtrace;
    match (sopts.error_format, emitter_dest) {
        (config::ErrorOutputType::HumanReadable(kind), dst) => {
            let (short, color_config) = kind.unzip();

            // NOTE: the annotate-snippet branch ignores `dst` — its
            // constructor here takes no destination writer.
            if let HumanReadableErrorType::AnnotateSnippet(_) = kind {
                let emitter =
                    AnnotateSnippetEmitterWriter::new(Some(source_map), short, macro_backtrace);
                Box::new(emitter.ui_testing(sopts.debugging_opts.ui_testing))
            } else {
                let emitter = match dst {
                    // No custom destination: write human-readable output to stderr.
                    None => EmitterWriter::stderr(
                        color_config,
                        Some(source_map),
                        short,
                        sopts.debugging_opts.teach,
                        sopts.debugging_opts.terminal_width,
                        macro_backtrace,
                    ),
                    // Custom destination (e.g. a capture buffer).
                    Some(dst) => EmitterWriter::new(
                        dst,
                        Some(source_map),
                        short,
                        false, // no teach messages when writing to a buffer
                        false, // no colors when writing to a buffer
                        None, // no terminal width
                        macro_backtrace,
                    ),
                };
                Box::new(emitter.ui_testing(sopts.debugging_opts.ui_testing))
            }
        }
        // JSON to stderr.
        (config::ErrorOutputType::Json { pretty, json_rendered }, None) => Box::new(
            JsonEmitter::stderr(
                Some(registry),
                source_map,
                pretty,
                json_rendered,
                sopts.debugging_opts.terminal_width,
                macro_backtrace,
            )
            .ui_testing(sopts.debugging_opts.ui_testing),
        ),
        // JSON to a custom destination.
        (config::ErrorOutputType::Json { pretty, json_rendered }, Some(dst)) => Box::new(
            JsonEmitter::new(
                dst,
                Some(registry),
                source_map,
                pretty,
                json_rendered,
                sopts.debugging_opts.terminal_width,
                macro_backtrace,
            )
            .ui_testing(sopts.debugging_opts.ui_testing),
        ),
    }
}
|
|
|
|
|
|
|
|
/// Where a session's diagnostics should be written.
pub enum DiagnosticOutput {
    /// Use the standard destination (stderr; see `default_emitter`).
    Default,
    /// Write diagnostics to the given writer instead (e.g. a buffer captured
    /// by a tool embedding the compiler).
    Raw(Box<dyn Write + Send>),
}
|
|
|
|
|
2020-05-27 13:34:17 -05:00
|
|
|
/// Constructs a new `Session` from parsed command-line options, wiring
/// together the diagnostic emitter, source map, host/target configuration,
/// self-profiling, and optimization fuel.
///
/// * `local_crate_source_file` — root source file of the crate, if any; it is
///   remapped through the configured path-prefix mapping.
/// * `target_override` — forwarded to `config::build_target_config` in place
///   of loading the target from `sopts`.
pub fn build_session(
    sopts: config::Options,
    local_crate_source_file: Option<PathBuf>,
    registry: rustc_errors::registry::Registry,
    diagnostics_output: DiagnosticOutput,
    driver_lint_caps: FxHashMap<lint::LintId, lint::Level>,
    file_loader: Option<Box<dyn FileLoader + Send + Sync + 'static>>,
    target_override: Option<Target>,
) -> Session {
    // Decide up front whether warnings can be emitted at all: suppressed when
    // the last `warnings` lint option is `allow`, or when the lint cap is `allow`.
    // FIXME: This is not general enough to make the warning lint completely override
    // normal diagnostic warnings, since the warning lint can also be denied and changed
    // later via the source code.
    let warnings_allow = sopts
        .lint_opts
        .iter()
        .filter(|&&(ref key, _)| *key == "warnings")
        .map(|&(_, ref level)| *level == lint::Allow)
        .last()
        .unwrap_or(false);
    let cap_lints_allow = sopts.lint_cap.map_or(false, |cap| cap == lint::Allow);
    let can_emit_warnings = !(warnings_allow || cap_lints_allow);

    let write_dest = match diagnostics_output {
        DiagnosticOutput::Default => None,
        DiagnosticOutput::Raw(write) => Some(write),
    };

    // Sysroot: explicit override, or auto-detected default.
    let sysroot = match &sopts.maybe_sysroot {
        Some(sysroot) => sysroot.clone(),
        None => filesearch::get_or_default_sysroot(),
    };

    // Load target and host specifications; host load failure is fatal,
    // target warnings are reported but non-fatal.
    let target_cfg = config::build_target_config(&sopts, target_override, &sysroot);
    let host_triple = TargetTriple::from_triple(config::host_triple());
    let (host, target_warnings) = Target::search(&host_triple, &sysroot).unwrap_or_else(|e| {
        early_error(sopts.error_format, &format!("Error loading host specification: {}", e))
    });
    for warning in target_warnings.warning_messages() {
        early_warn(sopts.error_format, &warning)
    }

    // Build the source map; source-file hashing defaults to SHA-1 on
    // MSVC-like targets and MD5 elsewhere unless overridden.
    let loader = file_loader.unwrap_or_else(|| Box::new(RealFileLoader));
    let hash_kind = sopts.debugging_opts.src_hash_algorithm.unwrap_or_else(|| {
        if target_cfg.is_like_msvc {
            SourceFileHashAlgorithm::Sha1
        } else {
            SourceFileHashAlgorithm::Md5
        }
    });
    let source_map = Lrc::new(SourceMap::with_file_loader_and_hash_kind(
        loader,
        sopts.file_path_mapping(),
        hash_kind,
    ));
    let emitter = default_emitter(&sopts, registry, source_map.clone(), write_dest);

    let span_diagnostic = rustc_errors::Handler::with_emitter_and_flags(
        emitter,
        sopts.debugging_opts.diagnostic_handler_flags(can_emit_warnings),
    );

    // Self-profiling (`-Z self-profile[=dir]`): failure to create the profiler
    // is a warning, not an error.
    let self_profiler = if let SwitchWithOptPath::Enabled(ref d) = sopts.debugging_opts.self_profile
    {
        let directory =
            if let Some(ref directory) = d { directory } else { std::path::Path::new(".") };

        let profiler = SelfProfiler::new(
            directory,
            sopts.crate_name.as_deref(),
            &sopts.debugging_opts.self_profile_events,
        );
        match profiler {
            Ok(profiler) => Some(Arc::new(profiler)),
            Err(e) => {
                early_warn(sopts.error_format, &format!("failed to create profiler: {}", e));
                None
            }
        }
    } else {
        None
    };

    let mut parse_sess = ParseSess::with_span_handler(span_diagnostic, source_map);
    parse_sess.assume_incomplete_release = sopts.debugging_opts.assume_incomplete_release;

    // NOTE: shadows the earlier `TargetTriple`-typed `host_triple`; this one
    // is the plain triple string.
    let host_triple = config::host_triple();
    let target_triple = sopts.target_triple.triple();
    let host_tlib_path = Lrc::new(SearchPath::from_sysroot_and_triple(&sysroot, host_triple));
    let target_tlib_path = if host_triple == target_triple {
        // Use the same `SearchPath` if host and target triple are identical to avoid unnecessary
        // rescanning of the target lib path and an unnecessary allocation.
        host_tlib_path.clone()
    } else {
        Lrc::new(SearchPath::from_sysroot_and_triple(&sysroot, target_triple))
    };

    let file_path_mapping = sopts.file_path_mapping();

    let local_crate_source_file =
        local_crate_source_file.map(|path| file_path_mapping.map_prefix(path).0);

    // Fuel starts at the `n` of `-Z fuel=<crate>=<n>` (0 when unset);
    // see `Session::consider_optimizing` for how it is spent.
    let optimization_fuel = Lock::new(OptimizationFuel {
        remaining: sopts.debugging_opts.fuel.as_ref().map_or(0, |i| i.1),
        out_of_fuel: false,
    });
    let print_fuel = AtomicU64::new(0);

    let cgu_reuse_tracker = if sopts.debugging_opts.query_dep_graph {
        CguReuseTracker::new()
    } else {
        CguReuseTracker::new_disabled()
    };

    let prof = SelfProfilerRef::new(
        self_profiler,
        sopts.debugging_opts.time_passes || sopts.debugging_opts.time,
        sopts.debugging_opts.time_passes,
    );

    // CTFE backtrace behavior is controlled by the RUSTC_CTFE_BACKTRACE env
    // var: "immediate" prints at once, any other non-"0" value captures.
    let ctfe_backtrace = Lock::new(match env::var("RUSTC_CTFE_BACKTRACE") {
        Ok(ref val) if val == "immediate" => CtfeBacktrace::Immediate,
        Ok(ref val) if val != "0" => CtfeBacktrace::Capture,
        _ => CtfeBacktrace::Disabled,
    });

    let asm_arch =
        if target_cfg.allow_asm { InlineAsmArch::from_str(&target_cfg.arch).ok() } else { None };

    let sess = Session {
        target: target_cfg,
        host,
        opts: sopts,
        host_tlib_path,
        target_tlib_path,
        parse_sess,
        sysroot,
        local_crate_source_file,
        one_time_diagnostics: Default::default(),
        crate_types: OnceCell::new(),
        stable_crate_id: OnceCell::new(),
        features: OnceCell::new(),
        incr_comp_session: OneThread::new(RefCell::new(IncrCompSession::NotInitialized)),
        cgu_reuse_tracker,
        prof,
        perf_stats: PerfStats {
            symbol_hash_time: Lock::new(Duration::from_secs(0)),
            queries_canonicalized: AtomicUsize::new(0),
            normalize_generic_arg_after_erasing_regions: AtomicUsize::new(0),
            normalize_projection_ty: AtomicUsize::new(0),
        },
        code_stats: Default::default(),
        optimization_fuel,
        print_fuel,
        jobserver: jobserver::client(),
        driver_lint_caps,
        ctfe_backtrace,
        miri_unleashed_features: Lock::new(Default::default()),
        asm_arch,
        target_features: FxHashSet::default(),
    };

    // Some command-line validation needs a fully-built Session.
    validate_commandline_args_with_session_available(&sess);

    sess
}
|
2014-02-06 21:57:09 -06:00
|
|
|
|
2018-08-02 07:26:27 -05:00
|
|
|
// If it is useful to have a Session available already for validating a
|
|
|
|
// commandline argument, you can do so here.
|
|
|
|
fn validate_commandline_args_with_session_available(sess: &Session) {
|
2018-08-06 04:16:28 -05:00
|
|
|
// Since we don't know if code in an rlib will be linked to statically or
|
2020-05-07 04:52:21 -05:00
|
|
|
// dynamically downstream, rustc generates `__imp_` symbols that help linkers
|
|
|
|
// on Windows deal with this lack of knowledge (#27438). Unfortunately,
|
2018-08-06 04:16:28 -05:00
|
|
|
// these manually generated symbols confuse LLD when it tries to merge
|
2020-05-07 04:52:21 -05:00
|
|
|
// bitcode during ThinLTO. Therefore we disallow dynamic linking on Windows
|
2018-08-06 04:16:28 -05:00
|
|
|
// when compiling for LLD ThinLTO. This way we can validly just not generate
|
|
|
|
// the `dllimport` attributes and `__imp_` symbols in that case.
|
2019-12-22 16:42:04 -06:00
|
|
|
if sess.opts.cg.linker_plugin_lto.enabled()
|
|
|
|
&& sess.opts.cg.prefer_dynamic
|
2020-11-08 05:27:51 -06:00
|
|
|
&& sess.target.is_like_windows
|
2019-12-22 16:42:04 -06:00
|
|
|
{
|
|
|
|
sess.err(
|
|
|
|
"Linker plugin based LTO is not supported together with \
|
2020-05-07 04:52:21 -05:00
|
|
|
`-C prefer-dynamic` when targeting Windows-like targets",
|
2019-12-22 16:42:04 -06:00
|
|
|
);
|
2018-08-02 07:26:27 -05:00
|
|
|
}
|
2019-05-22 06:00:09 -05:00
|
|
|
|
|
|
|
// Make sure that any given profiling data actually exists so LLVM can't
|
|
|
|
// decide to silently skip PGO.
|
2019-05-28 09:48:03 -05:00
|
|
|
if let Some(ref path) = sess.opts.cg.profile_use {
|
2019-05-22 06:00:09 -05:00
|
|
|
if !path.exists() {
|
2019-12-22 16:42:04 -06:00
|
|
|
sess.err(&format!(
|
|
|
|
"File `{}` passed to `-C profile-use` does not exist.",
|
|
|
|
path.display()
|
|
|
|
));
|
2019-05-22 06:00:09 -05:00
|
|
|
}
|
|
|
|
}
|
2019-05-21 05:11:23 -05:00
|
|
|
|
2021-05-07 02:41:37 -05:00
|
|
|
// Do the same for sample profile data.
|
|
|
|
if let Some(ref path) = sess.opts.debugging_opts.profile_sample_use {
|
|
|
|
if !path.exists() {
|
|
|
|
sess.err(&format!(
|
|
|
|
"File `{}` passed to `-C profile-sample-use` does not exist.",
|
|
|
|
path.display()
|
|
|
|
));
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
Add Option to Force Unwind Tables
When panic != unwind, `nounwind` is added to all functions for a target.
This can cause issues when a panic happens with RUST_BACKTRACE=1, as
there needs to be a way to reconstruct the backtrace. There are three
possible sources of this information: forcing frame pointers (for which
an option exists already), debug info (for which an option exists), or
unwind tables.
Especially for embedded devices, forcing frame pointers can have code
size overheads (RISC-V sees ~10% overheads, ARM sees ~2-3% overheads).
In code, it can be the case that debug info is not kept, so it is useful
to provide this third option, unwind tables, that users can use to
reconstruct the call stack. Reconstructing this stack is harder than
with frame pointers, but it is still possible.
This commit adds a compiler option which allows a user to force the
addition of unwind tables. Unwind tables cannot be disabled on targets
that require them for correctness, or when using `-C panic=unwind`.
2020-05-04 06:08:35 -05:00
|
|
|
// Unwind tables cannot be disabled if the target requires them.
|
|
|
|
if let Some(include_uwtables) = sess.opts.cg.force_unwind_tables {
|
2020-11-08 05:27:51 -06:00
|
|
|
if sess.target.requires_uwtable && !include_uwtables {
|
Add Option to Force Unwind Tables
When panic != unwind, `nounwind` is added to all functions for a target.
This can cause issues when a panic happens with RUST_BACKTRACE=1, as
there needs to be a way to reconstruct the backtrace. There are three
possible sources of this information: forcing frame pointers (for which
an option exists already), debug info (for which an option exists), or
unwind tables.
Especially for embedded devices, forcing frame pointers can have code
size overheads (RISC-V sees ~10% overheads, ARM sees ~2-3% overheads).
In code, it can be the case that debug info is not kept, so it is useful
to provide this third option, unwind tables, that users can use to
reconstruct the call stack. Reconstructing this stack is harder than
with frame pointers, but it is still possible.
This commit adds a compiler option which allows a user to force the
addition of unwind tables. Unwind tables cannot be disabled on targets
that require them for correctness, or when using `-C panic=unwind`.
2020-05-04 06:08:35 -05:00
|
|
|
sess.err(
|
|
|
|
"target requires unwind tables, they cannot be disabled with \
|
|
|
|
`-C force-unwind-tables=no`.",
|
|
|
|
);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-02-07 16:49:00 -06:00
|
|
|
// Sanitizers can only be used on platforms that we know have working sanitizer codegen.
|
|
|
|
let supported_sanitizers = sess.target.options.supported_sanitizers;
|
|
|
|
let unsupported_sanitizers = sess.opts.debugging_opts.sanitizer - supported_sanitizers;
|
|
|
|
match unsupported_sanitizers.into_iter().count() {
|
|
|
|
0 => {}
|
|
|
|
1 => sess
|
|
|
|
.err(&format!("{} sanitizer is not supported for this target", unsupported_sanitizers)),
|
|
|
|
_ => sess.err(&format!(
|
|
|
|
"{} sanitizers are not supported for this target",
|
|
|
|
unsupported_sanitizers
|
|
|
|
)),
|
|
|
|
}
|
|
|
|
// Cannot mix and match sanitizers.
|
|
|
|
let mut sanitizer_iter = sess.opts.debugging_opts.sanitizer.into_iter();
|
|
|
|
if let (Some(first), Some(second)) = (sanitizer_iter.next(), sanitizer_iter.next()) {
|
|
|
|
sess.err(&format!("`-Zsanitizer={}` is incompatible with `-Zsanitizer={}`", first, second));
|
2019-11-06 18:00:00 -06:00
|
|
|
}
|
2021-05-21 12:24:50 -05:00
|
|
|
|
|
|
|
// Cannot enable crt-static with sanitizers on Linux
|
|
|
|
if sess.crt_static(None) && !sess.opts.debugging_opts.sanitizer.is_empty() {
|
|
|
|
sess.err(
|
2021-10-03 01:53:02 -05:00
|
|
|
"sanitizer is incompatible with statically linked libc, \
|
2021-05-21 12:24:50 -05:00
|
|
|
disable it using `-C target-feature=-crt-static`",
|
|
|
|
);
|
|
|
|
}
|
2021-10-07 17:33:13 -05:00
|
|
|
|
|
|
|
// LLVM CFI requires LTO.
|
|
|
|
if sess.is_sanitizer_cfi_enabled() {
|
|
|
|
if sess.opts.cg.lto == config::LtoCli::Unspecified
|
|
|
|
|| sess.opts.cg.lto == config::LtoCli::No
|
|
|
|
|| sess.opts.cg.lto == config::LtoCli::Thin
|
|
|
|
{
|
|
|
|
sess.err("`-Zsanitizer=cfi` requires `-Clto`");
|
|
|
|
}
|
|
|
|
}
|
add rustc option for using LLVM stack smash protection
LLVM has built-in heuristics for adding stack canaries to functions. These
heuristics can be selected with LLVM function attributes. This patch adds a
rustc option `-Z stack-protector={none,basic,strong,all}` which controls the use
of these attributes. This gives rustc the same stack smash protection support as
clang offers through options `-fno-stack-protector`, `-fstack-protector`,
`-fstack-protector-strong`, and `-fstack-protector-all`. The protection this can
offer is demonstrated in test/ui/abi/stack-protector.rs. This fills a gap in the
current list of rustc exploit
mitigations (https://doc.rust-lang.org/rustc/exploit-mitigations.html),
originally discussed in #15179.
Stack smash protection adds runtime overhead and is therefore still off by
default, but now users have the option to trade performance for security as they
see fit. An example use case is adding Rust code in an existing C/C++ code base
compiled with stack smash protection. Without the ability to add stack smash
protection to the Rust code, the code base artifacts could be exploitable in
ways not possible if the code base remained pure C/C++.
Stack smash protection support is present in LLVM for almost all the current
tier 1/tier 2 targets: see
test/assembly/stack-protector/stack-protector-target-support.rs. The one
exception is nvptx64-nvidia-cuda. This patch follows clang's example, and adds a
warning message printed if stack smash protection is used with this target (see
test/ui/stack-protector/warn-stack-protector-unsupported.rs). Support for tier 3
targets has not been checked.
Since the heuristics are applied at the LLVM level, the heuristics are expected
to add stack smash protection to a fraction of functions comparable to C/C++.
Some experiments demonstrating how Rust code is affected by the different
heuristics can be found in
test/assembly/stack-protector/stack-protector-heuristics-effect.rs. There is
potential for better heuristics using Rust-specific safety information. For
example it might be reasonable to skip stack smash protection in functions which
transitively only use safe Rust code, or which uses only a subset of functions
the user declares safe (such as anything under `std.*`). Such alternative
heuristics could be added at a later point.
LLVM also offers a "safestack" sanitizer as an alternative way to guard against
stack smashing (see #26612). This could possibly also be included as a
stack-protection heuristic. An alternative is to add it as a sanitizer (#39699).
This is what clang does: safestack is exposed with option
`-fsanitize=safe-stack`.
The options are only supported by the LLVM backend, but as with other codegen
options it is visible in the main codegen option help menu. The heuristic names
"basic", "strong", and "all" are hopefully sufficiently generic to be usable in
other backends as well.
Reviewed-by: Nikita Popov <nikic@php.net>
Extra commits during review:
- [address-review] make the stack-protector option unstable
- [address-review] reduce detail level of stack-protector option help text
- [address-review] correct grammar in comment
- [address-review] use compiler flag to avoid merging functions in test
- [address-review] specify min LLVM version in fortanix stack-protector test
Only for Fortanix test, since this target specifically requests the
`--x86-experimental-lvi-inline-asm-hardening` flag.
- [address-review] specify required LLVM components in stack-protector tests
- move stack protector option enum closer to other similar option enums
- rustc_interface/tests: sort debug option list in tracking hash test
- add an explicit `none` stack-protector option
Revert "set LLVM requirements for all stack protector support test revisions"
This reverts commit a49b74f92a4e7d701d6f6cf63d207a8aff2e0f68.
2021-04-06 14:37:49 -05:00
|
|
|
|
|
|
|
if sess.opts.debugging_opts.stack_protector != StackProtector::None {
|
|
|
|
if !sess.target.options.supports_stack_protector {
|
|
|
|
sess.warn(&format!(
|
|
|
|
"`-Z stack-protector={}` is not supported for target {} and will be ignored",
|
|
|
|
sess.opts.debugging_opts.stack_protector, sess.opts.target_triple
|
|
|
|
))
|
|
|
|
}
|
|
|
|
}
|
2018-08-02 07:26:27 -05:00
|
|
|
}
|
|
|
|
|
2016-08-11 18:02:39 -05:00
|
|
|
/// Holds data on the current incremental compilation session, if there is one.
///
/// The session moves through these states in order during a compilation:
/// `NotInitialized` -> `Active` -> `Finalized`, with `InvalidBecauseOfErrors`
/// as a terminal state entered when compilation errors occur.
#[derive(Debug)]
pub enum IncrCompSession {
    /// This is the state the session will be in until the incr. comp. dir is
    /// needed.
    NotInitialized,
    /// This is the state during which the session directory is private and can
    /// be modified.
    ///
    /// `lock_file` presumably keeps the directory locked against concurrent
    /// rustc invocations for the lifetime of this state — TODO(review): confirm
    /// against the code that constructs this variant.
    Active { session_directory: PathBuf, lock_file: flock::Lock, load_dep_graph: bool },
    /// This is the state after the session directory has been finalized. In this
    /// state, the contents of the directory must not be modified any more.
    Finalized { session_directory: PathBuf },
    /// This is an error state that is reached when some compilation error has
    /// occurred. It indicates that the contents of the session directory must
    /// not be used, since they might be invalid.
    InvalidBecauseOfErrors { session_directory: PathBuf },
}
|
|
|
|
|
2021-03-28 11:36:53 -05:00
|
|
|
pub fn early_error_no_abort(output: config::ErrorOutputType, msg: &str) {
|
2018-03-02 23:20:26 -06:00
|
|
|
let emitter: Box<dyn Emitter + sync::Send> = match output {
|
2019-03-25 05:16:58 -05:00
|
|
|
config::ErrorOutputType::HumanReadable(kind) => {
|
|
|
|
let (short, color_config) = kind.unzip();
|
2019-09-07 08:57:11 -05:00
|
|
|
Box::new(EmitterWriter::stderr(color_config, None, short, false, None, false))
|
2017-09-16 12:24:08 -05:00
|
|
|
}
|
2019-12-22 16:42:04 -06:00
|
|
|
config::ErrorOutputType::Json { pretty, json_rendered } => {
|
2020-06-26 07:18:16 -05:00
|
|
|
Box::new(JsonEmitter::basic(pretty, json_rendered, None, false))
|
2019-12-22 16:42:04 -06:00
|
|
|
}
|
2015-12-30 21:50:06 -06:00
|
|
|
};
|
2019-11-29 15:05:28 -06:00
|
|
|
let handler = rustc_errors::Handler::with_emitter(true, None, emitter);
|
2019-09-07 10:21:17 -05:00
|
|
|
handler.struct_fatal(msg).emit();
|
2021-03-28 11:36:53 -05:00
|
|
|
}
|
|
|
|
|
|
|
|
/// Emits a fatal error for `msg` via [`early_error_no_abort`] and then aborts
/// compilation by raising `rustc_errors::FatalError` (hence the `!` return).
pub fn early_error(output: config::ErrorOutputType, msg: &str) -> ! {
    early_error_no_abort(output, msg);
    rustc_errors::FatalError.raise();
}
|
|
|
|
|
2015-12-30 21:50:06 -06:00
|
|
|
pub fn early_warn(output: config::ErrorOutputType, msg: &str) {
|
2018-03-02 23:20:26 -06:00
|
|
|
let emitter: Box<dyn Emitter + sync::Send> = match output {
|
2019-03-25 05:16:58 -05:00
|
|
|
config::ErrorOutputType::HumanReadable(kind) => {
|
|
|
|
let (short, color_config) = kind.unzip();
|
2019-09-07 08:57:11 -05:00
|
|
|
Box::new(EmitterWriter::stderr(color_config, None, short, false, None, false))
|
2017-09-16 12:24:08 -05:00
|
|
|
}
|
2019-12-22 16:42:04 -06:00
|
|
|
config::ErrorOutputType::Json { pretty, json_rendered } => {
|
2020-06-26 07:18:16 -05:00
|
|
|
Box::new(JsonEmitter::basic(pretty, json_rendered, None, false))
|
2019-12-22 16:42:04 -06:00
|
|
|
}
|
2015-12-30 21:50:06 -06:00
|
|
|
};
|
2019-11-29 15:05:28 -06:00
|
|
|
let handler = rustc_errors::Handler::with_emitter(true, None, emitter);
|
2019-09-07 10:21:17 -05:00
|
|
|
handler.struct_warn(msg).emit();
|
2014-11-15 19:30:33 -06:00
|
|
|
}
|