From 975eb312eff3f8e3453e1836995e485b5086515d Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?John=20K=C3=A5re=20Alsaker?=
Date: Mon, 28 Jan 2019 15:51:47 +0100
Subject: [PATCH] Use multiple threads by default. Limits tests to one thread.
 Do some renaming.

---
 Cargo.lock                           |  1 +
 config.toml.example                  |  2 +-
 src/bootstrap/bin/rustc.rs           |  4 +--
 src/bootstrap/compile.rs             |  4 +--
 src/bootstrap/config.rs              |  6 ++--
 src/librustc/Cargo.toml              |  1 +
 src/librustc/dep_graph/graph.rs      |  8 ++---
 src/librustc/lib.rs                  |  1 +
 src/librustc/session/config.rs       | 12 ++++----
 src/librustc/session/mod.rs          | 12 ++++----
 src/librustc/ty/context.rs           | 14 ++++-----
 src/librustc/ty/query/job.rs         | 42 +++++++++++++-------------
 src/librustc/ty/query/mod.rs         |  2 +-
 src/librustc/ty/query/plumbing.rs    |  8 ++---
 src/librustc_data_structures/sync.rs | 44 ++++++++++++++--------------
 src/librustc_driver/driver.rs        |  6 ++--
 src/libsyntax_pos/lib.rs             |  4 +--
 src/libsyntax_pos/symbol.rs          |  4 +--
 src/tools/compiletest/src/runtest.rs |  3 ++
 19 files changed, 92 insertions(+), 86 deletions(-)

diff --git a/Cargo.lock b/Cargo.lock
index c34a325a474..18f05658a17 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -2234,6 +2234,7 @@ dependencies = [
  "jobserver 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)",
  "lazy_static 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)",
+ "num_cpus 1.8.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "parking_lot 0.6.4 (registry+https://github.com/rust-lang/crates.io-index)",
  "polonius-engine 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)",
  "rustc-rayon 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
diff --git a/config.toml.example b/config.toml.example
index 24293fc864c..183d61a43e3 100644
--- a/config.toml.example
+++ b/config.toml.example
@@ -318,7 +318,7 @@
 #incremental = false
 
 # Build rustc with experimental parallelization
-#experimental-parallel-queries = false
+#parallel-compiler = false
 
 # The default linker that will be hard-coded into the generated compiler for
 # targets that don't specify linker explicitly in their target specifications.
diff --git a/src/bootstrap/bin/rustc.rs b/src/bootstrap/bin/rustc.rs
index a0c75cd9e94..7a765973e20 100644
--- a/src/bootstrap/bin/rustc.rs
+++ b/src/bootstrap/bin/rustc.rs
@@ -284,8 +284,8 @@ fn main() {
         }
     }
 
-    if env::var_os("RUSTC_PARALLEL_QUERIES").is_some() {
-        cmd.arg("--cfg").arg("parallel_queries");
+    if env::var_os("RUSTC_PARALLEL_COMPILER").is_some() {
+        cmd.arg("--cfg").arg("parallel_compiler");
     }
 
     if env::var_os("RUSTC_DENY_WARNINGS").is_some() && env::var_os("RUSTC_EXTERNAL_TOOL").is_none()
diff --git a/src/bootstrap/compile.rs b/src/bootstrap/compile.rs
index ec04dee6c32..ddae3cb0d60 100644
--- a/src/bootstrap/compile.rs
+++ b/src/bootstrap/compile.rs
@@ -554,8 +554,8 @@ pub fn rustc_cargo_env(builder: &Builder, cargo: &mut Command) {
     if let Some(ref s) = builder.config.rustc_default_linker {
         cargo.env("CFG_DEFAULT_LINKER", s);
     }
-    if builder.config.rustc_parallel_queries {
-        cargo.env("RUSTC_PARALLEL_QUERIES", "1");
+    if builder.config.rustc_parallel {
+        cargo.env("RUSTC_PARALLEL_COMPILER", "1");
     }
     if builder.config.rust_verify_llvm_ir {
         cargo.env("RUSTC_VERIFY_LLVM_IR", "1");
diff --git a/src/bootstrap/config.rs b/src/bootstrap/config.rs
index a2989f0cffa..ba339c50fc3 100644
--- a/src/bootstrap/config.rs
+++ b/src/bootstrap/config.rs
@@ -97,7 +97,7 @@ pub struct Config {
     pub rust_debuginfo_only_std: bool,
     pub rust_debuginfo_tools: bool,
     pub rust_rpath: bool,
-    pub rustc_parallel_queries: bool,
+    pub rustc_parallel: bool,
     pub rustc_default_linker: Option<String>,
     pub rust_optimize_tests: bool,
     pub rust_debuginfo_tests: bool,
@@ -298,7 +298,7 @@ struct Rust {
     debuginfo_lines: Option<bool>,
     debuginfo_only_std: Option<bool>,
     debuginfo_tools: Option<bool>,
-    experimental_parallel_queries: Option<bool>,
+    parallel_compiler: Option<bool>,
     backtrace: Option<bool>,
     default_linker: Option<String>,
     channel: Option<String>,
@@ -557,7 +557,7 @@ pub fn parse(args: &[String]) -> Config {
         set(&mut config.lld_enabled, rust.lld);
         set(&mut config.lldb_enabled, rust.lldb);
         set(&mut config.llvm_tools_enabled, rust.llvm_tools);
-        config.rustc_parallel_queries = rust.experimental_parallel_queries.unwrap_or(false);
+        config.rustc_parallel = rust.parallel_compiler.unwrap_or(false);
         config.rustc_default_linker = rust.default_linker.clone();
         config.musl_root = rust.musl_root.clone().map(PathBuf::from);
         config.save_toolstates = rust.save_toolstates.clone().map(PathBuf::from);
diff --git a/src/librustc/Cargo.toml b/src/librustc/Cargo.toml
index 504e016e5b8..a5521effc7d 100644
--- a/src/librustc/Cargo.toml
+++ b/src/librustc/Cargo.toml
@@ -15,6 +15,7 @@ fmt_macros = { path = "../libfmt_macros" }
 graphviz = { path = "../libgraphviz" }
 jobserver = "0.1"
 lazy_static = "1.0.0"
+num_cpus = "1.0"
 scoped-tls = { version = "0.1.1", features = ["nightly"] }
 log = { version = "0.4", features = ["release_max_level_info", "std"] }
 polonius-engine = "0.6.2"
diff --git a/src/librustc/dep_graph/graph.rs b/src/librustc/dep_graph/graph.rs
index 63857d6c918..c9353a451e2 100644
--- a/src/librustc/dep_graph/graph.rs
+++ b/src/librustc/dep_graph/graph.rs
@@ -580,7 +580,7 @@ fn try_mark_previous_green<'tcx>(
 ) -> Option<DepNodeIndex> {
     debug!("try_mark_previous_green({:?}) - BEGIN", dep_node);
 
-    #[cfg(not(parallel_queries))]
+    #[cfg(not(parallel_compiler))]
     {
         debug_assert!(!data.current.borrow().node_to_node_index.contains_key(dep_node));
         debug_assert!(data.colors.get(prev_dep_node_index).is_none());
@@ -743,7 +743,7 @@ fn try_mark_previous_green<'tcx>(
 
         // ... and finally storing a "Green" entry in the color map.
         // Multiple threads can all write the same color here
-        #[cfg(not(parallel_queries))]
+        #[cfg(not(parallel_compiler))]
         debug_assert!(data.colors.get(prev_dep_node_index).is_none(),
                       "DepGraph::try_mark_previous_green() - Duplicate DepNodeColor \
                       insertion for {:?}", dep_node);
@@ -766,7 +766,7 @@ fn emit_diagnostics<'tcx>(
     did_allocation: bool,
     diagnostics: Vec<Diagnostic>,
 ) {
-    if did_allocation || !cfg!(parallel_queries) {
+    if did_allocation || !cfg!(parallel_compiler) {
         // Only the thread which did the allocation emits the error messages
         let handle = tcx.sess.diagnostic();
 
@@ -778,7 +778,7 @@ fn emit_diagnostics<'tcx>(
             DiagnosticBuilder::new_diagnostic(handle, diagnostic).emit();
         }
 
-        #[cfg(parallel_queries)]
+        #[cfg(parallel_compiler)]
         {
             // Mark the diagnostics and emitted and wake up waiters
             data.emitted_diagnostics.lock().insert(dep_node_index);
diff --git a/src/librustc/lib.rs b/src/librustc/lib.rs
index 7ecec0b9a69..f886e50246a 100644
--- a/src/librustc/lib.rs
+++ b/src/librustc/lib.rs
@@ -70,6 +70,7 @@ extern crate fmt_macros;
 extern crate getopts;
 extern crate graphviz;
+extern crate num_cpus;
 #[macro_use] extern crate lazy_static;
 #[macro_use] extern crate scoped_tls;
 #[cfg(windows)]
diff --git a/src/librustc/session/config.rs b/src/librustc/session/config.rs
index e2f4049b1db..4b1aefb2216 100644
--- a/src/librustc/session/config.rs
+++ b/src/librustc/session/config.rs
@@ -1194,8 +1194,8 @@ fn parse_merge_functions(slot: &mut Option<MergeFunctions>, v: Option<&str>) -> bool {
         "prints the llvm optimization passes being run"),
     ast_json: bool = (false, parse_bool, [UNTRACKED],
         "print the AST as JSON and halt"),
-    query_threads: Option<usize> = (None, parse_opt_uint, [UNTRACKED],
-        "execute queries on a thread pool with N threads"),
+    threads: Option<usize> = (None, parse_opt_uint, [UNTRACKED],
+        "use a thread pool with N threads"),
     ast_json_noexpand: bool = (false, parse_bool, [UNTRACKED],
         "print the pre-expansion AST as JSON and halt"),
     ls: bool = (false, parse_bool, [UNTRACKED],
@@ -1986,17 +1986,17 @@ pub fn build_session_options_and_crate_config(
         }
     }
 
-    if debugging_opts.query_threads == Some(0) {
+    if debugging_opts.threads == Some(0) {
         early_error(
             error_format,
-            "Value for query threads must be a positive nonzero integer",
+            "Value for threads must be a positive nonzero integer",
         );
     }
 
-    if debugging_opts.query_threads.unwrap_or(1) > 1 && debugging_opts.fuel.is_some() {
+    if debugging_opts.threads.unwrap_or(1) > 1 && debugging_opts.fuel.is_some() {
         early_error(
             error_format,
-            "Optimization fuel is incompatible with multiple query threads",
+            "Optimization fuel is incompatible with multiple threads",
         );
     }
diff --git a/src/librustc/session/mod.rs b/src/librustc/session/mod.rs
index 875021e20d4..c5034415d6f 100644
--- a/src/librustc/session/mod.rs
+++ b/src/librustc/session/mod.rs
@@ -877,7 +877,7 @@ pub fn consider_optimizing<T: Fn() -> String>(&self, crate_name: &str, msg: T) -> bool {
         let mut ret = true;
         if let Some(ref c) = self.optimization_fuel_crate {
             if c == crate_name {
-                assert_eq!(self.query_threads(), 1);
+                assert_eq!(self.threads(), 1);
                 let mut fuel = self.optimization_fuel.lock();
                 ret = fuel.remaining != 0;
                 if fuel.remaining == 0 && !fuel.out_of_fuel {
@@ -890,7 +890,7 @@ pub fn consider_optimizing<T: Fn() -> String>(&self, crate_name: &str, msg: T) -> bool {
         }
         if let Some(ref c) = self.print_fuel_crate {
             if c == crate_name {
-                assert_eq!(self.query_threads(), 1);
+                assert_eq!(self.threads(), 1);
                 self.print_fuel.fetch_add(1, SeqCst);
             }
         }
@@ -899,14 +899,14 @@ pub fn consider_optimizing<T: Fn() -> String>(&self, crate_name: &str, msg: T) -> bool {
     /// Returns the number of query threads that should be used for this
     /// compilation
-    pub fn query_threads_from_opts(opts: &config::Options) -> usize {
-        opts.debugging_opts.query_threads.unwrap_or(1)
+    pub fn threads_from_opts(opts: &config::Options) -> usize {
+        opts.debugging_opts.threads.unwrap_or(::num_cpus::get())
     }
 
     /// Returns the number of query threads that should be used for this
     /// compilation
-    pub fn query_threads(&self) -> usize {
-        Self::query_threads_from_opts(&self.opts)
+    pub fn threads(&self) -> usize {
+        Self::threads_from_opts(&self.opts)
     }
 
     /// Returns the number of codegen units that should be used for this
diff --git a/src/librustc/ty/context.rs b/src/librustc/ty/context.rs
index a0da7cf2137..881c0d4e6d2 100644
--- a/src/librustc/ty/context.rs
+++ b/src/librustc/ty/context.rs
@@ -1823,10 +1823,10 @@ pub mod tls {
     use rustc_data_structures::thin_vec::ThinVec;
     use dep_graph::TaskDeps;
 
-    #[cfg(not(parallel_queries))]
+    #[cfg(not(parallel_compiler))]
     use std::cell::Cell;
 
-    #[cfg(parallel_queries)]
+    #[cfg(parallel_compiler)]
     use rayon_core;
 
     /// This is the implicit state of rustc. It contains the current
@@ -1859,7 +1859,7 @@ pub struct ImplicitCtxt<'a, 'gcx: 'tcx, 'tcx> {
     /// Sets Rayon's thread local variable which is preserved for Rayon jobs
     /// to `value` during the call to `f`. It is restored to its previous value after.
     /// This is used to set the pointer to the new ImplicitCtxt.
-    #[cfg(parallel_queries)]
+    #[cfg(parallel_compiler)]
     #[inline]
     fn set_tlv<F: FnOnce() -> R, R>(value: usize, f: F) -> R {
         rayon_core::tlv::with(value, f)
@@ -1867,20 +1867,20 @@ fn set_tlv<F: FnOnce() -> R, R>(value: usize, f: F) -> R {
 
     /// Gets Rayon's thread local variable which is preserved for Rayon jobs.
     /// This is used to get the pointer to the current ImplicitCtxt.
-    #[cfg(parallel_queries)]
+    #[cfg(parallel_compiler)]
     #[inline]
     fn get_tlv() -> usize {
         rayon_core::tlv::get()
     }
 
     /// A thread local variable which stores a pointer to the current ImplicitCtxt
-    #[cfg(not(parallel_queries))]
+    #[cfg(not(parallel_compiler))]
     thread_local!(static TLV: Cell<usize> = Cell::new(0));
 
     /// Sets TLV to `value` during the call to `f`.
     /// It is restored to its previous value after.
     /// This is used to set the pointer to the new ImplicitCtxt.
-    #[cfg(not(parallel_queries))]
+    #[cfg(not(parallel_compiler))]
     #[inline]
     fn set_tlv<F: FnOnce() -> R, R>(value: usize, f: F) -> R {
         let old = get_tlv();
@@ -1890,7 +1890,7 @@ fn set_tlv<F: FnOnce() -> R, R>(value: usize, f: F) -> R {
     }
 
     /// This is used to get the pointer to the current ImplicitCtxt.
-    #[cfg(not(parallel_queries))]
+    #[cfg(not(parallel_compiler))]
     fn get_tlv() -> usize {
         TLV.with(|tlv| tlv.get())
     }
diff --git a/src/librustc/ty/query/job.rs b/src/librustc/ty/query/job.rs
index d794429a8a7..abbf74a7761 100644
--- a/src/librustc/ty/query/job.rs
+++ b/src/librustc/ty/query/job.rs
@@ -8,7 +8,7 @@ use ty::tls;
 use ty::query::Query;
 use ty::query::plumbing::CycleError;
 
-#[cfg(not(parallel_queries))]
+#[cfg(not(parallel_compiler))]
 use ty::query::{
     plumbing::TryGetJob,
     config::QueryDescription,
@@ -17,7 +17,7 @@ use std::process;
 use std::{fmt, ptr};
 
-#[cfg(parallel_queries)]
+#[cfg(parallel_compiler)]
 use {
     rayon_core,
     parking_lot::{Mutex, Condvar},
@@ -54,7 +54,7 @@ pub struct QueryJob<'tcx> {
     pub parent: Option<Lrc<QueryJob<'tcx>>>,
 
     /// The latch which is used to wait on this job
-    #[cfg(parallel_queries)]
+    #[cfg(parallel_compiler)]
     latch: QueryLatch<'tcx>,
 }
@@ -64,7 +64,7 @@ pub fn new(info: QueryInfo<'tcx>, parent: Option<Lrc<QueryJob<'tcx>>>) -> Self {
         QueryJob {
             info,
             parent,
-            #[cfg(parallel_queries)]
+            #[cfg(parallel_compiler)]
             latch: QueryLatch::new(),
         }
     }
@@ -73,7 +73,7 @@ pub fn new(info: QueryInfo<'tcx>, parent: Option<Lrc<QueryJob<'tcx>>>) -> Self {
     ///
     /// For single threaded rustc there's no concurrent jobs running, so if we are waiting for any
    /// query that means that there is a query cycle, thus this always running a cycle error.
-    #[cfg(not(parallel_queries))]
+    #[cfg(not(parallel_compiler))]
     #[inline(never)]
     #[cold]
     pub(super) fn cycle_error<'lcx, 'a, D: QueryDescription<'tcx>>(
@@ -88,7 +88,7 @@ pub(super) fn cycle_error<'lcx, 'a, D: QueryDescription<'tcx>>(
     ///
     /// For single threaded rustc there's no concurrent jobs running, so if we are waiting for any
     /// query that means that there is a query cycle, thus this always running a cycle error.
-    #[cfg(parallel_queries)]
+    #[cfg(parallel_compiler)]
     pub(super) fn await<'lcx>(
         &self,
         tcx: TyCtxt<'_, 'tcx, 'lcx>,
@@ -113,7 +113,7 @@ pub(super) fn await<'lcx>(
         })
     }
 
-    #[cfg(not(parallel_queries))]
+    #[cfg(not(parallel_compiler))]
     fn find_cycle_in_stack<'lcx>(
         &self,
         tcx: TyCtxt<'_, 'tcx, 'lcx>,
@@ -152,7 +152,7 @@ fn find_cycle_in_stack<'lcx>(
     /// This does nothing for single threaded rustc,
     /// as there are no concurrent jobs which could be waiting on us
     pub fn signal_complete(&self) {
-        #[cfg(parallel_queries)]
+        #[cfg(parallel_compiler)]
         self.latch.set();
     }
@@ -161,7 +161,7 @@ fn as_ptr(&self) -> *const QueryJob<'tcx> {
     }
 }
 
-#[cfg(parallel_queries)]
+#[cfg(parallel_compiler)]
 struct QueryWaiter<'tcx> {
     query: Option<Lrc<QueryJob<'tcx>>>,
     condvar: Condvar,
@@ -169,7 +169,7 @@ struct QueryWaiter<'tcx> {
     cycle: Lock<Option<CycleError<'tcx>>>,
 }
 
-#[cfg(parallel_queries)]
+#[cfg(parallel_compiler)]
 impl<'tcx> QueryWaiter<'tcx> {
     fn notify(&self, registry: &rayon_core::Registry) {
         rayon_core::mark_unblocked(registry);
@@ -177,18 +177,18 @@ fn notify(&self, registry: &rayon_core::Registry) {
     }
 }
 
-#[cfg(parallel_queries)]
+#[cfg(parallel_compiler)]
 struct QueryLatchInfo<'tcx> {
     complete: bool,
     waiters: Vec<Lrc<QueryWaiter<'tcx>>>,
 }
 
-#[cfg(parallel_queries)]
+#[cfg(parallel_compiler)]
 struct QueryLatch<'tcx> {
     info: Mutex<QueryLatchInfo<'tcx>>,
 }
 
-#[cfg(parallel_queries)]
+#[cfg(parallel_compiler)]
 impl<'tcx> QueryLatch<'tcx> {
     fn new() -> Self {
         QueryLatch {
@@ -242,7 +242,7 @@ fn extract_waiter(
 }
 
 /// A resumable waiter of a query. The usize is the index into waiters in the query's latch
-#[cfg(parallel_queries)]
+#[cfg(parallel_compiler)]
 type Waiter<'tcx> = (Lrc<QueryJob<'tcx>>, usize);
 
 /// Visits all the non-resumable and resumable waiters of a query.
@@ -254,7 +254,7 @@ fn extract_waiter(
 /// For visits of resumable waiters it returns Some(Some(Waiter)) which has the
 /// required information to resume the waiter.
 /// If all `visit` calls returns None, this function also returns None.
-#[cfg(parallel_queries)]
+#[cfg(parallel_compiler)]
 fn visit_waiters<'tcx, F>(query: Lrc<QueryJob<'tcx>>, mut visit: F) -> Option<Option<Waiter<'tcx>>>
 where
     F: FnMut(Span, Lrc<QueryJob<'tcx>>) -> Option<Option<Waiter<'tcx>>>
@@ -282,7 +282,7 @@ fn visit_waiters<'tcx, F>(query: Lrc<QueryJob<'tcx>>, mut visit: F) -> Option<Option<Waiter<'tcx>>>
-#[cfg(parallel_queries)]
+#[cfg(parallel_compiler)]
 fn cycle_check<'tcx>(query: Lrc<QueryJob<'tcx>>,
                      span: Span,
                      stack: &mut Vec<(Span, Lrc<QueryJob<'tcx>>)>,
@@ -321,7 +321,7 @@ fn cycle_check<'tcx>(query: Lrc<QueryJob<'tcx>>,
 /// Finds out if there's a path to the compiler root (aka. code which isn't in a query)
 /// from `query` without going through any of the queries in `visited`.
 /// This is achieved with a depth first search.
-#[cfg(parallel_queries)]
+#[cfg(parallel_compiler)]
 fn connected_to_root<'tcx>(
     query: Lrc<QueryJob<'tcx>>,
     visited: &mut FxHashSet<*const QueryJob<'tcx>>
@@ -346,7 +346,7 @@ fn connected_to_root<'tcx>(
 }
 
 // Deterministically pick an query from a list
-#[cfg(parallel_queries)]
+#[cfg(parallel_compiler)]
 fn pick_query<'a, 'tcx, T, F: Fn(&T) -> (Span, Lrc<QueryJob<'tcx>>)>(
     tcx: TyCtxt<'_, 'tcx, '_>,
     queries: &'a [T],
@@ -372,7 +372,7 @@ fn pick_query<'a, 'tcx, T, F: Fn(&T) -> (Span, Lrc<QueryJob<'tcx>>)>(
 /// the function return true.
 /// If a cycle was not found, the starting query is removed from `jobs` and
 /// the function returns false.
-#[cfg(parallel_queries)]
+#[cfg(parallel_compiler)]
 fn remove_cycle<'tcx>(
     jobs: &mut Vec<Lrc<QueryJob<'tcx>>>,
     wakelist: &mut Vec<Lrc<QueryWaiter<'tcx>>>,
@@ -475,7 +475,7 @@ fn remove_cycle<'tcx>(
 /// Creates a new thread and forwards information in thread locals to it.
 /// The new thread runs the deadlock handler.
 /// Must only be called when a deadlock is about to happen.
-#[cfg(parallel_queries)]
+#[cfg(parallel_compiler)]
 pub unsafe fn handle_deadlock() {
     use syntax;
     use syntax_pos;
@@ -514,7 +514,7 @@ pub unsafe fn handle_deadlock() {
 /// uses a query latch and then resuming that waiter.
 /// There may be multiple cycles involved in a deadlock, so this searches
 /// all active queries for cycles before finally resuming all the waiters at once.
-#[cfg(parallel_queries)]
+#[cfg(parallel_compiler)]
 fn deadlock(tcx: TyCtxt<'_, '_, '_>, registry: &rayon_core::Registry) {
     let on_panic = OnDrop(|| {
         eprintln!("deadlock handler panicked, aborting process");
diff --git a/src/librustc/ty/query/mod.rs b/src/librustc/ty/query/mod.rs
index ec1103b0ae5..195bec11ee5 100644
--- a/src/librustc/ty/query/mod.rs
+++ b/src/librustc/ty/query/mod.rs
@@ -69,7 +69,7 @@
 mod job;
 pub use self::job::{QueryJob, QueryInfo};
-#[cfg(parallel_queries)]
+#[cfg(parallel_compiler)]
 pub use self::job::handle_deadlock;
 
 mod keys;
diff --git a/src/librustc/ty/query/plumbing.rs b/src/librustc/ty/query/plumbing.rs
index 541f5b75aa5..e777c883c37 100644
--- a/src/librustc/ty/query/plumbing.rs
+++ b/src/librustc/ty/query/plumbing.rs
@@ -153,12 +153,12 @@ pub(super) fn try_get(
 
                 // If we are single-threaded we know that we have cycle error,
                 // so we just turn the errror
-                #[cfg(not(parallel_queries))]
+                #[cfg(not(parallel_compiler))]
                 return job.cycle_error(tcx, span);
 
                 // With parallel queries we might just have to wait on some other
                 // thread
-                #[cfg(parallel_queries)]
+                #[cfg(parallel_compiler)]
                 {
                     if let Err(cycle) = job.await(tcx, span) {
                         return TryGetJob::JobCompleted(Err(cycle));
@@ -695,7 +695,7 @@ macro_rules! define_queries_inner {
       [$($modifiers:tt)*] fn $name:ident: $node:ident($K:ty) -> $V:ty,)*) => {
 
         use std::mem;
-        #[cfg(parallel_queries)]
+        #[cfg(parallel_compiler)]
         use ty::query::job::QueryResult;
         use rustc_data_structures::sync::Lock;
         use {
@@ -736,7 +736,7 @@ pub fn record_computed_queries(&self, sess: &Session) {
             });
         }
 
-        #[cfg(parallel_queries)]
+        #[cfg(parallel_compiler)]
        pub fn collect_active_jobs(&self) -> Vec<Lrc<QueryJob<'gcx>>> {
             let mut jobs = Vec::new();
diff --git a/src/librustc_data_structures/sync.rs b/src/librustc_data_structures/sync.rs
index 0253eef4dfa..cae3087fe58 100644
--- a/src/librustc_data_structures/sync.rs
+++ b/src/librustc_data_structures/sync.rs
@@ -1,21 +1,21 @@
-//! This module defines types which are thread safe if cfg!(parallel_queries) is true.
+//! This module defines types which are thread safe if cfg!(parallel_compiler) is true.
 //!
 //! `Lrc` is an alias of either Rc or Arc.
 //!
 //! `Lock` is a mutex.
-//! It internally uses `parking_lot::Mutex` if cfg!(parallel_queries) is true,
+//! It internally uses `parking_lot::Mutex` if cfg!(parallel_compiler) is true,
 //! `RefCell` otherwise.
 //!
 //! `RwLock` is a read-write lock.
-//! It internally uses `parking_lot::RwLock` if cfg!(parallel_queries) is true,
+//! It internally uses `parking_lot::RwLock` if cfg!(parallel_compiler) is true,
 //! `RefCell` otherwise.
 //!
-//! `MTLock` is a mutex which disappears if cfg!(parallel_queries) is false.
+//! `MTLock` is a mutex which disappears if cfg!(parallel_compiler) is false.
 //!
-//! `MTRef` is a immutable reference if cfg!(parallel_queries), and an mutable reference otherwise.
+//! `MTRef` is a immutable reference if cfg!(parallel_compiler), and an mutable reference otherwise.
 //!
 //! `rustc_erase_owner!` erases a OwningRef owner into Erased or Erased + Send + Sync
-//! depending on the value of cfg!(parallel_queries).
+//! depending on the value of cfg!(parallel_compiler).
 
 use std::collections::HashMap;
 use std::hash::{Hash, BuildHasher};
@@ -50,7 +50,7 @@ pub fn serial_scope<F, R>(f: F) -> R
 
 pub use std::sync::atomic::Ordering;
 
 cfg_if! {
-    if #[cfg(not(parallel_queries))] {
+    if #[cfg(not(parallel_compiler))] {
         pub auto trait Send {}
         pub auto trait Sync {}
@@ -461,19 +461,19 @@ pub fn get_mut(&mut self) -> &mut T {
         self.0.get_mut()
     }
 
-    #[cfg(parallel_queries)]
+    #[cfg(parallel_compiler)]
     #[inline(always)]
     pub fn try_lock(&self) -> Option<LockGuard<T>> {
         self.0.try_lock()
     }
 
-    #[cfg(not(parallel_queries))]
+    #[cfg(not(parallel_compiler))]
     #[inline(always)]
     pub fn try_lock(&self) -> Option<LockGuard<T>> {
         self.0.try_borrow_mut().ok()
     }
 
-    #[cfg(parallel_queries)]
+    #[cfg(parallel_compiler)]
     #[inline(always)]
     pub fn lock(&self) -> LockGuard<T> {
         if ERROR_CHECKING {
@@ -483,7 +483,7 @@ pub fn lock(&self) -> LockGuard<T> {
         }
     }
 
-    #[cfg(not(parallel_queries))]
+    #[cfg(not(parallel_compiler))]
     #[inline(always)]
     pub fn lock(&self) -> LockGuard<T> {
         self.0.borrow_mut()
@@ -539,13 +539,13 @@ pub fn get_mut(&mut self) -> &mut T {
         self.0.get_mut()
     }
 
-    #[cfg(not(parallel_queries))]
+    #[cfg(not(parallel_compiler))]
     #[inline(always)]
     pub fn read(&self) -> ReadGuard<T> {
         self.0.borrow()
     }
 
-    #[cfg(parallel_queries)]
+    #[cfg(parallel_compiler)]
     #[inline(always)]
     pub fn read(&self) -> ReadGuard<T> {
         if ERROR_CHECKING {
@@ -560,25 +560,25 @@ pub fn with_read_lock<F: FnOnce(&T) -> R, R>(&self, f: F) -> R {
         f(&*self.read())
     }
 
-    #[cfg(not(parallel_queries))]
+    #[cfg(not(parallel_compiler))]
     #[inline(always)]
     pub fn try_write(&self) -> Result<WriteGuard<T>, ()> {
         self.0.try_borrow_mut().map_err(|_| ())
     }
 
-    #[cfg(parallel_queries)]
+    #[cfg(parallel_compiler)]
     #[inline(always)]
     pub fn try_write(&self) -> Result<WriteGuard<T>, ()> {
         self.0.try_write().ok_or(())
     }
 
-    #[cfg(not(parallel_queries))]
+    #[cfg(not(parallel_compiler))]
     #[inline(always)]
     pub fn write(&self) -> WriteGuard<T> {
         self.0.borrow_mut()
     }
 
-    #[cfg(parallel_queries)]
+    #[cfg(parallel_compiler)]
     #[inline(always)]
     pub fn write(&self) -> WriteGuard<T> {
         if ERROR_CHECKING {
@@ -616,27 +616,27 @@ fn clone(&self) -> Self {
 /// It will panic if it is used on multiple threads.
 #[derive(Copy, Clone, Hash, Debug, Eq, PartialEq)]
 pub struct OneThread<T> {
-    #[cfg(parallel_queries)]
+    #[cfg(parallel_compiler)]
     thread: thread::ThreadId,
     inner: T,
 }
 
-#[cfg(parallel_queries)]
+#[cfg(parallel_compiler)]
 unsafe impl<T> std::marker::Sync for OneThread<T> {}
-#[cfg(parallel_queries)]
+#[cfg(parallel_compiler)]
 unsafe impl<T> std::marker::Send for OneThread<T> {}
 
 impl<T> OneThread<T> {
     #[inline(always)]
     fn check(&self) {
-        #[cfg(parallel_queries)]
+        #[cfg(parallel_compiler)]
         assert_eq!(thread::current().id(), self.thread);
     }
 
     #[inline(always)]
     pub fn new(inner: T) -> Self {
         OneThread {
-            #[cfg(parallel_queries)]
+            #[cfg(parallel_compiler)]
             thread: thread::current().id(),
             inner,
         }
diff --git a/src/librustc_driver/driver.rs b/src/librustc_driver/driver.rs
index f5e5f1f5c0b..c586c705676 100644
--- a/src/librustc_driver/driver.rs
+++ b/src/librustc_driver/driver.rs
@@ -56,7 +56,7 @@ use profile;
 
 use super::Compilation;
 
-#[cfg(not(parallel_queries))]
+#[cfg(not(parallel_compiler))]
 pub fn spawn_thread_pool<F: FnOnce(config::Options) -> R + sync::Send, R: sync::Send>(
     opts: config::Options,
     f: F
@@ -66,7 +66,7 @@ pub fn spawn_thread_pool<F: FnOnce(config::Options) -> R + sync::Send, R: sync::Send>(
     })
 }
 
-#[cfg(parallel_queries)]
+#[cfg(parallel_compiler)]
 pub fn spawn_thread_pool<F: FnOnce(config::Options) -> R + sync::Send, R: sync::Send>(
     opts: config::Options,
     f: F
@@ -78,7 +78,7 @@ pub fn spawn_thread_pool<F: FnOnce(config::Options) -> R + sync::Send, R: sync::Send>(
     let gcx_ptr = &Lock::new(0);
 
     let config = ThreadPoolBuilder::new()
-        .num_threads(Session::query_threads_from_opts(&opts))
+        .num_threads(Session::threads_from_opts(&opts))
         .deadlock_handler(|| unsafe { ty::query::handle_deadlock() })
         .stack_size(::STACK_SIZE);
diff --git a/src/libsyntax_pos/lib.rs b/src/libsyntax_pos/lib.rs
index d9d7f9b0cb4..2a857792396 100644
--- a/src/libsyntax_pos/lib.rs
+++ b/src/libsyntax_pos/lib.rs
@@ -238,9 +238,9 @@ pub fn with_ctxt(&self, ctxt: SyntaxContext) -> Span {
 // The interner is pointed to by a thread local value which is only set on the main thread
 // with parallelization is disabled. So we don't allow `Span` to transfer between threads
 // to avoid panics and other errors, even though it would be memory safe to do so.
-#[cfg(not(parallel_queries))]
+#[cfg(not(parallel_compiler))]
 impl !Send for Span {}
-#[cfg(not(parallel_queries))]
+#[cfg(not(parallel_compiler))]
 impl !Sync for Span {}
 
 impl PartialOrd for Span {
diff --git a/src/libsyntax_pos/symbol.rs b/src/libsyntax_pos/symbol.rs
index e741b79bd4c..7097f332b8b 100644
--- a/src/libsyntax_pos/symbol.rs
+++ b/src/libsyntax_pos/symbol.rs
@@ -150,9 +150,9 @@ pub struct SymbolIndex { .. }
 // The interner is pointed to by a thread local value which is only set on the main thread
 // with parallelization is disabled. So we don't allow `Symbol` to transfer between threads
 // to avoid panics and other errors, even though it would be memory safe to do so.
-#[cfg(not(parallel_queries))]
+#[cfg(not(parallel_compiler))]
 impl !Send for Symbol { }
-#[cfg(not(parallel_queries))]
+#[cfg(not(parallel_compiler))]
 impl !Sync for Symbol { }
 
 impl Symbol {
diff --git a/src/tools/compiletest/src/runtest.rs b/src/tools/compiletest/src/runtest.rs
index fc4f7654bd1..3581c4cfbcb 100644
--- a/src/tools/compiletest/src/runtest.rs
+++ b/src/tools/compiletest/src/runtest.rs
@@ -1727,6 +1727,9 @@ fn make_compile_args(&self, input_file: &Path, output_file: TargetLocation) -> Command {
         // FIXME Why is -L here?
         rustc.arg(input_file); //.arg("-L").arg(&self.config.build_base);
 
+        // Use a single thread for efficiency and a deterministic error message order
+        rustc.arg("-Zthreads=1");
+
         // Optionally prevent default --target if specified in test compile-flags.
         let custom_target = self
             .props