Use multiple threads by default. Limits tests to one thread. Do some renaming.

John Kåre Alsaker 2019-01-28 15:51:47 +01:00
parent a21bd75688
commit 975eb312ef
19 changed files with 92 additions and 86 deletions

View File

@@ -2234,6 +2234,7 @@ dependencies = [
"jobserver 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)",
"lazy_static 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
"log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)",
"num_cpus 1.8.0 (registry+https://github.com/rust-lang/crates.io-index)",
"parking_lot 0.6.4 (registry+https://github.com/rust-lang/crates.io-index)",
"polonius-engine 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)",
"rustc-rayon 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",

View File

@@ -318,7 +318,7 @@
#incremental = false
# Build rustc with experimental parallelization
#experimental-parallel-queries = false
#parallel-compiler = false
# The default linker that will be hard-coded into the generated compiler for
# targets that don't specify linker explicitly in their target specifications.

View File

@@ -284,8 +284,8 @@ fn main() {
}
}
if env::var_os("RUSTC_PARALLEL_QUERIES").is_some() {
cmd.arg("--cfg").arg("parallel_queries");
if env::var_os("RUSTC_PARALLEL_COMPILER").is_some() {
cmd.arg("--cfg").arg("parallel_compiler");
}
if env::var_os("RUSTC_DENY_WARNINGS").is_some() && env::var_os("RUSTC_EXTERNAL_TOOL").is_none()

View File

@@ -554,8 +554,8 @@ pub fn rustc_cargo_env(builder: &Builder, cargo: &mut Command) {
if let Some(ref s) = builder.config.rustc_default_linker {
cargo.env("CFG_DEFAULT_LINKER", s);
}
if builder.config.rustc_parallel_queries {
cargo.env("RUSTC_PARALLEL_QUERIES", "1");
if builder.config.rustc_parallel {
cargo.env("RUSTC_PARALLEL_COMPILER", "1");
}
if builder.config.rust_verify_llvm_ir {
cargo.env("RUSTC_VERIFY_LLVM_IR", "1");

View File

@@ -97,7 +97,7 @@ pub struct Config {
pub rust_debuginfo_only_std: bool,
pub rust_debuginfo_tools: bool,
pub rust_rpath: bool,
pub rustc_parallel_queries: bool,
pub rustc_parallel: bool,
pub rustc_default_linker: Option<String>,
pub rust_optimize_tests: bool,
pub rust_debuginfo_tests: bool,
@@ -298,7 +298,7 @@ struct Rust {
debuginfo_lines: Option<bool>,
debuginfo_only_std: Option<bool>,
debuginfo_tools: Option<bool>,
experimental_parallel_queries: Option<bool>,
parallel_compiler: Option<bool>,
backtrace: Option<bool>,
default_linker: Option<String>,
channel: Option<String>,
@@ -557,7 +557,7 @@ pub fn parse(args: &[String]) -> Config {
set(&mut config.lld_enabled, rust.lld);
set(&mut config.lldb_enabled, rust.lldb);
set(&mut config.llvm_tools_enabled, rust.llvm_tools);
config.rustc_parallel_queries = rust.experimental_parallel_queries.unwrap_or(false);
config.rustc_parallel = rust.parallel_compiler.unwrap_or(false);
config.rustc_default_linker = rust.default_linker.clone();
config.musl_root = rust.musl_root.clone().map(PathBuf::from);
config.save_toolstates = rust.save_toolstates.clone().map(PathBuf::from);

View File

@@ -15,6 +15,7 @@ fmt_macros = { path = "../libfmt_macros" }
graphviz = { path = "../libgraphviz" }
jobserver = "0.1"
lazy_static = "1.0.0"
num_cpus = "1.0"
scoped-tls = { version = "0.1.1", features = ["nightly"] }
log = { version = "0.4", features = ["release_max_level_info", "std"] }
polonius-engine = "0.6.2"

View File

@@ -580,7 +580,7 @@ fn try_mark_previous_green<'tcx>(
) -> Option<DepNodeIndex> {
debug!("try_mark_previous_green({:?}) - BEGIN", dep_node);
#[cfg(not(parallel_queries))]
#[cfg(not(parallel_compiler))]
{
debug_assert!(!data.current.borrow().node_to_node_index.contains_key(dep_node));
debug_assert!(data.colors.get(prev_dep_node_index).is_none());
@@ -743,7 +743,7 @@ fn try_mark_previous_green<'tcx>(
// ... and finally storing a "Green" entry in the color map.
// Multiple threads can all write the same color here
#[cfg(not(parallel_queries))]
#[cfg(not(parallel_compiler))]
debug_assert!(data.colors.get(prev_dep_node_index).is_none(),
"DepGraph::try_mark_previous_green() - Duplicate DepNodeColor \
insertion for {:?}", dep_node);
@@ -766,7 +766,7 @@ fn emit_diagnostics<'tcx>(
did_allocation: bool,
diagnostics: Vec<Diagnostic>,
) {
if did_allocation || !cfg!(parallel_queries) {
if did_allocation || !cfg!(parallel_compiler) {
// Only the thread which did the allocation emits the error messages
let handle = tcx.sess.diagnostic();
@@ -778,7 +778,7 @@
DiagnosticBuilder::new_diagnostic(handle, diagnostic).emit();
}
#[cfg(parallel_queries)]
#[cfg(parallel_compiler)]
{
// Mark the diagnostics as emitted and wake up waiters
data.emitted_diagnostics.lock().insert(dep_node_index);
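Under parallel_compiler, only the thread that allocated the dep node emits the diagnostics; it then records the index in `emitted_diagnostics` so waiters on other threads can be woken. A rough, std-only sketch of keeping such a shared "already emitted" set follows; the type and method names here are illustrative, not rustc's.

// Rough std-only sketch of a shared "already emitted" set keyed by dep node
// index; illustrative names, not rustc's actual types.
use std::collections::HashSet;
use std::sync::Mutex;

struct EmittedDiagnostics {
    emitted: Mutex<HashSet<u32>>,
}

impl EmittedDiagnostics {
    /// Record that diagnostics for `dep_node_index` were emitted.
    /// Returns true only for the first caller with that index.
    fn mark_emitted(&self, dep_node_index: u32) -> bool {
        self.emitted.lock().unwrap().insert(dep_node_index)
    }

    /// Has some thread already emitted diagnostics for `dep_node_index`?
    fn is_emitted(&self, dep_node_index: u32) -> bool {
        self.emitted.lock().unwrap().contains(&dep_node_index)
    }
}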

View File

@@ -70,6 +70,7 @@
extern crate fmt_macros;
extern crate getopts;
extern crate graphviz;
extern crate num_cpus;
#[macro_use] extern crate lazy_static;
#[macro_use] extern crate scoped_tls;
#[cfg(windows)]

View File

@@ -1194,8 +1194,8 @@ fn parse_merge_functions(slot: &mut Option<MergeFunctions>, v: Option<&str>) ->
"prints the llvm optimization passes being run"),
ast_json: bool = (false, parse_bool, [UNTRACKED],
"print the AST as JSON and halt"),
query_threads: Option<usize> = (None, parse_opt_uint, [UNTRACKED],
"execute queries on a thread pool with N threads"),
threads: Option<usize> = (None, parse_opt_uint, [UNTRACKED],
"use a thread pool with N threads"),
ast_json_noexpand: bool = (false, parse_bool, [UNTRACKED],
"print the pre-expansion AST as JSON and halt"),
ls: bool = (false, parse_bool, [UNTRACKED],
@@ -1986,17 +1986,17 @@ pub fn build_session_options_and_crate_config(
}
}
if debugging_opts.query_threads == Some(0) {
if debugging_opts.threads == Some(0) {
early_error(
error_format,
"Value for query threads must be a positive nonzero integer",
"Value for threads must be a positive nonzero integer",
);
}
if debugging_opts.query_threads.unwrap_or(1) > 1 && debugging_opts.fuel.is_some() {
if debugging_opts.threads.unwrap_or(1) > 1 && debugging_opts.fuel.is_some() {
early_error(
error_format,
"Optimization fuel is incompatible with multiple query threads",
"Optimization fuel is incompatible with multiple threads",
);
}
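Taken together, the two checks above reject -Zthreads=0 outright and refuse to combine optimization fuel with more than one thread. A minimal sketch of the same validation as a standalone function follows; `validate_threads` is a hypothetical name, and the real code reports through early_error rather than returning a Result.

// Minimal sketch of the validation above; `validate_threads` is hypothetical,
// rustc calls early_error with the session's error format instead.
fn validate_threads(threads: Option<usize>, fuel_enabled: bool) -> Result<(), &'static str> {
    if threads == Some(0) {
        return Err("Value for threads must be a positive nonzero integer");
    }
    // Note: an unset -Zthreads counts as 1 for this check.
    if threads.unwrap_or(1) > 1 && fuel_enabled {
        return Err("Optimization fuel is incompatible with multiple threads");
    }
    Ok(())
}

fn main() {
    assert!(validate_threads(Some(0), false).is_err());
    assert!(validate_threads(Some(8), true).is_err());
    assert!(validate_threads(None, false).is_ok());
}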

View File

@@ -877,7 +877,7 @@ pub fn consider_optimizing<T: Fn() -> String>(&self, crate_name: &str, msg: T) -
let mut ret = true;
if let Some(ref c) = self.optimization_fuel_crate {
if c == crate_name {
assert_eq!(self.query_threads(), 1);
assert_eq!(self.threads(), 1);
let mut fuel = self.optimization_fuel.lock();
ret = fuel.remaining != 0;
if fuel.remaining == 0 && !fuel.out_of_fuel {
@@ -890,7 +890,7 @@ pub fn consider_optimizing<T: Fn() -> String>(&self, crate_name: &str, msg: T) -
}
if let Some(ref c) = self.print_fuel_crate {
if c == crate_name {
assert_eq!(self.query_threads(), 1);
assert_eq!(self.threads(), 1);
self.print_fuel.fetch_add(1, SeqCst);
}
}
@@ -899,14 +899,14 @@ pub fn consider_optimizing<T: Fn() -> String>(&self, crate_name: &str, msg: T) -
/// Returns the number of query threads that should be used for this
/// compilation
pub fn query_threads_from_opts(opts: &config::Options) -> usize {
opts.debugging_opts.query_threads.unwrap_or(1)
pub fn threads_from_opts(opts: &config::Options) -> usize {
opts.debugging_opts.threads.unwrap_or(::num_cpus::get())
}
/// Returns the number of query threads that should be used for this
/// compilation
pub fn query_threads(&self) -> usize {
Self::query_threads_from_opts(&self.opts)
pub fn threads(&self) -> usize {
Self::threads_from_opts(&self.opts)
}
/// Returns the number of codegen units that should be used for this
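This is the hunk that implements "use multiple threads by default": `threads_from_opts` used to fall back to 1 and now falls back to the number of logical CPUs reported by num_cpus, the dependency added earlier in this commit. A small sketch of the resulting defaulting rule, assuming the num_cpus crate is available as in the changed Cargo.toml:

// Sketch of the new defaulting rule; assumes the num_cpus crate, which this
// commit adds as a librustc dependency.
fn effective_threads(requested: Option<usize>) -> usize {
    // An explicit -Zthreads=N wins; otherwise use every logical CPU.
    requested.unwrap_or_else(num_cpus::get)
}

fn main() {
    assert_eq!(effective_threads(Some(4)), 4); // explicit -Zthreads=4
    assert!(effective_threads(None) >= 1);     // default: num_cpus::get() is always >= 1
}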

View File

@@ -1823,10 +1823,10 @@ pub mod tls {
use rustc_data_structures::thin_vec::ThinVec;
use dep_graph::TaskDeps;
#[cfg(not(parallel_queries))]
#[cfg(not(parallel_compiler))]
use std::cell::Cell;
#[cfg(parallel_queries)]
#[cfg(parallel_compiler)]
use rayon_core;
/// This is the implicit state of rustc. It contains the current
@@ -1859,7 +1859,7 @@ pub struct ImplicitCtxt<'a, 'gcx: 'tcx, 'tcx> {
/// Sets Rayon's thread local variable which is preserved for Rayon jobs
/// to `value` during the call to `f`. It is restored to its previous value after.
/// This is used to set the pointer to the new ImplicitCtxt.
#[cfg(parallel_queries)]
#[cfg(parallel_compiler)]
#[inline]
fn set_tlv<F: FnOnce() -> R, R>(value: usize, f: F) -> R {
rayon_core::tlv::with(value, f)
@@ -1867,20 +1867,20 @@ fn set_tlv<F: FnOnce() -> R, R>(value: usize, f: F) -> R {
/// Gets Rayon's thread local variable which is preserved for Rayon jobs.
/// This is used to get the pointer to the current ImplicitCtxt.
#[cfg(parallel_queries)]
#[cfg(parallel_compiler)]
#[inline]
fn get_tlv() -> usize {
rayon_core::tlv::get()
}
/// A thread local variable which stores a pointer to the current ImplicitCtxt
#[cfg(not(parallel_queries))]
#[cfg(not(parallel_compiler))]
thread_local!(static TLV: Cell<usize> = Cell::new(0));
/// Sets TLV to `value` during the call to `f`.
/// It is restored to its previous value after.
/// This is used to set the pointer to the new ImplicitCtxt.
#[cfg(not(parallel_queries))]
#[cfg(not(parallel_compiler))]
#[inline]
fn set_tlv<F: FnOnce() -> R, R>(value: usize, f: F) -> R {
let old = get_tlv();
@@ -1890,7 +1890,7 @@ fn set_tlv<F: FnOnce() -> R, R>(value: usize, f: F) -> R {
}
/// This is used to get the pointer to the current ImplicitCtxt.
#[cfg(not(parallel_queries))]
#[cfg(not(parallel_compiler))]
fn get_tlv() -> usize {
TLV.with(|tlv| tlv.get())
}
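The pattern renamed here keeps one get_tlv/set_tlv interface with two cfg-gated definitions, and exactly one of them is compiled depending on whether rustc was built with --cfg parallel_compiler. Below is a simplified, std-only sketch of the non-parallel side (the parallel side routes through rayon_core::tlv instead, as the hunk shows); the guard-free restore is a simplification of the real code.

// Simplified sketch of the non-parallel TLV storage: a thread-local Cell
// holding a pointer-sized value. Mirrors the #[cfg(not(parallel_compiler))]
// definitions above with the cfg attributes omitted.
use std::cell::Cell;

thread_local!(static TLV: Cell<usize> = Cell::new(0));

fn get_tlv() -> usize {
    TLV.with(|tlv| tlv.get())
}

/// Sets TLV to `value` during the call to `f`, restoring the old value after.
fn set_tlv<F: FnOnce() -> R, R>(value: usize, f: F) -> R {
    let old = get_tlv();
    TLV.with(|tlv| tlv.set(value));
    let result = f();
    TLV.with(|tlv| tlv.set(old));
    result
}

fn main() {
    assert_eq!(get_tlv(), 0);
    set_tlv(42, || assert_eq!(get_tlv(), 42));
    assert_eq!(get_tlv(), 0);
}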

View File

@@ -8,7 +8,7 @@
use ty::tls;
use ty::query::Query;
use ty::query::plumbing::CycleError;
#[cfg(not(parallel_queries))]
#[cfg(not(parallel_compiler))]
use ty::query::{
plumbing::TryGetJob,
config::QueryDescription,
@@ -17,7 +17,7 @@
use std::process;
use std::{fmt, ptr};
#[cfg(parallel_queries)]
#[cfg(parallel_compiler)]
use {
rayon_core,
parking_lot::{Mutex, Condvar},
@@ -54,7 +54,7 @@ pub struct QueryJob<'tcx> {
pub parent: Option<Lrc<QueryJob<'tcx>>>,
/// The latch which is used to wait on this job
#[cfg(parallel_queries)]
#[cfg(parallel_compiler)]
latch: QueryLatch<'tcx>,
}
@@ -64,7 +64,7 @@ pub fn new(info: QueryInfo<'tcx>, parent: Option<Lrc<QueryJob<'tcx>>>) -> Self {
QueryJob {
info,
parent,
#[cfg(parallel_queries)]
#[cfg(parallel_compiler)]
latch: QueryLatch::new(),
}
}
@@ -73,7 +73,7 @@ pub fn new(info: QueryInfo<'tcx>, parent: Option<Lrc<QueryJob<'tcx>>>) -> Self {
///
/// For single-threaded rustc there are no concurrent jobs running, so if we are waiting for any
/// query, that means there is a query cycle; thus this always results in a cycle error.
#[cfg(not(parallel_queries))]
#[cfg(not(parallel_compiler))]
#[inline(never)]
#[cold]
pub(super) fn cycle_error<'lcx, 'a, D: QueryDescription<'tcx>>(
@@ -88,7 +88,7 @@ pub(super) fn cycle_error<'lcx, 'a, D: QueryDescription<'tcx>>(
///
/// For single-threaded rustc there are no concurrent jobs running, so if we are waiting for any
/// query, that means there is a query cycle; thus this always results in a cycle error.
#[cfg(parallel_queries)]
#[cfg(parallel_compiler)]
pub(super) fn await<'lcx>(
&self,
tcx: TyCtxt<'_, 'tcx, 'lcx>,
@@ -113,7 +113,7 @@ pub(super) fn await<'lcx>(
})
}
#[cfg(not(parallel_queries))]
#[cfg(not(parallel_compiler))]
fn find_cycle_in_stack<'lcx>(
&self,
tcx: TyCtxt<'_, 'tcx, 'lcx>,
@@ -152,7 +152,7 @@ fn find_cycle_in_stack<'lcx>(
/// This does nothing for single threaded rustc,
/// as there are no concurrent jobs which could be waiting on us
pub fn signal_complete(&self) {
#[cfg(parallel_queries)]
#[cfg(parallel_compiler)]
self.latch.set();
}
@@ -161,7 +161,7 @@ fn as_ptr(&self) -> *const QueryJob<'tcx> {
}
}
#[cfg(parallel_queries)]
#[cfg(parallel_compiler)]
struct QueryWaiter<'tcx> {
query: Option<Lrc<QueryJob<'tcx>>>,
condvar: Condvar,
@@ -169,7 +169,7 @@ struct QueryWaiter<'tcx> {
cycle: Lock<Option<CycleError<'tcx>>>,
}
#[cfg(parallel_queries)]
#[cfg(parallel_compiler)]
impl<'tcx> QueryWaiter<'tcx> {
fn notify(&self, registry: &rayon_core::Registry) {
rayon_core::mark_unblocked(registry);
@@ -177,18 +177,18 @@ fn notify(&self, registry: &rayon_core::Registry) {
}
}
#[cfg(parallel_queries)]
#[cfg(parallel_compiler)]
struct QueryLatchInfo<'tcx> {
complete: bool,
waiters: Vec<Lrc<QueryWaiter<'tcx>>>,
}
#[cfg(parallel_queries)]
#[cfg(parallel_compiler)]
struct QueryLatch<'tcx> {
info: Mutex<QueryLatchInfo<'tcx>>,
}
#[cfg(parallel_queries)]
#[cfg(parallel_compiler)]
impl<'tcx> QueryLatch<'tcx> {
fn new() -> Self {
QueryLatch {
@@ -242,7 +242,7 @@ fn extract_waiter(
}
/// A resumable waiter of a query. The usize is the index into waiters in the query's latch
#[cfg(parallel_queries)]
#[cfg(parallel_compiler)]
type Waiter<'tcx> = (Lrc<QueryJob<'tcx>>, usize);
/// Visits all the non-resumable and resumable waiters of a query.
@@ -254,7 +254,7 @@ fn extract_waiter(
/// For visits of resumable waiters it returns Some(Some(Waiter)) which has the
/// required information to resume the waiter.
/// If all `visit` calls return None, this function also returns None.
#[cfg(parallel_queries)]
#[cfg(parallel_compiler)]
fn visit_waiters<'tcx, F>(query: Lrc<QueryJob<'tcx>>, mut visit: F) -> Option<Option<Waiter<'tcx>>>
where
F: FnMut(Span, Lrc<QueryJob<'tcx>>) -> Option<Option<Waiter<'tcx>>>
@@ -282,7 +282,7 @@ fn visit_waiters<'tcx, F>(query: Lrc<QueryJob<'tcx>>, mut visit: F) -> Option<Op
/// `span` is the reason for the `query` to execute. This is initially DUMMY_SP.
/// If a cycle is detected, this initial value is replaced with the span causing
/// the cycle.
#[cfg(parallel_queries)]
#[cfg(parallel_compiler)]
fn cycle_check<'tcx>(query: Lrc<QueryJob<'tcx>>,
span: Span,
stack: &mut Vec<(Span, Lrc<QueryJob<'tcx>>)>,
@@ -321,7 +321,7 @@ fn cycle_check<'tcx>(query: Lrc<QueryJob<'tcx>>,
/// Finds out if there's a path to the compiler root (aka. code which isn't in a query)
/// from `query` without going through any of the queries in `visited`.
/// This is achieved with a depth first search.
#[cfg(parallel_queries)]
#[cfg(parallel_compiler)]
fn connected_to_root<'tcx>(
query: Lrc<QueryJob<'tcx>>,
visited: &mut FxHashSet<*const QueryJob<'tcx>>
@@ -346,7 +346,7 @@ fn connected_to_root<'tcx>(
}
// Deterministically pick a query from a list
#[cfg(parallel_queries)]
#[cfg(parallel_compiler)]
fn pick_query<'a, 'tcx, T, F: Fn(&T) -> (Span, Lrc<QueryJob<'tcx>>)>(
tcx: TyCtxt<'_, 'tcx, '_>,
queries: &'a [T],
@@ -372,7 +372,7 @@ fn pick_query<'a, 'tcx, T, F: Fn(&T) -> (Span, Lrc<QueryJob<'tcx>>)>(
/// the function returns true.
/// If a cycle was not found, the starting query is removed from `jobs` and
/// the function returns false.
#[cfg(parallel_queries)]
#[cfg(parallel_compiler)]
fn remove_cycle<'tcx>(
jobs: &mut Vec<Lrc<QueryJob<'tcx>>>,
wakelist: &mut Vec<Lrc<QueryWaiter<'tcx>>>,
@@ -475,7 +475,7 @@ fn remove_cycle<'tcx>(
/// Creates a new thread and forwards information in thread locals to it.
/// The new thread runs the deadlock handler.
/// Must only be called when a deadlock is about to happen.
#[cfg(parallel_queries)]
#[cfg(parallel_compiler)]
pub unsafe fn handle_deadlock() {
use syntax;
use syntax_pos;
@@ -514,7 +514,7 @@ pub unsafe fn handle_deadlock() {
/// uses a query latch and then resuming that waiter.
/// There may be multiple cycles involved in a deadlock, so this searches
/// all active queries for cycles before finally resuming all the waiters at once.
#[cfg(parallel_queries)]
#[cfg(parallel_compiler)]
fn deadlock(tcx: TyCtxt<'_, '_, '_>, registry: &rayon_core::Registry) {
let on_panic = OnDrop(|| {
eprintln!("deadlock handler panicked, aborting process");
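Most of this file is the parallel_compiler-only machinery: each active query owns a QueryLatch, waiters queue up on it, and the Rayon deadlock handler walks the waiter graph to find and break query cycles. Below is a minimal std-only sketch of the latch idea; it is simplified, since the real QueryLatch cooperates with rayon_core's blocked-thread bookkeeping rather than parking the OS thread on a condvar.

// Minimal std-only sketch of a completion latch: waiters block until the
// owning job calls set(). Illustrative only, not rustc's QueryLatch.
use std::sync::{Condvar, Mutex};

struct Latch {
    complete: Mutex<bool>,
    condvar: Condvar,
}

impl Latch {
    fn new() -> Self {
        Latch { complete: Mutex::new(false), condvar: Condvar::new() }
    }

    /// Block the calling thread until `set` has been called.
    fn wait(&self) {
        let mut complete = self.complete.lock().unwrap();
        while !*complete {
            complete = self.condvar.wait(complete).unwrap();
        }
    }

    /// Mark the job as complete and wake every waiter.
    fn set(&self) {
        *self.complete.lock().unwrap() = true;
        self.condvar.notify_all();
    }
}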

View File

@@ -69,7 +69,7 @@
mod job;
pub use self::job::{QueryJob, QueryInfo};
#[cfg(parallel_queries)]
#[cfg(parallel_compiler)]
pub use self::job::handle_deadlock;
mod keys;

View File

@@ -153,12 +153,12 @@ pub(super) fn try_get(
// If we are single-threaded we know that we have a cycle error,
// so we just return the error
#[cfg(not(parallel_queries))]
#[cfg(not(parallel_compiler))]
return job.cycle_error(tcx, span);
// With parallel queries we might just have to wait on some other
// thread
#[cfg(parallel_queries)]
#[cfg(parallel_compiler)]
{
if let Err(cycle) = job.await(tcx, span) {
return TryGetJob::JobCompleted(Err(cycle));
@@ -695,7 +695,7 @@ macro_rules! define_queries_inner {
[$($modifiers:tt)*] fn $name:ident: $node:ident($K:ty) -> $V:ty,)*) => {
use std::mem;
#[cfg(parallel_queries)]
#[cfg(parallel_compiler)]
use ty::query::job::QueryResult;
use rustc_data_structures::sync::Lock;
use {
@@ -736,7 +736,7 @@ pub fn record_computed_queries(&self, sess: &Session) {
});
}
#[cfg(parallel_queries)]
#[cfg(parallel_compiler)]
pub fn collect_active_jobs(&self) -> Vec<Lrc<QueryJob<$tcx>>> {
let mut jobs = Vec::new();

View File

@@ -1,21 +1,21 @@
//! This module defines types which are thread safe if cfg!(parallel_queries) is true.
//! This module defines types which are thread safe if cfg!(parallel_compiler) is true.
//!
//! `Lrc` is an alias of either Rc or Arc.
//!
//! `Lock` is a mutex.
//! It internally uses `parking_lot::Mutex` if cfg!(parallel_queries) is true,
//! It internally uses `parking_lot::Mutex` if cfg!(parallel_compiler) is true,
//! `RefCell` otherwise.
//!
//! `RwLock` is a read-write lock.
//! It internally uses `parking_lot::RwLock` if cfg!(parallel_queries) is true,
//! It internally uses `parking_lot::RwLock` if cfg!(parallel_compiler) is true,
//! `RefCell` otherwise.
//!
//! `MTLock` is a mutex which disappears if cfg!(parallel_queries) is false.
//! `MTLock` is a mutex which disappears if cfg!(parallel_compiler) is false.
//!
//! `MTRef` is a immutable reference if cfg!(parallel_queries), and an mutable reference otherwise.
//! `MTRef` is an immutable reference if cfg!(parallel_compiler), and a mutable reference otherwise.
//!
//! `rustc_erase_owner!` erases an OwningRef owner into Erased or Erased + Send + Sync
//! depending on the value of cfg!(parallel_queries).
//! depending on the value of cfg!(parallel_compiler).
use std::collections::HashMap;
use std::hash::{Hash, BuildHasher};
@@ -50,7 +50,7 @@ pub fn serial_scope<F, R>(f: F) -> R
pub use std::sync::atomic::Ordering;
cfg_if! {
if #[cfg(not(parallel_queries))] {
if #[cfg(not(parallel_compiler))] {
pub auto trait Send {}
pub auto trait Sync {}
@@ -461,19 +461,19 @@ pub fn get_mut(&mut self) -> &mut T {
self.0.get_mut()
}
#[cfg(parallel_queries)]
#[cfg(parallel_compiler)]
#[inline(always)]
pub fn try_lock(&self) -> Option<LockGuard<T>> {
self.0.try_lock()
}
#[cfg(not(parallel_queries))]
#[cfg(not(parallel_compiler))]
#[inline(always)]
pub fn try_lock(&self) -> Option<LockGuard<T>> {
self.0.try_borrow_mut().ok()
}
#[cfg(parallel_queries)]
#[cfg(parallel_compiler)]
#[inline(always)]
pub fn lock(&self) -> LockGuard<T> {
if ERROR_CHECKING {
@@ -483,7 +483,7 @@ pub fn lock(&self) -> LockGuard<T> {
}
}
#[cfg(not(parallel_queries))]
#[cfg(not(parallel_compiler))]
#[inline(always)]
pub fn lock(&self) -> LockGuard<T> {
self.0.borrow_mut()
@@ -539,13 +539,13 @@ pub fn get_mut(&mut self) -> &mut T {
self.0.get_mut()
}
#[cfg(not(parallel_queries))]
#[cfg(not(parallel_compiler))]
#[inline(always)]
pub fn read(&self) -> ReadGuard<T> {
self.0.borrow()
}
#[cfg(parallel_queries)]
#[cfg(parallel_compiler)]
#[inline(always)]
pub fn read(&self) -> ReadGuard<T> {
if ERROR_CHECKING {
@@ -560,25 +560,25 @@ pub fn with_read_lock<F: FnOnce(&T) -> R, R>(&self, f: F) -> R {
f(&*self.read())
}
#[cfg(not(parallel_queries))]
#[cfg(not(parallel_compiler))]
#[inline(always)]
pub fn try_write(&self) -> Result<WriteGuard<T>, ()> {
self.0.try_borrow_mut().map_err(|_| ())
}
#[cfg(parallel_queries)]
#[cfg(parallel_compiler)]
#[inline(always)]
pub fn try_write(&self) -> Result<WriteGuard<T>, ()> {
self.0.try_write().ok_or(())
}
#[cfg(not(parallel_queries))]
#[cfg(not(parallel_compiler))]
#[inline(always)]
pub fn write(&self) -> WriteGuard<T> {
self.0.borrow_mut()
}
#[cfg(parallel_queries)]
#[cfg(parallel_compiler)]
#[inline(always)]
pub fn write(&self) -> WriteGuard<T> {
if ERROR_CHECKING {
@@ -616,27 +616,27 @@ fn clone(&self) -> Self {
/// It will panic if it is used on multiple threads.
#[derive(Copy, Clone, Hash, Debug, Eq, PartialEq)]
pub struct OneThread<T> {
#[cfg(parallel_queries)]
#[cfg(parallel_compiler)]
thread: thread::ThreadId,
inner: T,
}
#[cfg(parallel_queries)]
#[cfg(parallel_compiler)]
unsafe impl<T> std::marker::Sync for OneThread<T> {}
#[cfg(parallel_queries)]
#[cfg(parallel_compiler)]
unsafe impl<T> std::marker::Send for OneThread<T> {}
impl<T> OneThread<T> {
#[inline(always)]
fn check(&self) {
#[cfg(parallel_queries)]
#[cfg(parallel_compiler)]
assert_eq!(thread::current().id(), self.thread);
}
#[inline(always)]
pub fn new(inner: T) -> Self {
OneThread {
#[cfg(parallel_queries)]
#[cfg(parallel_compiler)]
thread: thread::current().id(),
inner,
}
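The renames in this file are mechanical, but the module is the core of the scheme: every synchronization type here compiles to a cheap single-threaded form unless parallel_compiler is set. A simplified sketch of how a Lock<T> can be switched between parking_lot::Mutex and RefCell follows; it is illustrative only, assumes a parking_lot dependency, and omits the real type's try_lock, get_mut, and ERROR_CHECKING support.

// Illustrative cfg-switched Lock<T>; not the real rustc_data_structures code.
// Assumes the parking_lot crate when built with --cfg parallel_compiler.
#[cfg(parallel_compiler)]
pub struct Lock<T>(parking_lot::Mutex<T>);

#[cfg(not(parallel_compiler))]
pub struct Lock<T>(std::cell::RefCell<T>);

impl<T> Lock<T> {
    #[cfg(parallel_compiler)]
    pub fn new(inner: T) -> Self {
        Lock(parking_lot::Mutex::new(inner))
    }

    #[cfg(not(parallel_compiler))]
    pub fn new(inner: T) -> Self {
        Lock(std::cell::RefCell::new(inner))
    }

    #[cfg(parallel_compiler)]
    pub fn lock(&self) -> parking_lot::MutexGuard<'_, T> {
        self.0.lock()
    }

    #[cfg(not(parallel_compiler))]
    pub fn lock(&self) -> std::cell::RefMut<'_, T> {
        self.0.borrow_mut()
    }
}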

View File

@@ -56,7 +56,7 @@
use profile;
use super::Compilation;
#[cfg(not(parallel_queries))]
#[cfg(not(parallel_compiler))]
pub fn spawn_thread_pool<F: FnOnce(config::Options) -> R + sync::Send, R: sync::Send>(
opts: config::Options,
f: F
@@ -66,7 +66,7 @@ pub fn spawn_thread_pool<F: FnOnce(config::Options) -> R + sync::Send, R: sync::
})
}
#[cfg(parallel_queries)]
#[cfg(parallel_compiler)]
pub fn spawn_thread_pool<F: FnOnce(config::Options) -> R + sync::Send, R: sync::Send>(
opts: config::Options,
f: F
@@ -78,7 +78,7 @@ pub fn spawn_thread_pool<F: FnOnce(config::Options) -> R + sync::Send, R: sync::
let gcx_ptr = &Lock::new(0);
let config = ThreadPoolBuilder::new()
.num_threads(Session::query_threads_from_opts(&opts))
.num_threads(Session::threads_from_opts(&opts))
.deadlock_handler(|| unsafe { ty::query::handle_deadlock() })
.stack_size(::STACK_SIZE);

View File

@@ -238,9 +238,9 @@ pub fn with_ctxt(&self, ctxt: SyntaxContext) -> Span {
// The interner is pointed to by a thread local value which is only set on the main thread
// when parallelization is disabled. So we don't allow `Span` to transfer between threads
// to avoid panics and other errors, even though it would be memory safe to do so.
#[cfg(not(parallel_queries))]
#[cfg(not(parallel_compiler))]
impl !Send for Span {}
#[cfg(not(parallel_queries))]
#[cfg(not(parallel_compiler))]
impl !Sync for Span {}
impl PartialOrd for Span {

View File

@@ -150,9 +150,9 @@ pub struct SymbolIndex { .. }
// The interner is pointed to by a thread local value which is only set on the main thread
// when parallelization is disabled. So we don't allow `Symbol` to transfer between threads
// to avoid panics and other errors, even though it would be memory safe to do so.
#[cfg(not(parallel_queries))]
#[cfg(not(parallel_compiler))]
impl !Send for Symbol { }
#[cfg(not(parallel_queries))]
#[cfg(not(parallel_compiler))]
impl !Sync for Symbol { }
impl Symbol {

View File

@@ -1727,6 +1727,9 @@ fn make_compile_args(&self, input_file: &Path, output_file: TargetLocation) -> C
// FIXME Why is -L here?
rustc.arg(input_file); //.arg("-L").arg(&self.config.build_base);
// Use a single thread for efficiency and a deterministic error message order
rustc.arg("-Zthreads=1");
// Optionally prevent default --target if specified in test compile-flags.
let custom_target = self
.props