Rollup merge of #112830 - nnethercote:more-codegen-cleanups, r=oli-obk
More codegen cleanups

Some additional cleanups I found while looking closely at this code, following up from #112827.

r= `@oli-obk`
commit 904994e101
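
In outline, the interface-facing change is that `Queries::ongoing_codegen` no longer runs analysis or caches its result in a `Query` field, and `Queries::linker` now takes the codegen result as an argument. A caller is therefore expected to drive the sequence roughly as follows (a sketch assembled from the hunks below, not complete driver code):

    compiler.enter(|queries| {
        // Analysis is now the caller's responsibility; `ongoing_codegen` no longer triggers it.
        queries.global_ctxt()?.enter(|tcx| tcx.analysis(()))?;
        // Returns the codegen result (a `Box<dyn Any>`) directly rather than a cached query handle.
        let ongoing_codegen = queries.ongoing_codegen()?;
        // The codegen result is handed to the linker explicitly.
        queries.linker(ongoing_codegen)
    })
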
@@ -11,9 +11,7 @@ use jobserver::{Acquired, Client};
 use rustc_ast::attr;
 use rustc_data_structures::fx::{FxHashMap, FxIndexMap};
 use rustc_data_structures::memmap::Mmap;
-use rustc_data_structures::profiling::SelfProfilerRef;
-use rustc_data_structures::profiling::TimingGuard;
-use rustc_data_structures::profiling::VerboseTimingGuard;
+use rustc_data_structures::profiling::{SelfProfilerRef, VerboseTimingGuard};
 use rustc_data_structures::sync::Lrc;
 use rustc_errors::emitter::Emitter;
 use rustc_errors::{translation::Translate, DiagnosticId, FatalError, Handler, Level};

@@ -705,20 +703,6 @@ impl<B: WriteBackendMethods> WorkItem<B> {
         }
     }
-
-    fn start_profiling<'a>(&self, cgcx: &'a CodegenContext<B>) -> TimingGuard<'a> {
-        match *self {
-            WorkItem::Optimize(ref m) => {
-                cgcx.prof.generic_activity_with_arg("codegen_module_optimize", &*m.name)
-            }
-            WorkItem::CopyPostLtoArtifacts(ref m) => cgcx
-                .prof
-                .generic_activity_with_arg("codegen_copy_artifacts_from_incr_cache", &*m.name),
-            WorkItem::LTO(ref m) => {
-                cgcx.prof.generic_activity_with_arg("codegen_module_perform_lto", m.name())
-            }
-        }
-    }

     /// Generate a short description of this work item suitable for use as a thread name.
     fn short_description(&self) -> String {
         // `pthread_setname()` on *nix is limited to 15 characters and longer names are ignored.

@@ -759,21 +743,6 @@ pub enum FatLTOInput<B: WriteBackendMethods> {
     InMemory(ModuleCodegen<B::Module>),
 }
-
-fn execute_work_item<B: ExtraBackendMethods>(
-    cgcx: &CodegenContext<B>,
-    work_item: WorkItem<B>,
-) -> Result<WorkItemResult<B>, FatalError> {
-    let module_config = cgcx.config(work_item.module_kind());
-
-    match work_item {
-        WorkItem::Optimize(module) => execute_optimize_work_item(cgcx, module, module_config),
-        WorkItem::CopyPostLtoArtifacts(module) => {
-            Ok(execute_copy_from_cache_work_item(cgcx, module, module_config))
-        }
-        WorkItem::LTO(module) => execute_lto_work_item(cgcx, module, module_config),
-    }
-}

 /// Actual LTO type we end up choosing based on multiple factors.
 pub enum ComputedLtoType {
     No,

@@ -1706,8 +1675,27 @@ fn spawn_work<B: ExtraBackendMethods>(cgcx: CodegenContext<B>, work: WorkItem<B>
         // as a diagnostic was already sent off to the main thread - just
         // surface that there was an error in this worker.
         bomb.result = {
-            let _prof_timer = work.start_profiling(&cgcx);
-            Some(execute_work_item(&cgcx, work))
+            let module_config = cgcx.config(work.module_kind());
+
+            Some(match work {
+                WorkItem::Optimize(m) => {
+                    let _timer =
+                        cgcx.prof.generic_activity_with_arg("codegen_module_optimize", &*m.name);
+                    execute_optimize_work_item(&cgcx, m, module_config)
+                }
+                WorkItem::CopyPostLtoArtifacts(m) => {
+                    let _timer = cgcx.prof.generic_activity_with_arg(
+                        "codegen_copy_artifacts_from_incr_cache",
+                        &*m.name,
+                    );
+                    Ok(execute_copy_from_cache_work_item(&cgcx, m, module_config))
+                }
+                WorkItem::LTO(m) => {
+                    let _timer =
+                        cgcx.prof.generic_activity_with_arg("codegen_module_perform_lto", m.name());
+                    execute_lto_work_item(&cgcx, m, module_config)
+                }
+            })
         };
     })
     .expect("failed to spawn thread");

@@ -424,7 +424,7 @@ fn run_compiler(
             return early_exit();
         }

-        queries.ongoing_codegen()?;
+        let ongoing_codegen = queries.ongoing_codegen()?;

         if sess.opts.unstable_opts.print_type_sizes {
             sess.code_stats.print_type_sizes();

@@ -437,7 +437,7 @@ fn run_compiler(
             sess.code_stats.print_vtable_sizes(crate_name);
         }

-        let linker = queries.linker()?;
+        let linker = queries.linker(ongoing_codegen)?;
         Ok(Some(linker))
     })?;

@@ -740,8 +740,8 @@ pub fn create_global_ctxt<'tcx>(
     })
 }

-/// Runs the resolution, type-checking, region checking and other
-/// miscellaneous analysis passes on the crate.
+/// Runs the type-checking, region checking and other miscellaneous analysis
+/// passes on the crate.
 fn analysis(tcx: TyCtxt<'_>, (): ()) -> Result<()> {
     rustc_passes::hir_id_validator::check_crate(tcx);

@@ -93,7 +93,6 @@ pub struct Queries<'tcx> {
     dep_graph: Query<DepGraph>,
     // This just points to what's in `gcx_cell`.
     gcx: Query<&'tcx GlobalCtxt<'tcx>>,
-    ongoing_codegen: Query<Box<dyn Any>>,
 }

 impl<'tcx> Queries<'tcx> {

@@ -110,7 +109,6 @@ impl<'tcx> Queries<'tcx> {
             register_plugins: Default::default(),
             dep_graph: Default::default(),
             gcx: Default::default(),
-            ongoing_codegen: Default::default(),
         }
     }

@@ -249,23 +247,19 @@ impl<'tcx> Queries<'tcx> {
         })
     }

-    pub fn ongoing_codegen(&'tcx self) -> Result<QueryResult<'_, Box<dyn Any>>> {
-        self.ongoing_codegen.compute(|| {
-            self.global_ctxt()?.enter(|tcx| {
-                tcx.analysis(()).ok();
-
-                // Don't do code generation if there were any errors
-                self.session().compile_status()?;
+    pub fn ongoing_codegen(&'tcx self) -> Result<Box<dyn Any>> {
+        self.global_ctxt()?.enter(|tcx| {
+            // Don't do code generation if there were any errors
+            self.session().compile_status()?;

-                // If we have any delayed bugs, for example because we created TyKind::Error earlier,
-                // it's likely that codegen will only cause more ICEs, obscuring the original problem
-                self.session().diagnostic().flush_delayed();
+            // If we have any delayed bugs, for example because we created TyKind::Error earlier,
+            // it's likely that codegen will only cause more ICEs, obscuring the original problem
+            self.session().diagnostic().flush_delayed();

-                // Hook for UI tests.
-                Self::check_for_rustc_errors_attr(tcx);
+            // Hook for UI tests.
+            Self::check_for_rustc_errors_attr(tcx);

-                Ok(passes::start_codegen(&***self.codegen_backend(), tcx))
-            })
+            Ok(passes::start_codegen(&***self.codegen_backend(), tcx))
         })
     }

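One behavioural detail in the hunk above: the removed body called `tcx.analysis(()).ok()`, discarding the analysis result and relying on the `compile_status()` check, whereas callers now run analysis themselves and can propagate its result directly (a sketch mirroring the test change at the end of this diff):

    queries.global_ctxt()?.enter(|tcx| tcx.analysis(()))?;
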
@@ -303,7 +297,7 @@ impl<'tcx> Queries<'tcx> {
         }
     }

-    pub fn linker(&'tcx self) -> Result<Linker> {
+    pub fn linker(&'tcx self, ongoing_codegen: Box<dyn Any>) -> Result<Linker> {
         let sess = self.session().clone();
         let codegen_backend = self.codegen_backend().clone();

@@ -314,7 +308,6 @@ impl<'tcx> Queries<'tcx> {
                 tcx.dep_graph.clone(),
             )
         });
-        let ongoing_codegen = self.ongoing_codegen()?.steal();

         Ok(Linker {
             sess,

@@ -63,10 +63,11 @@ fn compile(code: String, output: PathBuf, sysroot: PathBuf) {
     };

     interface::run_compiler(config, |compiler| {
-        // This runs all the passes prior to linking, too.
-        let linker = compiler.enter(|queries| queries.linker());
-        if let Ok(linker) = linker {
-            linker.link();
-        }
+        let linker = compiler.enter(|queries| {
+            queries.global_ctxt()?.enter(|tcx| tcx.analysis(()))?;
+            let ongoing_codegen = queries.ongoing_codegen()?;
+            queries.linker(ongoing_codegen)
+        });
+        linker.unwrap().link();
     });
 }