WIP: Implement dummy ThinLTO
FIXME: this seems very slow. Update: not sure anymore, compare with the master branch.
parent 9ca7658817
commit 21b1b11981
@@ -426,9 +426,10 @@ impl ConfigInfo {
        // Since we don't support ThinLTO, disable LTO completely when not trying to do LTO.
        // TODO(antoyo): remove when we can handle ThinLTO.
        if !env.contains_key(&"FAT_LTO".to_string()) {
        // TODO: remove:
        /*if !env.contains_key(&"FAT_LTO".to_string()) {
            rustflags.push("-Clto=off".to_string());
        }
        }*/
        // FIXME(antoyo): remove once the atomic shim is gone
        if os_name == "Darwin" {
            rustflags.extend_from_slice(&[
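To make the intent of this hunk concrete, here is a minimal, self-contained sketch of how the build system's environment toggle could eventually select a `-Clto` flag once ThinLTO is handled, instead of forcing `-Clto=off`. The `THIN_LTO` key and the helper name are hypothetical; only `FAT_LTO` appears in the patch.

```rust
use std::collections::HashMap;

// Hypothetical helper: map the build-system environment to a -Clto rustflag.
// FAT_LTO comes from the hunk above; THIN_LTO is an assumed future toggle.
fn lto_rustflag(env: &HashMap<String, String>) -> Option<&'static str> {
    if env.contains_key("FAT_LTO") {
        Some("-Clto=fat")
    } else if env.contains_key("THIN_LTO") {
        Some("-Clto=thin")
    } else {
        // The old code pushed "-Clto=off" here; the patch comments that out.
        None
    }
}

fn main() {
    let mut env = HashMap::new();
    env.insert("FAT_LTO".to_string(), "1".to_string());
    assert_eq!(lto_rustflag(&env), Some("-Clto=fat"));
}
```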
404 src/back/lto.rs
@@ -16,13 +16,14 @@
// /usr/bin/ld: warning: type of symbol `_RNvNvNvNtCs5JWOrf9uCus_5rayon11thread_pool19WORKER_THREAD_STATE7___getit5___KEY' changed from 1 to 6 in /tmp/ccKeUSiR.ltrans0.ltrans.o
// /usr/bin/ld: warning: type of symbol `_RNvNvNvNvNtNtNtCsAj5i4SGTR7_3std4sync4mpmc5waker17current_thread_id5DUMMY7___getit5___KEY' changed from 1 to 6 in /tmp/ccKeUSiR.ltrans0.ltrans.o
// /usr/bin/ld: warning: incremental linking of LTO and non-LTO objects; using -flinker-output=nolto-rel which will bypass whole program optimization
use std::ffi::CString;
use std::ffi::{CStr, CString};
use std::fs::{self, File};
use std::path::{Path, PathBuf};
use std::sync::Arc;

use gccjit::OutputKind;
use gccjit::{Context, OutputKind};
use object::read::archive::ArchiveFile;
use rustc_codegen_ssa::back::lto::{LtoModuleCodegen, SerializedModule};
use rustc_codegen_ssa::back::lto::{LtoModuleCodegen, SerializedModule, ThinModule, ThinShared};
use rustc_codegen_ssa::back::symbol_export;
use rustc_codegen_ssa::back::write::{CodegenContext, FatLtoInput};
use rustc_codegen_ssa::traits::*;
@@ -30,6 +31,7 @@ use rustc_codegen_ssa::{looks_like_rust_object_file, ModuleCodegen, ModuleKind};
use rustc_data_structures::memmap::Mmap;
use rustc_errors::{DiagCtxtHandle, FatalError};
use rustc_hir::def_id::LOCAL_CRATE;
use rustc_middle::bug;
use rustc_middle::dep_graph::WorkProduct;
use rustc_middle::middle::exported_symbols::{SymbolExportInfo, SymbolExportLevel};
use rustc_session::config::{CrateType, Lto};
@@ -349,6 +351,400 @@ impl ModuleBuffer {

impl ModuleBufferMethods for ModuleBuffer {
    fn data(&self) -> &[u8] {
        unimplemented!("data not needed for GCC codegen");
        &[]
    }
}

/// Performs thin LTO by performing necessary global analysis and returning two
/// lists, one of the modules that need optimization and another for modules that
/// can simply be copied over from the incr. comp. cache.
pub(crate) fn run_thin(
    cgcx: &CodegenContext<GccCodegenBackend>,
    modules: Vec<(String, ThinBuffer)>,
    cached_modules: Vec<(SerializedModule<ModuleBuffer>, WorkProduct)>,
) -> Result<(Vec<LtoModuleCodegen<GccCodegenBackend>>, Vec<WorkProduct>), FatalError> {
    let dcx = cgcx.create_dcx();
    let lto_data = prepare_lto(cgcx, &dcx)?;
    /*let symbols_below_threshold =
        symbols_below_threshold.iter().map(|c| c.as_ptr()).collect::<Vec<_>>();*/
    if cgcx.opts.cg.linker_plugin_lto.enabled() {
        unreachable!(
            "We should never reach this case if the LTO step \
            is deferred to the linker"
        );
    }
    thin_lto(
        cgcx,
        &dcx,
        modules,
        lto_data.upstream_modules,
        lto_data.tmp_path,
        cached_modules, /*, &symbols_below_threshold*/
    )
}

pub(crate) fn prepare_thin(
    module: ModuleCodegen<GccContext>,
    _emit_summary: bool,
) -> (String, ThinBuffer) {
    let name = module.name;
    //let buffer = ThinBuffer::new(module.module_llvm.context, true, emit_summary);
    let buffer = ThinBuffer::new(&module.module_llvm.context);
    (name, buffer)
}

/// Prepare "thin" LTO to get run on these modules.
///
/// The general structure of ThinLTO is quite different from the structure of
/// "fat" LTO above. With "fat" LTO all LLVM modules in question are merged into
/// one giant LLVM module, and then we run more optimization passes over this
/// big module after internalizing most symbols. Thin LTO, on the other hand,
/// avoids this large bottleneck through more targeted optimization.
///
/// At a high level Thin LTO looks like:
///
/// 1. Prepare a "summary" of each LLVM module in question which describes
///    the values inside, cost of the values, etc.
/// 2. Merge the summaries of all modules in question into one "index"
/// 3. Perform some global analysis on this index
/// 4. For each module, use the index and analysis calculated previously to
///    perform local transformations on the module, for example inlining
///    small functions from other modules.
/// 5. Run thin-specific optimization passes over each module, and then code
///    generate everything at the end.
///
/// The summary for each module is intended to be quite cheap, and the global
/// index is relatively quite cheap to create as well. As a result, the goal of
/// ThinLTO is to reduce the bottleneck on LTO and enable LTO to be used in more
/// situations. For example one cheap optimization is that we can parallelize
/// all codegen modules, easily making use of all the cores on a machine.
///
/// With all that in mind, the function here is specifically designed to just
/// calculate the *index* for ThinLTO. This index will then be shared amongst
/// all of the `LtoModuleCodegen` units returned below and destroyed once
/// they all go out of scope.
fn thin_lto(
    cgcx: &CodegenContext<GccCodegenBackend>,
    _dcx: &DiagCtxt,
    modules: Vec<(String, ThinBuffer)>,
    serialized_modules: Vec<(SerializedModule<ModuleBuffer>, CString)>,
    tmp_path: TempDir,
    cached_modules: Vec<(SerializedModule<ModuleBuffer>, WorkProduct)>,
    //symbols_below_threshold: &[*const libc::c_char],
) -> Result<(Vec<LtoModuleCodegen<GccCodegenBackend>>, Vec<WorkProduct>), FatalError> {
    let _timer = cgcx.prof.generic_activity("LLVM_thin_lto_global_analysis");
    info!("going for that thin, thin LTO");

    /*let green_modules: FxHashMap<_, _> =
        cached_modules.iter().map(|(_, wp)| (wp.cgu_name.clone(), wp.clone())).collect();*/

    let full_scope_len = modules.len() + serialized_modules.len() + cached_modules.len();
    let mut thin_buffers = Vec::with_capacity(modules.len());
    let mut module_names = Vec::with_capacity(full_scope_len);
    //let mut thin_modules = Vec::with_capacity(full_scope_len);

    for (i, (name, buffer)) in modules.into_iter().enumerate() {
        info!("local module: {} - {}", i, name);
        let cname = CString::new(name.as_bytes()).unwrap();
        /*thin_modules.push(llvm::ThinLTOModule {
            identifier: cname.as_ptr(),
            data: buffer.data().as_ptr(),
            len: buffer.data().len(),
        });*/
        thin_buffers.push(buffer);
        module_names.push(cname);
    }

    // FIXME: All upstream crates are deserialized internally in the
    // function below to extract their summary and modules. Note that
    // unlike the loop above we *must* decode and/or read something
    // here as these are all just serialized files on disk. An
    // improvement, however, to make here would be to store the
    // module summary separately from the actual module itself. Right
    // now this is stored in one large bitcode file, and the entire
    // file is deflate-compressed. We could try to bypass some of the
    // decompression by storing the index uncompressed and only
    // lazily decompressing the bytecode if necessary.
    //
    // Note that truly taking advantage of this optimization will
    // likely be further down the road. We'd have to implement
    // incremental ThinLTO first where we could actually avoid
    // looking at upstream modules entirely sometimes (the contents,
    // we must always unconditionally look at the index).
    let mut serialized = Vec::with_capacity(serialized_modules.len() + cached_modules.len());

    let cached_modules =
        cached_modules.into_iter().map(|(sm, wp)| (sm, CString::new(wp.cgu_name).unwrap()));

    for (module, name) in serialized_modules.into_iter().chain(cached_modules) {
        info!("upstream or cached module {:?}", name);
        /*thin_modules.push(llvm::ThinLTOModule {
            identifier: name.as_ptr(),
            data: module.data().as_ptr(),
            len: module.data().len(),
        });*/

        match module {
            SerializedModule::Local(ref module_buffer) => {
                let path = module_buffer.0.to_str().expect("path");
                let my_path = PathBuf::from(path);
                //let exists = my_path.exists();
                //println!("Path: {:?}: {}", path, exists);
                /*module.module_llvm.should_combine_object_files = true;
                module
                    .module_llvm
                    .context
                    .add_driver_option(module_buffer.0.to_str().expect("path"));*/
            }
            SerializedModule::FromRlib(_) => unimplemented!("from rlib"),
            SerializedModule::FromUncompressedFile(_) => {
                unimplemented!("from uncompressed file")
            }
        }

        serialized.push(module);
        module_names.push(name);
    }

    // Sanity check
    //assert_eq!(thin_modules.len(), module_names.len());

    // Delegate to the C++ bindings to create some data here. Once this is a
    // tried-and-true interface we may wish to try to upstream some of this
    // to LLVM itself, right now we reimplement a lot of what they do
    // upstream...
    /*let data = llvm::LLVMRustCreateThinLTOData(
        thin_modules.as_ptr(),
        thin_modules.len() as u32,
        symbols_below_threshold.as_ptr(),
        symbols_below_threshold.len() as u32,
    )
    .ok_or_else(|| write::llvm_err(dcx, LlvmError::PrepareThinLtoContext))?;
    */

    let data = ThinData; //(Arc::new(tmp_path))/*(data)*/;

    info!("thin LTO data created");

    /*let (key_map_path, prev_key_map, curr_key_map) =
        if let Some(ref incr_comp_session_dir) = cgcx.incr_comp_session_dir {
            let path = incr_comp_session_dir.join(THIN_LTO_KEYS_INCR_COMP_FILE_NAME);
            // If the previous file was deleted, or we get an IO error
            // reading the file, then we'll just use `None` as the
            // prev_key_map, which will force the code to be recompiled.
            let prev =
                if path.exists() { ThinLTOKeysMap::load_from_file(&path).ok() } else { None };
            let curr = ThinLTOKeysMap::from_thin_lto_modules(&data, &thin_modules, &module_names);
            (Some(path), prev, curr)
        }
        else {
            // If we don't compile incrementally, we don't need to load the
            // import data from LLVM.
            assert!(green_modules.is_empty());
            let curr = ThinLTOKeysMap::default();
            (None, None, curr)
        };
    info!("thin LTO cache key map loaded");
    info!("prev_key_map: {:#?}", prev_key_map);
    info!("curr_key_map: {:#?}", curr_key_map);*/

    // Throw our data in an `Arc` as we'll be sharing it across threads. We
    // also put all memory referenced by the C++ data (buffers, ids, etc)
    // into the arc as well. After this we'll create a thin module
    // codegen per module in this data.
    let shared =
        Arc::new(ThinShared { data, thin_buffers, serialized_modules: serialized, module_names });

    let copy_jobs = vec![];
    let mut opt_jobs = vec![];

    info!("checking which modules can be reused and which have to be re-optimized.");
    for (module_index, module_name) in shared.module_names.iter().enumerate() {
        let module_name = module_name_to_str(module_name);
        /*if let (Some(prev_key_map), true) =
            (prev_key_map.as_ref(), green_modules.contains_key(module_name))
        {
            assert!(cgcx.incr_comp_session_dir.is_some());

            // If a module exists in both the current and the previous session,
            // and has the same LTO cache key in both sessions, then we can re-use it
            if prev_key_map.keys.get(module_name) == curr_key_map.keys.get(module_name) {
                let work_product = green_modules[module_name].clone();
                copy_jobs.push(work_product);
                info!(" - {}: re-used", module_name);
                assert!(cgcx.incr_comp_session_dir.is_some());
                continue;
            }
        }*/

        info!(" - {}: re-compiled", module_name);
        opt_jobs
            .push(LtoModuleCodegen::Thin(ThinModule { shared: shared.clone(), idx: module_index }));
    }

    // Save the current ThinLTO import information for the next compilation
    // session, overwriting the previous serialized data (if any).
    /*if let Some(path) = key_map_path {
        if let Err(err) = curr_key_map.save_to_file(&path) {
            return Err(write::llvm_err(dcx, LlvmError::WriteThinLtoKey { err }));
        }
    }*/

    // NOTE: save the temporary directory used by LTO so that it gets deleted after linking instead
    // of now.
    //module.module_llvm.temp_dir = Some(tmp_path);
    // TODO: save the directory so that it gets deleted later.
    std::mem::forget(tmp_path);

    Ok((opt_jobs, copy_jobs))
}

pub unsafe fn optimize_thin_module(
    thin_module: ThinModule<GccCodegenBackend>,
    _cgcx: &CodegenContext<GccCodegenBackend>,
) -> Result<ModuleCodegen<GccContext>, FatalError> {
    //let dcx = cgcx.create_dcx();

    //let module_name = &thin_module.shared.module_names[thin_module.idx];
    /*let tm_factory_config = TargetMachineFactoryConfig::new(cgcx, module_name.to_str().unwrap());
    let tm = (cgcx.tm_factory)(tm_factory_config).map_err(|e| write::llvm_err(&dcx, e))?;*/

    // Right now the implementation we've got only works over serialized
    // modules, so we create a fresh new LLVM context and parse the module
    // into that context. One day, however, we may do this for upstream
    // crates but for locally codegened modules we may be able to reuse
    // that LLVM Context and Module.
    //let llcx = llvm::LLVMRustContextCreate(cgcx.fewer_names);
    //let llmod_raw = parse_module(llcx, module_name, thin_module.data(), &dcx)? as *const _;
    let mut should_combine_object_files = false;
    let context = match thin_module.shared.thin_buffers.get(thin_module.idx) {
        Some(thin_buffer) => Arc::clone(&thin_buffer.context),
        None => {
            let context = Context::default();
            let len = thin_module.shared.thin_buffers.len();
            let module = &thin_module.shared.serialized_modules[thin_module.idx - len];
            match *module {
                SerializedModule::Local(ref module_buffer) => {
                    let path = module_buffer.0.to_str().expect("path");

                    //let my_path = PathBuf::from(path);
                    //let exists = my_path.exists();
                    //println!("Path2: {:?}: {}", path, exists);

                    context.add_driver_option(path);
                    should_combine_object_files = true;
                    /*module.module_llvm.should_combine_object_files = true;
                    module
                        .module_llvm
                        .context
                        .add_driver_option(module_buffer.0.to_str().expect("path"));*/
                }
                SerializedModule::FromRlib(_) => unimplemented!("from rlib"),
                SerializedModule::FromUncompressedFile(_) => {
                    unimplemented!("from uncompressed file")
                }
            }
            Arc::new(context)
        }
    };
    let module = ModuleCodegen {
        module_llvm: GccContext { context, should_combine_object_files, temp_dir: None },
        name: thin_module.name().to_string(),
        kind: ModuleKind::Regular,
    };
    /*{
        let target = &*module.module_llvm.tm;
        let llmod = module.module_llvm.llmod();
        save_temp_bitcode(cgcx, &module, "thin-lto-input");

        // Up next comes the per-module local analyses that we do for Thin LTO.
        // Each of these functions is basically copied from the LLVM
        // implementation and then tailored to suit this implementation. Ideally
        // each of these would be supported by upstream LLVM but that's perhaps
        // a patch for another day!
        //
        // You can find some more comments about these functions in the LLVM
        // bindings we've got (currently `PassWrapper.cpp`)
        {
            let _timer =
                cgcx.prof.generic_activity_with_arg("LLVM_thin_lto_rename", thin_module.name());
            if !llvm::LLVMRustPrepareThinLTORename(thin_module.shared.data.0, llmod, target) {
                return Err(write::llvm_err(&dcx, LlvmError::PrepareThinLtoModule));
            }
            save_temp_bitcode(cgcx, &module, "thin-lto-after-rename");
        }

        {
            let _timer = cgcx
                .prof
                .generic_activity_with_arg("LLVM_thin_lto_resolve_weak", thin_module.name());
            if !llvm::LLVMRustPrepareThinLTOResolveWeak(thin_module.shared.data.0, llmod) {
                return Err(write::llvm_err(&dcx, LlvmError::PrepareThinLtoModule));
            }
            save_temp_bitcode(cgcx, &module, "thin-lto-after-resolve");
        }

        {
            let _timer = cgcx
                .prof
                .generic_activity_with_arg("LLVM_thin_lto_internalize", thin_module.name());
            if !llvm::LLVMRustPrepareThinLTOInternalize(thin_module.shared.data.0, llmod) {
                return Err(write::llvm_err(&dcx, LlvmError::PrepareThinLtoModule));
            }
            save_temp_bitcode(cgcx, &module, "thin-lto-after-internalize");
        }

        {
            let _timer =
                cgcx.prof.generic_activity_with_arg("LLVM_thin_lto_import", thin_module.name());
            if !llvm::LLVMRustPrepareThinLTOImport(thin_module.shared.data.0, llmod, target) {
                return Err(write::llvm_err(&dcx, LlvmError::PrepareThinLtoModule));
            }
            save_temp_bitcode(cgcx, &module, "thin-lto-after-import");
        }

        // Alright now that we've done everything related to the ThinLTO
        // analysis it's time to run some optimizations! Here we use the same
        // `run_pass_manager` as the "fat" LTO above except that we tell it to
        // populate a thin-specific pass manager, which presumably LLVM treats a
        // little differently.
        {
            info!("running thin lto passes over {}", module.name);
            run_pass_manager(cgcx, &dcx, &mut module, true)?;
            save_temp_bitcode(cgcx, &module, "thin-lto-after-pm");
        }
    }*/
    Ok(module)
}

pub struct ThinBuffer {
    context: Arc<Context<'static>>,
}

// TODO: check if this makes sense to make ThinBuffer Send and Sync.
unsafe impl Send for ThinBuffer {}
unsafe impl Sync for ThinBuffer {}

impl ThinBuffer {
    pub fn new(context: &Arc<Context<'static>>) -> Self {
        Self { context: Arc::clone(context) }
    }
}

impl ThinBufferMethods for ThinBuffer {
    fn data(&self) -> &[u8] {
        &[]
    }

    fn thin_link_data(&self) -> &[u8] {
        unimplemented!();
    }
}

pub struct ThinData; //(Arc<TempDir>);

fn module_name_to_str(c_str: &CStr) -> &str {
    c_str.to_str().unwrap_or_else(|e| {
        bug!("Encountered non-utf8 GCC module name `{}`: {}", c_str.to_string_lossy(), e)
    })
}
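As a reading aid, here is a minimal, self-contained analogue of the job-splitting loop in `thin_lto` above. All types are stand-ins for the real `ThinShared`, `LtoModuleCodegen` and `WorkProduct`; it only illustrates that, with the cache key map still commented out, every module lands in `opt_jobs` and `copy_jobs` stays empty.

```rust
use std::sync::Arc;

// Stand-in types, not the rustc_codegen_ssa ones.
struct Shared {
    module_names: Vec<String>,
}

#[allow(dead_code)]
enum Job {
    // Reuse a cached artifact (never produced here: the key map is disabled).
    Reuse(String),
    // Re-optimize the module at `idx` using the shared ThinLTO data.
    Reoptimize { shared: Arc<Shared>, idx: usize },
}

fn split_jobs(shared: Arc<Shared>) -> (Vec<Job>, Vec<Job>) {
    let copy_jobs = Vec::new();
    let mut opt_jobs = Vec::new();
    for (idx, _name) in shared.module_names.iter().enumerate() {
        opt_jobs.push(Job::Reoptimize { shared: Arc::clone(&shared), idx });
    }
    (opt_jobs, copy_jobs)
}

fn main() {
    let shared = Arc::new(Shared { module_names: vec!["cgu0".into(), "cgu1".into()] });
    let (opt_jobs, copy_jobs) = split_jobs(shared);
    assert_eq!(opt_jobs.len(), 2);
    assert!(copy_jobs.is_empty());
}
```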
@@ -31,6 +31,7 @@ pub(crate) unsafe fn codegen(

    // NOTE: Only generate object files with GIMPLE when this environment variable is set for
    // now because this requires a particular setup (same gcc/lto1/lto-wrapper commit as libgccjit).
    // TODO: remove this environment variable.
    let fat_lto = env::var("EMBED_LTO_BITCODE").as_deref() == Ok("1");

    let bc_out = cgcx.output_filenames.temp_path(OutputType::Bitcode, module_name);
@@ -113,17 +114,20 @@ pub(crate) unsafe fn codegen(
        context.set_debug_info(true);
        context.dump_to_file(path, true);
    }
    if should_combine_object_files && fat_lto {
        context.add_command_line_option("-flto=auto");
        context.add_command_line_option("-flto-partition=one");
    if should_combine_object_files {
        if fat_lto {
            context.add_command_line_option("-flto=auto");
            context.add_command_line_option("-flto-partition=one");

            // NOTE: without -fuse-linker-plugin, we get the following error:
            // lto1: internal compiler error: decompressed stream: Destination buffer is too small
            context.add_driver_option("-fuse-linker-plugin");
        }

        context.add_driver_option("-Wl,-r");
        // NOTE: we need -nostdlib, otherwise we get the following error:
        // /usr/bin/ld: cannot find -lgcc_s: No such file or directory
        context.add_driver_option("-nostdlib");
        // NOTE: without -fuse-linker-plugin, we get the following error:
        // lto1: internal compiler error: decompressed stream: Destination buffer is too small
        context.add_driver_option("-fuse-linker-plugin");

        // NOTE: this doesn't actually generate an executable. With the above flags, it combines the .o files together in another .o.
        context.compile_to_file(
@@ -131,6 +135,7 @@ pub(crate) unsafe fn codegen(
            obj_out.to_str().expect("path to str"),
        );
    } else {
        //println!("Combining to object file");
        context.compile_to_file(
            OutputKind::ObjectFile,
            obj_out.to_str().expect("path to str"),
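For context, here is a hedged, self-contained sketch of what the combine step above amounts to in isolation: ask the GCC driver, through libgccjit, to partially link the object files (which may carry GIMPLE for LTO) into a single relocatable object. The function name, the `objects` parameter, and the use of `OutputKind::Executable` to trigger the driver/linker invocation are assumptions; the individual options are the ones added in the patch.

```rust
use gccjit::{Context, OutputKind};

// Sketch only: combine several .o files into one relocatable .o via the GCC driver.
fn combine_objects(objects: &[&str], obj_out: &str, fat_lto: bool) {
    let context = Context::default();
    for &object in objects {
        // Each input object is passed straight to the driver.
        context.add_driver_option(object);
    }
    if fat_lto {
        context.add_command_line_option("-flto=auto");
        context.add_command_line_option("-flto-partition=one");
        // Without -fuse-linker-plugin, lto1 fails with
        // "internal compiler error: decompressed stream: Destination buffer is too small".
        context.add_driver_option("-fuse-linker-plugin");
    }
    // -Wl,-r requests a partial (relocatable) link rather than an executable,
    // and -nostdlib avoids "cannot find -lgcc_s" at this stage.
    context.add_driver_option("-Wl,-r");
    context.add_driver_option("-nostdlib");
    // Assumption: despite the "executable" output kind, -Wl,-r makes the result
    // another object file, not a runnable binary.
    context.compile_to_file(OutputKind::Executable, obj_out);
}
```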
@@ -1,5 +1,6 @@
use std::collections::HashSet;
use std::env;
use std::sync::Arc;
use std::time::Instant;

use gccjit::{FunctionType, GlobalKind};
@@ -205,7 +206,11 @@ pub fn compile_codegen_unit(

    ModuleCodegen {
        name: cgu_name.to_string(),
        module_llvm: GccContext { context, should_combine_object_files: false, temp_dir: None },
        module_llvm: GccContext {
            context: Arc::new(context),
            should_combine_object_files: false,
            temp_dir: None,
        },
        kind: ModuleKind::Regular,
    }
}
47 src/lib.rs
@@ -4,7 +4,7 @@
 * TODO(antoyo): support LTO (gcc's equivalent to Full LTO is -flto -flto-partition=one — https://documentation.suse.com/sbp/all/html/SBP-GCC-10/index.html).
 *     For Thin LTO, this might be helpful:
 *     In gcc 4.6 -fwhopr was removed and became default with -flto. The non-whopr path can still be executed via -flto-partition=none.
 *     Or the new incremental LTO?
 *     Or the new incremental LTO (https://www.phoronix.com/news/GCC-Incremental-LTO-Patches)?
 *
 * Maybe some missing optimizations enabled by rustc's LTO are in there: https://gcc.gnu.org/onlinedocs/gcc/Optimize-Options.html
 *     Like -fipa-icf (should be already enabled) and maybe -fdevirtualize-at-ltrans.
@@ -80,6 +80,8 @@ use std::sync::atomic::Ordering;
use std::sync::Arc;
use std::sync::Mutex;

use back::lto::ThinBuffer;
use back::lto::ThinData;
use errors::LTONotSupported;
#[cfg(not(feature = "master"))]
use gccjit::CType;
@@ -92,9 +94,7 @@ use rustc_codegen_ssa::back::write::{
    CodegenContext, FatLtoInput, ModuleConfig, TargetMachineFactoryFn,
};
use rustc_codegen_ssa::base::codegen_crate;
use rustc_codegen_ssa::traits::{
    CodegenBackend, ExtraBackendMethods, ThinBufferMethods, WriteBackendMethods,
};
use rustc_codegen_ssa::traits::{CodegenBackend, ExtraBackendMethods, WriteBackendMethods};
use rustc_codegen_ssa::{CodegenResults, CompiledModule, ModuleCodegen};
use rustc_data_structures::fx::FxIndexMap;
use rustc_data_structures::sync::IntoDynSyncSend;
@@ -188,6 +188,7 @@ impl CodegenBackend for GccCodegenBackend {

        #[cfg(feature = "master")]
        gccjit::set_global_personality_function_name(b"rust_eh_personality\0");

        if sess.lto() == Lto::Thin {
            sess.dcx().emit_warn(LTONotSupported {});
        }
@@ -293,7 +294,7 @@ impl ExtraBackendMethods for GccCodegenBackend {
        alloc_error_handler_kind: AllocatorKind,
    ) -> Self::Module {
        let mut mods = GccContext {
            context: new_context(tcx),
            context: Arc::new(new_context(tcx)),
            should_combine_object_files: false,
            temp_dir: None,
        };
@@ -323,20 +324,8 @@ impl ExtraBackendMethods for GccCodegenBackend {
    }
}

pub struct ThinBuffer;

impl ThinBufferMethods for ThinBuffer {
    fn data(&self) -> &[u8] {
        unimplemented!();
    }

    fn thin_link_data(&self) -> &[u8] {
        unimplemented!();
    }
}

pub struct GccContext {
    context: Context<'static>,
    context: Arc<Context<'static>>,
    should_combine_object_files: bool,
    // Temporary directory used by LTO. We keep it here so that it's not removed before linking.
    temp_dir: Option<TempDir>,
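A minimal sketch of the ownership change in this hunk, using stand-in types rather than the real `GccContext` and `ThinBuffer`: wrapping the context in an `Arc` lets the codegened module and the buffer handed to the ThinLTO machinery share one context instead of copying or re-creating it.

```rust
use std::sync::Arc;

struct Ctx; // stand-in for gccjit::Context<'static>

struct GccModule {
    context: Arc<Ctx>,
}

#[allow(dead_code)]
struct ThinBuf {
    context: Arc<Ctx>,
}

// Mirrors prepare_thin above: the buffer just clones the Arc, so the later
// optimize step can reuse exactly the same context.
fn prepare(module: &GccModule) -> ThinBuf {
    ThinBuf { context: Arc::clone(&module.context) }
}

fn main() {
    let module = GccModule { context: Arc::new(Ctx) };
    let buffer = prepare(&module);
    // Two owners now point at the same context.
    assert_eq!(Arc::strong_count(&module.context), 2);
    drop(buffer);
    assert_eq!(Arc::strong_count(&module.context), 1);
}
```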
@@ -351,7 +340,7 @@ impl WriteBackendMethods for GccCodegenBackend {
    type TargetMachine = ();
    type TargetMachineError = ();
    type ModuleBuffer = ModuleBuffer;
    type ThinData = ();
    type ThinData = ThinData;
    type ThinBuffer = ThinBuffer;

    fn run_fat_lto(
@@ -363,11 +352,11 @@ impl WriteBackendMethods for GccCodegenBackend {
    }

    fn run_thin_lto(
        _cgcx: &CodegenContext<Self>,
        _modules: Vec<(String, Self::ThinBuffer)>,
        _cached_modules: Vec<(SerializedModule<Self::ModuleBuffer>, WorkProduct)>,
        cgcx: &CodegenContext<Self>,
        modules: Vec<(String, Self::ThinBuffer)>,
        cached_modules: Vec<(SerializedModule<Self::ModuleBuffer>, WorkProduct)>,
    ) -> Result<(Vec<LtoModuleCodegen<Self>>, Vec<WorkProduct>), FatalError> {
        unimplemented!();
        back::lto::run_thin(cgcx, modules, cached_modules)
    }

    fn print_pass_timings(&self) {
@@ -397,10 +386,10 @@ impl WriteBackendMethods for GccCodegenBackend {
    }

    unsafe fn optimize_thin(
        _cgcx: &CodegenContext<Self>,
        _thin: ThinModule<Self>,
        cgcx: &CodegenContext<Self>,
        thin: ThinModule<Self>,
    ) -> Result<ModuleCodegen<Self::Module>, FatalError> {
        unimplemented!();
        back::lto::optimize_thin_module(thin, cgcx)
    }

    unsafe fn codegen(
@@ -413,10 +402,10 @@ impl WriteBackendMethods for GccCodegenBackend {
    }

    fn prepare_thin(
        _module: ModuleCodegen<Self::Module>,
        _emit_summary: bool,
        module: ModuleCodegen<Self::Module>,
        emit_summary: bool,
    ) -> (String, Self::ThinBuffer) {
        unimplemented!();
        back::lto::prepare_thin(module, emit_summary)
    }

    fn serialize_module(_module: ModuleCodegen<Self::Module>) -> (String, Self::ModuleBuffer) {