Auto merge of #132762 - Zalathar:rollup-qfgz165, r=Zalathar

Rollup of 5 pull requests

Successful merges:

 - #132161 ([StableMIR] A few fixes to pretty printing)
 - #132389 (coverage: Simplify parts of coverage graph creation)
 - #132452 (coverage: Extract safe FFI wrapper functions to `llvm_cov`)
 - #132590 (Simplify FFI calls for `-Ztime-llvm-passes` and `-Zprint-codegen-stats`)
 - #132738 (Initialize channel `Block`s directly on the heap)

r? `@ghost`
`@rustbot` modify labels: rollup
This commit is contained in:
bors 2024-11-08 08:00:08 +00:00
commit 6295686a37
14 changed files with 695 additions and 338 deletions

View File

@ -0,0 +1,100 @@
//! Safe wrappers for coverage-specific FFI functions.
use std::ffi::CString;
use crate::common::AsCCharPtr;
use crate::coverageinfo::ffi;
use crate::llvm;
/// Returns the name (as a C string) to use for the global variable that
/// holds the covmap data, as reported by LLVM.
pub(crate) fn covmap_var_name() -> CString {
    let raw_name = llvm::build_byte_buffer(|s| unsafe {
        llvm::LLVMRustCoverageWriteCovmapVarNameToString(s);
    });
    CString::new(raw_name).expect("covmap variable name should not contain NUL")
}
/// Returns the name (as a C string) of the object-file section that covmap
/// records should be placed in, for the given module's target.
pub(crate) fn covmap_section_name(llmod: &llvm::Module) -> CString {
    let raw_name = llvm::build_byte_buffer(|s| unsafe {
        llvm::LLVMRustCoverageWriteCovmapSectionNameToString(llmod, s);
    });
    CString::new(raw_name).expect("covmap section name should not contain NUL")
}
/// Returns the name (as a C string) of the object-file section that covfun
/// records should be placed in, for the given module's target.
pub(crate) fn covfun_section_name(llmod: &llvm::Module) -> CString {
    let raw_name = llvm::build_byte_buffer(|s| unsafe {
        llvm::LLVMRustCoverageWriteCovfunSectionNameToString(llmod, s);
    });
    CString::new(raw_name).expect("covfun section name should not contain NUL")
}
/// Creates a global variable associated with `llfn` that holds its mangled
/// name, for use by LLVM's InstrProf/coverage instrumentation.
pub(crate) fn create_pgo_func_name_var<'ll>(
    llfn: &'ll llvm::Value,
    mangled_fn_name: &str,
) -> &'ll llvm::Value {
    let name_ptr = mangled_fn_name.as_c_char_ptr();
    let name_len = mangled_fn_name.len();
    // SAFETY: pointer and length are both derived from the same live `&str`.
    unsafe { llvm::LLVMRustCoverageCreatePGOFuncNameVar(llfn, name_ptr, name_len) }
}
/// Encodes the given filenames into LLVM's coverage filenames format,
/// returning the encoded bytes.
pub(crate) fn write_filenames_to_buffer<'a>(
    filenames: impl IntoIterator<Item = &'a str>,
) -> Vec<u8> {
    // The FFI function takes the filenames as two parallel arrays:
    // string-start pointers and their corresponding lengths.
    let mut pointers = vec![];
    let mut lengths = vec![];
    for name in filenames {
        pointers.push(name.as_c_char_ptr());
        lengths.push(name.len());
    }

    llvm::build_byte_buffer(|buffer| unsafe {
        llvm::LLVMRustCoverageWriteFilenamesToBuffer(
            pointers.as_ptr(),
            pointers.len(),
            lengths.as_ptr(),
            lengths.len(),
            buffer,
        );
    })
}
/// Encodes the given coverage mapping data for one function into LLVM's
/// coverage mapping format, returning the encoded bytes.
///
/// Each slice is passed to the FFI layer as a (pointer, length) pair; the
/// argument order below must exactly match the C-side signature of
/// `LLVMRustCoverageWriteFunctionMappingsToBuffer`.
pub(crate) fn write_function_mappings_to_buffer(
virtual_file_mapping: &[u32],
expressions: &[ffi::CounterExpression],
code_regions: &[ffi::CodeRegion],
branch_regions: &[ffi::BranchRegion],
mcdc_branch_regions: &[ffi::MCDCBranchRegion],
mcdc_decision_regions: &[ffi::MCDCDecisionRegion],
) -> Vec<u8> {
llvm::build_byte_buffer(|buffer| unsafe {
llvm::LLVMRustCoverageWriteFunctionMappingsToBuffer(
virtual_file_mapping.as_ptr(),
virtual_file_mapping.len(),
expressions.as_ptr(),
expressions.len(),
code_regions.as_ptr(),
code_regions.len(),
branch_regions.as_ptr(),
branch_regions.len(),
mcdc_branch_regions.as_ptr(),
mcdc_branch_regions.len(),
mcdc_decision_regions.as_ptr(),
mcdc_decision_regions.len(),
buffer,
)
})
}
/// Hashes some bytes into a 64-bit hash, via LLVM's `IndexedInstrProf::ComputeHash`,
/// as required for parts of the LLVM coverage mapping format.
pub(crate) fn hash_bytes(bytes: &[u8]) -> u64 {
    let (ptr, len) = (bytes.as_c_char_ptr(), bytes.len());
    // SAFETY: pointer and length are both derived from the same live slice.
    unsafe { llvm::LLVMRustCoverageHashBytes(ptr, len) }
}
/// Returns LLVM's `coverage::CovMapVersion::CurrentVersion` (CoverageMapping.h)
/// as a raw numeric value. For historical reasons, the numeric value is 1 less
/// than the number in the version's name, so `Version7` is actually `6u32`.
///
/// NOTE(review): callers appear to cross-check this against the range of
/// versions the Rust-side encoder supports — keep that check in sync when
/// LLVM bumps its current version.
pub(crate) fn mapping_version() -> u32 {
unsafe { llvm::LLVMRustCoverageMappingVersion() }
}

View File

@ -1,4 +1,5 @@
use std::ffi::CString; use std::ffi::CString;
use std::iter;
use itertools::Itertools as _; use itertools::Itertools as _;
use rustc_abi::Align; use rustc_abi::Align;
@ -17,9 +18,9 @@
use tracing::debug; use tracing::debug;
use crate::common::CodegenCx; use crate::common::CodegenCx;
use crate::coverageinfo::ffi;
use crate::coverageinfo::map_data::{FunctionCoverage, FunctionCoverageCollector}; use crate::coverageinfo::map_data::{FunctionCoverage, FunctionCoverageCollector};
use crate::{coverageinfo, llvm}; use crate::coverageinfo::{ffi, llvm_cov};
use crate::llvm;
/// Generates and exports the coverage map, which is embedded in special /// Generates and exports the coverage map, which is embedded in special
/// linker sections in the final binary. /// linker sections in the final binary.
@ -33,7 +34,7 @@ pub(crate) fn finalize(cx: &CodegenCx<'_, '_>) {
// agrees with our Rust-side code. Expected versions (encoded as n-1) are: // agrees with our Rust-side code. Expected versions (encoded as n-1) are:
// - `CovMapVersion::Version7` (6) used by LLVM 18-19 // - `CovMapVersion::Version7` (6) used by LLVM 18-19
let covmap_version = { let covmap_version = {
let llvm_covmap_version = coverageinfo::mapping_version(); let llvm_covmap_version = llvm_cov::mapping_version();
let expected_versions = 6..=6; let expected_versions = 6..=6;
assert!( assert!(
expected_versions.contains(&llvm_covmap_version), expected_versions.contains(&llvm_covmap_version),
@ -78,7 +79,7 @@ pub(crate) fn finalize(cx: &CodegenCx<'_, '_>) {
let filenames_size = filenames_buffer.len(); let filenames_size = filenames_buffer.len();
let filenames_val = cx.const_bytes(&filenames_buffer); let filenames_val = cx.const_bytes(&filenames_buffer);
let filenames_ref = coverageinfo::hash_bytes(&filenames_buffer); let filenames_ref = llvm_cov::hash_bytes(&filenames_buffer);
// Generate the coverage map header, which contains the filenames used by // Generate the coverage map header, which contains the filenames used by
// this CGU's coverage mappings, and store it in a well-known global. // this CGU's coverage mappings, and store it in a well-known global.
@ -187,13 +188,10 @@ fn make_filenames_buffer(&self, tcx: TyCtxt<'_>) -> Vec<u8> {
.for_scope(tcx.sess, RemapPathScopeComponents::MACRO) .for_scope(tcx.sess, RemapPathScopeComponents::MACRO)
.to_string_lossy(); .to_string_lossy();
llvm::build_byte_buffer(|buffer| { // Insert the working dir at index 0, before the other filenames.
coverageinfo::write_filenames_section_to_buffer( let filenames =
// Insert the working dir at index 0, before the other filenames. iter::once(working_dir).chain(self.raw_file_table.iter().map(Symbol::as_str));
std::iter::once(working_dir).chain(self.raw_file_table.iter().map(Symbol::as_str)), llvm_cov::write_filenames_to_buffer(filenames)
buffer,
);
})
} }
} }
@ -296,17 +294,14 @@ fn encode_mappings_for_function(
} }
// Encode the function's coverage mappings into a buffer. // Encode the function's coverage mappings into a buffer.
llvm::build_byte_buffer(|buffer| { llvm_cov::write_function_mappings_to_buffer(
coverageinfo::write_mapping_to_buffer( &virtual_file_mapping.into_vec(),
virtual_file_mapping.into_vec(), &expressions,
expressions, &code_regions,
&code_regions, &branch_regions,
&branch_regions, &mcdc_branch_regions,
&mcdc_branch_regions, &mcdc_decision_regions,
&mcdc_decision_regions, )
buffer,
);
})
} }
/// Generates the contents of the covmap record for this CGU, which mostly /// Generates the contents of the covmap record for this CGU, which mostly
@ -335,23 +330,11 @@ fn generate_covmap_record<'ll>(
let covmap_data = let covmap_data =
cx.const_struct(&[cov_data_header_val, filenames_val], /*packed=*/ false); cx.const_struct(&[cov_data_header_val, filenames_val], /*packed=*/ false);
let covmap_var_name = CString::new(llvm::build_byte_buffer(|s| unsafe { let llglobal = llvm::add_global(cx.llmod, cx.val_ty(covmap_data), &llvm_cov::covmap_var_name());
llvm::LLVMRustCoverageWriteMappingVarNameToString(s);
}))
.unwrap();
debug!("covmap var name: {:?}", covmap_var_name);
let covmap_section_name = CString::new(llvm::build_byte_buffer(|s| unsafe {
llvm::LLVMRustCoverageWriteMapSectionNameToString(cx.llmod, s);
}))
.expect("covmap section name should not contain NUL");
debug!("covmap section name: {:?}", covmap_section_name);
let llglobal = llvm::add_global(cx.llmod, cx.val_ty(covmap_data), &covmap_var_name);
llvm::set_initializer(llglobal, covmap_data); llvm::set_initializer(llglobal, covmap_data);
llvm::set_global_constant(llglobal, true); llvm::set_global_constant(llglobal, true);
llvm::set_linkage(llglobal, llvm::Linkage::PrivateLinkage); llvm::set_linkage(llglobal, llvm::Linkage::PrivateLinkage);
llvm::set_section(llglobal, &covmap_section_name); llvm::set_section(llglobal, &llvm_cov::covmap_section_name(cx.llmod));
// LLVM's coverage mapping format specifies 8-byte alignment for items in this section. // LLVM's coverage mapping format specifies 8-byte alignment for items in this section.
// <https://llvm.org/docs/CoverageMappingFormat.html> // <https://llvm.org/docs/CoverageMappingFormat.html>
llvm::set_alignment(llglobal, Align::EIGHT); llvm::set_alignment(llglobal, Align::EIGHT);
@ -373,7 +356,7 @@ fn generate_covfun_record(
let coverage_mapping_size = coverage_mapping_buffer.len(); let coverage_mapping_size = coverage_mapping_buffer.len();
let coverage_mapping_val = cx.const_bytes(&coverage_mapping_buffer); let coverage_mapping_val = cx.const_bytes(&coverage_mapping_buffer);
let func_name_hash = coverageinfo::hash_bytes(mangled_function_name.as_bytes()); let func_name_hash = llvm_cov::hash_bytes(mangled_function_name.as_bytes());
let func_name_hash_val = cx.const_u64(func_name_hash); let func_name_hash_val = cx.const_u64(func_name_hash);
let coverage_mapping_size_val = cx.const_u32(coverage_mapping_size as u32); let coverage_mapping_size_val = cx.const_u32(coverage_mapping_size as u32);
let source_hash_val = cx.const_u64(source_hash); let source_hash_val = cx.const_u64(source_hash);

View File

@ -1,24 +1,23 @@
use std::cell::{OnceCell, RefCell}; use std::cell::{OnceCell, RefCell};
use std::ffi::{CStr, CString}; use std::ffi::{CStr, CString};
use libc::c_uint;
use rustc_abi::Size; use rustc_abi::Size;
use rustc_codegen_ssa::traits::{ use rustc_codegen_ssa::traits::{
BuilderMethods, ConstCodegenMethods, CoverageInfoBuilderMethods, MiscCodegenMethods, BuilderMethods, ConstCodegenMethods, CoverageInfoBuilderMethods, MiscCodegenMethods,
}; };
use rustc_data_structures::fx::{FxHashMap, FxIndexMap}; use rustc_data_structures::fx::{FxHashMap, FxIndexMap};
use rustc_llvm::RustString;
use rustc_middle::mir::coverage::CoverageKind; use rustc_middle::mir::coverage::CoverageKind;
use rustc_middle::ty::Instance; use rustc_middle::ty::Instance;
use rustc_middle::ty::layout::HasTyCtxt; use rustc_middle::ty::layout::HasTyCtxt;
use tracing::{debug, instrument}; use tracing::{debug, instrument};
use crate::builder::Builder; use crate::builder::Builder;
use crate::common::{AsCCharPtr, CodegenCx}; use crate::common::CodegenCx;
use crate::coverageinfo::map_data::FunctionCoverageCollector; use crate::coverageinfo::map_data::FunctionCoverageCollector;
use crate::llvm; use crate::llvm;
pub(crate) mod ffi; pub(crate) mod ffi;
mod llvm_cov;
pub(crate) mod map_data; pub(crate) mod map_data;
mod mapgen; mod mapgen;
@ -80,12 +79,9 @@ pub(crate) fn coverageinfo_finalize(&self) {
/// - `__LLVM_COV,__llvm_covfun` on macOS (includes `__LLVM_COV,` segment prefix) /// - `__LLVM_COV,__llvm_covfun` on macOS (includes `__LLVM_COV,` segment prefix)
/// - `.lcovfun$M` on Windows (includes `$M` sorting suffix) /// - `.lcovfun$M` on Windows (includes `$M` sorting suffix)
fn covfun_section_name(&self) -> &CStr { fn covfun_section_name(&self) -> &CStr {
self.coverage_cx().covfun_section_name.get_or_init(|| { self.coverage_cx()
CString::new(llvm::build_byte_buffer(|s| unsafe { .covfun_section_name
llvm::LLVMRustCoverageWriteFuncSectionNameToString(self.llmod, s); .get_or_init(|| llvm_cov::covfun_section_name(self.llmod))
}))
.expect("covfun section name should not contain NUL")
})
} }
/// For LLVM codegen, returns a function-specific `Value` for a global /// For LLVM codegen, returns a function-specific `Value` for a global
@ -95,9 +91,11 @@ fn covfun_section_name(&self) -> &CStr {
fn get_pgo_func_name_var(&self, instance: Instance<'tcx>) -> &'ll llvm::Value { fn get_pgo_func_name_var(&self, instance: Instance<'tcx>) -> &'ll llvm::Value {
debug!("getting pgo_func_name_var for instance={:?}", instance); debug!("getting pgo_func_name_var for instance={:?}", instance);
let mut pgo_func_name_var_map = self.coverage_cx().pgo_func_name_var_map.borrow_mut(); let mut pgo_func_name_var_map = self.coverage_cx().pgo_func_name_var_map.borrow_mut();
pgo_func_name_var_map pgo_func_name_var_map.entry(instance).or_insert_with(|| {
.entry(instance) let llfn = self.get_fn(instance);
.or_insert_with(|| create_pgo_func_name_var(self, instance)) let mangled_fn_name: &str = self.tcx.symbol_name(instance).name;
llvm_cov::create_pgo_func_name_var(llfn, mangled_fn_name)
})
} }
} }
@ -225,80 +223,3 @@ fn add_coverage(&mut self, instance: Instance<'tcx>, kind: &CoverageKind) {
} }
} }
} }
/// Calls llvm::createPGOFuncNameVar() with the given function instance's
/// mangled function name. The LLVM API returns an llvm::GlobalVariable
/// containing the function name, with the specific variable name and linkage
/// required by LLVM InstrProf source-based coverage instrumentation. Use
/// `bx.get_pgo_func_name_var()` to ensure the variable is only created once per
/// `Instance`.
fn create_pgo_func_name_var<'ll, 'tcx>(
cx: &CodegenCx<'ll, 'tcx>,
instance: Instance<'tcx>,
) -> &'ll llvm::Value {
let mangled_fn_name: &str = cx.tcx.symbol_name(instance).name;
let llfn = cx.get_fn(instance);
unsafe {
llvm::LLVMRustCoverageCreatePGOFuncNameVar(
llfn,
mangled_fn_name.as_c_char_ptr(),
mangled_fn_name.len(),
)
}
}
pub(crate) fn write_filenames_section_to_buffer<'a>(
filenames: impl IntoIterator<Item = &'a str>,
buffer: &RustString,
) {
let (pointers, lengths) = filenames
.into_iter()
.map(|s: &str| (s.as_c_char_ptr(), s.len()))
.unzip::<_, _, Vec<_>, Vec<_>>();
unsafe {
llvm::LLVMRustCoverageWriteFilenamesSectionToBuffer(
pointers.as_ptr(),
pointers.len(),
lengths.as_ptr(),
lengths.len(),
buffer,
);
}
}
pub(crate) fn write_mapping_to_buffer(
virtual_file_mapping: Vec<u32>,
expressions: Vec<ffi::CounterExpression>,
code_regions: &[ffi::CodeRegion],
branch_regions: &[ffi::BranchRegion],
mcdc_branch_regions: &[ffi::MCDCBranchRegion],
mcdc_decision_regions: &[ffi::MCDCDecisionRegion],
buffer: &RustString,
) {
unsafe {
llvm::LLVMRustCoverageWriteMappingToBuffer(
virtual_file_mapping.as_ptr(),
virtual_file_mapping.len() as c_uint,
expressions.as_ptr(),
expressions.len() as c_uint,
code_regions.as_ptr(),
code_regions.len() as c_uint,
branch_regions.as_ptr(),
branch_regions.len() as c_uint,
mcdc_branch_regions.as_ptr(),
mcdc_branch_regions.len() as c_uint,
mcdc_decision_regions.as_ptr(),
mcdc_decision_regions.len() as c_uint,
buffer,
);
}
}
pub(crate) fn hash_bytes(bytes: &[u8]) -> u64 {
unsafe { llvm::LLVMRustCoverageHashByteArray(bytes.as_c_char_ptr(), bytes.len()) }
}
pub(crate) fn mapping_version() -> u32 {
unsafe { llvm::LLVMRustCoverageMappingVersion() }
}

View File

@ -22,7 +22,6 @@
use std::any::Any; use std::any::Any;
use std::ffi::CStr; use std::ffi::CStr;
use std::io::Write;
use std::mem::ManuallyDrop; use std::mem::ManuallyDrop;
use back::owned_target_machine::OwnedTargetMachine; use back::owned_target_machine::OwnedTargetMachine;
@ -165,30 +164,12 @@ impl WriteBackendMethods for LlvmCodegenBackend {
type ThinData = back::lto::ThinData; type ThinData = back::lto::ThinData;
type ThinBuffer = back::lto::ThinBuffer; type ThinBuffer = back::lto::ThinBuffer;
fn print_pass_timings(&self) { fn print_pass_timings(&self) {
unsafe { let timings = llvm::build_string(|s| unsafe { llvm::LLVMRustPrintPassTimings(s) }).unwrap();
let mut size = 0; print!("{timings}");
let cstr = llvm::LLVMRustPrintPassTimings(&raw mut size);
if cstr.is_null() {
println!("failed to get pass timings");
} else {
let timings = std::slice::from_raw_parts(cstr as *const u8, size);
std::io::stdout().write_all(timings).unwrap();
libc::free(cstr as *mut _);
}
}
} }
fn print_statistics(&self) { fn print_statistics(&self) {
unsafe { let stats = llvm::build_string(|s| unsafe { llvm::LLVMRustPrintStatistics(s) }).unwrap();
let mut size = 0; print!("{stats}");
let cstr = llvm::LLVMRustPrintStatistics(&raw mut size);
if cstr.is_null() {
println!("failed to get pass stats");
} else {
let stats = std::slice::from_raw_parts(cstr as *const u8, size);
std::io::stdout().write_all(stats).unwrap();
libc::free(cstr as *mut _);
}
}
} }
fn run_link( fn run_link(
cgcx: &CodegenContext<Self>, cgcx: &CodegenContext<Self>,

View File

@ -1765,11 +1765,13 @@ pub fn LLVMRustBuildAtomicStore<'a>(
/// Returns a string describing the last error caused by an LLVMRust* call. /// Returns a string describing the last error caused by an LLVMRust* call.
pub fn LLVMRustGetLastError() -> *const c_char; pub fn LLVMRustGetLastError() -> *const c_char;
/// Print the pass timings since static dtors aren't picking them up. /// Prints the timing information collected by `-Ztime-llvm-passes`.
pub fn LLVMRustPrintPassTimings(size: *const size_t) -> *const c_char; #[expect(improper_ctypes)]
pub(crate) fn LLVMRustPrintPassTimings(OutStr: &RustString);
/// Print the statistics since static dtors aren't picking them up. /// Prints the statistics collected by `-Zprint-codegen-stats`.
pub fn LLVMRustPrintStatistics(size: *const size_t) -> *const c_char; #[expect(improper_ctypes)]
pub(crate) fn LLVMRustPrintStatistics(OutStr: &RustString);
/// Prepares inline assembly. /// Prepares inline assembly.
pub fn LLVMRustInlineAsm( pub fn LLVMRustInlineAsm(
@ -1790,7 +1792,7 @@ pub fn LLVMRustInlineAsmVerify(
) -> bool; ) -> bool;
#[allow(improper_ctypes)] #[allow(improper_ctypes)]
pub(crate) fn LLVMRustCoverageWriteFilenamesSectionToBuffer( pub(crate) fn LLVMRustCoverageWriteFilenamesToBuffer(
Filenames: *const *const c_char, Filenames: *const *const c_char,
FilenamesLen: size_t, FilenamesLen: size_t,
Lengths: *const size_t, Lengths: *const size_t,
@ -1799,19 +1801,19 @@ pub(crate) fn LLVMRustCoverageWriteFilenamesSectionToBuffer(
); );
#[allow(improper_ctypes)] #[allow(improper_ctypes)]
pub(crate) fn LLVMRustCoverageWriteMappingToBuffer( pub(crate) fn LLVMRustCoverageWriteFunctionMappingsToBuffer(
VirtualFileMappingIDs: *const c_uint, VirtualFileMappingIDs: *const c_uint,
NumVirtualFileMappingIDs: c_uint, NumVirtualFileMappingIDs: size_t,
Expressions: *const crate::coverageinfo::ffi::CounterExpression, Expressions: *const crate::coverageinfo::ffi::CounterExpression,
NumExpressions: c_uint, NumExpressions: size_t,
CodeRegions: *const crate::coverageinfo::ffi::CodeRegion, CodeRegions: *const crate::coverageinfo::ffi::CodeRegion,
NumCodeRegions: c_uint, NumCodeRegions: size_t,
BranchRegions: *const crate::coverageinfo::ffi::BranchRegion, BranchRegions: *const crate::coverageinfo::ffi::BranchRegion,
NumBranchRegions: c_uint, NumBranchRegions: size_t,
MCDCBranchRegions: *const crate::coverageinfo::ffi::MCDCBranchRegion, MCDCBranchRegions: *const crate::coverageinfo::ffi::MCDCBranchRegion,
NumMCDCBranchRegions: c_uint, NumMCDCBranchRegions: size_t,
MCDCDecisionRegions: *const crate::coverageinfo::ffi::MCDCDecisionRegion, MCDCDecisionRegions: *const crate::coverageinfo::ffi::MCDCDecisionRegion,
NumMCDCDecisionRegions: c_uint, NumMCDCDecisionRegions: size_t,
BufferOut: &RustString, BufferOut: &RustString,
); );
@ -1820,16 +1822,16 @@ pub(crate) fn LLVMRustCoverageCreatePGOFuncNameVar(
FuncName: *const c_char, FuncName: *const c_char,
FuncNameLen: size_t, FuncNameLen: size_t,
) -> &Value; ) -> &Value;
pub(crate) fn LLVMRustCoverageHashByteArray(Bytes: *const c_char, NumBytes: size_t) -> u64; pub(crate) fn LLVMRustCoverageHashBytes(Bytes: *const c_char, NumBytes: size_t) -> u64;
#[allow(improper_ctypes)] #[allow(improper_ctypes)]
pub(crate) fn LLVMRustCoverageWriteMapSectionNameToString(M: &Module, Str: &RustString); pub(crate) fn LLVMRustCoverageWriteCovmapSectionNameToString(M: &Module, OutStr: &RustString);
#[allow(improper_ctypes)] #[allow(improper_ctypes)]
pub(crate) fn LLVMRustCoverageWriteFuncSectionNameToString(M: &Module, Str: &RustString); pub(crate) fn LLVMRustCoverageWriteCovfunSectionNameToString(M: &Module, OutStr: &RustString);
#[allow(improper_ctypes)] #[allow(improper_ctypes)]
pub(crate) fn LLVMRustCoverageWriteMappingVarNameToString(Str: &RustString); pub(crate) fn LLVMRustCoverageWriteCovmapVarNameToString(OutStr: &RustString);
pub(crate) fn LLVMRustCoverageMappingVersion() -> u32; pub(crate) fn LLVMRustCoverageMappingVersion() -> u32;
pub fn LLVMRustDebugMetadataVersion() -> u32; pub fn LLVMRustDebugMetadataVersion() -> u32;

View File

@ -123,13 +123,13 @@ fromRust(LLVMRustCounterExprKind Kind) {
report_fatal_error("Bad LLVMRustCounterExprKind!"); report_fatal_error("Bad LLVMRustCounterExprKind!");
} }
extern "C" void LLVMRustCoverageWriteFilenamesSectionToBuffer( extern "C" void LLVMRustCoverageWriteFilenamesToBuffer(
const char *const Filenames[], size_t FilenamesLen, // String start pointers const char *const Filenames[], size_t FilenamesLen, // String start pointers
const size_t *const Lengths, size_t LengthsLen, // Corresponding lengths const size_t *const Lengths, size_t LengthsLen, // Corresponding lengths
RustStringRef BufferOut) { RustStringRef BufferOut) {
if (FilenamesLen != LengthsLen) { if (FilenamesLen != LengthsLen) {
report_fatal_error( report_fatal_error(
"Mismatched lengths in LLVMRustCoverageWriteFilenamesSectionToBuffer"); "Mismatched lengths in LLVMRustCoverageWriteFilenamesToBuffer");
} }
SmallVector<std::string, 32> FilenameRefs; SmallVector<std::string, 32> FilenameRefs;
@ -143,16 +143,15 @@ extern "C" void LLVMRustCoverageWriteFilenamesSectionToBuffer(
FilenamesWriter.write(OS); FilenamesWriter.write(OS);
} }
extern "C" void LLVMRustCoverageWriteMappingToBuffer( extern "C" void LLVMRustCoverageWriteFunctionMappingsToBuffer(
const unsigned *VirtualFileMappingIDs, unsigned NumVirtualFileMappingIDs, const unsigned *VirtualFileMappingIDs, size_t NumVirtualFileMappingIDs,
const LLVMRustCounterExpression *RustExpressions, unsigned NumExpressions, const LLVMRustCounterExpression *RustExpressions, size_t NumExpressions,
const LLVMRustCoverageCodeRegion *CodeRegions, unsigned NumCodeRegions, const LLVMRustCoverageCodeRegion *CodeRegions, size_t NumCodeRegions,
const LLVMRustCoverageBranchRegion *BranchRegions, const LLVMRustCoverageBranchRegion *BranchRegions, size_t NumBranchRegions,
unsigned NumBranchRegions,
const LLVMRustCoverageMCDCBranchRegion *MCDCBranchRegions, const LLVMRustCoverageMCDCBranchRegion *MCDCBranchRegions,
unsigned NumMCDCBranchRegions, size_t NumMCDCBranchRegions,
const LLVMRustCoverageMCDCDecisionRegion *MCDCDecisionRegions, const LLVMRustCoverageMCDCDecisionRegion *MCDCDecisionRegions,
unsigned NumMCDCDecisionRegions, RustStringRef BufferOut) { size_t NumMCDCDecisionRegions, RustStringRef BufferOut) {
// Convert from FFI representation to LLVM representation. // Convert from FFI representation to LLVM representation.
// Expressions: // Expressions:
@ -219,34 +218,37 @@ LLVMRustCoverageCreatePGOFuncNameVar(LLVMValueRef F, const char *FuncName,
return wrap(createPGOFuncNameVar(*cast<Function>(unwrap(F)), FuncNameRef)); return wrap(createPGOFuncNameVar(*cast<Function>(unwrap(F)), FuncNameRef));
} }
extern "C" uint64_t LLVMRustCoverageHashByteArray(const char *Bytes, extern "C" uint64_t LLVMRustCoverageHashBytes(const char *Bytes,
size_t NumBytes) { size_t NumBytes) {
auto StrRef = StringRef(Bytes, NumBytes); return IndexedInstrProf::ComputeHash(StringRef(Bytes, NumBytes));
return IndexedInstrProf::ComputeHash(StrRef);
} }
static void WriteSectionNameToString(LLVMModuleRef M, InstrProfSectKind SK, // Private helper function for getting the covmap and covfun section names.
RustStringRef Str) { static void writeInstrProfSectionNameToString(LLVMModuleRef M,
InstrProfSectKind SectKind,
RustStringRef OutStr) {
auto TargetTriple = Triple(unwrap(M)->getTargetTriple()); auto TargetTriple = Triple(unwrap(M)->getTargetTriple());
auto name = getInstrProfSectionName(SK, TargetTriple.getObjectFormat()); auto name = getInstrProfSectionName(SectKind, TargetTriple.getObjectFormat());
auto OS = RawRustStringOstream(Str); auto OS = RawRustStringOstream(OutStr);
OS << name; OS << name;
} }
extern "C" void LLVMRustCoverageWriteMapSectionNameToString(LLVMModuleRef M, extern "C" void
RustStringRef Str) { LLVMRustCoverageWriteCovmapSectionNameToString(LLVMModuleRef M,
WriteSectionNameToString(M, IPSK_covmap, Str); RustStringRef OutStr) {
writeInstrProfSectionNameToString(M, IPSK_covmap, OutStr);
} }
extern "C" void extern "C" void
LLVMRustCoverageWriteFuncSectionNameToString(LLVMModuleRef M, LLVMRustCoverageWriteCovfunSectionNameToString(LLVMModuleRef M,
RustStringRef Str) { RustStringRef OutStr) {
WriteSectionNameToString(M, IPSK_covfun, Str); writeInstrProfSectionNameToString(M, IPSK_covfun, OutStr);
} }
extern "C" void LLVMRustCoverageWriteMappingVarNameToString(RustStringRef Str) { extern "C" void
LLVMRustCoverageWriteCovmapVarNameToString(RustStringRef OutStr) {
auto name = getCoverageMappingVarName(); auto name = getCoverageMappingVarName();
auto OS = RawRustStringOstream(Str); auto OS = RawRustStringOstream(OutStr);
OS << name; OS << name;
} }

View File

@ -140,26 +140,14 @@ extern "C" void LLVMRustSetNormalizedTarget(LLVMModuleRef M,
unwrap(M)->setTargetTriple(Triple::normalize(Triple)); unwrap(M)->setTargetTriple(Triple::normalize(Triple));
} }
extern "C" const char *LLVMRustPrintPassTimings(size_t *Len) { extern "C" void LLVMRustPrintPassTimings(RustStringRef OutBuf) {
std::string buf; auto OS = RawRustStringOstream(OutBuf);
auto SS = raw_string_ostream(buf); TimerGroup::printAll(OS);
TimerGroup::printAll(SS);
SS.flush();
*Len = buf.length();
char *CStr = (char *)malloc(*Len);
memcpy(CStr, buf.c_str(), *Len);
return CStr;
} }
extern "C" const char *LLVMRustPrintStatistics(size_t *Len) { extern "C" void LLVMRustPrintStatistics(RustStringRef OutBuf) {
std::string buf; auto OS = RawRustStringOstream(OutBuf);
auto SS = raw_string_ostream(buf); llvm::PrintStatistics(OS);
llvm::PrintStatistics(SS);
SS.flush();
*Len = buf.length();
char *CStr = (char *)malloc(*Len);
memcpy(CStr, buf.c_str(), *Len);
return CStr;
} }
extern "C" LLVMValueRef LLVMRustGetNamedValue(LLVMModuleRef M, const char *Name, extern "C" LLVMValueRef LLVMRustGetNamedValue(LLVMModuleRef M, const char *Name,

View File

@ -1,7 +1,7 @@
use std::cmp::Ordering; use std::cmp::Ordering;
use std::collections::VecDeque; use std::collections::VecDeque;
use std::iter;
use std::ops::{Index, IndexMut}; use std::ops::{Index, IndexMut};
use std::{iter, mem, slice};
use rustc_data_structures::captures::Captures; use rustc_data_structures::captures::Captures;
use rustc_data_structures::fx::FxHashSet; use rustc_data_structures::fx::FxHashSet;
@ -127,10 +127,10 @@ fn compute_basic_coverage_blocks(
let mut bcbs = IndexVec::<BasicCoverageBlock, _>::with_capacity(num_basic_blocks); let mut bcbs = IndexVec::<BasicCoverageBlock, _>::with_capacity(num_basic_blocks);
let mut bb_to_bcb = IndexVec::from_elem_n(None, num_basic_blocks); let mut bb_to_bcb = IndexVec::from_elem_n(None, num_basic_blocks);
let mut add_basic_coverage_block = |basic_blocks: &mut Vec<BasicBlock>| { let mut flush_chain_into_new_bcb = |current_chain: &mut Vec<BasicBlock>| {
// Take the accumulated list of blocks, leaving the vector empty // Take the accumulated list of blocks, leaving the vector empty
// to be used by subsequent BCBs. // to be used by subsequent BCBs.
let basic_blocks = std::mem::take(basic_blocks); let basic_blocks = mem::take(current_chain);
let bcb = bcbs.next_index(); let bcb = bcbs.next_index();
for &bb in basic_blocks.iter() { for &bb in basic_blocks.iter() {
@ -141,48 +141,41 @@ fn compute_basic_coverage_blocks(
bcb_filtered_successors(mir_body[bb].terminator()).is_out_summable() bcb_filtered_successors(mir_body[bb].terminator()).is_out_summable()
}); });
let bcb_data = BasicCoverageBlockData { basic_blocks, is_out_summable }; let bcb_data = BasicCoverageBlockData { basic_blocks, is_out_summable };
debug!("adding bcb{}: {:?}", bcb.index(), bcb_data); debug!("adding {bcb:?}: {bcb_data:?}");
bcbs.push(bcb_data); bcbs.push(bcb_data);
}; };
// Walk the MIR CFG using a Preorder traversal, which starts from `START_BLOCK` and follows // Traverse the MIR control-flow graph, accumulating chains of blocks
// each block terminator's `successors()`. Coverage spans must map to actual source code, // that can be combined into a single node in the coverage graph.
// so compiler generated blocks and paths can be ignored. To that end, the CFG traversal // A depth-first search ensures that if two nodes can be chained
// intentionally omits unwind paths. // together, they will be adjacent in the traversal order.
// FIXME(#78544): MIR InstrumentCoverage: Improve coverage of `#[should_panic]` tests and
// `catch_unwind()` handlers.
// Accumulates a chain of blocks that will be combined into one BCB. // Accumulates a chain of blocks that will be combined into one BCB.
let mut basic_blocks = Vec::new(); let mut current_chain = vec![];
let filtered_successors = |bb| bcb_filtered_successors(mir_body[bb].terminator()); let subgraph = CoverageRelevantSubgraph::new(&mir_body.basic_blocks);
for bb in short_circuit_preorder(mir_body, filtered_successors) for bb in graph::depth_first_search(subgraph, mir::START_BLOCK)
.filter(|&bb| mir_body[bb].terminator().kind != TerminatorKind::Unreachable) .filter(|&bb| mir_body[bb].terminator().kind != TerminatorKind::Unreachable)
{ {
// If the previous block can't be chained into `bb`, flush the accumulated if let Some(&prev) = current_chain.last() {
// blocks into a new BCB, then start building the next chain. // Adding a block to a non-empty chain is allowed if the
if let Some(&prev) = basic_blocks.last() // previous block permits chaining, and the current block has
&& (!filtered_successors(prev).is_chainable() || { // `prev` as its sole predecessor.
// If `bb` has multiple predecessor blocks, or `prev` isn't let can_chain = subgraph.coverage_successors(prev).is_out_chainable()
// one of its predecessors, we can't chain and must flush. && mir_body.basic_blocks.predecessors()[bb].as_slice() == &[prev];
let predecessors = &mir_body.basic_blocks.predecessors()[bb]; if !can_chain {
predecessors.len() > 1 || !predecessors.contains(&prev) // The current block can't be added to the existing chain, so
}) // flush that chain into a new BCB, and start a new chain.
{ flush_chain_into_new_bcb(&mut current_chain);
debug!( }
terminator_kind = ?mir_body[prev].terminator().kind,
predecessors = ?&mir_body.basic_blocks.predecessors()[bb],
"can't chain from {prev:?} to {bb:?}"
);
add_basic_coverage_block(&mut basic_blocks);
} }
basic_blocks.push(bb); current_chain.push(bb);
} }
if !basic_blocks.is_empty() { if !current_chain.is_empty() {
debug!("flushing accumulated blocks into one last BCB"); debug!("flushing accumulated blocks into one last BCB");
add_basic_coverage_block(&mut basic_blocks); flush_chain_into_new_bcb(&mut current_chain);
} }
(bcbs, bb_to_bcb) (bcbs, bb_to_bcb)
@ -389,34 +382,28 @@ pub(crate) fn last_bb(&self) -> BasicBlock {
/// indicates whether that block can potentially be combined into the same BCB /// indicates whether that block can potentially be combined into the same BCB
/// as its sole successor. /// as its sole successor.
#[derive(Clone, Copy, Debug)] #[derive(Clone, Copy, Debug)]
enum CoverageSuccessors<'a> { struct CoverageSuccessors<'a> {
/// The terminator has exactly one straight-line successor, so its block can /// Coverage-relevant successors of the corresponding terminator.
/// potentially be combined into the same BCB as that successor. /// There might be 0, 1, or multiple targets.
Chainable(BasicBlock), targets: &'a [BasicBlock],
/// The block cannot be combined into the same BCB as its successor(s). /// `Yield` terminators are not chainable, because their sole out-edge is
NotChainable(&'a [BasicBlock]), /// only followed if/when the generator is resumed after the yield.
/// Yield terminators are not chainable, and their execution count can also is_yield: bool,
/// differ from the execution count of their out-edge.
Yield(BasicBlock),
} }
impl CoverageSuccessors<'_> { impl CoverageSuccessors<'_> {
fn is_chainable(&self) -> bool { /// If `false`, this terminator cannot be chained into another block when
match self { /// building the coverage graph.
Self::Chainable(_) => true, fn is_out_chainable(&self) -> bool {
Self::NotChainable(_) => false, // If a terminator is out-summable and has exactly one out-edge, then
Self::Yield(_) => false, // it is eligible to be chained into its successor block.
} self.is_out_summable() && self.targets.len() == 1
} }
/// Returns true if the terminator itself is assumed to have the same /// Returns true if the terminator itself is assumed to have the same
/// execution count as the sum of its out-edges (assuming no panics). /// execution count as the sum of its out-edges (assuming no panics).
fn is_out_summable(&self) -> bool { fn is_out_summable(&self) -> bool {
match self { !self.is_yield && !self.targets.is_empty()
Self::Chainable(_) => true,
Self::NotChainable(_) => true,
Self::Yield(_) => false,
}
} }
} }
@ -425,12 +412,7 @@ impl IntoIterator for CoverageSuccessors<'_> {
type IntoIter = impl DoubleEndedIterator<Item = Self::Item>; type IntoIter = impl DoubleEndedIterator<Item = Self::Item>;
fn into_iter(self) -> Self::IntoIter { fn into_iter(self) -> Self::IntoIter {
match self { self.targets.iter().copied()
Self::Chainable(bb) | Self::Yield(bb) => {
Some(bb).into_iter().chain((&[]).iter().copied())
}
Self::NotChainable(bbs) => None.into_iter().chain(bbs.iter().copied()),
}
} }
} }
@ -440,14 +422,17 @@ fn into_iter(self) -> Self::IntoIter {
// `catch_unwind()` handlers. // `catch_unwind()` handlers.
fn bcb_filtered_successors<'a, 'tcx>(terminator: &'a Terminator<'tcx>) -> CoverageSuccessors<'a> { fn bcb_filtered_successors<'a, 'tcx>(terminator: &'a Terminator<'tcx>) -> CoverageSuccessors<'a> {
use TerminatorKind::*; use TerminatorKind::*;
match terminator.kind { let mut is_yield = false;
let targets = match &terminator.kind {
// A switch terminator can have many coverage-relevant successors. // A switch terminator can have many coverage-relevant successors.
// (If there is exactly one successor, we still treat it as not chainable.) SwitchInt { targets, .. } => targets.all_targets(),
SwitchInt { ref targets, .. } => CoverageSuccessors::NotChainable(targets.all_targets()),
// A yield terminator has exactly 1 successor, but should not be chained, // A yield terminator has exactly 1 successor, but should not be chained,
// because its resume edge has a different execution count. // because its resume edge has a different execution count.
Yield { resume, .. } => CoverageSuccessors::Yield(resume), Yield { resume, .. } => {
is_yield = true;
slice::from_ref(resume)
}
// These terminators have exactly one coverage-relevant successor, // These terminators have exactly one coverage-relevant successor,
// and can be chained into it. // and can be chained into it.
@ -455,24 +440,15 @@ fn bcb_filtered_successors<'a, 'tcx>(terminator: &'a Terminator<'tcx>) -> Covera
| Drop { target, .. } | Drop { target, .. }
| FalseEdge { real_target: target, .. } | FalseEdge { real_target: target, .. }
| FalseUnwind { real_target: target, .. } | FalseUnwind { real_target: target, .. }
| Goto { target } => CoverageSuccessors::Chainable(target), | Goto { target } => slice::from_ref(target),
// A call terminator can normally be chained, except when it has no // A call terminator can normally be chained, except when it has no
// successor because it is known to diverge. // successor because it is known to diverge.
Call { target: maybe_target, .. } => match maybe_target { Call { target: maybe_target, .. } => maybe_target.as_slice(),
Some(target) => CoverageSuccessors::Chainable(target),
None => CoverageSuccessors::NotChainable(&[]),
},
// An inline asm terminator can normally be chained, except when it // An inline asm terminator can normally be chained, except when it
// diverges or uses asm goto. // diverges or uses asm goto.
InlineAsm { ref targets, .. } => { InlineAsm { targets, .. } => &targets,
if let [target] = targets[..] {
CoverageSuccessors::Chainable(target)
} else {
CoverageSuccessors::NotChainable(targets)
}
}
// These terminators have no coverage-relevant successors. // These terminators have no coverage-relevant successors.
CoroutineDrop CoroutineDrop
@ -480,8 +456,10 @@ fn bcb_filtered_successors<'a, 'tcx>(terminator: &'a Terminator<'tcx>) -> Covera
| TailCall { .. } | TailCall { .. }
| Unreachable | Unreachable
| UnwindResume | UnwindResume
| UnwindTerminate(_) => CoverageSuccessors::NotChainable(&[]), | UnwindTerminate(_) => &[],
} };
CoverageSuccessors { targets, is_yield }
} }
/// Maintains separate worklists for each loop in the BasicCoverageBlock CFG, plus one for the /// Maintains separate worklists for each loop in the BasicCoverageBlock CFG, plus one for the
@ -616,28 +594,31 @@ pub(crate) fn unvisited(&self) -> Vec<BasicCoverageBlock> {
} }
} }
fn short_circuit_preorder<'a, 'tcx, F, Iter>( /// Wrapper around a [`mir::BasicBlocks`] graph that restricts each node's
body: &'a mir::Body<'tcx>, /// successors to only the ones considered "relevant" when building a coverage
filtered_successors: F, /// graph.
) -> impl Iterator<Item = BasicBlock> + Captures<'a> + Captures<'tcx> #[derive(Clone, Copy)]
where struct CoverageRelevantSubgraph<'a, 'tcx> {
F: Fn(BasicBlock) -> Iter, basic_blocks: &'a mir::BasicBlocks<'tcx>,
Iter: IntoIterator<Item = BasicBlock>, }
{ impl<'a, 'tcx> CoverageRelevantSubgraph<'a, 'tcx> {
let mut visited = BitSet::new_empty(body.basic_blocks.len()); fn new(basic_blocks: &'a mir::BasicBlocks<'tcx>) -> Self {
let mut worklist = vec![mir::START_BLOCK]; Self { basic_blocks }
}
std::iter::from_fn(move || {
while let Some(bb) = worklist.pop() { fn coverage_successors(&self, bb: BasicBlock) -> CoverageSuccessors<'_> {
if !visited.insert(bb) { bcb_filtered_successors(self.basic_blocks[bb].terminator())
continue; }
} }
impl<'a, 'tcx> graph::DirectedGraph for CoverageRelevantSubgraph<'a, 'tcx> {
worklist.extend(filtered_successors(bb)); type Node = BasicBlock;
return Some(bb); fn num_nodes(&self) -> usize {
} self.basic_blocks.num_nodes()
}
None }
}) impl<'a, 'tcx> graph::Successors for CoverageRelevantSubgraph<'a, 'tcx> {
fn successors(&self, bb: Self::Node) -> impl Iterator<Item = Self::Node> {
self.coverage_successors(bb).into_iter()
}
} }

View File

@ -1,13 +1,14 @@
//! Implement methods to pretty print stable MIR body.
use std::fmt::Debug; use std::fmt::Debug;
use std::io::Write; use std::io::Write;
use std::{fmt, io, iter}; use std::{fmt, io, iter};
use fmt::{Display, Formatter}; use fmt::{Display, Formatter};
use super::{AssertMessage, BinOp, BorrowKind, FakeBorrowKind, TerminatorKind}; use super::{AggregateKind, AssertMessage, BinOp, BorrowKind, FakeBorrowKind, TerminatorKind};
use crate::mir::{Operand, Place, Rvalue, StatementKind, UnwindAction, VarDebugInfoContents}; use crate::mir::{Operand, Place, Rvalue, StatementKind, UnwindAction, VarDebugInfoContents};
use crate::ty::{IndexedVal, MirConst, Ty, TyConst}; use crate::ty::{AdtKind, IndexedVal, MirConst, Ty, TyConst};
use crate::{Body, Mutability, with}; use crate::{Body, CrateDef, Mutability, with};
impl Display for Ty { impl Display for Ty {
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
@ -23,10 +24,11 @@ fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
pub(crate) fn function_body<W: Write>(writer: &mut W, body: &Body, name: &str) -> io::Result<()> { pub(crate) fn function_body<W: Write>(writer: &mut W, body: &Body, name: &str) -> io::Result<()> {
write!(writer, "fn {name}(")?; write!(writer, "fn {name}(")?;
body.arg_locals() let mut sep = "";
.iter() for (index, local) in body.arg_locals().iter().enumerate() {
.enumerate() write!(writer, "{}_{}: {}", sep, index + 1, local.ty)?;
.try_for_each(|(index, local)| write!(writer, "_{}: {}", index + 1, local.ty))?; sep = ", ";
}
write!(writer, ")")?; write!(writer, ")")?;
let return_local = body.ret_local(); let return_local = body.ret_local();
@ -73,39 +75,40 @@ pub(crate) fn function_body<W: Write>(writer: &mut W, body: &Body, name: &str) -
} }
fn pretty_statement<W: Write>(writer: &mut W, statement: &StatementKind) -> io::Result<()> { fn pretty_statement<W: Write>(writer: &mut W, statement: &StatementKind) -> io::Result<()> {
const INDENT: &str = " ";
match statement { match statement {
StatementKind::Assign(place, rval) => { StatementKind::Assign(place, rval) => {
write!(writer, " {place:?} = ")?; write!(writer, "{INDENT}{place:?} = ")?;
pretty_rvalue(writer, rval)?; pretty_rvalue(writer, rval)?;
writeln!(writer, ";") writeln!(writer, ";")
} }
// FIXME: Add rest of the statements // FIXME: Add rest of the statements
StatementKind::FakeRead(cause, place) => { StatementKind::FakeRead(cause, place) => {
writeln!(writer, "FakeRead({cause:?}, {place:?});") writeln!(writer, "{INDENT}FakeRead({cause:?}, {place:?});")
} }
StatementKind::SetDiscriminant { place, variant_index } => { StatementKind::SetDiscriminant { place, variant_index } => {
writeln!(writer, "discriminant({place:?} = {};", variant_index.to_index()) writeln!(writer, "{INDENT}discriminant({place:?} = {};", variant_index.to_index())
} }
StatementKind::Deinit(place) => writeln!(writer, "Deinit({place:?};"), StatementKind::Deinit(place) => writeln!(writer, "Deinit({place:?};"),
StatementKind::StorageLive(local) => { StatementKind::StorageLive(local) => {
writeln!(writer, "StorageLive(_{local});") writeln!(writer, "{INDENT}StorageLive(_{local});")
} }
StatementKind::StorageDead(local) => { StatementKind::StorageDead(local) => {
writeln!(writer, "StorageDead(_{local});") writeln!(writer, "{INDENT}StorageDead(_{local});")
} }
StatementKind::Retag(kind, place) => writeln!(writer, "Retag({kind:?}, {place:?});"), StatementKind::Retag(kind, place) => writeln!(writer, "Retag({kind:?}, {place:?});"),
StatementKind::PlaceMention(place) => { StatementKind::PlaceMention(place) => {
writeln!(writer, "PlaceMention({place:?};") writeln!(writer, "{INDENT}PlaceMention({place:?};")
} }
StatementKind::ConstEvalCounter => { StatementKind::ConstEvalCounter => {
writeln!(writer, "ConstEvalCounter;") writeln!(writer, "{INDENT}ConstEvalCounter;")
} }
StatementKind::Nop => writeln!(writer, "nop;"), StatementKind::Nop => writeln!(writer, "{INDENT}nop;"),
StatementKind::AscribeUserType { .. } StatementKind::AscribeUserType { .. }
| StatementKind::Coverage(_) | StatementKind::Coverage(_)
| StatementKind::Intrinsic(_) => { | StatementKind::Intrinsic(_) => {
// FIX-ME: Make them pretty. // FIX-ME: Make them pretty.
writeln!(writer, "{statement:?};") writeln!(writer, "{INDENT}{statement:?};")
} }
} }
} }
@ -322,15 +325,11 @@ fn pretty_ty_const(ct: &TyConst) -> String {
fn pretty_rvalue<W: Write>(writer: &mut W, rval: &Rvalue) -> io::Result<()> { fn pretty_rvalue<W: Write>(writer: &mut W, rval: &Rvalue) -> io::Result<()> {
match rval { match rval {
Rvalue::AddressOf(mutability, place) => { Rvalue::AddressOf(mutability, place) => {
write!(writer, "&raw {}(*{:?})", pretty_mut(*mutability), place) write!(writer, "&raw {} {:?}", pretty_mut(*mutability), place)
} }
Rvalue::Aggregate(aggregate_kind, operands) => { Rvalue::Aggregate(aggregate_kind, operands) => {
// FIXME: Add pretty_aggregate function that returns a pretty string // FIXME: Add pretty_aggregate function that returns a pretty string
write!(writer, "{aggregate_kind:?} (")?; pretty_aggregate(writer, aggregate_kind, operands)
let mut op_iter = operands.iter();
op_iter.next().map_or(Ok(()), |op| write!(writer, "{}", pretty_operand(op)))?;
op_iter.try_for_each(|op| write!(writer, ", {}", pretty_operand(op)))?;
write!(writer, ")")
} }
Rvalue::BinaryOp(bin, op1, op2) => { Rvalue::BinaryOp(bin, op1, op2) => {
write!(writer, "{:?}({}, {})", bin, pretty_operand(op1), pretty_operand(op2)) write!(writer, "{:?}({}, {})", bin, pretty_operand(op1), pretty_operand(op2))
@ -360,22 +359,74 @@ fn pretty_rvalue<W: Write>(writer: &mut W, rval: &Rvalue) -> io::Result<()> {
write!(writer, "{kind}{place:?}") write!(writer, "{kind}{place:?}")
} }
Rvalue::Repeat(op, cnst) => { Rvalue::Repeat(op, cnst) => {
write!(writer, "{} \" \" {}", pretty_operand(op), pretty_ty_const(cnst)) write!(writer, "[{}; {}]", pretty_operand(op), pretty_ty_const(cnst))
} }
Rvalue::ShallowInitBox(_, _) => Ok(()), Rvalue::ShallowInitBox(_, _) => Ok(()),
Rvalue::ThreadLocalRef(item) => { Rvalue::ThreadLocalRef(item) => {
write!(writer, "thread_local_ref{item:?}") write!(writer, "thread_local_ref{item:?}")
} }
Rvalue::NullaryOp(nul, ty) => { Rvalue::NullaryOp(nul, ty) => {
write!(writer, "{nul:?} {ty} \" \"") write!(writer, "{nul:?}::<{ty}>() \" \"")
} }
Rvalue::UnaryOp(un, op) => { Rvalue::UnaryOp(un, op) => {
write!(writer, "{} \" \" {:?}", pretty_operand(op), un) write!(writer, "{:?}({})", un, pretty_operand(op))
} }
Rvalue::Use(op) => write!(writer, "{}", pretty_operand(op)), Rvalue::Use(op) => write!(writer, "{}", pretty_operand(op)),
} }
} }
/// Pretty-prints an aggregate rvalue as `<prefix> op1, op2, ... <suffix>`,
/// where the prefix/suffix depend on the aggregate kind (e.g. `[...]` for
/// arrays, `(...)` for tuples, `Enum::Variant(...)` for ADTs).
fn pretty_aggregate<W: Write>(
    writer: &mut W,
    aggregate_kind: &AggregateKind,
    // `&[Operand]` instead of `&Vec<Operand>`: callers' `&Vec` coerces, and
    // the function only reads the slice.
    operands: &[Operand],
) -> io::Result<()> {
    // Each arm writes the opening delimiter and returns the matching closer.
    let suffix = match aggregate_kind {
        AggregateKind::Array(_) => {
            write!(writer, "[")?;
            "]"
        }
        AggregateKind::Tuple => {
            write!(writer, "(")?;
            ")"
        }
        AggregateKind::Adt(def, var, _, _, _) => {
            // Look up the variant name once; it is needed in both branches.
            let variant_name = def.variant(*var).unwrap().name();
            if def.kind() == AdtKind::Enum {
                // Enum variants are qualified with the enum's name.
                write!(writer, "{}::{}", def.name(), variant_name)?;
            } else {
                write!(writer, "{variant_name}")?;
            }
            // Unit-like constructors (no operands) print with no parentheses.
            if operands.is_empty() {
                return Ok(());
            }
            // FIXME: Change this once we have CtorKind in StableMIR.
            write!(writer, "(")?;
            ")"
        }
        AggregateKind::Closure(def, _) => {
            write!(writer, "{{closure@{}}}(", def.span().diagnostic())?;
            ")"
        }
        AggregateKind::Coroutine(def, _, _) => {
            write!(writer, "{{coroutine@{}}}(", def.span().diagnostic())?;
            ")"
        }
        AggregateKind::RawPtr(ty, mutability) => {
            write!(
                writer,
                "*{} {ty} from (",
                if *mutability == Mutability::Mut { "mut" } else { "const" }
            )?;
            ")"
        }
    };
    // Comma-separated operand list between the delimiters.
    let mut separator = "";
    for op in operands {
        write!(writer, "{}{}", separator, pretty_operand(op))?;
        separator = ", ";
    }
    write!(writer, "{suffix}")
}
fn pretty_mut(mutability: Mutability) -> &'static str { fn pretty_mut(mutability: Mutability) -> &'static str {
match mutability { match mutability {
Mutability::Not => " ", Mutability::Not => " ",

View File

@ -271,6 +271,14 @@ pub fn get_filename(&self) -> Filename {
pub fn get_lines(&self) -> LineInfo { pub fn get_lines(&self) -> LineInfo {
with(|c| c.get_lines(self)) with(|c| c.get_lines(self))
} }
/// Return the span location to be printed in diagnostic messages.
///
/// This may leak local file paths and should not be used to build artifacts that may be
/// distributed.
pub fn diagnostic(&self) -> String {
with(|c| c.span_to_string(*self))
}
} }
#[derive(Clone, Copy, Debug, Serialize)] #[derive(Clone, Copy, Debug, Serialize)]

View File

@ -63,14 +63,14 @@ struct Block<T> {
impl<T> Block<T> { impl<T> Block<T> {
/// Creates an empty block. /// Creates an empty block.
fn new() -> Block<T> { fn new() -> Box<Block<T>> {
// SAFETY: This is safe because: // SAFETY: This is safe because:
// [1] `Block::next` (AtomicPtr) may be safely zero initialized. // [1] `Block::next` (AtomicPtr) may be safely zero initialized.
// [2] `Block::slots` (Array) may be safely zero initialized because of [3, 4]. // [2] `Block::slots` (Array) may be safely zero initialized because of [3, 4].
// [3] `Slot::msg` (UnsafeCell) may be safely zero initialized because it // [3] `Slot::msg` (UnsafeCell) may be safely zero initialized because it
// holds a MaybeUninit. // holds a MaybeUninit.
// [4] `Slot::state` (AtomicUsize) may be safely zero initialized. // [4] `Slot::state` (AtomicUsize) may be safely zero initialized.
unsafe { MaybeUninit::zeroed().assume_init() } unsafe { Box::new_zeroed().assume_init() }
} }
/// Waits until the next pointer is set. /// Waits until the next pointer is set.
@ -199,13 +199,13 @@ fn start_send(&self, token: &mut Token) -> bool {
// If we're going to have to install the next block, allocate it in advance in order to // If we're going to have to install the next block, allocate it in advance in order to
// make the wait for other threads as short as possible. // make the wait for other threads as short as possible.
if offset + 1 == BLOCK_CAP && next_block.is_none() { if offset + 1 == BLOCK_CAP && next_block.is_none() {
next_block = Some(Box::new(Block::<T>::new())); next_block = Some(Block::<T>::new());
} }
// If this is the first message to be sent into the channel, we need to allocate the // If this is the first message to be sent into the channel, we need to allocate the
// first block and install it. // first block and install it.
if block.is_null() { if block.is_null() {
let new = Box::into_raw(Box::new(Block::<T>::new())); let new = Box::into_raw(Block::<T>::new());
if self if self
.tail .tail

View File

@ -0,0 +1,48 @@
//@ compile-flags: -Z unpretty=stable-mir --crate-type lib -C panic=abort
//@ check-pass
//@ only-x86_64
//@ needs-unwind unwind edges are different with panic=abort
//! Check how stable mir pretty printer prints different operands and abort strategy.
pub fn operands(val: u8) {
    let array = [val; 10]; // repeat rvalue: `[val; 10]`
    let first = array[0]; // indexing with a constant index
    let last = array[10 - 1]; // index computed via checked subtraction
    assert_eq!(first, last);
    let reference = &first; // shared borrow of a local
    let dereferenced = *reference; // copy out through the reference
    assert_eq!(dereferenced, first);
    let tuple = (first, last); // tuple aggregate
    let (first_again, _) = tuple; // destructuring pattern
    let first_again_again = tuple.0; // field projection
    assert_eq!(first_again, first_again_again);
    let length = array.len();
    let size_of = std::mem::size_of_val(&length); // size of a `usize`
    assert_eq!(length, size_of);
}
pub struct Dummy { // plain struct used as a struct-aggregate operand below
    c: char, // initialized with a char literal in `more_operands`
    i: i32, // initialized with `i32::MIN` in `more_operands`
}
pub enum Ctors { // one variant of each constructor shape
    Unit, // unit variant
    StructLike { d: Dummy }, // struct-like variant with one field
    TupLike(bool), // tuple-like variant (constructed via its ctor function)
}
pub fn more_operands() -> [Ctors; 3] {
    let dummy = Dummy { c: 'a', i: i32::MIN }; // struct aggregate, two field operands
    let unit = Ctors::Unit; // unit variant: printed without parentheses
    let struct_like = Ctors::StructLike { d: dummy }; // moves `dummy` in
    let tup_like = Ctors::TupLike(false); // tuple-variant constructor call
    [unit, struct_like, tup_like] // array aggregate of the three values
}
pub fn closures(x: bool, z: bool) -> impl FnOnce(bool) -> bool {
    move |y: bool| (x ^ y) || z // captures `x` and `z` by move
}

View File

@ -0,0 +1,263 @@
// WARNING: This is highly experimental output it's intended for stable-mir developers only.
// If you find a bug or want to improve the output open a issue at https://github.com/rust-lang/project-stable-mir.
fn operands(_1: u8) -> () {
let mut _0: ();
let _2: [u8; 10];
let _3: u8;
let _4: usize;
let mut _5: usize;
let mut _6: bool;
let _7: u8;
let _8: usize;
let mut _9: (usize, bool);
let mut _10: usize;
let mut _11: bool;
let mut _12: (&u8, &u8);
let mut _13: &u8;
let mut _14: &u8;
let _15: &u8;
let _16: &u8;
let mut _17: bool;
let mut _18: u8;
let mut _19: u8;
let _20: core::panicking::AssertKind;
let _21: !;
let mut _22: Option<Arguments<'_>>;
let _23: &u8;
let _24: u8;
let mut _25: (&u8, &u8);
let mut _26: &u8;
let mut _27: &u8;
let _28: &u8;
let _29: &u8;
let mut _30: bool;
let mut _31: u8;
let mut _32: u8;
let _33: core::panicking::AssertKind;
let _34: !;
let mut _35: Option<Arguments<'_>>;
let _36: (u8, u8);
let _37: u8;
let _38: u8;
let mut _39: (&u8, &u8);
let mut _40: &u8;
let mut _41: &u8;
let _42: &u8;
let _43: &u8;
let mut _44: bool;
let mut _45: u8;
let mut _46: u8;
let _47: core::panicking::AssertKind;
let _48: !;
let mut _49: Option<Arguments<'_>>;
let _50: usize;
let mut _51: &[u8];
let mut _52: &[u8; 10];
let _53: usize;
let _54: &usize;
let mut _55: (&usize, &usize);
let mut _56: &usize;
let mut _57: &usize;
let _58: &usize;
let _59: &usize;
let mut _60: bool;
let mut _61: usize;
let mut _62: usize;
let _63: core::panicking::AssertKind;
let _64: !;
let mut _65: Option<Arguments<'_>>;
debug val => _1;
debug array => _2;
debug first => _3;
debug last => _7;
debug left_val => _15;
debug right_val => _16;
debug kind => _20;
debug reference => _23;
debug dereferenced => _24;
debug left_val => _28;
debug right_val => _29;
debug kind => _33;
debug tuple => _36;
debug first_again => _37;
debug first_again_again => _38;
debug left_val => _42;
debug right_val => _43;
debug kind => _47;
debug length => _50;
debug size_of => _53;
debug left_val => _58;
debug right_val => _59;
debug kind => _63;
bb0: {
_2 = [_1; 10];
_4 = 0_usize;
_5 = 10_usize;
_6 = Lt(_4, _5);
assert(move _6, "index out of bounds: the length is {} but the index is {}", move _5, _4) -> [success: bb1, unwind unreachable];
}
bb1: {
_3 = _2[_4];
_9 = CheckedSub(10_usize, 1_usize);
assert(!move (_9.1: bool), "attempt to compute `{} - {}`, which would overflow", 10_usize, 1_usize) -> [success: bb2, unwind unreachable];
}
bb2: {
_8 = move (_9.0: usize);
_10 = 10_usize;
_11 = Lt(_8, _10);
assert(move _11, "index out of bounds: the length is {} but the index is {}", move _10, _8) -> [success: bb3, unwind unreachable];
}
bb3: {
_7 = _2[_8];
_13 = &_3;
_14 = &_7;
_12 = (move _13, move _14);
_15 = (_12.0: &u8);
_16 = (_12.1: &u8);
_18 = (*_15);
_19 = (*_16);
_17 = Eq(move _18, move _19);
switchInt(move _17) -> [0: bb5, otherwise: bb4];
}
bb4: {
_23 = &_3;
_24 = (*_23);
_26 = &_24;
_27 = &_3;
_25 = (move _26, move _27);
_28 = (_25.0: &u8);
_29 = (_25.1: &u8);
_31 = (*_28);
_32 = (*_29);
_30 = Eq(move _31, move _32);
switchInt(move _30) -> [0: bb7, otherwise: bb6];
}
bb5: {
_20 = core::panicking::AssertKind::Eq;
_22 = std::option::Option::None;
_21 = core::panicking::assert_failed::<u8, u8>(move _20, _15, _16, move _22) -> unwind unreachable;
}
bb6: {
_36 = (_3, _7);
_37 = (_36.0: u8);
_38 = (_36.0: u8);
_40 = &_37;
_41 = &_38;
_39 = (move _40, move _41);
_42 = (_39.0: &u8);
_43 = (_39.1: &u8);
_45 = (*_42);
_46 = (*_43);
_44 = Eq(move _45, move _46);
switchInt(move _44) -> [0: bb9, otherwise: bb8];
}
bb7: {
_33 = core::panicking::AssertKind::Eq;
_35 = std::option::Option::None;
_34 = core::panicking::assert_failed::<u8, u8>(move _33, _28, _29, move _35) -> unwind unreachable;
}
bb8: {
_52 = &_2;
_51 = move _52 as &[u8];
_50 = PtrMetadata(move _51);
_54 = &_50;
_53 = std::mem::size_of_val::<usize>(_54) -> [return: bb10, unwind unreachable];
}
bb9: {
_47 = core::panicking::AssertKind::Eq;
_49 = std::option::Option::None;
_48 = core::panicking::assert_failed::<u8, u8>(move _47, _42, _43, move _49) -> unwind unreachable;
}
bb10: {
_56 = &_50;
_57 = &_53;
_55 = (move _56, move _57);
_58 = (_55.0: &usize);
_59 = (_55.1: &usize);
_61 = (*_58);
_62 = (*_59);
_60 = Eq(move _61, move _62);
switchInt(move _60) -> [0: bb12, otherwise: bb11];
}
bb11: {
return;
}
bb12: {
_63 = core::panicking::AssertKind::Eq;
_65 = std::option::Option::None;
_64 = core::panicking::assert_failed::<usize, usize>(move _63, _58, _59, move _65) -> unwind unreachable;
}
}
fn operands::{constant#0}() -> usize {
let mut _0: usize;
bb0: {
_0 = 10_usize;
return;
}
}
fn more_operands() -> [Ctors; 3] {
let mut _0: [Ctors; 3];
let _1: Dummy;
let _2: Ctors;
let _3: Ctors;
let _4: Ctors;
debug dummy => _1;
debug unit => _2;
debug struct_like => _3;
debug tup_like => _4;
bb0: {
_1 = Dummy('a', core::num::<impl i32>::MIN);
_2 = Ctors::Unit;
_3 = Ctors::StructLike(move _1);
_4 = Ctors::TupLike(false);
_0 = [move _2, move _3, move _4];
return;
}
}
fn more_operands::{constant#0}() -> usize {
let mut _0: usize;
bb0: {
_0 = 3_usize;
return;
}
}
fn closures(_1: bool, _2: bool) -> {closure@$DIR/operands.rs:47:5: 47:19} {
let mut _0: {closure@$DIR/operands.rs:47:5: 47:19};
debug x => _1;
debug z => _2;
bb0: {
_0 = {closure@$DIR/operands.rs:47:5: 47:19}(_1, _2);
return;
}
}
fn closures::{closure#0}(_1: {closure@$DIR/operands.rs:47:5: 47:19}, _2: bool) -> bool {
let mut _0: bool;
let mut _3: bool;
let mut _4: bool;
debug y => _2;
debug x => (_1.0: bool);
debug z => (_1.1: bool);
bb0: {
_4 = (_1.0: bool);
_3 = BitXor(move _4, _2);
switchInt(move _3) -> [0: bb2, otherwise: bb1];
}
bb1: {
_0 = true;
goto -> bb3;
}
bb2: {
_0 = (_1.1: bool);
goto -> bb3;
}
bb3: {
return;
}
}
fn Ctors::TupLike(_1: bool) -> Ctors {
let mut _0: Ctors;
bb0: {
_0 = Ctors::TupLike(move _1);
return;
}
}

View File

@ -0,0 +1,29 @@
//@ run-pass
//@ needs-threads
//@ compile-flags: -Copt-level=0
// The channel's `Block::new` was causing a stack overflow because it held 32 item slots, which is
// 1MiB for this test's `BigStruct` -- instantiated on the stack before moving to `Box::new`.
//
// That block is now initialized directly on the heap.
//
// Ref: https://github.com/rust-lang/rust/issues/102246
use std::sync::mpsc::channel;
use std::thread;
const N: usize = 32_768;
struct BigStruct {
_data: [u8; N],
}
fn main() {
let (sender, receiver) = channel::<BigStruct>();
let thread1 = thread::spawn(move || {
sender.send(BigStruct { _data: [0u8; N] }).unwrap();
});
thread1.join().unwrap();
for _data in receiver.try_iter() {}
}