Auto merge of #132762 - Zalathar:rollup-qfgz165, r=Zalathar

Rollup of 5 pull requests

Successful merges:

 - #132161 ([StableMIR] A few fixes to pretty printing)
 - #132389 (coverage: Simplify parts of coverage graph creation)
 - #132452 (coverage: Extract safe FFI wrapper functions to `llvm_cov`)
 - #132590 (Simplify FFI calls for `-Ztime-llvm-passes` and `-Zprint-codegen-stats`)
 - #132738 (Initialize channel `Block`s directly on the heap)

r? `@ghost`
`@rustbot` modify labels: rollup
Commit 6295686a37 by bors, 2024-11-08 08:00:08 +00:00
14 changed files with 695 additions and 338 deletions

View File

@ -0,0 +1,100 @@
//! Safe wrappers for coverage-specific FFI functions.
use std::ffi::CString;
use crate::common::AsCCharPtr;
use crate::coverageinfo::ffi;
use crate::llvm;
pub(crate) fn covmap_var_name() -> CString {
CString::new(llvm::build_byte_buffer(|s| unsafe {
llvm::LLVMRustCoverageWriteCovmapVarNameToString(s);
}))
.expect("covmap variable name should not contain NUL")
}
pub(crate) fn covmap_section_name(llmod: &llvm::Module) -> CString {
CString::new(llvm::build_byte_buffer(|s| unsafe {
llvm::LLVMRustCoverageWriteCovmapSectionNameToString(llmod, s);
}))
.expect("covmap section name should not contain NUL")
}
pub(crate) fn covfun_section_name(llmod: &llvm::Module) -> CString {
CString::new(llvm::build_byte_buffer(|s| unsafe {
llvm::LLVMRustCoverageWriteCovfunSectionNameToString(llmod, s);
}))
.expect("covfun section name should not contain NUL")
}
pub(crate) fn create_pgo_func_name_var<'ll>(
llfn: &'ll llvm::Value,
mangled_fn_name: &str,
) -> &'ll llvm::Value {
unsafe {
llvm::LLVMRustCoverageCreatePGOFuncNameVar(
llfn,
mangled_fn_name.as_c_char_ptr(),
mangled_fn_name.len(),
)
}
}
pub(crate) fn write_filenames_to_buffer<'a>(
filenames: impl IntoIterator<Item = &'a str>,
) -> Vec<u8> {
let (pointers, lengths) = filenames
.into_iter()
.map(|s: &str| (s.as_c_char_ptr(), s.len()))
.unzip::<_, _, Vec<_>, Vec<_>>();
llvm::build_byte_buffer(|buffer| unsafe {
llvm::LLVMRustCoverageWriteFilenamesToBuffer(
pointers.as_ptr(),
pointers.len(),
lengths.as_ptr(),
lengths.len(),
buffer,
);
})
}
pub(crate) fn write_function_mappings_to_buffer(
virtual_file_mapping: &[u32],
expressions: &[ffi::CounterExpression],
code_regions: &[ffi::CodeRegion],
branch_regions: &[ffi::BranchRegion],
mcdc_branch_regions: &[ffi::MCDCBranchRegion],
mcdc_decision_regions: &[ffi::MCDCDecisionRegion],
) -> Vec<u8> {
llvm::build_byte_buffer(|buffer| unsafe {
llvm::LLVMRustCoverageWriteFunctionMappingsToBuffer(
virtual_file_mapping.as_ptr(),
virtual_file_mapping.len(),
expressions.as_ptr(),
expressions.len(),
code_regions.as_ptr(),
code_regions.len(),
branch_regions.as_ptr(),
branch_regions.len(),
mcdc_branch_regions.as_ptr(),
mcdc_branch_regions.len(),
mcdc_decision_regions.as_ptr(),
mcdc_decision_regions.len(),
buffer,
)
})
}
/// Hashes some bytes into a 64-bit hash, via LLVM's `IndexedInstrProf::ComputeHash`,
/// as required for parts of the LLVM coverage mapping format.
pub(crate) fn hash_bytes(bytes: &[u8]) -> u64 {
unsafe { llvm::LLVMRustCoverageHashBytes(bytes.as_c_char_ptr(), bytes.len()) }
}
/// Returns LLVM's `coverage::CovMapVersion::CurrentVersion` (CoverageMapping.h)
/// as a raw numeric value. For historical reasons, the numeric value is 1 less
/// than the number in the version's name, so `Version7` is actually `6u32`.
pub(crate) fn mapping_version() -> u32 {
unsafe { llvm::LLVMRustCoverageMappingVersion() }
}
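Every wrapper in this new module follows the same shape: hand the FFI function a byte buffer to fill, then validate or convert the bytes on the Rust side. A minimal standalone sketch of that pattern, with a plain `Vec<u8>` standing in for `RustString` and a hard-coded writer standing in for the real FFI call (these stand-ins are illustrative, not rustc's actual definitions):
use std::ffi::CString;
// Stand-in for `llvm::build_byte_buffer`: run a writer callback against a
// growable byte buffer and return the collected bytes.
fn build_byte_buffer(f: impl FnOnce(&mut Vec<u8>)) -> Vec<u8> {
    let mut buffer = Vec::new();
    f(&mut buffer);
    buffer
}
// Stand-in for the C++ side of `LLVMRustCoverageWriteCovmapVarNameToString`.
fn write_covmap_var_name(buffer: &mut Vec<u8>) {
    buffer.extend_from_slice(b"__llvm_coverage_mapping");
}
fn covmap_var_name() -> CString {
    CString::new(build_byte_buffer(write_covmap_var_name))
        .expect("covmap variable name should not contain NUL")
}
fn main() {
    assert_eq!(covmap_var_name().to_str().unwrap(), "__llvm_coverage_mapping");
}
Keeping the `unsafe` blocks inside these small wrappers means the callers in `mapgen.rs` and the coverageinfo module below become ordinary safe code.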

View File

@ -1,4 +1,5 @@
use std::ffi::CString;
use std::iter;
use itertools::Itertools as _;
use rustc_abi::Align;
@ -17,9 +18,9 @@
use tracing::debug;
use crate::common::CodegenCx;
use crate::coverageinfo::ffi;
use crate::coverageinfo::map_data::{FunctionCoverage, FunctionCoverageCollector};
use crate::{coverageinfo, llvm};
use crate::coverageinfo::{ffi, llvm_cov};
use crate::llvm;
/// Generates and exports the coverage map, which is embedded in special
/// linker sections in the final binary.
@ -33,7 +34,7 @@ pub(crate) fn finalize(cx: &CodegenCx<'_, '_>) {
// agrees with our Rust-side code. Expected versions (encoded as n-1) are:
// - `CovMapVersion::Version7` (6) used by LLVM 18-19
let covmap_version = {
let llvm_covmap_version = coverageinfo::mapping_version();
let llvm_covmap_version = llvm_cov::mapping_version();
let expected_versions = 6..=6;
assert!(
expected_versions.contains(&llvm_covmap_version),
@ -78,7 +79,7 @@ pub(crate) fn finalize(cx: &CodegenCx<'_, '_>) {
let filenames_size = filenames_buffer.len();
let filenames_val = cx.const_bytes(&filenames_buffer);
let filenames_ref = coverageinfo::hash_bytes(&filenames_buffer);
let filenames_ref = llvm_cov::hash_bytes(&filenames_buffer);
// Generate the coverage map header, which contains the filenames used by
// this CGU's coverage mappings, and store it in a well-known global.
@ -187,13 +188,10 @@ fn make_filenames_buffer(&self, tcx: TyCtxt<'_>) -> Vec<u8> {
.for_scope(tcx.sess, RemapPathScopeComponents::MACRO)
.to_string_lossy();
llvm::build_byte_buffer(|buffer| {
coverageinfo::write_filenames_section_to_buffer(
// Insert the working dir at index 0, before the other filenames.
std::iter::once(working_dir).chain(self.raw_file_table.iter().map(Symbol::as_str)),
buffer,
);
})
// Insert the working dir at index 0, before the other filenames.
let filenames =
iter::once(working_dir).chain(self.raw_file_table.iter().map(Symbol::as_str));
llvm_cov::write_filenames_to_buffer(filenames)
}
}
@ -296,17 +294,14 @@ fn encode_mappings_for_function(
}
// Encode the function's coverage mappings into a buffer.
llvm::build_byte_buffer(|buffer| {
coverageinfo::write_mapping_to_buffer(
virtual_file_mapping.into_vec(),
expressions,
&code_regions,
&branch_regions,
&mcdc_branch_regions,
&mcdc_decision_regions,
buffer,
);
})
llvm_cov::write_function_mappings_to_buffer(
&virtual_file_mapping.into_vec(),
&expressions,
&code_regions,
&branch_regions,
&mcdc_branch_regions,
&mcdc_decision_regions,
)
}
/// Generates the contents of the covmap record for this CGU, which mostly
@ -335,23 +330,11 @@ fn generate_covmap_record<'ll>(
let covmap_data =
cx.const_struct(&[cov_data_header_val, filenames_val], /*packed=*/ false);
let covmap_var_name = CString::new(llvm::build_byte_buffer(|s| unsafe {
llvm::LLVMRustCoverageWriteMappingVarNameToString(s);
}))
.unwrap();
debug!("covmap var name: {:?}", covmap_var_name);
let covmap_section_name = CString::new(llvm::build_byte_buffer(|s| unsafe {
llvm::LLVMRustCoverageWriteMapSectionNameToString(cx.llmod, s);
}))
.expect("covmap section name should not contain NUL");
debug!("covmap section name: {:?}", covmap_section_name);
let llglobal = llvm::add_global(cx.llmod, cx.val_ty(covmap_data), &covmap_var_name);
let llglobal = llvm::add_global(cx.llmod, cx.val_ty(covmap_data), &llvm_cov::covmap_var_name());
llvm::set_initializer(llglobal, covmap_data);
llvm::set_global_constant(llglobal, true);
llvm::set_linkage(llglobal, llvm::Linkage::PrivateLinkage);
llvm::set_section(llglobal, &covmap_section_name);
llvm::set_section(llglobal, &llvm_cov::covmap_section_name(cx.llmod));
// LLVM's coverage mapping format specifies 8-byte alignment for items in this section.
// <https://llvm.org/docs/CoverageMappingFormat.html>
llvm::set_alignment(llglobal, Align::EIGHT);
@ -373,7 +356,7 @@ fn generate_covfun_record(
let coverage_mapping_size = coverage_mapping_buffer.len();
let coverage_mapping_val = cx.const_bytes(&coverage_mapping_buffer);
let func_name_hash = coverageinfo::hash_bytes(mangled_function_name.as_bytes());
let func_name_hash = llvm_cov::hash_bytes(mangled_function_name.as_bytes());
let func_name_hash_val = cx.const_u64(func_name_hash);
let coverage_mapping_size_val = cx.const_u32(coverage_mapping_size as u32);
let source_hash_val = cx.const_u64(source_hash);

View File

@ -1,24 +1,23 @@
use std::cell::{OnceCell, RefCell};
use std::ffi::{CStr, CString};
use libc::c_uint;
use rustc_abi::Size;
use rustc_codegen_ssa::traits::{
BuilderMethods, ConstCodegenMethods, CoverageInfoBuilderMethods, MiscCodegenMethods,
};
use rustc_data_structures::fx::{FxHashMap, FxIndexMap};
use rustc_llvm::RustString;
use rustc_middle::mir::coverage::CoverageKind;
use rustc_middle::ty::Instance;
use rustc_middle::ty::layout::HasTyCtxt;
use tracing::{debug, instrument};
use crate::builder::Builder;
use crate::common::{AsCCharPtr, CodegenCx};
use crate::common::CodegenCx;
use crate::coverageinfo::map_data::FunctionCoverageCollector;
use crate::llvm;
pub(crate) mod ffi;
mod llvm_cov;
pub(crate) mod map_data;
mod mapgen;
@ -80,12 +79,9 @@ pub(crate) fn coverageinfo_finalize(&self) {
/// - `__LLVM_COV,__llvm_covfun` on macOS (includes `__LLVM_COV,` segment prefix)
/// - `.lcovfun$M` on Windows (includes `$M` sorting suffix)
fn covfun_section_name(&self) -> &CStr {
self.coverage_cx().covfun_section_name.get_or_init(|| {
CString::new(llvm::build_byte_buffer(|s| unsafe {
llvm::LLVMRustCoverageWriteFuncSectionNameToString(self.llmod, s);
}))
.expect("covfun section name should not contain NUL")
})
self.coverage_cx()
.covfun_section_name
.get_or_init(|| llvm_cov::covfun_section_name(self.llmod))
}
/// For LLVM codegen, returns a function-specific `Value` for a global
@ -95,9 +91,11 @@ fn covfun_section_name(&self) -> &CStr {
fn get_pgo_func_name_var(&self, instance: Instance<'tcx>) -> &'ll llvm::Value {
debug!("getting pgo_func_name_var for instance={:?}", instance);
let mut pgo_func_name_var_map = self.coverage_cx().pgo_func_name_var_map.borrow_mut();
pgo_func_name_var_map
.entry(instance)
.or_insert_with(|| create_pgo_func_name_var(self, instance))
pgo_func_name_var_map.entry(instance).or_insert_with(|| {
let llfn = self.get_fn(instance);
let mangled_fn_name: &str = self.tcx.symbol_name(instance).name;
llvm_cov::create_pgo_func_name_var(llfn, mangled_fn_name)
})
}
}
@ -225,80 +223,3 @@ fn add_coverage(&mut self, instance: Instance<'tcx>, kind: &CoverageKind) {
}
}
}
/// Calls llvm::createPGOFuncNameVar() with the given function instance's
/// mangled function name. The LLVM API returns an llvm::GlobalVariable
/// containing the function name, with the specific variable name and linkage
/// required by LLVM InstrProf source-based coverage instrumentation. Use
/// `bx.get_pgo_func_name_var()` to ensure the variable is only created once per
/// `Instance`.
fn create_pgo_func_name_var<'ll, 'tcx>(
cx: &CodegenCx<'ll, 'tcx>,
instance: Instance<'tcx>,
) -> &'ll llvm::Value {
let mangled_fn_name: &str = cx.tcx.symbol_name(instance).name;
let llfn = cx.get_fn(instance);
unsafe {
llvm::LLVMRustCoverageCreatePGOFuncNameVar(
llfn,
mangled_fn_name.as_c_char_ptr(),
mangled_fn_name.len(),
)
}
}
pub(crate) fn write_filenames_section_to_buffer<'a>(
filenames: impl IntoIterator<Item = &'a str>,
buffer: &RustString,
) {
let (pointers, lengths) = filenames
.into_iter()
.map(|s: &str| (s.as_c_char_ptr(), s.len()))
.unzip::<_, _, Vec<_>, Vec<_>>();
unsafe {
llvm::LLVMRustCoverageWriteFilenamesSectionToBuffer(
pointers.as_ptr(),
pointers.len(),
lengths.as_ptr(),
lengths.len(),
buffer,
);
}
}
pub(crate) fn write_mapping_to_buffer(
virtual_file_mapping: Vec<u32>,
expressions: Vec<ffi::CounterExpression>,
code_regions: &[ffi::CodeRegion],
branch_regions: &[ffi::BranchRegion],
mcdc_branch_regions: &[ffi::MCDCBranchRegion],
mcdc_decision_regions: &[ffi::MCDCDecisionRegion],
buffer: &RustString,
) {
unsafe {
llvm::LLVMRustCoverageWriteMappingToBuffer(
virtual_file_mapping.as_ptr(),
virtual_file_mapping.len() as c_uint,
expressions.as_ptr(),
expressions.len() as c_uint,
code_regions.as_ptr(),
code_regions.len() as c_uint,
branch_regions.as_ptr(),
branch_regions.len() as c_uint,
mcdc_branch_regions.as_ptr(),
mcdc_branch_regions.len() as c_uint,
mcdc_decision_regions.as_ptr(),
mcdc_decision_regions.len() as c_uint,
buffer,
);
}
}
pub(crate) fn hash_bytes(bytes: &[u8]) -> u64 {
unsafe { llvm::LLVMRustCoverageHashByteArray(bytes.as_c_char_ptr(), bytes.len()) }
}
pub(crate) fn mapping_version() -> u32 {
unsafe { llvm::LLVMRustCoverageMappingVersion() }
}

View File

@ -22,7 +22,6 @@
use std::any::Any;
use std::ffi::CStr;
use std::io::Write;
use std::mem::ManuallyDrop;
use back::owned_target_machine::OwnedTargetMachine;
@ -165,30 +164,12 @@ impl WriteBackendMethods for LlvmCodegenBackend {
type ThinData = back::lto::ThinData;
type ThinBuffer = back::lto::ThinBuffer;
fn print_pass_timings(&self) {
unsafe {
let mut size = 0;
let cstr = llvm::LLVMRustPrintPassTimings(&raw mut size);
if cstr.is_null() {
println!("failed to get pass timings");
} else {
let timings = std::slice::from_raw_parts(cstr as *const u8, size);
std::io::stdout().write_all(timings).unwrap();
libc::free(cstr as *mut _);
}
}
let timings = llvm::build_string(|s| unsafe { llvm::LLVMRustPrintPassTimings(s) }).unwrap();
print!("{timings}");
}
fn print_statistics(&self) {
unsafe {
let mut size = 0;
let cstr = llvm::LLVMRustPrintStatistics(&raw mut size);
if cstr.is_null() {
println!("failed to get pass stats");
} else {
let stats = std::slice::from_raw_parts(cstr as *const u8, size);
std::io::stdout().write_all(stats).unwrap();
libc::free(cstr as *mut _);
}
}
let stats = llvm::build_string(|s| unsafe { llvm::LLVMRustPrintStatistics(s) }).unwrap();
print!("{stats}");
}
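Both printers previously received a malloc'd C string plus a length out-parameter and had to copy and `libc::free` it; now the C++ side streams into a Rust-owned buffer. A simplified sketch of the new shape for `-Ztime-llvm-passes` / `-Zprint-codegen-stats` output (the buffer and writer here are stand-ins, not rustc's actual `build_string` or FFI):
// Rust owns the buffer; the FFI side only appends bytes into it, so there is
// no foreign allocation to measure, copy, or free.
fn build_string(f: impl FnOnce(&mut Vec<u8>)) -> Result<String, std::string::FromUtf8Error> {
    let mut buf = Vec::new();
    f(&mut buf); // stand-in for LLVMRustPrintPassTimings writing into a RustString
    String::from_utf8(buf)
}
fn main() {
    let timings = build_string(|s| s.extend_from_slice(b"=== pass timings ===\n")).unwrap();
    print!("{timings}");
}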
fn run_link(
cgcx: &CodegenContext<Self>,

View File

@ -1765,11 +1765,13 @@ pub fn LLVMRustBuildAtomicStore<'a>(
/// Returns a string describing the last error caused by an LLVMRust* call.
pub fn LLVMRustGetLastError() -> *const c_char;
/// Print the pass timings since static dtors aren't picking them up.
pub fn LLVMRustPrintPassTimings(size: *const size_t) -> *const c_char;
/// Prints the timing information collected by `-Ztime-llvm-passes`.
#[expect(improper_ctypes)]
pub(crate) fn LLVMRustPrintPassTimings(OutStr: &RustString);
/// Print the statistics since static dtors aren't picking them up.
pub fn LLVMRustPrintStatistics(size: *const size_t) -> *const c_char;
/// Prints the statistics collected by `-Zprint-codegen-stats`.
#[expect(improper_ctypes)]
pub(crate) fn LLVMRustPrintStatistics(OutStr: &RustString);
/// Prepares inline assembly.
pub fn LLVMRustInlineAsm(
@ -1790,7 +1792,7 @@ pub fn LLVMRustInlineAsmVerify(
) -> bool;
#[allow(improper_ctypes)]
pub(crate) fn LLVMRustCoverageWriteFilenamesSectionToBuffer(
pub(crate) fn LLVMRustCoverageWriteFilenamesToBuffer(
Filenames: *const *const c_char,
FilenamesLen: size_t,
Lengths: *const size_t,
@ -1799,19 +1801,19 @@ pub(crate) fn LLVMRustCoverageWriteFilenamesSectionToBuffer(
);
#[allow(improper_ctypes)]
pub(crate) fn LLVMRustCoverageWriteMappingToBuffer(
pub(crate) fn LLVMRustCoverageWriteFunctionMappingsToBuffer(
VirtualFileMappingIDs: *const c_uint,
NumVirtualFileMappingIDs: c_uint,
NumVirtualFileMappingIDs: size_t,
Expressions: *const crate::coverageinfo::ffi::CounterExpression,
NumExpressions: c_uint,
NumExpressions: size_t,
CodeRegions: *const crate::coverageinfo::ffi::CodeRegion,
NumCodeRegions: c_uint,
NumCodeRegions: size_t,
BranchRegions: *const crate::coverageinfo::ffi::BranchRegion,
NumBranchRegions: c_uint,
NumBranchRegions: size_t,
MCDCBranchRegions: *const crate::coverageinfo::ffi::MCDCBranchRegion,
NumMCDCBranchRegions: c_uint,
NumMCDCBranchRegions: size_t,
MCDCDecisionRegions: *const crate::coverageinfo::ffi::MCDCDecisionRegion,
NumMCDCDecisionRegions: c_uint,
NumMCDCDecisionRegions: size_t,
BufferOut: &RustString,
);
@ -1820,16 +1822,16 @@ pub(crate) fn LLVMRustCoverageCreatePGOFuncNameVar(
FuncName: *const c_char,
FuncNameLen: size_t,
) -> &Value;
pub(crate) fn LLVMRustCoverageHashByteArray(Bytes: *const c_char, NumBytes: size_t) -> u64;
pub(crate) fn LLVMRustCoverageHashBytes(Bytes: *const c_char, NumBytes: size_t) -> u64;
#[allow(improper_ctypes)]
pub(crate) fn LLVMRustCoverageWriteMapSectionNameToString(M: &Module, Str: &RustString);
pub(crate) fn LLVMRustCoverageWriteCovmapSectionNameToString(M: &Module, OutStr: &RustString);
#[allow(improper_ctypes)]
pub(crate) fn LLVMRustCoverageWriteFuncSectionNameToString(M: &Module, Str: &RustString);
pub(crate) fn LLVMRustCoverageWriteCovfunSectionNameToString(M: &Module, OutStr: &RustString);
#[allow(improper_ctypes)]
pub(crate) fn LLVMRustCoverageWriteMappingVarNameToString(Str: &RustString);
pub(crate) fn LLVMRustCoverageWriteCovmapVarNameToString(OutStr: &RustString);
pub(crate) fn LLVMRustCoverageMappingVersion() -> u32;
pub fn LLVMRustDebugMetadataVersion() -> u32;

View File

@ -123,13 +123,13 @@ fromRust(LLVMRustCounterExprKind Kind) {
report_fatal_error("Bad LLVMRustCounterExprKind!");
}
extern "C" void LLVMRustCoverageWriteFilenamesSectionToBuffer(
extern "C" void LLVMRustCoverageWriteFilenamesToBuffer(
const char *const Filenames[], size_t FilenamesLen, // String start pointers
const size_t *const Lengths, size_t LengthsLen, // Corresponding lengths
RustStringRef BufferOut) {
if (FilenamesLen != LengthsLen) {
report_fatal_error(
"Mismatched lengths in LLVMRustCoverageWriteFilenamesSectionToBuffer");
"Mismatched lengths in LLVMRustCoverageWriteFilenamesToBuffer");
}
SmallVector<std::string, 32> FilenameRefs;
@ -143,16 +143,15 @@ extern "C" void LLVMRustCoverageWriteFilenamesSectionToBuffer(
FilenamesWriter.write(OS);
}
extern "C" void LLVMRustCoverageWriteMappingToBuffer(
const unsigned *VirtualFileMappingIDs, unsigned NumVirtualFileMappingIDs,
const LLVMRustCounterExpression *RustExpressions, unsigned NumExpressions,
const LLVMRustCoverageCodeRegion *CodeRegions, unsigned NumCodeRegions,
const LLVMRustCoverageBranchRegion *BranchRegions,
unsigned NumBranchRegions,
extern "C" void LLVMRustCoverageWriteFunctionMappingsToBuffer(
const unsigned *VirtualFileMappingIDs, size_t NumVirtualFileMappingIDs,
const LLVMRustCounterExpression *RustExpressions, size_t NumExpressions,
const LLVMRustCoverageCodeRegion *CodeRegions, size_t NumCodeRegions,
const LLVMRustCoverageBranchRegion *BranchRegions, size_t NumBranchRegions,
const LLVMRustCoverageMCDCBranchRegion *MCDCBranchRegions,
unsigned NumMCDCBranchRegions,
size_t NumMCDCBranchRegions,
const LLVMRustCoverageMCDCDecisionRegion *MCDCDecisionRegions,
unsigned NumMCDCDecisionRegions, RustStringRef BufferOut) {
size_t NumMCDCDecisionRegions, RustStringRef BufferOut) {
// Convert from FFI representation to LLVM representation.
// Expressions:
@ -219,34 +218,37 @@ LLVMRustCoverageCreatePGOFuncNameVar(LLVMValueRef F, const char *FuncName,
return wrap(createPGOFuncNameVar(*cast<Function>(unwrap(F)), FuncNameRef));
}
extern "C" uint64_t LLVMRustCoverageHashByteArray(const char *Bytes,
size_t NumBytes) {
auto StrRef = StringRef(Bytes, NumBytes);
return IndexedInstrProf::ComputeHash(StrRef);
extern "C" uint64_t LLVMRustCoverageHashBytes(const char *Bytes,
size_t NumBytes) {
return IndexedInstrProf::ComputeHash(StringRef(Bytes, NumBytes));
}
static void WriteSectionNameToString(LLVMModuleRef M, InstrProfSectKind SK,
RustStringRef Str) {
// Private helper function for getting the covmap and covfun section names.
static void writeInstrProfSectionNameToString(LLVMModuleRef M,
InstrProfSectKind SectKind,
RustStringRef OutStr) {
auto TargetTriple = Triple(unwrap(M)->getTargetTriple());
auto name = getInstrProfSectionName(SK, TargetTriple.getObjectFormat());
auto OS = RawRustStringOstream(Str);
auto name = getInstrProfSectionName(SectKind, TargetTriple.getObjectFormat());
auto OS = RawRustStringOstream(OutStr);
OS << name;
}
extern "C" void LLVMRustCoverageWriteMapSectionNameToString(LLVMModuleRef M,
RustStringRef Str) {
WriteSectionNameToString(M, IPSK_covmap, Str);
extern "C" void
LLVMRustCoverageWriteCovmapSectionNameToString(LLVMModuleRef M,
RustStringRef OutStr) {
writeInstrProfSectionNameToString(M, IPSK_covmap, OutStr);
}
extern "C" void
LLVMRustCoverageWriteFuncSectionNameToString(LLVMModuleRef M,
RustStringRef Str) {
WriteSectionNameToString(M, IPSK_covfun, Str);
LLVMRustCoverageWriteCovfunSectionNameToString(LLVMModuleRef M,
RustStringRef OutStr) {
writeInstrProfSectionNameToString(M, IPSK_covfun, OutStr);
}
extern "C" void LLVMRustCoverageWriteMappingVarNameToString(RustStringRef Str) {
extern "C" void
LLVMRustCoverageWriteCovmapVarNameToString(RustStringRef OutStr) {
auto name = getCoverageMappingVarName();
auto OS = RawRustStringOstream(Str);
auto OS = RawRustStringOstream(OutStr);
OS << name;
}

View File

@ -140,26 +140,14 @@ extern "C" void LLVMRustSetNormalizedTarget(LLVMModuleRef M,
unwrap(M)->setTargetTriple(Triple::normalize(Triple));
}
extern "C" const char *LLVMRustPrintPassTimings(size_t *Len) {
std::string buf;
auto SS = raw_string_ostream(buf);
TimerGroup::printAll(SS);
SS.flush();
*Len = buf.length();
char *CStr = (char *)malloc(*Len);
memcpy(CStr, buf.c_str(), *Len);
return CStr;
extern "C" void LLVMRustPrintPassTimings(RustStringRef OutBuf) {
auto OS = RawRustStringOstream(OutBuf);
TimerGroup::printAll(OS);
}
extern "C" const char *LLVMRustPrintStatistics(size_t *Len) {
std::string buf;
auto SS = raw_string_ostream(buf);
llvm::PrintStatistics(SS);
SS.flush();
*Len = buf.length();
char *CStr = (char *)malloc(*Len);
memcpy(CStr, buf.c_str(), *Len);
return CStr;
extern "C" void LLVMRustPrintStatistics(RustStringRef OutBuf) {
auto OS = RawRustStringOstream(OutBuf);
llvm::PrintStatistics(OS);
}
extern "C" LLVMValueRef LLVMRustGetNamedValue(LLVMModuleRef M, const char *Name,

View File

@ -1,7 +1,7 @@
use std::cmp::Ordering;
use std::collections::VecDeque;
use std::iter;
use std::ops::{Index, IndexMut};
use std::{iter, mem, slice};
use rustc_data_structures::captures::Captures;
use rustc_data_structures::fx::FxHashSet;
@ -127,10 +127,10 @@ fn compute_basic_coverage_blocks(
let mut bcbs = IndexVec::<BasicCoverageBlock, _>::with_capacity(num_basic_blocks);
let mut bb_to_bcb = IndexVec::from_elem_n(None, num_basic_blocks);
let mut add_basic_coverage_block = |basic_blocks: &mut Vec<BasicBlock>| {
let mut flush_chain_into_new_bcb = |current_chain: &mut Vec<BasicBlock>| {
// Take the accumulated list of blocks, leaving the vector empty
// to be used by subsequent BCBs.
let basic_blocks = std::mem::take(basic_blocks);
let basic_blocks = mem::take(current_chain);
let bcb = bcbs.next_index();
for &bb in basic_blocks.iter() {
@ -141,48 +141,41 @@ fn compute_basic_coverage_blocks(
bcb_filtered_successors(mir_body[bb].terminator()).is_out_summable()
});
let bcb_data = BasicCoverageBlockData { basic_blocks, is_out_summable };
debug!("adding bcb{}: {:?}", bcb.index(), bcb_data);
debug!("adding {bcb:?}: {bcb_data:?}");
bcbs.push(bcb_data);
};
// Walk the MIR CFG using a Preorder traversal, which starts from `START_BLOCK` and follows
// each block terminator's `successors()`. Coverage spans must map to actual source code,
// so compiler generated blocks and paths can be ignored. To that end, the CFG traversal
// intentionally omits unwind paths.
// FIXME(#78544): MIR InstrumentCoverage: Improve coverage of `#[should_panic]` tests and
// `catch_unwind()` handlers.
// Traverse the MIR control-flow graph, accumulating chains of blocks
// that can be combined into a single node in the coverage graph.
// A depth-first search ensures that if two nodes can be chained
// together, they will be adjacent in the traversal order.
// Accumulates a chain of blocks that will be combined into one BCB.
let mut basic_blocks = Vec::new();
let mut current_chain = vec![];
let filtered_successors = |bb| bcb_filtered_successors(mir_body[bb].terminator());
for bb in short_circuit_preorder(mir_body, filtered_successors)
let subgraph = CoverageRelevantSubgraph::new(&mir_body.basic_blocks);
for bb in graph::depth_first_search(subgraph, mir::START_BLOCK)
.filter(|&bb| mir_body[bb].terminator().kind != TerminatorKind::Unreachable)
{
// If the previous block can't be chained into `bb`, flush the accumulated
// blocks into a new BCB, then start building the next chain.
if let Some(&prev) = basic_blocks.last()
&& (!filtered_successors(prev).is_chainable() || {
// If `bb` has multiple predecessor blocks, or `prev` isn't
// one of its predecessors, we can't chain and must flush.
let predecessors = &mir_body.basic_blocks.predecessors()[bb];
predecessors.len() > 1 || !predecessors.contains(&prev)
})
{
debug!(
terminator_kind = ?mir_body[prev].terminator().kind,
predecessors = ?&mir_body.basic_blocks.predecessors()[bb],
"can't chain from {prev:?} to {bb:?}"
);
add_basic_coverage_block(&mut basic_blocks);
if let Some(&prev) = current_chain.last() {
// Adding a block to a non-empty chain is allowed if the
// previous block permits chaining, and the current block has
// `prev` as its sole predecessor.
let can_chain = subgraph.coverage_successors(prev).is_out_chainable()
&& mir_body.basic_blocks.predecessors()[bb].as_slice() == &[prev];
if !can_chain {
// The current block can't be added to the existing chain, so
// flush that chain into a new BCB, and start a new chain.
flush_chain_into_new_bcb(&mut current_chain);
}
}
basic_blocks.push(bb);
current_chain.push(bb);
}
if !basic_blocks.is_empty() {
if !current_chain.is_empty() {
debug!("flushing accumulated blocks into one last BCB");
add_basic_coverage_block(&mut basic_blocks);
flush_chain_into_new_bcb(&mut current_chain);
}
(bcbs, bb_to_bcb)
@ -389,34 +382,28 @@ pub(crate) fn last_bb(&self) -> BasicBlock {
/// indicates whether that block can potentially be combined into the same BCB
/// as its sole successor.
#[derive(Clone, Copy, Debug)]
enum CoverageSuccessors<'a> {
/// The terminator has exactly one straight-line successor, so its block can
/// potentially be combined into the same BCB as that successor.
Chainable(BasicBlock),
/// The block cannot be combined into the same BCB as its successor(s).
NotChainable(&'a [BasicBlock]),
/// Yield terminators are not chainable, and their execution count can also
/// differ from the execution count of their out-edge.
Yield(BasicBlock),
struct CoverageSuccessors<'a> {
/// Coverage-relevant successors of the corresponding terminator.
/// There might be 0, 1, or multiple targets.
targets: &'a [BasicBlock],
/// `Yield` terminators are not chainable, because their sole out-edge is
/// only followed if/when the generator is resumed after the yield.
is_yield: bool,
}
impl CoverageSuccessors<'_> {
fn is_chainable(&self) -> bool {
match self {
Self::Chainable(_) => true,
Self::NotChainable(_) => false,
Self::Yield(_) => false,
}
/// If `false`, this terminator cannot be chained into another block when
/// building the coverage graph.
fn is_out_chainable(&self) -> bool {
// If a terminator is out-summable and has exactly one out-edge, then
// it is eligible to be chained into its successor block.
self.is_out_summable() && self.targets.len() == 1
}
/// Returns true if the terminator itself is assumed to have the same
/// execution count as the sum of its out-edges (assuming no panics).
fn is_out_summable(&self) -> bool {
match self {
Self::Chainable(_) => true,
Self::NotChainable(_) => true,
Self::Yield(_) => false,
}
!self.is_yield && !self.targets.is_empty()
}
}
@ -425,12 +412,7 @@ impl IntoIterator for CoverageSuccessors<'_> {
type IntoIter = impl DoubleEndedIterator<Item = Self::Item>;
fn into_iter(self) -> Self::IntoIter {
match self {
Self::Chainable(bb) | Self::Yield(bb) => {
Some(bb).into_iter().chain((&[]).iter().copied())
}
Self::NotChainable(bbs) => None.into_iter().chain(bbs.iter().copied()),
}
self.targets.iter().copied()
}
}
@ -440,14 +422,17 @@ fn into_iter(self) -> Self::IntoIter {
// `catch_unwind()` handlers.
fn bcb_filtered_successors<'a, 'tcx>(terminator: &'a Terminator<'tcx>) -> CoverageSuccessors<'a> {
use TerminatorKind::*;
match terminator.kind {
let mut is_yield = false;
let targets = match &terminator.kind {
// A switch terminator can have many coverage-relevant successors.
// (If there is exactly one successor, we still treat it as not chainable.)
SwitchInt { ref targets, .. } => CoverageSuccessors::NotChainable(targets.all_targets()),
SwitchInt { targets, .. } => targets.all_targets(),
// A yield terminator has exactly 1 successor, but should not be chained,
// because its resume edge has a different execution count.
Yield { resume, .. } => CoverageSuccessors::Yield(resume),
Yield { resume, .. } => {
is_yield = true;
slice::from_ref(resume)
}
// These terminators have exactly one coverage-relevant successor,
// and can be chained into it.
@ -455,24 +440,15 @@ fn bcb_filtered_successors<'a, 'tcx>(terminator: &'a Terminator<'tcx>) -> Covera
| Drop { target, .. }
| FalseEdge { real_target: target, .. }
| FalseUnwind { real_target: target, .. }
| Goto { target } => CoverageSuccessors::Chainable(target),
| Goto { target } => slice::from_ref(target),
// A call terminator can normally be chained, except when it has no
// successor because it is known to diverge.
Call { target: maybe_target, .. } => match maybe_target {
Some(target) => CoverageSuccessors::Chainable(target),
None => CoverageSuccessors::NotChainable(&[]),
},
Call { target: maybe_target, .. } => maybe_target.as_slice(),
// An inline asm terminator can normally be chained, except when it
// diverges or uses asm goto.
InlineAsm { ref targets, .. } => {
if let [target] = targets[..] {
CoverageSuccessors::Chainable(target)
} else {
CoverageSuccessors::NotChainable(targets)
}
}
InlineAsm { targets, .. } => &targets,
// These terminators have no coverage-relevant successors.
CoroutineDrop
@ -480,8 +456,10 @@ fn bcb_filtered_successors<'a, 'tcx>(terminator: &'a Terminator<'tcx>) -> Covera
| TailCall { .. }
| Unreachable
| UnwindResume
| UnwindTerminate(_) => CoverageSuccessors::NotChainable(&[]),
}
| UnwindTerminate(_) => &[],
};
CoverageSuccessors { targets, is_yield }
}
/// Maintains separate worklists for each loop in the BasicCoverageBlock CFG, plus one for the
@ -616,28 +594,31 @@ pub(crate) fn unvisited(&self) -> Vec<BasicCoverageBlock> {
}
}
fn short_circuit_preorder<'a, 'tcx, F, Iter>(
body: &'a mir::Body<'tcx>,
filtered_successors: F,
) -> impl Iterator<Item = BasicBlock> + Captures<'a> + Captures<'tcx>
where
F: Fn(BasicBlock) -> Iter,
Iter: IntoIterator<Item = BasicBlock>,
{
let mut visited = BitSet::new_empty(body.basic_blocks.len());
let mut worklist = vec![mir::START_BLOCK];
std::iter::from_fn(move || {
while let Some(bb) = worklist.pop() {
if !visited.insert(bb) {
continue;
}
worklist.extend(filtered_successors(bb));
return Some(bb);
}
None
})
/// Wrapper around a [`mir::BasicBlocks`] graph that restricts each node's
/// successors to only the ones considered "relevant" when building a coverage
/// graph.
#[derive(Clone, Copy)]
struct CoverageRelevantSubgraph<'a, 'tcx> {
basic_blocks: &'a mir::BasicBlocks<'tcx>,
}
impl<'a, 'tcx> CoverageRelevantSubgraph<'a, 'tcx> {
fn new(basic_blocks: &'a mir::BasicBlocks<'tcx>) -> Self {
Self { basic_blocks }
}
fn coverage_successors(&self, bb: BasicBlock) -> CoverageSuccessors<'_> {
bcb_filtered_successors(self.basic_blocks[bb].terminator())
}
}
impl<'a, 'tcx> graph::DirectedGraph for CoverageRelevantSubgraph<'a, 'tcx> {
type Node = BasicBlock;
fn num_nodes(&self) -> usize {
self.basic_blocks.num_nodes()
}
}
impl<'a, 'tcx> graph::Successors for CoverageRelevantSubgraph<'a, 'tcx> {
fn successors(&self, bb: Self::Node) -> impl Iterator<Item = Self::Node> {
self.coverage_successors(bb).into_iter()
}
}
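The chaining rule is easiest to see on a tiny graph: during the depth-first traversal, a block extends the current chain only when the previous block has a single out-edge and is the new block's sole predecessor. A self-contained sketch over plain adjacency lists (a hypothetical helper, not the compiler's types, and it omits the yield/summability refinements above):
// Group the nodes of a small CFG into chains, mirroring the rule above.
// `succ[n]` / `pred[n]` are the successors / predecessors of node `n`.
fn build_chains(succ: &[Vec<usize>], pred: &[Vec<usize>]) -> Vec<Vec<usize>> {
    let mut chains: Vec<Vec<usize>> = Vec::new();
    let mut current: Vec<usize> = Vec::new();
    let mut visited = vec![false; succ.len()];
    let mut stack = vec![0]; // depth-first traversal from the start node
    while let Some(bb) = stack.pop() {
        if visited[bb] {
            continue;
        }
        visited[bb] = true;
        // Push successors in reverse so the first successor is visited next,
        // keeping chainable blocks adjacent in the traversal order.
        stack.extend(succ[bb].iter().rev().copied());
        if let Some(&prev) = current.last() {
            // Chain only if `prev` has one out-edge and is our sole predecessor.
            let can_chain = succ[prev].len() == 1 && pred[bb] == [prev];
            if !can_chain {
                chains.push(std::mem::take(&mut current));
            }
        }
        current.push(bb);
    }
    if !current.is_empty() {
        chains.push(current);
    }
    chains
}
fn main() {
    // 0 -> 1, then 1 branches to 2 and 3: {0, 1} chain; 2 and 3 stand alone.
    let succ = vec![vec![1], vec![2, 3], vec![], vec![]];
    let pred = vec![vec![], vec![0], vec![1], vec![1]];
    assert_eq!(build_chains(&succ, &pred), vec![vec![0, 1], vec![2], vec![3]]);
}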

View File

@ -1,13 +1,14 @@
//! Implement methods to pretty print stable MIR body.
use std::fmt::Debug;
use std::io::Write;
use std::{fmt, io, iter};
use fmt::{Display, Formatter};
use super::{AssertMessage, BinOp, BorrowKind, FakeBorrowKind, TerminatorKind};
use super::{AggregateKind, AssertMessage, BinOp, BorrowKind, FakeBorrowKind, TerminatorKind};
use crate::mir::{Operand, Place, Rvalue, StatementKind, UnwindAction, VarDebugInfoContents};
use crate::ty::{IndexedVal, MirConst, Ty, TyConst};
use crate::{Body, Mutability, with};
use crate::ty::{AdtKind, IndexedVal, MirConst, Ty, TyConst};
use crate::{Body, CrateDef, Mutability, with};
impl Display for Ty {
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
@ -23,10 +24,11 @@ fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
pub(crate) fn function_body<W: Write>(writer: &mut W, body: &Body, name: &str) -> io::Result<()> {
write!(writer, "fn {name}(")?;
body.arg_locals()
.iter()
.enumerate()
.try_for_each(|(index, local)| write!(writer, "_{}: {}", index + 1, local.ty))?;
let mut sep = "";
for (index, local) in body.arg_locals().iter().enumerate() {
write!(writer, "{}_{}: {}", sep, index + 1, local.ty)?;
sep = ", ";
}
write!(writer, ")")?;
let return_local = body.ret_local();
@ -73,39 +75,40 @@ pub(crate) fn function_body<W: Write>(writer: &mut W, body: &Body, name: &str) -
}
fn pretty_statement<W: Write>(writer: &mut W, statement: &StatementKind) -> io::Result<()> {
const INDENT: &str = " ";
match statement {
StatementKind::Assign(place, rval) => {
write!(writer, " {place:?} = ")?;
write!(writer, "{INDENT}{place:?} = ")?;
pretty_rvalue(writer, rval)?;
writeln!(writer, ";")
}
// FIXME: Add rest of the statements
StatementKind::FakeRead(cause, place) => {
writeln!(writer, "FakeRead({cause:?}, {place:?});")
writeln!(writer, "{INDENT}FakeRead({cause:?}, {place:?});")
}
StatementKind::SetDiscriminant { place, variant_index } => {
writeln!(writer, "discriminant({place:?} = {};", variant_index.to_index())
writeln!(writer, "{INDENT}discriminant({place:?} = {};", variant_index.to_index())
}
StatementKind::Deinit(place) => writeln!(writer, "Deinit({place:?};"),
StatementKind::StorageLive(local) => {
writeln!(writer, "StorageLive(_{local});")
writeln!(writer, "{INDENT}StorageLive(_{local});")
}
StatementKind::StorageDead(local) => {
writeln!(writer, "StorageDead(_{local});")
writeln!(writer, "{INDENT}StorageDead(_{local});")
}
StatementKind::Retag(kind, place) => writeln!(writer, "Retag({kind:?}, {place:?});"),
StatementKind::PlaceMention(place) => {
writeln!(writer, "PlaceMention({place:?};")
writeln!(writer, "{INDENT}PlaceMention({place:?};")
}
StatementKind::ConstEvalCounter => {
writeln!(writer, "ConstEvalCounter;")
writeln!(writer, "{INDENT}ConstEvalCounter;")
}
StatementKind::Nop => writeln!(writer, "nop;"),
StatementKind::Nop => writeln!(writer, "{INDENT}nop;"),
StatementKind::AscribeUserType { .. }
| StatementKind::Coverage(_)
| StatementKind::Intrinsic(_) => {
// FIXME: Make them pretty.
writeln!(writer, "{statement:?};")
writeln!(writer, "{INDENT}{statement:?};")
}
}
}
@ -322,15 +325,11 @@ fn pretty_ty_const(ct: &TyConst) -> String {
fn pretty_rvalue<W: Write>(writer: &mut W, rval: &Rvalue) -> io::Result<()> {
match rval {
Rvalue::AddressOf(mutability, place) => {
write!(writer, "&raw {}(*{:?})", pretty_mut(*mutability), place)
write!(writer, "&raw {} {:?}", pretty_mut(*mutability), place)
}
Rvalue::Aggregate(aggregate_kind, operands) => {
// FIXME: Add pretty_aggregate function that returns a pretty string
write!(writer, "{aggregate_kind:?} (")?;
let mut op_iter = operands.iter();
op_iter.next().map_or(Ok(()), |op| write!(writer, "{}", pretty_operand(op)))?;
op_iter.try_for_each(|op| write!(writer, ", {}", pretty_operand(op)))?;
write!(writer, ")")
pretty_aggregate(writer, aggregate_kind, operands)
}
Rvalue::BinaryOp(bin, op1, op2) => {
write!(writer, "{:?}({}, {})", bin, pretty_operand(op1), pretty_operand(op2))
@ -360,22 +359,74 @@ fn pretty_rvalue<W: Write>(writer: &mut W, rval: &Rvalue) -> io::Result<()> {
write!(writer, "{kind}{place:?}")
}
Rvalue::Repeat(op, cnst) => {
write!(writer, "{} \" \" {}", pretty_operand(op), pretty_ty_const(cnst))
write!(writer, "[{}; {}]", pretty_operand(op), pretty_ty_const(cnst))
}
Rvalue::ShallowInitBox(_, _) => Ok(()),
Rvalue::ThreadLocalRef(item) => {
write!(writer, "thread_local_ref{item:?}")
}
Rvalue::NullaryOp(nul, ty) => {
write!(writer, "{nul:?} {ty} \" \"")
write!(writer, "{nul:?}::<{ty}>() \" \"")
}
Rvalue::UnaryOp(un, op) => {
write!(writer, "{} \" \" {:?}", pretty_operand(op), un)
write!(writer, "{:?}({})", un, pretty_operand(op))
}
Rvalue::Use(op) => write!(writer, "{}", pretty_operand(op)),
}
}
fn pretty_aggregate<W: Write>(
writer: &mut W,
aggregate_kind: &AggregateKind,
operands: &Vec<Operand>,
) -> io::Result<()> {
let suffix = match aggregate_kind {
AggregateKind::Array(_) => {
write!(writer, "[")?;
"]"
}
AggregateKind::Tuple => {
write!(writer, "(")?;
")"
}
AggregateKind::Adt(def, var, _, _, _) => {
if def.kind() == AdtKind::Enum {
write!(writer, "{}::{}", def.name(), def.variant(*var).unwrap().name())?;
} else {
write!(writer, "{}", def.variant(*var).unwrap().name())?;
}
if operands.is_empty() {
return Ok(());
}
// FIXME: Change this once we have CtorKind in StableMIR.
write!(writer, "(")?;
")"
}
AggregateKind::Closure(def, _) => {
write!(writer, "{{closure@{}}}(", def.span().diagnostic())?;
")"
}
AggregateKind::Coroutine(def, _, _) => {
write!(writer, "{{coroutine@{}}}(", def.span().diagnostic())?;
")"
}
AggregateKind::RawPtr(ty, mutability) => {
write!(
writer,
"*{} {ty} from (",
if *mutability == Mutability::Mut { "mut" } else { "const" }
)?;
")"
}
};
let mut separator = "";
for op in operands {
write!(writer, "{}{}", separator, pretty_operand(op))?;
separator = ", ";
}
write!(writer, "{suffix}")
}
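Note how `function_body` and `pretty_aggregate` both join operands with a growing separator instead of special-casing the first element. The idiom in isolation, as a runnable sketch:
use std::io::Write;
// Write `items` separated by ", ": the separator starts empty and becomes
// ", " after the first item, so nothing leads or trails the list.
fn write_comma_separated<W: Write>(w: &mut W, items: &[&str]) -> std::io::Result<()> {
    let mut sep = "";
    for item in items {
        write!(w, "{sep}{item}")?;
        sep = ", ";
    }
    Ok(())
}
fn main() -> std::io::Result<()> {
    let mut out = Vec::new();
    write_comma_separated(&mut out, &["_1: u8", "_2: bool"])?;
    assert_eq!(out.as_slice(), b"_1: u8, _2: bool");
    Ok(())
}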
fn pretty_mut(mutability: Mutability) -> &'static str {
match mutability {
Mutability::Not => " ",

View File

@ -271,6 +271,14 @@ pub fn get_filename(&self) -> Filename {
pub fn get_lines(&self) -> LineInfo {
with(|c| c.get_lines(self))
}
/// Return the span location to be printed in diagnostic messages.
///
/// This may leak local file paths and should not be used to build artifacts that may be
/// distributed.
pub fn diagnostic(&self) -> String {
with(|c| c.span_to_string(*self))
}
}
#[derive(Clone, Copy, Debug, Serialize)]

View File

@ -63,14 +63,14 @@ struct Block<T> {
impl<T> Block<T> {
/// Creates an empty block.
fn new() -> Block<T> {
fn new() -> Box<Block<T>> {
// SAFETY: This is safe because:
// [1] `Block::next` (AtomicPtr) may be safely zero initialized.
// [2] `Block::slots` (Array) may be safely zero initialized because of [3, 4].
// [3] `Slot::msg` (UnsafeCell) may be safely zero initialized because it
// holds a MaybeUninit.
// [4] `Slot::state` (AtomicUsize) may be safely zero initialized.
unsafe { MaybeUninit::zeroed().assume_init() }
unsafe { Box::new_zeroed().assume_init() }
}
/// Waits until the next pointer is set.
@ -199,13 +199,13 @@ fn start_send(&self, token: &mut Token) -> bool {
// If we're going to have to install the next block, allocate it in advance in order to
// make the wait for other threads as short as possible.
if offset + 1 == BLOCK_CAP && next_block.is_none() {
next_block = Some(Box::new(Block::<T>::new()));
next_block = Some(Block::<T>::new());
}
// If this is the first message to be sent into the channel, we need to allocate the
// first block and install it.
if block.is_null() {
let new = Box::into_raw(Box::new(Block::<T>::new()));
let new = Box::into_raw(Block::<T>::new());
if self
.tail
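The fix ensures the zeroed block is never materialized on the stack: `Box::new_zeroed()` allocates zeroed heap memory up front, and `assume_init()` merely reinterprets that allocation (justified by the safety notes [1]-[4] above). A standalone sketch of the same idea using only stable APIs, with a hypothetical `BigBlock` standing in for `Block<T>`:
use std::alloc::{alloc_zeroed, handle_alloc_error, Layout};
// Stand-in for the channel's `Block<T>`: large enough that building a zeroed
// value on the stack before boxing it could overflow the stack.
struct BigBlock {
    slots: [u8; 1 << 20], // 1 MiB
}
// Same idea as `Box::new_zeroed().assume_init()` above, expressed with the
// stable `alloc_zeroed` API: the zeroed allocation is created directly on
// the heap, and no `BigBlock` value ever exists on the stack.
fn new_on_heap() -> Box<BigBlock> {
    let layout = Layout::new::<BigBlock>();
    // SAFETY: `BigBlock` is plain bytes, so the all-zero bit pattern is a
    // valid value, and the pointer comes from the global allocator.
    unsafe {
        let ptr = alloc_zeroed(layout).cast::<BigBlock>();
        if ptr.is_null() {
            handle_alloc_error(layout);
        }
        Box::from_raw(ptr)
    }
}
fn main() {
    let block = new_on_heap();
    assert!(block.slots.iter().all(|&b| b == 0));
}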

View File

@ -0,0 +1,48 @@
//@ compile-flags: -Z unpretty=stable-mir --crate-type lib -C panic=abort
//@ check-pass
//@ only-x86_64
//@ needs-unwind unwind edges are different with panic=abort
//! Check how the stable-mir pretty printer prints different operands and the abort panic strategy.
pub fn operands(val: u8) {
let array = [val; 10];
let first = array[0];
let last = array[10 - 1];
assert_eq!(first, last);
let reference = &first;
let dereferenced = *reference;
assert_eq!(dereferenced, first);
let tuple = (first, last);
let (first_again, _) = tuple;
let first_again_again = tuple.0;
assert_eq!(first_again, first_again_again);
let length = array.len();
let size_of = std::mem::size_of_val(&length);
assert_eq!(length, size_of);
}
pub struct Dummy {
c: char,
i: i32,
}
pub enum Ctors {
Unit,
StructLike { d: Dummy },
TupLike(bool),
}
pub fn more_operands() -> [Ctors; 3] {
let dummy = Dummy { c: 'a', i: i32::MIN };
let unit = Ctors::Unit;
let struct_like = Ctors::StructLike { d: dummy };
let tup_like = Ctors::TupLike(false);
[unit, struct_like, tup_like]
}
pub fn closures(x: bool, z: bool) -> impl FnOnce(bool) -> bool {
move |y: bool| (x ^ y) || z
}

View File

@ -0,0 +1,263 @@
// WARNING: This is highly experimental output; it's intended for stable-mir developers only.
// If you find a bug or want to improve the output, open an issue at https://github.com/rust-lang/project-stable-mir.
fn operands(_1: u8) -> () {
let mut _0: ();
let _2: [u8; 10];
let _3: u8;
let _4: usize;
let mut _5: usize;
let mut _6: bool;
let _7: u8;
let _8: usize;
let mut _9: (usize, bool);
let mut _10: usize;
let mut _11: bool;
let mut _12: (&u8, &u8);
let mut _13: &u8;
let mut _14: &u8;
let _15: &u8;
let _16: &u8;
let mut _17: bool;
let mut _18: u8;
let mut _19: u8;
let _20: core::panicking::AssertKind;
let _21: !;
let mut _22: Option<Arguments<'_>>;
let _23: &u8;
let _24: u8;
let mut _25: (&u8, &u8);
let mut _26: &u8;
let mut _27: &u8;
let _28: &u8;
let _29: &u8;
let mut _30: bool;
let mut _31: u8;
let mut _32: u8;
let _33: core::panicking::AssertKind;
let _34: !;
let mut _35: Option<Arguments<'_>>;
let _36: (u8, u8);
let _37: u8;
let _38: u8;
let mut _39: (&u8, &u8);
let mut _40: &u8;
let mut _41: &u8;
let _42: &u8;
let _43: &u8;
let mut _44: bool;
let mut _45: u8;
let mut _46: u8;
let _47: core::panicking::AssertKind;
let _48: !;
let mut _49: Option<Arguments<'_>>;
let _50: usize;
let mut _51: &[u8];
let mut _52: &[u8; 10];
let _53: usize;
let _54: &usize;
let mut _55: (&usize, &usize);
let mut _56: &usize;
let mut _57: &usize;
let _58: &usize;
let _59: &usize;
let mut _60: bool;
let mut _61: usize;
let mut _62: usize;
let _63: core::panicking::AssertKind;
let _64: !;
let mut _65: Option<Arguments<'_>>;
debug val => _1;
debug array => _2;
debug first => _3;
debug last => _7;
debug left_val => _15;
debug right_val => _16;
debug kind => _20;
debug reference => _23;
debug dereferenced => _24;
debug left_val => _28;
debug right_val => _29;
debug kind => _33;
debug tuple => _36;
debug first_again => _37;
debug first_again_again => _38;
debug left_val => _42;
debug right_val => _43;
debug kind => _47;
debug length => _50;
debug size_of => _53;
debug left_val => _58;
debug right_val => _59;
debug kind => _63;
bb0: {
_2 = [_1; 10];
_4 = 0_usize;
_5 = 10_usize;
_6 = Lt(_4, _5);
assert(move _6, "index out of bounds: the length is {} but the index is {}", move _5, _4) -> [success: bb1, unwind unreachable];
}
bb1: {
_3 = _2[_4];
_9 = CheckedSub(10_usize, 1_usize);
assert(!move (_9.1: bool), "attempt to compute `{} - {}`, which would overflow", 10_usize, 1_usize) -> [success: bb2, unwind unreachable];
}
bb2: {
_8 = move (_9.0: usize);
_10 = 10_usize;
_11 = Lt(_8, _10);
assert(move _11, "index out of bounds: the length is {} but the index is {}", move _10, _8) -> [success: bb3, unwind unreachable];
}
bb3: {
_7 = _2[_8];
_13 = &_3;
_14 = &_7;
_12 = (move _13, move _14);
_15 = (_12.0: &u8);
_16 = (_12.1: &u8);
_18 = (*_15);
_19 = (*_16);
_17 = Eq(move _18, move _19);
switchInt(move _17) -> [0: bb5, otherwise: bb4];
}
bb4: {
_23 = &_3;
_24 = (*_23);
_26 = &_24;
_27 = &_3;
_25 = (move _26, move _27);
_28 = (_25.0: &u8);
_29 = (_25.1: &u8);
_31 = (*_28);
_32 = (*_29);
_30 = Eq(move _31, move _32);
switchInt(move _30) -> [0: bb7, otherwise: bb6];
}
bb5: {
_20 = core::panicking::AssertKind::Eq;
_22 = std::option::Option::None;
_21 = core::panicking::assert_failed::<u8, u8>(move _20, _15, _16, move _22) -> unwind unreachable;
}
bb6: {
_36 = (_3, _7);
_37 = (_36.0: u8);
_38 = (_36.0: u8);
_40 = &_37;
_41 = &_38;
_39 = (move _40, move _41);
_42 = (_39.0: &u8);
_43 = (_39.1: &u8);
_45 = (*_42);
_46 = (*_43);
_44 = Eq(move _45, move _46);
switchInt(move _44) -> [0: bb9, otherwise: bb8];
}
bb7: {
_33 = core::panicking::AssertKind::Eq;
_35 = std::option::Option::None;
_34 = core::panicking::assert_failed::<u8, u8>(move _33, _28, _29, move _35) -> unwind unreachable;
}
bb8: {
_52 = &_2;
_51 = move _52 as &[u8];
_50 = PtrMetadata(move _51);
_54 = &_50;
_53 = std::mem::size_of_val::<usize>(_54) -> [return: bb10, unwind unreachable];
}
bb9: {
_47 = core::panicking::AssertKind::Eq;
_49 = std::option::Option::None;
_48 = core::panicking::assert_failed::<u8, u8>(move _47, _42, _43, move _49) -> unwind unreachable;
}
bb10: {
_56 = &_50;
_57 = &_53;
_55 = (move _56, move _57);
_58 = (_55.0: &usize);
_59 = (_55.1: &usize);
_61 = (*_58);
_62 = (*_59);
_60 = Eq(move _61, move _62);
switchInt(move _60) -> [0: bb12, otherwise: bb11];
}
bb11: {
return;
}
bb12: {
_63 = core::panicking::AssertKind::Eq;
_65 = std::option::Option::None;
_64 = core::panicking::assert_failed::<usize, usize>(move _63, _58, _59, move _65) -> unwind unreachable;
}
}
fn operands::{constant#0}() -> usize {
let mut _0: usize;
bb0: {
_0 = 10_usize;
return;
}
}
fn more_operands() -> [Ctors; 3] {
let mut _0: [Ctors; 3];
let _1: Dummy;
let _2: Ctors;
let _3: Ctors;
let _4: Ctors;
debug dummy => _1;
debug unit => _2;
debug struct_like => _3;
debug tup_like => _4;
bb0: {
_1 = Dummy('a', core::num::<impl i32>::MIN);
_2 = Ctors::Unit;
_3 = Ctors::StructLike(move _1);
_4 = Ctors::TupLike(false);
_0 = [move _2, move _3, move _4];
return;
}
}
fn more_operands::{constant#0}() -> usize {
let mut _0: usize;
bb0: {
_0 = 3_usize;
return;
}
}
fn closures(_1: bool, _2: bool) -> {closure@$DIR/operands.rs:47:5: 47:19} {
let mut _0: {closure@$DIR/operands.rs:47:5: 47:19};
debug x => _1;
debug z => _2;
bb0: {
_0 = {closure@$DIR/operands.rs:47:5: 47:19}(_1, _2);
return;
}
}
fn closures::{closure#0}(_1: {closure@$DIR/operands.rs:47:5: 47:19}, _2: bool) -> bool {
let mut _0: bool;
let mut _3: bool;
let mut _4: bool;
debug y => _2;
debug x => (_1.0: bool);
debug z => (_1.1: bool);
bb0: {
_4 = (_1.0: bool);
_3 = BitXor(move _4, _2);
switchInt(move _3) -> [0: bb2, otherwise: bb1];
}
bb1: {
_0 = true;
goto -> bb3;
}
bb2: {
_0 = (_1.1: bool);
goto -> bb3;
}
bb3: {
return;
}
}
fn Ctors::TupLike(_1: bool) -> Ctors {
let mut _0: Ctors;
bb0: {
_0 = Ctors::TupLike(move _1);
return;
}
}

View File

@ -0,0 +1,29 @@
//@ run-pass
//@ needs-threads
//@ compile-flags: -Copt-level=0
// The channel's `Block::new` was causing a stack overflow because it held 32 item slots, which is
// 1MiB for this test's `BigStruct` -- and the block was built on the stack before being moved into `Box::new`.
//
// That block is now initialized directly on the heap.
//
// Ref: https://github.com/rust-lang/rust/issues/102246
use std::sync::mpsc::channel;
use std::thread;
const N: usize = 32_768;
struct BigStruct {
_data: [u8; N],
}
fn main() {
let (sender, receiver) = channel::<BigStruct>();
let thread1 = thread::spawn(move || {
sender.send(BigStruct { _data: [0u8; N] }).unwrap();
});
thread1.join().unwrap();
for _data in receiver.try_iter() {}
}