Auto merge of #30448 - alexcrichton:llvmup, r=nikomatsakis

These commits perform a few high-level changes with the goal of enabling i686 MSVC unwinding:

* LLVM is upgraded to pick up the new exception handling instructions and intrinsics for MSVC. This puts us somewhere along the 3.8 branch, but we should still be compatible with LLVM 3.7 for non-MSVC targets.
* All unwinding for MSVC targets (both 32 and 64-bit) is implemented in terms of this new LLVM support. I would like to also extend this to Windows GNU targets to drop the runtime dependencies we have on MinGW, but I'd like to land this first.
* Some tests were fixed up for i686 MSVC where necessary. The full test suite should now pass for that target.

In terms of landing this I plan to have this go through first, then verify that i686 MSVC works, and then enable `make check` on the bots for that target instead of just `make` as is done today.

Closes #25869
bors 2016-01-30 00:25:44 +00:00
commit 303892ee15
47 changed files with 1350 additions and 561 deletions

View File

@ -1039,6 +1039,8 @@ $(3)/test/run-make/%-$(1)-T-$(2)-H-$(3).ok: \
export INCLUDE := $$(CFG_MSVC_INCLUDE_PATH_$$(HOST_$(3)))
$(3)/test/run-make/%-$(1)-T-$(2)-H-$(3).ok: \
export LIB := $$(CFG_MSVC_LIB_PATH_$$(HOST_$(3)))
$(3)/test/run-make/%-$(1)-T-$(2)-H-$(3).ok: \
export MSVC_LIB := "$$(CFG_MSVC_LIB_$$(HOST_$(3)))"
$(3)/test/run-make/%-$(1)-T-$(2)-H-$(3).ok: \
$(S)src/test/run-make/%/Makefile \
$$(CSREQ$(1)_T_$(2)_H_$(3))

View File

@ -552,7 +552,15 @@ extern "rust-intrinsic" {
pub fn discriminant_value<T>(v: &T) -> u64;
/// Rust's "try catch" construct which invokes the function pointer `f` with
/// the data pointer `data`, returning the exception payload if an exception
/// is thrown (aka the thread panics).
/// the data pointer `data`.
///
/// The third pointer is a target-specific data pointer which is filled in
/// with the specifics of the exception that occurred. For example, on Unix
/// platforms this is a `*mut *mut T` which is filled in by the compiler, and
/// on MSVC it's `*mut [usize; 2]`. For more information see the compiler's
/// source as well as std's catch implementation.
#[cfg(not(stage0))]
pub fn try(f: fn(*mut u8), data: *mut u8, local_ptr: *mut u8) -> i32;
#[cfg(stage0)]
pub fn try(f: fn(*mut u8), data: *mut u8) -> *mut u8;
}
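
As a hedged illustration of the new signature (not code from this diff), a library-side wrapper might drive the three-argument intrinsic like this on a GNU-style target, where the third pointer is interpreted as a `*mut *mut u8` and the return value is 0 on success and 1 when an exception was caught, matching the shims translated later in this commit. The wrapper name `catch_panic` is made up:

    // Sketch only: `intrinsics::try` refers to the new intrinsic above.
    unsafe fn catch_panic(f: fn(*mut u8), data: *mut u8)
                          -> Result<(), *mut u8> {
        let mut payload: *mut u8 = ::std::ptr::null_mut();
        let slot = &mut payload as *mut *mut u8 as *mut u8;
        if intrinsics::try(f, data, slot) == 0 {
            Ok(())
        } else {
            Err(payload) // filled in by the unwinding machinery
        }
    }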

View File

@ -13,7 +13,6 @@ use target::Target;
pub fn target() -> Target {
let mut base = super::windows_msvc_base::opts();
base.cpu = "x86-64".to_string();
base.custom_unwind_resume = true;
Target {
llvm_target: "x86_64-pc-windows-msvc".to_string(),

View File

@ -79,14 +79,14 @@ impl Drop for ArchiveRO {
}
impl<'a> Iterator for Iter<'a> {
type Item = Child<'a>;
type Item = Result<Child<'a>, String>;
fn next(&mut self) -> Option<Child<'a>> {
fn next(&mut self) -> Option<Result<Child<'a>, String>> {
let ptr = unsafe { ::LLVMRustArchiveIteratorNext(self.ptr) };
if ptr.is_null() {
None
::last_error().map(Err)
} else {
Some(Child { ptr: ptr, _data: marker::PhantomData })
Some(Ok(Child { ptr: ptr, _data: marker::PhantomData }))
}
}
}
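
A sketch of what consuming the now-fallible iterator looks like (a hypothetical helper; the real callers later in this commit either `try!` each item or drop unreadable children with `filter_map`):

    // Each item is now a Result<Child, String> built from LLVM's last error.
    fn find_member<'a>(ar: &'a ArchiveRO, want: &str)
                       -> Result<Option<Child<'a>>, String> {
        for child in ar.iter() {
            let child = try!(child);
            if child.name() == Some(want) {
                return Ok(Some(child));
            }
        }
        Ok(None)
    }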

View File

@ -56,7 +56,7 @@ pub use self::DiagnosticSeverity::*;
pub use self::Linkage::*;
pub use self::DLLStorageClassTypes::*;
use std::ffi::CString;
use std::ffi::{CString, CStr};
use std::cell::RefCell;
use std::slice;
use libc::{c_uint, c_ushort, uint64_t, c_int, size_t, c_char};
@ -544,6 +544,9 @@ pub type SMDiagnosticRef = *mut SMDiagnostic_opaque;
#[allow(missing_copy_implementations)]
pub enum RustArchiveMember_opaque {}
pub type RustArchiveMemberRef = *mut RustArchiveMember_opaque;
#[allow(missing_copy_implementations)]
pub enum OperandBundleDef_opaque {}
pub type OperandBundleDefRef = *mut OperandBundleDef_opaque;
pub type DiagnosticHandler = unsafe extern "C" fn(DiagnosticInfoRef, *mut c_void);
pub type InlineAsmDiagHandler = unsafe extern "C" fn(SMDiagnosticRef, *const c_void, c_uint);
@ -1149,14 +1152,15 @@ extern {
Addr: ValueRef,
NumDests: c_uint)
-> ValueRef;
pub fn LLVMBuildInvoke(B: BuilderRef,
Fn: ValueRef,
Args: *const ValueRef,
NumArgs: c_uint,
Then: BasicBlockRef,
Catch: BasicBlockRef,
Name: *const c_char)
-> ValueRef;
pub fn LLVMRustBuildInvoke(B: BuilderRef,
Fn: ValueRef,
Args: *const ValueRef,
NumArgs: c_uint,
Then: BasicBlockRef,
Catch: BasicBlockRef,
Bundle: OperandBundleDefRef,
Name: *const c_char)
-> ValueRef;
pub fn LLVMRustBuildLandingPad(B: BuilderRef,
Ty: TypeRef,
PersFn: ValueRef,
@ -1167,6 +1171,31 @@ extern {
pub fn LLVMBuildResume(B: BuilderRef, Exn: ValueRef) -> ValueRef;
pub fn LLVMBuildUnreachable(B: BuilderRef) -> ValueRef;
pub fn LLVMRustBuildCleanupPad(B: BuilderRef,
ParentPad: ValueRef,
ArgCnt: c_uint,
Args: *const ValueRef,
Name: *const c_char) -> ValueRef;
pub fn LLVMRustBuildCleanupRet(B: BuilderRef,
CleanupPad: ValueRef,
UnwindBB: BasicBlockRef) -> ValueRef;
pub fn LLVMRustBuildCatchPad(B: BuilderRef,
ParentPad: ValueRef,
ArgCnt: c_uint,
Args: *const ValueRef,
Name: *const c_char) -> ValueRef;
pub fn LLVMRustBuildCatchRet(B: BuilderRef,
Pad: ValueRef,
BB: BasicBlockRef) -> ValueRef;
pub fn LLVMRustBuildCatchSwitch(Builder: BuilderRef,
ParentPad: ValueRef,
BB: BasicBlockRef,
NumHandlers: c_uint,
Name: *const c_char) -> ValueRef;
pub fn LLVMRustAddHandler(CatchSwitch: ValueRef,
Handler: BasicBlockRef);
pub fn LLVMRustSetPersonalityFn(B: BuilderRef, Pers: ValueRef);
/* Add a case to the switch instruction */
pub fn LLVMAddCase(Switch: ValueRef,
OnVal: ValueRef,
@ -1476,12 +1505,13 @@ extern {
/* Miscellaneous instructions */
pub fn LLVMBuildPhi(B: BuilderRef, Ty: TypeRef, Name: *const c_char)
-> ValueRef;
pub fn LLVMBuildCall(B: BuilderRef,
Fn: ValueRef,
Args: *const ValueRef,
NumArgs: c_uint,
Name: *const c_char)
-> ValueRef;
pub fn LLVMRustBuildCall(B: BuilderRef,
Fn: ValueRef,
Args: *const ValueRef,
NumArgs: c_uint,
Bundle: OperandBundleDefRef,
Name: *const c_char)
-> ValueRef;
pub fn LLVMBuildSelect(B: BuilderRef,
If: ValueRef,
Then: ValueRef,
@ -2126,6 +2156,12 @@ extern {
pub fn LLVMRustSetDataLayoutFromTargetMachine(M: ModuleRef,
TM: TargetMachineRef);
pub fn LLVMRustGetModuleDataLayout(M: ModuleRef) -> TargetDataRef;
pub fn LLVMRustBuildOperandBundleDef(Name: *const c_char,
Inputs: *const ValueRef,
NumInputs: c_uint)
-> OperandBundleDefRef;
pub fn LLVMRustFreeOperandBundleDef(Bundle: OperandBundleDefRef);
}
#[cfg(have_component_x86)]
@ -2404,6 +2440,48 @@ pub fn initialize_available_targets() {
init_pnacl();
}
pub fn last_error() -> Option<String> {
unsafe {
let cstr = LLVMRustGetLastError();
if cstr.is_null() {
None
} else {
let err = CStr::from_ptr(cstr).to_bytes();
let err = String::from_utf8_lossy(err).to_string();
libc::free(cstr as *mut _);
Some(err)
}
}
}
pub struct OperandBundleDef {
inner: OperandBundleDefRef,
}
impl OperandBundleDef {
pub fn new(name: &str, vals: &[ValueRef]) -> OperandBundleDef {
let name = CString::new(name).unwrap();
let def = unsafe {
LLVMRustBuildOperandBundleDef(name.as_ptr(),
vals.as_ptr(),
vals.len() as c_uint)
};
OperandBundleDef { inner: def }
}
pub fn raw(&self) -> OperandBundleDefRef {
self.inner
}
}
impl Drop for OperandBundleDef {
fn drop(&mut self) {
unsafe {
LLVMRustFreeOperandBundleDef(self.inner);
}
}
}
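
For reference, a minimal sketch of how these pieces fit together: wrap a cleanuppad value in a "funclet" bundle (the tag this PR uses for MSVC funclets) and hand its raw handle to the new call builder, where a null OperandBundleDefRef means no bundle. The helper name is hypothetical:

    // The bundle is freed on drop, after LLVM has copied it into the
    // built call instruction.
    fn call_in_funclet(b: BuilderRef, llfn: ValueRef, args: &[ValueRef],
                       cleanuppad: ValueRef) -> ValueRef {
        let bundle = OperandBundleDef::new("funclet", &[cleanuppad]);
        let name = CString::new("").unwrap();
        unsafe {
            LLVMRustBuildCall(b, llfn, args.as_ptr(), args.len() as c_uint,
                              bundle.raw(), name.as_ptr())
        }
    }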
// The module containing the native LLVM dependencies, generated by the build system
// Note that this must come after the rustllvm extern declaration so that
// parts of LLVM that rustllvm depends on aren't thrown away by the linker.

View File

@ -729,7 +729,7 @@ pub fn note_crate_name(err: &mut DiagnosticBuilder, name: &str) {
impl ArchiveMetadata {
fn new(ar: ArchiveRO) -> Option<ArchiveMetadata> {
let data = {
let section = ar.iter().find(|sect| {
let section = ar.iter().filter_map(|s| s.ok()).find(|sect| {
sect.name() == Some(METADATA_FILENAME)
});
match section {

View File

@ -124,6 +124,7 @@ impl<'a> ArchiveBuilder<'a> {
}
let archive = self.src_archive.as_ref().unwrap().as_ref().unwrap();
let ret = archive.iter()
.filter_map(|child| child.ok())
.filter(is_relevant_child)
.filter_map(|child| child.name())
.filter(|name| !self.removals.iter().any(|x| x == name))
@ -332,9 +333,15 @@ impl<'a> ArchiveBuilder<'a> {
// We skip any files explicitly desired for skipping, and we also skip
// all SYMDEF files as these are just magical placeholders which get
// re-created when we make a new archive anyway.
for file in archive.iter().filter(is_relevant_child) {
for file in archive.iter() {
let file = try!(file.map_err(string_to_io_error));
if !is_relevant_child(&file) {
continue
}
let filename = file.name().unwrap();
if skip(filename) { continue }
if skip(filename) {
continue
}
let filename = Path::new(filename).file_name().unwrap()
.to_str().unwrap();
@ -448,6 +455,7 @@ impl<'a> ArchiveBuilder<'a> {
unsafe {
if let Some(archive) = self.src_archive() {
for child in archive.iter() {
let child = try!(child.map_err(string_to_io_error));
let child_name = match child.name() {
Some(s) => s,
None => continue,
@ -475,10 +483,25 @@ impl<'a> ArchiveBuilder<'a> {
strings.push(name);
}
Addition::Archive { archive, archive_name: _, mut skip } => {
for child in archive.iter().filter(is_relevant_child) {
for child in archive.iter() {
let child = try!(child.map_err(string_to_io_error));
if !is_relevant_child(&child) {
continue
}
let child_name = child.name().unwrap();
if skip(child_name) { continue }
if skip(child_name) {
continue
}
// It appears that LLVM's archive writer is a little
// buggy if the name we pass down isn't just the
// filename component, so chop that off here and
// pass it in.
//
// See LLVM bug 25877 for more info.
let child_name = Path::new(child_name)
.file_name().unwrap()
.to_str().unwrap();
let name = try!(CString::new(child_name));
let m = llvm::LLVMRustArchiveMemberNew(ptr::null(),
name.as_ptr(),
@ -517,3 +540,7 @@ impl<'a> ArchiveBuilder<'a> {
}
}
}
fn string_to_io_error(s: String) -> io::Error {
io::Error::new(io::ErrorKind::Other, format!("bad archive: {}", s))
}

View File

@ -52,7 +52,7 @@ pub fn run(sess: &session::Session, llmod: ModuleRef,
link::each_linked_rlib(sess, &mut |_, path| {
let archive = ArchiveRO::open(&path).expect("wanted an rlib");
let bytecodes = archive.iter().filter_map(|child| {
child.name().map(|name| (name, child))
child.ok().and_then(|c| c.name().map(|name| (name, c)))
}).filter(|&(name, _)| name.ends_with("bytecode.deflate"));
for (name, data) in bytecodes {
let bc_encoded = data.data();

View File

@ -27,24 +27,16 @@ use std::collections::HashMap;
use std::ffi::{CStr, CString};
use std::fs;
use std::path::{Path, PathBuf};
use std::ptr;
use std::str;
use std::sync::{Arc, Mutex};
use std::sync::mpsc::channel;
use std::thread;
use libc::{self, c_uint, c_int, c_void};
use libc::{c_uint, c_int, c_void};
pub fn llvm_err(handler: &errors::Handler, msg: String) -> ! {
unsafe {
let cstr = llvm::LLVMRustGetLastError();
if cstr == ptr::null() {
panic!(handler.fatal(&msg[..]));
} else {
let err = CStr::from_ptr(cstr).to_bytes();
let err = String::from_utf8_lossy(err).to_string();
libc::free(cstr as *mut _);
panic!(handler.fatal(&format!("{}: {}", &msg[..], &err[..])));
}
match llvm::last_error() {
Some(err) => panic!(handler.fatal(&format!("{}: {}", msg, err))),
None => panic!(handler.fatal(&msg)),
}
}

View File

@ -1271,7 +1271,8 @@ fn compile_submatch_continue<'a, 'p, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
};
}
Variant(_, ref repr, _, _) => {
let (the_kind, val_opt) = adt::trans_switch(bcx, &**repr, val.val);
let (the_kind, val_opt) = adt::trans_switch(bcx, &repr,
val.val, true);
kind = the_kind;
if let Some(tval) = val_opt { test_val = tval; }
}

View File

@ -874,12 +874,15 @@ fn struct_llfields<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, st: &Struct<'tcx>,
///
/// This should ideally be less tightly tied to `_match`.
pub fn trans_switch<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
r: &Repr<'tcx>, scrutinee: ValueRef)
r: &Repr<'tcx>,
scrutinee: ValueRef,
range_assert: bool)
-> (_match::BranchKind, Option<ValueRef>) {
match *r {
CEnum(..) | General(..) |
RawNullablePointer { .. } | StructWrappedNullablePointer { .. } => {
(_match::Switch, Some(trans_get_discr(bcx, r, scrutinee, None)))
(_match::Switch, Some(trans_get_discr(bcx, r, scrutinee, None,
range_assert)))
}
Univariant(..) => {
// N.B.: Univariant means <= 1 enum variants (*not* == 1 variants).
@ -900,14 +903,18 @@ pub fn is_discr_signed<'tcx>(r: &Repr<'tcx>) -> bool {
/// Obtain the actual discriminant of a value.
pub fn trans_get_discr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, r: &Repr<'tcx>,
scrutinee: ValueRef, cast_to: Option<Type>)
scrutinee: ValueRef, cast_to: Option<Type>,
range_assert: bool)
-> ValueRef {
debug!("trans_get_discr r: {:?}", r);
let val = match *r {
CEnum(ity, min, max) => load_discr(bcx, ity, scrutinee, min, max),
CEnum(ity, min, max) => {
load_discr(bcx, ity, scrutinee, min, max, range_assert)
}
General(ity, ref cases, _) => {
let ptr = StructGEP(bcx, scrutinee, 0);
load_discr(bcx, ity, ptr, Disr(0), Disr(cases.len() as u64 - 1))
load_discr(bcx, ity, ptr, Disr(0), Disr(cases.len() as u64 - 1),
range_assert)
}
Univariant(..) => C_u8(bcx.ccx(), 0),
RawNullablePointer { nndiscr, nnty, .. } => {
@ -934,7 +941,8 @@ fn struct_wrapped_nullable_bitdiscr(bcx: Block, nndiscr: Disr, discrfield: &Disc
}
/// Helper for cases where the discriminant is simply loaded.
fn load_discr(bcx: Block, ity: IntType, ptr: ValueRef, min: Disr, max: Disr)
fn load_discr(bcx: Block, ity: IntType, ptr: ValueRef, min: Disr, max: Disr,
range_assert: bool)
-> ValueRef {
let llty = ll_inttype(bcx.ccx(), ity);
assert_eq!(val_ty(ptr), llty.ptr_to());
@ -944,7 +952,7 @@ fn load_discr(bcx: Block, ity: IntType, ptr: ValueRef, min: Disr, max: Disr)
let mask = Disr(!0u64 >> (64 - bits));
// For a (max) discr of -1, max will be `-1 as usize`, which overflows.
// However, that is fine here (it would still represent the full range),
if max.wrapping_add(Disr(1)) & mask == min & mask {
if max.wrapping_add(Disr(1)) & mask == min & mask || !range_assert {
// i.e., if the range is everything. The lo==hi case would be
// rejected by the LLVM verifier (it would mean either an
// empty set, which is impossible, or the entire range of the
@ -1223,10 +1231,14 @@ pub fn fold_variants<'blk, 'tcx, F>(bcx: Block<'blk, 'tcx>,
// runtime, so the basic block isn't actually unreachable, so we
// need to make it do something with defined behavior. In this case
// we just return early from the function.
//
// Note that this is also why the `trans_get_discr` below has
// `false` to indicate that loading the discriminant should
// not have a range assert.
let ret_void_cx = fcx.new_temp_block("enum-variant-iter-ret-void");
RetVoid(ret_void_cx, DebugLoc::None);
let discr_val = trans_get_discr(bcx, r, value, None);
let discr_val = trans_get_discr(bcx, r, value, None, false);
let llswitch = Switch(bcx, discr_val, ret_void_cx.llbb, cases.len());
let bcx_next = fcx.new_temp_block("enum-variant-iter-next");

View File

@ -559,7 +559,7 @@ pub fn iter_structural_ty<'blk, 'tcx, F>(cx: Block<'blk, 'tcx>,
// NB: we must hit the discriminant first so that structural
// comparison know not to proceed when the discriminants differ.
match adt::trans_switch(cx, &*repr, av) {
match adt::trans_switch(cx, &*repr, av, false) {
(_match::Single, None) => {
if n_variants != 0 {
assert!(n_variants == 1);
@ -984,23 +984,11 @@ pub fn invoke<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
/// currently uses SEH-ish unwinding with DWARF info tables to the side (same as
/// 64-bit MinGW) instead of "full SEH".
pub fn wants_msvc_seh(sess: &Session) -> bool {
sess.target.target.options.is_like_msvc && sess.target.target.arch == "x86"
sess.target.target.options.is_like_msvc
}
pub fn avoid_invoke(bcx: Block) -> bool {
// FIXME(#25869) currently SEH-based unwinding is pretty buggy in LLVM and
// is being overhauled as this is being written. Until that
// time such that upstream LLVM's implementation is more solid
// and we start binding it we need to skip invokes for any
// target which wants SEH-based unwinding.
if bcx.sess().no_landing_pads() || wants_msvc_seh(bcx.sess()) {
true
} else if bcx.is_lpad {
// Avoid using invoke if we are already inside a landing pad.
true
} else {
false
}
bcx.sess().no_landing_pads() || bcx.lpad.borrow().is_some()
}
pub fn need_invoke(bcx: Block) -> bool {
@ -1148,10 +1136,9 @@ pub fn init_local<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, local: &hir::Local) -> Blo
}
pub fn raw_block<'blk, 'tcx>(fcx: &'blk FunctionContext<'blk, 'tcx>,
is_lpad: bool,
llbb: BasicBlockRef)
-> Block<'blk, 'tcx> {
common::BlockS::new(llbb, is_lpad, None, fcx)
common::BlockS::new(llbb, None, fcx)
}
pub fn with_cond<'blk, 'tcx, F>(bcx: Block<'blk, 'tcx>, val: ValueRef, f: F) -> Block<'blk, 'tcx>
@ -1324,7 +1311,7 @@ fn memfill<'a, 'tcx>(b: &Builder<'a, 'tcx>, llptr: ValueRef, ty: Ty<'tcx>, byte:
let volatile = C_bool(ccx, false);
b.call(llintrinsicfn,
&[llptr, llzeroval, size, align, volatile],
None);
None, None);
}
/// In general, when we create an scratch value in an alloca, the
@ -1398,7 +1385,7 @@ pub fn alloca_dropped<'blk, 'tcx>(cx: Block<'blk, 'tcx>, ty: Ty<'tcx>, name: &st
// Block, which we do not have for `alloca_insert_pt`).
core_lifetime_emit(cx.ccx(), p, Lifetime::Start, |ccx, size, lifetime_start| {
let ptr = b.pointercast(p, Type::i8p(ccx));
b.call(lifetime_start, &[C_u64(ccx, size), ptr], None);
b.call(lifetime_start, &[C_u64(ccx, size), ptr], None, None);
});
memfill(&b, p, ty, adt::DTOR_DONE);
p
@ -1620,7 +1607,7 @@ pub fn new_fn_ctxt<'a, 'tcx>(ccx: &'a CrateContext<'a, 'tcx>,
alloca_insert_pt: Cell::new(None),
llreturn: Cell::new(None),
needs_ret_allocas: nested_returns,
personality: Cell::new(None),
landingpad_alloca: Cell::new(None),
caller_expects_out_pointer: uses_outptr,
lllocals: RefCell::new(NodeMap()),
llupvars: RefCell::new(NodeMap()),
@ -1899,7 +1886,7 @@ pub fn finish_fn<'blk, 'tcx>(fcx: &'blk FunctionContext<'blk, 'tcx>,
if !last_bcx.terminated.get() {
Br(last_bcx, llreturn, DebugLoc::None);
}
raw_block(fcx, false, llreturn)
raw_block(fcx, llreturn)
}
None => last_bcx,
};
@ -2713,11 +2700,12 @@ pub fn create_entry_wrapper(ccx: &CrateContext, sp: Span, main_llfn: ValueRef) {
(rust_main, args)
};
let result = llvm::LLVMBuildCall(bld,
start_fn,
args.as_ptr(),
args.len() as c_uint,
noname());
let result = llvm::LLVMRustBuildCall(bld,
start_fn,
args.as_ptr(),
args.len() as c_uint,
0 as *mut _,
noname());
llvm::LLVMBuildRet(bld, result);
}

View File

@ -150,7 +150,9 @@ pub fn Invoke(cx: Block,
cx.val_to_string(fn_),
args.iter().map(|a| cx.val_to_string(*a)).collect::<Vec<String>>().join(", "));
debug_loc.apply(cx.fcx);
B(cx).invoke(fn_, args, then, catch, attributes)
let lpad = cx.lpad.borrow();
let bundle = lpad.as_ref().and_then(|b| b.bundle());
B(cx).invoke(fn_, args, then, catch, bundle, attributes)
}
pub fn Unreachable(cx: Block) {
@ -914,7 +916,9 @@ pub fn Call(cx: Block,
return _UndefReturn(cx, fn_);
}
debug_loc.apply(cx.fcx);
B(cx).call(fn_, args, attributes)
let lpad = cx.lpad.borrow();
let bundle = lpad.as_ref().and_then(|b| b.bundle());
B(cx).call(fn_, args, bundle, attributes)
}
pub fn CallWithConv(cx: Block,
@ -928,7 +932,9 @@ pub fn CallWithConv(cx: Block,
return _UndefReturn(cx, fn_);
}
debug_loc.apply(cx.fcx);
B(cx).call_with_conv(fn_, args, conv, attributes)
let lpad = cx.lpad.borrow();
let bundle = lpad.as_ref().and_then(|b| b.bundle());
B(cx).call_with_conv(fn_, args, conv, bundle, attributes)
}
pub fn AtomicFence(cx: Block, order: AtomicOrdering, scope: SynchronizationScope) {
@ -1050,6 +1056,10 @@ pub fn SetCleanup(cx: Block, landing_pad: ValueRef) {
B(cx).set_cleanup(landing_pad)
}
pub fn SetPersonalityFn(cx: Block, f: ValueRef) {
B(cx).set_personality_fn(f)
}
pub fn Resume(cx: Block, exn: ValueRef) -> ValueRef {
check_not_terminated(cx);
terminate(cx, "Resume");
@ -1068,3 +1078,46 @@ pub fn AtomicRMW(cx: Block, op: AtomicBinOp,
order: AtomicOrdering) -> ValueRef {
B(cx).atomic_rmw(op, dst, src, order)
}
pub fn CleanupPad(cx: Block,
parent: Option<ValueRef>,
args: &[ValueRef]) -> ValueRef {
check_not_terminated(cx);
assert!(!cx.unreachable.get());
B(cx).cleanup_pad(parent, args)
}
pub fn CleanupRet(cx: Block,
cleanup: ValueRef,
unwind: Option<BasicBlockRef>) -> ValueRef {
check_not_terminated(cx);
terminate(cx, "CleanupRet");
B(cx).cleanup_ret(cleanup, unwind)
}
pub fn CatchPad(cx: Block,
parent: ValueRef,
args: &[ValueRef]) -> ValueRef {
check_not_terminated(cx);
assert!(!cx.unreachable.get());
B(cx).catch_pad(parent, args)
}
pub fn CatchRet(cx: Block, pad: ValueRef, unwind: BasicBlockRef) -> ValueRef {
check_not_terminated(cx);
terminate(cx, "CatchRet");
B(cx).catch_ret(pad, unwind)
}
pub fn CatchSwitch(cx: Block,
parent: Option<ValueRef>,
unwind: Option<BasicBlockRef>,
num_handlers: usize) -> ValueRef {
check_not_terminated(cx);
terminate(cx, "CatchSwitch");
B(cx).catch_switch(parent, unwind, num_handlers)
}
pub fn AddHandler(cx: Block, catch_switch: ValueRef, handler: BasicBlockRef) {
B(cx).add_handler(catch_switch, handler)
}
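
Taken together, a hedged sketch of the funclet shape these wrappers emit for an MSVC cleanup (mirroring the cleanup-scope changes later in this commit; the function name is hypothetical):

    // Enter a cleanup funclet, translate cleanups inside it, then resume
    // unwinding to the caller (`None` as the unwind destination).
    fn msvc_cleanup<'blk, 'tcx>(bcx: Block<'blk, 'tcx>) {
        let pad = CleanupPad(bcx, None, &[]);
        // ... scheduled cleanups would be translated here, inside the pad ...
        CleanupRet(bcx, pad, None);
    }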

View File

@ -12,7 +12,7 @@
use llvm;
use llvm::{CallConv, AtomicBinOp, AtomicOrdering, SynchronizationScope, AsmDialect, AttrBuilder};
use llvm::{Opcode, IntPredicate, RealPredicate, False};
use llvm::{Opcode, IntPredicate, RealPredicate, False, OperandBundleDef};
use llvm::{ValueRef, BasicBlockRef, BuilderRef, ModuleRef};
use trans::base;
use trans::common::*;
@ -158,6 +158,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
args: &[ValueRef],
then: BasicBlockRef,
catch: BasicBlockRef,
bundle: Option<&OperandBundleDef>,
attributes: Option<AttrBuilder>)
-> ValueRef {
self.count_insn("invoke");
@ -169,17 +170,19 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
.collect::<Vec<String>>()
.join(", "));
let bundle = bundle.as_ref().map(|b| b.raw()).unwrap_or(0 as *mut _);
unsafe {
let v = llvm::LLVMBuildInvoke(self.llbuilder,
llfn,
args.as_ptr(),
args.len() as c_uint,
then,
catch,
noname());
match attributes {
Some(a) => a.apply_callsite(v),
None => {}
let v = llvm::LLVMRustBuildInvoke(self.llbuilder,
llfn,
args.as_ptr(),
args.len() as c_uint,
then,
catch,
bundle,
noname());
if let Some(a) = attributes {
a.apply_callsite(v);
}
v
}
@ -771,7 +774,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
comment_text.as_ptr(), noname(), False,
False)
};
self.call(asm, &[], None);
self.call(asm, &[], None, None);
}
}
@ -796,11 +799,12 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
unsafe {
let v = llvm::LLVMInlineAsm(
fty.to_ref(), asm, cons, volatile, alignstack, dia as c_uint);
self.call(v, inputs, None)
self.call(v, inputs, None, None)
}
}
pub fn call(&self, llfn: ValueRef, args: &[ValueRef],
bundle: Option<&OperandBundleDef>,
attributes: Option<AttrBuilder>) -> ValueRef {
self.count_insn("call");
@ -837,21 +841,25 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
}
}
let bundle = bundle.as_ref().map(|b| b.raw()).unwrap_or(0 as *mut _);
unsafe {
let v = llvm::LLVMBuildCall(self.llbuilder, llfn, args.as_ptr(),
args.len() as c_uint, noname());
match attributes {
Some(a) => a.apply_callsite(v),
None => {}
let v = llvm::LLVMRustBuildCall(self.llbuilder, llfn, args.as_ptr(),
args.len() as c_uint, bundle,
noname());
if let Some(a) = attributes {
a.apply_callsite(v);
}
v
}
}
pub fn call_with_conv(&self, llfn: ValueRef, args: &[ValueRef],
conv: CallConv, attributes: Option<AttrBuilder>) -> ValueRef {
conv: CallConv,
bundle: Option<&OperandBundleDef>,
attributes: Option<AttrBuilder>) -> ValueRef {
self.count_insn("callwithconv");
let v = self.call(llfn, args, attributes);
let v = self.call(llfn, args, bundle, attributes);
llvm::SetInstructionCallConv(v, conv);
v
}
@ -948,8 +956,10 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
assert!((t as isize != 0));
let args: &[ValueRef] = &[];
self.count_insn("trap");
llvm::LLVMBuildCall(
self.llbuilder, t, args.as_ptr(), args.len() as c_uint, noname());
llvm::LLVMRustBuildCall(self.llbuilder, t,
args.as_ptr(), args.len() as c_uint,
0 as *mut _,
noname());
}
}
@ -983,6 +993,86 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
}
}
pub fn cleanup_pad(&self,
parent: Option<ValueRef>,
args: &[ValueRef]) -> ValueRef {
self.count_insn("cleanuppad");
let parent = parent.unwrap_or(0 as *mut _);
let name = CString::new("cleanuppad").unwrap();
let ret = unsafe {
llvm::LLVMRustBuildCleanupPad(self.llbuilder,
parent,
args.len() as c_uint,
args.as_ptr(),
name.as_ptr())
};
assert!(!ret.is_null(), "LLVM does not have support for cleanuppad");
return ret
}
pub fn cleanup_ret(&self, cleanup: ValueRef,
unwind: Option<BasicBlockRef>) -> ValueRef {
self.count_insn("cleanupret");
let unwind = unwind.unwrap_or(0 as *mut _);
let ret = unsafe {
llvm::LLVMRustBuildCleanupRet(self.llbuilder, cleanup, unwind)
};
assert!(!ret.is_null(), "LLVM does not have support for cleanupret");
return ret
}
pub fn catch_pad(&self,
parent: ValueRef,
args: &[ValueRef]) -> ValueRef {
self.count_insn("catchpad");
let name = CString::new("catchpad").unwrap();
let ret = unsafe {
llvm::LLVMRustBuildCatchPad(self.llbuilder, parent,
args.len() as c_uint, args.as_ptr(),
name.as_ptr())
};
assert!(!ret.is_null(), "LLVM does not have support for catchpad");
return ret
}
pub fn catch_ret(&self, pad: ValueRef, unwind: BasicBlockRef) -> ValueRef {
self.count_insn("catchret");
let ret = unsafe {
llvm::LLVMRustBuildCatchRet(self.llbuilder, pad, unwind)
};
assert!(!ret.is_null(), "LLVM does not have support for catchret");
return ret
}
pub fn catch_switch(&self,
parent: Option<ValueRef>,
unwind: Option<BasicBlockRef>,
num_handlers: usize) -> ValueRef {
self.count_insn("catchswitch");
let parent = parent.unwrap_or(0 as *mut _);
let unwind = unwind.unwrap_or(0 as *mut _);
let name = CString::new("catchswitch").unwrap();
let ret = unsafe {
llvm::LLVMRustBuildCatchSwitch(self.llbuilder, parent, unwind,
num_handlers as c_uint,
name.as_ptr())
};
assert!(!ret.is_null(), "LLVM does not have support for catchswitch");
return ret
}
pub fn add_handler(&self, catch_switch: ValueRef, handler: BasicBlockRef) {
unsafe {
llvm::LLVMRustAddHandler(catch_switch, handler);
}
}
pub fn set_personality_fn(&self, personality: ValueRef) {
unsafe {
llvm::LLVMRustSetPersonalityFn(self.llbuilder, personality);
}
}
// Atomic Operations
pub fn atomic_cmpxchg(&self, dst: ValueRef,
cmp: ValueRef, src: ValueRef,

View File

@ -123,7 +123,7 @@ use llvm::{BasicBlockRef, ValueRef};
use trans::base;
use trans::build;
use trans::common;
use trans::common::{Block, FunctionContext, NodeIdAndSpan};
use trans::common::{Block, FunctionContext, NodeIdAndSpan, LandingPad};
use trans::datum::{Datum, Lvalue};
use trans::debuginfo::{DebugLoc, ToDebugLoc};
use trans::glue;
@ -185,11 +185,17 @@ impl<'blk, 'tcx: 'blk> fmt::Debug for CleanupScopeKind<'blk, 'tcx> {
#[derive(Copy, Clone, PartialEq, Debug)]
pub enum EarlyExitLabel {
UnwindExit,
UnwindExit(UnwindKind),
ReturnExit,
LoopExit(ast::NodeId, usize)
}
#[derive(Copy, Clone, Debug)]
pub enum UnwindKind {
LandingPad,
CleanupPad(ValueRef),
}
#[derive(Copy, Clone)]
pub struct CachedEarlyExit {
label: EarlyExitLabel,
@ -372,16 +378,17 @@ impl<'blk, 'tcx> CleanupMethods<'blk, 'tcx> for FunctionContext<'blk, 'tcx> {
self.ccx.sess().bug("no loop scope found");
}
/// Returns a block to branch to which will perform all pending cleanups and then
/// break/continue (depending on `exit`) out of the loop with id `cleanup_scope`
/// Returns a block to branch to which will perform all pending cleanups and
/// then break/continue (depending on `exit`) out of the loop with id
/// `cleanup_scope`
fn normal_exit_block(&'blk self,
cleanup_scope: ast::NodeId,
exit: usize) -> BasicBlockRef {
self.trans_cleanups_to_exit_scope(LoopExit(cleanup_scope, exit))
}
/// Returns a block to branch to which will perform all pending cleanups and then return from
/// this function
/// Returns a block to branch to which will perform all pending cleanups and
/// then return from this function
fn return_exit_block(&'blk self) -> BasicBlockRef {
self.trans_cleanups_to_exit_scope(ReturnExit)
}
@ -400,7 +407,8 @@ impl<'blk, 'tcx> CleanupMethods<'blk, 'tcx> for FunctionContext<'blk, 'tcx> {
self.schedule_clean(cleanup_scope, drop as CleanupObj);
}
/// Schedules a (deep) drop of `val`, which is a pointer to an instance of `ty`
/// Schedules a (deep) drop of `val`, which is a pointer to an instance of
/// `ty`
fn schedule_drop_mem(&self,
cleanup_scope: ScopeId,
val: ValueRef,
@ -585,8 +593,9 @@ impl<'blk, 'tcx> CleanupMethods<'blk, 'tcx> for FunctionContext<'blk, 'tcx> {
self.scopes.borrow().iter().rev().any(|s| s.needs_invoke())
}
/// Returns a basic block to branch to in the event of a panic. This block will run the panic
/// cleanups and eventually invoke the LLVM `Resume` instruction.
/// Returns a basic block to branch to in the event of a panic. This block
/// will run the panic cleanups and eventually resume the exception that
/// caused the landing pad to be run.
fn get_landing_pad(&'blk self) -> BasicBlockRef {
let _icx = base::push_ctxt("get_landing_pad");
@ -682,9 +691,10 @@ impl<'blk, 'tcx> CleanupHelperMethods<'blk, 'tcx> for FunctionContext<'blk, 'tcx
f(self.scopes.borrow().last().unwrap())
}
/// Used when the caller wishes to jump to an early exit, such as a return, break, continue, or
/// unwind. This function will generate all cleanups between the top of the stack and the exit
/// `label` and return a basic block that the caller can branch to.
/// Used when the caller wishes to jump to an early exit, such as a return,
/// break, continue, or unwind. This function will generate all cleanups
/// between the top of the stack and the exit `label` and return a basic
/// block that the caller can branch to.
///
/// For example, if the current stack of cleanups were as follows:
///
@ -695,15 +705,15 @@ impl<'blk, 'tcx> CleanupHelperMethods<'blk, 'tcx> for FunctionContext<'blk, 'tcx
/// Custom 2
/// AST 24
///
/// and the `label` specifies a break from `Loop 23`, then this function would generate a
/// series of basic blocks as follows:
/// and the `label` specifies a break from `Loop 23`, then this function
/// would generate a series of basic blocks as follows:
///
/// Cleanup(AST 24) -> Cleanup(Custom 2) -> break_blk
///
/// where `break_blk` is the block specified in `Loop 23` as the target for breaks. The return
/// value would be the first basic block in that sequence (`Cleanup(AST 24)`). The caller could
/// then branch to `Cleanup(AST 24)` and it will perform all cleanups and finally branch to the
/// `break_blk`.
/// where `break_blk` is the block specified in `Loop 23` as the target for
/// breaks. The return value would be the first basic block in that sequence
/// (`Cleanup(AST 24)`). The caller could then branch to `Cleanup(AST 24)`
/// and it will perform all cleanups and finally branch to the `break_blk`.
fn trans_cleanups_to_exit_scope(&'blk self,
label: EarlyExitLabel)
-> BasicBlockRef {
@ -725,21 +735,30 @@ impl<'blk, 'tcx> CleanupHelperMethods<'blk, 'tcx> for FunctionContext<'blk, 'tcx
loop {
if self.scopes_len() == 0 {
match label {
UnwindExit => {
// Generate a block that will `Resume`.
let prev_bcx = self.new_block(true, "resume", None);
let personality = self.personality.get().expect(
"create_landing_pad() should have set this");
let lp = build::Load(prev_bcx, personality);
base::call_lifetime_end(prev_bcx, personality);
base::trans_unwind_resume(prev_bcx, lp);
prev_llbb = prev_bcx.llbb;
UnwindExit(val) => {
// Generate a block that will resume unwinding to the
// calling function
let bcx = self.new_block("resume", None);
match val {
UnwindKind::LandingPad => {
let addr = self.landingpad_alloca.get()
.unwrap();
let lp = build::Load(bcx, addr);
base::call_lifetime_end(bcx, addr);
base::trans_unwind_resume(bcx, lp);
}
UnwindKind::CleanupPad(_) => {
let pad = build::CleanupPad(bcx, None, &[]);
build::CleanupRet(bcx, pad, None);
}
}
prev_llbb = bcx.llbb;
break;
}
ReturnExit => {
prev_llbb = self.get_llreturn();
break;
break
}
LoopExit(id, _) => {
@ -754,12 +773,9 @@ impl<'blk, 'tcx> CleanupHelperMethods<'blk, 'tcx> for FunctionContext<'blk, 'tcx
// scope for this label. If so, we can stop popping scopes
// and branch to the cached label, since it contains the
// cleanups for any subsequent scopes.
match self.top_scope(|s| s.cached_early_exit(label)) {
Some(cleanup_block) => {
prev_llbb = cleanup_block;
break;
}
None => { }
if let Some(exit) = self.top_scope(|s| s.cached_early_exit(label)) {
prev_llbb = exit;
break;
}
// Pop off the scope, since we will be generating
@ -769,15 +785,11 @@ impl<'blk, 'tcx> CleanupHelperMethods<'blk, 'tcx> for FunctionContext<'blk, 'tcx
popped_scopes.push(self.pop_scope());
let scope = popped_scopes.last().unwrap();
match label {
UnwindExit | ReturnExit => { }
UnwindExit(..) | ReturnExit => { }
LoopExit(id, exit) => {
match scope.kind.early_exit_block(id, exit) {
Some(exitllbb) => {
prev_llbb = exitllbb;
break;
}
None => { }
if let Some(exit) = scope.kind.early_exit_block(id, exit) {
prev_llbb = exit;
break
}
}
}
@ -810,18 +822,17 @@ impl<'blk, 'tcx> CleanupHelperMethods<'blk, 'tcx> for FunctionContext<'blk, 'tcx
if !scope.cleanups.is_empty() {
let name = scope.block_name("clean");
debug!("generating cleanups for {}", name);
let bcx_in = self.new_block(label.is_unwind(),
&name[..],
None);
let bcx_in = self.new_block(&name[..], None);
let exit_label = label.start(bcx_in);
let mut bcx_out = bcx_in;
for cleanup in scope.cleanups.iter().rev() {
bcx_out = cleanup.trans(bcx_out,
scope.debug_loc);
bcx_out = cleanup.trans(bcx_out, scope.debug_loc);
}
build::Br(bcx_out, prev_llbb, DebugLoc::None);
exit_label.branch(bcx_out, prev_llbb);
prev_llbb = bcx_in.llbb;
scope.add_cached_early_exit(label, prev_llbb);
scope.add_cached_early_exit(exit_label, prev_llbb);
}
self.push_scope(scope);
}
@ -832,14 +843,14 @@ impl<'blk, 'tcx> CleanupHelperMethods<'blk, 'tcx> for FunctionContext<'blk, 'tcx
prev_llbb
}
/// Creates a landing pad for the top scope, if one does not exist. The landing pad will
/// perform all cleanups necessary for an unwind and then `resume` to continue error
/// propagation:
/// Creates a landing pad for the top scope, if one does not exist. The
/// landing pad will perform all cleanups necessary for an unwind and then
/// `resume` to continue error propagation:
///
/// landing_pad -> ... cleanups ... -> [resume]
///
/// (The cleanups and resume instruction are created by `trans_cleanups_to_exit_scope()`, not
/// in this function itself.)
/// (The cleanups and resume instruction are created by
/// `trans_cleanups_to_exit_scope()`, not in this function itself.)
fn get_or_create_landing_pad(&'blk self) -> BasicBlockRef {
let pad_bcx;
@ -850,47 +861,58 @@ impl<'blk, 'tcx> CleanupHelperMethods<'blk, 'tcx> for FunctionContext<'blk, 'tcx
let mut scopes = self.scopes.borrow_mut();
let last_scope = scopes.last_mut().unwrap();
match last_scope.cached_landing_pad {
Some(llbb) => { return llbb; }
Some(llbb) => return llbb,
None => {
let name = last_scope.block_name("unwind");
pad_bcx = self.new_block(true, &name[..], None);
pad_bcx = self.new_block(&name[..], None);
last_scope.cached_landing_pad = Some(pad_bcx.llbb);
}
}
}
// The landing pad return type (the type being propagated). Not sure what
// this represents but it's determined by the personality function and
// this is what the EH proposal example uses.
let llretty = Type::struct_(self.ccx,
&[Type::i8p(self.ccx), Type::i32(self.ccx)],
false);
};
let llpersonality = pad_bcx.fcx.eh_personality();
// The only landing pad clause will be 'cleanup'
let llretval = build::LandingPad(pad_bcx, llretty, llpersonality, 1);
let val = if base::wants_msvc_seh(self.ccx.sess()) {
// A cleanup pad requires a personality function to be specified, so
// we do that here explicitly (happens implicitly below through
// creation of the landingpad instruction). We then create a
// cleanuppad instruction which has no filters to run cleanup on all
// exceptions.
build::SetPersonalityFn(pad_bcx, llpersonality);
let llretval = build::CleanupPad(pad_bcx, None, &[]);
UnwindKind::CleanupPad(llretval)
} else {
// The landing pad return type (the type being propagated). Not sure
// what this represents but it's determined by the personality
// function and this is what the EH proposal example uses.
let llretty = Type::struct_(self.ccx,
&[Type::i8p(self.ccx), Type::i32(self.ccx)],
false);
// The landing pad block is a cleanup
build::SetCleanup(pad_bcx, llretval);
// The only landing pad clause will be 'cleanup'
let llretval = build::LandingPad(pad_bcx, llretty, llpersonality, 1);
// We store the retval in a function-central alloca, so that calls to
// Resume can find it.
match self.personality.get() {
Some(addr) => {
build::Store(pad_bcx, llretval, addr);
}
None => {
let addr = base::alloca(pad_bcx, common::val_ty(llretval), "");
base::call_lifetime_start(pad_bcx, addr);
self.personality.set(Some(addr));
build::Store(pad_bcx, llretval, addr);
}
}
// The landing pad block is a cleanup
build::SetCleanup(pad_bcx, llretval);
let addr = match self.landingpad_alloca.get() {
Some(addr) => addr,
None => {
let addr = base::alloca(pad_bcx, common::val_ty(llretval),
"");
base::call_lifetime_start(pad_bcx, addr);
self.landingpad_alloca.set(Some(addr));
addr
}
};
build::Store(pad_bcx, llretval, addr);
UnwindKind::LandingPad
};
// Generate the cleanup block and branch to it.
let cleanup_llbb = self.trans_cleanups_to_exit_scope(UnwindExit);
build::Br(pad_bcx, cleanup_llbb, DebugLoc::None);
let label = UnwindExit(val);
let cleanup_llbb = self.trans_cleanups_to_exit_scope(label);
label.branch(pad_bcx, cleanup_llbb);
return pad_bcx.llbb;
}
@ -992,10 +1014,53 @@ impl<'blk, 'tcx> CleanupScopeKind<'blk, 'tcx> {
}
impl EarlyExitLabel {
fn is_unwind(&self) -> bool {
/// Generates a branch going from `from_bcx` to `to_llbb` where `self` is
/// the exit label attached to the start of `from_bcx`.
///
/// Transitions from an exit label to other exit labels depend on the type
/// of label. For example with MSVC exceptions unwind exit labels will use
/// the `cleanupret` instruction instead of the `br` instruction.
fn branch(&self, from_bcx: Block, to_llbb: BasicBlockRef) {
if let UnwindExit(UnwindKind::CleanupPad(pad)) = *self {
build::CleanupRet(from_bcx, pad, Some(to_llbb));
} else {
build::Br(from_bcx, to_llbb, DebugLoc::None);
}
}
/// Generates the necessary instructions at the start of `bcx` to prepare
/// for the same kind of early exit label that `self` is.
///
/// This function will appropriately configure `bcx` based on the kind of
/// label this is. For UnwindExit labels, the `lpad` field of the block will
/// be set to `Some`, and for MSVC exceptions this function will generate a
/// `cleanuppad` instruction at the start of the block so it may be jumped
/// to in the future (e.g. so this block can be cached as an early exit).
///
/// Returns a new label which can be used to cache `bcx` in the list of
/// early exits.
fn start(&self, bcx: Block) -> EarlyExitLabel {
match *self {
UnwindExit => true,
_ => false
UnwindExit(UnwindKind::CleanupPad(..)) => {
let pad = build::CleanupPad(bcx, None, &[]);
*bcx.lpad.borrow_mut() = Some(LandingPad::msvc(pad));
UnwindExit(UnwindKind::CleanupPad(pad))
}
UnwindExit(UnwindKind::LandingPad) => {
*bcx.lpad.borrow_mut() = Some(LandingPad::gnu());
*self
}
label => label,
}
}
}
impl PartialEq for UnwindKind {
fn eq(&self, val: &UnwindKind) -> bool {
match (*self, *val) {
(UnwindKind::LandingPad, UnwindKind::LandingPad) |
(UnwindKind::CleanupPad(..), UnwindKind::CleanupPad(..)) => true,
_ => false,
}
}
}

View File

@ -17,7 +17,7 @@ pub use self::ExprOrMethodCall::*;
use session::Session;
use llvm;
use llvm::{ValueRef, BasicBlockRef, BuilderRef, ContextRef, TypeKind};
use llvm::{True, False, Bool};
use llvm::{True, False, Bool, OperandBundleDef};
use middle::cfg;
use middle::def::Def;
use middle::def_id::DefId;
@ -326,9 +326,13 @@ pub struct FunctionContext<'a, 'tcx: 'a> {
// we use a separate alloca for each return
pub needs_ret_allocas: bool,
// The value alloca'd for calls to upcalls.rust_personality. Used when
// outputting the resume instruction.
pub personality: Cell<Option<ValueRef>>,
// When working with landingpad-based exceptions this value is alloca'd and
// later loaded when using the resume instruction. This ends up being
// critical to chaining landing pads and reusing already-translated
// cleanups.
//
// Note that for cleanuppad-based exceptions this is not used.
pub landingpad_alloca: Cell<Option<ValueRef>>,
// True if the caller expects this fn to use the out pointer to
// return. Either way, your code should write into the slot llretslotptr
@ -424,7 +428,6 @@ impl<'a, 'tcx> FunctionContext<'a, 'tcx> {
}
pub fn new_block(&'a self,
is_lpad: bool,
name: &str,
opt_node_id: Option<ast::NodeId>)
-> Block<'a, 'tcx> {
@ -433,7 +436,7 @@ impl<'a, 'tcx> FunctionContext<'a, 'tcx> {
let llbb = llvm::LLVMAppendBasicBlockInContext(self.ccx.llcx(),
self.llfn,
name.as_ptr());
BlockS::new(llbb, is_lpad, opt_node_id, self)
BlockS::new(llbb, opt_node_id, self)
}
}
@ -441,13 +444,13 @@ impl<'a, 'tcx> FunctionContext<'a, 'tcx> {
name: &str,
node_id: ast::NodeId)
-> Block<'a, 'tcx> {
self.new_block(false, name, Some(node_id))
self.new_block(name, Some(node_id))
}
pub fn new_temp_block(&'a self,
name: &str)
-> Block<'a, 'tcx> {
self.new_block(false, name, None)
self.new_block(name, None)
}
pub fn join_blocks(&'a self,
@ -577,8 +580,9 @@ pub struct BlockS<'blk, 'tcx: 'blk> {
pub terminated: Cell<bool>,
pub unreachable: Cell<bool>,
// Is this block part of a landing pad?
pub is_lpad: bool,
// If this block is part of a landing pad, then this is `Some`, indicating
// what kind of landing pad it's in; otherwise this is `None`.
pub lpad: RefCell<Option<LandingPad>>,
// AST node-id associated with this block, if any. Used for
// debugging purposes only.
@ -593,7 +597,6 @@ pub type Block<'blk, 'tcx> = &'blk BlockS<'blk, 'tcx>;
impl<'blk, 'tcx> BlockS<'blk, 'tcx> {
pub fn new(llbb: BasicBlockRef,
is_lpad: bool,
opt_node_id: Option<ast::NodeId>,
fcx: &'blk FunctionContext<'blk, 'tcx>)
-> Block<'blk, 'tcx> {
@ -601,7 +604,7 @@ impl<'blk, 'tcx> BlockS<'blk, 'tcx> {
llbb: llbb,
terminated: Cell::new(false),
unreachable: Cell::new(false),
is_lpad: is_lpad,
lpad: RefCell::new(None),
opt_node_id: opt_node_id,
fcx: fcx
})
@ -658,6 +661,53 @@ impl<'blk, 'tcx> BlockS<'blk, 'tcx> {
}
}
/// A structure representing an active landing pad for the duration of a basic
/// block.
///
/// Each `Block` may contain an instance of this, indicating whether the block
/// is part of a landing pad or not. This is used to make decisions about whether
/// to emit `invoke` instructions (e.g. in a landing pad we don't continue to
/// use `invoke`) and also about various function call metadata.
///
/// For GNU exceptions (`landingpad` + `resume` instructions) this structure is
/// just a bunch of `None` instances (not too interesting), but for MSVC
/// exceptions (`cleanuppad` + `cleanupret` instructions) this contains data.
/// When inside of a landing pad, each function call in LLVM IR needs to be
/// annotated with which landing pad it's a part of. This is accomplished via
/// the `OperandBundleDef` value created for MSVC landing pads.
pub struct LandingPad {
cleanuppad: Option<ValueRef>,
operand: Option<OperandBundleDef>,
}
impl LandingPad {
pub fn gnu() -> LandingPad {
LandingPad { cleanuppad: None, operand: None }
}
pub fn msvc(cleanuppad: ValueRef) -> LandingPad {
LandingPad {
cleanuppad: Some(cleanuppad),
operand: Some(OperandBundleDef::new("funclet", &[cleanuppad])),
}
}
pub fn bundle(&self) -> Option<&OperandBundleDef> {
self.operand.as_ref()
}
}
impl Clone for LandingPad {
fn clone(&self) -> LandingPad {
LandingPad {
cleanuppad: self.cleanuppad,
operand: self.cleanuppad.map(|p| {
OperandBundleDef::new("funclet", &[p])
}),
}
}
}
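
A condensed sketch of how a call site consumes this, the same pattern the `Call`/`Invoke` changes earlier in this commit use (`B` is the builder accessor from `trans::build`, not in scope in this file):

    // Any call emitted while the block is inside an MSVC landing pad
    // carries the pad's "funclet" bundle; GNU pads yield no bundle.
    fn call_with_lpad<'blk, 'tcx>(cx: Block<'blk, 'tcx>,
                                  llfn: ValueRef,
                                  args: &[ValueRef]) -> ValueRef {
        let lpad = cx.lpad.borrow();
        let bundle = lpad.as_ref().and_then(|p| p.bundle());
        B(cx).call(llfn, args, bundle, None)
    }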
pub struct Result<'blk, 'tcx: 'blk> {
pub bcx: Block<'blk, 'tcx>,
pub val: ValueRef

View File

@ -872,6 +872,16 @@ fn declare_intrinsic(ccx: &CrateContext, key: &str) -> Option<ValueRef> {
return Some(f);
}
);
($name:expr, fn(...) -> $ret:expr) => (
if key == $name {
let f = declare::declare_cfn(ccx, $name,
Type::variadic_func(&[], &$ret),
ccx.tcx().mk_nil());
llvm::SetUnnamedAddr(f, false);
ccx.intrinsics().borrow_mut().insert($name, f.clone());
return Some(f);
}
);
($name:expr, fn($($arg:expr),*) -> $ret:expr) => (
if key == $name {
let f = declare::declare_cfn(ccx, $name, Type::func(&[$($arg),*], &$ret),
@ -880,7 +890,7 @@ fn declare_intrinsic(ccx: &CrateContext, key: &str) -> Option<ValueRef> {
ccx.intrinsics().borrow_mut().insert($name, f.clone());
return Some(f);
}
)
);
}
macro_rules! mk_struct {
($($field_ty:expr),*) => (Type::struct_(ccx, &[$($field_ty),*], false))
@ -908,6 +918,7 @@ fn declare_intrinsic(ccx: &CrateContext, key: &str) -> Option<ValueRef> {
ifn!("llvm.trap", fn() -> void);
ifn!("llvm.debugtrap", fn() -> void);
ifn!("llvm.frameaddress", fn(t_i32) -> i8p);
ifn!("llvm.powi.f32", fn(t_f32, t_i32) -> t_f32);
ifn!("llvm.powi.f64", fn(t_f64, t_i32) -> t_f64);
@ -1008,6 +1019,9 @@ fn declare_intrinsic(ccx: &CrateContext, key: &str) -> Option<ValueRef> {
ifn!("llvm.expect.i1", fn(i1, i1) -> i1);
ifn!("llvm.eh.typeid.for", fn(i8p) -> t_i32);
ifn!("llvm.localescape", fn(...) -> void);
ifn!("llvm.localrecover", fn(i8p, i8p, t_i32) -> i8p);
ifn!("llvm.x86.seh.recoverfp", fn(i8p, i8p) -> i8p);
// Some intrinsics were introduced in later versions of LLVM, but they have
// fallbacks in libc or libm and such.

View File

@ -2097,7 +2097,8 @@ fn trans_imm_cast<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
let datum = unpack_datum!(
bcx, datum.to_lvalue_datum(bcx, "trans_imm_cast", expr.id));
let llexpr_ptr = datum.to_llref();
let discr = adt::trans_get_discr(bcx, &*repr, llexpr_ptr, Some(Type::i64(ccx)));
let discr = adt::trans_get_discr(bcx, &*repr, llexpr_ptr,
Some(Type::i64(ccx)), true);
ll_t_in = val_ty(discr);
(discr, adt::is_discr_signed(&*repr))
} else {

View File

@ -844,7 +844,8 @@ pub fn trans_rust_fn_with_foreign_abi<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
debug!("calling llrustfn = {}, t = {:?}",
ccx.tn().val_to_string(llrustfn), t);
let attributes = attributes::from_fn_type(ccx, t);
let llrust_ret_val = builder.call(llrustfn, &llrust_args, Some(attributes));
let llrust_ret_val = builder.call(llrustfn, &llrust_args,
None, Some(attributes));
// Get the return value where the foreign fn expects it.
let llforeign_ret_ty = match tys.fn_ty.ret_ty.cast {

View File

@ -316,11 +316,11 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
// For `try` we need some custom control flow
if &name[..] == "try" {
if let callee::ArgExprs(ref exprs) = args {
let (func, data) = if exprs.len() != 2 {
ccx.sess().bug("expected two exprs as arguments for \
let (func, data, local_ptr) = if exprs.len() != 3 {
ccx.sess().bug("expected three exprs as arguments for \
`try` intrinsic");
} else {
(&exprs[0], &exprs[1])
(&exprs[0], &exprs[1], &exprs[2])
};
// translate arguments
@ -328,6 +328,9 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
let func = unpack_datum!(bcx, func.to_rvalue_datum(bcx, "func"));
let data = unpack_datum!(bcx, expr::trans(bcx, data));
let data = unpack_datum!(bcx, data.to_rvalue_datum(bcx, "data"));
let local_ptr = unpack_datum!(bcx, expr::trans(bcx, local_ptr));
let local_ptr = local_ptr.to_rvalue_datum(bcx, "local_ptr");
let local_ptr = unpack_datum!(bcx, local_ptr);
let dest = match dest {
expr::SaveIn(d) => d,
@ -336,7 +339,7 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
};
// do the invoke
bcx = try_intrinsic(bcx, func.val, data.val, dest,
bcx = try_intrinsic(bcx, func.val, data.val, local_ptr.val, dest,
call_debug_location);
fcx.pop_and_trans_custom_cleanup_scope(bcx, cleanup_scope);
@ -655,7 +658,8 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
match val_ty.sty {
ty::TyEnum(..) => {
let repr = adt::represent_type(ccx, *val_ty);
adt::trans_get_discr(bcx, &*repr, llargs[0], Some(llret_ty))
adt::trans_get_discr(bcx, &*repr, llargs[0],
Some(llret_ty), true)
}
_ => C_null(llret_ty)
}
@ -1044,6 +1048,7 @@ fn with_overflow_intrinsic<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
fn try_intrinsic<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
func: ValueRef,
data: ValueRef,
local_ptr: ValueRef,
dest: ValueRef,
dloc: DebugLoc) -> Block<'blk, 'tcx> {
if bcx.sess().no_landing_pads() {
@ -1051,142 +1056,115 @@ fn try_intrinsic<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
Store(bcx, C_null(Type::i8p(bcx.ccx())), dest);
bcx
} else if wants_msvc_seh(bcx.sess()) {
trans_msvc_try(bcx, func, data, dest, dloc)
trans_msvc_try(bcx, func, data, local_ptr, dest, dloc)
} else {
trans_gnu_try(bcx, func, data, dest, dloc)
trans_gnu_try(bcx, func, data, local_ptr, dest, dloc)
}
}
// MSVC's definition of the `rust_try` function. The exact implementation here
// is a little different than the GNU (standard) version below, not only because
// of the personality function but also because of the other fiddly bits about
// SEH. LLVM also currently requires us to structure this in a very particular
// way as explained below.
// MSVC's definition of the `rust_try` function.
//
// Like with the GNU version we generate a shim wrapper
// This implementation uses the new exception handling instructions in LLVM
// which have support in LLVM for SEH on MSVC targets. Although these
// instructions are meant to work for all targets, as of this writing LLVM
// does not recommend using these new instructions, as the old ones are
// still better optimized.
fn trans_msvc_try<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
func: ValueRef,
data: ValueRef,
local_ptr: ValueRef,
dest: ValueRef,
dloc: DebugLoc) -> Block<'blk, 'tcx> {
let llfn = get_rust_try_fn(bcx.fcx, &mut |try_fn_ty, output| {
let llfn = get_rust_try_fn(bcx.fcx, &mut |bcx| {
let ccx = bcx.ccx();
let dloc = DebugLoc::None;
let rust_try = declare::define_internal_rust_fn(ccx, "__rust_try",
try_fn_ty);
let (fcx, block_arena);
block_arena = TypedArena::new();
fcx = new_fn_ctxt(ccx, rust_try, ast::DUMMY_NODE_ID, false,
output, ccx.tcx().mk_substs(Substs::trans_empty()),
None, &block_arena);
let bcx = init_function(&fcx, true, output);
let then = fcx.new_temp_block("then");
let catch = fcx.new_temp_block("catch");
let catch_return = fcx.new_temp_block("catch-return");
let catch_resume = fcx.new_temp_block("catch-resume");
let personality = fcx.eh_personality();
let eh_typeid_for = ccx.get_intrinsic(&"llvm.eh.typeid.for");
let rust_try_filter = match bcx.tcx().lang_items.msvc_try_filter() {
Some(did) => callee::trans_fn_ref(ccx, did, ExprId(0),
bcx.fcx.param_substs).val,
None => bcx.sess().bug("msvc_try_filter not defined"),
};
SetPersonalityFn(bcx, bcx.fcx.eh_personality());
// Type indicator for the exception being thrown, not entirely sure
// what's going on here but it's what all the examples in LLVM use.
let lpad_ty = Type::struct_(ccx, &[Type::i8p(ccx), Type::i32(ccx)],
false);
let normal = bcx.fcx.new_temp_block("normal");
let catchswitch = bcx.fcx.new_temp_block("catchswitch");
let catchpad = bcx.fcx.new_temp_block("catchpad");
let caught = bcx.fcx.new_temp_block("caught");
llvm::SetFunctionAttribute(rust_try, llvm::Attribute::NoInline);
llvm::SetFunctionAttribute(rust_try, llvm::Attribute::OptimizeNone);
let func = llvm::get_param(rust_try, 0);
let data = llvm::get_param(rust_try, 1);
let func = llvm::get_param(bcx.fcx.llfn, 0);
let data = llvm::get_param(bcx.fcx.llfn, 1);
let local_ptr = llvm::get_param(bcx.fcx.llfn, 2);
// Invoke the function, specifying our two temporary landing pads as the
// exit point. After the invoke we've terminated our basic block.
Invoke(bcx, func, &[data], then.llbb, catch.llbb, None, dloc);
// All the magic happens in this landing pad, and this is basically the
// only landing pad in rust tagged with "catch" to indicate that we're
// catching an exception. The other catch handlers in the GNU version
// below just catch *all* exceptions, but that's because most exceptions
// are already filtered out by the gnu personality function.
// We're generating an IR snippet that looks like:
//
// For MSVC we're just using a standard personality function that we
// can't customize (e.g. _except_handler3 or __C_specific_handler), so
// we need to do the exception filtering ourselves. This is currently
// performed by the `__rust_try_filter` function. This function,
// specified in the landingpad instruction, will be invoked by Windows
// SEH routines and will return whether the exception in question can be
// caught (aka the Rust runtime is the one that threw the exception).
// declare i32 @rust_try(%func, %data, %ptr) {
// %slot = alloca i8*
// call @llvm.localescape(%slot)
// store %ptr, %slot
// invoke %func(%data) to label %normal unwind label %catchswitch
//
// To get this to compile (currently LLVM segfaults if it's not in this
// particular structure), when the landingpad is executing we test to
// make sure that the ID of the exception being thrown is indeed the one
// that we were expecting. If it's not, we resume the exception, and
// otherwise we return the pointer that we got. Full disclosure: it's not
// clear to me what this `llvm.eh.typeid` stuff is doing *other* than
// just allowing LLVM to compile this file without segfaulting. I would
// expect the entire landing pad to just be:
// normal:
// ret i32 0
//
// %vals = landingpad ...
// %ehptr = extractvalue { i8*, i32 } %vals, 0
// ret i8* %ehptr
// catchswitch:
// %cs = catchswitch within none [%catchpad] unwind to caller
//
// but apparently LLVM chokes on this, so we do the more complicated
// thing to placate it.
let vals = LandingPad(catch, lpad_ty, personality, 1);
let rust_try_filter = BitCast(catch, rust_try_filter, Type::i8p(ccx));
AddClause(catch, vals, rust_try_filter);
let ehptr = ExtractValue(catch, vals, 0);
let sel = ExtractValue(catch, vals, 1);
let filter_sel = Call(catch, eh_typeid_for, &[rust_try_filter], None,
dloc);
let is_filter = ICmp(catch, llvm::IntEQ, sel, filter_sel, dloc);
CondBr(catch, is_filter, catch_return.llbb, catch_resume.llbb, dloc);
// catchpad:
// %tok = catchpad within %cs [%rust_try_filter]
// catchret from %tok to label %caught
//
// caught:
// ret i32 1
// }
//
// This structure follows the basic usage of the instructions in LLVM
// (see their documentation/test cases for examples), but a
// perhaps-surprising part here is the usage of the `localescape`
// intrinsic. This is used to allow the filter function (also generated
// here) to access variables on the stack of this intrinsic. This
// ability enables us to transfer information about the exception being
// thrown to this point, where we're catching the exception.
//
// More information can be found in libstd's seh.rs implementation.
let slot = Alloca(bcx, Type::i8p(ccx), "slot");
let localescape = ccx.get_intrinsic(&"llvm.localescape");
Call(bcx, localescape, &[slot], None, dloc);
Store(bcx, local_ptr, slot);
Invoke(bcx, func, &[data], normal.llbb, catchswitch.llbb, None, dloc);
// Our "catch-return" basic block is where we've determined that we
// actually need to catch this exception, in which case we just return
// the exception pointer.
Ret(catch_return, ehptr, dloc);
Ret(normal, C_i32(ccx, 0), dloc);
// The "catch-resume" block is where we're running this landing pad but
// we actually need to not catch the exception, so just resume the
// exception to return.
trans_unwind_resume(catch_resume, vals);
let cs = CatchSwitch(catchswitch, None, None, 1);
AddHandler(catchswitch, cs, catchpad.llbb);
// On the successful branch we just return null.
Ret(then, C_null(Type::i8p(ccx)), dloc);
let filter = generate_filter_fn(bcx.fcx, bcx.fcx.llfn);
let filter = BitCast(catchpad, filter, Type::i8p(ccx));
let tok = CatchPad(catchpad, cs, &[filter]);
CatchRet(catchpad, tok, caught.llbb);
return rust_try
Ret(caught, C_i32(ccx, 1), dloc);
});
// Note that no invoke is used here because by definition this function
// can't panic (that's what it's catching).
let ret = Call(bcx, llfn, &[func, data], None, dloc);
let ret = Call(bcx, llfn, &[func, data, local_ptr], None, dloc);
Store(bcx, ret, dest);
return bcx;
return bcx
}
// Definition of the standard "try" function for Rust using the GNU-like model
// of exceptions (e.g. the normal semantics of LLVM's landingpad and invoke
// instructions).
//
// This translation is a little surprising because
// we always call a shim function instead of inlining the call to `invoke`
// manually here. This is done because in LLVM we're only allowed to have one
// personality per function definition. The call to the `try` intrinsic is
// being inlined into the function calling it, and that function may already
// have other personality functions in play. By calling a shim we're
// guaranteed that our shim will have the right personality function.
//
// This translation is a little surprising because we always call a shim
// function instead of inlining the call to `invoke` manually here. This is done
// because in LLVM we're only allowed to have one personality per function
// definition. The call to the `try` intrinsic is being inlined into the
// function calling it, and that function may already have other personality
// functions in play. By calling a shim we're guaranteed that our shim will have
// the right personality function.
fn trans_gnu_try<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
func: ValueRef,
data: ValueRef,
local_ptr: ValueRef,
dest: ValueRef,
dloc: DebugLoc) -> Block<'blk, 'tcx> {
let llfn = get_rust_try_fn(bcx.fcx, &mut |try_fn_ty, output| {
let llfn = get_rust_try_fn(bcx.fcx, &mut |bcx| {
let ccx = bcx.ccx();
let dloc = DebugLoc::None;
@ -1196,60 +1174,82 @@ fn trans_gnu_try<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
// invoke %func(%args...) normal %normal unwind %catch
//
// normal:
// ret 0
//
// catch:
// (ptr, _) = landingpad
// store ptr, %local_ptr
// ret 1
//
// Note that the `local_ptr` data passed into the `try` intrinsic is
// expected to be `*mut *mut u8` for this to actually work, but that's
// managed by the standard library.
let rust_try = declare::define_internal_rust_fn(ccx, "__rust_try", try_fn_ty);
attributes::emit_uwtable(rust_try, true);
attributes::emit_uwtable(bcx.fcx.llfn, true);
let catch_pers = match bcx.tcx().lang_items.eh_personality_catch() {
Some(did) => callee::trans_fn_ref(ccx, did, ExprId(0),
bcx.fcx.param_substs).val,
None => bcx.tcx().sess.bug("eh_personality_catch not defined"),
};
let (fcx, block_arena);
block_arena = TypedArena::new();
fcx = new_fn_ctxt(ccx, rust_try, ast::DUMMY_NODE_ID, false,
output, ccx.tcx().mk_substs(Substs::trans_empty()),
None, &block_arena);
let bcx = init_function(&fcx, true, output);
let then = bcx.fcx.new_temp_block("then");
let catch = bcx.fcx.new_temp_block("catch");
let func = llvm::get_param(rust_try, 0);
let data = llvm::get_param(rust_try, 1);
let func = llvm::get_param(bcx.fcx.llfn, 0);
let data = llvm::get_param(bcx.fcx.llfn, 1);
let local_ptr = llvm::get_param(bcx.fcx.llfn, 2);
Invoke(bcx, func, &[data], then.llbb, catch.llbb, None, dloc);
Ret(then, C_null(Type::i8p(ccx)), dloc);
Ret(then, C_i32(ccx, 0), dloc);
// Type indicator for the exception being thrown.
//
// The first value in this tuple is a pointer to the exception object
// being thrown. The second value is a "selector" indicating which of
// the landing pad clauses the exception's type had been matched to.
// rust_try ignores the selector.
let lpad_ty = Type::struct_(ccx, &[Type::i8p(ccx), Type::i32(ccx)],
false);
let vals = LandingPad(catch, lpad_ty, catch_pers, 1);
AddClause(catch, vals, C_null(Type::i8p(ccx)));
let ptr = ExtractValue(catch, vals, 0);
Ret(catch, ptr, dloc);
fcx.cleanup();
return rust_try
Store(catch, ptr, BitCast(catch, local_ptr, Type::i8p(ccx).ptr_to()));
Ret(catch, C_i32(ccx, 1), dloc);
});
// Note that no invoke is used here because by definition this function
// can't panic (that's what it's catching).
let ret = Call(bcx, llfn, &[func, data], None, dloc);
let ret = Call(bcx, llfn, &[func, data, local_ptr], None, dloc);
Store(bcx, ret, dest);
return bcx;
}
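
Since on this GNU path the third argument is really a `*mut *mut u8`, the caller's decode step can be sketched as follows (a minimal sketch with illustrative names, not the real std code):

// rc and slot as they come back from the intrinsic on the GNU path:
// rc == 0 means no unwind happened, rc == 1 means the slot now holds the
// exception object pointer written by the catch block above.
unsafe fn decode(rc: i32, slot: *mut u8) -> Result<(), *mut u8> {
    if rc == 0 {
        Ok(())
    } else {
        Err(*(slot as *mut *mut u8))
    }
}

fn main() {
    // Pretend the landing pad stored this "exception pointer" for us.
    let mut exception: *mut u8 = 0xdead as *mut u8;
    let slot = &mut exception as *mut *mut u8 as *mut u8;
    assert_eq!(unsafe { decode(0, slot) }, Ok(()));
    assert_eq!(unsafe { decode(1, slot) }, Err(0xdead as *mut u8));
}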
// Helper to generate the `Ty` associated with `rust_try`
// Helper function to give a Block to a closure to translate a shim function.
// This is currently primarily used for the `try` intrinsic functions above.
fn gen_fn<'a, 'tcx>(fcx: &FunctionContext<'a, 'tcx>,
name: &str,
ty: Ty<'tcx>,
output: ty::FnOutput<'tcx>,
trans: &mut for<'b> FnMut(Block<'b, 'tcx>))
-> ValueRef {
let ccx = fcx.ccx;
let llfn = declare::define_internal_rust_fn(ccx, name, ty);
let (fcx, block_arena);
block_arena = TypedArena::new();
fcx = new_fn_ctxt(ccx, llfn, ast::DUMMY_NODE_ID, false,
output, ccx.tcx().mk_substs(Substs::trans_empty()),
None, &block_arena);
let bcx = init_function(&fcx, true, output);
trans(bcx);
fcx.cleanup();
return llfn
}
// Helper function used to get a handle to the `__rust_try` function used to
// catch exceptions.
//
// This function is only generated once and is then cached.
fn get_rust_try_fn<'a, 'tcx>(fcx: &FunctionContext<'a, 'tcx>,
f: &mut FnMut(Ty<'tcx>,
ty::FnOutput<'tcx>) -> ValueRef)
trans: &mut for<'b> FnMut(Block<'b, 'tcx>))
-> ValueRef {
let ccx = fcx.ccx;
if let Some(llfn) = *ccx.rust_try_fn().borrow() {
@ -1269,21 +1269,125 @@ fn get_rust_try_fn<'a, 'tcx>(fcx: &FunctionContext<'a, 'tcx>,
}),
});
let fn_ty = tcx.mk_fn(None, fn_ty);
let output = ty::FnOutput::FnConverging(i8p);
let output = ty::FnOutput::FnConverging(tcx.types.i32);
let try_fn_ty = tcx.mk_bare_fn(ty::BareFnTy {
unsafety: hir::Unsafety::Unsafe,
abi: abi::Rust,
sig: ty::Binder(ty::FnSig {
inputs: vec![fn_ty, i8p],
inputs: vec![fn_ty, i8p, i8p],
output: output,
variadic: false,
}),
});
let rust_try = f(tcx.mk_fn(None, try_fn_ty), output);
let rust_try = gen_fn(fcx, "__rust_try", tcx.mk_fn(None, try_fn_ty), output,
trans);
*ccx.rust_try_fn().borrow_mut() = Some(rust_try);
return rust_try
}
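
The caching above is the usual generate-once pattern around a `RefCell<Option<_>>`; a self-contained sketch of the same shape, with `usize` standing in for `ValueRef`:

use std::cell::RefCell;

// Generate-once: hand back the cached value if present, otherwise run the
// expensive generation step and remember its result.
fn get_or_generate<F: FnOnce() -> usize>(cache: &RefCell<Option<usize>>,
                                         make: F) -> usize {
    if let Some(v) = *cache.borrow() {
        return v;
    }
    let v = make();
    *cache.borrow_mut() = Some(v);
    v
}

fn main() {
    let cache = RefCell::new(None);
    let a = get_or_generate(&cache, || 42);
    let b = get_or_generate(&cache, || unreachable!("generated only once"));
    assert_eq!(a, b);
}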
// For MSVC-style exceptions (SEH), the compiler generates a filter function
// which is used to determine whether an exception is being caught (e.g. if it's
// a Rust exception or some other).
//
// This function is used to generate said filter function. The shim generated
// here is actually just a thin wrapper to call the real implementation in the
// standard library itself. For reasons as to why, see seh.rs in the standard
// library.
fn generate_filter_fn<'a, 'tcx>(fcx: &FunctionContext<'a, 'tcx>,
rust_try_fn: ValueRef)
-> ValueRef {
let ccx = fcx.ccx;
let tcx = ccx.tcx();
let dloc = DebugLoc::None;
let rust_try_filter = match ccx.tcx().lang_items.msvc_try_filter() {
Some(did) => callee::trans_fn_ref(ccx, did, ExprId(0),
fcx.param_substs).val,
None => ccx.sess().bug("msvc_try_filter not defined"),
};
let output = ty::FnOutput::FnConverging(tcx.types.i32);
let i8p = tcx.mk_mut_ptr(tcx.types.i8);
let frameaddress = ccx.get_intrinsic(&"llvm.frameaddress");
let recoverfp = ccx.get_intrinsic(&"llvm.x86.seh.recoverfp");
let localrecover = ccx.get_intrinsic(&"llvm.localrecover");
// On all platforms, once we have the EXCEPTION_POINTERS handle as well as
// the base pointer, we follow the standard layout of:
//
// block:
// %parentfp = call i8* llvm.x86.seh.recoverfp(@rust_try_fn, %bp)
// %arg = call i8* llvm.localrecover(@rust_try_fn, %parentfp, 0)
// %ret = call i32 @the_real_filter_function(%ehptrs, %arg)
// ret i32 %ret
//
// The recoverfp intrinsic is used to recover the frame pointer of the
// `rust_try_fn` function, which is then in turn passed to the
// `localrecover` intrinsic (pairing with the `localescape` intrinsic
// mentioned above). Putting all this together means that we now have a
// handle to the arguments passed into the `try` function, allowing writing
// to the stack over there.
//
// For more info, see seh.rs in the standard library.
let do_trans = |bcx: Block, ehptrs, base_pointer| {
let rust_try_fn = BitCast(bcx, rust_try_fn, Type::i8p(ccx));
let parentfp = Call(bcx, recoverfp, &[rust_try_fn, base_pointer],
None, dloc);
let arg = Call(bcx, localrecover,
&[rust_try_fn, parentfp, C_i32(ccx, 0)], None, dloc);
let ret = Call(bcx, rust_try_filter, &[ehptrs, arg], None, dloc);
Ret(bcx, ret, dloc);
};
if ccx.tcx().sess.target.target.arch == "x86" {
// On x86 the filter function doesn't actually receive any arguments.
// Instead the %ebp register contains some contextual information.
//
// Unfortunately I don't know of any great documentation as to what's
// going on here; all I can say is that there are a few test cases in
// LLVM's test suite which follow this pattern of instructions, so we
// just do the same.
let filter_fn_ty = tcx.mk_bare_fn(ty::BareFnTy {
unsafety: hir::Unsafety::Unsafe,
abi: abi::Rust,
sig: ty::Binder(ty::FnSig {
inputs: vec![],
output: output,
variadic: false,
}),
});
let filter_fn_ty = tcx.mk_fn(None, filter_fn_ty);
gen_fn(fcx, "__rustc_try_filter", filter_fn_ty, output, &mut |bcx| {
let ebp = Call(bcx, frameaddress, &[C_i32(ccx, 1)], None, dloc);
let exn = InBoundsGEP(bcx, ebp, &[C_i32(ccx, -20)]);
let exn = Load(bcx, BitCast(bcx, exn, Type::i8p(ccx).ptr_to()));
do_trans(bcx, exn, ebp);
})
} else if ccx.tcx().sess.target.target.arch == "x86_64" {
// Conveniently on x86_64 the EXCEPTION_POINTERS handle and base pointer
// are passed in as arguments to the filter function, so we just pass
// those along.
let filter_fn_ty = tcx.mk_bare_fn(ty::BareFnTy {
unsafety: hir::Unsafety::Unsafe,
abi: abi::Rust,
sig: ty::Binder(ty::FnSig {
inputs: vec![i8p, i8p],
output: output,
variadic: false,
}),
});
let filter_fn_ty = tcx.mk_fn(None, filter_fn_ty);
gen_fn(fcx, "__rustc_try_filter", filter_fn_ty, output, &mut |bcx| {
let exn = llvm::get_param(bcx.fcx.llfn, 0);
let rbp = llvm::get_param(bcx.fcx.llfn, 1);
do_trans(bcx, exn, rbp);
})
} else {
panic!("unknown target to generate a filter function")
}
}
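
The shim generated here only normalizes the per-architecture ABI; the actual catch/no-catch decision is delegated to the standard library. A hedged, self-contained sketch of that decision (the struct and field names are simplified stand-ins for the Windows `EXCEPTION_RECORD` layout consulted by `__rust_try_filter` in seh.rs):

struct ExceptionRecord {
    code: u32,               // stands in for ExceptionCode
    information: [usize; 2], // stands in for ExceptionInformation
}

const RUST_PANIC: u32 = 0x00525354;

// Return 1 to catch the exception (copying its payload into the `try`
// frame), 0 to keep unwinding past us.
unsafe fn filter(record: *const ExceptionRecord,
                 payload: *mut [usize; 2]) -> i32 {
    if (*record).code != RUST_PANIC {
        return 0;
    }
    (*payload)[0] = (*record).information[0];
    (*payload)[1] = (*record).information[1];
    1
}

fn main() {
    let record = ExceptionRecord { code: RUST_PANIC, information: [1, 2] };
    let mut payload = [0usize; 2];
    assert_eq!(unsafe { filter(&record, &mut payload) }, 1);
    assert_eq!(payload, [1, 2]);
}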
fn span_invalid_monomorphization_error(a: &Session, b: Span, c: &str) {
span_err!(a, b, E0511, "{}", c);
}


@ -16,7 +16,7 @@ use trans::adt;
use trans::attributes;
use trans::base;
use trans::build;
use trans::common::{self, Block};
use trans::common::{self, Block, LandingPad};
use trans::debuginfo::DebugLoc;
use trans::foreign;
use trans::type_of;
@ -55,7 +55,8 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
let discr_lvalue = self.trans_lvalue(bcx, discr);
let ty = discr_lvalue.ty.to_ty(bcx.tcx());
let repr = adt::represent_type(bcx.ccx(), ty);
let discr = adt::trans_get_discr(bcx, &repr, discr_lvalue.llval, None);
let discr = adt::trans_get_discr(bcx, &repr, discr_lvalue.llval,
None, true);
// The else branch of the Switch can't be hit, so branch to an unreachable
// instruction so LLVM knows that
@ -161,7 +162,8 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
let cleanup = self.bcx(targets.1);
let landingpad = self.make_landing_pad(cleanup);
let (target, postinvoke) = if must_copy_dest {
(bcx.fcx.new_block(false, "", None), Some(self.bcx(targets.0)))
(bcx.fcx.new_block("", None),
Some(self.bcx(targets.0)))
} else {
(self.bcx(targets.0), None)
};
@ -266,7 +268,9 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
}
fn make_landing_pad(&mut self, cleanup: Block<'bcx, 'tcx>) -> Block<'bcx, 'tcx> {
let bcx = cleanup.fcx.new_block(true, "cleanup", None);
let bcx = cleanup.fcx.new_block("cleanup", None);
// FIXME(#30941) this doesn't handle msvc-style exceptions
*bcx.lpad.borrow_mut() = Some(LandingPad::gnu());
let ccx = bcx.ccx();
let llpersonality = bcx.fcx.eh_personality();
let llretty = Type::struct_(ccx, &[Type::i8p(ccx), Type::i32(ccx)], false);
@ -282,7 +286,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
match self.unreachable_block {
Some(b) => b,
None => {
let bl = self.fcx.new_block(false, "unreachable", None);
let bl = self.fcx.new_block("unreachable", None);
build::Unreachable(bl);
self.unreachable_block = Some(bl);
bl


@ -14,7 +14,7 @@ use rustc::mir::repr as mir;
use rustc::mir::tcx::LvalueTy;
use trans::base;
use trans::build;
use trans::common::{self, Block};
use trans::common::{self, Block, LandingPad};
use trans::debuginfo::DebugLoc;
use trans::expr;
use trans::type_of;
@ -114,8 +114,12 @@ pub fn trans_mir<'bcx, 'tcx>(bcx: Block<'bcx, 'tcx>) {
let block_bcxs: Vec<Block<'bcx,'tcx>> =
mir_blocks.iter()
.map(|&bb|{
let is_cleanup = mir.basic_block_data(bb).is_cleanup;
fcx.new_block(is_cleanup, &format!("{:?}", bb), None)
let bcx = fcx.new_block(&format!("{:?}", bb), None);
// FIXME(#30941) this doesn't handle msvc-style exceptions
if mir.basic_block_data(bb).is_cleanup {
*bcx.lpad.borrow_mut() = Some(LandingPad::gnu())
}
bcx
})
.collect();


@ -220,7 +220,8 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
let (llval, ll_t_in, signed) = if let CastTy::Int(IntTy::CEnum) = r_t_in {
let repr = adt::represent_type(bcx.ccx(), operand.ty);
let llval = operand.immediate();
let discr = adt::trans_get_discr(bcx, &*repr, llval, None);
let discr = adt::trans_get_discr(bcx, &*repr, llval,
None, true);
(discr, common::val_ty(discr), adt::is_discr_signed(&*repr))
} else {
(operand.immediate(), ll_t_in, operand.ty.is_signed())


@ -293,7 +293,7 @@ pub fn check_intrinsic_type(ccx: &CrateCtxt, it: &hir::ForeignItem) {
}),
};
let fn_ty = tcx.mk_bare_fn(fn_ty);
(0, vec![tcx.mk_fn(None, fn_ty), mut_u8], mut_u8)
(0, vec![tcx.mk_fn(None, fn_ty), mut_u8, mut_u8], tcx.types.i32)
}
ref other => {


@ -41,6 +41,11 @@ pub unsafe fn panic(data: Box<Any + Send + 'static>) -> ! {
}
}
#[cfg(not(stage0))]
pub fn payload() -> *mut u8 {
0 as *mut u8
}
pub unsafe fn cleanup(ptr: *mut u8) -> Box<Any + Send + 'static> {
let my_ep = ptr as *mut Exception;
let cause = (*my_ep).cause.take();


@ -83,13 +83,13 @@ use sys_common::mutex::Mutex;
// implementations. One goes through SEH on Windows and the other goes through
// libgcc via the libunwind-like API.
// i686-pc-windows-msvc
#[cfg(all(windows, target_arch = "x86", target_env = "msvc"))]
// *-pc-windows-msvc
#[cfg(target_env = "msvc")]
#[path = "seh.rs"] #[doc(hidden)]
pub mod imp;
// x86_64-pc-windows-*
#[cfg(all(windows, target_arch = "x86_64"))]
// x86_64-pc-windows-gnu
#[cfg(all(windows, target_arch = "x86_64", target_env = "gnu"))]
#[path = "seh64_gnu.rs"] #[doc(hidden)]
pub mod imp;
@ -122,45 +122,54 @@ pub unsafe fn try<F: FnOnce()>(f: F) -> Result<(), Box<Any + Send>> {
let mut f = Some(f);
return inner_try(try_fn::<F>, &mut f as *mut _ as *mut u8);
// If an inner function were not used here, then this generic function `try`
// would use the native symbol `rust_try`, whose code is statically
// linked into the standard library. This means that the DLL for the
// standard library must have `rust_try` as an exposed symbol that
// downstream crates can link against (because monomorphizations of `try` in
// downstream crates will have a reference to the `rust_try` symbol).
//
// On MSVC this requires the symbol `rust_try` to be tagged with
// `dllexport`, but it's easier to not have conditional `src/rt/rust_try.ll`
// files and instead just have this non-generic shim which the compiler
// takes care of exposing correctly.
unsafe fn inner_try(f: fn(*mut u8), data: *mut u8)
-> Result<(), Box<Any + Send>> {
PANIC_COUNT.with(|s| {
let prev = s.get();
s.set(0);
let ep = intrinsics::try(f, data);
s.set(prev);
if ep.is_null() {
Ok(())
} else {
Err(imp::cleanup(ep))
}
})
}
fn try_fn<F: FnOnce()>(opt_closure: *mut u8) {
let opt_closure = opt_closure as *mut Option<F>;
unsafe { (*opt_closure).take().unwrap()(); }
}
}
extern {
// Rust's try-catch
// When f(...) returns normally, the return value is null.
// When f(...) throws, the return value is a pointer to the caught
// exception object.
fn rust_try(f: extern fn(*mut u8),
data: *mut u8) -> *mut u8;
}
#[cfg(not(stage0))]
unsafe fn inner_try(f: fn(*mut u8), data: *mut u8)
-> Result<(), Box<Any + Send>> {
PANIC_COUNT.with(|s| {
let prev = s.get();
s.set(0);
// The "payload" here is a platform-specific region of memory which is
// used to transmit information about the exception being thrown from
// the point-of-throw back to this location.
//
// A pointer to this data is passed to the `try` intrinsic itself,
// allowing this function, the `try` intrinsic, imp::payload(), and
// imp::cleanup() to all work in concert to transmit this information.
//
// More information about what this pointer actually is can be found in
// each implementation as well as browsing the compiler source itself.
let mut payload = imp::payload();
let r = intrinsics::try(f, data, &mut payload as *mut _ as *mut _);
s.set(prev);
if r == 0 {
Ok(())
} else {
Err(imp::cleanup(payload))
}
})
}
#[cfg(stage0)]
unsafe fn inner_try(f: fn(*mut u8), data: *mut u8)
-> Result<(), Box<Any + Send>> {
PANIC_COUNT.with(|s| {
let prev = s.get();
s.set(0);
let ep = intrinsics::try(f, data);
s.set(prev);
if ep.is_null() {
Ok(())
} else {
Err(imp::cleanup(ep))
}
})
}
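
The split between the generic `try` and the non-generic `inner_try` is exactly the funneling pattern described above: every monomorphization goes through one type-erased entry point, so only a single symbol ever needs to be exposed. A minimal sketch of the same shape:

fn try_generic<F: FnOnce()>(f: F) {
    let mut f = Some(f);
    // Erase the type: a plain fn pointer plus a raw data pointer is all
    // that crosses into the non-generic shim.
    inner(call_once::<F>, &mut f as *mut Option<F> as *mut u8);
}

fn call_once<F: FnOnce()>(data: *mut u8) {
    let f = unsafe { (*(data as *mut Option<F>)).take().unwrap() };
    f();
}

// Non-generic: the one symbol a runtime (or intrinsic) has to know about.
fn inner(f: fn(*mut u8), data: *mut u8) {
    f(data);
}

fn main() {
    let mut hit = false;
    try_generic(|| hit = true);
    assert!(hit);
}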
/// Determines whether the current thread is unwinding because of panic.


@ -8,109 +8,175 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Windows SEH
//!
//! On Windows (currently only on MSVC), the default exception handling
//! mechanism is Structured Exception Handling (SEH). This is quite different
//! from Dwarf-based exception handling (e.g. what other unix platforms use) in
//! terms of compiler internals, so LLVM is required to have a good deal of
//! extra support for SEH.
//!
//! In a nutshell, what happens here is:
//!
//! 1. The `panic` function calls the standard Windows function `RaiseException`
//! with a Rust-specific code, triggering the unwinding process.
//! 2. All landing pads generated by the compiler use the personality function
//! `__C_specific_handler` on 64-bit and `__except_handler3` on 32-bit,
//! functions in the CRT, and the unwinding code in Windows will use this
//! personality function to execute all cleanup code on the stack.
//! 3. All compiler-generated calls to `invoke` have a landing pad set as a
//! `cleanuppad` LLVM instruction, which indicates the start of the cleanup
//! routine. The personality (in step 2, defined in the CRT) is responsible
//! for running the cleanup routines.
//! 4. Eventually the "catch" code in the `try` intrinsic (generated by the
//! compiler) is executed, which will ensure that the exception being caught
//! is indeed a Rust exception, indicating that control should come back to
//! Rust. This is done via a `catchswitch` plus a `catchpad` instruction in
//! LLVM IR terms, finally returning normal control to the program with a
//! `catchret` instruction. The `try` intrinsic uses a filter function to
//! detect what kind of exception is being thrown, and this detection is
//! implemented as the msvc_try_filter language item below.
//!
//! Some specific differences from the gcc-based exception handling are:
//!
//! * Rust has no custom personality function; it is instead *always*
//! __C_specific_handler or __except_handler3, so the filtering is done in a
//! C++-like manner instead of in the personality function itself. Note that
//! the precise codegen for this was lifted from an LLVM test case for SEH
//! (this is the `__rust_try_filter` function below).
//! * We've got some data to transmit across the unwinding boundary,
//! specifically a `Box<Any + Send + 'static>`. Like with Dwarf exceptions
//! these two pointers are stored as a payload in the exception itself. On
//! MSVC, however, there's no need for an extra allocation because the call
//! stack is preserved while filter functions are being executed. This means
//! that the pointers are passed directly to `RaiseException`, and they are
//! later recovered in the filter function and written to the stack frame of
//! the `try` intrinsic.
//!
//! [win64]: http://msdn.microsoft.com/en-us/library/1eyas8tf.aspx
//! [llvm]: http://llvm.org/docs/ExceptionHandling.html#background-on-windows-exceptions
#![allow(bad_style)]
use prelude::v1::*;
use any::Any;
use ptr;
use sys_common::thread_local::StaticKey;
use sys::c;
// 0x R U S T
const RUST_PANIC: c::DWORD = 0x52555354;
static PANIC_DATA: StaticKey = StaticKey::new(None);
// A code which indicates panics that originate from Rust. Note that some of the
// upper bits are used by the system so we just set them to 0 and ignore them.
// 0x 0 R S T
const RUST_PANIC: c::DWORD = 0x00525354;
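
The new constant packs the ASCII bytes 'R', 'S', 'T' into the low three bytes and leaves the system-reserved top bits zero; a quick sanity check:

fn main() {
    const RUST_PANIC: u32 = 0x00525354;
    // 0x00_52_53_54 == 0, b'R', b'S', b'T'
    assert_eq!(RUST_PANIC,
               (b'R' as u32) << 16 | (b'S' as u32) << 8 | (b'T' as u32));
}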
pub unsafe fn panic(data: Box<Any + Send + 'static>) -> ! {
// See module docs above for an explanation of why `data` is stored in a
// thread local instead of being passed as an argument to the
// `RaiseException` function (which can in theory carry along arbitrary
// data).
let exception = Box::new(data);
rtassert!(PANIC_DATA.get().is_null());
PANIC_DATA.set(Box::into_raw(exception) as *mut u8);
pub use self::imp::*;
c::RaiseException(RUST_PANIC, 0, 0, ptr::null());
rtabort!("could not unwind stack");
#[cfg(stage0)]
mod imp {
use prelude::v1::*;
use any::Any;
pub unsafe fn panic(_data: Box<Any + Send + 'static>) -> ! {
rtabort!("cannot unwind SEH in stage0")
}
pub unsafe fn cleanup(_ptr: *mut u8) -> Box<Any + Send + 'static> {
rtabort!("can't cleanup SEH in stage0")
}
#[lang = "msvc_try_filter"]
#[linkage = "external"]
unsafe extern fn __rust_try_filter() -> i32 {
0
}
#[lang = "eh_unwind_resume"]
#[unwind]
unsafe extern fn rust_eh_unwind_resume(_ptr: *mut u8) -> ! {
rtabort!("can't resume unwind SEH in stage0")
}
#[lang = "eh_personality_catch"]
unsafe extern fn rust_eh_personality_catch() {}
}
pub unsafe fn cleanup(ptr: *mut u8) -> Box<Any + Send + 'static> {
// The `ptr` here actually corresponds to the code of the exception, and our
// real data is stored in our thread local.
rtassert!(ptr as c::DWORD == RUST_PANIC);
#[cfg(not(stage0))]
mod imp {
use prelude::v1::*;
let data = PANIC_DATA.get() as *mut Box<Any + Send + 'static>;
PANIC_DATA.set(ptr::null_mut());
rtassert!(!data.is_null());
use any::Any;
use mem;
use raw;
use super::RUST_PANIC;
use sys::c;
*Box::from_raw(data)
pub unsafe fn panic(data: Box<Any + Send + 'static>) -> ! {
// As mentioned above, the call stack here is preserved while the filter
// functions are running, so it's ok to pass stack-local arrays into
// `RaiseException`.
//
// The two pointers of the `data` trait object are written to the stack,
// passed to `RaiseException`, and they're later extracted by the filter
// function below in the "custom exception information" section of the
// `EXCEPTION_RECORD` type.
let ptrs = mem::transmute::<_, raw::TraitObject>(data);
let ptrs = [ptrs.data, ptrs.vtable];
c::RaiseException(RUST_PANIC, 0, 2, ptrs.as_ptr() as *mut _);
rtabort!("could not unwind stack");
}
pub fn payload() -> [usize; 2] {
[0; 2]
}
pub unsafe fn cleanup(payload: [usize; 2]) -> Box<Any + Send + 'static> {
mem::transmute(raw::TraitObject {
data: payload[0] as *mut _,
vtable: payload[1] as *mut _,
})
}
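
The `panic`/`cleanup` pair above amounts to a round trip through the two words of a trait object. A hedged sketch of that round trip as it would look on a nightly of this era (`std::raw::TraitObject` was unstable behind `feature(raw)` and has since been removed from the standard library):

#![feature(raw)]

use std::any::Any;
use std::mem;
use std::raw;

fn main() {
    let b: Box<Any + Send> = Box::new(7usize);
    // `panic` splits the box into its (data, vtable) words...
    let ptrs: raw::TraitObject = unsafe { mem::transmute(b) };
    let words = [ptrs.data as usize, ptrs.vtable as usize];
    // ...and `cleanup` reassembles them on the catching side.
    let b: Box<Any + Send> = unsafe {
        mem::transmute(raw::TraitObject {
            data: words[0] as *mut (),
            vtable: words[1] as *mut (),
        })
    };
    assert_eq!(*b.downcast::<usize>().unwrap(), 7);
}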
// This is quite a special function, and it's not literally passed in as the
// filter function for the `catchpad` of the `try` intrinsic. The compiler
// actually generates its own filter function wrapper which will delegate to
// this for the actual execution logic for whether the exception should be
// caught. The reasons for this are:
//
// * Each architecture has a slightly different ABI for the filter function
// here. For example on x86 there are no arguments but on x86_64 there are
// two.
// * This function needs access to the stack frame of the `try` intrinsic
// which is using this filter as a catch pad. This is because the payload
// of this exception, `Box<Any>`, needs to be transmitted to that
// location.
//
// Both of these differences end up using a ton of weird llvm-specific
// intrinsics, so it's actually pretty difficult to express the entire
// filter function in Rust itself. As a compromise, the compiler takes care
// of all the weird LLVM-specific and platform-specific stuff, getting to
// the point where this function makes the actual decision about what to
// catch given two parameters.
//
// The first parameter is `*mut EXCEPTION_POINTERS` which is some contextual
// information about the exception being filtered, and the second pointer is
// `*mut *mut [usize; 2]` (the payload here). This value points directly
// into the stack frame of the `try` intrinsic itself, and we use it to copy
// information from the exception onto the stack.
#[lang = "msvc_try_filter"]
#[cfg(not(test))]
unsafe extern fn __rust_try_filter(eh_ptrs: *mut u8,
payload: *mut u8) -> i32 {
let eh_ptrs = eh_ptrs as *mut c::EXCEPTION_POINTERS;
let payload = payload as *mut *mut [usize; 2];
let record = &*(*eh_ptrs).ExceptionRecord;
if record.ExceptionCode != RUST_PANIC {
return 0
}
(**payload)[0] = record.ExceptionInformation[0] as usize;
(**payload)[1] = record.ExceptionInformation[1] as usize;
return 1
}
}
// This is required by the compiler to exist (e.g. it's a lang item), but it's
// never actually called by the compiler because __C_specific_handler is the
// personality function that is always used. Hence this is just an aborting
// stub.
// This is required by the compiler to exist (e.g. it's a lang item), but
// it's never actually called by the compiler because __C_specific_handler
// or __except_handler3 is the personality function that is always used.
// Hence this is just an aborting stub.
#[lang = "eh_personality"]
#[cfg(not(test))]
fn rust_eh_personality() {
unsafe { ::intrinsics::abort() }
}
// This is a function referenced from `rust_try_msvc_64.ll` which is used to
// filter the exceptions being caught by that function.
//
// In theory local variables can be accessed through the `rbp` parameter of this
// function, but a comment in an LLVM test case indicates that this is not
// implemented in LLVM, so this is just an idempotent function which doesn't
// ferry along any other information.
//
// This function just takes a look at the current EXCEPTION_RECORD being thrown
// to ensure that its code is RUST_PANIC, which was set by the call to
// `RaiseException` above in the `panic` function.
#[lang = "msvc_try_filter"]
#[linkage = "external"]
#[allow(private_no_mangle_fns)]
extern fn __rust_try_filter(eh_ptrs: *mut c::EXCEPTION_POINTERS,
_rbp: *mut u8) -> i32 {
unsafe {
((*(*eh_ptrs).ExceptionRecord).ExceptionCode == RUST_PANIC) as i32
}
}


@ -50,6 +50,11 @@ pub unsafe fn panic(data: Box<Any + Send + 'static>) -> ! {
rtabort!("could not unwind stack");
}
#[cfg(not(stage0))]
pub fn payload() -> *mut u8 {
0 as *mut u8
}
pub unsafe fn cleanup(ptr: *mut u8) -> Box<Any + Send + 'static> {
let panic_ctx = Box::from_raw(ptr as *mut PanicData);
return panic_ctx.data;

@ -1 +1 @@
Subproject commit 3564439515985dc1cc0d77057ed00901635a80ad
Subproject commit de5c31045dc0f6da1f65d02ee640ccf99ba90e7c


@ -24,7 +24,13 @@ struct LLVMRustArchiveMember {
const char *name;
Archive::Child child;
LLVMRustArchiveMember(): filename(NULL), name(NULL), child(NULL, NULL) {}
LLVMRustArchiveMember(): filename(NULL), name(NULL),
#if LLVM_VERSION_MINOR >= 8
child(NULL, NULL, NULL)
#else
child(NULL, NULL)
#endif
{}
~LLVMRustArchiveMember() {}
};
@ -92,8 +98,18 @@ extern "C" const Archive::Child*
LLVMRustArchiveIteratorNext(RustArchiveIterator *rai) {
if (rai->cur == rai->end)
return NULL;
const Archive::Child *cur = rai->cur.operator->();
Archive::Child *ret = new Archive::Child(*cur);
#if LLVM_VERSION_MINOR >= 8
const ErrorOr<Archive::Child>* cur = rai->cur.operator->();
if (!*cur) {
LLVMRustSetLastError(cur->getError().message().c_str());
return NULL;
}
const Archive::Child &child = cur->get();
#else
const Archive::Child &child = *rai->cur.operator->();
#endif
Archive::Child *ret = new Archive::Child(child);
++rai->cur;
return ret;
}
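
On the Rust side this changes the iterator's contract: a null return now means either end-of-archive or a malformed member, and the two are told apart by asking for the recorded error string. A hedged sketch of that contract with stand-in types and names (not the real rustc_llvm bindings):

fn next_member<E: Fn() -> Option<String>>(raw: *mut u8, last_error: E)
    -> Option<Result<*mut u8, String>>
{
    if raw.is_null() {
        // Null means "done" unless the C++ side recorded an error message.
        last_error().map(Err)
    } else {
        Some(Ok(raw))
    }
}

fn main() {
    // End of the archive: null with no recorded error.
    assert!(next_member(std::ptr::null_mut(), || None).is_none());
    // A malformed member: null plus an error string surfaces as Err.
    let e = next_member(std::ptr::null_mut(),
                        || Some("truncated member".to_string()));
    assert_eq!(e, Some(Err("truncated member".to_string())));
}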


@ -348,6 +348,19 @@ extern "C" LLVMMetadataRef LLVMDIBuilderCreateFunction(
LLVMValueRef Fn,
LLVMMetadataRef TParam,
LLVMMetadataRef Decl) {
#if LLVM_VERSION_MINOR >= 8
DITemplateParameterArray TParams =
DITemplateParameterArray(unwrap<MDTuple>(TParam));
DISubprogram *Sub = Builder->createFunction(
unwrapDI<DIScope>(Scope), Name, LinkageName,
unwrapDI<DIFile>(File), LineNo,
unwrapDI<DISubroutineType>(Ty), isLocalToUnit, isDefinition, ScopeLine,
Flags, isOptimized,
TParams,
unwrapDIptr<DISubprogram>(Decl));
unwrap<Function>(Fn)->setSubprogram(Sub);
return wrap(Sub);
#else
return wrap(Builder->createFunction(
unwrapDI<DIScope>(Scope), Name, LinkageName,
unwrapDI<DIFile>(File), LineNo,
@ -356,6 +369,7 @@ extern "C" LLVMMetadataRef LLVMDIBuilderCreateFunction(
unwrap<Function>(Fn),
unwrapDIptr<MDNode>(TParam),
unwrapDIptr<MDNode>(Decl)));
#endif
}
extern "C" LLVMMetadataRef LLVMDIBuilderCreateBasicType(
@ -830,7 +844,9 @@ LLVMRustLinkInExternalBitcode(LLVMModuleRef dst, char *bc, size_t len) {
#if LLVM_VERSION_MINOR >= 6
raw_string_ostream Stream(Err);
DiagnosticPrinterRawOStream DP(Stream);
#if LLVM_VERSION_MINOR >= 7
#if LLVM_VERSION_MINOR >= 8
if (Linker::linkModules(*Dst, std::move(Src.get()))) {
#elif LLVM_VERSION_MINOR >= 7
if (Linker::LinkModules(Dst, Src->get(), [&](const DiagnosticInfo &DI) { DI.print(DP); })) {
#else
if (Linker::LinkModules(Dst, *Src, [&](const DiagnosticInfo &DI) { DI.print(DP); })) {
@ -971,3 +987,181 @@ LLVMRustBuildLandingPad(LLVMBuilderRef Builder,
LLVMValueRef F) {
return LLVMBuildLandingPad(Builder, Ty, PersFn, NumClauses, Name);
}
extern "C" LLVMValueRef
LLVMRustBuildCleanupPad(LLVMBuilderRef Builder,
LLVMValueRef ParentPad,
unsigned ArgCnt,
LLVMValueRef *LLArgs,
const char *Name) {
#if LLVM_VERSION_MINOR >= 8
Value **Args = unwrap(LLArgs);
if (ParentPad == NULL) {
Type *Ty = Type::getTokenTy(unwrap(Builder)->getContext());
ParentPad = wrap(Constant::getNullValue(Ty));
}
return wrap(unwrap(Builder)->CreateCleanupPad(unwrap(ParentPad),
ArrayRef<Value*>(Args, ArgCnt),
Name));
#else
return NULL;
#endif
}
extern "C" LLVMValueRef
LLVMRustBuildCleanupRet(LLVMBuilderRef Builder,
LLVMValueRef CleanupPad,
LLVMBasicBlockRef UnwindBB) {
#if LLVM_VERSION_MINOR >= 8
CleanupPadInst *Inst = cast<CleanupPadInst>(unwrap(CleanupPad));
return wrap(unwrap(Builder)->CreateCleanupRet(Inst, unwrap(UnwindBB)));
#else
return NULL;
#endif
}
extern "C" LLVMValueRef
LLVMRustBuildCatchPad(LLVMBuilderRef Builder,
LLVMValueRef ParentPad,
unsigned ArgCnt,
LLVMValueRef *LLArgs,
const char *Name) {
#if LLVM_VERSION_MINOR >= 8
Value **Args = unwrap(LLArgs);
return wrap(unwrap(Builder)->CreateCatchPad(unwrap(ParentPad),
ArrayRef<Value*>(Args, ArgCnt),
Name));
#else
return NULL;
#endif
}
extern "C" LLVMValueRef
LLVMRustBuildCatchRet(LLVMBuilderRef Builder,
LLVMValueRef Pad,
LLVMBasicBlockRef BB) {
#if LLVM_VERSION_MINOR >= 8
return wrap(unwrap(Builder)->CreateCatchRet(cast<CatchPadInst>(unwrap(Pad)),
unwrap(BB)));
#else
return NULL;
#endif
}
extern "C" LLVMValueRef
LLVMRustBuildCatchSwitch(LLVMBuilderRef Builder,
LLVMValueRef ParentPad,
LLVMBasicBlockRef BB,
unsigned NumHandlers,
const char *Name) {
#if LLVM_VERSION_MINOR >= 8
if (ParentPad == NULL) {
Type *Ty = Type::getTokenTy(unwrap(Builder)->getContext());
ParentPad = wrap(Constant::getNullValue(Ty));
}
return wrap(unwrap(Builder)->CreateCatchSwitch(unwrap(ParentPad),
unwrap(BB),
NumHandlers,
Name));
#else
return NULL;
#endif
}
extern "C" void
LLVMRustAddHandler(LLVMValueRef CatchSwitchRef,
LLVMBasicBlockRef Handler) {
#if LLVM_VERSION_MINOR >= 8
Value *CatchSwitch = unwrap(CatchSwitchRef);
cast<CatchSwitchInst>(CatchSwitch)->addHandler(unwrap(Handler));
#endif
}
extern "C" void
LLVMRustSetPersonalityFn(LLVMBuilderRef B,
LLVMValueRef Personality) {
#if LLVM_VERSION_MINOR >= 8
unwrap(B)->GetInsertBlock()
->getParent()
->setPersonalityFn(cast<Function>(unwrap(Personality)));
#endif
}
#if LLVM_VERSION_MINOR >= 8
extern "C" OperandBundleDef*
LLVMRustBuildOperandBundleDef(const char *Name,
LLVMValueRef *Inputs,
unsigned NumInputs) {
return new OperandBundleDef(Name, makeArrayRef(unwrap(Inputs), NumInputs));
}
extern "C" void
LLVMRustFreeOperandBundleDef(OperandBundleDef* Bundle) {
delete Bundle;
}
extern "C" LLVMValueRef
LLVMRustBuildCall(LLVMBuilderRef B,
LLVMValueRef Fn,
LLVMValueRef *Args,
unsigned NumArgs,
OperandBundleDef *Bundle,
const char *Name) {
unsigned len = Bundle ? 1 : 0;
ArrayRef<OperandBundleDef> Bundles = makeArrayRef(Bundle, len);
return wrap(unwrap(B)->CreateCall(unwrap(Fn),
makeArrayRef(unwrap(Args), NumArgs),
Bundles,
Name));
}
extern "C" LLVMValueRef
LLVMRustBuildInvoke(LLVMBuilderRef B,
LLVMValueRef Fn,
LLVMValueRef *Args,
unsigned NumArgs,
LLVMBasicBlockRef Then,
LLVMBasicBlockRef Catch,
OperandBundleDef *Bundle,
const char *Name) {
unsigned len = Bundle ? 1 : 0;
ArrayRef<OperandBundleDef> Bundles = makeArrayRef(Bundle, len);
return wrap(unwrap(B)->CreateInvoke(unwrap(Fn), unwrap(Then), unwrap(Catch),
makeArrayRef(unwrap(Args), NumArgs),
Bundles,
Name));
}
#else
extern "C" void*
LLVMRustBuildOperandBundleDef(const char *Name,
LLVMValueRef *Inputs,
unsigned NumInputs) {
return NULL;
}
extern "C" void
LLVMRustFreeOperandBundleDef(void* Bundle) {
}
extern "C" LLVMValueRef
LLVMRustBuildCall(LLVMBuilderRef B,
LLVMValueRef Fn,
LLVMValueRef *Args,
unsigned NumArgs,
void *Bundle,
const char *Name) {
return LLVMBuildCall(B, Fn, Args, NumArgs, Name);
}
extern "C" LLVMValueRef
LLVMRustBuildInvoke(LLVMBuilderRef B,
LLVMValueRef Fn,
LLVMValueRef *Args,
unsigned NumArgs,
LLVMBasicBlockRef Then,
LLVMBasicBlockRef Catch,
void *Bundle,
const char *Name) {
return LLVMBuildInvoke(B, Fn, Args, NumArgs, Then, Catch, Name);
}
#endif
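
Each of these wrappers compiles to a null-returning stub against LLVM older than 3.8, so a Rust caller has to treat null as "this LLVM cannot do SEH codegen". A hedged sketch of that check, with a local stub standing in for the FFI call:

// Stand-in for one of the wrappers above as compiled against LLVM < 3.8
// (the #else branches simply return NULL).
unsafe fn llvm_rust_build_cleanup_pad_stub() -> *mut u8 {
    std::ptr::null_mut()
}

fn main() {
    let pad = unsafe { llvm_rust_build_cleanup_pad_stub() };
    if pad.is_null() {
        // rustc would report an internal error here rather than emit
        // broken IR; this sketch just demonstrates the required check.
        println!("cleanuppad requires LLVM 3.8 or newer");
    }
}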


@ -1,4 +1,4 @@
# If this file is modified, then llvm will be forcibly cleaned and then rebuilt.
# The actual contents of this file do not matter, but to trigger a change on the
# build bots the contents should be changed so git updates the mtime.
2015-12-02
2016-01-25


@ -0,0 +1,15 @@
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
pub extern fn f() -> i32 { 1 }
pub extern fn g() -> i32 { 2 }
pub fn get_f() -> extern fn() -> i32 { f }
pub fn get_g() -> extern fn() -> i32 { g }


@ -0,0 +1,12 @@
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#[no_mangle]
pub extern fn foo() {}


@ -7,7 +7,10 @@
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![feature(rustc_attrs)]
// ignore-msvc: FIXME(#30941)
// error-pattern:converging_fn called
// error-pattern:0 dropped
// error-pattern:exit


@ -7,7 +7,10 @@
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![feature(rustc_attrs)]
// ignore-msvc: FIXME(#30941)
// error-pattern:complex called
// error-pattern:dropped
// error-pattern:exit


@ -7,9 +7,13 @@
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![feature(rustc_attrs)]
// ignore-msvc: FIXME(#30941)
// error-pattern:diverging_fn called
// error-pattern:0 dropped
use std::io::{self, Write};
struct Droppable(u8);


@ -42,6 +42,12 @@ use syntax::diagnostics::registry::Registry;
use syntax::parse::token;
fn main() {
// Currently trips an assertion on i686-msvc, presumably because the support
// in LLVM is a little young.
if cfg!(target_env = "msvc") && cfg!(target_arch = "x86") {
return
}
let program = r#"
#[no_mangle]
pub static TEST_STATIC: i32 = 42;


@ -1,6 +1,6 @@
-include ../tools.mk
all: $(call NATIVE_STATICLIB,test)
all: $(call NATIVE_STATICLIB,ctest)
$(RUSTC) testcrate.rs
$(RUSTC) test.rs
$(call RUN,test) || exit 1


@ -10,11 +10,12 @@
#![crate_type = "lib"]
#[repr(C)]
pub struct TestUnion {
val: u64
_val: u64
}
#[link(name = "test", kind = "static")]
#[link(name = "ctest", kind = "static")]
extern {
pub fn give_back(tu: TestUnion) -> u64;
}


@ -1,5 +1,4 @@
{
"data-layout": "e-p:32:32-f64:32:64-i64:32:64-f80:32:32-n8:16:32",
"llvm-target": "i686-unknown-linux-gnu",
"target-endian": "little",
"target-pointer-width": "32",


@ -100,8 +100,13 @@ REMOVE_RLIBS = rm $(TMPDIR)/$(call RLIB_GLOB,$(1))
%.a: %.o
ar crus $@ $<
ifdef IS_MSVC
%.lib: lib%.o
$(MSVC_LIB) -out:`cygpath -w $@` $<
else
%.lib: lib%.o
ar crus $@ $<
endif
%.dylib: %.o
$(CC) -dynamiclib -Wl,-dylib -o $@ $<
%.so: %.o


@ -27,12 +27,11 @@ macro_rules! pos {
() => ((file!(), line!()))
}
#[cfg(any(all(unix,
not(target_os = "macos"),
not(target_os = "ios"),
not(target_os = "android"),
not(all(target_os = "linux", target_arch = "arm"))),
all(windows, not(target_arch = "x86"))))]
#[cfg(all(unix,
not(target_os = "macos"),
not(target_os = "ios"),
not(target_os = "android"),
not(all(target_os = "linux", target_arch = "arm"))))]
macro_rules! dump_and_die {
($($pos:expr),*) => ({
// FIXME(#18285): we cannot include the current position because
@ -43,12 +42,11 @@ macro_rules! dump_and_die {
}
// this does not work on Windows, Android, OSX or iOS
#[cfg(not(any(all(unix,
#[cfg(not(all(unix,
not(target_os = "macos"),
not(target_os = "ios"),
not(target_os = "android"),
not(all(target_os = "linux", target_arch = "arm"))),
all(windows, not(target_arch = "x86")))))]
not(all(target_os = "linux", target_arch = "arm")))))]
macro_rules! dump_and_die {
($($pos:expr),*) => ({ let _ = [$($pos),*]; })
}


@ -8,17 +8,14 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// aux-build:extern-take-value.rs
extern fn f() {
}
extern fn g() {
}
extern crate extern_take_value;
pub fn main() {
let a: extern "C" fn() = f;
let b: extern "C" fn() = f;
let c: extern "C" fn() = g;
let a: extern "C" fn() -> i32 = extern_take_value::get_f();
let b: extern "C" fn() -> i32 = extern_take_value::get_f();
let c: extern "C" fn() -> i32 = extern_take_value::get_g();
assert!(a == b);
assert!(a != c);


@ -12,12 +12,16 @@
// ABI (#9309).
// pretty-expanded FIXME #23616
// aux-build:fn-abi.rs
extern crate fn_abi;
extern {
fn printf();
fn foo();
}
pub fn main() {
// Will only type check if the type of _p and the decl of printf use the same ABI
let _p: unsafe extern fn() = printf;
// Will only type check if the type of _p and the decl of foo use the
// same ABI
let _p: unsafe extern fn() = foo;
}


@ -8,9 +8,6 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![feature(intrinsics, core)]
macro_rules! assert_approx_eq {
($a:expr, $b:expr) => ({
let (a, b) = (&$a, &$b);
@ -19,96 +16,52 @@ macro_rules! assert_approx_eq {
})
}
mod rusti {
extern "rust-intrinsic" {
pub fn sqrtf32(x: f32) -> f32;
pub fn sqrtf64(x: f64) -> f64;
pub fn powif32(a: f32, x: i32) -> f32;
pub fn powif64(a: f64, x: i32) -> f64;
pub fn sinf32(x: f32) -> f32;
pub fn sinf64(x: f64) -> f64;
pub fn cosf32(x: f32) -> f32;
pub fn cosf64(x: f64) -> f64;
pub fn powf32(a: f32, x: f32) -> f32;
pub fn powf64(a: f64, x: f64) -> f64;
pub fn expf32(x: f32) -> f32;
pub fn expf64(x: f64) -> f64;
pub fn exp2f32(x: f32) -> f32;
pub fn exp2f64(x: f64) -> f64;
pub fn logf32(x: f32) -> f32;
pub fn logf64(x: f64) -> f64;
pub fn log10f32(x: f32) -> f32;
pub fn log10f64(x: f64) -> f64;
pub fn log2f32(x: f32) -> f32;
pub fn log2f64(x: f64) -> f64;
pub fn fmaf32(a: f32, b: f32, c: f32) -> f32;
pub fn fmaf64(a: f64, b: f64, c: f64) -> f64;
pub fn fabsf32(x: f32) -> f32;
pub fn fabsf64(x: f64) -> f64;
pub fn floorf32(x: f32) -> f32;
pub fn floorf64(x: f64) -> f64;
pub fn ceilf32(x: f32) -> f32;
pub fn ceilf64(x: f64) -> f64;
pub fn truncf32(x: f32) -> f32;
pub fn truncf64(x: f64) -> f64;
}
}
pub fn main() {
unsafe {
use rusti::*;
use std::f32;
use std::f64;
use std::f32;
use std::f64;
assert_approx_eq!(64f32.sqrt(), 8f32);
assert_approx_eq!(64f64.sqrt(), 8f64);
assert_approx_eq!(sqrtf32(64f32), 8f32);
assert_approx_eq!(sqrtf64(64f64), 8f64);
assert_approx_eq!(25f32.powi(-2), 0.0016f32);
assert_approx_eq!(23.2f64.powi(2), 538.24f64);
assert_approx_eq!(powif32(25f32, -2), 0.0016f32);
assert_approx_eq!(powif64(23.2f64, 2), 538.24f64);
assert_approx_eq!(0f32.sin(), 0f32);
assert_approx_eq!((f64::consts::PI / 2f64).sin(), 1f64);
assert_approx_eq!(sinf32(0f32), 0f32);
assert_approx_eq!(sinf64(f64::consts::PI / 2f64), 1f64);
assert_approx_eq!(0f32.cos(), 1f32);
assert_approx_eq!((f64::consts::PI * 2f64).cos(), 1f64);
assert_approx_eq!(cosf32(0f32), 1f32);
assert_approx_eq!(cosf64(f64::consts::PI * 2f64), 1f64);
assert_approx_eq!(25f32.powf(-2f32), 0.0016f32);
assert_approx_eq!(400f64.powf(0.5f64), 20f64);
assert_approx_eq!(powf32(25f32, -2f32), 0.0016f32);
assert_approx_eq!(powf64(400f64, 0.5f64), 20f64);
assert_approx_eq!((1f32.exp() - f32::consts::E).abs(), 0f32);
assert_approx_eq!(1f64.exp(), f64::consts::E);
assert_approx_eq!(fabsf32(expf32(1f32) - f32::consts::E), 0f32);
assert_approx_eq!(expf64(1f64), f64::consts::E);
assert_approx_eq!(10f32.exp2(), 1024f32);
assert_approx_eq!(50f64.exp2(), 1125899906842624f64);
assert_approx_eq!(exp2f32(10f32), 1024f32);
assert_approx_eq!(exp2f64(50f64), 1125899906842624f64);
assert_approx_eq!((f32::consts::E.ln() - 1f32).abs(), 0f32);
assert_approx_eq!(1f64.ln(), 0f64);
assert_approx_eq!(fabsf32(logf32(f32::consts::E) - 1f32), 0f32);
assert_approx_eq!(logf64(1f64), 0f64);
assert_approx_eq!(10f32.log10(), 1f32);
assert_approx_eq!(f64::consts::E.log10(), f64::consts::LOG10_E);
assert_approx_eq!(log10f32(10f32), 1f32);
assert_approx_eq!(log10f64(f64::consts::E), f64::consts::LOG10_E);
assert_approx_eq!(8f32.log2(), 3f32);
assert_approx_eq!(f64::consts::E.log2(), f64::consts::LOG2_E);
assert_approx_eq!(log2f32(8f32), 3f32);
assert_approx_eq!(log2f64(f64::consts::E), f64::consts::LOG2_E);
assert_approx_eq!(1.0f32.mul_add(2.0f32, 5.0f32), 7.0f32);
assert_approx_eq!(0.0f64.mul_add(-2.0f64, f64::consts::E), f64::consts::E);
assert_approx_eq!(fmaf32(1.0f32, 2.0f32, 5.0f32), 7.0f32);
assert_approx_eq!(fmaf64(0.0f64, -2.0f64, f64::consts::E), f64::consts::E);
assert_approx_eq!((-1.0f32).abs(), 1.0f32);
assert_approx_eq!(34.2f64.abs(), 34.2f64);
assert_approx_eq!(fabsf32(-1.0f32), 1.0f32);
assert_approx_eq!(fabsf64(34.2f64), 34.2f64);
assert_approx_eq!(3.8f32.floor(), 3.0f32);
assert_approx_eq!((-1.1f64).floor(), -2.0f64);
assert_approx_eq!(floorf32(3.8f32), 3.0f32);
assert_approx_eq!(floorf64(-1.1f64), -2.0f64);
// Causes linker error
// undefined reference to llvm.ceil.f32/64
//assert_eq!(ceilf32(-2.3f32), -2.0f32);
//assert_eq!(ceilf64(3.8f64), 4.0f64);
// Causes linker error
// undefined reference to llvm.trunc.f32/64
//assert_eq!(truncf32(0.1f32), 0.0f32);
//assert_eq!(truncf64(-0.1f64), 0.0f64);
}
assert_approx_eq!((-2.3f32).ceil(), -2.0f32);
assert_approx_eq!(3.8f64.ceil(), 4.0f64);
assert_approx_eq!(0.1f32.trunc(), 0.0f32);
assert_approx_eq!((-0.1f64).trunc(), 0.0f64);
}