All Builder methods now take &mut self instead of &self

parent 1ebdfbb026
commit 54dd3a47fd

Changed paths: src/librustc_codegen_llvm, src/librustc_codegen_ssa
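The change below is mechanical: every codegen builder method switches its receiver from `&self` to `&mut self`, so per-builder state can be mutated directly instead of through interior mutability. The following is a minimal, self-contained sketch of that pattern using toy stand-in types (`Value`, `IrBuilder`, `ToyBuilder` are illustrative only, not the actual rustc_codegen_ssa/LLVM types):

// Toy illustration of the &self -> &mut self change; not the real rustc traits.
#[derive(Debug, Clone, Copy)]
struct Value(u32);

trait IrBuilder {
    // Previously these hypothetically took `&self`; now they take `&mut self`.
    fn add(&mut self, lhs: Value, rhs: Value) -> Value;
    fn count_insn(&mut self, name: &str);
}

struct ToyBuilder { insn_count: usize, next_id: u32 }

impl IrBuilder for ToyBuilder {
    fn add(&mut self, _lhs: Value, _rhs: Value) -> Value {
        self.count_insn("add");       // mutable state updated without a RefCell
        self.next_id += 1;
        Value(self.next_id)
    }
    fn count_insn(&mut self, _name: &str) { self.insn_count += 1; }
}

fn main() {
    let mut bx = ToyBuilder { insn_count: 0, next_id: 0 };
    let sum = bx.add(Value(0), Value(1)); // callers now need a mutable borrow
    println!("{:?}, instructions emitted: {}", sum, bx.insn_count);
}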
@@ -171,13 +171,13 @@ pub trait ArgTypeExt<'ll, 'tcx> {
     fn memory_ty(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type;
     fn store(
         &self,
-        bx: &Builder<'_, 'll, 'tcx>,
+        bx: &mut Builder<'_, 'll, 'tcx>,
         val: &'ll Value,
         dst: PlaceRef<'tcx, &'ll Value>,
     );
     fn store_fn_arg(
         &self,
-        bx: &Builder<'_, 'll, 'tcx>,
+        bx: &mut Builder<'_, 'll, 'tcx>,
         idx: &mut usize,
         dst: PlaceRef<'tcx, &'ll Value>,
     );
@@ -196,14 +196,13 @@ impl ArgTypeExt<'ll, 'tcx> for ArgType<'tcx, Ty<'tcx>> {
     /// or results of call/invoke instructions into their destinations.
     fn store(
         &self,
-        bx: &Builder<'_, 'll, 'tcx>,
+        bx: &mut Builder<'_, 'll, 'tcx>,
         val: &'ll Value,
         dst: PlaceRef<'tcx, &'ll Value>,
     ) {
         if self.is_ignore() {
             return;
         }
-        let cx = bx.cx();
         if self.is_sized_indirect() {
             OperandValue::Ref(val, None, self.layout.align).store(bx, dst)
         } else if self.is_unsized_indirect() {
@@ -213,7 +212,8 @@ impl ArgTypeExt<'ll, 'tcx> for ArgType<'tcx, Ty<'tcx>> {
             // uses it for i16 -> {i8, i8}, but not for i24 -> {i8, i8, i8}.
             let can_store_through_cast_ptr = false;
             if can_store_through_cast_ptr {
-                let cast_dst = bx.pointercast(dst.llval, cx.type_ptr_to(cast.llvm_type(cx)));
+                let cast_ptr_llty = bx.cx().type_ptr_to(cast.llvm_type(bx.cx()));
+                let cast_dst = bx.pointercast(dst.llval, cast_ptr_llty);
                 bx.store(val, cast_dst, self.layout.align);
             } else {
                 // The actual return type is a struct, but the ABI
@@ -231,9 +231,9 @@ impl ArgTypeExt<'ll, 'tcx> for ArgType<'tcx, Ty<'tcx>> {
                 // bitcasting to the struct type yields invalid cast errors.

                 // We instead thus allocate some scratch space...
-                let scratch_size = cast.size(cx);
-                let scratch_align = cast.align(cx);
-                let llscratch = bx.alloca(cast.llvm_type(cx), "abi_cast", scratch_align);
+                let scratch_size = cast.size(bx.cx());
+                let scratch_align = cast.align(bx.cx());
+                let llscratch = bx.alloca(cast.llvm_type(bx.cx()), "abi_cast", scratch_align);
                 bx.lifetime_start(llscratch, scratch_size);

                 // ...where we first store the value...
@@ -241,11 +241,11 @@ impl ArgTypeExt<'ll, 'tcx> for ArgType<'tcx, Ty<'tcx>> {

                 // ...and then memcpy it to the intended destination.
                 bx.memcpy(
-                    bx.pointercast(dst.llval, cx.type_i8p()),
+                    dst.llval,
                     self.layout.align,
-                    bx.pointercast(llscratch, cx.type_i8p()),
+                    llscratch,
                     scratch_align,
-                    cx.const_usize(self.layout.size.bytes()),
+                    bx.cx().const_usize(self.layout.size.bytes()),
                     MemFlags::empty()
                 );

@@ -258,7 +258,7 @@ impl ArgTypeExt<'ll, 'tcx> for ArgType<'tcx, Ty<'tcx>> {

     fn store_fn_arg(
         &self,
-        bx: &Builder<'a, 'll, 'tcx>,
+        bx: &mut Builder<'a, 'll, 'tcx>,
         idx: &mut usize,
         dst: PlaceRef<'tcx, &'ll Value>,
     ) {
@@ -284,14 +284,14 @@ impl ArgTypeExt<'ll, 'tcx> for ArgType<'tcx, Ty<'tcx>> {

 impl ArgTypeMethods<'tcx> for Builder<'a, 'll, 'tcx> {
     fn store_fn_arg(
-        &self,
+        &mut self,
         ty: &ArgType<'tcx, Ty<'tcx>>,
         idx: &mut usize, dst: PlaceRef<'tcx, Self::Value>
     ) {
         ty.store_fn_arg(self, idx, dst)
     }
     fn store_arg_ty(
-        &self,
+        &mut self,
         ty: &ArgType<'tcx, Ty<'tcx>>,
         val: &'ll Value,
         dst: PlaceRef<'tcx, &'ll Value>
@@ -324,7 +324,7 @@ pub trait FnTypeExt<'tcx> {
     fn ptr_to_llvm_type(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type;
     fn llvm_cconv(&self) -> llvm::CallConv;
     fn apply_attrs_llfn(&self, llfn: &'ll Value);
-    fn apply_attrs_callsite(&self, bx: &Builder<'a, 'll, 'tcx>, callsite: &'ll Value);
+    fn apply_attrs_callsite(&self, bx: &mut Builder<'a, 'll, 'tcx>, callsite: &'ll Value);
 }

 impl<'tcx> FnTypeExt<'tcx> for FnType<'tcx, Ty<'tcx>> {
@@ -761,7 +761,7 @@ impl<'tcx> FnTypeExt<'tcx> for FnType<'tcx, Ty<'tcx>> {
         }
     }

-    fn apply_attrs_callsite(&self, bx: &Builder<'a, 'll, 'tcx>, callsite: &'ll Value) {
+    fn apply_attrs_callsite(&self, bx: &mut Builder<'a, 'll, 'tcx>, callsite: &'ll Value) {
         let mut i = 0;
         let mut apply = |attrs: &ArgAttributes| {
             attrs.apply_callsite(llvm::AttributePlace::Argument(i), callsite);
@@ -832,7 +832,7 @@ impl AbiMethods<'tcx> for CodegenCx<'ll, 'tcx> {

 impl AbiBuilderMethods<'tcx> for Builder<'a, 'll, 'tcx> {
     fn apply_attrs_callsite(
-        &self,
+        &mut self,
         ty: &FnType<'tcx, Ty<'tcx>>,
         callsite: Self::Value
     ) {

@@ -26,7 +26,7 @@ use libc::{c_uint, c_char};

 impl AsmBuilderMethods<'tcx> for Builder<'a, 'll, 'tcx> {
     fn codegen_inline_asm(
-        &self,
+        &mut self,
         ia: &hir::InlineAsm,
         outputs: Vec<PlaceRef<'tcx, &'ll Value>>,
         mut inputs: Vec<&'ll Value>

@@ -201,7 +201,7 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
     }

     fn switch(
-        &self,
+        &mut self,
         v: &'ll Value,
         else_llbb: &'ll BasicBlock,
         num_cases: usize,
@@ -212,7 +212,7 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
     }

     fn invoke(
-        &self,
+        &mut self,
         llfn: &'ll Value,
         args: &[&'ll Value],
         then: &'ll BasicBlock,
@@ -241,7 +241,7 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
         }
     }

-    fn unreachable(&self) {
+    fn unreachable(&mut self) {
         self.count_insn("unreachable");
         unsafe {
             llvm::LLVMBuildUnreachable(self.llbuilder);
@@ -249,21 +249,21 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
     }

     /* Arithmetic */
-    fn add(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
+    fn add(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
         self.count_insn("add");
         unsafe {
             llvm::LLVMBuildAdd(self.llbuilder, lhs, rhs, noname())
         }
     }

-    fn fadd(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
+    fn fadd(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
         self.count_insn("fadd");
         unsafe {
             llvm::LLVMBuildFAdd(self.llbuilder, lhs, rhs, noname())
         }
     }

-    fn fadd_fast(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
+    fn fadd_fast(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
         self.count_insn("fadd");
         unsafe {
             let instr = llvm::LLVMBuildFAdd(self.llbuilder, lhs, rhs, noname());
@@ -272,21 +272,21 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
         }
     }

-    fn sub(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
+    fn sub(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
         self.count_insn("sub");
         unsafe {
             llvm::LLVMBuildSub(self.llbuilder, lhs, rhs, noname())
         }
     }

-    fn fsub(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
+    fn fsub(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
         self.count_insn("fsub");
         unsafe {
             llvm::LLVMBuildFSub(self.llbuilder, lhs, rhs, noname())
         }
     }

-    fn fsub_fast(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
+    fn fsub_fast(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
         self.count_insn("fsub");
         unsafe {
             let instr = llvm::LLVMBuildFSub(self.llbuilder, lhs, rhs, noname());
@@ -295,21 +295,21 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
         }
     }

-    fn mul(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
+    fn mul(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
         self.count_insn("mul");
         unsafe {
             llvm::LLVMBuildMul(self.llbuilder, lhs, rhs, noname())
         }
     }

-    fn fmul(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
+    fn fmul(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
         self.count_insn("fmul");
         unsafe {
             llvm::LLVMBuildFMul(self.llbuilder, lhs, rhs, noname())
         }
     }

-    fn fmul_fast(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
+    fn fmul_fast(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
         self.count_insn("fmul");
         unsafe {
             let instr = llvm::LLVMBuildFMul(self.llbuilder, lhs, rhs, noname());
@ -319,42 +319,42 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
|
||||
}
|
||||
|
||||
|
||||
fn udiv(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
|
||||
fn udiv(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
|
||||
self.count_insn("udiv");
|
||||
unsafe {
|
||||
llvm::LLVMBuildUDiv(self.llbuilder, lhs, rhs, noname())
|
||||
}
|
||||
}
|
||||
|
||||
fn exactudiv(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
|
||||
fn exactudiv(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
|
||||
self.count_insn("exactudiv");
|
||||
unsafe {
|
||||
llvm::LLVMBuildExactUDiv(self.llbuilder, lhs, rhs, noname())
|
||||
}
|
||||
}
|
||||
|
||||
fn sdiv(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
|
||||
fn sdiv(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
|
||||
self.count_insn("sdiv");
|
||||
unsafe {
|
||||
llvm::LLVMBuildSDiv(self.llbuilder, lhs, rhs, noname())
|
||||
}
|
||||
}
|
||||
|
||||
fn exactsdiv(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
|
||||
fn exactsdiv(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
|
||||
self.count_insn("exactsdiv");
|
||||
unsafe {
|
||||
llvm::LLVMBuildExactSDiv(self.llbuilder, lhs, rhs, noname())
|
||||
}
|
||||
}
|
||||
|
||||
fn fdiv(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
|
||||
fn fdiv(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
|
||||
self.count_insn("fdiv");
|
||||
unsafe {
|
||||
llvm::LLVMBuildFDiv(self.llbuilder, lhs, rhs, noname())
|
||||
}
|
||||
}
|
||||
|
||||
fn fdiv_fast(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
|
||||
fn fdiv_fast(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
|
||||
self.count_insn("fdiv");
|
||||
unsafe {
|
||||
let instr = llvm::LLVMBuildFDiv(self.llbuilder, lhs, rhs, noname());
|
||||
@ -363,28 +363,28 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
|
||||
}
|
||||
}
|
||||
|
||||
fn urem(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
|
||||
fn urem(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
|
||||
self.count_insn("urem");
|
||||
unsafe {
|
||||
llvm::LLVMBuildURem(self.llbuilder, lhs, rhs, noname())
|
||||
}
|
||||
}
|
||||
|
||||
fn srem(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
|
||||
fn srem(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
|
||||
self.count_insn("srem");
|
||||
unsafe {
|
||||
llvm::LLVMBuildSRem(self.llbuilder, lhs, rhs, noname())
|
||||
}
|
||||
}
|
||||
|
||||
fn frem(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
|
||||
fn frem(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
|
||||
self.count_insn("frem");
|
||||
unsafe {
|
||||
llvm::LLVMBuildFRem(self.llbuilder, lhs, rhs, noname())
|
||||
}
|
||||
}
|
||||
|
||||
fn frem_fast(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
|
||||
fn frem_fast(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
|
||||
self.count_insn("frem");
|
||||
unsafe {
|
||||
let instr = llvm::LLVMBuildFRem(self.llbuilder, lhs, rhs, noname());
|
||||
@ -393,70 +393,70 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
|
||||
}
|
||||
}
|
||||
|
||||
fn shl(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
|
||||
fn shl(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
|
||||
self.count_insn("shl");
|
||||
unsafe {
|
||||
llvm::LLVMBuildShl(self.llbuilder, lhs, rhs, noname())
|
||||
}
|
||||
}
|
||||
|
||||
fn lshr(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
|
||||
fn lshr(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
|
||||
self.count_insn("lshr");
|
||||
unsafe {
|
||||
llvm::LLVMBuildLShr(self.llbuilder, lhs, rhs, noname())
|
||||
}
|
||||
}
|
||||
|
||||
fn ashr(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
|
||||
fn ashr(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
|
||||
self.count_insn("ashr");
|
||||
unsafe {
|
||||
llvm::LLVMBuildAShr(self.llbuilder, lhs, rhs, noname())
|
||||
}
|
||||
}
|
||||
|
||||
fn and(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
|
||||
fn and(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
|
||||
self.count_insn("and");
|
||||
unsafe {
|
||||
llvm::LLVMBuildAnd(self.llbuilder, lhs, rhs, noname())
|
||||
}
|
||||
}
|
||||
|
||||
fn or(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
|
||||
fn or(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
|
||||
self.count_insn("or");
|
||||
unsafe {
|
||||
llvm::LLVMBuildOr(self.llbuilder, lhs, rhs, noname())
|
||||
}
|
||||
}
|
||||
|
||||
fn xor(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
|
||||
fn xor(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
|
||||
self.count_insn("xor");
|
||||
unsafe {
|
||||
llvm::LLVMBuildXor(self.llbuilder, lhs, rhs, noname())
|
||||
}
|
||||
}
|
||||
|
||||
fn neg(&self, v: &'ll Value) -> &'ll Value {
|
||||
fn neg(&mut self, v: &'ll Value) -> &'ll Value {
|
||||
self.count_insn("neg");
|
||||
unsafe {
|
||||
llvm::LLVMBuildNeg(self.llbuilder, v, noname())
|
||||
}
|
||||
}
|
||||
|
||||
fn fneg(&self, v: &'ll Value) -> &'ll Value {
|
||||
fn fneg(&mut self, v: &'ll Value) -> &'ll Value {
|
||||
self.count_insn("fneg");
|
||||
unsafe {
|
||||
llvm::LLVMBuildFNeg(self.llbuilder, v, noname())
|
||||
}
|
||||
}
|
||||
|
||||
fn not(&self, v: &'ll Value) -> &'ll Value {
|
||||
fn not(&mut self, v: &'ll Value) -> &'ll Value {
|
||||
self.count_insn("not");
|
||||
unsafe {
|
||||
llvm::LLVMBuildNot(self.llbuilder, v, noname())
|
||||
}
|
||||
}
|
||||
|
||||
fn alloca(&self, ty: &'ll Type, name: &str, align: Align) -> &'ll Value {
|
||||
fn alloca(&mut self, ty: &'ll Type, name: &str, align: Align) -> &'ll Value {
|
||||
let mut bx = Builder::with_cx(self.cx);
|
||||
bx.position_at_start(unsafe {
|
||||
llvm::LLVMGetFirstBasicBlock(self.llfn())
|
||||
@ -464,7 +464,7 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
|
||||
bx.dynamic_alloca(ty, name, align)
|
||||
}
|
||||
|
||||
fn dynamic_alloca(&self, ty: &'ll Type, name: &str, align: Align) -> &'ll Value {
|
||||
fn dynamic_alloca(&mut self, ty: &'ll Type, name: &str, align: Align) -> &'ll Value {
|
||||
self.count_insn("alloca");
|
||||
unsafe {
|
||||
let alloca = if name.is_empty() {
|
||||
@ -479,7 +479,7 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
|
||||
}
|
||||
}
|
||||
|
||||
fn array_alloca(&self,
|
||||
fn array_alloca(&mut self,
|
||||
ty: &'ll Type,
|
||||
len: &'ll Value,
|
||||
name: &str,
|
||||
@ -498,7 +498,7 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
|
||||
}
|
||||
}
|
||||
|
||||
fn load(&self, ptr: &'ll Value, align: Align) -> &'ll Value {
|
||||
fn load(&mut self, ptr: &'ll Value, align: Align) -> &'ll Value {
|
||||
self.count_insn("load");
|
||||
unsafe {
|
||||
let load = llvm::LLVMBuildLoad(self.llbuilder, ptr, noname());
|
||||
@ -507,7 +507,7 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
|
||||
}
|
||||
}
|
||||
|
||||
fn volatile_load(&self, ptr: &'ll Value) -> &'ll Value {
|
||||
fn volatile_load(&mut self, ptr: &'ll Value) -> &'ll Value {
|
||||
self.count_insn("load.volatile");
|
||||
unsafe {
|
||||
let insn = llvm::LLVMBuildLoad(self.llbuilder, ptr, noname());
|
||||
@ -517,7 +517,7 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
|
||||
}
|
||||
|
||||
fn atomic_load(
|
||||
&self,
|
||||
&mut self,
|
||||
ptr: &'ll Value,
|
||||
order: rustc_codegen_ssa::common::AtomicOrdering,
|
||||
size: Size,
|
||||
@ -537,7 +537,7 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
|
||||
}
|
||||
|
||||
fn load_operand(
|
||||
&self,
|
||||
&mut self,
|
||||
place: PlaceRef<'tcx, &'ll Value>
|
||||
) -> OperandRef<'tcx, &'ll Value> {
|
||||
debug!("PlaceRef::load: {:?}", place);
|
||||
@ -548,21 +548,25 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
|
||||
return OperandRef::new_zst(self.cx(), place.layout);
|
||||
}
|
||||
|
||||
let scalar_load_metadata = |load, scalar: &layout::Scalar| {
|
||||
fn scalar_load_metadata<'a, 'll, 'tcx>(
|
||||
bx: &mut Builder<'a, 'll, 'tcx>,
|
||||
load: &'ll Value,
|
||||
scalar: &layout::Scalar
|
||||
) {
|
||||
let vr = scalar.valid_range.clone();
|
||||
match scalar.value {
|
||||
layout::Int(..) => {
|
||||
let range = scalar.valid_range_exclusive(self.cx());
|
||||
let range = scalar.valid_range_exclusive(bx.cx());
|
||||
if range.start != range.end {
|
||||
self.range_metadata(load, range);
|
||||
bx.range_metadata(load, range);
|
||||
}
|
||||
}
|
||||
layout::Pointer if vr.start() < vr.end() && !vr.contains(&0) => {
|
||||
self.nonnull_metadata(load);
|
||||
bx.nonnull_metadata(load);
|
||||
}
|
||||
_ => {}
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
let val = if let Some(llextra) = place.llextra {
|
||||
OperandValue::Ref(place.llval, Some(llextra), place.align)
|
||||
@ -578,16 +582,16 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
|
||||
let llval = const_llval.unwrap_or_else(|| {
|
||||
let load = self.load(place.llval, place.align);
|
||||
if let layout::Abi::Scalar(ref scalar) = place.layout.abi {
|
||||
scalar_load_metadata(load, scalar);
|
||||
scalar_load_metadata(self, load, scalar);
|
||||
}
|
||||
load
|
||||
});
|
||||
OperandValue::Immediate(to_immediate(self, llval, place.layout))
|
||||
} else if let layout::Abi::ScalarPair(ref a, ref b) = place.layout.abi {
|
||||
let load = |i, scalar: &layout::Scalar| {
|
||||
let mut load = |i, scalar: &layout::Scalar| {
|
||||
let llptr = self.struct_gep(place.llval, i as u64);
|
||||
let load = self.load(llptr, place.align);
|
||||
scalar_load_metadata(load, scalar);
|
||||
scalar_load_metadata(self, load, scalar);
|
||||
if scalar.is_bool() {
|
||||
self.trunc(load, self.cx().type_i1())
|
||||
} else {
|
||||
@ -604,7 +608,7 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
|
||||
|
||||
|
||||
|
||||
fn range_metadata(&self, load: &'ll Value, range: Range<u128>) {
|
||||
fn range_metadata(&mut self, load: &'ll Value, range: Range<u128>) {
|
||||
if self.cx().sess().target.target.arch == "amdgpu" {
|
||||
// amdgpu/LLVM does something weird and thinks a i64 value is
|
||||
// split into a v2i32, halving the bitwidth LLVM expects,
|
||||
@ -627,19 +631,19 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
|
||||
}
|
||||
}
|
||||
|
||||
fn nonnull_metadata(&self, load: &'ll Value) {
|
||||
fn nonnull_metadata(&mut self, load: &'ll Value) {
|
||||
unsafe {
|
||||
llvm::LLVMSetMetadata(load, llvm::MD_nonnull as c_uint,
|
||||
llvm::LLVMMDNodeInContext(self.cx.llcx, ptr::null(), 0));
|
||||
}
|
||||
}
|
||||
|
||||
fn store(&self, val: &'ll Value, ptr: &'ll Value, align: Align) -> &'ll Value {
|
||||
fn store(&mut self, val: &'ll Value, ptr: &'ll Value, align: Align) -> &'ll Value {
|
||||
self.store_with_flags(val, ptr, align, MemFlags::empty())
|
||||
}
|
||||
|
||||
fn store_with_flags(
|
||||
&self,
|
||||
&mut self,
|
||||
val: &'ll Value,
|
||||
ptr: &'ll Value,
|
||||
align: Align,
|
||||
@ -672,7 +676,7 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
|
||||
}
|
||||
}
|
||||
|
||||
fn atomic_store(&self, val: &'ll Value, ptr: &'ll Value,
|
||||
fn atomic_store(&mut self, val: &'ll Value, ptr: &'ll Value,
|
||||
order: rustc_codegen_ssa::common::AtomicOrdering, size: Size) {
|
||||
debug!("Store {:?} -> {:?}", val, ptr);
|
||||
self.count_insn("store.atomic");
|
||||
@ -689,7 +693,7 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
|
||||
}
|
||||
}
|
||||
|
||||
fn gep(&self, ptr: &'ll Value, indices: &[&'ll Value]) -> &'ll Value {
|
||||
fn gep(&mut self, ptr: &'ll Value, indices: &[&'ll Value]) -> &'ll Value {
|
||||
self.count_insn("gep");
|
||||
unsafe {
|
||||
llvm::LLVMBuildGEP(self.llbuilder, ptr, indices.as_ptr(),
|
||||
@ -697,7 +701,7 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
|
||||
}
|
||||
}
|
||||
|
||||
fn inbounds_gep(&self, ptr: &'ll Value, indices: &[&'ll Value]) -> &'ll Value {
|
||||
fn inbounds_gep(&mut self, ptr: &'ll Value, indices: &[&'ll Value]) -> &'ll Value {
|
||||
self.count_insn("inboundsgep");
|
||||
unsafe {
|
||||
llvm::LLVMBuildInBoundsGEP(
|
||||
@ -706,77 +710,77 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
|
||||
}
|
||||
|
||||
/* Casts */
|
||||
fn trunc(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
|
||||
fn trunc(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
|
||||
self.count_insn("trunc");
|
||||
unsafe {
|
||||
llvm::LLVMBuildTrunc(self.llbuilder, val, dest_ty, noname())
|
||||
}
|
||||
}
|
||||
|
||||
fn sext(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
|
||||
fn sext(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
|
||||
self.count_insn("sext");
|
||||
unsafe {
|
||||
llvm::LLVMBuildSExt(self.llbuilder, val, dest_ty, noname())
|
||||
}
|
||||
}
|
||||
|
||||
fn fptoui(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
|
||||
fn fptoui(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
|
||||
self.count_insn("fptoui");
|
||||
unsafe {
|
||||
llvm::LLVMBuildFPToUI(self.llbuilder, val, dest_ty, noname())
|
||||
}
|
||||
}
|
||||
|
||||
fn fptosi(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
|
||||
fn fptosi(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
|
||||
self.count_insn("fptosi");
|
||||
unsafe {
|
||||
llvm::LLVMBuildFPToSI(self.llbuilder, val, dest_ty,noname())
|
||||
}
|
||||
}
|
||||
|
||||
fn uitofp(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
|
||||
fn uitofp(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
|
||||
self.count_insn("uitofp");
|
||||
unsafe {
|
||||
llvm::LLVMBuildUIToFP(self.llbuilder, val, dest_ty, noname())
|
||||
}
|
||||
}
|
||||
|
||||
fn sitofp(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
|
||||
fn sitofp(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
|
||||
self.count_insn("sitofp");
|
||||
unsafe {
|
||||
llvm::LLVMBuildSIToFP(self.llbuilder, val, dest_ty, noname())
|
||||
}
|
||||
}
|
||||
|
||||
fn fptrunc(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
|
||||
fn fptrunc(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
|
||||
self.count_insn("fptrunc");
|
||||
unsafe {
|
||||
llvm::LLVMBuildFPTrunc(self.llbuilder, val, dest_ty, noname())
|
||||
}
|
||||
}
|
||||
|
||||
fn fpext(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
|
||||
fn fpext(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
|
||||
self.count_insn("fpext");
|
||||
unsafe {
|
||||
llvm::LLVMBuildFPExt(self.llbuilder, val, dest_ty, noname())
|
||||
}
|
||||
}
|
||||
|
||||
fn ptrtoint(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
|
||||
fn ptrtoint(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
|
||||
self.count_insn("ptrtoint");
|
||||
unsafe {
|
||||
llvm::LLVMBuildPtrToInt(self.llbuilder, val, dest_ty, noname())
|
||||
}
|
||||
}
|
||||
|
||||
fn inttoptr(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
|
||||
fn inttoptr(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
|
||||
self.count_insn("inttoptr");
|
||||
unsafe {
|
||||
llvm::LLVMBuildIntToPtr(self.llbuilder, val, dest_ty, noname())
|
||||
}
|
||||
}
|
||||
|
||||
fn bitcast(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
|
||||
fn bitcast(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
|
||||
self.count_insn("bitcast");
|
||||
unsafe {
|
||||
llvm::LLVMBuildBitCast(self.llbuilder, val, dest_ty, noname())
|
||||
@ -784,14 +788,14 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
|
||||
}
|
||||
|
||||
|
||||
fn intcast(&self, val: &'ll Value, dest_ty: &'ll Type, is_signed: bool) -> &'ll Value {
|
||||
fn intcast(&mut self, val: &'ll Value, dest_ty: &'ll Type, is_signed: bool) -> &'ll Value {
|
||||
self.count_insn("intcast");
|
||||
unsafe {
|
||||
llvm::LLVMRustBuildIntCast(self.llbuilder, val, dest_ty, is_signed)
|
||||
}
|
||||
}
|
||||
|
||||
fn pointercast(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
|
||||
fn pointercast(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
|
||||
self.count_insn("pointercast");
|
||||
unsafe {
|
||||
llvm::LLVMBuildPointerCast(self.llbuilder, val, dest_ty, noname())
|
||||
@ -799,7 +803,7 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
|
||||
}
|
||||
|
||||
/* Comparisons */
|
||||
fn icmp(&self, op: IntPredicate, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
|
||||
fn icmp(&mut self, op: IntPredicate, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
|
||||
self.count_insn("icmp");
|
||||
let op = llvm::IntPredicate::from_generic(op);
|
||||
unsafe {
|
||||
@ -807,7 +811,7 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
|
||||
}
|
||||
}
|
||||
|
||||
fn fcmp(&self, op: RealPredicate, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
|
||||
fn fcmp(&mut self, op: RealPredicate, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
|
||||
self.count_insn("fcmp");
|
||||
unsafe {
|
||||
llvm::LLVMBuildFCmp(self.llbuilder, op as c_uint, lhs, rhs, noname())
|
||||
@ -815,14 +819,14 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
|
||||
}
|
||||
|
||||
/* Miscellaneous instructions */
|
||||
fn empty_phi(&self, ty: &'ll Type) -> &'ll Value {
|
||||
fn empty_phi(&mut self, ty: &'ll Type) -> &'ll Value {
|
||||
self.count_insn("emptyphi");
|
||||
unsafe {
|
||||
llvm::LLVMBuildPhi(self.llbuilder, ty, noname())
|
||||
}
|
||||
}
|
||||
|
||||
fn phi(&self, ty: &'ll Type, vals: &[&'ll Value], bbs: &[&'ll BasicBlock]) -> &'ll Value {
|
||||
fn phi(&mut self, ty: &'ll Type, vals: &[&'ll Value], bbs: &[&'ll BasicBlock]) -> &'ll Value {
|
||||
assert_eq!(vals.len(), bbs.len());
|
||||
let phi = self.empty_phi(ty);
|
||||
self.count_insn("addincoming");
|
||||
@ -834,7 +838,7 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
|
||||
}
|
||||
}
|
||||
|
||||
fn inline_asm_call(&self, asm: *const c_char, cons: *const c_char,
|
||||
fn inline_asm_call(&mut self, asm: *const c_char, cons: *const c_char,
|
||||
inputs: &[&'ll Value], output: &'ll Type,
|
||||
volatile: bool, alignstack: bool,
|
||||
dia: syntax::ast::AsmDialect) -> Option<&'ll Value> {
|
||||
@ -867,7 +871,7 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
|
||||
}
|
||||
}
|
||||
|
||||
fn memcpy(&self, dst: &'ll Value, dst_align: Align,
|
||||
fn memcpy(&mut self, dst: &'ll Value, dst_align: Align,
|
||||
src: &'ll Value, src_align: Align,
|
||||
size: &'ll Value, flags: MemFlags) {
|
||||
if flags.contains(MemFlags::NONTEMPORAL) {
|
||||
@ -887,7 +891,7 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
|
||||
}
|
||||
}
|
||||
|
||||
fn memmove(&self, dst: &'ll Value, dst_align: Align,
|
||||
fn memmove(&mut self, dst: &'ll Value, dst_align: Align,
|
||||
src: &'ll Value, src_align: Align,
|
||||
size: &'ll Value, flags: MemFlags) {
|
||||
if flags.contains(MemFlags::NONTEMPORAL) {
|
||||
@ -908,7 +912,7 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
|
||||
}
|
||||
|
||||
fn memset(
|
||||
&self,
|
||||
&mut self,
|
||||
ptr: &'ll Value,
|
||||
fill_byte: &'ll Value,
|
||||
size: &'ll Value,
|
||||
@ -924,14 +928,14 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
|
||||
self.call(llintrinsicfn, &[ptr, fill_byte, size, align, volatile], None);
|
||||
}
|
||||
|
||||
fn minnum(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
|
||||
fn minnum(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
|
||||
self.count_insn("minnum");
|
||||
unsafe {
|
||||
let instr = llvm::LLVMRustBuildMinNum(self.llbuilder, lhs, rhs);
|
||||
instr.expect("LLVMRustBuildMinNum is not available in LLVM version < 6.0")
|
||||
}
|
||||
}
|
||||
fn maxnum(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
|
||||
fn maxnum(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
|
||||
self.count_insn("maxnum");
|
||||
unsafe {
|
||||
let instr = llvm::LLVMRustBuildMaxNum(self.llbuilder, lhs, rhs);
|
||||
@ -940,7 +944,7 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
|
||||
}
|
||||
|
||||
fn select(
|
||||
&self, cond: &'ll Value,
|
||||
&mut self, cond: &'ll Value,
|
||||
then_val: &'ll Value,
|
||||
else_val: &'ll Value,
|
||||
) -> &'ll Value {
|
||||
@ -951,14 +955,14 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
|
||||
}
|
||||
|
||||
#[allow(dead_code)]
|
||||
fn va_arg(&self, list: &'ll Value, ty: &'ll Type) -> &'ll Value {
|
||||
fn va_arg(&mut self, list: &'ll Value, ty: &'ll Type) -> &'ll Value {
|
||||
self.count_insn("vaarg");
|
||||
unsafe {
|
||||
llvm::LLVMBuildVAArg(self.llbuilder, list, ty, noname())
|
||||
}
|
||||
}
|
||||
|
||||
fn extract_element(&self, vec: &'ll Value, idx: &'ll Value) -> &'ll Value {
|
||||
fn extract_element(&mut self, vec: &'ll Value, idx: &'ll Value) -> &'ll Value {
|
||||
self.count_insn("extractelement");
|
||||
unsafe {
|
||||
llvm::LLVMBuildExtractElement(self.llbuilder, vec, idx, noname())
|
||||
@ -966,7 +970,7 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
|
||||
}
|
||||
|
||||
fn insert_element(
|
||||
&self, vec: &'ll Value,
|
||||
&mut self, vec: &'ll Value,
|
||||
elt: &'ll Value,
|
||||
idx: &'ll Value,
|
||||
) -> &'ll Value {
|
||||
@ -976,14 +980,14 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
|
||||
}
|
||||
}
|
||||
|
||||
fn shuffle_vector(&self, v1: &'ll Value, v2: &'ll Value, mask: &'ll Value) -> &'ll Value {
|
||||
fn shuffle_vector(&mut self, v1: &'ll Value, v2: &'ll Value, mask: &'ll Value) -> &'ll Value {
|
||||
self.count_insn("shufflevector");
|
||||
unsafe {
|
||||
llvm::LLVMBuildShuffleVector(self.llbuilder, v1, v2, mask, noname())
|
||||
}
|
||||
}
|
||||
|
||||
fn vector_splat(&self, num_elts: usize, elt: &'ll Value) -> &'ll Value {
|
||||
fn vector_splat(&mut self, num_elts: usize, elt: &'ll Value) -> &'ll Value {
|
||||
unsafe {
|
||||
let elt_ty = self.cx.val_ty(elt);
|
||||
let undef = llvm::LLVMGetUndef(self.cx().type_vector(elt_ty, num_elts as u64));
|
||||
@ -993,7 +997,7 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
|
||||
}
|
||||
}
|
||||
|
||||
fn vector_reduce_fadd_fast(&self, acc: &'ll Value, src: &'ll Value) -> &'ll Value {
|
||||
fn vector_reduce_fadd_fast(&mut self, acc: &'ll Value, src: &'ll Value) -> &'ll Value {
|
||||
self.count_insn("vector.reduce.fadd_fast");
|
||||
unsafe {
|
||||
// FIXME: add a non-fast math version once
|
||||
@ -1004,7 +1008,7 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
|
||||
instr
|
||||
}
|
||||
}
|
||||
fn vector_reduce_fmul_fast(&self, acc: &'ll Value, src: &'ll Value) -> &'ll Value {
|
||||
fn vector_reduce_fmul_fast(&mut self, acc: &'ll Value, src: &'ll Value) -> &'ll Value {
|
||||
self.count_insn("vector.reduce.fmul_fast");
|
||||
unsafe {
|
||||
// FIXME: add a non-fast math version once
|
||||
@ -1015,35 +1019,35 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
|
||||
instr
|
||||
}
|
||||
}
|
||||
fn vector_reduce_add(&self, src: &'ll Value) -> &'ll Value {
|
||||
fn vector_reduce_add(&mut self, src: &'ll Value) -> &'ll Value {
|
||||
self.count_insn("vector.reduce.add");
|
||||
unsafe { llvm::LLVMRustBuildVectorReduceAdd(self.llbuilder, src) }
|
||||
}
|
||||
fn vector_reduce_mul(&self, src: &'ll Value) -> &'ll Value {
|
||||
fn vector_reduce_mul(&mut self, src: &'ll Value) -> &'ll Value {
|
||||
self.count_insn("vector.reduce.mul");
|
||||
unsafe { llvm::LLVMRustBuildVectorReduceMul(self.llbuilder, src) }
|
||||
}
|
||||
fn vector_reduce_and(&self, src: &'ll Value) -> &'ll Value {
|
||||
fn vector_reduce_and(&mut self, src: &'ll Value) -> &'ll Value {
|
||||
self.count_insn("vector.reduce.and");
|
||||
unsafe { llvm::LLVMRustBuildVectorReduceAnd(self.llbuilder, src) }
|
||||
}
|
||||
fn vector_reduce_or(&self, src: &'ll Value) -> &'ll Value {
|
||||
fn vector_reduce_or(&mut self, src: &'ll Value) -> &'ll Value {
|
||||
self.count_insn("vector.reduce.or");
|
||||
unsafe { llvm::LLVMRustBuildVectorReduceOr(self.llbuilder, src) }
|
||||
}
|
||||
fn vector_reduce_xor(&self, src: &'ll Value) -> &'ll Value {
|
||||
fn vector_reduce_xor(&mut self, src: &'ll Value) -> &'ll Value {
|
||||
self.count_insn("vector.reduce.xor");
|
||||
unsafe { llvm::LLVMRustBuildVectorReduceXor(self.llbuilder, src) }
|
||||
}
|
||||
fn vector_reduce_fmin(&self, src: &'ll Value) -> &'ll Value {
|
||||
fn vector_reduce_fmin(&mut self, src: &'ll Value) -> &'ll Value {
|
||||
self.count_insn("vector.reduce.fmin");
|
||||
unsafe { llvm::LLVMRustBuildVectorReduceFMin(self.llbuilder, src, /*NoNaNs:*/ false) }
|
||||
}
|
||||
fn vector_reduce_fmax(&self, src: &'ll Value) -> &'ll Value {
|
||||
fn vector_reduce_fmax(&mut self, src: &'ll Value) -> &'ll Value {
|
||||
self.count_insn("vector.reduce.fmax");
|
||||
unsafe { llvm::LLVMRustBuildVectorReduceFMax(self.llbuilder, src, /*NoNaNs:*/ false) }
|
||||
}
|
||||
fn vector_reduce_fmin_fast(&self, src: &'ll Value) -> &'ll Value {
|
||||
fn vector_reduce_fmin_fast(&mut self, src: &'ll Value) -> &'ll Value {
|
||||
self.count_insn("vector.reduce.fmin_fast");
|
||||
unsafe {
|
||||
let instr = llvm::LLVMRustBuildVectorReduceFMin(self.llbuilder, src, /*NoNaNs:*/ true);
|
||||
@ -1051,7 +1055,7 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
|
||||
instr
|
||||
}
|
||||
}
|
||||
fn vector_reduce_fmax_fast(&self, src: &'ll Value) -> &'ll Value {
|
||||
fn vector_reduce_fmax_fast(&mut self, src: &'ll Value) -> &'ll Value {
|
||||
self.count_insn("vector.reduce.fmax_fast");
|
||||
unsafe {
|
||||
let instr = llvm::LLVMRustBuildVectorReduceFMax(self.llbuilder, src, /*NoNaNs:*/ true);
|
||||
@ -1059,16 +1063,16 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
|
||||
instr
|
||||
}
|
||||
}
|
||||
fn vector_reduce_min(&self, src: &'ll Value, is_signed: bool) -> &'ll Value {
|
||||
fn vector_reduce_min(&mut self, src: &'ll Value, is_signed: bool) -> &'ll Value {
|
||||
self.count_insn("vector.reduce.min");
|
||||
unsafe { llvm::LLVMRustBuildVectorReduceMin(self.llbuilder, src, is_signed) }
|
||||
}
|
||||
fn vector_reduce_max(&self, src: &'ll Value, is_signed: bool) -> &'ll Value {
|
||||
fn vector_reduce_max(&mut self, src: &'ll Value, is_signed: bool) -> &'ll Value {
|
||||
self.count_insn("vector.reduce.max");
|
||||
unsafe { llvm::LLVMRustBuildVectorReduceMax(self.llbuilder, src, is_signed) }
|
||||
}
|
||||
|
||||
fn extract_value(&self, agg_val: &'ll Value, idx: u64) -> &'ll Value {
|
||||
fn extract_value(&mut self, agg_val: &'ll Value, idx: u64) -> &'ll Value {
|
||||
self.count_insn("extractvalue");
|
||||
assert_eq!(idx as c_uint as u64, idx);
|
||||
unsafe {
|
||||
@ -1076,7 +1080,7 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
|
||||
}
|
||||
}
|
||||
|
||||
fn insert_value(&self, agg_val: &'ll Value, elt: &'ll Value,
|
||||
fn insert_value(&mut self, agg_val: &'ll Value, elt: &'ll Value,
|
||||
idx: u64) -> &'ll Value {
|
||||
self.count_insn("insertvalue");
|
||||
assert_eq!(idx as c_uint as u64, idx);
|
||||
@ -1086,7 +1090,7 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
|
||||
}
|
||||
}
|
||||
|
||||
fn landing_pad(&self, ty: &'ll Type, pers_fn: &'ll Value,
|
||||
fn landing_pad(&mut self, ty: &'ll Type, pers_fn: &'ll Value,
|
||||
num_clauses: usize) -> &'ll Value {
|
||||
self.count_insn("landingpad");
|
||||
unsafe {
|
||||
@ -1095,27 +1099,27 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
|
||||
}
|
||||
}
|
||||
|
||||
fn add_clause(&self, landing_pad: &'ll Value, clause: &'ll Value) {
|
||||
fn add_clause(&mut self, landing_pad: &'ll Value, clause: &'ll Value) {
|
||||
unsafe {
|
||||
llvm::LLVMAddClause(landing_pad, clause);
|
||||
}
|
||||
}
|
||||
|
||||
fn set_cleanup(&self, landing_pad: &'ll Value) {
|
||||
fn set_cleanup(&mut self, landing_pad: &'ll Value) {
|
||||
self.count_insn("setcleanup");
|
||||
unsafe {
|
||||
llvm::LLVMSetCleanup(landing_pad, llvm::True);
|
||||
}
|
||||
}
|
||||
|
||||
fn resume(&self, exn: &'ll Value) -> &'ll Value {
|
||||
fn resume(&mut self, exn: &'ll Value) -> &'ll Value {
|
||||
self.count_insn("resume");
|
||||
unsafe {
|
||||
llvm::LLVMBuildResume(self.llbuilder, exn)
|
||||
}
|
||||
}
|
||||
|
||||
fn cleanup_pad(&self,
|
||||
fn cleanup_pad(&mut self,
|
||||
parent: Option<&'ll Value>,
|
||||
args: &[&'ll Value]) -> Funclet<'ll> {
|
||||
self.count_insn("cleanuppad");
|
||||
@ -1131,7 +1135,7 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
|
||||
}
|
||||
|
||||
fn cleanup_ret(
|
||||
&self, funclet: &Funclet<'ll>,
|
||||
&mut self, funclet: &Funclet<'ll>,
|
||||
unwind: Option<&'ll BasicBlock>,
|
||||
) -> &'ll Value {
|
||||
self.count_insn("cleanupret");
|
||||
@ -1141,7 +1145,7 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
|
||||
ret.expect("LLVM does not have support for cleanupret")
|
||||
}
|
||||
|
||||
fn catch_pad(&self,
|
||||
fn catch_pad(&mut self,
|
||||
parent: &'ll Value,
|
||||
args: &[&'ll Value]) -> Funclet<'ll> {
|
||||
self.count_insn("catchpad");
|
||||
@ -1154,7 +1158,7 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
|
||||
Funclet::new(ret.expect("LLVM does not have support for catchpad"))
|
||||
}
|
||||
|
||||
fn catch_ret(&self, funclet: &Funclet<'ll>, unwind: &'ll BasicBlock) -> &'ll Value {
|
||||
fn catch_ret(&mut self, funclet: &Funclet<'ll>, unwind: &'ll BasicBlock) -> &'ll Value {
|
||||
self.count_insn("catchret");
|
||||
let ret = unsafe {
|
||||
llvm::LLVMRustBuildCatchRet(self.llbuilder, funclet.cleanuppad(), unwind)
|
||||
@ -1163,7 +1167,7 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
|
||||
}
|
||||
|
||||
fn catch_switch(
|
||||
&self,
|
||||
&mut self,
|
||||
parent: Option<&'ll Value>,
|
||||
unwind: Option<&'ll BasicBlock>,
|
||||
num_handlers: usize,
|
||||
@ -1178,13 +1182,13 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
|
||||
ret.expect("LLVM does not have support for catchswitch")
|
||||
}
|
||||
|
||||
fn add_handler(&self, catch_switch: &'ll Value, handler: &'ll BasicBlock) {
|
||||
fn add_handler(&mut self, catch_switch: &'ll Value, handler: &'ll BasicBlock) {
|
||||
unsafe {
|
||||
llvm::LLVMRustAddHandler(catch_switch, handler);
|
||||
}
|
||||
}
|
||||
|
||||
fn set_personality_fn(&self, personality: &'ll Value) {
|
||||
fn set_personality_fn(&mut self, personality: &'ll Value) {
|
||||
unsafe {
|
||||
llvm::LLVMSetPersonalityFn(self.llfn(), personality);
|
||||
}
|
||||
@ -1192,7 +1196,7 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
|
||||
|
||||
// Atomic Operations
|
||||
fn atomic_cmpxchg(
|
||||
&self,
|
||||
&mut self,
|
||||
dst: &'ll Value,
|
||||
cmp: &'ll Value,
|
||||
src: &'ll Value,
|
||||
@ -1214,7 +1218,7 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
|
||||
}
|
||||
}
|
||||
fn atomic_rmw(
|
||||
&self,
|
||||
&mut self,
|
||||
op: rustc_codegen_ssa::common::AtomicRmwBinOp,
|
||||
dst: &'ll Value,
|
||||
src: &'ll Value,
|
||||
@ -1232,7 +1236,7 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
|
||||
}
|
||||
|
||||
fn atomic_fence(
|
||||
&self,
|
||||
&mut self,
|
||||
order: rustc_codegen_ssa::common::AtomicOrdering,
|
||||
scope: rustc_codegen_ssa::common::SynchronizationScope
|
||||
) {
|
||||
@ -1245,27 +1249,27 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
|
||||
}
|
||||
}
|
||||
|
||||
fn add_case(&self, s: &'ll Value, on_val: &'ll Value, dest: &'ll BasicBlock) {
|
||||
fn add_case(&mut self, s: &'ll Value, on_val: &'ll Value, dest: &'ll BasicBlock) {
|
||||
unsafe {
|
||||
llvm::LLVMAddCase(s, on_val, dest)
|
||||
}
|
||||
}
|
||||
|
||||
fn add_incoming_to_phi(&self, phi: &'ll Value, val: &'ll Value, bb: &'ll BasicBlock) {
|
||||
fn add_incoming_to_phi(&mut self, phi: &'ll Value, val: &'ll Value, bb: &'ll BasicBlock) {
|
||||
self.count_insn("addincoming");
|
||||
unsafe {
|
||||
llvm::LLVMAddIncoming(phi, &val, &bb, 1 as c_uint);
|
||||
}
|
||||
}
|
||||
|
||||
fn set_invariant_load(&self, load: &'ll Value) {
|
||||
fn set_invariant_load(&mut self, load: &'ll Value) {
|
||||
unsafe {
|
||||
llvm::LLVMSetMetadata(load, llvm::MD_invariant_load as c_uint,
|
||||
llvm::LLVMMDNodeInContext(self.cx.llcx, ptr::null(), 0));
|
||||
}
|
||||
}
|
||||
|
||||
fn check_store<'b>(&self,
|
||||
fn check_store<'b>(&mut self,
|
||||
val: &'ll Value,
|
||||
ptr: &'ll Value) -> &'ll Value {
|
||||
let dest_ptr_ty = self.cx.val_ty(ptr);
|
||||
@ -1284,7 +1288,7 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
|
||||
}
|
||||
}
|
||||
|
||||
fn check_call<'b>(&self,
|
||||
fn check_call<'b>(&mut self,
|
||||
typ: &str,
|
||||
llfn: &'ll Value,
|
||||
args: &'b [&'ll Value]) -> Cow<'b, [&'ll Value]> {
|
||||
@ -1326,15 +1330,15 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
|
||||
Cow::Owned(casted_args)
|
||||
}
|
||||
|
||||
fn lifetime_start(&self, ptr: &'ll Value, size: Size) {
|
||||
fn lifetime_start(&mut self, ptr: &'ll Value, size: Size) {
|
||||
self.call_lifetime_intrinsic("llvm.lifetime.start", ptr, size);
|
||||
}
|
||||
|
||||
fn lifetime_end(&self, ptr: &'ll Value, size: Size) {
|
||||
fn lifetime_end(&mut self, ptr: &'ll Value, size: Size) {
|
||||
self.call_lifetime_intrinsic("llvm.lifetime.end", ptr, size);
|
||||
}
|
||||
|
||||
fn call_lifetime_intrinsic(&self, intrinsic: &str, ptr: &'ll Value, size: Size) {
|
||||
fn call_lifetime_intrinsic(&mut self, intrinsic: &str, ptr: &'ll Value, size: Size) {
|
||||
if self.cx.sess().opts.optimize == config::OptLevel::No {
|
||||
return;
|
||||
}
|
||||
@ -1351,7 +1355,7 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
|
||||
}
|
||||
|
||||
fn call(
|
||||
&self,
|
||||
&mut self,
|
||||
llfn: &'ll Value,
|
||||
args: &[&'ll Value],
|
||||
funclet: Option<&Funclet<'ll>>,
|
||||
@ -1377,14 +1381,14 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
|
||||
}
|
||||
}
|
||||
|
||||
fn zext(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
|
||||
fn zext(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
|
||||
self.count_insn("zext");
|
||||
unsafe {
|
||||
llvm::LLVMBuildZExt(self.llbuilder, val, dest_ty, noname())
|
||||
}
|
||||
}
|
||||
|
||||
fn struct_gep(&self, ptr: &'ll Value, idx: u64) -> &'ll Value {
|
||||
fn struct_gep(&mut self, ptr: &'ll Value, idx: u64) -> &'ll Value {
|
||||
self.count_insn("structgep");
|
||||
assert_eq!(idx as c_uint as u64, idx);
|
||||
unsafe {
|
||||
@ -1396,13 +1400,13 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
|
||||
self.cx
|
||||
}
|
||||
|
||||
fn delete_basic_block(&self, bb: &'ll BasicBlock) {
|
||||
fn delete_basic_block(&mut self, bb: &'ll BasicBlock) {
|
||||
unsafe {
|
||||
llvm::LLVMDeleteBasicBlock(bb);
|
||||
}
|
||||
}
|
||||
|
||||
fn do_not_inline(&self, llret: &'ll Value) {
|
||||
fn do_not_inline(&mut self, llret: &'ll Value) {
|
||||
llvm::Attribute::NoInline.apply_callsite(llvm::AttributePlace::Function, llret);
|
||||
}
|
||||
}
|
||||
|
@@ -23,7 +23,7 @@ use syntax::attr;

 /// Inserts a side-effect free instruction sequence that makes sure that the
 /// .debug_gdb_scripts global is referenced, so it isn't removed by the linker.
-pub fn insert_reference_to_gdb_debug_scripts_section_global(bx: &Builder) {
+pub fn insert_reference_to_gdb_debug_scripts_section_global(bx: &mut Builder) {
     if needs_gdb_debug_scripts_section(bx.cx()) {
         let gdb_debug_scripts_section = get_or_insert_gdb_debug_scripts_section_global(bx.cx());
         // Load just the first byte as that's all that's necessary to force

@@ -159,7 +159,7 @@ pub fn finalize(cx: &CodegenCx) {

 impl DebugInfoBuilderMethods<'tcx> for Builder<'a, 'll, 'tcx> {
     fn declare_local(
-        &self,
+        &mut self,
         dbg_context: &FunctionDebugContext<&'ll DISubprogram>,
         variable_name: ast::Name,
         variable_type: Ty<'tcx>,
@@ -225,14 +225,14 @@ impl DebugInfoBuilderMethods<'tcx> for Builder<'a, 'll, 'tcx> {
     }

     fn set_source_location(
-        &self,
+        &mut self,
         debug_context: &FunctionDebugContext<&'ll DISubprogram>,
         scope: Option<&'ll DIScope>,
         span: Span,
     ) {
         set_source_location(debug_context, &self, scope, span)
     }
-    fn insert_reference_to_gdb_debug_scripts_section_global(&self) {
+    fn insert_reference_to_gdb_debug_scripts_section_global(&mut self) {
         gdb::insert_reference_to_gdb_debug_scripts_section_global(self)
     }
 }

@@ -89,15 +89,14 @@ fn get_simple_intrinsic(cx: &CodegenCx<'ll, '_>, name: &str) -> Option<&'ll Valu

 impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> {
     fn codegen_intrinsic_call(
-        &self,
+        &mut self,
         callee_ty: Ty<'tcx>,
         fn_ty: &FnType<'tcx, Ty<'tcx>>,
         args: &[OperandRef<'tcx, &'ll Value>],
         llresult: &'ll Value,
         span: Span,
     ) {
-        let cx = self.cx();
-        let tcx = cx.tcx;
+        let tcx = self.cx().tcx;

         let (def_id, substs) = match callee_ty.sty {
             ty::FnDef(def_id, substs) => (def_id, substs),
@ -110,10 +109,10 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> {
|
||||
let ret_ty = sig.output();
|
||||
let name = &*tcx.item_name(def_id).as_str();
|
||||
|
||||
let llret_ty = cx.layout_of(ret_ty).llvm_type(cx);
|
||||
let llret_ty = self.cx().layout_of(ret_ty).llvm_type(self.cx());
|
||||
let result = PlaceRef::new_sized(llresult, fn_ty.ret.layout, fn_ty.ret.layout.align);
|
||||
|
||||
let simple = get_simple_intrinsic(cx, name);
|
||||
let simple = get_simple_intrinsic(self.cx(), name);
|
||||
let llval = match name {
|
||||
_ if simple.is_some() => {
|
||||
self.call(simple.unwrap(),
|
||||
@ -124,15 +123,15 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> {
|
||||
return;
|
||||
},
|
||||
"likely" => {
|
||||
let expect = cx.get_intrinsic(&("llvm.expect.i1"));
|
||||
self.call(expect, &[args[0].immediate(), cx.const_bool(true)], None)
|
||||
let expect = self.cx().get_intrinsic(&("llvm.expect.i1"));
|
||||
self.call(expect, &[args[0].immediate(), self.cx().const_bool(true)], None)
|
||||
}
|
||||
"unlikely" => {
|
||||
let expect = cx.get_intrinsic(&("llvm.expect.i1"));
|
||||
self.call(expect, &[args[0].immediate(), cx.const_bool(false)], None)
|
||||
let expect = self.cx().get_intrinsic(&("llvm.expect.i1"));
|
||||
self.call(expect, &[args[0].immediate(), self.cx().const_bool(false)], None)
|
||||
}
|
||||
"try" => {
|
||||
try_intrinsic(self, cx,
|
||||
try_intrinsic(self,
|
||||
args[0].immediate(),
|
||||
args[1].immediate(),
|
||||
args[2].immediate(),
|
||||
@ -140,12 +139,12 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> {
|
||||
return;
|
||||
}
|
||||
"breakpoint" => {
|
||||
let llfn = cx.get_intrinsic(&("llvm.debugtrap"));
|
||||
let llfn = self.cx().get_intrinsic(&("llvm.debugtrap"));
|
||||
self.call(llfn, &[], None)
|
||||
}
|
||||
"size_of" => {
|
||||
let tp_ty = substs.type_at(0);
|
||||
cx.const_usize(cx.size_of(tp_ty).bytes())
|
||||
self.cx().const_usize(self.cx().size_of(tp_ty).bytes())
|
||||
}
|
||||
"size_of_val" => {
|
||||
let tp_ty = substs.type_at(0);
|
||||
@ -154,12 +153,12 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> {
|
||||
glue::size_and_align_of_dst(self, tp_ty, Some(meta));
|
||||
llsize
|
||||
} else {
|
||||
cx.const_usize(cx.size_of(tp_ty).bytes())
|
||||
self.cx().const_usize(self.cx().size_of(tp_ty).bytes())
|
||||
}
|
||||
}
|
||||
"min_align_of" => {
|
||||
let tp_ty = substs.type_at(0);
|
||||
cx.const_usize(cx.align_of(tp_ty).abi())
|
||||
self.cx().const_usize(self.cx().align_of(tp_ty).abi())
|
||||
}
|
||||
"min_align_of_val" => {
|
||||
let tp_ty = substs.type_at(0);
|
||||
@ -168,35 +167,35 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> {
|
||||
glue::size_and_align_of_dst(self, tp_ty, Some(meta));
|
||||
llalign
|
||||
} else {
|
||||
cx.const_usize(cx.align_of(tp_ty).abi())
|
||||
self.cx().const_usize(self.cx().align_of(tp_ty).abi())
|
||||
}
|
||||
}
|
||||
"pref_align_of" => {
|
||||
let tp_ty = substs.type_at(0);
|
||||
cx.const_usize(cx.align_of(tp_ty).pref())
|
||||
self.cx().const_usize(self.cx().align_of(tp_ty).pref())
|
||||
}
|
||||
"type_name" => {
|
||||
let tp_ty = substs.type_at(0);
|
||||
let ty_name = Symbol::intern(&tp_ty.to_string()).as_str();
|
||||
cx.const_str_slice(ty_name)
|
||||
self.cx().const_str_slice(ty_name)
|
||||
}
|
||||
"type_id" => {
|
||||
cx.const_u64(cx.tcx.type_id_hash(substs.type_at(0)))
|
||||
self.cx().const_u64(self.cx().tcx.type_id_hash(substs.type_at(0)))
|
||||
}
|
||||
"init" => {
|
||||
let ty = substs.type_at(0);
|
||||
if !cx.layout_of(ty).is_zst() {
|
||||
if !self.cx().layout_of(ty).is_zst() {
|
||||
// Just zero out the stack slot.
|
||||
// If we store a zero constant, LLVM will drown in vreg allocation for large
|
||||
// data structures, and the generated code will be awful. (A telltale sign of
|
||||
// this is large quantities of `mov [byte ptr foo],0` in the generated code.)
|
||||
memset_intrinsic(
|
||||
&self,
|
||||
self,
|
||||
false,
|
||||
ty,
|
||||
llresult,
|
||||
cx.const_u8(0),
|
||||
cx.const_usize(1)
|
||||
self.cx().const_u8(0),
|
||||
self.cx().const_usize(1)
|
||||
);
|
||||
}
|
||||
return;
|
||||
@ -208,7 +207,7 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> {
|
||||
"needs_drop" => {
|
||||
let tp_ty = substs.type_at(0);
|
||||
|
||||
cx.const_bool(cx.type_needs_drop(tp_ty))
|
||||
self.cx().const_bool(self.cx().type_needs_drop(tp_ty))
|
||||
}
|
||||
"offset" => {
|
||||
let ptr = args[0].immediate();
|
||||
@ -222,33 +221,33 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> {
|
||||
}
|
||||
|
||||
"copy_nonoverlapping" => {
|
||||
copy_intrinsic(&self, false, false, substs.type_at(0),
|
||||
copy_intrinsic(self, false, false, substs.type_at(0),
|
||||
args[1].immediate(), args[0].immediate(), args[2].immediate());
|
||||
return;
|
||||
}
|
||||
"copy" => {
|
||||
copy_intrinsic(&self, true, false, substs.type_at(0),
|
||||
copy_intrinsic(self, true, false, substs.type_at(0),
|
||||
args[1].immediate(), args[0].immediate(), args[2].immediate());
|
||||
return;
|
||||
}
|
||||
"write_bytes" => {
|
||||
memset_intrinsic(&self, false, substs.type_at(0),
|
||||
memset_intrinsic(self, false, substs.type_at(0),
|
||||
args[0].immediate(), args[1].immediate(), args[2].immediate());
|
||||
return;
|
||||
}
|
||||
|
||||
"volatile_copy_nonoverlapping_memory" => {
|
||||
copy_intrinsic(&self, false, true, substs.type_at(0),
|
||||
copy_intrinsic(self, false, true, substs.type_at(0),
|
||||
args[0].immediate(), args[1].immediate(), args[2].immediate());
|
||||
return;
|
||||
}
|
||||
"volatile_copy_memory" => {
|
||||
copy_intrinsic(&self, true, true, substs.type_at(0),
|
||||
copy_intrinsic(self, true, true, substs.type_at(0),
|
||||
args[0].immediate(), args[1].immediate(), args[2].immediate());
|
||||
return;
|
||||
}
|
||||
"volatile_set_memory" => {
|
||||
memset_intrinsic(&self, true, substs.type_at(0),
|
||||
memset_intrinsic(self, true, substs.type_at(0),
|
||||
args[0].immediate(), args[1].immediate(), args[2].immediate());
|
||||
return;
|
||||
}
|
||||
@ -256,32 +255,32 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> {
|
||||
let tp_ty = substs.type_at(0);
|
||||
let mut ptr = args[0].immediate();
|
||||
if let PassMode::Cast(ty) = fn_ty.ret.mode {
|
||||
ptr = self.pointercast(ptr, cx.type_ptr_to(ty.llvm_type(cx)));
|
||||
ptr = self.pointercast(ptr, self.cx().type_ptr_to(ty.llvm_type(self.cx())));
|
||||
}
|
||||
let load = self.volatile_load(ptr);
|
||||
let align = if name == "unaligned_volatile_load" {
|
||||
1
|
||||
} else {
|
||||
cx.align_of(tp_ty).abi() as u32
|
||||
self.cx().align_of(tp_ty).abi() as u32
|
||||
};
|
||||
unsafe {
|
||||
llvm::LLVMSetAlignment(load, align);
|
||||
}
|
||||
to_immediate(self, load, cx.layout_of(tp_ty))
|
||||
to_immediate(self, load, self.cx().layout_of(tp_ty))
|
||||
},
|
||||
"volatile_store" => {
|
||||
let dst = args[0].deref(cx);
|
||||
let dst = args[0].deref(self.cx());
|
||||
args[1].val.volatile_store(self, dst);
|
||||
return;
|
||||
},
|
||||
"unaligned_volatile_store" => {
|
||||
let dst = args[0].deref(cx);
|
||||
let dst = args[0].deref(self.cx());
|
||||
args[1].val.unaligned_volatile_store(self, dst);
|
||||
return;
|
||||
},
|
||||
"prefetch_read_data" | "prefetch_write_data" |
|
||||
"prefetch_read_instruction" | "prefetch_write_instruction" => {
|
||||
let expect = cx.get_intrinsic(&("llvm.prefetch"));
|
||||
let expect = self.cx().get_intrinsic(&("llvm.prefetch"));
|
||||
let (rw, cache_type) = match name {
|
||||
"prefetch_read_data" => (0, 1),
|
||||
"prefetch_write_data" => (1, 1),
|
||||
@ -291,9 +290,9 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> {
|
||||
};
|
||||
self.call(expect, &[
|
||||
args[0].immediate(),
|
||||
cx.const_i32(rw),
|
||||
self.cx().const_i32(rw),
|
||||
args[1].immediate(),
|
||||
cx.const_i32(cache_type)
|
||||
self.cx().const_i32(cache_type)
|
||||
], None)
|
||||
},
|
||||
"ctlz" | "ctlz_nonzero" | "cttz" | "cttz_nonzero" | "ctpop" | "bswap" |
|
||||
@ -302,22 +301,24 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> {
|
||||
"unchecked_div" | "unchecked_rem" | "unchecked_shl" | "unchecked_shr" | "exact_div" |
|
||||
"rotate_left" | "rotate_right" => {
|
||||
let ty = arg_tys[0];
|
||||
match int_type_width_signed(ty, cx) {
|
||||
match int_type_width_signed(ty, self.cx()) {
|
||||
Some((width, signed)) =>
|
||||
match name {
|
||||
"ctlz" | "cttz" => {
|
||||
let y = cx.const_bool(false);
|
||||
let llfn = cx.get_intrinsic(&format!("llvm.{}.i{}", name, width));
|
||||
let y = self.cx().const_bool(false);
|
||||
let llfn = self.cx().get_intrinsic(
|
||||
&format!("llvm.{}.i{}", name, width),
|
||||
);
|
||||
self.call(llfn, &[args[0].immediate(), y], None)
|
||||
}
|
||||
"ctlz_nonzero" | "cttz_nonzero" => {
|
||||
let y = cx.const_bool(true);
|
||||
let y = self.cx().const_bool(true);
|
||||
let llvm_name = &format!("llvm.{}.i{}", &name[..4], width);
|
||||
let llfn = cx.get_intrinsic(llvm_name);
|
||||
let llfn = self.cx().get_intrinsic(llvm_name);
|
||||
self.call(llfn, &[args[0].immediate(), y], None)
|
||||
}
|
||||
"ctpop" => self.call(
|
||||
cx.get_intrinsic(&format!("llvm.ctpop.i{}", width)),
|
||||
self.cx().get_intrinsic(&format!("llvm.ctpop.i{}", width)),
|
||||
&[args[0].immediate()],
|
||||
None
|
||||
),
|
||||
@ -325,19 +326,29 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> {
|
||||
if width == 8 {
|
||||
args[0].immediate() // byte swap a u8/i8 is just a no-op
|
||||
} else {
|
||||
self.call(cx.get_intrinsic(&format!("llvm.bswap.i{}", width)),
|
||||
&[args[0].immediate()], None)
|
||||
self.call(
|
||||
self.cx().get_intrinsic(
|
||||
&format!("llvm.bswap.i{}", width),
|
||||
),
|
||||
&[args[0].immediate()],
|
||||
None,
|
||||
)
|
||||
}
|
||||
}
|
||||
"bitreverse" => {
|
||||
self.call(cx.get_intrinsic(&format!("llvm.bitreverse.i{}", width)),
|
||||
&[args[0].immediate()], None)
|
||||
self.call(
|
||||
self.cx().get_intrinsic(
|
||||
&format!("llvm.bitreverse.i{}", width),
|
||||
),
|
||||
&[args[0].immediate()],
|
||||
None,
|
||||
)
|
||||
}
|
||||
"add_with_overflow" | "sub_with_overflow" | "mul_with_overflow" => {
|
||||
let intrinsic = format!("llvm.{}{}.with.overflow.i{}",
|
||||
if signed { 's' } else { 'u' },
|
||||
&name[..3], width);
|
||||
let llfn = cx.get_intrinsic(&intrinsic);
|
||||
let llfn = self.cx().get_intrinsic(&intrinsic);
|
||||
|
||||
// Convert `i1` to a `bool`, and write it to the out parameter
|
||||
let pair = self.call(llfn, &[
|
||||
@ -345,10 +356,8 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> {
|
||||
args[1].immediate()
|
||||
], None);
|
||||
let val = self.extract_value(pair, 0);
|
||||
let overflow = self.zext(
|
||||
self.extract_value(pair, 1),
|
||||
cx.type_bool()
|
||||
);
|
||||
let overflow = self.extract_value(pair, 1);
|
||||
let overflow = self.zext(overflow, self.cx().type_bool());
|
||||
|
||||
let dest = result.project_field(self, 0);
|
||||
self.store(val, dest.llval, dest.align);
|
||||
@ -393,14 +402,18 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> {
|
||||
// rotate = funnel shift with first two args the same
|
||||
let llvm_name = &format!("llvm.fsh{}.i{}",
|
||||
if is_left { 'l' } else { 'r' }, width);
|
||||
let llfn = cx.get_intrinsic(llvm_name);
|
||||
let llfn = self.cx().get_intrinsic(llvm_name);
|
||||
self.call(llfn, &[val, val, raw_shift], None)
|
||||
} else {
|
||||
// rotate_left: (X << (S % BW)) | (X >> ((BW - S) % BW))
|
||||
// rotate_right: (X << ((BW - S) % BW)) | (X >> (S % BW))
|
||||
let width = cx.const_uint(cx.type_ix(width), width);
|
||||
let width = self.cx().const_uint(
|
||||
self.cx().type_ix(width),
|
||||
width,
|
||||
);
|
||||
let shift = self.urem(raw_shift, width);
|
||||
let inv_shift = self.urem(self.sub(width, raw_shift), width);
|
||||
let width_minus_raw_shift = self.sub(width, raw_shift);
|
||||
let inv_shift = self.urem(width_minus_raw_shift, width);
|
||||
let shift1 = self.shl(
|
||||
val,
|
||||
if is_left { shift } else { inv_shift },
|
||||
@ -448,11 +461,11 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> {
|
||||
},
|
||||
|
||||
"discriminant_value" => {
|
||||
args[0].deref(cx).codegen_get_discr(self, ret_ty)
|
||||
args[0].deref(self.cx()).codegen_get_discr(self, ret_ty)
|
||||
}
|
||||
|
||||
name if name.starts_with("simd_") => {
|
||||
match generic_simd_intrinsic(&self, name,
|
||||
match generic_simd_intrinsic(self, name,
|
||||
callee_ty,
|
||||
args,
|
||||
ret_ty, llret_ty,
|
||||
@ -483,16 +496,16 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> {
|
||||
(SequentiallyConsistent, Monotonic),
|
||||
"failacq" if is_cxchg =>
|
||||
(SequentiallyConsistent, Acquire),
|
||||
_ => cx.sess().fatal("unknown ordering in atomic intrinsic")
|
||||
_ => self.cx().sess().fatal("unknown ordering in atomic intrinsic")
|
||||
},
|
||||
4 => match (split[2], split[3]) {
|
||||
("acq", "failrelaxed") if is_cxchg =>
|
||||
(Acquire, Monotonic),
|
||||
("acqrel", "failrelaxed") if is_cxchg =>
|
||||
(AcquireRelease, Monotonic),
|
||||
_ => cx.sess().fatal("unknown ordering in atomic intrinsic")
|
||||
_ => self.cx().sess().fatal("unknown ordering in atomic intrinsic")
|
||||
},
|
||||
_ => cx.sess().fatal("Atomic intrinsic not in correct format"),
|
||||
_ => self.cx().sess().fatal("Atomic intrinsic not in correct format"),
|
||||
};
|
||||
|
||||
let invalid_monomorphization = |ty| {
|
||||
@ -504,7 +517,7 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> {
|
||||
match split[1] {
|
||||
"cxchg" | "cxchgweak" => {
|
||||
let ty = substs.type_at(0);
|
||||
if int_type_width_signed(ty, cx).is_some() {
|
||||
if int_type_width_signed(ty, self.cx()).is_some() {
|
||||
let weak = split[1] == "cxchgweak";
|
||||
let pair = self.atomic_cmpxchg(
|
||||
args[0].immediate(),
|
||||
@ -514,10 +527,8 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> {
|
||||
failorder,
|
||||
weak);
|
||||
let val = self.extract_value(pair, 0);
|
||||
let success = self.zext(
|
||||
self.extract_value(pair, 1),
|
||||
cx.type_bool()
|
||||
);
|
||||
let success = self.extract_value(pair, 1);
|
||||
let success = self.zext(success, self.cx().type_bool());
|
||||
|
||||
let dest = result.project_field(self, 0);
|
||||
self.store(val, dest.llval, dest.align);
|
||||
@ -531,8 +542,8 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> {
|
||||
|
||||
"load" => {
|
||||
let ty = substs.type_at(0);
|
||||
if int_type_width_signed(ty, cx).is_some() {
|
||||
let size = cx.size_of(ty);
|
||||
if int_type_width_signed(ty, self.cx()).is_some() {
|
||||
let size = self.cx().size_of(ty);
|
||||
self.atomic_load(args[0].immediate(), order, size)
|
||||
} else {
|
||||
return invalid_monomorphization(ty);
|
||||
@ -541,8 +552,8 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> {
|
||||
|
||||
"store" => {
|
||||
let ty = substs.type_at(0);
|
||||
if int_type_width_signed(ty, cx).is_some() {
|
||||
let size = cx.size_of(ty);
|
||||
if int_type_width_signed(ty, self.cx()).is_some() {
|
||||
let size = self.cx().size_of(ty);
|
||||
self.atomic_store(
|
||||
args[1].immediate(),
|
||||
args[0].immediate(),
|
||||
@ -579,11 +590,11 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> {
|
||||
"min" => AtomicRmwBinOp::AtomicMin,
|
||||
"umax" => AtomicRmwBinOp::AtomicUMax,
|
||||
"umin" => AtomicRmwBinOp::AtomicUMin,
|
||||
_ => cx.sess().fatal("unknown atomic operation")
|
||||
_ => self.cx().sess().fatal("unknown atomic operation")
|
||||
};
|
||||
|
||||
let ty = substs.type_at(0);
|
||||
if int_type_width_signed(ty, cx).is_some() {
|
||||
if int_type_width_signed(ty, self.cx()).is_some() {
|
||||
self.atomic_rmw(
|
||||
atom_op,
|
||||
args[0].immediate(),
|
||||
@ -598,7 +609,7 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> {
|
||||
}
|
||||
|
||||
"nontemporal_store" => {
|
||||
let dst = args[0].deref(cx);
|
||||
let dst = args[0].deref(self.cx());
|
||||
args[1].val.nontemporal_store(self, dst);
|
||||
return;
|
||||
}
|
||||
@ -658,7 +669,7 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> {
|
||||
// arguments to be truncated as needed and pointers to be
|
||||
// cast.
|
||||
fn modify_as_needed<'ll, 'tcx>(
|
||||
bx: &Builder<'_, 'll, 'tcx>,
|
||||
bx: &mut Builder<'_, 'll, 'tcx>,
|
||||
t: &intrinsics::Type,
|
||||
arg: &OperandRef<'tcx, &'ll Value>,
|
||||
) -> Vec<&'ll Value> {
|
||||
@ -677,7 +688,8 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> {
|
||||
};
|
||||
let arg = PlaceRef::new_sized(ptr, arg.layout, align);
|
||||
(0..contents.len()).map(|i| {
|
||||
bx.load_operand(arg.project_field(bx, i)).immediate()
|
||||
let field = arg.project_field(bx, i);
|
||||
bx.load_operand(field).immediate()
|
||||
}).collect()
|
||||
}
|
||||
intrinsics::Type::Pointer(_, Some(ref llvm_elem), _) => {
|
||||
@ -703,21 +715,21 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> {
|
||||
|
||||
|
||||
let inputs = intr.inputs.iter()
|
||||
.flat_map(|t| ty_to_type(cx, t))
|
||||
.flat_map(|t| ty_to_type(self.cx(), t))
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
let outputs = one(ty_to_type(cx, &intr.output));
|
||||
let outputs = one(ty_to_type(self.cx(), &intr.output));
|
||||
|
||||
let llargs: Vec<_> = intr.inputs.iter().zip(args).flat_map(|(t, arg)| {
|
||||
modify_as_needed(&self, t, arg)
|
||||
modify_as_needed(self, t, arg)
|
||||
}).collect();
|
||||
assert_eq!(inputs.len(), llargs.len());
|
||||
|
||||
let val = match intr.definition {
|
||||
intrinsics::IntrinsicDef::Named(name) => {
|
||||
let f = cx.declare_cfn(
|
||||
let f = self.cx().declare_cfn(
|
||||
name,
|
||||
cx.type_func(&inputs, outputs),
|
||||
self.cx().type_func(&inputs, outputs),
|
||||
);
|
||||
self.call(f, &llargs, None)
|
||||
}
|
||||
@ -742,7 +754,8 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> {
|
||||
|
||||
if !fn_ty.ret.is_ignore() {
|
||||
if let PassMode::Cast(ty) = fn_ty.ret.mode {
|
||||
let ptr = self.pointercast(result.llval, cx.type_ptr_to(ty.llvm_type(cx)));
|
||||
let ptr_llty = self.cx().type_ptr_to(ty.llvm_type(self.cx()));
|
||||
let ptr = self.pointercast(result.llval, ptr_llty);
|
||||
self.store(llval, ptr, result.align);
|
||||
} else {
|
||||
OperandRef::from_immediate_or_packed_pair(self, llval, result.layout)
|
||||
@ -753,7 +766,7 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> {
|
||||
}
|
||||
|
||||
fn copy_intrinsic(
bx: &Builder<'a, 'll, 'tcx>,
bx: &mut Builder<'a, 'll, 'tcx>,
allow_overlap: bool,
volatile: bool,
ty: Ty<'tcx>,
@ -776,7 +789,7 @@ fn copy_intrinsic(
}

fn memset_intrinsic(
bx: &Builder<'a, 'll, 'tcx>,
bx: &mut Builder<'a, 'll, 'tcx>,
volatile: bool,
ty: Ty<'tcx>,
dst: &'ll Value,
@ -784,18 +797,17 @@ fn memset_intrinsic(
count: &'ll Value
) {
let (size, align) = bx.cx().size_and_align_of(ty);
let size = bx.cx().const_usize(size.bytes());
let size = bx.mul(bx.cx().const_usize(size.bytes()), count);
let flags = if volatile {
MemFlags::VOLATILE
} else {
MemFlags::empty()
};
bx.memset(dst, val, bx.mul(size, count), align, flags);
bx.memset(dst, val, size, align, flags);
}
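
The memset_intrinsic change is the call-site pattern this commit needs throughout: once builder methods take &mut self, an argument can no longer be produced by a nested call on the same builder inside another call's argument list. A minimal, self-contained sketch of that constraint, using an invented toy Builder rather than the compiler's:

// Illustrative only: a toy builder whose methods take &mut self.
struct Builder { insts: Vec<String> }

impl Builder {
    fn mul(&mut self, a: u64, b: u64) -> u64 {
        self.insts.push(format!("mul {} {}", a, b));
        a * b
    }
    fn memset(&mut self, dst: u64, val: u8, size: u64) {
        self.insts.push(format!("memset dst={} val={} size={}", dst, val, size));
    }
}

fn emit_memset(bx: &mut Builder, dst: u64, val: u8, elem_size: u64, count: u64) {
    // `bx.memset(dst, val, bx.mul(elem_size, count))` would need two
    // overlapping mutable borrows of `bx`, so the size is hoisted into its
    // own binding first -- the same reshuffling as in memset_intrinsic above.
    let size = bx.mul(elem_size, count);
    bx.memset(dst, val, size);
}

fn main() {
    let mut bx = Builder { insts: Vec::new() };
    emit_memset(&mut bx, 0x1000, 0, 8, 4);
    assert_eq!(bx.insts.len(), 2);
}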

fn try_intrinsic(
bx: &Builder<'a, 'll, 'tcx>,
cx: &CodegenCx<'ll, 'tcx>,
bx: &mut Builder<'a, 'll, 'tcx>,
func: &'ll Value,
data: &'ll Value,
local_ptr: &'ll Value,
@ -804,11 +816,11 @@ fn try_intrinsic(
if bx.cx().sess().no_landing_pads() {
bx.call(func, &[data], None);
let ptr_align = bx.tcx().data_layout.pointer_align;
bx.store(cx.const_null(cx.type_i8p()), dest, ptr_align);
bx.store(bx.cx().const_null(bx.cx().type_i8p()), dest, ptr_align);
} else if wants_msvc_seh(bx.cx().sess()) {
codegen_msvc_try(bx, cx, func, data, local_ptr, dest);
codegen_msvc_try(bx, func, data, local_ptr, dest);
} else {
codegen_gnu_try(bx, cx, func, data, local_ptr, dest);
codegen_gnu_try(bx, func, data, local_ptr, dest);
}
}

@ -820,21 +832,18 @@ fn try_intrinsic(
// writing, however, LLVM does not recommend the usage of these new instructions
// as the old ones are still more optimized.
fn codegen_msvc_try(
bx: &Builder<'a, 'll, 'tcx>,
cx: &CodegenCx<'ll, 'tcx>,
bx: &mut Builder<'a, 'll, 'tcx>,
func: &'ll Value,
data: &'ll Value,
local_ptr: &'ll Value,
dest: &'ll Value,
) {
let llfn = get_rust_try_fn(cx, &mut |bx| {
let cx = bx.cx();

let llfn = get_rust_try_fn(bx.cx(), &mut |mut bx| {
bx.set_personality_fn(bx.cx().eh_personality());

let mut normal = bx.build_sibling_block("normal");
let catchswitch = bx.build_sibling_block("catchswitch");
let catchpad = bx.build_sibling_block("catchpad");
let mut catchswitch = bx.build_sibling_block("catchswitch");
let mut catchpad = bx.build_sibling_block("catchpad");
let mut caught = bx.build_sibling_block("caught");

let func = llvm::get_param(bx.llfn(), 0);
@ -880,34 +889,35 @@ fn codegen_msvc_try(
// }
//
// More information can be found in libstd's seh.rs implementation.
let i64p = cx.type_ptr_to(cx.type_i64());
let i64p = bx.cx().type_ptr_to(bx.cx().type_i64());
let ptr_align = bx.tcx().data_layout.pointer_align;
let slot = bx.alloca(i64p, "slot", ptr_align);
bx.invoke(func, &[data], normal.llbb(), catchswitch.llbb(), None);

normal.ret(cx.const_i32(0));
normal.ret(bx.cx().const_i32(0));

let cs = catchswitch.catch_switch(None, None, 1);
catchswitch.add_handler(cs, catchpad.llbb());

let tcx = cx.tcx;
let tydesc = match tcx.lang_items().msvc_try_filter() {
Some(did) => cx.get_static(did),
let tydesc = match bx.tcx().lang_items().msvc_try_filter() {
Some(did) => bx.cx().get_static(did),
None => bug!("msvc_try_filter not defined"),
};
let funclet = catchpad.catch_pad(cs, &[tydesc, cx.const_i32(0), slot]);
let funclet = catchpad.catch_pad(cs, &[tydesc, bx.cx().const_i32(0), slot]);
let addr = catchpad.load(slot, ptr_align);

let i64_align = bx.tcx().data_layout.i64_align;
let arg1 = catchpad.load(addr, i64_align);
let val1 = cx.const_i32(1);
let arg2 = catchpad.load(catchpad.inbounds_gep(addr, &[val1]), i64_align);
let val1 = bx.cx().const_i32(1);
let gep1 = catchpad.inbounds_gep(addr, &[val1]);
let arg2 = catchpad.load(gep1, i64_align);
let local_ptr = catchpad.bitcast(local_ptr, i64p);
let gep2 = catchpad.inbounds_gep(local_ptr, &[val1]);
catchpad.store(arg1, local_ptr, i64_align);
catchpad.store(arg2, catchpad.inbounds_gep(local_ptr, &[val1]), i64_align);
catchpad.store(arg2, gep2, i64_align);
catchpad.catch_ret(&funclet, caught.llbb());

caught.ret(cx.const_i32(1));
caught.ret(bx.cx().const_i32(1));
});

// Note that no invoke is used here because by definition this function
@ -929,16 +939,13 @@ fn codegen_msvc_try(
// functions in play. By calling a shim we're guaranteed that our shim will have
// the right personality function.
fn codegen_gnu_try(
bx: &Builder<'a, 'll, 'tcx>,
cx: &CodegenCx<'ll, 'tcx>,
bx: &mut Builder<'a, 'll, 'tcx>,
func: &'ll Value,
data: &'ll Value,
local_ptr: &'ll Value,
dest: &'ll Value,
) {
let llfn = get_rust_try_fn(cx, &mut |bx| {
let cx = bx.cx();

let llfn = get_rust_try_fn(bx.cx(), &mut |mut bx| {
// Codegens the shims described above:
//
// bx:
@ -963,7 +970,7 @@ fn codegen_gnu_try(
let data = llvm::get_param(bx.llfn(), 1);
let local_ptr = llvm::get_param(bx.llfn(), 2);
bx.invoke(func, &[data], then.llbb(), catch.llbb(), None);
then.ret(cx.const_i32(0));
then.ret(bx.cx().const_i32(0));

// Type indicator for the exception being thrown.
//
@ -971,13 +978,14 @@ fn codegen_gnu_try(
// being thrown. The second value is a "selector" indicating which of
// the landing pad clauses the exception's type had been matched to.
// rust_try ignores the selector.
let lpad_ty = cx.type_struct(&[cx.type_i8p(), cx.type_i32()], false);
let lpad_ty = bx.cx().type_struct(&[bx.cx().type_i8p(), bx.cx().type_i32()], false);
let vals = catch.landing_pad(lpad_ty, bx.cx().eh_personality(), 1);
catch.add_clause(vals, bx.cx().const_null(cx.type_i8p()));
catch.add_clause(vals, bx.cx().const_null(bx.cx().type_i8p()));
let ptr = catch.extract_value(vals, 0);
let ptr_align = bx.tcx().data_layout.pointer_align;
catch.store(ptr, catch.bitcast(local_ptr, cx.type_ptr_to(cx.type_i8p())), ptr_align);
catch.ret(cx.const_i32(1));
let bitcast = catch.bitcast(local_ptr, bx.cx().type_ptr_to(bx.cx().type_i8p()));
catch.store(ptr, bitcast, ptr_align);
catch.ret(bx.cx().const_i32(1));
});

// Note that no invoke is used here because by definition this function
@ -1043,7 +1051,7 @@ fn span_invalid_monomorphization_error(a: &Session, b: Span, c: &str) {
}

fn generic_simd_intrinsic(
bx: &Builder<'a, 'll, 'tcx>,
bx: &mut Builder<'a, 'll, 'tcx>,
name: &str,
callee_ty: Ty<'tcx>,
args: &[OperandRef<'tcx, &'ll Value>],
@ -1219,7 +1227,7 @@ fn generic_simd_intrinsic(
in_elem: &::rustc::ty::TyS,
in_ty: &::rustc::ty::TyS,
in_len: usize,
bx: &Builder<'a, 'll, 'tcx>,
bx: &mut Builder<'a, 'll, 'tcx>,
span: Span,
args: &[OperandRef<'tcx, &'ll Value>],
) -> Result<&'ll Value, ()> {

@ -137,7 +137,7 @@ pub fn bin_op_to_fcmp_predicate(op: hir::BinOpKind) -> RealPredicate {
}

pub fn compare_simd_types<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
bx: &Bx,
bx: &mut Bx,
lhs: Bx::Value,
rhs: Bx::Value,
t: Ty<'tcx>,
@ -147,7 +147,8 @@ pub fn compare_simd_types<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
let signed = match t.sty {
ty::Float(_) => {
let cmp = bin_op_to_fcmp_predicate(op);
return bx.sext(bx.fcmp(cmp, lhs, rhs), ret_ty);
let cmp = bx.fcmp(cmp, lhs, rhs);
return bx.sext(cmp, ret_ty);
},
ty::Uint(_) => false,
ty::Int(_) => true,
@ -155,11 +156,12 @@ pub fn compare_simd_types<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
};

let cmp = bin_op_to_icmp_predicate(op, signed);
let cmp = bx.icmp(cmp, lhs, rhs);
// LLVM outputs an `< size x i1 >`, so we need to perform a sign extension
// to get the correctly sized type. This will compile to a single instruction
// once the IR is converted to assembly if the SIMD instruction is supported
// by the target architecture.
bx.sext(bx.icmp(cmp, lhs, rhs), ret_ty)
bx.sext(cmp, ret_ty)
}

/// Retrieve the information we are losing (making dynamic) in an unsizing
@ -199,7 +201,7 @@ pub fn unsized_info<'tcx, Cx: CodegenMethods<'tcx>>(

/// Coerce `src` to `dst_ty`. `src_ty` must be a thin pointer.
pub fn unsize_thin_ptr<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
bx: &Bx,
bx: &mut Bx,
src: Bx::Value,
src_ty: Ty<'tcx>,
dst_ty: Ty<'tcx>
@ -254,13 +256,13 @@ pub fn unsize_thin_ptr<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
/// Coerce `src`, which is a reference to a value of type `src_ty`,
/// to a value of type `dst_ty` and store the result in `dst`
pub fn coerce_unsized_into<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
bx: &Bx,
bx: &mut Bx,
src: PlaceRef<'tcx, Bx::Value>,
dst: PlaceRef<'tcx, Bx::Value>
) {
let src_ty = src.layout.ty;
let dst_ty = dst.layout.ty;
let coerce_ptr = || {
let mut coerce_ptr = || {
let (base, info) = match bx.load_operand(src).val {
OperandValue::Pair(base, info) => {
// fat-ptr to fat-ptr unsize preserves the vtable
@ -313,31 +315,20 @@ pub fn coerce_unsized_into<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
}

pub fn cast_shift_expr_rhs<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
bx: &Bx,
bx: &mut Bx,
op: hir::BinOpKind,
lhs: Bx::Value,
rhs: Bx::Value
) -> Bx::Value {
cast_shift_rhs(bx, op, lhs, rhs, |a, b| bx.trunc(a, b), |a, b| bx.zext(a, b))
cast_shift_rhs(bx, op, lhs, rhs)
}

fn cast_shift_rhs<'a, 'tcx: 'a, F, G, Bx: BuilderMethods<'a, 'tcx>>(
bx: &Bx,
fn cast_shift_rhs<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
bx: &mut Bx,
op: hir::BinOpKind,
lhs: Bx::Value,
rhs: Bx::Value,
trunc: F,
zext: G
) -> Bx::Value
where F: FnOnce(
Bx::Value,
Bx::Type
) -> Bx::Value,
G: FnOnce(
Bx::Value,
Bx::Type
) -> Bx::Value
{
) -> Bx::Value {
// Shifts may have any size int on the rhs
if op.is_shift() {
let mut rhs_llty = bx.cx().val_ty(rhs);
@ -351,11 +342,11 @@ fn cast_shift_rhs<'a, 'tcx: 'a, F, G, Bx: BuilderMethods<'a, 'tcx>>(
let rhs_sz = bx.cx().int_width(rhs_llty);
let lhs_sz = bx.cx().int_width(lhs_llty);
if lhs_sz < rhs_sz {
trunc(rhs, lhs_llty)
bx.trunc(rhs, lhs_llty)
} else if lhs_sz > rhs_sz {
// FIXME (#1877: If in the future shifting by negative
// values is no longer undefined then this is wrong.
zext(rhs, lhs_llty)
bx.zext(rhs, lhs_llty)
} else {
rhs
}
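
A sketch of why the trunc/zext closure parameters above were dropped rather than kept: a closure such as |a, b| bx.trunc(a, b) captures the builder mutably, and it cannot be passed alongside a &mut borrow of the same builder. With &mut self methods the helper simply calls the casts itself. The trait and types below are invented stand-ins, not the real BuilderMethods interface:

trait CastOps {
    fn trunc(&mut self, val: u64, to_bits: u32) -> u64;
    fn zext(&mut self, val: u64, to_bits: u32) -> u64;
}

// The helper takes the builder once and performs the casts directly, instead
// of receiving FnOnce closures that would each need their own capture of `bx`.
fn cast_shift_rhs_sketch<Bx: CastOps>(bx: &mut Bx, lhs_bits: u32, rhs_bits: u32, rhs: u64) -> u64 {
    if lhs_bits < rhs_bits {
        bx.trunc(rhs, lhs_bits)
    } else if lhs_bits > rhs_bits {
        // Shift counts are unsigned, hence zero-extension when widening.
        bx.zext(rhs, lhs_bits)
    } else {
        rhs
    }
}

struct ConstFolder;

impl CastOps for ConstFolder {
    fn trunc(&mut self, val: u64, to_bits: u32) -> u64 { val & ((1u64 << to_bits) - 1) }
    fn zext(&mut self, val: u64, _to_bits: u32) -> u64 { val }
}

fn main() {
    let mut bx = ConstFolder;
    // A 64-bit shift amount is truncated for a 32-bit left-hand side.
    assert_eq!(cast_shift_rhs_sketch(&mut bx, 32, 64, 0x1_0000_0021), 0x21);
}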

@ -374,7 +365,7 @@ pub fn wants_msvc_seh(sess: &Session) -> bool {
}

pub fn call_assume<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
bx: &Bx,
bx: &mut Bx,
val: Bx::Value
) {
let assume_intrinsic = bx.cx().get_intrinsic("llvm.assume");
@ -382,7 +373,7 @@ pub fn call_assume<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
}

pub fn from_immediate<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
bx: &Bx,
bx: &mut Bx,
val: Bx::Value
) -> Bx::Value {
if bx.cx().val_ty(val) == bx.cx().type_i1() {
@ -393,7 +384,7 @@ pub fn from_immediate<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
}

pub fn to_immediate<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
bx: &Bx,
bx: &mut Bx,
val: Bx::Value,
layout: layout::TyLayout,
) -> Bx::Value {
@ -404,7 +395,7 @@ pub fn to_immediate<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
}

pub fn to_immediate_scalar<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
bx: &Bx,
bx: &mut Bx,
val: Bx::Value,
scalar: &layout::Scalar,
) -> Bx::Value {
@ -415,7 +406,7 @@ pub fn to_immediate_scalar<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
}

pub fn memcpy_ty<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
bx: &Bx,
bx: &mut Bx,
dst: Bx::Value,
dst_align: Align,
src: Bx::Value,
@ -549,7 +540,8 @@ pub fn maybe_create_entry_wrapper<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
};

let result = bx.call(start_fn, &args, None);
bx.ret(bx.intcast(result, cx.type_int(), true));
let cast = bx.intcast(result, cx.type_int(), true);
bx.ret(cast);
}
}

@ -163,7 +163,7 @@ pub fn langcall(tcx: TyCtxt,
// of Java. (See related discussion on #1877 and #10183.)

pub fn build_unchecked_lshift<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
bx: &Bx,
bx: &mut Bx,
lhs: Bx::Value,
rhs: Bx::Value
) -> Bx::Value {
@ -174,7 +174,7 @@ pub fn build_unchecked_lshift<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
}

pub fn build_unchecked_rshift<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
bx: &Bx,
bx: &mut Bx,
lhs_t: Ty<'tcx>,
lhs: Bx::Value,
rhs: Bx::Value
@ -191,15 +191,16 @@ pub fn build_unchecked_rshift<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
}

fn shift_mask_rhs<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
bx: &Bx,
bx: &mut Bx,
rhs: Bx::Value
) -> Bx::Value {
let rhs_llty = bx.cx().val_ty(rhs);
bx.and(rhs, shift_mask_val(bx, rhs_llty, rhs_llty, false))
let shift_val = shift_mask_val(bx, rhs_llty, rhs_llty, false);
bx.and(rhs, shift_val)
}
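
shift_mask_rhs masks the shift amount so that an oversized count wraps instead of producing undefined IR, the Java-like shift semantics the "of Java" comment above refers to. A standalone sketch of the same arithmetic (plain integers, not the compiler's builder values):

// Mask the shift count to `bits - 1`, the effect of the shift_mask_val /
// `and` pair above (valid because the bit width is a power of two).
fn mask_shift_amount(rhs: u64, bits: u32) -> u64 {
    rhs & u64::from(bits - 1)
}

fn unchecked_shl_sketch(lhs: u64, rhs: u64) -> u64 {
    lhs << mask_shift_amount(rhs, 64)
}

fn main() {
    // A shift count of 65 wraps to 1 rather than being out of range.
    assert_eq!(mask_shift_amount(65, 64), 1);
    assert_eq!(unchecked_shl_sketch(1, 65), 2);
}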

pub fn shift_mask_val<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
bx: &Bx,
bx: &mut Bx,
llty: Bx::Type,
mask_llty: Bx::Type,
invert: bool

@ -21,7 +21,7 @@ use rustc::ty::{self, Ty};
use interfaces::*;

pub fn size_and_align_of_dst<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
bx: &Bx,
bx: &mut Bx,
t: Ty<'tcx>,
info: Option<Bx::Value>
) -> (Bx::Value, Bx::Value) {
@ -50,12 +50,11 @@ pub fn size_and_align_of_dst<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
bx.cx().const_usize(align.abi()))
}
_ => {
let cx = bx.cx();
// First get the size of all statically known fields.
// Don't use size_of because it also rounds up to alignment, which we
// want to avoid, as the unsized field's alignment could be smaller.
assert!(!t.is_simd());
let layout = cx.layout_of(t);
let layout = bx.cx().layout_of(t);
debug!("DST {} layout: {:?}", t, layout);

let i = layout.fields.count() - 1;
@ -63,12 +62,12 @@ pub fn size_and_align_of_dst<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
let sized_align = layout.align.abi();
debug!("DST {} statically sized prefix size: {} align: {}",
t, sized_size, sized_align);
let sized_size = cx.const_usize(sized_size);
let sized_align = cx.const_usize(sized_align);
let sized_size = bx.cx().const_usize(sized_size);
let sized_align = bx.cx().const_usize(sized_align);

// Recurse to get the size of the dynamically sized field (must be
// the last field).
let field_ty = layout.field(cx, i).ty;
let field_ty = layout.field(bx.cx(), i).ty;
let (unsized_size, mut unsized_align) = size_and_align_of_dst(bx, field_ty, info);

// FIXME (#26403, #27023): We should be adding padding
@ -95,11 +94,12 @@ pub fn size_and_align_of_dst<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
(Some(sized_align), Some(unsized_align)) => {
// If both alignments are constant, (the sized_align should always be), then
// pick the correct alignment statically.
cx.const_usize(std::cmp::max(sized_align, unsized_align) as u64)
bx.cx().const_usize(std::cmp::max(sized_align, unsized_align) as u64)
}
_ => {
let cmp = bx.icmp(IntPredicate::IntUGT, sized_align, unsized_align);
bx.select(cmp, sized_align, unsized_align)
}
_ => bx.select(bx.icmp(IntPredicate::IntUGT, sized_align, unsized_align),
sized_align,
unsized_align)
};

// Issue #27023: must add any necessary padding to `size`
@ -112,9 +112,11 @@ pub fn size_and_align_of_dst<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
// emulated via the semi-standard fast bit trick:
//
// `(size + (align-1)) & -align`

let addend = bx.sub(align, bx.cx().const_usize(1));
let size = bx.and(bx.add(size, addend), bx.neg(align));
let one = bx.cx().const_usize(1);
let addend = bx.sub(align, one);
let add = bx.add(size, addend);
let neg = bx.neg(align);
let size = bx.and(add, neg);

(size, align)
}
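
The `(size + (align - 1)) & -align` sequence above rounds the computed size up to the next multiple of the (power-of-two) alignment; a quick standalone check of the trick, independent of the builder API:

// Round `size` up to a multiple of `align`, mirroring the builder calls
// above: sub to get align-1, add, negate, and.
fn round_up_to_align(size: u64, align: u64) -> u64 {
    assert!(align.is_power_of_two());
    let addend = align - 1;
    size.wrapping_add(addend) & align.wrapping_neg()
}

fn main() {
    assert_eq!(round_up_to_align(12, 8), 16);
    assert_eq!(round_up_to_align(16, 8), 16);
    assert_eq!(round_up_to_align(0, 8), 0);
}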

@ -19,5 +19,5 @@ pub trait AbiMethods<'tcx> {
}

pub trait AbiBuilderMethods<'tcx>: HasCodegen<'tcx> {
fn apply_attrs_callsite(&self, ty: &FnType<'tcx, Ty<'tcx>>, callsite: Self::Value);
fn apply_attrs_callsite(&mut self, ty: &FnType<'tcx, Ty<'tcx>>, callsite: Self::Value);
}

@ -16,7 +16,7 @@ use rustc::hir::{GlobalAsm, InlineAsm};
pub trait AsmBuilderMethods<'tcx>: HasCodegen<'tcx> {
// Take an inline assembly expression and splat it out via LLVM
fn codegen_inline_asm(
&self,
&mut self,
ia: &InlineAsm,
outputs: Vec<PlaceRef<'tcx, Self::Value>>,
inputs: Vec<Self::Value>,
@ -53,98 +53,115 @@ pub trait BuilderMethods<'a, 'tcx: 'a>:
|
||||
then_llbb: Self::BasicBlock,
|
||||
else_llbb: Self::BasicBlock,
|
||||
);
|
||||
fn switch(&self, v: Self::Value, else_llbb: Self::BasicBlock, num_cases: usize) -> Self::Value;
|
||||
fn switch(
|
||||
&mut self,
|
||||
v: Self::Value,
|
||||
else_llbb: Self::BasicBlock,
|
||||
num_cases: usize,
|
||||
) -> Self::Value;
|
||||
fn invoke(
|
||||
&self,
|
||||
&mut self,
|
||||
llfn: Self::Value,
|
||||
args: &[Self::Value],
|
||||
then: Self::BasicBlock,
|
||||
catch: Self::BasicBlock,
|
||||
funclet: Option<&Self::Funclet>,
|
||||
) -> Self::Value;
|
||||
fn unreachable(&self);
|
||||
fn add(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
|
||||
fn fadd(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
|
||||
fn fadd_fast(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
|
||||
fn sub(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
|
||||
fn fsub(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
|
||||
fn fsub_fast(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
|
||||
fn mul(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
|
||||
fn fmul(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
|
||||
fn fmul_fast(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
|
||||
fn udiv(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
|
||||
fn exactudiv(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
|
||||
fn sdiv(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
|
||||
fn exactsdiv(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
|
||||
fn fdiv(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
|
||||
fn fdiv_fast(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
|
||||
fn urem(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
|
||||
fn srem(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
|
||||
fn frem(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
|
||||
fn frem_fast(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
|
||||
fn shl(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
|
||||
fn lshr(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
|
||||
fn ashr(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
|
||||
fn and(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
|
||||
fn or(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
|
||||
fn xor(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
|
||||
fn neg(&self, v: Self::Value) -> Self::Value;
|
||||
fn fneg(&self, v: Self::Value) -> Self::Value;
|
||||
fn not(&self, v: Self::Value) -> Self::Value;
|
||||
fn unreachable(&mut self);
|
||||
fn add(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
|
||||
fn fadd(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
|
||||
fn fadd_fast(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
|
||||
fn sub(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
|
||||
fn fsub(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
|
||||
fn fsub_fast(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
|
||||
fn mul(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
|
||||
fn fmul(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
|
||||
fn fmul_fast(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
|
||||
fn udiv(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
|
||||
fn exactudiv(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
|
||||
fn sdiv(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
|
||||
fn exactsdiv(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
|
||||
fn fdiv(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
|
||||
fn fdiv_fast(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
|
||||
fn urem(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
|
||||
fn srem(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
|
||||
fn frem(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
|
||||
fn frem_fast(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
|
||||
fn shl(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
|
||||
fn lshr(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
|
||||
fn ashr(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
|
||||
fn and(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
|
||||
fn or(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
|
||||
fn xor(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
|
||||
fn neg(&mut self, v: Self::Value) -> Self::Value;
|
||||
fn fneg(&mut self, v: Self::Value) -> Self::Value;
|
||||
fn not(&mut self, v: Self::Value) -> Self::Value;
|
||||
|
||||
fn alloca(&self, ty: Self::Type, name: &str, align: Align) -> Self::Value;
|
||||
fn dynamic_alloca(&self, ty: Self::Type, name: &str, align: Align) -> Self::Value;
|
||||
fn alloca(&mut self, ty: Self::Type, name: &str, align: Align) -> Self::Value;
|
||||
fn dynamic_alloca(&mut self, ty: Self::Type, name: &str, align: Align) -> Self::Value;
|
||||
fn array_alloca(
|
||||
&self,
|
||||
&mut self,
|
||||
ty: Self::Type,
|
||||
len: Self::Value,
|
||||
name: &str,
|
||||
align: Align,
|
||||
) -> Self::Value;
|
||||
|
||||
fn load(&self, ptr: Self::Value, align: Align) -> Self::Value;
|
||||
fn volatile_load(&self, ptr: Self::Value) -> Self::Value;
|
||||
fn atomic_load(&self, ptr: Self::Value, order: AtomicOrdering, size: Size) -> Self::Value;
|
||||
fn load_operand(&self, place: PlaceRef<'tcx, Self::Value>) -> OperandRef<'tcx, Self::Value>;
|
||||
fn load(&mut self, ptr: Self::Value, align: Align) -> Self::Value;
|
||||
fn volatile_load(&mut self, ptr: Self::Value) -> Self::Value;
|
||||
fn atomic_load(&mut self, ptr: Self::Value, order: AtomicOrdering, size: Size) -> Self::Value;
|
||||
fn load_operand(&mut self, place: PlaceRef<'tcx, Self::Value>)
|
||||
-> OperandRef<'tcx, Self::Value>;
|
||||
|
||||
fn range_metadata(&self, load: Self::Value, range: Range<u128>);
|
||||
fn nonnull_metadata(&self, load: Self::Value);
|
||||
fn range_metadata(&mut self, load: Self::Value, range: Range<u128>);
|
||||
fn nonnull_metadata(&mut self, load: Self::Value);
|
||||
|
||||
fn store(&self, val: Self::Value, ptr: Self::Value, align: Align) -> Self::Value;
|
||||
fn store(&mut self, val: Self::Value, ptr: Self::Value, align: Align) -> Self::Value;
|
||||
fn store_with_flags(
|
||||
&self,
|
||||
&mut self,
|
||||
val: Self::Value,
|
||||
ptr: Self::Value,
|
||||
align: Align,
|
||||
flags: MemFlags,
|
||||
) -> Self::Value;
|
||||
fn atomic_store(&self, val: Self::Value, ptr: Self::Value, order: AtomicOrdering, size: Size);
|
||||
fn atomic_store(
|
||||
&mut self,
|
||||
val: Self::Value,
|
||||
ptr: Self::Value,
|
||||
order: AtomicOrdering,
|
||||
size: Size,
|
||||
);
|
||||
|
||||
fn gep(&self, ptr: Self::Value, indices: &[Self::Value]) -> Self::Value;
|
||||
fn inbounds_gep(&self, ptr: Self::Value, indices: &[Self::Value]) -> Self::Value;
|
||||
fn struct_gep(&self, ptr: Self::Value, idx: u64) -> Self::Value;
|
||||
fn gep(&mut self, ptr: Self::Value, indices: &[Self::Value]) -> Self::Value;
|
||||
fn inbounds_gep(&mut self, ptr: Self::Value, indices: &[Self::Value]) -> Self::Value;
|
||||
fn struct_gep(&mut self, ptr: Self::Value, idx: u64) -> Self::Value;
|
||||
|
||||
fn trunc(&self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
|
||||
fn sext(&self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
|
||||
fn fptoui(&self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
|
||||
fn fptosi(&self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
|
||||
fn uitofp(&self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
|
||||
fn sitofp(&self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
|
||||
fn fptrunc(&self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
|
||||
fn fpext(&self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
|
||||
fn ptrtoint(&self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
|
||||
fn inttoptr(&self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
|
||||
fn bitcast(&self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
|
||||
fn intcast(&self, val: Self::Value, dest_ty: Self::Type, is_signed: bool) -> Self::Value;
|
||||
fn pointercast(&self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
|
||||
fn trunc(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
|
||||
fn sext(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
|
||||
fn fptoui(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
|
||||
fn fptosi(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
|
||||
fn uitofp(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
|
||||
fn sitofp(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
|
||||
fn fptrunc(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
|
||||
fn fpext(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
|
||||
fn ptrtoint(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
|
||||
fn inttoptr(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
|
||||
fn bitcast(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
|
||||
fn intcast(&mut self, val: Self::Value, dest_ty: Self::Type, is_signed: bool) -> Self::Value;
|
||||
fn pointercast(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
|
||||
|
||||
fn icmp(&self, op: IntPredicate, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
|
||||
fn fcmp(&self, op: RealPredicate, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
|
||||
fn icmp(&mut self, op: IntPredicate, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
|
||||
fn fcmp(&mut self, op: RealPredicate, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
|
||||
|
||||
fn empty_phi(&self, ty: Self::Type) -> Self::Value;
|
||||
fn phi(&self, ty: Self::Type, vals: &[Self::Value], bbs: &[Self::BasicBlock]) -> Self::Value;
|
||||
fn empty_phi(&mut self, ty: Self::Type) -> Self::Value;
|
||||
fn phi(
|
||||
&mut self,
|
||||
ty: Self::Type,
|
||||
vals: &[Self::Value],
|
||||
bbs: &[Self::BasicBlock],
|
||||
) -> Self::Value;
|
||||
fn inline_asm_call(
|
||||
&self,
|
||||
&mut self,
|
||||
asm: *const c_char,
|
||||
cons: *const c_char,
|
||||
inputs: &[Self::Value],
|
||||
@ -155,7 +172,7 @@ pub trait BuilderMethods<'a, 'tcx: 'a>:
|
||||
) -> Option<Self::Value>;
|
||||
|
||||
fn memcpy(
|
||||
&self,
|
||||
&mut self,
|
||||
dst: Self::Value,
|
||||
dst_align: Align,
|
||||
src: Self::Value,
|
||||
@ -164,7 +181,7 @@ pub trait BuilderMethods<'a, 'tcx: 'a>:
|
||||
flags: MemFlags,
|
||||
);
|
||||
fn memmove(
|
||||
&self,
|
||||
&mut self,
|
||||
dst: Self::Value,
|
||||
dst_align: Align,
|
||||
src: Self::Value,
|
||||
@ -173,7 +190,7 @@ pub trait BuilderMethods<'a, 'tcx: 'a>:
|
||||
flags: MemFlags,
|
||||
);
|
||||
fn memset(
|
||||
&self,
|
||||
&mut self,
|
||||
ptr: Self::Value,
|
||||
fill_byte: Self::Value,
|
||||
size: Self::Value,
|
||||
@ -181,56 +198,74 @@ pub trait BuilderMethods<'a, 'tcx: 'a>:
|
||||
flags: MemFlags,
|
||||
);
|
||||
|
||||
fn minnum(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
|
||||
fn maxnum(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
|
||||
fn minnum(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
|
||||
fn maxnum(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
|
||||
fn select(
|
||||
&self,
|
||||
&mut self,
|
||||
cond: Self::Value,
|
||||
then_val: Self::Value,
|
||||
else_val: Self::Value,
|
||||
) -> Self::Value;
|
||||
|
||||
fn va_arg(&self, list: Self::Value, ty: Self::Type) -> Self::Value;
|
||||
fn extract_element(&self, vec: Self::Value, idx: Self::Value) -> Self::Value;
|
||||
fn insert_element(&self, vec: Self::Value, elt: Self::Value, idx: Self::Value) -> Self::Value;
|
||||
fn shuffle_vector(&self, v1: Self::Value, v2: Self::Value, mask: Self::Value) -> Self::Value;
|
||||
fn vector_splat(&self, num_elts: usize, elt: Self::Value) -> Self::Value;
|
||||
fn vector_reduce_fadd_fast(&self, acc: Self::Value, src: Self::Value) -> Self::Value;
|
||||
fn vector_reduce_fmul_fast(&self, acc: Self::Value, src: Self::Value) -> Self::Value;
|
||||
fn vector_reduce_add(&self, src: Self::Value) -> Self::Value;
|
||||
fn vector_reduce_mul(&self, src: Self::Value) -> Self::Value;
|
||||
fn vector_reduce_and(&self, src: Self::Value) -> Self::Value;
|
||||
fn vector_reduce_or(&self, src: Self::Value) -> Self::Value;
|
||||
fn vector_reduce_xor(&self, src: Self::Value) -> Self::Value;
|
||||
fn vector_reduce_fmin(&self, src: Self::Value) -> Self::Value;
|
||||
fn vector_reduce_fmax(&self, src: Self::Value) -> Self::Value;
|
||||
fn vector_reduce_fmin_fast(&self, src: Self::Value) -> Self::Value;
|
||||
fn vector_reduce_fmax_fast(&self, src: Self::Value) -> Self::Value;
|
||||
fn vector_reduce_min(&self, src: Self::Value, is_signed: bool) -> Self::Value;
|
||||
fn vector_reduce_max(&self, src: Self::Value, is_signed: bool) -> Self::Value;
|
||||
fn extract_value(&self, agg_val: Self::Value, idx: u64) -> Self::Value;
|
||||
fn insert_value(&self, agg_val: Self::Value, elt: Self::Value, idx: u64) -> Self::Value;
|
||||
fn va_arg(&mut self, list: Self::Value, ty: Self::Type) -> Self::Value;
|
||||
fn extract_element(&mut self, vec: Self::Value, idx: Self::Value) -> Self::Value;
|
||||
fn insert_element(
|
||||
&mut self,
|
||||
vec: Self::Value,
|
||||
elt: Self::Value,
|
||||
idx: Self::Value,
|
||||
) -> Self::Value;
|
||||
fn shuffle_vector(
|
||||
&mut self,
|
||||
v1: Self::Value,
|
||||
v2: Self::Value,
|
||||
mask: Self::Value,
|
||||
) -> Self::Value;
|
||||
fn vector_splat(&mut self, num_elts: usize, elt: Self::Value) -> Self::Value;
|
||||
fn vector_reduce_fadd_fast(&mut self, acc: Self::Value, src: Self::Value) -> Self::Value;
|
||||
fn vector_reduce_fmul_fast(&mut self, acc: Self::Value, src: Self::Value) -> Self::Value;
|
||||
fn vector_reduce_add(&mut self, src: Self::Value) -> Self::Value;
|
||||
fn vector_reduce_mul(&mut self, src: Self::Value) -> Self::Value;
|
||||
fn vector_reduce_and(&mut self, src: Self::Value) -> Self::Value;
|
||||
fn vector_reduce_or(&mut self, src: Self::Value) -> Self::Value;
|
||||
fn vector_reduce_xor(&mut self, src: Self::Value) -> Self::Value;
|
||||
fn vector_reduce_fmin(&mut self, src: Self::Value) -> Self::Value;
|
||||
fn vector_reduce_fmax(&mut self, src: Self::Value) -> Self::Value;
|
||||
fn vector_reduce_fmin_fast(&mut self, src: Self::Value) -> Self::Value;
|
||||
fn vector_reduce_fmax_fast(&mut self, src: Self::Value) -> Self::Value;
|
||||
fn vector_reduce_min(&mut self, src: Self::Value, is_signed: bool) -> Self::Value;
|
||||
fn vector_reduce_max(&mut self, src: Self::Value, is_signed: bool) -> Self::Value;
|
||||
fn extract_value(&mut self, agg_val: Self::Value, idx: u64) -> Self::Value;
|
||||
fn insert_value(&mut self, agg_val: Self::Value, elt: Self::Value, idx: u64) -> Self::Value;
|
||||
|
||||
fn landing_pad(&self, ty: Self::Type, pers_fn: Self::Value, num_clauses: usize) -> Self::Value;
|
||||
fn add_clause(&self, landing_pad: Self::Value, clause: Self::Value);
|
||||
fn set_cleanup(&self, landing_pad: Self::Value);
|
||||
fn resume(&self, exn: Self::Value) -> Self::Value;
|
||||
fn cleanup_pad(&self, parent: Option<Self::Value>, args: &[Self::Value]) -> Self::Funclet;
|
||||
fn cleanup_ret(&self, funclet: &Self::Funclet, unwind: Option<Self::BasicBlock>)
|
||||
-> Self::Value;
|
||||
fn catch_pad(&self, parent: Self::Value, args: &[Self::Value]) -> Self::Funclet;
|
||||
fn catch_ret(&self, funclet: &Self::Funclet, unwind: Self::BasicBlock) -> Self::Value;
|
||||
fn landing_pad(
|
||||
&mut self,
|
||||
ty: Self::Type,
|
||||
pers_fn: Self::Value,
|
||||
num_clauses: usize,
|
||||
) -> Self::Value;
|
||||
fn add_clause(&mut self, landing_pad: Self::Value, clause: Self::Value);
|
||||
fn set_cleanup(&mut self, landing_pad: Self::Value);
|
||||
fn resume(&mut self, exn: Self::Value) -> Self::Value;
|
||||
fn cleanup_pad(&mut self, parent: Option<Self::Value>, args: &[Self::Value]) -> Self::Funclet;
|
||||
fn cleanup_ret(
|
||||
&mut self,
|
||||
funclet: &Self::Funclet,
|
||||
unwind: Option<Self::BasicBlock>,
|
||||
) -> Self::Value;
|
||||
fn catch_pad(&mut self, parent: Self::Value, args: &[Self::Value]) -> Self::Funclet;
|
||||
fn catch_ret(&mut self, funclet: &Self::Funclet, unwind: Self::BasicBlock) -> Self::Value;
|
||||
fn catch_switch(
|
||||
&self,
|
||||
&mut self,
|
||||
parent: Option<Self::Value>,
|
||||
unwind: Option<Self::BasicBlock>,
|
||||
num_handlers: usize,
|
||||
) -> Self::Value;
|
||||
fn add_handler(&self, catch_switch: Self::Value, handler: Self::BasicBlock);
|
||||
fn set_personality_fn(&self, personality: Self::Value);
|
||||
fn add_handler(&mut self, catch_switch: Self::Value, handler: Self::BasicBlock);
|
||||
fn set_personality_fn(&mut self, personality: Self::Value);
|
||||
|
||||
fn atomic_cmpxchg(
|
||||
&self,
|
||||
&mut self,
|
||||
dst: Self::Value,
|
||||
cmp: Self::Value,
|
||||
src: Self::Value,
|
||||
@ -239,31 +274,31 @@ pub trait BuilderMethods<'a, 'tcx: 'a>:
|
||||
weak: bool,
|
||||
) -> Self::Value;
|
||||
fn atomic_rmw(
|
||||
&self,
|
||||
&mut self,
|
||||
op: AtomicRmwBinOp,
|
||||
dst: Self::Value,
|
||||
src: Self::Value,
|
||||
order: AtomicOrdering,
|
||||
) -> Self::Value;
|
||||
fn atomic_fence(&self, order: AtomicOrdering, scope: SynchronizationScope);
|
||||
fn add_case(&self, s: Self::Value, on_val: Self::Value, dest: Self::BasicBlock);
|
||||
fn add_incoming_to_phi(&self, phi: Self::Value, val: Self::Value, bb: Self::BasicBlock);
|
||||
fn set_invariant_load(&self, load: Self::Value);
|
||||
fn atomic_fence(&mut self, order: AtomicOrdering, scope: SynchronizationScope);
|
||||
fn add_case(&mut self, s: Self::Value, on_val: Self::Value, dest: Self::BasicBlock);
|
||||
fn add_incoming_to_phi(&mut self, phi: Self::Value, val: Self::Value, bb: Self::BasicBlock);
|
||||
fn set_invariant_load(&mut self, load: Self::Value);
|
||||
|
||||
/// Returns the ptr value that should be used for storing `val`.
|
||||
fn check_store(&self, val: Self::Value, ptr: Self::Value) -> Self::Value;
|
||||
fn check_store(&mut self, val: Self::Value, ptr: Self::Value) -> Self::Value;
|
||||
|
||||
/// Returns the args that should be used for a call to `llfn`.
|
||||
fn check_call<'b>(
|
||||
&self,
|
||||
&mut self,
|
||||
typ: &str,
|
||||
llfn: Self::Value,
|
||||
args: &'b [Self::Value],
|
||||
) -> Cow<'b, [Self::Value]>
|
||||
where
|
||||
[Self::Value]: ToOwned;
|
||||
fn lifetime_start(&self, ptr: Self::Value, size: Size);
|
||||
fn lifetime_end(&self, ptr: Self::Value, size: Size);
|
||||
fn lifetime_start(&mut self, ptr: Self::Value, size: Size);
|
||||
fn lifetime_end(&mut self, ptr: Self::Value, size: Size);
|
||||
|
||||
/// If LLVM lifetime intrinsic support is enabled (i.e. optimizations
|
||||
/// on), and `ptr` is nonzero-sized, then extracts the size of `ptr`
|
||||
@ -273,16 +308,16 @@ pub trait BuilderMethods<'a, 'tcx: 'a>:
|
||||
///
|
||||
/// If LLVM lifetime intrinsic support is disabled (i.e. optimizations
|
||||
/// off) or `ptr` is zero-sized, then no-op (does not call `emit`).
|
||||
fn call_lifetime_intrinsic(&self, intrinsic: &str, ptr: Self::Value, size: Size);
|
||||
fn call_lifetime_intrinsic(&mut self, intrinsic: &str, ptr: Self::Value, size: Size);
|
||||
|
||||
fn call(
|
||||
&self,
|
||||
&mut self,
|
||||
llfn: Self::Value,
|
||||
args: &[Self::Value],
|
||||
funclet: Option<&Self::Funclet>,
|
||||
) -> Self::Value;
|
||||
fn zext(&self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
|
||||
fn zext(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
|
||||
|
||||
fn delete_basic_block(&self, bb: Self::BasicBlock);
|
||||
fn do_not_inline(&self, llret: Self::Value);
|
||||
fn delete_basic_block(&mut self, bb: Self::BasicBlock);
|
||||
fn do_not_inline(&mut self, llret: Self::Value);
|
||||
}
|
||||
|
@ -53,7 +53,7 @@ pub trait DebugInfoMethods<'tcx>: Backend<'tcx> {
|
||||
|
||||
pub trait DebugInfoBuilderMethods<'tcx>: HasCodegen<'tcx> {
|
||||
fn declare_local(
|
||||
&self,
|
||||
&mut self,
|
||||
dbg_context: &FunctionDebugContext<Self::DIScope>,
|
||||
variable_name: Name,
|
||||
variable_type: Ty<'tcx>,
|
||||
@ -63,10 +63,10 @@ pub trait DebugInfoBuilderMethods<'tcx>: HasCodegen<'tcx> {
|
||||
span: Span,
|
||||
);
|
||||
fn set_source_location(
|
||||
&self,
|
||||
&mut self,
|
||||
debug_context: &FunctionDebugContext<Self::DIScope>,
|
||||
scope: Option<Self::DIScope>,
|
||||
span: Span,
|
||||
);
|
||||
fn insert_reference_to_gdb_debug_scripts_section_global(&self);
|
||||
fn insert_reference_to_gdb_debug_scripts_section_global(&mut self);
|
||||
}
|
||||
|
@ -20,7 +20,7 @@ pub trait IntrinsicCallMethods<'tcx>: HasCodegen<'tcx> {
|
||||
/// and in libcore/intrinsics.rs; if you need access to any llvm intrinsics,
|
||||
/// add them to librustc_codegen_llvm/context.rs
|
||||
fn codegen_intrinsic_call(
|
||||
&self,
|
||||
&mut self,
|
||||
callee_ty: Ty<'tcx>,
|
||||
fn_ty: &FnType<'tcx, Ty<'tcx>>,
|
||||
args: &[OperandRef<'tcx, Self::Value>],
|
||||
|
@ -105,13 +105,13 @@ pub trait LayoutTypeMethods<'tcx>: Backend<'tcx> {
|
||||
|
||||
pub trait ArgTypeMethods<'tcx>: HasCodegen<'tcx> {
|
||||
fn store_fn_arg(
|
||||
&self,
|
||||
&mut self,
|
||||
ty: &ArgType<'tcx, Ty<'tcx>>,
|
||||
idx: &mut usize,
|
||||
dst: PlaceRef<'tcx, Self::Value>,
|
||||
);
|
||||
fn store_arg_ty(
|
||||
&self,
|
||||
&mut self,
|
||||
ty: &ArgType<'tcx, Ty<'tcx>>,
|
||||
val: Self::Value,
|
||||
dst: PlaceRef<'tcx, Self::Value>,
|
||||
|
@ -30,7 +30,7 @@ impl<'a, 'tcx: 'a> VirtualIndex {
|
||||
|
||||
pub fn get_fn<Bx: BuilderMethods<'a, 'tcx>>(
|
||||
self,
|
||||
bx: &Bx,
|
||||
bx: &mut Bx,
|
||||
llvtable: Bx::Value,
|
||||
fn_ty: &FnType<'tcx, Ty<'tcx>>
|
||||
) -> Bx::Value {
|
||||
@ -42,10 +42,8 @@ impl<'a, 'tcx: 'a> VirtualIndex {
|
||||
bx.cx().type_ptr_to(bx.cx().fn_ptr_backend_type(fn_ty))
|
||||
);
|
||||
let ptr_align = bx.tcx().data_layout.pointer_align;
|
||||
let ptr = bx.load(
|
||||
bx.inbounds_gep(llvtable, &[bx.cx().const_usize(self.0)]),
|
||||
ptr_align
|
||||
);
|
||||
let gep = bx.inbounds_gep(llvtable, &[bx.cx().const_usize(self.0)]);
|
||||
let ptr = bx.load(gep, ptr_align);
|
||||
bx.nonnull_metadata(ptr);
|
||||
// Vtable loads are invariant
|
||||
bx.set_invariant_load(ptr);
|
||||
@ -54,7 +52,7 @@ impl<'a, 'tcx: 'a> VirtualIndex {
|
||||
|
||||
pub fn get_usize<Bx: BuilderMethods<'a, 'tcx>>(
|
||||
self,
|
||||
bx: &Bx,
|
||||
bx: &mut Bx,
|
||||
llvtable: Bx::Value
|
||||
) -> Bx::Value {
|
||||
// Load the data pointer from the object.
|
||||
@ -62,10 +60,8 @@ impl<'a, 'tcx: 'a> VirtualIndex {
|
||||
|
||||
let llvtable = bx.pointercast(llvtable, bx.cx().type_ptr_to(bx.cx().type_isize()));
|
||||
let usize_align = bx.tcx().data_layout.pointer_align;
|
||||
let ptr = bx.load(
|
||||
bx.inbounds_gep(llvtable, &[bx.cx().const_usize(self.0)]),
|
||||
usize_align
|
||||
);
|
||||
let gep = bx.inbounds_gep(llvtable, &[bx.cx().const_usize(self.0)]);
|
||||
let ptr = bx.load(gep, usize_align);
|
||||
// Vtable loads are invariant
|
||||
bx.set_invariant_load(ptr);
|
||||
ptr
|
||||
|
@ -102,7 +102,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
|
||||
|
||||
debug!("llblock: creating cleanup trampoline for {:?}", target);
|
||||
let name = &format!("{:?}_cleanup_trampoline_{:?}", bb, target);
|
||||
let trampoline = this.new_block(name);
|
||||
let mut trampoline = this.new_block(name);
|
||||
trampoline.cleanup_ret(funclet(this).unwrap(), Some(lltarget));
|
||||
trampoline.llbb()
|
||||
} else {
|
||||
@ -145,9 +145,9 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
|
||||
bx.apply_attrs_callsite(&fn_ty, invokeret);
|
||||
|
||||
if let Some((ret_dest, target)) = destination {
|
||||
let ret_bx = this.build_block(target);
|
||||
this.set_debug_loc(&ret_bx, terminator.source_info);
|
||||
this.store_return(&ret_bx, ret_dest, &fn_ty.ret, invokeret);
|
||||
let mut ret_bx = this.build_block(target);
|
||||
this.set_debug_loc(&mut ret_bx, terminator.source_info);
|
||||
this.store_return(&mut ret_bx, ret_dest, &fn_ty.ret, invokeret);
|
||||
}
|
||||
} else {
|
||||
let llret = bx.call(fn_ptr, &llargs, funclet(this));
|
||||
@ -169,16 +169,18 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
|
||||
}
|
||||
};
|
||||
|
||||
self.set_debug_loc(&bx, terminator.source_info);
|
||||
self.set_debug_loc(&mut bx, terminator.source_info);
|
||||
match terminator.kind {
|
||||
mir::TerminatorKind::Resume => {
|
||||
if let Some(funclet) = funclet(self) {
|
||||
bx.cleanup_ret(funclet, None);
|
||||
} else {
|
||||
let slot = self.get_personality_slot(&bx);
|
||||
let lp0 = bx.load_operand(slot.project_field(&bx, 0)).immediate();
|
||||
let lp1 = bx.load_operand(slot.project_field(&bx, 1)).immediate();
|
||||
slot.storage_dead(&bx);
|
||||
let slot = self.get_personality_slot(&mut bx);
|
||||
let lp0 = slot.project_field(&mut bx, 0);
|
||||
let lp0 = bx.load_operand(lp0).immediate();
|
||||
let lp1 = slot.project_field(&mut bx, 1);
|
||||
let lp1 = bx.load_operand(lp1).immediate();
|
||||
slot.storage_dead(&mut bx);
|
||||
|
||||
if !bx.cx().sess().target.target.options.custom_unwind_resume {
|
||||
let mut lp = bx.cx().const_undef(self.landing_pad_type());
|
||||
@ -204,7 +206,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
|
||||
}
|
||||
|
||||
mir::TerminatorKind::SwitchInt { ref discr, switch_ty, ref values, ref targets } => {
|
||||
let discr = self.codegen_operand(&bx, discr);
|
||||
let discr = self.codegen_operand(&mut bx, discr);
|
||||
if targets.len() == 2 {
|
||||
// If there are two targets, emit br instead of switch
|
||||
let lltrue = llblock(self, targets[0]);
|
||||
@ -249,11 +251,12 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
|
||||
}
|
||||
|
||||
PassMode::Direct(_) | PassMode::Pair(..) => {
|
||||
let op = self.codegen_consume(&bx, &mir::Place::Local(mir::RETURN_PLACE));
|
||||
let op =
|
||||
self.codegen_consume(&mut bx, &mir::Place::Local(mir::RETURN_PLACE));
|
||||
if let Ref(llval, _, align) = op.val {
|
||||
bx.load(llval, align)
|
||||
} else {
|
||||
op.immediate_or_packed_pair(&bx)
|
||||
op.immediate_or_packed_pair(&mut bx)
|
||||
}
|
||||
}
|
||||
|
||||
@ -271,8 +274,9 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
|
||||
};
|
||||
let llslot = match op.val {
Immediate(_) | Pair(..) => {
let scratch = PlaceRef::alloca(&bx, self.fn_ty.ret.layout, "ret");
op.val.store(&bx, scratch);
let scratch =
PlaceRef::alloca(&mut bx, self.fn_ty.ret.layout, "ret");
op.val.store(&mut bx, scratch);
scratch.llval
}
Ref(llval, _, align) => {
@ -281,11 +285,10 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
llval
}
};
bx.load(
bx.pointercast(llslot, bx.cx().type_ptr_to(
bx.cx().cast_backend_type(&cast_ty)
)),
self.fn_ty.ret.layout.align)
let addr = bx.pointercast(llslot, bx.cx().type_ptr_to(
bx.cx().cast_backend_type(&cast_ty)
));
bx.load(addr, self.fn_ty.ret.layout.align)
}
};
bx.ret(llval);
@ -306,7 +309,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
return
}

let place = self.codegen_place(&bx, location);
let place = self.codegen_place(&mut bx, location);
let (args1, args2);
let mut args = if let Some(llextra) = place.llextra {
args2 = [place.llval, llextra];
@ -325,7 +328,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
let fn_ty = bx.cx().new_vtable(sig, &[]);
let vtable = args[1];
args = &args[..1];
(meth::DESTRUCTOR.get_fn(&bx, vtable, &fn_ty), fn_ty)
(meth::DESTRUCTOR.get_fn(&mut bx, vtable, &fn_ty), fn_ty)
}
_ => {
(bx.cx().get_fn(drop_fn),
@ -338,7 +341,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
}

mir::TerminatorKind::Assert { ref cond, expected, ref msg, target, cleanup } => {
let cond = self.codegen_operand(&bx, cond).immediate();
let cond = self.codegen_operand(&mut bx, cond).immediate();
let mut const_cond = bx.cx().const_to_opt_u128(cond, false).map(|c| c == 1);

// This case can currently arise only from functions marked
@ -375,7 +378,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {

// After this point, bx is the block for the call to panic.
bx = panic_block;
self.set_debug_loc(&bx, terminator.source_info);
self.set_debug_loc(&mut bx, terminator.source_info);

// Get the location information.
let loc = bx.cx().sess().source_map().lookup_char_pos(span.lo());
@ -390,8 +393,8 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
// Put together the arguments to the panic entry point.
let (lang_item, args) = match *msg {
EvalErrorKind::BoundsCheck { ref len, ref index } => {
let len = self.codegen_operand(&bx, len).immediate();
let index = self.codegen_operand(&bx, index).immediate();
let len = self.codegen_operand(&mut bx, len).immediate();
let index = self.codegen_operand(&mut bx, index).immediate();

let file_line_col = bx.cx().const_struct(&[filename, line, col], false);
let file_line_col = bx.cx().static_addr_of(
@ -442,7 +445,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
from_hir_call: _
} => {
// Create the callee. This is a fn ptr or zero-sized and hence a kind of scalar.
let callee = self.codegen_operand(&bx, func);
let callee = self.codegen_operand(&mut bx, func);

let (instance, mut llfn) = match callee.layout.ty.sty {
ty::FnDef(def_id, substs) => {
@ -476,7 +479,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
if intrinsic == Some("transmute") {
if let Some(destination_ref) = destination.as_ref() {
let &(ref dest, target) = destination_ref;
self.codegen_transmute(&bx, &args[0], dest);
self.codegen_transmute(&mut bx, &args[0], dest);
funclet_br(self, &mut bx, target);
} else {
// If we are trying to transmute to an uninhabited type,
@ -567,7 +570,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
// Prepare the return value destination
let ret_dest = if let Some((ref dest, _)) = *destination {
let is_intrinsic = intrinsic.is_some();
self.make_return_dest(&bx, dest, &fn_ty.ret, &mut llargs,
self.make_return_dest(&mut bx, dest, &fn_ty.ret, &mut llargs,
is_intrinsic)
} else {
ReturnDest::Nothing
@ -635,7 +638,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
}
}

self.codegen_operand(&bx, arg)
self.codegen_operand(&mut bx, arg)
}).collect();


@ -644,7 +647,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
terminator.source_info.span);

if let ReturnDest::IndirectOperand(dst, _) = ret_dest {
self.store_return(&bx, ret_dest, &fn_ty.ret, dst.llval);
self.store_return(&mut bx, ret_dest, &fn_ty.ret, dst.llval);
}

if let Some((_, target)) = *destination {
@ -665,7 +668,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
};

'make_args: for (i, arg) in first_args.iter().enumerate() {
let mut op = self.codegen_operand(&bx, arg);
let mut op = self.codegen_operand(&mut bx, arg);

if let (0, Some(ty::InstanceDef::Virtual(_, idx))) = (i, def) {
if let Pair(..) = op.val {
@ -679,7 +682,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
&& !op.layout.ty.is_region_ptr()
{
'iter_fields: for i in 0..op.layout.fields.count() {
let field = op.extract_field(&bx, i);
let field = op.extract_field(&mut bx, i);
if !field.layout.is_zst() {
// we found the one non-zero-sized field that is allowed
// now find *its* non-zero-sized field, or stop if it's a
@ -698,7 +701,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
match op.val {
Pair(data_ptr, meta) => {
llfn = Some(meth::VirtualIndex::from_index(idx)
.get_fn(&bx, meta, &fn_ty));
.get_fn(&mut bx, meta, &fn_ty));
llargs.push(data_ptr);
continue 'make_args
}
@ -707,7 +710,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
} else if let Ref(data_ptr, Some(meta), _) = op.val {
// by-value dynamic dispatch
llfn = Some(meth::VirtualIndex::from_index(idx)
.get_fn(&bx, meta, &fn_ty));
.get_fn(&mut bx, meta, &fn_ty));
llargs.push(data_ptr);
continue;
} else {
@ -720,17 +723,17 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
match (arg, op.val) {
(&mir::Operand::Copy(_), Ref(_, None, _)) |
(&mir::Operand::Constant(_), Ref(_, None, _)) => {
let tmp = PlaceRef::alloca(&bx, op.layout, "const");
op.val.store(&bx, tmp);
let tmp = PlaceRef::alloca(&mut bx, op.layout, "const");
op.val.store(&mut bx, tmp);
op.val = Ref(tmp.llval, None, tmp.align);
}
_ => {}
}

self.codegen_argument(&bx, op, &mut llargs, &fn_ty.args[i]);
self.codegen_argument(&mut bx, op, &mut llargs, &fn_ty.args[i]);
}
if let Some(tup) = untuple {
self.codegen_arguments_untupled(&bx, tup, &mut llargs,
self.codegen_arguments_untupled(&mut bx, tup, &mut llargs,
&fn_ty.args[first_args.len()..])
}

@ -753,7 +756,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {

fn codegen_argument(
&mut self,
bx: &Bx,
bx: &mut Bx,
op: OperandRef<'tcx, Bx::Value>,
llargs: &mut Vec<Bx::Value>,
arg: &ArgType<'tcx, Ty<'tcx>>
@ -820,9 +823,10 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
if by_ref && !arg.is_indirect() {
// Have to load the argument, maybe while casting it.
if let PassMode::Cast(ty) = arg.mode {
llval = bx.load(bx.pointercast(llval, bx.cx().type_ptr_to(
let addr = bx.pointercast(llval, bx.cx().type_ptr_to(
bx.cx().cast_backend_type(&ty))
), align.min(arg.layout.align));
);
llval = bx.load(addr, align.min(arg.layout.align));
} else {
// We can't use `PlaceRef::load` here because the argument
// may have a type we don't treat as immediate, but the ABI
@ -845,7 +849,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {

fn codegen_arguments_untupled(
&mut self,
bx: &Bx,
bx: &mut Bx,
operand: &mir::Operand<'tcx>,
llargs: &mut Vec<Bx::Value>,
args: &[ArgType<'tcx, Ty<'tcx>>]
@ -857,7 +861,8 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
let tuple_ptr = PlaceRef::new_sized(llval, tuple.layout, align);
for i in 0..tuple.layout.fields.count() {
let field_ptr = tuple_ptr.project_field(bx, i);
self.codegen_argument(bx, bx.load_operand(field_ptr), llargs, &args[i]);
let field = bx.load_operand(field_ptr);
self.codegen_argument(bx, field, llargs, &args[i]);
}
} else if let Ref(_, Some(_), _) = tuple.val {
bug!("closure arguments must be sized")
@ -872,7 +877,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {

fn get_personality_slot(
&mut self,
bx: &Bx
bx: &mut Bx
) -> PlaceRef<'tcx, Bx::Value> {
let cx = bx.cx();
if let Some(slot) = self.personality_slot {
@ -920,9 +925,9 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
let lp = bx.landing_pad(llretty, llpersonality, 1);
bx.set_cleanup(lp);

let slot = self.get_personality_slot(&bx);
slot.storage_live(&bx);
Pair(bx.extract_value(lp, 0), bx.extract_value(lp, 1)).store(&bx, slot);
let slot = self.get_personality_slot(&mut bx);
slot.storage_live(&mut bx);
Pair(bx.extract_value(lp, 0), bx.extract_value(lp, 1)).store(&mut bx, slot);

bx.br(target_bb);
bx.llbb()
@ -937,7 +942,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
&mut self
) -> Bx::BasicBlock {
self.unreachable_block.unwrap_or_else(|| {
let bx = self.new_block("unreachable");
let mut bx = self.new_block("unreachable");
bx.unreachable();
self.unreachable_block = Some(bx.llbb());
bx.llbb()
@ -959,7 +964,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {

fn make_return_dest(
&mut self,
bx: &Bx,
bx: &mut Bx,
dest: &mir::Place<'tcx>,
fn_ret: &ArgType<'tcx, Ty<'tcx>>,
llargs: &mut Vec<Bx::Value>, is_intrinsic: bool
@ -1019,7 +1024,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {

fn codegen_transmute(
&mut self,
bx: &Bx,
bx: &mut Bx,
src: &mir::Operand<'tcx>,
dst: &mir::Place<'tcx>
) {
@ -1050,7 +1055,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {

fn codegen_transmute_into(
&mut self,
bx: &Bx,
bx: &mut Bx,
src: &mir::Operand<'tcx>,
dst: PlaceRef<'tcx, Bx::Value>
) {
@ -1065,7 +1070,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
// Stores the return value of a function call into it's final location.
fn store_return(
&mut self,
bx: &Bx,
bx: &mut Bx,
dest: ReturnDest<'tcx, Bx::Value>,
ret_ty: &ArgType<'tcx, Ty<'tcx>>,
llval: Bx::Value
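Editor's note: the hoisting pattern that recurs throughout this diff (e.g. `bx.load(addr, ...)` after a separate `let addr = bx.pointercast(...)`) follows directly from the switch to `&mut self`. The sketch below is not part of the patch; it uses a hypothetical, minimal `Builder` type only to show why a nested call like `bx.load(bx.pointercast(..), ..)` would need two overlapping mutable borrows of the same builder and therefore has to be split into a temporary first.

// Minimal sketch, assuming a stand-in Builder with &mut self methods.
struct Builder {
    insts: Vec<String>, // pretend instruction stream, stands in for IR builder state
}

impl Builder {
    fn pointercast(&mut self, v: u32) -> u32 {
        self.insts.push(format!("pointercast %{}", v));
        v
    }

    fn load(&mut self, ptr: u32) -> u32 {
        self.insts.push(format!("load %{}", ptr));
        ptr
    }
}

fn main() {
    let mut bx = Builder { insts: Vec::new() };
    // `bx.load(bx.pointercast(7))` would be rejected: the argument expression
    // needs a second mutable borrow of `bx` while the outer call already holds one.
    let addr = bx.pointercast(7); // hoist the inner call into a temporary...
    let _val = bx.load(addr);     // ...so each call borrows `bx` mutably on its own.
    assert_eq!(bx.insts.len(), 2);
}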
@ -111,7 +111,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {

pub fn set_debug_loc(
&mut self,
bx: &Bx,
bx: &mut Bx,
source_info: mir::SourceInfo
) {
let (scope, span) = self.debug_loc(source_info);
@ -264,7 +264,7 @@ pub fn codegen_mir<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
fx.locals = {
let args = arg_local_refs(&mut bx, &fx, &fx.scopes, &memory_locals);

let allocate_local = |local| {
let mut allocate_local = |local| {
let decl = &mir.local_decls[local];
let layout = bx.cx().layout_of(fx.monomorphize(&decl.ty));
assert!(!layout.ty.has_erasable_regions());
@ -283,11 +283,11 @@ pub fn codegen_mir<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
debug!("alloc: {:?} ({}) -> place", local, name);
if layout.is_unsized() {
let indirect_place =
PlaceRef::alloca_unsized_indirect(&bx, layout, &name.as_str());
PlaceRef::alloca_unsized_indirect(&mut bx, layout, &name.as_str());
// FIXME: add an appropriate debuginfo
LocalRef::UnsizedPlace(indirect_place)
} else {
let place = PlaceRef::alloca(&bx, layout, &name.as_str());
let place = PlaceRef::alloca(&mut bx, layout, &name.as_str());
if dbg {
let (scope, span) = fx.debug_loc(mir::SourceInfo {
span: decl.source_info.span,
@ -308,11 +308,14 @@ pub fn codegen_mir<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
} else if memory_locals.contains(local) {
debug!("alloc: {:?} -> place", local);
if layout.is_unsized() {
let indirect_place =
PlaceRef::alloca_unsized_indirect(&bx, layout, &format!("{:?}", local));
let indirect_place = PlaceRef::alloca_unsized_indirect(
&mut bx,
layout,
&format!("{:?}", local),
);
LocalRef::UnsizedPlace(indirect_place)
} else {
LocalRef::Place(PlaceRef::alloca(&bx, layout, &format!("{:?}", local)))
LocalRef::Place(PlaceRef::alloca(&mut bx, layout, &format!("{:?}", local)))
}
} else {
// If this is an immediate local, we do not create an
@ -399,7 +402,7 @@ fn create_funclets<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
// bar();
// }
Some(&mir::TerminatorKind::Abort) => {
let cs_bx = bx.build_sibling_block(&format!("cs_funclet{:?}", bb));
let mut cs_bx = bx.build_sibling_block(&format!("cs_funclet{:?}", bb));
let mut cp_bx = bx.build_sibling_block(&format!("cp_funclet{:?}", bb));
ret_llbb = cs_bx.llbb();

@ -480,7 +483,8 @@ fn arg_local_refs<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
if arg.pad.is_some() {
llarg_idx += 1;
}
bx.store_fn_arg(arg, &mut llarg_idx, place.project_field(bx, i));
let pr_field = place.project_field(bx, i);
bx.store_fn_arg(arg, &mut llarg_idx, pr_field);
}

// Now that we have one alloca that contains the aggregate value,
@ -76,7 +76,7 @@ impl<'a, 'tcx: 'a, V: CodegenObject> OperandRef<'tcx, V> {
}

pub fn from_const<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
bx: &Bx,
bx: &mut Bx,
val: &'tcx ty::Const<'tcx>
) -> Result<Self, ErrorHandled> {
let layout = bx.cx().layout_of(val.ty);
@ -160,7 +160,7 @@ impl<'a, 'tcx: 'a, V: CodegenObject> OperandRef<'tcx, V> {
/// For other cases, see `immediate`.
pub fn immediate_or_packed_pair<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
self,
bx: &Bx
bx: &mut Bx
) -> V {
if let OperandValue::Pair(a, b) = self.val {
let llty = bx.cx().backend_type(self.layout);
@ -168,8 +168,10 @@ impl<'a, 'tcx: 'a, V: CodegenObject> OperandRef<'tcx, V> {
self, llty);
// Reconstruct the immediate aggregate.
let mut llpair = bx.cx().const_undef(llty);
llpair = bx.insert_value(llpair, base::from_immediate(bx, a), 0);
llpair = bx.insert_value(llpair, base::from_immediate(bx, b), 1);
let imm_a = base::from_immediate(bx, a);
let imm_b = base::from_immediate(bx, b);
llpair = bx.insert_value(llpair, imm_a, 0);
llpair = bx.insert_value(llpair, imm_b, 1);
llpair
} else {
self.immediate()
@ -178,7 +180,7 @@ impl<'a, 'tcx: 'a, V: CodegenObject> OperandRef<'tcx, V> {

/// If the type is a pair, we return a `Pair`, otherwise, an `Immediate`.
pub fn from_immediate_or_packed_pair<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
bx: &Bx,
bx: &mut Bx,
llval: V,
layout: TyLayout<'tcx>
) -> Self {
@ -187,8 +189,10 @@ impl<'a, 'tcx: 'a, V: CodegenObject> OperandRef<'tcx, V> {
llval, layout);

// Deconstruct the immediate aggregate.
let a_llval = base::to_immediate_scalar(bx, bx.extract_value(llval, 0), a);
let b_llval = base::to_immediate_scalar(bx, bx.extract_value(llval, 1), b);
let a_llval = bx.extract_value(llval, 0);
let a_llval = base::to_immediate_scalar(bx, a_llval, a);
let b_llval = bx.extract_value(llval, 1);
let b_llval = base::to_immediate_scalar(bx, b_llval, b);
OperandValue::Pair(a_llval, b_llval)
} else {
OperandValue::Immediate(llval)
@ -198,7 +202,7 @@ impl<'a, 'tcx: 'a, V: CodegenObject> OperandRef<'tcx, V> {

pub fn extract_field<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
&self,
bx: &Bx,
bx: &mut Bx,
i: usize
) -> Self {
let field = self.layout.field(bx.cx(), i);
@ -261,7 +265,7 @@ impl<'a, 'tcx: 'a, V: CodegenObject> OperandRef<'tcx, V> {
impl<'a, 'tcx: 'a, V: CodegenObject> OperandValue<V> {
pub fn store<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
self,
bx: &Bx,
bx: &mut Bx,
dest: PlaceRef<'tcx, V>
) {
self.store_with_flags(bx, dest, MemFlags::empty());
@ -269,7 +273,7 @@ impl<'a, 'tcx: 'a, V: CodegenObject> OperandValue<V> {

pub fn volatile_store<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
self,
bx: &Bx,
bx: &mut Bx,
dest: PlaceRef<'tcx, V>
) {
self.store_with_flags(bx, dest, MemFlags::VOLATILE);
@ -277,7 +281,7 @@ impl<'a, 'tcx: 'a, V: CodegenObject> OperandValue<V> {

pub fn unaligned_volatile_store<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
self,
bx: &Bx,
bx: &mut Bx,
dest: PlaceRef<'tcx, V>,
) {
self.store_with_flags(bx, dest, MemFlags::VOLATILE | MemFlags::UNALIGNED);
@ -285,7 +289,7 @@ impl<'a, 'tcx: 'a, V: CodegenObject> OperandValue<V> {

pub fn nontemporal_store<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
self,
bx: &Bx,
bx: &mut Bx,
dest: PlaceRef<'tcx, V>
) {
self.store_with_flags(bx, dest, MemFlags::NONTEMPORAL);
@ -293,7 +297,7 @@ impl<'a, 'tcx: 'a, V: CodegenObject> OperandValue<V> {

fn store_with_flags<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
self,
bx: &Bx,
bx: &mut Bx,
dest: PlaceRef<'tcx, V>,
flags: MemFlags,
) {
@ -326,7 +330,7 @@ impl<'a, 'tcx: 'a, V: CodegenObject> OperandValue<V> {
}
pub fn store_unsized<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
self,
bx: &Bx,
bx: &mut Bx,
indirect_dest: PlaceRef<'tcx, V>
) {
debug!("OperandRef::store_unsized: operand={:?}, indirect_dest={:?}", self, indirect_dest);
@ -361,7 +365,7 @@ impl<'a, 'tcx: 'a, V: CodegenObject> OperandValue<V> {
impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
fn maybe_codegen_consume_direct(
&mut self,
bx: &Bx,
bx: &mut Bx,
place: &mir::Place<'tcx>
) -> Option<OperandRef<'tcx, Bx::Value>> {
debug!("maybe_codegen_consume_direct(place={:?})", place);
@ -409,7 +413,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {

pub fn codegen_consume(
&mut self,
bx: &Bx,
bx: &mut Bx,
place: &mir::Place<'tcx>
) -> OperandRef<'tcx, Bx::Value> {
debug!("codegen_consume(place={:?})", place);
@ -428,12 +432,13 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {

// for most places, to consume them we just load them
// out from their home
bx.load_operand(self.codegen_place(bx, place))
let place = self.codegen_place(bx, place);
bx.load_operand(place)
}

pub fn codegen_operand(
&mut self,
bx: &Bx,
bx: &mut Bx,
operand: &mir::Operand<'tcx>
) -> OperandRef<'tcx, Bx::Value> {
debug!("codegen_operand(operand={:?})", operand);
@ -52,7 +52,7 @@ impl<'a, 'tcx: 'a, V: CodegenObject> PlaceRef<'tcx, V> {
}

pub fn alloca<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
bx: &Bx,
bx: &mut Bx,
layout: TyLayout<'tcx>,
name: &str
) -> Self {
@ -64,7 +64,7 @@ impl<'a, 'tcx: 'a, V: CodegenObject> PlaceRef<'tcx, V> {

/// Returns a place for an indirect reference to an unsized place.
pub fn alloca_unsized_indirect<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
bx: &Bx,
bx: &mut Bx,
layout: TyLayout<'tcx>,
name: &str,
) -> Self {
@ -96,29 +96,28 @@ impl<'a, 'tcx: 'a, V: CodegenObject> PlaceRef<'tcx, V> {
impl<'a, 'tcx: 'a, V: CodegenObject> PlaceRef<'tcx, V> {
/// Access a field, at a point when the value's case is known.
pub fn project_field<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
self, bx: &Bx,
self, bx: &mut Bx,
ix: usize,
) -> Self {
let cx = bx.cx();
let field = self.layout.field(cx, ix);
let field = self.layout.field(bx.cx(), ix);
let offset = self.layout.fields.offset(ix);
let effective_field_align = self.align.restrict_for_offset(offset);

let simple = || {
let mut simple = || {
// Unions and newtypes only use an offset of 0.
let llval = if offset.bytes() == 0 {
self.llval
} else if let layout::Abi::ScalarPair(ref a, ref b) = self.layout.abi {
// Offsets have to match either first or second field.
assert_eq!(offset, a.value.size(cx).abi_align(b.value.align(cx)));
assert_eq!(offset, a.value.size(bx.cx()).abi_align(b.value.align(bx.cx())));
bx.struct_gep(self.llval, 1)
} else {
bx.struct_gep(self.llval, bx.cx().backend_field_index(self.layout, ix))
};
PlaceRef {
// HACK(eddyb) have to bitcast pointers until LLVM removes pointee types.
llval: bx.pointercast(llval, cx.type_ptr_to(cx.backend_type(field))),
llextra: if cx.type_has_metadata(field.ty) {
llval: bx.pointercast(llval, bx.cx().type_ptr_to(bx.cx().backend_type(field))),
llextra: if bx.cx().type_has_metadata(field.ty) {
self.llextra
} else {
None
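Editor's note on the `let simple = || {` to `let mut simple = || {` change above: once the closure body calls `&mut self` builder methods, the closure captures `bx` by mutable reference and becomes an `FnMut` closure, so the binding holding it must itself be `mut` to be callable. A minimal sketch, not taken from the patch, with a plain counter standing in for the builder:

fn main() {
    let mut count = 0u32;
    // The closure mutates a captured binding, so it is `FnMut`, not `Fn`.
    let mut bump = || {
        count += 1;
        count
    };
    // Calling an `FnMut` closure takes a mutable borrow of the closure itself,
    // which is why the binding above needs `mut`.
    assert_eq!(bump(), 1);
    assert_eq!(bump(), 2);
}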
@ -168,7 +167,7 @@ impl<'a, 'tcx: 'a, V: CodegenObject> PlaceRef<'tcx, V> {

let meta = self.llextra;

let unaligned_offset = cx.const_usize(offset.bytes());
let unaligned_offset = bx.cx().const_usize(offset.bytes());

// Get the alignment of the field
let (_, unsized_align) = glue::size_and_align_of_dst(bx, field.ty, meta);
@ -179,18 +178,19 @@ impl<'a, 'tcx: 'a, V: CodegenObject> PlaceRef<'tcx, V> {
// (unaligned offset + (align - 1)) & -align

// Calculate offset
let align_sub_1 = bx.sub(unsized_align, cx.const_usize(1u64));
let offset = bx.and(bx.add(unaligned_offset, align_sub_1),
bx.neg(unsized_align));
let align_sub_1 = bx.sub(unsized_align, bx.cx().const_usize(1u64));
let and_lhs = bx.add(unaligned_offset, align_sub_1);
let and_rhs = bx.neg(unsized_align);
let offset = bx.and(and_lhs, and_rhs);

debug!("struct_field_ptr: DST field offset: {:?}", offset);

// Cast and adjust pointer
let byte_ptr = bx.pointercast(self.llval, cx.type_i8p());
let byte_ptr = bx.pointercast(self.llval, bx.cx().type_i8p());
let byte_ptr = bx.gep(byte_ptr, &[offset]);

// Finally, cast back to the type expected
let ll_fty = cx.backend_type(field);
let ll_fty = bx.cx().backend_type(field);
debug!("struct_field_ptr: Field type is {:?}", ll_fty);

PlaceRef {
@ -204,7 +204,7 @@ impl<'a, 'tcx: 'a, V: CodegenObject> PlaceRef<'tcx, V> {
/// Obtain the actual discriminant of a value.
pub fn codegen_get_discr<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
self,
bx: &Bx,
bx: &mut Bx,
cast_to: Ty<'tcx>
) -> V {
let cast_to = bx.cx().immediate_backend_type(bx.cx().layout_of(cast_to));
@ -252,7 +252,8 @@ impl<'a, 'tcx: 'a, V: CodegenObject> PlaceRef<'tcx, V> {
} else {
bx.cx().const_uint_big(niche_llty, niche_start)
};
bx.select(bx.icmp(IntPredicate::IntEQ, lldiscr, niche_llval),
let select_arg = bx.icmp(IntPredicate::IntEQ, lldiscr, niche_llval);
bx.select(select_arg,
bx.cx().const_uint(cast_to, niche_variants.start().as_u32() as u64),
bx.cx().const_uint(cast_to, dataful_variant.as_u32() as u64))
} else {
@ -261,8 +262,10 @@ impl<'a, 'tcx: 'a, V: CodegenObject> PlaceRef<'tcx, V> {
let lldiscr = bx.sub(lldiscr, bx.cx().const_uint_big(niche_llty, delta));
let lldiscr_max =
bx.cx().const_uint(niche_llty, niche_variants.end().as_u32() as u64);
bx.select(bx.icmp(IntPredicate::IntULE, lldiscr, lldiscr_max),
bx.intcast(lldiscr, cast_to, false),
let select_arg = bx.icmp(IntPredicate::IntULE, lldiscr, lldiscr_max);
let cast = bx.intcast(lldiscr, cast_to, false);
bx.select(select_arg,
cast,
bx.cx().const_uint(cast_to, dataful_variant.as_u32() as u64))
}
}
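Editor's note: the sub/icmp/select sequence in `codegen_get_discr` above decodes a niche-encoded discriminant. As an illustrative sketch only (not from the patch, with hypothetical parameter names and simplified bounds handling), the same decision written as plain Rust arithmetic:

fn decode_niche(
    raw: u128,                 // value loaded from the niche field (`lldiscr`)
    niche_start: u128,         // first raw value reserved for niche variants
    niche_count: u128,         // how many variants are niche-encoded
    niche_variants_start: u64, // variant index of the first niche variant
    dataful_variant: u64,      // the one variant that actually carries data
) -> u64 {
    // Mirrors `bx.sub(lldiscr, niche_start)` followed by the unsigned range check.
    let relative = raw.wrapping_sub(niche_start);
    if relative < niche_count {
        niche_variants_start + relative as u64
    } else {
        dataful_variant
    }
}

fn main() {
    // Rough analogue of Option<&T>: the null niche is None, any other value is Some.
    assert_eq!(decode_niche(0, 0, 1, 0, 1), 0);      // niche value -> variant 0
    assert_eq!(decode_niche(0x1000, 0, 1, 0, 1), 1); // other values -> dataful variant
}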
@ -273,7 +276,7 @@ impl<'a, 'tcx: 'a, V: CodegenObject> PlaceRef<'tcx, V> {
/// representation.
pub fn codegen_set_discr<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
&self,
bx: &Bx,
bx: &mut Bx,
variant_index: VariantIdx
) {
if self.layout.for_variant(bx.cx(), variant_index).abi.is_uninhabited() {
@ -330,7 +333,7 @@ impl<'a, 'tcx: 'a, V: CodegenObject> PlaceRef<'tcx, V> {

pub fn project_index<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
&self,
bx: &Bx,
bx: &mut Bx,
llindex: V
) -> Self {
PlaceRef {
@ -343,7 +346,7 @@ impl<'a, 'tcx: 'a, V: CodegenObject> PlaceRef<'tcx, V> {

pub fn project_downcast<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
&self,
bx: &Bx,
bx: &mut Bx,
variant_index: VariantIdx
) -> Self {
let mut downcast = *self;
@ -356,11 +359,11 @@ impl<'a, 'tcx: 'a, V: CodegenObject> PlaceRef<'tcx, V> {
downcast
}

pub fn storage_live<Bx: BuilderMethods<'a, 'tcx, Value = V>>(&self, bx: &Bx) {
pub fn storage_live<Bx: BuilderMethods<'a, 'tcx, Value = V>>(&self, bx: &mut Bx) {
bx.lifetime_start(self.llval, self.layout.size);
}

pub fn storage_dead<Bx: BuilderMethods<'a, 'tcx, Value = V>>(&self, bx: &Bx) {
pub fn storage_dead<Bx: BuilderMethods<'a, 'tcx, Value = V>>(&self, bx: &mut Bx) {
bx.lifetime_end(self.llval, self.layout.size);
}
}
@ -368,13 +371,13 @@ impl<'a, 'tcx: 'a, V: CodegenObject> PlaceRef<'tcx, V> {
impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
pub fn codegen_place(
&mut self,
bx: &Bx,
bx: &mut Bx,
place: &mir::Place<'tcx>
) -> PlaceRef<'tcx, Bx::Value> {
debug!("codegen_place(place={:?})", place);

let cx = bx.cx();
let tcx = cx.tcx();
let cx = self.cx;
let tcx = self.cx.tcx();

if let mir::Place::Local(index) = *place {
match self.locals[index] {
@ -40,10 +40,10 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {

match *rvalue {
mir::Rvalue::Use(ref operand) => {
let cg_operand = self.codegen_operand(&bx, operand);
let cg_operand = self.codegen_operand(&mut bx, operand);
// FIXME: consider not copying constants through stack. (fixable by codegenning
// constants into OperandValue::Ref, why don’t we do that yet if we don’t?)
cg_operand.val.store(&bx, dest);
cg_operand.val.store(&mut bx, dest);
bx
}

@ -53,8 +53,8 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
if bx.cx().is_backend_scalar_pair(dest.layout) {
// into-coerce of a thin pointer to a fat pointer - just
// use the operand path.
let (bx, temp) = self.codegen_rvalue_operand(bx, rvalue);
temp.val.store(&bx, dest);
let (mut bx, temp) = self.codegen_rvalue_operand(bx, rvalue);
temp.val.store(&mut bx, dest);
return bx;
}

@ -62,7 +62,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
// this to be eliminated by MIR building, but
// `CoerceUnsized` can be passed by a where-clause,
// so the (generic) MIR may not be able to expand it.
let operand = self.codegen_operand(&bx, source);
let operand = self.codegen_operand(&mut bx, source);
match operand.val {
OperandValue::Pair(..) |
OperandValue::Immediate(_) => {
@ -73,15 +73,15 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
// index into the struct, and this case isn't
// important enough for it.
debug!("codegen_rvalue: creating ugly alloca");
let scratch = PlaceRef::alloca(&bx, operand.layout, "__unsize_temp");
scratch.storage_live(&bx);
operand.val.store(&bx, scratch);
base::coerce_unsized_into(&bx, scratch, dest);
scratch.storage_dead(&bx);
let scratch = PlaceRef::alloca(&mut bx, operand.layout, "__unsize_temp");
scratch.storage_live(&mut bx);
operand.val.store(&mut bx, scratch);
base::coerce_unsized_into(&mut bx, scratch, dest);
scratch.storage_dead(&mut bx);
}
OperandValue::Ref(llref, None, align) => {
let source = PlaceRef::new_sized(llref, operand.layout, align);
base::coerce_unsized_into(&bx, source, dest);
base::coerce_unsized_into(&mut bx, source, dest);
}
OperandValue::Ref(_, Some(_), _) => {
bug!("unsized coercion on an unsized rvalue")
@ -91,14 +91,14 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
}

mir::Rvalue::Repeat(ref elem, count) => {
let cg_elem = self.codegen_operand(&bx, elem);
let cg_elem = self.codegen_operand(&mut bx, elem);

// Do not generate the loop for zero-sized elements or empty arrays.
if dest.layout.is_zst() {
return bx;
}

let start = dest.project_index(&bx, bx.cx().const_usize(0)).llval;
let zero = bx.cx().const_usize(0);
let start = dest.project_index(&mut bx, zero).llval;

if let OperandValue::Immediate(v) = cg_elem.val {
let size = bx.cx().const_usize(dest.layout.size.bytes());
@ -111,7 +111,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
}

// Use llvm.memset.p0i8.* to initialize byte arrays
let v = base::from_immediate(&bx, v);
let v = base::from_immediate(&mut bx, v);
if bx.cx().val_ty(v) == bx.cx().type_i8() {
bx.memset(start, v, size, dest.align, MemFlags::empty());
return bx;
@ -119,7 +119,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
}

let count = bx.cx().const_usize(count);
let end = dest.project_index(&bx, count).llval;
let end = dest.project_index(&mut bx, count).llval;

let mut header_bx = bx.build_sibling_block("repeat_loop_header");
let mut body_bx = bx.build_sibling_block("repeat_loop_body");
@ -131,7 +131,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
let keep_going = header_bx.icmp(IntPredicate::IntNE, current, end);
header_bx.cond_br(keep_going, body_bx.llbb(), next_bx.llbb());

cg_elem.val.store(&body_bx,
cg_elem.val.store(&mut body_bx,
PlaceRef::new_sized(current, cg_elem.layout, dest.align));

let next = body_bx.inbounds_gep(current, &[bx.cx().const_usize(1)]);
@ -144,9 +144,9 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
mir::Rvalue::Aggregate(ref kind, ref operands) => {
let (dest, active_field_index) = match **kind {
mir::AggregateKind::Adt(adt_def, variant_index, _, _, active_field_index) => {
dest.codegen_set_discr(&bx, variant_index);
dest.codegen_set_discr(&mut bx, variant_index);
if adt_def.is_enum() {
(dest.project_downcast(&bx, variant_index), active_field_index)
(dest.project_downcast(&mut bx, variant_index), active_field_index)
} else {
(dest, active_field_index)
}
@ -154,11 +154,12 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
_ => (dest, None)
};
for (i, operand) in operands.iter().enumerate() {
let op = self.codegen_operand(&bx, operand);
let op = self.codegen_operand(&mut bx, operand);
// Do not generate stores and GEPis for zero-sized fields.
if !op.layout.is_zst() {
let field_index = active_field_index.unwrap_or(i);
op.val.store(&bx, dest.project_field(&bx, field_index));
let field = dest.project_field(&mut bx, field_index);
op.val.store(&mut bx, field);
}
}
bx
@ -166,8 +167,8 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {

_ => {
assert!(self.rvalue_creates_operand(rvalue));
let (bx, temp) = self.codegen_rvalue_operand(bx, rvalue);
temp.val.store(&bx, dest);
let (mut bx, temp) = self.codegen_rvalue_operand(bx, rvalue);
temp.val.store(&mut bx, dest);
bx
}
}
@ -175,7 +176,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {

pub fn codegen_rvalue_unsized(
&mut self,
bx: Bx,
mut bx: Bx,
indirect_dest: PlaceRef<'tcx, Bx::Value>,
rvalue: &mir::Rvalue<'tcx>,
) -> Bx {
@ -184,8 +185,8 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {

match *rvalue {
mir::Rvalue::Use(ref operand) => {
let cg_operand = self.codegen_operand(&bx, operand);
cg_operand.val.store_unsized(&bx, indirect_dest);
let cg_operand = self.codegen_operand(&mut bx, operand);
cg_operand.val.store_unsized(&mut bx, indirect_dest);
bx
}

@ -195,14 +196,14 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {

pub fn codegen_rvalue_operand(
&mut self,
bx: Bx,
mut bx: Bx,
rvalue: &mir::Rvalue<'tcx>
) -> (Bx, OperandRef<'tcx, Bx::Value>) {
assert!(self.rvalue_creates_operand(rvalue), "cannot codegen {:?} to operand", rvalue);

match *rvalue {
mir::Rvalue::Cast(ref kind, ref source, mir_cast_ty) => {
let operand = self.codegen_operand(&bx, source);
let operand = self.codegen_operand(&mut bx, source);
debug!("cast operand is {:?}", operand);
let cast = bx.cx().layout_of(self.monomorphize(&mir_cast_ty));

@ -255,7 +256,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
}
OperandValue::Immediate(lldata) => {
// "standard" unsize
let (lldata, llextra) = base::unsize_thin_ptr(&bx, lldata,
let (lldata, llextra) = base::unsize_thin_ptr(&mut bx, lldata,
operand.layout.ty, cast.ty);
OperandValue::Pair(lldata, llextra)
}
@ -329,12 +330,14 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
// We want `table[e as usize]` to not
// have bound checks, and this is the most
// convenient place to put the `assume`.

base::call_assume(&bx, bx.icmp(
let ll_t_in_const =
bx.cx().const_uint_big(ll_t_in, *scalar.valid_range.end());
let cmp = bx.icmp(
IntPredicate::IntULE,
llval,
bx.cx().const_uint_big(ll_t_in, *scalar.valid_range.end())
));
ll_t_in_const
);
base::call_assume(&mut bx, cmp);
}
}
}
@ -366,11 +369,11 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
bx.inttoptr(usize_llval, ll_t_out)
}
(CastTy::Int(_), CastTy::Float) =>
cast_int_to_float(&bx, signed, llval, ll_t_in, ll_t_out),
cast_int_to_float(&mut bx, signed, llval, ll_t_in, ll_t_out),
(CastTy::Float, CastTy::Int(IntTy::I)) =>
cast_float_to_int(&bx, true, llval, ll_t_in, ll_t_out),
cast_float_to_int(&mut bx, true, llval, ll_t_in, ll_t_out),
(CastTy::Float, CastTy::Int(_)) =>
cast_float_to_int(&bx, false, llval, ll_t_in, ll_t_out),
cast_float_to_int(&mut bx, false, llval, ll_t_in, ll_t_out),
_ => bug!("unsupported cast: {:?} to {:?}", operand.layout.ty, cast.ty)
};
OperandValue::Immediate(newval)
@ -383,7 +386,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
}

mir::Rvalue::Ref(_, bk, ref place) => {
let cg_place = self.codegen_place(&bx, place);
let cg_place = self.codegen_place(&mut bx, place);

let ty = cg_place.layout.ty;

@ -404,7 +407,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
}

mir::Rvalue::Len(ref place) => {
let size = self.evaluate_array_len(&bx, place);
let size = self.evaluate_array_len(&mut bx, place);
let operand = OperandRef {
val: OperandValue::Immediate(size),
layout: bx.cx().layout_of(bx.tcx().types.usize),
@ -413,12 +416,12 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
}

mir::Rvalue::BinaryOp(op, ref lhs, ref rhs) => {
let lhs = self.codegen_operand(&bx, lhs);
let rhs = self.codegen_operand(&bx, rhs);
let lhs = self.codegen_operand(&mut bx, lhs);
let rhs = self.codegen_operand(&mut bx, rhs);
let llresult = match (lhs.val, rhs.val) {
(OperandValue::Pair(lhs_addr, lhs_extra),
OperandValue::Pair(rhs_addr, rhs_extra)) => {
self.codegen_fat_ptr_binop(&bx, op,
self.codegen_fat_ptr_binop(&mut bx, op,
lhs_addr, lhs_extra,
rhs_addr, rhs_extra,
lhs.layout.ty)
@ -426,7 +429,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {

(OperandValue::Immediate(lhs_val),
OperandValue::Immediate(rhs_val)) => {
self.codegen_scalar_binop(&bx, op, lhs_val, rhs_val, lhs.layout.ty)
self.codegen_scalar_binop(&mut bx, op, lhs_val, rhs_val, lhs.layout.ty)
}

_ => bug!()
@ -439,9 +442,9 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
(bx, operand)
}
mir::Rvalue::CheckedBinaryOp(op, ref lhs, ref rhs) => {
let lhs = self.codegen_operand(&bx, lhs);
let rhs = self.codegen_operand(&bx, rhs);
let result = self.codegen_scalar_checked_binop(&bx, op,
let lhs = self.codegen_operand(&mut bx, lhs);
let rhs = self.codegen_operand(&mut bx, rhs);
let result = self.codegen_scalar_checked_binop(&mut bx, op,
lhs.immediate(), rhs.immediate(),
lhs.layout.ty);
let val_ty = op.ty(bx.tcx(), lhs.layout.ty, rhs.layout.ty);
@ -455,7 +458,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
}

mir::Rvalue::UnaryOp(op, ref operand) => {
let operand = self.codegen_operand(&bx, operand);
let operand = self.codegen_operand(&mut bx, operand);
let lloperand = operand.immediate();
let is_float = operand.layout.ty.is_fp();
let llval = match op {
@ -474,8 +477,8 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {

mir::Rvalue::Discriminant(ref place) => {
let discr_ty = rvalue.ty(&*self.mir, bx.tcx());
let discr = self.codegen_place(&bx, place)
.codegen_get_discr(&bx, discr_ty);
let discr = self.codegen_place(&mut bx, place)
.codegen_get_discr(&mut bx, discr_ty);
(bx, OperandRef {
val: OperandValue::Immediate(discr),
layout: self.cx.layout_of(discr_ty)
@ -509,7 +512,8 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
};
let instance = ty::Instance::mono(bx.tcx(), def_id);
let r = bx.cx().get_fn(instance);
let val = bx.pointercast(bx.call(r, &[llsize, llalign], None), llty_ptr);
let call = bx.call(r, &[llsize, llalign], None);
let val = bx.pointercast(call, llty_ptr);

let operand = OperandRef {
val: OperandValue::Immediate(val),
@ -518,7 +522,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
(bx, operand)
}
mir::Rvalue::Use(ref operand) => {
let operand = self.codegen_operand(&bx, operand);
let operand = self.codegen_operand(&mut bx, operand);
(bx, operand)
}
mir::Rvalue::Repeat(..) |
@ -534,7 +538,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {

fn evaluate_array_len(
&mut self,
bx: &Bx,
bx: &mut Bx,
place: &mir::Place<'tcx>,
) -> Bx::Value {
// ZST are passed as operands and require special handling
@ -554,7 +558,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {

pub fn codegen_scalar_binop(
&mut self,
bx: &Bx,
bx: &mut Bx,
op: mir::BinOp,
lhs: Bx::Value,
rhs: Bx::Value,
@ -622,7 +626,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {

pub fn codegen_fat_ptr_binop(
&mut self,
bx: &Bx,
bx: &mut Bx,
op: mir::BinOp,
lhs_addr: Bx::Value,
lhs_extra: Bx::Value,
@ -632,16 +636,14 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
) -> Bx::Value {
match op {
mir::BinOp::Eq => {
bx.and(
bx.icmp(IntPredicate::IntEQ, lhs_addr, rhs_addr),
bx.icmp(IntPredicate::IntEQ, lhs_extra, rhs_extra)
)
let lhs = bx.icmp(IntPredicate::IntEQ, lhs_addr, rhs_addr);
let rhs = bx.icmp(IntPredicate::IntEQ, lhs_extra, rhs_extra);
bx.and(lhs, rhs)
}
mir::BinOp::Ne => {
bx.or(
bx.icmp(IntPredicate::IntNE, lhs_addr, rhs_addr),
bx.icmp(IntPredicate::IntNE, lhs_extra, rhs_extra)
)
let lhs = bx.icmp(IntPredicate::IntNE, lhs_addr, rhs_addr);
let rhs = bx.icmp(IntPredicate::IntNE, lhs_extra, rhs_extra);
bx.or(lhs, rhs)
}
mir::BinOp::Le | mir::BinOp::Lt |
mir::BinOp::Ge | mir::BinOp::Gt => {
@ -653,14 +655,11 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
mir::BinOp::Ge => (IntPredicate::IntUGE, IntPredicate::IntUGT),
_ => bug!(),
};

bx.or(
bx.icmp(strict_op, lhs_addr, rhs_addr),
bx.and(
bx.icmp(IntPredicate::IntEQ, lhs_addr, rhs_addr),
bx.icmp(op, lhs_extra, rhs_extra)
)
)
let lhs = bx.icmp(strict_op, lhs_addr, rhs_addr);
let and_lhs = bx.icmp(IntPredicate::IntEQ, lhs_addr, rhs_addr);
let and_rhs = bx.icmp(op, lhs_extra, rhs_extra);
let rhs = bx.and(and_lhs, and_rhs);
bx.or(lhs, rhs)
}
_ => {
bug!("unexpected fat ptr binop");
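Editor's note: the icmp/and/or chain in `codegen_fat_ptr_binop` above is a lexicographic comparison of fat pointers treated as (addr, extra) pairs. A minimal sketch, not from the patch, showing the `Lt` case over plain integers standing in for the two pointer components:

fn fat_ptr_lt(lhs_addr: usize, lhs_extra: usize, rhs_addr: usize, rhs_extra: usize) -> bool {
    // strict comparison on the address part, or equal addresses and the
    // (non-strict here, strict for Lt) comparison on the extra part.
    lhs_addr < rhs_addr || (lhs_addr == rhs_addr && lhs_extra < rhs_extra)
}

fn main() {
    assert!(fat_ptr_lt(1, 9, 2, 0));  // decided by the address part
    assert!(fat_ptr_lt(1, 3, 1, 4));  // equal addresses, decided by the extra part
    assert!(!fat_ptr_lt(1, 4, 1, 4)); // equal pairs are not less-than
}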
@ -670,7 +669,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {

pub fn codegen_scalar_checked_binop(
&mut self,
bx: &Bx,
bx: &mut Bx,
op: mir::BinOp,
lhs: Bx::Value,
rhs: Bx::Value,
@ -752,7 +751,7 @@ enum OverflowOp {

fn get_overflow_intrinsic<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
oop: OverflowOp,
bx: &Bx,
bx: &mut Bx,
ty: Ty
) -> Bx::Value {
use syntax::ast::IntTy::*;
@ -820,7 +819,7 @@ fn get_overflow_intrinsic<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
}

fn cast_int_to_float<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
bx: &Bx,
bx: &mut Bx,
signed: bool,
x: Bx::Value,
int_ty: Bx::Type,
@ -843,7 +842,8 @@ fn cast_int_to_float<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
let overflow = bx.icmp(IntPredicate::IntUGE, x, max);
let infinity_bits = bx.cx().const_u32(ieee::Single::INFINITY.to_bits() as u32);
let infinity = bx.bitcast(infinity_bits, float_ty);
bx.select(overflow, infinity, bx.uitofp(x, float_ty))
let fp = bx.uitofp(x, float_ty);
bx.select(overflow, infinity, fp)
} else {
if signed {
bx.sitofp(x, float_ty)
@ -854,7 +854,7 @@ fn cast_int_to_float<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
}

fn cast_float_to_int<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
bx: &Bx,
bx: &mut Bx,
signed: bool,
x: Bx::Value,
float_ty: Bx::Type,
@ -869,6 +869,9 @@ fn cast_float_to_int<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
if !bx.cx().sess().opts.debugging_opts.saturating_float_casts {
return fptosui_result;
}

let int_width = bx.cx().int_width(int_ty);
let float_width = bx.cx().float_width(float_ty);
// LLVM's fpto[su]i returns undef when the input x is infinite, NaN, or does not fit into the
// destination integer type after rounding towards zero. This `undef` value can cause UB in
// safe code (see issue #10184), so we implement a saturating conversion on top of it:
@ -888,50 +891,50 @@ fn cast_float_to_int<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
// On the other hand, f_max works even if int_ty::MAX is greater than float_ty::MAX. Because
// we're rounding towards zero, we just get float_ty::MAX (which is always an integer).
// This already happens today with u128::MAX = 2^128 - 1 > f32::MAX.
let int_max = |signed: bool, int_ty: Bx::Type| -> u128 {
let shift_amount = 128 - bx.cx().int_width(int_ty);
let int_max = |signed: bool, int_width: u64| -> u128 {
let shift_amount = 128 - int_width;
if signed {
i128::MAX as u128 >> shift_amount
} else {
u128::MAX >> shift_amount
}
};
let int_min = |signed: bool, int_ty: Bx::Type| -> i128 {
let int_min = |signed: bool, int_width: u64| -> i128 {
if signed {
i128::MIN >> (128 - bx.cx().int_width(int_ty))
i128::MIN >> (128 - int_width)
} else {
0
}
};

let compute_clamp_bounds_single =
|signed: bool, int_ty: Bx::Type| -> (u128, u128) {
let rounded_min = ieee::Single::from_i128_r(int_min(signed, int_ty), Round::TowardZero);
|signed: bool, int_width: u64| -> (u128, u128) {
let rounded_min = ieee::Single::from_i128_r(int_min(signed, int_width), Round::TowardZero);
assert_eq!(rounded_min.status, Status::OK);
let rounded_max = ieee::Single::from_u128_r(int_max(signed, int_ty), Round::TowardZero);
let rounded_max = ieee::Single::from_u128_r(int_max(signed, int_width), Round::TowardZero);
assert!(rounded_max.value.is_finite());
(rounded_min.value.to_bits(), rounded_max.value.to_bits())
};
let compute_clamp_bounds_double =
|signed: bool, int_ty: Bx::Type| -> (u128, u128) {
let rounded_min = ieee::Double::from_i128_r(int_min(signed, int_ty), Round::TowardZero);
|signed: bool, int_width: u64| -> (u128, u128) {
let rounded_min = ieee::Double::from_i128_r(int_min(signed, int_width), Round::TowardZero);
assert_eq!(rounded_min.status, Status::OK);
let rounded_max = ieee::Double::from_u128_r(int_max(signed, int_ty), Round::TowardZero);
let rounded_max = ieee::Double::from_u128_r(int_max(signed, int_width), Round::TowardZero);
assert!(rounded_max.value.is_finite());
(rounded_min.value.to_bits(), rounded_max.value.to_bits())
};

let float_bits_to_llval = |bits| {
let bits_llval = match bx.cx().float_width(float_ty) {
let mut float_bits_to_llval = |bits| {
let bits_llval = match float_width {
32 => bx.cx().const_u32(bits as u32),
64 => bx.cx().const_u64(bits as u64),
n => bug!("unsupported float width {}", n),
};
bx.bitcast(bits_llval, float_ty)
};
let (f_min, f_max) = match bx.cx().float_width(float_ty) {
32 => compute_clamp_bounds_single(signed, int_ty),
64 => compute_clamp_bounds_double(signed, int_ty),
let (f_min, f_max) = match float_width {
32 => compute_clamp_bounds_single(signed, int_width),
64 => compute_clamp_bounds_double(signed, int_width),
n => bug!("unsupported float width {}", n),
};
let f_min = float_bits_to_llval(f_min);
@ -979,8 +982,8 @@ fn cast_float_to_int<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
// performed is ultimately up to the backend, but at least x86 does perform them.
let less_or_nan = bx.fcmp(RealPredicate::RealULT, x, f_min);
let greater = bx.fcmp(RealPredicate::RealOGT, x, f_max);
let int_max = bx.cx().const_uint_big(int_ty, int_max(signed, int_ty));
let int_min = bx.cx().const_uint_big(int_ty, int_min(signed, int_ty) as u128);
let int_max = bx.cx().const_uint_big(int_ty, int_max(signed, int_width));
let int_min = bx.cx().const_uint_big(int_ty, int_min(signed, int_width) as u128);
let s0 = bx.select(less_or_nan, int_min, fptosui_result);
let s1 = bx.select(greater, int_max, s0);

@ -989,7 +992,9 @@ fn cast_float_to_int<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
// Therefore we only need to execute this step for signed integer types.
if signed {
// LLVM has no isNaN predicate, so we use (x == x) instead
bx.select(bx.fcmp(RealPredicate::RealOEQ, x, x), s1, bx.cx().const_uint(int_ty, 0))
let zero = bx.cx().const_uint(int_ty, 0);
let cmp = bx.fcmp(RealPredicate::RealOEQ, x, x);
bx.select(cmp, s1, zero)
} else {
s1
}
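Editor's note: the hunk above also rewrites the clamp-bound closures in `cast_float_to_int` to take the integer width directly (so they no longer need to borrow `bx`). The bound computation itself is plain integer arithmetic; here it is as a standalone sketch, lifted from the diff and runnable on its own:

fn int_max(signed: bool, int_width: u64) -> u128 {
    // Derive the maximum representable value from the width, e.g. 127 for i8.
    let shift_amount = 128 - int_width;
    if signed {
        i128::MAX as u128 >> shift_amount
    } else {
        u128::MAX >> shift_amount
    }
}

fn int_min(signed: bool, int_width: u64) -> i128 {
    // Arithmetic shift keeps the sign, e.g. -128 for i8; unsigned types bottom out at 0.
    if signed {
        i128::MIN >> (128 - int_width)
    } else {
        0
    }
}

fn main() {
    assert_eq!(int_max(true, 8), 127);
    assert_eq!(int_min(true, 8), -128);
    assert_eq!(int_max(false, 16), 65_535);
    assert_eq!(int_min(false, 16), 0);
}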
@ -19,12 +19,12 @@ use interfaces::*;
impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
pub fn codegen_statement(
&mut self,
bx: Bx,
mut bx: Bx,
statement: &mir::Statement<'tcx>
) -> Bx {
debug!("codegen_statement(statement={:?})", statement);

self.set_debug_loc(&bx, statement.source_info);
self.set_debug_loc(&mut bx, statement.source_info);
match statement.kind {
mir::StatementKind::Assign(ref place, ref rvalue) => {
if let mir::Place::Local(index) = *place {
@ -53,39 +53,39 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
}
}
} else {
let cg_dest = self.codegen_place(&bx, place);
let cg_dest = self.codegen_place(&mut bx, place);
self.codegen_rvalue(bx, cg_dest, rvalue)
}
}
mir::StatementKind::SetDiscriminant{ref place, variant_index} => {
self.codegen_place(&bx, place)
.codegen_set_discr(&bx, variant_index);
self.codegen_place(&mut bx, place)
.codegen_set_discr(&mut bx, variant_index);
bx
}
mir::StatementKind::StorageLive(local) => {
if let LocalRef::Place(cg_place) = self.locals[local] {
cg_place.storage_live(&bx);
cg_place.storage_live(&mut bx);
} else if let LocalRef::UnsizedPlace(cg_indirect_place) = self.locals[local] {
cg_indirect_place.storage_live(&bx);
cg_indirect_place.storage_live(&mut bx);
}
bx
}
mir::StatementKind::StorageDead(local) => {
if let LocalRef::Place(cg_place) = self.locals[local] {
cg_place.storage_dead(&bx);
cg_place.storage_dead(&mut bx);
} else if let LocalRef::UnsizedPlace(cg_indirect_place) = self.locals[local] {
cg_indirect_place.storage_dead(&bx);
cg_indirect_place.storage_dead(&mut bx);
}
bx
}
mir::StatementKind::InlineAsm { ref asm, ref outputs, ref inputs } => {
let outputs = outputs.iter().map(|output| {
self.codegen_place(&bx, output)
self.codegen_place(&mut bx, output)
}).collect();

let input_vals = inputs.iter()
.fold(Vec::with_capacity(inputs.len()), |mut acc, (span, input)| {
let op = self.codegen_operand(&bx, input);
let op = self.codegen_operand(&mut bx, input);
if let OperandValue::Immediate(_) = op.val {
acc.push(op.immediate());
} else {