Merge pull request #839 from bjorn3/fold_offset_into_load_store

Some runtime optimizations
bjorn3 2019-12-20 21:32:39 +01:00 committed by GitHub
commit 5cb81cdb16
14 changed files with 289 additions and 128 deletions
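
The heart of the change is the new `Pointer` type (src/pointer.rs below): a base, either an SSA `Value` or a `StackSlot`, paired with a constant `Offset32`. Offsets accumulate symbolically and are folded into the immediate of the eventual `load`/`store` instead of being materialized with a separate `iadd_imm` each time. A minimal illustration of the effect (not taken from the diff; `fx`, `base`, and `ty` stand for the usual builder context, a base address `Value`, and a Cranelift `Type`):

    // Before: a field read at byte offset 8 took two instructions.
    let field_addr = fx.bcx.ins().iadd_imm(base, 8);
    let val = fx.bcx.ins().load(ty, MemFlags::new(), field_addr, 0);

    // After: the offset rides along in the Pointer and lands in the
    // load's immediate, so a single instruction is emitted.
    let val = Pointer::new(base).offset_i64(fx, 8).load(fx, ty, MemFlags::new());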

src/abi/comments.rs

@@ -94,14 +94,14 @@ pub fn add_local_place_comments<'tcx>(
align.abi.bytes(),
align.pref.bytes(),
)),
- CPlaceInner::Addr(addr, None) => fx.add_global_comment(format!(
- "reuse {:5} {:20} {:4}b {}, {} storage={}",
+ CPlaceInner::Addr(ptr, None) => fx.add_global_comment(format!(
+ "reuse {:5} {:20} {:4}b {}, {} storage={:?}",
format!("{:?}", local),
format!("{:?}", ty),
size.bytes(),
align.abi.bytes(),
align.pref.bytes(),
- addr,
+ ptr,
)),
CPlaceInner::Addr(_, Some(_)) => unreachable!(),
}

src/abi/mod.rs

@@ -250,9 +250,7 @@ impl<'tcx, B: Backend + 'static> FunctionCx<'_, 'tcx, B> {
let ret_vals = self.lib_call(name, input_tys, return_tys, &args);
match *ret_vals {
[] => CValue::by_ref(
- self.bcx
- .ins()
- .iconst(self.pointer_type, self.pointer_type.bytes() as i64),
+ Pointer::const_addr(self, self.pointer_type.bytes() as i64),
return_layout,
),
[val] => CValue::by_val(val, return_layout),
@@ -352,7 +350,7 @@ pub fn codegen_fn_prelude(fx: &mut FunctionCx<'_, '_, impl Backend>, start_ebb:
// We won't mutate this argument, so it is fine to borrow the backing storage
// of this argument, to prevent a copy.
- let place = CPlace::for_addr(addr, val.layout());
+ let place = CPlace::for_ptr(Pointer::new(addr), val.layout());
#[cfg(debug_assertions)]
self::comments::add_local_place_comments(fx, place, local);
@@ -613,7 +611,8 @@ pub fn codegen_drop<'tcx>(fx: &mut FunctionCx<'_, 'tcx, impl Backend>, drop_plac
let drop_fn_ty = drop_fn.ty(fx.tcx);
match ty.kind {
ty::Dynamic(..) => {
- let (ptr, vtable) = drop_place.to_addr_maybe_unsized(fx);
+ let (ptr, vtable) = drop_place.to_ptr_maybe_unsized(fx);
+ let ptr = ptr.get_addr(fx);
let drop_fn = crate::vtable::drop_fn_of_obj(fx, vtable.unwrap());
let fn_sig = fx.tcx.normalize_erasing_late_bound_regions(

src/abi/pass_mode.rs

@@ -119,7 +119,7 @@ pub fn adjust_arg_for_abi<'tcx>(
let (a, b) = arg.load_scalar_pair(fx);
Pair(a, b)
}
- PassMode::ByRef => Single(arg.force_stack(fx)),
+ PassMode::ByRef => Single(arg.force_stack(fx).get_addr(fx)),
}
}
@@ -158,6 +158,6 @@ pub fn cvalue_for_param<'tcx>(
let (a, b) = ebb_params.assert_pair();
Some(CValue::by_val_pair(a, b, layout))
}
- PassMode::ByRef => Some(CValue::by_ref(ebb_params.assert_single(), layout)),
+ PassMode::ByRef => Some(CValue::by_ref(Pointer::new(ebb_params.assert_single()), layout)),
}
}

src/abi/returning.rs

@@ -28,7 +28,7 @@ pub fn codegen_return_param(
PassMode::ByRef => {
let ret_param = fx.bcx.append_ebb_param(start_ebb, fx.pointer_type);
fx.local_map
- .insert(RETURN_PLACE, CPlace::for_addr(ret_param, ret_layout));
+ .insert(RETURN_PLACE, CPlace::for_ptr(Pointer::new(ret_param), ret_layout));
Single(ret_param)
}
@@ -58,7 +58,7 @@ pub fn codegen_with_call_return_arg<'tcx, B: Backend, T>(
let return_ptr = match output_pass_mode {
PassMode::NoPass => None,
PassMode::ByRef => match ret_place {
- Some(ret_place) => Some(ret_place.to_addr(fx)),
+ Some(ret_place) => Some(ret_place.to_ptr(fx).get_addr(fx)),
None => Some(fx.bcx.ins().iconst(fx.pointer_type, 43)),
},
PassMode::ByVal(_) | PassMode::ByValPair(_, _) => None,

src/analyze.rs

@@ -30,14 +30,6 @@ pub fn analyze(fx: &FunctionCx<'_, '_, impl Backend>) -> IndexVec<Local, SsaKind
_ => {}
}
}
- match &bb.terminator().kind {
- TerminatorKind::Call {
- destination: Some((place, _)),
- ..
- } => analyze_non_ssa_place(&mut flag_map, place),
- _ => {}
- }
}
flag_map

src/base.rs

@@ -601,7 +601,7 @@ fn codegen_array_len<'tcx>(
fx.bcx.ins().iconst(fx.pointer_type, len)
}
ty::Slice(_elem_ty) => place
- .to_addr_maybe_unsized(fx)
+ .to_ptr_maybe_unsized(fx)
.1
.expect("Length metadata for slice place"),
_ => bug!("Rvalue::Len({:?})", place),
@@ -616,11 +616,13 @@ pub fn trans_place<'tcx>(
PlaceBase::Local(local) => fx.get_local_place(*local),
PlaceBase::Static(static_) => match static_.kind {
StaticKind::Static => {
+ // Statics can't be generic, so `static_.ty` doesn't need to be monomorphized.
crate::constant::codegen_static_ref(fx, static_.def_id, static_.ty)
}
StaticKind::Promoted(promoted, substs) => {
let instance = Instance::new(static_.def_id, fx.monomorphize(&substs));
- crate::constant::trans_promoted(fx, instance, promoted, static_.ty)
+ let ty = fx.monomorphize(&static_.ty);
+ crate::constant::trans_promoted(fx, instance, promoted, ty)
}
},
};
@@ -657,25 +659,21 @@ pub fn trans_place<'tcx>(
match cplace.layout().ty.kind {
ty::Array(elem_ty, len) => {
let elem_layout = fx.layout_of(elem_ty);
- let ptr = cplace.to_addr(fx);
+ let ptr = cplace.to_ptr(fx);
let len = crate::constant::force_eval_const(fx, len)
.eval_usize(fx.tcx, ParamEnv::reveal_all());
- cplace = CPlace::for_addr(
- fx.bcx
- .ins()
- .iadd_imm(ptr, elem_layout.size.bytes() as i64 * from as i64),
+ cplace = CPlace::for_ptr(
+ ptr.offset_i64(fx, elem_layout.size.bytes() as i64 * from as i64),
fx.layout_of(fx.tcx.mk_array(elem_ty, len - from as u64 - to as u64)),
);
}
ty::Slice(elem_ty) => {
assert!(from_end, "slice subslices should be `from_end`");
let elem_layout = fx.layout_of(elem_ty);
- let (ptr, len) = cplace.to_addr_maybe_unsized(fx);
+ let (ptr, len) = cplace.to_ptr_maybe_unsized(fx);
let len = len.unwrap();
- cplace = CPlace::for_addr_with_extra(
- fx.bcx
- .ins()
- .iadd_imm(ptr, elem_layout.size.bytes() as i64 * from as i64),
+ cplace = CPlace::for_ptr_with_extra(
+ ptr.offset_i64(fx, elem_layout.size.bytes() as i64 * from as i64),
fx.bcx.ins().iadd_imm(len, -(from as i64 + to as i64)),
cplace.layout(),
);
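
Illustrative numbers for the two subslice arms above, assuming a pattern like `[_, _, rest @ .., _]` (so `from = 2`, `to = 1`) over `u32` elements:

    // array [u32; N]: ptr advances 4 * 2 = 8 bytes,
    //                  layout becomes [u32; N - 2 - 1]
    // slice &[u32]:    ptr advances the same 8 bytes,
    //                  len becomes iadd_imm(len, -(2 + 1))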

src/constant.rs

@@ -132,9 +132,7 @@ pub fn trans_const_value<'tcx>(
CValue::by_val(val, layout)
}
ty::FnDef(_def_id, _substs) => CValue::by_ref(
- fx.bcx
- .ins()
- .iconst(fx.pointer_type, fx.pointer_type.bytes() as i64),
+ crate::pointer::Pointer::const_addr(fx, fx.pointer_type.bytes() as i64),
layout,
),
_ => trans_const_place(fx, const_).to_cvalue(fx),
@@ -265,7 +263,7 @@ fn cplace_for_dataid<'tcx>(
let global_ptr = fx.bcx.ins().global_value(fx.pointer_type, local_data_id);
let layout = fx.layout_of(fx.monomorphize(&ty));
assert!(!layout.is_unsized(), "unsized statics aren't supported");
- CPlace::for_addr(global_ptr, layout)
+ CPlace::for_ptr(crate::pointer::Pointer::new(global_ptr), layout)
}
fn define_all_allocs(tcx: TyCtxt<'_>, module: &mut Module<impl Backend>, cx: &mut ConstantCx) {
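
Here, as in the `NoPlace` arms of src/value_and_place.rs below, a zero-sized value is given a dangling but well-aligned address; nothing is ever loaded through it, so any non-null, suitably aligned constant works. A plain-Rust analogue of the trick (an illustration, not from this commit):

    // Like Pointer::const_addr(fx, ...): aligned, non-null, never dereferenced.
    let dangling = core::mem::align_of::<u64>() as *const u64;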

src/intrinsics.rs

@@ -446,7 +446,7 @@ pub fn codegen_intrinsic_call<'tcx>(
};
discriminant_value, (c ptr) {
let pointee_layout = fx.layout_of(ptr.layout().ty.builtin_deref(true).unwrap().ty);
- let val = CValue::by_ref(ptr.load_scalar(fx), pointee_layout);
+ let val = CValue::by_ref(Pointer::new(ptr.load_scalar(fx)), pointee_layout);
let discr = crate::discriminant::codegen_get_discriminant(fx, val, ret.layout());
ret.write_cvalue(fx, discr);
};
@@ -615,6 +615,10 @@ pub fn codegen_intrinsic_call<'tcx>(
let clif_ty = fx.clif_type(layout.ty).unwrap();
let val = match clif_ty {
types::I8 | types::I16 | types::I32 | types::I64 => fx.bcx.ins().iconst(clif_ty, 0),
+ types::I128 => {
+ let zero = fx.bcx.ins().iconst(types::I64, 0);
+ fx.bcx.ins().iconcat(zero, zero)
+ }
types::F32 => {
let zero = fx.bcx.ins().iconst(types::I32, 0);
fx.bcx.ins().bitcast(types::F32, zero)
@@ -629,7 +633,7 @@ pub fn codegen_intrinsic_call<'tcx>(
fx.bcx.def_var(mir_var(var), val);
}
_ => {
- let addr = ret.to_addr(fx);
+ let addr = ret.to_ptr(fx).get_addr(fx);
let layout = ret.layout();
fx.bcx.emit_small_memset(fx.module.target_config(), addr, 0, layout.size.bytes(), 1);
}
@@ -647,6 +651,11 @@ pub fn codegen_intrinsic_call<'tcx>(
let clif_ty = fx.clif_type(layout.ty).unwrap();
let val = match clif_ty {
types::I8 | types::I16 | types::I32 | types::I64 => fx.bcx.ins().iconst(clif_ty, 42),
+ types::I128 => {
+ let zero = fx.bcx.ins().iconst(types::I64, 0);
+ let fourty_two = fx.bcx.ins().iconst(types::I64, 42);
+ fx.bcx.ins().iconcat(fourty_two, zero)
+ }
types::F32 => {
let zero = fx.bcx.ins().iconst(types::I32, 0xdeadbeef);
fx.bcx.ins().bitcast(types::F32, zero)
@@ -681,7 +690,8 @@ pub fn codegen_intrinsic_call<'tcx>(
let msb_lz = fx.bcx.ins().clz(msb);
let msb_is_zero = fx.bcx.ins().icmp_imm(IntCC::Equal, msb, 0);
let lsb_lz_plus_64 = fx.bcx.ins().iadd_imm(lsb_lz, 64);
- fx.bcx.ins().select(msb_is_zero, lsb_lz_plus_64, msb_lz)
+ let res = fx.bcx.ins().select(msb_is_zero, lsb_lz_plus_64, msb_lz);
+ fx.bcx.ins().uextend(types::I128, res)
} else {
fx.bcx.ins().clz(arg)
};
@@ -697,7 +707,8 @@ pub fn codegen_intrinsic_call<'tcx>(
let msb_tz = fx.bcx.ins().ctz(msb);
let lsb_is_zero = fx.bcx.ins().icmp_imm(IntCC::Equal, lsb, 0);
let msb_tz_plus_64 = fx.bcx.ins().iadd_imm(msb_tz, 64);
- fx.bcx.ins().select(lsb_is_zero, msb_tz_plus_64, lsb_tz)
+ let res = fx.bcx.ins().select(lsb_is_zero, msb_tz_plus_64, lsb_tz);
+ fx.bcx.ins().uextend(types::I128, res)
} else {
fx.bcx.ins().ctz(arg)
};
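
Both 128-bit branches above follow the same split-halves recipe; as plain Rust (a sketch for exposition, not the emitted Cranelift IR):

    fn clz128(x: u128) -> u32 {
        let (lsb, msb) = (x as u64, (x >> 64) as u64);
        // A zero high half contributes 64 leading zeros on top of the
        // low half's count; otherwise the high half alone decides.
        if msb == 0 { 64 + lsb.leading_zeros() } else { msb.leading_zeros() }
    }

    fn ctz128(x: u128) -> u32 {
        let (lsb, msb) = (x as u64, (x >> 64) as u64);
        if lsb == 0 { 64 + msb.trailing_zeros() } else { lsb.trailing_zeros() }
    }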
@@ -804,12 +815,12 @@ pub fn codegen_intrinsic_call<'tcx>(
// Cranelift treats loads as volatile by default
let inner_layout =
fx.layout_of(ptr.layout().ty.builtin_deref(true).unwrap().ty);
- let val = CValue::by_ref(ptr.load_scalar(fx), inner_layout);
+ let val = CValue::by_ref(Pointer::new(ptr.load_scalar(fx)), inner_layout);
ret.write_cvalue(fx, val);
};
volatile_store, (v ptr, c val) {
// Cranelift treats stores as volatile by default
- let dest = CPlace::for_addr(ptr, val.layout());
+ let dest = CPlace::for_ptr(Pointer::new(ptr), val.layout());
dest.write_cvalue(fx, val);
};
@@ -843,11 +854,11 @@ pub fn codegen_intrinsic_call<'tcx>(
_ if intrinsic.starts_with("atomic_load"), (c ptr) {
let inner_layout =
fx.layout_of(ptr.layout().ty.builtin_deref(true).unwrap().ty);
- let val = CValue::by_ref(ptr.load_scalar(fx), inner_layout);
+ let val = CValue::by_ref(Pointer::new(ptr.load_scalar(fx)), inner_layout);
ret.write_cvalue(fx, val);
};
_ if intrinsic.starts_with("atomic_store"), (v ptr, c val) {
- let dest = CPlace::for_addr(ptr, val.layout());
+ let dest = CPlace::for_ptr(Pointer::new(ptr), val.layout());
dest.write_cvalue(fx, val);
};
_ if intrinsic.starts_with("atomic_xchg"), <T> (v ptr, c src) {
@@ -857,7 +868,7 @@ pub fn codegen_intrinsic_call<'tcx>(
ret.write_cvalue(fx, CValue::by_val(old, fx.layout_of(T)));
// Write new
- let dest = CPlace::for_addr(ptr, src.layout());
+ let dest = CPlace::for_ptr(Pointer::new(ptr), src.layout());
dest.write_cvalue(fx, src);
};
_ if intrinsic.starts_with("atomic_cxchg"), <T> (v ptr, v test_old, v new) { // both atomic_cxchg_* and atomic_cxchgweak_*

src/lib.rs

@@ -49,6 +49,7 @@ mod llvm_intrinsics;
mod main_shim;
mod metadata;
mod num;
+ mod pointer;
mod pretty_clif;
mod target_features_whitelist;
mod trap;
@@ -106,6 +107,7 @@ mod prelude {
pub use crate::cast::*;
pub use crate::common::*;
pub use crate::debuginfo::{DebugContext, FunctionDebugContext};
+ pub use crate::pointer::Pointer;
pub use crate::trap::*;
pub use crate::unimpl::unimpl;
pub use crate::value_and_place::{CPlace, CPlaceInner, CValue};

src/pointer.rs (new file, 154 lines)

@@ -0,0 +1,154 @@
use crate::prelude::*;
use cranelift::codegen::ir::immediates::Offset32;
#[derive(Copy, Clone, Debug)]
pub struct Pointer {
base: PointerBase,
offset: Offset32,
}
#[derive(Copy, Clone, Debug)]
enum PointerBase {
Addr(Value),
Stack(StackSlot),
}
impl Pointer {
pub fn new(addr: Value) -> Self {
Pointer {
base: PointerBase::Addr(addr),
offset: Offset32::new(0),
}
}
pub fn stack_slot(stack_slot: StackSlot) -> Self {
Pointer {
base: PointerBase::Stack(stack_slot),
offset: Offset32::new(0),
}
}
pub fn const_addr<'a, 'tcx>(fx: &mut FunctionCx<'a, 'tcx, impl Backend>, addr: i64) -> Self {
let addr = fx.bcx.ins().iconst(fx.pointer_type, addr);
Pointer {
base: PointerBase::Addr(addr),
offset: Offset32::new(0),
}
}
pub fn get_addr<'a, 'tcx>(self, fx: &mut FunctionCx<'a, 'tcx, impl Backend>) -> Value {
match self.base {
PointerBase::Addr(base_addr) => {
let offset: i64 = self.offset.into();
if offset == 0 {
base_addr
} else {
fx.bcx.ins().iadd_imm(base_addr, offset)
}
}
PointerBase::Stack(stack_slot) => fx.bcx.ins().stack_addr(fx.pointer_type, stack_slot, self.offset),
}
}
pub fn try_get_addr_and_offset(self) -> Option<(Value, Offset32)> {
match self.base {
PointerBase::Addr(addr) => Some((addr, self.offset)),
PointerBase::Stack(_) => None,
}
}
pub fn offset<'a, 'tcx>(
self,
fx: &mut FunctionCx<'a, 'tcx, impl Backend>,
extra_offset: Offset32,
) -> Self {
self.offset_i64(fx, extra_offset.into())
}
pub fn offset_i64<'a, 'tcx>(
self,
fx: &mut FunctionCx<'a, 'tcx, impl Backend>,
extra_offset: i64,
) -> Self {
if let Some(new_offset) = self.offset.try_add_i64(extra_offset) {
Pointer {
base: self.base,
offset: new_offset,
}
} else {
let base_offset: i64 = self.offset.into();
if let Some(new_offset) = base_offset.checked_add(extra_offset){
let base_addr = match self.base {
PointerBase::Addr(addr) => addr,
PointerBase::Stack(stack_slot) => fx.bcx.ins().stack_addr(fx.pointer_type, stack_slot, 0),
};
let addr = fx.bcx.ins().iadd_imm(base_addr, new_offset);
Pointer {
base: PointerBase::Addr(addr),
offset: Offset32::new(0),
}
} else {
panic!("self.offset ({}) + extra_offset ({}) not representable in i64", base_offset, extra_offset);
}
}
}
pub fn offset_value<'a, 'tcx>(
self,
fx: &mut FunctionCx<'a, 'tcx, impl Backend>,
extra_offset: Value,
) -> Self {
match self.base {
PointerBase::Addr(addr) => Pointer {
base: PointerBase::Addr(fx.bcx.ins().iadd(addr, extra_offset)),
offset: self.offset,
},
PointerBase::Stack(stack_slot) => {
let base_addr = fx.bcx.ins().stack_addr(fx.pointer_type, stack_slot, self.offset);
Pointer {
base: PointerBase::Addr(fx.bcx.ins().iadd(base_addr, extra_offset)),
offset: Offset32::new(0),
}
}
}
}
pub fn load<'a, 'tcx>(
self,
fx: &mut FunctionCx<'a, 'tcx, impl Backend>,
ty: Type,
flags: MemFlags,
) -> Value {
match self.base {
PointerBase::Addr(base_addr) => fx.bcx.ins().load(ty, flags, base_addr, self.offset),
PointerBase::Stack(stack_slot) => if ty == types::I128 {
// WORKAROUND for stack_load.i128 not being implemented
let base_addr = fx.bcx.ins().stack_addr(fx.pointer_type, stack_slot, 0);
fx.bcx.ins().load(ty, flags, base_addr, self.offset)
} else {
fx.bcx.ins().stack_load(ty, stack_slot, self.offset)
}
}
}
pub fn store<'a, 'tcx>(
self,
fx: &mut FunctionCx<'a, 'tcx, impl Backend>,
value: Value,
flags: MemFlags,
) {
match self.base {
PointerBase::Addr(base_addr) => {
fx.bcx.ins().store(flags, value, base_addr, self.offset);
}
PointerBase::Stack(stack_slot) => if fx.bcx.func.dfg.value_type(value) == types::I128 {
// WORKAROUND for stack_store.i128 not being implemented
let base_addr = fx.bcx.ins().stack_addr(fx.pointer_type, stack_slot, 0);
fx.bcx.ins().store(flags, value, base_addr, self.offset);
} else {
fx.bcx.ins().stack_store(value, stack_slot, self.offset);
}
}
}
}
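
To see how the folding composes, a hypothetical caller of the API above (`fx` and `slot` assumed in scope):

    // Neither offset emits an instruction; both fold into the final
    // stack_load's immediate (ss0+24).
    let ptr = Pointer::stack_slot(slot);
    let elem = ptr.offset_i64(fx, 16).offset_i64(fx, 8);
    let val = elem.load(fx, types::I64, MemFlags::new());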

src/trap.rs

@@ -91,8 +91,7 @@ pub fn trap_unreachable_ret_value<'tcx>(
msg: impl AsRef<str>,
) -> CValue<'tcx> {
trap_unimplemented(fx, msg);
- let zero = fx.bcx.ins().iconst(fx.pointer_type, 0);
- CValue::by_ref(zero, dest_layout)
+ CValue::by_ref(Pointer::const_addr(fx, 0), dest_layout)
}
/// Like `trap_unreachable` but returns a fake place for the specified type.
@@ -104,6 +103,5 @@ pub fn trap_unreachable_ret_place<'tcx>(
msg: impl AsRef<str>,
) -> CPlace<'tcx> {
trap_unimplemented(fx, msg);
- let zero = fx.bcx.ins().iconst(fx.pointer_type, 0);
- CPlace::for_addr(zero, dest_layout)
+ CPlace::for_ptr(Pointer::const_addr(fx, 0), dest_layout)
}

src/value_and_place.rs

@@ -1,24 +1,22 @@
use crate::prelude::*;
+ use cranelift::codegen::ir::immediates::Offset32;
fn codegen_field<'tcx>(
fx: &mut FunctionCx<'_, 'tcx, impl Backend>,
- base: Value,
+ base: Pointer,
extra: Option<Value>,
layout: TyLayout<'tcx>,
field: mir::Field,
- ) -> (Value, TyLayout<'tcx>) {
+ ) -> (Pointer, TyLayout<'tcx>) {
let field_offset = layout.fields.offset(field.index());
let field_layout = layout.field(&*fx, field.index());
let simple = |fx: &mut FunctionCx<_>| {
- if field_offset.bytes() > 0 {
- (
- fx.bcx.ins().iadd_imm(base, field_offset.bytes() as i64),
- field_layout,
- )
- } else {
- (base, field_layout)
- }
+ (
+ base.offset_i64(fx, i64::try_from(field_offset.bytes()).unwrap()),
+ field_layout,
+ )
};
if let Some(extra) = extra {
@@ -44,7 +42,7 @@ fn codegen_field<'tcx>(
let offset = fx.bcx.ins().band(and_lhs, and_rhs);
(
- fx.bcx.ins().iadd(base, offset),
+ base.offset_value(fx, offset),
field_layout,
)
}
@@ -54,12 +52,12 @@ fn codegen_field<'tcx>(
}
}
- fn scalar_pair_calculate_b_offset(tcx: TyCtxt<'_>, a_scalar: &Scalar, b_scalar: &Scalar) -> i32 {
+ fn scalar_pair_calculate_b_offset(tcx: TyCtxt<'_>, a_scalar: &Scalar, b_scalar: &Scalar) -> Offset32 {
let b_offset = a_scalar
.value
.size(&tcx)
.align_to(b_scalar.value.align(&tcx).abi);
- b_offset.bytes().try_into().unwrap()
+ Offset32::new(b_offset.bytes().try_into().unwrap())
}
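
Worked example for `scalar_pair_calculate_b_offset` (illustrative): for a `(u8, u64)` scalar pair on a 64-bit target, `a` occupies 1 byte and `b` requires 8-byte alignment, so

    // b_offset = Size::from_bytes(1).align_to(8 bytes) = 8 -> Offset32::new(8)

and the second load/store of the pair is addressed at `ptr + 8`.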
/// A read-only value
@@ -68,14 +66,14 @@ pub struct CValue<'tcx>(CValueInner, TyLayout<'tcx>);
#[derive(Debug, Copy, Clone)]
enum CValueInner {
- ByRef(Value),
+ ByRef(Pointer),
ByVal(Value),
ByValPair(Value, Value),
}
impl<'tcx> CValue<'tcx> {
- pub fn by_ref(value: Value, layout: TyLayout<'tcx>) -> CValue<'tcx> {
- CValue(CValueInner::ByRef(value), layout)
+ pub fn by_ref(ptr: Pointer, layout: TyLayout<'tcx>) -> CValue<'tcx> {
+ CValue(CValueInner::ByRef(ptr), layout)
}
pub fn by_val(value: Value, layout: TyLayout<'tcx>) -> CValue<'tcx> {
@@ -90,21 +88,31 @@ impl<'tcx> CValue<'tcx> {
self.1
}
- pub fn force_stack<'a>(self, fx: &mut FunctionCx<'_, 'tcx, impl Backend>) -> Value {
+ pub fn force_stack<'a>(self, fx: &mut FunctionCx<'_, 'tcx, impl Backend>) -> Pointer {
let layout = self.1;
match self.0 {
- CValueInner::ByRef(value) => value,
+ CValueInner::ByRef(ptr) => ptr,
CValueInner::ByVal(_) | CValueInner::ByValPair(_, _) => {
let cplace = CPlace::new_stack_slot(fx, layout.ty);
cplace.write_cvalue(fx, self);
- cplace.to_addr(fx)
+ cplace.to_ptr(fx)
}
}
}
pub fn try_to_addr(self) -> Option<Value> {
match self.0 {
- CValueInner::ByRef(addr) => Some(addr),
+ CValueInner::ByRef(ptr) => {
+ if let Some((base_addr, offset)) = ptr.try_get_addr_and_offset() {
+ if offset == Offset32::new(0) {
+ Some(base_addr)
+ } else {
+ None
+ }
+ } else {
+ None
+ }
+ }
CValueInner::ByVal(_) | CValueInner::ByValPair(_, _) => None,
}
}
@@ -113,13 +121,13 @@ impl<'tcx> CValue<'tcx> {
pub fn load_scalar<'a>(self, fx: &mut FunctionCx<'_, 'tcx, impl Backend>) -> Value {
let layout = self.1;
match self.0 {
- CValueInner::ByRef(addr) => {
+ CValueInner::ByRef(ptr) => {
let scalar = match layout.abi {
layout::Abi::Scalar(ref scalar) => scalar.clone(),
_ => unreachable!(),
};
let clif_ty = scalar_to_clif_type(fx.tcx, scalar);
- fx.bcx.ins().load(clif_ty, MemFlags::new(), addr, 0)
+ ptr.load(fx, clif_ty, MemFlags::new())
}
CValueInner::ByVal(value) => value,
CValueInner::ByValPair(_, _) => bug!("Please use load_scalar_pair for ByValPair"),
@@ -133,7 +141,7 @@ impl<'tcx> CValue<'tcx> {
) -> (Value, Value) {
let layout = self.1;
match self.0 {
- CValueInner::ByRef(addr) => {
+ CValueInner::ByRef(ptr) => {
let (a_scalar, b_scalar) = match &layout.abi {
layout::Abi::ScalarPair(a, b) => (a, b),
_ => unreachable!("load_scalar_pair({:?})", self),
@@ -141,8 +149,8 @@ impl<'tcx> CValue<'tcx> {
let b_offset = scalar_pair_calculate_b_offset(fx.tcx, a_scalar, b_scalar);
let clif_ty1 = scalar_to_clif_type(fx.tcx, a_scalar.clone());
let clif_ty2 = scalar_to_clif_type(fx.tcx, b_scalar.clone());
- let val1 = fx.bcx.ins().load(clif_ty1, MemFlags::new(), addr, 0);
- let val2 = fx.bcx.ins().load(clif_ty2, MemFlags::new(), addr, b_offset);
+ let val1 = ptr.load(fx, clif_ty1, MemFlags::new());
+ let val2 = ptr.offset(fx, b_offset).load(fx, clif_ty2, MemFlags::new());
(val1, val2)
}
CValueInner::ByVal(_) => bug!("Please use load_scalar for ByVal"),
@@ -156,12 +164,12 @@ impl<'tcx> CValue<'tcx> {
field: mir::Field,
) -> CValue<'tcx> {
let layout = self.1;
- let base = match self.0 {
- CValueInner::ByRef(addr) => addr,
+ let ptr = match self.0 {
+ CValueInner::ByRef(ptr) => ptr,
_ => bug!("place_field for {:?}", self),
};
- let (field_ptr, field_layout) = codegen_field(fx, base, None, layout, field);
+ let (field_ptr, field_layout) = codegen_field(fx, ptr, None, layout, field);
CValue::by_ref(field_ptr, field_layout)
}
@@ -224,7 +232,7 @@ pub struct CPlace<'tcx> {
#[derive(Debug, Copy, Clone)]
pub enum CPlaceInner {
Var(Local),
- Addr(Value, Option<Value>),
+ Addr(Pointer, Option<Value>),
Stack(StackSlot),
NoPlace,
}
@@ -282,16 +290,16 @@ impl<'tcx> CPlace<'tcx> {
}
}
- pub fn for_addr(addr: Value, layout: TyLayout<'tcx>) -> CPlace<'tcx> {
+ pub fn for_ptr(ptr: Pointer, layout: TyLayout<'tcx>) -> CPlace<'tcx> {
CPlace {
- inner: CPlaceInner::Addr(addr, None),
+ inner: CPlaceInner::Addr(ptr, None),
layout,
}
}
- pub fn for_addr_with_extra(addr: Value, extra: Value, layout: TyLayout<'tcx>) -> CPlace<'tcx> {
+ pub fn for_ptr_with_extra(ptr: Pointer, extra: Value, layout: TyLayout<'tcx>) -> CPlace<'tcx> {
CPlace {
- inner: CPlaceInner::Addr(addr, Some(extra)),
+ inner: CPlaceInner::Addr(ptr, Some(extra)),
layout,
layout,
}
}
@@ -304,47 +312,42 @@ impl<'tcx> CPlace<'tcx> {
fx.bcx.set_val_label(val, cranelift::codegen::ir::ValueLabel::from_u32(var.as_u32()));
CValue::by_val(val, layout)
}
- CPlaceInner::Addr(addr, extra) => {
+ CPlaceInner::Addr(ptr, extra) => {
assert!(extra.is_none(), "unsized values are not yet supported");
- CValue::by_ref(addr, layout)
+ CValue::by_ref(ptr, layout)
}
CPlaceInner::Stack(stack_slot) => CValue::by_ref(
- fx.bcx.ins().stack_addr(fx.pointer_type, stack_slot, 0),
+ Pointer::stack_slot(stack_slot),
layout,
),
CPlaceInner::NoPlace => CValue::by_ref(
- fx.bcx
- .ins()
- .iconst(fx.pointer_type, fx.pointer_type.bytes() as i64),
+ Pointer::const_addr(fx, i64::try_from(self.layout.align.pref.bytes()).unwrap()),
layout,
),
}
}
- pub fn to_addr(self, fx: &mut FunctionCx<'_, 'tcx, impl Backend>) -> Value {
- match self.to_addr_maybe_unsized(fx) {
- (addr, None) => addr,
+ pub fn to_ptr(self, fx: &mut FunctionCx<'_, 'tcx, impl Backend>) -> Pointer {
+ match self.to_ptr_maybe_unsized(fx) {
+ (ptr, None) => ptr,
(_, Some(_)) => bug!("Expected sized cplace, found {:?}", self),
}
}
- pub fn to_addr_maybe_unsized(
+ pub fn to_ptr_maybe_unsized(
self,
fx: &mut FunctionCx<'_, 'tcx, impl Backend>,
- ) -> (Value, Option<Value>) {
+ ) -> (Pointer, Option<Value>) {
match self.inner {
- CPlaceInner::Addr(addr, extra) => (addr, extra),
+ CPlaceInner::Addr(ptr, extra) => (ptr, extra),
CPlaceInner::Stack(stack_slot) => (
- fx.bcx.ins().stack_addr(fx.pointer_type, stack_slot, 0),
+ Pointer::stack_slot(stack_slot),
None,
),
CPlaceInner::NoPlace => {
(
- fx.bcx.ins().iconst(
- fx.pointer_type,
- i64::try_from(self.layout.align.pref.bytes()).unwrap(),
- ),
- None
+ Pointer::const_addr(fx, i64::try_from(self.layout.align.pref.bytes()).unwrap()),
+ None,
)
}
CPlaceInner::Var(_) => bug!("Expected CPlace::Addr, found CPlace::Var"),
@@ -420,17 +423,15 @@ impl<'tcx> CPlace<'tcx> {
assert_assignable(fx, from_ty, to_ty);
let dst_layout = self.layout();
- let addr = match self.inner {
+ let to_ptr = match self.inner {
CPlaceInner::Var(var) => {
let data = from.load_scalar(fx);
fx.bcx.set_val_label(data, cranelift::codegen::ir::ValueLabel::from_u32(var.as_u32()));
fx.bcx.def_var(mir_var(var), data);
return;
}
- CPlaceInner::Addr(addr, None) => addr,
- CPlaceInner::Stack(stack_slot) => {
- fx.bcx.ins().stack_addr(fx.pointer_type, stack_slot, 0)
- }
+ CPlaceInner::Addr(ptr, None) => ptr,
+ CPlaceInner::Stack(stack_slot) => Pointer::stack_slot(stack_slot),
CPlaceInner::NoPlace => {
if dst_layout.abi != Abi::Uninhabited {
assert_eq!(dst_layout.size.bytes(), 0, "{:?}", dst_layout);
@@ -442,27 +443,29 @@ impl<'tcx> CPlace<'tcx> {
match from.0 {
CValueInner::ByVal(val) => {
- fx.bcx.ins().store(MemFlags::new(), val, addr, 0);
+ to_ptr.store(fx, val, MemFlags::new());
}
CValueInner::ByValPair(value, extra) => match dst_layout.abi {
Abi::ScalarPair(ref a_scalar, ref b_scalar) => {
let b_offset = scalar_pair_calculate_b_offset(fx.tcx, a_scalar, b_scalar);
- fx.bcx.ins().store(MemFlags::new(), value, addr, 0);
- fx.bcx.ins().store(MemFlags::new(), extra, addr, b_offset);
+ to_ptr.store(fx, value, MemFlags::new());
+ to_ptr.offset(fx, b_offset).store(fx, extra, MemFlags::new());
}
_ => bug!(
"Non ScalarPair abi {:?} for ByValPair CValue",
dst_layout.abi
),
},
- CValueInner::ByRef(from_addr) => {
+ CValueInner::ByRef(from_ptr) => {
+ let from_addr = from_ptr.get_addr(fx);
+ let to_addr = to_ptr.get_addr(fx);
let src_layout = from.1;
let size = dst_layout.size.bytes();
let src_align = src_layout.align.abi.bytes() as u8;
let dst_align = dst_layout.align.abi.bytes() as u8;
fx.bcx.emit_small_memcpy(
fx.module.target_config(),
- addr,
+ to_addr,
from_addr,
size,
dst_align,
@@ -478,13 +481,13 @@ impl<'tcx> CPlace<'tcx> {
field: mir::Field,
) -> CPlace<'tcx> {
let layout = self.layout();
- let (base, extra) = self.to_addr_maybe_unsized(fx);
+ let (base, extra) = self.to_ptr_maybe_unsized(fx);
let (field_ptr, field_layout) = codegen_field(fx, base, extra, layout, field);
if field_layout.is_unsized() {
- CPlace::for_addr_with_extra(field_ptr, extra.unwrap(), field_layout)
+ CPlace::for_ptr_with_extra(field_ptr, extra.unwrap(), field_layout)
} else {
- CPlace::for_addr(field_ptr, field_layout)
+ CPlace::for_ptr(field_ptr, field_layout)
}
}
@@ -493,9 +496,9 @@ impl<'tcx> CPlace<'tcx> {
fx: &mut FunctionCx<'_, 'tcx, impl Backend>,
index: Value,
) -> CPlace<'tcx> {
- let (elem_layout, addr) = match self.layout().ty.kind {
- ty::Array(elem_ty, _) => (fx.layout_of(elem_ty), self.to_addr(fx)),
- ty::Slice(elem_ty) => (fx.layout_of(elem_ty), self.to_addr_maybe_unsized(fx).0),
+ let (elem_layout, ptr) = match self.layout().ty.kind {
+ ty::Array(elem_ty, _) => (fx.layout_of(elem_ty), self.to_ptr(fx)),
+ ty::Slice(elem_ty) => (fx.layout_of(elem_ty), self.to_ptr_maybe_unsized(fx).0),
_ => bug!("place_index({:?})", self.layout().ty),
};
@@ -504,30 +507,30 @@ impl<'tcx> CPlace<'tcx> {
.ins()
.imul_imm(index, elem_layout.size.bytes() as i64);
- CPlace::for_addr(fx.bcx.ins().iadd(addr, offset), elem_layout)
+ CPlace::for_ptr(ptr.offset_value(fx, offset), elem_layout)
}
pub fn place_deref(self, fx: &mut FunctionCx<'_, 'tcx, impl Backend>) -> CPlace<'tcx> {
let inner_layout = fx.layout_of(self.layout().ty.builtin_deref(true).unwrap().ty);
if has_ptr_meta(fx.tcx, inner_layout.ty) {
let (addr, extra) = self.to_cvalue(fx).load_scalar_pair(fx);
- CPlace::for_addr_with_extra(addr, extra, inner_layout)
+ CPlace::for_ptr_with_extra(Pointer::new(addr), extra, inner_layout)
} else {
- CPlace::for_addr(self.to_cvalue(fx).load_scalar(fx), inner_layout)
+ CPlace::for_ptr(Pointer::new(self.to_cvalue(fx).load_scalar(fx)), inner_layout)
}
}
pub fn write_place_ref(self, fx: &mut FunctionCx<'_, 'tcx, impl Backend>, dest: CPlace<'tcx>) {
if has_ptr_meta(fx.tcx, self.layout().ty) {
- let (value, extra) = self.to_addr_maybe_unsized(fx);
+ let (ptr, extra) = self.to_ptr_maybe_unsized(fx);
let ptr = CValue::by_val_pair(
- value,
+ ptr.get_addr(fx),
extra.expect("unsized type without metadata"),
dest.layout(),
);
dest.write_cvalue(fx, ptr);
} else {
- let ptr = CValue::by_val(self.to_addr(fx), dest.layout());
+ let ptr = CValue::by_val(self.to_ptr(fx).get_addr(fx), dest.layout());
dest.write_cvalue(fx, ptr);
}
}

src/vtable.rs

@@ -6,11 +6,17 @@ const DROP_FN_INDEX: usize = 0;
const SIZE_INDEX: usize = 1;
const ALIGN_INDEX: usize = 2;
+ fn vtable_memflags() -> MemFlags {
+ let mut flags = MemFlags::trusted(); // A vtable access is always aligned and will never trap.
+ flags.set_readonly(); // A vtable is always read-only.
+ flags
+ }
pub fn drop_fn_of_obj(fx: &mut FunctionCx<'_, '_, impl Backend>, vtable: Value) -> Value {
let usize_size = fx.layout_of(fx.tcx.types.usize).size.bytes() as usize;
fx.bcx.ins().load(
pointer_ty(fx.tcx),
- MemFlags::new(),
+ vtable_memflags(),
vtable,
(DROP_FN_INDEX * usize_size) as i32,
)
@@ -20,7 +26,7 @@ pub fn size_of_obj(fx: &mut FunctionCx<'_, '_, impl Backend>, vtable: Value) ->
let usize_size = fx.layout_of(fx.tcx.types.usize).size.bytes() as usize;
fx.bcx.ins().load(
pointer_ty(fx.tcx),
- MemFlags::new(),
+ vtable_memflags(),
vtable,
(SIZE_INDEX * usize_size) as i32,
)
@@ -30,7 +36,7 @@ pub fn min_align_of_obj(fx: &mut FunctionCx<'_, '_, impl Backend>, vtable: Value
let usize_size = fx.layout_of(fx.tcx.types.usize).size.bytes() as usize;
fx.bcx.ins().load(
pointer_ty(fx.tcx),
- MemFlags::new(),
+ vtable_memflags(),
vtable,
(ALIGN_INDEX * usize_size) as i32,
)
@@ -45,7 +51,7 @@ pub fn get_ptr_and_method_ref<'tcx>(
let usize_size = fx.layout_of(fx.tcx.types.usize).size.bytes();
let func_ref = fx.bcx.ins().load(
pointer_ty(fx.tcx),
- MemFlags::new(),
+ vtable_memflags(),
vtable,
((idx + 3) * usize_size as usize) as i32,
);
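
For reference, the slot layout these constants and the `(idx + 3)` scaling assume (one `usize` per slot; a sketch inferred from this file, not a documented ABI):

    // [0]       drop_in_place fn pointer   (DROP_FN_INDEX)
    // [1]       size of the concrete type  (SIZE_INDEX)
    // [2]       align of the concrete type (ALIGN_INDEX)
    // [3 + idx] pointer to trait method idx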

test.sh

@@ -78,8 +78,8 @@ hyperfine --runs ${RUN_RUNS:-10} ./raytracer_cg_llvm ./raytracer_cg_clif
popd
pushd build_sysroot/sysroot_src/src/libcore/tests
- rm -r sysroot_src/src/**/*/target/ || true
- cargo test
+ rm -r ./target || true
+ ../../../../../cargo.sh test
popd
pushd regex