rust/src/value_and_place.rs


//! Definition of [`CValue`] and [`CPlace`]
use crate::prelude::*;
use cranelift_codegen::ir::immediates::Offset32;
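/// Compute the pointer and layout of the given field of a value with layout `layout` located at
/// `base`, with `extra` carrying the pointer metadata for unsized values. Sized fields live at a
/// constant byte offset; the offset of an unsized (DST) tail field must be rounded up to the
/// field's dynamic alignment: `offset = (unaligned_offset + align - 1) & -align`, where `-align`
/// equals `!(align - 1)` in two's complement (e.g. an unaligned offset of 5 with align 4 rounds
/// up to 8), which is what the `iadd_imm`/`isub`/`band` sequence below computes.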
fn codegen_field<'tcx>(
fx: &mut FunctionCx<'_, '_, 'tcx>,
base: Pointer,
extra: Option<Value>,
layout: TyAndLayout<'tcx>,
field: mir::Field,
) -> (Pointer, TyAndLayout<'tcx>) {
let field_offset = layout.fields.offset(field.index());
let field_layout = layout.field(&*fx, field.index());
let simple = |fx: &mut FunctionCx<'_, '_, '_>| {
(base.offset_i64(fx, i64::try_from(field_offset.bytes()).unwrap()), field_layout)
};
if let Some(extra) = extra {
if !field_layout.is_unsized() {
return simple(fx);
}
match field_layout.ty.kind() {
ty::Slice(..) | ty::Str | ty::Foreign(..) => simple(fx),
ty::Adt(def, _) if def.repr().packed() => {
assert_eq!(layout.align.abi.bytes(), 1);
simple(fx)
}
_ => {
// We have to align the offset for DSTs, whose alignment is only known at runtime.
let unaligned_offset = field_offset.bytes();
let (_, unsized_align) =
crate::unsize::size_and_align_of_dst(fx, field_layout, extra);
let one = fx.bcx.ins().iconst(fx.pointer_type, 1);
let align_sub_1 = fx.bcx.ins().isub(unsized_align, one);
let and_lhs = fx.bcx.ins().iadd_imm(align_sub_1, unaligned_offset as i64);
let zero = fx.bcx.ins().iconst(fx.pointer_type, 0);
let and_rhs = fx.bcx.ins().isub(zero, unsized_align);
let offset = fx.bcx.ins().band(and_lhs, and_rhs);
(base.offset_value(fx, offset), field_layout)
}
}
} else {
simple(fx)
}
}
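/// Offset of the second scalar of a `ScalarPair`: the size of the first scalar rounded up to the
/// alignment of the second. For example, a 1-byte scalar followed by a 4-byte aligned scalar
/// yields a `b_offset` of 4.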
fn scalar_pair_calculate_b_offset(tcx: TyCtxt<'_>, a_scalar: Scalar, b_scalar: Scalar) -> Offset32 {
let b_offset = a_scalar.size(&tcx).align_to(b_scalar.align(&tcx).abi);
Offset32::new(b_offset.bytes().try_into().unwrap())
}
/// A read-only value
#[derive(Debug, Copy, Clone)]
pub(crate) struct CValue<'tcx>(CValueInner, TyAndLayout<'tcx>);
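/// How a [`CValue`] is represented: in memory behind a pointer (with optional metadata for
/// unsized values), as a single SSA value, or as a pair of SSA values.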
#[derive(Debug, Copy, Clone)]
enum CValueInner {
ByRef(Pointer, Option<Value>),
ByVal(Value),
ByValPair(Value, Value),
}
impl<'tcx> CValue<'tcx> {
pub(crate) fn by_ref(ptr: Pointer, layout: TyAndLayout<'tcx>) -> CValue<'tcx> {
CValue(CValueInner::ByRef(ptr, None), layout)
}
pub(crate) fn by_ref_unsized(
ptr: Pointer,
meta: Value,
layout: TyAndLayout<'tcx>,
) -> CValue<'tcx> {
CValue(CValueInner::ByRef(ptr, Some(meta)), layout)
}
pub(crate) fn by_val(value: Value, layout: TyAndLayout<'tcx>) -> CValue<'tcx> {
CValue(CValueInner::ByVal(value), layout)
}
pub(crate) fn by_val_pair(
value: Value,
extra: Value,
layout: TyAndLayout<'tcx>,
) -> CValue<'tcx> {
CValue(CValueInner::ByValPair(value, extra), layout)
}
pub(crate) fn layout(&self) -> TyAndLayout<'tcx> {
self.1
}
// FIXME remove
pub(crate) fn force_stack(self, fx: &mut FunctionCx<'_, '_, 'tcx>) -> (Pointer, Option<Value>) {
let layout = self.1;
match self.0 {
CValueInner::ByRef(ptr, meta) => (ptr, meta),
CValueInner::ByVal(_) | CValueInner::ByValPair(_, _) => {
let cplace = CPlace::new_stack_slot(fx, layout);
cplace.write_cvalue(fx, self);
(cplace.to_ptr(), None)
}
}
}
pub(crate) fn try_to_ptr(self) -> Option<(Pointer, Option<Value>)> {
match self.0 {
CValueInner::ByRef(ptr, meta) => Some((ptr, meta)),
CValueInner::ByVal(_) | CValueInner::ByValPair(_, _) => None,
}
}
/// Load a value with a `layout.abi` of scalar (or vector for SIMD types)
pub(crate) fn load_scalar(self, fx: &mut FunctionCx<'_, '_, 'tcx>) -> Value {
let layout = self.1;
match self.0 {
CValueInner::ByRef(ptr, None) => {
let clif_ty = match layout.abi {
Abi::Scalar(scalar) => scalar_to_clif_type(fx.tcx, scalar),
Abi::Vector { element, count } => scalar_to_clif_type(fx.tcx, element)
.by(u16::try_from(count).unwrap())
.unwrap(),
_ => unreachable!("{:?}", layout.ty),
};
let mut flags = MemFlags::new();
flags.set_notrap();
ptr.load(fx, clif_ty, flags)
}
CValueInner::ByVal(value) => value,
CValueInner::ByRef(_, Some(_)) => bug!("load_scalar for unsized value not allowed"),
CValueInner::ByValPair(_, _) => bug!("Please use load_scalar_pair for ByValPair"),
}
}
/// Load a value pair with a `layout.abi` of scalar pair
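/// For example, loading a `&[u8]` fat pointer yields its data pointer and its length as two
/// separate values (a sketch; `fx` and a suitable `val` are assumed in scope):
///
/// ```ignore
/// let (data_ptr, len) = val.load_scalar_pair(fx);
/// ```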
pub(crate) fn load_scalar_pair(self, fx: &mut FunctionCx<'_, '_, 'tcx>) -> (Value, Value) {
let layout = self.1;
match self.0 {
CValueInner::ByRef(ptr, None) => {
let (a_scalar, b_scalar) = match layout.abi {
Abi::ScalarPair(a, b) => (a, b),
_ => unreachable!("load_scalar_pair({:?})", self),
};
let b_offset = scalar_pair_calculate_b_offset(fx.tcx, a_scalar, b_scalar);
let clif_ty1 = scalar_to_clif_type(fx.tcx, a_scalar);
let clif_ty2 = scalar_to_clif_type(fx.tcx, b_scalar);
let mut flags = MemFlags::new();
flags.set_notrap();
let val1 = ptr.load(fx, clif_ty1, flags);
let val2 = ptr.offset(fx, b_offset).load(fx, clif_ty2, flags);
(val1, val2)
}
CValueInner::ByRef(_, Some(_)) => {
bug!("load_scalar_pair for unsized value not allowed")
}
CValueInner::ByVal(_) => bug!("Please use load_scalar for ByVal"),
CValueInner::ByValPair(val1, val2) => (val1, val2),
}
}
pub(crate) fn value_field(
self,
fx: &mut FunctionCx<'_, '_, 'tcx>,
field: mir::Field,
) -> CValue<'tcx> {
let layout = self.1;
match self.0 {
CValueInner::ByVal(val) => match layout.abi {
Abi::Vector { element: _, count } => {
let count = u8::try_from(count).expect("SIMD type with more than 255 lanes???");
let field = u8::try_from(field.index()).unwrap();
assert!(field < count);
let lane = fx.bcx.ins().extractlane(val, field);
let field_layout = layout.field(&*fx, usize::from(field));
CValue::by_val(lane, field_layout)
}
_ => unreachable!("value_field for ByVal with abi {:?}", layout.abi),
},
CValueInner::ByValPair(val1, val2) => match layout.abi {
Abi::ScalarPair(_, _) => {
let val = match field.as_u32() {
0 => val1,
1 => val2,
_ => bug!("field should be 0 or 1"),
};
let field_layout = layout.field(&*fx, usize::from(field));
CValue::by_val(val, field_layout)
}
_ => unreachable!("value_field for ByValPair with abi {:?}", layout.abi),
},
CValueInner::ByRef(ptr, None) => {
let (field_ptr, field_layout) = codegen_field(fx, ptr, None, layout, field);
CValue::by_ref(field_ptr, field_layout)
}
CValueInner::ByRef(_, Some(_)) => todo!(),
}
}
/// Like [`CValue::value_field`] except handling ADTs containing a single array field in a way
/// such that you can access individual lanes.
pub(crate) fn value_lane(
self,
fx: &mut FunctionCx<'_, '_, 'tcx>,
lane_idx: u64,
) -> CValue<'tcx> {
let layout = self.1;
assert!(layout.ty.is_simd());
let (lane_count, lane_ty) = layout.ty.simd_size_and_type(fx.tcx);
let lane_layout = fx.layout_of(lane_ty);
assert!(lane_idx < lane_count);
match self.0 {
CValueInner::ByVal(val) => match layout.abi {
Abi::Vector { element: _, count: _ } => {
assert!(lane_count <= u8::MAX.into(), "SIMD type with more than 255 lanes???");
let lane_idx = u8::try_from(lane_idx).unwrap();
let lane = fx.bcx.ins().extractlane(val, lane_idx);
CValue::by_val(lane, lane_layout)
}
_ => unreachable!("value_lane for ByVal with abi {:?}", layout.abi),
},
CValueInner::ByValPair(_, _) => unreachable!(),
CValueInner::ByRef(ptr, None) => {
let field_offset = lane_layout.size * lane_idx;
let field_ptr = ptr.offset_i64(fx, i64::try_from(field_offset.bytes()).unwrap());
CValue::by_ref(field_ptr, lane_layout)
}
CValueInner::ByRef(_, Some(_)) => unreachable!(),
}
}
pub(crate) fn unsize_value(self, fx: &mut FunctionCx<'_, '_, 'tcx>, dest: CPlace<'tcx>) {
crate::unsize::coerce_unsized_into(fx, self, dest);
}
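/// Create a constant [`CValue`] from a [`ty::ScalarInt`]. 128-bit constants don't fit in a single
/// Cranelift value, so they are assembled from two 64-bit halves with `iconcat` below. A hedged
/// usage sketch, assuming `fx` and a `u64` `layout` are in scope and that
/// `ScalarInt::try_from_uint` is available:
///
/// ```ignore
/// let val = CValue::const_val(fx, layout, ty::ScalarInt::try_from_uint(42u128, layout.size).unwrap());
/// ```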
/// If `ty` is signed, `const_val` must already be sign extended.
pub(crate) fn const_val(
fx: &mut FunctionCx<'_, '_, 'tcx>,
layout: TyAndLayout<'tcx>,
const_val: ty::ScalarInt,
) -> CValue<'tcx> {
assert_eq!(const_val.size(), layout.size, "{:#?}: {:?}", const_val, layout);
use cranelift_codegen::ir::immediates::{Ieee32, Ieee64};
let clif_ty = fx.clif_type(layout.ty).unwrap();
if let ty::Bool = layout.ty.kind() {
assert!(
const_val == ty::ScalarInt::FALSE || const_val == ty::ScalarInt::TRUE,
"Invalid bool 0x{:032X}",
const_val
);
}
let val = match layout.ty.kind() {
ty::Uint(UintTy::U128) | ty::Int(IntTy::I128) => {
let const_val = const_val.to_bits(layout.size).unwrap();
let lsb = fx.bcx.ins().iconst(types::I64, const_val as u64 as i64);
let msb = fx.bcx.ins().iconst(types::I64, (const_val >> 64) as u64 as i64);
fx.bcx.ins().iconcat(lsb, msb)
}
ty::Bool | ty::Char | ty::Uint(_) | ty::Int(_) | ty::Ref(..) | ty::RawPtr(..) => {
fx.bcx.ins().iconst(clif_ty, const_val.to_bits(layout.size).unwrap() as i64)
}
ty::Float(FloatTy::F32) => {
fx.bcx.ins().f32const(Ieee32::with_bits(u32::try_from(const_val).unwrap()))
}
ty::Float(FloatTy::F64) => {
fx.bcx.ins().f64const(Ieee64::with_bits(u64::try_from(const_val).unwrap()))
}
_ => panic!(
"CValue::const_val for non bool/char/float/integer/pointer type {:?} is not allowed",
layout.ty
),
};
CValue::by_val(val, layout)
}
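/// Reinterpret this pointer-like value (reference, raw pointer or fn pointer) as a value of a
/// different pointer type without changing any bits. Both layouts must have the same ABI.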
pub(crate) fn cast_pointer_to(self, layout: TyAndLayout<'tcx>) -> Self {
assert!(matches!(self.layout().ty.kind(), ty::Ref(..) | ty::RawPtr(..) | ty::FnPtr(..)));
assert!(matches!(layout.ty.kind(), ty::Ref(..) | ty::RawPtr(..) | ty::FnPtr(..)));
assert_eq!(self.layout().abi, layout.abi);
CValue(self.0, layout)
}
}
/// A place where you can write a value to or read a value from
#[derive(Debug, Copy, Clone)]
pub(crate) struct CPlace<'tcx> {
inner: CPlaceInner,
layout: TyAndLayout<'tcx>,
}
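/// How a [`CPlace`] is stored: as one or two SSA variables, as a single lane of a vector SSA
/// variable, or at an address (with optional metadata for unsized places).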
#[derive(Debug, Copy, Clone)]
pub(crate) enum CPlaceInner {
Var(Local, Variable),
VarPair(Local, Variable, Variable),
VarLane(Local, Variable, u8),
Addr(Pointer, Option<Value>),
}
impl<'tcx> CPlace<'tcx> {
pub(crate) fn layout(&self) -> TyAndLayout<'tcx> {
self.layout
}
pub(crate) fn inner(&self) -> &CPlaceInner {
&self.inner
}
pub(crate) fn new_stack_slot(
fx: &mut FunctionCx<'_, '_, 'tcx>,
layout: TyAndLayout<'tcx>,
) -> CPlace<'tcx> {
assert!(!layout.is_unsized());
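// ZSTs don't need a backing allocation; a dangling but well aligned pointer suffices.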
if layout.size.bytes() == 0 {
return CPlace {
inner: CPlaceInner::Addr(Pointer::dangling(layout.align.pref), None),
layout,
};
}
if layout.size.bytes() >= u64::from(u32::MAX - 16) {
fx.tcx
.sess
.fatal(&format!("values of type {} are too big to store on the stack", layout.ty));
}
let stack_slot = fx.bcx.create_stack_slot(StackSlotData {
kind: StackSlotKind::ExplicitSlot,
// FIXME Don't force the size to a multiple of 16 bytes once Cranelift gets a way to
// specify stack slot alignment.
size: (u32::try_from(layout.size.bytes()).unwrap() + 15) / 16 * 16,
});
CPlace { inner: CPlaceInner::Addr(Pointer::stack_slot(stack_slot), None), layout }
}
pub(crate) fn new_var(
fx: &mut FunctionCx<'_, '_, 'tcx>,
local: Local,
layout: TyAndLayout<'tcx>,
) -> CPlace<'tcx> {
let var = Variable::with_u32(fx.next_ssa_var);
fx.next_ssa_var += 1;
fx.bcx.declare_var(var, fx.clif_type(layout.ty).unwrap());
CPlace { inner: CPlaceInner::Var(local, var), layout }
}
pub(crate) fn new_var_pair(
fx: &mut FunctionCx<'_, '_, 'tcx>,
local: Local,
layout: TyAndLayout<'tcx>,
) -> CPlace<'tcx> {
let var1 = Variable::with_u32(fx.next_ssa_var);
fx.next_ssa_var += 1;
let var2 = Variable::with_u32(fx.next_ssa_var);
fx.next_ssa_var += 1;
let (ty1, ty2) = fx.clif_pair_type(layout.ty).unwrap();
fx.bcx.declare_var(var1, ty1);
fx.bcx.declare_var(var2, ty2);
CPlace { inner: CPlaceInner::VarPair(local, var1, var2), layout }
}
pub(crate) fn for_ptr(ptr: Pointer, layout: TyAndLayout<'tcx>) -> CPlace<'tcx> {
CPlace { inner: CPlaceInner::Addr(ptr, None), layout }
}
pub(crate) fn for_ptr_with_extra(
ptr: Pointer,
extra: Value,
layout: TyAndLayout<'tcx>,
) -> CPlace<'tcx> {
CPlace { inner: CPlaceInner::Addr(ptr, Some(extra)), layout }
}
pub(crate) fn to_cvalue(self, fx: &mut FunctionCx<'_, '_, 'tcx>) -> CValue<'tcx> {
let layout = self.layout();
match self.inner {
CPlaceInner::Var(_local, var) => {
let val = fx.bcx.use_var(var);
//fx.bcx.set_val_label(val, cranelift_codegen::ir::ValueLabel::new(var.index()));
CValue::by_val(val, layout)
}
CPlaceInner::VarPair(_local, var1, var2) => {
let val1 = fx.bcx.use_var(var1);
//fx.bcx.set_val_label(val1, cranelift_codegen::ir::ValueLabel::new(var1.index()));
let val2 = fx.bcx.use_var(var2);
//fx.bcx.set_val_label(val2, cranelift_codegen::ir::ValueLabel::new(var2.index()));
CValue::by_val_pair(val1, val2, layout)
}
CPlaceInner::VarLane(_local, var, lane) => {
let val = fx.bcx.use_var(var);
//fx.bcx.set_val_label(val, cranelift_codegen::ir::ValueLabel::new(var.index()));
let val = fx.bcx.ins().extractlane(val, lane);
CValue::by_val(val, layout)
}
CPlaceInner::Addr(ptr, extra) => {
if let Some(extra) = extra {
CValue::by_ref_unsized(ptr, extra, layout)
} else {
CValue::by_ref(ptr, layout)
}
}
}
}
pub(crate) fn to_ptr(self) -> Pointer {
match self.to_ptr_maybe_unsized() {
(ptr, None) => ptr,
(_, Some(_)) => bug!("Expected sized cplace, found {:?}", self),
}
}
pub(crate) fn to_ptr_maybe_unsized(self) -> (Pointer, Option<Value>) {
match self.inner {
CPlaceInner::Addr(ptr, extra) => (ptr, extra),
CPlaceInner::Var(_, _)
| CPlaceInner::VarPair(_, _, _)
| CPlaceInner::VarLane(_, _, _) => bug!("Expected CPlace::Addr, found {:?}", self),
}
}
pub(crate) fn write_cvalue(self, fx: &mut FunctionCx<'_, '_, 'tcx>, from: CValue<'tcx>) {
assert_assignable(fx, from.layout().ty, self.layout().ty, 16);
self.write_cvalue_maybe_transmute(fx, from, "write_cvalue");
}
pub(crate) fn write_cvalue_transmute(
self,
fx: &mut FunctionCx<'_, '_, 'tcx>,
from: CValue<'tcx>,
) {
self.write_cvalue_maybe_transmute(fx, from, "write_cvalue_transmute");
}
fn write_cvalue_maybe_transmute(
self,
fx: &mut FunctionCx<'_, '_, 'tcx>,
from: CValue<'tcx>,
method: &'static str,
) {
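// Write `data` to `var` bit-for-bit. Same-size int<->float pairs use `bitcast`,
// vector<->vector transmutes use `raw_bitcast`, and vector<->scalar transmutes
// round-trip through a stack slot.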
fn transmute_value<'tcx>(
fx: &mut FunctionCx<'_, '_, 'tcx>,
var: Variable,
data: Value,
dst_ty: Type,
) {
let src_ty = fx.bcx.func.dfg.value_type(data);
assert_eq!(
src_ty.bytes(),
dst_ty.bytes(),
"write_cvalue_transmute: {:?} -> {:?}",
src_ty,
dst_ty,
);
let data = match (src_ty, dst_ty) {
(_, _) if src_ty == dst_ty => data,
// This is a `write_cvalue_transmute`.
(types::I32, types::F32)
| (types::F32, types::I32)
| (types::I64, types::F64)
| (types::F64, types::I64) => fx.bcx.ins().bitcast(dst_ty, data),
_ if src_ty.is_vector() && dst_ty.is_vector() => {
fx.bcx.ins().raw_bitcast(dst_ty, data)
}
_ if src_ty.is_vector() || dst_ty.is_vector() => {
// FIXME do something more efficient for transmutes between vectors and integers.
let stack_slot = fx.bcx.create_stack_slot(StackSlotData {
kind: StackSlotKind::ExplicitSlot,
// FIXME Don't force the size to a multiple of 16 bytes once Cranelift gets a way to
// specify stack slot alignment.
size: (src_ty.bytes() + 15) / 16 * 16,
});
let ptr = Pointer::stack_slot(stack_slot);
ptr.store(fx, data, MemFlags::trusted());
ptr.load(fx, dst_ty, MemFlags::trusted())
}
// `CValue`s should never contain SSA-only types, so if you ended
// up here having seen an error like `B1 -> I8`, then before
// calling `write_cvalue` you need to add a `bint` instruction.
_ => unreachable!("write_cvalue_transmute: {:?} -> {:?}", src_ty, dst_ty),
};
//fx.bcx.set_val_label(data, cranelift_codegen::ir::ValueLabel::new(var.index()));
fx.bcx.def_var(var, data);
}
assert_eq!(self.layout().size, from.layout().size);
if fx.clif_comments.enabled() {
use cranelift_codegen::cursor::{Cursor, CursorPosition};
let cur_block = match fx.bcx.cursor().position() {
CursorPosition::After(block) => block,
_ => unreachable!(),
};
fx.add_comment(
fx.bcx.func.layout.last_inst(cur_block).unwrap(),
format!(
"{}: {:?}: {:?} <- {:?}: {:?}",
method,
self.inner(),
self.layout().ty,
from.0,
from.layout().ty
),
);
}
let dst_layout = self.layout();
let to_ptr = match self.inner {
CPlaceInner::Var(_local, var) => {
if let ty::Array(element, len) = dst_layout.ty.kind() {
// Can only happen for vector types
let len =
u16::try_from(len.eval_usize(fx.tcx, ParamEnv::reveal_all())).unwrap();
let vector_ty = fx.clif_type(*element).unwrap().by(len).unwrap();
let data = match from.0 {
CValueInner::ByRef(ptr, None) => {
let mut flags = MemFlags::new();
flags.set_notrap();
ptr.load(fx, vector_ty, flags)
}
CValueInner::ByVal(_)
| CValueInner::ByValPair(_, _)
| CValueInner::ByRef(_, Some(_)) => bug!("array should be ByRef"),
};
fx.bcx.def_var(var, data);
return;
}
let data = CValue(from.0, dst_layout).load_scalar(fx);
let dst_ty = fx.clif_type(self.layout().ty).unwrap();
transmute_value(fx, var, data, dst_ty);
return;
}
CPlaceInner::VarPair(_local, var1, var2) => {
let (data1, data2) = CValue(from.0, dst_layout).load_scalar_pair(fx);
let (dst_ty1, dst_ty2) = fx.clif_pair_type(self.layout().ty).unwrap();
transmute_value(fx, var1, data1, dst_ty1);
transmute_value(fx, var2, data2, dst_ty2);
return;
}
CPlaceInner::VarLane(_local, var, lane) => {
let data = from.load_scalar(fx);
// First get the old vector
let vector = fx.bcx.use_var(var);
//fx.bcx.set_val_label(vector, cranelift_codegen::ir::ValueLabel::new(var.index()));
// Next insert the written lane into the vector
let vector = fx.bcx.ins().insertlane(vector, data, lane);
// Finally write the new vector
//fx.bcx.set_val_label(vector, cranelift_codegen::ir::ValueLabel::new(var.index()));
fx.bcx.def_var(var, vector);
return;
}
CPlaceInner::Addr(ptr, None) => {
if dst_layout.size == Size::ZERO || dst_layout.abi == Abi::Uninhabited {
return;
}
ptr
}
CPlaceInner::Addr(_, Some(_)) => bug!("Can't write value to unsized place {:?}", self),
};
let mut flags = MemFlags::new();
flags.set_notrap();
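// Scalar and scalar pair ABIs get direct stores; other values are either stored as a
// single value (`ByVal`) or copied over with a memcpy (`ByRef`) below.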
match from.layout().abi {
// FIXME make Abi::Vector work too
Abi::Scalar(_) => {
let val = from.load_scalar(fx);
to_ptr.store(fx, val, flags);
return;
}
Abi::ScalarPair(a_scalar, b_scalar) => {
let (value, extra) = from.load_scalar_pair(fx);
let b_offset = scalar_pair_calculate_b_offset(fx.tcx, a_scalar, b_scalar);
to_ptr.store(fx, value, flags);
to_ptr.offset(fx, b_offset).store(fx, extra, flags);
return;
}
_ => {}
}
match from.0 {
CValueInner::ByVal(val) => {
to_ptr.store(fx, val, flags);
}
CValueInner::ByValPair(_, _) => {
bug!("Non ScalarPair abi {:?} for ByValPair CValue", dst_layout.abi);
}
CValueInner::ByRef(from_ptr, None) => {
let from_addr = from_ptr.get_addr(fx);
let to_addr = to_ptr.get_addr(fx);
let src_layout = from.1;
let size = dst_layout.size.bytes();
let src_align = src_layout.align.abi.bytes() as u8;
let dst_align = dst_layout.align.abi.bytes() as u8;
fx.bcx.emit_small_memory_copy(
fx.target_config,
to_addr,
from_addr,
size,
dst_align,
src_align,
true,
MemFlags::trusted(),
);
}
CValueInner::ByRef(_, Some(_)) => todo!(),
}
}
pub(crate) fn place_field(
self,
fx: &mut FunctionCx<'_, '_, 'tcx>,
field: mir::Field,
) -> CPlace<'tcx> {
let layout = self.layout();
match self.inner {
CPlaceInner::Var(local, var) => match layout.ty.kind() {
ty::Array(_, _) => {
// Can only happen for vector types
return CPlace {
inner: CPlaceInner::VarLane(local, var, field.as_u32().try_into().unwrap()),
layout: layout.field(fx, field.as_u32().try_into().unwrap()),
};
}
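// A #[repr(simd)] ADT either wraps a single array field, in which case field 0
// is the whole vector, or has one field per lane, in which case each field is a
// single lane.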
ty::Adt(adt_def, substs) if layout.ty.is_simd() => {
let f0_ty = adt_def.non_enum_variant().fields[0].ty(fx.tcx, substs);
match f0_ty.kind() {
ty::Array(_, _) => {
assert_eq!(field.as_u32(), 0);
return CPlace {
inner: CPlaceInner::Var(local, var),
layout: layout.field(fx, field.as_u32().try_into().unwrap()),
};
}
_ => {
return CPlace {
inner: CPlaceInner::VarLane(
local,
var,
field.as_u32().try_into().unwrap(),
),
layout: layout.field(fx, field.as_u32().try_into().unwrap()),
};
}
}
}
_ => {}
},
CPlaceInner::VarPair(local, var1, var2) => {
let layout = layout.field(&*fx, field.index());
match field.as_u32() {
0 => return CPlace { inner: CPlaceInner::Var(local, var1), layout },
1 => return CPlace { inner: CPlaceInner::Var(local, var2), layout },
_ => unreachable!("field should be 0 or 1"),
}
}
_ => {}
}
let (base, extra) = self.to_ptr_maybe_unsized();
let (field_ptr, field_layout) = codegen_field(fx, base, extra, layout, field);
if field_layout.is_unsized() {
if let ty::Foreign(_) = field_layout.ty.kind() {
assert!(extra.is_none());
CPlace::for_ptr(field_ptr, field_layout)
} else {
CPlace::for_ptr_with_extra(field_ptr, extra.unwrap(), field_layout)
}
} else {
CPlace::for_ptr(field_ptr, field_layout)
}
}
/// Like [`CPlace::place_field`] except handling ADTs containing a single array field in a way
/// such that you can access individual lanes.
pub(crate) fn place_lane(
self,
fx: &mut FunctionCx<'_, '_, 'tcx>,
lane_idx: u64,
) -> CPlace<'tcx> {
let layout = self.layout();
assert!(layout.ty.is_simd());
let (lane_count, lane_ty) = layout.ty.simd_size_and_type(fx.tcx);
let lane_layout = fx.layout_of(lane_ty);
assert!(lane_idx < lane_count);
match self.inner {
CPlaceInner::Var(local, var) => {
assert!(matches!(layout.abi, Abi::Vector { .. }));
CPlace {
inner: CPlaceInner::VarLane(local, var, lane_idx.try_into().unwrap()),
layout: lane_layout,
}
}
CPlaceInner::VarPair(_, _, _) => unreachable!(),
CPlaceInner::VarLane(_, _, _) => unreachable!(),
CPlaceInner::Addr(ptr, None) => {
let field_offset = lane_layout.size * lane_idx;
let field_ptr = ptr.offset_i64(fx, i64::try_from(field_offset.bytes()).unwrap());
CPlace::for_ptr(field_ptr, lane_layout)
}
CPlaceInner::Addr(_, Some(_)) => unreachable!(),
}
}
pub(crate) fn place_index(
self,
fx: &mut FunctionCx<'_, '_, 'tcx>,
index: Value,
) -> CPlace<'tcx> {
let (elem_layout, ptr) = match self.layout().ty.kind() {
ty::Array(elem_ty, _) => (fx.layout_of(*elem_ty), self.to_ptr()),
ty::Slice(elem_ty) => (fx.layout_of(*elem_ty), self.to_ptr_maybe_unsized().0),
_ => bug!("place_index({:?})", self.layout().ty),
};
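// The element's byte offset is `index * size_of::<Elem>()`; e.g. index 3 of a `[u32]`
// slice is at byte offset 12.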
let offset = fx.bcx.ins().imul_imm(index, elem_layout.size.bytes() as i64);
CPlace::for_ptr(ptr.offset_value(fx, offset), elem_layout)
}
pub(crate) fn place_deref(self, fx: &mut FunctionCx<'_, '_, 'tcx>) -> CPlace<'tcx> {
let inner_layout = fx.layout_of(self.layout().ty.builtin_deref(true).unwrap().ty);
if has_ptr_meta(fx.tcx, inner_layout.ty) {
let (addr, extra) = self.to_cvalue(fx).load_scalar_pair(fx);
CPlace::for_ptr_with_extra(Pointer::new(addr), extra, inner_layout)
} else {
CPlace::for_ptr(Pointer::new(self.to_cvalue(fx).load_scalar(fx)), inner_layout)
}
}
pub(crate) fn place_ref(
self,
fx: &mut FunctionCx<'_, '_, 'tcx>,
layout: TyAndLayout<'tcx>,
) -> CValue<'tcx> {
if has_ptr_meta(fx.tcx, self.layout().ty) {
let (ptr, extra) = self.to_ptr_maybe_unsized();
CValue::by_val_pair(
ptr.get_addr(fx),
extra.expect("unsized type without metadata"),
layout,
)
} else {
CValue::by_val(self.to_ptr().get_addr(fx), layout)
}
}
pub(crate) fn downcast_variant(
self,
fx: &FunctionCx<'_, '_, 'tcx>,
variant: VariantIdx,
) -> Self {
assert!(!self.layout().is_unsized());
let layout = self.layout().for_variant(fx, variant);
CPlace { inner: self.inner, layout }
}
}
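/// Assert that `from_ty` can be written to a place with type `to_ty`, ignoring lifetime
/// differences. This only exists to catch codegen bugs; it is not required for soundness.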
#[track_caller]
pub(crate) fn assert_assignable<'tcx>(
fx: &FunctionCx<'_, '_, 'tcx>,
from_ty: Ty<'tcx>,
to_ty: Ty<'tcx>,
limit: usize,
) {
if limit == 0 {
// `assert_assignable` exists solely to catch bugs in cg_clif; it isn't necessary for
// soundness. Don't attempt to check deep types, to avoid exponential behavior in
// certain cases.
return;
}
match (from_ty.kind(), to_ty.kind()) {
(ty::Ref(_, a, _), ty::Ref(_, b, _))
| (
ty::RawPtr(TypeAndMut { ty: a, mutbl: _ }),
ty::RawPtr(TypeAndMut { ty: b, mutbl: _ }),
) => {
assert_assignable(fx, *a, *b, limit - 1);
}
(ty::Ref(_, a, _), ty::RawPtr(TypeAndMut { ty: b, mutbl: _ }))
| (ty::RawPtr(TypeAndMut { ty: a, mutbl: _ }), ty::Ref(_, b, _)) => {
assert_assignable(fx, *a, *b, limit - 1);
}
(ty::FnPtr(_), ty::FnPtr(_)) => {
let from_sig = fx.tcx.normalize_erasing_late_bound_regions(
ParamEnv::reveal_all(),
from_ty.fn_sig(fx.tcx),
);
let to_sig = fx
.tcx
.normalize_erasing_late_bound_regions(ParamEnv::reveal_all(), to_ty.fn_sig(fx.tcx));
assert_eq!(
from_sig, to_sig,
"Can't write fn ptr with incompatible sig {:?} to place with sig {:?}\n\n{:#?}",
from_sig, to_sig, fx,
);
// fn(&T) -> for<'l> fn(&'l T) is allowed
}
(&ty::Dynamic(from_traits, _), &ty::Dynamic(to_traits, _)) => {
for (from, to) in from_traits.iter().zip(to_traits) {
let from =
fx.tcx.normalize_erasing_late_bound_regions(ParamEnv::reveal_all(), from);
let to = fx.tcx.normalize_erasing_late_bound_regions(ParamEnv::reveal_all(), to);
assert_eq!(
from, to,
"Can't write trait object of incompatible traits {:?} to place with traits {:?}\n\n{:#?}",
from_traits, to_traits, fx,
);
}
// dyn for<'r> Trait<'r> -> dyn Trait<'_> is allowed
}
(&ty::Tuple(types_a), &ty::Tuple(types_b)) => {
let mut types_a = types_a.iter();
let mut types_b = types_b.iter();
loop {
match (types_a.next(), types_b.next()) {
(Some(a), Some(b)) => assert_assignable(fx, a, b, limit - 1),
(None, None) => return,
(Some(_), None) | (None, Some(_)) => panic!("{:#?}/{:#?}", from_ty, to_ty),
}
}
}
(&ty::Adt(adt_def_a, substs_a), &ty::Adt(adt_def_b, substs_b))
if adt_def_a.did() == adt_def_b.did() =>
{
let mut types_a = substs_a.types();
let mut types_b = substs_b.types();
loop {
match (types_a.next(), types_b.next()) {
(Some(a), Some(b)) => assert_assignable(fx, a, b, limit - 1),
(None, None) => return,
(Some(_), None) | (None, Some(_)) => panic!("{:#?}/{:#?}", from_ty, to_ty),
}
}
}
(ty::Array(a, _), ty::Array(b, _)) => assert_assignable(fx, *a, *b, limit - 1),
(&ty::Closure(def_id_a, substs_a), &ty::Closure(def_id_b, substs_b))
if def_id_a == def_id_b =>
{
let mut types_a = substs_a.types();
let mut types_b = substs_b.types();
loop {
match (types_a.next(), types_b.next()) {
(Some(a), Some(b)) => assert_assignable(fx, a, b, limit - 1),
(None, None) => return,
(Some(_), None) | (None, Some(_)) => panic!("{:#?}/{:#?}", from_ty, to_ty),
}
}
}
(ty::Param(_), _) | (_, ty::Param(_)) if fx.tcx.sess.opts.unstable_opts.polymorphize => {
// No way to check if it is correct or not with polymorphization enabled
}
_ => {
assert_eq!(
from_ty,
to_ty,
"Can't write value with incompatible type {:?} to place with type {:?}\n\n{:#?}",
from_ty.kind(),
to_ty.kind(),
fx,
);
}
}
}