Make builtin_deref just return a Ty

parent 9a57c636e7
commit 0a67bf1b8a
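The diff below mechanically drops the `.ty` projection (and the `TypeAndMut` destructuring) at every `builtin_deref` call site, following the upstream change where `builtin_deref` returns the pointee `Ty` directly instead of a `TypeAndMut`. As a rough, self-contained sketch of that call-site difference (the toy `Ty`/`TypeAndMut` types and the `builtin_deref_old`/`builtin_deref_new` helpers are made up for illustration and are not rustc_middle's actual API):

// Toy illustration only: stand-in types, not rustc_middle's definitions.
#[derive(Clone, Copy, Debug, PartialEq)]
enum Ty {
    U8,
    Ptr(&'static Ty),
}

// Old return shape: pointee type bundled with mutability.
#[derive(Clone, Copy, Debug)]
#[allow(dead_code)]
struct TypeAndMut {
    ty: Ty,
    mutbl: bool,
}

// Old shape: callers must project `.ty` (or destructure `TypeAndMut`).
fn builtin_deref_old(ty: Ty) -> Option<TypeAndMut> {
    match ty {
        Ty::Ptr(pointee) => Some(TypeAndMut { ty: *pointee, mutbl: false }),
        Ty::U8 => None,
    }
}

// New shape: callers get the pointee type directly.
fn builtin_deref_new(ty: Ty) -> Option<Ty> {
    match ty {
        Ty::Ptr(pointee) => Some(*pointee),
        Ty::U8 => None,
    }
}

fn main() {
    static POINTEE: Ty = Ty::U8;
    let ptr = Ty::Ptr(&POINTEE);

    // The call-site change repeated throughout the diff below:
    let old_way = builtin_deref_old(ptr).unwrap().ty;
    let new_way = builtin_deref_new(ptr).unwrap();
    assert_eq!(old_way, new_way);
    println!("pointee: {:?}", new_way);
}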
@@ -670,11 +670,8 @@ fn codegen_stmt<'tcx>(
                     let to_ty = fx.monomorphize(to_ty);

                     fn is_fat_ptr<'tcx>(fx: &FunctionCx<'_, '_, 'tcx>, ty: Ty<'tcx>) -> bool {
-                        ty.builtin_deref(true).is_some_and(
-                            |ty::TypeAndMut { ty: pointee_ty, mutbl: _ }| {
-                                has_ptr_meta(fx.tcx, pointee_ty)
-                            },
-                        )
+                        ty.builtin_deref(true)
+                            .is_some_and(|pointee_ty| has_ptr_meta(fx.tcx, pointee_ty))
                     }

                     if is_fat_ptr(fx, from_ty) {
@@ -586,7 +586,7 @@ fn codegen_regular_intrinsic_call<'tcx>(
             intrinsic_args!(fx, args => (base, offset); intrinsic);
             let offset = offset.load_scalar(fx);

-            let pointee_ty = base.layout().ty.builtin_deref(true).unwrap().ty;
+            let pointee_ty = base.layout().ty.builtin_deref(true).unwrap();
             let pointee_size = fx.layout_of(pointee_ty).size.bytes();
             let ptr_diff = if pointee_size != 1 {
                 fx.bcx.ins().imul_imm(offset, pointee_size as i64)
@@ -610,7 +610,7 @@ fn codegen_regular_intrinsic_call<'tcx>(
             let val = val.load_scalar(fx);
             let count = count.load_scalar(fx);

-            let pointee_ty = dst.layout().ty.builtin_deref(true).unwrap().ty;
+            let pointee_ty = dst.layout().ty.builtin_deref(true).unwrap();
             let pointee_size = fx.layout_of(pointee_ty).size.bytes();
             let count = if pointee_size != 1 {
                 fx.bcx.ins().imul_imm(count, pointee_size as i64)
@@ -715,7 +715,7 @@ fn codegen_regular_intrinsic_call<'tcx>(

             // Cranelift treats loads as volatile by default
             // FIXME correctly handle unaligned_volatile_load
-            let inner_layout = fx.layout_of(ptr.layout().ty.builtin_deref(true).unwrap().ty);
+            let inner_layout = fx.layout_of(ptr.layout().ty.builtin_deref(true).unwrap());
             let val = CValue::by_ref(Pointer::new(ptr.load_scalar(fx)), inner_layout);
             ret.write_cvalue(fx, val);
         }
@@ -974,7 +974,7 @@ pub(super) fn codegen_simd_intrinsic_call<'tcx>(
             intrinsic_args!(fx, args => (ptr, offset); intrinsic);

             let (lane_count, ptr_lane_ty) = ptr.layout().ty.simd_size_and_type(fx.tcx);
-            let pointee_ty = ptr_lane_ty.builtin_deref(true).unwrap().ty;
+            let pointee_ty = ptr_lane_ty.builtin_deref(true).unwrap();
             let pointee_size = fx.layout_of(pointee_ty).size.bytes();
             let (ret_lane_count, ret_lane_ty) = ret.layout().ty.simd_size_and_type(fx.tcx);
             let ret_lane_layout = fx.layout_of(ret_lane_ty);
@@ -95,7 +95,7 @@ mod prelude {
     pub(crate) use rustc_middle::mir::{self, *};
     pub(crate) use rustc_middle::ty::layout::{LayoutOf, TyAndLayout};
     pub(crate) use rustc_middle::ty::{
-        self, FloatTy, Instance, InstanceDef, IntTy, ParamEnv, Ty, TyCtxt, TypeAndMut, UintTy,
+        self, FloatTy, Instance, InstanceDef, IntTy, ParamEnv, Ty, TyCtxt, UintTy,
     };
     pub(crate) use rustc_span::Span;
     pub(crate) use rustc_target::abi::{Abi, FieldIdx, Scalar, Size, VariantIdx, FIRST_VARIANT};
src/num.rs (10 lines changed)
@@ -388,12 +388,8 @@ pub(crate) fn codegen_ptr_binop<'tcx>(
     in_lhs: CValue<'tcx>,
     in_rhs: CValue<'tcx>,
 ) -> CValue<'tcx> {
-    let is_thin_ptr = in_lhs
-        .layout()
-        .ty
-        .builtin_deref(true)
-        .map(|TypeAndMut { ty, mutbl: _ }| !has_ptr_meta(fx.tcx, ty))
-        .unwrap_or(true);
+    let is_thin_ptr =
+        in_lhs.layout().ty.builtin_deref(true).map(|ty| !has_ptr_meta(fx.tcx, ty)).unwrap_or(true);

     if is_thin_ptr {
         match bin_op {
@@ -404,7 +400,7 @@ pub(crate) fn codegen_ptr_binop<'tcx>(
                 codegen_compare_bin_op(fx, bin_op, false, lhs, rhs)
             }
             BinOp::Offset => {
-                let pointee_ty = in_lhs.layout().ty.builtin_deref(true).unwrap().ty;
+                let pointee_ty = in_lhs.layout().ty.builtin_deref(true).unwrap();
                 let (base, offset) = (in_lhs, in_rhs.load_scalar(fx));
                 let pointee_size = fx.layout_of(pointee_ty).size.bytes();
                 let ptr_diff = fx.bcx.ins().imul_imm(offset, pointee_size as i64);
@@ -127,7 +127,7 @@ pub(crate) fn coerce_unsized_into<'tcx>(
     let dst_ty = dst.layout().ty;
     let mut coerce_ptr = || {
         let (base, info) =
-            if fx.layout_of(src.layout().ty.builtin_deref(true).unwrap().ty).is_unsized() {
+            if fx.layout_of(src.layout().ty.builtin_deref(true).unwrap()).is_unsized() {
                 let (old_base, old_info) = src.load_scalar_pair(fx);
                 unsize_ptr(fx, old_base, src.layout(), dst.layout(), Some(old_info))
             } else {
@@ -819,7 +819,7 @@ pub(crate) fn place_index(
     }

     pub(crate) fn place_deref(self, fx: &mut FunctionCx<'_, '_, 'tcx>) -> CPlace<'tcx> {
-        let inner_layout = fx.layout_of(self.layout().ty.builtin_deref(true).unwrap().ty);
+        let inner_layout = fx.layout_of(self.layout().ty.builtin_deref(true).unwrap());
         if has_ptr_meta(fx.tcx, inner_layout.ty) {
             let (addr, extra) = self.to_cvalue(fx).load_scalar_pair(fx);
             CPlace::for_ptr_with_extra(Pointer::new(addr), extra, inner_layout)
@@ -59,7 +59,7 @@ pub(crate) fn get_ptr_and_method_ref<'tcx>(

     if let ty::Ref(_, ty, _) = arg.layout().ty.kind() {
         if ty.is_dyn_star() {
-            let inner_layout = fx.layout_of(arg.layout().ty.builtin_deref(true).unwrap().ty);
+            let inner_layout = fx.layout_of(arg.layout().ty.builtin_deref(true).unwrap());
             let dyn_star = CPlace::for_ptr(Pointer::new(arg.load_scalar(fx)), inner_layout);
             let ptr = dyn_star.place_field(fx, FieldIdx::ZERO).to_ptr();
             let vtable =