rust/src/num.rs

//! Various operations on integer and floating-point numbers
use crate::prelude::*;
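/// Maps a MIR comparison `BinOp` to the corresponding Cranelift `IntCC`, picking the signed
/// or unsigned variant depending on `signed`. Returns `None` for non-comparison operators.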
pub(crate) fn bin_op_to_intcc(bin_op: BinOp, signed: bool) -> Option<IntCC> {
use BinOp::*;
use IntCC::*;
Some(match bin_op {
Eq => Equal,
Lt => {
if signed {
SignedLessThan
} else {
UnsignedLessThan
}
}
Le => {
if signed {
SignedLessThanOrEqual
} else {
UnsignedLessThanOrEqual
}
}
Ne => NotEqual,
Ge => {
if signed {
SignedGreaterThanOrEqual
} else {
UnsignedGreaterThanOrEqual
}
}
Gt => {
if signed {
SignedGreaterThan
} else {
UnsignedGreaterThan
}
}
_ => return None,
})
}
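/// Emits an `icmp` for the given comparison and wraps the result as a boolean `CValue`.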
fn codegen_compare_bin_op<'tcx>(
fx: &mut FunctionCx<'_, '_, 'tcx>,
bin_op: BinOp,
signed: bool,
lhs: Value,
rhs: Value,
) -> CValue<'tcx> {
let intcc = crate::num::bin_op_to_intcc(bin_op, signed).unwrap();
let val = fx.bcx.ins().icmp(intcc, lhs, rhs);
CValue::by_val(val, fx.layout_of(fx.tcx.types.bool))
}
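/// Main entry point for binary operations: comparisons on `bool`, integer and `char` operands
/// are lowered directly via `codegen_compare_bin_op`; all remaining operations are dispatched
/// on the operand type to the specialized helpers below.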
pub(crate) fn codegen_binop<'tcx>(
fx: &mut FunctionCx<'_, '_, 'tcx>,
bin_op: BinOp,
in_lhs: CValue<'tcx>,
in_rhs: CValue<'tcx>,
) -> CValue<'tcx> {
match bin_op {
BinOp::Eq | BinOp::Lt | BinOp::Le | BinOp::Ne | BinOp::Ge | BinOp::Gt => {
match in_lhs.layout().ty.kind() {
ty::Bool | ty::Uint(_) | ty::Int(_) | ty::Char => {
let signed = type_sign(in_lhs.layout().ty);
let lhs = in_lhs.load_scalar(fx);
let rhs = in_rhs.load_scalar(fx);
return codegen_compare_bin_op(fx, bin_op, signed, lhs, rhs);
}
_ => {}
}
}
_ => {}
}
match in_lhs.layout().ty.kind() {
ty::Bool => crate::num::codegen_bool_binop(fx, bin_op, in_lhs, in_rhs),
ty::Uint(_) | ty::Int(_) => crate::num::codegen_int_binop(fx, bin_op, in_lhs, in_rhs),
ty::Float(_) => crate::num::codegen_float_binop(fx, bin_op, in_lhs, in_rhs),
ty::RawPtr(..) | ty::FnPtr(..) => crate::num::codegen_ptr_binop(fx, bin_op, in_lhs, in_rhs),
_ => unreachable!("{:?}({:?}, {:?})", bin_op, in_lhs.layout().ty, in_rhs.layout().ty),
}
}
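/// Codegens a binary operation on `bool` operands. Only the bitwise operators end up here;
/// comparisons are already handled by `codegen_binop`.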
pub(crate) fn codegen_bool_binop<'tcx>(
fx: &mut FunctionCx<'_, '_, 'tcx>,
bin_op: BinOp,
in_lhs: CValue<'tcx>,
in_rhs: CValue<'tcx>,
) -> CValue<'tcx> {
let lhs = in_lhs.load_scalar(fx);
let rhs = in_rhs.load_scalar(fx);
let b = fx.bcx.ins();
let res = match bin_op {
BinOp::BitXor => b.bxor(lhs, rhs),
BinOp::BitAnd => b.band(lhs, rhs),
BinOp::BitOr => b.bor(lhs, rhs),
// Comparison binops are handled by `codegen_binop`.
_ => unreachable!("{:?}({:?}, {:?})", bin_op, in_lhs, in_rhs),
};
CValue::by_val(res, fx.layout_of(fx.tcx.types.bool))
}
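/// Codegens an unchecked integer binary operation. 128 bit operations are delegated to
/// `codegen_i128::maybe_codegen`; everything else maps to a single Cranelift instruction.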
pub(crate) fn codegen_int_binop<'tcx>(
fx: &mut FunctionCx<'_, '_, 'tcx>,
bin_op: BinOp,
in_lhs: CValue<'tcx>,
in_rhs: CValue<'tcx>,
) -> CValue<'tcx> {
if bin_op != BinOp::Shl && bin_op != BinOp::Shr {
assert_eq!(
in_lhs.layout().ty,
in_rhs.layout().ty,
"int binop requires lhs and rhs of same type"
);
}
if let Some(res) = crate::codegen_i128::maybe_codegen(fx, bin_op, false, in_lhs, in_rhs) {
return res;
}
let signed = type_sign(in_lhs.layout().ty);
let lhs = in_lhs.load_scalar(fx);
let rhs = in_rhs.load_scalar(fx);
let b = fx.bcx.ins();
let val = match bin_op {
BinOp::Add => b.iadd(lhs, rhs),
BinOp::Sub => b.isub(lhs, rhs),
BinOp::Mul => b.imul(lhs, rhs),
BinOp::Div => {
if signed {
b.sdiv(lhs, rhs)
} else {
b.udiv(lhs, rhs)
}
}
BinOp::Rem => {
if signed {
b.srem(lhs, rhs)
} else {
b.urem(lhs, rhs)
}
}
BinOp::BitXor => b.bxor(lhs, rhs),
BinOp::BitAnd => b.band(lhs, rhs),
BinOp::BitOr => b.bor(lhs, rhs),
BinOp::Shl => b.ishl(lhs, rhs),
BinOp::Shr => {
if signed {
b.sshr(lhs, rhs)
} else {
b.ushr(lhs, rhs)
}
}
// Comparison binops are handled by `codegen_binop`.
_ => unreachable!("{:?}({:?}, {:?})", bin_op, in_lhs.layout().ty, in_rhs.layout().ty),
};
CValue::by_val(val, in_lhs.layout())
}
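/// Codegens an integer binary operation with overflow detection, producing a `(T, bool)` pair
/// of the wrapped result and an overflow flag.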
pub(crate) fn codegen_checked_int_binop<'tcx>(
fx: &mut FunctionCx<'_, '_, 'tcx>,
bin_op: BinOp,
in_lhs: CValue<'tcx>,
in_rhs: CValue<'tcx>,
) -> CValue<'tcx> {
if bin_op != BinOp::Shl && bin_op != BinOp::Shr {
assert_eq!(
in_lhs.layout().ty,
in_rhs.layout().ty,
"checked int binop requires lhs and rhs of same type"
);
}
let lhs = in_lhs.load_scalar(fx);
let rhs = in_rhs.load_scalar(fx);
if let Some(res) = crate::codegen_i128::maybe_codegen(fx, bin_op, true, in_lhs, in_rhs) {
return res;
}
let signed = type_sign(in_lhs.layout().ty);
let (res, has_overflow) = match bin_op {
BinOp::Add => {
/*let (val, c_out) = fx.bcx.ins().iadd_cout(lhs, rhs);
(val, c_out)*/
// FIXME(CraneStation/cranelift#849) legalize iadd_cout for i8 and i16
let val = fx.bcx.ins().iadd(lhs, rhs);
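// Unsigned: overflow occurred iff the wrapped result is smaller than `lhs`.
// Signed: overflow occurred iff `rhs < 0` XOR `result < lhs`, because adding a
// non-negative value must not decrease `lhs` and adding a negative value must decrease it.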
let has_overflow = if !signed {
fx.bcx.ins().icmp(IntCC::UnsignedLessThan, val, lhs)
} else {
let rhs_is_negative = fx.bcx.ins().icmp_imm(IntCC::SignedLessThan, rhs, 0);
let slt = fx.bcx.ins().icmp(IntCC::SignedLessThan, val, lhs);
fx.bcx.ins().bxor(rhs_is_negative, slt)
};
(val, has_overflow)
}
BinOp::Sub => {
/*let (val, b_out) = fx.bcx.ins().isub_bout(lhs, rhs);
(val, b_out)*/
// FIXME(CraneStation/cranelift#849) legalize isub_bout for i8 and i16
let val = fx.bcx.ins().isub(lhs, rhs);
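// Unsigned: a borrow occurred iff the wrapped result is greater than `lhs`.
// Signed: overflow occurred iff `rhs < 0` XOR `result > lhs`, because subtracting a
// non-negative value must not increase `lhs` and subtracting a negative value must increase it.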
let has_overflow = if !signed {
fx.bcx.ins().icmp(IntCC::UnsignedGreaterThan, val, lhs)
} else {
let rhs_is_negative = fx.bcx.ins().icmp_imm(IntCC::SignedLessThan, rhs, 0);
let sgt = fx.bcx.ins().icmp(IntCC::SignedGreaterThan, val, lhs);
fx.bcx.ins().bxor(rhs_is_negative, sgt)
};
(val, has_overflow)
}
BinOp::Mul => {
let ty = fx.bcx.func.dfg.value_type(lhs);
match ty {
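// For narrow types the product is computed in the double-width type and overflow is
// detected by checking whether it still fits in the original width.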
types::I8 | types::I16 | types::I32 if !signed => {
let lhs = fx.bcx.ins().uextend(ty.double_width().unwrap(), lhs);
let rhs = fx.bcx.ins().uextend(ty.double_width().unwrap(), rhs);
let val = fx.bcx.ins().imul(lhs, rhs);
let has_overflow = fx.bcx.ins().icmp_imm(
IntCC::UnsignedGreaterThan,
val,
(1 << ty.bits()) - 1,
);
let val = fx.bcx.ins().ireduce(ty, val);
(val, has_overflow)
}
types::I8 | types::I16 | types::I32 if signed => {
let lhs = fx.bcx.ins().sextend(ty.double_width().unwrap(), lhs);
let rhs = fx.bcx.ins().sextend(ty.double_width().unwrap(), rhs);
let val = fx.bcx.ins().imul(lhs, rhs);
let has_underflow =
fx.bcx.ins().icmp_imm(IntCC::SignedLessThan, val, -(1 << (ty.bits() - 1)));
let has_overflow = fx.bcx.ins().icmp_imm(
IntCC::SignedGreaterThan,
val,
(1 << (ty.bits() - 1)) - 1,
);
let val = fx.bcx.ins().ireduce(ty, val);
(val, fx.bcx.ins().bor(has_underflow, has_overflow))
}
types::I64 => {
let val = fx.bcx.ins().imul(lhs, rhs);
let has_overflow = if !signed {
let val_hi = fx.bcx.ins().umulhi(lhs, rhs);
fx.bcx.ins().icmp_imm(IntCC::NotEqual, val_hi, 0)
} else {
// Based on LLVM's instruction sequence for compiling
// a.checked_mul(b).is_some() to riscv64gc:
// mulh a2, a0, a1
// mul a0, a0, a1
// srai a0, a0, 63
// xor a0, a0, a2
// snez a0, a0
let val_hi = fx.bcx.ins().smulhi(lhs, rhs);
let val_sign = fx.bcx.ins().sshr_imm(val, i64::from(ty.bits() - 1));
let xor = fx.bcx.ins().bxor(val_hi, val_sign);
fx.bcx.ins().icmp_imm(IntCC::NotEqual, xor, 0)
};
(val, has_overflow)
}
types::I128 => {
unreachable!("i128 should have been handled by codegen_i128::maybe_codegen")
}
_ => unreachable!("invalid non-integer type {}", ty),
}
}
BinOp::Shl => {
let val = fx.bcx.ins().ishl(lhs, rhs);
let ty = fx.bcx.func.dfg.value_type(val);
let max_shift = i64::from(ty.bits()) - 1;
let has_overflow = fx.bcx.ins().icmp_imm(IntCC::UnsignedGreaterThan, rhs, max_shift);
(val, has_overflow)
}
BinOp::Shr => {
let val =
if !signed { fx.bcx.ins().ushr(lhs, rhs) } else { fx.bcx.ins().sshr(lhs, rhs) };
let ty = fx.bcx.func.dfg.value_type(val);
let max_shift = i64::from(ty.bits()) - 1;
let has_overflow = fx.bcx.ins().icmp_imm(IntCC::UnsignedGreaterThan, rhs, max_shift);
(val, has_overflow)
}
_ => bug!("binop {:?} on checked int/uint lhs: {:?} rhs: {:?}", bin_op, in_lhs, in_rhs),
};
let out_layout = fx.layout_of(fx.tcx.mk_tup(&[in_lhs.layout().ty, fx.tcx.types.bool]));
CValue::by_val_pair(res, has_overflow, out_layout)
}
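/// Codegens a saturating integer add or sub by performing the checked operation and selecting
/// the type's min or max value when the overflow flag is set; for signed operations the
/// saturation bound depends on the sign of `rhs`.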
pub(crate) fn codegen_saturating_int_binop<'tcx>(
fx: &mut FunctionCx<'_, '_, 'tcx>,
bin_op: BinOp,
lhs: CValue<'tcx>,
rhs: CValue<'tcx>,
) -> CValue<'tcx> {
assert_eq!(lhs.layout().ty, rhs.layout().ty);
let signed = type_sign(lhs.layout().ty);
let clif_ty = fx.clif_type(lhs.layout().ty).unwrap();
let (min, max) = type_min_max_value(&mut fx.bcx, clif_ty, signed);
let checked_res = crate::num::codegen_checked_int_binop(fx, bin_op, lhs, rhs);
let (val, has_overflow) = checked_res.load_scalar_pair(fx);
let val = match (bin_op, signed) {
(BinOp::Add, false) => fx.bcx.ins().select(has_overflow, max, val),
(BinOp::Sub, false) => fx.bcx.ins().select(has_overflow, min, val),
(BinOp::Add, true) => {
let rhs = rhs.load_scalar(fx);
let rhs_ge_zero = fx.bcx.ins().icmp_imm(IntCC::SignedGreaterThanOrEqual, rhs, 0);
let sat_val = fx.bcx.ins().select(rhs_ge_zero, max, min);
fx.bcx.ins().select(has_overflow, sat_val, val)
}
(BinOp::Sub, true) => {
let rhs = rhs.load_scalar(fx);
let rhs_ge_zero = fx.bcx.ins().icmp_imm(IntCC::SignedGreaterThanOrEqual, rhs, 0);
let sat_val = fx.bcx.ins().select(rhs_ge_zero, min, max);
fx.bcx.ins().select(has_overflow, sat_val, val)
}
_ => unreachable!(),
};
CValue::by_val(val, lhs.layout())
}
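/// Codegens a binary operation on floats. Arithmetic maps to single Cranelift instructions,
/// `Rem` is lowered to a call to the libm `fmodf`/`fmod` functions and comparisons use `fcmp`.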
pub(crate) fn codegen_float_binop<'tcx>(
fx: &mut FunctionCx<'_, '_, 'tcx>,
bin_op: BinOp,
in_lhs: CValue<'tcx>,
in_rhs: CValue<'tcx>,
) -> CValue<'tcx> {
assert_eq!(in_lhs.layout().ty, in_rhs.layout().ty);
let lhs = in_lhs.load_scalar(fx);
let rhs = in_rhs.load_scalar(fx);
let b = fx.bcx.ins();
let res = match bin_op {
BinOp::Add => b.fadd(lhs, rhs),
BinOp::Sub => b.fsub(lhs, rhs),
BinOp::Mul => b.fmul(lhs, rhs),
BinOp::Div => b.fdiv(lhs, rhs),
BinOp::Rem => {
let (name, ty) = match in_lhs.layout().ty.kind() {
ty::Float(FloatTy::F32) => ("fmodf", types::F32),
ty::Float(FloatTy::F64) => ("fmod", types::F64),
_ => bug!(),
};
let ret_val = fx.lib_call(
name,
vec![AbiParam::new(ty), AbiParam::new(ty)],
vec![AbiParam::new(ty)],
&[lhs, rhs],
)[0];
return CValue::by_val(ret_val, in_lhs.layout());
}
BinOp::Eq | BinOp::Lt | BinOp::Le | BinOp::Ne | BinOp::Ge | BinOp::Gt => {
let fltcc = match bin_op {
BinOp::Eq => FloatCC::Equal,
BinOp::Lt => FloatCC::LessThan,
BinOp::Le => FloatCC::LessThanOrEqual,
BinOp::Ne => FloatCC::NotEqual,
BinOp::Ge => FloatCC::GreaterThanOrEqual,
BinOp::Gt => FloatCC::GreaterThan,
_ => unreachable!(),
};
let val = fx.bcx.ins().fcmp(fltcc, lhs, rhs);
return CValue::by_val(val, fx.layout_of(fx.tcx.types.bool));
}
_ => unreachable!("{:?}({:?}, {:?})", bin_op, in_lhs, in_rhs),
};
CValue::by_val(res, in_lhs.layout())
}
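/// Codegens a binary operation on pointers. Thin pointers are compared as unsigned integers
/// and support `Offset`; wide pointers are compared as `(pointer, metadata)` pairs.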
pub(crate) fn codegen_ptr_binop<'tcx>(
fx: &mut FunctionCx<'_, '_, 'tcx>,
bin_op: BinOp,
in_lhs: CValue<'tcx>,
in_rhs: CValue<'tcx>,
) -> CValue<'tcx> {
let is_thin_ptr = in_lhs
.layout()
.ty
.builtin_deref(true)
.map(|TypeAndMut { ty, mutbl: _ }| !has_ptr_meta(fx.tcx, ty))
.unwrap_or(true);
if is_thin_ptr {
match bin_op {
BinOp::Eq | BinOp::Lt | BinOp::Le | BinOp::Ne | BinOp::Ge | BinOp::Gt => {
let lhs = in_lhs.load_scalar(fx);
let rhs = in_rhs.load_scalar(fx);
codegen_compare_bin_op(fx, bin_op, false, lhs, rhs)
}
BinOp::Offset => {
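// `Offset` advances the pointer by `offset` elements, i.e. by
// `offset * size_of::<Pointee>()` bytes.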
let pointee_ty = in_lhs.layout().ty.builtin_deref(true).unwrap().ty;
let (base, offset) = (in_lhs, in_rhs.load_scalar(fx));
let pointee_size = fx.layout_of(pointee_ty).size.bytes();
let ptr_diff = fx.bcx.ins().imul_imm(offset, pointee_size as i64);
let base_val = base.load_scalar(fx);
let res = fx.bcx.ins().iadd(base_val, ptr_diff);
CValue::by_val(res, base.layout())
}
_ => unreachable!("{:?}({:?}, {:?})", bin_op, in_lhs, in_rhs),
}
} else {
let (lhs_ptr, lhs_extra) = in_lhs.load_scalar_pair(fx);
let (rhs_ptr, rhs_extra) = in_rhs.load_scalar_pair(fx);
let res = match bin_op {
BinOp::Eq => {
let ptr_eq = fx.bcx.ins().icmp(IntCC::Equal, lhs_ptr, rhs_ptr);
let extra_eq = fx.bcx.ins().icmp(IntCC::Equal, lhs_extra, rhs_extra);
fx.bcx.ins().band(ptr_eq, extra_eq)
}
BinOp::Ne => {
let ptr_ne = fx.bcx.ins().icmp(IntCC::NotEqual, lhs_ptr, rhs_ptr);
let extra_ne = fx.bcx.ins().icmp(IntCC::NotEqual, lhs_extra, rhs_extra);
fx.bcx.ins().bor(ptr_ne, extra_ne)
}
BinOp::Lt | BinOp::Le | BinOp::Ge | BinOp::Gt => {
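// Order wide pointers lexicographically: compare the pointer parts and fall back to
// the metadata only when the pointers are equal.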
let ptr_eq = fx.bcx.ins().icmp(IntCC::Equal, lhs_ptr, rhs_ptr);
let ptr_cmp =
fx.bcx.ins().icmp(bin_op_to_intcc(bin_op, false).unwrap(), lhs_ptr, rhs_ptr);
let extra_cmp = fx.bcx.ins().icmp(
bin_op_to_intcc(bin_op, false).unwrap(),
lhs_extra,
rhs_extra,
);
fx.bcx.ins().select(ptr_eq, extra_cmp, ptr_cmp)
}
_ => panic!("bin_op {:?} on ptr", bin_op),
};
CValue::by_val(res, fx.layout_of(fx.tcx.types.bool))
}
}
// In Rust, floating point min and max don't propagate NaN; in Cranelift they do. For this
// reason it is necessary to use `a.is_nan() ? b : (a >= b ? b : a)` for `minnumf*` and
// `a.is_nan() ? b : (a <= b ? b : a)` for `maxnumf*`. The NaN check is done by comparing a
// float against itself: only NaN is not equal to itself.
pub(crate) fn codegen_float_min(fx: &mut FunctionCx<'_, '_, '_>, a: Value, b: Value) -> Value {
let a_is_nan = fx.bcx.ins().fcmp(FloatCC::NotEqual, a, a);
let a_ge_b = fx.bcx.ins().fcmp(FloatCC::GreaterThanOrEqual, a, b);
let temp = fx.bcx.ins().select(a_ge_b, b, a);
fx.bcx.ins().select(a_is_nan, b, temp)
}
pub(crate) fn codegen_float_max(fx: &mut FunctionCx<'_, '_, '_>, a: Value, b: Value) -> Value {
let a_is_nan = fx.bcx.ins().fcmp(FloatCC::NotEqual, a, a);
let a_le_b = fx.bcx.ins().fcmp(FloatCC::LessThanOrEqual, a, b);
let temp = fx.bcx.ins().select(a_le_b, b, a);
fx.bcx.ins().select(a_is_nan, b, temp)
}