diff --git a/src/intrinsics/mod.rs b/src/intrinsics/mod.rs
index de7695041d3..49c0fa34340 100644
--- a/src/intrinsics/mod.rs
+++ b/src/intrinsics/mod.rs
@@ -1,28 +1,6 @@
 //! Codegen of intrinsics. This includes `extern "rust-intrinsic"`, `extern "platform-intrinsic"`
 //! and LLVM intrinsics that have symbol names starting with `llvm.`.
 
-macro_rules! intrinsic_pat {
-    (_) => {
-        _
-    };
-    ($name:ident) => {
-        sym::$name
-    };
-    (kw.$name:ident) => {
-        kw::$name
-    };
-}
-
-macro_rules! intrinsic_arg {
-    (o $fx:expr, $arg:ident) => {};
-    (c $fx:expr, $arg:ident) => {
-        let $arg = codegen_operand($fx, $arg);
-    };
-    (v $fx:expr, $arg:ident) => {
-        let $arg = codegen_operand($fx, $arg).load_scalar($fx);
-    };
-}
-
 macro_rules! intrinsic_args {
     ($fx:expr, $args:expr => ($($arg:tt),*); $intrinsic:expr) => {
         #[allow(unused_parens)]
@@ -34,28 +12,6 @@ macro_rules! intrinsic_args {
     }
 }
 
-macro_rules! intrinsic_match {
-    ($fx:expr, $intrinsic:expr, $args:expr,
-    _ => $unknown:block;
-    $(
-        $($($name:tt).*)|+ $(if $cond:expr)?, ($($a:ident $arg:ident),*) $content:block;
-    )*) => {
-        match $intrinsic {
-            $(
-                $(intrinsic_pat!($($name).*))|* $(if $cond)? => {
-                    if let [$($arg),*] = $args {
-                        $(intrinsic_arg!($a $fx, $arg);)*
-                        $content
-                    } else {
-                        bug!("wrong number of args for intrinsic {:?}", $intrinsic);
-                    }
-                }
-            )*
-            _ => $unknown,
-        }
-    }
-}
-
 mod cpuid;
 mod llvm;
 mod simd;
@@ -359,28 +315,31 @@ fn codegen_regular_intrinsic_call<'tcx>(
 ) {
     let usize_layout = fx.layout_of(fx.tcx.types.usize);
 
-    intrinsic_match! {
-        fx, intrinsic, args,
-        _ => {
-            fx.tcx.sess.span_fatal(source_info.span, &format!("unsupported intrinsic {}", intrinsic));
-        };
+    match intrinsic {
+        sym::assume => {
+            intrinsic_args!(fx, args => (_a); intrinsic);
+        }
+        sym::likely | sym::unlikely => {
+            intrinsic_args!(fx, args => (a); intrinsic);
 
-        assume, (c _a) {};
-        likely | unlikely, (c a) {
             ret.write_cvalue(fx, a);
-        };
-        breakpoint, () {
+        }
+        sym::breakpoint => {
+            intrinsic_args!(fx, args => (); intrinsic);
+
             fx.bcx.ins().debugtrap();
-        };
-        copy | copy_nonoverlapping, (v src, v dst, v count) {
+        }
+        sym::copy | sym::copy_nonoverlapping => {
+            intrinsic_args!(fx, args => (src, dst, count); intrinsic);
+            let src = src.load_scalar(fx);
+            let dst = dst.load_scalar(fx);
+            let count = count.load_scalar(fx);
+
             let elem_ty = substs.type_at(0);
             let elem_size: u64 = fx.layout_of(elem_ty).size.bytes();
             assert_eq!(args.len(), 3);
-            let byte_amount = if elem_size != 1 {
-                fx.bcx.ins().imul_imm(count, elem_size as i64)
-            } else {
-                count
-            };
+            let byte_amount =
+                if elem_size != 1 { fx.bcx.ins().imul_imm(count, elem_size as i64) } else { count };
 
             if intrinsic == sym::copy_nonoverlapping {
                 // FIXME emit_small_memcpy
@@ -389,17 +348,19 @@ fn codegen_regular_intrinsic_call<'tcx>(
                 // FIXME emit_small_memmove
                 fx.bcx.call_memmove(fx.target_config, dst, src, byte_amount);
             }
-        };
-        // NOTE: the volatile variants have src and dst swapped
-        volatile_copy_memory | volatile_copy_nonoverlapping_memory, (v dst, v src, v count) {
+        }
+        sym::volatile_copy_memory | sym::volatile_copy_nonoverlapping_memory => {
+            // NOTE: the volatile variants have src and dst swapped
+            intrinsic_args!(fx, args => (dst, src, count); intrinsic);
+            let dst = dst.load_scalar(fx);
+            let src = src.load_scalar(fx);
+            let count = count.load_scalar(fx);
+
             let elem_ty = substs.type_at(0);
             let elem_size: u64 = fx.layout_of(elem_ty).size.bytes();
             assert_eq!(args.len(), 3);
-            let byte_amount = if elem_size != 1 {
-                fx.bcx.ins().imul_imm(count, elem_size as i64)
-            } else {
-                count
-            };
+            let byte_amount =
+                if elem_size != 1 { fx.bcx.ins().imul_imm(count, elem_size as i64) } else { count };
 
             // FIXME make the copy actually volatile when using emit_small_mem{cpy,move}
             if intrinsic == sym::volatile_copy_nonoverlapping_memory {
@@ -409,8 +370,10 @@ fn codegen_regular_intrinsic_call<'tcx>(
                 // FIXME emit_small_memmove
                 fx.bcx.call_memmove(fx.target_config, dst, src, byte_amount);
             }
-        };
-        size_of_val, (c ptr) {
+        }
+        sym::size_of_val => {
+            intrinsic_args!(fx, args => (ptr); intrinsic);
+
             let layout = fx.layout_of(substs.type_at(0));
             // Note: Can't use is_unsized here as truly unsized types need to take the fixed size
             // branch
@@ -419,14 +382,13 @@ fn codegen_regular_intrinsic_call<'tcx>(
                 let (size, _align) = crate::unsize::size_and_align_of_dst(fx, layout, info);
                 size
             } else {
-                fx
-                    .bcx
-                    .ins()
-                    .iconst(fx.pointer_type, layout.size.bytes() as i64)
+                fx.bcx.ins().iconst(fx.pointer_type, layout.size.bytes() as i64)
             };
             ret.write_cvalue(fx, CValue::by_val(size, usize_layout));
-        };
-        min_align_of_val, (c ptr) {
+        }
+        sym::min_align_of_val => {
+            intrinsic_args!(fx, args => (ptr); intrinsic);
+
             let layout = fx.layout_of(substs.type_at(0));
             // Note: Can't use is_unsized here as truly unsized types need to take the fixed size
             // branch
@@ -435,26 +397,37 @@ fn codegen_regular_intrinsic_call<'tcx>(
                 let (_size, align) = crate::unsize::size_and_align_of_dst(fx, layout, info);
                 align
             } else {
-                fx
-                    .bcx
-                    .ins()
-                    .iconst(fx.pointer_type, layout.align.abi.bytes() as i64)
+                fx.bcx.ins().iconst(fx.pointer_type, layout.align.abi.bytes() as i64)
             };
             ret.write_cvalue(fx, CValue::by_val(align, usize_layout));
-        };
+        }
+
+        sym::vtable_size => {
+            intrinsic_args!(fx, args => (vtable); intrinsic);
+            let vtable = vtable.load_scalar(fx);
 
-        vtable_size, (v vtable) {
             let size = crate::vtable::size_of_obj(fx, vtable);
             ret.write_cvalue(fx, CValue::by_val(size, usize_layout));
-        };
+        }
+
+        sym::vtable_align => {
+            intrinsic_args!(fx, args => (vtable); intrinsic);
+            let vtable = vtable.load_scalar(fx);
 
-        vtable_align, (v vtable) {
             let align = crate::vtable::min_align_of_obj(fx, vtable);
             ret.write_cvalue(fx, CValue::by_val(align, usize_layout));
-        };
+        }
+
+        sym::unchecked_add
+        | sym::unchecked_sub
+        | sym::unchecked_mul
+        | sym::unchecked_div
+        | sym::exact_div
+        | sym::unchecked_rem
+        | sym::unchecked_shl
+        | sym::unchecked_shr => {
+            intrinsic_args!(fx, args => (x, y); intrinsic);
 
-        unchecked_add | unchecked_sub | unchecked_mul | unchecked_div | exact_div | unchecked_rem
-        | unchecked_shl | unchecked_shr, (c x, c y) {
             // FIXME trap on overflow
             let bin_op = match intrinsic {
                 sym::unchecked_add => BinOp::Add,
@@ -468,8 +441,10 @@ fn codegen_regular_intrinsic_call<'tcx>(
             };
             let res = crate::num::codegen_int_binop(fx, bin_op, x, y);
             ret.write_cvalue(fx, res);
-        };
-        add_with_overflow | sub_with_overflow | mul_with_overflow, (c x, c y) {
+        }
+        sym::add_with_overflow | sym::sub_with_overflow | sym::mul_with_overflow => {
+            intrinsic_args!(fx, args => (x, y); intrinsic);
+
             assert_eq!(x.layout().ty, y.layout().ty);
             let bin_op = match intrinsic {
                 sym::add_with_overflow => BinOp::Add,
@@ -478,15 +453,12 @@ fn codegen_regular_intrinsic_call<'tcx>(
                 _ => unreachable!(),
             };
 
-            let res = crate::num::codegen_checked_int_binop(
-                fx,
-                bin_op,
-                x,
-                y,
-            );
+            let res = crate::num::codegen_checked_int_binop(fx, bin_op, x, y);
             ret.write_cvalue(fx, res);
-        };
-        saturating_add | saturating_sub, (c lhs, c rhs) {
+        }
+        sym::saturating_add | sym::saturating_sub => {
+            intrinsic_args!(fx, args => (lhs, rhs); intrinsic);
+
             assert_eq!(lhs.layout().ty, rhs.layout().ty);
             let bin_op = match intrinsic {
                 sym::saturating_add => BinOp::Add,
@@ -496,12 +468,7 @@ fn codegen_regular_intrinsic_call<'tcx>(
 
             let signed = type_sign(lhs.layout().ty);
 
-            let checked_res = crate::num::codegen_checked_int_binop(
-                fx,
-                bin_op,
-                lhs,
-                rhs,
-            );
+            let checked_res = crate::num::codegen_checked_int_binop(fx, bin_op, lhs, rhs);
             let (val, has_overflow) = checked_res.load_scalar_pair(fx);
             let clif_ty = fx.clif_type(lhs.layout().ty).unwrap();
 
@@ -513,13 +480,15 @@ fn codegen_regular_intrinsic_call<'tcx>(
                 (sym::saturating_sub, false) => fx.bcx.ins().select(has_overflow, min, val),
                 (sym::saturating_add, true) => {
                     let rhs = rhs.load_scalar(fx);
-                    let rhs_ge_zero = fx.bcx.ins().icmp_imm(IntCC::SignedGreaterThanOrEqual, rhs, 0);
+                    let rhs_ge_zero =
+                        fx.bcx.ins().icmp_imm(IntCC::SignedGreaterThanOrEqual, rhs, 0);
                     let sat_val = fx.bcx.ins().select(rhs_ge_zero, max, min);
                     fx.bcx.ins().select(has_overflow, sat_val, val)
                 }
                 (sym::saturating_sub, true) => {
                     let rhs = rhs.load_scalar(fx);
-                    let rhs_ge_zero = fx.bcx.ins().icmp_imm(IntCC::SignedGreaterThanOrEqual, rhs, 0);
+                    let rhs_ge_zero =
+                        fx.bcx.ins().icmp_imm(IntCC::SignedGreaterThanOrEqual, rhs, 0);
                     let sat_val = fx.bcx.ins().select(rhs_ge_zero, min, max);
                     fx.bcx.ins().select(has_overflow, sat_val, val)
                 }
@@ -529,23 +498,32 @@ fn codegen_regular_intrinsic_call<'tcx>(
 
             let res = CValue::by_val(val, lhs.layout());
             ret.write_cvalue(fx, res);
-        };
-        rotate_left, (c x, v y) {
+        }
+        sym::rotate_left => {
+            intrinsic_args!(fx, args => (x, y); intrinsic);
+            let y = y.load_scalar(fx);
+
             let layout = x.layout();
             let x = x.load_scalar(fx);
             let res = fx.bcx.ins().rotl(x, y);
             ret.write_cvalue(fx, CValue::by_val(res, layout));
-        };
-        rotate_right, (c x, v y) {
+        }
+        sym::rotate_right => {
+            intrinsic_args!(fx, args => (x, y); intrinsic);
+            let y = y.load_scalar(fx);
+
             let layout = x.layout();
             let x = x.load_scalar(fx);
             let res = fx.bcx.ins().rotr(x, y);
             ret.write_cvalue(fx, CValue::by_val(res, layout));
-        };
+        }
 
         // The only difference between offset and arith_offset is regarding UB. Because Cranelift
         // doesn't have UB both are codegen'ed the same way
-        offset | arith_offset, (c base, v offset) {
+        sym::offset | sym::arith_offset => {
+            intrinsic_args!(fx, args => (base, offset); intrinsic);
+            let offset = offset.load_scalar(fx);
+
             let pointee_ty = base.layout().ty.builtin_deref(true).unwrap().ty;
             let pointee_size = fx.layout_of(pointee_ty).size.bytes();
             let ptr_diff = if pointee_size != 1 {
@@ -556,12 +534,18 @@ fn codegen_regular_intrinsic_call<'tcx>(
             let base_val = base.load_scalar(fx);
             let res = fx.bcx.ins().iadd(base_val, ptr_diff);
             ret.write_cvalue(fx, CValue::by_val(res, base.layout()));
-        };
+        }
+
+        sym::transmute => {
+            intrinsic_args!(fx, args => (from); intrinsic);
 
-        transmute, (c from) {
             ret.write_cvalue_transmute(fx, from);
-        };
-        write_bytes | volatile_set_memory, (c dst, v val, v count) {
+        }
+        sym::write_bytes | sym::volatile_set_memory => {
+            intrinsic_args!(fx, args => (dst, val, count); intrinsic);
+            let val = val.load_scalar(fx);
+            let count = count.load_scalar(fx);
+
             let pointee_ty = dst.layout().ty.builtin_deref(true).unwrap().ty;
             let pointee_size = fx.layout_of(pointee_ty).size.bytes();
             let count = if pointee_size != 1 {
@@ -573,34 +557,42 @@ fn codegen_regular_intrinsic_call<'tcx>(
             // FIXME make the memset actually volatile when switching to emit_small_memset
             // FIXME use emit_small_memset
             fx.bcx.call_memset(fx.target_config, dst_ptr, val, count);
-        };
-        ctlz | ctlz_nonzero, (c arg) {
+        }
+        sym::ctlz | sym::ctlz_nonzero => {
+            intrinsic_args!(fx, args => (arg); intrinsic);
             let val = arg.load_scalar(fx);
+
             // FIXME trap on `ctlz_nonzero` with zero arg.
             let res = fx.bcx.ins().clz(val);
             let res = CValue::by_val(res, arg.layout());
             ret.write_cvalue(fx, res);
-        };
-        cttz | cttz_nonzero, (c arg) {
+        }
+        sym::cttz | sym::cttz_nonzero => {
+            intrinsic_args!(fx, args => (arg); intrinsic);
             let val = arg.load_scalar(fx);
+
             // FIXME trap on `cttz_nonzero` with zero arg.
             let res = fx.bcx.ins().ctz(val);
             let res = CValue::by_val(res, arg.layout());
             ret.write_cvalue(fx, res);
-        };
-        ctpop, (c arg) {
+        }
+        sym::ctpop => {
+            intrinsic_args!(fx, args => (arg); intrinsic);
             let val = arg.load_scalar(fx);
+
             let res = fx.bcx.ins().popcnt(val);
             let res = CValue::by_val(res, arg.layout());
             ret.write_cvalue(fx, res);
-        };
-        bitreverse, (c arg) {
+        }
+        sym::bitreverse => {
+            intrinsic_args!(fx, args => (arg); intrinsic);
            let val = arg.load_scalar(fx);
+
             let res = fx.bcx.ins().bitrev(val);
             let res = CValue::by_val(res, arg.layout());
             ret.write_cvalue(fx, res);
-        };
-        bswap, (c arg) {
+        }
+        sym::bswap => {
             // FIXME(CraneStation/cranelift#794) add bswap instruction to cranelift
             fn swap(bcx: &mut FunctionBuilder<'_>, v: Value) -> Value {
                 match bcx.func.dfg.value_type(v) {
@@ -676,11 +668,15 @@ fn swap(bcx: &mut FunctionBuilder<'_>, v: Value) -> Value {
                     ty => unreachable!("bswap {}", ty),
                 }
             }
+            intrinsic_args!(fx, args => (arg); intrinsic);
             let val = arg.load_scalar(fx);
+
             let res = CValue::by_val(swap(&mut fx.bcx, val), arg.layout());
             ret.write_cvalue(fx, res);
-        };
-        assert_inhabited | assert_zero_valid | assert_uninit_valid, () {
+        }
+        sym::assert_inhabited | sym::assert_zero_valid | sym::assert_uninit_valid => {
+            intrinsic_args!(fx, args => (); intrinsic);
+
             let layout = fx.layout_of(substs.type_at(0));
             if layout.abi.is_uninhabited() {
                 with_no_trimmed_paths!({
@@ -697,7 +693,10 @@ fn swap(bcx: &mut FunctionBuilder<'_>, v: Value) -> Value {
                 with_no_trimmed_paths!({
                     crate::base::codegen_panic(
                         fx,
-                        &format!("attempted to zero-initialize type `{}`, which is invalid", layout.ty),
+                        &format!(
+                            "attempted to zero-initialize type `{}`, which is invalid",
+                            layout.ty
+                        ),
                         source_info,
                     );
                 });
@@ -708,41 +707,53 @@ fn swap(bcx: &mut FunctionBuilder<'_>, v: Value) -> Value {
                 with_no_trimmed_paths!({
                     crate::base::codegen_panic(
                         fx,
-                        &format!("attempted to leave type `{}` uninitialized, which is invalid", layout.ty),
+                        &format!(
+                            "attempted to leave type `{}` uninitialized, which is invalid",
+                            layout.ty
+                        ),
                         source_info,
                     )
                 });
                 return;
             }
-        };
+        }
+
+        sym::volatile_load | sym::unaligned_volatile_load => {
+            intrinsic_args!(fx, args => (ptr); intrinsic);
 
-        volatile_load | unaligned_volatile_load, (c ptr) {
             // Cranelift treats loads as volatile by default
             // FIXME correctly handle unaligned_volatile_load
-            let inner_layout =
-                fx.layout_of(ptr.layout().ty.builtin_deref(true).unwrap().ty);
+            let inner_layout = fx.layout_of(ptr.layout().ty.builtin_deref(true).unwrap().ty);
             let val = CValue::by_ref(Pointer::new(ptr.load_scalar(fx)), inner_layout);
             ret.write_cvalue(fx, val);
-        };
-        volatile_store | unaligned_volatile_store, (v ptr, c val) {
+        }
+        sym::volatile_store | sym::unaligned_volatile_store => {
+            intrinsic_args!(fx, args => (ptr, val); intrinsic);
+            let ptr = ptr.load_scalar(fx);
+
             // Cranelift treats stores as volatile by default
             // FIXME correctly handle unaligned_volatile_store
             let dest = CPlace::for_ptr(Pointer::new(ptr), val.layout());
             dest.write_cvalue(fx, val);
-        };
+        }
+
+        sym::pref_align_of
+        | sym::needs_drop
+        | sym::type_id
+        | sym::type_name
+        | sym::variant_count => {
+            intrinsic_args!(fx, args => (); intrinsic);
 
-        pref_align_of | needs_drop | type_id | type_name | variant_count, () {
             let const_val =
                 fx.tcx.const_eval_instance(ParamEnv::reveal_all(), instance, None).unwrap();
-            let val = crate::constant::codegen_const_value(
-                fx,
-                const_val,
-                ret.layout().ty,
-            );
+            let val = crate::constant::codegen_const_value(fx, const_val, ret.layout().ty);
             ret.write_cvalue(fx, val);
-        };
+        }
 
-        ptr_offset_from | ptr_offset_from_unsigned, (v ptr, v base) {
+        sym::ptr_offset_from | sym::ptr_offset_from_unsigned => {
+            intrinsic_args!(fx, args => (ptr, base); intrinsic);
+            let ptr = ptr.load_scalar(fx);
+            let base = base.load_scalar(fx);
+
             let ty = substs.type_at(0);
             let pointee_size: u64 = fx.layout_of(ty).size.bytes();
 
@@ -758,31 +769,44 @@ fn swap(bcx: &mut FunctionBuilder<'_>, v: Value) -> Value {
                 CValue::by_val(fx.bcx.ins().sdiv_imm(diff_bytes, pointee_size as i64), isize_layout)
             };
             ret.write_cvalue(fx, val);
-        };
+        }
+
+        sym::ptr_guaranteed_eq => {
+            intrinsic_args!(fx, args => (a, b); intrinsic);
 
-        ptr_guaranteed_eq, (c a, c b) {
             let val = crate::num::codegen_ptr_binop(fx, BinOp::Eq, a, b);
             ret.write_cvalue(fx, val);
-        };
+        }
+
+        sym::ptr_guaranteed_ne => {
+            intrinsic_args!(fx, args => (a, b); intrinsic);
 
-        ptr_guaranteed_ne, (c a, c b) {
             let val = crate::num::codegen_ptr_binop(fx, BinOp::Ne, a, b);
             ret.write_cvalue(fx, val);
-        };
+        }
+
+        sym::caller_location => {
+            intrinsic_args!(fx, args => (); intrinsic);
 
-        caller_location, () {
             let caller_location = fx.get_caller_location(source_info);
             ret.write_cvalue(fx, caller_location);
-        };
+        }
+
+        _ if intrinsic.as_str().starts_with("atomic_fence") => {
+            intrinsic_args!(fx, args => (); intrinsic);
 
-        _ if intrinsic.as_str().starts_with("atomic_fence"), () {
             fx.bcx.ins().fence();
-        };
-        _ if intrinsic.as_str().starts_with("atomic_singlethreadfence"), () {
+        }
+        _ if intrinsic.as_str().starts_with("atomic_singlethreadfence") => {
+            intrinsic_args!(fx, args => (); intrinsic);
+
            // FIXME use a compiler fence once Cranelift supports it
             fx.bcx.ins().fence();
-        };
-        _ if intrinsic.as_str().starts_with("atomic_load"), (v ptr) {
+        }
+        _ if intrinsic.as_str().starts_with("atomic_load") => {
+            intrinsic_args!(fx, args => (ptr); intrinsic);
+            let ptr = ptr.load_scalar(fx);
+
             let ty = substs.type_at(0);
             match ty.kind() {
                 ty::Uint(UintTy::U128) | ty::Int(IntTy::I128) => {
@@ -794,7 +818,9 @@ fn swap(bcx: &mut FunctionBuilder<'_>, v: Value) -> Value {
                         fx.bcx.ins().jump(ret_block, &[]);
                         return;
                     } else {
-                        fx.tcx.sess.span_fatal(source_info.span, "128bit atomics not yet supported");
+                        fx.tcx
+                            .sess
+                            .span_fatal(source_info.span, "128bit atomics not yet supported");
                     }
                 }
                 ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
@@ -809,8 +835,11 @@ fn swap(bcx: &mut FunctionBuilder<'_>, v: Value) -> Value {
 
             let val = CValue::by_val(val, fx.layout_of(ty));
             ret.write_cvalue(fx, val);
-        };
-        _ if intrinsic.as_str().starts_with("atomic_store"), (v ptr, c val) {
+        }
+        _ if intrinsic.as_str().starts_with("atomic_store") => {
+            intrinsic_args!(fx, args => (ptr, val); intrinsic);
+            let ptr = ptr.load_scalar(fx);
+
             let ty = substs.type_at(0);
             match ty.kind() {
                 ty::Uint(UintTy::U128) | ty::Int(IntTy::I128) => {
@@ -822,7 +851,9 @@ fn swap(bcx: &mut FunctionBuilder<'_>, v: Value) -> Value {
                         fx.bcx.ins().jump(ret_block, &[]);
                         return;
                     } else {
-                        fx.tcx.sess.span_fatal(source_info.span, "128bit atomics not yet supported");
+                        fx.tcx
+                            .sess
+                            .span_fatal(source_info.span, "128bit atomics not yet supported");
                    }
                 }
                 ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
@@ -835,8 +866,11 @@ fn swap(bcx: &mut FunctionBuilder<'_>, v: Value) -> Value {
 
             let val = val.load_scalar(fx);
             fx.bcx.ins().atomic_store(MemFlags::trusted(), val, ptr);
-        };
-        _ if intrinsic.as_str().starts_with("atomic_xchg"), (v ptr, c new) {
+        }
+        _ if intrinsic.as_str().starts_with("atomic_xchg") => {
+            intrinsic_args!(fx, args => (ptr, new); intrinsic);
+            let ptr = ptr.load_scalar(fx);
+
             let layout = new.layout();
             match layout.ty.kind() {
                 ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
@@ -853,8 +887,12 @@ fn swap(bcx: &mut FunctionBuilder<'_>, v: Value) -> Value {
 
             let old = CValue::by_val(old, layout);
             ret.write_cvalue(fx, old);
-        };
-        _ if intrinsic.as_str().starts_with("atomic_cxchg"), (v ptr, c test_old, c new) { // both atomic_cxchg_* and atomic_cxchgweak_*
+        }
+        _ if intrinsic.as_str().starts_with("atomic_cxchg") => {
+            // both atomic_cxchg_* and atomic_cxchgweak_*
+            intrinsic_args!(fx, args => (ptr, test_old, new); intrinsic);
+            let ptr = ptr.load_scalar(fx);
+
             let layout = new.layout();
             match layout.ty.kind() {
                 ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
@@ -870,11 +908,15 @@ fn swap(bcx: &mut FunctionBuilder<'_>, v: Value) -> Value {
 
             let old = fx.bcx.ins().atomic_cas(MemFlags::trusted(), ptr, test_old, new);
             let is_eq = fx.bcx.ins().icmp(IntCC::Equal, old, test_old);
-            let ret_val = CValue::by_val_pair(old, fx.bcx.ins().bint(types::I8, is_eq), ret.layout());
+            let ret_val =
+                CValue::by_val_pair(old, fx.bcx.ins().bint(types::I8, is_eq), ret.layout());
 
             ret.write_cvalue(fx, ret_val)
-        };
+        }
+
+        _ if intrinsic.as_str().starts_with("atomic_xadd") => {
+            intrinsic_args!(fx, args => (ptr, amount); intrinsic);
+            let ptr = ptr.load_scalar(fx);
 
-        _ if intrinsic.as_str().starts_with("atomic_xadd"), (v ptr, c amount) {
             let layout = amount.layout();
             match layout.ty.kind() {
                 ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
@@ -887,12 +929,16 @@ fn swap(bcx: &mut FunctionBuilder<'_>, v: Value) -> Value {
 
             let amount = amount.load_scalar(fx);
-            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Add, ptr, amount);
+            let old =
+                fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Add, ptr, amount);
 
             let old = CValue::by_val(old, layout);
             ret.write_cvalue(fx, old);
-        };
-        _ if intrinsic.as_str().starts_with("atomic_xsub"), (v ptr, c amount) {
+        }
+        _ if intrinsic.as_str().starts_with("atomic_xsub") => {
+            intrinsic_args!(fx, args => (ptr, amount); intrinsic);
+            let ptr = ptr.load_scalar(fx);
+
             let layout = amount.layout();
             match layout.ty.kind() {
                 ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
@@ -905,12 +951,16 @@ fn swap(bcx: &mut FunctionBuilder<'_>, v: Value) -> Value {
 
             let amount = amount.load_scalar(fx);
-            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Sub, ptr, amount);
+            let old =
+                fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Sub, ptr, amount);
 
             let old = CValue::by_val(old, layout);
             ret.write_cvalue(fx, old);
-        };
-        _ if intrinsic.as_str().starts_with("atomic_and"), (v ptr, c src) {
+        }
+        _ if intrinsic.as_str().starts_with("atomic_and") => {
+            intrinsic_args!(fx, args => (ptr, src); intrinsic);
+            let ptr = ptr.load_scalar(fx);
+
             let layout = src.layout();
             match layout.ty.kind() {
                 ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
@@ -927,8 +977,11 @@ fn swap(bcx: &mut FunctionBuilder<'_>, v: Value) -> Value {
 
             let old = CValue::by_val(old, layout);
             ret.write_cvalue(fx, old);
-        };
-        _ if intrinsic.as_str().starts_with("atomic_or"), (v ptr, c src) {
+        }
+        _ if intrinsic.as_str().starts_with("atomic_or") => {
+            intrinsic_args!(fx, args => (ptr, src); intrinsic);
+            let ptr = ptr.load_scalar(fx);
+
             let layout = src.layout();
             match layout.ty.kind() {
                 ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
@@ -945,8 +998,11 @@ fn swap(bcx: &mut FunctionBuilder<'_>, v: Value) -> Value {
 
             let old = CValue::by_val(old, layout);
             ret.write_cvalue(fx, old);
-        };
-        _ if intrinsic.as_str().starts_with("atomic_xor"), (v ptr, c src) {
+        }
+        _ if intrinsic.as_str().starts_with("atomic_xor") => {
+            intrinsic_args!(fx, args => (ptr, src); intrinsic);
+            let ptr = ptr.load_scalar(fx);
+
             let layout = src.layout();
             match layout.ty.kind() {
                 ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
@@ -963,8 +1019,11 @@ fn swap(bcx: &mut FunctionBuilder<'_>, v: Value) -> Value {
 
             let old = CValue::by_val(old, layout);
             ret.write_cvalue(fx, old);
-        };
-        _ if intrinsic.as_str().starts_with("atomic_nand"), (v ptr, c src) {
+        }
+        _ if intrinsic.as_str().starts_with("atomic_nand") => {
+            intrinsic_args!(fx, args => (ptr, src); intrinsic);
+            let ptr = ptr.load_scalar(fx);
+
             let layout = src.layout();
             match layout.ty.kind() {
                 ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
@@ -981,8 +1040,11 @@ fn swap(bcx: &mut FunctionBuilder<'_>, v: Value) -> Value {
 
             let old = CValue::by_val(old, layout);
             ret.write_cvalue(fx, old);
-        };
-        _ if intrinsic.as_str().starts_with("atomic_max"), (v ptr, c src) {
+        }
+        _ if intrinsic.as_str().starts_with("atomic_max") => {
+            intrinsic_args!(fx, args => (ptr, src); intrinsic);
+            let ptr = ptr.load_scalar(fx);
+
             let layout = src.layout();
             match layout.ty.kind() {
                 ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
@@ -999,8 +1061,11 @@ fn swap(bcx: &mut FunctionBuilder<'_>, v: Value) -> Value {
 
             let old = CValue::by_val(old, layout);
             ret.write_cvalue(fx, old);
-        };
-        _ if intrinsic.as_str().starts_with("atomic_umax"), (v ptr, c src) {
+        }
+        _ if intrinsic.as_str().starts_with("atomic_umax") => {
+            intrinsic_args!(fx, args => (ptr, src); intrinsic);
+            let ptr = ptr.load_scalar(fx);
+
             let layout = src.layout();
             match layout.ty.kind() {
                 ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
@@ -1017,8 +1082,11 @@ fn swap(bcx: &mut FunctionBuilder<'_>, v: Value) -> Value {
 
             let old = CValue::by_val(old, layout);
             ret.write_cvalue(fx, old);
-        };
-        _ if intrinsic.as_str().starts_with("atomic_min"), (v ptr, c src) {
+        }
+        _ if intrinsic.as_str().starts_with("atomic_min") => {
+            intrinsic_args!(fx, args => (ptr, src); intrinsic);
+            let ptr = ptr.load_scalar(fx);
+
             let layout = src.layout();
             match layout.ty.kind() {
                 ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
@@ -1035,8 +1103,11 @@ fn swap(bcx: &mut FunctionBuilder<'_>, v: Value) -> Value {
 
             let old = CValue::by_val(old, layout);
             ret.write_cvalue(fx, old);
-        };
-        _ if intrinsic.as_str().starts_with("atomic_umin"), (v ptr, c src) {
+        }
+        _ if intrinsic.as_str().starts_with("atomic_umin") => {
+            intrinsic_args!(fx, args => (ptr, src); intrinsic);
+            let ptr = ptr.load_scalar(fx);
+
             let layout = src.layout();
             match layout.ty.kind() {
                 ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
@@ -1053,30 +1124,51 @@ fn swap(bcx: &mut FunctionBuilder<'_>, v: Value) -> Value {
 
             let old = CValue::by_val(old, layout);
             ret.write_cvalue(fx, old);
-        };
+        }
+
+        sym::minnumf32 => {
+            intrinsic_args!(fx, args => (a, b); intrinsic);
+            let a = a.load_scalar(fx);
+            let b = b.load_scalar(fx);
 
-        minnumf32, (v a, v b) {
             let val = crate::num::codegen_float_min(fx, a, b);
             let val = CValue::by_val(val, fx.layout_of(fx.tcx.types.f32));
             ret.write_cvalue(fx, val);
-        };
-        minnumf64, (v a, v b) {
+        }
+        sym::minnumf64 => {
+            intrinsic_args!(fx, args => (a, b); intrinsic);
+            let a = a.load_scalar(fx);
+            let b = b.load_scalar(fx);
+
             let val = crate::num::codegen_float_min(fx, a, b);
             let val = CValue::by_val(val, fx.layout_of(fx.tcx.types.f64));
             ret.write_cvalue(fx, val);
-        };
-        maxnumf32, (v a, v b) {
+        }
+        sym::maxnumf32 => {
+            intrinsic_args!(fx, args => (a, b); intrinsic);
+            let a = a.load_scalar(fx);
+            let b = b.load_scalar(fx);
+
             let val = crate::num::codegen_float_max(fx, a, b);
             let val = CValue::by_val(val, fx.layout_of(fx.tcx.types.f32));
             ret.write_cvalue(fx, val);
-        };
-        maxnumf64, (v a, v b) {
+        }
+        sym::maxnumf64 => {
+            intrinsic_args!(fx, args => (a, b); intrinsic);
+            let a = a.load_scalar(fx);
+            let b = b.load_scalar(fx);
+
             let val = crate::num::codegen_float_max(fx, a, b);
             let val = CValue::by_val(val, fx.layout_of(fx.tcx.types.f64));
             ret.write_cvalue(fx, val);
-        };
+        }
+
+        kw::Try => {
+            intrinsic_args!(fx, args => (f, data, catch_fn); intrinsic);
+            let f = f.load_scalar(fx);
+            let data = data.load_scalar(fx);
+            let _catch_fn = catch_fn.load_scalar(fx);
 
-        kw.Try, (v f, v data, v _catch_fn) {
             // FIXME once unwinding is supported, change this to actually catch panics
             let f_sig = fx.bcx.func.import_signature(Signature {
                 call_conv: fx.target_config.default_call_conv,
@@ -1089,20 +1181,30 @@ fn swap(bcx: &mut FunctionBuilder<'_>, v: Value) -> Value {
 
             let layout = ret.layout();
             let ret_val = CValue::const_val(fx, layout, ty::ScalarInt::null(layout.size));
             ret.write_cvalue(fx, ret_val);
-        };
+        }
 
-        fadd_fast | fsub_fast | fmul_fast | fdiv_fast | frem_fast, (c x, c y) {
-            let res = crate::num::codegen_float_binop(fx, match intrinsic {
-                sym::fadd_fast => BinOp::Add,
-                sym::fsub_fast => BinOp::Sub,
-                sym::fmul_fast => BinOp::Mul,
-                sym::fdiv_fast => BinOp::Div,
-                sym::frem_fast => BinOp::Rem,
-                _ => unreachable!(),
-            }, x, y);
+        sym::fadd_fast | sym::fsub_fast | sym::fmul_fast | sym::fdiv_fast | sym::frem_fast => {
+            intrinsic_args!(fx, args => (x, y); intrinsic);
+
+            let res = crate::num::codegen_float_binop(
+                fx,
+                match intrinsic {
+                    sym::fadd_fast => BinOp::Add,
+                    sym::fsub_fast => BinOp::Sub,
+                    sym::fmul_fast => BinOp::Mul,
+                    sym::fdiv_fast => BinOp::Div,
+                    sym::frem_fast => BinOp::Rem,
+                    _ => unreachable!(),
+                },
+                x,
+                y,
+            );
             ret.write_cvalue(fx, res);
-        };
-        float_to_int_unchecked, (v f) {
+        }
+        sym::float_to_int_unchecked => {
+            intrinsic_args!(fx, args => (f); intrinsic);
+            let f = f.load_scalar(fx);
+
             let res = crate::cast::clif_int_or_float_cast(
                 fx,
                 f,
@@ -1111,66 +1213,74 @@ fn swap(bcx: &mut FunctionBuilder<'_>, v: Value) -> Value {
                 type_sign(ret.layout().ty),
             );
             ret.write_cvalue(fx, CValue::by_val(res, ret.layout()));
-        };
+        }
+
+        sym::raw_eq => {
+            intrinsic_args!(fx, args => (lhs_ref, rhs_ref); intrinsic);
+            let lhs_ref = lhs_ref.load_scalar(fx);
+            let rhs_ref = rhs_ref.load_scalar(fx);
 
-        raw_eq, (v lhs_ref, v rhs_ref) {
             let size = fx.layout_of(substs.type_at(0)).layout.size();
             // FIXME add and use emit_small_memcmp
-            let is_eq_value =
-                if size == Size::ZERO {
-                    // No bytes means they're trivially equal
-                    fx.bcx.ins().iconst(types::I8, 1)
-                } else if let Some(clty) = size.bits().try_into().ok().and_then(Type::int) {
-                    // Can't use `trusted` for these loads; they could be unaligned.
-                    let mut flags = MemFlags::new();
-                    flags.set_notrap();
-                    let lhs_val = fx.bcx.ins().load(clty, flags, lhs_ref, 0);
-                    let rhs_val = fx.bcx.ins().load(clty, flags, rhs_ref, 0);
-                    let eq = fx.bcx.ins().icmp(IntCC::Equal, lhs_val, rhs_val);
-                    fx.bcx.ins().bint(types::I8, eq)
-                } else {
-                    // Just call `memcmp` (like slices do in core) when the
-                    // size is too large or it's not a power-of-two.
-                    let signed_bytes = i64::try_from(size.bytes()).unwrap();
-                    let bytes_val = fx.bcx.ins().iconst(fx.pointer_type, signed_bytes);
-                    let params = vec![AbiParam::new(fx.pointer_type); 3];
-                    let returns = vec![AbiParam::new(types::I32)];
-                    let args = &[lhs_ref, rhs_ref, bytes_val];
-                    let cmp = fx.lib_call("memcmp", params, returns, args)[0];
-                    let eq = fx.bcx.ins().icmp_imm(IntCC::Equal, cmp, 0);
-                    fx.bcx.ins().bint(types::I8, eq)
-                };
+            let is_eq_value = if size == Size::ZERO {
+                // No bytes means they're trivially equal
+                fx.bcx.ins().iconst(types::I8, 1)
+            } else if let Some(clty) = size.bits().try_into().ok().and_then(Type::int) {
+                // Can't use `trusted` for these loads; they could be unaligned.
+                let mut flags = MemFlags::new();
+                flags.set_notrap();
+                let lhs_val = fx.bcx.ins().load(clty, flags, lhs_ref, 0);
+                let rhs_val = fx.bcx.ins().load(clty, flags, rhs_ref, 0);
+                let eq = fx.bcx.ins().icmp(IntCC::Equal, lhs_val, rhs_val);
+                fx.bcx.ins().bint(types::I8, eq)
+            } else {
+                // Just call `memcmp` (like slices do in core) when the
+                // size is too large or it's not a power-of-two.
+                let signed_bytes = i64::try_from(size.bytes()).unwrap();
+                let bytes_val = fx.bcx.ins().iconst(fx.pointer_type, signed_bytes);
+                let params = vec![AbiParam::new(fx.pointer_type); 3];
+                let returns = vec![AbiParam::new(types::I32)];
+                let args = &[lhs_ref, rhs_ref, bytes_val];
+                let cmp = fx.lib_call("memcmp", params, returns, args)[0];
+                let eq = fx.bcx.ins().icmp_imm(IntCC::Equal, cmp, 0);
+                fx.bcx.ins().bint(types::I8, eq)
+            };
             ret.write_cvalue(fx, CValue::by_val(is_eq_value, ret.layout()));
-        };
+        }
+
+        sym::const_allocate => {
+            intrinsic_args!(fx, args => (_size, _align); intrinsic);
 
-        const_allocate, (c _size, c _align) {
             // returns a null pointer at runtime.
             let null = fx.bcx.ins().iconst(fx.pointer_type, 0);
             ret.write_cvalue(fx, CValue::by_val(null, ret.layout()));
-        };
+        }
 
-        const_deallocate, (c _ptr, c _size, c _align) {
+        sym::const_deallocate => {
+            intrinsic_args!(fx, args => (_ptr, _size, _align); intrinsic);
             // nop at runtime.
-        };
+        }
+
+        sym::black_box => {
+            intrinsic_args!(fx, args => (a); intrinsic);
 
-        black_box, (c a) {
             // FIXME implement black_box semantics
             ret.write_cvalue(fx, a);
-        };
+        }
 
         // FIXME implement variadics in cranelift
-        va_copy, (o _dest, o _src) {
+        sym::va_copy | sym::va_arg | sym::va_end => {
             fx.tcx.sess.span_fatal(
                 source_info.span,
                 "Defining variadic functions is not yet supported by Cranelift",
             );
-        };
-        va_arg | va_end, (o _valist) {
-            fx.tcx.sess.span_fatal(
-                source_info.span,
-                "Defining variadic functions is not yet supported by Cranelift",
-            );
-        };
+        }
+
+        _ => {
+            fx.tcx
+                .sess
+                .span_fatal(source_info.span, &format!("unsupported intrinsic {}", intrinsic));
+        }
     }
 
     let ret_block = fx.get_block(destination.unwrap());
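
For context on the pattern this patch converges on: every rewritten arm first invokes the surviving `intrinsic_args!` helper (kept in the context lines at the top of the file) and then performs any `load_scalar` calls explicitly, where the old macro's `v` argument spec did the load implicitly. A minimal sketch of what a two-argument arm roughly expands to, assuming the macro destructures the `args` slice through `codegen_operand` as its visible signature suggests (illustrative only, not the exact macro output):

    // Hypothetical expansion of `intrinsic_args!(fx, args => (x, y); intrinsic)`:
    // bind each operand as a CValue, or ICE on an arity mismatch.
    let (x, y) = if let [x, y] = args {
        (codegen_operand(fx, x), codegen_operand(fx, y))
    } else {
        bug!("wrong number of args for intrinsic {:?}", intrinsic)
    };
    // What the old macro's `v` spec did implicitly is now an explicit load:
    let x = x.load_scalar(fx);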