Rollup merge of #114382 - scottmcm:compare-bytes-intrinsic, r=cjgillot

Add a new `compare_bytes` intrinsic instead of calling `memcmp` directly

As discussed in #113435, this makes the backends the place that can add the "don't call the function if `n == 0`" logic, if a target needs it.  (I didn't actually *add* those checks, though, since as I understood it we don't actually need them on any known target.)
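
For reference, the intrinsic's declaration looks roughly like this (a sketch; the exact attributes and doc comments in `core::intrinsics` may differ):

```rust
// Sketch of the new intrinsic's declaration in `core::intrinsics`
// (doc wording and surrounding attributes are approximate).
extern "rust-intrinsic" {
    /// Lexicographically compare `bytes` bytes starting at `left` and `right`,
    /// with the same `<0` / `0` / `>0` result convention as libc `memcmp`.
    /// Both pointers must be valid for reads of `bytes` bytes.
    pub fn compare_bytes(left: *const u8, right: *const u8, bytes: usize) -> i32;
}
```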

Doing this also let me make it `const` (unstable), which I don't think `extern "C" fn memcmp` can be.
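
For a sense of how a caller might use it, here is a hypothetical nightly-only helper (not something added by this PR). Because the intrinsic is `const` (unstably), a helper like this can later be made `const fn`, which wasn't possible when going through an `extern "C"` declaration of `memcmp`:

```rust
#![feature(core_intrinsics)]
use core::intrinsics::compare_bytes;

// Hypothetical helper, not part of this PR.
fn bytes_equal(a: &[u8], b: &[u8]) -> bool {
    a.len() == b.len()
        // SAFETY: both pointers are valid for reads of `a.len()` bytes.
        && unsafe { compare_bytes(a.as_ptr(), b.as_ptr(), a.len()) == 0 }
}

fn main() {
    assert!(bytes_equal(b"abc", b"abc"));
    assert!(!bytes_equal(b"abc", b"abd"));
}
```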

cc `@RalfJung` `@Amanieu`
Commit 5dd98a4eb1 by Matthias Krüger, 2023-08-07 05:29:12 +02:00, committed via GitHub


@@ -1155,6 +1155,20 @@ fn codegen_regular_intrinsic_call<'tcx>(
            ret.write_cvalue(fx, CValue::by_val(is_eq_value, ret.layout()));
        }
        sym::compare_bytes => {
            intrinsic_args!(fx, args => (lhs_ptr, rhs_ptr, bytes_val); intrinsic);
            let lhs_ptr = lhs_ptr.load_scalar(fx);
            let rhs_ptr = rhs_ptr.load_scalar(fx);
            let bytes_val = bytes_val.load_scalar(fx);

            let params = vec![AbiParam::new(fx.pointer_type); 3];
            let returns = vec![AbiParam::new(types::I32)];
            let args = &[lhs_ptr, rhs_ptr, bytes_val];
            // Here we assume that the `memcmp` provided by the target is a NOP for size 0.
            let cmp = fx.lib_call("memcmp", params, returns, args)[0];
            ret.write_cvalue(fx, CValue::by_val(cmp, ret.layout()));
        }
        sym::const_allocate => {
            intrinsic_args!(fx, args => (_size, _align); intrinsic);
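
If a target's `memcmp` could not safely be called with `n == 0`, the `compare_bytes` arm above is where the backend would add that guard; semantically it would amount to something like the following (illustrative plain Rust, not codegen code, and not part of this PR):

```rust
// Illustrative only: the lowering a backend could choose on a hypothetical
// target whose `memcmp` must not be called with `n == 0`.
unsafe fn compare_bytes_guarded(left: *const u8, right: *const u8, bytes: usize) -> i32 {
    extern "C" {
        // Declared with `*const u8`/`i32` for brevity; the C signature uses
        // `const void *` and `int`.
        fn memcmp(s1: *const u8, s2: *const u8, n: usize) -> i32;
    }
    if bytes == 0 {
        0 // zero-length ranges compare equal; skip the libcall entirely
    } else {
        unsafe { memcmp(left, right, bytes) }
    }
}
```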