Rollup merge of #130450 - workingjubilee:these-names-are-indirect, r=bjorn3
Reduce confusion about `make_indirect_byval` by renaming it

As part of doing so, remove the incorrect handling of the wasm target's `make_indirect_byval` (i.e. using it at all).
commit b33dd7dc88
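For context, the rename is purely mechanical at call sites. The sketch below is illustrative only (not a file from this PR), assuming the `ArgAbi` API under `rustc_target::abi::call` as it existed at the time; it mirrors the shape of the backends touched in the hunks that follow:

```rust
// Illustrative sketch of a calling-convention backend after the rename;
// `classify_arg` here is a made-up example, not code from this PR.
use rustc_target::abi::call::ArgAbi;

fn classify_arg<Ty>(arg: &mut ArgAbi<'_, Ty>) {
    if arg.layout.is_aggregate() {
        // Previously spelled `arg.make_indirect_byval(None)`. `None` means
        // "use the type's own alignment"; targets with stricter byval rules
        // (e.g. x86 below) pass `Some(byval_align)` instead.
        arg.pass_by_stack_offset(None);
    } else {
        arg.extend_integer_width_to(32);
    }
}
```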
@@ -14,7 +14,7 @@ fn classify_arg<Ty>(arg: &mut ArgAbi<'_, Ty>) {
         return;
     }
     if arg.layout.is_aggregate() {
-        arg.make_indirect_byval(None);
+        arg.pass_by_stack_offset(None);
     } else {
         arg.extend_integer_width_to(32);
     }
@@ -64,7 +64,7 @@ pub enum PassMode {
     /// (which ensures that padding is preserved and that we do not rely on LLVM's struct layout),
     /// and will use the alignment specified in `attrs.pointee_align` (if `Some`) or the type's
     /// alignment (if `None`). This means that the alignment will not always
-    /// match the Rust type's alignment; see documentation of `make_indirect_byval` for more info.
+    /// match the Rust type's alignment; see documentation of `pass_by_stack_offset` for more info.
     ///
     /// `on_stack` cannot be true for unsized arguments, i.e., when `meta_attrs` is `Some`.
     Indirect { attrs: ArgAttributes, meta_attrs: Option<ArgAttributes>, on_stack: bool },
@@ -681,7 +681,7 @@ pub fn make_indirect_from_ignore(&mut self) {
     /// either in the caller (if the type's alignment is lower than the byval alignment)
     /// or in the callee (if the type's alignment is higher than the byval alignment),
     /// to ensure that Rust code never sees an underaligned pointer.
-    pub fn make_indirect_byval(&mut self, byval_align: Option<Align>) {
+    pub fn pass_by_stack_offset(&mut self, byval_align: Option<Align>) {
         assert!(!self.layout.is_unsized(), "used byval ABI for unsized layout");
         self.make_indirect();
         match self.mode {
@@ -879,8 +879,7 @@ pub fn adjust_for_foreign_abi<C>(
     {
         if abi == spec::abi::Abi::X86Interrupt {
             if let Some(arg) = self.args.first_mut() {
-                // FIXME(pcwalton): This probably should use the x86 `byval` ABI...
-                arg.make_indirect_byval(None);
+                arg.pass_by_stack_offset(None);
             }
             return Ok(());
         }
@@ -40,7 +40,7 @@ fn classify_arg<'a, Ty, C>(cx: &C, arg: &mut ArgAbi<'a, Ty>)
     }
     arg.extend_integer_width_to(32);
     if arg.layout.is_aggregate() && !unwrap_trivial_aggregate(cx, arg) {
-        arg.make_indirect_byval(None);
+        arg.make_indirect();
     }
 }
 
@@ -122,7 +122,7 @@ fn contains_vector<'a, Ty, C>(cx: &C, layout: TyAndLayout<'a, Ty>) -> bool
             align_4
         };
 
-        arg.make_indirect_byval(Some(byval_align));
+        arg.pass_by_stack_offset(Some(byval_align));
     } else {
         arg.extend_integer_width_to(32);
     }
@@ -219,7 +219,7 @@ pub(crate) fn compute_abi_info<'a, Ty, C>(cx: &C, fn_abi: &mut FnAbi<'a, Ty>)
         if is_arg {
             // The x86_64 ABI doesn't have any special requirements for `byval` alignment,
             // the type's alignment is always used.
-            arg.make_indirect_byval(None);
+            arg.pass_by_stack_offset(None);
         } else {
             // `sret` parameter thus one less integer register available
             arg.make_indirect();
@@ -68,7 +68,7 @@ fn classify_arg_ty<'a, Ty, C>(arg: &mut ArgAbi<'_, Ty>, arg_gprs_left: &mut u64,
     *arg_gprs_left -= needed_arg_gprs;
 
     if must_use_stack {
-        arg.make_indirect_byval(None);
+        arg.pass_by_stack_offset(None);
     } else if is_xtensa_aggregate(arg) {
         // Aggregates which are <= max_size will be passed in
         // registers if possible, so coerce to integers.
@@ -1,10 +1,8 @@
 // ignore-tidy-linelength
-//@ revisions:m68k wasm x86_64-linux x86_64-windows i686-linux i686-windows
+//@ revisions:m68k x86_64-linux x86_64-windows i686-linux i686-windows
 
 //@[m68k] compile-flags: --target m68k-unknown-linux-gnu
 //@[m68k] needs-llvm-components: m68k
-//@[wasm] compile-flags: --target wasm32-unknown-emscripten
-//@[wasm] needs-llvm-components: webassembly
 //@[x86_64-linux] compile-flags: --target x86_64-unknown-linux-gnu
 //@[x86_64-linux] needs-llvm-components: x86
 //@[x86_64-windows] compile-flags: --target x86_64-pc-windows-msvc
@@ -15,7 +13,7 @@
 //@[i686-windows] needs-llvm-components: x86
 
 // Tests that `byval` alignment is properly specified (#80127).
-// The only targets that use `byval` are m68k, wasm, x86-64, and x86.
+// The only targets that use `byval` are m68k, x86-64, and x86.
 // Note also that Windows mandates a by-ref ABI here, so it does not use byval.
 
 #![feature(no_core, lang_items)]
@@ -112,9 +110,6 @@ pub unsafe fn call_na1(x: NaturalAlign1) {
     // m68k: [[ALLOCA:%[a-z0-9+]]] = alloca [2 x i8], align 1
     // m68k: call void @natural_align_1({{.*}}byval([2 x i8]) align 1{{.*}} [[ALLOCA]])
 
-    // wasm: [[ALLOCA:%[a-z0-9+]]] = alloca [2 x i8], align 1
-    // wasm: call void @natural_align_1({{.*}}byval([2 x i8]) align 1{{.*}} [[ALLOCA]])
-
     // x86_64-linux: call void @natural_align_1(i16
 
     // x86_64-windows: call void @natural_align_1(i16
@@ -133,7 +128,6 @@ pub unsafe fn call_na2(x: NaturalAlign2) {
     // CHECK: start:
 
     // m68k-NEXT: call void @natural_align_2
-    // wasm-NEXT: call void @natural_align_2
     // x86_64-linux-NEXT: call void @natural_align_2
     // x86_64-windows-NEXT: call void @natural_align_2
 
@@ -204,8 +198,6 @@ pub unsafe fn call_fa16(x: ForceAlign16) {
 extern "C" {
     // m68k: declare void @natural_align_1({{.*}}byval([2 x i8]) align 1{{.*}})
 
-    // wasm: declare void @natural_align_1({{.*}}byval([2 x i8]) align 1{{.*}})
-
     // x86_64-linux: declare void @natural_align_1(i16)
 
     // x86_64-windows: declare void @natural_align_1(i16)
@@ -217,8 +209,6 @@ pub unsafe fn call_fa16(x: ForceAlign16) {
 
     // m68k: declare void @natural_align_2({{.*}}byval([34 x i8]) align 2{{.*}})
 
-    // wasm: declare void @natural_align_2({{.*}}byval([34 x i8]) align 2{{.*}})
-
     // x86_64-linux: declare void @natural_align_2({{.*}}byval([34 x i8]) align 2{{.*}})
 
     // x86_64-windows: declare void @natural_align_2(
@@ -232,8 +222,6 @@ pub unsafe fn call_fa16(x: ForceAlign16) {
 
     // m68k: declare void @force_align_4({{.*}}byval([20 x i8]) align 4{{.*}})
 
-    // wasm: declare void @force_align_4({{.*}}byval([20 x i8]) align 4{{.*}})
-
     // x86_64-linux: declare void @force_align_4({{.*}}byval([20 x i8]) align 4{{.*}})
 
     // x86_64-windows: declare void @force_align_4(
@@ -247,8 +235,6 @@ pub unsafe fn call_fa16(x: ForceAlign16) {
 
     // m68k: declare void @natural_align_8({{.*}}byval([24 x i8]) align 4{{.*}})
 
-    // wasm: declare void @natural_align_8({{.*}}byval([24 x i8]) align 8{{.*}})
-
     // x86_64-linux: declare void @natural_align_8({{.*}}byval([24 x i8]) align 8{{.*}})
 
     // x86_64-windows: declare void @natural_align_8(
@@ -262,8 +248,6 @@ pub unsafe fn call_fa16(x: ForceAlign16) {
 
     // m68k: declare void @force_align_8({{.*}}byval([24 x i8]) align 8{{.*}})
 
-    // wasm: declare void @force_align_8({{.*}}byval([24 x i8]) align 8{{.*}})
-
     // x86_64-linux: declare void @force_align_8({{.*}}byval([24 x i8]) align 8{{.*}})
 
     // x86_64-windows: declare void @force_align_8(
@@ -279,8 +263,6 @@ pub unsafe fn call_fa16(x: ForceAlign16) {
 
     // m68k: declare void @lower_fa8({{.*}}byval([24 x i8]) align 4{{.*}})
 
-    // wasm: declare void @lower_fa8({{.*}}byval([24 x i8]) align 8{{.*}})
-
     // x86_64-linux: declare void @lower_fa8({{.*}}byval([24 x i8]) align 8{{.*}})
 
     // x86_64-windows: declare void @lower_fa8(
@@ -294,8 +276,6 @@ pub unsafe fn call_fa16(x: ForceAlign16) {
 
     // m68k: declare void @wrapped_fa8({{.*}}byval([24 x i8]) align 8{{.*}})
 
-    // wasm: declare void @wrapped_fa8({{.*}}byval([24 x i8]) align 8{{.*}})
-
     // x86_64-linux: declare void @wrapped_fa8({{.*}}byval([24 x i8]) align 8{{.*}})
 
     // x86_64-windows: declare void @wrapped_fa8(
@@ -311,8 +291,6 @@ pub unsafe fn call_fa16(x: ForceAlign16) {
 
     // m68k: declare void @transparent_fa8({{.*}}byval([24 x i8]) align 8{{.*}})
 
-    // wasm: declare void @transparent_fa8({{.*}}byval([24 x i8]) align 8{{.*}})
-
     // x86_64-linux: declare void @transparent_fa8({{.*}}byval([24 x i8]) align 8{{.*}})
 
     // x86_64-windows: declare void @transparent_fa8(
@@ -328,8 +306,6 @@ pub unsafe fn call_fa16(x: ForceAlign16) {
 
     // m68k: declare void @force_align_16({{.*}}byval([80 x i8]) align 16{{.*}})
 
-    // wasm: declare void @force_align_16({{.*}}byval([80 x i8]) align 16{{.*}})
-
     // x86_64-linux: declare void @force_align_16({{.*}}byval([80 x i8]) align 16{{.*}})
 
     // x86_64-windows: declare void @force_align_16(
@@ -1,4 +1,4 @@
-//@ revisions: i686-linux i686-freebsd x64-linux x64-apple wasm32
+//@ revisions: i686-linux i686-freebsd x64-linux x64-apple
 //@ compile-flags: -O -C no-prepopulate-passes
 
 //@[i686-linux] compile-flags: --target i686-unknown-linux-gnu
@@ -9,8 +9,6 @@
 //@[x64-linux] needs-llvm-components: x86
 //@[x64-apple] compile-flags: --target x86_64-apple-darwin
 //@[x64-apple] needs-llvm-components: x86
-//@[wasm32] compile-flags: --target wasm32-wasi
-//@[wasm32] needs-llvm-components: webassembly
 
 // See ./transparent.rs
 // Some platforms pass large aggregates using immediate arrays in LLVMIR
@@ -1,10 +1,12 @@
-//@ revisions: aarch64-linux aarch64-darwin
+//@ revisions: aarch64-linux aarch64-darwin wasm32-wasi
 //@ compile-flags: -O -C no-prepopulate-passes
 
 //@[aarch64-linux] compile-flags: --target aarch64-unknown-linux-gnu
 //@[aarch64-linux] needs-llvm-components: aarch64
 //@[aarch64-darwin] compile-flags: --target aarch64-apple-darwin
 //@[aarch64-darwin] needs-llvm-components: aarch64
+//@[wasm32-wasi] compile-flags: --target wasm32-wasi
+//@[wasm32-wasi] needs-llvm-components: webassembly
 
 // See ./transparent.rs
 // Some platforms pass large aggregates using immediate arrays in LLVMIR