diff --git a/Cargo.lock b/Cargo.lock
index afcff1bfb06..de5158ab073 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -3551,7 +3551,6 @@ version = "0.0.0"
 dependencies = [
  "rustc_ast",
  "rustc_span",
- "rustc_target",
  "tracing",
 ]
 
diff --git a/compiler/rustc_ast_pretty/Cargo.toml b/compiler/rustc_ast_pretty/Cargo.toml
index f447bc7f4ef..6ea942a2f30 100644
--- a/compiler/rustc_ast_pretty/Cargo.toml
+++ b/compiler/rustc_ast_pretty/Cargo.toml
@@ -11,4 +11,3 @@ doctest = false
 tracing = "0.1"
 rustc_span = { path = "../rustc_span" }
 rustc_ast = { path = "../rustc_ast" }
-rustc_target = { path = "../rustc_target" }
diff --git a/compiler/rustc_middle/src/ty/layout.rs b/compiler/rustc_middle/src/ty/layout.rs
index 195e840866a..ef467ed6514 100644
--- a/compiler/rustc_middle/src/ty/layout.rs
+++ b/compiler/rustc_middle/src/ty/layout.rs
@@ -2514,7 +2514,7 @@ where
         extra_args: &[Ty<'tcx>],
         caller_location: Option<Ty<'tcx>>,
         codegen_fn_attr_flags: CodegenFnAttrFlags,
-        mk_arg_type: impl Fn(Ty<'tcx>, Option<usize>) -> ArgAbi<'tcx, Ty<'tcx>>,
+        force_thin_self_ptr: bool,
     ) -> Self;
     fn adjust_for_abi(&mut self, cx: &C, abi: SpecAbi);
 }
@@ -2574,9 +2574,7 @@ where
         // Assume that fn pointers may always unwind
         let codegen_fn_attr_flags = CodegenFnAttrFlags::UNWIND;
 
-        call::FnAbi::new_internal(cx, sig, extra_args, None, codegen_fn_attr_flags, |ty, _| {
-            ArgAbi::new(cx.layout_of(ty))
-        })
+        call::FnAbi::new_internal(cx, sig, extra_args, None, codegen_fn_attr_flags, false)
     }
 
     fn of_instance(cx: &C, instance: ty::Instance<'tcx>, extra_args: &[Ty<'tcx>]) -> Self {
@@ -2590,55 +2588,14 @@ where
 
         let attrs = cx.tcx().codegen_fn_attrs(instance.def_id()).flags;
 
-        call::FnAbi::new_internal(cx, sig, extra_args, caller_location, attrs, |ty, arg_idx| {
-            let mut layout = cx.layout_of(ty);
-            // Don't pass the vtable, it's not an argument of the virtual fn.
-            // Instead, pass just the data pointer, but give it the type `*const/mut dyn Trait`
-            // or `&/&mut dyn Trait` because this is special-cased elsewhere in codegen
-            if let (ty::InstanceDef::Virtual(..), Some(0)) = (&instance.def, arg_idx) {
-                let fat_pointer_ty = if layout.is_unsized() {
-                    // unsized `self` is passed as a pointer to `self`
-                    // FIXME (mikeyhew) change this to use &own if it is ever added to the language
-                    cx.tcx().mk_mut_ptr(layout.ty)
-                } else {
-                    match layout.abi {
-                        Abi::ScalarPair(..) => (),
-                        _ => bug!("receiver type has unsupported layout: {:?}", layout),
-                    }
-
-                    // In the case of Rc<Self>, we need to explicitly pass a *mut RcBox<Self>
-                    // with a Scalar (not ScalarPair) ABI. This is a hack that is understood
-                    // elsewhere in the compiler as a method on a `dyn Trait`.
-                    // To get the type `*mut RcBox<Self>`, we just keep unwrapping newtypes until we
-                    // get a built-in pointer type
-                    let mut fat_pointer_layout = layout;
-                    'descend_newtypes: while !fat_pointer_layout.ty.is_unsafe_ptr()
-                        && !fat_pointer_layout.ty.is_region_ptr()
-                    {
-                        for i in 0..fat_pointer_layout.fields.count() {
-                            let field_layout = fat_pointer_layout.field(cx, i);
-
-                            if !field_layout.is_zst() {
-                                fat_pointer_layout = field_layout;
-                                continue 'descend_newtypes;
-                            }
-                        }
-
-                        bug!("receiver has no non-zero-sized fields {:?}", fat_pointer_layout);
-                    }
-
-                    fat_pointer_layout.ty
-                };
-
-                // we now have a type like `*mut RcBox<dyn Trait>`
-                // change its layout to that of `*mut ()`, a thin pointer, but keep the same type
-                // this is understood as a special case elsewhere in the compiler
-                let unit_pointer_ty = cx.tcx().mk_mut_ptr(cx.tcx().mk_unit());
-                layout = cx.layout_of(unit_pointer_ty);
-                layout.ty = fat_pointer_ty;
-            }
-            ArgAbi::new(layout)
-        })
+        call::FnAbi::new_internal(
+            cx,
+            sig,
+            extra_args,
+            caller_location,
+            attrs,
+            matches!(instance.def, ty::InstanceDef::Virtual(..)),
+        )
     }
 
     fn new_internal(
@@ -2647,7 +2604,7 @@ where
         extra_args: &[Ty<'tcx>],
         caller_location: Option<Ty<'tcx>>,
         codegen_fn_attr_flags: CodegenFnAttrFlags,
-        mk_arg_type: impl Fn(Ty<'tcx>, Option<usize>) -> ArgAbi<'tcx, Ty<'tcx>>,
+        force_thin_self_ptr: bool,
     ) -> Self {
         debug!("FnAbi::new_internal({:?}, {:?})", sig, extra_args);
 
@@ -2778,7 +2735,23 @@ where
 
         let arg_of = |ty: Ty<'tcx>, arg_idx: Option<usize>| {
             let is_return = arg_idx.is_none();
-            let mut arg = mk_arg_type(ty, arg_idx);
+
+            let layout = cx.layout_of(ty);
+            let layout = if force_thin_self_ptr && arg_idx == Some(0) {
+                // Don't pass the vtable; it's not an argument of the virtual fn.
+                // Instead, pass just the data pointer, but give it the type `*const/mut dyn Trait`
+                // or `&/&mut dyn Trait`, because this is special-cased elsewhere in codegen.
+                make_thin_self_ptr(cx, layout)
+            } else {
+                layout
+            };
+
+            let mut arg = ArgAbi::new(cx, layout, |layout, scalar, offset| {
+                let mut attrs = ArgAttributes::new();
+                adjust_for_rust_scalar(&mut attrs, scalar, *layout, offset, is_return);
+                attrs
+            });
+
             if arg.layout.is_zst() {
                 // For some forsaken reason, x86_64-pc-windows-gnu
                 // doesn't ignore zero-sized struct arguments.
@@ -2794,30 +2767,6 @@ where
                 }
             }
 
-            // FIXME(eddyb) other ABIs don't have logic for scalar pairs.
-            if !is_return && rust_abi {
-                if let Abi::ScalarPair(ref a, ref b) = arg.layout.abi {
-                    let mut a_attrs = ArgAttributes::new();
-                    let mut b_attrs = ArgAttributes::new();
-                    adjust_for_rust_scalar(&mut a_attrs, a, arg.layout, Size::ZERO, false);
-                    adjust_for_rust_scalar(
-                        &mut b_attrs,
-                        b,
-                        arg.layout,
-                        a.value.size(cx).align_to(b.value.align(cx).abi),
-                        false,
-                    );
-                    arg.mode = PassMode::Pair(a_attrs, b_attrs);
-                    return arg;
-                }
-            }
-
-            if let Abi::Scalar(ref scalar) = arg.layout.abi {
-                if let PassMode::Direct(ref mut attrs) = arg.mode {
-                    adjust_for_rust_scalar(attrs, scalar, arg.layout, Size::ZERO, is_return);
-                }
-            }
-
             arg
         };
 
@@ -2894,9 +2843,10 @@ where
                 let max_by_val_size = Pointer.size(cx) * 2;
                 let size = arg.layout.size;
 
-                if arg.layout.is_unsized() || size > max_by_val_size {
-                    arg.make_indirect();
-                } else {
+                let is_indirect_not_on_stack =
+                    matches!(arg.mode, PassMode::Indirect { on_stack: false, .. });
+                assert!(is_indirect_not_on_stack, "{:?}", arg);
+                if !arg.layout.is_unsized() && size <= max_by_val_size {
                     // We want to pass small aggregates as immediates, but using
                     // a LLVM aggregate type for this leads to bad optimizations,
                     // so we pick an appropriately sized integer type instead.
@@ -2915,3 +2865,52 @@ where
         }
     }
 }
+
+fn make_thin_self_ptr<'tcx, C>(cx: &C, mut layout: TyAndLayout<'tcx>) -> TyAndLayout<'tcx>
+where
+    C: LayoutOf<Ty = Ty<'tcx>, TyAndLayout = TyAndLayout<'tcx>>
+        + HasTyCtxt<'tcx>
+        + HasParamEnv<'tcx>,
+{
+    let fat_pointer_ty = if layout.is_unsized() {
+        // Unsized `self` is passed as a pointer to `self`.
+        // FIXME (mikeyhew) change this to use &own if it is ever added to the language
+        cx.tcx().mk_mut_ptr(layout.ty)
+    } else {
+        match layout.abi {
+            Abi::ScalarPair(..) => (),
+            _ => bug!("receiver type has unsupported layout: {:?}", layout),
+        }
+
+        // In the case of `Rc<Self>`, we need to explicitly pass a `*mut RcBox<Self>`
+        // with a Scalar (not ScalarPair) ABI. This is a hack that is understood
+        // elsewhere in the compiler as a method on a `dyn Trait`.
+        // To get the type `*mut RcBox<Self>`, we just keep unwrapping newtypes until we
+        // get a built-in pointer type.
+        let mut fat_pointer_layout = layout;
+        'descend_newtypes: while !fat_pointer_layout.ty.is_unsafe_ptr()
+            && !fat_pointer_layout.ty.is_region_ptr()
+        {
+            for i in 0..fat_pointer_layout.fields.count() {
+                let field_layout = fat_pointer_layout.field(cx, i);
+
+                if !field_layout.is_zst() {
+                    fat_pointer_layout = field_layout;
+                    continue 'descend_newtypes;
+                }
+            }
+
+            bug!("receiver has no non-zero-sized fields {:?}", fat_pointer_layout);
+        }
+
+        fat_pointer_layout.ty
+    };
+
+    // We now have a type like `*mut RcBox<dyn Trait>`. Change its layout to
+    // that of `*mut ()`, a thin pointer, but keep the same type; this is
+    // understood as a special case elsewhere in the compiler.
+    let unit_pointer_ty = cx.tcx().mk_mut_ptr(cx.tcx().mk_unit());
+    layout = cx.layout_of(unit_pointer_ty);
+    layout.ty = fat_pointer_ty;
+    layout
+}
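
(Editor's illustration, not part of the patch.) The receiver situation that the moved-out `make_thin_self_ptr` helper handles shows up in ordinary stable Rust whenever a trait method takes `self: Rc<Self>` and is invoked through `dyn Trait`. The sketch below is a minimal, runnable example of that case; the trait and type names (`Greet`, `World`) are made up for illustration only.

use std::rc::Rc;

trait Greet {
    // `Rc<Self>` receivers are object-safe; at a `dyn Greet` call site the
    // compiler must pass only the data half of the fat pointer, thin.
    fn greet(self: Rc<Self>) -> String;
}

struct World;

impl Greet for World {
    fn greet(self: Rc<Self>) -> String {
        "hello".to_string()
    }
}

fn main() {
    let obj: Rc<dyn Greet> = Rc::new(World);
    // A virtual call: `instance.def` is `InstanceDef::Virtual(..)`, so
    // argument 0 goes through the thin-self-pointer adjustment above.
    assert_eq!(obj.greet(), "hello");
}

At the virtual call, codegen keeps the receiver's original pointer type (here, something like `*mut RcBox<dyn Greet>`) but gives it the layout of `*mut ()`, which is exactly what the helper computes.
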
diff --git a/compiler/rustc_target/src/abi/call/mod.rs b/compiler/rustc_target/src/abi/call/mod.rs
index a9e595d11e7..2cbd52bf3e9 100644
--- a/compiler/rustc_target/src/abi/call/mod.rs
+++ b/compiler/rustc_target/src/abi/call/mod.rs
@@ -27,10 +27,16 @@ mod x86_win64;
 #[derive(Clone, Copy, PartialEq, Eq, Debug)]
 pub enum PassMode {
     /// Ignore the argument.
+    ///
+    /// The argument is either uninhabited or a ZST.
     Ignore,
     /// Pass the argument directly.
+    ///
+    /// The argument has a layout ABI of `Scalar` or `Vector`.
     Direct(ArgAttributes),
     /// Pass a pair's elements directly in two arguments.
+    ///
+    /// The argument has a layout ABI of `ScalarPair`.
     Pair(ArgAttributes, ArgAttributes),
     /// Pass the argument after casting it, to either
     /// a single uniform or a pair of registers.
@@ -434,28 +440,49 @@ pub struct ArgAbi<'a, Ty> {
 }
 
 impl<'a, Ty> ArgAbi<'a, Ty> {
-    pub fn new(layout: TyAndLayout<'a, Ty>) -> Self {
-        ArgAbi { layout, pad: None, mode: PassMode::Direct(ArgAttributes::new()) }
+    pub fn new(
+        cx: &impl HasDataLayout,
+        layout: TyAndLayout<'a, Ty>,
+        scalar_attrs: impl Fn(&TyAndLayout<'a, Ty>, &abi::Scalar, Size) -> ArgAttributes,
+    ) -> Self {
+        let mode = match &layout.abi {
+            Abi::Uninhabited => PassMode::Ignore,
+            Abi::Scalar(scalar) => PassMode::Direct(scalar_attrs(&layout, scalar, Size::ZERO)),
+            Abi::ScalarPair(a, b) => PassMode::Pair(
+                scalar_attrs(&layout, a, Size::ZERO),
+                scalar_attrs(&layout, b, a.value.size(cx).align_to(b.value.align(cx).abi)),
+            ),
+            Abi::Vector { .. } => PassMode::Direct(ArgAttributes::new()),
+            Abi::Aggregate { .. } => Self::indirect_pass_mode(&layout),
+        };
+        ArgAbi { layout, pad: None, mode }
     }
 
-    pub fn make_indirect(&mut self) {
-        assert_eq!(self.mode, PassMode::Direct(ArgAttributes::new()));
-
-        // Start with fresh attributes for the pointer.
+    fn indirect_pass_mode(layout: &TyAndLayout<'a, Ty>) -> PassMode {
         let mut attrs = ArgAttributes::new();
 
         // For non-immediate arguments the callee gets its own copy of
         // the value on the stack, so there are no aliases. It's also
         // program-invisible so can't possibly capture
         attrs.set(ArgAttribute::NoAlias).set(ArgAttribute::NoCapture).set(ArgAttribute::NonNull);
-        attrs.pointee_size = self.layout.size;
+        attrs.pointee_size = layout.size;
         // FIXME(eddyb) We should be doing this, but at least on
         // i686-pc-windows-msvc, it results in wrong stack offsets.
-        // attrs.pointee_align = Some(self.layout.align.abi);
+        // attrs.pointee_align = Some(layout.align.abi);
 
-        let extra_attrs = self.layout.is_unsized().then_some(ArgAttributes::new());
+        let extra_attrs = layout.is_unsized().then_some(ArgAttributes::new());
 
-        self.mode = PassMode::Indirect { attrs, extra_attrs, on_stack: false };
+        PassMode::Indirect { attrs, extra_attrs, on_stack: false }
+    }
+
+    pub fn make_indirect(&mut self) {
+        match self.mode {
+            PassMode::Direct(_) | PassMode::Pair(_, _) => {}
+            PassMode::Indirect { attrs: _, extra_attrs: None, on_stack: false } => return,
+            _ => panic!("Tried to make {:?} indirect", self.mode),
+        }
+
+        self.mode = Self::indirect_pass_mode(&self.layout);
     }
 
     pub fn make_indirect_byval(&mut self) {
@@ -486,7 +513,6 @@ impl<'a, Ty> ArgAbi<'a, Ty> {
     }
 
     pub fn cast_to<T: Into<CastTarget>>(&mut self, target: T) {
-        assert_eq!(self.mode, PassMode::Direct(ArgAttributes::new()));
         self.mode = PassMode::Cast(target.into());
     }
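
(Editor's illustration, not part of the patch.) The net effect of the new `ArgAbi::new` is that the initial `PassMode` is derived once from the layout ABI, instead of defaulting to `Direct(ArgAttributes::new())` and being patched up by every caller. A minimal standalone model of that classification, using simplified stand-in types rather than rustc's real `Abi`, `PassMode`, and `ArgAttributes`:

// Stand-in types; rustc's real ones carry scalars, attributes, cast
// targets, and indirect/on-stack details omitted here.
#[allow(dead_code)]
#[derive(Debug)]
enum Abi { Uninhabited, Scalar, ScalarPair, Vector, Aggregate }

#[derive(Debug, PartialEq)]
enum PassMode { Ignore, Direct, Pair, Indirect }

// Mirrors the `match &layout.abi { .. }` in the new `ArgAbi::new`:
// the initial pass mode follows directly from the layout ABI.
fn initial_pass_mode(abi: &Abi) -> PassMode {
    match abi {
        Abi::Uninhabited => PassMode::Ignore,
        Abi::Scalar | Abi::Vector => PassMode::Direct,
        Abi::ScalarPair => PassMode::Pair,
        Abi::Aggregate => PassMode::Indirect,
    }
}

fn main() {
    assert_eq!(initial_pass_mode(&Abi::ScalarPair), PassMode::Pair);
    assert_eq!(initial_pass_mode(&Abi::Aggregate), PassMode::Indirect);
}

This is why `make_indirect` and the Windows ABI workaround can now assert on the mode they find rather than assuming a freshly constructed `Direct` argument.
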