rustc_ty_utils/abi.rs

use std::iter;

use rustc_abi::Primitive::Pointer;
use rustc_abi::{BackendRepr, ExternAbi, PointerKind, Scalar, Size};
use rustc_hir as hir;
use rustc_hir::lang_items::LangItem;
use rustc_middle::bug;
use rustc_middle::query::Providers;
use rustc_middle::ty::layout::{
    FnAbiError, HasTyCtxt, HasTypingEnv, LayoutCx, LayoutOf, TyAndLayout, fn_can_unwind,
};
use rustc_middle::ty::{self, InstanceKind, Ty, TyCtxt};
use rustc_session::config::OptLevel;
use rustc_span::def_id::DefId;
use rustc_target::callconv::{
    ArgAbi, ArgAttribute, ArgAttributes, ArgExtension, Conv, FnAbi, PassMode, RiscvInterruptKind,
};
use tracing::debug;

pub(crate) fn provide(providers: &mut Providers) {
    *providers = Providers { fn_abi_of_fn_ptr, fn_abi_of_instance, ..*providers };
}

// NOTE(eddyb) this is private to avoid using it from outside of
// `fn_abi_of_instance` - any other uses are either too high-level
// for `Instance` (e.g. typeck would use `Ty::fn_sig` instead),
// or should go through `FnAbi` instead, to avoid losing any
// adjustments `fn_abi_of_instance` might be performing.
#[tracing::instrument(level = "debug", skip(tcx, typing_env))]
fn fn_sig_for_fn_abi<'tcx>(
    tcx: TyCtxt<'tcx>,
    instance: ty::Instance<'tcx>,
    typing_env: ty::TypingEnv<'tcx>,
) -> ty::FnSig<'tcx> {
    if let InstanceKind::ThreadLocalShim(..) = instance.def {
        return tcx.mk_fn_sig(
            [],
            tcx.thread_local_ptr_ty(instance.def_id()),
            false,
            hir::Safety::Safe,
            rustc_abi::ExternAbi::Unadjusted,
        );
    }

    let ty = instance.ty(tcx, typing_env);
    match *ty.kind() {
        ty::FnDef(def_id, args) => {
            let mut sig = tcx
                .instantiate_bound_regions_with_erased(tcx.fn_sig(def_id).instantiate(tcx, args));

            // Modify `fn(self, ...)` to `fn(self: *mut Self, ...)`.
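            // For example (illustrative): a by-value trait method `fn m(self)` called
            // through a vtable gets a shim whose first input is rewritten below to
            // `*mut Self`.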
            if let ty::InstanceKind::VTableShim(..) = instance.def {
                let mut inputs_and_output = sig.inputs_and_output.to_vec();
                inputs_and_output[0] = Ty::new_mut_ptr(tcx, inputs_and_output[0]);
                sig.inputs_and_output = tcx.mk_type_list(&inputs_and_output);
            }

            sig
        }
        ty::Closure(def_id, args) => {
            let sig = tcx.instantiate_bound_regions_with_erased(args.as_closure().sig());
            let env_ty = tcx.closure_env_ty(
                Ty::new_closure(tcx, def_id, args),
                args.as_closure().kind(),
                tcx.lifetimes.re_erased,
            );
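            // Illustration (not code in this crate): for an `FnMut` closure taking
            // `(u8, u16)`, `env_ty` is `&mut {closure}`, so the signature built below
            // is roughly `fn(&mut {closure}, (u8, u16))` under the `rust-call` ABI.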

            tcx.mk_fn_sig(
                iter::once(env_ty).chain(sig.inputs().iter().cloned()),
                sig.output(),
                sig.c_variadic,
                sig.safety,
                sig.abi,
            )
        }
        ty::CoroutineClosure(def_id, args) => {
            let coroutine_ty = Ty::new_coroutine_closure(tcx, def_id, args);
            let sig = args.as_coroutine_closure().coroutine_closure_sig();

            // When this `CoroutineClosure` comes from a `ConstructCoroutineInClosureShim`,
            // make sure we respect the `target_kind` in that shim.
            // FIXME(async_closures): This shouldn't be needed, and we should be populating
            // a separate def-id for these bodies.
            let mut coroutine_kind = args.as_coroutine_closure().kind();

            let env_ty =
                if let InstanceKind::ConstructCoroutineInClosureShim { receiver_by_ref, .. } =
                    instance.def
                {
                    coroutine_kind = ty::ClosureKind::FnOnce;

                    // Implementations of `FnMut` and `Fn` for coroutine-closures
                    // still take their receiver by ref.
                    if receiver_by_ref {
                        Ty::new_imm_ref(tcx, tcx.lifetimes.re_erased, coroutine_ty)
                    } else {
                        coroutine_ty
                    }
                } else {
                    tcx.closure_env_ty(coroutine_ty, coroutine_kind, tcx.lifetimes.re_erased)
                };

            let sig = tcx.instantiate_bound_regions_with_erased(sig);

            tcx.mk_fn_sig(
                iter::once(env_ty).chain([sig.tupled_inputs_ty]),
                sig.to_coroutine_given_kind_and_upvars(
                    tcx,
                    args.as_coroutine_closure().parent_args(),
                    tcx.coroutine_for_closure(def_id),
                    coroutine_kind,
                    tcx.lifetimes.re_erased,
                    args.as_coroutine_closure().tupled_upvars_ty(),
                    args.as_coroutine_closure().coroutine_captures_by_ref_ty(),
                ),
                sig.c_variadic,
                sig.safety,
                sig.abi,
            )
        }
        ty::Coroutine(did, args) => {
            let coroutine_kind = tcx.coroutine_kind(did).unwrap();
            let sig = args.as_coroutine().sig();

            let env_ty = Ty::new_mut_ref(tcx, tcx.lifetimes.re_erased, ty);

            let pin_did = tcx.require_lang_item(LangItem::Pin, None);
            let pin_adt_ref = tcx.adt_def(pin_did);
            let pin_args = tcx.mk_args(&[env_ty.into()]);
            let env_ty = match coroutine_kind {
                hir::CoroutineKind::Desugared(hir::CoroutineDesugaring::Gen, _) => {
                    // Iterator::next doesn't accept a pinned argument,
                    // unlike for all other coroutine kinds.
                    env_ty
                }
                hir::CoroutineKind::Desugared(hir::CoroutineDesugaring::Async, _)
                | hir::CoroutineKind::Desugared(hir::CoroutineDesugaring::AsyncGen, _)
                | hir::CoroutineKind::Coroutine(_) => Ty::new_adt(tcx, pin_adt_ref, pin_args),
            };
            // The `FnSig` and the `ret_ty` here are for a coroutine's main function:
            // `Coroutine::resume(...) -> CoroutineState` in case we have an ordinary
            // coroutine, `Future::poll(...) -> Poll` in case this coroutine backs an
            // async construct, or `Iterator::next(...) -> Option` in case it backs a
            // gen construct.
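            // Illustrative shapes of the resulting signatures (matching the arms below):
            //   async:     fn(Pin<&mut {coroutine}>, &mut Context<'_>) -> Poll<Output>
            //   gen:       fn(&mut {coroutine}) -> Option<Yield>
            //   async gen: fn(Pin<&mut {coroutine}>, &mut Context<'_>) -> Poll<Option<_>>
            //   coroutine: fn(Pin<&mut {coroutine}>, Resume) -> CoroutineState<Yield, Return>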
            let (resume_ty, ret_ty) = match coroutine_kind {
                hir::CoroutineKind::Desugared(hir::CoroutineDesugaring::Async, _) => {
                    // The signature should be `Future::poll(_, &mut Context<'_>) -> Poll<Output>`
                    assert_eq!(sig.yield_ty, tcx.types.unit);

                    let poll_did = tcx.require_lang_item(LangItem::Poll, None);
                    let poll_adt_ref = tcx.adt_def(poll_did);
                    let poll_args = tcx.mk_args(&[sig.return_ty.into()]);
                    let ret_ty = Ty::new_adt(tcx, poll_adt_ref, poll_args);

                    // We have to replace the `ResumeTy` that is used for type and borrow checking
                    // with `&mut Context<'_>` which is used in codegen.
                    #[cfg(debug_assertions)]
                    {
                        if let ty::Adt(resume_ty_adt, _) = sig.resume_ty.kind() {
                            let expected_adt =
                                tcx.adt_def(tcx.require_lang_item(LangItem::ResumeTy, None));
                            assert_eq!(*resume_ty_adt, expected_adt);
                        } else {
                            panic!("expected `ResumeTy`, found `{:?}`", sig.resume_ty);
                        };
                    }
                    let context_mut_ref = Ty::new_task_context(tcx);

                    (Some(context_mut_ref), ret_ty)
                }
                hir::CoroutineKind::Desugared(hir::CoroutineDesugaring::Gen, _) => {
                    // The signature should be `Iterator::next(_) -> Option<Yield>`
                    let option_did = tcx.require_lang_item(LangItem::Option, None);
                    let option_adt_ref = tcx.adt_def(option_did);
                    let option_args = tcx.mk_args(&[sig.yield_ty.into()]);
                    let ret_ty = Ty::new_adt(tcx, option_adt_ref, option_args);

                    assert_eq!(sig.return_ty, tcx.types.unit);
                    assert_eq!(sig.resume_ty, tcx.types.unit);

                    (None, ret_ty)
                }
                hir::CoroutineKind::Desugared(hir::CoroutineDesugaring::AsyncGen, _) => {
                    // The signature should be
                    // `AsyncIterator::poll_next(_, &mut Context<'_>) -> Poll<Option<Output>>`
                    assert_eq!(sig.return_ty, tcx.types.unit);

                    // Yield type is already `Poll<Option<yield_ty>>`
                    let ret_ty = sig.yield_ty;

                    // We have to replace the `ResumeTy` that is used for type and borrow checking
                    // with `&mut Context<'_>` which is used in codegen.
                    #[cfg(debug_assertions)]
                    {
                        if let ty::Adt(resume_ty_adt, _) = sig.resume_ty.kind() {
                            let expected_adt =
                                tcx.adt_def(tcx.require_lang_item(LangItem::ResumeTy, None));
                            assert_eq!(*resume_ty_adt, expected_adt);
                        } else {
                            panic!("expected `ResumeTy`, found `{:?}`", sig.resume_ty);
                        };
                    }
                    let context_mut_ref = Ty::new_task_context(tcx);

                    (Some(context_mut_ref), ret_ty)
                }
                hir::CoroutineKind::Coroutine(_) => {
                    // The signature should be `Coroutine::resume(_, Resume) -> CoroutineState<Yield, Return>`
                    let state_did = tcx.require_lang_item(LangItem::CoroutineState, None);
                    let state_adt_ref = tcx.adt_def(state_did);
                    let state_args = tcx.mk_args(&[sig.yield_ty.into(), sig.return_ty.into()]);
                    let ret_ty = Ty::new_adt(tcx, state_adt_ref, state_args);

                    (Some(sig.resume_ty), ret_ty)
                }
            };

            if let Some(resume_ty) = resume_ty {
                tcx.mk_fn_sig(
                    [env_ty, resume_ty],
                    ret_ty,
                    false,
                    hir::Safety::Safe,
                    rustc_abi::ExternAbi::Rust,
                )
            } else {
                // `Iterator::next` doesn't have a `resume` argument.
                tcx.mk_fn_sig(
                    [env_ty],
                    ret_ty,
                    false,
                    hir::Safety::Safe,
                    rustc_abi::ExternAbi::Rust,
                )
            }
        }
        _ => bug!("unexpected type {:?} in Instance::fn_sig", ty),
    }
}

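/// Maps a source-level `ExternAbi` to a backend calling convention. For example,
/// `extern "C"` maps to `Conv::C`, and on x86 `extern "stdcall"` maps to
/// `Conv::X86Stdcall` (after any target-specific ABI adjustment).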
#[inline]
fn conv_from_spec_abi(tcx: TyCtxt<'_>, abi: ExternAbi, c_variadic: bool) -> Conv {
    use rustc_abi::ExternAbi::*;
    match tcx.sess.target.adjust_abi(abi, c_variadic) {
        Rust | RustCall => Conv::Rust,

        // This is intentionally not using `Conv::Cold`, as that has to preserve
        // even SIMD registers, which is generally not a good trade-off.
        RustCold => Conv::PreserveMost,

        // It's the ABI's job to select this, not ours.
        System { .. } => bug!("system abi should be selected elsewhere"),
        EfiApi => bug!("eficall abi should be selected elsewhere"),

        Stdcall { .. } => Conv::X86Stdcall,
        Fastcall { .. } => Conv::X86Fastcall,
        Vectorcall { .. } => Conv::X86VectorCall,
        Thiscall { .. } => Conv::X86ThisCall,
        C { .. } => Conv::C,
        Unadjusted => Conv::C,
        Win64 { .. } => Conv::X86_64Win64,
        SysV64 { .. } => Conv::X86_64SysV,
        Aapcs { .. } => Conv::ArmAapcs,
        CCmseNonSecureCall => Conv::CCmseNonSecureCall,
        CCmseNonSecureEntry => Conv::CCmseNonSecureEntry,
        PtxKernel => Conv::GpuKernel,
        Msp430Interrupt => Conv::Msp430Intr,
        X86Interrupt => Conv::X86Intr,
        GpuKernel => Conv::GpuKernel,
        AvrInterrupt => Conv::AvrInterrupt,
        AvrNonBlockingInterrupt => Conv::AvrNonBlockingInterrupt,
        RiscvInterruptM => Conv::RiscvInterrupt { kind: RiscvInterruptKind::Machine },
        RiscvInterruptS => Conv::RiscvInterrupt { kind: RiscvInterruptKind::Supervisor },

        // These API constants ought to be more specific...
        Cdecl { .. } => Conv::C,
    }
}

fn fn_abi_of_fn_ptr<'tcx>(
    tcx: TyCtxt<'tcx>,
    query: ty::PseudoCanonicalInput<'tcx, (ty::PolyFnSig<'tcx>, &'tcx ty::List<Ty<'tcx>>)>,
) -> Result<&'tcx FnAbi<'tcx, Ty<'tcx>>, &'tcx FnAbiError<'tcx>> {
    let ty::PseudoCanonicalInput { typing_env, value: (sig, extra_args) } = query;
    fn_abi_new_uncached(
        &LayoutCx::new(tcx, typing_env),
        tcx.instantiate_bound_regions_with_erased(sig),
        extra_args,
        None,
    )
}

fn fn_abi_of_instance<'tcx>(
    tcx: TyCtxt<'tcx>,
    query: ty::PseudoCanonicalInput<'tcx, (ty::Instance<'tcx>, &'tcx ty::List<Ty<'tcx>>)>,
) -> Result<&'tcx FnAbi<'tcx, Ty<'tcx>>, &'tcx FnAbiError<'tcx>> {
    let ty::PseudoCanonicalInput { typing_env, value: (instance, extra_args) } = query;
    fn_abi_new_uncached(
        &LayoutCx::new(tcx, typing_env),
        fn_sig_for_fn_abi(tcx, instance, typing_env),
        extra_args,
        Some(instance),
    )
}

// Handle safe Rust thin and wide pointers.
fn adjust_for_rust_scalar<'tcx>(
    cx: LayoutCx<'tcx>,
    attrs: &mut ArgAttributes,
    scalar: Scalar,
    layout: TyAndLayout<'tcx>,
    offset: Size,
    is_return: bool,
    drop_target_pointee: Option<Ty<'tcx>>,
) {
    // Booleans are always a noundef i1 that needs to be zero-extended.
    if scalar.is_bool() {
        attrs.ext(ArgExtension::Zext);
        attrs.set(ArgAttribute::NoUndef);
        return;
    }

    if !scalar.is_uninit_valid() {
        attrs.set(ArgAttribute::NoUndef);
    }

    // Only pointer types are handled below.
    let Scalar::Initialized { value: Pointer(_), valid_range } = scalar else { return };

    // Set `nonnull` if the validity range excludes zero, or for the argument to `drop_in_place`,
    // which must be nonnull per its documented safety requirements.
    if !valid_range.contains(0) || drop_target_pointee.is_some() {
        attrs.set(ArgAttribute::NonNull);
    }

    let tcx = cx.tcx();

    if let Some(pointee) = layout.pointee_info_at(&cx, offset) {
        let kind = if let Some(kind) = pointee.safe {
            Some(kind)
        } else if let Some(pointee) = drop_target_pointee {
            // The argument to `drop_in_place` is semantically equivalent to a mutable reference.
            Some(PointerKind::MutableRef { unpin: pointee.is_unpin(tcx, cx.typing_env) })
        } else {
            None
        };
        if let Some(kind) = kind {
            attrs.pointee_align =
                Some(pointee.align.min(cx.tcx().sess.target.max_reliable_alignment()));

            // A `Box` is not necessarily dereferenceable for the entire duration of the function,
            // as it can be deallocated at any time. The same applies to non-frozen shared
            // references (see <https://github.com/rust-lang/rust/pull/98017>) and to mutable
            // references to potentially self-referential types (see
            // <https://github.com/rust-lang/unsafe-code-guidelines/issues/381>). If LLVM had a way
            // to say "dereferenceable on entry" we could use it here.
            attrs.pointee_size = match kind {
                PointerKind::Box { .. }
                | PointerKind::SharedRef { frozen: false }
                | PointerKind::MutableRef { unpin: false } => Size::ZERO,
                PointerKind::SharedRef { frozen: true }
                | PointerKind::MutableRef { unpin: true } => pointee.size,
            };

            // The aliasing rules for `Box<T>` are still not decided, but currently we emit
            // `noalias` for it. This can be turned off using an unstable flag.
            // See https://github.com/rust-lang/unsafe-code-guidelines/issues/326
            let noalias_for_box = tcx.sess.opts.unstable_opts.box_noalias;

            // LLVM prior to version 12 had known miscompiles in the presence of noalias attributes
            // (see #54878), so it was conditionally disabled, but we don't support earlier
            // versions at all anymore. We still support turning it off using -Zmutable-noalias.
            let noalias_mut_ref = tcx.sess.opts.unstable_opts.mutable_noalias;

            // `&T` where `T` contains no `UnsafeCell<U>` is immutable, and can be marked as both
            // `readonly` and `noalias`, as LLVM's definition of `noalias` is based solely on memory
            // dependencies rather than pointer equality. However this only applies to arguments,
            // not return values.
            //
            // `&mut T` and `Box<T>` where `T: Unpin` are unique and hence `noalias`.
            let no_alias = match kind {
                PointerKind::SharedRef { frozen } => frozen,
                PointerKind::MutableRef { unpin } => unpin && noalias_mut_ref,
                PointerKind::Box { unpin, global } => unpin && global && noalias_for_box,
            };
            // We can never add `noalias` in return position; that LLVM attribute has some
            // very surprising semantics (see
            // <https://github.com/rust-lang/unsafe-code-guidelines/issues/385#issuecomment-1368055745>).
            if no_alias && !is_return {
                attrs.set(ArgAttribute::NoAlias);
            }

            if matches!(kind, PointerKind::SharedRef { frozen: true }) && !is_return {
                attrs.set(ArgAttribute::ReadOnly);
            }
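            // Net effect, as an illustration: a `&i32` argument typically ends up with
            // `noundef nonnull noalias readonly dereferenceable(4) align(4)` here.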
        }
    }
}

/// Ensure that the ABI makes basic sense.
fn fn_abi_sanity_check<'tcx>(
    cx: &LayoutCx<'tcx>,
    fn_abi: &FnAbi<'tcx, Ty<'tcx>>,
    spec_abi: ExternAbi,
) {
    fn fn_arg_sanity_check<'tcx>(
        cx: &LayoutCx<'tcx>,
        fn_abi: &FnAbi<'tcx, Ty<'tcx>>,
        spec_abi: ExternAbi,
        arg: &ArgAbi<'tcx, Ty<'tcx>>,
    ) {
        let tcx = cx.tcx();

        if spec_abi.is_rustic_abi() {
            if arg.layout.is_zst() {
                // Casting closures to function pointers depends on ZST closure types being
                // omitted entirely in the calling convention.
                assert!(arg.is_ignore());
            }
            if let PassMode::Indirect { on_stack, .. } = arg.mode {
                assert!(!on_stack, "rust abi shouldn't use on_stack");
            }
        }

        match &arg.mode {
            PassMode::Ignore => {
                assert!(arg.layout.is_zst());
            }
            PassMode::Direct(_) => {
                // Here the Rust type is used to determine the actual ABI, so we have to be very
                // careful. Scalar/Vector is fine, since backends will generally use
                // `layout.backend_repr` and ignore everything else. We should just reject
                // `Aggregate` entirely here, but some targets need to be fixed first.
                match arg.layout.backend_repr {
                    BackendRepr::Scalar(_) | BackendRepr::SimdVector { .. } => {}
                    BackendRepr::ScalarPair(..) => {
                        panic!("`PassMode::Direct` used for ScalarPair type {}", arg.layout.ty)
                    }
                    BackendRepr::Memory { sized } => {
                        // For an unsized type we'd only pass the sized prefix, so there is no universe
                        // in which we ever want to allow this.
                        assert!(sized, "`PassMode::Direct` for unsized type in ABI: {:#?}", fn_abi);
                        // This really shouldn't happen even for sized aggregates, since
                        // `immediate_llvm_type` will use `layout.fields` to turn this Rust type into an
                        // LLVM type. This means all sorts of Rust type details leak into the ABI.
                        // However, wasm sadly *does* currently use this mode for its "C" ABI, so we
                        // have to allow it -- but we absolutely shouldn't let any more targets do
                        // that. (Also see <https://github.com/rust-lang/rust/issues/115666>.)
                        //
                        // The unadjusted ABI also uses Direct for all args and is ill-specified,
                        // but unfortunately we need it for calling certain LLVM intrinsics.

                        match spec_abi {
                            ExternAbi::Unadjusted => {}
                            ExternAbi::C { unwind: _ }
                                if matches!(&*tcx.sess.target.arch, "wasm32" | "wasm64") => {}
                            _ => {
                                panic!(
                                    "`PassMode::Direct` for aggregates only allowed for \"unadjusted\" functions and on wasm\n\
                                      Problematic type: {:#?}",
                                    arg.layout,
                                );
                            }
                        }
                    }
                }
            }
            PassMode::Pair(_, _) => {
                // Similar to `Direct`, we need to make sure that backends use `layout.backend_repr`
                // and ignore the rest of the layout.
                assert!(
                    matches!(arg.layout.backend_repr, BackendRepr::ScalarPair(..)),
                    "PassMode::Pair for type {}",
                    arg.layout.ty
                );
            }
            PassMode::Cast { .. } => {
                // `Cast` means "transmute to `CastType`"; that only makes sense for sized types.
                assert!(arg.layout.is_sized());
            }
            PassMode::Indirect { meta_attrs: None, .. } => {
                // No metadata, so the argument must be sized.
                // Conceptually, unsized arguments must be copied around, which requires dynamically
                // determining their size, which we cannot do without metadata. Consult
                // t-opsem before removing this check.
                assert!(arg.layout.is_sized());
            }
            PassMode::Indirect { meta_attrs: Some(_), on_stack, .. } => {
                // With metadata. Must be unsized and not on the stack.
                assert!(arg.layout.is_unsized() && !on_stack);
                // Also, it must not be an `extern` type.
                let tail = tcx.struct_tail_for_codegen(arg.layout.ty, cx.typing_env);
                if matches!(tail.kind(), ty::Foreign(..)) {
                    // These types do not have metadata, so having `meta_attrs` is bogus.
                    // Conceptually, unsized arguments must be copied around, which requires dynamically
                    // determining their size. Therefore, we cannot allow `extern` types here. Consult
                    // t-opsem before removing this check.
                    panic!("unsized arguments must not be `extern` types");
                }
            }
        }
    }

    for arg in fn_abi.args.iter() {
        fn_arg_sanity_check(cx, fn_abi, spec_abi, arg);
    }
    fn_arg_sanity_check(cx, fn_abi, spec_abi, &fn_abi.ret);
}

#[tracing::instrument(level = "debug", skip(cx, instance))]
fn fn_abi_new_uncached<'tcx>(
    cx: &LayoutCx<'tcx>,
    sig: ty::FnSig<'tcx>,
    extra_args: &[Ty<'tcx>],
    instance: Option<ty::Instance<'tcx>>,
) -> Result<&'tcx FnAbi<'tcx, Ty<'tcx>>, &'tcx FnAbiError<'tcx>> {
    let tcx = cx.tcx();
    let (caller_location, determined_fn_def_id, is_virtual_call) = if let Some(instance) = instance
    {
        let is_virtual_call = matches!(instance.def, ty::InstanceKind::Virtual(..));
        (
            instance.def.requires_caller_location(tcx).then(|| tcx.caller_location_ty()),
            if is_virtual_call { None } else { Some(instance.def_id()) },
            is_virtual_call,
        )
    } else {
        (None, None, false)
    };
    let sig = tcx.normalize_erasing_regions(cx.typing_env, sig);

    let conv = conv_from_spec_abi(cx.tcx(), sig.abi, sig.c_variadic);

    let mut inputs = sig.inputs();
    let extra_args = if sig.abi == ExternAbi::RustCall {
        assert!(!sig.c_variadic && extra_args.is_empty());

        if let Some(input) = sig.inputs().last()
            && let ty::Tuple(tupled_arguments) = input.kind()
        {
            inputs = &sig.inputs()[0..sig.inputs().len() - 1];
            tupled_arguments
        } else {
            bug!(
                "argument to function with \"rust-call\" ABI \
                    is not a tuple"
            );
        }
    } else {
        assert!(sig.c_variadic || extra_args.is_empty());
        extra_args
    };
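    // E.g. (illustrative): for `extern "rust-call" fn call_once(self, args: (u8, u16))`,
    // the trailing tuple is untupled above, so the ABI computed below sees the inputs
    // `[self, u8, u16]` rather than `[self, (u8, u16)]`.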

    let is_drop_in_place = determined_fn_def_id.is_some_and(|def_id| {
        tcx.is_lang_item(def_id, LangItem::DropInPlace)
            || tcx.is_lang_item(def_id, LangItem::AsyncDropInPlace)
    });

    let arg_of = |ty: Ty<'tcx>, arg_idx: Option<usize>| -> Result<_, &'tcx FnAbiError<'tcx>> {
        let span = tracing::debug_span!("arg_of");
        let _entered = span.enter();
        let is_return = arg_idx.is_none();
        let is_drop_target = is_drop_in_place && arg_idx == Some(0);
        let drop_target_pointee = is_drop_target.then(|| match ty.kind() {
            ty::RawPtr(ty, _) => *ty,
            _ => bug!("argument to drop_in_place is not a raw ptr: {:?}", ty),
        });

        let layout = cx.layout_of(ty).map_err(|err| &*tcx.arena.alloc(FnAbiError::Layout(*err)))?;
        let layout = if is_virtual_call && arg_idx == Some(0) {
            // Don't pass the vtable; it's not an argument of the virtual fn.
            // Instead, pass just the data pointer, but give it the type `*const/mut dyn Trait`
            // or `&/&mut dyn Trait`, because this is special-cased elsewhere in codegen.
            make_thin_self_ptr(cx, layout)
        } else {
            layout
        };

        let mut arg = ArgAbi::new(cx, layout, |layout, scalar, offset| {
            let mut attrs = ArgAttributes::new();
            adjust_for_rust_scalar(
                *cx,
                &mut attrs,
                scalar,
                *layout,
                offset,
                is_return,
                drop_target_pointee,
            );
            attrs
        });

        if arg.layout.is_zst() {
            arg.mode = PassMode::Ignore;
        }

        Ok(arg)
    };

    let mut fn_abi = FnAbi {
        ret: arg_of(sig.output(), None)?,
        args: inputs
            .iter()
            .copied()
            .chain(extra_args.iter().copied())
            .chain(caller_location)
            .enumerate()
            .map(|(i, ty)| arg_of(ty, Some(i)))
            .collect::<Result<_, _>>()?,
        c_variadic: sig.c_variadic,
        fixed_count: inputs.len() as u32,
        conv,
        can_unwind: fn_can_unwind(
            tcx,
            // Since `#[rustc_nounwind]` can change unwinding, we cannot infer unwinding
            // from `fn_def_id` for a virtual call.
            determined_fn_def_id,
            sig.abi,
        ),
    };
    fn_abi_adjust_for_abi(
        cx,
        &mut fn_abi,
        sig.abi,
        // If this is a virtual call, we cannot pass the `fn_def_id`, as it might call other
        // functions from the vtable. Internally, `deduced_param_attrs` attempts to infer
        // attributes by visiting the function body.
        determined_fn_def_id,
    );
    debug!("fn_abi_new_uncached = {:?}", fn_abi);
    fn_abi_sanity_check(cx, &fn_abi, sig.abi);
    Ok(tcx.arena.alloc(fn_abi))
}

#[tracing::instrument(level = "trace", skip(cx))]
fn fn_abi_adjust_for_abi<'tcx>(
    cx: &LayoutCx<'tcx>,
    fn_abi: &mut FnAbi<'tcx, Ty<'tcx>>,
    abi: ExternAbi,
    fn_def_id: Option<DefId>,
) {
    if abi == ExternAbi::Unadjusted {
        // The "unadjusted" ABI passes aggregates in "direct" mode. That's fragile but needed for
        // some LLVM intrinsics.
        fn unadjust<'tcx>(arg: &mut ArgAbi<'tcx, Ty<'tcx>>) {
            // This still uses `PassMode::Pair` for ScalarPair types. That's unlikely to be
            // intended, but who knows what breaks if we change this now.
            if matches!(arg.layout.backend_repr, BackendRepr::Memory { .. }) {
                assert!(
                    arg.layout.backend_repr.is_sized(),
                    "'unadjusted' ABI does not support unsized arguments"
                );
            }
            arg.make_direct_deprecated();
        }

        unadjust(&mut fn_abi.ret);
        for arg in fn_abi.args.iter_mut() {
            unadjust(arg);
        }
        return;
    }

    let tcx = cx.tcx();

    if abi.is_rustic_abi() {
        fn_abi.adjust_for_rust_abi(cx);

        // Look up the deduced parameter attributes for this function, if we have its def ID and
        // we're optimizing in non-incremental mode. We'll tag its parameters with those attributes
        // as appropriate.
        let deduced_param_attrs =
            if tcx.sess.opts.optimize != OptLevel::No && tcx.sess.opts.incremental.is_none() {
                fn_def_id.map(|fn_def_id| tcx.deduced_param_attrs(fn_def_id)).unwrap_or_default()
            } else {
                &[]
            };

        for (arg_idx, arg) in fn_abi.args.iter_mut().enumerate() {
            if arg.is_ignore() {
                continue;
            }

            // If we deduced that this parameter was read-only, add that to the attribute list now.
            //
            // The `readonly` attribute only applies to pointers, so we can only do this if the
            // argument was passed indirectly. (If the argument is passed directly, it's an SSA
            // value, so it's implicitly immutable.)
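            // E.g. (illustrative): a large by-value parameter such as `[u8; 1024]` is
            // passed indirectly under the Rust ABI; if the callee was deduced to never
            // write through that pointer, the parameter gets `readonly` below.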
            if let &mut PassMode::Indirect { ref mut attrs, .. } = &mut arg.mode {
                // The `deduced_param_attrs` list could be empty if this is a type of function
                // we can't deduce any parameters for, so make sure the argument index is in
                // bounds.
                if let Some(deduced_param_attrs) = deduced_param_attrs.get(arg_idx) {
                    if deduced_param_attrs.read_only {
                        attrs.regular.insert(ArgAttribute::ReadOnly);
                        debug!("added deduced read-only attribute");
                    }
                }
            }
        }
    } else {
        fn_abi.adjust_for_foreign_abi(cx, abi);
    }
}

#[tracing::instrument(level = "debug", skip(cx))]
fn make_thin_self_ptr<'tcx>(
    cx: &(impl HasTyCtxt<'tcx> + HasTypingEnv<'tcx>),
    layout: TyAndLayout<'tcx>,
) -> TyAndLayout<'tcx> {
    let tcx = cx.tcx();
    let wide_pointer_ty = if layout.is_unsized() {
        // unsized `self` is passed as a pointer to `self`
        // FIXME (mikeyhew) change this to use &own if it is ever added to the language
        Ty::new_mut_ptr(tcx, layout.ty)
    } else {
        match layout.backend_repr {
            BackendRepr::ScalarPair(..) | BackendRepr::Scalar(..) => (),
            _ => bug!("receiver type has unsupported layout: {:?}", layout),
        }

        // In the case of `Rc<Self>`, we need to explicitly pass a `*mut RcInner<Self>`
        // with a Scalar (not ScalarPair) ABI. This is a hack that is understood
        // elsewhere in the compiler as a method on a `dyn Trait`.
        // To get the type `*mut RcInner<Self>`, we just keep unwrapping newtypes until we
        // get a built-in pointer type.
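        // (Illustration: `Rc<dyn Trait>` unwraps through its `NonNull<RcInner<dyn Trait>>`
        // field down to a built-in `*const RcInner<dyn Trait>`.)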
        let mut wide_pointer_layout = layout;
        while !wide_pointer_layout.ty.is_raw_ptr() && !wide_pointer_layout.ty.is_ref() {
            wide_pointer_layout = wide_pointer_layout
                .non_1zst_field(cx)
                .expect("not exactly one non-1-ZST field in a `DispatchFromDyn` type")
                .1
        }

        wide_pointer_layout.ty
    };

    // we now have a type like `*mut RcInner<dyn Trait>`
    // change its layout to that of `*mut ()`, a thin pointer, but keep the same type
    // this is understood as a special case elsewhere in the compiler
    let unit_ptr_ty = Ty::new_mut_ptr(tcx, tcx.types.unit);

    TyAndLayout {
        ty: wide_pointer_ty,

        // NOTE(eddyb) using an empty `ParamEnv`, and `unwrap`-ing the `Result`
        // should always work because the type is always `*mut ()`.
        ..tcx.layout_of(ty::TypingEnv::fully_monomorphized().as_query_input(unit_ptr_ty)).unwrap()
    }
}