rustc_target/callconv/mod.rs
use std::{fmt, iter};

use rustc_abi::{
    AddressSpace, Align, BackendRepr, CanonAbi, ExternAbi, HasDataLayout, Primitive, Reg, RegKind,
    Scalar, Size, TyAbiInterface, TyAndLayout,
};
use rustc_macros::HashStable_Generic;

pub use crate::spec::AbiMap;
use crate::spec::{HasTargetSpec, HasX86AbiOpt};

mod aarch64;
mod amdgpu;
mod arm;
mod avr;
mod bpf;
mod csky;
mod hexagon;
mod loongarch;
mod m68k;
mod mips;
mod mips64;
mod msp430;
mod nvptx64;
mod powerpc;
mod powerpc64;
mod riscv;
mod s390x;
mod sparc;
mod sparc64;
mod wasm;
mod x86;
mod x86_64;
mod x86_win32;
mod x86_win64;
mod xtensa;

#[derive(Clone, PartialEq, Eq, Hash, Debug, HashStable_Generic)]
pub enum PassMode {
    /// Ignore the argument.
    ///
    /// The argument is a ZST.
    Ignore,
    /// Pass the argument directly.
    ///
    /// The argument has a layout abi of `Scalar` or `Vector`.
    /// Unfortunately, due to past mistakes, in rare cases on wasm it can also be `Aggregate`.
    /// This is bad since it leaks LLVM implementation details into the ABI.
    /// (Also see <https://github.com/rust-lang/rust/issues/115666>.)
    Direct(ArgAttributes),
    /// Pass a pair's elements directly in two arguments.
    ///
    /// The argument has a layout abi of `ScalarPair`.
    Pair(ArgAttributes, ArgAttributes),
    /// Pass the argument after casting it. See the `CastTarget` docs for details.
    ///
    /// `pad_i32` indicates if a `Reg::i32()` dummy argument is emitted before the real argument.
    Cast { pad_i32: bool, cast: Box<CastTarget> },
    /// Pass the argument indirectly via a hidden pointer.
    ///
    /// The `meta_attrs` value, if any, is for the metadata (vtable or length) of an unsized
    /// argument. (This is the only mode that supports unsized arguments.)
    ///
    /// `on_stack` defines that the value should be passed at a fixed stack offset in accordance
    /// with the ABI rather than passed using a pointer. This corresponds to the `byval` LLVM
    /// argument attribute. The `byval` argument will use a byte array with the same size as the
    /// Rust type (which ensures that padding is preserved and that we do not rely on LLVM's
    /// struct layout), and will use the alignment specified in `attrs.pointee_align` (if `Some`)
    /// or the type's alignment (if `None`). This means that the alignment will not always match
    /// the Rust type's alignment; see the documentation of `pass_by_stack_offset` for more info.
    ///
    /// `on_stack` cannot be true for unsized arguments, i.e., when `meta_attrs` is `Some`.
    Indirect { attrs: ArgAttributes, meta_attrs: Option<ArgAttributes>, on_stack: bool },
}
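
// As an illustrative sketch only (the actual choice is target- and layout-dependent,
// made in `ArgAbi::new` below and refined by the per-target `compute_abi_info`
// functions), typical mappings on a 64-bit target might look like:
//
//     ()          -> PassMode::Ignore          (ZST)
//     u32         -> PassMode::Direct(..)      (Scalar)
//     (u32, u64)  -> PassMode::Pair(.., ..)    (ScalarPair)
//     [u8; 64]    -> PassMode::Indirect { .. } (Memory, passed via hidden pointer)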

impl PassMode {
    /// Checks if these two `PassMode` are equal enough to be considered "the same for all
    /// function call ABIs". However, the `Layout` can also impact ABI decisions,
    /// so that needs to be compared as well!
    pub fn eq_abi(&self, other: &Self) -> bool {
        match (self, other) {
            (PassMode::Ignore, PassMode::Ignore) => true,
            (PassMode::Direct(a1), PassMode::Direct(a2)) => a1.eq_abi(a2),
            (PassMode::Pair(a1, b1), PassMode::Pair(a2, b2)) => a1.eq_abi(a2) && b1.eq_abi(b2),
            (
                PassMode::Cast { cast: c1, pad_i32: pad1 },
                PassMode::Cast { cast: c2, pad_i32: pad2 },
            ) => c1.eq_abi(c2) && pad1 == pad2,
            (
                PassMode::Indirect { attrs: a1, meta_attrs: None, on_stack: s1 },
                PassMode::Indirect { attrs: a2, meta_attrs: None, on_stack: s2 },
            ) => a1.eq_abi(a2) && s1 == s2,
            (
                PassMode::Indirect { attrs: a1, meta_attrs: Some(e1), on_stack: s1 },
                PassMode::Indirect { attrs: a2, meta_attrs: Some(e2), on_stack: s2 },
            ) => a1.eq_abi(a2) && e1.eq_abi(e2) && s1 == s2,
            _ => false,
        }
    }
}

// Hack to disable non_upper_case_globals only for the bitflags! and not for the rest
// of this module
pub use attr_impl::ArgAttribute;

#[allow(non_upper_case_globals)]
#[allow(unused)]
mod attr_impl {
    use rustc_macros::HashStable_Generic;

    // The subset of llvm::Attribute needed for arguments, packed into a bitfield.
    #[derive(Clone, Copy, Default, Hash, PartialEq, Eq, HashStable_Generic)]
    pub struct ArgAttribute(u8);
    bitflags::bitflags! {
        impl ArgAttribute: u8 {
            const NoAlias = 1 << 1;
            const NoCapture = 1 << 2;
            const NonNull = 1 << 3;
            const ReadOnly = 1 << 4;
            const InReg = 1 << 5;
            const NoUndef = 1 << 6;
            const CapturesReadOnly = 1 << 7;
        }
    }
    rustc_data_structures::external_bitflags_debug! { ArgAttribute }
}
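
// A minimal usage sketch (illustrative only): `ArgAttribute` flags combine with the
// usual bitflags operators, e.g.
//
//     let flags = ArgAttribute::NoAlias | ArgAttribute::NonNull;
//     assert!(flags.contains(ArgAttribute::NoAlias));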

/// Sometimes an ABI requires small integers to be extended to a full or partial register. This enum
/// defines if this extension should be zero-extension or sign-extension when necessary. When it is
/// not necessary to extend the argument, this enum is ignored.
#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug, HashStable_Generic)]
pub enum ArgExtension {
    None,
    Zext,
    Sext,
}

/// A compact representation of LLVM attributes (at least those relevant for this module)
/// that can be manipulated without interacting with LLVM's Attribute machinery.
#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug, HashStable_Generic)]
pub struct ArgAttributes {
    pub regular: ArgAttribute,
    pub arg_ext: ArgExtension,
    /// The minimum size of the pointee, guaranteed to be valid for the duration of the whole call
    /// (corresponding to LLVM's dereferenceable_or_null attribute, i.e., it is okay for this to be
    /// set on a null pointer, but all non-null pointers must be dereferenceable).
    pub pointee_size: Size,
    /// The minimum alignment of the pointee, if any.
    pub pointee_align: Option<Align>,
}

impl ArgAttributes {
    pub fn new() -> Self {
        ArgAttributes {
            regular: ArgAttribute::default(),
            arg_ext: ArgExtension::None,
            pointee_size: Size::ZERO,
            pointee_align: None,
        }
    }

    pub fn ext(&mut self, ext: ArgExtension) -> &mut Self {
        assert!(
            self.arg_ext == ArgExtension::None || self.arg_ext == ext,
            "cannot set {:?} when {:?} is already set",
            ext,
            self.arg_ext
        );
        self.arg_ext = ext;
        self
    }

    pub fn set(&mut self, attr: ArgAttribute) -> &mut Self {
        self.regular |= attr;
        self
    }

    pub fn contains(&self, attr: ArgAttribute) -> bool {
        self.regular.contains(attr)
    }

    /// Checks if these two `ArgAttributes` are equal enough to be considered "the same for all
    /// function call ABIs".
    pub fn eq_abi(&self, other: &Self) -> bool {
        // There's only one regular attribute that matters for the call ABI: InReg.
        // Everything else is things like noalias, dereferenceable, nonnull, ...
        // (This also applies to pointee_size, pointee_align.)
        if self.regular.contains(ArgAttribute::InReg) != other.regular.contains(ArgAttribute::InReg)
        {
            return false;
        }
        // We also compare the sign extension mode -- this could let the callee make assumptions
        // about bits that conceptually were not even passed.
        if self.arg_ext != other.arg_ext {
            return false;
        }
        true
    }
}
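
// A minimal builder sketch (illustrative only): attributes accumulate in place via the
// `&mut Self`-returning methods, so calls can be chained on a mutable binding, e.g.
//
//     let mut attrs = ArgAttributes::new();
//     attrs.set(ArgAttribute::NoAlias).set(ArgAttribute::NonNull).ext(ArgExtension::Zext);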

impl From<ArgAttribute> for ArgAttributes {
    fn from(value: ArgAttribute) -> Self {
        Self {
            regular: value,
            arg_ext: ArgExtension::None,
            pointee_size: Size::ZERO,
            pointee_align: None,
        }
    }
}

/// An argument passed entirely in registers of the
/// same kind (e.g., HFA / HVA on PPC64 and AArch64).
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, HashStable_Generic)]
pub struct Uniform {
    pub unit: Reg,

    /// The total size of the argument, which can be:
    /// * equal to `unit.size` (one scalar/vector),
    /// * a multiple of `unit.size` (an array of scalars/vectors),
    /// * if `unit.kind` is `Integer`, the last element can be shorter, i.e., `{ i64, i64, i32 }`
    ///   for 64-bit integers with a total size of 20 bytes. When the argument is actually passed,
    ///   this size will be rounded up to the nearest multiple of `unit.size`.
    pub total: Size,

    /// Indicates that the argument is consecutive, in the sense that either all values need to be
    /// passed in registers, or all on the stack. If they are passed on the stack, there should be
    /// no additional padding between elements.
    pub is_consecutive: bool,
}

impl From<Reg> for Uniform {
    fn from(unit: Reg) -> Uniform {
        Uniform { unit, total: unit.size, is_consecutive: false }
    }
}

impl Uniform {
    pub fn align<C: HasDataLayout>(&self, cx: &C) -> Align {
        self.unit.align(cx)
    }

    /// Pass using one or more values of the given type, without requiring them to be consecutive.
    /// That is, some values may be passed in registers and some on the stack.
    pub fn new(unit: Reg, total: Size) -> Self {
        Uniform { unit, total, is_consecutive: false }
    }

    /// Pass using one or more consecutive values of the given type. Either all values will be
    /// passed in registers, or all on the stack.
    pub fn consecutive(unit: Reg, total: Size) -> Self {
        Uniform { unit, total, is_consecutive: true }
    }
}

/// Describes the type used for `PassMode::Cast`.
///
/// Passing arguments in this mode works as follows: the registers in the `prefix` (the ones that
/// are `Some`) get laid out one after the other (using `repr(C)` layout rules). Then the
/// `rest.unit` register type gets repeated often enough to cover `rest.total`. This describes the
/// actual type used for the call; the Rust type of the argument is then transmuted to this ABI type
/// (and all data in the padding between the registers is dropped).
#[derive(Clone, PartialEq, Eq, Hash, Debug, HashStable_Generic)]
pub struct CastTarget {
    pub prefix: [Option<Reg>; 8],
    /// The offset of `rest` from the start of the value. Currently only implemented for a `Reg`
    /// pair created by the `offset_pair` method.
    pub rest_offset: Option<Size>,
    pub rest: Uniform,
    pub attrs: ArgAttributes,
}
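
// A worked sketch (illustrative, not tied to any particular target): the 20-byte
// `{ i64, i64, i32 }` example from the `Uniform` docs needs no prefix registers, so it
// can be described as a plain `Uniform` cast; its `unaligned_size` is then
// 8 * ceil(20 / 8) = 24 bytes:
//
//     let cast = CastTarget::from(Uniform::new(Reg::i64(), Size::from_bytes(20)));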

impl From<Reg> for CastTarget {
    fn from(unit: Reg) -> CastTarget {
        CastTarget::from(Uniform::from(unit))
    }
}

impl From<Uniform> for CastTarget {
    fn from(uniform: Uniform) -> CastTarget {
        Self::prefixed([None; 8], uniform)
    }
}

impl CastTarget {
    pub fn prefixed(prefix: [Option<Reg>; 8], rest: Uniform) -> Self {
        Self { prefix, rest_offset: None, rest, attrs: ArgAttributes::new() }
    }

    pub fn offset_pair(a: Reg, offset_from_start: Size, b: Reg) -> Self {
        Self {
            prefix: [Some(a), None, None, None, None, None, None, None],
            rest_offset: Some(offset_from_start),
            rest: b.into(),
            attrs: ArgAttributes::new(),
        }
    }

    pub fn with_attrs(mut self, attrs: ArgAttributes) -> Self {
        self.attrs = attrs;
        self
    }

    pub fn pair(a: Reg, b: Reg) -> CastTarget {
        Self::prefixed([Some(a), None, None, None, None, None, None, None], Uniform::from(b))
    }

    /// When you only access the range containing valid data, you can use this unaligned size;
    /// otherwise, use the safer `size` method.
    pub fn unaligned_size<C: HasDataLayout>(&self, _cx: &C) -> Size {
        // Prefix arguments are passed in specific designated registers
        let prefix_size = if let Some(offset_from_start) = self.rest_offset {
            offset_from_start
        } else {
            self.prefix
                .iter()
                .filter_map(|x| x.map(|reg| reg.size))
                .fold(Size::ZERO, |acc, size| acc + size)
        };
        // Remaining arguments are passed in chunks of the unit size
        let rest_size =
            self.rest.unit.size * self.rest.total.bytes().div_ceil(self.rest.unit.size.bytes());

        prefix_size + rest_size
    }

    pub fn size<C: HasDataLayout>(&self, cx: &C) -> Size {
        self.unaligned_size(cx).align_to(self.align(cx))
    }

    pub fn align<C: HasDataLayout>(&self, cx: &C) -> Align {
        self.prefix
            .iter()
            .filter_map(|x| x.map(|reg| reg.align(cx)))
            .fold(cx.data_layout().aggregate_align.abi.max(self.rest.align(cx)), |acc, align| {
                acc.max(align)
            })
    }

    /// Checks if these two `CastTarget` are equal enough to be considered "the same for all
    /// function call ABIs".
    pub fn eq_abi(&self, other: &Self) -> bool {
        let CastTarget {
            prefix: prefix_l,
            rest_offset: rest_offset_l,
            rest: rest_l,
            attrs: attrs_l,
        } = self;
        let CastTarget {
            prefix: prefix_r,
            rest_offset: rest_offset_r,
            rest: rest_r,
            attrs: attrs_r,
        } = other;
        prefix_l == prefix_r
            && rest_offset_l == rest_offset_r
            && rest_l == rest_r
            && attrs_l.eq_abi(attrs_r)
    }
}

/// Information about how to pass an argument to,
/// or return a value from, a function, under some ABI.
#[derive(Clone, PartialEq, Eq, Hash, HashStable_Generic)]
pub struct ArgAbi<'a, Ty> {
    pub layout: TyAndLayout<'a, Ty>,
    pub mode: PassMode,
}

// Needs to be a custom impl because of the bounds on the `TyAndLayout` debug impl.
impl<'a, Ty: fmt::Display> fmt::Debug for ArgAbi<'a, Ty> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let ArgAbi { layout, mode } = self;
        f.debug_struct("ArgAbi").field("layout", layout).field("mode", mode).finish()
    }
}

impl<'a, Ty> ArgAbi<'a, Ty> {
    /// This defines the "default ABI" for that type, which is then adjusted in
    /// `fn_abi_adjust_for_abi`.
    pub fn new(
        cx: &impl HasDataLayout,
        layout: TyAndLayout<'a, Ty>,
        scalar_attrs: impl Fn(&TyAndLayout<'a, Ty>, Scalar, Size) -> ArgAttributes,
    ) -> Self {
        let mode = match layout.backend_repr {
            BackendRepr::Scalar(scalar) => {
                PassMode::Direct(scalar_attrs(&layout, scalar, Size::ZERO))
            }
            BackendRepr::ScalarPair(a, b) => PassMode::Pair(
                scalar_attrs(&layout, a, Size::ZERO),
                scalar_attrs(&layout, b, a.size(cx).align_to(b.align(cx).abi)),
            ),
            BackendRepr::SimdVector { .. } => PassMode::Direct(ArgAttributes::new()),
            BackendRepr::Memory { .. } => Self::indirect_pass_mode(&layout),
        };
        ArgAbi { layout, mode }
    }

    fn indirect_pass_mode(layout: &TyAndLayout<'a, Ty>) -> PassMode {
        let mut attrs = ArgAttributes::new();

        // For non-immediate arguments the callee gets its own copy of
        // the value on the stack, so there are no aliases. It's also
        // program-invisible, so it can't possibly be captured.
        attrs
            .set(ArgAttribute::NoAlias)
            .set(ArgAttribute::NoCapture)
            .set(ArgAttribute::NonNull)
            .set(ArgAttribute::NoUndef);
        attrs.pointee_size = layout.size;
        attrs.pointee_align = Some(layout.align.abi);

        let meta_attrs = layout.is_unsized().then_some(ArgAttributes::new());

        PassMode::Indirect { attrs, meta_attrs, on_stack: false }
    }

    /// Pass this argument directly instead. Should NOT be used!
    /// Only exists because of past ABI mistakes that will take time to fix
    /// (see <https://github.com/rust-lang/rust/issues/115666>).
    #[track_caller]
    pub fn make_direct_deprecated(&mut self) {
        match self.mode {
            PassMode::Indirect { .. } => {
                self.mode = PassMode::Direct(ArgAttributes::new());
            }
            PassMode::Ignore | PassMode::Direct(_) | PassMode::Pair(_, _) => {} // already direct
            _ => panic!("Tried to make {:?} direct", self.mode),
        }
    }

    /// Pass this argument indirectly, by passing a (thin or wide) pointer to the argument instead.
    /// This is valid for both sized and unsized arguments.
    #[track_caller]
    pub fn make_indirect(&mut self) {
        match self.mode {
            PassMode::Direct(_) | PassMode::Pair(_, _) => {
                self.mode = Self::indirect_pass_mode(&self.layout);
            }
            PassMode::Indirect { attrs: _, meta_attrs: _, on_stack: false } => {
                // already indirect
            }
            _ => panic!("Tried to make {:?} indirect", self.mode),
        }
    }

    /// Same as `make_indirect`, but for arguments that are ignored. Only needed for ABIs that pass
    /// ZSTs indirectly.
    #[track_caller]
    pub fn make_indirect_from_ignore(&mut self) {
        match self.mode {
            PassMode::Ignore => {
                self.mode = Self::indirect_pass_mode(&self.layout);
            }
            PassMode::Indirect { attrs: _, meta_attrs: _, on_stack: false } => {
                // already indirect
            }
            _ => panic!("Tried to make {:?} indirect (expected `PassMode::Ignore`)", self.mode),
        }
    }

    /// Pass this argument indirectly, by placing it at a fixed stack offset.
    /// This corresponds to the `byval` LLVM argument attribute.
    /// This is only valid for sized arguments.
    ///
    /// `byval_align` specifies the alignment of the `byval` stack slot, which does not need to
    /// correspond to the type's alignment. This will be `Some` if the target's ABI specifies that
    /// stack slots used for arguments passed by-value have specific alignment requirements which
    /// differ from the alignment used in other situations.
    ///
    /// If `None`, the type's alignment is used.
    ///
    /// If the resulting alignment differs from the type's alignment,
    /// the argument will be copied to an alloca with sufficient alignment,
    /// either in the caller (if the type's alignment is lower than the byval alignment)
    /// or in the callee (if the type's alignment is higher than the byval alignment),
    /// to ensure that Rust code never sees an underaligned pointer.
    pub fn pass_by_stack_offset(&mut self, byval_align: Option<Align>) {
        assert!(!self.layout.is_unsized(), "used byval ABI for unsized layout");
        self.make_indirect();
        match self.mode {
            PassMode::Indirect { ref mut attrs, meta_attrs: _, ref mut on_stack } => {
                *on_stack = true;

                // Some platforms, like 32-bit x86, change the alignment of the type when passing
                // `byval`. Account for that.
                if let Some(byval_align) = byval_align {
                    // On all targets with byval align this is currently true, so let's assert it.
                    debug_assert!(byval_align >= Align::from_bytes(4).unwrap());
                    attrs.pointee_align = Some(byval_align);
                }
            }
            _ => unreachable!(),
        }
    }

    pub fn extend_integer_width_to(&mut self, bits: u64) {
        // Only integers have signedness
        if let BackendRepr::Scalar(scalar) = self.layout.backend_repr
            && let Primitive::Int(i, signed) = scalar.primitive()
            && i.size().bits() < bits
            && let PassMode::Direct(ref mut attrs) = self.mode
        {
            if signed {
                attrs.ext(ArgExtension::Sext)
            } else {
                attrs.ext(ArgExtension::Zext)
            };
        }
    }
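
    // For example (an illustrative sketch): on a target whose ABI widens integer
    // arguments to 32 bits, `extend_integer_width_to(32)` marks an `i8` argument
    // `Sext` and a `u8` argument `Zext`, while `i32` and wider arguments are left
    // unchanged.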

    pub fn cast_to<T: Into<CastTarget>>(&mut self, target: T) {
        self.mode = PassMode::Cast { cast: Box::new(target.into()), pad_i32: false };
    }

    pub fn cast_to_and_pad_i32<T: Into<CastTarget>>(&mut self, target: T, pad_i32: bool) {
        self.mode = PassMode::Cast { cast: Box::new(target.into()), pad_i32 };
    }

    pub fn is_indirect(&self) -> bool {
        matches!(self.mode, PassMode::Indirect { .. })
    }

    pub fn is_sized_indirect(&self) -> bool {
        matches!(self.mode, PassMode::Indirect { attrs: _, meta_attrs: None, on_stack: _ })
    }

    pub fn is_unsized_indirect(&self) -> bool {
        matches!(self.mode, PassMode::Indirect { attrs: _, meta_attrs: Some(_), on_stack: _ })
    }

    pub fn is_ignore(&self) -> bool {
        matches!(self.mode, PassMode::Ignore)
    }

    /// Checks if these two `ArgAbi` are equal enough to be considered "the same for all
    /// function call ABIs".
    pub fn eq_abi(&self, other: &Self) -> bool
    where
        Ty: PartialEq,
    {
        // Ideally we'd just compare the `mode`, but that is not enough -- for some modes LLVM will
        // look at the type.
        self.layout.eq_abi(&other.layout) && self.mode.eq_abi(&other.mode) && {
            // `fn_arg_sanity_check` accepts `PassMode::Direct` for some aggregates.
            // That elevates any type difference to an ABI difference since we just use the
            // full Rust type as the LLVM argument/return type.
            if matches!(self.mode, PassMode::Direct(..))
                && matches!(self.layout.backend_repr, BackendRepr::Memory { .. })
            {
                // For aggregates in `Direct` mode to be compatible, the types need to be equal.
                self.layout.ty == other.layout.ty
            } else {
                true
            }
        }
    }
}

#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug, HashStable_Generic)]
pub enum RiscvInterruptKind {
    Machine,
    Supervisor,
}

impl RiscvInterruptKind {
    pub fn as_str(&self) -> &'static str {
        match self {
            Self::Machine => "machine",
            Self::Supervisor => "supervisor",
        }
    }
}

/// Metadata describing how the arguments to a native function
/// should be passed in order to respect the native ABI.
///
/// The signature represented by this type may not match the MIR function signature.
/// Certain attributes, like `#[track_caller]`, can introduce additional arguments, which are
/// present in [`FnAbi`], but not in `FnSig`.
/// While this difference is rarely relevant, it should still be kept in mind.
///
/// I will do my best to describe this structure, but these
/// comments are reverse-engineered and may be inaccurate. -NDM
#[derive(Clone, PartialEq, Eq, Hash, HashStable_Generic)]
pub struct FnAbi<'a, Ty> {
    /// The type, layout, and information about how each argument is passed.
    pub args: Box<[ArgAbi<'a, Ty>]>,

    /// The layout, type, and the way a value is returned from this function.
    pub ret: ArgAbi<'a, Ty>,

    /// Marks this function as variadic (accepting a variable number of arguments).
    pub c_variadic: bool,

    /// The count of non-variadic arguments.
    ///
    /// Should only be different from `args.len()` when `c_variadic` is true.
    /// This can be used to know whether an argument is variadic or not.
    pub fixed_count: u32,
    /// The calling convention of this function.
    pub conv: CanonAbi,
    /// Indicates if an unwind may happen across a call to this function.
    pub can_unwind: bool,
}
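
// For instance (an illustrative sketch): for a call to a C-variadic function such as
// `printf(fmt, x, y)` with signature `fn(*const c_char, ...)`, the `FnAbi` of the call
// site has `c_variadic: true`, `fixed_count: 1`, and `args.len() == 3`; arguments at
// index >= `fixed_count` are the variadic ones.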

// Needs to be a custom impl because of the bounds on the `TyAndLayout` debug impl.
impl<'a, Ty: fmt::Display> fmt::Debug for FnAbi<'a, Ty> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let FnAbi { args, ret, c_variadic, fixed_count, conv, can_unwind } = self;
        f.debug_struct("FnAbi")
            .field("args", args)
            .field("ret", ret)
            .field("c_variadic", c_variadic)
            .field("fixed_count", fixed_count)
            .field("conv", conv)
            .field("can_unwind", can_unwind)
            .finish()
    }
}

impl<'a, Ty> FnAbi<'a, Ty> {
    pub fn adjust_for_foreign_abi<C>(&mut self, cx: &C, abi: ExternAbi)
    where
        Ty: TyAbiInterface<'a, C> + Copy,
        C: HasDataLayout + HasTargetSpec + HasX86AbiOpt,
    {
        if abi == ExternAbi::X86Interrupt {
            if let Some(arg) = self.args.first_mut() {
                arg.pass_by_stack_offset(None);
            }
            return;
        }

        let spec = cx.target_spec();
        match &spec.arch[..] {
            "x86" => {
                let (flavor, regparm) = match abi {
                    ExternAbi::Fastcall { .. } | ExternAbi::Vectorcall { .. } => {
                        (x86::Flavor::FastcallOrVectorcall, None)
                    }
                    ExternAbi::C { .. } | ExternAbi::Cdecl { .. } | ExternAbi::Stdcall { .. } => {
                        (x86::Flavor::General, cx.x86_abi_opt().regparm)
                    }
                    _ => (x86::Flavor::General, None),
                };
                let reg_struct_return = cx.x86_abi_opt().reg_struct_return;
                let opts = x86::X86Options { flavor, regparm, reg_struct_return };
                if spec.is_like_msvc {
                    x86_win32::compute_abi_info(cx, self, opts);
                } else {
                    x86::compute_abi_info(cx, self, opts);
                }
            }
            "x86_64" => match abi {
                ExternAbi::SysV64 { .. } => x86_64::compute_abi_info(cx, self),
                ExternAbi::Win64 { .. } | ExternAbi::Vectorcall { .. } => {
                    x86_win64::compute_abi_info(cx, self)
                }
                _ => {
                    if cx.target_spec().is_like_windows {
                        x86_win64::compute_abi_info(cx, self)
                    } else {
                        x86_64::compute_abi_info(cx, self)
                    }
                }
            },
            "aarch64" | "arm64ec" => {
                let kind = if cx.target_spec().is_like_darwin {
                    aarch64::AbiKind::DarwinPCS
                } else if cx.target_spec().is_like_windows {
                    aarch64::AbiKind::Win64
                } else {
                    aarch64::AbiKind::AAPCS
                };
                aarch64::compute_abi_info(cx, self, kind)
            }
            "amdgpu" => amdgpu::compute_abi_info(cx, self),
            "arm" => arm::compute_abi_info(cx, self),
            "avr" => avr::compute_abi_info(self),
            "loongarch32" | "loongarch64" => loongarch::compute_abi_info(cx, self),
            "m68k" => m68k::compute_abi_info(self),
            "csky" => csky::compute_abi_info(self),
            "mips" | "mips32r6" => mips::compute_abi_info(cx, self),
            "mips64" | "mips64r6" => mips64::compute_abi_info(cx, self),
            "powerpc" => powerpc::compute_abi_info(cx, self),
            "powerpc64" => powerpc64::compute_abi_info(cx, self),
            "s390x" => s390x::compute_abi_info(cx, self),
            "msp430" => msp430::compute_abi_info(self),
            "sparc" => sparc::compute_abi_info(cx, self),
            "sparc64" => sparc64::compute_abi_info(cx, self),
            "nvptx64" => {
                if abi == ExternAbi::PtxKernel || abi == ExternAbi::GpuKernel {
                    nvptx64::compute_ptx_kernel_abi_info(cx, self)
                } else {
                    nvptx64::compute_abi_info(self)
                }
            }
            "hexagon" => hexagon::compute_abi_info(self),
            "xtensa" => xtensa::compute_abi_info(cx, self),
            "riscv32" | "riscv64" => riscv::compute_abi_info(cx, self),
            "wasm32" | "wasm64" => wasm::compute_abi_info(cx, self),
            "bpf" => bpf::compute_abi_info(self),
            arch => panic!("no lowering implemented for {arch}"),
        }
    }

    pub fn adjust_for_rust_abi<C>(&mut self, cx: &C)
    where
        Ty: TyAbiInterface<'a, C> + Copy,
        C: HasDataLayout + HasTargetSpec,
    {
        let spec = cx.target_spec();
        match &*spec.arch {
            "x86" => x86::compute_rust_abi_info(cx, self),
            "riscv32" | "riscv64" => riscv::compute_rust_abi_info(cx, self),
            "loongarch32" | "loongarch64" => loongarch::compute_rust_abi_info(cx, self),
            "aarch64" => aarch64::compute_rust_abi_info(cx, self),
            _ => {}
        };

        for (arg_idx, arg) in self
            .args
            .iter_mut()
            .enumerate()
            .map(|(idx, arg)| (Some(idx), arg))
            .chain(iter::once((None, &mut self.ret)))
        {
            // If the logic above already picked a specific type to cast the argument to, leave
            // that in place.
            if matches!(arg.mode, PassMode::Ignore | PassMode::Cast { .. }) {
                continue;
            }

            if arg_idx.is_none()
                && arg.layout.size > Primitive::Pointer(AddressSpace::ZERO).size(cx) * 2
                && !matches!(arg.layout.backend_repr, BackendRepr::SimdVector { .. })
            {
                // Return values larger than 2 registers using a return area
                // pointer. LLVM and Cranelift disagree about how to return
                // values that don't fit in the registers designated for return
                // values. LLVM will force the entire return value to be passed
                // by return area pointer, while Cranelift will look at each IR-level
                // return value independently and decide to pass it in a
                // register or not, which would result in the return value
                // being passed partially in registers and partially through a
                // return area pointer. For large IR-level values such as `i128`,
                // Cranelift will even split up the value into smaller chunks.
                //
                // While Cranelift may need to be fixed as the LLVM behavior is
                // generally more correct with respect to the surface language,
                // forcing this behavior in rustc itself makes it easier for
                // other backends to conform to the Rust ABI, and for the C ABI
                // rustc already handles this behavior anyway.
                //
                // In addition, LLVM's decision to pass the return value in
                // registers or using a return area pointer depends on how
                // exactly the return type is lowered to an LLVM IR type. For
                // example `Option<u128>` can be lowered as `{ i128, i128 }`
                // in which case the x86_64 backend would use a return area
                // pointer, or it could be passed as `{ i32, i128 }` in which
                // case the x86_64 backend would pass it in registers by taking
                // advantage of an LLVM ABI extension that allows using 3
                // registers for the x86_64 sysv call conv rather than the
                // officially specified 2 registers.
                //
                // FIXME: Technically we should look at the amount of available
                // return registers rather than guessing that there are 2
                // registers for return values. In practice only a couple of
                // architectures have fewer than 2 return registers, none of
                // which are supported by Cranelift.
                //
                // NOTE: This adjustment is only necessary for the Rust ABI, as
                // for other ABIs the calling convention implementations in
                // rustc_target already ensure that any return value which doesn't
                // fit in the available amount of return registers is passed in
                // the right way for the current target.
                //
                // The adjustment is neither necessary nor desired for types with a vector
                // representation; those are handled below.
                arg.make_indirect();
                continue;
            }

            match arg.layout.backend_repr {
                BackendRepr::Memory { .. } => {
                    // Compute `Aggregate` ABI.

                    let is_indirect_not_on_stack =
                        matches!(arg.mode, PassMode::Indirect { on_stack: false, .. });
                    assert!(is_indirect_not_on_stack);

                    let size = arg.layout.size;
                    if arg.layout.is_sized()
                        && size <= Primitive::Pointer(AddressSpace::ZERO).size(cx)
                    {
                        // We want to pass small aggregates as immediates, but using
                        // an LLVM aggregate type for this leads to bad optimizations,
                        // so we pick an appropriately sized integer type instead.
                        arg.cast_to(Reg { kind: RegKind::Integer, size });
                    }
                }

                BackendRepr::SimdVector { .. } => {
                    // This is a fun case! The gist of what this is doing is
                    // that we want callers and callees to always agree on the
                    // ABI of how they pass SIMD arguments. If we were to *not*
                    // make these arguments indirect, then they'd be immediates
                    // in LLVM, which means that they'd use whatever the
                    // appropriate ABI is for the callee and the caller. That
                    // means, for example, if the caller doesn't have AVX
                    // enabled but the callee does, then passing an AVX argument
                    // across this boundary would cause corrupt data to show up.
                    //
                    // This problem is fixed by unconditionally passing SIMD
                    // arguments through memory between callers and callees,
                    // which should get them all to agree on the ABI regardless
                    // of target feature sets. Some more information about this
                    // issue can be found in #44367.
                    //
                    // We *could* do better in some cases, e.g. on x86_64 targets where SSE2 is
                    // required. However, it turns out that this makes LLVM worse at optimizing
                    // such code, so we pass things indirectly even there. See #139029 for more.
                    if spec.simd_types_indirect {
                        arg.make_indirect();
                    }
                }

                _ => {}
            }
        }
    }
}

// Some types are used a lot. Make sure they don't unintentionally get bigger.
#[cfg(target_pointer_width = "64")]
mod size_asserts {
    use rustc_data_structures::static_assert_size;

    use super::*;
    // tidy-alphabetical-start
    static_assert_size!(ArgAbi<'_, usize>, 56);
    static_assert_size!(FnAbi<'_, usize>, 80);
    // tidy-alphabetical-end
}