rustc_codegen_llvm/abi.rs

use std::borrow::Borrow;
use std::cmp;

use libc::c_uint;
use rustc_abi::{BackendRepr, HasDataLayout, Primitive, Reg, RegKind, Size};
use rustc_codegen_ssa::MemFlags;
use rustc_codegen_ssa::mir::operand::{OperandRef, OperandValue};
use rustc_codegen_ssa::mir::place::{PlaceRef, PlaceValue};
use rustc_codegen_ssa::traits::*;
use rustc_middle::ty::Ty;
use rustc_middle::ty::layout::LayoutOf;
use rustc_middle::{bug, ty};
use rustc_session::config;
use rustc_target::callconv::{
    ArgAbi, ArgAttribute, ArgAttributes, ArgExtension, CastTarget, Conv, FnAbi, PassMode,
};
use rustc_target::spec::SanitizerSet;
use smallvec::SmallVec;

use crate::attributes::{self, llfn_attrs_from_instance};
use crate::builder::Builder;
use crate::context::CodegenCx;
use crate::llvm::{self, Attribute, AttributePlace};
use crate::type_::Type;
use crate::type_of::LayoutLlvmExt;
use crate::value::Value;

trait ArgAttributesExt {
    fn apply_attrs_to_llfn(&self, idx: AttributePlace, cx: &CodegenCx<'_, '_>, llfn: &Value);
    fn apply_attrs_to_callsite(
        &self,
        idx: AttributePlace,
        cx: &CodegenCx<'_, '_>,
        callsite: &Value,
    );
}

const ABI_AFFECTING_ATTRIBUTES: [(ArgAttribute, llvm::AttributeKind); 1] =
    [(ArgAttribute::InReg, llvm::AttributeKind::InReg)];

const OPTIMIZATION_ATTRIBUTES: [(ArgAttribute, llvm::AttributeKind); 5] = [
    (ArgAttribute::NoAlias, llvm::AttributeKind::NoAlias),
    (ArgAttribute::NoCapture, llvm::AttributeKind::NoCapture),
    (ArgAttribute::NonNull, llvm::AttributeKind::NonNull),
    (ArgAttribute::ReadOnly, llvm::AttributeKind::ReadOnly),
    (ArgAttribute::NoUndef, llvm::AttributeKind::NoUndef),
];

fn get_attrs<'ll>(this: &ArgAttributes, cx: &CodegenCx<'ll, '_>) -> SmallVec<[&'ll Attribute; 8]> {
    let mut regular = this.regular;

    let mut attrs = SmallVec::new();

    // ABI-affecting attributes must always be applied
    for (attr, llattr) in ABI_AFFECTING_ATTRIBUTES {
        if regular.contains(attr) {
            attrs.push(llattr.create_attr(cx.llcx));
        }
    }
    if let Some(align) = this.pointee_align {
        attrs.push(llvm::CreateAlignmentAttr(cx.llcx, align.bytes()));
    }
    match this.arg_ext {
        ArgExtension::None => {}
        ArgExtension::Zext => attrs.push(llvm::AttributeKind::ZExt.create_attr(cx.llcx)),
        ArgExtension::Sext => attrs.push(llvm::AttributeKind::SExt.create_attr(cx.llcx)),
    }

    // Only apply remaining attributes when optimizing
    if cx.sess().opts.optimize != config::OptLevel::No {
        let deref = this.pointee_size.bytes();
        if deref != 0 {
            if regular.contains(ArgAttribute::NonNull) {
                attrs.push(llvm::CreateDereferenceableAttr(cx.llcx, deref));
            } else {
                attrs.push(llvm::CreateDereferenceableOrNullAttr(cx.llcx, deref));
            }
            regular -= ArgAttribute::NonNull;
        }
        for (attr, llattr) in OPTIMIZATION_ATTRIBUTES {
            if regular.contains(attr) {
                attrs.push(llattr.create_attr(cx.llcx));
            }
        }
    } else if cx.tcx.sess.opts.unstable_opts.sanitizer.contains(SanitizerSet::MEMORY) {
        // If we're not optimising, *but* memory sanitizer is on, emit noundef, since it affects
        // memory sanitizer's behavior.

        if regular.contains(ArgAttribute::NoUndef) {
            attrs.push(llvm::AttributeKind::NoUndef.create_attr(cx.llcx));
        }
    }

    attrs
}
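
// Illustrative sketch of the function above (an assumption about the typical
// attribute set, not exercised here): for a shared reference like `&u32` at
// `-O`, `regular` usually contains `NonNull | NoAlias | ReadOnly | NoUndef`
// with a 4-byte, 4-aligned pointee, so this would emit roughly
// `align(4) dereferenceable(4) noalias readonly noundef` on the parameter.
// Note that `nonnull` is folded away: once `dereferenceable` is pushed,
// `NonNull` is removed from `regular`.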

impl ArgAttributesExt for ArgAttributes {
    fn apply_attrs_to_llfn(&self, idx: AttributePlace, cx: &CodegenCx<'_, '_>, llfn: &Value) {
        let attrs = get_attrs(self, cx);
        attributes::apply_to_llfn(llfn, idx, &attrs);
    }

    fn apply_attrs_to_callsite(
        &self,
        idx: AttributePlace,
        cx: &CodegenCx<'_, '_>,
        callsite: &Value,
    ) {
        let attrs = get_attrs(self, cx);
        attributes::apply_to_callsite(callsite, idx, &attrs);
    }
}

pub(crate) trait LlvmType {
    fn llvm_type<'ll>(&self, cx: &CodegenCx<'ll, '_>) -> &'ll Type;
}

impl LlvmType for Reg {
    fn llvm_type<'ll>(&self, cx: &CodegenCx<'ll, '_>) -> &'ll Type {
        match self.kind {
            RegKind::Integer => cx.type_ix(self.size.bits()),
            RegKind::Float => match self.size.bits() {
                16 => cx.type_f16(),
                32 => cx.type_f32(),
                64 => cx.type_f64(),
                128 => cx.type_f128(),
                _ => bug!("unsupported float: {:?}", self),
            },
            RegKind::Vector => cx.type_vector(cx.type_i8(), self.size.bytes()),
        }
    }
}
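
// For example, the lowering above maps `Reg::i64()` to LLVM `i64`, an 8-byte
// float register to `double`, and a 16-byte vector register to `<16 x i8>`.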

impl LlvmType for CastTarget {
    fn llvm_type<'ll>(&self, cx: &CodegenCx<'ll, '_>) -> &'ll Type {
        let rest_ll_unit = self.rest.unit.llvm_type(cx);
        let rest_count = if self.rest.total == Size::ZERO {
            0
        } else {
            assert_ne!(
                self.rest.unit.size,
                Size::ZERO,
                "total size {:?} cannot be divided into units of zero size",
                self.rest.total
            );
            if self.rest.total.bytes() % self.rest.unit.size.bytes() != 0 {
                assert_eq!(self.rest.unit.kind, RegKind::Integer, "only int regs can be split");
            }
            self.rest.total.bytes().div_ceil(self.rest.unit.size.bytes())
        };

        // Simplify to a single unit or an array if there's no prefix.
        // This produces the same layout, but using a simpler type.
        if self.prefix.iter().all(|x| x.is_none()) {
            // We can't do this if is_consecutive is set and the unit would get
            // split on the target. Currently, this is only relevant for i128
            // registers.
            if rest_count == 1 && (!self.rest.is_consecutive || self.rest.unit != Reg::i128()) {
                return rest_ll_unit;
            }

            return cx.type_array(rest_ll_unit, rest_count);
        }

        // Generate a struct type with the prefix and the "rest" arguments.
        let prefix_args =
            self.prefix.iter().flat_map(|option_reg| option_reg.map(|reg| reg.llvm_type(cx)));
        let rest_args = (0..rest_count).map(|_| rest_ll_unit);
        let args: Vec<_> = prefix_args.chain(rest_args).collect();
        cx.type_struct(&args, false)
    }
}
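
// Illustrative examples of the lowering above: with an empty prefix and
// `rest = { unit: Reg::i64(), total: 16 bytes }` this yields `[2 x i64]`
// (or plain `i64` when `total` is a single unit); with a prefix containing
// `Some(Reg::i32())` and the same rest it yields the struct
// `{ i32, i64, i64 }` instead.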

trait ArgAbiExt<'ll, 'tcx> {
    fn store(
        &self,
        bx: &mut Builder<'_, 'll, 'tcx>,
        val: &'ll Value,
        dst: PlaceRef<'tcx, &'ll Value>,
    );
    fn store_fn_arg(
        &self,
        bx: &mut Builder<'_, 'll, 'tcx>,
        idx: &mut usize,
        dst: PlaceRef<'tcx, &'ll Value>,
    );
}

impl<'ll, 'tcx> ArgAbiExt<'ll, 'tcx> for ArgAbi<'tcx, Ty<'tcx>> {
    /// Stores a direct/indirect value described by this ArgAbi into a
    /// place for the original Rust type of this argument/return.
    /// Can be used for both storing formal arguments into Rust variables
    /// or results of call/invoke instructions into their destinations.
    fn store(
        &self,
        bx: &mut Builder<'_, 'll, 'tcx>,
        val: &'ll Value,
        dst: PlaceRef<'tcx, &'ll Value>,
    ) {
        match &self.mode {
            PassMode::Ignore => {}
            // Sized indirect arguments
            PassMode::Indirect { attrs, meta_attrs: None, on_stack: _ } => {
                let align = attrs.pointee_align.unwrap_or(self.layout.align.abi);
                OperandValue::Ref(PlaceValue::new_sized(val, align)).store(bx, dst);
            }
            // Unsized indirect arguments
            PassMode::Indirect { attrs: _, meta_attrs: Some(_), on_stack: _ } => {
                bug!("unsized `ArgAbi` must be handled through `store_fn_arg`");
            }
            PassMode::Cast { cast, pad_i32: _ } => {
                // The ABI mandates that the value is passed as a different struct representation.
                // Spill and reload it from the stack to convert from the ABI representation to
                // the Rust representation.
                let scratch_size = cast.size(bx);
                let scratch_align = cast.align(bx);
                // Note that the ABI type may be either larger or smaller than the Rust type,
                // due to the presence or absence of trailing padding. For example:
                // - On some ABIs, the Rust layout { f64, f32, <f32 padding> } may omit padding
                //   when passed by value, making it smaller.
                // - On some ABIs, the Rust layout { u16, u16, u16 } may be padded up to 8 bytes
                //   when passed by value, making it larger.
                let copy_bytes =
                    cmp::min(cast.unaligned_size(bx).bytes(), self.layout.size.bytes());
                // Allocate some scratch space...
                let llscratch = bx.alloca(scratch_size, scratch_align);
                bx.lifetime_start(llscratch, scratch_size);
                // ...store the value...
                bx.store(val, llscratch, scratch_align);
                // ...and then memcpy it to the intended destination.
                bx.memcpy(
                    dst.val.llval,
                    self.layout.align.abi,
                    llscratch,
                    scratch_align,
                    bx.const_usize(copy_bytes),
                    MemFlags::empty(),
                );
                bx.lifetime_end(llscratch, scratch_size);
            }
            _ => {
                OperandRef::from_immediate_or_packed_pair(bx, val, self.layout).val.store(bx, dst);
            }
        }
    }
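
    // A rough sketch of the IR the `PassMode::Cast` arm above produces when
    // storing an ABI value of cast type `{ i64, i32 }` (16 bytes with
    // trailing padding) into a 12-byte Rust layout (names illustrative):
    //
    //     %scratch = alloca [16 x i8], align 8
    //     call void @llvm.lifetime.start(...)
    //     store { i64, i32 } %val, ptr %scratch, align 8
    //     call void @llvm.memcpy(ptr %dst, ptr %scratch, i64 12, ...)
    //     call void @llvm.lifetime.end(...)
    //
    // Only `min(unaligned cast size, Rust size)` bytes are copied, so
    // trailing padding on either side is never read or written.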

    fn store_fn_arg(
        &self,
        bx: &mut Builder<'_, 'll, 'tcx>,
        idx: &mut usize,
        dst: PlaceRef<'tcx, &'ll Value>,
    ) {
        let mut next = || {
            let val = llvm::get_param(bx.llfn(), *idx as c_uint);
            *idx += 1;
            val
        };
        match self.mode {
            PassMode::Ignore => {}
            PassMode::Pair(..) => {
                OperandValue::Pair(next(), next()).store(bx, dst);
            }
            PassMode::Indirect { attrs: _, meta_attrs: Some(_), on_stack: _ } => {
                let place_val = PlaceValue {
                    llval: next(),
                    llextra: Some(next()),
                    align: self.layout.align.abi,
                };
                OperandValue::Ref(place_val).store(bx, dst);
            }
            PassMode::Direct(_)
            | PassMode::Indirect { attrs: _, meta_attrs: None, on_stack: _ }
            | PassMode::Cast { .. } => {
                let next_arg = next();
                self.store(bx, next_arg, dst);
            }
        }
    }
}
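
// For instance, a `PassMode::Pair` argument whose two halves arrive as LLVM
// params 3 and 4 consumes both via `next()` and leaves `*idx` at 5, keeping
// the LLVM parameter cursor in sync with the Rust-level argument index.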

impl<'ll, 'tcx> ArgAbiBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> {
    fn store_fn_arg(
        &mut self,
        arg_abi: &ArgAbi<'tcx, Ty<'tcx>>,
        idx: &mut usize,
        dst: PlaceRef<'tcx, Self::Value>,
    ) {
        arg_abi.store_fn_arg(self, idx, dst)
    }

    fn store_arg(
        &mut self,
        arg_abi: &ArgAbi<'tcx, Ty<'tcx>>,
        val: &'ll Value,
        dst: PlaceRef<'tcx, &'ll Value>,
    ) {
        arg_abi.store(self, val, dst)
    }
}

pub(crate) trait FnAbiLlvmExt<'ll, 'tcx> {
    fn llvm_type(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type;
    fn ptr_to_llvm_type(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type;
    fn llvm_cconv(&self, cx: &CodegenCx<'ll, 'tcx>) -> llvm::CallConv;

    /// Apply attributes to a function declaration/definition.
    fn apply_attrs_llfn(
        &self,
        cx: &CodegenCx<'ll, 'tcx>,
        llfn: &'ll Value,
        instance: Option<ty::Instance<'tcx>>,
    );

    /// Apply attributes to a function call.
    fn apply_attrs_callsite(&self, bx: &mut Builder<'_, 'll, 'tcx>, callsite: &'ll Value);
}

impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
    fn llvm_type(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type {
        // Ignore "extra" args from the call site for C variadic functions.
        // Only the "fixed" args are part of the LLVM function signature.
        let args =
            if self.c_variadic { &self.args[..self.fixed_count as usize] } else { &self.args };

        // This capacity calculation is approximate.
        let mut llargument_tys = Vec::with_capacity(
            self.args.len() + if let PassMode::Indirect { .. } = self.ret.mode { 1 } else { 0 },
        );

        let llreturn_ty = match &self.ret.mode {
            PassMode::Ignore => cx.type_void(),
            PassMode::Direct(_) | PassMode::Pair(..) => self.ret.layout.immediate_llvm_type(cx),
            PassMode::Cast { cast, pad_i32: _ } => cast.llvm_type(cx),
            PassMode::Indirect { .. } => {
                llargument_tys.push(cx.type_ptr());
                cx.type_void()
            }
        };

        for arg in args {
            // Note that the exact number of arguments pushed here is carefully synchronized with
            // code all over the place, both in the codegen_llvm and codegen_ssa crates. That's how
            // other code then knows which LLVM argument(s) correspond to the n-th Rust argument.
            let llarg_ty = match &arg.mode {
                PassMode::Ignore => continue,
                PassMode::Direct(_) => {
                    // ABI-compatible Rust types have the same `layout.abi` (up to validity ranges),
                    // and for Scalar ABIs the LLVM type is fully determined by `layout.abi`,
                    // guaranteeing that we generate ABI-compatible LLVM IR.
                    arg.layout.immediate_llvm_type(cx)
                }
                PassMode::Pair(..) => {
                    // ABI-compatible Rust types have the same `layout.abi` (up to validity ranges),
                    // so for ScalarPair we can easily be sure that we are generating ABI-compatible
                    // LLVM IR.
                    llargument_tys.push(arg.layout.scalar_pair_element_llvm_type(cx, 0, true));
                    llargument_tys.push(arg.layout.scalar_pair_element_llvm_type(cx, 1, true));
                    continue;
                }
                PassMode::Indirect { attrs: _, meta_attrs: Some(_), on_stack: _ } => {
                    // Construct the type of a (wide) pointer to `ty`, and pass its two fields.
                    // Any two ABI-compatible unsized types have the same metadata type and
                    // moreover the same metadata value leads to the same dynamic size and
                    // alignment, so this respects ABI compatibility.
                    let ptr_ty = Ty::new_mut_ptr(cx.tcx, arg.layout.ty);
                    let ptr_layout = cx.layout_of(ptr_ty);
                    llargument_tys.push(ptr_layout.scalar_pair_element_llvm_type(cx, 0, true));
                    llargument_tys.push(ptr_layout.scalar_pair_element_llvm_type(cx, 1, true));
                    continue;
                }
                PassMode::Indirect { attrs: _, meta_attrs: None, on_stack: _ } => cx.type_ptr(),
                PassMode::Cast { cast, pad_i32 } => {
                    // Add padding.
                    if *pad_i32 {
                        llargument_tys.push(Reg::i32().llvm_type(cx));
                    }
                    // Compute the LLVM type we use for this function from the cast type.
                    // We assume here that ABI-compatible Rust types have the same cast type.
                    cast.llvm_type(cx)
                }
            };
            llargument_tys.push(llarg_ty);
        }

        if self.c_variadic {
            cx.type_variadic_func(&llargument_tys, llreturn_ty)
        } else {
            cx.type_func(&llargument_tys, llreturn_ty)
        }
    }
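
    // An illustrative end-to-end example of the signature lowering above: a
    // function whose return is `PassMode::Indirect` and whose sole argument
    // is a `PassMode::Pair` of two `i32` scalars lowers to
    // `void (ptr, i32, i32)`, with the pointer to the return slot prepended
    // as the first LLVM parameter.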

    fn ptr_to_llvm_type(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type {
        cx.type_ptr_ext(cx.data_layout().instruction_address_space)
    }

    fn llvm_cconv(&self, cx: &CodegenCx<'ll, 'tcx>) -> llvm::CallConv {
        llvm::CallConv::from_conv(self.conv, cx.tcx.sess.target.arch.borrow())
    }

    fn apply_attrs_llfn(
        &self,
        cx: &CodegenCx<'ll, 'tcx>,
        llfn: &'ll Value,
        instance: Option<ty::Instance<'tcx>>,
    ) {
        let mut func_attrs = SmallVec::<[_; 3]>::new();
        if self.ret.layout.is_uninhabited() {
            func_attrs.push(llvm::AttributeKind::NoReturn.create_attr(cx.llcx));
        }
        if !self.can_unwind {
            func_attrs.push(llvm::AttributeKind::NoUnwind.create_attr(cx.llcx));
        }
        if let Conv::RiscvInterrupt { kind } = self.conv {
            func_attrs.push(llvm::CreateAttrStringValue(cx.llcx, "interrupt", kind.as_str()));
        }
        if let Conv::CCmseNonSecureEntry = self.conv {
            func_attrs.push(llvm::CreateAttrString(cx.llcx, "cmse_nonsecure_entry"))
        }
        attributes::apply_to_llfn(llfn, llvm::AttributePlace::Function, &{ func_attrs });

        let mut i = 0;
        let mut apply = |attrs: &ArgAttributes| {
            attrs.apply_attrs_to_llfn(llvm::AttributePlace::Argument(i), cx, llfn);
            i += 1;
            i - 1
        };

        let apply_range_attr = |idx: AttributePlace, scalar: rustc_abi::Scalar| {
            if cx.sess().opts.optimize != config::OptLevel::No
                && matches!(scalar.primitive(), Primitive::Int(..))
                // If the value is a boolean, the range is 0..2 and that ultimately
                // becomes 0..0 when the type becomes i1, which would be rejected
                // by the LLVM verifier.
                && !scalar.is_bool()
                // LLVM also rejects the full range.
                && !scalar.is_always_valid(cx)
            {
                attributes::apply_to_llfn(
                    llfn,
                    idx,
                    &[llvm::CreateRangeAttr(cx.llcx, scalar.size(cx), scalar.valid_range(cx))],
                );
            }
        };
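
        // E.g. (illustrative): a `char` argument is a 4-byte `Int` scalar
        // with valid range 0..=0x10FFFF, so at `-O` the closure above
        // attaches roughly `range(i32 0, 1114112)` to the parameter
        // (LLVM encodes the range as a half-open interval, hence the
        // exclusive upper bound of 0x10FFFF + 1).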

        match &self.ret.mode {
            PassMode::Direct(attrs) => {
                attrs.apply_attrs_to_llfn(llvm::AttributePlace::ReturnValue, cx, llfn);
                if let BackendRepr::Scalar(scalar) = self.ret.layout.backend_repr {
                    apply_range_attr(llvm::AttributePlace::ReturnValue, scalar);
                }
            }
            PassMode::Indirect { attrs, meta_attrs: _, on_stack } => {
                assert!(!on_stack);
                let i = apply(attrs);
                let sret = llvm::CreateStructRetAttr(
                    cx.llcx,
                    cx.type_array(cx.type_i8(), self.ret.layout.size.bytes()),
                );
                attributes::apply_to_llfn(llfn, llvm::AttributePlace::Argument(i), &[sret]);
                if cx.sess().opts.optimize != config::OptLevel::No {
                    attributes::apply_to_llfn(
                        llfn,
                        llvm::AttributePlace::Argument(i),
                        &[
                            llvm::AttributeKind::Writable.create_attr(cx.llcx),
                            llvm::AttributeKind::DeadOnUnwind.create_attr(cx.llcx),
                        ],
                    );
                }
            }
            PassMode::Cast { cast, pad_i32: _ } => {
                cast.attrs.apply_attrs_to_llfn(llvm::AttributePlace::ReturnValue, cx, llfn);
            }
            _ => {}
        }
        for arg in self.args.iter() {
            match &arg.mode {
                PassMode::Ignore => {}
                PassMode::Indirect { attrs, meta_attrs: None, on_stack: true } => {
                    let i = apply(attrs);
                    let byval = llvm::CreateByValAttr(
                        cx.llcx,
                        cx.type_array(cx.type_i8(), arg.layout.size.bytes()),
                    );
                    attributes::apply_to_llfn(llfn, llvm::AttributePlace::Argument(i), &[byval]);
                }
                PassMode::Direct(attrs) => {
                    let i = apply(attrs);
                    if let BackendRepr::Scalar(scalar) = arg.layout.backend_repr {
                        apply_range_attr(llvm::AttributePlace::Argument(i), scalar);
                    }
                }
                PassMode::Indirect { attrs, meta_attrs: None, on_stack: false } => {
                    apply(attrs);
                }
                PassMode::Indirect { attrs, meta_attrs: Some(meta_attrs), on_stack } => {
                    assert!(!on_stack);
                    apply(attrs);
                    apply(meta_attrs);
                }
                PassMode::Pair(a, b) => {
                    let i = apply(a);
                    let ii = apply(b);
                    if let BackendRepr::ScalarPair(scalar_a, scalar_b) = arg.layout.backend_repr {
                        apply_range_attr(llvm::AttributePlace::Argument(i), scalar_a);
                        apply_range_attr(llvm::AttributePlace::Argument(ii), scalar_b);
                    }
                }
                PassMode::Cast { cast, pad_i32 } => {
                    if *pad_i32 {
                        apply(&ArgAttributes::new());
                    }
                    apply(&cast.attrs);
                }
            }
        }

        // If the declaration has an associated instance, compute extra attributes based on that.
        if let Some(instance) = instance {
            llfn_attrs_from_instance(cx, llfn, instance);
        }
    }
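
    // As a sketch of the effect above: an indirectly-returned value of size
    // 24 bytes ends up declared roughly as
    // `define void @f(ptr sret([24 x i8]) writable dead_on_unwind %0, ...)`
    // when optimizations are enabled (exact attribute set also depends on
    // the `ArgAttributes` applied via `apply`).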

    fn apply_attrs_callsite(&self, bx: &mut Builder<'_, 'll, 'tcx>, callsite: &'ll Value) {
        let mut func_attrs = SmallVec::<[_; 2]>::new();
        if self.ret.layout.is_uninhabited() {
            func_attrs.push(llvm::AttributeKind::NoReturn.create_attr(bx.cx.llcx));
        }
        if !self.can_unwind {
            func_attrs.push(llvm::AttributeKind::NoUnwind.create_attr(bx.cx.llcx));
        }
        attributes::apply_to_callsite(callsite, llvm::AttributePlace::Function, &{ func_attrs });

        let mut i = 0;
        let mut apply = |cx: &CodegenCx<'_, '_>, attrs: &ArgAttributes| {
            attrs.apply_attrs_to_callsite(llvm::AttributePlace::Argument(i), cx, callsite);
            i += 1;
            i - 1
        };
        match &self.ret.mode {
            PassMode::Direct(attrs) => {
                attrs.apply_attrs_to_callsite(llvm::AttributePlace::ReturnValue, bx.cx, callsite);
            }
            PassMode::Indirect { attrs, meta_attrs: _, on_stack } => {
                assert!(!on_stack);
                let i = apply(bx.cx, attrs);
                let sret = llvm::CreateStructRetAttr(
                    bx.cx.llcx,
                    bx.cx.type_array(bx.cx.type_i8(), self.ret.layout.size.bytes()),
                );
                attributes::apply_to_callsite(callsite, llvm::AttributePlace::Argument(i), &[sret]);
            }
            PassMode::Cast { cast, pad_i32: _ } => {
                cast.attrs.apply_attrs_to_callsite(
                    llvm::AttributePlace::ReturnValue,
                    bx.cx,
                    callsite,
                );
            }
            _ => {}
        }
        for arg in self.args.iter() {
            match &arg.mode {
                PassMode::Ignore => {}
                PassMode::Indirect { attrs, meta_attrs: None, on_stack: true } => {
                    let i = apply(bx.cx, attrs);
                    let byval = llvm::CreateByValAttr(
                        bx.cx.llcx,
                        bx.cx.type_array(bx.cx.type_i8(), arg.layout.size.bytes()),
                    );
                    attributes::apply_to_callsite(
                        callsite,
                        llvm::AttributePlace::Argument(i),
                        &[byval],
                    );
                }
                PassMode::Direct(attrs)
                | PassMode::Indirect { attrs, meta_attrs: None, on_stack: false } => {
                    apply(bx.cx, attrs);
                }
                PassMode::Indirect { attrs, meta_attrs: Some(meta_attrs), on_stack: _ } => {
                    apply(bx.cx, attrs);
                    apply(bx.cx, meta_attrs);
                }
                PassMode::Pair(a, b) => {
                    apply(bx.cx, a);
                    apply(bx.cx, b);
                }
                PassMode::Cast { cast, pad_i32 } => {
                    if *pad_i32 {
                        apply(bx.cx, &ArgAttributes::new());
                    }
                    apply(bx.cx, &cast.attrs);
                }
            }
        }

        let cconv = self.llvm_cconv(bx.cx);
        if cconv != llvm::CCallConv {
            llvm::SetInstructionCallConv(callsite, cconv);
        }

        if self.conv == Conv::CCmseNonSecureCall {
            // This will probably get ignored on all targets but those supporting the TrustZone-M
            // extension (thumbv8m targets).
            let cmse_nonsecure_call = llvm::CreateAttrString(bx.cx.llcx, "cmse_nonsecure_call");
            attributes::apply_to_callsite(
                callsite,
                llvm::AttributePlace::Function,
                &[cmse_nonsecure_call],
            );
        }

        // Some intrinsics require that an elementtype attribute (with the pointee type of a
        // pointer argument) is added to the callsite.
        let element_type_index = unsafe { llvm::LLVMRustGetElementTypeArgIndex(callsite) };
        if element_type_index >= 0 {
            let arg_ty = self.args[element_type_index as usize].layout.ty;
            let pointee_ty = arg_ty.builtin_deref(true).expect("Must be pointer argument");
            let element_type_attr = unsafe {
                llvm::LLVMRustCreateElementTypeAttr(bx.llcx, bx.layout_of(pointee_ty).llvm_type(bx))
            };
            attributes::apply_to_callsite(
                callsite,
                llvm::AttributePlace::Argument(element_type_index as u32),
                &[element_type_attr],
            );
        }
    }
}

impl AbiBuilderMethods for Builder<'_, '_, '_> {
    fn get_param(&mut self, index: usize) -> Self::Value {
        llvm::get_param(self.llfn(), index as c_uint)
    }
}

impl llvm::CallConv {
    pub(crate) fn from_conv(conv: Conv, arch: &str) -> Self {
        match conv {
            Conv::C
            | Conv::Rust
            | Conv::CCmseNonSecureCall
            | Conv::CCmseNonSecureEntry
            | Conv::RiscvInterrupt { .. } => llvm::CCallConv,
            Conv::Cold => llvm::ColdCallConv,
            Conv::PreserveMost => llvm::PreserveMost,
            Conv::PreserveAll => llvm::PreserveAll,
            Conv::GpuKernel => {
                if arch == "amdgpu" {
                    llvm::AmdgpuKernel
                } else if arch == "nvptx64" {
                    llvm::PtxKernel
                } else {
                    panic!("Architecture {arch} does not support GpuKernel calling convention");
                }
            }
            Conv::AvrInterrupt => llvm::AvrInterrupt,
            Conv::AvrNonBlockingInterrupt => llvm::AvrNonBlockingInterrupt,
            Conv::ArmAapcs => llvm::ArmAapcsCallConv,
            Conv::Msp430Intr => llvm::Msp430Intr,
            Conv::X86Fastcall => llvm::X86FastcallCallConv,
            Conv::X86Intr => llvm::X86_Intr,
            Conv::X86Stdcall => llvm::X86StdcallCallConv,
            Conv::X86ThisCall => llvm::X86_ThisCall,
            Conv::X86VectorCall => llvm::X86_VectorCall,
            Conv::X86_64SysV => llvm::X86_64_SysV,
            Conv::X86_64Win64 => llvm::X86_64_Win64,
        }
    }
}
671}