rustc_codegen_ssa/traits/builder.rs

use std::assert_matches::assert_matches;
use std::ops::Deref;

use rustc_abi::{Align, Scalar, Size, WrappingRange};
use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrs;
use rustc_middle::ty::layout::{FnAbiOf, LayoutOf, TyAndLayout};
use rustc_middle::ty::{AtomicOrdering, Instance, Ty};
use rustc_session::config::OptLevel;
use rustc_span::Span;
use rustc_target::callconv::FnAbi;

use super::abi::AbiBuilderMethods;
use super::asm::AsmBuilderMethods;
use super::consts::ConstCodegenMethods;
use super::coverageinfo::CoverageInfoBuilderMethods;
use super::debuginfo::DebugInfoBuilderMethods;
use super::intrinsic::IntrinsicCallBuilderMethods;
use super::misc::MiscCodegenMethods;
use super::type_::{ArgAbiBuilderMethods, BaseTypeCodegenMethods, LayoutTypeCodegenMethods};
use super::{CodegenMethods, StaticBuilderMethods};
use crate::MemFlags;
use crate::common::{AtomicRmwBinOp, IntPredicate, RealPredicate, SynchronizationScope, TypeKind};
use crate::mir::operand::{OperandRef, OperandValue};
use crate::mir::place::{PlaceRef, PlaceValue};

#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum OverflowOp {
    Add,
    Sub,
    Mul,
}

pub trait BuilderMethods<'a, 'tcx>:
    Sized
    + LayoutOf<'tcx, LayoutOfResult = TyAndLayout<'tcx>>
    + FnAbiOf<'tcx, FnAbiOfResult = &'tcx FnAbi<'tcx, Ty<'tcx>>>
    + Deref<Target = Self::CodegenCx>
    + CoverageInfoBuilderMethods<'tcx>
    + DebugInfoBuilderMethods
    + ArgAbiBuilderMethods<'tcx>
    + AbiBuilderMethods
    + IntrinsicCallBuilderMethods<'tcx>
    + AsmBuilderMethods<'tcx>
    + StaticBuilderMethods
{
    // `BackendTypes` is a supertrait of both `CodegenMethods` and
    // `BuilderMethods`. This bound ensures all impls agree on the associated
    // types within.
    type CodegenCx: CodegenMethods<
            'tcx,
            Value = Self::Value,
            Metadata = Self::Metadata,
            Function = Self::Function,
            BasicBlock = Self::BasicBlock,
            Type = Self::Type,
            Funclet = Self::Funclet,
            DIScope = Self::DIScope,
            DILocation = Self::DILocation,
            DIVariable = Self::DIVariable,
        >;

    fn build(cx: &'a Self::CodegenCx, llbb: Self::BasicBlock) -> Self;

    fn cx(&self) -> &Self::CodegenCx;
    fn llbb(&self) -> Self::BasicBlock;

    fn set_span(&mut self, span: Span);

    // FIXME(eddyb) replace uses of this with `append_sibling_block`.
    fn append_block(cx: &'a Self::CodegenCx, llfn: Self::Function, name: &str) -> Self::BasicBlock;

    fn append_sibling_block(&mut self, name: &str) -> Self::BasicBlock;

    fn switch_to_block(&mut self, llbb: Self::BasicBlock);

    fn ret_void(&mut self);
    fn ret(&mut self, v: Self::Value);
    fn br(&mut self, dest: Self::BasicBlock);
    fn cond_br(
        &mut self,
        cond: Self::Value,
        then_llbb: Self::BasicBlock,
        else_llbb: Self::BasicBlock,
    );

    // Conditional with expectation.
    //
    // This function is opt-in for back ends.
    //
    // The default implementation calls `self.expect()` before emitting the branch
    // by calling `self.cond_br()`
    fn cond_br_with_expect(
        &mut self,
        mut cond: Self::Value,
        then_llbb: Self::BasicBlock,
        else_llbb: Self::BasicBlock,
        expect: Option<bool>,
    ) {
        if let Some(expect) = expect {
            cond = self.expect(cond, expect);
        }
        self.cond_br(cond, then_llbb, else_llbb)
    }

    fn switch(
        &mut self,
        v: Self::Value,
        else_llbb: Self::BasicBlock,
        cases: impl ExactSizeIterator<Item = (u128, Self::BasicBlock)>,
    );

    // This is like `switch()`, but every case has a bool flag indicating whether it's cold.
    //
    // Default implementation throws away the cold flags and calls `switch()`.
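    // Backends that can encode the hint (e.g. LLVM, via branch-weight metadata
    // on the emitted `switch`) may override this to mark the cold arms as unlikely.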
    fn switch_with_weights(
        &mut self,
        v: Self::Value,
        else_llbb: Self::BasicBlock,
        _else_is_cold: bool,
        cases: impl ExactSizeIterator<Item = (u128, Self::BasicBlock, bool)>,
    ) {
        self.switch(v, else_llbb, cases.map(|(val, bb, _)| (val, bb)))
    }

    fn invoke(
        &mut self,
        llty: Self::Type,
        fn_attrs: Option<&CodegenFnAttrs>,
        fn_abi: Option<&FnAbi<'tcx, Ty<'tcx>>>,
        llfn: Self::Value,
        args: &[Self::Value],
        then: Self::BasicBlock,
        catch: Self::BasicBlock,
        funclet: Option<&Self::Funclet>,
        instance: Option<Instance<'tcx>>,
    ) -> Self::Value;
    fn unreachable(&mut self);

    /// Like [`Self::unreachable`], but for use in the middle of a basic block.
    fn unreachable_nonterminator(&mut self) {
        // This is the preferred LLVM incantation for this per
        // https://llvm.org/docs/Frontend/PerformanceTips.html#other-things-to-consider
        // Other backends may override if they have a better way.
        let const_true = self.cx().const_bool(true);
        let poison_ptr = self.const_poison(self.cx().type_ptr());
        self.store(const_true, poison_ptr, Align::ONE);
    }

    fn add(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    fn fadd(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    fn fadd_fast(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    fn fadd_algebraic(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    fn sub(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    fn fsub(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    fn fsub_fast(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    fn fsub_algebraic(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    fn mul(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    fn fmul(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    fn fmul_fast(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    fn fmul_algebraic(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    fn udiv(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    fn exactudiv(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    fn sdiv(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    fn exactsdiv(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    fn fdiv(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    fn fdiv_fast(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    fn fdiv_algebraic(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    fn urem(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    fn srem(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    fn frem(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    fn frem_fast(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    fn frem_algebraic(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    /// Generate a left-shift. Both operands must have the same size. The right operand must be
    /// interpreted as unsigned and can be assumed to be less than the size of the left operand.
    fn shl(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    /// Generate a logical right-shift. Both operands must have the same size. The right operand
    /// must be interpreted as unsigned and can be assumed to be less than the size of the left
    /// operand.
    fn lshr(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    /// Generate an arithmetic right-shift. Both operands must have the same size. The right operand
    /// must be interpreted as unsigned and can be assumed to be less than the size of the left
    /// operand.
    fn ashr(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
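    // The `unchecked_*` arithmetic methods promise that the operation does not
    // overflow for the stated signedness, so backends may encode that promise
    // (e.g. as LLVM's `nsw`/`nuw` flags). The defaults below conservatively fall
    // back to the plain wrapping operations, which is always correct, just
    // potentially less optimizable.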
    fn unchecked_sadd(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value {
        self.add(lhs, rhs)
    }
    fn unchecked_uadd(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value {
        self.add(lhs, rhs)
    }
    fn unchecked_suadd(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value {
        self.unchecked_sadd(lhs, rhs)
    }
    fn unchecked_ssub(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value {
        self.sub(lhs, rhs)
    }
    fn unchecked_usub(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value {
        self.sub(lhs, rhs)
    }
    fn unchecked_susub(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value {
        self.unchecked_ssub(lhs, rhs)
    }
    fn unchecked_smul(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value {
        self.mul(lhs, rhs)
    }
    fn unchecked_umul(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value {
        self.mul(lhs, rhs)
    }
    fn unchecked_sumul(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value {
        // Which to default to is a fairly arbitrary choice,
        // but this is what slice layout was using before.
        self.unchecked_smul(lhs, rhs)
    }
    fn and(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    fn or(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    /// Defaults to [`Self::or`], but guarantees `(lhs & rhs) == 0` so some backends
    /// can emit something more helpful for optimizations.
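    ///
    /// For example (an illustrative sketch, not from any particular caller):
    /// combining two non-overlapping bit ranges, such as `or`ing `shl(x, 8)` with
    /// a value known to fit in the low 8 bits, satisfies this precondition and
    /// lets a backend such as LLVM attach its `disjoint` flag to the `or`.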
    fn or_disjoint(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value {
        self.or(lhs, rhs)
    }
    fn xor(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    fn neg(&mut self, v: Self::Value) -> Self::Value;
    fn fneg(&mut self, v: Self::Value) -> Self::Value;
    fn not(&mut self, v: Self::Value) -> Self::Value;

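    /// Performs the checked version of `oop` on `lhs` and `rhs` of type `ty`,
    /// returning both the (wrapping) result and a boolean indicating whether the
    /// operation overflowed.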
    fn checked_binop(
        &mut self,
        oop: OverflowOp,
        ty: Ty<'tcx>,
        lhs: Self::Value,
        rhs: Self::Value,
    ) -> (Self::Value, Self::Value);

    fn from_immediate(&mut self, val: Self::Value) -> Self::Value;
    fn to_immediate_scalar(&mut self, val: Self::Value, scalar: Scalar) -> Self::Value;

    fn alloca(&mut self, size: Size, align: Align) -> Self::Value;

    fn load(&mut self, ty: Self::Type, ptr: Self::Value, align: Align) -> Self::Value;
    fn volatile_load(&mut self, ty: Self::Type, ptr: Self::Value) -> Self::Value;
    fn atomic_load(
        &mut self,
        ty: Self::Type,
        ptr: Self::Value,
        order: AtomicOrdering,
        size: Size,
    ) -> Self::Value;
    fn load_from_place(&mut self, ty: Self::Type, place: PlaceValue<Self::Value>) -> Self::Value {
        assert_eq!(place.llextra, None);
        self.load(ty, place.llval, place.align)
    }
    fn load_operand(&mut self, place: PlaceRef<'tcx, Self::Value>)
    -> OperandRef<'tcx, Self::Value>;

    /// Called for Rvalue::Repeat when the elem is neither a ZST nor optimizable using memset.
    fn write_operand_repeatedly(
        &mut self,
        elem: OperandRef<'tcx, Self::Value>,
        count: u64,
        dest: PlaceRef<'tcx, Self::Value>,
    );

    /// Emits an `assume` that the integer value `imm` of type `ty` is contained in `range`.
    ///
    /// This *always* emits the assumption, so you probably want to check the
    /// optimization level and `Scalar::is_always_valid` before calling it.
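    ///
    /// A guarded call might look like this (a sketch; `bx`, `imm`, `backend_ty`,
    /// and `scalar` are illustrative names, not arguments of this method):
    ///
    /// ```ignore (illustrative)
    /// if bx.sess().opts.optimize != OptLevel::No && !scalar.is_always_valid(bx) {
    ///     bx.assume_integer_range(imm, backend_ty, scalar.valid_range(bx));
    /// }
    /// ```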
    fn assume_integer_range(&mut self, imm: Self::Value, ty: Self::Type, range: WrappingRange) {
        let WrappingRange { start, end } = range;

        // Perhaps one day we'll be able to use assume operand bundles for this,
        // but for now this encoding with a single icmp+assume is best per
        // <https://github.com/llvm/llvm-project/issues/123278#issuecomment-2597440158>
        let shifted = if start == 0 {
            imm
        } else {
            let low = self.const_uint_big(ty, start);
            self.sub(imm, low)
        };
        let width = self.const_uint_big(ty, u128::wrapping_sub(end, start));
        let cmp = self.icmp(IntPredicate::IntULE, shifted, width);
        self.assume(cmp);
    }

    /// Emits an `assume` that the `val` of pointer type is non-null.
    ///
    /// You may want to check the optimization level before bothering calling this.
    fn assume_nonnull(&mut self, val: Self::Value) {
        // Arguably in LLVM it'd be better to emit an assume operand bundle instead
        // <https://llvm.org/docs/LangRef.html#assume-operand-bundles>
        // but this works fine for all backends.

        let null = self.const_null(self.type_ptr());
        let is_not_null = self.icmp(IntPredicate::IntNE, val, null);
        self.assume(is_not_null);
    }

    fn range_metadata(&mut self, load: Self::Value, range: WrappingRange);
    fn nonnull_metadata(&mut self, load: Self::Value);

    fn store(&mut self, val: Self::Value, ptr: Self::Value, align: Align) -> Self::Value;
    fn store_to_place(&mut self, val: Self::Value, place: PlaceValue<Self::Value>) -> Self::Value {
        assert_eq!(place.llextra, None);
        self.store(val, place.llval, place.align)
    }
    fn store_with_flags(
        &mut self,
        val: Self::Value,
        ptr: Self::Value,
        align: Align,
        flags: MemFlags,
    ) -> Self::Value;
    fn store_to_place_with_flags(
        &mut self,
        val: Self::Value,
        place: PlaceValue<Self::Value>,
        flags: MemFlags,
    ) -> Self::Value {
        assert_eq!(place.llextra, None);
        self.store_with_flags(val, place.llval, place.align, flags)
    }
    fn atomic_store(
        &mut self,
        val: Self::Value,
        ptr: Self::Value,
        order: AtomicOrdering,
        size: Size,
    );

    fn gep(&mut self, ty: Self::Type, ptr: Self::Value, indices: &[Self::Value]) -> Self::Value;
    fn inbounds_gep(
        &mut self,
        ty: Self::Type,
        ptr: Self::Value,
        indices: &[Self::Value],
    ) -> Self::Value;
    fn inbounds_nuw_gep(
        &mut self,
        ty: Self::Type,
        ptr: Self::Value,
        indices: &[Self::Value],
    ) -> Self::Value {
        self.inbounds_gep(ty, ptr, indices)
    }
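    // Byte-offset pointer arithmetic. The defaults lower both methods to a `gep`
    // over `i8`; `inbounds_ptradd` additionally carries the usual inbounds
    // requirement that the offset pointer stays within the original allocation.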
    fn ptradd(&mut self, ptr: Self::Value, offset: Self::Value) -> Self::Value {
        self.gep(self.cx().type_i8(), ptr, &[offset])
    }
    fn inbounds_ptradd(&mut self, ptr: Self::Value, offset: Self::Value) -> Self::Value {
        self.inbounds_gep(self.cx().type_i8(), ptr, &[offset])
    }

    fn trunc(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
    /// Produces the same value as [`Self::trunc`] (and defaults to that),
    /// but is UB unless *zero*-extending the result can reproduce `val`.
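    ///
    /// For example (illustrative values): truncating `255_u16` to `u8` is fine,
    /// because zero-extending the resulting `255_u8` reproduces the original
    /// value, whereas truncating `300_u16` this way would be UB, since the
    /// result `44_u8` does not zero-extend back to `300`.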
    fn unchecked_utrunc(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value {
        self.trunc(val, dest_ty)
    }
    /// Produces the same value as [`Self::trunc`] (and defaults to that),
    /// but is UB unless *sign*-extending the result can reproduce `val`.
    fn unchecked_strunc(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value {
        self.trunc(val, dest_ty)
    }

    fn sext(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
    fn fptoui_sat(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
    fn fptosi_sat(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
    fn fptoui(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
    fn fptosi(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
    fn uitofp(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
    fn sitofp(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
    fn fptrunc(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
    fn fpext(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
    fn ptrtoint(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
    fn inttoptr(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
    fn bitcast(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
    fn intcast(&mut self, val: Self::Value, dest_ty: Self::Type, is_signed: bool) -> Self::Value;
    fn pointercast(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;

    fn cast_float_to_int(
        &mut self,
        signed: bool,
        x: Self::Value,
        dest_ty: Self::Type,
    ) -> Self::Value {
        let in_ty = self.cx().val_ty(x);
        let (float_ty, int_ty) = if self.cx().type_kind(dest_ty) == TypeKind::Vector
            && self.cx().type_kind(in_ty) == TypeKind::Vector
        {
            (self.cx().element_type(in_ty), self.cx().element_type(dest_ty))
        } else {
            (in_ty, dest_ty)
        };
        assert_matches!(
            self.cx().type_kind(float_ty),
            TypeKind::Half | TypeKind::Float | TypeKind::Double | TypeKind::FP128
        );
        assert_eq!(self.cx().type_kind(int_ty), TypeKind::Integer);

        if let Some(false) = self.cx().sess().opts.unstable_opts.saturating_float_casts {
            return if signed { self.fptosi(x, dest_ty) } else { self.fptoui(x, dest_ty) };
        }

        if signed { self.fptosi_sat(x, dest_ty) } else { self.fptoui_sat(x, dest_ty) }
    }

    fn icmp(&mut self, op: IntPredicate, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    fn fcmp(&mut self, op: RealPredicate, lhs: Self::Value, rhs: Self::Value) -> Self::Value;

    /// Returns `-1` if `lhs < rhs`, `0` if `lhs == rhs`, and `1` if `lhs > rhs`.
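    ///
    /// Returning `None` (the default) indicates that the backend has no special
    /// lowering for this comparison, in which case the caller falls back to the
    /// generic sequence in `codegen_scalar_binop` referenced by the FIXME below.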
    // FIXME: Move the default implementation from `codegen_scalar_binop` into this method and
    // remove the `Option` return once LLVM 20 is the minimum version.
    fn three_way_compare(
        &mut self,
        _ty: Ty<'tcx>,
        _lhs: Self::Value,
        _rhs: Self::Value,
    ) -> Option<Self::Value> {
        None
    }

    fn memcpy(
        &mut self,
        dst: Self::Value,
        dst_align: Align,
        src: Self::Value,
        src_align: Align,
        size: Self::Value,
        flags: MemFlags,
    );
    fn memmove(
        &mut self,
        dst: Self::Value,
        dst_align: Align,
        src: Self::Value,
        src_align: Align,
        size: Self::Value,
        flags: MemFlags,
    );
    fn memset(
        &mut self,
        ptr: Self::Value,
        fill_byte: Self::Value,
        size: Self::Value,
        align: Align,
        flags: MemFlags,
    );

    /// *Typed* copy for non-overlapping places.
    ///
    /// Has a default implementation in terms of `memcpy`, but specific backends
    /// can override to do something smarter if possible.
    ///
    /// (For example, typed load-stores with alias metadata.)
    fn typed_place_copy(
        &mut self,
        dst: PlaceValue<Self::Value>,
        src: PlaceValue<Self::Value>,
        layout: TyAndLayout<'tcx>,
    ) {
        self.typed_place_copy_with_flags(dst, src, layout, MemFlags::empty());
    }

    fn typed_place_copy_with_flags(
        &mut self,
        dst: PlaceValue<Self::Value>,
        src: PlaceValue<Self::Value>,
        layout: TyAndLayout<'tcx>,
        flags: MemFlags,
    ) {
        assert!(layout.is_sized(), "cannot typed-copy an unsized type");
        assert!(src.llextra.is_none(), "cannot directly copy from unsized values");
        assert!(dst.llextra.is_none(), "cannot directly copy into unsized values");
        if flags.contains(MemFlags::NONTEMPORAL) {
            // HACK(nox): This is inefficient but there is no nontemporal memcpy.
            let ty = self.backend_type(layout);
            let val = self.load_from_place(ty, src);
            self.store_to_place_with_flags(val, dst, flags);
        } else if self.sess().opts.optimize == OptLevel::No && self.is_backend_immediate(layout) {
            // If we're not optimizing, the aliasing information from `memcpy`
            // isn't useful, so just load-store the value for smaller code.
            let temp = self.load_operand(src.with_type(layout));
            temp.val.store_with_flags(self, dst.with_type(layout), flags);
        } else if !layout.is_zst() {
            let bytes = self.const_usize(layout.size.bytes());
            self.memcpy(dst.llval, dst.align, src.llval, src.align, bytes, flags);
        }
    }

    /// *Typed* swap for non-overlapping places.
    ///
    /// Avoids `alloca`s for Immediates and ScalarPairs.
    ///
    /// FIXME: Maybe do something smarter for Ref types too?
    /// For now, the `typed_swap_nonoverlapping` intrinsic just doesn't call this for those
    /// cases (in non-debug), preferring the fallback body instead.
    fn typed_place_swap(
        &mut self,
        left: PlaceValue<Self::Value>,
        right: PlaceValue<Self::Value>,
        layout: TyAndLayout<'tcx>,
    ) {
        let mut temp = self.load_operand(left.with_type(layout));
        if let OperandValue::Ref(..) = temp.val {
            // The SSA value isn't stand-alone, so we need to copy it elsewhere
            let alloca = PlaceRef::alloca(self, layout);
            self.typed_place_copy(alloca.val, left, layout);
            temp = self.load_operand(alloca);
        }
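        // `temp` now holds the old value of `left` independently of the memory
        // behind `left`, so overwrite `left` with `right`, then store the saved
        // value into `right` to complete the swap.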
        self.typed_place_copy(left, right, layout);
        temp.val.store(self, right.with_type(layout));
    }

    fn select(
        &mut self,
        cond: Self::Value,
        then_val: Self::Value,
        else_val: Self::Value,
    ) -> Self::Value;

    fn va_arg(&mut self, list: Self::Value, ty: Self::Type) -> Self::Value;
    fn extract_element(&mut self, vec: Self::Value, idx: Self::Value) -> Self::Value;
    fn vector_splat(&mut self, num_elts: usize, elt: Self::Value) -> Self::Value;
    fn extract_value(&mut self, agg_val: Self::Value, idx: u64) -> Self::Value;
    fn insert_value(&mut self, agg_val: Self::Value, elt: Self::Value, idx: u64) -> Self::Value;

    fn set_personality_fn(&mut self, personality: Self::Function);

    // These are used by everyone except msvc
    fn cleanup_landing_pad(&mut self, pers_fn: Self::Function) -> (Self::Value, Self::Value);
    fn filter_landing_pad(&mut self, pers_fn: Self::Function);
    fn resume(&mut self, exn0: Self::Value, exn1: Self::Value);

    // These are used only by msvc
    fn cleanup_pad(&mut self, parent: Option<Self::Value>, args: &[Self::Value]) -> Self::Funclet;
    fn cleanup_ret(&mut self, funclet: &Self::Funclet, unwind: Option<Self::BasicBlock>);
    fn catch_pad(&mut self, parent: Self::Value, args: &[Self::Value]) -> Self::Funclet;
    fn catch_switch(
        &mut self,
        parent: Option<Self::Value>,
        unwind: Option<Self::BasicBlock>,
        handlers: &[Self::BasicBlock],
    ) -> Self::Value;

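    /// Atomically compares the value at `dst` with `cmp` and, if they are equal,
    /// stores `src`; returns the value previously at `dst` together with a
    /// boolean indicating whether the exchange succeeded.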
    fn atomic_cmpxchg(
        &mut self,
        dst: Self::Value,
        cmp: Self::Value,
        src: Self::Value,
        order: AtomicOrdering,
        failure_order: AtomicOrdering,
        weak: bool,
    ) -> (Self::Value, Self::Value);
    fn atomic_rmw(
        &mut self,
        op: AtomicRmwBinOp,
        dst: Self::Value,
        src: Self::Value,
        order: AtomicOrdering,
    ) -> Self::Value;
    fn atomic_fence(&mut self, order: AtomicOrdering, scope: SynchronizationScope);
    fn set_invariant_load(&mut self, load: Self::Value);

    /// Called for `StorageLive`
    fn lifetime_start(&mut self, ptr: Self::Value, size: Size);

    /// Called for `StorageDead`
    fn lifetime_end(&mut self, ptr: Self::Value, size: Size);

    /// "Finally codegen the call"
    ///
    /// ## Arguments
    ///
    /// The `fn_attrs`, `fn_abi`, and `instance` arguments are Options because they are advisory.
    /// They relate to optional codegen enhancements like LLVM CFI, and do not affect ABI per se.
    /// Any ABI-related transformations should be handled by different, earlier stages of codegen.
    /// For instance, in the caller of `BuilderMethods::call`.
    ///
    /// This means that a codegen backend which disregards `fn_attrs`, `fn_abi`, and `instance`
    /// should still do correct codegen, and code should not be miscompiled if they are omitted.
    /// It is not a miscompilation in this sense if it fails to run under CFI, other sanitizers, or
    /// in the context of other compiler-enhanced security features.
    ///
    /// The typical case that they are None is during the codegen of intrinsics and lang-items,
    /// as those are "fake functions" with only a trivial ABI if any, et cetera.
    ///
    /// ## Return
    ///
    /// Must return the value the function will return so it can be written to the destination,
    /// assuming the function does not explicitly pass the destination as a pointer in `args`.
    fn call(
        &mut self,
        llty: Self::Type,
        fn_attrs: Option<&CodegenFnAttrs>,
        fn_abi: Option<&FnAbi<'tcx, Ty<'tcx>>>,
        fn_val: Self::Value,
        args: &[Self::Value],
        funclet: Option<&Self::Funclet>,
        instance: Option<Instance<'tcx>>,
    ) -> Self::Value;

    fn tail_call(
        &mut self,
        llty: Self::Type,
        fn_attrs: Option<&CodegenFnAttrs>,
        fn_abi: &FnAbi<'tcx, Ty<'tcx>>,
        llfn: Self::Value,
        args: &[Self::Value],
        funclet: Option<&Self::Funclet>,
        instance: Option<Instance<'tcx>>,
    );

    fn zext(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;

    fn apply_attrs_to_cleanup_callsite(&mut self, llret: Self::Value);
}