// rustc_codegen_ssa/traits/builder.rs

use std::assert_matches::assert_matches;
use std::ops::Deref;

use rustc_abi::{Align, Scalar, Size, WrappingRange};
use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrs;
use rustc_middle::ty::layout::{FnAbiOf, LayoutOf, TyAndLayout};
use rustc_middle::ty::{AtomicOrdering, Instance, Ty};
use rustc_session::config::OptLevel;
use rustc_span::Span;
use rustc_target::callconv::FnAbi;

use super::abi::AbiBuilderMethods;
use super::asm::AsmBuilderMethods;
use super::consts::ConstCodegenMethods;
use super::coverageinfo::CoverageInfoBuilderMethods;
use super::debuginfo::DebugInfoBuilderMethods;
use super::intrinsic::IntrinsicCallBuilderMethods;
use super::misc::MiscCodegenMethods;
use super::type_::{ArgAbiBuilderMethods, BaseTypeCodegenMethods, LayoutTypeCodegenMethods};
use super::{CodegenMethods, StaticBuilderMethods};
use crate::MemFlags;
use crate::common::{AtomicRmwBinOp, IntPredicate, RealPredicate, SynchronizationScope, TypeKind};
use crate::mir::operand::{OperandRef, OperandValue};
use crate::mir::place::{PlaceRef, PlaceValue};

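/// Which checked integer arithmetic operation [`BuilderMethods::checked_binop`]
/// should emit.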
#[derive(Copy, Clone, Debug)]
pub enum OverflowOp {
    Add,
    Sub,
    Mul,
}

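/// The per-basic-block "builder" interface: the backend-independent MIR
/// lowering in this crate emits instructions, memory operations, and control
/// flow through these methods. Backends such as the LLVM and GCC codegens
/// implement this trait for their own IR builders.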
pub trait BuilderMethods<'a, 'tcx>:
    Sized
    + LayoutOf<'tcx, LayoutOfResult = TyAndLayout<'tcx>>
    + FnAbiOf<'tcx, FnAbiOfResult = &'tcx FnAbi<'tcx, Ty<'tcx>>>
    + Deref<Target = Self::CodegenCx>
    + CoverageInfoBuilderMethods<'tcx>
    + DebugInfoBuilderMethods
    + ArgAbiBuilderMethods<'tcx>
    + AbiBuilderMethods
    + IntrinsicCallBuilderMethods<'tcx>
    + AsmBuilderMethods<'tcx>
    + StaticBuilderMethods
{
    // `BackendTypes` is a supertrait of both `CodegenMethods` and
    // `BuilderMethods`. This bound ensures all impls agree on the associated
    // types within.
    type CodegenCx: CodegenMethods<
            'tcx,
            Value = Self::Value,
            Metadata = Self::Metadata,
            Function = Self::Function,
            BasicBlock = Self::BasicBlock,
            Type = Self::Type,
            Funclet = Self::Funclet,
            DIScope = Self::DIScope,
            DILocation = Self::DILocation,
            DIVariable = Self::DIVariable,
        >;

    fn build(cx: &'a Self::CodegenCx, llbb: Self::BasicBlock) -> Self;

    fn cx(&self) -> &Self::CodegenCx;
    fn llbb(&self) -> Self::BasicBlock;

    fn set_span(&mut self, span: Span);

    // FIXME(eddyb) replace uses of this with `append_sibling_block`.
    fn append_block(cx: &'a Self::CodegenCx, llfn: Self::Function, name: &str) -> Self::BasicBlock;

    fn append_sibling_block(&mut self, name: &str) -> Self::BasicBlock;

    fn switch_to_block(&mut self, llbb: Self::BasicBlock);

    fn ret_void(&mut self);
    fn ret(&mut self, v: Self::Value);
    fn br(&mut self, dest: Self::BasicBlock);
    fn cond_br(
        &mut self,
        cond: Self::Value,
        then_llbb: Self::BasicBlock,
        else_llbb: Self::BasicBlock,
    );

    /// Conditional branch with an expectation hint.
    ///
    /// This function is opt-in for backends.
    ///
    /// The default implementation calls `self.expect()` before emitting the
    /// branch via `self.cond_br()`.
    fn cond_br_with_expect(
        &mut self,
        mut cond: Self::Value,
        then_llbb: Self::BasicBlock,
        else_llbb: Self::BasicBlock,
        expect: Option<bool>,
    ) {
        if let Some(expect) = expect {
            cond = self.expect(cond, expect);
        }
        self.cond_br(cond, then_llbb, else_llbb)
    }

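    /// Emits a multi-way branch on the integer value `v`: control transfers to
    /// the block paired with the first matching case value, or to `else_llbb`
    /// if none matches. An illustrative sketch (the `bx`, `discr`, and block
    /// names are hypothetical):
    ///
    /// ```ignore (illustrative)
    /// // Roughly: match discr { 0 => a, 1 => b, _ => other }
    /// bx.switch(discr, other_llbb, [(0u128, a_llbb), (1u128, b_llbb)].into_iter());
    /// ```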
    fn switch(
        &mut self,
        v: Self::Value,
        else_llbb: Self::BasicBlock,
        cases: impl ExactSizeIterator<Item = (u128, Self::BasicBlock)>,
    );

    /// Like `switch()`, but every case has a bool flag indicating whether it's cold.
    ///
    /// The default implementation throws away the cold flags and calls `switch()`.
    fn switch_with_weights(
        &mut self,
        v: Self::Value,
        else_llbb: Self::BasicBlock,
        _else_is_cold: bool,
        cases: impl ExactSizeIterator<Item = (u128, Self::BasicBlock, bool)>,
    ) {
        self.switch(v, else_llbb, cases.map(|(val, bb, _)| (val, bb)))
    }

    fn invoke(
        &mut self,
        llty: Self::Type,
        fn_attrs: Option<&CodegenFnAttrs>,
        fn_abi: Option<&FnAbi<'tcx, Ty<'tcx>>>,
        llfn: Self::Value,
        args: &[Self::Value],
        then: Self::BasicBlock,
        catch: Self::BasicBlock,
        funclet: Option<&Self::Funclet>,
        instance: Option<Instance<'tcx>>,
    ) -> Self::Value;
    fn unreachable(&mut self);

    fn add(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    fn fadd(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    fn fadd_fast(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    fn fadd_algebraic(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    fn sub(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    fn fsub(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    fn fsub_fast(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    fn fsub_algebraic(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    fn mul(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    fn fmul(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    fn fmul_fast(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    fn fmul_algebraic(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    fn udiv(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    fn exactudiv(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    fn sdiv(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    fn exactsdiv(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    fn fdiv(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    fn fdiv_fast(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    fn fdiv_algebraic(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    fn urem(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    fn srem(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    fn frem(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    fn frem_fast(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    fn frem_algebraic(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    /// Generate a left-shift. Both operands must have the same size. The right operand must be
    /// interpreted as unsigned and can be assumed to be less than the size of the left operand.
    fn shl(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    /// Generate a logical right-shift. Both operands must have the same size. The right operand
    /// must be interpreted as unsigned and can be assumed to be less than the size of the left
    /// operand.
    fn lshr(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    /// Generate an arithmetic right-shift. Both operands must have the same size. The right
    /// operand must be interpreted as unsigned and can be assumed to be less than the size of the
    /// left operand.
    fn ashr(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    fn unchecked_sadd(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value {
        self.add(lhs, rhs)
    }
    fn unchecked_uadd(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value {
        self.add(lhs, rhs)
    }
    fn unchecked_suadd(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value {
        self.unchecked_sadd(lhs, rhs)
    }
    fn unchecked_ssub(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value {
        self.sub(lhs, rhs)
    }
    fn unchecked_usub(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value {
        self.sub(lhs, rhs)
    }
    fn unchecked_susub(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value {
        self.unchecked_ssub(lhs, rhs)
    }
    fn unchecked_smul(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value {
        self.mul(lhs, rhs)
    }
    fn unchecked_umul(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value {
        self.mul(lhs, rhs)
    }
    fn unchecked_sumul(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value {
        // Which to default to is a fairly arbitrary choice,
        // but this is what slice layout was using before.
        self.unchecked_smul(lhs, rhs)
    }
    fn and(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    fn or(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    /// Defaults to [`Self::or`], but guarantees `(lhs & rhs) == 0` so some backends
    /// can emit something more helpful for optimizations.
    fn or_disjoint(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value {
        self.or(lhs, rhs)
    }
    fn xor(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    fn neg(&mut self, v: Self::Value) -> Self::Value;
    fn fneg(&mut self, v: Self::Value) -> Self::Value;
    fn not(&mut self, v: Self::Value) -> Self::Value;

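    /// Emits the checked version of `oop` on integers of type `ty`, returning
    /// both the result and a boolean flag indicating whether the operation
    /// overflowed (roughly what `a.checked_add(b)` lowers to).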
    fn checked_binop(
        &mut self,
        oop: OverflowOp,
        ty: Ty<'_>,
        lhs: Self::Value,
        rhs: Self::Value,
    ) -> (Self::Value, Self::Value);

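    /// Converts an immediate value to the representation used when storing it
    /// in memory; `to_immediate_scalar` is the inverse for a value whose layout
    /// is the given `scalar`. (In the LLVM backend, for example, `bool` is `i1`
    /// as an immediate but `i8` in memory.)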
    fn from_immediate(&mut self, val: Self::Value) -> Self::Value;
    fn to_immediate_scalar(&mut self, val: Self::Value, scalar: Scalar) -> Self::Value;

    fn alloca(&mut self, size: Size, align: Align) -> Self::Value;
    fn dynamic_alloca(&mut self, size: Self::Value, align: Align) -> Self::Value;

    fn load(&mut self, ty: Self::Type, ptr: Self::Value, align: Align) -> Self::Value;
    fn volatile_load(&mut self, ty: Self::Type, ptr: Self::Value) -> Self::Value;
    fn atomic_load(
        &mut self,
        ty: Self::Type,
        ptr: Self::Value,
        order: AtomicOrdering,
        size: Size,
    ) -> Self::Value;
    fn load_from_place(&mut self, ty: Self::Type, place: PlaceValue<Self::Value>) -> Self::Value {
        assert_eq!(place.llextra, None);
        self.load(ty, place.llval, place.align)
    }
    fn load_operand(&mut self, place: PlaceRef<'tcx, Self::Value>)
    -> OperandRef<'tcx, Self::Value>;

    /// Called for `Rvalue::Repeat` when the element is neither a ZST nor
    /// optimizable using `memset`.
    fn write_operand_repeatedly(
        &mut self,
        elem: OperandRef<'tcx, Self::Value>,
        count: u64,
        dest: PlaceRef<'tcx, Self::Value>,
    );

    /// Emits an `assume` that the integer value `imm` of type `ty` is contained in `range`.
    ///
    /// This *always* emits the assumption, so you probably want to check the
    /// optimization level and `Scalar::is_always_valid` before calling it.
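    ///
    /// A sketch of such a guarded call, with hypothetical `bx`, `imm`,
    /// `backend_ty`, and `scalar` bindings:
    ///
    /// ```ignore (illustrative)
    /// if bx.sess().opts.optimize != OptLevel::No && !scalar.is_always_valid(bx) {
    ///     bx.assume_integer_range(imm, backend_ty, scalar.valid_range(bx));
    /// }
    /// ```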
    fn assume_integer_range(&mut self, imm: Self::Value, ty: Self::Type, range: WrappingRange) {
        let WrappingRange { start, end } = range;

        // Perhaps one day we'll be able to use assume operand bundles for this,
        // but for now this encoding with a single icmp+assume is best per
        // <https://github.com/llvm/llvm-project/issues/123278#issuecomment-2597440158>
        let shifted = if start == 0 {
            imm
        } else {
            let low = self.const_uint_big(ty, start);
            self.sub(imm, low)
        };
        let width = self.const_uint_big(ty, u128::wrapping_sub(end, start));
        let cmp = self.icmp(IntPredicate::IntULE, shifted, width);
        self.assume(cmp);
    }

    /// Emits an `assume` that the pointer-typed `val` is non-null.
    ///
    /// You may want to check the optimization level before bothering to call this.
    fn assume_nonnull(&mut self, val: Self::Value) {
        // Arguably in LLVM it'd be better to emit an assume operand bundle instead
        // <https://llvm.org/docs/LangRef.html#assume-operand-bundles>
        // but this works fine for all backends.

        let null = self.const_null(self.type_ptr());
        let is_not_null = self.icmp(IntPredicate::IntNE, val, null);
        self.assume(is_not_null);
    }

    fn range_metadata(&mut self, load: Self::Value, range: WrappingRange);
    fn nonnull_metadata(&mut self, load: Self::Value);

    fn store(&mut self, val: Self::Value, ptr: Self::Value, align: Align) -> Self::Value;
    fn store_to_place(&mut self, val: Self::Value, place: PlaceValue<Self::Value>) -> Self::Value {
        assert_eq!(place.llextra, None);
        self.store(val, place.llval, place.align)
    }
    fn store_with_flags(
        &mut self,
        val: Self::Value,
        ptr: Self::Value,
        align: Align,
        flags: MemFlags,
    ) -> Self::Value;
    fn store_to_place_with_flags(
        &mut self,
        val: Self::Value,
        place: PlaceValue<Self::Value>,
        flags: MemFlags,
    ) -> Self::Value {
        assert_eq!(place.llextra, None);
        self.store_with_flags(val, place.llval, place.align, flags)
    }
    fn atomic_store(
        &mut self,
        val: Self::Value,
        ptr: Self::Value,
        order: AtomicOrdering,
        size: Size,
    );

    fn gep(&mut self, ty: Self::Type, ptr: Self::Value, indices: &[Self::Value]) -> Self::Value;
    fn inbounds_gep(
        &mut self,
        ty: Self::Type,
        ptr: Self::Value,
        indices: &[Self::Value],
    ) -> Self::Value;
    fn inbounds_nuw_gep(
        &mut self,
        ty: Self::Type,
        ptr: Self::Value,
        indices: &[Self::Value],
    ) -> Self::Value {
        self.inbounds_gep(ty, ptr, indices)
    }
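    /// Byte-offsets `ptr` by `offset`, i.e. a `getelementptr` with an `i8`
    /// element type; `inbounds_ptradd` below is the same but with the
    /// `inbounds` guarantee.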
    fn ptradd(&mut self, ptr: Self::Value, offset: Self::Value) -> Self::Value {
        self.gep(self.cx().type_i8(), ptr, &[offset])
    }
    fn inbounds_ptradd(&mut self, ptr: Self::Value, offset: Self::Value) -> Self::Value {
        self.inbounds_gep(self.cx().type_i8(), ptr, &[offset])
    }

    fn trunc(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
    /// Produces the same value as [`Self::trunc`] (and defaults to that),
    /// but is UB unless *zero*-extending the result can reproduce `val`.
    fn unchecked_utrunc(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value {
        self.trunc(val, dest_ty)
    }
    /// Produces the same value as [`Self::trunc`] (and defaults to that),
    /// but is UB unless *sign*-extending the result can reproduce `val`.
    fn unchecked_strunc(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value {
        self.trunc(val, dest_ty)
    }

    fn sext(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
    fn fptoui_sat(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
    fn fptosi_sat(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
    fn fptoui(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
    fn fptosi(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
    fn uitofp(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
    fn sitofp(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
    fn fptrunc(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
    fn fpext(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
    fn ptrtoint(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
    fn inttoptr(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
    fn bitcast(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
    fn intcast(&mut self, val: Self::Value, dest_ty: Self::Type, is_signed: bool) -> Self::Value;
    fn pointercast(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;

    fn cast_float_to_int(
        &mut self,
        signed: bool,
        x: Self::Value,
        dest_ty: Self::Type,
    ) -> Self::Value {
        let in_ty = self.cx().val_ty(x);
        let (float_ty, int_ty) = if self.cx().type_kind(dest_ty) == TypeKind::Vector
            && self.cx().type_kind(in_ty) == TypeKind::Vector
        {
            (self.cx().element_type(in_ty), self.cx().element_type(dest_ty))
        } else {
            (in_ty, dest_ty)
        };
        assert_matches!(
            self.cx().type_kind(float_ty),
            TypeKind::Half | TypeKind::Float | TypeKind::Double | TypeKind::FP128
        );
        assert_eq!(self.cx().type_kind(int_ty), TypeKind::Integer);

        if let Some(false) = self.cx().sess().opts.unstable_opts.saturating_float_casts {
            return if signed { self.fptosi(x, dest_ty) } else { self.fptoui(x, dest_ty) };
        }

        if signed { self.fptosi_sat(x, dest_ty) } else { self.fptoui_sat(x, dest_ty) }
    }

    fn icmp(&mut self, op: IntPredicate, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    fn fcmp(&mut self, op: RealPredicate, lhs: Self::Value, rhs: Self::Value) -> Self::Value;

    /// Returns `-1` if `lhs < rhs`, `0` if `lhs == rhs`, and `1` if `lhs > rhs`.
    // FIXME: Move the default implementation from `codegen_scalar_binop` into this method and
    // remove the `Option` return once LLVM 20 is the minimum version.
    fn three_way_compare(
        &mut self,
        _ty: Ty<'tcx>,
        _lhs: Self::Value,
        _rhs: Self::Value,
    ) -> Option<Self::Value> {
        None
    }

    fn memcpy(
        &mut self,
        dst: Self::Value,
        dst_align: Align,
        src: Self::Value,
        src_align: Align,
        size: Self::Value,
        flags: MemFlags,
    );
    fn memmove(
        &mut self,
        dst: Self::Value,
        dst_align: Align,
        src: Self::Value,
        src_align: Align,
        size: Self::Value,
        flags: MemFlags,
    );
    fn memset(
        &mut self,
        ptr: Self::Value,
        fill_byte: Self::Value,
        size: Self::Value,
        align: Align,
        flags: MemFlags,
    );

    /// *Typed* copy for non-overlapping places.
    ///
    /// Has a default implementation in terms of `memcpy`, but specific backends
    /// can override to do something smarter if possible.
    ///
    /// (For example, typed load-stores with alias metadata.)
    fn typed_place_copy(
        &mut self,
        dst: PlaceValue<Self::Value>,
        src: PlaceValue<Self::Value>,
        layout: TyAndLayout<'tcx>,
    ) {
        self.typed_place_copy_with_flags(dst, src, layout, MemFlags::empty());
    }

    fn typed_place_copy_with_flags(
        &mut self,
        dst: PlaceValue<Self::Value>,
        src: PlaceValue<Self::Value>,
        layout: TyAndLayout<'tcx>,
        flags: MemFlags,
    ) {
        assert!(layout.is_sized(), "cannot typed-copy an unsized type");
        assert!(src.llextra.is_none(), "cannot directly copy from unsized values");
        assert!(dst.llextra.is_none(), "cannot directly copy into unsized values");
        if flags.contains(MemFlags::NONTEMPORAL) {
            // HACK(nox): This is inefficient but there is no nontemporal memcpy.
            let ty = self.backend_type(layout);
            let val = self.load_from_place(ty, src);
            self.store_to_place_with_flags(val, dst, flags);
        } else if self.sess().opts.optimize == OptLevel::No && self.is_backend_immediate(layout) {
            // If we're not optimizing, the aliasing information from `memcpy`
            // isn't useful, so just load-store the value for smaller code.
            let temp = self.load_operand(src.with_type(layout));
            temp.val.store_with_flags(self, dst.with_type(layout), flags);
        } else if !layout.is_zst() {
            let bytes = self.const_usize(layout.size.bytes());
            self.memcpy(dst.llval, dst.align, src.llval, src.align, bytes, flags);
        }
    }

    /// *Typed* swap for non-overlapping places.
    ///
    /// Avoids `alloca`s for Immediates and ScalarPairs.
    ///
    /// FIXME: Maybe do something smarter for Ref types too?
    /// For now, the `typed_swap_nonoverlapping` intrinsic just doesn't call this for those
    /// cases (in non-debug), preferring the fallback body instead.
    fn typed_place_swap(
        &mut self,
        left: PlaceValue<Self::Value>,
        right: PlaceValue<Self::Value>,
        layout: TyAndLayout<'tcx>,
    ) {
        let mut temp = self.load_operand(left.with_type(layout));
        if let OperandValue::Ref(..) = temp.val {
            // The SSA value isn't stand-alone, so we need to copy it elsewhere
            let alloca = PlaceRef::alloca(self, layout);
            self.typed_place_copy(alloca.val, left, layout);
            temp = self.load_operand(alloca);
        }
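        // Copy `right` into `left`, then store the saved original value of
        // `left` into `right`, completing the swap.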
        self.typed_place_copy(left, right, layout);
        temp.val.store(self, right.with_type(layout));
    }

    fn select(
        &mut self,
        cond: Self::Value,
        then_val: Self::Value,
        else_val: Self::Value,
    ) -> Self::Value;

    fn va_arg(&mut self, list: Self::Value, ty: Self::Type) -> Self::Value;
    fn extract_element(&mut self, vec: Self::Value, idx: Self::Value) -> Self::Value;
    fn vector_splat(&mut self, num_elts: usize, elt: Self::Value) -> Self::Value;
    fn extract_value(&mut self, agg_val: Self::Value, idx: u64) -> Self::Value;
    fn insert_value(&mut self, agg_val: Self::Value, elt: Self::Value, idx: u64) -> Self::Value;

    fn set_personality_fn(&mut self, personality: Self::Function);

    // These are used by everyone except msvc
    fn cleanup_landing_pad(&mut self, pers_fn: Self::Function) -> (Self::Value, Self::Value);
    fn filter_landing_pad(&mut self, pers_fn: Self::Function) -> (Self::Value, Self::Value);
    fn resume(&mut self, exn0: Self::Value, exn1: Self::Value);

    // These are used only by msvc
    fn cleanup_pad(&mut self, parent: Option<Self::Value>, args: &[Self::Value]) -> Self::Funclet;
    fn cleanup_ret(&mut self, funclet: &Self::Funclet, unwind: Option<Self::BasicBlock>);
    fn catch_pad(&mut self, parent: Self::Value, args: &[Self::Value]) -> Self::Funclet;
    fn catch_switch(
        &mut self,
        parent: Option<Self::Value>,
        unwind: Option<Self::BasicBlock>,
        handlers: &[Self::BasicBlock],
    ) -> Self::Value;

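    /// Emits a compare-and-swap on `dst`: if it currently contains `cmp`, it is
    /// replaced with `src`. Returns the previous value together with a boolean
    /// indicating whether the exchange succeeded.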
    fn atomic_cmpxchg(
        &mut self,
        dst: Self::Value,
        cmp: Self::Value,
        src: Self::Value,
        order: AtomicOrdering,
        failure_order: AtomicOrdering,
        weak: bool,
    ) -> (Self::Value, Self::Value);
    fn atomic_rmw(
        &mut self,
        op: AtomicRmwBinOp,
        dst: Self::Value,
        src: Self::Value,
        order: AtomicOrdering,
    ) -> Self::Value;
    fn atomic_fence(&mut self, order: AtomicOrdering, scope: SynchronizationScope);
    fn set_invariant_load(&mut self, load: Self::Value);

    /// Called for `StorageLive`
    fn lifetime_start(&mut self, ptr: Self::Value, size: Size);

    /// Called for `StorageDead`
    fn lifetime_end(&mut self, ptr: Self::Value, size: Size);

    fn call(
        &mut self,
        llty: Self::Type,
        fn_attrs: Option<&CodegenFnAttrs>,
        fn_abi: Option<&FnAbi<'tcx, Ty<'tcx>>>,
        llfn: Self::Value,
        args: &[Self::Value],
        funclet: Option<&Self::Funclet>,
        instance: Option<Instance<'tcx>>,
    ) -> Self::Value;
    fn zext(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;

    fn apply_attrs_to_cleanup_callsite(&mut self, llret: Self::Value);
}