// rustc_codegen_ssa/mir/rvalue.rs

use rustc_abi::{self as abi, FIRST_VARIANT};
use rustc_middle::ty::adjustment::PointerCoercion;
use rustc_middle::ty::layout::{HasTyCtxt, HasTypingEnv, LayoutOf, TyAndLayout};
use rustc_middle::ty::{self, Instance, Ty, TyCtxt};
use rustc_middle::{bug, mir, span_bug};
use rustc_session::config::OptLevel;
use tracing::{debug, instrument};

use super::operand::{OperandRef, OperandRefBuilder, OperandValue};
use super::place::{PlaceRef, PlaceValue, codegen_tag_value};
use super::{FunctionCx, LocalRef};
use crate::common::{IntPredicate, TypeKind};
use crate::traits::*;
use crate::{MemFlags, base};

impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
    #[instrument(level = "trace", skip(self, bx))]
    pub(crate) fn codegen_rvalue(
        &mut self,
        bx: &mut Bx,
        dest: PlaceRef<'tcx, Bx::Value>,
        rvalue: &mir::Rvalue<'tcx>,
    ) {
        match *rvalue {
            mir::Rvalue::Use(ref operand) => {
                let cg_operand = self.codegen_operand(bx, operand);
                // FIXME: consider not copying constants through stack. (Fixable by codegen'ing
                // constants into `OperandValue::Ref`; why don’t we do that yet if we don’t?)
                cg_operand.val.store(bx, dest);
            }

            mir::Rvalue::Cast(
                mir::CastKind::PointerCoercion(PointerCoercion::Unsize, _),
                ref source,
                _,
            ) => {
                // The destination necessarily contains a wide pointer, so if
                // it's a scalar pair, it's a wide pointer or newtype thereof.
                if bx.cx().is_backend_scalar_pair(dest.layout) {
                    // Into-coerce of a thin pointer to a wide pointer -- just
                    // use the operand path.
                    let temp = self.codegen_rvalue_operand(bx, rvalue);
                    temp.val.store(bx, dest);
                    return;
                }

                // Unsize of a nontrivial struct. I would prefer for
                // this to be eliminated by MIR building, but
                // `CoerceUnsized` can be passed by a where-clause,
                // so the (generic) MIR may not be able to expand it.
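                // (For example, a custom smart pointer with a `CoerceUnsized` impl
                // can reach this path when the coercion is only known through a
                // where-clause bound on a generic function.)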
                let operand = self.codegen_operand(bx, source);
                match operand.val {
                    OperandValue::Pair(..) | OperandValue::Immediate(_) => {
                        // Unsize from an immediate structure. We don't
                        // really need a temporary alloca here, but
                        // avoiding it would require us to have
                        // `coerce_unsized_into` use `extractvalue` to
                        // index into the struct, and this case isn't
                        // important enough for it.
                        debug!("codegen_rvalue: creating ugly alloca");
                        let scratch = PlaceRef::alloca(bx, operand.layout);
                        scratch.storage_live(bx);
                        operand.val.store(bx, scratch);
                        base::coerce_unsized_into(bx, scratch, dest);
                        scratch.storage_dead(bx);
                    }
                    OperandValue::Ref(val) => {
                        if val.llextra.is_some() {
                            bug!("unsized coercion on an unsized rvalue");
                        }
                        base::coerce_unsized_into(bx, val.with_type(operand.layout), dest);
                    }
                    OperandValue::ZeroSized => {
                        bug!("unsized coercion on a ZST rvalue");
                    }
                }
            }

            mir::Rvalue::Cast(mir::CastKind::Transmute, ref operand, _ty) => {
                let src = self.codegen_operand(bx, operand);
                self.codegen_transmute(bx, src, dest);
            }

            mir::Rvalue::Repeat(ref elem, count) => {
                // Do not generate the loop for zero-sized elements or empty arrays.
                if dest.layout.is_zst() {
                    return;
                }

                // When the element is a const with all bytes uninit, emit a single memset that
                // writes undef to the entire destination.
                if let mir::Operand::Constant(const_op) = elem {
                    let val = self.eval_mir_constant(const_op);
                    if val.all_bytes_uninit(self.cx.tcx()) {
                        let size = bx.const_usize(dest.layout.size.bytes());
                        bx.memset(
                            dest.val.llval,
                            bx.const_undef(bx.type_i8()),
                            size,
                            dest.val.align,
                            MemFlags::empty(),
                        );
                        return;
                    }
                }

                let cg_elem = self.codegen_operand(bx, elem);

                let try_init_all_same = |bx: &mut Bx, v| {
                    let start = dest.val.llval;
                    let size = bx.const_usize(dest.layout.size.bytes());

                    // Use llvm.memset.p0i8.* to initialize arrays whose bytes are all the same value
                    if let Some(int) = bx.cx().const_to_opt_u128(v, false) {
                        let bytes = &int.to_le_bytes()[..cg_elem.layout.size.bytes_usize()];
                        let first = bytes[0];
                        if bytes[1..].iter().all(|&b| b == first) {
                            let fill = bx.cx().const_u8(first);
                            bx.memset(start, fill, size, dest.val.align, MemFlags::empty());
                            return true;
                        }
                    }

                    // Use llvm.memset.p0i8.* to initialize byte arrays
                    let v = bx.from_immediate(v);
                    if bx.cx().val_ty(v) == bx.cx().type_i8() {
                        bx.memset(start, v, size, dest.val.align, MemFlags::empty());
                        return true;
                    }
                    false
                };
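                // For example, `[0u8; N]` and `[0x0101_0101u32; N]` can each become a
                // single memset, since every byte of the element is the same, whereas
                // something like `[0x1234u16; N]` cannot and instead goes through
                // `write_operand_repeatedly` below.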

                match cg_elem.val {
                    OperandValue::Immediate(v) => {
                        if try_init_all_same(bx, v) {
                            return;
                        }
                    }
                    _ => (),
                }

                let count = self
                    .monomorphize(count)
                    .try_to_target_usize(bx.tcx())
                    .expect("expected monomorphic const in codegen");

                bx.write_operand_repeatedly(cg_elem, count, dest);
            }

            // This implementation does field projection, so never use it for `RawPtr`,
            // which will always be fine with the `codegen_rvalue_operand` path below.
            mir::Rvalue::Aggregate(ref kind, ref operands)
                if !matches!(**kind, mir::AggregateKind::RawPtr(..)) =>
            {
                let (variant_index, variant_dest, active_field_index) = match **kind {
                    mir::AggregateKind::Adt(_, variant_index, _, _, active_field_index) => {
                        let variant_dest = dest.project_downcast(bx, variant_index);
                        (variant_index, variant_dest, active_field_index)
                    }
                    _ => (FIRST_VARIANT, dest, None),
                };
                if active_field_index.is_some() {
                    assert_eq!(operands.len(), 1);
                }
                for (i, operand) in operands.iter_enumerated() {
                    let op = self.codegen_operand(bx, operand);
                    // Do not generate stores and GEPs for zero-sized fields.
                    if !op.layout.is_zst() {
                        let field_index = active_field_index.unwrap_or(i);
                        let field = if let mir::AggregateKind::Array(_) = **kind {
                            let llindex = bx.cx().const_usize(field_index.as_u32().into());
                            variant_dest.project_index(bx, llindex)
                        } else {
                            variant_dest.project_field(bx, field_index.as_usize())
                        };
                        op.val.store(bx, field);
                    }
                }
                dest.codegen_set_discr(bx, variant_index);
            }

            _ => {
                let temp = self.codegen_rvalue_operand(bx, rvalue);
                temp.val.store(bx, dest);
            }
        }
    }

    /// Transmutes the `src` value to the destination type by writing it to `dst`.
    ///
    /// See also [`Self::codegen_transmute_operand`] for cases that can be done
    /// without needing a pre-allocated place for the destination.
    fn codegen_transmute(
        &mut self,
        bx: &mut Bx,
        src: OperandRef<'tcx, Bx::Value>,
        dst: PlaceRef<'tcx, Bx::Value>,
    ) {
        // The MIR validator enforces no unsized transmutes.
        assert!(src.layout.is_sized());
        assert!(dst.layout.is_sized());

        if src.layout.size != dst.layout.size
            || src.layout.is_uninhabited()
            || dst.layout.is_uninhabited()
        {
            // These cases are all UB to actually hit, so don't emit code for them.
            // (The size mismatches are reachable via `transmute_unchecked`.)
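            // (For instance, `transmute_unchecked::<u32, u64>` type-checks but is UB
            // to execute, so all we owe the backend here is well-formed IR.)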
            bx.unreachable_nonterminator();
        } else {
            // Since in this path we have a place anyway, we can store or copy to it,
            // making sure we use the destination place's alignment even if the
            // source would normally have a higher one.
            src.val.store(bx, dst.val.with_type(src.layout));
        }
    }

    /// Transmutes an `OperandValue` to another `OperandValue`.
    ///
    /// This is supported for all cases where the `cast` type is SSA,
    /// but for non-ZSTs with [`abi::BackendRepr::Memory`] it ICEs.
    pub(crate) fn codegen_transmute_operand(
        &mut self,
        bx: &mut Bx,
        operand: OperandRef<'tcx, Bx::Value>,
        cast: TyAndLayout<'tcx>,
    ) -> OperandValue<Bx::Value> {
        if let abi::BackendRepr::Memory { .. } = cast.backend_repr
            && !cast.is_zst()
        {
            span_bug!(self.mir.span, "Use `codegen_transmute` to transmute to {cast:?}");
        }

        // `Layout` is interned, so we can do a cheap check for things that are
        // exactly the same and thus don't need any handling.
        if abi::Layout::eq(&operand.layout.layout, &cast.layout) {
            return operand.val;
        }

        // Check for transmutes that are always UB.
        if operand.layout.size != cast.size
            || operand.layout.is_uninhabited()
            || cast.is_uninhabited()
        {
            bx.unreachable_nonterminator();

            // We still need to return a value of the appropriate type, but
            // it's already UB so do the easiest thing available.
            return OperandValue::poison(bx, cast);
        }

        // Transmuting to or from a pointer takes different methods, so we use this
        // to restrict the SimdVector case to types that can be `bitcast` between
        // each other.
        #[inline]
        fn vector_can_bitcast(x: abi::Scalar) -> bool {
            matches!(
                x,
                abi::Scalar::Initialized {
                    value: abi::Primitive::Int(..) | abi::Primitive::Float(..),
                    ..
                }
            )
        }
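        // (Vectors whose element is a pointer fail this check and instead take the
        // spill-to-memory fallback arm at the end of the match below.)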

        let cx = bx.cx();
        match (operand.val, operand.layout.backend_repr, cast.backend_repr) {
            _ if cast.is_zst() => OperandValue::ZeroSized,
            (OperandValue::Ref(source_place_val), abi::BackendRepr::Memory { .. }, _) => {
                assert_eq!(source_place_val.llextra, None);
                // The existing alignment is part of `source_place_val`,
                // so that alignment will be used, not `cast`'s.
                bx.load_operand(source_place_val.with_type(cast)).val
            }
            (
                OperandValue::Immediate(imm),
                abi::BackendRepr::Scalar(from_scalar),
                abi::BackendRepr::Scalar(to_scalar),
            ) if from_scalar.size(cx) == to_scalar.size(cx) => {
                OperandValue::Immediate(transmute_scalar(bx, imm, from_scalar, to_scalar))
            }
            (
                OperandValue::Immediate(imm),
                abi::BackendRepr::SimdVector { element: from_scalar, .. },
                abi::BackendRepr::SimdVector { element: to_scalar, .. },
            ) if vector_can_bitcast(from_scalar) && vector_can_bitcast(to_scalar) => {
                let to_backend_ty = bx.cx().immediate_backend_type(cast);
                OperandValue::Immediate(bx.bitcast(imm, to_backend_ty))
            }
            (
                OperandValue::Pair(imm_a, imm_b),
                abi::BackendRepr::ScalarPair(in_a, in_b),
                abi::BackendRepr::ScalarPair(out_a, out_b),
            ) if in_a.size(cx) == out_a.size(cx) && in_b.size(cx) == out_b.size(cx) => {
                OperandValue::Pair(
                    transmute_scalar(bx, imm_a, in_a, out_a),
                    transmute_scalar(bx, imm_b, in_b, out_b),
                )
            }
            _ => {
                // For any other potentially-tricky cases, make a temporary instead.
                // If anything else wants the target local to be in memory this won't
                // be hit, as `codegen_transmute` will get called directly. Thus this
                // is only for places where everything else wants the operand form,
                // and thus it's not worth making those places get it from memory.
                //
                // Notably, Scalar ⇌ ScalarPair cases go here to avoid padding
                // and endianness issues, as do SimdVector ones to avoid worrying
                // about things like f32x8 ⇌ ptrx4 that would need multiple steps.
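                // (For example, transmuting `u64` to `(u32, u32)` is a Scalar ⇌
                // ScalarPair case of equal size, so it is handled here by a
                // store-then-load through the temporary.)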
                let align = Ord::max(operand.layout.align.abi, cast.align.abi);
                let size = Ord::max(operand.layout.size, cast.size);
                let temp = PlaceValue::alloca(bx, size, align);
                bx.lifetime_start(temp.llval, size);
                operand.val.store(bx, temp.with_type(operand.layout));
                let val = bx.load_operand(temp.with_type(cast)).val;
                bx.lifetime_end(temp.llval, size);
                val
            }
        }
    }

    /// Cast one of the immediates from an [`OperandValue::Immediate`]
    /// or an [`OperandValue::Pair`] to an immediate of the target type.
    ///
    /// Returns `None` if the cast is not possible.
    fn cast_immediate(
        &self,
        bx: &mut Bx,
        mut imm: Bx::Value,
        from_scalar: abi::Scalar,
        from_backend_ty: Bx::Type,
        to_scalar: abi::Scalar,
        to_backend_ty: Bx::Type,
    ) -> Option<Bx::Value> {
        use abi::Primitive::*;

        // When scalars are passed by value, there's no metadata recording their
        // valid ranges. For example, `char`s are passed as just `i32`, with no
        // way for LLVM to know that they're 0x10FFFF at most. Thus we assume
        // the range of the input value too, not just the output range.
        assume_scalar_range(bx, imm, from_scalar, from_backend_ty, None);

        imm = match (from_scalar.primitive(), to_scalar.primitive()) {
            (Int(_, is_signed), Int(..)) => bx.intcast(imm, to_backend_ty, is_signed),
            (Float(_), Float(_)) => {
                let srcsz = bx.cx().float_width(from_backend_ty);
                let dstsz = bx.cx().float_width(to_backend_ty);
                if dstsz > srcsz {
                    bx.fpext(imm, to_backend_ty)
                } else if srcsz > dstsz {
                    bx.fptrunc(imm, to_backend_ty)
                } else {
                    imm
                }
            }
            (Int(_, is_signed), Float(_)) => {
                if is_signed {
                    bx.sitofp(imm, to_backend_ty)
                } else {
                    bx.uitofp(imm, to_backend_ty)
                }
            }
            (Pointer(..), Pointer(..)) => bx.pointercast(imm, to_backend_ty),
            (Int(_, is_signed), Pointer(..)) => {
                let usize_imm = bx.intcast(imm, bx.cx().type_isize(), is_signed);
                bx.inttoptr(usize_imm, to_backend_ty)
            }
            (Float(_), Int(_, is_signed)) => bx.cast_float_to_int(is_signed, imm, to_backend_ty),
            _ => return None,
        };
        Some(imm)
    }

    pub(crate) fn codegen_rvalue_operand(
        &mut self,
        bx: &mut Bx,
        rvalue: &mir::Rvalue<'tcx>,
    ) -> OperandRef<'tcx, Bx::Value> {
        match *rvalue {
            mir::Rvalue::Cast(ref kind, ref source, mir_cast_ty) => {
                let operand = self.codegen_operand(bx, source);
                debug!("cast operand is {:?}", operand);
                let cast = bx.cx().layout_of(self.monomorphize(mir_cast_ty));

                let val = match *kind {
                    mir::CastKind::PointerExposeProvenance => {
                        assert!(bx.cx().is_backend_immediate(cast));
                        let llptr = operand.immediate();
                        let llcast_ty = bx.cx().immediate_backend_type(cast);
                        let lladdr = bx.ptrtoint(llptr, llcast_ty);
                        OperandValue::Immediate(lladdr)
                    }
                    mir::CastKind::PointerCoercion(PointerCoercion::ReifyFnPointer, _) => {
                        match *operand.layout.ty.kind() {
                            ty::FnDef(def_id, args) => {
                                let instance = ty::Instance::resolve_for_fn_ptr(
                                    bx.tcx(),
                                    bx.typing_env(),
                                    def_id,
                                    args,
                                )
                                .unwrap();
                                OperandValue::Immediate(bx.get_fn_addr(instance))
                            }
                            _ => bug!("{} cannot be reified to a fn ptr", operand.layout.ty),
                        }
                    }
                    mir::CastKind::PointerCoercion(PointerCoercion::ClosureFnPointer(_), _) => {
                        match *operand.layout.ty.kind() {
                            ty::Closure(def_id, args) => {
                                let instance = Instance::resolve_closure(
                                    bx.cx().tcx(),
                                    def_id,
                                    args,
                                    ty::ClosureKind::FnOnce,
                                );
                                OperandValue::Immediate(bx.cx().get_fn_addr(instance))
                            }
                            _ => bug!("{} cannot be cast to a fn ptr", operand.layout.ty),
                        }
                    }
                    mir::CastKind::PointerCoercion(PointerCoercion::UnsafeFnPointer, _) => {
                        // This is a no-op at the LLVM level.
                        operand.val
                    }
                    mir::CastKind::PointerCoercion(PointerCoercion::Unsize, _) => {
                        assert!(bx.cx().is_backend_scalar_pair(cast));
                        let (lldata, llextra) = operand.val.pointer_parts();
                        let (lldata, llextra) =
                            base::unsize_ptr(bx, lldata, operand.layout.ty, cast.ty, llextra);
                        OperandValue::Pair(lldata, llextra)
                    }
                    mir::CastKind::PointerCoercion(
                        PointerCoercion::MutToConstPointer | PointerCoercion::ArrayToPointer,
                        _,
                    ) => {
                        bug!("{kind:?} is for borrowck, and should never appear in codegen");
                    }
                    mir::CastKind::PtrToPtr
                        if bx.cx().is_backend_scalar_pair(operand.layout) =>
                    {
                        if let OperandValue::Pair(data_ptr, meta) = operand.val {
                            if bx.cx().is_backend_scalar_pair(cast) {
                                OperandValue::Pair(data_ptr, meta)
                            } else {
                                // Cast of wide-ptr to thin-ptr is an extraction of data-ptr.
                                OperandValue::Immediate(data_ptr)
                            }
                        } else {
                            bug!("unexpected non-pair operand");
                        }
                    }
                    mir::CastKind::IntToInt
                    | mir::CastKind::FloatToInt
                    | mir::CastKind::FloatToFloat
                    | mir::CastKind::IntToFloat
                    | mir::CastKind::PtrToPtr
                    | mir::CastKind::FnPtrToPtr
                    // Since int2ptr can have arbitrary integer types as input (so we have to do
                    // sign extension and all that), it is currently best handled in the same code
                    // path as the other integer-to-X casts.
                    | mir::CastKind::PointerWithExposedProvenance => {
                        let imm = operand.immediate();
                        let abi::BackendRepr::Scalar(from_scalar) = operand.layout.backend_repr else {
                            bug!("Found non-scalar for operand {operand:?}");
                        };
                        let from_backend_ty = bx.cx().immediate_backend_type(operand.layout);

                        assert!(bx.cx().is_backend_immediate(cast));
                        let to_backend_ty = bx.cx().immediate_backend_type(cast);
                        if operand.layout.is_uninhabited() {
                            let val = OperandValue::Immediate(bx.cx().const_poison(to_backend_ty));
                            return OperandRef { val, layout: cast };
                        }
                        let abi::BackendRepr::Scalar(to_scalar) = cast.layout.backend_repr else {
                            bug!("Found non-scalar for cast {cast:?}");
                        };

                        self.cast_immediate(bx, imm, from_scalar, from_backend_ty, to_scalar, to_backend_ty)
                            .map(OperandValue::Immediate)
                            .unwrap_or_else(|| {
                                bug!("Unsupported cast of {operand:?} to {cast:?}");
                            })
                    }
                    mir::CastKind::Transmute => {
                        self.codegen_transmute_operand(bx, operand, cast)
                    }
                };
                OperandRef { val, layout: cast }
            }

            mir::Rvalue::Ref(_, bk, place) => {
                let mk_ref = move |tcx: TyCtxt<'tcx>, ty: Ty<'tcx>| {
                    Ty::new_ref(tcx, tcx.lifetimes.re_erased, ty, bk.to_mutbl_lossy())
                };
                self.codegen_place_to_pointer(bx, place, mk_ref)
            }

            mir::Rvalue::CopyForDeref(place) => {
                self.codegen_operand(bx, &mir::Operand::Copy(place))
            }
            mir::Rvalue::RawPtr(kind, place) => {
                let mk_ptr = move |tcx: TyCtxt<'tcx>, ty: Ty<'tcx>| {
                    Ty::new_ptr(tcx, ty, kind.to_mutbl_lossy())
                };
                self.codegen_place_to_pointer(bx, place, mk_ptr)
            }

            mir::Rvalue::Len(place) => {
                let size = self.evaluate_array_len(bx, place);
                OperandRef {
                    val: OperandValue::Immediate(size),
                    layout: bx.cx().layout_of(bx.tcx().types.usize),
                }
            }

            mir::Rvalue::BinaryOp(op_with_overflow, box (ref lhs, ref rhs))
                if let Some(op) = op_with_overflow.overflowing_to_wrapping() =>
            {
                let lhs = self.codegen_operand(bx, lhs);
                let rhs = self.codegen_operand(bx, rhs);
                let result = self.codegen_scalar_checked_binop(
                    bx,
                    op,
                    lhs.immediate(),
                    rhs.immediate(),
                    lhs.layout.ty,
                );
                let val_ty = op.ty(bx.tcx(), lhs.layout.ty, rhs.layout.ty);
                let operand_ty = Ty::new_tup(bx.tcx(), &[val_ty, bx.tcx().types.bool]);
                OperandRef { val: result, layout: bx.cx().layout_of(operand_ty) }
            }
            mir::Rvalue::BinaryOp(op, box (ref lhs, ref rhs)) => {
                let lhs = self.codegen_operand(bx, lhs);
                let rhs = self.codegen_operand(bx, rhs);
                let llresult = match (lhs.val, rhs.val) {
                    (
                        OperandValue::Pair(lhs_addr, lhs_extra),
                        OperandValue::Pair(rhs_addr, rhs_extra),
                    ) => self.codegen_wide_ptr_binop(
                        bx,
                        op,
                        lhs_addr,
                        lhs_extra,
                        rhs_addr,
                        rhs_extra,
                        lhs.layout.ty,
                    ),

                    (OperandValue::Immediate(lhs_val), OperandValue::Immediate(rhs_val)) => self
                        .codegen_scalar_binop(
                            bx,
                            op,
                            lhs_val,
                            rhs_val,
                            lhs.layout.ty,
                            rhs.layout.ty,
                        ),

                    _ => bug!(),
                };
                OperandRef {
                    val: OperandValue::Immediate(llresult),
                    layout: bx.cx().layout_of(op.ty(bx.tcx(), lhs.layout.ty, rhs.layout.ty)),
                }
            }

            mir::Rvalue::UnaryOp(op, ref operand) => {
                let operand = self.codegen_operand(bx, operand);
                let is_float = operand.layout.ty.is_floating_point();
                let (val, layout) = match op {
                    mir::UnOp::Not => {
                        let llval = bx.not(operand.immediate());
                        (OperandValue::Immediate(llval), operand.layout)
                    }
                    mir::UnOp::Neg => {
                        let llval = if is_float {
                            bx.fneg(operand.immediate())
                        } else {
                            bx.neg(operand.immediate())
                        };
                        (OperandValue::Immediate(llval), operand.layout)
                    }
                    mir::UnOp::PtrMetadata => {
                        assert!(operand.layout.ty.is_raw_ptr() || operand.layout.ty.is_ref());
                        let (_, meta) = operand.val.pointer_parts();
                        assert_eq!(operand.layout.fields.count() > 1, meta.is_some());
                        if let Some(meta) = meta {
                            (OperandValue::Immediate(meta), operand.layout.field(self.cx, 1))
                        } else {
                            (OperandValue::ZeroSized, bx.cx().layout_of(bx.tcx().types.unit))
                        }
                    }
                };
                assert!(
                    val.is_expected_variant_for_type(self.cx, layout),
                    "Made wrong variant {val:?} for type {layout:?}",
                );
                OperandRef { val, layout }
            }

            mir::Rvalue::Discriminant(ref place) => {
                let discr_ty = rvalue.ty(self.mir, bx.tcx());
                let discr_ty = self.monomorphize(discr_ty);
                let operand = self.codegen_consume(bx, place.as_ref());
                let discr = operand.codegen_get_discr(self, bx, discr_ty);
                OperandRef {
                    val: OperandValue::Immediate(discr),
                    layout: self.cx.layout_of(discr_ty),
                }
            }

            mir::Rvalue::NullaryOp(ref null_op, ty) => {
                let ty = self.monomorphize(ty);
                let layout = bx.cx().layout_of(ty);
                let val = match null_op {
                    mir::NullOp::SizeOf => {
                        assert!(bx.cx().type_is_sized(ty));
                        let val = layout.size.bytes();
                        bx.cx().const_usize(val)
                    }
                    mir::NullOp::AlignOf => {
                        assert!(bx.cx().type_is_sized(ty));
                        let val = layout.align.abi.bytes();
                        bx.cx().const_usize(val)
                    }
                    mir::NullOp::OffsetOf(fields) => {
                        let val = bx
                            .tcx()
                            .offset_of_subfield(bx.typing_env(), layout, fields.iter())
                            .bytes();
                        bx.cx().const_usize(val)
                    }
                    mir::NullOp::UbChecks => {
                        let val = bx.tcx().sess.ub_checks();
                        bx.cx().const_bool(val)
                    }
                    mir::NullOp::ContractChecks => {
                        let val = bx.tcx().sess.contract_checks();
                        bx.cx().const_bool(val)
                    }
                };
                let tcx = self.cx.tcx();
                OperandRef {
                    val: OperandValue::Immediate(val),
                    layout: self.cx.layout_of(null_op.ty(tcx)),
                }
            }

            mir::Rvalue::ThreadLocalRef(def_id) => {
                assert!(bx.cx().tcx().is_static(def_id));
                let layout = bx.layout_of(bx.cx().tcx().static_ptr_ty(def_id, bx.typing_env()));
                let static_ = if !def_id.is_local() && bx.cx().tcx().needs_thread_local_shim(def_id)
                {
                    let instance = ty::Instance {
                        def: ty::InstanceKind::ThreadLocalShim(def_id),
                        args: ty::GenericArgs::empty(),
                    };
                    let fn_ptr = bx.get_fn_addr(instance);
                    let fn_abi = bx.fn_abi_of_instance(instance, ty::List::empty());
                    let fn_ty = bx.fn_decl_backend_type(fn_abi);
                    let fn_attrs = if bx.tcx().def_kind(instance.def_id()).has_codegen_attrs() {
                        Some(bx.tcx().codegen_instance_attrs(instance.def))
                    } else {
                        None
                    };
                    bx.call(
                        fn_ty,
                        fn_attrs.as_deref(),
                        Some(fn_abi),
                        fn_ptr,
                        &[],
                        None,
                        Some(instance),
                    )
                } else {
                    bx.get_static(def_id)
                };
                OperandRef { val: OperandValue::Immediate(static_), layout }
            }
            mir::Rvalue::Use(ref operand) => self.codegen_operand(bx, operand),
            mir::Rvalue::Repeat(ref elem, len_const) => {
                // All arrays have `BackendRepr::Memory`, so only the ZST cases
                // end up here. Anything else forces the destination local to be
                // `Memory`, and thus ends up handled in `codegen_rvalue` instead.
                let operand = self.codegen_operand(bx, elem);
                let array_ty = Ty::new_array_with_const_len(bx.tcx(), operand.layout.ty, len_const);
                let array_ty = self.monomorphize(array_ty);
                let array_layout = bx.layout_of(array_ty);
                assert!(array_layout.is_zst());
                OperandRef { val: OperandValue::ZeroSized, layout: array_layout }
            }
            mir::Rvalue::Aggregate(ref kind, ref fields) => {
                let (variant_index, active_field_index) = match **kind {
                    mir::AggregateKind::Adt(_, variant_index, _, _, active_field_index) => {
                        (variant_index, active_field_index)
                    }
                    _ => (FIRST_VARIANT, None),
                };

                let ty = rvalue.ty(self.mir, self.cx.tcx());
                let ty = self.monomorphize(ty);
                let layout = self.cx.layout_of(ty);

                let mut builder = OperandRefBuilder::new(layout);
                for (field_idx, field) in fields.iter_enumerated() {
                    let op = self.codegen_operand(bx, field);
                    let fi = active_field_index.unwrap_or(field_idx);
                    builder.insert_field(bx, variant_index, fi, op);
                }

                let tag_result = codegen_tag_value(self.cx, variant_index, layout);
                match tag_result {
                    Err(super::place::UninhabitedVariantError) => {
                        // Like codegen_set_discr we use a sound abort, but could
                        // potentially `unreachable` or just return the poison for
                        // more optimizability, if that turns out to be helpful.
                        bx.abort();
                        let val = OperandValue::poison(bx, layout);
                        OperandRef { val, layout }
                    }
                    Ok(maybe_tag_value) => {
                        if let Some((tag_field, tag_imm)) = maybe_tag_value {
                            builder.insert_imm(tag_field, tag_imm);
                        }
                        builder.build(bx.cx())
                    }
                }
            }
            mir::Rvalue::ShallowInitBox(ref operand, content_ty) => {
                let operand = self.codegen_operand(bx, operand);
                let val = operand.immediate();

                let content_ty = self.monomorphize(content_ty);
                let box_layout = bx.cx().layout_of(Ty::new_box(bx.tcx(), content_ty));

                OperandRef { val: OperandValue::Immediate(val), layout: box_layout }
            }
            mir::Rvalue::WrapUnsafeBinder(ref operand, binder_ty) => {
                let operand = self.codegen_operand(bx, operand);
                let binder_ty = self.monomorphize(binder_ty);
                let layout = bx.cx().layout_of(binder_ty);
                OperandRef { val: operand.val, layout }
            }
        }
    }

    fn evaluate_array_len(&mut self, bx: &mut Bx, place: mir::Place<'tcx>) -> Bx::Value {
        // ZSTs are passed as operands and require special handling
        // because codegen_place() panics if the local is an operand.
        if let Some(index) = place.as_local()
            && let LocalRef::Operand(op) = self.locals[index]
            && let ty::Array(_, n) = op.layout.ty.kind()
        {
            let n = n.try_to_target_usize(bx.tcx()).expect("expected monomorphic const in codegen");
            return bx.cx().const_usize(n);
        }
        // Use the common size calculation for non-zero-sized types.
        let cg_value = self.codegen_place(bx, place.as_ref());
        cg_value.len(bx.cx())
    }

    /// Codegen an `Rvalue::RawPtr` or `Rvalue::Ref`
    fn codegen_place_to_pointer(
        &mut self,
        bx: &mut Bx,
        place: mir::Place<'tcx>,
        mk_ptr_ty: impl FnOnce(TyCtxt<'tcx>, Ty<'tcx>) -> Ty<'tcx>,
    ) -> OperandRef<'tcx, Bx::Value> {
        let cg_place = self.codegen_place(bx, place.as_ref());
        let val = cg_place.val.address();

        let ty = cg_place.layout.ty;
        assert!(
            if bx.cx().tcx().type_has_metadata(ty, bx.cx().typing_env()) {
                matches!(val, OperandValue::Pair(..))
            } else {
                matches!(val, OperandValue::Immediate(..))
            },
            "Address of place was unexpectedly {val:?} for pointee type {ty:?}",
        );

        OperandRef { val, layout: self.cx.layout_of(mk_ptr_ty(self.cx.tcx(), ty)) }
    }

    fn codegen_scalar_binop(
        &mut self,
        bx: &mut Bx,
        op: mir::BinOp,
        lhs: Bx::Value,
        rhs: Bx::Value,
        lhs_ty: Ty<'tcx>,
        rhs_ty: Ty<'tcx>,
    ) -> Bx::Value {
        let is_float = lhs_ty.is_floating_point();
        let is_signed = lhs_ty.is_signed();
        match op {
            mir::BinOp::Add => {
                if is_float {
                    bx.fadd(lhs, rhs)
                } else {
                    bx.add(lhs, rhs)
                }
            }
            mir::BinOp::AddUnchecked => {
                if is_signed {
                    bx.unchecked_sadd(lhs, rhs)
                } else {
                    bx.unchecked_uadd(lhs, rhs)
                }
            }
            mir::BinOp::Sub => {
                if is_float {
                    bx.fsub(lhs, rhs)
                } else {
                    bx.sub(lhs, rhs)
                }
            }
            mir::BinOp::SubUnchecked => {
                if is_signed {
                    bx.unchecked_ssub(lhs, rhs)
                } else {
                    bx.unchecked_usub(lhs, rhs)
                }
            }
            mir::BinOp::Mul => {
                if is_float {
                    bx.fmul(lhs, rhs)
                } else {
                    bx.mul(lhs, rhs)
                }
            }
            mir::BinOp::MulUnchecked => {
                if is_signed {
                    bx.unchecked_smul(lhs, rhs)
                } else {
                    bx.unchecked_umul(lhs, rhs)
                }
            }
            mir::BinOp::Div => {
                if is_float {
                    bx.fdiv(lhs, rhs)
                } else if is_signed {
                    bx.sdiv(lhs, rhs)
                } else {
                    bx.udiv(lhs, rhs)
                }
            }
            mir::BinOp::Rem => {
                if is_float {
                    bx.frem(lhs, rhs)
                } else if is_signed {
                    bx.srem(lhs, rhs)
                } else {
                    bx.urem(lhs, rhs)
                }
            }
            mir::BinOp::BitOr => bx.or(lhs, rhs),
            mir::BinOp::BitAnd => bx.and(lhs, rhs),
            mir::BinOp::BitXor => bx.xor(lhs, rhs),
            mir::BinOp::Offset => {
                let pointee_type = lhs_ty
                    .builtin_deref(true)
                    .unwrap_or_else(|| bug!("deref of non-pointer {:?}", lhs_ty));
                let pointee_layout = bx.cx().layout_of(pointee_type);
                if pointee_layout.is_zst() {
                    // `Offset` works in terms of the size of the pointee,
                    // so offsetting a pointer to a ZST is a no-op.
                    lhs
                } else {
                    let llty = bx.cx().backend_type(pointee_layout);
                    if !rhs_ty.is_signed() {
                        bx.inbounds_nuw_gep(llty, lhs, &[rhs])
                    } else {
                        bx.inbounds_gep(llty, lhs, &[rhs])
                    }
                }
            }
            mir::BinOp::Shl | mir::BinOp::ShlUnchecked => {
                let rhs = base::build_shift_expr_rhs(bx, lhs, rhs, op == mir::BinOp::ShlUnchecked);
                bx.shl(lhs, rhs)
            }
            mir::BinOp::Shr | mir::BinOp::ShrUnchecked => {
                let rhs = base::build_shift_expr_rhs(bx, lhs, rhs, op == mir::BinOp::ShrUnchecked);
                if is_signed { bx.ashr(lhs, rhs) } else { bx.lshr(lhs, rhs) }
            }
            mir::BinOp::Ne
            | mir::BinOp::Lt
            | mir::BinOp::Gt
            | mir::BinOp::Eq
            | mir::BinOp::Le
            | mir::BinOp::Ge => {
                if is_float {
                    bx.fcmp(base::bin_op_to_fcmp_predicate(op), lhs, rhs)
                } else {
                    bx.icmp(base::bin_op_to_icmp_predicate(op, is_signed), lhs, rhs)
                }
            }
            mir::BinOp::Cmp => {
                use std::cmp::Ordering;
                assert!(!is_float);
                if let Some(value) = bx.three_way_compare(lhs_ty, lhs, rhs) {
                    return value;
                }
                let pred = |op| base::bin_op_to_icmp_predicate(op, is_signed);
                if bx.cx().tcx().sess.opts.optimize == OptLevel::No {
                    // FIXME: This actually generates tighter assembly, and is a classic trick
                    // <https://graphics.stanford.edu/~seander/bithacks.html#CopyIntegerSign>
                    // However, as of 2023-11 it optimizes worse in things like derived
                    // `PartialOrd`, so only use it in debug for now. Once LLVM can handle it
                    // better (see <https://github.com/llvm/llvm-project/issues/73417>), it'll
                    // be worth trying it in optimized builds as well.
                    let is_gt = bx.icmp(pred(mir::BinOp::Gt), lhs, rhs);
                    let gtext = bx.zext(is_gt, bx.type_i8());
                    let is_lt = bx.icmp(pred(mir::BinOp::Lt), lhs, rhs);
                    let ltext = bx.zext(is_lt, bx.type_i8());
                    bx.unchecked_ssub(gtext, ltext)
                } else {
                    // These operations are those expected by `tests/codegen-llvm/integer-cmp.rs`,
                    // from <https://github.com/rust-lang/rust/pull/63767>.
                    let is_lt = bx.icmp(pred(mir::BinOp::Lt), lhs, rhs);
                    let is_ne = bx.icmp(pred(mir::BinOp::Ne), lhs, rhs);
                    let ge = bx.select(
                        is_ne,
                        bx.cx().const_i8(Ordering::Greater as i8),
                        bx.cx().const_i8(Ordering::Equal as i8),
                    );
                    bx.select(is_lt, bx.cx().const_i8(Ordering::Less as i8), ge)
                }
            }
            mir::BinOp::AddWithOverflow
            | mir::BinOp::SubWithOverflow
            | mir::BinOp::MulWithOverflow => {
                bug!("{op:?} needs to return a pair, so call codegen_scalar_checked_binop instead")
            }
        }
    }

    fn codegen_wide_ptr_binop(
        &mut self,
        bx: &mut Bx,
        op: mir::BinOp,
        lhs_addr: Bx::Value,
        lhs_extra: Bx::Value,
        rhs_addr: Bx::Value,
        rhs_extra: Bx::Value,
        _input_ty: Ty<'tcx>,
    ) -> Bx::Value {
        match op {
            mir::BinOp::Eq => {
                let lhs = bx.icmp(IntPredicate::IntEQ, lhs_addr, rhs_addr);
                let rhs = bx.icmp(IntPredicate::IntEQ, lhs_extra, rhs_extra);
                bx.and(lhs, rhs)
            }
            mir::BinOp::Ne => {
                let lhs = bx.icmp(IntPredicate::IntNE, lhs_addr, rhs_addr);
                let rhs = bx.icmp(IntPredicate::IntNE, lhs_extra, rhs_extra);
                bx.or(lhs, rhs)
            }
            mir::BinOp::Le | mir::BinOp::Lt | mir::BinOp::Ge | mir::BinOp::Gt => {
                // a OP b ~ a.0 STRICT(OP) b.0 | (a.0 == b.0 && a.1 OP b.1)
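                // (For a `*const [T]`, for instance, this compares the data pointers
                // first and only falls back to comparing the lengths when the
                // addresses are equal: lexicographic order over the (addr, extra) pair.)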
                let (op, strict_op) = match op {
                    mir::BinOp::Lt => (IntPredicate::IntULT, IntPredicate::IntULT),
                    mir::BinOp::Le => (IntPredicate::IntULE, IntPredicate::IntULT),
                    mir::BinOp::Gt => (IntPredicate::IntUGT, IntPredicate::IntUGT),
                    mir::BinOp::Ge => (IntPredicate::IntUGE, IntPredicate::IntUGT),
                    _ => bug!(),
                };
                let lhs = bx.icmp(strict_op, lhs_addr, rhs_addr);
                let and_lhs = bx.icmp(IntPredicate::IntEQ, lhs_addr, rhs_addr);
                let and_rhs = bx.icmp(op, lhs_extra, rhs_extra);
                let rhs = bx.and(and_lhs, and_rhs);
                bx.or(lhs, rhs)
            }
            _ => {
                bug!("unexpected wide ptr binop");
            }
        }
    }

    fn codegen_scalar_checked_binop(
        &mut self,
        bx: &mut Bx,
        op: mir::BinOp,
        lhs: Bx::Value,
        rhs: Bx::Value,
        input_ty: Ty<'tcx>,
    ) -> OperandValue<Bx::Value> {
        let (val, of) = match op {
            // These are checked using intrinsics
            mir::BinOp::Add | mir::BinOp::Sub | mir::BinOp::Mul => {
                let oop = match op {
                    mir::BinOp::Add => OverflowOp::Add,
                    mir::BinOp::Sub => OverflowOp::Sub,
                    mir::BinOp::Mul => OverflowOp::Mul,
                    _ => unreachable!(),
                };
                bx.checked_binop(oop, input_ty, lhs, rhs)
            }
            _ => bug!("Operator `{:?}` is not a checkable operator", op),
        };

        OperandValue::Pair(val, of)
    }
}

/// Transmutes a single scalar value `imm` from `from_scalar` to `to_scalar`.
///
/// This is expected to be in *immediate* form, as seen in [`OperandValue::Immediate`]
/// or [`OperandValue::Pair`] (so `i1` for bools, not `i8`, for example).
///
/// ICEs if the passed-in `imm` is not a value of the expected type for
/// `from_scalar`, such as if it's a vector or a pair.
pub(super) fn transmute_scalar<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
    bx: &mut Bx,
    mut imm: Bx::Value,
    from_scalar: abi::Scalar,
    to_scalar: abi::Scalar,
) -> Bx::Value {
    assert_eq!(from_scalar.size(bx.cx()), to_scalar.size(bx.cx()));
    let imm_ty = bx.cx().val_ty(imm);
    assert_ne!(
        bx.cx().type_kind(imm_ty),
        TypeKind::Vector,
        "Vector type {imm_ty:?} not allowed in transmute_scalar {from_scalar:?} -> {to_scalar:?}"
    );

    // While optimizations will remove no-op transmutes, they might still be
    // present in debug builds, or for transmutes that aren't no-ops in MIR
    // because they change the Rust type but not the underlying layout/niche.
    if from_scalar == to_scalar {
        return imm;
    }

    use abi::Primitive::*;
    imm = bx.from_immediate(imm);

    let from_backend_ty = bx.cx().type_from_scalar(from_scalar);
    debug_assert_eq!(bx.cx().val_ty(imm), from_backend_ty);
    let to_backend_ty = bx.cx().type_from_scalar(to_scalar);

    // If we have a scalar, we must already know its range. Either
    //
    // 1) It's a parameter with `range` parameter metadata,
    // 2) It's something we `load`ed with `!range` metadata, or
    // 3) After a transmute we `assume`d the range (see below).
    //
    // That said, last time we tried removing this, it didn't actually help
    // the rustc-perf results, so might as well keep doing it
    // <https://github.com/rust-lang/rust/pull/135610#issuecomment-2599275182>
    assume_scalar_range(bx, imm, from_scalar, from_backend_ty, Some(&to_scalar));

    imm = match (from_scalar.primitive(), to_scalar.primitive()) {
        (Int(..) | Float(_), Int(..) | Float(_)) => bx.bitcast(imm, to_backend_ty),
        (Pointer(..), Pointer(..)) => bx.pointercast(imm, to_backend_ty),
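        // A GEP off the null pointer rather than an `inttoptr`, presumably so the
        // result carries no provenance, unlike the pointer-to-int direction below
        // which (per the FIXMEs) currently has to expose it.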
        (Int(..), Pointer(..)) => bx.ptradd(bx.const_null(bx.type_ptr()), imm),
        (Pointer(..), Int(..)) => {
            // FIXME: this exposes the provenance, which shouldn't be necessary.
            bx.ptrtoint(imm, to_backend_ty)
        }
        (Float(_), Pointer(..)) => {
            let int_imm = bx.bitcast(imm, bx.cx().type_isize());
            bx.ptradd(bx.const_null(bx.type_ptr()), int_imm)
        }
        (Pointer(..), Float(_)) => {
            // FIXME: this exposes the provenance, which shouldn't be necessary.
            let int_imm = bx.ptrtoint(imm, bx.cx().type_isize());
            bx.bitcast(int_imm, to_backend_ty)
        }
    };

    debug_assert_eq!(bx.cx().val_ty(imm), to_backend_ty);

    // This `assume` remains important for cases like (a conceptual)
    //    transmute::<u32, NonZeroU32>(x) == 0
    // since it's never passed to something with parameter metadata (especially
    // after MIR inlining) so the only way to tell the backend about the
    // constraint that the `transmute` introduced is to `assume` it.
    assume_scalar_range(bx, imm, to_scalar, to_backend_ty, Some(&from_scalar));

    imm = bx.to_immediate_scalar(imm, to_scalar);
    imm
}

/// Emits an `assume` call that `imm`'s value is within the known range of `scalar`.
///
/// If `known` is `Some`, only emits the assume if it's more specific than
/// whatever is already known from the range of *that* scalar.
fn assume_scalar_range<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
    bx: &mut Bx,
    imm: Bx::Value,
    scalar: abi::Scalar,
    backend_ty: Bx::Type,
    known: Option<&abi::Scalar>,
) {
    if matches!(bx.cx().sess().opts.optimize, OptLevel::No) {
        return;
    }

    match (scalar, known) {
        (abi::Scalar::Union { .. }, _) => return,
        (_, None) => {
            if scalar.is_always_valid(bx.cx()) {
                return;
            }
        }
        (abi::Scalar::Initialized { valid_range, .. }, Some(known)) => {
            let known_range = known.valid_range(bx.cx());
            if valid_range.contains_range(known_range, scalar.size(bx.cx())) {
                return;
            }
        }
    }

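    // For example, a `char` produced by a transmute from `u32` reaches this point
    // with a valid range of `0..=0x10FFFF`, which the integer arm below turns into
    // an `assume` on the immediate.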
    match scalar.primitive() {
        abi::Primitive::Int(..) => {
            let range = scalar.valid_range(bx.cx());
            bx.assume_integer_range(imm, backend_ty, range);
        }
        abi::Primitive::Pointer(abi::AddressSpace::ZERO)
            if !scalar.valid_range(bx.cx()).contains(0) =>
        {
            bx.assume_nonnull(imm);
        }
        abi::Primitive::Pointer(..) | abi::Primitive::Float(..) => {}
    }
}