use rustc_abi::{self as abi, FIRST_VARIANT};
use rustc_middle::ty::adjustment::PointerCoercion;
use rustc_middle::ty::layout::{HasTyCtxt, HasTypingEnv, LayoutOf, TyAndLayout};
use rustc_middle::ty::{self, Instance, Ty, TyCtxt};
use rustc_middle::{bug, mir, span_bug};
use rustc_session::config::OptLevel;
use tracing::{debug, instrument};

use super::operand::{OperandRef, OperandRefBuilder, OperandValue};
use super::place::{PlaceRef, PlaceValue, codegen_tag_value};
use super::{FunctionCx, LocalRef};
use crate::common::{IntPredicate, TypeKind};
use crate::traits::*;
use crate::{MemFlags, base};

impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
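    /// Codegens `rvalue`, storing the result directly into the place `dest`.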
    #[instrument(level = "trace", skip(self, bx))]
    pub(crate) fn codegen_rvalue(
        &mut self,
        bx: &mut Bx,
        dest: PlaceRef<'tcx, Bx::Value>,
        rvalue: &mir::Rvalue<'tcx>,
    ) {
        match *rvalue {
            mir::Rvalue::Use(ref operand) => {
                let cg_operand = self.codegen_operand(bx, operand);
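                // FIXME: consider not copying constants through the stack.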
                cg_operand.val.store(bx, dest);
            }

            mir::Rvalue::Cast(
                mir::CastKind::PointerCoercion(PointerCoercion::Unsize, _),
                ref source,
                _,
            ) => {
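                // The destination necessarily contains a wide pointer, so if
                // it's a scalar pair, it's a wide pointer or newtype thereof.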
                if bx.cx().is_backend_scalar_pair(dest.layout) {
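                    // Into-coerce of a thin pointer to a wide pointer, just
                    // use the operand path.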
                    let temp = self.codegen_rvalue_operand(bx, rvalue);
                    temp.val.store(bx, dest);
                    return;
                }

                let operand = self.codegen_operand(bx, source);
                match operand.val {
                    OperandValue::Pair(..) | OperandValue::Immediate(_) => {
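                        // Unsize from an immediate structure. We don't
                        // really need a temporary alloca here, but
                        // avoiding it would require us to have
                        // `coerce_unsized_into` use `extractvalue` to
                        // index into the struct, and this case isn't
                        // important enough for it.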
                        debug!("codegen_rvalue: creating ugly alloca");
                        let scratch = PlaceRef::alloca(bx, operand.layout);
                        scratch.storage_live(bx);
                        operand.val.store(bx, scratch);
                        base::coerce_unsized_into(bx, scratch, dest);
                        scratch.storage_dead(bx);
                    }
                    OperandValue::Ref(val) => {
                        if val.llextra.is_some() {
                            bug!("unsized coercion on an unsized rvalue");
                        }
                        base::coerce_unsized_into(bx, val.with_type(operand.layout), dest);
                    }
                    OperandValue::ZeroSized => {
                        bug!("unsized coercion on a ZST rvalue");
                    }
                }
            }

            mir::Rvalue::Cast(mir::CastKind::Transmute, ref operand, _ty) => {
                let src = self.codegen_operand(bx, operand);
                self.codegen_transmute(bx, src, dest);
            }

            mir::Rvalue::Repeat(ref elem, count) => {
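                // Do not emit the loop for a ZST destination; there is nothing to store.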
                if dest.layout.is_zst() {
                    return;
                }

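                // If the element is a constant whose bytes are all uninitialized,
                // one big undef `memset` initializes the whole destination.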
                if let mir::Operand::Constant(const_op) = elem {
                    let val = self.eval_mir_constant(const_op);
                    if val.all_bytes_uninit(self.cx.tcx()) {
                        let size = bx.const_usize(dest.layout.size.bytes());
                        bx.memset(
                            dest.val.llval,
                            bx.const_undef(bx.type_i8()),
                            size,
                            dest.val.align,
                            MemFlags::empty(),
                        );
                        return;
                    }
                }

                let cg_elem = self.codegen_operand(bx, elem);

                let try_init_all_same = |bx: &mut Bx, v| {
                    let start = dest.val.llval;
                    let size = bx.const_usize(dest.layout.size.bytes());

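                    // If the value is a constant integer whose bytes are all the
                    // same, the whole array can be filled with one `memset`.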
                    if let Some(int) = bx.cx().const_to_opt_u128(v, false) {
                        let bytes = &int.to_le_bytes()[..cg_elem.layout.size.bytes_usize()];
                        let first = bytes[0];
                        if bytes[1..].iter().all(|&b| b == first) {
                            let fill = bx.cx().const_u8(first);
                            bx.memset(start, fill, size, dest.val.align, MemFlags::empty());
                            return true;
                        }
                    }

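                    // Otherwise, a byte-sized immediate can still be stored with `memset`.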
                    let v = bx.from_immediate(v);
                    if bx.cx().val_ty(v) == bx.cx().type_i8() {
                        bx.memset(start, v, size, dest.val.align, MemFlags::empty());
                        return true;
                    }
                    false
                };

                if let OperandValue::Immediate(v) = cg_elem.val
                    && try_init_all_same(bx, v)
                {
                    return;
                }

                let count = self
                    .monomorphize(count)
                    .try_to_target_usize(bx.tcx())
                    .expect("expected monomorphic const in codegen");

                bx.write_operand_repeatedly(cg_elem, count, dest);
            }

            mir::Rvalue::Aggregate(ref kind, ref operands)
                if !matches!(**kind, mir::AggregateKind::RawPtr(..)) =>
            {
                let (variant_index, variant_dest, active_field_index) = match **kind {
                    mir::AggregateKind::Adt(_, variant_index, _, _, active_field_index) => {
                        let variant_dest = dest.project_downcast(bx, variant_index);
                        (variant_index, variant_dest, active_field_index)
                    }
                    _ => (FIRST_VARIANT, dest, None),
                };
                if active_field_index.is_some() {
                    assert_eq!(operands.len(), 1);
                }
                for (i, operand) in operands.iter_enumerated() {
                    let op = self.codegen_operand(bx, operand);
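                    // Do not generate stores and GEPs for zero-sized fields.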
                    if !op.layout.is_zst() {
                        let field_index = active_field_index.unwrap_or(i);
                        let field = if let mir::AggregateKind::Array(_) = **kind {
                            let llindex = bx.cx().const_usize(field_index.as_u32().into());
                            variant_dest.project_index(bx, llindex)
                        } else {
                            variant_dest.project_field(bx, field_index.as_usize())
                        };
                        op.val.store(bx, field);
                    }
                }
                dest.codegen_set_discr(bx, variant_index);
            }

            _ => {
                let temp = self.codegen_rvalue_operand(bx, rvalue);
                temp.val.store(bx, dest);
            }
        }
    }

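    /// Transmutes the `src` value to the type of `dst`, storing it into `dst`.
    ///
    /// Both layouts must be sized. If their sizes differ, or either side is
    /// uninhabited, the transmute is UB and no store is emitted at all.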
    fn codegen_transmute(
        &mut self,
        bx: &mut Bx,
        src: OperandRef<'tcx, Bx::Value>,
        dst: PlaceRef<'tcx, Bx::Value>,
    ) {
        assert!(src.layout.is_sized());
        assert!(dst.layout.is_sized());

        if src.layout.size != dst.layout.size
            || src.layout.is_uninhabited()
            || dst.layout.is_uninhabited()
        {
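            // These cases are all UB to actually hit, so don't emit code for them.
            // (The size mismatches are reachable via `transmute_unchecked`.)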
            bx.unreachable_nonterminator();
        } else {
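            // Since in this path we have a place anyway, we can store or copy to it,
            // making sure we use the destination place's alignment even if the
            // source would normally have a higher one.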
            src.val.store(bx, dst.val.with_type(src.layout));
        }
    }

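    /// Transmutes an `OperandValue` to another `OperandValue`.
    ///
    /// This is only supported for casts whose result can itself be an operand;
    /// a cast whose destination representation is `Memory` must instead go
    /// through [`Self::codegen_transmute`], and will ICE here.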
    pub(crate) fn codegen_transmute_operand(
        &mut self,
        bx: &mut Bx,
        operand: OperandRef<'tcx, Bx::Value>,
        cast: TyAndLayout<'tcx>,
    ) -> OperandValue<Bx::Value> {
        if let abi::BackendRepr::Memory { .. } = cast.backend_repr
            && !cast.is_zst()
        {
            span_bug!(self.mir.span, "Use `codegen_transmute` to transmute to {cast:?}");
        }

        if abi::Layout::eq(&operand.layout.layout, &cast.layout) {
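            // If the layouts are identical, the backend values must be too,
            // so the transmute is a no-op and the operand can be reused.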
            return operand.val;
        }

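        // Check for transmutes that are always UB.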
        if operand.layout.size != cast.size
            || operand.layout.is_uninhabited()
            || cast.is_uninhabited()
        {
            bx.unreachable_nonterminator();

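            // Because this transmute is UB anyway, return something easy to
            // generate; later uses of the value can only be more UB.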
            return OperandValue::poison(bx, cast);
        }

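        /// Whether a `SimdVector` element allows a direct `bitcast`: integers
        /// and floats do, while pointers (which carry provenance) do not.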
        #[inline]
        fn vector_can_bitcast(x: abi::Scalar) -> bool {
            matches!(
                x,
                abi::Scalar::Initialized {
                    value: abi::Primitive::Int(..) | abi::Primitive::Float(..),
                    ..
                }
            )
        }

        let cx = bx.cx();
        match (operand.val, operand.layout.backend_repr, cast.backend_repr) {
            _ if cast.is_zst() => OperandValue::ZeroSized,
            (OperandValue::Ref(source_place_val), abi::BackendRepr::Memory { .. }, _) => {
                assert_eq!(source_place_val.llextra, None);
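                // The existing alignment is part of `source_place_val`,
                // so that alignment will be used, not `cast`'s.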
                bx.load_operand(source_place_val.with_type(cast)).val
            }
            (
                OperandValue::Immediate(imm),
                abi::BackendRepr::Scalar(from_scalar),
                abi::BackendRepr::Scalar(to_scalar),
            ) if from_scalar.size(cx) == to_scalar.size(cx) => {
                OperandValue::Immediate(transmute_scalar(bx, imm, from_scalar, to_scalar))
            }
            (
                OperandValue::Immediate(imm),
                abi::BackendRepr::SimdVector { element: from_scalar, .. },
                abi::BackendRepr::SimdVector { element: to_scalar, .. },
            ) if vector_can_bitcast(from_scalar) && vector_can_bitcast(to_scalar) => {
                let to_backend_ty = bx.cx().immediate_backend_type(cast);
                OperandValue::Immediate(bx.bitcast(imm, to_backend_ty))
            }
            (
                OperandValue::Pair(imm_a, imm_b),
                abi::BackendRepr::ScalarPair(in_a, in_b),
                abi::BackendRepr::ScalarPair(out_a, out_b),
            ) if in_a.size(cx) == out_a.size(cx) && in_b.size(cx) == out_b.size(cx) => {
                OperandValue::Pair(
                    transmute_scalar(bx, imm_a, in_a, out_a),
                    transmute_scalar(bx, imm_b, in_b, out_b),
                )
            }
            _ => {
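                // For any other transmute (e.g. `u64` to `[u16; 4]`), fall back
                // to going through memory: spill to a stack slot sized for the
                // larger of the two layouts, then load back with the cast layout.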
                let align = Ord::max(operand.layout.align.abi, cast.align.abi);
                let size = Ord::max(operand.layout.size, cast.size);
                let temp = PlaceValue::alloca(bx, size, align);
                bx.lifetime_start(temp.llval, size);
                operand.val.store(bx, temp.with_type(operand.layout));
                let val = bx.load_operand(temp.with_type(cast)).val;
                bx.lifetime_end(temp.llval, size);
                val
            }
        }
    }

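    /// Casts one of the immediates from an [`OperandValue::Immediate`]
    /// or an [`OperandValue::Pair`] to an immediate of the cast type.
    ///
    /// Returns `None` if the cast is not possible.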
    fn cast_immediate(
        &self,
        bx: &mut Bx,
        mut imm: Bx::Value,
        from_scalar: abi::Scalar,
        from_backend_ty: Bx::Type,
        to_scalar: abi::Scalar,
        to_backend_ty: Bx::Type,
    ) -> Option<Bx::Value> {
        use abi::Primitive::*;

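        // When scalars are passed by value, there's no metadata recording their
        // valid ranges. For example, `char`s are passed as just `i32`, with no
        // way for LLVM to know that they're 0x10FFFF at most. Thus we assume
        // the range of the input value too, not just the output range.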
        assume_scalar_range(bx, imm, from_scalar, from_backend_ty, None);

        imm = match (from_scalar.primitive(), to_scalar.primitive()) {
            (Int(_, is_signed), Int(..)) => bx.intcast(imm, to_backend_ty, is_signed),
            (Float(_), Float(_)) => {
                let srcsz = bx.cx().float_width(from_backend_ty);
                let dstsz = bx.cx().float_width(to_backend_ty);
                if dstsz > srcsz {
                    bx.fpext(imm, to_backend_ty)
                } else if srcsz > dstsz {
                    bx.fptrunc(imm, to_backend_ty)
                } else {
                    imm
                }
            }
            (Int(_, is_signed), Float(_)) => {
                if is_signed {
                    bx.sitofp(imm, to_backend_ty)
                } else {
                    bx.uitofp(imm, to_backend_ty)
                }
            }
            (Pointer(..), Pointer(..)) => bx.pointercast(imm, to_backend_ty),
            (Int(_, is_signed), Pointer(..)) => {
                let usize_imm = bx.intcast(imm, bx.cx().type_isize(), is_signed);
                bx.inttoptr(usize_imm, to_backend_ty)
            }
            (Float(_), Int(_, is_signed)) => bx.cast_float_to_int(is_signed, imm, to_backend_ty),
            _ => return None,
        };
        Some(imm)
    }

    pub(crate) fn codegen_rvalue_operand(
        &mut self,
        bx: &mut Bx,
        rvalue: &mir::Rvalue<'tcx>,
    ) -> OperandRef<'tcx, Bx::Value> {
        match *rvalue {
            mir::Rvalue::Cast(ref kind, ref source, mir_cast_ty) => {
                let operand = self.codegen_operand(bx, source);
                debug!("cast operand is {:?}", operand);
                let cast = bx.cx().layout_of(self.monomorphize(mir_cast_ty));

                let val = match *kind {
                    mir::CastKind::PointerExposeProvenance => {
                        assert!(bx.cx().is_backend_immediate(cast));
                        let llptr = operand.immediate();
                        let llcast_ty = bx.cx().immediate_backend_type(cast);
                        let lladdr = bx.ptrtoint(llptr, llcast_ty);
                        OperandValue::Immediate(lladdr)
                    }
                    mir::CastKind::PointerCoercion(PointerCoercion::ReifyFnPointer, _) => {
                        match *operand.layout.ty.kind() {
                            ty::FnDef(def_id, args) => {
                                let instance = ty::Instance::resolve_for_fn_ptr(
                                    bx.tcx(),
                                    bx.typing_env(),
                                    def_id,
                                    args,
                                )
                                .unwrap();
                                OperandValue::Immediate(bx.get_fn_addr(instance))
                            }
                            _ => bug!("{} cannot be reified to a fn ptr", operand.layout.ty),
                        }
                    }
                    mir::CastKind::PointerCoercion(PointerCoercion::ClosureFnPointer(_), _) => {
                        match *operand.layout.ty.kind() {
                            ty::Closure(def_id, args) => {
                                let instance = Instance::resolve_closure(
                                    bx.cx().tcx(),
                                    def_id,
                                    args,
                                    ty::ClosureKind::FnOnce,
                                );
                                OperandValue::Immediate(bx.cx().get_fn_addr(instance))
                            }
                            _ => bug!("{} cannot be cast to a fn ptr", operand.layout.ty),
                        }
                    }
                    mir::CastKind::PointerCoercion(PointerCoercion::UnsafeFnPointer, _) => {
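                        // No-op at the backend level: the operand is already a fn pointer.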
                        operand.val
                    }
                    mir::CastKind::PointerCoercion(PointerCoercion::Unsize, _) => {
                        assert!(bx.cx().is_backend_scalar_pair(cast));
                        let (lldata, llextra) = operand.val.pointer_parts();
                        let (lldata, llextra) =
                            base::unsize_ptr(bx, lldata, operand.layout.ty, cast.ty, llextra);
                        OperandValue::Pair(lldata, llextra)
                    }
                    mir::CastKind::PointerCoercion(
                        PointerCoercion::MutToConstPointer | PointerCoercion::ArrayToPointer,
                        _,
                    ) => {
                        bug!("{kind:?} is for borrowck, and should never appear in codegen");
                    }
                    mir::CastKind::PtrToPtr
                        if bx.cx().is_backend_scalar_pair(operand.layout) =>
                    {
                        if let OperandValue::Pair(data_ptr, meta) = operand.val {
                            if bx.cx().is_backend_scalar_pair(cast) {
                                OperandValue::Pair(data_ptr, meta)
                            } else {
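                                // Cast of wide-ptr to thin-ptr is an extraction of data_ptr.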
                                OperandValue::Immediate(data_ptr)
                            }
                        } else {
                            bug!("unexpected non-pair operand");
                        }
                    }
                    mir::CastKind::IntToInt
                    | mir::CastKind::FloatToInt
                    | mir::CastKind::FloatToFloat
                    | mir::CastKind::IntToFloat
                    | mir::CastKind::PtrToPtr
                    | mir::CastKind::FnPtrToPtr
                    | mir::CastKind::PointerWithExposedProvenance => {
                        let imm = operand.immediate();
                        let abi::BackendRepr::Scalar(from_scalar) = operand.layout.backend_repr else {
                            bug!("Found non-scalar for operand {operand:?}");
                        };
                        let from_backend_ty = bx.cx().immediate_backend_type(operand.layout);

                        assert!(bx.cx().is_backend_immediate(cast));
                        let to_backend_ty = bx.cx().immediate_backend_type(cast);
                        if operand.layout.is_uninhabited() {
                            let val = OperandValue::Immediate(bx.cx().const_poison(to_backend_ty));
                            return OperandRef { val, layout: cast };
                        }
                        let abi::BackendRepr::Scalar(to_scalar) = cast.layout.backend_repr else {
                            bug!("Found non-scalar for cast {cast:?}");
                        };

                        self.cast_immediate(bx, imm, from_scalar, from_backend_ty, to_scalar, to_backend_ty)
                            .map(OperandValue::Immediate)
                            .unwrap_or_else(|| {
                                bug!("Unsupported cast of {operand:?} to {cast:?}");
                            })
                    }
                    mir::CastKind::Transmute => {
                        self.codegen_transmute_operand(bx, operand, cast)
                    }
                };
                OperandRef { val, layout: cast }
            }

            mir::Rvalue::Ref(_, bk, place) => {
                let mk_ref = move |tcx: TyCtxt<'tcx>, ty: Ty<'tcx>| {
                    Ty::new_ref(tcx, tcx.lifetimes.re_erased, ty, bk.to_mutbl_lossy())
                };
                self.codegen_place_to_pointer(bx, place, mk_ref)
            }

            mir::Rvalue::CopyForDeref(place) => {
                self.codegen_operand(bx, &mir::Operand::Copy(place))
            }
            mir::Rvalue::RawPtr(kind, place) => {
                let mk_ptr = move |tcx: TyCtxt<'tcx>, ty: Ty<'tcx>| {
                    Ty::new_ptr(tcx, ty, kind.to_mutbl_lossy())
                };
                self.codegen_place_to_pointer(bx, place, mk_ptr)
            }

            mir::Rvalue::Len(place) => {
                let size = self.evaluate_array_len(bx, place);
                OperandRef {
                    val: OperandValue::Immediate(size),
                    layout: bx.cx().layout_of(bx.tcx().types.usize),
                }
            }

            mir::Rvalue::BinaryOp(op_with_overflow, box (ref lhs, ref rhs))
                if let Some(op) = op_with_overflow.overflowing_to_wrapping() =>
            {
                let lhs = self.codegen_operand(bx, lhs);
                let rhs = self.codegen_operand(bx, rhs);
                let result = self.codegen_scalar_checked_binop(
                    bx,
                    op,
                    lhs.immediate(),
                    rhs.immediate(),
                    lhs.layout.ty,
                );
                let val_ty = op.ty(bx.tcx(), lhs.layout.ty, rhs.layout.ty);
                let operand_ty = Ty::new_tup(bx.tcx(), &[val_ty, bx.tcx().types.bool]);
                OperandRef { val: result, layout: bx.cx().layout_of(operand_ty) }
            }
            mir::Rvalue::BinaryOp(op, box (ref lhs, ref rhs)) => {
                let lhs = self.codegen_operand(bx, lhs);
                let rhs = self.codegen_operand(bx, rhs);
                let llresult = match (lhs.val, rhs.val) {
                    (
                        OperandValue::Pair(lhs_addr, lhs_extra),
                        OperandValue::Pair(rhs_addr, rhs_extra),
                    ) => self.codegen_wide_ptr_binop(
                        bx,
                        op,
                        lhs_addr,
                        lhs_extra,
                        rhs_addr,
                        rhs_extra,
                        lhs.layout.ty,
                    ),

                    (OperandValue::Immediate(lhs_val), OperandValue::Immediate(rhs_val)) => self
                        .codegen_scalar_binop(
                            bx,
                            op,
                            lhs_val,
                            rhs_val,
                            lhs.layout.ty,
                            rhs.layout.ty,
                        ),

                    _ => bug!(),
                };
                OperandRef {
                    val: OperandValue::Immediate(llresult),
                    layout: bx.cx().layout_of(op.ty(bx.tcx(), lhs.layout.ty, rhs.layout.ty)),
                }
            }

            mir::Rvalue::UnaryOp(op, ref operand) => {
                let operand = self.codegen_operand(bx, operand);
                let is_float = operand.layout.ty.is_floating_point();
                let (val, layout) = match op {
                    mir::UnOp::Not => {
                        let llval = bx.not(operand.immediate());
                        (OperandValue::Immediate(llval), operand.layout)
                    }
                    mir::UnOp::Neg => {
                        let llval = if is_float {
                            bx.fneg(operand.immediate())
                        } else {
                            bx.neg(operand.immediate())
                        };
                        (OperandValue::Immediate(llval), operand.layout)
                    }
                    mir::UnOp::PtrMetadata => {
                        assert!(operand.layout.ty.is_raw_ptr() || operand.layout.ty.is_ref());
                        let (_, meta) = operand.val.pointer_parts();
                        assert_eq!(operand.layout.fields.count() > 1, meta.is_some());
                        if let Some(meta) = meta {
                            (OperandValue::Immediate(meta), operand.layout.field(self.cx, 1))
                        } else {
                            (OperandValue::ZeroSized, bx.cx().layout_of(bx.tcx().types.unit))
                        }
                    }
                };
                assert!(
                    val.is_expected_variant_for_type(self.cx, layout),
                    "Made wrong variant {val:?} for type {layout:?}",
                );
                OperandRef { val, layout }
            }

            mir::Rvalue::Discriminant(ref place) => {
                let discr_ty = rvalue.ty(self.mir, bx.tcx());
                let discr_ty = self.monomorphize(discr_ty);
                let operand = self.codegen_consume(bx, place.as_ref());
                let discr = operand.codegen_get_discr(self, bx, discr_ty);
                OperandRef {
                    val: OperandValue::Immediate(discr),
                    layout: self.cx.layout_of(discr_ty),
                }
            }

            mir::Rvalue::NullaryOp(ref null_op, ty) => {
                let ty = self.monomorphize(ty);
                let layout = bx.cx().layout_of(ty);
                let val = match null_op {
                    mir::NullOp::SizeOf => {
                        assert!(bx.cx().type_is_sized(ty));
                        let val = layout.size.bytes();
                        bx.cx().const_usize(val)
                    }
                    mir::NullOp::AlignOf => {
                        assert!(bx.cx().type_is_sized(ty));
                        let val = layout.align.abi.bytes();
                        bx.cx().const_usize(val)
                    }
                    mir::NullOp::OffsetOf(fields) => {
                        let val = bx
                            .tcx()
                            .offset_of_subfield(bx.typing_env(), layout, fields.iter())
                            .bytes();
                        bx.cx().const_usize(val)
                    }
                    mir::NullOp::UbChecks => {
                        let val = bx.tcx().sess.ub_checks();
                        bx.cx().const_bool(val)
                    }
                    mir::NullOp::ContractChecks => {
                        let val = bx.tcx().sess.contract_checks();
                        bx.cx().const_bool(val)
                    }
                };
                let tcx = self.cx.tcx();
                OperandRef {
                    val: OperandValue::Immediate(val),
                    layout: self.cx.layout_of(null_op.ty(tcx)),
                }
            }

            mir::Rvalue::ThreadLocalRef(def_id) => {
                assert!(bx.cx().tcx().is_static(def_id));
                let layout = bx.layout_of(bx.cx().tcx().static_ptr_ty(def_id, bx.typing_env()));
                let static_ = if !def_id.is_local() && bx.cx().tcx().needs_thread_local_shim(def_id)
                {
                    let instance = ty::Instance {
                        def: ty::InstanceKind::ThreadLocalShim(def_id),
                        args: ty::GenericArgs::empty(),
                    };
                    let fn_ptr = bx.get_fn_addr(instance);
                    let fn_abi = bx.fn_abi_of_instance(instance, ty::List::empty());
                    let fn_ty = bx.fn_decl_backend_type(fn_abi);
                    let fn_attrs = if bx.tcx().def_kind(instance.def_id()).has_codegen_attrs() {
                        Some(bx.tcx().codegen_instance_attrs(instance.def))
                    } else {
                        None
                    };
                    bx.call(
                        fn_ty,
                        fn_attrs.as_deref(),
                        Some(fn_abi),
                        fn_ptr,
                        &[],
                        None,
                        Some(instance),
                    )
                } else {
                    bx.get_static(def_id)
                };
                OperandRef { val: OperandValue::Immediate(static_), layout }
            }
            mir::Rvalue::Use(ref operand) => self.codegen_operand(bx, operand),
            mir::Rvalue::Repeat(ref elem, len_const) => {
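                // All arrays have `BackendRepr::Memory`, so only the ZST cases
                // end up here. Anything else forces the destination local to be
                // `Memory`, and thus will not get this method called.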
681 let operand = self.codegen_operand(bx, elem);
685 let array_ty = Ty::new_array_with_const_len(bx.tcx(), operand.layout.ty, len_const);
686 let array_ty = self.monomorphize(array_ty);
687 let array_layout = bx.layout_of(array_ty);
688 assert!(array_layout.is_zst());
689 OperandRef { val: OperandValue::ZeroSized, layout: array_layout }
690 }
691 mir::Rvalue::Aggregate(ref kind, ref fields) => {
692 let (variant_index, active_field_index) = match **kind {
693 mir::AggregateKind::Adt(_, variant_index, _, _, active_field_index) => {
694 (variant_index, active_field_index)
695 }
696 _ => (FIRST_VARIANT, None),
697 };
698
699 let ty = rvalue.ty(self.mir, self.cx.tcx());
700 let ty = self.monomorphize(ty);
701 let layout = self.cx.layout_of(ty);
702
703 let mut builder = OperandRefBuilder::new(layout);
704 for (field_idx, field) in fields.iter_enumerated() {
705 let op = self.codegen_operand(bx, field);
706 let fi = active_field_index.unwrap_or(field_idx);
707 builder.insert_field(bx, variant_index, fi, op);
708 }
709
710 let tag_result = codegen_tag_value(self.cx, variant_index, layout);
711 match tag_result {
712 Err(super::place::UninhabitedVariantError) => {
713 bx.abort();
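                        // We can't build the requested operand, so after the
                        // abort produce a well-formed poison value instead.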
                        let val = OperandValue::poison(bx, layout);
                        OperandRef { val, layout }
                    }
                    Ok(maybe_tag_value) => {
                        if let Some((tag_field, tag_imm)) = maybe_tag_value {
                            builder.insert_imm(tag_field, tag_imm);
                        }
                        builder.build(bx.cx())
                    }
                }
            }
            mir::Rvalue::ShallowInitBox(ref operand, content_ty) => {
                let operand = self.codegen_operand(bx, operand);
                let val = operand.immediate();

                let content_ty = self.monomorphize(content_ty);
                let box_layout = bx.cx().layout_of(Ty::new_box(bx.tcx(), content_ty));

                OperandRef { val: OperandValue::Immediate(val), layout: box_layout }
            }
            mir::Rvalue::WrapUnsafeBinder(ref operand, binder_ty) => {
                let operand = self.codegen_operand(bx, operand);
                let binder_ty = self.monomorphize(binder_ty);
                let layout = bx.cx().layout_of(binder_ty);
                OperandRef { val: operand.val, layout }
            }
        }
    }

    fn evaluate_array_len(&mut self, bx: &mut Bx, place: mir::Place<'tcx>) -> Bx::Value {
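        // ZSTs are passed as operands and require special handling
        // because `codegen_place()` panics if the local is an operand.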
747 if let Some(index) = place.as_local()
750 && let LocalRef::Operand(op) = self.locals[index]
751 && let ty::Array(_, n) = op.layout.ty.kind()
752 {
753 let n = n.try_to_target_usize(bx.tcx()).expect("expected monomorphic const in codegen");
754 return bx.cx().const_usize(n);
755 }
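        // Use the common size calculation for non-zero-sized types.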
        let cg_value = self.codegen_place(bx, place.as_ref());
        cg_value.len(bx.cx())
    }

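    /// Codegens an `Rvalue::RawPtr` or `Rvalue::Ref`.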
    fn codegen_place_to_pointer(
        &mut self,
        bx: &mut Bx,
        place: mir::Place<'tcx>,
        mk_ptr_ty: impl FnOnce(TyCtxt<'tcx>, Ty<'tcx>) -> Ty<'tcx>,
    ) -> OperandRef<'tcx, Bx::Value> {
        let cg_place = self.codegen_place(bx, place.as_ref());
        let val = cg_place.val.address();

        let ty = cg_place.layout.ty;
        assert!(
            if bx.cx().tcx().type_has_metadata(ty, bx.cx().typing_env()) {
                matches!(val, OperandValue::Pair(..))
            } else {
                matches!(val, OperandValue::Immediate(..))
            },
            "Address of place was unexpectedly {val:?} for pointee type {ty:?}",
        );

        OperandRef { val, layout: self.cx.layout_of(mk_ptr_ty(self.cx.tcx(), ty)) }
    }

    fn codegen_scalar_binop(
        &mut self,
        bx: &mut Bx,
        op: mir::BinOp,
        lhs: Bx::Value,
        rhs: Bx::Value,
        lhs_ty: Ty<'tcx>,
        rhs_ty: Ty<'tcx>,
    ) -> Bx::Value {
        let is_float = lhs_ty.is_floating_point();
        let is_signed = lhs_ty.is_signed();
        match op {
            mir::BinOp::Add => {
                if is_float {
                    bx.fadd(lhs, rhs)
                } else {
                    bx.add(lhs, rhs)
                }
            }
            mir::BinOp::AddUnchecked => {
                if is_signed {
                    bx.unchecked_sadd(lhs, rhs)
                } else {
                    bx.unchecked_uadd(lhs, rhs)
                }
            }
            mir::BinOp::Sub => {
                if is_float {
                    bx.fsub(lhs, rhs)
                } else {
                    bx.sub(lhs, rhs)
                }
            }
            mir::BinOp::SubUnchecked => {
                if is_signed {
                    bx.unchecked_ssub(lhs, rhs)
                } else {
                    bx.unchecked_usub(lhs, rhs)
                }
            }
            mir::BinOp::Mul => {
                if is_float {
                    bx.fmul(lhs, rhs)
                } else {
                    bx.mul(lhs, rhs)
                }
            }
            mir::BinOp::MulUnchecked => {
                if is_signed {
                    bx.unchecked_smul(lhs, rhs)
                } else {
                    bx.unchecked_umul(lhs, rhs)
                }
            }
            mir::BinOp::Div => {
                if is_float {
                    bx.fdiv(lhs, rhs)
                } else if is_signed {
                    bx.sdiv(lhs, rhs)
                } else {
                    bx.udiv(lhs, rhs)
                }
            }
            mir::BinOp::Rem => {
                if is_float {
                    bx.frem(lhs, rhs)
                } else if is_signed {
                    bx.srem(lhs, rhs)
                } else {
                    bx.urem(lhs, rhs)
                }
            }
            mir::BinOp::BitOr => bx.or(lhs, rhs),
            mir::BinOp::BitAnd => bx.and(lhs, rhs),
            mir::BinOp::BitXor => bx.xor(lhs, rhs),
            mir::BinOp::Offset => {
                let pointee_type = lhs_ty
                    .builtin_deref(true)
                    .unwrap_or_else(|| bug!("deref of non-pointer {:?}", lhs_ty));
                let pointee_layout = bx.cx().layout_of(pointee_type);
                if pointee_layout.is_zst() {
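                    // `Offset` works in terms of the size of pointee, so
                    // offsetting a pointer to a ZST is a noop.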
                    lhs
                } else {
                    let llty = bx.cx().backend_type(pointee_layout);
                    if rhs_ty.is_signed() {
                        bx.inbounds_gep(llty, lhs, &[rhs])
                    } else {
                        bx.inbounds_nuw_gep(llty, lhs, &[rhs])
                    }
                }
            }
            mir::BinOp::Shl | mir::BinOp::ShlUnchecked => {
                let rhs = base::build_shift_expr_rhs(bx, lhs, rhs, op == mir::BinOp::ShlUnchecked);
                bx.shl(lhs, rhs)
            }
            mir::BinOp::Shr | mir::BinOp::ShrUnchecked => {
                let rhs = base::build_shift_expr_rhs(bx, lhs, rhs, op == mir::BinOp::ShrUnchecked);
                if is_signed { bx.ashr(lhs, rhs) } else { bx.lshr(lhs, rhs) }
            }
            mir::BinOp::Ne
            | mir::BinOp::Lt
            | mir::BinOp::Gt
            | mir::BinOp::Eq
            | mir::BinOp::Le
            | mir::BinOp::Ge => {
                if is_float {
                    bx.fcmp(base::bin_op_to_fcmp_predicate(op), lhs, rhs)
                } else {
                    bx.icmp(base::bin_op_to_icmp_predicate(op, is_signed), lhs, rhs)
                }
            }
            mir::BinOp::Cmp => {
                use std::cmp::Ordering;
                assert!(!is_float);
                if let Some(value) = bx.three_way_compare(lhs_ty, lhs, rhs) {
                    return value;
                }
                let pred = |op| base::bin_op_to_icmp_predicate(op, is_signed);
                if bx.cx().tcx().sess.opts.optimize == OptLevel::No {
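                    // FIXME: This actually generates tighter assembly, and is a classic trick
                    // <https://graphics.stanford.edu/~seander/bithacks.html#CopyIntegerSign>
                    // However, as of 2023-11 it optimizes worse in things like derived
                    // `PartialOrd`, so only use it in debug for now. Once LLVM can handle it
                    // better (see <https://github.com/llvm/llvm-project/issues/73417>), it'll
                    // be worth trying it in optimized builds as well.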
905 let is_gt = bx.icmp(pred(mir::BinOp::Gt), lhs, rhs);
912 let gtext = bx.zext(is_gt, bx.type_i8());
913 let is_lt = bx.icmp(pred(mir::BinOp::Lt), lhs, rhs);
914 let ltext = bx.zext(is_lt, bx.type_i8());
915 bx.unchecked_ssub(gtext, ltext)
916 } else {
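                    // These operations are those expected by `tests/codegen/integer-cmp.rs`,
                    // from <https://github.com/rust-lang/rust/pull/63767>.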
                    let is_lt = bx.icmp(pred(mir::BinOp::Lt), lhs, rhs);
                    let is_ne = bx.icmp(pred(mir::BinOp::Ne), lhs, rhs);
                    let ge = bx.select(
                        is_ne,
                        bx.cx().const_i8(Ordering::Greater as i8),
                        bx.cx().const_i8(Ordering::Equal as i8),
                    );
                    bx.select(is_lt, bx.cx().const_i8(Ordering::Less as i8), ge)
                }
            }
            mir::BinOp::AddWithOverflow
            | mir::BinOp::SubWithOverflow
            | mir::BinOp::MulWithOverflow => {
                bug!("{op:?} needs to return a pair, so call codegen_scalar_checked_binop instead")
            }
        }
    }

    fn codegen_wide_ptr_binop(
        &mut self,
        bx: &mut Bx,
        op: mir::BinOp,
        lhs_addr: Bx::Value,
        lhs_extra: Bx::Value,
        rhs_addr: Bx::Value,
        rhs_extra: Bx::Value,
        _input_ty: Ty<'tcx>,
    ) -> Bx::Value {
        match op {
            mir::BinOp::Eq => {
                let lhs = bx.icmp(IntPredicate::IntEQ, lhs_addr, rhs_addr);
                let rhs = bx.icmp(IntPredicate::IntEQ, lhs_extra, rhs_extra);
                bx.and(lhs, rhs)
            }
            mir::BinOp::Ne => {
                let lhs = bx.icmp(IntPredicate::IntNE, lhs_addr, rhs_addr);
                let rhs = bx.icmp(IntPredicate::IntNE, lhs_extra, rhs_extra);
                bx.or(lhs, rhs)
            }
            mir::BinOp::Le | mir::BinOp::Lt | mir::BinOp::Ge | mir::BinOp::Gt => {
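                // a OP b ~ a.0 STRICT(OP) b.0 | (a.0 == b.0 && a.1 OP b.1)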
959 let (op, strict_op) = match op {
961 mir::BinOp::Lt => (IntPredicate::IntULT, IntPredicate::IntULT),
962 mir::BinOp::Le => (IntPredicate::IntULE, IntPredicate::IntULT),
963 mir::BinOp::Gt => (IntPredicate::IntUGT, IntPredicate::IntUGT),
964 mir::BinOp::Ge => (IntPredicate::IntUGE, IntPredicate::IntUGT),
965 _ => bug!(),
966 };
967 let lhs = bx.icmp(strict_op, lhs_addr, rhs_addr);
968 let and_lhs = bx.icmp(IntPredicate::IntEQ, lhs_addr, rhs_addr);
969 let and_rhs = bx.icmp(op, lhs_extra, rhs_extra);
970 let rhs = bx.and(and_lhs, and_rhs);
971 bx.or(lhs, rhs)
972 }
973 _ => {
974 bug!("unexpected wide ptr binop");
975 }
976 }
977 }
978
979 fn codegen_scalar_checked_binop(
980 &mut self,
981 bx: &mut Bx,
982 op: mir::BinOp,
983 lhs: Bx::Value,
984 rhs: Bx::Value,
985 input_ty: Ty<'tcx>,
986 ) -> OperandValue<Bx::Value> {
987 let (val, of) = match op {
988 mir::BinOp::Add | mir::BinOp::Sub | mir::BinOp::Mul => {
990 let oop = match op {
991 mir::BinOp::Add => OverflowOp::Add,
992 mir::BinOp::Sub => OverflowOp::Sub,
993 mir::BinOp::Mul => OverflowOp::Mul,
994 _ => unreachable!(),
995 };
996 bx.checked_binop(oop, input_ty, lhs, rhs)
997 }
998 _ => bug!("Operator `{:?}` is not a checkable operator", op),
999 };
1000
1001 OperandValue::Pair(val, of)
1002 }
1003}
1004
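/// Transmutes a scalar immediate `imm` from `from_scalar` to `to_scalar`.
///
/// The scalars must have the same size, and the value is expected (and
/// returned) in *immediate* form (e.g. `i1`, not `i8`, for `bool`).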
pub(super) fn transmute_scalar<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
    bx: &mut Bx,
    mut imm: Bx::Value,
    from_scalar: abi::Scalar,
    to_scalar: abi::Scalar,
) -> Bx::Value {
    assert_eq!(from_scalar.size(bx.cx()), to_scalar.size(bx.cx()));
    let imm_ty = bx.cx().val_ty(imm);
    assert_ne!(
        bx.cx().type_kind(imm_ty),
        TypeKind::Vector,
        "Vector type {imm_ty:?} not allowed in transmute_scalar {from_scalar:?} -> {to_scalar:?}"
    );

    if from_scalar == to_scalar {
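        // Identical scalars have identical backend representations, so the
        // value can be reused as-is.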
        return imm;
    }

    use abi::Primitive::*;
    imm = bx.from_immediate(imm);

    let from_backend_ty = bx.cx().type_from_scalar(from_scalar);
    debug_assert_eq!(bx.cx().val_ty(imm), from_backend_ty);
    let to_backend_ty = bx.cx().type_from_scalar(to_scalar);

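    // If we have a scalar, we must already know its range. Either
    //
    // 1) It's a parameter with `range` parameter metadata,
    // 2) It's something we `load`ed with `!range` metadata, or
    // 3) After a transmute we `assume`d the range (see below).
    //
    // That said, re-asserting it here has not shown up as a cost in practice,
    // so might as well keep doing it.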
    assume_scalar_range(bx, imm, from_scalar, from_backend_ty, Some(&to_scalar));

    imm = match (from_scalar.primitive(), to_scalar.primitive()) {
        (Int(..) | Float(_), Int(..) | Float(_)) => bx.bitcast(imm, to_backend_ty),
        (Pointer(..), Pointer(..)) => bx.pointercast(imm, to_backend_ty),
        (Int(..), Pointer(..)) => bx.ptradd(bx.const_null(bx.type_ptr()), imm),
        (Pointer(..), Int(..)) => {
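            // FIXME: this exposes the provenance, which shouldn't be necessary.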
            bx.ptrtoint(imm, to_backend_ty)
        }
        (Float(_), Pointer(..)) => {
            let int_imm = bx.bitcast(imm, bx.cx().type_isize());
            bx.ptradd(bx.const_null(bx.type_ptr()), int_imm)
        }
        (Pointer(..), Float(_)) => {
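            // FIXME: this exposes the provenance, which shouldn't be necessary.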
            let int_imm = bx.ptrtoint(imm, bx.cx().type_isize());
            bx.bitcast(int_imm, to_backend_ty)
        }
    };

    debug_assert_eq!(bx.cx().val_ty(imm), to_backend_ty);

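    // This `assume` remains important for cases like (a conceptual)
    //    transmute::<u32, NonZeroU32>(x) == 0
    // since it's never passed to something with parameter metadata (especially
    // after MIR inlining) so the only way to tell the backend about the
    // constraint that the `transmute` introduced is to `assume` it.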
    assume_scalar_range(bx, imm, to_scalar, to_backend_ty, Some(&from_scalar));

    bx.to_immediate_scalar(imm, to_scalar)
}

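/// Emits an `assume` that `imm` (of scalar `scalar`, backend type `backend_ty`)
/// lies within the scalar's layout-valid range.
///
/// If `known` is `Some`, the assume is skipped when that already-known scalar's
/// range implies it. Does nothing at all when not optimizing.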
fn assume_scalar_range<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
    bx: &mut Bx,
    imm: Bx::Value,
    scalar: abi::Scalar,
    backend_ty: Bx::Type,
    known: Option<&abi::Scalar>,
) {
    if matches!(bx.cx().sess().opts.optimize, OptLevel::No) {
        return;
    }

    match (scalar, known) {
        (abi::Scalar::Union { .. }, _) => return,
        (_, None) => {
            if scalar.is_always_valid(bx.cx()) {
                return;
            }
        }
        (abi::Scalar::Initialized { valid_range, .. }, Some(known)) => {
            let known_range = known.valid_range(bx.cx());
            if valid_range.contains_range(known_range, scalar.size(bx.cx())) {
                return;
            }
        }
    }

    match scalar.primitive() {
        abi::Primitive::Int(..) => {
            let range = scalar.valid_range(bx.cx());
            bx.assume_integer_range(imm, backend_ty, range);
        }
        abi::Primitive::Pointer(abi::AddressSpace::ZERO)
            if !scalar.valid_range(bx.cx()).contains(0) =>
        {
            bx.assume_nonnull(imm);
        }
        abi::Primitive::Pointer(..) | abi::Primitive::Float(..) => {}
    }
}