rustc_const_eval/interpret/intrinsics.rs

//! Intrinsics and other functions that the interpreter executes without
//! looking at their MIR. Intrinsics/functions supported here are shared by CTFE
//! and miri.

use std::assert_matches::assert_matches;

use rustc_abi::Size;
use rustc_apfloat::ieee::{Double, Half, Quad, Single};
use rustc_hir::def_id::DefId;
use rustc_middle::mir::{self, BinOp, ConstValue, NonDivergingIntrinsic};
use rustc_middle::ty::layout::{LayoutOf as _, TyAndLayout, ValidityRequirement};
use rustc_middle::ty::{GenericArgsRef, Ty, TyCtxt};
use rustc_middle::{bug, ty};
use rustc_span::{Symbol, sym};
use tracing::trace;

use super::memory::MemoryKind;
use super::util::ensure_monomorphic_enough;
use super::{
    Allocation, CheckInAllocMsg, ConstAllocation, GlobalId, ImmTy, InterpCx, InterpResult, Machine,
    OpTy, PlaceTy, Pointer, PointerArithmetic, Provenance, Scalar, err_inval, err_ub_custom,
    err_unsup_format, interp_ok, throw_inval, throw_ub_custom, throw_ub_format,
};
use crate::fluent_generated as fluent;

/// Directly returns an `Allocation` containing an absolute path representation of the given type.
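/// E.g. for `Vec<u8>` the allocation would hold the bytes of `"alloc::vec::Vec<u8>"`.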
pub(crate) fn alloc_type_name<'tcx>(tcx: TyCtxt<'tcx>, ty: Ty<'tcx>) -> ConstAllocation<'tcx> {
    let path = crate::util::type_name(tcx, ty);
    let alloc = Allocation::from_bytes_byte_aligned_immutable(path.into_bytes(), ());
    tcx.mk_const_alloc(alloc)
}

/// The logic for all nullary intrinsics is implemented here. These intrinsics don't get evaluated
/// inside an `InterpCx` and instead have their value computed directly from rustc internal info.
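/// E.g. `variant_count::<Option<u8>>()` is just `2`, read straight off the ADT definition.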
pub(crate) fn eval_nullary_intrinsic<'tcx>(
    tcx: TyCtxt<'tcx>,
    typing_env: ty::TypingEnv<'tcx>,
    def_id: DefId,
    args: GenericArgsRef<'tcx>,
) -> InterpResult<'tcx, ConstValue<'tcx>> {
    let tp_ty = args.type_at(0);
    let name = tcx.item_name(def_id);
    interp_ok(match name {
        sym::type_name => {
            ensure_monomorphic_enough(tcx, tp_ty)?;
            let alloc = alloc_type_name(tcx, tp_ty);
            ConstValue::Slice { data: alloc, meta: alloc.inner().size().bytes() }
        }
        sym::needs_drop => {
            ensure_monomorphic_enough(tcx, tp_ty)?;
            ConstValue::from_bool(tp_ty.needs_drop(tcx, typing_env))
        }
        sym::pref_align_of => {
            // Correctly handles non-monomorphic calls, so there is no need for ensure_monomorphic_enough.
            let layout = tcx
                .layout_of(typing_env.as_query_input(tp_ty))
                .map_err(|e| err_inval!(Layout(*e)))?;
            ConstValue::from_target_usize(layout.align.pref.bytes(), &tcx)
        }
        sym::type_id => {
            ensure_monomorphic_enough(tcx, tp_ty)?;
            ConstValue::from_u128(tcx.type_id_hash(tp_ty).as_u128())
        }
        sym::variant_count => match match tp_ty.kind() {
            // Pattern types have the same number of variants as their base type.
            // Even if we restrict e.g. which variants are valid, the variants are essentially just uninhabited.
            // And `Result<(), !>` still has two variants according to `variant_count`.
            ty::Pat(base, _) => *base,
            _ => tp_ty,
        }
        .kind()
        {
            // Correctly handles non-monomorphic calls, so there is no need for ensure_monomorphic_enough.
            ty::Adt(adt, _) => ConstValue::from_target_usize(adt.variants().len() as u64, &tcx),
            ty::Alias(..) | ty::Param(_) | ty::Placeholder(_) | ty::Infer(_) => {
                throw_inval!(TooGeneric)
            }
            ty::Pat(..) => unreachable!(),
            ty::Bound(_, _) => bug!("bound ty during ctfe"),
            ty::Bool
            | ty::Char
            | ty::Int(_)
            | ty::Uint(_)
            | ty::Float(_)
            | ty::Foreign(_)
            | ty::Str
            | ty::Array(_, _)
            | ty::Slice(_)
            | ty::RawPtr(_, _)
            | ty::Ref(_, _, _)
            | ty::FnDef(_, _)
            | ty::FnPtr(..)
            | ty::Dynamic(_, _, _)
            | ty::Closure(_, _)
            | ty::CoroutineClosure(_, _)
            | ty::Coroutine(_, _)
            | ty::CoroutineWitness(..)
            | ty::UnsafeBinder(_)
            | ty::Never
            | ty::Tuple(_)
            | ty::Error(_) => ConstValue::from_target_usize(0u64, &tcx),
        },
        other => bug!("`{}` is not a zero arg intrinsic", other),
    })
}

impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
    /// Returns `true` if emulation happened.
    /// Here we implement the intrinsics that are common to all Miri instances; individual machines can add their own
    /// intrinsic handling.
    pub fn eval_intrinsic(
        &mut self,
        instance: ty::Instance<'tcx>,
        args: &[OpTy<'tcx, M::Provenance>],
        dest: &PlaceTy<'tcx, M::Provenance>,
        ret: Option<mir::BasicBlock>,
    ) -> InterpResult<'tcx, bool> {
        let instance_args = instance.args;
        let intrinsic_name = self.tcx.item_name(instance.def_id());

        match intrinsic_name {
            sym::caller_location => {
                let span = self.find_closest_untracked_caller_location();
                let val = self.tcx.span_as_caller_location(span);
                let val =
                    self.const_val_to_op(val, self.tcx.caller_location_ty(), Some(dest.layout))?;
                self.copy_op(&val, dest)?;
            }

            sym::min_align_of_val | sym::size_of_val => {
                // Avoid `deref_pointer` -- this is not a deref, the ptr does not have to be
                // dereferenceable!
                let place = self.ref_to_mplace(&self.read_immediate(&args[0])?)?;
                let (size, align) = self
                    .size_and_align_of_mplace(&place)?
                    .ok_or_else(|| err_unsup_format!("`extern type` does not have known layout"))?;

                let result = match intrinsic_name {
                    sym::min_align_of_val => align.bytes(),
                    sym::size_of_val => size.bytes(),
                    _ => bug!(),
                };

                self.write_scalar(Scalar::from_target_usize(result, self), dest)?;
            }

            sym::pref_align_of
            | sym::needs_drop
            | sym::type_id
            | sym::type_name
            | sym::variant_count => {
                let gid = GlobalId { instance, promoted: None };
                let ty = match intrinsic_name {
                    sym::pref_align_of | sym::variant_count => self.tcx.types.usize,
                    sym::needs_drop => self.tcx.types.bool,
                    sym::type_id => self.tcx.types.u128,
                    sym::type_name => Ty::new_static_str(self.tcx.tcx),
                    _ => bug!(),
                };
                let val = self
                    .ctfe_query(|tcx| tcx.const_eval_global_id(self.typing_env, gid, tcx.span))?;
                let val = self.const_val_to_op(val, ty, Some(dest.layout))?;
                self.copy_op(&val, dest)?;
            }

            sym::fadd_algebraic
            | sym::fsub_algebraic
            | sym::fmul_algebraic
            | sym::fdiv_algebraic
            | sym::frem_algebraic => {
                let a = self.read_immediate(&args[0])?;
                let b = self.read_immediate(&args[1])?;

                let op = match intrinsic_name {
                    sym::fadd_algebraic => BinOp::Add,
                    sym::fsub_algebraic => BinOp::Sub,
                    sym::fmul_algebraic => BinOp::Mul,
                    sym::fdiv_algebraic => BinOp::Div,
                    sym::frem_algebraic => BinOp::Rem,

                    _ => bug!(),
                };

                let res = self.binary_op(op, &a, &b)?;
                // `binary_op` already called `generate_nan` if needed.
                let res = M::apply_float_nondet(self, res)?;
                self.write_immediate(*res, dest)?;
            }

            sym::ctpop
            | sym::cttz
            | sym::cttz_nonzero
            | sym::ctlz
            | sym::ctlz_nonzero
            | sym::bswap
            | sym::bitreverse => {
                let ty = instance_args.type_at(0);
                let layout = self.layout_of(ty)?;
                let val = self.read_scalar(&args[0])?;

                let out_val = self.numeric_intrinsic(intrinsic_name, val, layout, dest.layout)?;
                self.write_scalar(out_val, dest)?;
            }
            sym::saturating_add | sym::saturating_sub => {
                let l = self.read_immediate(&args[0])?;
                let r = self.read_immediate(&args[1])?;
                let val = self.saturating_arith(
                    if intrinsic_name == sym::saturating_add { BinOp::Add } else { BinOp::Sub },
                    &l,
                    &r,
                )?;
                self.write_scalar(val, dest)?;
            }
            sym::discriminant_value => {
                let place = self.deref_pointer(&args[0])?;
                let variant = self.read_discriminant(&place)?;
                let discr = self.discriminant_for_variant(place.layout.ty, variant)?;
                self.write_immediate(*discr, dest)?;
            }
            sym::exact_div => {
                let l = self.read_immediate(&args[0])?;
                let r = self.read_immediate(&args[1])?;
                self.exact_div(&l, &r, dest)?;
            }
            sym::rotate_left | sym::rotate_right => {
                // rotate_left: (X << (S % BW)) | (X >> ((BW - S) % BW))
                // rotate_right: (X << ((BW - S) % BW)) | (X >> (S % BW))
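                // E.g. `rotate_left::<u8>(0b1000_0001, 1)`: BW = 8, shift = 1, inv_shift = 7,
                // so after truncation the result is `0b0000_0011`.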
                let layout_val = self.layout_of(instance_args.type_at(0))?;
                let val = self.read_scalar(&args[0])?;
                let val_bits = val.to_bits(layout_val.size)?; // sign is ignored here

                let layout_raw_shift = self.layout_of(self.tcx.types.u32)?;
                let raw_shift = self.read_scalar(&args[1])?;
                let raw_shift_bits = raw_shift.to_bits(layout_raw_shift.size)?;

                let width_bits = u128::from(layout_val.size.bits());
                let shift_bits = raw_shift_bits % width_bits;
                let inv_shift_bits = (width_bits - shift_bits) % width_bits;
                let result_bits = if intrinsic_name == sym::rotate_left {
                    (val_bits << shift_bits) | (val_bits >> inv_shift_bits)
                } else {
                    (val_bits >> shift_bits) | (val_bits << inv_shift_bits)
                };
                let truncated_bits = layout_val.size.truncate(result_bits);
                let result = Scalar::from_uint(truncated_bits, layout_val.size);
                self.write_scalar(result, dest)?;
            }
            sym::copy => {
                self.copy_intrinsic(&args[0], &args[1], &args[2], /*nonoverlapping*/ false)?;
            }
            sym::write_bytes => {
                self.write_bytes_intrinsic(&args[0], &args[1], &args[2], "write_bytes")?;
            }
            sym::compare_bytes => {
                let result = self.compare_bytes_intrinsic(&args[0], &args[1], &args[2])?;
                self.write_scalar(result, dest)?;
            }
            sym::arith_offset => {
                let ptr = self.read_pointer(&args[0])?;
                let offset_count = self.read_target_isize(&args[1])?;
                let pointee_ty = instance_args.type_at(0);

                let pointee_size = i64::try_from(self.layout_of(pointee_ty)?.size.bytes()).unwrap();
                let offset_bytes = offset_count.wrapping_mul(pointee_size);
                let offset_ptr = ptr.wrapping_signed_offset(offset_bytes, self);
                self.write_pointer(offset_ptr, dest)?;
            }
            sym::ptr_offset_from | sym::ptr_offset_from_unsigned => {
                let a = self.read_pointer(&args[0])?;
                let b = self.read_pointer(&args[1])?;

                let usize_layout = self.layout_of(self.tcx.types.usize)?;
                let isize_layout = self.layout_of(self.tcx.types.isize)?;

                // Get offsets for both that are at least relative to the same base.
                // With `OFFSET_IS_ADDR` this is trivial; without it we need either
                // two integers or two pointers into the same allocation.
                let (a_offset, b_offset, is_addr) = if M::Provenance::OFFSET_IS_ADDR {
                    (a.addr().bytes(), b.addr().bytes(), /*is_addr*/ true)
                } else {
                    match (self.ptr_try_get_alloc_id(a, 0), self.ptr_try_get_alloc_id(b, 0)) {
                        (Err(a), Err(b)) => {
                            // Neither pointer points to an allocation, so they are both absolute.
                            (a, b, /*is_addr*/ true)
                        }
                        (Ok((a_alloc_id, a_offset, _)), Ok((b_alloc_id, b_offset, _)))
                            if a_alloc_id == b_alloc_id =>
                        {
                            // Found allocation for both, and it's the same.
                            // Use these offsets for distance calculation.
                            (a_offset.bytes(), b_offset.bytes(), /*is_addr*/ false)
                        }
                        _ => {
                            // Not into the same allocation -- this is UB.
                            throw_ub_custom!(
                                fluent::const_eval_offset_from_different_allocations,
                                name = intrinsic_name,
                            );
                        }
                    }
                };

                // Compute distance: a - b.
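                // E.g. for two `u32` pointers with `a = base + 12` and `b = base + 4`,
                // `dist` is 8, and the division below yields `8 / 4 = 2` elements.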
                let dist = {
                    // Addresses are unsigned, so this is a `usize` computation. We have to do the
                    // overflow check separately anyway.
                    let (val, overflowed) = {
                        let a_offset = ImmTy::from_uint(a_offset, usize_layout);
                        let b_offset = ImmTy::from_uint(b_offset, usize_layout);
                        self.binary_op(BinOp::SubWithOverflow, &a_offset, &b_offset)?
                            .to_scalar_pair()
                    };
                    if overflowed.to_bool()? {
                        // a < b
                        if intrinsic_name == sym::ptr_offset_from_unsigned {
                            throw_ub_custom!(
                                fluent::const_eval_offset_from_unsigned_overflow,
                                a_offset = a_offset,
                                b_offset = b_offset,
                                is_addr = is_addr,
                            );
                        }
                        // The signed form of the intrinsic allows this. If we interpret the
                        // difference as isize, we'll get the proper signed difference. If that
                        // seems *positive* or equal to isize::MIN, they were more than isize::MAX apart.
                        let dist = val.to_target_isize(self)?;
                        if dist >= 0 || i128::from(dist) == self.pointer_size().signed_int_min() {
                            throw_ub_custom!(
                                fluent::const_eval_offset_from_underflow,
                                name = intrinsic_name,
                            );
                        }
                        dist
                    } else {
                        // b >= a
                        let dist = val.to_target_isize(self)?;
                        // If converting to isize produced a *negative* result, we had an overflow
                        // because they were more than isize::MAX apart.
                        if dist < 0 {
                            throw_ub_custom!(
                                fluent::const_eval_offset_from_overflow,
                                name = intrinsic_name,
                            );
                        }
                        dist
                    }
                };

                // Check that the memory between them is dereferenceable at all, starting from the
                // origin pointer: `dist` is `a - b`, so it is based on `b`.
                self.check_ptr_access_signed(b, dist, CheckInAllocMsg::Dereferenceable)
                    .map_err_kind(|_| {
                        // This could mean they point to different allocations, or they point to the same allocation
                        // but not the entire range between the pointers is in-bounds.
                        if let Ok((a_alloc_id, ..)) = self.ptr_try_get_alloc_id(a, 0)
                            && let Ok((b_alloc_id, ..)) = self.ptr_try_get_alloc_id(b, 0)
                            && a_alloc_id == b_alloc_id
                        {
                            err_ub_custom!(
                                fluent::const_eval_offset_from_out_of_bounds,
                                name = intrinsic_name,
                            )
                        } else {
                            err_ub_custom!(
                                fluent::const_eval_offset_from_different_allocations,
                                name = intrinsic_name,
                            )
                        }
                    })?;
                // Then check that this is also dereferenceable from `a`. This ensures that they are
                // derived from the same allocation.
                self.check_ptr_access_signed(
                    a,
                    dist.checked_neg().unwrap(), // i64::MIN is impossible as no allocation can be that large
                    CheckInAllocMsg::Dereferenceable,
                )
                .map_err_kind(|_| {
                    // Make the error more specific.
                    err_ub_custom!(
                        fluent::const_eval_offset_from_different_allocations,
                        name = intrinsic_name,
                    )
                })?;

                // Perform division by size to compute return value.
                let ret_layout = if intrinsic_name == sym::ptr_offset_from_unsigned {
                    assert!(0 <= dist && dist <= self.target_isize_max());
                    usize_layout
                } else {
                    assert!(self.target_isize_min() <= dist && dist <= self.target_isize_max());
                    isize_layout
                };
                let pointee_layout = self.layout_of(instance_args.type_at(0))?;
                // If ret_layout is unsigned, we checked that so is the distance, so we are good.
                let val = ImmTy::from_int(dist, ret_layout);
                let size = ImmTy::from_int(pointee_layout.size.bytes(), ret_layout);
                self.exact_div(&val, &size, dest)?;
            }

            sym::assert_inhabited
            | sym::assert_zero_valid
            | sym::assert_mem_uninitialized_valid => {
                let ty = instance.args.type_at(0);
                let requirement = ValidityRequirement::from_intrinsic(intrinsic_name).unwrap();

                let should_panic = !self
                    .tcx
                    .check_validity_requirement((requirement, self.typing_env.as_query_input(ty)))
                    .map_err(|_| err_inval!(TooGeneric))?;

                if should_panic {
                    let layout = self.layout_of(ty)?;

                    let msg = match requirement {
                        // For *all* intrinsics we first check `is_uninhabited` to give a more specific
                        // error message.
                        _ if layout.is_uninhabited() => format!(
                            "aborted execution: attempted to instantiate uninhabited type `{ty}`"
                        ),
                        ValidityRequirement::Inhabited => bug!("handled earlier"),
                        ValidityRequirement::Zero => format!(
                            "aborted execution: attempted to zero-initialize type `{ty}`, which is invalid"
                        ),
                        ValidityRequirement::UninitMitigated0x01Fill => format!(
                            "aborted execution: attempted to leave type `{ty}` uninitialized, which is invalid"
                        ),
                        ValidityRequirement::Uninit => bug!("assert_uninit_valid doesn't exist"),
                    };

                    M::panic_nounwind(self, &msg)?;
                    // Skip the `return_to_block` at the end (we panicked, we do not return).
                    return interp_ok(true);
                }
            }
            sym::simd_insert => {
                let index = u64::from(self.read_scalar(&args[1])?.to_u32()?);
                let elem = &args[2];
                let (input, input_len) = self.project_to_simd(&args[0])?;
                let (dest, dest_len) = self.project_to_simd(dest)?;
                assert_eq!(input_len, dest_len, "Return vector length must match input length");
                // Bounds are not checked by typeck so we have to do it ourselves.
                if index >= input_len {
                    throw_ub_format!(
                        "`simd_insert` index {index} is out-of-bounds of vector with length {input_len}"
                    );
                }

                for i in 0..dest_len {
                    let place = self.project_index(&dest, i)?;
                    let value =
                        if i == index { elem.clone() } else { self.project_index(&input, i)? };
                    self.copy_op(&value, &place)?;
                }
            }
            sym::simd_extract => {
                let index = u64::from(self.read_scalar(&args[1])?.to_u32()?);
                let (input, input_len) = self.project_to_simd(&args[0])?;
                // Bounds are not checked by typeck so we have to do it ourselves.
                if index >= input_len {
                    throw_ub_format!(
                        "`simd_extract` index {index} is out-of-bounds of vector with length {input_len}"
                    );
                }
                self.copy_op(&self.project_index(&input, index)?, dest)?;
            }
            sym::black_box => {
                // These just return their argument
                self.copy_op(&args[0], dest)?;
            }
            sym::raw_eq => {
                let result = self.raw_eq_intrinsic(&args[0], &args[1])?;
                self.write_scalar(result, dest)?;
            }
            sym::typed_swap_nonoverlapping => {
                self.typed_swap_nonoverlapping_intrinsic(&args[0], &args[1])?;
            }

            sym::vtable_size => {
                let ptr = self.read_pointer(&args[0])?;
                // `None` because we don't know which trait to expect here; any vtable is okay.
                let (size, _align) = self.get_vtable_size_and_align(ptr, None)?;
                self.write_scalar(Scalar::from_target_usize(size.bytes(), self), dest)?;
            }
            sym::vtable_align => {
                let ptr = self.read_pointer(&args[0])?;
                // `None` because we don't know which trait to expect here; any vtable is okay.
                let (_size, align) = self.get_vtable_size_and_align(ptr, None)?;
                self.write_scalar(Scalar::from_target_usize(align.bytes(), self), dest)?;
            }

            sym::minnumf16 => self.float_min_intrinsic::<Half>(args, dest)?,
            sym::minnumf32 => self.float_min_intrinsic::<Single>(args, dest)?,
            sym::minnumf64 => self.float_min_intrinsic::<Double>(args, dest)?,
            sym::minnumf128 => self.float_min_intrinsic::<Quad>(args, dest)?,

            sym::minimumf16 => self.float_minimum_intrinsic::<Half>(args, dest)?,
            sym::minimumf32 => self.float_minimum_intrinsic::<Single>(args, dest)?,
            sym::minimumf64 => self.float_minimum_intrinsic::<Double>(args, dest)?,
            sym::minimumf128 => self.float_minimum_intrinsic::<Quad>(args, dest)?,

            sym::maxnumf16 => self.float_max_intrinsic::<Half>(args, dest)?,
            sym::maxnumf32 => self.float_max_intrinsic::<Single>(args, dest)?,
            sym::maxnumf64 => self.float_max_intrinsic::<Double>(args, dest)?,
            sym::maxnumf128 => self.float_max_intrinsic::<Quad>(args, dest)?,

            sym::maximumf16 => self.float_maximum_intrinsic::<Half>(args, dest)?,
            sym::maximumf32 => self.float_maximum_intrinsic::<Single>(args, dest)?,
            sym::maximumf64 => self.float_maximum_intrinsic::<Double>(args, dest)?,
            sym::maximumf128 => self.float_maximum_intrinsic::<Quad>(args, dest)?,

            sym::copysignf16 => self.float_copysign_intrinsic::<Half>(args, dest)?,
            sym::copysignf32 => self.float_copysign_intrinsic::<Single>(args, dest)?,
            sym::copysignf64 => self.float_copysign_intrinsic::<Double>(args, dest)?,
            sym::copysignf128 => self.float_copysign_intrinsic::<Quad>(args, dest)?,

            sym::fabsf16 => self.float_abs_intrinsic::<Half>(args, dest)?,
            sym::fabsf32 => self.float_abs_intrinsic::<Single>(args, dest)?,
            sym::fabsf64 => self.float_abs_intrinsic::<Double>(args, dest)?,
            sym::fabsf128 => self.float_abs_intrinsic::<Quad>(args, dest)?,

            sym::floorf16 => self.float_round_intrinsic::<Half>(
                args,
                dest,
                rustc_apfloat::Round::TowardNegative,
            )?,
            sym::floorf32 => self.float_round_intrinsic::<Single>(
                args,
                dest,
                rustc_apfloat::Round::TowardNegative,
            )?,
            sym::floorf64 => self.float_round_intrinsic::<Double>(
                args,
                dest,
                rustc_apfloat::Round::TowardNegative,
            )?,
            sym::floorf128 => self.float_round_intrinsic::<Quad>(
                args,
                dest,
                rustc_apfloat::Round::TowardNegative,
            )?,

            sym::ceilf16 => self.float_round_intrinsic::<Half>(
                args,
                dest,
                rustc_apfloat::Round::TowardPositive,
            )?,
            sym::ceilf32 => self.float_round_intrinsic::<Single>(
                args,
                dest,
                rustc_apfloat::Round::TowardPositive,
            )?,
            sym::ceilf64 => self.float_round_intrinsic::<Double>(
                args,
                dest,
                rustc_apfloat::Round::TowardPositive,
            )?,
            sym::ceilf128 => self.float_round_intrinsic::<Quad>(
                args,
                dest,
                rustc_apfloat::Round::TowardPositive,
            )?,

            sym::truncf16 => {
                self.float_round_intrinsic::<Half>(args, dest, rustc_apfloat::Round::TowardZero)?
            }
            sym::truncf32 => {
                self.float_round_intrinsic::<Single>(args, dest, rustc_apfloat::Round::TowardZero)?
            }
            sym::truncf64 => {
                self.float_round_intrinsic::<Double>(args, dest, rustc_apfloat::Round::TowardZero)?
            }
            sym::truncf128 => {
                self.float_round_intrinsic::<Quad>(args, dest, rustc_apfloat::Round::TowardZero)?
            }

            sym::roundf16 => self.float_round_intrinsic::<Half>(
                args,
                dest,
                rustc_apfloat::Round::NearestTiesToAway,
            )?,
            sym::roundf32 => self.float_round_intrinsic::<Single>(
                args,
                dest,
                rustc_apfloat::Round::NearestTiesToAway,
            )?,
            sym::roundf64 => self.float_round_intrinsic::<Double>(
                args,
                dest,
                rustc_apfloat::Round::NearestTiesToAway,
            )?,
            sym::roundf128 => self.float_round_intrinsic::<Quad>(
                args,
                dest,
                rustc_apfloat::Round::NearestTiesToAway,
            )?,

            sym::round_ties_even_f16 => self.float_round_intrinsic::<Half>(
                args,
                dest,
                rustc_apfloat::Round::NearestTiesToEven,
            )?,
            sym::round_ties_even_f32 => self.float_round_intrinsic::<Single>(
                args,
                dest,
                rustc_apfloat::Round::NearestTiesToEven,
            )?,
            sym::round_ties_even_f64 => self.float_round_intrinsic::<Double>(
                args,
                dest,
                rustc_apfloat::Round::NearestTiesToEven,
            )?,
            sym::round_ties_even_f128 => self.float_round_intrinsic::<Quad>(
                args,
                dest,
                rustc_apfloat::Round::NearestTiesToEven,
            )?,

            // Unsupported intrinsic: skip the return_to_block below.
            _ => return interp_ok(false),
        }

        trace!("{:?}", self.dump_place(&dest.clone().into()));
        self.return_to_block(ret)?;
        interp_ok(true)
    }

    pub(super) fn eval_nondiverging_intrinsic(
        &mut self,
        intrinsic: &NonDivergingIntrinsic<'tcx>,
    ) -> InterpResult<'tcx> {
        match intrinsic {
            NonDivergingIntrinsic::Assume(op) => {
                let op = self.eval_operand(op, None)?;
                let cond = self.read_scalar(&op)?.to_bool()?;
                if !cond {
                    throw_ub_custom!(fluent::const_eval_assume_false);
                }
                interp_ok(())
            }
            NonDivergingIntrinsic::CopyNonOverlapping(mir::CopyNonOverlapping {
                count,
                src,
                dst,
            }) => {
                let src = self.eval_operand(src, None)?;
                let dst = self.eval_operand(dst, None)?;
                let count = self.eval_operand(count, None)?;
                self.copy_intrinsic(&src, &dst, &count, /* nonoverlapping */ true)
            }
        }
    }

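    /// Implements the raw bit-manipulation intrinsics (`ctpop`, `ctlz`/`ctlz_nonzero`,
    /// `cttz`/`cttz_nonzero`, `bswap`, `bitreverse`) directly on the bits of `val`.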
    pub fn numeric_intrinsic(
        &self,
        name: Symbol,
        val: Scalar<M::Provenance>,
        layout: TyAndLayout<'tcx>,
        ret_layout: TyAndLayout<'tcx>,
    ) -> InterpResult<'tcx, Scalar<M::Provenance>> {
        assert!(layout.ty.is_integral(), "invalid type for numeric intrinsic: {}", layout.ty);
        let bits = val.to_bits(layout.size)?; // these operations all ignore the sign
        let extra = 128 - u128::from(layout.size.bits());
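        // E.g. for a `u8` argument, `extra == 120`: `ctlz` must discount the 120 phantom
        // leading zeros of the zero-extended `u128`, and `cttz` shifts the value up first
        // so that a zero input counts out to 8 rather than 128.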
        let bits_out = match name {
            sym::ctpop => u128::from(bits.count_ones()),
            sym::ctlz_nonzero | sym::cttz_nonzero if bits == 0 => {
                throw_ub_custom!(fluent::const_eval_call_nonzero_intrinsic, name = name,);
            }
            sym::ctlz | sym::ctlz_nonzero => u128::from(bits.leading_zeros()) - extra,
            sym::cttz | sym::cttz_nonzero => u128::from((bits << extra).trailing_zeros()) - extra,
            sym::bswap => {
                assert_eq!(layout, ret_layout);
                (bits << extra).swap_bytes()
            }
            sym::bitreverse => {
                assert_eq!(layout, ret_layout);
                (bits << extra).reverse_bits()
            }
            _ => bug!("not a numeric intrinsic: {}", name),
        };
        interp_ok(Scalar::from_uint(bits_out, ret_layout.size))
    }

    pub fn exact_div(
        &mut self,
        a: &ImmTy<'tcx, M::Provenance>,
        b: &ImmTy<'tcx, M::Provenance>,
        dest: &PlaceTy<'tcx, M::Provenance>,
    ) -> InterpResult<'tcx> {
        assert_eq!(a.layout.ty, b.layout.ty);
        assert_matches!(a.layout.ty.kind(), ty::Int(..) | ty::Uint(..));

        // Performs an exact division, resulting in undefined behavior where
        // `x % y != 0` or `y == 0` or `x == T::MIN && y == -1`.
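        // (E.g. `exact_div(8u32, 2)` yields 4, while `exact_div(7u32, 2)` or
        // `exact_div(i32::MIN, -1)` would be UB.)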
        // First, check x % y != 0 (or if that computation overflows).
        let rem = self.binary_op(BinOp::Rem, a, b)?;
        // sign does not matter for 0 test, so `to_bits` is fine
        if rem.to_scalar().to_bits(a.layout.size)? != 0 {
            throw_ub_custom!(
                fluent::const_eval_exact_div_has_remainder,
                a = format!("{a}"),
                b = format!("{b}")
            )
        }
        // `Rem` says this is all right, so we can let `Div` do its job.
        let res = self.binary_op(BinOp::Div, a, b)?;
        self.write_immediate(*res, dest)
    }

    pub fn saturating_arith(
        &self,
        mir_op: BinOp,
        l: &ImmTy<'tcx, M::Provenance>,
        r: &ImmTy<'tcx, M::Provenance>,
    ) -> InterpResult<'tcx, Scalar<M::Provenance>> {
        assert_eq!(l.layout.ty, r.layout.ty);
        assert_matches!(l.layout.ty.kind(), ty::Int(..) | ty::Uint(..));
        assert_matches!(mir_op, BinOp::Add | BinOp::Sub);

        let (val, overflowed) =
            self.binary_op(mir_op.wrapping_to_overflowing().unwrap(), l, r)?.to_scalar_pair();
        interp_ok(if overflowed.to_bool()? {
            let size = l.layout.size;
            if l.layout.backend_repr.is_signed() {
                // For signed ints the saturated value depends on the sign of the first
                // term since the sign of the second term can be inferred from this and
                // the fact that the operation has overflowed (if either is 0 no
                // overflow can occur)
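                // E.g. `i8::MAX.saturating_add(1)` overflows with a non-negative first
                // term and saturates to `i8::MAX`, while `i8::MIN.saturating_sub(1)`
                // saturates to `i8::MIN`.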
                let first_term: i128 = l.to_scalar().to_int(l.layout.size)?;
                if first_term >= 0 {
                    // Negative overflow not possible since the positive first term
                    // can only increase an (in range) negative term for addition
                    // or corresponding negated positive term for subtraction.
                    Scalar::from_int(size.signed_int_max(), size)
                } else {
                    // Positive overflow not possible for similar reason.
                    Scalar::from_int(size.signed_int_min(), size)
                }
            } else {
                // unsigned
                if matches!(mir_op, BinOp::Add) {
                    // max unsigned
                    Scalar::from_uint(size.unsigned_int_max(), size)
                } else {
                    // underflow to 0
                    Scalar::from_uint(0u128, size)
                }
            }
        } else {
            val
        })
    }

    /// Offsets a pointer by a given number of bytes, returning an error if the pointer
    /// leaves its allocation.
    pub fn ptr_offset_inbounds(
        &self,
        ptr: Pointer<Option<M::Provenance>>,
        offset_bytes: i64,
    ) -> InterpResult<'tcx, Pointer<Option<M::Provenance>>> {
        // The offset must be in bounds starting from `ptr`.
        self.check_ptr_access_signed(
            ptr,
            offset_bytes,
            CheckInAllocMsg::InboundsPointerArithmetic,
        )?;
        // This also implies that there is no overflow, so we are done.
        interp_ok(ptr.wrapping_signed_offset(offset_bytes, self))
    }

    /// Copy `count*size_of::<T>()` many bytes from `*src` to `*dst`.
    pub(crate) fn copy_intrinsic(
        &mut self,
        src: &OpTy<'tcx, <M as Machine<'tcx>>::Provenance>,
        dst: &OpTy<'tcx, <M as Machine<'tcx>>::Provenance>,
        count: &OpTy<'tcx, <M as Machine<'tcx>>::Provenance>,
        nonoverlapping: bool,
    ) -> InterpResult<'tcx> {
        let count = self.read_target_usize(count)?;
        let layout = self.layout_of(src.layout.ty.builtin_deref(true).unwrap())?;
        let (size, align) = (layout.size, layout.align.abi);

        let size = self.compute_size_in_bytes(size, count).ok_or_else(|| {
            err_ub_custom!(
                fluent::const_eval_size_overflow,
                name = if nonoverlapping { "copy_nonoverlapping" } else { "copy" }
            )
        })?;

        let src = self.read_pointer(src)?;
        let dst = self.read_pointer(dst)?;

        self.check_ptr_align(src, align)?;
        self.check_ptr_align(dst, align)?;

        self.mem_copy(src, dst, size, nonoverlapping)
    }

    /// Does a *typed* swap of `*left` and `*right`.
    fn typed_swap_nonoverlapping_intrinsic(
        &mut self,
        left: &OpTy<'tcx, <M as Machine<'tcx>>::Provenance>,
        right: &OpTy<'tcx, <M as Machine<'tcx>>::Provenance>,
    ) -> InterpResult<'tcx> {
        let left = self.deref_pointer(left)?;
        let right = self.deref_pointer(right)?;
        assert_eq!(left.layout, right.layout);
        assert!(left.layout.is_sized());
        let kind = MemoryKind::Stack;
        let temp = self.allocate(left.layout, kind)?;
        self.copy_op(&left, &temp)?; // checks alignment of `left`

        // We want to always enforce non-overlapping, even if this is a scalar type.
        // Therefore we directly use the underlying `mem_copy` here.
        self.mem_copy(right.ptr(), left.ptr(), left.layout.size, /*nonoverlapping*/ true)?;
        // This means we also need to do the validation of the value that used to be in `right`
        // ourselves. This value is now in `left`. The one that started out in `left` already got
        // validated by the copy above.
        if M::enforce_validity(self, left.layout) {
            self.validate_operand(
                &left.clone().into(),
                M::enforce_validity_recursively(self, left.layout),
                /*reset_provenance_and_padding*/ true,
            )?;
        }

        self.copy_op(&temp, &right)?; // checks alignment of `right`

        self.deallocate_ptr(temp.ptr(), None, kind)?;
        interp_ok(())
    }

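    /// Fills `count * size_of::<T>()` many bytes at `dst` with `byte` (the
    /// interpreter's counterpart of `ptr::write_bytes`).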
    pub fn write_bytes_intrinsic(
        &mut self,
        dst: &OpTy<'tcx, <M as Machine<'tcx>>::Provenance>,
        byte: &OpTy<'tcx, <M as Machine<'tcx>>::Provenance>,
        count: &OpTy<'tcx, <M as Machine<'tcx>>::Provenance>,
        name: &'static str,
    ) -> InterpResult<'tcx> {
        let layout = self.layout_of(dst.layout.ty.builtin_deref(true).unwrap())?;

        let dst = self.read_pointer(dst)?;
        let byte = self.read_scalar(byte)?.to_u8()?;
        let count = self.read_target_usize(count)?;

        // `checked_mul` enforces a too small bound (the correct one would probably be target_isize_max),
        // but no actual allocation can be big enough for the difference to be noticeable.
        let len = self
            .compute_size_in_bytes(layout.size, count)
            .ok_or_else(|| err_ub_custom!(fluent::const_eval_size_overflow, name = name))?;

        let bytes = std::iter::repeat(byte).take(len.bytes_usize());
        self.write_bytes_ptr(dst, bytes)
    }

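    /// Lexicographically compares `byte_count` many bytes at `left` and `right`,
    /// returning a `memcmp`-style `i32` (-1, 0, or +1).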
    pub(crate) fn compare_bytes_intrinsic(
        &mut self,
        left: &OpTy<'tcx, <M as Machine<'tcx>>::Provenance>,
        right: &OpTy<'tcx, <M as Machine<'tcx>>::Provenance>,
        byte_count: &OpTy<'tcx, <M as Machine<'tcx>>::Provenance>,
    ) -> InterpResult<'tcx, Scalar<M::Provenance>> {
        let left = self.read_pointer(left)?;
        let right = self.read_pointer(right)?;
        let n = Size::from_bytes(self.read_target_usize(byte_count)?);

        let left_bytes = self.read_bytes_ptr_strip_provenance(left, n)?;
        let right_bytes = self.read_bytes_ptr_strip_provenance(right, n)?;

        // `Ordering`'s discriminants are -1/0/+1, so casting does the right thing.
        let result = Ord::cmp(left_bytes, right_bytes) as i32;
        interp_ok(Scalar::from_i32(result))
    }

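    /// Byte-wise equality of the `size_of::<T>()` bytes behind `lhs` and `rhs`,
    /// with provenance stripped; this backs the `raw_eq` intrinsic.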
    pub(crate) fn raw_eq_intrinsic(
        &mut self,
        lhs: &OpTy<'tcx, <M as Machine<'tcx>>::Provenance>,
        rhs: &OpTy<'tcx, <M as Machine<'tcx>>::Provenance>,
    ) -> InterpResult<'tcx, Scalar<M::Provenance>> {
        let layout = self.layout_of(lhs.layout.ty.builtin_deref(true).unwrap())?;
        assert!(layout.is_sized());

        let get_bytes = |this: &InterpCx<'tcx, M>,
                         op: &OpTy<'tcx, <M as Machine<'tcx>>::Provenance>|
         -> InterpResult<'tcx, &[u8]> {
            let ptr = this.read_pointer(op)?;
            this.check_ptr_align(ptr, layout.align.abi)?;
            let Some(alloc_ref) = self.get_ptr_alloc(ptr, layout.size)? else {
                // zero-sized access
                return interp_ok(&[]);
            };
            alloc_ref.get_bytes_strip_provenance()
        };

        let lhs_bytes = get_bytes(self, lhs)?;
        let rhs_bytes = get_bytes(self, rhs)?;
        interp_ok(Scalar::from_bool(lhs_bytes == rhs_bytes))
    }

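    /// `minnum` semantics: NaNs are handled via `adjust_nan`, and when the operands
    /// compare equal (notably `+0.0 == -0.0`) the machine picks which one to return.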
    fn float_min_intrinsic<F>(
        &mut self,
        args: &[OpTy<'tcx, M::Provenance>],
        dest: &PlaceTy<'tcx, M::Provenance>,
    ) -> InterpResult<'tcx, ()>
    where
        F: rustc_apfloat::Float + rustc_apfloat::FloatConvert<F> + Into<Scalar<M::Provenance>>,
    {
        let a: F = self.read_scalar(&args[0])?.to_float()?;
        let b: F = self.read_scalar(&args[1])?.to_float()?;
        let res = if a == b {
            // They are definitely not NaN (those are never equal), but they could be `+0` and `-0`.
            // Let the machine decide which one to return.
            M::equal_float_min_max(self, a, b)
        } else {
            self.adjust_nan(a.min(b), &[a, b])
        };
        self.write_scalar(res, dest)?;
        interp_ok(())
    }

    fn float_max_intrinsic<F>(
        &mut self,
        args: &[OpTy<'tcx, M::Provenance>],
        dest: &PlaceTy<'tcx, M::Provenance>,
    ) -> InterpResult<'tcx, ()>
    where
        F: rustc_apfloat::Float + rustc_apfloat::FloatConvert<F> + Into<Scalar<M::Provenance>>,
    {
        let a: F = self.read_scalar(&args[0])?.to_float()?;
        let b: F = self.read_scalar(&args[1])?.to_float()?;
        let res = if a == b {
            // They are definitely not NaN (those are never equal), but they could be `+0` and `-0`.
            // Let the machine decide which one to return.
            M::equal_float_min_max(self, a, b)
        } else {
            self.adjust_nan(a.max(b), &[a, b])
        };
        self.write_scalar(res, dest)?;
        interp_ok(())
    }

    fn float_minimum_intrinsic<F>(
        &mut self,
        args: &[OpTy<'tcx, M::Provenance>],
        dest: &PlaceTy<'tcx, M::Provenance>,
    ) -> InterpResult<'tcx, ()>
    where
        F: rustc_apfloat::Float + rustc_apfloat::FloatConvert<F> + Into<Scalar<M::Provenance>>,
    {
        let a: F = self.read_scalar(&args[0])?.to_float()?;
        let b: F = self.read_scalar(&args[1])?.to_float()?;
        let res = a.minimum(b);
        let res = self.adjust_nan(res, &[a, b]);
        self.write_scalar(res, dest)?;
        interp_ok(())
    }

    fn float_maximum_intrinsic<F>(
        &mut self,
        args: &[OpTy<'tcx, M::Provenance>],
        dest: &PlaceTy<'tcx, M::Provenance>,
    ) -> InterpResult<'tcx, ()>
    where
        F: rustc_apfloat::Float + rustc_apfloat::FloatConvert<F> + Into<Scalar<M::Provenance>>,
    {
        let a: F = self.read_scalar(&args[0])?.to_float()?;
        let b: F = self.read_scalar(&args[1])?.to_float()?;
        let res = a.maximum(b);
        let res = self.adjust_nan(res, &[a, b]);
        self.write_scalar(res, dest)?;
        interp_ok(())
    }

    fn float_copysign_intrinsic<F>(
        &mut self,
        args: &[OpTy<'tcx, M::Provenance>],
        dest: &PlaceTy<'tcx, M::Provenance>,
    ) -> InterpResult<'tcx, ()>
    where
        F: rustc_apfloat::Float + rustc_apfloat::FloatConvert<F> + Into<Scalar<M::Provenance>>,
    {
        let a: F = self.read_scalar(&args[0])?.to_float()?;
        let b: F = self.read_scalar(&args[1])?.to_float()?;
        // bitwise, no NaN adjustments
        self.write_scalar(a.copy_sign(b), dest)?;
        interp_ok(())
    }

    fn float_abs_intrinsic<F>(
        &mut self,
        args: &[OpTy<'tcx, M::Provenance>],
        dest: &PlaceTy<'tcx, M::Provenance>,
    ) -> InterpResult<'tcx, ()>
    where
        F: rustc_apfloat::Float + rustc_apfloat::FloatConvert<F> + Into<Scalar<M::Provenance>>,
    {
        let x: F = self.read_scalar(&args[0])?.to_float()?;
        // bitwise, no NaN adjustments
        self.write_scalar(x.abs(), dest)?;
        interp_ok(())
    }

    fn float_round_intrinsic<F>(
        &mut self,
        args: &[OpTy<'tcx, M::Provenance>],
        dest: &PlaceTy<'tcx, M::Provenance>,
        mode: rustc_apfloat::Round,
    ) -> InterpResult<'tcx, ()>
    where
        F: rustc_apfloat::Float + rustc_apfloat::FloatConvert<F> + Into<Scalar<M::Provenance>>,
    {
        let x: F = self.read_scalar(&args[0])?.to_float()?;
        let res = x.round_to_integral(mode).value;
        let res = self.adjust_nan(res, &[x]);
        self.write_scalar(res, dest)?;
        interp_ok(())
    }
1016}