rustc_const_eval/const_eval/
machine.rs

use std::borrow::{Borrow, Cow};
use std::fmt;
use std::hash::Hash;

use rustc_abi::{Align, Size};
use rustc_ast::Mutability;
use rustc_data_structures::fx::{FxHashMap, FxIndexMap, IndexEntry};
use rustc_hir::def_id::{DefId, LocalDefId};
use rustc_hir::{self as hir, CRATE_HIR_ID, LangItem};
use rustc_middle::mir::AssertMessage;
use rustc_middle::mir::interpret::ReportedErrorInfo;
use rustc_middle::query::TyCtxtAt;
use rustc_middle::ty::layout::{HasTypingEnv, TyAndLayout, ValidityRequirement};
use rustc_middle::ty::{self, Ty, TyCtxt};
use rustc_middle::{bug, mir};
use rustc_span::{Span, Symbol, sym};
use rustc_target::callconv::FnAbi;
use tracing::debug;

use super::error::*;
use crate::errors::{LongRunning, LongRunningWarn};
use crate::fluent_generated as fluent;
use crate::interpret::{
    self, AllocId, AllocInit, AllocRange, ConstAllocation, CtfeProvenance, FnArg, Frame,
    GlobalAlloc, ImmTy, InterpCx, InterpResult, OpTy, PlaceTy, Pointer, RangeSet, Scalar,
    compile_time_machine, err_inval, interp_ok, throw_exhaust, throw_inval, throw_ub,
    throw_ub_custom, throw_unsup, throw_unsup_format,
};

/// When hitting this many interpreted terminators we emit a deny-by-default lint
/// that notifies the user that their constant takes a long time to evaluate. If that's
/// what they intended, they can just allow the lint.
const LINT_TERMINATOR_LIMIT: usize = 2_000_000;
/// The limit used by `-Z tiny-const-eval-limit`. This smaller limit is useful for internal
/// tests that would otherwise need to run for 30s or more to show some behaviour.
const TINY_LINT_TERMINATOR_LIMIT: usize = 20;
/// After this many interpreted terminators, we start emitting progress indicators at every
/// power of two of interpreted terminators.
const PROGRESS_INDICATOR_START: usize = 4_000_000;

/// Extra machine state for CTFE, and the Machine instance.
//
// Should be public because out-of-tree rustc consumers need this
// if they want to interact with constant values.
pub struct CompileTimeMachine<'tcx> {
    /// The number of terminators that have been evaluated.
    ///
    /// This is used to produce lints informing the user that the compiler is not stuck.
    /// Set to `usize::MAX` to never report anything.
    pub(super) num_evaluated_steps: usize,

    /// The virtual call stack.
    pub(super) stack: Vec<Frame<'tcx>>,

    /// Pattern matching on consts with references would be unsound if those references
    /// could point to anything mutable. Therefore, when evaluating consts and when constructing valtrees,
    /// we ensure that only immutable global memory can be accessed.
    pub(super) can_access_mut_global: CanAccessMutGlobal,

    /// Whether to check alignment during evaluation.
    pub(super) check_alignment: CheckAlignment,

    /// If `Some`, we are evaluating the initializer of the static with the given `LocalDefId`,
    /// storing the result in the given `AllocId`.
    /// Used to prevent accesses to a static's base allocation, as that may allow for self-initialization loops.
    pub(crate) static_root_ids: Option<(AllocId, LocalDefId)>,

    /// A cache of "data range" computations for unions (i.e., the offsets of non-padding bytes).
    union_data_ranges: FxHashMap<Ty<'tcx>, RangeSet>,
}

#[derive(Copy, Clone)]
pub enum CheckAlignment {
    /// Ignore all alignment requirements.
    /// This is mainly used in interning.
    No,
    /// Hard error when dereferencing a misaligned pointer.
    Error,
}

#[derive(Copy, Clone, PartialEq)]
pub(crate) enum CanAccessMutGlobal {
    No,
    Yes,
}

impl From<bool> for CanAccessMutGlobal {
    fn from(value: bool) -> Self {
        if value { Self::Yes } else { Self::No }
    }
}

impl<'tcx> CompileTimeMachine<'tcx> {
    pub(crate) fn new(
        can_access_mut_global: CanAccessMutGlobal,
        check_alignment: CheckAlignment,
    ) -> Self {
        CompileTimeMachine {
            num_evaluated_steps: 0,
            stack: Vec::new(),
            can_access_mut_global,
            check_alignment,
            static_root_ids: None,
            union_data_ranges: FxHashMap::default(),
        }
    }
}

impl<K: Hash + Eq, V> interpret::AllocMap<K, V> for FxIndexMap<K, V> {
    #[inline(always)]
    fn contains_key<Q: ?Sized + Hash + Eq>(&mut self, k: &Q) -> bool
    where
        K: Borrow<Q>,
    {
        FxIndexMap::contains_key(self, k)
    }

    #[inline(always)]
    fn contains_key_ref<Q: ?Sized + Hash + Eq>(&self, k: &Q) -> bool
    where
        K: Borrow<Q>,
    {
        FxIndexMap::contains_key(self, k)
    }

    #[inline(always)]
    fn insert(&mut self, k: K, v: V) -> Option<V> {
        FxIndexMap::insert(self, k, v)
    }

    #[inline(always)]
    fn remove<Q: ?Sized + Hash + Eq>(&mut self, k: &Q) -> Option<V>
    where
        K: Borrow<Q>,
    {
        // FIXME(#120456) - is `swap_remove` correct?
        FxIndexMap::swap_remove(self, k)
    }

    #[inline(always)]
    fn filter_map_collect<T>(&self, mut f: impl FnMut(&K, &V) -> Option<T>) -> Vec<T> {
        self.iter().filter_map(move |(k, v)| f(k, v)).collect()
    }

    #[inline(always)]
    fn get_or<E>(&self, k: K, vacant: impl FnOnce() -> Result<V, E>) -> Result<&V, E> {
        match self.get(&k) {
            Some(v) => Ok(v),
            None => {
                vacant()?;
                bug!("The CTFE machine shouldn't ever need to extend the alloc_map when reading")
            }
        }
    }

    #[inline(always)]
    fn get_mut_or<E>(&mut self, k: K, vacant: impl FnOnce() -> Result<V, E>) -> Result<&mut V, E> {
        match self.entry(k) {
            IndexEntry::Occupied(e) => Ok(e.into_mut()),
            IndexEntry::Vacant(e) => {
                let v = vacant()?;
                Ok(e.insert(v))
            }
        }
    }
}

pub type CompileTimeInterpCx<'tcx> = InterpCx<'tcx, CompileTimeMachine<'tcx>>;

#[derive(Debug, PartialEq, Eq, Copy, Clone)]
pub enum MemoryKind {
    Heap {
        /// Indicates whether `make_global` was called on this allocation.
        /// If this is `true`, the allocation must be immutable.
        was_made_global: bool,
    },
}

impl fmt::Display for MemoryKind {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            MemoryKind::Heap { was_made_global } => {
                write!(f, "heap allocation{}", if *was_made_global { " (made global)" } else { "" })
            }
        }
    }
}

impl interpret::MayLeak for MemoryKind {
    #[inline(always)]
    fn may_leak(self) -> bool {
        match self {
            MemoryKind::Heap { was_made_global } => was_made_global,
        }
    }
}
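// In other words: an allocation created by `const_allocate` is only allowed to outlive the
// evaluation (i.e. is not reported as a leak) once `const_make_global` has been called on it;
// any heap allocation still mutable at the end of evaluation counts as a leak.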

impl interpret::MayLeak for ! {
    #[inline(always)]
    fn may_leak(self) -> bool {
        // `self` is uninhabited
        self
    }
}

impl<'tcx> CompileTimeInterpCx<'tcx> {
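    /// Compute the `(file, line, column)` triple used in const-eval panic messages, resolving
    /// `span` through macro expansions to the outermost expansion site. The column is converted
    /// from the 0-based `col_display` to a 1-based value, so (illustratively) a panic at the very
    /// start of line 3 of `src/lib.rs` yields `("src/lib.rs", 3, 1)`.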
    fn location_triple_for_span(&self, span: Span) -> (Symbol, u32, u32) {
        let topmost = span.ctxt().outer_expn().expansion_cause().unwrap_or(span);
        let caller = self.tcx.sess.source_map().lookup_char_pos(topmost.lo());

        use rustc_session::RemapFileNameExt;
        use rustc_session::config::RemapPathScopeComponents;
        (
            Symbol::intern(
                &caller
                    .file
                    .name
                    .for_scope(self.tcx.sess, RemapPathScopeComponents::DIAGNOSTICS)
                    .to_string_lossy(),
            ),
            u32::try_from(caller.line).unwrap(),
            u32::try_from(caller.col_display).unwrap().checked_add(1).unwrap(),
        )
    }

    /// "Intercept" a function call, because we have something special to do for it.
    /// All `#[rustc_do_not_const_check]` functions MUST be hooked here.
    /// If this returns `Some`, the contained instance (which may be `instance` itself or a
    /// different function with compatible arguments) is the one evaluation should continue with.
    /// If this returns `None`, the function call has been handled and the function has returned.
    fn hook_special_const_fn(
        &mut self,
        instance: ty::Instance<'tcx>,
        args: &[FnArg<'tcx>],
        _dest: &PlaceTy<'tcx>,
        _ret: Option<mir::BasicBlock>,
    ) -> InterpResult<'tcx, Option<ty::Instance<'tcx>>> {
        let def_id = instance.def_id();

        if self.tcx.is_lang_item(def_id, LangItem::PanicDisplay)
            || self.tcx.is_lang_item(def_id, LangItem::BeginPanic)
        {
            let args = self.copy_fn_args(args);
            // &str or &&str
            assert!(args.len() == 1);

            let mut msg_place = self.deref_pointer(&args[0])?;
            while msg_place.layout.ty.is_ref() {
                msg_place = self.deref_pointer(&msg_place)?;
            }

            let msg = Symbol::intern(self.read_str(&msg_place)?);
            let span = self.find_closest_untracked_caller_location();
            let (file, line, col) = self.location_triple_for_span(span);
            return Err(ConstEvalErrKind::Panic { msg, file, line, col }).into();
        } else if self.tcx.is_lang_item(def_id, LangItem::PanicFmt) {
            // For panic_fmt, call const_panic_fmt instead.
            let const_def_id = self.tcx.require_lang_item(LangItem::ConstPanicFmt, self.tcx.span);
            let new_instance = ty::Instance::expect_resolve(
                *self.tcx,
                self.typing_env(),
                const_def_id,
                instance.args,
                self.cur_span(),
            );

            return interp_ok(Some(new_instance));
        }
        interp_ok(Some(instance))
    }

    /// See documentation on the `ptr_guaranteed_cmp` intrinsic.
    /// Returns `2` if the result is unknown.
    /// Returns `1` if the pointers are guaranteed equal.
    /// Returns `0` if the pointers are guaranteed unequal.
    ///
    /// Note that this intrinsic is exposed on stable for comparison with null. In other words, any
    /// change to this function that affects comparison with null is insta-stable!
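    ///
    /// A rough sketch of the decision order implemented below: integer/integer comparisons are
    /// always known; pointer/integer comparisons are reduced to a null check via
    /// `scalar_may_be_null`; pointer/pointer comparisons first try the alignment-residue
    /// argument, then the rules for pointers into `static` items, and otherwise return `2`
    /// ("unknown").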
    fn guaranteed_cmp(&mut self, a: Scalar, b: Scalar) -> InterpResult<'tcx, u8> {
        interp_ok(match (a, b) {
            // Comparisons between integers are always known.
            (Scalar::Int(a), Scalar::Int(b)) => (a == b) as u8,
            // Comparing a pointer `ptr` with an integer `int` is equivalent to comparing
            // `ptr-int` with null, so we can reduce this case to a `scalar_may_be_null` test.
            (Scalar::Int(int), Scalar::Ptr(ptr, _)) | (Scalar::Ptr(ptr, _), Scalar::Int(int)) => {
                let int = int.to_target_usize(*self.tcx);
                // The `wrapping_neg` here may produce a value that is not
                // a valid target usize any more... but `wrapping_offset` handles that correctly.
                let offset_ptr = ptr.wrapping_offset(Size::from_bytes(int.wrapping_neg()), self);
                if !self.scalar_may_be_null(Scalar::from_pointer(offset_ptr, self))? {
                    // `ptr.wrapping_sub(int)` is definitely not equal to `0`, so `ptr != int`
                    0
                } else {
                    // `ptr.wrapping_sub(int)` could be equal to `0`, but might not be,
                    // so we cannot know for sure if `ptr == int` or not
                    2
                }
            }
            (Scalar::Ptr(a, _), Scalar::Ptr(b, _)) => {
                let (a_prov, a_offset) = a.prov_and_relative_offset();
                let (b_prov, b_offset) = b.prov_and_relative_offset();
                let a_allocid = a_prov.alloc_id();
                let b_allocid = b_prov.alloc_id();
                let a_info = self.get_alloc_info(a_allocid);
                let b_info = self.get_alloc_info(b_allocid);

                // Check if the pointers cannot be equal due to alignment
                if a_info.align > Align::ONE && b_info.align > Align::ONE {
                    let min_align = Ord::min(a_info.align.bytes(), b_info.align.bytes());
                    let a_residue = a_offset.bytes() % min_align;
                    let b_residue = b_offset.bytes() % min_align;
                    if a_residue != b_residue {
                        // If the two pointers have a different residue modulo their
                        // common alignment, they cannot be equal.
                        return interp_ok(0);
                    }
                    // The pointers have the same residue modulo their common alignment,
                    // so they could be equal. Try the other checks.
                }
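                // Worked example for the check above (illustrative): with `min_align = 4`,
                // offsets 1 and 3 have residues 1 and 3. Since every allocation's base address
                // is a multiple of its alignment (and hence of `min_align`), the absolute
                // addresses would then also differ modulo 4, so such pointers can never be equal.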

                if let (Some(GlobalAlloc::Static(a_did)), Some(GlobalAlloc::Static(b_did))) = (
                    self.tcx.try_get_global_alloc(a_allocid),
                    self.tcx.try_get_global_alloc(b_allocid),
                ) {
                    if a_allocid == b_allocid {
                        debug_assert_eq!(
                            a_did, b_did,
                            "different static item DefIds had same AllocId? {a_allocid:?} == {b_allocid:?}, {a_did:?} != {b_did:?}"
                        );
                        // Comparing two pointers into the same static. As per
                        // https://doc.rust-lang.org/nightly/reference/items/static-items.html#r-items.static.intro
                        // a static cannot be duplicated, so if two pointers are into the same
                        // static, they are equal if and only if their offsets are equal.
                        (a_offset == b_offset) as u8
                    } else {
                        debug_assert_ne!(
                            a_did, b_did,
                            "same static item DefId had two different AllocIds? {a_allocid:?} != {b_allocid:?}, {a_did:?} == {b_did:?}"
                        );
                        // Comparing two pointers into different statics.
                        // We can never determine for sure that two pointers into different statics
                        // are *equal*, but we can know that they are *unequal* if they are both
                        // strictly in-bounds (i.e. in-bounds and not one-past-the-end) of
                        // their respective static, as different non-zero-sized statics cannot
                        // overlap or be deduplicated as per
                        // https://doc.rust-lang.org/nightly/reference/items/static-items.html#r-items.static.intro
                        // (non-deduplication), and
                        // https://doc.rust-lang.org/nightly/reference/items/static-items.html#r-items.static.storage-disjointness
                        // (non-overlapping).
                        if a_offset < a_info.size && b_offset < b_info.size {
                            0
                        } else {
                            // Otherwise, conservatively say we don't know.
                            // There are some cases we could still return `0` for, e.g.
                            // if the pointers being equal would require their statics to overlap
                            // one or more bytes, but for simplicity we currently only check
                            // strictly in-bounds pointers.
                            2
                        }
                    }
                } else {
                    // All other cases we conservatively say we don't know.
                    //
                    // For comparing statics to non-statics, as per https://doc.rust-lang.org/nightly/reference/items/static-items.html#r-items.static.storage-disjointness
                    // immutable statics can overlap with other kinds of allocations sometimes.
                    //
                    // FIXME: We could be more decisive for (non-zero-sized) mutable statics,
                    // which cannot overlap with other kinds of allocations.
                    //
                    // Functions and vtables can be duplicated and deduplicated, so we
                    // cannot be sure of runtime equality of pointers to the same one, or the
                    // runtime inequality of pointers to different ones (see e.g. #73722),
                    // so comparing those should return 2, whether they are the same allocation
                    // or not.
                    //
                    // `GlobalAlloc::TypeId` exists mostly to prevent consteval from comparing
                    // `TypeId`s, so comparing those should always return 2, whether they are the
                    // same allocation or not.
                    //
                    // FIXME: We could revisit comparing pointers into the same
                    // `GlobalAlloc::Memory` once https://github.com/rust-lang/rust/issues/128775
                    // is fixed (but they can be deduplicated, so comparing pointers into different
                    // ones should return 2).
                    2
                }
            }
        })
    }
}

impl<'tcx> CompileTimeMachine<'tcx> {
    #[inline(always)]
    /// Find the first stack frame that is within the current crate, if any.
    /// Otherwise, return the crate root's `HirId` (`CRATE_HIR_ID`).
    pub fn best_lint_scope(&self, tcx: TyCtxt<'tcx>) -> hir::HirId {
        self.stack.iter().find_map(|frame| frame.lint_root(tcx)).unwrap_or(CRATE_HIR_ID)
    }
}

impl<'tcx> interpret::Machine<'tcx> for CompileTimeMachine<'tcx> {
    compile_time_machine!(<'tcx>);

    const PANIC_ON_ALLOC_FAIL: bool = false; // will be raised as a proper error

    #[inline(always)]
    fn enforce_alignment(ecx: &InterpCx<'tcx, Self>) -> bool {
        matches!(ecx.machine.check_alignment, CheckAlignment::Error)
    }

    #[inline(always)]
    fn enforce_validity(ecx: &InterpCx<'tcx, Self>, layout: TyAndLayout<'tcx>) -> bool {
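        // Full validity checking is opt-in via `-Zextra-const-ub-checks`; uninhabited layouts
        // are always checked, presumably because no valid value of such a type can exist, so the
        // check is cheap and reliably catches ever "producing" one.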
        ecx.tcx.sess.opts.unstable_opts.extra_const_ub_checks || layout.is_uninhabited()
    }

    fn load_mir(
        ecx: &InterpCx<'tcx, Self>,
        instance: ty::InstanceKind<'tcx>,
    ) -> &'tcx mir::Body<'tcx> {
        match instance {
            ty::InstanceKind::Item(def) => ecx.tcx.mir_for_ctfe(def),
            _ => ecx.tcx.instance_mir(instance),
        }
    }

    fn find_mir_or_eval_fn(
        ecx: &mut InterpCx<'tcx, Self>,
        orig_instance: ty::Instance<'tcx>,
        _abi: &FnAbi<'tcx, Ty<'tcx>>,
        args: &[FnArg<'tcx>],
        dest: &PlaceTy<'tcx>,
        ret: Option<mir::BasicBlock>,
        _unwind: mir::UnwindAction, // unwinding is not supported in consts
    ) -> InterpResult<'tcx, Option<(&'tcx mir::Body<'tcx>, ty::Instance<'tcx>)>> {
        debug!("find_mir_or_eval_fn: {:?}", orig_instance);

        // Replace some functions.
        let Some(instance) = ecx.hook_special_const_fn(orig_instance, args, dest, ret)? else {
            // Call has already been handled.
            return interp_ok(None);
        };

        // Only check non-glue functions
        if let ty::InstanceKind::Item(def) = instance.def {
            // Execution might have wandered off into other crates, so we cannot do a stability-
            // sensitive check here. But we can at least rule out functions that are not const at
            // all. That said, we have to allow calling functions inside a `const trait`. These
            // *are* const-checked!
            if !ecx.tcx.is_const_fn(def) || ecx.tcx.has_attr(def, sym::rustc_do_not_const_check) {
                // We certainly do *not* want to actually call the fn
                // though, so be sure we return here.
                throw_unsup_format!("calling non-const function `{}`", instance)
            }
        }

        // This is a const fn. Call it.
        // In case of replacement, we return the *original* instance to make backtraces work out
        // (and we hope this does not confuse the FnAbi checks too much).
        interp_ok(Some((ecx.load_mir(instance.def, None)?, orig_instance)))
    }

    fn panic_nounwind(ecx: &mut InterpCx<'tcx, Self>, msg: &str) -> InterpResult<'tcx> {
        let msg = Symbol::intern(msg);
        let span = ecx.find_closest_untracked_caller_location();
        let (file, line, col) = ecx.location_triple_for_span(span);
        Err(ConstEvalErrKind::Panic { msg, file, line, col }).into()
    }

    fn call_intrinsic(
        ecx: &mut InterpCx<'tcx, Self>,
        instance: ty::Instance<'tcx>,
        args: &[OpTy<'tcx>],
        dest: &PlaceTy<'tcx, Self::Provenance>,
        target: Option<mir::BasicBlock>,
        _unwind: mir::UnwindAction,
    ) -> InterpResult<'tcx, Option<ty::Instance<'tcx>>> {
        // Shared intrinsics.
        if ecx.eval_intrinsic(instance, args, dest, target)? {
            return interp_ok(None);
        }
        let intrinsic_name = ecx.tcx.item_name(instance.def_id());

        // CTFE-specific intrinsics.
        match intrinsic_name {
            sym::ptr_guaranteed_cmp => {
                let a = ecx.read_scalar(&args[0])?;
                let b = ecx.read_scalar(&args[1])?;
                let cmp = ecx.guaranteed_cmp(a, b)?;
                ecx.write_scalar(Scalar::from_u8(cmp), dest)?;
            }
            sym::const_allocate => {
                let size = ecx.read_scalar(&args[0])?.to_target_usize(ecx)?;
                let align = ecx.read_scalar(&args[1])?.to_target_usize(ecx)?;

                let align = match Align::from_bytes(align) {
                    Ok(a) => a,
                    Err(err) => throw_ub_custom!(
                        fluent::const_eval_invalid_align_details,
                        name = "const_allocate",
                        err_kind = err.diag_ident(),
                        align = err.align()
                    ),
                };

                let ptr = ecx.allocate_ptr(
                    Size::from_bytes(size),
                    align,
                    interpret::MemoryKind::Machine(MemoryKind::Heap { was_made_global: false }),
                    AllocInit::Uninit,
                )?;
                ecx.write_pointer(ptr, dest)?;
            }
            sym::const_deallocate => {
                let ptr = ecx.read_pointer(&args[0])?;
                let size = ecx.read_scalar(&args[1])?.to_target_usize(ecx)?;
                let align = ecx.read_scalar(&args[2])?.to_target_usize(ecx)?;

                let size = Size::from_bytes(size);
                let align = match Align::from_bytes(align) {
                    Ok(a) => a,
                    Err(err) => throw_ub_custom!(
                        fluent::const_eval_invalid_align_details,
                        name = "const_deallocate",
                        err_kind = err.diag_ident(),
                        align = err.align()
                    ),
                };

                // If an allocation was created in another const,
                // we don't deallocate it.
                let (alloc_id, _, _) = ecx.ptr_get_alloc_id(ptr, 0)?;
                let is_allocated_in_another_const = matches!(
                    ecx.tcx.try_get_global_alloc(alloc_id),
                    Some(interpret::GlobalAlloc::Memory(_))
                );

                if !is_allocated_in_another_const {
                    ecx.deallocate_ptr(
                        ptr,
                        Some((size, align)),
                        interpret::MemoryKind::Machine(MemoryKind::Heap { was_made_global: false }),
                    )?;
                }
            }

            sym::const_make_global => {
                let ptr = ecx.read_pointer(&args[0])?;
                ecx.make_const_heap_ptr_global(ptr)?;
                ecx.write_pointer(ptr, dest)?;
            }
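            // Taken together, these arms implement the CTFE heap: `const_allocate` creates a
            // mutable machine-local allocation, `const_deallocate` frees it again (but only if it
            // does not belong to a different const), and `const_make_global` marks it immutable
            // so that it may become part of the final value (see `MemoryKind::may_leak`).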

            // The intrinsic represents whether the value is known to the optimizer (LLVM).
            // We're not doing any optimizations here, so there is no optimizer that could know the value.
            // (We know the value here in the machine of course, but this is the runtime of that code,
            // not the optimization stage.)
            sym::is_val_statically_known => ecx.write_scalar(Scalar::from_bool(false), dest)?,

            // We handle these here since Miri does not want to have them.
            sym::assert_inhabited
            | sym::assert_zero_valid
            | sym::assert_mem_uninitialized_valid => {
                let ty = instance.args.type_at(0);
                let requirement = ValidityRequirement::from_intrinsic(intrinsic_name).unwrap();

                let should_panic = !ecx
                    .tcx
                    .check_validity_requirement((requirement, ecx.typing_env().as_query_input(ty)))
                    .map_err(|_| err_inval!(TooGeneric))?;

                if should_panic {
                    let layout = ecx.layout_of(ty)?;

                    let msg = match requirement {
                        // For *all* intrinsics we first check `is_uninhabited` to give a more specific
                        // error message.
                        _ if layout.is_uninhabited() => format!(
                            "aborted execution: attempted to instantiate uninhabited type `{ty}`"
                        ),
                        ValidityRequirement::Inhabited => bug!("handled earlier"),
                        ValidityRequirement::Zero => format!(
                            "aborted execution: attempted to zero-initialize type `{ty}`, which is invalid"
                        ),
                        ValidityRequirement::UninitMitigated0x01Fill => format!(
                            "aborted execution: attempted to leave type `{ty}` uninitialized, which is invalid"
                        ),
                        ValidityRequirement::Uninit => bug!("assert_uninit_valid doesn't exist"),
                    };

                    Self::panic_nounwind(ecx, &msg)?;
                    // Skip the `return_to_block` at the end (we panicked, we do not return).
                    return interp_ok(None);
                }
            }

            _ => {
                // We haven't handled the intrinsic, let's see if we can use a fallback body.
                if ecx.tcx.intrinsic(instance.def_id()).unwrap().must_be_overridden {
                    throw_unsup_format!(
                        "intrinsic `{intrinsic_name}` is not supported at compile-time"
                    );
                }
                return interp_ok(Some(ty::Instance {
                    def: ty::InstanceKind::Item(instance.def_id()),
                    args: instance.args,
                }));
            }
        }

        // Intrinsic is done, jump to next block.
        ecx.return_to_block(target)?;
        interp_ok(None)
    }

    fn assert_panic(
        ecx: &mut InterpCx<'tcx, Self>,
        msg: &AssertMessage<'tcx>,
        _unwind: mir::UnwindAction,
    ) -> InterpResult<'tcx> {
        use rustc_middle::mir::AssertKind::*;
        // Convert `AssertKind<Operand>` to `AssertKind<Scalar>`.
        let eval_to_int =
            |op| ecx.read_immediate(&ecx.eval_operand(op, None)?).map(|x| x.to_const_int());
        let err = match msg {
            BoundsCheck { len, index } => {
                let len = eval_to_int(len)?;
                let index = eval_to_int(index)?;
                BoundsCheck { len, index }
            }
            Overflow(op, l, r) => Overflow(*op, eval_to_int(l)?, eval_to_int(r)?),
            OverflowNeg(op) => OverflowNeg(eval_to_int(op)?),
            DivisionByZero(op) => DivisionByZero(eval_to_int(op)?),
            RemainderByZero(op) => RemainderByZero(eval_to_int(op)?),
            ResumedAfterReturn(coroutine_kind) => ResumedAfterReturn(*coroutine_kind),
            ResumedAfterPanic(coroutine_kind) => ResumedAfterPanic(*coroutine_kind),
            ResumedAfterDrop(coroutine_kind) => ResumedAfterDrop(*coroutine_kind),
            MisalignedPointerDereference { required, found } => MisalignedPointerDereference {
                required: eval_to_int(required)?,
                found: eval_to_int(found)?,
            },
            NullPointerDereference => NullPointerDereference,
            InvalidEnumConstruction(source) => InvalidEnumConstruction(eval_to_int(source)?),
        };
        Err(ConstEvalErrKind::AssertFailure(err)).into()
    }

    fn binary_ptr_op(
        _ecx: &InterpCx<'tcx, Self>,
        _bin_op: mir::BinOp,
        _left: &ImmTy<'tcx>,
        _right: &ImmTy<'tcx>,
    ) -> InterpResult<'tcx, ImmTy<'tcx>> {
        throw_unsup_format!("pointer arithmetic or comparison is not supported at compile-time");
    }

    fn increment_const_eval_counter(ecx: &mut InterpCx<'tcx, Self>) -> InterpResult<'tcx> {
        // If `num_evaluated_steps` is already `usize::MAX` (i.e. the step limit was hit in a
        // previous call, or reporting has been disabled), the `checked_add` below fails and we
        // skip all of the limit handling.
        if let Some(new_steps) = ecx.machine.num_evaluated_steps.checked_add(1) {
            let (limit, start) = if ecx.tcx.sess.opts.unstable_opts.tiny_const_eval_limit {
                (TINY_LINT_TERMINATOR_LIMIT, TINY_LINT_TERMINATOR_LIMIT)
            } else {
                (LINT_TERMINATOR_LIMIT, PROGRESS_INDICATOR_START)
            };

            ecx.machine.num_evaluated_steps = new_steps;
            // By default, we have a *deny* lint kicking in after some time
            // to ensure `loop {}` doesn't just go forever.
            // In case that lint was downgraded, in particular in `--cap-lints` situations, we also
            // have a hard warning shown every now and then for really long executions.
            if new_steps == limit {
                // By default, we stop after `LINT_TERMINATOR_LIMIT` steps, but the user can
                // disable this lint to be able to run until the heat death of the universe or
                // power loss, whichever comes first.
                let hir_id = ecx.machine.best_lint_scope(*ecx.tcx);
                let is_error = ecx
                    .tcx
                    .lint_level_at_node(
                        rustc_session::lint::builtin::LONG_RUNNING_CONST_EVAL,
                        hir_id,
                    )
                    .level
                    .is_error();
                let span = ecx.cur_span();
                ecx.tcx.emit_node_span_lint(
                    rustc_session::lint::builtin::LONG_RUNNING_CONST_EVAL,
                    hir_id,
                    span,
                    LongRunning { item_span: ecx.tcx.span },
                );
                // If this was a hard error, don't bother continuing evaluation.
                if is_error {
                    let guard = ecx
                        .tcx
                        .dcx()
                        .span_delayed_bug(span, "The deny lint should have already errored");
                    throw_inval!(AlreadyReported(ReportedErrorInfo::allowed_in_infallible(guard)));
                }
            } else if new_steps > start && new_steps.is_power_of_two() {
                // Only report after a certain number of terminators have been evaluated and the
                // current number of evaluated terminators is a power of 2. The latter gives us a cheap
                // way to implement exponential backoff.
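                // (Illustratively, with the default limits the first such warning fires at
                // 4_194_304 evaluated terminators, the first power of two above
                // `PROGRESS_INDICATOR_START`, then at 8_388_608, and so on.)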
                let span = ecx.cur_span();
                // We store a unique number in `force_duplicate` to evade `-Z deduplicate-diagnostics`.
                // `new_steps` is guaranteed to be unique because `ecx.machine.num_evaluated_steps` is
                // always increasing.
                ecx.tcx.dcx().emit_warn(LongRunningWarn {
                    span,
                    item_span: ecx.tcx.span,
                    force_duplicate: new_steps,
                });
            }
        }

        interp_ok(())
    }

    #[inline(always)]
    fn expose_provenance(
        _ecx: &InterpCx<'tcx, Self>,
        _provenance: Self::Provenance,
    ) -> InterpResult<'tcx> {
        // This is only reachable with -Zunleash-the-miri-inside-of-you.
        throw_unsup_format!("exposing pointers is not possible at compile-time")
    }

    #[inline(always)]
    fn init_frame(
        ecx: &mut InterpCx<'tcx, Self>,
        frame: Frame<'tcx>,
    ) -> InterpResult<'tcx, Frame<'tcx>> {
        // Enforce stack size limit. Add 1 because this is run before the new frame is pushed.
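        // (The limit itself comes from the crate's `#![recursion_limit = "..."]` attribute,
        // 128 by default, so runaway const-eval recursion is reported as
        // `StackFrameLimitReached` rather than exhausting the compiler's own stack.)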
        if !ecx.recursion_limit.value_within_limit(ecx.stack().len() + 1) {
            throw_exhaust!(StackFrameLimitReached)
        } else {
            interp_ok(frame)
        }
    }

    #[inline(always)]
    fn stack<'a>(
        ecx: &'a InterpCx<'tcx, Self>,
    ) -> &'a [Frame<'tcx, Self::Provenance, Self::FrameExtra>] {
        &ecx.machine.stack
    }

    #[inline(always)]
    fn stack_mut<'a>(
        ecx: &'a mut InterpCx<'tcx, Self>,
    ) -> &'a mut Vec<Frame<'tcx, Self::Provenance, Self::FrameExtra>> {
        &mut ecx.machine.stack
    }

    fn before_access_global(
        _tcx: TyCtxtAt<'tcx>,
        machine: &Self,
        alloc_id: AllocId,
        alloc: ConstAllocation<'tcx>,
        _static_def_id: Option<DefId>,
        is_write: bool,
    ) -> InterpResult<'tcx> {
        let alloc = alloc.inner();
        if is_write {
            // Write access. These are never allowed, but we give a targeted error message.
            match alloc.mutability {
                Mutability::Not => throw_ub!(WriteToReadOnly(alloc_id)),
                Mutability::Mut => Err(ConstEvalErrKind::ModifiedGlobal).into(),
            }
        } else {
            // Read access. These are usually allowed, with some exceptions.
            if machine.can_access_mut_global == CanAccessMutGlobal::Yes {
                // Machine configuration allows us to read from anything (e.g., `static` initializer).
                interp_ok(())
            } else if alloc.mutability == Mutability::Mut {
                // Machine configuration does not allow us to read statics (e.g., `const`
                // initializer).
                Err(ConstEvalErrKind::ConstAccessesMutGlobal).into()
            } else {
                // Immutable global, this read is fine.
                assert_eq!(alloc.mutability, Mutability::Not);
                interp_ok(())
            }
        }
    }

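    // Illustrative consequences of the rule below: retagging a `&i32` (a freeze type) marks its
    // provenance as immutable; retagging a `&Cell<i32>` (not freeze) only marks it as a shared
    // reference; `&mut T` values and scalars without provenance are returned unchanged.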
    fn retag_ptr_value(
        ecx: &mut InterpCx<'tcx, Self>,
        _kind: mir::RetagKind,
        val: &ImmTy<'tcx, CtfeProvenance>,
    ) -> InterpResult<'tcx, ImmTy<'tcx, CtfeProvenance>> {
        // If it's a frozen shared reference that's not already immutable, potentially make it immutable.
        // (Do nothing on `None` provenance, that cannot store immutability anyway.)
        if let ty::Ref(_, ty, mutbl) = val.layout.ty.kind()
            && *mutbl == Mutability::Not
            && val
                .to_scalar_and_meta()
                .0
                .to_pointer(ecx)?
                .provenance
                .is_some_and(|p| !p.immutable())
        {
            // The next check is expensive; that's why we have all the guards above.
            let is_immutable = ty.is_freeze(*ecx.tcx, ecx.typing_env());
            let place = ecx.ref_to_mplace(val)?;
            let new_place = if is_immutable {
                place.map_provenance(CtfeProvenance::as_immutable)
            } else {
                // Even if it is not immutable, remember that it is a shared reference.
                // This allows it to become part of the final value of the constant.
                // (See <https://github.com/rust-lang/rust/pull/128543> for why we allow this
                // even when there is interior mutability.)
                place.map_provenance(CtfeProvenance::as_shared_ref)
            };
            interp_ok(ImmTy::from_immediate(new_place.to_ref(ecx), val.layout))
        } else {
            interp_ok(val.clone())
        }
    }

    fn before_memory_write(
        _tcx: TyCtxtAt<'tcx>,
        _machine: &mut Self,
        _alloc_extra: &mut Self::AllocExtra,
        _ptr: Pointer<Option<Self::Provenance>>,
        (_alloc_id, immutable): (AllocId, bool),
        range: AllocRange,
    ) -> InterpResult<'tcx> {
        if range.size == Size::ZERO {
            // Nothing to check.
            return interp_ok(());
        }
        // Reject writes through immutable pointers.
        if immutable {
            return Err(ConstEvalErrKind::WriteThroughImmutablePointer).into();
        }
        // Everything else is fine.
        interp_ok(())
    }

    fn before_alloc_access(
        tcx: TyCtxtAt<'tcx>,
        machine: &Self,
        alloc_id: AllocId,
    ) -> InterpResult<'tcx> {
        if machine.stack.is_empty() {
            // Get out of the way for the final copy.
            return interp_ok(());
        }
        // Check if this is the currently evaluated static.
        if Some(alloc_id) == machine.static_root_ids.map(|(id, _)| id) {
            return Err(ConstEvalErrKind::RecursiveStatic).into();
        }
        // If this is another static, make sure we fire off the query to detect cycles.
        // But only do that when checks for static recursion are enabled.
        if machine.static_root_ids.is_some() {
            if let Some(GlobalAlloc::Static(def_id)) = tcx.try_get_global_alloc(alloc_id) {
                if tcx.is_foreign_item(def_id) {
                    throw_unsup!(ExternStatic(def_id));
                }
                tcx.eval_static_initializer(def_id)?;
            }
        }
        interp_ok(())
    }

    fn cached_union_data_range<'e>(
        ecx: &'e mut InterpCx<'tcx, Self>,
        ty: Ty<'tcx>,
        compute_range: impl FnOnce() -> RangeSet,
    ) -> Cow<'e, RangeSet> {
        if ecx.tcx.sess.opts.unstable_opts.extra_const_ub_checks {
            Cow::Borrowed(ecx.machine.union_data_ranges.entry(ty).or_insert_with(compute_range))
        } else {
            // Don't bother caching, we're only doing one validation at the end anyway.
            Cow::Owned(compute_range())
        }
    }

    fn get_default_alloc_params(&self) -> <Self::Bytes as mir::interpret::AllocBytes>::AllocParams {
    }
}

// Please do not add any code below the above `Machine` trait impl. I (oli-obk) plan more cleanups
// so we can end up having a file with just that impl, but for now, let's keep the impl discoverable
// at the bottom of this file.