miri/
machine.rs

//! Global machine state as well as implementation of the interpreter engine
//! `Machine` trait.

use std::any::Any;
use std::borrow::Cow;
use std::cell::{Cell, RefCell};
use std::path::Path;
use std::rc::Rc;
use std::{fmt, process};

use rand::rngs::StdRng;
use rand::{Rng, SeedableRng};
use rustc_abi::{Align, ExternAbi, Size};
use rustc_apfloat::{Float, FloatConvert};
use rustc_data_structures::fx::{FxHashMap, FxHashSet};
#[allow(unused)]
use rustc_data_structures::static_assert_size;
use rustc_hir::attrs::InlineAttr;
use rustc_middle::middle::codegen_fn_attrs::TargetFeatureKind;
use rustc_middle::mir;
use rustc_middle::query::TyCtxtAt;
use rustc_middle::ty::layout::{
    HasTyCtxt, HasTypingEnv, LayoutCx, LayoutError, LayoutOf, TyAndLayout,
};
use rustc_middle::ty::{self, Instance, Ty, TyCtxt};
use rustc_session::config::InliningThreshold;
use rustc_span::def_id::{CrateNum, DefId};
use rustc_span::{Span, SpanData, Symbol};
use rustc_target::callconv::FnAbi;

use crate::alloc_addresses::EvalContextExt;
use crate::concurrency::cpu_affinity::{self, CpuAffinityMask};
use crate::concurrency::data_race::{self, NaReadType, NaWriteType};
use crate::concurrency::{AllocDataRaceHandler, GenmcCtx, GlobalDataRaceHandler, weak_memory};
use crate::*;
/// First real-time signal.
/// `signal(7)` says this must be between 32 and 64 and specifies 34 or 35
/// as typical values.
pub const SIGRTMIN: i32 = 34;

/// Last real-time signal.
/// `signal(7)` says it must be between 32 and 64 and specifies
/// `SIGRTMAX` - `SIGRTMIN` >= 8 (which is the value of `_POSIX_RTSIG_MAX`).
pub const SIGRTMAX: i32 = 42;
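
// With these values, `SIGRTMAX - SIGRTMIN == 8`, so Miri models 9 real-time
// signals (34..=42) and the required `SIGRTMAX - SIGRTMIN >= 8` holds with
// equality.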

/// Each anonymous global (constant, vtable, function pointer, ...) has multiple addresses, but only
/// this many. Since const allocations are never deallocated, choosing a new [`AllocId`] and thus
/// base address for each evaluation would produce unbounded memory usage.
const ADDRS_PER_ANON_GLOBAL: usize = 32;
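
// E.g., a const that is evaluated over and over in a loop will be handed at most
// 32 distinct base addresses, instead of a fresh allocation per evaluation.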

/// Extra data stored with each stack frame.
pub struct FrameExtra<'tcx> {
    /// Extra data for the Borrow Tracker.
    pub borrow_tracker: Option<borrow_tracker::FrameState>,

    /// If this is `Some()`, then this is a special "catch unwind" frame (the frame of `try_fn`
    /// called by `try`). When this frame is popped during unwinding a panic,
    /// we stop unwinding and use the `CatchUnwindData` to handle catching the panic.
    pub catch_unwind: Option<CatchUnwindData<'tcx>>,

    /// If `measureme` profiling is enabled, holds timing information
    /// for the start of this frame. When we finish executing this frame,
    /// we use this to register a completed event with `measureme`.
    pub timing: Option<measureme::DetachedTiming>,

    /// Indicates whether a `Frame` is part of a workspace-local crate and is also not
    /// `#[track_caller]`. We compute this once on creation and store the result, as an
    /// optimization.
    /// This is used by `MiriMachine::current_span` and `MiriMachine::caller_span`.
    pub is_user_relevant: bool,

    /// Data race detector per-frame data.
    pub data_race: Option<data_race::FrameState>,
}

impl<'tcx> std::fmt::Debug for FrameExtra<'tcx> {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // Omitting `timing`, it does not support `Debug`.
        let FrameExtra { borrow_tracker, catch_unwind, timing: _, is_user_relevant, data_race } =
            self;
        f.debug_struct("FrameExtra")
            .field("borrow_tracker", borrow_tracker)
            .field("catch_unwind", catch_unwind)
            .field("is_user_relevant", is_user_relevant)
            .field("data_race", data_race)
            .finish()
    }
}

impl VisitProvenance for FrameExtra<'_> {
    fn visit_provenance(&self, visit: &mut VisitWith<'_>) {
        let FrameExtra {
            catch_unwind,
            borrow_tracker,
            timing: _,
            is_user_relevant: _,
            data_race: _,
        } = self;

        catch_unwind.visit_provenance(visit);
        borrow_tracker.visit_provenance(visit);
    }
}

/// Extra memory kinds.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum MiriMemoryKind {
    /// `__rust_alloc` memory.
    Rust,
    /// `miri_alloc` memory.
    Miri,
    /// `malloc` memory.
    C,
    /// Windows `HeapAlloc` memory.
    WinHeap,
    /// Windows "local" memory (to be freed with `LocalFree`).
    WinLocal,
    /// Memory for args, errno, env vars, and other parts of the machine-managed environment.
    /// This memory may leak.
    Machine,
    /// Memory allocated by the runtime, e.g. for readdir. Separate from `Machine` because we clean
    /// it up (or expect the user to invoke operations that clean it up) and leak-check it.
    Runtime,
    /// Globals copied from `tcx`.
    /// This memory may leak.
    Global,
    /// Memory for extern statics.
    /// This memory may leak.
    ExternStatic,
    /// Memory for thread-local statics.
    /// This memory may leak.
    Tls,
    /// Memory mapped directly by the program.
    Mmap,
}

impl From<MiriMemoryKind> for MemoryKind {
    #[inline(always)]
    fn from(kind: MiriMemoryKind) -> MemoryKind {
        MemoryKind::Machine(kind)
    }
}

impl MayLeak for MiriMemoryKind {
    #[inline(always)]
    fn may_leak(self) -> bool {
        use self::MiriMemoryKind::*;
        match self {
            Rust | Miri | C | WinHeap | WinLocal | Runtime => false,
            Machine | Global | ExternStatic | Tls | Mmap => true,
        }
    }
}

impl MiriMemoryKind {
    /// Whether we have a useful allocation span for an allocation of this kind.
    fn should_save_allocation_span(self) -> bool {
        use self::MiriMemoryKind::*;
        match self {
            // Heap allocations are fine since the `Allocation` is created immediately.
            Rust | Miri | C | WinHeap | WinLocal | Mmap => true,
            // Everything else is unclear, let's not show potentially confusing spans.
            Machine | Global | ExternStatic | Tls | Runtime => false,
        }
    }
}

impl fmt::Display for MiriMemoryKind {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        use self::MiriMemoryKind::*;
        match self {
            Rust => write!(f, "Rust heap"),
            Miri => write!(f, "Miri bare-metal heap"),
            C => write!(f, "C heap"),
            WinHeap => write!(f, "Windows heap"),
            WinLocal => write!(f, "Windows local memory"),
            Machine => write!(f, "machine-managed memory"),
            Runtime => write!(f, "language runtime memory"),
            Global => write!(f, "global (static or const)"),
            ExternStatic => write!(f, "extern static"),
            Tls => write!(f, "thread-local static"),
            Mmap => write!(f, "mmap"),
        }
    }
}

pub type MemoryKind = interpret::MemoryKind<MiriMemoryKind>;

/// Pointer provenance.
// This needs to be `Eq`+`Hash` because the `Machine` trait needs that because validity checking
// *might* be recursive and then it has to track which places have already been visited.
// These implementations are a bit questionable, and it means we may check the same place multiple
// times with different provenance, but that is in general not wrong.
#[derive(Clone, Copy, PartialEq, Eq, Hash)]
pub enum Provenance {
    /// For pointers with concrete provenance, we know exactly which allocation they are attached to
    /// and what their borrow tag is.
    Concrete {
        alloc_id: AllocId,
        /// Borrow Tracker tag.
        tag: BorTag,
    },
    /// Pointers with wildcard provenance are created on int-to-ptr casts. According to the
    /// specification, we should at that point angelically "guess" a provenance that will make all
    /// future uses of this pointer work, if at all possible. Of course such a semantics cannot be
    /// actually implemented in Miri. So instead, we approximate this, erring on the side of
    /// accepting too much code rather than rejecting correct code: a pointer with wildcard
    /// provenance "acts like" any previously exposed pointer. Each time it is used, we check
    /// whether *some* exposed pointer could have done what we want to do, and if the answer is yes
    /// then we allow the access. This allows too much code in two ways:
    /// - The same wildcard pointer can "take the role" of multiple different exposed pointers on
    ///   subsequent memory accesses.
    /// - In the aliasing model, we don't just have to know the borrow tag of the pointer used for
    ///   the access, we also have to update the aliasing state -- and that update can be very
    ///   different depending on which borrow tag we pick! Stacked Borrows has support for this by
    ///   switching to a stack that is only approximately known, i.e. we over-approximate the effect
    ///   of using *any* exposed pointer for this access, and only keep information about the borrow
    ///   stack that would be true with all possible choices.
    Wildcard,
}
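
// Illustration (not part of the machine itself): wildcard provenance arises from
// code like the following, where the int-to-ptr cast has to "guess" a provenance:
//
//     let x = 42u8;
//     let addr = &x as *const u8 as usize; // exposes the provenance of `x`
//     let ptr = addr as *const u8;         // `ptr` gets `Provenance::Wildcard`
//     unsafe { ptr.read() };               // accepted: *some* exposed pointer (here `&x`) fits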

/// The "extra" information a pointer has over a regular `AllocId`.
#[derive(Copy, Clone, PartialEq)]
pub enum ProvenanceExtra {
    Concrete(BorTag),
    Wildcard,
}

#[cfg(target_pointer_width = "64")]
static_assert_size!(StrictPointer, 24);
// FIXME: this would fit in 24 bytes, but layout optimizations are not smart enough.
// #[cfg(target_pointer_width = "64")]
// static_assert_size!(Pointer, 24);
#[cfg(target_pointer_width = "64")]
static_assert_size!(Scalar, 32);

impl fmt::Debug for Provenance {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            Provenance::Concrete { alloc_id, tag } => {
                // Forward `alternate` flag to `alloc_id` printing.
                if f.alternate() {
                    write!(f, "[{alloc_id:#?}]")?;
                } else {
                    write!(f, "[{alloc_id:?}]")?;
                }
                // Print Borrow Tracker tag.
                write!(f, "{tag:?}")?;
            }
            Provenance::Wildcard => {
                write!(f, "[wildcard]")?;
            }
        }
        Ok(())
    }
}

impl interpret::Provenance for Provenance {
    /// We use absolute addresses in the `offset` of a `StrictPointer`.
    const OFFSET_IS_ADDR: bool = true;

    /// Miri implements wildcard provenance.
    const WILDCARD: Option<Self> = Some(Provenance::Wildcard);

    fn get_alloc_id(self) -> Option<AllocId> {
        match self {
            Provenance::Concrete { alloc_id, .. } => Some(alloc_id),
            Provenance::Wildcard => None,
        }
    }

    fn fmt(ptr: &interpret::Pointer<Self>, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let (prov, addr) = ptr.into_raw_parts(); // offset is absolute address
        write!(f, "{:#x}", addr.bytes())?;
        if f.alternate() {
            write!(f, "{prov:#?}")?;
        } else {
            write!(f, "{prov:?}")?;
        }
        Ok(())
    }

    fn join(left: Self, right: Self) -> Option<Self> {
        match (left, right) {
            // If both are the *same* concrete tag, that is the result.
            (
                Provenance::Concrete { alloc_id: left_alloc, tag: left_tag },
                Provenance::Concrete { alloc_id: right_alloc, tag: right_tag },
            ) if left_alloc == right_alloc && left_tag == right_tag => Some(left),
            // If one side is a wildcard, the best possible outcome is that it is equal to the other
            // one, and we use that.
            (Provenance::Wildcard, o) | (o, Provenance::Wildcard) => Some(o),
            // Otherwise, fall back to `None`.
            _ => None,
        }
    }
}
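
// For example: joining two copies of the same concrete pointer keeps that
// provenance, joining a wildcard with anything optimistically yields the other
// side, and joining two distinct concrete provenances loses provenance (`None`).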

impl fmt::Debug for ProvenanceExtra {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            ProvenanceExtra::Concrete(pid) => write!(f, "{pid:?}"),
            ProvenanceExtra::Wildcard => write!(f, "<wildcard>"),
        }
    }
}

impl ProvenanceExtra {
    pub fn and_then<T>(self, f: impl FnOnce(BorTag) -> Option<T>) -> Option<T> {
        match self {
            ProvenanceExtra::Concrete(pid) => f(pid),
            ProvenanceExtra::Wildcard => None,
        }
    }
}

/// Extra per-allocation data.
#[derive(Debug)]
pub struct AllocExtra<'tcx> {
    /// Global state of the borrow tracker, if enabled.
    pub borrow_tracker: Option<borrow_tracker::AllocState>,
    /// Extra state for data race detection.
    ///
    /// Invariant: The enum variant must match the enum variant in the `data_race` field on `MiriMachine`.
    pub data_race: AllocDataRaceHandler,
    /// A backtrace to where this allocation was allocated.
    /// As this is recorded for leak reports, it only exists
    /// if this allocation is leakable. The backtrace is not
    /// pruned yet; that should be done before printing it.
    pub backtrace: Option<Vec<FrameInfo<'tcx>>>,
    /// Synchronization primitives like to attach extra data to particular addresses. We store that
    /// inside the relevant allocation, to ensure that everything is removed when the allocation is
    /// freed.
    /// This maps offsets to synchronization-primitive-specific data.
    pub sync: FxHashMap<Size, Box<dyn Any>>,
}

// We need a `Clone` impl because the machine passes `Allocation` through `Cow`...
// but that should never end up actually cloning our `AllocExtra`.
impl<'tcx> Clone for AllocExtra<'tcx> {
    fn clone(&self) -> Self {
        panic!("our allocations should never be cloned");
    }
}

impl VisitProvenance for AllocExtra<'_> {
    fn visit_provenance(&self, visit: &mut VisitWith<'_>) {
        let AllocExtra { borrow_tracker, data_race, backtrace: _, sync: _ } = self;

        borrow_tracker.visit_provenance(visit);
        data_race.visit_provenance(visit);
    }
}

/// Precomputed layouts of primitive types.
pub struct PrimitiveLayouts<'tcx> {
    pub unit: TyAndLayout<'tcx>,
    pub i8: TyAndLayout<'tcx>,
    pub i16: TyAndLayout<'tcx>,
    pub i32: TyAndLayout<'tcx>,
    pub i64: TyAndLayout<'tcx>,
    pub i128: TyAndLayout<'tcx>,
    pub isize: TyAndLayout<'tcx>,
    pub u8: TyAndLayout<'tcx>,
    pub u16: TyAndLayout<'tcx>,
    pub u32: TyAndLayout<'tcx>,
    pub u64: TyAndLayout<'tcx>,
    pub u128: TyAndLayout<'tcx>,
    pub usize: TyAndLayout<'tcx>,
    pub bool: TyAndLayout<'tcx>,
    pub mut_raw_ptr: TyAndLayout<'tcx>,   // *mut ()
    pub const_raw_ptr: TyAndLayout<'tcx>, // *const ()
}

impl<'tcx> PrimitiveLayouts<'tcx> {
    fn new(layout_cx: LayoutCx<'tcx>) -> Result<Self, &'tcx LayoutError<'tcx>> {
        let tcx = layout_cx.tcx();
        let mut_raw_ptr = Ty::new_mut_ptr(tcx, tcx.types.unit);
        let const_raw_ptr = Ty::new_imm_ptr(tcx, tcx.types.unit);
        Ok(Self {
            unit: layout_cx.layout_of(tcx.types.unit)?,
            i8: layout_cx.layout_of(tcx.types.i8)?,
            i16: layout_cx.layout_of(tcx.types.i16)?,
            i32: layout_cx.layout_of(tcx.types.i32)?,
            i64: layout_cx.layout_of(tcx.types.i64)?,
            i128: layout_cx.layout_of(tcx.types.i128)?,
            isize: layout_cx.layout_of(tcx.types.isize)?,
            u8: layout_cx.layout_of(tcx.types.u8)?,
            u16: layout_cx.layout_of(tcx.types.u16)?,
            u32: layout_cx.layout_of(tcx.types.u32)?,
            u64: layout_cx.layout_of(tcx.types.u64)?,
            u128: layout_cx.layout_of(tcx.types.u128)?,
            usize: layout_cx.layout_of(tcx.types.usize)?,
            bool: layout_cx.layout_of(tcx.types.bool)?,
            mut_raw_ptr: layout_cx.layout_of(mut_raw_ptr)?,
            const_raw_ptr: layout_cx.layout_of(const_raw_ptr)?,
        })
    }

    pub fn uint(&self, size: Size) -> Option<TyAndLayout<'tcx>> {
        match size.bits() {
            8 => Some(self.u8),
            16 => Some(self.u16),
            32 => Some(self.u32),
            64 => Some(self.u64),
            128 => Some(self.u128),
            _ => None,
        }
    }

    pub fn int(&self, size: Size) -> Option<TyAndLayout<'tcx>> {
        match size.bits() {
            8 => Some(self.i8),
            16 => Some(self.i16),
            32 => Some(self.i32),
            64 => Some(self.i64),
            128 => Some(self.i128),
            _ => None,
        }
    }
}
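
// For instance, `layouts.uint(Size::from_bits(32))` returns the cached `u32`
// layout, while a size with no matching primitive (e.g. 24 bits) returns `None`.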

/// The machine itself.
///
/// If you add anything here that stores machine values, remember to update
/// `visit_all_machine_values`!
pub struct MiriMachine<'tcx> {
    // We carry a copy of the global `TyCtxt` for convenience, so methods taking just `&Evaluator` have `tcx` access.
    pub tcx: TyCtxt<'tcx>,

    /// Global data for borrow tracking.
    pub borrow_tracker: Option<borrow_tracker::GlobalState>,

    /// Depending on settings, this will be `None`,
    /// global data for a data race detector,
    /// or the context required for running in GenMC mode.
    ///
    /// Invariant: The enum variant must match the enum variant of `AllocDataRaceHandler` in the `data_race` field of all `AllocExtra`.
    pub data_race: GlobalDataRaceHandler,

    /// Ptr-int-cast module global data.
    pub alloc_addresses: alloc_addresses::GlobalState,

    /// Environment variables.
    pub(crate) env_vars: EnvVars<'tcx>,

    /// Return place of the main function.
    pub(crate) main_fn_ret_place: Option<MPlaceTy<'tcx>>,

    /// Program arguments (`Option` because we can only initialize them after creating the ecx).
    /// These are *pointers* to argc/argv because of macOS.
    /// We also need the full command line as one string because of Windows.
    pub(crate) argc: Option<Pointer>,
    pub(crate) argv: Option<Pointer>,
    pub(crate) cmd_line: Option<Pointer>,

    /// TLS state.
    pub(crate) tls: TlsData<'tcx>,

    /// What should Miri do when an op requires communicating with the host,
    /// such as accessing host env vars, random number generation, and
    /// file system access.
    pub(crate) isolated_op: IsolatedOp,

    /// Whether to enforce the validity invariant.
    pub(crate) validation: ValidationMode,

    /// The table of file descriptors.
    pub(crate) fds: shims::FdTable,
    /// The table of directory descriptors.
    pub(crate) dirs: shims::DirTable,

    /// The list of all EpollEventInterest.
    pub(crate) epoll_interests: shims::EpollInterestTable,

    /// This machine's monotonic clock.
    pub(crate) monotonic_clock: MonotonicClock,

    /// The set of threads.
    pub(crate) threads: ThreadManager<'tcx>,

    /// Stores which thread is eligible to run on which CPUs.
    /// This has no effect at all, it is just tracked to produce the correct result
    /// in `sched_getaffinity`.
    pub(crate) thread_cpu_affinity: FxHashMap<ThreadId, CpuAffinityMask>,

    /// Precomputed `TyLayout`s for primitive data types that are commonly used inside Miri.
    pub(crate) layouts: PrimitiveLayouts<'tcx>,

    /// Allocations that are considered roots of static memory (that may leak).
    pub(crate) static_roots: Vec<AllocId>,

    /// The `measureme` profiler used to record timing information about
    /// the emulated program.
    profiler: Option<measureme::Profiler>,
    /// Used with `profiler` to cache the `StringId`s for event names
    /// used with `measureme`.
    string_cache: FxHashMap<String, measureme::StringId>,

    /// Cache of `Instance` exported under the given `Symbol` name.
    /// `None` means no `Instance` is exported under the given name.
    pub(crate) exported_symbols_cache: FxHashMap<Symbol, Option<Instance<'tcx>>>,

    /// Equivalent setting to `RUST_BACKTRACE` on encountering an error.
    pub(crate) backtrace_style: BacktraceStyle,

    /// Crates which are considered local for the purposes of error reporting.
    pub(crate) local_crates: Vec<CrateNum>,

    /// Mapping extern static names to their pointer.
    extern_statics: FxHashMap<Symbol, StrictPointer>,

    /// The random number generator used for resolving non-determinism.
    /// Needs to be queried by ptr-to-int casts, hence needs interior mutability.
    pub(crate) rng: RefCell<StdRng>,

    /// The allocator used for the machine's `AllocBytes` in native-libs mode.
    pub(crate) allocator: Option<Rc<RefCell<crate::alloc::isolated_alloc::IsolatedAlloc>>>,

    /// The allocation IDs to report when they are being allocated
    /// (helps for debugging memory leaks and use-after-free bugs).
    tracked_alloc_ids: FxHashSet<AllocId>,
    /// For the tracked alloc ids, also report read/write accesses.
    track_alloc_accesses: bool,

    /// Controls whether alignment of memory accesses is being checked.
    pub(crate) check_alignment: AlignmentCheck,

    /// Failure rate of `compare_exchange_weak`, between 0.0 and 1.0.
    pub(crate) cmpxchg_weak_failure_rate: f64,

    /// The probability of the active thread being preempted at the end of each basic block.
    pub(crate) preemption_rate: f64,

    /// If `Some`, we will report the current stack every N basic blocks.
    pub(crate) report_progress: Option<u32>,
    /// The total number of blocks that have been executed.
    pub(crate) basic_block_count: u64,

    /// Handle of the optional shared object file for native functions.
    #[cfg(all(unix, feature = "native-lib"))]
    pub native_lib: Vec<(libloading::Library, std::path::PathBuf)>,
    #[cfg(not(all(unix, feature = "native-lib")))]
    pub native_lib: Vec<!>,

    /// Run a garbage collector for `BorTag`s every N basic blocks.
    pub(crate) gc_interval: u32,
    /// The number of blocks that passed since the last `BorTag` GC pass.
    pub(crate) since_gc: u32,

    /// The number of CPUs to be reported by Miri.
    pub(crate) num_cpus: u32,

    /// Determines Miri's page size and associated values.
    pub(crate) page_size: u64,
    pub(crate) stack_addr: u64,
    pub(crate) stack_size: u64,

    /// Whether to collect a backtrace when each allocation is created, just in case it leaks.
    pub(crate) collect_leak_backtraces: bool,

    /// The spans we will use to report where an allocation was created and deallocated in
    /// diagnostics.
    pub(crate) allocation_spans: RefCell<FxHashMap<AllocId, (Span, Option<Span>)>>,

    /// For each allocation, an offset inside that allocation that was deemed aligned even for
    /// symbolic alignment checks. This cannot be stored in `AllocExtra` since it needs to be
    /// tracked for vtables and function allocations as well as regular allocations.
    ///
    /// Invariant: the promised alignment will never be less than the native alignment of the
    /// allocation.
    pub(crate) symbolic_alignment: RefCell<FxHashMap<AllocId, (Size, Align)>>,

    /// A cache of "data range" computations for unions (i.e., the offsets of non-padding bytes).
    union_data_ranges: FxHashMap<Ty<'tcx>, RangeSet>,

    /// Caches the sanity-checks for various pthread primitives.
    pub(crate) pthread_mutex_sanity: Cell<bool>,
    pub(crate) pthread_rwlock_sanity: Cell<bool>,
    pub(crate) pthread_condvar_sanity: Cell<bool>,

    /// Remembers whether we already warned about an extern type with Stacked Borrows.
    pub(crate) sb_extern_type_warned: Cell<bool>,
    /// Remembers whether we already warned about sharing memory with a native call.
    #[allow(unused)]
    pub(crate) native_call_mem_warned: Cell<bool>,
    /// Remembers which shims have already shown the warning about erroring in isolation.
    pub(crate) reject_in_isolation_warned: RefCell<FxHashSet<String>>,
    /// Remembers which int2ptr casts we have already warned about.
    pub(crate) int2ptr_warned: RefCell<FxHashSet<Span>>,

    /// Cache for `mangle_internal_symbol`.
    pub(crate) mangle_internal_symbol_cache: FxHashMap<&'static str, String>,

    /// Always prefer the intrinsic fallback body over the native Miri implementation.
    pub force_intrinsic_fallback: bool,

    /// Whether floating-point operations can behave non-deterministically.
    pub float_nondet: bool,
    /// Whether floating-point operations can have a non-deterministic rounding error.
    pub float_rounding_error: bool,
}

impl<'tcx> MiriMachine<'tcx> {
    /// Create a new MiriMachine.
    ///
    /// Invariant: `genmc_ctx.is_some() == config.genmc_config.is_some()`
    pub(crate) fn new(
        config: &MiriConfig,
        layout_cx: LayoutCx<'tcx>,
        genmc_ctx: Option<Rc<GenmcCtx>>,
    ) -> Self {
        let tcx = layout_cx.tcx();
        let local_crates = helpers::get_local_crates(tcx);
        let layouts =
            PrimitiveLayouts::new(layout_cx).expect("Couldn't get layouts of primitive types");
        let profiler = config.measureme_out.as_ref().map(|out| {
            let crate_name =
                tcx.sess.opts.crate_name.clone().unwrap_or_else(|| "unknown-crate".to_string());
            let pid = process::id();
            // We adopt the same naming scheme for the profiler output that rustc uses. In rustc,
            // the PID is padded so that the nondeterministic value of the PID does not spread
            // nondeterminism to the allocator. In Miri we are not aiming for such performance
            // control, we just pad for consistency with rustc.
            let filename = format!("{crate_name}-{pid:07}");
            let path = Path::new(out).join(filename);
            measureme::Profiler::new(path).expect("Couldn't create `measureme` profiler")
        });
        let rng = StdRng::seed_from_u64(config.seed.unwrap_or(0));
        let borrow_tracker = config.borrow_tracker.map(|bt| bt.instantiate_global_state(config));
        let data_race = if config.genmc_config.is_some() {
            // `genmc_ctx` persists across executions, so we don't create a new one here.
            GlobalDataRaceHandler::Genmc(genmc_ctx.unwrap())
        } else if config.data_race_detector {
            GlobalDataRaceHandler::Vclocks(Box::new(data_race::GlobalState::new(config)))
        } else {
            GlobalDataRaceHandler::None
        };
        // Determine page size, stack address, and stack size.
        // These values are mostly meaningless, but the stack address is also where we start
        // allocating physical integer addresses for all allocations.
        let page_size = if let Some(page_size) = config.page_size {
            page_size
        } else {
            let target = &tcx.sess.target;
            match target.arch.as_ref() {
                "wasm32" | "wasm64" => 64 * 1024, // https://webassembly.github.io/spec/core/exec/runtime.html#memory-instances
                "aarch64" => {
                    if target.options.vendor.as_ref() == "apple" {
                        // No "definitive" source, but see:
                        // https://www.wwdcnotes.com/notes/wwdc20/10214/
                        // https://github.com/ziglang/zig/issues/11308 etc.
                        16 * 1024
                    } else {
                        4 * 1024
                    }
                }
                _ => 4 * 1024,
            }
        };
        // On 16-bit targets, 32 pages is more than the entire address space!
        let stack_addr = if tcx.pointer_size().bits() < 32 { page_size } else { page_size * 32 };
        let stack_size =
            if tcx.pointer_size().bits() < 32 { page_size * 4 } else { page_size * 16 };
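        // E.g. on a typical 64-bit target with 4 KiB pages, this places the stack
        // at address 128 KiB (0x20000) with a size of 64 KiB.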
        assert!(
            usize::try_from(config.num_cpus).unwrap() <= cpu_affinity::MAX_CPUS,
            "Miri only supports up to {} CPUs, but {} were configured",
            cpu_affinity::MAX_CPUS,
            config.num_cpus
        );
        let threads = ThreadManager::new(config);
        let mut thread_cpu_affinity = FxHashMap::default();
        if matches!(&*tcx.sess.target.os, "linux" | "freebsd" | "android") {
            thread_cpu_affinity
                .insert(threads.active_thread(), CpuAffinityMask::new(&layout_cx, config.num_cpus));
        }
        MiriMachine {
            tcx,
            borrow_tracker,
            data_race,
            alloc_addresses: RefCell::new(alloc_addresses::GlobalStateInner::new(config, stack_addr)),
            // `env_vars` depends on a full interpreter so we cannot properly initialize it yet.
            env_vars: EnvVars::default(),
            main_fn_ret_place: None,
            argc: None,
            argv: None,
            cmd_line: None,
            tls: TlsData::default(),
            isolated_op: config.isolated_op,
            validation: config.validation,
            fds: shims::FdTable::init(config.mute_stdout_stderr),
            epoll_interests: shims::EpollInterestTable::new(),
            dirs: Default::default(),
            layouts,
            threads,
            thread_cpu_affinity,
            static_roots: Vec::new(),
            profiler,
            string_cache: Default::default(),
            exported_symbols_cache: FxHashMap::default(),
            backtrace_style: config.backtrace_style,
            local_crates,
            extern_statics: FxHashMap::default(),
            rng: RefCell::new(rng),
            allocator: if !config.native_lib.is_empty() {
                Some(Rc::new(RefCell::new(crate::alloc::isolated_alloc::IsolatedAlloc::new())))
            } else {
                None
            },
            tracked_alloc_ids: config.tracked_alloc_ids.clone(),
            track_alloc_accesses: config.track_alloc_accesses,
            check_alignment: config.check_alignment,
            cmpxchg_weak_failure_rate: config.cmpxchg_weak_failure_rate,
            preemption_rate: config.preemption_rate,
            report_progress: config.report_progress,
            basic_block_count: 0,
            monotonic_clock: MonotonicClock::new(config.isolated_op == IsolatedOp::Allow),
            #[cfg(all(unix, feature = "native-lib"))]
            native_lib: config.native_lib.iter().map(|lib_file_path| {
                let host_triple = rustc_session::config::host_tuple();
                let target_triple = tcx.sess.opts.target_triple.tuple();
                // Check if host target == the session target.
                if host_triple != target_triple {
                    panic!(
                        "calling native C functions in a linked .so file requires host and target to be the same: \
                        host={host_triple}, target={target_triple}",
                    );
                }
                // Note: it is the user's responsibility to provide a correct SO file.
                // WATCH OUT: If an invalid/incorrect SO file is specified, this can cause
                // undefined behaviour in Miri itself!
                (
                    unsafe {
                        libloading::Library::new(lib_file_path)
                            .expect("failed to read specified extern shared object file")
                    },
                    lib_file_path.clone(),
                )
            }).collect(),
            #[cfg(not(all(unix, feature = "native-lib")))]
            native_lib: config.native_lib.iter().map(|_| {
                panic!("calling functions from native libraries via FFI is not supported in this build of Miri")
            }).collect(),
            gc_interval: config.gc_interval,
            since_gc: 0,
            num_cpus: config.num_cpus,
            page_size,
            stack_addr,
            stack_size,
            collect_leak_backtraces: config.collect_leak_backtraces,
            allocation_spans: RefCell::new(FxHashMap::default()),
            symbolic_alignment: RefCell::new(FxHashMap::default()),
            union_data_ranges: FxHashMap::default(),
            pthread_mutex_sanity: Cell::new(false),
            pthread_rwlock_sanity: Cell::new(false),
            pthread_condvar_sanity: Cell::new(false),
            sb_extern_type_warned: Cell::new(false),
            native_call_mem_warned: Cell::new(false),
            reject_in_isolation_warned: Default::default(),
            int2ptr_warned: Default::default(),
            mangle_internal_symbol_cache: Default::default(),
            force_intrinsic_fallback: config.force_intrinsic_fallback,
            float_nondet: config.float_nondet,
            float_rounding_error: config.float_rounding_error,
        }
    }

    pub(crate) fn late_init(
        ecx: &mut MiriInterpCx<'tcx>,
        config: &MiriConfig,
        on_main_stack_empty: StackEmptyCallback<'tcx>,
    ) -> InterpResult<'tcx> {
        EnvVars::init(ecx, config)?;
        MiriMachine::init_extern_statics(ecx)?;
        ThreadManager::init(ecx, on_main_stack_empty);
        interp_ok(())
    }

    pub(crate) fn add_extern_static(ecx: &mut MiriInterpCx<'tcx>, name: &str, ptr: Pointer) {
        // This was just allocated, so there definitely is a pointer here.
        let ptr = ptr.into_pointer_or_addr().unwrap();
        ecx.machine.extern_statics.try_insert(Symbol::intern(name), ptr).unwrap();
    }

    pub(crate) fn communicate(&self) -> bool {
        self.isolated_op == IsolatedOp::Allow
    }

    /// Check whether the stack frame that this `FrameInfo` refers to is part of a local crate.
    pub(crate) fn is_local(&self, frame: &FrameInfo<'_>) -> bool {
        let def_id = frame.instance.def_id();
        def_id.is_local() || self.local_crates.contains(&def_id.krate)
    }

    /// Called when the interpreter is going to shut down abnormally, such as due to a Ctrl-C.
    pub(crate) fn handle_abnormal_termination(&mut self) {
        // All strings in the profile data are stored in a single string table which is not
        // written to disk until the profiler is dropped. If the interpreter exits without dropping
        // the profiler, it is not possible to interpret the profile data and all measureme tools
        // will panic when given the file.
        drop(self.profiler.take());
    }

    pub(crate) fn page_align(&self) -> Align {
        Align::from_bytes(self.page_size).unwrap()
    }

    pub(crate) fn allocated_span(&self, alloc_id: AllocId) -> Option<SpanData> {
        self.allocation_spans
            .borrow()
            .get(&alloc_id)
            .map(|(allocated, _deallocated)| allocated.data())
    }

    pub(crate) fn deallocated_span(&self, alloc_id: AllocId) -> Option<SpanData> {
        self.allocation_spans
            .borrow()
            .get(&alloc_id)
            .and_then(|(_allocated, deallocated)| *deallocated)
            .map(Span::data)
    }

    fn init_allocation(
        ecx: &MiriInterpCx<'tcx>,
        id: AllocId,
        kind: MemoryKind,
        size: Size,
        align: Align,
    ) -> InterpResult<'tcx, AllocExtra<'tcx>> {
        if ecx.machine.tracked_alloc_ids.contains(&id) {
            ecx.emit_diagnostic(NonHaltingDiagnostic::CreatedAlloc(id, size, align, kind));
        }

        let borrow_tracker = ecx
            .machine
            .borrow_tracker
            .as_ref()
            .map(|bt| bt.borrow_mut().new_allocation(id, size, kind, &ecx.machine));

        let data_race = match &ecx.machine.data_race {
            GlobalDataRaceHandler::None => AllocDataRaceHandler::None,
            GlobalDataRaceHandler::Vclocks(data_race) =>
                AllocDataRaceHandler::Vclocks(
                    data_race::AllocState::new_allocation(
                        data_race,
                        &ecx.machine.threads,
                        size,
                        kind,
                        ecx.machine.current_span(),
                    ),
                    data_race.weak_memory.then(weak_memory::AllocState::new_allocation),
                ),
            GlobalDataRaceHandler::Genmc(_genmc_ctx) => {
                // GenMC learns about new allocations directly from the alloc_addresses module,
                // since it has to be able to control the address at which they are placed.
                AllocDataRaceHandler::Genmc
            }
        };

        // If an allocation is leaked, we want to report a backtrace to indicate where it was
        // allocated. We don't need to record a backtrace for allocations which are allowed to
        // leak.
        let backtrace = if kind.may_leak() || !ecx.machine.collect_leak_backtraces {
            None
        } else {
            Some(ecx.generate_stacktrace())
        };

        if matches!(kind, MemoryKind::Machine(kind) if kind.should_save_allocation_span()) {
            ecx.machine
                .allocation_spans
                .borrow_mut()
                .insert(id, (ecx.machine.current_span(), None));
        }

        interp_ok(AllocExtra { borrow_tracker, data_race, backtrace, sync: FxHashMap::default() })
    }
}

impl VisitProvenance for MiriMachine<'_> {
    fn visit_provenance(&self, visit: &mut VisitWith<'_>) {
        #[rustfmt::skip]
        let MiriMachine {
            threads,
            thread_cpu_affinity: _,
            tls,
            env_vars,
            main_fn_ret_place,
            argc,
            argv,
            cmd_line,
            extern_statics,
            dirs,
            borrow_tracker,
            data_race,
            alloc_addresses,
            fds,
            epoll_interests: _,
            tcx: _,
            isolated_op: _,
            validation: _,
            monotonic_clock: _,
            layouts: _,
            static_roots: _,
            profiler: _,
            string_cache: _,
            exported_symbols_cache: _,
            backtrace_style: _,
            local_crates: _,
            rng: _,
            allocator: _,
            tracked_alloc_ids: _,
            track_alloc_accesses: _,
            check_alignment: _,
            cmpxchg_weak_failure_rate: _,
            preemption_rate: _,
            report_progress: _,
            basic_block_count: _,
            native_lib: _,
            gc_interval: _,
            since_gc: _,
            num_cpus: _,
            page_size: _,
            stack_addr: _,
            stack_size: _,
            collect_leak_backtraces: _,
            allocation_spans: _,
            symbolic_alignment: _,
            union_data_ranges: _,
            pthread_mutex_sanity: _,
            pthread_rwlock_sanity: _,
            pthread_condvar_sanity: _,
            sb_extern_type_warned: _,
            native_call_mem_warned: _,
            reject_in_isolation_warned: _,
            int2ptr_warned: _,
            mangle_internal_symbol_cache: _,
            force_intrinsic_fallback: _,
            float_nondet: _,
            float_rounding_error: _,
        } = self;

        threads.visit_provenance(visit);
        tls.visit_provenance(visit);
        env_vars.visit_provenance(visit);
        dirs.visit_provenance(visit);
        fds.visit_provenance(visit);
        data_race.visit_provenance(visit);
        borrow_tracker.visit_provenance(visit);
        alloc_addresses.visit_provenance(visit);
        main_fn_ret_place.visit_provenance(visit);
        argc.visit_provenance(visit);
        argv.visit_provenance(visit);
        cmd_line.visit_provenance(visit);
        for ptr in extern_statics.values() {
            ptr.visit_provenance(visit);
        }
    }
}

/// A rustc InterpCx for Miri.
pub type MiriInterpCx<'tcx> = InterpCx<'tcx, MiriMachine<'tcx>>;

/// A little trait that's useful to be inherited by extension traits.
pub trait MiriInterpCxExt<'tcx> {
    fn eval_context_ref<'a>(&'a self) -> &'a MiriInterpCx<'tcx>;
    fn eval_context_mut<'a>(&'a mut self) -> &'a mut MiriInterpCx<'tcx>;
}
impl<'tcx> MiriInterpCxExt<'tcx> for MiriInterpCx<'tcx> {
    #[inline(always)]
    fn eval_context_ref(&self) -> &MiriInterpCx<'tcx> {
        self
    }
    #[inline(always)]
    fn eval_context_mut(&mut self) -> &mut MiriInterpCx<'tcx> {
        self
    }
}

/// Machine hook implementations.
impl<'tcx> Machine<'tcx> for MiriMachine<'tcx> {
    type MemoryKind = MiriMemoryKind;
    type ExtraFnVal = DynSym;

    type FrameExtra = FrameExtra<'tcx>;
    type AllocExtra = AllocExtra<'tcx>;

    type Provenance = Provenance;
    type ProvenanceExtra = ProvenanceExtra;
    type Bytes = MiriAllocBytes;

    type MemoryMap =
        MonoHashMap<AllocId, (MemoryKind, Allocation<Provenance, Self::AllocExtra, Self::Bytes>)>;

    const GLOBAL_KIND: Option<MiriMemoryKind> = Some(MiriMemoryKind::Global);

    const PANIC_ON_ALLOC_FAIL: bool = false;

    #[inline(always)]
    fn enforce_alignment(ecx: &MiriInterpCx<'tcx>) -> bool {
        ecx.machine.check_alignment != AlignmentCheck::None
    }

    #[inline(always)]
    fn alignment_check(
        ecx: &MiriInterpCx<'tcx>,
        alloc_id: AllocId,
        alloc_align: Align,
        alloc_kind: AllocKind,
        offset: Size,
        align: Align,
    ) -> Option<Misalignment> {
        if ecx.machine.check_alignment != AlignmentCheck::Symbolic {
            // Just use the built-in check.
            return None;
        }
        if alloc_kind != AllocKind::LiveData {
            // Can't have any extra info here.
            return None;
        }
        // Let's see which alignment we have been promised for this allocation.
        let (promised_offset, promised_align) = ecx
            .machine
            .symbolic_alignment
            .borrow()
            .get(&alloc_id)
            .copied()
            .unwrap_or((Size::ZERO, alloc_align));
        if promised_align < align {
            // Definitely not enough.
            Some(Misalignment { has: promised_align, required: align })
        } else {
            // What's the offset between us and the promised alignment?
            let distance = offset.bytes().wrapping_sub(promised_offset.bytes());
            // That must also be aligned.
            if distance.is_multiple_of(align.bytes()) {
                // All looking good!
                None
            } else {
                // The biggest power of two by which `distance` is divisible.
                let distance_pow2 = 1 << distance.trailing_zeros();
                Some(Misalignment {
                    has: Align::from_bytes(distance_pow2).unwrap(),
                    required: align,
                })
            }
        }
    }
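
    // Worked example for the symbolic check above: with a promised (offset = 0,
    // align = 16) and an access at offset 20 that requires alignment 8, the
    // distance is 20, whose largest power-of-two divisor is 4, so we report an
    // effective alignment of 4 against the required 8.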

    #[inline(always)]
    fn enforce_validity(ecx: &MiriInterpCx<'tcx>, _layout: TyAndLayout<'tcx>) -> bool {
        ecx.machine.validation != ValidationMode::No
    }
    #[inline(always)]
    fn enforce_validity_recursively(
        ecx: &InterpCx<'tcx, Self>,
        _layout: TyAndLayout<'tcx>,
    ) -> bool {
        ecx.machine.validation == ValidationMode::Deep
    }

    #[inline(always)]
    fn ignore_optional_overflow_checks(ecx: &MiriInterpCx<'tcx>) -> bool {
        !ecx.tcx.sess.overflow_checks()
    }

    fn check_fn_target_features(
        ecx: &MiriInterpCx<'tcx>,
        instance: ty::Instance<'tcx>,
    ) -> InterpResult<'tcx> {
        let attrs = ecx.tcx.codegen_instance_attrs(instance.def);
        if attrs
            .target_features
            .iter()
            .any(|feature| !ecx.tcx.sess.target_features.contains(&feature.name))
        {
            let unavailable = attrs
                .target_features
                .iter()
                .filter(|&feature| {
                    feature.kind != TargetFeatureKind::Implied
                        && !ecx.tcx.sess.target_features.contains(&feature.name)
                })
                .fold(String::new(), |mut s, feature| {
                    if !s.is_empty() {
                        s.push_str(", ");
                    }
                    s.push_str(feature.name.as_str());
                    s
                });
            let msg = format!(
                "calling a function that requires unavailable target features: {unavailable}"
            );
            // On WASM, this is not UB, but instead gets rejected during validation of the module
            // (see #84988).
            if ecx.tcx.sess.target.is_like_wasm {
                throw_machine_stop!(TerminationInfo::Abort(msg));
            } else {
                throw_ub_format!("{msg}");
            }
        }
        interp_ok(())
    }

    #[inline(always)]
    fn find_mir_or_eval_fn(
        ecx: &mut MiriInterpCx<'tcx>,
        instance: ty::Instance<'tcx>,
        abi: &FnAbi<'tcx, Ty<'tcx>>,
        args: &[FnArg<'tcx, Provenance>],
        dest: &PlaceTy<'tcx>,
        ret: Option<mir::BasicBlock>,
        unwind: mir::UnwindAction,
    ) -> InterpResult<'tcx, Option<(&'tcx mir::Body<'tcx>, ty::Instance<'tcx>)>> {
        // For foreign items, try to see if we can emulate them.
        if ecx.tcx.is_foreign_item(instance.def_id()) {
            let _trace = enter_trace_span!("emulate_foreign_item");
            // An external function call that does not have a MIR body. We either find MIR elsewhere
            // or emulate its effect.
            // This will be `Ok(None)` if we're emulating the function entirely within Miri (no need
            // to run extra MIR), and `Ok(Some(body))` if we found MIR to run for the foreign
            // function.
            // Any needed call to `goto_block` will be performed by `emulate_foreign_item`.
            let args = ecx.copy_fn_args(args); // FIXME: Should `InPlace` arguments be reset to uninit?
            let link_name = Symbol::intern(ecx.tcx.symbol_name(instance).name);
            return ecx.emulate_foreign_item(link_name, abi, &args, dest, ret, unwind);
        }

        // Otherwise, load the MIR.
        let _trace = enter_trace_span!("load_mir");
        interp_ok(Some((ecx.load_mir(instance.def, None)?, instance)))
    }

    #[inline(always)]
    fn call_extra_fn(
        ecx: &mut MiriInterpCx<'tcx>,
        fn_val: DynSym,
        abi: &FnAbi<'tcx, Ty<'tcx>>,
        args: &[FnArg<'tcx, Provenance>],
        dest: &PlaceTy<'tcx>,
        ret: Option<mir::BasicBlock>,
        unwind: mir::UnwindAction,
    ) -> InterpResult<'tcx> {
        let args = ecx.copy_fn_args(args); // FIXME: Should `InPlace` arguments be reset to uninit?
        ecx.emulate_dyn_sym(fn_val, abi, &args, dest, ret, unwind)
    }

    #[inline(always)]
    fn call_intrinsic(
        ecx: &mut MiriInterpCx<'tcx>,
        instance: ty::Instance<'tcx>,
        args: &[OpTy<'tcx>],
        dest: &PlaceTy<'tcx>,
        ret: Option<mir::BasicBlock>,
        unwind: mir::UnwindAction,
    ) -> InterpResult<'tcx, Option<ty::Instance<'tcx>>> {
        ecx.call_intrinsic(instance, args, dest, ret, unwind)
    }

    #[inline(always)]
    fn assert_panic(
        ecx: &mut MiriInterpCx<'tcx>,
        msg: &mir::AssertMessage<'tcx>,
        unwind: mir::UnwindAction,
    ) -> InterpResult<'tcx> {
        ecx.assert_panic(msg, unwind)
    }

    fn panic_nounwind(ecx: &mut InterpCx<'tcx, Self>, msg: &str) -> InterpResult<'tcx> {
        ecx.start_panic_nounwind(msg)
    }

    fn unwind_terminate(
        ecx: &mut InterpCx<'tcx, Self>,
        reason: mir::UnwindTerminateReason,
    ) -> InterpResult<'tcx> {
        // Call the lang item.
        let panic = ecx.tcx.lang_items().get(reason.lang_item()).unwrap();
        let panic = ty::Instance::mono(ecx.tcx.tcx, panic);
        ecx.call_function(
            panic,
            ExternAbi::Rust,
            &[],
            None,
            ReturnContinuation::Goto { ret: None, unwind: mir::UnwindAction::Unreachable },
        )?;
        interp_ok(())
    }

    #[inline(always)]
    fn binary_ptr_op(
        ecx: &MiriInterpCx<'tcx>,
        bin_op: mir::BinOp,
        left: &ImmTy<'tcx>,
        right: &ImmTy<'tcx>,
    ) -> InterpResult<'tcx, ImmTy<'tcx>> {
        ecx.binary_ptr_op(bin_op, left, right)
    }

    #[inline(always)]
    fn generate_nan<F1: Float + FloatConvert<F2>, F2: Float>(
        ecx: &InterpCx<'tcx, Self>,
        inputs: &[F1],
    ) -> F2 {
        ecx.generate_nan(inputs)
    }

    #[inline(always)]
    fn apply_float_nondet(
        ecx: &mut InterpCx<'tcx, Self>,
        val: ImmTy<'tcx>,
    ) -> InterpResult<'tcx, ImmTy<'tcx>> {
        crate::math::apply_random_float_error_to_imm(ecx, val, 2 /* log2(4) */)
    }

    #[inline(always)]
    fn equal_float_min_max<F: Float>(ecx: &MiriInterpCx<'tcx>, a: F, b: F) -> F {
        ecx.equal_float_min_max(a, b)
    }

    #[inline(always)]
    fn ub_checks(ecx: &InterpCx<'tcx, Self>) -> InterpResult<'tcx, bool> {
        interp_ok(ecx.tcx.sess.ub_checks())
    }

    #[inline(always)]
    fn contract_checks(ecx: &InterpCx<'tcx, Self>) -> InterpResult<'tcx, bool> {
        interp_ok(ecx.tcx.sess.contract_checks())
    }

    #[inline(always)]
    fn thread_local_static_pointer(
        ecx: &mut MiriInterpCx<'tcx>,
        def_id: DefId,
    ) -> InterpResult<'tcx, StrictPointer> {
        ecx.get_or_create_thread_local_alloc(def_id)
    }

    fn extern_static_pointer(
        ecx: &MiriInterpCx<'tcx>,
        def_id: DefId,
    ) -> InterpResult<'tcx, StrictPointer> {
        let link_name = Symbol::intern(ecx.tcx.symbol_name(Instance::mono(*ecx.tcx, def_id)).name);
        if let Some(&ptr) = ecx.machine.extern_statics.get(&link_name) {
            // Various parts of the engine rely on `get_alloc_info` for size and alignment
            // information. That uses the type information of this static.
            // Make sure it matches the Miri allocation for this.
            let Provenance::Concrete { alloc_id, .. } = ptr.provenance else {
                panic!("extern_statics cannot contain wildcards")
            };
            let info = ecx.get_alloc_info(alloc_id);
            let def_ty = ecx.tcx.type_of(def_id).instantiate_identity();
            let extern_decl_layout =
                ecx.tcx.layout_of(ecx.typing_env().as_query_input(def_ty)).unwrap();
            if extern_decl_layout.size != info.size || extern_decl_layout.align.abi != info.align {
                throw_unsup_format!(
                    "extern static `{link_name}` has been declared as `{krate}::{name}` \
                    with a size of {decl_size} bytes and alignment of {decl_align} bytes, \
                    but Miri emulates it via an extern static shim \
                    with a size of {shim_size} bytes and alignment of {shim_align} bytes",
                    name = ecx.tcx.def_path_str(def_id),
                    krate = ecx.tcx.crate_name(def_id.krate),
                    decl_size = extern_decl_layout.size.bytes(),
                    decl_align = extern_decl_layout.align.abi.bytes(),
                    shim_size = info.size.bytes(),
                    shim_align = info.align.bytes(),
                )
            }
            interp_ok(ptr)
        } else {
            throw_unsup_format!("extern static `{link_name}` is not supported by Miri")
        }
    }

    fn init_local_allocation(
        ecx: &MiriInterpCx<'tcx>,
        id: AllocId,
        kind: MemoryKind,
        size: Size,
        align: Align,
    ) -> InterpResult<'tcx, Self::AllocExtra> {
        assert!(kind != MiriMemoryKind::Global.into());
        MiriMachine::init_allocation(ecx, id, kind, size, align)
    }

    fn adjust_alloc_root_pointer(
        ecx: &MiriInterpCx<'tcx>,
        ptr: interpret::Pointer<CtfeProvenance>,
        kind: Option<MemoryKind>,
    ) -> InterpResult<'tcx, interpret::Pointer<Provenance>> {
        let kind = kind.expect("we set our GLOBAL_KIND so this cannot be None");
        let alloc_id = ptr.provenance.alloc_id();
        if cfg!(debug_assertions) {
            // The machine promises to never call us on thread-local or extern statics.
            match ecx.tcx.try_get_global_alloc(alloc_id) {
                Some(GlobalAlloc::Static(def_id)) if ecx.tcx.is_thread_local_static(def_id) => {
                    panic!("adjust_alloc_root_pointer called on thread-local static")
                }
                Some(GlobalAlloc::Static(def_id)) if ecx.tcx.is_foreign_item(def_id) => {
                    panic!("adjust_alloc_root_pointer called on extern static")
                }
                _ => {}
            }
        }
        // FIXME: can we somehow preserve the immutability of `ptr`?
        let tag = if let Some(borrow_tracker) = &ecx.machine.borrow_tracker {
            borrow_tracker.borrow_mut().root_ptr_tag(alloc_id, &ecx.machine)
        } else {
            // Value does not matter, SB is disabled
            BorTag::default()
        };
        ecx.adjust_alloc_root_pointer(ptr, tag, kind)
    }
1312
1313    /// Called on `usize as ptr` casts.
1314    #[inline(always)]
1315    fn ptr_from_addr_cast(ecx: &MiriInterpCx<'tcx>, addr: u64) -> InterpResult<'tcx, Pointer> {
1316        ecx.ptr_from_addr_cast(addr)
1317    }
1318
1319    /// Called on `ptr as usize` casts.
1320    /// (Actually computing the resulting `usize` doesn't need machine help,
1321    /// that's just `Scalar::try_to_int`.)
    #[inline(always)]
    fn expose_provenance(
        ecx: &InterpCx<'tcx, Self>,
        provenance: Self::Provenance,
    ) -> InterpResult<'tcx> {
        ecx.expose_provenance(provenance)
    }

    /// Convert a pointer with provenance into an allocation-offset pair and extra provenance info.
    /// `size` says how many bytes of memory are expected at that pointer. The *sign* of `size` can
    /// be used to disambiguate situations where a wildcard pointer sits right in between two
    /// allocations.
    ///
    /// If `ptr.provenance.get_alloc_id()` is `Some(p)`, the returned `AllocId` must be `p`.
    /// The resulting `AllocId` will just be used for that one step and then forgotten again
    /// (i.e., we'll never turn the data returned here back into a `Pointer` that might be
    /// stored in machine state).
    ///
    /// When this fails, that means the pointer does not point to a live allocation.
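    ///
    /// A hypothetical sketch of the sign disambiguation: if allocation `A` occupies
    /// addresses `0xf0..0x100` and allocation `B` occupies `0x100..0x110`, a wildcard
    /// pointer at address `0x100` is ambiguous. A positive `size` (an access extending
    /// upwards from the pointer) resolves it into `B`; a negative `size` (an access
    /// ending at the pointer) resolves it into `A`.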
    fn ptr_get_alloc(
        ecx: &MiriInterpCx<'tcx>,
        ptr: StrictPointer,
        size: i64,
    ) -> Option<(AllocId, Size, Self::ProvenanceExtra)> {
        let rel = ecx.ptr_get_alloc(ptr, size);

        rel.map(|(alloc_id, size)| {
            let tag = match ptr.provenance {
                Provenance::Concrete { tag, .. } => ProvenanceExtra::Concrete(tag),
                Provenance::Wildcard => ProvenanceExtra::Wildcard,
            };
            (alloc_id, size, tag)
        })
    }

    /// Called to adjust global allocations to the Provenance and AllocExtra of this machine.
    ///
    /// If `alloc` contains pointers, then they are all pointing to globals.
    ///
    /// This should avoid copying if no work has to be done! If this returns an owned
    /// allocation (because a copy had to be done to adjust things), machine memory will
    /// cache the result. (This relies on `AllocMap::get_or` being able to add the
    /// owned allocation to the map even when the map is shared.)
    fn adjust_global_allocation<'b>(
        ecx: &InterpCx<'tcx, Self>,
        id: AllocId,
        alloc: &'b Allocation,
    ) -> InterpResult<'tcx, Cow<'b, Allocation<Self::Provenance, Self::AllocExtra, Self::Bytes>>>
    {
        let alloc = alloc.adjust_from_tcx(
            &ecx.tcx,
            |bytes, align| ecx.get_global_alloc_bytes(id, bytes, align),
            |ptr| ecx.global_root_pointer(ptr),
        )?;
        let kind = MiriMemoryKind::Global.into();
        let extra = MiriMachine::init_allocation(ecx, id, kind, alloc.size(), alloc.align)?;
        interp_ok(Cow::Owned(alloc.with_extra(extra)))
    }

    #[inline(always)]
    fn before_memory_read(
        _tcx: TyCtxtAt<'tcx>,
        machine: &Self,
        alloc_extra: &AllocExtra<'tcx>,
        ptr: Pointer,
        (alloc_id, prov_extra): (AllocId, Self::ProvenanceExtra),
        range: AllocRange,
    ) -> InterpResult<'tcx> {
        if machine.track_alloc_accesses && machine.tracked_alloc_ids.contains(&alloc_id) {
            machine
                .emit_diagnostic(NonHaltingDiagnostic::AccessedAlloc(alloc_id, AccessKind::Read));
        }
        // The order of checks is deliberate, to prefer reporting a data race over a borrow tracker error.
        match &machine.data_race {
            GlobalDataRaceHandler::None => {}
            GlobalDataRaceHandler::Genmc(genmc_ctx) =>
                genmc_ctx.memory_load(machine, ptr.addr(), range.size)?,
            GlobalDataRaceHandler::Vclocks(_data_race) => {
                let _trace = enter_trace_span!(data_race::before_memory_read);
                let AllocDataRaceHandler::Vclocks(data_race, weak_memory) = &alloc_extra.data_race
                else {
                    unreachable!();
                };
                data_race.read(alloc_id, range, NaReadType::Read, None, machine)?;
                if let Some(weak_memory) = weak_memory {
                    weak_memory.memory_accessed(range, machine.data_race.as_vclocks_ref().unwrap());
                }
            }
        }
        if let Some(borrow_tracker) = &alloc_extra.borrow_tracker {
            borrow_tracker.before_memory_read(alloc_id, prov_extra, range, machine)?;
        }
        interp_ok(())
    }

    #[inline(always)]
    fn before_memory_write(
        _tcx: TyCtxtAt<'tcx>,
        machine: &mut Self,
        alloc_extra: &mut AllocExtra<'tcx>,
        ptr: Pointer,
        (alloc_id, prov_extra): (AllocId, Self::ProvenanceExtra),
        range: AllocRange,
    ) -> InterpResult<'tcx> {
        if machine.track_alloc_accesses && machine.tracked_alloc_ids.contains(&alloc_id) {
            machine
                .emit_diagnostic(NonHaltingDiagnostic::AccessedAlloc(alloc_id, AccessKind::Write));
        }
        match &machine.data_race {
            GlobalDataRaceHandler::None => {}
            GlobalDataRaceHandler::Genmc(genmc_ctx) => {
                genmc_ctx.memory_store(machine, ptr.addr(), range.size)?;
            }
            GlobalDataRaceHandler::Vclocks(_global_state) => {
                let _trace = enter_trace_span!(data_race::before_memory_write);
                let AllocDataRaceHandler::Vclocks(data_race, weak_memory) =
                    &mut alloc_extra.data_race
                else {
                    unreachable!()
                };
                data_race.write(alloc_id, range, NaWriteType::Write, None, machine)?;
                if let Some(weak_memory) = weak_memory {
                    weak_memory.memory_accessed(range, machine.data_race.as_vclocks_ref().unwrap());
                }
            }
        }
        if let Some(borrow_tracker) = &mut alloc_extra.borrow_tracker {
            borrow_tracker.before_memory_write(alloc_id, prov_extra, range, machine)?;
        }
        interp_ok(())
    }

    #[inline(always)]
    fn before_memory_deallocation(
        _tcx: TyCtxtAt<'tcx>,
        machine: &mut Self,
        alloc_extra: &mut AllocExtra<'tcx>,
        ptr: Pointer,
        (alloc_id, prov_extra): (AllocId, Self::ProvenanceExtra),
        size: Size,
        align: Align,
        kind: MemoryKind,
    ) -> InterpResult<'tcx> {
        if machine.tracked_alloc_ids.contains(&alloc_id) {
            machine.emit_diagnostic(NonHaltingDiagnostic::FreedAlloc(alloc_id));
        }
        match &machine.data_race {
            GlobalDataRaceHandler::None => {}
            GlobalDataRaceHandler::Genmc(genmc_ctx) =>
                genmc_ctx.handle_dealloc(machine, ptr.addr(), size, align, kind)?,
            GlobalDataRaceHandler::Vclocks(_global_state) => {
                let _trace = enter_trace_span!(data_race::before_memory_deallocation);
                let data_race = alloc_extra.data_race.as_vclocks_mut().unwrap();
                data_race.write(
                    alloc_id,
                    alloc_range(Size::ZERO, size),
                    NaWriteType::Deallocate,
                    None,
                    machine,
                )?;
            }
        }
        if let Some(borrow_tracker) = &mut alloc_extra.borrow_tracker {
            borrow_tracker.before_memory_deallocation(alloc_id, prov_extra, size, machine)?;
        }
        if let Some((_, deallocated_at)) = machine.allocation_spans.borrow_mut().get_mut(&alloc_id)
        {
            *deallocated_at = Some(machine.current_span());
        }
        machine.free_alloc_id(alloc_id, size, align, kind);
        interp_ok(())
    }

    #[inline(always)]
    fn retag_ptr_value(
        ecx: &mut InterpCx<'tcx, Self>,
        kind: mir::RetagKind,
        val: &ImmTy<'tcx>,
    ) -> InterpResult<'tcx, ImmTy<'tcx>> {
        if ecx.machine.borrow_tracker.is_some() {
            ecx.retag_ptr_value(kind, val)
        } else {
            interp_ok(val.clone())
        }
    }

    #[inline(always)]
    fn retag_place_contents(
        ecx: &mut InterpCx<'tcx, Self>,
        kind: mir::RetagKind,
        place: &PlaceTy<'tcx>,
    ) -> InterpResult<'tcx> {
        if ecx.machine.borrow_tracker.is_some() {
            ecx.retag_place_contents(kind, place)?;
        }
        interp_ok(())
    }

    fn protect_in_place_function_argument(
        ecx: &mut InterpCx<'tcx, Self>,
        place: &MPlaceTy<'tcx>,
    ) -> InterpResult<'tcx> {
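        // Sketch of the situation this hook guards (an illustration, not taken from
        // the source): for a call like `take(s)` where the callee receives its
        // argument in-place, protecting the argument place makes any stray access to
        // the old `s` storage during the call immediate UB, and the `uninit` write
        // below ensures the previous contents cannot be observed after the call.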
        // If we have a borrow tracker, we also have it set up protection so that all reads *and
        // writes* during this call are insta-UB.
        let protected_place = if ecx.machine.borrow_tracker.is_some() {
            ecx.protect_place(place)?
        } else {
            // No borrow tracker.
            place.clone()
        };
        // We do need to write `uninit` so that even after the call ends, the former contents of
        // this place cannot be observed any more. We do the write after retagging so that for
        // Tree Borrows, this is considered to activate the new tag.
        // Conveniently this also ensures that the place actually points to suitable memory.
        ecx.write_uninit(&protected_place)?;
        // Now we throw away the protected place, ensuring its tag is never used again.
        interp_ok(())
    }

    #[inline(always)]
    fn init_frame(
        ecx: &mut InterpCx<'tcx, Self>,
        frame: Frame<'tcx, Provenance>,
    ) -> InterpResult<'tcx, Frame<'tcx, Provenance, FrameExtra<'tcx>>> {
        // Start recording our event before doing anything else.
        let timing = if let Some(profiler) = ecx.machine.profiler.as_ref() {
            let fn_name = frame.instance().to_string();
            let entry = ecx.machine.string_cache.entry(fn_name.clone());
            let name = entry.or_insert_with(|| profiler.alloc_string(&*fn_name));

            Some(profiler.start_recording_interval_event_detached(
                *name,
                measureme::EventId::from_label(*name),
                ecx.active_thread().to_u32(),
            ))
        } else {
            None
        };

        let borrow_tracker = ecx.machine.borrow_tracker.as_ref();

        let extra = FrameExtra {
            borrow_tracker: borrow_tracker.map(|bt| bt.borrow_mut().new_frame()),
            catch_unwind: None,
            timing,
            is_user_relevant: ecx.machine.is_user_relevant(&frame),
            data_race: ecx
                .machine
                .data_race
                .as_vclocks_ref()
                .map(|_| data_race::FrameState::default()),
        };

        interp_ok(frame.with_extra(extra))
    }

    fn stack<'a>(
        ecx: &'a InterpCx<'tcx, Self>,
    ) -> &'a [Frame<'tcx, Self::Provenance, Self::FrameExtra>] {
        ecx.active_thread_stack()
    }

    fn stack_mut<'a>(
        ecx: &'a mut InterpCx<'tcx, Self>,
    ) -> &'a mut Vec<Frame<'tcx, Self::Provenance, Self::FrameExtra>> {
        ecx.active_thread_stack_mut()
    }

    fn before_terminator(ecx: &mut InterpCx<'tcx, Self>) -> InterpResult<'tcx> {
        ecx.machine.basic_block_count += 1u64; // a u64 that is only incremented by 1 will "never" overflow
        ecx.machine.since_gc += 1;
        // Possibly report our progress. This will point at the terminator we are about to execute.
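        // (`report_progress` comes from the `-Zmiri-report-progress` flag; the
        // interval is measured in basic blocks.)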
        if let Some(report_progress) = ecx.machine.report_progress {
            if ecx.machine.basic_block_count.is_multiple_of(u64::from(report_progress)) {
                ecx.emit_diagnostic(NonHaltingDiagnostic::ProgressReport {
                    block_count: ecx.machine.basic_block_count,
                });
            }
        }

        // Search for BorTags to find all live pointers, then remove all other tags from borrow
        // stacks.
        // When debug assertions are enabled, run the GC as often as possible so that any cases
        // where it mistakenly removes an important tag become visible.
        if ecx.machine.gc_interval > 0 && ecx.machine.since_gc >= ecx.machine.gc_interval {
            ecx.machine.since_gc = 0;
            ecx.run_provenance_gc();
        }

        // These are our preemption points.
        // (This will only take effect after the terminator has been executed.)
        ecx.maybe_preempt_active_thread();

        // Make sure some time passes.
        ecx.machine.monotonic_clock.tick();

        interp_ok(())
    }

    #[inline(always)]
    fn after_stack_push(ecx: &mut InterpCx<'tcx, Self>) -> InterpResult<'tcx> {
        if ecx.frame().extra.is_user_relevant {
            // We just pushed a user-relevant frame, so we know it is the topmost user-relevant
            // frame. If we push a frame that is not user-relevant, there's no need to do anything.
            let stack_len = ecx.active_thread_stack().len();
            ecx.active_thread_mut().set_top_user_relevant_frame(stack_len - 1);
        }
        interp_ok(())
    }

    fn before_stack_pop(ecx: &mut InterpCx<'tcx, Self>) -> InterpResult<'tcx> {
        let frame = ecx.frame();
        // We want this *before* the return value copy, because the return place itself is protected
        // until we do `on_stack_pop` here, and we need to un-protect it to copy the return value.
        if ecx.machine.borrow_tracker.is_some() {
            ecx.on_stack_pop(frame)?;
        }
        if frame.extra.is_user_relevant {
            // All we store is the index of the topmost user-relevant frame, so once we pop it we
            // have no idea where the next topmost user-relevant frame is. So we recompute it.
            // (If this ever becomes a bottleneck, we could have `push` store the previous
            // user-relevant frame and restore that here.)
            // We have to skip the frame that is just being popped.
            ecx.active_thread_mut().recompute_top_user_relevant_frame(/* skip */ 1);
        }
        // tracing-tree can automatically annotate scope changes, but it gets very confused by our
        // concurrency and what it prints is just plain wrong. So we print our own information
        // instead. (Cc https://github.com/rust-lang/miri/issues/2266)
        info!("Leaving {}", ecx.frame().instance());
        interp_ok(())
    }

    #[inline(always)]
    fn after_stack_pop(
        ecx: &mut InterpCx<'tcx, Self>,
        frame: Frame<'tcx, Provenance, FrameExtra<'tcx>>,
        unwinding: bool,
    ) -> InterpResult<'tcx, ReturnAction> {
        let res = {
            // Move `frame` into a sub-scope so we control when it will be dropped.
            let mut frame = frame;
            let timing = frame.extra.timing.take();
            let res = ecx.handle_stack_pop_unwind(frame.extra, unwinding);
            if let Some(profiler) = ecx.machine.profiler.as_ref() {
                profiler.finish_recording_interval_event(timing.unwrap());
            }
            res
        };
        // Needs to be done after dropping frame to show up on the right nesting level.
        // (Cc https://github.com/rust-lang/miri/issues/2266)
        if !ecx.active_thread_stack().is_empty() {
            info!("Continuing in {}", ecx.frame().instance());
        }
        res
    }

    fn after_local_read(
        ecx: &InterpCx<'tcx, Self>,
        frame: &Frame<'tcx, Provenance, FrameExtra<'tcx>>,
        local: mir::Local,
    ) -> InterpResult<'tcx> {
        if let Some(data_race) = &frame.extra.data_race {
            let _trace = enter_trace_span!(data_race::after_local_read);
            data_race.local_read(local, &ecx.machine);
        }
        interp_ok(())
    }

    fn after_local_write(
        ecx: &mut InterpCx<'tcx, Self>,
        local: mir::Local,
        storage_live: bool,
    ) -> InterpResult<'tcx> {
        if let Some(data_race) = &ecx.frame().extra.data_race {
            let _trace = enter_trace_span!(data_race::after_local_write);
            data_race.local_write(local, storage_live, &ecx.machine);
        }
        interp_ok(())
    }

    fn after_local_moved_to_memory(
        ecx: &mut InterpCx<'tcx, Self>,
        local: mir::Local,
        mplace: &MPlaceTy<'tcx>,
    ) -> InterpResult<'tcx> {
        let Some(Provenance::Concrete { alloc_id, .. }) = mplace.ptr().provenance else {
            panic!("after_local_moved_to_memory should only be called on fresh allocations");
        };
        // Record the span where this was allocated: the declaration of the local.
        let local_decl = &ecx.frame().body().local_decls[local];
        let span = local_decl.source_info.span;
        ecx.machine.allocation_spans.borrow_mut().insert(alloc_id, (span, None));
        // The data race system has to fix the clocks used for this write.
        let (alloc_info, machine) = ecx.get_alloc_extra_mut(alloc_id)?;
        if let Some(data_race) =
            &machine.threads.active_thread_stack().last().unwrap().extra.data_race
        {
            let _trace = enter_trace_span!(data_race::after_local_moved_to_memory);
            data_race.local_moved_to_memory(
                local,
                alloc_info.data_race.as_vclocks_mut().unwrap(),
                machine,
            );
        }
        interp_ok(())
    }

    fn get_global_alloc_salt(
        ecx: &InterpCx<'tcx, Self>,
        instance: Option<ty::Instance<'tcx>>,
    ) -> usize {
        let unique = if let Some(instance) = instance {
            // Functions cannot be identified by pointers, as asm-equal functions can get
            // deduplicated by the linker (we set the "unnamed_addr" attribute for LLVM) and
            // functions can be duplicated across crates. We thus generate a new `AllocId` for every
            // mention of a function. This means that `main as fn() == main as fn()` is false, while
            // `let x = main as fn(); x == x` is true. However, as a quality-of-life feature it can
            // be useful to identify certain functions uniquely, e.g. for backtraces. So we identify
            // whether codegen will actually emit duplicate functions. It does that when they have
            // non-lifetime generics, or when they can be inlined. All other functions are given a
            // unique address. This is not a stable guarantee! The `inline` attribute is a hint and
            // cannot be relied upon for anything. But if we don't do this, the
            // `__rust_begin_short_backtrace`/`__rust_end_short_backtrace` logic breaks and panic
            // backtraces look terrible.
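            // For illustration (a sketch, not a guarantee): a non-generic function
            // marked `#[inline(never)]` is considered unique here, so every mention
            // of it gets the same address; a generic or inlinable function instead
            // gets one of several possible addresses per mention.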
            let is_generic = instance
                .args
                .into_iter()
                .any(|arg| !matches!(arg.kind(), ty::GenericArgKind::Lifetime(_)));
            let can_be_inlined = matches!(
                ecx.tcx.sess.opts.unstable_opts.cross_crate_inline_threshold,
                InliningThreshold::Always
            ) || !matches!(
                ecx.tcx.codegen_instance_attrs(instance.def).inline,
                InlineAttr::Never
            );
            !is_generic && !can_be_inlined
        } else {
            // Non-functions are never unique.
            false
        };
        // Always use the same salt if the allocation is unique.
        if unique {
            CTFE_ALLOC_SALT
        } else {
            ecx.machine.rng.borrow_mut().random_range(0..ADDRS_PER_ANON_GLOBAL)
        }
    }

    fn cached_union_data_range<'e>(
        ecx: &'e mut InterpCx<'tcx, Self>,
        ty: Ty<'tcx>,
        compute_range: impl FnOnce() -> RangeSet,
    ) -> Cow<'e, RangeSet> {
        Cow::Borrowed(ecx.machine.union_data_ranges.entry(ty).or_insert_with(compute_range))
    }

    fn get_default_alloc_params(&self) -> <Self::Bytes as AllocBytes>::AllocParams {
        use crate::alloc::MiriAllocParams;

        match &self.allocator {
            Some(alloc) => MiriAllocParams::Isolated(alloc.clone()),
            None => MiriAllocParams::Global,
        }
    }

    fn enter_trace_span(span: impl FnOnce() -> tracing::Span) -> impl EnteredTraceSpan {
        #[cfg(feature = "tracing")]
        {
            span().entered()
        }
        #[cfg(not(feature = "tracing"))]
        #[expect(clippy::unused_unit)]
        {
            let _ = span; // so we avoid the "unused variable" warning
            ()
        }
    }
}

/// Trait for callbacks handling asynchronous machine operations.
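///
/// A callback is consumed when it fires (note the `self: Box<Self>` receiver
/// below), so each callback runs at most once.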
pub trait MachineCallback<'tcx, T>: VisitProvenance {
    /// The function to be invoked when the callback is fired.
    fn call(
        self: Box<Self>,
        ecx: &mut InterpCx<'tcx, MiriMachine<'tcx>>,
        arg: T,
    ) -> InterpResult<'tcx>;
}

/// Type alias for boxed machine callbacks with a generic argument type.
pub type DynMachineCallback<'tcx, T> = Box<dyn MachineCallback<'tcx, T> + 'tcx>;

/// Creates a `DynMachineCallback`:
///
/// ```rust
/// callback!(
///     @capture<'tcx> {
///         var1: Ty1,
///         var2: Ty2<'tcx>,
///     }
///     |this, arg: ArgTy| {
///         // Implement the callback here.
///         todo!()
///     }
/// )
/// ```
///
/// All the captured variables' types must implement `VisitProvenance`.
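///
/// The macro expands to a one-off `Callback` struct that owns the captured
/// variables; `visit_provenance` visits each capture, and `call` consumes the
/// box and runs the body. A concrete instantiation might look like this
/// (hypothetical capture and argument types, for illustration only):
///
/// ```rust
/// let cb: DynMachineCallback<'tcx, u32> = callback!(
///     @capture<'tcx> {
///         expected: u32,
///     }
///     |_this, arg: u32| {
///         assert_eq!(arg, expected);
///         interp_ok(())
///     }
/// );
/// ```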
#[macro_export]
macro_rules! callback {
    (@capture<$tcx:lifetime $(,)? $($lft:lifetime),*>
        { $($name:ident: $type:ty),* $(,)? }
     |$this:ident, $arg:ident: $arg_ty:ty| $body:expr $(,)?) => {{
        struct Callback<$tcx, $($lft),*> {
            $($name: $type,)*
            _phantom: std::marker::PhantomData<&$tcx ()>,
        }

        impl<$tcx, $($lft),*> VisitProvenance for Callback<$tcx, $($lft),*> {
            fn visit_provenance(&self, _visit: &mut VisitWith<'_>) {
                $(
                    self.$name.visit_provenance(_visit);
                )*
            }
        }

        impl<$tcx, $($lft),*> MachineCallback<$tcx, $arg_ty> for Callback<$tcx, $($lft),*> {
            fn call(
                self: Box<Self>,
                $this: &mut MiriInterpCx<$tcx>,
                $arg: $arg_ty
            ) -> InterpResult<$tcx> {
                #[allow(unused_variables)]
                let Callback { $($name,)* _phantom } = *self;
                $body
            }
        }

        Box::new(Callback {
            $($name,)*
            _phantom: std::marker::PhantomData
        })
    }};
}