//! Global machine state as well as implementation of the interpreter engine
//! `Machine` trait.

use std::any::Any;
use std::borrow::Cow;
use std::cell::{Cell, RefCell};
use std::path::Path;
use std::rc::Rc;
use std::{fmt, process};

use rand::rngs::StdRng;
use rand::{Rng, SeedableRng};
use rustc_abi::{Align, ExternAbi, Size};
use rustc_apfloat::{Float, FloatConvert};
use rustc_data_structures::fx::{FxHashMap, FxHashSet};
#[allow(unused)]
use rustc_data_structures::static_assert_size;
use rustc_hir::attrs::InlineAttr;
use rustc_middle::middle::codegen_fn_attrs::TargetFeatureKind;
use rustc_middle::mir;
use rustc_middle::query::TyCtxtAt;
use rustc_middle::ty::layout::{
    HasTyCtxt, HasTypingEnv, LayoutCx, LayoutError, LayoutOf, TyAndLayout,
};
use rustc_middle::ty::{self, Instance, Ty, TyCtxt};
use rustc_session::config::InliningThreshold;
use rustc_span::def_id::{CrateNum, DefId};
use rustc_span::{Span, SpanData, Symbol};
use rustc_target::callconv::FnAbi;

use crate::alloc_addresses::EvalContextExt;
use crate::concurrency::cpu_affinity::{self, CpuAffinityMask};
use crate::concurrency::data_race::{self, NaReadType, NaWriteType};
use crate::concurrency::{AllocDataRaceHandler, GenmcCtx, GlobalDataRaceHandler, weak_memory};
use crate::*;

/// First real-time signal.
/// `signal(7)` says this must be between 32 and 64 and specifies 34 or 35
/// as typical values.
pub const SIGRTMIN: i32 = 34;

/// Last real-time signal.
/// `signal(7)` says it must be between 32 and 64 and specifies
/// `SIGRTMAX` - `SIGRTMIN` >= 8 (which is the value of `_POSIX_RTSIG_MAX`).
pub const SIGRTMAX: i32 = 42;
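
// Sanity check of the values above: `SIGRTMAX - SIGRTMIN == 42 - 34 == 8`, exactly the
// `_POSIX_RTSIG_MAX` minimum cited in the doc comment. A compile-time guard for this
// invariant could look like:
//
//     const _: () = assert!(SIGRTMAX - SIGRTMIN >= 8);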

/// Each anonymous global (constant, vtable, function pointer, ...) has multiple addresses, but only
/// this many. Since const allocations are never deallocated, choosing a new [`AllocId`] and thus
/// base address for each evaluation would produce unbounded memory usage.
const ADDRS_PER_ANON_GLOBAL: usize = 32;

#[derive(Copy, Clone, Debug, PartialEq)]
pub enum AlignmentCheck {
    /// Do not check alignment.
    None,
    /// Check alignment "symbolically", i.e., using only the requested alignment for an
    /// allocation and not its real base address.
    Symbolic,
    /// Check alignment on the actual physical integer address.
    Int,
}
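
// Example of the difference: a `u8` allocation with requested alignment 1 may happen to land
// on a 4-aligned physical address. Reading a `u32` through a pointer into it then passes an
// `Int` check (the actual address is 4-aligned) but fails a `Symbolic` check, which only
// credits the allocation with its requested alignment of 1.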

#[derive(Copy, Clone, Debug, PartialEq)]
pub enum RejectOpWith {
    /// Isolated op is rejected with an abort of the machine.
    Abort,

    /// If not `Abort`, Miri returns an error for an isolated op,
    /// and the following options determine whether the user is warned about it.
    /// Do not print a warning about the rejected isolated op.
    NoWarning,

    /// Print a warning about the rejected isolated op, with a backtrace.
    Warning,

    /// Print a warning about the rejected isolated op, without a backtrace.
    WarningWithoutBacktrace,
}

#[derive(Copy, Clone, Debug, PartialEq)]
pub enum IsolatedOp {
    /// Reject an op requiring communication with the host. By
    /// default, Miri rejects the op with an abort. If not, it returns
    /// an error code and prints a warning about it. Warning levels
    /// are controlled by the `RejectOpWith` enum.
    Reject(RejectOpWith),

    /// Execute an op requiring communication with the host, i.e. disable isolation.
    Allow,
}

#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum BacktraceStyle {
    /// Prints a terser backtrace which ideally only contains relevant information.
    Short,
    /// Prints a backtrace with all possible information.
    Full,
    /// Prints only the frame that the error occurs in.
    Off,
}

#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum ValidationMode {
    /// Do not perform any kind of validation.
    No,
    /// Validate the interior of the value, but not things behind references.
    Shallow,
    /// Fully recursively validate references.
    Deep,
}

#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum FloatRoundingErrorMode {
    /// Apply a random error (the default).
    Random,
    /// Don't apply any error.
    None,
    /// Always apply the maximum error (with a random sign).
    Max,
}

/// Extra data stored with each stack frame.
pub struct FrameExtra<'tcx> {
    /// Extra data for the Borrow Tracker.
    pub borrow_tracker: Option<borrow_tracker::FrameState>,

    /// If this is `Some`, then this is a special "catch unwind" frame (the frame of `try_fn`
    /// called by `try`). When this frame is popped during unwinding a panic,
    /// we stop unwinding and use the `CatchUnwindData` to handle catching.
    pub catch_unwind: Option<CatchUnwindData<'tcx>>,

    /// If `measureme` profiling is enabled, holds timing information
    /// for the start of this frame. When we finish executing this frame,
    /// we use this to register a completed event with `measureme`.
    pub timing: Option<measureme::DetachedTiming>,

    /// Indicates whether a `Frame` is part of a workspace-local crate and is also not
    /// `#[track_caller]`. We compute this once on creation and store the result, as an
    /// optimization.
    /// This is used by `MiriMachine::current_span` and `MiriMachine::caller_span`.
    pub is_user_relevant: bool,

    /// Data race detector per-frame data.
    pub data_race: Option<data_race::FrameState>,
}

impl<'tcx> std::fmt::Debug for FrameExtra<'tcx> {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // Omitting `timing`, it does not support `Debug`.
        let FrameExtra { borrow_tracker, catch_unwind, timing: _, is_user_relevant, data_race } =
            self;
        f.debug_struct("FrameExtra")
            .field("borrow_tracker", borrow_tracker)
            .field("catch_unwind", catch_unwind)
            .field("is_user_relevant", is_user_relevant)
            .field("data_race", data_race)
            .finish()
    }
}

impl VisitProvenance for FrameExtra<'_> {
    fn visit_provenance(&self, visit: &mut VisitWith<'_>) {
        let FrameExtra {
            catch_unwind,
            borrow_tracker,
            timing: _,
            is_user_relevant: _,
            data_race: _,
        } = self;

        catch_unwind.visit_provenance(visit);
        borrow_tracker.visit_provenance(visit);
    }
}

/// Extra memory kinds.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum MiriMemoryKind {
    /// `__rust_alloc` memory.
    Rust,
    /// `miri_alloc` memory.
    Miri,
    /// `malloc` memory.
    C,
    /// Windows `HeapAlloc` memory.
    WinHeap,
    /// Windows "local" memory (to be freed with `LocalFree`).
    WinLocal,
    /// Memory for args, errno, env vars, and other parts of the machine-managed environment.
    /// This memory may leak.
    Machine,
    /// Memory allocated by the runtime, e.g. for readdir. Separate from `Machine` because we clean
    /// it up (or expect the user to invoke operations that clean it up) and leak-check it.
    Runtime,
    /// Globals copied from `tcx`.
    /// This memory may leak.
    Global,
    /// Memory for extern statics.
    /// This memory may leak.
    ExternStatic,
    /// Memory for thread-local statics.
    /// This memory may leak.
    Tls,
    /// Memory mapped directly by the program.
    Mmap,
}

impl From<MiriMemoryKind> for MemoryKind {
    #[inline(always)]
    fn from(kind: MiriMemoryKind) -> MemoryKind {
        MemoryKind::Machine(kind)
    }
}

impl MayLeak for MiriMemoryKind {
    #[inline(always)]
    fn may_leak(self) -> bool {
        use self::MiriMemoryKind::*;
        match self {
            Rust | Miri | C | WinHeap | WinLocal | Runtime => false,
            Machine | Global | ExternStatic | Tls | Mmap => true,
        }
    }
}
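
// For instance, `MemoryKind::Machine(MiriMemoryKind::C).may_leak()` is `false`, so memory
// obtained via `malloc` and never `free`d is reported by the leak checker, while `Global`
// and `Tls` memory is expected to outlive the program and is exempt.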

impl MiriMemoryKind {
    /// Whether we have a useful allocation span for an allocation of this kind.
    fn should_save_allocation_span(self) -> bool {
        use self::MiriMemoryKind::*;
        match self {
            // Heap allocations are fine since the `Allocation` is created immediately.
            Rust | Miri | C | WinHeap | WinLocal | Mmap => true,
            // Everything else is unclear, let's not show potentially confusing spans.
            Machine | Global | ExternStatic | Tls | Runtime => false,
        }
    }
}

impl fmt::Display for MiriMemoryKind {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        use self::MiriMemoryKind::*;
        match self {
            Rust => write!(f, "Rust heap"),
            Miri => write!(f, "Miri bare-metal heap"),
            C => write!(f, "C heap"),
            WinHeap => write!(f, "Windows heap"),
            WinLocal => write!(f, "Windows local memory"),
            Machine => write!(f, "machine-managed memory"),
            Runtime => write!(f, "language runtime memory"),
            Global => write!(f, "global (static or const)"),
            ExternStatic => write!(f, "extern static"),
            Tls => write!(f, "thread-local static"),
            Mmap => write!(f, "mmap"),
        }
    }
}

pub type MemoryKind = interpret::MemoryKind<MiriMemoryKind>;

/// Pointer provenance.
// This needs to be `Eq`+`Hash` because the `Machine` trait needs that because validity checking
// *might* be recursive and then it has to track which places have already been visited.
// These implementations are a bit questionable, and it means we may check the same place multiple
// times with different provenance, but that is in general not wrong.
#[derive(Clone, Copy, PartialEq, Eq, Hash)]
pub enum Provenance {
    /// For pointers with concrete provenance, we exactly know which allocation they are attached to
    /// and what their borrow tag is.
    Concrete {
        alloc_id: AllocId,
        /// Borrow Tracker tag.
        tag: BorTag,
    },
    /// Pointers with wildcard provenance are created on int-to-ptr casts. According to the
    /// specification, we should at that point angelically "guess" a provenance that will make all
    /// future uses of this pointer work, if at all possible. Of course such a semantics cannot be
    /// actually implemented in Miri. So instead, we approximate this, erroring on the side of
    /// accepting too much code rather than rejecting correct code: a pointer with wildcard
    /// provenance "acts like" any previously exposed pointer. Each time it is used, we check
    /// whether *some* exposed pointer could have done what we want to do, and if the answer is yes
    /// then we allow the access. This allows too much code in two ways:
    /// - The same wildcard pointer can "take the role" of multiple different exposed pointers on
    ///   subsequent memory accesses.
    /// - In the aliasing model, we don't just have to know the borrow tag of the pointer used for
    ///   the access, we also have to update the aliasing state -- and that update can be very
    ///   different depending on which borrow tag we pick! Stacked Borrows has support for this by
    ///   switching to a stack that is only approximately known, i.e. we over-approximate the effect
    ///   of using *any* exposed pointer for this access, and only keep information about the borrow
    ///   stack that would be true with all possible choices.
    Wildcard,
}
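
// Illustrative sketch (not from this file): the program below exercises both variants.
// `&x as *const u8` carries `Concrete` provenance; casting through `usize` exposes that
// provenance, and the int-to-ptr cast yields a pointer with `Wildcard` provenance whose
// accesses are checked against *all* exposed pointers, as described above:
//
//     let x = 42u8;
//     let addr = &x as *const u8 as usize; // exposes the concrete provenance of `&x`
//     let p = addr as *const u8;           // `Provenance::Wildcard`
//     unsafe { p.read() };                 // ok iff *some* exposed pointer could read here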

/// The "extra" information a pointer has over a regular `AllocId`.
#[derive(Copy, Clone, PartialEq)]
pub enum ProvenanceExtra {
    Concrete(BorTag),
    Wildcard,
}

#[cfg(target_pointer_width = "64")]
static_assert_size!(StrictPointer, 24);
// FIXME: this would fit in 24 bytes, but layout optimizations are not smart enough.
// #[cfg(target_pointer_width = "64")]
// static_assert_size!(Pointer, 24);
#[cfg(target_pointer_width = "64")]
static_assert_size!(Scalar, 32);

impl fmt::Debug for Provenance {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            Provenance::Concrete { alloc_id, tag } => {
                // Forward `alternate` flag to `alloc_id` printing.
                if f.alternate() {
                    write!(f, "[{alloc_id:#?}]")?;
                } else {
                    write!(f, "[{alloc_id:?}]")?;
                }
                // Print Borrow Tracker tag.
                write!(f, "{tag:?}")?;
            }
            Provenance::Wildcard => {
                write!(f, "[wildcard]")?;
            }
        }
        Ok(())
    }
}

impl interpret::Provenance for Provenance {
    /// We use absolute addresses in the `offset` of a `StrictPointer`.
    const OFFSET_IS_ADDR: bool = true;

    /// Miri implements wildcard provenance.
    const WILDCARD: Option<Self> = Some(Provenance::Wildcard);

    fn get_alloc_id(self) -> Option<AllocId> {
        match self {
            Provenance::Concrete { alloc_id, .. } => Some(alloc_id),
            Provenance::Wildcard => None,
        }
    }

    fn fmt(ptr: &interpret::Pointer<Self>, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let (prov, addr) = ptr.into_raw_parts(); // offset is absolute address
        write!(f, "{:#x}", addr.bytes())?;
        if f.alternate() {
            write!(f, "{prov:#?}")?;
        } else {
            write!(f, "{prov:?}")?;
        }
        Ok(())
    }

    fn join(left: Self, right: Self) -> Option<Self> {
        match (left, right) {
            // If both are the *same* concrete tag, that is the result.
            (
                Provenance::Concrete { alloc_id: left_alloc, tag: left_tag },
                Provenance::Concrete { alloc_id: right_alloc, tag: right_tag },
            ) if left_alloc == right_alloc && left_tag == right_tag => Some(left),
            // If one side is a wildcard, the best possible outcome is that it is equal to the other
            // one, and we use that.
            (Provenance::Wildcard, o) | (o, Provenance::Wildcard) => Some(o),
            // Otherwise, fall back to `None`.
            _ => None,
        }
    }
}
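
// `join` examples: joining two identical `Concrete` provenances yields that provenance;
// joining `Wildcard` with anything optimistically keeps the other side (the wildcard
// "could have been" that pointer); two distinct `Concrete` provenances join to `None`.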

impl fmt::Debug for ProvenanceExtra {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            ProvenanceExtra::Concrete(pid) => write!(f, "{pid:?}"),
            ProvenanceExtra::Wildcard => write!(f, "<wildcard>"),
        }
    }
}

impl ProvenanceExtra {
    pub fn and_then<T>(self, f: impl FnOnCe(BorTag) -> Option<T>) -> Option<T> {
        match self {
            ProvenanceExtra::Concrete(pid) => f(pid),
            ProvenanceExtra::Wildcard => None,
        }
    }
}

/// Extra per-allocation data.
#[derive(Debug)]
pub struct AllocExtra<'tcx> {
    /// State of the borrow tracker for this allocation, if enabled.
    pub borrow_tracker: Option<borrow_tracker::AllocState>,
    /// Extra state for data race detection.
    ///
    /// Invariant: The enum variant must match the enum variant in the `data_race` field on `MiriMachine`.
    pub data_race: AllocDataRaceHandler,
    /// A backtrace to where this allocation was allocated.
    /// As this is recorded for leak reports, it only exists
    /// if this allocation is leakable. The backtrace is not
    /// pruned yet; that should be done before printing it.
    pub backtrace: Option<Vec<FrameInfo<'tcx>>>,
    /// Synchronization primitives like to attach extra data to particular addresses. We store that
    /// inside the relevant allocation, to ensure that everything is removed when the allocation is
    /// freed.
    /// This maps offsets to synchronization-primitive-specific data.
    pub sync: FxHashMap<Size, Box<dyn Any>>,
}
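
// Illustrative use of `sync` (hypothetical shim code, `MyMutexState` is made up): a
// synchronization shim can stash per-address state and recover it later by downcasting:
//
//     alloc_extra.sync.insert(offset, Box::new(MyMutexState::default()));
//     let state = alloc_extra.sync.get(&offset).and_then(|b| b.downcast_ref::<MyMutexState>());
//
// Storing the state in the allocation (rather than a global table) ensures it is dropped
// together with the allocation, as the doc comment above explains.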

// We need a `Clone` impl because the machine passes `Allocation` through `Cow`...
// but that should never end up actually cloning our `AllocExtra`.
impl<'tcx> Clone for AllocExtra<'tcx> {
    fn clone(&self) -> Self {
        panic!("our allocations should never be cloned");
    }
}

impl VisitProvenance for AllocExtra<'_> {
    fn visit_provenance(&self, visit: &mut VisitWith<'_>) {
        let AllocExtra { borrow_tracker, data_race, backtrace: _, sync: _ } = self;

        borrow_tracker.visit_provenance(visit);
        data_race.visit_provenance(visit);
    }
}

/// Precomputed layouts of primitive types.
pub struct PrimitiveLayouts<'tcx> {
    pub unit: TyAndLayout<'tcx>,
    pub i8: TyAndLayout<'tcx>,
    pub i16: TyAndLayout<'tcx>,
    pub i32: TyAndLayout<'tcx>,
    pub i64: TyAndLayout<'tcx>,
    pub i128: TyAndLayout<'tcx>,
    pub isize: TyAndLayout<'tcx>,
    pub u8: TyAndLayout<'tcx>,
    pub u16: TyAndLayout<'tcx>,
    pub u32: TyAndLayout<'tcx>,
    pub u64: TyAndLayout<'tcx>,
    pub u128: TyAndLayout<'tcx>,
    pub usize: TyAndLayout<'tcx>,
    pub bool: TyAndLayout<'tcx>,
    pub mut_raw_ptr: TyAndLayout<'tcx>,   // *mut ()
    pub const_raw_ptr: TyAndLayout<'tcx>, // *const ()
}

impl<'tcx> PrimitiveLayouts<'tcx> {
    fn new(layout_cx: LayoutCx<'tcx>) -> Result<Self, &'tcx LayoutError<'tcx>> {
        let tcx = layout_cx.tcx();
        let mut_raw_ptr = Ty::new_mut_ptr(tcx, tcx.types.unit);
        let const_raw_ptr = Ty::new_imm_ptr(tcx, tcx.types.unit);
        Ok(Self {
            unit: layout_cx.layout_of(tcx.types.unit)?,
            i8: layout_cx.layout_of(tcx.types.i8)?,
            i16: layout_cx.layout_of(tcx.types.i16)?,
            i32: layout_cx.layout_of(tcx.types.i32)?,
            i64: layout_cx.layout_of(tcx.types.i64)?,
            i128: layout_cx.layout_of(tcx.types.i128)?,
            isize: layout_cx.layout_of(tcx.types.isize)?,
            u8: layout_cx.layout_of(tcx.types.u8)?,
            u16: layout_cx.layout_of(tcx.types.u16)?,
            u32: layout_cx.layout_of(tcx.types.u32)?,
            u64: layout_cx.layout_of(tcx.types.u64)?,
            u128: layout_cx.layout_of(tcx.types.u128)?,
            usize: layout_cx.layout_of(tcx.types.usize)?,
            bool: layout_cx.layout_of(tcx.types.bool)?,
            mut_raw_ptr: layout_cx.layout_of(mut_raw_ptr)?,
            const_raw_ptr: layout_cx.layout_of(const_raw_ptr)?,
        })
    }

    pub fn uint(&self, size: Size) -> Option<TyAndLayout<'tcx>> {
        match size.bits() {
            8 => Some(self.u8),
            16 => Some(self.u16),
            32 => Some(self.u32),
            64 => Some(self.u64),
            128 => Some(self.u128),
            _ => None,
        }
    }

    pub fn int(&self, size: Size) -> Option<TyAndLayout<'tcx>> {
        match size.bits() {
            8 => Some(self.i8),
            16 => Some(self.i16),
            32 => Some(self.i32),
            64 => Some(self.i64),
            128 => Some(self.i128),
            _ => None,
        }
    }
}
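
// Usage sketch (hypothetical call site): `machine.layouts.uint(Size::from_bits(32))` returns
// `Some(machine.layouts.u32)`; sizes other than 8/16/32/64/128 bits yield `None`, so callers
// must handle the `None` case for exotic sizes.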

/// The machine itself.
///
/// If you add anything here that stores machine values, remember to update
/// `visit_all_machine_values`!
pub struct MiriMachine<'tcx> {
    // We carry a copy of the global `TyCtxt` for convenience, so methods taking just `&MiriMachine` have `tcx` access.
    pub tcx: TyCtxt<'tcx>,

    /// Global data for borrow tracking.
    pub borrow_tracker: Option<borrow_tracker::GlobalState>,

    /// Depending on settings, this will be `None`,
    /// global data for a data race detector,
    /// or the context required for running in GenMC mode.
    ///
    /// Invariant: The enum variant must match the enum variant of `AllocDataRaceHandler` in the `data_race` field of all `AllocExtra`.
    pub data_race: GlobalDataRaceHandler,

    /// Ptr-int-cast module global data.
    pub alloc_addresses: alloc_addresses::GlobalState,

    /// Environment variables.
    pub(crate) env_vars: EnvVars<'tcx>,

    /// Return place of the main function.
    pub(crate) main_fn_ret_place: Option<MPlaceTy<'tcx>>,

    /// Program arguments (`Option` because we can only initialize them after creating the ecx).
    /// These are *pointers* to argc/argv because macOS.
    /// We also need the full command line as one string because of Windows.
    pub(crate) argc: Option<Pointer>,
    pub(crate) argv: Option<Pointer>,
    pub(crate) cmd_line: Option<Pointer>,

    /// TLS state.
    pub(crate) tls: TlsData<'tcx>,

    /// What should Miri do when an op requires communicating with the host,
    /// such as accessing host env vars, random number generation, and
    /// file system access.
    pub(crate) isolated_op: IsolatedOp,

    /// Whether to enforce the validity invariant.
    pub(crate) validation: ValidationMode,

    /// The table of file descriptors.
    pub(crate) fds: shims::FdTable,
    /// The table of directory descriptors.
    pub(crate) dirs: shims::DirTable,

    /// The list of all EpollEventInterest.
    pub(crate) epoll_interests: shims::EpollInterestTable,

    /// This machine's monotonic clock.
    pub(crate) monotonic_clock: MonotonicClock,

    /// The set of threads.
    pub(crate) threads: ThreadManager<'tcx>,

    /// Stores which thread is eligible to run on which CPUs.
    /// This has no effect at all, it is just tracked to produce the correct result
    /// in `sched_getaffinity`.
    pub(crate) thread_cpu_affinity: FxHashMap<ThreadId, CpuAffinityMask>,

    /// Precomputed `TyLayout`s for primitive data types that are commonly used inside Miri.
    pub(crate) layouts: PrimitiveLayouts<'tcx>,

    /// Allocations that are considered roots of static memory (that may leak).
    pub(crate) static_roots: Vec<AllocId>,

    /// The `measureme` profiler used to record timing information about
    /// the emulated program.
    profiler: Option<measureme::Profiler>,
    /// Used with `profiler` to cache the `StringId`s for event names
    /// used with `measureme`.
    string_cache: FxHashMap<String, measureme::StringId>,

    /// Cache of `Instance` exported under the given `Symbol` name.
    /// `None` means no `Instance` exported under the given name was found.
    pub(crate) exported_symbols_cache: FxHashMap<Symbol, Option<Instance<'tcx>>>,

    /// Equivalent setting to `RUST_BACKTRACE` on encountering an error.
    pub(crate) backtrace_style: BacktraceStyle,

    /// Crates which are considered local for the purposes of error reporting.
    pub(crate) local_crates: Vec<CrateNum>,

    /// Mapping extern static names to their pointer.
    extern_statics: FxHashMap<Symbol, StrictPointer>,

    /// The random number generator used for resolving non-determinism.
    /// Needs to be queried by `ptr_to_int`, hence needs interior mutability.
    pub(crate) rng: RefCell<StdRng>,

    /// The allocator used for the machine's `AllocBytes` in native-libs mode.
    pub(crate) allocator: Option<Rc<RefCell<crate::alloc::isolated_alloc::IsolatedAlloc>>>,

    /// The allocation IDs to report when they are being allocated
    /// (helps for debugging memory leaks and use-after-free bugs).
    tracked_alloc_ids: FxHashSet<AllocId>,
    /// For the tracked alloc ids, also report read/write accesses.
    track_alloc_accesses: bool,

    /// Controls whether alignment of memory accesses is being checked.
    pub(crate) check_alignment: AlignmentCheck,

    /// Failure rate of `compare_exchange_weak`, between 0.0 and 1.0.
    pub(crate) cmpxchg_weak_failure_rate: f64,

    /// The probability of the active thread being preempted at the end of each basic block.
    pub(crate) preemption_rate: f64,

    /// If `Some`, we will report the current stack every N basic blocks.
    pub(crate) report_progress: Option<u32>,
    /// The total number of blocks that have been executed.
    pub(crate) basic_block_count: u64,

    /// Handle of the optional shared object file for native functions.
    #[cfg(all(unix, feature = "native-lib"))]
    pub native_lib: Vec<(libloading::Library, std::path::PathBuf)>,
    #[cfg(not(all(unix, feature = "native-lib")))]
    pub native_lib: Vec<!>,

    /// Run a garbage collector for BorTags every N basic blocks.
    pub(crate) gc_interval: u32,
    /// The number of blocks that passed since the last BorTag GC pass.
    pub(crate) since_gc: u32,

    /// The number of CPUs to be reported by Miri.
    pub(crate) num_cpus: u32,

    /// Determines Miri's page size and associated values.
    pub(crate) page_size: u64,
    pub(crate) stack_addr: u64,
    pub(crate) stack_size: u64,

    /// Whether to collect a backtrace when each allocation is created, just in case it leaks.
    pub(crate) collect_leak_backtraces: bool,

    /// The spans we will use to report where an allocation was created and deallocated in
    /// diagnostics.
    pub(crate) allocation_spans: RefCell<FxHashMap<AllocId, (Span, Option<Span>)>>,

    /// For each allocation, an offset inside that allocation that was deemed aligned even for
    /// symbolic alignment checks. This cannot be stored in `AllocExtra` since it needs to be
    /// tracked for vtables and function allocations as well as regular allocations.
    ///
    /// Invariant: the promised alignment will never be less than the native alignment of the
    /// allocation.
    pub(crate) symbolic_alignment: RefCell<FxHashMap<AllocId, (Size, Align)>>,

    /// A cache of "data range" computations for unions (i.e., the offsets of non-padding bytes).
    union_data_ranges: FxHashMap<Ty<'tcx>, RangeSet>,

    /// Caches the sanity-checks for various pthread primitives.
    pub(crate) pthread_mutex_sanity: Cell<bool>,
    pub(crate) pthread_rwlock_sanity: Cell<bool>,
    pub(crate) pthread_condvar_sanity: Cell<bool>,

    /// Remembers whether we already warned about an extern type with Stacked Borrows.
    pub(crate) sb_extern_type_warned: Cell<bool>,
    /// Remembers whether we already warned about sharing memory with a native call.
    #[allow(unused)]
    pub(crate) native_call_mem_warned: Cell<bool>,
    /// Remembers which shims have already shown the warning about erroring in isolation.
    pub(crate) reject_in_isolation_warned: RefCell<FxHashSet<String>>,
    /// Remembers which int2ptr casts we have already warned about.
    pub(crate) int2ptr_warned: RefCell<FxHashSet<Span>>,

    /// Cache for `mangle_internal_symbol`.
    pub(crate) mangle_internal_symbol_cache: FxHashMap<&'static str, String>,

    /// Always prefer the intrinsic fallback body over the native Miri implementation.
    pub force_intrinsic_fallback: bool,

    /// Whether floating-point operations can behave non-deterministically.
    pub float_nondet: bool,
    /// Whether floating-point operations can have a non-deterministic rounding error.
    pub float_rounding_error: FloatRoundingErrorMode,

    /// Whether Miri artificially introduces short reads/writes on file descriptors.
    pub short_fd_operations: bool,
}

impl<'tcx> MiriMachine<'tcx> {
    /// Create a new MiriMachine.
    ///
    /// Invariant: `genmc_ctx.is_some() == config.genmc_config.is_some()`
    pub(crate) fn new(
        config: &MiriConfig,
        layout_cx: LayoutCx<'tcx>,
        genmc_ctx: Option<Rc<GenmcCtx>>,
    ) -> Self {
        let tcx = layout_cx.tcx();
        let local_crates = helpers::get_local_crates(tcx);
        let layouts =
            PrimitiveLayouts::new(layout_cx).expect("Couldn't get layouts of primitive types");
        let profiler = config.measureme_out.as_ref().map(|out| {
            let crate_name =
                tcx.sess.opts.crate_name.clone().unwrap_or_else(|| "unknown-crate".to_string());
            let pid = process::id();
            // We adopt the same naming scheme for the profiler output that rustc uses. In rustc,
            // the PID is padded so that the nondeterministic value of the PID does not spread
            // nondeterminism to the allocator. In Miri we are not aiming for such performance
            // control; we just pad for consistency with rustc.
            let filename = format!("{crate_name}-{pid:07}");
            let path = Path::new(out).join(filename);
            measureme::Profiler::new(path).expect("Couldn't create `measureme` profiler")
        });
        let rng = StdRng::seed_from_u64(config.seed.unwrap_or(0));
        let borrow_tracker = config.borrow_tracker.map(|bt| bt.instantiate_global_state(config));
        let data_race = if config.genmc_config.is_some() {
            // `genmc_ctx` persists across executions, so we don't create a new one here.
            GlobalDataRaceHandler::Genmc(genmc_ctx.unwrap())
        } else if config.data_race_detector {
            GlobalDataRaceHandler::Vclocks(Box::new(data_race::GlobalState::new(config)))
        } else {
            GlobalDataRaceHandler::None
        };
        // Determine page size, stack address, and stack size.
        // These values are mostly meaningless, but the stack address is also where we start
        // allocating physical integer addresses for all allocations.
        let page_size = if let Some(page_size) = config.page_size {
            page_size
        } else {
            let target = &tcx.sess.target;
            match target.arch.as_ref() {
                "wasm32" | "wasm64" => 64 * 1024, // https://webassembly.github.io/spec/core/exec/runtime.html#memory-instances
                "aarch64" => {
                    if target.options.vendor.as_ref() == "apple" {
                        // No "definitive" source, but see:
                        // https://www.wwdcnotes.com/notes/wwdc20/10214/
                        // https://github.com/ziglang/zig/issues/11308 etc.
                        16 * 1024
                    } else {
                        4 * 1024
                    }
                }
                _ => 4 * 1024,
            }
        };
        // On 16-bit targets, 32 pages is more than the entire address space!
        let stack_addr = if tcx.pointer_size().bits() < 32 { page_size } else { page_size * 32 };
        let stack_size =
            if tcx.pointer_size().bits() < 32 { page_size * 4 } else { page_size * 16 };
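        // Concretely, with the common 4 KiB page size on a >=32-bit target, this places the
        // stack at 128 KiB (32 pages) and gives it a size of 64 KiB (16 pages).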
        assert!(
            usize::try_from(config.num_cpus).unwrap() <= cpu_affinity::MAX_CPUS,
            "miri only supports up to {} CPUs, but {} were configured",
            cpu_affinity::MAX_CPUS,
            config.num_cpus
        );
        let threads = ThreadManager::new(config);
        let mut thread_cpu_affinity = FxHashMap::default();
        if matches!(&*tcx.sess.target.os, "linux" | "freebsd" | "android") {
            thread_cpu_affinity
                .insert(threads.active_thread(), CpuAffinityMask::new(&layout_cx, config.num_cpus));
        }
        MiriMachine {
            tcx,
            borrow_tracker,
            data_race,
            alloc_addresses: RefCell::new(alloc_addresses::GlobalStateInner::new(
                config, stack_addr,
            )),
            // `env_vars` depends on a full interpreter so we cannot properly initialize it yet.
            env_vars: EnvVars::default(),
            main_fn_ret_place: None,
            argc: None,
            argv: None,
            cmd_line: None,
            tls: TlsData::default(),
            isolated_op: config.isolated_op,
            validation: config.validation,
            fds: shims::FdTable::init(config.mute_stdout_stderr),
            epoll_interests: shims::EpollInterestTable::new(),
            dirs: Default::default(),
            layouts,
            threads,
            thread_cpu_affinity,
            static_roots: Vec::new(),
            profiler,
            string_cache: Default::default(),
            exported_symbols_cache: FxHashMap::default(),
            backtrace_style: config.backtrace_style,
            local_crates,
            extern_statics: FxHashMap::default(),
            rng: RefCell::new(rng),
            allocator: if !config.native_lib.is_empty() {
                Some(Rc::new(RefCell::new(crate::alloc::isolated_alloc::IsolatedAlloc::new())))
            } else {
                None
            },
            tracked_alloc_ids: config.tracked_alloc_ids.clone(),
            track_alloc_accesses: config.track_alloc_accesses,
            check_alignment: config.check_alignment,
            cmpxchg_weak_failure_rate: config.cmpxchg_weak_failure_rate,
            preemption_rate: config.preemption_rate,
            report_progress: config.report_progress,
            basic_block_count: 0,
            monotonic_clock: MonotonicClock::new(config.isolated_op == IsolatedOp::Allow),
            #[cfg(all(unix, feature = "native-lib"))]
            native_lib: config.native_lib.iter().map(|lib_file_path| {
                let host_triple = rustc_session::config::host_tuple();
                let target_triple = tcx.sess.opts.target_triple.tuple();
                // Check if host target == the session target.
                if host_triple != target_triple {
                    panic!(
                        "calling native C functions in linked .so file requires host and target to be the same: \
                        host={host_triple}, target={target_triple}",
                    );
                }
                // Note: it is the user's responsibility to provide a correct SO file.
                // WATCH OUT: If an invalid/incorrect SO file is specified, this can cause
                // undefined behaviour in Miri itself!
                (
                    unsafe {
                        libloading::Library::new(lib_file_path)
                            .expect("failed to read specified extern shared object file")
                    },
                    lib_file_path.clone(),
                )
            }).collect(),
            #[cfg(not(all(unix, feature = "native-lib")))]
            native_lib: config.native_lib.iter().map(|_| {
                panic!("calling functions from native libraries via FFI is not supported in this build of Miri")
            }).collect(),
            gc_interval: config.gc_interval,
            since_gc: 0,
            num_cpus: config.num_cpus,
            page_size,
            stack_addr,
            stack_size,
            collect_leak_backtraces: config.collect_leak_backtraces,
            allocation_spans: RefCell::new(FxHashMap::default()),
            symbolic_alignment: RefCell::new(FxHashMap::default()),
            union_data_ranges: FxHashMap::default(),
            pthread_mutex_sanity: Cell::new(false),
            pthread_rwlock_sanity: Cell::new(false),
            pthread_condvar_sanity: Cell::new(false),
            sb_extern_type_warned: Cell::new(false),
            native_call_mem_warned: Cell::new(false),
            reject_in_isolation_warned: Default::default(),
            int2ptr_warned: Default::default(),
            mangle_internal_symbol_cache: Default::default(),
            force_intrinsic_fallback: config.force_intrinsic_fallback,
            float_nondet: config.float_nondet,
            float_rounding_error: config.float_rounding_error,
            short_fd_operations: config.short_fd_operations,
        }
    }

    pub(crate) fn late_init(
        ecx: &mut MiriInterpCx<'tcx>,
        config: &MiriConfig,
        on_main_stack_empty: StackEmptyCallback<'tcx>,
    ) -> InterpResult<'tcx> {
        EnvVars::init(ecx, config)?;
        MiriMachine::init_extern_statics(ecx)?;
        ThreadManager::init(ecx, on_main_stack_empty);
        interp_ok(())
    }

    pub(crate) fn add_extern_static(ecx: &mut MiriInterpCx<'tcx>, name: &str, ptr: Pointer) {
        // This was just allocated, so there definitely is a pointer here.
        let ptr = ptr.into_pointer_or_addr().unwrap();
        ecx.machine.extern_statics.try_insert(Symbol::intern(name), ptr).unwrap();
    }

    pub(crate) fn communicate(&self) -> bool {
        self.isolated_op == IsolatedOp::Allow
    }

    /// Check whether the stack frame that this `FrameInfo` refers to is part of a local crate.
    pub(crate) fn is_local(&self, frame: &FrameInfo<'_>) -> bool {
        let def_id = frame.instance.def_id();
        def_id.is_local() || self.local_crates.contains(&def_id.krate)
    }

    /// Called when the interpreter is going to shut down abnormally, such as due to a Ctrl-C.
    pub(crate) fn handle_abnormal_termination(&mut self) {
        // All strings in the profile data are stored in a single string table which is not
        // written to disk until the profiler is dropped. If the interpreter exits without dropping
        // the profiler, it is not possible to interpret the profile data and all measureme tools
        // will panic when given the file.
        drop(self.profiler.take());
    }

    pub(crate) fn page_align(&self) -> Align {
        Align::from_bytes(self.page_size).unwrap()
    }

    pub(crate) fn allocated_span(&self, alloc_id: AllocId) -> Option<SpanData> {
        self.allocation_spans
            .borrow()
            .get(&alloc_id)
            .map(|(allocated, _deallocated)| allocated.data())
    }

    pub(crate) fn deallocated_span(&self, alloc_id: AllocId) -> Option<SpanData> {
        self.allocation_spans
            .borrow()
            .get(&alloc_id)
            .and_then(|(_allocated, deallocated)| *deallocated)
            .map(Span::data)
    }

    fn init_allocation(
        ecx: &MiriInterpCx<'tcx>,
        id: AllocId,
        kind: MemoryKind,
        size: Size,
        align: Align,
    ) -> InterpResult<'tcx, AllocExtra<'tcx>> {
        if ecx.machine.tracked_alloc_ids.contains(&id) {
            ecx.emit_diagnostic(NonHaltingDiagnostic::CreatedAlloc(id, size, align, kind));
        }

        let borrow_tracker = ecx
            .machine
            .borrow_tracker
            .as_ref()
            .map(|bt| bt.borrow_mut().new_allocation(id, size, kind, &ecx.machine));

        let data_race = match &ecx.machine.data_race {
            GlobalDataRaceHandler::None => AllocDataRaceHandler::None,
            GlobalDataRaceHandler::Vclocks(data_race) =>
                AllocDataRaceHandler::Vclocks(
                    data_race::AllocState::new_allocation(
                        data_race,
                        &ecx.machine.threads,
                        size,
                        kind,
                        ecx.machine.current_span(),
                    ),
                    data_race.weak_memory.then(weak_memory::AllocState::new_allocation),
                ),
            GlobalDataRaceHandler::Genmc(_genmc_ctx) => {
                // GenMC learns about new allocations directly from the alloc_addresses module,
                // since it has to be able to control the address at which they are placed.
                AllocDataRaceHandler::Genmc
            }
        };

        // If an allocation is leaked, we want to report a backtrace to indicate where it was
        // allocated. We don't need to record a backtrace for allocations which are allowed to
        // leak.
        let backtrace = if kind.may_leak() || !ecx.machine.collect_leak_backtraces {
            None
        } else {
            Some(ecx.generate_stacktrace())
        };

        if matches!(kind, MemoryKind::Machine(kind) if kind.should_save_allocation_span()) {
            ecx.machine
                .allocation_spans
                .borrow_mut()
                .insert(id, (ecx.machine.current_span(), None));
        }

        interp_ok(AllocExtra { borrow_tracker, data_race, backtrace, sync: FxHashMap::default() })
    }
}

impl VisitProvenance for MiriMachine<'_> {
    fn visit_provenance(&self, visit: &mut VisitWith<'_>) {
        #[rustfmt::skip]
        let MiriMachine {
            threads,
            thread_cpu_affinity: _,
            tls,
            env_vars,
            main_fn_ret_place,
            argc,
            argv,
            cmd_line,
            extern_statics,
            dirs,
            borrow_tracker,
            data_race,
            alloc_addresses,
            fds,
            epoll_interests: _,
            tcx: _,
            isolated_op: _,
            validation: _,
            monotonic_clock: _,
            layouts: _,
            static_roots: _,
            profiler: _,
            string_cache: _,
            exported_symbols_cache: _,
            backtrace_style: _,
            local_crates: _,
            rng: _,
            allocator: _,
            tracked_alloc_ids: _,
            track_alloc_accesses: _,
            check_alignment: _,
            cmpxchg_weak_failure_rate: _,
            preemption_rate: _,
            report_progress: _,
            basic_block_count: _,
            native_lib: _,
            gc_interval: _,
            since_gc: _,
            num_cpus: _,
            page_size: _,
            stack_addr: _,
            stack_size: _,
            collect_leak_backtraces: _,
            allocation_spans: _,
            symbolic_alignment: _,
            union_data_ranges: _,
            pthread_mutex_sanity: _,
            pthread_rwlock_sanity: _,
            pthread_condvar_sanity: _,
            sb_extern_type_warned: _,
            native_call_mem_warned: _,
            reject_in_isolation_warned: _,
            int2ptr_warned: _,
            mangle_internal_symbol_cache: _,
            force_intrinsic_fallback: _,
            float_nondet: _,
            float_rounding_error: _,
            short_fd_operations: _,
        } = self;

        threads.visit_provenance(visit);
        tls.visit_provenance(visit);
        env_vars.visit_provenance(visit);
        dirs.visit_provenance(visit);
        fds.visit_provenance(visit);
        data_race.visit_provenance(visit);
        borrow_tracker.visit_provenance(visit);
        alloc_addresses.visit_provenance(visit);
        main_fn_ret_place.visit_provenance(visit);
        argc.visit_provenance(visit);
        argv.visit_provenance(visit);
        cmd_line.visit_provenance(visit);
        for ptr in extern_statics.values() {
            ptr.visit_provenance(visit);
        }
    }
}

/// A rustc InterpCx for Miri.
pub type MiriInterpCx<'tcx> = InterpCx<'tcx, MiriMachine<'tcx>>;

/// A little trait that's useful to be inherited by extension traits.
pub trait MiriInterpCxExt<'tcx> {
    fn eval_context_ref<'a>(&'a self) -> &'a MiriInterpCx<'tcx>;
    fn eval_context_mut<'a>(&'a mut self) -> &'a mut MiriInterpCx<'tcx>;
}
impl<'tcx> MiriInterpCxExt<'tcx> for MiriInterpCx<'tcx> {
    #[inline(always)]
    fn eval_context_ref(&self) -> &MiriInterpCx<'tcx> {
        self
    }
    #[inline(always)]
    fn eval_context_mut(&mut self) -> &mut MiriInterpCx<'tcx> {
        self
    }
}

/// Machine hook implementations.
impl<'tcx> Machine<'tcx> for MiriMachine<'tcx> {
    type MemoryKind = MiriMemoryKind;
    type ExtraFnVal = DynSym;

    type FrameExtra = FrameExtra<'tcx>;
    type AllocExtra = AllocExtra<'tcx>;

    type Provenance = Provenance;
    type ProvenanceExtra = ProvenanceExtra;
    type Bytes = MiriAllocBytes;

    type MemoryMap =
        MonoHashMap<AllocId, (MemoryKind, Allocation<Provenance, Self::AllocExtra, Self::Bytes>)>;

    const GLOBAL_KIND: Option<MiriMemoryKind> = Some(MiriMemoryKind::Global);

    const PANIC_ON_ALLOC_FAIL: bool = false;

    #[inline(always)]
    fn enforce_alignment(ecx: &MiriInterpCx<'tcx>) -> bool {
        ecx.machine.check_alignment != AlignmentCheck::None
    }

    #[inline(always)]
    fn alignment_check(
        ecx: &MiriInterpCx<'tcx>,
        alloc_id: AllocId,
        alloc_align: Align,
        alloc_kind: AllocKind,
        offset: Size,
        align: Align,
    ) -> Option<Misalignment> {
        if ecx.machine.check_alignment != AlignmentCheck::Symbolic {
            // Just use the built-in check.
            return None;
        }
        if alloc_kind != AllocKind::LiveData {
            // Can't have any extra info here.
            return None;
        }
        // Let's see which alignment we have been promised for this allocation.
        let (promised_offset, promised_align) = ecx
            .machine
            .symbolic_alignment
            .borrow()
            .get(&alloc_id)
            .copied()
            .unwrap_or((Size::ZERO, alloc_align));
        if promised_align < align {
            // Definitely not enough.
            Some(Misalignment { has: promised_align, required: align })
        } else {
            // What's the offset between us and the promised alignment?
            let distance = offset.bytes().wrapping_sub(promised_offset.bytes());
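            // Worked example (illustrative numbers): with a promise of (offset 8, align 16) and
            // an access at offset 20 that requires align 4, `distance` is 12, which is a multiple
            // of 4, so the access counts as sufficiently aligned.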
            // That must also be aligned.
            if distance.is_multiple_of(align.bytes()) {
                // All looking good!
                None
            } else {
                // The biggest power of two through which `distance` is divisible.
                let distance_pow2 = 1 << distance.trailing_zeros();
                Some(Misalignment {
                    has: Align::from_bytes(distance_pow2).unwrap(),
                    required: align,
                })
            }
        }
    }

    #[inline(always)]
    fn enforce_validity(ecx: &MiriInterpCx<'tcx>, _layout: TyAndLayout<'tcx>) -> bool {
        ecx.machine.validation != ValidationMode::No
    }
    #[inline(always)]
    fn enforce_validity_recursively(
        ecx: &InterpCx<'tcx, Self>,
        _layout: TyAndLayout<'tcx>,
    ) -> bool {
        ecx.machine.validation == ValidationMode::Deep
    }

    #[inline(always)]
    fn ignore_optional_overflow_checks(ecx: &MiriInterpCx<'tcx>) -> bool {
        !ecx.tcx.sess.overflow_checks()
    }

    fn check_fn_target_features(
        ecx: &MiriInterpCx<'tcx>,
        instance: ty::Instance<'tcx>,
    ) -> InterpResult<'tcx> {
        let attrs = ecx.tcx.codegen_instance_attrs(instance.def);
        if attrs
            .target_features
            .iter()
            .any(|feature| !ecx.tcx.sess.target_features.contains(&feature.name))
        {
            let unavailable = attrs
                .target_features
                .iter()
                .filter(|&feature| {
                    feature.kind != TargetFeatureKind::Implied
                        && !ecx.tcx.sess.target_features.contains(&feature.name)
                })
                .fold(String::new(), |mut s, feature| {
                    if !s.is_empty() {
                        s.push_str(", ");
                    }
                    s.push_str(feature.name.as_str());
                    s
                });
            let msg = format!(
                "calling a function that requires unavailable target features: {unavailable}"
            );
            // On WASM, this is not UB, but instead gets rejected during validation of the module
            // (see #84988).
            if ecx.tcx.sess.target.is_like_wasm {
                throw_machine_stop!(TerminationInfo::Abort(msg));
            } else {
                throw_ub_format!("{msg}");
            }
        }
        interp_ok(())
    }

    #[inline(always)]
    fn find_mir_or_eval_fn(
        ecx: &mut MiriInterpCx<'tcx>,
        instance: ty::Instance<'tcx>,
        abi: &FnAbi<'tcx, Ty<'tcx>>,
        args: &[FnArg<'tcx, Provenance>],
        dest: &PlaceTy<'tcx>,
        ret: Option<mir::BasicBlock>,
        unwind: mir::UnwindAction,
    ) -> InterpResult<'tcx, Option<(&'tcx mir::Body<'tcx>, ty::Instance<'tcx>)>> {
        // For foreign items, try to see if we can emulate them.
        if ecx.tcx.is_foreign_item(instance.def_id()) {
            let _trace = enter_trace_span!("emulate_foreign_item");
            // An external function call that does not have a MIR body. We either find MIR elsewhere
            // or emulate its effect.
            // This will be `Ok(None)` if we're emulating the call entirely within Miri (no need
            // to run extra MIR), and `Ok(Some(body))` if we found MIR to run for the foreign
            // function.
            // Any needed call to `goto_block` will be performed by `emulate_foreign_item`.
            let args = ecx.copy_fn_args(args); // FIXME: Should `InPlace` arguments be reset to uninit?
            let link_name = Symbol::intern(ecx.tcx.symbol_name(instance).name);
            return ecx.emulate_foreign_item(link_name, abi, &args, dest, ret, unwind);
        }

        // Otherwise, load the MIR.
        let _trace = enter_trace_span!("load_mir");
        interp_ok(Some((ecx.load_mir(instance.def, None)?, instance)))
    }

    #[inline(always)]
    fn call_extra_fn(
        ecx: &mut MiriInterpCx<'tcx>,
        fn_val: DynSym,
        abi: &FnAbi<'tcx, Ty<'tcx>>,
        args: &[FnArg<'tcx, Provenance>],
        dest: &PlaceTy<'tcx>,
        ret: Option<mir::BasicBlock>,
        unwind: mir::UnwindAction,
    ) -> InterpResult<'tcx> {
        let args = ecx.copy_fn_args(args); // FIXME: Should `InPlace` arguments be reset to uninit?
        ecx.emulate_dyn_sym(fn_val, abi, &args, dest, ret, unwind)
    }

    #[inline(always)]
    fn call_intrinsic(
        ecx: &mut MiriInterpCx<'tcx>,
        instance: ty::Instance<'tcx>,
        args: &[OpTy<'tcx>],
        dest: &PlaceTy<'tcx>,
        ret: Option<mir::BasicBlock>,
        unwind: mir::UnwindAction,
    ) -> InterpResult<'tcx, Option<ty::Instance<'tcx>>> {
        ecx.call_intrinsic(instance, args, dest, ret, unwind)
    }

    #[inline(always)]
    fn assert_panic(
        ecx: &mut MiriInterpCx<'tcx>,
        msg: &mir::AssertMessage<'tcx>,
        unwind: mir::UnwindAction,
    ) -> InterpResult<'tcx> {
        ecx.assert_panic(msg, unwind)
    }

    fn panic_nounwind(ecx: &mut InterpCx<'tcx, Self>, msg: &str) -> InterpResult<'tcx> {
        ecx.start_panic_nounwind(msg)
    }

    fn unwind_terminate(
        ecx: &mut InterpCx<'tcx, Self>,
        reason: mir::UnwindTerminateReason,
    ) -> InterpResult<'tcx> {
        // Call the lang item.
        let panic = ecx.tcx.lang_items().get(reason.lang_item()).unwrap();
        let panic = ty::Instance::mono(ecx.tcx.tcx, panic);
        ecx.call_function(
            panic,
            ExternAbi::Rust,
            &[],
            None,
            ReturnContinuation::Goto { ret: None, unwind: mir::UnwindAction::Unreachable },
        )?;
        interp_ok(())
    }

    #[inline(always)]
    fn binary_ptr_op(
        ecx: &MiriInterpCx<'tcx>,
        bin_op: mir::BinOp,
        left: &ImmTy<'tcx>,
        right: &ImmTy<'tcx>,
    ) -> InterpResult<'tcx, ImmTy<'tcx>> {
        ecx.binary_ptr_op(bin_op, left, right)
    }

    #[inline(always)]
    fn generate_nan<F1: Float + FloatConvert<F2>, F2: Float>(
        ecx: &InterpCx<'tcx, Self>,
        inputs: &[F1],
    ) -> F2 {
        ecx.generate_nan(inputs)
    }

    #[inline(always)]
    fn apply_float_nondet(
        ecx: &mut InterpCx<'tcx, Self>,
        val: ImmTy<'tcx>,
    ) -> InterpResult<'tcx, ImmTy<'tcx>> {
        crate::math::apply_random_float_error_to_imm(ecx, val, 4)
    }

    #[inline(always)]
    fn equal_float_min_max<F: Float>(ecx: &MiriInterpCx<'tcx>, a: F, b: F) -> F {
        ecx.equal_float_min_max(a, b)
    }

    #[inline(always)]
    fn ub_checks(ecx: &InterpCx<'tcx, Self>) -> InterpResult<'tcx, bool> {
        interp_ok(ecx.tcx.sess.ub_checks())
    }

    #[inline(always)]
    fn contract_checks(ecx: &InterpCx<'tcx, Self>) -> InterpResult<'tcx, bool> {
        interp_ok(ecx.tcx.sess.contract_checks())
    }

    #[inline(always)]
    fn thread_local_static_pointer(
        ecx: &mut MiriInterpCx<'tcx>,
        def_id: DefId,
    ) -> InterpResult<'tcx, StrictPointer> {
        ecx.get_or_create_thread_local_alloc(def_id)
    }

    fn extern_static_pointer(
        ecx: &MiriInterpCx<'tcx>,
        def_id: DefId,
    ) -> InterpResult<'tcx, StrictPointer> {
        let link_name = Symbol::intern(ecx.tcx.symbol_name(Instance::mono(*ecx.tcx, def_id)).name);
        if let Some(&ptr) = ecx.machine.extern_statics.get(&link_name) {
            // Various parts of the engine rely on `get_alloc_info` for size and alignment
            // information. That uses the type information of this static.
            // Make sure it matches the Miri allocation for this.
            let Provenance::Concrete { alloc_id, .. } = ptr.provenance else {
                panic!("extern_statics cannot contain wildcards")
            };
            let info = ecx.get_alloc_info(alloc_id);
            let def_ty = ecx.tcx.type_of(def_id).instantiate_identity();
            let extern_decl_layout =
                ecx.tcx.layout_of(ecx.typing_env().as_query_input(def_ty)).unwrap();
            if extern_decl_layout.size != info.size || extern_decl_layout.align.abi != info.align {
                throw_unsup_format!(
                    "extern static `{link_name}` has been declared as `{krate}::{name}` \
                    with a size of {decl_size} bytes and alignment of {decl_align} bytes, \
                    but Miri emulates it via an extern static shim \
                    with a size of {shim_size} bytes and alignment of {shim_align} bytes",
                    name = ecx.tcx.def_path_str(def_id),
                    krate = ecx.tcx.crate_name(def_id.krate),
                    decl_size = extern_decl_layout.size.bytes(),
                    decl_align = extern_decl_layout.align.abi.bytes(),
                    shim_size = info.size.bytes(),
                    shim_align = info.align.bytes(),
                )
            }
            interp_ok(ptr)
        } else {
            throw_unsup_format!("extern static `{link_name}` is not supported by Miri")
        }
    }
1347
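    /// Set up the machine-specific `AllocExtra` for a fresh non-global allocation.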
    fn init_local_allocation(
        ecx: &MiriInterpCx<'tcx>,
        id: AllocId,
        kind: MemoryKind,
        size: Size,
        align: Align,
    ) -> InterpResult<'tcx, Self::AllocExtra> {
        assert!(kind != MiriMemoryKind::Global.into());
        MiriMachine::init_allocation(ecx, id, kind, size, align)
    }

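    /// Turn a root pointer into a Miri pointer: pick a base address and attach the
    /// borrow tracker's root tag for this allocation.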
    fn adjust_alloc_root_pointer(
        ecx: &MiriInterpCx<'tcx>,
        ptr: interpret::Pointer<CtfeProvenance>,
        kind: Option<MemoryKind>,
    ) -> InterpResult<'tcx, interpret::Pointer<Provenance>> {
        let kind = kind.expect("we set our GLOBAL_KIND so this cannot be None");
        let alloc_id = ptr.provenance.alloc_id();
        if cfg!(debug_assertions) {
            // The machine promises to never call us on thread-local or extern statics.
            match ecx.tcx.try_get_global_alloc(alloc_id) {
                Some(GlobalAlloc::Static(def_id)) if ecx.tcx.is_thread_local_static(def_id) => {
                    panic!("adjust_alloc_root_pointer called on thread-local static")
                }
                Some(GlobalAlloc::Static(def_id)) if ecx.tcx.is_foreign_item(def_id) => {
                    panic!("adjust_alloc_root_pointer called on extern static")
                }
                _ => {}
            }
        }
        // FIXME: can we somehow preserve the immutability of `ptr`?
        let tag = if let Some(borrow_tracker) = &ecx.machine.borrow_tracker {
            borrow_tracker.borrow_mut().root_ptr_tag(alloc_id, &ecx.machine)
        } else {
            // The value does not matter; the borrow tracker is disabled.
            BorTag::default()
        };
        ecx.adjust_alloc_root_pointer(ptr, tag, kind)
    }

    /// Called on `usize as ptr` casts.
    #[inline(always)]
    fn ptr_from_addr_cast(ecx: &MiriInterpCx<'tcx>, addr: u64) -> InterpResult<'tcx, Pointer> {
        ecx.ptr_from_addr_cast(addr)
    }

    /// Called on `ptr as usize` casts.
    /// (Actually computing the resulting `usize` doesn't need machine help,
    /// that's just `Scalar::try_to_int`.)
    #[inline(always)]
    fn expose_provenance(
        ecx: &InterpCx<'tcx, Self>,
        provenance: Self::Provenance,
    ) -> InterpResult<'tcx> {
        ecx.expose_provenance(provenance)
    }

    /// Convert a pointer with provenance into an allocation-offset pair and extra provenance info.
    /// `size` says how many bytes of memory are expected at that pointer. The *sign* of `size` can
    /// be used to disambiguate situations where a wildcard pointer sits right in between two
    /// allocations.
    ///
    /// If `ptr.provenance.get_alloc_id()` is `Some(p)`, the returned `AllocId` must be `p`.
    /// The resulting `AllocId` will just be used for that one step and then forgotten again
    /// (i.e., we'll never turn the data returned here back into a `Pointer` that might be
    /// stored in machine state).
    ///
    /// When this fails, that means the pointer does not point to a live allocation.
    fn ptr_get_alloc(
        ecx: &MiriInterpCx<'tcx>,
        ptr: StrictPointer,
        size: i64,
    ) -> Option<(AllocId, Size, Self::ProvenanceExtra)> {
        let rel = ecx.ptr_get_alloc(ptr, size);

        rel.map(|(alloc_id, size)| {
            let tag = match ptr.provenance {
                Provenance::Concrete { tag, .. } => ProvenanceExtra::Concrete(tag),
                Provenance::Wildcard => ProvenanceExtra::Wildcard,
            };
            (alloc_id, size, tag)
        })
    }

    /// Called to adjust global allocations to the Provenance and AllocExtra of this machine.
    ///
    /// If `alloc` contains pointers, then they are all pointing to globals.
    ///
    /// This should avoid copying if no work has to be done! If this returns an owned
    /// allocation (because a copy had to be done to adjust things), machine memory will
    /// cache the result. (This relies on `AllocMap::get_or` being able to add the
    /// owned allocation to the map even when the map is shared.)
    fn adjust_global_allocation<'b>(
        ecx: &InterpCx<'tcx, Self>,
        id: AllocId,
        alloc: &'b Allocation,
    ) -> InterpResult<'tcx, Cow<'b, Allocation<Self::Provenance, Self::AllocExtra, Self::Bytes>>>
    {
        let alloc = alloc.adjust_from_tcx(
            &ecx.tcx,
            |bytes, align| ecx.get_global_alloc_bytes(id, bytes, align),
            |ptr| ecx.global_root_pointer(ptr),
        )?;
        let kind = MiriMemoryKind::Global.into();
        let extra = MiriMachine::init_allocation(ecx, id, kind, alloc.size(), alloc.align)?;
        interp_ok(Cow::Owned(alloc.with_extra(extra)))
    }

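    /// Hook run before every memory read: emits tracking diagnostics and forwards
    /// the access to the data race detector, weak memory emulation, and borrow tracker.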
    #[inline(always)]
    fn before_memory_read(
        _tcx: TyCtxtAt<'tcx>,
        machine: &Self,
        alloc_extra: &AllocExtra<'tcx>,
        ptr: Pointer,
        (alloc_id, prov_extra): (AllocId, Self::ProvenanceExtra),
        range: AllocRange,
    ) -> InterpResult<'tcx> {
        if machine.track_alloc_accesses && machine.tracked_alloc_ids.contains(&alloc_id) {
            machine
                .emit_diagnostic(NonHaltingDiagnostic::AccessedAlloc(alloc_id, AccessKind::Read));
        }
        // The order of checks is deliberate, to prefer reporting a data race over a borrow tracker error.
        match &machine.data_race {
            GlobalDataRaceHandler::None => {}
            GlobalDataRaceHandler::Genmc(genmc_ctx) =>
                genmc_ctx.memory_load(machine, ptr.addr(), range.size)?,
            GlobalDataRaceHandler::Vclocks(_data_race) => {
                let _trace = enter_trace_span!(data_race::before_memory_read);
                let AllocDataRaceHandler::Vclocks(data_race, weak_memory) = &alloc_extra.data_race
                else {
                    unreachable!();
                };
                data_race.read(alloc_id, range, NaReadType::Read, None, machine)?;
                if let Some(weak_memory) = weak_memory {
                    weak_memory.memory_accessed(range, machine.data_race.as_vclocks_ref().unwrap());
                }
            }
        }
        if let Some(borrow_tracker) = &alloc_extra.borrow_tracker {
            borrow_tracker.before_memory_read(alloc_id, prov_extra, range, machine)?;
        }
        interp_ok(())
    }

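    /// Hook run before every memory write; same structure as `before_memory_read`.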
    #[inline(always)]
    fn before_memory_write(
        _tcx: TyCtxtAt<'tcx>,
        machine: &mut Self,
        alloc_extra: &mut AllocExtra<'tcx>,
        ptr: Pointer,
        (alloc_id, prov_extra): (AllocId, Self::ProvenanceExtra),
        range: AllocRange,
    ) -> InterpResult<'tcx> {
        if machine.track_alloc_accesses && machine.tracked_alloc_ids.contains(&alloc_id) {
            machine
                .emit_diagnostic(NonHaltingDiagnostic::AccessedAlloc(alloc_id, AccessKind::Write));
        }
        match &machine.data_race {
            GlobalDataRaceHandler::None => {}
            GlobalDataRaceHandler::Genmc(genmc_ctx) => {
                genmc_ctx.memory_store(machine, ptr.addr(), range.size)?;
            }
            GlobalDataRaceHandler::Vclocks(_global_state) => {
                let _trace = enter_trace_span!(data_race::before_memory_write);
                let AllocDataRaceHandler::Vclocks(data_race, weak_memory) =
                    &mut alloc_extra.data_race
                else {
                    unreachable!()
                };
                data_race.write(alloc_id, range, NaWriteType::Write, None, machine)?;
                if let Some(weak_memory) = weak_memory {
                    weak_memory.memory_accessed(range, machine.data_race.as_vclocks_ref().unwrap());
                }
            }
        }
        if let Some(borrow_tracker) = &mut alloc_extra.borrow_tracker {
            borrow_tracker.before_memory_write(alloc_id, prov_extra, range, machine)?;
        }
        interp_ok(())
    }

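    /// Hook run before deallocating an allocation: the data race detector treats
    /// this as a write, and the borrow tracker and address-reuse machinery are informed.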
    #[inline(always)]
    fn before_memory_deallocation(
        _tcx: TyCtxtAt<'tcx>,
        machine: &mut Self,
        alloc_extra: &mut AllocExtra<'tcx>,
        ptr: Pointer,
        (alloc_id, prov_extra): (AllocId, Self::ProvenanceExtra),
        size: Size,
        align: Align,
        kind: MemoryKind,
    ) -> InterpResult<'tcx> {
        if machine.tracked_alloc_ids.contains(&alloc_id) {
            machine.emit_diagnostic(NonHaltingDiagnostic::FreedAlloc(alloc_id));
        }
        match &machine.data_race {
            GlobalDataRaceHandler::None => {}
            GlobalDataRaceHandler::Genmc(genmc_ctx) =>
                genmc_ctx.handle_dealloc(machine, ptr.addr(), size, align, kind)?,
            GlobalDataRaceHandler::Vclocks(_global_state) => {
                let _trace = enter_trace_span!(data_race::before_memory_deallocation);
                let data_race = alloc_extra.data_race.as_vclocks_mut().unwrap();
                data_race.write(
                    alloc_id,
                    alloc_range(Size::ZERO, size),
                    NaWriteType::Deallocate,
                    None,
                    machine,
                )?;
            }
        }
        if let Some(borrow_tracker) = &mut alloc_extra.borrow_tracker {
            borrow_tracker.before_memory_deallocation(alloc_id, prov_extra, size, machine)?;
        }
        if let Some((_, deallocated_at)) = machine.allocation_spans.borrow_mut().get_mut(&alloc_id)
        {
            *deallocated_at = Some(machine.current_span());
        }
        machine.free_alloc_id(alloc_id, size, align, kind);
        interp_ok(())
    }

    #[inline(always)]
    fn retag_ptr_value(
        ecx: &mut InterpCx<'tcx, Self>,
        kind: mir::RetagKind,
        val: &ImmTy<'tcx>,
    ) -> InterpResult<'tcx, ImmTy<'tcx>> {
        if ecx.machine.borrow_tracker.is_some() {
            ecx.retag_ptr_value(kind, val)
        } else {
            interp_ok(val.clone())
        }
    }

    #[inline(always)]
    fn retag_place_contents(
        ecx: &mut InterpCx<'tcx, Self>,
        kind: mir::RetagKind,
        place: &PlaceTy<'tcx>,
    ) -> InterpResult<'tcx> {
        if ecx.machine.borrow_tracker.is_some() {
            ecx.retag_place_contents(kind, place)?;
        }
        interp_ok(())
    }

    fn protect_in_place_function_argument(
        ecx: &mut InterpCx<'tcx, Self>,
        place: &MPlaceTy<'tcx>,
    ) -> InterpResult<'tcx> {
        // If we have a borrow tracker, we also have it set up protection so that all reads *and
        // writes* during this call are insta-UB.
        let protected_place = if ecx.machine.borrow_tracker.is_some() {
            ecx.protect_place(place)?
        } else {
            // No borrow tracker.
            place.clone()
        };
        // We do need to write `uninit` so that even after the call ends, the former contents of
        // this place cannot be observed any more. We do the write after retagging so that for
        // Tree Borrows, this is considered to activate the new tag.
        // Conveniently this also ensures that the place actually points to suitable memory.
        ecx.write_uninit(&protected_place)?;
        // Now we throw away the protected place, ensuring its tag is never used again.
        interp_ok(())
    }

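    /// Set up the `FrameExtra` (profiling, borrow tracking, data race state) for a
    /// freshly pushed stack frame.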
    #[inline(always)]
    fn init_frame(
        ecx: &mut InterpCx<'tcx, Self>,
        frame: Frame<'tcx, Provenance>,
    ) -> InterpResult<'tcx, Frame<'tcx, Provenance, FrameExtra<'tcx>>> {
        // Start recording our event before doing anything else
        let timing = if let Some(profiler) = ecx.machine.profiler.as_ref() {
            let fn_name = frame.instance().to_string();
            let entry = ecx.machine.string_cache.entry(fn_name.clone());
            let name = entry.or_insert_with(|| profiler.alloc_string(&*fn_name));

            Some(profiler.start_recording_interval_event_detached(
                *name,
                measureme::EventId::from_label(*name),
                ecx.active_thread().to_u32(),
            ))
        } else {
            None
        };

        let borrow_tracker = ecx.machine.borrow_tracker.as_ref();

        let extra = FrameExtra {
            borrow_tracker: borrow_tracker.map(|bt| bt.borrow_mut().new_frame()),
            catch_unwind: None,
            timing,
            is_user_relevant: ecx.machine.is_user_relevant(&frame),
            data_race: ecx
                .machine
                .data_race
                .as_vclocks_ref()
                .map(|_| data_race::FrameState::default()),
        };

        interp_ok(frame.with_extra(extra))
    }

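    /// Miri maintains one call stack per thread; these accessors expose the stack
    /// of the currently active thread.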
    fn stack<'a>(
        ecx: &'a InterpCx<'tcx, Self>,
    ) -> &'a [Frame<'tcx, Self::Provenance, Self::FrameExtra>] {
        ecx.active_thread_stack()
    }

    fn stack_mut<'a>(
        ecx: &'a mut InterpCx<'tcx, Self>,
    ) -> &'a mut Vec<Frame<'tcx, Self::Provenance, Self::FrameExtra>> {
        ecx.active_thread_stack_mut()
    }

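    /// Hook run before each terminator: progress reporting, provenance GC,
    /// preemption, and advancing the monotonic clock.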
    fn before_terminator(ecx: &mut InterpCx<'tcx, Self>) -> InterpResult<'tcx> {
        ecx.machine.basic_block_count += 1u64; // a u64 that is only incremented by 1 will "never" overflow
        ecx.machine.since_gc += 1;
        // Possibly report our progress. This will point at the terminator we are about to execute.
        if let Some(report_progress) = ecx.machine.report_progress {
            if ecx.machine.basic_block_count.is_multiple_of(u64::from(report_progress)) {
                ecx.emit_diagnostic(NonHaltingDiagnostic::ProgressReport {
                    block_count: ecx.machine.basic_block_count,
                });
            }
        }

        // Search for BorTags to find all live pointers, then remove all other tags from borrow
        // stacks.
        // When debug assertions are enabled, run the GC as often as possible so that any cases
        // where it mistakenly removes an important tag become visible.
        if ecx.machine.gc_interval > 0 && ecx.machine.since_gc >= ecx.machine.gc_interval {
            ecx.machine.since_gc = 0;
            ecx.run_provenance_gc();
        }

        // These are our preemption points.
        // (This will only take effect after the terminator has been executed.)
        ecx.maybe_preempt_active_thread();

        // Make sure some time passes.
        ecx.machine.monotonic_clock.tick();

        interp_ok(())
    }

    #[inline(always)]
    fn after_stack_push(ecx: &mut InterpCx<'tcx, Self>) -> InterpResult<'tcx> {
        if ecx.frame().extra.is_user_relevant {
            // We just pushed a user-relevant frame, so it is now the topmost user-relevant frame.
            // If we push a frame that is not user-relevant, there is no need to do anything.
            let stack_len = ecx.active_thread_stack().len();
            ecx.active_thread_mut().set_top_user_relevant_frame(stack_len - 1);
        }
        interp_ok(())
    }

    fn before_stack_pop(ecx: &mut InterpCx<'tcx, Self>) -> InterpResult<'tcx> {
        let frame = ecx.frame();
        // We want this *before* the return value copy, because the return place itself is protected
        // until we do `on_stack_pop` here, and we need to un-protect it to copy the return value.
        if ecx.machine.borrow_tracker.is_some() {
            ecx.on_stack_pop(frame)?;
        }
        if frame.extra.is_user_relevant {
            // All that we store is whether or not the frame we just removed is user-relevant, so
            // now we have no idea where the next topmost user-relevant frame is. So we recompute
            // it. (If this ever becomes a bottleneck, we could have `push` store the previous
            // user-relevant frame and restore that here.)
            // We have to skip the frame that is just being popped.
            ecx.active_thread_mut().recompute_top_user_relevant_frame(/* skip */ 1);
        }
        // tracing-tree can automatically annotate scope changes, but it gets very confused by our
        // concurrency and what it prints is just plain wrong. So we print our own information
        // instead. (Cc https://github.com/rust-lang/miri/issues/2266)
        info!("Leaving {}", ecx.frame().instance());
        interp_ok(())
    }

    #[inline(always)]
    fn after_stack_pop(
        ecx: &mut InterpCx<'tcx, Self>,
        frame: Frame<'tcx, Provenance, FrameExtra<'tcx>>,
        unwinding: bool,
    ) -> InterpResult<'tcx, ReturnAction> {
        let res = {
            // Move `frame` into a sub-scope so we control when it will be dropped.
            let mut frame = frame;
            let timing = frame.extra.timing.take();
            let res = ecx.handle_stack_pop_unwind(frame.extra, unwinding);
            if let Some(profiler) = ecx.machine.profiler.as_ref() {
                profiler.finish_recording_interval_event(timing.unwrap());
            }
            res
        };
        // Needs to be done after dropping frame to show up on the right nesting level.
        // (Cc https://github.com/rust-lang/miri/issues/2266)
        if !ecx.active_thread_stack().is_empty() {
            info!("Continuing in {}", ecx.frame().instance());
        }
        res
    }

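    /// Inform the data race detector about a read of a local variable.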
    fn after_local_read(
        ecx: &InterpCx<'tcx, Self>,
        frame: &Frame<'tcx, Provenance, FrameExtra<'tcx>>,
        local: mir::Local,
    ) -> InterpResult<'tcx> {
        if let Some(data_race) = &frame.extra.data_race {
            let _trace = enter_trace_span!(data_race::after_local_read);
            data_race.local_read(local, &ecx.machine);
        }
        interp_ok(())
    }

    fn after_local_write(
        ecx: &mut InterpCx<'tcx, Self>,
        local: mir::Local,
        storage_live: bool,
    ) -> InterpResult<'tcx> {
        if let Some(data_race) = &ecx.frame().extra.data_race {
            let _trace = enter_trace_span!(data_race::after_local_write);
            data_race.local_write(local, storage_live, &ecx.machine);
        }
        interp_ok(())
    }

    fn after_local_moved_to_memory(
        ecx: &mut InterpCx<'tcx, Self>,
        local: mir::Local,
        mplace: &MPlaceTy<'tcx>,
    ) -> InterpResult<'tcx> {
        let Some(Provenance::Concrete { alloc_id, .. }) = mplace.ptr().provenance else {
            panic!("after_local_moved_to_memory should only be called on fresh allocations");
        };
        // Record the span where this was allocated: the declaration of the local.
        let local_decl = &ecx.frame().body().local_decls[local];
        let span = local_decl.source_info.span;
        ecx.machine.allocation_spans.borrow_mut().insert(alloc_id, (span, None));
        // The data race system has to fix the clocks used for this write.
        let (alloc_info, machine) = ecx.get_alloc_extra_mut(alloc_id)?;
        if let Some(data_race) =
            &machine.threads.active_thread_stack().last().unwrap().extra.data_race
        {
            let _trace = enter_trace_span!(data_race::after_local_moved_to_memory);
            data_race.local_moved_to_memory(
                local,
                alloc_info.data_race.as_vclocks_mut().unwrap(),
                machine,
            );
        }
        interp_ok(())
    }

    fn get_global_alloc_salt(
        ecx: &InterpCx<'tcx, Self>,
        instance: Option<ty::Instance<'tcx>>,
    ) -> usize {
        let unique = if let Some(instance) = instance {
            // Functions cannot be identified by pointers, as asm-equal functions can get
            // deduplicated by the linker (we set the "unnamed_addr" attribute for LLVM) and
            // functions can be duplicated across crates. We thus generate a new `AllocId` for every
            // mention of a function. This means that `main as fn() == main as fn()` is false, while
            // `let x = main as fn(); x == x` is true. However, as a quality-of-life feature it can
            // be useful to identify certain functions uniquely, e.g. for backtraces. So we identify
            // whether codegen will actually emit duplicate functions. It does that when they have
            // non-lifetime generics, or when they can be inlined. All other functions are given a
            // unique address. This is not a stable guarantee! The `inline` attribute is a hint and
            // cannot be relied upon for anything. But if we don't do this, the
            // `__rust_begin_short_backtrace`/`__rust_end_short_backtrace` logic breaks and panic
            // backtraces look terrible.
            let is_generic = instance
                .args
                .into_iter()
                .any(|arg| !matches!(arg.kind(), ty::GenericArgKind::Lifetime(_)));
            let can_be_inlined = matches!(
                ecx.tcx.sess.opts.unstable_opts.cross_crate_inline_threshold,
                InliningThreshold::Always
            ) || !matches!(
                ecx.tcx.codegen_instance_attrs(instance.def).inline,
                InlineAttr::Never
            );
            !is_generic && !can_be_inlined
        } else {
            // Non-functions are never unique.
            false
        };
        // Always use the same salt if the allocation is unique.
        if unique {
            CTFE_ALLOC_SALT
        } else {
            ecx.machine.rng.borrow_mut().random_range(0..ADDRS_PER_ANON_GLOBAL)
        }
    }

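    /// Cache the data bytes range computed for a union type, so the computation
    /// only runs once per type.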
    fn cached_union_data_range<'e>(
        ecx: &'e mut InterpCx<'tcx, Self>,
        ty: Ty<'tcx>,
        compute_range: impl FnOnce() -> RangeSet,
    ) -> Cow<'e, RangeSet> {
        Cow::Borrowed(ecx.machine.union_data_ranges.entry(ty).or_insert_with(compute_range))
    }

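    /// Pick the parameters for allocating the backing bytes of new allocations:
    /// the machine's own allocator if one is set up, the global one otherwise.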
    fn get_default_alloc_params(&self) -> <Self::Bytes as AllocBytes>::AllocParams {
        use crate::alloc::MiriAllocParams;

        match &self.allocator {
            Some(alloc) => MiriAllocParams::Isolated(alloc.clone()),
            None => MiriAllocParams::Global,
        }
    }

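    /// Enter a tracing span, compiling down to nothing when the "tracing" feature
    /// is disabled.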
    fn enter_trace_span(span: impl FnOnce() -> tracing::Span) -> impl EnteredTraceSpan {
        #[cfg(feature = "tracing")]
        {
            span().entered()
        }
        #[cfg(not(feature = "tracing"))]
        #[expect(clippy::unused_unit)]
        {
            let _ = span; // so we avoid the "unused variable" warning
            ()
        }
    }
}

/// Trait for callbacks handling asynchronous machine operations.
pub trait MachineCallback<'tcx, T>: VisitProvenance {
    /// The function to be invoked when the callback is fired.
    fn call(
        self: Box<Self>,
        ecx: &mut InterpCx<'tcx, MiriMachine<'tcx>>,
        arg: T,
    ) -> InterpResult<'tcx>;
}

/// Type alias for boxed machine callbacks with generic argument type.
pub type DynMachineCallback<'tcx, T> = Box<dyn MachineCallback<'tcx, T> + 'tcx>;

/// Creates a `DynMachineCallback`:
///
/// ```rust
/// callback!(
///     @capture<'tcx> {
///         var1: Ty1,
///         var2: Ty2<'tcx>,
///     }
///     |this, arg: ArgTy| {
///         // Implement the callback here.
///         todo!()
///     }
/// )
/// ```
///
/// All the captured variables' types must implement `VisitProvenance`.
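///
/// For example (an illustrative sketch only; `MyTag` stands in for any captured
/// type that implements `VisitProvenance`):
///
/// ```rust
/// let cb: DynMachineCallback<'tcx, u32> = callback!(
///     @capture<'tcx> {
///         tag: MyTag,
///     }
///     |this, arg: u32| {
///         // `tag` is moved into the callback; `this` is the interpreter context.
///         let _ = (this, tag, arg);
///         interp_ok(())
///     }
/// );
/// ```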
#[macro_export]
macro_rules! callback {
    (@capture<$tcx:lifetime $(,)? $($lft:lifetime),*>
        { $($name:ident: $type:ty),* $(,)? }
     |$this:ident, $arg:ident: $arg_ty:ty| $body:expr $(,)?) => {{
        struct Callback<$tcx, $($lft),*> {
            $($name: $type,)*
            _phantom: std::marker::PhantomData<&$tcx ()>,
        }

        impl<$tcx, $($lft),*> VisitProvenance for Callback<$tcx, $($lft),*> {
            fn visit_provenance(&self, _visit: &mut VisitWith<'_>) {
                $(
                    self.$name.visit_provenance(_visit);
                )*
            }
        }

        impl<$tcx, $($lft),*> MachineCallback<$tcx, $arg_ty> for Callback<$tcx, $($lft),*> {
            fn call(
                self: Box<Self>,
                $this: &mut MiriInterpCx<$tcx>,
                $arg: $arg_ty
            ) -> InterpResult<$tcx> {
                #[allow(unused_variables)]
                let Callback { $($name,)* _phantom } = *self;
                $body
            }
        }

        Box::new(Callback {
            $($name,)*
            _phantom: std::marker::PhantomData
        })
    }};
}