rustc_const_eval/interpret/memory.rs

1//! The memory subsystem.
2//!
3//! Generally, we use `Pointer` to denote memory addresses. However, some operations
4//! have a "size"-like parameter, and they take `Scalar` for the address because
5//! if the size is 0, then the pointer can also be a (properly aligned, non-null)
6//! integer. It is crucial that these operations call `check_align` *before*
7//! short-circuiting the empty case!
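// For illustration only (hypothetical caller, names assumed): a size-taking operation
// should check alignment *before* short-circuiting the size-0 case, roughly:
//
//     ecx.check_ptr_align(ptr, align)?;   // `ptr` may be a dangling-but-aligned integer
//     if size.bytes() == 0 {
//         return interp_ok(());           // only now is it safe to skip the access
//     }
//     // ... perform the actual access for non-zero sizes ...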
8
9use std::assert_matches::assert_matches;
10use std::borrow::{Borrow, Cow};
11use std::cell::Cell;
12use std::collections::VecDeque;
13use std::{fmt, ptr};
14
15use rustc_abi::{Align, HasDataLayout, Size};
16use rustc_ast::Mutability;
17use rustc_data_structures::fx::{FxHashSet, FxIndexMap};
18use rustc_middle::mir::display_allocation;
19use rustc_middle::ty::{self, Instance, Ty, TyCtxt};
20use rustc_middle::{bug, throw_ub_format};
21use tracing::{debug, instrument, trace};
22
23use super::{
24    AllocBytes, AllocId, AllocInit, AllocMap, AllocRange, Allocation, CheckAlignMsg,
25    CheckInAllocMsg, CtfeProvenance, GlobalAlloc, InterpCx, InterpResult, Machine, MayLeak,
26    Misalignment, Pointer, PointerArithmetic, Provenance, Scalar, alloc_range, err_ub,
27    err_ub_custom, interp_ok, throw_ub, throw_ub_custom, throw_unsup, throw_unsup_format,
28};
29use crate::const_eval::ConstEvalErrKind;
30use crate::fluent_generated as fluent;
31
32#[derive(Debug, PartialEq, Copy, Clone)]
33pub enum MemoryKind<T> {
34    /// Stack memory. Error if deallocated except during a stack pop.
35    Stack,
36    /// Memory allocated by the `caller_location` intrinsic. Error if ever deallocated.
37    CallerLocation,
38    /// Additional memory kinds a machine wishes to distinguish from the builtin ones.
39    Machine(T),
40}
41
42impl<T: MayLeak> MayLeak for MemoryKind<T> {
43    #[inline]
44    fn may_leak(self) -> bool {
45        match self {
46            MemoryKind::Stack => false,
47            MemoryKind::CallerLocation => true,
48            MemoryKind::Machine(k) => k.may_leak(),
49        }
50    }
51}
52
53impl<T: fmt::Display> fmt::Display for MemoryKind<T> {
54    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
55        match self {
56            MemoryKind::Stack => write!(f, "stack variable"),
57            MemoryKind::CallerLocation => write!(f, "caller location"),
58            MemoryKind::Machine(m) => write!(f, "{m}"),
59        }
60    }
61}
62
63/// The return value of `get_alloc_info` indicates the "kind" of the allocation.
64#[derive(Copy, Clone, PartialEq, Debug)]
65pub enum AllocKind {
66    /// A regular live data allocation.
67    LiveData,
68    /// A function allocation (that fn ptrs point to).
69    Function,
70    /// A vtable allocation.
71    VTable,
72    /// A TypeId allocation.
73    TypeId,
74    /// A dead allocation.
75    Dead,
76}
77
78/// Metadata about an `AllocId`.
79#[derive(Copy, Clone, PartialEq, Debug)]
80pub struct AllocInfo {
81    pub size: Size,
82    pub align: Align,
83    pub kind: AllocKind,
84    pub mutbl: Mutability,
85}
86
87impl AllocInfo {
88    fn new(size: Size, align: Align, kind: AllocKind, mutbl: Mutability) -> Self {
89        Self { size, align, kind, mutbl }
90    }
91}
92
93/// The value of a function pointer.
94#[derive(Debug, Copy, Clone)]
95pub enum FnVal<'tcx, Other> {
96    Instance(Instance<'tcx>),
97    Other(Other),
98}
99
100impl<'tcx, Other> FnVal<'tcx, Other> {
101    pub fn as_instance(self) -> InterpResult<'tcx, Instance<'tcx>> {
102        match self {
103            FnVal::Instance(instance) => interp_ok(instance),
104            FnVal::Other(_) => {
105                throw_unsup_format!("'foreign' function pointers are not supported in this context")
106            }
107        }
108    }
109}
110
111// `Memory` has to depend on the `Machine` because some of its operations
112// (e.g., `get`) call a `Machine` hook.
113pub struct Memory<'tcx, M: Machine<'tcx>> {
114    /// Allocations local to this instance of the interpreter. The kind
115    /// helps ensure that the same mechanism is used for allocation and
116    /// deallocation. When an allocation is not found here, it is a
117    /// global and looked up in the `tcx` for read access. Some machines may
118    /// have to mutate this map even on a read-only access to a global (because
119    /// they do pointer provenance tracking and the allocations in `tcx` have
120    /// the wrong type), so we let the machine override this type.
121    /// Either way, if the machine allows writing to a global, doing so will
122    /// create a copy of the global allocation here.
123    // FIXME: this should not be public, but interning currently needs access to it
124    pub(super) alloc_map: M::MemoryMap,
125
126    /// Map for "extra" function pointers.
127    extra_fn_ptr_map: FxIndexMap<AllocId, M::ExtraFnVal>,
128
129    /// To be able to compare pointers with null, and to check alignment for accesses
130    /// to ZSTs (where pointers may dangle), we keep track of the size even for allocations
131    /// that do not exist any more.
132    // FIXME: this should not be public, but interning currently needs access to it
133    pub(super) dead_alloc_map: FxIndexMap<AllocId, (Size, Align)>,
134
135    /// This stores whether we are currently doing reads purely for the purpose of validation.
136    /// Those reads do not trigger the machine's hooks for memory reads.
137    /// Needless to say, this must only be set with great care!
138    validation_in_progress: Cell<bool>,
139}
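// Rough lookup order used by the accessors below (see e.g. `get_alloc_raw` and
// `get_alloc_info`): `alloc_map` is consulted first for interpreter-local allocations,
// `extra_fn_ptr_map` covers machine-defined function pointers, everything else is
// resolved via `tcx.try_get_global_alloc`, and sizes/alignments of already-freed
// allocations are still answered from `dead_alloc_map`.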
140
141/// A reference to some allocation that was already bounds-checked for the given region
142/// and had the on-access machine hooks run.
143#[derive(Copy, Clone)]
144pub struct AllocRef<'a, 'tcx, Prov: Provenance, Extra, Bytes: AllocBytes = Box<[u8]>> {
145    alloc: &'a Allocation<Prov, Extra, Bytes>,
146    range: AllocRange,
147    tcx: TyCtxt<'tcx>,
148    alloc_id: AllocId,
149}
150/// A reference to some allocation that was already bounds-checked for the given region
151/// and had the on-access machine hooks run.
152pub struct AllocRefMut<'a, 'tcx, Prov: Provenance, Extra, Bytes: AllocBytes = Box<[u8]>> {
153    alloc: &'a mut Allocation<Prov, Extra, Bytes>,
154    range: AllocRange,
155    tcx: TyCtxt<'tcx>,
156    alloc_id: AllocId,
157}
158
159impl<'tcx, M: Machine<'tcx>> Memory<'tcx, M> {
160    pub fn new() -> Self {
161        Memory {
162            alloc_map: M::MemoryMap::default(),
163            extra_fn_ptr_map: FxIndexMap::default(),
164            dead_alloc_map: FxIndexMap::default(),
165            validation_in_progress: Cell::new(false),
166        }
167    }
168
169    /// This is used by [priroda](https://github.com/oli-obk/priroda)
170    pub fn alloc_map(&self) -> &M::MemoryMap {
171        &self.alloc_map
172    }
173}
174
175impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
176    /// Call this to turn untagged "global" pointers (obtained via `tcx`) into
177    /// the machine pointer to the allocation. Must never be used
178    /// for any other pointers, nor for TLS statics.
179    ///
180    /// Using the resulting pointer represents a *direct* access to that memory
181    /// (e.g. by directly using a `static`),
182    /// as opposed to access through a pointer that was created by the program.
183    ///
184    /// This function can fail only if `ptr` points to an `extern static`.
185    #[inline]
186    pub fn global_root_pointer(
187        &self,
188        ptr: Pointer<CtfeProvenance>,
189    ) -> InterpResult<'tcx, Pointer<M::Provenance>> {
190        let alloc_id = ptr.provenance.alloc_id();
191        // We need to handle `extern static`.
192        match self.tcx.try_get_global_alloc(alloc_id) {
193            Some(GlobalAlloc::Static(def_id)) if self.tcx.is_thread_local_static(def_id) => {
194                // Thread-local statics do not have a constant address. They *must* be accessed via
195                // `ThreadLocalRef`; we can never have a pointer to them as a regular constant value.
196                bug!("global memory cannot point to thread-local static")
197            }
198            Some(GlobalAlloc::Static(def_id)) if self.tcx.is_foreign_item(def_id) => {
199                return M::extern_static_pointer(self, def_id);
200            }
201            None => {
202                assert!(
203                    self.memory.extra_fn_ptr_map.contains_key(&alloc_id),
204                    "{alloc_id:?} is neither global nor a function pointer"
205                );
206            }
207            _ => {}
208        }
209        // And we need to get the provenance.
210        M::adjust_alloc_root_pointer(self, ptr, M::GLOBAL_KIND.map(MemoryKind::Machine))
211    }
212
213    pub fn fn_ptr(&mut self, fn_val: FnVal<'tcx, M::ExtraFnVal>) -> Pointer<M::Provenance> {
214        let id = match fn_val {
215            FnVal::Instance(instance) => {
216                let salt = M::get_global_alloc_salt(self, Some(instance));
217                self.tcx.reserve_and_set_fn_alloc(instance, salt)
218            }
219            FnVal::Other(extra) => {
220                // FIXME(RalfJung): Should we have a cache here?
221                let id = self.tcx.reserve_alloc_id();
222                let old = self.memory.extra_fn_ptr_map.insert(id, extra);
223                assert!(old.is_none());
224                id
225            }
226        };
227        // Functions are global allocations, so make sure we get the right root pointer.
228        // We know this is not an `extern static` so this cannot fail.
229        self.global_root_pointer(Pointer::from(id)).unwrap()
230    }
231
232    pub fn allocate_ptr(
233        &mut self,
234        size: Size,
235        align: Align,
236        kind: MemoryKind<M::MemoryKind>,
237        init: AllocInit,
238    ) -> InterpResult<'tcx, Pointer<M::Provenance>> {
239        let params = self.machine.get_default_alloc_params();
240        let alloc = if M::PANIC_ON_ALLOC_FAIL {
241            Allocation::new(size, align, init, params)
242        } else {
243            Allocation::try_new(size, align, init, params)?
244        };
245        self.insert_allocation(alloc, kind)
246    }
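    // Illustrative call (hypothetical values; `AllocInit::Zero` is assumed to be the
    // zero-initializing variant): allocating 16 zeroed bytes of stack memory would look
    // roughly like
    //
    //     let ptr = ecx.allocate_ptr(
    //         Size::from_bytes(16),
    //         Align::from_bytes(8).unwrap(),
    //         MemoryKind::Stack,
    //         AllocInit::Zero,
    //     )?;
    //
    // The returned pointer has offset 0 into a fresh allocation with machine provenance.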
247
248    pub fn allocate_bytes_ptr(
249        &mut self,
250        bytes: &[u8],
251        align: Align,
252        kind: MemoryKind<M::MemoryKind>,
253        mutability: Mutability,
254    ) -> InterpResult<'tcx, Pointer<M::Provenance>> {
255        let params = self.machine.get_default_alloc_params();
256        let alloc = Allocation::from_bytes(bytes, align, mutability, params);
257        self.insert_allocation(alloc, kind)
258    }
259
260    pub fn insert_allocation(
261        &mut self,
262        alloc: Allocation<M::Provenance, (), M::Bytes>,
263        kind: MemoryKind<M::MemoryKind>,
264    ) -> InterpResult<'tcx, Pointer<M::Provenance>> {
265        assert!(alloc.size() <= self.max_size_of_val());
266        let id = self.tcx.reserve_alloc_id();
267        debug_assert_ne!(
268            Some(kind),
269            M::GLOBAL_KIND.map(MemoryKind::Machine),
270            "dynamically allocating global memory"
271        );
272        // This cannot be merged with the `adjust_global_allocation` code path
273        // since here we have an allocation that already uses `M::Bytes`.
274        let extra = M::init_local_allocation(self, id, kind, alloc.size(), alloc.align)?;
275        let alloc = alloc.with_extra(extra);
276        self.memory.alloc_map.insert(id, (kind, alloc));
277        M::adjust_alloc_root_pointer(self, Pointer::from(id), Some(kind))
278    }
279
280    /// If this grows the allocation, `init_growth` determines
281    /// whether the additional space will be initialized.
282    pub fn reallocate_ptr(
283        &mut self,
284        ptr: Pointer<Option<M::Provenance>>,
285        old_size_and_align: Option<(Size, Align)>,
286        new_size: Size,
287        new_align: Align,
288        kind: MemoryKind<M::MemoryKind>,
289        init_growth: AllocInit,
290    ) -> InterpResult<'tcx, Pointer<M::Provenance>> {
291        let (alloc_id, offset, _prov) = self.ptr_get_alloc_id(ptr, 0)?;
292        if offset.bytes() != 0 {
293            throw_ub_custom!(
294                fluent::const_eval_realloc_or_alloc_with_offset,
295                ptr = format!("{ptr:?}"),
296                kind = "realloc"
297            );
298        }
299
300        // For simplicity's sake, we implement reallocate as "alloc, copy, dealloc".
301        // This happens so rarely that the perf advantage is outweighed by the maintenance cost.
302        // If requested, we zero-init the entire allocation, to ensure that a growing
303        // allocation has its new bytes properly set. For the part that is copied,
304        // `mem_copy` below will de-initialize things as necessary.
305        let new_ptr = self.allocate_ptr(new_size, new_align, kind, init_growth)?;
306        let old_size = match old_size_and_align {
307            Some((size, _align)) => size,
308            None => self.get_alloc_raw(alloc_id)?.size(),
309        };
310        // This will also call the access hooks.
311        self.mem_copy(ptr, new_ptr.into(), old_size.min(new_size), /*nonoverlapping*/ true)?;
312        self.deallocate_ptr(ptr, old_size_and_align, kind)?;
313
314        interp_ok(new_ptr)
315    }
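    // Net effect, with hypothetical sizes: growing an 8-byte allocation to 16 bytes
    // allocates a fresh 16-byte block (initialized per `init_growth`), copies
    // `min(8, 16) = 8` bytes over, and deallocates the old block. The incoming pointer
    // must point to offset 0 of its allocation, otherwise this is UB.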
316
317    /// Mark the `const_allocate`d allocation `ptr` points to as immutable so we can intern it.
318    pub fn make_const_heap_ptr_global(
319        &mut self,
320        ptr: Pointer<Option<CtfeProvenance>>,
321    ) -> InterpResult<'tcx>
322    where
323        M: Machine<'tcx, MemoryKind = crate::const_eval::MemoryKind, Provenance = CtfeProvenance>,
324    {
325        let (alloc_id, offset, _) = self.ptr_get_alloc_id(ptr, 0)?;
326        if offset.bytes() != 0 {
327            return Err(ConstEvalErrKind::ConstMakeGlobalWithOffset(ptr)).into();
328        }
329
330        if matches!(self.tcx.try_get_global_alloc(alloc_id), Some(_)) {
331            // This points to something outside the current interpreter.
332            return Err(ConstEvalErrKind::ConstMakeGlobalPtrIsNonHeap(ptr)).into();
333        }
334
335        // If we can't find it in `alloc_map` it must be dangling (because we don't use
336        // `extra_fn_ptr_map` in const-eval).
337        let (kind, alloc) = self
338            .memory
339            .alloc_map
340            .get_mut_or(alloc_id, || Err(ConstEvalErrKind::ConstMakeGlobalWithDanglingPtr(ptr)))?;
341
342        // Ensure this is actually a *heap* allocation, and record it as made-global.
343        match kind {
344            MemoryKind::Stack | MemoryKind::CallerLocation => {
345                return Err(ConstEvalErrKind::ConstMakeGlobalPtrIsNonHeap(ptr)).into();
346            }
347            MemoryKind::Machine(crate::const_eval::MemoryKind::Heap { was_made_global }) => {
348                if *was_made_global {
349                    return Err(ConstEvalErrKind::ConstMakeGlobalPtrAlreadyMadeGlobal(alloc_id))
350                        .into();
351                }
352                *was_made_global = true;
353            }
354        }
355
356        // Prevent further mutation, this is now an immutable global.
357        alloc.mutability = Mutability::Not;
358
359        interp_ok(())
360    }
361
362    #[instrument(skip(self), level = "debug")]
363    pub fn deallocate_ptr(
364        &mut self,
365        ptr: Pointer<Option<M::Provenance>>,
366        old_size_and_align: Option<(Size, Align)>,
367        kind: MemoryKind<M::MemoryKind>,
368    ) -> InterpResult<'tcx> {
369        let (alloc_id, offset, prov) = self.ptr_get_alloc_id(ptr, 0)?;
370        trace!("deallocating: {alloc_id:?}");
371
372        if offset.bytes() != 0 {
373            throw_ub_custom!(
374                fluent::const_eval_realloc_or_alloc_with_offset,
375                ptr = format!("{ptr:?}"),
376                kind = "dealloc",
377            );
378        }
379
380        let Some((alloc_kind, mut alloc)) = self.memory.alloc_map.remove(&alloc_id) else {
381            // Deallocating global memory -- always an error
382            return Err(match self.tcx.try_get_global_alloc(alloc_id) {
383                Some(GlobalAlloc::Function { .. }) => {
384                    err_ub_custom!(
385                        fluent::const_eval_invalid_dealloc,
386                        alloc_id = alloc_id,
387                        kind = "fn",
388                    )
389                }
390                Some(GlobalAlloc::VTable(..)) => {
391                    err_ub_custom!(
392                        fluent::const_eval_invalid_dealloc,
393                        alloc_id = alloc_id,
394                        kind = "vtable",
395                    )
396                }
397                Some(GlobalAlloc::TypeId { .. }) => {
398                    err_ub_custom!(
399                        fluent::const_eval_invalid_dealloc,
400                        alloc_id = alloc_id,
401                        kind = "typeid",
402                    )
403                }
404                Some(GlobalAlloc::Static(..) | GlobalAlloc::Memory(..)) => {
405                    err_ub_custom!(
406                        fluent::const_eval_invalid_dealloc,
407                        alloc_id = alloc_id,
408                        kind = "static_mem"
409                    )
410                }
411                None => err_ub!(PointerUseAfterFree(alloc_id, CheckInAllocMsg::MemoryAccess)),
412            })
413            .into();
414        };
415
416        if alloc.mutability.is_not() {
417            throw_ub_custom!(fluent::const_eval_dealloc_immutable, alloc = alloc_id,);
418        }
419        if alloc_kind != kind {
420            throw_ub_custom!(
421                fluent::const_eval_dealloc_kind_mismatch,
422                alloc = alloc_id,
423                alloc_kind = format!("{alloc_kind}"),
424                kind = format!("{kind}"),
425            );
426        }
427        if let Some((size, align)) = old_size_and_align {
428            if size != alloc.size() || align != alloc.align {
429                throw_ub_custom!(
430                    fluent::const_eval_dealloc_incorrect_layout,
431                    alloc = alloc_id,
432                    size = alloc.size().bytes(),
433                    align = alloc.align.bytes(),
434                    size_found = size.bytes(),
435                    align_found = align.bytes(),
436                )
437            }
438        }
439
440        // Let the machine take some extra action
441        let size = alloc.size();
442        M::before_memory_deallocation(
443            self.tcx,
444            &mut self.machine,
445            &mut alloc.extra,
446            ptr,
447            (alloc_id, prov),
448            size,
449            alloc.align,
450            kind,
451        )?;
452
453        // Don't forget to remember size and align of this now-dead allocation
454        let old = self.memory.dead_alloc_map.insert(alloc_id, (size, alloc.align));
455        if old.is_some() {
456            bug!("Nothing can be deallocated twice");
457        }
458
459        interp_ok(())
460    }
461
462    /// Internal helper function to determine the allocation and offset of a pointer (if any).
463    #[inline(always)]
464    fn get_ptr_access(
465        &self,
466        ptr: Pointer<Option<M::Provenance>>,
467        size: Size,
468    ) -> InterpResult<'tcx, Option<(AllocId, Size, M::ProvenanceExtra)>> {
469        let size = i64::try_from(size.bytes()).unwrap(); // it would be an error to even ask for more than isize::MAX bytes
470        Self::check_and_deref_ptr(
471            self,
472            ptr,
473            size,
474            CheckInAllocMsg::MemoryAccess,
475            |this, alloc_id, offset, prov| {
476                let (size, align) =
477                    this.get_live_alloc_size_and_align(alloc_id, CheckInAllocMsg::MemoryAccess)?;
478                interp_ok((size, align, (alloc_id, offset, prov)))
479            },
480        )
481    }
482
483    /// Check if the given pointer points to live memory of the given `size`.
484    /// The caller can control the error message for the out-of-bounds case.
485    #[inline(always)]
486    pub fn check_ptr_access(
487        &self,
488        ptr: Pointer<Option<M::Provenance>>,
489        size: Size,
490        msg: CheckInAllocMsg,
491    ) -> InterpResult<'tcx> {
492        let size = i64::try_from(size.bytes()).unwrap(); // it would be an error to even ask for more than isize::MAX bytes
493        Self::check_and_deref_ptr(self, ptr, size, msg, |this, alloc_id, _, _| {
494            let (size, align) = this.get_live_alloc_size_and_align(alloc_id, msg)?;
495            interp_ok((size, align, ()))
496        })?;
497        interp_ok(())
498    }
499
500    /// Check whether the given pointer points to live memory for a signed number of bytes.
501    /// A negative amount means that the given range of memory to the left of the pointer
502    /// needs to be dereferenceable.
503    pub fn check_ptr_access_signed(
504        &self,
505        ptr: Pointer<Option<M::Provenance>>,
506        size: i64,
507        msg: CheckInAllocMsg,
508    ) -> InterpResult<'tcx> {
509        Self::check_and_deref_ptr(self, ptr, size, msg, |this, alloc_id, _, _| {
510            let (size, align) = this.get_live_alloc_size_and_align(alloc_id, msg)?;
511            interp_ok((size, align, ()))
512        })?;
513        interp_ok(())
514    }
515
516    /// Low-level helper function to check if a ptr is in-bounds and potentially return a reference
517    /// to the allocation it points to. Supports both shared and mutable references, as the actual
518    /// checking is offloaded to a helper closure. Supports signed sizes for checks "to the left" of
519    /// a pointer.
520    ///
521    /// `alloc_size` will only get called for non-zero-sized accesses.
522    ///
523    /// Returns `None` if and only if the size is 0.
524    fn check_and_deref_ptr<T, R: Borrow<Self>>(
525        this: R,
526        ptr: Pointer<Option<M::Provenance>>,
527        size: i64,
528        msg: CheckInAllocMsg,
529        alloc_size: impl FnOnce(
530            R,
531            AllocId,
532            Size,
533            M::ProvenanceExtra,
534        ) -> InterpResult<'tcx, (Size, Align, T)>,
535    ) -> InterpResult<'tcx, Option<T>> {
536        // Everything is okay with size 0.
537        if size == 0 {
538            return interp_ok(None);
539        }
540
541        interp_ok(match this.borrow().ptr_try_get_alloc_id(ptr, size) {
542            Err(addr) => {
543                // We couldn't get a proper allocation.
544                throw_ub!(DanglingIntPointer { addr, inbounds_size: size, msg });
545            }
546            Ok((alloc_id, offset, prov)) => {
547                let tcx = this.borrow().tcx;
548                let (alloc_size, _alloc_align, ret_val) = alloc_size(this, alloc_id, offset, prov)?;
549                let offset = offset.bytes();
550                // Compute absolute begin and end of the range.
551                let (begin, end) = if size >= 0 {
552                    (Some(offset), offset.checked_add(size as u64))
553                } else {
554                    (offset.checked_sub(size.unsigned_abs()), Some(offset))
555                };
556                // Ensure both are within bounds.
557                let in_bounds = begin.is_some() && end.is_some_and(|e| e <= alloc_size.bytes());
558                if !in_bounds {
559                    throw_ub!(PointerOutOfBounds {
560                        alloc_id,
561                        alloc_size,
562                        ptr_offset: tcx.sign_extend_to_target_isize(offset),
563                        inbounds_size: size,
564                        msg,
565                    })
566                }
567
568                Some(ret_val)
569            }
570        })
571    }
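    // Worked example for the signed-size logic above (hypothetical numbers): with
    // `offset = 8`, `alloc_size = 16`, and `size = -4`, the checked range is
    // `begin = 8 - 4 = 4` to `end = 8`, which is in-bounds; with `size = 12` instead,
    // the range is `begin = 8` to `end = 20 > 16`, so `PointerOutOfBounds` is raised.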
572
573    pub(super) fn check_misalign(
574        &self,
575        misaligned: Option<Misalignment>,
576        msg: CheckAlignMsg,
577    ) -> InterpResult<'tcx> {
578        if let Some(misaligned) = misaligned {
579            throw_ub!(AlignmentCheckFailed(misaligned, msg))
580        }
581        interp_ok(())
582    }
583
584    pub(super) fn is_ptr_misaligned(
585        &self,
586        ptr: Pointer<Option<M::Provenance>>,
587        align: Align,
588    ) -> Option<Misalignment> {
589        if !M::enforce_alignment(self) || align.bytes() == 1 {
590            return None;
591        }
592
593        #[inline]
594        fn is_offset_misaligned(offset: u64, align: Align) -> Option<Misalignment> {
595            if offset.is_multiple_of(align.bytes()) {
596                None
597            } else {
598                // The biggest power of two through which `offset` is divisible.
599                let offset_pow2 = 1 << offset.trailing_zeros();
600                Some(Misalignment { has: Align::from_bytes(offset_pow2).unwrap(), required: align })
601            }
602        }
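        // Worked example (hypothetical numbers): for `offset = 12` (0b1100) and a required
        // alignment of 8, `offset.trailing_zeros()` is 2, so `offset_pow2` is 4 and the
        // offset is reported as having alignment 4 while 8 is required.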
603
604        match self.ptr_try_get_alloc_id(ptr, 0) {
605            Err(addr) => is_offset_misaligned(addr, align),
606            Ok((alloc_id, offset, _prov)) => {
607                let alloc_info = self.get_alloc_info(alloc_id);
608                if let Some(misalign) = M::alignment_check(
609                    self,
610                    alloc_id,
611                    alloc_info.align,
612                    alloc_info.kind,
613                    offset,
614                    align,
615                ) {
616                    Some(misalign)
617                } else if M::Provenance::OFFSET_IS_ADDR {
618                    is_offset_misaligned(ptr.addr().bytes(), align)
619                } else {
620                    // Check allocation alignment and offset alignment.
621                    if alloc_info.align.bytes() < align.bytes() {
622                        Some(Misalignment { has: alloc_info.align, required: align })
623                    } else {
624                        is_offset_misaligned(offset.bytes(), align)
625                    }
626                }
627            }
628        }
629    }
630
631    /// Checks a pointer for misalignment.
632    ///
633    /// The error assumes this is checking the pointer used directly for an access.
634    pub fn check_ptr_align(
635        &self,
636        ptr: Pointer<Option<M::Provenance>>,
637        align: Align,
638    ) -> InterpResult<'tcx> {
639        self.check_misalign(self.is_ptr_misaligned(ptr, align), CheckAlignMsg::AccessedPtr)
640    }
641}
642
643impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
644    /// This function is used by Miri's provenance GC to remove unreachable entries from the dead_alloc_map.
645    pub fn remove_unreachable_allocs(&mut self, reachable_allocs: &FxHashSet<AllocId>) {
646        // Unlike all the other GC helpers where we check if an `AllocId` is found in the interpreter or
647        // is live, here all the IDs in the map are for dead allocations so we don't
648        // need to check for liveness.
649        #[allow(rustc::potential_query_instability)] // Only used from Miri, not queries.
650        self.memory.dead_alloc_map.retain(|id, _| reachable_allocs.contains(id));
651    }
652}
653
654/// Allocation accessors
655impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
656    /// Helper function to obtain a global (tcx) allocation.
657    /// This attempts to return a reference to an existing allocation if
658    /// one can be found in `tcx`. That, however, is only possible if `tcx` and
659    /// this machine use the same pointer provenance, so it is indirected through
660    /// `M::adjust_global_allocation`.
661    fn get_global_alloc(
662        &self,
663        id: AllocId,
664        is_write: bool,
665    ) -> InterpResult<'tcx, Cow<'tcx, Allocation<M::Provenance, M::AllocExtra, M::Bytes>>> {
666        let (alloc, def_id) = match self.tcx.try_get_global_alloc(id) {
667            Some(GlobalAlloc::Memory(mem)) => {
668                // Memory of a constant or promoted or anonymous memory referenced by a static.
669                (mem, None)
670            }
671            Some(GlobalAlloc::Function { .. }) => throw_ub!(DerefFunctionPointer(id)),
672            Some(GlobalAlloc::VTable(..)) => throw_ub!(DerefVTablePointer(id)),
673            Some(GlobalAlloc::TypeId { .. }) => throw_ub!(DerefTypeIdPointer(id)),
674            None => throw_ub!(PointerUseAfterFree(id, CheckInAllocMsg::MemoryAccess)),
675            Some(GlobalAlloc::Static(def_id)) => {
676                assert!(self.tcx.is_static(def_id));
677                // Thread-local statics do not have a constant address. They *must* be accessed via
678                // `ThreadLocalRef`; we can never have a pointer to them as a regular constant value.
679                assert!(!self.tcx.is_thread_local_static(def_id));
680                // Notice that every static has two `AllocId`s that resolve to the same
681                // thing here: one maps to `GlobalAlloc::Static` (this is the "lazy" ID),
682                // and the other maps to `GlobalAlloc::Memory` (this is returned by
683                // `eval_static_initializer` and is the "resolved" ID).
684                // The resolved ID is never used by the interpreted program; it is hidden.
685                // This is relied upon for soundness of const-patterns; a pointer to the resolved
686                // ID would "sidestep" the checks that make sure consts do not point to statics!
687                // The `GlobalAlloc::Memory` branch here is still reachable though; when a static
688                // contains a reference to memory that was created during its evaluation (i.e., not
689                // to another static), those inner references only exist in "resolved" form.
690                if self.tcx.is_foreign_item(def_id) {
691                    // This is unreachable in Miri, but can happen in CTFE where we actually *do* support
692                    // referencing arbitrary (declared) extern statics.
693                    throw_unsup!(ExternStatic(def_id));
694                }
695
696                // We don't give a span -- statics don't need that, they cannot be generic or associated.
697                let val = self.ctfe_query(|tcx| tcx.eval_static_initializer(def_id))?;
698                (val, Some(def_id))
699            }
700        };
701        M::before_access_global(self.tcx, &self.machine, id, alloc, def_id, is_write)?;
702        // We got tcx memory. Let the machine initialize its "extra" stuff.
703        M::adjust_global_allocation(
704            self,
705            id, // always use the ID we got as input, not the "hidden" one.
706            alloc.inner(),
707        )
708    }
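    // Illustration of the "two `AllocId`s per static" scheme described above (hypothetical
    // example): for `static S: i32 = 5;`, pointers used by the program carry the lazy ID
    // that maps to `GlobalAlloc::Static(S)`, while `eval_static_initializer` yields a
    // distinct, hidden ID mapping to `GlobalAlloc::Memory` that holds the bytes of `5`.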
709
710    /// Gives raw access to the `Allocation`, without bounds or alignment checks.
711    /// The caller is responsible for calling the access hooks!
712    ///
713    /// You almost certainly want to use `get_ptr_alloc`/`get_ptr_alloc_mut` instead.
714    pub fn get_alloc_raw(
715        &self,
716        id: AllocId,
717    ) -> InterpResult<'tcx, &Allocation<M::Provenance, M::AllocExtra, M::Bytes>> {
718        // The error type of the inner closure here is somewhat funny. We have two
719        // ways of "erroring": An actual error, or because we got a reference from
720        // `get_global_alloc` that we can actually use directly without inserting anything anywhere.
721        // So the error type is `InterpResult<'tcx, &Allocation<M::Provenance>>`.
722        let a = self.memory.alloc_map.get_or(id, || {
723            // We have to funnel the `InterpErrorInfo` through a `Result` to match the `get_or` API,
724            // so we use `report_err` for that.
725            let alloc = self.get_global_alloc(id, /*is_write*/ false).report_err().map_err(Err)?;
726            match alloc {
727                Cow::Borrowed(alloc) => {
728                    // We got a ref, cheaply return that as an "error" so that the
729                    // map does not get mutated.
730                    Err(Ok(alloc))
731                }
732                Cow::Owned(alloc) => {
733                    // Need to put it into the map and return a ref to that
734                    let kind = M::GLOBAL_KIND.expect(
735                        "I got a global allocation that I have to copy but the machine does \
736                            not expect that to happen",
737                    );
738                    Ok((MemoryKind::Machine(kind), alloc))
739                }
740            }
741        });
742        // Now unpack that funny error type
743        match a {
744            Ok(a) => interp_ok(&a.1),
745            Err(a) => a.into(),
746        }
747    }
748
749    /// Gives raw, immutable access to the `Allocation` address, without bounds or alignment checks.
750    /// The caller is responsible for calling the access hooks!
751    pub fn get_alloc_bytes_unchecked_raw(&self, id: AllocId) -> InterpResult<'tcx, *const u8> {
752        let alloc = self.get_alloc_raw(id)?;
753        interp_ok(alloc.get_bytes_unchecked_raw())
754    }
755
756    /// Bounds-checked *but not align-checked* allocation access.
757    pub fn get_ptr_alloc<'a>(
758        &'a self,
759        ptr: Pointer<Option<M::Provenance>>,
760        size: Size,
761    ) -> InterpResult<'tcx, Option<AllocRef<'a, 'tcx, M::Provenance, M::AllocExtra, M::Bytes>>>
762    {
763        let size_i64 = i64::try_from(size.bytes()).unwrap(); // it would be an error to even ask for more than isize::MAX bytes
764        let ptr_and_alloc = Self::check_and_deref_ptr(
765            self,
766            ptr,
767            size_i64,
768            CheckInAllocMsg::MemoryAccess,
769            |this, alloc_id, offset, prov| {
770                let alloc = this.get_alloc_raw(alloc_id)?;
771                interp_ok((alloc.size(), alloc.align, (alloc_id, offset, prov, alloc)))
772            },
773        )?;
774        // We want to call the hook on *all* accesses that involve an AllocId, including zero-sized
775        // accesses. That means we cannot rely on the closure above or the `Some` branch below. We
776        // do this after `check_and_deref_ptr` to ensure some basic sanity has already been checked.
777        if !self.memory.validation_in_progress.get() {
778            if let Ok((alloc_id, ..)) = self.ptr_try_get_alloc_id(ptr, size_i64) {
779                M::before_alloc_access(self.tcx, &self.machine, alloc_id)?;
780            }
781        }
782
783        if let Some((alloc_id, offset, prov, alloc)) = ptr_and_alloc {
784            let range = alloc_range(offset, size);
785            if !self.memory.validation_in_progress.get() {
786                M::before_memory_read(
787                    self.tcx,
788                    &self.machine,
789                    &alloc.extra,
790                    ptr,
791                    (alloc_id, prov),
792                    range,
793                )?;
794            }
795            interp_ok(Some(AllocRef { alloc, range, tcx: *self.tcx, alloc_id }))
796        } else {
797            interp_ok(None)
798        }
799    }
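    // Typical use by a hypothetical caller: `ecx.get_ptr_alloc(ptr, size)?` performs the
    // bounds check and runs the access hooks; on `Some(alloc_ref)`, the caller reads
    // through the returned `AllocRef` (e.g. via `read_scalar`). Alignment is *not*
    // checked here and must be verified separately, e.g. with `check_ptr_align`.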
800
801    /// Return the `extra` field of the given allocation.
802    pub fn get_alloc_extra<'a>(&'a self, id: AllocId) -> InterpResult<'tcx, &'a M::AllocExtra> {
803        interp_ok(&self.get_alloc_raw(id)?.extra)
804    }
805
806    /// Return the `mutability` field of the given allocation.
807    pub fn get_alloc_mutability<'a>(&'a self, id: AllocId) -> InterpResult<'tcx, Mutability> {
808        interp_ok(self.get_alloc_raw(id)?.mutability)
809    }
810
811    /// Gives raw mutable access to the `Allocation`, without bounds or alignment checks.
812    /// The caller is responsible for calling the access hooks!
813    ///
814    /// Also returns a mutable reference to the machine so that the caller can use it in
815    /// parallel with the allocation.
816    ///
817    /// You almost certainly want to use `get_ptr_alloc`/`get_ptr_alloc_mut` instead.
818    pub fn get_alloc_raw_mut(
819        &mut self,
820        id: AllocId,
821    ) -> InterpResult<'tcx, (&mut Allocation<M::Provenance, M::AllocExtra, M::Bytes>, &mut M)> {
822        // We have "NLL problem case #3" here, which cannot be worked around without loss of
823        // efficiency even for the common case where the key is in the map.
824        // <https://rust-lang.github.io/rfcs/2094-nll.html#problem-case-3-conditional-control-flow-across-functions>
825        // (Cannot use `get_mut_or` since `get_global_alloc` needs `&self`, and that boils down to
826        // Miri's `adjust_alloc_root_pointer` needing to look up the size of the allocation.
827        // It could be avoided with a totally separate codepath in Miri for handling the absolute address
828        // of global allocations, but that's not worth it.)
829        if self.memory.alloc_map.get_mut(id).is_none() {
830            // Slow path.
831            // Allocation not found locally, so look it up globally.
832            let alloc = self.get_global_alloc(id, /*is_write*/ true)?;
833            let kind = M::GLOBAL_KIND.expect(
834                "I got a global allocation that I have to copy but the machine does \
835                    not expect that to happen",
836            );
837            self.memory.alloc_map.insert(id, (MemoryKind::Machine(kind), alloc.into_owned()));
838        }
839
840        let (_kind, alloc) = self.memory.alloc_map.get_mut(id).unwrap();
841        if alloc.mutability.is_not() {
842            throw_ub!(WriteToReadOnly(id))
843        }
844        interp_ok((alloc, &mut self.machine))
845    }
846
847    /// Gives raw, mutable access to the `Allocation` address, without bounds or alignment checks.
848    /// The caller is responsible for calling the access hooks!
849    pub fn get_alloc_bytes_unchecked_raw_mut(
850        &mut self,
851        id: AllocId,
852    ) -> InterpResult<'tcx, *mut u8> {
853        let alloc = self.get_alloc_raw_mut(id)?.0;
854        interp_ok(alloc.get_bytes_unchecked_raw_mut())
855    }
856
857    /// Bounds-checked *but not align-checked* allocation access.
858    pub fn get_ptr_alloc_mut<'a>(
859        &'a mut self,
860        ptr: Pointer<Option<M::Provenance>>,
861        size: Size,
862    ) -> InterpResult<'tcx, Option<AllocRefMut<'a, 'tcx, M::Provenance, M::AllocExtra, M::Bytes>>>
863    {
864        let tcx = self.tcx;
865        let validation_in_progress = self.memory.validation_in_progress.get();
866
867        let size_i64 = i64::try_from(size.bytes()).unwrap(); // it would be an error to even ask for more than isize::MAX bytes
868        let ptr_and_alloc = Self::check_and_deref_ptr(
869            self,
870            ptr,
871            size_i64,
872            CheckInAllocMsg::MemoryAccess,
873            |this, alloc_id, offset, prov| {
874                let (alloc, machine) = this.get_alloc_raw_mut(alloc_id)?;
875                interp_ok((alloc.size(), alloc.align, (alloc_id, offset, prov, alloc, machine)))
876            },
877        )?;
878
879        if let Some((alloc_id, offset, prov, alloc, machine)) = ptr_and_alloc {
880            let range = alloc_range(offset, size);
881            if !validation_in_progress {
882                // For writes, it's okay to only call those when there actually is a non-zero
883                // amount of bytes to be written: a zero-sized write doesn't manifest anything.
884                M::before_alloc_access(tcx, machine, alloc_id)?;
885                M::before_memory_write(
886                    tcx,
887                    machine,
888                    &mut alloc.extra,
889                    ptr,
890                    (alloc_id, prov),
891                    range,
892                )?;
893            }
894            interp_ok(Some(AllocRefMut { alloc, range, tcx: *tcx, alloc_id }))
895        } else {
896            interp_ok(None)
897        }
898    }
899
900    /// Return the `extra` field of the given allocation.
901    pub fn get_alloc_extra_mut<'a>(
902        &'a mut self,
903        id: AllocId,
904    ) -> InterpResult<'tcx, (&'a mut M::AllocExtra, &'a mut M)> {
905        let (alloc, machine) = self.get_alloc_raw_mut(id)?;
906        interp_ok((&mut alloc.extra, machine))
907    }
908
909    /// Check whether an allocation is live. This is faster than calling
910    /// [`InterpCx::get_alloc_info`] if all you need to check is whether the kind is
911    /// [`AllocKind::Dead`] because it doesn't have to look up the type and layout of statics.
912    pub fn is_alloc_live(&self, id: AllocId) -> bool {
913        self.memory.alloc_map.contains_key_ref(&id)
914            || self.memory.extra_fn_ptr_map.contains_key(&id)
915            // We check `tcx` last as that has to acquire a lock in `many-seeds` mode.
916            // This also matches the order in `get_alloc_info`.
917            || self.tcx.try_get_global_alloc(id).is_some()
918    }
919
920    /// Obtain the size and alignment of an allocation, even if that allocation has
921    /// been deallocated.
922    pub fn get_alloc_info(&self, id: AllocId) -> AllocInfo {
923        // # Regular allocations
924        // Don't use `self.get_alloc_raw` here as that will
925        // a) cause cycles in case `id` refers to a static
926        // b) duplicate a global's allocation in Miri
927        if let Some((_, alloc)) = self.memory.alloc_map.get(id) {
928            return AllocInfo::new(
929                alloc.size(),
930                alloc.align,
931                AllocKind::LiveData,
932                alloc.mutability,
933            );
934        }
935
936        // # Function pointers
937        // (both global from `alloc_map` and local from `extra_fn_ptr_map`)
938        if let Some(fn_val) = self.get_fn_alloc(id) {
939            let align = match fn_val {
940                FnVal::Instance(_instance) => {
941                    // FIXME: Until we have a clear design for the effects of align(N) functions
942                    // on the address of function pointers, we don't consider the align(N)
943                    // attribute on functions in the interpreter.
944                    // See <https://github.com/rust-lang/rust/issues/144661> for more context.
945                    Align::ONE
946                }
947                // Machine-specific extra functions currently do not support alignment restrictions.
948                FnVal::Other(_) => Align::ONE,
949            };
950
951            return AllocInfo::new(Size::ZERO, align, AllocKind::Function, Mutability::Not);
952        }
953
954        // # Global allocations
955        if let Some(global_alloc) = self.tcx.try_get_global_alloc(id) {
956            let (size, align) = global_alloc.size_and_align(*self.tcx, self.typing_env);
957            let mutbl = global_alloc.mutability(*self.tcx, self.typing_env);
958            let kind = match global_alloc {
959                GlobalAlloc::Static { .. } | GlobalAlloc::Memory { .. } => AllocKind::LiveData,
960                GlobalAlloc::Function { .. } => bug!("We already checked function pointers above"),
961                GlobalAlloc::VTable { .. } => AllocKind::VTable,
962                GlobalAlloc::TypeId { .. } => AllocKind::TypeId,
963            };
964            return AllocInfo::new(size, align, kind, mutbl);
965        }
966
967        // # Dead pointers
968        let (size, align) = *self
969            .memory
970            .dead_alloc_map
971            .get(&id)
972            .expect("deallocated pointers should all be recorded in `dead_alloc_map`");
973        AllocInfo::new(size, align, AllocKind::Dead, Mutability::Not)
974    }
975
976    /// Obtain the size and alignment of a *live* allocation.
977    fn get_live_alloc_size_and_align(
978        &self,
979        id: AllocId,
980        msg: CheckInAllocMsg,
981    ) -> InterpResult<'tcx, (Size, Align)> {
982        let info = self.get_alloc_info(id);
983        if matches!(info.kind, AllocKind::Dead) {
984            throw_ub!(PointerUseAfterFree(id, msg))
985        }
986        interp_ok((info.size, info.align))
987    }
988
989    fn get_fn_alloc(&self, id: AllocId) -> Option<FnVal<'tcx, M::ExtraFnVal>> {
990        if let Some(extra) = self.memory.extra_fn_ptr_map.get(&id) {
991            Some(FnVal::Other(*extra))
992        } else {
993            match self.tcx.try_get_global_alloc(id) {
994                Some(GlobalAlloc::Function { instance, .. }) => Some(FnVal::Instance(instance)),
995                _ => None,
996            }
997        }
998    }
999
1000    /// Takes a pointer that is the first chunk of a `TypeId` and returns the type that its
1001    /// provenance refers to, as well as the segment of the hash that this pointer covers.
1002    pub fn get_ptr_type_id(
1003        &self,
1004        ptr: Pointer<Option<M::Provenance>>,
1005    ) -> InterpResult<'tcx, (Ty<'tcx>, u64)> {
1006        let (alloc_id, offset, _meta) = self.ptr_get_alloc_id(ptr, 0)?;
1007        let Some(GlobalAlloc::TypeId { ty }) = self.tcx.try_get_global_alloc(alloc_id) else {
1008            throw_ub_format!("invalid `TypeId` value: not all bytes carry type id metadata")
1009        };
1010        interp_ok((ty, offset.bytes()))
1011    }
1012
1013    pub fn get_ptr_fn(
1014        &self,
1015        ptr: Pointer<Option<M::Provenance>>,
1016    ) -> InterpResult<'tcx, FnVal<'tcx, M::ExtraFnVal>> {
1017        trace!("get_ptr_fn({:?})", ptr);
1018        let (alloc_id, offset, _prov) = self.ptr_get_alloc_id(ptr, 0)?;
1019        if offset.bytes() != 0 {
1020            throw_ub!(InvalidFunctionPointer(Pointer::new(alloc_id, offset)))
1021        }
1022        self.get_fn_alloc(alloc_id)
1023            .ok_or_else(|| err_ub!(InvalidFunctionPointer(Pointer::new(alloc_id, offset))))
1024            .into()
1025    }
1026
1027    /// Get the dynamic type of the given vtable pointer.
1028    /// If `expected_trait` is `Some`, it must be a vtable for the given trait.
1029    pub fn get_ptr_vtable_ty(
1030        &self,
1031        ptr: Pointer<Option<M::Provenance>>,
1032        expected_trait: Option<&'tcx ty::List<ty::PolyExistentialPredicate<'tcx>>>,
1033    ) -> InterpResult<'tcx, Ty<'tcx>> {
1034        trace!("get_ptr_vtable({:?})", ptr);
1035        let (alloc_id, offset, _tag) = self.ptr_get_alloc_id(ptr, 0)?;
1036        if offset.bytes() != 0 {
1037            throw_ub!(InvalidVTablePointer(Pointer::new(alloc_id, offset)))
1038        }
1039        let Some(GlobalAlloc::VTable(ty, vtable_dyn_type)) =
1040            self.tcx.try_get_global_alloc(alloc_id)
1041        else {
1042            throw_ub!(InvalidVTablePointer(Pointer::new(alloc_id, offset)))
1043        };
1044        if let Some(expected_dyn_type) = expected_trait {
1045            self.check_vtable_for_type(vtable_dyn_type, expected_dyn_type)?;
1046        }
1047        interp_ok(ty)
1048    }
1049
1050    pub fn alloc_mark_immutable(&mut self, id: AllocId) -> InterpResult<'tcx> {
1051        self.get_alloc_raw_mut(id)?.0.mutability = Mutability::Not;
1052        interp_ok(())
1053    }
1054
1055    /// Visit all allocations reachable from the given start set, by recursively traversing the
1056    /// provenance information of those allocations.
1057    pub fn visit_reachable_allocs(
1058        &mut self,
1059        start: Vec<AllocId>,
1060        mut visit: impl FnMut(&mut Self, AllocId, &AllocInfo) -> InterpResult<'tcx>,
1061    ) -> InterpResult<'tcx> {
1062        let mut done = FxHashSet::default();
1063        let mut todo = start;
1064        while let Some(id) = todo.pop() {
1065            if !done.insert(id) {
1066                // We already saw this allocation before, don't process it again.
1067                continue;
1068            }
1069            let info = self.get_alloc_info(id);
1070
1071            // Recurse, if there is data here.
1072            // Do this *before* invoking the callback, as the callback might mutate the
1073            // allocation and e.g. replace all provenance by wildcards!
1074            if matches!(info.kind, AllocKind::LiveData) {
1075                let alloc = self.get_alloc_raw(id)?;
1076                for prov in alloc.provenance().provenances() {
1077                    if let Some(id) = prov.get_alloc_id() {
1078                        todo.push(id);
1079                    }
1080                }
1081            }
1082
1083            // Call the callback.
1084            visit(self, id, &info)?;
1085        }
1086        interp_ok(())
1087    }
1088
1089    /// Create a lazy debug printer that prints the given allocation and all allocations it points
1090    /// to, recursively.
1091    #[must_use]
1092    pub fn dump_alloc<'a>(&'a self, id: AllocId) -> DumpAllocs<'a, 'tcx, M> {
1093        self.dump_allocs(vec![id])
1094    }
1095
1096    /// Create a lazy debug printer for a list of allocations and all allocations they point to,
1097    /// recursively.
1098    #[must_use]
1099    pub fn dump_allocs<'a>(&'a self, mut allocs: Vec<AllocId>) -> DumpAllocs<'a, 'tcx, M> {
1100        allocs.sort();
1101        allocs.dedup();
1102        DumpAllocs { ecx: self, allocs }
1103    }
1104
1105    /// Print the allocation's bytes, without any nested allocations.
1106    pub fn print_alloc_bytes_for_diagnostics(&self, id: AllocId) -> String {
1107        // Using the "raw" access to avoid the `before_alloc_access` hook, we specifically
1108        // want to be able to read all memory for diagnostics, even if that is cyclic.
1109        let alloc = self.get_alloc_raw(id).unwrap();
1110        let mut bytes = String::new();
1111        if alloc.size() != Size::ZERO {
1112            bytes = "\n".into();
1113            // FIXME(translation) there might be pieces that are translatable.
1114            rustc_middle::mir::pretty::write_allocation_bytes(*self.tcx, alloc, &mut bytes, "    ")
1115                .unwrap();
1116        }
1117        bytes
1118    }
1119
1120    /// Find leaked allocations, remove them from memory, and return them. Allocations reachable from
1121    /// `static_roots` or a `Global` allocation are not considered leaked, nor are allocations whose
1122    /// kind's `may_leak()` returns true.
1123    ///
1124    /// This is highly destructive, no more execution can happen after this!
1125    pub fn take_leaked_allocations(
1126        &mut self,
1127        static_roots: impl FnOnce(&Self) -> &[AllocId],
1128    ) -> Vec<(AllocId, MemoryKind<M::MemoryKind>, Allocation<M::Provenance, M::AllocExtra, M::Bytes>)>
1129    {
1130        // Collect the set of allocations that are *reachable* from `Global` allocations.
1131        let reachable = {
1132            let mut reachable = FxHashSet::default();
1133            let global_kind = M::GLOBAL_KIND.map(MemoryKind::Machine);
1134            let mut todo: Vec<_> =
1135                self.memory.alloc_map.filter_map_collect(move |&id, &(kind, _)| {
1136                    if Some(kind) == global_kind { Some(id) } else { None }
1137                });
1138            todo.extend(static_roots(self));
1139            while let Some(id) = todo.pop() {
1140                if reachable.insert(id) {
1141                    // This is a new allocation, so push the allocations it points to onto `todo`.
1142                    // We only need to care about `alloc_map` memory here, as entirely unchanged
1143                    // global memory cannot point to memory relevant for the leak check.
1144                    if let Some((_, alloc)) = self.memory.alloc_map.get(id) {
1145                        todo.extend(
1146                            alloc.provenance().provenances().filter_map(|prov| prov.get_alloc_id()),
1147                        );
1148                    }
1149                }
1150            }
1151            reachable
1152        };
1153
1154        // All allocations that are *not* `reachable` and *not* `may_leak` are considered leaking.
1155        let leaked: Vec<_> = self.memory.alloc_map.filter_map_collect(|&id, &(kind, _)| {
1156            if kind.may_leak() || reachable.contains(&id) { None } else { Some(id) }
1157        });
1158        let mut result = Vec::new();
1159        for &id in leaked.iter() {
1160            let (kind, alloc) = self.memory.alloc_map.remove(&id).unwrap();
1161            result.push((id, kind, alloc));
1162        }
1163        result
1164    }
1165
1166    /// Runs the closure in "validation" mode, which means the machine's memory read hooks will be
1167    /// suppressed. Needless to say, this must only be set with great care! Cannot be nested.
1168    ///
1169    /// We do this so Miri's allocation access tracking does not show the validation
1170    /// reads as spurious accesses.
1171    pub fn run_for_validation_mut<R>(&mut self, f: impl FnOnce(&mut Self) -> R) -> R {
1172        // This deliberately uses `==` on `bool` to follow the pattern
1173        // `assert!(val.replace(new) == old)`.
1174        assert!(
1175            self.memory.validation_in_progress.replace(true) == false,
1176            "`validation_in_progress` was already set"
1177        );
1178        let res = f(self);
1179        assert!(
1180            self.memory.validation_in_progress.replace(false) == true,
1181            "`validation_in_progress` was unset by someone else"
1182        );
1183        res
1184    }
1185
1186    /// Runs the closure in "validation" mode, which means the machine's memory read hooks will be
1187    /// suppressed. Needless to say, this must only be set with great care! Cannot be nested.
1188    ///
1189    /// We do this so Miri's allocation access tracking does not show the validation
1190    /// reads as spurious accesses.
1191    pub fn run_for_validation_ref<R>(&self, f: impl FnOnce(&Self) -> R) -> R {
1192        // This deliberately uses `==` on `bool` to follow the pattern
1193        // `assert!(val.replace(new) == old)`.
1194        assert!(
1195            self.memory.validation_in_progress.replace(true) == false,
1196            "`validation_in_progress` was already set"
1197        );
1198        let res = f(self);
1199        assert!(
1200            self.memory.validation_in_progress.replace(false) == true,
1201            "`validation_in_progress` was unset by someone else"
1202        );
1203        res
1204    }
1205
1206    pub(super) fn validation_in_progress(&self) -> bool {
1207        self.memory.validation_in_progress.get()
1208    }
1209}
1210
1211#[doc(hidden)]
1212/// There's no way to use this directly, it's just a helper struct for the `dump_alloc(s)` methods.
1213pub struct DumpAllocs<'a, 'tcx, M: Machine<'tcx>> {
1214    ecx: &'a InterpCx<'tcx, M>,
1215    allocs: Vec<AllocId>,
1216}
1217
1218impl<'a, 'tcx, M: Machine<'tcx>> std::fmt::Debug for DumpAllocs<'a, 'tcx, M> {
1219    fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
1220        // Cannot be a closure because it is generic in `Prov`, `Extra`.
1221        fn write_allocation_track_relocs<'tcx, Prov: Provenance, Extra, Bytes: AllocBytes>(
1222            fmt: &mut std::fmt::Formatter<'_>,
1223            tcx: TyCtxt<'tcx>,
1224            allocs_to_print: &mut VecDeque<AllocId>,
1225            alloc: &Allocation<Prov, Extra, Bytes>,
1226        ) -> std::fmt::Result {
1227            for alloc_id in alloc.provenance().provenances().filter_map(|prov| prov.get_alloc_id())
1228            {
1229                allocs_to_print.push_back(alloc_id);
1230            }
1231            write!(fmt, "{}", display_allocation(tcx, alloc))
1232        }
1233
1234        let mut allocs_to_print: VecDeque<_> = self.allocs.iter().copied().collect();
1235        // `allocs_printed` contains all allocations that we have already printed.
1236        let mut allocs_printed = FxHashSet::default();
1237
1238        while let Some(id) = allocs_to_print.pop_front() {
1239            if !allocs_printed.insert(id) {
1240                // Already printed, so skip this.
1241                continue;
1242            }
1243
1244            write!(fmt, "{id:?}")?;
1245            match self.ecx.memory.alloc_map.get(id) {
1246                Some((kind, alloc)) => {
1247                    // normal alloc
1248                    write!(fmt, " ({kind}, ")?;
1249                    write_allocation_track_relocs(
1250                        &mut *fmt,
1251                        *self.ecx.tcx,
1252                        &mut allocs_to_print,
1253                        alloc,
1254                    )?;
1255                }
1256                None => {
1257                    // global alloc
1258                    match self.ecx.tcx.try_get_global_alloc(id) {
1259                        Some(GlobalAlloc::Memory(alloc)) => {
1260                            write!(fmt, " (unchanged global, ")?;
1261                            write_allocation_track_relocs(
1262                                &mut *fmt,
1263                                *self.ecx.tcx,
1264                                &mut allocs_to_print,
1265                                alloc.inner(),
1266                            )?;
1267                        }
1268                        Some(GlobalAlloc::Function { instance, .. }) => {
1269                            write!(fmt, " (fn: {instance})")?;
1270                        }
1271                        Some(GlobalAlloc::VTable(ty, dyn_ty)) => {
1272                            write!(fmt, " (vtable: impl {dyn_ty} for {ty})")?;
1273                        }
1274                        Some(GlobalAlloc::TypeId { ty }) => {
1275                            write!(fmt, " (typeid for {ty})")?;
1276                        }
1277                        Some(GlobalAlloc::Static(did)) => {
1278                            write!(fmt, " (static: {})", self.ecx.tcx.def_path_str(did))?;
1279                        }
1280                        None => {
1281                            write!(fmt, " (deallocated)")?;
1282                        }
1283                    }
1284                }
1285            }
1286            writeln!(fmt)?;
1287        }
1288        Ok(())
1289    }
1290}
1291
1292/// Reading and writing.
1293impl<'a, 'tcx, Prov: Provenance, Extra, Bytes: AllocBytes>
1294    AllocRefMut<'a, 'tcx, Prov, Extra, Bytes>
1295{
1296    pub fn as_ref<'b>(&'b self) -> AllocRef<'b, 'tcx, Prov, Extra, Bytes> {
1297        AllocRef { alloc: self.alloc, range: self.range, tcx: self.tcx, alloc_id: self.alloc_id }
1298    }
1299
1300    /// `range` is relative to this allocation reference, not the base of the allocation.
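    ///
    /// For example (a sketch, assuming `alloc` is an `AllocRefMut` and `val` a suitably sized
    /// `Scalar`), this writes 4 bytes at offset 8 *within the referenced range*:
    /// ```ignore
    /// alloc.write_scalar(alloc_range(Size::from_bytes(8), Size::from_bytes(4)), val)?;
    /// ```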
1301    pub fn write_scalar(&mut self, range: AllocRange, val: Scalar<Prov>) -> InterpResult<'tcx> {
1302        let range = self.range.subrange(range);
1303        debug!("write_scalar at {:?}{range:?}: {val:?}", self.alloc_id);
1304
1305        self.alloc
1306            .write_scalar(&self.tcx, range, val)
1307            .map_err(|e| e.to_interp_error(self.alloc_id))
1308            .into()
1309    }
1310
1311    /// `offset` is relative to this allocation reference, not the base of the allocation.
1312    pub fn write_ptr_sized(&mut self, offset: Size, val: Scalar<Prov>) -> InterpResult<'tcx> {
1313        self.write_scalar(alloc_range(offset, self.tcx.data_layout().pointer_size()), val)
1314    }
1315
1316    /// Mark the given sub-range (relative to this allocation reference) as uninitialized.
1317    pub fn write_uninit(&mut self, range: AllocRange) {
1318        let range = self.range.subrange(range);
1319
1320        self.alloc.write_uninit(&self.tcx, range);
1321    }
1322
1323    /// Mark the entire referenced range as uninitialized.
1324    pub fn write_uninit_full(&mut self) {
1325        self.alloc.write_uninit(&self.tcx, self.range);
1326    }
1327
1328    /// Remove all provenance in the referenced range.
1329    pub fn clear_provenance(&mut self) {
1330        self.alloc.clear_provenance(&self.tcx, self.range);
1331    }
1332}
1333
1334impl<'a, 'tcx, Prov: Provenance, Extra, Bytes: AllocBytes> AllocRef<'a, 'tcx, Prov, Extra, Bytes> {
1335    /// `range` is relative to this allocation reference, not the base of the allocation.
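    ///
    /// For example (a sketch, assuming `alloc` is an `AllocRef`), this reads a 4-byte integer at
    /// offset 0 of the referenced range without reading provenance:
    /// ```ignore
    /// let val = alloc.read_scalar(alloc_range(Size::ZERO, Size::from_bytes(4)), /*read_provenance*/ false)?;
    /// ```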
1336    pub fn read_scalar(
1337        &self,
1338        range: AllocRange,
1339        read_provenance: bool,
1340    ) -> InterpResult<'tcx, Scalar<Prov>> {
1341        let range = self.range.subrange(range);
1342        self.alloc
1343            .read_scalar(&self.tcx, range, read_provenance)
1344            .map_err(|e| e.to_interp_error(self.alloc_id))
1345            .into()
1346    }
1347
1348    /// `range` is relative to this allocation reference, not the base of the allocation.
1349    pub fn read_integer(&self, range: AllocRange) -> InterpResult<'tcx, Scalar<Prov>> {
1350        self.read_scalar(range, /*read_provenance*/ false)
1351    }
1352
1353    /// `offset` is relative to this allocation reference, not the base of the allocation.
1354    pub fn read_pointer(&self, offset: Size) -> InterpResult<'tcx, Scalar<Prov>> {
1355        self.read_scalar(
1356            alloc_range(offset, self.tcx.data_layout().pointer_size()),
1357            /*read_provenance*/ true,
1358        )
1359    }
1360
1361    /// Returns the bytes covering the entire referenced range, with their provenance stripped if possible.
1362    pub fn get_bytes_strip_provenance<'b>(&'b self) -> InterpResult<'tcx, &'a [u8]> {
1363        self.alloc
1364            .get_bytes_strip_provenance(&self.tcx, self.range)
1365            .map_err(|e| e.to_interp_error(self.alloc_id))
1366            .into()
1367    }
1368
1369    /// Returns whether the allocation has provenance anywhere in the range of the `AllocRef`.
1370    pub fn has_provenance(&self) -> bool {
1371        !self.alloc.provenance().range_empty(self.range, &self.tcx)
1372    }
1373}
1374
1375impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
1376    /// Reads the given number of bytes from memory, and strips their provenance if possible.
1377    /// Returns them as a slice.
1378    ///
1379    /// Performs appropriate bounds checks.
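    ///
    /// For example (a sketch, assuming `ecx` is an `InterpCx` and `ptr` points to at least 4
    /// initialized bytes):
    /// ```ignore
    /// let bytes: &[u8] = ecx.read_bytes_ptr_strip_provenance(ptr, Size::from_bytes(4))?;
    /// ```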
1380    pub fn read_bytes_ptr_strip_provenance(
1381        &self,
1382        ptr: Pointer<Option<M::Provenance>>,
1383        size: Size,
1384    ) -> InterpResult<'tcx, &[u8]> {
1385        let Some(alloc_ref) = self.get_ptr_alloc(ptr, size)? else {
1386            // zero-sized access
1387            return interp_ok(&[]);
1388        };
1389        // Side-step AllocRef and directly access the underlying bytes more efficiently.
1390        // (We are staying inside the bounds here so all is good.)
1391        interp_ok(
1392            alloc_ref
1393                .alloc
1394                .get_bytes_strip_provenance(&alloc_ref.tcx, alloc_ref.range)
1395                .map_err(|e| e.to_interp_error(alloc_ref.alloc_id))?,
1396        )
1397    }
1398
1399    /// Writes the given stream of bytes into memory.
1400    ///
1401    /// Performs appropriate bounds checks.
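    ///
    /// The iterator must report its exact length via `size_hint`, as slices and arrays do.
    /// For example (a sketch, assuming `ecx` is an `InterpCx` and `ptr` points to at least 4
    /// writable bytes):
    /// ```ignore
    /// ecx.write_bytes_ptr(ptr, [0u8, 1, 2, 3])?;
    /// ```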
1402    pub fn write_bytes_ptr(
1403        &mut self,
1404        ptr: Pointer<Option<M::Provenance>>,
1405        src: impl IntoIterator<Item = u8>,
1406    ) -> InterpResult<'tcx> {
1407        let mut src = src.into_iter();
1408        let (lower, upper) = src.size_hint();
1409        let len = upper.expect("can only write bounded iterators");
1410        assert_eq!(lower, len, "can only write iterators with a precise length");
1411
1412        let size = Size::from_bytes(len);
1413        let Some(alloc_ref) = self.get_ptr_alloc_mut(ptr, size)? else {
1414            // zero-sized access
1415            assert_matches!(src.next(), None, "iterator said it was empty but returned an element");
1416            return interp_ok(());
1417        };
1418
1419        // Side-step AllocRef and directly access the underlying bytes more efficiently.
1420        // (We are staying inside the bounds here and all bytes do get overwritten so all is good.)
1421        let bytes =
1422            alloc_ref.alloc.get_bytes_unchecked_for_overwrite(&alloc_ref.tcx, alloc_ref.range);
1423        // `zip` would stop when the first iterator ends; we want to definitely
1424        // cover all of `bytes`.
1425        for dest in bytes {
1426            *dest = src.next().expect("iterator was shorter than it said it would be");
1427        }
1428        assert_matches!(src.next(), None, "iterator was longer than it said it would be");
1429        interp_ok(())
1430    }
1431
1432    pub fn mem_copy(
1433        &mut self,
1434        src: Pointer<Option<M::Provenance>>,
1435        dest: Pointer<Option<M::Provenance>>,
1436        size: Size,
1437        nonoverlapping: bool,
1438    ) -> InterpResult<'tcx> {
1439        self.mem_copy_repeatedly(src, dest, size, 1, nonoverlapping)
1440    }
1441
1442    /// Performs `num_copies` many copies of `size` many bytes from `src` to `dest + i*size` (where
1443    /// `i` is the index of the copy).
1444    ///
1445    /// Either `nonoverlapping` must be true or `num_copies` must be 1; doing repeated copies that
1446    /// may overlap is not supported.
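    ///
    /// For example (a sketch, assuming `ecx` is an `InterpCx`, `src` points to 16 readable bytes,
    /// and `dest` has room for `16 * 8` bytes), this copies the 16-byte pattern 8 times:
    /// ```ignore
    /// ecx.mem_copy_repeatedly(src, dest, Size::from_bytes(16), 8, /*nonoverlapping*/ true)?;
    /// ```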
1447    pub fn mem_copy_repeatedly(
1448        &mut self,
1449        src: Pointer<Option<M::Provenance>>,
1450        dest: Pointer<Option<M::Provenance>>,
1451        size: Size,
1452        num_copies: u64,
1453        nonoverlapping: bool,
1454    ) -> InterpResult<'tcx> {
1455        let tcx = self.tcx;
1456        // We need to do our own bounds-checks.
1457        let src_parts = self.get_ptr_access(src, size)?;
1458        let dest_parts = self.get_ptr_access(dest, size * num_copies)?; // `Size` multiplication
1459
1460        // Similar to `get_ptr_alloc`, we need to call `before_alloc_access` even for zero-sized
1461        // reads. However, just like in `get_ptr_alloc_mut`, the write part is okay to skip for
1462        // zero-sized writes.
1463        if let Ok((alloc_id, ..)) = self.ptr_try_get_alloc_id(src, size.bytes().try_into().unwrap())
1464        {
1465            M::before_alloc_access(tcx, &self.machine, alloc_id)?;
1466        }
1467
1468        // FIXME: we look up both allocations twice here, once before for the `check_ptr_access`
1469        // and once below to get the underlying `&[mut] Allocation`.
1470
1471        // Source alloc preparations and access hooks.
1472        let Some((src_alloc_id, src_offset, src_prov)) = src_parts else {
1473            // Zero-sized *source*; that means `dest` is also zero-sized and we have nothing to do.
1474            return interp_ok(());
1475        };
1476        let src_alloc = self.get_alloc_raw(src_alloc_id)?;
1477        let src_range = alloc_range(src_offset, size);
1478        assert!(!self.memory.validation_in_progress.get(), "we can't be copying during validation");
1479
1480        // Trigger read hook.
1481        // For the overlapping case, it is crucial that we trigger the read hook
1482        // before the write hook -- the aliasing model cares about the order.
1483        M::before_memory_read(
1484            tcx,
1485            &self.machine,
1486            &src_alloc.extra,
1487            src,
1488            (src_alloc_id, src_prov),
1489            src_range,
1490        )?;
1491        // We need the `dest` ptr for the next operation, so we get it now.
1492        // We already did the source checks and called the hooks so we are good to return early.
1493        let Some((dest_alloc_id, dest_offset, dest_prov)) = dest_parts else {
1494            // Zero-sized *destination*.
1495            return interp_ok(());
1496        };
1497
1498        // Prepare getting source provenance.
1499        let src_bytes = src_alloc.get_bytes_unchecked(src_range).as_ptr(); // raw ptr, so we can also get a ptr to the destination allocation
1500        // first copy the provenance to a temporary buffer, because
1501        // `get_bytes_mut` will clear the provenance, which is correct,
1502        // since we don't want to keep any provenance at the target.
1503        // This will also error if copying partial provenance is not supported.
1504        let provenance = src_alloc
1505            .provenance()
1506            .prepare_copy(src_range, dest_offset, num_copies, self)
1507            .map_err(|e| e.to_interp_error(src_alloc_id))?;
1508        // Prepare a copy of the initialization mask.
1509        let init = src_alloc.init_mask().prepare_copy(src_range);
1510
1511        // Destination alloc preparations...
1512        let (dest_alloc, machine) = self.get_alloc_raw_mut(dest_alloc_id)?;
1513        let dest_range = alloc_range(dest_offset, size * num_copies);
1514        // ...and access hooks.
1515        M::before_alloc_access(tcx, machine, dest_alloc_id)?;
1516        M::before_memory_write(
1517            tcx,
1518            machine,
1519            &mut dest_alloc.extra,
1520            dest,
1521            (dest_alloc_id, dest_prov),
1522            dest_range,
1523        )?;
1524        // Yes we do overwrite all bytes in `dest_bytes`.
1525        let dest_bytes =
1526            dest_alloc.get_bytes_unchecked_for_overwrite_ptr(&tcx, dest_range).as_mut_ptr();
1527
1528        if init.no_bytes_init() {
1529            // Fast path: If all bytes are `uninit`, then there is nothing to copy. The target range
1530            // is marked as uninitialized, but we otherwise leave the byte representation unchanged, since
1531            // it may be arbitrary for uninitialized bytes.
1532            // This also avoids writing to the target bytes so that the backing allocation is never
1533            // touched if the bytes stay uninitialized for the whole interpreter execution. On contemporary
1534            // operating systems, this can avoid physically allocating the page.
1535            dest_alloc.write_uninit(&tcx, dest_range);
1536            // `write_uninit` also resets the provenance, so we are done.
1537            return interp_ok(());
1538        }
1539
1540        // SAFETY: The above indexing would have panicked if there weren't at least `size` bytes
1541        // behind `src` and `dest`. Also, we use the overlapping-safe `ptr::copy` if `src` and
1542        // `dest` could possibly overlap.
1543        // The pointers above remain valid even if the `HashMap` table is moved around because they
1544        // point into the `Vec` storing the bytes.
1545        unsafe {
1546            if src_alloc_id == dest_alloc_id {
1547                if nonoverlapping {
1548                    // `Size` additions
1549                    if (src_offset <= dest_offset && src_offset + size > dest_offset)
1550                        || (dest_offset <= src_offset && dest_offset + size > src_offset)
1551                    {
1552                        throw_ub_custom!(fluent::const_eval_copy_nonoverlapping_overlapping);
1553                    }
1554                }
1555            }
1556            if num_copies > 1 {
1557                assert!(nonoverlapping, "multi-copy only supported in non-overlapping mode");
1558            }
1559
1560            let size_in_bytes = size.bytes_usize();
1561            // For particularly large arrays (where this is perf-sensitive) it's common that
1562            // we're writing a single byte repeatedly. So, optimize that case to a memset.
1563            if size_in_bytes == 1 {
1564                debug_assert!(num_copies >= 1); // we already handled the zero-sized cases above.
1565                // SAFETY: `src_bytes` would be read from anyway by `copy` below (num_copies >= 1).
1566                let value = *src_bytes;
1567                dest_bytes.write_bytes(value, (size * num_copies).bytes_usize());
1568            } else if src_alloc_id == dest_alloc_id {
1569                let mut dest_ptr = dest_bytes;
1570                for _ in 0..num_copies {
1571                    // Here we rely on `src` and `dest` being non-overlapping if there is more than
1572                    // one copy.
1573                    ptr::copy(src_bytes, dest_ptr, size_in_bytes);
1574                    dest_ptr = dest_ptr.add(size_in_bytes);
1575                }
1576            } else {
1577                let mut dest_ptr = dest_bytes;
1578                for _ in 0..num_copies {
1579                    ptr::copy_nonoverlapping(src_bytes, dest_ptr, size_in_bytes);
1580                    dest_ptr = dest_ptr.add(size_in_bytes);
1581                }
1582            }
1583        }
1584
1585        // now fill in all the "init" data
1586        dest_alloc.init_mask_apply_copy(
1587            init,
1588            alloc_range(dest_offset, size), // just a single copy (i.e., not full `dest_range`)
1589            num_copies,
1590        );
1591        // copy the provenance to the destination
1592        dest_alloc.provenance_apply_copy(provenance);
1593
1594        interp_ok(())
1595    }
1596}
1597
1598/// Machine pointer introspection.
1599impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
1600    /// Test if this value might be null.
1601    /// If the machine does not support ptr-to-int casts, this is conservative.
1602    pub fn scalar_may_be_null(&self, scalar: Scalar<M::Provenance>) -> InterpResult<'tcx, bool> {
1603        match scalar.try_to_scalar_int() {
1604            Ok(int) => interp_ok(int.is_null()),
1605            Err(_) => {
1606                // We can't cast this pointer to an integer. Can only happen during CTFE.
1607                let ptr = scalar.to_pointer(self)?;
1608                match self.ptr_try_get_alloc_id(ptr, 0) {
1609                    Ok((alloc_id, offset, _)) => {
1610                        let info = self.get_alloc_info(alloc_id);
1611                        if matches!(info.kind, AllocKind::TypeId) {
1612                            // We *could* actually precisely answer this question since here,
1613                            // the offset *is* the integer value. But the entire point of making
1614                            // this a pointer is not to leak the integer value, so we say everything
1615                            // might be null.
1616                            return interp_ok(true);
1617                        }
1618                        // If the pointer is in-bounds (including "at the end"), it is definitely not null.
1619                        if offset <= info.size {
1620                            return interp_ok(false);
1621                        }
1622                        // If the allocation is N-aligned, and the offset is not divisible by N,
1623                        // then `base + offset` has a non-zero remainder after division by `N`,
1624                        // which means `base + offset` cannot be null.
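                        // For example (illustration): with an 8-aligned base and `offset == 13`,
                        // `(base + 13) % 8 == 5` while `0 % 8 == 0`, so `base + 13` cannot be 0.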
1625                        if !offset.bytes().is_multiple_of(info.align.bytes()) {
1626                            return interp_ok(false);
1627                        }
1628                        // We don't know enough, this might be null.
1629                        interp_ok(true)
1630                    }
1631                    Err(_offset) => bug!("a non-int scalar is always a pointer"),
1632                }
1633            }
1634        }
1635    }
1636
1637    /// Turning a "maybe pointer" into a proper pointer (and some information
1638    /// about where it points), or an absolute address.
1639    ///
1640    /// `size` says how many bytes of memory are expected at that pointer. This is largely only used
1641    /// for error messages; however, the *sign* of `size` can be used to disambiguate situations
1642    /// where a wildcard pointer sits right in between two allocations.
1643    /// It is almost always okay to just set the size to 0; this will be treated like a positive size
1644    /// for handling wildcard pointers.
1645    ///
1646    /// The result must be used immediately; it is not allowed to convert
1647    /// the returned data back into a `Pointer` and store that in machine state.
1648    /// (In fact that's not even possible since `M::ProvenanceExtra` is generic and
1649    /// we don't have an operation to turn it back into `M::Provenance`.)
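    ///
    /// For example (a sketch, assuming `ecx` is an `InterpCx` and `ptr` is a
    /// `Pointer<Option<M::Provenance>>`):
    /// ```ignore
    /// match ecx.ptr_try_get_alloc_id(ptr, 0) {
    ///     Ok((alloc_id, offset, _extra)) => { /* `ptr` points `offset` bytes into `alloc_id` */ }
    ///     Err(addr) => { /* `ptr` is just the absolute address `addr`, with no provenance */ }
    /// }
    /// ```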
1650    pub fn ptr_try_get_alloc_id(
1651        &self,
1652        ptr: Pointer<Option<M::Provenance>>,
1653        size: i64,
1654    ) -> Result<(AllocId, Size, M::ProvenanceExtra), u64> {
1655        match ptr.into_pointer_or_addr() {
1656            Ok(ptr) => match M::ptr_get_alloc(self, ptr, size) {
1657                Some((alloc_id, offset, extra)) => Ok((alloc_id, offset, extra)),
1658                None => {
1659                    assert!(M::Provenance::OFFSET_IS_ADDR);
1660                    // Offset is absolute, as we just asserted.
1661                    let (_, addr) = ptr.into_raw_parts();
1662                    Err(addr.bytes())
1663                }
1664            },
1665            Err(addr) => Err(addr.bytes()),
1666        }
1667    }
1668
1669    /// Turning a "maybe pointer" into a proper pointer (and some information about where it points).
1670    ///
1671    /// `size` says how many bytes of memory are expected at that pointer. This is largely only used
1672    /// for error messages; however, the *sign* of `size` can be used to disambiguate situations
1673    /// where a wildcard pointer sits right in between two allocations.
1674    /// It is almost always okay to just set the size to 0; this will be treated like a positive size
1675    /// for handling wildcard pointers.
1676    ///
1677    /// The result must be used immediately; it is not allowed to convert
1678    /// the returned data back into a `Pointer` and store that in machine state.
1679    /// (In fact that's not even possible since `M::ProvenanceExtra` is generic and
1680    /// we don't have an operation to turn it back into `M::Provenance`.)
1681    #[inline(always)]
1682    pub fn ptr_get_alloc_id(
1683        &self,
1684        ptr: Pointer<Option<M::Provenance>>,
1685        size: i64,
1686    ) -> InterpResult<'tcx, (AllocId, Size, M::ProvenanceExtra)> {
1687        self.ptr_try_get_alloc_id(ptr, size)
1688            .map_err(|offset| {
1689                err_ub!(DanglingIntPointer {
1690                    addr: offset,
1691                    inbounds_size: size,
1692                    msg: CheckInAllocMsg::Dereferenceable
1693                })
1694            })
1695            .into()
1696    }
1697}