rustc_const_eval/interpret/call.rs

//! Manages calling a concrete function (with known MIR body) with argument passing,
//! and returning the return value to the caller.
use std::assert_matches::assert_matches;
use std::borrow::Cow;

use either::{Left, Right};
use rustc_abi::{self as abi, ExternAbi, FieldIdx, Integer, VariantIdx};
use rustc_hir::def_id::DefId;
use rustc_middle::ty::layout::{IntegerExt, TyAndLayout};
use rustc_middle::ty::{self, AdtDef, Instance, Ty, VariantDef};
use rustc_middle::{bug, mir, span_bug};
use rustc_span::sym;
use rustc_target::callconv::{ArgAbi, FnAbi, PassMode};
use tracing::field::Empty;
use tracing::{info, instrument, trace};

use super::{
    CtfeProvenance, FnVal, ImmTy, InterpCx, InterpResult, MPlaceTy, Machine, OpTy, PlaceTy,
    Projectable, Provenance, ReturnAction, ReturnContinuation, Scalar, StackPopInfo, interp_ok,
    throw_ub, throw_ub_custom, throw_unsup_format,
};
use crate::interpret::EnteredTraceSpan;
use crate::{enter_trace_span, fluent_generated as fluent};

/// An argument passed to a function.
#[derive(Clone, Debug)]
pub enum FnArg<'tcx, Prov: Provenance = CtfeProvenance> {
    /// Pass a copy of the given operand.
    Copy(OpTy<'tcx, Prov>),
    /// Allow for the argument to be passed in-place: destroy the value originally stored at that place and
    /// make the place inaccessible for the duration of the function call.
    InPlace(MPlaceTy<'tcx, Prov>),
}

impl<'tcx, Prov: Provenance> FnArg<'tcx, Prov> {
    pub fn layout(&self) -> &TyAndLayout<'tcx> {
        match self {
            FnArg::Copy(op) => &op.layout,
            FnArg::InPlace(mplace) => &mplace.layout,
        }
    }
}

impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
    /// Make a copy of the given fn_arg. Any `InPlace` is degenerated to a copy; no protection of
    /// the original memory occurs.
    pub fn copy_fn_arg(&self, arg: &FnArg<'tcx, M::Provenance>) -> OpTy<'tcx, M::Provenance> {
        match arg {
            FnArg::Copy(op) => op.clone(),
            FnArg::InPlace(mplace) => mplace.clone().into(),
        }
    }

    /// Make a copy of the given fn_args. Any `InPlace` is degenerated to a copy; no protection of
    /// the original memory occurs.
    pub fn copy_fn_args(
        &self,
        args: &[FnArg<'tcx, M::Provenance>],
    ) -> Vec<OpTy<'tcx, M::Provenance>> {
        args.iter().map(|fn_arg| self.copy_fn_arg(fn_arg)).collect()
    }

    /// Helper function for argument untupling.
    pub(super) fn fn_arg_field(
        &self,
        arg: &FnArg<'tcx, M::Provenance>,
        field: FieldIdx,
    ) -> InterpResult<'tcx, FnArg<'tcx, M::Provenance>> {
        interp_ok(match arg {
            FnArg::Copy(op) => FnArg::Copy(self.project_field(op, field)?),
            FnArg::InPlace(mplace) => FnArg::InPlace(self.project_field(mplace, field)?),
        })
    }

    /// Find the wrapped inner type of a transparent wrapper.
    /// Must not be called on 1-ZST (as they don't have a uniquely defined "wrapped field").
    ///
    /// We work with `TyAndLayout` here since that makes it much easier to iterate over all fields.
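    ///
    /// Illustrative sketch (not part of the original docs), using a hypothetical newtype:
    /// ```ignore (illustrative)
    /// #[repr(transparent)]
    /// struct Meters(u32);
    /// // Unfolding the layout of `Meters` (with a `may_unfold` that accepts it) yields the
    /// // layout of `u32`; unfolding `u32` itself is a no-op, since it is not a
    /// // `repr(transparent)` ADT.
    /// ```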
    fn unfold_transparent(
        &self,
        layout: TyAndLayout<'tcx>,
        may_unfold: impl Fn(AdtDef<'tcx>) -> bool,
    ) -> TyAndLayout<'tcx> {
        match layout.ty.kind() {
            ty::Adt(adt_def, _) if adt_def.repr().transparent() && may_unfold(*adt_def) => {
                assert!(!adt_def.is_enum());
                // Find the non-1-ZST field, and recurse.
                let (_, field) = layout.non_1zst_field(self).unwrap();
                self.unfold_transparent(field, may_unfold)
            }
            // Not a transparent type, no further unfolding.
            _ => layout,
        }
    }

    /// Unwrap types that are guaranteed the null-pointer optimization (NPO).
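    ///
    /// Illustrative examples (a sketch, not part of the original docs):
    /// ```ignore (illustrative)
    /// // `Option<&T>` unfolds to `&T` (NPO is guaranteed for references), while e.g.
    /// // `Option<u32>` is left unchanged since `u32` is not one of the NPO-guaranteed
    /// // inner types checked below.
    /// ```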
    fn unfold_npo(&self, layout: TyAndLayout<'tcx>) -> InterpResult<'tcx, TyAndLayout<'tcx>> {
        // Check if this is an option-like type wrapping some type.
        let ty::Adt(def, args) = layout.ty.kind() else {
            // Not an ADT, so definitely no NPO.
            return interp_ok(layout);
        };
        if def.variants().len() != 2 {
            // Not a 2-variant enum, so no NPO.
            return interp_ok(layout);
        }
        assert!(def.is_enum());

        let all_fields_1zst = |variant: &VariantDef| -> InterpResult<'tcx, _> {
            for field in &variant.fields {
                let ty = field.ty(*self.tcx, args);
                let layout = self.layout_of(ty)?;
                if !layout.is_1zst() {
                    return interp_ok(false);
                }
            }
            interp_ok(true)
        };

        // If one variant consists entirely of 1-ZST, then the other variant
        // is the only "relevant" one for this check.
        let var0 = VariantIdx::from_u32(0);
        let var1 = VariantIdx::from_u32(1);
        let relevant_variant = if all_fields_1zst(def.variant(var0))? {
            def.variant(var1)
        } else if all_fields_1zst(def.variant(var1))? {
            def.variant(var0)
        } else {
            // No variant is all-1-ZST, so no NPO.
            return interp_ok(layout);
        };
        // The "relevant" variant must have exactly one field, and its type is the "inner" type.
        if relevant_variant.fields.len() != 1 {
            return interp_ok(layout);
        }
        let inner = relevant_variant.fields[FieldIdx::from_u32(0)].ty(*self.tcx, args);
        let inner = self.layout_of(inner)?;

        // Check if the inner type is one of the NPO-guaranteed ones.
        // For that we first unpeel transparent *structs* (but not unions).
        let is_npo = |def: AdtDef<'tcx>| {
            self.tcx.has_attr(def.did(), sym::rustc_nonnull_optimization_guaranteed)
        };
        let inner = self.unfold_transparent(inner, /* may_unfold */ |def| {
            // Stop at NPO types so that we don't miss that attribute in the check below!
            def.is_struct() && !is_npo(def)
        });
        interp_ok(match inner.ty.kind() {
            ty::Ref(..) | ty::FnPtr(..) => {
                // Option<&T> behaves like &T, and same for fn()
                inner
            }
            ty::Adt(def, _) if is_npo(*def) => {
                // Once we found a `nonnull_optimization_guaranteed` type, further strip off
                // newtype structs from it to find the underlying ABI type.
                self.unfold_transparent(inner, /* may_unfold */ |def| def.is_struct())
            }
            _ => {
                // Everything else we do not unfold.
                layout
            }
        })
    }

    /// Check if these two layouts look like they are fn-ABI-compatible.
    /// (We also compare the `PassMode`, so this doesn't have to check everything. But it turns out
    /// that only checking the `PassMode` is insufficient.)
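    ///
    /// Illustrative examples of pairs this accepts (a sketch, not an exhaustive or normative list):
    /// ```ignore (illustrative)
    /// // &T          vs  *mut T   -- both thin pointers
    /// // usize       vs  u64      -- on 64-bit targets: same integer size and signedness
    /// // Option<&T>  vs  &T       -- via the NPO unfolding above
    /// ```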
    fn layout_compat(
        &self,
        caller: TyAndLayout<'tcx>,
        callee: TyAndLayout<'tcx>,
    ) -> InterpResult<'tcx, bool> {
        // Fast path: equal types are definitely compatible.
        if caller.ty == callee.ty {
            return interp_ok(true);
        }
        // 1-ZST are compatible with all 1-ZST (and with nothing else).
        if caller.is_1zst() || callee.is_1zst() {
            return interp_ok(caller.is_1zst() && callee.is_1zst());
        }
        // Unfold newtypes and NPO optimizations.
        let unfold = |layout: TyAndLayout<'tcx>| {
            self.unfold_npo(self.unfold_transparent(layout, /* may_unfold */ |_def| true))
        };
        let caller = unfold(caller)?;
        let callee = unfold(callee)?;
        // Now see if these inner types are compatible.

        // Compatible pointer types. For thin pointers, we have to accept even non-`repr(transparent)`
        // things as compatible due to `DispatchFromDyn`. For instance, `Rc<i32>` and `*mut i32`
        // must be compatible. So we just accept everything with Pointer ABI as compatible,
        // even if this will accept some code that is not stably guaranteed to work.
        // This also handles function pointers.
        let thin_pointer = |layout: TyAndLayout<'tcx>| match layout.backend_repr {
            abi::BackendRepr::Scalar(s) => match s.primitive() {
                abi::Primitive::Pointer(addr_space) => Some(addr_space),
                _ => None,
            },
            _ => None,
        };
        if let (Some(caller), Some(callee)) = (thin_pointer(caller), thin_pointer(callee)) {
            return interp_ok(caller == callee);
        }
        // For wide pointers we have to get the pointee type.
        let pointee_ty = |ty: Ty<'tcx>| -> InterpResult<'tcx, Option<Ty<'tcx>>> {
            // We cannot use `builtin_deref` here since we need to reject `Box<T, MyAlloc>`.
            interp_ok(Some(match ty.kind() {
                ty::Ref(_, ty, _) => *ty,
                ty::RawPtr(ty, _) => *ty,
                // We only accept `Box` with the default allocator.
                _ if ty.is_box_global(*self.tcx) => ty.expect_boxed_ty(),
                _ => return interp_ok(None),
            }))
        };
        if let (Some(caller), Some(callee)) = (pointee_ty(caller.ty)?, pointee_ty(callee.ty)?) {
            // This is okay if they have the same metadata type.
            let meta_ty = |ty: Ty<'tcx>| {
                // Even if `ty` is normalized, the search for the unsized tail will project
                // to fields, which can yield non-normalized types. So we need to provide a
                // normalization function.
                let normalize = |ty| self.tcx.normalize_erasing_regions(self.typing_env, ty);
                ty.ptr_metadata_ty(*self.tcx, normalize)
            };
            return interp_ok(meta_ty(caller) == meta_ty(callee));
        }

        // Compatible integer types (in particular, usize vs ptr-sized-u32/u64).
        // `char` counts as `u32`.
        let int_ty = |ty: Ty<'tcx>| {
            Some(match ty.kind() {
                ty::Int(ity) => (Integer::from_int_ty(&self.tcx, *ity), /* signed */ true),
                ty::Uint(uty) => (Integer::from_uint_ty(&self.tcx, *uty), /* signed */ false),
                ty::Char => (Integer::I32, /* signed */ false),
                _ => return None,
            })
        };
        if let (Some(caller), Some(callee)) = (int_ty(caller.ty), int_ty(callee.ty)) {
            // This is okay if they are the same integer type.
            return interp_ok(caller == callee);
        }

        // Fall back to exact equality.
        interp_ok(caller == callee)
    }

    /// Returns a `bool` saying whether the two arguments are ABI-compatible.
    pub fn check_argument_compat(
        &self,
        caller_abi: &ArgAbi<'tcx, Ty<'tcx>>,
        callee_abi: &ArgAbi<'tcx, Ty<'tcx>>,
    ) -> InterpResult<'tcx, bool> {
        // We do not want to accept things as ABI-compatible that just "happen to be" compatible on the current target,
        // so we implement a type-based check that reflects the guaranteed rules for ABI compatibility.
        if self.layout_compat(caller_abi.layout, callee_abi.layout)? {
            // Ensure that our checks imply actual ABI compatibility for this concrete call.
            // (This can fail e.g. if `#[rustc_nonnull_optimization_guaranteed]` is used incorrectly.)
            assert!(caller_abi.eq_abi(callee_abi));
            interp_ok(true)
        } else {
            trace!(
                "check_argument_compat: incompatible ABIs:\ncaller: {:?}\ncallee: {:?}",
                caller_abi, callee_abi
            );
            interp_ok(false)
        }
    }

    /// Initialize a single callee argument, checking the types for compatibility.
    fn pass_argument<'x, 'y>(
        &mut self,
        caller_args: &mut impl Iterator<
            Item = (&'x FnArg<'tcx, M::Provenance>, &'y ArgAbi<'tcx, Ty<'tcx>>),
        >,
        callee_abi: &ArgAbi<'tcx, Ty<'tcx>>,
        callee_arg_idx: usize,
        callee_arg: &mir::Place<'tcx>,
        callee_ty: Ty<'tcx>,
        already_live: bool,
    ) -> InterpResult<'tcx>
    where
        'tcx: 'x,
        'tcx: 'y,
    {
        assert_eq!(callee_ty, callee_abi.layout.ty);
        if matches!(callee_abi.mode, PassMode::Ignore) {
            // This one is skipped. Still must be made live though!
            if !already_live {
                self.storage_live(callee_arg.as_local().unwrap())?;
            }
            return interp_ok(());
        }
        // Find next caller arg.
        let Some((caller_arg, caller_abi)) = caller_args.next() else {
            throw_ub_custom!(fluent::const_eval_not_enough_caller_args);
        };
        assert_eq!(caller_arg.layout().layout, caller_abi.layout.layout);
        // Sadly we cannot assert that `caller_arg.layout().ty` and `caller_abi.layout.ty` are
        // equal; in closures the types sometimes differ. We just hope that `caller_abi` is the
        // right type to print to the user.

        // Check compatibility
        if !self.check_argument_compat(caller_abi, callee_abi)? {
            throw_ub!(AbiMismatchArgument {
                arg_idx: callee_arg_idx,
                caller_ty: caller_abi.layout.ty,
                callee_ty: callee_abi.layout.ty
            });
        }
        // We work with a copy of the argument for now; if this is in-place argument passing, we
        // will later protect the source it comes from. This means the callee cannot observe whether
        // we did in-place or by-copy argument passing, except for pointer equality tests.
        let caller_arg_copy = self.copy_fn_arg(caller_arg);
        if !already_live {
            let local = callee_arg.as_local().unwrap();
            let meta = caller_arg_copy.meta();
            // `check_argument_compat` ensures that if metadata is needed, both have the same type,
            // so we know they will use the metadata the same way.
            assert!(!meta.has_meta() || caller_arg_copy.layout.ty == callee_ty);

            self.storage_live_dyn(local, meta)?;
        }
        // Now we can finally actually evaluate the callee place.
        let callee_arg = self.eval_place(*callee_arg)?;
        // We allow some transmutes here.
        // FIXME: Depending on the PassMode, this should reset some padding to uninitialized. (This
        // is true for all `copy_op`, but there are a lot of special cases for argument passing
        // specifically.)
        self.copy_op_allow_transmute(&caller_arg_copy, &callee_arg)?;
        // If this was an in-place pass, protect the place it comes from for the duration of the call.
        if let FnArg::InPlace(mplace) = caller_arg {
            M::protect_in_place_function_argument(self, mplace)?;
        }
        interp_ok(())
    }

    /// The main entry point for creating a new stack frame: performs ABI checks and initializes
    /// arguments.
    #[instrument(skip(self), level = "trace")]
    pub fn init_stack_frame(
        &mut self,
        instance: Instance<'tcx>,
        body: &'tcx mir::Body<'tcx>,
        caller_fn_abi: &FnAbi<'tcx, Ty<'tcx>>,
        args: &[FnArg<'tcx, M::Provenance>],
        with_caller_location: bool,
        destination: &PlaceTy<'tcx, M::Provenance>,
        mut cont: ReturnContinuation,
    ) -> InterpResult<'tcx> {
        let _span = enter_trace_span!(M, step::init_stack_frame, %instance, tracing_separate_thread = Empty);

        // Compute callee information.
        // FIXME: for variadic support, do we have to somehow determine callee's extra_args?
        let callee_fn_abi = self.fn_abi_of_instance(instance, ty::List::empty())?;

        if callee_fn_abi.c_variadic || caller_fn_abi.c_variadic {
            throw_unsup_format!("calling a c-variadic function is not supported");
        }

        if caller_fn_abi.conv != callee_fn_abi.conv {
            throw_ub_custom!(
                fluent::const_eval_incompatible_calling_conventions,
                callee_conv = format!("{}", callee_fn_abi.conv),
                caller_conv = format!("{}", caller_fn_abi.conv),
            )
        }

        // Check that all target features required by the callee (i.e., from
        // the attribute `#[target_feature(enable = ...)]`) are enabled at
        // compile time.
        M::check_fn_target_features(self, instance)?;

        if !callee_fn_abi.can_unwind {
            // The callee cannot unwind, so force the `Unreachable` unwind handling.
            match &mut cont {
                ReturnContinuation::Stop { .. } => {}
                ReturnContinuation::Goto { unwind, .. } => {
                    *unwind = mir::UnwindAction::Unreachable;
                }
            }
        }

        self.push_stack_frame_raw(instance, body, destination, cont)?;

        // If an error is raised here, pop the frame again to get an accurate backtrace.
        // To this end, we wrap it all in a `try` block.
        let res: InterpResult<'tcx> = try {
            trace!(
                "caller ABI: {:#?}, args: {:#?}",
                caller_fn_abi,
                args.iter()
                    .map(|arg| (
                        arg.layout().ty,
                        match arg {
                            FnArg::Copy(op) => format!("copy({op:?})"),
                            FnArg::InPlace(mplace) => format!("in-place({mplace:?})"),
                        }
                    ))
                    .collect::<Vec<_>>()
            );
            trace!(
                "spread_arg: {:?}, locals: {:#?}",
                body.spread_arg,
                body.args_iter()
                    .map(|local| (
                        local,
                        self.layout_of_local(self.frame(), local, None).unwrap().ty,
                    ))
                    .collect::<Vec<_>>()
            );

            // In principle, we have two iterators: Where the arguments come from, and where
            // they go to.

            // The "where they come from" part is easy, we expect the caller to do any special handling
            // that might be required here (e.g. for untupling).
            // If `with_caller_location` is set we pretend there is an extra argument (that
            // we will not pass; our `caller_location` intrinsic implementation walks the stack instead).
            assert_eq!(
                args.len() + if with_caller_location { 1 } else { 0 },
                caller_fn_abi.args.len(),
                "mismatch between caller ABI and caller arguments",
            );
            let mut caller_args = args
                .iter()
                .zip(caller_fn_abi.args.iter())
                .filter(|arg_and_abi| !matches!(arg_and_abi.1.mode, PassMode::Ignore));

            // Now we have to spread them out across the callee's locals,
            // taking into account the `spread_arg`. If we could write
            // this as a single iterator (that handles `spread_arg`), then
            // `pass_argument` would be the loop body. It takes care to
            // not advance `caller_args` for ignored arguments.
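            // Illustrative sketch (not from the original source): for a function with the
            // "rust-call" ABI, e.g. `extern "rust-call" fn call_once(self, args: (i32, i32))`,
            // the MIR body declares `args` as a single tuple local and marks it as `spread_arg`,
            // while the ABI lists the tuple's fields as separate arguments. The loop below then
            // makes the tuple local live once and fills it field-by-field from consecutive
            // caller arguments.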
            let mut callee_args_abis = callee_fn_abi.args.iter().enumerate();
            for local in body.args_iter() {
                // Construct the destination place for this argument. At this point all
                // locals are still dead, so we cannot construct a `PlaceTy`.
                let dest = mir::Place::from(local);
                // `layout_of_local` does more than just the instantiation we need to get the
                // type, but the result gets cached so this avoids calling the instantiation
                // query *again* the next time this local is accessed.
                let ty = self.layout_of_local(self.frame(), local, None)?.ty;
                if Some(local) == body.spread_arg {
                    // Make the local live once, then fill in the value field by field.
                    self.storage_live(local)?;
                    // Must be a tuple
                    let ty::Tuple(fields) = ty.kind() else {
                        span_bug!(self.cur_span(), "non-tuple type for `spread_arg`: {ty}")
                    };
                    for (i, field_ty) in fields.iter().enumerate() {
                        let dest = dest.project_deeper(
                            &[mir::ProjectionElem::Field(FieldIdx::from_usize(i), field_ty)],
                            *self.tcx,
                        );
                        let (idx, callee_abi) = callee_args_abis.next().unwrap();
                        self.pass_argument(
                            &mut caller_args,
                            callee_abi,
                            idx,
                            &dest,
                            field_ty,
                            /* already_live */ true,
                        )?;
                    }
                } else {
                    // Normal argument. Cannot mark it as live yet, it might be unsized!
                    let (idx, callee_abi) = callee_args_abis.next().unwrap();
                    self.pass_argument(
                        &mut caller_args,
                        callee_abi,
                        idx,
                        &dest,
                        ty,
                        /* already_live */ false,
                    )?;
                }
            }
            // If the callee needs a caller location, pretend we consume one more argument from the ABI.
            if instance.def.requires_caller_location(*self.tcx) {
                callee_args_abis.next().unwrap();
            }
            // Now we should have no more caller args or callee arg ABIs
            assert!(
                callee_args_abis.next().is_none(),
                "mismatch between callee ABI and callee body arguments"
            );
            if caller_args.next().is_some() {
                throw_ub_custom!(fluent::const_eval_too_many_caller_args);
            }
            // Don't forget to check the return type!
            if !self.check_argument_compat(&caller_fn_abi.ret, &callee_fn_abi.ret)? {
                throw_ub!(AbiMismatchReturn {
                    caller_ty: caller_fn_abi.ret.layout.ty,
                    callee_ty: callee_fn_abi.ret.layout.ty
                });
            }

            // Protect return place for in-place return value passing.
            // We only need to protect anything if this is actually an in-memory place.
            if let Left(mplace) = destination.as_mplace_or_local() {
                M::protect_in_place_function_argument(self, &mplace)?;
            }

            // Don't forget to mark "initially live" locals as live.
            self.storage_live_for_always_live_locals()?;
        };
        res.inspect_err_kind(|_| {
            // Don't show the incomplete stack frame in the error stacktrace.
            self.stack_mut().pop();
        })
    }

    /// Initiate a call to this function -- pushing the stack frame and initializing the arguments.
    ///
    /// `caller_fn_abi` is used to determine if all the arguments are passed the proper way.
    /// However, we also need `caller_abi` to determine if we need to do untupling of arguments.
    ///
    /// `with_caller_location` indicates whether the caller passed a caller location. Miri
    /// implements caller locations without argument passing, but to match `FnAbi` we need to know
    /// when those arguments are present.
    pub(super) fn init_fn_call(
        &mut self,
        fn_val: FnVal<'tcx, M::ExtraFnVal>,
        (caller_abi, caller_fn_abi): (ExternAbi, &FnAbi<'tcx, Ty<'tcx>>),
        args: &[FnArg<'tcx, M::Provenance>],
        with_caller_location: bool,
        destination: &PlaceTy<'tcx, M::Provenance>,
        target: Option<mir::BasicBlock>,
        unwind: mir::UnwindAction,
    ) -> InterpResult<'tcx> {
        let _span =
            enter_trace_span!(M, step::init_fn_call, tracing_separate_thread = Empty, ?fn_val)
                .or_if_tracing_disabled(|| trace!("init_fn_call: {:#?}", fn_val));

        let instance = match fn_val {
            FnVal::Instance(instance) => instance,
            FnVal::Other(extra) => {
                return M::call_extra_fn(
                    self,
                    extra,
                    caller_fn_abi,
                    args,
                    destination,
                    target,
                    unwind,
                );
            }
        };

        match instance.def {
            ty::InstanceKind::Intrinsic(def_id) => {
                assert!(self.tcx.intrinsic(def_id).is_some());
                // FIXME: Should `InPlace` arguments be reset to uninit?
                if let Some(fallback) = M::call_intrinsic(
                    self,
                    instance,
                    &self.copy_fn_args(args),
                    destination,
                    target,
                    unwind,
                )? {
                    assert!(!self.tcx.intrinsic(fallback.def_id()).unwrap().must_be_overridden);
                    assert_matches!(fallback.def, ty::InstanceKind::Item(_));
                    return self.init_fn_call(
                        FnVal::Instance(fallback),
                        (caller_abi, caller_fn_abi),
                        args,
                        with_caller_location,
                        destination,
                        target,
                        unwind,
                    );
                } else {
                    interp_ok(())
                }
            }
            ty::InstanceKind::VTableShim(..)
            | ty::InstanceKind::ReifyShim(..)
            | ty::InstanceKind::ClosureOnceShim { .. }
            | ty::InstanceKind::ConstructCoroutineInClosureShim { .. }
            | ty::InstanceKind::FnPtrShim(..)
            | ty::InstanceKind::DropGlue(..)
            | ty::InstanceKind::CloneShim(..)
            | ty::InstanceKind::FnPtrAddrShim(..)
            | ty::InstanceKind::ThreadLocalShim(..)
            | ty::InstanceKind::AsyncDropGlueCtorShim(..)
            | ty::InstanceKind::AsyncDropGlue(..)
            | ty::InstanceKind::FutureDropPollShim(..)
            | ty::InstanceKind::Item(_) => {
                // We need MIR for this fn.
                // Note that this can be an intrinsic, if we are executing its fallback body.
                let Some((body, instance)) = M::find_mir_or_eval_fn(
                    self,
                    instance,
                    caller_fn_abi,
                    args,
                    destination,
                    target,
                    unwind,
                )?
                else {
                    return interp_ok(());
                };

                // Special handling for the closure ABI: untuple the last argument.
                let args: Cow<'_, [FnArg<'tcx, M::Provenance>]> =
                    if caller_abi == ExternAbi::RustCall && !args.is_empty() {
                        // Untuple
                        let (untuple_arg, args) = args.split_last().unwrap();
                        trace!("init_fn_call: Will pass last argument by untupling");
                        Cow::from(
                            args.iter()
                                .map(|a| interp_ok(a.clone()))
                                .chain((0..untuple_arg.layout().fields.count()).map(|i| {
                                    self.fn_arg_field(untuple_arg, FieldIdx::from_usize(i))
                                }))
                                .collect::<InterpResult<'_, Vec<_>>>()?,
                        )
                    } else {
                        // Plain arg passing
                        Cow::from(args)
                    };

                self.init_stack_frame(
                    instance,
                    body,
                    caller_fn_abi,
                    &args,
                    with_caller_location,
                    destination,
                    ReturnContinuation::Goto { ret: target, unwind },
                )
            }
            // `InstanceKind::Virtual` does not have callable MIR. Calls to `Virtual` instances must be
            // codegen'd / interpreted as virtual calls through the vtable.
            ty::InstanceKind::Virtual(def_id, idx) => {
                let mut args = args.to_vec();
                // We have to implement all "dyn-compatible receivers". So we have to go search for a
                // pointer or `dyn Trait` type, but it could be wrapped in newtypes. So recursively
                // unwrap those newtypes until we are there.
                // An `InPlace` does nothing here, we keep the original receiver intact. We can't
                // really pass the argument in-place anyway, and we are constructing a new
                // `Immediate` receiver.
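                // Illustrative examples (not from the original source): a receiver of type
                // `&dyn Trait` or `*const dyn Trait` is found immediately by the loop below,
                // while `Box<dyn Trait>`, `Rc<dyn Trait>`, or `Pin<&mut dyn Trait>` first get
                // their wrapper fields projected away until the underlying pointer (or the
                // `dyn Trait` place itself) is reached.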
                let mut receiver = self.copy_fn_arg(&args[0]);
                let receiver_place = loop {
                    match receiver.layout.ty.kind() {
                        ty::Ref(..) | ty::RawPtr(..) => {
                            // We do *not* use `deref_pointer` here: we don't want to conceptually
                            // create a place that must be dereferenceable, since the receiver might
                            // be a raw pointer and (for `*const dyn Trait`) we don't need to
                            // actually access memory to resolve this method.
                            // Also see <https://github.com/rust-lang/miri/issues/2786>.
                            let val = self.read_immediate(&receiver)?;
                            break self.ref_to_mplace(&val)?;
                        }
                        ty::Dynamic(.., ty::Dyn) => break receiver.assert_mem_place(), // no immediate unsized values
                        _ => {
                            // Not there yet, search for the only non-ZST field.
                            // (The rules for `DispatchFromDyn` ensure there's exactly one such field.)
                            let (idx, _) = receiver.layout.non_1zst_field(self).expect(
                                "not exactly one non-1-ZST field in a `DispatchFromDyn` type",
                            );
                            receiver = self.project_field(&receiver, idx)?;
                        }
                    }
                };

                // Obtain the underlying trait we are working on, and the adjusted receiver argument.
                // Doesn't have to be a `dyn Trait`, but the unsized tail must be `dyn Trait`.
                // (For that reason we also cannot use `unpack_dyn_trait`.)
                let receiver_tail =
                    self.tcx.struct_tail_for_codegen(receiver_place.layout.ty, self.typing_env);
                let ty::Dynamic(receiver_trait, _, ty::Dyn) = receiver_tail.kind() else {
                    span_bug!(self.cur_span(), "dynamic call on non-`dyn` type {}", receiver_tail)
                };
                assert!(receiver_place.layout.is_unsized());

                // Get the required information from the vtable.
                let vptr = receiver_place.meta().unwrap_meta().to_pointer(self)?;
                let dyn_ty = self.get_ptr_vtable_ty(vptr, Some(receiver_trait))?;
                let adjusted_recv = receiver_place.ptr();

                // Now determine the actual method to call. Usually we use the easy way of just
                // looking up the method at index `idx`.
                let vtable_entries = self.vtable_entries(receiver_trait.principal(), dyn_ty);
                let Some(ty::VtblEntry::Method(fn_inst)) = vtable_entries.get(idx).copied() else {
                    // FIXME(fee1-dead) these could be variants of the UB info enum instead of this
                    throw_ub_custom!(fluent::const_eval_dyn_call_not_a_method);
                };
                trace!("Virtual call dispatches to {fn_inst:#?}");
                // We can also do the lookup based on `def_id` and `dyn_ty`, and check that that
                // produces the same result.
                self.assert_virtual_instance_matches_concrete(dyn_ty, def_id, instance, fn_inst);

                // Adjust receiver argument. Layout can be any (thin) ptr.
                let receiver_ty = Ty::new_mut_ptr(self.tcx.tcx, dyn_ty);
                args[0] = FnArg::Copy(
                    ImmTy::from_immediate(
                        Scalar::from_maybe_pointer(adjusted_recv, self).into(),
                        self.layout_of(receiver_ty)?,
                    )
                    .into(),
                );
                trace!("Patched receiver operand to {:#?}", args[0]);
                // Need to also adjust the type in the ABI. Strangely, the layout there is actually
                // already fine! Just the type is bogus. This is due to what `force_thin_self_ptr`
                // does in `fn_abi_new_uncached`; supposedly, codegen relies on having the bogus
                // type, so we just patch this up locally.
                let mut caller_fn_abi = caller_fn_abi.clone();
                caller_fn_abi.args[0].layout.ty = receiver_ty;

                // recurse with concrete function
                self.init_fn_call(
                    FnVal::Instance(fn_inst),
                    (caller_abi, &caller_fn_abi),
                    &args,
                    with_caller_location,
                    destination,
                    target,
                    unwind,
                )
            }
        }
    }

    fn assert_virtual_instance_matches_concrete(
        &self,
        dyn_ty: Ty<'tcx>,
        def_id: DefId,
        virtual_instance: ty::Instance<'tcx>,
        concrete_instance: ty::Instance<'tcx>,
    ) {
        let tcx = *self.tcx;

        let trait_def_id = tcx.trait_of_assoc(def_id).unwrap();
        let virtual_trait_ref = ty::TraitRef::from_method(tcx, trait_def_id, virtual_instance.args);
        let existential_trait_ref = ty::ExistentialTraitRef::erase_self_ty(tcx, virtual_trait_ref);
        let concrete_trait_ref = existential_trait_ref.with_self_ty(tcx, dyn_ty);

        let concrete_method = Instance::expect_resolve_for_vtable(
            tcx,
            self.typing_env,
            def_id,
            virtual_instance.args.rebase_onto(tcx, trait_def_id, concrete_trait_ref.args),
            self.cur_span(),
        );
        assert_eq!(concrete_instance, concrete_method);
    }

    /// Initiate a tail call to this function -- popping the current stack frame, pushing the new
    /// stack frame and initializing the arguments.
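    ///
    /// Illustrative sketch (not from the original docs): this is what runs for a tail-call
    /// terminator produced by the unstable `become` syntax (feature `explicit_tail_calls`):
    /// ```ignore (illustrative)
    /// fn g(x: u32) -> u32 {
    ///     become f(x) // pops `g`'s frame, then calls `f` with `g`'s return continuation
    /// }
    /// ```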
    pub(super) fn init_fn_tail_call(
        &mut self,
        fn_val: FnVal<'tcx, M::ExtraFnVal>,
        (caller_abi, caller_fn_abi): (ExternAbi, &FnAbi<'tcx, Ty<'tcx>>),
        args: &[FnArg<'tcx, M::Provenance>],
        with_caller_location: bool,
    ) -> InterpResult<'tcx> {
        trace!("init_fn_tail_call: {:#?}", fn_val);

        // This is the "canonical" implementation of tail calls,
        // a pop of the current stack frame, followed by a normal call
        // which pushes a new stack frame, with the return address from
        // the popped stack frame.
        //
        // Note that we are using `pop_stack_frame_raw` and not `return_from_current_stack_frame`,
        // as the latter "executes" the goto to the return block, but we don't want to,
        // only the tail called function should return to the current return block.
        let StackPopInfo { return_action, return_cont, return_place } =
            self.pop_stack_frame_raw(false, |_this, _return_place| {
                // This function's return value is just discarded, the tail-callee will fill in the return place instead.
                interp_ok(())
            })?;

        assert_eq!(return_action, ReturnAction::Normal);

        // Take the "stack pop cleanup" info, and use that to initiate the next call.
        let ReturnContinuation::Goto { ret, unwind } = return_cont else {
            bug!("can't tailcall as root");
        };

        // FIXME(explicit_tail_calls):
        //   we should check if both caller&callee can/n't unwind,
        //   see <https://github.com/rust-lang/rust/pull/113128#issuecomment-1614979803>

        self.init_fn_call(
            fn_val,
            (caller_abi, caller_fn_abi),
            args,
            with_caller_location,
            &return_place,
            ret,
            unwind,
        )
    }

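    /// Illustrative note (not from the original source): `instance` is expected to be the
    /// `drop_in_place` glue for `place`'s type; for a trait object, the `ty::Dynamic` arm below
    /// looks up the concrete drop glue through the vtable instead.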
    pub(super) fn init_drop_in_place_call(
        &mut self,
        place: &PlaceTy<'tcx, M::Provenance>,
        instance: ty::Instance<'tcx>,
        target: mir::BasicBlock,
        unwind: mir::UnwindAction,
    ) -> InterpResult<'tcx> {
        trace!("init_drop_in_place_call: {:?},\n  instance={:?}", place, instance);
        // We take the address of the object. This may well be unaligned, which is fine
        // for us here. However, unaligned accesses will probably make the actual drop
        // implementation fail -- a problem shared by rustc.
        let place = self.force_allocation(place)?;

        // We behave a bit differently from codegen here.
        // Codegen creates an `InstanceKind::Virtual` with index 0 (the slot of the drop method) and
        // then dispatches that to the normal call machinery. However, our call machinery currently
        // only supports calling `VtblEntry::Method`; it would choke on a `MetadataDropInPlace`. So
        // instead we do the virtual call stuff ourselves. It's easier here than in `eval_fn_call`
        // since we can just get a place of the underlying type and use `mplace_to_ref`.
        let place = match place.layout.ty.kind() {
            ty::Dynamic(data, _, ty::Dyn) => {
                // Dropping a trait object. Need to find actual drop fn.
                self.unpack_dyn_trait(&place, data)?
            }
            _ => {
                debug_assert_eq!(
                    instance,
                    ty::Instance::resolve_drop_in_place(*self.tcx, place.layout.ty)
                );
                place
            }
        };
        let instance = ty::Instance::resolve_drop_in_place(*self.tcx, place.layout.ty);
        let fn_abi = self.fn_abi_of_instance(instance, ty::List::empty())?;

        let arg = self.mplace_to_ref(&place)?;
        let ret = MPlaceTy::fake_alloc_zst(self.layout_of(self.tcx.types.unit)?);

        self.init_fn_call(
            FnVal::Instance(instance),
            (ExternAbi::Rust, fn_abi),
            &[FnArg::Copy(arg.into())],
            false,
            &ret.into(),
            Some(target),
            unwind,
        )
    }

    /// Pops the current frame from the stack, copies the return value to the caller, deallocates
    /// the memory for allocated locals, and jumps to an appropriate place.
    ///
    /// If `unwinding` is `false`, then we are performing a normal return
    /// from a function. In this case, we jump back into the frame of the caller,
    /// and continue execution as normal.
    ///
    /// If `unwinding` is `true`, then we are in the middle of a panic,
    /// and need to unwind this frame. In this case, we jump to the
    /// `cleanup` block for the function, which is responsible for running
    /// `Drop` impls for any locals that have been initialized at this point.
    /// The cleanup block ends with a special `Resume` terminator, which will
    /// cause us to continue unwinding.
    #[instrument(skip(self), level = "trace")]
    pub(super) fn return_from_current_stack_frame(
        &mut self,
        unwinding: bool,
    ) -> InterpResult<'tcx> {
        info!(
            "popping stack frame ({})",
            if unwinding { "during unwinding" } else { "returning from function" }
        );

        // Check `unwinding`.
        assert_eq!(
            unwinding,
            match self.frame().loc {
                Left(loc) => self.body().basic_blocks[loc.block].is_cleanup,
                Right(_) => true,
            }
        );
        if unwinding && self.frame_idx() == 0 {
            throw_ub_custom!(fluent::const_eval_unwind_past_top);
        }

        // Get out the return value. Must happen *before* the frame is popped as we have to get the
        // local's value out.
        let return_op =
            self.local_to_op(mir::RETURN_PLACE, None).expect("return place should always be live");
        // Do the actual pop + copy.
        let stack_pop_info = self.pop_stack_frame_raw(unwinding, |this, return_place| {
            this.copy_op_allow_transmute(&return_op, return_place)?;
            trace!("return value: {:?}", this.dump_place(return_place));
            interp_ok(())
        })?;

        match stack_pop_info.return_action {
            ReturnAction::Normal => {}
            ReturnAction::NoJump => {
                // The hook already did everything.
                return interp_ok(());
            }
            ReturnAction::NoCleanup => {
                // If we are not doing cleanup, also skip everything else.
                assert!(self.stack().is_empty(), "only the topmost frame should ever be leaked");
                assert!(!unwinding, "tried to skip cleanup during unwinding");
                // Don't jump anywhere.
                return interp_ok(());
            }
        }

        // Normal return, figure out where to jump.
        if unwinding {
            // Follow the unwind edge.
            match stack_pop_info.return_cont {
                ReturnContinuation::Goto { unwind, .. } => {
                    // This must be the very last thing that happens, since it can in fact push a new stack frame.
                    self.unwind_to_block(unwind)
                }
                ReturnContinuation::Stop { .. } => {
                    panic!("encountered ReturnContinuation::Stop when unwinding!")
                }
            }
        } else {
            // Follow the normal return edge.
            match stack_pop_info.return_cont {
                ReturnContinuation::Goto { ret, .. } => self.return_to_block(ret),
                ReturnContinuation::Stop { .. } => {
                    assert!(
                        self.stack().is_empty(),
                        "only the bottommost frame can have ReturnContinuation::Stop"
                    );
                    interp_ok(())
                }
            }
        }
    }
}