rustc_const_eval/interpret/call.rs
1//! Manages calling a concrete function (with known MIR body) with argument passing,
2//! and returning the return value to the caller.
3use std::assert_matches::assert_matches;
4use std::borrow::Cow;
5
6use either::{Left, Right};
7use rustc_abi::{self as abi, ExternAbi, FieldIdx, Integer, VariantIdx};
8use rustc_hir::def_id::DefId;
9use rustc_middle::ty::layout::{IntegerExt, TyAndLayout};
10use rustc_middle::ty::{self, AdtDef, Instance, Ty, VariantDef};
11use rustc_middle::{bug, mir, span_bug};
12use rustc_span::sym;
13use rustc_target::callconv::{ArgAbi, FnAbi, PassMode};
14use tracing::field::Empty;
15use tracing::{info, instrument, trace};
16
17use super::{
18 CtfeProvenance, FnVal, ImmTy, InterpCx, InterpResult, MPlaceTy, Machine, OpTy, PlaceTy,
19 Projectable, Provenance, ReturnAction, ReturnContinuation, Scalar, StackPopInfo, interp_ok,
20 throw_ub, throw_ub_custom, throw_unsup_format,
21};
22use crate::interpret::EnteredTraceSpan;
23use crate::{enter_trace_span, fluent_generated as fluent};
24
25/// An argument passed to a function.
26#[derive(Clone, Debug)]
27pub enum FnArg<'tcx, Prov: Provenance = CtfeProvenance> {
28 /// Pass a copy of the given operand.
29 Copy(OpTy<'tcx, Prov>),
30 /// Allow for the argument to be passed in-place: destroy the value originally stored at that
31 /// place and make the place inaccessible for the duration of the function call. This *must* be
32 /// an in-memory place so that we can do the proper alias checks.
33 InPlace(MPlaceTy<'tcx, Prov>),
34}
35
36impl<'tcx, Prov: Provenance> FnArg<'tcx, Prov> {
37 pub fn layout(&self) -> &TyAndLayout<'tcx> {
38 match self {
39 FnArg::Copy(op) => &op.layout,
40 FnArg::InPlace(mplace) => &mplace.layout,
41 }
42 }
43}
44
45impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
46 /// Make a copy of the given fn_arg. Any `InPlace` is degraded to a copy; no protection of the
47 /// original memory occurs.
48 pub fn copy_fn_arg(&self, arg: &FnArg<'tcx, M::Provenance>) -> OpTy<'tcx, M::Provenance> {
49 match arg {
50 FnArg::Copy(op) => op.clone(),
51 FnArg::InPlace(mplace) => mplace.clone().into(),
52 }
53 }
54
55 /// Make a copy of the given fn_args. Any `InPlace` is degraded to a copy; no protection of the
56 /// original memory occurs.
57 pub fn copy_fn_args(
58 &self,
59 args: &[FnArg<'tcx, M::Provenance>],
60 ) -> Vec<OpTy<'tcx, M::Provenance>> {
61 args.iter().map(|fn_arg| self.copy_fn_arg(fn_arg)).collect()
62 }
63
64 /// Helper function for argument untupling.
65 pub(super) fn fn_arg_field(
66 &self,
67 arg: &FnArg<'tcx, M::Provenance>,
68 field: FieldIdx,
69 ) -> InterpResult<'tcx, FnArg<'tcx, M::Provenance>> {
70 interp_ok(match arg {
71 FnArg::Copy(op) => FnArg::Copy(self.project_field(op, field)?),
72 FnArg::InPlace(mplace) => FnArg::InPlace(self.project_field(mplace, field)?),
73 })
74 }
75
76 /// Find the wrapped inner type of a transparent wrapper.
77 /// Must not be called on 1-ZSTs (as they don't have a uniquely defined "wrapped field").
78 ///
79 /// We work with `TyAndLayout` here since that makes it much easier to iterate over all fields.
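/// For example, given `#[repr(transparent)] struct Wrapper<T>(PhantomData<u8>, T)` with a
/// non-1-ZST `T`, this returns the layout of `T` (recursing further if `T` is itself a
/// transparent wrapper accepted by `may_unfold`).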
80 fn unfold_transparent(
81 &self,
82 layout: TyAndLayout<'tcx>,
83 may_unfold: impl Fn(AdtDef<'tcx>) -> bool,
84 ) -> TyAndLayout<'tcx> {
85 match layout.ty.kind() {
86 ty::Adt(adt_def, _) if adt_def.repr().transparent() && may_unfold(*adt_def) => {
87 assert!(!adt_def.is_enum());
88 // Find the non-1-ZST field, and recurse.
89 let (_, field) = layout.non_1zst_field(self).unwrap();
90 self.unfold_transparent(field, may_unfold)
91 }
92 // Not a transparent type, no further unfolding.
93 _ => layout,
94 }
95 }
96
97 /// Unwrap types that are guaranteed the null-pointer optimization (NPO).
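/// For example, `Option<&T>` unfolds to `&T`, and `Option<NonNull<T>>` unfolds (through the
/// `#[rustc_nonnull_optimization_guaranteed]` newtype) to the underlying raw pointer type;
/// layouts that do not qualify for the NPO are returned unchanged.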
98 fn unfold_npo(&self, layout: TyAndLayout<'tcx>) -> InterpResult<'tcx, TyAndLayout<'tcx>> {
99 // Check if this is an option-like type wrapping some type.
100 let ty::Adt(def, args) = layout.ty.kind() else {
101 // Not an ADT, so definitely no NPO.
102 return interp_ok(layout);
103 };
104 if def.variants().len() != 2 {
105 // Not a 2-variant enum, so no NPO.
106 return interp_ok(layout);
107 }
108 assert!(def.is_enum());
109
110 let all_fields_1zst = |variant: &VariantDef| -> InterpResult<'tcx, _> {
111 for field in &variant.fields {
112 let ty = field.ty(*self.tcx, args);
113 let layout = self.layout_of(ty)?;
114 if !layout.is_1zst() {
115 return interp_ok(false);
116 }
117 }
118 interp_ok(true)
119 };
120
121 // If one variant consists entirely of 1-ZST fields, then the other variant
122 // is the only "relevant" one for this check.
123 let var0 = VariantIdx::from_u32(0);
124 let var1 = VariantIdx::from_u32(1);
125 let relevant_variant = if all_fields_1zst(def.variant(var0))? {
126 def.variant(var1)
127 } else if all_fields_1zst(def.variant(var1))? {
128 def.variant(var0)
129 } else {
130 // No variant is all-1-ZST, so no NPO.
131 return interp_ok(layout);
132 };
133 // The "relevant" variant must have exactly one field, and its type is the "inner" type.
134 if relevant_variant.fields.len() != 1 {
135 return interp_ok(layout);
136 }
137 let inner = relevant_variant.fields[FieldIdx::from_u32(0)].ty(*self.tcx, args);
138 let inner = self.layout_of(inner)?;
139
140 // Check if the inner type is one of the NPO-guaranteed ones.
141 // For that we first unpeel transparent *structs* (but not unions).
142 let is_npo = |def: AdtDef<'tcx>| {
143 self.tcx.has_attr(def.did(), sym::rustc_nonnull_optimization_guaranteed)
144 };
145 let inner = self.unfold_transparent(inner, /* may_unfold */ |def| {
146 // Stop at NPO types so that we don't miss that attribute in the check below!
147 def.is_struct() && !is_npo(def)
148 });
149 interp_ok(match inner.ty.kind() {
150 ty::Ref(..) | ty::FnPtr(..) => {
151 // Option<&T> behaves like &T, and same for fn()
152 inner
153 }
154 ty::Adt(def, _) if is_npo(*def) => {
155 // Once we have found a `nonnull_optimization_guaranteed` type, further strip off
156 // newtype structs from it to find the underlying ABI type.
157 self.unfold_transparent(inner, /* may_unfold */ |def| def.is_struct())
158 }
159 _ => {
160 // Everything else we do not unfold.
161 layout
162 }
163 })
164 }
165
166 /// Check if these two layouts look like they are fn-ABI-compatible.
167 /// (We also compare the `PassMode`, so this doesn't have to check everything. But it turns out
168 /// that only checking the `PassMode` is insufficient.)
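/// Examples of pairs accepted here: `char` and `u32`; `usize` and the target's pointer-sized
/// unsigned integer type; any two thin pointers in the same address space (e.g. `&T` and
/// `*mut U` for sized `T` and `U`); and, after NPO/newtype unfolding, `Option<&T>` and `&T`.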
169 fn layout_compat(
170 &self,
171 caller: TyAndLayout<'tcx>,
172 callee: TyAndLayout<'tcx>,
173 ) -> InterpResult<'tcx, bool> {
174 // Fast path: equal types are definitely compatible.
175 if caller.ty == callee.ty {
176 return interp_ok(true);
177 }
178 // 1-ZSTs are compatible with all 1-ZSTs (and with nothing else).
179 if caller.is_1zst() || callee.is_1zst() {
180 return interp_ok(caller.is_1zst() && callee.is_1zst());
181 }
182 // Unfold newtypes and NPO optimizations.
183 let unfold = |layout: TyAndLayout<'tcx>| {
184 self.unfold_npo(self.unfold_transparent(layout, /* may_unfold */ |_def| true))
185 };
186 let caller = unfold(caller)?;
187 let callee = unfold(callee)?;
188 // Now see if these inner types are compatible.
189
190 // Compatible pointer types. For thin pointers, we have to accept even non-`repr(transparent)`
191 // things as compatible due to `DispatchFromDyn`. For instance, `Rc<i32>` and `*mut i32`
192 // must be compatible. So we just accept everything with Pointer ABI as compatible,
193 // even if this will accept some code that is not stably guaranteed to work.
194 // This also handles function pointers.
195 let thin_pointer = |layout: TyAndLayout<'tcx>| match layout.backend_repr {
196 abi::BackendRepr::Scalar(s) => match s.primitive() {
197 abi::Primitive::Pointer(addr_space) => Some(addr_space),
198 _ => None,
199 },
200 _ => None,
201 };
202 if let (Some(caller), Some(callee)) = (thin_pointer(caller), thin_pointer(callee)) {
203 return interp_ok(caller == callee);
204 }
205 // For wide pointers we have to get the pointee type.
206 let pointee_ty = |ty: Ty<'tcx>| -> InterpResult<'tcx, Option<Ty<'tcx>>> {
207 // We cannot use `builtin_deref` here since we need to reject `Box<T, MyAlloc>`.
208 interp_ok(Some(match ty.kind() {
209 ty::Ref(_, ty, _) => *ty,
210 ty::RawPtr(ty, _) => *ty,
211 // We only accept `Box` with the default allocator.
212 _ if ty.is_box_global(*self.tcx) => ty.expect_boxed_ty(),
213 _ => return interp_ok(None),
214 }))
215 };
216 if let (Some(caller), Some(callee)) = (pointee_ty(caller.ty)?, pointee_ty(callee.ty)?) {
217 // This is okay if they have the same metadata type.
218 let meta_ty = |ty: Ty<'tcx>| {
219 // Even if `ty` is normalized, the search for the unsized tail will project
220 // to fields, which can yield non-normalized types. So we need to provide a
221 // normalization function.
222 let normalize = |ty| self.tcx.normalize_erasing_regions(self.typing_env, ty);
223 ty.ptr_metadata_ty(*self.tcx, normalize)
224 };
225 return interp_ok(meta_ty(caller) == meta_ty(callee));
226 }
227
228 // Compatible integer types (in particular, usize vs ptr-sized-u32/u64).
229 // `char` counts as `u32`.
230 let int_ty = |ty: Ty<'tcx>| {
231 Some(match ty.kind() {
232 ty::Int(ity) => (Integer::from_int_ty(&self.tcx, *ity), /* signed */ true),
233 ty::Uint(uty) => (Integer::from_uint_ty(&self.tcx, *uty), /* signed */ false),
234 ty::Char => (Integer::I32, /* signed */ false),
235 _ => return None,
236 })
237 };
238 if let (Some(caller), Some(callee)) = (int_ty(caller.ty), int_ty(callee.ty)) {
239 // This is okay if they are the same integer type.
240 return interp_ok(caller == callee);
241 }
242
243 // Fall back to exact equality.
244 interp_ok(caller == callee)
245 }
246
247 /// Returns a `bool` saying whether the two arguments are ABI-compatible.
248 pub fn check_argument_compat(
249 &self,
250 caller_abi: &ArgAbi<'tcx, Ty<'tcx>>,
251 callee_abi: &ArgAbi<'tcx, Ty<'tcx>>,
252 ) -> InterpResult<'tcx, bool> {
253 // We do not want to accept things as ABI-compatible that just "happen to be" compatible on the current target,
254 // so we implement a type-based check that reflects the guaranteed rules for ABI compatibility.
255 if self.layout_compat(caller_abi.layout, callee_abi.layout)? {
256 // Ensure that our checks imply actual ABI compatibility for this concrete call.
257 // (This can fail e.g. if `#[rustc_nonnull_optimization_guaranteed]` is used incorrectly.)
258 assert!(caller_abi.eq_abi(callee_abi));
259 interp_ok(true)
260 } else {
261 trace!(
262 "check_argument_compat: incompatible ABIs:\ncaller: {:?}\ncallee: {:?}",
263 caller_abi, callee_abi
264 );
265 interp_ok(false)
266 }
267 }
268
269 /// Initialize a single callee argument, checking the types for compatibility.
270 fn pass_argument<'x, 'y>(
271 &mut self,
272 caller_args: &mut impl Iterator<
273 Item = (&'x FnArg<'tcx, M::Provenance>, &'y ArgAbi<'tcx, Ty<'tcx>>),
274 >,
275 callee_abi: &ArgAbi<'tcx, Ty<'tcx>>,
276 callee_arg_idx: usize,
277 callee_arg: &mir::Place<'tcx>,
278 callee_ty: Ty<'tcx>,
279 already_live: bool,
280 ) -> InterpResult<'tcx>
281 where
282 'tcx: 'x,
283 'tcx: 'y,
284 {
285 assert_eq!(callee_ty, callee_abi.layout.ty);
286 if matches!(callee_abi.mode, PassMode::Ignore) {
287 // This one is skipped. Still must be made live though!
288 if !already_live {
289 self.storage_live(callee_arg.as_local().unwrap())?;
290 }
291 return interp_ok(());
292 }
293 // Find next caller arg.
294 let Some((caller_arg, caller_abi)) = caller_args.next() else {
295 throw_ub_custom!(fluent::const_eval_not_enough_caller_args);
296 };
297 assert_eq!(caller_arg.layout().layout, caller_abi.layout.layout);
298 // Sadly we cannot assert that `caller_arg.layout().ty` and `caller_abi.layout.ty` are
299 // equal; in closures the types sometimes differ. We just hope that `caller_abi` is the
300 // right type to print to the user.
301
302 // Check compatibility
303 if !self.check_argument_compat(caller_abi, callee_abi)? {
304 throw_ub!(AbiMismatchArgument {
305 arg_idx: callee_arg_idx,
306 caller_ty: caller_abi.layout.ty,
307 callee_ty: callee_abi.layout.ty
308 });
309 }
310 // We work with a copy of the argument for now; if this is in-place argument passing, we
311 // will later protect the source it comes from. This means the callee cannot observe if we
312 // did in-place or by-copy argument passing, except for pointer equality tests.
313 let caller_arg_copy = self.copy_fn_arg(caller_arg);
314 if !already_live {
315 let local = callee_arg.as_local().unwrap();
316 let meta = caller_arg_copy.meta();
317 // `check_argument_compat` ensures that if metadata is needed, both have the same type,
318 // so we know they will use the metadata the same way.
319 assert!(!meta.has_meta() || caller_arg_copy.layout.ty == callee_ty);
320
321 self.storage_live_dyn(local, meta)?;
322 }
323 // Now we can finally actually evaluate the callee place.
324 let callee_arg = self.eval_place(*callee_arg)?;
325 // We allow some transmutes here.
326 // FIXME: Depending on the PassMode, this should reset some padding to uninitialized. (This
327 // is true for all `copy_op`, but there are a lot of special cases for argument passing
328 // specifically.)
329 self.copy_op_allow_transmute(&caller_arg_copy, &callee_arg)?;
330 // If this was an in-place pass, protect the place it comes from for the duration of the call.
331 if let FnArg::InPlace(mplace) = caller_arg {
332 M::protect_in_place_function_argument(self, mplace)?;
333 }
334 interp_ok(())
335 }
336
337 /// The main entry point for creating a new stack frame: performs ABI checks and initializes
338 /// arguments.
339 #[instrument(skip(self), level = "trace")]
340 pub fn init_stack_frame(
341 &mut self,
342 instance: Instance<'tcx>,
343 body: &'tcx mir::Body<'tcx>,
344 caller_fn_abi: &FnAbi<'tcx, Ty<'tcx>>,
345 args: &[FnArg<'tcx, M::Provenance>],
346 with_caller_location: bool,
347 destination: &PlaceTy<'tcx, M::Provenance>,
348 mut cont: ReturnContinuation,
349 ) -> InterpResult<'tcx> {
350 let _trace = enter_trace_span!(M, step::init_stack_frame, %instance, tracing_separate_thread = Empty);
351
352 // Compute callee information.
353 // FIXME: for variadic support, do we have to somehow determine callee's extra_args?
354 let callee_fn_abi = self.fn_abi_of_instance(instance, ty::List::empty())?;
355
356 if callee_fn_abi.c_variadic || caller_fn_abi.c_variadic {
357 throw_unsup_format!("calling a c-variadic function is not supported");
358 }
359
360 if caller_fn_abi.conv != callee_fn_abi.conv {
361 throw_ub_custom!(
362 fluent::const_eval_incompatible_calling_conventions,
363 callee_conv = format!("{}", callee_fn_abi.conv),
364 caller_conv = format!("{}", caller_fn_abi.conv),
365 )
366 }
367
368 // Check that all target features required by the callee (i.e., from
369 // the attribute `#[target_feature(enable = ...)]`) are enabled at
370 // compile time.
371 M::check_fn_target_features(self, instance)?;
372
373 if !callee_fn_abi.can_unwind {
374 // The callee cannot unwind, so force the `Unreachable` unwind handling.
375 match &mut cont {
376 ReturnContinuation::Stop { .. } => {}
377 ReturnContinuation::Goto { unwind, .. } => {
378 *unwind = mir::UnwindAction::Unreachable;
379 }
380 }
381 }
382
383 // *Before* pushing the new frame, determine whether the return destination is in memory.
384 // Need to use `place_to_op` to be *sure* we get the mplace if there is one.
385 let destination_mplace = self.place_to_op(destination)?.as_mplace_or_imm().left();
386
387 // Push the "raw" frame -- this leaves locals uninitialized.
388 self.push_stack_frame_raw(instance, body, destination, cont)?;
389
390 // If an error is raised here, pop the frame again to get an accurate backtrace.
391 // To this end, we wrap it all in a `try` block.
392 let res: InterpResult<'tcx> = try {
393 trace!(
394 "caller ABI: {:#?}, args: {:#?}",
395 caller_fn_abi,
396 args.iter()
397 .map(|arg| (
398 arg.layout().ty,
399 match arg {
400 FnArg::Copy(op) => format!("copy({op:?})"),
401 FnArg::InPlace(mplace) => format!("in-place({mplace:?})"),
402 }
403 ))
404 .collect::<Vec<_>>()
405 );
406 trace!(
407 "spread_arg: {:?}, locals: {:#?}",
408 body.spread_arg,
409 body.args_iter()
410 .map(|local| (
411 local,
412 self.layout_of_local(self.frame(), local, None).unwrap().ty,
413 ))
414 .collect::<Vec<_>>()
415 );
416
417 // In principle, we have two iterators: where the arguments come from, and where
418 // they go to.
419
420 // The "where they come from" part is easy, we expect the caller to do any special handling
421 // that might be required here (e.g. for untupling).
422 // If `with_caller_location` is set we pretend there is an extra argument (that
423 // we will not pass; our `caller_location` intrinsic implementation walks the stack instead).
424 assert_eq!(
425 args.len() + if with_caller_location { 1 } else { 0 },
426 caller_fn_abi.args.len(),
427 "mismatch between caller ABI and caller arguments",
428 );
429 let mut caller_args = args
430 .iter()
431 .zip(caller_fn_abi.args.iter())
432 .filter(|arg_and_abi| !matches!(arg_and_abi.1.mode, PassMode::Ignore));
433
434 // Now we have to spread them out across the callee's locals,
435 // taking into account the `spread_arg`. If we could write
436 // this as a single iterator (that handles `spread_arg`), then
437 // `pass_argument` would be the loop body. It takes care to
438 // not advance `caller_args` for ignored arguments.
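// For example, when a closure is called, MIR passes its arguments as a single tuple via the
// `rust-call` ABI: the caller side has already untupled that tuple into individual `FnArg`s
// (see `init_fn_call`), while the callee body declares one tuple-typed local marked as
// `spread_arg`. The loop below makes that local live and then fills it field by field,
// consuming one caller argument per field.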
439 let mut callee_args_abis = callee_fn_abi.args.iter().enumerate();
440 for local in body.args_iter() {
441 // Construct the destination place for this argument. At this point all
442 // locals are still dead, so we cannot construct a `PlaceTy`.
443 let dest = mir::Place::from(local);
444 // `layout_of_local` does more than just the instantiation we need to get the
445 // type, but the result gets cached so this avoids calling the instantiation
446 // query *again* the next time this local is accessed.
447 let ty = self.layout_of_local(self.frame(), local, None)?.ty;
448 if Some(local) == body.spread_arg {
449 // Make the local live once, then fill in the value field by field.
450 self.storage_live(local)?;
451 // Must be a tuple
452 let ty::Tuple(fields) = ty.kind() else {
453 span_bug!(self.cur_span(), "non-tuple type for `spread_arg`: {ty}")
454 };
455 for (i, field_ty) in fields.iter().enumerate() {
456 let dest = dest.project_deeper(
457 &[mir::ProjectionElem::Field(FieldIdx::from_usize(i), field_ty)],
458 *self.tcx,
459 );
460 let (idx, callee_abi) = callee_args_abis.next().unwrap();
461 self.pass_argument(
462 &mut caller_args,
463 callee_abi,
464 idx,
465 &dest,
466 field_ty,
467 /* already_live */ true,
468 )?;
469 }
470 } else {
471 // Normal argument. Cannot mark it as live yet, it might be unsized!
472 let (idx, callee_abi) = callee_args_abis.next().unwrap();
473 self.pass_argument(
474 &mut caller_args,
475 callee_abi,
476 idx,
477 &dest,
478 ty,
479 /* already_live */ false,
480 )?;
481 }
482 }
483 // If the callee needs a caller location, pretend we consume one more argument from the ABI.
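// (This corresponds to the implicit `&'static Location<'static>` argument that
// `#[track_caller]` functions receive in codegen; the interpreter instead reconstructs
// caller locations by walking the stack, so we just skip over that ABI slot.)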
484 if instance.def.requires_caller_location(*self.tcx) {
485 callee_args_abis.next().unwrap();
486 }
487 // Now we should have no more caller args or callee arg ABIs
488 assert!(
489 callee_args_abis.next().is_none(),
490 "mismatch between callee ABI and callee body arguments"
491 );
492 if caller_args.next().is_some() {
493 throw_ub_custom!(fluent::const_eval_too_many_caller_args);
494 }
495 // Don't forget to check the return type!
496 if !self.check_argument_compat(&caller_fn_abi.ret, &callee_fn_abi.ret)? {
497 throw_ub!(AbiMismatchReturn {
498 caller_ty: caller_fn_abi.ret.layout.ty,
499 callee_ty: callee_fn_abi.ret.layout.ty
500 });
501 }
502
503 // Protect return place for in-place return value passing.
504 // We only need to protect anything if this is actually an in-memory place.
505 if let Some(mplace) = destination_mplace {
506 M::protect_in_place_function_argument(self, &mplace)?;
507 }
508
509 // Don't forget to mark "initially live" locals as live.
510 self.storage_live_for_always_live_locals()?;
511 };
512 res.inspect_err_kind(|_| {
513 // Don't show the incomplete stack frame in the error stacktrace.
514 self.stack_mut().pop();
515 })
516 }
517
518 /// Initiate a call to this function -- pushing the stack frame and initializing the arguments.
519 ///
520 /// `caller_fn_abi` is used to determine if all the arguments are passed the proper way.
521 /// However, we also need `caller_abi` to determine if we need to do untupling of arguments.
522 ///
523 /// `with_caller_location` indicates whether the caller passed a caller location. Miri
524 /// implements caller locations without argument passing, but to match `FnAbi` we need to know
525 /// when those arguments are present.
526 pub(super) fn init_fn_call(
527 &mut self,
528 fn_val: FnVal<'tcx, M::ExtraFnVal>,
529 (caller_abi, caller_fn_abi): (ExternAbi, &FnAbi<'tcx, Ty<'tcx>>),
530 args: &[FnArg<'tcx, M::Provenance>],
531 with_caller_location: bool,
532 destination: &PlaceTy<'tcx, M::Provenance>,
533 target: Option<mir::BasicBlock>,
534 unwind: mir::UnwindAction,
535 ) -> InterpResult<'tcx> {
536 let _trace =
537 enter_trace_span!(M, step::init_fn_call, tracing_separate_thread = Empty, ?fn_val)
538 .or_if_tracing_disabled(|| trace!("init_fn_call: {:#?}", fn_val));
539
540 let instance = match fn_val {
541 FnVal::Instance(instance) => instance,
542 FnVal::Other(extra) => {
543 return M::call_extra_fn(
544 self,
545 extra,
546 caller_fn_abi,
547 args,
548 destination,
549 target,
550 unwind,
551 );
552 }
553 };
554
555 match instance.def {
556 ty::InstanceKind::Intrinsic(def_id) => {
557 assert!(self.tcx.intrinsic(def_id).is_some());
558 // FIXME: Should `InPlace` arguments be reset to uninit?
559 if let Some(fallback) = M::call_intrinsic(
560 self,
561 instance,
562 &self.copy_fn_args(args),
563 destination,
564 target,
565 unwind,
566 )? {
567 assert!(!self.tcx.intrinsic(fallback.def_id()).unwrap().must_be_overridden);
568 assert_matches!(fallback.def, ty::InstanceKind::Item(_));
569 return self.init_fn_call(
570 FnVal::Instance(fallback),
571 (caller_abi, caller_fn_abi),
572 args,
573 with_caller_location,
574 destination,
575 target,
576 unwind,
577 );
578 } else {
579 interp_ok(())
580 }
581 }
582 ty::InstanceKind::VTableShim(..)
583 | ty::InstanceKind::ReifyShim(..)
584 | ty::InstanceKind::ClosureOnceShim { .. }
585 | ty::InstanceKind::ConstructCoroutineInClosureShim { .. }
586 | ty::InstanceKind::FnPtrShim(..)
587 | ty::InstanceKind::DropGlue(..)
588 | ty::InstanceKind::CloneShim(..)
589 | ty::InstanceKind::FnPtrAddrShim(..)
590 | ty::InstanceKind::ThreadLocalShim(..)
591 | ty::InstanceKind::AsyncDropGlueCtorShim(..)
592 | ty::InstanceKind::AsyncDropGlue(..)
593 | ty::InstanceKind::FutureDropPollShim(..)
594 | ty::InstanceKind::Item(_) => {
595 // We need MIR for this fn.
596 // Note that this can be an intrinsic, if we are executing its fallback body.
597 let Some((body, instance)) = M::find_mir_or_eval_fn(
598 self,
599 instance,
600 caller_fn_abi,
601 args,
602 destination,
603 target,
604 unwind,
605 )?
606 else {
607 return interp_ok(());
608 };
609
610 // Special handling for the closure ABI: untuple the last argument.
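// For example, for a `rust-call` invocation like `Fn::call(&f, (a, b))`, the last caller
// argument is the tuple `(a, b)`; we replace it by its fields so the argument list matches
// the callee's `FnAbi` (which lists the tuple fields as separate arguments) and the
// `spread_arg` handling in `init_stack_frame`.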
611 let args: Cow<'_, [FnArg<'tcx, M::Provenance>]> =
612 if caller_abi == ExternAbi::RustCall && !args.is_empty() {
613 // Untuple
614 let (untuple_arg, args) = args.split_last().unwrap();
615 trace!("init_fn_call: Will pass last argument by untupling");
616 Cow::from(
617 args.iter()
618 .map(|a| interp_ok(a.clone()))
619 .chain((0..untuple_arg.layout().fields.count()).map(|i| {
620 self.fn_arg_field(untuple_arg, FieldIdx::from_usize(i))
621 }))
622 .collect::<InterpResult<'_, Vec<_>>>()?,
623 )
624 } else {
625 // Plain arg passing
626 Cow::from(args)
627 };
628
629 self.init_stack_frame(
630 instance,
631 body,
632 caller_fn_abi,
633 &args,
634 with_caller_location,
635 destination,
636 ReturnContinuation::Goto { ret: target, unwind },
637 )
638 }
639 // `InstanceKind::Virtual` does not have callable MIR. Calls to `Virtual` instances must be
640 // codegen'd / interpreted as virtual calls through the vtable.
641 ty::InstanceKind::Virtual(def_id, idx) => {
642 let mut args = args.to_vec();
643 // We have to support every "dyn-compatible receiver" type. So we search for a
644 // pointer or `dyn Trait` type, which could be wrapped in newtypes, and recursively
645 // unwrap those newtypes until we get there.
646 // An `InPlace` does nothing here; we keep the original receiver intact. We can't
647 // really pass the argument in-place anyway, and we are constructing a new
648 // `Immediate` receiver.
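// For example, a `Box<dyn Trait>` or `Pin<&mut dyn Trait>` receiver is neither a reference
// nor a raw pointer, so we keep projecting to its only non-1-ZST field until we reach the
// underlying pointer (or `dyn Trait` place) for the receiver.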
649 let mut receiver = self.copy_fn_arg(&args[0]);
650 let receiver_place = loop {
651 match receiver.layout.ty.kind() {
652 ty::Ref(..) | ty::RawPtr(..) => {
653 // We do *not* use `deref_pointer` here: we don't want to conceptually
654 // create a place that must be dereferenceable, since the receiver might
655 // be a raw pointer and (for `*const dyn Trait`) we don't need to
656 // actually access memory to resolve this method.
657 // Also see <https://github.com/rust-lang/miri/issues/2786>.
658 let val = self.read_immediate(&receiver)?;
659 break self.ref_to_mplace(&val)?;
660 }
661 ty::Dynamic(.., ty::Dyn) => break receiver.assert_mem_place(), // no immediate unsized values
662 _ => {
663 // Not there yet, search for the only non-ZST field.
664 // (The rules for `DispatchFromDyn` ensure there's exactly one such field.)
665 let (idx, _) = receiver.layout.non_1zst_field(self).expect(
666 "not exactly one non-1-ZST field in a `DispatchFromDyn` type",
667 );
668 receiver = self.project_field(&receiver, idx)?;
669 }
670 }
671 };
672
673 // Obtain the underlying trait we are working on, and the adjusted receiver argument.
674 // Doesn't have to be a `dyn Trait`, but the unsized tail must be `dyn Trait`.
675 // (For that reason we also cannot use `unpack_dyn_trait`.)
676 let receiver_tail =
677 self.tcx.struct_tail_for_codegen(receiver_place.layout.ty, self.typing_env);
678 let ty::Dynamic(receiver_trait, _, ty::Dyn) = receiver_tail.kind() else {
679 span_bug!(self.cur_span(), "dynamic call on non-`dyn` type {}", receiver_tail)
680 };
681 assert!(receiver_place.layout.is_unsized());
682
683 // Get the required information from the vtable.
684 let vptr = receiver_place.meta().unwrap_meta().to_pointer(self)?;
685 let dyn_ty = self.get_ptr_vtable_ty(vptr, Some(receiver_trait))?;
686 let adjusted_recv = receiver_place.ptr();
687
688 // Now determine the actual method to call. Usually we use the easy way of just
689 // looking up the method at index `idx`.
690 let vtable_entries = self.vtable_entries(receiver_trait.principal(), dyn_ty);
691 let Some(ty::VtblEntry::Method(fn_inst)) = vtable_entries.get(idx).copied() else {
692 // FIXME(fee1-dead) these could be variants of the UB info enum instead of this
693 throw_ub_custom!(fluent::const_eval_dyn_call_not_a_method);
694 };
695 trace!("Virtual call dispatches to {fn_inst:#?}");
696 // We can also do the lookup based on `def_id` and `dyn_ty`, and check that that
697 // produces the same result.
698 self.assert_virtual_instance_matches_concrete(dyn_ty, def_id, instance, fn_inst);
699
700 // Adjust receiver argument. Layout can be any (thin) ptr.
701 let receiver_ty = Ty::new_mut_ptr(self.tcx.tcx, dyn_ty);
702 args[0] = FnArg::Copy(
703 ImmTy::from_immediate(
704 Scalar::from_maybe_pointer(adjusted_recv, self).into(),
705 self.layout_of(receiver_ty)?,
706 )
707 .into(),
708 );
709 trace!("Patched receiver operand to {:#?}", args[0]);
710 // Need to also adjust the type in the ABI. Strangely, the layout there is actually
711 // already fine! Just the type is bogus. This is due to what `force_thin_self_ptr`
712 // does in `fn_abi_new_uncached`; supposedly, codegen relies on having the bogus
713 // type, so we just patch this up locally.
714 let mut caller_fn_abi = caller_fn_abi.clone();
715 caller_fn_abi.args[0].layout.ty = receiver_ty;
716
717 // recurse with concrete function
718 self.init_fn_call(
719 FnVal::Instance(fn_inst),
720 (caller_abi, &caller_fn_abi),
721 &args,
722 with_caller_location,
723 destination,
724 target,
725 unwind,
726 )
727 }
728 }
729 }
730
731 fn assert_virtual_instance_matches_concrete(
732 &self,
733 dyn_ty: Ty<'tcx>,
734 def_id: DefId,
735 virtual_instance: ty::Instance<'tcx>,
736 concrete_instance: ty::Instance<'tcx>,
737 ) {
738 let tcx = *self.tcx;
739
740 let trait_def_id = tcx.parent(def_id);
741 let virtual_trait_ref = ty::TraitRef::from_assoc(tcx, trait_def_id, virtual_instance.args);
742 let existential_trait_ref = ty::ExistentialTraitRef::erase_self_ty(tcx, virtual_trait_ref);
743 let concrete_trait_ref = existential_trait_ref.with_self_ty(tcx, dyn_ty);
744
745 let concrete_method = {
746 let _trace = enter_trace_span!(M, resolve::expect_resolve_for_vtable, ?def_id);
747 Instance::expect_resolve_for_vtable(
748 tcx,
749 self.typing_env,
750 def_id,
751 virtual_instance.args.rebase_onto(tcx, trait_def_id, concrete_trait_ref.args),
752 self.cur_span(),
753 )
754 };
755 assert_eq!(concrete_instance, concrete_method);
756 }
757
758 /// Initiate a tail call to this function -- popping the current stack frame, pushing the new
759 /// stack frame and initializing the arguments.
760 pub(super) fn init_fn_tail_call(
761 &mut self,
762 fn_val: FnVal<'tcx, M::ExtraFnVal>,
763 (caller_abi, caller_fn_abi): (ExternAbi, &FnAbi<'tcx, Ty<'tcx>>),
764 args: &[FnArg<'tcx, M::Provenance>],
765 with_caller_location: bool,
766 ) -> InterpResult<'tcx> {
767 trace!("init_fn_tail_call: {:#?}", fn_val);
768
768 // This is the "canonical" implementation of tail calls,
770 // a pop of the current stack frame, followed by a normal call
771 // which pushes a new stack frame, with the return address from
772 // the popped stack frame.
773 //
774 // Note that we are using `pop_stack_frame_raw` and not `return_from_current_stack_frame`,
775 // as the latter "executes" the goto to the return block, but we don't want that;
776 // only the tail-called function should return to the current return block.
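// In other words, for an explicit tail call the current frame is popped *before* the callee
// frame is pushed, so the callee returns (and unwinds) directly to our caller.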
777 let StackPopInfo { return_action, return_cont, return_place } =
778 self.pop_stack_frame_raw(false, |_this, _return_place| {
779 // This function's return value is just discarded; the tail-callee will fill in the return place instead.
780 interp_ok(())
781 })?;
782
783 assert_eq!(return_action, ReturnAction::Normal);
784
785 // Take the "stack pop cleanup" info, and use that to initiate the next call.
786 let ReturnContinuation::Goto { ret, unwind } = return_cont else {
787 bug!("can't tailcall as root");
788 };
789
790 // FIXME(explicit_tail_calls):
791 // we should check whether both the caller and the callee can (or can't) unwind,
792 // see <https://github.com/rust-lang/rust/pull/113128#issuecomment-1614979803>
793
794 self.init_fn_call(
795 fn_val,
796 (caller_abi, caller_fn_abi),
797 args,
798 with_caller_location,
799 &return_place,
800 ret,
801 unwind,
802 )
803 }
804
805 pub(super) fn init_drop_in_place_call(
806 &mut self,
807 place: &PlaceTy<'tcx, M::Provenance>,
808 instance: ty::Instance<'tcx>,
809 target: mir::BasicBlock,
810 unwind: mir::UnwindAction,
811 ) -> InterpResult<'tcx> {
812 trace!("init_drop_in_place_call: {:?},\n instance={:?}", place, instance);
813 // We take the address of the object. This may well be unaligned, which is fine
814 // for us here. However, unaligned accesses will probably make the actual drop
815 // implementation fail -- a problem shared by rustc.
816 let place = self.force_allocation(place)?;
817
818 // We behave a bit differently from codegen here.
819 // Codegen creates an `InstanceKind::Virtual` with index 0 (the slot of the drop method) and
820 // then dispatches that to the normal call machinery. However, our call machinery currently
821 // only supports calling `VtblEntry::Method`; it would choke on a `MetadataDropInPlace`. So
822 // instead we do the virtual call stuff ourselves. It's easier here than in `eval_fn_call`
823 // since we can just get a place of the underlying type and use `mplace_to_ref`.
824 let place = match place.layout.ty.kind() {
825 ty::Dynamic(data, _, ty::Dyn) => {
826 // Dropping a trait object. Need to find actual drop fn.
827 self.unpack_dyn_trait(&place, data)?
828 }
829 _ => {
830 debug_assert_eq!(
831 instance,
832 ty::Instance::resolve_drop_in_place(*self.tcx, place.layout.ty)
833 );
834 place
835 }
836 };
837 let instance = {
838 let _trace =
839 enter_trace_span!(M, resolve::resolve_drop_in_place, ty = ?place.layout.ty);
840 ty::Instance::resolve_drop_in_place(*self.tcx, place.layout.ty)
841 };
842 let fn_abi = self.fn_abi_of_instance(instance, ty::List::empty())?;
843
844 let arg = self.mplace_to_ref(&place)?;
845 let ret = MPlaceTy::fake_alloc_zst(self.layout_of(self.tcx.types.unit)?);
846
847 self.init_fn_call(
848 FnVal::Instance(instance),
849 (ExternAbi::Rust, fn_abi),
850 &[FnArg::Copy(arg.into())],
851 false,
852 &ret.into(),
853 Some(target),
854 unwind,
855 )
856 }
857
858 /// Pops the current frame from the stack, copies the return value to the caller, deallocates
859 /// the memory for allocated locals, and jumps to an appropriate place.
860 ///
861 /// If `unwinding` is `false`, then we are performing a normal return
862 /// from a function. In this case, we jump back into the frame of the caller,
863 /// and continue execution as normal.
864 ///
865 /// If `unwinding` is `true`, then we are in the middle of a panic,
866 /// and need to unwind this frame. In this case, we jump to the
867 /// `cleanup` block for the function, which is responsible for running
868 /// `Drop` impls for any locals that have been initialized at this point.
869 /// The cleanup block ends with a special `Resume` terminator, which will
870 /// cause us to continue unwinding.
871 #[instrument(skip(self), level = "trace")]
872 pub(super) fn return_from_current_stack_frame(
873 &mut self,
874 unwinding: bool,
875 ) -> InterpResult<'tcx> {
876 info!(
877 "popping stack frame ({})",
878 if unwinding { "during unwinding" } else { "returning from function" }
879 );
880
881 // Check `unwinding`.
882 assert_eq!(
883 unwinding,
884 match self.frame().loc {
885 Left(loc) => self.body().basic_blocks[loc.block].is_cleanup,
886 Right(_) => true,
887 }
888 );
889 if unwinding && self.frame_idx() == 0 {
890 throw_ub_custom!(fluent::const_eval_unwind_past_top);
891 }
892
893 // Get out the return value. Must happen *before* the frame is popped as we have to get the
894 // local's value out.
895 let return_op =
896 self.local_to_op(mir::RETURN_PLACE, None).expect("return place should always be live");
897 // Do the actual pop + copy.
898 let stack_pop_info = self.pop_stack_frame_raw(unwinding, |this, return_place| {
899 this.copy_op_allow_transmute(&return_op, return_place)?;
900 trace!("return value: {:?}", this.dump_place(return_place));
901 interp_ok(())
902 })?;
903
904 match stack_pop_info.return_action {
905 ReturnAction::Normal => {}
906 ReturnAction::NoJump => {
907 // The hook already did everything.
908 return interp_ok(());
909 }
910 ReturnAction::NoCleanup => {
911 // If we are not doing cleanup, also skip everything else.
912 assert!(self.stack().is_empty(), "only the topmost frame should ever be leaked");
913 assert!(!unwinding, "tried to skip cleanup during unwinding");
914 // Don't jump anywhere.
915 return interp_ok(());
916 }
917 }
918
919 // Normal return, figure out where to jump.
920 if unwinding {
921 // Follow the unwind edge.
922 match stack_pop_info.return_cont {
923 ReturnContinuation::Goto { unwind, .. } => {
924 // This must be the very last thing that happens, since it can in fact push a new stack frame.
925 self.unwind_to_block(unwind)
926 }
927 ReturnContinuation::Stop { .. } => {
928 panic!("encountered ReturnContinuation::Stop when unwinding!")
929 }
930 }
931 } else {
932 // Follow the normal return edge.
933 match stack_pop_info.return_cont {
934 ReturnContinuation::Goto { ret, .. } => self.return_to_block(ret),
935 ReturnContinuation::Stop { .. } => {
936 assert!(
937 self.stack().is_empty(),
938 "only the bottommost frame can have ReturnContinuation::Stop"
939 );
940 interp_ok(())
941 }
942 }
943 }
944 }
945}