rustc_const_eval/interpret/step.rs

//! This module contains the `InterpCx` methods for executing a single step of the interpreter.
//!
//! The main entry point is the `step` method.
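//!
//! A driver typically calls `step` in a loop until it returns `false`, i.e. until the bottom
//! stack frame has returned. A minimal sketch (assuming `ecx` is an already set-up `InterpCx`
//! for some `Machine`; not a complete driver):
//!
//! ```ignore (illustrative sketch)
//! // Execute one statement or terminator per iteration until there is nothing left to do.
//! while ecx.step()? {}
//! ```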

use either::Either;
use rustc_abi::{FIRST_VARIANT, FieldIdx};
use rustc_index::IndexSlice;
use rustc_middle::ty::{self, Instance, Ty};
use rustc_middle::{bug, mir, span_bug};
use rustc_span::source_map::Spanned;
use rustc_target::callconv::FnAbi;
use tracing::field::Empty;
use tracing::{info, instrument, trace};

use super::{
    FnArg, FnVal, ImmTy, Immediate, InterpCx, InterpResult, Machine, MemPlaceMeta, PlaceTy,
    Projectable, Scalar, interp_ok, throw_ub, throw_unsup_format,
};
use crate::interpret::EnteredTraceSpan;
use crate::{enter_trace_span, util};

struct EvaluatedCalleeAndArgs<'tcx, M: Machine<'tcx>> {
    callee: FnVal<'tcx, M::ExtraFnVal>,
    args: Vec<FnArg<'tcx, M::Provenance>>,
    fn_sig: ty::FnSig<'tcx>,
    fn_abi: &'tcx FnAbi<'tcx, Ty<'tcx>>,
    /// True if the function is marked as `#[track_caller]` ([`ty::InstanceKind::requires_caller_location`])
    with_caller_location: bool,
}

impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
    /// Returns `true` as long as there are more things to do.
    ///
    /// This is used by [priroda](https://github.com/oli-obk/priroda).
    ///
    /// This is marked `#[inline(always)]` to work around adversarial codegen when `opt-level = 3`.
    #[inline(always)]
    pub fn step(&mut self) -> InterpResult<'tcx, bool> {
        if self.stack().is_empty() {
            return interp_ok(false);
        }

        let Either::Left(loc) = self.frame().loc else {
            // We are unwinding and this fn has no cleanup code.
            // Just go on unwinding.
            trace!("unwinding: skipping frame");
            self.return_from_current_stack_frame(/* unwinding */ true)?;
            return interp_ok(true);
        };
        let basic_block = &self.body().basic_blocks[loc.block];

        if let Some(stmt) = basic_block.statements.get(loc.statement_index) {
            let old_frames = self.frame_idx();
            self.eval_statement(stmt)?;
            // Make sure we are not updating `statement_index` of the wrong frame.
            assert_eq!(old_frames, self.frame_idx());
            // Advance the program counter.
            self.frame_mut().loc.as_mut().left().unwrap().statement_index += 1;
            return interp_ok(true);
        }

        M::before_terminator(self)?;

        let terminator = basic_block.terminator();
        self.eval_terminator(terminator)?;
        if !self.stack().is_empty() {
            if let Either::Left(loc) = self.frame().loc {
                info!("// executing {:?}", loc.block);
            }
        }
        interp_ok(true)
    }

    /// Runs the interpretation logic for the given `mir::Statement` at the current frame and
    /// statement counter.
    ///
    /// This does NOT move the statement counter forward; the caller has to do that!
    pub fn eval_statement(&mut self, stmt: &mir::Statement<'tcx>) -> InterpResult<'tcx> {
        let _span = enter_trace_span!(
            M,
            step::eval_statement,
            stmt = ?stmt.kind,
            span = ?stmt.source_info.span,
            tracing_separate_thread = Empty,
        )
        .or_if_tracing_disabled(|| info!(stmt = ?stmt.kind));

        use rustc_middle::mir::StatementKind::*;

        match &stmt.kind {
            Assign(box (place, rvalue)) => self.eval_rvalue_into_place(rvalue, *place)?,

            SetDiscriminant { place, variant_index } => {
                let dest = self.eval_place(**place)?;
                self.write_discriminant(*variant_index, &dest)?;
            }

            Deinit(place) => {
                let dest = self.eval_place(**place)?;
                self.write_uninit(&dest)?;
            }

            // Mark locals as alive
            StorageLive(local) => {
                self.storage_live(*local)?;
            }

            // Mark locals as dead
            StorageDead(local) => {
                self.storage_dead(*local)?;
            }

            // No dynamic semantics attached to `FakeRead`; the MIR
            // interpreter is solely intended for borrowck'ed code.
            FakeRead(..) => {}

            // Stacked Borrows.
            Retag(kind, place) => {
                let dest = self.eval_place(**place)?;
                M::retag_place_contents(self, *kind, &dest)?;
            }

            Intrinsic(box intrinsic) => self.eval_nondiverging_intrinsic(intrinsic)?,

            // Evaluate the place expression, without reading from it.
            PlaceMention(box place) => {
                let _ = self.eval_place(*place)?;
            }

            // This exists purely to guide borrowck lifetime inference, and does not have
            // an operational effect.
            AscribeUserType(..) => {}

            // Currently, Miri discards Coverage statements. Coverage statements are only injected
            // via an optional compile time MIR pass and have no side effects. Since Coverage
            // statements don't exist at the source level, it is safe for Miri to ignore them, even
            // for undefined behavior (UB) checks.
            //
            // A coverage counter inside a const expression (for example, a counter injected in a
            // const function) is discarded when the const is evaluated at compile time. Whether
            // this should change, and/or how to implement a const eval counter, is a subject of the
            // following issue:
            //
            // FIXME(#73156): Handle source code coverage in const eval
            Coverage(..) => {}

            ConstEvalCounter => {
                M::increment_const_eval_counter(self)?;
            }

            // Defined to do nothing. These are added by optimization passes, to avoid changing the
            // size of MIR constantly.
            Nop => {}

            // Only used for temporary lifetime lints
            BackwardIncompatibleDropHint { .. } => {}
        }

        interp_ok(())
    }

    /// Evaluate an assignment statement.
    ///
    /// There is no separate `eval_rvalue` function. Instead, the code for handling each rvalue
    /// type writes its results directly into the memory specified by the place.
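    ///
    /// For example, an assignment roughly like the following (illustrative MIR syntax) is handled
    /// by the `BinaryOp` arm below: both operands are read, the addition is performed, and the
    /// result is written directly into the memory backing `_1`.
    ///
    /// ```ignore (illustrative MIR)
    /// _1 = Add(copy _2, const 1_i32);
    /// ```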
    pub fn eval_rvalue_into_place(
        &mut self,
        rvalue: &mir::Rvalue<'tcx>,
        place: mir::Place<'tcx>,
    ) -> InterpResult<'tcx> {
        let dest = self.eval_place(place)?;
        // FIXME: ensure some kind of non-aliasing between LHS and RHS?
        // Also see https://github.com/rust-lang/rust/issues/68364.

        use rustc_middle::mir::Rvalue::*;
        match *rvalue {
            ThreadLocalRef(did) => {
                let ptr = M::thread_local_static_pointer(self, did)?;
                self.write_pointer(ptr, &dest)?;
            }

            Use(ref operand) => {
                // Avoid recomputing the layout
                let op = self.eval_operand(operand, Some(dest.layout))?;
                self.copy_op(&op, &dest)?;
            }

            CopyForDeref(place) => {
                let op = self.eval_place_to_op(place, Some(dest.layout))?;
                self.copy_op(&op, &dest)?;
            }

            BinaryOp(bin_op, box (ref left, ref right)) => {
                let layout = util::binop_left_homogeneous(bin_op).then_some(dest.layout);
                let left = self.read_immediate(&self.eval_operand(left, layout)?)?;
                let layout = util::binop_right_homogeneous(bin_op).then_some(left.layout);
                let right = self.read_immediate(&self.eval_operand(right, layout)?)?;
                let result = self.binary_op(bin_op, &left, &right)?;
                assert_eq!(result.layout, dest.layout, "layout mismatch for result of {bin_op:?}");
                self.write_immediate(*result, &dest)?;
            }

            UnaryOp(un_op, ref operand) => {
                // The operand always has the same type as the result.
                let val = self.read_immediate(&self.eval_operand(operand, Some(dest.layout))?)?;
                let result = self.unary_op(un_op, &val)?;
                assert_eq!(result.layout, dest.layout, "layout mismatch for result of {un_op:?}");
                self.write_immediate(*result, &dest)?;
            }

            NullaryOp(null_op, ty) => {
                let ty = self.instantiate_from_current_frame_and_normalize_erasing_regions(ty)?;
                let val = self.nullary_op(null_op, ty)?;
                self.write_immediate(*val, &dest)?;
            }

            Aggregate(box ref kind, ref operands) => {
                self.write_aggregate(kind, operands, &dest)?;
            }

            Repeat(ref operand, _) => {
                self.write_repeat(operand, &dest)?;
            }

            Len(place) => {
                let src = self.eval_place(place)?;
                let len = src.len(self)?;
                self.write_scalar(Scalar::from_target_usize(len, self), &dest)?;
            }

            Ref(_, borrow_kind, place) => {
                let src = self.eval_place(place)?;
                let place = self.force_allocation(&src)?;
                let val = ImmTy::from_immediate(place.to_ref(self), dest.layout);
                // A fresh reference was created, make sure it gets retagged.
                let val = M::retag_ptr_value(
                    self,
                    if borrow_kind.allows_two_phase_borrow() {
                        mir::RetagKind::TwoPhase
                    } else {
                        mir::RetagKind::Default
                    },
                    &val,
                )?;
                self.write_immediate(*val, &dest)?;
            }

            RawPtr(kind, place) => {
                // Figure out whether this is an addr_of of an already raw place.
                let place_base_raw = if place.is_indirect_first_projection() {
                    let ty = self.frame().body.local_decls[place.local].ty;
                    ty.is_raw_ptr()
                } else {
                    // Not a deref, and thus not raw.
                    false
                };

                let src = self.eval_place(place)?;
                let place = self.force_allocation(&src)?;
                let mut val = ImmTy::from_immediate(place.to_ref(self), dest.layout);
                if !place_base_raw && !kind.is_fake() {
                    // If this was not already raw, it needs retagging -- except for "fake"
                    // raw borrows whose defining property is that they do not get retagged.
                    val = M::retag_ptr_value(self, mir::RetagKind::Raw, &val)?;
                }
                self.write_immediate(*val, &dest)?;
            }

            ShallowInitBox(ref operand, _) => {
                let src = self.eval_operand(operand, None)?;
                let v = self.read_immediate(&src)?;
                self.write_immediate(*v, &dest)?;
            }

            Cast(cast_kind, ref operand, cast_ty) => {
                let src = self.eval_operand(operand, None)?;
                let cast_ty =
                    self.instantiate_from_current_frame_and_normalize_erasing_regions(cast_ty)?;
                self.cast(&src, cast_kind, cast_ty, &dest)?;
            }

            Discriminant(place) => {
                let op = self.eval_place_to_op(place, None)?;
                let variant = self.read_discriminant(&op)?;
                let discr = self.discriminant_for_variant(op.layout.ty, variant)?;
                self.write_immediate(*discr, &dest)?;
            }

            WrapUnsafeBinder(ref op, _ty) => {
                // Constructing an unsafe binder acts like a transmute
                // since the operand's layout does not change.
                let op = self.eval_operand(op, None)?;
                self.copy_op_allow_transmute(&op, &dest)?;
            }
        }

        trace!("{:?}", self.dump_place(&dest));

        interp_ok(())
    }

    /// Writes the aggregate to the destination.
    #[instrument(skip(self), level = "trace")]
    fn write_aggregate(
        &mut self,
        kind: &mir::AggregateKind<'tcx>,
        operands: &IndexSlice<FieldIdx, mir::Operand<'tcx>>,
        dest: &PlaceTy<'tcx, M::Provenance>,
    ) -> InterpResult<'tcx> {
        self.write_uninit(dest)?; // make sure all the padding ends up as uninit
        let (variant_index, variant_dest, active_field_index) = match *kind {
            mir::AggregateKind::Adt(_, variant_index, _, _, active_field_index) => {
                let variant_dest = self.project_downcast(dest, variant_index)?;
                (variant_index, variant_dest, active_field_index)
            }
            mir::AggregateKind::RawPtr(..) => {
                // Pointers don't have "fields" in the normal sense, so the
                // projection-based code below would either fail in projection
                // or in type mismatches. Instead, build an `Immediate` from
                // the parts and write that to the destination.
                let [data, meta] = &operands.raw else {
                    bug!("{kind:?} should have 2 operands, had {operands:?}");
                };
                let data = self.eval_operand(data, None)?;
                let data = self.read_pointer(&data)?;
                let meta = self.eval_operand(meta, None)?;
                let meta = if meta.layout.is_zst() {
                    MemPlaceMeta::None
                } else {
                    MemPlaceMeta::Meta(self.read_scalar(&meta)?)
                };
                let ptr_imm = Immediate::new_pointer_with_meta(data, meta, self);
                let ptr = ImmTy::from_immediate(ptr_imm, dest.layout);
                self.copy_op(&ptr, dest)?;
                return interp_ok(());
            }
            _ => (FIRST_VARIANT, dest.clone(), None),
        };
        if active_field_index.is_some() {
            assert_eq!(operands.len(), 1);
        }
        for (field_index, operand) in operands.iter_enumerated() {
            let field_index = active_field_index.unwrap_or(field_index);
            let field_dest = self.project_field(&variant_dest, field_index)?;
            let op = self.eval_operand(operand, Some(field_dest.layout))?;
            self.copy_op(&op, &field_dest)?;
        }
        self.write_discriminant(variant_index, dest)
    }

    /// Repeats `operand` into the destination. `dest` must have array type, and that type
    /// determines how often `operand` is repeated.
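    ///
    /// For example, an array expression like the following produces a `Repeat` rvalue; the
    /// element is written once and then bulk-copied over the remaining slots (illustrative only):
    ///
    /// ```ignore (illustrative)
    /// let buf = [0u8; 4096];
    /// ```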
    fn write_repeat(
        &mut self,
        operand: &mir::Operand<'tcx>,
        dest: &PlaceTy<'tcx, M::Provenance>,
    ) -> InterpResult<'tcx> {
        let src = self.eval_operand(operand, None)?;
        assert!(src.layout.is_sized());
        let dest = self.force_allocation(&dest)?;
        let length = dest.len(self)?;

        if length == 0 {
            // Nothing to copy... but let's still make sure that `dest` as a place is valid.
            self.get_place_alloc_mut(&dest)?;
        } else {
            // Write the src to the first element.
            let first = self.project_index(&dest, 0)?;
            self.copy_op(&src, &first)?;

            // This is performance-sensitive code for big static/const arrays! So we
            // avoid writing each operand individually and instead just make many copies
            // of the first element.
            let elem_size = first.layout.size;
            let first_ptr = first.ptr();
            let rest_ptr = first_ptr.wrapping_offset(elem_size, self);
            // No alignment requirement since `copy_op` above already checked it.
            self.mem_copy_repeatedly(
                first_ptr,
                rest_ptr,
                elem_size,
                length - 1,
                /*nonoverlapping:*/ true,
            )?;
        }

        interp_ok(())
    }

    /// Evaluate one argument of a function call.
    fn eval_fn_call_argument(
        &self,
        op: &mir::Operand<'tcx>,
    ) -> InterpResult<'tcx, FnArg<'tcx, M::Provenance>> {
        interp_ok(match op {
            mir::Operand::Copy(_) | mir::Operand::Constant(_) => {
                // Make a regular copy.
                let op = self.eval_operand(op, None)?;
                FnArg::Copy(op)
            }
            mir::Operand::Move(place) => {
                // If this place lives in memory, preserve its location.
                // We call `place_to_op`, whose result will be an `MPlaceTy` whenever there exists
                // an mplace for this place. (This is in contrast to `PlaceTy::as_mplace_or_local`
                // which can return a local even if that has an mplace.)
                let place = self.eval_place(*place)?;
                let op = self.place_to_op(&place)?;

                match op.as_mplace_or_imm() {
                    Either::Left(mplace) => FnArg::InPlace(mplace),
                    Either::Right(_imm) => {
                        // This argument doesn't live in memory, so there's no place
                        // to make inaccessible during the call.
                        // We rely on there not being any stray `PlaceTy` that would let the
                        // caller directly access this local!
                        // This is also crucial for tail calls, where we want the `FnArg` to
                        // stay valid when the old stack frame gets popped.
                        FnArg::Copy(op)
                    }
                }
            }
        })
    }

    /// Shared part of `Call` and `TailCall` implementation: finding and evaluating all the
    /// necessary information about callee and arguments to make a call.
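    ///
    /// For example, for a `Call` terminator roughly like the following (illustrative MIR syntax),
    /// this resolves `f`, evaluates `move _1` into a `FnArg`, and computes the signature and ABI:
    ///
    /// ```ignore (illustrative MIR)
    /// _0 = f(move _1) -> [return: bb1, unwind continue];
    /// ```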
    fn eval_callee_and_args(
        &self,
        terminator: &mir::Terminator<'tcx>,
        func: &mir::Operand<'tcx>,
        args: &[Spanned<mir::Operand<'tcx>>],
    ) -> InterpResult<'tcx, EvaluatedCalleeAndArgs<'tcx, M>> {
        let func = self.eval_operand(func, None)?;
        let args = args
            .iter()
            .map(|arg| self.eval_fn_call_argument(&arg.node))
            .collect::<InterpResult<'tcx, Vec<_>>>()?;

        let fn_sig_binder = func.layout.ty.fn_sig(*self.tcx);
        let fn_sig = self.tcx.normalize_erasing_late_bound_regions(self.typing_env, fn_sig_binder);
        let extra_args = &args[fn_sig.inputs().len()..];
        let extra_args =
            self.tcx.mk_type_list_from_iter(extra_args.iter().map(|arg| arg.layout().ty));

        let (callee, fn_abi, with_caller_location) = match *func.layout.ty.kind() {
            ty::FnPtr(..) => {
                let fn_ptr = self.read_pointer(&func)?;
                let fn_val = self.get_ptr_fn(fn_ptr)?;
                (fn_val, self.fn_abi_of_fn_ptr(fn_sig_binder, extra_args)?, false)
            }
            ty::FnDef(def_id, args) => {
                let instance = self.resolve(def_id, args)?;
                (
                    FnVal::Instance(instance),
                    self.fn_abi_of_instance(instance, extra_args)?,
                    instance.def.requires_caller_location(*self.tcx),
                )
            }
            _ => {
                span_bug!(terminator.source_info.span, "invalid callee of type {}", func.layout.ty)
            }
        };

        interp_ok(EvaluatedCalleeAndArgs { callee, args, fn_sig, fn_abi, with_caller_location })
    }

    fn eval_terminator(&mut self, terminator: &mir::Terminator<'tcx>) -> InterpResult<'tcx> {
        let _span = enter_trace_span!(
            M,
            step::eval_terminator,
            terminator = ?terminator.kind,
            span = ?terminator.source_info.span,
            tracing_separate_thread = Empty,
        )
        .or_if_tracing_disabled(|| info!(terminator = ?terminator.kind));

        use rustc_middle::mir::TerminatorKind::*;
        match terminator.kind {
            Return => {
                self.return_from_current_stack_frame(/* unwinding */ false)?
            }

            Goto { target } => self.go_to_block(target),

            SwitchInt { ref discr, ref targets } => {
                let discr = self.read_immediate(&self.eval_operand(discr, None)?)?;
                trace!("SwitchInt({:?})", *discr);

                // Branch to the `otherwise` case by default, if no match is found.
                let mut target_block = targets.otherwise();

                for (const_int, target) in targets.iter() {
                    // Compare using MIR BinOp::Eq, to also support pointer values.
                    // (Avoiding `self.binary_op` as that does some redundant layout computation.)
                    let res = self.binary_op(
                        mir::BinOp::Eq,
                        &discr,
                        &ImmTy::from_uint(const_int, discr.layout),
                    )?;
                    if res.to_scalar().to_bool()? {
                        target_block = target;
                        break;
                    }
                }

                self.go_to_block(target_block);
            }

            Call {
                ref func,
                ref args,
                destination,
                target,
                unwind,
                call_source: _,
                fn_span: _,
            } => {
                let old_stack = self.frame_idx();
                let old_loc = self.frame().loc;

                let EvaluatedCalleeAndArgs { callee, args, fn_sig, fn_abi, with_caller_location } =
                    self.eval_callee_and_args(terminator, func, args)?;

                let destination = self.eval_place(destination)?;
                self.init_fn_call(
                    callee,
                    (fn_sig.abi, fn_abi),
                    &args,
                    with_caller_location,
                    &destination,
                    target,
                    if fn_abi.can_unwind { unwind } else { mir::UnwindAction::Unreachable },
                )?;
                // Sanity-check that `init_fn_call` either pushed a new frame or
                // did a jump to another block.
                if self.frame_idx() == old_stack && self.frame().loc == old_loc {
                    span_bug!(terminator.source_info.span, "evaluating this call made no progress");
                }
            }

            TailCall { ref func, ref args, fn_span: _ } => {
                let old_frame_idx = self.frame_idx();

                let EvaluatedCalleeAndArgs { callee, args, fn_sig, fn_abi, with_caller_location } =
                    self.eval_callee_and_args(terminator, func, args)?;

                self.init_fn_tail_call(callee, (fn_sig.abi, fn_abi), &args, with_caller_location)?;

                if self.frame_idx() != old_frame_idx {
                    span_bug!(
                        terminator.source_info.span,
                        "evaluating this tail call pushed a new stack frame"
                    );
                }
            }

            Drop { place, target, unwind, replace: _, drop, async_fut } => {
                assert!(
                    async_fut.is_none() && drop.is_none(),
                    "Async Drop must be expanded or reset to sync in runtime MIR"
                );
                let place = self.eval_place(place)?;
                let instance = Instance::resolve_drop_in_place(*self.tcx, place.layout.ty);
                if let ty::InstanceKind::DropGlue(_, None) = instance.def {
                    // This is the branch we enter if and only if the dropped type has no drop glue
                    // whatsoever. This can happen as a result of monomorphizing a drop of a
                    // generic. In order to make sure that generic and non-generic code behaves
                    // roughly the same (and in keeping with MIR semantics) we do nothing here.
                    self.go_to_block(target);
                    return interp_ok(());
                }
                trace!("TerminatorKind::drop: {:?}, type {}", place, place.layout.ty);
                self.init_drop_in_place_call(&place, instance, target, unwind)?;
            }

            Assert { ref cond, expected, ref msg, target, unwind } => {
                let ignored =
                    M::ignore_optional_overflow_checks(self) && msg.is_optional_overflow_check();
                let cond_val = self.read_scalar(&self.eval_operand(cond, None)?)?.to_bool()?;
                if ignored || expected == cond_val {
                    self.go_to_block(target);
                } else {
                    M::assert_panic(self, msg, unwind)?;
                }
            }

            UnwindTerminate(reason) => {
                M::unwind_terminate(self, reason)?;
            }

            // When we encounter Resume, we've finished unwinding
            // cleanup for the current stack frame. We pop it in order
            // to continue unwinding the next frame.
            UnwindResume => {
                trace!("unwinding: resuming from cleanup");
                // By definition, a Resume terminator means
                // that we're unwinding.
                self.return_from_current_stack_frame(/* unwinding */ true)?;
                return interp_ok(());
            }

            // It is UB to ever encounter this.
            Unreachable => throw_ub!(Unreachable),

            // These should never occur for MIR we actually run.
            FalseEdge { .. } | FalseUnwind { .. } | Yield { .. } | CoroutineDrop => span_bug!(
                terminator.source_info.span,
                "{:#?} should have been eliminated by MIR pass",
                terminator.kind
            ),

            InlineAsm { .. } => {
                throw_unsup_format!("inline assembly is not supported");
            }
        }

        interp_ok(())
    }
}