rustc_mir_transform/lib.rs

// tidy-alphabetical-start
#![feature(array_windows)]
#![feature(assert_matches)]
#![feature(box_patterns)]
#![feature(const_type_name)]
#![feature(cow_is_borrowed)]
#![feature(file_buffered)]
#![feature(gen_blocks)]
#![feature(if_let_guard)]
#![feature(impl_trait_in_assoc_type)]
#![feature(try_blocks)]
#![feature(yeet_expr)]
// tidy-alphabetical-end

use hir::ConstContext;
use required_consts::RequiredConstsVisitor;
use rustc_const_eval::check_consts::{self, ConstCx};
use rustc_const_eval::util;
use rustc_data_structures::fx::FxIndexSet;
use rustc_data_structures::steal::Steal;
use rustc_hir as hir;
use rustc_hir::def::{CtorKind, DefKind};
use rustc_hir::def_id::LocalDefId;
use rustc_index::IndexVec;
use rustc_middle::mir::{
    AnalysisPhase, Body, CallSource, ClearCrossCrate, ConstOperand, ConstQualifs, LocalDecl,
    MirPhase, Operand, Place, ProjectionElem, Promoted, RuntimePhase, Rvalue, START_BLOCK,
    SourceInfo, Statement, StatementKind, TerminatorKind,
};
use rustc_middle::ty::{self, TyCtxt, TypeVisitableExt};
use rustc_middle::util::Providers;
use rustc_middle::{bug, query, span_bug};
use rustc_mir_build::builder::build_mir;
use rustc_span::source_map::Spanned;
use rustc_span::{DUMMY_SP, sym};
use tracing::debug;

#[macro_use]
mod pass_manager;

use std::sync::LazyLock;

use pass_manager::{self as pm, Lint, MirLint, MirPass, WithMinOptLevel};

mod check_pointers;
mod cost_checker;
mod cross_crate_inline;
mod deduce_param_attrs;
mod elaborate_drop;
mod errors;
mod ffi_unwind_calls;
mod lint;
mod lint_tail_expr_drop_order;
mod patch;
mod shim;
mod ssa;

/// We import passes via this macro so that we can have a static list of pass names
/// (used to verify CLI arguments). It takes a list of modules, followed by the passes
/// declared within them.
/// ```ignore,macro-test
/// declare_passes! {
///     // Declare a single pass from the module `abort_unwinding_calls`
///     mod abort_unwinding_calls : AbortUnwindingCalls;
///     // When passes are grouped together as an enum, declare the two constituent passes
///     mod add_call_guards : AddCallGuards {
///         AllCallEdges,
///         CriticalCallEdges
///     };
///     // Declares multiple pass groups, each containing their own constituent passes
///     mod simplify : SimplifyCfg {
///         Initial,
///         /* omitted */
///     }, SimplifyLocals {
///         BeforeConstProp,
///         /* omitted */
///     };
/// }
/// ```
macro_rules! declare_passes {
    (
        $(
            $vis:vis mod $mod_name:ident : $($pass_name:ident $( { $($ident:ident),* } )?),+ $(,)?;
        )*
    ) => {
        $(
            $vis mod $mod_name;
            $(
                // Make sure the type name is correct
                #[allow(unused_imports)]
                use $mod_name::$pass_name as _;
            )+
        )*

        static PASS_NAMES: LazyLock<FxIndexSet<&str>> = LazyLock::new(|| [
            // Fake marker pass
            "PreCodegen",
            $(
                $(
                    stringify!($pass_name),
                    $(
                        $(
                            $mod_name::$pass_name::$ident.name(),
                        )*
                    )?
                )+
            )*
        ].into_iter().collect());
    };
}

declare_passes! {
    mod abort_unwinding_calls : AbortUnwindingCalls;
    mod add_call_guards : AddCallGuards { AllCallEdges, CriticalCallEdges };
    mod add_moves_for_packed_drops : AddMovesForPackedDrops;
    mod add_retag : AddRetag;
    mod add_subtyping_projections : Subtyper;
    mod check_inline : CheckForceInline;
    mod check_call_recursion : CheckCallRecursion, CheckDropRecursion;
    mod check_inline_always_target_features : CheckInlineAlwaysTargetFeature;
    mod check_alignment : CheckAlignment;
    mod check_enums : CheckEnums;
    mod check_const_item_mutation : CheckConstItemMutation;
    mod check_null : CheckNull;
    mod check_packed_ref : CheckPackedRef;
    // This pass is public to allow external drivers to perform MIR cleanup
    pub mod cleanup_post_borrowck : CleanupPostBorrowck;

    mod copy_prop : CopyProp;
    mod coroutine : StateTransform;
    mod coverage : InstrumentCoverage;
    mod ctfe_limit : CtfeLimit;
    mod dataflow_const_prop : DataflowConstProp;
    mod dead_store_elimination : DeadStoreElimination {
        Initial,
        Final
    };
    mod deref_separator : Derefer;
    mod dest_prop : DestinationPropagation;
    pub mod dump_mir : Marker;
    mod early_otherwise_branch : EarlyOtherwiseBranch;
    mod elaborate_box_derefs : ElaborateBoxDerefs;
    mod elaborate_drops : ElaborateDrops;
    mod function_item_references : FunctionItemReferences;
    mod gvn : GVN;
    // Made public so that `mir_drops_elaborated_and_const_checked` can be overridden
    // by custom rustc drivers, running all the steps by themselves. See #114628.
    pub mod inline : Inline, ForceInline;
    mod impossible_predicates : ImpossiblePredicates;
    mod instsimplify : InstSimplify { BeforeInline, AfterSimplifyCfg };
    mod jump_threading : JumpThreading;
    mod known_panics_lint : KnownPanicsLint;
    mod large_enums : EnumSizeOpt;
    mod lower_intrinsics : LowerIntrinsics;
    mod lower_slice_len : LowerSliceLenCalls;
    mod match_branches : MatchBranchSimplification;
    mod mentioned_items : MentionedItems;
    mod multiple_return_terminators : MultipleReturnTerminators;
    mod nrvo : RenameReturnPlace;
    mod post_drop_elaboration : CheckLiveDrops;
    mod prettify : ReorderBasicBlocks, ReorderLocals;
    mod promote_consts : PromoteTemps;
    mod ref_prop : ReferencePropagation;
    mod remove_noop_landing_pads : RemoveNoopLandingPads;
    mod remove_place_mention : RemovePlaceMention;
    mod remove_storage_markers : RemoveStorageMarkers;
    mod remove_uninit_drops : RemoveUninitDrops;
    mod remove_unneeded_drops : RemoveUnneededDrops;
    mod remove_zsts : RemoveZsts;
    mod required_consts : RequiredConstsVisitor;
    mod post_analysis_normalize : PostAnalysisNormalize;
    mod sanity_check : SanityCheck;
    // This pass is public to allow external drivers to perform MIR cleanup
    pub mod simplify :
        SimplifyCfg {
            Initial,
            PromoteConsts,
            RemoveFalseEdges,
            PostAnalysis,
            PreOptimizations,
            Final,
            MakeShim,
            AfterUnreachableEnumBranching
        },
        SimplifyLocals {
            BeforeConstProp,
            AfterGVN,
            Final
        };
    mod simplify_branches : SimplifyConstCondition {
        AfterConstProp,
        Final
    };
    mod simplify_comparison_integral : SimplifyComparisonIntegral;
    mod single_use_consts : SingleUseConsts;
    mod sroa : ScalarReplacementOfAggregates;
    mod strip_debuginfo : StripDebugInfo;
    mod unreachable_enum_branching : UnreachableEnumBranching;
    mod unreachable_prop : UnreachablePropagation;
    mod validate : Validator;
}

rustc_fluent_macro::fluent_messages! { "../messages.ftl" }

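/// Registers the MIR-related query providers defined in this crate, from `mir_built` through
/// `optimized_mir`, along with the coverage, FFI-unwind, shim, and cross-crate-inlining providers.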
pub fn provide(providers: &mut Providers) {
    coverage::query::provide(providers);
    ffi_unwind_calls::provide(providers);
    shim::provide(providers);
    cross_crate_inline::provide(providers);
    providers.queries = query::Providers {
        mir_keys,
        mir_built,
        mir_const_qualif,
        mir_promoted,
        mir_drops_elaborated_and_const_checked,
        mir_for_ctfe,
        mir_coroutine_witnesses: coroutine::mir_coroutine_witnesses,
        optimized_mir,
        is_mir_available,
        is_ctfe_mir_available: is_mir_available,
        mir_callgraph_cyclic: inline::cycle::mir_callgraph_cyclic,
        mir_inliner_callees: inline::cycle::mir_inliner_callees,
        promoted_mir,
        deduced_param_attrs: deduce_param_attrs::deduced_param_attrs,
        coroutine_by_move_body_def_id: coroutine::coroutine_by_move_body_def_id,
        ..providers.queries
    };
}

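/// Rewrites calls to the `const_eval_select` intrinsic into direct calls to either the
/// compile-time or the runtime argument (depending on `context`), untupling the packed
/// arguments into individual call operands.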
fn remap_mir_for_const_eval_select<'tcx>(
    tcx: TyCtxt<'tcx>,
    mut body: Body<'tcx>,
    context: hir::Constness,
) -> Body<'tcx> {
    for bb in body.basic_blocks.as_mut().iter_mut() {
        let terminator = bb.terminator.as_mut().expect("invalid terminator");
        match terminator.kind {
            TerminatorKind::Call {
                func: Operand::Constant(box ConstOperand { ref const_, .. }),
                ref mut args,
                destination,
                target,
                unwind,
                fn_span,
                ..
            } if let ty::FnDef(def_id, _) = *const_.ty().kind()
                && tcx.is_intrinsic(def_id, sym::const_eval_select) =>
            {
                let Ok([tupled_args, called_in_const, called_at_rt]) = take_array(args) else {
                    unreachable!()
                };
                let ty = tupled_args.node.ty(&body.local_decls, tcx);
                let fields = ty.tuple_fields();
                let num_args = fields.len();
                let func =
                    if context == hir::Constness::Const { called_in_const } else { called_at_rt };
                let (method, place): (fn(Place<'tcx>) -> Operand<'tcx>, Place<'tcx>) =
                    match tupled_args.node {
                        Operand::Constant(_) => {
                            // There is no good way of extracting a tuple arg from a constant
                            // (const generic stuff) so we just create a temporary and deconstruct
                            // that.
                            let local = body.local_decls.push(LocalDecl::new(ty, fn_span));
                            bb.statements.push(Statement::new(
                                SourceInfo::outermost(fn_span),
                                StatementKind::Assign(Box::new((
                                    local.into(),
                                    Rvalue::Use(tupled_args.node.clone()),
                                ))),
                            ));
                            (Operand::Move, local.into())
                        }
                        Operand::Move(place) => (Operand::Move, place),
                        Operand::Copy(place) => (Operand::Copy, place),
                    };
                let place_elems = place.projection;
                let arguments = (0..num_args)
                    .map(|x| {
                        let mut place_elems = place_elems.to_vec();
                        place_elems.push(ProjectionElem::Field(x.into(), fields[x]));
                        let projection = tcx.mk_place_elems(&place_elems);
                        let place = Place { local: place.local, projection };
                        Spanned { node: method(place), span: DUMMY_SP }
                    })
                    .collect();
                terminator.kind = TerminatorKind::Call {
                    func: func.node,
                    args: arguments,
                    destination,
                    target,
                    unwind,
                    call_source: CallSource::Misc,
                    fn_span,
                };
            }
            _ => {}
        }
    }
    body
}

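/// Moves the contents out of `b`. Returns them as a fixed-size array if the slice's length is
/// exactly `N`, and otherwise gives them back as the boxed slice in the `Err` variant.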
fn take_array<T, const N: usize>(b: &mut Box<[T]>) -> Result<[T; N], Box<[T]>> {
    let b: Box<[T; N]> = std::mem::take(b).try_into()?;
    Ok(*b)
}

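/// MIR is "available" for a local item if it appears in `mir_keys`, i.e. if this crate will
/// build a MIR body for it.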
fn is_mir_available(tcx: TyCtxt<'_>, def_id: LocalDefId) -> bool {
    tcx.mir_keys(()).contains(&def_id)
}

/// Finds the full set of `DefId`s within the current crate that have
/// MIR associated with them.
fn mir_keys(tcx: TyCtxt<'_>, (): ()) -> FxIndexSet<LocalDefId> {
    // All body-owners have MIR associated with them.
    let mut set: FxIndexSet<_> = tcx.hir_body_owners().collect();

    // Remove the fake bodies for `global_asm!`, since they are not useful
    // for `--emit=mir` output or for encoding in metadata.
    set.retain(|&def_id| !matches!(tcx.def_kind(def_id), DefKind::GlobalAsm));

    // Coroutine-closures (e.g. async closures) have an additional by-move MIR
    // body that isn't in the HIR.
    for body_owner in tcx.hir_body_owners() {
        if let DefKind::Closure = tcx.def_kind(body_owner)
            && tcx.needs_coroutine_by_move_body_def_id(body_owner.to_def_id())
        {
            set.insert(tcx.coroutine_by_move_body_def_id(body_owner).expect_local());
        }
    }

    // tuple struct/variant constructors have MIR, but they don't have a BodyId,
    // so we need to build them separately.
    for item in tcx.hir_crate_items(()).free_items() {
        if let DefKind::Struct | DefKind::Enum = tcx.def_kind(item.owner_id) {
            for variant in tcx.adt_def(item.owner_id).variants() {
                if let Some((CtorKind::Fn, ctor_def_id)) = variant.ctor {
                    set.insert(ctor_def_id.expect_local());
                }
            }
        }
    }

    set
}

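/// Computes the const qualifications ("qualifs") of the return place of a const body. For now
/// these are only used when deciding whether a reference to a `const` can be promoted.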
fn mir_const_qualif(tcx: TyCtxt<'_>, def: LocalDefId) -> ConstQualifs {
    // N.B., this `borrow()` is guaranteed to be valid (i.e., the value
    // cannot yet be stolen), because `mir_promoted()`, which steals
    // from `mir_built()`, forces this query to execute before
    // performing the steal.
    let body = &tcx.mir_built(def).borrow();
    let ccx = check_consts::ConstCx::new(tcx, body);
    // No need to const-check a non-const `fn`.
    match ccx.const_kind {
        Some(ConstContext::Const { .. } | ConstContext::Static(_) | ConstContext::ConstFn) => {}
        None => span_bug!(
            tcx.def_span(def),
            "`mir_const_qualif` should only be called on const fns and const items"
        ),
    }

    if body.return_ty().references_error() {
        // It's possible to reach here without an error being emitted (#121103).
        tcx.dcx().span_delayed_bug(body.span, "mir_const_qualif: MIR had errors");
        return Default::default();
    }

    let mut validator = check_consts::check::Checker::new(&ccx);
    validator.check_body();

    // We return the qualifs in the return place for every MIR body, even though it is only used
    // when deciding to promote a reference to a `const` for now.
    validator.qualifs_in_return_place()
}

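/// Builds the initial MIR for `def` and runs the early MIR lints plus the initial CFG
/// simplification on it.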
fn mir_built(tcx: TyCtxt<'_>, def: LocalDefId) -> &Steal<Body<'_>> {
    let mut body = build_mir(tcx, def);

    pass_manager::dump_mir_for_phase_change(tcx, &body);

    pm::run_passes(
        tcx,
        &mut body,
        &[
            // MIR-level lints.
            &Lint(check_inline::CheckForceInline),
            &Lint(check_call_recursion::CheckCallRecursion),
388            // Check callee's target features match callers target features when
389            // using `#[inline(always)]`
            &Lint(check_inline_always_target_features::CheckInlineAlwaysTargetFeature),
            &Lint(check_packed_ref::CheckPackedRef),
            &Lint(check_const_item_mutation::CheckConstItemMutation),
            &Lint(function_item_references::FunctionItemReferences),
            // What we need in order to do constant evaluation.
            &simplify::SimplifyCfg::Initial,
            &Lint(sanity_check::SanityCheck),
        ],
        None,
        pm::Optimizations::Allowed,
    );
    tcx.alloc_steal_mir(body)
}

/// Compute the main MIR body and the list of MIR bodies of the promoteds.
fn mir_promoted(
    tcx: TyCtxt<'_>,
    def: LocalDefId,
) -> (&Steal<Body<'_>>, &Steal<IndexVec<Promoted, Body<'_>>>) {
    // Ensure that we compute the `mir_const_qualif` for constants at
    // this point, before we steal the `mir_built` result.
    // This also means promotion can rely on all const checks having been done.

    let const_qualifs = match tcx.def_kind(def) {
        DefKind::Fn | DefKind::AssocFn | DefKind::Closure
            if tcx.constness(def) == hir::Constness::Const
                || tcx.is_const_default_method(def.to_def_id()) =>
        {
            tcx.mir_const_qualif(def)
        }
        DefKind::AssocConst
        | DefKind::Const
        | DefKind::Static { .. }
        | DefKind::InlineConst
        | DefKind::AnonConst => tcx.mir_const_qualif(def),
        _ => ConstQualifs::default(),
    };

    // The `has_ffi_unwind_calls` query uses the raw MIR, so make sure it is run.
    tcx.ensure_done().has_ffi_unwind_calls(def);

    // The `coroutine_by_move_body_def_id` query uses the raw MIR, so make sure it is run.
    if tcx.needs_coroutine_by_move_body_def_id(def.to_def_id()) {
        tcx.ensure_done().coroutine_by_move_body_def_id(def);
    }

    let mut body = tcx.mir_built(def).steal();
    if let Some(error_reported) = const_qualifs.tainted_by_errors {
        body.tainted_by_errors = Some(error_reported);
    }

    // Collect `required_consts` *before* promotion, so if there are any consts being promoted
    // we still add them to the list in the outer MIR body.
    RequiredConstsVisitor::compute_required_consts(&mut body);

    // What we need to run borrowck etc.
    let promote_pass = promote_consts::PromoteTemps::default();
    pm::run_passes(
        tcx,
        &mut body,
        &[&promote_pass, &simplify::SimplifyCfg::PromoteConsts, &coverage::InstrumentCoverage],
        Some(MirPhase::Analysis(AnalysisPhase::Initial)),
        pm::Optimizations::Allowed,
    );

    lint_tail_expr_drop_order::run_lint(tcx, def, &body);

    let promoted = promote_pass.promoted_fragments.into_inner();
    (tcx.alloc_steal_mir(body), tcx.alloc_steal_promoted(promoted))
}

/// Compute the MIR that is used during CTFE (and thus has no optimizations run on it)
fn mir_for_ctfe(tcx: TyCtxt<'_>, def_id: LocalDefId) -> &Body<'_> {
    tcx.arena.alloc(inner_mir_for_ctfe(tcx, def_id))
}

fn inner_mir_for_ctfe(tcx: TyCtxt<'_>, def: LocalDefId) -> Body<'_> {
    // FIXME: don't duplicate this between the optimized_mir/mir_for_ctfe queries
    if tcx.is_constructor(def.to_def_id()) {
        // There's no reason to run all of the MIR passes on constructors when
        // we can just output the MIR we want directly. This also saves const
        // qualification and borrow checking the trouble of special casing
        // constructors.
        return shim::build_adt_ctor(tcx, def.to_def_id());
    }

    let body = tcx.mir_drops_elaborated_and_const_checked(def);
    let body = match tcx.hir_body_const_context(def) {
        // consts and statics do not have `optimized_mir`, so we can steal the body instead of
        // cloning it.
        Some(hir::ConstContext::Const { .. } | hir::ConstContext::Static(_)) => body.steal(),
        Some(hir::ConstContext::ConstFn) => body.borrow().clone(),
        None => bug!("`mir_for_ctfe` called on non-const {def:?}"),
    };

    let mut body = remap_mir_for_const_eval_select(tcx, body, hir::Constness::Const);
    pm::run_passes(tcx, &mut body, &[&ctfe_limit::CtfeLimit], None, pm::Optimizations::Allowed);

    body
}

/// Obtain just the main MIR (no promoteds) and run some cleanups on it. This also runs
/// MIR borrowck *before* doing so, to ensure that borrowck can still be run and does not
/// end up missing the source MIR because it has already been stolen.
fn mir_drops_elaborated_and_const_checked(tcx: TyCtxt<'_>, def: LocalDefId) -> &Steal<Body<'_>> {
    if tcx.is_coroutine(def.to_def_id()) {
        tcx.ensure_done().mir_coroutine_witnesses(def);
    }

    // We only need to borrowck non-synthetic MIR.
    let tainted_by_errors = if !tcx.is_synthetic_mir(def) {
        tcx.mir_borrowck(tcx.typeck_root_def_id(def.to_def_id()).expect_local()).err()
    } else {
        None
    };

    let is_fn_like = tcx.def_kind(def).is_fn_like();
    if is_fn_like {
        // Only compute the MIR call graph if it is actually going to be used.
        if pm::should_run_pass(tcx, &inline::Inline, pm::Optimizations::Allowed)
            || inline::ForceInline::should_run_pass_for_callee(tcx, def.to_def_id())
        {
            tcx.ensure_done().mir_inliner_callees(ty::InstanceKind::Item(def.to_def_id()));
        }
    }

    let (body, _) = tcx.mir_promoted(def);
    let mut body = body.steal();

    if let Some(error_reported) = tainted_by_errors {
        body.tainted_by_errors = Some(error_reported);
    }

    // Also taint the body if it's within a top-level item that is not well formed.
    //
    // We do this check here and not during `mir_promoted` because that may result
    // in borrowck cycles if WF requires looking into an opaque hidden type.
    let root = tcx.typeck_root_def_id(def.to_def_id());
    match tcx.def_kind(root) {
        DefKind::Fn
        | DefKind::AssocFn
        | DefKind::Static { .. }
        | DefKind::Const
        | DefKind::AssocConst => {
            if let Err(guar) = tcx.ensure_ok().check_well_formed(root.expect_local()) {
                body.tainted_by_errors = Some(guar);
            }
        }
        _ => {}
    }

    run_analysis_to_runtime_passes(tcx, &mut body);

    tcx.alloc_steal_mir(body)
}

// Made public so that `mir_drops_elaborated_and_const_checked` can be overridden
// by custom rustc drivers, running all the steps by themselves. See #114628.
pub fn run_analysis_to_runtime_passes<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
    assert!(body.phase == MirPhase::Analysis(AnalysisPhase::Initial));
    let did = body.source.def_id();

    debug!("analysis_mir_cleanup({:?})", did);
    run_analysis_cleanup_passes(tcx, body);
    assert!(body.phase == MirPhase::Analysis(AnalysisPhase::PostCleanup));

    // Do a little drop elaboration before const-checking if `const_precise_live_drops` is enabled.
    if check_consts::post_drop_elaboration::checking_enabled(&ConstCx::new(tcx, body)) {
        pm::run_passes(
            tcx,
            body,
            &[
                &remove_uninit_drops::RemoveUninitDrops,
                &simplify::SimplifyCfg::RemoveFalseEdges,
                &Lint(post_drop_elaboration::CheckLiveDrops),
            ],
            None,
            pm::Optimizations::Allowed,
        );
    }

    debug!("runtime_mir_lowering({:?})", did);
    run_runtime_lowering_passes(tcx, body);
    assert!(body.phase == MirPhase::Runtime(RuntimePhase::Initial));

    debug!("runtime_mir_cleanup({:?})", did);
    run_runtime_cleanup_passes(tcx, body);
    assert!(body.phase == MirPhase::Runtime(RuntimePhase::PostCleanup));
}

// FIXME(JakobDegen): Can we make these lists of passes consts?

/// After this series of passes, no lifetime analysis based on borrowing can be done.
fn run_analysis_cleanup_passes<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
    let passes: &[&dyn MirPass<'tcx>] = &[
        &impossible_predicates::ImpossiblePredicates,
        &cleanup_post_borrowck::CleanupPostBorrowck,
        &remove_noop_landing_pads::RemoveNoopLandingPads,
        &simplify::SimplifyCfg::PostAnalysis,
        &deref_separator::Derefer,
    ];

    pm::run_passes(
        tcx,
        body,
        passes,
        Some(MirPhase::Analysis(AnalysisPhase::PostCleanup)),
        pm::Optimizations::Allowed,
    );
}

/// Runs the sequence of passes that lowers analysis MIR to runtime MIR.
fn run_runtime_lowering_passes<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
    let passes: &[&dyn MirPass<'tcx>] = &[
        // These next passes must be executed together.
        &add_call_guards::CriticalCallEdges,
        // Must be done before drop elaboration because we need to drop opaque types, too.
        &post_analysis_normalize::PostAnalysisNormalize,
        // Calling this after `PostAnalysisNormalize` ensures that we don't deal with opaque types.
        &add_subtyping_projections::Subtyper,
        &elaborate_drops::ElaborateDrops,
        // Needs to happen after drop elaboration.
        &Lint(check_call_recursion::CheckDropRecursion),
        // This removes extraneous landing pads that are no longer
        // necessary, and it forces any call to a possibly-unwinding function
        // made from a non-unwinding function to abort the process.
        &abort_unwinding_calls::AbortUnwindingCalls,
        // AddMovesForPackedDrops needs to run after drop
        // elaboration.
        &add_moves_for_packed_drops::AddMovesForPackedDrops,
        // `AddRetag` needs to run after `ElaborateDrops` but before `ElaborateBoxDerefs`.
        // Otherwise it should run fairly late, but before optimizations begin.
        &add_retag::AddRetag,
        &elaborate_box_derefs::ElaborateBoxDerefs,
        &coroutine::StateTransform,
        &Lint(known_panics_lint::KnownPanicsLint),
    ];
    pm::run_passes_no_validate(tcx, body, passes, Some(MirPhase::Runtime(RuntimePhase::Initial)));
}

/// Runs the sequence of passes that does the initial cleanup of runtime MIR.
fn run_runtime_cleanup_passes<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
    let passes: &[&dyn MirPass<'tcx>] = &[
        &lower_intrinsics::LowerIntrinsics,
        &remove_place_mention::RemovePlaceMention,
        &simplify::SimplifyCfg::PreOptimizations,
    ];

    pm::run_passes(
        tcx,
        body,
        passes,
        Some(MirPhase::Runtime(RuntimePhase::PostCleanup)),
        pm::Optimizations::Allowed,
    );

    // Clear this ahead of time. Optimizations and runtime MIR have no reason to look
    // into this information, which is only meant for borrowck diagnostics.
    for decl in &mut body.local_decls {
        decl.local_info = ClearCrossCrate::Clear;
    }
}

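/// Runs the MIR optimization pipeline on `body`. Optimizations are suppressed for items whose
/// codegen attributes request that they not be optimized.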
pub(crate) fn run_optimization_passes<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
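    // Shorthand: wraps a pass so that it only runs at a minimum MIR optimization level of 1.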
    fn o1<T>(x: T) -> WithMinOptLevel<T> {
        WithMinOptLevel(1, x)
    }

    let def_id = body.source.def_id();
    let optimizations = if tcx.def_kind(def_id).has_codegen_attrs()
        && tcx.codegen_fn_attrs(def_id).optimize.do_not_optimize()
    {
        pm::Optimizations::Suppressed
    } else {
        pm::Optimizations::Allowed
    };

    // The main optimizations that we do on MIR.
    pm::run_passes(
        tcx,
        body,
        &[
            // Add some UB checks before any UB gets optimized away.
            &check_alignment::CheckAlignment,
            &check_null::CheckNull,
            &check_enums::CheckEnums,
            // Before inlining: trim down MIR with passes to reduce inlining work.

            // Has to be done before inlining, otherwise the actual call will almost always be
            // inlined. Also simple, so it can just run first.
            &lower_slice_len::LowerSliceLenCalls,
            // Perform instsimplify before inline to eliminate some trivial calls (like clone
            // shims).
            &instsimplify::InstSimplify::BeforeInline,
            // Perform inlining of `#[rustc_force_inline]`-annotated callees.
            &inline::ForceInline,
            // Perform inlining, which may add a lot of code.
            &inline::Inline,
            // Code from other crates may have storage markers, so this needs to happen after
            // inlining.
            &remove_storage_markers::RemoveStorageMarkers,
            // Inlining and instantiation may introduce ZSTs and useless drops.
            &remove_zsts::RemoveZsts,
            &remove_unneeded_drops::RemoveUnneededDrops,
            // Type instantiation may create uninhabited enums.
            // This also eliminates some unreachable branches based on enum variants.
            &unreachable_enum_branching::UnreachableEnumBranching,
            &unreachable_prop::UnreachablePropagation,
            &o1(simplify::SimplifyCfg::AfterUnreachableEnumBranching),
            // Inlining may have introduced a lot of redundant code and large move patterns.
            // Now we need to shrink the generated MIR.
            &ref_prop::ReferencePropagation,
            &sroa::ScalarReplacementOfAggregates,
            &multiple_return_terminators::MultipleReturnTerminators,
            // Running this after SimplifyCfg allows us to discover new opportunities for
            // peephole optimizations.
            &instsimplify::InstSimplify::AfterSimplifyCfg,
            &simplify::SimplifyLocals::BeforeConstProp,
            &dead_store_elimination::DeadStoreElimination::Initial,
            &gvn::GVN,
            &simplify::SimplifyLocals::AfterGVN,
            &match_branches::MatchBranchSimplification,
            &dataflow_const_prop::DataflowConstProp,
            &single_use_consts::SingleUseConsts,
            &o1(simplify_branches::SimplifyConstCondition::AfterConstProp),
            &jump_threading::JumpThreading,
            &early_otherwise_branch::EarlyOtherwiseBranch,
            &simplify_comparison_integral::SimplifyComparisonIntegral,
            &dest_prop::DestinationPropagation,
            &o1(simplify_branches::SimplifyConstCondition::Final),
            &o1(remove_noop_landing_pads::RemoveNoopLandingPads),
            &o1(simplify::SimplifyCfg::Final),
            // After the last SimplifyCfg, because this wants one-block functions.
            &strip_debuginfo::StripDebugInfo,
            &copy_prop::CopyProp,
            &dead_store_elimination::DeadStoreElimination::Final,
            &nrvo::RenameReturnPlace,
            &simplify::SimplifyLocals::Final,
            &multiple_return_terminators::MultipleReturnTerminators,
            &large_enums::EnumSizeOpt { discrepancy: 128 },
            // Some cleanup necessary at least for LLVM and potentially other codegen backends.
            &add_call_guards::CriticalCallEdges,
            // Cleanup for human readability, off by default.
            &prettify::ReorderBasicBlocks,
            &prettify::ReorderLocals,
            // Dump the end result for testing and debugging purposes.
            &dump_mir::Marker("PreCodegen"),
        ],
        Some(MirPhase::Runtime(RuntimePhase::Optimized)),
        optimizations,
    );
}

/// Optimize the MIR and prepare it for codegen.
fn optimized_mir(tcx: TyCtxt<'_>, did: LocalDefId) -> &Body<'_> {
    tcx.arena.alloc(inner_optimized_mir(tcx, did))
}

fn inner_optimized_mir(tcx: TyCtxt<'_>, did: LocalDefId) -> Body<'_> {
    if tcx.is_constructor(did.to_def_id()) {
        // There's no reason to run all of the MIR passes on constructors when
        // we can just output the MIR we want directly. This also saves const
        // qualification and borrow checking the trouble of special casing
        // constructors.
        return shim::build_adt_ctor(tcx, did.to_def_id());
    }

    match tcx.hir_body_const_context(did) {
        // Run the `mir_for_ctfe` query, which depends on `mir_drops_elaborated_and_const_checked`
        // which we are going to steal below. Thus we need to run `mir_for_ctfe` first, so it
        // computes and caches its result.
        Some(hir::ConstContext::ConstFn) => tcx.ensure_done().mir_for_ctfe(did),
        None => {}
        Some(other) => panic!("do not use `optimized_mir` for constants: {other:?}"),
    }
    debug!("about to call mir_drops_elaborated...");
    let body = tcx.mir_drops_elaborated_and_const_checked(did).steal();
    let mut body = remap_mir_for_const_eval_select(tcx, body, hir::Constness::NotConst);

    if body.tainted_by_errors.is_some() {
        return body;
    }

    // Before doing anything, remember which items are being mentioned so that the set of items
    // visited does not depend on the optimization level.
    // We do not use `run_passes` for this as that might skip the pass if `injection_phase` is set.
    mentioned_items::MentionedItems.run_pass(tcx, &mut body);

    // If `mir_drops_elaborated_and_const_checked` found that the current body has unsatisfiable
    // predicates, it will shrink the MIR to a single `unreachable` terminator.
    // More generally, if MIR is a lone `unreachable`, there is nothing to optimize.
    if let TerminatorKind::Unreachable = body.basic_blocks[START_BLOCK].terminator().kind
        && body.basic_blocks[START_BLOCK].statements.is_empty()
    {
        return body;
    }

    run_optimization_passes(tcx, &mut body);

    body
}

/// Fetch all the promoteds of an item and prepare their MIR bodies to be ready for
/// constant evaluation once all generic parameters become known.
fn promoted_mir(tcx: TyCtxt<'_>, def: LocalDefId) -> &IndexVec<Promoted, Body<'_>> {
    if tcx.is_constructor(def.to_def_id()) {
        return tcx.arena.alloc(IndexVec::new());
    }

    if !tcx.is_synthetic_mir(def) {
        tcx.ensure_done().mir_borrowck(tcx.typeck_root_def_id(def.to_def_id()).expect_local());
    }
    let mut promoted = tcx.mir_promoted(def).1.steal();

    for body in &mut promoted {
        run_analysis_to_runtime_passes(tcx, body);
    }

    tcx.arena.alloc(promoted)
}