rustc_mir_transform/inline.rs

//! Inlining pass for MIR functions.

use std::assert_matches::debug_assert_matches;
use std::iter;
use std::ops::{Range, RangeFrom};

use rustc_abi::{ExternAbi, FieldIdx};
use rustc_attr_data_structures::{InlineAttr, OptimizeAttr};
use rustc_hir::def::DefKind;
use rustc_hir::def_id::DefId;
use rustc_index::Idx;
use rustc_index::bit_set::DenseBitSet;
use rustc_middle::bug;
use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrs;
use rustc_middle::mir::visit::*;
use rustc_middle::mir::*;
use rustc_middle::ty::{self, Instance, InstanceKind, Ty, TyCtxt, TypeFlags, TypeVisitableExt};
use rustc_session::config::{DebugInfo, OptLevel};
use rustc_span::source_map::Spanned;
use tracing::{debug, instrument, trace, trace_span};

use crate::cost_checker::{CostChecker, is_call_like};
use crate::deref_separator::deref_finder;
use crate::simplify::simplify_cfg;
use crate::validate::validate_types;
use crate::{check_inline, util};

pub(crate) mod cycle;

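/// Maximum depth of nested inlining (length of `history`) before `NormalInliner::past_depth_limit`
/// kicks in and restricts further inlining to small, explicitly `#[inline]`-marked callees.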
const HISTORY_DEPTH_LIMIT: usize = 20;
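/// Maximum number of multi-call callsites inlined for a single top-level call before the same
/// restriction applies; this bounds the otherwise super-linear growth of the caller body.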
const TOP_DOWN_DEPTH_LIMIT: usize = 5;

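/// A direct call in the caller body that the inliner is considering for inlining.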
#[derive(Clone, Debug)]
struct CallSite<'tcx> {
    callee: Instance<'tcx>,
    fn_sig: ty::PolyFnSig<'tcx>,
    block: BasicBlock,
    source_info: SourceInfo,
}

// Made public so that `mir_drops_elaborated_and_const_checked` can be overridden
// by custom rustc drivers, running all the steps by themselves. See #114628.
pub struct Inline;

impl<'tcx> crate::MirPass<'tcx> for Inline {
    fn is_enabled(&self, sess: &rustc_session::Session) -> bool {
        if let Some(enabled) = sess.opts.unstable_opts.inline_mir {
            return enabled;
        }

        match sess.mir_opt_level() {
            0 | 1 => false,
            2 => {
                (sess.opts.optimize == OptLevel::More || sess.opts.optimize == OptLevel::Aggressive)
                    && sess.opts.incremental == None
            }
            _ => true,
        }
    }

    fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
        let span = trace_span!("inline", body = %tcx.def_path_str(body.source.def_id()));
        let _guard = span.enter();
        if inline::<NormalInliner<'tcx>>(tcx, body) {
            debug!("running simplify cfg on {:?}", body.source);
            simplify_cfg(tcx, body);
            deref_finder(tcx, body);
        }
    }

    fn is_required(&self) -> bool {
        false
    }
}

pub struct ForceInline;

impl ForceInline {
    pub fn should_run_pass_for_callee<'tcx>(tcx: TyCtxt<'tcx>, def_id: DefId) -> bool {
        matches!(tcx.codegen_fn_attrs(def_id).inline, InlineAttr::Force { .. })
    }
}

impl<'tcx> crate::MirPass<'tcx> for ForceInline {
    fn is_enabled(&self, _: &rustc_session::Session) -> bool {
        true
    }

    fn can_be_overridden(&self) -> bool {
        false
    }

    fn is_required(&self) -> bool {
        true
    }

    fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
        let span = trace_span!("force_inline", body = %tcx.def_path_str(body.source.def_id()));
        let _guard = span.enter();
        if inline::<ForceInliner<'tcx>>(tcx, body) {
            debug!("running simplify cfg on {:?}", body.source);
            simplify_cfg(tcx, body);
            deref_finder(tcx, body);
        }
    }
}

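/// Interface shared by the two inlining strategies below: `ForceInliner` and `NormalInliner`.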
trait Inliner<'tcx> {
    fn new(tcx: TyCtxt<'tcx>, def_id: DefId, body: &Body<'tcx>) -> Self;

    fn tcx(&self) -> TyCtxt<'tcx>;
    fn typing_env(&self) -> ty::TypingEnv<'tcx>;
    fn history(&self) -> &[DefId];
    fn caller_def_id(&self) -> DefId;

    /// Has the caller body been changed?
    fn changed(self) -> bool;

    /// Should inlining happen for a given callee?
    fn should_inline_for_callee(&self, def_id: DefId) -> bool;

    fn check_codegen_attributes_extra(
        &self,
        callee_attrs: &CodegenFnAttrs,
    ) -> Result<(), &'static str>;

    fn check_caller_mir_body(&self, body: &Body<'tcx>) -> bool;

    /// Returns an inlining decision based on an examination of the callee MIR body.
    /// Assumes that codegen attributes have already been checked for compatibility.
    fn check_callee_mir_body(
        &self,
        callsite: &CallSite<'tcx>,
        callee_body: &Body<'tcx>,
        callee_attrs: &CodegenFnAttrs,
    ) -> Result<(), &'static str>;

    /// Called when inlining succeeds.
    fn on_inline_success(
        &mut self,
        callsite: &CallSite<'tcx>,
        caller_body: &mut Body<'tcx>,
        new_blocks: std::ops::Range<BasicBlock>,
    );

    /// Called when inlining failed or was not performed.
    fn on_inline_failure(&self, callsite: &CallSite<'tcx>, reason: &'static str);
}

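/// Inliner used by the `ForceInline` pass: it only considers callees whose inline attribute is
/// `InlineAttr::Force`, and reports a hard error whenever such a call cannot be inlined.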
struct ForceInliner<'tcx> {
    tcx: TyCtxt<'tcx>,
    typing_env: ty::TypingEnv<'tcx>,
    /// `DefId` of caller.
    def_id: DefId,
    /// Stack of inlined instances.
    /// We only check the `DefId` and not the args because we want to
    /// avoid inlining cases of polymorphic recursion.
    /// The number of `DefId`s is finite, so checking history is enough
    /// to ensure that we do not loop endlessly while inlining.
    history: Vec<DefId>,
    /// Indicates that the caller body has been modified.
    changed: bool,
}

impl<'tcx> Inliner<'tcx> for ForceInliner<'tcx> {
    fn new(tcx: TyCtxt<'tcx>, def_id: DefId, body: &Body<'tcx>) -> Self {
        Self { tcx, typing_env: body.typing_env(tcx), def_id, history: Vec::new(), changed: false }
    }

    fn tcx(&self) -> TyCtxt<'tcx> {
        self.tcx
    }

    fn typing_env(&self) -> ty::TypingEnv<'tcx> {
        self.typing_env
    }

    fn history(&self) -> &[DefId] {
        &self.history
    }

    fn caller_def_id(&self) -> DefId {
        self.def_id
    }

    fn changed(self) -> bool {
        self.changed
    }

    fn should_inline_for_callee(&self, def_id: DefId) -> bool {
        ForceInline::should_run_pass_for_callee(self.tcx(), def_id)
    }

    fn check_codegen_attributes_extra(
        &self,
        callee_attrs: &CodegenFnAttrs,
    ) -> Result<(), &'static str> {
        debug_assert_matches!(callee_attrs.inline, InlineAttr::Force { .. });
        Ok(())
    }

    fn check_caller_mir_body(&self, _: &Body<'tcx>) -> bool {
        true
    }

    #[instrument(level = "debug", skip(self, callee_body))]
    fn check_callee_mir_body(
        &self,
        _: &CallSite<'tcx>,
        callee_body: &Body<'tcx>,
        callee_attrs: &CodegenFnAttrs,
    ) -> Result<(), &'static str> {
        if callee_body.tainted_by_errors.is_some() {
            return Err("body has errors");
        }

        let caller_attrs = self.tcx().codegen_fn_attrs(self.caller_def_id());
        if callee_attrs.instruction_set != caller_attrs.instruction_set
            && callee_body
                .basic_blocks
                .iter()
                .any(|bb| matches!(bb.terminator().kind, TerminatorKind::InlineAsm { .. }))
        {
            // During the attribute checking stage we allow a callee with no
            // instruction_set assigned to count as compatible with a function that does
            // assign one. However, during this stage we require an exact match when any
            // inline-asm is detected. LLVM will still possibly do an inline later on
            // if the no-attribute function ends up with the same instruction set anyway.
            Err("cannot move inline-asm across instruction sets")
        } else {
            Ok(())
        }
    }

    fn on_inline_success(
        &mut self,
        callsite: &CallSite<'tcx>,
        caller_body: &mut Body<'tcx>,
        new_blocks: std::ops::Range<BasicBlock>,
    ) {
        self.changed = true;

        self.history.push(callsite.callee.def_id());
        process_blocks(self, caller_body, new_blocks);
        self.history.pop();
    }

    fn on_inline_failure(&self, callsite: &CallSite<'tcx>, reason: &'static str) {
        let tcx = self.tcx();
        let InlineAttr::Force { attr_span, reason: justification } =
            tcx.codegen_fn_attrs(callsite.callee.def_id()).inline
        else {
            bug!("called on item without required inlining");
        };

        let call_span = callsite.source_info.span;
        tcx.dcx().emit_err(crate::errors::ForceInlineFailure {
            call_span,
            attr_span,
            caller_span: tcx.def_span(self.def_id),
            caller: tcx.def_path_str(self.def_id),
            callee_span: tcx.def_span(callsite.callee.def_id()),
            callee: tcx.def_path_str(callsite.callee.def_id()),
            reason,
            justification: justification.map(|sym| crate::errors::ForceInlineJustification { sym }),
        });
    }
}

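/// Inliner used by the regular `Inline` pass: it decides based on a cost model, bounded by the
/// depth limits above, and silently skips callsites that are not worth inlining.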
struct NormalInliner<'tcx> {
    tcx: TyCtxt<'tcx>,
    typing_env: ty::TypingEnv<'tcx>,
    /// `DefId` of caller.
    def_id: DefId,
    /// Stack of inlined instances.
    /// We only check the `DefId` and not the args because we want to
    /// avoid inlining cases of polymorphic recursion.
    /// The number of `DefId`s is finite, so checking history is enough
    /// to ensure that we do not loop endlessly while inlining.
    history: Vec<DefId>,
    /// How many (multi-call) callsites have we inlined for the top-level call?
    ///
    /// We need to limit this in order to prevent super-linear growth in MIR size.
    top_down_counter: usize,
    /// Indicates that the caller body has been modified.
    changed: bool,
    /// Indicates that the caller is #[inline] and just calls another function,
    /// and thus we can inline less into it as it'll be inlined itself.
    caller_is_inline_forwarder: bool,
}

impl<'tcx> NormalInliner<'tcx> {
    fn past_depth_limit(&self) -> bool {
        self.history.len() > HISTORY_DEPTH_LIMIT || self.top_down_counter > TOP_DOWN_DEPTH_LIMIT
    }
}

impl<'tcx> Inliner<'tcx> for NormalInliner<'tcx> {
    fn new(tcx: TyCtxt<'tcx>, def_id: DefId, body: &Body<'tcx>) -> Self {
        let typing_env = body.typing_env(tcx);
        let codegen_fn_attrs = tcx.codegen_fn_attrs(def_id);

        Self {
            tcx,
            typing_env,
            def_id,
            history: Vec::new(),
            top_down_counter: 0,
            changed: false,
            caller_is_inline_forwarder: matches!(
                codegen_fn_attrs.inline,
                InlineAttr::Hint | InlineAttr::Always | InlineAttr::Force { .. }
            ) && body_is_forwarder(body),
        }
    }

    fn tcx(&self) -> TyCtxt<'tcx> {
        self.tcx
    }

    fn caller_def_id(&self) -> DefId {
        self.def_id
    }

    fn typing_env(&self) -> ty::TypingEnv<'tcx> {
        self.typing_env
    }

    fn history(&self) -> &[DefId] {
        &self.history
    }

    fn changed(self) -> bool {
        self.changed
    }

    fn should_inline_for_callee(&self, _: DefId) -> bool {
        true
    }

    fn check_codegen_attributes_extra(
        &self,
        callee_attrs: &CodegenFnAttrs,
    ) -> Result<(), &'static str> {
        if self.past_depth_limit() && matches!(callee_attrs.inline, InlineAttr::None) {
            Err("Past depth limit so not inspecting unmarked callee")
        } else {
            Ok(())
        }
    }

    fn check_caller_mir_body(&self, body: &Body<'tcx>) -> bool {
        // Avoid inlining into coroutines, since their `optimized_mir` is used for layout computation,
        // which can create a cycle, even when no attempt is made to inline the function in the other
        // direction.
        if body.coroutine.is_some() {
            return false;
        }

        true
    }

    #[instrument(level = "debug", skip(self, callee_body))]
    fn check_callee_mir_body(
        &self,
        callsite: &CallSite<'tcx>,
        callee_body: &Body<'tcx>,
        callee_attrs: &CodegenFnAttrs,
    ) -> Result<(), &'static str> {
        let tcx = self.tcx();

        if let Some(_) = callee_body.tainted_by_errors {
            return Err("body has errors");
        }

        if self.past_depth_limit() && callee_body.basic_blocks.len() > 1 {
            return Err("Not inlining multi-block body as we're past a depth limit");
        }

        let mut threshold = if self.caller_is_inline_forwarder || self.past_depth_limit() {
            tcx.sess.opts.unstable_opts.inline_mir_forwarder_threshold.unwrap_or(30)
        } else if tcx.cross_crate_inlinable(callsite.callee.def_id()) {
            tcx.sess.opts.unstable_opts.inline_mir_hint_threshold.unwrap_or(100)
        } else {
            tcx.sess.opts.unstable_opts.inline_mir_threshold.unwrap_or(50)
        };

        // Give a bonus to functions with a small number of blocks; we normally
        // have two or three blocks for even very small functions.
        if callee_body.basic_blocks.len() <= 3 {
            threshold += threshold / 4;
        }
        debug!("    final inline threshold = {}", threshold);

        // FIXME: Give a bonus to functions with only a single caller

        let mut checker =
            CostChecker::new(tcx, self.typing_env(), Some(callsite.callee), callee_body);

        checker.add_function_level_costs();

        // Traverse the MIR manually so we can account for the effects of inlining on the CFG.
        let mut work_list = vec![START_BLOCK];
        let mut visited = DenseBitSet::new_empty(callee_body.basic_blocks.len());
        while let Some(bb) = work_list.pop() {
            if !visited.insert(bb.index()) {
                continue;
            }

            let blk = &callee_body.basic_blocks[bb];
            checker.visit_basic_block_data(bb, blk);

            let term = blk.terminator();
            let caller_attrs = tcx.codegen_fn_attrs(self.caller_def_id());
            if let TerminatorKind::Drop {
                ref place,
                target,
                unwind,
                replace: _,
                drop: _,
                async_fut: _,
            } = term.kind
            {
                work_list.push(target);

                // If the place doesn't actually need dropping, treat it like a regular goto.
                let ty = callsite
                    .callee
                    .instantiate_mir(tcx, ty::EarlyBinder::bind(&place.ty(callee_body, tcx).ty));
                if ty.needs_drop(tcx, self.typing_env())
                    && let UnwindAction::Cleanup(unwind) = unwind
                {
                    work_list.push(unwind);
                }
            } else if callee_attrs.instruction_set != caller_attrs.instruction_set
                && matches!(term.kind, TerminatorKind::InlineAsm { .. })
            {
                // During the attribute checking stage we allow a callee with no
                // instruction_set assigned to count as compatible with a function that does
                // assign one. However, during this stage we require an exact match when any
                // inline-asm is detected. LLVM will still possibly do an inline later on
                // if the no-attribute function ends up with the same instruction set anyway.
                return Err("cannot move inline-asm across instruction sets");
            } else if let TerminatorKind::TailCall { .. } = term.kind {
                // FIXME(explicit_tail_calls): figure out how exactly functions containing tail
                // calls can be inlined (and if they even should)
                return Err("can't inline functions with tail calls");
            } else {
                work_list.extend(term.successors())
            }
        }

        // N.B. We still apply our cost threshold to #[inline(always)] functions.
        // That attribute is often applied to very large functions that exceed LLVM's (very
        // generous) inlining threshold. Such functions are very poor MIR inlining candidates.
        // Always inlining #[inline(always)] functions in MIR, on net, slows down the compiler.
        let cost = checker.cost();
        if cost <= threshold {
            debug!("INLINING {:?} [cost={} <= threshold={}]", callsite, cost, threshold);
            Ok(())
        } else {
            debug!("NOT inlining {:?} [cost={} > threshold={}]", callsite, cost, threshold);
            Err("cost above threshold")
        }
    }

    fn on_inline_success(
        &mut self,
        callsite: &CallSite<'tcx>,
        caller_body: &mut Body<'tcx>,
        new_blocks: std::ops::Range<BasicBlock>,
    ) {
        self.changed = true;

        let new_calls_count = new_blocks
            .clone()
            .filter(|&bb| is_call_like(caller_body.basic_blocks[bb].terminator()))
            .count();
        if new_calls_count > 1 {
            self.top_down_counter += 1;
        }

        self.history.push(callsite.callee.def_id());
        process_blocks(self, caller_body, new_blocks);
        self.history.pop();

        if self.history.is_empty() {
            self.top_down_counter = 0;
        }
    }

    fn on_inline_failure(&self, _: &CallSite<'tcx>, _: &'static str) {}
}

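/// Entry point shared by both passes: runs the chosen inlining strategy over `body` and returns
/// whether the body was changed.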
fn inline<'tcx, T: Inliner<'tcx>>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) -> bool {
    let def_id = body.source.def_id();

    // Only do inlining into fn bodies.
    if !tcx.hir_body_owner_kind(def_id).is_fn_or_closure() {
        return false;
    }

    let mut inliner = T::new(tcx, def_id, body);
    if !inliner.check_caller_mir_body(body) {
        return false;
    }

    let blocks = START_BLOCK..body.basic_blocks.next_index();
    process_blocks(&mut inliner, body, blocks);
    inliner.changed()
}

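/// Attempts to inline every eligible callsite in the given range of (non-cleanup) basic blocks,
/// notifying the inliner of each success or failure.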
fn process_blocks<'tcx, I: Inliner<'tcx>>(
    inliner: &mut I,
    caller_body: &mut Body<'tcx>,
    blocks: Range<BasicBlock>,
) {
    for bb in blocks {
        let bb_data = &caller_body[bb];
        if bb_data.is_cleanup {
            continue;
        }

        let Some(callsite) = resolve_callsite(inliner, caller_body, bb, bb_data) else {
            continue;
        };

        let span = trace_span!("process_blocks", %callsite.callee, ?bb);
        let _guard = span.enter();

        match try_inlining(inliner, caller_body, &callsite) {
            Err(reason) => {
                debug!("not-inlined {} [{}]", callsite.callee, reason);
                inliner.on_inline_failure(&callsite, reason);
            }
            Ok(new_blocks) => {
                debug!("inlined {}", callsite.callee);
                inliner.on_inline_success(&callsite, caller_body, new_blocks);
            }
        }
    }
}

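/// Inspects the terminator of `bb` and, if it is a direct call that resolves to a concrete,
/// inlinable instance, returns the corresponding `CallSite`.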
fn resolve_callsite<'tcx, I: Inliner<'tcx>>(
    inliner: &I,
    caller_body: &Body<'tcx>,
    bb: BasicBlock,
    bb_data: &BasicBlockData<'tcx>,
) -> Option<CallSite<'tcx>> {
    let tcx = inliner.tcx();
    // Only consider direct calls to functions
    let terminator = bb_data.terminator();

    // FIXME(explicit_tail_calls): figure out if we can inline tail calls
    if let TerminatorKind::Call { ref func, fn_span, .. } = terminator.kind {
        let func_ty = func.ty(caller_body, tcx);
        if let ty::FnDef(def_id, args) = *func_ty.kind() {
            if !inliner.should_inline_for_callee(def_id) {
                debug!("not enabled");
                return None;
            }

            // To resolve an instance its args have to be fully normalized.
            let args = tcx.try_normalize_erasing_regions(inliner.typing_env(), args).ok()?;
            let callee =
                Instance::try_resolve(tcx, inliner.typing_env(), def_id, args).ok().flatten()?;

            if let InstanceKind::Virtual(..) | InstanceKind::Intrinsic(_) = callee.def {
                return None;
            }

            if inliner.history().contains(&callee.def_id()) {
                return None;
            }

            let fn_sig = tcx.fn_sig(def_id).instantiate(tcx, args);

            // Additionally, check that the body that we're inlining actually agrees
            // with the ABI of the trait that the item comes from.
            if let InstanceKind::Item(instance_def_id) = callee.def
                && tcx.def_kind(instance_def_id) == DefKind::AssocFn
                && let instance_fn_sig = tcx.fn_sig(instance_def_id).skip_binder()
                && instance_fn_sig.abi() != fn_sig.abi()
            {
                return None;
            }

            let source_info = SourceInfo { span: fn_span, ..terminator.source_info };

            return Some(CallSite { callee, fn_sig, block: bb, source_info });
        }
    }

    None
}

/// Attempts to inline a callsite into the caller body. When successful returns basic blocks
/// containing the inlined body. Otherwise returns an error describing why inlining didn't take
/// place.
fn try_inlining<'tcx, I: Inliner<'tcx>>(
    inliner: &I,
    caller_body: &mut Body<'tcx>,
    callsite: &CallSite<'tcx>,
) -> Result<std::ops::Range<BasicBlock>, &'static str> {
    let tcx = inliner.tcx();
    check_mir_is_available(inliner, caller_body, callsite.callee)?;

    let callee_attrs = tcx.codegen_fn_attrs(callsite.callee.def_id());
    check_inline::is_inline_valid_on_fn(tcx, callsite.callee.def_id())?;
    check_codegen_attributes(inliner, callsite, callee_attrs)?;
    inliner.check_codegen_attributes_extra(callee_attrs)?;

    let terminator = caller_body[callsite.block].terminator.as_ref().unwrap();
    let TerminatorKind::Call { args, destination, .. } = &terminator.kind else { bug!() };
    let destination_ty = destination.ty(&caller_body.local_decls, tcx).ty;
    for arg in args {
        if !arg.node.ty(&caller_body.local_decls, tcx).is_sized(tcx, inliner.typing_env()) {
            // We do not allow inlining functions with unsized params. Inlining these functions
            // could create unsized locals, which are unsound and being phased out.
            return Err("call has unsized argument");
        }
    }

    let callee_body = try_instance_mir(tcx, callsite.callee.def)?;
    check_inline::is_inline_valid_on_body(tcx, callee_body)?;
    inliner.check_callee_mir_body(callsite, callee_body, callee_attrs)?;

    let Ok(callee_body) = callsite.callee.try_instantiate_mir_and_normalize_erasing_regions(
        tcx,
        inliner.typing_env(),
        ty::EarlyBinder::bind(callee_body.clone()),
    ) else {
        debug!("failed to normalize callee body");
        return Err("implementation limitation -- could not normalize callee body");
    };

    // Normally, this shouldn't be required, but trait normalization failure can create a
    // validation ICE.
    if !validate_types(tcx, inliner.typing_env(), &callee_body, &caller_body).is_empty() {
        debug!("failed to validate callee body");
        return Err("implementation limitation -- callee body failed validation");
    }

    // Check call signature compatibility.
    // Normally, this shouldn't be required, but trait normalization failure can create a
    // validation ICE.
    let output_type = callee_body.return_ty();
    if !util::sub_types(tcx, inliner.typing_env(), output_type, destination_ty) {
        trace!(?output_type, ?destination_ty);
        return Err("implementation limitation -- return type mismatch");
    }
    if callsite.fn_sig.abi() == ExternAbi::RustCall {
        let (self_arg, arg_tuple) = match &args[..] {
            [arg_tuple] => (None, arg_tuple),
            [self_arg, arg_tuple] => (Some(self_arg), arg_tuple),
            _ => bug!("Expected `rust-call` to have 1 or 2 args"),
        };

        let self_arg_ty = self_arg.map(|self_arg| self_arg.node.ty(&caller_body.local_decls, tcx));

        let arg_tuple_ty = arg_tuple.node.ty(&caller_body.local_decls, tcx);
        let arg_tys = if callee_body.spread_arg.is_some() {
            std::slice::from_ref(&arg_tuple_ty)
        } else {
            let ty::Tuple(arg_tuple_tys) = *arg_tuple_ty.kind() else {
                bug!("Closure arguments are not passed as a tuple");
            };
            arg_tuple_tys.as_slice()
        };

        for (arg_ty, input) in
            self_arg_ty.into_iter().chain(arg_tys.iter().copied()).zip(callee_body.args_iter())
        {
            let input_type = callee_body.local_decls[input].ty;
            if !util::sub_types(tcx, inliner.typing_env(), input_type, arg_ty) {
                trace!(?arg_ty, ?input_type);
                debug!("failed to normalize tuple argument type");
                return Err("implementation limitation");
            }
        }
    } else {
        for (arg, input) in args.iter().zip(callee_body.args_iter()) {
            let input_type = callee_body.local_decls[input].ty;
            let arg_ty = arg.node.ty(&caller_body.local_decls, tcx);
            if !util::sub_types(tcx, inliner.typing_env(), input_type, arg_ty) {
                trace!(?arg_ty, ?input_type);
                debug!("failed to normalize argument type");
                return Err("implementation limitation -- arg mismatch");
            }
        }
    }

    let old_blocks = caller_body.basic_blocks.next_index();
    inline_call(inliner, caller_body, callsite, callee_body);
    let new_blocks = old_blocks..caller_body.basic_blocks.next_index();

    Ok(new_blocks)
}

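/// Checks that MIR for `callee` is actually available to the inliner and that using it cannot
/// introduce a query cycle back into the caller's own MIR.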
fn check_mir_is_available<'tcx, I: Inliner<'tcx>>(
    inliner: &I,
    caller_body: &Body<'tcx>,
    callee: Instance<'tcx>,
) -> Result<(), &'static str> {
    let caller_def_id = caller_body.source.def_id();
    let callee_def_id = callee.def_id();
    if callee_def_id == caller_def_id {
        return Err("self-recursion");
    }

    match callee.def {
        InstanceKind::Item(_) => {
            // If there is no MIR available (either because it was not in metadata or
            // because it has no MIR because it's an extern function), then the inliner
            // won't cause cycles on this.
            if !inliner.tcx().is_mir_available(callee_def_id) {
                debug!("item MIR unavailable");
                return Err("implementation limitation -- MIR unavailable");
            }
        }
        // These have no callable MIR of their own.
        InstanceKind::Intrinsic(_) | InstanceKind::Virtual(..) => {
            debug!("instance without MIR (intrinsic / virtual)");
            return Err("implementation limitation -- cannot inline intrinsic");
        }

        // FIXME(#127030): `ConstParamHasTy` has bad interactions with
        // the drop shim builder, which does not evaluate predicates in
        // the correct param-env for types being dropped. Stall resolving
        // the MIR for this instance until all of its const params are
        // substituted.
        InstanceKind::DropGlue(_, Some(ty)) if ty.has_type_flags(TypeFlags::HAS_CT_PARAM) => {
            debug!("still needs substitution");
            return Err("implementation limitation -- HACK for dropping polymorphic type");
        }
        InstanceKind::AsyncDropGlue(_, ty) | InstanceKind::AsyncDropGlueCtorShim(_, ty) => {
            return if ty.still_further_specializable() {
                Err("still needs substitution")
            } else {
                Ok(())
            };
        }
        InstanceKind::FutureDropPollShim(_, ty, ty2) => {
            return if ty.still_further_specializable() || ty2.still_further_specializable() {
                Err("still needs substitution")
            } else {
                Ok(())
            };
        }

        // This cannot result in an immediate cycle since the callee MIR is a shim, which does
        // not get any optimizations run on it. Any subsequent inlining may cause cycles, but we
        // do not need to catch this here, we can wait until the inliner decides to continue
        // inlining a second time.
        InstanceKind::VTableShim(_)
        | InstanceKind::ReifyShim(..)
        | InstanceKind::FnPtrShim(..)
        | InstanceKind::ClosureOnceShim { .. }
        | InstanceKind::ConstructCoroutineInClosureShim { .. }
        | InstanceKind::DropGlue(..)
        | InstanceKind::CloneShim(..)
        | InstanceKind::ThreadLocalShim(..)
        | InstanceKind::FnPtrAddrShim(..) => return Ok(()),
    }

    if inliner.tcx().is_constructor(callee_def_id) {
        trace!("constructors always have MIR");
        // Constructor functions cannot cause a query cycle.
        return Ok(());
    }

    if callee_def_id.is_local()
        && !inliner
            .tcx()
            .is_lang_item(inliner.tcx().parent(caller_def_id), rustc_hir::LangItem::FnOnce)
    {
        // If we know for sure that the function we're calling will itself try to
        // call us, then we avoid inlining that function.
        if inliner.tcx().mir_callgraph_reachable((callee, caller_def_id.expect_local())) {
            debug!("query cycle avoidance");
            return Err("caller might be reachable from callee");
        }

        Ok(())
    } else {
        // This cannot result in an immediate cycle since the callee MIR is from another crate
        // and is already optimized. Any subsequent inlining may cause cycles, but we do
        // not need to catch this here, we can wait until the inliner decides to continue
        // inlining a second time.
        trace!("functions from other crates always have MIR");
        Ok(())
    }
}

/// Returns an error if inlining is not possible based on codegen attributes alone. A success
/// indicates that the inlining decision should be based on other criteria.
fn check_codegen_attributes<'tcx, I: Inliner<'tcx>>(
    inliner: &I,
    callsite: &CallSite<'tcx>,
    callee_attrs: &CodegenFnAttrs,
) -> Result<(), &'static str> {
    let tcx = inliner.tcx();
    if let InlineAttr::Never = callee_attrs.inline {
        return Err("never inline attribute");
    }

    if let OptimizeAttr::DoNotOptimize = callee_attrs.optimize {
        return Err("has DoNotOptimize attribute");
    }

    inliner.check_codegen_attributes_extra(callee_attrs)?;

    // The reachability pass defines which functions are eligible for inlining. Generally,
    // inlining other functions is incorrect because they could reference symbols that aren't
    // exported.
    let is_generic = callsite.callee.args.non_erasable_generics().next().is_some();
    if !is_generic && !tcx.cross_crate_inlinable(callsite.callee.def_id()) {
        return Err("not exported");
    }

    let codegen_fn_attrs = tcx.codegen_fn_attrs(inliner.caller_def_id());
    if callee_attrs.no_sanitize != codegen_fn_attrs.no_sanitize {
        return Err("incompatible sanitizer set");
    }

    // Two functions are compatible if the callee has no attribute (meaning
    // that it's codegen agnostic), or sets an attribute that is identical
    // to this function's attribute.
    if callee_attrs.instruction_set.is_some()
        && callee_attrs.instruction_set != codegen_fn_attrs.instruction_set
    {
        return Err("incompatible instruction set");
    }

    let callee_feature_names = callee_attrs.target_features.iter().map(|f| f.name);
    let this_feature_names = codegen_fn_attrs.target_features.iter().map(|f| f.name);
    if callee_feature_names.ne(this_feature_names) {
        // In general it is not correct to inline a callee with target features that are a
        // subset of the caller. This is because the callee might contain calls, and the ABI of
        // those calls depends on the target features of the surrounding function. By moving a
        // `Call` terminator from one MIR body to another with more target features, we might
        // change the ABI of that call!
        return Err("incompatible target features");
    }

    Ok(())
}

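/// Splices the (already instantiated and normalized) callee body into the caller at `callsite`:
/// arguments become caller locals, the return place and unwind edges are rewired, and the call
/// terminator is replaced by a goto into the inlined entry block.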
fn inline_call<'tcx, I: Inliner<'tcx>>(
    inliner: &I,
    caller_body: &mut Body<'tcx>,
    callsite: &CallSite<'tcx>,
    mut callee_body: Body<'tcx>,
) {
    let tcx = inliner.tcx();
    let terminator = caller_body[callsite.block].terminator.take().unwrap();
    let TerminatorKind::Call { func, args, destination, unwind, target, .. } = terminator.kind
    else {
        bug!("unexpected terminator kind {:?}", terminator.kind);
    };

    let return_block = if let Some(block) = target {
        // Prepare a new block for code that should execute when the call returns. We don't use
        // the target block directly since it might have other predecessors.
        let data = BasicBlockData::new(
            Some(Terminator {
                source_info: terminator.source_info,
                kind: TerminatorKind::Goto { target: block },
            }),
            caller_body[block].is_cleanup,
        );
        Some(caller_body.basic_blocks_mut().push(data))
    } else {
        None
    };

    // If the call is something like `a[*i] = f(i)`, where
    // `i : &mut usize`, then just duplicating the `a[*i]`
    // Place could result in two different locations if `f`
    // writes to `i`. To prevent this we need to create a temporary
    // borrow of the place and pass the destination as `*temp` instead.
    fn dest_needs_borrow(place: Place<'_>) -> bool {
        for elem in place.projection.iter() {
            match elem {
                ProjectionElem::Deref | ProjectionElem::Index(_) => return true,
                _ => {}
            }
        }

        false
    }

    let dest = if dest_needs_borrow(destination) {
        trace!("creating temp for return destination");
        let dest = Rvalue::Ref(
            tcx.lifetimes.re_erased,
            BorrowKind::Mut { kind: MutBorrowKind::Default },
            destination,
        );
        let dest_ty = dest.ty(caller_body, tcx);
        let temp = Place::from(new_call_temp(caller_body, callsite, dest_ty, return_block));
        caller_body[callsite.block].statements.push(Statement {
            source_info: callsite.source_info,
            kind: StatementKind::Assign(Box::new((temp, dest))),
        });
        tcx.mk_place_deref(temp)
    } else {
        destination
    };

    // Always create a local to hold the destination, as `RETURN_PLACE` may appear
    // where a full `Place` is not allowed.
    let (remap_destination, destination_local) = if let Some(d) = dest.as_local() {
        (false, d)
    } else {
        (
            true,
            new_call_temp(caller_body, callsite, destination.ty(caller_body, tcx).ty, return_block),
        )
    };

    // Copy the arguments if needed.
    let args = make_call_args(inliner, args, callsite, caller_body, &callee_body, return_block);

    let mut integrator = Integrator {
        args: &args,
        new_locals: caller_body.local_decls.next_index()..,
        new_scopes: caller_body.source_scopes.next_index()..,
        new_blocks: caller_body.basic_blocks.next_index()..,
        destination: destination_local,
        callsite_scope: caller_body.source_scopes[callsite.source_info.scope].clone(),
        callsite,
        cleanup_block: unwind,
        in_cleanup_block: false,
        return_block,
        tcx,
        always_live_locals: DenseBitSet::new_filled(callee_body.local_decls.len()),
    };

    // Map all `Local`s, `SourceScope`s and `BasicBlock`s to new ones
    // (or existing ones, in a few special cases) in the caller.
    integrator.visit_body(&mut callee_body);

    // If there are any locals without storage markers, give them storage only for the
    // duration of the call.
    for local in callee_body.vars_and_temps_iter() {
        if integrator.always_live_locals.contains(local) {
            let new_local = integrator.map_local(local);
            caller_body[callsite.block].statements.push(Statement {
                source_info: callsite.source_info,
                kind: StatementKind::StorageLive(new_local),
            });
        }
    }
    if let Some(block) = return_block {
        // To avoid repeated O(n) insert, push any new statements to the end and rotate
        // the slice once.
        let mut n = 0;
        if remap_destination {
            caller_body[block].statements.push(Statement {
                source_info: callsite.source_info,
                kind: StatementKind::Assign(Box::new((
                    dest,
                    Rvalue::Use(Operand::Move(destination_local.into())),
                ))),
            });
            n += 1;
        }
        for local in callee_body.vars_and_temps_iter().rev() {
            if integrator.always_live_locals.contains(local) {
                let new_local = integrator.map_local(local);
                caller_body[block].statements.push(Statement {
                    source_info: callsite.source_info,
                    kind: StatementKind::StorageDead(new_local),
                });
                n += 1;
            }
        }
        caller_body[block].statements.rotate_right(n);
    }

    // Insert all of the (mapped) parts of the callee body into the caller.
    caller_body.local_decls.extend(callee_body.drain_vars_and_temps());
    caller_body.source_scopes.append(&mut callee_body.source_scopes);
    if tcx
        .sess
        .opts
        .unstable_opts
        .inline_mir_preserve_debug
        .unwrap_or(tcx.sess.opts.debuginfo != DebugInfo::None)
    {
        // Note that we need to preserve these in the standard library so that
        // people working on rust can build with or without debuginfo while
        // still getting consistent results from the mir-opt tests.
        caller_body.var_debug_info.append(&mut callee_body.var_debug_info);
    }
    caller_body.basic_blocks_mut().append(callee_body.basic_blocks_mut());

    caller_body[callsite.block].terminator = Some(Terminator {
        source_info: callsite.source_info,
        kind: TerminatorKind::Goto { target: integrator.map_block(START_BLOCK) },
    });

    // Copy required constants from the callee_body into the caller_body. Although we are only
    // pushing unevaluated consts to `required_consts`, here they may have been evaluated
    // because we are calling `instantiate_and_normalize_erasing_regions` -- so we filter again.
    caller_body.required_consts.as_mut().unwrap().extend(
        callee_body.required_consts().into_iter().filter(|ct| ct.const_.is_required_const()),
    );
    // Now that we incorporated the callee's `required_consts`, we can remove the callee from
    // `mentioned_items` -- but we have to take their `mentioned_items` in return. This does
    // some extra work here to save the monomorphization collector work later. It helps a lot,
    // since monomorphization can avoid a lot of work when the "mentioned items" are similar to
    // the actually used items. By doing this we can entirely avoid visiting the callee!
    // We need to reconstruct the `MentionedItem` for the callee so that we can find and
    // remove it.
    let callee_item = MentionedItem::Fn(func.ty(caller_body, tcx));
    let caller_mentioned_items = caller_body.mentioned_items.as_mut().unwrap();
    if let Some(idx) = caller_mentioned_items.iter().position(|item| item.node == callee_item) {
        // We found the callee, so remove it and add its items instead.
        caller_mentioned_items.remove(idx);
        caller_mentioned_items.extend(callee_body.mentioned_items());
    } else {
        // If we can't find the callee, there's no point in adding its items. Probably it
        // already got removed by being inlined elsewhere in the same function, so we already
        // took its items.
    }
}

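/// Lowers the call's argument operands into caller locals, untupling the argument tuple of
/// `rust-call` callsites when the callee body expects distinct arguments (i.e. has no
/// `spread_arg`).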
fn make_call_args<'tcx, I: Inliner<'tcx>>(
    inliner: &I,
    args: Box<[Spanned<Operand<'tcx>>]>,
    callsite: &CallSite<'tcx>,
    caller_body: &mut Body<'tcx>,
    callee_body: &Body<'tcx>,
    return_block: Option<BasicBlock>,
) -> Box<[Local]> {
    let tcx = inliner.tcx();

    // There is a bit of a mismatch between the *caller* of a closure and the *callee*.
    // The caller provides the arguments wrapped up in a tuple:
    //
    //     tuple_tmp = (a, b, c)
    //     Fn::call(closure_ref, tuple_tmp)
    //
    // meanwhile the closure body expects the arguments (here, `a`, `b`, and `c`)
    // as distinct arguments. (This is the "rust-call" ABI hack.) Normally, codegen has
    // the job of unpacking this tuple. But here, we are codegen. =) So we want to create
    // a vector like
    //
    //     [closure_ref, tuple_tmp.0, tuple_tmp.1, tuple_tmp.2]
    //
    // Except for one tiny wrinkle: we don't actually want `tuple_tmp.0`. It's more convenient
    // if we "spill" that into *another* temporary, so that we can map the argument
    // variable in the callee MIR directly to an argument variable on our side.
    // So we introduce temporaries like:
    //
    //     tmp0 = tuple_tmp.0
    //     tmp1 = tuple_tmp.1
    //     tmp2 = tuple_tmp.2
    //
    // and the vector is `[closure_ref, tmp0, tmp1, tmp2]`.
    if callsite.fn_sig.abi() == ExternAbi::RustCall && callee_body.spread_arg.is_none() {
        // FIXME(edition_2024): switch back to a normal method call.
        let mut args = <_>::into_iter(args);
        let self_ = create_temp_if_necessary(
            inliner,
            args.next().unwrap().node,
            callsite,
            caller_body,
            return_block,
        );
        let tuple = create_temp_if_necessary(
            inliner,
            args.next().unwrap().node,
            callsite,
            caller_body,
            return_block,
        );
        assert!(args.next().is_none());

        let tuple = Place::from(tuple);
        let ty::Tuple(tuple_tys) = tuple.ty(caller_body, tcx).ty.kind() else {
            bug!("Closure arguments are not passed as a tuple");
        };

        // The `closure_ref` in our example above.
        let closure_ref_arg = iter::once(self_);

        // The `tmp0`, `tmp1`, and `tmp2` in our example above.
        let tuple_tmp_args = tuple_tys.iter().enumerate().map(|(i, ty)| {
            // This is e.g., `tuple_tmp.0` in our example above.
            let tuple_field = Operand::Move(tcx.mk_place_field(tuple, FieldIdx::new(i), ty));

            // Spill to a local to make e.g., `tmp0`.
            create_temp_if_necessary(inliner, tuple_field, callsite, caller_body, return_block)
        });

        closure_ref_arg.chain(tuple_tmp_args).collect()
    } else {
        args.into_iter()
            .map(|a| create_temp_if_necessary(inliner, a.node, callsite, caller_body, return_block))
            .collect()
    }
}

/// If `arg` is already a temporary, returns it. Otherwise, introduces a fresh temporary `T` and an
/// instruction `T = arg`, and returns `T`.
fn create_temp_if_necessary<'tcx, I: Inliner<'tcx>>(
    inliner: &I,
    arg: Operand<'tcx>,
    callsite: &CallSite<'tcx>,
    caller_body: &mut Body<'tcx>,
    return_block: Option<BasicBlock>,
) -> Local {
    // Reuse the operand if it is a moved temporary.
    if let Operand::Move(place) = &arg
        && let Some(local) = place.as_local()
        && caller_body.local_kind(local) == LocalKind::Temp
    {
        return local;
    }

    // Otherwise, create a temporary for the argument.
    trace!("creating temp for argument {:?}", arg);
    let arg_ty = arg.ty(caller_body, inliner.tcx());
    let local = new_call_temp(caller_body, callsite, arg_ty, return_block);
    caller_body[callsite.block].statements.push(Statement {
        source_info: callsite.source_info,
        kind: StatementKind::Assign(Box::new((Place::from(local), Rvalue::Use(arg)))),
    });
    local
}

/// Introduces a new temporary into the caller body that is live for the duration of the call.
fn new_call_temp<'tcx>(
    caller_body: &mut Body<'tcx>,
    callsite: &CallSite<'tcx>,
    ty: Ty<'tcx>,
    return_block: Option<BasicBlock>,
) -> Local {
    let local = caller_body.local_decls.push(LocalDecl::new(ty, callsite.source_info.span));

    caller_body[callsite.block].statements.push(Statement {
        source_info: callsite.source_info,
        kind: StatementKind::StorageLive(local),
    });

    if let Some(block) = return_block {
        caller_body[block].statements.insert(
            0,
            Statement {
                source_info: callsite.source_info,
                kind: StatementKind::StorageDead(local),
            },
        );
    }

    local
}

/**
 * Integrator.
 *
 * Integrates blocks from the callee function into the calling function.
 * Updates block indices, references to locals and other control flow
 * stuff.
*/
struct Integrator<'a, 'tcx> {
    args: &'a [Local],
    new_locals: RangeFrom<Local>,
    new_scopes: RangeFrom<SourceScope>,
    new_blocks: RangeFrom<BasicBlock>,
    destination: Local,
    callsite_scope: SourceScopeData<'tcx>,
    callsite: &'a CallSite<'tcx>,
    cleanup_block: UnwindAction,
    in_cleanup_block: bool,
    return_block: Option<BasicBlock>,
    tcx: TyCtxt<'tcx>,
    always_live_locals: DenseBitSet<Local>,
}

impl Integrator<'_, '_> {
    fn map_local(&self, local: Local) -> Local {
        let new = if local == RETURN_PLACE {
            self.destination
        } else {
            let idx = local.index() - 1;
            if idx < self.args.len() {
                self.args[idx]
            } else {
                self.new_locals.start + (idx - self.args.len())
            }
        };
        trace!("mapping local `{:?}` to `{:?}`", local, new);
        new
    }

    fn map_scope(&self, scope: SourceScope) -> SourceScope {
        let new = self.new_scopes.start + scope.index();
        trace!("mapping scope `{:?}` to `{:?}`", scope, new);
        new
    }

    fn map_block(&self, block: BasicBlock) -> BasicBlock {
        let new = self.new_blocks.start + block.index();
        trace!("mapping block `{:?}` to `{:?}`", block, new);
        new
    }

    fn map_unwind(&self, unwind: UnwindAction) -> UnwindAction {
        if self.in_cleanup_block {
            match unwind {
                UnwindAction::Cleanup(_) | UnwindAction::Continue => {
                    bug!("cleanup on cleanup block");
                }
                UnwindAction::Unreachable | UnwindAction::Terminate(_) => return unwind,
            }
        }

        match unwind {
            UnwindAction::Unreachable | UnwindAction::Terminate(_) => unwind,
            UnwindAction::Cleanup(target) => UnwindAction::Cleanup(self.map_block(target)),
            // Add an unwind edge to the original call's cleanup block
            UnwindAction::Continue => self.cleanup_block,
        }
    }
}

impl<'tcx> MutVisitor<'tcx> for Integrator<'_, 'tcx> {
    fn tcx(&self) -> TyCtxt<'tcx> {
        self.tcx
    }

    fn visit_local(&mut self, local: &mut Local, _ctxt: PlaceContext, _location: Location) {
        *local = self.map_local(*local);
    }

    fn visit_source_scope_data(&mut self, scope_data: &mut SourceScopeData<'tcx>) {
        self.super_source_scope_data(scope_data);
        if scope_data.parent_scope.is_none() {
            // Attach the outermost callee scope as a child of the callsite
            // scope, via the `parent_scope` and `inlined_parent_scope` chains.
            scope_data.parent_scope = Some(self.callsite.source_info.scope);
            assert_eq!(scope_data.inlined_parent_scope, None);
            scope_data.inlined_parent_scope = if self.callsite_scope.inlined.is_some() {
                Some(self.callsite.source_info.scope)
            } else {
                self.callsite_scope.inlined_parent_scope
            };

            // Mark the outermost callee scope as an inlined one.
            assert_eq!(scope_data.inlined, None);
            scope_data.inlined = Some((self.callsite.callee, self.callsite.source_info.span));
        } else if scope_data.inlined_parent_scope.is_none() {
            // Make it easy to find the scope with `inlined` set above.
            scope_data.inlined_parent_scope = Some(self.map_scope(OUTERMOST_SOURCE_SCOPE));
        }
    }

    fn visit_source_scope(&mut self, scope: &mut SourceScope) {
        *scope = self.map_scope(*scope);
    }

    fn visit_basic_block_data(&mut self, block: BasicBlock, data: &mut BasicBlockData<'tcx>) {
        self.in_cleanup_block = data.is_cleanup;
        self.super_basic_block_data(block, data);
        self.in_cleanup_block = false;
    }

    fn visit_retag(&mut self, kind: &mut RetagKind, place: &mut Place<'tcx>, loc: Location) {
        self.super_retag(kind, place, loc);

        // We have to patch all inlined retags to be aware that they are no longer
        // happening on function entry.
        if *kind == RetagKind::FnEntry {
            *kind = RetagKind::Default;
        }
    }

    fn visit_statement(&mut self, statement: &mut Statement<'tcx>, location: Location) {
        if let StatementKind::StorageLive(local) | StatementKind::StorageDead(local) =
            statement.kind
        {
            self.always_live_locals.remove(local);
        }
        self.super_statement(statement, location);
    }

    fn visit_terminator(&mut self, terminator: &mut Terminator<'tcx>, loc: Location) {
        // Don't try to modify the implicit `_0` access on return (`return` terminators are
        // replaced down below anyways).
        if !matches!(terminator.kind, TerminatorKind::Return) {
            self.super_terminator(terminator, loc);
        } else {
            self.visit_source_info(&mut terminator.source_info);
        }

        match terminator.kind {
            TerminatorKind::CoroutineDrop | TerminatorKind::Yield { .. } => bug!(),
            TerminatorKind::Goto { ref mut target } => {
                *target = self.map_block(*target);
            }
            TerminatorKind::SwitchInt { ref mut targets, .. } => {
                for tgt in targets.all_targets_mut() {
                    *tgt = self.map_block(*tgt);
                }
            }
            TerminatorKind::Drop { ref mut target, ref mut unwind, .. } => {
                *target = self.map_block(*target);
                *unwind = self.map_unwind(*unwind);
            }
            TerminatorKind::TailCall { .. } => {
                // `check_callee_mir_body` forbids tail calls
                unreachable!()
            }
            TerminatorKind::Call { ref mut target, ref mut unwind, .. } => {
                if let Some(ref mut tgt) = *target {
                    *tgt = self.map_block(*tgt);
                }
                *unwind = self.map_unwind(*unwind);
            }
            TerminatorKind::Assert { ref mut target, ref mut unwind, .. } => {
                *target = self.map_block(*target);
                *unwind = self.map_unwind(*unwind);
            }
            TerminatorKind::Return => {
                terminator.kind = if let Some(tgt) = self.return_block {
                    TerminatorKind::Goto { target: tgt }
                } else {
                    TerminatorKind::Unreachable
                }
            }
            TerminatorKind::UnwindResume => {
                terminator.kind = match self.cleanup_block {
                    UnwindAction::Cleanup(tgt) => TerminatorKind::Goto { target: tgt },
                    UnwindAction::Continue => TerminatorKind::UnwindResume,
                    UnwindAction::Unreachable => TerminatorKind::Unreachable,
                    UnwindAction::Terminate(reason) => TerminatorKind::UnwindTerminate(reason),
                };
            }
            TerminatorKind::UnwindTerminate(_) => {}
            TerminatorKind::Unreachable => {}
            TerminatorKind::FalseEdge { ref mut real_target, ref mut imaginary_target } => {
                *real_target = self.map_block(*real_target);
                *imaginary_target = self.map_block(*imaginary_target);
            }
            TerminatorKind::FalseUnwind { real_target: _, unwind: _ } =>
            // see the ordering of passes in the optimized_mir query.
            {
                bug!("False unwinds should have been removed before inlining")
            }
            TerminatorKind::InlineAsm { ref mut targets, ref mut unwind, .. } => {
                for tgt in targets.iter_mut() {
                    *tgt = self.map_block(*tgt);
                }
                *unwind = self.map_unwind(*unwind);
            }
        }
    }
}

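/// Fetches the MIR for `instance`. For drop glue of an ADT whose field types still contain
/// unsubstituted parameters behind aliases, no drop shim can be built yet, so an error is
/// returned instead.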
#[instrument(skip(tcx), level = "debug")]
fn try_instance_mir<'tcx>(
    tcx: TyCtxt<'tcx>,
    instance: InstanceKind<'tcx>,
) -> Result<&'tcx Body<'tcx>, &'static str> {
    if let ty::InstanceKind::DropGlue(_, Some(ty)) | ty::InstanceKind::AsyncDropGlueCtorShim(_, ty) =
        instance
        && let ty::Adt(def, args) = ty.kind()
    {
        let fields = def.all_fields();
        for field in fields {
            let field_ty = field.ty(tcx, args);
            if field_ty.has_param() && field_ty.has_aliases() {
                return Err("cannot build drop shim for polymorphic type");
            }
        }
    }
    Ok(tcx.instance_mir(instance))
}

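/// Heuristically determines whether `body` is little more than a single call plus a return (and
/// possibly some drop/unwind blocks). Such "forwarder" callers get a smaller inlining threshold,
/// since they are expected to be inlined into their own callers anyway.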
fn body_is_forwarder(body: &Body<'_>) -> bool {
    let TerminatorKind::Call { target, .. } = body.basic_blocks[START_BLOCK].terminator().kind
    else {
        return false;
    };
    if let Some(target) = target {
        let TerminatorKind::Return = body.basic_blocks[target].terminator().kind else {
            return false;
        };
    }

    let max_blocks = if !body.is_polymorphic {
        2
    } else if target.is_none() {
        3
    } else {
        4
    };
    if body.basic_blocks.len() > max_blocks {
        return false;
    }

    body.basic_blocks.iter_enumerated().all(|(bb, bb_data)| {
        bb == START_BLOCK
            || matches!(
                bb_data.terminator().kind,
                TerminatorKind::Return
                    | TerminatorKind::Drop { .. }
                    | TerminatorKind::UnwindResume
                    | TerminatorKind::UnwindTerminate(_)
            )
    })
}