// rustc_codegen_llvm/context.rs

use std::borrow::Borrow;
use std::cell::{Cell, RefCell};
use std::ffi::{CStr, c_char, c_uint};
use std::marker::PhantomData;
use std::ops::{Deref, DerefMut};
use std::str;

use rustc_abi::{HasDataLayout, Size, TargetDataLayout, VariantIdx};
use rustc_codegen_ssa::back::versioned_llvm_target;
use rustc_codegen_ssa::base::{wants_msvc_seh, wants_wasm_eh};
use rustc_codegen_ssa::common::TypeKind;
use rustc_codegen_ssa::errors as ssa_errors;
use rustc_codegen_ssa::traits::*;
use rustc_data_structures::base_n::{ALPHANUMERIC_ONLY, ToBaseN};
use rustc_data_structures::fx::FxHashMap;
use rustc_data_structures::small_c_str::SmallCStr;
use rustc_hir::def_id::DefId;
use rustc_middle::middle::codegen_fn_attrs::PatchableFunctionEntry;
use rustc_middle::mir::mono::CodegenUnit;
use rustc_middle::ty::layout::{
    FnAbiError, FnAbiOfHelpers, FnAbiRequest, HasTypingEnv, LayoutError, LayoutOfHelpers,
};
use rustc_middle::ty::{self, Instance, Ty, TyCtxt};
use rustc_middle::{bug, span_bug};
use rustc_session::Session;
use rustc_session::config::{
    BranchProtection, CFGuard, CFProtection, CrateType, DebugInfo, FunctionReturn, PAuthKey, PacRet,
};
use rustc_span::source_map::Spanned;
use rustc_span::{DUMMY_SP, Span};
use rustc_symbol_mangling::mangle_internal_symbol;
use rustc_target::spec::{HasTargetSpec, RelocModel, SmallDataThresholdSupport, Target, TlsModel};
use smallvec::SmallVec;

use crate::back::write::to_llvm_code_model;
use crate::callee::get_fn;
use crate::common::AsCCharPtr;
use crate::debuginfo::metadata::apply_vcall_visibility_metadata;
use crate::llvm::Metadata;
use crate::type_::Type;
use crate::value::Value;
use crate::{attributes, common, coverageinfo, debuginfo, llvm, llvm_util};

/// `TyCtxt` (and related cache data structures) can't be moved between threads.
/// However, there are various cx-related functions which we want to be available to the builder
/// and other compiler pieces. Here we define a small subset which has enough information and can
/// be moved around more freely.
pub(crate) struct SCx<'ll> {
    pub llmod: &'ll llvm::Module,
    pub llcx: &'ll llvm::Context,
    pub isize_ty: &'ll Type,
}

impl<'ll> Borrow<SCx<'ll>> for FullCx<'ll, '_> {
    fn borrow(&self) -> &SCx<'ll> {
        &self.scx
    }
}

impl<'ll, 'tcx> Deref for FullCx<'ll, 'tcx> {
    type Target = SimpleCx<'ll>;

    #[inline]
    fn deref(&self) -> &Self::Target {
        &self.scx
    }
}

pub(crate) struct GenericCx<'ll, T: Borrow<SCx<'ll>>>(T, PhantomData<SCx<'ll>>);

impl<'ll, T: Borrow<SCx<'ll>>> Deref for GenericCx<'ll, T> {
    type Target = T;

    #[inline]
    fn deref(&self) -> &Self::Target {
        &self.0
    }
}

impl<'ll, T: Borrow<SCx<'ll>>> DerefMut for GenericCx<'ll, T> {
    #[inline]
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.0
    }
}

pub(crate) type SimpleCx<'ll> = GenericCx<'ll, SCx<'ll>>;

/// There is one `CodegenCx` per codegen unit. Each one has its own LLVM
/// `llvm::Context` so that several codegen units may be processed in parallel.
/// All other LLVM data structures in the `CodegenCx` are tied to that `llvm::Context`.
pub(crate) type CodegenCx<'ll, 'tcx> = GenericCx<'ll, FullCx<'ll, 'tcx>>;
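
// `GenericCx` is the shared wrapper type: `SimpleCx` stores the `SCx` fields
// directly, while `CodegenCx` stores them inside `FullCx` and reaches them via
// the `Deref`/`Borrow` impls above, so methods written once on
// `GenericCx<'ll, CX>` are available on both context types.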

pub(crate) struct FullCx<'ll, 'tcx> {
    pub tcx: TyCtxt<'tcx>,
    pub scx: SimpleCx<'ll>,
    pub use_dll_storage_attrs: bool,
    pub tls_model: llvm::ThreadLocalMode,

    pub codegen_unit: &'tcx CodegenUnit<'tcx>,

    /// Cache instances of monomorphic and polymorphic items
    pub instances: RefCell<FxHashMap<Instance<'tcx>, &'ll Value>>,
    /// Cache generated vtables
    pub vtables: RefCell<FxHashMap<(Ty<'tcx>, Option<ty::ExistentialTraitRef<'tcx>>), &'ll Value>>,
    /// Cache of constant strings.
    pub const_str_cache: RefCell<FxHashMap<String, &'ll Value>>,

    /// Cache of emitted const globals (value -> global)
    pub const_globals: RefCell<FxHashMap<&'ll Value, &'ll Value>>,

    /// List of globals for static variables which need to be passed to the
    /// LLVM function ReplaceAllUsesWith (RAUW) when codegen is complete.
    /// (We have to make sure we don't invalidate any Values referring
    /// to constants.)
    pub statics_to_rauw: RefCell<Vec<(&'ll Value, &'ll Value)>>,

    /// Statics that will be placed in the llvm.used variable
    /// See <https://llvm.org/docs/LangRef.html#the-llvm-used-global-variable> for details
    pub used_statics: Vec<&'ll Value>,

    /// Statics that will be placed in the llvm.compiler.used variable
    /// See <https://llvm.org/docs/LangRef.html#the-llvm-compiler-used-global-variable> for details
    pub compiler_used_statics: Vec<&'ll Value>,

    /// Mapping of non-scalar types to llvm types.
    pub type_lowering: RefCell<FxHashMap<(Ty<'tcx>, Option<VariantIdx>), &'ll Type>>,

    /// Mapping of scalar types to llvm types.
    pub scalar_lltypes: RefCell<FxHashMap<Ty<'tcx>, &'ll Type>>,

    /// Extra per-CGU codegen state needed when coverage instrumentation is enabled.
    pub coverage_cx: Option<coverageinfo::CguCoverageContext<'ll, 'tcx>>,
    pub dbg_cx: Option<debuginfo::CodegenUnitDebugContext<'ll, 'tcx>>,

    eh_personality: Cell<Option<&'ll Value>>,
    eh_catch_typeinfo: Cell<Option<&'ll Value>>,
    pub rust_try_fn: Cell<Option<(&'ll Type, &'ll Value)>>,

    intrinsics: RefCell<FxHashMap<&'static str, (&'ll Type, &'ll Value)>>,

    /// A counter that is used for generating local symbol names
    local_gen_sym_counter: Cell<usize>,

    /// `codegen_static` will sometimes create a second global variable with a
    /// different type and clear the symbol name of the original global.
    /// `global_asm!` needs to be able to find this new global so that it can
    /// compute the correct mangled symbol name to insert into the asm.
    pub renamed_statics: RefCell<FxHashMap<DefId, &'ll Value>>,
}

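/// Maps rustc's `TlsModel` to LLVM's `ThreadLocalMode`. LLVM has no separate
/// mode for emulated TLS, so `Emulated` falls back to the general-dynamic
/// access model here.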
fn to_llvm_tls_model(tls_model: TlsModel) -> llvm::ThreadLocalMode {
    match tls_model {
        TlsModel::GeneralDynamic => llvm::ThreadLocalMode::GeneralDynamic,
        TlsModel::LocalDynamic => llvm::ThreadLocalMode::LocalDynamic,
        TlsModel::InitialExec => llvm::ThreadLocalMode::InitialExec,
        TlsModel::LocalExec => llvm::ThreadLocalMode::LocalExec,
        TlsModel::Emulated => llvm::ThreadLocalMode::GeneralDynamic,
    }
}

pub(crate) unsafe fn create_module<'ll>(
    tcx: TyCtxt<'_>,
    llcx: &'ll llvm::Context,
    mod_name: &str,
) -> &'ll llvm::Module {
    let sess = tcx.sess;
    let mod_name = SmallCStr::new(mod_name);
    let llmod = unsafe { llvm::LLVMModuleCreateWithNameInContext(mod_name.as_ptr(), llcx) };

    let mut target_data_layout = sess.target.data_layout.to_string();
    let llvm_version = llvm_util::get_version();

    if llvm_version < (20, 0, 0) {
        if sess.target.arch == "aarch64" || sess.target.arch.starts_with("arm64") {
            // LLVM 20 defines three additional address spaces for alternate
            // pointer kinds used in Windows.
            // See https://github.com/llvm/llvm-project/pull/111879
            target_data_layout =
                target_data_layout.replace("-p270:32:32-p271:32:32-p272:64:64", "");
        }
        if sess.target.arch.starts_with("sparc") {
            // LLVM 20 updates the sparc layout to correctly align 128 bit integers to 128 bit.
            // See https://github.com/llvm/llvm-project/pull/106951
            target_data_layout = target_data_layout.replace("-i128:128", "");
        }
        if sess.target.arch.starts_with("mips64") {
            // LLVM 20 updates the mips64 layout to correctly align 128 bit integers to 128 bit.
            // See https://github.com/llvm/llvm-project/pull/112084
            target_data_layout = target_data_layout.replace("-i128:128", "");
        }
        if sess.target.arch.starts_with("powerpc64") {
            // LLVM 20 updates the powerpc64 layout to correctly align 128 bit integers to 128 bit.
            // See https://github.com/llvm/llvm-project/pull/118004
            target_data_layout = target_data_layout.replace("-i128:128", "");
        }
        if sess.target.arch.starts_with("wasm32") || sess.target.arch.starts_with("wasm64") {
            // LLVM 20 updates the wasm(32|64) layout to correctly align 128 bit integers to 128 bit.
            // See https://github.com/llvm/llvm-project/pull/119204
            target_data_layout = target_data_layout.replace("-i128:128", "");
        }
    }
    if llvm_version < (21, 0, 0) {
        if sess.target.arch == "nvptx64" {
            // LLVM 21 updated the default layout on nvptx: https://github.com/llvm/llvm-project/pull/124961
            target_data_layout = target_data_layout.replace("e-p6:32:32-i64", "e-i64");
        }
    }

    // Ensure that the hardcoded data-layout values remain the defaults.
    {
        let tm = crate::back::write::create_informational_target_machine(tcx.sess, false);
        unsafe {
            llvm::LLVMRustSetDataLayoutFromTargetMachine(llmod, tm.raw());
        }

        let llvm_data_layout = unsafe { llvm::LLVMGetDataLayoutStr(llmod) };
        let llvm_data_layout =
            str::from_utf8(unsafe { CStr::from_ptr(llvm_data_layout) }.to_bytes())
                .expect("got a non-UTF8 data-layout from LLVM");

        if target_data_layout != llvm_data_layout {
            tcx.dcx().emit_err(crate::errors::MismatchedDataLayout {
                rustc_target: sess.opts.target_triple.to_string().as_str(),
                rustc_layout: target_data_layout.as_str(),
                llvm_target: sess.target.llvm_target.borrow(),
                llvm_layout: llvm_data_layout,
            });
        }
    }

    let data_layout = SmallCStr::new(&target_data_layout);
    unsafe {
        llvm::LLVMSetDataLayout(llmod, data_layout.as_ptr());
    }

    let llvm_target = SmallCStr::new(&versioned_llvm_target(sess));
    unsafe {
        llvm::LLVMRustSetNormalizedTarget(llmod, llvm_target.as_ptr());
    }

    let reloc_model = sess.relocation_model();
    if matches!(reloc_model, RelocModel::Pic | RelocModel::Pie) {
        unsafe {
            llvm::LLVMRustSetModulePICLevel(llmod);
        }
        // PIE is potentially more effective than PIC, but can only be used in executables.
        // If all our outputs are executables, then we can relax PIC to PIE.
        if reloc_model == RelocModel::Pie
            || tcx.crate_types().iter().all(|ty| *ty == CrateType::Executable)
        {
            unsafe {
                llvm::LLVMRustSetModulePIELevel(llmod);
            }
        }
    }

    // Linking object files with different code models is undefined behavior
    // because the compiler would have to generate additional code (to span
    // longer jumps) if a larger code model is used with a smaller one.
    //
    // See https://reviews.llvm.org/D52322 and https://reviews.llvm.org/D52323.
    unsafe {
        llvm::LLVMRustSetModuleCodeModel(llmod, to_llvm_code_model(sess.code_model()));
    }

    // If skipping the PLT is enabled, we need to add some module metadata
    // to ensure intrinsic calls don't use it.
    if !sess.needs_plt() {
        llvm::add_module_flag_u32(llmod, llvm::ModuleFlagMergeBehavior::Warning, "RtLibUseGOT", 1);
    }

    // Enable canonical jump tables if CFI is enabled. (See https://reviews.llvm.org/D65629.)
    if sess.is_sanitizer_cfi_canonical_jump_tables_enabled() && sess.is_sanitizer_cfi_enabled() {
        llvm::add_module_flag_u32(
            llmod,
            llvm::ModuleFlagMergeBehavior::Override,
            "CFI Canonical Jump Tables",
            1,
        );
    }

    // If we're normalizing integers with CFI, ensure LLVM generated functions do the same.
    // See https://github.com/llvm/llvm-project/pull/104826
    if sess.is_sanitizer_cfi_normalize_integers_enabled() {
        llvm::add_module_flag_u32(
            llmod,
            llvm::ModuleFlagMergeBehavior::Override,
            "cfi-normalize-integers",
            1,
        );
    }

    // Enable LTO unit splitting if specified or if CFI is enabled. (See
    // https://reviews.llvm.org/D53891.)
    if sess.is_split_lto_unit_enabled() || sess.is_sanitizer_cfi_enabled() {
        llvm::add_module_flag_u32(
            llmod,
            llvm::ModuleFlagMergeBehavior::Override,
            "EnableSplitLTOUnit",
            1,
        );
    }

    // Add "kcfi" module flag if KCFI is enabled. (See https://reviews.llvm.org/D119296.)
    if sess.is_sanitizer_kcfi_enabled() {
        llvm::add_module_flag_u32(llmod, llvm::ModuleFlagMergeBehavior::Override, "kcfi", 1);

        // Add "kcfi-offset" module flag with -Z patchable-function-entry (See
        // https://reviews.llvm.org/D141172).
        let pfe =
            PatchableFunctionEntry::from_config(sess.opts.unstable_opts.patchable_function_entry);
        if pfe.prefix() > 0 {
            llvm::add_module_flag_u32(
                llmod,
                llvm::ModuleFlagMergeBehavior::Override,
                "kcfi-offset",
                pfe.prefix().into(),
            );
        }

        // Add "kcfi-arity" module flag if KCFI arity indicator is enabled. (See
        // https://github.com/llvm/llvm-project/pull/117121.)
        if sess.is_sanitizer_kcfi_arity_enabled() {
            // KCFI arity indicator requires LLVM 21.0.0 or later.
            if llvm_version < (21, 0, 0) {
                tcx.dcx().emit_err(crate::errors::SanitizerKcfiArityRequiresLLVM2100);
            }

            llvm::add_module_flag_u32(
                llmod,
                llvm::ModuleFlagMergeBehavior::Override,
                "kcfi-arity",
                1,
            );
        }
    }

    // Control Flow Guard is currently only supported by MSVC and LLVM on Windows.
    if sess.target.is_like_msvc
        || (sess.target.options.os == "windows"
            && sess.target.options.env == "gnu"
            && sess.target.options.abi == "llvm")
    {
        match sess.opts.cg.control_flow_guard {
            CFGuard::Disabled => {}
            CFGuard::NoChecks => {
                // Set `cfguard=1` module flag to emit metadata only.
                llvm::add_module_flag_u32(
                    llmod,
                    llvm::ModuleFlagMergeBehavior::Warning,
                    "cfguard",
                    1,
                );
            }
            CFGuard::Checks => {
                // Set `cfguard=2` module flag to emit metadata and checks.
                llvm::add_module_flag_u32(
                    llmod,
                    llvm::ModuleFlagMergeBehavior::Warning,
                    "cfguard",
                    2,
                );
            }
        }
    }

    if let Some(BranchProtection { bti, pac_ret }) = sess.opts.unstable_opts.branch_protection {
        if sess.target.arch == "aarch64" {
            llvm::add_module_flag_u32(
                llmod,
                llvm::ModuleFlagMergeBehavior::Min,
                "branch-target-enforcement",
                bti.into(),
            );
            llvm::add_module_flag_u32(
                llmod,
                llvm::ModuleFlagMergeBehavior::Min,
                "sign-return-address",
                pac_ret.is_some().into(),
            );
            let pac_opts = pac_ret.unwrap_or(PacRet { leaf: false, pc: false, key: PAuthKey::A });
            llvm::add_module_flag_u32(
                llmod,
                llvm::ModuleFlagMergeBehavior::Min,
                "branch-protection-pauth-lr",
                pac_opts.pc.into(),
            );
            llvm::add_module_flag_u32(
                llmod,
                llvm::ModuleFlagMergeBehavior::Min,
                "sign-return-address-all",
                pac_opts.leaf.into(),
            );
            llvm::add_module_flag_u32(
                llmod,
                llvm::ModuleFlagMergeBehavior::Min,
                "sign-return-address-with-bkey",
                u32::from(pac_opts.key == PAuthKey::B),
            );
        } else {
            bug!(
                "branch-protection used on non-AArch64 target; \
                  this should be checked in rustc_session."
            );
        }
    }

    // Pass on the control-flow protection flags to LLVM (equivalent to `-fcf-protection` in Clang).
    if let CFProtection::Branch | CFProtection::Full = sess.opts.unstable_opts.cf_protection {
        llvm::add_module_flag_u32(
            llmod,
            llvm::ModuleFlagMergeBehavior::Override,
            "cf-protection-branch",
            1,
        );
    }
    if let CFProtection::Return | CFProtection::Full = sess.opts.unstable_opts.cf_protection {
        llvm::add_module_flag_u32(
            llmod,
            llvm::ModuleFlagMergeBehavior::Override,
            "cf-protection-return",
            1,
        );
    }

    if sess.opts.unstable_opts.virtual_function_elimination {
        llvm::add_module_flag_u32(
            llmod,
            llvm::ModuleFlagMergeBehavior::Error,
            "Virtual Function Elim",
            1,
        );
    }

    // Set module flag to enable Windows EHCont Guard (/guard:ehcont).
    if sess.opts.unstable_opts.ehcont_guard {
        llvm::add_module_flag_u32(llmod, llvm::ModuleFlagMergeBehavior::Warning, "ehcontguard", 1);
    }

    match sess.opts.unstable_opts.function_return {
        FunctionReturn::Keep => {}
        FunctionReturn::ThunkExtern => {
            llvm::add_module_flag_u32(
                llmod,
                llvm::ModuleFlagMergeBehavior::Override,
                "function_return_thunk_extern",
                1,
            );
        }
    }

    match (sess.opts.unstable_opts.small_data_threshold, sess.target.small_data_threshold_support())
    {
        // Set up the small-data optimization limit for architectures that use
        // an LLVM module flag to control this.
        (Some(threshold), SmallDataThresholdSupport::LlvmModuleFlag(flag)) => {
            llvm::add_module_flag_u32(
                llmod,
                llvm::ModuleFlagMergeBehavior::Error,
                &flag,
                threshold as u32,
            );
        }
        _ => (),
    };

    // Insert `llvm.ident` metadata.
    //
    // On the wasm targets it will get hooked up to the "producer" section's
    // `processed-by` information.
    #[allow(clippy::option_env_unwrap)]
    let rustc_producer =
        format!("rustc version {}", option_env!("CFG_VERSION").expect("CFG_VERSION"));
    let name_metadata = unsafe {
        llvm::LLVMMDStringInContext2(
            llcx,
            rustc_producer.as_c_char_ptr(),
            rustc_producer.as_bytes().len(),
        )
    };
    unsafe {
        llvm::LLVMAddNamedMetadataOperand(
            llmod,
            c"llvm.ident".as_ptr(),
            &llvm::LLVMMetadataAsValue(llcx, llvm::LLVMMDNodeInContext2(llcx, &name_metadata, 1)),
        );
    }

    // Emit RISC-V specific `target-abi` metadata to work around lld (as the
    // LTO plugin) not correctly setting `target-abi` for the LTO object.
    // FIXME: https://github.com/llvm/llvm-project/issues/50591
    // If llvm_abiname is empty, emit nothing.
    let llvm_abiname = &sess.target.options.llvm_abiname;
    if matches!(sess.target.arch.as_ref(), "riscv32" | "riscv64") && !llvm_abiname.is_empty() {
        llvm::add_module_flag_str(
            llmod,
            llvm::ModuleFlagMergeBehavior::Error,
            "target-abi",
            llvm_abiname,
        );
    }

    // Add module flags specified via -Z llvm_module_flag
    for (key, value, merge_behavior) in &sess.opts.unstable_opts.llvm_module_flag {
        let merge_behavior = match merge_behavior.as_str() {
            "error" => llvm::ModuleFlagMergeBehavior::Error,
            "warning" => llvm::ModuleFlagMergeBehavior::Warning,
            "require" => llvm::ModuleFlagMergeBehavior::Require,
            "override" => llvm::ModuleFlagMergeBehavior::Override,
            "append" => llvm::ModuleFlagMergeBehavior::Append,
            "appendunique" => llvm::ModuleFlagMergeBehavior::AppendUnique,
            "max" => llvm::ModuleFlagMergeBehavior::Max,
            "min" => llvm::ModuleFlagMergeBehavior::Min,
            // We already checked this during option parsing
            _ => unreachable!(),
        };
        llvm::add_module_flag_u32(llmod, merge_behavior, key, *value);
    }

    llmod
}

impl<'ll, 'tcx> CodegenCx<'ll, 'tcx> {
    pub(crate) fn new(
        tcx: TyCtxt<'tcx>,
        codegen_unit: &'tcx CodegenUnit<'tcx>,
        llvm_module: &'ll crate::ModuleLlvm,
    ) -> Self {
        // An interesting part of Windows which MSVC forces our hand on (and
        // apparently MinGW didn't) is the usage of `dllimport` and `dllexport`
        // attributes in LLVM IR as well as native dependencies (in C these
        // correspond to `__declspec(dllimport)`).
        //
        // LD (BFD) in MinGW mode can often correctly guess `dllexport` but
        // relying on that can result in issues like #50176.
        // LLD won't support that and expects symbols with proper attributes.
        // Because of that we make the MinGW target emit dllexport just like MSVC.
        // When it comes to dllimport we use it for constants but for functions
        // rely on the linker to do the right thing. As opposed to dllexport, this
        // task is easy for them (both LD and LLD) and allows us to easily use
        // symbols from static libraries in shared libraries.
        //
        // Whenever a dynamic library is built on Windows it must have its public
        // interface specified by functions tagged with `dllexport` or otherwise
        // they're not available to be linked against. This poses a few problems
        // for the compiler, some of which are somewhat fundamental, but we use
        // the `use_dll_storage_attrs` variable below to attach the `dllexport`
        // attribute to all LLVM functions that are exported (e.g., they're
        // already tagged with external linkage). This is suboptimal for a few
        // reasons:
        //
        // * If an object file will never be included in a dynamic library,
        //   there's no need to attach the dllexport attribute. Most object
        //   files in Rust are not destined to become part of a dll as binaries
        //   are statically linked by default.
        // * If the compiler is emitting both an rlib and a dylib, the same
        //   source object file is currently used but with MSVC this may be less
        //   feasible. The compiler may be able to get around this, but it may
        //   involve some invasive changes to deal with this.
        //
        // The flip side of this situation is that whenever you link to a dll and
        // you import a function from it, the import should be tagged with
        // `dllimport`. At this time, however, the compiler does not emit
        // `dllimport` for any declarations other than constants (where it is
        // required), which is again suboptimal for even more reasons!
        //
        // * Calling a function imported from another dll without using
        //   `dllimport` causes the linker/compiler to have extra overhead (one
        //   `jmp` instruction on x86) when calling the function.
        // * The same object file may be used in different circumstances, so a
        //   function may be imported from a dll if the object is linked into a
        //   dll, but it may be just linked against if linked into an rlib.
        // * The compiler has no knowledge about whether native functions should
        //   be tagged dllimport or not.
        //
        // For now the compiler takes the perf hit (I do not have any numbers to
        // this effect) by marking very little as `dllimport` and praying the
        // linker will take care of everything. Fixing this problem will likely
        // require adding a few attributes to Rust itself (feature gated at the
        // start) and then strongly recommending static linkage on Windows!
        let use_dll_storage_attrs = tcx.sess.target.is_like_windows;

        let tls_model = to_llvm_tls_model(tcx.sess.tls_model());

        let (llcx, llmod) = (&*llvm_module.llcx, llvm_module.llmod());

        let coverage_cx =
            tcx.sess.instrument_coverage().then(coverageinfo::CguCoverageContext::new);

        let dbg_cx = if tcx.sess.opts.debuginfo != DebugInfo::None {
            let dctx = debuginfo::CodegenUnitDebugContext::new(llmod);
            debuginfo::metadata::build_compile_unit_di_node(
                tcx,
                codegen_unit.name().as_str(),
                &dctx,
            );
            Some(dctx)
        } else {
            None
        };

        GenericCx(
            FullCx {
                tcx,
                scx: SimpleCx::new(llmod, llcx, tcx.data_layout.pointer_size),
                use_dll_storage_attrs,
                tls_model,
                codegen_unit,
                instances: Default::default(),
                vtables: Default::default(),
                const_str_cache: Default::default(),
                const_globals: Default::default(),
                statics_to_rauw: RefCell::new(Vec::new()),
                used_statics: Vec::new(),
                compiler_used_statics: Vec::new(),
                type_lowering: Default::default(),
                scalar_lltypes: Default::default(),
                coverage_cx,
                dbg_cx,
                eh_personality: Cell::new(None),
                eh_catch_typeinfo: Cell::new(None),
                rust_try_fn: Cell::new(None),
                intrinsics: Default::default(),
                local_gen_sym_counter: Cell::new(0),
                renamed_statics: Default::default(),
            },
            PhantomData,
        )
    }

    pub(crate) fn statics_to_rauw(&self) -> &RefCell<Vec<(&'ll Value, &'ll Value)>> {
        &self.statics_to_rauw
    }

    /// Extra state that is only available when coverage instrumentation is enabled.
    #[inline]
    #[track_caller]
    pub(crate) fn coverage_cx(&self) -> &coverageinfo::CguCoverageContext<'ll, 'tcx> {
        self.coverage_cx.as_ref().expect("only called when coverage instrumentation is enabled")
    }

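    /// Creates an appending-linkage array global in the `llvm.metadata` section
    /// holding `values`; used to emit the `llvm.used` and `llvm.compiler.used`
    /// lists collected above.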
    pub(crate) fn create_used_variable_impl(&self, name: &'static CStr, values: &[&'ll Value]) {
        let array = self.const_array(self.type_ptr(), values);

        let g = llvm::add_global(self.llmod, self.val_ty(array), name);
        llvm::set_initializer(g, array);
        llvm::set_linkage(g, llvm::Linkage::AppendingLinkage);
        llvm::set_section(g, c"llvm.metadata");
    }
}
impl<'ll> SimpleCx<'ll> {
    pub(crate) fn get_return_type(&self, ty: &'ll Type) -> &'ll Type {
        assert_eq!(self.type_kind(ty), TypeKind::Function);
        unsafe { llvm::LLVMGetReturnType(ty) }
    }
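    /// Returns the value type of a global, i.e. the type of its initializer,
    /// rather than the global's own (pointer) type.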
    pub(crate) fn get_type_of_global(&self, val: &'ll Value) -> &'ll Type {
        unsafe { llvm::LLVMGlobalGetValueType(val) }
    }
    pub(crate) fn val_ty(&self, v: &'ll Value) -> &'ll Type {
        common::val_ty(v)
    }
}
impl<'ll> SimpleCx<'ll> {
    pub(crate) fn new(
        llmod: &'ll llvm::Module,
        llcx: &'ll llvm::Context,
        pointer_size: Size,
    ) -> Self {
        let isize_ty = llvm::Type::ix_llcx(llcx, pointer_size.bits());
        Self(SCx { llmod, llcx, isize_ty }, PhantomData)
    }
}

impl<'ll, CX: Borrow<SCx<'ll>>> GenericCx<'ll, CX> {
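    /// Wraps an LLVM `Metadata` node as a `Value` in this context so it can be
    /// passed to APIs that expect values.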
    pub(crate) fn get_metadata_value(&self, metadata: &'ll Metadata) -> &'ll Value {
        llvm::LLVMMetadataAsValue(self.llcx(), metadata)
    }

    // FIXME(autodiff): We should split `ConstCodegenMethods` to pull the reusable parts
    // onto a trait that is also implemented for GenericCx.
    pub(crate) fn get_const_i64(&self, n: u64) -> &'ll Value {
        let ty = unsafe { llvm::LLVMInt64TypeInContext(self.llcx()) };
        unsafe { llvm::LLVMConstInt(ty, n, llvm::False) }
    }

    pub(crate) fn get_function(&self, name: &str) -> Option<&'ll Value> {
        let name = SmallCStr::new(name);
        unsafe { llvm::LLVMGetNamedFunction((**self).borrow().llmod, name.as_ptr()) }
    }

    pub(crate) fn get_md_kind_id(&self, name: &str) -> llvm::MetadataKindId {
        unsafe {
            llvm::LLVMGetMDKindIDInContext(
                self.llcx(),
                name.as_ptr() as *const c_char,
                name.len() as c_uint,
            )
        }
    }

    pub(crate) fn create_metadata(&self, name: String) -> Option<&'ll Metadata> {
        Some(unsafe {
            llvm::LLVMMDStringInContext2(self.llcx(), name.as_ptr() as *const c_char, name.len())
        })
    }

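    /// Collects every function declared or defined in this module by walking
    /// LLVM's function list.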
    pub(crate) fn get_functions(&self) -> Vec<&'ll Value> {
        let mut functions = vec![];
        let mut func = unsafe { llvm::LLVMGetFirstFunction(self.llmod()) };
        while let Some(f) = func {
            functions.push(f);
            func = unsafe { llvm::LLVMGetNextFunction(f) }
        }
        functions
    }
}

impl<'ll, 'tcx> MiscCodegenMethods<'tcx> for CodegenCx<'ll, 'tcx> {
    fn vtables(
        &self,
    ) -> &RefCell<FxHashMap<(Ty<'tcx>, Option<ty::ExistentialTraitRef<'tcx>>), &'ll Value>> {
        &self.vtables
    }

    fn apply_vcall_visibility_metadata(
        &self,
        ty: Ty<'tcx>,
        poly_trait_ref: Option<ty::ExistentialTraitRef<'tcx>>,
        vtable: &'ll Value,
    ) {
        apply_vcall_visibility_metadata(self, ty, poly_trait_ref, vtable);
    }

    fn get_fn(&self, instance: Instance<'tcx>) -> &'ll Value {
        get_fn(self, instance)
    }

    fn get_fn_addr(&self, instance: Instance<'tcx>) -> &'ll Value {
        get_fn(self, instance)
    }

    fn eh_personality(&self) -> &'ll Value {
        // The exception handling personality function.
        //
        // If our compilation unit has the `eh_personality` lang item somewhere
        // within it, then we just need to codegen that. Otherwise, we're
        // building an rlib which will depend on some upstream implementation of
        // this function, so we just codegen a generic reference to it. We don't
        // specify any of the types for the function, we just make it a symbol
        // that LLVM can later use.
        //
        // Note that MSVC is a little special here in that we don't use the
        // `eh_personality` lang item at all. Currently LLVM has support for
        // both Dwarf and SEH unwind mechanisms for MSVC targets and uses the
        // *name of the personality function* to decide what kind of unwind side
        // tables/landing pads to emit. It looks like Dwarf is used by default,
        // injecting a dependency on the `_Unwind_Resume` symbol for resuming
        // an "exception", but for MSVC we want to force SEH. This means that we
        // can't actually have the personality function be our standard
        // `rust_eh_personality` function, but rather we wired it up to the
        // CRT's custom personality function, which forces LLVM to consider
        // landing pads as "landing pads for SEH".
        if let Some(llpersonality) = self.eh_personality.get() {
            return llpersonality;
        }

        let name = if wants_msvc_seh(self.sess()) {
            Some("__CxxFrameHandler3")
        } else if wants_wasm_eh(self.sess()) {
            // LLVM specifically tests for the name of the personality function.
            // There is no need for this function to exist anywhere; it will
            // not be called. However, its name has to be "__gxx_wasm_personality_v0"
            // for native wasm exceptions.
            Some("__gxx_wasm_personality_v0")
        } else {
            None
        };

        let tcx = self.tcx;
        let llfn = match tcx.lang_items().eh_personality() {
            Some(def_id) if name.is_none() => self.get_fn_addr(ty::Instance::expect_resolve(
                tcx,
                self.typing_env(),
                def_id,
                ty::List::empty(),
                DUMMY_SP,
            )),
            _ => {
                let name = name.unwrap_or("rust_eh_personality");
                if let Some(llfn) = self.get_declared_value(name) {
                    llfn
                } else {
                    let fty = self.type_variadic_func(&[], self.type_i32());
                    let llfn = self.declare_cfn(name, llvm::UnnamedAddr::Global, fty);
                    let target_cpu = attributes::target_cpu_attr(self);
                    attributes::apply_to_llfn(llfn, llvm::AttributePlace::Function, &[target_cpu]);
                    llfn
                }
            }
        };
        self.eh_personality.set(Some(llfn));
        llfn
    }

    fn sess(&self) -> &Session {
        self.tcx.sess
    }

    fn set_frame_pointer_type(&self, llfn: &'ll Value) {
        if let Some(attr) = attributes::frame_pointer_type_attr(self) {
            attributes::apply_to_llfn(llfn, llvm::AttributePlace::Function, &[attr]);
        }
    }

    fn apply_target_cpu_attr(&self, llfn: &'ll Value) {
        let mut attrs = SmallVec::<[_; 2]>::new();
        attrs.push(attributes::target_cpu_attr(self));
        attrs.extend(attributes::tune_cpu_attr(self));
        attributes::apply_to_llfn(llfn, llvm::AttributePlace::Function, &attrs);
    }

    fn declare_c_main(&self, fn_type: Self::Type) -> Option<Self::Function> {
        let entry_name = self.sess().target.entry_name.as_ref();
        if self.get_declared_value(entry_name).is_none() {
            Some(self.declare_entry_fn(
                entry_name,
                llvm::CallConv::from_conv(
                    self.sess().target.entry_abi,
                    self.sess().target.arch.borrow(),
                ),
                llvm::UnnamedAddr::Global,
                fn_type,
            ))
        } else {
            // If the symbol already exists, it is an error: for example, the user wrote
            // #[no_mangle] extern "C" fn main(..) {..}
            None
        }
    }
}

impl<'ll> CodegenCx<'ll, '_> {
    pub(crate) fn get_intrinsic(&self, key: &str) -> (&'ll Type, &'ll Value) {
        if let Some(v) = self.intrinsics.borrow().get(key).cloned() {
            return v;
        }

        self.declare_intrinsic(key).unwrap_or_else(|| bug!("unknown intrinsic '{}'", key))
    }

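    /// Declares the intrinsic `name` with the given signature (C-variadic when
    /// `args` is `None`) and caches the declaration for later lookups.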
    fn insert_intrinsic(
        &self,
        name: &'static str,
        args: Option<&[&'ll llvm::Type]>,
        ret: &'ll llvm::Type,
    ) -> (&'ll llvm::Type, &'ll llvm::Value) {
        let fn_ty = if let Some(args) = args {
            self.type_func(args, ret)
        } else {
            self.type_variadic_func(&[], ret)
        };
        let f = self.declare_cfn(name, llvm::UnnamedAddr::No, fn_ty);
        self.intrinsics.borrow_mut().insert(name, (fn_ty, f));
        (fn_ty, f)
    }

    fn declare_intrinsic(&self, key: &str) -> Option<(&'ll Type, &'ll Value)> {
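        // Each `ifn!` arm declares and caches the named intrinsic when `key`
        // matches it: nullary (`fn()`), C-variadic (`fn(...)`), or with a fixed
        // argument list.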
        macro_rules! ifn {
            ($name:expr, fn() -> $ret:expr) => (
                if key == $name {
                    return Some(self.insert_intrinsic($name, Some(&[]), $ret));
                }
            );
            ($name:expr, fn(...) -> $ret:expr) => (
                if key == $name {
                    return Some(self.insert_intrinsic($name, None, $ret));
                }
            );
            ($name:expr, fn($($arg:expr),*) -> $ret:expr) => (
                if key == $name {
                    return Some(self.insert_intrinsic($name, Some(&[$($arg),*]), $ret));
                }
            );
        }
        macro_rules! mk_struct {
            ($($field_ty:expr),*) => (self.type_struct( &[$($field_ty),*], false))
        }

        let ptr = self.type_ptr();
        let void = self.type_void();
        let i1 = self.type_i1();
        let t_i8 = self.type_i8();
        let t_i16 = self.type_i16();
        let t_i32 = self.type_i32();
        let t_i64 = self.type_i64();
        let t_i128 = self.type_i128();
        let t_isize = self.type_isize();
        let t_f16 = self.type_f16();
        let t_f32 = self.type_f32();
        let t_f64 = self.type_f64();
        let t_f128 = self.type_f128();
        let t_metadata = self.type_metadata();
        let t_token = self.type_token();

        ifn!("llvm.wasm.get.exception", fn(t_token) -> ptr);
        ifn!("llvm.wasm.get.ehselector", fn(t_token) -> t_i32);

        ifn!("llvm.wasm.trunc.unsigned.i32.f32", fn(t_f32) -> t_i32);
        ifn!("llvm.wasm.trunc.unsigned.i32.f64", fn(t_f64) -> t_i32);
        ifn!("llvm.wasm.trunc.unsigned.i64.f32", fn(t_f32) -> t_i64);
        ifn!("llvm.wasm.trunc.unsigned.i64.f64", fn(t_f64) -> t_i64);
        ifn!("llvm.wasm.trunc.signed.i32.f32", fn(t_f32) -> t_i32);
        ifn!("llvm.wasm.trunc.signed.i32.f64", fn(t_f64) -> t_i32);
        ifn!("llvm.wasm.trunc.signed.i64.f32", fn(t_f32) -> t_i64);
        ifn!("llvm.wasm.trunc.signed.i64.f64", fn(t_f64) -> t_i64);

        ifn!("llvm.fptosi.sat.i8.f32", fn(t_f32) -> t_i8);
        ifn!("llvm.fptosi.sat.i16.f32", fn(t_f32) -> t_i16);
        ifn!("llvm.fptosi.sat.i32.f32", fn(t_f32) -> t_i32);
        ifn!("llvm.fptosi.sat.i64.f32", fn(t_f32) -> t_i64);
        ifn!("llvm.fptosi.sat.i128.f32", fn(t_f32) -> t_i128);
        ifn!("llvm.fptosi.sat.i8.f64", fn(t_f64) -> t_i8);
        ifn!("llvm.fptosi.sat.i16.f64", fn(t_f64) -> t_i16);
        ifn!("llvm.fptosi.sat.i32.f64", fn(t_f64) -> t_i32);
        ifn!("llvm.fptosi.sat.i64.f64", fn(t_f64) -> t_i64);
        ifn!("llvm.fptosi.sat.i128.f64", fn(t_f64) -> t_i128);

        ifn!("llvm.fptoui.sat.i8.f32", fn(t_f32) -> t_i8);
        ifn!("llvm.fptoui.sat.i16.f32", fn(t_f32) -> t_i16);
        ifn!("llvm.fptoui.sat.i32.f32", fn(t_f32) -> t_i32);
        ifn!("llvm.fptoui.sat.i64.f32", fn(t_f32) -> t_i64);
        ifn!("llvm.fptoui.sat.i128.f32", fn(t_f32) -> t_i128);
        ifn!("llvm.fptoui.sat.i8.f64", fn(t_f64) -> t_i8);
        ifn!("llvm.fptoui.sat.i16.f64", fn(t_f64) -> t_i16);
        ifn!("llvm.fptoui.sat.i32.f64", fn(t_f64) -> t_i32);
        ifn!("llvm.fptoui.sat.i64.f64", fn(t_f64) -> t_i64);
        ifn!("llvm.fptoui.sat.i128.f64", fn(t_f64) -> t_i128);

        ifn!("llvm.trap", fn() -> void);
        ifn!("llvm.debugtrap", fn() -> void);
        ifn!("llvm.frameaddress", fn(t_i32) -> ptr);

        ifn!("llvm.powi.f16.i32", fn(t_f16, t_i32) -> t_f16);
        ifn!("llvm.powi.f32.i32", fn(t_f32, t_i32) -> t_f32);
        ifn!("llvm.powi.f64.i32", fn(t_f64, t_i32) -> t_f64);
        ifn!("llvm.powi.f128.i32", fn(t_f128, t_i32) -> t_f128);

        ifn!("llvm.pow.f16", fn(t_f16, t_f16) -> t_f16);
        ifn!("llvm.pow.f32", fn(t_f32, t_f32) -> t_f32);
        ifn!("llvm.pow.f64", fn(t_f64, t_f64) -> t_f64);
        ifn!("llvm.pow.f128", fn(t_f128, t_f128) -> t_f128);

        ifn!("llvm.sqrt.f16", fn(t_f16) -> t_f16);
        ifn!("llvm.sqrt.f32", fn(t_f32) -> t_f32);
        ifn!("llvm.sqrt.f64", fn(t_f64) -> t_f64);
        ifn!("llvm.sqrt.f128", fn(t_f128) -> t_f128);

        ifn!("llvm.sin.f16", fn(t_f16) -> t_f16);
        ifn!("llvm.sin.f32", fn(t_f32) -> t_f32);
        ifn!("llvm.sin.f64", fn(t_f64) -> t_f64);
        ifn!("llvm.sin.f128", fn(t_f128) -> t_f128);

        ifn!("llvm.cos.f16", fn(t_f16) -> t_f16);
        ifn!("llvm.cos.f32", fn(t_f32) -> t_f32);
        ifn!("llvm.cos.f64", fn(t_f64) -> t_f64);
        ifn!("llvm.cos.f128", fn(t_f128) -> t_f128);

        ifn!("llvm.exp.f16", fn(t_f16) -> t_f16);
        ifn!("llvm.exp.f32", fn(t_f32) -> t_f32);
        ifn!("llvm.exp.f64", fn(t_f64) -> t_f64);
        ifn!("llvm.exp.f128", fn(t_f128) -> t_f128);

        ifn!("llvm.exp2.f16", fn(t_f16) -> t_f16);
        ifn!("llvm.exp2.f32", fn(t_f32) -> t_f32);
        ifn!("llvm.exp2.f64", fn(t_f64) -> t_f64);
        ifn!("llvm.exp2.f128", fn(t_f128) -> t_f128);

        ifn!("llvm.log.f16", fn(t_f16) -> t_f16);
        ifn!("llvm.log.f32", fn(t_f32) -> t_f32);
        ifn!("llvm.log.f64", fn(t_f64) -> t_f64);
        ifn!("llvm.log.f128", fn(t_f128) -> t_f128);

        ifn!("llvm.log10.f16", fn(t_f16) -> t_f16);
        ifn!("llvm.log10.f32", fn(t_f32) -> t_f32);
        ifn!("llvm.log10.f64", fn(t_f64) -> t_f64);
        ifn!("llvm.log10.f128", fn(t_f128) -> t_f128);

        ifn!("llvm.log2.f16", fn(t_f16) -> t_f16);
        ifn!("llvm.log2.f32", fn(t_f32) -> t_f32);
        ifn!("llvm.log2.f64", fn(t_f64) -> t_f64);
        ifn!("llvm.log2.f128", fn(t_f128) -> t_f128);

        ifn!("llvm.fma.f16", fn(t_f16, t_f16, t_f16) -> t_f16);
        ifn!("llvm.fma.f32", fn(t_f32, t_f32, t_f32) -> t_f32);
        ifn!("llvm.fma.f64", fn(t_f64, t_f64, t_f64) -> t_f64);
        ifn!("llvm.fma.f128", fn(t_f128, t_f128, t_f128) -> t_f128);

        ifn!("llvm.fmuladd.f16", fn(t_f16, t_f16, t_f16) -> t_f16);
        ifn!("llvm.fmuladd.f32", fn(t_f32, t_f32, t_f32) -> t_f32);
        ifn!("llvm.fmuladd.f64", fn(t_f64, t_f64, t_f64) -> t_f64);
        ifn!("llvm.fmuladd.f128", fn(t_f128, t_f128, t_f128) -> t_f128);

        ifn!("llvm.fabs.f16", fn(t_f16) -> t_f16);
        ifn!("llvm.fabs.f32", fn(t_f32) -> t_f32);
        ifn!("llvm.fabs.f64", fn(t_f64) -> t_f64);
        ifn!("llvm.fabs.f128", fn(t_f128) -> t_f128);

        ifn!("llvm.minnum.f16", fn(t_f16, t_f16) -> t_f16);
        ifn!("llvm.minnum.f32", fn(t_f32, t_f32) -> t_f32);
        ifn!("llvm.minnum.f64", fn(t_f64, t_f64) -> t_f64);
        ifn!("llvm.minnum.f128", fn(t_f128, t_f128) -> t_f128);

        ifn!("llvm.minimum.f16", fn(t_f16, t_f16) -> t_f16);
        ifn!("llvm.minimum.f32", fn(t_f32, t_f32) -> t_f32);
        ifn!("llvm.minimum.f64", fn(t_f64, t_f64) -> t_f64);
        // There are issues on x86_64 and aarch64 with the f128 variant.
        //  - https://github.com/llvm/llvm-project/issues/139380
        //  - https://github.com/llvm/llvm-project/issues/139381
        // ifn!("llvm.minimum.f128", fn(t_f128, t_f128) -> t_f128);

        ifn!("llvm.maxnum.f16", fn(t_f16, t_f16) -> t_f16);
        ifn!("llvm.maxnum.f32", fn(t_f32, t_f32) -> t_f32);
        ifn!("llvm.maxnum.f64", fn(t_f64, t_f64) -> t_f64);
        ifn!("llvm.maxnum.f128", fn(t_f128, t_f128) -> t_f128);

        ifn!("llvm.maximum.f16", fn(t_f16, t_f16) -> t_f16);
        ifn!("llvm.maximum.f32", fn(t_f32, t_f32) -> t_f32);
        ifn!("llvm.maximum.f64", fn(t_f64, t_f64) -> t_f64);
        // There are issues on x86_64 and aarch64 with the f128 variant.
        //  - https://github.com/llvm/llvm-project/issues/139380
        //  - https://github.com/llvm/llvm-project/issues/139381
        // ifn!("llvm.maximum.f128", fn(t_f128, t_f128) -> t_f128);

        ifn!("llvm.floor.f16", fn(t_f16) -> t_f16);
        ifn!("llvm.floor.f32", fn(t_f32) -> t_f32);
        ifn!("llvm.floor.f64", fn(t_f64) -> t_f64);
        ifn!("llvm.floor.f128", fn(t_f128) -> t_f128);

        ifn!("llvm.ceil.f16", fn(t_f16) -> t_f16);
        ifn!("llvm.ceil.f32", fn(t_f32) -> t_f32);
        ifn!("llvm.ceil.f64", fn(t_f64) -> t_f64);
        ifn!("llvm.ceil.f128", fn(t_f128) -> t_f128);

        ifn!("llvm.trunc.f16", fn(t_f16) -> t_f16);
        ifn!("llvm.trunc.f32", fn(t_f32) -> t_f32);
        ifn!("llvm.trunc.f64", fn(t_f64) -> t_f64);
        ifn!("llvm.trunc.f128", fn(t_f128) -> t_f128);

        ifn!("llvm.copysign.f16", fn(t_f16, t_f16) -> t_f16);
        ifn!("llvm.copysign.f32", fn(t_f32, t_f32) -> t_f32);
        ifn!("llvm.copysign.f64", fn(t_f64, t_f64) -> t_f64);
        ifn!("llvm.copysign.f128", fn(t_f128, t_f128) -> t_f128);

        ifn!("llvm.round.f16", fn(t_f16) -> t_f16);
        ifn!("llvm.round.f32", fn(t_f32) -> t_f32);
        ifn!("llvm.round.f64", fn(t_f64) -> t_f64);
        ifn!("llvm.round.f128", fn(t_f128) -> t_f128);

        ifn!("llvm.roundeven.f16", fn(t_f16) -> t_f16);
        ifn!("llvm.roundeven.f32", fn(t_f32) -> t_f32);
        ifn!("llvm.roundeven.f64", fn(t_f64) -> t_f64);
        ifn!("llvm.roundeven.f128", fn(t_f128) -> t_f128);

        ifn!("llvm.rint.f16", fn(t_f16) -> t_f16);
        ifn!("llvm.rint.f32", fn(t_f32) -> t_f32);
        ifn!("llvm.rint.f64", fn(t_f64) -> t_f64);
        ifn!("llvm.rint.f128", fn(t_f128) -> t_f128);

        ifn!("llvm.nearbyint.f16", fn(t_f16) -> t_f16);
        ifn!("llvm.nearbyint.f32", fn(t_f32) -> t_f32);
        ifn!("llvm.nearbyint.f64", fn(t_f64) -> t_f64);
        ifn!("llvm.nearbyint.f128", fn(t_f128) -> t_f128);

        ifn!("llvm.ctpop.i8", fn(t_i8) -> t_i8);
        ifn!("llvm.ctpop.i16", fn(t_i16) -> t_i16);
        ifn!("llvm.ctpop.i32", fn(t_i32) -> t_i32);
        ifn!("llvm.ctpop.i64", fn(t_i64) -> t_i64);
        ifn!("llvm.ctpop.i128", fn(t_i128) -> t_i128);

        ifn!("llvm.ctlz.i8", fn(t_i8, i1) -> t_i8);
        ifn!("llvm.ctlz.i16", fn(t_i16, i1) -> t_i16);
        ifn!("llvm.ctlz.i32", fn(t_i32, i1) -> t_i32);
        ifn!("llvm.ctlz.i64", fn(t_i64, i1) -> t_i64);
        ifn!("llvm.ctlz.i128", fn(t_i128, i1) -> t_i128);

        ifn!("llvm.cttz.i8", fn(t_i8, i1) -> t_i8);
        ifn!("llvm.cttz.i16", fn(t_i16, i1) -> t_i16);
        ifn!("llvm.cttz.i32", fn(t_i32, i1) -> t_i32);
        ifn!("llvm.cttz.i64", fn(t_i64, i1) -> t_i64);
        ifn!("llvm.cttz.i128", fn(t_i128, i1) -> t_i128);

        ifn!("llvm.bswap.i16", fn(t_i16) -> t_i16);
        ifn!("llvm.bswap.i32", fn(t_i32) -> t_i32);
        ifn!("llvm.bswap.i64", fn(t_i64) -> t_i64);
        ifn!("llvm.bswap.i128", fn(t_i128) -> t_i128);

        ifn!("llvm.bitreverse.i8", fn(t_i8) -> t_i8);
        ifn!("llvm.bitreverse.i16", fn(t_i16) -> t_i16);
        ifn!("llvm.bitreverse.i32", fn(t_i32) -> t_i32);
        ifn!("llvm.bitreverse.i64", fn(t_i64) -> t_i64);
        ifn!("llvm.bitreverse.i128", fn(t_i128) -> t_i128);

        ifn!("llvm.fshl.i8", fn(t_i8, t_i8, t_i8) -> t_i8);
        ifn!("llvm.fshl.i16", fn(t_i16, t_i16, t_i16) -> t_i16);
        ifn!("llvm.fshl.i32", fn(t_i32, t_i32, t_i32) -> t_i32);
        ifn!("llvm.fshl.i64", fn(t_i64, t_i64, t_i64) -> t_i64);
        ifn!("llvm.fshl.i128", fn(t_i128, t_i128, t_i128) -> t_i128);

        ifn!("llvm.fshr.i8", fn(t_i8, t_i8, t_i8) -> t_i8);
        ifn!("llvm.fshr.i16", fn(t_i16, t_i16, t_i16) -> t_i16);
        ifn!("llvm.fshr.i32", fn(t_i32, t_i32, t_i32) -> t_i32);
        ifn!("llvm.fshr.i64", fn(t_i64, t_i64, t_i64) -> t_i64);
        ifn!("llvm.fshr.i128", fn(t_i128, t_i128, t_i128) -> t_i128);

        ifn!("llvm.sadd.with.overflow.i8", fn(t_i8, t_i8) -> mk_struct! {t_i8, i1});
        ifn!("llvm.sadd.with.overflow.i16", fn(t_i16, t_i16) -> mk_struct! {t_i16, i1});
        ifn!("llvm.sadd.with.overflow.i32", fn(t_i32, t_i32) -> mk_struct! {t_i32, i1});
        ifn!("llvm.sadd.with.overflow.i64", fn(t_i64, t_i64) -> mk_struct! {t_i64, i1});
        ifn!("llvm.sadd.with.overflow.i128", fn(t_i128, t_i128) -> mk_struct! {t_i128, i1});

        ifn!("llvm.uadd.with.overflow.i8", fn(t_i8, t_i8) -> mk_struct! {t_i8, i1});
        ifn!("llvm.uadd.with.overflow.i16", fn(t_i16, t_i16) -> mk_struct! {t_i16, i1});
        ifn!("llvm.uadd.with.overflow.i32", fn(t_i32, t_i32) -> mk_struct! {t_i32, i1});
        ifn!("llvm.uadd.with.overflow.i64", fn(t_i64, t_i64) -> mk_struct! {t_i64, i1});
        ifn!("llvm.uadd.with.overflow.i128", fn(t_i128, t_i128) -> mk_struct! {t_i128, i1});

        ifn!("llvm.ssub.with.overflow.i8", fn(t_i8, t_i8) -> mk_struct! {t_i8, i1});
        ifn!("llvm.ssub.with.overflow.i16", fn(t_i16, t_i16) -> mk_struct! {t_i16, i1});
        ifn!("llvm.ssub.with.overflow.i32", fn(t_i32, t_i32) -> mk_struct! {t_i32, i1});
        ifn!("llvm.ssub.with.overflow.i64", fn(t_i64, t_i64) -> mk_struct! {t_i64, i1});
        ifn!("llvm.ssub.with.overflow.i128", fn(t_i128, t_i128) -> mk_struct! {t_i128, i1});

        ifn!("llvm.usub.with.overflow.i8", fn(t_i8, t_i8) -> mk_struct! {t_i8, i1});
        ifn!("llvm.usub.with.overflow.i16", fn(t_i16, t_i16) -> mk_struct! {t_i16, i1});
        ifn!("llvm.usub.with.overflow.i32", fn(t_i32, t_i32) -> mk_struct! {t_i32, i1});
        ifn!("llvm.usub.with.overflow.i64", fn(t_i64, t_i64) -> mk_struct! {t_i64, i1});
        ifn!("llvm.usub.with.overflow.i128", fn(t_i128, t_i128) -> mk_struct! {t_i128, i1});

        ifn!("llvm.smul.with.overflow.i8", fn(t_i8, t_i8) -> mk_struct! {t_i8, i1});
        ifn!("llvm.smul.with.overflow.i16", fn(t_i16, t_i16) -> mk_struct! {t_i16, i1});
        ifn!("llvm.smul.with.overflow.i32", fn(t_i32, t_i32) -> mk_struct! {t_i32, i1});
        ifn!("llvm.smul.with.overflow.i64", fn(t_i64, t_i64) -> mk_struct! {t_i64, i1});
        ifn!("llvm.smul.with.overflow.i128", fn(t_i128, t_i128) -> mk_struct! {t_i128, i1});

        ifn!("llvm.umul.with.overflow.i8", fn(t_i8, t_i8) -> mk_struct! {t_i8, i1});
        ifn!("llvm.umul.with.overflow.i16", fn(t_i16, t_i16) -> mk_struct! {t_i16, i1});
        ifn!("llvm.umul.with.overflow.i32", fn(t_i32, t_i32) -> mk_struct! {t_i32, i1});
        ifn!("llvm.umul.with.overflow.i64", fn(t_i64, t_i64) -> mk_struct! {t_i64, i1});
        ifn!("llvm.umul.with.overflow.i128", fn(t_i128, t_i128) -> mk_struct! {t_i128, i1});

        ifn!("llvm.sadd.sat.i8", fn(t_i8, t_i8) -> t_i8);
        ifn!("llvm.sadd.sat.i16", fn(t_i16, t_i16) -> t_i16);
        ifn!("llvm.sadd.sat.i32", fn(t_i32, t_i32) -> t_i32);
        ifn!("llvm.sadd.sat.i64", fn(t_i64, t_i64) -> t_i64);
        ifn!("llvm.sadd.sat.i128", fn(t_i128, t_i128) -> t_i128);

        ifn!("llvm.uadd.sat.i8", fn(t_i8, t_i8) -> t_i8);
        ifn!("llvm.uadd.sat.i16", fn(t_i16, t_i16) -> t_i16);
        ifn!("llvm.uadd.sat.i32", fn(t_i32, t_i32) -> t_i32);
        ifn!("llvm.uadd.sat.i64", fn(t_i64, t_i64) -> t_i64);
        ifn!("llvm.uadd.sat.i128", fn(t_i128, t_i128) -> t_i128);

        ifn!("llvm.ssub.sat.i8", fn(t_i8, t_i8) -> t_i8);
        ifn!("llvm.ssub.sat.i16", fn(t_i16, t_i16) -> t_i16);
        ifn!("llvm.ssub.sat.i32", fn(t_i32, t_i32) -> t_i32);
        ifn!("llvm.ssub.sat.i64", fn(t_i64, t_i64) -> t_i64);
        ifn!("llvm.ssub.sat.i128", fn(t_i128, t_i128) -> t_i128);

        ifn!("llvm.usub.sat.i8", fn(t_i8, t_i8) -> t_i8);
        ifn!("llvm.usub.sat.i16", fn(t_i16, t_i16) -> t_i16);
        ifn!("llvm.usub.sat.i32", fn(t_i32, t_i32) -> t_i32);
        ifn!("llvm.usub.sat.i64", fn(t_i64, t_i64) -> t_i64);
        ifn!("llvm.usub.sat.i128", fn(t_i128, t_i128) -> t_i128);

        ifn!("llvm.scmp.i8.i8", fn(t_i8, t_i8) -> t_i8);
        ifn!("llvm.scmp.i8.i16", fn(t_i16, t_i16) -> t_i8);
        ifn!("llvm.scmp.i8.i32", fn(t_i32, t_i32) -> t_i8);
        ifn!("llvm.scmp.i8.i64", fn(t_i64, t_i64) -> t_i8);
        ifn!("llvm.scmp.i8.i128", fn(t_i128, t_i128) -> t_i8);

        ifn!("llvm.ucmp.i8.i8", fn(t_i8, t_i8) -> t_i8);
        ifn!("llvm.ucmp.i8.i16", fn(t_i16, t_i16) -> t_i8);
        ifn!("llvm.ucmp.i8.i32", fn(t_i32, t_i32) -> t_i8);
        ifn!("llvm.ucmp.i8.i64", fn(t_i64, t_i64) -> t_i8);
        ifn!("llvm.ucmp.i8.i128", fn(t_i128, t_i128) -> t_i8);

        ifn!("llvm.lifetime.start.p0i8", fn(t_i64, ptr) -> void);
        ifn!("llvm.lifetime.end.p0i8", fn(t_i64, ptr) -> void);

        // FIXME: This is an infinitesimally small portion of the types you can
        // pass to this intrinsic. If we can ever lazily register intrinsics, we
        // should register these when they're used; that way, any type can be
        // passed.
        ifn!("llvm.is.constant.i1", fn(i1) -> i1);
        ifn!("llvm.is.constant.i8", fn(t_i8) -> i1);
        ifn!("llvm.is.constant.i16", fn(t_i16) -> i1);
        ifn!("llvm.is.constant.i32", fn(t_i32) -> i1);
        ifn!("llvm.is.constant.i64", fn(t_i64) -> i1);
        ifn!("llvm.is.constant.i128", fn(t_i128) -> i1);
        ifn!("llvm.is.constant.isize", fn(t_isize) -> i1);
        ifn!("llvm.is.constant.f16", fn(t_f16) -> i1);
        ifn!("llvm.is.constant.f32", fn(t_f32) -> i1);
        ifn!("llvm.is.constant.f64", fn(t_f64) -> i1);
        ifn!("llvm.is.constant.f128", fn(t_f128) -> i1);
        ifn!("llvm.is.constant.ptr", fn(ptr) -> i1);

        ifn!("llvm.expect.i1", fn(i1, i1) -> i1);
        ifn!("llvm.eh.typeid.for", fn(ptr) -> t_i32);
        ifn!("llvm.localescape", fn(...) -> void);
        ifn!("llvm.localrecover", fn(ptr, ptr, t_i32) -> ptr);
        ifn!("llvm.x86.seh.recoverfp", fn(ptr, ptr) -> ptr);

        ifn!("llvm.assume", fn(i1) -> void);
        ifn!("llvm.prefetch", fn(ptr, t_i32, t_i32, t_i32) -> void);

        // This isn't an "LLVM intrinsic", but LLVM's optimization passes
        // recognize it like one (including turning it into `bcmp` sometimes)
        // and we use it to implement intrinsics like `raw_eq` and `compare_bytes`.
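        // On AVR and MSP430 the C `int` type is 16 bits wide, so `memcmp`
        // returns an `i16` there instead of the usual `i32`.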
        match self.sess().target.arch.as_ref() {
            "avr" | "msp430" => ifn!("memcmp", fn(ptr, ptr, t_isize) -> t_i16),
            _ => ifn!("memcmp", fn(ptr, ptr, t_isize) -> t_i32),
        }

        // variadic intrinsics
        ifn!("llvm.va_start", fn(ptr) -> void);
        ifn!("llvm.va_end", fn(ptr) -> void);
        ifn!("llvm.va_copy", fn(ptr, ptr) -> void);

        if self.sess().instrument_coverage() {
            ifn!("llvm.instrprof.increment", fn(ptr, t_i64, t_i32, t_i32) -> void);
            ifn!("llvm.instrprof.mcdc.parameters", fn(ptr, t_i64, t_i32) -> void);
            ifn!("llvm.instrprof.mcdc.tvbitmap.update", fn(ptr, t_i64, t_i32, ptr) -> void);
        }

        ifn!("llvm.type.test", fn(ptr, t_metadata) -> i1);
        ifn!("llvm.type.checked.load", fn(ptr, t_i32, t_metadata) -> mk_struct! {ptr, i1});

        if self.sess().opts.debuginfo != DebugInfo::None {
            ifn!("llvm.dbg.declare", fn(t_metadata, t_metadata) -> void);
            ifn!("llvm.dbg.value", fn(t_metadata, t_i64, t_metadata) -> void);
        }

        ifn!("llvm.ptrmask", fn(ptr, t_isize) -> ptr);
        ifn!("llvm.threadlocal.address", fn(ptr) -> ptr);

        None
    }

    pub(crate) fn eh_catch_typeinfo(&self) -> &'ll Value {
        if let Some(eh_catch_typeinfo) = self.eh_catch_typeinfo.get() {
            return eh_catch_typeinfo;
        }
        let tcx = self.tcx;
        assert!(self.sess().target.os == "emscripten");
        let eh_catch_typeinfo = match tcx.lang_items().eh_catch_typeinfo() {
            Some(def_id) => self.get_static(def_id),
            _ => {
                let ty = self.type_struct(&[self.type_ptr(), self.type_ptr()], false);
                self.declare_global(&mangle_internal_symbol(self.tcx, "rust_eh_catch_typeinfo"), ty)
            }
        };
        self.eh_catch_typeinfo.set(Some(eh_catch_typeinfo));
        eh_catch_typeinfo
    }
}

impl CodegenCx<'_, '_> {
    /// Generates a new symbol name with the given prefix. This symbol name must
    /// only be used for definitions with `internal` or `private` linkage.
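    ///
    /// For example (hypothetical prefix), successive calls with `"anon"` yield
    /// names like `"anon.0"`, `"anon.1"`, ..., with the counter rendered in
    /// base 62.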
    pub(crate) fn generate_local_symbol_name(&self, prefix: &str) -> String {
        let idx = self.local_gen_sym_counter.get();
        self.local_gen_sym_counter.set(idx + 1);
        // Include a '.' character, so there can be no accidental conflicts with
        // user-defined names.
        let mut name = String::with_capacity(prefix.len() + 6);
        name.push_str(prefix);
        name.push('.');
        name.push_str(&(idx as u64).to_base(ALPHANUMERIC_ONLY));
        name
    }
}

impl<'ll, CX: Borrow<SCx<'ll>>> GenericCx<'ll, CX> {
    /// A wrapper for [`llvm::LLVMSetMetadata`], but it takes `Metadata` as a parameter instead of `Value`.
    pub(crate) fn set_metadata<'a>(
        &self,
        val: &'a Value,
        kind_id: impl Into<llvm::MetadataKindId>,
        md: &'ll Metadata,
    ) {
        let node = self.get_metadata_value(md);
        llvm::LLVMSetMetadata(val, kind_id.into(), node);
    }
}

impl HasDataLayout for CodegenCx<'_, '_> {
    #[inline]
    fn data_layout(&self) -> &TargetDataLayout {
        &self.tcx.data_layout
    }
}

impl HasTargetSpec for CodegenCx<'_, '_> {
    #[inline]
    fn target_spec(&self) -> &Target {
        &self.tcx.sess.target
    }
}

impl<'tcx> ty::layout::HasTyCtxt<'tcx> for CodegenCx<'_, 'tcx> {
    #[inline]
    fn tcx(&self) -> TyCtxt<'tcx> {
        self.tcx
    }
}

impl<'tcx, 'll> HasTypingEnv<'tcx> for CodegenCx<'ll, 'tcx> {
    fn typing_env(&self) -> ty::TypingEnv<'tcx> {
        ty::TypingEnv::fully_monomorphized()
    }
}

impl<'tcx> LayoutOfHelpers<'tcx> for CodegenCx<'_, 'tcx> {
    #[inline]
    fn handle_layout_err(&self, err: LayoutError<'tcx>, span: Span, ty: Ty<'tcx>) -> ! {
        if let LayoutError::SizeOverflow(_) | LayoutError::ReferencesError(_) = err {
            self.tcx.dcx().emit_fatal(Spanned { span, node: err.into_diagnostic() })
        } else {
            self.tcx.dcx().emit_fatal(ssa_errors::FailedToGetLayout { span, ty, err })
        }
    }
}

impl<'tcx> FnAbiOfHelpers<'tcx> for CodegenCx<'_, 'tcx> {
    #[inline]
    fn handle_fn_abi_err(
        &self,
        err: FnAbiError<'tcx>,
        span: Span,
        fn_abi_request: FnAbiRequest<'tcx>,
    ) -> ! {
        match err {
            FnAbiError::Layout(LayoutError::SizeOverflow(_) | LayoutError::Cycle(_)) => {
                self.tcx.dcx().emit_fatal(Spanned { span, node: err });
            }
            _ => match fn_abi_request {
                FnAbiRequest::OfFnPtr { sig, extra_args } => {
                    span_bug!(span, "`fn_abi_of_fn_ptr({sig}, {extra_args:?})` failed: {err:?}",);
                }
                FnAbiRequest::OfInstance { instance, extra_args } => {
                    span_bug!(
                        span,
                        "`fn_abi_of_instance({instance}, {extra_args:?})` failed: {err:?}",
                    );
                }
            },
        }
    }
}