rustc_codegen_llvm/attributes.rs

1//! Set and unset common attributes on LLVM values.
2use rustc_codegen_ssa::traits::*;
3use rustc_hir::attrs::{InlineAttr, InstructionSetAttr, OptimizeAttr};
4use rustc_hir::def_id::DefId;
5use rustc_middle::middle::codegen_fn_attrs::{CodegenFnAttrFlags, PatchableFunctionEntry};
6use rustc_middle::ty::{self, TyCtxt};
7use rustc_session::config::{BranchProtection, FunctionReturn, OptLevel, PAuthKey, PacRet};
8use rustc_symbol_mangling::mangle_internal_symbol;
9use rustc_target::spec::{FramePointer, SanitizerSet, StackProbeType, StackProtector};
10use smallvec::SmallVec;
11
12use crate::context::CodegenCx;
13use crate::errors::SanitizerMemtagRequiresMte;
14use crate::llvm::AttributePlace::Function;
15use crate::llvm::{self, AllocKindFlags, Attribute, AttributeKind, AttributePlace, MemoryEffects};
16use crate::value::Value;
17use crate::{attributes, llvm_util};
18
19pub(crate) fn apply_to_llfn(llfn: &Value, idx: AttributePlace, attrs: &[&Attribute]) {
20    if !attrs.is_empty() {
21        llvm::AddFunctionAttributes(llfn, idx, attrs);
22    }
23}
24
25pub(crate) fn apply_to_callsite(callsite: &Value, idx: AttributePlace, attrs: &[&Attribute]) {
26    if !attrs.is_empty() {
27        llvm::AddCallSiteAttributes(callsite, idx, attrs);
28    }
29}
30
31/// Get LLVM attribute for the provided inline heuristic.
32#[inline]
33fn inline_attr<'ll>(cx: &CodegenCx<'ll, '_>, inline: InlineAttr) -> Option<&'ll Attribute> {
34    if !cx.tcx.sess.opts.unstable_opts.inline_llvm {
35        // disable LLVM inlining
36        return Some(AttributeKind::NoInline.create_attr(cx.llcx));
37    }
38    match inline {
39        InlineAttr::Hint => Some(AttributeKind::InlineHint.create_attr(cx.llcx)),
40        InlineAttr::Always | InlineAttr::Force { .. } => {
41            Some(AttributeKind::AlwaysInline.create_attr(cx.llcx))
42        }
43        InlineAttr::Never => {
44            if cx.sess().target.arch != "amdgpu" {
45                Some(AttributeKind::NoInline.create_attr(cx.llcx))
46            } else {
47                None
48            }
49        }
50        InlineAttr::None => None,
51    }
52}
53
54#[inline]
55fn patchable_function_entry_attrs<'ll>(
56    cx: &CodegenCx<'ll, '_>,
57    attr: Option<PatchableFunctionEntry>,
58) -> SmallVec<[&'ll Attribute; 2]> {
59    let mut attrs = SmallVec::new();
60    let patchable_spec = attr.unwrap_or_else(|| {
61        PatchableFunctionEntry::from_config(cx.tcx.sess.opts.unstable_opts.patchable_function_entry)
62    });
63    let entry = patchable_spec.entry();
64    let prefix = patchable_spec.prefix();
65    if entry > 0 {
66        attrs.push(llvm::CreateAttrStringValue(
67            cx.llcx,
68            "patchable-function-entry",
69            &format!("{}", entry),
70        ));
71    }
72    if prefix > 0 {
73        attrs.push(llvm::CreateAttrStringValue(
74            cx.llcx,
75            "patchable-function-prefix",
76            &format!("{}", prefix),
77        ));
78    }
79    attrs
80}
81
82/// Get LLVM sanitize attributes.
83#[inline]
84pub(crate) fn sanitize_attrs<'ll>(
85    cx: &CodegenCx<'ll, '_>,
86    no_sanitize: SanitizerSet,
87) -> SmallVec<[&'ll Attribute; 4]> {
88    let mut attrs = SmallVec::new();
89    let enabled = cx.tcx.sess.opts.unstable_opts.sanitizer - no_sanitize;
90    if enabled.contains(SanitizerSet::ADDRESS) || enabled.contains(SanitizerSet::KERNELADDRESS) {
91        attrs.push(llvm::AttributeKind::SanitizeAddress.create_attr(cx.llcx));
92    }
93    if enabled.contains(SanitizerSet::MEMORY) {
94        attrs.push(llvm::AttributeKind::SanitizeMemory.create_attr(cx.llcx));
95    }
96    if enabled.contains(SanitizerSet::THREAD) {
97        attrs.push(llvm::AttributeKind::SanitizeThread.create_attr(cx.llcx));
98    }
99    if enabled.contains(SanitizerSet::HWADDRESS) {
100        attrs.push(llvm::AttributeKind::SanitizeHWAddress.create_attr(cx.llcx));
101    }
102    if enabled.contains(SanitizerSet::SHADOWCALLSTACK) {
103        attrs.push(llvm::AttributeKind::ShadowCallStack.create_attr(cx.llcx));
104    }
105    if enabled.contains(SanitizerSet::MEMTAG) {
106        // Check to make sure the mte target feature is actually enabled.
107        let features = cx.tcx.global_backend_features(());
108        let mte_feature =
109            features.iter().map(|s| &s[..]).rfind(|n| ["+mte", "-mte"].contains(&&n[..]));
110        if let None | Some("-mte") = mte_feature {
111            cx.tcx.dcx().emit_err(SanitizerMemtagRequiresMte);
112        }
113
114        attrs.push(llvm::AttributeKind::SanitizeMemTag.create_attr(cx.llcx));
115    }
116    if enabled.contains(SanitizerSet::SAFESTACK) {
117        attrs.push(llvm::AttributeKind::SanitizeSafeStack.create_attr(cx.llcx));
118    }
119    attrs
120}
121
122/// Tell LLVM to emit or not emit the information necessary to unwind the stack for the function.
123#[inline]
124pub(crate) fn uwtable_attr(llcx: &llvm::Context, use_sync_unwind: Option<bool>) -> &Attribute {
125    // NOTE: We should determine if we even need async unwind tables, as they
126    // take have more overhead and if we can use sync unwind tables we
127    // probably should.
128    let async_unwind = !use_sync_unwind.unwrap_or(false);
129    llvm::CreateUWTableAttr(llcx, async_unwind)
130}
131
132pub(crate) fn frame_pointer_type_attr<'ll>(cx: &CodegenCx<'ll, '_>) -> Option<&'ll Attribute> {
133    let mut fp = cx.sess().target.frame_pointer;
134    let opts = &cx.sess().opts;
135    // "mcount" function relies on stack pointer.
136    // See <https://sourceware.org/binutils/docs/gprof/Implementation.html>.
137    if opts.unstable_opts.instrument_mcount {
138        fp.ratchet(FramePointer::Always);
139    }
140    fp.ratchet(opts.cg.force_frame_pointers);
141    let attr_value = match fp {
142        FramePointer::Always => "all",
143        FramePointer::NonLeaf => "non-leaf",
144        FramePointer::MayOmit => return None,
145    };
146    Some(llvm::CreateAttrStringValue(cx.llcx, "frame-pointer", attr_value))
147}
148
149fn function_return_attr<'ll>(cx: &CodegenCx<'ll, '_>) -> Option<&'ll Attribute> {
150    let function_return_attr = match cx.sess().opts.unstable_opts.function_return {
151        FunctionReturn::Keep => return None,
152        FunctionReturn::ThunkExtern => AttributeKind::FnRetThunkExtern,
153    };
154
155    Some(function_return_attr.create_attr(cx.llcx))
156}
157
158/// Tell LLVM what instrument function to insert.
159#[inline]
160fn instrument_function_attr<'ll>(cx: &CodegenCx<'ll, '_>) -> SmallVec<[&'ll Attribute; 4]> {
161    let mut attrs = SmallVec::new();
162    if cx.sess().opts.unstable_opts.instrument_mcount {
163        // Similar to `clang -pg` behavior. Handled by the
164        // `post-inline-ee-instrument` LLVM pass.
165
166        // The function name varies on platforms.
167        // See test/CodeGen/mcount.c in clang.
168        let mcount_name = match &cx.sess().target.llvm_mcount_intrinsic {
169            Some(llvm_mcount_intrinsic) => llvm_mcount_intrinsic.as_ref(),
170            None => cx.sess().target.mcount.as_ref(),
171        };
172
173        attrs.push(llvm::CreateAttrStringValue(
174            cx.llcx,
175            "instrument-function-entry-inlined",
176            mcount_name,
177        ));
178    }
179    if let Some(options) = &cx.sess().opts.unstable_opts.instrument_xray {
180        // XRay instrumentation is similar to __cyg_profile_func_{enter,exit}.
181        // Function prologue and epilogue are instrumented with NOP sleds,
182        // a runtime library later replaces them with detours into tracing code.
183        if options.always {
184            attrs.push(llvm::CreateAttrStringValue(cx.llcx, "function-instrument", "xray-always"));
185        }
186        if options.never {
187            attrs.push(llvm::CreateAttrStringValue(cx.llcx, "function-instrument", "xray-never"));
188        }
189        if options.ignore_loops {
190            attrs.push(llvm::CreateAttrString(cx.llcx, "xray-ignore-loops"));
191        }
192        // LLVM will not choose the default for us, but rather requires specific
193        // threshold in absence of "xray-always". Use the same default as Clang.
194        let threshold = options.instruction_threshold.unwrap_or(200);
195        attrs.push(llvm::CreateAttrStringValue(
196            cx.llcx,
197            "xray-instruction-threshold",
198            &threshold.to_string(),
199        ));
200        if options.skip_entry {
201            attrs.push(llvm::CreateAttrString(cx.llcx, "xray-skip-entry"));
202        }
203        if options.skip_exit {
204            attrs.push(llvm::CreateAttrString(cx.llcx, "xray-skip-exit"));
205        }
206    }
207    attrs
208}
209
210fn nojumptables_attr<'ll>(cx: &CodegenCx<'ll, '_>) -> Option<&'ll Attribute> {
211    if !cx.sess().opts.unstable_opts.no_jump_tables {
212        return None;
213    }
214
215    Some(llvm::CreateAttrStringValue(cx.llcx, "no-jump-tables", "true"))
216}
217
218fn probestack_attr<'ll>(cx: &CodegenCx<'ll, '_>) -> Option<&'ll Attribute> {
219    // Currently stack probes seem somewhat incompatible with the address
220    // sanitizer and thread sanitizer. With asan we're already protected from
221    // stack overflow anyway so we don't really need stack probes regardless.
222    if cx
223        .sess()
224        .opts
225        .unstable_opts
226        .sanitizer
227        .intersects(SanitizerSet::ADDRESS | SanitizerSet::THREAD)
228    {
229        return None;
230    }
231
232    // probestack doesn't play nice either with `-C profile-generate`.
233    if cx.sess().opts.cg.profile_generate.enabled() {
234        return None;
235    }
236
237    let attr_value = match cx.sess().target.stack_probes {
238        StackProbeType::None => return None,
239        // Request LLVM to generate the probes inline. If the given LLVM version does not support
240        // this, no probe is generated at all (even if the attribute is specified).
241        StackProbeType::Inline => "inline-asm",
242        // Flag our internal `__rust_probestack` function as the stack probe symbol.
243        // This is defined in the `compiler-builtins` crate for each architecture.
244        StackProbeType::Call => &mangle_internal_symbol(cx.tcx, "__rust_probestack"),
245        // Pick from the two above based on the LLVM version.
246        StackProbeType::InlineOrCall { min_llvm_version_for_inline } => {
247            if llvm_util::get_version() < min_llvm_version_for_inline {
248                &mangle_internal_symbol(cx.tcx, "__rust_probestack")
249            } else {
250                "inline-asm"
251            }
252        }
253    };
254    Some(llvm::CreateAttrStringValue(cx.llcx, "probe-stack", attr_value))
255}
256
257fn stackprotector_attr<'ll>(cx: &CodegenCx<'ll, '_>) -> Option<&'ll Attribute> {
258    let sspattr = match cx.sess().stack_protector() {
259        StackProtector::None => return None,
260        StackProtector::All => AttributeKind::StackProtectReq,
261        StackProtector::Strong => AttributeKind::StackProtectStrong,
262        StackProtector::Basic => AttributeKind::StackProtect,
263    };
264
265    Some(sspattr.create_attr(cx.llcx))
266}
267
268fn backchain_attr<'ll>(cx: &CodegenCx<'ll, '_>) -> Option<&'ll Attribute> {
269    if cx.sess().target.arch != "s390x" {
270        return None;
271    }
272
273    let requested_features = cx.sess().opts.cg.target_feature.split(',');
274    let found_positive = requested_features.clone().any(|r| r == "+backchain");
275
276    if found_positive { Some(llvm::CreateAttrString(cx.llcx, "backchain")) } else { None }
277}
278
279pub(crate) fn target_cpu_attr<'ll>(cx: &CodegenCx<'ll, '_>) -> &'ll Attribute {
280    let target_cpu = llvm_util::target_cpu(cx.tcx.sess);
281    llvm::CreateAttrStringValue(cx.llcx, "target-cpu", target_cpu)
282}
283
284pub(crate) fn tune_cpu_attr<'ll>(cx: &CodegenCx<'ll, '_>) -> Option<&'ll Attribute> {
285    llvm_util::tune_cpu(cx.tcx.sess)
286        .map(|tune_cpu| llvm::CreateAttrStringValue(cx.llcx, "tune-cpu", tune_cpu))
287}
288
289/// Get the `NonLazyBind` LLVM attribute,
290/// if the codegen options allow skipping the PLT.
291pub(crate) fn non_lazy_bind_attr<'ll>(cx: &CodegenCx<'ll, '_>) -> Option<&'ll Attribute> {
292    // Don't generate calls through PLT if it's not necessary
293    if !cx.sess().needs_plt() {
294        Some(AttributeKind::NonLazyBind.create_attr(cx.llcx))
295    } else {
296        None
297    }
298}
299
300/// Get the default optimizations attrs for a function.
301#[inline]
302pub(crate) fn default_optimisation_attrs<'ll>(
303    cx: &CodegenCx<'ll, '_>,
304) -> SmallVec<[&'ll Attribute; 2]> {
305    let mut attrs = SmallVec::new();
306    match cx.sess().opts.optimize {
307        OptLevel::Size => {
308            attrs.push(llvm::AttributeKind::OptimizeForSize.create_attr(cx.llcx));
309        }
310        OptLevel::SizeMin => {
311            attrs.push(llvm::AttributeKind::MinSize.create_attr(cx.llcx));
312            attrs.push(llvm::AttributeKind::OptimizeForSize.create_attr(cx.llcx));
313        }
314        _ => {}
315    }
316    attrs
317}
318
/// Create the `"alloc-family"="__rust_alloc"` attribute that is attached to
/// every Rust allocator shim (alloc/realloc/dealloc) in this file.
fn create_alloc_family_attr(llcx: &llvm::Context) -> &llvm::Attribute {
    llvm::CreateAttrStringValue(llcx, "alloc-family", "__rust_alloc")
}
322
/// Helper for `FnAbi::apply_attrs_llfn`:
/// Composite function which sets LLVM attributes for function depending on its AST (`#[attribute]`)
/// attributes.
pub(crate) fn llfn_attrs_from_instance<'ll, 'tcx>(
    cx: &CodegenCx<'ll, 'tcx>,
    llfn: &'ll Value,
    instance: ty::Instance<'tcx>,
) {
    let codegen_fn_attrs = cx.tcx.codegen_instance_attrs(instance.def);

    // Function-place attributes are accumulated here and applied in a single
    // call at the very end; argument/return-place attributes (for allocators)
    // are applied immediately where they occur below.
    let mut to_add = SmallVec::<[_; 16]>::new();

    // Optimization level attributes from `#[optimize(..)]` / session defaults.
    match codegen_fn_attrs.optimize {
        OptimizeAttr::Default => {
            to_add.extend(default_optimisation_attrs(cx));
        }
        OptimizeAttr::DoNotOptimize => {
            to_add.push(llvm::AttributeKind::OptimizeNone.create_attr(cx.llcx));
        }
        OptimizeAttr::Size => {
            to_add.push(llvm::AttributeKind::MinSize.create_attr(cx.llcx));
            to_add.push(llvm::AttributeKind::OptimizeForSize.create_attr(cx.llcx));
        }
        OptimizeAttr::Speed => {}
    }

    // `optnone` requires `noinline`
    let inline = match (codegen_fn_attrs.inline, &codegen_fn_attrs.optimize) {
        (_, OptimizeAttr::DoNotOptimize) => InlineAttr::Never,
        (InlineAttr::None, _) if instance.def.requires_inline(cx.tcx) => InlineAttr::Hint,
        (inline, _) => inline,
    };
    to_add.extend(inline_attr(cx, inline));

    if cx.sess().must_emit_unwind_tables() {
        to_add.push(uwtable_attr(cx.llcx, cx.sess().opts.unstable_opts.use_sync_unwind));
    }

    if cx.sess().opts.unstable_opts.profile_sample_use.is_some() {
        to_add.push(llvm::CreateAttrString(cx.llcx, "use-sample-profile"));
    }

    // FIXME: none of these functions interact with source level attributes.
    to_add.extend(frame_pointer_type_attr(cx));
    to_add.extend(function_return_attr(cx));
    to_add.extend(instrument_function_attr(cx));
    to_add.extend(nojumptables_attr(cx));
    to_add.extend(probestack_attr(cx));
    to_add.extend(stackprotector_attr(cx));

    if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::NO_BUILTINS) {
        to_add.push(llvm::CreateAttrString(cx.llcx, "no-builtins"));
    }

    if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::COLD) {
        to_add.push(AttributeKind::Cold.create_attr(cx.llcx));
    }
    // FFI purity/constness maps onto LLVM memory-effect attributes.
    if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::FFI_PURE) {
        to_add.push(MemoryEffects::ReadOnly.create_attr(cx.llcx));
    }
    if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::FFI_CONST) {
        to_add.push(MemoryEffects::None.create_attr(cx.llcx));
    }
    if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::NAKED) {
        // do nothing; a naked function is converted into an extern function
        // and a global assembly block. LLVM's support for naked functions is
        // not used.
    } else {
        // Do not set sanitizer attributes for naked functions.
        to_add.extend(sanitize_attrs(cx, codegen_fn_attrs.no_sanitize));

        // For non-naked functions, set branch protection attributes on aarch64.
        if let Some(BranchProtection { bti, pac_ret }) =
            cx.sess().opts.unstable_opts.branch_protection
        {
            // Branch protection is only accepted for aarch64 targets.
            assert!(cx.sess().target.arch == "aarch64");
            if bti {
                to_add.push(llvm::CreateAttrString(cx.llcx, "branch-target-enforcement"));
            }
            if let Some(PacRet { leaf, pc, key }) = pac_ret {
                if pc {
                    to_add.push(llvm::CreateAttrString(cx.llcx, "branch-protection-pauth-lr"));
                }
                to_add.push(llvm::CreateAttrStringValue(
                    cx.llcx,
                    "sign-return-address",
                    if leaf { "all" } else { "non-leaf" },
                ));
                to_add.push(llvm::CreateAttrStringValue(
                    cx.llcx,
                    "sign-return-address-key",
                    if key == PAuthKey::A { "a_key" } else { "b_key" },
                ));
            }
        }
    }
    // `__rust_alloc`-style allocator entry points: mark the alloc family and
    // describe size/alignment arguments so LLVM can reason about allocations.
    if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::ALLOCATOR)
        || codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::ALLOCATOR_ZEROED)
    {
        to_add.push(create_alloc_family_attr(cx.llcx));
        if let Some(zv) =
            cx.tcx.get_attr(instance.def_id(), rustc_span::sym::rustc_allocator_zeroed_variant)
            && let Some(name) = zv.value_str()
        {
            to_add.push(llvm::CreateAttrStringValue(
                cx.llcx,
                "alloc-variant-zeroed",
                &mangle_internal_symbol(cx.tcx, name.as_str()),
            ));
        }
        // apply to argument place instead of function
        let alloc_align = AttributeKind::AllocAlign.create_attr(cx.llcx);
        attributes::apply_to_llfn(llfn, AttributePlace::Argument(1), &[alloc_align]);
        to_add.push(llvm::CreateAllocSizeAttr(cx.llcx, 0));
        let mut flags = AllocKindFlags::Alloc | AllocKindFlags::Aligned;
        if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::ALLOCATOR) {
            flags |= AllocKindFlags::Uninitialized;
        } else {
            flags |= AllocKindFlags::Zeroed;
        }
        to_add.push(llvm::CreateAllocKindAttr(cx.llcx, flags));
        // apply to return place instead of function (unlike all other attributes applied in this
        // function)
        let no_alias = AttributeKind::NoAlias.create_attr(cx.llcx);
        attributes::apply_to_llfn(llfn, AttributePlace::ReturnValue, &[no_alias]);
    }
    if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::REALLOCATOR) {
        to_add.push(create_alloc_family_attr(cx.llcx));
        to_add.push(llvm::CreateAllocKindAttr(
            cx.llcx,
            AllocKindFlags::Realloc | AllocKindFlags::Aligned,
        ));
        // applies to argument place instead of function place
        let allocated_pointer = AttributeKind::AllocatedPointer.create_attr(cx.llcx);
        attributes::apply_to_llfn(llfn, AttributePlace::Argument(0), &[allocated_pointer]);
        // apply to argument place instead of function
        let alloc_align = AttributeKind::AllocAlign.create_attr(cx.llcx);
        attributes::apply_to_llfn(llfn, AttributePlace::Argument(2), &[alloc_align]);
        to_add.push(llvm::CreateAllocSizeAttr(cx.llcx, 3));
        let no_alias = AttributeKind::NoAlias.create_attr(cx.llcx);
        attributes::apply_to_llfn(llfn, AttributePlace::ReturnValue, &[no_alias]);
    }
    if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::DEALLOCATOR) {
        to_add.push(create_alloc_family_attr(cx.llcx));
        to_add.push(llvm::CreateAllocKindAttr(cx.llcx, AllocKindFlags::Free));
        // applies to argument place instead of function place
        let allocated_pointer = AttributeKind::AllocatedPointer.create_attr(cx.llcx);
        attributes::apply_to_llfn(llfn, AttributePlace::Argument(0), &[allocated_pointer]);
    }
    // `#[repr(align(N))]`-style function alignment is set directly on the value.
    if let Some(align) = codegen_fn_attrs.alignment {
        llvm::set_alignment(llfn, align);
    }
    if let Some(backchain) = backchain_attr(cx) {
        to_add.push(backchain);
    }
    to_add.extend(patchable_function_entry_attrs(cx, codegen_fn_attrs.patchable_function_entry));

    // Always annotate functions with the target-cpu they are compiled for.
    // Without this, ThinLTO won't inline Rust functions into Clang generated
    // functions (because Clang annotates functions this way too).
    to_add.push(target_cpu_attr(cx));
    // tune-cpu is only conveyed through the attribute for our purpose.
    // The target doesn't care; the subtarget reads our attribute.
    to_add.extend(tune_cpu_attr(cx));

    let function_features =
        codegen_fn_attrs.target_features.iter().map(|f| f.name.as_str()).collect::<Vec<&str>>();

    let function_features = function_features
        .iter()
        // Convert to LLVMFeatures and filter out unavailable ones
        .flat_map(|feat| llvm_util::to_llvm_features(cx.tcx.sess, feat))
        // Convert LLVMFeatures & dependencies to +<feats>s
        .flat_map(|feat| feat.into_iter().map(|f| format!("+{f}")))
        .chain(codegen_fn_attrs.instruction_set.iter().map(|x| match x {
            InstructionSetAttr::ArmA32 => "-thumb-mode".to_string(),
            InstructionSetAttr::ArmT32 => "+thumb-mode".to_string(),
        }))
        .collect::<Vec<String>>();

    if cx.tcx.sess.target.is_like_wasm {
        // If this function is an import from the environment but the wasm
        // import has a specific module/name, apply them here.
        if let Some(module) = wasm_import_module(cx.tcx, instance.def_id()) {
            to_add.push(llvm::CreateAttrStringValue(cx.llcx, "wasm-import-module", module));

            let name =
                codegen_fn_attrs.symbol_name.unwrap_or_else(|| cx.tcx.item_name(instance.def_id()));
            let name = name.as_str();
            to_add.push(llvm::CreateAttrStringValue(cx.llcx, "wasm-import-name", name));
        }
    }

    // Global backend features come first, then per-function features, joined
    // with commas into a single "target-features" attribute string.
    let global_features = cx.tcx.global_backend_features(()).iter().map(|s| s.as_str());
    let function_features = function_features.iter().map(|s| s.as_str());
    let target_features: String =
        global_features.chain(function_features).intersperse(",").collect();
    if !target_features.is_empty() {
        to_add.push(llvm::CreateAttrStringValue(cx.llcx, "target-features", &target_features));
    }

    // Finally, apply everything collected above to the function place.
    attributes::apply_to_llfn(llfn, Function, &to_add);
}
526
527fn wasm_import_module(tcx: TyCtxt<'_>, id: DefId) -> Option<&String> {
528    tcx.wasm_import_module_map(id.krate).get(&id)
529}