
rustc_codegen_llvm/attributes.rs

//! Set and unset common attributes on LLVM values.
use rustc_attr_data_structures::{InlineAttr, InstructionSetAttr, OptimizeAttr};
use rustc_codegen_ssa::traits::*;
use rustc_hir::def_id::DefId;
use rustc_middle::middle::codegen_fn_attrs::{CodegenFnAttrFlags, PatchableFunctionEntry};
use rustc_middle::ty::{self, TyCtxt};
use rustc_session::config::{BranchProtection, FunctionReturn, OptLevel, PAuthKey, PacRet};
use rustc_symbol_mangling::mangle_internal_symbol;
use rustc_target::spec::{FramePointer, SanitizerSet, StackProbeType, StackProtector};
use smallvec::SmallVec;

use crate::context::CodegenCx;
use crate::errors::SanitizerMemtagRequiresMte;
use crate::llvm::AttributePlace::Function;
use crate::llvm::{self, AllocKindFlags, Attribute, AttributeKind, AttributePlace, MemoryEffects};
use crate::value::Value;
use crate::{attributes, llvm_util};

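/// Apply the given attributes to `llfn` at the given attribute place,
/// skipping the call into LLVM entirely when `attrs` is empty.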
pub(crate) fn apply_to_llfn(llfn: &Value, idx: AttributePlace, attrs: &[&Attribute]) {
    if !attrs.is_empty() {
        llvm::AddFunctionAttributes(llfn, idx, attrs);
    }
}

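/// Apply the given attributes to `callsite` at the given attribute place,
/// skipping the call into LLVM entirely when `attrs` is empty.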
pub(crate) fn apply_to_callsite(callsite: &Value, idx: AttributePlace, attrs: &[&Attribute]) {
    if !attrs.is_empty() {
        llvm::AddCallSiteAttributes(callsite, idx, attrs);
    }
}

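/// Check whether `llfn` has the given attribute kind at `idx`.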
pub(crate) fn has_attr(llfn: &Value, idx: AttributePlace, attr: AttributeKind) -> bool {
    llvm::HasAttributeAtIndex(llfn, idx, attr)
}

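/// Check whether `llfn` has a string attribute with the given name.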
pub(crate) fn has_string_attr(llfn: &Value, name: &str) -> bool {
    llvm::HasStringAttribute(llfn, name)
}

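/// Remove the given attribute kind from `llfn` at the given attribute place.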
pub(crate) fn remove_from_llfn(llfn: &Value, place: AttributePlace, kind: AttributeKind) {
    llvm::RemoveRustEnumAttributeAtIndex(llfn, place, kind);
}

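/// Remove the string attribute with the given name from `llfn`.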
pub(crate) fn remove_string_attr_from_llfn(llfn: &Value, name: &str) {
    llvm::RemoveStringAttrFromFn(llfn, name);
}

/// Get LLVM attribute for the provided inline heuristic.
#[inline]
fn inline_attr<'ll>(cx: &CodegenCx<'ll, '_>, inline: InlineAttr) -> Option<&'ll Attribute> {
    if !cx.tcx.sess.opts.unstable_opts.inline_llvm {
        // disable LLVM inlining
        return Some(AttributeKind::NoInline.create_attr(cx.llcx));
    }
    match inline {
        InlineAttr::Hint => Some(AttributeKind::InlineHint.create_attr(cx.llcx)),
        InlineAttr::Always | InlineAttr::Force { .. } => {
            Some(AttributeKind::AlwaysInline.create_attr(cx.llcx))
        }
        InlineAttr::Never => {
            if cx.sess().target.arch != "amdgpu" {
                Some(AttributeKind::NoInline.create_attr(cx.llcx))
            } else {
                None
            }
        }
        InlineAttr::None => None,
    }
}

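/// Get the `patchable-function-entry` and `patchable-function-prefix` attributes,
/// preferring the per-function attribute and falling back to the
/// `-Z patchable-function-entry` session option.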
#[inline]
fn patchable_function_entry_attrs<'ll>(
    cx: &CodegenCx<'ll, '_>,
    attr: Option<PatchableFunctionEntry>,
) -> SmallVec<[&'ll Attribute; 2]> {
    let mut attrs = SmallVec::new();
    let patchable_spec = attr.unwrap_or_else(|| {
        PatchableFunctionEntry::from_config(cx.tcx.sess.opts.unstable_opts.patchable_function_entry)
    });
    let entry = patchable_spec.entry();
    let prefix = patchable_spec.prefix();
    if entry > 0 {
        attrs.push(llvm::CreateAttrStringValue(
            cx.llcx,
            "patchable-function-entry",
            &format!("{}", entry),
        ));
    }
    if prefix > 0 {
        attrs.push(llvm::CreateAttrStringValue(
            cx.llcx,
            "patchable-function-prefix",
            &format!("{}", prefix),
        ));
    }
    attrs
}

/// Get LLVM sanitize attributes.
#[inline]
pub(crate) fn sanitize_attrs<'ll>(
    cx: &CodegenCx<'ll, '_>,
    no_sanitize: SanitizerSet,
) -> SmallVec<[&'ll Attribute; 4]> {
    let mut attrs = SmallVec::new();
    let enabled = cx.tcx.sess.opts.unstable_opts.sanitizer - no_sanitize;
    if enabled.contains(SanitizerSet::ADDRESS) || enabled.contains(SanitizerSet::KERNELADDRESS) {
        attrs.push(llvm::AttributeKind::SanitizeAddress.create_attr(cx.llcx));
    }
    if enabled.contains(SanitizerSet::MEMORY) {
        attrs.push(llvm::AttributeKind::SanitizeMemory.create_attr(cx.llcx));
    }
    if enabled.contains(SanitizerSet::THREAD) {
        attrs.push(llvm::AttributeKind::SanitizeThread.create_attr(cx.llcx));
    }
    if enabled.contains(SanitizerSet::HWADDRESS) {
        attrs.push(llvm::AttributeKind::SanitizeHWAddress.create_attr(cx.llcx));
    }
    if enabled.contains(SanitizerSet::SHADOWCALLSTACK) {
        attrs.push(llvm::AttributeKind::ShadowCallStack.create_attr(cx.llcx));
    }
    if enabled.contains(SanitizerSet::MEMTAG) {
        // Check to make sure the mte target feature is actually enabled.
        let features = cx.tcx.global_backend_features(());
        let mte_feature =
            features.iter().map(|s| &s[..]).rfind(|n| ["+mte", "-mte"].contains(&&n[..]));
        if let None | Some("-mte") = mte_feature {
            cx.tcx.dcx().emit_err(SanitizerMemtagRequiresMte);
        }

        attrs.push(llvm::AttributeKind::SanitizeMemTag.create_attr(cx.llcx));
    }
    if enabled.contains(SanitizerSet::SAFESTACK) {
        attrs.push(llvm::AttributeKind::SanitizeSafeStack.create_attr(cx.llcx));
    }
    attrs
}

/// Tell LLVM whether or not to emit the information necessary to unwind the stack for the
/// function.
#[inline]
pub(crate) fn uwtable_attr(llcx: &llvm::Context, use_sync_unwind: Option<bool>) -> &Attribute {
    // NOTE: We should determine if we even need async unwind tables, as they
    // have more overhead, and if we can use sync unwind tables we probably
    // should.
    let async_unwind = !use_sync_unwind.unwrap_or(false);
    llvm::CreateUWTableAttr(llcx, async_unwind)
}

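/// Get the `frame-pointer` attribute computed from the target's default frame pointer
/// policy, `-Z instrument-mcount`, and `-C force-frame-pointers`, or `None` when frame
/// pointers may be omitted.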
pub(crate) fn frame_pointer_type_attr<'ll>(cx: &CodegenCx<'ll, '_>) -> Option<&'ll Attribute> {
    let mut fp = cx.sess().target.frame_pointer;
    let opts = &cx.sess().opts;
    // "mcount" function relies on stack pointer.
    // See <https://sourceware.org/binutils/docs/gprof/Implementation.html>.
    if opts.unstable_opts.instrument_mcount {
        fp.ratchet(FramePointer::Always);
    }
    fp.ratchet(opts.cg.force_frame_pointers);
    let attr_value = match fp {
        FramePointer::Always => "all",
        FramePointer::NonLeaf => "non-leaf",
        FramePointer::MayOmit => return None,
    };
    Some(llvm::CreateAttrStringValue(cx.llcx, "frame-pointer", attr_value))
}

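/// Get the function-return thunk attribute corresponding to `-Z function-return`,
/// or `None` for the default `keep` behavior.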
fn function_return_attr<'ll>(cx: &CodegenCx<'ll, '_>) -> Option<&'ll Attribute> {
    let function_return_attr = match cx.sess().opts.unstable_opts.function_return {
        FunctionReturn::Keep => return None,
        FunctionReturn::ThunkExtern => AttributeKind::FnRetThunkExtern,
    };

    Some(function_return_attr.create_attr(cx.llcx))
}

/// Tell LLVM which instrumentation functions to insert.
#[inline]
fn instrument_function_attr<'ll>(cx: &CodegenCx<'ll, '_>) -> SmallVec<[&'ll Attribute; 4]> {
    let mut attrs = SmallVec::new();
    if cx.sess().opts.unstable_opts.instrument_mcount {
        // Similar to `clang -pg` behavior. Handled by the
        // `post-inline-ee-instrument` LLVM pass.

        // The function name varies across platforms.
        // See test/CodeGen/mcount.c in clang.
        let mcount_name = match &cx.sess().target.llvm_mcount_intrinsic {
            Some(llvm_mcount_intrinsic) => llvm_mcount_intrinsic.as_ref(),
            None => cx.sess().target.mcount.as_ref(),
        };

        attrs.push(llvm::CreateAttrStringValue(
            cx.llcx,
            "instrument-function-entry-inlined",
            mcount_name,
        ));
    }
    if let Some(options) = &cx.sess().opts.unstable_opts.instrument_xray {
        // XRay instrumentation is similar to __cyg_profile_func_{enter,exit}.
        // Function prologue and epilogue are instrumented with NOP sleds;
        // a runtime library later replaces them with detours into tracing code.
        if options.always {
            attrs.push(llvm::CreateAttrStringValue(cx.llcx, "function-instrument", "xray-always"));
        }
        if options.never {
            attrs.push(llvm::CreateAttrStringValue(cx.llcx, "function-instrument", "xray-never"));
        }
        if options.ignore_loops {
            attrs.push(llvm::CreateAttrString(cx.llcx, "xray-ignore-loops"));
        }
        // LLVM will not choose a default for us, but rather requires a specific
        // threshold in the absence of "xray-always". Use the same default as Clang.
        let threshold = options.instruction_threshold.unwrap_or(200);
        attrs.push(llvm::CreateAttrStringValue(
            cx.llcx,
            "xray-instruction-threshold",
            &threshold.to_string(),
        ));
        if options.skip_entry {
            attrs.push(llvm::CreateAttrString(cx.llcx, "xray-skip-entry"));
        }
        if options.skip_exit {
            attrs.push(llvm::CreateAttrString(cx.llcx, "xray-skip-exit"));
        }
    }
    attrs
}

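/// Get the `no-jump-tables` attribute when `-Z no-jump-tables` is enabled.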
fn nojumptables_attr<'ll>(cx: &CodegenCx<'ll, '_>) -> Option<&'ll Attribute> {
    if !cx.sess().opts.unstable_opts.no_jump_tables {
        return None;
    }

    Some(llvm::CreateAttrStringValue(cx.llcx, "no-jump-tables", "true"))
}

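/// Get the `probe-stack` attribute selecting either inline stack probes or the
/// `__rust_probestack` helper, or `None` when stack probes are disabled or
/// incompatible with the enabled sanitizers or `-C profile-generate`.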
fn probestack_attr<'ll>(cx: &CodegenCx<'ll, '_>) -> Option<&'ll Attribute> {
    // Currently stack probes seem somewhat incompatible with the address
    // sanitizer and thread sanitizer. With asan we're already protected from
    // stack overflow anyway so we don't really need stack probes regardless.
    if cx
        .sess()
        .opts
        .unstable_opts
        .sanitizer
        .intersects(SanitizerSet::ADDRESS | SanitizerSet::THREAD)
    {
        return None;
    }

    // probestack doesn't play nice either with `-C profile-generate`.
    if cx.sess().opts.cg.profile_generate.enabled() {
        return None;
    }

    let attr_value = match cx.sess().target.stack_probes {
        StackProbeType::None => return None,
        // Request LLVM to generate the probes inline. If the given LLVM version does not support
        // this, no probe is generated at all (even if the attribute is specified).
        StackProbeType::Inline => "inline-asm",
        // Flag our internal `__rust_probestack` function as the stack probe symbol.
        // This is defined in the `compiler-builtins` crate for each architecture.
        StackProbeType::Call => &mangle_internal_symbol(cx.tcx, "__rust_probestack"),
        // Pick from the two above based on the LLVM version.
        StackProbeType::InlineOrCall { min_llvm_version_for_inline } => {
            if llvm_util::get_version() < min_llvm_version_for_inline {
                &mangle_internal_symbol(cx.tcx, "__rust_probestack")
            } else {
                "inline-asm"
            }
        }
    };
    Some(llvm::CreateAttrStringValue(cx.llcx, "probe-stack", attr_value))
}

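/// Map the session's stack protector setting to the corresponding LLVM
/// stack protector attribute, if any.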
fn stackprotector_attr<'ll>(cx: &CodegenCx<'ll, '_>) -> Option<&'ll Attribute> {
    let sspattr = match cx.sess().stack_protector() {
        StackProtector::None => return None,
        StackProtector::All => AttributeKind::StackProtectReq,
        StackProtector::Strong => AttributeKind::StackProtectStrong,
        StackProtector::Basic => AttributeKind::StackProtect,
    };

    Some(sspattr.create_attr(cx.llcx))
}

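/// On s390x, get the `backchain` attribute when the `+backchain` target feature
/// was requested via `-C target-feature`; otherwise `None`.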
fn backchain_attr<'ll>(cx: &CodegenCx<'ll, '_>) -> Option<&'ll Attribute> {
    if cx.sess().target.arch != "s390x" {
        return None;
    }

    let requested_features = cx.sess().opts.cg.target_feature.split(',');
    let found_positive = requested_features.clone().any(|r| r == "+backchain");

    if found_positive { Some(llvm::CreateAttrString(cx.llcx, "backchain")) } else { None }
}

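/// Get the `target-cpu` attribute for the CPU the session is compiling for.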
pub(crate) fn target_cpu_attr<'ll>(cx: &CodegenCx<'ll, '_>) -> &'ll Attribute {
    let target_cpu = llvm_util::target_cpu(cx.tcx.sess);
    llvm::CreateAttrStringValue(cx.llcx, "target-cpu", target_cpu)
}

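/// Get the `tune-cpu` attribute, if a tune CPU was specified for the session.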
pub(crate) fn tune_cpu_attr<'ll>(cx: &CodegenCx<'ll, '_>) -> Option<&'ll Attribute> {
    llvm_util::tune_cpu(cx.tcx.sess)
        .map(|tune_cpu| llvm::CreateAttrStringValue(cx.llcx, "tune-cpu", tune_cpu))
}

/// Get the `NonLazyBind` LLVM attribute,
/// if the codegen options allow skipping the PLT.
pub(crate) fn non_lazy_bind_attr<'ll>(cx: &CodegenCx<'ll, '_>) -> Option<&'ll Attribute> {
    // Don't generate calls through PLT if it's not necessary
    if !cx.sess().needs_plt() {
        Some(AttributeKind::NonLazyBind.create_attr(cx.llcx))
    } else {
        None
    }
}

/// Get the default optimization attributes for a function.
#[inline]
pub(crate) fn default_optimisation_attrs<'ll>(
    cx: &CodegenCx<'ll, '_>,
) -> SmallVec<[&'ll Attribute; 2]> {
    let mut attrs = SmallVec::new();
    match cx.sess().opts.optimize {
        OptLevel::Size => {
            attrs.push(llvm::AttributeKind::OptimizeForSize.create_attr(cx.llcx));
        }
        OptLevel::SizeMin => {
            attrs.push(llvm::AttributeKind::MinSize.create_attr(cx.llcx));
            attrs.push(llvm::AttributeKind::OptimizeForSize.create_attr(cx.llcx));
        }
        _ => {}
    }
    attrs
}

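/// Create the `alloc-family` attribute with the `__rust_alloc` family name, applied
/// to the allocator, reallocator, and deallocator entry points below.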
fn create_alloc_family_attr(llcx: &llvm::Context) -> &llvm::Attribute {
    llvm::CreateAttrStringValue(llcx, "alloc-family", "__rust_alloc")
}

/// Helper for `FnAbi::apply_attrs_llfn`:
/// Composite function which sets LLVM attributes for a function depending on its AST
/// (`#[attribute]`) attributes.
pub(crate) fn llfn_attrs_from_instance<'ll, 'tcx>(
    cx: &CodegenCx<'ll, 'tcx>,
    llfn: &'ll Value,
    instance: ty::Instance<'tcx>,
) {
    let codegen_fn_attrs = cx.tcx.codegen_fn_attrs(instance.def_id());

    let mut to_add = SmallVec::<[_; 16]>::new();

    match codegen_fn_attrs.optimize {
        OptimizeAttr::Default => {
            to_add.extend(default_optimisation_attrs(cx));
        }
        OptimizeAttr::DoNotOptimize => {
            to_add.push(llvm::AttributeKind::OptimizeNone.create_attr(cx.llcx));
        }
        OptimizeAttr::Size => {
            to_add.push(llvm::AttributeKind::MinSize.create_attr(cx.llcx));
            to_add.push(llvm::AttributeKind::OptimizeForSize.create_attr(cx.llcx));
        }
        OptimizeAttr::Speed => {}
    }

    // `optnone` requires `noinline`
    let inline = match (codegen_fn_attrs.inline, &codegen_fn_attrs.optimize) {
        (_, OptimizeAttr::DoNotOptimize) => InlineAttr::Never,
        (InlineAttr::None, _) if instance.def.requires_inline(cx.tcx) => InlineAttr::Hint,
        (inline, _) => inline,
    };
    to_add.extend(inline_attr(cx, inline));

    // The `uwtable` attribute according to LLVM is:
    //
    //     This attribute indicates that the ABI being targeted requires that an
    //     unwind table entry be produced for this function even if we can show
    //     that no exceptions passes by it. This is normally the case for the
    //     ELF x86-64 abi, but it can be disabled for some compilation units.
    //
    // Typically when we're compiling with `-C panic=abort` (which implies this
    // `no_landing_pads` check) we don't need `uwtable` because we can't
    // generate any exceptions! On Windows, however, exceptions include other
    // events such as illegal instructions, segfaults, etc. This means that on
    // Windows we end up still needing the `uwtable` attribute even if the `-C
    // panic=abort` flag is passed.
    //
    // You can also find more info on why Windows always requires uwtables here:
    //      https://bugzilla.mozilla.org/show_bug.cgi?id=1302078
    if cx.sess().must_emit_unwind_tables() {
        to_add.push(uwtable_attr(cx.llcx, cx.sess().opts.unstable_opts.use_sync_unwind));
    }

    if cx.sess().opts.unstable_opts.profile_sample_use.is_some() {
        to_add.push(llvm::CreateAttrString(cx.llcx, "use-sample-profile"));
    }

    // FIXME: none of these functions interact with source level attributes.
    to_add.extend(frame_pointer_type_attr(cx));
    to_add.extend(function_return_attr(cx));
    to_add.extend(instrument_function_attr(cx));
    to_add.extend(nojumptables_attr(cx));
    to_add.extend(probestack_attr(cx));
    to_add.extend(stackprotector_attr(cx));

    if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::NO_BUILTINS) {
        to_add.push(llvm::CreateAttrString(cx.llcx, "no-builtins"));
    }

    if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::COLD) {
        to_add.push(AttributeKind::Cold.create_attr(cx.llcx));
    }
    if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::FFI_PURE) {
        to_add.push(MemoryEffects::ReadOnly.create_attr(cx.llcx));
    }
    if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::FFI_CONST) {
        to_add.push(MemoryEffects::None.create_attr(cx.llcx));
    }
    if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::NAKED) {
        // do nothing; a naked function is converted into an extern function
        // and a global assembly block. LLVM's support for naked functions is
        // not used.
    } else {
        // Do not set sanitizer attributes for naked functions.
        to_add.extend(sanitize_attrs(cx, codegen_fn_attrs.no_sanitize));

        // For non-naked functions, set branch protection attributes on aarch64.
        if let Some(BranchProtection { bti, pac_ret }) =
            cx.sess().opts.unstable_opts.branch_protection
        {
            assert!(cx.sess().target.arch == "aarch64");
            if bti {
                to_add.push(llvm::CreateAttrString(cx.llcx, "branch-target-enforcement"));
            }
            if let Some(PacRet { leaf, pc, key }) = pac_ret {
                if pc {
                    to_add.push(llvm::CreateAttrString(cx.llcx, "branch-protection-pauth-lr"));
                }
                to_add.push(llvm::CreateAttrStringValue(
                    cx.llcx,
                    "sign-return-address",
                    if leaf { "all" } else { "non-leaf" },
                ));
                to_add.push(llvm::CreateAttrStringValue(
                    cx.llcx,
                    "sign-return-address-key",
                    if key == PAuthKey::A { "a_key" } else { "b_key" },
                ));
            }
        }
    }
    if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::ALLOCATOR)
        || codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::ALLOCATOR_ZEROED)
    {
        to_add.push(create_alloc_family_attr(cx.llcx));
        // apply to argument place instead of function
        let alloc_align = AttributeKind::AllocAlign.create_attr(cx.llcx);
        attributes::apply_to_llfn(llfn, AttributePlace::Argument(1), &[alloc_align]);
        to_add.push(llvm::CreateAllocSizeAttr(cx.llcx, 0));
        let mut flags = AllocKindFlags::Alloc | AllocKindFlags::Aligned;
        if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::ALLOCATOR) {
            flags |= AllocKindFlags::Uninitialized;
        } else {
            flags |= AllocKindFlags::Zeroed;
        }
        to_add.push(llvm::CreateAllocKindAttr(cx.llcx, flags));
        // apply to return place instead of function (unlike all other attributes applied in this
        // function)
        let no_alias = AttributeKind::NoAlias.create_attr(cx.llcx);
        attributes::apply_to_llfn(llfn, AttributePlace::ReturnValue, &[no_alias]);
    }
    if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::REALLOCATOR) {
        to_add.push(create_alloc_family_attr(cx.llcx));
        to_add.push(llvm::CreateAllocKindAttr(
            cx.llcx,
            AllocKindFlags::Realloc | AllocKindFlags::Aligned,
        ));
        // applies to argument place instead of function place
        let allocated_pointer = AttributeKind::AllocatedPointer.create_attr(cx.llcx);
        attributes::apply_to_llfn(llfn, AttributePlace::Argument(0), &[allocated_pointer]);
        // apply to argument place instead of function
        let alloc_align = AttributeKind::AllocAlign.create_attr(cx.llcx);
        attributes::apply_to_llfn(llfn, AttributePlace::Argument(2), &[alloc_align]);
        to_add.push(llvm::CreateAllocSizeAttr(cx.llcx, 3));
        let no_alias = AttributeKind::NoAlias.create_attr(cx.llcx);
        attributes::apply_to_llfn(llfn, AttributePlace::ReturnValue, &[no_alias]);
    }
    if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::DEALLOCATOR) {
        to_add.push(create_alloc_family_attr(cx.llcx));
        to_add.push(llvm::CreateAllocKindAttr(cx.llcx, AllocKindFlags::Free));
        // applies to argument place instead of function place
        let allocated_pointer = AttributeKind::AllocatedPointer.create_attr(cx.llcx);
        attributes::apply_to_llfn(llfn, AttributePlace::Argument(0), &[allocated_pointer]);
    }
    if let Some(align) = codegen_fn_attrs.alignment {
        llvm::set_alignment(llfn, align);
    }
    if let Some(backchain) = backchain_attr(cx) {
        to_add.push(backchain);
    }
    to_add.extend(patchable_function_entry_attrs(cx, codegen_fn_attrs.patchable_function_entry));

    // Always annotate functions with the target-cpu they are compiled for.
    // Without this, ThinLTO won't inline Rust functions into Clang generated
    // functions (because Clang annotates functions this way too).
    to_add.push(target_cpu_attr(cx));
    // tune-cpu is only conveyed through the attribute for our purpose.
    // The target doesn't care; the subtarget reads our attribute.
    to_add.extend(tune_cpu_attr(cx));

    let function_features =
        codegen_fn_attrs.target_features.iter().map(|f| f.name.as_str()).collect::<Vec<&str>>();

    let function_features = function_features
        .iter()
        // Convert to LLVMFeatures and filter out unavailable ones
        .flat_map(|feat| llvm_util::to_llvm_features(cx.tcx.sess, feat))
        // Convert LLVMFeatures & dependencies to +<feats>s
        .flat_map(|feat| feat.into_iter().map(|f| format!("+{f}")))
        .chain(codegen_fn_attrs.instruction_set.iter().map(|x| match x {
            InstructionSetAttr::ArmA32 => "-thumb-mode".to_string(),
            InstructionSetAttr::ArmT32 => "+thumb-mode".to_string(),
        }))
        .collect::<Vec<String>>();

    if cx.tcx.sess.target.is_like_wasm {
        // If this function is an import from the environment but the wasm
        // import has a specific module/name, apply them here.
        if let Some(module) = wasm_import_module(cx.tcx, instance.def_id()) {
            to_add.push(llvm::CreateAttrStringValue(cx.llcx, "wasm-import-module", module));

            let name =
                codegen_fn_attrs.link_name.unwrap_or_else(|| cx.tcx.item_name(instance.def_id()));
            let name = name.as_str();
            to_add.push(llvm::CreateAttrStringValue(cx.llcx, "wasm-import-name", name));
        }
    }

    let global_features = cx.tcx.global_backend_features(()).iter().map(|s| s.as_str());
    let function_features = function_features.iter().map(|s| s.as_str());
    let target_features: String =
        global_features.chain(function_features).intersperse(",").collect();
    if !target_features.is_empty() {
        to_add.push(llvm::CreateAttrStringValue(cx.llcx, "target-features", &target_features));
    }

    attributes::apply_to_llfn(llfn, Function, &to_add);
}

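/// Look up the wasm import module recorded for the given `DefId`, if any.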
fn wasm_import_module(tcx: TyCtxt<'_>, id: DefId) -> Option<&String> {
    tcx.wasm_import_module_map(id.krate).get(&id)
}