
rustc_codegen_llvm/intrinsic.rs

use std::assert_matches::assert_matches;
use std::cmp::Ordering;

use rustc_abi::{Align, BackendRepr, ExternAbi, Float, HasDataLayout, Primitive, Size};
use rustc_codegen_ssa::base::{compare_simd_types, wants_msvc_seh, wants_wasm_eh};
use rustc_codegen_ssa::common::{IntPredicate, TypeKind};
use rustc_codegen_ssa::errors::{ExpectedPointerMutability, InvalidMonomorphization};
use rustc_codegen_ssa::mir::operand::{OperandRef, OperandValue};
use rustc_codegen_ssa::mir::place::{PlaceRef, PlaceValue};
use rustc_codegen_ssa::traits::*;
use rustc_hir as hir;
use rustc_middle::mir::BinOp;
use rustc_middle::ty::layout::{FnAbiOf, HasTyCtxt, HasTypingEnv, LayoutOf};
use rustc_middle::ty::{self, GenericArgsRef, Ty};
use rustc_middle::{bug, span_bug};
use rustc_span::{Span, Symbol, sym};
use rustc_symbol_mangling::mangle_internal_symbol;
use rustc_target::spec::PanicStrategy;
use tracing::debug;

use crate::abi::FnAbiLlvmExt;
use crate::builder::Builder;
use crate::context::CodegenCx;
use crate::llvm::{self, Metadata};
use crate::type_::Type;
use crate::type_of::LayoutLlvmExt;
use crate::va_arg::emit_va_arg;
use crate::value::Value;

fn call_simple_intrinsic<'ll, 'tcx>(
    bx: &mut Builder<'_, 'll, 'tcx>,
    name: Symbol,
    args: &[OperandRef<'tcx, &'ll Value>],
) -> Option<&'ll Value> {
    let (base_name, type_params): (&'static str, &[&'ll Type]) = match name {
        sym::sqrtf16 => ("llvm.sqrt", &[bx.type_f16()]),
        sym::sqrtf32 => ("llvm.sqrt", &[bx.type_f32()]),
        sym::sqrtf64 => ("llvm.sqrt", &[bx.type_f64()]),
        sym::sqrtf128 => ("llvm.sqrt", &[bx.type_f128()]),

        sym::powif16 => ("llvm.powi", &[bx.type_f16(), bx.type_i32()]),
        sym::powif32 => ("llvm.powi", &[bx.type_f32(), bx.type_i32()]),
        sym::powif64 => ("llvm.powi", &[bx.type_f64(), bx.type_i32()]),
        sym::powif128 => ("llvm.powi", &[bx.type_f128(), bx.type_i32()]),

        sym::sinf16 => ("llvm.sin", &[bx.type_f16()]),
        sym::sinf32 => ("llvm.sin", &[bx.type_f32()]),
        sym::sinf64 => ("llvm.sin", &[bx.type_f64()]),
        sym::sinf128 => ("llvm.sin", &[bx.type_f128()]),

        sym::cosf16 => ("llvm.cos", &[bx.type_f16()]),
        sym::cosf32 => ("llvm.cos", &[bx.type_f32()]),
        sym::cosf64 => ("llvm.cos", &[bx.type_f64()]),
        sym::cosf128 => ("llvm.cos", &[bx.type_f128()]),

        sym::powf16 => ("llvm.pow", &[bx.type_f16()]),
        sym::powf32 => ("llvm.pow", &[bx.type_f32()]),
        sym::powf64 => ("llvm.pow", &[bx.type_f64()]),
        sym::powf128 => ("llvm.pow", &[bx.type_f128()]),

        sym::expf16 => ("llvm.exp", &[bx.type_f16()]),
        sym::expf32 => ("llvm.exp", &[bx.type_f32()]),
        sym::expf64 => ("llvm.exp", &[bx.type_f64()]),
        sym::expf128 => ("llvm.exp", &[bx.type_f128()]),

        sym::exp2f16 => ("llvm.exp2", &[bx.type_f16()]),
        sym::exp2f32 => ("llvm.exp2", &[bx.type_f32()]),
        sym::exp2f64 => ("llvm.exp2", &[bx.type_f64()]),
        sym::exp2f128 => ("llvm.exp2", &[bx.type_f128()]),

        sym::logf16 => ("llvm.log", &[bx.type_f16()]),
        sym::logf32 => ("llvm.log", &[bx.type_f32()]),
        sym::logf64 => ("llvm.log", &[bx.type_f64()]),
        sym::logf128 => ("llvm.log", &[bx.type_f128()]),

        sym::log10f16 => ("llvm.log10", &[bx.type_f16()]),
        sym::log10f32 => ("llvm.log10", &[bx.type_f32()]),
        sym::log10f64 => ("llvm.log10", &[bx.type_f64()]),
        sym::log10f128 => ("llvm.log10", &[bx.type_f128()]),

        sym::log2f16 => ("llvm.log2", &[bx.type_f16()]),
        sym::log2f32 => ("llvm.log2", &[bx.type_f32()]),
        sym::log2f64 => ("llvm.log2", &[bx.type_f64()]),
        sym::log2f128 => ("llvm.log2", &[bx.type_f128()]),

        sym::fmaf16 => ("llvm.fma", &[bx.type_f16()]),
        sym::fmaf32 => ("llvm.fma", &[bx.type_f32()]),
        sym::fmaf64 => ("llvm.fma", &[bx.type_f64()]),
        sym::fmaf128 => ("llvm.fma", &[bx.type_f128()]),

        sym::fmuladdf16 => ("llvm.fmuladd", &[bx.type_f16()]),
        sym::fmuladdf32 => ("llvm.fmuladd", &[bx.type_f32()]),
        sym::fmuladdf64 => ("llvm.fmuladd", &[bx.type_f64()]),
        sym::fmuladdf128 => ("llvm.fmuladd", &[bx.type_f128()]),

        sym::fabsf16 => ("llvm.fabs", &[bx.type_f16()]),
        sym::fabsf32 => ("llvm.fabs", &[bx.type_f32()]),
        sym::fabsf64 => ("llvm.fabs", &[bx.type_f64()]),
        sym::fabsf128 => ("llvm.fabs", &[bx.type_f128()]),

        sym::minnumf16 => ("llvm.minnum", &[bx.type_f16()]),
        sym::minnumf32 => ("llvm.minnum", &[bx.type_f32()]),
        sym::minnumf64 => ("llvm.minnum", &[bx.type_f64()]),
        sym::minnumf128 => ("llvm.minnum", &[bx.type_f128()]),

        sym::minimumf16 => ("llvm.minimum", &[bx.type_f16()]),
        sym::minimumf32 => ("llvm.minimum", &[bx.type_f32()]),
        sym::minimumf64 => ("llvm.minimum", &[bx.type_f64()]),
        // There are issues on x86_64 and aarch64 with the f128 variant;
        // let's instead use the intrinsic fallback body.
        // sym::minimumf128 => ("llvm.minimum", &[cx.type_f128()]),
        sym::maxnumf16 => ("llvm.maxnum", &[bx.type_f16()]),
        sym::maxnumf32 => ("llvm.maxnum", &[bx.type_f32()]),
        sym::maxnumf64 => ("llvm.maxnum", &[bx.type_f64()]),
        sym::maxnumf128 => ("llvm.maxnum", &[bx.type_f128()]),

        sym::maximumf16 => ("llvm.maximum", &[bx.type_f16()]),
        sym::maximumf32 => ("llvm.maximum", &[bx.type_f32()]),
        sym::maximumf64 => ("llvm.maximum", &[bx.type_f64()]),
        // There are issues on x86_64 and aarch64 with the f128 variant;
        // let's instead use the intrinsic fallback body.
        // sym::maximumf128 => ("llvm.maximum", &[cx.type_f128()]),
        sym::copysignf16 => ("llvm.copysign", &[bx.type_f16()]),
        sym::copysignf32 => ("llvm.copysign", &[bx.type_f32()]),
        sym::copysignf64 => ("llvm.copysign", &[bx.type_f64()]),
        sym::copysignf128 => ("llvm.copysign", &[bx.type_f128()]),

        sym::floorf16 => ("llvm.floor", &[bx.type_f16()]),
        sym::floorf32 => ("llvm.floor", &[bx.type_f32()]),
        sym::floorf64 => ("llvm.floor", &[bx.type_f64()]),
        sym::floorf128 => ("llvm.floor", &[bx.type_f128()]),

        sym::ceilf16 => ("llvm.ceil", &[bx.type_f16()]),
        sym::ceilf32 => ("llvm.ceil", &[bx.type_f32()]),
        sym::ceilf64 => ("llvm.ceil", &[bx.type_f64()]),
        sym::ceilf128 => ("llvm.ceil", &[bx.type_f128()]),

        sym::truncf16 => ("llvm.trunc", &[bx.type_f16()]),
        sym::truncf32 => ("llvm.trunc", &[bx.type_f32()]),
        sym::truncf64 => ("llvm.trunc", &[bx.type_f64()]),
        sym::truncf128 => ("llvm.trunc", &[bx.type_f128()]),

        // We could use any of `rint`, `nearbyint`, or `roundeven`
        // for this -- they are all identical in semantics when
        // assuming the default FP environment.
        // `rint` is what we used for $forever.
        sym::round_ties_even_f16 => ("llvm.rint", &[bx.type_f16()]),
        sym::round_ties_even_f32 => ("llvm.rint", &[bx.type_f32()]),
        sym::round_ties_even_f64 => ("llvm.rint", &[bx.type_f64()]),
        sym::round_ties_even_f128 => ("llvm.rint", &[bx.type_f128()]),

        sym::roundf16 => ("llvm.round", &[bx.type_f16()]),
        sym::roundf32 => ("llvm.round", &[bx.type_f32()]),
        sym::roundf64 => ("llvm.round", &[bx.type_f64()]),
        sym::roundf128 => ("llvm.round", &[bx.type_f128()]),

        _ => return None,
    };
    Some(bx.call_intrinsic(
        base_name,
        type_params,
        &args.iter().map(|arg| arg.immediate()).collect::<Vec<_>>(),
    ))
}

impl<'ll, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> {
    fn codegen_intrinsic_call(
        &mut self,
        instance: ty::Instance<'tcx>,
        args: &[OperandRef<'tcx, &'ll Value>],
        result: PlaceRef<'tcx, &'ll Value>,
        span: Span,
    ) -> Result<(), ty::Instance<'tcx>> {
        let tcx = self.tcx;

        let name = tcx.item_name(instance.def_id());
        let fn_args = instance.args;

        let simple = call_simple_intrinsic(self, name, args);
        let llval = match name {
            _ if simple.is_some() => simple.unwrap(),
            sym::ptr_mask => {
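                // Note: `llvm.ptrmask` ands the pointer's address with the
                // mask while preserving provenance, so conceptually
                // `ptrmask(p, m)` yields a pointer with address `addr(p) & m`
                // that may still access `p`'s allocation.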
                let ptr = args[0].immediate();
                self.call_intrinsic(
                    "llvm.ptrmask",
                    &[self.val_ty(ptr), self.type_isize()],
                    &[ptr, args[1].immediate()],
                )
            }
            sym::is_val_statically_known => {
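                // `llvm.is.constant` folds to `true` only if LLVM can prove
                // the value is a constant after optimization; for operands
                // that are not immediates we conservatively answer `false`.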
                if let OperandValue::Immediate(imm) = args[0].val {
                    self.call_intrinsic(
                        "llvm.is.constant",
                        &[args[0].layout.immediate_llvm_type(self.cx)],
                        &[imm],
                    )
                } else {
                    self.const_bool(false)
                }
            }
            sym::select_unpredictable => {
                let cond = args[0].immediate();
                assert_eq!(args[1].layout, args[2].layout);
                let select = |bx: &mut Self, true_val, false_val| {
                    let result = bx.select(cond, true_val, false_val);
                    bx.set_unpredictable(&result);
                    result
                };
                match (args[1].val, args[2].val) {
                    (OperandValue::Ref(true_val), OperandValue::Ref(false_val)) => {
                        assert!(true_val.llextra.is_none());
                        assert!(false_val.llextra.is_none());
                        assert_eq!(true_val.align, false_val.align);
                        let ptr = select(self, true_val.llval, false_val.llval);
                        let selected =
                            OperandValue::Ref(PlaceValue::new_sized(ptr, true_val.align));
                        selected.store(self, result);
                        return Ok(());
                    }
                    (OperandValue::Immediate(_), OperandValue::Immediate(_))
                    | (OperandValue::Pair(_, _), OperandValue::Pair(_, _)) => {
                        let true_val = args[1].immediate_or_packed_pair(self);
                        let false_val = args[2].immediate_or_packed_pair(self);
                        select(self, true_val, false_val)
                    }
                    (OperandValue::ZeroSized, OperandValue::ZeroSized) => return Ok(()),
                    _ => span_bug!(span, "Incompatible OperandValue for select_unpredictable"),
                }
            }
            sym::catch_unwind => {
                catch_unwind_intrinsic(
                    self,
                    args[0].immediate(),
                    args[1].immediate(),
                    args[2].immediate(),
                    result,
                );
                return Ok(());
            }
            sym::breakpoint => self.call_intrinsic("llvm.debugtrap", &[], &[]),
            sym::va_copy => {
                let dest = args[0].immediate();
                self.call_intrinsic(
                    "llvm.va_copy",
                    &[self.val_ty(dest)],
                    &[dest, args[1].immediate()],
                )
            }
            sym::va_arg => {
                match result.layout.backend_repr {
                    BackendRepr::Scalar(scalar) => {
                        match scalar.primitive() {
                            Primitive::Int(..) => {
                                if self.cx().size_of(result.layout.ty).bytes() < 4 {
                                    // `va_arg` should not be called on an integer type
                                    // less than 4 bytes in length. If it is, promote
                                    // the integer to an `i32` and truncate the result
                                    // back to the smaller type.
                                    let promoted_result = emit_va_arg(self, args[0], tcx.types.i32);
                                    self.trunc(promoted_result, result.layout.llvm_type(self))
                                } else {
                                    emit_va_arg(self, args[0], result.layout.ty)
                                }
                            }
                            Primitive::Float(Float::F16) => {
                                bug!("the va_arg intrinsic does not work with `f16`")
                            }
                            Primitive::Float(Float::F64) | Primitive::Pointer(_) => {
                                emit_va_arg(self, args[0], result.layout.ty)
                            }
                            // `va_arg` should never be used with the return type f32.
                            Primitive::Float(Float::F32) => {
                                bug!("the va_arg intrinsic does not work with `f32`")
                            }
                            Primitive::Float(Float::F128) => {
                                bug!("the va_arg intrinsic does not work with `f128`")
                            }
                        }
                    }
                    _ => bug!("the va_arg intrinsic does not work with non-scalar types"),
                }
            }

            sym::volatile_load | sym::unaligned_volatile_load => {
                let ptr = args[0].immediate();
                let load = self.volatile_load(result.layout.llvm_type(self), ptr);
                let align = if name == sym::unaligned_volatile_load {
                    1
                } else {
                    result.layout.align.abi.bytes() as u32
                };
                unsafe {
                    llvm::LLVMSetAlignment(load, align);
                }
                if !result.layout.is_zst() {
                    self.store_to_place(load, result.val);
                }
                return Ok(());
            }
            sym::volatile_store => {
                let dst = args[0].deref(self.cx());
                args[1].val.volatile_store(self, dst);
                return Ok(());
            }
            sym::unaligned_volatile_store => {
                let dst = args[0].deref(self.cx());
                args[1].val.unaligned_volatile_store(self, dst);
                return Ok(());
            }
            sym::prefetch_read_data
            | sym::prefetch_write_data
            | sym::prefetch_read_instruction
            | sym::prefetch_write_instruction => {
                let (rw, cache_type) = match name {
                    sym::prefetch_read_data => (0, 1),
                    sym::prefetch_write_data => (1, 1),
                    sym::prefetch_read_instruction => (0, 0),
                    sym::prefetch_write_instruction => (1, 0),
                    _ => bug!(),
                };
                let ptr = args[0].immediate();
                self.call_intrinsic(
                    "llvm.prefetch",
                    &[self.val_ty(ptr)],
                    &[ptr, self.const_i32(rw), args[1].immediate(), self.const_i32(cache_type)],
                )
            }
            sym::carrying_mul_add => {
                let (size, signed) = fn_args.type_at(0).int_size_and_signed(self.tcx);

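                // Doing the arithmetic at twice the width makes the
                // multiply-add non-overflowing: for unsigned N-bit inputs,
                // (2^N - 1)^2 + 2 * (2^N - 1) = 2^(2N) - 1, e.g. for `u8`:
                // 255 * 255 + 255 + 255 = 65535 = u16::MAX.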
                let wide_llty = self.type_ix(size.bits() * 2);
                let args = args.as_array().unwrap();
                let [a, b, c, d] = args.map(|a| self.intcast(a.immediate(), wide_llty, signed));

                let wide = if signed {
                    let prod = self.unchecked_smul(a, b);
                    let acc = self.unchecked_sadd(prod, c);
                    self.unchecked_sadd(acc, d)
                } else {
                    let prod = self.unchecked_umul(a, b);
                    let acc = self.unchecked_uadd(prod, c);
                    self.unchecked_uadd(acc, d)
                };

                let narrow_llty = self.type_ix(size.bits());
                let low = self.trunc(wide, narrow_llty);
                let bits_const = self.const_uint(wide_llty, size.bits());
                // No need for ashr when signed; LLVM changes it to lshr anyway.
                let high = self.lshr(wide, bits_const);
                // FIXME: could be `trunc nuw`, even for signed.
                let high = self.trunc(high, narrow_llty);

                let pair_llty = self.type_struct(&[narrow_llty, narrow_llty], false);
                let pair = self.const_poison(pair_llty);
                let pair = self.insert_value(pair, low, 0);
                let pair = self.insert_value(pair, high, 1);
                pair
            }
            sym::ctlz
            | sym::ctlz_nonzero
            | sym::cttz
            | sym::cttz_nonzero
            | sym::ctpop
            | sym::bswap
            | sym::bitreverse
            | sym::rotate_left
            | sym::rotate_right
            | sym::saturating_add
            | sym::saturating_sub => {
                let ty = args[0].layout.ty;
                if !ty.is_integral() {
                    tcx.dcx().emit_err(InvalidMonomorphization::BasicIntegerType {
                        span,
                        name,
                        ty,
                    });
                    return Ok(());
                }
                let (size, signed) = ty.int_size_and_signed(self.tcx);
                let width = size.bits();
                let llty = self.type_ix(width);
                match name {
                    sym::ctlz | sym::cttz => {
                        let y = self.const_bool(false);
                        let ret = self.call_intrinsic(
                            format!("llvm.{name}"),
                            &[llty],
                            &[args[0].immediate(), y],
                        );

                        self.intcast(ret, result.layout.llvm_type(self), false)
                    }
                    sym::ctlz_nonzero => {
                        let y = self.const_bool(true);
                        let ret =
                            self.call_intrinsic("llvm.ctlz", &[llty], &[args[0].immediate(), y]);
                        self.intcast(ret, result.layout.llvm_type(self), false)
                    }
                    sym::cttz_nonzero => {
                        let y = self.const_bool(true);
                        let ret =
                            self.call_intrinsic("llvm.cttz", &[llty], &[args[0].immediate(), y]);
                        self.intcast(ret, result.layout.llvm_type(self), false)
                    }
                    sym::ctpop => {
                        let ret =
                            self.call_intrinsic("llvm.ctpop", &[llty], &[args[0].immediate()]);
                        self.intcast(ret, result.layout.llvm_type(self), false)
                    }
                    sym::bswap => {
                        if width == 8 {
                            args[0].immediate() // byte-swapping a u8/i8 is a no-op
                        } else {
                            self.call_intrinsic("llvm.bswap", &[llty], &[args[0].immediate()])
                        }
                    }
                    sym::bitreverse => {
                        self.call_intrinsic("llvm.bitreverse", &[llty], &[args[0].immediate()])
                    }
                    sym::rotate_left | sym::rotate_right => {
                        let is_left = name == sym::rotate_left;
                        let val = args[0].immediate();
                        let raw_shift = args[1].immediate();
                        // rotate = funnel shift with first two args the same
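                        // e.g. `llvm.fshl(x, x, s)` computes
                        // `(x << s) | (x >> (width - s))`, i.e. a left rotate
                        // by `s` (shift amounts are taken modulo the width).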
425                        let llvm_name = format!("llvm.fsh{}", if is_left { 'l' } else { 'r' });
426
427                        // llvm expects shift to be the same type as the values, but rust
428                        // always uses `u32`.
429                        let raw_shift = self.intcast(raw_shift, self.val_ty(val), false);
430
431                        self.call_intrinsic(llvm_name, &[llty], &[val, val, raw_shift])
432                    }
433                    sym::saturating_add | sym::saturating_sub => {
434                        let is_add = name == sym::saturating_add;
435                        let lhs = args[0].immediate();
436                        let rhs = args[1].immediate();
437                        let llvm_name = format!(
438                            "llvm.{}{}.sat",
439                            if signed { 's' } else { 'u' },
440                            if is_add { "add" } else { "sub" },
441                        );
442                        self.call_intrinsic(llvm_name, &[llty], &[lhs, rhs])
443                    }
444                    _ => bug!(),
445                }
446            }
447
448            sym::raw_eq => {
449                use BackendRepr::*;
450                let tp_ty = fn_args.type_at(0);
451                let layout = self.layout_of(tp_ty).layout;
452                let use_integer_compare = match layout.backend_repr() {
453                    Scalar(_) | ScalarPair(_, _) => true,
454                    SimdVector { .. } => false,
455                    Memory { .. } => {
456                        // For rusty ABIs, small aggregates are actually passed
457                        // as `RegKind::Integer` (see `FnAbi::adjust_for_abi`),
458                        // so we re-use that same threshold here.
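                        // e.g. with 8-byte pointers this covers aggregates of
                        // up to 16 bytes with a single integer load and compare.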
                        layout.size() <= self.data_layout().pointer_size * 2
                    }
                };

                let a = args[0].immediate();
                let b = args[1].immediate();
                if layout.size().bytes() == 0 {
                    self.const_bool(true)
                } else if use_integer_compare {
                    let integer_ty = self.type_ix(layout.size().bits());
                    let a_val = self.load(integer_ty, a, layout.align().abi);
                    let b_val = self.load(integer_ty, b, layout.align().abi);
                    self.icmp(IntPredicate::IntEQ, a_val, b_val)
                } else {
                    let n = self.const_usize(layout.size().bytes());
                    let cmp = self.call_intrinsic("memcmp", &[], &[a, b, n]);
                    self.icmp(IntPredicate::IntEQ, cmp, self.const_int(self.type_int(), 0))
                }
            }

            sym::compare_bytes => {
                // Here we assume that the `memcmp` provided by the target is a NOP for size 0.
                let cmp = self.call_intrinsic(
                    "memcmp",
                    &[],
                    &[args[0].immediate(), args[1].immediate(), args[2].immediate()],
                );
                // Some targets have `memcmp` returning `i16`, but the intrinsic is always `i32`.
                self.sext(cmp, self.type_ix(32))
            }

            sym::black_box => {
                args[0].val.store(self, result);
                let result_val_span = [result.val.llval];
                // We need to "use" the argument in some way LLVM can't introspect, and on
                // targets that support it we can typically leverage inline assembly to do
                // this. LLVM's interpretation of inline assembly is that it's, well, a black
                // box. This isn't the greatest implementation since it probably deoptimizes
                // more than we want, but it's so far good enough.
                //
                // For zero-sized types, the location pointed to by the result may be
                // uninitialized. Do not "use" the result in this case; instead just clobber
                // the memory.
                let (constraint, inputs): (&str, &[_]) = if result.layout.is_zst() {
                    ("~{memory}", &[])
                } else {
                    ("r,~{memory}", &result_val_span)
                };
                crate::asm::inline_asm_call(
                    self,
                    "",
                    constraint,
                    inputs,
                    self.type_void(),
                    &[],
                    true,
                    false,
                    llvm::AsmDialect::Att,
                    &[span],
                    false,
                    None,
                    None,
                )
                .unwrap_or_else(|| bug!("failed to generate inline asm call for `black_box`"));

                // We have copied the value to `result` already.
                return Ok(());
            }

            _ if name.as_str().starts_with("simd_") => {
                // Unpack non-power-of-2 #[repr(packed, simd)] arguments.
                // This gives them the expected layout of a regular #[repr(simd)] vector.
                let mut loaded_args = Vec::new();
                for arg in args {
                    loaded_args.push(
                        // #[repr(packed, simd)] vectors are passed like arrays (as references,
                        // with reduced alignment and no padding) rather than as immediates.
                        // We can use a vector load to fix the layout and turn the argument
                        // into an immediate.
                        if arg.layout.ty.is_simd()
                            && let OperandValue::Ref(place) = arg.val
                        {
                            let (size, elem_ty) = arg.layout.ty.simd_size_and_type(self.tcx());
                            let elem_ll_ty = match elem_ty.kind() {
                                ty::Float(f) => self.type_float_from_ty(*f),
                                ty::Int(i) => self.type_int_from_ty(*i),
                                ty::Uint(u) => self.type_uint_from_ty(*u),
                                ty::RawPtr(_, _) => self.type_ptr(),
                                _ => unreachable!(),
                            };
                            let loaded =
                                self.load_from_place(self.type_vector(elem_ll_ty, size), place);
                            OperandRef::from_immediate_or_packed_pair(self, loaded, arg.layout)
                        } else {
                            *arg
                        },
                    );
                }

                let llret_ty = if result.layout.ty.is_simd()
                    && let BackendRepr::Memory { .. } = result.layout.backend_repr
                {
                    let (size, elem_ty) = result.layout.ty.simd_size_and_type(self.tcx());
                    let elem_ll_ty = match elem_ty.kind() {
                        ty::Float(f) => self.type_float_from_ty(*f),
                        ty::Int(i) => self.type_int_from_ty(*i),
                        ty::Uint(u) => self.type_uint_from_ty(*u),
                        ty::RawPtr(_, _) => self.type_ptr(),
                        _ => unreachable!(),
                    };
                    self.type_vector(elem_ll_ty, size)
                } else {
                    result.layout.llvm_type(self)
                };

                match generic_simd_intrinsic(
                    self,
                    name,
                    fn_args,
                    &loaded_args,
                    result.layout.ty,
                    llret_ty,
                    span,
                ) {
                    Ok(llval) => llval,
                    // If there was an error, just skip this invocation... we'll abort compilation
                    // anyway, but we can keep codegen'ing to find more errors.
                    Err(()) => return Ok(()),
                }
            }

            _ => {
                debug!("unknown intrinsic '{}' -- falling back to default body", name);
                // Call the fallback body instead of generating the intrinsic code
                return Err(ty::Instance::new_raw(instance.def_id(), instance.args));
            }
        };

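        // `bool` is an `i1` as an immediate but an `i8` in memory, so widen
        // it via `from_immediate` before storing.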
        if result.layout.ty.is_bool() {
            let val = self.from_immediate(llval);
            self.store_to_place(val, result.val);
        } else if !result.layout.ty.is_unit() {
            self.store_to_place(llval, result.val);
        }
        Ok(())
    }

    fn abort(&mut self) {
        self.call_intrinsic("llvm.trap", &[], &[]);
    }

    fn assume(&mut self, val: Self::Value) {
        if self.cx.sess().opts.optimize != rustc_session::config::OptLevel::No {
            self.call_intrinsic("llvm.assume", &[], &[val]);
        }
    }

    fn expect(&mut self, cond: Self::Value, expected: bool) -> Self::Value {
        if self.cx.sess().opts.optimize != rustc_session::config::OptLevel::No {
            self.call_intrinsic(
                "llvm.expect",
                &[self.type_i1()],
                &[cond, self.const_bool(expected)],
            )
        } else {
            cond
        }
    }

    fn type_checked_load(
        &mut self,
        llvtable: &'ll Value,
        vtable_byte_offset: u64,
        typeid: &'ll Metadata,
    ) -> Self::Value {
        let typeid = self.get_metadata_value(typeid);
        let vtable_byte_offset = self.const_i32(vtable_byte_offset as i32);
        let type_checked_load = self.call_intrinsic(
            "llvm.type.checked.load",
            &[],
            &[llvtable, vtable_byte_offset, typeid],
        );
        self.extract_value(type_checked_load, 0)
    }

    fn va_start(&mut self, va_list: &'ll Value) -> &'ll Value {
        self.call_intrinsic("llvm.va_start", &[self.val_ty(va_list)], &[va_list])
    }

    fn va_end(&mut self, va_list: &'ll Value) -> &'ll Value {
        self.call_intrinsic("llvm.va_end", &[self.val_ty(va_list)], &[va_list])
    }
}

fn catch_unwind_intrinsic<'ll, 'tcx>(
    bx: &mut Builder<'_, 'll, 'tcx>,
    try_func: &'ll Value,
    data: &'ll Value,
    catch_func: &'ll Value,
    dest: PlaceRef<'tcx, &'ll Value>,
) {
    if bx.sess().panic_strategy() == PanicStrategy::Abort {
        let try_func_ty = bx.type_func(&[bx.type_ptr()], bx.type_void());
        bx.call(try_func_ty, None, None, try_func, &[data], None, None);
        // Return 0 unconditionally from the intrinsic call;
        // we can never unwind.
        OperandValue::Immediate(bx.const_i32(0)).store(bx, dest);
    } else if wants_msvc_seh(bx.sess()) {
        codegen_msvc_try(bx, try_func, data, catch_func, dest);
    } else if wants_wasm_eh(bx.sess()) {
        codegen_wasm_try(bx, try_func, data, catch_func, dest);
    } else if bx.sess().target.os == "emscripten" {
        codegen_emcc_try(bx, try_func, data, catch_func, dest);
    } else {
        codegen_gnu_try(bx, try_func, data, catch_func, dest);
    }
}

// MSVC's definition of the `rust_try` function.
//
// This implementation uses the new exception handling instructions in LLVM
// that support SEH on MSVC targets. Although these instructions are meant to
// work for all targets, as of this writing LLVM does not recommend using
// them, as the older instructions are still better optimized.
fn codegen_msvc_try<'ll, 'tcx>(
    bx: &mut Builder<'_, 'll, 'tcx>,
    try_func: &'ll Value,
    data: &'ll Value,
    catch_func: &'ll Value,
    dest: PlaceRef<'tcx, &'ll Value>,
) {
    let (llty, llfn) = get_rust_try_fn(bx, &mut |mut bx| {
        bx.set_personality_fn(bx.eh_personality());

        let normal = bx.append_sibling_block("normal");
        let catchswitch = bx.append_sibling_block("catchswitch");
        let catchpad_rust = bx.append_sibling_block("catchpad_rust");
        let catchpad_foreign = bx.append_sibling_block("catchpad_foreign");
        let caught = bx.append_sibling_block("caught");

        let try_func = llvm::get_param(bx.llfn(), 0);
        let data = llvm::get_param(bx.llfn(), 1);
        let catch_func = llvm::get_param(bx.llfn(), 2);

        // We're generating an IR snippet that looks like:
        //
        //   declare i32 @rust_try(%try_func, %data, %catch_func) {
        //      %slot = alloca i8*
        //      invoke %try_func(%data) to label %normal unwind label %catchswitch
        //
        //   normal:
        //      ret i32 0
        //
        //   catchswitch:
        //      %cs = catchswitch within none [%catchpad_rust, %catchpad_foreign] unwind to caller
        //
        //   catchpad_rust:
        //      %tok = catchpad within %cs [%type_descriptor, 8, %slot]
        //      %ptr = load %slot
        //      call %catch_func(%data, %ptr)
        //      catchret from %tok to label %caught
        //
        //   catchpad_foreign:
        //      %tok = catchpad within %cs [null, 64, null]
        //      call %catch_func(%data, null)
        //      catchret from %tok to label %caught
        //
        //   caught:
        //      ret i32 1
        //   }
        //
        // This structure follows the basic usage of throw/try/catch in LLVM.
        // For example, compile this C++ snippet to see what LLVM generates:
        //
        //      struct rust_panic {
        //          rust_panic(const rust_panic&);
        //          ~rust_panic();
        //
        //          void* x[2];
        //      };
        //
        //      int __rust_try(
        //          void (*try_func)(void*),
        //          void *data,
        //          void (*catch_func)(void*, void*) noexcept
        //      ) {
        //          try {
        //              try_func(data);
        //              return 0;
        //          } catch(rust_panic& a) {
        //              catch_func(data, &a);
        //              return 1;
        //          } catch(...) {
        //              catch_func(data, NULL);
        //              return 1;
        //          }
        //      }
        //
        // More information can be found in libstd's seh.rs implementation.
        let ptr_size = bx.tcx().data_layout.pointer_size;
        let ptr_align = bx.tcx().data_layout.pointer_align.abi;
        let slot = bx.alloca(ptr_size, ptr_align);
        let try_func_ty = bx.type_func(&[bx.type_ptr()], bx.type_void());
        bx.invoke(try_func_ty, None, None, try_func, &[data], normal, catchswitch, None, None);

        bx.switch_to_block(normal);
        bx.ret(bx.const_i32(0));

        bx.switch_to_block(catchswitch);
        let cs = bx.catch_switch(None, None, &[catchpad_rust, catchpad_foreign]);

        // We can't use the TypeDescriptor defined in libpanic_unwind because it
        // might be in another DLL and the SEH encoding only supports specifying
        // a TypeDescriptor from the current module.
        //
        // However this isn't an issue since the MSVC runtime uses string
        // comparison on the type name to match TypeDescriptors rather than
        // pointer equality.
        //
        // So instead we generate a new TypeDescriptor in each module that uses
        // `try` and let the linker merge duplicate definitions in the same
        // module.
        //
        // When modifying, make sure that the type_name string exactly matches
        // the one used in library/panic_unwind/src/seh.rs.
        let type_info_vtable = bx.declare_global("??_7type_info@@6B@", bx.type_ptr());
        let type_name = bx.const_bytes(b"rust_panic\0");
        let type_info =
            bx.const_struct(&[type_info_vtable, bx.const_null(bx.type_ptr()), type_name], false);
        let tydesc = bx.declare_global(
            &mangle_internal_symbol(bx.tcx, "__rust_panic_type_info"),
            bx.val_ty(type_info),
        );

        llvm::set_linkage(tydesc, llvm::Linkage::LinkOnceODRLinkage);
        if bx.cx.tcx.sess.target.supports_comdat() {
            llvm::SetUniqueComdat(bx.llmod, tydesc);
        }
        llvm::set_initializer(tydesc, type_info);

        // The flag value of 8 indicates that we are catching the exception by
        // reference instead of by value. We can't use catch by value because
        // that requires copying the exception object, which we don't support
        // since our exception object effectively contains a Box.
        //
        // Source: MicrosoftCXXABI::getAddrOfCXXCatchHandlerType in clang
        bx.switch_to_block(catchpad_rust);
        let flags = bx.const_i32(8);
        let funclet = bx.catch_pad(cs, &[tydesc, flags, slot]);
        let ptr = bx.load(bx.type_ptr(), slot, ptr_align);
        let catch_ty = bx.type_func(&[bx.type_ptr(), bx.type_ptr()], bx.type_void());
        bx.call(catch_ty, None, None, catch_func, &[data, ptr], Some(&funclet), None);
        bx.catch_ret(&funclet, caught);

        // The flag value of 64 indicates a "catch-all".
        bx.switch_to_block(catchpad_foreign);
        let flags = bx.const_i32(64);
        let null = bx.const_null(bx.type_ptr());
        let funclet = bx.catch_pad(cs, &[null, flags, null]);
        bx.call(catch_ty, None, None, catch_func, &[data, null], Some(&funclet), None);
        bx.catch_ret(&funclet, caught);

        bx.switch_to_block(caught);
        bx.ret(bx.const_i32(1));
    });

    // Note that no invoke is used here because by definition this function
    // can't panic (that's what it's catching).
    let ret = bx.call(llty, None, None, llfn, &[try_func, data, catch_func], None, None);
    OperandValue::Immediate(ret).store(bx, dest);
}

// WASM's definition of the `rust_try` function.
fn codegen_wasm_try<'ll, 'tcx>(
    bx: &mut Builder<'_, 'll, 'tcx>,
    try_func: &'ll Value,
    data: &'ll Value,
    catch_func: &'ll Value,
    dest: PlaceRef<'tcx, &'ll Value>,
) {
    let (llty, llfn) = get_rust_try_fn(bx, &mut |mut bx| {
        bx.set_personality_fn(bx.eh_personality());

        let normal = bx.append_sibling_block("normal");
        let catchswitch = bx.append_sibling_block("catchswitch");
        let catchpad = bx.append_sibling_block("catchpad");
        let caught = bx.append_sibling_block("caught");

        let try_func = llvm::get_param(bx.llfn(), 0);
        let data = llvm::get_param(bx.llfn(), 1);
        let catch_func = llvm::get_param(bx.llfn(), 2);

        // We're generating an IR snippet that looks like:
        //
        //   declare i32 @rust_try(%try_func, %data, %catch_func) {
        //      %slot = alloca i8*
        //      invoke %try_func(%data) to label %normal unwind label %catchswitch
        //
        //   normal:
        //      ret i32 0
        //
        //   catchswitch:
        //      %cs = catchswitch within none [%catchpad] unwind to caller
        //
        //   catchpad:
        //      %tok = catchpad within %cs [null]
        //      %ptr = call @llvm.wasm.get.exception(token %tok)
        //      %sel = call @llvm.wasm.get.ehselector(token %tok)
        //      call %catch_func(%data, %ptr)
        //      catchret from %tok to label %caught
        //
        //   caught:
        //      ret i32 1
        //   }
        //
        let try_func_ty = bx.type_func(&[bx.type_ptr()], bx.type_void());
        bx.invoke(try_func_ty, None, None, try_func, &[data], normal, catchswitch, None, None);

        bx.switch_to_block(normal);
        bx.ret(bx.const_i32(0));

        bx.switch_to_block(catchswitch);
        let cs = bx.catch_switch(None, None, &[catchpad]);

        bx.switch_to_block(catchpad);
        let null = bx.const_null(bx.type_ptr());
        let funclet = bx.catch_pad(cs, &[null]);

        let ptr = bx.call_intrinsic("llvm.wasm.get.exception", &[], &[funclet.cleanuppad()]);
        let _sel = bx.call_intrinsic("llvm.wasm.get.ehselector", &[], &[funclet.cleanuppad()]);

        let catch_ty = bx.type_func(&[bx.type_ptr(), bx.type_ptr()], bx.type_void());
        bx.call(catch_ty, None, None, catch_func, &[data, ptr], Some(&funclet), None);
        bx.catch_ret(&funclet, caught);

        bx.switch_to_block(caught);
        bx.ret(bx.const_i32(1));
    });

    // Note that no invoke is used here because by definition this function
    // can't panic (that's what it's catching).
    let ret = bx.call(llty, None, None, llfn, &[try_func, data, catch_func], None, None);
    OperandValue::Immediate(ret).store(bx, dest);
}

// Definition of the standard `try` function for Rust using the GNU-like model
// of exceptions (e.g., the normal semantics of LLVM's `landingpad` and `invoke`
// instructions).
//
// This codegen is a little surprising because we always call a shim
// function instead of inlining the call to `invoke` manually here. This is done
// because in LLVM we're only allowed to have one personality per function
// definition. The call to the `try` intrinsic is being inlined into the
// function calling it, and that function may already have other personality
// functions in play. By calling a shim we're guaranteed that our shim will have
// the right personality function.
fn codegen_gnu_try<'ll, 'tcx>(
    bx: &mut Builder<'_, 'll, 'tcx>,
    try_func: &'ll Value,
    data: &'ll Value,
    catch_func: &'ll Value,
    dest: PlaceRef<'tcx, &'ll Value>,
) {
    let (llty, llfn) = get_rust_try_fn(bx, &mut |mut bx| {
        // Codegens the shim described above:
        //
        //   bx:
        //      invoke %try_func(%data) normal %normal unwind %catch
        //
        //   normal:
        //      ret 0
        //
        //   catch:
        //      (%ptr, _) = landingpad
        //      call %catch_func(%data, %ptr)
        //      ret 1
        let then = bx.append_sibling_block("then");
        let catch = bx.append_sibling_block("catch");

        let try_func = llvm::get_param(bx.llfn(), 0);
        let data = llvm::get_param(bx.llfn(), 1);
        let catch_func = llvm::get_param(bx.llfn(), 2);
        let try_func_ty = bx.type_func(&[bx.type_ptr()], bx.type_void());
        bx.invoke(try_func_ty, None, None, try_func, &[data], then, catch, None, None);

        bx.switch_to_block(then);
        bx.ret(bx.const_i32(0));

        // Type indicator for the exception being thrown.
        //
        // The first value in this tuple is a pointer to the exception object
        // being thrown. The second value is a "selector" indicating which of
        // the landing pad clauses the exception's type had been matched to.
        // rust_try ignores the selector.
        bx.switch_to_block(catch);
        let lpad_ty = bx.type_struct(&[bx.type_ptr(), bx.type_i32()], false);
        let vals = bx.landing_pad(lpad_ty, bx.eh_personality(), 1);
        let tydesc = bx.const_null(bx.type_ptr());
        bx.add_clause(vals, tydesc);
        let ptr = bx.extract_value(vals, 0);
        let catch_ty = bx.type_func(&[bx.type_ptr(), bx.type_ptr()], bx.type_void());
        bx.call(catch_ty, None, None, catch_func, &[data, ptr], None, None);
        bx.ret(bx.const_i32(1));
    });

    // Note that no invoke is used here because by definition this function
    // can't panic (that's what it's catching).
    let ret = bx.call(llty, None, None, llfn, &[try_func, data, catch_func], None, None);
    OperandValue::Immediate(ret).store(bx, dest);
}

// Variant of codegen_gnu_try used for emscripten where Rust panics are
// implemented using C++ exceptions. Here we use exceptions of a specific type
// (`struct rust_panic`) to represent Rust panics.
fn codegen_emcc_try<'ll, 'tcx>(
    bx: &mut Builder<'_, 'll, 'tcx>,
    try_func: &'ll Value,
    data: &'ll Value,
    catch_func: &'ll Value,
    dest: PlaceRef<'tcx, &'ll Value>,
) {
    let (llty, llfn) = get_rust_try_fn(bx, &mut |mut bx| {
        // Codegens the shim described above:
        //
        //   bx:
        //      invoke %try_func(%data) normal %normal unwind %catch
        //
        //   normal:
        //      ret 0
        //
        //   catch:
        //      (%ptr, %selector) = landingpad
        //      %rust_typeid = @llvm.eh.typeid.for(@_ZTI10rust_panic)
        //      %is_rust_panic = %selector == %rust_typeid
        //      %catch_data = alloca { i8*, i8 }
        //      %catch_data[0] = %ptr
        //      %catch_data[1] = %is_rust_panic
        //      call %catch_func(%data, %catch_data)
        //      ret 1
        let then = bx.append_sibling_block("then");
        let catch = bx.append_sibling_block("catch");

        let try_func = llvm::get_param(bx.llfn(), 0);
        let data = llvm::get_param(bx.llfn(), 1);
        let catch_func = llvm::get_param(bx.llfn(), 2);
        let try_func_ty = bx.type_func(&[bx.type_ptr()], bx.type_void());
        bx.invoke(try_func_ty, None, None, try_func, &[data], then, catch, None, None);

        bx.switch_to_block(then);
        bx.ret(bx.const_i32(0));

        // Type indicator for the exception being thrown.
        //
        // The first value in this tuple is a pointer to the exception object
        // being thrown. The second value is a "selector" indicating which of
        // the landing pad clauses the exception's type had been matched to.
        bx.switch_to_block(catch);
        let tydesc = bx.eh_catch_typeinfo();
        let lpad_ty = bx.type_struct(&[bx.type_ptr(), bx.type_i32()], false);
        let vals = bx.landing_pad(lpad_ty, bx.eh_personality(), 2);
        bx.add_clause(vals, tydesc);
        bx.add_clause(vals, bx.const_null(bx.type_ptr()));
        let ptr = bx.extract_value(vals, 0);
        let selector = bx.extract_value(vals, 1);

        // Check if the typeid we got is the one for a Rust panic.
        let rust_typeid = bx.call_intrinsic("llvm.eh.typeid.for", &[bx.val_ty(tydesc)], &[tydesc]);
        let is_rust_panic = bx.icmp(IntPredicate::IntEQ, selector, rust_typeid);
        let is_rust_panic = bx.zext(is_rust_panic, bx.type_bool());

        // We need to pass two values to catch_func (ptr and is_rust_panic), so
        // create an alloca and pass a pointer to that.
        let ptr_size = bx.tcx().data_layout.pointer_size;
        let ptr_align = bx.tcx().data_layout.pointer_align.abi;
        let i8_align = bx.tcx().data_layout.i8_align.abi;
        // Required in order for there to be no padding between the fields.
        assert!(i8_align <= ptr_align);
        let catch_data = bx.alloca(2 * ptr_size, ptr_align);
        bx.store(ptr, catch_data, ptr_align);
        let catch_data_1 = bx.inbounds_ptradd(catch_data, bx.const_usize(ptr_size.bytes()));
        bx.store(is_rust_panic, catch_data_1, i8_align);

        let catch_ty = bx.type_func(&[bx.type_ptr(), bx.type_ptr()], bx.type_void());
        bx.call(catch_ty, None, None, catch_func, &[data, catch_data], None, None);
        bx.ret(bx.const_i32(1));
    });

    // Note that no invoke is used here because by definition this function
    // can't panic (that's what it's catching).
    let ret = bx.call(llty, None, None, llfn, &[try_func, data, catch_func], None, None);
    OperandValue::Immediate(ret).store(bx, dest);
}

// Helper function to give a Block to a closure to codegen a shim function.
// This is currently primarily used for the `try` intrinsic functions above.
fn gen_fn<'a, 'll, 'tcx>(
    cx: &'a CodegenCx<'ll, 'tcx>,
    name: &str,
    rust_fn_sig: ty::PolyFnSig<'tcx>,
    codegen: &mut dyn FnMut(Builder<'a, 'll, 'tcx>),
) -> (&'ll Type, &'ll Value) {
    let fn_abi = cx.fn_abi_of_fn_ptr(rust_fn_sig, ty::List::empty());
    let llty = fn_abi.llvm_type(cx);
    let llfn = cx.declare_fn(name, fn_abi, None);
    cx.set_frame_pointer_type(llfn);
    cx.apply_target_cpu_attr(llfn);
    // FIXME(eddyb) find a nicer way to do this.
    llvm::set_linkage(llfn, llvm::Linkage::InternalLinkage);
    let llbb = Builder::append_block(cx, llfn, "entry-block");
    let bx = Builder::build(cx, llbb);
    codegen(bx);
    (llty, llfn)
}

// Helper function used to get a handle to the `__rust_try` function used to
// catch exceptions.
//
// This function is only generated once and is then cached.
fn get_rust_try_fn<'a, 'll, 'tcx>(
    cx: &'a CodegenCx<'ll, 'tcx>,
    codegen: &mut dyn FnMut(Builder<'a, 'll, 'tcx>),
) -> (&'ll Type, &'ll Value) {
    if let Some(llfn) = cx.rust_try_fn.get() {
        return llfn;
    }

    // Define the type up front for the signature of the rust_try function.
    let tcx = cx.tcx;
    let i8p = Ty::new_mut_ptr(tcx, tcx.types.i8);
    // `unsafe fn(*mut i8) -> ()`
    let try_fn_ty = Ty::new_fn_ptr(
        tcx,
        ty::Binder::dummy(tcx.mk_fn_sig(
            [i8p],
            tcx.types.unit,
            false,
            hir::Safety::Unsafe,
            ExternAbi::Rust,
        )),
    );
    // `unsafe fn(*mut i8, *mut i8) -> ()`
    let catch_fn_ty = Ty::new_fn_ptr(
        tcx,
        ty::Binder::dummy(tcx.mk_fn_sig(
            [i8p, i8p],
            tcx.types.unit,
            false,
            hir::Safety::Unsafe,
            ExternAbi::Rust,
        )),
    );
    // `unsafe fn(unsafe fn(*mut i8) -> (), *mut i8, unsafe fn(*mut i8, *mut i8) -> ()) -> i32`
    let rust_fn_sig = ty::Binder::dummy(cx.tcx.mk_fn_sig(
        [try_fn_ty, i8p, catch_fn_ty],
        tcx.types.i32,
        false,
        hir::Safety::Unsafe,
        ExternAbi::Rust,
    ));
    let rust_try = gen_fn(cx, "__rust_try", rust_fn_sig, codegen);
    cx.rust_try_fn.set(Some(rust_try));
    rust_try
}

fn generic_simd_intrinsic<'ll, 'tcx>(
    bx: &mut Builder<'_, 'll, 'tcx>,
    name: Symbol,
    fn_args: GenericArgsRef<'tcx>,
    args: &[OperandRef<'tcx, &'ll Value>],
    ret_ty: Ty<'tcx>,
    llret_ty: &'ll Type,
    span: Span,
) -> Result<&'ll Value, ()> {
    macro_rules! return_error {
        ($diag: expr) => {{
            bx.sess().dcx().emit_err($diag);
            return Err(());
        }};
    }

    macro_rules! require {
        ($cond: expr, $diag: expr) => {
            if !$cond {
                return_error!($diag);
            }
        };
    }

    macro_rules! require_simd {
        ($ty: expr, $variant:ident) => {{
            require!($ty.is_simd(), InvalidMonomorphization::$variant { span, name, ty: $ty });
            $ty.simd_size_and_type(bx.tcx())
        }};
    }

    /// Returns the bitwidth of the `$ty` argument if it is an `Int` or `Uint` type.
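    /// For `isize`/`usize`, whose `bit_width()` is `None`, the target's
    /// pointer width is used instead.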
    macro_rules! require_int_or_uint_ty {
        ($ty: expr, $diag: expr) => {
            match $ty {
                ty::Int(i) => i.bit_width().unwrap_or_else(|| bx.data_layout().pointer_size.bits()),
                ty::Uint(i) => {
                    i.bit_width().unwrap_or_else(|| bx.data_layout().pointer_size.bits())
                }
                _ => {
                    return_error!($diag);
                }
            }
        };
    }

    /// Converts a vector mask, where each element has a bit width equal to the data elements it is used with,
    /// down to an i1 based mask that can be used by llvm intrinsics.
    ///
    /// The rust simd semantics are that each element should either consist of all ones or all zeroes,
    /// but this information is not available to llvm. Truncating the vector effectively uses the lowest bit,
    /// but codegen for several targets is better if we consider the highest bit by shifting.
    ///
    /// For x86 SSE/AVX targets this is beneficial since most instructions with mask parameters only consider the highest bit.
    /// So even though on llvm level we have an additional shift, in the final assembly there is no shift or truncate and
    /// instead the mask can be used as is.
    ///
    /// For aarch64 and other targets there is a benefit because a mask from the sign bit can be more
    /// efficiently converted to an all ones / all zeroes mask by comparing whether each element is negative.
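    ///
    /// For example, a lane of `0x80u8` (sign bit set) becomes `true` after the
    /// shift and truncate, while a lane of `0x00` becomes `false`, so an
    /// all-ones lane maps to `true` and an all-zeroes lane to `false`.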
    fn vector_mask_to_bitmask<'a, 'll, 'tcx>(
        bx: &mut Builder<'a, 'll, 'tcx>,
        i_xn: &'ll Value,
        in_elem_bitwidth: u64,
        in_len: u64,
    ) -> &'ll Value {
        // Shift the MSB to the right by "in_elem_bitwidth - 1" into the first bit position.
        let shift_idx = bx.cx.const_int(bx.type_ix(in_elem_bitwidth), (in_elem_bitwidth - 1) as _);
        let shift_indices = vec![shift_idx; in_len as _];
        let i_xn_msb = bx.lshr(i_xn, bx.const_vector(shift_indices.as_slice()));
        // Truncate vector to an <i1 x N>
        bx.trunc(i_xn_msb, bx.type_vector(bx.type_i1(), in_len))
    }

    // Sanity-check: all vector arguments must be immediates.
    if cfg!(debug_assertions) {
        for arg in args {
            if arg.layout.ty.is_simd() {
                assert_matches!(arg.val, OperandValue::Immediate(_));
            }
        }
    }

    if name == sym::simd_select_bitmask {
        let (len, _) = require_simd!(args[1].layout.ty, SimdArgument);

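        // The mask is either an integer of at least 8 bits or a byte array:
        // e.g. a 4-lane mask needs `expected_int_bits = 8` (a `u8`) or a
        // one-byte array (`expected_bytes = 1`).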
        let expected_int_bits = len.max(8).next_power_of_two();
        let expected_bytes = len.div_ceil(8);
1211
1212        let mask_ty = args[0].layout.ty;
1213        let mask = match mask_ty.kind() {
1214            ty::Int(i) if i.bit_width() == Some(expected_int_bits) => args[0].immediate(),
1215            ty::Uint(i) if i.bit_width() == Some(expected_int_bits) => args[0].immediate(),
1216            ty::Array(elem, len)
1217                if matches!(elem.kind(), ty::Uint(ty::UintTy::U8))
1218                    && len
1219                        .try_to_target_usize(bx.tcx)
1220                        .expect("expected monomorphic const in codegen")
1221                        == expected_bytes =>
1222            {
1223                let place = PlaceRef::alloca(bx, args[0].layout);
1224                args[0].val.store(bx, place);
1225                let int_ty = bx.type_ix(expected_bytes * 8);
1226                bx.load(int_ty, place.val.llval, Align::ONE)
1227            }
1228            _ => return_error!(InvalidMonomorphization::InvalidBitmask {
1229                span,
1230                name,
1231                mask_ty,
1232                expected_int_bits,
1233                expected_bytes
1234            }),
1235        };
1236
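        // Lowering sketch (illustrative, `len == 4` with an `i8` bitmask):
        //   %m4 = trunc i8 %mask to i4
        //   %m  = bitcast i4 %m4 to <4 x i1>
        //   %r  = select <4 x i1> %m, <4 x i32> %a, <4 x i32> %b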
1237        let i1 = bx.type_i1();
1238        let im = bx.type_ix(len);
1239        let i1xn = bx.type_vector(i1, len);
1240        let m_im = bx.trunc(mask, im);
1241        let m_i1s = bx.bitcast(m_im, i1xn);
1242        return Ok(bx.select(m_i1s, args[1].immediate(), args[2].immediate()));
1243    }
1244
1245    // Every intrinsic below takes a SIMD vector as its first argument.
1246    let (in_len, in_elem) = require_simd!(args[0].layout.ty, SimdInput);
1247    let in_ty = args[0].layout.ty;
1248
1249    let comparison = match name {
1250        sym::simd_eq => Some(BinOp::Eq),
1251        sym::simd_ne => Some(BinOp::Ne),
1252        sym::simd_lt => Some(BinOp::Lt),
1253        sym::simd_le => Some(BinOp::Le),
1254        sym::simd_gt => Some(BinOp::Gt),
1255        sym::simd_ge => Some(BinOp::Ge),
1256        _ => None,
1257    };
1258
1259    if let Some(cmp_op) = comparison {
1260        let (out_len, out_ty) = require_simd!(ret_ty, SimdReturn);
1261
1262        require!(
1263            in_len == out_len,
1264            InvalidMonomorphization::ReturnLengthInputType {
1265                span,
1266                name,
1267                in_len,
1268                in_ty,
1269                ret_ty,
1270                out_len
1271            }
1272        );
1273        require!(
1274            bx.type_kind(bx.element_type(llret_ty)) == TypeKind::Integer,
1275            InvalidMonomorphization::ReturnIntegerType { span, name, ret_ty, out_ty }
1276        );
1277
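        // `compare_simd_types` (in `rustc_codegen_ssa`) emits an `icmp`/`fcmp` on the
        // operands and sign-extends the resulting `<N x i1>` into the integer return
        // vector, so `true` lanes become all-ones.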
1278        return Ok(compare_simd_types(
1279            bx,
1280            args[0].immediate(),
1281            args[1].immediate(),
1282            in_elem,
1283            llret_ty,
1284            cmp_op,
1285        ));
1286    }
1287
1288    if name == sym::simd_shuffle_const_generic {
1289        let idx = fn_args[2].expect_const().to_value().valtree.unwrap_branch();
1290        let n = idx.len() as u64;
1291
1292        let (out_len, out_ty) = require_simd!(ret_ty, SimdReturn);
1293        require!(
1294            out_len == n,
1295            InvalidMonomorphization::ReturnLength { span, name, in_len: n, ret_ty, out_len }
1296        );
1297        require!(
1298            in_elem == out_ty,
1299            InvalidMonomorphization::ReturnElement { span, name, in_elem, in_ty, ret_ty, out_ty }
1300        );
1301
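        // A shuffle selects lanes from the concatenation of its two inputs, so every
        // index must be smaller than twice the input length.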
1302        let total_len = in_len * 2;
1303
1304        let indices: Option<Vec<_>> = idx
1305            .iter()
1306            .enumerate()
1307            .map(|(arg_idx, val)| {
1308                let idx = val.unwrap_leaf().to_i32();
1309                if idx >= i32::try_from(total_len).unwrap() {
1310                    bx.sess().dcx().emit_err(InvalidMonomorphization::SimdIndexOutOfBounds {
1311                        span,
1312                        name,
1313                        arg_idx: arg_idx as u64,
1314                        total_len: total_len.into(),
1315                    });
1316                    None
1317                } else {
1318                    Some(bx.const_i32(idx))
1319                }
1320            })
1321            .collect();
1322        let Some(indices) = indices else {
1323            return Ok(bx.const_null(llret_ty));
1324        };
1325
1326        return Ok(bx.shuffle_vector(
1327            args[0].immediate(),
1328            args[1].immediate(),
1329            bx.const_vector(&indices),
1330        ));
1331    }
1332
1333    if name == sym::simd_shuffle {
1334        // Make sure this is actually a SIMD vector.
1335        let idx_ty = args[2].layout.ty;
1336        let n: u64 = if idx_ty.is_simd()
1337            && matches!(idx_ty.simd_size_and_type(bx.cx.tcx).1.kind(), ty::Uint(ty::UintTy::U32))
1338        {
1339            idx_ty.simd_size_and_type(bx.cx.tcx).0
1340        } else {
1341            return_error!(InvalidMonomorphization::SimdShuffle { span, name, ty: idx_ty })
1342        };
1343
1344        let (out_len, out_ty) = require_simd!(ret_ty, SimdReturn);
1345        require!(
1346            out_len == n,
1347            InvalidMonomorphization::ReturnLength { span, name, in_len: n, ret_ty, out_len }
1348        );
1349        require!(
1350            in_elem == out_ty,
1351            InvalidMonomorphization::ReturnElement { span, name, in_elem, in_ty, ret_ty, out_ty }
1352        );
1353
1354        let total_len = u128::from(in_len) * 2;
1355
1356        // Check that the indices are in-bounds.
1357        let indices = args[2].immediate();
1358        for i in 0..n {
1359            let val = bx.const_get_elt(indices, i as u64);
1360            let idx = bx
1361                .const_to_opt_u128(val, true)
1362                .unwrap_or_else(|| bug!("typeck should have already ensured that these are const"));
1363            if idx >= total_len {
1364                return_error!(InvalidMonomorphization::SimdIndexOutOfBounds {
1365                    span,
1366                    name,
1367                    arg_idx: i,
1368                    total_len,
1369                });
1370            }
1371        }
1372
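        // Illustrative lowering for two `<4 x i32>` inputs interleaving the low halves:
        //   %r = shufflevector <4 x i32> %a, <4 x i32> %b,
        //                      <4 x i32> <i32 0, i32 4, i32 1, i32 5>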
1373        return Ok(bx.shuffle_vector(args[0].immediate(), args[1].immediate(), indices));
1374    }
1375
1376    if name == sym::simd_insert || name == sym::simd_insert_dyn {
1377        require!(
1378            in_elem == args[2].layout.ty,
1379            InvalidMonomorphization::InsertedType {
1380                span,
1381                name,
1382                in_elem,
1383                in_ty,
1384                out_ty: args[2].layout.ty
1385            }
1386        );
1387
1388        let index_imm = if name == sym::simd_insert {
1389            let idx = bx
1390                .const_to_opt_u128(args[1].immediate(), false)
1391                .expect("typeck should have ensured that this is a const");
1392            if idx >= in_len.into() {
1393                return_error!(InvalidMonomorphization::SimdIndexOutOfBounds {
1394                    span,
1395                    name,
1396                    arg_idx: 1,
1397                    total_len: in_len.into(),
1398                });
1399            }
1400            bx.const_i32(idx as i32)
1401        } else {
1402            args[1].immediate()
1403        };
1404
1405        return Ok(bx.insert_element(args[0].immediate(), args[2].immediate(), index_imm));
1406    }
1407    if name == sym::simd_extract || name == sym::simd_extract_dyn {
1408        require!(
1409            ret_ty == in_elem,
1410            InvalidMonomorphization::ReturnType { span, name, in_elem, in_ty, ret_ty }
1411        );
1412        let index_imm = if name == sym::simd_extract {
1413            let idx = bx
1414                .const_to_opt_u128(args[1].immediate(), false)
1415                .expect("typeck should have ensured that this is a const");
1416            if idx >= in_len.into() {
1417                return_error!(InvalidMonomorphization::SimdIndexOutOfBounds {
1418                    span,
1419                    name,
1420                    arg_idx: 1,
1421                    total_len: in_len.into(),
1422                });
1423            }
1424            bx.const_i32(idx as i32)
1425        } else {
1426            args[1].immediate()
1427        };
1428
1429        return Ok(bx.extract_element(args[0].immediate(), index_imm));
1430    }
1431
1432    if name == sym::simd_select {
1433        let m_elem_ty = in_elem;
1434        let m_len = in_len;
1435        let (v_len, _) = require_simd!(args[1].layout.ty, SimdArgument);
1436        require!(
1437            m_len == v_len,
1438            InvalidMonomorphization::MismatchedLengths { span, name, m_len, v_len }
1439        );
1440        let in_elem_bitwidth = require_int_or_uint_ty!(
1441            m_elem_ty.kind(),
1442            InvalidMonomorphization::MaskWrongElementType { span, name, ty: m_elem_ty }
1443        );
1444        let m_i1s = vector_mask_to_bitmask(bx, args[0].immediate(), in_elem_bitwidth, m_len);
1445        return Ok(bx.select(m_i1s, args[1].immediate(), args[2].immediate()));
1446    }
1447
1448    if name == sym::simd_bitmask {
1449        // The `fn simd_bitmask(vector) -> unsigned integer` intrinsic takes a vector mask and
1450        // returns one bit for each lane (which must all be `0` or `!0`) in the form of either:
1451        // * an unsigned integer
1452        // * an array of `u8`
1453        // If the vector has fewer than 8 lanes, a `u8` is returned with zeroed trailing bits.
1454        //
1455        // The bit order of the result depends on the byte endianness: LSB-first for
1456        // little-endian and MSB-first for big-endian targets.
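        //
        // For example (illustrative), a `<4 x i32>` mask of `[!0, 0, !0, 0]` yields the
        // `u8` value `0b0101` on a little-endian target.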
1457        let expected_int_bits = in_len.max(8).next_power_of_two();
1458        let expected_bytes = in_len.div_ceil(8);
1459
1460        // Integer vector <in_len x i{in_bitwidth}>:
1461        let in_elem_bitwidth = require_int_or_uint_ty!(
1462            in_elem.kind(),
1463            InvalidMonomorphization::MaskWrongElementType { span, name, ty: in_elem }
1464        );
1465
1466        let i1xn = vector_mask_to_bitmask(bx, args[0].immediate(), in_elem_bitwidth, in_len);
1467        // Bitcast <N x i1> to iN:
1468        let i_ = bx.bitcast(i1xn, bx.type_ix(in_len));
1469
1470        match ret_ty.kind() {
1471            ty::Uint(i) if i.bit_width() == Some(expected_int_bits) => {
1472                // Zero-extend iN to the bitmask type:
1473                return Ok(bx.zext(i_, bx.type_ix(expected_int_bits)));
1474            }
1475            ty::Array(elem, len)
1476                if matches!(elem.kind(), ty::Uint(ty::UintTy::U8))
1477                    && len
1478                        .try_to_target_usize(bx.tcx)
1479                        .expect("expected monomorphic const in codegen")
1480                        == expected_bytes =>
1481            {
1482                // Zero-extend iN to the array length:
1483                let ze = bx.zext(i_, bx.type_ix(expected_bytes * 8));
1484
1485                // Convert the integer to a byte array
1486                let ptr = bx.alloca(Size::from_bytes(expected_bytes), Align::ONE);
1487                bx.store(ze, ptr, Align::ONE);
1488                let array_ty = bx.type_array(bx.type_i8(), expected_bytes);
1489                return Ok(bx.load(array_ty, ptr, Align::ONE));
1490            }
1491            _ => return_error!(InvalidMonomorphization::CannotReturn {
1492                span,
1493                name,
1494                ret_ty,
1495                expected_int_bits,
1496                expected_bytes
1497            }),
1498        }
1499    }
1500
1501    fn simd_simple_float_intrinsic<'ll, 'tcx>(
1502        name: Symbol,
1503        in_elem: Ty<'_>,
1504        in_ty: Ty<'_>,
1505        in_len: u64,
1506        bx: &mut Builder<'_, 'll, 'tcx>,
1507        span: Span,
1508        args: &[OperandRef<'tcx, &'ll Value>],
1509    ) -> Result<&'ll Value, ()> {
1510        macro_rules! return_error {
1511            ($diag: expr) => {{
1512                bx.sess().dcx().emit_err($diag);
1513                return Err(());
1514            }};
1515        }
1516
1517        let elem_ty = if let ty::Float(f) = in_elem.kind() {
1518            bx.cx.type_float_from_ty(*f)
1519        } else {
1520            return_error!(InvalidMonomorphization::FloatingPointType { span, name, in_ty });
1521        };
1522
1523        let vec_ty = bx.type_vector(elem_ty, in_len);
1524
1525        let intr_name = match name {
1526            sym::simd_ceil => "llvm.ceil",
1527            sym::simd_fabs => "llvm.fabs",
1528            sym::simd_fcos => "llvm.cos",
1529            sym::simd_fexp2 => "llvm.exp2",
1530            sym::simd_fexp => "llvm.exp",
1531            sym::simd_flog10 => "llvm.log10",
1532            sym::simd_flog2 => "llvm.log2",
1533            sym::simd_flog => "llvm.log",
1534            sym::simd_floor => "llvm.floor",
1535            sym::simd_fma => "llvm.fma",
1536            sym::simd_relaxed_fma => "llvm.fmuladd",
1537            sym::simd_fsin => "llvm.sin",
1538            sym::simd_fsqrt => "llvm.sqrt",
1539            sym::simd_round => "llvm.round",
1540            sym::simd_trunc => "llvm.trunc",
1541            _ => return_error!(InvalidMonomorphization::UnrecognizedIntrinsic { span, name }),
1542        };
1543        Ok(bx.call_intrinsic(
1544            intr_name,
1545            &[vec_ty],
1546            &args.iter().map(|arg| arg.immediate()).collect::<Vec<_>>(),
1547        ))
1548    }
1549
1550    if matches!(
1551        name,
1552        sym::simd_ceil
1553            | sym::simd_fabs
1554            | sym::simd_fcos
1555            | sym::simd_fexp2
1556            | sym::simd_fexp
1557            | sym::simd_flog10
1558            | sym::simd_flog2
1559            | sym::simd_flog
1560            | sym::simd_floor
1561            | sym::simd_fma
1562            | sym::simd_fsin
1563            | sym::simd_fsqrt
1564            | sym::simd_relaxed_fma
1565            | sym::simd_round
1566            | sym::simd_trunc
1567    ) {
1568        return simd_simple_float_intrinsic(name, in_elem, in_ty, in_len, bx, span, args);
1569    }
1570
1571    fn llvm_vector_ty<'ll>(cx: &CodegenCx<'ll, '_>, elem_ty: Ty<'_>, vec_len: u64) -> &'ll Type {
1572        let elem_ty = match *elem_ty.kind() {
1573            ty::Int(v) => cx.type_int_from_ty(v),
1574            ty::Uint(v) => cx.type_uint_from_ty(v),
1575            ty::Float(v) => cx.type_float_from_ty(v),
1576            ty::RawPtr(_, _) => cx.type_ptr(),
1577            _ => unreachable!(),
1578        };
1579        cx.type_vector(elem_ty, vec_len)
1580    }
1581
1582    if name == sym::simd_gather {
1583        // simd_gather(values: <N x T>, pointers: <N x *_ T>,
1584        //             mask: <N x i{M}>) -> <N x T>
1585        // * N: number of elements in the input vectors
1586        // * T: type of the element to load
1587        // * M: any integer width is supported, will be truncated to i1
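        // Lowering sketch (illustrative, `<4 x f32>` elements with opaque pointers):
        //   %r = call <4 x float> @llvm.masked.gather.v4f32.v4p0(
        //            <4 x ptr> %ptrs, i32 4, <4 x i1> %mask, <4 x float> %passthru)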
1588
1589        // All types must be simd vector types
1590
1591        // The second argument must be a simd vector with an element type that's a pointer
1592        // to the element type of the first argument
1593        let (_, element_ty0) = require_simd!(in_ty, SimdFirst);
1594        let (out_len, element_ty1) = require_simd!(args[1].layout.ty, SimdSecond);
1595        // The element type of the third argument must be an integer type (signed or unsigned) of any width:
1596        let (out_len2, element_ty2) = require_simd!(args[2].layout.ty, SimdThird);
1597        require_simd!(ret_ty, SimdReturn);
1598
1599        // Of the same length:
1600        require!(
1601            in_len == out_len,
1602            InvalidMonomorphization::SecondArgumentLength {
1603                span,
1604                name,
1605                in_len,
1606                in_ty,
1607                arg_ty: args[1].layout.ty,
1608                out_len
1609            }
1610        );
1611        require!(
1612            in_len == out_len2,
1613            InvalidMonomorphization::ThirdArgumentLength {
1614                span,
1615                name,
1616                in_len,
1617                in_ty,
1618                arg_ty: args[2].layout.ty,
1619                out_len: out_len2
1620            }
1621        );
1622
1623        // The return type must match the first argument type
1624        require!(
1625            ret_ty == in_ty,
1626            InvalidMonomorphization::ExpectedReturnType { span, name, in_ty, ret_ty }
1627        );
1628
1629        require!(
1630            matches!(
1631                *element_ty1.kind(),
1632                ty::RawPtr(p_ty, _) if p_ty == in_elem && p_ty.kind() == element_ty0.kind()
1633            ),
1634            InvalidMonomorphization::ExpectedElementType {
1635                span,
1636                name,
1637                expected_element: element_ty1,
1638                second_arg: args[1].layout.ty,
1639                in_elem,
1640                in_ty,
1641                mutability: ExpectedPointerMutability::Not,
1642            }
1643        );
1644
1645        let mask_elem_bitwidth = require_int_or_uint_ty!(
1646            element_ty2.kind(),
1647            InvalidMonomorphization::MaskWrongElementType { span, name, ty: element_ty2 }
1648        );
1649
1650        // Alignment of T, must be a constant integer value:
1651        let alignment = bx.const_i32(bx.align_of(in_elem).bytes() as i32);
1652
1653        // Truncate the mask vector to a vector of i1s:
1654        let mask = vector_mask_to_bitmask(bx, args[2].immediate(), mask_elem_bitwidth, in_len);
1655
1656        // Type of the vector of pointers:
1657        let llvm_pointer_vec_ty = llvm_vector_ty(bx, element_ty1, in_len);
1658
1659        // Type of the vector of elements:
1660        let llvm_elem_vec_ty = llvm_vector_ty(bx, element_ty0, in_len);
1661
1662        return Ok(bx.call_intrinsic(
1663            "llvm.masked.gather",
1664            &[llvm_elem_vec_ty, llvm_pointer_vec_ty],
1665            &[args[1].immediate(), alignment, mask, args[0].immediate()],
1666        ));
1667    }
1668
1669    if name == sym::simd_masked_load {
1670        // simd_masked_load(mask: <N x i{M}>, pointer: *_ T, values: <N x T>) -> <N x T>
1671        // * N: number of elements in the input vectors
1672        // * T: type of the element to load
1673        // * M: any integer width is supported, will be truncated to i1
1674        // Loads contiguous elements from memory behind `pointer`, but only for
1675        // those lanes whose `mask` bit is enabled.
1676        // The memory addresses corresponding to the “off” lanes are not accessed.
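        // Lowering sketch (illustrative, `<4 x f32>` elements):
        //   %r = call <4 x float> @llvm.masked.load.v4f32.p0(
        //            ptr %p, i32 4, <4 x i1> %mask, <4 x float> %passthru)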
1677
1678        // The element type of the "mask" argument must be an integer type (signed or unsigned) of any width
1679        let mask_ty = in_ty;
1680        let (mask_len, mask_elem) = (in_len, in_elem);
1681
1682        // The second argument must be a pointer matching the element type
1683        let pointer_ty = args[1].layout.ty;
1684
1685        // The last argument is a passthrough vector providing values for disabled lanes
1686        let values_ty = args[2].layout.ty;
1687        let (values_len, values_elem) = require_simd!(values_ty, SimdThird);
1688
1689        require_simd!(ret_ty, SimdReturn);
1690
1691        // Of the same length:
1692        require!(
1693            values_len == mask_len,
1694            InvalidMonomorphization::ThirdArgumentLength {
1695                span,
1696                name,
1697                in_len: mask_len,
1698                in_ty: mask_ty,
1699                arg_ty: values_ty,
1700                out_len: values_len
1701            }
1702        );
1703
1704        // The return type must match the last argument type
1705        require!(
1706            ret_ty == values_ty,
1707            InvalidMonomorphization::ExpectedReturnType { span, name, in_ty: values_ty, ret_ty }
1708        );
1709
1710        require!(
1711            matches!(
1712                *pointer_ty.kind(),
1713                ty::RawPtr(p_ty, _) if p_ty == values_elem && p_ty.kind() == values_elem.kind()
1714            ),
1715            InvalidMonomorphization::ExpectedElementType {
1716                span,
1717                name,
1718                expected_element: values_elem,
1719                second_arg: pointer_ty,
1720                in_elem: values_elem,
1721                in_ty: values_ty,
1722                mutability: ExpectedPointerMutability::Not,
1723            }
1724        );
1725
1726        let m_elem_bitwidth = require_int_or_uint_ty!(
1727            mask_elem.kind(),
1728            InvalidMonomorphization::MaskWrongElementType { span, name, ty: mask_elem }
1729        );
1730
1731        let mask = vector_mask_to_bitmask(bx, args[0].immediate(), m_elem_bitwidth, mask_len);
1732
1733        // Alignment of T, must be a constant integer value:
1734        let alignment = bx.const_i32(bx.align_of(values_elem).bytes() as i32);
1735
1736        let llvm_pointer = bx.type_ptr();
1737
1738        // Type of the vector of elements:
1739        let llvm_elem_vec_ty = llvm_vector_ty(bx, values_elem, values_len);
1740
1741        return Ok(bx.call_intrinsic(
1742            "llvm.masked.load",
1743            &[llvm_elem_vec_ty, llvm_pointer],
1744            &[args[1].immediate(), alignment, mask, args[2].immediate()],
1745        ));
1746    }
1747
1748    if name == sym::simd_masked_store {
1749        // simd_masked_store(mask: <N x i{M}>, pointer: *mut T, values: <N x T>) -> ()
1750        // * N: number of elements in the input vectors
1751        // * T: type of the element to store
1752        // * M: any integer width is supported, will be truncated to i1
1753        // Stores contiguous elements to memory behind `pointer`, but only for
1754        // those lanes whose `mask` bit is enabled.
1755        // The memory addresses corresponding to the “off” lanes are not accessed.
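        // Lowering sketch (illustrative, `<4 x f32>` elements):
        //   call void @llvm.masked.store.v4f32.p0(
        //       <4 x float> %values, ptr %p, i32 4, <4 x i1> %mask)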
1756
1757        // The element type of the "mask" argument must be an integer type (signed or unsigned) of any width
1758        let mask_ty = in_ty;
1759        let (mask_len, mask_elem) = (in_len, in_elem);
1760
1761        // The second argument must be a pointer matching the element type
1762        let pointer_ty = args[1].layout.ty;
1763
1764        // The last argument specifies the values to store to memory
1765        let values_ty = args[2].layout.ty;
1766        let (values_len, values_elem) = require_simd!(values_ty, SimdThird);
1767
1768        // Of the same length:
1769        require!(
1770            values_len == mask_len,
1771            InvalidMonomorphization::ThirdArgumentLength {
1772                span,
1773                name,
1774                in_len: mask_len,
1775                in_ty: mask_ty,
1776                arg_ty: values_ty,
1777                out_len: values_len
1778            }
1779        );
1780
1781        // The second argument must be a mutable pointer type matching the element type
1782        require!(
1783            matches!(
1784                *pointer_ty.kind(),
1785                ty::RawPtr(p_ty, p_mutbl)
1786                    if p_ty == values_elem && p_ty.kind() == values_elem.kind() && p_mutbl.is_mut()
1787            ),
1788            InvalidMonomorphization::ExpectedElementType {
1789                span,
1790                name,
1791                expected_element: values_elem,
1792                second_arg: pointer_ty,
1793                in_elem: values_elem,
1794                in_ty: values_ty,
1795                mutability: ExpectedPointerMutability::Mut,
1796            }
1797        );
1798
1799        let m_elem_bitwidth = require_int_or_uint_ty!(
1800            mask_elem.kind(),
1801            InvalidMonomorphization::MaskWrongElementType { span, name, ty: mask_elem }
1802        );
1803
1804        let mask = vector_mask_to_bitmask(bx, args[0].immediate(), m_elem_bitwidth, mask_len);
1805
1806        // Alignment of T, must be a constant integer value:
1807        let alignment = bx.const_i32(bx.align_of(values_elem).bytes() as i32);
1808
1809        let llvm_pointer = bx.type_ptr();
1810
1811        // Type of the vector of elements:
1812        let llvm_elem_vec_ty = llvm_vector_ty(bx, values_elem, values_len);
1813
1814        return Ok(bx.call_intrinsic(
1815            "llvm.masked.store",
1816            &[llvm_elem_vec_ty, llvm_pointer],
1817            &[args[2].immediate(), args[1].immediate(), alignment, mask],
1818        ));
1819    }
1820
1821    if name == sym::simd_scatter {
1822        // simd_scatter(values: <N x T>, pointers: <N x *mut T>,
1823        //             mask: <N x i{M}>) -> ()
1824        // * N: number of elements in the input vectors
1825        // * T: type of the element to store
1826        // * M: any integer width is supported, will be truncated to i1
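        // Lowering sketch (illustrative, `<4 x f32>` elements with opaque pointers):
        //   call void @llvm.masked.scatter.v4f32.v4p0(
        //       <4 x float> %values, <4 x ptr> %ptrs, i32 4, <4 x i1> %mask)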
1827
1828        // All types must be simd vector types
1829        // The second argument must be a simd vector with an element type that's a pointer
1830        // to the element type of the first argument
1831        let (_, element_ty0) = require_simd!(in_ty, SimdFirst);
1832        let (element_len1, element_ty1) = require_simd!(args[1].layout.ty, SimdSecond);
1833        let (element_len2, element_ty2) = require_simd!(args[2].layout.ty, SimdThird);
1834
1835        // Of the same length:
1836        require!(
1837            in_len == element_len1,
1838            InvalidMonomorphization::SecondArgumentLength {
1839                span,
1840                name,
1841                in_len,
1842                in_ty,
1843                arg_ty: args[1].layout.ty,
1844                out_len: element_len1
1845            }
1846        );
1847        require!(
1848            in_len == element_len2,
1849            InvalidMonomorphization::ThirdArgumentLength {
1850                span,
1851                name,
1852                in_len,
1853                in_ty,
1854                arg_ty: args[2].layout.ty,
1855                out_len: element_len2
1856            }
1857        );
1858
1859        require!(
1860            matches!(
1861                *element_ty1.kind(),
1862                ty::RawPtr(p_ty, p_mutbl)
1863                    if p_ty == in_elem && p_mutbl.is_mut() && p_ty.kind() == element_ty0.kind()
1864            ),
1865            InvalidMonomorphization::ExpectedElementType {
1866                span,
1867                name,
1868                expected_element: element_ty1,
1869                second_arg: args[1].layout.ty,
1870                in_elem,
1871                in_ty,
1872                mutability: ExpectedPointerMutability::Mut,
1873            }
1874        );
1875
1876        // The element type of the third argument must be an integer type of any width:
1877        let mask_elem_bitwidth = require_int_or_uint_ty!(
1878            element_ty2.kind(),
1879            InvalidMonomorphization::MaskWrongElementType { span, name, ty: element_ty2 }
1880        );
1881
1882        // Alignment of T, must be a constant integer value:
1883        let alignment = bx.const_i32(bx.align_of(in_elem).bytes() as i32);
1884
1885        // Truncate the mask vector to a vector of i1s:
1886        let mask = vector_mask_to_bitmask(bx, args[2].immediate(), mask_elem_bitwidth, in_len);
1887
1888        // Type of the vector of pointers:
1889        let llvm_pointer_vec_ty = llvm_vector_ty(bx, element_ty1, in_len);
1890
1891        // Type of the vector of elements:
1892        let llvm_elem_vec_ty = llvm_vector_ty(bx, element_ty0, in_len);
1893
1894        return Ok(bx.call_intrinsic(
1895            "llvm.masked.scatter",
1896            &[llvm_elem_vec_ty, llvm_pointer_vec_ty],
1897            &[args[0].immediate(), args[1].immediate(), alignment, mask],
1898        ));
1899    }
1900
1901    macro_rules! arith_red {
1902        ($name:ident : $integer_reduce:ident, $float_reduce:ident, $ordered:expr, $op:ident,
1903         $identity:expr) => {
1904            if name == sym::$name {
1905                require!(
1906                    ret_ty == in_elem,
1907                    InvalidMonomorphization::ReturnType { span, name, in_elem, in_ty, ret_ty }
1908                );
1909                return match in_elem.kind() {
1910                    ty::Int(_) | ty::Uint(_) => {
1911                        let r = bx.$integer_reduce(args[0].immediate());
1912                        if $ordered {
1913                            // if overflow occurs, the result is the
1914                            // mathematical result modulo 2^n:
1915                            Ok(bx.$op(args[1].immediate(), r))
1916                        } else {
1917                            Ok(r) // reuse the reduction computed above
1918                        }
1919                    }
1920                    ty::Float(f) => {
1921                        let acc = if $ordered {
1922                            // ordered arithmetic reductions take an accumulator
1923                            args[1].immediate()
1924                        } else {
1925                            // unordered arithmetic reductions use the identity accumulator
1926                            match f.bit_width() {
1927                                32 => bx.const_real(bx.type_f32(), $identity),
1928                                64 => bx.const_real(bx.type_f64(), $identity),
1929                                v => return_error!(
1930                                    InvalidMonomorphization::UnsupportedSymbolOfSize {
1931                                        span,
1932                                        name,
1933                                        symbol: sym::$name,
1934                                        in_ty,
1935                                        in_elem,
1936                                        size: v,
1937                                        ret_ty
1938                                    }
1939                                ),
1940                            }
1941                        };
1942                        Ok(bx.$float_reduce(acc, args[0].immediate()))
1943                    }
1944                    _ => return_error!(InvalidMonomorphization::UnsupportedSymbol {
1945                        span,
1946                        name,
1947                        symbol: sym::$name,
1948                        in_ty,
1949                        in_elem,
1950                        ret_ty
1951                    }),
1952                };
1953            }
1954        };
1955    }
1956
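    // `-0.0` is the identity for `fadd` (`-0.0 + x == x` for every `x`, including
    // `+0.0`) and `1.0` is the identity for `fmul`; the unordered reductions seed
    // their reassociative LLVM counterparts with these values.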
1957    arith_red!(simd_reduce_add_ordered: vector_reduce_add, vector_reduce_fadd, true, add, -0.0);
1958    arith_red!(simd_reduce_mul_ordered: vector_reduce_mul, vector_reduce_fmul, true, mul, 1.0);
1959    arith_red!(
1960        simd_reduce_add_unordered: vector_reduce_add,
1961        vector_reduce_fadd_reassoc,
1962        false,
1963        add,
1964        -0.0
1965    );
1966    arith_red!(
1967        simd_reduce_mul_unordered: vector_reduce_mul,
1968        vector_reduce_fmul_reassoc,
1969        false,
1970        mul,
1971        1.0
1972    );
1973
1974    macro_rules! minmax_red {
1975        ($name:ident: $int_red:ident, $float_red:ident) => {
1976            if name == sym::$name {
1977                require!(
1978                    ret_ty == in_elem,
1979                    InvalidMonomorphization::ReturnType { span, name, in_elem, in_ty, ret_ty }
1980                );
1981                return match in_elem.kind() {
1982                    ty::Int(_i) => Ok(bx.$int_red(args[0].immediate(), true)),
1983                    ty::Uint(_u) => Ok(bx.$int_red(args[0].immediate(), false)),
1984                    ty::Float(_f) => Ok(bx.$float_red(args[0].immediate())),
1985                    _ => return_error!(InvalidMonomorphization::UnsupportedSymbol {
1986                        span,
1987                        name,
1988                        symbol: sym::$name,
1989                        in_ty,
1990                        in_elem,
1991                        ret_ty
1992                    }),
1993                };
1994            }
1995        };
1996    }
1997
1998    minmax_red!(simd_reduce_min: vector_reduce_min, vector_reduce_fmin);
1999    minmax_red!(simd_reduce_max: vector_reduce_max, vector_reduce_fmax);
2000
2001    macro_rules! bitwise_red {
2002        ($name:ident : $red:ident, $boolean:expr) => {
2003            if name == sym::$name {
2004                let input = if !$boolean {
2005                    require!(
2006                        ret_ty == in_elem,
2007                        InvalidMonomorphization::ReturnType { span, name, in_elem, in_ty, ret_ty }
2008                    );
2009                    args[0].immediate()
2010                } else {
2011                    let bitwidth = match in_elem.kind() {
2012                        ty::Int(i) => {
2013                            i.bit_width().unwrap_or_else(|| bx.data_layout().pointer_size.bits())
2014                        }
2015                        ty::Uint(i) => {
2016                            i.bit_width().unwrap_or_else(|| bx.data_layout().pointer_size.bits())
2017                        }
2018                        _ => return_error!(InvalidMonomorphization::UnsupportedSymbol {
2019                            span,
2020                            name,
2021                            symbol: sym::$name,
2022                            in_ty,
2023                            in_elem,
2024                            ret_ty
2025                        }),
2026                    };
2027
2028                    vector_mask_to_bitmask(bx, args[0].immediate(), bitwidth, in_len as _)
2029                };
2030                return match in_elem.kind() {
2031                    ty::Int(_) | ty::Uint(_) => {
2032                        let r = bx.$red(input);
2033                        Ok(if !$boolean { r } else { bx.zext(r, bx.type_bool()) })
2034                    }
2035                    _ => return_error!(InvalidMonomorphization::UnsupportedSymbol {
2036                        span,
2037                        name,
2038                        symbol: sym::$name,
2039                        in_ty,
2040                        in_elem,
2041                        ret_ty
2042                    }),
2043                };
2044            }
2045        };
2046    }
2047
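    // The plain bitwise reductions operate on the integer lanes directly; the boolean
    // `all`/`any` forms first collapse the mask to `<N x i1>` via
    // `vector_mask_to_bitmask`, reduce with `and`/`or`, and zero-extend the resulting
    // `i1` back to `bool`.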
2048    bitwise_red!(simd_reduce_and: vector_reduce_and, false);
2049    bitwise_red!(simd_reduce_or: vector_reduce_or, false);
2050    bitwise_red!(simd_reduce_xor: vector_reduce_xor, false);
2051    bitwise_red!(simd_reduce_all: vector_reduce_and, true);
2052    bitwise_red!(simd_reduce_any: vector_reduce_or, true);
2053
2054    if name == sym::simd_cast_ptr {
2055        let (out_len, out_elem) = require_simd!(ret_ty, SimdReturn);
2056        require!(
2057            in_len == out_len,
2058            InvalidMonomorphization::ReturnLengthInputType {
2059                span,
2060                name,
2061                in_len,
2062                in_ty,
2063                ret_ty,
2064                out_len
2065            }
2066        );
2067
2068        match in_elem.kind() {
2069            ty::RawPtr(p_ty, _) => {
2070                let metadata = p_ty.ptr_metadata_ty(bx.tcx, |ty| {
2071                    bx.tcx.normalize_erasing_regions(bx.typing_env(), ty)
2072                });
2073                require!(
2074                    metadata.is_unit(),
2075                    InvalidMonomorphization::CastWidePointer { span, name, ty: in_elem }
2076                );
2077            }
2078            _ => {
2079                return_error!(InvalidMonomorphization::ExpectedPointer { span, name, ty: in_elem })
2080            }
2081        }
2082        match out_elem.kind() {
2083            ty::RawPtr(p_ty, _) => {
2084                let metadata = p_ty.ptr_metadata_ty(bx.tcx, |ty| {
2085                    bx.tcx.normalize_erasing_regions(bx.typing_env(), ty)
2086                });
2087                require!(
2088                    metadata.is_unit(),
2089                    InvalidMonomorphization::CastWidePointer { span, name, ty: out_elem }
2090                );
2091            }
2092            _ => {
2093                return_error!(InvalidMonomorphization::ExpectedPointer { span, name, ty: out_elem })
2094            }
2095        }
2096
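        // Under LLVM's opaque pointers, every thin pointer has the same LLVM type, so
        // the cast is a no-op and the operand is returned unchanged.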
2097        return Ok(args[0].immediate());
2098    }
2099
2100    if name == sym::simd_expose_provenance {
2101        let (out_len, out_elem) = require_simd!(ret_ty, SimdReturn);
2102        require!(
2103            in_len == out_len,
2104            InvalidMonomorphization::ReturnLengthInputType {
2105                span,
2106                name,
2107                in_len,
2108                in_ty,
2109                ret_ty,
2110                out_len
2111            }
2112        );
2113
2114        match in_elem.kind() {
2115            ty::RawPtr(_, _) => {}
2116            _ => {
2117                return_error!(InvalidMonomorphization::ExpectedPointer { span, name, ty: in_elem })
2118            }
2119        }
2120        match out_elem.kind() {
2121            ty::Uint(ty::UintTy::Usize) => {}
2122            _ => return_error!(InvalidMonomorphization::ExpectedUsize { span, name, ty: out_elem }),
2123        }
2124
2125        return Ok(bx.ptrtoint(args[0].immediate(), llret_ty));
2126    }
2127
2128    if name == sym::simd_with_exposed_provenance {
2129        let (out_len, out_elem) = require_simd!(ret_ty, SimdReturn);
2130        require!(
2131            in_len == out_len,
2132            InvalidMonomorphization::ReturnLengthInputType {
2133                span,
2134                name,
2135                in_len,
2136                in_ty,
2137                ret_ty,
2138                out_len
2139            }
2140        );
2141
2142        match in_elem.kind() {
2143            ty::Uint(ty::UintTy::Usize) => {}
2144            _ => return_error!(InvalidMonomorphization::ExpectedUsize { span, name, ty: in_elem }),
2145        }
2146        match out_elem.kind() {
2147            ty::RawPtr(_, _) => {}
2148            _ => {
2149                return_error!(InvalidMonomorphization::ExpectedPointer { span, name, ty: out_elem })
2150            }
2151        }
2152
2153        return Ok(bx.inttoptr(args[0].immediate(), llret_ty));
2154    }
2155
2156    if name == sym::simd_cast || name == sym::simd_as {
2157        let (out_len, out_elem) = require_simd!(ret_ty, SimdReturn);
2158        require!(
2159            in_len == out_len,
2160            InvalidMonomorphization::ReturnLengthInputType {
2161                span,
2162                name,
2163                in_len,
2164                in_ty,
2165                ret_ty,
2166                out_len
2167            }
2168        );
2169        // casting cares about nominal type, not just structural type
2170        if in_elem == out_elem {
2171            return Ok(args[0].immediate());
2172        }
2173
2174        #[derive(Copy, Clone)]
2175        enum Sign {
2176            Unsigned,
2177            Signed,
2178        }
2179        use Sign::*;
2180
2181        enum Style {
2182            Float,
2183            Int(Sign),
2184            Unsupported,
2185        }
2186
2187        let (in_style, in_width) = match in_elem.kind() {
2188            // `normalize` replaces pointer-sized integers with their fixed-width
2189            // equivalents, so `bit_width()` is always `Some` and this unwrap is safe.
2190            ty::Int(i) => (
2191                Style::Int(Signed),
2192                i.normalize(bx.tcx().sess.target.pointer_width).bit_width().unwrap(),
2193            ),
2194            ty::Uint(u) => (
2195                Style::Int(Unsigned),
2196                u.normalize(bx.tcx().sess.target.pointer_width).bit_width().unwrap(),
2197            ),
2198            ty::Float(f) => (Style::Float, f.bit_width()),
2199            _ => (Style::Unsupported, 0),
2200        };
2201        let (out_style, out_width) = match out_elem.kind() {
2202            ty::Int(i) => (
2203                Style::Int(Signed),
2204                i.normalize(bx.tcx().sess.target.pointer_width).bit_width().unwrap(),
2205            ),
2206            ty::Uint(u) => (
2207                Style::Int(Unsigned),
2208                u.normalize(bx.tcx().sess.target.pointer_width).bit_width().unwrap(),
2209            ),
2210            ty::Float(f) => (Style::Float, f.bit_width()),
2211            _ => (Style::Unsupported, 0),
2212        };
2213
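        // Dispatch on the (input, output) styles: int<->int uses trunc/sext/zext,
        // int->float uses sitofp/uitofp, float->int uses fptosi/fptoui (or the
        // saturating `cast_float_to_int` for `simd_as`), and float<->float uses
        // fptrunc/fpext.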
2214        match (in_style, out_style) {
2215            (Style::Int(sign), Style::Int(_)) => {
2216                return Ok(match in_width.cmp(&out_width) {
2217                    Ordering::Greater => bx.trunc(args[0].immediate(), llret_ty),
2218                    Ordering::Equal => args[0].immediate(),
2219                    Ordering::Less => match sign {
2220                        Sign::Signed => bx.sext(args[0].immediate(), llret_ty),
2221                        Sign::Unsigned => bx.zext(args[0].immediate(), llret_ty),
2222                    },
2223                });
2224            }
2225            (Style::Int(Sign::Signed), Style::Float) => {
2226                return Ok(bx.sitofp(args[0].immediate(), llret_ty));
2227            }
2228            (Style::Int(Sign::Unsigned), Style::Float) => {
2229                return Ok(bx.uitofp(args[0].immediate(), llret_ty));
2230            }
2231            (Style::Float, Style::Int(sign)) => {
2232                return Ok(match (sign, name == sym::simd_as) {
2233                    (Sign::Unsigned, false) => bx.fptoui(args[0].immediate(), llret_ty),
2234                    (Sign::Signed, false) => bx.fptosi(args[0].immediate(), llret_ty),
2235                    (_, true) => bx.cast_float_to_int(
2236                        matches!(sign, Sign::Signed),
2237                        args[0].immediate(),
2238                        llret_ty,
2239                    ),
2240                });
2241            }
2242            (Style::Float, Style::Float) => {
2243                return Ok(match in_width.cmp(&out_width) {
2244                    Ordering::Greater => bx.fptrunc(args[0].immediate(), llret_ty),
2245                    Ordering::Equal => args[0].immediate(),
2246                    Ordering::Less => bx.fpext(args[0].immediate(), llret_ty),
2247                });
2248            }
2249            _ => { /* Unsupported. Fallthrough. */ }
2250        }
2251        return_error!(InvalidMonomorphization::UnsupportedCast {
2252            span,
2253            name,
2254            in_ty,
2255            in_elem,
2256            ret_ty,
2257            out_elem
2258        });
2259    }
2260    macro_rules! arith_binary {
2261        ($($name: ident: $($($p: ident),* => $call: ident),*;)*) => {
2262            $(if name == sym::$name {
2263                match in_elem.kind() {
2264                    $($(ty::$p(_))|* => {
2265                        return Ok(bx.$call(args[0].immediate(), args[1].immediate()))
2266                    })*
2267                    _ => {},
2268                }
2269                return_error!(
2270                    InvalidMonomorphization::UnsupportedOperation { span, name, in_ty, in_elem }
2271                );
2272            })*
2273        }
2274    }
2275    arith_binary! {
2276        simd_add: Uint, Int => add, Float => fadd;
2277        simd_sub: Uint, Int => sub, Float => fsub;
2278        simd_mul: Uint, Int => mul, Float => fmul;
2279        simd_div: Uint => udiv, Int => sdiv, Float => fdiv;
2280        simd_rem: Uint => urem, Int => srem, Float => frem;
2281        simd_shl: Uint, Int => shl;
2282        simd_shr: Uint => lshr, Int => ashr;
2283        simd_and: Uint, Int => and;
2284        simd_or: Uint, Int => or;
2285        simd_xor: Uint, Int => xor;
2286        simd_fmax: Float => maxnum;
2287        simd_fmin: Float => minnum;
2289    }
2290    macro_rules! arith_unary {
2291        ($($name: ident: $($($p: ident),* => $call: ident),*;)*) => {
2292            $(if name == sym::$name {
2293                match in_elem.kind() {
2294                    $($(ty::$p(_))|* => {
2295                        return Ok(bx.$call(args[0].immediate()))
2296                    })*
2297                    _ => {},
2298                }
2299                return_error!(
2300                    InvalidMonomorphization::UnsupportedOperation { span, name, in_ty, in_elem }
2301                );
2302            })*
2303        }
2304    }
2305    arith_unary! {
2306        simd_neg: Int => neg, Float => fneg;
2307    }
2308
2309    // Unary integer intrinsics
2310    if matches!(
2311        name,
2312        sym::simd_bswap | sym::simd_bitreverse | sym::simd_ctlz | sym::simd_ctpop | sym::simd_cttz
2313    ) {
2314        let vec_ty = bx.cx.type_vector(
2315            match *in_elem.kind() {
2316                ty::Int(i) => bx.cx.type_int_from_ty(i),
2317                ty::Uint(i) => bx.cx.type_uint_from_ty(i),
2318                _ => return_error!(InvalidMonomorphization::UnsupportedOperation {
2319                    span,
2320                    name,
2321                    in_ty,
2322                    in_elem
2323                }),
2324            },
2325            in_len as u64,
2326        );
2327        let llvm_intrinsic = match name {
2328            sym::simd_bswap => "llvm.bswap",
2329            sym::simd_bitreverse => "llvm.bitreverse",
2330            sym::simd_ctlz => "llvm.ctlz",
2331            sym::simd_ctpop => "llvm.ctpop",
2332            sym::simd_cttz => "llvm.cttz",
2333            _ => unreachable!(),
2334        };
2335        let int_size = in_elem.int_size_and_signed(bx.tcx()).0.bits();
2336
2337        return match name {
2338            // byte swap is a no-op for i8/u8
2339            sym::simd_bswap if int_size == 8 => Ok(args[0].immediate()),
2340            sym::simd_ctlz | sym::simd_cttz => {
2341                // `llvm.ctlz` and `llvm.cttz` take a second `i1` argument; `true` makes a zero input poison, so pass `false`.
2342                let dont_poison_on_zero = bx.const_int(bx.type_i1(), 0);
2343                Ok(bx.call_intrinsic(
2344                    llvm_intrinsic,
2345                    &[vec_ty],
2346                    &[args[0].immediate(), dont_poison_on_zero],
2347                ))
2348            }
2349            sym::simd_bswap | sym::simd_bitreverse | sym::simd_ctpop => {
2350                // simple unary argument cases
2351                Ok(bx.call_intrinsic(llvm_intrinsic, &[vec_ty], &[args[0].immediate()]))
2352            }
2353            _ => unreachable!(),
2354        };
2355    }
2356
2357    if name == sym::simd_arith_offset {
2358        // This also checks that the first operand is a ptr type.
2359        let pointee = in_elem.builtin_deref(true).unwrap_or_else(|| {
2360            span_bug!(span, "must be called with a vector of pointer types as first argument")
2361        });
2362        let layout = bx.layout_of(pointee);
2363        let ptrs = args[0].immediate();
2364        // The second argument must be a ptr-sized integer.
2365        // (We don't care about the signedness, this is wrapping anyway.)
2366        let (_offsets_len, offsets_elem) = args[1].layout.ty.simd_size_and_type(bx.tcx());
2367        if !matches!(offsets_elem.kind(), ty::Int(ty::IntTy::Isize) | ty::Uint(ty::UintTy::Usize)) {
2368            span_bug!(
2369                span,
2370                "must be called with a vector of pointer-sized integers as second argument"
2371            );
2372        }
2373        let offsets = args[1].immediate();
2374
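        // This lowers to a vector GEP, e.g. (illustrative, `f32` pointee):
        //   %r = getelementptr float, <4 x ptr> %ptrs, <4 x i64> %offsets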
2375        return Ok(bx.gep(bx.backend_type(layout), ptrs, &[offsets]));
2376    }
2377
2378    if name == sym::simd_saturating_add || name == sym::simd_saturating_sub {
2379        let lhs = args[0].immediate();
2380        let rhs = args[1].immediate();
2381        let is_add = name == sym::simd_saturating_add;
2382        let (signed, elem_ty) = match *in_elem.kind() {
2383            ty::Int(i) => (true, bx.cx.type_int_from_ty(i)),
2384            ty::Uint(i) => (false, bx.cx.type_uint_from_ty(i)),
2385            _ => {
2386                return_error!(InvalidMonomorphization::ExpectedVectorElementType {
2387                    span,
2388                    name,
2389                    expected_element: args[0].layout.ty.simd_size_and_type(bx.tcx()).1,
2390                    vector_type: args[0].layout.ty
2391                });
2392            }
2393        };
2394        let llvm_intrinsic = format!(
2395            "llvm.{}{}.sat",
2396            if signed { 's' } else { 'u' },
2397            if is_add { "add" } else { "sub" },
2398        );
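        // e.g. `llvm.sadd.sat` for signed addition or `llvm.usub.sat` for unsigned
        // subtraction.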
2399        let vec_ty = bx.cx.type_vector(elem_ty, in_len as u64);
2400
2401        return Ok(bx.call_intrinsic(llvm_intrinsic, &[vec_ty], &[lhs, rhs]));
2402    }
2403
2404    span_bug!(span, "unknown SIMD intrinsic");
2405}