use rustc_abi::WrappingRange;
use rustc_middle::mir::SourceInfo;
use rustc_middle::ty::{self, Ty, TyCtxt};
use rustc_middle::{bug, span_bug};
use rustc_session::config::OptLevel;
use rustc_span::sym;

use super::FunctionCx;
use super::operand::OperandRef;
use super::place::PlaceRef;
use crate::errors::InvalidMonomorphization;
use crate::traits::*;
use crate::{MemFlags, errors, meth, size_of_val};

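/// Shared lowering for the `copy`-family intrinsics: emits a `memmove` (when
/// overlap is allowed) or `memcpy` of `count * size_of::<T>()` bytes,
/// optionally marked volatile.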
fn copy_intrinsic<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
    bx: &mut Bx,
    allow_overlap: bool,
    volatile: bool,
    ty: Ty<'tcx>,
    dst: Bx::Value,
    src: Bx::Value,
    count: Bx::Value,
) {
    let layout = bx.layout_of(ty);
    let size = layout.size;
    let align = layout.align.abi;
    let size = bx.mul(bx.const_usize(size.bytes()), count);
    let flags = if volatile { MemFlags::VOLATILE } else { MemFlags::empty() };
    if allow_overlap {
        bx.memmove(dst, align, src, align, size, flags);
    } else {
        bx.memcpy(dst, align, src, align, size, flags);
    }
}

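/// Shared lowering for the memset-family intrinsics: emits a `memset` of
/// `count * size_of::<T>()` bytes with the given byte value, optionally
/// marked volatile.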
fn memset_intrinsic<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
    bx: &mut Bx,
    volatile: bool,
    ty: Ty<'tcx>,
    dst: Bx::Value,
    val: Bx::Value,
    count: Bx::Value,
) {
    let layout = bx.layout_of(ty);
    let size = layout.size;
    let align = layout.align.abi;
    let size = bx.mul(bx.const_usize(size.bytes()), count);
    let flags = if volatile { MemFlags::VOLATILE } else { MemFlags::empty() };
    bx.memset(dst, val, size, align, flags);
}

impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
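    /// Codegens a call to the given intrinsic, writing the result to `result`.
    ///
    /// Returns `Err(instance)` when the intrinsic is not handled here and the
    /// caller should instead codegen a regular call to `instance`, i.e. the
    /// intrinsic's fallback body.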
    pub fn codegen_intrinsic_call(
        &mut self,
        bx: &mut Bx,
        instance: ty::Instance<'tcx>,
        args: &[OperandRef<'tcx, Bx::Value>],
        result: PlaceRef<'tcx, Bx::Value>,
        source_info: SourceInfo,
    ) -> Result<(), ty::Instance<'tcx>> {
        let span = source_info.span;
        let callee_ty = instance.ty(bx.tcx(), bx.typing_env());

        let ty::FnDef(def_id, fn_args) = *callee_ty.kind() else {
            span_bug!(span, "expected fn item type, found {}", callee_ty);
        };

        let sig = callee_ty.fn_sig(bx.tcx());
        let sig = bx.tcx().normalize_erasing_late_bound_regions(bx.typing_env(), sig);
        let arg_tys = sig.inputs();
        let ret_ty = sig.output();
        let name = bx.tcx().item_name(def_id);
        let name_str = name.as_str();

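        // A direct typed swap is emitted when the type is a backend immediate,
        // when not optimizing, or when the fallback body cannot be used on the
        // target; otherwise this falls through to the default arm below, so
        // the intrinsic's fallback body handles it.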
        if let sym::typed_swap_nonoverlapping = name {
            let pointee_ty = fn_args.type_at(0);
            let pointee_layout = bx.layout_of(pointee_ty);
            if !bx.is_backend_ref(pointee_layout)
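                // Without optimizations the fallback body only adds overhead,
                // so swap directly.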
                || bx.sess().opts.optimize == OptLevel::No
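                // SPIR-V's logical addressing model cannot reinterpret values
                // as raw byte arrays, which the fallback body relies on.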
                || bx.sess().target.arch == "spirv"
            {
                let align = pointee_layout.align.abi;
                let x_place = args[0].val.deref(align);
                let y_place = args[1].val.deref(align);
                bx.typed_place_swap(x_place, y_place, pointee_layout);
                return Ok(());
            }
        }

        let llret_ty = bx.backend_type(bx.layout_of(ret_ty));

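        // Helper to store a scalar intrinsic result into the destination
        // place; `bool` needs its immediate representation converted for the
        // store, and unit results need no store at all.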
        let ret_llval = |bx: &mut Bx, llval| {
            if result.layout.ty.is_bool() {
                OperandRef::from_immediate_or_packed_pair(bx, llval, result.layout)
                    .val
                    .store(bx, result);
            } else if !result.layout.ty.is_unit() {
                bx.store_to_place(llval, result.val);
            }
            Ok(())
        };

        let llval = match name {
            sym::abort => {
                bx.abort();
                return Ok(());
            }

            sym::caller_location => {
                let location = self.get_caller_location(bx, source_info);
                location.val.store(bx, result);
                return Ok(());
            }

            sym::va_start => bx.va_start(args[0].immediate()),
            sym::va_end => bx.va_end(args[0].immediate()),
            sym::size_of_val => {
                let tp_ty = fn_args.type_at(0);
                let (_, meta) = args[0].val.pointer_parts();
                let (llsize, _) = size_of_val::size_and_align_of_dst(bx, tp_ty, meta);
                llsize
            }
            sym::min_align_of_val => {
                let tp_ty = fn_args.type_at(0);
                let (_, meta) = args[0].val.pointer_parts();
                let (_, llalign) = size_of_val::size_and_align_of_dst(bx, tp_ty, meta);
                llalign
            }
            sym::vtable_size | sym::vtable_align => {
                let vtable = args[0].immediate();
                let idx = match name {
                    sym::vtable_size => ty::COMMON_VTABLE_ENTRIES_SIZE,
                    sym::vtable_align => ty::COMMON_VTABLE_ENTRIES_ALIGN,
                    _ => bug!(),
                };
                let value = meth::VirtualIndex::from_index(idx).get_usize(bx, vtable, callee_ty);
                match name {
                    sym::vtable_size => {
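                        // Size is always <= isize::MAX.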
                        let size_bound = bx.data_layout().ptr_sized_integer().signed_max() as u128;
                        bx.range_metadata(value, WrappingRange { start: 0, end: size_bound });
                    }
                    sym::vtable_align => {
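                        // Alignment is always nonzero.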
                        bx.range_metadata(value, WrappingRange { start: 1, end: !0 })
                    }
                    _ => {}
                }
                value
            }
            sym::pref_align_of
            | sym::needs_drop
            | sym::type_id
            | sym::type_name
            | sym::variant_count => {
                let value = bx.tcx().const_eval_instance(bx.typing_env(), instance, span).unwrap();
                OperandRef::from_const(bx, value, ret_ty).immediate_or_packed_pair(bx)
            }
            sym::arith_offset => {
                let ty = fn_args.type_at(0);
                let layout = bx.layout_of(ty);
                let ptr = args[0].immediate();
                let offset = args[1].immediate();
                bx.gep(bx.backend_type(layout), ptr, &[offset])
            }
            sym::copy => {
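                // The `copy` intrinsic is `copy(src, dst, count)`, so the
                // destination pointer is the second argument.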
                copy_intrinsic(
                    bx,
                    true,
                    false,
                    fn_args.type_at(0),
                    args[1].immediate(),
                    args[0].immediate(),
                    args[2].immediate(),
                );
                return Ok(());
            }
            sym::write_bytes => {
                memset_intrinsic(
                    bx,
                    false,
                    fn_args.type_at(0),
                    args[0].immediate(),
                    args[1].immediate(),
                    args[2].immediate(),
                );
                return Ok(());
            }

            sym::volatile_copy_nonoverlapping_memory => {
                copy_intrinsic(
                    bx,
                    false,
                    true,
                    fn_args.type_at(0),
                    args[0].immediate(),
                    args[1].immediate(),
                    args[2].immediate(),
                );
                return Ok(());
            }
            sym::volatile_copy_memory => {
                copy_intrinsic(
                    bx,
                    true,
                    true,
                    fn_args.type_at(0),
                    args[0].immediate(),
                    args[1].immediate(),
                    args[2].immediate(),
                );
                return Ok(());
            }
            sym::volatile_set_memory => {
                memset_intrinsic(
                    bx,
                    true,
                    fn_args.type_at(0),
                    args[0].immediate(),
                    args[1].immediate(),
                    args[2].immediate(),
                );
                return Ok(());
            }
            sym::volatile_store => {
                let dst = args[0].deref(bx.cx());
                args[1].val.volatile_store(bx, dst);
                return Ok(());
            }
            sym::unaligned_volatile_store => {
                let dst = args[0].deref(bx.cx());
                args[1].val.unaligned_volatile_store(bx, dst);
                return Ok(());
            }
            sym::disjoint_bitor => {
                let a = args[0].immediate();
                let b = args[1].immediate();
                bx.or_disjoint(a, b)
            }
            sym::exact_div => {
                let ty = arg_tys[0];
                match int_type_width_signed(ty, bx.tcx()) {
                    Some((_width, signed)) => {
                        if signed {
                            bx.exactsdiv(args[0].immediate(), args[1].immediate())
                        } else {
                            bx.exactudiv(args[0].immediate(), args[1].immediate())
                        }
                    }
                    None => {
                        bx.tcx().dcx().emit_err(InvalidMonomorphization::BasicIntegerType {
                            span,
                            name,
                            ty,
                        });
                        return Ok(());
                    }
                }
            }
            sym::fadd_fast | sym::fsub_fast | sym::fmul_fast | sym::fdiv_fast | sym::frem_fast => {
                match float_type_width(arg_tys[0]) {
                    Some(_width) => match name {
                        sym::fadd_fast => bx.fadd_fast(args[0].immediate(), args[1].immediate()),
                        sym::fsub_fast => bx.fsub_fast(args[0].immediate(), args[1].immediate()),
                        sym::fmul_fast => bx.fmul_fast(args[0].immediate(), args[1].immediate()),
                        sym::fdiv_fast => bx.fdiv_fast(args[0].immediate(), args[1].immediate()),
                        sym::frem_fast => bx.frem_fast(args[0].immediate(), args[1].immediate()),
                        _ => bug!(),
                    },
                    None => {
                        bx.tcx().dcx().emit_err(InvalidMonomorphization::BasicFloatType {
                            span,
                            name,
                            ty: arg_tys[0],
                        });
                        return Ok(());
                    }
                }
            }
            sym::fadd_algebraic
            | sym::fsub_algebraic
            | sym::fmul_algebraic
            | sym::fdiv_algebraic
            | sym::frem_algebraic => match float_type_width(arg_tys[0]) {
                Some(_width) => match name {
                    sym::fadd_algebraic => {
                        bx.fadd_algebraic(args[0].immediate(), args[1].immediate())
                    }
                    sym::fsub_algebraic => {
                        bx.fsub_algebraic(args[0].immediate(), args[1].immediate())
                    }
                    sym::fmul_algebraic => {
                        bx.fmul_algebraic(args[0].immediate(), args[1].immediate())
                    }
                    sym::fdiv_algebraic => {
                        bx.fdiv_algebraic(args[0].immediate(), args[1].immediate())
                    }
                    sym::frem_algebraic => {
                        bx.frem_algebraic(args[0].immediate(), args[1].immediate())
                    }
                    _ => bug!(),
                },
                None => {
                    bx.tcx().dcx().emit_err(InvalidMonomorphization::BasicFloatType {
                        span,
                        name,
                        ty: arg_tys[0],
                    });
                    return Ok(());
                }
            },

            sym::float_to_int_unchecked => {
                if float_type_width(arg_tys[0]).is_none() {
                    bx.tcx().dcx().emit_err(InvalidMonomorphization::FloatToIntUnchecked {
                        span,
                        ty: arg_tys[0],
                    });
                    return Ok(());
                }
                let Some((_width, signed)) = int_type_width_signed(ret_ty, bx.tcx()) else {
                    bx.tcx().dcx().emit_err(InvalidMonomorphization::FloatToIntUnchecked {
                        span,
                        ty: ret_ty,
                    });
                    return Ok(());
                };
                if signed {
                    bx.fptosi(args[0].immediate(), llret_ty)
                } else {
                    bx.fptoui(args[0].immediate(), llret_ty)
                }
            }

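            // Atomic intrinsics are named `atomic_<operation>[_<ordering>]`;
            // the ordering suffix is parsed further below.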
            name if let Some(atomic) = name_str.strip_prefix("atomic_") => {
                use rustc_middle::ty::AtomicOrdering::*;

                use crate::common::{AtomicRmwBinOp, SynchronizationScope};

                let invalid_monomorphization = |ty| {
                    bx.tcx().dcx().emit_err(InvalidMonomorphization::BasicIntegerType {
                        span,
                        name,
                        ty,
                    });
                };

                let parse_const_generic_ordering = |ord: ty::Value<'tcx>| {
                    let discr = ord.valtree.unwrap_branch()[0].unwrap_leaf();
                    discr.to_atomic_ordering()
                };

                match name {
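                    // `atomic_load` takes its ordering as a const generic
                    // parameter rather than encoding it in the intrinsic name.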
                    sym::atomic_load => {
                        let ty = fn_args.type_at(0);
                        let ordering = fn_args.const_at(1).to_value();
                        if !(int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_raw_ptr()) {
                            invalid_monomorphization(ty);
                            return Ok(());
                        }
                        let layout = bx.layout_of(ty);
                        let source = args[0].immediate();
                        let llval = bx.atomic_load(
                            bx.backend_type(layout),
                            source,
                            parse_const_generic_ordering(ordering),
                            layout.size,
                        );

                        return ret_llval(bx, llval);
                    }

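                    // Every other atomic has its ordering parsed out of the
                    // intrinsic name below.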
                    _ => {}
                }

                let Some((instruction, ordering)) = atomic.split_once('_') else {
                    bx.sess().dcx().emit_fatal(errors::MissingMemoryOrdering);
                };

                let parse_ordering = |bx: &Bx, s| match s {
                    "relaxed" => Relaxed,
                    "acquire" => Acquire,
                    "release" => Release,
                    "acqrel" => AcqRel,
                    "seqcst" => SeqCst,
                    _ => bx.sess().dcx().emit_fatal(errors::UnknownAtomicOrdering),
                };

                match instruction {
                    "cxchg" | "cxchgweak" => {
                        let Some((success, failure)) = ordering.split_once('_') else {
                            bx.sess().dcx().emit_fatal(errors::AtomicCompareExchange);
                        };
                        let ty = fn_args.type_at(0);
                        if int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_raw_ptr() {
                            let weak = instruction == "cxchgweak";
                            let dst = args[0].immediate();
                            let cmp = args[1].immediate();
                            let src = args[2].immediate();
                            let (val, success) = bx.atomic_cmpxchg(
                                dst,
                                cmp,
                                src,
                                parse_ordering(bx, success),
                                parse_ordering(bx, failure),
                                weak,
                            );
                            let val = bx.from_immediate(val);
                            let success = bx.from_immediate(success);

                            let dest = result.project_field(bx, 0);
                            bx.store_to_place(val, dest.val);
                            let dest = result.project_field(bx, 1);
                            bx.store_to_place(success, dest.val);
                        } else {
                            invalid_monomorphization(ty);
                        }
                        return Ok(());
                    }

                    "store" => {
                        let ty = fn_args.type_at(0);
                        if int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_raw_ptr() {
                            let size = bx.layout_of(ty).size;
                            let val = args[1].immediate();
                            let ptr = args[0].immediate();
                            bx.atomic_store(val, ptr, parse_ordering(bx, ordering), size);
                        } else {
                            invalid_monomorphization(ty);
                        }
                        return Ok(());
                    }

                    "fence" => {
                        bx.atomic_fence(
                            parse_ordering(bx, ordering),
                            SynchronizationScope::CrossThread,
                        );
                        return Ok(());
                    }

                    "singlethreadfence" => {
                        bx.atomic_fence(
                            parse_ordering(bx, ordering),
                            SynchronizationScope::SingleThread,
                        );
                        return Ok(());
                    }

                    "max" | "min" => {
                        let atom_op = if instruction == "max" {
                            AtomicRmwBinOp::AtomicMax
                        } else {
                            AtomicRmwBinOp::AtomicMin
                        };

                        let ty = fn_args.type_at(0);
                        if matches!(ty.kind(), ty::Int(_)) {
                            let ptr = args[0].immediate();
                            let val = args[1].immediate();
                            bx.atomic_rmw(atom_op, ptr, val, parse_ordering(bx, ordering))
                        } else {
                            invalid_monomorphization(ty);
                            return Ok(());
                        }
                    }
                    "umax" | "umin" => {
                        let atom_op = if instruction == "umax" {
                            AtomicRmwBinOp::AtomicUMax
                        } else {
                            AtomicRmwBinOp::AtomicUMin
                        };

                        let ty = fn_args.type_at(0);
                        if matches!(ty.kind(), ty::Uint(_)) {
                            let ptr = args[0].immediate();
                            let val = args[1].immediate();
                            bx.atomic_rmw(atom_op, ptr, val, parse_ordering(bx, ordering))
                        } else {
                            invalid_monomorphization(ty);
                            return Ok(());
                        }
                    }
                    op => {
                        let atom_op = match op {
                            "xchg" => AtomicRmwBinOp::AtomicXchg,
                            "xadd" => AtomicRmwBinOp::AtomicAdd,
                            "xsub" => AtomicRmwBinOp::AtomicSub,
                            "and" => AtomicRmwBinOp::AtomicAnd,
                            "nand" => AtomicRmwBinOp::AtomicNand,
                            "or" => AtomicRmwBinOp::AtomicOr,
                            "xor" => AtomicRmwBinOp::AtomicXor,
                            _ => bx.sess().dcx().emit_fatal(errors::UnknownAtomicOperation),
                        };

                        let ty = fn_args.type_at(0);
                        if int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_raw_ptr() {
                            let ptr = args[0].immediate();
                            let val = args[1].immediate();
                            bx.atomic_rmw(atom_op, ptr, val, parse_ordering(bx, ordering))
                        } else {
                            invalid_monomorphization(ty);
                            return Ok(());
                        }
                    }
                }
            }

            sym::nontemporal_store => {
                let dst = args[0].deref(bx.cx());
                args[1].val.nontemporal_store(bx, dst);
                return Ok(());
            }

            sym::ptr_offset_from | sym::ptr_offset_from_unsigned => {
                let ty = fn_args.type_at(0);
                let pointee_size = bx.layout_of(ty).size;

                let a = args[0].immediate();
                let b = args[1].immediate();
                let a = bx.ptrtoint(a, bx.type_isize());
                let b = bx.ptrtoint(b, bx.type_isize());
                let pointee_size = bx.const_usize(pointee_size.bytes());
                if name == sym::ptr_offset_from {
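                    // The difference may be negative, so this needs a plain
                    // `sub` followed by an exact signed division by the
                    // element size.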
                    let d = bx.sub(a, b);
                    bx.exactsdiv(d, pointee_size)
                } else {
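                    // The unsigned version requires `a >= b`, so the
                    // subtraction cannot wrap and the division stays unsigned.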
                    let d = bx.unchecked_usub(a, b);
                    bx.exactudiv(d, pointee_size)
                }
            }

            sym::cold_path => {
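                // `cold_path` is only a hint for branch placement; there is
                // nothing to codegen for the call itself.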
                return Ok(());
            }

            _ => {
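                // Everything else needs backend-specific handling, or falls
                // back to the intrinsic's body via the `Err` return value.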
                return bx.codegen_intrinsic_call(instance, args, result, span);
            }
        };

        ret_llval(bx, llval)
    }
}

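/// Returns the width in bits of an integer type, and whether it is signed.
/// Pointer-sized integers report the target's pointer width. Returns `None`
/// if `ty` is not an integer.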
fn int_type_width_signed(ty: Ty<'_>, tcx: TyCtxt<'_>) -> Option<(u64, bool)> {
    match ty.kind() {
        ty::Int(t) => {
            Some((t.bit_width().unwrap_or(u64::from(tcx.sess.target.pointer_width)), true))
        }
        ty::Uint(t) => {
            Some((t.bit_width().unwrap_or(u64::from(tcx.sess.target.pointer_width)), false))
        }
        _ => None,
    }
}

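/// Returns the width in bits of a float type, or `None` if `ty` is not a
/// float.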
fn float_type_width(ty: Ty<'_>) -> Option<u64> {
    match ty.kind() {
        ty::Float(t) => Some(t.bit_width()),
        _ => None,
    }
}