use rustc_abi::WrappingRange;
use rustc_middle::bug;
use rustc_middle::mir::SourceInfo;
use rustc_middle::ty::{self, Ty, TyCtxt};
use rustc_session::config::OptLevel;
use rustc_span::sym;

use super::FunctionCx;
use super::operand::OperandRef;
use super::place::PlaceRef;
use crate::errors::InvalidMonomorphization;
use crate::traits::*;
use crate::{MemFlags, errors, meth, size_of_val};

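/// Lowers the `copy`/`copy_nonoverlapping`-style intrinsics to a `memmove` or `memcpy`
/// of `size_of::<T>() * count` bytes, optionally marked volatile.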
fn copy_intrinsic<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
    bx: &mut Bx,
    allow_overlap: bool,
    volatile: bool,
    ty: Ty<'tcx>,
    dst: Bx::Value,
    src: Bx::Value,
    count: Bx::Value,
) {
    let layout = bx.layout_of(ty);
    let size = layout.size;
    let align = layout.align.abi;
    let size = bx.mul(bx.const_usize(size.bytes()), count);
    let flags = if volatile { MemFlags::VOLATILE } else { MemFlags::empty() };
    if allow_overlap {
        bx.memmove(dst, align, src, align, size, flags);
    } else {
        bx.memcpy(dst, align, src, align, size, flags);
    }
}

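/// Lowers the `write_bytes`/`volatile_set_memory` intrinsics to a `memset` of
/// `size_of::<T>() * count` bytes, optionally marked volatile.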
fn memset_intrinsic<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
    bx: &mut Bx,
    volatile: bool,
    ty: Ty<'tcx>,
    dst: Bx::Value,
    val: Bx::Value,
    count: Bx::Value,
) {
    let layout = bx.layout_of(ty);
    let size = layout.size;
    let align = layout.align.abi;
    let size = bx.mul(bx.const_usize(size.bytes()), count);
    let flags = if volatile { MemFlags::VOLATILE } else { MemFlags::empty() };
    bx.memset(dst, val, size, align, flags);
}

impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
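    /// Codegens a call to the intrinsic `instance`, storing any result into `result`.
    ///
    /// Returns `Err(instance)` if the intrinsic is not handled here and should instead be
    /// codegen'd as a regular call (e.g. to the intrinsic's fallback body).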
    pub fn codegen_intrinsic_call(
        &mut self,
        bx: &mut Bx,
        instance: ty::Instance<'tcx>,
        args: &[OperandRef<'tcx, Bx::Value>],
        result: PlaceRef<'tcx, Bx::Value>,
        source_info: SourceInfo,
    ) -> Result<(), ty::Instance<'tcx>> {
        let span = source_info.span;

        let name = bx.tcx().item_name(instance.def_id());
        let name_str = name.as_str();
        let fn_args = instance.args;

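        // `typed_swap_nonoverlapping` is lowered here as a direct, type-aware swap of the
        // two places when the pointee is an immediate for the backend, when not optimizing,
        // or on SPIR-V; otherwise it falls through so the intrinsic's fallback body is used.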
        if let sym::typed_swap_nonoverlapping = name {
            let pointee_ty = fn_args.type_at(0);
            let pointee_layout = bx.layout_of(pointee_ty);
            if !bx.is_backend_ref(pointee_layout)
                || bx.sess().opts.optimize == OptLevel::No
                || bx.sess().target.arch == "spirv"
            {
                let align = pointee_layout.align.abi;
                let x_place = args[0].val.deref(align);
                let y_place = args[1].val.deref(align);
                bx.typed_place_swap(x_place, y_place, pointee_layout);
                return Ok(());
            }
        }

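        // Stores a value produced by one of the match arms below into the result place,
        // normalizing `bool` from its immediate representation first.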
        let ret_llval = |bx: &mut Bx, llval| {
            if result.layout.ty.is_bool() {
                let val = bx.from_immediate(llval);
                bx.store_to_place(val, result.val);
            } else if !result.layout.ty.is_unit() {
                bx.store_to_place(llval, result.val);
            }
            Ok(())
        };

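        // Arms that produce a value fall through to the `ret_llval` call at the end of the
        // function; arms that store their result themselves (or have none) return early.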
        let llval = match name {
            sym::abort => {
                bx.abort();
                return Ok(());
            }

            sym::caller_location => {
                let location = self.get_caller_location(bx, source_info);
                location.val.store(bx, result);
                return Ok(());
            }

            sym::va_start => bx.va_start(args[0].immediate()),
            sym::va_end => bx.va_end(args[0].immediate()),
            sym::size_of_val => {
                let tp_ty = fn_args.type_at(0);
                let (_, meta) = args[0].val.pointer_parts();
                let (llsize, _) = size_of_val::size_and_align_of_dst(bx, tp_ty, meta);
                llsize
            }
            sym::min_align_of_val => {
                let tp_ty = fn_args.type_at(0);
                let (_, meta) = args[0].val.pointer_parts();
                let (_, llalign) = size_of_val::size_and_align_of_dst(bx, tp_ty, meta);
                llalign
            }
            sym::vtable_size | sym::vtable_align => {
                let vtable = args[0].immediate();
                let idx = match name {
                    sym::vtable_size => ty::COMMON_VTABLE_ENTRIES_SIZE,
                    sym::vtable_align => ty::COMMON_VTABLE_ENTRIES_ALIGN,
                    _ => bug!(),
                };
                let value = meth::VirtualIndex::from_index(idx).get_usize(
                    bx,
                    vtable,
                    instance.ty(bx.tcx(), bx.typing_env()),
                );
                match name {
                    // A valid size is never larger than `isize::MAX`.
                    sym::vtable_size => {
                        let size_bound = bx.data_layout().ptr_sized_integer().signed_max() as u128;
                        bx.range_metadata(value, WrappingRange { start: 0, end: size_bound });
                    }
                    // Alignment is always nonzero.
                    sym::vtable_align => {
                        bx.range_metadata(value, WrappingRange { start: 1, end: !0 })
                    }
                    _ => {}
                }
                value
            }
            sym::pref_align_of
            | sym::needs_drop
            | sym::type_id
            | sym::type_name
            | sym::variant_count => {
                let value = bx.tcx().const_eval_instance(bx.typing_env(), instance, span).unwrap();
                OperandRef::from_const(bx, value, result.layout.ty).immediate_or_packed_pair(bx)
            }
            sym::arith_offset => {
                let ty = fn_args.type_at(0);
                let layout = bx.layout_of(ty);
                let ptr = args[0].immediate();
                let offset = args[1].immediate();
                bx.gep(bx.backend_type(layout), ptr, &[offset])
            }
            sym::copy => {
                // Note that `copy` takes `(src, dst, count)`, while `copy_intrinsic`
                // takes the destination before the source.
                copy_intrinsic(
                    bx,
                    true,
                    false,
                    fn_args.type_at(0),
                    args[1].immediate(),
                    args[0].immediate(),
                    args[2].immediate(),
                );
                return Ok(());
            }
            sym::write_bytes => {
                memset_intrinsic(
                    bx,
                    false,
                    fn_args.type_at(0),
                    args[0].immediate(),
                    args[1].immediate(),
                    args[2].immediate(),
                );
                return Ok(());
            }

            sym::volatile_copy_nonoverlapping_memory => {
                copy_intrinsic(
                    bx,
                    false,
                    true,
                    fn_args.type_at(0),
                    args[0].immediate(),
                    args[1].immediate(),
                    args[2].immediate(),
                );
                return Ok(());
            }
            sym::volatile_copy_memory => {
                copy_intrinsic(
                    bx,
                    true,
                    true,
                    fn_args.type_at(0),
                    args[0].immediate(),
                    args[1].immediate(),
                    args[2].immediate(),
                );
                return Ok(());
            }
            sym::volatile_set_memory => {
                memset_intrinsic(
                    bx,
                    true,
                    fn_args.type_at(0),
                    args[0].immediate(),
                    args[1].immediate(),
                    args[2].immediate(),
                );
                return Ok(());
            }
            sym::volatile_store => {
                let dst = args[0].deref(bx.cx());
                args[1].val.volatile_store(bx, dst);
                return Ok(());
            }
            sym::unaligned_volatile_store => {
                let dst = args[0].deref(bx.cx());
                args[1].val.unaligned_volatile_store(bx, dst);
                return Ok(());
            }
            sym::disjoint_bitor => {
                let a = args[0].immediate();
                let b = args[1].immediate();
                bx.or_disjoint(a, b)
            }
            sym::exact_div => {
                let ty = args[0].layout.ty;
                match int_type_width_signed(ty, bx.tcx()) {
                    Some((_width, signed)) => {
                        if signed {
                            bx.exactsdiv(args[0].immediate(), args[1].immediate())
                        } else {
                            bx.exactudiv(args[0].immediate(), args[1].immediate())
                        }
                    }
                    None => {
                        bx.tcx().dcx().emit_err(InvalidMonomorphization::BasicIntegerType {
                            span,
                            name,
                            ty,
                        });
                        return Ok(());
                    }
                }
            }
            sym::fadd_fast | sym::fsub_fast | sym::fmul_fast | sym::fdiv_fast | sym::frem_fast => {
                match float_type_width(args[0].layout.ty) {
                    Some(_width) => match name {
                        sym::fadd_fast => bx.fadd_fast(args[0].immediate(), args[1].immediate()),
                        sym::fsub_fast => bx.fsub_fast(args[0].immediate(), args[1].immediate()),
                        sym::fmul_fast => bx.fmul_fast(args[0].immediate(), args[1].immediate()),
                        sym::fdiv_fast => bx.fdiv_fast(args[0].immediate(), args[1].immediate()),
                        sym::frem_fast => bx.frem_fast(args[0].immediate(), args[1].immediate()),
                        _ => bug!(),
                    },
                    None => {
                        bx.tcx().dcx().emit_err(InvalidMonomorphization::BasicFloatType {
                            span,
                            name,
                            ty: args[0].layout.ty,
                        });
                        return Ok(());
                    }
                }
            }
            sym::fadd_algebraic
            | sym::fsub_algebraic
            | sym::fmul_algebraic
            | sym::fdiv_algebraic
            | sym::frem_algebraic => match float_type_width(args[0].layout.ty) {
                Some(_width) => match name {
                    sym::fadd_algebraic => {
                        bx.fadd_algebraic(args[0].immediate(), args[1].immediate())
                    }
                    sym::fsub_algebraic => {
                        bx.fsub_algebraic(args[0].immediate(), args[1].immediate())
                    }
                    sym::fmul_algebraic => {
                        bx.fmul_algebraic(args[0].immediate(), args[1].immediate())
                    }
                    sym::fdiv_algebraic => {
                        bx.fdiv_algebraic(args[0].immediate(), args[1].immediate())
                    }
                    sym::frem_algebraic => {
                        bx.frem_algebraic(args[0].immediate(), args[1].immediate())
                    }
                    _ => bug!(),
                },
                None => {
                    bx.tcx().dcx().emit_err(InvalidMonomorphization::BasicFloatType {
                        span,
                        name,
                        ty: args[0].layout.ty,
                    });
                    return Ok(());
                }
            },

            sym::float_to_int_unchecked => {
                if float_type_width(args[0].layout.ty).is_none() {
                    bx.tcx().dcx().emit_err(InvalidMonomorphization::FloatToIntUnchecked {
                        span,
                        ty: args[0].layout.ty,
                    });
                    return Ok(());
                }
                let Some((_width, signed)) = int_type_width_signed(result.layout.ty, bx.tcx())
                else {
                    bx.tcx().dcx().emit_err(InvalidMonomorphization::FloatToIntUnchecked {
                        span,
                        ty: result.layout.ty,
                    });
                    return Ok(());
                };
                if signed {
                    bx.fptosi(args[0].immediate(), bx.backend_type(result.layout))
                } else {
                    bx.fptoui(args[0].immediate(), bx.backend_type(result.layout))
                }
            }

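            // Atomic intrinsics. `atomic_load` takes its ordering as a const generic
            // parameter and is handled first; the remaining operations are dispatched on
            // the rest of the intrinsic name below.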
            name if let Some(atomic) = name_str.strip_prefix("atomic_") => {
                use rustc_middle::ty::AtomicOrdering::*;

                use crate::common::{AtomicRmwBinOp, SynchronizationScope};

                let invalid_monomorphization = |ty| {
                    bx.tcx().dcx().emit_err(InvalidMonomorphization::BasicIntegerType {
                        span,
                        name,
                        ty,
                    });
                };

                let parse_const_generic_ordering = |ord: ty::Value<'tcx>| {
                    let discr = ord.valtree.unwrap_branch()[0].unwrap_leaf();
                    discr.to_atomic_ordering()
                };

                match name {
                    sym::atomic_load => {
                        let ty = fn_args.type_at(0);
                        let ordering = fn_args.const_at(1).to_value();
                        if !(int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_raw_ptr()) {
                            invalid_monomorphization(ty);
                            return Ok(());
                        }
                        let layout = bx.layout_of(ty);
                        let source = args[0].immediate();
                        let llval = bx.atomic_load(
                            bx.backend_type(layout),
                            source,
                            parse_const_generic_ordering(ordering),
                            layout.size,
                        );

                        return ret_llval(bx, llval);
                    }

                    _ => {}
                }

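                // All other atomic intrinsics encode their ordering(s) as a suffix of the
                // name, e.g. `atomic_xadd_relaxed`; compare-exchange carries two orderings.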
                let Some((instruction, ordering)) = atomic.split_once('_') else {
                    bx.sess().dcx().emit_fatal(errors::MissingMemoryOrdering);
                };

                let parse_ordering = |bx: &Bx, s| match s {
                    "relaxed" => Relaxed,
                    "acquire" => Acquire,
                    "release" => Release,
                    "acqrel" => AcqRel,
                    "seqcst" => SeqCst,
                    _ => bx.sess().dcx().emit_fatal(errors::UnknownAtomicOrdering),
                };

                match instruction {
                    "cxchg" | "cxchgweak" => {
                        let Some((success, failure)) = ordering.split_once('_') else {
                            bx.sess().dcx().emit_fatal(errors::AtomicCompareExchange);
                        };
                        let ty = fn_args.type_at(0);
                        if int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_raw_ptr() {
                            let weak = instruction == "cxchgweak";
                            let dst = args[0].immediate();
                            let cmp = args[1].immediate();
                            let src = args[2].immediate();
                            let (val, success) = bx.atomic_cmpxchg(
                                dst,
                                cmp,
                                src,
                                parse_ordering(bx, success),
                                parse_ordering(bx, failure),
                                weak,
                            );
                            let val = bx.from_immediate(val);
                            let success = bx.from_immediate(success);

                            // The result is a `(value, success)` pair; store each field of
                            // the destination place separately.
                            let dest = result.project_field(bx, 0);
                            bx.store_to_place(val, dest.val);
                            let dest = result.project_field(bx, 1);
                            bx.store_to_place(success, dest.val);
                        } else {
                            invalid_monomorphization(ty);
                        }
                        return Ok(());
                    }

                    "store" => {
                        let ty = fn_args.type_at(0);
                        if int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_raw_ptr() {
                            let size = bx.layout_of(ty).size;
                            let val = args[1].immediate();
                            let ptr = args[0].immediate();
                            bx.atomic_store(val, ptr, parse_ordering(bx, ordering), size);
                        } else {
                            invalid_monomorphization(ty);
                        }
                        return Ok(());
                    }

                    "fence" => {
                        bx.atomic_fence(
                            parse_ordering(bx, ordering),
                            SynchronizationScope::CrossThread,
                        );
                        return Ok(());
                    }

                    "singlethreadfence" => {
                        bx.atomic_fence(
                            parse_ordering(bx, ordering),
                            SynchronizationScope::SingleThread,
                        );
                        return Ok(());
                    }

                    // `atomic_max`/`atomic_min` are only valid on signed integer types.
                    "max" | "min" => {
                        let atom_op = if instruction == "max" {
                            AtomicRmwBinOp::AtomicMax
                        } else {
                            AtomicRmwBinOp::AtomicMin
                        };

                        let ty = fn_args.type_at(0);
                        if matches!(ty.kind(), ty::Int(_)) {
                            let ptr = args[0].immediate();
                            let val = args[1].immediate();
                            bx.atomic_rmw(atom_op, ptr, val, parse_ordering(bx, ordering))
                        } else {
                            invalid_monomorphization(ty);
                            return Ok(());
                        }
                    }
                    // `atomic_umax`/`atomic_umin` are only valid on unsigned integer types.
                    "umax" | "umin" => {
                        let atom_op = if instruction == "umax" {
                            AtomicRmwBinOp::AtomicUMax
                        } else {
                            AtomicRmwBinOp::AtomicUMin
                        };

                        let ty = fn_args.type_at(0);
                        if matches!(ty.kind(), ty::Uint(_)) {
                            let ptr = args[0].immediate();
                            let val = args[1].immediate();
                            bx.atomic_rmw(atom_op, ptr, val, parse_ordering(bx, ordering))
                        } else {
                            invalid_monomorphization(ty);
                            return Ok(());
                        }
                    }
                    op => {
                        let atom_op = match op {
                            "xchg" => AtomicRmwBinOp::AtomicXchg,
                            "xadd" => AtomicRmwBinOp::AtomicAdd,
                            "xsub" => AtomicRmwBinOp::AtomicSub,
                            "and" => AtomicRmwBinOp::AtomicAnd,
                            "nand" => AtomicRmwBinOp::AtomicNand,
                            "or" => AtomicRmwBinOp::AtomicOr,
                            "xor" => AtomicRmwBinOp::AtomicXor,
                            _ => bx.sess().dcx().emit_fatal(errors::UnknownAtomicOperation),
                        };

                        let ty = fn_args.type_at(0);
                        if int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_raw_ptr() {
                            let ptr = args[0].immediate();
                            let val = args[1].immediate();
                            bx.atomic_rmw(atom_op, ptr, val, parse_ordering(bx, ordering))
                        } else {
                            invalid_monomorphization(ty);
                            return Ok(());
                        }
                    }
                }
            }

            sym::nontemporal_store => {
                let dst = args[0].deref(bx.cx());
                args[1].val.nontemporal_store(bx, dst);
                return Ok(());
            }

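            // `ptr_offset_from{,_unsigned}` compute the element distance between two
            // pointers as `(a - b) / size_of::<T>()`, using exact division because the byte
            // distance is guaranteed to be a multiple of the element size; the unsigned
            // variant additionally guarantees `a >= b`.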
            sym::ptr_offset_from | sym::ptr_offset_from_unsigned => {
                let ty = fn_args.type_at(0);
                let pointee_size = bx.layout_of(ty).size;

                let a = args[0].immediate();
                let b = args[1].immediate();
                let a = bx.ptrtoint(a, bx.type_isize());
                let b = bx.ptrtoint(b, bx.type_isize());
                let pointee_size = bx.const_usize(pointee_size.bytes());
                if name == sym::ptr_offset_from {
                    let d = bx.sub(a, b);
                    bx.exactsdiv(d, pointee_size)
                } else {
                    let d = bx.unchecked_usub(a, b);
                    bx.exactudiv(d, pointee_size)
                }
            }

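            // `cold_path` is only a hint to the optimizer; there is nothing to emit for the
            // call itself.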
            sym::cold_path => {
                return Ok(());
            }

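            // Everything else is handled by the backend, which may in turn return
            // `Err(instance)` to ask the caller to codegen a plain call to the intrinsic's
            // fallback body.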
            _ => {
                return bx.codegen_intrinsic_call(instance, args, result, span);
            }
        };

        ret_llval(bx, llval)
    }
}

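/// Returns the bit width and signedness of `ty` if it is a primitive integer type
/// (`isize`/`usize` report the target's pointer width), and `None` otherwise.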
fn int_type_width_signed(ty: Ty<'_>, tcx: TyCtxt<'_>) -> Option<(u64, bool)> {
    match ty.kind() {
        ty::Int(t) => {
            Some((t.bit_width().unwrap_or(u64::from(tcx.sess.target.pointer_width)), true))
        }
        ty::Uint(t) => {
            Some((t.bit_width().unwrap_or(u64::from(tcx.sess.target.pointer_width)), false))
        }
        _ => None,
    }
}

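/// Returns the bit width of `ty` if it is a primitive float type, and `None` otherwise.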
fn float_type_width(ty: Ty<'_>) -> Option<u64> {
    match ty.kind() {
        ty::Float(t) => Some(t.bit_width()),
        _ => None,
    }
}