rustc_abi/
lib.rs

1// tidy-alphabetical-start
2#![cfg_attr(feature = "nightly", allow(internal_features))]
3#![cfg_attr(feature = "nightly", doc(rust_logo))]
4#![cfg_attr(feature = "nightly", feature(assert_matches))]
5#![cfg_attr(feature = "nightly", feature(rustc_attrs))]
6#![cfg_attr(feature = "nightly", feature(rustdoc_internals))]
7#![cfg_attr(feature = "nightly", feature(step_trait))]
8// tidy-alphabetical-end
9
10/*! ABI handling for rustc
11
12## What is an "ABI"?
13
14Literally, "application binary interface", which means it is everything about how code interacts,
15at the machine level, with other code. This means it technically covers all of the following:
16- object binary format for e.g. relocations or offset tables
17- in-memory layout of types
18- procedure calling conventions
19
20When we discuss "ABI" in the context of rustc, we are probably discussing calling conventions.
21To describe those `rustc_abi` also covers type layout, as it must for values passed on the stack.
22Despite `rustc_abi` being about calling conventions, it is good to remember these usages exist.
23You will encounter all of them and more if you study target-specific codegen enough!
24Even in general conversation, when someone says "the Rust ABI is unstable", it may allude to
25either or both of
26- `repr(Rust)` types have a mostly-unspecified layout
27- `extern "Rust" fn(A) -> R` has an unspecified calling convention
28
29## Crate Goal
30
31ABI is a foundational concept, so the `rustc_abi` crate serves as an equally foundational crate.
32It cannot carry all details relevant to an ABI: those permeate code generation and linkage.
33Instead, `rustc_abi` is intended to provide the interface for reasoning about the binary interface.
34It should contain traits and types that other crates then use in their implementation.
35For example, a platform's `extern "C" fn` calling convention will be implemented in `rustc_target`
36but `rustc_abi` contains the types for calculating layout and describing register-passing.
37This makes it easier to describe things in the same way across targets, codegen backends, and
38even other Rust compilers, such as rust-analyzer!
39
40*/
41
42use std::fmt;
43#[cfg(feature = "nightly")]
44use std::iter::Step;
45use std::num::{NonZeroUsize, ParseIntError};
46use std::ops::{Add, AddAssign, Mul, RangeInclusive, Sub};
47use std::str::FromStr;
48
49use bitflags::bitflags;
50#[cfg(feature = "nightly")]
51use rustc_data_structures::stable_hasher::StableOrd;
52use rustc_hashes::Hash64;
53use rustc_index::{Idx, IndexSlice, IndexVec};
54#[cfg(feature = "nightly")]
55use rustc_macros::{Decodable_NoContext, Encodable_NoContext, HashStable_Generic};
56
57mod callconv;
58mod canon_abi;
59mod extern_abi;
60mod layout;
61#[cfg(test)]
62mod tests;
63
64pub use callconv::{Heterogeneous, HomogeneousAggregate, Reg, RegKind};
65pub use canon_abi::{ArmCall, CanonAbi, InterruptKind, X86Call};
66pub use extern_abi::{ExternAbi, all_names};
67#[cfg(feature = "nightly")]
68pub use layout::{FIRST_VARIANT, FieldIdx, Layout, TyAbiInterface, TyAndLayout, VariantIdx};
69pub use layout::{LayoutCalculator, LayoutCalculatorError};
70
71/// Requirements for a `StableHashingContext` to be used in this crate.
72/// This is a hack to allow using the `HashStable_Generic` derive macro
73/// instead of implementing everything in `rustc_middle`.
74#[cfg(feature = "nightly")]
75pub trait HashStableContext {}
76
77#[derive(Clone, Copy, PartialEq, Eq, Default)]
78#[cfg_attr(
79    feature = "nightly",
80    derive(Encodable_NoContext, Decodable_NoContext, HashStable_Generic)
81)]
82pub struct ReprFlags(u8);
83
84bitflags! {
85    impl ReprFlags: u8 {
86        const IS_C               = 1 << 0;
87        const IS_SIMD            = 1 << 1;
88        const IS_TRANSPARENT     = 1 << 2;
89        // Internal only for now. If true, don't reorder fields.
90        // On its own it does not prevent ABI optimizations.
91        const IS_LINEAR          = 1 << 3;
92        // If true, the type's crate has opted into layout randomization.
93        // Other flags can still inhibit reordering and thus randomization.
94        // The seed stored in `ReprOptions.field_shuffle_seed`.
95        const RANDOMIZE_LAYOUT   = 1 << 4;
96        // Any of these flags being set prevent field reordering optimisation.
97        const FIELD_ORDER_UNOPTIMIZABLE   = ReprFlags::IS_C.bits()
98                                 | ReprFlags::IS_SIMD.bits()
99                                 | ReprFlags::IS_LINEAR.bits();
100        const ABI_UNOPTIMIZABLE = ReprFlags::IS_C.bits() | ReprFlags::IS_SIMD.bits();
101    }
102}
103
104// This is the same as `rustc_data_structures::external_bitflags_debug` but without the
105// `rustc_data_structures` to make it build on stable.
106impl std::fmt::Debug for ReprFlags {
107    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
108        bitflags::parser::to_writer(self, f)
109    }
110}
111
112#[derive(Copy, Clone, Debug, Eq, PartialEq)]
113#[cfg_attr(
114    feature = "nightly",
115    derive(Encodable_NoContext, Decodable_NoContext, HashStable_Generic)
116)]
117pub enum IntegerType {
118    /// Pointer-sized integer type, i.e. `isize` and `usize`. The field shows signedness, e.g.
119    /// `Pointer(true)` means `isize`.
120    Pointer(bool),
121    /// Fixed-sized integer type, e.g. `i8`, `u32`, `i128`. The bool field shows signedness, e.g.
122    /// `Fixed(I8, false)` means `u8`.
123    Fixed(Integer, bool),
124}
125
126impl IntegerType {
127    pub fn is_signed(&self) -> bool {
128        match self {
129            IntegerType::Pointer(b) => *b,
130            IntegerType::Fixed(_, b) => *b,
131        }
132    }
133}
134
135/// Represents the repr options provided by the user.
136#[derive(Copy, Clone, Debug, Eq, PartialEq, Default)]
137#[cfg_attr(
138    feature = "nightly",
139    derive(Encodable_NoContext, Decodable_NoContext, HashStable_Generic)
140)]
141pub struct ReprOptions {
142    pub int: Option<IntegerType>,
143    pub align: Option<Align>,
144    pub pack: Option<Align>,
145    pub flags: ReprFlags,
146    /// The seed to be used for randomizing a type's layout
147    ///
148    /// Note: This could technically be a `u128` which would
149    /// be the "most accurate" hash as it'd encompass the item and crate
150    /// hash without loss, but it does pay the price of being larger.
151    /// Everything's a tradeoff, a 64-bit seed should be sufficient for our
152    /// purposes (primarily `-Z randomize-layout`)
153    pub field_shuffle_seed: Hash64,
154}
155
156impl ReprOptions {
157    #[inline]
158    pub fn simd(&self) -> bool {
159        self.flags.contains(ReprFlags::IS_SIMD)
160    }
161
162    #[inline]
163    pub fn c(&self) -> bool {
164        self.flags.contains(ReprFlags::IS_C)
165    }
166
167    #[inline]
168    pub fn packed(&self) -> bool {
169        self.pack.is_some()
170    }
171
172    #[inline]
173    pub fn transparent(&self) -> bool {
174        self.flags.contains(ReprFlags::IS_TRANSPARENT)
175    }
176
177    #[inline]
178    pub fn linear(&self) -> bool {
179        self.flags.contains(ReprFlags::IS_LINEAR)
180    }
181
182    /// Returns the discriminant type, given these `repr` options.
183    /// This must only be called on enums!
184    pub fn discr_type(&self) -> IntegerType {
185        self.int.unwrap_or(IntegerType::Pointer(true))
186    }
187
188    /// Returns `true` if this `#[repr()]` should inhabit "smart enum
189    /// layout" optimizations, such as representing `Foo<&T>` as a
190    /// single pointer.
191    pub fn inhibit_enum_layout_opt(&self) -> bool {
192        self.c() || self.int.is_some()
193    }
194
195    pub fn inhibit_newtype_abi_optimization(&self) -> bool {
196        self.flags.intersects(ReprFlags::ABI_UNOPTIMIZABLE)
197    }
198
199    /// Returns `true` if this `#[repr()]` guarantees a fixed field order,
200    /// e.g. `repr(C)` or `repr(<int>)`.
201    pub fn inhibit_struct_field_reordering(&self) -> bool {
202        self.flags.intersects(ReprFlags::FIELD_ORDER_UNOPTIMIZABLE) || self.int.is_some()
203    }
204
205    /// Returns `true` if this type is valid for reordering and `-Z randomize-layout`
206    /// was enabled for its declaration crate.
207    pub fn can_randomize_type_layout(&self) -> bool {
208        !self.inhibit_struct_field_reordering() && self.flags.contains(ReprFlags::RANDOMIZE_LAYOUT)
209    }
210
211    /// Returns `true` if this `#[repr()]` should inhibit union ABI optimisations.
212    pub fn inhibits_union_abi_opt(&self) -> bool {
213        self.c()
214    }
215}
216
217/// The maximum supported number of lanes in a SIMD vector.
218///
219/// This value is selected based on backend support:
220/// * LLVM does not appear to have a vector width limit.
221/// * Cranelift stores the base-2 log of the lane count in a 4 bit integer.
222pub const MAX_SIMD_LANES: u64 = 1 << 0xF;
223
224/// Parsed [Data layout](https://llvm.org/docs/LangRef.html#data-layout)
225/// for a target, which contains everything needed to compute layouts.
226#[derive(Debug, PartialEq, Eq)]
227pub struct TargetDataLayout {
228    pub endian: Endian,
229    pub i1_align: AbiAndPrefAlign,
230    pub i8_align: AbiAndPrefAlign,
231    pub i16_align: AbiAndPrefAlign,
232    pub i32_align: AbiAndPrefAlign,
233    pub i64_align: AbiAndPrefAlign,
234    pub i128_align: AbiAndPrefAlign,
235    pub f16_align: AbiAndPrefAlign,
236    pub f32_align: AbiAndPrefAlign,
237    pub f64_align: AbiAndPrefAlign,
238    pub f128_align: AbiAndPrefAlign,
239    pub pointer_size: Size,
240    pub pointer_align: AbiAndPrefAlign,
241    pub aggregate_align: AbiAndPrefAlign,
242
243    /// Alignments for vector types.
244    pub vector_align: Vec<(Size, AbiAndPrefAlign)>,
245
246    pub instruction_address_space: AddressSpace,
247
248    /// Minimum size of #[repr(C)] enums (default c_int::BITS, usually 32)
249    /// Note: This isn't in LLVM's data layout string, it is `short_enum`
250    /// so the only valid spec for LLVM is c_int::BITS or 8
251    pub c_enum_min_size: Integer,
252}
253
254impl Default for TargetDataLayout {
255    /// Creates an instance of `TargetDataLayout`.
256    fn default() -> TargetDataLayout {
257        let align = |bits| Align::from_bits(bits).unwrap();
258        TargetDataLayout {
259            endian: Endian::Big,
260            i1_align: AbiAndPrefAlign::new(align(8)),
261            i8_align: AbiAndPrefAlign::new(align(8)),
262            i16_align: AbiAndPrefAlign::new(align(16)),
263            i32_align: AbiAndPrefAlign::new(align(32)),
264            i64_align: AbiAndPrefAlign { abi: align(32), pref: align(64) },
265            i128_align: AbiAndPrefAlign { abi: align(32), pref: align(64) },
266            f16_align: AbiAndPrefAlign::new(align(16)),
267            f32_align: AbiAndPrefAlign::new(align(32)),
268            f64_align: AbiAndPrefAlign::new(align(64)),
269            f128_align: AbiAndPrefAlign::new(align(128)),
270            pointer_size: Size::from_bits(64),
271            pointer_align: AbiAndPrefAlign::new(align(64)),
272            aggregate_align: AbiAndPrefAlign { abi: align(0), pref: align(64) },
273            vector_align: vec![
274                (Size::from_bits(64), AbiAndPrefAlign::new(align(64))),
275                (Size::from_bits(128), AbiAndPrefAlign::new(align(128))),
276            ],
277            instruction_address_space: AddressSpace::DATA,
278            c_enum_min_size: Integer::I32,
279        }
280    }
281}
282
283pub enum TargetDataLayoutErrors<'a> {
284    InvalidAddressSpace { addr_space: &'a str, cause: &'a str, err: ParseIntError },
285    InvalidBits { kind: &'a str, bit: &'a str, cause: &'a str, err: ParseIntError },
286    MissingAlignment { cause: &'a str },
287    InvalidAlignment { cause: &'a str, err: AlignFromBytesError },
288    InconsistentTargetArchitecture { dl: &'a str, target: &'a str },
289    InconsistentTargetPointerWidth { pointer_size: u64, target: u32 },
290    InvalidBitsSize { err: String },
291}
292
293impl TargetDataLayout {
294    /// Parse data layout from an
295    /// [llvm data layout string](https://llvm.org/docs/LangRef.html#data-layout)
296    ///
297    /// This function doesn't fill `c_enum_min_size` and it will always be `I32` since it can not be
298    /// determined from llvm string.
299    pub fn parse_from_llvm_datalayout_string<'a>(
300        input: &'a str,
301    ) -> Result<TargetDataLayout, TargetDataLayoutErrors<'a>> {
302        // Parse an address space index from a string.
303        let parse_address_space = |s: &'a str, cause: &'a str| {
304            s.parse::<u32>().map(AddressSpace).map_err(|err| {
305                TargetDataLayoutErrors::InvalidAddressSpace { addr_space: s, cause, err }
306            })
307        };
308
309        // Parse a bit count from a string.
310        let parse_bits = |s: &'a str, kind: &'a str, cause: &'a str| {
311            s.parse::<u64>().map_err(|err| TargetDataLayoutErrors::InvalidBits {
312                kind,
313                bit: s,
314                cause,
315                err,
316            })
317        };
318
319        // Parse a size string.
320        let parse_size =
321            |s: &'a str, cause: &'a str| parse_bits(s, "size", cause).map(Size::from_bits);
322
323        // Parse an alignment string.
324        let parse_align = |s: &[&'a str], cause: &'a str| {
325            if s.is_empty() {
326                return Err(TargetDataLayoutErrors::MissingAlignment { cause });
327            }
328            let align_from_bits = |bits| {
329                Align::from_bits(bits)
330                    .map_err(|err| TargetDataLayoutErrors::InvalidAlignment { cause, err })
331            };
332            let abi = parse_bits(s[0], "alignment", cause)?;
333            let pref = s.get(1).map_or(Ok(abi), |pref| parse_bits(pref, "alignment", cause))?;
334            Ok(AbiAndPrefAlign { abi: align_from_bits(abi)?, pref: align_from_bits(pref)? })
335        };
336
337        let mut dl = TargetDataLayout::default();
338        let mut i128_align_src = 64;
339        for spec in input.split('-') {
340            let spec_parts = spec.split(':').collect::<Vec<_>>();
341
342            match &*spec_parts {
343                ["e"] => dl.endian = Endian::Little,
344                ["E"] => dl.endian = Endian::Big,
345                [p] if p.starts_with('P') => {
346                    dl.instruction_address_space = parse_address_space(&p[1..], "P")?
347                }
348                ["a", a @ ..] => dl.aggregate_align = parse_align(a, "a")?,
349                ["f16", a @ ..] => dl.f16_align = parse_align(a, "f16")?,
350                ["f32", a @ ..] => dl.f32_align = parse_align(a, "f32")?,
351                ["f64", a @ ..] => dl.f64_align = parse_align(a, "f64")?,
352                ["f128", a @ ..] => dl.f128_align = parse_align(a, "f128")?,
353                // FIXME(erikdesjardins): we should be parsing nonzero address spaces
354                // this will require replacing TargetDataLayout::{pointer_size,pointer_align}
355                // with e.g. `fn pointer_size_in(AddressSpace)`
356                [p @ "p", s, a @ ..] | [p @ "p0", s, a @ ..] => {
357                    dl.pointer_size = parse_size(s, p)?;
358                    dl.pointer_align = parse_align(a, p)?;
359                }
360                [s, a @ ..] if s.starts_with('i') => {
361                    let Ok(bits) = s[1..].parse::<u64>() else {
362                        parse_size(&s[1..], "i")?; // For the user error.
363                        continue;
364                    };
365                    let a = parse_align(a, s)?;
366                    match bits {
367                        1 => dl.i1_align = a,
368                        8 => dl.i8_align = a,
369                        16 => dl.i16_align = a,
370                        32 => dl.i32_align = a,
371                        64 => dl.i64_align = a,
372                        _ => {}
373                    }
374                    if bits >= i128_align_src && bits <= 128 {
375                        // Default alignment for i128 is decided by taking the alignment of
376                        // largest-sized i{64..=128}.
377                        i128_align_src = bits;
378                        dl.i128_align = a;
379                    }
380                }
381                [s, a @ ..] if s.starts_with('v') => {
382                    let v_size = parse_size(&s[1..], "v")?;
383                    let a = parse_align(a, s)?;
384                    if let Some(v) = dl.vector_align.iter_mut().find(|v| v.0 == v_size) {
385                        v.1 = a;
386                        continue;
387                    }
388                    // No existing entry, add a new one.
389                    dl.vector_align.push((v_size, a));
390                }
391                _ => {} // Ignore everything else.
392            }
393        }
394        Ok(dl)
395    }
396
397    /// Returns **exclusive** upper bound on object size in bytes.
398    ///
399    /// The theoretical maximum object size is defined as the maximum positive `isize` value.
400    /// This ensures that the `offset` semantics remain well-defined by allowing it to correctly
401    /// index every address within an object along with one byte past the end, along with allowing
402    /// `isize` to store the difference between any two pointers into an object.
403    ///
404    /// LLVM uses a 64-bit integer to represent object size in *bits*, but we care only for bytes,
405    /// so we adopt such a more-constrained size bound due to its technical limitations.
406    #[inline]
407    pub fn obj_size_bound(&self) -> u64 {
408        match self.pointer_size.bits() {
409            16 => 1 << 15,
410            32 => 1 << 31,
411            64 => 1 << 61,
412            bits => panic!("obj_size_bound: unknown pointer bit size {bits}"),
413        }
414    }
415
416    #[inline]
417    pub fn ptr_sized_integer(&self) -> Integer {
418        use Integer::*;
419        match self.pointer_size.bits() {
420            16 => I16,
421            32 => I32,
422            64 => I64,
423            bits => panic!("ptr_sized_integer: unknown pointer bit size {bits}"),
424        }
425    }
426
427    /// psABI-mandated alignment for a vector type, if any
428    #[inline]
429    fn cabi_vector_align(&self, vec_size: Size) -> Option<AbiAndPrefAlign> {
430        self.vector_align
431            .iter()
432            .find(|(size, _align)| *size == vec_size)
433            .map(|(_size, align)| *align)
434    }
435
436    /// an alignment resembling the one LLVM would pick for a vector
437    #[inline]
438    pub fn llvmlike_vector_align(&self, vec_size: Size) -> AbiAndPrefAlign {
439        self.cabi_vector_align(vec_size).unwrap_or(AbiAndPrefAlign::new(
440            Align::from_bytes(vec_size.bytes().next_power_of_two()).unwrap(),
441        ))
442    }
443}
444
445pub trait HasDataLayout {
446    fn data_layout(&self) -> &TargetDataLayout;
447}
448
449impl HasDataLayout for TargetDataLayout {
450    #[inline]
451    fn data_layout(&self) -> &TargetDataLayout {
452        self
453    }
454}
455
456// used by rust-analyzer
457impl HasDataLayout for &TargetDataLayout {
458    #[inline]
459    fn data_layout(&self) -> &TargetDataLayout {
460        (**self).data_layout()
461    }
462}
463
464/// Endianness of the target, which must match cfg(target-endian).
465#[derive(Copy, Clone, PartialEq, Eq)]
466pub enum Endian {
467    Little,
468    Big,
469}
470
471impl Endian {
472    pub fn as_str(&self) -> &'static str {
473        match self {
474            Self::Little => "little",
475            Self::Big => "big",
476        }
477    }
478}
479
480impl fmt::Debug for Endian {
481    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
482        f.write_str(self.as_str())
483    }
484}
485
486impl FromStr for Endian {
487    type Err = String;
488
489    fn from_str(s: &str) -> Result<Self, Self::Err> {
490        match s {
491            "little" => Ok(Self::Little),
492            "big" => Ok(Self::Big),
493            _ => Err(format!(r#"unknown endian: "{s}""#)),
494        }
495    }
496}
497
498/// Size of a type in bytes.
499#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
500#[cfg_attr(
501    feature = "nightly",
502    derive(Encodable_NoContext, Decodable_NoContext, HashStable_Generic)
503)]
504pub struct Size {
505    raw: u64,
506}
507
508#[cfg(feature = "nightly")]
509impl StableOrd for Size {
510    const CAN_USE_UNSTABLE_SORT: bool = true;
511
512    // `Ord` is implemented as just comparing numerical values and numerical values
513    // are not changed by (de-)serialization.
514    const THIS_IMPLEMENTATION_HAS_BEEN_TRIPLE_CHECKED: () = ();
515}
516
517// This is debug-printed a lot in larger structs, don't waste too much space there
518impl fmt::Debug for Size {
519    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
520        write!(f, "Size({} bytes)", self.bytes())
521    }
522}
523
524impl Size {
525    pub const ZERO: Size = Size { raw: 0 };
526
527    /// Rounds `bits` up to the next-higher byte boundary, if `bits` is
528    /// not a multiple of 8.
529    pub fn from_bits(bits: impl TryInto<u64>) -> Size {
530        let bits = bits.try_into().ok().unwrap();
531        // Avoid potential overflow from `bits + 7`.
532        Size { raw: bits / 8 + ((bits % 8) + 7) / 8 }
533    }
534
535    #[inline]
536    pub fn from_bytes(bytes: impl TryInto<u64>) -> Size {
537        let bytes: u64 = bytes.try_into().ok().unwrap();
538        Size { raw: bytes }
539    }
540
541    #[inline]
542    pub fn bytes(self) -> u64 {
543        self.raw
544    }
545
546    #[inline]
547    pub fn bytes_usize(self) -> usize {
548        self.bytes().try_into().unwrap()
549    }
550
551    #[inline]
552    pub fn bits(self) -> u64 {
553        #[cold]
554        fn overflow(bytes: u64) -> ! {
555            panic!("Size::bits: {bytes} bytes in bits doesn't fit in u64")
556        }
557
558        self.bytes().checked_mul(8).unwrap_or_else(|| overflow(self.bytes()))
559    }
560
561    #[inline]
562    pub fn bits_usize(self) -> usize {
563        self.bits().try_into().unwrap()
564    }
565
566    #[inline]
567    pub fn align_to(self, align: Align) -> Size {
568        let mask = align.bytes() - 1;
569        Size::from_bytes((self.bytes() + mask) & !mask)
570    }
571
572    #[inline]
573    pub fn is_aligned(self, align: Align) -> bool {
574        let mask = align.bytes() - 1;
575        self.bytes() & mask == 0
576    }
577
578    #[inline]
579    pub fn checked_add<C: HasDataLayout>(self, offset: Size, cx: &C) -> Option<Size> {
580        let dl = cx.data_layout();
581
582        let bytes = self.bytes().checked_add(offset.bytes())?;
583
584        if bytes < dl.obj_size_bound() { Some(Size::from_bytes(bytes)) } else { None }
585    }
586
587    #[inline]
588    pub fn checked_mul<C: HasDataLayout>(self, count: u64, cx: &C) -> Option<Size> {
589        let dl = cx.data_layout();
590
591        let bytes = self.bytes().checked_mul(count)?;
592        if bytes < dl.obj_size_bound() { Some(Size::from_bytes(bytes)) } else { None }
593    }
594
595    /// Truncates `value` to `self` bits and then sign-extends it to 128 bits
596    /// (i.e., if it is negative, fill with 1's on the left).
597    #[inline]
598    pub fn sign_extend(self, value: u128) -> i128 {
599        let size = self.bits();
600        if size == 0 {
601            // Truncated until nothing is left.
602            return 0;
603        }
604        // Sign-extend it.
605        let shift = 128 - size;
606        // Shift the unsigned value to the left, then shift back to the right as signed
607        // (essentially fills with sign bit on the left).
608        ((value << shift) as i128) >> shift
609    }
610
611    /// Truncates `value` to `self` bits.
612    #[inline]
613    pub fn truncate(self, value: u128) -> u128 {
614        let size = self.bits();
615        if size == 0 {
616            // Truncated until nothing is left.
617            return 0;
618        }
619        let shift = 128 - size;
620        // Truncate (shift left to drop out leftover values, shift right to fill with zeroes).
621        (value << shift) >> shift
622    }
623
624    #[inline]
625    pub fn signed_int_min(&self) -> i128 {
626        self.sign_extend(1_u128 << (self.bits() - 1))
627    }
628
629    #[inline]
630    pub fn signed_int_max(&self) -> i128 {
631        i128::MAX >> (128 - self.bits())
632    }
633
634    #[inline]
635    pub fn unsigned_int_max(&self) -> u128 {
636        u128::MAX >> (128 - self.bits())
637    }
638}
639
640// Panicking addition, subtraction and multiplication for convenience.
641// Avoid during layout computation, return `LayoutError` instead.
642
643impl Add for Size {
644    type Output = Size;
645    #[inline]
646    fn add(self, other: Size) -> Size {
647        Size::from_bytes(self.bytes().checked_add(other.bytes()).unwrap_or_else(|| {
648            panic!("Size::add: {} + {} doesn't fit in u64", self.bytes(), other.bytes())
649        }))
650    }
651}
652
653impl Sub for Size {
654    type Output = Size;
655    #[inline]
656    fn sub(self, other: Size) -> Size {
657        Size::from_bytes(self.bytes().checked_sub(other.bytes()).unwrap_or_else(|| {
658            panic!("Size::sub: {} - {} would result in negative size", self.bytes(), other.bytes())
659        }))
660    }
661}
662
663impl Mul<Size> for u64 {
664    type Output = Size;
665    #[inline]
666    fn mul(self, size: Size) -> Size {
667        size * self
668    }
669}
670
671impl Mul<u64> for Size {
672    type Output = Size;
673    #[inline]
674    fn mul(self, count: u64) -> Size {
675        match self.bytes().checked_mul(count) {
676            Some(bytes) => Size::from_bytes(bytes),
677            None => panic!("Size::mul: {} * {} doesn't fit in u64", self.bytes(), count),
678        }
679    }
680}
681
682impl AddAssign for Size {
683    #[inline]
684    fn add_assign(&mut self, other: Size) {
685        *self = *self + other;
686    }
687}
688
689#[cfg(feature = "nightly")]
690impl Step for Size {
691    #[inline]
692    fn steps_between(start: &Self, end: &Self) -> (usize, Option<usize>) {
693        u64::steps_between(&start.bytes(), &end.bytes())
694    }
695
696    #[inline]
697    fn forward_checked(start: Self, count: usize) -> Option<Self> {
698        u64::forward_checked(start.bytes(), count).map(Self::from_bytes)
699    }
700
701    #[inline]
702    fn forward(start: Self, count: usize) -> Self {
703        Self::from_bytes(u64::forward(start.bytes(), count))
704    }
705
706    #[inline]
707    unsafe fn forward_unchecked(start: Self, count: usize) -> Self {
708        Self::from_bytes(unsafe { u64::forward_unchecked(start.bytes(), count) })
709    }
710
711    #[inline]
712    fn backward_checked(start: Self, count: usize) -> Option<Self> {
713        u64::backward_checked(start.bytes(), count).map(Self::from_bytes)
714    }
715
716    #[inline]
717    fn backward(start: Self, count: usize) -> Self {
718        Self::from_bytes(u64::backward(start.bytes(), count))
719    }
720
721    #[inline]
722    unsafe fn backward_unchecked(start: Self, count: usize) -> Self {
723        Self::from_bytes(unsafe { u64::backward_unchecked(start.bytes(), count) })
724    }
725}
726
727/// Alignment of a type in bytes (always a power of two).
728#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
729#[cfg_attr(
730    feature = "nightly",
731    derive(Encodable_NoContext, Decodable_NoContext, HashStable_Generic)
732)]
733pub struct Align {
734    pow2: u8,
735}
736
737// This is debug-printed a lot in larger structs, don't waste too much space there
738impl fmt::Debug for Align {
739    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
740        write!(f, "Align({} bytes)", self.bytes())
741    }
742}
743
744#[derive(Clone, Copy)]
745pub enum AlignFromBytesError {
746    NotPowerOfTwo(u64),
747    TooLarge(u64),
748}
749
750impl AlignFromBytesError {
751    pub fn diag_ident(self) -> &'static str {
752        match self {
753            Self::NotPowerOfTwo(_) => "not_power_of_two",
754            Self::TooLarge(_) => "too_large",
755        }
756    }
757
758    pub fn align(self) -> u64 {
759        let (Self::NotPowerOfTwo(align) | Self::TooLarge(align)) = self;
760        align
761    }
762}
763
764impl fmt::Debug for AlignFromBytesError {
765    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
766        fmt::Display::fmt(self, f)
767    }
768}
769
770impl fmt::Display for AlignFromBytesError {
771    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
772        match self {
773            AlignFromBytesError::NotPowerOfTwo(align) => write!(f, "`{align}` is not a power of 2"),
774            AlignFromBytesError::TooLarge(align) => write!(f, "`{align}` is too large"),
775        }
776    }
777}
778
779impl Align {
780    pub const ONE: Align = Align { pow2: 0 };
781    pub const EIGHT: Align = Align { pow2: 3 };
782    // LLVM has a maximal supported alignment of 2^29, we inherit that.
783    pub const MAX: Align = Align { pow2: 29 };
784
785    #[inline]
786    pub fn from_bits(bits: u64) -> Result<Align, AlignFromBytesError> {
787        Align::from_bytes(Size::from_bits(bits).bytes())
788    }
789
790    #[inline]
791    pub const fn from_bytes(align: u64) -> Result<Align, AlignFromBytesError> {
792        // Treat an alignment of 0 bytes like 1-byte alignment.
793        if align == 0 {
794            return Ok(Align::ONE);
795        }
796
797        #[cold]
798        const fn not_power_of_2(align: u64) -> AlignFromBytesError {
799            AlignFromBytesError::NotPowerOfTwo(align)
800        }
801
802        #[cold]
803        const fn too_large(align: u64) -> AlignFromBytesError {
804            AlignFromBytesError::TooLarge(align)
805        }
806
807        let tz = align.trailing_zeros();
808        if align != (1 << tz) {
809            return Err(not_power_of_2(align));
810        }
811
812        let pow2 = tz as u8;
813        if pow2 > Self::MAX.pow2 {
814            return Err(too_large(align));
815        }
816
817        Ok(Align { pow2 })
818    }
819
820    #[inline]
821    pub const fn bytes(self) -> u64 {
822        1 << self.pow2
823    }
824
825    #[inline]
826    pub fn bytes_usize(self) -> usize {
827        self.bytes().try_into().unwrap()
828    }
829
830    #[inline]
831    pub const fn bits(self) -> u64 {
832        self.bytes() * 8
833    }
834
835    #[inline]
836    pub fn bits_usize(self) -> usize {
837        self.bits().try_into().unwrap()
838    }
839
840    /// Obtain the greatest factor of `size` that is an alignment
841    /// (the largest power of two the Size is a multiple of).
842    ///
843    /// Note that all numbers are factors of 0
844    #[inline]
845    pub fn max_aligned_factor(size: Size) -> Align {
846        Align { pow2: size.bytes().trailing_zeros() as u8 }
847    }
848
849    /// Reduces Align to an aligned factor of `size`.
850    #[inline]
851    pub fn restrict_for_offset(self, size: Size) -> Align {
852        self.min(Align::max_aligned_factor(size))
853    }
854}
855
856/// A pair of alignments, ABI-mandated and preferred.
857///
858/// The "preferred" alignment is an LLVM concept that is virtually meaningless to Rust code:
859/// it is not exposed semantically to programmers nor can they meaningfully affect it.
860/// The only concern for us is that preferred alignment must not be less than the mandated alignment
861/// and thus in practice the two values are almost always identical.
862///
863/// An example of a rare thing actually affected by preferred alignment is aligning of statics.
864/// It is of effectively no consequence for layout in structs and on the stack.
865#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
866#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
867pub struct AbiAndPrefAlign {
868    pub abi: Align,
869    pub pref: Align,
870}
871
872impl AbiAndPrefAlign {
873    #[inline]
874    pub fn new(align: Align) -> AbiAndPrefAlign {
875        AbiAndPrefAlign { abi: align, pref: align }
876    }
877
878    #[inline]
879    pub fn min(self, other: AbiAndPrefAlign) -> AbiAndPrefAlign {
880        AbiAndPrefAlign { abi: self.abi.min(other.abi), pref: self.pref.min(other.pref) }
881    }
882
883    #[inline]
884    pub fn max(self, other: AbiAndPrefAlign) -> AbiAndPrefAlign {
885        AbiAndPrefAlign { abi: self.abi.max(other.abi), pref: self.pref.max(other.pref) }
886    }
887}
888
889/// Integers, also used for enum discriminants.
890#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
891#[cfg_attr(
892    feature = "nightly",
893    derive(Encodable_NoContext, Decodable_NoContext, HashStable_Generic)
894)]
895pub enum Integer {
896    I8,
897    I16,
898    I32,
899    I64,
900    I128,
901}
902
903impl Integer {
904    pub fn int_ty_str(self) -> &'static str {
905        use Integer::*;
906        match self {
907            I8 => "i8",
908            I16 => "i16",
909            I32 => "i32",
910            I64 => "i64",
911            I128 => "i128",
912        }
913    }
914
915    pub fn uint_ty_str(self) -> &'static str {
916        use Integer::*;
917        match self {
918            I8 => "u8",
919            I16 => "u16",
920            I32 => "u32",
921            I64 => "u64",
922            I128 => "u128",
923        }
924    }
925
926    #[inline]
927    pub fn size(self) -> Size {
928        use Integer::*;
929        match self {
930            I8 => Size::from_bytes(1),
931            I16 => Size::from_bytes(2),
932            I32 => Size::from_bytes(4),
933            I64 => Size::from_bytes(8),
934            I128 => Size::from_bytes(16),
935        }
936    }
937
938    /// Gets the Integer type from an IntegerType.
939    pub fn from_attr<C: HasDataLayout>(cx: &C, ity: IntegerType) -> Integer {
940        let dl = cx.data_layout();
941
942        match ity {
943            IntegerType::Pointer(_) => dl.ptr_sized_integer(),
944            IntegerType::Fixed(x, _) => x,
945        }
946    }
947
948    pub fn align<C: HasDataLayout>(self, cx: &C) -> AbiAndPrefAlign {
949        use Integer::*;
950        let dl = cx.data_layout();
951
952        match self {
953            I8 => dl.i8_align,
954            I16 => dl.i16_align,
955            I32 => dl.i32_align,
956            I64 => dl.i64_align,
957            I128 => dl.i128_align,
958        }
959    }
960
961    /// Returns the largest signed value that can be represented by this Integer.
962    #[inline]
963    pub fn signed_max(self) -> i128 {
964        use Integer::*;
965        match self {
966            I8 => i8::MAX as i128,
967            I16 => i16::MAX as i128,
968            I32 => i32::MAX as i128,
969            I64 => i64::MAX as i128,
970            I128 => i128::MAX,
971        }
972    }
973
974    /// Finds the smallest Integer type which can represent the signed value.
975    #[inline]
976    pub fn fit_signed(x: i128) -> Integer {
977        use Integer::*;
978        match x {
979            -0x0000_0000_0000_0080..=0x0000_0000_0000_007f => I8,
980            -0x0000_0000_0000_8000..=0x0000_0000_0000_7fff => I16,
981            -0x0000_0000_8000_0000..=0x0000_0000_7fff_ffff => I32,
982            -0x8000_0000_0000_0000..=0x7fff_ffff_ffff_ffff => I64,
983            _ => I128,
984        }
985    }
986
987    /// Finds the smallest Integer type which can represent the unsigned value.
988    #[inline]
989    pub fn fit_unsigned(x: u128) -> Integer {
990        use Integer::*;
991        match x {
992            0..=0x0000_0000_0000_00ff => I8,
993            0..=0x0000_0000_0000_ffff => I16,
994            0..=0x0000_0000_ffff_ffff => I32,
995            0..=0xffff_ffff_ffff_ffff => I64,
996            _ => I128,
997        }
998    }
999
1000    /// Finds the smallest integer with the given alignment.
1001    pub fn for_align<C: HasDataLayout>(cx: &C, wanted: Align) -> Option<Integer> {
1002        use Integer::*;
1003        let dl = cx.data_layout();
1004
1005        [I8, I16, I32, I64, I128].into_iter().find(|&candidate| {
1006            wanted == candidate.align(dl).abi && wanted.bytes() == candidate.size().bytes()
1007        })
1008    }
1009
1010    /// Find the largest integer with the given alignment or less.
1011    pub fn approximate_align<C: HasDataLayout>(cx: &C, wanted: Align) -> Integer {
1012        use Integer::*;
1013        let dl = cx.data_layout();
1014
1015        // FIXME(eddyb) maybe include I128 in the future, when it works everywhere.
1016        for candidate in [I64, I32, I16] {
1017            if wanted >= candidate.align(dl).abi && wanted.bytes() >= candidate.size().bytes() {
1018                return candidate;
1019            }
1020        }
1021        I8
1022    }
1023
1024    // FIXME(eddyb) consolidate this and other methods that find the appropriate
1025    // `Integer` given some requirements.
1026    #[inline]
1027    pub fn from_size(size: Size) -> Result<Self, String> {
1028        match size.bits() {
1029            8 => Ok(Integer::I8),
1030            16 => Ok(Integer::I16),
1031            32 => Ok(Integer::I32),
1032            64 => Ok(Integer::I64),
1033            128 => Ok(Integer::I128),
1034            _ => Err(format!("rust does not support integers with {} bits", size.bits())),
1035        }
1036    }
1037}
1038
1039/// Floating-point types.
1040#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
1041#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
1042pub enum Float {
1043    F16,
1044    F32,
1045    F64,
1046    F128,
1047}
1048
1049impl Float {
1050    pub fn size(self) -> Size {
1051        use Float::*;
1052
1053        match self {
1054            F16 => Size::from_bits(16),
1055            F32 => Size::from_bits(32),
1056            F64 => Size::from_bits(64),
1057            F128 => Size::from_bits(128),
1058        }
1059    }
1060
1061    pub fn align<C: HasDataLayout>(self, cx: &C) -> AbiAndPrefAlign {
1062        use Float::*;
1063        let dl = cx.data_layout();
1064
1065        match self {
1066            F16 => dl.f16_align,
1067            F32 => dl.f32_align,
1068            F64 => dl.f64_align,
1069            F128 => dl.f128_align,
1070        }
1071    }
1072}
1073
1074/// Fundamental unit of memory access and layout.
1075#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
1076#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
1077pub enum Primitive {
1078    /// The `bool` is the signedness of the `Integer` type.
1079    ///
1080    /// One would think we would not care about such details this low down,
1081    /// but some ABIs are described in terms of C types and ISAs where the
1082    /// integer arithmetic is done on {sign,zero}-extended registers, e.g.
1083    /// a negative integer passed by zero-extension will appear positive in
1084    /// the callee, and most operations on it will produce the wrong values.
1085    Int(Integer, bool),
1086    Float(Float),
1087    Pointer(AddressSpace),
1088}
1089
1090impl Primitive {
1091    pub fn size<C: HasDataLayout>(self, cx: &C) -> Size {
1092        use Primitive::*;
1093        let dl = cx.data_layout();
1094
1095        match self {
1096            Int(i, _) => i.size(),
1097            Float(f) => f.size(),
1098            // FIXME(erikdesjardins): ignoring address space is technically wrong, pointers in
1099            // different address spaces can have different sizes
1100            // (but TargetDataLayout doesn't currently parse that part of the DL string)
1101            Pointer(_) => dl.pointer_size,
1102        }
1103    }
1104
1105    pub fn align<C: HasDataLayout>(self, cx: &C) -> AbiAndPrefAlign {
1106        use Primitive::*;
1107        let dl = cx.data_layout();
1108
1109        match self {
1110            Int(i, _) => i.align(dl),
1111            Float(f) => f.align(dl),
1112            // FIXME(erikdesjardins): ignoring address space is technically wrong, pointers in
1113            // different address spaces can have different alignments
1114            // (but TargetDataLayout doesn't currently parse that part of the DL string)
1115            Pointer(_) => dl.pointer_align,
1116        }
1117    }
1118}
1119
1120/// Inclusive wrap-around range of valid values, that is, if
1121/// start > end, it represents `start..=MAX`, followed by `0..=end`.
1122///
1123/// That is, for an i8 primitive, a range of `254..=2` means following
1124/// sequence:
1125///
1126///    254 (-2), 255 (-1), 0, 1, 2
1127///
1128/// This is intended specifically to mirror LLVM’s `!range` metadata semantics.
1129#[derive(Clone, Copy, PartialEq, Eq, Hash)]
1130#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
1131pub struct WrappingRange {
1132    pub start: u128,
1133    pub end: u128,
1134}
1135
1136impl WrappingRange {
1137    pub fn full(size: Size) -> Self {
1138        Self { start: 0, end: size.unsigned_int_max() }
1139    }
1140
1141    /// Returns `true` if `v` is contained in the range.
1142    #[inline(always)]
1143    pub fn contains(&self, v: u128) -> bool {
1144        if self.start <= self.end {
1145            self.start <= v && v <= self.end
1146        } else {
1147            self.start <= v || v <= self.end
1148        }
1149    }
1150
1151    /// Returns `self` with replaced `start`
1152    #[inline(always)]
1153    fn with_start(mut self, start: u128) -> Self {
1154        self.start = start;
1155        self
1156    }
1157
1158    /// Returns `self` with replaced `end`
1159    #[inline(always)]
1160    fn with_end(mut self, end: u128) -> Self {
1161        self.end = end;
1162        self
1163    }
1164
1165    /// Returns `true` if `size` completely fills the range.
1166    #[inline]
1167    fn is_full_for(&self, size: Size) -> bool {
1168        let max_value = size.unsigned_int_max();
1169        debug_assert!(self.start <= max_value && self.end <= max_value);
1170        self.start == (self.end.wrapping_add(1) & max_value)
1171    }
1172}
1173
1174impl fmt::Debug for WrappingRange {
1175    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
1176        if self.start > self.end {
1177            write!(fmt, "(..={}) | ({}..)", self.end, self.start)?;
1178        } else {
1179            write!(fmt, "{}..={}", self.start, self.end)?;
1180        }
1181        Ok(())
1182    }
1183}
1184
1185/// Information about one scalar component of a Rust type.
1186#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
1187#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
1188pub enum Scalar {
1189    Initialized {
1190        value: Primitive,
1191
1192        // FIXME(eddyb) always use the shortest range, e.g., by finding
1193        // the largest space between two consecutive valid values and
1194        // taking everything else as the (shortest) valid range.
1195        valid_range: WrappingRange,
1196    },
1197    Union {
1198        /// Even for unions, we need to use the correct registers for the kind of
1199        /// values inside the union, so we keep the `Primitive` type around. We
1200        /// also use it to compute the size of the scalar.
1201        /// However, unions never have niches and even allow undef,
1202        /// so there is no `valid_range`.
1203        value: Primitive,
1204    },
1205}
1206
1207impl Scalar {
1208    #[inline]
1209    pub fn is_bool(&self) -> bool {
1210        use Integer::*;
1211        matches!(
1212            self,
1213            Scalar::Initialized {
1214                value: Primitive::Int(I8, false),
1215                valid_range: WrappingRange { start: 0, end: 1 }
1216            }
1217        )
1218    }
1219
1220    /// Get the primitive representation of this type, ignoring the valid range and whether the
1221    /// value is allowed to be undefined (due to being a union).
1222    pub fn primitive(&self) -> Primitive {
1223        match *self {
1224            Scalar::Initialized { value, .. } | Scalar::Union { value } => value,
1225        }
1226    }
1227
1228    pub fn align(self, cx: &impl HasDataLayout) -> AbiAndPrefAlign {
1229        self.primitive().align(cx)
1230    }
1231
1232    pub fn size(self, cx: &impl HasDataLayout) -> Size {
1233        self.primitive().size(cx)
1234    }
1235
1236    #[inline]
1237    pub fn to_union(&self) -> Self {
1238        Self::Union { value: self.primitive() }
1239    }
1240
1241    #[inline]
1242    pub fn valid_range(&self, cx: &impl HasDataLayout) -> WrappingRange {
1243        match *self {
1244            Scalar::Initialized { valid_range, .. } => valid_range,
1245            Scalar::Union { value } => WrappingRange::full(value.size(cx)),
1246        }
1247    }
1248
1249    #[inline]
1250    /// Allows the caller to mutate the valid range. This operation will panic if attempted on a
1251    /// union.
1252    pub fn valid_range_mut(&mut self) -> &mut WrappingRange {
1253        match self {
1254            Scalar::Initialized { valid_range, .. } => valid_range,
1255            Scalar::Union { .. } => panic!("cannot change the valid range of a union"),
1256        }
1257    }
1258
1259    /// Returns `true` if all possible numbers are valid, i.e `valid_range` covers the whole
1260    /// layout.
1261    #[inline]
1262    pub fn is_always_valid<C: HasDataLayout>(&self, cx: &C) -> bool {
1263        match *self {
1264            Scalar::Initialized { valid_range, .. } => valid_range.is_full_for(self.size(cx)),
1265            Scalar::Union { .. } => true,
1266        }
1267    }
1268
1269    /// Returns `true` if this type can be left uninit.
1270    #[inline]
1271    pub fn is_uninit_valid(&self) -> bool {
1272        match *self {
1273            Scalar::Initialized { .. } => false,
1274            Scalar::Union { .. } => true,
1275        }
1276    }
1277
1278    /// Returns `true` if this is a signed integer scalar
1279    #[inline]
1280    pub fn is_signed(&self) -> bool {
1281        match self.primitive() {
1282            Primitive::Int(_, signed) => signed,
1283            _ => false,
1284        }
1285    }
1286}
1287
1288// NOTE: This struct is generic over the FieldIdx for rust-analyzer usage.
1289/// Describes how the fields of a type are located in memory.
1290#[derive(PartialEq, Eq, Hash, Clone, Debug)]
1291#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
1292pub enum FieldsShape<FieldIdx: Idx> {
1293    /// Scalar primitives and `!`, which never have fields.
1294    Primitive,
1295
1296    /// All fields start at no offset. The `usize` is the field count.
1297    Union(NonZeroUsize),
1298
1299    /// Array/vector-like placement, with all fields of identical types.
1300    Array { stride: Size, count: u64 },
1301
1302    /// Struct-like placement, with precomputed offsets.
1303    ///
1304    /// Fields are guaranteed to not overlap, but note that gaps
1305    /// before, between and after all the fields are NOT always
1306    /// padding, and as such their contents may not be discarded.
1307    /// For example, enum variants leave a gap at the start,
1308    /// where the discriminant field in the enum layout goes.
1309    Arbitrary {
1310        /// Offsets for the first byte of each field,
1311        /// ordered to match the source definition order.
1312        /// This vector does not go in increasing order.
1313        // FIXME(eddyb) use small vector optimization for the common case.
1314        offsets: IndexVec<FieldIdx, Size>,
1315
1316        /// Maps source order field indices to memory order indices,
1317        /// depending on how the fields were reordered (if at all).
1318        /// This is a permutation, with both the source order and the
1319        /// memory order using the same (0..n) index ranges.
1320        ///
1321        /// Note that during computation of `memory_index`, sometimes
1322        /// it is easier to operate on the inverse mapping (that is,
1323        /// from memory order to source order), and that is usually
1324        /// named `inverse_memory_index`.
1325        ///
1326        // FIXME(eddyb) build a better abstraction for permutations, if possible.
1327        // FIXME(camlorn) also consider small vector optimization here.
1328        memory_index: IndexVec<FieldIdx, u32>,
1329    },
1330}
1331
1332impl<FieldIdx: Idx> FieldsShape<FieldIdx> {
1333    #[inline]
1334    pub fn count(&self) -> usize {
1335        match *self {
1336            FieldsShape::Primitive => 0,
1337            FieldsShape::Union(count) => count.get(),
1338            FieldsShape::Array { count, .. } => count.try_into().unwrap(),
1339            FieldsShape::Arbitrary { ref offsets, .. } => offsets.len(),
1340        }
1341    }
1342
1343    #[inline]
1344    pub fn offset(&self, i: usize) -> Size {
1345        match *self {
1346            FieldsShape::Primitive => {
1347                unreachable!("FieldsShape::offset: `Primitive`s have no fields")
1348            }
1349            FieldsShape::Union(count) => {
1350                assert!(i < count.get(), "tried to access field {i} of union with {count} fields");
1351                Size::ZERO
1352            }
1353            FieldsShape::Array { stride, count } => {
1354                let i = u64::try_from(i).unwrap();
1355                assert!(i < count, "tried to access field {i} of array with {count} fields");
1356                stride * i
1357            }
1358            FieldsShape::Arbitrary { ref offsets, .. } => offsets[FieldIdx::new(i)],
1359        }
1360    }
1361
1362    #[inline]
1363    pub fn memory_index(&self, i: usize) -> usize {
1364        match *self {
1365            FieldsShape::Primitive => {
1366                unreachable!("FieldsShape::memory_index: `Primitive`s have no fields")
1367            }
1368            FieldsShape::Union(_) | FieldsShape::Array { .. } => i,
1369            FieldsShape::Arbitrary { ref memory_index, .. } => {
1370                memory_index[FieldIdx::new(i)].try_into().unwrap()
1371            }
1372        }
1373    }
1374
1375    /// Gets source indices of the fields by increasing offsets.
1376    #[inline]
1377    pub fn index_by_increasing_offset(&self) -> impl ExactSizeIterator<Item = usize> {
1378        let mut inverse_small = [0u8; 64];
1379        let mut inverse_big = IndexVec::new();
1380        let use_small = self.count() <= inverse_small.len();
1381
1382        // We have to write this logic twice in order to keep the array small.
1383        if let FieldsShape::Arbitrary { ref memory_index, .. } = *self {
1384            if use_small {
1385                for (field_idx, &mem_idx) in memory_index.iter_enumerated() {
1386                    inverse_small[mem_idx as usize] = field_idx.index() as u8;
1387                }
1388            } else {
1389                inverse_big = memory_index.invert_bijective_mapping();
1390            }
1391        }
1392
1393        // Primitives don't really have fields in the way that structs do,
1394        // but having this return an empty iterator for them is unhelpful
1395        // since that makes them look kinda like ZSTs, which they're not.
1396        let pseudofield_count = if let FieldsShape::Primitive = self { 1 } else { self.count() };
1397
1398        (0..pseudofield_count).map(move |i| match *self {
1399            FieldsShape::Primitive | FieldsShape::Union(_) | FieldsShape::Array { .. } => i,
1400            FieldsShape::Arbitrary { .. } => {
1401                if use_small {
1402                    inverse_small[i] as usize
1403                } else {
1404                    inverse_big[i as u32].index()
1405                }
1406            }
1407        })
1408    }
1409}
1410
1411/// An identifier that specifies the address space that some operation
1412/// should operate on. Special address spaces have an effect on code generation,
1413/// depending on the target and the address spaces it implements.
1414#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
1415#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
1416pub struct AddressSpace(pub u32);
1417
1418impl AddressSpace {
1419    /// The default address space, corresponding to data space.
1420    pub const DATA: Self = AddressSpace(0);
1421}
1422
1423/// The way we represent values to the backend
1424///
1425/// Previously this was conflated with the "ABI" a type is given, as in the platform-specific ABI.
1426/// In reality, this implies little about that, but is mostly used to describe the syntactic form
1427/// emitted for the backend, as most backends handle SSA values and blobs of memory differently.
1428/// The psABI may need consideration in doing so, but this enum does not constitute a promise for
1429/// how the value will be lowered to the calling convention, in itself.
1430///
1431/// Generally, a codegen backend will prefer to handle smaller values as a scalar or short vector,
1432/// and larger values will usually prefer to be represented as memory.
1433#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
1434#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
1435pub enum BackendRepr {
1436    Scalar(Scalar),
1437    ScalarPair(Scalar, Scalar),
1438    SimdVector {
1439        element: Scalar,
1440        count: u64,
1441    },
1442    // FIXME: I sometimes use memory, sometimes use an IR aggregate!
1443    Memory {
1444        /// If true, the size is exact, otherwise it's only a lower bound.
1445        sized: bool,
1446    },
1447}
1448
1449impl BackendRepr {
1450    /// Returns `true` if the layout corresponds to an unsized type.
1451    #[inline]
1452    pub fn is_unsized(&self) -> bool {
1453        match *self {
1454            BackendRepr::Scalar(_)
1455            | BackendRepr::ScalarPair(..)
1456            | BackendRepr::SimdVector { .. } => false,
1457            BackendRepr::Memory { sized } => !sized,
1458        }
1459    }
1460
1461    #[inline]
1462    pub fn is_sized(&self) -> bool {
1463        !self.is_unsized()
1464    }
1465
1466    /// Returns `true` if this is a single signed integer scalar.
1467    /// Sanity check: panics if this is not a scalar type (see PR #70189).
1468    #[inline]
1469    pub fn is_signed(&self) -> bool {
1470        match self {
1471            BackendRepr::Scalar(scal) => scal.is_signed(),
1472            _ => panic!("`is_signed` on non-scalar ABI {self:?}"),
1473        }
1474    }
1475
1476    /// Returns `true` if this is a scalar type
1477    #[inline]
1478    pub fn is_scalar(&self) -> bool {
1479        matches!(*self, BackendRepr::Scalar(_))
1480    }
1481
1482    /// Returns `true` if this is a bool
1483    #[inline]
1484    pub fn is_bool(&self) -> bool {
1485        matches!(*self, BackendRepr::Scalar(s) if s.is_bool())
1486    }
1487
1488    /// The psABI alignment for a `Scalar` or `ScalarPair`
1489    ///
1490    /// `None` for other variants.
1491    pub fn scalar_align<C: HasDataLayout>(&self, cx: &C) -> Option<Align> {
1492        match *self {
1493            BackendRepr::Scalar(s) => Some(s.align(cx).abi),
1494            BackendRepr::ScalarPair(s1, s2) => Some(s1.align(cx).max(s2.align(cx)).abi),
1495            // The align of a Vector can vary in surprising ways
1496            BackendRepr::SimdVector { .. } | BackendRepr::Memory { .. } => None,
1497        }
1498    }
1499
1500    /// The psABI size for a `Scalar` or `ScalarPair`
1501    ///
1502    /// `None` for other variants
    pub fn scalar_size<C: HasDataLayout>(&self, cx: &C) -> Option<Size> {
        match *self {
            // No padding in scalars.
            BackendRepr::Scalar(s) => Some(s.size(cx)),
            // May have some padding between the pair.
            BackendRepr::ScalarPair(s1, s2) => {
                let field2_offset = s1.size(cx).align_to(s2.align(cx).abi);
                let size = (field2_offset + s2.size(cx)).align_to(
                    self.scalar_align(cx)
                        // We absolutely must have an answer here or everything is FUBAR.
                        .unwrap(),
                );
                Some(size)
            }
            // The size of a Vector can vary in surprising ways
            BackendRepr::SimdVector { .. } | BackendRepr::Memory { .. } => None,
        }
    }

    /// Discard validity range information and allow undef.
    pub fn to_union(&self) -> Self {
        match *self {
            BackendRepr::Scalar(s) => BackendRepr::Scalar(s.to_union()),
            BackendRepr::ScalarPair(s1, s2) => {
                BackendRepr::ScalarPair(s1.to_union(), s2.to_union())
            }
            BackendRepr::SimdVector { element, count } => {
                BackendRepr::SimdVector { element: element.to_union(), count }
            }
            BackendRepr::Memory { .. } => BackendRepr::Memory { sized: true },
        }
    }

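    /// Checks whether two representations are equal if validity ranges are ignored;
    /// primitives (including their sign) and vector lengths must still match exactly.
    /// For example (illustrative), the `Scalar` reprs of `char` and `u32` are equal up
    /// to validity, since they differ only in `char`'s restricted valid range.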
    pub fn eq_up_to_validity(&self, other: &Self) -> bool {
        match (self, other) {
            // Scalar, Vector, ScalarPair have `Scalar` in them where we ignore validity ranges.
            // We do *not* ignore the sign since it matters for some ABIs (e.g. s390x).
            (BackendRepr::Scalar(l), BackendRepr::Scalar(r)) => l.primitive() == r.primitive(),
            (
                BackendRepr::SimdVector { element: element_l, count: count_l },
                BackendRepr::SimdVector { element: element_r, count: count_r },
            ) => element_l.primitive() == element_r.primitive() && count_l == count_r,
            (BackendRepr::ScalarPair(l1, l2), BackendRepr::ScalarPair(r1, r2)) => {
                l1.primitive() == r1.primitive() && l2.primitive() == r2.primitive()
            }
            // Everything else must be strictly identical.
            _ => self == other,
        }
    }
}

// NOTE: This enum is generic over the FieldIdx and VariantIdx for rust-analyzer usage.
#[derive(PartialEq, Eq, Hash, Clone, Debug)]
#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
pub enum Variants<FieldIdx: Idx, VariantIdx: Idx> {
    /// A type with no valid variants. Must be uninhabited.
    Empty,

    /// Single enum variants, structs/tuples, unions, and all non-ADTs.
    Single {
        /// Always `0` for types that cannot have multiple variants.
        index: VariantIdx,
    },

    /// Enum-likes with more than one variant: each variant comes with
    /// a *discriminant* (usually the same as the variant index but the user can
    /// assign explicit discriminant values). That discriminant is encoded
    /// as a *tag* on the machine. The layout of each variant is
    /// a struct, and they all have space reserved for the tag.
    /// For enums, the tag is the sole field of the layout.
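    ///
    /// For example (illustrative), `Result<u32, u32>` is `Multiple`: it has a
    /// `Direct`-encoded tag plus one layout per variant, each reserving space for
    /// that tag.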
    Multiple {
        tag: Scalar,
        tag_encoding: TagEncoding<VariantIdx>,
        tag_field: FieldIdx,
        variants: IndexVec<VariantIdx, LayoutData<FieldIdx, VariantIdx>>,
    },
}

// NOTE: This enum is generic over the VariantIdx for rust-analyzer usage.
#[derive(PartialEq, Eq, Hash, Clone, Debug)]
#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
pub enum TagEncoding<VariantIdx: Idx> {
    /// The tag directly stores the discriminant, but possibly with a smaller layout
    /// (so converting the tag to the discriminant can require sign extension).
    Direct,

    /// Niche (values invalid for a type) encoding the discriminant:
    /// Discriminant and variant index coincide.
    /// The variant `untagged_variant` contains a niche at an arbitrary
    /// offset (field `tag_field` of the enum), which for a variant with
    /// discriminant `d` is set to
    /// `(d - niche_variants.start).wrapping_add(niche_start)`
    /// (this is wrapping arithmetic using the type of the niche field).
    ///
    /// For example, `Option<(usize, &T)>` is represented such that
    /// `None` has a null pointer for the second tuple field, and
    /// `Some` is the identity function (with a non-null reference).
    ///
    /// Other variants that are not `untagged_variant` and that are outside the `niche_variants`
    /// range cannot be represented; they must be uninhabited.
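    ///
    /// Plugging concrete numbers into the formula (illustrative): for
    /// `Option<(usize, &T)>`, `untagged_variant` is `Some`, `niche_variants` covers
    /// just `None` (discriminant 0), and `niche_start` is 0, so `None` stores
    /// `(0 - 0).wrapping_add(0) = 0` in the reference field, i.e. the null pointer.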
    Niche {
        untagged_variant: VariantIdx,
        /// This range *may* contain `untagged_variant`; that is then just a "dead value" and
        /// not used to encode anything.
        niche_variants: RangeInclusive<VariantIdx>,
        /// This is inbounds of the type of the niche field
        /// (not sign-extended, i.e., all bits beyond the niche field size are 0).
        niche_start: u128,
    },
}

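/// A *niche* within a layout: a primitive at some offset together with the range of
/// values it may validly hold, so that the remaining (invalid) values are available
/// for encoding enum tags.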
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
pub struct Niche {
    pub offset: Size,
    pub value: Primitive,
    pub valid_range: WrappingRange,
}

impl Niche {
    pub fn from_scalar<C: HasDataLayout>(cx: &C, offset: Size, scalar: Scalar) -> Option<Self> {
        let Scalar::Initialized { value, valid_range } = scalar else { return None };
        let niche = Niche { offset, value, valid_range };
        if niche.available(cx) > 0 { Some(niche) } else { None }
    }

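    /// Returns how many invalid values (values outside `valid_range`) this niche has,
    /// i.e. how many tag values an enum could encode here.
    ///
    /// For example (illustrative): a `bool` byte has `valid_range` `0..=1`, so the
    /// invalid values are `2..=255` and `available` returns `254`.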
    pub fn available<C: HasDataLayout>(&self, cx: &C) -> u128 {
        let Self { value, valid_range: v, .. } = *self;
        let size = value.size(cx);
        assert!(size.bits() <= 128);
        let max_value = size.unsigned_int_max();

        // Find out how many values are outside the valid range.
        let niche = v.end.wrapping_add(1)..v.start;
        niche.end.wrapping_sub(niche.start) & max_value
    }

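    /// Tries to reserve `count` invalid values from this niche, returning the value
    /// assigned to the first reserved slot together with the widened `Scalar` on
    /// success, or `None` if the niche is too small.
    ///
    /// The effect is observable from safe code; e.g. `Option`'s `None` fits into the
    /// niche of `NonZeroU8`:
    ///
    /// ```
    /// use std::mem::size_of;
    /// use std::num::NonZeroU8;
    ///
    /// assert_eq!(size_of::<Option<NonZeroU8>>(), size_of::<NonZeroU8>());
    /// ```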
    pub fn reserve<C: HasDataLayout>(&self, cx: &C, count: u128) -> Option<(u128, Scalar)> {
        assert!(count > 0);

        let Self { value, valid_range: v, .. } = *self;
        let size = value.size(cx);
        assert!(size.bits() <= 128);
        let max_value = size.unsigned_int_max();

        let niche = v.end.wrapping_add(1)..v.start;
        let available = niche.end.wrapping_sub(niche.start) & max_value;
        if count > available {
            return None;
        }

        // Extend the range of valid values being reserved by moving either the `v.start` or
        // `v.end` bound. Given an eventual `Option<T>`, we try to maximize the chance for `None`
        // to occupy the niche of zero. This is accomplished by preferring enums with 2 variants
        // (`count == 1`) and always taking the shortest path to niche zero. Having `None` in
        // niche zero can enable some special optimizations.
        //
        // Bound selection criteria:
        // 1. Select closest to zero given wrapping semantics.
        // 2. Avoid moving past zero if possible.
        //
        // In practice this means that enums with `count > 1` are unlikely to claim niche zero,
        // since they have to fit perfectly. If niche zero is already reserved, the selection of
        // bounds is of little interest.
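        //
        // Worked example (illustrative): the niche of `NonZeroU8` is a byte with
        // `valid_range` `1..=255`, so `distance_end_zero == 0` and the final `else`
        // branch below picks `move_end`, wrapping the end around to 0. The reserved
        // value is therefore 0, which is why `Option<NonZeroU8>` represents `None` as
        // the byte 0.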
        let move_start = |v: WrappingRange| {
            let start = v.start.wrapping_sub(count) & max_value;
            Some((start, Scalar::Initialized { value, valid_range: v.with_start(start) }))
        };
        let move_end = |v: WrappingRange| {
            let start = v.end.wrapping_add(1) & max_value;
            let end = v.end.wrapping_add(count) & max_value;
            Some((start, Scalar::Initialized { value, valid_range: v.with_end(end) }))
        };
        let distance_end_zero = max_value - v.end;
        if v.start > v.end {
            // zero is unavailable because wrapping occurs
            move_end(v)
        } else if v.start <= distance_end_zero {
            if count <= v.start {
                move_start(v)
            } else {
                // moved past zero, use other bound
                move_end(v)
            }
        } else {
            let end = v.end.wrapping_add(count) & max_value;
            let overshot_zero = (1..=v.end).contains(&end);
            if overshot_zero {
                // moved past zero, use other bound
                move_start(v)
            } else {
                move_end(v)
            }
        }
    }
}

// NOTE: This struct is generic over the FieldIdx and VariantIdx for rust-analyzer usage.
#[derive(PartialEq, Eq, Hash, Clone)]
#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
pub struct LayoutData<FieldIdx: Idx, VariantIdx: Idx> {
    /// Says where the fields are located within the layout.
    pub fields: FieldsShape<FieldIdx>,

    /// Encodes information about multi-variant layouts.
    /// Even with `Multiple` variants, a layout still has its own fields! Those are then
    /// shared between all variants. One of them will be the discriminant,
    /// but e.g. coroutines can have more.
    ///
    /// To access all fields of this layout, both `fields` and the fields of the active variant
    /// must be taken into account.
    pub variants: Variants<FieldIdx, VariantIdx>,

    /// The `backend_repr` defines how this data will be represented to the codegen backend,
    /// and encodes value restrictions via `valid_range`.
    ///
    /// Note that this is entirely orthogonal to the recursive structure defined by
    /// `variants` and `fields`; for example, `ManuallyDrop<Result<isize, isize>>` has
    /// `BackendRepr::ScalarPair`! So, even with a non-`Memory` `backend_repr`, `fields` and
    /// `variants` have to be taken into account to find all fields of this layout.
    pub backend_repr: BackendRepr,

    /// The leaf scalar with the largest number of invalid values
    /// (i.e. outside of its `valid_range`), if it exists.
    pub largest_niche: Option<Niche>,
    /// Is this type known to be uninhabited?
    ///
    /// This is separate from BackendRepr because uninhabited return types can affect ABI,
    /// especially in the case of by-pointer struct returns, which allocate stack even when unused.
    pub uninhabited: bool,

    pub align: AbiAndPrefAlign,
    pub size: Size,

    /// The largest alignment explicitly requested with `repr(align)` on this type or any field.
    /// Only used on i686-windows, where the argument passing ABI is different when alignment is
    /// requested, even if the requested alignment is equal to the natural alignment.
    pub max_repr_align: Option<Align>,

    /// The alignment the type would have, ignoring any `repr(align)` but including `repr(packed)`.
    /// Only used on aarch64-linux, where the argument passing ABI ignores the requested alignment
    /// in some cases.
    pub unadjusted_abi_align: Align,

    /// The randomization seed based on this type's own repr and its fields.
    ///
    /// Since randomization is toggled on a per-crate basis, even crates that do not have
    /// randomization enabled should still calculate a seed so that downstream uses can use it
    /// to distinguish different types.
    ///
    /// For every T and U for which we do not guarantee that a repr(Rust) `Foo<T>` can be coerced
    /// or transmuted to `Foo<U>`, we aim to create probabilistically distinct seeds so that Foo
    /// can choose to reorder its fields based on that information. The current implementation is
    /// a conservative approximation of this goal.
    pub randomization_seed: Hash64,
}

impl<FieldIdx: Idx, VariantIdx: Idx> LayoutData<FieldIdx, VariantIdx> {
    /// Returns `true` if this is an aggregate type (including a ScalarPair!)
    pub fn is_aggregate(&self) -> bool {
        match self.backend_repr {
            BackendRepr::Scalar(_) | BackendRepr::SimdVector { .. } => false,
            BackendRepr::ScalarPair(..) | BackendRepr::Memory { .. } => true,
        }
    }

    /// Returns `true` if this is an uninhabited type
    pub fn is_uninhabited(&self) -> bool {
        self.uninhabited
    }
}

impl<FieldIdx: Idx, VariantIdx: Idx> fmt::Debug for LayoutData<FieldIdx, VariantIdx>
where
    FieldsShape<FieldIdx>: fmt::Debug,
    Variants<FieldIdx, VariantIdx>: fmt::Debug,
{
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // This is how `Layout` used to print before it became
        // `Interned<LayoutS>`. We print it like this to avoid having to update
        // expected output in a lot of tests.
        let LayoutData {
            size,
            align,
            backend_repr,
            fields,
            largest_niche,
            uninhabited,
            variants,
            max_repr_align,
            unadjusted_abi_align,
            randomization_seed,
        } = self;
        f.debug_struct("Layout")
            .field("size", size)
            .field("align", align)
            .field("backend_repr", backend_repr)
            .field("fields", fields)
            .field("largest_niche", largest_niche)
            .field("uninhabited", uninhabited)
            .field("variants", variants)
            .field("max_repr_align", max_repr_align)
            .field("unadjusted_abi_align", unadjusted_abi_align)
            .field("randomization_seed", randomization_seed)
            .finish()
    }
}

#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub enum PointerKind {
    /// Shared reference. `frozen` indicates the absence of any `UnsafeCell`.
    SharedRef { frozen: bool },
    /// Mutable reference. `unpin` indicates the absence of any pinned data.
    MutableRef { unpin: bool },
    /// Box. `unpin` indicates the absence of any pinned data. `global` indicates whether this box
    /// uses the global allocator or a custom one.
    Box { unpin: bool, global: bool },
}

/// Encodes extra information we have about a pointer.
/// Note that this information is advisory only, and backends are free to ignore it:
/// if the information is wrong, that can cause UB, but if the information is absent,
/// that must always be okay.
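///
/// For example (illustrative): for a function argument of type `&i32`, a backend may
/// use this information to emit attributes along the lines of LLVM's
/// `dereferenceable(4)` and `align(4)`.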
#[derive(Copy, Clone, Debug)]
pub struct PointeeInfo {
    /// If this is `None`, then this is a raw pointer, so size and alignment are not guaranteed to
    /// be reliable.
    pub safe: Option<PointerKind>,
    /// If `safe` is `Some`, then the pointer is either null or dereferenceable for this many bytes.
    /// On a function argument, "dereferenceable" here means "dereferenceable for the entire duration
    /// of this function call", i.e. it is UB for the memory that this pointer points to to be freed
    /// while this function is still running.
    /// The size can be zero if the pointer is not dereferenceable.
    pub size: Size,
    /// If `safe` is `Some`, then the pointer is aligned as indicated.
    pub align: Align,
}

impl<FieldIdx: Idx, VariantIdx: Idx> LayoutData<FieldIdx, VariantIdx> {
    /// Returns `true` if the layout corresponds to an unsized type.
    #[inline]
    pub fn is_unsized(&self) -> bool {
        self.backend_repr.is_unsized()
    }

    #[inline]
    pub fn is_sized(&self) -> bool {
        self.backend_repr.is_sized()
    }

    /// Returns `true` if the type is sized and a 1-ZST (meaning it has size 0 and alignment 1).
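    /// For example, `()` and `PhantomData<T>` are 1-ZSTs.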
    pub fn is_1zst(&self) -> bool {
        self.is_sized() && self.size.bytes() == 0 && self.align.abi.bytes() == 1
    }

    /// Returns `true` if the type is a ZST and not unsized.
    ///
    /// Note that this does *not* imply that the type is irrelevant for layout! It can still have
    /// non-trivial alignment constraints. You probably want to use `is_1zst` instead.
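    /// For example, `[u16; 0]` has size 0 but alignment 2, so it is a ZST but not a 1-ZST.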
    pub fn is_zst(&self) -> bool {
        match self.backend_repr {
            BackendRepr::Scalar(_)
            | BackendRepr::ScalarPair(..)
            | BackendRepr::SimdVector { .. } => false,
            BackendRepr::Memory { sized } => sized && self.size.bytes() == 0,
        }
    }

    /// Checks if these two `Layout`s are equal enough to be considered "the same for all function
    /// call ABIs". Note however that real ABIs depend on more details that are not reflected in
    /// the `Layout`; the `PassMode` needs to be compared as well. Also note that we assume
    /// aggregates are passed via `PassMode::Indirect` or `PassMode::Cast`; stricter checks would
    /// otherwise be required.
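    ///
    /// For example (illustrative), the layouts of `char` and `u32` are `eq_abi`: they
    /// differ only in `char`'s restricted validity range. `bool` and `u8` are *not*,
    /// because some ABIs treat `bool` specially; that is what the `is_bool` comparison
    /// below accounts for.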
    pub fn eq_abi(&self, other: &Self) -> bool {
        // The one thing that we are not capturing here is that for unsized types, the metadata
        // must also have the same ABI, and moreover that the same metadata leads to the same
        // size. The second point is quite hard to check though.
        self.size == other.size
            && self.is_sized() == other.is_sized()
            && self.backend_repr.eq_up_to_validity(&other.backend_repr)
            && self.backend_repr.is_bool() == other.backend_repr.is_bool()
            && self.align.abi == other.align.abi
            && self.max_repr_align == other.max_repr_align
            && self.unadjusted_abi_align == other.unadjusted_abi_align
    }
}

#[derive(Copy, Clone, Debug)]
pub enum StructKind {
    /// A tuple, closure, or univariant which cannot be coerced to unsized.
    AlwaysSized,
    /// A univariant, the last field of which may be coerced to unsized.
    MaybeUnsized,
    /// A univariant, but with a prefix of an arbitrary size & alignment (e.g., enum tag).
    Prefixed(Size, Align),
}

#[derive(Clone, Debug)]
pub enum AbiFromStrErr {
    /// not a known ABI
    Unknown,
    /// no "-unwind" variant can be used here
    NoExplicitUnwind,
}