rustc_abi/lib.rs

// tidy-alphabetical-start
#![cfg_attr(feature = "nightly", allow(internal_features))]
#![cfg_attr(feature = "nightly", doc(rust_logo))]
#![cfg_attr(feature = "nightly", feature(assert_matches))]
#![cfg_attr(feature = "nightly", feature(rustc_attrs))]
#![cfg_attr(feature = "nightly", feature(rustdoc_internals))]
#![cfg_attr(feature = "nightly", feature(step_trait))]
// tidy-alphabetical-end

/*! ABI handling for rustc

## What is an "ABI"?

Literally, "application binary interface", which means it is everything about how code interacts,
at the machine level, with other code. This means it technically covers all of the following:
- object binary format for e.g. relocations or offset tables
- in-memory layout of types
- procedure calling conventions

When we discuss "ABI" in the context of rustc, we are usually discussing calling conventions.
To describe those, `rustc_abi` also covers type layout, as it must for values passed on the stack.
Although `rustc_abi` is mostly about calling conventions, it is good to remember these other
usages exist. You will encounter all of them and more if you study target-specific codegen enough!
Even in general conversation, when someone says "the Rust ABI is unstable", it may allude to
either or both of
- `repr(Rust)` types have a mostly-unspecified layout
- `extern "Rust" fn(A) -> R` has an unspecified calling convention

*/

use std::fmt;
#[cfg(feature = "nightly")]
use std::iter::Step;
use std::num::{NonZeroUsize, ParseIntError};
use std::ops::{Add, AddAssign, Deref, Mul, RangeFull, RangeInclusive, Sub};
use std::str::FromStr;

use bitflags::bitflags;
#[cfg(feature = "nightly")]
use rustc_data_structures::stable_hasher::StableOrd;
use rustc_hashes::Hash64;
use rustc_index::{Idx, IndexSlice, IndexVec};
#[cfg(feature = "nightly")]
use rustc_macros::{Decodable_NoContext, Encodable_NoContext, HashStable_Generic};

mod callconv;
mod canon_abi;
mod extern_abi;
mod layout;
#[cfg(test)]
mod tests;

pub use callconv::{Heterogeneous, HomogeneousAggregate, Reg, RegKind};
pub use canon_abi::{ArmCall, CanonAbi, InterruptKind, X86Call};
#[cfg(feature = "nightly")]
pub use extern_abi::CVariadicStatus;
pub use extern_abi::{ExternAbi, all_names};
#[cfg(feature = "nightly")]
pub use layout::{FIRST_VARIANT, FieldIdx, Layout, TyAbiInterface, TyAndLayout, VariantIdx};
pub use layout::{LayoutCalculator, LayoutCalculatorError};

/// Requirements for a `StableHashingContext` to be used in this crate.
/// This is a hack to allow using the `HashStable_Generic` derive macro
/// instead of implementing everything in `rustc_middle`.
#[cfg(feature = "nightly")]
pub trait HashStableContext {}

#[derive(Clone, Copy, PartialEq, Eq, Default)]
#[cfg_attr(
    feature = "nightly",
    derive(Encodable_NoContext, Decodable_NoContext, HashStable_Generic)
)]
pub struct ReprFlags(u8);

bitflags! {
    impl ReprFlags: u8 {
        const IS_C               = 1 << 0;
        const IS_SIMD            = 1 << 1;
        const IS_TRANSPARENT     = 1 << 2;
        // Internal only for now. If true, don't reorder fields.
        // On its own it does not prevent ABI optimizations.
        const IS_LINEAR          = 1 << 3;
        // If true, the type's crate has opted into layout randomization.
        // Other flags can still inhibit reordering and thus randomization.
        // The seed is stored in `ReprOptions.field_shuffle_seed`.
        const RANDOMIZE_LAYOUT   = 1 << 4;
        // Any of these flags being set prevents the field reordering optimization.
        const FIELD_ORDER_UNOPTIMIZABLE   = ReprFlags::IS_C.bits()
                                 | ReprFlags::IS_SIMD.bits()
                                 | ReprFlags::IS_LINEAR.bits();
        const ABI_UNOPTIMIZABLE = ReprFlags::IS_C.bits() | ReprFlags::IS_SIMD.bits();
    }
}

// This is the same as `rustc_data_structures::external_bitflags_debug` but without the
// `rustc_data_structures` dependency, to make it build on stable.
impl std::fmt::Debug for ReprFlags {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        bitflags::parser::to_writer(self, f)
    }
}

#[derive(Copy, Clone, Debug, Eq, PartialEq)]
#[cfg_attr(
    feature = "nightly",
    derive(Encodable_NoContext, Decodable_NoContext, HashStable_Generic)
)]
pub enum IntegerType {
    /// Pointer-sized integer type, i.e. `isize` and `usize`. The field shows signedness, e.g.
    /// `Pointer(true)` means `isize`.
    Pointer(bool),
    /// Fixed-sized integer type, e.g. `i8`, `u32`, `i128`. The bool field shows signedness, e.g.
    /// `Fixed(I8, false)` means `u8`.
    Fixed(Integer, bool),
}

impl IntegerType {
    pub fn is_signed(&self) -> bool {
        match self {
            IntegerType::Pointer(b) => *b,
            IntegerType::Fixed(_, b) => *b,
        }
    }
}

/// Represents the repr options provided by the user.
#[derive(Copy, Clone, Debug, Eq, PartialEq, Default)]
#[cfg_attr(
    feature = "nightly",
    derive(Encodable_NoContext, Decodable_NoContext, HashStable_Generic)
)]
pub struct ReprOptions {
    pub int: Option<IntegerType>,
    pub align: Option<Align>,
    pub pack: Option<Align>,
    pub flags: ReprFlags,
    /// The seed to be used for randomizing a type's layout.
    ///
    /// Note: This could technically be a `u128` which would
    /// be the "most accurate" hash as it'd encompass the item and crate
    /// hash without loss, but it does pay the price of being larger.
    /// Everything's a tradeoff; a 64-bit seed should be sufficient for our
    /// purposes (primarily `-Z randomize-layout`).
    pub field_shuffle_seed: Hash64,
}

impl ReprOptions {
    #[inline]
    pub fn simd(&self) -> bool {
        self.flags.contains(ReprFlags::IS_SIMD)
    }

    #[inline]
    pub fn c(&self) -> bool {
        self.flags.contains(ReprFlags::IS_C)
    }

    #[inline]
    pub fn packed(&self) -> bool {
        self.pack.is_some()
    }

    #[inline]
    pub fn transparent(&self) -> bool {
        self.flags.contains(ReprFlags::IS_TRANSPARENT)
    }

    #[inline]
    pub fn linear(&self) -> bool {
        self.flags.contains(ReprFlags::IS_LINEAR)
    }

    /// Returns the discriminant type, given these `repr` options.
    /// This must only be called on enums!
    pub fn discr_type(&self) -> IntegerType {
        self.int.unwrap_or(IntegerType::Pointer(true))
    }
    /// Returns `true` if this `#[repr()]` should inhibit "smart enum
    /// layout" optimizations, such as representing `Foo<&T>` as a
    /// single pointer.
    pub fn inhibit_enum_layout_opt(&self) -> bool {
        self.c() || self.int.is_some()
    }

    pub fn inhibit_newtype_abi_optimization(&self) -> bool {
        self.flags.intersects(ReprFlags::ABI_UNOPTIMIZABLE)
    }

    /// Returns `true` if this `#[repr()]` guarantees a fixed field order,
    /// e.g. `repr(C)` or `repr(<int>)`.
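    ///
    /// A minimal sketch of the flag interaction (an `ignore`d illustration, since this
    /// internal crate's doctests may not be built):
    ///
    /// ```ignore (illustrative)
    /// let repr_c = ReprOptions { flags: ReprFlags::IS_C, ..Default::default() };
    /// assert!(repr_c.inhibit_struct_field_reordering());
    /// assert!(!ReprOptions::default().inhibit_struct_field_reordering());
    /// ```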
    pub fn inhibit_struct_field_reordering(&self) -> bool {
        self.flags.intersects(ReprFlags::FIELD_ORDER_UNOPTIMIZABLE) || self.int.is_some()
    }

    /// Returns `true` if this type is valid for reordering and `-Z randomize-layout`
    /// was enabled for its declaration crate.
    pub fn can_randomize_type_layout(&self) -> bool {
        !self.inhibit_struct_field_reordering() && self.flags.contains(ReprFlags::RANDOMIZE_LAYOUT)
    }

    /// Returns `true` if this `#[repr()]` should inhibit union ABI optimizations.
    pub fn inhibits_union_abi_opt(&self) -> bool {
        self.c()
    }
}

/// The maximum supported number of lanes in a SIMD vector.
///
/// This value is selected based on backend support:
/// * LLVM does not appear to have a vector width limit.
/// * Cranelift stores the base-2 log of the lane count in a 4 bit integer.
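/// With the lane count's base-2 log capped at a 4-bit value, the largest representable
/// count is `2^15`, hence `1 << 0xF` (= 32768) lanes.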
pub const MAX_SIMD_LANES: u64 = 1 << 0xF;

/// How pointers are represented in a given address space
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub struct PointerSpec {
    /// The size of the bitwise representation of the pointer.
    pointer_size: Size,
    /// The alignment of pointers for this address space
    pointer_align: AbiAlign,
    /// The size of the value a pointer can be offset by in this address space.
    pointer_offset: Size,
    /// Pointers into this address space contain extra metadata
    /// FIXME(workingjubilee): Consider adequately reflecting this in the compiler?
    _is_fat: bool,
}

/// Parsed [Data layout](https://llvm.org/docs/LangRef.html#data-layout)
/// for a target, which contains everything needed to compute layouts.
#[derive(Debug, PartialEq, Eq)]
pub struct TargetDataLayout {
    pub endian: Endian,
    pub i1_align: AbiAlign,
    pub i8_align: AbiAlign,
    pub i16_align: AbiAlign,
    pub i32_align: AbiAlign,
    pub i64_align: AbiAlign,
    pub i128_align: AbiAlign,
    pub f16_align: AbiAlign,
    pub f32_align: AbiAlign,
    pub f64_align: AbiAlign,
    pub f128_align: AbiAlign,
    pub aggregate_align: AbiAlign,

    /// Alignments for vector types.
    pub vector_align: Vec<(Size, AbiAlign)>,

    pub default_address_space: AddressSpace,
    pub default_address_space_pointer_spec: PointerSpec,

    /// Address space information of all known address spaces.
    ///
    /// # Note
    ///
    /// This vector does not contain the [`PointerSpec`] relative to the default address space,
    /// which instead lives in [`Self::default_address_space_pointer_spec`].
    address_space_info: Vec<(AddressSpace, PointerSpec)>,

    pub instruction_address_space: AddressSpace,

    /// Minimum size of `#[repr(C)]` enums (default `c_int::BITS`, usually 32).
    /// Note: this isn't part of LLVM's data layout string; it is controlled by `short_enum`,
    /// so the only valid specs for LLVM are `c_int::BITS` or 8.
    pub c_enum_min_size: Integer,
}

impl Default for TargetDataLayout {
    /// Creates an instance of `TargetDataLayout`.
    fn default() -> TargetDataLayout {
        let align = |bits| Align::from_bits(bits).unwrap();
        TargetDataLayout {
            endian: Endian::Big,
            i1_align: AbiAlign::new(align(8)),
            i8_align: AbiAlign::new(align(8)),
            i16_align: AbiAlign::new(align(16)),
            i32_align: AbiAlign::new(align(32)),
            i64_align: AbiAlign::new(align(32)),
            i128_align: AbiAlign::new(align(32)),
            f16_align: AbiAlign::new(align(16)),
            f32_align: AbiAlign::new(align(32)),
            f64_align: AbiAlign::new(align(64)),
            f128_align: AbiAlign::new(align(128)),
            aggregate_align: AbiAlign { abi: align(8) },
            vector_align: vec![
                (Size::from_bits(64), AbiAlign::new(align(64))),
                (Size::from_bits(128), AbiAlign::new(align(128))),
            ],
            default_address_space: AddressSpace::ZERO,
            default_address_space_pointer_spec: PointerSpec {
                pointer_size: Size::from_bits(64),
                pointer_align: AbiAlign::new(align(64)),
                pointer_offset: Size::from_bits(64),
                _is_fat: false,
            },
            address_space_info: vec![],
            instruction_address_space: AddressSpace::ZERO,
            c_enum_min_size: Integer::I32,
        }
    }
}

pub enum TargetDataLayoutErrors<'a> {
    InvalidAddressSpace { addr_space: &'a str, cause: &'a str, err: ParseIntError },
    InvalidBits { kind: &'a str, bit: &'a str, cause: &'a str, err: ParseIntError },
    MissingAlignment { cause: &'a str },
    InvalidAlignment { cause: &'a str, err: AlignFromBytesError },
    InconsistentTargetArchitecture { dl: &'a str, target: &'a str },
    InconsistentTargetPointerWidth { pointer_size: u64, target: u16 },
    InvalidBitsSize { err: String },
    UnknownPointerSpecification { err: String },
}

impl TargetDataLayout {
    /// Parse data layout from an
    /// [llvm data layout string](https://llvm.org/docs/LangRef.html#data-layout).
    ///
    /// This function doesn't fill `c_enum_min_size`; it will always be `I32`, since that
    /// cannot be determined from the LLVM string.
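    ///
    /// A sketch of typical usage, on an x86_64-style layout string (an `ignore`d
    /// illustration, since this internal crate's doctests may not be built):
    ///
    /// ```ignore (illustrative)
    /// let Ok(dl) = TargetDataLayout::parse_from_llvm_datalayout_string(
    ///     "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128",
    ///     AddressSpace::ZERO,
    /// ) else {
    ///     panic!("failed to parse data layout string");
    /// };
    /// assert_eq!(dl.endian, Endian::Little);
    /// assert_eq!(dl.pointer_size().bits(), 64);
    /// ```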
    pub fn parse_from_llvm_datalayout_string<'a>(
        input: &'a str,
        default_address_space: AddressSpace,
    ) -> Result<TargetDataLayout, TargetDataLayoutErrors<'a>> {
        // Parse an address space index from a string.
        let parse_address_space = |s: &'a str, cause: &'a str| {
            s.parse::<u32>().map(AddressSpace).map_err(|err| {
                TargetDataLayoutErrors::InvalidAddressSpace { addr_space: s, cause, err }
            })
        };

        // Parse a bit count from a string.
        let parse_bits = |s: &'a str, kind: &'a str, cause: &'a str| {
            s.parse::<u64>().map_err(|err| TargetDataLayoutErrors::InvalidBits {
                kind,
                bit: s,
                cause,
                err,
            })
        };

        // Parse a size string.
        let parse_size =
            |s: &'a str, cause: &'a str| parse_bits(s, "size", cause).map(Size::from_bits);

        // Parse an alignment string.
        let parse_align_str = |s: &'a str, cause: &'a str| {
            let align_from_bits = |bits| {
                Align::from_bits(bits)
                    .map_err(|err| TargetDataLayoutErrors::InvalidAlignment { cause, err })
            };
            let abi = parse_bits(s, "alignment", cause)?;
            Ok(AbiAlign::new(align_from_bits(abi)?))
        };

        // Parse an alignment sequence, possibly in the form `<align>[:<preferred_alignment>]`,
        // ignoring the secondary alignment specifications.
        let parse_align_seq = |s: &[&'a str], cause: &'a str| {
            if s.is_empty() {
                return Err(TargetDataLayoutErrors::MissingAlignment { cause });
            }
            parse_align_str(s[0], cause)
        };

        let mut dl = TargetDataLayout::default();
        dl.default_address_space = default_address_space;

        let mut i128_align_src = 64;
        for spec in input.split('-') {
            let spec_parts = spec.split(':').collect::<Vec<_>>();

            match &*spec_parts {
                ["e"] => dl.endian = Endian::Little,
                ["E"] => dl.endian = Endian::Big,
                [p] if p.starts_with('P') => {
                    dl.instruction_address_space = parse_address_space(&p[1..], "P")?
                }
                ["a", a @ ..] => dl.aggregate_align = parse_align_seq(a, "a")?,
                ["f16", a @ ..] => dl.f16_align = parse_align_seq(a, "f16")?,
                ["f32", a @ ..] => dl.f32_align = parse_align_seq(a, "f32")?,
                ["f64", a @ ..] => dl.f64_align = parse_align_seq(a, "f64")?,
                ["f128", a @ ..] => dl.f128_align = parse_align_seq(a, "f128")?,
                [p, s, a @ ..] if p.starts_with("p") => {
                    let mut p = p.strip_prefix('p').unwrap();
                    let mut _is_fat = false;

                    // Some targets, such as CHERI, use the 'f' suffix in the p- spec to signal that
                    // they use 'fat' pointers. The resulting prefix may look like `pf<addr_space>`.

                    if p.starts_with('f') {
                        p = p.strip_prefix('f').unwrap();
                        _is_fat = true;
                    }

                    // However, we currently don't take into account further specifications:
                    // an error is emitted instead.
                    if p.starts_with(char::is_alphabetic) {
                        return Err(TargetDataLayoutErrors::UnknownPointerSpecification {
                            err: p.to_string(),
                        });
                    }

                    let addr_space = if !p.is_empty() {
                        parse_address_space(p, "p-")?
                    } else {
                        AddressSpace::ZERO
                    };

                    let pointer_size = parse_size(s, "p-")?;
                    let pointer_align = parse_align_seq(a, "p-")?;
                    let info = PointerSpec {
                        pointer_offset: pointer_size,
                        pointer_size,
                        pointer_align,
                        _is_fat,
                    };
                    if addr_space == default_address_space {
                        dl.default_address_space_pointer_spec = info;
                    } else {
                        match dl.address_space_info.iter_mut().find(|(a, _)| *a == addr_space) {
                            Some(e) => e.1 = info,
                            None => {
                                dl.address_space_info.push((addr_space, info));
                            }
                        }
                    }
                }
                [p, s, a, _pr, i] if p.starts_with("p") => {
                    let mut p = p.strip_prefix('p').unwrap();
                    let mut _is_fat = false;

                    // Some targets, such as CHERI, use the 'f' suffix in the p- spec to signal that
                    // they use 'fat' pointers. The resulting prefix may look like `pf<addr_space>`.

                    if p.starts_with('f') {
                        p = p.strip_prefix('f').unwrap();
                        _is_fat = true;
                    }

                    // However, we currently don't take into account further specifications:
                    // an error is emitted instead.
                    if p.starts_with(char::is_alphabetic) {
                        return Err(TargetDataLayoutErrors::UnknownPointerSpecification {
                            err: p.to_string(),
                        });
                    }

                    let addr_space = if !p.is_empty() {
                        parse_address_space(p, "p")?
                    } else {
                        AddressSpace::ZERO
                    };

                    let info = PointerSpec {
                        pointer_size: parse_size(s, "p-")?,
                        pointer_align: parse_align_str(a, "p-")?,
                        pointer_offset: parse_size(i, "p-")?,
                        _is_fat,
                    };

                    if addr_space == default_address_space {
                        dl.default_address_space_pointer_spec = info;
                    } else {
                        match dl.address_space_info.iter_mut().find(|(a, _)| *a == addr_space) {
                            Some(e) => e.1 = info,
                            None => {
                                dl.address_space_info.push((addr_space, info));
                            }
                        }
                    }
                }

                [s, a @ ..] if s.starts_with('i') => {
                    let Ok(bits) = s[1..].parse::<u64>() else {
                        parse_size(&s[1..], "i")?; // For the user error.
                        continue;
                    };
                    let a = parse_align_seq(a, s)?;
                    match bits {
                        1 => dl.i1_align = a,
                        8 => dl.i8_align = a,
                        16 => dl.i16_align = a,
                        32 => dl.i32_align = a,
                        64 => dl.i64_align = a,
                        _ => {}
                    }
                    if bits >= i128_align_src && bits <= 128 {
                        // Default alignment for i128 is decided by taking the alignment of
                        // largest-sized i{64..=128}.
                        i128_align_src = bits;
                        dl.i128_align = a;
                    }
                }
                [s, a @ ..] if s.starts_with('v') => {
                    let v_size = parse_size(&s[1..], "v")?;
                    let a = parse_align_seq(a, s)?;
                    if let Some(v) = dl.vector_align.iter_mut().find(|v| v.0 == v_size) {
                        v.1 = a;
                        continue;
                    }
                    // No existing entry, add a new one.
                    dl.vector_align.push((v_size, a));
                }
                _ => {} // Ignore everything else.
            }
        }

        // If not explicitly given, inherit the address space information for specific
        // LLVM elements from the default data address space.
        if (dl.instruction_address_space != dl.default_address_space)
            && dl
                .address_space_info
                .iter()
                .find(|(a, _)| *a == dl.instruction_address_space)
                .is_none()
        {
            dl.address_space_info.push((
                dl.instruction_address_space,
                dl.default_address_space_pointer_spec.clone(),
            ));
        }

        Ok(dl)
    }

    /// Returns the **exclusive** upper bound on object size in bytes, in the default data
    /// address space.
    ///
    /// The theoretical maximum object size is defined as the maximum positive `isize` value.
    /// This ensures that the `offset` semantics remain well-defined by allowing it to correctly
    /// index every address within an object along with one byte past the end, along with allowing
    /// `isize` to store the difference between any two pointers into an object.
    ///
    /// LLVM uses a 64-bit integer to represent object size in *bits*, but we care only about
    /// bytes, so we adopt the more constrained size bound implied by that limitation.
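    ///
    /// For example, with 64-bit pointers the bound is `1 << 61`: a `u64` of *bits* can
    /// describe at most `2^64` bits, which is `2^61` bytes.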
    #[inline]
    pub fn obj_size_bound(&self) -> u64 {
        match self.pointer_size().bits() {
            16 => 1 << 15,
            32 => 1 << 31,
            64 => 1 << 61,
            bits => panic!("obj_size_bound: unknown pointer bit size {bits}"),
        }
    }

    /// Returns the **exclusive** upper bound on object size in bytes, in the given
    /// address space.
    ///
    /// The theoretical maximum object size is defined as the maximum positive `isize` value.
    /// This ensures that the `offset` semantics remain well-defined by allowing it to correctly
    /// index every address within an object along with one byte past the end, along with allowing
    /// `isize` to store the difference between any two pointers into an object.
    ///
    /// LLVM uses a 64-bit integer to represent object size in *bits*, but we care only about
    /// bytes, so we adopt the more constrained size bound implied by that limitation.
    #[inline]
    pub fn obj_size_bound_in(&self, address_space: AddressSpace) -> u64 {
        match self.pointer_size_in(address_space).bits() {
            16 => 1 << 15,
            32 => 1 << 31,
            64 => 1 << 61,
            bits => panic!("obj_size_bound: unknown pointer bit size {bits}"),
        }
    }

    #[inline]
    pub fn ptr_sized_integer(&self) -> Integer {
        use Integer::*;
        match self.pointer_offset().bits() {
            16 => I16,
            32 => I32,
            64 => I64,
            bits => panic!("ptr_sized_integer: unknown pointer bit size {bits}"),
        }
    }

    #[inline]
    pub fn ptr_sized_integer_in(&self, address_space: AddressSpace) -> Integer {
        use Integer::*;
        match self.pointer_offset_in(address_space).bits() {
            16 => I16,
            32 => I32,
            64 => I64,
            bits => panic!("ptr_sized_integer: unknown pointer bit size {bits}"),
        }
    }

    /// psABI-mandated alignment for a vector type, if any
    #[inline]
    fn cabi_vector_align(&self, vec_size: Size) -> Option<AbiAlign> {
        self.vector_align
            .iter()
            .find(|(size, _align)| *size == vec_size)
            .map(|(_size, align)| *align)
    }

    /// an alignment resembling the one LLVM would pick for a vector
    #[inline]
    pub fn llvmlike_vector_align(&self, vec_size: Size) -> AbiAlign {
        self.cabi_vector_align(vec_size).unwrap_or(AbiAlign::new(
            Align::from_bytes(vec_size.bytes().next_power_of_two()).unwrap(),
        ))
    }

    /// Get the pointer size in the default data address space.
    #[inline]
    pub fn pointer_size(&self) -> Size {
        self.default_address_space_pointer_spec.pointer_size
    }

    /// Get the pointer size in a specific address space.
    #[inline]
    pub fn pointer_size_in(&self, c: AddressSpace) -> Size {
        if c == self.default_address_space {
            return self.default_address_space_pointer_spec.pointer_size;
        }

        if let Some(e) = self.address_space_info.iter().find(|(a, _)| a == &c) {
            e.1.pointer_size
        } else {
            panic!("Use of unknown address space {c:?}");
        }
    }

    /// Get the pointer index in the default data address space.
    #[inline]
    pub fn pointer_offset(&self) -> Size {
        self.default_address_space_pointer_spec.pointer_offset
    }

    /// Get the pointer index in a specific address space.
    #[inline]
    pub fn pointer_offset_in(&self, c: AddressSpace) -> Size {
        if c == self.default_address_space {
            return self.default_address_space_pointer_spec.pointer_offset;
        }

        if let Some(e) = self.address_space_info.iter().find(|(a, _)| a == &c) {
            e.1.pointer_offset
        } else {
            panic!("Use of unknown address space {c:?}");
        }
    }

    /// Get the pointer alignment in the default data address space.
    #[inline]
    pub fn pointer_align(&self) -> AbiAlign {
        self.default_address_space_pointer_spec.pointer_align
    }

    /// Get the pointer alignment in a specific address space.
    #[inline]
    pub fn pointer_align_in(&self, c: AddressSpace) -> AbiAlign {
        if c == self.default_address_space {
            return self.default_address_space_pointer_spec.pointer_align;
        }

        if let Some(e) = self.address_space_info.iter().find(|(a, _)| a == &c) {
            e.1.pointer_align
        } else {
            panic!("Use of unknown address space {c:?}");
        }
    }
}

pub trait HasDataLayout {
    fn data_layout(&self) -> &TargetDataLayout;
}

impl HasDataLayout for TargetDataLayout {
    #[inline]
    fn data_layout(&self) -> &TargetDataLayout {
        self
    }
}

// used by rust-analyzer
impl HasDataLayout for &TargetDataLayout {
    #[inline]
    fn data_layout(&self) -> &TargetDataLayout {
        (**self).data_layout()
    }
}

/// Endianness of the target, which must match cfg(target-endian).
#[derive(Copy, Clone, PartialEq, Eq)]
pub enum Endian {
    Little,
    Big,
}

impl Endian {
    pub fn as_str(&self) -> &'static str {
        match self {
            Self::Little => "little",
            Self::Big => "big",
        }
    }
}

impl fmt::Debug for Endian {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.write_str(self.as_str())
    }
}

impl FromStr for Endian {
    type Err = String;

    fn from_str(s: &str) -> Result<Self, Self::Err> {
        match s {
            "little" => Ok(Self::Little),
            "big" => Ok(Self::Big),
            _ => Err(format!(r#"unknown endian: "{s}""#)),
        }
    }
}

/// Size of a type in bytes.
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
#[cfg_attr(
    feature = "nightly",
    derive(Encodable_NoContext, Decodable_NoContext, HashStable_Generic)
)]
pub struct Size {
    raw: u64,
}

#[cfg(feature = "nightly")]
impl StableOrd for Size {
    const CAN_USE_UNSTABLE_SORT: bool = true;

    // `Ord` is implemented as just comparing numerical values and numerical values
    // are not changed by (de-)serialization.
    const THIS_IMPLEMENTATION_HAS_BEEN_TRIPLE_CHECKED: () = ();
}

// This is debug-printed a lot in larger structs; don't waste too much space there.
impl fmt::Debug for Size {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "Size({} bytes)", self.bytes())
    }
}

impl Size {
    pub const ZERO: Size = Size { raw: 0 };

    /// Rounds `bits` up to the next-higher byte boundary, if `bits` is
    /// not a multiple of 8.
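    ///
    /// For example, `Size::from_bits(1)` through `Size::from_bits(8)` are all one byte,
    /// and `Size::from_bits(9)` is two.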
    pub fn from_bits(bits: impl TryInto<u64>) -> Size {
        let bits = bits.try_into().ok().unwrap();
        Size { raw: bits.div_ceil(8) }
    }

    #[inline]
    pub fn from_bytes(bytes: impl TryInto<u64>) -> Size {
        let bytes: u64 = bytes.try_into().ok().unwrap();
        Size { raw: bytes }
    }

    #[inline]
    pub fn bytes(self) -> u64 {
        self.raw
    }

    #[inline]
    pub fn bytes_usize(self) -> usize {
        self.bytes().try_into().unwrap()
    }

    #[inline]
    pub fn bits(self) -> u64 {
        #[cold]
        fn overflow(bytes: u64) -> ! {
            panic!("Size::bits: {bytes} bytes in bits doesn't fit in u64")
        }

        self.bytes().checked_mul(8).unwrap_or_else(|| overflow(self.bytes()))
    }

    #[inline]
    pub fn bits_usize(self) -> usize {
        self.bits().try_into().unwrap()
    }

    #[inline]
    pub fn align_to(self, align: Align) -> Size {
        let mask = align.bytes() - 1;
        Size::from_bytes((self.bytes() + mask) & !mask)
    }

    #[inline]
    pub fn is_aligned(self, align: Align) -> bool {
        let mask = align.bytes() - 1;
        self.bytes() & mask == 0
    }

    #[inline]
    pub fn checked_add<C: HasDataLayout>(self, offset: Size, cx: &C) -> Option<Size> {
        let dl = cx.data_layout();

        let bytes = self.bytes().checked_add(offset.bytes())?;

        if bytes < dl.obj_size_bound() { Some(Size::from_bytes(bytes)) } else { None }
    }

    #[inline]
    pub fn checked_mul<C: HasDataLayout>(self, count: u64, cx: &C) -> Option<Size> {
        let dl = cx.data_layout();

        let bytes = self.bytes().checked_mul(count)?;
        if bytes < dl.obj_size_bound() { Some(Size::from_bytes(bytes)) } else { None }
    }

    /// Truncates `value` to `self` bits and then sign-extends it to 128 bits
    /// (i.e., if it is negative, fill with 1's on the left).
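    ///
    /// For example, for `Size::from_bits(8)`, `sign_extend(0xFF)` is `-1_i128` and
    /// `sign_extend(0x7F)` is `127`.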
    #[inline]
    pub fn sign_extend(self, value: u128) -> i128 {
        let size = self.bits();
        if size == 0 {
            // Truncated until nothing is left.
            return 0;
        }
        // Sign-extend it.
        let shift = 128 - size;
        // Shift the unsigned value to the left, then shift back to the right as signed
        // (essentially fills with sign bit on the left).
        ((value << shift) as i128) >> shift
    }

    /// Truncates `value` to `self` bits.
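    ///
    /// For example, for `Size::from_bits(8)`, `truncate(0x1FF)` is `0xFF`.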
    #[inline]
    pub fn truncate(self, value: u128) -> u128 {
        let size = self.bits();
        if size == 0 {
            // Truncated until nothing is left.
            return 0;
        }
        let shift = 128 - size;
        // Truncate (shift left to drop out leftover values, shift right to fill with zeroes).
        (value << shift) >> shift
    }

    #[inline]
    pub fn signed_int_min(&self) -> i128 {
        self.sign_extend(1_u128 << (self.bits() - 1))
    }

    #[inline]
    pub fn signed_int_max(&self) -> i128 {
        i128::MAX >> (128 - self.bits())
    }

    #[inline]
    pub fn unsigned_int_max(&self) -> u128 {
        u128::MAX >> (128 - self.bits())
    }
}

// Panicking addition, subtraction and multiplication for convenience.
// Avoid during layout computation, return `LayoutError` instead.

impl Add for Size {
    type Output = Size;
    #[inline]
    fn add(self, other: Size) -> Size {
        Size::from_bytes(self.bytes().checked_add(other.bytes()).unwrap_or_else(|| {
            panic!("Size::add: {} + {} doesn't fit in u64", self.bytes(), other.bytes())
        }))
    }
}

impl Sub for Size {
    type Output = Size;
    #[inline]
    fn sub(self, other: Size) -> Size {
        Size::from_bytes(self.bytes().checked_sub(other.bytes()).unwrap_or_else(|| {
            panic!("Size::sub: {} - {} would result in negative size", self.bytes(), other.bytes())
        }))
    }
}

impl Mul<Size> for u64 {
    type Output = Size;
    #[inline]
    fn mul(self, size: Size) -> Size {
        size * self
    }
}

impl Mul<u64> for Size {
    type Output = Size;
    #[inline]
    fn mul(self, count: u64) -> Size {
        match self.bytes().checked_mul(count) {
            Some(bytes) => Size::from_bytes(bytes),
            None => panic!("Size::mul: {} * {} doesn't fit in u64", self.bytes(), count),
        }
    }
}

impl AddAssign for Size {
    #[inline]
    fn add_assign(&mut self, other: Size) {
        *self = *self + other;
    }
}

#[cfg(feature = "nightly")]
impl Step for Size {
    #[inline]
    fn steps_between(start: &Self, end: &Self) -> (usize, Option<usize>) {
        u64::steps_between(&start.bytes(), &end.bytes())
    }

    #[inline]
    fn forward_checked(start: Self, count: usize) -> Option<Self> {
        u64::forward_checked(start.bytes(), count).map(Self::from_bytes)
    }

    #[inline]
    fn forward(start: Self, count: usize) -> Self {
        Self::from_bytes(u64::forward(start.bytes(), count))
    }

    #[inline]
    unsafe fn forward_unchecked(start: Self, count: usize) -> Self {
        Self::from_bytes(unsafe { u64::forward_unchecked(start.bytes(), count) })
    }

    #[inline]
    fn backward_checked(start: Self, count: usize) -> Option<Self> {
        u64::backward_checked(start.bytes(), count).map(Self::from_bytes)
    }

    #[inline]
    fn backward(start: Self, count: usize) -> Self {
        Self::from_bytes(u64::backward(start.bytes(), count))
    }

    #[inline]
    unsafe fn backward_unchecked(start: Self, count: usize) -> Self {
        Self::from_bytes(unsafe { u64::backward_unchecked(start.bytes(), count) })
    }
}

/// Alignment of a type in bytes (always a power of two).
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
#[cfg_attr(
    feature = "nightly",
    derive(Encodable_NoContext, Decodable_NoContext, HashStable_Generic)
)]
pub struct Align {
    pow2: u8,
}

// This is debug-printed a lot in larger structs; don't waste too much space there.
impl fmt::Debug for Align {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "Align({} bytes)", self.bytes())
    }
}

#[derive(Clone, Copy)]
pub enum AlignFromBytesError {
    NotPowerOfTwo(u64),
    TooLarge(u64),
}

impl AlignFromBytesError {
    pub fn diag_ident(self) -> &'static str {
        match self {
            Self::NotPowerOfTwo(_) => "not_power_of_two",
            Self::TooLarge(_) => "too_large",
        }
    }

    pub fn align(self) -> u64 {
        let (Self::NotPowerOfTwo(align) | Self::TooLarge(align)) = self;
        align
    }
}

impl fmt::Debug for AlignFromBytesError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::Display::fmt(self, f)
    }
}

impl fmt::Display for AlignFromBytesError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            AlignFromBytesError::NotPowerOfTwo(align) => write!(f, "`{align}` is not a power of 2"),
            AlignFromBytesError::TooLarge(align) => write!(f, "`{align}` is too large"),
        }
    }
}

impl Align {
    pub const ONE: Align = Align { pow2: 0 };
    pub const EIGHT: Align = Align { pow2: 3 };
    // LLVM has a maximal supported alignment of 2^29; we inherit that.
    pub const MAX: Align = Align { pow2: 29 };

    #[inline]
    pub fn from_bits(bits: u64) -> Result<Align, AlignFromBytesError> {
        Align::from_bytes(Size::from_bits(bits).bytes())
    }

    #[inline]
    pub const fn from_bytes(align: u64) -> Result<Align, AlignFromBytesError> {
        // Treat an alignment of 0 bytes like 1-byte alignment.
        if align == 0 {
            return Ok(Align::ONE);
        }

        #[cold]
        const fn not_power_of_2(align: u64) -> AlignFromBytesError {
            AlignFromBytesError::NotPowerOfTwo(align)
        }

        #[cold]
        const fn too_large(align: u64) -> AlignFromBytesError {
            AlignFromBytesError::TooLarge(align)
        }

        let tz = align.trailing_zeros();
        if align != (1 << tz) {
            return Err(not_power_of_2(align));
        }

        let pow2 = tz as u8;
        if pow2 > Self::MAX.pow2 {
            return Err(too_large(align));
        }

        Ok(Align { pow2 })
    }

    #[inline]
    pub const fn bytes(self) -> u64 {
        1 << self.pow2
    }

    #[inline]
    pub fn bytes_usize(self) -> usize {
        self.bytes().try_into().unwrap()
    }

    #[inline]
    pub const fn bits(self) -> u64 {
        self.bytes() * 8
    }

    #[inline]
    pub fn bits_usize(self) -> usize {
        self.bits().try_into().unwrap()
    }

    /// Obtain the greatest factor of `size` that is an alignment
    /// (the largest power of two the Size is a multiple of).
    ///
    /// Note that all numbers are factors of 0
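    ///
    /// For example, `Align::max_aligned_factor(Size::from_bytes(12))` is a 4-byte
    /// alignment, since 4 is the largest power of two that divides 12.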
    #[inline]
    pub fn max_aligned_factor(size: Size) -> Align {
        Align { pow2: size.bytes().trailing_zeros() as u8 }
    }

    /// Reduces Align to an aligned factor of `size`.
    #[inline]
    pub fn restrict_for_offset(self, size: Size) -> Align {
        self.min(Align::max_aligned_factor(size))
    }
}

/// A pair of alignments, ABI-mandated and preferred.
///
/// The "preferred" alignment is an LLVM concept that is virtually meaningless to Rust code:
/// it is not exposed semantically to programmers nor can they meaningfully affect it.
/// The only concern for us is that preferred alignment must not be less than the mandated alignment
/// and thus in practice the two values are almost always identical.
///
/// An example of a rare thing actually affected by preferred alignment is aligning of statics.
/// It is of effectively no consequence for layout in structs and on the stack.
#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
pub struct AbiAlign {
    pub abi: Align,
}

impl AbiAlign {
    #[inline]
    pub fn new(align: Align) -> AbiAlign {
        AbiAlign { abi: align }
    }

    #[inline]
    pub fn min(self, other: AbiAlign) -> AbiAlign {
        AbiAlign { abi: self.abi.min(other.abi) }
    }

    #[inline]
    pub fn max(self, other: AbiAlign) -> AbiAlign {
        AbiAlign { abi: self.abi.max(other.abi) }
    }
}

impl Deref for AbiAlign {
    type Target = Align;

    fn deref(&self) -> &Self::Target {
        &self.abi
    }
}

/// Integers, also used for enum discriminants.
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
#[cfg_attr(
    feature = "nightly",
    derive(Encodable_NoContext, Decodable_NoContext, HashStable_Generic)
)]
pub enum Integer {
    I8,
    I16,
    I32,
    I64,
    I128,
}

impl Integer {
    pub fn int_ty_str(self) -> &'static str {
        use Integer::*;
        match self {
            I8 => "i8",
            I16 => "i16",
            I32 => "i32",
            I64 => "i64",
            I128 => "i128",
        }
    }

    pub fn uint_ty_str(self) -> &'static str {
        use Integer::*;
        match self {
            I8 => "u8",
            I16 => "u16",
            I32 => "u32",
            I64 => "u64",
            I128 => "u128",
        }
    }

    #[inline]
    pub fn size(self) -> Size {
        use Integer::*;
        match self {
            I8 => Size::from_bytes(1),
            I16 => Size::from_bytes(2),
            I32 => Size::from_bytes(4),
            I64 => Size::from_bytes(8),
            I128 => Size::from_bytes(16),
        }
    }

    /// Gets the Integer type from an IntegerType.
    pub fn from_attr<C: HasDataLayout>(cx: &C, ity: IntegerType) -> Integer {
        let dl = cx.data_layout();

        match ity {
            IntegerType::Pointer(_) => dl.ptr_sized_integer(),
            IntegerType::Fixed(x, _) => x,
        }
    }

    pub fn align<C: HasDataLayout>(self, cx: &C) -> AbiAlign {
        use Integer::*;
        let dl = cx.data_layout();

        match self {
            I8 => dl.i8_align,
            I16 => dl.i16_align,
            I32 => dl.i32_align,
            I64 => dl.i64_align,
            I128 => dl.i128_align,
        }
    }

    /// Returns the largest signed value that can be represented by this Integer.
    #[inline]
    pub fn signed_max(self) -> i128 {
        use Integer::*;
        match self {
            I8 => i8::MAX as i128,
            I16 => i16::MAX as i128,
            I32 => i32::MAX as i128,
            I64 => i64::MAX as i128,
            I128 => i128::MAX,
        }
    }

    /// Returns the smallest signed value that can be represented by this Integer.
    #[inline]
    pub fn signed_min(self) -> i128 {
        use Integer::*;
        match self {
            I8 => i8::MIN as i128,
            I16 => i16::MIN as i128,
            I32 => i32::MIN as i128,
            I64 => i64::MIN as i128,
            I128 => i128::MIN,
        }
    }

    /// Finds the smallest Integer type which can represent the signed value.
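    ///
    /// For example, `Integer::fit_signed(127)` is `I8` but `Integer::fit_signed(128)` is
    /// `I16`; likewise `fit_signed(-128)` is `I8` but `fit_signed(-129)` is `I16`.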
    #[inline]
    pub fn fit_signed(x: i128) -> Integer {
        use Integer::*;
        match x {
            -0x0000_0000_0000_0080..=0x0000_0000_0000_007f => I8,
            -0x0000_0000_0000_8000..=0x0000_0000_0000_7fff => I16,
            -0x0000_0000_8000_0000..=0x0000_0000_7fff_ffff => I32,
            -0x8000_0000_0000_0000..=0x7fff_ffff_ffff_ffff => I64,
            _ => I128,
        }
    }

    /// Finds the smallest Integer type which can represent the unsigned value.
    #[inline]
    pub fn fit_unsigned(x: u128) -> Integer {
        use Integer::*;
        match x {
            0..=0x0000_0000_0000_00ff => I8,
            0..=0x0000_0000_0000_ffff => I16,
            0..=0x0000_0000_ffff_ffff => I32,
            0..=0xffff_ffff_ffff_ffff => I64,
            _ => I128,
        }
    }

    /// Finds the smallest integer with the given alignment.
    pub fn for_align<C: HasDataLayout>(cx: &C, wanted: Align) -> Option<Integer> {
        use Integer::*;
        let dl = cx.data_layout();

        [I8, I16, I32, I64, I128].into_iter().find(|&candidate| {
            wanted == candidate.align(dl).abi && wanted.bytes() == candidate.size().bytes()
        })
    }

    /// Find the largest integer with the given alignment or less.
    pub fn approximate_align<C: HasDataLayout>(cx: &C, wanted: Align) -> Integer {
        use Integer::*;
        let dl = cx.data_layout();

        // FIXME(eddyb) maybe include I128 in the future, when it works everywhere.
        for candidate in [I64, I32, I16] {
            if wanted >= candidate.align(dl).abi && wanted.bytes() >= candidate.size().bytes() {
                return candidate;
            }
        }
        I8
    }

    // FIXME(eddyb) consolidate this and other methods that find the appropriate
    // `Integer` given some requirements.
    #[inline]
    pub fn from_size(size: Size) -> Result<Self, String> {
        match size.bits() {
            8 => Ok(Integer::I8),
            16 => Ok(Integer::I16),
            32 => Ok(Integer::I32),
            64 => Ok(Integer::I64),
            128 => Ok(Integer::I128),
            _ => Err(format!("rust does not support integers with {} bits", size.bits())),
        }
    }
}

/// Floating-point types.
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
pub enum Float {
    F16,
    F32,
    F64,
    F128,
}

impl Float {
    pub fn size(self) -> Size {
        use Float::*;

        match self {
            F16 => Size::from_bits(16),
            F32 => Size::from_bits(32),
            F64 => Size::from_bits(64),
            F128 => Size::from_bits(128),
        }
    }

    pub fn align<C: HasDataLayout>(self, cx: &C) -> AbiAlign {
        use Float::*;
        let dl = cx.data_layout();

        match self {
            F16 => dl.f16_align,
            F32 => dl.f32_align,
            F64 => dl.f64_align,
            F128 => dl.f128_align,
        }
    }
}

/// Fundamental unit of memory access and layout.
#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
pub enum Primitive {
    /// The `bool` is the signedness of the `Integer` type.
    ///
    /// One would think we would not care about such details this low down,
    /// but some ABIs are described in terms of C types and ISAs where the
    /// integer arithmetic is done on {sign,zero}-extended registers, e.g.
    /// a negative integer passed by zero-extension will appear positive in
    /// the callee, and most operations on it will produce the wrong values.
    Int(Integer, bool),
    Float(Float),
    Pointer(AddressSpace),
}

impl Primitive {
    pub fn size<C: HasDataLayout>(self, cx: &C) -> Size {
        use Primitive::*;
        let dl = cx.data_layout();

        match self {
            Int(i, _) => i.size(),
            Float(f) => f.size(),
            Pointer(a) => dl.pointer_size_in(a),
        }
    }

    pub fn align<C: HasDataLayout>(self, cx: &C) -> AbiAlign {
        use Primitive::*;
        let dl = cx.data_layout();

        match self {
            Int(i, _) => i.align(dl),
            Float(f) => f.align(dl),
            Pointer(a) => dl.pointer_align_in(a),
        }
    }
}

/// Inclusive wrap-around range of valid values, that is, if
/// start > end, it represents `start..=MAX`, followed by `0..=end`.
///
/// That is, for an i8 primitive, a range of `254..=2` means the following
/// sequence:
///
///    254 (-2), 255 (-1), 0, 1, 2
///
/// This is intended specifically to mirror LLVM's `!range` metadata semantics.
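///
/// A short illustration of the wrap-around case (an `ignore`d illustration, since this
/// internal crate's doctests may not be built):
///
/// ```ignore (illustrative)
/// let range = WrappingRange { start: 254, end: 2 };
/// assert!(range.contains(255));
/// assert!(range.contains(0));
/// assert!(!range.contains(100));
/// ```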
#[derive(Clone, Copy, PartialEq, Eq, Hash)]
#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
pub struct WrappingRange {
    pub start: u128,
    pub end: u128,
}

impl WrappingRange {
    pub fn full(size: Size) -> Self {
        Self { start: 0, end: size.unsigned_int_max() }
    }

    /// Returns `true` if `v` is contained in the range.
    #[inline(always)]
    pub fn contains(&self, v: u128) -> bool {
        if self.start <= self.end {
            self.start <= v && v <= self.end
        } else {
            self.start <= v || v <= self.end
        }
    }

    /// Returns `true` if all the values in `other` are contained in this range,
    /// when the values are considered as having width `size`.
    #[inline(always)]
    pub fn contains_range(&self, other: Self, size: Size) -> bool {
        if self.is_full_for(size) {
            true
        } else {
            let trunc = |x| size.truncate(x);

            let delta = self.start;
            let max = trunc(self.end.wrapping_sub(delta));

            let other_start = trunc(other.start.wrapping_sub(delta));
            let other_end = trunc(other.end.wrapping_sub(delta));

            // Having shifted both input ranges by `delta`, now we only need to check
            // whether `0..=max` contains `other_start..=other_end`, which can only
            // happen if the other doesn't wrap since `self` isn't everything.
            (other_start <= other_end) && (other_end <= max)
        }
    }

    /// Returns `self` with replaced `start`
    #[inline(always)]
    fn with_start(mut self, start: u128) -> Self {
        self.start = start;
        self
    }

    /// Returns `self` with replaced `end`
    #[inline(always)]
    fn with_end(mut self, end: u128) -> Self {
        self.end = end;
        self
    }

    /// Returns `true` if `size` completely fills the range.
    ///
    /// Note that this is *not* the same as `self == WrappingRange::full(size)`.
    /// Niche calculations can produce full ranges which are not the canonical one;
    /// for example `Option<NonZero<u16>>` gets `valid_range: (..=0) | (1..)`.
    #[inline]
    fn is_full_for(&self, size: Size) -> bool {
        let max_value = size.unsigned_int_max();
        debug_assert!(self.start <= max_value && self.end <= max_value);
        self.start == (self.end.wrapping_add(1) & max_value)
    }

    /// Checks whether this range is considered non-wrapping when the values are
    /// interpreted as *unsigned* numbers of width `size`.
    ///
    /// Returns `Ok(true)` if there's no wrap-around, `Ok(false)` if there is,
    /// and `Err(..)` if the range is full so it depends how you think about it.
    #[inline]
    pub fn no_unsigned_wraparound(&self, size: Size) -> Result<bool, RangeFull> {
        if self.is_full_for(size) { Err(..) } else { Ok(self.start <= self.end) }
    }

    /// Checks whether this range is considered non-wrapping when the values are
    /// interpreted as *signed* numbers of width `size`.
    ///
    /// This is heavily dependent on the `size`, as `100..=200` does wrap when
    /// interpreted as `i8`, but doesn't when interpreted as `i16`.
    ///
    /// Returns `Ok(true)` if there's no wrap-around, `Ok(false)` if there is,
    /// and `Err(..)` if the range is full so it depends how you think about it.
    #[inline]
    pub fn no_signed_wraparound(&self, size: Size) -> Result<bool, RangeFull> {
        if self.is_full_for(size) {
            Err(..)
        } else {
            let start: i128 = size.sign_extend(self.start);
            let end: i128 = size.sign_extend(self.end);
            Ok(start <= end)
        }
    }
}

impl fmt::Debug for WrappingRange {
    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
        if self.start > self.end {
            write!(fmt, "(..={}) | ({}..)", self.end, self.start)?;
        } else {
            write!(fmt, "{}..={}", self.start, self.end)?;
        }
        Ok(())
    }
}

/// Information about one scalar component of a Rust type.
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
pub enum Scalar {
    Initialized {
        value: Primitive,

        // FIXME(eddyb) always use the shortest range, e.g., by finding
        // the largest space between two consecutive valid values and
        // taking everything else as the (shortest) valid range.
        valid_range: WrappingRange,
    },
    Union {
        /// Even for unions, we need to use the correct registers for the kind of
        /// values inside the union, so we keep the `Primitive` type around. We
        /// also use it to compute the size of the scalar.
        /// However, unions never have niches and even allow undef,
        /// so there is no `valid_range`.
        value: Primitive,
    },
}

impl Scalar {
    #[inline]
    pub fn is_bool(&self) -> bool {
        use Integer::*;
        matches!(
            self,
            Scalar::Initialized {
                value: Primitive::Int(I8, false),
                valid_range: WrappingRange { start: 0, end: 1 }
            }
        )
    }

    /// Get the primitive representation of this type, ignoring the valid range and whether the
    /// value is allowed to be undefined (due to being a union).
    pub fn primitive(&self) -> Primitive {
        match *self {
            Scalar::Initialized { value, .. } | Scalar::Union { value } => value,
        }
    }

    pub fn align(self, cx: &impl HasDataLayout) -> AbiAlign {
        self.primitive().align(cx)
    }

    pub fn size(self, cx: &impl HasDataLayout) -> Size {
        self.primitive().size(cx)
    }

    #[inline]
    pub fn to_union(&self) -> Self {
        Self::Union { value: self.primitive() }
    }

    #[inline]
    pub fn valid_range(&self, cx: &impl HasDataLayout) -> WrappingRange {
        match *self {
            Scalar::Initialized { valid_range, .. } => valid_range,
            Scalar::Union { value } => WrappingRange::full(value.size(cx)),
        }
    }

    /// Allows the caller to mutate the valid range. This operation will panic if attempted on a
    /// union.
    #[inline]
    pub fn valid_range_mut(&mut self) -> &mut WrappingRange {
        match self {
            Scalar::Initialized { valid_range, .. } => valid_range,
            Scalar::Union { .. } => panic!("cannot change the valid range of a union"),
        }
    }

    /// Returns `true` if all possible numbers are valid, i.e. `valid_range` covers the whole
    /// layout.
    #[inline]
    pub fn is_always_valid<C: HasDataLayout>(&self, cx: &C) -> bool {
        match *self {
            Scalar::Initialized { valid_range, .. } => valid_range.is_full_for(self.size(cx)),
            Scalar::Union { .. } => true,
        }
    }

    /// Returns `true` if this type can be left uninit.
    #[inline]
    pub fn is_uninit_valid(&self) -> bool {
        match *self {
            Scalar::Initialized { .. } => false,
            Scalar::Union { .. } => true,
        }
    }

    /// Returns `true` if this is a signed integer scalar.
    #[inline]
    pub fn is_signed(&self) -> bool {
        match self.primitive() {
            Primitive::Int(_, signed) => signed,
            _ => false,
        }
    }
}

// NOTE: This struct is generic over the FieldIdx for rust-analyzer usage.
/// Describes how the fields of a type are located in memory.
#[derive(PartialEq, Eq, Hash, Clone, Debug)]
#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
pub enum FieldsShape<FieldIdx: Idx> {
    /// Scalar primitives and `!`, which never have fields.
    Primitive,

    /// All fields start at no offset. The `usize` is the field count.
    Union(NonZeroUsize),

    /// Array/vector-like placement, with all fields of identical types.
    Array { stride: Size, count: u64 },

    /// Struct-like placement, with precomputed offsets.
    ///
    /// Fields are guaranteed to not overlap, but note that gaps
    /// before, between and after all the fields are NOT always
    /// padding, and as such their contents may not be discarded.
    /// For example, enum variants leave a gap at the start,
    /// where the discriminant field in the enum layout goes.
    Arbitrary {
        /// Offsets for the first byte of each field,
        /// ordered to match the source definition order.
        /// This vector is not necessarily sorted by increasing offset.
        // FIXME(eddyb) use small vector optimization for the common case.
        offsets: IndexVec<FieldIdx, Size>,

        /// Maps source order field indices to memory order indices,
        /// depending on how the fields were reordered (if at all).
        /// This is a permutation, with both the source order and the
        /// memory order using the same (0..n) index ranges.
        ///
        /// Note that during computation of `memory_index`, sometimes
        /// it is easier to operate on the inverse mapping (that is,
        /// from memory order to source order), and that is usually
        /// named `inverse_memory_index`.
        ///
        // FIXME(eddyb) build a better abstraction for permutations, if possible.
        // FIXME(camlorn) also consider small vector optimization here.
        memory_index: IndexVec<FieldIdx, u32>,
    },
}

impl<FieldIdx: Idx> FieldsShape<FieldIdx> {
    #[inline]
    pub fn count(&self) -> usize {
        match *self {
            FieldsShape::Primitive => 0,
            FieldsShape::Union(count) => count.get(),
            FieldsShape::Array { count, .. } => count.try_into().unwrap(),
            FieldsShape::Arbitrary { ref offsets, .. } => offsets.len(),
        }
    }

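    /// Returns the offset of field `i` (in source definition order) from the start
    /// of the layout.
    ///
    /// A sketch of the array case, where offsets are just multiples of the stride
    /// (using `u32` as a stand-in field-index type for illustration):
    /// ```
    /// use rustc_abi::{FieldsShape, Size};
    ///
    /// let shape: FieldsShape<u32> = FieldsShape::Array { stride: Size::from_bytes(4), count: 3 };
    /// assert_eq!(shape.offset(2), Size::from_bytes(8));
    /// ```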
    #[inline]
    pub fn offset(&self, i: usize) -> Size {
        match *self {
            FieldsShape::Primitive => {
                unreachable!("FieldsShape::offset: `Primitive`s have no fields")
            }
            FieldsShape::Union(count) => {
                assert!(i < count.get(), "tried to access field {i} of union with {count} fields");
                Size::ZERO
            }
            FieldsShape::Array { stride, count } => {
                let i = u64::try_from(i).unwrap();
                assert!(i < count, "tried to access field {i} of array with {count} fields");
                stride * i
            }
            FieldsShape::Arbitrary { ref offsets, .. } => offsets[FieldIdx::new(i)],
        }
    }

    #[inline]
    pub fn memory_index(&self, i: usize) -> usize {
        match *self {
            FieldsShape::Primitive => {
                unreachable!("FieldsShape::memory_index: `Primitive`s have no fields")
            }
            FieldsShape::Union(_) | FieldsShape::Array { .. } => i,
            FieldsShape::Arbitrary { ref memory_index, .. } => {
                memory_index[FieldIdx::new(i)].try_into().unwrap()
            }
        }
    }

    /// Gets source indices of the fields by increasing offsets.
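    ///
    /// A sketch with two reordered fields, where source field 0 lives at offset 4
    /// and source field 1 at offset 0 (`u32` again standing in for the field index):
    /// ```
    /// use rustc_abi::{FieldsShape, Size};
    /// use rustc_index::IndexVec;
    ///
    /// let shape: FieldsShape<u32> = FieldsShape::Arbitrary {
    ///     offsets: IndexVec::from_raw(vec![Size::from_bytes(4), Size::from_bytes(0)]),
    ///     memory_index: IndexVec::from_raw(vec![1, 0]),
    /// };
    /// let by_offset: Vec<usize> = shape.index_by_increasing_offset().collect();
    /// assert_eq!(by_offset, [1, 0]);
    /// ```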
    #[inline]
    pub fn index_by_increasing_offset(&self) -> impl ExactSizeIterator<Item = usize> {
        let mut inverse_small = [0u8; 64];
        let mut inverse_big = IndexVec::new();
        let use_small = self.count() <= inverse_small.len();

        // We have to write this logic twice in order to keep the array small.
        if let FieldsShape::Arbitrary { ref memory_index, .. } = *self {
            if use_small {
                for (field_idx, &mem_idx) in memory_index.iter_enumerated() {
                    inverse_small[mem_idx as usize] = field_idx.index() as u8;
                }
            } else {
                inverse_big = memory_index.invert_bijective_mapping();
            }
        }

        // Primitives don't really have fields in the way that structs do,
        // but having this return an empty iterator for them is unhelpful
        // since that makes them look kinda like ZSTs, which they're not.
        let pseudofield_count = if let FieldsShape::Primitive = self { 1 } else { self.count() };

        (0..pseudofield_count).map(move |i| match *self {
            FieldsShape::Primitive | FieldsShape::Union(_) | FieldsShape::Array { .. } => i,
            FieldsShape::Arbitrary { .. } => {
                if use_small {
                    inverse_small[i] as usize
                } else {
                    inverse_big[i as u32].index()
                }
            }
        })
    }
}

/// An identifier that specifies the address space that some operation
/// should operate on. Special address spaces have an effect on code generation,
/// depending on the target and the address spaces it implements.
#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
pub struct AddressSpace(pub u32);

impl AddressSpace {
    /// LLVM's `0` address space.
    pub const ZERO: Self = AddressSpace(0);
}

/// The way we represent values to the backend.
///
/// Previously this was conflated with the "ABI" a type is given, as in the platform-specific ABI.
/// In reality, it implies little about that; it mostly describes the syntactic form the value is
/// emitted in for the backend, as most backends handle SSA values and blobs of memory differently.
/// The psABI may need consideration in doing so, but this enum does not, in itself, constitute a
/// promise for how the value will be lowered to the calling convention.
///
/// Generally, a codegen backend will prefer to handle smaller values as a scalar or short vector,
/// and larger values will usually prefer to be represented as memory.
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
pub enum BackendRepr {
    Scalar(Scalar),
    ScalarPair(Scalar, Scalar),
    SimdVector {
        element: Scalar,
        count: u64,
    },
    // FIXME: I sometimes use memory, sometimes use an IR aggregate!
    Memory {
        /// If true, the size is exact, otherwise it's only a lower bound.
        sized: bool,
    },
}

impl BackendRepr {
    /// Returns `true` if the layout corresponds to an unsized type.
    #[inline]
    pub fn is_unsized(&self) -> bool {
        match *self {
            BackendRepr::Scalar(_)
            | BackendRepr::ScalarPair(..)
            | BackendRepr::SimdVector { .. } => false,
            BackendRepr::Memory { sized } => !sized,
        }
    }

    #[inline]
    pub fn is_sized(&self) -> bool {
        !self.is_unsized()
    }

    /// Returns `true` if this is a single signed integer scalar.
    /// Sanity check: panics if this is not a scalar type (see PR #70189).
    #[inline]
    pub fn is_signed(&self) -> bool {
        match self {
            BackendRepr::Scalar(scal) => scal.is_signed(),
            _ => panic!("`is_signed` on non-scalar ABI {self:?}"),
        }
    }

    /// Returns `true` if this is a scalar type.
    #[inline]
    pub fn is_scalar(&self) -> bool {
        matches!(*self, BackendRepr::Scalar(_))
    }

    /// Returns `true` if this is a bool.
    #[inline]
    pub fn is_bool(&self) -> bool {
        matches!(*self, BackendRepr::Scalar(s) if s.is_bool())
    }

    /// The psABI alignment for a `Scalar` or `ScalarPair`.
    ///
    /// `None` for other variants.
    pub fn scalar_align<C: HasDataLayout>(&self, cx: &C) -> Option<Align> {
        match *self {
            BackendRepr::Scalar(s) => Some(s.align(cx).abi),
            BackendRepr::ScalarPair(s1, s2) => Some(s1.align(cx).max(s2.align(cx)).abi),
            // The align of a Vector can vary in surprising ways
            BackendRepr::SimdVector { .. } | BackendRepr::Memory { .. } => None,
        }
    }

    /// The psABI size for a `Scalar` or `ScalarPair`.
    ///
    /// `None` for other variants.
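    ///
    /// A sketch for the pair case; it assumes `TargetDataLayout::default()` provides
    /// an x86-64-like layout where `i32` is 4-byte aligned:
    /// ```
    /// use rustc_abi::{BackendRepr, Integer, Primitive, Scalar, Size, WrappingRange};
    /// use rustc_abi::TargetDataLayout;
    ///
    /// let dl = TargetDataLayout::default();
    /// let int = |i: Integer| Scalar::Initialized {
    ///     value: Primitive::Int(i, false),
    ///     valid_range: WrappingRange { start: 0, end: i.size().unsigned_int_max() },
    /// };
    /// // An `(i8, i32)` pair: the second field sits at offset 4, for 8 bytes total.
    /// let pair = BackendRepr::ScalarPair(int(Integer::I8), int(Integer::I32));
    /// assert_eq!(pair.scalar_size(&dl), Some(Size::from_bytes(8)));
    /// ```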
    pub fn scalar_size<C: HasDataLayout>(&self, cx: &C) -> Option<Size> {
        match *self {
            // No padding in scalars.
            BackendRepr::Scalar(s) => Some(s.size(cx)),
            // May have some padding between the pair.
            BackendRepr::ScalarPair(s1, s2) => {
                let field2_offset = s1.size(cx).align_to(s2.align(cx).abi);
                let size = (field2_offset + s2.size(cx)).align_to(
                    self.scalar_align(cx)
                        // We absolutely must have an answer here or everything is FUBAR.
                        .unwrap(),
                );
                Some(size)
            }
            // The size of a Vector can vary in surprising ways
            BackendRepr::SimdVector { .. } | BackendRepr::Memory { .. } => None,
        }
    }

    /// Discard validity range information and allow undef.
    pub fn to_union(&self) -> Self {
        match *self {
            BackendRepr::Scalar(s) => BackendRepr::Scalar(s.to_union()),
            BackendRepr::ScalarPair(s1, s2) => {
                BackendRepr::ScalarPair(s1.to_union(), s2.to_union())
            }
            BackendRepr::SimdVector { element, count } => {
                BackendRepr::SimdVector { element: element.to_union(), count }
            }
            BackendRepr::Memory { .. } => BackendRepr::Memory { sized: true },
        }
    }

    pub fn eq_up_to_validity(&self, other: &Self) -> bool {
        match (self, other) {
            // Scalar, Vector, ScalarPair have `Scalar` in them where we ignore validity ranges.
            // We do *not* ignore the sign since it matters for some ABIs (e.g. s390x).
            (BackendRepr::Scalar(l), BackendRepr::Scalar(r)) => l.primitive() == r.primitive(),
            (
                BackendRepr::SimdVector { element: element_l, count: count_l },
                BackendRepr::SimdVector { element: element_r, count: count_r },
            ) => element_l.primitive() == element_r.primitive() && count_l == count_r,
            (BackendRepr::ScalarPair(l1, l2), BackendRepr::ScalarPair(r1, r2)) => {
                l1.primitive() == r1.primitive() && l2.primitive() == r2.primitive()
            }
            // Everything else must be strictly identical.
            _ => self == other,
        }
    }
}

// NOTE: This struct is generic over the FieldIdx and VariantIdx for rust-analyzer usage.
#[derive(PartialEq, Eq, Hash, Clone, Debug)]
#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
pub enum Variants<FieldIdx: Idx, VariantIdx: Idx> {
    /// A type with no valid variants. Must be uninhabited.
    Empty,

    /// Single enum variants, structs/tuples, unions, and all non-ADTs.
    Single {
        /// Always `0` for types that cannot have multiple variants.
        index: VariantIdx,
    },

    /// Enum-likes with more than one variant: each variant comes with
    /// a *discriminant* (usually the same as the variant index but the user can
    /// assign explicit discriminant values). That discriminant is encoded
    /// as a *tag* on the machine. The layout of each variant is
    /// a struct, and they all have space reserved for the tag.
    /// For enums, the tag is the sole field of the layout.
    Multiple {
        tag: Scalar,
        tag_encoding: TagEncoding<VariantIdx>,
        tag_field: FieldIdx,
        variants: IndexVec<VariantIdx, LayoutData<FieldIdx, VariantIdx>>,
    },
}

// NOTE: This struct is generic over the VariantIdx for rust-analyzer usage.
#[derive(PartialEq, Eq, Hash, Clone, Debug)]
#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
pub enum TagEncoding<VariantIdx: Idx> {
    /// The tag directly stores the discriminant, but possibly with a smaller layout
    /// (so converting the tag to the discriminant can require sign extension).
    Direct,

    /// Niche (values invalid for a type) encoding the discriminant.
    /// Note that for this encoding, the discriminant and variant index of each variant coincide!
    /// This invariant is codified as part of [`layout_sanity_check`](../rustc_ty_utils/layout/invariant/fn.layout_sanity_check.html).
    ///
    /// The variant `untagged_variant` contains a niche at an arbitrary
    /// offset (field [`Variants::Multiple::tag_field`] of the enum).
    /// For a variant with variant index `i`, such that `i != untagged_variant`,
    /// the tag is set to `(i - niche_variants.start).wrapping_add(niche_start)`
    /// (this is wrapping arithmetic using the type of the niche field, cf. the
    /// [`tag_for_variant`](../rustc_const_eval/interpret/struct.InterpCx.html#method.tag_for_variant)
    /// query implementation).
    /// To recover the variant index `i` from a `tag`, the above formula has to be reversed,
    /// i.e. `i = tag.wrapping_sub(niche_start) + niche_variants.start`. If `i` ends up outside
    /// `niche_variants`, the tag must have encoded the `untagged_variant`.
    ///
    /// For example, `Option<(usize, &T)>` is represented such that the tag for
    /// `None` is the null pointer in the second tuple field, and
    /// `Some` is the identity function (with a non-null reference)
    /// and has no additional tag, i.e. the reference being non-null uniquely identifies this variant.
    ///
    /// Other variants that are not `untagged_variant` and that are outside the `niche_variants`
    /// range cannot be represented; they must be uninhabited.
    /// Nonetheless, uninhabited variants can also fall into the range of `niche_variants`.
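    ///
    /// As a worked numeric sketch (illustrative values, not from any real layout):
    /// with `untagged_variant = 0`, `niche_variants = 1..=2`, and `niche_start = 5`,
    /// variant `1` is encoded as tag `(1 - 1).wrapping_add(5) = 5` and variant `2`
    /// as tag `6`; any tag decoding to an index outside `1..=2` means the value is
    /// the untagged variant `0`.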
    Niche {
        untagged_variant: VariantIdx,
        /// This range *may* contain `untagged_variant` or uninhabited variants;
        /// these are then just "dead values" and not used to encode anything.
        niche_variants: RangeInclusive<VariantIdx>,
        /// This is inbounds of the type of the niche field
        /// (not sign-extended, i.e., all bits beyond the niche field size are 0).
        niche_start: u128,
    },
}

#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
pub struct Niche {
    pub offset: Size,
    pub value: Primitive,
    pub valid_range: WrappingRange,
}

impl Niche {
    pub fn from_scalar<C: HasDataLayout>(cx: &C, offset: Size, scalar: Scalar) -> Option<Self> {
        let Scalar::Initialized { value, valid_range } = scalar else { return None };
        let niche = Niche { offset, value, valid_range };
        if niche.available(cx) > 0 { Some(niche) } else { None }
    }

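    /// Returns the number of values of the niche's primitive type that fall outside
    /// `valid_range`, i.e. how many niche values are available.
    ///
    /// A sketch for a `NonZeroU8`-style niche, assuming `TargetDataLayout::default()`
    /// as a stand-in data layout:
    /// ```
    /// use rustc_abi::{Integer, Niche, Primitive, Size, TargetDataLayout, WrappingRange};
    ///
    /// let dl = TargetDataLayout::default();
    /// let niche = Niche {
    ///     offset: Size::ZERO,
    ///     value: Primitive::Int(Integer::I8, false),
    ///     valid_range: WrappingRange { start: 1, end: 255 },
    /// };
    /// // Only `0` lies outside `1..=255`.
    /// assert_eq!(niche.available(&dl), 1);
    /// // Reserving that one spare value hands out tag value `0`.
    /// assert_eq!(niche.reserve(&dl, 1).map(|(tag, _)| tag), Some(0));
    /// ```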
    pub fn available<C: HasDataLayout>(&self, cx: &C) -> u128 {
        let Self { value, valid_range: v, .. } = *self;
        let size = value.size(cx);
        assert!(size.bits() <= 128);
        let max_value = size.unsigned_int_max();

        // Find out how many values are outside the valid range.
        let niche = v.end.wrapping_add(1)..v.start;
        niche.end.wrapping_sub(niche.start) & max_value
    }

    pub fn reserve<C: HasDataLayout>(&self, cx: &C, count: u128) -> Option<(u128, Scalar)> {
        assert!(count > 0);

        let Self { value, valid_range: v, .. } = *self;
        let size = value.size(cx);
        assert!(size.bits() <= 128);
        let max_value = size.unsigned_int_max();

        let niche = v.end.wrapping_add(1)..v.start;
        let available = niche.end.wrapping_sub(niche.start) & max_value;
        if count > available {
            return None;
        }

        // Extend the range of valid values by moving either the `v.start` or the `v.end` bound.
        // Given an eventual `Option<T>`, we try to maximize the chance for `None` to occupy the
        // niche of zero. This is accomplished by preferring enums with 2 variants (`count == 1`)
        // and always taking the shortest path to niche zero. Having `None` in niche zero can
        // enable some special optimizations.
        //
        // Bound selection criteria:
        // 1. Select closest to zero given wrapping semantics.
        // 2. Avoid moving past zero if possible.
        //
        // In practice this means that enums with `count > 1` are unlikely to claim niche zero,
        // since they have to fit perfectly. If niche zero is already reserved, the selection of
        // bounds is of little interest.
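        //
        // Worked example (illustrative): for a `NonZeroU8`-style range `1..=255` with
        // `count == 1`, `distance_end_zero` is `0`, so we take the final `else` branch;
        // `end` wraps to `0` without overshooting zero, and `move_end` claims niche
        // value `0`, mirroring the null-pointer optimization for `Option<NonZeroU8>`.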
        let move_start = |v: WrappingRange| {
            let start = v.start.wrapping_sub(count) & max_value;
            Some((start, Scalar::Initialized { value, valid_range: v.with_start(start) }))
        };
        let move_end = |v: WrappingRange| {
            let start = v.end.wrapping_add(1) & max_value;
            let end = v.end.wrapping_add(count) & max_value;
            Some((start, Scalar::Initialized { value, valid_range: v.with_end(end) }))
        };
        let distance_end_zero = max_value - v.end;
        if v.start > v.end {
            // zero is unavailable because wrapping occurs
            move_end(v)
        } else if v.start <= distance_end_zero {
            if count <= v.start {
                move_start(v)
            } else {
                // moved past zero, use other bound
                move_end(v)
            }
        } else {
            let end = v.end.wrapping_add(count) & max_value;
            let overshot_zero = (1..=v.end).contains(&end);
            if overshot_zero {
                // moved past zero, use other bound
                move_start(v)
            } else {
                move_end(v)
            }
        }
    }
}

// NOTE: This struct is generic over the FieldIdx and VariantIdx for rust-analyzer usage.
#[derive(PartialEq, Eq, Hash, Clone)]
#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
pub struct LayoutData<FieldIdx: Idx, VariantIdx: Idx> {
    /// Says where the fields are located within the layout.
    pub fields: FieldsShape<FieldIdx>,

    /// Encodes information about multi-variant layouts.
    /// Even with `Multiple` variants, a layout still has its own fields! Those are then
    /// shared between all variants. One of them will be the discriminant,
    /// but e.g. coroutines can have more.
    ///
    /// To access all fields of this layout, both `fields` and the fields of the active variant
    /// must be taken into account.
    pub variants: Variants<FieldIdx, VariantIdx>,

    /// The `backend_repr` defines how this data will be represented to the codegen backend,
    /// and encodes value restrictions via `valid_range`.
    ///
    /// Note that this is entirely orthogonal to the recursive structure defined by
    /// `variants` and `fields`; for example, `ManuallyDrop<Result<isize, isize>>` has
    /// `IrForm::ScalarPair`! So, even with non-`Memory` `backend_repr`, `fields` and `variants`
    /// have to be taken into account to find all fields of this layout.
    pub backend_repr: BackendRepr,

    /// The leaf scalar with the largest number of invalid values
    /// (i.e. outside of its `valid_range`), if it exists.
    pub largest_niche: Option<Niche>,
    /// Is this type known to be uninhabited?
    ///
    /// This is separate from BackendRepr because uninhabited return types can affect ABI,
    /// especially in the case of by-pointer struct returns, which allocate stack even when unused.
    pub uninhabited: bool,

    pub align: AbiAlign,
    pub size: Size,

    /// The largest alignment explicitly requested with `repr(align)` on this type or any field.
    /// Only used on i686-windows, where the argument passing ABI is different when alignment is
    /// requested, even if the requested alignment is equal to the natural alignment.
    pub max_repr_align: Option<Align>,

    /// The alignment the type would have, ignoring any `repr(align)` but including `repr(packed)`.
    /// Only used on aarch64-linux, where the argument passing ABI ignores the requested alignment
    /// in some cases.
    pub unadjusted_abi_align: Align,

    /// The randomization seed based on this type's own repr and its fields.
    ///
    /// Since randomization is toggled on a per-crate basis, even crates that do not have
    /// randomization enabled should still calculate a seed, so that downstream users can use it
    /// to distinguish different types.
    ///
    /// For every `T` and `U` for which we do not guarantee that a `repr(Rust)` `Foo<T>` can be
    /// coerced or transmuted to `Foo<U>`, we aim to create probabilistically distinct seeds so
    /// that `Foo` can choose to reorder its fields based on that information. The current
    /// implementation is a conservative approximation of this goal.
    pub randomization_seed: Hash64,
}

impl<FieldIdx: Idx, VariantIdx: Idx> LayoutData<FieldIdx, VariantIdx> {
    /// Returns `true` if this is an aggregate type (including a ScalarPair!).
    pub fn is_aggregate(&self) -> bool {
        match self.backend_repr {
            BackendRepr::Scalar(_) | BackendRepr::SimdVector { .. } => false,
            BackendRepr::ScalarPair(..) | BackendRepr::Memory { .. } => true,
        }
    }

    /// Returns `true` if this is an uninhabited type.
    pub fn is_uninhabited(&self) -> bool {
        self.uninhabited
    }
}

impl<FieldIdx: Idx, VariantIdx: Idx> fmt::Debug for LayoutData<FieldIdx, VariantIdx>
where
    FieldsShape<FieldIdx>: fmt::Debug,
    Variants<FieldIdx, VariantIdx>: fmt::Debug,
{
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // This is how `Layout` used to print before it became
        // `Interned<LayoutData>`. We print it like this to avoid having to update
        // the expected output in a lot of tests.
        let LayoutData {
            size,
            align,
            backend_repr,
            fields,
            largest_niche,
            uninhabited,
            variants,
            max_repr_align,
            unadjusted_abi_align,
            randomization_seed,
        } = self;
        f.debug_struct("Layout")
            .field("size", size)
            .field("align", align)
            .field("backend_repr", backend_repr)
            .field("fields", fields)
            .field("largest_niche", largest_niche)
            .field("uninhabited", uninhabited)
            .field("variants", variants)
            .field("max_repr_align", max_repr_align)
            .field("unadjusted_abi_align", unadjusted_abi_align)
            .field("randomization_seed", randomization_seed)
            .finish()
    }
}

#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub enum PointerKind {
    /// Shared reference. `frozen` indicates the absence of any `UnsafeCell`.
    SharedRef { frozen: bool },
    /// Mutable reference. `unpin` indicates the absence of any pinned data.
    MutableRef { unpin: bool },
    /// Box. `unpin` indicates the absence of any pinned data. `global` indicates whether this box
    /// uses the global allocator or a custom one.
    Box { unpin: bool, global: bool },
}

/// Encodes extra information we have about a pointer.
/// Note that this information is advisory only, and backends are free to ignore it:
/// if the information is wrong, that can cause UB, but if the information is absent,
/// that must always be okay.
#[derive(Copy, Clone, Debug)]
pub struct PointeeInfo {
    /// If this is `None`, then this is a raw pointer, so size and alignment are not guaranteed to
    /// be reliable.
    pub safe: Option<PointerKind>,
    /// If `safe` is `Some`, then the pointer is either null or dereferenceable for this many bytes.
    /// On a function argument, "dereferenceable" here means "dereferenceable for the entire duration
    /// of this function call", i.e. it is UB for the memory that this pointer points to be freed
    /// while this function is still running.
    /// The size can be zero if the pointer is not dereferenceable.
    pub size: Size,
    /// If `safe` is `Some`, then the pointer is aligned as indicated.
    pub align: Align,
}

impl<FieldIdx: Idx, VariantIdx: Idx> LayoutData<FieldIdx, VariantIdx> {
    /// Returns `true` if the layout corresponds to an unsized type.
    #[inline]
    pub fn is_unsized(&self) -> bool {
        self.backend_repr.is_unsized()
    }

    #[inline]
    pub fn is_sized(&self) -> bool {
        self.backend_repr.is_sized()
    }

    /// Returns `true` if the type is sized and a 1-ZST (meaning it has size 0 and alignment 1).
    pub fn is_1zst(&self) -> bool {
        self.is_sized() && self.size.bytes() == 0 && self.align.abi.bytes() == 1
    }

    /// Returns `true` if the type is a ZST and not unsized.
    ///
    /// Note that this does *not* imply that the type is irrelevant for layout! It can still have
    /// non-trivial alignment constraints. You probably want to use `is_1zst` instead.
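    ///
    /// (For example, `[u64; 0]` has size 0 but, on typical targets, alignment 8, so it
    /// is a ZST but not a 1-ZST.)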
    pub fn is_zst(&self) -> bool {
        match self.backend_repr {
            BackendRepr::Scalar(_)
            | BackendRepr::ScalarPair(..)
            | BackendRepr::SimdVector { .. } => false,
            BackendRepr::Memory { sized } => sized && self.size.bytes() == 0,
        }
    }

    /// Checks if these two `Layout` are equal enough to be considered "the same for all function
    /// call ABIs". Note however that real ABIs depend on more details that are not reflected in
    /// the `Layout`; the `PassMode` needs to be compared as well. Also note that we assume
    /// aggregates are passed via `PassMode::Indirect` or `PassMode::Cast`; more strict
    /// checks would otherwise be required.
    pub fn eq_abi(&self, other: &Self) -> bool {
        // The one thing that we are not capturing here is that for unsized types, the metadata must
        // also have the same ABI, and moreover that the same metadata leads to the same size. The
        // 2nd point is quite hard to check though.
        self.size == other.size
            && self.is_sized() == other.is_sized()
            && self.backend_repr.eq_up_to_validity(&other.backend_repr)
            && self.backend_repr.is_bool() == other.backend_repr.is_bool()
            && self.align.abi == other.align.abi
            && self.max_repr_align == other.max_repr_align
            && self.unadjusted_abi_align == other.unadjusted_abi_align
    }
}

#[derive(Copy, Clone, Debug)]
pub enum StructKind {
    /// A tuple, closure, or univariant which cannot be coerced to unsized.
    AlwaysSized,
    /// A univariant, the last field of which may be coerced to unsized.
    MaybeUnsized,
    /// A univariant, but with a prefix of an arbitrary size & alignment (e.g., enum tag).
    Prefixed(Size, Align),
}

#[derive(Clone, Debug)]
pub enum AbiFromStrErr {
    /// not a known ABI
    Unknown,
    /// no "-unwind" variant can be used here
    NoExplicitUnwind,
}