// rustc_abi/layout.rs

use std::collections::BTreeSet;
use std::fmt::{self, Write};
use std::ops::{Bound, Deref};
use std::{cmp, iter};

use rustc_hashes::Hash64;
use rustc_index::Idx;
use rustc_index::bit_set::BitMatrix;
use tracing::{debug, trace};

use crate::{
    AbiAlign, Align, BackendRepr, FieldsShape, HasDataLayout, IndexSlice, IndexVec, Integer,
    LayoutData, Niche, NonZeroUsize, Primitive, ReprOptions, Scalar, Size, StructKind, TagEncoding,
    Variants, WrappingRange,
};

mod coroutine;
mod simple;

#[cfg(feature = "nightly")]
mod ty;

#[cfg(feature = "nightly")]
pub use ty::{FIRST_VARIANT, FieldIdx, Layout, TyAbiInterface, TyAndLayout, VariantIdx};

// A variant is absent if it's uninhabited and only has 1-ZST fields (fields
// that are zero-sized and 1-aligned, as checked by `is_1zst` below).
// Present uninhabited variants only require space for their fields,
// but *not* an encoding of the discriminant (e.g., a tag value).
// See issue #49298 for more details on the need to leave space
// for non-ZST uninhabited data (mostly partial initialization).
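// As a hypothetical illustration (using `!` as an uninhabited payload): in
// `enum E { A(u32), B(!, u32), C(!) }`, only `C` is absent. `B` is also
// uninhabited, but its `u32` field still needs space, so `B` is merely
// present-and-uninhabited rather than absent.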
fn absent<'a, FieldIdx, VariantIdx, F>(fields: &IndexSlice<FieldIdx, F>) -> bool
where
    FieldIdx: Idx,
    VariantIdx: Idx,
    F: Deref<Target = &'a LayoutData<FieldIdx, VariantIdx>> + fmt::Debug,
{
    let uninhabited = fields.iter().any(|f| f.is_uninhabited());
    // We cannot ignore alignment; that might lead us to entirely discard a variant and
    // produce an enum that is less aligned than it should be!
    let is_1zst = fields.iter().all(|f| f.is_1zst());
    uninhabited && is_1zst
}

/// Determines towards which end of a struct layout optimizations will try to place the best niches.
enum NicheBias {
    Start,
    End,
}

#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum LayoutCalculatorError<F> {
    /// An unsized type was found in a location where a sized type was expected.
    ///
    /// This is not always a compile error, for example if there is a `[T]: Sized`
    /// bound in a where clause.
    ///
    /// Contains the field that was unexpectedly unsized.
    UnexpectedUnsized(F),

    /// A type was too large for the target platform.
    SizeOverflow,

    /// A union had no fields.
    EmptyUnion,

    /// The fields or variants have irreconcilable reprs.
    ReprConflict,

    /// The length of a SIMD type is zero.
    ZeroLengthSimdType,

    /// The length of a SIMD type exceeds the maximum number of lanes.
    OversizedSimdType { max_lanes: u64 },

    /// An element type of a SIMD type isn't a primitive.
    NonPrimitiveSimdType(F),
}

impl<F> LayoutCalculatorError<F> {
    pub fn without_payload(&self) -> LayoutCalculatorError<()> {
        use LayoutCalculatorError::*;
        match *self {
            UnexpectedUnsized(_) => UnexpectedUnsized(()),
            SizeOverflow => SizeOverflow,
            EmptyUnion => EmptyUnion,
            ReprConflict => ReprConflict,
            ZeroLengthSimdType => ZeroLengthSimdType,
            OversizedSimdType { max_lanes } => OversizedSimdType { max_lanes },
            NonPrimitiveSimdType(_) => NonPrimitiveSimdType(()),
        }
    }

    /// Formats an untranslated diagnostic for this error.
    ///
    /// Intended for use by rust-analyzer, as neither it nor `rustc_abi` depends on fluent infra.
    pub fn fallback_fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        use LayoutCalculatorError::*;
        f.write_str(match self {
            UnexpectedUnsized(_) => "an unsized type was found where a sized type was expected",
            SizeOverflow => "size overflow",
            EmptyUnion => "type is a union with no fields",
            ReprConflict => "type has an invalid repr",
            ZeroLengthSimdType | OversizedSimdType { .. } | NonPrimitiveSimdType(_) => {
                "invalid simd type definition"
            }
        })
    }
}

type LayoutCalculatorResult<FieldIdx, VariantIdx, F> =
    Result<LayoutData<FieldIdx, VariantIdx>, LayoutCalculatorError<F>>;

#[derive(Clone, Copy, Debug)]
pub struct LayoutCalculator<Cx> {
    pub cx: Cx,
}

impl<Cx: HasDataLayout> LayoutCalculator<Cx> {
    pub fn new(cx: Cx) -> Self {
        Self { cx }
    }

    pub fn array_like<FieldIdx: Idx, VariantIdx: Idx, F>(
        &self,
        element: &LayoutData<FieldIdx, VariantIdx>,
        count_if_sized: Option<u64>, // None for slices
    ) -> LayoutCalculatorResult<FieldIdx, VariantIdx, F> {
        let count = count_if_sized.unwrap_or(0);
        let size =
            element.size.checked_mul(count, &self.cx).ok_or(LayoutCalculatorError::SizeOverflow)?;

        Ok(LayoutData {
            variants: Variants::Single { index: VariantIdx::new(0) },
            fields: FieldsShape::Array { stride: element.size, count },
            backend_repr: BackendRepr::Memory { sized: count_if_sized.is_some() },
            largest_niche: element.largest_niche.filter(|_| count != 0),
            uninhabited: element.uninhabited && count != 0,
            align: element.align,
            size,
            max_repr_align: None,
            unadjusted_abi_align: element.align.abi,
            randomization_seed: element.randomization_seed.wrapping_add(Hash64::new(count)),
        })
    }

    pub fn simd_type<
        FieldIdx: Idx,
        VariantIdx: Idx,
        F: AsRef<LayoutData<FieldIdx, VariantIdx>> + fmt::Debug,
    >(
        &self,
        element: F,
        count: u64,
        repr_packed: bool,
    ) -> LayoutCalculatorResult<FieldIdx, VariantIdx, F> {
        let elt = element.as_ref();
        if count == 0 {
            return Err(LayoutCalculatorError::ZeroLengthSimdType);
        } else if count > crate::MAX_SIMD_LANES {
            return Err(LayoutCalculatorError::OversizedSimdType {
                max_lanes: crate::MAX_SIMD_LANES,
            });
        }

        let BackendRepr::Scalar(e_repr) = elt.backend_repr else {
            return Err(LayoutCalculatorError::NonPrimitiveSimdType(element));
        };

        // Compute the size and alignment of the vector.
        let dl = self.cx.data_layout();
        let size =
            elt.size.checked_mul(count, dl).ok_or(LayoutCalculatorError::SizeOverflow)?;
        let (repr, align) = if repr_packed && !count.is_power_of_two() {
            // Non-power-of-two vectors have padding up to the next power-of-two.
            // If we're a packed repr, remove the padding while keeping the alignment as close
            // to a vector as possible.
            (BackendRepr::Memory { sized: true }, AbiAlign { abi: Align::max_aligned_factor(size) })
        } else {
            (BackendRepr::SimdVector { element: e_repr, count }, dl.llvmlike_vector_align(size))
        };
        let size = size.align_to(align.abi);

        Ok(LayoutData {
            variants: Variants::Single { index: VariantIdx::new(0) },
            fields: FieldsShape::Arbitrary {
                offsets: [Size::ZERO].into(),
                memory_index: [0].into(),
            },
            backend_repr: repr,
            largest_niche: elt.largest_niche,
            uninhabited: false,
            size,
            align,
            max_repr_align: None,
            unadjusted_abi_align: elt.align.abi,
            randomization_seed: elt.randomization_seed.wrapping_add(Hash64::new(count)),
        })
    }

    /// Compute the layout for a coroutine.
    ///
    /// This uses dedicated code instead of [`Self::layout_of_struct_or_enum`], as coroutine
    /// fields may be shared between multiple variants (see the [`coroutine`] module for details).
    pub fn coroutine<
        'a,
        F: Deref<Target = &'a LayoutData<FieldIdx, VariantIdx>> + fmt::Debug + Copy,
        VariantIdx: Idx,
        FieldIdx: Idx,
        LocalIdx: Idx,
    >(
        &self,
        local_layouts: &IndexSlice<LocalIdx, F>,
        prefix_layouts: IndexVec<FieldIdx, F>,
        variant_fields: &IndexSlice<VariantIdx, IndexVec<FieldIdx, LocalIdx>>,
        storage_conflicts: &BitMatrix<LocalIdx, LocalIdx>,
        tag_to_layout: impl Fn(Scalar) -> F,
    ) -> LayoutCalculatorResult<FieldIdx, VariantIdx, F> {
        coroutine::layout(
            self,
            local_layouts,
            prefix_layouts,
            variant_fields,
            storage_conflicts,
            tag_to_layout,
        )
    }

    pub fn univariant<
        'a,
        FieldIdx: Idx,
        VariantIdx: Idx,
        F: Deref<Target = &'a LayoutData<FieldIdx, VariantIdx>> + fmt::Debug + Copy,
    >(
        &self,
        fields: &IndexSlice<FieldIdx, F>,
        repr: &ReprOptions,
        kind: StructKind,
    ) -> LayoutCalculatorResult<FieldIdx, VariantIdx, F> {
        let dl = self.cx.data_layout();
        let layout = self.univariant_biased(fields, repr, kind, NicheBias::Start);
        // Enums prefer niches close to the beginning or the end of the variants so that other
        // (smaller) data-carrying variants can be packed into the space after/before the niche.
        // If the default field ordering does not give us a niche at the front, we do a second
        // run that biases niches towards the end, and then pick whichever layout places the
        // niche closer to one of the struct's edges.
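        // As a hypothetical illustration: for `(u16, u8, bool)` the start-biased run
        // yields the order `u16, bool, u8` (niche at offset 2: 2 bytes before, 1 after),
        // while the end-biased run yields `u16, u8, bool` (niche at offset 3: 3 bytes
        // before). The end-biased layout wins here, as it leaves the most contiguous
        // niche-free space at one edge for an enclosing enum's smaller variants.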
        if let Ok(layout) = &layout {
            // Don't try to calculate an end-biased layout for unsizable structs,
            // otherwise we could end up with different layouts for
            // Foo<Type> and Foo<dyn Trait> which would break unsizing.
            if !matches!(kind, StructKind::MaybeUnsized) {
                if let Some(niche) = layout.largest_niche {
                    let head_space = niche.offset.bytes();
                    let niche_len = niche.value.size(dl).bytes();
                    let tail_space = layout.size.bytes() - head_space - niche_len;

                    // This may end up doing redundant work if the niche is already in the last
                    // field (e.g. a trailing bool) and there is tail padding. But it's non-trivial
                    // to get the unpadded size so we try anyway.
                    if fields.len() > 1 && head_space != 0 && tail_space > 0 {
                        let alt_layout = self
                            .univariant_biased(fields, repr, kind, NicheBias::End)
                            .expect("alt layout should always work");
                        let alt_niche = alt_layout
                            .largest_niche
                            .expect("alt layout should have a niche like the regular one");
                        let alt_head_space = alt_niche.offset.bytes();
                        let alt_niche_len = alt_niche.value.size(dl).bytes();
                        let alt_tail_space =
                            alt_layout.size.bytes() - alt_head_space - alt_niche_len;

                        debug_assert_eq!(layout.size.bytes(), alt_layout.size.bytes());

                        let prefer_alt_layout =
                            alt_head_space > head_space && alt_head_space > tail_space;

                        debug!(
                            "sz: {}, default_niche_at: {}+{}, default_tail_space: {}, alt_niche_at/head_space: {}+{}, alt_tail: {}, num_fields: {}, better: {}\n\
                            layout: {}\n\
                            alt_layout: {}\n",
                            layout.size.bytes(),
                            head_space,
                            niche_len,
                            tail_space,
                            alt_head_space,
                            alt_niche_len,
                            alt_tail_space,
                            layout.fields.count(),
                            prefer_alt_layout,
                            self.format_field_niches(layout, fields),
                            self.format_field_niches(&alt_layout, fields),
                        );

                        if prefer_alt_layout {
                            return Ok(alt_layout);
                        }
                    }
                }
            }
        }
        layout
    }

    pub fn layout_of_struct_or_enum<
        'a,
        FieldIdx: Idx,
        VariantIdx: Idx,
        F: Deref<Target = &'a LayoutData<FieldIdx, VariantIdx>> + fmt::Debug + Copy,
    >(
        &self,
        repr: &ReprOptions,
        variants: &IndexSlice<VariantIdx, IndexVec<FieldIdx, F>>,
        is_enum: bool,
        is_special_no_niche: bool,
        scalar_valid_range: (Bound<u128>, Bound<u128>),
        discr_range_of_repr: impl Fn(i128, i128) -> (Integer, bool),
        discriminants: impl Iterator<Item = (VariantIdx, i128)>,
        always_sized: bool,
    ) -> LayoutCalculatorResult<FieldIdx, VariantIdx, F> {
        let (present_first, present_second) = {
            let mut present_variants = variants
                .iter_enumerated()
                .filter_map(|(i, v)| if !repr.c() && absent(v) { None } else { Some(i) });
            (present_variants.next(), present_variants.next())
        };
        let present_first = match present_first {
            Some(present_first) => present_first,
            // Uninhabited because it has no variants, or only absent ones.
            None if is_enum => {
                return Ok(LayoutData::never_type(&self.cx));
            }
            // If it's a struct, still compute a layout so that we can still compute the
            // field offsets.
            None => VariantIdx::new(0),
        };

        // take the struct path if it is an actual struct
        if !is_enum ||
            // or for optimizing univariant enums
            (present_second.is_none() && !repr.inhibit_enum_layout_opt())
        {
            self.layout_of_struct(
                repr,
                variants,
                is_enum,
                is_special_no_niche,
                scalar_valid_range,
                always_sized,
                present_first,
            )
        } else {
            // At this point, we have handled all unions and
            // structs. (We have also handled univariant enums
            // that allow representation optimization.)
            assert!(is_enum);
            self.layout_of_enum(repr, variants, discr_range_of_repr, discriminants)
        }
    }

    pub fn layout_of_union<
        'a,
        FieldIdx: Idx,
        VariantIdx: Idx,
        F: Deref<Target = &'a LayoutData<FieldIdx, VariantIdx>> + fmt::Debug + Copy,
    >(
        &self,
        repr: &ReprOptions,
        variants: &IndexSlice<VariantIdx, IndexVec<FieldIdx, F>>,
    ) -> LayoutCalculatorResult<FieldIdx, VariantIdx, F> {
        let dl = self.cx.data_layout();
        let mut align = if repr.pack.is_some() { dl.i8_align } else { dl.aggregate_align };
        let mut max_repr_align = repr.align;

        // If all the non-ZST fields have the same repr and union repr optimizations aren't
        // disabled, we can use that common repr for the union as a whole.
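        // As a hypothetical illustration: in `union U { a: u32, b: NonZeroU32 }` both
        // non-ZST fields are 4-byte integer scalars, so `U` can be given a `Scalar`
        // repr itself; the `NonZeroU32` niche is discarded by `to_union()` below, since
        // the other field may legitimately hold zero or uninitialized bytes.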
        struct AbiMismatch;
        let mut common_non_zst_repr_and_align = if repr.inhibits_union_abi_opt() {
            // Can't optimize
            Err(AbiMismatch)
        } else {
            Ok(None)
        };

        let mut size = Size::ZERO;
        let only_variant_idx = VariantIdx::new(0);
        let only_variant = &variants[only_variant_idx];
        for field in only_variant {
            if field.is_unsized() {
                return Err(LayoutCalculatorError::UnexpectedUnsized(*field));
            }

            align = align.max(field.align);
            max_repr_align = max_repr_align.max(field.max_repr_align);
            size = cmp::max(size, field.size);

            if field.is_zst() {
                // Nothing more to do for ZST fields
                continue;
            }

            if let Ok(common) = common_non_zst_repr_and_align {
                // Discard valid range information and allow undef
                let field_abi = field.backend_repr.to_union();

                if let Some((common_abi, common_align)) = common {
                    if common_abi != field_abi {
                        // Different fields have different ABI: disable opt
                        common_non_zst_repr_and_align = Err(AbiMismatch);
                    } else {
                        // Fields with the same non-Aggregate ABI should also
                        // have the same alignment
                        if !matches!(common_abi, BackendRepr::Memory { .. }) {
                            assert_eq!(
                                common_align, field.align.abi,
                                "non-Aggregate field with matching ABI but differing alignment"
                            );
                        }
                    }
                } else {
                    // First non-ZST field: record its ABI and alignment
                    common_non_zst_repr_and_align = Ok(Some((field_abi, field.align.abi)));
                }
            }
        }

        if let Some(pack) = repr.pack {
            align = align.min(AbiAlign::new(pack));
        }
        // The unadjusted ABI alignment does not include repr(align), but does include repr(pack).
        // See documentation on `LayoutData::unadjusted_abi_align`.
        let unadjusted_abi_align = align.abi;
        if let Some(repr_align) = repr.align {
            align = align.max(AbiAlign::new(repr_align));
        }
        // `align` must not be modified after this, or `unadjusted_abi_align` could be inaccurate.
        let align = align;

        // If all non-ZST fields have the same ABI, we may forward that ABI
        // for the union as a whole, unless otherwise inhibited.
        let backend_repr = match common_non_zst_repr_and_align {
            Err(AbiMismatch) | Ok(None) => BackendRepr::Memory { sized: true },
            Ok(Some((repr, _))) => match repr {
                // Mismatched alignment (e.g. union is #[repr(packed)]): disable opt
                BackendRepr::Scalar(_) | BackendRepr::ScalarPair(_, _)
                    if repr.scalar_align(dl).unwrap() != align.abi =>
                {
                    BackendRepr::Memory { sized: true }
                }
                // Vectors require at least element alignment, else disable the opt
                BackendRepr::SimdVector { element, count: _ }
                    if element.align(dl).abi > align.abi =>
                {
                    BackendRepr::Memory { sized: true }
                }
                // the alignment tests passed and we can use this
                BackendRepr::Scalar(..)
                | BackendRepr::ScalarPair(..)
                | BackendRepr::SimdVector { .. }
                | BackendRepr::Memory { .. } => repr,
            },
        };

        let Some(union_field_count) = NonZeroUsize::new(only_variant.len()) else {
            return Err(LayoutCalculatorError::EmptyUnion);
        };

        let combined_seed = only_variant
            .iter()
            .map(|v| v.randomization_seed)
            .fold(repr.field_shuffle_seed, |acc, seed| acc.wrapping_add(seed));

        Ok(LayoutData {
            variants: Variants::Single { index: only_variant_idx },
            fields: FieldsShape::Union(union_field_count),
            backend_repr,
            largest_niche: None,
            uninhabited: false,
            align,
            size: size.align_to(align.abi),
            max_repr_align,
            unadjusted_abi_align,
            randomization_seed: combined_seed,
        })
    }

    /// single-variant enums are just structs, if you think about it
    fn layout_of_struct<
        'a,
        FieldIdx: Idx,
        VariantIdx: Idx,
        F: Deref<Target = &'a LayoutData<FieldIdx, VariantIdx>> + fmt::Debug + Copy,
    >(
        &self,
        repr: &ReprOptions,
        variants: &IndexSlice<VariantIdx, IndexVec<FieldIdx, F>>,
        is_enum: bool,
        is_special_no_niche: bool,
        scalar_valid_range: (Bound<u128>, Bound<u128>),
        always_sized: bool,
        present_first: VariantIdx,
    ) -> LayoutCalculatorResult<FieldIdx, VariantIdx, F> {
        // Struct, or univariant enum equivalent to a struct.
        // (Typechecking will reject discriminant-sizing attrs.)

        let dl = self.cx.data_layout();
        let v = present_first;
        let kind = if is_enum || variants[v].is_empty() || always_sized {
            StructKind::AlwaysSized
        } else {
            StructKind::MaybeUnsized
        };

        let mut st = self.univariant(&variants[v], repr, kind)?;
        st.variants = Variants::Single { index: v };

        if is_special_no_niche {
            let hide_niches = |scalar: &mut _| match scalar {
                Scalar::Initialized { value, valid_range } => {
                    *valid_range = WrappingRange::full(value.size(dl))
                }
                // Already doesn't have any niches
                Scalar::Union { .. } => {}
            };
            match &mut st.backend_repr {
                BackendRepr::Scalar(scalar) => hide_niches(scalar),
                BackendRepr::ScalarPair(a, b) => {
                    hide_niches(a);
                    hide_niches(b);
                }
                BackendRepr::SimdVector { element, count: _ } => hide_niches(element),
                BackendRepr::Memory { sized: _ } => {}
            }
            st.largest_niche = None;
            return Ok(st);
        }

        let (start, end) = scalar_valid_range;
        match st.backend_repr {
            BackendRepr::Scalar(ref mut scalar) | BackendRepr::ScalarPair(ref mut scalar, _) => {
                // Enlarging validity ranges would result in missed
                // optimizations, *not* wrongly assuming the inner
                // value is valid. e.g. unions already enlarge validity ranges,
                // because the values may be uninitialized.
                //
                // Because of that we only check that the start and end
                // of the range is representable with this scalar type.

                let max_value = scalar.size(dl).unsigned_int_max();
                if let Bound::Included(start) = start {
                    // FIXME(eddyb) this might be incorrect - it doesn't
                    // account for wrap-around (end < start) ranges.
                    assert!(start <= max_value, "{start} > {max_value}");
                    scalar.valid_range_mut().start = start;
                }
                if let Bound::Included(end) = end {
                    // FIXME(eddyb) this might be incorrect - it doesn't
                    // account for wrap-around (end < start) ranges.
                    assert!(end <= max_value, "{end} > {max_value}");
                    scalar.valid_range_mut().end = end;
                }

                // Update `largest_niche` if we have introduced a larger niche.
                let niche = Niche::from_scalar(dl, Size::ZERO, *scalar);
                if let Some(niche) = niche {
                    match st.largest_niche {
                        Some(largest_niche) => {
                            // Replace the existing niche even if they're equal,
                            // because this one is at a lower offset.
                            if largest_niche.available(dl) <= niche.available(dl) {
                                st.largest_niche = Some(niche);
                            }
                        }
                        None => st.largest_niche = Some(niche),
                    }
                }
            }
            _ => assert!(
                start == Bound::Unbounded && end == Bound::Unbounded,
                "nonscalar layout for layout_scalar_valid_range type: {st:#?}",
            ),
        }

        Ok(st)
    }

    fn layout_of_enum<
        'a,
        FieldIdx: Idx,
        VariantIdx: Idx,
        F: Deref<Target = &'a LayoutData<FieldIdx, VariantIdx>> + fmt::Debug + Copy,
    >(
        &self,
        repr: &ReprOptions,
        variants: &IndexSlice<VariantIdx, IndexVec<FieldIdx, F>>,
        discr_range_of_repr: impl Fn(i128, i128) -> (Integer, bool),
        discriminants: impl Iterator<Item = (VariantIdx, i128)>,
    ) -> LayoutCalculatorResult<FieldIdx, VariantIdx, F> {
        // Until we've decided whether to use the tagged or
        // niche filling LayoutData, we don't want to intern the
        // variant layouts, so we can't store them in the
        // overall LayoutData. Store the overall LayoutData
        // and the variant LayoutDatas here until then.
        struct TmpLayout<FieldIdx: Idx, VariantIdx: Idx> {
            layout: LayoutData<FieldIdx, VariantIdx>,
            variants: IndexVec<VariantIdx, LayoutData<FieldIdx, VariantIdx>>,
        }

        let dl = self.cx.data_layout();
        // bail if the enum has an incoherent repr that cannot be computed
        if repr.packed() {
            return Err(LayoutCalculatorError::ReprConflict);
        }

        let calculate_niche_filling_layout = || -> Option<TmpLayout<FieldIdx, VariantIdx>> {
            if repr.inhibit_enum_layout_opt() {
                return None;
            }

            if variants.len() < 2 {
                return None;
            }

            let mut align = dl.aggregate_align;
            let mut max_repr_align = repr.align;
            let mut unadjusted_abi_align = align.abi;

            let mut variant_layouts = variants
                .iter_enumerated()
                .map(|(j, v)| {
                    let mut st = self.univariant(v, repr, StructKind::AlwaysSized).ok()?;
                    st.variants = Variants::Single { index: j };

                    align = align.max(st.align);
                    max_repr_align = max_repr_align.max(st.max_repr_align);
                    unadjusted_abi_align = unadjusted_abi_align.max(st.unadjusted_abi_align);

                    Some(st)
                })
                .collect::<Option<IndexVec<VariantIdx, _>>>()?;

            let largest_variant_index = variant_layouts
                .iter_enumerated()
                .max_by_key(|(_i, layout)| layout.size.bytes())
                .map(|(i, _layout)| i)?;

            let all_indices = variants.indices();
            let needs_disc =
                |index: VariantIdx| index != largest_variant_index && !absent(&variants[index]);
            let niche_variants = all_indices.clone().find(|v| needs_disc(*v)).unwrap()
                ..=all_indices.rev().find(|v| needs_disc(*v)).unwrap();

            let count =
                (niche_variants.end().index() as u128 - niche_variants.start().index() as u128) + 1;

            // Use the largest niche in the largest variant.
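            // As a hypothetical illustration: for `Option<bool>`, the largest variant
            // `Some(bool)` has a niche with 254 free values; reserving one for `None`
            // encodes it as the in-byte value 2, so the enum stays a single byte with
            // no separate tag field.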
            let niche = variant_layouts[largest_variant_index].largest_niche?;
            let (niche_start, niche_scalar) = niche.reserve(dl, count)?;
            let niche_offset = niche.offset;
            let niche_size = niche.value.size(dl);
            let size = variant_layouts[largest_variant_index].size.align_to(align.abi);

            let all_variants_fit = variant_layouts.iter_enumerated_mut().all(|(i, layout)| {
                if i == largest_variant_index {
                    return true;
                }

                layout.largest_niche = None;

                if layout.size <= niche_offset {
                    // This variant will fit before the niche.
                    return true;
                }

                // Determine if it'll fit after the niche.
                let this_align = layout.align.abi;
                let this_offset = (niche_offset + niche_size).align_to(this_align);

                if this_offset + layout.size > size {
                    return false;
                }

                // It'll fit, but we need to make some adjustments.
                match layout.fields {
                    FieldsShape::Arbitrary { ref mut offsets, .. } => {
                        for offset in offsets.iter_mut() {
                            *offset += this_offset;
                        }
                    }
                    FieldsShape::Primitive | FieldsShape::Array { .. } | FieldsShape::Union(..) => {
                        panic!("Layout of fields should be Arbitrary for variants")
                    }
                }

                // It can't be a Scalar or ScalarPair because the offset isn't 0.
                if !layout.is_uninhabited() {
                    layout.backend_repr = BackendRepr::Memory { sized: true };
                }
                layout.size += this_offset;

                true
            });

            if !all_variants_fit {
                return None;
            }

            let largest_niche = Niche::from_scalar(dl, niche_offset, niche_scalar);

            let others_zst = variant_layouts
                .iter_enumerated()
                .all(|(i, layout)| i == largest_variant_index || layout.size == Size::ZERO);
            let same_size = size == variant_layouts[largest_variant_index].size;
            let same_align = align == variant_layouts[largest_variant_index].align;

            let uninhabited = variant_layouts.iter().all(|v| v.is_uninhabited());
            let abi = if same_size && same_align && others_zst {
                match variant_layouts[largest_variant_index].backend_repr {
                    // When the total alignment and size match, we can use the
                    // same ABI as the scalar variant with the reserved niche.
                    BackendRepr::Scalar(_) => BackendRepr::Scalar(niche_scalar),
                    BackendRepr::ScalarPair(first, second) => {
                        // Only the niche is guaranteed to be initialised,
                        // so use union layouts for the other primitive.
                        if niche_offset == Size::ZERO {
                            BackendRepr::ScalarPair(niche_scalar, second.to_union())
                        } else {
                            BackendRepr::ScalarPair(first.to_union(), niche_scalar)
                        }
                    }
                    _ => BackendRepr::Memory { sized: true },
                }
            } else {
                BackendRepr::Memory { sized: true }
            };

            let combined_seed = variant_layouts
                .iter()
                .map(|v| v.randomization_seed)
                .fold(repr.field_shuffle_seed, |acc, seed| acc.wrapping_add(seed));

            let layout = LayoutData {
                variants: Variants::Multiple {
                    tag: niche_scalar,
                    tag_encoding: TagEncoding::Niche {
                        untagged_variant: largest_variant_index,
                        niche_variants,
                        niche_start,
                    },
                    tag_field: FieldIdx::new(0),
                    variants: IndexVec::new(),
                },
                fields: FieldsShape::Arbitrary {
                    offsets: [niche_offset].into(),
                    memory_index: [0].into(),
                },
                backend_repr: abi,
                largest_niche,
                uninhabited,
                size,
                align,
                max_repr_align,
                unadjusted_abi_align,
                randomization_seed: combined_seed,
            };

            Some(TmpLayout { layout, variants: variant_layouts })
        };

        let niche_filling_layout = calculate_niche_filling_layout();

        let discr_type = repr.discr_type();
        let discr_int = Integer::from_attr(dl, discr_type);
        // Because we can only represent one range of valid values, we'll look for the
        // largest range of invalid values and pick everything else as the range of valid
        // values.

        // First we need to sort the possible discriminant values so that we can look for the largest gap:
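        // As a hypothetical worked example with a `u8` tag: for valid discriminants
        // {3, 4, 250}, the consecutive gaps (including the wraparound pair 250 -> 3)
        // are 3->4, 4->250, and 250->3. The gap 4->250 is the largest, so the tag's
        // valid range becomes the wrapping range 250..=4, leaving 5..=249 as the niche.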
        let valid_discriminants: BTreeSet<i128> = discriminants
            .filter(|&(i, _)| repr.c() || variants[i].iter().all(|f| !f.is_uninhabited()))
            .map(|(_, val)| {
                if discr_type.is_signed() {
                    // sign extend the raw representation to be an i128
                    // FIXME: do this at the discriminant iterator creation sites
                    discr_int.size().sign_extend(val as u128)
                } else {
                    val
                }
            })
            .collect();
        trace!(?valid_discriminants);
        let discriminants = valid_discriminants.iter().copied();
        let next_discriminants =
            discriminants.clone().chain(valid_discriminants.first().copied()).skip(1);
        // Iterate over pairs of each discriminant together with the next one.
        // Since they were sorted, we can now compute the niche sizes and pick the largest.
        let discriminants = discriminants.zip(next_discriminants);
        let largest_niche = discriminants.max_by_key(|&(start, end)| {
            trace!(?start, ?end);
            // If this is a wraparound range, the niche size is `MAX - abs(diff)`, as the diff between
            // the two end points is actually the size of the range of valid discriminants.
            let dist = if start > end {
                // Overflow can happen for 128 bit discriminants if `end` is negative.
                // But in that case casting to `u128` still gets us the right value,
                // as the distance must be positive if the lhs of the subtraction is larger than the rhs.
                let dist = start.wrapping_sub(end);
                if discr_type.is_signed() {
                    discr_int.signed_max().wrapping_sub(dist) as u128
                } else {
                    discr_int.size().unsigned_int_max() - dist as u128
                }
            } else {
                // Overflow can happen for 128 bit discriminants if `start` is negative.
                // But in that case casting to `u128` still gets us the right value,
                // as the distance must be positive if the lhs of the subtraction is larger than the rhs.
                end.wrapping_sub(start) as u128
            };
            trace!(?dist);
            dist
        });
        trace!(?largest_niche);

        // `max` is the last valid discriminant before the largest niche
        // `min` is the first valid discriminant after the largest niche
        let (max, min) = largest_niche
            // We might have no inhabited variants, so pretend there's at least one.
            .unwrap_or((0, 0));
        let (min_ity, signed) = discr_range_of_repr(min, max); // Integer::repr_discr(tcx, ty, &repr, min, max);

        let mut align = dl.aggregate_align;
        let mut max_repr_align = repr.align;
        let mut unadjusted_abi_align = align.abi;

        let mut size = Size::ZERO;

        // We're interested in the smallest alignment, so start large.
        let mut start_align = Align::from_bytes(256).unwrap();
        assert_eq!(Integer::for_align(dl, start_align), None);

        // repr(C) on an enum tells us to make a (tag, union) layout,
        // so we need to grow the prefix alignment to be at least
        // the alignment of the union. (This value is used both for
        // determining the alignment of the overall enum, and for
        // determining the alignment of the payload after the tag.)
        let mut prefix_align = min_ity.align(dl).abi;
        if repr.c() {
            for fields in variants {
                for field in fields {
                    prefix_align = prefix_align.max(field.align.abi);
                }
            }
        }

        // Create the set of structs that represent each variant.
        let mut layout_variants = variants
            .iter_enumerated()
            .map(|(i, field_layouts)| {
                let mut st = self.univariant(
                    field_layouts,
                    repr,
                    StructKind::Prefixed(min_ity.size(), prefix_align),
                )?;
                st.variants = Variants::Single { index: i };
                // Find the first field we can't move later
                // to make room for a larger discriminant.
                for field_idx in st.fields.index_by_increasing_offset() {
                    let field = &field_layouts[FieldIdx::new(field_idx)];
                    if !field.is_1zst() {
                        start_align = start_align.min(field.align.abi);
                        break;
                    }
                }
                size = cmp::max(size, st.size);
                align = align.max(st.align);
                max_repr_align = max_repr_align.max(st.max_repr_align);
                unadjusted_abi_align = unadjusted_abi_align.max(st.unadjusted_abi_align);
                Ok(st)
            })
            .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;

        // Align the maximum variant size to the largest alignment.
        size = size.align_to(align.abi);

        // FIXME(oli-obk): deduplicate and harden these checks
        if size.bytes() >= dl.obj_size_bound() {
            return Err(LayoutCalculatorError::SizeOverflow);
        }

        let typeck_ity = Integer::from_attr(dl, repr.discr_type());
        if typeck_ity < min_ity {
            // It is a bug if layout decided on a greater discriminant size than typeck did
            // at this point (based on the values the discriminant can take on). Mostly
            // because this discriminant will be loaded, and then stored into a variable of
            // the type computed by typeck. Consider such a case (a bug): typeck decided on
            // a byte-sized discriminant, but layout thinks we need 16 bits to store all
            // discriminant values. Then, in codegen, in order to store this 16-bit
            // discriminant into an 8-bit sized temporary, some of the space necessary to
            // represent it would have to be discarded (or layout is wrong in thinking it
            // needs 16 bits).
            panic!(
                "layout decided on a larger discriminant type ({min_ity:?}) than typeck ({typeck_ity:?})"
            );
            // However, it is fine to make the discriminant type larger (as an optimisation)
            // after this point – we'll just truncate the value we load in codegen.
        }

        // Check to see if we should use a different type for the
        // discriminant. We can safely use a type with the same size
        // as the alignment of the first field of each variant.
        // We increase the size of the discriminant to avoid LLVM copying
        // padding when it doesn't need to. This normally causes unaligned
        // loads/stores and excessive memcpy/memset operations. By using a
        // bigger integer size, LLVM can be sure about its contents and
        // won't be so conservative.
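        // As a hypothetical illustration (on a target where `u64` is 8-aligned): for
        // `enum E { A(u64), B }` the tag needs only one byte, but `A`'s `u64` field
        // sits at offset 8 anyway; widening the tag to 8 bytes turns bytes 1..8 from
        // padding into defined tag bytes at no size cost.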
        // Use the initial field alignment
        let mut ity = if repr.c() || repr.int.is_some() {
            min_ity
        } else {
            Integer::for_align(dl, start_align).unwrap_or(min_ity)
        };

        // If the alignment is not larger than the chosen discriminant size,
        // don't use the alignment as the final size.
        if ity <= min_ity {
            ity = min_ity;
        } else {
            // Patch up the variants' first few fields.
            let old_ity_size = min_ity.size();
            let new_ity_size = ity.size();
            for variant in &mut layout_variants {
                match variant.fields {
                    FieldsShape::Arbitrary { ref mut offsets, .. } => {
                        for i in offsets {
                            if *i <= old_ity_size {
                                assert_eq!(*i, old_ity_size);
                                *i = new_ity_size;
                            }
                        }
                        // We might be making the struct larger.
                        if variant.size <= old_ity_size {
                            variant.size = new_ity_size;
                        }
                    }
                    FieldsShape::Primitive | FieldsShape::Array { .. } | FieldsShape::Union(..) => {
                        panic!("encountered a non-arbitrary layout during enum layout")
                    }
                }
            }
        }

        let tag_mask = ity.size().unsigned_int_max();
        let tag = Scalar::Initialized {
            value: Primitive::Int(ity, signed),
            valid_range: WrappingRange {
                start: (min as u128 & tag_mask),
                end: (max as u128 & tag_mask),
            },
        };
        let mut abi = BackendRepr::Memory { sized: true };

        let uninhabited = layout_variants.iter().all(|v| v.is_uninhabited());
        if tag.size(dl) == size {
            // Make sure we only use scalar layout when the enum is entirely its
            // own tag (i.e. it has no padding nor any non-ZST variant fields).
            abi = BackendRepr::Scalar(tag);
        } else {
            // Try to use a ScalarPair for all tagged enums.
            // That's possible only if we can find a common primitive type for all variants.
            let mut common_prim = None;
            let mut common_prim_initialized_in_all_variants = true;
            for (field_layouts, layout_variant) in iter::zip(variants, &layout_variants) {
                let FieldsShape::Arbitrary { ref offsets, .. } = layout_variant.fields else {
                    panic!("encountered a non-arbitrary layout during enum layout");
                };
                // We skip *all* ZSTs here and later check if we are good in terms of alignment.
                // This lets us handle some cases involving aligned ZSTs.
                let mut fields = iter::zip(field_layouts, offsets).filter(|p| !p.0.is_zst());
                let (field, offset) = match (fields.next(), fields.next()) {
                    (None, None) => {
                        common_prim_initialized_in_all_variants = false;
                        continue;
                    }
                    (Some(pair), None) => pair,
                    _ => {
                        common_prim = None;
                        break;
                    }
                };
                let prim = match field.backend_repr {
                    BackendRepr::Scalar(scalar) => {
                        common_prim_initialized_in_all_variants &=
                            matches!(scalar, Scalar::Initialized { .. });
                        scalar.primitive()
                    }
                    _ => {
                        common_prim = None;
                        break;
                    }
                };
                if let Some((old_prim, common_offset)) = common_prim {
                    // All variants must be at the same offset
                    if offset != common_offset {
                        common_prim = None;
                        break;
                    }
                    // This is pretty conservative. We could go fancier
                    // by realising that (u8, u8) could just cohabit with
                    // u16 or even u32.
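                    // As a hypothetical illustration: `enum E { A(u32), B(i32) }` still
                    // unifies here, since both variants carry a 4-byte integer at the
                    // same offset; the signedness of the first variant (`u32`) wins.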
1009                    let new_prim = match (old_prim, prim) {
1010                        // Allow all identical primitives.
1011                        (x, y) if x == y => x,
1012                        // Allow integers of the same size with differing signedness.
1013                        // We arbitrarily choose the signedness of the first variant.
1014                        (p @ Primitive::Int(x, _), Primitive::Int(y, _)) if x == y => p,
1015                        // Allow integers mixed with pointers of the same layout.
1016                        // We must represent this using a pointer, to avoid
1017                        // roundtripping pointers through ptrtoint/inttoptr.
1018                        (p @ Primitive::Pointer(_), i @ Primitive::Int(..))
1019                        | (i @ Primitive::Int(..), p @ Primitive::Pointer(_))
1020                            if p.size(dl) == i.size(dl) && p.align(dl) == i.align(dl) =>
1021                        {
1022                            p
1023                        }
1024                        _ => {
1025                            common_prim = None;
1026                            break;
1027                        }
1028                    };
1029                    // We may be updating the primitive here, for example from int->ptr.
1030                    common_prim = Some((new_prim, common_offset));
1031                } else {
1032                    common_prim = Some((prim, offset));
1033                }
1034            }
1035            if let Some((prim, offset)) = common_prim {
1036                let prim_scalar = if common_prim_initialized_in_all_variants {
1037                    let size = prim.size(dl);
1038                    assert!(size.bits() <= 128);
1039                    Scalar::Initialized { value: prim, valid_range: WrappingRange::full(size) }
1040                } else {
1041                    // Common prim might be uninit.
1042                    Scalar::Union { value: prim }
1043                };
1044                let pair =
1045                    LayoutData::<FieldIdx, VariantIdx>::scalar_pair(&self.cx, tag, prim_scalar);
1046                let pair_offsets = match pair.fields {
1047                    FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
1048                        assert_eq!(memory_index.raw, [0, 1]);
1049                        offsets
1050                    }
1051                    _ => panic!("encountered a non-arbitrary layout during enum layout"),
1052                };
1053                if pair_offsets[FieldIdx::new(0)] == Size::ZERO
1054                    && pair_offsets[FieldIdx::new(1)] == *offset
1055                    && align == pair.align
1056                    && size == pair.size
1057                {
1058                    // We can use `ScalarPair` only when it matches our
1059                    // already computed layout (including `#[repr(C)]`).
1060                    abi = pair.backend_repr;
1061                }
1062            }
1063        }
1064
1065        // If we pick a "clever" (by-value) ABI, we might have to adjust the ABI of the
1066        // variants to ensure they are consistent. This is because a downcast is
1067        // semantically a NOP, and thus should not affect layout.
1068        if matches!(abi, BackendRepr::Scalar(..) | BackendRepr::ScalarPair(..)) {
1069            for variant in &mut layout_variants {
1070                // We only do this for variants with fields; the others are not accessed anyway.
1071                // Also do not overwrite any already existing "clever" ABIs.
1072                if variant.fields.count() > 0
1073                    && matches!(variant.backend_repr, BackendRepr::Memory { .. })
1074                {
1075                    variant.backend_repr = abi;
1076                    // Also need to bump up the size and alignment, so that the entire value fits
1077                    // in here.
1078                    variant.size = cmp::max(variant.size, size);
1079                    variant.align.abi = cmp::max(variant.align.abi, align.abi);
1080                }
1081            }
1082        }
1083
1084        let largest_niche = Niche::from_scalar(dl, Size::ZERO, tag);
1085
1086        let combined_seed = layout_variants
1087            .iter()
1088            .map(|v| v.randomization_seed)
1089            .fold(repr.field_shuffle_seed, |acc, seed| acc.wrapping_add(seed));
1090
1091        let tagged_layout = LayoutData {
1092            variants: Variants::Multiple {
1093                tag,
1094                tag_encoding: TagEncoding::Direct,
1095                tag_field: FieldIdx::new(0),
1096                variants: IndexVec::new(),
1097            },
1098            fields: FieldsShape::Arbitrary {
1099                offsets: [Size::ZERO].into(),
1100                memory_index: [0].into(),
1101            },
1102            largest_niche,
1103            uninhabited,
1104            backend_repr: abi,
1105            align,
1106            size,
1107            max_repr_align,
1108            unadjusted_abi_align,
1109            randomization_seed: combined_seed,
1110        };
1111
1112        let tagged_layout = TmpLayout { layout: tagged_layout, variants: layout_variants };
1113
1114        let mut best_layout = match (tagged_layout, niche_filling_layout) {
1115            (tl, Some(nl)) => {
1116                // Pick the smaller layout; otherwise,
1117                // pick the layout with the larger niche; otherwise,
1118                // pick tagged as it has simpler codegen.
1119                use cmp::Ordering::*;
1120                let niche_size = |tmp_l: &TmpLayout<FieldIdx, VariantIdx>| {
1121                    tmp_l.layout.largest_niche.map_or(0, |n| n.available(dl))
1122                };
1123                match (tl.layout.size.cmp(&nl.layout.size), niche_size(&tl).cmp(&niche_size(&nl))) {
1124                    (Greater, _) => nl,
1125                    (Equal, Less) => nl,
1126                    _ => tl,
1127                }
1128            }
1129            (tl, None) => tl,
1130        };
1131
1132        // Now we can intern the variant layouts and store them in the enum layout.
1133        best_layout.layout.variants = match best_layout.layout.variants {
1134            Variants::Multiple { tag, tag_encoding, tag_field, .. } => {
1135                Variants::Multiple { tag, tag_encoding, tag_field, variants: best_layout.variants }
1136            }
1137            Variants::Single { .. } | Variants::Empty => {
1138                panic!("encountered a single-variant or empty enum during multi-variant layout")
1139            }
1140        };
1141        Ok(best_layout.layout)
1142    }
1143
1144    fn univariant_biased<
1145        'a,
1146        FieldIdx: Idx,
1147        VariantIdx: Idx,
1148        F: Deref<Target = &'a LayoutData<FieldIdx, VariantIdx>> + fmt::Debug + Copy,
1149    >(
1150        &self,
1151        fields: &IndexSlice<FieldIdx, F>,
1152        repr: &ReprOptions,
1153        kind: StructKind,
1154        niche_bias: NicheBias,
1155    ) -> LayoutCalculatorResult<FieldIdx, VariantIdx, F> {
1156        let dl = self.cx.data_layout();
1157        let pack = repr.pack;
1158        let mut align = if pack.is_some() { dl.i8_align } else { dl.aggregate_align };
1159        let mut max_repr_align = repr.align;
1160        let mut inverse_memory_index: IndexVec<u32, FieldIdx> = fields.indices().collect();
1161        let optimize_field_order = !repr.inhibit_struct_field_reordering();
1162        let end = if let StructKind::MaybeUnsized = kind { fields.len() - 1 } else { fields.len() };
1163        let optimizing = &mut inverse_memory_index.raw[..end];
1164        let fields_excluding_tail = &fields.raw[..end];
1165        // unsizable tail fields are excluded so that we use the same seed for the sized and unsized layouts.
1166        let field_seed = fields_excluding_tail
1167            .iter()
1168            .fold(Hash64::ZERO, |acc, f| acc.wrapping_add(f.randomization_seed));
1169
1170        if optimize_field_order && fields.len() > 1 {
1171            // If `-Z randomize-layout` was enabled for the type definition we can shuffle
1172            // the field ordering to try and catch some code making assumptions about layouts
1173            // we don't guarantee.
1174            if repr.can_randomize_type_layout() && cfg!(feature = "randomize") {
1175                #[cfg(feature = "randomize")]
1176                {
1177                    use rand::SeedableRng;
1178                    use rand::seq::SliceRandom;
1179                    // `ReprOptions.field_shuffle_seed` is a deterministic seed we can use to randomize field
1180                    // ordering.
1181                    let mut rng = rand_xoshiro::Xoshiro128StarStar::seed_from_u64(
1182                        field_seed.wrapping_add(repr.field_shuffle_seed).as_u64(),
1183                    );
1184
1185                    // Shuffle the ordering of the fields.
1186                    optimizing.shuffle(&mut rng);
1187                }
1188                // Otherwise we just leave things alone and actually optimize the type's fields
1189            } else {
1190                // To allow unsizing `&Foo<Type>` -> `&Foo<dyn Trait>`, the layout of the struct must
1191                // not depend on the layout of the tail.
1192                let max_field_align =
1193                    fields_excluding_tail.iter().map(|f| f.align.abi.bytes()).max().unwrap_or(1);
1194                let largest_niche_size = fields_excluding_tail
1195                    .iter()
1196                    .filter_map(|f| f.largest_niche)
1197                    .map(|n| n.available(dl))
1198                    .max()
1199                    .unwrap_or(0);

                // Calculates a sort key to group fields by their alignment or possibly some
                // size-derived pseudo-alignment.
                let alignment_group_key = |layout: &F| {
                    // The two branches here return values that cannot be meaningfully compared with
                    // each other. However, we know that consistently for all executions of
                    // `alignment_group_key`, one or the other branch will be taken, so this is okay.
                    if let Some(pack) = pack {
                        // Return the packed alignment in bytes.
                        layout.align.abi.min(pack).bytes()
                    } else {
                        // Returns `log2(effective-align)`. The calculation assumes that size is an
                        // integer multiple of align, except for ZSTs.
                        let align = layout.align.abi.bytes();
                        let size = layout.size.bytes();
                        let niche_size = layout.largest_niche.map(|n| n.available(dl)).unwrap_or(0);
                        // Group [u8; 4] with align-4 or [u8; 6] with align-2 fields.
                        let size_as_align = align.max(size).trailing_zeros();
                        let size_as_align = if largest_niche_size > 0 {
                            match niche_bias {
                                // Given `A(u8, [u8; 16])` and `B(bool, [u8; 16])` we want to bump the
                                // array to the front in the first case (for aligned loads) but keep
                                // the bool in front in the second case for its niches.
                                NicheBias::Start => {
                                    max_field_align.trailing_zeros().min(size_as_align)
                                }
                                // When moving niches towards the end of the struct, for
                                // A((u8, u8, u8, bool), (u8, bool, u8)) we want to keep the first tuple
                                // in the align-1 group because its bool can be moved closer to the end.
                                NicheBias::End if niche_size == largest_niche_size => {
                                    align.trailing_zeros()
                                }
                                NicheBias::End => size_as_align,
                            }
                        } else {
                            size_as_align
                        };
                        size_as_align as u64
                    }
                };
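                // For illustration: with no `pack` and no niches involved, a `u32`
                // (align 4, size 4) gets key `max(4, 4).trailing_zeros() = 2` and a
                // `[u8; 4]` (align 1, size 4) gets the same key, so both land in one
                // pseudo-alignment group; a ZST falls back to `log2(align)` since its
                // size is 0.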

                match kind {
                    StructKind::AlwaysSized | StructKind::MaybeUnsized => {
                        // Currently `LayoutData` only exposes a single niche so sorting is usually
                        // sufficient to get one niche into the preferred position. If it ever
                        // supported multiple niches then a more advanced pick-and-pack approach could
                        // provide better results. But even for the single-niche case it's not
                        // optimal. E.g. for A(u32, (bool, u8), u16) it would be possible to move the
                        // bool to the front but it would require packing the tuple together with the
                        // u16 to build a 4-byte group so that the u32 can be placed after it without
                        // padding. This kind of packing can't be achieved by sorting.
                        optimizing.sort_by_key(|&x| {
                            let f = &fields[x];
                            let field_size = f.size.bytes();
                            let niche_size = f.largest_niche.map_or(0, |n| n.available(dl));
                            let niche_size_key = match niche_bias {
                                // large niche first
                                NicheBias::Start => !niche_size,
                                // large niche last
                                NicheBias::End => niche_size,
                            };
                            let inner_niche_offset_key = match niche_bias {
                                NicheBias::Start => f.largest_niche.map_or(0, |n| n.offset.bytes()),
                                NicheBias::End => f.largest_niche.map_or(0, |n| {
                                    !(field_size - n.value.size(dl).bytes() - n.offset.bytes())
                                }),
                            };

                            (
                                // Place the largest alignment groups first.
                                cmp::Reverse(alignment_group_key(f)),
                                // Then prioritize niche placement within an alignment group
                                // according to `niche_bias`.
                                niche_size_key,
                                // Then among fields with equally-sized niches prefer the ones
                                // closer to the start/end of the field.
                                inner_niche_offset_key,
                            )
                        });
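                        // For illustration: given a hypothetical `S(u8, u32, bool)` with
                        // `NicheBias::Start`, the `u32` sorts first (largest alignment
                        // group), and within the remaining align-1 group the `bool`
                        // (which has a niche) sorts ahead of the plain `u8`.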
                    }

                    StructKind::Prefixed(..) => {
                        // Sort in ascending alignment so that the layout stays optimal
                        // regardless of the prefix, and put the field with the largest
                        // niche at the end of its alignment group so it can be used as
                        // the discriminant in jagged enums.
                        optimizing.sort_by_key(|&x| {
                            let f = &fields[x];
                            let niche_size = f.largest_niche.map_or(0, |n| n.available(dl));
                            (alignment_group_key(f), niche_size)
                        });
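                        // For illustration: with a tag prefix, sorting align-ascending
                        // lets low-alignment fields sit in the bytes right after the
                        // tag, so little padding is wasted whatever the prefix size.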
                    }
                }

                // FIXME(Kixiron): We can always shuffle fields within a given alignment class
                //                 regardless of the status of `-Z randomize-layout`
            }
        }
        // inverse_memory_index holds field indices by increasing memory offset.
        // That is, if field 5 has offset 0, the first element of inverse_memory_index is 5.
        // We now write field offsets to the corresponding offset slot;
        // field 5 with offset 0 puts 0 in offsets[5].
        // At the bottom of this function, we invert `inverse_memory_index` to
        // produce `memory_index` (see `invert_bijective_mapping`).
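        // For illustration: `inverse_memory_index = [2, 0, 1]` means field 2 comes first
        // in memory, so inverting it yields `memory_index = [1, 2, 0]` (field 0 is second
        // in memory, field 1 third, field 2 first).
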
        let mut unsized_field = None::<&F>;
        let mut offsets = IndexVec::from_elem(Size::ZERO, fields);
        let mut offset = Size::ZERO;
        let mut largest_niche = None;
        let mut largest_niche_available = 0;
        if let StructKind::Prefixed(prefix_size, prefix_align) = kind {
            let prefix_align =
                if let Some(pack) = pack { prefix_align.min(pack) } else { prefix_align };
            align = align.max(AbiAlign::new(prefix_align));
            offset = prefix_size.align_to(prefix_align);
        }
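        // For illustration: a 1-byte prefix with 2-byte alignment starts the first field
        // at offset 2, since `prefix_size.align_to(prefix_align)` rounds 1 up to 2.
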
        for &i in &inverse_memory_index {
            let field = &fields[i];
            if let Some(unsized_field) = unsized_field {
                return Err(LayoutCalculatorError::UnexpectedUnsized(*unsized_field));
            }

            if field.is_unsized() {
                if let StructKind::MaybeUnsized = kind {
                    unsized_field = Some(field);
                } else {
                    return Err(LayoutCalculatorError::UnexpectedUnsized(*field));
                }
            }

            // Invariant: offset < dl.obj_size_bound() <= 1<<61
            let field_align = if let Some(pack) = pack {
                field.align.min(AbiAlign::new(pack))
            } else {
                field.align
            };
            offset = offset.align_to(field_align.abi);
            align = align.max(field_align);
            max_repr_align = max_repr_align.max(field.max_repr_align);

            debug!("univariant offset: {:?} field: {:#?}", offset, field);
            offsets[i] = offset;

            if let Some(mut niche) = field.largest_niche {
                let available = niche.available(dl);
                // Pick up larger niches.
                let prefer_new_niche = match niche_bias {
                    NicheBias::Start => available > largest_niche_available,
                    // if there are several niches of the same size then pick the last one
                    NicheBias::End => available >= largest_niche_available,
                };
                if prefer_new_niche {
                    largest_niche_available = available;
                    niche.offset += offset;
                    largest_niche = Some(niche);
                }
            }

            offset =
                offset.checked_add(field.size, dl).ok_or(LayoutCalculatorError::SizeOverflow)?;
        }
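        // For illustration: fields laid out in the memory order (u32, bool) produce
        // offsets [0, 4], a running `offset` of 5, and `align` 4; the final size
        // computed below rounds 5 up to 8.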

        // The unadjusted ABI alignment does not include repr(align), but does include repr(pack).
        // See documentation on `LayoutData::unadjusted_abi_align`.
        let unadjusted_abi_align = align.abi;
        if let Some(repr_align) = repr.align {
            align = align.max(AbiAlign::new(repr_align));
        }
        // `align` must not be modified after this point, or `unadjusted_abi_align` could be inaccurate.
        let align = align;

        debug!("univariant min_size: {:?}", offset);
        let min_size = offset;
        // As stated above, inverse_memory_index holds field indices by increasing offset.
        // This makes it an already-sorted view of the offsets vec.
        // To invert it, consider:
        // if field 5 has offset 0, then inverse_memory_index[0] is 5 and
        // memory_index[5] should be 0.
        // Note: if we didn't optimize, inverse_memory_index is already the identity
        // mapping, so no inversion is needed.
        let memory_index = if optimize_field_order {
            inverse_memory_index.invert_bijective_mapping()
        } else {
            debug_assert!(inverse_memory_index.iter().copied().eq(fields.indices()));
            inverse_memory_index.into_iter().map(|it| it.index() as u32).collect()
        };
        let size = min_size.align_to(align.abi);
        // FIXME(oli-obk): deduplicate and harden these checks
        if size.bytes() >= dl.obj_size_bound() {
            return Err(LayoutCalculatorError::SizeOverflow);
        }
        let mut layout_of_single_non_zst_field = None;
        let sized = unsized_field.is_none();
        let mut abi = BackendRepr::Memory { sized };

        let optimize_abi = !repr.inhibit_newtype_abi_optimization();

        // Try to make this a Scalar/ScalarPair.
        if sized && size.bytes() > 0 {
            // We skip *all* ZST here and later check if we are good in terms of alignment.
            // This lets us handle some cases involving aligned ZST.
            let mut non_zst_fields = fields.iter_enumerated().filter(|&(_, f)| !f.is_zst());

            match (non_zst_fields.next(), non_zst_fields.next(), non_zst_fields.next()) {
                // We have exactly one non-ZST field.
                (Some((i, field)), None, None) => {
                    layout_of_single_non_zst_field = Some(field);

                    // Field fills the struct and it has a scalar or scalar pair ABI.
                    if offsets[i].bytes() == 0 && align.abi == field.align.abi && size == field.size
                    {
                        match field.backend_repr {
                            // For plain scalars, or vectors of them, we can't unpack
                            // newtypes for `#[repr(C)]`, as that affects C ABIs.
                            BackendRepr::Scalar(_) | BackendRepr::SimdVector { .. }
                                if optimize_abi =>
                            {
                                abi = field.backend_repr;
                            }
                            // But scalar pairs are Rust-specific and get
                            // treated as aggregates by C ABIs anyway.
                            BackendRepr::ScalarPair(..) => {
                                abi = field.backend_repr;
                            }
                            _ => {}
                        }
                    }
                }

                // Two non-ZST fields, and they're both scalars.
                (Some((i, a)), Some((j, b)), None) => {
                    match (a.backend_repr, b.backend_repr) {
                        (BackendRepr::Scalar(a), BackendRepr::Scalar(b)) => {
                            // Order by the memory placement, not source order.
                            let ((i, a), (j, b)) = if offsets[i] < offsets[j] {
                                ((i, a), (j, b))
                            } else {
                                ((j, b), (i, a))
                            };
                            let pair =
                                LayoutData::<FieldIdx, VariantIdx>::scalar_pair(&self.cx, a, b);
                            let pair_offsets = match pair.fields {
                                FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
                                    assert_eq!(memory_index.raw, [0, 1]);
                                    offsets
                                }
                                FieldsShape::Primitive
                                | FieldsShape::Array { .. }
                                | FieldsShape::Union(..) => {
                                    panic!("encountered a non-arbitrary layout during univariant layout")
                                }
                            };
                            if offsets[i] == pair_offsets[FieldIdx::new(0)]
                                && offsets[j] == pair_offsets[FieldIdx::new(1)]
                                && align == pair.align
                                && size == pair.size
                            {
                                // We can use `ScalarPair` only when it matches our
                                // already computed layout (including `#[repr(C)]`).
                                abi = pair.backend_repr;
                            }
                        }
                        _ => {}
                    }
                }

                _ => {}
            }
        }
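        // For illustration: a hypothetical `struct Wrapper(u64)` hits the single-field
        // arm and inherits `BackendRepr::Scalar`; a `struct Pair(u32, u16)` hits the
        // two-scalar arm and becomes a `ScalarPair` because its computed offsets,
        // align, and size agree with what `scalar_pair` produces.
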
        let uninhabited = fields.iter().any(|f| f.is_uninhabited());

        let unadjusted_abi_align = if repr.transparent() {
            match layout_of_single_non_zst_field {
                Some(l) => l.unadjusted_abi_align,
                None => {
                    // `repr(transparent)` with all ZST fields.
                    align.abi
                }
            }
        } else {
            unadjusted_abi_align
        };

        let seed = field_seed.wrapping_add(repr.field_shuffle_seed);

        Ok(LayoutData {
            variants: Variants::Single { index: VariantIdx::new(0) },
            fields: FieldsShape::Arbitrary { offsets, memory_index },
            backend_repr: abi,
            largest_niche,
            uninhabited,
            align,
            size,
            max_repr_align,
            unadjusted_abi_align,
            randomization_seed: seed,
        })
    }

    fn format_field_niches<
        'a,
        FieldIdx: Idx,
        VariantIdx: Idx,
        F: Deref<Target = &'a LayoutData<FieldIdx, VariantIdx>> + fmt::Debug,
    >(
        &self,
        layout: &LayoutData<FieldIdx, VariantIdx>,
        fields: &IndexSlice<FieldIdx, F>,
    ) -> String {
        let dl = self.cx.data_layout();
        let mut s = String::new();
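        // For illustration: each field renders as `[o<offset>a<align>s<size>]`, with
        // ` n<niche offset>b<log2 of available niches>s<niche size>` appended when the
        // field has a niche; e.g. a `bool` at offset 4 renders as `[o4a1s1 n0b7s1]`.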
        for i in layout.fields.index_by_increasing_offset() {
            let offset = layout.fields.offset(i);
            let f = &fields[FieldIdx::new(i)];
            write!(s, "[o{}a{}s{}", offset.bytes(), f.align.abi.bytes(), f.size.bytes()).unwrap();
            if let Some(n) = f.largest_niche {
                write!(
                    s,
                    " n{}b{}s{}",
                    n.offset.bytes(),
                    n.available(dl).ilog2(),
                    n.value.size(dl).bytes()
                )
                .unwrap();
            }
            write!(s, "] ").unwrap();
        }
        s
    }
}