rustc_lint/
types.rs

use std::iter;
use std::ops::ControlFlow;

use rustc_abi::{
    BackendRepr, Integer, IntegerType, TagEncoding, VariantIdx, Variants, WrappingRange,
};
use rustc_data_structures::fx::FxHashSet;
use rustc_errors::DiagMessage;
use rustc_hir::intravisit::VisitorExt;
use rustc_hir::{AmbigArg, Expr, ExprKind, HirId, LangItem};
use rustc_middle::bug;
use rustc_middle::ty::layout::{LayoutOf, SizeSkeleton};
use rustc_middle::ty::{
    self, Adt, AdtKind, GenericArgsRef, Ty, TyCtxt, TypeSuperVisitable, TypeVisitable,
    TypeVisitableExt,
};
use rustc_session::{declare_lint, declare_lint_pass, impl_lint_pass};
use rustc_span::def_id::LocalDefId;
use rustc_span::{Span, Symbol, sym};
use tracing::debug;
use {rustc_ast as ast, rustc_hir as hir};

mod improper_ctypes;

use crate::lints::{
    AmbiguousWidePointerComparisons, AmbiguousWidePointerComparisonsAddrMetadataSuggestion,
    AmbiguousWidePointerComparisonsAddrSuggestion, AmbiguousWidePointerComparisonsCastSuggestion,
    AmbiguousWidePointerComparisonsExpectSuggestion, AtomicOrderingFence, AtomicOrderingLoad,
    AtomicOrderingStore, ImproperCTypes, InvalidAtomicOrderingDiag, InvalidNanComparisons,
    InvalidNanComparisonsSuggestion, UnpredictableFunctionPointerComparisons,
    UnpredictableFunctionPointerComparisonsSuggestion, UnusedComparisons, UsesPowerAlignment,
    VariantSizeDifferencesDiag,
};
use crate::{LateContext, LateLintPass, LintContext, fluent_generated as fluent};

mod literal;

use literal::{int_ty_range, lint_literal, uint_ty_range};

declare_lint! {
    /// The `unused_comparisons` lint detects comparisons made useless by
    /// limits of the types involved.
    ///
    /// ### Example
    ///
    /// ```rust
    /// fn foo(x: u8) {
    ///     x >= 0;
    /// }
    /// ```
    ///
    /// {{produces}}
    ///
    /// ### Explanation
    ///
    /// A useless comparison may indicate a mistake, and should be fixed or
    /// removed.
    UNUSED_COMPARISONS,
    Warn,
    "comparisons made useless by limits of the types involved"
}

declare_lint! {
    /// The `overflowing_literals` lint detects literals that are out of range
    /// for their type.
    ///
    /// ### Example
    ///
    /// ```rust,compile_fail
    /// let x: u8 = 1000;
    /// ```
    ///
    /// {{produces}}
    ///
    /// ### Explanation
    ///
    /// It is usually a mistake to use a literal that overflows the type where
    /// it is used. Either use a literal that is within range, or change the
    /// type to be within the range of the literal.
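    ///
    /// For example, either of the following avoids the overflow (one possible
    /// fix for the example above; pick whichever matches the intent):
    ///
    /// ```rust
    /// let x: u8 = 100;    // a literal that fits in `u8`
    /// let y: u16 = 1000;  // or a wider type that can hold the literal
    /// ```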
    OVERFLOWING_LITERALS,
    Deny,
    "literal out of range for its type"
}

declare_lint! {
    /// The `variant_size_differences` lint detects enums with widely varying
    /// variant sizes.
    ///
    /// ### Example
    ///
    /// ```rust,compile_fail
    /// #![deny(variant_size_differences)]
    /// enum En {
    ///     V0(u8),
    ///     VBig([u8; 1024]),
    /// }
    /// ```
    ///
    /// {{produces}}
    ///
    /// ### Explanation
    ///
    /// It can be a mistake to add a variant to an enum that is much larger
    /// than the other variants, bloating the overall size required for all
    /// variants. This can impact performance and memory usage. This is
    /// triggered if one variant is more than 3 times larger than the
    /// second-largest variant.
    ///
    /// Consider placing the large variant's contents on the heap (for example
    /// via [`Box`]) to keep the overall size of the enum itself down.
    ///
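    /// For example, one possible fix along those lines is to box the large
    /// variant's payload so the enum itself stays small:
    ///
    /// ```rust
    /// enum En {
    ///     V0(u8),
    ///     VBig(Box<[u8; 1024]>),
    /// }
    /// ```
    ///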
    /// This lint is "allow" by default because it can be noisy, and may not be
    /// an actual problem. Decisions about this should be guided by
    /// profiling and benchmarking.
    ///
    /// [`Box`]: https://doc.rust-lang.org/std/boxed/index.html
    VARIANT_SIZE_DIFFERENCES,
    Allow,
    "detects enums with widely varying variant sizes"
}

declare_lint! {
    /// The `invalid_nan_comparisons` lint checks for comparisons with `f32::NAN` or `f64::NAN`
    /// as one of the operands.
    ///
    /// ### Example
    ///
    /// ```rust
    /// let a = 2.3f32;
    /// if a == f32::NAN {}
    /// ```
    ///
    /// {{produces}}
    ///
    /// ### Explanation
    ///
    /// NaN does not compare meaningfully to anything – not
    /// even itself – so those comparisons are always false.
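    ///
    /// To check whether a value is NaN, one option is to use the dedicated
    /// method instead of a comparison:
    ///
    /// ```rust
    /// let a = 2.3f32;
    /// if a.is_nan() {}
    /// ```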
    INVALID_NAN_COMPARISONS,
    Warn,
    "detects invalid floating point NaN comparisons"
}

declare_lint! {
    /// The `ambiguous_wide_pointer_comparisons` lint checks for comparisons
    /// of `*const`/`*mut ?Sized` wide pointers as the operands.
    ///
    /// ### Example
    ///
    /// ```rust
    /// # struct A;
    /// # struct B;
    ///
    /// # trait T {}
    /// # impl T for A {}
    /// # impl T for B {}
    ///
    /// let ab = (A, B);
    /// let a = &ab.0 as *const dyn T;
    /// let b = &ab.1 as *const dyn T;
    ///
    /// let _ = a == b;
    /// ```
    ///
    /// {{produces}}
    ///
    /// ### Explanation
    ///
    /// The comparison includes metadata which may not be expected.
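    ///
    /// If only the addresses are meant to be compared, `std::ptr::addr_eq`
    /// makes that explicit (a small sketch, independent of the example above):
    ///
    /// ```rust
    /// let s = [1u8, 2, 3];
    /// let a: *const [u8] = &s[..];
    /// let b: *const [u8] = &s[1..];
    /// // Compares only the addresses and ignores the (length) metadata.
    /// let _ = std::ptr::addr_eq(a, b);
    /// ```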
    AMBIGUOUS_WIDE_POINTER_COMPARISONS,
    Warn,
    "detects ambiguous wide pointer comparisons"
}

declare_lint! {
    /// The `unpredictable_function_pointer_comparisons` lint checks for comparisons
    /// with function pointers as the operands.
    ///
    /// ### Example
    ///
    /// ```rust
    /// fn a() {}
    /// fn b() {}
    ///
    /// let f: fn() = a;
    /// let g: fn() = b;
    ///
    /// let _ = f == g;
    /// ```
    ///
    /// {{produces}}
    ///
    /// ### Explanation
    ///
    /// Function pointer comparisons do not produce a meaningful result, since
    /// function addresses are never guaranteed to be unique and could vary between different
    /// code generation units. Furthermore, different functions could have the
    /// same address after being merged together.
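    ///
    /// When such a comparison is intentional, `std::ptr::fn_addr_eq` expresses
    /// that intent explicitly (a sketch based on the example above):
    ///
    /// ```rust
    /// fn a() {}
    /// fn b() {}
    ///
    /// let f: fn() = a;
    /// let g: fn() = b;
    ///
    /// // Still compares addresses, but makes the intent explicit.
    /// let _ = std::ptr::fn_addr_eq(f, g);
    /// ```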
    UNPREDICTABLE_FUNCTION_POINTER_COMPARISONS,
    Warn,
    "detects unpredictable function pointer comparisons"
}

#[derive(Copy, Clone, Default)]
pub(crate) struct TypeLimits {
    /// Id of the last visited negated expression
    negated_expr_id: Option<hir::HirId>,
    /// Span of the last visited negated expression
    negated_expr_span: Option<Span>,
}

impl_lint_pass!(TypeLimits => [
    UNUSED_COMPARISONS,
    OVERFLOWING_LITERALS,
    INVALID_NAN_COMPARISONS,
    AMBIGUOUS_WIDE_POINTER_COMPARISONS,
    UNPREDICTABLE_FUNCTION_POINTER_COMPARISONS
]);

impl TypeLimits {
    pub(crate) fn new() -> TypeLimits {
        TypeLimits { negated_expr_id: None, negated_expr_span: None }
    }
}

fn lint_nan<'tcx>(
    cx: &LateContext<'tcx>,
    e: &'tcx hir::Expr<'tcx>,
    binop: hir::BinOpKind,
    l: &'tcx hir::Expr<'tcx>,
    r: &'tcx hir::Expr<'tcx>,
) {
    fn is_nan(cx: &LateContext<'_>, expr: &hir::Expr<'_>) -> bool {
        let expr = expr.peel_blocks().peel_borrows();
        match expr.kind {
            ExprKind::Path(qpath) => {
                let Some(def_id) = cx.typeck_results().qpath_res(&qpath, expr.hir_id).opt_def_id()
                else {
                    return false;
                };

                matches!(
                    cx.tcx.get_diagnostic_name(def_id),
                    Some(sym::f16_nan | sym::f32_nan | sym::f64_nan | sym::f128_nan)
                )
            }
            _ => false,
        }
    }

    fn eq_ne(
        e: &hir::Expr<'_>,
        l: &hir::Expr<'_>,
        r: &hir::Expr<'_>,
        f: impl FnOnce(Span, Span) -> InvalidNanComparisonsSuggestion,
    ) -> InvalidNanComparisons {
        let suggestion = if let Some(l_span) = l.span.find_ancestor_inside(e.span)
            && let Some(r_span) = r.span.find_ancestor_inside(e.span)
        {
            f(l_span, r_span)
        } else {
            InvalidNanComparisonsSuggestion::Spanless
        };

        InvalidNanComparisons::EqNe { suggestion }
    }

    let lint = match binop {
        hir::BinOpKind::Eq | hir::BinOpKind::Ne if is_nan(cx, l) => {
            eq_ne(e, l, r, |l_span, r_span| InvalidNanComparisonsSuggestion::Spanful {
                nan_plus_binop: l_span.until(r_span),
                float: r_span.shrink_to_hi(),
                neg: (binop == hir::BinOpKind::Ne).then(|| r_span.shrink_to_lo()),
            })
        }
        hir::BinOpKind::Eq | hir::BinOpKind::Ne if is_nan(cx, r) => {
            eq_ne(e, l, r, |l_span, r_span| InvalidNanComparisonsSuggestion::Spanful {
                nan_plus_binop: l_span.shrink_to_hi().to(r_span),
                float: l_span.shrink_to_hi(),
                neg: (binop == hir::BinOpKind::Ne).then(|| l_span.shrink_to_lo()),
            })
        }
        hir::BinOpKind::Lt | hir::BinOpKind::Le | hir::BinOpKind::Gt | hir::BinOpKind::Ge
            if is_nan(cx, l) || is_nan(cx, r) =>
        {
            InvalidNanComparisons::LtLeGtGe
        }
        _ => return,
    };

    cx.emit_span_lint(INVALID_NAN_COMPARISONS, e.span, lint);
}

#[derive(Debug, PartialEq, Copy, Clone)]
enum ComparisonOp {
    BinOp(hir::BinOpKind),
    Other,
}

fn lint_wide_pointer<'tcx>(
    cx: &LateContext<'tcx>,
    e: &'tcx hir::Expr<'tcx>,
    cmpop: ComparisonOp,
    l: &'tcx hir::Expr<'tcx>,
    r: &'tcx hir::Expr<'tcx>,
) {
    let ptr_unsized = |mut ty: Ty<'tcx>| -> Option<(
        /* number of refs */ usize,
        /* modifiers */ String,
        /* is dyn */ bool,
    )> {
        let mut refs = 0;
        // Here we remove any "implicit" references and count them so that we
        // can suggest the right number of derefs.
        while let ty::Ref(_, inner_ty, _) = ty.kind() {
            ty = *inner_ty;
            refs += 1;
        }

        // Get the inner type of a pointer (or pointer-like type).
        let mut modifiers = String::new();
        ty = match ty.kind() {
            ty::RawPtr(ty, _) => *ty,
            ty::Adt(def, args) if cx.tcx.is_diagnostic_item(sym::NonNull, def.did()) => {
                modifiers.push_str(".as_ptr()");
                args.type_at(0)
            }
            _ => return None,
        };

        (!ty.is_sized(cx.tcx, cx.typing_env()))
            .then(|| (refs, modifiers, matches!(ty.kind(), ty::Dynamic(_, _, ty::Dyn))))
    };

    // the left and right operands can have references, remove any explicit references
    let l = l.peel_borrows();
    let r = r.peel_borrows();

    let Some(l_ty) = cx.typeck_results().expr_ty_opt(l) else {
        return;
    };
    let Some(r_ty) = cx.typeck_results().expr_ty_opt(r) else {
        return;
    };

    let Some((l_ty_refs, l_modifiers, l_inner_ty_is_dyn)) = ptr_unsized(l_ty) else {
        return;
    };
    let Some((r_ty_refs, r_modifiers, r_inner_ty_is_dyn)) = ptr_unsized(r_ty) else {
        return;
    };

    let (Some(l_span), Some(r_span)) =
        (l.span.find_ancestor_inside(e.span), r.span.find_ancestor_inside(e.span))
    else {
        return cx.emit_span_lint(
            AMBIGUOUS_WIDE_POINTER_COMPARISONS,
            e.span,
            AmbiguousWidePointerComparisons::Spanless,
        );
    };

    let ne = if cmpop == ComparisonOp::BinOp(hir::BinOpKind::Ne) { "!" } else { "" };
    let is_eq_ne = matches!(cmpop, ComparisonOp::BinOp(hir::BinOpKind::Eq | hir::BinOpKind::Ne));
    let is_dyn_comparison = l_inner_ty_is_dyn && r_inner_ty_is_dyn;
    let via_method_call = matches!(&e.kind, ExprKind::MethodCall(..) | ExprKind::Call(..));

    let left = e.span.shrink_to_lo().until(l_span.shrink_to_lo());
    let middle = l_span.shrink_to_hi().until(r_span.shrink_to_lo());
    let right = r_span.shrink_to_hi().until(e.span.shrink_to_hi());

    let deref_left = &*"*".repeat(l_ty_refs);
    let deref_right = &*"*".repeat(r_ty_refs);

    let l_modifiers = &*l_modifiers;
    let r_modifiers = &*r_modifiers;

    cx.emit_span_lint(
        AMBIGUOUS_WIDE_POINTER_COMPARISONS,
        e.span,
        if is_eq_ne {
            AmbiguousWidePointerComparisons::SpanfulEq {
                addr_metadata_suggestion: (!is_dyn_comparison).then(|| {
                    AmbiguousWidePointerComparisonsAddrMetadataSuggestion {
                        ne,
                        deref_left,
                        deref_right,
                        l_modifiers,
                        r_modifiers,
                        left,
                        middle,
                        right,
                    }
                }),
                addr_suggestion: AmbiguousWidePointerComparisonsAddrSuggestion {
                    ne,
                    deref_left,
                    deref_right,
                    l_modifiers,
                    r_modifiers,
                    left,
                    middle,
                    right,
                },
            }
        } else {
            AmbiguousWidePointerComparisons::SpanfulCmp {
                cast_suggestion: AmbiguousWidePointerComparisonsCastSuggestion {
                    deref_left,
                    deref_right,
                    l_modifiers,
                    r_modifiers,
                    paren_left: if l_ty_refs != 0 { ")" } else { "" },
                    paren_right: if r_ty_refs != 0 { ")" } else { "" },
                    left_before: (l_ty_refs != 0).then_some(l_span.shrink_to_lo()),
                    left_after: l_span.shrink_to_hi(),
                    right_before: (r_ty_refs != 0).then_some(r_span.shrink_to_lo()),
                    right_after: r_span.shrink_to_hi(),
                },
                expect_suggestion: AmbiguousWidePointerComparisonsExpectSuggestion {
                    paren_left: if via_method_call { "" } else { "(" },
                    paren_right: if via_method_call { "" } else { ")" },
                    before: e.span.shrink_to_lo(),
                    after: e.span.shrink_to_hi(),
                },
            }
        },
    );
}

fn lint_fn_pointer<'tcx>(
    cx: &LateContext<'tcx>,
    e: &'tcx hir::Expr<'tcx>,
    cmpop: ComparisonOp,
    l: &'tcx hir::Expr<'tcx>,
    r: &'tcx hir::Expr<'tcx>,
) {
    let peel_refs = |mut ty: Ty<'tcx>| -> (Ty<'tcx>, usize) {
        let mut refs = 0;

        while let ty::Ref(_, inner_ty, _) = ty.kind() {
            ty = *inner_ty;
            refs += 1;
        }

        (ty, refs)
    };

    // Left and right operands can have borrows, remove them
    let l = l.peel_borrows();
    let r = r.peel_borrows();

    let Some(l_ty) = cx.typeck_results().expr_ty_opt(l) else { return };
    let Some(r_ty) = cx.typeck_results().expr_ty_opt(r) else { return };

    // Remove any references as `==` will deref through them (and count the
    // number of references removed, for later).
    let (l_ty, l_ty_refs) = peel_refs(l_ty);
    let (r_ty, r_ty_refs) = peel_refs(r_ty);

    if l_ty.is_fn() && r_ty.is_fn() {
        // both operands are function pointers, fallthrough
    } else if let ty::Adt(l_def, l_args) = l_ty.kind()
        && let ty::Adt(r_def, r_args) = r_ty.kind()
        && cx.tcx.is_lang_item(l_def.did(), LangItem::Option)
        && cx.tcx.is_lang_item(r_def.did(), LangItem::Option)
        && let Some(l_some_arg) = l_args.get(0)
        && let Some(r_some_arg) = r_args.get(0)
        && l_some_arg.expect_ty().is_fn()
        && r_some_arg.expect_ty().is_fn()
    {
        // both operands are `Option<{function ptr}>`
        return cx.emit_span_lint(
            UNPREDICTABLE_FUNCTION_POINTER_COMPARISONS,
            e.span,
            UnpredictableFunctionPointerComparisons::Warn,
        );
    } else {
        // types are not function pointers, nothing to do
        return;
    }

    // Let's try to suggest `ptr::fn_addr_eq` if/when possible.

    let is_eq_ne = matches!(cmpop, ComparisonOp::BinOp(hir::BinOpKind::Eq | hir::BinOpKind::Ne));

    if !is_eq_ne {
        // Neither `==` nor `!=`, we can't suggest `ptr::fn_addr_eq`, just show the warning.
        return cx.emit_span_lint(
            UNPREDICTABLE_FUNCTION_POINTER_COMPARISONS,
            e.span,
            UnpredictableFunctionPointerComparisons::Warn,
        );
    }

    let (Some(l_span), Some(r_span)) =
        (l.span.find_ancestor_inside(e.span), r.span.find_ancestor_inside(e.span))
    else {
        // No appropriate spans for the left and right operands, just show the warning.
        return cx.emit_span_lint(
            UNPREDICTABLE_FUNCTION_POINTER_COMPARISONS,
            e.span,
            UnpredictableFunctionPointerComparisons::Warn,
        );
    };

    let ne = if cmpop == ComparisonOp::BinOp(hir::BinOpKind::Ne) { "!" } else { "" };

    // `ptr::fn_addr_eq` only works with raw pointers; deref any references.
    let deref_left = &*"*".repeat(l_ty_refs);
    let deref_right = &*"*".repeat(r_ty_refs);

    let left = e.span.shrink_to_lo().until(l_span.shrink_to_lo());
    let middle = l_span.shrink_to_hi().until(r_span.shrink_to_lo());
    let right = r_span.shrink_to_hi().until(e.span.shrink_to_hi());

    let sugg =
        // We only check for a right cast as `FnDef` == `FnPtr` is not possible,
        // only `FnPtr == FnDef` is possible.
        if !r_ty.is_fn_ptr() {
            let fn_sig = r_ty.fn_sig(cx.tcx);

            UnpredictableFunctionPointerComparisonsSuggestion::FnAddrEqWithCast {
                ne,
                fn_sig,
                deref_left,
                deref_right,
                left,
                middle,
                right,
            }
        } else {
            UnpredictableFunctionPointerComparisonsSuggestion::FnAddrEq {
                ne,
                deref_left,
                deref_right,
                left,
                middle,
                right,
            }
        };

    cx.emit_span_lint(
        UNPREDICTABLE_FUNCTION_POINTER_COMPARISONS,
        e.span,
        UnpredictableFunctionPointerComparisons::Suggestion { sugg },
    );
}

impl<'tcx> LateLintPass<'tcx> for TypeLimits {
    fn check_lit(
        &mut self,
        cx: &LateContext<'tcx>,
        hir_id: HirId,
        lit: &'tcx hir::Lit,
        negated: bool,
    ) {
        if negated {
            self.negated_expr_id = Some(hir_id);
            self.negated_expr_span = Some(lit.span);
        }
        lint_literal(cx, self, hir_id, lit.span, lit, negated);
    }

    fn check_expr(&mut self, cx: &LateContext<'tcx>, e: &'tcx hir::Expr<'tcx>) {
        match e.kind {
            hir::ExprKind::Unary(hir::UnOp::Neg, expr) => {
                // Propagate negation, if the negation itself isn't negated
                if self.negated_expr_id != Some(e.hir_id) {
                    self.negated_expr_id = Some(expr.hir_id);
                    self.negated_expr_span = Some(e.span);
                }
            }
            hir::ExprKind::Binary(binop, ref l, ref r) => {
                if is_comparison(binop.node) {
                    if !check_limits(cx, binop.node, l, r) {
                        cx.emit_span_lint(UNUSED_COMPARISONS, e.span, UnusedComparisons);
                    } else {
                        lint_nan(cx, e, binop.node, l, r);
                        let cmpop = ComparisonOp::BinOp(binop.node);
                        lint_wide_pointer(cx, e, cmpop, l, r);
                        lint_fn_pointer(cx, e, cmpop, l, r);
                    }
                }
            }
            hir::ExprKind::Call(path, [l, r])
                if let ExprKind::Path(ref qpath) = path.kind
                    && let Some(def_id) = cx.qpath_res(qpath, path.hir_id).opt_def_id()
                    && let Some(diag_item) = cx.tcx.get_diagnostic_name(def_id)
                    && let Some(cmpop) = diag_item_cmpop(diag_item) =>
            {
                lint_wide_pointer(cx, e, cmpop, l, r);
                lint_fn_pointer(cx, e, cmpop, l, r);
            }
            hir::ExprKind::MethodCall(_, l, [r], _)
                if let Some(def_id) = cx.typeck_results().type_dependent_def_id(e.hir_id)
                    && let Some(diag_item) = cx.tcx.get_diagnostic_name(def_id)
                    && let Some(cmpop) = diag_item_cmpop(diag_item) =>
            {
                lint_wide_pointer(cx, e, cmpop, l, r);
                lint_fn_pointer(cx, e, cmpop, l, r);
            }
            _ => {}
        };

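        // Note: `is_valid` is called with the literal on the RHS of the
        // (normalized) comparison; it reports whether the comparison is not
        // rendered useless by the `[min, max]` range of the expression's type
        // (e.g. `x >= 0` for `x: u8` is always true and gets linted).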
        fn is_valid<T: PartialOrd>(binop: hir::BinOpKind, v: T, min: T, max: T) -> bool {
            match binop {
                hir::BinOpKind::Lt => v > min && v <= max,
                hir::BinOpKind::Le => v >= min && v < max,
                hir::BinOpKind::Gt => v >= min && v < max,
                hir::BinOpKind::Ge => v > min && v <= max,
                hir::BinOpKind::Eq | hir::BinOpKind::Ne => v >= min && v <= max,
                _ => bug!(),
            }
        }

        fn rev_binop(binop: hir::BinOpKind) -> hir::BinOpKind {
            match binop {
                hir::BinOpKind::Lt => hir::BinOpKind::Gt,
                hir::BinOpKind::Le => hir::BinOpKind::Ge,
                hir::BinOpKind::Gt => hir::BinOpKind::Lt,
                hir::BinOpKind::Ge => hir::BinOpKind::Le,
                _ => binop,
            }
        }

        fn check_limits(
            cx: &LateContext<'_>,
            binop: hir::BinOpKind,
            l: &hir::Expr<'_>,
            r: &hir::Expr<'_>,
        ) -> bool {
            let (lit, expr, swap) = match (&l.kind, &r.kind) {
                (&hir::ExprKind::Lit(_), _) => (l, r, true),
                (_, &hir::ExprKind::Lit(_)) => (r, l, false),
                _ => return true,
            };
            // Normalize the binop so that the literal is always on the RHS in
            // the comparison
            let norm_binop = if swap { rev_binop(binop) } else { binop };
            match *cx.typeck_results().node_type(expr.hir_id).kind() {
                ty::Int(int_ty) => {
                    let (min, max) = int_ty_range(int_ty);
                    let lit_val: i128 = match lit.kind {
                        hir::ExprKind::Lit(li) => match li.node {
                            ast::LitKind::Int(
                                v,
                                ast::LitIntType::Signed(_) | ast::LitIntType::Unsuffixed,
                            ) => v.get() as i128,
                            _ => return true,
                        },
                        _ => bug!(),
                    };
                    is_valid(norm_binop, lit_val, min, max)
                }
                ty::Uint(uint_ty) => {
                    let (min, max): (u128, u128) = uint_ty_range(uint_ty);
                    let lit_val: u128 = match lit.kind {
                        hir::ExprKind::Lit(li) => match li.node {
                            ast::LitKind::Int(v, _) => v.get(),
                            _ => return true,
                        },
                        _ => bug!(),
                    };
                    is_valid(norm_binop, lit_val, min, max)
                }
                _ => true,
            }
        }

        fn is_comparison(binop: hir::BinOpKind) -> bool {
            matches!(
                binop,
                hir::BinOpKind::Eq
                    | hir::BinOpKind::Lt
                    | hir::BinOpKind::Le
                    | hir::BinOpKind::Ne
                    | hir::BinOpKind::Ge
                    | hir::BinOpKind::Gt
            )
        }

        fn diag_item_cmpop(diag_item: Symbol) -> Option<ComparisonOp> {
            Some(match diag_item {
                sym::cmp_ord_max => ComparisonOp::Other,
                sym::cmp_ord_min => ComparisonOp::Other,
                sym::ord_cmp_method => ComparisonOp::Other,
                sym::cmp_partialeq_eq => ComparisonOp::BinOp(hir::BinOpKind::Eq),
                sym::cmp_partialeq_ne => ComparisonOp::BinOp(hir::BinOpKind::Ne),
                sym::cmp_partialord_cmp => ComparisonOp::Other,
                sym::cmp_partialord_ge => ComparisonOp::BinOp(hir::BinOpKind::Ge),
                sym::cmp_partialord_gt => ComparisonOp::BinOp(hir::BinOpKind::Gt),
                sym::cmp_partialord_le => ComparisonOp::BinOp(hir::BinOpKind::Le),
                sym::cmp_partialord_lt => ComparisonOp::BinOp(hir::BinOpKind::Lt),
                _ => return None,
            })
        }
    }
}

declare_lint! {
    /// The `improper_ctypes` lint detects incorrect use of types in foreign
    /// modules.
    ///
    /// ### Example
    ///
    /// ```rust
    /// unsafe extern "C" {
    ///     static STATIC: String;
    /// }
    /// ```
    ///
    /// {{produces}}
    ///
    /// ### Explanation
    ///
    /// The compiler has several checks to verify that types used in `extern`
    /// blocks are safe and follow certain rules to ensure proper
    /// compatibility with the foreign interfaces. This lint is issued when it
    /// detects a probable mistake in a definition. The lint usually should
    /// provide a description of the issue, along with possibly a hint on how
    /// to resolve it.
    IMPROPER_CTYPES,
    Warn,
    "proper use of libc types in foreign modules"
}

declare_lint_pass!(ImproperCTypesDeclarations => [IMPROPER_CTYPES]);

declare_lint! {
    /// The `improper_ctypes_definitions` lint detects incorrect use of
    /// [`extern` function] definitions.
    ///
    /// [`extern` function]: https://doc.rust-lang.org/reference/items/functions.html#extern-function-qualifier
    ///
    /// ### Example
    ///
    /// ```rust
    /// # #![allow(unused)]
    /// pub extern "C" fn str_type(p: &str) { }
    /// ```
    ///
    /// {{produces}}
    ///
    /// ### Explanation
    ///
    /// There are many parameter and return types that may be specified in an
    /// `extern` function that are not compatible with the given ABI. This
    /// lint is an alert that these types should not be used. The lint usually
    /// should provide a description of the issue, along with possibly a hint
    /// on how to resolve it.
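    ///
    /// A sketch of one FFI-safe alternative for the example above is to pass a
    /// raw pointer and a length instead of `&str`:
    ///
    /// ```rust
    /// # #![allow(unused)]
    /// pub extern "C" fn str_type(p: *const u8, len: usize) { }
    /// ```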
    IMPROPER_CTYPES_DEFINITIONS,
    Warn,
    "proper use of libc types in foreign item definitions"
}

declare_lint! {
    /// The `uses_power_alignment` lint detects specific `repr(C)`
    /// aggregates on AIX.
    /// In its platform C ABI, AIX uses the "power" (as in PowerPC) alignment
    /// rule (detailed in https://www.ibm.com/docs/en/xl-c-and-cpp-aix/16.1?topic=data-using-alignment-modes#alignment),
    /// which can also be set for XLC by `#pragma align(power)` or
    /// `-qalign=power`. Aggregates with a floating-point type as the
    /// recursively first field (as in "at offset 0") modify the layout of
    /// *subsequent* fields of the associated structs to use an alignment value
    /// where the floating-point type is aligned on a 4-byte boundary.
    ///
    /// Effectively, subsequent floating-point fields act as-if they are `repr(packed(4))`. This
    /// would be unsound to do in a `repr(C)` type without all the restrictions that come with
    /// `repr(packed)`. Rust instead chooses a layout that maintains soundness of Rust code, at the
    /// expense of incompatibility with C code.
    ///
    /// ### Example
    ///
    /// ```rust,ignore (fails on non-powerpc64-ibm-aix)
    /// #[repr(C)]
    /// pub struct Floats {
    ///     a: f64,
    ///     b: u8,
    ///     c: f64,
    /// }
    /// ```
    ///
    /// This will produce:
    ///
    /// ```text
    /// warning: repr(C) does not follow the power alignment rule. This may affect platform C ABI compatibility for this type
    ///  --> <source>:5:3
    ///   |
    /// 5 |   c: f64,
    ///   |   ^^^^^^
    ///   |
    ///   = note: `#[warn(uses_power_alignment)]` on by default
    /// ```
    ///
    /// ### Explanation
    ///
    /// The power alignment rule specifies that the above struct has the
    /// following alignment:
    ///  - offset_of!(Floats, a) == 0
    ///  - offset_of!(Floats, b) == 8
    ///  - offset_of!(Floats, c) == 12
    ///
    /// However, Rust currently aligns `c` at `offset_of!(Floats, c) == 16`.
    /// Using offset 12 would be unsound since `f64` generally must be 8-aligned on this target.
    /// Thus, a warning is produced for the above struct.
    USES_POWER_ALIGNMENT,
    Warn,
    "Structs do not follow the power alignment rule under repr(C)"
}

declare_lint_pass!(ImproperCTypesDefinitions => [IMPROPER_CTYPES_DEFINITIONS, USES_POWER_ALIGNMENT]);

#[derive(Clone, Copy)]
pub(crate) enum CItemKind {
    Declaration,
    Definition,
}

struct ImproperCTypesVisitor<'a, 'tcx> {
    cx: &'a LateContext<'tcx>,
    mode: CItemKind,
}

/// Accumulator for recursive ffi type checking
struct CTypesVisitorState<'tcx> {
    cache: FxHashSet<Ty<'tcx>>,
    /// The original type being checked, before we recursed
    /// to any other types it contains.
    base_ty: Ty<'tcx>,
}

enum FfiResult<'tcx> {
    FfiSafe,
    FfiPhantom(Ty<'tcx>),
    FfiUnsafe { ty: Ty<'tcx>, reason: DiagMessage, help: Option<DiagMessage> },
}

pub(crate) fn nonnull_optimization_guaranteed<'tcx>(
    tcx: TyCtxt<'tcx>,
    def: ty::AdtDef<'tcx>,
) -> bool {
    tcx.has_attr(def.did(), sym::rustc_nonnull_optimization_guaranteed)
}

/// `repr(transparent)` structs can have a single non-1-ZST field; this function returns that
/// field.
pub(crate) fn transparent_newtype_field<'a, 'tcx>(
    tcx: TyCtxt<'tcx>,
    variant: &'a ty::VariantDef,
) -> Option<&'a ty::FieldDef> {
    let typing_env = ty::TypingEnv::non_body_analysis(tcx, variant.def_id);
    variant.fields.iter().find(|field| {
        let field_ty = tcx.type_of(field.did).instantiate_identity();
        let is_1zst =
            tcx.layout_of(typing_env.as_query_input(field_ty)).is_ok_and(|layout| layout.is_1zst());
        !is_1zst
    })
}

/// Is type known to be non-null?
fn ty_is_known_nonnull<'tcx>(
    tcx: TyCtxt<'tcx>,
    typing_env: ty::TypingEnv<'tcx>,
    ty: Ty<'tcx>,
    mode: CItemKind,
) -> bool {
    let ty = tcx.try_normalize_erasing_regions(typing_env, ty).unwrap_or(ty);

    match ty.kind() {
        ty::FnPtr(..) => true,
        ty::Ref(..) => true,
        ty::Adt(def, _) if def.is_box() && matches!(mode, CItemKind::Definition) => true,
        ty::Adt(def, args) if def.repr().transparent() && !def.is_union() => {
            let marked_non_null = nonnull_optimization_guaranteed(tcx, *def);

            if marked_non_null {
                return true;
            }

            // `UnsafeCell` and `UnsafePinned` have their niche hidden.
            if def.is_unsafe_cell() || def.is_unsafe_pinned() {
                return false;
            }

            def.variants()
                .iter()
                .filter_map(|variant| transparent_newtype_field(tcx, variant))
                .any(|field| ty_is_known_nonnull(tcx, typing_env, field.ty(tcx, args), mode))
        }
        ty::Pat(base, pat) => {
            ty_is_known_nonnull(tcx, typing_env, *base, mode)
                || pat_ty_is_known_nonnull(tcx, typing_env, *pat)
        }
        _ => false,
    }
}

fn pat_ty_is_known_nonnull<'tcx>(
    tcx: TyCtxt<'tcx>,
    typing_env: ty::TypingEnv<'tcx>,
    pat: ty::Pattern<'tcx>,
) -> bool {
    Option::unwrap_or_default(
        try {
            match *pat {
                ty::PatternKind::Range { start, end } => {
                    let start = start.try_to_value()?.try_to_bits(tcx, typing_env)?;
                    let end = end.try_to_value()?.try_to_bits(tcx, typing_env)?;

                    // This also works for negative numbers, as we just need
                    // to ensure we aren't wrapping over zero.
                    start > 0 && end >= start
                }
                ty::PatternKind::Or(patterns) => {
                    patterns.iter().all(|pat| pat_ty_is_known_nonnull(tcx, typing_env, pat))
                }
            }
        },
    )
}

/// Given a non-null scalar (or transparent) type `ty`, return the nullable version of that type.
/// If the type passed in was not scalar, this returns `None`.
fn get_nullable_type<'tcx>(
    tcx: TyCtxt<'tcx>,
    typing_env: ty::TypingEnv<'tcx>,
    ty: Ty<'tcx>,
) -> Option<Ty<'tcx>> {
    let ty = tcx.try_normalize_erasing_regions(typing_env, ty).unwrap_or(ty);

    Some(match *ty.kind() {
        ty::Adt(field_def, field_args) => {
            let inner_field_ty = {
                let mut first_non_zst_ty =
                    field_def.variants().iter().filter_map(|v| transparent_newtype_field(tcx, v));
                debug_assert_eq!(
                    first_non_zst_ty.clone().count(),
                    1,
                    "Wrong number of fields for transparent type"
                );
                first_non_zst_ty
                    .next_back()
                    .expect("No non-zst fields in transparent type.")
                    .ty(tcx, field_args)
            };
            return get_nullable_type(tcx, typing_env, inner_field_ty);
        }
        ty::Pat(base, ..) => return get_nullable_type(tcx, typing_env, base),
        ty::Int(_) | ty::Uint(_) | ty::RawPtr(..) => ty,
        // As these types are always non-null, the nullable equivalents of
        // `Option<T>` of these types are their raw pointer counterparts.
        ty::Ref(_region, ty, mutbl) => Ty::new_ptr(tcx, ty, mutbl),
        // There is no nullable equivalent for Rust's function pointers;
        // you must use an `Option<fn(..) -> _>` to represent it.
        ty::FnPtr(..) => ty,
        // We should only ever reach this case if `ty_is_known_nonnull` is
        // extended to other types.
        ref unhandled => {
            debug!(
                "get_nullable_type: Unhandled scalar kind: {:?} while checking {:?}",
                unhandled, ty
            );
            return None;
        }
    })
}

/// A type is a niche-optimization candidate iff:
/// - Is a zero-sized type with alignment 1 (a “1-ZST”).
/// - Has no fields.
/// - Does not have the `#[non_exhaustive]` attribute.
fn is_niche_optimization_candidate<'tcx>(
    tcx: TyCtxt<'tcx>,
    typing_env: ty::TypingEnv<'tcx>,
    ty: Ty<'tcx>,
) -> bool {
    if tcx.layout_of(typing_env.as_query_input(ty)).is_ok_and(|layout| !layout.is_1zst()) {
        return false;
    }

    match ty.kind() {
        ty::Adt(ty_def, _) => {
            let non_exhaustive = ty_def.is_variant_list_non_exhaustive();
            let empty = (ty_def.is_struct() && ty_def.all_fields().next().is_none())
                || (ty_def.is_enum() && ty_def.variants().is_empty());

            !non_exhaustive && empty
        }
        ty::Tuple(tys) => tys.is_empty(),
        _ => false,
    }
}

/// Check if this enum can be safely exported based on the "nullable pointer optimization". If it
/// can, return the type that `ty` can be safely converted to, otherwise return `None`.
/// Currently restricted to function pointers, boxes, references, `core::num::NonZero`,
/// `core::ptr::NonNull`, and `#[repr(transparent)]` newtypes.
pub(crate) fn repr_nullable_ptr<'tcx>(
    tcx: TyCtxt<'tcx>,
    typing_env: ty::TypingEnv<'tcx>,
    ty: Ty<'tcx>,
    ckind: CItemKind,
) -> Option<Ty<'tcx>> {
    debug!("is_repr_nullable_ptr(tcx, ty = {:?})", ty);
    match ty.kind() {
        ty::Adt(ty_def, args) => {
            let field_ty = match &ty_def.variants().raw[..] {
                [var_one, var_two] => match (&var_one.fields.raw[..], &var_two.fields.raw[..]) {
                    ([], [field]) | ([field], []) => field.ty(tcx, args),
                    ([field1], [field2]) => {
                        let ty1 = field1.ty(tcx, args);
                        let ty2 = field2.ty(tcx, args);

                        if is_niche_optimization_candidate(tcx, typing_env, ty1) {
                            ty2
                        } else if is_niche_optimization_candidate(tcx, typing_env, ty2) {
                            ty1
                        } else {
                            return None;
                        }
                    }
                    _ => return None,
                },
                _ => return None,
            };

            if !ty_is_known_nonnull(tcx, typing_env, field_ty, ckind) {
                return None;
            }

            // At this point, the field's type is known to be nonnull and the parent enum is Option-like.
            // If the computed size for the field and the enum are different, the nonnull optimization isn't
            // being applied (and we've got a problem somewhere).
            let compute_size_skeleton = |t| SizeSkeleton::compute(t, tcx, typing_env).ok();
            if !compute_size_skeleton(ty)?.same_size(compute_size_skeleton(field_ty)?) {
                bug!("improper_ctypes: Option nonnull optimization not applied?");
            }

            // Return the nullable type this Option-like enum can be safely represented with.
            let field_ty_layout = tcx.layout_of(typing_env.as_query_input(field_ty));
            if field_ty_layout.is_err() && !field_ty.has_non_region_param() {
                bug!("should be able to compute the layout of non-polymorphic type");
            }

            let field_ty_abi = &field_ty_layout.ok()?.backend_repr;
            if let BackendRepr::Scalar(field_ty_scalar) = field_ty_abi {
                match field_ty_scalar.valid_range(&tcx) {
                    WrappingRange { start: 0, end }
                        if end == field_ty_scalar.size(&tcx).unsigned_int_max() - 1 =>
                    {
                        return Some(get_nullable_type(tcx, typing_env, field_ty).unwrap());
                    }
                    WrappingRange { start: 1, .. } => {
                        return Some(get_nullable_type(tcx, typing_env, field_ty).unwrap());
                    }
                    WrappingRange { start, end } => {
                        unreachable!("Unhandled start and end range: ({}, {})", start, end)
                    }
                };
            }
            None
        }
        ty::Pat(base, pat) => get_nullable_type_from_pat(tcx, typing_env, *base, *pat),
        _ => None,
    }
}

fn get_nullable_type_from_pat<'tcx>(
    tcx: TyCtxt<'tcx>,
    typing_env: ty::TypingEnv<'tcx>,
    base: Ty<'tcx>,
    pat: ty::Pattern<'tcx>,
) -> Option<Ty<'tcx>> {
    match *pat {
        ty::PatternKind::Range { .. } => get_nullable_type(tcx, typing_env, base),
        ty::PatternKind::Or(patterns) => {
            let first = get_nullable_type_from_pat(tcx, typing_env, base, patterns[0])?;
            for &pat in &patterns[1..] {
                assert_eq!(first, get_nullable_type_from_pat(tcx, typing_env, base, pat)?);
            }
            Some(first)
        }
    }
}

impl<'a, 'tcx> ImproperCTypesVisitor<'a, 'tcx> {
    /// Check if the type is an array and emit an unsafe type lint.
    fn check_for_array_ty(&mut self, sp: Span, ty: Ty<'tcx>) -> bool {
        if let ty::Array(..) = ty.kind() {
            self.emit_ffi_unsafe_type_lint(
                ty,
                sp,
                fluent::lint_improper_ctypes_array_reason,
                Some(fluent::lint_improper_ctypes_array_help),
            );
            true
        } else {
            false
        }
    }

    /// Checks if the given field's type is "ffi-safe".
    fn check_field_type_for_ffi(
        &self,
        acc: &mut CTypesVisitorState<'tcx>,
        field: &ty::FieldDef,
        args: GenericArgsRef<'tcx>,
    ) -> FfiResult<'tcx> {
        let field_ty = field.ty(self.cx.tcx, args);
        let field_ty = self
            .cx
            .tcx
            .try_normalize_erasing_regions(self.cx.typing_env(), field_ty)
            .unwrap_or(field_ty);
        self.check_type_for_ffi(acc, field_ty)
    }

    /// Checks if the given `VariantDef`'s field types are "ffi-safe".
    fn check_variant_for_ffi(
        &self,
        acc: &mut CTypesVisitorState<'tcx>,
        ty: Ty<'tcx>,
        def: ty::AdtDef<'tcx>,
        variant: &ty::VariantDef,
        args: GenericArgsRef<'tcx>,
    ) -> FfiResult<'tcx> {
        use FfiResult::*;
        let transparent_with_all_zst_fields = if def.repr().transparent() {
            if let Some(field) = transparent_newtype_field(self.cx.tcx, variant) {
                // Transparent newtypes have at most one non-ZST field which needs to be checked..
                match self.check_field_type_for_ffi(acc, field, args) {
                    FfiUnsafe { ty, .. } if ty.is_unit() => (),
                    r => return r,
                }

                false
            } else {
                // ..or have only ZST fields, which is FFI-unsafe (unless those fields are all
                // `PhantomData`).
                true
            }
        } else {
            false
        };

        // We can't completely trust `repr(C)` markings, so make sure the fields are actually safe.
        let mut all_phantom = !variant.fields.is_empty();
        for field in &variant.fields {
            all_phantom &= match self.check_field_type_for_ffi(acc, field, args) {
                FfiSafe => false,
                // `()` fields are FFI-safe!
                FfiUnsafe { ty, .. } if ty.is_unit() => false,
                FfiPhantom(..) => true,
                r @ FfiUnsafe { .. } => return r,
            }
        }

        if all_phantom {
            FfiPhantom(ty)
        } else if transparent_with_all_zst_fields {
            FfiUnsafe { ty, reason: fluent::lint_improper_ctypes_struct_zst, help: None }
        } else {
            FfiSafe
        }
    }

    /// Checks if the given type is "ffi-safe" (has a stable, well-defined
    /// representation which can be exported to C code).
    fn check_type_for_ffi(
        &self,
        acc: &mut CTypesVisitorState<'tcx>,
        ty: Ty<'tcx>,
    ) -> FfiResult<'tcx> {
        use FfiResult::*;

        let tcx = self.cx.tcx;

        // Protect against infinite recursion, for example
        // `struct S(*mut S);`.
        // FIXME: A recursion limit is necessary as well, for irregular
        // recursive types.
        if !acc.cache.insert(ty) {
            return FfiSafe;
        }

        match *ty.kind() {
            ty::Adt(def, args) => {
                if let Some(boxed) = ty.boxed_ty()
                    && matches!(self.mode, CItemKind::Definition)
                {
                    if boxed.is_sized(tcx, self.cx.typing_env()) {
                        return FfiSafe;
                    } else {
                        return FfiUnsafe {
                            ty,
                            reason: fluent::lint_improper_ctypes_box,
                            help: None,
                        };
                    }
                }
                if def.is_phantom_data() {
                    return FfiPhantom(ty);
                }
                match def.adt_kind() {
                    AdtKind::Struct | AdtKind::Union => {
                        if let Some(sym::cstring_type | sym::cstr_type) =
                            tcx.get_diagnostic_name(def.did())
                            && !acc.base_ty.is_mutable_ptr()
                        {
                            return FfiUnsafe {
                                ty,
                                reason: fluent::lint_improper_ctypes_cstr_reason,
                                help: Some(fluent::lint_improper_ctypes_cstr_help),
                            };
                        }

                        if !def.repr().c() && !def.repr().transparent() {
                            return FfiUnsafe {
                                ty,
                                reason: if def.is_struct() {
                                    fluent::lint_improper_ctypes_struct_layout_reason
                                } else {
                                    fluent::lint_improper_ctypes_union_layout_reason
                                },
                                help: if def.is_struct() {
                                    Some(fluent::lint_improper_ctypes_struct_layout_help)
                                } else {
                                    Some(fluent::lint_improper_ctypes_union_layout_help)
                                },
                            };
                        }

                        if def.non_enum_variant().field_list_has_applicable_non_exhaustive() {
                            return FfiUnsafe {
                                ty,
                                reason: if def.is_struct() {
                                    fluent::lint_improper_ctypes_struct_non_exhaustive
                                } else {
                                    fluent::lint_improper_ctypes_union_non_exhaustive
                                },
                                help: None,
                            };
                        }

                        if def.non_enum_variant().fields.is_empty() {
                            return FfiUnsafe {
                                ty,
                                reason: if def.is_struct() {
                                    fluent::lint_improper_ctypes_struct_fieldless_reason
                                } else {
                                    fluent::lint_improper_ctypes_union_fieldless_reason
                                },
                                help: if def.is_struct() {
                                    Some(fluent::lint_improper_ctypes_struct_fieldless_help)
                                } else {
                                    Some(fluent::lint_improper_ctypes_union_fieldless_help)
                                },
                            };
                        }

                        self.check_variant_for_ffi(acc, ty, def, def.non_enum_variant(), args)
                    }
                    AdtKind::Enum => {
                        if def.variants().is_empty() {
                            // Empty enums are okay... although sort of useless.
                            return FfiSafe;
                        }
                        // Check for a repr() attribute to specify the size of the
                        // discriminant.
                        if !def.repr().c() && !def.repr().transparent() && def.repr().int.is_none()
                        {
                            // Special-case types like `Option<extern fn()>` and `Result<extern fn(), ()>`
                            if let Some(ty) =
                                repr_nullable_ptr(self.cx.tcx, self.cx.typing_env(), ty, self.mode)
                            {
                                return self.check_type_for_ffi(acc, ty);
                            }

                            return FfiUnsafe {
                                ty,
                                reason: fluent::lint_improper_ctypes_enum_repr_reason,
                                help: Some(fluent::lint_improper_ctypes_enum_repr_help),
                            };
                        }

                        if let Some(IntegerType::Fixed(Integer::I128, _)) = def.repr().int {
                            return FfiUnsafe {
                                ty,
                                reason: fluent::lint_improper_ctypes_128bit,
                                help: None,
                            };
                        }

                        use improper_ctypes::check_non_exhaustive_variant;

                        let non_exhaustive = def.variant_list_has_applicable_non_exhaustive();
                        // Check the contained variants.
                        let ret = def.variants().iter().try_for_each(|variant| {
                            check_non_exhaustive_variant(non_exhaustive, variant)
                                .map_break(|reason| FfiUnsafe { ty, reason, help: None })?;

                            match self.check_variant_for_ffi(acc, ty, def, variant, args) {
                                FfiSafe => ControlFlow::Continue(()),
                                r => ControlFlow::Break(r),
                            }
                        });
                        if let ControlFlow::Break(result) = ret {
                            return result;
                        }

                        FfiSafe
                    }
                }
            }

            ty::Char => FfiUnsafe {
                ty,
                reason: fluent::lint_improper_ctypes_char_reason,
                help: Some(fluent::lint_improper_ctypes_char_help),
            },

            // It's just extra invariants on the type that you need to uphold,
            // but only the base type is relevant for being representable in FFI.
            ty::Pat(base, ..) => self.check_type_for_ffi(acc, base),

            ty::Int(ty::IntTy::I128) | ty::Uint(ty::UintTy::U128) => {
                FfiUnsafe { ty, reason: fluent::lint_improper_ctypes_128bit, help: None }
            }

            // Primitive types with a stable representation.
            ty::Bool | ty::Int(..) | ty::Uint(..) | ty::Float(..) | ty::Never => FfiSafe,

            ty::Slice(_) => FfiUnsafe {
                ty,
                reason: fluent::lint_improper_ctypes_slice_reason,
                help: Some(fluent::lint_improper_ctypes_slice_help),
            },

            ty::Dynamic(..) => {
                FfiUnsafe { ty, reason: fluent::lint_improper_ctypes_dyn, help: None }
            }

            ty::Str => FfiUnsafe {
                ty,
                reason: fluent::lint_improper_ctypes_str_reason,
                help: Some(fluent::lint_improper_ctypes_str_help),
            },

            ty::Tuple(..) => FfiUnsafe {
                ty,
                reason: fluent::lint_improper_ctypes_tuple_reason,
                help: Some(fluent::lint_improper_ctypes_tuple_help),
            },

            ty::RawPtr(ty, _) | ty::Ref(_, ty, _)
                if {
                    matches!(self.mode, CItemKind::Definition)
                        && ty.is_sized(self.cx.tcx, self.cx.typing_env())
                } =>
            {
                FfiSafe
            }

            ty::RawPtr(ty, _)
                if match ty.kind() {
                    ty::Tuple(tuple) => tuple.is_empty(),
                    _ => false,
                } =>
            {
                FfiSafe
            }

            ty::RawPtr(ty, _) | ty::Ref(_, ty, _) => self.check_type_for_ffi(acc, ty),

            ty::Array(inner_ty, _) => self.check_type_for_ffi(acc, inner_ty),

            ty::FnPtr(sig_tys, hdr) => {
                let sig = sig_tys.with(hdr);
                if sig.abi().is_rustic_abi() {
                    return FfiUnsafe {
                        ty,
                        reason: fluent::lint_improper_ctypes_fnptr_reason,
                        help: Some(fluent::lint_improper_ctypes_fnptr_help),
                    };
                }

                let sig = tcx.instantiate_bound_regions_with_erased(sig);
                for arg in sig.inputs() {
                    match self.check_type_for_ffi(acc, *arg) {
                        FfiSafe => {}
                        r => return r,
                    }
                }

                let ret_ty = sig.output();
                if ret_ty.is_unit() {
                    return FfiSafe;
                }

                self.check_type_for_ffi(acc, ret_ty)
            }

            ty::Foreign(..) => FfiSafe,

            // While opaque types are checked for earlier, if a projection in a struct field
            // normalizes to an opaque type, then it will reach this branch.
            ty::Alias(ty::Opaque, ..) => {
                FfiUnsafe { ty, reason: fluent::lint_improper_ctypes_opaque, help: None }
            }

            // `extern "C" fn` functions can have type parameters, which may or may not be FFI-safe,
            //  so they are currently ignored for the purposes of this lint.
            ty::Param(..) | ty::Alias(ty::Projection | ty::Inherent, ..)
                if matches!(self.mode, CItemKind::Definition) =>
            {
                FfiSafe
            }

            ty::UnsafeBinder(_) => todo!("FIXME(unsafe_binder)"),

            ty::Param(..)
            | ty::Alias(ty::Projection | ty::Inherent | ty::Free, ..)
            | ty::Infer(..)
            | ty::Bound(..)
            | ty::Error(_)
            | ty::Closure(..)
            | ty::CoroutineClosure(..)
            | ty::Coroutine(..)
            | ty::CoroutineWitness(..)
            | ty::Placeholder(..)
            | ty::FnDef(..) => bug!("unexpected type in foreign function: {:?}", ty),
        }
    }
1435
1436    fn emit_ffi_unsafe_type_lint(
1437        &mut self,
1438        ty: Ty<'tcx>,
1439        sp: Span,
1440        note: DiagMessage,
1441        help: Option<DiagMessage>,
1442    ) {
1443        let lint = match self.mode {
1444            CItemKind::Declaration => IMPROPER_CTYPES,
1445            CItemKind::Definition => IMPROPER_CTYPES_DEFINITIONS,
1446        };
1447        let desc = match self.mode {
1448            CItemKind::Declaration => "block",
1449            CItemKind::Definition => "fn",
1450        };
1451        let span_note = if let ty::Adt(def, _) = ty.kind()
1452            && let Some(sp) = self.cx.tcx.hir_span_if_local(def.did())
1453        {
1454            Some(sp)
1455        } else {
1456            None
1457        };
1458        self.cx.emit_span_lint(
1459            lint,
1460            sp,
1461            ImproperCTypes { ty, desc, label: sp, help, note, span_note },
1462        );
1463    }
1464
1465    fn check_for_opaque_ty(&mut self, sp: Span, ty: Ty<'tcx>) -> bool {
1466        struct ProhibitOpaqueTypes;
1467        impl<'tcx> ty::TypeVisitor<TyCtxt<'tcx>> for ProhibitOpaqueTypes {
1468            type Result = ControlFlow<Ty<'tcx>>;
1469
1470            fn visit_ty(&mut self, ty: Ty<'tcx>) -> Self::Result {
1471                if !ty.has_opaque_types() {
1472                    return ControlFlow::Continue(());
1473                }
1474
1475                if let ty::Alias(ty::Opaque, ..) = ty.kind() {
1476                    ControlFlow::Break(ty)
1477                } else {
1478                    ty.super_visit_with(self)
1479                }
1480            }
1481        }
1482
1483        if let Some(ty) = self
1484            .cx
1485            .tcx
1486            .try_normalize_erasing_regions(self.cx.typing_env(), ty)
1487            .unwrap_or(ty)
1488            .visit_with(&mut ProhibitOpaqueTypes)
1489            .break_value()
1490        {
1491            self.emit_ffi_unsafe_type_lint(ty, sp, fluent::lint_improper_ctypes_opaque, None);
1492            true
1493        } else {
1494            false
1495        }
1496    }
1497
1498    fn check_type_for_ffi_and_report_errors(
1499        &mut self,
1500        sp: Span,
1501        ty: Ty<'tcx>,
1502        is_static: bool,
1503        is_return_type: bool,
1504    ) {
1505        if self.check_for_opaque_ty(sp, ty) {
1506            // We've already emitted an error due to an opaque type.
1507            return;
1508        }
1509
1510        let ty = self.cx.tcx.try_normalize_erasing_regions(self.cx.typing_env(), ty).unwrap_or(ty);
1511
1512        // C doesn't really support passing arrays by value - the only way to pass an array by value
1513        // is through a struct. So, first test that the top level isn't an array, and then
1514        // recursively check the types inside.
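        //
        // Illustrative sketch (hypothetical declarations):
        //     extern "C" { fn by_value(x: [u8; 4]); }   // flagged: array passed by value
        //     #[repr(C)] struct Wrap([u8; 4]);
        //     extern "C" { fn ok(x: Wrap); }            // fine: the array is wrapped in a struct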
1515        if !is_static && self.check_for_array_ty(sp, ty) {
1516            return;
1517        }
1518
1519        // Don't report FFI errors for unit return types. This check exists here, and not in
1520        // the caller (where it would make more sense) so that normalization has definitely
1521        // happened.
1522        if is_return_type && ty.is_unit() {
1523            return;
1524        }
1525
1526        let mut acc = CTypesVisitorState { cache: FxHashSet::default(), base_ty: ty };
1527        match self.check_type_for_ffi(&mut acc, ty) {
1528            FfiResult::FfiSafe => {}
1529            FfiResult::FfiPhantom(ty) => {
1530                self.emit_ffi_unsafe_type_lint(
1531                    ty,
1532                    sp,
1533                    fluent::lint_improper_ctypes_only_phantomdata,
1534                    None,
1535                );
1536            }
1537            FfiResult::FfiUnsafe { ty, reason, help } => {
1538                self.emit_ffi_unsafe_type_lint(ty, sp, reason, help);
1539            }
1540        }
1541    }
1542
1543    /// Check if a function's argument types and result type are "ffi-safe".
1544    ///
1545    /// For a function with a Rust ABI, the argument types and result type are walked to find
1546    /// fn-ptr types that have external ABIs, as these still need to be checked.
1547    fn check_fn(&mut self, def_id: LocalDefId, decl: &'tcx hir::FnDecl<'_>) {
1548        let sig = self.cx.tcx.fn_sig(def_id).instantiate_identity();
1549        let sig = self.cx.tcx.instantiate_bound_regions_with_erased(sig);
1550
1551        for (input_ty, input_hir) in iter::zip(sig.inputs(), decl.inputs) {
1552            for (fn_ptr_ty, span) in self.find_fn_ptr_ty_with_external_abi(input_hir, *input_ty) {
1553                self.check_type_for_ffi_and_report_errors(span, fn_ptr_ty, false, false);
1554            }
1555        }
1556
1557        if let hir::FnRetTy::Return(ret_hir) = decl.output {
1558            for (fn_ptr_ty, span) in self.find_fn_ptr_ty_with_external_abi(ret_hir, sig.output()) {
1559                self.check_type_for_ffi_and_report_errors(span, fn_ptr_ty, false, true);
1560            }
1561        }
1562    }
1563
1564    /// Check if a function's argument types and result type are "ffi-safe".
1565    fn check_foreign_fn(&mut self, def_id: LocalDefId, decl: &'tcx hir::FnDecl<'_>) {
1566        let sig = self.cx.tcx.fn_sig(def_id).instantiate_identity();
1567        let sig = self.cx.tcx.instantiate_bound_regions_with_erased(sig);
1568
1569        for (input_ty, input_hir) in iter::zip(sig.inputs(), decl.inputs) {
1570            self.check_type_for_ffi_and_report_errors(input_hir.span, *input_ty, false, false);
1571        }
1572
1573        if let hir::FnRetTy::Return(ret_hir) = decl.output {
1574            self.check_type_for_ffi_and_report_errors(ret_hir.span, sig.output(), false, true);
1575        }
1576    }
1577
1578    fn check_foreign_static(&mut self, id: hir::OwnerId, span: Span) {
1579        let ty = self.cx.tcx.type_of(id).instantiate_identity();
1580        self.check_type_for_ffi_and_report_errors(span, ty, true, false);
1581    }
1582
1583    /// Find any fn-ptr types with external ABIs in `ty`.
1584    ///
1585    /// For example, `Option<extern "C" fn()>` returns `extern "C" fn()`.
1586    fn find_fn_ptr_ty_with_external_abi(
1587        &self,
1588        hir_ty: &hir::Ty<'tcx>,
1589        ty: Ty<'tcx>,
1590    ) -> Vec<(Ty<'tcx>, Span)> {
1591        struct FnPtrFinder<'tcx> {
1592            spans: Vec<Span>,
1593            tys: Vec<Ty<'tcx>>,
1594        }
1595
1596        impl<'tcx> hir::intravisit::Visitor<'_> for FnPtrFinder<'tcx> {
1597            fn visit_ty(&mut self, ty: &'_ hir::Ty<'_, AmbigArg>) {
1598                debug!(?ty);
1599                if let hir::TyKind::BareFn(hir::BareFnTy { abi, .. }) = ty.kind
1600                    && !abi.is_rustic_abi()
1601                {
1602                    self.spans.push(ty.span);
1603                }
1604
1605                hir::intravisit::walk_ty(self, ty)
1606            }
1607        }
1608
1609        impl<'tcx> ty::TypeVisitor<TyCtxt<'tcx>> for FnPtrFinder<'tcx> {
1610            type Result = ();
1611
1612            fn visit_ty(&mut self, ty: Ty<'tcx>) -> Self::Result {
1613                if let ty::FnPtr(_, hdr) = ty.kind()
1614                    && !hdr.abi.is_rustic_abi()
1615                {
1616                    self.tys.push(ty);
1617                }
1618
1619                ty.super_visit_with(self)
1620            }
1621        }
1622
1623        let mut visitor = FnPtrFinder { spans: Vec::new(), tys: Vec::new() };
1624        ty.visit_with(&mut visitor);
1625        visitor.visit_ty_unambig(hir_ty);
1626
1627        iter::zip(visitor.tys.drain(..), visitor.spans.drain(..)).collect()
1628    }
1629}
1630
1631impl<'tcx> LateLintPass<'tcx> for ImproperCTypesDeclarations {
1632    fn check_foreign_item(&mut self, cx: &LateContext<'tcx>, it: &hir::ForeignItem<'tcx>) {
1633        let mut vis = ImproperCTypesVisitor { cx, mode: CItemKind::Declaration };
1634        let abi = cx.tcx.hir_get_foreign_abi(it.hir_id());
1635
1636        match it.kind {
1637            hir::ForeignItemKind::Fn(sig, _, _) => {
1638                if abi.is_rustic_abi() {
1639                    vis.check_fn(it.owner_id.def_id, sig.decl)
1640                } else {
1641                    vis.check_foreign_fn(it.owner_id.def_id, sig.decl);
1642                }
1643            }
1644            hir::ForeignItemKind::Static(ty, _, _) if !abi.is_rustic_abi() => {
1645                vis.check_foreign_static(it.owner_id, ty.span);
1646            }
1647            hir::ForeignItemKind::Static(..) | hir::ForeignItemKind::Type => (),
1648        }
1649    }
1650}
1651
1652impl ImproperCTypesDefinitions {
1653    fn check_ty_maybe_containing_foreign_fnptr<'tcx>(
1654        &mut self,
1655        cx: &LateContext<'tcx>,
1656        hir_ty: &'tcx hir::Ty<'_>,
1657        ty: Ty<'tcx>,
1658    ) {
1659        let mut vis = ImproperCTypesVisitor { cx, mode: CItemKind::Definition };
1660        for (fn_ptr_ty, span) in vis.find_fn_ptr_ty_with_external_abi(hir_ty, ty) {
1661            vis.check_type_for_ffi_and_report_errors(span, fn_ptr_ty, true, false);
1662        }
1663    }
1664
1665    fn check_arg_for_power_alignment<'tcx>(
1666        &mut self,
1667        cx: &LateContext<'tcx>,
1668        ty: Ty<'tcx>,
1669    ) -> bool {
1670        assert!(cx.tcx.sess.target.os == "aix");
1671        // Structs (under repr(C)) follow the power alignment rule if:
1672        //   - the first field of the struct is a floating-point type that
1673        //     is larger than 4 bytes, or
1674        //   - the first field of the struct is an aggregate whose first field,
1675        //     recursively, is a floating-point type larger than
1676        //     4 bytes.
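        //
        // Illustrative sketch (hypothetical structs):
        //     #[repr(C)] struct A { d: f64, x: i32 }  // first field is an 8-byte float
        //     #[repr(C)] struct B { a: A, y: i32 }    // recursively-first field is `d: f64`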
1677        if ty.is_floating_point() && ty.primitive_size(cx.tcx).bytes() > 4 {
1678            return true;
1679        } else if let Adt(adt_def, _) = ty.kind()
1680            && adt_def.is_struct()
1681            && adt_def.repr().c()
1682            && !adt_def.repr().packed()
1683            && adt_def.repr().align.is_none()
1684        {
1685            let struct_variant = adt_def.variant(VariantIdx::ZERO);
1686            // Within a nested struct, all fields (not just the first) are
1687            // examined, so that fields following the nested struct in the
1688            // original struct are correctly reported as misaligned.
1689            for struct_field in &struct_variant.fields {
1690                let field_ty = cx.tcx.type_of(struct_field.did).instantiate_identity();
1691                if self.check_arg_for_power_alignment(cx, field_ty) {
1692                    return true;
1693                }
1694            }
1695        }
1696        return false;
1697    }
1698
1699    fn check_struct_for_power_alignment<'tcx>(
1700        &mut self,
1701        cx: &LateContext<'tcx>,
1702        item: &'tcx hir::Item<'tcx>,
1703    ) {
1704        let adt_def = cx.tcx.adt_def(item.owner_id.to_def_id());
1705        // repr(C) structs that are also packed or carry an explicit
1706        // alignment should be ignored.
1707        if adt_def.repr().c()
1708            && !adt_def.repr().packed()
1709            && adt_def.repr().align.is_none()
1710            && cx.tcx.sess.target.os == "aix"
1711            && adt_def.all_fields().next().is_some()
1712        {
1713            let struct_variant_data = item.expect_struct().2;
1714            for field_def in struct_variant_data.fields().iter().skip(1) {
1715                // Struct fields (after the first field) are checked for the
1716                // power alignment rule, as fields after the first are likely
1717                // to be the fields that are misaligned.
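                //
                // E.g. in `#[repr(C)] struct S { a: i32, d: f64 }` (illustrative),
                // only `d` is checked here and the lint fires on its span.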
1718                let def_id = field_def.def_id;
1719                let ty = cx.tcx.type_of(def_id).instantiate_identity();
1720                if self.check_arg_for_power_alignment(cx, ty) {
1721                    cx.emit_span_lint(USES_POWER_ALIGNMENT, field_def.span, UsesPowerAlignment);
1722                }
1723            }
1724        }
1725    }
1726}
1727
1728/// `ImproperCTypesDefinitions` checks items outside of foreign items (i.e. items that aren't in
1729/// `extern "C" { }` blocks):
1730///
1731/// - `extern "<abi>" fn` definitions are checked in the same way as the
1732///   `ImproperCTypesDeclarations` visitor checks functions if `<abi>` is external (e.g. "C").
1733/// - All other items which contain types (e.g. other functions, struct definitions, etc) are
1734///   checked for extern fn-ptrs with external ABIs.
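///
/// Illustrative sketch of what gets checked (names hypothetical):
///
/// ```ignore (illustrative)
/// extern "C" fn takes_string(_: String) {}   // checked like a foreign declaration
///
/// struct Callbacks {
///     cb: extern "C" fn(String),             // fn-ptr with an external ABI, also checked
/// }
/// ```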
1735impl<'tcx> LateLintPass<'tcx> for ImproperCTypesDefinitions {
1736    fn check_item(&mut self, cx: &LateContext<'tcx>, item: &'tcx hir::Item<'tcx>) {
1737        match item.kind {
1738            hir::ItemKind::Static(_, _, ty, _)
1739            | hir::ItemKind::Const(_, _, ty, _)
1740            | hir::ItemKind::TyAlias(_, _, ty) => {
1741                self.check_ty_maybe_containing_foreign_fnptr(
1742                    cx,
1743                    ty,
1744                    cx.tcx.type_of(item.owner_id).instantiate_identity(),
1745                );
1746            }
1747            // See `check_fn`.
1748            hir::ItemKind::Fn { .. } => {}
1749            // Structs are checked based on if they follow the power alignment
1750            // rule (under repr(C)).
1751            hir::ItemKind::Struct(..) => {
1752                self.check_struct_for_power_alignment(cx, item);
1753            }
1754            // See `check_field_def`.
1755            hir::ItemKind::Union(..) | hir::ItemKind::Enum(..) => {}
1756            // Doesn't define something that can contain an external type to be checked.
1757            hir::ItemKind::Impl(..)
1758            | hir::ItemKind::TraitAlias(..)
1759            | hir::ItemKind::Trait(..)
1760            | hir::ItemKind::GlobalAsm { .. }
1761            | hir::ItemKind::ForeignMod { .. }
1762            | hir::ItemKind::Mod(..)
1763            | hir::ItemKind::Macro(..)
1764            | hir::ItemKind::Use(..)
1765            | hir::ItemKind::ExternCrate(..) => {}
1766        }
1767    }
1768
1769    fn check_field_def(&mut self, cx: &LateContext<'tcx>, field: &'tcx hir::FieldDef<'tcx>) {
1770        self.check_ty_maybe_containing_foreign_fnptr(
1771            cx,
1772            field.ty,
1773            cx.tcx.type_of(field.def_id).instantiate_identity(),
1774        );
1775    }
1776
1777    fn check_fn(
1778        &mut self,
1779        cx: &LateContext<'tcx>,
1780        kind: hir::intravisit::FnKind<'tcx>,
1781        decl: &'tcx hir::FnDecl<'_>,
1782        _: &'tcx hir::Body<'_>,
1783        _: Span,
1784        id: LocalDefId,
1785    ) {
1786        use hir::intravisit::FnKind;
1787
1788        let abi = match kind {
1789            FnKind::ItemFn(_, _, header, ..) => header.abi,
1790            FnKind::Method(_, sig, ..) => sig.header.abi,
1791            _ => return,
1792        };
1793
1794        let mut vis = ImproperCTypesVisitor { cx, mode: CItemKind::Definition };
1795        if abi.is_rustic_abi() {
1796            vis.check_fn(id, decl);
1797        } else {
1798            vis.check_foreign_fn(id, decl);
1799        }
1800    }
1801}
1802
1803declare_lint_pass!(VariantSizeDifferences => [VARIANT_SIZE_DIFFERENCES]);
1804
1805impl<'tcx> LateLintPass<'tcx> for VariantSizeDifferences {
1806    fn check_item(&mut self, cx: &LateContext<'_>, it: &hir::Item<'_>) {
1807        if let hir::ItemKind::Enum(_, _, ref enum_definition) = it.kind {
1808            let t = cx.tcx.type_of(it.owner_id).instantiate_identity();
1809            let ty = cx.tcx.erase_regions(t);
1810            let Ok(layout) = cx.layout_of(ty) else { return };
1811            let Variants::Multiple { tag_encoding: TagEncoding::Direct, tag, variants, .. } =
1812                &layout.variants
1813            else {
1814                return;
1815            };
1816
1817            let tag_size = tag.size(&cx.tcx).bytes();
1818
1819            debug!(
1820                "enum `{}` is {} bytes large with layout:\n{:#?}",
1821                t,
1822                layout.size.bytes(),
1823                layout
1824            );
1825
1826            let (largest, slargest, largest_index) = iter::zip(enum_definition.variants, variants)
1827                .map(|(variant, variant_layout)| {
1828                    // Subtract the size of the enum tag.
1829                    let bytes = variant_layout.size.bytes().saturating_sub(tag_size);
1830
1831                    debug!("- variant `{}` is {} bytes large", variant.ident, bytes);
1832                    bytes
1833                })
1834                .enumerate()
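                // Fold into (largest, second-largest, index of the largest variant).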
1835                .fold((0, 0, 0), |(l, s, li), (idx, size)| {
1836                    if size > l {
1837                        (size, l, idx)
1838                    } else if size > s {
1839                        (l, size, li)
1840                    } else {
1841                        (l, s, li)
1842                    }
1843                });
1844
1845            // We only warn if the largest variant is more than three times as large
1846            // as the second-largest (and the second-largest is not zero-sized).
1847            if largest > slargest * 3 && slargest > 0 {
1848                cx.emit_span_lint(
1849                    VARIANT_SIZE_DIFFERENCES,
1850                    enum_definition.variants[largest_index].span,
1851                    VariantSizeDifferencesDiag { largest },
1852                );
1853            }
1854        }
1855    }
1856}
1857
1858declare_lint! {
1859    /// The `invalid_atomic_ordering` lint detects passing an `Ordering`
1860    /// to an atomic operation that does not support that ordering.
1861    ///
1862    /// ### Example
1863    ///
1864    /// ```rust,compile_fail
1865    /// # use core::sync::atomic::{AtomicU8, Ordering};
1866    /// let atom = AtomicU8::new(0);
1867    /// let value = atom.load(Ordering::Release);
1868    /// # let _ = value;
1869    /// ```
1870    ///
1871    /// {{produces}}
1872    ///
1873    /// ### Explanation
1874    ///
1875    /// Some atomic operations are only supported for a subset of the
1876    /// `atomic::Ordering` variants. Passing an unsupported variant will cause
1877    /// an unconditional panic at runtime, which is detected by this lint.
1878    ///
1879    /// This lint will trigger in the following cases (where `AtomicType` is an
1880    /// atomic type from `core::sync::atomic`, such as `AtomicBool`,
1881    /// `AtomicPtr`, `AtomicUsize`, or any of the other integer atomics):
1882    ///
1883    /// - Passing `Ordering::Acquire` or `Ordering::AcqRel` to
1884    ///   `AtomicType::store`.
1885    ///
1886    /// - Passing `Ordering::Release` or `Ordering::AcqRel` to
1887    ///   `AtomicType::load`.
1888    ///
1889    /// - Passing `Ordering::Relaxed` to `core::sync::atomic::fence` or
1890    ///   `core::sync::atomic::compiler_fence`.
1891    ///
1892    /// - Passing `Ordering::Release` or `Ordering::AcqRel` as the failure
1893    ///   ordering for any of `AtomicType::compare_exchange`,
1894    ///   `AtomicType::compare_exchange_weak`, or `AtomicType::fetch_update`.
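    ///
    /// For example, using `Release` as the failure ordering of `compare_exchange`
    /// is also rejected:
    ///
    /// ```rust,compile_fail
    /// # use core::sync::atomic::{AtomicU8, Ordering};
    /// let atom = AtomicU8::new(0);
    /// let _ = atom.compare_exchange(0, 1, Ordering::AcqRel, Ordering::Release);
    /// ```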
1895    INVALID_ATOMIC_ORDERING,
1896    Deny,
1897    "usage of invalid atomic ordering in atomic operations and memory fences"
1898}
1899
1900declare_lint_pass!(InvalidAtomicOrdering => [INVALID_ATOMIC_ORDERING]);
1901
1902impl InvalidAtomicOrdering {
1903    fn inherent_atomic_method_call<'hir>(
1904        cx: &LateContext<'_>,
1905        expr: &Expr<'hir>,
1906        recognized_names: &[Symbol], // used for fast path calculation
1907    ) -> Option<(Symbol, &'hir [Expr<'hir>])> {
1908        const ATOMIC_TYPES: &[Symbol] = &[
1909            sym::AtomicBool,
1910            sym::AtomicPtr,
1911            sym::AtomicUsize,
1912            sym::AtomicU8,
1913            sym::AtomicU16,
1914            sym::AtomicU32,
1915            sym::AtomicU64,
1916            sym::AtomicU128,
1917            sym::AtomicIsize,
1918            sym::AtomicI8,
1919            sym::AtomicI16,
1920            sym::AtomicI32,
1921            sym::AtomicI64,
1922            sym::AtomicI128,
1923        ];
1924        if let ExprKind::MethodCall(method_path, _, args, _) = &expr.kind
1925            && recognized_names.contains(&method_path.ident.name)
1926            && let Some(m_def_id) = cx.typeck_results().type_dependent_def_id(expr.hir_id)
1927            && let Some(impl_did) = cx.tcx.impl_of_method(m_def_id)
1928            && let Some(adt) = cx.tcx.type_of(impl_did).instantiate_identity().ty_adt_def()
1929            // skip extension traits, only lint functions from the standard library
1930            && cx.tcx.trait_id_of_impl(impl_did).is_none()
1931            && let parent = cx.tcx.parent(adt.did())
1932            && cx.tcx.is_diagnostic_item(sym::atomic_mod, parent)
1933            && ATOMIC_TYPES.contains(&cx.tcx.item_name(adt.did()))
1934        {
1935            return Some((method_path.ident.name, args));
1936        }
1937        None
1938    }
1939
1940    fn match_ordering(cx: &LateContext<'_>, ord_arg: &Expr<'_>) -> Option<Symbol> {
1941        let ExprKind::Path(ref ord_qpath) = ord_arg.kind else { return None };
1942        let did = cx.qpath_res(ord_qpath, ord_arg.hir_id).opt_def_id()?;
1943        let tcx = cx.tcx;
1944        let atomic_ordering = tcx.get_diagnostic_item(sym::Ordering);
1945        let name = tcx.item_name(did);
1946        let parent = tcx.parent(did);
1947        [sym::Relaxed, sym::Release, sym::Acquire, sym::AcqRel, sym::SeqCst].into_iter().find(
1948            |&ordering| {
1949                name == ordering
1950                    && (Some(parent) == atomic_ordering
1951                            // needed in case this is a ctor, not a variant
1952                            || tcx.opt_parent(parent) == atomic_ordering)
1953            },
1954        )
1955    }
1956
1957    fn check_atomic_load_store(cx: &LateContext<'_>, expr: &Expr<'_>) {
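        // Per the lint docs above: `load` rejects `Release`, `store` rejects
        // `Acquire`, and both reject `AcqRel`.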
1958        if let Some((method, args)) =
1959            Self::inherent_atomic_method_call(cx, expr, &[sym::load, sym::store])
1960            && let Some((ordering_arg, invalid_ordering)) = match method {
1961                sym::load => Some((&args[0], sym::Release)),
1962                sym::store => Some((&args[1], sym::Acquire)),
1963                _ => None,
1964            }
1965            && let Some(ordering) = Self::match_ordering(cx, ordering_arg)
1966            && (ordering == invalid_ordering || ordering == sym::AcqRel)
1967        {
1968            if method == sym::load {
1969                cx.emit_span_lint(INVALID_ATOMIC_ORDERING, ordering_arg.span, AtomicOrderingLoad);
1970            } else {
1971                cx.emit_span_lint(INVALID_ATOMIC_ORDERING, ordering_arg.span, AtomicOrderingStore);
1972            };
1973        }
1974    }
1975
1976    fn check_memory_fence(cx: &LateContext<'_>, expr: &Expr<'_>) {
1977        if let ExprKind::Call(func, args) = expr.kind
1978            && let ExprKind::Path(ref func_qpath) = func.kind
1979            && let Some(def_id) = cx.qpath_res(func_qpath, func.hir_id).opt_def_id()
1980            && matches!(cx.tcx.get_diagnostic_name(def_id), Some(sym::fence | sym::compiler_fence))
1981            && Self::match_ordering(cx, &args[0]) == Some(sym::Relaxed)
1982        {
1983            cx.emit_span_lint(INVALID_ATOMIC_ORDERING, args[0].span, AtomicOrderingFence);
1984        }
1985    }
1986
1987    fn check_atomic_compare_exchange(cx: &LateContext<'_>, expr: &Expr<'_>) {
1988        let Some((method, args)) = Self::inherent_atomic_method_call(
1989            cx,
1990            expr,
1991            &[sym::fetch_update, sym::compare_exchange, sym::compare_exchange_weak],
1992        ) else {
1993            return;
1994        };
1995
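        // The failure ordering is the second argument of `fetch_update` and the
        // fourth of `compare_exchange{,_weak}` (the receiver is not part of `args`).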
1996        let fail_order_arg = match method {
1997            sym::fetch_update => &args[1],
1998            sym::compare_exchange | sym::compare_exchange_weak => &args[3],
1999            _ => return,
2000        };
2001
2002        let Some(fail_ordering) = Self::match_ordering(cx, fail_order_arg) else { return };
2003
2004        if matches!(fail_ordering, sym::Release | sym::AcqRel) {
2005            cx.emit_span_lint(
2006                INVALID_ATOMIC_ORDERING,
2007                fail_order_arg.span,
2008                InvalidAtomicOrderingDiag { method, fail_order_arg_span: fail_order_arg.span },
2009            );
2010        }
2011    }
2012}
2013
2014impl<'tcx> LateLintPass<'tcx> for InvalidAtomicOrdering {
2015    fn check_expr(&mut self, cx: &LateContext<'tcx>, expr: &'tcx Expr<'_>) {
2016        Self::check_atomic_load_store(cx, expr);
2017        Self::check_memory_fence(cx, expr);
2018        Self::check_atomic_compare_exchange(cx, expr);
2019    }
2020}