rustc_target/callconv/mips64.rs

use rustc_abi::{
    BackendRepr, FieldsShape, Float, HasDataLayout, Primitive, Reg, Size, TyAbiInterface,
};

use crate::callconv::{ArgAbi, ArgExtension, CastTarget, FnAbi, PassMode, Uniform};

fn extend_integer_width_mips<Ty>(arg: &mut ArgAbi<'_, Ty>, bits: u64) {
    // The MIPS n64 ABI requires 32-bit integer values, including unsigned ones,
    // to be kept sign-extended to the full 64-bit register width.
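    // For example, a `u32` argument with value 0x8000_0000 is passed as the
    // sign-extended register value 0xFFFF_FFFF_8000_0000 rather than being
    // zero-extended.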
    if let BackendRepr::Scalar(scalar) = arg.layout.backend_repr
        && let Primitive::Int(i, signed) = scalar.primitive()
        && !signed
        && i.size().bits() == 32
        && let PassMode::Direct(ref mut attrs) = arg.mode
    {
        attrs.ext(ArgExtension::Sext);
        return;
    }

    arg.extend_integer_width_to(bits);
}

/// Returns the float register class for field `i` of `ret`, if that field is a
/// scalar `f32` or `f64`.
fn float_reg<'a, Ty, C>(cx: &C, ret: &ArgAbi<'a, Ty>, i: usize) -> Option<Reg>
where
    Ty: TyAbiInterface<'a, C> + Copy,
    C: HasDataLayout,
{
    match ret.layout.field(cx, i).backend_repr {
        BackendRepr::Scalar(scalar) => match scalar.primitive() {
            Primitive::Float(Float::F32) => Some(Reg::f32()),
            Primitive::Float(Float::F64) => Some(Reg::f64()),
            _ => None,
        },
        _ => None,
    }
}

fn classify_ret<'a, Ty, C>(cx: &C, ret: &mut ArgAbi<'a, Ty>)
where
    Ty: TyAbiInterface<'a, C> + Copy,
    C: HasDataLayout,
{
    if !ret.layout.is_aggregate() {
        extend_integer_width_mips(ret, 64);
        return;
    }

    let size = ret.layout.size;
    let bits = size.bits();
    if bits <= 128 {
        // Unlike other architectures, which return aggregates in registers, MIPS
        // n64 limits the use of float registers to structures (not unions)
        // containing exactly one or two float fields.
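        // For example, `struct S(f64, f64)` is returned in a pair of float
        // registers, while `struct S(f64, u32)` and all unions fall through to
        // the integer cast below.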
        if let FieldsShape::Arbitrary { .. } = ret.layout.fields {
            if ret.layout.fields.count() == 1 {
                if let Some(reg) = float_reg(cx, ret, 0) {
                    ret.cast_to(reg);
                    return;
                }
            } else if ret.layout.fields.count() == 2
                && let Some(reg0) = float_reg(cx, ret, 0)
                && let Some(reg1) = float_reg(cx, ret, 1)
            {
                ret.cast_to(CastTarget::pair(reg0, reg1));
                return;
            }
        }

        // Otherwise, cast to a structure of uniform 64-bit integer chunks.
        ret.cast_to(Uniform::new(Reg::i64(), size));
    } else {
        ret.make_indirect();
    }
}

fn classify_arg<'a, Ty, C>(cx: &C, arg: &mut ArgAbi<'a, Ty>)
where
    Ty: TyAbiInterface<'a, C> + Copy,
    C: HasDataLayout,
{
    if !arg.layout.is_aggregate() {
        extend_integer_width_mips(arg, 64);
        return;
    }

    let dl = cx.data_layout();
    let size = arg.layout.size;
    let mut prefix = [None; 8];
    let mut prefix_index = 0;

    match arg.layout.fields {
        FieldsShape::Primitive => unreachable!(),
        FieldsShape::Array { .. } => {
            // Arrays are passed indirectly.
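            // (e.g. `[u64; 2]` is passed via a pointer to a copy rather than
            // in registers)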
            arg.make_indirect();
            return;
        }
        FieldsShape::Union(_) => {
            // Unions are always treated as a series of 64-bit integer chunks.
        }
        FieldsShape::Arbitrary { .. } => {
            // Structures are split up into a series of 64-bit integer chunks, but any aligned
            // doubles not part of another aggregate are passed as floats.
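            // For example, a struct laid out as `(u64, f64)` becomes one `i64`
            // chunk followed by an `f64` register, while a double nested inside
            // an inner struct field stays part of the integer chunks.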
            let mut last_offset = Size::ZERO;

            for i in 0..arg.layout.fields.count() {
                let field = arg.layout.field(cx, i);
                let offset = arg.layout.fields.offset(i);

                // We only care about aligned doubles
                if let BackendRepr::Scalar(scalar) = field.backend_repr {
                    if scalar.primitive() == Primitive::Float(Float::F64) {
                        if offset.is_aligned(dl.f64_align.abi) {
                            // Insert enough integers to cover [last_offset, offset)
                            assert!(last_offset.is_aligned(dl.f64_align.abi));
                            for _ in 0..((offset - last_offset).bits() / 64)
                                .min((prefix.len() - prefix_index) as u64)
                            {
                                prefix[prefix_index] = Some(Reg::i64());
                                prefix_index += 1;
                            }

                            if prefix_index == prefix.len() {
                                break;
                            }

                            prefix[prefix_index] = Some(Reg::f64());
                            prefix_index += 1;
                            last_offset = offset + Reg::f64().size;
                        }
                    }
                }
            }
        }
    };

    // Pass the first (at most) eight 64-bit chunks in the registers described by
    // `prefix`; the remainder of the aggregate is a uniform series of `i64` chunks.
    let rest_size = size - Size::from_bytes(8) * prefix_index as u64;
    arg.cast_to(CastTarget::prefixed(prefix, Uniform::new(Reg::i64(), rest_size)));
}

/// Computes the ABI for the return value and each argument of `fn_abi`
/// according to the MIPS n64 calling convention.
pub(crate) fn compute_abi_info<'a, Ty, C>(cx: &C, fn_abi: &mut FnAbi<'a, Ty>)
where
    Ty: TyAbiInterface<'a, C> + Copy,
    C: HasDataLayout,
{
    if !fn_abi.ret.is_ignore() {
        classify_ret(cx, &mut fn_abi.ret);
    }

    for arg in fn_abi.args.iter_mut() {
        if arg.is_ignore() {
            continue;
        }
        classify_arg(cx, arg);
    }
}