Rollup merge of #142680 - beetrees:sparc64-float-struct-abi, r=tgross35

Fix passing/returning structs with the 64-bit SPARC ABI

Fixes the 64-bit SPARC part of rust-lang/rust#115609 by replacing the current implementation with a new implementation modelled on the RISC-V calling convention code ([SPARC ABI reference](https://sparc.org/wp-content/uploads/2014/01/SCD.2.4.1.pdf.gz)).

Pinging `sparcv9-sun-solaris` target maintainers: @psumbera @kulikjak
Fixes rust-lang/rust#115336
Fixes rust-lang/rust#115399
Fixes rust-lang/rust#122620
Fixes rust-lang/rust#147883
r? @workingjubilee
This commit is contained in:
Jacob Pratt 2026-02-12 00:41:05 -05:00 committed by GitHub
commit b1b6533077
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
4 changed files with 366 additions and 186 deletions

View file

@ -35,6 +35,7 @@ impl Reg {
reg_ctor!(f32, Float, 32);
reg_ctor!(f64, Float, 64);
reg_ctor!(f128, Float, 128);
}
impl Reg {

View file

@ -1,214 +1,203 @@
// FIXME: This needs an audit for correctness and completeness.
use rustc_abi::{
BackendRepr, FieldsShape, Float, HasDataLayout, Primitive, Reg, Scalar, Size, TyAbiInterface,
TyAndLayout,
Align, BackendRepr, FieldsShape, Float, HasDataLayout, Primitive, Reg, Size, TyAbiInterface,
TyAndLayout, Variants,
};
use crate::callconv::{ArgAbi, ArgAttribute, CastTarget, FnAbi, Uniform};
use crate::spec::{Env, HasTargetSpec, Os};
use crate::spec::{HasTargetSpec, Os};
#[derive(Clone, Debug)]
struct Sdata {
pub prefix: [Option<Reg>; 8],
pub prefix_index: usize,
pub last_offset: Size,
pub has_float: bool,
pub arg_attribute: ArgAttribute,
// NOTE: GCC and Clang/LLVM have disagreements that the ABI doesn't resolve, we match the
// Clang/LLVM behavior in these cases.
#[derive(Copy, Clone)]
enum DoubleWord {
F64,
F128Start,
F128End,
Words([Word; 2]),
}
fn arg_scalar<C>(cx: &C, scalar: &Scalar, offset: Size, mut data: Sdata) -> Sdata
where
C: HasDataLayout,
{
let dl = cx.data_layout();
if !matches!(scalar.primitive(), Primitive::Float(Float::F32 | Float::F64)) {
return data;
}
data.has_float = true;
if !data.last_offset.is_aligned(dl.f64_align) && data.last_offset < offset {
if data.prefix_index == data.prefix.len() {
return data;
}
data.prefix[data.prefix_index] = Some(Reg::i32());
data.prefix_index += 1;
data.last_offset = data.last_offset + Reg::i32().size;
}
for _ in 0..((offset - data.last_offset).bits() / 64)
.min((data.prefix.len() - data.prefix_index) as u64)
{
data.prefix[data.prefix_index] = Some(Reg::i64());
data.prefix_index += 1;
data.last_offset = data.last_offset + Reg::i64().size;
}
if data.last_offset < offset {
if data.prefix_index == data.prefix.len() {
return data;
}
data.prefix[data.prefix_index] = Some(Reg::i32());
data.prefix_index += 1;
data.last_offset = data.last_offset + Reg::i32().size;
}
if data.prefix_index == data.prefix.len() {
return data;
}
if scalar.primitive() == Primitive::Float(Float::F32) {
data.arg_attribute = ArgAttribute::InReg;
data.prefix[data.prefix_index] = Some(Reg::f32());
data.last_offset = offset + Reg::f32().size;
} else {
data.prefix[data.prefix_index] = Some(Reg::f64());
data.last_offset = offset + Reg::f64().size;
}
data.prefix_index += 1;
data
#[derive(Copy, Clone)]
enum Word {
F32,
Integer,
}
fn arg_scalar_pair<C>(
fn classify<'a, Ty, C>(
cx: &C,
scalar1: &Scalar,
scalar2: &Scalar,
mut offset: Size,
mut data: Sdata,
) -> Sdata
where
C: HasDataLayout,
{
data = arg_scalar(cx, scalar1, offset, data);
match (scalar1.primitive(), scalar2.primitive()) {
(Primitive::Float(Float::F32), _) => offset += Reg::f32().size,
(_, Primitive::Float(Float::F64)) => offset += Reg::f64().size,
(Primitive::Int(i, _signed), _) => offset += i.size(),
(Primitive::Pointer(_), _) => offset += Reg::i64().size,
_ => {}
}
if !offset.bytes().is_multiple_of(4)
&& matches!(scalar2.primitive(), Primitive::Float(Float::F32 | Float::F64))
{
offset += Size::from_bytes(4 - (offset.bytes() % 4));
}
data = arg_scalar(cx, scalar2, offset, data);
data
}
fn parse_structure<'a, Ty, C>(
cx: &C,
layout: TyAndLayout<'a, Ty>,
mut data: Sdata,
mut offset: Size,
) -> Sdata
where
arg_layout: &TyAndLayout<'a, Ty>,
offset: Size,
double_words: &mut [DoubleWord; 4],
) where
Ty: TyAbiInterface<'a, C> + Copy,
C: HasDataLayout,
{
if let FieldsShape::Union(_) = layout.fields {
return data;
}
// If this function does not update the `double_words` array, the value will be passed via
// integer registers. The array is initialized with `DoubleWord::Words([Word::Integer; 2])`.
match layout.backend_repr {
BackendRepr::Scalar(scalar) => {
data = arg_scalar(cx, &scalar, offset, data);
}
BackendRepr::Memory { .. } => {
for i in 0..layout.fields.count() {
if offset < layout.fields.offset(i) {
offset = layout.fields.offset(i);
match arg_layout.backend_repr {
BackendRepr::Scalar(scalar) => match scalar.primitive() {
Primitive::Float(float) => {
if offset.is_aligned(Ord::min(*float.align(cx), Align::EIGHT)) {
let index = offset.bytes_usize() / 8;
match float {
Float::F128 => {
double_words[index] = DoubleWord::F128Start;
double_words[index + 1] = DoubleWord::F128End;
}
Float::F64 => {
double_words[index] = DoubleWord::F64;
}
Float::F32 => match &mut double_words[index] {
DoubleWord::Words(words) => {
words[(offset.bytes_usize() % 8) / 4] = Word::F32;
}
_ => unreachable!(),
},
Float::F16 => {
// Match LLVM by passing `f16` in integer registers.
}
}
} else {
/* pass unaligned floats in integer registers */
}
data = parse_structure(cx, layout.field(cx, i), data.clone(), offset);
}
}
_ => {
if let BackendRepr::ScalarPair(scalar1, scalar2) = &layout.backend_repr {
data = arg_scalar_pair(cx, scalar1, scalar2, offset, data);
Primitive::Int(_, _) | Primitive::Pointer(_) => { /* pass in integer registers */ }
},
BackendRepr::SimdVector { .. } => {}
BackendRepr::ScalableVector { .. } => {}
BackendRepr::ScalarPair(..) | BackendRepr::Memory { .. } => match arg_layout.fields {
FieldsShape::Primitive => {
unreachable!("aggregates can't have `FieldsShape::Primitive`")
}
}
FieldsShape::Union(_) => {
if !arg_layout.is_zst() {
if arg_layout.is_transparent() {
let non_1zst_elem = arg_layout.non_1zst_field(cx).expect("not exactly one non-1-ZST field in non-ZST repr(transparent) union").1;
classify(cx, &non_1zst_elem, offset, double_words);
}
}
}
FieldsShape::Array { .. } => {}
FieldsShape::Arbitrary { .. } => match arg_layout.variants {
Variants::Multiple { .. } => {}
Variants::Single { .. } | Variants::Empty => {
// Match Clang by ignoring whether a struct is packed and just considering
// whether individual fields are aligned. GCC currently uses only integer
// registers when passing packed structs.
for i in arg_layout.fields.index_by_increasing_offset() {
classify(
cx,
&arg_layout.field(cx, i),
offset + arg_layout.fields.offset(i),
double_words,
);
}
}
},
},
}
data
}
fn classify_arg<'a, Ty, C>(cx: &C, arg: &mut ArgAbi<'a, Ty>, in_registers_max: Size)
where
fn classify_arg<'a, Ty, C>(
cx: &C,
arg: &mut ArgAbi<'a, Ty>,
in_registers_max: Size,
total_double_word_count: &mut usize,
) where
Ty: TyAbiInterface<'a, C> + Copy,
C: HasDataLayout,
{
// 64-bit SPARC allocates argument stack space in 64-bit chunks (double words), some of which
// are promoted to registers based on their position on the stack.
// Keep track of the total number of double words used by arguments so far. This allows padding
// arguments to be inserted where necessary to ensure that 16-aligned arguments are passed in an
// aligned set of registers.
let pad = !total_double_word_count.is_multiple_of(2) && arg.layout.align.abi.bytes() == 16;
// The number of double words used by this argument.
let double_word_count = arg.layout.size.bytes_usize().div_ceil(8);
// The number of double words before this argument, including any padding.
let start_double_word_count = *total_double_word_count + usize::from(pad);
if arg.layout.pass_indirectly_in_non_rustic_abis(cx) {
arg.make_indirect();
*total_double_word_count += 1;
return;
}
if !arg.layout.is_aggregate() {
arg.extend_integer_width_to(64);
*total_double_word_count = start_double_word_count + double_word_count;
return;
}
let total = arg.layout.size;
if total > in_registers_max {
arg.make_indirect();
*total_double_word_count += 1;
return;
}
match arg.layout.fields {
FieldsShape::Primitive => unreachable!(),
FieldsShape::Array { .. } => {
// Arrays are passed indirectly
arg.make_indirect();
return;
*total_double_word_count = start_double_word_count + double_word_count;
const ARGUMENT_REGISTERS: usize = 8;
let mut double_words = [DoubleWord::Words([Word::Integer; 2]); ARGUMENT_REGISTERS / 2];
classify(cx, &arg.layout, Size::ZERO, &mut double_words);
let mut regs = [None; ARGUMENT_REGISTERS];
let mut i = 0;
let mut push = |reg| {
regs[i] = Some(reg);
i += 1;
};
let mut attrs = ArgAttribute::empty();
for (index, double_word) in double_words.into_iter().enumerate() {
if arg.layout.size.bytes_usize() <= index * 8 {
break;
}
FieldsShape::Union(_) => {
// Unions are always treated as a series of 64-bit integer chunks
}
FieldsShape::Arbitrary { .. } => {
// Structures with floating point numbers need special care.
let mut data = parse_structure(
cx,
arg.layout,
Sdata {
prefix: [None; 8],
prefix_index: 0,
last_offset: Size::ZERO,
has_float: false,
arg_attribute: ArgAttribute::default(),
},
Size::ZERO,
);
if data.has_float {
// Structure { float, int, int } doesn't like to be handled like
// { float, long int }. Other way around it doesn't mind.
if data.last_offset < arg.layout.size
&& !data.last_offset.bytes().is_multiple_of(8)
&& data.prefix_index < data.prefix.len()
{
data.prefix[data.prefix_index] = Some(Reg::i32());
data.prefix_index += 1;
data.last_offset += Reg::i32().size;
match double_word {
// `f128` must be aligned to be assigned a float register.
DoubleWord::F128Start if (start_double_word_count + index).is_multiple_of(2) => {
push(Reg::f128());
}
DoubleWord::F128Start => {
// Clang currently handles this case nonsensically, always returning a packed
// `struct { long double x; }` in an aligned quad floating-point register even when
// the `long double` isn't aligned on the stack, which also makes all future
// arguments get passed in the wrong registers. This passes the `f128` in integer
// registers when it is unaligned, same as with `f32` and `f64`.
push(Reg::i64());
push(Reg::i64());
}
DoubleWord::F128End => {} // Already handled by `F128Start`
DoubleWord::F64 => push(Reg::f64()),
DoubleWord::Words([Word::Integer, Word::Integer]) => push(Reg::i64()),
DoubleWord::Words(words) => {
attrs |= ArgAttribute::InReg;
for word in words {
match word {
Word::F32 => push(Reg::f32()),
Word::Integer => push(Reg::i32()),
}
}
let mut rest_size = arg.layout.size - data.last_offset;
if !rest_size.bytes().is_multiple_of(8) && data.prefix_index < data.prefix.len() {
data.prefix[data.prefix_index] = Some(Reg::i32());
rest_size = rest_size - Reg::i32().size;
}
arg.cast_to(
CastTarget::prefixed(data.prefix, Uniform::new(Reg::i64(), rest_size))
.with_attrs(data.arg_attribute.into()),
);
return;
}
}
}
arg.cast_to(Uniform::new(Reg::i64(), total));
let cast_target = match regs {
[Some(reg), None, rest @ ..] => {
// Just a single register is needed for this value.
debug_assert!(rest.iter().all(|x| x.is_none()));
CastTarget::from(reg)
}
_ => CastTarget::prefixed(regs, Uniform::new(Reg::i8(), Size::ZERO)),
};
arg.cast_to_and_pad_i32(cast_target.with_attrs(attrs.into()), pad);
}
pub(crate) fn compute_abi_info<'a, Ty, C>(cx: &C, fn_abi: &mut FnAbi<'a, Ty>)
@ -217,23 +206,26 @@ where
C: HasDataLayout + HasTargetSpec,
{
if !fn_abi.ret.is_ignore() && fn_abi.ret.layout.is_sized() {
classify_arg(cx, &mut fn_abi.ret, Size::from_bytes(32));
// A return value of 32 bytes or smaller is passed via registers.
classify_arg(cx, &mut fn_abi.ret, Size::from_bytes(32), &mut 0);
}
// sparc64-unknown-linux-{gnu,musl,uclibc} doesn't ignore ZSTs.
let passes_zsts = matches!(cx.target_spec().os, Os::Linux);
let mut double_word_count = 0;
for arg in fn_abi.args.iter_mut() {
if !arg.layout.is_sized() {
continue;
}
if arg.is_ignore() {
// sparc64-unknown-linux-{gnu,musl,uclibc} doesn't ignore ZSTs.
if cx.target_spec().os == Os::Linux
&& matches!(cx.target_spec().env, Env::Gnu | Env::Musl | Env::Uclibc)
&& arg.layout.is_zst()
{
if passes_zsts && arg.layout.is_zst() {
arg.make_indirect_from_ignore();
double_word_count += 1;
}
return;
continue;
}
classify_arg(cx, arg, Size::from_bytes(16));
// An argument of 16 bytes or smaller is passed via registers.
classify_arg(cx, arg, Size::from_bytes(16), &mut double_word_count);
}
}

View file

@ -9,6 +9,7 @@
#![crate_type = "lib"]
#![feature(no_core, lang_items)]
#![no_core]
#![feature(f128)]
extern crate minicore;
use minicore::*;
@ -21,8 +22,33 @@ pub struct Franta {
d: f32,
}
#[repr(C, packed)]
struct Misaligned(i32, f64);
#[repr(C)]
struct AlignToMakeAssemblyShorter<T>(T, f64);
#[repr(C)]
pub struct Floats(i32, f32, f64, f128);
#[repr(C)]
pub struct LessFloats(f32, i32, f64);
#[repr(C)]
pub struct NotMisaligned(i32, Misaligned);
#[repr(C, align(16))]
pub struct Align16(f64, i32, i32);
impl Copy for Misaligned {}
impl<T: Copy> Copy for AlignToMakeAssemblyShorter<T> {}
impl Copy for Floats {}
impl Copy for LessFloats {}
impl Copy for NotMisaligned {}
impl Copy for Align16 {}
// NB: due to delay slots the `ld` following the call is actually executed before the call.
#[no_mangle]
#[unsafe(no_mangle)]
pub unsafe extern "C" fn callee(arg: Franta) {
// CHECK-LABEL: callee:
// CHECK: st %f3, [[PLACE_D:.*]]
@ -54,7 +80,7 @@ extern "C" {
fn tail_call_avoidance_fn();
}
#[no_mangle]
#[unsafe(no_mangle)]
pub unsafe extern "C" fn caller() {
// CHECK-LABEL: caller:
// CHECK: ld [{{.*}}], %f0
@ -62,7 +88,168 @@ pub unsafe extern "C" fn caller() {
// CHECK: ld [{{.*}}], %f2
// CHECK: ld [{{.*}}], %f3
// CHECK: call opaque_callee
// CHECK: mov 3, %o2
// CHECK: mov 3, %o2
opaque_callee(Franta { a: 1.0, b: 2.0, c: 3.0, d: 4.0 }, 3);
tail_call_avoidance_fn();
}
// Check that misaligned floats aren't promoted to floating point registers.
// CHECK-LABEL: misaligned_arg:
#[unsafe(no_mangle)]
extern "C" fn misaligned_arg(x: &mut AlignToMakeAssemblyShorter<Misaligned>, value: Misaligned) {
// CHECK: srlx %o2, 32, %o2
// CHECK-NEXT: stx %o1, [%o0]
// CHECK-NEXT: retl
// CHECK-NEXT: st %o2, [%o0+8]
x.0 = value;
}
// CHECK-LABEL: misaligned_ret:
#[unsafe(no_mangle)]
extern "C" fn misaligned_ret(x: &AlignToMakeAssemblyShorter<Misaligned>) -> Misaligned {
// CHECK: ld [%o0+8], %o1
// CHECK-NEXT: ldx [%o0], %o0
// CHECK-NEXT: retl
// CHECK-NEXT: sllx %o1, 32, %o1
x.0
}
// Check structs where 32 >= size > 16 are promoted to register only as an argument.
// Also check that the various floating-point types are promoted to the correct registers.
// CHECK-LABEL: floats_arg:
#[unsafe(no_mangle)]
extern "C" fn floats_arg(x: &mut Floats, value: Floats) {
// CHECK: ldx [%o1+24], %o2
// CHECK-NEXT: ldx [%o1+16], %o3
// CHECK-NEXT: ldx [%o1+8], %o4
// CHECK-NEXT: ldx [%o1], %o1
// CHECK-NEXT: stx %o2, [%o0+24]
// CHECK-NEXT: stx %o3, [%o0+16]
// CHECK-NEXT: stx %o4, [%o0+8]
// CHECK-NEXT: retl
// CHECK-NEXT: stx %o1, [%o0]
*x = value;
}
// CHECK-LABEL: floats_ret:
#[unsafe(no_mangle)]
extern "C" fn floats_ret(x: &Floats) -> Floats {
// CHECK: ld [%o0+4], %f1
// CHECK-NEXT: ldd [%o0+8], %f2
// CHECK-NEXT: ldd [%o0+16], %f4
// CHECK-NEXT: ld [%o0], %o1
// CHECK-NEXT: ldd [%o0+24], %f6
// CHECK-NEXT: retl
// CHECK-NEXT: sllx %o1, 32, %o0
*x
}
// Check float promotion when passing as an argument with a struct where size <= 16.
// CHECK-LABEL: less_floats_arg:
#[unsafe(no_mangle)]
extern "C" fn less_floats_arg(x: &mut LessFloats, value: LessFloats) {
// CHECK: st %f2, [%o0]
// CHECK-NEXT: st %o1, [%o0+4]
// CHECK-NEXT: retl
// CHECK-NEXT: std %f4, [%o0+8]
*x = value;
}
// CHECK-LABEL: less_floats_ret:
#[unsafe(no_mangle)]
extern "C" fn less_floats_ret(x: &LessFloats) -> LessFloats {
// CHECK: ld [%o0], %f0
// CHECK-NEXT: ldd [%o0+8], %f2
// CHECK-NEXT: retl
// CHECK-NEXT: ld [%o0+4], %o0
*x
}
// Check fields are promoted if they are aligned in the overall structure.
// This matches Clang's behaviour but not GCC's.
// CHECK-LABEL: not_misaligned_arg:
#[unsafe(no_mangle)]
extern "C" fn not_misaligned_arg(
x: &mut AlignToMakeAssemblyShorter<NotMisaligned>,
value: NotMisaligned,
) {
// CHECK: stx %o1, [%o0]
// CHECK-NEXT: retl
// CHECK-NEXT: std %f4, [%o0+8]
x.0 = value;
}
// CHECK-LABEL: not_misaligned_ret:
#[unsafe(no_mangle)]
extern "C" fn not_misaligned_ret(x: &AlignToMakeAssemblyShorter<NotMisaligned>) -> NotMisaligned {
// CHECK: ldx [%o0], %o1
// CHECK-NEXT: ldd [%o0+8], %f2
// CHECK-NEXT: retl
// CHECK-NEXT: mov %o1, %o0
x.0
}
// Check that 16-aligned structs are allocated the correct registers.
// CHECK-LABEL: align_16_arg:
#[unsafe(no_mangle)]
extern "C" fn align_16_arg(x: &mut Align16, value: Align16) {
// CHECK: std %f4, [%o0]
// CHECK-NEXT: retl
// CHECK-NEXT: stx %o3, [%o0+8]
*x = value;
}
// CHECK-LABEL: align_16_ret:
#[unsafe(no_mangle)]
extern "C" fn align_16_ret(x: &Align16) -> Align16 {
// CHECK: ldd [%o0], %f0
// CHECK-NEXT: retl
// CHECK-NEXT: ldx [%o0+8], %o1
*x
}
// Check ZST args don't prevent further arguments from being processed.
// CHECK-LABEL: zst_arg:
#[unsafe(no_mangle)]
extern "C" fn zst_arg(_: (), value: LessFloats, x: &mut LessFloats) {
// CHECK: st %f0, [%o2]
// CHECK-NEXT: st %o0, [%o2+4]
// CHECK-NEXT: retl
// CHECK-NEXT: std %f2, [%o2+8]
*x = value;
}
#[repr(C)]
struct I32F32Input {
a: i32,
b: f32,
}
#[repr(C)]
struct I32F32Output {
b: f32,
a: i32,
}
// The clang/LLVM implementation mentions that this case requires special handling.
// CHECK-LABEL: i32_f32:
#[unsafe(no_mangle)]
extern "C" fn i32_f32(input: I32F32Input) -> I32F32Output {
// CHECK: srlx %o0, 32, %o0
// CHECK-NEXT: fmovs %f1, %f0
// CHECK-NEXT: retl
// CHECK-NEXT: nop
I32F32Output { a: input.a, b: input.b }
}
#[repr(C)]
pub struct C {
a: f64,
b: f32,
}
// regression test for https://github.com/rust-lang/rust/issues/147883.
#[unsafe(no_mangle)]
pub extern "C" fn foo(c: C) -> C {
c
}

View file

@ -119,7 +119,7 @@ pub extern "C" fn returns_twou16s() -> TwoU16s {
// aarch64-SAME: ([[ABI_TYPE:\[2 x i64\]]] {{.*}}[[ABI_VALUE:%.+]])
// loongarch64-SAME: ([[ABI_TYPE:\[2 x i64\]]] {{.*}}[[ABI_VALUE:%.+]])
// powerpc64-SAME: ([[ABI_TYPE:\[2 x i64\]]] {{.*}}[[ABI_VALUE:%.+]])
// sparc64-SAME: ([[ABI_TYPE:\[2 x i64\]]] {{.*}}[[ABI_VALUE:%.+]])
// sparc64-SAME: ([[ABI_TYPE:{ i64, i64 }]] {{.*}}[[ABI_VALUE:%.+]])
// x86_64-SAME: ([[ABI_TYPE:{ i64, i16 }]] {{.*}}[[ABI_VALUE:%.+]])
#[no_mangle]
#[inline(never)]
@ -148,7 +148,7 @@ pub extern "C" fn returns_fiveu16s() -> FiveU16s {
// aarch64: [[ABI_VALUE:%.+]] = load [[ABI_TYPE:\[2 x i64\]]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]]
// loongarch64: [[ABI_VALUE:%.+]] = load [[ABI_TYPE:\[2 x i64\]]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]]
// sparc64: [[ABI_VALUE:%.+]] = load [[ABI_TYPE:\[2 x i64\]]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]]
// sparc64: [[ABI_VALUE:%.+]] = load [[ABI_TYPE:{ i64, i64 }]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]]
// x86_64: [[ABI_VALUE:%.+]] = load [[ABI_TYPE:{ i64, i16 }]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]]
// aarch64: ret [[ABI_TYPE]] [[ABI_VALUE]]
@ -217,7 +217,7 @@ pub extern "C" fn returns_doubledouble() -> DoubleDouble {
// aarch64-SAME: ([[ABI_TYPE:\[2 x i64\]]] {{.*}}[[ABI_VALUE:%.+]])
// loongarch64-SAME: ([[ABI_TYPE:\[2 x i64\]]] {{.*}}[[ABI_VALUE:%.+]])
// powerpc64-SAME: ([[ABI_TYPE:\[2 x i64\]]] {{.*}}[[ABI_VALUE:%.+]])
// sparc64-SAME: ([[ABI_TYPE:\[2 x i64\]]] {{.*}}[[ABI_VALUE:%.+]])
// sparc64-SAME: ([[ABI_TYPE:{ i64, i64 }]] {{.*}}[[ABI_VALUE:%.+]])
// x86_64-SAME: ([[ABI_TYPE:{ i64, i32 }]] {{.*}}[[ABI_VALUE:%.+]])
#[no_mangle]
#[inline(never)]
@ -246,7 +246,7 @@ pub extern "C" fn returns_three32s() -> Three32s {
// aarch64: [[ABI_VALUE:%.+]] = load [[ABI_TYPE:\[2 x i64\]]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]]
// loongarch64: [[ABI_VALUE:%.+]] = load [[ABI_TYPE:\[2 x i64\]]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]]
// sparc64: [[ABI_VALUE:%.+]] = load [[ABI_TYPE:\[2 x i64\]]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]]
// sparc64: [[ABI_VALUE:%.+]] = load [[ABI_TYPE:{ i64, i64 }]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]]
// x86_64: [[ABI_VALUE:%.+]] = load [[ABI_TYPE:{ i64, i32 }]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]]
// aarch64: ret [[ABI_TYPE]] [[ABI_VALUE]]
@ -399,7 +399,7 @@ pub fn call_fiveu16s() {
// aarch64: [[ABI_VALUE:%.+]] = load [[ABI_TYPE:\[2 x i64\]]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]]
// loongarch64: [[ABI_VALUE:%.+]] = load [[ABI_TYPE:\[2 x i64\]]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]]
// powerpc64: [[ABI_VALUE:%.+]] = load [[ABI_TYPE:\[2 x i64\]]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]]
// sparc64: [[ABI_VALUE:%.+]] = load [[ABI_TYPE:\[2 x i64\]]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]]
// sparc64: [[ABI_VALUE:%.+]] = load [[ABI_TYPE:{ i64, i64 }]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]]
// x86_64: [[ABI_VALUE:%.+]] = load [[ABI_TYPE:{ i64, i16 }]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]]
// CHECK: call void @receives_fiveu16s([[ABI_TYPE]] [[ABI_VALUE]])
@ -424,7 +424,7 @@ pub fn return_fiveu16s() -> FiveU16s {
// aarch64: [[ABI_VALUE:%.+]] = call [[ABI_TYPE:\[2 x i64\]]] @returns_fiveu16s()
// loongarch64: [[ABI_VALUE:%.+]] = call [[ABI_TYPE:\[2 x i64\]]] @returns_fiveu16s()
// sparc64: [[ABI_VALUE:%.+]] = call [[ABI_TYPE:\[2 x i64\]]] @returns_fiveu16s()
// sparc64: [[ABI_VALUE:%.+]] = call [[ABI_TYPE:{ i64, i64 }]] @returns_fiveu16s()
// x86_64: [[ABI_VALUE:%.+]] = call [[ABI_TYPE:{ i64, i16 }]] @returns_fiveu16s()
// aarch64: store [[ABI_TYPE]] [[ABI_VALUE]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]]
@ -595,7 +595,7 @@ pub fn call_three32s() {
// aarch64: [[ABI_VALUE:%.+]] = load [[ABI_TYPE:\[2 x i64\]]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]]
// loongarch64: [[ABI_VALUE:%.+]] = load [[ABI_TYPE:\[2 x i64\]]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]]
// powerpc64: [[ABI_VALUE:%.+]] = load [[ABI_TYPE:\[2 x i64\]]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]]
// sparc64: [[ABI_VALUE:%.+]] = load [[ABI_TYPE:\[2 x i64\]]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]]
// sparc64: [[ABI_VALUE:%.+]] = load [[ABI_TYPE:{ i64, i64 }]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]]
// x86_64: [[ABI_VALUE:%.+]] = load [[ABI_TYPE:{ i64, i32 }]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]]
// CHECK: call void @receives_three32s([[ABI_TYPE]] [[ABI_VALUE]])
@ -619,7 +619,7 @@ pub fn return_three32s() -> Three32s {
// aarch64: [[ABI_VALUE:%.+]] = call [[ABI_TYPE:\[2 x i64\]]] @returns_three32s()
// loongarch64: [[ABI_VALUE:%.+]] = call [[ABI_TYPE:\[2 x i64\]]] @returns_three32s()
// sparc64: [[ABI_VALUE:%.+]] = call [[ABI_TYPE:\[2 x i64\]]] @returns_three32s()
// sparc64: [[ABI_VALUE:%.+]] = call [[ABI_TYPE:{ i64, i64 }]] @returns_three32s()
// x86_64: [[ABI_VALUE:%.+]] = call [[ABI_TYPE:{ i64, i32 }]] @returns_three32s()
// aarch64: store [[ABI_TYPE]] [[ABI_VALUE]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]]