codegen: implement repr(scalable)

Introduces `BackendRepr::ScalableVector` corresponding to scalable
vector types annotated with `repr(scalable)` which lowers to a scalable
vector type in LLVM.

Co-authored-by: Jamie Cunliffe <Jamie.Cunliffe@arm.com>
This commit is contained in:
David Wood 2025-07-10 10:17:44 +00:00
parent ba9262936e
commit a56b1b9283
No known key found for this signature in database
44 changed files with 571 additions and 74 deletions

View file

@ -82,6 +82,10 @@ impl<'a, Ty> TyAndLayout<'a, Ty> {
}))
}
BackendRepr::ScalableVector { .. } => {
unreachable!("`homogeneous_aggregate` should not be called for scalable vectors")
}
BackendRepr::ScalarPair(..) | BackendRepr::Memory { sized: true } => {
// Helper for computing `homogeneous_aggregate`, allowing a custom
// starting offset (used below for handling variants).

View file

@ -11,7 +11,7 @@ use tracing::{debug, trace};
use crate::{
AbiAlign, Align, BackendRepr, FieldsShape, HasDataLayout, IndexSlice, IndexVec, Integer,
LayoutData, Niche, NonZeroUsize, Primitive, ReprOptions, Scalar, Size, StructKind, TagEncoding,
Variants, WrappingRange,
TargetDataLayout, Variants, WrappingRange,
};
mod coroutine;
@ -143,58 +143,32 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> {
})
}
pub fn simd_type<
pub fn scalable_vector_type<FieldIdx, VariantIdx, F>(
&self,
element: F,
count: u64,
) -> LayoutCalculatorResult<FieldIdx, VariantIdx, F>
where
FieldIdx: Idx,
VariantIdx: Idx,
F: AsRef<LayoutData<FieldIdx, VariantIdx>> + fmt::Debug,
>(
{
vector_type_layout(VectorKind::Scalable, self.cx.data_layout(), element, count)
}
pub fn simd_type<FieldIdx, VariantIdx, F>(
&self,
element: F,
count: u64,
repr_packed: bool,
) -> LayoutCalculatorResult<FieldIdx, VariantIdx, F> {
let elt = element.as_ref();
if count == 0 {
return Err(LayoutCalculatorError::ZeroLengthSimdType);
} else if count > crate::MAX_SIMD_LANES {
return Err(LayoutCalculatorError::OversizedSimdType {
max_lanes: crate::MAX_SIMD_LANES,
});
}
let BackendRepr::Scalar(e_repr) = elt.backend_repr else {
return Err(LayoutCalculatorError::NonPrimitiveSimdType(element));
};
// Compute the size and alignment of the vector
let dl = self.cx.data_layout();
let size =
elt.size.checked_mul(count, dl).ok_or_else(|| LayoutCalculatorError::SizeOverflow)?;
let (repr, align) = if repr_packed && !count.is_power_of_two() {
// Non-power-of-two vectors have padding up to the next power-of-two.
// If we're a packed repr, remove the padding while keeping the alignment as close
// to a vector as possible.
(BackendRepr::Memory { sized: true }, Align::max_aligned_factor(size))
} else {
(BackendRepr::SimdVector { element: e_repr, count }, dl.llvmlike_vector_align(size))
};
let size = size.align_to(align);
Ok(LayoutData {
variants: Variants::Single { index: VariantIdx::new(0) },
fields: FieldsShape::Arbitrary {
offsets: [Size::ZERO].into(),
memory_index: [0].into(),
},
backend_repr: repr,
largest_niche: elt.largest_niche,
uninhabited: false,
size,
align: AbiAlign::new(align),
max_repr_align: None,
unadjusted_abi_align: elt.align.abi,
randomization_seed: elt.randomization_seed.wrapping_add(Hash64::new(count)),
})
) -> LayoutCalculatorResult<FieldIdx, VariantIdx, F>
where
FieldIdx: Idx,
VariantIdx: Idx,
F: AsRef<LayoutData<FieldIdx, VariantIdx>> + fmt::Debug,
{
let kind = if repr_packed { VectorKind::PackedFixed } else { VectorKind::Fixed };
vector_type_layout(kind, self.cx.data_layout(), element, count)
}
/// Compute the layout for a coroutine.
@ -453,6 +427,7 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> {
BackendRepr::Scalar(..)
| BackendRepr::ScalarPair(..)
| BackendRepr::SimdVector { .. }
| BackendRepr::ScalableVector { .. }
| BackendRepr::Memory { .. } => repr,
},
};
@ -524,7 +499,8 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> {
hide_niches(a);
hide_niches(b);
}
BackendRepr::SimdVector { element, count: _ } => hide_niches(element),
BackendRepr::SimdVector { element, .. }
| BackendRepr::ScalableVector { element, .. } => hide_niches(element),
BackendRepr::Memory { sized: _ } => {}
}
st.largest_niche = None;
@ -1501,3 +1477,67 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> {
s
}
}
/// Which flavour of vector layout `vector_type_layout` should compute,
/// corresponding to the attribute that was applied to the type.
enum VectorKind {
/// `#[rustc_scalable_vector]`
Scalable,
/// `#[repr(simd, packed)]`
PackedFixed,
/// `#[repr(simd)]`
Fixed,
}
/// Computes the layout of a vector type — fixed-length SIMD or scalable —
/// with `count` elements of type `element`.
///
/// Errors if `count` is zero or exceeds `MAX_SIMD_LANES`, if the element is
/// not a primitive scalar, or if the total size overflows.
fn vector_type_layout<FieldIdx, VariantIdx, F>(
kind: VectorKind,
dl: &TargetDataLayout,
element: F,
count: u64,
) -> LayoutCalculatorResult<FieldIdx, VariantIdx, F>
where
FieldIdx: Idx,
VariantIdx: Idx,
F: AsRef<LayoutData<FieldIdx, VariantIdx>> + fmt::Debug,
{
let elt = element.as_ref();
if count == 0 {
return Err(LayoutCalculatorError::ZeroLengthSimdType);
} else if count > crate::MAX_SIMD_LANES {
return Err(LayoutCalculatorError::OversizedSimdType { max_lanes: crate::MAX_SIMD_LANES });
}
// Vector elements must themselves lower to a single scalar.
let BackendRepr::Scalar(element) = elt.backend_repr else {
return Err(LayoutCalculatorError::NonPrimitiveSimdType(element));
};
// Compute the size and alignment of the vector
// NOTE(review): for `VectorKind::Scalable` this is the statically-known
// `element size * count` product; the actual runtime size of a scalable
// vector is only known at runtime — confirm how consumers treat `size`.
let size =
elt.size.checked_mul(count, dl).ok_or_else(|| LayoutCalculatorError::SizeOverflow)?;
let (repr, align) = match kind {
VectorKind::Scalable => {
(BackendRepr::ScalableVector { element, count }, dl.llvmlike_vector_align(size))
}
// Non-power-of-two vectors have padding up to the next power-of-two.
// If we're a packed repr, remove the padding while keeping the alignment as close
// to a vector as possible.
VectorKind::PackedFixed if !count.is_power_of_two() => {
(BackendRepr::Memory { sized: true }, Align::max_aligned_factor(size))
}
VectorKind::PackedFixed | VectorKind::Fixed => {
(BackendRepr::SimdVector { element, count }, dl.llvmlike_vector_align(size))
}
};
let size = size.align_to(align);
Ok(LayoutData {
variants: Variants::Single { index: VariantIdx::new(0) },
// A single conceptual field at offset zero.
fields: FieldsShape::Arbitrary { offsets: [Size::ZERO].into(), memory_index: [0].into() },
backend_repr: repr,
largest_niche: elt.largest_niche,
uninhabited: false,
size,
align: AbiAlign::new(align),
max_repr_align: None,
unadjusted_abi_align: elt.align.abi,
// Mix the lane count into the element's seed so vectors of the same
// element but different lengths get distinct randomization seeds.
randomization_seed: elt.randomization_seed.wrapping_add(Hash64::new(count)),
})
}

View file

@ -172,6 +172,7 @@ pub trait TyAbiInterface<'a, C>: Sized + std::fmt::Debug {
fn is_tuple(this: TyAndLayout<'a, Self>) -> bool;
fn is_unit(this: TyAndLayout<'a, Self>) -> bool;
fn is_transparent(this: TyAndLayout<'a, Self>) -> bool;
fn is_scalable_vector(this: TyAndLayout<'a, Self>) -> bool;
/// See [`TyAndLayout::pass_indirectly_in_non_rustic_abis`] for details.
fn is_pass_indirectly_in_non_rustic_abis_flag_set(this: TyAndLayout<'a, Self>) -> bool;
}
@ -271,6 +272,13 @@ impl<'a, Ty> TyAndLayout<'a, Ty> {
Ty::is_transparent(self)
}
/// Returns `true` if this layout's type is a scalable vector type.
/// Delegates to [`TyAbiInterface::is_scalable_vector`].
pub fn is_scalable_vector<C>(self) -> bool
where
Ty: TyAbiInterface<'a, C>,
{
Ty::is_scalable_vector(self)
}
/// If this method returns `true`, then this type should always have a `PassMode` of
/// `Indirect { on_stack: false, .. }` when being used as the argument type of a function with a
/// non-Rustic ABI (this is true for structs annotated with the

View file

@ -1758,6 +1758,10 @@ impl AddressSpace {
pub enum BackendRepr {
Scalar(Scalar),
ScalarPair(Scalar, Scalar),
ScalableVector {
element: Scalar,
count: u64,
},
SimdVector {
element: Scalar,
count: u64,
@ -1776,6 +1780,12 @@ impl BackendRepr {
match *self {
BackendRepr::Scalar(_)
| BackendRepr::ScalarPair(..)
// FIXME(rustc_scalable_vector): Scalable vectors are `Sized` while the
// `sized_hierarchy` feature is not yet fully implemented. After `sized_hierarchy` is
// fully implemented, scalable vectors will remain `Sized`, they just won't be
// `const Sized` - whether `is_unsized` continues to return `false` at that point will
// need to be revisited and will depend on what `is_unsized` is used for.
| BackendRepr::ScalableVector { .. }
| BackendRepr::SimdVector { .. } => false,
BackendRepr::Memory { sized } => !sized,
}
@ -1816,7 +1826,9 @@ impl BackendRepr {
BackendRepr::Scalar(s) => Some(s.align(cx).abi),
BackendRepr::ScalarPair(s1, s2) => Some(s1.align(cx).max(s2.align(cx)).abi),
// The align of a Vector can vary in surprising ways
BackendRepr::SimdVector { .. } | BackendRepr::Memory { .. } => None,
BackendRepr::SimdVector { .. }
| BackendRepr::Memory { .. }
| BackendRepr::ScalableVector { .. } => None,
}
}
@ -1838,7 +1850,9 @@ impl BackendRepr {
Some(size)
}
// The size of a Vector can vary in surprising ways
BackendRepr::SimdVector { .. } | BackendRepr::Memory { .. } => None,
BackendRepr::SimdVector { .. }
| BackendRepr::Memory { .. }
| BackendRepr::ScalableVector { .. } => None,
}
}
@ -1853,6 +1867,9 @@ impl BackendRepr {
BackendRepr::SimdVector { element: element.to_union(), count }
}
BackendRepr::Memory { .. } => BackendRepr::Memory { sized: true },
BackendRepr::ScalableVector { element, count } => {
BackendRepr::ScalableVector { element: element.to_union(), count }
}
}
}
@ -2093,7 +2110,9 @@ impl<FieldIdx: Idx, VariantIdx: Idx> LayoutData<FieldIdx, VariantIdx> {
/// Returns `true` if this is an aggregate type (including a ScalarPair!)
pub fn is_aggregate(&self) -> bool {
match self.backend_repr {
BackendRepr::Scalar(_) | BackendRepr::SimdVector { .. } => false,
BackendRepr::Scalar(_)
| BackendRepr::SimdVector { .. }
| BackendRepr::ScalableVector { .. } => false,
BackendRepr::ScalarPair(..) | BackendRepr::Memory { .. } => true,
}
}
@ -2187,6 +2206,19 @@ impl<FieldIdx: Idx, VariantIdx: Idx> LayoutData<FieldIdx, VariantIdx> {
self.is_sized() && self.size.bytes() == 0 && self.align.bytes() == 1
}
/// Returns `true` if the size of the type is only known at runtime.
///
/// Currently this is only the case for scalable vectors
/// (`BackendRepr::ScalableVector`).
pub fn is_runtime_sized(&self) -> bool {
matches!(self.backend_repr, BackendRepr::ScalableVector { .. })
}
/// Returns the element count of a scalable vector, or `None` if this layout
/// is not a scalable vector.
pub fn scalable_vector_element_count(&self) -> Option<u64> {
if let BackendRepr::ScalableVector { count, .. } = self.backend_repr {
Some(count)
} else {
None
}
}
/// Returns `true` if the type is a ZST and not unsized.
///
/// Note that this does *not* imply that the type is irrelevant for layout! It can still have
@ -2195,6 +2227,7 @@ impl<FieldIdx: Idx, VariantIdx: Idx> LayoutData<FieldIdx, VariantIdx> {
match self.backend_repr {
BackendRepr::Scalar(_)
| BackendRepr::ScalarPair(..)
| BackendRepr::ScalableVector { .. }
| BackendRepr::SimdVector { .. } => false,
BackendRepr::Memory { sized } => sized && self.size.bytes() == 0,
}

View file

@ -943,6 +943,10 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
.get_address(self.location)
}
// FIXME(rustc_scalable_vector): Scalable vector allocas are not yet
// implemented in rustc_codegen_gcc.
fn scalable_alloca(&mut self, _elt: u64, _align: Align, _element_ty: Ty<'_>) -> RValue<'gcc> {
todo!()
}
fn load(&mut self, pointee_ty: Type<'gcc>, ptr: RValue<'gcc>, align: Align) -> RValue<'gcc> {
let block = self.llbb();
let function = block.get_function();

View file

@ -504,7 +504,7 @@ impl<'a, 'gcc, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tc
let layout = self.layout_of(tp_ty).layout;
let _use_integer_compare = match layout.backend_repr() {
Scalar(_) | ScalarPair(_, _) => true,
SimdVector { .. } => false,
SimdVector { .. } | ScalableVector { .. } => false,
Memory { .. } => {
// For rusty ABIs, small aggregates are actually passed
// as `RegKind::Integer` (see `FnAbi::adjust_for_abi`),

View file

@ -85,6 +85,7 @@ fn uncached_gcc_type<'gcc, 'tcx>(
);
}
BackendRepr::Memory { .. } => {}
BackendRepr::ScalableVector { .. } => todo!(),
}
let name = match *layout.ty.kind() {
@ -179,6 +180,8 @@ impl<'tcx> LayoutGccExt<'tcx> for TyAndLayout<'tcx> {
fn is_gcc_immediate(&self) -> bool {
match self.backend_repr {
BackendRepr::Scalar(_) | BackendRepr::SimdVector { .. } => true,
// FIXME(rustc_scalable_vector): Not yet implemented in rustc_codegen_gcc.
BackendRepr::ScalableVector { .. } => todo!(),
BackendRepr::ScalarPair(..) | BackendRepr::Memory { .. } => false,
}
}
@ -188,6 +191,7 @@ impl<'tcx> LayoutGccExt<'tcx> for TyAndLayout<'tcx> {
BackendRepr::ScalarPair(..) => true,
BackendRepr::Scalar(_)
| BackendRepr::SimdVector { .. }
| BackendRepr::ScalableVector { .. }
| BackendRepr::Memory { .. } => false,
}
}

View file

@ -613,6 +613,25 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
}
}
/// Builds an alloca of a scalable vector type with `elt` elements of
/// `element_ty`, emitted at the start of the function's first basic block.
fn scalable_alloca(&mut self, elt: u64, align: Align, element_ty: Ty<'_>) -> Self::Value {
// Use a fresh builder positioned at the start of the entry block rather
// than the current insertion point.
let mut bx = Builder::with_cx(self.cx);
bx.position_at_start(unsafe { llvm::LLVMGetFirstBasicBlock(self.llfn()) });
// Only primitive elements are legal in a scalable vector.
let llvm_ty = match element_ty.kind() {
ty::Bool => bx.type_i1(),
ty::Int(int_ty) => self.cx.type_int_from_ty(*int_ty),
ty::Uint(uint_ty) => self.cx.type_uint_from_ty(*uint_ty),
ty::Float(float_ty) => self.cx.type_float_from_ty(*float_ty),
_ => unreachable!("scalable vectors can only contain a bool, int, uint or float"),
};
// SAFETY: `llvm_ty` is a valid LLVM type; the builder is positioned in a
// valid block of `self.llfn()`. The element count conversion panics (via
// `unwrap`) instead of truncating if it does not fit the C `unsigned`.
unsafe {
let ty = llvm::LLVMScalableVectorType(llvm_ty, elt.try_into().unwrap());
let alloca = llvm::LLVMBuildAlloca(&bx.llbuilder, ty, UNNAMED);
llvm::LLVMSetAlignment(alloca, align.bytes() as c_uint);
alloca
}
}
fn load(&mut self, ty: &'ll Type, ptr: &'ll Value, align: Align) -> &'ll Value {
unsafe {
let load = llvm::LLVMBuildLoad2(self.llbuilder, ty, ptr, UNNAMED);

View file

@ -480,6 +480,14 @@ impl<'ll, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> {
let use_integer_compare = match layout.backend_repr() {
Scalar(_) | ScalarPair(_, _) => true,
SimdVector { .. } => false,
ScalableVector { .. } => {
tcx.dcx().emit_err(InvalidMonomorphization::NonScalableType {
span,
name: sym::raw_eq,
ty: tp_ty,
});
return Ok(());
}
Memory { .. } => {
// For rusty ABIs, small aggregates are actually passed
// as `RegKind::Integer` (see `FnAbi::adjust_for_abi`),
@ -1679,11 +1687,27 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
m_len == v_len,
InvalidMonomorphization::MismatchedLengths { span, name, m_len, v_len }
);
let in_elem_bitwidth = require_int_or_uint_ty!(
m_elem_ty.kind(),
InvalidMonomorphization::MaskWrongElementType { span, name, ty: m_elem_ty }
);
let m_i1s = vector_mask_to_bitmask(bx, args[0].immediate(), in_elem_bitwidth, m_len);
let m_i1s = if args[1].layout.ty.is_scalable_vector() {
match m_elem_ty.kind() {
ty::Bool => {}
_ => return_error!(InvalidMonomorphization::MaskWrongElementType {
span,
name,
ty: m_elem_ty
}),
};
let i1 = bx.type_i1();
let i1xn = bx.type_scalable_vector(i1, m_len as u64);
bx.trunc(args[0].immediate(), i1xn)
} else {
let in_elem_bitwidth = require_int_or_uint_ty!(
m_elem_ty.kind(),
InvalidMonomorphization::MaskWrongElementType { span, name, ty: m_elem_ty }
);
vector_mask_to_bitmask(bx, args[0].immediate(), in_elem_bitwidth, m_len)
};
return Ok(bx.select(m_i1s, args[1].immediate(), args[2].immediate()));
}

View file

@ -998,6 +998,7 @@ unsafe extern "C" {
// Operations on array, pointer, and vector types (sequence types)
pub(crate) safe fn LLVMPointerTypeInContext(C: &Context, AddressSpace: c_uint) -> &Type;
pub(crate) fn LLVMVectorType(ElementType: &Type, ElementCount: c_uint) -> &Type;
pub(crate) fn LLVMScalableVectorType(ElementType: &Type, ElementCount: c_uint) -> &Type;
pub(crate) fn LLVMGetElementType(Ty: &Type) -> &Type;
pub(crate) fn LLVMGetVectorSize(VectorTy: &Type) -> c_uint;

View file

@ -68,6 +68,10 @@ impl<'ll, CX: Borrow<SCx<'ll>>> GenericCx<'ll, CX> {
unsafe { llvm::LLVMVectorType(ty, len as c_uint) }
}
/// Creates an LLVM scalable vector type, `<vscale x count x ty>`.
pub(crate) fn type_scalable_vector(&self, ty: &'ll Type, count: u64) -> &'ll Type {
// `LLVMScalableVectorType` takes a `c_uint`: convert checked so an
// out-of-range element count panics instead of silently truncating.
let count: c_uint =
count.try_into().expect("scalable vector element count overflows `c_uint`");
unsafe { llvm::LLVMScalableVectorType(ty, count) }
}
pub(crate) fn add_func(&self, name: &str, ty: &'ll Type) -> &'ll Value {
let name = SmallCStr::new(name);
unsafe { llvm::LLVMAddFunction(self.llmod(), name.as_ptr(), ty) }

View file

@ -24,6 +24,15 @@ fn uncached_llvm_type<'a, 'tcx>(
let element = layout.scalar_llvm_type_at(cx, element);
return cx.type_vector(element, count);
}
BackendRepr::ScalableVector { ref element, count } => {
let element = if element.is_bool() {
cx.type_i1()
} else {
layout.scalar_llvm_type_at(cx, *element)
};
return cx.type_scalable_vector(element, count);
}
BackendRepr::Memory { .. } | BackendRepr::ScalarPair(..) => {}
}
@ -176,7 +185,9 @@ pub(crate) trait LayoutLlvmExt<'tcx> {
impl<'tcx> LayoutLlvmExt<'tcx> for TyAndLayout<'tcx> {
fn is_llvm_immediate(&self) -> bool {
match self.backend_repr {
BackendRepr::Scalar(_) | BackendRepr::SimdVector { .. } => true,
BackendRepr::Scalar(_)
| BackendRepr::SimdVector { .. }
| BackendRepr::ScalableVector { .. } => true,
BackendRepr::ScalarPair(..) | BackendRepr::Memory { .. } => false,
}
}
@ -186,6 +197,7 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyAndLayout<'tcx> {
BackendRepr::ScalarPair(..) => true,
BackendRepr::Scalar(_)
| BackendRepr::SimdVector { .. }
| BackendRepr::ScalableVector { .. }
| BackendRepr::Memory { .. } => false,
}
}

View file

@ -549,7 +549,7 @@ fn emit_x86_64_sysv64_va_arg<'ll, 'tcx>(
registers_for_primitive(scalar1.primitive());
registers_for_primitive(scalar2.primitive());
}
BackendRepr::SimdVector { .. } => {
BackendRepr::SimdVector { .. } | BackendRepr::ScalableVector { .. } => {
// Because no instance of VaArgSafe uses a non-scalar `BackendRepr`.
unreachable!(
"No x86-64 SysV va_arg implementation for {:?}",
@ -689,7 +689,9 @@ fn emit_x86_64_sysv64_va_arg<'ll, 'tcx>(
}
}
// The Previous match on `BackendRepr` means control flow already escaped.
BackendRepr::SimdVector { .. } | BackendRepr::Memory { .. } => unreachable!(),
BackendRepr::SimdVector { .. }
| BackendRepr::ScalableVector { .. }
| BackendRepr::Memory { .. } => unreachable!(),
};
// AMD64-ABI 3.5.7p5: Step 5. Set:

View file

@ -129,6 +129,7 @@ codegen_ssa_invalid_monomorphization_mask_wrong_element_type = invalid monomorph
codegen_ssa_invalid_monomorphization_mismatched_lengths = invalid monomorphization of `{$name}` intrinsic: mismatched lengths: mask length `{$m_len}` != other vector length `{$v_len}`
codegen_ssa_invalid_monomorphization_non_scalable_type = invalid monomorphization of `{$name}` intrinsic: expected non-scalable type, found scalable type `{$ty}`
codegen_ssa_invalid_monomorphization_return_element = invalid monomorphization of `{$name}` intrinsic: expected return element type `{$in_elem}` (element of input `{$in_ty}`), found `{$ret_ty}` with element type `{$out_ty}`
codegen_ssa_invalid_monomorphization_return_integer_type = invalid monomorphization of `{$name}` intrinsic: expected return type with integer elements, found `{$ret_ty}` with non-integer `{$out_ty}`

View file

@ -1094,6 +1094,14 @@ pub enum InvalidMonomorphization<'tcx> {
expected_element: Ty<'tcx>,
vector_type: Ty<'tcx>,
},
#[diag(codegen_ssa_invalid_monomorphization_non_scalable_type, code = E0511)]
NonScalableType {
#[primary_span]
span: Span,
name: Symbol,
ty: Ty<'tcx>,
},
}
pub enum ExpectedPointerMutability {

View file

@ -405,7 +405,9 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> {
imm
}
}
BackendRepr::ScalarPair(_, _) | BackendRepr::Memory { .. } => bug!(),
BackendRepr::ScalarPair(_, _)
| BackendRepr::Memory { .. }
| BackendRepr::ScalableVector { .. } => bug!(),
})
};
@ -692,7 +694,9 @@ impl<'a, 'tcx, V: CodegenObject> OperandRefBuilder<'tcx, V> {
BackendRepr::ScalarPair(a, b) => {
OperandValueBuilder::Pair(Either::Right(a), Either::Right(b))
}
BackendRepr::SimdVector { .. } => OperandValueBuilder::Vector(Either::Right(())),
BackendRepr::SimdVector { .. } | BackendRepr::ScalableVector { .. } => {
OperandValueBuilder::Vector(Either::Right(()))
}
BackendRepr::Memory { .. } => {
bug!("Cannot use non-ZST Memory-ABI type in operand builder: {layout:?}");
}

View file

@ -109,7 +109,11 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
bx: &mut Bx,
layout: TyAndLayout<'tcx>,
) -> Self {
Self::alloca_size(bx, layout.size, layout)
if layout.is_runtime_sized() {
Self::alloca_runtime_sized(bx, layout)
} else {
Self::alloca_size(bx, layout.size, layout)
}
}
pub fn alloca_size<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
@ -146,6 +150,18 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
bug!("unexpected layout `{:#?}` in PlaceRef::len", self.layout)
}
}
/// Allocates stack space for a runtime-sized (scalable vector) layout by
/// emitting a scalable alloca from its element count and element type.
fn alloca_runtime_sized<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
bx: &mut Bx,
layout: TyAndLayout<'tcx>,
) -> Self {
let (count, element_ty) = layout.ty.scalable_vector_element_count_and_type(bx.tcx());
let align = layout.align.abi;
let value = bx.scalable_alloca(u64::from(count), align, element_ty);
PlaceValue::new_sized(value, align).with_type(layout)
}
}
impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {

View file

@ -235,6 +235,7 @@ pub trait BuilderMethods<'a, 'tcx>:
fn to_immediate_scalar(&mut self, val: Self::Value, scalar: Scalar) -> Self::Value;
fn alloca(&mut self, size: Size, align: Align) -> Self::Value;
fn scalable_alloca(&mut self, elt: u64, align: Align, element_ty: Ty<'_>) -> Self::Value;
fn load(&mut self, ty: Self::Type, ptr: Self::Value, align: Align) -> Self::Value;
fn volatile_load(&mut self, ty: Self::Type, ptr: Self::Value) -> Self::Value;

View file

@ -1315,7 +1315,7 @@ impl<'rt, 'tcx, M: Machine<'tcx>> ValueVisitor<'tcx, M> for ValidityVisitor<'rt,
self.visit_scalar(b, b_layout)?;
}
}
BackendRepr::SimdVector { .. } => {
BackendRepr::SimdVector { .. } | BackendRepr::ScalableVector { .. } => {
// No checks here, we assume layout computation gets this right.
// (This is harder to check since Miri does not represent these as `Immediate`. We
// also cannot use field projections since this might be a newtype around a vector.)

View file

@ -119,7 +119,9 @@ fn check_validity_requirement_lax<'tcx>(
}
BackendRepr::SimdVector { element: s, count } => count == 0 || scalar_allows_raw_init(s),
BackendRepr::Memory { .. } => true, // Fields are checked below.
BackendRepr::ScalableVector { element, .. } => scalar_allows_raw_init(element),
};
if !valid {
// This is definitely not okay.
return Ok(false);

View file

@ -1178,6 +1178,10 @@ where
matches!(this.ty.kind(), ty::Adt(def, _) if def.repr().transparent())
}
/// See [`TyAndLayout::is_scalable_vector`] for details.
fn is_scalable_vector(this: TyAndLayout<'tcx>) -> bool {
this.ty.is_scalable_vector()
}
/// See [`TyAndLayout::pass_indirectly_in_non_rustic_abis`] for details.
fn is_pass_indirectly_in_non_rustic_abis_flag_set(this: TyAndLayout<'tcx>) -> bool {
matches!(this.ty.kind(), ty::Adt(def, _) if def.repr().flags.contains(ReprFlags::PASS_INDIRECTLY_IN_NON_RUSTIC_ABIS))

View file

@ -7,7 +7,7 @@ use std::borrow::Cow;
use std::ops::{ControlFlow, Range};
use hir::def::{CtorKind, DefKind};
use rustc_abi::{FIRST_VARIANT, FieldIdx, VariantIdx};
use rustc_abi::{FIRST_VARIANT, FieldIdx, ScalableElt, VariantIdx};
use rustc_errors::{ErrorGuaranteed, MultiSpan};
use rustc_hir as hir;
use rustc_hir::LangItem;
@ -1269,6 +1269,19 @@ impl<'tcx> Ty<'tcx> {
}
}
/// Returns the element count and element type of a scalable vector type.
///
/// Panics (via `bug!`) if `self` is not an ADT annotated with
/// `#[rustc_scalable_vector(N)]`, or (via `assert_eq!`) if the struct does
/// not have exactly one field.
pub fn scalable_vector_element_count_and_type(self, tcx: TyCtxt<'tcx>) -> (u16, Ty<'tcx>) {
let Adt(def, args) = self.kind() else {
// Bug messages previously named a nonexistent `scalable_vector_size_and_type`.
bug!("`scalable_vector_element_count_and_type` called on invalid type")
};
let Some(ScalableElt::ElementCount(element_count)) = def.repr().scalable else {
bug!("`scalable_vector_element_count_and_type` called on non-scalable vector type");
};
// Scalable vectors are single-field structs; that field is the element type.
let variant = def.non_enum_variant();
assert_eq!(variant.fields.len(), 1);
let field_ty = variant.fields[FieldIdx::ZERO].ty(tcx, args);
(element_count, field_ty)
}
pub fn simd_size_and_type(self, tcx: TyCtxt<'tcx>) -> (u64, Ty<'tcx>) {
let Adt(def, args) = self.kind() else {
bug!("`simd_size_and_type` called on invalid type")

View file

@ -1661,7 +1661,9 @@ impl<'body, 'a, 'tcx> VnState<'body, 'a, 'tcx> {
BackendRepr::ScalarPair(a, b) => {
!a.is_always_valid(&self.ecx) || !b.is_always_valid(&self.ecx)
}
BackendRepr::SimdVector { .. } | BackendRepr::Memory { .. } => false,
BackendRepr::SimdVector { .. }
| BackendRepr::ScalableVector { .. }
| BackendRepr::Memory { .. } => false,
}
}

View file

@ -225,6 +225,10 @@ pub enum ValueAbi {
element: Scalar,
count: u64,
},
ScalableVector {
element: Scalar,
count: u64,
},
Aggregate {
/// If true, the size is exact, otherwise it's only a lower bound.
sized: bool,
@ -235,7 +239,15 @@ impl ValueAbi {
/// Returns `true` if the layout corresponds to an unsized type.
pub fn is_unsized(&self) -> bool {
match *self {
ValueAbi::Scalar(_) | ValueAbi::ScalarPair(..) | ValueAbi::Vector { .. } => false,
ValueAbi::Scalar(_)
| ValueAbi::ScalarPair(..)
| ValueAbi::Vector { .. }
// FIXME(rustc_scalable_vector): Scalable vectors are `Sized` while the
// `sized_hierarchy` feature is not yet fully implemented. After `sized_hierarchy` is
// fully implemented, scalable vectors will remain `Sized`, they just won't be
// `const Sized` - whether `is_unsized` continues to return `false` at that point will
// need to be revisited and will depend on what `is_unsized` is used for.
| ValueAbi::ScalableVector { .. } => false,
ValueAbi::Aggregate { sized } => !sized,
}
}

View file

@ -256,6 +256,9 @@ impl<'tcx> Stable<'tcx> for rustc_abi::BackendRepr {
rustc_abi::BackendRepr::SimdVector { element, count } => {
ValueAbi::Vector { element: element.stable(tables, cx), count }
}
rustc_abi::BackendRepr::ScalableVector { element, count } => {
ValueAbi::ScalableVector { element: element.stable(tables, cx), count }
}
rustc_abi::BackendRepr::Memory { sized } => ValueAbi::Aggregate { sized },
}
}

View file

@ -78,7 +78,7 @@ where
Ty: TyAbiInterface<'a, C> + Copy,
C: HasDataLayout + HasTargetSpec,
{
if !ret.layout.is_sized() {
if !ret.layout.is_sized() || ret.layout.is_scalable_vector() {
// Not touching this...
return;
}
@ -110,7 +110,7 @@ where
Ty: TyAbiInterface<'a, C> + Copy,
C: HasDataLayout + HasTargetSpec,
{
if !arg.layout.is_sized() {
if !arg.layout.is_sized() || arg.layout.is_scalable_vector() {
// Not touching this...
return;
}

View file

@ -85,7 +85,10 @@ where
}
}
},
BackendRepr::SimdVector { .. } => return Err(CannotUseFpConv),
BackendRepr::SimdVector { .. } => {
return Err(CannotUseFpConv);
}
BackendRepr::ScalableVector { .. } => panic!("scalable vectors are unsupported"),
BackendRepr::ScalarPair(..) | BackendRepr::Memory { .. } => match arg_layout.fields {
FieldsShape::Primitive => {
unreachable!("aggregates can't have `FieldsShape::Primitive`")

View file

@ -393,6 +393,7 @@ impl<'a, Ty> ArgAbi<'a, Ty> {
),
BackendRepr::SimdVector { .. } => PassMode::Direct(ArgAttributes::new()),
BackendRepr::Memory { .. } => Self::indirect_pass_mode(&layout),
BackendRepr::ScalableVector { .. } => PassMode::Direct(ArgAttributes::new()),
};
ArgAbi { layout, mode }
}

View file

@ -91,7 +91,9 @@ where
}
}
},
BackendRepr::SimdVector { .. } => return Err(CannotUseFpConv),
BackendRepr::SimdVector { .. } | BackendRepr::ScalableVector { .. } => {
return Err(CannotUseFpConv);
}
BackendRepr::ScalarPair(..) | BackendRepr::Memory { .. } => match arg_layout.fields {
FieldsShape::Primitive => {
unreachable!("aggregates can't have `FieldsShape::Primitive`")

View file

@ -103,6 +103,9 @@ where
}
false
}
BackendRepr::ScalableVector { .. } => {
panic!("scalable vectors are unsupported")
}
}
}

View file

@ -59,6 +59,8 @@ where
BackendRepr::SimdVector { .. } => Class::Sse,
BackendRepr::ScalableVector { .. } => panic!("scalable vectors are unsupported"),
BackendRepr::ScalarPair(..) | BackendRepr::Memory { .. } => {
for i in 0..layout.fields.count() {
let field_off = off + layout.fields.offset(i);

View file

@ -25,6 +25,7 @@ where
// FIXME(eddyb) there should be a size cap here
// (probably what clang calls "illegal vectors").
}
BackendRepr::ScalableVector { .. } => panic!("scalable vectors are unsupported"),
BackendRepr::Scalar(scalar) => {
if is_ret && matches!(scalar.primitive(), Primitive::Int(Integer::I128, _)) {
if cx.target_spec().rustc_abi == Some(RustcAbi::X86Softfloat) {

View file

@ -407,7 +407,9 @@ fn fn_abi_sanity_check<'tcx>(
// `layout.backend_repr` and ignore everything else. We should just reject
//`Aggregate` entirely here, but some targets need to be fixed first.
match arg.layout.backend_repr {
BackendRepr::Scalar(_) | BackendRepr::SimdVector { .. } => {}
BackendRepr::Scalar(_)
| BackendRepr::SimdVector { .. }
| BackendRepr::ScalableVector { .. } => {}
BackendRepr::ScalarPair(..) => {
panic!("`PassMode::Direct` used for ScalarPair type {}", arg.layout.ty)
}

View file

@ -3,8 +3,8 @@ use rustc_abi::Integer::{I8, I32};
use rustc_abi::Primitive::{self, Float, Int, Pointer};
use rustc_abi::{
AddressSpace, BackendRepr, FIRST_VARIANT, FieldIdx, FieldsShape, HasDataLayout, Layout,
LayoutCalculatorError, LayoutData, Niche, ReprOptions, Scalar, Size, StructKind, TagEncoding,
VariantIdx, Variants, WrappingRange,
LayoutCalculatorError, LayoutData, Niche, ReprOptions, ScalableElt, Scalar, Size, StructKind,
TagEncoding, VariantIdx, Variants, WrappingRange,
};
use rustc_hashes::Hash64;
use rustc_hir::attrs::AttributeKind;
@ -567,6 +567,37 @@ fn layout_of_uncached<'tcx>(
univariant(tys, kind)?
}
// Scalable vector types
//
// ```rust (ignore, example)
// #[rustc_scalable_vector(3)]
// struct svuint32_t(u32);
// ```
ty::Adt(def, args)
if matches!(def.repr().scalable, Some(ScalableElt::ElementCount(..))) =>
{
let Some(element_ty) = def
.is_struct()
.then(|| &def.variant(FIRST_VARIANT).fields)
.filter(|fields| fields.len() == 1)
.map(|fields| fields[FieldIdx::ZERO].ty(tcx, args))
else {
let guar = tcx
.dcx()
.delayed_bug("#[rustc_scalable_vector] was applied to an invalid type");
return Err(error(cx, LayoutError::ReferencesError(guar)));
};
let Some(ScalableElt::ElementCount(element_count)) = def.repr().scalable else {
let guar = tcx
.dcx()
.delayed_bug("#[rustc_scalable_vector] was applied to an invalid type");
return Err(error(cx, LayoutError::ReferencesError(guar)));
};
let element_layout = cx.layout_of(element_ty)?;
map_layout(cx.calc.scalable_vector_type(element_layout, element_count as u64))?
}
// SIMD vector types.
ty::Adt(def, args) if def.repr().simd() => {
// Supported SIMD vectors are ADTs with a single array field:

View file

@ -250,7 +250,7 @@ pub(super) fn layout_sanity_check<'tcx>(cx: &LayoutCx<'tcx>, layout: &TyAndLayou
// And the size has to be element * count plus alignment padding, of course
assert!(size == (element_size * count).align_to(align));
}
BackendRepr::Memory { .. } => {} // Nothing to check.
BackendRepr::Memory { .. } | BackendRepr::ScalableVector { .. } => {} // Nothing to check.
}
}

View file

@ -0,0 +1,49 @@
//@ edition: 2021
//@ only-aarch64
#![crate_type = "lib"]
#![allow(incomplete_features, internal_features)]
#![feature(simd_ffi, rustc_attrs, link_llvm_intrinsics)]
// Codegen test: scalable vectors lower to `<vscale x 4 x i32>`, are passed
// and returned by value, and can be loaded through a reference.
#[derive(Copy, Clone)]
#[rustc_scalable_vector(4)]
#[allow(non_camel_case_types)]
pub struct svint32_t(i32);
#[inline(never)]
#[target_feature(enable = "sve")]
pub unsafe fn svdup_n_s32(op: i32) -> svint32_t {
extern "C" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.dup.x.nxv4i32")]
fn _svdup_n_s32(op: i32) -> svint32_t;
}
unsafe { _svdup_n_s32(op) }
}
#[inline]
#[target_feature(enable = "sve,sve2")]
pub unsafe fn svxar_n_s32<const IMM3: i32>(op1: svint32_t, op2: svint32_t) -> svint32_t {
extern "C" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.xar.nxv4i32")]
fn _svxar_n_s32(op1: svint32_t, op2: svint32_t, imm3: i32) -> svint32_t;
}
unsafe { _svxar_n_s32(op1, op2, IMM3) }
}
#[inline(never)]
#[no_mangle]
#[target_feature(enable = "sve,sve2")]
// CHECK: define <vscale x 4 x i32> @pass_as_ref(ptr {{.*}}align 16{{.*}} %a, <vscale x 4 x i32> %b)
pub unsafe fn pass_as_ref(a: &svint32_t, b: svint32_t) -> svint32_t {
// CHECK: load <vscale x 4 x i32>, ptr %a, align 16
svxar_n_s32::<1>(*a, b)
}
#[no_mangle]
#[target_feature(enable = "sve,sve2")]
// CHECK: define <vscale x 4 x i32> @test()
pub unsafe fn test() -> svint32_t {
let a = svdup_n_s32(1);
let b = svdup_n_s32(2);
// CHECK: %_0 = call <vscale x 4 x i32> @pass_as_ref(ptr {{.*}}align 16{{.*}} %a, <vscale x 4 x i32> %b)
pass_as_ref(&a, b)
}

View file

@ -0,0 +1,51 @@
//@ compile-flags: --crate-type=lib
//@ only-aarch64
// Tests that capturing a scalable vector in a closure is rejected — the
// emitted error is "scalable vectors cannot be tuple fields" (presumably
// because captures are lowered to a tuple-like upvar struct; see .stderr).
#![allow(incomplete_features, internal_features)]
#![feature(
link_llvm_intrinsics,
rustc_attrs,
simd_ffi
)]
#[derive(Copy, Clone)]
#[rustc_scalable_vector(4)]
#[allow(non_camel_case_types)]
pub struct svint32_t(i32);
#[inline(never)]
#[target_feature(enable = "sve")]
pub unsafe fn svdup_n_s32(op: i32) -> svint32_t {
extern "C" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.dup.x.nxv4i32")]
fn _svdup_n_s32(op: i32) -> svint32_t;
}
unsafe { _svdup_n_s32(op) }
}
#[inline]
#[target_feature(enable = "sve,sve2")]
pub unsafe fn svxar_n_s32<const IMM3: i32>(op1: svint32_t, op2: svint32_t) -> svint32_t {
extern "C" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.xar.nxv4i32")]
fn _svxar_n_s32(op1: svint32_t, op2: svint32_t, imm3: i32) -> svint32_t;
}
unsafe { _svxar_n_s32(op1, op2, IMM3) }
}
#[inline(never)]
#[target_feature(enable = "sve,sve2")]
fn run(f: impl Fn() -> ()) {
f();
}
#[target_feature(enable = "sve,sve2")]
fn foo() {
unsafe {
let a = svdup_n_s32(42);
run(move || {
//~^ ERROR: scalable vectors cannot be tuple fields
svxar_n_s32::<2>(a, a);
});
}
}

View file

@ -0,0 +1,8 @@
error: scalable vectors cannot be tuple fields
--> $DIR/closure-capture.rs:46:9
|
LL | run(move || {
| ^^^
error: aborting due to 1 previous error

View file

@ -0,0 +1,31 @@
//@ build-pass
//@ only-aarch64
// Tests that scalable vectors can derive `Copy`/`Clone` and satisfy a
// `T: Copy` bound; the `extern` block triggers an expected FFI-safety lint.
#![feature(simd_ffi, rustc_attrs, link_llvm_intrinsics)]
#[derive(Copy, Clone)]
#[rustc_scalable_vector(4)]
#[allow(non_camel_case_types)]
pub struct svint32_t(i32);
#[target_feature(enable = "sve")]
pub unsafe fn svdup_n_s32(op: i32) -> svint32_t {
extern "C" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.dup.x.nxv4i32")]
fn _svdup_n_s32(op: i32) -> svint32_t;
//~^ WARN: `extern` block uses type `svint32_t`, which is not FFI-safe
}
unsafe { _svdup_n_s32(op) }
}
#[target_feature(enable = "sve")]
fn require_copy<T: Copy>(t: T) {}
#[target_feature(enable = "sve")]
fn test() {
unsafe {
let a = svdup_n_s32(1);
require_copy(a);
}
}
fn main() {}

View file

@ -0,0 +1,17 @@
warning: `extern` block uses type `svint32_t`, which is not FFI-safe
--> $DIR/copy-clone.rs:14:37
|
LL | fn _svdup_n_s32(op: i32) -> svint32_t;
| ^^^^^^^^^ not FFI-safe
|
= help: consider adding a `#[repr(C)]` or `#[repr(transparent)]` attribute to this struct
= note: this struct has unspecified layout
note: the type is defined here
--> $DIR/copy-clone.rs:8:1
|
LL | pub struct svint32_t(i32);
| ^^^^^^^^^^^^^^^^^^^^
= note: `#[warn(improper_ctypes)]` on by default
warning: 1 warning emitted

View file

@ -0,0 +1,13 @@
#![allow(internal_features)]
#![feature(rustc_attrs)]
// Tests that using a scalable vector as an `Fn`-trait argument is rejected —
// `Fn(...)` argument lists are tuples, and scalable vectors cannot be tuple
// fields (see .stderr).
#[rustc_scalable_vector(4)]
pub struct ScalableSimdFloat(f32);
unsafe fn test<T>(f: T)
where
T: Fn(ScalableSimdFloat), //~ ERROR: scalable vectors cannot be tuple fields
{
}
fn main() {}

View file

@ -0,0 +1,8 @@
error: scalable vectors cannot be tuple fields
--> $DIR/fn-trait.rs:9:8
|
LL | T: Fn(ScalableSimdFloat),
| ^^^^^^^^^^^^^^^^^^^^^
error: aborting due to 1 previous error

View file

@ -0,0 +1,37 @@
//@ build-pass
//@ compile-flags: --crate-type=lib
//@ only-aarch64
// Tests that scalable vectors are usable as ordinary values; the `extern`
// block triggers an expected FFI-safety lint.
#![allow(internal_features)]
#![feature(
link_llvm_intrinsics,
rustc_attrs,
simd_ffi,
)]
#[derive(Copy, Clone)]
#[rustc_scalable_vector(4)]
#[allow(non_camel_case_types)]
pub struct svint32_t(i32);
#[target_feature(enable = "sve")]
pub unsafe fn svdup_n_s32(op: i32) -> svint32_t {
extern "C" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.dup.x.nxv4i32")]
fn _svdup_n_s32(op: i32) -> svint32_t;
//~^ WARN: `extern` block uses type `svint32_t`, which is not FFI-safe
}
unsafe { _svdup_n_s32(op) }
}
// Tests that scalable vectors can be locals, arguments and return types.
#[target_feature(enable = "sve")]
fn id(v: svint32_t) -> svint32_t { v }
#[target_feature(enable = "sve")]
fn foo() {
unsafe {
let v = svdup_n_s32(1);
let v = id(v);
}
}

View file

@ -0,0 +1,17 @@
warning: `extern` block uses type `svint32_t`, which is not FFI-safe
--> $DIR/value-type.rs:20:37
|
LL | fn _svdup_n_s32(op: i32) -> svint32_t;
| ^^^^^^^^^ not FFI-safe
|
= help: consider adding a `#[repr(C)]` or `#[repr(transparent)]` attribute to this struct
= note: this struct has unspecified layout
note: the type is defined here
--> $DIR/value-type.rs:14:1
|
LL | pub struct svint32_t(i32);
| ^^^^^^^^^^^^^^^^^^^^
= note: `#[warn(improper_ctypes)]` on by default
warning: 1 warning emitted