Merge pull request #4375 from rust-lang/rustup-2025-06-04

Automatic Rustup
This commit is contained in:
Ralf Jung 2025-06-04 06:36:37 +00:00 committed by GitHub
commit 9a7255ee2b
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
1117 changed files with 61632 additions and 6998 deletions

View file

@ -4,7 +4,6 @@ members = [
# tidy-alphabetical-start
"compiler/rustc",
"src/build_helper",
"src/etc/test-float-parse",
"src/rustc-std-workspace/rustc-std-workspace-alloc",
"src/rustc-std-workspace/rustc-std-workspace-core",
"src/rustc-std-workspace/rustc-std-workspace-std",
@ -41,6 +40,7 @@ members = [
"src/tools/rustdoc-themes",
"src/tools/rustfmt",
"src/tools/suggest-tests",
"src/tools/test-float-parse",
"src/tools/tidy",
"src/tools/tier-check",
"src/tools/unicode-table-generator",

View file

@ -0,0 +1,136 @@
use std::fmt;
#[cfg(feature = "nightly")]
use rustc_macros::HashStable_Generic;
use crate::ExternAbi;
/// Calling convention to determine codegen
///
/// CanonAbi erases certain distinctions ExternAbi preserves, but remains target-dependent.
/// There are still both target-specific variants and aliasing variants, though much fewer.
/// The reason for this step is the frontend may wish to show an ExternAbi but implement that ABI
/// using a different ABI than the string per se, or describe irrelevant differences, e.g.
/// - extern "system"
/// - extern "cdecl"
/// - extern "C-unwind"
/// In that sense, this erases mere syntactic distinctions to create a canonical *directive*,
/// rather than picking the "actual" ABI.
#[derive(Copy, Clone, Debug)]
#[derive(PartialOrd, Ord, PartialEq, Eq, Hash)]
#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
pub enum CanonAbi {
    // NOTE: the use of nested variants for some ABIs is for many targets they don't matter,
    // and this pushes the complexity of their reasoning to target-specific code,
    // allowing a `match` to easily exhaustively ignore these subcategories of variants.
    // Otherwise it is very tempting to avoid matching exhaustively!
    /// The C ABI, as canonicalized for the target; corresponds to `ExternAbi::C`
    C,
    /// The Rust ABI; corresponds to `ExternAbi::Rust`
    Rust,
    /// The Rust ABI for seldom-called code; corresponds to `ExternAbi::RustCold`
    RustCold,
    /// ABIs relevant to 32-bit Arm targets
    Arm(ArmCall),
    /// ABI relevant to GPUs: the entry point for a GPU kernel
    GpuKernel,
    /// ABIs relevant to bare-metal interrupt targets
    // FIXME(workingjubilee): a particular reason for this nesting is we might not need these?
    // interrupt ABIs should have the same properties:
    // - uncallable by Rust calls, as LLVM rejects it in most cases
    // - uses a preserve-all-registers *callee* convention
    // - should always return `-> !` (effectively... it can't use normal `ret`)
    // what differs between targets is
    // - allowed arguments: x86 differs slightly, having 2-3 arguments which are handled magically
    // - may need special prologues/epilogues for some interrupts, without affecting "call ABI"
    Interrupt(InterruptKind),
    /// ABIs relevant to Windows or x86 targets
    X86(X86Call),
}
impl fmt::Display for CanonAbi {
    /// Prints the ABI under the same name as the `ExternAbi` it shares a string with.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Route through the string-erased ExternAbi so both types display identically,
        // delegating to `&str`'s Display impl so formatter flags are respected.
        let erased = self.to_erased_extern_abi();
        erased.as_str().fmt(f)
    }
}
impl CanonAbi {
    /// Convert to the [`ExternAbi`] that *shares a string* with this `CanonAbi`.
    ///
    /// A target-insensitive mapping of `CanonAbi` to `ExternAbi`, convenient for
    /// "forwarding" impls such as `Display`.
    ///
    /// The set of `CanonAbi` values is a logical *subset* of `ExternAbi` values, so this
    /// direction is injective: distinct `CanonAbi`s map to distinct `ExternAbi`s.
    /// The reverse direction is *not* injective — lowering an `ExternAbi` to a `CanonAbi`
    /// erases distinctions — so a round-trip starting from `ExternAbi` may lose data
    /// (note e.g. that every unwind-capable variant is mapped back here with `unwind: false`).
    const fn to_erased_extern_abi(self) -> ExternAbi {
        match self {
            CanonAbi::C => ExternAbi::C { unwind: false },
            CanonAbi::Rust => ExternAbi::Rust,
            CanonAbi::RustCold => ExternAbi::RustCold,
            CanonAbi::Arm(arm_call) => match arm_call {
                ArmCall::Aapcs => ExternAbi::Aapcs { unwind: false },
                ArmCall::CCmseNonSecureCall => ExternAbi::CCmseNonSecureCall,
                ArmCall::CCmseNonSecureEntry => ExternAbi::CCmseNonSecureEntry,
            },
            CanonAbi::GpuKernel => ExternAbi::GpuKernel,
            CanonAbi::Interrupt(interrupt_kind) => match interrupt_kind {
                InterruptKind::Avr => ExternAbi::AvrInterrupt,
                InterruptKind::AvrNonBlocking => ExternAbi::AvrNonBlockingInterrupt,
                InterruptKind::Msp430 => ExternAbi::Msp430Interrupt,
                InterruptKind::RiscvMachine => ExternAbi::RiscvInterruptM,
                InterruptKind::RiscvSupervisor => ExternAbi::RiscvInterruptS,
                InterruptKind::X86 => ExternAbi::X86Interrupt,
            },
            CanonAbi::X86(x86_call) => match x86_call {
                X86Call::Fastcall => ExternAbi::Fastcall { unwind: false },
                X86Call::Stdcall => ExternAbi::Stdcall { unwind: false },
                X86Call::SysV64 => ExternAbi::SysV64 { unwind: false },
                X86Call::Thiscall => ExternAbi::Thiscall { unwind: false },
                X86Call::Vectorcall => ExternAbi::Vectorcall { unwind: false },
                X86Call::Win64 => ExternAbi::Win64 { unwind: false },
            },
        }
    }
}
/// Callee codegen for interrupts
///
/// This is named differently from the "Call" enums because it is different:
/// these "ABI" differences are not relevant to callers, since there is "no caller".
/// These only affect callee codegen, making their categorization as distinct ABIs a bit peculiar.
#[derive(Copy, Clone, Debug)]
#[derive(PartialOrd, Ord, PartialEq, Eq, Hash)]
#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
pub enum InterruptKind {
    /// Corresponds to `ExternAbi::AvrInterrupt`
    Avr,
    /// Corresponds to `ExternAbi::AvrNonBlockingInterrupt`
    AvrNonBlocking,
    /// Corresponds to `ExternAbi::Msp430Interrupt`
    Msp430,
    /// RISC-V machine-mode interrupt; corresponds to `ExternAbi::RiscvInterruptM`
    RiscvMachine,
    /// RISC-V supervisor-mode interrupt; corresponds to `ExternAbi::RiscvInterruptS`
    RiscvSupervisor,
    /// Corresponds to `ExternAbi::X86Interrupt`
    X86,
}
/// ABIs defined for x86-{32,64}
///
/// One of SysV64 or Win64 may alias the C ABI, and arguably Win64 is cross-platform now?
#[derive(Clone, Copy, Debug)]
#[derive(PartialOrd, Ord, PartialEq, Eq, Hash)]
#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
pub enum X86Call {
    /// "fastcall" has both GNU and Windows variants
    Fastcall,
    /// "stdcall" has both GNU and Windows variants
    Stdcall,
    /// The x86-64 System V convention; corresponds to `ExternAbi::SysV64`
    SysV64,
    /// Corresponds to `ExternAbi::Thiscall`
    Thiscall,
    /// Corresponds to `ExternAbi::Vectorcall`
    Vectorcall,
    /// The x86-64 Windows convention; corresponds to `ExternAbi::Win64`
    Win64,
}
/// ABIs defined for 32-bit Arm
#[derive(Copy, Clone, Debug)]
#[derive(PartialOrd, Ord, PartialEq, Eq, Hash)]
#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
pub enum ArmCall {
    /// The Arm AAPCS convention; corresponds to `ExternAbi::Aapcs`
    Aapcs,
    /// Corresponds to `ExternAbi::CCmseNonSecureCall`
    CCmseNonSecureCall,
    /// Corresponds to `ExternAbi::CCmseNonSecureEntry`
    CCmseNonSecureEntry,
}

View file

@ -7,6 +7,8 @@ use rustc_data_structures::stable_hasher::{HashStable, StableHasher, StableOrd};
#[cfg(feature = "nightly")]
use rustc_macros::{Decodable, Encodable};
use crate::AbiFromStrErr;
#[cfg(test)]
mod tests;
@ -99,11 +101,6 @@ macro_rules! abi_impls {
}
}
#[derive(Debug)]
pub enum AbiFromStrErr {
Unknown,
}
abi_impls! {
ExternAbi = {
C { unwind: false } =><= "C",

View file

@ -55,13 +55,14 @@ use rustc_index::{Idx, IndexSlice, IndexVec};
use rustc_macros::{Decodable_NoContext, Encodable_NoContext, HashStable_Generic};
mod callconv;
mod canon_abi;
mod extern_abi;
mod layout;
#[cfg(test)]
mod tests;
mod extern_abi;
pub use callconv::{Heterogeneous, HomogeneousAggregate, Reg, RegKind};
pub use canon_abi::{ArmCall, CanonAbi, InterruptKind, X86Call};
pub use extern_abi::{ExternAbi, all_names};
#[cfg(feature = "nightly")]
pub use layout::{FIRST_VARIANT, FieldIdx, Layout, TyAbiInterface, TyAndLayout, VariantIdx};
@ -1895,3 +1896,11 @@ pub enum StructKind {
/// A univariant, but with a prefix of an arbitrary size & alignment (e.g., enum tag).
Prefixed(Size, Align),
}
#[derive(Clone, Debug)]
pub enum AbiFromStrErr {
/// not a known ABI
Unknown,
/// no "-unwind" variant can be used here
NoExplicitUnwind,
}

View file

@ -99,8 +99,15 @@ pub struct Path {
impl PartialEq<Symbol> for Path {
#[inline]
fn eq(&self, symbol: &Symbol) -> bool {
matches!(&self.segments[..], [segment] if segment.ident.name == *symbol)
fn eq(&self, name: &Symbol) -> bool {
if let [segment] = self.segments.as_ref()
&& segment.args.is_none()
&& segment.ident.name == *name
{
true
} else {
false
}
}
}
@ -120,17 +127,6 @@ impl Path {
Path { segments: thin_vec![PathSegment::from_ident(ident)], span: ident.span, tokens: None }
}
pub fn is_ident(&self, name: Symbol) -> bool {
if let [segment] = self.segments.as_ref()
&& segment.args.is_none()
&& segment.ident.name == name
{
true
} else {
false
}
}
pub fn is_global(&self) -> bool {
self.segments.first().is_some_and(|segment| segment.ident.name == kw::PathRoot)
}
@ -2465,6 +2461,39 @@ impl TyKind {
None
}
}
/// Returns `true` if this type is spelled like a scalar primitive
/// (e.g., `i32`, `u8`, `bool`) or is the unit type.
///
/// This check is purely **syntactic** (symbol equality on a simple path): it
/// does not resolve aliases, prefixes, or shadowing. Given
/// `type i32 = CustomType;`, the path `i32` still reports `true` here even
/// though it no longer names the primitive.
pub fn maybe_scalar(&self) -> bool {
    match self.is_simple_path() {
        Some(name) => matches!(
            name,
            sym::i8
                | sym::i16
                | sym::i32
                | sym::i64
                | sym::i128
                | sym::u8
                | sym::u16
                | sym::u32
                | sym::u64
                | sym::u128
                | sym::f16
                | sym::f32
                | sym::f64
                | sym::f128
                | sym::char
                | sym::bool
        ),
        // Not a simple path: the only remaining scalar-ish spelling is `()`.
        None => self.is_unit(),
    }
}
}
/// A pattern type pattern.

View file

@ -12,7 +12,6 @@ use std::panic;
use rustc_data_structures::flat_map_in_place::FlatMapInPlace;
use rustc_data_structures::stack::ensure_sufficient_stack;
use rustc_span::source_map::Spanned;
use rustc_span::{Ident, Span};
use smallvec::{Array, SmallVec, smallvec};
use thin_vec::ThinVec;
@ -499,58 +498,6 @@ fn walk_assoc_item_constraint<T: MutVisitor>(
vis.visit_span(span);
}
pub fn walk_ty<T: MutVisitor>(vis: &mut T, ty: &mut Ty) {
let Ty { id, kind, span, tokens: _ } = ty;
vis.visit_id(id);
match kind {
TyKind::Err(_guar) => {}
TyKind::Infer | TyKind::ImplicitSelf | TyKind::Dummy | TyKind::Never | TyKind::CVarArgs => {
}
TyKind::Slice(ty) => vis.visit_ty(ty),
TyKind::Ptr(MutTy { ty, mutbl: _ }) => vis.visit_ty(ty),
TyKind::Ref(lt, MutTy { ty, mutbl: _ }) | TyKind::PinnedRef(lt, MutTy { ty, mutbl: _ }) => {
visit_opt(lt, |lt| vis.visit_lifetime(lt));
vis.visit_ty(ty);
}
TyKind::BareFn(bft) => {
let BareFnTy { safety, ext: _, generic_params, decl, decl_span } = bft.deref_mut();
visit_safety(vis, safety);
generic_params.flat_map_in_place(|param| vis.flat_map_generic_param(param));
vis.visit_fn_decl(decl);
vis.visit_span(decl_span);
}
TyKind::UnsafeBinder(binder) => {
let UnsafeBinderTy { generic_params, inner_ty } = binder.deref_mut();
generic_params.flat_map_in_place(|param| vis.flat_map_generic_param(param));
vis.visit_ty(inner_ty);
}
TyKind::Tup(tys) => visit_thin_vec(tys, |ty| vis.visit_ty(ty)),
TyKind::Paren(ty) => vis.visit_ty(ty),
TyKind::Pat(ty, pat) => {
vis.visit_ty(ty);
vis.visit_ty_pat(pat);
}
TyKind::Path(qself, path) => {
vis.visit_qself(qself);
vis.visit_path(path);
}
TyKind::Array(ty, length) => {
vis.visit_ty(ty);
vis.visit_anon_const(length);
}
TyKind::Typeof(expr) => vis.visit_anon_const(expr),
TyKind::TraitObject(bounds, _syntax) => {
visit_vec(bounds, |bound| vis.visit_param_bound(bound, BoundKind::TraitObject))
}
TyKind::ImplTrait(id, bounds) => {
vis.visit_id(id);
visit_vec(bounds, |bound| vis.visit_param_bound(bound, BoundKind::Impl));
}
TyKind::MacCall(mac) => vis.visit_mac_call(mac),
}
vis.visit_span(span);
}
pub fn walk_ty_pat<T: MutVisitor>(vis: &mut T, ty: &mut TyPat) {
let TyPat { id, kind, span, tokens: _ } = ty;
vis.visit_id(id);
@ -588,13 +535,6 @@ fn walk_ident<T: MutVisitor>(vis: &mut T, Ident { name: _, span }: &mut Ident) {
vis.visit_span(span);
}
fn walk_path_segment<T: MutVisitor>(vis: &mut T, segment: &mut PathSegment) {
let PathSegment { ident, id, args } = segment;
vis.visit_id(id);
vis.visit_ident(ident);
visit_opt(args, |args| vis.visit_generic_args(args));
}
fn walk_path<T: MutVisitor>(vis: &mut T, Path { segments, span, tokens: _ }: &mut Path) {
for segment in segments {
vis.visit_path_segment(segment);
@ -729,18 +669,6 @@ fn walk_closure_binder<T: MutVisitor>(vis: &mut T, binder: &mut ClosureBinder) {
}
}
fn walk_coroutine_kind<T: MutVisitor>(vis: &mut T, coroutine_kind: &mut CoroutineKind) {
match coroutine_kind {
CoroutineKind::Async { span, closure_id, return_impl_trait_id }
| CoroutineKind::Gen { span, closure_id, return_impl_trait_id }
| CoroutineKind::AsyncGen { span, closure_id, return_impl_trait_id } => {
vis.visit_id(closure_id);
vis.visit_id(return_impl_trait_id);
vis.visit_span(span);
}
}
}
fn walk_fn<T: MutVisitor>(vis: &mut T, kind: FnKind<'_>) {
match kind {
FnKind::Fn(
@ -991,13 +919,6 @@ pub fn walk_flat_map_expr_field<T: MutVisitor>(
smallvec![f]
}
pub fn walk_block<T: MutVisitor>(vis: &mut T, block: &mut Block) {
let Block { id, stmts, rules: _, span, tokens: _ } = block;
vis.visit_id(id);
stmts.flat_map_in_place(|stmt| vis.flat_map_stmt(stmt));
vis.visit_span(span);
}
pub fn walk_item_kind<K: WalkItemKind>(
kind: &mut K,
span: Span,
@ -1041,57 +962,6 @@ pub fn walk_flat_map_assoc_item(
smallvec![item]
}
pub fn walk_pat<T: MutVisitor>(vis: &mut T, pat: &mut Pat) {
let Pat { id, kind, span, tokens: _ } = pat;
vis.visit_id(id);
match kind {
PatKind::Err(_guar) => {}
PatKind::Missing | PatKind::Wild | PatKind::Rest | PatKind::Never => {}
PatKind::Ident(_binding_mode, ident, sub) => {
vis.visit_ident(ident);
visit_opt(sub, |sub| vis.visit_pat(sub));
}
PatKind::Expr(e) => vis.visit_expr(e),
PatKind::TupleStruct(qself, path, elems) => {
vis.visit_qself(qself);
vis.visit_path(path);
visit_thin_vec(elems, |elem| vis.visit_pat(elem));
}
PatKind::Path(qself, path) => {
vis.visit_qself(qself);
vis.visit_path(path);
}
PatKind::Struct(qself, path, fields, _etc) => {
vis.visit_qself(qself);
vis.visit_path(path);
fields.flat_map_in_place(|field| vis.flat_map_pat_field(field));
}
PatKind::Box(inner) => vis.visit_pat(inner),
PatKind::Deref(inner) => vis.visit_pat(inner),
PatKind::Ref(inner, _mutbl) => vis.visit_pat(inner),
PatKind::Range(e1, e2, Spanned { span: _, node: _ }) => {
visit_opt(e1, |e| vis.visit_expr(e));
visit_opt(e2, |e| vis.visit_expr(e));
vis.visit_span(span);
}
PatKind::Guard(p, e) => {
vis.visit_pat(p);
vis.visit_expr(e);
}
PatKind::Tuple(elems) | PatKind::Slice(elems) | PatKind::Or(elems) => {
visit_thin_vec(elems, |elem| vis.visit_pat(elem))
}
PatKind::Paren(inner) => vis.visit_pat(inner),
PatKind::MacCall(mac) => vis.visit_mac_call(mac),
}
vis.visit_span(span);
}
fn walk_anon_const<T: MutVisitor>(vis: &mut T, AnonConst { id, value }: &mut AnonConst) {
vis.visit_id(id);
vis.visit_expr(value);
}
fn walk_inline_asm<T: MutVisitor>(vis: &mut T, asm: &mut InlineAsm) {
// FIXME: Visit spans inside all this currently ignored stuff.
let InlineAsm {

View file

@ -210,7 +210,7 @@ pub trait Visitor<'ast>: Sized {
walk_poly_trait_ref(self, t)
}
fn visit_variant_data(&mut self, s: &'ast VariantData) -> Self::Result {
walk_struct_def(self, s)
walk_variant_data(self, s)
}
fn visit_field_def(&mut self, s: &'ast FieldDef) -> Self::Result {
walk_field_def(self, s)
@ -233,10 +233,13 @@ pub trait Visitor<'ast>: Sized {
fn visit_mac_call(&mut self, mac: &'ast MacCall) -> Self::Result {
walk_mac(self, mac)
}
fn visit_macro_def(&mut self, _mac: &'ast MacroDef, _id: NodeId) -> Self::Result {
fn visit_id(&mut self, _id: NodeId) -> Self::Result {
Self::Result::output()
}
fn visit_path(&mut self, path: &'ast Path, _id: NodeId) -> Self::Result {
fn visit_macro_def(&mut self, _mac: &'ast MacroDef) -> Self::Result {
Self::Result::output()
}
fn visit_path(&mut self, path: &'ast Path) -> Self::Result {
walk_path(self, path)
}
fn visit_use_tree(
@ -295,8 +298,8 @@ pub trait Visitor<'ast>: Sized {
fn visit_capture_by(&mut self, _capture_by: &'ast CaptureBy) -> Self::Result {
Self::Result::output()
}
fn visit_coroutine_kind(&mut self, _coroutine_kind: &'ast CoroutineKind) -> Self::Result {
Self::Result::output()
fn visit_coroutine_kind(&mut self, coroutine_kind: &'ast CoroutineKind) -> Self::Result {
walk_coroutine_kind(self, coroutine_kind)
}
fn visit_fn_decl(&mut self, fn_decl: &'ast FnDecl) -> Self::Result {
walk_fn_decl(self, fn_decl)
@ -334,17 +337,14 @@ macro_rules! common_visitor_and_walkers {
$(${ignore($lt)}V::Result::output())?
}
// this is only used by the MutVisitor. We include this symmetry here to make writing other functions easier
/// helper since `Visitor` wants `NodeId` but `MutVisitor` wants `&mut NodeId`
$(${ignore($lt)}
#[expect(unused, rustc::pass_by_value)]
#[inline]
#[expect(rustc::pass_by_value)]
)?
#[inline]
fn visit_id<$($lt,)? V: $Visitor$(<$lt>)?>(visitor: &mut V, id: &$($lt)? $($mut)? NodeId) $(-> <V as Visitor<$lt>>::Result)? {
$(
${ignore($mut)}
visitor.visit_id(id);
)?
$(${ignore($lt)}V::Result::output())?
// deref `&NodeId` into `NodeId` only for `Visitor`
visitor.visit_id( $(${ignore($lt)} * )? id)
}
// this is only used by the MutVisitor. We include this symmetry here to make writing other functions easier
@ -577,8 +577,7 @@ macro_rules! common_visitor_and_walkers {
ItemKind::MacCall(m) => vis.visit_mac_call(m),
ItemKind::MacroDef(ident, def) => {
try_visit!(vis.visit_ident(ident));
// FIXME(fee1-dead) assymetry
vis.visit_macro_def(def$(${ignore($lt)}, id)?)
vis.visit_macro_def(def)
}
ItemKind::Delegation(box Delegation {
id,
@ -591,7 +590,7 @@ macro_rules! common_visitor_and_walkers {
}) => {
try_visit!(visit_id(vis, id));
try_visit!(vis.visit_qself(qself));
try_visit!(vis.visit_path(path$(${ignore($lt)}, *id)?));
try_visit!(vis.visit_path(path));
try_visit!(vis.visit_ident(ident));
visit_opt!(vis, visit_ident, rename);
visit_opt!(vis, visit_block, body);
@ -599,7 +598,7 @@ macro_rules! common_visitor_and_walkers {
}
ItemKind::DelegationMac(box DelegationMac { qself, prefix, suffixes, body }) => {
try_visit!(vis.visit_qself(qself));
try_visit!(vis.visit_path(prefix$(${ignore($lt)}, id)?));
try_visit!(vis.visit_path(prefix));
if let Some(suffixes) = suffixes {
for (ident, rename) in suffixes {
try_visit!(vis.visit_ident(ident));
@ -642,8 +641,7 @@ macro_rules! common_visitor_and_walkers {
if let Some(define_opaque) = define_opaque {
for (id, path) in define_opaque {
try_visit!(visit_id(visitor, id));
// FIXME(fee1-dead): look into this weird assymetry
try_visit!(visitor.visit_path(path$(${ignore($lt)}, *id)?));
try_visit!(visitor.visit_path(path));
}
}
$(<V as Visitor<$lt>>::Result::output())?
@ -699,7 +697,7 @@ macro_rules! common_visitor_and_walkers {
}) => {
try_visit!(visit_id(vis, id));
try_visit!(vis.visit_qself(qself));
try_visit!(vis.visit_path(path $(${ignore($lt)}, *id)?));
try_visit!(vis.visit_path(path));
try_visit!(vis.visit_ident(ident));
visit_opt!(vis, visit_ident, rename);
visit_opt!(vis, visit_block, body);
@ -707,7 +705,7 @@ macro_rules! common_visitor_and_walkers {
}
AssocItemKind::DelegationMac(box DelegationMac { qself, prefix, suffixes, body }) => {
try_visit!(vis.visit_qself(qself));
try_visit!(vis.visit_path(prefix$(${ignore($lt)}, id)?));
try_visit!(vis.visit_path(prefix));
if let Some(suffixes) = suffixes {
for (ident, rename) in suffixes {
try_visit!(vis.visit_ident(ident));
@ -773,6 +771,190 @@ macro_rules! common_visitor_and_walkers {
}
}
}
fn walk_coroutine_kind<$($lt,)? V: $Visitor$(<$lt>)?>(
vis: &mut V,
coroutine_kind: &$($lt)? $($mut)? CoroutineKind,
) $(-> <V as Visitor<$lt>>::Result)? {
match coroutine_kind {
CoroutineKind::Async { span, closure_id, return_impl_trait_id }
| CoroutineKind::Gen { span, closure_id, return_impl_trait_id }
| CoroutineKind::AsyncGen { span, closure_id, return_impl_trait_id } => {
try_visit!(visit_id(vis, closure_id));
try_visit!(visit_id(vis, return_impl_trait_id));
visit_span(vis, span)
}
}
}
pub fn walk_pat<$($lt,)? V: $Visitor$(<$lt>)?>(
vis: &mut V,
pattern: &$($lt)? $($mut)? Pat
) $(-> <V as Visitor<$lt>>::Result)? {
let Pat { id, kind, span, tokens: _ } = pattern;
try_visit!(visit_id(vis, id));
match kind {
PatKind::Err(_guar) => {}
PatKind::Missing | PatKind::Wild | PatKind::Rest | PatKind::Never => {}
PatKind::Ident(_bmode, ident, optional_subpattern) => {
try_visit!(vis.visit_ident(ident));
visit_opt!(vis, visit_pat, optional_subpattern);
}
PatKind::Expr(expression) => try_visit!(vis.visit_expr(expression)),
PatKind::TupleStruct(opt_qself, path, elems) => {
try_visit!(vis.visit_qself(opt_qself));
try_visit!(vis.visit_path(path));
walk_list!(vis, visit_pat, elems);
}
PatKind::Path(opt_qself, path) => {
try_visit!(vis.visit_qself(opt_qself));
try_visit!(vis.visit_path(path))
}
PatKind::Struct(opt_qself, path, fields, _rest) => {
try_visit!(vis.visit_qself(opt_qself));
try_visit!(vis.visit_path(path));
$(
${ignore($lt)}
walk_list!(vis, visit_pat_field, fields);
)?
$(
${ignore($mut)}
fields.flat_map_in_place(|field| vis.flat_map_pat_field(field));
)?
}
PatKind::Box(subpattern) | PatKind::Deref(subpattern) | PatKind::Paren(subpattern) => {
try_visit!(vis.visit_pat(subpattern));
}
PatKind::Ref(subpattern, _ /*mutbl*/) => {
try_visit!(vis.visit_pat(subpattern));
}
PatKind::Range(lower_bound, upper_bound, _end) => {
visit_opt!(vis, visit_expr, lower_bound);
visit_opt!(vis, visit_expr, upper_bound);
try_visit!(visit_span(vis, span));
}
PatKind::Guard(subpattern, guard_condition) => {
try_visit!(vis.visit_pat(subpattern));
try_visit!(vis.visit_expr(guard_condition));
}
PatKind::Tuple(elems) | PatKind::Slice(elems) | PatKind::Or(elems) => {
walk_list!(vis, visit_pat, elems);
}
PatKind::MacCall(mac) => try_visit!(vis.visit_mac_call(mac)),
}
visit_span(vis, span)
}
pub fn walk_anon_const<$($lt,)? V: $Visitor$(<$lt>)?>(
vis: &mut V,
constant: &$($lt)? $($mut)? AnonConst,
) $(-> <V as Visitor<$lt>>::Result)? {
let AnonConst { id, value } = constant;
try_visit!(visit_id(vis, id));
vis.visit_expr(value)
}
pub fn walk_path_segment<$($lt,)? V: $Visitor$(<$lt>)?>(
vis: &mut V,
segment: &$($lt)? $($mut)? PathSegment,
) $(-> <V as Visitor<$lt>>::Result)? {
let PathSegment { ident, id, args } = segment;
try_visit!(visit_id(vis, id));
try_visit!(vis.visit_ident(ident));
visit_opt!(vis, visit_generic_args, args);
$(<V as Visitor<$lt>>::Result::output())?
}
pub fn walk_block<$($lt,)? V: $Visitor$(<$lt>)?>(
vis: &mut V,
block: &$($lt)? $($mut)? Block
) $(-> <V as Visitor<$lt>>::Result)? {
let Block { stmts, id, rules: _, span, tokens: _ } = block;
try_visit!(visit_id(vis, id));
$(
${ignore($lt)}
walk_list!(vis, visit_stmt, stmts);
)?
$(
${ignore($mut)}
stmts.flat_map_in_place(|stmt| vis.flat_map_stmt(stmt));
)?
visit_span(vis, span)
}
pub fn walk_ty<$($lt,)? V: $Visitor$(<$lt>)?>(
vis: &mut V, ty: &$($lt)? $($mut)? Ty
) $(-> <V as Visitor<$lt>>::Result)? {
let Ty { id, kind, span, tokens: _ } = ty;
try_visit!(visit_id(vis, id));
match kind {
TyKind::Err(_guar) => {}
TyKind::Infer | TyKind::ImplicitSelf | TyKind::Dummy | TyKind::Never | TyKind::CVarArgs => {}
TyKind::Slice(ty) | TyKind::Paren(ty) => try_visit!(vis.visit_ty(ty)),
TyKind::Ptr(MutTy { ty, mutbl: _ }) => try_visit!(vis.visit_ty(ty)),
TyKind::Ref(opt_lifetime, MutTy { ty, mutbl: _ })
| TyKind::PinnedRef(opt_lifetime, MutTy { ty, mutbl: _ }) => {
// FIXME(fee1-dead) asymmetry
visit_opt!(vis, visit_lifetime, opt_lifetime$(${ignore($lt)}, LifetimeCtxt::Ref)?);
try_visit!(vis.visit_ty(ty));
}
TyKind::Tup(tuple_element_types) => {
walk_list!(vis, visit_ty, tuple_element_types);
}
TyKind::BareFn(function_declaration) => {
let BareFnTy { safety, ext: _, generic_params, decl, decl_span } =
&$($mut)? **function_declaration;
visit_safety(vis, safety);
$(
${ignore($lt)}
walk_list!(vis, visit_generic_param, generic_params);
)?
$(
${ignore($mut)}
generic_params.flat_map_in_place(|param| vis.flat_map_generic_param(param));
)?
try_visit!(vis.visit_fn_decl(decl));
try_visit!(visit_span(vis, decl_span));
}
TyKind::UnsafeBinder(binder) => {
$(
${ignore($lt)}
walk_list!(vis, visit_generic_param, &binder.generic_params);
)?
$(
${ignore($mut)}
binder.generic_params.flat_map_in_place(|param| vis.flat_map_generic_param(param));
)?
try_visit!(vis.visit_ty(&$($mut)?binder.inner_ty));
}
TyKind::Path(maybe_qself, path) => {
try_visit!(vis.visit_qself(maybe_qself));
try_visit!(vis.visit_path(path));
}
TyKind::Pat(ty, pat) => {
try_visit!(vis.visit_ty(ty));
try_visit!(vis.visit_ty_pat(pat));
}
TyKind::Array(ty, length) => {
try_visit!(vis.visit_ty(ty));
try_visit!(vis.visit_anon_const(length));
}
TyKind::TraitObject(bounds, _syntax) => {
walk_list!(vis, visit_param_bound, bounds, BoundKind::TraitObject);
}
TyKind::ImplTrait(id, bounds) => {
try_visit!(visit_id(vis, id));
walk_list!(vis, visit_param_bound, bounds, BoundKind::Impl);
}
TyKind::Typeof(expression) => try_visit!(vis.visit_anon_const(expression)),
TyKind::MacCall(mac) => try_visit!(vis.visit_mac_call(mac)),
}
visit_span(vis, span)
}
};
}
@ -808,7 +990,8 @@ where
pub fn walk_trait_ref<'a, V: Visitor<'a>>(visitor: &mut V, trait_ref: &'a TraitRef) -> V::Result {
let TraitRef { path, ref_id } = trait_ref;
visitor.visit_path(path, *ref_id)
try_visit!(visitor.visit_path(path));
visitor.visit_id(*ref_id)
}
pub fn walk_enum_def<'a, V: Visitor<'a>>(
@ -848,56 +1031,6 @@ pub fn walk_pat_field<'a, V: Visitor<'a>>(visitor: &mut V, fp: &'a PatField) ->
V::Result::output()
}
pub fn walk_ty<'a, V: Visitor<'a>>(visitor: &mut V, typ: &'a Ty) -> V::Result {
let Ty { id, kind, span: _, tokens: _ } = typ;
match kind {
TyKind::Slice(ty) | TyKind::Paren(ty) => try_visit!(visitor.visit_ty(ty)),
TyKind::Ptr(MutTy { ty, mutbl: _ }) => try_visit!(visitor.visit_ty(ty)),
TyKind::Ref(opt_lifetime, MutTy { ty, mutbl: _ })
| TyKind::PinnedRef(opt_lifetime, MutTy { ty, mutbl: _ }) => {
visit_opt!(visitor, visit_lifetime, opt_lifetime, LifetimeCtxt::Ref);
try_visit!(visitor.visit_ty(ty));
}
TyKind::Tup(tuple_element_types) => {
walk_list!(visitor, visit_ty, tuple_element_types);
}
TyKind::BareFn(function_declaration) => {
let BareFnTy { safety: _, ext: _, generic_params, decl, decl_span: _ } =
&**function_declaration;
walk_list!(visitor, visit_generic_param, generic_params);
try_visit!(visitor.visit_fn_decl(decl));
}
TyKind::UnsafeBinder(binder) => {
walk_list!(visitor, visit_generic_param, &binder.generic_params);
try_visit!(visitor.visit_ty(&binder.inner_ty));
}
TyKind::Path(maybe_qself, path) => {
try_visit!(visitor.visit_qself(maybe_qself));
try_visit!(visitor.visit_path(path, *id));
}
TyKind::Pat(ty, pat) => {
try_visit!(visitor.visit_ty(ty));
try_visit!(visitor.visit_ty_pat(pat));
}
TyKind::Array(ty, length) => {
try_visit!(visitor.visit_ty(ty));
try_visit!(visitor.visit_anon_const(length));
}
TyKind::TraitObject(bounds, _syntax) => {
walk_list!(visitor, visit_param_bound, bounds, BoundKind::TraitObject);
}
TyKind::ImplTrait(_id, bounds) => {
walk_list!(visitor, visit_param_bound, bounds, BoundKind::Impl);
}
TyKind::Typeof(expression) => try_visit!(visitor.visit_anon_const(expression)),
TyKind::Infer | TyKind::ImplicitSelf | TyKind::Dummy => {}
TyKind::Err(_guar) => {}
TyKind::MacCall(mac) => try_visit!(visitor.visit_mac_call(mac)),
TyKind::Never | TyKind::CVarArgs => {}
}
V::Result::output()
}
pub fn walk_ty_pat<'a, V: Visitor<'a>>(visitor: &mut V, tp: &'a TyPat) -> V::Result {
let TyPat { id: _, kind, span: _, tokens: _ } = tp;
match kind {
@ -931,7 +1064,8 @@ pub fn walk_use_tree<'a, V: Visitor<'a>>(
id: NodeId,
) -> V::Result {
let UseTree { prefix, kind, span: _ } = use_tree;
try_visit!(visitor.visit_path(prefix, id));
try_visit!(visitor.visit_id(id));
try_visit!(visitor.visit_path(prefix));
match kind {
UseTreeKind::Simple(rename) => {
// The extra IDs are handled during AST lowering.
@ -947,16 +1081,6 @@ pub fn walk_use_tree<'a, V: Visitor<'a>>(
V::Result::output()
}
pub fn walk_path_segment<'a, V: Visitor<'a>>(
visitor: &mut V,
segment: &'a PathSegment,
) -> V::Result {
let PathSegment { ident, id: _, args } = segment;
try_visit!(visitor.visit_ident(ident));
visit_opt!(visitor, visit_generic_args, args);
V::Result::output()
}
pub fn walk_generic_args<'a, V>(visitor: &mut V, generic_args: &'a GenericArgs) -> V::Result
where
V: Visitor<'a>,
@ -1012,52 +1136,6 @@ pub fn walk_assoc_item_constraint<'a, V: Visitor<'a>>(
V::Result::output()
}
pub fn walk_pat<'a, V: Visitor<'a>>(visitor: &mut V, pattern: &'a Pat) -> V::Result {
let Pat { id, kind, span: _, tokens: _ } = pattern;
match kind {
PatKind::TupleStruct(opt_qself, path, elems) => {
try_visit!(visitor.visit_qself(opt_qself));
try_visit!(visitor.visit_path(path, *id));
walk_list!(visitor, visit_pat, elems);
}
PatKind::Path(opt_qself, path) => {
try_visit!(visitor.visit_qself(opt_qself));
try_visit!(visitor.visit_path(path, *id))
}
PatKind::Struct(opt_qself, path, fields, _rest) => {
try_visit!(visitor.visit_qself(opt_qself));
try_visit!(visitor.visit_path(path, *id));
walk_list!(visitor, visit_pat_field, fields);
}
PatKind::Box(subpattern) | PatKind::Deref(subpattern) | PatKind::Paren(subpattern) => {
try_visit!(visitor.visit_pat(subpattern));
}
PatKind::Ref(subpattern, _ /*mutbl*/) => {
try_visit!(visitor.visit_pat(subpattern));
}
PatKind::Ident(_bmode, ident, optional_subpattern) => {
try_visit!(visitor.visit_ident(ident));
visit_opt!(visitor, visit_pat, optional_subpattern);
}
PatKind::Expr(expression) => try_visit!(visitor.visit_expr(expression)),
PatKind::Range(lower_bound, upper_bound, _end) => {
visit_opt!(visitor, visit_expr, lower_bound);
visit_opt!(visitor, visit_expr, upper_bound);
}
PatKind::Guard(subpattern, guard_condition) => {
try_visit!(visitor.visit_pat(subpattern));
try_visit!(visitor.visit_expr(guard_condition));
}
PatKind::Missing | PatKind::Wild | PatKind::Rest | PatKind::Never => {}
PatKind::Err(_guar) => {}
PatKind::Tuple(elems) | PatKind::Slice(elems) | PatKind::Or(elems) => {
walk_list!(visitor, visit_pat, elems);
}
PatKind::MacCall(mac) => try_visit!(visitor.visit_mac_call(mac)),
}
V::Result::output()
}
pub fn walk_param_bound<'a, V: Visitor<'a>>(visitor: &mut V, bound: &'a GenericBound) -> V::Result {
match bound {
GenericBound::Trait(trait_ref) => visitor.visit_poly_trait_ref(trait_ref),
@ -1075,7 +1153,10 @@ pub fn walk_precise_capturing_arg<'a, V: Visitor<'a>>(
) -> V::Result {
match arg {
PreciseCapturingArg::Lifetime(lt) => visitor.visit_lifetime(lt, LifetimeCtxt::GenericArg),
PreciseCapturingArg::Arg(path, id) => visitor.visit_path(path, *id),
PreciseCapturingArg::Arg(path, id) => {
try_visit!(visitor.visit_id(*id));
visitor.visit_path(path)
}
}
}
@ -1216,11 +1297,9 @@ pub fn walk_fn<'a, V: Visitor<'a>>(visitor: &mut V, kind: FnKind<'a>) -> V::Resu
V::Result::output()
}
pub fn walk_struct_def<'a, V: Visitor<'a>>(
visitor: &mut V,
struct_definition: &'a VariantData,
) -> V::Result {
walk_list!(visitor, visit_field_def, struct_definition.fields());
pub fn walk_variant_data<'a, V: Visitor<'a>>(visitor: &mut V, data: &'a VariantData) -> V::Result {
visit_opt!(visitor, visit_id, data.ctor_node_id());
walk_list!(visitor, visit_field_def, data.fields());
V::Result::output()
}
@ -1235,12 +1314,6 @@ pub fn walk_field_def<'a, V: Visitor<'a>>(visitor: &mut V, field: &'a FieldDef)
V::Result::output()
}
pub fn walk_block<'a, V: Visitor<'a>>(visitor: &mut V, block: &'a Block) -> V::Result {
let Block { stmts, id: _, rules: _, span: _, tokens: _ } = block;
walk_list!(visitor, visit_stmt, stmts);
V::Result::output()
}
pub fn walk_stmt<'a, V: Visitor<'a>>(visitor: &mut V, statement: &'a Stmt) -> V::Result {
let Stmt { id: _, kind, span: _ } = statement;
match kind {
@ -1259,12 +1332,7 @@ pub fn walk_stmt<'a, V: Visitor<'a>>(visitor: &mut V, statement: &'a Stmt) -> V:
pub fn walk_mac<'a, V: Visitor<'a>>(visitor: &mut V, mac: &'a MacCall) -> V::Result {
let MacCall { path, args: _ } = mac;
visitor.visit_path(path, DUMMY_NODE_ID)
}
pub fn walk_anon_const<'a, V: Visitor<'a>>(visitor: &mut V, constant: &'a AnonConst) -> V::Result {
let AnonConst { id: _, value } = constant;
visitor.visit_expr(value)
visitor.visit_path(path)
}
pub fn walk_inline_asm<'a, V: Visitor<'a>>(visitor: &mut V, asm: &'a InlineAsm) -> V::Result {
@ -1304,7 +1372,8 @@ pub fn walk_inline_asm_sym<'a, V: Visitor<'a>>(
InlineAsmSym { id, qself, path }: &'a InlineAsmSym,
) -> V::Result {
try_visit!(visitor.visit_qself(qself));
visitor.visit_path(path, *id)
try_visit!(visitor.visit_id(*id));
visitor.visit_path(path)
}
pub fn walk_format_args<'a, V: Visitor<'a>>(visitor: &mut V, fmt: &'a FormatArgs) -> V::Result {
@ -1336,7 +1405,8 @@ pub fn walk_expr<'a, V: Visitor<'a>>(visitor: &mut V, expression: &'a Expr) -> V
ExprKind::Struct(se) => {
let StructExpr { qself, path, fields, rest } = &**se;
try_visit!(visitor.visit_qself(qself));
try_visit!(visitor.visit_path(path, *id));
try_visit!(visitor.visit_id(*id));
try_visit!(visitor.visit_path(path));
walk_list!(visitor, visit_expr_field, fields);
match rest {
StructRest::Base(expr) => try_visit!(visitor.visit_expr(expr)),
@ -1446,7 +1516,8 @@ pub fn walk_expr<'a, V: Visitor<'a>>(visitor: &mut V, expression: &'a Expr) -> V
ExprKind::Underscore => {}
ExprKind::Path(maybe_qself, path) => {
try_visit!(visitor.visit_qself(maybe_qself));
try_visit!(visitor.visit_path(path, *id));
try_visit!(visitor.visit_id(*id));
try_visit!(visitor.visit_path(path));
}
ExprKind::Break(opt_label, opt_expr) => {
visit_opt!(visitor, visit_label, opt_label);
@ -1509,7 +1580,8 @@ pub fn walk_vis<'a, V: Visitor<'a>>(visitor: &mut V, vis: &'a Visibility) -> V::
let Visibility { kind, span: _, tokens: _ } = vis;
match kind {
VisibilityKind::Restricted { path, id, shorthand: _ } => {
try_visit!(visitor.visit_path(path, *id));
try_visit!(visitor.visit_id(*id));
try_visit!(visitor.visit_path(path));
}
VisibilityKind::Public | VisibilityKind::Inherited => {}
}
@ -1522,7 +1594,7 @@ pub fn walk_attribute<'a, V: Visitor<'a>>(visitor: &mut V, attr: &'a Attribute)
AttrKind::Normal(normal) => {
let NormalAttr { item, tokens: _ } = &**normal;
let AttrItem { unsafety: _, path, args, tokens: _ } = item;
try_visit!(visitor.visit_path(path, DUMMY_NODE_ID));
try_visit!(visitor.visit_path(path));
try_visit!(walk_attr_args(visitor, args));
}
AttrKind::DocComment(_kind, _sym) => {}

View file

@ -446,13 +446,7 @@ impl<'a> SelfResolver<'a> {
}
impl<'ast, 'a> Visitor<'ast> for SelfResolver<'a> {
fn visit_path(&mut self, path: &'ast Path, id: NodeId) {
fn visit_id(&mut self, id: NodeId) {
self.try_replace_id(id);
visit::walk_path(self, path);
}
fn visit_path_segment(&mut self, seg: &'ast PathSegment) {
self.try_replace_id(seg.id);
visit::walk_path_segment(self, seg);
}
}

View file

@ -1,4 +1,3 @@
use std::assert_matches::assert_matches;
use std::ops::ControlFlow;
use std::sync::Arc;
@ -1199,11 +1198,13 @@ impl<'hir> LoweringContext<'_, 'hir> {
let closure_def_id = self.local_def_id(closure_id);
let (binder_clause, generic_params) = self.lower_closure_binder(binder);
assert_matches!(
coroutine_kind,
CoroutineKind::Async { .. },
"only async closures are supported currently"
);
let coroutine_desugaring = match coroutine_kind {
CoroutineKind::Async { .. } => hir::CoroutineDesugaring::Async,
CoroutineKind::Gen { .. } => hir::CoroutineDesugaring::Gen,
CoroutineKind::AsyncGen { span, .. } => {
span_bug!(span, "only async closures and `iter!` closures are supported currently")
}
};
let body = self.with_new_scopes(fn_decl_span, |this| {
let inner_decl =
@ -1247,7 +1248,7 @@ impl<'hir> LoweringContext<'_, 'hir> {
// Lower this as a `CoroutineClosure`. That will ensure that HIR typeck
// knows that a `FnDecl` output type like `-> &str` actually means
// "coroutine that returns &str", rather than directly returning a `&str`.
kind: hir::ClosureKind::CoroutineClosure(hir::CoroutineDesugaring::Async),
kind: hir::ClosureKind::CoroutineClosure(coroutine_desugaring),
constness: hir::Constness::NotConst,
});
hir::ExprKind::Closure(c)

View file

@ -3,10 +3,11 @@ use rustc_ast::ptr::P;
use rustc_ast::visit::AssocCtxt;
use rustc_ast::*;
use rustc_errors::ErrorGuaranteed;
use rustc_hir::def::{DefKind, Res};
use rustc_hir::def::{DefKind, PerNS, Res};
use rustc_hir::def_id::{CRATE_DEF_ID, LocalDefId};
use rustc_hir::{self as hir, HirId, LifetimeSource, PredicateOrigin};
use rustc_index::{IndexSlice, IndexVec};
use rustc_middle::span_bug;
use rustc_middle::ty::{ResolverAstLowering, TyCtxt};
use rustc_span::edit_distance::find_best_match_for_name;
use rustc_span::{DUMMY_SP, DesugaringKind, Ident, Span, Symbol, kw, sym};
@ -527,7 +528,22 @@ impl<'hir> LoweringContext<'_, 'hir> {
}
UseTreeKind::Glob => {
let res = self.expect_full_res(id);
let res = smallvec![self.lower_res(res)];
let res = self.lower_res(res);
// Put the result in the appropriate namespace.
let res = match res {
Res::Def(DefKind::Mod | DefKind::Trait, _) => {
PerNS { type_ns: Some(res), value_ns: None, macro_ns: None }
}
Res::Def(DefKind::Enum, _) => {
PerNS { type_ns: None, value_ns: Some(res), macro_ns: None }
}
Res::Err => {
// Propagate the error to all namespaces, just to be sure.
let err = Some(Res::Err);
PerNS { type_ns: err, value_ns: err, macro_ns: err }
}
_ => span_bug!(path.span, "bad glob res {:?}", res),
};
let path = Path { segments, span: path.span, tokens: None };
let path = self.lower_use_path(res, &path, ParamMode::Explicit);
hir::ItemKind::Use(path, hir::UseKind::Glob)
@ -601,7 +617,7 @@ impl<'hir> LoweringContext<'_, 'hir> {
} else {
// For non-empty lists we can just drop all the data, the prefix is already
// present in HIR as a part of nested imports.
self.arena.alloc(hir::UsePath { res: smallvec![], segments: &[], span })
self.arena.alloc(hir::UsePath { res: PerNS::default(), segments: &[], span })
};
hir::ItemKind::Use(path, hir::UseKind::ListStem)
}

View file

@ -64,7 +64,7 @@ use rustc_middle::ty::{ResolverAstLowering, TyCtxt};
use rustc_session::parse::{add_feature_diagnostics, feature_err};
use rustc_span::symbol::{Ident, Symbol, kw, sym};
use rustc_span::{DUMMY_SP, DesugaringKind, Span};
use smallvec::{SmallVec, smallvec};
use smallvec::SmallVec;
use thin_vec::ThinVec;
use tracing::{debug, instrument, trace};
@ -705,14 +705,16 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
self.resolver.get_partial_res(id).map_or(Res::Err, |pr| pr.expect_full_res())
}
fn lower_import_res(&mut self, id: NodeId, span: Span) -> SmallVec<[Res; 3]> {
let res = self.resolver.get_import_res(id).present_items();
let res: SmallVec<_> = res.map(|res| self.lower_res(res)).collect();
if res.is_empty() {
fn lower_import_res(&mut self, id: NodeId, span: Span) -> PerNS<Option<Res>> {
let per_ns = self.resolver.get_import_res(id);
let per_ns = per_ns.map(|res| res.map(|res| self.lower_res(res)));
if per_ns.is_empty() {
// Propagate the error to all namespaces, just to be sure.
self.dcx().span_delayed_bug(span, "no resolution for an import");
return smallvec![Res::Err];
let err = Some(Res::Err);
return PerNS { type_ns: err, value_ns: err, macro_ns: err };
}
res
per_ns
}
fn make_lang_item_qpath(

View file

@ -1,13 +1,13 @@
use std::sync::Arc;
use rustc_ast::{self as ast, *};
use rustc_hir::def::{DefKind, PartialRes, Res};
use rustc_hir::def::{DefKind, PartialRes, PerNS, Res};
use rustc_hir::def_id::DefId;
use rustc_hir::{self as hir, GenericArg};
use rustc_middle::{span_bug, ty};
use rustc_session::parse::add_feature_diagnostics;
use rustc_span::{BytePos, DUMMY_SP, DesugaringKind, Ident, Span, Symbol, sym};
use smallvec::{SmallVec, smallvec};
use smallvec::smallvec;
use tracing::{debug, instrument};
use super::errors::{
@ -226,11 +226,11 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
pub(crate) fn lower_use_path(
&mut self,
res: SmallVec<[Res; 3]>,
res: PerNS<Option<Res>>,
p: &Path,
param_mode: ParamMode,
) -> &'hir hir::UsePath<'hir> {
assert!((1..=3).contains(&res.len()));
assert!(!res.is_empty());
self.arena.alloc(hir::UsePath {
res,
segments: self.arena.alloc_from_iter(p.segments.iter().map(|segment| {

View file

@ -477,11 +477,12 @@ pub fn check_crate(krate: &ast::Crate, sess: &Session, features: &Features) {
for span in spans {
if (!visitor.features.coroutines() && !span.allows_unstable(sym::coroutines))
&& (!visitor.features.gen_blocks() && !span.allows_unstable(sym::gen_blocks))
&& (!visitor.features.yield_expr() && !span.allows_unstable(sym::yield_expr))
{
#[allow(rustc::untranslatable_diagnostic)]
// Don't know which of the two features to include in the
// error message, so I am arbitrarily picking one.
feature_err(&visitor.sess, sym::coroutines, *span, "yield syntax is experimental")
// Emit yield_expr as the error, since that will be sufficient. You can think of it
// as coroutines and gen_blocks imply yield_expr.
feature_err(&visitor.sess, sym::yield_expr, *span, "yield syntax is experimental")
.emit();
}
}

View file

@ -840,14 +840,22 @@ impl<'infcx, 'tcx> MirBorrowckCtxt<'_, 'infcx, 'tcx> {
} else {
bug!("not an upvar")
};
err.span_label(
*span,
format!(
"calling `{}` requires mutable binding due to {}",
self.describe_place(the_place_err).unwrap(),
reason
),
);
// sometimes we deliberately don't store the name of a place when coming from a macro in
// another crate. We generally want to limit those diagnostics a little, to hide
// implementation details (such as those from pin!() or format!()). In that case show a
// slightly different error message, or none at all if something else happened. In other
// cases the message is likely not useful.
if let Some(place_name) = self.describe_place(the_place_err) {
err.span_label(
*span,
format!("calling `{place_name}` requires mutable binding due to {reason}"),
);
} else if span.from_expansion() {
err.span_label(
*span,
format!("a call in this macro requires a mutable binding due to {reason}",),
);
}
}
}

View file

@ -52,7 +52,7 @@ impl<'a, 'tcx> TypeChecker<'a, 'tcx> {
assert_matches!(
self.tcx().coroutine_kind(self.tcx().coroutine_for_closure(mir_def_id)),
Some(hir::CoroutineKind::Desugared(
hir::CoroutineDesugaring::Async,
hir::CoroutineDesugaring::Async | hir::CoroutineDesugaring::Gen,
hir::CoroutineSource::Closure
)),
"this needs to be modified if we're lowering non-async closures"

View file

@ -8,6 +8,8 @@ use crate::deriving::generic::ty::*;
use crate::deriving::generic::*;
use crate::deriving::{path_local, path_std};
/// Expands a `#[derive(PartialEq)]` attribute into an implementation for the
/// target item.
pub(crate) fn expand_deriving_partial_eq(
cx: &ExtCtxt<'_>,
span: Span,
@ -16,62 +18,6 @@ pub(crate) fn expand_deriving_partial_eq(
push: &mut dyn FnMut(Annotatable),
is_const: bool,
) {
fn cs_eq(cx: &ExtCtxt<'_>, span: Span, substr: &Substructure<'_>) -> BlockOrExpr {
let base = true;
let expr = cs_fold(
true, // use foldl
cx,
span,
substr,
|cx, fold| match fold {
CsFold::Single(field) => {
let [other_expr] = &field.other_selflike_exprs[..] else {
cx.dcx()
.span_bug(field.span, "not exactly 2 arguments in `derive(PartialEq)`");
};
// We received arguments of type `&T`. Convert them to type `T` by stripping
// any leading `&`. This isn't necessary for type checking, but
// it results in better error messages if something goes wrong.
//
// Note: for arguments that look like `&{ x }`, which occur with packed
// structs, this would cause expressions like `{ self.x } == { other.x }`,
// which isn't valid Rust syntax. This wouldn't break compilation because these
// AST nodes are constructed within the compiler. But it would mean that code
// printed by `-Zunpretty=expanded` (or `cargo expand`) would have invalid
// syntax, which would be suboptimal. So we wrap these in parens, giving
// `({ self.x }) == ({ other.x })`, which is valid syntax.
let convert = |expr: &P<Expr>| {
if let ExprKind::AddrOf(BorrowKind::Ref, Mutability::Not, inner) =
&expr.kind
{
if let ExprKind::Block(..) = &inner.kind {
// `&{ x }` form: remove the `&`, add parens.
cx.expr_paren(field.span, inner.clone())
} else {
// `&x` form: remove the `&`.
inner.clone()
}
} else {
expr.clone()
}
};
cx.expr_binary(
field.span,
BinOpKind::Eq,
convert(&field.self_expr),
convert(other_expr),
)
}
CsFold::Combine(span, expr1, expr2) => {
cx.expr_binary(span, BinOpKind::And, expr1, expr2)
}
CsFold::Fieldless => cx.expr_bool(span, base),
},
);
BlockOrExpr::new_expr(expr)
}
let structural_trait_def = TraitDef {
span,
path: path_std!(marker::StructuralPartialEq),
@ -97,7 +43,9 @@ pub(crate) fn expand_deriving_partial_eq(
ret_ty: Path(path_local!(bool)),
attributes: thin_vec![cx.attr_word(sym::inline, span)],
fieldless_variants_strategy: FieldlessVariantsStrategy::Unify,
combine_substructure: combine_substructure(Box::new(|a, b, c| cs_eq(a, b, c))),
combine_substructure: combine_substructure(Box::new(|a, b, c| {
BlockOrExpr::new_expr(get_substructure_equality_expr(a, b, c))
})),
}];
let trait_def = TraitDef {
@ -113,3 +61,156 @@ pub(crate) fn expand_deriving_partial_eq(
};
trait_def.expand(cx, mitem, item, push)
}
/// Generates the equality expression for a struct or enum variant when deriving
/// `PartialEq`.
///
/// This function generates an expression that checks if all fields of a struct
/// or enum variant are equal.
/// - Scalar fields are compared first for efficiency, followed by compound
/// fields.
/// - If there are no fields, returns `true` (fieldless types are always equal).
///
/// Whether a field is considered "scalar" is determined by comparing the symbol
/// of its type to a set of known scalar type symbols (e.g., `i32`, `u8`, etc).
/// This check is based on the type's symbol.
///
/// ### Example 1
/// ```
/// #[derive(PartialEq)]
/// struct i32;
///
/// // Here, `field_2` is of type `i32`, but since it's a user-defined type (not
/// // the primitive), it will not be treated as scalar. The function will still
/// // check equality of `field_2` first because the symbol matches `i32`.
/// #[derive(PartialEq)]
/// struct Struct {
/// field_1: &'static str,
/// field_2: i32,
/// }
/// ```
///
/// ### Example 2
/// ```
/// mod ty {
/// pub type i32 = i32;
/// }
///
/// // Here, `field_2` is of type `ty::i32`, which is a type alias for `i32`.
/// // However, the function will not reorder the fields because the symbol for
/// // `ty::i32` does not match the symbol for the primitive `i32`
/// // ("ty::i32" != "i32").
/// #[derive(PartialEq)]
/// struct Struct {
/// field_1: &'static str,
/// field_2: ty::i32,
/// }
/// ```
///
/// For enums, the discriminant is compared first, then the rest of the fields.
///
/// # Panics
///
/// If called on static or all-fieldless enums/structs, which should not occur
/// during derive expansion.
fn get_substructure_equality_expr(
cx: &ExtCtxt<'_>,
span: Span,
substructure: &Substructure<'_>,
) -> P<Expr> {
use SubstructureFields::*;
match substructure.fields {
EnumMatching(.., fields) | Struct(.., fields) => {
let combine = move |acc, field| {
let rhs = get_field_equality_expr(cx, field);
if let Some(lhs) = acc {
// Combine the previous comparison with the current field
// using logical AND.
return Some(cx.expr_binary(field.span, BinOpKind::And, lhs, rhs));
}
// Start the chain with the first field's comparison.
Some(rhs)
};
// First compare scalar fields, then compound fields, combining all
// with logical AND.
return fields
.iter()
.filter(|field| !field.maybe_scalar)
.fold(fields.iter().filter(|field| field.maybe_scalar).fold(None, combine), combine)
// If there are no fields, treat as always equal.
.unwrap_or_else(|| cx.expr_bool(span, true));
}
EnumDiscr(disc, match_expr) => {
let lhs = get_field_equality_expr(cx, disc);
let Some(match_expr) = match_expr else {
return lhs;
};
// Compare the discriminant first (cheaper), then the rest of the
// fields.
return cx.expr_binary(disc.span, BinOpKind::And, lhs, match_expr.clone());
}
StaticEnum(..) => cx.dcx().span_bug(
span,
"unexpected static enum encountered during `derive(PartialEq)` expansion",
),
StaticStruct(..) => cx.dcx().span_bug(
span,
"unexpected static struct encountered during `derive(PartialEq)` expansion",
),
AllFieldlessEnum(..) => cx.dcx().span_bug(
span,
"unexpected all-fieldless enum encountered during `derive(PartialEq)` expansion",
),
}
}
/// Generates an equality comparison expression for a single struct or enum
/// field.
///
/// This function produces an AST expression that compares the `self` and
/// `other` values for a field using `==`. It removes any leading references
/// from both sides for readability. If the field is a block expression, it is
/// wrapped in parentheses to ensure valid syntax.
///
/// # Panics
///
/// Panics if there are not exactly two arguments to compare (should be `self`
/// and `other`).
fn get_field_equality_expr(cx: &ExtCtxt<'_>, field: &FieldInfo) -> P<Expr> {
let [rhs] = &field.other_selflike_exprs[..] else {
cx.dcx().span_bug(field.span, "not exactly 2 arguments in `derive(PartialEq)`");
};
cx.expr_binary(
field.span,
BinOpKind::Eq,
wrap_block_expr(cx, peel_refs(&field.self_expr)),
wrap_block_expr(cx, peel_refs(rhs)),
)
}
/// Removes all leading immutable references from an expression.
///
/// This is used to strip away any number of leading `&` from an expression
/// (e.g., `&&&T` becomes `T`). Only removes immutable references; mutable
/// references are preserved.
fn peel_refs(mut expr: &P<Expr>) -> P<Expr> {
while let ExprKind::AddrOf(BorrowKind::Ref, Mutability::Not, inner) = &expr.kind {
expr = &inner;
}
expr.clone()
}
/// Wraps a block expression in parentheses to ensure valid AST in macro
/// expansion output.
///
/// If the given expression is a block, it is wrapped in parentheses; otherwise,
/// it is returned unchanged.
fn wrap_block_expr(cx: &ExtCtxt<'_>, expr: P<Expr>) -> P<Expr> {
if matches!(&expr.kind, ExprKind::Block(..)) {
return cx.expr_paren(expr.span, expr);
}
expr
}

View file

@ -284,6 +284,7 @@ pub(crate) struct FieldInfo {
/// The expressions corresponding to references to this field in
/// the other selflike arguments.
pub other_selflike_exprs: Vec<P<Expr>>,
pub maybe_scalar: bool,
}
#[derive(Copy, Clone)]
@ -1220,7 +1221,8 @@ impl<'a> MethodDef<'a> {
let self_expr = discr_exprs.remove(0);
let other_selflike_exprs = discr_exprs;
let discr_field = FieldInfo { span, name: None, self_expr, other_selflike_exprs };
let discr_field =
FieldInfo { span, name: None, self_expr, other_selflike_exprs, maybe_scalar: true };
let discr_let_stmts: ThinVec<_> = iter::zip(&discr_idents, &selflike_args)
.map(|(&ident, selflike_arg)| {
@ -1533,6 +1535,7 @@ impl<'a> TraitDef<'a> {
name: struct_field.ident,
self_expr,
other_selflike_exprs,
maybe_scalar: struct_field.ty.peel_refs().kind.maybe_scalar(),
}
})
.collect()

View file

@ -0,0 +1,53 @@
use rustc_ast::ptr::P;
use rustc_ast::tokenstream::TokenStream;
use rustc_ast::{CoroutineKind, DUMMY_NODE_ID, Expr, ast, token};
use rustc_errors::PResult;
use rustc_expand::base::{self, DummyResult, ExpandResult, ExtCtxt, MacroExpanderResult};
use rustc_span::Span;
pub(crate) fn expand<'cx>(
cx: &'cx mut ExtCtxt<'_>,
sp: Span,
tts: TokenStream,
) -> MacroExpanderResult<'cx> {
let closure = match parse_closure(cx, sp, tts) {
Ok(parsed) => parsed,
Err(err) => {
return ExpandResult::Ready(DummyResult::any(sp, err.emit()));
}
};
ExpandResult::Ready(base::MacEager::expr(closure))
}
fn parse_closure<'a>(
cx: &mut ExtCtxt<'a>,
span: Span,
stream: TokenStream,
) -> PResult<'a, P<Expr>> {
let mut closure_parser = cx.new_parser_from_tts(stream);
let coroutine_kind = Some(CoroutineKind::Gen {
span,
closure_id: DUMMY_NODE_ID,
return_impl_trait_id: DUMMY_NODE_ID,
});
let mut closure = closure_parser.parse_expr()?;
match &mut closure.kind {
ast::ExprKind::Closure(c) => {
if let Some(kind) = c.coroutine_kind {
cx.dcx().span_err(kind.span(), "only plain closures allowed in `iter!`");
}
c.coroutine_kind = coroutine_kind;
if closure_parser.token != token::Eof {
closure_parser.unexpected()?;
}
Ok(closure)
}
_ => {
cx.dcx().span_err(closure.span, "`iter!` body must be a closure");
Err(closure_parser.unexpected().unwrap_err())
}
}
}

View file

@ -47,6 +47,7 @@ mod errors;
mod format;
mod format_foreign;
mod global_allocator;
mod iter;
mod log_syntax;
mod pattern_type;
mod source_util;
@ -95,6 +96,7 @@ pub fn register_builtin_macros(resolver: &mut dyn ResolverExpand) {
include: source_util::expand_include,
include_bytes: source_util::expand_include_bytes,
include_str: source_util::expand_include_str,
iter: iter::expand,
line: source_util::expand_line,
log_syntax: log_syntax::expand_log_syntax,
module_path: source_util::expand_mod,

View file

@ -213,11 +213,13 @@ pub(crate) fn copy_dir_recursively(from: &Path, to: &Path) {
if filename == "." || filename == ".." {
continue;
}
let src = from.join(&filename);
let dst = to.join(&filename);
if entry.metadata().unwrap().is_dir() {
fs::create_dir(to.join(&filename)).unwrap();
copy_dir_recursively(&from.join(&filename), &to.join(&filename));
fs::create_dir(&dst).unwrap_or_else(|e| panic!("failed to create {dst:?}: {e}"));
copy_dir_recursively(&src, &dst);
} else {
fs::copy(from.join(&filename), to.join(&filename)).unwrap();
fs::copy(&src, &dst).unwrap_or_else(|e| panic!("failed to copy {src:?}->{dst:?}: {e}"));
}
}
}

View file

@ -10,7 +10,7 @@ use std::mem;
use cranelift_codegen::ir::{ArgumentPurpose, SigRef};
use cranelift_codegen::isa::CallConv;
use cranelift_module::ModuleError;
use rustc_abi::ExternAbi;
use rustc_abi::{CanonAbi, ExternAbi, X86Call};
use rustc_codegen_ssa::base::is_call_from_compiler_builtins_to_upstream_monomorphization;
use rustc_codegen_ssa::errors::CompilerBuiltinsCannotCall;
use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrFlags;
@ -19,7 +19,7 @@ use rustc_middle::ty::layout::FnAbiOf;
use rustc_middle::ty::print::with_no_trimmed_paths;
use rustc_session::Session;
use rustc_span::source_map::Spanned;
use rustc_target::callconv::{Conv, FnAbi, PassMode};
use rustc_target::callconv::{FnAbi, PassMode};
use smallvec::SmallVec;
use self::pass_mode::*;
@ -42,32 +42,27 @@ fn clif_sig_from_fn_abi<'tcx>(
Signature { params, returns, call_conv }
}
pub(crate) fn conv_to_call_conv(sess: &Session, c: Conv, default_call_conv: CallConv) -> CallConv {
pub(crate) fn conv_to_call_conv(
sess: &Session,
c: CanonAbi,
default_call_conv: CallConv,
) -> CallConv {
match c {
Conv::Rust | Conv::C => default_call_conv,
Conv::Cold | Conv::PreserveMost | Conv::PreserveAll => CallConv::Cold,
Conv::X86_64SysV => CallConv::SystemV,
Conv::X86_64Win64 => CallConv::WindowsFastcall,
CanonAbi::Rust | CanonAbi::C => default_call_conv,
CanonAbi::RustCold => CallConv::Cold,
// Should already get a back compat warning
Conv::X86Fastcall | Conv::X86Stdcall | Conv::X86ThisCall | Conv::X86VectorCall => {
default_call_conv
}
CanonAbi::X86(x86_call) => match x86_call {
X86Call::SysV64 => CallConv::SystemV,
X86Call::Win64 => CallConv::WindowsFastcall,
// Should already get a back compat warning
_ => default_call_conv,
},
Conv::X86Intr | Conv::RiscvInterrupt { .. } => {
sess.dcx().fatal(format!("interrupt call conv {c:?} not yet implemented"))
CanonAbi::Interrupt(_) | CanonAbi::Arm(_) => {
sess.dcx().fatal("call conv {c:?} is not yet implemented")
}
Conv::ArmAapcs => sess.dcx().fatal("aapcs call conv not yet implemented"),
Conv::CCmseNonSecureCall => {
sess.dcx().fatal("C-cmse-nonsecure-call call conv is not yet implemented");
}
Conv::CCmseNonSecureEntry => {
sess.dcx().fatal("C-cmse-nonsecure-entry call conv is not yet implemented");
}
Conv::Msp430Intr | Conv::GpuKernel | Conv::AvrInterrupt | Conv::AvrNonBlockingInterrupt => {
unreachable!("tried to use {c:?} call conv which only exists on an unsupported target");
CanonAbi::GpuKernel => {
unreachable!("tried to use {c:?} call conv which only exists on an unsupported target")
}
}
}
@ -610,7 +605,7 @@ pub(crate) fn codegen_terminator_call<'tcx>(
target: CallTarget,
call_args: &mut Vec<Value>,
) {
if fn_abi.conv != Conv::C {
if fn_abi.conv != CanonAbi::C {
fx.tcx.dcx().span_fatal(
source_info.span,
format!("Variadic call for non-C abi {:?}", fn_abi.conv),

View file

@ -1,7 +1,7 @@
#[cfg(feature = "master")]
use gccjit::FnAttribute;
use gccjit::{ToLValue, ToRValue, Type};
use rustc_abi::{Reg, RegKind};
use rustc_abi::{ArmCall, CanonAbi, InterruptKind, Reg, RegKind, X86Call};
use rustc_codegen_ssa::traits::{AbiBuilderMethods, BaseTypeCodegenMethods};
use rustc_data_structures::fx::FxHashSet;
use rustc_middle::bug;
@ -10,8 +10,6 @@ use rustc_middle::ty::layout::LayoutOf;
#[cfg(feature = "master")]
use rustc_session::config;
use rustc_target::callconv::{ArgAttributes, CastTarget, FnAbi, PassMode};
#[cfg(feature = "master")]
use rustc_target::callconv::{Conv, RiscvInterruptKind};
use crate::builder::Builder;
use crate::context::CodegenCx;
@ -238,29 +236,16 @@ impl<'gcc, 'tcx> FnAbiGccExt<'gcc, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
}
#[cfg(feature = "master")]
pub fn conv_to_fn_attribute<'gcc>(conv: Conv, arch: &str) -> Option<FnAttribute<'gcc>> {
pub fn conv_to_fn_attribute<'gcc>(conv: CanonAbi, arch: &str) -> Option<FnAttribute<'gcc>> {
let attribute = match conv {
Conv::C | Conv::Rust => return None,
Conv::CCmseNonSecureCall => {
if arch == "arm" {
FnAttribute::ArmCmseNonsecureCall
} else {
return None;
}
}
Conv::CCmseNonSecureEntry => {
if arch == "arm" {
FnAttribute::ArmCmseNonsecureEntry
} else {
return None;
}
}
Conv::Cold => FnAttribute::Cold,
// NOTE: the preserve attributes are not yet implemented in GCC:
// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=110899
Conv::PreserveMost => return None,
Conv::PreserveAll => return None,
Conv::GpuKernel => {
CanonAbi::C | CanonAbi::Rust => return None,
CanonAbi::Arm(arm_call) => match arm_call {
ArmCall::CCmseNonSecureCall => FnAttribute::ArmCmseNonsecureCall,
ArmCall::CCmseNonSecureEntry => FnAttribute::ArmCmseNonsecureEntry,
ArmCall::Aapcs => FnAttribute::ArmPcs("aapcs"),
},
CanonAbi::RustCold => FnAttribute::Cold,
CanonAbi::GpuKernel => {
if arch == "amdgpu" {
FnAttribute::GcnAmdGpuHsaKernel
} else if arch == "nvptx64" {
@ -270,26 +255,24 @@ pub fn conv_to_fn_attribute<'gcc>(conv: Conv, arch: &str) -> Option<FnAttribute<
}
}
// TODO(antoyo): check if those AVR attributes are mapped correctly.
Conv::AvrInterrupt => FnAttribute::AvrSignal,
Conv::AvrNonBlockingInterrupt => FnAttribute::AvrInterrupt,
Conv::ArmAapcs => FnAttribute::ArmPcs("aapcs"),
Conv::Msp430Intr => FnAttribute::Msp430Interrupt,
Conv::RiscvInterrupt { kind } => {
let kind = match kind {
RiscvInterruptKind::Machine => "machine",
RiscvInterruptKind::Supervisor => "supervisor",
};
FnAttribute::RiscvInterrupt(kind)
}
Conv::X86Fastcall => FnAttribute::X86FastCall,
Conv::X86Intr => FnAttribute::X86Interrupt,
Conv::X86Stdcall => FnAttribute::X86Stdcall,
Conv::X86ThisCall => FnAttribute::X86ThisCall,
// NOTE: the vectorcall calling convention is not yet implemented in GCC:
// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=89485
Conv::X86VectorCall => return None,
Conv::X86_64SysV => FnAttribute::X86SysvAbi,
Conv::X86_64Win64 => FnAttribute::X86MsAbi,
CanonAbi::Interrupt(interrupt_kind) => match interrupt_kind {
InterruptKind::Avr => FnAttribute::AvrSignal,
InterruptKind::AvrNonBlocking => FnAttribute::AvrInterrupt,
InterruptKind::Msp430 => FnAttribute::Msp430Interrupt,
InterruptKind::RiscvMachine => FnAttribute::RiscvInterrupt("machine"),
InterruptKind::RiscvSupervisor => FnAttribute::RiscvInterrupt("supervisor"),
InterruptKind::X86 => FnAttribute::X86Interrupt,
},
CanonAbi::X86(x86_call) => match x86_call {
X86Call::Fastcall => FnAttribute::X86FastCall,
X86Call::Stdcall => FnAttribute::X86Stdcall,
X86Call::Thiscall => FnAttribute::X86ThisCall,
// // NOTE: the vectorcall calling convention is not yet implemented in GCC:
// // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=89485
X86Call::Vectorcall => return None,
X86Call::SysV64 => FnAttribute::X86SysvAbi,
X86Call::Win64 => FnAttribute::X86MsAbi,
},
};
Some(attribute)
}

View file

@ -52,10 +52,6 @@ impl<'a, 'gcc, 'tcx> DebugInfoBuilderMethods for Builder<'a, 'gcc, 'tcx> {
fn clear_dbg_loc(&mut self) {
self.location = None;
}
fn get_dbg_loc(&self) -> Option<Self::DILocation> {
self.location
}
}
/// Generate the `debug_context` in an MIR Body.

View file

@ -3,11 +3,11 @@
//! 128-bit integers on 32-bit platforms and thus require to be handled manually.
use gccjit::{BinaryOp, ComparisonOp, FunctionType, Location, RValue, ToRValue, Type, UnaryOp};
use rustc_abi::{Endian, ExternAbi};
use rustc_abi::{CanonAbi, Endian, ExternAbi};
use rustc_codegen_ssa::common::{IntPredicate, TypeKind};
use rustc_codegen_ssa::traits::{BackendTypes, BaseTypeCodegenMethods, BuilderMethods, OverflowOp};
use rustc_middle::ty::{self, Ty};
use rustc_target::callconv::{ArgAbi, ArgAttributes, Conv, FnAbi, PassMode};
use rustc_target::callconv::{ArgAbi, ArgAttributes, FnAbi, PassMode};
use crate::builder::{Builder, ToGccComp};
use crate::common::{SignType, TypeReflection};
@ -397,7 +397,7 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
ret: arg_abi,
c_variadic: false,
fixed_count: 3,
conv: Conv::C,
conv: CanonAbi::C,
can_unwind: false,
};
fn_abi.adjust_for_foreign_abi(self.cx, ExternAbi::C { unwind: false });

View file

@ -524,11 +524,6 @@ impl<'a, 'gcc, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tc
cond
}
fn type_test(&mut self, _pointer: Self::Value, _typeid: Self::Value) -> Self::Value {
// Unsupported.
self.context.new_rvalue_from_int(self.int_type, 0)
}
fn type_checked_load(
&mut self,
_llvtable: Self::Value,

View file

@ -2,7 +2,10 @@ use std::borrow::Borrow;
use std::cmp;
use libc::c_uint;
use rustc_abi::{BackendRepr, HasDataLayout, Primitive, Reg, RegKind, Size};
use rustc_abi::{
ArmCall, BackendRepr, CanonAbi, HasDataLayout, InterruptKind, Primitive, Reg, RegKind, Size,
X86Call,
};
use rustc_codegen_ssa::MemFlags;
use rustc_codegen_ssa::mir::operand::{OperandRef, OperandValue};
use rustc_codegen_ssa::mir::place::{PlaceRef, PlaceValue};
@ -12,7 +15,7 @@ use rustc_middle::ty::layout::LayoutOf;
use rustc_middle::{bug, ty};
use rustc_session::config;
use rustc_target::callconv::{
ArgAbi, ArgAttribute, ArgAttributes, ArgExtension, CastTarget, Conv, FnAbi, PassMode,
ArgAbi, ArgAttribute, ArgAttributes, ArgExtension, CastTarget, FnAbi, PassMode,
};
use rustc_target::spec::SanitizerSet;
use smallvec::SmallVec;
@ -409,11 +412,17 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
if !self.can_unwind {
func_attrs.push(llvm::AttributeKind::NoUnwind.create_attr(cx.llcx));
}
if let Conv::RiscvInterrupt { kind } = self.conv {
func_attrs.push(llvm::CreateAttrStringValue(cx.llcx, "interrupt", kind.as_str()));
}
if let Conv::CCmseNonSecureEntry = self.conv {
func_attrs.push(llvm::CreateAttrString(cx.llcx, "cmse_nonsecure_entry"))
match self.conv {
CanonAbi::Interrupt(InterruptKind::RiscvMachine) => {
func_attrs.push(llvm::CreateAttrStringValue(cx.llcx, "interrupt", "machine"))
}
CanonAbi::Interrupt(InterruptKind::RiscvSupervisor) => {
func_attrs.push(llvm::CreateAttrStringValue(cx.llcx, "interrupt", "supervisor"))
}
CanonAbi::Arm(ArmCall::CCmseNonSecureEntry) => {
func_attrs.push(llvm::CreateAttrString(cx.llcx, "cmse_nonsecure_entry"))
}
_ => (),
}
attributes::apply_to_llfn(llfn, llvm::AttributePlace::Function, &{ func_attrs });
@ -600,7 +609,7 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
llvm::SetInstructionCallConv(callsite, cconv);
}
if self.conv == Conv::CCmseNonSecureCall {
if self.conv == CanonAbi::Arm(ArmCall::CCmseNonSecureCall) {
// This will probably get ignored on all targets but those supporting the TrustZone-M
// extension (thumbv8m targets).
let cmse_nonsecure_call = llvm::CreateAttrString(bx.cx.llcx, "cmse_nonsecure_call");
@ -636,17 +645,11 @@ impl AbiBuilderMethods for Builder<'_, '_, '_> {
}
impl llvm::CallConv {
pub(crate) fn from_conv(conv: Conv, arch: &str) -> Self {
pub(crate) fn from_conv(conv: CanonAbi, arch: &str) -> Self {
match conv {
Conv::C
| Conv::Rust
| Conv::CCmseNonSecureCall
| Conv::CCmseNonSecureEntry
| Conv::RiscvInterrupt { .. } => llvm::CCallConv,
Conv::Cold => llvm::ColdCallConv,
Conv::PreserveMost => llvm::PreserveMost,
Conv::PreserveAll => llvm::PreserveAll,
Conv::GpuKernel => {
CanonAbi::C | CanonAbi::Rust => llvm::CCallConv,
CanonAbi::RustCold => llvm::PreserveMost,
CanonAbi::GpuKernel => {
if arch == "amdgpu" {
llvm::AmdgpuKernel
} else if arch == "nvptx64" {
@ -655,17 +658,25 @@ impl llvm::CallConv {
panic!("Architecture {arch} does not support GpuKernel calling convention");
}
}
Conv::AvrInterrupt => llvm::AvrInterrupt,
Conv::AvrNonBlockingInterrupt => llvm::AvrNonBlockingInterrupt,
Conv::ArmAapcs => llvm::ArmAapcsCallConv,
Conv::Msp430Intr => llvm::Msp430Intr,
Conv::X86Fastcall => llvm::X86FastcallCallConv,
Conv::X86Intr => llvm::X86_Intr,
Conv::X86Stdcall => llvm::X86StdcallCallConv,
Conv::X86ThisCall => llvm::X86_ThisCall,
Conv::X86VectorCall => llvm::X86_VectorCall,
Conv::X86_64SysV => llvm::X86_64_SysV,
Conv::X86_64Win64 => llvm::X86_64_Win64,
CanonAbi::Interrupt(interrupt_kind) => match interrupt_kind {
InterruptKind::Avr => llvm::AvrInterrupt,
InterruptKind::AvrNonBlocking => llvm::AvrNonBlockingInterrupt,
InterruptKind::Msp430 => llvm::Msp430Intr,
InterruptKind::RiscvMachine | InterruptKind::RiscvSupervisor => llvm::CCallConv,
InterruptKind::X86 => llvm::X86_Intr,
},
CanonAbi::Arm(arm_call) => match arm_call {
ArmCall::Aapcs => llvm::ArmAapcsCallConv,
ArmCall::CCmseNonSecureCall | ArmCall::CCmseNonSecureEntry => llvm::CCallConv,
},
CanonAbi::X86(x86_call) => match x86_call {
X86Call::Fastcall => llvm::X86FastcallCallConv,
X86Call::Stdcall => llvm::X86StdcallCallConv,
X86Call::SysV64 => llvm::X86_64_SysV,
X86Call::Thiscall => llvm::X86_ThisCall,
X86Call::Vectorcall => llvm::X86_VectorCall,
X86Call::Win64 => llvm::X86_64_Win64,
},
}
}
}

View file

@ -1815,8 +1815,11 @@ impl<'a, 'll, 'tcx> Builder<'a, 'll, 'tcx> {
let typeid_metadata = self.cx.typeid_metadata(typeid).unwrap();
let dbg_loc = self.get_dbg_loc();
// Test whether the function pointer is associated with the type identifier.
let cond = self.type_test(llfn, typeid_metadata);
// Test whether the function pointer is associated with the type identifier using the
// llvm.type.test intrinsic. The LowerTypeTests link-time optimization pass replaces
// calls to this intrinsic with code to test type membership.
let typeid = self.get_metadata_value(typeid_metadata);
let cond = self.call_intrinsic("llvm.type.test", &[llfn, typeid]);
let bb_pass = self.append_sibling_block("type_test.pass");
let bb_fail = self.append_sibling_block("type_test.fail");
self.cond_br(cond, bb_pass, bb_fail);

View file

@ -147,6 +147,12 @@ pub(crate) fn finalize(cx: &CodegenCx<'_, '_>) {
}
}
impl<'ll> Builder<'_, 'll, '_> {
pub(crate) fn get_dbg_loc(&self) -> Option<&'ll DILocation> {
unsafe { llvm::LLVMGetCurrentDebugLocation2(self.llbuilder) }
}
}
impl<'ll> DebugInfoBuilderMethods for Builder<'_, 'll, '_> {
// FIXME(eddyb) find a common convention for all of the debuginfo-related
// names (choose between `dbg`, `debug`, `debuginfo`, `debug_info` etc.).
@ -209,10 +215,6 @@ impl<'ll> DebugInfoBuilderMethods for Builder<'_, 'll, '_> {
}
}
fn get_dbg_loc(&self) -> Option<&'ll DILocation> {
unsafe { llvm::LLVMGetCurrentDebugLocation2(self.llbuilder) }
}
fn insert_reference_to_gdb_debug_scripts_section_global(&mut self) {
gdb::insert_reference_to_gdb_debug_scripts_section_global(self)
}

View file

@ -636,13 +636,6 @@ impl<'ll, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> {
}
}
fn type_test(&mut self, pointer: Self::Value, typeid: Self::Metadata) -> Self::Value {
// Test the called operand using llvm.type.test intrinsic. The LowerTypeTests link-time
// optimization pass replaces calls to this intrinsic with code to test type membership.
let typeid = self.get_metadata_value(typeid);
self.call_intrinsic("llvm.type.test", &[pointer, typeid])
}
fn type_checked_load(
&mut self,
llvtable: &'ll Value,

View file

@ -1,5 +1,6 @@
use std::collections::hash_map::Entry::*;
use rustc_abi::{CanonAbi, X86Call};
use rustc_ast::expand::allocator::{ALLOCATOR_METHODS, NO_ALLOC_SHIM_IS_UNSTABLE, global_fn_name};
use rustc_data_structures::unord::UnordMap;
use rustc_hir::def::DefKind;
@ -14,7 +15,6 @@ use rustc_middle::ty::{self, GenericArgKind, GenericArgsRef, Instance, SymbolNam
use rustc_middle::util::Providers;
use rustc_session::config::{CrateType, OomStrategy};
use rustc_symbol_mangling::mangle_internal_symbol;
use rustc_target::callconv::Conv;
use rustc_target::spec::{SanitizerSet, TlsModel};
use tracing::debug;
@ -652,7 +652,7 @@ pub(crate) fn symbol_name_for_instance_in_crate<'tcx>(
fn calling_convention_for_symbol<'tcx>(
tcx: TyCtxt<'tcx>,
symbol: ExportedSymbol<'tcx>,
) -> (Conv, &'tcx [rustc_target::callconv::ArgAbi<'tcx, Ty<'tcx>>]) {
) -> (CanonAbi, &'tcx [rustc_target::callconv::ArgAbi<'tcx, Ty<'tcx>>]) {
let instance = match symbol {
ExportedSymbol::NonGeneric(def_id) | ExportedSymbol::Generic(def_id, _)
if tcx.is_static(def_id) =>
@ -683,7 +683,7 @@ fn calling_convention_for_symbol<'tcx>(
})
.map(|fnabi| (fnabi.conv, &fnabi.args[..]))
// FIXME(workingjubilee): why don't we know the convention here?
.unwrap_or((Conv::Rust, &[]))
.unwrap_or((CanonAbi::Rust, &[]))
}
/// This is the symbol name of the given instance as seen by the linker.
@ -717,14 +717,14 @@ pub(crate) fn linking_symbol_name_for_instance_in_crate<'tcx>(
_ => return undecorated,
};
let (conv, args) = calling_convention_for_symbol(tcx, symbol);
let (callconv, args) = calling_convention_for_symbol(tcx, symbol);
// Decorate symbols with prefixes, suffixes and total number of bytes of arguments.
// Reference: https://docs.microsoft.com/en-us/cpp/build/reference/decorated-names?view=msvc-170
let (prefix, suffix) = match conv {
Conv::X86Fastcall => ("@", "@"),
Conv::X86Stdcall => ("_", "@"),
Conv::X86VectorCall => ("", "@@"),
let (prefix, suffix) = match callconv {
CanonAbi::X86(X86Call::Fastcall) => ("@", "@"),
CanonAbi::X86(X86Call::Stdcall) => ("_", "@"),
CanonAbi::X86(X86Call::Vectorcall) => ("", "@@"),
_ => {
if let Some(prefix) = prefix {
undecorated.insert(0, prefix);
@ -758,9 +758,9 @@ pub(crate) fn extend_exported_symbols<'tcx>(
symbol: ExportedSymbol<'tcx>,
instantiating_crate: CrateNum,
) {
let (conv, _) = calling_convention_for_symbol(tcx, symbol);
let (callconv, _) = calling_convention_for_symbol(tcx, symbol);
if conv != Conv::GpuKernel || tcx.sess.target.os != "amdhsa" {
if callconv != CanonAbi::GpuKernel || tcx.sess.target.os != "amdhsa" {
return;
}

View file

@ -81,7 +81,6 @@ pub trait DebugInfoBuilderMethods: BackendTypes {
);
fn set_dbg_loc(&mut self, dbg_loc: Self::DILocation);
fn clear_dbg_loc(&mut self);
fn get_dbg_loc(&self) -> Option<Self::DILocation>;
fn insert_reference_to_gdb_debug_scripts_section_global(&mut self);
fn set_var_name(&mut self, value: Self::Value, name: &str);
}

View file

@ -22,8 +22,6 @@ pub trait IntrinsicCallBuilderMethods<'tcx>: BackendTypes {
fn abort(&mut self);
fn assume(&mut self, val: Self::Value);
fn expect(&mut self, cond: Self::Value, expected: bool) -> Self::Value;
/// Trait method used to test whether a given pointer is associated with a type identifier.
fn type_test(&mut self, pointer: Self::Value, typeid: Self::Metadata) -> Self::Value;
/// Trait method used to load a function while testing if it is associated with a type
/// identifier.
fn type_checked_load(

View file

@ -89,9 +89,9 @@ const_eval_dyn_call_not_a_method =
`dyn` call trying to call something that is not a method
const_eval_error = {$error_kind ->
[static] could not evaluate static initializer
[const] evaluation of constant value failed
[const_with_path] evaluation of `{$instance}` failed
[static] evaluation of static initializer failed here
[const] evaluation of constant value failed here
[const_with_path] evaluation of `{$instance}` failed here
*[other] {""}
}
@ -118,7 +118,7 @@ const_eval_frame_note_inner = inside {$where_ ->
const_eval_frame_note_last = the failure occurred here
const_eval_incompatible_calling_conventions =
calling a function with calling convention {$callee_conv} using calling convention {$caller_conv}
calling a function with calling convention "{$callee_conv}" using calling convention "{$caller_conv}"
const_eval_incompatible_return_types =
calling a function with return type {$callee_ty} passing return place of type {$caller_ty}

View file

@ -589,12 +589,7 @@ impl<'tcx> Visitor<'tcx> for Checker<'_, 'tcx> {
Rvalue::Aggregate(kind, ..) => {
if let AggregateKind::Coroutine(def_id, ..) = kind.as_ref()
&& let Some(
coroutine_kind @ hir::CoroutineKind::Desugared(
hir::CoroutineDesugaring::Async,
_,
),
) = self.tcx.coroutine_kind(def_id)
&& let Some(coroutine_kind) = self.tcx.coroutine_kind(def_id)
{
self.check_op(ops::Coroutine(coroutine_kind));
}

View file

@ -486,24 +486,25 @@ impl<'tcx> NonConstOp<'tcx> for IntrinsicUnstable {
pub(crate) struct Coroutine(pub hir::CoroutineKind);
impl<'tcx> NonConstOp<'tcx> for Coroutine {
fn status_in_item(&self, _: &ConstCx<'_, 'tcx>) -> Status {
if let hir::CoroutineKind::Desugared(
hir::CoroutineDesugaring::Async,
hir::CoroutineSource::Block,
) = self.0
{
Status::Unstable {
match self.0 {
hir::CoroutineKind::Desugared(
hir::CoroutineDesugaring::Async,
hir::CoroutineSource::Block,
)
// FIXME(coroutines): eventually we want to gate const coroutine coroutines behind a
// different feature.
| hir::CoroutineKind::Coroutine(_) => Status::Unstable {
gate: sym::const_async_blocks,
gate_already_checked: false,
safe_to_expose_on_stable: false,
is_function_call: false,
}
} else {
Status::Forbidden
},
_ => Status::Forbidden,
}
}
fn build_error(&self, ccx: &ConstCx<'_, 'tcx>, span: Span) -> Diag<'tcx> {
let msg = format!("{:#}s are not allowed in {}s", self.0, ccx.const_kind());
let msg = format!("{} are not allowed in {}s", self.0.to_plural_string(), ccx.const_kind());
if let Status::Unstable { gate, .. } = self.status_in_item(ccx) {
ccx.tcx.sess.create_feature_err(errors::UnallowedOpInConstContext { span, msg }, gate)
} else {

View file

@ -1,6 +1,6 @@
use std::mem;
use rustc_errors::{DiagArgName, DiagArgValue, DiagMessage, Diagnostic, IntoDiagArg};
use rustc_errors::{Diag, DiagArgName, DiagArgValue, DiagMessage, IntoDiagArg};
use rustc_middle::mir::AssertKind;
use rustc_middle::mir::interpret::{Provenance, ReportedErrorInfo};
use rustc_middle::query::TyCtxtAt;
@ -131,10 +131,10 @@ pub fn get_span_and_frames<'tcx>(
/// Create a diagnostic for a const eval error.
///
/// This will use the `mk` function for creating the error which will get passed labels according to
/// the `InterpError` and the span and a stacktrace of current execution according to
/// `get_span_and_frames`.
pub(super) fn report<'tcx, C, F, E>(
/// This will use the `mk` function for adding more information to the error.
/// You can use it to add a stacktrace of current execution according to
/// `get_span_and_frames` or just give context on where the const eval error happened.
pub(super) fn report<'tcx, C, F>(
tcx: TyCtxt<'tcx>,
error: InterpErrorKind<'tcx>,
span: Span,
@ -143,8 +143,7 @@ pub(super) fn report<'tcx, C, F, E>(
) -> ErrorHandled
where
C: FnOnce() -> (Span, Vec<FrameNote>),
F: FnOnce(Span, Vec<FrameNote>) -> E,
E: Diagnostic<'tcx>,
F: FnOnce(&mut Diag<'_>, Span, Vec<FrameNote>),
{
// Special handling for certain errors
match error {
@ -163,8 +162,7 @@ where
_ => {
let (our_span, frames) = get_span_and_frames();
let span = span.substitute_dummy(our_span);
let err = mk(span, frames);
let mut err = tcx.dcx().create_err(err);
let mut err = tcx.dcx().struct_span_err(our_span, error.diagnostic_message());
// We allow invalid programs in infallible promoteds since invalid layouts can occur
// anyway (e.g. due to size overflow). And we allow OOM as that can happen any time.
let allowed_in_infallible = matches!(
@ -172,11 +170,9 @@ where
InterpErrorKind::ResourceExhaustion(_) | InterpErrorKind::InvalidProgram(_)
);
let msg = error.diagnostic_message();
error.add_args(&mut err);
// Use *our* span to label the interp error
err.span_label(our_span, msg);
mk(&mut err, span, frames);
let g = err.emit();
let reported = if allowed_in_infallible {
ReportedErrorInfo::allowed_in_infallible(g)

View file

@ -2,6 +2,7 @@ use std::sync::atomic::Ordering::Relaxed;
use either::{Left, Right};
use rustc_abi::{self as abi, BackendRepr};
use rustc_errors::E0080;
use rustc_hir::def::DefKind;
use rustc_middle::mir::interpret::{AllocId, ErrorHandled, InterpErrorInfo, ReportedErrorInfo};
use rustc_middle::mir::{self, ConstAlloc, ConstValue};
@ -290,12 +291,18 @@ pub fn eval_to_const_value_raw_provider<'tcx>(
|error| {
let span = tcx.def_span(def_id);
// FIXME(oli-obk): why don't we have any tests for this code path?
super::report(
tcx,
error.into_kind(),
span,
|| (span, vec![]),
|span, _| errors::NullaryIntrinsicError { span },
|diag, span, _| {
diag.span_label(
span,
crate::fluent_generated::const_eval_nullary_intrinsic_fail,
);
},
)
},
);
@ -443,11 +450,15 @@ fn report_eval_error<'tcx>(
error,
DUMMY_SP,
|| super::get_span_and_frames(ecx.tcx, ecx.stack()),
|span, frames| errors::ConstEvalError {
span,
error_kind: kind,
instance,
frame_notes: frames,
|diag, span, frames| {
// FIXME(oli-obk): figure out how to use structured diagnostics again.
diag.code(E0080);
diag.span_label(span, crate::fluent_generated::const_eval_error);
diag.arg("instance", instance);
diag.arg("error_kind", kind);
for frame in frames {
diag.subdiagnostic(frame);
}
},
)
}
@ -477,6 +488,15 @@ fn report_validation_error<'tcx>(
error,
DUMMY_SP,
|| crate::const_eval::get_span_and_frames(ecx.tcx, ecx.stack()),
move |span, frames| errors::ValidationFailure { span, ub_note: (), frames, raw_bytes },
move |diag, span, frames| {
// FIXME(oli-obk): figure out how to use structured diagnostics again.
diag.code(E0080);
diag.span_label(span, crate::fluent_generated::const_eval_validation_failure);
diag.note(crate::fluent_generated::const_eval_validation_failure_note);
for frame in frames {
diag.subdiagnostic(frame);
}
diag.subdiagnostic(raw_bytes);
},
)
}

View file

@ -439,38 +439,6 @@ pub struct LiveDrop<'tcx> {
pub dropped_at: Span,
}
#[derive(Diagnostic)]
#[diag(const_eval_error, code = E0080)]
pub struct ConstEvalError {
#[primary_span]
pub span: Span,
/// One of "const", "const_with_path", and "static"
pub error_kind: &'static str,
pub instance: String,
#[subdiagnostic]
pub frame_notes: Vec<FrameNote>,
}
#[derive(Diagnostic)]
#[diag(const_eval_nullary_intrinsic_fail)]
pub struct NullaryIntrinsicError {
#[primary_span]
pub span: Span,
}
#[derive(Diagnostic)]
#[diag(const_eval_validation_failure, code = E0080)]
pub struct ValidationFailure {
#[primary_span]
pub span: Span,
#[note(const_eval_validation_failure_note)]
pub ub_note: (),
#[subdiagnostic]
pub frames: Vec<FrameNote>,
#[subdiagnostic]
pub raw_bytes: RawBytesNote,
}
pub trait ReportErrorExt {
/// Returns the diagnostic message for this error.
fn diagnostic_message(&self) -> DiagMessage;

View file

@ -8,7 +8,6 @@ macro_rules! arena_types {
[] asm_template: rustc_ast::InlineAsmTemplatePiece,
[] attribute: rustc_hir::Attribute,
[] owner_info: rustc_hir::OwnerInfo<'tcx>,
[] use_path: rustc_hir::UsePath<'tcx>,
[] lit: rustc_hir::Lit,
[] macro_def: rustc_ast::MacroDef,
]);

View file

@ -584,7 +584,7 @@ impl<CTX: crate::HashStableContext> ToStableHashKey<CTX> for Namespace {
}
/// Just a helper separate structure for each namespace.
#[derive(Copy, Clone, Default, Debug)]
#[derive(Copy, Clone, Default, Debug, HashStable_Generic)]
pub struct PerNS<T> {
pub value_ns: T,
pub type_ns: T,
@ -596,10 +596,16 @@ impl<T> PerNS<T> {
PerNS { value_ns: f(self.value_ns), type_ns: f(self.type_ns), macro_ns: f(self.macro_ns) }
}
/// Note: Do you really want to use this? Often you know which namespace a
/// name will belong in, and you can consider just that namespace directly,
/// rather than iterating through all of them.
pub fn into_iter(self) -> IntoIter<T, 3> {
[self.value_ns, self.type_ns, self.macro_ns].into_iter()
}
/// Note: Do you really want to use this? Often you know which namespace a
/// name will belong in, and you can consider just that namespace directly,
/// rather than iterating through all of them.
pub fn iter(&self) -> IntoIter<&T, 3> {
[&self.value_ns, &self.type_ns, &self.macro_ns].into_iter()
}
@ -634,6 +640,10 @@ impl<T> PerNS<Option<T>> {
}
/// Returns an iterator over the items which are `Some`.
///
/// Note: Do you really want to use this? Often you know which namespace a
/// name will belong in, and you can consider just that namespace directly,
/// rather than iterating through all of them.
pub fn present_items(self) -> impl Iterator<Item = T> {
[self.type_ns, self.value_ns, self.macro_ns].into_iter().flatten()
}

View file

@ -30,7 +30,7 @@ use thin_vec::ThinVec;
use tracing::debug;
use crate::LangItem;
use crate::def::{CtorKind, DefKind, Res};
use crate::def::{CtorKind, DefKind, PerNS, Res};
use crate::def_id::{DefId, LocalDefIdMap};
pub(crate) use crate::hir_id::{HirId, ItemLocalId, ItemLocalMap, OwnerId};
use crate::intravisit::{FnKind, VisitorExt};
@ -347,7 +347,7 @@ pub struct Path<'hir, R = Res> {
}
/// Up to three resolutions for type, value and macro namespaces.
pub type UsePath<'hir> = Path<'hir, SmallVec<[Res; 3]>>;
pub type UsePath<'hir> = Path<'hir, PerNS<Option<Res>>>;
impl Path<'_> {
pub fn is_global(&self) -> bool {
@ -2061,12 +2061,19 @@ impl CoroutineKind {
CoroutineKind::Coroutine(mov) => mov,
}
}
}
impl CoroutineKind {
pub fn is_fn_like(self) -> bool {
matches!(self, CoroutineKind::Desugared(_, CoroutineSource::Fn))
}
pub fn to_plural_string(&self) -> String {
match self {
CoroutineKind::Desugared(d, CoroutineSource::Fn) => format!("{d:#}fn bodies"),
CoroutineKind::Desugared(d, CoroutineSource::Block) => format!("{d:#}blocks"),
CoroutineKind::Desugared(d, CoroutineSource::Closure) => format!("{d:#}closure bodies"),
CoroutineKind::Coroutine(_) => "coroutines".to_string(),
}
}
}
impl fmt::Display for CoroutineKind {
@ -2370,6 +2377,10 @@ impl Expr<'_> {
// Lang item paths cannot currently be local variables or statics.
ExprKind::Path(QPath::LangItem(..)) => false,
// Suppress errors for bad expressions.
ExprKind::Err(_guar)
| ExprKind::Let(&LetExpr { recovered: ast::Recovered::Yes(_guar), .. }) => true,
// Partially qualified paths in expressions can only legally
// refer to associated items which are always rvalues.
ExprKind::Path(QPath::TypeRelative(..))
@ -2401,8 +2412,7 @@ impl Expr<'_> {
| ExprKind::Binary(..)
| ExprKind::Yield(..)
| ExprKind::Cast(..)
| ExprKind::DropTemps(..)
| ExprKind::Err(_) => false,
| ExprKind::DropTemps(..) => false,
}
}

View file

@ -527,13 +527,15 @@ pub trait VisitorExt<'v>: Visitor<'v> {
impl<'v, V: Visitor<'v>> VisitorExt<'v> for V {}
pub fn walk_param<'v, V: Visitor<'v>>(visitor: &mut V, param: &'v Param<'v>) -> V::Result {
try_visit!(visitor.visit_id(param.hir_id));
visitor.visit_pat(param.pat)
let Param { hir_id, pat, ty_span: _, span: _ } = param;
try_visit!(visitor.visit_id(*hir_id));
visitor.visit_pat(pat)
}
pub fn walk_item<'v, V: Visitor<'v>>(visitor: &mut V, item: &'v Item<'v>) -> V::Result {
let Item { owner_id: _, kind, span: _, vis_span: _ } = item;
try_visit!(visitor.visit_id(item.hir_id()));
match item.kind {
match *kind {
ItemKind::ExternCrate(orig_name, ident) => {
visit_opt!(visitor, visit_name, orig_name);
try_visit!(visitor.visit_ident(ident));
@ -631,8 +633,9 @@ pub fn walk_item<'v, V: Visitor<'v>>(visitor: &mut V, item: &'v Item<'v>) -> V::
}
pub fn walk_body<'v, V: Visitor<'v>>(visitor: &mut V, body: &Body<'v>) -> V::Result {
walk_list!(visitor, visit_param, body.params);
visitor.visit_expr(body.value)
let Body { params, value } = body;
walk_list!(visitor, visit_param, *params);
visitor.visit_expr(*value)
}
pub fn walk_ident<'v, V: Visitor<'v>>(visitor: &mut V, ident: Ident) -> V::Result {
@ -640,7 +643,8 @@ pub fn walk_ident<'v, V: Visitor<'v>>(visitor: &mut V, ident: Ident) -> V::Resul
}
pub fn walk_mod<'v, V: Visitor<'v>>(visitor: &mut V, module: &'v Mod<'v>) -> V::Result {
walk_list!(visitor, visit_nested_item, module.item_ids.iter().copied());
let Mod { spans: _, item_ids } = module;
walk_list!(visitor, visit_nested_item, item_ids.iter().copied());
V::Result::output()
}
@ -648,10 +652,11 @@ pub fn walk_foreign_item<'v, V: Visitor<'v>>(
visitor: &mut V,
foreign_item: &'v ForeignItem<'v>,
) -> V::Result {
let ForeignItem { ident, kind, owner_id: _, span: _, vis_span: _ } = foreign_item;
try_visit!(visitor.visit_id(foreign_item.hir_id()));
try_visit!(visitor.visit_ident(foreign_item.ident));
try_visit!(visitor.visit_ident(*ident));
match foreign_item.kind {
match *kind {
ForeignItemKind::Fn(ref sig, param_idents, ref generics) => {
try_visit!(visitor.visit_generics(generics));
try_visit!(visitor.visit_fn_decl(sig.decl));
@ -670,24 +675,27 @@ pub fn walk_foreign_item<'v, V: Visitor<'v>>(
pub fn walk_local<'v, V: Visitor<'v>>(visitor: &mut V, local: &'v LetStmt<'v>) -> V::Result {
// Intentionally visiting the expr first - the initialization expr
// dominates the local's definition.
visit_opt!(visitor, visit_expr, local.init);
try_visit!(visitor.visit_id(local.hir_id));
try_visit!(visitor.visit_pat(local.pat));
visit_opt!(visitor, visit_block, local.els);
visit_opt!(visitor, visit_ty_unambig, local.ty);
let LetStmt { super_: _, pat, ty, init, els, hir_id, span: _, source: _ } = local;
visit_opt!(visitor, visit_expr, *init);
try_visit!(visitor.visit_id(*hir_id));
try_visit!(visitor.visit_pat(*pat));
visit_opt!(visitor, visit_block, *els);
visit_opt!(visitor, visit_ty_unambig, *ty);
V::Result::output()
}
pub fn walk_block<'v, V: Visitor<'v>>(visitor: &mut V, block: &'v Block<'v>) -> V::Result {
try_visit!(visitor.visit_id(block.hir_id));
walk_list!(visitor, visit_stmt, block.stmts);
visit_opt!(visitor, visit_expr, block.expr);
let Block { stmts, expr, hir_id, rules: _, span: _, targeted_by_break: _ } = block;
try_visit!(visitor.visit_id(*hir_id));
walk_list!(visitor, visit_stmt, *stmts);
visit_opt!(visitor, visit_expr, *expr);
V::Result::output()
}
pub fn walk_stmt<'v, V: Visitor<'v>>(visitor: &mut V, statement: &'v Stmt<'v>) -> V::Result {
try_visit!(visitor.visit_id(statement.hir_id));
match statement.kind {
let Stmt { kind, hir_id, span: _ } = statement;
try_visit!(visitor.visit_id(*hir_id));
match *kind {
StmtKind::Let(ref local) => visitor.visit_local(local),
StmtKind::Item(item) => visitor.visit_nested_item(item),
StmtKind::Expr(ref expression) | StmtKind::Semi(ref expression) => {
@ -697,15 +705,17 @@ pub fn walk_stmt<'v, V: Visitor<'v>>(visitor: &mut V, statement: &'v Stmt<'v>) -
}
pub fn walk_arm<'v, V: Visitor<'v>>(visitor: &mut V, arm: &'v Arm<'v>) -> V::Result {
try_visit!(visitor.visit_id(arm.hir_id));
try_visit!(visitor.visit_pat(arm.pat));
visit_opt!(visitor, visit_expr, arm.guard);
visitor.visit_expr(arm.body)
let Arm { hir_id, span: _, pat, guard, body } = arm;
try_visit!(visitor.visit_id(*hir_id));
try_visit!(visitor.visit_pat(*pat));
visit_opt!(visitor, visit_expr, *guard);
visitor.visit_expr(*body)
}
pub fn walk_ty_pat<'v, V: Visitor<'v>>(visitor: &mut V, pattern: &'v TyPat<'v>) -> V::Result {
try_visit!(visitor.visit_id(pattern.hir_id));
match pattern.kind {
let TyPat { kind, hir_id, span: _ } = pattern;
try_visit!(visitor.visit_id(*hir_id));
match *kind {
TyPatKind::Range(lower_bound, upper_bound) => {
try_visit!(visitor.visit_const_arg_unambig(lower_bound));
try_visit!(visitor.visit_const_arg_unambig(upper_bound));
@ -717,14 +727,15 @@ pub fn walk_ty_pat<'v, V: Visitor<'v>>(visitor: &mut V, pattern: &'v TyPat<'v>)
}
pub fn walk_pat<'v, V: Visitor<'v>>(visitor: &mut V, pattern: &'v Pat<'v>) -> V::Result {
try_visit!(visitor.visit_id(pattern.hir_id));
match pattern.kind {
let Pat { hir_id, kind, span, default_binding_modes: _ } = pattern;
try_visit!(visitor.visit_id(*hir_id));
match *kind {
PatKind::TupleStruct(ref qpath, children, _) => {
try_visit!(visitor.visit_qpath(qpath, pattern.hir_id, pattern.span));
try_visit!(visitor.visit_qpath(qpath, *hir_id, *span));
walk_list!(visitor, visit_pat, children);
}
PatKind::Struct(ref qpath, fields, _) => {
try_visit!(visitor.visit_qpath(qpath, pattern.hir_id, pattern.span));
try_visit!(visitor.visit_qpath(qpath, *hir_id, *span));
walk_list!(visitor, visit_pat_field, fields);
}
PatKind::Or(pats) => walk_list!(visitor, visit_pat, pats),
@ -760,36 +771,41 @@ pub fn walk_pat<'v, V: Visitor<'v>>(visitor: &mut V, pattern: &'v Pat<'v>) -> V:
}
pub fn walk_pat_field<'v, V: Visitor<'v>>(visitor: &mut V, field: &'v PatField<'v>) -> V::Result {
try_visit!(visitor.visit_id(field.hir_id));
try_visit!(visitor.visit_ident(field.ident));
visitor.visit_pat(field.pat)
let PatField { hir_id, ident, pat, is_shorthand: _, span: _ } = field;
try_visit!(visitor.visit_id(*hir_id));
try_visit!(visitor.visit_ident(*ident));
visitor.visit_pat(*pat)
}
pub fn walk_pat_expr<'v, V: Visitor<'v>>(visitor: &mut V, expr: &'v PatExpr<'v>) -> V::Result {
try_visit!(visitor.visit_id(expr.hir_id));
match &expr.kind {
PatExprKind::Lit { lit, negated } => visitor.visit_lit(expr.hir_id, lit, *negated),
let PatExpr { hir_id, span, kind } = expr;
try_visit!(visitor.visit_id(*hir_id));
match kind {
PatExprKind::Lit { lit, negated } => visitor.visit_lit(*hir_id, lit, *negated),
PatExprKind::ConstBlock(c) => visitor.visit_inline_const(c),
PatExprKind::Path(qpath) => visitor.visit_qpath(qpath, expr.hir_id, expr.span),
PatExprKind::Path(qpath) => visitor.visit_qpath(qpath, *hir_id, *span),
}
}
pub fn walk_anon_const<'v, V: Visitor<'v>>(visitor: &mut V, constant: &'v AnonConst) -> V::Result {
try_visit!(visitor.visit_id(constant.hir_id));
visitor.visit_nested_body(constant.body)
let AnonConst { hir_id, def_id: _, body, span: _ } = constant;
try_visit!(visitor.visit_id(*hir_id));
visitor.visit_nested_body(*body)
}
pub fn walk_inline_const<'v, V: Visitor<'v>>(
visitor: &mut V,
constant: &'v ConstBlock,
) -> V::Result {
try_visit!(visitor.visit_id(constant.hir_id));
visitor.visit_nested_body(constant.body)
let ConstBlock { hir_id, def_id: _, body } = constant;
try_visit!(visitor.visit_id(*hir_id));
visitor.visit_nested_body(*body)
}
pub fn walk_expr<'v, V: Visitor<'v>>(visitor: &mut V, expression: &'v Expr<'v>) -> V::Result {
try_visit!(visitor.visit_id(expression.hir_id));
match expression.kind {
let Expr { hir_id, kind, span } = expression;
try_visit!(visitor.visit_id(*hir_id));
match *kind {
ExprKind::Array(subexpressions) => {
walk_list!(visitor, visit_expr, subexpressions);
}
@ -801,7 +817,7 @@ pub fn walk_expr<'v, V: Visitor<'v>>(visitor: &mut V, expression: &'v Expr<'v>)
try_visit!(visitor.visit_const_arg_unambig(count));
}
ExprKind::Struct(ref qpath, fields, ref optional_base) => {
try_visit!(visitor.visit_qpath(qpath, expression.hir_id, expression.span));
try_visit!(visitor.visit_qpath(qpath, *hir_id, *span));
walk_list!(visitor, visit_expr_field, fields);
match optional_base {
StructTailExpr::Base(base) => try_visit!(visitor.visit_expr(base)),
@ -869,7 +885,7 @@ pub fn walk_expr<'v, V: Visitor<'v>>(visitor: &mut V, expression: &'v Expr<'v>)
constness: _,
}) => {
walk_list!(visitor, visit_generic_param, bound_generic_params);
try_visit!(visitor.visit_fn(FnKind::Closure, fn_decl, body, expression.span, def_id));
try_visit!(visitor.visit_fn(FnKind::Closure, fn_decl, body, *span, def_id));
}
ExprKind::Block(ref block, ref opt_label) => {
visit_opt!(visitor, visit_label, opt_label);
@ -892,7 +908,7 @@ pub fn walk_expr<'v, V: Visitor<'v>>(visitor: &mut V, expression: &'v Expr<'v>)
try_visit!(visitor.visit_expr(index_expression));
}
ExprKind::Path(ref qpath) => {
try_visit!(visitor.visit_qpath(qpath, expression.hir_id, expression.span));
try_visit!(visitor.visit_qpath(qpath, *hir_id, *span));
}
ExprKind::Break(ref destination, ref opt_expr) => {
visit_opt!(visitor, visit_label, &destination.label);
@ -906,7 +922,7 @@ pub fn walk_expr<'v, V: Visitor<'v>>(visitor: &mut V, expression: &'v Expr<'v>)
}
ExprKind::Become(ref expr) => try_visit!(visitor.visit_expr(expr)),
ExprKind::InlineAsm(ref asm) => {
try_visit!(visitor.visit_inline_asm(asm, expression.hir_id));
try_visit!(visitor.visit_inline_asm(asm, *hir_id));
}
ExprKind::OffsetOf(ref container, ref fields) => {
try_visit!(visitor.visit_ty_unambig(container));
@ -919,16 +935,17 @@ pub fn walk_expr<'v, V: Visitor<'v>>(visitor: &mut V, expression: &'v Expr<'v>)
try_visit!(visitor.visit_expr(expr));
visit_opt!(visitor, visit_ty_unambig, ty);
}
ExprKind::Lit(lit) => try_visit!(visitor.visit_lit(expression.hir_id, lit, false)),
ExprKind::Lit(lit) => try_visit!(visitor.visit_lit(*hir_id, lit, false)),
ExprKind::Err(_) => {}
}
V::Result::output()
}
pub fn walk_expr_field<'v, V: Visitor<'v>>(visitor: &mut V, field: &'v ExprField<'v>) -> V::Result {
try_visit!(visitor.visit_id(field.hir_id));
try_visit!(visitor.visit_ident(field.ident));
visitor.visit_expr(field.expr)
let ExprField { hir_id, ident, expr, span: _, is_shorthand: _ } = field;
try_visit!(visitor.visit_id(*hir_id));
try_visit!(visitor.visit_ident(*ident));
visitor.visit_expr(*expr)
}
/// We track whether an infer var is from a [`Ty`], [`ConstArg`], or [`GenericArg`] so that
/// HIR visitors overriding [`Visitor::visit_infer`] can determine what kind of infer is being visited
@ -946,7 +963,10 @@ pub fn walk_generic_arg<'v, V: Visitor<'v>>(
GenericArg::Lifetime(lt) => visitor.visit_lifetime(lt),
GenericArg::Type(ty) => visitor.visit_ty(ty),
GenericArg::Const(ct) => visitor.visit_const_arg(ct),
GenericArg::Infer(inf) => visitor.visit_infer(inf.hir_id, inf.span, InferKind::Ambig(inf)),
GenericArg::Infer(inf) => {
let InferArg { hir_id, span } = inf;
visitor.visit_infer(*hir_id, *span, InferKind::Ambig(inf))
}
}
}
@ -954,16 +974,18 @@ pub fn walk_unambig_ty<'v, V: Visitor<'v>>(visitor: &mut V, typ: &'v Ty<'v>) ->
match typ.try_as_ambig_ty() {
Some(ambig_ty) => visitor.visit_ty(ambig_ty),
None => {
try_visit!(visitor.visit_id(typ.hir_id));
visitor.visit_infer(typ.hir_id, typ.span, InferKind::Ty(typ))
let Ty { hir_id, span, kind: _ } = typ;
try_visit!(visitor.visit_id(*hir_id));
visitor.visit_infer(*hir_id, *span, InferKind::Ty(typ))
}
}
}
pub fn walk_ty<'v, V: Visitor<'v>>(visitor: &mut V, typ: &'v Ty<'v, AmbigArg>) -> V::Result {
try_visit!(visitor.visit_id(typ.hir_id));
let Ty { hir_id, span: _, kind } = typ;
try_visit!(visitor.visit_id(*hir_id));
match typ.kind {
match *kind {
TyKind::Slice(ref ty) => try_visit!(visitor.visit_ty_unambig(ty)),
TyKind::Ptr(ref mutable_type) => try_visit!(visitor.visit_ty_unambig(mutable_type.ty)),
TyKind::Ref(ref lifetime, ref mutable_type) => {
@ -1018,8 +1040,9 @@ pub fn walk_const_arg<'v, V: Visitor<'v>>(
match const_arg.try_as_ambig_ct() {
Some(ambig_ct) => visitor.visit_const_arg(ambig_ct),
None => {
try_visit!(visitor.visit_id(const_arg.hir_id));
visitor.visit_infer(const_arg.hir_id, const_arg.span(), InferKind::Const(const_arg))
let ConstArg { hir_id, kind: _ } = const_arg;
try_visit!(visitor.visit_id(*hir_id));
visitor.visit_infer(*hir_id, const_arg.span(), InferKind::Const(const_arg))
}
}
}
@ -1028,9 +1051,10 @@ pub fn walk_ambig_const_arg<'v, V: Visitor<'v>>(
visitor: &mut V,
const_arg: &'v ConstArg<'v, AmbigArg>,
) -> V::Result {
try_visit!(visitor.visit_id(const_arg.hir_id));
match &const_arg.kind {
ConstArgKind::Path(qpath) => visitor.visit_qpath(qpath, const_arg.hir_id, qpath.span()),
let ConstArg { hir_id, kind } = const_arg;
try_visit!(visitor.visit_id(*hir_id));
match kind {
ConstArgKind::Path(qpath) => visitor.visit_qpath(qpath, *hir_id, qpath.span()),
ConstArgKind::Anon(anon) => visitor.visit_anon_const(*anon),
}
}
@ -1039,12 +1063,22 @@ pub fn walk_generic_param<'v, V: Visitor<'v>>(
visitor: &mut V,
param: &'v GenericParam<'v>,
) -> V::Result {
try_visit!(visitor.visit_id(param.hir_id));
match param.name {
let GenericParam {
hir_id,
def_id: _,
name,
span: _,
pure_wrt_drop: _,
kind,
colon_span: _,
source: _,
} = param;
try_visit!(visitor.visit_id(*hir_id));
match *name {
ParamName::Plain(ident) | ParamName::Error(ident) => try_visit!(visitor.visit_ident(ident)),
ParamName::Fresh => {}
}
match param.kind {
match *kind {
GenericParamKind::Lifetime { .. } => {}
GenericParamKind::Type { ref default, .. } => {
visit_opt!(visitor, visit_ty_unambig, default)
@ -1052,7 +1086,7 @@ pub fn walk_generic_param<'v, V: Visitor<'v>>(
GenericParamKind::Const { ref ty, ref default, synthetic: _ } => {
try_visit!(visitor.visit_ty_unambig(ty));
if let Some(default) = default {
try_visit!(visitor.visit_const_param_default(param.hir_id, default));
try_visit!(visitor.visit_const_param_default(*hir_id, default));
}
}
}
@ -1067,8 +1101,15 @@ pub fn walk_const_param_default<'v, V: Visitor<'v>>(
}
pub fn walk_generics<'v, V: Visitor<'v>>(visitor: &mut V, generics: &'v Generics<'v>) -> V::Result {
walk_list!(visitor, visit_generic_param, generics.params);
walk_list!(visitor, visit_where_predicate, generics.predicates);
let &Generics {
params,
predicates,
has_where_clause_predicates: _,
where_clause_span: _,
span: _,
} = generics;
walk_list!(visitor, visit_generic_param, params);
walk_list!(visitor, visit_where_predicate, predicates);
V::Result::output()
}
@ -1109,8 +1150,10 @@ pub fn walk_fn_decl<'v, V: Visitor<'v>>(
visitor: &mut V,
function_declaration: &'v FnDecl<'v>,
) -> V::Result {
walk_list!(visitor, visit_ty_unambig, function_declaration.inputs);
visitor.visit_fn_ret_ty(&function_declaration.output)
let FnDecl { inputs, output, c_variadic: _, implicit_self: _, lifetime_elision_allowed: _ } =
function_declaration;
walk_list!(visitor, visit_ty_unambig, *inputs);
visitor.visit_fn_ret_ty(output)
}
pub fn walk_fn_ret_ty<'v, V: Visitor<'v>>(visitor: &mut V, ret_ty: &'v FnRetTy<'v>) -> V::Result {
@ -1148,7 +1191,7 @@ pub fn walk_use<'v, V: Visitor<'v>>(
hir_id: HirId,
) -> V::Result {
let UsePath { segments, ref res, span } = *path;
for &res in res {
for res in res.present_items() {
try_visit!(visitor.visit_path(&Path { segments, res, span }, hir_id));
}
V::Result::output()
@ -1264,8 +1307,9 @@ pub fn walk_trait_ref<'v, V: Visitor<'v>>(
visitor: &mut V,
trait_ref: &'v TraitRef<'v>,
) -> V::Result {
try_visit!(visitor.visit_id(trait_ref.hir_ref_id));
visitor.visit_path(trait_ref.path, trait_ref.hir_ref_id)
let TraitRef { hir_ref_id, path } = trait_ref;
try_visit!(visitor.visit_id(*hir_ref_id));
visitor.visit_path(*path, *hir_ref_id)
}
pub fn walk_param_bound<'v, V: Visitor<'v>>(
@ -1288,7 +1332,10 @@ pub fn walk_precise_capturing_arg<'v, V: Visitor<'v>>(
) -> V::Result {
match *arg {
PreciseCapturingArg::Lifetime(lt) => visitor.visit_lifetime(lt),
PreciseCapturingArg::Param(param) => visitor.visit_id(param.hir_id),
PreciseCapturingArg::Param(param) => {
let PreciseCapturingNonLifetimeArg { hir_id, ident: _, res: _ } = param;
visitor.visit_id(hir_id)
}
}
}
@ -1296,8 +1343,9 @@ pub fn walk_poly_trait_ref<'v, V: Visitor<'v>>(
visitor: &mut V,
trait_ref: &'v PolyTraitRef<'v>,
) -> V::Result {
walk_list!(visitor, visit_generic_param, trait_ref.bound_generic_params);
visitor.visit_trait_ref(&trait_ref.trait_ref)
let PolyTraitRef { bound_generic_params, modifiers: _, trait_ref, span: _ } = trait_ref;
walk_list!(visitor, visit_generic_param, *bound_generic_params);
visitor.visit_trait_ref(trait_ref)
}
pub fn walk_opaque_ty<'v, V: Visitor<'v>>(visitor: &mut V, opaque: &'v OpaqueTy<'v>) -> V::Result {
@ -1330,29 +1378,34 @@ pub fn walk_enum_def<'v, V: Visitor<'v>>(
visitor: &mut V,
enum_definition: &'v EnumDef<'v>,
) -> V::Result {
walk_list!(visitor, visit_variant, enum_definition.variants);
let EnumDef { variants } = enum_definition;
walk_list!(visitor, visit_variant, *variants);
V::Result::output()
}
pub fn walk_variant<'v, V: Visitor<'v>>(visitor: &mut V, variant: &'v Variant<'v>) -> V::Result {
try_visit!(visitor.visit_ident(variant.ident));
try_visit!(visitor.visit_id(variant.hir_id));
try_visit!(visitor.visit_variant_data(&variant.data));
visit_opt!(visitor, visit_anon_const, &variant.disr_expr);
let Variant { ident, hir_id, def_id: _, data, disr_expr, span: _ } = variant;
try_visit!(visitor.visit_ident(*ident));
try_visit!(visitor.visit_id(*hir_id));
try_visit!(visitor.visit_variant_data(data));
visit_opt!(visitor, visit_anon_const, disr_expr);
V::Result::output()
}
pub fn walk_label<'v, V: Visitor<'v>>(visitor: &mut V, label: &'v Label) -> V::Result {
visitor.visit_ident(label.ident)
let Label { ident } = label;
visitor.visit_ident(*ident)
}
pub fn walk_inf<'v, V: Visitor<'v>>(visitor: &mut V, inf: &'v InferArg) -> V::Result {
visitor.visit_id(inf.hir_id)
let InferArg { hir_id, span: _ } = inf;
visitor.visit_id(*hir_id)
}
pub fn walk_lifetime<'v, V: Visitor<'v>>(visitor: &mut V, lifetime: &'v Lifetime) -> V::Result {
try_visit!(visitor.visit_id(lifetime.hir_id));
visitor.visit_ident(lifetime.ident)
let Lifetime { hir_id, ident, kind: _, source: _, syntax: _ } = lifetime;
try_visit!(visitor.visit_id(*hir_id));
visitor.visit_ident(*ident)
}
pub fn walk_qpath<'v, V: Visitor<'v>>(
@ -1374,7 +1427,8 @@ pub fn walk_qpath<'v, V: Visitor<'v>>(
}
pub fn walk_path<'v, V: Visitor<'v>>(visitor: &mut V, path: &Path<'v>) -> V::Result {
walk_list!(visitor, visit_path_segment, path.segments);
let Path { segments, span: _, res: _ } = path;
walk_list!(visitor, visit_path_segment, *segments);
V::Result::output()
}
@ -1382,9 +1436,10 @@ pub fn walk_path_segment<'v, V: Visitor<'v>>(
visitor: &mut V,
segment: &'v PathSegment<'v>,
) -> V::Result {
try_visit!(visitor.visit_ident(segment.ident));
try_visit!(visitor.visit_id(segment.hir_id));
visit_opt!(visitor, visit_generic_args, segment.args);
let PathSegment { ident, hir_id, res: _, args, infer_args: _ } = segment;
try_visit!(visitor.visit_ident(*ident));
try_visit!(visitor.visit_id(*hir_id));
visit_opt!(visitor, visit_generic_args, *args);
V::Result::output()
}
@ -1392,8 +1447,9 @@ pub fn walk_generic_args<'v, V: Visitor<'v>>(
visitor: &mut V,
generic_args: &'v GenericArgs<'v>,
) -> V::Result {
walk_list!(visitor, visit_generic_arg, generic_args.args);
walk_list!(visitor, visit_assoc_item_constraint, generic_args.constraints);
let GenericArgs { args, constraints, parenthesized: _, span_ext: _ } = generic_args;
walk_list!(visitor, visit_generic_arg, *args);
walk_list!(visitor, visit_assoc_item_constraint, *constraints);
V::Result::output()
}
@ -1401,9 +1457,10 @@ pub fn walk_assoc_item_constraint<'v, V: Visitor<'v>>(
visitor: &mut V,
constraint: &'v AssocItemConstraint<'v>,
) -> V::Result {
try_visit!(visitor.visit_id(constraint.hir_id));
try_visit!(visitor.visit_ident(constraint.ident));
try_visit!(visitor.visit_generic_args(constraint.gen_args));
let AssocItemConstraint { hir_id, ident, gen_args, kind: _, span: _ } = constraint;
try_visit!(visitor.visit_id(*hir_id));
try_visit!(visitor.visit_ident(*ident));
try_visit!(visitor.visit_generic_args(*gen_args));
match constraint.kind {
AssocItemConstraintKind::Equality { ref term } => match term {
Term::Ty(ty) => try_visit!(visitor.visit_ty_unambig(ty)),

View file

@ -204,14 +204,19 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
)
}
hir::ClosureKind::CoroutineClosure(kind) => {
// async closures always return the type ascribed after the `->` (if present),
// and yield `()`.
let (bound_return_ty, bound_yield_ty) = match kind {
hir::CoroutineDesugaring::Gen => {
// `iter!` closures always return unit and yield the `Iterator::Item` type
// that we have to infer.
(tcx.types.unit, self.infcx.next_ty_var(expr_span))
}
hir::CoroutineDesugaring::Async => {
// async closures always return the type ascribed after the `->` (if present),
// and yield `()`.
(bound_sig.skip_binder().output(), tcx.types.unit)
}
hir::CoroutineDesugaring::Gen | hir::CoroutineDesugaring::AsyncGen => {
todo!("`gen` and `async gen` closures not supported yet")
hir::CoroutineDesugaring::AsyncGen => {
todo!("`async gen` closures not supported yet")
}
};
// Compute all of the variables that will be used to populate the coroutine.
@ -465,7 +470,12 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
if let Some(trait_def_id) = trait_def_id {
let found_kind = match closure_kind {
hir::ClosureKind::Closure => self.tcx.fn_trait_kind_from_def_id(trait_def_id),
hir::ClosureKind::Closure
// FIXME(iter_macro): Someday we'll probably want iterator closures instead of
// just using Fn* for iterators.
| hir::ClosureKind::CoroutineClosure(hir::CoroutineDesugaring::Gen) => {
self.tcx.fn_trait_kind_from_def_id(trait_def_id)
}
hir::ClosureKind::CoroutineClosure(hir::CoroutineDesugaring::Async) => self
.tcx
.async_fn_trait_kind_from_def_id(trait_def_id)

View file

@ -60,10 +60,6 @@ pub fn parse<'a>(sess: &'a Session) -> ast::Crate {
guar.raise_fatal();
});
if sess.opts.unstable_opts.input_stats {
input_stats::print_ast_stats(&krate, "PRE EXPANSION AST STATS", "ast-stats-1");
}
rustc_builtin_macros::cmdline_attrs::inject(
&mut krate,
&sess.psess,
@ -298,7 +294,7 @@ fn early_lint_checks(tcx: TyCtxt<'_>, (): ()) {
let mut lint_buffer = resolver.lint_buffer.steal();
if sess.opts.unstable_opts.input_stats {
input_stats::print_ast_stats(krate, "POST EXPANSION AST STATS", "ast-stats-2");
input_stats::print_ast_stats(krate, "POST EXPANSION AST STATS", "ast-stats");
}
// Needs to go *after* expansion to be able to check the results of macro expansion.

View file

@ -74,8 +74,8 @@ impl<'ecx, 'tcx, T: EarlyLintPass> EarlyContextAndPass<'ecx, 'tcx, T> {
impl<'ast, 'ecx, 'tcx, T: EarlyLintPass> ast_visit::Visitor<'ast>
for EarlyContextAndPass<'ecx, 'tcx, T>
{
fn visit_coroutine_kind(&mut self, coroutine_kind: &'ast ast::CoroutineKind) -> Self::Result {
self.check_id(coroutine_kind.closure_id());
fn visit_id(&mut self, id: rustc_ast::NodeId) {
self.check_id(id);
}
fn visit_param(&mut self, param: &'ast ast::Param) {
@ -101,7 +101,6 @@ impl<'ast, 'ecx, 'tcx, T: EarlyLintPass> ast_visit::Visitor<'ast>
fn visit_pat(&mut self, p: &'ast ast::Pat) {
lint_callback!(self, check_pat, p);
self.check_id(p.id);
ast_visit::walk_pat(self, p);
lint_callback!(self, check_pat_post, p);
}
@ -112,11 +111,6 @@ impl<'ast, 'ecx, 'tcx, T: EarlyLintPass> ast_visit::Visitor<'ast>
});
}
fn visit_anon_const(&mut self, c: &'ast ast::AnonConst) {
self.check_id(c.id);
ast_visit::walk_anon_const(self, c);
}
fn visit_expr(&mut self, e: &'ast ast::Expr) {
self.with_lint_attrs(e.id, &e.attrs, |cx| {
lint_callback!(cx, check_expr, e);
@ -157,13 +151,6 @@ impl<'ast, 'ecx, 'tcx, T: EarlyLintPass> ast_visit::Visitor<'ast>
ast_visit::walk_fn(self, fk);
}
fn visit_variant_data(&mut self, s: &'ast ast::VariantData) {
if let Some(ctor_node_id) = s.ctor_node_id() {
self.check_id(ctor_node_id);
}
ast_visit::walk_struct_def(self, s);
}
fn visit_field_def(&mut self, s: &'ast ast::FieldDef) {
self.with_lint_attrs(s.id, &s.attrs, |cx| {
ast_visit::walk_field_def(cx, s);
@ -179,7 +166,6 @@ impl<'ast, 'ecx, 'tcx, T: EarlyLintPass> ast_visit::Visitor<'ast>
fn visit_ty(&mut self, t: &'ast ast::Ty) {
lint_callback!(self, check_ty, t);
self.check_id(t.id);
ast_visit::walk_ty(self, t);
}
@ -196,7 +182,6 @@ impl<'ast, 'ecx, 'tcx, T: EarlyLintPass> ast_visit::Visitor<'ast>
fn visit_block(&mut self, b: &'ast ast::Block) {
lint_callback!(self, check_block, b);
self.check_id(b.id);
ast_visit::walk_block(self, b);
}
@ -257,29 +242,13 @@ impl<'ast, 'ecx, 'tcx, T: EarlyLintPass> ast_visit::Visitor<'ast>
});
}
fn visit_lifetime(&mut self, lt: &'ast ast::Lifetime, _: ast_visit::LifetimeCtxt) {
self.check_id(lt.id);
ast_visit::walk_lifetime(self, lt);
}
fn visit_path(&mut self, p: &'ast ast::Path, id: ast::NodeId) {
self.check_id(id);
ast_visit::walk_path(self, p);
}
fn visit_path_segment(&mut self, s: &'ast ast::PathSegment) {
self.check_id(s.id);
ast_visit::walk_path_segment(self, s);
}
fn visit_attribute(&mut self, attr: &'ast ast::Attribute) {
lint_callback!(self, check_attribute, attr);
ast_visit::walk_attribute(self, attr);
}
fn visit_macro_def(&mut self, mac: &'ast ast::MacroDef, id: ast::NodeId) {
fn visit_macro_def(&mut self, mac: &'ast ast::MacroDef) {
lint_callback!(self, check_mac_def, mac);
self.check_id(id);
}
fn visit_mac_call(&mut self, mac: &'ast ast::MacCall) {

View file

@ -328,16 +328,19 @@ impl<'tcx> LateLintPass<'tcx> for TypeIr {
fn check_item(&mut self, cx: &LateContext<'tcx>, item: &'tcx hir::Item<'tcx>) {
let rustc_hir::ItemKind::Use(path, kind) = item.kind else { return };
let is_mod_inherent = |def_id| cx.tcx.is_diagnostic_item(sym::type_ir_inherent, def_id);
let is_mod_inherent = |res: Res| {
res.opt_def_id()
.is_some_and(|def_id| cx.tcx.is_diagnostic_item(sym::type_ir_inherent, def_id))
};
// Path segments except for the final.
if let Some(seg) =
path.segments.iter().find(|seg| seg.res.opt_def_id().is_some_and(is_mod_inherent))
{
if let Some(seg) = path.segments.iter().find(|seg| is_mod_inherent(seg.res)) {
cx.emit_span_lint(USAGE_OF_TYPE_IR_INHERENT, seg.ident.span, TypeIrInherentUsage);
}
// Final path resolutions, like `use rustc_type_ir::inherent`
else if path.res.iter().any(|res| res.opt_def_id().is_some_and(is_mod_inherent)) {
else if let Some(type_ns) = path.res.type_ns
&& is_mod_inherent(type_ns)
{
cx.emit_span_lint(
USAGE_OF_TYPE_IR_INHERENT,
path.segments.last().unwrap().ident.span,
@ -346,13 +349,12 @@ impl<'tcx> LateLintPass<'tcx> for TypeIr {
}
let (lo, hi, snippet) = match path.segments {
[.., penultimate, segment]
if penultimate.res.opt_def_id().is_some_and(is_mod_inherent) =>
{
[.., penultimate, segment] if is_mod_inherent(penultimate.res) => {
(segment.ident.span, item.kind.ident().unwrap().span, "*")
}
[.., segment]
if path.res.iter().flat_map(Res::opt_def_id).any(is_mod_inherent)
if let Some(type_ns) = path.res.type_ns
&& is_mod_inherent(type_ns)
&& let rustc_hir::UseKind::Single(ident) = kind =>
{
let (lo, snippet) =

View file

@ -126,7 +126,7 @@ fn lints_that_dont_need_to_run(tcx: TyCtxt<'_>, (): ()) -> UnordSet<LintId> {
.filter(|lint| {
// Lints that show up in future-compat reports must always be run.
let has_future_breakage =
lint.future_incompatible.is_some_and(|fut| fut.reason.has_future_breakage());
lint.future_incompatible.is_some_and(|fut| fut.report_in_deps);
!has_future_breakage && !lint.eval_always
})
.filter(|lint| {

View file

@ -1,4 +1,3 @@
use rustc_hir::def::{DefKind, Res};
use rustc_hir::{self as hir};
use rustc_session::{declare_lint, declare_lint_pass};
use rustc_span::kw;
@ -47,17 +46,15 @@ declare_lint_pass!(UnqualifiedLocalImports => [UNQUALIFIED_LOCAL_IMPORTS]);
impl<'tcx> LateLintPass<'tcx> for UnqualifiedLocalImports {
fn check_item(&mut self, cx: &LateContext<'tcx>, item: &'tcx hir::Item<'tcx>) {
let hir::ItemKind::Use(path, _kind) = item.kind else { return };
// `path` has three resolutions for the type, module, value namespaces.
// Check if any of them qualifies: local crate, and not a macro.
// (Macros can't be imported any other way so we don't complain about them.)
let is_local_import = |res: &Res| {
matches!(
res,
hir::def::Res::Def(def_kind, def_id)
if def_id.is_local() && !matches!(def_kind, DefKind::Macro(_)),
)
};
if !path.res.iter().any(is_local_import) {
// Check the type and value namespace resolutions for a local crate.
let is_local_import = matches!(
path.res.type_ns,
Some(hir::def::Res::Def(_, def_id)) if def_id.is_local()
) || matches!(
path.res.value_ns,
Some(hir::def::Res::Def(_, def_id)) if def_id.is_local()
);
if !is_local_import {
return;
}
// So this does refer to something local. Let's check whether it starts with `self`,

View file

@ -178,8 +178,9 @@ declare_lint! {
Warn,
"applying forbid to lint-groups",
@future_incompatible = FutureIncompatibleInfo {
reason: FutureIncompatibilityReason::FutureReleaseErrorReportInDeps,
reason: FutureIncompatibilityReason::FutureReleaseError,
reference: "issue #81670 <https://github.com/rust-lang/rust/issues/81670>",
report_in_deps: true,
};
}
@ -214,7 +215,7 @@ declare_lint! {
Deny,
"ill-formed attribute inputs that were previously accepted and used in practice",
@future_incompatible = FutureIncompatibleInfo {
reason: FutureIncompatibilityReason::FutureReleaseErrorDontReportInDeps,
reason: FutureIncompatibilityReason::FutureReleaseError,
reference: "issue #57571 <https://github.com/rust-lang/rust/issues/57571>",
};
crate_level_only
@ -251,8 +252,9 @@ declare_lint! {
Deny,
"conflicts between `#[repr(..)]` hints that were previously accepted and used in practice",
@future_incompatible = FutureIncompatibleInfo {
reason: FutureIncompatibilityReason::FutureReleaseErrorReportInDeps,
reason: FutureIncompatibilityReason::FutureReleaseError,
reference: "issue #68585 <https://github.com/rust-lang/rust/issues/68585>",
report_in_deps: true,
};
}
@ -1240,8 +1242,9 @@ declare_lint! {
Deny,
"detect public re-exports of private extern crates",
@future_incompatible = FutureIncompatibleInfo {
reason: FutureIncompatibilityReason::FutureReleaseErrorReportInDeps,
reason: FutureIncompatibilityReason::FutureReleaseError,
reference: "issue #127909 <https://github.com/rust-lang/rust/issues/127909>",
report_in_deps: true,
};
}
@ -1270,8 +1273,9 @@ declare_lint! {
Deny,
"type parameter default erroneously allowed in invalid location",
@future_incompatible = FutureIncompatibleInfo {
reason: FutureIncompatibilityReason::FutureReleaseErrorReportInDeps,
reason: FutureIncompatibilityReason::FutureReleaseError,
reference: "issue #36887 <https://github.com/rust-lang/rust/issues/36887>",
report_in_deps: true,
};
}
@ -1409,7 +1413,7 @@ declare_lint! {
Deny,
"patterns in functions without body were erroneously allowed",
@future_incompatible = FutureIncompatibleInfo {
reason: FutureIncompatibilityReason::FutureReleaseErrorDontReportInDeps,
reason: FutureIncompatibilityReason::FutureReleaseError,
reference: "issue #35203 <https://github.com/rust-lang/rust/issues/35203>",
};
}
@ -1453,8 +1457,9 @@ declare_lint! {
Deny,
"detects missing fragment specifiers in unused `macro_rules!` patterns",
@future_incompatible = FutureIncompatibleInfo {
reason: FutureIncompatibilityReason::FutureReleaseErrorReportInDeps,
reason: FutureIncompatibilityReason::FutureReleaseError,
reference: "issue #40107 <https://github.com/rust-lang/rust/issues/40107>",
report_in_deps: true,
};
}
@ -1495,7 +1500,7 @@ declare_lint! {
Warn,
"detects generic lifetime arguments in path segments with late bound lifetime parameters",
@future_incompatible = FutureIncompatibleInfo {
reason: FutureIncompatibilityReason::FutureReleaseErrorDontReportInDeps,
reason: FutureIncompatibilityReason::FutureReleaseError,
reference: "issue #42868 <https://github.com/rust-lang/rust/issues/42868>",
};
}
@ -2122,8 +2127,9 @@ declare_lint! {
Deny,
"detects proc macro derives using inaccessible names from parent modules",
@future_incompatible = FutureIncompatibleInfo {
reason: FutureIncompatibilityReason::FutureReleaseErrorReportInDeps,
reason: FutureIncompatibilityReason::FutureReleaseError,
reference: "issue #83583 <https://github.com/rust-lang/rust/issues/83583>",
report_in_deps: true,
};
}
@ -2225,7 +2231,7 @@ declare_lint! {
"macro-expanded `macro_export` macros from the current crate \
cannot be referred to by absolute paths",
@future_incompatible = FutureIncompatibleInfo {
reason: FutureIncompatibilityReason::FutureReleaseErrorDontReportInDeps,
reason: FutureIncompatibilityReason::FutureReleaseError,
reference: "issue #52234 <https://github.com/rust-lang/rust/issues/52234>",
};
crate_level_only
@ -2346,7 +2352,7 @@ declare_lint! {
Deny,
"ambiguous associated items",
@future_incompatible = FutureIncompatibleInfo {
reason: FutureIncompatibilityReason::FutureReleaseErrorDontReportInDeps,
reason: FutureIncompatibilityReason::FutureReleaseError,
reference: "issue #57644 <https://github.com/rust-lang/rust/issues/57644>",
};
}
@ -2362,8 +2368,9 @@ declare_lint! {
Deny,
"a feature gate that doesn't break dependent crates",
@future_incompatible = FutureIncompatibleInfo {
reason: FutureIncompatibilityReason::FutureReleaseErrorReportInDeps,
reason: FutureIncompatibilityReason::FutureReleaseError,
reference: "issue #64266 <https://github.com/rust-lang/rust/issues/64266>",
report_in_deps: true,
};
}
@ -2674,7 +2681,7 @@ declare_lint! {
Warn,
"detects a generic constant is used in a type without a emitting a warning",
@future_incompatible = FutureIncompatibleInfo {
reason: FutureIncompatibilityReason::FutureReleaseErrorDontReportInDeps,
reason: FutureIncompatibilityReason::FutureReleaseError,
reference: "issue #76200 <https://github.com/rust-lang/rust/issues/76200>",
};
}
@ -2733,7 +2740,7 @@ declare_lint! {
Warn,
"uninhabited static",
@future_incompatible = FutureIncompatibleInfo {
reason: FutureIncompatibilityReason::FutureReleaseErrorDontReportInDeps,
reason: FutureIncompatibilityReason::FutureReleaseError,
reference: "issue #74840 <https://github.com/rust-lang/rust/issues/74840>",
};
}
@ -2866,7 +2873,7 @@ declare_lint! {
Warn,
"detect unsupported use of `Self` from outer item",
@future_incompatible = FutureIncompatibleInfo {
reason: FutureIncompatibilityReason::FutureReleaseErrorDontReportInDeps,
reason: FutureIncompatibilityReason::FutureReleaseError,
reference: "issue #124186 <https://github.com/rust-lang/rust/issues/124186>",
};
}
@ -2912,8 +2919,9 @@ declare_lint! {
Warn,
"trailing semicolon in macro body used as expression",
@future_incompatible = FutureIncompatibleInfo {
reason: FutureIncompatibilityReason::FutureReleaseErrorReportInDeps,
reason: FutureIncompatibilityReason::FutureReleaseError,
reference: "issue #79813 <https://github.com/rust-lang/rust/issues/79813>",
report_in_deps: true,
};
}
@ -2959,7 +2967,7 @@ declare_lint! {
Warn,
"detects derive helper attributes that are used before they are introduced",
@future_incompatible = FutureIncompatibleInfo {
reason: FutureIncompatibilityReason::FutureReleaseErrorDontReportInDeps,
reason: FutureIncompatibilityReason::FutureReleaseError,
reference: "issue #79202 <https://github.com/rust-lang/rust/issues/79202>",
};
}
@ -3126,7 +3134,7 @@ declare_lint! {
Warn,
"transparent type contains an external ZST that is marked #[non_exhaustive] or contains private fields",
@future_incompatible = FutureIncompatibleInfo {
reason: FutureIncompatibilityReason::FutureReleaseErrorDontReportInDeps,
reason: FutureIncompatibilityReason::FutureReleaseError,
reference: "issue #78586 <https://github.com/rust-lang/rust/issues/78586>",
};
}
@ -3177,7 +3185,7 @@ declare_lint! {
Warn,
"unstable syntax can change at any point in the future, causing a hard error!",
@future_incompatible = FutureIncompatibleInfo {
reason: FutureIncompatibilityReason::FutureReleaseErrorDontReportInDeps,
reason: FutureIncompatibilityReason::FutureReleaseError,
reference: "issue #65860 <https://github.com/rust-lang/rust/issues/65860>",
};
}
@ -3685,8 +3693,9 @@ declare_lint! {
Warn,
"use of unsupported calling convention for function pointer",
@future_incompatible = FutureIncompatibleInfo {
reason: FutureIncompatibilityReason::FutureReleaseErrorReportInDeps,
reason: FutureIncompatibilityReason::FutureReleaseError,
reference: "issue #130260 <https://github.com/rust-lang/rust/issues/130260>",
report_in_deps: true,
};
}
@ -4368,7 +4377,7 @@ declare_lint! {
Warn,
"detects certain glob imports that require reporting an ambiguity error",
@future_incompatible = FutureIncompatibleInfo {
reason: FutureIncompatibilityReason::FutureReleaseErrorDontReportInDeps,
reason: FutureIncompatibilityReason::FutureReleaseError,
reference: "issue #114095 <https://github.com/rust-lang/rust/issues/114095>",
};
}
@ -4523,7 +4532,7 @@ declare_lint! {
Deny,
"elided lifetimes cannot be used in associated constants in impls",
@future_incompatible = FutureIncompatibleInfo {
reason: FutureIncompatibilityReason::FutureReleaseErrorDontReportInDeps,
reason: FutureIncompatibilityReason::FutureReleaseError,
reference: "issue #115010 <https://github.com/rust-lang/rust/issues/115010>",
};
}
@ -4570,7 +4579,7 @@ declare_lint! {
Warn,
"detects certain macro bindings that should not be re-exported",
@future_incompatible = FutureIncompatibleInfo {
reason: FutureIncompatibilityReason::FutureReleaseErrorDontReportInDeps,
reason: FutureIncompatibilityReason::FutureReleaseError,
reference: "issue #120192 <https://github.com/rust-lang/rust/issues/120192>",
};
}
@ -4635,7 +4644,7 @@ declare_lint! {
Warn,
"impl contains type parameters that are not covered",
@future_incompatible = FutureIncompatibleInfo {
reason: FutureIncompatibilityReason::FutureReleaseErrorDontReportInDeps,
reason: FutureIncompatibilityReason::FutureReleaseError,
reference: "issue #124559 <https://github.com/rust-lang/rust/issues/124559>",
};
}
@ -4799,7 +4808,7 @@ declare_lint! {
Warn,
"detects out of scope calls to `macro_rules` in key-value attributes",
@future_incompatible = FutureIncompatibleInfo {
reason: FutureIncompatibilityReason::FutureReleaseErrorDontReportInDeps,
reason: FutureIncompatibilityReason::FutureReleaseError,
reference: "issue #124535 <https://github.com/rust-lang/rust/issues/124535>",
};
}
@ -5040,8 +5049,9 @@ declare_lint! {
Warn,
"detects code relying on rustc's non-spec-compliant wasm C ABI",
@future_incompatible = FutureIncompatibleInfo {
reason: FutureIncompatibilityReason::FutureReleaseErrorReportInDeps,
reason: FutureIncompatibilityReason::FutureReleaseError,
reference: "issue #138762 <https://github.com/rust-lang/rust/issues/138762>",
report_in_deps: true,
};
}
@ -5081,7 +5091,8 @@ declare_lint! {
Warn,
"detects code that could be affected by ABI issues on aarch64 softfloat targets",
@future_incompatible = FutureIncompatibleInfo {
reason: FutureIncompatibilityReason::FutureReleaseErrorReportInDeps,
reason: FutureIncompatibilityReason::FutureReleaseError,
reference: "issue #134375 <https://github.com/rust-lang/rust/issues/134375>",
report_in_deps: true,
};
}

View file

@ -361,6 +361,18 @@ pub struct FutureIncompatibleInfo {
/// Set to false for lints that already include a more detailed
/// explanation.
pub explain_reason: bool,
/// If set to `true`, this will make future incompatibility warnings show up in cargo's
/// reports.
///
    /// When a future incompatibility warning is first introduced, set this to `false`
/// (or, rather, don't override the default). This allows crate developers an opportunity
/// to fix the warning before blasting all dependents with a warning they can't fix
/// (dependents have to wait for a new release of the affected crate to be published).
///
/// After a lint has been in this state for a while, consider setting this to true, so it
/// warns for everyone. It is a good signal that it is ready if you can determine that all
/// or most affected crates on crates.io have been updated.
pub report_in_deps: bool,
}
/// The reason for future incompatibility
@ -380,46 +392,24 @@ pub struct FutureIncompatibleInfo {
pub enum FutureIncompatibilityReason {
/// This will be an error in a future release for all editions
///
/// This will *not* show up in cargo's future breakage report.
/// The warning will hence only be seen in local crates, not in dependencies.
///
/// Choose this variant when you are first introducing a "future
/// incompatible" warning that is intended to eventually be fixed in the
/// future. This allows crate developers an opportunity to fix the warning
/// before blasting all dependents with a warning they can't fix
/// (dependents have to wait for a new release of the affected crate to be
/// published).
/// future.
///
/// After a lint has been in this state for a while, consider graduating
/// it to [`FutureIncompatibilityReason::FutureReleaseErrorReportInDeps`].
FutureReleaseErrorDontReportInDeps,
/// This will be an error in a future release, and
/// Cargo should create a report even for dependencies
///
/// This is the *only* reason that will make future incompatibility warnings show up in cargo's
/// reports. All other future incompatibility warnings are not visible when they occur in a
/// dependency.
///
/// Choose this variant after the lint has been sitting in the
/// [`FutureIncompatibilityReason::FutureReleaseErrorDontReportInDeps`]
/// state for a while, and you feel like it is ready to graduate to
/// warning everyone. It is a good signal that it is ready if you can
/// determine that all or most affected crates on crates.io have been
/// updated.
/// After a lint has been in this state for a while and you feel like it is ready to graduate
/// to warning everyone, consider setting [`FutureIncompatibleInfo::report_in_deps`] to true.
    /// (see its documentation for more guidance)
///
/// After some period of time, lints with this variant can be turned into
/// hard errors (and the lint removed). Preferably when there is some
/// confidence that the number of impacted projects is very small (few
/// should have a broken dependency in their dependency tree).
///
/// [`EditionAndFutureReleaseError`]: FutureIncompatibilityReason::EditionAndFutureReleaseError
FutureReleaseErrorReportInDeps,
FutureReleaseError,
/// Code that changes meaning in some way in a
/// future release.
///
/// Choose this variant when the semantics of existing code is changing,
/// (as opposed to
/// [`FutureIncompatibilityReason::FutureReleaseErrorDontReportInDeps`],
/// (as opposed to [`FutureIncompatibilityReason::FutureReleaseError`],
/// which is for when code is going to be rejected in the future).
FutureReleaseSemanticsChange,
/// Previously accepted code that will become an
@ -454,13 +444,12 @@ pub enum FutureIncompatibilityReason {
/// This will be an error in the provided edition *and* in a future
/// release.
///
/// This variant a combination of [`FutureReleaseErrorDontReportInDeps`]
/// and [`EditionError`]. This is useful in rare cases when we
/// want to have "preview" of a breaking change in an edition, but do a
/// breaking change later on all editions anyway.
    /// This variant is a combination of [`FutureReleaseError`] and [`EditionError`].
/// This is useful in rare cases when we want to have "preview" of a breaking
/// change in an edition, but do a breaking change later on all editions anyway.
///
/// [`EditionError`]: FutureIncompatibilityReason::EditionError
/// [`FutureReleaseErrorDontReportInDeps`]: FutureIncompatibilityReason::FutureReleaseErrorDontReportInDeps
/// [`FutureReleaseError`]: FutureIncompatibilityReason::FutureReleaseError
EditionAndFutureReleaseError(Edition),
/// This will change meaning in the provided edition *and* in a future
/// release.
@ -478,7 +467,7 @@ pub enum FutureIncompatibilityReason {
/// Choose this variant if the built-in text of the diagnostic of the
/// other variants doesn't match your situation. This is behaviorally
/// equivalent to
/// [`FutureIncompatibilityReason::FutureReleaseErrorDontReportInDeps`].
/// [`FutureIncompatibilityReason::FutureReleaseError`].
Custom(&'static str),
}
@ -490,34 +479,20 @@ impl FutureIncompatibilityReason {
| Self::EditionAndFutureReleaseError(e)
| Self::EditionAndFutureReleaseSemanticsChange(e) => Some(e),
FutureIncompatibilityReason::FutureReleaseErrorDontReportInDeps
| FutureIncompatibilityReason::FutureReleaseErrorReportInDeps
FutureIncompatibilityReason::FutureReleaseError
| FutureIncompatibilityReason::FutureReleaseSemanticsChange
| FutureIncompatibilityReason::Custom(_) => None,
}
}
pub fn has_future_breakage(self) -> bool {
match self {
FutureIncompatibilityReason::FutureReleaseErrorReportInDeps => true,
FutureIncompatibilityReason::FutureReleaseErrorDontReportInDeps
| FutureIncompatibilityReason::FutureReleaseSemanticsChange
| FutureIncompatibilityReason::EditionError(_)
| FutureIncompatibilityReason::EditionSemanticsChange(_)
| FutureIncompatibilityReason::EditionAndFutureReleaseError(_)
| FutureIncompatibilityReason::EditionAndFutureReleaseSemanticsChange(_)
| FutureIncompatibilityReason::Custom(_) => false,
}
}
}
impl FutureIncompatibleInfo {
pub const fn default_fields_for_macro() -> Self {
FutureIncompatibleInfo {
reference: "",
reason: FutureIncompatibilityReason::FutureReleaseErrorDontReportInDeps,
reason: FutureIncompatibilityReason::FutureReleaseError,
explain_reason: true,
report_in_deps: false,
}
}
}

View file

@ -228,10 +228,10 @@ fn main() {
let mut cmd = Command::new(&llvm_config);
cmd.arg(llvm_link_arg).arg("--libs");
// Don't link system libs if cross-compiling unless targeting Windows.
// Don't link system libs if cross-compiling unless targeting Windows from Windows host.
// On Windows system DLLs aren't linked directly, instead import libraries are used.
// These import libraries are independent of the host.
if !is_crossed || target.contains("windows") {
if !is_crossed || target.contains("windows") && host.contains("windows") {
cmd.arg("--system-libs");
}

View file

@ -299,7 +299,7 @@ pub fn lint_level(
let has_future_breakage = future_incompatible.map_or(
// Default allow lints trigger too often for testing.
sess.opts.unstable_opts.future_incompat_test && lint.default_level != Level::Allow,
|incompat| incompat.reason.has_future_breakage(),
|incompat| incompat.report_in_deps,
);
// Convert lint level to error level.
@ -370,8 +370,7 @@ pub fn lint_level(
if let Some(future_incompatible) = future_incompatible {
let explanation = match future_incompatible.reason {
FutureIncompatibilityReason::FutureReleaseErrorDontReportInDeps
| FutureIncompatibilityReason::FutureReleaseErrorReportInDeps => {
FutureIncompatibilityReason::FutureReleaseError => {
"this was previously accepted by the compiler but is being phased out; \
it will become a hard error in a future release!"
.to_owned()

View file

@ -41,7 +41,8 @@ pub fn walk_expr<'thir, 'tcx: 'thir, V: Visitor<'thir, 'tcx>>(
expr: &'thir Expr<'tcx>,
) {
use ExprKind::*;
match expr.kind {
let Expr { kind, ty: _, temp_lifetime: _, span: _ } = expr;
match *kind {
Scope { value, region_scope: _, lint_level: _ } => {
visitor.visit_expr(&visitor.thir()[value])
}
@ -191,7 +192,8 @@ pub fn walk_stmt<'thir, 'tcx: 'thir, V: Visitor<'thir, 'tcx>>(
visitor: &mut V,
stmt: &'thir Stmt<'tcx>,
) {
match &stmt.kind {
let Stmt { kind } = stmt;
match kind {
StmtKind::Expr { expr, scope: _ } => visitor.visit_expr(&visitor.thir()[*expr]),
StmtKind::Let {
initializer,
@ -217,11 +219,13 @@ pub fn walk_block<'thir, 'tcx: 'thir, V: Visitor<'thir, 'tcx>>(
visitor: &mut V,
block: &'thir Block,
) {
for &stmt in &*block.stmts {
let Block { stmts, expr, targeted_by_break: _, region_scope: _, span: _, safety_mode: _ } =
block;
for &stmt in &*stmts {
visitor.visit_stmt(&visitor.thir()[stmt]);
}
if let Some(expr) = block.expr {
visitor.visit_expr(&visitor.thir()[expr]);
if let Some(expr) = expr {
visitor.visit_expr(&visitor.thir()[*expr]);
}
}
@ -229,11 +233,12 @@ pub fn walk_arm<'thir, 'tcx: 'thir, V: Visitor<'thir, 'tcx>>(
visitor: &mut V,
arm: &'thir Arm<'tcx>,
) {
if let Some(expr) = arm.guard {
visitor.visit_expr(&visitor.thir()[expr])
let Arm { guard, pattern, body, lint_level: _, span: _, scope: _ } = arm;
if let Some(expr) = guard {
visitor.visit_expr(&visitor.thir()[*expr])
}
visitor.visit_pat(&arm.pattern);
visitor.visit_expr(&visitor.thir()[arm.body]);
visitor.visit_pat(pattern);
visitor.visit_expr(&visitor.thir()[*body]);
}
pub fn walk_pat<'thir, 'tcx: 'thir, V: Visitor<'thir, 'tcx>>(
@ -249,7 +254,8 @@ pub(crate) fn for_each_immediate_subpat<'a, 'tcx>(
pat: &'a Pat<'tcx>,
mut callback: impl FnMut(&'a Pat<'tcx>),
) {
match &pat.kind {
let Pat { kind, ty: _, span: _ } = pat;
match kind {
PatKind::Missing
| PatKind::Wild
| PatKind::Binding { subpattern: None, .. }

View file

@ -1047,26 +1047,21 @@ fn find_fallback_pattern_typo<'tcx>(
let hir::ItemKind::Use(path, _) = item.kind else {
continue;
};
for res in &path.res {
if let Res::Def(DefKind::Const, id) = res
&& infcx.can_eq(param_env, ty, cx.tcx.type_of(id).instantiate_identity())
{
if cx.tcx.visibility(id).is_accessible_from(parent, cx.tcx) {
// The original const is accessible, suggest using it directly.
let item_name = cx.tcx.item_name(*id);
accessible.push(item_name);
accessible_path.push(with_no_trimmed_paths!(cx.tcx.def_path_str(id)));
} else if cx
.tcx
.visibility(item.owner_id)
.is_accessible_from(parent, cx.tcx)
{
// The const is accessible only through the re-export, point at
// the `use`.
let ident = item.kind.ident().unwrap();
imported.push(ident.name);
imported_spans.push(ident.span);
}
if let Some(value_ns) = path.res.value_ns
&& let Res::Def(DefKind::Const, id) = value_ns
&& infcx.can_eq(param_env, ty, cx.tcx.type_of(id).instantiate_identity())
{
if cx.tcx.visibility(id).is_accessible_from(parent, cx.tcx) {
// The original const is accessible, suggest using it directly.
let item_name = cx.tcx.item_name(id);
accessible.push(item_name);
accessible_path.push(with_no_trimmed_paths!(cx.tcx.def_path_str(id)));
} else if cx.tcx.visibility(item.owner_id).is_accessible_from(parent, cx.tcx) {
// The const is accessible only through the re-export, point at
// the `use`.
let ident = item.kind.ident().unwrap();
imported.push(ident.name);
imported_spans.push(ident.span);
}
}
}

View file

@ -1,6 +1,7 @@
use std::{fmt, iter, mem};
use rustc_abi::{FIRST_VARIANT, FieldIdx, VariantIdx};
use rustc_hir::def::DefKind;
use rustc_hir::lang_items::LangItem;
use rustc_index::Idx;
use rustc_middle::mir::*;
@ -254,8 +255,19 @@ where
// impl_item_refs may be empty if drop fn is not implemented in 'impl AsyncDrop for ...'
// (#140974).
// Such code will report error, so just generate sync drop here and return
let Some(drop_fn_def_id) =
tcx.associated_item_def_ids(drop_trait).into_iter().nth(0).copied()
let Some(drop_fn_def_id) = tcx
.associated_item_def_ids(drop_trait)
.first()
.and_then(|def_id| {
if tcx.def_kind(def_id) == DefKind::AssocFn
&& tcx.check_args_compatible(*def_id, trait_args)
{
Some(def_id)
} else {
None
}
})
.copied()
else {
tcx.dcx().span_delayed_bug(
self.elaborator.body().span,

View file

@ -1,6 +1,6 @@
//! This module ensures that if a function's ABI requires a particular target feature,
//! that target feature is enabled both on the callee and all callers.
use rustc_abi::{BackendRepr, RegKind};
use rustc_abi::{BackendRepr, CanonAbi, RegKind, X86Call};
use rustc_hir::{CRATE_HIR_ID, HirId};
use rustc_middle::mir::{self, Location, traversal};
use rustc_middle::ty::layout::LayoutCx;
@ -8,7 +8,7 @@ use rustc_middle::ty::{self, Instance, InstanceKind, Ty, TyCtxt, TypingEnv};
use rustc_session::lint::builtin::WASM_C_ABI;
use rustc_span::def_id::DefId;
use rustc_span::{DUMMY_SP, Span, Symbol, sym};
use rustc_target::callconv::{ArgAbi, Conv, FnAbi, PassMode};
use rustc_target::callconv::{ArgAbi, FnAbi, PassMode};
use rustc_target::spec::{HasWasmCAbiOpt, WasmCAbi};
use crate::errors;
@ -72,7 +72,7 @@ fn do_check_simd_vector_abi<'tcx>(
}
}
// The `vectorcall` ABI is special in that it requires SSE2 no matter which types are being passed.
if abi.conv == Conv::X86VectorCall && !have_feature(sym::sse2) {
if abi.conv == CanonAbi::X86(X86Call::Vectorcall) && !have_feature(sym::sse2) {
let (span, _hir_id) = loc();
tcx.dcx().emit_err(errors::AbiRequiredTargetFeature {
span,
@ -128,7 +128,7 @@ fn do_check_wasm_abi<'tcx>(
if !(tcx.sess.target.arch == "wasm32"
&& tcx.sess.target.os == "unknown"
&& tcx.wasm_c_abi_opt() == WasmCAbi::Legacy { with_lint: true }
&& abi.conv == Conv::C)
&& abi.conv == CanonAbi::C)
{
return;
}

View file

@ -834,7 +834,7 @@ impl<'a> Parser<'a> {
// guides recovery in case we write `&raw expr`.
if borrow_kind == ast::BorrowKind::Ref
&& mutbl == ast::Mutability::Not
&& matches!(&expr.kind, ExprKind::Path(None, p) if p.is_ident(kw::Raw))
&& matches!(&expr.kind, ExprKind::Path(None, p) if *p == kw::Raw)
{
self.expected_token_types.insert(TokenType::KwMut);
self.expected_token_types.insert(TokenType::KwConst);

View file

@ -713,7 +713,7 @@ impl<'a> Parser<'a> {
/// Parses the rest of a block expression or function body.
/// Precondition: already parsed the '{'.
pub(crate) fn parse_block_tail(
pub fn parse_block_tail(
&mut self,
lo: Span,
s: BlockCheckMode,

View file

@ -132,7 +132,7 @@ impl<'tcx> Visitor<'tcx> for ExportableItemCollector<'tcx> {
self.add_exportable(def_id);
}
hir::ItemKind::Use(path, _) => {
for res in &path.res {
for res in path.res.present_items() {
// Only local items are exportable.
if let Some(res_id) = res.opt_def_id()
&& let Some(res_id) = res_id.as_local()

View file

@ -118,9 +118,10 @@ impl<'a, 'ra, 'tcx> UnusedImportCheckVisitor<'a, 'ra, 'tcx> {
ast::UseTreeKind::Simple(Some(ident)) => {
if ident.name == kw::Underscore
&& !self.r.import_res_map.get(&id).is_some_and(|per_ns| {
per_ns.iter().filter_map(|res| res.as_ref()).any(|res| {
matches!(res, Res::Def(DefKind::Trait | DefKind::TraitAlias, _))
})
matches!(
per_ns.type_ns,
Some(Res::Def(DefKind::Trait | DefKind::TraitAlias, _))
)
})
{
self.unused_import(self.base_id).add(id);

View file

@ -874,7 +874,7 @@ impl<'ra: 'ast, 'ast, 'tcx> Visitor<'ast> for LateResolutionVisitor<'_, 'ast, 'r
kind: LifetimeBinderKind::PolyTrait,
span,
},
|this| this.visit_path(path, ty.id),
|this| this.visit_path(path),
);
} else {
visit::walk_ty(self, ty)
@ -1265,7 +1265,7 @@ impl<'ra: 'ast, 'ast, 'tcx> Visitor<'ast> for LateResolutionVisitor<'_, 'ast, 'r
AnonConstKind::ConstArg(IsRepeatExpr::No),
|this| {
this.smart_resolve_path(ty.id, &None, path, PathSource::Expr(None));
this.visit_path(path, ty.id);
this.visit_path(path);
},
);
@ -3640,7 +3640,7 @@ impl<'a, 'ast, 'ra: 'ast, 'tcx> LateResolutionVisitor<'a, 'ast, 'ra, 'tcx> {
if let Some(qself) = &delegation.qself {
self.visit_ty(&qself.ty);
}
self.visit_path(&delegation.path, delegation.id);
self.visit_path(&delegation.path);
let Some(body) = &delegation.body else { return };
self.with_rib(ValueNS, RibKind::FnOrCoroutine, |this| {
let span = delegation.path.segments.last().unwrap().ident.span;
@ -4867,7 +4867,7 @@ impl<'a, 'ast, 'ra: 'ast, 'tcx> LateResolutionVisitor<'a, 'ast, 'ra, 'tcx> {
if let Some(qself) = &se.qself {
self.visit_ty(&qself.ty);
}
self.visit_path(&se.path, expr.id);
self.visit_path(&se.path);
walk_list!(self, resolve_expr_field, &se.fields, expr);
match &se.rest {
StructRest::Base(expr) => self.visit_expr(expr),
@ -4898,11 +4898,28 @@ impl<'a, 'ast, 'ra: 'ast, 'tcx> LateResolutionVisitor<'a, 'ast, 'ra, 'tcx> {
self.resolve_expr(e, Some(expr));
}
ExprKind::Let(ref pat, ref scrutinee, _, _) => {
ExprKind::Let(ref pat, ref scrutinee, _, Recovered::No) => {
self.visit_expr(scrutinee);
self.resolve_pattern_top(pat, PatternSource::Let);
}
ExprKind::Let(ref pat, ref scrutinee, _, Recovered::Yes(_)) => {
self.visit_expr(scrutinee);
// This is basically a tweaked, inlined `resolve_pattern_top`.
let mut bindings = smallvec![(PatBoundCtx::Product, Default::default())];
self.resolve_pattern(pat, PatternSource::Let, &mut bindings);
// We still collect the bindings in this `let` expression which is in
// an invalid position (and therefore shouldn't declare variables into
// its parent scope). To avoid unnecessary errors though, we do just
// reassign the resolutions to `Res::Err`.
for (_, bindings) in &mut bindings {
for (_, binding) in bindings {
*binding = Res::Err;
}
}
self.apply_pattern_bindings(bindings);
}
ExprKind::If(ref cond, ref then, ref opt_else) => {
self.with_rib(ValueNS, RibKind::Normal, |this| {
let old = this.diag_metadata.in_if_condition.replace(cond);

View file

@ -4,10 +4,11 @@
//! For more information about LLVM CFI and cross-language LLVM CFI support for the Rust compiler,
//! see design document in the tracking issue #89653.
use rustc_abi::CanonAbi;
use rustc_data_structures::fx::FxHashMap;
use rustc_middle::bug;
use rustc_middle::ty::{self, Instance, Ty, TyCtxt, TypeFoldable, TypeVisitableExt};
use rustc_target::callconv::{Conv, FnAbi, PassMode};
use rustc_target::callconv::{FnAbi, PassMode};
use tracing::instrument;
mod encode;
@ -45,7 +46,7 @@ pub fn typeid_for_fnabi<'tcx>(
let mut encode_ty_options = EncodeTyOptions::from_bits(options.bits())
.unwrap_or_else(|| bug!("typeid_for_fnabi: invalid option(s) `{:?}`", options.bits()));
match fn_abi.conv {
Conv::C => {
CanonAbi::C => {
encode_ty_options.insert(EncodeTyOptions::GENERALIZE_REPR_C);
}
_ => {

View file

@ -2,8 +2,9 @@
#![allow(rustc::usage_of_qualified_ty)]
use rustc_abi::{ArmCall, CanonAbi, InterruptKind, X86Call};
use rustc_middle::ty;
use rustc_target::callconv::{self, Conv};
use rustc_target::callconv;
use stable_mir::abi::{
AddressSpace, ArgAbi, CallConvention, FieldsShape, FloatLength, FnAbi, IntegerLength, Layout,
LayoutShape, PassMode, Primitive, Scalar, TagEncoding, TyAndLayout, ValueAbi, VariantsShape,
@ -69,7 +70,7 @@ impl<'tcx> Stable<'tcx> for callconv::FnAbi<'tcx, ty::Ty<'tcx>> {
fn stable(&self, tables: &mut Tables<'_>) -> Self::T {
assert!(self.args.len() >= self.fixed_count as usize);
assert!(!self.c_variadic || matches!(self.conv, Conv::C));
assert!(!self.c_variadic || matches!(self.conv, CanonAbi::C));
FnAbi {
args: self.args.as_ref().stable(tables),
ret: self.ret.stable(tables),
@ -92,31 +93,37 @@ impl<'tcx> Stable<'tcx> for callconv::ArgAbi<'tcx, ty::Ty<'tcx>> {
}
}
impl<'tcx> Stable<'tcx> for callconv::Conv {
impl<'tcx> Stable<'tcx> for CanonAbi {
type T = CallConvention;
fn stable(&self, _tables: &mut Tables<'_>) -> Self::T {
match self {
Conv::C => CallConvention::C,
Conv::Rust => CallConvention::Rust,
Conv::Cold => CallConvention::Cold,
Conv::PreserveMost => CallConvention::PreserveMost,
Conv::PreserveAll => CallConvention::PreserveAll,
Conv::ArmAapcs => CallConvention::ArmAapcs,
Conv::CCmseNonSecureCall => CallConvention::CCmseNonSecureCall,
Conv::CCmseNonSecureEntry => CallConvention::CCmseNonSecureEntry,
Conv::Msp430Intr => CallConvention::Msp430Intr,
Conv::X86Fastcall => CallConvention::X86Fastcall,
Conv::X86Intr => CallConvention::X86Intr,
Conv::X86Stdcall => CallConvention::X86Stdcall,
Conv::X86ThisCall => CallConvention::X86ThisCall,
Conv::X86VectorCall => CallConvention::X86VectorCall,
Conv::X86_64SysV => CallConvention::X86_64SysV,
Conv::X86_64Win64 => CallConvention::X86_64Win64,
Conv::GpuKernel => CallConvention::GpuKernel,
Conv::AvrInterrupt => CallConvention::AvrInterrupt,
Conv::AvrNonBlockingInterrupt => CallConvention::AvrNonBlockingInterrupt,
Conv::RiscvInterrupt { .. } => CallConvention::RiscvInterrupt,
CanonAbi::C => CallConvention::C,
CanonAbi::Rust => CallConvention::Rust,
CanonAbi::RustCold => CallConvention::Cold,
CanonAbi::Arm(arm_call) => match arm_call {
ArmCall::Aapcs => CallConvention::ArmAapcs,
ArmCall::CCmseNonSecureCall => CallConvention::CCmseNonSecureCall,
ArmCall::CCmseNonSecureEntry => CallConvention::CCmseNonSecureEntry,
},
CanonAbi::GpuKernel => CallConvention::GpuKernel,
CanonAbi::Interrupt(interrupt_kind) => match interrupt_kind {
InterruptKind::Avr => CallConvention::AvrInterrupt,
InterruptKind::AvrNonBlocking => CallConvention::AvrNonBlockingInterrupt,
InterruptKind::Msp430 => CallConvention::Msp430Intr,
InterruptKind::RiscvMachine | InterruptKind::RiscvSupervisor => {
CallConvention::RiscvInterrupt
}
InterruptKind::X86 => CallConvention::X86Intr,
},
CanonAbi::X86(x86_call) => match x86_call {
X86Call::Fastcall => CallConvention::X86Fastcall,
X86Call::Stdcall => CallConvention::X86Stdcall,
X86Call::SysV64 => CallConvention::X86_64SysV,
X86Call::Thiscall => CallConvention::X86ThisCall,
X86Call::Vectorcall => CallConvention::X86VectorCall,
X86Call::Win64 => CallConvention::X86_64Win64,
},
}
}
}

View file

@ -1,6 +1,6 @@
use rustc_abi::{HasDataLayout, TyAbiInterface};
use rustc_abi::{ArmCall, CanonAbi, HasDataLayout, TyAbiInterface};
use crate::callconv::{ArgAbi, Conv, FnAbi, Reg, RegKind, Uniform};
use crate::callconv::{ArgAbi, FnAbi, Reg, RegKind, Uniform};
use crate::spec::HasTargetSpec;
fn is_homogeneous_aggregate<'a, Ty, C>(cx: &C, arg: &mut ArgAbi<'a, Ty>) -> Option<Uniform>
@ -90,7 +90,7 @@ where
// If this is a target with a hard-float ABI, and the function is not explicitly
// `extern "aapcs"`, then we must use the VFP registers for homogeneous aggregates.
let vfp = cx.target_spec().llvm_target.ends_with("hf")
&& fn_abi.conv != Conv::ArmAapcs
&& fn_abi.conv != CanonAbi::Arm(ArmCall::Aapcs)
&& !fn_abi.c_variadic;
if !fn_abi.ret.is_ignore() {

View file

@ -1,13 +1,12 @@
use std::fmt::Display;
use std::str::FromStr;
use std::{fmt, iter};
use rustc_abi::{
AddressSpace, Align, BackendRepr, ExternAbi, HasDataLayout, Primitive, Reg, RegKind, Scalar,
Size, TyAbiInterface, TyAndLayout,
AddressSpace, Align, BackendRepr, CanonAbi, ExternAbi, HasDataLayout, Primitive, Reg, RegKind,
Scalar, Size, TyAbiInterface, TyAndLayout,
};
use rustc_macros::HashStable_Generic;
pub use crate::spec::AbiMap;
use crate::spec::{HasTargetSpec, HasWasmCAbiOpt, HasX86AbiOpt, RustcAbi, WasmCAbi};
mod aarch64;
@ -529,41 +528,6 @@ impl<'a, Ty> ArgAbi<'a, Ty> {
}
}
#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug, HashStable_Generic)]
pub enum Conv {
// General language calling conventions, for which every target
// should have its own backend (e.g. LLVM) support.
C,
Rust,
Cold,
PreserveMost,
PreserveAll,
// Target-specific calling conventions.
ArmAapcs,
CCmseNonSecureCall,
CCmseNonSecureEntry,
Msp430Intr,
GpuKernel,
X86Fastcall,
X86Intr,
X86Stdcall,
X86ThisCall,
X86VectorCall,
X86_64SysV,
X86_64Win64,
AvrInterrupt,
AvrNonBlockingInterrupt,
RiscvInterrupt { kind: RiscvInterruptKind },
}
#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug, HashStable_Generic)]
pub enum RiscvInterruptKind {
Machine,
@ -605,7 +569,7 @@ pub struct FnAbi<'a, Ty> {
/// This can be used to know whether an argument is variadic or not.
pub fixed_count: u32,
/// The calling convention of this function.
pub conv: Conv,
pub conv: CanonAbi,
/// Indicates if an unwind may happen across a call to this function.
pub can_unwind: bool,
}
@ -696,7 +660,6 @@ impl<'a, Ty> FnAbi<'a, Ty> {
"sparc" => sparc::compute_abi_info(cx, self),
"sparc64" => sparc64::compute_abi_info(cx, self),
"nvptx64" => {
let abi = cx.target_spec().adjust_abi(abi, self.c_variadic);
if abi == ExternAbi::PtxKernel || abi == ExternAbi::GpuKernel {
nvptx64::compute_ptx_kernel_abi_info(cx, self)
} else {
@ -863,70 +826,6 @@ impl<'a, Ty> FnAbi<'a, Ty> {
}
}
impl FromStr for Conv {
type Err = String;
fn from_str(s: &str) -> Result<Self, Self::Err> {
match s {
"C" => Ok(Conv::C),
"Rust" => Ok(Conv::Rust),
"RustCold" => Ok(Conv::Rust),
"ArmAapcs" => Ok(Conv::ArmAapcs),
"CCmseNonSecureCall" => Ok(Conv::CCmseNonSecureCall),
"CCmseNonSecureEntry" => Ok(Conv::CCmseNonSecureEntry),
"Msp430Intr" => Ok(Conv::Msp430Intr),
"X86Fastcall" => Ok(Conv::X86Fastcall),
"X86Intr" => Ok(Conv::X86Intr),
"X86Stdcall" => Ok(Conv::X86Stdcall),
"X86ThisCall" => Ok(Conv::X86ThisCall),
"X86VectorCall" => Ok(Conv::X86VectorCall),
"X86_64SysV" => Ok(Conv::X86_64SysV),
"X86_64Win64" => Ok(Conv::X86_64Win64),
"GpuKernel" => Ok(Conv::GpuKernel),
"AvrInterrupt" => Ok(Conv::AvrInterrupt),
"AvrNonBlockingInterrupt" => Ok(Conv::AvrNonBlockingInterrupt),
"RiscvInterrupt(machine)" => {
Ok(Conv::RiscvInterrupt { kind: RiscvInterruptKind::Machine })
}
"RiscvInterrupt(supervisor)" => {
Ok(Conv::RiscvInterrupt { kind: RiscvInterruptKind::Supervisor })
}
_ => Err(format!("'{s}' is not a valid value for entry function call convention.")),
}
}
}
fn conv_to_externabi(conv: &Conv) -> ExternAbi {
match conv {
Conv::C => ExternAbi::C { unwind: false },
Conv::Rust => ExternAbi::Rust,
Conv::PreserveMost => ExternAbi::RustCold,
Conv::ArmAapcs => ExternAbi::Aapcs { unwind: false },
Conv::CCmseNonSecureCall => ExternAbi::CCmseNonSecureCall,
Conv::CCmseNonSecureEntry => ExternAbi::CCmseNonSecureEntry,
Conv::Msp430Intr => ExternAbi::Msp430Interrupt,
Conv::GpuKernel => ExternAbi::GpuKernel,
Conv::X86Fastcall => ExternAbi::Fastcall { unwind: false },
Conv::X86Intr => ExternAbi::X86Interrupt,
Conv::X86Stdcall => ExternAbi::Stdcall { unwind: false },
Conv::X86ThisCall => ExternAbi::Thiscall { unwind: false },
Conv::X86VectorCall => ExternAbi::Vectorcall { unwind: false },
Conv::X86_64SysV => ExternAbi::SysV64 { unwind: false },
Conv::X86_64Win64 => ExternAbi::Win64 { unwind: false },
Conv::AvrInterrupt => ExternAbi::AvrInterrupt,
Conv::AvrNonBlockingInterrupt => ExternAbi::AvrNonBlockingInterrupt,
Conv::RiscvInterrupt { kind: RiscvInterruptKind::Machine } => ExternAbi::RiscvInterruptM,
Conv::RiscvInterrupt { kind: RiscvInterruptKind::Supervisor } => ExternAbi::RiscvInterruptS,
Conv::Cold | Conv::PreserveAll => unreachable!(),
}
}
impl Display for Conv {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{}", conv_to_externabi(self))
}
}
// Some types are used a lot. Make sure they don't unintentionally get bigger.
#[cfg(target_pointer_width = "64")]
mod size_asserts {

View file

@ -92,38 +92,6 @@ impl<A: ToJson> ToJson for Option<A> {
}
}
impl ToJson for crate::callconv::Conv {
fn to_json(&self) -> Json {
let buf: String;
let s = match self {
Self::C => "C",
Self::Rust => "Rust",
Self::Cold => "Cold",
Self::PreserveMost => "PreserveMost",
Self::PreserveAll => "PreserveAll",
Self::ArmAapcs => "ArmAapcs",
Self::CCmseNonSecureCall => "CCmseNonSecureCall",
Self::CCmseNonSecureEntry => "CCmseNonSecureEntry",
Self::Msp430Intr => "Msp430Intr",
Self::X86Fastcall => "X86Fastcall",
Self::X86Intr => "X86Intr",
Self::X86Stdcall => "X86Stdcall",
Self::X86ThisCall => "X86ThisCall",
Self::X86VectorCall => "X86VectorCall",
Self::X86_64SysV => "X86_64SysV",
Self::X86_64Win64 => "X86_64Win64",
Self::GpuKernel => "GpuKernel",
Self::AvrInterrupt => "AvrInterrupt",
Self::AvrNonBlockingInterrupt => "AvrNonBlockingInterrupt",
Self::RiscvInterrupt { kind } => {
buf = format!("RiscvInterrupt({})", kind.as_str());
&buf
}
};
Json::String(s.to_owned())
}
}
impl ToJson for TargetMetadata {
fn to_json(&self) -> Json {
json!({
@ -140,3 +108,9 @@ impl ToJson for rustc_abi::Endian {
self.as_str().to_json()
}
}
// Serialize a `CanonAbi` into target-spec JSON via its string form.
impl ToJson for rustc_abi::CanonAbi {
    fn to_json(&self) -> Json {
        // Relies on `CanonAbi`'s `Display` impl (defined in rustc_abi) for the
        // string representation; replaces the old hand-written `Conv` serializer.
        self.to_string().to_json()
    }
}

View file

@ -0,0 +1,187 @@
use rustc_abi::{ArmCall, CanonAbi, ExternAbi, InterruptKind, X86Call};
use crate::spec::Target;
/// Mapping for ExternAbi to CanonAbi according to a Target
///
/// A maybe-transitional structure circa 2025 for hosting future experiments in
/// encapsulating arch-specific ABI lowering details to make them more testable.
#[derive(Clone, Debug)]
pub struct AbiMap {
    // Architecture family; the primary discriminant in `canonize_abi`'s match.
    arch: Arch,
    // Windows-likeness; affects `extern "system"`, `RustCold`, and the
    // deprecated stdcall/fastcall/vectorcall fallbacks in `canonize_abi`.
    os: OsKind,
}
/// Result of mapping an `ExternAbi` to a `CanonAbi` for a given target.
#[derive(Copy, Clone, Debug)]
pub enum AbiMapping {
    /// this ABI is exactly mapped for this platform
    Direct(CanonAbi),
    /// we don't yet warn on this, but we will
    Deprecated(CanonAbi),
    /// this ABI has no valid lowering on this platform (see `into_option`)
    Invalid,
}
impl AbiMapping {
    /// Returns the mapped `CanonAbi` for both `Direct` and `Deprecated`
    /// mappings, or `None` for `Invalid`.
    pub fn into_option(self) -> Option<CanonAbi> {
        match self {
            Self::Direct(abi) | Self::Deprecated(abi) => Some(abi),
            Self::Invalid => None,
        }
    }

    /// Extracts the `CanonAbi`, panicking on `Invalid`.
    pub fn unwrap(self) -> CanonAbi {
        self.into_option().unwrap()
    }

    /// Whether this ABI lowers to *some* calling convention on the target,
    /// even if that lowering is deprecated.
    pub fn is_mapped(self) -> bool {
        self.into_option().is_some()
    }
}
impl AbiMap {
    /// Builds the ABI-lowering map from a target's architecture, LLVM target
    /// triple, and Windows-likeness. Anything not listed below lowers as
    /// `Arch::Other`.
    pub fn from_target(target: &Target) -> Self {
        // the purpose of this little exercise is to force listing what affects these mappings
        let arch = match &*target.arch {
            "aarch64" => Arch::Aarch64,
            "amdgpu" => Arch::Amdgpu,
            // Only thumbv8m targets get the CMSE calling conventions below.
            "arm" if target.llvm_target.starts_with("thumbv8m") => Arch::Arm(ArmVer::ThumbV8M),
            "arm" => Arch::Arm(ArmVer::Other),
            "avr" => Arch::Avr,
            "msp430" => Arch::Msp430,
            "nvptx64" => Arch::Nvptx,
            "riscv32" | "riscv64" => Arch::Riscv,
            "x86" => Arch::X86,
            "x86_64" => Arch::X86_64,
            _ => Arch::Other,
        };
        let os = if target.is_like_windows { OsKind::Windows } else { OsKind::Other };
        AbiMap { arch, os }
    }

    /// Lowers an `ExternAbi` to its `CanonAbi` for this target.
    ///
    /// `has_c_varargs` matters only for `extern "system"` on x86 Windows,
    /// where variadic functions fall back to the C convention instead of
    /// stdcall. Returns `AbiMapping::Invalid` for ABIs that have no meaning
    /// on the target, and `Deprecated` for lowerings that are accepted today
    /// but will warn in the future.
    pub fn canonize_abi(&self, extern_abi: ExternAbi, has_c_varargs: bool) -> AbiMapping {
        let AbiMap { os, arch } = *self;

        let canon_abi = match (extern_abi, arch) {
            // infallible lowerings
            (ExternAbi::C { .. }, _) => CanonAbi::C,
            (ExternAbi::Rust | ExternAbi::RustCall, _) => CanonAbi::Rust,
            (ExternAbi::Unadjusted, _) => CanonAbi::C,

            // On Windows `extern "rust-cold"` lowers to the plain Rust
            // convention rather than a distinct cold convention.
            (ExternAbi::RustCold, _) if self.os == OsKind::Windows => CanonAbi::Rust,
            (ExternAbi::RustCold, _) => CanonAbi::RustCold,

            // `extern "system"` is stdcall only for non-variadic fns on x86 Windows.
            (ExternAbi::System { .. }, Arch::X86) if os == OsKind::Windows && !has_c_varargs => {
                CanonAbi::X86(X86Call::Stdcall)
            }
            (ExternAbi::System { .. }, _) => CanonAbi::C,

            // fallible lowerings
            (ExternAbi::EfiApi, Arch::Arm(..)) => CanonAbi::Arm(ArmCall::Aapcs),
            (ExternAbi::EfiApi, Arch::X86_64) => CanonAbi::X86(X86Call::Win64),
            (ExternAbi::EfiApi, Arch::Aarch64 | Arch::Riscv | Arch::X86) => CanonAbi::C,
            (ExternAbi::EfiApi, _) => return AbiMapping::Invalid,

            (ExternAbi::Aapcs { .. }, Arch::Arm(..)) => CanonAbi::Arm(ArmCall::Aapcs),
            (ExternAbi::Aapcs { .. }, _) => return AbiMapping::Invalid,

            // CMSE conventions require a thumbv8m llvm target (see `from_target`).
            (ExternAbi::CCmseNonSecureCall, Arch::Arm(ArmVer::ThumbV8M)) => {
                CanonAbi::Arm(ArmCall::CCmseNonSecureCall)
            }
            (ExternAbi::CCmseNonSecureEntry, Arch::Arm(ArmVer::ThumbV8M)) => {
                CanonAbi::Arm(ArmCall::CCmseNonSecureEntry)
            }
            (ExternAbi::CCmseNonSecureCall | ExternAbi::CCmseNonSecureEntry, ..) => {
                return AbiMapping::Invalid;
            }

            (ExternAbi::Cdecl { .. }, Arch::X86) => CanonAbi::C,
            (ExternAbi::Cdecl { .. }, _) => return AbiMapping::Deprecated(CanonAbi::C),

            // The x86-only conventions are accepted-but-deprecated (as C) on
            // other Windows arches, and rejected everywhere else.
            (ExternAbi::Fastcall { .. }, Arch::X86) => CanonAbi::X86(X86Call::Fastcall),
            (ExternAbi::Fastcall { .. }, _) if os == OsKind::Windows => {
                return AbiMapping::Deprecated(CanonAbi::C);
            }
            (ExternAbi::Fastcall { .. }, _) => return AbiMapping::Invalid,

            (ExternAbi::Stdcall { .. }, Arch::X86) => CanonAbi::X86(X86Call::Stdcall),
            (ExternAbi::Stdcall { .. }, _) if os == OsKind::Windows => {
                return AbiMapping::Deprecated(CanonAbi::C);
            }
            (ExternAbi::Stdcall { .. }, _) => return AbiMapping::Invalid,

            (ExternAbi::Thiscall { .. }, Arch::X86) => CanonAbi::X86(X86Call::Thiscall),
            (ExternAbi::Thiscall { .. }, _) => return AbiMapping::Invalid,

            (ExternAbi::Vectorcall { .. }, Arch::X86 | Arch::X86_64) => {
                CanonAbi::X86(X86Call::Vectorcall)
            }
            (ExternAbi::Vectorcall { .. }, _) if os == OsKind::Windows => {
                return AbiMapping::Deprecated(CanonAbi::C);
            }
            (ExternAbi::Vectorcall { .. }, _) => return AbiMapping::Invalid,

            (ExternAbi::SysV64 { .. }, Arch::X86_64) => CanonAbi::X86(X86Call::SysV64),
            (ExternAbi::Win64 { .. }, Arch::X86_64) => CanonAbi::X86(X86Call::Win64),
            (ExternAbi::SysV64 { .. } | ExternAbi::Win64 { .. }, _) => return AbiMapping::Invalid,

            (ExternAbi::PtxKernel, Arch::Nvptx) => CanonAbi::GpuKernel,
            (ExternAbi::GpuKernel, Arch::Amdgpu | Arch::Nvptx) => CanonAbi::GpuKernel,
            (ExternAbi::PtxKernel | ExternAbi::GpuKernel, _) => return AbiMapping::Invalid,

            // Interrupt ABIs are each valid only on their own architecture.
            (ExternAbi::AvrInterrupt, Arch::Avr) => CanonAbi::Interrupt(InterruptKind::Avr),
            (ExternAbi::AvrNonBlockingInterrupt, Arch::Avr) => {
                CanonAbi::Interrupt(InterruptKind::AvrNonBlocking)
            }
            (ExternAbi::Msp430Interrupt, Arch::Msp430) => {
                CanonAbi::Interrupt(InterruptKind::Msp430)
            }
            (ExternAbi::RiscvInterruptM, Arch::Riscv) => {
                CanonAbi::Interrupt(InterruptKind::RiscvMachine)
            }
            (ExternAbi::RiscvInterruptS, Arch::Riscv) => {
                CanonAbi::Interrupt(InterruptKind::RiscvSupervisor)
            }
            (ExternAbi::X86Interrupt, Arch::X86 | Arch::X86_64) => {
                CanonAbi::Interrupt(InterruptKind::X86)
            }
            (
                ExternAbi::AvrInterrupt
                | ExternAbi::AvrNonBlockingInterrupt
                | ExternAbi::Msp430Interrupt
                | ExternAbi::RiscvInterruptM
                | ExternAbi::RiscvInterruptS
                | ExternAbi::X86Interrupt,
                _,
            ) => return AbiMapping::Invalid,
        };

        AbiMapping::Direct(canon_abi)
    }
}
// Architecture families that `canonize_abi` distinguishes; everything else
// collapses into `Other`.
#[derive(Debug, PartialEq, Copy, Clone)]
enum Arch {
    Aarch64,
    Amdgpu,
    // Carries the ARM version because CMSE ABIs require thumbv8m.
    Arm(ArmVer),
    Avr,
    Msp430,
    Nvptx,
    Riscv,
    X86,
    X86_64,
    /// Architectures which don't need other considerations for ABI lowering
    Other,
}
// The only OS distinction ABI lowering cares about is Windows vs. not.
#[derive(Debug, PartialEq, Copy, Clone)]
enum OsKind {
    Windows,
    Other,
}
// ARM sub-version; thumbv8m is singled out because it alone supports the
// CMSE non-secure call/entry conventions.
#[derive(Debug, PartialEq, Copy, Clone)]
enum ArmVer {
    ThumbV8M,
    Other,
}

View file

@ -2,10 +2,12 @@ use std::borrow::Cow;
use std::collections::BTreeMap;
use std::str::FromStr;
use rustc_abi::ExternAbi;
use serde_json::Value;
use super::{Target, TargetKind, TargetOptions, TargetWarnings};
use crate::json::{Json, ToJson};
use crate::spec::AbiMap;
impl Target {
/// Loads a target descriptor from a JSON object.
@ -515,18 +517,6 @@ impl Target {
}
}
} );
($key_name:ident, Conv) => ( {
let name = (stringify!($key_name)).replace("_", "-");
obj.remove(&name).and_then(|o| o.as_str().and_then(|s| {
match super::Conv::from_str(s) {
Ok(c) => {
base.$key_name = c;
Some(Ok(()))
}
Err(e) => Some(Err(e))
}
})).unwrap_or(Ok(()))
} );
}
if let Some(j) = obj.remove("target-endian") {
@ -660,9 +650,23 @@ impl Target {
key!(supports_stack_protector, bool);
key!(small_data_threshold_support, SmallDataThresholdSupport)?;
key!(entry_name);
key!(entry_abi, Conv)?;
key!(supports_xray, bool);
// we're going to run `update_from_cli`, but that won't change the target's AbiMap
// FIXME: better factor the Target definition so we enforce this on a type level
let abi_map = AbiMap::from_target(&base);
if let Some(abi_str) = obj.remove("entry-abi") {
if let Json::String(abi_str) = abi_str {
match abi_str.parse::<ExternAbi>() {
Ok(abi) => base.options.entry_abi = abi_map.canonize_abi(abi, false).unwrap(),
Err(_) => return Err(format!("{abi_str} is not a valid ExternAbi")),
}
} else {
incorrect_type.push("entry-abi".to_owned())
}
}
base.update_from_cli();
base.check_consistency(TargetKind::Json)?;

View file

@ -43,7 +43,7 @@ use std::str::FromStr;
use std::{fmt, io};
use rustc_abi::{
Align, Endian, ExternAbi, Integer, Size, TargetDataLayout, TargetDataLayoutErrors,
Align, CanonAbi, Endian, ExternAbi, Integer, Size, TargetDataLayout, TargetDataLayoutErrors,
};
use rustc_data_structures::fx::{FxHashSet, FxIndexSet};
use rustc_fs_util::try_canonicalize;
@ -53,15 +53,16 @@ use rustc_span::{Symbol, kw, sym};
use serde_json::Value;
use tracing::debug;
use crate::callconv::Conv;
use crate::json::{Json, ToJson};
use crate::spec::crt_objects::CrtObjects;
pub mod crt_objects;
mod abi_map;
mod base;
mod json;
pub use abi_map::AbiMap;
pub use base::apple;
pub use base::avr::ef_avr_arch;
@ -2655,9 +2656,9 @@ pub struct TargetOptions {
/// Default value is "main"
pub entry_name: StaticCow<str>,
/// The ABI of entry function.
/// Default value is `Conv::C`, i.e. C call convention
pub entry_abi: Conv,
/// The ABI of the entry function.
/// Default value is `CanonAbi::C`
pub entry_abi: CanonAbi,
/// Whether the target supports XRay instrumentation.
pub supports_xray: bool,
@ -2888,7 +2889,7 @@ impl Default for TargetOptions {
generate_arange_section: true,
supports_stack_protector: true,
entry_name: "main".into(),
entry_abi: Conv::C,
entry_abi: CanonAbi::C,
supports_xray: false,
small_data_threshold_support: SmallDataThresholdSupport::DefaultForArch,
}
@ -2914,114 +2915,9 @@ impl DerefMut for Target {
}
impl Target {
/// Given a function ABI, turn it into the correct ABI for this target.
pub fn adjust_abi(&self, abi: ExternAbi, c_variadic: bool) -> ExternAbi {
use ExternAbi::*;
match abi {
// On Windows, `extern "system"` behaves like msvc's `__stdcall`.
// `__stdcall` only applies on x86 and on non-variadic functions:
// https://learn.microsoft.com/en-us/cpp/cpp/stdcall?view=msvc-170
System { unwind } => {
if self.is_like_windows && self.arch == "x86" && !c_variadic {
Stdcall { unwind }
} else {
C { unwind }
}
}
EfiApi => {
if self.arch == "arm" {
Aapcs { unwind: false }
} else if self.arch == "x86_64" {
Win64 { unwind: false }
} else {
C { unwind: false }
}
}
// See commentary in `is_abi_supported`.
Stdcall { unwind } | Thiscall { unwind } | Fastcall { unwind } => {
if self.arch == "x86" { abi } else { C { unwind } }
}
Vectorcall { unwind } => {
if ["x86", "x86_64"].contains(&&*self.arch) {
abi
} else {
C { unwind }
}
}
// The Windows x64 calling convention we use for `extern "Rust"`
// <https://learn.microsoft.com/en-us/cpp/build/x64-software-conventions#register-volatility-and-preservation>
// expects the callee to save `xmm6` through `xmm15`, but `PreserveMost`
// (that we use by default for `extern "rust-cold"`) doesn't save any of those.
// So to avoid bloating callers, just use the Rust convention here.
RustCold if self.is_like_windows && self.arch == "x86_64" => Rust,
abi => abi,
}
}
pub fn is_abi_supported(&self, abi: ExternAbi) -> bool {
use ExternAbi::*;
match abi {
Rust | C { .. } | System { .. } | RustCall | Unadjusted | Cdecl { .. } | RustCold => {
true
}
EfiApi => {
["arm", "aarch64", "riscv32", "riscv64", "x86", "x86_64"].contains(&&self.arch[..])
}
X86Interrupt => ["x86", "x86_64"].contains(&&self.arch[..]),
Aapcs { .. } => "arm" == self.arch,
CCmseNonSecureCall | CCmseNonSecureEntry => {
["thumbv8m.main-none-eabi", "thumbv8m.main-none-eabihf", "thumbv8m.base-none-eabi"]
.contains(&&self.llvm_target[..])
}
Win64 { .. } | SysV64 { .. } => self.arch == "x86_64",
PtxKernel => self.arch == "nvptx64",
GpuKernel => ["amdgpu", "nvptx64"].contains(&&self.arch[..]),
Msp430Interrupt => self.arch == "msp430",
RiscvInterruptM | RiscvInterruptS => ["riscv32", "riscv64"].contains(&&self.arch[..]),
AvrInterrupt | AvrNonBlockingInterrupt => self.arch == "avr",
Thiscall { .. } => self.arch == "x86",
// On windows these fall-back to platform native calling convention (C) when the
// architecture is not supported.
//
// This is I believe a historical accident that has occurred as part of Microsoft
// striving to allow most of the code to "just" compile when support for 64-bit x86
// was added and then later again, when support for ARM architectures was added.
//
// This is well documented across MSDN. Support for this in Rust has been added in
// #54576. This makes much more sense in context of Microsoft's C++ than it does in
// Rust, but there isn't much leeway remaining here to change it back at the time this
// comment has been written.
//
// Following are the relevant excerpts from the MSDN documentation.
//
// > The __vectorcall calling convention is only supported in native code on x86 and
// x64 processors that include Streaming SIMD Extensions 2 (SSE2) and above.
// > ...
// > On ARM machines, __vectorcall is accepted and ignored by the compiler.
//
// -- https://docs.microsoft.com/en-us/cpp/cpp/vectorcall?view=msvc-160
//
// > On ARM and x64 processors, __stdcall is accepted and ignored by the compiler;
//
// -- https://docs.microsoft.com/en-us/cpp/cpp/stdcall?view=msvc-160
//
// > In most cases, keywords or compiler switches that specify an unsupported
// > convention on a particular platform are ignored, and the platform default
// > convention is used.
//
// -- https://docs.microsoft.com/en-us/cpp/cpp/argument-passing-and-naming-conventions
Stdcall { .. } | Fastcall { .. } | Vectorcall { .. } if self.is_like_windows => true,
// Outside of Windows we want to only support these calling conventions for the
// architectures for which these calling conventions are actually well defined.
Stdcall { .. } | Fastcall { .. } if self.arch == "x86" => true,
Vectorcall { .. } if ["x86", "x86_64"].contains(&&self.arch[..]) => true,
// Reject these calling conventions everywhere else.
Stdcall { .. } | Fastcall { .. } | Vectorcall { .. } => false,
}
let abi_map = AbiMap::from_target(self);
abi_map.canonize_abi(abi, false).is_mapped()
}
/// Minimum integer size in bits that this target can perform atomic

View file

@ -5,7 +5,8 @@
// The win64 ABI is used. It differs from the sysv64 ABI, so we must use a windows target with
// LLVM. "x86_64-unknown-windows" is used to get the minimal subset of windows-specific features.
use crate::callconv::Conv;
use rustc_abi::{CanonAbi, X86Call};
use crate::spec::{RustcAbi, Target, TargetMetadata, base};
pub(crate) fn target() -> Target {
@ -13,7 +14,7 @@ pub(crate) fn target() -> Target {
base.cpu = "x86-64".into();
base.plt_by_default = false;
base.max_atomic_width = Some(64);
base.entry_abi = Conv::X86_64Win64;
base.entry_abi = CanonAbi::X86(X86Call::Win64);
// We disable MMX and SSE for now, even though UEFI allows using them. Problem is, you have to
// enable these CPU features explicitly before their first use, otherwise their instructions

View file

@ -72,8 +72,6 @@ trait_selection_adjust_signature_remove_borrow = consider adjusting the signatur
trait_selection_ascribe_user_type_prove_predicate = ...so that the where clause holds
trait_selection_async_closure_not_fn = async closure does not implement `{$kind}` because it captures state from its environment
trait_selection_await_both_futures = consider `await`ing on both `Future`s
trait_selection_await_future = consider `await`ing on the `Future`
trait_selection_await_note = calling an async function returns a future
@ -123,6 +121,8 @@ trait_selection_closure_kind_requirement = the requirement to implement `{$trait
trait_selection_compare_impl_item_obligation = ...so that the definition in impl matches the definition from the trait
trait_selection_consider_specifying_length = consider specifying the actual array length
trait_selection_coro_closure_not_fn = {$coro_kind}closure does not implement `{$kind}` because it captures state from its environment
trait_selection_data_flows = ...but data{$label_var1_exists ->
[true] {" "}from `{$label_var1}`
*[false] {""}

View file

@ -42,9 +42,7 @@ use super::{
use crate::error_reporting::TypeErrCtxt;
use crate::error_reporting::infer::TyCategory;
use crate::error_reporting::traits::report_dyn_incompatibility;
use crate::errors::{
AsyncClosureNotFn, ClosureFnMutLabel, ClosureFnOnceLabel, ClosureKindMismatch,
};
use crate::errors::{ClosureFnMutLabel, ClosureFnOnceLabel, ClosureKindMismatch, CoroClosureNotFn};
use crate::infer::{self, InferCtxt, InferCtxtExt as _};
use crate::traits::query::evaluate_obligation::InferCtxtExt as _;
use crate::traits::{
@ -886,9 +884,18 @@ impl<'a, 'tcx> TypeErrCtxt<'a, 'tcx> {
// is unimplemented is because async closures don't implement `Fn`/`FnMut`
// if they have captures.
if has_self_borrows && expected_kind != ty::ClosureKind::FnOnce {
let mut err = self.dcx().create_err(AsyncClosureNotFn {
let coro_kind = match self
.tcx
.coroutine_kind(self.tcx.coroutine_for_closure(closure_def_id))
.unwrap()
{
rustc_hir::CoroutineKind::Desugared(desugaring, _) => desugaring.to_string(),
coro => coro.to_string(),
};
let mut err = self.dcx().create_err(CoroClosureNotFn {
span: self.tcx.def_span(closure_def_id),
kind: expected_kind.as_str(),
coro_kind,
});
self.note_obligation_cause(&mut err, &obligation);
return Some(err.emit());

View file

@ -201,11 +201,12 @@ pub struct ClosureFnMutLabel {
}
#[derive(Diagnostic)]
#[diag(trait_selection_async_closure_not_fn)]
pub(crate) struct AsyncClosureNotFn {
#[diag(trait_selection_coro_closure_not_fn)]
pub(crate) struct CoroClosureNotFn {
#[primary_span]
pub span: Span,
pub kind: &'static str,
pub coro_kind: String,
}
#[derive(Diagnostic)]

View file

@ -11,7 +11,7 @@ use std::ops::ControlFlow;
use hir::LangItem;
use hir::def_id::DefId;
use rustc_data_structures::fx::{FxHashSet, FxIndexSet};
use rustc_hir as hir;
use rustc_hir::{self as hir, CoroutineDesugaring, CoroutineKind};
use rustc_infer::traits::{Obligation, PolyTraitObligation, SelectionError};
use rustc_middle::ty::fast_reject::DeepRejectCtxt;
use rustc_middle::ty::{self, Ty, TypeVisitableExt, TypingMode, elaborate};
@ -438,6 +438,7 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
}
}
#[instrument(level = "debug", skip(self, candidates))]
fn assemble_async_closure_candidates(
&mut self,
obligation: &PolyTraitObligation<'tcx>,
@ -446,15 +447,30 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
let goal_kind =
self.tcx().async_fn_trait_kind_from_def_id(obligation.predicate.def_id()).unwrap();
debug!("self_ty = {:?}", obligation.self_ty().skip_binder().kind());
match *obligation.self_ty().skip_binder().kind() {
ty::CoroutineClosure(_, args) => {
ty::CoroutineClosure(def_id, args) => {
if let Some(closure_kind) =
args.as_coroutine_closure().kind_ty().to_opt_closure_kind()
&& !closure_kind.extends(goal_kind)
{
return;
}
candidates.vec.push(AsyncClosureCandidate);
// Make sure this is actually an async closure.
let Some(coroutine_kind) =
self.tcx().coroutine_kind(self.tcx().coroutine_for_closure(def_id))
else {
bug!("coroutine with no kind");
};
debug!(?coroutine_kind);
match coroutine_kind {
CoroutineKind::Desugared(CoroutineDesugaring::Async, _) => {
candidates.vec.push(AsyncClosureCandidate);
}
_ => (),
}
}
// Closures and fn pointers implement `AsyncFn*` if their return types
// implement `Future`, which is checked later.

View file

@ -13,7 +13,7 @@ use rustc_middle::ty::{self, InstanceKind, Ty, TyCtxt};
use rustc_session::config::OptLevel;
use rustc_span::def_id::DefId;
use rustc_target::callconv::{
ArgAbi, ArgAttribute, ArgAttributes, ArgExtension, Conv, FnAbi, PassMode, RiscvInterruptKind,
AbiMap, ArgAbi, ArgAttribute, ArgAttributes, ArgExtension, FnAbi, PassMode,
};
use tracing::debug;
@ -240,45 +240,6 @@ fn fn_sig_for_fn_abi<'tcx>(
}
}
#[inline]
fn conv_from_spec_abi(tcx: TyCtxt<'_>, abi: ExternAbi, c_variadic: bool) -> Conv {
use rustc_abi::ExternAbi::*;
match tcx.sess.target.adjust_abi(abi, c_variadic) {
Rust | RustCall => Conv::Rust,
// This is intentionally not using `Conv::Cold`, as that has to preserve
// even SIMD registers, which is generally not a good trade-off.
RustCold => Conv::PreserveMost,
// It's the ABI's job to select this, not ours.
System { .. } => bug!("system abi should be selected elsewhere"),
EfiApi => bug!("eficall abi should be selected elsewhere"),
Stdcall { .. } => Conv::X86Stdcall,
Fastcall { .. } => Conv::X86Fastcall,
Vectorcall { .. } => Conv::X86VectorCall,
Thiscall { .. } => Conv::X86ThisCall,
C { .. } => Conv::C,
Unadjusted => Conv::C,
Win64 { .. } => Conv::X86_64Win64,
SysV64 { .. } => Conv::X86_64SysV,
Aapcs { .. } => Conv::ArmAapcs,
CCmseNonSecureCall => Conv::CCmseNonSecureCall,
CCmseNonSecureEntry => Conv::CCmseNonSecureEntry,
PtxKernel => Conv::GpuKernel,
Msp430Interrupt => Conv::Msp430Intr,
X86Interrupt => Conv::X86Intr,
GpuKernel => Conv::GpuKernel,
AvrInterrupt => Conv::AvrInterrupt,
AvrNonBlockingInterrupt => Conv::AvrNonBlockingInterrupt,
RiscvInterruptM => Conv::RiscvInterrupt { kind: RiscvInterruptKind::Machine },
RiscvInterruptS => Conv::RiscvInterrupt { kind: RiscvInterruptKind::Supervisor },
// These API constants ought to be more specific...
Cdecl { .. } => Conv::C,
}
}
fn fn_abi_of_fn_ptr<'tcx>(
tcx: TyCtxt<'tcx>,
query: ty::PseudoCanonicalInput<'tcx, (ty::PolyFnSig<'tcx>, &'tcx ty::List<Ty<'tcx>>)>,
@ -529,7 +490,8 @@ fn fn_abi_new_uncached<'tcx>(
};
let sig = tcx.normalize_erasing_regions(cx.typing_env, sig);
let conv = conv_from_spec_abi(cx.tcx(), sig.abi, sig.c_variadic);
let abi_map = AbiMap::from_target(&tcx.sess.target);
let conv = abi_map.canonize_abi(sig.abi, sig.c_variadic).unwrap();
let mut inputs = sig.inputs();
let extra_args = if sig.abi == ExternAbi::RustCall {

View file

@ -32,7 +32,6 @@ optimize_for_size = ["core/optimize_for_size"]
[lints.rust.unexpected_cfgs]
level = "warn"
check-cfg = [
'cfg(bootstrap)',
'cfg(no_global_oom_handling)',
'cfg(no_rc)',
'cfg(no_sync)',

View file

@ -66,7 +66,6 @@
)]
#![doc(cfg_hide(
not(test),
not(any(test, bootstrap)),
no_global_oom_handling,
not(no_global_oom_handling),
not(no_rc),

View file

@ -39,7 +39,6 @@ harness = false
[lints.rust.unexpected_cfgs]
level = "warn"
check-cfg = [
'cfg(bootstrap)',
'cfg(no_global_oom_handling)',
'cfg(no_rc)',
'cfg(no_sync)',

View file

@ -0,0 +1,16 @@
# EditorConfig helps developers define and maintain consistent
# coding styles between different editors and IDEs
# editorconfig.org
root = true
[*]
end_of_line = lf
charset = utf-8
trim_trailing_whitespace = true
insert_final_newline = true
indent_style = space
indent_size = 4
[*.yml]
indent_size = 2

View file

@ -0,0 +1,6 @@
# Use `git config blame.ignorerevsfile .git-blame-ignore-revs` to make
# `git blame` ignore the following commits.
# Reformat with a new `.rustfmt.toml`
# In rust-lang/libm this was 5882cabb83c30bf7c36023f9a55a80583636b0e8
4bb07a6275cc628ef81c65ac971dc6479963322f

View file

@ -0,0 +1,344 @@
name: CI
on:
push: { branches: [master] }
pull_request:
concurrency:
# Make sure that new pushes cancel running jobs
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
cancel-in-progress: true
env:
CARGO_TERM_COLOR: always
RUSTDOCFLAGS: -Dwarnings
RUSTFLAGS: -Dwarnings
RUST_BACKTRACE: full
  BENCHMARK_RUSTC: nightly-2025-01-16 # Pin the toolchain for reproducible results
jobs:
# Determine which tests should be run based on changed files.
calculate_vars:
name: Calculate workflow variables
runs-on: ubuntu-24.04
timeout-minutes: 10
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
PR_NUMBER: ${{ github.event.pull_request.number }}
outputs:
extensive_matrix: ${{ steps.script.outputs.extensive_matrix }}
may_skip_libm_ci: ${{ steps.script.outputs.may_skip_libm_ci }}
steps:
- uses: actions/checkout@v4
with:
fetch-depth: 500
- name: Fetch pull request ref
run: git fetch origin "$GITHUB_REF:$GITHUB_REF"
if: github.event_name == 'pull_request'
- run: python3 ci/ci-util.py generate-matrix >> "$GITHUB_OUTPUT"
id: script
test:
name: Build and test
timeout-minutes: 60
strategy:
fail-fast: false
matrix:
include:
- target: aarch64-apple-darwin
os: macos-15
- target: aarch64-unknown-linux-gnu
os: ubuntu-24.04-arm
- target: aarch64-pc-windows-msvc
os: windows-2025
test_verbatim: 1
build_only: 1
- target: arm-unknown-linux-gnueabi
os: ubuntu-24.04
- target: arm-unknown-linux-gnueabihf
os: ubuntu-24.04
- target: armv7-unknown-linux-gnueabihf
os: ubuntu-24.04
- target: i586-unknown-linux-gnu
os: ubuntu-24.04
- target: i686-unknown-linux-gnu
os: ubuntu-24.04
- target: loongarch64-unknown-linux-gnu
os: ubuntu-24.04
- target: powerpc-unknown-linux-gnu
os: ubuntu-24.04
- target: powerpc64-unknown-linux-gnu
os: ubuntu-24.04
- target: powerpc64le-unknown-linux-gnu
os: ubuntu-24.04
- target: riscv64gc-unknown-linux-gnu
os: ubuntu-24.04
- target: thumbv6m-none-eabi
os: ubuntu-24.04
- target: thumbv7em-none-eabi
os: ubuntu-24.04
- target: thumbv7em-none-eabihf
os: ubuntu-24.04
- target: thumbv7m-none-eabi
os: ubuntu-24.04
- target: wasm32-unknown-unknown
os: ubuntu-24.04
- target: x86_64-unknown-linux-gnu
os: ubuntu-24.04
- target: x86_64-apple-darwin
os: macos-13
- target: i686-pc-windows-msvc
os: windows-2025
test_verbatim: 1
- target: x86_64-pc-windows-msvc
os: windows-2025
test_verbatim: 1
- target: i686-pc-windows-gnu
os: windows-2025
channel: nightly-i686-gnu
- target: x86_64-pc-windows-gnu
os: windows-2025
channel: nightly-x86_64-gnu
runs-on: ${{ matrix.os }}
needs: [calculate_vars]
env:
BUILD_ONLY: ${{ matrix.build_only }}
TEST_VERBATIM: ${{ matrix.test_verbatim }}
MAY_SKIP_LIBM_CI: ${{ needs.calculate_vars.outputs.may_skip_libm_ci }}
steps:
- name: Print runner information
run: uname -a
- uses: actions/checkout@v4
with:
submodules: true
- name: Install Rust (rustup)
shell: bash
run: |
channel="nightly"
# Account for channels that have required components (MinGW)
[ -n "${{ matrix.channel }}" ] && channel="${{ matrix.channel }}"
rustup update "$channel" --no-self-update
rustup default "$channel"
rustup target add "${{ matrix.target }}"
rustup component add llvm-tools-preview
- uses: taiki-e/install-action@nextest
- uses: Swatinem/rust-cache@v2
with:
key: ${{ matrix.target }}
- name: Cache Docker layers
uses: actions/cache@v4
if: matrix.os == 'ubuntu-24.04'
with:
path: /tmp/.buildx-cache
key: ${{ matrix.target }}-buildx-${{ github.sha }}
restore-keys: ${{ matrix.target }}-buildx-
# Configure buildx to use Docker layer caching
- uses: docker/setup-buildx-action@v3
if: matrix.os == 'ubuntu-24.04'
- name: Cache compiler-rt
id: cache-compiler-rt
uses: actions/cache@v4
with:
path: compiler-rt
key: ${{ runner.os }}-compiler-rt-${{ hashFiles('ci/download-compiler-rt.sh') }}
- name: Download compiler-rt reference sources
if: steps.cache-compiler-rt.outputs.cache-hit != 'true'
run: ./ci/download-compiler-rt.sh
shell: bash
- run: echo "RUST_COMPILER_RT_ROOT=$(realpath ./compiler-rt)" >> "$GITHUB_ENV"
shell: bash
- name: Verify API list
if: matrix.os == 'ubuntu-24.04'
run: python3 etc/update-api-list.py --check
# Non-linux tests just use our raw script
- name: Run locally
if: matrix.os != 'ubuntu-24.04'
shell: bash
run: ./ci/run.sh ${{ matrix.target }}
# Otherwise we use our docker containers to run builds
- name: Run in Docker
if: matrix.os == 'ubuntu-24.04'
run: ./ci/run-docker.sh ${{ matrix.target }}
- name: Print test logs if available
if: always()
run: if [ -f "target/test-log.txt" ]; then cat target/test-log.txt; fi
shell: bash
# Workaround to keep Docker cache smaller
# https://github.com/docker/build-push-action/issues/252
# https://github.com/moby/buildkit/issues/1896
- name: Move Docker cache
if: matrix.os == 'ubuntu-24.04'
run: |
rm -rf /tmp/.buildx-cache
mv /tmp/.buildx-cache-new /tmp/.buildx-cache
clippy:
name: Clippy
runs-on: ubuntu-24.04
timeout-minutes: 10
steps:
- uses: actions/checkout@v4
with:
submodules: true
# Unlike rustfmt, stable clippy does not work on code with nightly features.
- name: Install nightly `clippy`
run: |
rustup set profile minimal
rustup default nightly
rustup component add clippy
- uses: Swatinem/rust-cache@v2
- run: cargo clippy --workspace --all-targets
benchmarks:
name: Benchmarks
runs-on: ubuntu-24.04
timeout-minutes: 20
steps:
- uses: actions/checkout@master
with:
submodules: true
- uses: taiki-e/install-action@cargo-binstall
- name: Set up dependencies
run: |
sudo apt-get update
sudo apt-get install -y valgrind gdb libc6-dbg # Needed for iai-callgrind
rustup update "$BENCHMARK_RUSTC" --no-self-update
rustup default "$BENCHMARK_RUSTC"
# Install the version of iai-callgrind-runner that is specified in Cargo.toml
iai_version="$(cargo metadata --format-version=1 --features icount |
jq -r '.packages[] | select(.name == "iai-callgrind").version')"
cargo binstall -y iai-callgrind-runner --version "$iai_version"
sudo apt-get install valgrind
- uses: Swatinem/rust-cache@v2
- name: Run icount benchmarks
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
PR_NUMBER: ${{ github.event.pull_request.number }}
run: ./ci/bench-icount.sh
- name: Upload the benchmark baseline
uses: actions/upload-artifact@v4
with:
name: ${{ env.BASELINE_NAME }}
path: ${{ env.BASELINE_NAME }}.tar.xz
- name: Run wall time benchmarks
run: |
# Always use the same seed for benchmarks. Ideally we should switch to a
# non-random generator.
export LIBM_SEED=benchesbenchesbenchesbencheswoo!
cargo bench --package libm-test \
--no-default-features \
--features short-benchmarks,build-musl,libm/force-soft-floats
- name: Print test logs if available
if: always()
run: if [ -f "target/test-log.txt" ]; then cat target/test-log.txt; fi
shell: bash
miri:
name: Miri
runs-on: ubuntu-24.04
timeout-minutes: 10
steps:
- uses: actions/checkout@v4
with:
submodules: true
- name: Install Rust (rustup)
run: rustup update nightly --no-self-update && rustup default nightly
shell: bash
- run: rustup component add miri
- run: cargo miri setup
- uses: Swatinem/rust-cache@v2
- run: ./ci/miri.sh
msrv:
name: Check libm MSRV
runs-on: ubuntu-24.04
timeout-minutes: 10
env:
RUSTFLAGS: # No need to check warnings on old MSRV, unset `-Dwarnings`
steps:
- uses: actions/checkout@master
- name: Install Rust
run: |
msrv="$(perl -ne 'print if s/rust-version\s*=\s*"(.*)"/\1/g' libm/Cargo.toml)"
echo "MSRV: $msrv"
rustup update "$msrv" --no-self-update && rustup default "$msrv"
- uses: Swatinem/rust-cache@v2
- run: |
# FIXME(msrv): Remove the workspace Cargo.toml so 1.63 cargo doesn't see
# `edition = "2024"` and get spooked.
rm Cargo.toml
cargo build --manifest-path libm/Cargo.toml
rustfmt:
name: Rustfmt
runs-on: ubuntu-24.04
timeout-minutes: 10
steps:
- uses: actions/checkout@v4
with:
submodules: true
- name: Install stable `rustfmt`
run: rustup set profile minimal && rustup default stable && rustup component add rustfmt
- run: cargo fmt -- --check
extensive:
name: Extensive tests for ${{ matrix.ty }}
needs:
# Wait on `clippy` so we have some confidence that the crate will build
- clippy
- calculate_vars
runs-on: ubuntu-24.04
timeout-minutes: 240 # 4 hours
strategy:
matrix:
# Use the output from `calculate_vars` to create the matrix
# FIXME: it would be better to run all jobs (i.e. all types) but mark those that
# didn't change as skipped, rather than completely excluding the job. However,
# this is not currently possible https://github.com/actions/runner/issues/1985.
include: ${{ fromJSON(needs.calculate_vars.outputs.extensive_matrix).extensive_matrix }}
env:
TO_TEST: ${{ matrix.to_test }}
steps:
- uses: actions/checkout@v4
with:
submodules: true
- name: Install Rust
run: |
rustup update nightly --no-self-update
rustup default nightly
- uses: Swatinem/rust-cache@v2
- name: Run extensive tests
run: ./ci/run-extensive.sh
- name: Print test logs if available
run: if [ -f "target/test-log.txt" ]; then cat target/test-log.txt; fi
shell: bash
success:
needs:
- benchmarks
- clippy
- extensive
- miri
- msrv
- rustfmt
- test
runs-on: ubuntu-24.04
timeout-minutes: 10
# GitHub branch protection is exceedingly silly and treats "jobs skipped because a dependency
# failed" as success. So we have to do some contortions to ensure the job fails if any of its
# dependencies fails.
if: always() # make sure this is never "skipped"
steps:
# Manually check the status of all dependencies. `if: failure()` does not work.
- name: check if any dependency failed
run: jq --exit-status 'all(.result == "success")' <<< '${{ toJson(needs) }}'

View file

@ -0,0 +1,25 @@
name: Release-plz
permissions:
pull-requests: write
contents: write
on:
push: { branches: [master] }
jobs:
release-plz:
name: Release-plz
runs-on: ubuntu-24.04
steps:
- name: Checkout repository
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Install Rust (rustup)
run: rustup update nightly --no-self-update && rustup default nightly
- name: Run release-plz
uses: MarcoIeni/release-plz-action@v0.5
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
CARGO_REGISTRY_TOKEN: ${{ secrets.CARGO_REGISTRY_TOKEN }}

16
library/compiler-builtins/.gitignore vendored Normal file
View file

@ -0,0 +1,16 @@
# Rust files
Cargo.lock
target
# Sources for external files
compiler-rt
*.tar.gz
# Benchmark cache
baseline-*
iai-home
# Temporary files
*.bk
*.rs.bk
.#*

4
library/compiler-builtins/.gitmodules vendored Normal file
View file

@ -0,0 +1,4 @@
[submodule "crates/musl-math-sys/musl"]
path = crates/musl-math-sys/musl
url = https://git.musl-libc.org/git/musl
shallow = true

View file

@ -0,0 +1,13 @@
[workspace]
# As part of the release process, we delete `libm/Cargo.toml`. Since
# this is only run in CI, we shouldn't need to worry about it.
allow_dirty = true
publish_allow_dirty = true
[[package]]
name = "compiler_builtins"
semver_check = false
changelog_include = ["libm"] # libm is included as part of builtins
[[package]]
name = "libm"

View file

@ -0,0 +1,4 @@
# This matches rustc
style_edition = "2024"
group_imports = "StdExternalCrate"
imports_granularity = "Module"

View file

@ -0,0 +1,167 @@
# How to contribute
## compiler-builtins
1. From the [pending list](compiler-builtins/README.md#progress), pick one or
more intrinsics.
2. Port the version from [`compiler-rt`] and, if applicable, their
[tests][rt-tests]. Note that this crate has generic implementations for a lot
of routines, which may be usable without porting the entire implementation.
3. Add a test to `builtins-test`, comparing the behavior of the ported
intrinsic(s) with their implementation on the testing host.
4. Add the intrinsic to `builtins-test-intrinsics/src/main.rs` to verify it can
be linked on all targets.
5. Send a Pull Request (PR) :tada:.
[`compiler-rt`]: https://github.com/llvm/llvm-project/tree/b6820c35c59a4da3e59c11f657093ffbd79ae1db/compiler-rt/lib/builtins
[rt-tests]: https://github.com/llvm/llvm-project/tree/b6820c35c59a4da3e59c11f657093ffbd79ae1db/compiler-rt/test/builtins
## Porting Reminders
1. [Rust][prec-rust] and [C][prec-c] have slightly different operator
precedence. C evaluates comparisons (`== !=`) before bitwise operations
(`& | ^`), while Rust evaluates the other way.
2. C assumes wrapping operations everywhere. Rust panics on overflow when in
debug mode. Consider using the [Wrapping][wrap-ty] type or the explicit
[wrapping_*][wrap-fn] functions where applicable.
3. Note [C implicit casts][casts], especially integer promotion. Rust is much
more explicit about casting, so be sure that any cast which affects the
output is ported to the Rust implementation.
4. Rust has [many functions][i32] for integer or floating point manipulation in
the standard library. Consider using one of these functions rather than
porting a new one.
[prec-rust]: https://doc.rust-lang.org/reference/expressions.html#expression-precedence
[prec-c]: http://en.cppreference.com/w/c/language/operator_precedence
[wrap-ty]: https://doc.rust-lang.org/core/num/struct.Wrapping.html
[wrap-fn]: https://doc.rust-lang.org/std/primitive.i32.html#method.wrapping_add
[casts]: http://en.cppreference.com/w/cpp/language/implicit_conversion
[i32]: https://doc.rust-lang.org/std/primitive.i32.html
## Tips and tricks
- _IMPORTANT_ The code in this crate will end up being used in the `core` crate
so it can **not** have any external dependencies (other than a subset of
`core` itself).
- Only use relative imports within the `math` directory / module, e.g.
`use self::fabs::fabs` or `use super::k_cos`. Absolute imports from core are
OK, e.g. `use core::u64`.
- To reinterpret a float as an integer use the `to_bits` method. The MUSL code
uses the `GET_FLOAT_WORD` macro, or a union, to do this operation.
- To reinterpret an integer as a float use the `f32::from_bits` constructor. The
MUSL code uses the `SET_FLOAT_WORD` macro, or a union, to do this operation.
- You may use other methods from core like `f64::is_nan`, etc. as appropriate.
- Rust does not have hex float literals. This crate provides four macros,
  `hf16!`, `hf32!`, `hf64!`, and `hf128!`, which convert string literals to
  floats at compile time.
```rust
assert_eq!(hf32!("0x1.ffep+8").to_bits(), 0x43fff000);
assert_eq!(hf64!("0x1.ffep+8").to_bits(), 0x407ffe0000000000);
```
- Rust code panics on arithmetic overflows when not optimized. You may need to
use the [`Wrapping`] newtype to avoid this problem, or individual methods like
[`wrapping_add`].
[`Wrapping`]: https://doc.rust-lang.org/std/num/struct.Wrapping.html
[`wrapping_add`]: https://doc.rust-lang.org/std/primitive.u32.html#method.wrapping_add
## Testing
Testing for these crates can be somewhat complex, so feel free to rely on CI.
The easiest way to replicate CI testing is using Docker. This can be done by
running `./ci/run-docker.sh [target]`. If no target is specified, all targets
will be run.
Tests can also be run without Docker:
```sh
# Run basic tests
#
# --no-default-features always needs to be passed, an unfortunate limitation
# since the `#![compiler_builtins]` feature is enabled by default.
cargo test --workspace --no-default-features
# Test with all interesting features
cargo test --workspace --no-default-features \
--features arch,unstable-float,unstable-intrinsics,mem
# Run with more detailed tests for libm
cargo test --workspace --no-default-features \
--features arch,unstable-float,unstable-intrinsics,mem \
--features build-mpfr,build-musl \
--profile release-checked
```
The multiprecision tests use the [`rug`] crate for bindings to MPFR. MPFR can be
difficult to build on non-Unix systems, refer to [`gmp_mpfr_sys`] for help.
`build-musl` does not build with MSVC, Wasm, or Thumb.
[`rug`]: https://docs.rs/rug/latest/rug/
[`gmp_mpfr_sys`]: https://docs.rs/gmp-mpfr-sys/1.6.4/gmp_mpfr_sys/
In order to run all tests, some dependencies may be required:
```sh
# Allow testing compiler-builtins
./ci/download-compiler-rt.sh
# Optional, initialize musl for `--features build-musl`
git submodule init
git submodule update
# `--release` enables more test cases
cargo test --release
```
### Extensive tests
Libm also has tests that are exhaustive (for single-argument `f32` and 1- or 2-
argument `f16`) or extensive (for all other float and argument combinations).
These take quite a long time to run, but are launched in CI when relevant files
are changed.
Exhaustive tests can be selected by passing an environment variable:
```sh
LIBM_EXTENSIVE_TESTS=sqrt,sqrtf cargo test --features build-mpfr \
--test z_extensive \
--profile release-checked
# Run all tests for one type
LIBM_EXTENSIVE_TESTS=all_f16 cargo test ...
# Ensure `f64` tests can run exhaustively. Estimated completion time for a
# single test is 57306 years on my machine so this may be worth skipping.
LIBM_EXTENSIVE_TESTS=all LIBM_EXTENSIVE_ITERATIONS=18446744073709551615 cargo test ...
```
## Benchmarking
Regular walltime benchmarks can be run with `cargo bench`:
```sh
cargo bench --no-default-features \
--features arch,unstable-float,unstable-intrinsics,mem \
--features benchmarking-reports
```
There are also benchmarks that check instruction count behind the `icount`
feature. These require [`iai-callgrind-runner`] (via Cargo) and [Valgrind]
to be installed, which means these only run on limited platforms.
Instruction count benchmarks are run as part of CI to flag performance
regressions.
```sh
cargo bench --no-default-features \
--features arch,unstable-float,unstable-intrinsics,mem \
--features icount \
--bench icount --bench mem_icount
```
[`iai-callgrind-runner`]: https://crates.io/crates/iai-callgrind-runner
[Valgrind]: https://valgrind.org/

View file

@ -0,0 +1,50 @@
[workspace]
resolver = "2"
members = [
"builtins-test",
"compiler-builtins",
"crates/libm-macros",
"crates/musl-math-sys",
"crates/panic-handler",
"crates/util",
"libm",
"libm-test",
]
default-members = [
"builtins-test",
"compiler-builtins",
"crates/libm-macros",
"libm",
"libm-test",
]
exclude = [
# `builtins-test-intrinsics` needs the feature `compiler-builtins` enabled
# and `mangled-names` disabled, which is the opposite of what is needed for
# other tests, so it makes sense to keep it out of the workspace.
"builtins-test-intrinsics",
]
[profile.release]
panic = "abort"
[profile.dev]
panic = "abort"
# Release mode with debug assertions
[profile.release-checked]
inherits = "release"
debug-assertions = true
overflow-checks = true
# Release with maximum optimizations, which is very slow to build. This is also
# what is needed to check `no-panic`.
[profile.release-opt]
inherits = "release"
codegen-units = 1
lto = "fat"
[profile.bench]
# Required for iai-callgrind
debug = true

View file

@ -0,0 +1,275 @@
The compiler-builtins crate is available for use under both the MIT license
and the Apache-2.0 license with the LLVM exception (MIT AND Apache-2.0 WITH
LLVM-exception).
The libm crate is available for use under the MIT license.
As a contributor, you agree that your code may be used under any of the
following: the MIT license, the Apache-2.0 license, or the Apache-2.0 license
with the LLVM exception. In other words, original (non-derivative) work is
licensed under MIT OR Apache-2.0 OR Apache-2.0 WITH LLVM-exception. This is
the default license for all other source in this repository.
Text of the relevant licenses is provided below:
------------------------------------------------------------------------------
MIT License
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
------------------------------------------------------------------------------
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
---- LLVM Exceptions to the Apache 2.0 License ----
As an exception, if, as a result of your compiling your source code, portions
of this Software are embedded into an Object form of such source code, you
may redistribute such embedded portions in such Object form without complying
with the conditions of Sections 4(a), 4(b) and 4(d) of the License.
In addition, if you combine or link compiled forms of this Software with
software that is licensed under the GPLv2 ("Combined Software") and if a
court of competent jurisdiction determines that the patent provision (Section
3), the indemnity provision (Section 9) or other Section of the License
conflicts with the conditions of the GPLv2, you may retroactively and
prospectively choose to deem waived or otherwise exclude such Section(s) of
the License, but only in their entirety and only with respect to the Combined
Software.
------------------------------------------------------------------------------
Portions of this software are derived from third-party works licensed under
terms compatible with the above Apache-2.0 WITH LLVM-exception AND MIT
license:
* compiler-builtins is derived from LLVM's compiler-rt (https://llvm.org/).
Work derived from compiler-rt prior to 2019-01-19 is usable under the MIT
license, with the following copyright:
Copyright (c) 2009-2016 by the contributors listed in CREDITS.TXT
The relevant CREDITS.TXT is located at
https://github.com/llvm/llvm-project/blob/main/compiler-rt/CREDITS.TXT.
* Work derived from compiler-rt after 2019-01-19 is usable under the
Apache-2.0 license with the LLVM exception.
* The bundled `math` module is from the libm crate, usable under the MIT
  license. For further details and copyrights, see libm/LICENSE.txt at
  https://github.com/rust-lang/compiler-builtins.
Additionally, some source files may contain comments with specific copyrights
or licenses.

View file

@ -0,0 +1,16 @@
# Publishing to crates.io
Publishing `compiler-builtins` to crates.io takes a few steps unfortunately.
It's not great, but it works for now. PRs to improve this process would be
greatly appreciated!
1. Make sure you've got a clean working tree and it's updated with the latest
changes on `master`
2. Edit `Cargo.toml` to bump the version number
3. Commit this change
4. Run `git tag` to create a tag for this version
5. Delete the `libm/Cargo.toml` file
6. Run `cargo +nightly publish`
7. Push the tag
8. Push the commit
9. Undo changes to `Cargo.toml` and the `libm` submodule

View file

@ -0,0 +1,27 @@
# `compiler-builtins` and `libm`
This repository contains two main crates:
* `compiler-builtins`: symbols that the compiler expects to be available at
link time
* `libm`: a Rust implementation of C math libraries, used to provide
  implementations in `core`.
More details are at [compiler-builtins/README.md](compiler-builtins/README.md)
and [libm/README.md](libm/README.md).
For instructions on contributing, see [CONTRIBUTING.md](CONTRIBUTING.md).
## License
* `libm` may be used under the [MIT License]
* `compiler-builtins` may be used under the [MIT License] and the
[Apache License, Version 2.0] with the LLVM exception.
* All original contributions must be under all of: the MIT license, the
Apache-2.0 license, and the Apache-2.0 license with the LLVM exception.
More details are in [LICENSE.txt](LICENSE.txt) and
[libm/LICENSE.txt](libm/LICENSE.txt).
[MIT License]: https://opensource.org/license/mit
[Apache License, Version 2.0]: https://www.apache.org/licenses/LICENSE-2.0

View file

@ -0,0 +1,19 @@
[package]
name = "builtins-test-intrinsics"
version = "0.1.0"
edition = "2021"
publish = false
license = "MIT OR Apache-2.0"
[dependencies]
compiler_builtins = { path = "../compiler-builtins", features = ["compiler-builtins"]}
panic-handler = { path = "../crates/panic-handler" }
[features]
c = ["compiler_builtins/c"]
[profile.release]
panic = "abort"
[profile.dev]
panic = "abort"

View file

@ -0,0 +1,11 @@
// Pull the shared configure script in from the `compiler-builtins` crate so
// both build scripts stay in sync.
mod builtins_configure {
    include!("../compiler-builtins/configure.rs");
}

/// Build script: re-emit cfg flags (`f16`/`f128` support, arch aliases) for
/// the current target.
fn main() {
    // Rebuild when the shared configure script changes. The path must match
    // the file pulled in by `include!` above; it previously pointed at
    // `../configure.rs`, which does not exist, so edits to the real script
    // never triggered a rebuild.
    println!("cargo::rerun-if-changed=../compiler-builtins/configure.rs");
    let target = builtins_configure::Target::from_env();
    builtins_configure::configure_f16_f128(&target);
    builtins_configure::configure_aliases(&target);
}

View file

@ -0,0 +1,697 @@
// By compiling this file we check that all the intrinsics we care about continue to be provided by
// the `compiler_builtins` crate regardless of the changes we make to it. If we, by mistake, stop
// compiling a C implementation and forget to implement that intrinsic in Rust, this file will fail
// to link due to the missing intrinsic (symbol).
#![allow(unused_features)]
#![allow(internal_features)]
#![deny(dead_code)]
#![feature(allocator_api)]
#![feature(f128)]
#![feature(f16)]
#![feature(lang_items)]
#![no_std]
#![no_main]
extern crate panic_handler;
#[cfg(all(not(thumb), not(windows), not(target_arch = "wasm32")))]
#[link(name = "c")]
extern "C" {}
// Every function in this module maps will be lowered to an intrinsic by LLVM, if the platform
// doesn't have native support for the operation used in the function. ARM has a naming convention
// convention for its intrinsics that's different from other architectures; that's why some function
// have an additional comment: the function name is the ARM name for the intrinsic and the comment
// in the non-ARM name for the intrinsic.
mod intrinsics {
/* f16 operations */
#[cfg(f16_enabled)]
pub fn extendhfsf(x: f16) -> f32 {
x as f32
}
#[cfg(f16_enabled)]
pub fn extendhfdf(x: f16) -> f64 {
x as f64
}
#[cfg(all(
f16_enabled,
f128_enabled,
not(any(target_arch = "powerpc", target_arch = "powerpc64"))
))]
pub fn extendhftf(x: f16) -> f128 {
x as f128
}
/* f32 operations */
#[cfg(f16_enabled)]
pub fn truncsfhf(x: f32) -> f16 {
x as f16
}
// extendsfdf2
pub fn aeabi_f2d(x: f32) -> f64 {
x as f64
}
#[cfg(f128_enabled)]
pub fn extendsftf(x: f32) -> f128 {
x as f128
}
// fixsfsi
pub fn aeabi_f2iz(x: f32) -> i32 {
x as i32
}
// fixsfdi
pub fn aeabi_f2lz(x: f32) -> i64 {
x as i64
}
pub fn fixsfti(x: f32) -> i128 {
x as i128
}
// fixunssfsi
pub fn aeabi_f2uiz(x: f32) -> u32 {
x as u32
}
// fixunssfdi
pub fn aeabi_f2ulz(x: f32) -> u64 {
x as u64
}
pub fn fixunssfti(x: f32) -> u128 {
x as u128
}
// addsf3
pub fn aeabi_fadd(a: f32, b: f32) -> f32 {
a + b
}
// eqsf2
pub fn aeabi_fcmpeq(a: f32, b: f32) -> bool {
a == b
}
// gtsf2
pub fn aeabi_fcmpgt(a: f32, b: f32) -> bool {
a > b
}
// ltsf2
pub fn aeabi_fcmplt(a: f32, b: f32) -> bool {
a < b
}
// divsf3
pub fn aeabi_fdiv(a: f32, b: f32) -> f32 {
a / b
}
// mulsf3
pub fn aeabi_fmul(a: f32, b: f32) -> f32 {
a * b
}
// subsf3
pub fn aeabi_fsub(a: f32, b: f32) -> f32 {
a - b
}
/* f64 operations */
// truncdfsf2
pub fn aeabi_d2f(x: f64) -> f32 {
x as f32
}
// fixdfsi
pub fn aeabi_d2i(x: f64) -> i32 {
x as i32
}
// fixdfdi
pub fn aeabi_d2l(x: f64) -> i64 {
x as i64
}
pub fn fixdfti(x: f64) -> i128 {
x as i128
}
// fixunsdfsi
pub fn aeabi_d2uiz(x: f64) -> u32 {
x as u32
}
// fixunsdfdi
pub fn aeabi_d2ulz(x: f64) -> u64 {
x as u64
}
pub fn fixunsdfti(x: f64) -> u128 {
x as u128
}
// adddf3
pub fn aeabi_dadd(a: f64, b: f64) -> f64 {
a + b
}
// eqdf2
pub fn aeabi_dcmpeq(a: f64, b: f64) -> bool {
a == b
}
// gtdf2
pub fn aeabi_dcmpgt(a: f64, b: f64) -> bool {
a > b
}
// ltdf2
pub fn aeabi_dcmplt(a: f64, b: f64) -> bool {
a < b
}
// divdf3
pub fn aeabi_ddiv(a: f64, b: f64) -> f64 {
a / b
}
// muldf3
pub fn aeabi_dmul(a: f64, b: f64) -> f64 {
a * b
}
// subdf3
pub fn aeabi_dsub(a: f64, b: f64) -> f64 {
a - b
}
/* f128 operations */
#[cfg(all(
f16_enabled,
f128_enabled,
not(any(target_arch = "powerpc", target_arch = "powerpc64"))
))]
pub fn trunctfhf(x: f128) -> f16 {
x as f16
}
#[cfg(f128_enabled)]
pub fn trunctfsf(x: f128) -> f32 {
x as f32
}
#[cfg(f128_enabled)]
pub fn trunctfdf(x: f128) -> f64 {
x as f64
}
#[cfg(all(
f128_enabled,
not(any(target_arch = "powerpc", target_arch = "powerpc64"))
))]
pub fn fixtfsi(x: f128) -> i32 {
x as i32
}
#[cfg(all(
f128_enabled,
not(any(target_arch = "powerpc", target_arch = "powerpc64"))
))]
pub fn fixtfdi(x: f128) -> i64 {
x as i64
}
#[cfg(all(
f128_enabled,
not(any(target_arch = "powerpc", target_arch = "powerpc64"))
))]
pub fn fixtfti(x: f128) -> i128 {
x as i128
}
#[cfg(all(
f128_enabled,
not(any(target_arch = "powerpc", target_arch = "powerpc64"))
))]
pub fn fixunstfsi(x: f128) -> u32 {
x as u32
}
#[cfg(all(
f128_enabled,
not(any(target_arch = "powerpc", target_arch = "powerpc64"))
))]
pub fn fixunstfdi(x: f128) -> u64 {
x as u64
}
#[cfg(all(
f128_enabled,
not(any(target_arch = "powerpc", target_arch = "powerpc64"))
))]
pub fn fixunstfti(x: f128) -> u128 {
x as u128
}
#[cfg(f128_enabled)]
pub fn addtf(a: f128, b: f128) -> f128 {
a + b
}
#[cfg(f128_enabled)]
pub fn eqtf(a: f128, b: f128) -> bool {
a == b
}
#[cfg(f128_enabled)]
pub fn gttf(a: f128, b: f128) -> bool {
a > b
}
#[cfg(f128_enabled)]
pub fn lttf(a: f128, b: f128) -> bool {
a < b
}
#[cfg(f128_enabled)]
pub fn multf(a: f128, b: f128) -> f128 {
a * b
}
#[cfg(f128_enabled)]
pub fn divtf(a: f128, b: f128) -> f128 {
a / b
}
#[cfg(f128_enabled)]
pub fn subtf(a: f128, b: f128) -> f128 {
a - b
}
/* i32 operations */
// floatsisf
pub fn aeabi_i2f(x: i32) -> f32 {
x as f32
}
// floatsidf
pub fn aeabi_i2d(x: i32) -> f64 {
x as f64
}
#[cfg(f128_enabled)]
pub fn floatsitf(x: i32) -> f128 {
x as f128
}
pub fn aeabi_idiv(a: i32, b: i32) -> i32 {
a.wrapping_div(b)
}
pub fn aeabi_idivmod(a: i32, b: i32) -> i32 {
a % b
}
/* i64 operations */
// floatdisf
pub fn aeabi_l2f(x: i64) -> f32 {
x as f32
}
// floatdidf
pub fn aeabi_l2d(x: i64) -> f64 {
x as f64
}
#[cfg(f128_enabled)]
pub fn floatditf(x: i64) -> f128 {
x as f128
}
pub fn mulodi4(a: i64, b: i64) -> i64 {
a * b
}
// divdi3
pub fn aeabi_ldivmod(a: i64, b: i64) -> i64 {
a / b
}
pub fn moddi3(a: i64, b: i64) -> i64 {
a % b
}
// muldi3
pub fn aeabi_lmul(a: i64, b: i64) -> i64 {
a.wrapping_mul(b)
}
/* i128 operations */
pub fn floattisf(x: i128) -> f32 {
x as f32
}
pub fn floattidf(x: i128) -> f64 {
x as f64
}
#[cfg(f128_enabled)]
pub fn floattitf(x: i128) -> f128 {
x as f128
}
pub fn lshrti3(a: i128, b: usize) -> i128 {
a >> b
}
pub fn divti3(a: i128, b: i128) -> i128 {
a / b
}
pub fn modti3(a: i128, b: i128) -> i128 {
a % b
}
/* u32 operations */
// floatunsisf
pub fn aeabi_ui2f(x: u32) -> f32 {
x as f32
}
// floatunsidf
pub fn aeabi_ui2d(x: u32) -> f64 {
x as f64
}
#[cfg(f128_enabled)]
pub fn floatunsitf(x: u32) -> f128 {
x as f128
}
pub fn aeabi_uidiv(a: u32, b: u32) -> u32 {
a / b
}
pub fn aeabi_uidivmod(a: u32, b: u32) -> u32 {
a % b
}
/* u64 operations */
// floatundisf
pub fn aeabi_ul2f(x: u64) -> f32 {
x as f32
}
// floatundidf
pub fn aeabi_ul2d(x: u64) -> f64 {
x as f64
}
#[cfg(f128_enabled)]
pub fn floatunditf(x: u64) -> f128 {
x as f128
}
// udivdi3
/// Unsigned 64-bit division; forces the `__udivdi3` intrinsic
/// (`__aeabi_uldivmod` under the ARM naming convention).
pub fn aeabi_uldivmod(a: u64, b: u64) -> u64 {
    // Was `a * b`, which lowers to a multiplication intrinsic and leaves the
    // division symbol this wrapper exists to link against unexercised.
    a / b
}
pub fn umoddi3(a: u64, b: u64) -> u64 {
a % b
}
/* u128 operations */
pub fn floatuntisf(x: u128) -> f32 {
x as f32
}
pub fn floatuntidf(x: u128) -> f64 {
x as f64
}
#[cfg(f128_enabled)]
pub fn floatuntitf(x: u128) -> f128 {
x as f128
}
pub fn muloti4(a: u128, b: u128) -> Option<u128> {
a.checked_mul(b)
}
pub fn multi3(a: u128, b: u128) -> u128 {
a.wrapping_mul(b)
}
/// 128-bit shift left; forces the `__ashlti3` intrinsic.
pub fn ashlti3(a: u128, b: usize) -> u128 {
    // Was `a >> b`: "ashl" is shift *left* (operator was swapped with
    // `ashrti3`).
    a << b
}
/// 128-bit shift right; forces a right-shift intrinsic.
pub fn ashrti3(a: u128, b: usize) -> u128 {
    // Was `a << b`: "ashr" is shift *right* (operator was swapped with
    // `ashlti3`).
    a >> b
}
pub fn udivti3(a: u128, b: u128) -> u128 {
a / b
}
pub fn umodti3(a: u128, b: u128) -> u128 {
a % b
}
}
/// Call every intrinsic wrapper once so the linker must resolve each
/// corresponding `compiler_builtins` symbol. `black_box` keeps the optimizer
/// from const-folding the calls (and their symbols) away. Calls are kept in
/// alphabetical order; the `#[cfg]` attributes gate `f16`/`f128` wrappers to
/// targets where those types are enabled.
fn run() {
    use core::hint::black_box as bb;
    use intrinsics::*;

    // FIXME(f16_f128): some PPC f128 <-> int conversion functions have the wrong names
    #[cfg(f128_enabled)]
    bb(addtf(bb(2.), bb(2.)));
    bb(aeabi_d2f(bb(2.)));
    bb(aeabi_d2i(bb(2.)));
    bb(aeabi_d2l(bb(2.)));
    bb(aeabi_d2uiz(bb(2.)));
    bb(aeabi_d2ulz(bb(2.)));
    bb(aeabi_dadd(bb(2.), bb(3.)));
    bb(aeabi_dcmpeq(bb(2.), bb(3.)));
    bb(aeabi_dcmpgt(bb(2.), bb(3.)));
    bb(aeabi_dcmplt(bb(2.), bb(3.)));
    bb(aeabi_ddiv(bb(2.), bb(3.)));
    bb(aeabi_dmul(bb(2.), bb(3.)));
    bb(aeabi_dsub(bb(2.), bb(3.)));
    bb(aeabi_f2d(bb(2.)));
    bb(aeabi_f2iz(bb(2.)));
    bb(aeabi_f2lz(bb(2.)));
    bb(aeabi_f2uiz(bb(2.)));
    bb(aeabi_f2ulz(bb(2.)));
    bb(aeabi_fadd(bb(2.), bb(3.)));
    bb(aeabi_fcmpeq(bb(2.), bb(3.)));
    bb(aeabi_fcmpgt(bb(2.), bb(3.)));
    bb(aeabi_fcmplt(bb(2.), bb(3.)));
    bb(aeabi_fdiv(bb(2.), bb(3.)));
    bb(aeabi_fmul(bb(2.), bb(3.)));
    bb(aeabi_fsub(bb(2.), bb(3.)));
    bb(aeabi_i2d(bb(2)));
    bb(aeabi_i2f(bb(2)));
    bb(aeabi_idiv(bb(2), bb(3)));
    bb(aeabi_idivmod(bb(2), bb(3)));
    bb(aeabi_l2d(bb(2)));
    bb(aeabi_l2f(bb(2)));
    bb(aeabi_ldivmod(bb(2), bb(3)));
    bb(aeabi_lmul(bb(2), bb(3)));
    bb(aeabi_ui2d(bb(2)));
    bb(aeabi_ui2f(bb(2)));
    bb(aeabi_uidiv(bb(2), bb(3)));
    bb(aeabi_uidivmod(bb(2), bb(3)));
    bb(aeabi_ul2d(bb(2)));
    bb(aeabi_ul2f(bb(2)));
    bb(aeabi_uldivmod(bb(2), bb(3)));
    bb(ashlti3(bb(2), bb(2)));
    bb(ashrti3(bb(2), bb(2)));
    #[cfg(f128_enabled)]
    bb(divtf(bb(2.), bb(2.)));
    bb(divti3(bb(2), bb(2)));
    #[cfg(f128_enabled)]
    bb(eqtf(bb(2.), bb(2.)));
    #[cfg(f16_enabled)]
    bb(extendhfdf(bb(2.)));
    #[cfg(f16_enabled)]
    bb(extendhfsf(bb(2.)));
    #[cfg(all(
        f16_enabled,
        f128_enabled,
        not(any(target_arch = "powerpc", target_arch = "powerpc64"))
    ))]
    bb(extendhftf(bb(2.)));
    #[cfg(f128_enabled)]
    bb(extendsftf(bb(2.)));
    bb(fixdfti(bb(2.)));
    bb(fixsfti(bb(2.)));
    #[cfg(all(
        f128_enabled,
        not(any(target_arch = "powerpc", target_arch = "powerpc64"))
    ))]
    bb(fixtfdi(bb(2.)));
    #[cfg(all(
        f128_enabled,
        not(any(target_arch = "powerpc", target_arch = "powerpc64"))
    ))]
    bb(fixtfsi(bb(2.)));
    #[cfg(all(
        f128_enabled,
        not(any(target_arch = "powerpc", target_arch = "powerpc64"))
    ))]
    bb(fixtfti(bb(2.)));
    bb(fixunsdfti(bb(2.)));
    bb(fixunssfti(bb(2.)));
    #[cfg(all(
        f128_enabled,
        not(any(target_arch = "powerpc", target_arch = "powerpc64"))
    ))]
    bb(fixunstfdi(bb(2.)));
    #[cfg(all(
        f128_enabled,
        not(any(target_arch = "powerpc", target_arch = "powerpc64"))
    ))]
    bb(fixunstfsi(bb(2.)));
    #[cfg(all(
        f128_enabled,
        not(any(target_arch = "powerpc", target_arch = "powerpc64"))
    ))]
    bb(fixunstfti(bb(2.)));
    #[cfg(f128_enabled)]
    bb(floatditf(bb(2)));
    #[cfg(f128_enabled)]
    bb(floatsitf(bb(2)));
    bb(floattidf(bb(2)));
    bb(floattisf(bb(2)));
    #[cfg(f128_enabled)]
    bb(floattitf(bb(2)));
    #[cfg(f128_enabled)]
    bb(floatunditf(bb(2)));
    #[cfg(f128_enabled)]
    bb(floatunsitf(bb(2)));
    bb(floatuntidf(bb(2)));
    bb(floatuntisf(bb(2)));
    #[cfg(f128_enabled)]
    bb(floatuntitf(bb(2)));
    #[cfg(f128_enabled)]
    bb(gttf(bb(2.), bb(2.)));
    bb(lshrti3(bb(2), bb(2)));
    #[cfg(f128_enabled)]
    bb(lttf(bb(2.), bb(2.)));
    bb(moddi3(bb(2), bb(3)));
    bb(modti3(bb(2), bb(2)));
    bb(mulodi4(bb(2), bb(3)));
    bb(muloti4(bb(2), bb(2)));
    #[cfg(f128_enabled)]
    bb(multf(bb(2.), bb(2.)));
    bb(multi3(bb(2), bb(2)));
    #[cfg(f128_enabled)]
    bb(subtf(bb(2.), bb(2.)));
    #[cfg(f16_enabled)]
    bb(truncsfhf(bb(2.)));
    #[cfg(f128_enabled)]
    bb(trunctfdf(bb(2.)));
    #[cfg(all(
        f16_enabled,
        f128_enabled,
        not(any(target_arch = "powerpc", target_arch = "powerpc64"))
    ))]
    bb(trunctfhf(bb(2.)));
    #[cfg(f128_enabled)]
    bb(trunctfsf(bb(2.)));
    bb(udivti3(bb(2), bb(2)));
    bb(umoddi3(bb(2), bb(3)));
    bb(umodti3(bb(2), bb(2)));

    // Also pull in drop-glue / unwind-adjacent machinery.
    something_with_a_dtor(&|| assert_eq!(bb(1), 1));

    // FIXME(#802): This should be re-enabled once a workaround is found.
    // extern "C" {
    //     fn rust_begin_unwind(x: usize);
    // }
    // unsafe {
    //     rust_begin_unwind(0);
    // }
}
/// Invoke `f` through a destructor as well as directly, so that drop glue is
/// generated and linked. `f` ends up being called twice: once explicitly and
/// once when the guard goes out of scope.
fn something_with_a_dtor(f: &dyn Fn()) {
    // Guard that re-invokes the stored closure when dropped.
    struct Guard<'a>(&'a (dyn Fn() + 'a));

    impl Drop for Guard<'_> {
        fn drop(&mut self) {
            (self.0)();
        }
    }

    let _guard = Guard(f);
    f();
}
/// Hosted entry point (non-thumb targets): run every intrinsic check, then
/// exit with status 0. `no_mangle` + C-style signature because this binary is
/// `#![no_main]`.
#[unsafe(no_mangle)]
#[cfg(not(thumb))]
fn main(_argc: core::ffi::c_int, _argv: *const *const u8) -> core::ffi::c_int {
    run();
    0
}

/// Bare-metal entry point (thumb targets): no OS to return to, so spin after
/// the checks complete.
#[unsafe(no_mangle)]
#[cfg(thumb)]
pub fn _start() -> ! {
    run();
    loop {}
}

// On Windows, link the system libraries that provide the C runtime symbols.
#[cfg(windows)]
#[link(name = "kernel32")]
#[link(name = "msvcrt")]
extern "C" {}
// ARM targets need these symbols
#[unsafe(no_mangle)]
pub fn __aeabi_unwind_cpp_pr0() {}
#[unsafe(no_mangle)]
pub fn __aeabi_unwind_cpp_pr1() {}

// Unwinding/personality stubs: this binary builds with `panic = "abort"` and
// never unwinds, so empty definitions are enough to satisfy the linker.
#[cfg(not(any(windows, target_os = "cygwin")))]
#[allow(non_snake_case)]
#[unsafe(no_mangle)]
pub fn _Unwind_Resume() {}
#[cfg(not(any(windows, target_os = "cygwin")))]
#[lang = "eh_personality"]
pub extern "C" fn eh_personality() {}

// MinGW and Cygwin toolchains expect these Rust runtime hooks to exist; stub
// them out for the same reason as above.
#[cfg(any(all(windows, target_env = "gnu"), target_os = "cygwin"))]
mod mingw_unwinding {
    #[unsafe(no_mangle)]
    pub fn rust_eh_personality() {}
    #[unsafe(no_mangle)]
    pub fn rust_eh_unwind_resume() {}
    #[unsafe(no_mangle)]
    pub fn rust_eh_register_frames() {}
    #[unsafe(no_mangle)]
    pub fn rust_eh_unregister_frames() {}
}

View file

@ -0,0 +1,99 @@
[package]
name = "builtins-test"
version = "0.1.0"
authors = ["Alex Crichton <alex@alexcrichton.com>"]
edition = "2024"
publish = false
license = "MIT AND Apache-2.0 WITH LLVM-exception AND (MIT OR Apache-2.0)"
[dependencies]
# For fuzzing tests we want a deterministic seedable RNG. We also eliminate potential
# problems with system RNGs on the variety of platforms this crate is tested on.
# `xoshiro128**` is used for its quality, size, and speed at generating `u32` shift amounts.
rand_xoshiro = "0.6"
# To compare float builtins against
rustc_apfloat = "0.2.1"
# Really a dev dependency, but dev dependencies can't be optional
iai-callgrind = { version = "0.14.0", optional = true }
[dependencies.compiler_builtins]
path = "../compiler-builtins"
default-features = false
features = ["unstable-public-internals"]
[dev-dependencies]
criterion = { version = "0.5.1", default-features = false, features = ["cargo_bench_support"] }
paste = "1.0.15"
[target.'cfg(all(target_arch = "arm", not(any(target_env = "gnu", target_env = "musl")), target_os = "linux"))'.dev-dependencies]
test = { git = "https://github.com/japaric/utest" }
utest-cortex-m-qemu = { default-features = false, git = "https://github.com/japaric/utest" }
utest-macros = { git = "https://github.com/japaric/utest" }
[features]
default = ["mangled-names"]
c = ["compiler_builtins/c"]
no-asm = ["compiler_builtins/no-asm"]
no-f16-f128 = ["compiler_builtins/no-f16-f128"]
mem = ["compiler_builtins/mem"]
mangled-names = ["compiler_builtins/mangled-names"]
# Skip tests that rely on f128 symbols being available on the system
no-sys-f128 = ["no-sys-f128-int-convert", "no-sys-f16-f128-convert"]
# Some platforms have some f128 functions but everything except integer conversions
no-sys-f128-int-convert = []
no-sys-f16-f128-convert = []
no-sys-f16-f64-convert = []
# Skip tests that rely on f16 symbols being available on the system
no-sys-f16 = ["no-sys-f16-f64-convert"]
# Enable icount benchmarks (requires iai-callgrind and valgrind)
icount = ["dep:iai-callgrind"]
# Enable report generation without bringing in more dependencies by default
benchmarking-reports = ["criterion/plotters", "criterion/html_reports"]
# NOTE: benchmarks must be run with `--no-default-features` or with
# `-p builtins-test`, otherwise the default `compiler-builtins` feature
# of the `compiler_builtins` crate gets activated, resulting in linker
# errors.
[[bench]]
name = "float_add"
harness = false
[[bench]]
name = "float_sub"
harness = false
[[bench]]
name = "float_mul"
harness = false
[[bench]]
name = "float_div"
harness = false
[[bench]]
name = "float_cmp"
harness = false
[[bench]]
name = "float_conv"
harness = false
[[bench]]
name = "float_extend"
harness = false
[[bench]]
name = "float_trunc"
harness = false
[[bench]]
name = "float_pow"
harness = false
[[bench]]
name = "mem_icount"
harness = false
required-features = ["icount"]

View file

@ -0,0 +1,93 @@
#![cfg_attr(f128_enabled, feature(f128))]
use builtins_test::float_bench;
use compiler_builtins::float::add;
use criterion::{Criterion, criterion_main};
float_bench! {
name: add_f32,
sig: (a: f32, b: f32) -> f32,
crate_fn: add::__addsf3,
sys_fn: __addsf3,
sys_available: all(),
asm: [
#[cfg(target_arch = "x86_64")] {
asm!(
"addss {a}, {b}",
a = inout(xmm_reg) a,
b = in(xmm_reg) b,
options(nomem, nostack, pure)
);
a
};
#[cfg(target_arch = "aarch64")] {
asm!(
"fadd {a:s}, {a:s}, {b:s}",
a = inout(vreg) a,
b = in(vreg) b,
options(nomem, nostack, pure)
);
a
};
],
}
float_bench! {
name: add_f64,
sig: (a: f64, b: f64) -> f64,
crate_fn: add::__adddf3,
sys_fn: __adddf3,
sys_available: all(),
asm: [
#[cfg(target_arch = "x86_64")] {
asm!(
"addsd {a}, {b}",
a = inout(xmm_reg) a,
b = in(xmm_reg) b,
options(nomem, nostack, pure)
);
a
};
#[cfg(target_arch = "aarch64")] {
asm!(
"fadd {a:d}, {a:d}, {b:d}",
a = inout(vreg) a,
b = in(vreg) b,
options(nomem, nostack, pure)
);
a
};
],
}
#[cfg(f128_enabled)]
float_bench! {
name: add_f128,
sig: (a: f128, b: f128) -> f128,
crate_fn: add::__addtf3,
crate_fn_ppc: add::__addkf3,
sys_fn: __addtf3,
sys_fn_ppc: __addkf3,
sys_available: not(feature = "no-sys-f128"),
asm: []
}
/// Criterion entry point: run the `f32`/`f64` addition benchmarks, plus the
/// `f128` one on targets where the type is enabled. The `add_f*` functions
/// are generated by the `float_bench!` invocations above.
pub fn float_add() {
    let mut criterion = Criterion::default().configure_from_args();

    add_f32(&mut criterion);
    add_f64(&mut criterion);

    #[cfg(f128_enabled)]
    {
        add_f128(&mut criterion);
    }
}

criterion_main!(float_add);

View file

@ -0,0 +1,207 @@
#![cfg_attr(f128_enabled, feature(f128))]
use builtins_test::float_bench;
use compiler_builtins::float::cmp;
use criterion::{Criterion, criterion_main};
/// `gt` symbols are allowed to return differing results, they just get compared
/// to 0.
fn gt_res_eq(a: i32, b: i32) -> bool {
    // Two results agree iff they fall on the same side of zero:
    // `<= 0` means "not greater", `> 0` means "greater".
    (a <= 0) == (b <= 0)
}
// Benchmark `f32` "greater than" (`__gtsf2`). Outputs are only compared via
// `gt_res_eq`, since implementations may return any value with the right sign.
float_bench! {
    name: cmp_f32_gt,
    sig: (a: f32, b: f32) -> i32,
    crate_fn: cmp::__gtsf2,
    sys_fn: __gtsf2,
    sys_available: all(),
    output_eq: gt_res_eq,
    asm: [
        #[cfg(target_arch = "x86_64")] {
            let ret: i32;
            asm!(
                "xor {ret:e}, {ret:e}",
                "ucomiss {a}, {b}",
                "seta {ret:l}",
                a = in(xmm_reg) a,
                b = in(xmm_reg) b,
                ret = out(reg) ret,
                options(nomem, nostack, pure)
            );

            ret
        };

        #[cfg(target_arch = "aarch64")] {
            let ret: i32;
            asm!(
                "fcmp {a:s}, {b:s}",
                "cset {ret:w}, gt",
                a = in(vreg) a,
                b = in(vreg) b,
                ret = out(reg) ret,
                // `pure` added for consistency with the sibling cmp benches:
                // the asm has no side effects beyond its declared output.
                options(nomem, nostack, pure),
            );

            ret
        };
    ],
}
float_bench! {
name: cmp_f32_unord,
sig: (a: f32, b: f32) -> i32,
crate_fn: cmp::__unordsf2,
sys_fn: __unordsf2,
sys_available: all(),
asm: [
#[cfg(target_arch = "x86_64")] {
let ret: i32;
asm!(
"xor {ret:e}, {ret:e}",
"ucomiss {a}, {b}",
"setp {ret:l}",
a = in(xmm_reg) a,
b = in(xmm_reg) b,
ret = out(reg) ret,
options(nomem, nostack, pure)
);
ret
};
#[cfg(target_arch = "aarch64")] {
let ret: i32;
asm!(
"fcmp {a:s}, {b:s}",
"cset {ret:w}, vs",
a = in(vreg) a,
b = in(vreg) b,
ret = out(reg) ret,
options(nomem, nostack, pure)
);
ret
};
],
}
float_bench! {
name: cmp_f64_gt,
sig: (a: f64, b: f64) -> i32,
crate_fn: cmp::__gtdf2,
sys_fn: __gtdf2,
sys_available: all(),
output_eq: gt_res_eq,
asm: [
#[cfg(target_arch = "x86_64")] {
let ret: i32;
asm!(
"xor {ret:e}, {ret:e}",
"ucomisd {a}, {b}",
"seta {ret:l}",
a = in(xmm_reg) a,
b = in(xmm_reg) b,
ret = out(reg) ret,
options(nomem, nostack, pure)
);
ret
};
#[cfg(target_arch = "aarch64")] {
let ret: i32;
asm!(
"fcmp {a:d}, {b:d}",
"cset {ret:w}, gt",
a = in(vreg) a,
b = in(vreg) b,
ret = out(reg) ret,
options(nomem, nostack, pure)
);
ret
};
],
}
float_bench! {
name: cmp_f64_unord,
sig: (a: f64, b: f64) -> i32,
crate_fn: cmp::__unorddf2,
sys_fn: __unorddf2,
sys_available: all(),
asm: [
#[cfg(target_arch = "x86_64")] {
let ret: i32;
asm!(
"xor {ret:e}, {ret:e}",
"ucomisd {a}, {b}",
"setp {ret:l}",
a = in(xmm_reg) a,
b = in(xmm_reg) b,
ret = out(reg) ret,
options(nomem, nostack, pure)
);
ret
};
#[cfg(target_arch = "aarch64")] {
let ret: i32;
asm!(
"fcmp {a:d}, {b:d}",
"cset {ret:w}, vs",
a = in(vreg) a,
b = in(vreg) b,
ret = out(reg) ret,
options(nomem, nostack, pure)
);
ret
};
],
}
// Benchmark `f128` comparisons. These must be gated on `f128_enabled`: the
// crate only turns on `feature(f128)` under that cfg, and `float_cmp` below
// only invokes these benches under the same cfg — without the gate the file
// fails to compile when `f128` support is unavailable. This also matches how
// every other `f128` bench in this crate is declared.
#[cfg(f128_enabled)]
float_bench! {
    name: cmp_f128_gt,
    sig: (a: f128, b: f128) -> i32,
    crate_fn: cmp::__gttf2,
    crate_fn_ppc: cmp::__gtkf2,
    sys_fn: __gttf2,
    sys_fn_ppc: __gtkf2,
    sys_available: not(feature = "no-sys-f128"),
    output_eq: gt_res_eq,
    asm: []
}

#[cfg(f128_enabled)]
float_bench! {
    name: cmp_f128_unord,
    sig: (a: f128, b: f128) -> i32,
    crate_fn: cmp::__unordtf2,
    crate_fn_ppc: cmp::__unordkf2,
    sys_fn: __unordtf2,
    sys_fn_ppc: __unordkf2,
    sys_available: not(feature = "no-sys-f128"),
    asm: []
}
/// Criterion entry point: runs each enabled float-comparison benchmark in
/// turn.
pub fn float_cmp() {
    let mut c = Criterion::default().configure_from_args();

    cmp_f32_gt(&mut c);
    cmp_f32_unord(&mut c);
    cmp_f64_gt(&mut c);
    cmp_f64_unord(&mut c);

    // The `f128` benches only exist when `f128` support is compiled in.
    #[cfg(f128_enabled)]
    {
        cmp_f128_gt(&mut c);
        cmp_f128_unord(&mut c);
    }
}

criterion_main!(float_cmp);

View file

@ -0,0 +1,688 @@
#![allow(improper_ctypes)]
#![cfg_attr(f128_enabled, feature(f128))]
use builtins_test::float_bench;
use compiler_builtins::float::conv;
use criterion::{Criterion, criterion_main};
/* unsigned int -> float */
float_bench! {
name: conv_u32_f32,
sig: (a: u32) -> f32,
crate_fn: conv::__floatunsisf,
sys_fn: __floatunsisf,
sys_available: all(),
asm: [
#[cfg(target_arch = "x86_64")] {
let ret: f32;
asm!(
"mov {tmp:e}, {a:e}",
"cvtsi2ss {ret}, {tmp}",
a = in(reg) a,
tmp = out(reg) _,
ret = lateout(xmm_reg) ret,
options(nomem, nostack, pure),
);
ret
};
#[cfg(target_arch = "aarch64")] {
let ret: f32;
asm!(
"ucvtf {ret:s}, {a:w}",
a = in(reg) a,
ret = lateout(vreg) ret,
options(nomem, nostack, pure),
);
ret
};
],
}
float_bench! {
name: conv_u32_f64,
sig: (a: u32) -> f64,
crate_fn: conv::__floatunsidf,
sys_fn: __floatunsidf,
sys_available: all(),
asm: [
#[cfg(target_arch = "x86_64")] {
let ret: f64;
asm!(
"mov {tmp:e}, {a:e}",
"cvtsi2sd {ret}, {tmp}",
a = in(reg) a,
tmp = out(reg) _,
ret = lateout(xmm_reg) ret,
options(nomem, nostack, pure),
);
ret
};
#[cfg(target_arch = "aarch64")] {
let ret: f64;
asm!(
"ucvtf {ret:d}, {a:w}",
a = in(reg) a,
ret = lateout(vreg) ret,
options(nomem, nostack, pure),
);
ret
};
],
}
#[cfg(f128_enabled)]
float_bench! {
name: conv_u32_f128,
sig: (a: u32) -> f128,
crate_fn: conv::__floatunsitf,
crate_fn_ppc: conv::__floatunsikf,
sys_fn: __floatunsitf,
sys_fn_ppc: __floatunsikf,
sys_available: not(feature = "no-sys-f16-f128-convert"),
asm: []
}
float_bench! {
name: conv_u64_f32,
sig: (a: u64) -> f32,
crate_fn: conv::__floatundisf,
sys_fn: __floatundisf,
sys_available: all(),
asm: [
#[cfg(target_arch = "aarch64")] {
let ret: f32;
asm!(
"ucvtf {ret:s}, {a:x}",
a = in(reg) a,
ret = lateout(vreg) ret,
options(nomem, nostack, pure),
);
ret
};
],
}
float_bench! {
name: conv_u64_f64,
sig: (a: u64) -> f64,
crate_fn: conv::__floatundidf,
sys_fn: __floatundidf,
sys_available: all(),
asm: [
#[cfg(target_arch = "aarch64")] {
let ret: f64;
asm!(
"ucvtf {ret:d}, {a:x}",
a = in(reg) a,
ret = lateout(vreg) ret,
options(nomem, nostack, pure),
);
ret
};
],
}
#[cfg(f128_enabled)]
float_bench! {
name: conv_u64_f128,
sig: (a: u64) -> f128,
crate_fn: conv::__floatunditf,
crate_fn_ppc: conv::__floatundikf,
sys_fn: __floatunditf,
sys_fn_ppc: __floatundikf,
sys_available: not(feature = "no-sys-f16-f128-convert"),
asm: []
}
float_bench! {
name: conv_u128_f32,
sig: (a: u128) -> f32,
crate_fn: conv::__floatuntisf,
sys_fn: __floatuntisf,
sys_available: all(),
asm: []
}
float_bench! {
name: conv_u128_f64,
sig: (a: u128) -> f64,
crate_fn: conv::__floatuntidf,
sys_fn: __floatuntidf,
sys_available: all(),
asm: []
}
#[cfg(f128_enabled)]
float_bench! {
name: conv_u128_f128,
sig: (a: u128) -> f128,
crate_fn: conv::__floatuntitf,
crate_fn_ppc: conv::__floatuntikf,
sys_fn: __floatuntitf,
sys_fn_ppc: __floatuntikf,
sys_available: not(feature = "no-sys-f16-f128-convert"),
asm: []
}
/* signed int -> float */
float_bench! {
name: conv_i32_f32,
sig: (a: i32) -> f32,
crate_fn: conv::__floatsisf,
sys_fn: __floatsisf,
sys_available: all(),
asm: [
#[cfg(target_arch = "x86_64")] {
let ret: f32;
asm!(
"cvtsi2ss {ret}, {a:e}",
a = in(reg) a,
ret = lateout(xmm_reg) ret,
options(nomem, nostack, pure),
);
ret
};
#[cfg(target_arch = "aarch64")] {
let ret: f32;
asm!(
"scvtf {ret:s}, {a:w}",
a = in(reg) a,
ret = lateout(vreg) ret,
options(nomem, nostack, pure),
);
ret
};
],
}
float_bench! {
name: conv_i32_f64,
sig: (a: i32) -> f64,
crate_fn: conv::__floatsidf,
sys_fn: __floatsidf,
sys_available: all(),
asm: [
#[cfg(target_arch = "x86_64")] {
let ret: f64;
asm!(
"cvtsi2sd {ret}, {a:e}",
a = in(reg) a,
ret = lateout(xmm_reg) ret,
options(nomem, nostack, pure),
);
ret
};
#[cfg(target_arch = "aarch64")] {
let ret: f64;
asm!(
"scvtf {ret:d}, {a:w}",
a = in(reg) a,
ret = lateout(vreg) ret,
options(nomem, nostack, pure),
);
ret
};
],
}
#[cfg(f128_enabled)]
float_bench! {
name: conv_i32_f128,
sig: (a: i32) -> f128,
crate_fn: conv::__floatsitf,
crate_fn_ppc: conv::__floatsikf,
sys_fn: __floatsitf,
sys_fn_ppc: __floatsikf,
sys_available: not(feature = "no-sys-f16-f128-convert"),
asm: []
}
float_bench! {
name: conv_i64_f32,
sig: (a: i64) -> f32,
crate_fn: conv::__floatdisf,
sys_fn: __floatdisf,
sys_available: all(),
asm: [
#[cfg(target_arch = "x86_64")] {
let ret: f32;
asm!(
"cvtsi2ss {ret}, {a:r}",
a = in(reg) a,
ret = lateout(xmm_reg) ret,
options(nomem, nostack, pure),
);
ret
};
#[cfg(target_arch = "aarch64")] {
let ret: f32;
asm!(
"scvtf {ret:s}, {a:x}",
a = in(reg) a,
ret = lateout(vreg) ret,
options(nomem, nostack, pure),
);
ret
};
],
}
float_bench! {
name: conv_i64_f64,
sig: (a: i64) -> f64,
crate_fn: conv::__floatdidf,
sys_fn: __floatdidf,
sys_available: all(),
asm: [
#[cfg(target_arch = "x86_64")] {
let ret: f64;
asm!(
"cvtsi2sd {ret}, {a:r}",
a = in(reg) a,
ret = lateout(xmm_reg) ret,
options(nomem, nostack, pure),
);
ret
};
#[cfg(target_arch = "aarch64")] {
let ret: f64;
asm!(
"scvtf {ret:d}, {a:x}",
a = in(reg) a,
ret = lateout(vreg) ret,
options(nomem, nostack, pure),
);
ret
};
],
}
#[cfg(f128_enabled)]
float_bench! {
name: conv_i64_f128,
sig: (a: i64) -> f128,
crate_fn: conv::__floatditf,
crate_fn_ppc: conv::__floatdikf,
sys_fn: __floatditf,
sys_fn_ppc: __floatdikf,
sys_available: not(feature = "no-sys-f16-f128-convert"),
asm: []
}
float_bench! {
name: conv_i128_f32,
sig: (a: i128) -> f32,
crate_fn: conv::__floattisf,
sys_fn: __floattisf,
sys_available: all(),
asm: []
}
float_bench! {
name: conv_i128_f64,
sig: (a: i128) -> f64,
crate_fn: conv::__floattidf,
sys_fn: __floattidf,
sys_available: all(),
asm: []
}
#[cfg(f128_enabled)]
float_bench! {
name: conv_i128_f128,
sig: (a: i128) -> f128,
crate_fn: conv::__floattitf,
crate_fn_ppc: conv::__floattikf,
sys_fn: __floattitf,
sys_fn_ppc: __floattikf,
sys_available: not(feature = "no-sys-f16-f128-convert"),
asm: []
}
/* float -> unsigned int */
#[cfg(not(all(target_arch = "powerpc64", target_endian = "little")))]
float_bench! {
name: conv_f32_u32,
sig: (a: f32) -> u32,
crate_fn: conv::__fixunssfsi,
sys_fn: __fixunssfsi,
sys_available: all(),
asm: [
#[cfg(target_arch = "aarch64")] {
let ret: u32;
asm!(
"fcvtzu {ret:w}, {a:s}",
a = in(vreg) a,
ret = lateout(reg) ret,
options(nomem, nostack, pure),
);
ret
};
],
}
#[cfg(not(all(target_arch = "powerpc64", target_endian = "little")))]
float_bench! {
name: conv_f32_u64,
sig: (a: f32) -> u64,
crate_fn: conv::__fixunssfdi,
sys_fn: __fixunssfdi,
sys_available: all(),
asm: [
#[cfg(target_arch = "aarch64")] {
let ret: u64;
asm!(
"fcvtzu {ret:x}, {a:s}",
a = in(vreg) a,
ret = lateout(reg) ret,
options(nomem, nostack, pure),
);
ret
};
],
}
#[cfg(not(all(target_arch = "powerpc64", target_endian = "little")))]
float_bench! {
name: conv_f32_u128,
sig: (a: f32) -> u128,
crate_fn: conv::__fixunssfti,
sys_fn: __fixunssfti,
sys_available: all(),
asm: []
}
float_bench! {
name: conv_f64_u32,
sig: (a: f64) -> u32,
crate_fn: conv::__fixunsdfsi,
sys_fn: __fixunsdfsi,
sys_available: all(),
asm: [
#[cfg(target_arch = "aarch64")] {
let ret: u32;
asm!(
"fcvtzu {ret:w}, {a:d}",
a = in(vreg) a,
ret = lateout(reg) ret,
options(nomem, nostack, pure),
);
ret
};
],
}
float_bench! {
name: conv_f64_u64,
sig: (a: f64) -> u64,
crate_fn: conv::__fixunsdfdi,
sys_fn: __fixunsdfdi,
sys_available: all(),
asm: [
#[cfg(target_arch = "aarch64")] {
let ret: u64;
asm!(
"fcvtzu {ret:x}, {a:d}",
a = in(vreg) a,
ret = lateout(reg) ret,
options(nomem, nostack, pure),
);
ret
};
],
}
float_bench! {
name: conv_f64_u128,
sig: (a: f64) -> u128,
crate_fn: conv::__fixunsdfti,
sys_fn: __fixunsdfti,
sys_available: all(),
asm: []
}
#[cfg(f128_enabled)]
float_bench! {
name: conv_f128_u32,
sig: (a: f128) -> u32,
crate_fn: conv::__fixunstfsi,
crate_fn_ppc: conv::__fixunskfsi,
sys_fn: __fixunstfsi,
sys_available: not(feature = "no-sys-f16-f128-convert"),
asm: []
}
#[cfg(f128_enabled)]
float_bench! {
name: conv_f128_u64,
sig: (a: f128) -> u64,
crate_fn: conv::__fixunstfdi,
crate_fn_ppc: conv::__fixunskfdi,
sys_fn: __fixunstfdi,
sys_available: not(feature = "no-sys-f16-f128-convert"),
asm: []
}
#[cfg(f128_enabled)]
float_bench! {
name: conv_f128_u128,
sig: (a: f128) -> u128,
crate_fn: conv::__fixunstfti,
crate_fn_ppc: conv::__fixunskfti,
sys_fn: __fixunstfti,
sys_available: not(feature = "no-sys-f16-f128-convert"),
asm: []
}
/* float -> signed int */
#[cfg(not(all(target_arch = "powerpc64", target_endian = "little")))]
float_bench! {
name: conv_f32_i32,
sig: (a: f32) -> i32,
crate_fn: conv::__fixsfsi,
sys_fn: __fixsfsi,
sys_available: all(),
asm: [
#[cfg(target_arch = "aarch64")] {
let ret: i32;
asm!(
"fcvtzs {ret:w}, {a:s}",
a = in(vreg) a,
ret = lateout(reg) ret,
options(nomem, nostack, pure),
);
ret
};
],
}
#[cfg(not(all(target_arch = "powerpc64", target_endian = "little")))]
float_bench! {
name: conv_f32_i64,
sig: (a: f32) -> i64,
crate_fn: conv::__fixsfdi,
sys_fn: __fixsfdi,
sys_available: all(),
asm: [
#[cfg(target_arch = "aarch64")] {
let ret: i64;
asm!(
"fcvtzs {ret:x}, {a:s}",
a = in(vreg) a,
ret = lateout(reg) ret,
options(nomem, nostack, pure),
);
ret
};
],
}
#[cfg(not(all(target_arch = "powerpc64", target_endian = "little")))]
float_bench! {
name: conv_f32_i128,
sig: (a: f32) -> i128,
crate_fn: conv::__fixsfti,
sys_fn: __fixsfti,
sys_available: all(),
asm: []
}
float_bench! {
name: conv_f64_i32,
sig: (a: f64) -> i32,
crate_fn: conv::__fixdfsi,
sys_fn: __fixdfsi,
sys_available: all(),
asm: [
#[cfg(target_arch = "aarch64")] {
let ret: i32;
asm!(
"fcvtzs {ret:w}, {a:d}",
a = in(vreg) a,
ret = lateout(reg) ret,
options(nomem, nostack, pure),
);
ret
};
],
}
float_bench! {
name: conv_f64_i64,
sig: (a: f64) -> i64,
crate_fn: conv::__fixdfdi,
sys_fn: __fixdfdi,
sys_available: all(),
asm: [
#[cfg(target_arch = "aarch64")] {
let ret: i64;
asm!(
"fcvtzs {ret:x}, {a:d}",
a = in(vreg) a,
ret = lateout(reg) ret,
options(nomem, nostack, pure),
);
ret
};
],
}
float_bench! {
name: conv_f64_i128,
sig: (a: f64) -> i128,
crate_fn: conv::__fixdfti,
sys_fn: __fixdfti,
sys_available: all(),
asm: []
}
#[cfg(f128_enabled)]
float_bench! {
name: conv_f128_i32,
sig: (a: f128) -> i32,
crate_fn: conv::__fixtfsi,
crate_fn_ppc: conv::__fixkfsi,
sys_fn: __fixtfsi,
sys_available: not(feature = "no-sys-f16-f128-convert"),
asm: []
}
#[cfg(f128_enabled)]
float_bench! {
name: conv_f128_i64,
sig: (a: f128) -> i64,
crate_fn: conv::__fixtfdi,
crate_fn_ppc: conv::__fixkfdi,
sys_fn: __fixtfdi,
sys_available: not(feature = "no-sys-f16-f128-convert"),
asm: []
}
#[cfg(f128_enabled)]
float_bench! {
name: conv_f128_i128,
sig: (a: f128) -> i128,
crate_fn: conv::__fixtfti,
crate_fn_ppc: conv::__fixkfti,
sys_fn: __fixtfti,
sys_available: not(feature = "no-sys-f16-f128-convert"),
asm: []
}
pub fn float_conv() {
let mut criterion = Criterion::default().configure_from_args();
conv_u32_f32(&mut criterion);
conv_u32_f64(&mut criterion);
conv_u64_f32(&mut criterion);
conv_u64_f64(&mut criterion);
conv_u128_f32(&mut criterion);
conv_u128_f64(&mut criterion);
conv_i32_f32(&mut criterion);
conv_i32_f64(&mut criterion);
conv_i64_f32(&mut criterion);
conv_i64_f64(&mut criterion);
conv_i128_f32(&mut criterion);
conv_i128_f64(&mut criterion);
conv_f64_u32(&mut criterion);
conv_f64_u64(&mut criterion);
conv_f64_u128(&mut criterion);
conv_f64_i32(&mut criterion);
conv_f64_i64(&mut criterion);
conv_f64_i128(&mut criterion);
#[cfg(f128_enabled)]
// FIXME: ppc64le has a sporadic overflow panic in the crate functions
// <https://github.com/rust-lang/compiler-builtins/issues/617#issuecomment-2125914639>
#[cfg(not(all(target_arch = "powerpc64", target_endian = "little")))]
{
conv_u32_f128(&mut criterion);
conv_u64_f128(&mut criterion);
conv_u128_f128(&mut criterion);
conv_i32_f128(&mut criterion);
conv_i64_f128(&mut criterion);
conv_i128_f128(&mut criterion);
conv_f128_u32(&mut criterion);
conv_f128_u64(&mut criterion);
conv_f128_u128(&mut criterion);
conv_f128_i32(&mut criterion);
conv_f128_i64(&mut criterion);
conv_f128_i128(&mut criterion);
}
}
criterion_main!(float_conv);

View file

@ -0,0 +1,93 @@
#![cfg_attr(f128_enabled, feature(f128))]
use builtins_test::float_bench;
use compiler_builtins::float::div;
use criterion::{Criterion, criterion_main};
float_bench! {
name: div_f32,
sig: (a: f32, b: f32) -> f32,
crate_fn: div::__divsf3,
sys_fn: __divsf3,
sys_available: all(),
asm: [
#[cfg(target_arch = "x86_64")] {
asm!(
"divss {a}, {b}",
a = inout(xmm_reg) a,
b = in(xmm_reg) b,
options(nomem, nostack, pure)
);
a
};
#[cfg(target_arch = "aarch64")] {
asm!(
"fdiv {a:s}, {a:s}, {b:s}",
a = inout(vreg) a,
b = in(vreg) b,
options(nomem, nostack, pure)
);
a
};
],
}
float_bench! {
name: div_f64,
sig: (a: f64, b: f64) -> f64,
crate_fn: div::__divdf3,
sys_fn: __divdf3,
sys_available: all(),
asm: [
#[cfg(target_arch = "x86_64")] {
asm!(
"divsd {a}, {b}",
a = inout(xmm_reg) a,
b = in(xmm_reg) b,
options(nomem, nostack, pure)
);
a
};
#[cfg(target_arch = "aarch64")] {
asm!(
"fdiv {a:d}, {a:d}, {b:d}",
a = inout(vreg) a,
b = in(vreg) b,
options(nomem, nostack, pure)
);
a
};
],
}
#[cfg(f128_enabled)]
float_bench! {
name: div_f128,
sig: (a: f128, b: f128) -> f128,
crate_fn: div::__divtf3,
crate_fn_ppc: div::__divkf3,
sys_fn: __divtf3,
sys_fn_ppc: __divkf3,
sys_available: not(feature = "no-sys-f128"),
asm: []
}
/// Criterion entry point: runs every float-division benchmark that is
/// enabled for the current configuration.
pub fn float_div() {
    let mut c = Criterion::default().configure_from_args();

    div_f32(&mut c);
    div_f64(&mut c);

    // `div_f128` only exists when `f128` support is compiled in.
    #[cfg(f128_enabled)]
    div_f128(&mut c);
}

criterion_main!(float_div);

View file

@ -0,0 +1,133 @@
#![allow(unused_variables)] // "unused" f16 registers
#![cfg_attr(f128_enabled, feature(f128))]
#![cfg_attr(f16_enabled, feature(f16))]
use builtins_test::float_bench;
use compiler_builtins::float::extend;
use criterion::{Criterion, criterion_main};
#[cfg(f16_enabled)]
float_bench! {
name: extend_f16_f32,
sig: (a: f16) -> f32,
crate_fn: extend::__extendhfsf2,
sys_fn: __extendhfsf2,
sys_available: not(feature = "no-sys-f16"),
asm: [
#[cfg(target_arch = "aarch64")] {
let ret: f32;
asm!(
"fcvt {ret:s}, {a:h}",
a = in(vreg) a,
ret = lateout(vreg) ret,
options(nomem, nostack, pure),
);
ret
};
],
}
#[cfg(f16_enabled)]
float_bench! {
name: extend_f16_f64,
sig: (a: f16) -> f64,
crate_fn: extend::__extendhfdf2,
sys_fn: __extendhfdf2,
sys_available: not(feature = "no-sys-f16-f64-convert"),
asm: [
#[cfg(target_arch = "aarch64")] {
let ret: f64;
asm!(
"fcvt {ret:d}, {a:h}",
a = in(vreg) a,
ret = lateout(vreg) ret,
options(nomem, nostack, pure),
);
ret
};
],
}
#[cfg(all(f16_enabled, f128_enabled))]
float_bench! {
name: extend_f16_f128,
sig: (a: f16) -> f128,
crate_fn: extend::__extendhftf2,
crate_fn_ppc: extend::__extendhfkf2,
sys_fn: __extendhftf2,
sys_fn_ppc: __extendhfkf2,
sys_available: not(feature = "no-sys-f16-f128-convert"),
asm: [],
}
float_bench! {
name: extend_f32_f64,
sig: (a: f32) -> f64,
crate_fn: extend::__extendsfdf2,
sys_fn: __extendsfdf2,
sys_available: all(),
asm: [
#[cfg(target_arch = "aarch64")] {
let ret: f64;
asm!(
"fcvt {ret:d}, {a:s}",
a = in(vreg) a,
ret = lateout(vreg) ret,
options(nomem, nostack, pure),
);
ret
};
],
}
#[cfg(f128_enabled)]
float_bench! {
name: extend_f32_f128,
sig: (a: f32) -> f128,
crate_fn: extend::__extendsftf2,
crate_fn_ppc: extend::__extendsfkf2,
sys_fn: __extendsftf2,
sys_fn_ppc: __extendsfkf2,
sys_available: not(feature = "no-sys-f128"),
asm: [],
}
#[cfg(f128_enabled)]
float_bench! {
name: extend_f64_f128,
sig: (a: f64) -> f128,
crate_fn: extend::__extenddftf2,
crate_fn_ppc: extend::__extenddfkf2,
sys_fn: __extenddftf2,
sys_fn_ppc: __extenddfkf2,
sys_available: not(feature = "no-sys-f128"),
asm: [],
}
/// Criterion entry point: runs every float-extension benchmark that is
/// enabled for the current configuration.
pub fn float_extend() {
    let mut c = Criterion::default().configure_from_args();

    // FIXME(#655): `f16` tests disabled until we can bootstrap symbols
    #[cfg(f16_enabled)]
    #[cfg(not(any(target_arch = "powerpc", target_arch = "powerpc64")))]
    {
        extend_f16_f32(&mut c);
        extend_f16_f64(&mut c);

        #[cfg(f128_enabled)]
        extend_f16_f128(&mut c);
    }

    extend_f32_f64(&mut c);

    // The `f128` targets only exist when `f128` support is compiled in.
    #[cfg(f128_enabled)]
    {
        extend_f32_f128(&mut c);
        extend_f64_f128(&mut c);
    }
}

criterion_main!(float_extend);

Some files were not shown because too many files have changed in this diff Show more