Merge from rustc

This commit is contained in:
The Miri Cronjob Bot 2025-06-06 05:02:49 +00:00
commit c44bc10b67
291 changed files with 4610 additions and 2821 deletions

View file

@ -4673,6 +4673,7 @@ dependencies = [
"bincode",
"rustc-hash 2.1.1",
"serde",
"serde_derive",
"serde_json",
]

View file

@ -1406,7 +1406,7 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
};
let span = self.tcx.sess.source_map().start_point(t.span).shrink_to_hi();
let region = Lifetime { ident: Ident::new(kw::UnderscoreLifetime, span), id };
(region, LifetimeSyntax::Hidden)
(region, LifetimeSyntax::Implicit)
}
};
self.lower_lifetime(&region, LifetimeSource::Reference, syntax)
@ -1790,7 +1790,7 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
id,
Ident::new(kw::UnderscoreLifetime, span),
LifetimeSource::Path { angle_brackets },
LifetimeSyntax::Hidden,
LifetimeSyntax::Implicit,
)
}
@ -2422,7 +2422,7 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
Ident::new(kw::UnderscoreLifetime, self.lower_span(span)),
hir::LifetimeKind::ImplicitObjectLifetimeDefault,
LifetimeSource::Other,
LifetimeSyntax::Hidden,
LifetimeSyntax::Implicit,
);
debug!("elided_dyn_bound: r={:?}", r);
self.arena.alloc(r)

View file

@ -5,11 +5,9 @@ use rustc_index::{IndexSlice, IndexVec};
use rustc_middle::mir::ConstraintCategory;
use rustc_middle::ty::{RegionVid, TyCtxt, VarianceDiagInfo};
use rustc_span::Span;
use tracing::{debug, instrument};
use tracing::debug;
use crate::region_infer::{AnnotatedSccs, ConstraintSccs, RegionDefinition, SccAnnotations};
use crate::type_check::Locations;
use crate::universal_regions::UniversalRegions;
pub(crate) mod graph;
@ -53,112 +51,6 @@ impl<'tcx> OutlivesConstraintSet<'tcx> {
) -> &IndexSlice<OutlivesConstraintIndex, OutlivesConstraint<'tcx>> {
&self.outlives
}
/// Computes cycles (SCCs) in the graph of regions. In particular,
/// find all regions R1, R2 such that R1: R2 and R2: R1 and group
/// them into an SCC, and find the relationships between SCCs.
pub(crate) fn compute_sccs(
&self,
static_region: RegionVid,
definitions: &IndexVec<RegionVid, RegionDefinition<'tcx>>,
) -> AnnotatedSccs {
let constraint_graph = self.graph(definitions.len());
let region_graph = &constraint_graph.region_graph(self, static_region);
let mut annotation_visitor = SccAnnotations::new(definitions);
(
ConstraintSccs::new_with_annotation(&region_graph, &mut annotation_visitor),
annotation_visitor.scc_to_annotation,
)
}
/// This method handles Universe errors by rewriting the constraint
/// graph. For each strongly connected component in the constraint
/// graph such that there is a series of constraints
/// A: B: C: ... : X where
/// A's universe is smaller than X's and A is a placeholder,
/// add a constraint that A: 'static. This is a safe upper bound
/// in the face of borrow checker/trait solver limitations that will
/// eventually go away.
///
/// For a more precise definition, see the documentation for
/// [`crate::region_infer::RegionTracker`].
///
/// This edge case used to be handled during constraint propagation
/// by iterating over the strongly connected components in the constraint
/// graph while maintaining a set of bookkeeping mappings similar
/// to what is stored in `RegionTracker` and manually adding 'static as
/// needed.
///
/// It was rewritten as part of the Polonius project with the goal of moving
/// higher-kindedness concerns out of the path of the borrow checker,
/// for two reasons:
///
/// 1. Implementing Polonius is difficult enough without also
/// handling them.
/// 2. The long-term goal is to handle higher-kinded concerns
/// in the trait solver, where they belong. This avoids
/// logic duplication and allows future trait solvers
/// to compute better bounds than for example our
/// "must outlive 'static" here.
///
/// This code is a stop-gap measure in preparation for the future trait solver.
///
/// Every constraint added by this method is an
/// internal `IllegalUniverse` constraint.
#[instrument(skip(self, universal_regions, definitions))]
pub(crate) fn add_outlives_static(
&mut self,
universal_regions: &UniversalRegions<'tcx>,
definitions: &IndexVec<RegionVid, RegionDefinition<'tcx>>,
) -> AnnotatedSccs {
let fr_static = universal_regions.fr_static;
let (sccs, annotations) = self.compute_sccs(fr_static, definitions);
// Changed to `true` if we added any constraints to `self` and need to
// recompute SCCs.
let mut added_constraints = false;
for scc in sccs.all_sccs() {
// No point in adding 'static: 'static!
// This micro-optimisation makes somewhat sense
// because static outlives *everything*.
if scc == sccs.scc(fr_static) {
continue;
}
let annotation = annotations[scc];
// If this SCC participates in a universe violation,
// e.g. if it reaches a region with a universe smaller than
// the largest region reached, add a requirement that it must
// outlive `'static`.
if annotation.has_incompatible_universes() {
// Optimisation opportunity: this will add more constraints than
// needed for correctness, since an SCC upstream of another with
// a universe violation will "infect" its downstream SCCs to also
// outlive static.
added_constraints = true;
let scc_representative_outlives_static = OutlivesConstraint {
sup: annotation.representative,
sub: fr_static,
category: ConstraintCategory::IllegalUniverse,
locations: Locations::All(rustc_span::DUMMY_SP),
span: rustc_span::DUMMY_SP,
variance_info: VarianceDiagInfo::None,
from_closure: false,
};
self.push(scc_representative_outlives_static);
}
}
if added_constraints {
// We changed the constraint set and so must recompute SCCs.
self.compute_sccs(fr_static, definitions)
} else {
// If we didn't add any back-edges; no more work needs doing
(sccs, annotations)
}
}
}
impl<'tcx> Index<OutlivesConstraintIndex> for OutlivesConstraintSet<'tcx> {

View file

@ -0,0 +1,348 @@
//! Logic for lowering higher-kinded outlives constraints
//! (with placeholders and universes) and turn them into regular
//! outlives constraints.
use rustc_data_structures::frozen::Frozen;
use rustc_data_structures::fx::FxIndexMap;
use rustc_data_structures::graph::scc;
use rustc_data_structures::graph::scc::Sccs;
use rustc_index::IndexVec;
use rustc_infer::infer::RegionVariableOrigin;
use rustc_middle::mir::ConstraintCategory;
use rustc_middle::ty::{RegionVid, UniverseIndex};
use tracing::debug;
use crate::constraints::{ConstraintSccIndex, OutlivesConstraintSet};
use crate::consumers::OutlivesConstraint;
use crate::diagnostics::UniverseInfo;
use crate::member_constraints::MemberConstraintSet;
use crate::region_infer::values::{LivenessValues, PlaceholderIndices};
use crate::region_infer::{ConstraintSccs, RegionDefinition, Representative, TypeTest};
use crate::ty::VarianceDiagInfo;
use crate::type_check::free_region_relations::UniversalRegionRelations;
use crate::type_check::{Locations, MirTypeckRegionConstraints};
use crate::universal_regions::UniversalRegions;
use crate::{BorrowckInferCtxt, NllRegionVariableOrigin};
/// A set of outlives constraints after rewriting to remove
/// higher-kinded constraints.
pub(crate) struct LoweredConstraints<'tcx> {
    /// The strongly connected components of the (rewritten) constraint graph.
    pub(crate) constraint_sccs: Sccs<RegionVid, ConstraintSccIndex>,
    /// One definition per region variable, frozen after construction.
    pub(crate) definitions: Frozen<IndexVec<RegionVid, RegionDefinition<'tcx>>>,
    /// Per-SCC universe/representative tracking, parallel to `constraint_sccs`.
    pub(crate) scc_annotations: IndexVec<ConstraintSccIndex, RegionTracker>,
    /// Member constraints, passed through from MIR typeck (possibly cleared on taint).
    pub(crate) member_constraints: MemberConstraintSet<'tcx, RegionVid>,
    /// The outlives constraints, including any `'r: 'static` edges added
    /// by placeholder rewriting; frozen so they can no longer change.
    pub(crate) outlives_constraints: Frozen<OutlivesConstraintSet<'tcx>>,
    /// Type tests, passed through from MIR typeck.
    pub(crate) type_tests: Vec<TypeTest<'tcx>>,
    /// Liveness constraints, passed through from MIR typeck.
    pub(crate) liveness_constraints: LivenessValues,
    /// Causes for each universe, used for diagnostics.
    pub(crate) universe_causes: FxIndexMap<UniverseIndex, UniverseInfo<'tcx>>,
    /// Indices of the placeholder regions.
    pub(crate) placeholder_indices: PlaceholderIndices,
}
impl<'d, 'tcx, A: scc::Annotation> SccAnnotations<'d, 'tcx, A> {
    /// Creates a fresh visitor with no annotations yet, backed by the
    /// given region definitions.
    pub(crate) fn init(definitions: &'d IndexVec<RegionVid, RegionDefinition<'tcx>>) -> Self {
        let scc_to_annotation = IndexVec::new();
        Self { definitions, scc_to_annotation }
    }
}
/// A Visitor for SCC annotation construction.
pub(crate) struct SccAnnotations<'d, 'tcx, A: scc::Annotation> {
    /// The annotation computed for each SCC, indexed by SCC.
    pub(crate) scc_to_annotation: IndexVec<ConstraintSccIndex, A>,
    /// The region definitions used to seed a fresh annotation per region.
    definitions: &'d IndexVec<RegionVid, RegionDefinition<'tcx>>,
}
impl scc::Annotations<RegionVid> for SccAnnotations<'_, '_, RegionTracker> {
    type Ann = RegionTracker;
    type SccIdx = ConstraintSccIndex;

    /// Seed a fresh annotation for `element` from its region definition.
    fn new(&self, element: RegionVid) -> RegionTracker {
        RegionTracker::new(element, &self.definitions[element])
    }

    /// Record the final annotation for `scc`. SCCs are annotated in index
    /// order, so the position we push at must match the SCC index.
    fn annotate_scc(&mut self, scc: ConstraintSccIndex, annotation: RegionTracker) {
        let pushed_at = self.scc_to_annotation.push(annotation);
        assert!(pushed_at == scc);
    }
}
/// An annotation for region graph SCCs that tracks
/// the values of its elements. This annotates a single SCC.
#[derive(Copy, Debug, Clone)]
pub(crate) struct RegionTracker {
    /// The largest universe of a placeholder reached from this SCC.
    /// This includes placeholders within this SCC.
    max_placeholder_universe_reached: UniverseIndex,

    /// The largest universe nameable from this SCC.
    /// It is the smallest nameable universe of any
    /// existential region reachable from it.
    max_nameable_universe: UniverseIndex,

    /// The representative Region Variable Id for this SCC.
    pub(crate) representative: Representative,
}
impl RegionTracker {
    /// Create a tracker for a single region. Only placeholder regions
    /// contribute a placeholder universe; every other origin starts at ROOT.
    pub(crate) fn new(rvid: RegionVid, definition: &RegionDefinition<'_>) -> Self {
        let placeholder_universe =
            if matches!(definition.origin, NllRegionVariableOrigin::Placeholder(_)) {
                definition.universe
            } else {
                UniverseIndex::ROOT
            };
        Self {
            max_placeholder_universe_reached: placeholder_universe,
            max_nameable_universe: definition.universe,
            representative: Representative::new(rvid, definition),
        }
    }

    /// The largest universe this SCC can name. It's the smallest
    /// largest nameable universe of any reachable region.
    pub(crate) fn max_nameable_universe(self) -> UniverseIndex {
        self.max_nameable_universe
    }

    /// Fold `other`'s reachability data into `self`: take the largest
    /// placeholder universe reached, and the smallest nameable universe.
    fn merge_min_max_seen(&mut self, other: &Self) {
        self.max_placeholder_universe_reached = std::cmp::max(
            self.max_placeholder_universe_reached,
            other.max_placeholder_universe_reached,
        );

        self.max_nameable_universe =
            std::cmp::min(self.max_nameable_universe, other.max_nameable_universe);
    }

    /// Returns `true` if the annotated SCC reaches a placeholder
    /// with a universe larger than the smallest nameable universe of any
    /// reachable existential region.
    pub(crate) fn has_incompatible_universes(&self) -> bool {
        self.max_nameable_universe().cannot_name(self.max_placeholder_universe_reached)
    }

    /// Determine if the tracked universes of the two SCCs are compatible.
    pub(crate) fn universe_compatible_with(&self, other: Self) -> bool {
        self.max_nameable_universe().can_name(other.max_nameable_universe())
            || self.max_nameable_universe().can_name(other.max_placeholder_universe_reached)
    }
}
impl scc::Annotation for RegionTracker {
    /// Merging two members of the same SCC: fold the universe data both
    /// ways and let `Representative::merge_scc` pick the representative.
    fn merge_scc(mut self, other: Self) -> Self {
        self.merge_min_max_seen(&other);
        self.representative = self.representative.merge_scc(other.representative);
        self
    }

    /// Merging a reachable SCC: only the seen universe values change;
    /// the representative stays as-is.
    fn merge_reached(mut self, other: Self) -> Self {
        self.merge_min_max_seen(&other);
        self
    }
}
/// Builds a `RegionDefinition` for every region inference variable and
/// reports whether any of them are placeholders.
fn region_definitions<'tcx>(
    universal_regions: &UniversalRegions<'tcx>,
    infcx: &BorrowckInferCtxt<'tcx>,
) -> (Frozen<IndexVec<RegionVid, RegionDefinition<'tcx>>>, bool) {
    let var_infos = infcx.get_region_var_infos();

    // Create a RegionDefinition for each inference variable. Doing it here
    // lets us sneak in a cheap check for placeholders; its proper home is
    // probably `RegionInferenceContext::new()`.
    let mut has_placeholders = false;
    let mut definitions: IndexVec<RegionVid, _> = IndexVec::with_capacity(var_infos.len());

    for info in var_infos.iter() {
        let origin = if let RegionVariableOrigin::Nll(origin) = info.origin {
            origin
        } else {
            NllRegionVariableOrigin::Existential { from_forall: false }
        };

        has_placeholders |= matches!(origin, NllRegionVariableOrigin::Placeholder(_));
        definitions.push(RegionDefinition { origin, universe: info.universe, external_name: None });
    }

    // Attach external names from universal regions in function definitions.
    // FIXME: this two-step method is annoying, but I don't know how to avoid it.
    for (external_name, variable) in universal_regions.named_universal_regions_iter() {
        debug!("region {:?} has external name {:?}", variable, external_name);
        definitions[variable].external_name = Some(external_name);
    }

    (Frozen::freeze(definitions), has_placeholders)
}
/// This method handles placeholders by rewriting the constraint
/// graph. For each strongly connected component in the constraint
/// graph such that there is a series of constraints
///    A: B: C: ... : X  where
/// A contains a placeholder whose universe cannot be named by X,
/// add a constraint that A: 'static. This is a safe upper bound
/// in the face of borrow checker/trait solver limitations that will
/// eventually go away.
///
/// For a more precise definition, see the documentation for
/// [`RegionTracker`] and its methods!
///
/// This edge case used to be handled during constraint propagation.
/// It was rewritten as part of the Polonius project with the goal of moving
/// higher-kindedness concerns out of the path of the borrow checker,
/// for two reasons:
///
/// 1. Implementing Polonius is difficult enough without also
///    handling them.
/// 2. The long-term goal is to handle higher-kinded concerns
///    in the trait solver, where they belong. This avoids
///    logic duplication and allows future trait solvers
///    to compute better bounds than for example our
///    "must outlive 'static" here.
///
/// This code is a stop-gap measure in preparation for the future trait solver.
///
/// Every constraint added by this method is an internal `IllegalUniverse` constraint.
pub(crate) fn compute_sccs_applying_placeholder_outlives_constraints<'tcx>(
    constraints: MirTypeckRegionConstraints<'tcx>,
    universal_region_relations: &Frozen<UniversalRegionRelations<'tcx>>,
    infcx: &BorrowckInferCtxt<'tcx>,
) -> LoweredConstraints<'tcx> {
    let universal_regions = &universal_region_relations.universal_regions;
    let (definitions, has_placeholders) = region_definitions(universal_regions, infcx);

    // Destructure the typeck constraints; `placeholder_index_to_region` is
    // deliberately dropped here — it is not needed past this point.
    let MirTypeckRegionConstraints {
        placeholder_indices,
        placeholder_index_to_region: _,
        liveness_constraints,
        mut outlives_constraints,
        mut member_constraints,
        universe_causes,
        type_tests,
    } = constraints;

    if let Some(guar) = universal_regions.tainted_by_errors() {
        debug!("Universal regions tainted by errors; removing constraints!");
        // Suppress unhelpful extra errors in `infer_opaque_types` by clearing out all
        // outlives bounds that we may end up checking.
        outlives_constraints = Default::default();
        member_constraints = Default::default();

        // Also taint the entire scope.
        infcx.set_tainted_by_errors(guar);
    }

    let fr_static = universal_regions.fr_static;

    // Closure so the (possibly rewritten) constraint set can be SCC-ified
    // a second time below without repeating this setup.
    let compute_sccs =
        |constraints: &OutlivesConstraintSet<'tcx>,
         annotations: &mut SccAnnotations<'_, 'tcx, RegionTracker>| {
            ConstraintSccs::new_with_annotation(
                &constraints.graph(definitions.len()).region_graph(constraints, fr_static),
                annotations,
            )
        };

    let mut scc_annotations = SccAnnotations::init(&definitions);
    let constraint_sccs = compute_sccs(&outlives_constraints, &mut scc_annotations);

    // This code structure is a bit convoluted because it allows for a planned
    // future change where the early return here has a different type of annotation
    // that does much less work.
    if !has_placeholders {
        debug!("No placeholder regions found; skipping rewriting logic!");

        return LoweredConstraints {
            type_tests,
            member_constraints,
            constraint_sccs,
            scc_annotations: scc_annotations.scc_to_annotation,
            definitions,
            outlives_constraints: Frozen::freeze(outlives_constraints),
            liveness_constraints,
            universe_causes,
            placeholder_indices,
        };
    }
    debug!("Placeholders present; activating placeholder handling logic!");

    let added_constraints = rewrite_placeholder_outlives(
        &constraint_sccs,
        &scc_annotations,
        fr_static,
        &mut outlives_constraints,
    );

    let (constraint_sccs, scc_annotations) = if added_constraints {
        let mut annotations = SccAnnotations::init(&definitions);
        // We changed the constraint set and so must recompute SCCs.
        // Optimisation opportunity: if we can add them incrementally (and that's
        // possible because edges to 'static always only merge SCCs into 'static),
        // we would potentially save a lot of work here.
        (compute_sccs(&outlives_constraints, &mut annotations), annotations.scc_to_annotation)
    } else {
        // We didn't add any back-edges, so no more work needs doing.
        debug!("No constraints rewritten!");
        (constraint_sccs, scc_annotations.scc_to_annotation)
    };

    LoweredConstraints {
        constraint_sccs,
        definitions,
        scc_annotations,
        member_constraints,
        outlives_constraints: Frozen::freeze(outlives_constraints),
        type_tests,
        liveness_constraints,
        universe_causes,
        placeholder_indices,
    }
}
/// Adds an `r: 'static` constraint (tagged `IllegalUniverse`) for the
/// representative of every SCC with incompatible universes. Returns `true`
/// if any constraint was added, in which case the caller must recompute SCCs.
fn rewrite_placeholder_outlives<'tcx>(
    sccs: &Sccs<RegionVid, ConstraintSccIndex>,
    annotations: &SccAnnotations<'_, '_, RegionTracker>,
    fr_static: RegionVid,
    outlives_constraints: &mut OutlivesConstraintSet<'tcx>,
) -> bool {
    let annotations = &annotations.scc_to_annotation;
    let static_scc = sccs.scc(fr_static);

    // Flipped to `true` once we push anything, so the caller knows
    // the SCC decomposition is stale.
    let mut added_constraints = false;

    for scc in sccs.all_sccs() {
        // No point in adding 'static: 'static! This micro-optimisation
        // makes somewhat sense because static outlives *everything*.
        if scc == static_scc {
            continue;
        }

        let annotation = annotations[scc];

        // Only SCCs participating in a universe violation — i.e. reaching
        // a region with a universe smaller than the largest one reached —
        // must be forced to outlive `'static`.
        if !annotation.has_incompatible_universes() {
            continue;
        }

        // Optimisation opportunity: this will add more constraints than
        // needed for correctness, since an SCC upstream of another with
        // a universe violation will "infect" its downstream SCCs to also
        // outlive static.
        outlives_constraints.push(OutlivesConstraint {
            sup: annotation.representative.rvid(),
            sub: fr_static,
            category: ConstraintCategory::IllegalUniverse,
            locations: Locations::All(rustc_span::DUMMY_SP),
            span: rustc_span::DUMMY_SP,
            variance_info: VarianceDiagInfo::None,
            from_closure: false,
        });
        added_constraints = true;
        debug!("Added {:?}: 'static!", annotation.representative.rvid());
    }
    added_constraints
}

View file

@ -72,6 +72,7 @@ mod constraints;
mod dataflow;
mod def_use;
mod diagnostics;
mod handle_placeholders;
mod member_constraints;
mod nll;
mod path_utils;

View file

@ -20,6 +20,7 @@ use tracing::{debug, instrument};
use crate::borrow_set::BorrowSet;
use crate::consumers::ConsumerOptions;
use crate::diagnostics::RegionErrors;
use crate::handle_placeholders::compute_sccs_applying_placeholder_outlives_constraints;
use crate::polonius::PoloniusDiagnosticsContext;
use crate::polonius::legacy::{
PoloniusFacts, PoloniusFactsExt, PoloniusLocationTable, PoloniusOutput,
@ -113,6 +114,12 @@ pub(crate) fn compute_regions<'tcx>(
Rc::clone(&location_map),
);
let lowered_constraints = compute_sccs_applying_placeholder_outlives_constraints(
constraints,
&universal_region_relations,
infcx,
);
// If requested, emit legacy polonius facts.
polonius::legacy::emit_facts(
&mut polonius_facts,
@ -122,11 +129,15 @@ pub(crate) fn compute_regions<'tcx>(
borrow_set,
move_data,
&universal_region_relations,
&constraints,
&lowered_constraints,
);
let mut regioncx =
RegionInferenceContext::new(infcx, constraints, universal_region_relations, location_map);
let mut regioncx = RegionInferenceContext::new(
infcx,
lowered_constraints,
universal_region_relations,
location_map,
);
// If requested for `-Zpolonius=next`, convert NLL constraints to localized outlives constraints
// and use them to compute loan liveness.

View file

@ -13,7 +13,7 @@ use tracing::debug;
use crate::borrow_set::BorrowSet;
use crate::constraints::OutlivesConstraint;
use crate::type_check::MirTypeckRegionConstraints;
use crate::handle_placeholders::LoweredConstraints;
use crate::type_check::free_region_relations::UniversalRegionRelations;
use crate::universal_regions::UniversalRegions;
@ -43,7 +43,7 @@ pub(crate) fn emit_facts<'tcx>(
borrow_set: &BorrowSet<'tcx>,
move_data: &MoveData<'tcx>,
universal_region_relations: &UniversalRegionRelations<'tcx>,
constraints: &MirTypeckRegionConstraints<'tcx>,
constraints: &LoweredConstraints<'tcx>,
) {
let Some(facts) = facts else {
// We don't do anything if there are no facts to fill.
@ -203,7 +203,7 @@ pub(crate) fn emit_drop_facts<'tcx>(
fn emit_outlives_facts<'tcx>(
facts: &mut PoloniusFacts,
location_table: &PoloniusLocationTable,
constraints: &MirTypeckRegionConstraints<'tcx>,
constraints: &LoweredConstraints<'tcx>,
) {
facts.subset_base.extend(constraints.outlives_constraints.outlives().iter().flat_map(
|constraint: &OutlivesConstraint<'_>| {

View file

@ -46,7 +46,7 @@ impl<'tcx> RegionInferenceContext<'tcx> {
"| {r:rw$?} | {ui:4?} | {v}",
r = region,
rw = REGION_WIDTH,
ui = self.region_universe(region),
ui = self.max_nameable_universe(self.constraint_sccs.scc(region)),
v = self.region_value_str(region),
)?;
}

View file

@ -11,7 +11,7 @@ use rustc_hir::def_id::CRATE_DEF_ID;
use rustc_index::IndexVec;
use rustc_infer::infer::outlives::test_type_match;
use rustc_infer::infer::region_constraints::{GenericKind, VerifyBound, VerifyIfEq};
use rustc_infer::infer::{InferCtxt, NllRegionVariableOrigin, RegionVariableOrigin};
use rustc_infer::infer::{InferCtxt, NllRegionVariableOrigin};
use rustc_middle::bug;
use rustc_middle::mir::{
AnnotationSource, BasicBlock, Body, ConstraintCategory, Local, Location, ReturnConstraint,
@ -28,13 +28,14 @@ use crate::constraints::graph::{self, NormalConstraintGraph, RegionGraph};
use crate::constraints::{ConstraintSccIndex, OutlivesConstraint, OutlivesConstraintSet};
use crate::dataflow::BorrowIndex;
use crate::diagnostics::{RegionErrorKind, RegionErrors, UniverseInfo};
use crate::handle_placeholders::{LoweredConstraints, RegionTracker};
use crate::member_constraints::{MemberConstraintSet, NllMemberConstraintIndex};
use crate::polonius::LiveLoans;
use crate::polonius::legacy::PoloniusOutput;
use crate::region_infer::reverse_sccs::ReverseSccGraph;
use crate::region_infer::values::{LivenessValues, RegionElement, RegionValues, ToElementIndex};
use crate::type_check::Locations;
use crate::type_check::free_region_relations::UniversalRegionRelations;
use crate::type_check::{Locations, MirTypeckRegionConstraints};
use crate::universal_regions::UniversalRegions;
use crate::{
BorrowckInferCtxt, ClosureOutlivesRequirement, ClosureOutlivesSubject,
@ -48,125 +49,48 @@ mod reverse_sccs;
pub(crate) mod values;
pub(crate) type ConstraintSccs = Sccs<RegionVid, ConstraintSccIndex>;
pub(crate) type AnnotatedSccs = (ConstraintSccs, IndexVec<ConstraintSccIndex, RegionTracker>);
/// An annotation for region graph SCCs that tracks
/// the values of its elements. This annotates a single SCC.
#[derive(Copy, Debug, Clone)]
pub(crate) struct RegionTracker {
/// The largest universe of a placeholder reached from this SCC.
/// This includes placeholders within this SCC.
max_placeholder_universe_reached: UniverseIndex,
/// The smallest universe index reachable form the nodes of this SCC.
min_reachable_universe: UniverseIndex,
/// The representative Region Variable Id for this SCC. We prefer
/// placeholders over existentially quantified variables, otherwise
/// it's the one with the smallest Region Variable ID.
pub(crate) representative: RegionVid,
/// Is the current representative a placeholder?
representative_is_placeholder: bool,
/// Is the current representative existentially quantified?
representative_is_existential: bool,
/// The representative region variable for an SCC, tagged by its origin.
/// We prefer placeholders over existentially quantified variables, otherwise
/// it's the one with the smallest Region Variable ID. In other words,
/// the order of this enumeration really matters!
#[derive(Copy, Debug, Clone, PartialEq, PartialOrd, Eq, Ord)]
pub(crate) enum Representative {
FreeRegion(RegionVid),
Placeholder(RegionVid),
Existential(RegionVid),
}
impl scc::Annotation for RegionTracker {
fn merge_scc(mut self, mut other: Self) -> Self {
// Prefer any placeholder over any existential
if other.representative_is_placeholder && self.representative_is_existential {
other.merge_min_max_seen(&self);
return other;
impl Representative {
pub(crate) fn rvid(self) -> RegionVid {
match self {
Representative::FreeRegion(region_vid)
| Representative::Placeholder(region_vid)
| Representative::Existential(region_vid) => region_vid,
}
if self.representative_is_placeholder && other.representative_is_existential
|| (self.representative <= other.representative)
{
self.merge_min_max_seen(&other);
return self;
}
other.merge_min_max_seen(&self);
other
}
fn merge_reached(mut self, other: Self) -> Self {
// No update to in-component values, only add seen values.
self.merge_min_max_seen(&other);
pub(crate) fn new(r: RegionVid, definition: &RegionDefinition<'_>) -> Self {
match definition.origin {
NllRegionVariableOrigin::FreeRegion => Representative::FreeRegion(r),
NllRegionVariableOrigin::Placeholder(_) => Representative::Placeholder(r),
NllRegionVariableOrigin::Existential { .. } => Representative::Existential(r),
}
}
}
impl scc::Annotation for Representative {
fn merge_scc(self, other: Self) -> Self {
// Just pick the smallest one. Note that we order by tag first!
std::cmp::min(self, other)
}
// For reachability, we do nothing since the representative doesn't change.
fn merge_reached(self, _other: Self) -> Self {
self
}
}
/// A Visitor for SCC annotation construction.
pub(crate) struct SccAnnotations<'d, 'tcx, A: scc::Annotation> {
pub(crate) scc_to_annotation: IndexVec<ConstraintSccIndex, A>,
definitions: &'d IndexVec<RegionVid, RegionDefinition<'tcx>>,
}
impl<'d, 'tcx, A: scc::Annotation> SccAnnotations<'d, 'tcx, A> {
pub(crate) fn new(definitions: &'d IndexVec<RegionVid, RegionDefinition<'tcx>>) -> Self {
Self { scc_to_annotation: IndexVec::new(), definitions }
}
}
impl scc::Annotations<RegionVid> for SccAnnotations<'_, '_, RegionTracker> {
fn new(&self, element: RegionVid) -> RegionTracker {
RegionTracker::new(element, &self.definitions[element])
}
fn annotate_scc(&mut self, scc: ConstraintSccIndex, annotation: RegionTracker) {
let idx = self.scc_to_annotation.push(annotation);
assert!(idx == scc);
}
type Ann = RegionTracker;
type SccIdx = ConstraintSccIndex;
}
impl RegionTracker {
pub(crate) fn new(rvid: RegionVid, definition: &RegionDefinition<'_>) -> Self {
let (representative_is_placeholder, representative_is_existential) = match definition.origin
{
NllRegionVariableOrigin::FreeRegion => (false, false),
NllRegionVariableOrigin::Placeholder(_) => (true, false),
NllRegionVariableOrigin::Existential { .. } => (false, true),
};
let placeholder_universe =
if representative_is_placeholder { definition.universe } else { UniverseIndex::ROOT };
Self {
max_placeholder_universe_reached: placeholder_universe,
min_reachable_universe: definition.universe,
representative: rvid,
representative_is_placeholder,
representative_is_existential,
}
}
/// The smallest-indexed universe reachable from and/or in this SCC.
fn min_universe(self) -> UniverseIndex {
self.min_reachable_universe
}
fn merge_min_max_seen(&mut self, other: &Self) {
self.max_placeholder_universe_reached = std::cmp::max(
self.max_placeholder_universe_reached,
other.max_placeholder_universe_reached,
);
self.min_reachable_universe =
std::cmp::min(self.min_reachable_universe, other.min_reachable_universe);
}
/// Returns `true` if during the annotated SCC reaches a placeholder
/// with a universe larger than the smallest reachable one, `false` otherwise.
pub(crate) fn has_incompatible_universes(&self) -> bool {
self.min_universe().cannot_name(self.max_placeholder_universe_reached)
}
}
pub(crate) type ConstraintSccs = Sccs<RegionVid, ConstraintSccIndex>;
pub struct RegionInferenceContext<'tcx> {
/// Contains the definition for every region variable. Region
@ -414,26 +338,6 @@ fn sccs_info<'tcx>(infcx: &BorrowckInferCtxt<'tcx>, sccs: &ConstraintSccs) {
debug!("SCC edges {:#?}", scc_node_to_edges);
}
fn create_definitions<'tcx>(
infcx: &BorrowckInferCtxt<'tcx>,
universal_regions: &UniversalRegions<'tcx>,
) -> Frozen<IndexVec<RegionVid, RegionDefinition<'tcx>>> {
// Create a RegionDefinition for each inference variable.
let mut definitions: IndexVec<_, _> = infcx
.get_region_var_infos()
.iter()
.map(|info| RegionDefinition::new(info.universe, info.origin))
.collect();
// Add the external name for all universal regions.
for (external_name, variable) in universal_regions.named_universal_regions_iter() {
debug!("region {variable:?} has external name {external_name:?}");
definitions[variable].external_name = Some(external_name);
}
Frozen::freeze(definitions)
}
impl<'tcx> RegionInferenceContext<'tcx> {
/// Creates a new region inference context with a total of
/// `num_region_variables` valid inference variables; the first N
@ -444,42 +348,30 @@ impl<'tcx> RegionInferenceContext<'tcx> {
/// of constraints produced by the MIR type check.
pub(crate) fn new(
infcx: &BorrowckInferCtxt<'tcx>,
constraints: MirTypeckRegionConstraints<'tcx>,
lowered_constraints: LoweredConstraints<'tcx>,
universal_region_relations: Frozen<UniversalRegionRelations<'tcx>>,
location_map: Rc<DenseLocationMap>,
) -> Self {
let universal_regions = &universal_region_relations.universal_regions;
let MirTypeckRegionConstraints {
placeholder_indices,
placeholder_index_to_region: _,
liveness_constraints,
mut outlives_constraints,
mut member_constraints,
universe_causes,
let LoweredConstraints {
constraint_sccs,
definitions,
outlives_constraints,
scc_annotations,
type_tests,
} = constraints;
liveness_constraints,
universe_causes,
placeholder_indices,
member_constraints,
} = lowered_constraints;
debug!("universal_regions: {:#?}", universal_region_relations.universal_regions);
debug!("outlives constraints: {:#?}", outlives_constraints);
debug!("placeholder_indices: {:#?}", placeholder_indices);
debug!("type tests: {:#?}", type_tests);
if let Some(guar) = universal_region_relations.universal_regions.tainted_by_errors() {
// Suppress unhelpful extra errors in `infer_opaque_types` by clearing out all
// outlives bounds that we may end up checking.
outlives_constraints = Default::default();
member_constraints = Default::default();
// Also taint the entire scope.
infcx.set_tainted_by_errors(guar);
}
let definitions = create_definitions(infcx, &universal_regions);
let (constraint_sccs, scc_annotations) =
outlives_constraints.add_outlives_static(&universal_regions, &definitions);
let constraints = Frozen::freeze(outlives_constraints);
let constraint_graph = Frozen::freeze(constraints.graph(definitions.len()));
let constraint_graph = Frozen::freeze(outlives_constraints.graph(definitions.len()));
if cfg!(debug_assertions) {
sccs_info(infcx, &constraint_sccs);
@ -499,7 +391,7 @@ impl<'tcx> RegionInferenceContext<'tcx> {
let mut result = Self {
definitions,
liveness_constraints,
constraints,
constraints: outlives_constraints,
constraint_graph,
constraint_sccs,
scc_annotations,
@ -658,11 +550,6 @@ impl<'tcx> RegionInferenceContext<'tcx> {
self.scc_values.placeholders_contained_in(scc)
}
/// Returns access to the value of `r` for debugging purposes.
pub(crate) fn region_universe(&self, r: RegionVid) -> ty::UniverseIndex {
self.scc_universe(self.constraint_sccs.scc(r))
}
/// Once region solving has completed, this function will return the member constraints that
/// were applied to the value of a given SCC `scc`. See `AppliedMemberConstraint`.
pub(crate) fn applied_member_constraints(
@ -826,7 +713,7 @@ impl<'tcx> RegionInferenceContext<'tcx> {
// If the member region lives in a higher universe, we currently choose
// the most conservative option by leaving it unchanged.
if !self.scc_universe(scc).is_root() {
if !self.max_nameable_universe(scc).is_root() {
return;
}
@ -902,20 +789,7 @@ impl<'tcx> RegionInferenceContext<'tcx> {
/// in `scc_a`. Used during constraint propagation, and only once
/// the value of `scc_b` has been computed.
fn universe_compatible(&self, scc_b: ConstraintSccIndex, scc_a: ConstraintSccIndex) -> bool {
let a_annotation = self.scc_annotations[scc_a];
let b_annotation = self.scc_annotations[scc_b];
let a_universe = a_annotation.min_universe();
// If scc_b's declared universe is a subset of
// scc_a's declared universe (typically, both are ROOT), then
// it cannot contain any problematic universe elements.
if a_universe.can_name(b_annotation.min_universe()) {
return true;
}
// Otherwise, there can be no placeholder in `b` with a too high
// universe index to name from `a`.
a_universe.can_name(b_annotation.max_placeholder_universe_reached)
self.scc_annotations[scc_a].universe_compatible_with(self.scc_annotations[scc_b])
}
/// Once regions have been propagated, this method is used to see
@ -1019,7 +893,7 @@ impl<'tcx> RegionInferenceContext<'tcx> {
"lower_bound = {:?} r_scc={:?} universe={:?}",
lower_bound,
r_scc,
self.scc_universe(r_scc)
self.max_nameable_universe(r_scc)
);
// If the type test requires that `T: 'a` where `'a` is a
// placeholder from another universe, that effectively requires
@ -1497,10 +1371,9 @@ impl<'tcx> RegionInferenceContext<'tcx> {
}
}
/// The minimum universe of any variable reachable from this
/// SCC, inside or outside of it.
fn scc_universe(&self, scc: ConstraintSccIndex) -> UniverseIndex {
self.scc_annotations[scc].min_universe()
/// The largest universe of any region nameable from this SCC.
fn max_nameable_universe(&self, scc: ConstraintSccIndex) -> UniverseIndex {
self.scc_annotations[scc].max_nameable_universe()
}
/// Checks the final value for the free region `fr` to see if it
@ -1522,7 +1395,7 @@ impl<'tcx> RegionInferenceContext<'tcx> {
// Because this free region must be in the ROOT universe, we
// know it cannot contain any bound universes.
assert!(self.scc_universe(longer_fr_scc).is_root());
assert!(self.max_nameable_universe(longer_fr_scc).is_root());
// Only check all of the relations for the main representative of each
// SCC, otherwise just check that we outlive said representative. This
@ -1913,7 +1786,7 @@ impl<'tcx> RegionInferenceContext<'tcx> {
#[instrument(skip(self), level = "trace", ret)]
pub(crate) fn find_sub_region_live_at(&self, fr1: RegionVid, location: Location) -> RegionVid {
trace!(scc = ?self.constraint_sccs.scc(fr1));
trace!(universe = ?self.region_universe(fr1));
trace!(universe = ?self.max_nameable_universe(self.constraint_sccs.scc(fr1)));
self.find_constraint_paths_between_regions(fr1, |r| {
// First look for some `r` such that `fr1: r` and `r` is live at `location`
trace!(?r, liveness_constraints=?self.liveness_constraints.pretty_print_live_points(r));
@ -2244,7 +2117,7 @@ impl<'tcx> RegionInferenceContext<'tcx> {
/// they *must* be equal (though not having the same repr does not
/// mean they are unequal).
fn scc_representative(&self, scc: ConstraintSccIndex) -> RegionVid {
self.scc_annotations[scc].representative
self.scc_annotations[scc].representative.rvid()
}
pub(crate) fn liveness_constraints(&self) -> &LivenessValues {
@ -2266,21 +2139,6 @@ impl<'tcx> RegionInferenceContext<'tcx> {
}
}
impl<'tcx> RegionDefinition<'tcx> {
fn new(universe: ty::UniverseIndex, rv_origin: RegionVariableOrigin) -> Self {
// Create a new region definition. Note that, for free
// regions, the `external_name` field gets updated later in
// `init_free_and_bound_regions`.
let origin = match rv_origin {
RegionVariableOrigin::Nll(origin) => origin,
_ => NllRegionVariableOrigin::Existential { from_forall: false },
};
Self { origin, universe, external_name: None }
}
}
#[derive(Clone, Debug)]
pub(crate) struct BlameConstraint<'tcx> {
pub category: ConstraintCategory<'tcx>,

View file

@ -191,7 +191,7 @@ impl<'tcx> RegionInferenceContext<'tcx> {
let scc = self.constraint_sccs.scc(vid);
// Special handling of higher-ranked regions.
if !self.scc_universe(scc).is_root() {
if !self.max_nameable_universe(scc).is_root() {
match self.scc_values.placeholders_contained_in(scc).enumerate().last() {
// If the region contains a single placeholder then they're equal.
Some((0, placeholder)) => {

View file

@ -541,12 +541,12 @@ impl<'ll> CodegenCx<'ll, '_> {
// in the handling of `.init_array` (the static constructor list) in versions of
// the gold linker (prior to the one released with binutils 2.36).
//
// That said, we only ever emit these when compiling for ELF targets, unless
// `#[used(compiler)]` is explicitly requested. This is to avoid similar breakage
// on other targets, in particular MachO targets have *their* static constructor
// lists broken if `llvm.compiler.used` is emitted rather than `llvm.used`. However,
// that check happens when assigning the `CodegenFnAttrFlags` in
// `rustc_hir_analysis`, so we don't need to take care of it here.
// That said, we only ever emit these when `#[used(compiler)]` is explicitly
// requested. This is to avoid similar breakage on other targets, in particular
// MachO targets have *their* static constructor lists broken if `llvm.compiler.used`
// is emitted rather than `llvm.used`. However, that check happens when assigning
// the `CodegenFnAttrFlags` in the `codegen_fn_attrs` query, so we don't need to
// take care of it here.
self.add_compiler_used_global(g);
}
if attrs.flags.contains(CodegenFnAttrFlags::USED_LINKER) {

View file

@ -1961,7 +1961,7 @@ fn add_post_link_args(cmd: &mut dyn Linker, sess: &Session, flavor: LinkerFlavor
/// This method creates a synthetic object file, which contains undefined references to all symbols
/// that are necessary for the linking. They are only present in symbol table but not actually
/// used in any sections, so the linker will therefore pick relevant rlibs for linking, but
/// unused `#[no_mangle]` or `#[used]` can still be discard by GC sections.
/// unused `#[no_mangle]` or `#[used(compiler)]` can still be discarded by GC sections.
///
/// There's a few internal crates in the standard library (aka libcore and
/// libstd) which actually have a circular dependence upon one another. This
@ -1995,7 +1995,8 @@ fn add_linked_symbol_object(
if file.format() == object::BinaryFormat::MachO {
// Divide up the sections into sub-sections via symbols for dead code stripping.
// Without this flag, unused `#[no_mangle]` or `#[used]` cannot be discard on MachO targets.
// Without this flag, unused `#[no_mangle]` or `#[used(compiler)]` cannot be
// discarded on MachO targets.
file.set_subsections_via_symbols();
}

View file

@ -195,35 +195,10 @@ fn codegen_fn_attrs(tcx: TyCtxt<'_>, did: LocalDefId) -> CodegenFnAttrs {
tcx.dcx().emit_err(errors::ExpectedUsedSymbol { span: attr.span() });
}
None => {
// Unfortunately, unconditionally using `llvm.used` causes
// issues in handling `.init_array` with the gold linker,
// but using `llvm.compiler.used` caused a nontrivial amount
// of unintentional ecosystem breakage -- particularly on
// Mach-O targets.
//
// As a result, we emit `llvm.compiler.used` only on ELF
// targets. This is somewhat ad-hoc, but actually follows
// our pre-LLVM 13 behavior (prior to the ecosystem
// breakage), and seems to match `clang`'s behavior as well
// (both before and after LLVM 13), possibly because they
// have similar compatibility concerns to us. See
// https://github.com/rust-lang/rust/issues/47384#issuecomment-1019080146
// and following comments for some discussion of this, as
// well as the comments in `rustc_codegen_llvm` where these
// flags are handled.
//
// Anyway, to be clear: this is still up in the air
// somewhat, and is subject to change in the future (which
// is a good thing, because this would ideally be a bit
// more firmed up).
let is_like_elf = !(tcx.sess.target.is_like_darwin
|| tcx.sess.target.is_like_windows
|| tcx.sess.target.is_like_wasm);
codegen_fn_attrs.flags |= if is_like_elf {
CodegenFnAttrFlags::USED_COMPILER
} else {
CodegenFnAttrFlags::USED_LINKER
};
// Unconditionally using `llvm.used` causes issues in handling
// `.init_array` with the gold linker. Luckily gold has been
// deprecated with GCC 15 and rustc now warns about using gold.
codegen_fn_attrs.flags |= CodegenFnAttrFlags::USED_LINKER
}
}
}

View file

@ -454,11 +454,12 @@ fn report_eval_error<'tcx>(
// FIXME(oli-obk): figure out how to use structured diagnostics again.
diag.code(E0080);
diag.span_label(span, crate::fluent_generated::const_eval_error);
diag.arg("instance", instance);
diag.arg("error_kind", kind);
for frame in frames {
diag.subdiagnostic(frame);
}
// Add after the frame rendering above, as it adds its own `instance` args.
diag.arg("instance", instance);
diag.arg("error_kind", kind);
},
)
}

View file

@ -15,15 +15,20 @@ fn alloc_caller_location<'tcx>(
line: u32,
col: u32,
) -> MPlaceTy<'tcx> {
// Ensure that the filename itself does not contain nul bytes.
// This isn't possible via POSIX or Windows, but we should ensure no one
// ever does such a thing.
assert!(!filename.as_str().as_bytes().contains(&0));
let loc_details = ecx.tcx.sess.opts.unstable_opts.location_detail;
// This can fail if rustc runs out of memory right here. Trying to emit an error would be
// pointless, since that would require allocating more memory than these short strings.
let file = if loc_details.file {
ecx.allocate_str_dedup(filename.as_str()).unwrap()
} else {
ecx.allocate_str_dedup("<redacted>").unwrap()
let file_wide_ptr = {
let filename = if loc_details.file { filename.as_str() } else { "<redacted>" };
let filename_with_nul = filename.to_owned() + "\0";
// This can fail if rustc runs out of memory right here. Trying to emit an error would be
// pointless, since that would require allocating more memory than these short strings.
let file_ptr = ecx.allocate_bytes_dedup(filename_with_nul.as_bytes()).unwrap();
Immediate::new_slice(file_ptr.into(), filename_with_nul.len().try_into().unwrap(), ecx)
};
let file = file.map_provenance(CtfeProvenance::as_immutable);
let line = if loc_details.line { Scalar::from_u32(line) } else { Scalar::from_u32(0) };
let col = if loc_details.column { Scalar::from_u32(col) } else { Scalar::from_u32(0) };
@ -36,7 +41,7 @@ fn alloc_caller_location<'tcx>(
let location = ecx.allocate(loc_layout, MemoryKind::CallerLocation).unwrap();
// Initialize fields.
ecx.write_immediate(file.to_ref(ecx), &ecx.project_field(&location, 0).unwrap())
ecx.write_immediate(file_wide_ptr, &ecx.project_field(&location, 0).unwrap())
.expect("writing to memory we just allocated cannot fail");
ecx.write_scalar(line, &ecx.project_field(&location, 1).unwrap())
.expect("writing to memory we just allocated cannot fail");

View file

@ -852,12 +852,7 @@ pub enum LifetimeRes {
/// late resolution. Those lifetimes will be inferred by typechecking.
Infer,
/// `'static` lifetime.
Static {
/// We do not want to emit `elided_named_lifetimes`
/// when we are inside of a const item or a static,
/// because it would get too annoying.
suppress_elision_warning: bool,
},
Static,
/// Resolution failure.
Error,
/// HACK: This is used to recover the NodeId of an elided lifetime.

View file

@ -72,13 +72,13 @@ pub enum LifetimeSource {
#[derive(Debug, Copy, Clone, PartialEq, Eq, HashStable_Generic)]
pub enum LifetimeSyntax {
/// E.g. `&Type`, `ContainsLifetime`
Hidden,
Implicit,
/// E.g. `&'_ Type`, `ContainsLifetime<'_>`, `impl Trait + '_`, `impl Trait + use<'_>`
Anonymous,
ExplicitAnonymous,
/// E.g. `&'a Type`, `ContainsLifetime<'a>`, `impl Trait + 'a`, `impl Trait + use<'a>`
Named,
ExplicitBound,
}
impl From<Ident> for LifetimeSyntax {
@ -88,10 +88,10 @@ impl From<Ident> for LifetimeSyntax {
if name == sym::empty {
unreachable!("A lifetime name should never be empty");
} else if name == kw::UnderscoreLifetime {
LifetimeSyntax::Anonymous
LifetimeSyntax::ExplicitAnonymous
} else {
debug_assert!(name.as_str().starts_with('\''));
LifetimeSyntax::Named
LifetimeSyntax::ExplicitBound
}
}
}
@ -102,48 +102,48 @@ impl From<Ident> for LifetimeSyntax {
///
/// ```
/// #[repr(C)]
/// struct S<'a>(&'a u32); // res=Param, name='a, source=Reference, syntax=Named
/// struct S<'a>(&'a u32); // res=Param, name='a, source=Reference, syntax=ExplicitBound
/// unsafe extern "C" {
/// fn f1(s: S); // res=Param, name='_, source=Path, syntax=Hidden
/// fn f2(s: S<'_>); // res=Param, name='_, source=Path, syntax=Anonymous
/// fn f3<'a>(s: S<'a>); // res=Param, name='a, source=Path, syntax=Named
/// fn f1(s: S); // res=Param, name='_, source=Path, syntax=Implicit
/// fn f2(s: S<'_>); // res=Param, name='_, source=Path, syntax=ExplicitAnonymous
/// fn f3<'a>(s: S<'a>); // res=Param, name='a, source=Path, syntax=ExplicitBound
/// }
///
/// struct St<'a> { x: &'a u32 } // res=Param, name='a, source=Reference, syntax=Named
/// struct St<'a> { x: &'a u32 } // res=Param, name='a, source=Reference, syntax=ExplicitBound
/// fn f() {
/// _ = St { x: &0 }; // res=Infer, name='_, source=Path, syntax=Hidden
/// _ = St::<'_> { x: &0 }; // res=Infer, name='_, source=Path, syntax=Anonymous
/// _ = St { x: &0 }; // res=Infer, name='_, source=Path, syntax=Implicit
/// _ = St::<'_> { x: &0 }; // res=Infer, name='_, source=Path, syntax=ExplicitAnonymous
/// }
///
/// struct Name<'a>(&'a str); // res=Param, name='a, source=Reference, syntax=Named
/// const A: Name = Name("a"); // res=Static, name='_, source=Path, syntax=Hidden
/// const B: &str = ""; // res=Static, name='_, source=Reference, syntax=Hidden
/// static C: &'_ str = ""; // res=Static, name='_, source=Reference, syntax=Anonymous
/// static D: &'static str = ""; // res=Static, name='static, source=Reference, syntax=Named
/// struct Name<'a>(&'a str); // res=Param, name='a, source=Reference, syntax=ExplicitBound
/// const A: Name = Name("a"); // res=Static, name='_, source=Path, syntax=Implicit
/// const B: &str = ""; // res=Static, name='_, source=Reference, syntax=Implicit
/// static C: &'_ str = ""; // res=Static, name='_, source=Reference, syntax=ExplicitAnonymous
/// static D: &'static str = ""; // res=Static, name='static, source=Reference, syntax=ExplicitBound
///
/// trait Tr {}
/// fn tr(_: Box<dyn Tr>) {} // res=ImplicitObjectLifetimeDefault, name='_, source=Other, syntax=Hidden
/// fn tr(_: Box<dyn Tr>) {} // res=ImplicitObjectLifetimeDefault, name='_, source=Other, syntax=Implicit
///
/// fn capture_outlives<'a>() ->
/// impl FnOnce() + 'a // res=Param, ident='a, source=OutlivesBound, syntax=Named
/// impl FnOnce() + 'a // res=Param, ident='a, source=OutlivesBound, syntax=ExplicitBound
/// {
/// || {}
/// }
///
/// fn capture_precise<'a>() ->
/// impl FnOnce() + use<'a> // res=Param, ident='a, source=PreciseCapturing, syntax=Named
/// impl FnOnce() + use<'a> // res=Param, ident='a, source=PreciseCapturing, syntax=ExplicitBound
/// {
/// || {}
/// }
///
/// // (commented out because these cases trigger errors)
/// // struct S1<'a>(&'a str); // res=Param, name='a, source=Reference, syntax=Named
/// // struct S2(S1); // res=Error, name='_, source=Path, syntax=Hidden
/// // struct S3(S1<'_>); // res=Error, name='_, source=Path, syntax=Anonymous
/// // struct S4(S1<'a>); // res=Error, name='a, source=Path, syntax=Named
/// // struct S1<'a>(&'a str); // res=Param, name='a, source=Reference, syntax=ExplicitBound
/// // struct S2(S1); // res=Error, name='_, source=Path, syntax=Implicit
/// // struct S3(S1<'_>); // res=Error, name='_, source=Path, syntax=ExplicitAnonymous
/// // struct S4(S1<'a>); // res=Error, name='a, source=Path, syntax=ExplicitBound
/// ```
///
/// Some combinations that cannot occur are `LifetimeSyntax::Hidden` with
/// Some combinations that cannot occur are `LifetimeSyntax::Implicit` with
/// `LifetimeSource::OutlivesBound` or `LifetimeSource::PreciseCapturing`
/// — there's no way to "elide" these lifetimes.
#[derive(Debug, Copy, Clone, HashStable_Generic)]
@ -206,7 +206,7 @@ impl ParamName {
}
}
#[derive(Debug, Copy, Clone, PartialEq, Eq, HashStable_Generic)]
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash, HashStable_Generic)]
pub enum LifetimeKind {
/// User-given names or fresh (synthetic) names.
Param(LocalDefId),
@ -287,12 +287,8 @@ impl Lifetime {
self.ident.name == kw::UnderscoreLifetime
}
pub fn is_syntactically_hidden(&self) -> bool {
matches!(self.syntax, LifetimeSyntax::Hidden)
}
pub fn is_syntactically_anonymous(&self) -> bool {
matches!(self.syntax, LifetimeSyntax::Anonymous)
pub fn is_implicit(&self) -> bool {
matches!(self.syntax, LifetimeSyntax::Implicit)
}
pub fn is_static(&self) -> bool {
@ -307,28 +303,28 @@ impl Lifetime {
match (self.syntax, self.source) {
// The user wrote `'a` or `'_`.
(Named | Anonymous, _) => (self.ident.span, format!("{new_lifetime}")),
(ExplicitBound | ExplicitAnonymous, _) => (self.ident.span, format!("{new_lifetime}")),
// The user wrote `Path<T>`, and omitted the `'_,`.
(Hidden, Path { angle_brackets: AngleBrackets::Full }) => {
(Implicit, Path { angle_brackets: AngleBrackets::Full }) => {
(self.ident.span, format!("{new_lifetime}, "))
}
// The user wrote `Path<>`, and omitted the `'_`..
(Hidden, Path { angle_brackets: AngleBrackets::Empty }) => {
(Implicit, Path { angle_brackets: AngleBrackets::Empty }) => {
(self.ident.span, format!("{new_lifetime}"))
}
// The user wrote `Path` and omitted the `<'_>`.
(Hidden, Path { angle_brackets: AngleBrackets::Missing }) => {
(Implicit, Path { angle_brackets: AngleBrackets::Missing }) => {
(self.ident.span.shrink_to_hi(), format!("<{new_lifetime}>"))
}
// The user wrote `&type` or `&mut type`.
(Hidden, Reference) => (self.ident.span, format!("{new_lifetime} ")),
(Implicit, Reference) => (self.ident.span, format!("{new_lifetime} ")),
(Hidden, source) => {
unreachable!("can't suggest for a hidden lifetime of {source:?}")
(Implicit, source) => {
unreachable!("can't suggest for a implicit lifetime of {source:?}")
}
}
}

View file

@ -55,7 +55,7 @@ fn trait_object_roundtrips_impl(syntax: TraitObjectSyntax) {
ident: Ident::new(sym::name, DUMMY_SP),
kind: LifetimeKind::Static,
source: LifetimeSource::Other,
syntax: LifetimeSyntax::Hidden,
syntax: LifetimeSyntax::Implicit,
};
let unambig = TyKind::TraitObject::<'_, ()>(&[], TaggedRef::new(&lt, syntax));
let unambig_to_ambig = unsafe { std::mem::transmute::<_, TyKind<'_, AmbigArg>>(unambig) };

View file

@ -545,11 +545,12 @@ impl Cursor<'_> {
let mut s = self.as_str();
let mut found = false;
let mut size = 0;
while let Some(closing) = s.find(&"-".repeat(length_opening as usize)) {
let preceding_chars_start = s[..closing].rfind("\n").map_or(0, |i| i + 1);
if s[preceding_chars_start..closing].chars().all(is_whitespace) {
// candidate found
self.bump_bytes(closing);
self.bump_bytes(size + closing);
// in case like
// ---cargo
// --- blahblah
@ -562,6 +563,7 @@ impl Cursor<'_> {
break;
} else {
s = &s[closing + length_opening as usize..];
size += closing + length_opening as usize;
}
}

View file

@ -253,11 +253,6 @@ lint_duplicate_macro_attribute =
lint_duplicate_matcher_binding = duplicate matcher binding
lint_elided_named_lifetime = elided lifetime has a name
.label_elided = this elided lifetime gets resolved as `{$name}`
.label_named = lifetime `{$name}` declared here
.suggestion = consider specifying it explicitly
lint_enum_intrinsics_mem_discriminant =
the return value of `mem::discriminant` is unspecified when called with a non-enum type
.note = the argument to `discriminant` should be a reference to an enum, but it was passed a reference to a `{$ty_param}`, which is not an enum
@ -516,6 +511,28 @@ lint_metavariable_still_repeating = variable `{$name}` is still repeating at thi
lint_metavariable_wrong_operator = meta-variable repeats with different Kleene operator
lint_mismatched_lifetime_syntaxes =
lifetime flowing from input to output with different syntax can be confusing
.label_mismatched_lifetime_syntaxes_inputs =
{$n_inputs ->
[one] this lifetime flows
*[other] these lifetimes flow
} to the output
.label_mismatched_lifetime_syntaxes_outputs =
the {$n_outputs ->
[one] lifetime gets
*[other] lifetimes get
} resolved as `{$lifetime_name}`
lint_mismatched_lifetime_syntaxes_suggestion_explicit =
one option is to consistently use `{$lifetime_name}`
lint_mismatched_lifetime_syntaxes_suggestion_implicit =
one option is to consistently remove the lifetime
lint_mismatched_lifetime_syntaxes_suggestion_mixed =
one option is to remove the lifetime for references and use the anonymous lifetime for paths
lint_missing_fragment_specifier = missing fragment specifier
lint_missing_unsafe_on_extern = extern blocks should be unsafe

View file

@ -10,11 +10,11 @@ use rustc_errors::{
use rustc_middle::middle::stability;
use rustc_middle::ty::TyCtxt;
use rustc_session::Session;
use rustc_session::lint::{BuiltinLintDiag, ElidedLifetimeResolution};
use rustc_span::{BytePos, kw};
use rustc_session::lint::BuiltinLintDiag;
use rustc_span::BytePos;
use tracing::debug;
use crate::lints::{self, ElidedNamedLifetime};
use crate::lints;
mod check_cfg;
@ -471,16 +471,5 @@ pub fn decorate_builtin_lint(
BuiltinLintDiag::UnexpectedBuiltinCfg { cfg, cfg_name, controlled_by } => {
lints::UnexpectedBuiltinCfg { cfg, cfg_name, controlled_by }.decorate_lint(diag)
}
BuiltinLintDiag::ElidedNamedLifetimes { elided: (span, kind), resolution } => {
match resolution {
ElidedLifetimeResolution::Static => {
ElidedNamedLifetime { span, kind, name: kw::StaticLifetime, declaration: None }
}
ElidedLifetimeResolution::Param(name, declaration) => {
ElidedNamedLifetime { span, kind, name, declaration: Some(declaration) }
}
}
.decorate_lint(diag)
}
}
}

View file

@ -55,6 +55,7 @@ mod invalid_from_utf8;
mod late;
mod let_underscore;
mod levels;
mod lifetime_syntax;
mod lints;
mod macro_expr_fragment_specifier_2024_migration;
mod map_unit_fn;
@ -96,6 +97,7 @@ use impl_trait_overcaptures::ImplTraitOvercaptures;
use internal::*;
use invalid_from_utf8::*;
use let_underscore::*;
use lifetime_syntax::*;
use macro_expr_fragment_specifier_2024_migration::*;
use map_unit_fn::*;
use multiple_supertrait_upcastable::*;
@ -246,6 +248,7 @@ late_lint_methods!(
StaticMutRefs: StaticMutRefs,
UnqualifiedLocalImports: UnqualifiedLocalImports,
CheckTransmutes: CheckTransmutes,
LifetimeSyntax: LifetimeSyntax,
]
]
);
@ -353,6 +356,7 @@ fn register_builtins(store: &mut LintStore) {
store.register_renamed("unused_tuple_struct_fields", "dead_code");
store.register_renamed("static_mut_ref", "static_mut_refs");
store.register_renamed("temporary_cstring_as_ptr", "dangling_pointers_from_temporaries");
store.register_renamed("elided_named_lifetimes", "mismatched_lifetime_syntaxes");
// These were moved to tool lints, but rustc still sees them when compiling normally, before
// tool lints are registered, so `check_tool_name_for_backwards_compat` doesn't work. Use

View file

@ -0,0 +1,503 @@
use rustc_data_structures::fx::FxIndexMap;
use rustc_hir::intravisit::{self, Visitor};
use rustc_hir::{self as hir, LifetimeSource};
use rustc_session::{declare_lint, declare_lint_pass};
use rustc_span::Span;
use tracing::instrument;
use crate::{LateContext, LateLintPass, LintContext, lints};
declare_lint! {
    /// The `mismatched_lifetime_syntaxes` lint detects when the same
    /// lifetime is referred to by different syntaxes between function
    /// arguments and return values.
    ///
    /// The three kinds of syntaxes are:
    ///
    /// 1. Named lifetimes. These are references (`&'a str`) or paths
    ///    (`Person<'a>`) that use a lifetime with a name, such as
    ///    `'static` or `'a`.
    ///
    /// 2. Elided lifetimes. These are references with no explicit
    ///    lifetime (`&str`), references using the anonymous lifetime
    ///    (`&'_ str`), and paths using the anonymous lifetime
    ///    (`Person<'_>`).
    ///
    /// 3. Hidden lifetimes. These are paths that do not contain any
    ///    visual indication that they contain a lifetime (`Person`).
    ///
    /// ### Example
    ///
    /// ```rust,compile_fail
    /// #![deny(mismatched_lifetime_syntaxes)]
    ///
    /// pub fn mixing_named_with_elided(v: &'static u8) -> &u8 {
    ///     v
    /// }
    ///
    /// struct Person<'a> {
    ///     name: &'a str,
    /// }
    ///
    /// pub fn mixing_hidden_with_elided(v: Person) -> Person<'_> {
    ///     v
    /// }
    ///
    /// struct Foo;
    ///
    /// impl Foo {
    ///     // Lifetime elision results in the output lifetime becoming
    ///     // `'static`, which is not what was intended.
    ///     pub fn get_mut(&'static self, x: &mut u8) -> &mut u8 {
    ///         unsafe { &mut *(x as *mut _) }
    ///     }
    /// }
    /// ```
    ///
    /// {{produces}}
    ///
    /// ### Explanation
    ///
    /// Lifetime elision is useful because it frees you from having to
    /// give each lifetime its own name and show the relation of input
    /// and output lifetimes for common cases. However, a lifetime
    /// that uses inconsistent syntax between related arguments and
    /// return values is more confusing.
    ///
    /// In certain `unsafe` code, lifetime elision combined with
    /// inconsistent lifetime syntax may result in unsound code.
    pub MISMATCHED_LIFETIME_SYNTAXES,
    Warn,
    "detects when a lifetime uses different syntax between arguments and return values"
}

declare_lint_pass!(LifetimeSyntax => [MISMATCHED_LIFETIME_SYNTAXES]);
impl<'tcx> LateLintPass<'tcx> for LifetimeSyntax {
    #[instrument(skip_all)]
    fn check_fn(
        &mut self,
        cx: &LateContext<'tcx>,
        _: hir::intravisit::FnKind<'tcx>,
        fd: &'tcx hir::FnDecl<'tcx>,
        _: &'tcx hir::Body<'tcx>,
        _: rustc_span::Span,
        _: rustc_span::def_id::LocalDefId,
    ) {
        // Record, per resolved lifetime, every syntactic occurrence in the
        // argument types and in the return type. The two sides are kept in
        // separate maps so they can be compared against each other.
        let mut input_map = Default::default();
        let mut output_map = Default::default();

        fd.inputs.iter().for_each(|arg_ty| LifetimeInfoCollector::collect(arg_ty, &mut input_map));

        if let hir::FnRetTy::Return(ret_ty) = fd.output {
            LifetimeInfoCollector::collect(ret_ty, &mut output_map);
        }

        report_mismatches(cx, &input_map, &output_map);
    }
}
/// Compares how each resolved lifetime is written in the inputs against how
/// it is written in the output, linting when the styles are inconsistent.
#[instrument(skip_all)]
fn report_mismatches<'tcx>(
    cx: &LateContext<'tcx>,
    inputs: &LifetimeInfoMap<'tcx>,
    outputs: &LifetimeInfoMap<'tcx>,
) {
    for (resolved_lifetime, output_info) in outputs {
        // Only lifetimes occurring on *both* sides can mismatch; an output
        // lifetime with no corresponding input occurrence is ignored.
        let Some(input_info) = inputs.get(resolved_lifetime) else { continue };

        if !lifetimes_use_matched_syntax(input_info, output_info) {
            emit_mismatch_diagnostic(cx, input_info, output_info);
        }
    }
}
/// Decides whether every use of one resolved lifetime, across both inputs
/// and outputs, falls into a single syntactic category.
fn lifetimes_use_matched_syntax(input_info: &[Info<'_>], output_info: &[Info<'_>]) -> bool {
    use LifetimeSource::*;
    use hir::LifetimeSyntax::*;

    // Tally the three syntactic categories across both sides.
    let mut n_hidden = 0;
    let mut n_elided = 0;
    let mut n_named = 0;

    for info in input_info.iter().chain(output_info) {
        match (info.lifetime.syntax, info.lifetime.source) {
            // Lifetimes from any other source never participate in the lint.
            (_, Other) => {}

            // Implicit lifetimes on references / bounds / captures (e.g. `&T`)
            // and explicitly-anonymous lifetimes anywhere (e.g. `&'_ T`,
            // `ContainsLifetime<'_>`) both count as "elided".
            (Implicit, Reference | OutlivesBound | PreciseCapturing) | (ExplicitAnonymous, _) => {
                n_elided += 1
            }

            // A path showing no lifetime at all, e.g. `ContainsLifetime`.
            (Implicit, Path { .. }) => n_hidden += 1,

            // A bound (named) lifetime anywhere, e.g. `&'a T`, `ContainsLifetime<'a>`.
            (ExplicitBound, _) => n_named += 1,
        }
    }

    let syntax_counts = (n_hidden, n_elided, n_named);
    tracing::debug!(?syntax_counts);

    // The syntax is consistent iff at most one of the three counts is nonzero.
    matches!(syntax_counts, (_, 0, 0) | (0, _, 0) | (0, 0, _))
}
fn emit_mismatch_diagnostic<'tcx>(
cx: &LateContext<'tcx>,
input_info: &[Info<'_>],
output_info: &[Info<'_>],
) {
// There can only ever be zero or one bound lifetime
// for a given lifetime resolution.
let mut bound_lifetime = None;
// We offer the following kinds of suggestions (when appropriate
// such that the suggestion wouldn't violate the lint):
//
// 1. Every lifetime becomes named, when there is already a
// user-provided name.
//
// 2. A "mixed" signature, where references become implicit
// and paths become explicitly anonymous.
//
// 3. Every lifetime becomes implicit.
//
// 4. Every lifetime becomes explicitly anonymous.
//
// Number 2 is arguably the most common pattern and the one we
// should push strongest. Number 3 is likely the next most common,
// followed by number 1. Coming in at a distant last would be
// number 4.
//
// Beyond these, there are variants of acceptable signatures that
// we won't suggest because they are very low-value. For example,
// we will never suggest `fn(&T1, &'_ T2) -> &T3` even though that
// would pass the lint.
//
// The following collections are the lifetime instances that we
// suggest changing to a given alternate style.
// 1. Convert all to named.
let mut suggest_change_to_explicit_bound = Vec::new();
// 2. Convert to mixed. We track each kind of change separately.
let mut suggest_change_to_mixed_implicit = Vec::new();
let mut suggest_change_to_mixed_explicit_anonymous = Vec::new();
// 3. Convert all to implicit.
let mut suggest_change_to_implicit = Vec::new();
// 4. Convert all to explicit anonymous.
let mut suggest_change_to_explicit_anonymous = Vec::new();
// Some styles prevent using implicit syntax at all.
let mut allow_suggesting_implicit = true;
// It only makes sense to suggest mixed if we have both sources.
let mut saw_a_reference = false;
let mut saw_a_path = false;
for info in input_info.iter().chain(output_info) {
use LifetimeSource::*;
use hir::LifetimeSyntax::*;
let syntax_source = (info.lifetime.syntax, info.lifetime.source);
if let (_, Other) = syntax_source {
// Ignore any other kind of lifetime.
continue;
}
if let (ExplicitBound, _) = syntax_source {
bound_lifetime = Some(info);
}
match syntax_source {
// E.g. `&T`.
(Implicit, Reference) => {
suggest_change_to_explicit_anonymous.push(info);
suggest_change_to_explicit_bound.push(info);
}
// E.g. `&'_ T`.
(ExplicitAnonymous, Reference) => {
suggest_change_to_implicit.push(info);
suggest_change_to_mixed_implicit.push(info);
suggest_change_to_explicit_bound.push(info);
}
// E.g. `ContainsLifetime`.
(Implicit, Path { .. }) => {
suggest_change_to_mixed_explicit_anonymous.push(info);
suggest_change_to_explicit_anonymous.push(info);
suggest_change_to_explicit_bound.push(info);
}
// E.g. `ContainsLifetime<'_>`.
(ExplicitAnonymous, Path { .. }) => {
suggest_change_to_explicit_bound.push(info);
}
// E.g. `&'a T`.
(ExplicitBound, Reference) => {
suggest_change_to_implicit.push(info);
suggest_change_to_mixed_implicit.push(info);
suggest_change_to_explicit_anonymous.push(info);
}
// E.g. `ContainsLifetime<'a>`.
(ExplicitBound, Path { .. }) => {
suggest_change_to_mixed_explicit_anonymous.push(info);
suggest_change_to_explicit_anonymous.push(info);
}
(Implicit, OutlivesBound | PreciseCapturing) => {
panic!("This syntax / source combination is not possible");
}
// E.g. `+ '_`, `+ use<'_>`.
(ExplicitAnonymous, OutlivesBound | PreciseCapturing) => {
suggest_change_to_explicit_bound.push(info);
}
// E.g. `+ 'a`, `+ use<'a>`.
(ExplicitBound, OutlivesBound | PreciseCapturing) => {
suggest_change_to_mixed_explicit_anonymous.push(info);
suggest_change_to_explicit_anonymous.push(info);
}
(_, Other) => {
panic!("This syntax / source combination has already been skipped");
}
}
if matches!(syntax_source, (_, Path { .. } | OutlivesBound | PreciseCapturing)) {
allow_suggesting_implicit = false;
}
match syntax_source {
(_, Reference) => saw_a_reference = true,
(_, Path { .. }) => saw_a_path = true,
_ => {}
}
}
let make_implicit_suggestions =
|infos: &[&Info<'_>]| infos.iter().map(|i| i.removing_span()).collect::<Vec<_>>();
let inputs = input_info.iter().map(|info| info.reporting_span()).collect();
let outputs = output_info.iter().map(|info| info.reporting_span()).collect();
let explicit_bound_suggestion = bound_lifetime.map(|info| {
build_mismatch_suggestion(info.lifetime_name(), &suggest_change_to_explicit_bound)
});
let is_bound_static = bound_lifetime.is_some_and(|info| info.is_static());
tracing::debug!(?bound_lifetime, ?explicit_bound_suggestion, ?is_bound_static);
let should_suggest_mixed =
// Do we have a mixed case?
(saw_a_reference && saw_a_path) &&
// Is there anything to change?
(!suggest_change_to_mixed_implicit.is_empty() ||
!suggest_change_to_mixed_explicit_anonymous.is_empty()) &&
// If we have `'static`, we don't want to remove it.
!is_bound_static;
let mixed_suggestion = should_suggest_mixed.then(|| {
let implicit_suggestions = make_implicit_suggestions(&suggest_change_to_mixed_implicit);
let explicit_anonymous_suggestions = suggest_change_to_mixed_explicit_anonymous
.iter()
.map(|info| info.suggestion("'_"))
.collect();
lints::MismatchedLifetimeSyntaxesSuggestion::Mixed {
implicit_suggestions,
explicit_anonymous_suggestions,
tool_only: false,
}
});
tracing::debug!(
?suggest_change_to_mixed_implicit,
?suggest_change_to_mixed_explicit_anonymous,
?mixed_suggestion,
);
let should_suggest_implicit =
// Is there anything to change?
!suggest_change_to_implicit.is_empty() &&
// We never want to hide the lifetime in a path (or similar).
allow_suggesting_implicit &&
// If we have `'static`, we don't want to remove it.
!is_bound_static;
let implicit_suggestion = should_suggest_implicit.then(|| {
let suggestions = make_implicit_suggestions(&suggest_change_to_implicit);
lints::MismatchedLifetimeSyntaxesSuggestion::Implicit { suggestions, tool_only: false }
});
tracing::debug!(
?should_suggest_implicit,
?suggest_change_to_implicit,
allow_suggesting_implicit,
?implicit_suggestion,
);
let should_suggest_explicit_anonymous =
// Is there anything to change?
!suggest_change_to_explicit_anonymous.is_empty() &&
// If we have `'static`, we don't want to remove it.
!is_bound_static;
let explicit_anonymous_suggestion = should_suggest_explicit_anonymous
.then(|| build_mismatch_suggestion("'_", &suggest_change_to_explicit_anonymous));
tracing::debug!(
?should_suggest_explicit_anonymous,
?suggest_change_to_explicit_anonymous,
?explicit_anonymous_suggestion,
);
let lifetime_name = bound_lifetime.map(|info| info.lifetime_name()).unwrap_or("'_").to_owned();
// We can produce a number of suggestions which may overwhelm
// the user. Instead, we order the suggestions based on Rust
// idioms. The "best" choice is shown to the user and the
// remaining choices are shown to tools only.
let mut suggestions = Vec::new();
suggestions.extend(explicit_bound_suggestion);
suggestions.extend(mixed_suggestion);
suggestions.extend(implicit_suggestion);
suggestions.extend(explicit_anonymous_suggestion);
cx.emit_span_lint(
MISMATCHED_LIFETIME_SYNTAXES,
Vec::clone(&inputs),
lints::MismatchedLifetimeSyntaxes { lifetime_name, inputs, outputs, suggestions },
);
}
/// Builds an `Explicit` suggestion that rewrites every lifetime use in
/// `infos` to the given `lifetime_name` (e.g. `'a` or `'_`).
/// The suggestion starts out user-visible (`tool_only: false`).
fn build_mismatch_suggestion(
    lifetime_name: &str,
    infos: &[&Info<'_>],
) -> lints::MismatchedLifetimeSyntaxesSuggestion {
    let name = lifetime_name.to_owned();
    let mut suggestions = Vec::with_capacity(infos.len());
    for info in infos {
        suggestions.push(info.suggestion(&name));
    }
    lints::MismatchedLifetimeSyntaxesSuggestion::Explicit {
        lifetime_name: name,
        suggestions,
        tool_only: false,
    }
}
/// One use of a lifetime found while walking a type, together with
/// enough span information to report it and to build rewrite suggestions.
#[derive(Debug)]
struct Info<'tcx> {
    // Span of the outermost type containing this lifetime use.
    type_span: Span,
    // For references (`&'a T`), the span of the referenced type `T`.
    referenced_type_span: Option<Span>,
    lifetime: &'tcx hir::Lifetime,
}
impl<'tcx> Info<'tcx> {
    /// The lifetime's textual name, e.g. `'a` or `'_`.
    fn lifetime_name(&self) -> &str {
        self.lifetime.ident.as_str()
    }

    /// Whether this lifetime is `'static`.
    fn is_static(&self) -> bool {
        self.lifetime.is_static()
    }

    /// When reporting a lifetime that is implicit, we expand the span
    /// to include the type. Otherwise we end up pointing at nothing,
    /// which is a bit confusing.
    fn reporting_span(&self) -> Span {
        match self.lifetime.is_implicit() {
            true => self.type_span,
            false => self.lifetime.ident.span,
        }
    }

    /// When removing an explicit lifetime from a reference,
    /// we want to remove the whitespace after the lifetime.
    ///
    /// ```rust
    /// fn x(a: &'_ u8) {}
    /// ```
    ///
    /// Should become:
    ///
    /// ```rust
    /// fn x(a: &u8) {}
    /// ```
    // FIXME: Ideally, we'd also remove the lifetime declaration.
    fn removing_span(&self) -> Span {
        let (span, _) = self.suggestion("'dummy");
        match self.referenced_type_span {
            // Extend removal up to (but not including) the referenced type.
            Some(referenced_type_span) => span.until(referenced_type_span),
            None => span,
        }
    }

    /// Delegates to the lifetime to produce the span to replace and the
    /// replacement text for rewriting it as `lifetime_name`.
    fn suggestion(&self, lifetime_name: &str) -> (Span, String) {
        self.lifetime.suggestion(lifetime_name)
    }
}
/// Maps each distinct lifetime kind to every place it was used in the type.
type LifetimeInfoMap<'tcx> = FxIndexMap<&'tcx hir::LifetimeKind, Vec<Info<'tcx>>>;
/// HIR visitor that walks a type and records every lifetime use into `map`.
struct LifetimeInfoCollector<'a, 'tcx> {
    // Span of the type currently being visited (updated in `visit_ty`).
    type_span: Span,
    // For `&'a T`, the span of `T`; `None` outside of references.
    referenced_type_span: Option<Span>,
    map: &'a mut LifetimeInfoMap<'tcx>,
}
impl<'a, 'tcx> LifetimeInfoCollector<'a, 'tcx> {
    /// Walks `ty` and records every lifetime use found into `map`,
    /// keyed by the lifetime's kind.
    fn collect(ty: &'tcx hir::Ty<'tcx>, map: &'a mut LifetimeInfoMap<'tcx>) {
        let mut this = Self { type_span: ty.span, referenced_type_span: None, map };
        intravisit::walk_unambig_ty(&mut this, ty);
    }
}
impl<'a, 'tcx> Visitor<'tcx> for LifetimeInfoCollector<'a, 'tcx> {
    /// Records one lifetime use under the current type / referenced-type spans.
    #[instrument(skip(self))]
    fn visit_lifetime(&mut self, lifetime: &'tcx hir::Lifetime) {
        let info = Info {
            type_span: self.type_span,
            referenced_type_span: self.referenced_type_span,
            lifetime,
        };
        self.map.entry(&lifetime.kind).or_default().push(info);
    }

    /// Tracks the span of the type being visited, saving and restoring
    /// the enclosing type's spans around the recursive walk.
    #[instrument(skip(self))]
    fn visit_ty(&mut self, ty: &'tcx hir::Ty<'tcx, hir::AmbigArg>) -> Self::Result {
        let saved_type_span = self.type_span;
        let saved_referenced_type_span = self.referenced_type_span;

        self.type_span = ty.span;
        if let hir::TyKind::Ref(_, ty) = ty.kind {
            self.referenced_type_span = Some(ty.ty.span);
        }

        intravisit::walk_ty(self, ty);

        self.type_span = saved_type_span;
        self.referenced_type_span = saved_referenced_type_span;
    }
}

View file

@ -8,17 +8,17 @@ use rustc_errors::{
Applicability, Diag, DiagArgValue, DiagMessage, DiagStyledString, ElidedLifetimeInPathSubdiag,
EmissionGuarantee, LintDiagnostic, MultiSpan, Subdiagnostic, SuggestionStyle,
};
use rustc_hir as hir;
use rustc_hir::def::Namespace;
use rustc_hir::def_id::DefId;
use rustc_hir::intravisit::VisitorExt;
use rustc_hir::{self as hir, MissingLifetimeKind};
use rustc_macros::{LintDiagnostic, Subdiagnostic};
use rustc_middle::ty::inhabitedness::InhabitedPredicate;
use rustc_middle::ty::{Clause, PolyExistentialTraitRef, Ty, TyCtxt};
use rustc_session::Session;
use rustc_session::lint::AmbiguityErrorDiag;
use rustc_span::edition::Edition;
use rustc_span::{Ident, MacroRulesNormalizedIdent, Span, Symbol, kw, sym};
use rustc_span::{Ident, MacroRulesNormalizedIdent, Span, Symbol, sym};
use crate::builtin::{InitError, ShorthandAssocTyCollector, TypeAliasBounds};
use crate::errors::{OverruledAttributeSub, RequestedLevel};
@ -2752,58 +2752,6 @@ pub(crate) struct ElidedLifetimesInPaths {
pub subdiag: ElidedLifetimeInPathSubdiag,
}
/// Diagnostic data for an elided lifetime that resolved to a named
/// lifetime (`'static` or a declared parameter).
pub(crate) struct ElidedNamedLifetime {
    // Span of the elided lifetime being linted.
    pub span: Span,
    // The syntactic shape of the elision (`'_`, bare `&`, missing
    // comma, or missing angle brackets) — drives the suggestion form.
    pub kind: MissingLifetimeKind,
    // Name of the lifetime the elision resolved to.
    pub name: Symbol,
    // Span of the named lifetime's declaration, when one exists.
    pub declaration: Option<Span>,
}
impl<G: EmissionGuarantee> LintDiagnostic<'_, G> for ElidedNamedLifetime {
    /// Renders the lint: labels the elided span (and the lifetime's
    /// declaration, if known) and, for `'static` only, offers a
    /// machine-applicable fix inserting the resolved name.
    fn decorate_lint(self, diag: &mut rustc_errors::Diag<'_, G>) {
        let Self { span, kind, name, declaration } = self;
        diag.primary_message(fluent::lint_elided_named_lifetime);
        diag.arg("name", name);
        diag.span_label(span, fluent::lint_label_elided);
        if let Some(declaration) = declaration {
            diag.span_label(declaration, fluent::lint_label_named);
        }
        // FIXME(GrigorenkoPV): this `if` and `return` should be removed,
        // but currently this lint's suggestions can conflict with those of `clippy::needless_lifetimes`:
        // https://github.com/rust-lang/rust/pull/129840#issuecomment-2323349119
        // HACK: `'static` suggestions will never conflict, emit only those for now.
        if name != kw::StaticLifetime {
            return;
        }
        // Insert the resolved name using the syntax shape that matches
        // how the lifetime was originally elided.
        match kind {
            MissingLifetimeKind::Underscore => diag.span_suggestion_verbose(
                span,
                fluent::lint_suggestion,
                format!("{name}"),
                Applicability::MachineApplicable,
            ),
            MissingLifetimeKind::Ampersand => diag.span_suggestion_verbose(
                span.shrink_to_hi(),
                fluent::lint_suggestion,
                format!("{name} "),
                Applicability::MachineApplicable,
            ),
            MissingLifetimeKind::Comma => diag.span_suggestion_verbose(
                span.shrink_to_hi(),
                fluent::lint_suggestion,
                format!("{name}, "),
                Applicability::MachineApplicable,
            ),
            MissingLifetimeKind::Brackets => diag.span_suggestion_verbose(
                span.shrink_to_hi(),
                fluent::lint_suggestion,
                format!("<{name}>"),
                Applicability::MachineApplicable,
            ),
        };
    }
}
#[derive(LintDiagnostic)]
#[diag(lint_invalid_crate_type_value)]
pub(crate) struct UnknownCrateTypes {
@ -3241,3 +3189,128 @@ pub(crate) struct ReservedMultihash {
#[suggestion(code = " ", applicability = "machine-applicable")]
pub suggestion: Span,
}
/// Diagnostic data for the mismatched-lifetime-syntaxes lint: one
/// lifetime appearing with differing syntaxes across inputs and outputs.
#[derive(Debug)]
pub(crate) struct MismatchedLifetimeSyntaxes {
    pub lifetime_name: String,
    // Spans of the lifetime's uses in the inputs.
    pub inputs: Vec<Span>,
    // Spans of the lifetime's uses in the outputs.
    pub outputs: Vec<Span>,
    // Candidate rewrites; only the first is shown to the user,
    // the rest become tool-only (see `decorate_lint`).
    pub suggestions: Vec<MismatchedLifetimeSyntaxesSuggestion>,
}
impl<'a, G: EmissionGuarantee> LintDiagnostic<'a, G> for MismatchedLifetimeSyntaxes {
    /// Renders the lint: labels every input and output span, then emits
    /// the first suggestion visibly and the remainder as tool-only.
    fn decorate_lint<'b>(self, diag: &'b mut Diag<'a, G>) {
        diag.primary_message(fluent::lint_mismatched_lifetime_syntaxes);
        diag.arg("lifetime_name", self.lifetime_name);
        diag.arg("n_inputs", self.inputs.len());
        for input in self.inputs {
            // NOTE(review): translated eagerly per span — presumably so the
            // label captures the current `n_inputs` argument; confirm.
            let a = diag.eagerly_translate(fluent::lint_label_mismatched_lifetime_syntaxes_inputs);
            diag.span_label(input, a);
        }
        diag.arg("n_outputs", self.outputs.len());
        for output in self.outputs {
            let a = diag.eagerly_translate(fluent::lint_label_mismatched_lifetime_syntaxes_outputs);
            diag.span_label(output, a);
        }
        // Show only the best (first) suggestion to the user; the rest
        // are attached hidden so tools can still apply them.
        let mut suggestions = self.suggestions.into_iter();
        if let Some(s) = suggestions.next() {
            diag.subdiagnostic(s);
            for mut s in suggestions {
                s.make_tool_only();
                diag.subdiagnostic(s);
            }
        }
    }
}
/// A candidate rewrite that makes a lifetime's syntax consistent.
/// `tool_only: true` hides the suggestion from rendered output while
/// keeping it machine-applicable (see `add_to_diag`).
#[derive(Debug)]
pub(crate) enum MismatchedLifetimeSyntaxesSuggestion {
    /// Remove the lifetime syntax entirely (spans are replaced with
    /// nothing), e.g. `&'_ u8` -> `&u8`.
    Implicit {
        suggestions: Vec<Span>,
        tool_only: bool,
    },
    /// A combination: some uses are removed, others are rewritten to the
    /// anonymous lifetime `'_`.
    Mixed {
        implicit_suggestions: Vec<Span>,
        explicit_anonymous_suggestions: Vec<(Span, String)>,
        tool_only: bool,
    },
    /// Rewrite every use to the explicit lifetime `lifetime_name`.
    Explicit {
        lifetime_name: String,
        suggestions: Vec<(Span, String)>,
        tool_only: bool,
    },
}
impl MismatchedLifetimeSyntaxesSuggestion {
    /// Marks this suggestion as tool-only, hiding it from rendered
    /// diagnostics while keeping it applicable by tools.
    fn make_tool_only(&mut self) {
        use MismatchedLifetimeSyntaxesSuggestion::*;

        match self {
            Implicit { tool_only, .. }
            | Mixed { tool_only, .. }
            | Explicit { tool_only, .. } => *tool_only = true,
        }
    }
}
impl Subdiagnostic for MismatchedLifetimeSyntaxesSuggestion {
    /// Attaches this suggestion to `diag` as one machine-applicable
    /// multipart suggestion, hidden entirely when `tool_only` is set.
    fn add_to_diag<G: EmissionGuarantee>(self, diag: &mut Diag<'_, G>) {
        use MismatchedLifetimeSyntaxesSuggestion::*;

        // Tool-only suggestions stay applicable but are not rendered.
        let style = |tool_only| {
            if tool_only { SuggestionStyle::CompletelyHidden } else { SuggestionStyle::ShowAlways }
        };

        match self {
            Implicit { suggestions, tool_only } => {
                // Removal: each span is replaced with the empty string.
                let suggestions = suggestions.into_iter().map(|s| (s, String::new())).collect();
                diag.multipart_suggestion_with_style(
                    fluent::lint_mismatched_lifetime_syntaxes_suggestion_implicit,
                    suggestions,
                    Applicability::MachineApplicable,
                    style(tool_only),
                );
            }

            Mixed { implicit_suggestions, explicit_anonymous_suggestions, tool_only } => {
                // Merge removals (empty replacement) and `'_` rewrites
                // into a single multipart suggestion.
                let implicit_suggestions =
                    implicit_suggestions.into_iter().map(|s| (s, String::new()));

                let suggestions =
                    implicit_suggestions.chain(explicit_anonymous_suggestions).collect();

                diag.multipart_suggestion_with_style(
                    fluent::lint_mismatched_lifetime_syntaxes_suggestion_mixed,
                    suggestions,
                    Applicability::MachineApplicable,
                    style(tool_only),
                );
            }

            Explicit { lifetime_name, suggestions, tool_only } => {
                diag.arg("lifetime_name", lifetime_name);
                // NOTE(review): translated eagerly right after setting
                // `lifetime_name`, presumably so the message can use it; confirm.
                let msg = diag.eagerly_translate(
                    fluent::lint_mismatched_lifetime_syntaxes_suggestion_explicit,
                );
                diag.multipart_suggestion_with_style(
                    msg,
                    suggestions,
                    Applicability::MachineApplicable,
                    style(tool_only),
                );
            }
        }
    }
}

View file

@ -40,7 +40,6 @@ declare_lint_pass! {
DUPLICATE_MACRO_ATTRIBUTES,
ELIDED_LIFETIMES_IN_ASSOCIATED_CONSTANT,
ELIDED_LIFETIMES_IN_PATHS,
ELIDED_NAMED_LIFETIMES,
EXPLICIT_BUILTIN_CFGS_IN_FLAGS,
EXPORTED_PRIVATE_DEPENDENCIES,
FFI_UNWIND_CALLS,
@ -1832,38 +1831,6 @@ declare_lint! {
"hidden lifetime parameters in types are deprecated"
}
declare_lint! {
/// The `elided_named_lifetimes` lint detects when an elided
/// lifetime ends up being a named lifetime, such as `'static`
/// or some lifetime parameter `'a`.
///
/// ### Example
///
/// ```rust,compile_fail
/// #![deny(elided_named_lifetimes)]
/// struct Foo;
/// impl Foo {
/// pub fn get_mut(&'static self, x: &mut u8) -> &mut u8 {
/// unsafe { &mut *(x as *mut _) }
/// }
/// }
/// ```
///
/// {{produces}}
///
/// ### Explanation
///
/// Lifetime elision is quite useful, because it frees you from having
/// to give each lifetime its own name, but sometimes it can produce
/// somewhat surprising resolutions. In safe code, it is mostly okay,
/// because the borrow checker prevents any unsoundness, so the worst
/// case scenario is you get a confusing error message in some other place.
/// But with `unsafe` code, such unexpected resolutions may lead to unsound code.
pub ELIDED_NAMED_LIFETIMES,
Warn,
"detects when an elided lifetime gets resolved to be `'static` or some named parameter"
}
declare_lint! {
/// The `bare_trait_objects` lint suggests using `dyn Trait` for trait
/// objects.

View file

@ -9,7 +9,7 @@ use rustc_data_structures::stable_hasher::{
use rustc_error_messages::{DiagMessage, MultiSpan};
use rustc_hir::def::Namespace;
use rustc_hir::def_id::DefPathHash;
use rustc_hir::{HashStableContext, HirId, ItemLocalId, MissingLifetimeKind};
use rustc_hir::{HashStableContext, HirId, ItemLocalId};
use rustc_macros::{Decodable, Encodable, HashStable_Generic};
pub use rustc_span::edition::Edition;
use rustc_span::{Ident, MacroRulesNormalizedIdent, Span, Symbol, sym};
@ -610,12 +610,6 @@ pub enum DeprecatedSinceKind {
InVersion(String),
}
#[derive(Debug)]
pub enum ElidedLifetimeResolution {
Static,
Param(Symbol, Span),
}
// This could be a closure, but then implementing derive trait
// becomes hacky (and it gets allocated).
#[derive(Debug)]
@ -628,10 +622,6 @@ pub enum BuiltinLintDiag {
},
MacroExpandedMacroExportsAccessedByAbsolutePaths(Span),
ElidedLifetimesInPaths(usize, Span, bool, Span),
ElidedNamedLifetimes {
elided: (Span, MissingLifetimeKind),
resolution: ElidedLifetimeResolution,
},
UnknownCrateTypes {
span: Span,
candidate: Option<Symbol>,

View file

@ -1729,7 +1729,7 @@ impl<'a, 'ast, 'ra: 'ast, 'tcx> LateResolutionVisitor<'a, 'ast, 'ra, 'tcx> {
if ident.name == kw::StaticLifetime {
self.record_lifetime_res(
lifetime.id,
LifetimeRes::Static { suppress_elision_warning: false },
LifetimeRes::Static,
LifetimeElisionCandidate::Named,
);
return;
@ -1877,8 +1877,7 @@ impl<'a, 'ast, 'ra: 'ast, 'tcx> LateResolutionVisitor<'a, 'ast, 'ra, 'tcx> {
if lifetimes_in_scope.is_empty() {
self.record_lifetime_res(
lifetime.id,
// We are inside a const item, so do not warn.
LifetimeRes::Static { suppress_elision_warning: true },
LifetimeRes::Static,
elision_candidate,
);
return;
@ -2225,47 +2224,6 @@ impl<'a, 'ast, 'ra: 'ast, 'tcx> LateResolutionVisitor<'a, 'ast, 'ra, 'tcx> {
panic!("lifetime {id:?} resolved multiple times ({prev_res:?} before, {res:?} now)")
}
match candidate {
LifetimeElisionCandidate::Missing(missing @ MissingLifetime { .. }) => {
debug_assert_eq!(id, missing.id);
match res {
LifetimeRes::Static { suppress_elision_warning } => {
if !suppress_elision_warning {
self.r.lint_buffer.buffer_lint(
lint::builtin::ELIDED_NAMED_LIFETIMES,
missing.id_for_lint,
missing.span,
BuiltinLintDiag::ElidedNamedLifetimes {
elided: (missing.span, missing.kind),
resolution: lint::ElidedLifetimeResolution::Static,
},
);
}
}
LifetimeRes::Param { param, binder: _ } => {
let tcx = self.r.tcx();
self.r.lint_buffer.buffer_lint(
lint::builtin::ELIDED_NAMED_LIFETIMES,
missing.id_for_lint,
missing.span,
BuiltinLintDiag::ElidedNamedLifetimes {
elided: (missing.span, missing.kind),
resolution: lint::ElidedLifetimeResolution::Param(
tcx.item_name(param.into()),
tcx.source_span(param),
),
},
);
}
LifetimeRes::Fresh { .. }
| LifetimeRes::Infer
| LifetimeRes::Error
| LifetimeRes::ElidedAnchor { .. } => {}
}
}
LifetimeElisionCandidate::Ignore | LifetimeElisionCandidate::Named => {}
}
match res {
LifetimeRes::Param { .. } | LifetimeRes::Fresh { .. } | LifetimeRes::Static { .. } => {
if let Some(ref mut candidates) = self.lifetime_elision_candidates {
@ -2788,14 +2746,9 @@ impl<'a, 'ast, 'ra: 'ast, 'tcx> LateResolutionVisitor<'a, 'ast, 'ra, 'tcx> {
..
}) => {
self.with_static_rib(def_kind, |this| {
this.with_lifetime_rib(
LifetimeRibKind::Elided(LifetimeRes::Static {
suppress_elision_warning: true,
}),
|this| {
this.visit_ty(ty);
},
);
this.with_lifetime_rib(LifetimeRibKind::Elided(LifetimeRes::Static), |this| {
this.visit_ty(ty);
});
if let Some(expr) = expr {
// We already forbid generic params because of the above item rib,
// so it doesn't matter whether this is a trivial constant.
@ -2832,9 +2785,7 @@ impl<'a, 'ast, 'ra: 'ast, 'tcx> LateResolutionVisitor<'a, 'ast, 'ra, 'tcx> {
this.visit_generics(generics);
this.with_lifetime_rib(
LifetimeRibKind::Elided(LifetimeRes::Static {
suppress_elision_warning: true,
}),
LifetimeRibKind::Elided(LifetimeRes::Static),
|this| this.visit_ty(ty),
);

View file

@ -3440,7 +3440,7 @@ impl<'ast, 'ra: 'ast, 'tcx> LateResolutionVisitor<'_, 'ast, 'ra, 'tcx> {
maybe_static = true;
in_scope_lifetimes = vec![(
Ident::with_dummy_span(kw::StaticLifetime),
(DUMMY_NODE_ID, LifetimeRes::Static { suppress_elision_warning: false }),
(DUMMY_NODE_ID, LifetimeRes::Static),
)];
}
} else if elided_len == 0 {
@ -3452,7 +3452,7 @@ impl<'ast, 'ra: 'ast, 'tcx> LateResolutionVisitor<'_, 'ast, 'ra, 'tcx> {
maybe_static = true;
in_scope_lifetimes = vec![(
Ident::with_dummy_span(kw::StaticLifetime),
(DUMMY_NODE_ID, LifetimeRes::Static { suppress_elision_warning: false }),
(DUMMY_NODE_ID, LifetimeRes::Static),
)];
}
} else if num_params == 1 {

View file

@ -29,6 +29,7 @@ impl AbiMapping {
}
}
#[track_caller]
pub fn unwrap(self) -> CanonAbi {
self.into_option().unwrap()
}

View file

@ -593,7 +593,7 @@ impl Subdiagnostic for AddLifetimeParamsSuggestion<'_> {
matches!(
arg,
hir::GenericArg::Lifetime(lifetime)
if lifetime.is_syntactically_hidden()
if lifetime.is_implicit()
)
}) {
self.suggestions.push((

View file

@ -5,7 +5,7 @@ on:
concurrency:
# Make sure that new pushes cancel running jobs
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.sha }}
cancel-in-progress: true
env:
@ -13,7 +13,7 @@ env:
RUSTDOCFLAGS: -Dwarnings
RUSTFLAGS: -Dwarnings
RUST_BACKTRACE: full
BENCHMARK_RUSTC: nightly-2025-01-16 # Pin the toolchain for reproducable results
BENCHMARK_RUSTC: nightly-2025-05-28 # Pin the toolchain for reproducible results
jobs:
# Determine which tests should be run based on changed files.
@ -108,8 +108,6 @@ jobs:
- name: Print runner information
run: uname -a
- uses: actions/checkout@v4
with:
submodules: true
- name: Install Rust (rustup)
shell: bash
run: |
@ -119,7 +117,6 @@ jobs:
rustup update "$channel" --no-self-update
rustup default "$channel"
rustup target add "${{ matrix.target }}"
rustup component add llvm-tools-preview
- uses: taiki-e/install-action@nextest
- uses: Swatinem/rust-cache@v2
with:
@ -147,6 +144,10 @@ jobs:
shell: bash
- run: echo "RUST_COMPILER_RT_ROOT=$(realpath ./compiler-rt)" >> "$GITHUB_ENV"
shell: bash
- name: Download musl source
run: ./ci/update-musl.sh
shell: bash
- name: Verify API list
if: matrix.os == 'ubuntu-24.04'
@ -183,8 +184,6 @@ jobs:
timeout-minutes: 10
steps:
- uses: actions/checkout@v4
with:
submodules: true
# Unlike rustfmt, stable clippy does not work on code with nightly features.
- name: Install nightly `clippy`
run: |
@ -192,16 +191,22 @@ jobs:
rustup default nightly
rustup component add clippy
- uses: Swatinem/rust-cache@v2
- name: Download musl source
run: ./ci/update-musl.sh
- run: cargo clippy --workspace --all-targets
benchmarks:
name: Benchmarks
runs-on: ubuntu-24.04
timeout-minutes: 20
strategy:
fail-fast: false
matrix:
include:
- target: x86_64-unknown-linux-gnu
os: ubuntu-24.04
runs-on: ${{ matrix.os }}
steps:
- uses: actions/checkout@master
with:
submodules: true
- uses: taiki-e/install-action@cargo-binstall
- name: Set up dependencies
@ -216,12 +221,16 @@ jobs:
cargo binstall -y iai-callgrind-runner --version "$iai_version"
sudo apt-get install valgrind
- uses: Swatinem/rust-cache@v2
with:
key: ${{ matrix.target }}
- name: Download musl source
run: ./ci/update-musl.sh
- name: Run icount benchmarks
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
PR_NUMBER: ${{ github.event.pull_request.number }}
run: ./ci/bench-icount.sh
run: ./ci/bench-icount.sh ${{ matrix.target }}
- name: Upload the benchmark baseline
uses: actions/upload-artifact@v4
@ -249,8 +258,6 @@ jobs:
timeout-minutes: 10
steps:
- uses: actions/checkout@v4
with:
submodules: true
- name: Install Rust (rustup)
run: rustup update nightly --no-self-update && rustup default nightly
shell: bash
@ -285,8 +292,6 @@ jobs:
timeout-minutes: 10
steps:
- uses: actions/checkout@v4
with:
submodules: true
- name: Install stable `rustfmt`
run: rustup set profile minimal && rustup default stable && rustup component add rustfmt
- run: cargo fmt -- --check
@ -310,13 +315,13 @@ jobs:
TO_TEST: ${{ matrix.to_test }}
steps:
- uses: actions/checkout@v4
with:
submodules: true
- name: Install Rust
run: |
rustup update nightly --no-self-update
rustup default nightly
- uses: Swatinem/rust-cache@v2
- name: download musl source
run: ./ci/update-musl.sh
- name: Run extensive tests
run: ./ci/run-extensive.sh
- name: Print test logs if available

View file

@ -14,3 +14,6 @@ iai-home
*.bk
*.rs.bk
.#*
# Manually managed
crates/musl-math-sys/musl

View file

@ -1,4 +0,0 @@
[submodule "crates/musl-math-sys/musl"]
path = crates/musl-math-sys/musl
url = https://git.musl-libc.org/git/musl
shallow = true

View file

@ -3,9 +3,11 @@ resolver = "2"
members = [
"builtins-test",
"compiler-builtins",
"crates/josh-sync",
"crates/libm-macros",
"crates/musl-math-sys",
"crates/panic-handler",
"crates/symbol-check",
"crates/util",
"libm",
"libm-test",

View file

@ -5,7 +5,7 @@ This repository contains two main crates:
* `compiler-builtins`: symbols that the compiler expects to be available at
link time
* `libm`: a Rust implementation of C math libraries, used to provide
implementations in `ocre`.
implementations in `core`.
More details are at [compiler-builtins/README.md](compiler-builtins/README.md)
and [libm/README.md](libm/README.md).

View file

@ -1,12 +1,12 @@
[package]
name = "builtins-test-intrinsics"
version = "0.1.0"
edition = "2021"
edition = "2024"
publish = false
license = "MIT OR Apache-2.0"
[dependencies]
compiler_builtins = { path = "../compiler-builtins", features = ["compiler-builtins"]}
compiler_builtins = { path = "../compiler-builtins", features = ["compiler-builtins"] }
panic-handler = { path = "../crates/panic-handler" }
[features]

View file

@ -13,11 +13,14 @@
#![no_std]
#![no_main]
// Ensure this `compiler_builtins` gets used, rather than the version injected from the sysroot.
extern crate compiler_builtins;
extern crate panic_handler;
// SAFETY: no definitions, only used for linking
#[cfg(all(not(thumb), not(windows), not(target_arch = "wasm32")))]
#[link(name = "c")]
extern "C" {}
unsafe extern "C" {}
// Every function in this module maps will be lowered to an intrinsic by LLVM, if the platform
// doesn't have native support for the operation used in the function. ARM has a naming convention
@ -651,22 +654,23 @@ fn something_with_a_dtor(f: &dyn Fn()) {
#[unsafe(no_mangle)]
#[cfg(not(thumb))]
fn main(_argc: core::ffi::c_int, _argv: *const *const u8) -> core::ffi::c_int {
extern "C" fn main(_argc: core::ffi::c_int, _argv: *const *const u8) -> core::ffi::c_int {
run();
0
}
#[unsafe(no_mangle)]
#[cfg(thumb)]
pub fn _start() -> ! {
extern "C" fn _start() -> ! {
run();
loop {}
}
// SAFETY: no definitions, only used for linking
#[cfg(windows)]
#[link(name = "kernel32")]
#[link(name = "msvcrt")]
extern "C" {}
unsafe extern "C" {}
// ARM targets need these symbols
#[unsafe(no_mangle)]

View file

@ -10,11 +10,11 @@ license = "MIT AND Apache-2.0 WITH LLVM-exception AND (MIT OR Apache-2.0)"
# For fuzzing tests we want a deterministic seedable RNG. We also eliminate potential
# problems with system RNGs on the variety of platforms this crate is tested on.
# `xoshiro128**` is used for its quality, size, and speed at generating `u32` shift amounts.
rand_xoshiro = "0.6"
rand_xoshiro = "0.7"
# To compare float builtins against
rustc_apfloat = "0.2.1"
rustc_apfloat = "0.2.2"
# Really a dev dependency, but dev dependencies can't be optional
iai-callgrind = { version = "0.14.0", optional = true }
iai-callgrind = { version = "0.14.1", optional = true }
[dependencies.compiler_builtins]
path = "../compiler-builtins"
@ -22,7 +22,7 @@ default-features = false
features = ["unstable-public-internals"]
[dev-dependencies]
criterion = { version = "0.5.1", default-features = false, features = ["cargo_bench_support"] }
criterion = { version = "0.6.0", default-features = false, features = ["cargo_bench_support"] }
paste = "1.0.15"
[target.'cfg(all(target_arch = "arm", not(any(target_env = "gnu", target_env = "musl")), target_os = "linux"))'.dev-dependencies]

View file

@ -1,12 +1,23 @@
#![cfg_attr(f128_enabled, feature(f128))]
use builtins_test::float_bench;
use compiler_builtins::float::cmp;
use compiler_builtins::float::cmp::{self, CmpResult};
use criterion::{Criterion, criterion_main};
/// `gt` symbols are allowed to return differing results, they just get compared
/// to 0.
fn gt_res_eq(a: i32, b: i32) -> bool {
fn gt_res_eq(mut a: CmpResult, mut b: CmpResult) -> bool {
// FIXME: Our CmpResult used to be `i32`, but GCC/LLVM expect `isize`. on 64-bit platforms,
// this means the top half of the word may be garbage if built with an old version of
// `compiler-builtins`, so add a hack around this.
//
// This can be removed once a version of `compiler-builtins` with the return type fix makes
// it upstream.
if size_of::<CmpResult>() == 8 {
a = a as i32 as CmpResult;
b = b as i32 as CmpResult;
}
let a_lt_0 = a <= 0;
let b_lt_0 = b <= 0;
(a_lt_0 && b_lt_0) || (!a_lt_0 && !b_lt_0)
@ -14,14 +25,14 @@ fn gt_res_eq(a: i32, b: i32) -> bool {
float_bench! {
name: cmp_f32_gt,
sig: (a: f32, b: f32) -> i32,
sig: (a: f32, b: f32) -> CmpResult,
crate_fn: cmp::__gtsf2,
sys_fn: __gtsf2,
sys_available: all(),
output_eq: gt_res_eq,
asm: [
#[cfg(target_arch = "x86_64")] {
let ret: i32;
let ret: CmpResult;
asm!(
"xor {ret:e}, {ret:e}",
"ucomiss {a}, {b}",
@ -36,7 +47,7 @@ float_bench! {
};
#[cfg(target_arch = "aarch64")] {
let ret: i32;
let ret: CmpResult;
asm!(
"fcmp {a:s}, {b:s}",
"cset {ret:w}, gt",
@ -53,13 +64,13 @@ float_bench! {
float_bench! {
name: cmp_f32_unord,
sig: (a: f32, b: f32) -> i32,
sig: (a: f32, b: f32) -> CmpResult,
crate_fn: cmp::__unordsf2,
sys_fn: __unordsf2,
sys_available: all(),
asm: [
#[cfg(target_arch = "x86_64")] {
let ret: i32;
let ret: CmpResult;
asm!(
"xor {ret:e}, {ret:e}",
"ucomiss {a}, {b}",
@ -74,7 +85,7 @@ float_bench! {
};
#[cfg(target_arch = "aarch64")] {
let ret: i32;
let ret: CmpResult;
asm!(
"fcmp {a:s}, {b:s}",
"cset {ret:w}, vs",
@ -91,14 +102,14 @@ float_bench! {
float_bench! {
name: cmp_f64_gt,
sig: (a: f64, b: f64) -> i32,
sig: (a: f64, b: f64) -> CmpResult,
crate_fn: cmp::__gtdf2,
sys_fn: __gtdf2,
sys_available: all(),
output_eq: gt_res_eq,
asm: [
#[cfg(target_arch = "x86_64")] {
let ret: i32;
let ret: CmpResult;
asm!(
"xor {ret:e}, {ret:e}",
"ucomisd {a}, {b}",
@ -113,7 +124,7 @@ float_bench! {
};
#[cfg(target_arch = "aarch64")] {
let ret: i32;
let ret: CmpResult;
asm!(
"fcmp {a:d}, {b:d}",
"cset {ret:w}, gt",
@ -130,13 +141,13 @@ float_bench! {
float_bench! {
name: cmp_f64_unord,
sig: (a: f64, b: f64) -> i32,
sig: (a: f64, b: f64) -> CmpResult,
crate_fn: cmp::__unorddf2,
sys_fn: __unorddf2,
sys_available: all(),
asm: [
#[cfg(target_arch = "x86_64")] {
let ret: i32;
let ret: CmpResult;
asm!(
"xor {ret:e}, {ret:e}",
"ucomisd {a}, {b}",
@ -151,7 +162,7 @@ float_bench! {
};
#[cfg(target_arch = "aarch64")] {
let ret: i32;
let ret: CmpResult;
asm!(
"fcmp {a:d}, {b:d}",
"cset {ret:w}, vs",
@ -168,7 +179,7 @@ float_bench! {
float_bench! {
name: cmp_f128_gt,
sig: (a: f128, b: f128) -> i32,
sig: (a: f128, b: f128) -> CmpResult,
crate_fn: cmp::__gttf2,
crate_fn_ppc: cmp::__gtkf2,
sys_fn: __gttf2,
@ -180,7 +191,7 @@ float_bench! {
float_bench! {
name: cmp_f128_unord,
sig: (a: f128, b: f128) -> i32,
sig: (a: f128, b: f128) -> CmpResult,
crate_fn: cmp::__unordtf2,
crate_fn_ppc: cmp::__unordkf2,
sys_fn: __unordtf2,

View file

@ -358,8 +358,8 @@ impl_testio!(float f16);
impl_testio!(float f32, f64);
#[cfg(f128_enabled)]
impl_testio!(float f128);
impl_testio!(int i16, i32, i64, i128);
impl_testio!(int u16, u32, u64, u128);
impl_testio!(int i8, i16, i32, i64, i128, isize);
impl_testio!(int u8, u16, u32, u64, u128, usize);
impl_testio!((float, int)(f32, i32));
impl_testio!((float, int)(f64, i32));
#[cfg(f128_enabled)]

View file

@ -40,6 +40,75 @@ pub const N: u32 = if cfg!(target_arch = "x86_64") && !cfg!(debug_assertions) {
10_000
};
/// Additional constants that determine how the integer gets fuzzed.
trait FuzzInt: MinInt {
    /// LUT used for maximizing the space covered and minimizing the computational cost of fuzzing
    /// in `builtins-test`. For example, Self = u128 produces [0,1,2,7,8,15,16,31,32,63,64,95,96,
    /// 111,112,119,120,125,126,127].
    const FUZZ_LENGTHS: [u8; 20] = make_fuzz_lengths(Self::BITS);

    /// The number of entries of `FUZZ_LENGTHS` actually used. The maximum is 20 for u128.
    const FUZZ_NUM: usize = {
        let log2 = Self::BITS.ilog2() as usize;
        if log2 == 3 {
            // case for u8
            6
        } else {
            // 3 entries on each extreme, 2 in the middle, and 4 for each scale of intermediate
            // boundaries.
            8 + (4 * (log2 - 4))
        }
    };
}

// Blanket impl: every `MinInt` picks up the fuzzing constants via their defaults.
impl<I> FuzzInt for I where I: MinInt {}
/// Builds the table of bit lengths used to construct fuzzing masks for an
/// integer of width `bits`. Entries cluster around 0, each byte-boundary
/// power of two up to `bits / 2`, the middle, and (mirrored) the top;
/// e.g. `bits == 128` yields
/// [0,1,2,7,8,15,16,31,32,63,64,95,96,111,112,119,120,125,126,127].
/// Only the first `FUZZ_NUM` entries are meaningful; the rest stay 0.
const fn make_fuzz_lengths(bits: u32) -> [u8; 20] {
    let mut v = [0u8; 20];
    v[0] = 0;
    v[1] = 1;
    v[2] = 2; // important for parity and the iX::MIN case when reversed
    let mut i = 3;

    // No need for any more until the byte boundary, because there should be no algorithms
    // that are sensitive to anything not next to byte boundaries after 2. We also scale
    // in powers of two, which is important to prevent u128 corner tests from getting too
    // big.
    let mut l = 8;
    while l < ((bits / 2) as u8) {
        // get both sides of the byte boundary
        v[i] = l - 1;
        v[i + 1] = l;
        i += 2;
        l *= 2;
    }

    if bits != 8 {
        // add the lower side of the middle boundary
        v[i] = ((bits / 2) - 1) as u8;
        i += 1;
    }

    // We do not want to jump directly from the Self::BITS/2 boundary to the Self::BITS
    // boundary because of algorithms that split the high part up. We reverse the scaling
    // as we go to Self::BITS.
    let mid = i;
    let mut j = 1;
    loop {
        v[i] = (bits as u8) - (v[mid - j]) - 1;
        if j == mid {
            break;
        }
        i += 1;
        j += 1;
    }
    v
}
/// Random fuzzing step. When run several times, it results in excellent fuzzing entropy such as:
/// 11110101010101011110111110011111
/// 10110101010100001011101011001010
@ -92,10 +161,9 @@ fn fuzz_step<I: Int>(rng: &mut Xoshiro128StarStar, x: &mut I) {
macro_rules! edge_cases {
($I:ident, $case:ident, $inner:block) => {
for i0 in 0..$I::FUZZ_NUM {
let mask_lo = (!$I::UnsignedInt::ZERO).wrapping_shr($I::FUZZ_LENGTHS[i0] as u32);
let mask_lo = (!$I::Unsigned::ZERO).wrapping_shr($I::FUZZ_LENGTHS[i0] as u32);
for i1 in i0..I::FUZZ_NUM {
let mask_hi =
(!$I::UnsignedInt::ZERO).wrapping_shl($I::FUZZ_LENGTHS[i1 - i0] as u32);
let mask_hi = (!$I::Unsigned::ZERO).wrapping_shl($I::FUZZ_LENGTHS[i1 - i0] as u32);
let $case = I::from_unsigned(mask_lo & mask_hi);
$inner
}
@ -107,7 +175,7 @@ macro_rules! edge_cases {
/// edge cases, followed by a more random fuzzer that runs `n` times.
pub fn fuzz<I: Int, F: FnMut(I)>(n: u32, mut f: F)
where
<I as MinInt>::UnsignedInt: Int,
<I as MinInt>::Unsigned: Int,
{
// edge case tester. Calls `f` 210 times for u128.
// zero gets skipped by the loop
@ -128,7 +196,7 @@ where
/// The same as `fuzz`, except `f` has two inputs.
pub fn fuzz_2<I: Int, F: Fn(I, I)>(n: u32, f: F)
where
<I as MinInt>::UnsignedInt: Int,
<I as MinInt>::Unsigned: Int,
{
// Check cases where the first and second inputs are zero. Both call `f` 210 times for `u128`.
edge_cases!(I, case, {

View file

@ -24,7 +24,8 @@ macro_rules! panic {
};
}
extern "C" {
// SAFETY: defined in compiler-builtins
unsafe extern "aapcs" {
fn __aeabi_memclr4(dest: *mut u8, n: usize);
fn __aeabi_memset4(dest: *mut u8, n: usize, c: u32);
}

View file

@ -22,7 +22,8 @@ macro_rules! panic {
};
}
extern "C" {
// SAFETY: defined in compiler-builtins
unsafe extern "aapcs" {
fn __aeabi_memcpy(dest: *mut u8, src: *const u8, n: usize);
fn __aeabi_memcpy4(dest: *mut u8, src: *const u8, n: usize);
}

View file

@ -24,7 +24,8 @@ macro_rules! panic {
};
}
extern "C" {
// SAFETY: defined in compiler-builtins
unsafe extern "aapcs" {
fn __aeabi_memset4(dest: *mut u8, n: usize, c: u32);
}

View file

@ -58,8 +58,6 @@ pow! {
}
#[cfg(f128_enabled)]
// FIXME(f16_f128): MSVC cannot build these until `__divtf3` is available in nightly.
#[cfg(not(target_env = "msvc"))]
#[cfg(not(any(target_arch = "powerpc", target_arch = "powerpc64")))]
pow! {
f128, 1e-36, __powitf2, not(feature = "no-sys-f128");

View file

@ -2,10 +2,21 @@
set -eux
target="${1:-}"
if [ -z "$target" ]; then
host_target=$(rustc -vV | awk '/^host/ { print $2 }')
echo "Defaulted to host target $host_target"
target="$host_target"
fi
iai_home="iai-home"
# Use the arch as a tag to disambiguate artifacts
tag="$(echo "$target" | cut -d'-' -f1)"
# Download the baseline from master
./ci/ci-util.py locate-baseline --download --extract
./ci/ci-util.py locate-baseline --download --extract --tag "$tag"
# Run benchmarks once
function run_icount_benchmarks() {
@ -35,16 +46,18 @@ function run_icount_benchmarks() {
shift
done
# Run iai-callgrind benchmarks
cargo bench "${cargo_args[@]}" -- "${iai_args[@]}"
# Run iai-callgrind benchmarks. Do this in a subshell with `&& true` to
# capture rather than exit on error.
(cargo bench "${cargo_args[@]}" -- "${iai_args[@]}") && true
exit_code="$?"
# NB: iai-callgrind should exit on error but does not, so we inspect the summary
# for errors. See https://github.com/iai-callgrind/iai-callgrind/issues/337
if [ -n "${PR_NUMBER:-}" ]; then
# If this is for a pull request, ignore regressions if specified.
./ci/ci-util.py check-regressions --home "$iai_home" --allow-pr-override "$PR_NUMBER"
if [ "$exit_code" -eq 0 ]; then
echo "Benchmarks completed with no regressions"
elif [ -z "${PR_NUMBER:-}" ]; then
# Disregard regressions after merge
echo "Benchmarks completed with regressions; ignoring (not in a PR)"
else
./ci/ci-util.py check-regressions --home "$iai_home" || true
./ci/ci-util.py handle-bench-regressions "$PR_NUMBER"
fi
}
@ -53,6 +66,6 @@ run_icount_benchmarks --features force-soft-floats -- --save-baseline=softfloat
run_icount_benchmarks -- --save-baseline=hardfloat
# Name and tar the new baseline
name="baseline-icount-$(date -u +'%Y%m%d%H%M')-${GITHUB_SHA:0:12}"
name="baseline-icount-$tag-$(date -u +'%Y%m%d%H%M')-${GITHUB_SHA:0:12}"
echo "BASELINE_NAME=$name" >>"$GITHUB_ENV"
tar cJf "$name.tar.xz" "$iai_home"

View file

@ -11,7 +11,7 @@ import re
import subprocess as sp
import sys
from dataclasses import dataclass
from glob import glob, iglob
from glob import glob
from inspect import cleandoc
from os import getenv
from pathlib import Path
@ -28,21 +28,20 @@ USAGE = cleandoc(
Calculate a matrix of which functions had source change, print that as
a JSON object.
locate-baseline [--download] [--extract]
locate-baseline [--download] [--extract] [--tag TAG]
Locate the most recent benchmark baseline available in CI and, if flags
specify, download and extract it. Never exits with nonzero status if
downloading fails.
`--tag` can be specified to look for artifacts with a specific tag, such as
for a specific architecture.
Note that `--extract` will overwrite files in `iai-home`.
check-regressions [--home iai-home] [--allow-pr-override pr_number]
Check `iai-home` (or `iai-home` if unspecified) for `summary.json`
files and see if there are any regressions. This is used as a workaround
for `iai-callgrind` not exiting with error status; see
<https://github.com/iai-callgrind/iai-callgrind/issues/337>.
If `--allow-pr-override` is specified, the regression check will not exit
with failure if any line in the PR starts with `allow-regressions`.
handle-bench-regressions PR_NUMBER
Exit with success if the pull request contains a line starting with
`ci: allow-regressions`, indicating that regressions in benchmarks should
be accepted. Otherwise, exit 1.
"""
)
@ -50,7 +49,7 @@ REPO_ROOT = Path(__file__).parent.parent
GIT = ["git", "-C", REPO_ROOT]
DEFAULT_BRANCH = "master"
WORKFLOW_NAME = "CI" # Workflow that generates the benchmark artifacts
ARTIFACT_GLOB = "baseline-icount*"
ARTIFACT_PREFIX = "baseline-icount*"
# Place this in a PR body to skip regression checks (must be at the start of a line).
REGRESSION_DIRECTIVE = "ci: allow-regressions"
# Place this in a PR body to skip extensive tests
@ -278,6 +277,7 @@ def locate_baseline(flags: list[str]) -> None:
download = False
extract = False
tag = ""
while len(flags) > 0:
match flags[0]:
@ -285,6 +285,9 @@ def locate_baseline(flags: list[str]) -> None:
download = True
case "--extract":
extract = True
case "--tag":
tag = flags[1]
flags = flags[1:]
case _:
eprint(USAGE)
exit(1)
@ -333,8 +336,10 @@ def locate_baseline(flags: list[str]) -> None:
eprint("skipping download step")
return
artifact_glob = f"{ARTIFACT_PREFIX}{f"-{tag}" if tag else ""}*"
sp.run(
["gh", "run", "download", str(job_id), f"--pattern={ARTIFACT_GLOB}"],
["gh", "run", "download", str(job_id), f"--pattern={artifact_glob}"],
check=False,
)
@ -344,7 +349,7 @@ def locate_baseline(flags: list[str]) -> None:
# Find the baseline with the most recent timestamp. GH downloads the files to e.g.
# `some-dirname/some-dirname.tar.xz`, so just glob the whole thing together.
candidate_baselines = glob(f"{ARTIFACT_GLOB}/{ARTIFACT_GLOB}")
candidate_baselines = glob(f"{artifact_glob}/{artifact_glob}")
if len(candidate_baselines) == 0:
eprint("no possible baseline directories found")
return
@ -356,64 +361,22 @@ def locate_baseline(flags: list[str]) -> None:
eprint("baseline extracted successfully")
def check_iai_regressions(args: list[str]):
"""Find regressions in iai summary.json files, exit with failure if any are
found.
"""
def handle_bench_regressions(args: list[str]):
"""Exit with error unless the PR message contains an ignore directive."""
iai_home_str = "iai-home"
pr_number = None
match args:
case [pr_number]:
pr_number = pr_number
case _:
eprint(USAGE)
exit(1)
while len(args) > 0:
match args:
case ["--home", home, *rest]:
iai_home_str = home
args = rest
case ["--allow-pr-override", pr_num, *rest]:
pr_number = pr_num
args = rest
case _:
eprint(USAGE)
exit(1)
iai_home = Path(iai_home_str)
found_summaries = False
regressions: list[dict] = []
for summary_path in iglob("**/summary.json", root_dir=iai_home, recursive=True):
found_summaries = True
with open(iai_home / summary_path, "r") as f:
summary = json.load(f)
summary_regs = []
run = summary["callgrind_summary"]["callgrind_run"]
fname = summary["function_name"]
id = summary["id"]
name_entry = {"name": f"{fname}.{id}"}
for segment in run["segments"]:
summary_regs.extend(segment["regressions"])
summary_regs.extend(run["total"]["regressions"])
regressions.extend(name_entry | reg for reg in summary_regs)
if not found_summaries:
eprint(f"did not find any summary.json files within {iai_home}")
exit(1)
if len(regressions) == 0:
eprint("No regressions found")
pr = PrInfo.load(pr_number)
if pr.contains_directive(REGRESSION_DIRECTIVE):
eprint("PR allows regressions")
return
eprint("Found regressions:", json.dumps(regressions, indent=4))
if pr_number is not None:
pr = PrInfo.load(pr_number)
if pr.contains_directive(REGRESSION_DIRECTIVE):
eprint("PR allows regressions, returning")
return
eprint("Regressions were found; benchmark failed")
exit(1)
@ -424,8 +387,8 @@ def main():
ctx.emit_workflow_output()
case ["locate-baseline", *flags]:
locate_baseline(flags)
case ["check-regressions", *args]:
check_iai_regressions(args)
case ["handle-bench-regressions", *args]:
handle_bench_regressions(args)
case ["--help" | "-h"]:
print(USAGE)
exit()

View file

@ -47,130 +47,49 @@ else
fi
fi
# Ensure there are no duplicate symbols or references to `core` when
# `compiler-builtins` is built with various features. Symcheck invokes Cargo to
# build with the arguments we provide it, then validates the built artifacts.
symcheck=(cargo run -p symbol-check --release)
[[ "$target" = "wasm"* ]] && symcheck+=(--features wasm)
symcheck+=(-- build-and-check)
declare -a rlib_paths
"${symcheck[@]}" -p compiler_builtins --target "$target"
"${symcheck[@]}" -p compiler_builtins --target "$target" --release
"${symcheck[@]}" -p compiler_builtins --target "$target" --features c
"${symcheck[@]}" -p compiler_builtins --target "$target" --features c --release
"${symcheck[@]}" -p compiler_builtins --target "$target" --features no-asm
"${symcheck[@]}" -p compiler_builtins --target "$target" --features no-asm --release
"${symcheck[@]}" -p compiler_builtins --target "$target" --features no-f16-f128
"${symcheck[@]}" -p compiler_builtins --target "$target" --features no-f16-f128 --release
# Set the `rlib_paths` global array to a list of all compiler-builtins rlibs
update_rlib_paths() {
if [ -d /builtins-target ]; then
rlib_paths=( /builtins-target/"${target}"/debug/deps/libcompiler_builtins-*.rlib )
else
rlib_paths=( target/"${target}"/debug/deps/libcompiler_builtins-*.rlib )
fi
}
# Remove any existing artifacts from previous tests that don't set #![compiler_builtins]
update_rlib_paths
rm -f "${rlib_paths[@]}"
cargo build -p compiler_builtins --target "$target"
cargo build -p compiler_builtins --target "$target" --release
cargo build -p compiler_builtins --target "$target" --features c
cargo build -p compiler_builtins --target "$target" --features c --release
cargo build -p compiler_builtins --target "$target" --features no-asm
cargo build -p compiler_builtins --target "$target" --features no-asm --release
cargo build -p compiler_builtins --target "$target" --features no-f16-f128
cargo build -p compiler_builtins --target "$target" --features no-f16-f128 --release
PREFIX=${target//unknown-/}-
case "$target" in
armv7-*)
PREFIX=arm-linux-gnueabihf-
;;
thumb*)
PREFIX=arm-none-eabi-
;;
*86*-*)
PREFIX=
;;
esac
NM=$(find "$(rustc --print sysroot)" \( -name llvm-nm -o -name llvm-nm.exe \) )
if [ "$NM" = "" ]; then
NM="${PREFIX}nm"
fi
# i686-pc-windows-gnu tools have a dependency on some DLLs, so run it with
# rustup run to ensure that those are in PATH.
TOOLCHAIN="$(rustup show active-toolchain | sed 's/ (default)//')"
if [[ "$TOOLCHAIN" == *i686-pc-windows-gnu ]]; then
NM="rustup run $TOOLCHAIN $NM"
fi
# Look out for duplicated symbols when we include the compiler-rt (C) implementation
update_rlib_paths
for rlib in "${rlib_paths[@]}"; do
set +x
echo "================================================================"
echo "checking $rlib for duplicate symbols"
echo "================================================================"
set -x
duplicates_found=0
# NOTE On i586, It's normal that the get_pc_thunk symbol appears several
# times so ignore it
$NM -g --defined-only "$rlib" 2>&1 |
sort |
uniq -d |
grep -v __x86.get_pc_thunk --quiet |
grep 'T __' && duplicates_found=1
if [ "$duplicates_found" != 0 ]; then
echo "error: found duplicate symbols"
exit 1
else
echo "success; no duplicate symbols found"
fi
done
rm -f "${rlib_paths[@]}"
build_intrinsics_test() {
cargo build \
run_intrinsics_test() {
args=(
--target "$target" --verbose \
--manifest-path builtins-test-intrinsics/Cargo.toml "$@"
--manifest-path builtins-test-intrinsics/Cargo.toml
)
args+=( "$@" )
# symcheck also checks the results of builtins-test-intrinsics
"${symcheck[@]}" "${args[@]}"
# FIXME: we get access violations on Windows, our entrypoint may need to
# be tweaked.
if [ "${BUILD_ONLY:-}" != "1" ] && ! [[ "$target" = *"windows"* ]]; then
cargo run "${args[@]}"
fi
}
# Verify that we haven't dropped any intrinsics/symbols
build_intrinsics_test
build_intrinsics_test --release
build_intrinsics_test --features c
build_intrinsics_test --features c --release
run_intrinsics_test
run_intrinsics_test --release
run_intrinsics_test --features c
run_intrinsics_test --features c --release
# Verify that there are no undefined symbols to `panic` within our
# implementations
CARGO_PROFILE_DEV_LTO=true build_intrinsics_test
CARGO_PROFILE_RELEASE_LTO=true build_intrinsics_test --release
# Ensure no references to any symbols from core
update_rlib_paths
for rlib in "${rlib_paths[@]}"; do
set +x
echo "================================================================"
echo "checking $rlib for references to core"
echo "================================================================"
set -x
tmpdir="${CARGO_TARGET_DIR:-target}/tmp"
test -d "$tmpdir" || mkdir "$tmpdir"
defined="$tmpdir/defined_symbols.txt"
undefined="$tmpdir/defined_symbols.txt"
$NM --quiet -U "$rlib" | grep 'T _ZN4core' | awk '{print $3}' | sort | uniq > "$defined"
$NM --quiet -u "$rlib" | grep 'U _ZN4core' | awk '{print $2}' | sort | uniq > "$undefined"
grep_has_results=0
grep -v -F -x -f "$defined" "$undefined" && grep_has_results=1
if [ "$target" = "powerpc64-unknown-linux-gnu" ]; then
echo "FIXME: powerpc64 fails these tests"
elif [ "$grep_has_results" != 0 ]; then
echo "error: found unexpected references to core"
exit 1
else
echo "success; no references to core found"
fi
done
CARGO_PROFILE_DEV_LTO=true run_intrinsics_test
CARGO_PROFILE_RELEASE_LTO=true run_intrinsics_test --release
# Test libm

View file

@ -0,0 +1,15 @@
#!/bin/sh
# Download musl to a repository for `musl-math-sys`
set -eux
url=git://git.musl-libc.org/musl
ref=c47ad25ea3b484e10326f933e927c0bc8cded3da
dst=crates/musl-math-sys/musl
if ! [ -d "$dst" ]; then
git clone "$url" "$dst" --single-branch --depth=1000
fi
git -C "$dst" fetch "$url" --depth=1
git -C "$dst" checkout "$ref"

View file

@ -7,6 +7,21 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
## [Unreleased]
## [0.1.160](https://github.com/rust-lang/compiler-builtins/compare/compiler_builtins-v0.1.159...compiler_builtins-v0.1.160) - 2025-05-29
### Other
- Change `compiler-builtins` to edition 2024
- Remove unneeded C symbols
- Reuse `libm`'s `CastInto` and `CastFrom` in `compiler-builtins`
- Reuse `MinInt` and `Int` from `libm` in `compiler-builtins`
- Update `CmpResult` to use a pointer-sized return type
- Enable `__powitf2` on MSVC
- Fix `i256::MAX`
- Add a note saying why we use `frintx` rather than `frintn`
- Typo in README.md
- Clean up unused files
## [0.1.159](https://github.com/rust-lang/compiler-builtins/compare/compiler_builtins-v0.1.158...compiler_builtins-v0.1.159) - 2025-05-12
### Other

View file

@ -1,13 +1,13 @@
[package]
authors = ["Jorge Aparicio <japaricious@gmail.com>"]
name = "compiler_builtins"
version = "0.1.159"
version = "0.1.160"
license = "MIT AND Apache-2.0 WITH LLVM-exception AND (MIT OR Apache-2.0)"
readme = "README.md"
repository = "https://github.com/rust-lang/compiler-builtins"
homepage = "https://github.com/rust-lang/compiler-builtins"
documentation = "https://docs.rs/compiler_builtins"
edition = "2021"
edition = "2024"
description = "Compiler intrinsics used by the Rust compiler."
links = "compiler-rt"
@ -19,13 +19,10 @@ test = false
[dependencies]
# For more information on this dependency see
# https://github.com/rust-lang/rust/tree/master/library/rustc-std-workspace-core
core = { version = "1.0.0", optional = true, package = "rustc-std-workspace-core" }
core = { version = "1.0.1", optional = true, package = "rustc-std-workspace-core" }
[build-dependencies]
cc = { optional = true, version = "1.0" }
[dev-dependencies]
panic-handler = { path = "../crates/panic-handler" }
cc = { optional = true, version = "1.2" }
[features]
default = ["compiler-builtins"]

View file

@ -555,7 +555,6 @@ mod c {
if (target.arch == "aarch64" || target.arch == "arm64ec") && consider_float_intrinsics {
sources.extend(&[
("__comparetf2", "comparetf2.c"),
("__fe_getround", "fp_mode.c"),
("__fe_raise_inexact", "fp_mode.c"),
]);
@ -570,11 +569,11 @@ mod c {
}
if target.arch == "mips64" {
sources.extend(&[("__netf2", "comparetf2.c"), ("__fe_getround", "fp_mode.c")]);
sources.extend(&[("__fe_getround", "fp_mode.c")]);
}
if target.arch == "loongarch64" {
sources.extend(&[("__netf2", "comparetf2.c"), ("__fe_getround", "fp_mode.c")]);
sources.extend(&[("__fe_getround", "fp_mode.c")]);
}
// Remove the assembly implementations that won't compile for the target

View file

@ -1,13 +1,16 @@
#![cfg(not(feature = "no-asm"))]
// Interfaces used by naked trampolines.
extern "C" {
// SAFETY: these are defined in compiler-builtins
unsafe extern "C" {
fn __udivmodsi4(a: u32, b: u32, rem: *mut u32) -> u32;
fn __udivmoddi4(a: u64, b: u64, rem: *mut u64) -> u64;
fn __divmoddi4(a: i64, b: i64, rem: *mut i64) -> i64;
}
extern "aapcs" {
// SAFETY: these are defined in compiler-builtins
// FIXME(extern_custom), this isn't always the correct ABI
unsafe extern "aapcs" {
// AAPCS is not always the correct ABI for these intrinsics, but we only use this to
// forward another `__aeabi_` call so it doesn't matter.
fn __aeabi_idiv(a: i32, b: i32) -> i32;

View file

@ -1,5 +1,5 @@
use crate::float::Float;
use crate::int::{CastInto, Int, MinInt};
use crate::int::{CastFrom, CastInto, Int, MinInt};
/// Returns `a + b`
fn add<F: Float>(a: F, b: F) -> F
@ -12,7 +12,7 @@ where
let one = F::Int::ONE;
let zero = F::Int::ZERO;
let bits = F::BITS.cast();
let bits: F::Int = F::BITS.cast();
let significand_bits = F::SIG_BITS;
let max_exponent = F::EXP_SAT;
@ -115,9 +115,10 @@ where
let align = a_exponent.wrapping_sub(b_exponent).cast();
if align != MinInt::ZERO {
if align < bits {
let sticky =
F::Int::from_bool(b_significand << bits.wrapping_sub(align).cast() != MinInt::ZERO);
b_significand = (b_significand >> align.cast()) | sticky;
let sticky = F::Int::from_bool(
b_significand << u32::cast_from(bits.wrapping_sub(align)) != MinInt::ZERO,
);
b_significand = (b_significand >> u32::cast_from(align)) | sticky;
} else {
b_significand = one; // sticky; b is known to be non-zero.
}
@ -132,8 +133,8 @@ where
// If partial cancellation occurred, we need to left-shift the result
// and adjust the exponent:
if a_significand < implicit_bit << 3 {
let shift =
a_significand.leading_zeros() as i32 - (implicit_bit << 3).leading_zeros() as i32;
let shift = a_significand.leading_zeros() as i32
- (implicit_bit << 3u32).leading_zeros() as i32;
a_significand <<= shift;
a_exponent -= shift;
}
@ -159,14 +160,15 @@ where
// Result is denormal before rounding; the exponent is zero and we
// need to shift the significand.
let shift = (1 - a_exponent).cast();
let sticky =
F::Int::from_bool((a_significand << bits.wrapping_sub(shift).cast()) != MinInt::ZERO);
a_significand = (a_significand >> shift.cast()) | sticky;
let sticky = F::Int::from_bool(
(a_significand << u32::cast_from(bits.wrapping_sub(shift))) != MinInt::ZERO,
);
a_significand = (a_significand >> u32::cast_from(shift)) | sticky;
a_exponent = 0;
}
// Low three bits are round, guard, and sticky.
let a_significand_i32: i32 = a_significand.cast();
let a_significand_i32: i32 = a_significand.cast_lossy();
let round_guard_sticky: i32 = a_significand_i32 & 0x7;
// Shift the significand into place, and mask off the implicit bit.

View file

@ -2,14 +2,23 @@
use crate::float::Float;
use crate::int::MinInt;
use crate::support::cfg_if;
// https://github.com/llvm/llvm-project/blob/1e6ba3cd2fe96be00b6ed6ba28b3d9f9271d784d/compiler-rt/lib/builtins/fp_compare_impl.inc#L22
#[cfg(target_arch = "avr")]
pub type CmpResult = i8;
// https://github.com/llvm/llvm-project/blob/1e6ba3cd2fe96be00b6ed6ba28b3d9f9271d784d/compiler-rt/lib/builtins/fp_compare_impl.inc#L25
#[cfg(not(target_arch = "avr"))]
pub type CmpResult = i32;
// Taken from LLVM config:
// https://github.com/llvm/llvm-project/blob/0cf3c437c18ed27d9663d87804a9a15ff6874af2/compiler-rt/lib/builtins/fp_compare_impl.inc#L11-L27
cfg_if! {
if #[cfg(any(target_arch = "aarch64", target_arch = "arm64ec"))] {
// Aarch64 uses `int` rather than a pointer-sized value.
pub type CmpResult = i32;
} else if #[cfg(target_arch = "avr")] {
// AVR uses a single byte.
pub type CmpResult = i8;
} else {
// In compiler-rt, LLP64 ABIs use `long long` and everything else uses `long`. In effect,
// this means the return value is always pointer-sized.
pub type CmpResult = isize;
}
}
#[derive(Clone, Copy)]
enum Result {

View file

@ -72,9 +72,9 @@ mod int_to_float {
F: Float,
I: Int,
F::Int: CastFrom<I>,
Conv: Fn(I::UnsignedInt) -> F::Int,
Conv: Fn(I::Unsigned) -> F::Int,
{
let sign_bit = F::Int::cast_from(i >> (I::BITS - 1)) << (F::BITS - 1);
let sign_bit = F::Int::cast_from_lossy(i >> (I::BITS - 1)) << (F::BITS - 1);
F::from_bits(conv(i.unsigned_abs()) | sign_bit)
}
@ -166,7 +166,7 @@ mod int_to_float {
// Within the upper `F::BITS`, everything except for the significand
// gets truncated
let d1: u32 = (i_m >> (u128::BITS - f32::BITS - f32::SIG_BITS - 1)).cast();
let d1: u32 = (i_m >> (u128::BITS - f32::BITS - f32::SIG_BITS - 1)).cast_lossy();
// The entire rest of `i_m` gets truncated. Zero the upper `F::BITS` then just
// check if it is nonzero.
@ -313,10 +313,10 @@ intrinsics! {
fn float_to_unsigned_int<F, U>(f: F) -> U
where
F: Float,
U: Int<UnsignedInt = U>,
U: Int<Unsigned = U>,
F::Int: CastInto<U>,
F::Int: CastFrom<u32>,
F::Int: CastInto<U::UnsignedInt>,
F::Int: CastInto<U::Unsigned>,
u32: CastFrom<F::Int>,
{
float_to_int_inner::<F, U, _, _>(f.to_bits(), |i: U| i, || U::MAX)
@ -327,8 +327,8 @@ fn float_to_signed_int<F, I>(f: F) -> I
where
F: Float,
I: Int + Neg<Output = I>,
I::UnsignedInt: Int,
F::Int: CastInto<I::UnsignedInt>,
I::Unsigned: Int,
F::Int: CastInto<I::Unsigned>,
F::Int: CastFrom<u32>,
u32: CastFrom<F::Int>,
{
@ -355,27 +355,27 @@ where
I: Int,
FnFoo: FnOnce(I) -> I,
FnOob: FnOnce() -> I,
I::UnsignedInt: Int,
F::Int: CastInto<I::UnsignedInt>,
I::Unsigned: Int,
F::Int: CastInto<I::Unsigned>,
F::Int: CastFrom<u32>,
u32: CastFrom<F::Int>,
{
let int_max_exp = F::EXP_BIAS + I::MAX.ilog2() + 1;
let foobar = F::EXP_BIAS + I::UnsignedInt::BITS - 1;
let foobar = F::EXP_BIAS + I::Unsigned::BITS - 1;
if fbits < F::ONE.to_bits() {
// < 0 gets rounded to 0
I::ZERO
} else if fbits < F::Int::cast_from(int_max_exp) << F::SIG_BITS {
// >= 1, < integer max
let m_base = if I::UnsignedInt::BITS >= F::Int::BITS {
I::UnsignedInt::cast_from(fbits) << (I::BITS - F::SIG_BITS - 1)
let m_base = if I::Unsigned::BITS >= F::Int::BITS {
I::Unsigned::cast_from(fbits) << (I::BITS - F::SIG_BITS - 1)
} else {
I::UnsignedInt::cast_from(fbits >> (F::SIG_BITS - I::BITS + 1))
I::Unsigned::cast_from_lossy(fbits >> (F::SIG_BITS - I::BITS + 1))
};
// Set the implicit 1-bit.
let m: I::UnsignedInt = (I::UnsignedInt::ONE << (I::BITS - 1)) | m_base;
let m: I::Unsigned = (I::Unsigned::ONE << (I::BITS - 1)) | m_base;
// Shift based on the exponent and bias.
let s: u32 = (foobar) - u32::cast_from(fbits >> F::SIG_BITS);

View file

@ -370,7 +370,7 @@ where
let hi_corr: F::Int = corr_uq1 >> hw;
// x_UQ0 * corr_UQ1 = (x_UQ0_hw * 2^HW) * (hi_corr * 2^HW + lo_corr) - corr_UQ1
let mut x_uq0: F::Int = ((F::Int::from(x_uq0_hw) * hi_corr) << 1)
let mut x_uq0: F::Int = ((F::Int::from(x_uq0_hw) * hi_corr) << 1u32)
.wrapping_add((F::Int::from(x_uq0_hw) * lo_corr) >> (hw - 1))
// 1 to account for the highest bit of corr_UQ1 can be 1
// 1 to account for possible carry
@ -482,7 +482,7 @@ where
let ret = quotient.wrapping_shr(u32::cast_from(res_exponent.wrapping_neg()) + 1);
residual_lo = a_significand
.wrapping_shl(significand_bits.wrapping_add(CastInto::<u32>::cast(res_exponent)))
.wrapping_shl(significand_bits.wrapping_add(CastInto::<u32>::cast_lossy(res_exponent)))
.wrapping_sub(ret.wrapping_mul(b_significand) << 1);
ret
};

View file

@ -143,7 +143,7 @@ where
// a zero of the appropriate sign. Mathematically there is no need to
// handle this case separately, but we make it a special case to
// simplify the shift logic.
let shift = one.wrapping_sub(product_exponent.cast()).cast();
let shift: u32 = one.wrapping_sub(product_exponent.cast_lossy()).cast();
if shift >= bits {
return F::from_bits(product_sign);
}

View file

@ -32,8 +32,6 @@ intrinsics! {
#[ppc_alias = __powikf2]
#[cfg(f128_enabled)]
// FIXME(f16_f128): MSVC cannot build these until `__divtf3` is available in nightly.
#[cfg(not(target_env = "msvc"))]
pub extern "C" fn __powitf2(a: f128, b: i32) -> f128 {
pow(a, b)
}

View file

@ -20,10 +20,10 @@ pub trait Float:
+ ops::Rem<Output = Self>
{
/// A uint of the same width as the float
type Int: Int<OtherSign = Self::SignedInt, UnsignedInt = Self::Int>;
type Int: Int<OtherSign = Self::SignedInt, Unsigned = Self::Int>;
/// A int of the same width as the float
type SignedInt: Int + MinInt<OtherSign = Self::Int, UnsignedInt = Self::Int>;
type SignedInt: Int + MinInt<OtherSign = Self::Int, Unsigned = Self::Int>;
/// An int capable of containing the exponent bits plus a sign bit. This is signed.
type ExpInt: Int;

View file

@ -50,7 +50,7 @@ where
// The exponent of a is within the range of normal numbers in the
// destination format. We can convert by simply right-shifting with
// rounding and adjusting the exponent.
abs_result = (a_abs >> sig_bits_delta).cast();
abs_result = (a_abs >> sig_bits_delta).cast_lossy();
// Cast before shifting to prevent overflow.
let bias_diff: R::Int = src_exp_bias.wrapping_sub(dst_exp_bias).cast();
let tmp = bias_diff << R::SIG_BITS;

View file

@ -22,7 +22,7 @@ impl UAddSub for u128 {}
trait AddSub: Int
where
<Self as MinInt>::UnsignedInt: UAddSub,
<Self as MinInt>::Unsigned: UAddSub,
{
fn add(self, other: Self) -> Self {
Self::from_unsigned(self.unsigned().uadd(other.unsigned()))
@ -37,7 +37,7 @@ impl AddSub for i128 {}
trait Addo: AddSub
where
<Self as MinInt>::UnsignedInt: UAddSub,
<Self as MinInt>::Unsigned: UAddSub,
{
fn addo(self, other: Self) -> (Self, bool) {
let sum = AddSub::add(self, other);
@ -50,7 +50,7 @@ impl Addo for u128 {}
trait Subo: AddSub
where
<Self as MinInt>::UnsignedInt: UAddSub,
<Self as MinInt>::Unsigned: UAddSub,
{
fn subo(self, other: Self) -> (Self, bool) {
let sum = AddSub::sub(self, other);

View file

@ -45,7 +45,7 @@ impl i256 {
impl MinInt for u256 {
type OtherSign = i256;
type UnsignedInt = u256;
type Unsigned = u256;
const SIGNED: bool = false;
const BITS: u32 = 256;
@ -58,7 +58,7 @@ impl MinInt for u256 {
impl MinInt for i256 {
type OtherSign = u256;
type UnsignedInt = u256;
type Unsigned = u256;
const SIGNED: bool = false;
const BITS: u32 = 256;

View file

@ -9,11 +9,14 @@ pub use implementation::{leading_zeros_default, leading_zeros_riscv};
pub(crate) use implementation::{leading_zeros_default, leading_zeros_riscv};
mod implementation {
use crate::int::{CastInto, Int};
use crate::int::{CastFrom, Int};
/// Returns the number of leading binary zeros in `x`.
#[allow(dead_code)]
pub fn leading_zeros_default<T: Int + CastInto<usize>>(x: T) -> usize {
pub fn leading_zeros_default<I: Int>(x: I) -> usize
where
usize: CastFrom<I>,
{
// The basic idea is to test if the higher bits of `x` are zero and bisect the number
// of leading zeros. It is possible for all branches of the bisection to use the same
// code path by conditionally shifting the higher parts down to let the next bisection
@ -23,44 +26,48 @@ mod implementation {
// because it simplifies the final bisection step.
let mut x = x;
// the number of potential leading zeros
let mut z = T::BITS as usize;
let mut z = I::BITS as usize;
// a temporary
let mut t: T;
let mut t: I;
const { assert!(T::BITS <= 64) };
if T::BITS >= 64 {
const { assert!(I::BITS <= 64) };
if I::BITS >= 64 {
t = x >> 32;
if t != T::ZERO {
if t != I::ZERO {
z -= 32;
x = t;
}
}
if T::BITS >= 32 {
if I::BITS >= 32 {
t = x >> 16;
if t != T::ZERO {
if t != I::ZERO {
z -= 16;
x = t;
}
}
const { assert!(T::BITS >= 16) };
const { assert!(I::BITS >= 16) };
t = x >> 8;
if t != T::ZERO {
if t != I::ZERO {
z -= 8;
x = t;
}
t = x >> 4;
if t != T::ZERO {
if t != I::ZERO {
z -= 4;
x = t;
}
t = x >> 2;
if t != T::ZERO {
if t != I::ZERO {
z -= 2;
x = t;
}
// the last two bisections are combined into one conditional
t = x >> 1;
if t != T::ZERO { z - 2 } else { z - x.cast() }
if t != I::ZERO {
z - 2
} else {
z - usize::cast_from(x)
}
// We could potentially save a few cycles by using the LUT trick from
// "https://embeddedgurus.com/state-space/2014/09/
@ -82,10 +89,13 @@ mod implementation {
/// Returns the number of leading binary zeros in `x`.
#[allow(dead_code)]
pub fn leading_zeros_riscv<T: Int + CastInto<usize>>(x: T) -> usize {
pub fn leading_zeros_riscv<I: Int>(x: I) -> usize
where
usize: CastFrom<I>,
{
let mut x = x;
// the number of potential leading zeros
let mut z = T::BITS;
let mut z = I::BITS;
// a temporary
let mut t: u32;
@ -97,11 +107,11 @@ mod implementation {
// right). If we try to save an instruction by using `x < imm` for each bisection, we
// have to shift `x` left and compare with powers of two approaching `usize::MAX + 1`,
// but the immediate will never fit into 12 bits and never save an instruction.
const { assert!(T::BITS <= 64) };
if T::BITS >= 64 {
const { assert!(I::BITS <= 64) };
if I::BITS >= 64 {
// If the upper 32 bits of `x` are not all 0, `t` is set to `1 << 5`, otherwise
// `t` is set to 0.
t = ((x >= (T::ONE << 32)) as u32) << 5;
t = ((x >= (I::ONE << 32)) as u32) << 5;
// If `t` was set to `1 << 5`, then the upper 32 bits are shifted down for the
// next step to process.
x >>= t;
@ -109,27 +119,27 @@ mod implementation {
// leading zeros
z -= t;
}
if T::BITS >= 32 {
t = ((x >= (T::ONE << 16)) as u32) << 4;
if I::BITS >= 32 {
t = ((x >= (I::ONE << 16)) as u32) << 4;
x >>= t;
z -= t;
}
const { assert!(T::BITS >= 16) };
t = ((x >= (T::ONE << 8)) as u32) << 3;
const { assert!(I::BITS >= 16) };
t = ((x >= (I::ONE << 8)) as u32) << 3;
x >>= t;
z -= t;
t = ((x >= (T::ONE << 4)) as u32) << 2;
t = ((x >= (I::ONE << 4)) as u32) << 2;
x >>= t;
z -= t;
t = ((x >= (T::ONE << 2)) as u32) << 1;
t = ((x >= (I::ONE << 2)) as u32) << 1;
x >>= t;
z -= t;
t = (x >= (T::ONE << 1)) as u32;
t = (x >= (I::ONE << 1)) as u32;
x >>= t;
z -= t;
// All bits except the LSB are guaranteed to be zero for this final bisection step.
// If `x != 0` then `x == 1` and subtracts one potential zero from `z`.
z as usize - x.cast()
z as usize - usize::cast_from(x)
}
}

View file

@ -125,10 +125,10 @@ impl_normalization_shift!(
/// dependencies.
#[inline]
fn u64_by_u64_div_rem(duo: u64, div: u64) -> (u64, u64) {
if let Some(quo) = duo.checked_div(div) {
if let Some(rem) = duo.checked_rem(div) {
return (quo, rem);
}
if let Some(quo) = duo.checked_div(div)
&& let Some(rem) = duo.checked_rem(div)
{
return (quo, rem);
}
zero_div_fn()
}
@ -227,10 +227,10 @@ impl_asymmetric!(
#[inline]
#[allow(dead_code)]
fn u32_by_u32_div_rem(duo: u32, div: u32) -> (u32, u32) {
if let Some(quo) = duo.checked_div(div) {
if let Some(rem) = duo.checked_rem(div) {
return (quo, rem);
}
if let Some(quo) = duo.checked_div(div)
&& let Some(rem) = duo.checked_rem(div)
{
return (quo, rem);
}
zero_div_fn()
}

View file

@ -4,33 +4,38 @@ pub use implementation::trailing_zeros;
pub(crate) use implementation::trailing_zeros;
mod implementation {
use crate::int::{CastInto, Int};
use crate::int::{CastFrom, Int};
/// Returns number of trailing binary zeros in `x`.
#[allow(dead_code)]
pub fn trailing_zeros<T: Int + CastInto<u32> + CastInto<u16> + CastInto<u8>>(x: T) -> usize {
pub fn trailing_zeros<I: Int>(x: I) -> usize
where
u32: CastFrom<I>,
u16: CastFrom<I>,
u8: CastFrom<I>,
{
let mut x = x;
let mut r: u32 = 0;
let mut t: u32;
const { assert!(T::BITS <= 64) };
if T::BITS >= 64 {
r += ((CastInto::<u32>::cast(x) == 0) as u32) << 5; // if (x has no 32 small bits) t = 32 else 0
const { assert!(I::BITS <= 64) };
if I::BITS >= 64 {
r += ((u32::cast_from_lossy(x) == 0) as u32) << 5; // if (x has no 32 small bits) t = 32 else 0
x >>= r; // remove 32 zero bits
}
if T::BITS >= 32 {
t = ((CastInto::<u16>::cast(x) == 0) as u32) << 4; // if (x has no 16 small bits) t = 16 else 0
if I::BITS >= 32 {
t = ((u16::cast_from_lossy(x) == 0) as u32) << 4; // if (x has no 16 small bits) t = 16 else 0
r += t;
x >>= t; // x = [0 - 0xFFFF] + higher garbage bits
}
const { assert!(T::BITS >= 16) };
t = ((CastInto::<u8>::cast(x) == 0) as u32) << 3;
const { assert!(I::BITS >= 16) };
t = ((u8::cast_from_lossy(x) == 0) as u32) << 3;
x >>= t; // x = [0 - 0xFF] + higher garbage bits
r += t;
let mut x: u8 = x.cast();
let mut x: u8 = x.cast_lossy();
t = (((x & 0x0F) == 0) as u32) << 2;
x >>= t; // x = [0 - 0xF] + higher garbage bits

View file

@ -1,275 +1,4 @@
use core::ops;
/// Minimal integer implementations needed on all integer types, including wide integers.
#[allow(dead_code)]
pub trait MinInt:
Copy
+ core::fmt::Debug
+ ops::BitOr<Output = Self>
+ ops::Not<Output = Self>
+ ops::Shl<u32, Output = Self>
{
/// Type with the same width but other signedness
type OtherSign: MinInt;
/// Unsigned version of Self
type UnsignedInt: MinInt;
/// If `Self` is a signed integer
const SIGNED: bool;
/// The bitwidth of the int type
const BITS: u32;
const ZERO: Self;
const ONE: Self;
const MIN: Self;
const MAX: Self;
}
/// Trait for some basic operations on integers
#[allow(dead_code)]
pub trait Int:
MinInt
+ PartialEq
+ PartialOrd
+ ops::AddAssign
+ ops::SubAssign
+ ops::BitAndAssign
+ ops::BitOrAssign
+ ops::BitXorAssign
+ ops::ShlAssign<i32>
+ ops::ShrAssign<u32>
+ ops::Add<Output = Self>
+ ops::Sub<Output = Self>
+ ops::Mul<Output = Self>
+ ops::Div<Output = Self>
+ ops::Shr<u32, Output = Self>
+ ops::BitXor<Output = Self>
+ ops::BitAnd<Output = Self>
{
/// LUT used for maximizing the space covered and minimizing the computational cost of fuzzing
/// in `builtins-test`. For example, Self = u128 produces [0,1,2,7,8,15,16,31,32,63,64,95,96,
/// 111,112,119,120,125,126,127].
const FUZZ_LENGTHS: [u8; 20] = make_fuzz_lengths(<Self as MinInt>::BITS);
/// The number of entries of `FUZZ_LENGTHS` actually used. The maximum is 20 for u128.
const FUZZ_NUM: usize = {
let log2 = (<Self as MinInt>::BITS - 1).count_ones() as usize;
if log2 == 3 {
// case for u8
6
} else {
// 3 entries on each extreme, 2 in the middle, and 4 for each scale of intermediate
// boundaries.
8 + (4 * (log2 - 4))
}
};
fn unsigned(self) -> Self::UnsignedInt;
fn from_unsigned(unsigned: Self::UnsignedInt) -> Self;
fn unsigned_abs(self) -> Self::UnsignedInt;
fn from_bool(b: bool) -> Self;
/// Prevents the need for excessive conversions between signed and unsigned
fn logical_shr(self, other: u32) -> Self;
/// Absolute difference between two integers.
fn abs_diff(self, other: Self) -> Self::UnsignedInt;
// copied from primitive integers, but put in a trait
fn is_zero(self) -> bool;
fn wrapping_neg(self) -> Self;
fn wrapping_add(self, other: Self) -> Self;
fn wrapping_mul(self, other: Self) -> Self;
fn wrapping_sub(self, other: Self) -> Self;
fn wrapping_shl(self, other: u32) -> Self;
fn wrapping_shr(self, other: u32) -> Self;
fn rotate_left(self, other: u32) -> Self;
fn overflowing_add(self, other: Self) -> (Self, bool);
fn leading_zeros(self) -> u32;
fn ilog2(self) -> u32;
}
pub(crate) const fn make_fuzz_lengths(bits: u32) -> [u8; 20] {
let mut v = [0u8; 20];
v[0] = 0;
v[1] = 1;
v[2] = 2; // important for parity and the iX::MIN case when reversed
let mut i = 3;
// No need for any more until the byte boundary, because there should be no algorithms
// that are sensitive to anything not next to byte boundaries after 2. We also scale
// in powers of two, which is important to prevent u128 corner tests from getting too
// big.
let mut l = 8;
loop {
if l >= ((bits / 2) as u8) {
break;
}
// get both sides of the byte boundary
v[i] = l - 1;
i += 1;
v[i] = l;
i += 1;
l *= 2;
}
if bits != 8 {
// add the lower side of the middle boundary
v[i] = ((bits / 2) - 1) as u8;
i += 1;
}
// We do not want to jump directly from the Self::BITS/2 boundary to the Self::BITS
// boundary because of algorithms that split the high part up. We reverse the scaling
// as we go to Self::BITS.
let mid = i;
let mut j = 1;
loop {
v[i] = (bits as u8) - (v[mid - j]) - 1;
if j == mid {
break;
}
i += 1;
j += 1;
}
v
}
macro_rules! int_impl_common {
($ty:ty) => {
fn from_bool(b: bool) -> Self {
b as $ty
}
fn logical_shr(self, other: u32) -> Self {
Self::from_unsigned(self.unsigned().wrapping_shr(other))
}
fn is_zero(self) -> bool {
self == Self::ZERO
}
fn wrapping_neg(self) -> Self {
<Self>::wrapping_neg(self)
}
fn wrapping_add(self, other: Self) -> Self {
<Self>::wrapping_add(self, other)
}
fn wrapping_mul(self, other: Self) -> Self {
<Self>::wrapping_mul(self, other)
}
fn wrapping_sub(self, other: Self) -> Self {
<Self>::wrapping_sub(self, other)
}
fn wrapping_shl(self, other: u32) -> Self {
<Self>::wrapping_shl(self, other)
}
fn wrapping_shr(self, other: u32) -> Self {
<Self>::wrapping_shr(self, other)
}
fn rotate_left(self, other: u32) -> Self {
<Self>::rotate_left(self, other)
}
fn overflowing_add(self, other: Self) -> (Self, bool) {
<Self>::overflowing_add(self, other)
}
fn leading_zeros(self) -> u32 {
<Self>::leading_zeros(self)
}
fn ilog2(self) -> u32 {
<Self>::ilog2(self)
}
};
}
macro_rules! int_impl {
($ity:ty, $uty:ty) => {
impl MinInt for $uty {
type OtherSign = $ity;
type UnsignedInt = $uty;
const BITS: u32 = <Self as MinInt>::ZERO.count_zeros();
const SIGNED: bool = Self::MIN != Self::ZERO;
const ZERO: Self = 0;
const ONE: Self = 1;
const MIN: Self = <Self>::MIN;
const MAX: Self = <Self>::MAX;
}
impl Int for $uty {
fn unsigned(self) -> $uty {
self
}
// It makes writing macros easier if this is implemented for both signed and unsigned
#[allow(clippy::wrong_self_convention)]
fn from_unsigned(me: $uty) -> Self {
me
}
fn unsigned_abs(self) -> Self {
self
}
fn abs_diff(self, other: Self) -> Self {
self.abs_diff(other)
}
int_impl_common!($uty);
}
impl MinInt for $ity {
type OtherSign = $uty;
type UnsignedInt = $uty;
const BITS: u32 = <Self as MinInt>::ZERO.count_zeros();
const SIGNED: bool = Self::MIN != Self::ZERO;
const ZERO: Self = 0;
const ONE: Self = 1;
const MIN: Self = <Self>::MIN;
const MAX: Self = <Self>::MAX;
}
impl Int for $ity {
fn unsigned(self) -> $uty {
self as $uty
}
fn from_unsigned(me: $uty) -> Self {
me as $ity
}
fn unsigned_abs(self) -> Self::UnsignedInt {
self.unsigned_abs()
}
fn abs_diff(self, other: Self) -> $uty {
self.abs_diff(other)
}
int_impl_common!($ity);
}
};
}
int_impl!(isize, usize);
int_impl!(i8, u8);
int_impl!(i16, u16);
int_impl!(i32, u32);
int_impl!(i64, u64);
int_impl!(i128, u128);
pub use crate::support::{CastFrom, CastInto, Int, MinInt};
/// Trait for integers twice the bit width of another integer. This is implemented for all
/// primitives except for `u8`, because there is not a smaller primitive.
@ -368,44 +97,3 @@ impl_h_int!(
i32 u32 i64,
i64 u64 i128
);
/// Trait to express (possibly lossy) casting of integers
pub trait CastInto<T: Copy>: Copy {
fn cast(self) -> T;
}
pub trait CastFrom<T: Copy>: Copy {
fn cast_from(value: T) -> Self;
}
impl<T: Copy, U: CastInto<T> + Copy> CastFrom<U> for T {
fn cast_from(value: U) -> Self {
value.cast()
}
}
macro_rules! cast_into {
($ty:ty) => {
cast_into!($ty; usize, isize, u8, i8, u16, i16, u32, i32, u64, i64, u128, i128);
};
($ty:ty; $($into:ty),*) => {$(
impl CastInto<$into> for $ty {
fn cast(self) -> $into {
self as $into
}
}
)*};
}
cast_into!(usize);
cast_into!(isize);
cast_into!(u8);
cast_into!(i8);
cast_into!(u16);
cast_into!(i16);
cast_into!(u32);
cast_into!(i32);
cast_into!(u64);
cast_into!(i64);
cast_into!(u128);
cast_into!(i128);

View file

@ -132,7 +132,7 @@ macro_rules! intrinsics {
) => (
#[cfg($name = "optimized-c")]
pub $(unsafe $($empty)? )? extern $abi fn $name( $($argname: $ty),* ) $(-> $ret)? {
extern $abi {
unsafe extern $abi {
fn $name($($argname: $ty),*) $(-> $ret)?;
}
unsafe {
@ -435,7 +435,7 @@ macro_rules! intrinsics {
pub mod $name {
#[unsafe(naked)]
$(#[$($attr)*])*
#[cfg_attr(not(feature = "mangled-names"), no_mangle)]
#[cfg_attr(not(feature = "mangled-names"), unsafe(no_mangle))]
#[cfg_attr(not(any(all(windows, target_env = "gnu"), target_os = "cygwin")), linkage = "weak")]
pub unsafe extern $abi fn $name( $($argname: $ty),* ) $(-> $ret)? {
$($body)*

View file

@ -49,7 +49,9 @@
// We only define stack probing for these architectures today.
#![cfg(any(target_arch = "x86_64", target_arch = "x86"))]
extern "C" {
// SAFETY: defined in this module.
// FIXME(extern_custom): the ABI is not correct.
unsafe extern "C" {
pub fn __rust_probestack();
}

View file

@ -0,0 +1,7 @@
[package]
name = "josh-sync"
edition = "2024"
publish = false
[dependencies]
directories = "6.0.0"

View file

@ -0,0 +1,45 @@
use std::io::{Read, Write};
use std::process::exit;
use std::{env, io};
use crate::sync::{GitSync, Josh};
mod sync;
const USAGE: &str = r#"Utility for synchroniing compiler-builtins with rust-lang/rust
Usage:
josh-sync rustc-pull
Pull from rust-lang/rust to compiler-builtins. Creates a commit
updating the version file, followed by a merge commit.
josh-sync rustc-push GITHUB_USERNAME [BRANCH]
Create a branch off of rust-lang/rust updating compiler-builtins.
"#;
fn main() {
let sync = GitSync::from_current_dir();
// Collect args, then recollect as str refs so we can match on them
let args: Vec<_> = env::args().collect();
let args: Vec<&str> = args.iter().map(String::as_str).collect();
match args.as_slice()[1..] {
["rustc-pull"] => sync.rustc_pull(None),
["rustc-push", github_user, branch] => sync.rustc_push(github_user, Some(branch)),
["rustc-push", github_user] => sync.rustc_push(github_user, None),
["start-josh"] => {
let _josh = Josh::start();
println!("press enter to stop");
io::stdout().flush().unwrap();
let _ = io::stdin().read(&mut [0u8]).unwrap();
}
_ => {
println!("{USAGE}");
exit(1);
}
}
}

View file

@ -0,0 +1,371 @@
use std::net::{SocketAddr, TcpStream};
use std::process::{Command, Stdio, exit};
use std::time::Duration;
use std::{env, fs, process, thread};
const JOSH_PORT: u16 = 42042;
const DEFAULT_PR_BRANCH: &str = "update-builtins";
pub struct GitSync {
upstream_repo: String,
upstream_ref: String,
upstream_url: String,
josh_filter: String,
josh_url_base: String,
}
/// This code was adapted from the miri repository, via the rustc-dev-guide
/// (<https://github.com/rust-lang/rustc-dev-guide/tree/c51adbd12d/josh-sync>)
impl GitSync {
pub fn from_current_dir() -> Self {
let upstream_repo =
env::var("UPSTREAM_ORG").unwrap_or_else(|_| "rust-lang".to_owned()) + "/rust";
Self {
upstream_url: format!("https://github.com/{upstream_repo}"),
upstream_repo,
upstream_ref: env::var("UPSTREAM_REF").unwrap_or_else(|_| "HEAD".to_owned()),
josh_filter: ":/library/compiler-builtins".to_owned(),
josh_url_base: format!("http://localhost:{JOSH_PORT}"),
}
}
/// Pull from rust-lang/rust to compiler-builtins.
pub fn rustc_pull(&self, commit: Option<String>) {
let Self {
upstream_ref,
upstream_url,
upstream_repo,
..
} = self;
let new_upstream_base = commit.unwrap_or_else(|| {
let out = check_output(["git", "ls-remote", upstream_url, upstream_ref]);
out.split_whitespace()
.next()
.unwrap_or_else(|| panic!("could not split output: '{out}'"))
.to_owned()
});
ensure_clean();
// Make sure josh is running.
let _josh = Josh::start();
let josh_url_filtered = self.josh_url(
&self.upstream_repo,
Some(&new_upstream_base),
Some(&self.josh_filter),
);
let previous_upstream_base = fs::read_to_string("rust-version")
.expect("failed to read `rust-version`")
.trim()
.to_string();
assert_ne!(previous_upstream_base, new_upstream_base, "nothing to pull");
let orig_head = check_output(["git", "rev-parse", "HEAD"]);
println!("original upstream base: {previous_upstream_base}");
println!("new upstream base: {new_upstream_base}");
println!("original HEAD: {orig_head}");
// Fetch the latest upstream HEAD so we can get a summary. Use the Josh URL for caching.
run([
"git",
"fetch",
&self.josh_url(&self.upstream_repo, Some(&new_upstream_base), Some(":/")),
&new_upstream_base,
"--depth=1",
]);
let new_summary = check_output(["git", "log", "-1", "--format=%h %s", &new_upstream_base]);
// Update rust-version file. As a separate commit, since making it part of
// the merge has confused the heck out of josh in the past.
// We pass `--no-verify` to avoid running git hooks.
// We do this before the merge so that if there are merge conflicts, we have
// the right rust-version file while resolving them.
fs::write("rust-version", format!("{new_upstream_base}\n"))
.expect("failed to write rust-version");
let prep_message = format!(
"Update the upstream Rust version\n\n\
To prepare for merging from {upstream_repo}, set the version file to:\n\n \
{new_summary}\n\
",
);
run([
"git",
"commit",
"rust-version",
"--no-verify",
"-m",
&prep_message,
]);
// Fetch given rustc commit.
run(["git", "fetch", &josh_url_filtered]);
let incoming_ref = check_output(["git", "rev-parse", "FETCH_HEAD"]);
println!("incoming ref: {incoming_ref}");
let merge_message = format!(
"Merge ref '{upstream_head_short}{filter}' from {upstream_url}\n\n\
Pull recent changes from {upstream_repo} via Josh.\n\n\
Upstream ref: {new_upstream_base}\n\
Filtered ref: {incoming_ref}\n\
",
upstream_head_short = &new_upstream_base[..12],
filter = self.josh_filter
);
// This should not add any new root commits. So count those before and after merging.
let num_roots = || -> u32 {
let out = check_output(["git", "rev-list", "HEAD", "--max-parents=0", "--count"]);
out.trim()
.parse::<u32>()
.unwrap_or_else(|e| panic!("failed to parse `{out}`: {e}"))
};
let num_roots_before = num_roots();
let pre_merge_sha = check_output(["git", "rev-parse", "HEAD"]);
println!("pre-merge HEAD: {pre_merge_sha}");
// Merge the fetched commit.
run([
"git",
"merge",
"FETCH_HEAD",
"--no-verify",
"--no-ff",
"-m",
&merge_message,
]);
let current_sha = check_output(["git", "rev-parse", "HEAD"]);
if current_sha == pre_merge_sha {
run(["git", "reset", "--hard", &orig_head]);
eprintln!(
"No merge was performed, no changes to pull were found. \
Rolled back the preparation commit."
);
exit(1);
}
// Check that the number of roots did not increase.
assert_eq!(
num_roots(),
num_roots_before,
"Josh created a new root commit. This is probably not the history you want."
);
}
/// Construct an update to rust-lang/rust from compiler-builtins.
pub fn rustc_push(&self, github_user: &str, branch: Option<&str>) {
let Self {
josh_filter,
upstream_url,
..
} = self;
let branch = branch.unwrap_or(DEFAULT_PR_BRANCH);
let josh_url = self.josh_url(&format!("{github_user}/rust"), None, Some(josh_filter));
let user_upstream_url = format!("git@github.com:{github_user}/rust.git");
let Ok(rustc_git) = env::var("RUSTC_GIT") else {
panic!("the RUSTC_GIT environment variable must be set to a rust-lang/rust checkout")
};
ensure_clean();
let base = fs::read_to_string("rust-version")
.expect("failed to read `rust-version`")
.trim()
.to_string();
// Make sure josh is running.
let _josh = Josh::start();
// Prepare the branch. Pushing works much better if we use as base exactly
// the commit that we pulled from last time, so we use the `rust-version`
// file to find out which commit that would be.
println!("Preparing {github_user}/rust (base: {base})...");
if Command::new("git")
.args(["-C", &rustc_git, "fetch", &user_upstream_url, branch])
.output() // capture output
.expect("could not run fetch")
.status
.success()
{
panic!(
"The branch '{branch}' seems to already exist in '{user_upstream_url}'. \
Please delete it and try again."
);
}
run(["git", "-C", &rustc_git, "fetch", upstream_url, &base]);
run_cfg("git", |c| {
c.args([
"-C",
&rustc_git,
"push",
&user_upstream_url,
&format!("{base}:refs/heads/{branch}"),
])
.stdout(Stdio::null())
.stderr(Stdio::null()) // silence the "create GitHub PR" message
});
println!("pushed PR branch");
// Do the actual push.
println!("Pushing changes...");
run(["git", "push", &josh_url, &format!("HEAD:{branch}")]);
println!();
// Do a round-trip check to make sure the push worked as expected.
run(["git", "fetch", &josh_url, branch]);
let head = check_output(["git", "rev-parse", "HEAD"]);
let fetch_head = check_output(["git", "rev-parse", "FETCH_HEAD"]);
assert_eq!(
head, fetch_head,
"Josh created a non-roundtrip push! Do NOT merge this into rustc!\n\
Expected {head}, got {fetch_head}."
);
println!(
"Confirmed that the push round-trips back to compiler-builtins properly. Please \
create a rustc PR:"
);
// Open PR with `subtree update` title to silence the `no-merges` triagebot check
println!(
" {upstream_url}/compare/{github_user}:{branch}?quick_pull=1\
&title=Update%20the%20%60compiler-builtins%60%20subtree\
&body=Update%20the%20Josh%20subtree%20to%20https%3A%2F%2Fgithub.com%2Frust-lang%2F\
compiler-builtins%2Fcommit%2F{head_short}.%0A%0Ar%3F%20%40ghost",
head_short = &head[..12],
);
}
/// Construct a url to the local Josh server with (optionally)
fn josh_url(&self, repo: &str, rev: Option<&str>, filter: Option<&str>) -> String {
format!(
"{base}/{repo}.git{at}{rev}{filter}{filt_git}",
base = self.josh_url_base,
at = if rev.is_some() { "@" } else { "" },
rev = rev.unwrap_or_default(),
filter = filter.unwrap_or_default(),
filt_git = if filter.is_some() { ".git" } else { "" }
)
}
}
/// Fail if there are files that need to be checked in.
fn ensure_clean() {
let read = check_output(["git", "status", "--untracked-files=no", "--porcelain"]);
assert!(
read.is_empty(),
"working directory must be clean before performing rustc pull"
);
}
/* Helpers for running commands with logged invocations */
/// Run a command from an array, passing its output through.
fn run<'a, Args: AsRef<[&'a str]>>(l: Args) {
let l = l.as_ref();
run_cfg(l[0], |c| c.args(&l[1..]));
}
/// Run a command from an array, collecting its output.
fn check_output<'a, Args: AsRef<[&'a str]>>(l: Args) -> String {
let l = l.as_ref();
check_output_cfg(l[0], |c| c.args(&l[1..]))
}
/// [`run`] with configuration.
fn run_cfg(prog: &str, f: impl FnOnce(&mut Command) -> &mut Command) {
// self.read(l.as_ref());
check_output_cfg(prog, |c| f(c.stdout(Stdio::inherit())));
}
/// [`read`] with configuration. All shell helpers print the command and pass stderr.
fn check_output_cfg(prog: &str, f: impl FnOnce(&mut Command) -> &mut Command) -> String {
let mut cmd = Command::new(prog);
cmd.stderr(Stdio::inherit());
f(&mut cmd);
eprintln!("+ {cmd:?}");
let out = cmd.output().expect("command failed");
assert!(out.status.success());
String::from_utf8(out.stdout.trim_ascii().to_vec()).expect("non-UTF8 output")
}
/// Create a wrapper that stops Josh on drop.
pub struct Josh(process::Child);
impl Josh {
pub fn start() -> Self {
// Determine cache directory.
let user_dirs =
directories::ProjectDirs::from("org", "rust-lang", "rustc-compiler-builtins-josh")
.unwrap();
let local_dir = user_dirs.cache_dir().to_owned();
// Start josh, silencing its output.
#[expect(clippy::zombie_processes, reason = "clippy can't handle the loop")]
let josh = process::Command::new("josh-proxy")
.arg("--local")
.arg(local_dir)
.args([
"--remote=https://github.com",
&format!("--port={JOSH_PORT}"),
"--no-background",
])
.stdout(Stdio::null())
.stderr(Stdio::null())
.spawn()
.expect("failed to start josh-proxy, make sure it is installed");
// Wait until the port is open. We try every 10ms until 1s passed.
for _ in 0..100 {
// This will generally fail immediately when the port is still closed.
let addr = SocketAddr::from(([127, 0, 0, 1], JOSH_PORT));
let josh_ready = TcpStream::connect_timeout(&addr, Duration::from_millis(1));
if josh_ready.is_ok() {
println!("josh up and running");
return Josh(josh);
}
// Not ready yet.
thread::sleep(Duration::from_millis(10));
}
panic!("Even after waiting for 1s, josh-proxy is still not available.")
}
}
impl Drop for Josh {
fn drop(&mut self) {
if cfg!(unix) {
// Try to gracefully shut it down.
Command::new("kill")
.args(["-s", "INT", &self.0.id().to_string()])
.output()
.expect("failed to SIGINT josh-proxy");
// Sadly there is no "wait with timeout"... so we just give it some time to finish.
thread::sleep(Duration::from_millis(100));
// Now hopefully it is gone.
if self
.0
.try_wait()
.expect("failed to wait for josh-proxy")
.is_some()
{
return;
}
}
// If that didn't work (or we're not on Unix), kill it hard.
eprintln!(
"I have to kill josh-proxy the hard way, let's hope this does not \
break anything."
);
self.0.kill().expect("failed to SIGKILL josh-proxy");
}
}

View file

@ -10,9 +10,9 @@ proc-macro = true
[dependencies]
heck = "0.5.0"
proc-macro2 = "1.0.94"
proc-macro2 = "1.0.95"
quote = "1.0.40"
syn = { version = "2.0.100", features = ["full", "extra-traits", "visit-mut"] }
syn = { version = "2.0.101", features = ["full", "extra-traits", "visit-mut"] }
[lints.rust]
# Values used during testing

View file

@ -11,4 +11,4 @@ license = "MIT OR Apache-2.0"
libm = { path = "../../libm" }
[build-dependencies]
cc = "1.2.16"
cc = "1.2.25"

View file

@ -120,7 +120,7 @@ fn build_musl_math(cfg: &Config) {
let arch_dir = musl_dir.join("arch").join(&cfg.musl_arch);
assert!(
math.exists(),
"musl source not found. Is the submodule up to date?"
"musl source not found. You may need to run `./ci/update-musl.sh`."
);
let source_map = find_math_source(&math, cfg);

@ -1 +0,0 @@
Subproject commit c47ad25ea3b484e10326f933e927c0bc8cded3da

View file

@ -1,11 +1,8 @@
//! This is needed for tests on targets that require a `#[panic_handler]` function
#![feature(no_core)]
#![no_core]
extern crate core;
#![no_std]
#[panic_handler]
fn panic(_: &core::panic::PanicInfo) -> ! {
fn panic(_: &core::panic::PanicInfo<'_>) -> ! {
loop {}
}

View file

@ -0,0 +1,13 @@
[package]
name = "symbol-check"
version = "0.1.0"
edition = "2024"
publish = false
[dependencies]
# FIXME: used as a git dependency since the latest release does not support wasm
object = { git = "https://github.com/gimli-rs/object.git", rev = "013fac75da56a684377af4151b8164b78c1790e0" }
serde_json = "1.0.140"
[features]
wasm = ["object/wasm"]

View file

@ -0,0 +1,232 @@
//! Tool used by CI to inspect compiler-builtins archives and help ensure we won't run into any
//! linking errors.
use std::collections::{BTreeMap, BTreeSet};
use std::fs;
use std::io::{BufRead, BufReader};
use std::path::{Path, PathBuf};
use std::process::{Command, Stdio};
use object::read::archive::{ArchiveFile, ArchiveMember};
use object::{Object, ObjectSymbol, Symbol, SymbolKind, SymbolScope, SymbolSection};
use serde_json::Value;
const CHECK_LIBRARIES: &[&str] = &["compiler_builtins", "builtins_test_intrinsics"];
const CHECK_EXTENSIONS: &[Option<&str>] = &[Some("rlib"), Some("a"), Some("exe"), None];
const USAGE: &str = "Usage:
symbol-check build-and-check CARGO_ARGS ...
Cargo will get invoked with `CARGO_ARGS` and all output
`compiler_builtins*.rlib` files will be checked.
";
fn main() {
// Create a `&str` vec so we can match on it.
let args = std::env::args().collect::<Vec<_>>();
let args_ref = args.iter().map(String::as_str).collect::<Vec<_>>();
match &args_ref[1..] {
["build-and-check", rest @ ..] if !rest.is_empty() => {
let paths = exec_cargo_with_args(rest);
for path in paths {
println!("Checking {}", path.display());
verify_no_duplicates(&path);
verify_core_symbols(&path);
}
}
_ => {
println!("{USAGE}");
std::process::exit(1);
}
}
}
/// Run `cargo build` with the provided additional arguments, collecting the list of created
/// libraries.
fn exec_cargo_with_args(args: &[&str]) -> Vec<PathBuf> {
let mut cmd = Command::new("cargo");
cmd.arg("build")
.arg("--message-format=json")
.args(args)
.stdout(Stdio::piped());
println!("running: {cmd:?}");
let mut child = cmd.spawn().expect("failed to launch Cargo");
let stdout = child.stdout.take().unwrap();
let reader = BufReader::new(stdout);
let mut check_files = Vec::new();
for line in reader.lines() {
let line = line.expect("failed to read line");
println!("{line}"); // tee to stdout
// Select only steps that create files
let j: Value = serde_json::from_str(&line).expect("failed to deserialize");
if j["reason"] != "compiler-artifact" {
continue;
}
// Find rlibs in the created file list that match our expected library names and
// extensions.
for fpath in j["filenames"].as_array().expect("filenames not an array") {
let path = fpath.as_str().expect("file name not a string");
let path = PathBuf::from(path);
if CHECK_EXTENSIONS.contains(&path.extension().map(|ex| ex.to_str().unwrap())) {
let fname = path.file_name().unwrap().to_str().unwrap();
if CHECK_LIBRARIES.iter().any(|lib| fname.contains(lib)) {
check_files.push(path);
}
}
}
}
assert!(child.wait().expect("failed to wait on Cargo").success());
assert!(!check_files.is_empty(), "no compiler_builtins rlibs found");
println!("Collected the following rlibs to check: {check_files:#?}");
check_files
}
/// Information collected from `object`, for convenience.
#[expect(unused)] // only for printing
#[derive(Clone, Debug)]
struct SymInfo {
name: String,
kind: SymbolKind,
scope: SymbolScope,
section: SymbolSection,
is_undefined: bool,
is_global: bool,
is_local: bool,
is_weak: bool,
is_common: bool,
address: u64,
object: String,
}
impl SymInfo {
fn new(sym: &Symbol, member: &ArchiveMember) -> Self {
Self {
name: sym.name().expect("missing name").to_owned(),
kind: sym.kind(),
scope: sym.scope(),
section: sym.section(),
is_undefined: sym.is_undefined(),
is_global: sym.is_global(),
is_local: sym.is_local(),
is_weak: sym.is_weak(),
is_common: sym.is_common(),
address: sym.address(),
object: String::from_utf8_lossy(member.name()).into_owned(),
}
}
}
/// Ensure that the same global symbol isn't defined in multiple object files within an archive.
///
/// Note that this will also locate cases where a symbol is weakly defined in more than one place.
/// Technically there are no linker errors that will come from this, but it keeps our binary more
/// straightforward and saves some distribution size.
fn verify_no_duplicates(path: &Path) {
let mut syms = BTreeMap::<String, SymInfo>::new();
let mut dups = Vec::new();
let mut found_any = false;
for_each_symbol(path, |symbol, member| {
// Only check defined globals
if !symbol.is_global() || symbol.is_undefined() {
return;
}
let sym = SymInfo::new(&symbol, member);
// x86-32 includes multiple copies of thunk symbols
if sym.name.starts_with("__x86.get_pc_thunk") {
return;
}
// Windows has symbols for literal numeric constants, string literals, and MinGW pseudo-
// relocations. These are allowed to have repeated definitions.
let win_allowed_dup_pfx = ["__real@", "__xmm@", "??_C@_", ".refptr"];
if win_allowed_dup_pfx
.iter()
.any(|pfx| sym.name.starts_with(pfx))
{
return;
}
match syms.get(&sym.name) {
Some(existing) => {
dups.push(sym);
dups.push(existing.clone());
}
None => {
syms.insert(sym.name.clone(), sym);
}
}
found_any = true;
});
assert!(found_any, "no symbols found");
if !dups.is_empty() {
dups.sort_unstable_by(|a, b| a.name.cmp(&b.name));
panic!("found duplicate symbols: {dups:#?}");
}
println!(" success: no duplicate symbols found");
}
/// Ensure that there are no references to symbols from `core` that aren't also (somehow) defined.
fn verify_core_symbols(path: &Path) {
let mut defined = BTreeSet::new();
let mut undefined = Vec::new();
let mut has_symbols = false;
for_each_symbol(path, |symbol, member| {
has_symbols = true;
// Find only symbols from `core`
if !symbol.name().unwrap().contains("_ZN4core") {
return;
}
let sym = SymInfo::new(&symbol, member);
if sym.is_undefined {
undefined.push(sym);
} else {
defined.insert(sym.name);
}
});
assert!(has_symbols, "no symbols found");
// Discard any symbols that are defined somewhere in the archive
undefined.retain(|sym| !defined.contains(&sym.name));
if !undefined.is_empty() {
undefined.sort_unstable_by(|a, b| a.name.cmp(&b.name));
panic!("found undefined symbols from core: {undefined:#?}");
}
println!(" success: no undefined references to core found");
}
/// For a given archive path, do something with each symbol.
fn for_each_symbol(path: &Path, mut f: impl FnMut(Symbol, &ArchiveMember)) {
let data = fs::read(path).expect("reading file failed");
let archive = ArchiveFile::parse(data.as_slice()).expect("archive parse failed");
for member in archive.members() {
let member = member.expect("failed to access member");
let obj_data = member.data(&*data).expect("failed to access object");
let obj = object::File::parse(obj_data).expect("failed to parse object");
obj.symbols().for_each(|sym| f(sym, &member));
}
}

View file

@ -6,7 +6,7 @@ publish = false
license = "MIT OR Apache-2.0"
[features]
default = ["build-mpfr", "build-musl", "unstable-float"]
default = ["build-mpfr", "unstable-float"]
# Propagated from libm because this affects which functions we test.
unstable-float = ["libm/unstable-float", "rug?/nightly-float"]
@ -28,28 +28,28 @@ icount = ["dep:iai-callgrind"]
short-benchmarks = []
[dependencies]
anyhow = "1.0.97"
anyhow = "1.0.98"
# This is not directly used but is required so we can enable `gmp-mpfr-sys/force-cross`.
gmp-mpfr-sys = { version = "1.6.4", optional = true, default-features = false }
iai-callgrind = { version = "0.14.0", optional = true }
gmp-mpfr-sys = { version = "1.6.5", optional = true, default-features = false }
iai-callgrind = { version = "0.14.1", optional = true }
indicatif = { version = "0.17.11", default-features = false }
libm = { path = "../libm", features = ["unstable-public-internals"] }
libm-macros = { path = "../crates/libm-macros" }
musl-math-sys = { path = "../crates/musl-math-sys", optional = true }
paste = "1.0.15"
rand = "0.9.0"
rand = "0.9.1"
rand_chacha = "0.9.0"
rayon = "1.10.0"
rug = { version = "1.27.0", optional = true, default-features = false, features = ["float", "integer", "std"] }
[target.'cfg(target_family = "wasm")'.dependencies]
getrandom = { version = "0.3.2", features = ["wasm_js"] }
getrandom = { version = "0.3.3", features = ["wasm_js"] }
[build-dependencies]
rand = { version = "0.9.0", optional = true }
rand = { version = "0.9.1", optional = true }
[dev-dependencies]
criterion = { version = "0.5.1", default-features = false, features = ["cargo_bench_support"] }
criterion = { version = "0.6.0", default-features = false, features = ["cargo_bench_support"] }
libtest-mimic = "0.8.1"
[[bench]]

View file

@ -1,9 +1,11 @@
//! Benchmarks that use `iai-cachegrind` to be reasonably CI-stable.
#![feature(f16)]
#![feature(f128)]
use std::hint::black_box;
use iai_callgrind::{library_benchmark, library_benchmark_group, main};
use libm::support::{HInt, u256};
use libm::support::{HInt, Hexf, hf16, hf32, hf64, hf128, u256};
use libm_test::generate::spaced;
use libm_test::{CheckBasis, CheckCtx, GeneratorKind, MathOp, OpRustArgs, TupleCall, op};
@ -21,7 +23,7 @@ macro_rules! icount_benches {
let mut ctx = CheckCtx::new(
Op::IDENTIFIER,
CheckBasis::None,
GeneratorKind::QuickSpaced
GeneratorKind::Spaced
);
ctx.override_iterations(BENCH_ITER_ITEMS);
let ret = spaced::get_test_cases::<Op>(&ctx).0.collect::<Vec<_>>();
@ -109,11 +111,6 @@ fn icount_bench_u128_widen_mul(cases: Vec<(u128, u128)>) {
}
}
library_benchmark_group!(
name = icount_bench_u128_widen_mul_group;
benchmarks = icount_bench_u128_widen_mul
);
#[library_benchmark]
#[bench::linspace(setup_u256_add())]
fn icount_bench_u256_add(cases: Vec<(u256, u256)>) {
@ -122,11 +119,6 @@ fn icount_bench_u256_add(cases: Vec<(u256, u256)>) {
}
}
library_benchmark_group!(
name = icount_bench_u256_add_group;
benchmarks = icount_bench_u256_add
);
#[library_benchmark]
#[bench::linspace(setup_u256_shift())]
fn icount_bench_u256_shr(cases: Vec<(u256, u32)>) {
@ -136,16 +128,90 @@ fn icount_bench_u256_shr(cases: Vec<(u256, u32)>) {
}
library_benchmark_group!(
name = icount_bench_u256_shr_group;
benchmarks = icount_bench_u256_shr
name = icount_bench_u128_group;
benchmarks = icount_bench_u128_widen_mul, icount_bench_u256_add, icount_bench_u256_shr
);
#[library_benchmark]
#[bench::short("0x12.34p+8")]
#[bench::max("0x1.ffcp+15")]
fn icount_bench_hf16(s: &str) -> f16 {
black_box(hf16(s))
}
#[library_benchmark]
#[bench::short("0x12.34p+8")]
#[bench::max("0x1.fffffep+127")]
fn icount_bench_hf32(s: &str) -> f32 {
black_box(hf32(s))
}
#[library_benchmark]
#[bench::short("0x12.34p+8")]
#[bench::max("0x1.fffffffffffffp+1023")]
fn icount_bench_hf64(s: &str) -> f64 {
black_box(hf64(s))
}
#[library_benchmark]
#[bench::short("0x12.34p+8")]
#[bench::max("0x1.ffffffffffffffffffffffffffffp+16383")]
fn icount_bench_hf128(s: &str) -> f128 {
black_box(hf128(s))
}
library_benchmark_group!(
name = icount_bench_hf_parse_group;
benchmarks =
icount_bench_hf16,
icount_bench_hf32,
icount_bench_hf64,
icount_bench_hf128
);
#[library_benchmark]
#[bench::short(1.015625)]
#[bench::max(f16::MAX)]
fn icount_bench_print_hf16(x: f16) -> String {
black_box(Hexf(x).to_string())
}
#[library_benchmark]
#[bench::short(1.015625)]
#[bench::max(f32::MAX)]
fn icount_bench_print_hf32(x: f32) -> String {
black_box(Hexf(x).to_string())
}
#[library_benchmark]
#[bench::short(1.015625)]
#[bench::max(f64::MAX)]
fn icount_bench_print_hf64(x: f64) -> String {
black_box(Hexf(x).to_string())
}
#[library_benchmark]
#[bench::short(1.015625)]
#[bench::max(f128::MAX)]
fn icount_bench_print_hf128(x: f128) -> String {
black_box(Hexf(x).to_string())
}
library_benchmark_group!(
name = icount_bench_hf_print_group;
benchmarks =
icount_bench_print_hf16,
icount_bench_print_hf32,
icount_bench_print_hf64,
icount_bench_print_hf128
);
main!(
library_benchmark_groups =
// u256-related benchmarks
icount_bench_u128_widen_mul_group,
icount_bench_u256_add_group,
icount_bench_u256_shr_group,
// Benchmarks not related to public libm math
icount_bench_u128_group,
icount_bench_hf_parse_group,
icount_bench_hf_print_group,
// verify-apilist-start
// verify-sorted-start
icount_bench_acos_group,

View file

@ -55,7 +55,7 @@ where
Op: MathOp<FTy = f32, RustArgs = (f32,)>,
Op::RustArgs: SpacedInput<Op>,
{
let mut ctx = CheckCtx::new(Op::IDENTIFIER, CheckBasis::Mpfr, GeneratorKind::QuickSpaced);
let mut ctx = CheckCtx::new(Op::IDENTIFIER, CheckBasis::Mpfr, GeneratorKind::Spaced);
plot_one_generator(
out_dir,
&ctx,

View file

@ -51,6 +51,7 @@ where
// Check some special values that aren't included in the above ranges
values.push(Op::FTy::NAN);
values.push(Op::FTy::NEG_NAN);
values.extend(Op::FTy::consts().iter());
// Check around the maximum subnormal value

View file

@ -381,7 +381,7 @@ fn unop_common<F1: Float, F2: Float>(
}
// abs and copysign require signaling NaNs to be propagated, so verify bit equality.
if actual.to_bits() == expected.to_bits() {
if actual.biteq(expected) {
return CheckAction::Custom(Ok(()));
} else {
return CheckAction::Custom(Err(anyhow::anyhow!("NaNs have different bitpatterns")));
@ -444,13 +444,18 @@ fn binop_common<F1: Float, F2: Float>(
expected: F2,
ctx: &CheckCtx,
) -> CheckAction {
// MPFR only has one NaN bitpattern; allow the default `.is_nan()` checks to validate. Skip if
// the first input (magnitude source) is NaN and the output is also a NaN, or if the second
// input (sign source) is NaN.
if ctx.basis == CheckBasis::Mpfr
// MPFR only has one NaN bitpattern; skip tests in cases where the first argument would take
// the sign of a NaN second argument. The default NaN checks cover other cases.
if ctx.base_name == BaseName::Copysign && ctx.basis == CheckBasis::Mpfr && input.1.is_nan() {
return SKIP;
}
// FIXME(#939): this should not be skipped, there is a bug in our implementationi.
if ctx.base_name == BaseName::FmaximumNum
&& ctx.basis == CheckBasis::Mpfr
&& ((input.0.is_nan() && actual.is_nan() && expected.is_nan()) || input.1.is_nan())
{
return SKIP;
return XFAIL_NOCHECK;
}
/* FIXME(#439): our fmin and fmax do not compare signed zeros */

View file

@ -22,13 +22,38 @@ static EXTENSIVE_ITER_OVERRIDE: LazyLock<Option<u64>> = LazyLock::new(|| {
/// Specific tests that need to have a reduced amount of iterations to complete in a reasonable
/// amount of time.
///
/// Contains the itentifier+generator combo to match on, plus the factor to reduce by.
const EXTEMELY_SLOW_TESTS: &[(Identifier, GeneratorKind, u64)] = &[
(Identifier::Fmodf128, GeneratorKind::QuickSpaced, 50),
(Identifier::Fmodf128, GeneratorKind::Extensive, 50),
const EXTREMELY_SLOW_TESTS: &[SlowTest] = &[
SlowTest {
ident: Identifier::Fmodf128,
gen_kind: GeneratorKind::Spaced,
extensive: false,
reduce_factor: 50,
},
SlowTest {
ident: Identifier::Fmodf128,
gen_kind: GeneratorKind::Spaced,
extensive: true,
reduce_factor: 50,
},
];
/// A pattern to match a `CheckCtx`, plus a factor to reduce by.
struct SlowTest {
ident: Identifier,
gen_kind: GeneratorKind,
extensive: bool,
reduce_factor: u64,
}
impl SlowTest {
/// True if the test in `CheckCtx` should be reduced by `reduce_factor`.
fn matches_ctx(&self, ctx: &CheckCtx) -> bool {
self.ident == ctx.fn_ident
&& self.gen_kind == ctx.gen_kind
&& self.extensive == ctx.extensive
}
}
/// Maximum number of iterations to run for a single routine.
///
/// The default value of one greater than `u32::MAX` allows testing single-argument `f32` routines
@ -54,6 +79,7 @@ pub struct CheckCtx {
/// Source of truth for tests.
pub basis: CheckBasis,
pub gen_kind: GeneratorKind,
pub extensive: bool,
/// If specified, this value will override the value returned by [`iteration_count`].
pub override_iterations: Option<u64>,
}
@ -69,12 +95,19 @@ impl CheckCtx {
base_name_str: fn_ident.base_name().as_str(),
basis,
gen_kind,
extensive: false,
override_iterations: None,
};
ret.ulp = crate::default_ulp(&ret);
ret
}
/// Configure that this is an extensive test.
pub fn extensive(mut self, extensive: bool) -> Self {
self.extensive = extensive;
self
}
/// The number of input arguments for this function.
pub fn input_count(&self) -> usize {
self.fn_ident.math_op().rust_sig.args.len()
@ -100,14 +133,17 @@ pub enum CheckBasis {
/// and quantity.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum GeneratorKind {
/// Extremes, zeros, nonstandard numbers, etc.
EdgeCases,
Extensive,
QuickSpaced,
/// Spaced by logarithm (floats) or linear (integers).
Spaced,
/// Test inputs from an RNG.
Random,
/// A provided test case list.
List,
}
/// A list of all functions that should get extensive tests.
/// A list of all functions that should get extensive tests, as configured by environment variable.
///
/// This also supports the special test name `all` to run all tests, as well as `all_f16`,
/// `all_f32`, `all_f64`, and `all_f128` to run all tests for a specific float type.
@ -216,17 +252,17 @@ pub fn iteration_count(ctx: &CheckCtx, argnum: usize) -> u64 {
let random_iter_count = domain_iter_count / 100;
let mut total_iterations = match ctx.gen_kind {
GeneratorKind::QuickSpaced => domain_iter_count,
GeneratorKind::Spaced if ctx.extensive => extensive_max_iterations(),
GeneratorKind::Spaced => domain_iter_count,
GeneratorKind::Random => random_iter_count,
GeneratorKind::Extensive => extensive_max_iterations(),
GeneratorKind::EdgeCases | GeneratorKind::List => {
unimplemented!("shoudn't need `iteration_count` for {:?}", ctx.gen_kind)
}
};
// Larger float types get more iterations.
if t_env.large_float_ty && ctx.gen_kind != GeneratorKind::Extensive {
if ctx.gen_kind == GeneratorKind::Extensive {
if t_env.large_float_ty {
if ctx.extensive {
// Extensive already has a pretty high test count.
total_iterations *= 2;
} else {
@ -244,13 +280,13 @@ pub fn iteration_count(ctx: &CheckCtx, argnum: usize) -> u64 {
}
// Some tests are significantly slower than others and need to be further reduced.
if let Some((_id, _gen, scale)) = EXTEMELY_SLOW_TESTS
if let Some(slow) = EXTREMELY_SLOW_TESTS
.iter()
.find(|(id, generator, _scale)| *id == ctx.fn_ident && *generator == ctx.gen_kind)
.find(|slow| slow.matches_ctx(ctx))
{
// However, do not override if the extensive iteration count has been manually set.
if !(ctx.gen_kind == GeneratorKind::Extensive && EXTENSIVE_ITER_OVERRIDE.is_some()) {
total_iterations /= scale;
if !(ctx.extensive && EXTENSIVE_ITER_OVERRIDE.is_some()) {
total_iterations /= slow.reduce_factor;
}
}
@ -279,7 +315,7 @@ pub fn iteration_count(ctx: &CheckCtx, argnum: usize) -> u64 {
let total = ntests.pow(t_env.input_count.try_into().unwrap());
let seed_msg = match ctx.gen_kind {
GeneratorKind::QuickSpaced | GeneratorKind::Extensive => String::new(),
GeneratorKind::Spaced => String::new(),
GeneratorKind::Random => {
format!(
" using `{SEED_ENV}={}`",
@ -327,8 +363,8 @@ pub fn int_range(ctx: &CheckCtx, argnum: usize) -> RangeInclusive<i32> {
let extensive_range = (-0xfff)..=0xfffff;
match ctx.gen_kind {
GeneratorKind::Extensive => extensive_range,
GeneratorKind::QuickSpaced | GeneratorKind::Random => non_extensive_range,
_ if ctx.extensive => extensive_range,
GeneratorKind::Spaced | GeneratorKind::Random => non_extensive_range,
GeneratorKind::EdgeCases => extensive_range,
GeneratorKind::List => unimplemented!("shoudn't need range for {:?}", ctx.gen_kind),
}

View file

@ -312,12 +312,9 @@ where
let mut inner = || -> TestResult {
let mut allowed_ulp = ctx.ulp;
// Forbid overrides if the items came from an explicit list, as long as we are checking
// against either MPFR or the result itself.
let require_biteq = ctx.gen_kind == GeneratorKind::List && ctx.basis != CheckBasis::Musl;
match SpecialCase::check_float(input, actual, expected, ctx) {
_ if require_biteq => (),
// Forbid overrides if the items came from an explicit list
_ if ctx.gen_kind == GeneratorKind::List => (),
CheckAction::AssertSuccess => (),
CheckAction::AssertFailure(msg) => assert_failure_msg = Some(msg),
CheckAction::Custom(res) => return res,
@ -327,12 +324,20 @@ where
// Check when both are NaNs
if actual.is_nan() && expected.is_nan() {
if require_biteq && ctx.basis == CheckBasis::None {
ensure!(
actual.to_bits() == expected.to_bits(),
"mismatched NaN bitpatterns"
);
// Don't assert NaN bitwise equality if:
//
// * Testing against MPFR (there is a single NaN representation)
// * Testing against Musl except for explicit tests (Musl does some NaN quieting)
//
// In these cases, just the check that actual and expected are both NaNs is
// sufficient.
let skip_nan_biteq = ctx.basis == CheckBasis::Mpfr
|| (ctx.basis == CheckBasis::Musl && ctx.gen_kind != GeneratorKind::List);
if !skip_nan_biteq {
ensure!(actual.biteq(expected), "mismatched NaN bitpatterns");
}
// By default, NaNs have nothing special to check.
return Ok(());
} else if actual.is_nan() || expected.is_nan() {

View file

@ -65,7 +65,7 @@ macro_rules! musl_tests {
$(#[$attr])*
fn [< musl_quickspace_ $fn_name >]() {
type Op = libm_test::op::$fn_name::Routine;
let ctx = CheckCtx::new(Op::IDENTIFIER, BASIS, GeneratorKind::QuickSpaced);
let ctx = CheckCtx::new(Op::IDENTIFIER, BASIS, GeneratorKind::Spaced);
let cases = spaced::get_test_cases::<Op>(&ctx).0;
musl_runner::<Op>(&ctx, cases, musl_math_sys::$fn_name);
}

View file

@ -55,7 +55,7 @@ macro_rules! mp_tests {
$(#[$attr])*
fn [< mp_quickspace_ $fn_name >]() {
type Op = libm_test::op::$fn_name::Routine;
let ctx = CheckCtx::new(Op::IDENTIFIER, BASIS, GeneratorKind::QuickSpaced);
let ctx = CheckCtx::new(Op::IDENTIFIER, BASIS, GeneratorKind::Spaced);
let cases = spaced::get_test_cases::<Op>(&ctx).0;
mp_runner::<Op>(&ctx, cases);
}

View file

@ -17,7 +17,6 @@ use rayon::prelude::*;
use spaced::SpacedInput;
const BASIS: CheckBasis = CheckBasis::Mpfr;
const GEN_KIND: GeneratorKind = GeneratorKind::Extensive;
/// Run the extensive test suite.
pub fn run() {
@ -77,7 +76,7 @@ where
Op::RustArgs: SpacedInput<Op> + Send,
{
let test_name = format!("mp_extensive_{}", Op::NAME);
let ctx = CheckCtx::new(Op::IDENTIFIER, BASIS, GEN_KIND);
let ctx = CheckCtx::new(Op::IDENTIFIER, BASIS, GeneratorKind::Spaced).extensive(true);
let skip = skip_extensive_test(&ctx);
let runner = move || {

View file

@ -34,7 +34,7 @@ Usage is under the MIT license, available at
### Contribution
Contributions are licensed under both the MIT license and the Apache License,
Version 2.0, available at <htps://www.apache.org/licenses/LICENSE-2.0>. Unless
Version 2.0, available at <https://www.apache.org/licenses/LICENSE-2.0>. Unless
you explicitly state otherwise, any contribution intentionally submitted for
inclusion in the work by you, as defined in the Apache-2.0 license, shall be
dual licensed as mentioned, without any additional terms or conditions.

View file

@ -30,6 +30,12 @@ pub fn fmaf(mut x: f32, y: f32, z: f32) -> f32 {
x
}
// NB: `frintx` is technically the correct instruction for C's `rint`. However, in Rust (and LLVM
// by default), `rint` is identical to `roundeven` (no fpenv interaction) so we use the
// side-effect-free `frintn`.
//
// In general, C code that calls Rust's libm should assume that fpenv is ignored.
pub fn rint(mut x: f64) -> f64 {
// SAFETY: `frintn` is available with neon and has no side effects.
//

View file

@ -59,9 +59,17 @@ mod tests {
// Not required but we expect it
assert_biteq!(f(F::NAN, F::NAN), F::NAN);
assert_biteq!(f(F::NEG_NAN, F::NAN), F::NAN);
assert_biteq!(f(F::NAN, F::ONE), F::NAN);
assert_biteq!(f(F::NAN, F::NEG_ONE), F::NEG_NAN);
assert_biteq!(f(F::NAN, F::NEG_NAN), F::NEG_NAN);
assert_biteq!(f(F::NEG_NAN, F::NAN), F::NAN);
assert_biteq!(f(F::NEG_NAN, F::ONE), F::NAN);
assert_biteq!(f(F::NEG_NAN, F::NEG_ONE), F::NEG_NAN);
assert_biteq!(f(F::NEG_NAN, F::NEG_NAN), F::NEG_NAN);
assert_biteq!(f(F::ONE, F::NAN), F::ONE);
assert_biteq!(f(F::ONE, F::NEG_NAN), F::NEG_ONE);
assert_biteq!(f(F::NEG_ONE, F::NAN), F::ONE);
assert_biteq!(f(F::NEG_ONE, F::NEG_NAN), F::NEG_ONE);
}
#[test]

View file

@ -1,8 +0,0 @@
/// Sign of Y, magnitude of X (f32)
///
/// Constructs a number with the magnitude (absolute value) of its
/// first argument, `x`, and the sign of its second argument, `y`.
#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
pub fn copysignf(x: f32, y: f32) -> f32 {
super::generic::copysign(x, y)
}

View file

@ -1,8 +0,0 @@
/// Sign of Y, magnitude of X (f128)
///
/// Constructs a number with the magnitude (absolute value) of its
/// first argument, `x`, and the sign of its second argument, `y`.
#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
pub fn copysignf128(x: f128, y: f128) -> f128 {
super::generic::copysign(x, y)
}

View file

@ -1,8 +0,0 @@
/// Sign of Y, magnitude of X (f16)
///
/// Constructs a number with the magnitude (absolute value) of its
/// first argument, `x`, and the sign of its second argument, `y`.
#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
pub fn copysignf16(x: f16, y: f16) -> f16 {
super::generic::copysign(x, y)
}

View file

@ -1,39 +0,0 @@
/// Absolute value (magnitude) (f32)
///
/// Calculates the absolute value (magnitude) of the argument `x`,
/// by direct manipulation of the bit representation of `x`.
#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
pub fn fabsf(x: f32) -> f32 {
select_implementation! {
name: fabsf,
use_arch: all(target_arch = "wasm32", intrinsics_enabled),
args: x,
}
super::generic::fabs(x)
}
// PowerPC tests are failing on LLVM 13: https://github.com/rust-lang/rust/issues/88520
#[cfg(not(target_arch = "powerpc64"))]
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn sanity_check() {
assert_eq!(fabsf(-1.0), 1.0);
assert_eq!(fabsf(2.8), 2.8);
}
/// The spec: https://en.cppreference.com/w/cpp/numeric/math/fabs
#[test]
fn spec_tests() {
assert!(fabsf(f32::NAN).is_nan());
for f in [0.0, -0.0].iter().copied() {
assert_eq!(fabsf(f), 0.0);
}
for f in [f32::INFINITY, f32::NEG_INFINITY].iter().copied() {
assert_eq!(fabsf(f), f32::INFINITY);
}
}
}

View file

@ -1,31 +0,0 @@
/// Absolute value (magnitude) (f128)
///
/// Calculates the absolute value (magnitude) of the argument `x`,
/// by direct manipulation of the bit representation of `x`.
#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
pub fn fabsf128(x: f128) -> f128 {
super::generic::fabs(x)
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn sanity_check() {
assert_eq!(fabsf128(-1.0), 1.0);
assert_eq!(fabsf128(2.8), 2.8);
}
/// The spec: https://en.cppreference.com/w/cpp/numeric/math/fabs
#[test]
fn spec_tests() {
assert!(fabsf128(f128::NAN).is_nan());
for f in [0.0, -0.0].iter().copied() {
assert_eq!(fabsf128(f), 0.0);
}
for f in [f128::INFINITY, f128::NEG_INFINITY].iter().copied() {
assert_eq!(fabsf128(f), f128::INFINITY);
}
}
}

View file

@ -1,31 +0,0 @@
/// Absolute value (magnitude) (f16)
///
/// Calculates the absolute value (magnitude) of the argument `x`,
/// by direct manipulation of the bit representation of `x`.
#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
pub fn fabsf16(x: f16) -> f16 {
super::generic::fabs(x)
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn sanity_check() {
assert_eq!(fabsf16(-1.0), 1.0);
assert_eq!(fabsf16(2.8), 2.8);
}
/// The spec: https://en.cppreference.com/w/cpp/numeric/math/fabs
#[test]
fn spec_tests() {
assert!(fabsf16(f16::NAN).is_nan());
for f in [0.0, -0.0].iter().copied() {
assert_eq!(fabsf16(f), 0.0);
}
for f in [f16::INFINITY, f16::NEG_INFINITY].iter().copied() {
assert_eq!(fabsf16(f), f16::INFINITY);
}
}
}

View file

@ -1,12 +0,0 @@
/// Positive difference (f32)
///
/// Determines the positive difference between arguments, returning:
/// * x - y if x > y, or
/// * +0 if x <= y, or
/// * NAN if either argument is NAN.
///
/// A range error may occur.
#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
pub fn fdimf(x: f32, y: f32) -> f32 {
super::generic::fdim(x, y)
}

View file

@ -1,12 +0,0 @@
/// Positive difference (f128)
///
/// Determines the positive difference between arguments, returning:
/// * x - y if x > y, or
/// * +0 if x <= y, or
/// * NAN if either argument is NAN.
///
/// A range error may occur.
#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
pub fn fdimf128(x: f128, y: f128) -> f128 {
super::generic::fdim(x, y)
}

Some files were not shown because too many files have changed in this diff Show more