Merge pull request #4400 from RalfJung/rustup

Rustup
This commit is contained in:
Ralf Jung 2025-06-15 12:14:05 +00:00 committed by GitHub
commit 5770b90356
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
183 changed files with 2633 additions and 2917 deletions

View file

@ -373,8 +373,7 @@ impl<'a, 'tcx> TypeChecker<'a, 'tcx> {
}
fn unsized_feature_enabled(&self) -> bool {
let features = self.tcx().features();
features.unsized_locals() || features.unsized_fn_params()
self.tcx().features().unsized_fn_params()
}
/// Equate the inferred type and the annotated type for user type annotations
@ -957,7 +956,7 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
}
}
// When `unsized_fn_params` or `unsized_locals` is enabled, only function calls
// When `unsized_fn_params` is enabled, only function calls
// and nullary ops are checked in `check_call_dest`.
if !self.unsized_feature_enabled() {
match self.body.local_kind(local) {
@ -1941,7 +1940,7 @@ impl<'a, 'tcx> TypeChecker<'a, 'tcx> {
);
}
// When `unsized_fn_params` and `unsized_locals` are both not enabled,
// When `unsized_fn_params` is not enabled,
// this check is done at `check_local`.
if self.unsized_feature_enabled() {
let span = term.source_info.span;

View file

@ -32,10 +32,6 @@ impl<T: CoerceUnsized<U>, U> CoerceUnsized<Wrapper<U>> for Wrapper<T> {}
impl<T: DispatchFromDyn<U>, U> DispatchFromDyn<Wrapper<U>> for Wrapper<T> {}
trait Trait {
// This method isn't object-safe yet. Unsized by-value `self` is object-safe (but not callable
// without unsized_locals), but wrappers around `Self` currently are not.
// FIXME (mikeyhew) uncomment this when unsized rvalues object-safety is implemented
// fn wrapper(self: Wrapper<Self>) -> i32;
fn ptr_wrapper(self: Ptr<Wrapper<Self>>) -> i32;
fn wrapper_ptr(self: Wrapper<Ptr<Self>>) -> i32;
fn wrapper_ptr_wrapper(self: Wrapper<Ptr<Wrapper<Self>>>) -> i32;

View file

@ -37,10 +37,6 @@ impl<T: DispatchFromDyn<U>, U> DispatchFromDyn<Wrapper<U>> for Wrapper<T> {}
trait Trait {
// This method isn't object-safe yet. Unsized by-value `self` is object-safe (but not callable
// without unsized_locals), but wrappers around `Self` currently are not.
// FIXME (mikeyhew) uncomment this when unsized rvalues object-safety is implemented
// fn wrapper(self: Wrapper<Self>) -> i32;
fn ptr_wrapper(self: Ptr<Wrapper<Self>>) -> i32;
fn wrapper_ptr(self: Wrapper<Ptr<Self>>) -> i32;
fn wrapper_ptr_wrapper(self: Wrapper<Ptr<Wrapper<Self>>>) -> i32;

View file

@ -897,7 +897,7 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
fn checked_binop(
&mut self,
oop: OverflowOp,
typ: Ty<'_>,
typ: Ty<'tcx>,
lhs: Self::Value,
rhs: Self::Value,
) -> (Self::Value, Self::Value) {

View file

@ -14,7 +14,6 @@ use rustc_codegen_ssa::mir::place::PlaceRef;
use rustc_codegen_ssa::traits::*;
use rustc_data_structures::small_c_str::SmallCStr;
use rustc_hir::def_id::DefId;
use rustc_middle::bug;
use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrs;
use rustc_middle::ty::layout::{
FnAbiError, FnAbiOfHelpers, FnAbiRequest, HasTypingEnv, LayoutError, LayoutOfHelpers,
@ -484,73 +483,31 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
fn checked_binop(
&mut self,
oop: OverflowOp,
ty: Ty<'_>,
ty: Ty<'tcx>,
lhs: Self::Value,
rhs: Self::Value,
) -> (Self::Value, Self::Value) {
use rustc_middle::ty::IntTy::*;
use rustc_middle::ty::UintTy::*;
use rustc_middle::ty::{Int, Uint};
let (size, signed) = ty.int_size_and_signed(self.tcx);
let width = size.bits();
let new_kind = match ty.kind() {
Int(t @ Isize) => Int(t.normalize(self.tcx.sess.target.pointer_width)),
Uint(t @ Usize) => Uint(t.normalize(self.tcx.sess.target.pointer_width)),
t @ (Uint(_) | Int(_)) => *t,
_ => panic!("tried to get overflow intrinsic for op applied to non-int type"),
if oop == OverflowOp::Sub && !signed {
// Emit sub and icmp instead of llvm.usub.with.overflow. LLVM considers these
// to be the canonical form. It will attempt to reform llvm.usub.with.overflow
// in the backend if profitable.
let sub = self.sub(lhs, rhs);
let cmp = self.icmp(IntPredicate::IntULT, lhs, rhs);
return (sub, cmp);
}
let oop_str = match oop {
OverflowOp::Add => "add",
OverflowOp::Sub => "sub",
OverflowOp::Mul => "mul",
};
let name = match oop {
OverflowOp::Add => match new_kind {
Int(I8) => "llvm.sadd.with.overflow.i8",
Int(I16) => "llvm.sadd.with.overflow.i16",
Int(I32) => "llvm.sadd.with.overflow.i32",
Int(I64) => "llvm.sadd.with.overflow.i64",
Int(I128) => "llvm.sadd.with.overflow.i128",
let name = format!("llvm.{}{oop_str}.with.overflow", if signed { 's' } else { 'u' });
Uint(U8) => "llvm.uadd.with.overflow.i8",
Uint(U16) => "llvm.uadd.with.overflow.i16",
Uint(U32) => "llvm.uadd.with.overflow.i32",
Uint(U64) => "llvm.uadd.with.overflow.i64",
Uint(U128) => "llvm.uadd.with.overflow.i128",
_ => unreachable!(),
},
OverflowOp::Sub => match new_kind {
Int(I8) => "llvm.ssub.with.overflow.i8",
Int(I16) => "llvm.ssub.with.overflow.i16",
Int(I32) => "llvm.ssub.with.overflow.i32",
Int(I64) => "llvm.ssub.with.overflow.i64",
Int(I128) => "llvm.ssub.with.overflow.i128",
Uint(_) => {
// Emit sub and icmp instead of llvm.usub.with.overflow. LLVM considers these
// to be the canonical form. It will attempt to reform llvm.usub.with.overflow
// in the backend if profitable.
let sub = self.sub(lhs, rhs);
let cmp = self.icmp(IntPredicate::IntULT, lhs, rhs);
return (sub, cmp);
}
_ => unreachable!(),
},
OverflowOp::Mul => match new_kind {
Int(I8) => "llvm.smul.with.overflow.i8",
Int(I16) => "llvm.smul.with.overflow.i16",
Int(I32) => "llvm.smul.with.overflow.i32",
Int(I64) => "llvm.smul.with.overflow.i64",
Int(I128) => "llvm.smul.with.overflow.i128",
Uint(U8) => "llvm.umul.with.overflow.i8",
Uint(U16) => "llvm.umul.with.overflow.i16",
Uint(U32) => "llvm.umul.with.overflow.i32",
Uint(U64) => "llvm.umul.with.overflow.i64",
Uint(U128) => "llvm.umul.with.overflow.i128",
_ => unreachable!(),
},
};
let res = self.call_intrinsic(name, &[lhs, rhs]);
let res = self.call_intrinsic(&name, &[self.type_ix(width)], &[lhs, rhs]);
(self.extract_value(res, 0), self.extract_value(res, 1))
}
@ -954,11 +911,11 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
}
fn fptoui_sat(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
self.fptoint_sat(false, val, dest_ty)
self.call_intrinsic("llvm.fptoui.sat", &[dest_ty, self.val_ty(val)], &[val])
}
fn fptosi_sat(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
self.fptoint_sat(true, val, dest_ty)
self.call_intrinsic("llvm.fptosi.sat", &[dest_ty, self.val_ty(val)], &[val])
}
fn fptoui(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
@ -981,15 +938,12 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
if self.cx.type_kind(src_ty) != TypeKind::Vector {
let float_width = self.cx.float_width(src_ty);
let int_width = self.cx.int_width(dest_ty);
let name = match (int_width, float_width) {
(32, 32) => Some("llvm.wasm.trunc.unsigned.i32.f32"),
(32, 64) => Some("llvm.wasm.trunc.unsigned.i32.f64"),
(64, 32) => Some("llvm.wasm.trunc.unsigned.i64.f32"),
(64, 64) => Some("llvm.wasm.trunc.unsigned.i64.f64"),
_ => None,
};
if let Some(name) = name {
return self.call_intrinsic(name, &[val]);
if matches!((int_width, float_width), (32 | 64, 32 | 64)) {
return self.call_intrinsic(
"llvm.wasm.trunc.unsigned",
&[dest_ty, src_ty],
&[val],
);
}
}
}
@ -1003,15 +957,12 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
if self.cx.type_kind(src_ty) != TypeKind::Vector {
let float_width = self.cx.float_width(src_ty);
let int_width = self.cx.int_width(dest_ty);
let name = match (int_width, float_width) {
(32, 32) => Some("llvm.wasm.trunc.signed.i32.f32"),
(32, 64) => Some("llvm.wasm.trunc.signed.i32.f64"),
(64, 32) => Some("llvm.wasm.trunc.signed.i64.f32"),
(64, 64) => Some("llvm.wasm.trunc.signed.i64.f64"),
_ => None,
};
if let Some(name) = name {
return self.call_intrinsic(name, &[val]);
if matches!((int_width, float_width), (32 | 64, 32 | 64)) {
return self.call_intrinsic(
"llvm.wasm.trunc.signed",
&[dest_ty, src_ty],
&[val],
);
}
}
}
@ -1084,22 +1035,10 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
return None;
}
let name = match (ty.is_signed(), ty.primitive_size(self.tcx).bits()) {
(true, 8) => "llvm.scmp.i8.i8",
(true, 16) => "llvm.scmp.i8.i16",
(true, 32) => "llvm.scmp.i8.i32",
(true, 64) => "llvm.scmp.i8.i64",
(true, 128) => "llvm.scmp.i8.i128",
let size = ty.primitive_size(self.tcx);
let name = if ty.is_signed() { "llvm.scmp" } else { "llvm.ucmp" };
(false, 8) => "llvm.ucmp.i8.i8",
(false, 16) => "llvm.ucmp.i8.i16",
(false, 32) => "llvm.ucmp.i8.i32",
(false, 64) => "llvm.ucmp.i8.i64",
(false, 128) => "llvm.ucmp.i8.i128",
_ => bug!("three-way compare unsupported for type {ty:?}"),
};
Some(self.call_intrinsic(name, &[lhs, rhs]))
Some(self.call_intrinsic(&name, &[self.type_i8(), self.type_ix(size.bits())], &[lhs, rhs]))
}
/* Miscellaneous instructions */
@ -1385,11 +1324,11 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
}
fn lifetime_start(&mut self, ptr: &'ll Value, size: Size) {
self.call_lifetime_intrinsic("llvm.lifetime.start.p0i8", ptr, size);
self.call_lifetime_intrinsic("llvm.lifetime.start", ptr, size);
}
fn lifetime_end(&mut self, ptr: &'ll Value, size: Size) {
self.call_lifetime_intrinsic("llvm.lifetime.end.p0i8", ptr, size);
self.call_lifetime_intrinsic("llvm.lifetime.end", ptr, size);
}
fn call(
@ -1454,7 +1393,7 @@ impl<'ll> StaticBuilderMethods for Builder<'_, 'll, '_> {
// Forward to the `get_static` method of `CodegenCx`
let global = self.cx().get_static(def_id);
if self.cx().tcx.is_thread_local_static(def_id) {
let pointer = self.call_intrinsic("llvm.threadlocal.address", &[global]);
let pointer = self.call_intrinsic("llvm.threadlocal.address", &[], &[global]);
// Cast to default address space if globals are in a different addrspace
self.pointercast(pointer, self.type_ptr())
} else {
@ -1649,8 +1588,13 @@ impl<'a, 'll, CX: Borrow<SCx<'ll>>> GenericBuilder<'a, 'll, CX> {
}
impl<'a, 'll, 'tcx> Builder<'a, 'll, 'tcx> {
pub(crate) fn call_intrinsic(&mut self, intrinsic: &str, args: &[&'ll Value]) -> &'ll Value {
let (ty, f) = self.cx.get_intrinsic(intrinsic);
pub(crate) fn call_intrinsic(
&mut self,
base_name: &str,
type_params: &[&'ll Type],
args: &[&'ll Value],
) -> &'ll Value {
let (ty, f) = self.cx.get_intrinsic(base_name, type_params);
self.call(ty, None, None, f, args, None, None)
}
@ -1664,7 +1608,7 @@ impl<'a, 'll, 'tcx> Builder<'a, 'll, 'tcx> {
return;
}
self.call_intrinsic(intrinsic, &[self.cx.const_u64(size), ptr]);
self.call_intrinsic(intrinsic, &[self.type_ptr()], &[self.cx.const_u64(size), ptr]);
}
}
impl<'a, 'll, CX: Borrow<SCx<'ll>>> GenericBuilder<'a, 'll, CX> {
@ -1689,31 +1633,6 @@ impl<'a, 'll, CX: Borrow<SCx<'ll>>> GenericBuilder<'a, 'll, CX> {
}
}
impl<'a, 'll, 'tcx> Builder<'a, 'll, 'tcx> {
fn fptoint_sat(&mut self, signed: bool, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
let src_ty = self.cx.val_ty(val);
let (float_ty, int_ty, vector_length) = if self.cx.type_kind(src_ty) == TypeKind::Vector {
assert_eq!(self.cx.vector_length(src_ty), self.cx.vector_length(dest_ty));
(
self.cx.element_type(src_ty),
self.cx.element_type(dest_ty),
Some(self.cx.vector_length(src_ty)),
)
} else {
(src_ty, dest_ty, None)
};
let float_width = self.cx.float_width(float_ty);
let int_width = self.cx.int_width(int_ty);
let instr = if signed { "fptosi" } else { "fptoui" };
let name = if let Some(vector_length) = vector_length {
format!("llvm.{instr}.sat.v{vector_length}i{int_width}.v{vector_length}f{float_width}")
} else {
format!("llvm.{instr}.sat.i{int_width}.f{float_width}")
};
let f = self.declare_cfn(&name, llvm::UnnamedAddr::No, self.type_func(&[src_ty], dest_ty));
self.call(self.type_func(&[src_ty], dest_ty), None, None, f, &[val], None, None)
}
pub(crate) fn landing_pad(
&mut self,
ty: &'ll Type,
@ -1819,7 +1738,7 @@ impl<'a, 'll, 'tcx> Builder<'a, 'll, 'tcx> {
// llvm.type.test intrinsic. The LowerTypeTests link-time optimization pass replaces
// calls to this intrinsic with code to test type membership.
let typeid = self.get_metadata_value(typeid_metadata);
let cond = self.call_intrinsic("llvm.type.test", &[llfn, typeid]);
let cond = self.call_intrinsic("llvm.type.test", &[], &[llfn, typeid]);
let bb_pass = self.append_sibling_block("type_test.pass");
let bb_fail = self.append_sibling_block("type_test.fail");
self.cond_br(cond, bb_pass, bb_fail);
@ -1887,7 +1806,7 @@ impl<'a, 'll, 'tcx> Builder<'a, 'll, 'tcx> {
num_counters: &'ll Value,
index: &'ll Value,
) {
self.call_intrinsic("llvm.instrprof.increment", &[fn_name, hash, num_counters, index]);
self.call_intrinsic("llvm.instrprof.increment", &[], &[fn_name, hash, num_counters, index]);
}
/// Emits a call to `llvm.instrprof.mcdc.parameters`.
@ -1906,7 +1825,7 @@ impl<'a, 'll, 'tcx> Builder<'a, 'll, 'tcx> {
hash: &'ll Value,
bitmap_bits: &'ll Value,
) {
self.call_intrinsic("llvm.instrprof.mcdc.parameters", &[fn_name, hash, bitmap_bits]);
self.call_intrinsic("llvm.instrprof.mcdc.parameters", &[], &[fn_name, hash, bitmap_bits]);
}
#[instrument(level = "debug", skip(self))]
@ -1918,7 +1837,7 @@ impl<'a, 'll, 'tcx> Builder<'a, 'll, 'tcx> {
mcdc_temp: &'ll Value,
) {
let args = &[fn_name, hash, bitmap_index, mcdc_temp];
self.call_intrinsic("llvm.instrprof.mcdc.tvbitmap.update", args);
self.call_intrinsic("llvm.instrprof.mcdc.tvbitmap.update", &[], args);
}
#[instrument(level = "debug", skip(self))]

View file

@ -137,7 +137,8 @@ pub(crate) struct FullCx<'ll, 'tcx> {
eh_catch_typeinfo: Cell<Option<&'ll Value>>,
pub rust_try_fn: Cell<Option<(&'ll Type, &'ll Value)>>,
intrinsics: RefCell<FxHashMap<&'static str, (&'ll Type, &'ll Value)>>,
intrinsics:
RefCell<FxHashMap<(&'static str, SmallVec<[&'ll Type; 2]>), (&'ll Type, &'ll Value)>>,
/// A counter that is used for generating local symbol names
local_gen_sym_counter: Cell<usize>,
@ -842,17 +843,24 @@ impl<'ll, 'tcx> MiscCodegenMethods<'tcx> for CodegenCx<'ll, 'tcx> {
}
impl<'ll> CodegenCx<'ll, '_> {
pub(crate) fn get_intrinsic(&self, key: &str) -> (&'ll Type, &'ll Value) {
if let Some(v) = self.intrinsics.borrow().get(key).cloned() {
return v;
pub(crate) fn get_intrinsic(
&self,
base_name: &str,
type_params: &[&'ll Type],
) -> (&'ll Type, &'ll Value) {
if let Some(v) =
self.intrinsics.borrow().get(&(base_name, SmallVec::from_slice(type_params)))
{
return *v;
}
self.declare_intrinsic(key).unwrap_or_else(|| bug!("unknown intrinsic '{}'", key))
self.declare_intrinsic(base_name, type_params)
}
fn insert_intrinsic(
&self,
name: &'static str,
base_name: &'static str,
type_params: &[&'ll Type],
args: Option<&[&'ll llvm::Type]>,
ret: &'ll llvm::Type,
) -> (&'ll llvm::Type, &'ll llvm::Value) {
@ -861,372 +869,153 @@ impl<'ll> CodegenCx<'ll, '_> {
} else {
self.type_variadic_func(&[], ret)
};
let f = self.declare_cfn(name, llvm::UnnamedAddr::No, fn_ty);
self.intrinsics.borrow_mut().insert(name, (fn_ty, f));
let intrinsic = llvm::Intrinsic::lookup(base_name.as_bytes())
.expect("Unknown LLVM intrinsic `{base_name}`");
let full_name = if intrinsic.is_overloaded() {
&intrinsic.overloaded_name(self.llmod, type_params)
} else {
base_name
};
let f = self.declare_cfn(full_name, llvm::UnnamedAddr::No, fn_ty);
self.intrinsics
.borrow_mut()
.insert((base_name, SmallVec::from_slice(type_params)), (fn_ty, f));
(fn_ty, f)
}
fn declare_intrinsic(&self, key: &str) -> Option<(&'ll Type, &'ll Value)> {
fn declare_intrinsic(
&self,
base_name: &str,
type_params: &[&'ll Type],
) -> (&'ll Type, &'ll Value) {
macro_rules! param {
($index:literal) => {
type_params[$index]
};
($other:expr) => {
$other
};
}
macro_rules! ifn {
($name:expr, fn() -> $ret:expr) => (
if key == $name {
return Some(self.insert_intrinsic($name, Some(&[]), $ret));
}
);
($name:expr, fn(...) -> $ret:expr) => (
if key == $name {
return Some(self.insert_intrinsic($name, None, $ret));
if base_name == $name {
return self.insert_intrinsic($name, type_params, None, param!($ret));
}
);
($name:expr, fn($($arg:expr),*) -> $ret:expr) => (
if key == $name {
return Some(self.insert_intrinsic($name, Some(&[$($arg),*]), $ret));
if base_name == $name {
return self.insert_intrinsic($name, type_params, Some(&[$(param!($arg)),*]), param!($ret));
}
);
}
macro_rules! mk_struct {
($($field_ty:expr),*) => (self.type_struct( &[$($field_ty),*], false))
($($field_ty:expr),*) => (self.type_struct( &[$(param!($field_ty)),*], false))
}
let same_width_vector = |index, element_ty| {
self.type_vector(element_ty, self.vector_length(type_params[index]) as u64)
};
let ptr = self.type_ptr();
let void = self.type_void();
let i1 = self.type_i1();
let t_i8 = self.type_i8();
let t_i16 = self.type_i16();
let t_i32 = self.type_i32();
let t_i64 = self.type_i64();
let t_i128 = self.type_i128();
let t_isize = self.type_isize();
let t_f16 = self.type_f16();
let t_f32 = self.type_f32();
let t_f64 = self.type_f64();
let t_f128 = self.type_f128();
let t_metadata = self.type_metadata();
let t_token = self.type_token();
ifn!("llvm.wasm.get.exception", fn(t_token) -> ptr);
ifn!("llvm.wasm.get.ehselector", fn(t_token) -> t_i32);
ifn!("llvm.wasm.trunc.unsigned.i32.f32", fn(t_f32) -> t_i32);
ifn!("llvm.wasm.trunc.unsigned.i32.f64", fn(t_f64) -> t_i32);
ifn!("llvm.wasm.trunc.unsigned.i64.f32", fn(t_f32) -> t_i64);
ifn!("llvm.wasm.trunc.unsigned.i64.f64", fn(t_f64) -> t_i64);
ifn!("llvm.wasm.trunc.signed.i32.f32", fn(t_f32) -> t_i32);
ifn!("llvm.wasm.trunc.signed.i32.f64", fn(t_f64) -> t_i32);
ifn!("llvm.wasm.trunc.signed.i64.f32", fn(t_f32) -> t_i64);
ifn!("llvm.wasm.trunc.signed.i64.f64", fn(t_f64) -> t_i64);
ifn!("llvm.fptosi.sat.i8.f32", fn(t_f32) -> t_i8);
ifn!("llvm.fptosi.sat.i16.f32", fn(t_f32) -> t_i16);
ifn!("llvm.fptosi.sat.i32.f32", fn(t_f32) -> t_i32);
ifn!("llvm.fptosi.sat.i64.f32", fn(t_f32) -> t_i64);
ifn!("llvm.fptosi.sat.i128.f32", fn(t_f32) -> t_i128);
ifn!("llvm.fptosi.sat.i8.f64", fn(t_f64) -> t_i8);
ifn!("llvm.fptosi.sat.i16.f64", fn(t_f64) -> t_i16);
ifn!("llvm.fptosi.sat.i32.f64", fn(t_f64) -> t_i32);
ifn!("llvm.fptosi.sat.i64.f64", fn(t_f64) -> t_i64);
ifn!("llvm.fptosi.sat.i128.f64", fn(t_f64) -> t_i128);
ifn!("llvm.fptoui.sat.i8.f32", fn(t_f32) -> t_i8);
ifn!("llvm.fptoui.sat.i16.f32", fn(t_f32) -> t_i16);
ifn!("llvm.fptoui.sat.i32.f32", fn(t_f32) -> t_i32);
ifn!("llvm.fptoui.sat.i64.f32", fn(t_f32) -> t_i64);
ifn!("llvm.fptoui.sat.i128.f32", fn(t_f32) -> t_i128);
ifn!("llvm.fptoui.sat.i8.f64", fn(t_f64) -> t_i8);
ifn!("llvm.fptoui.sat.i16.f64", fn(t_f64) -> t_i16);
ifn!("llvm.fptoui.sat.i32.f64", fn(t_f64) -> t_i32);
ifn!("llvm.fptoui.sat.i64.f64", fn(t_f64) -> t_i64);
ifn!("llvm.fptoui.sat.i128.f64", fn(t_f64) -> t_i128);
ifn!("llvm.wasm.trunc.unsigned", fn(1) -> 0);
ifn!("llvm.wasm.trunc.signed", fn(1) -> 0);
ifn!("llvm.fptosi.sat", fn(1) -> 0);
ifn!("llvm.fptoui.sat", fn(1) -> 0);
ifn!("llvm.trap", fn() -> void);
ifn!("llvm.debugtrap", fn() -> void);
ifn!("llvm.frameaddress", fn(t_i32) -> ptr);
ifn!("llvm.powi.f16.i32", fn(t_f16, t_i32) -> t_f16);
ifn!("llvm.powi.f32.i32", fn(t_f32, t_i32) -> t_f32);
ifn!("llvm.powi.f64.i32", fn(t_f64, t_i32) -> t_f64);
ifn!("llvm.powi.f128.i32", fn(t_f128, t_i32) -> t_f128);
ifn!("llvm.powi", fn(0, 1) -> 0);
ifn!("llvm.pow", fn(0, 0) -> 0);
ifn!("llvm.sqrt", fn(0) -> 0);
ifn!("llvm.sin", fn(0) -> 0);
ifn!("llvm.cos", fn(0) -> 0);
ifn!("llvm.exp", fn(0) -> 0);
ifn!("llvm.exp2", fn(0) -> 0);
ifn!("llvm.log", fn(0) -> 0);
ifn!("llvm.log10", fn(0) -> 0);
ifn!("llvm.log2", fn(0) -> 0);
ifn!("llvm.fma", fn(0, 0, 0) -> 0);
ifn!("llvm.fmuladd", fn(0, 0, 0) -> 0);
ifn!("llvm.fabs", fn(0) -> 0);
ifn!("llvm.minnum", fn(0, 0) -> 0);
ifn!("llvm.minimum", fn(0, 0) -> 0);
ifn!("llvm.maxnum", fn(0, 0) -> 0);
ifn!("llvm.maximum", fn(0, 0) -> 0);
ifn!("llvm.floor", fn(0) -> 0);
ifn!("llvm.ceil", fn(0) -> 0);
ifn!("llvm.trunc", fn(0) -> 0);
ifn!("llvm.copysign", fn(0, 0) -> 0);
ifn!("llvm.round", fn(0) -> 0);
ifn!("llvm.rint", fn(0) -> 0);
ifn!("llvm.nearbyint", fn(0) -> 0);
ifn!("llvm.pow.f16", fn(t_f16, t_f16) -> t_f16);
ifn!("llvm.pow.f32", fn(t_f32, t_f32) -> t_f32);
ifn!("llvm.pow.f64", fn(t_f64, t_f64) -> t_f64);
ifn!("llvm.pow.f128", fn(t_f128, t_f128) -> t_f128);
ifn!("llvm.ctpop", fn(0) -> 0);
ifn!("llvm.ctlz", fn(0, i1) -> 0);
ifn!("llvm.cttz", fn(0, i1) -> 0);
ifn!("llvm.bswap", fn(0) -> 0);
ifn!("llvm.bitreverse", fn(0) -> 0);
ifn!("llvm.fshl", fn(0, 0, 0) -> 0);
ifn!("llvm.fshr", fn(0, 0, 0) -> 0);
ifn!("llvm.sqrt.f16", fn(t_f16) -> t_f16);
ifn!("llvm.sqrt.f32", fn(t_f32) -> t_f32);
ifn!("llvm.sqrt.f64", fn(t_f64) -> t_f64);
ifn!("llvm.sqrt.f128", fn(t_f128) -> t_f128);
ifn!("llvm.sadd.with.overflow", fn(0, 0) -> mk_struct! {0, i1});
ifn!("llvm.uadd.with.overflow", fn(0, 0) -> mk_struct! {0, i1});
ifn!("llvm.ssub.with.overflow", fn(0, 0) -> mk_struct! {0, i1});
ifn!("llvm.usub.with.overflow", fn(0, 0) -> mk_struct! {0, i1});
ifn!("llvm.smul.with.overflow", fn(0, 0) -> mk_struct! {0, i1});
ifn!("llvm.umul.with.overflow", fn(0, 0) -> mk_struct! {0, i1});
ifn!("llvm.sin.f16", fn(t_f16) -> t_f16);
ifn!("llvm.sin.f32", fn(t_f32) -> t_f32);
ifn!("llvm.sin.f64", fn(t_f64) -> t_f64);
ifn!("llvm.sin.f128", fn(t_f128) -> t_f128);
ifn!("llvm.sadd.sat", fn(0, 0) -> 0);
ifn!("llvm.uadd.sat", fn(0, 0) -> 0);
ifn!("llvm.ssub.sat", fn(0, 0) -> 0);
ifn!("llvm.usub.sat", fn(0, 0) -> 0);
ifn!("llvm.cos.f16", fn(t_f16) -> t_f16);
ifn!("llvm.cos.f32", fn(t_f32) -> t_f32);
ifn!("llvm.cos.f64", fn(t_f64) -> t_f64);
ifn!("llvm.cos.f128", fn(t_f128) -> t_f128);
ifn!("llvm.scmp", fn(1, 1) -> 0);
ifn!("llvm.ucmp", fn(1, 1) -> 0);
ifn!("llvm.exp.f16", fn(t_f16) -> t_f16);
ifn!("llvm.exp.f32", fn(t_f32) -> t_f32);
ifn!("llvm.exp.f64", fn(t_f64) -> t_f64);
ifn!("llvm.exp.f128", fn(t_f128) -> t_f128);
ifn!("llvm.lifetime.start", fn(t_i64, 0) -> void);
ifn!("llvm.lifetime.end", fn(t_i64, 0) -> void);
ifn!("llvm.exp2.f16", fn(t_f16) -> t_f16);
ifn!("llvm.exp2.f32", fn(t_f32) -> t_f32);
ifn!("llvm.exp2.f64", fn(t_f64) -> t_f64);
ifn!("llvm.exp2.f128", fn(t_f128) -> t_f128);
ifn!("llvm.is.constant", fn(0) -> i1);
ifn!("llvm.expect", fn(0, 0) -> 0);
ifn!("llvm.log.f16", fn(t_f16) -> t_f16);
ifn!("llvm.log.f32", fn(t_f32) -> t_f32);
ifn!("llvm.log.f64", fn(t_f64) -> t_f64);
ifn!("llvm.log.f128", fn(t_f128) -> t_f128);
ifn!("llvm.log10.f16", fn(t_f16) -> t_f16);
ifn!("llvm.log10.f32", fn(t_f32) -> t_f32);
ifn!("llvm.log10.f64", fn(t_f64) -> t_f64);
ifn!("llvm.log10.f128", fn(t_f128) -> t_f128);
ifn!("llvm.log2.f16", fn(t_f16) -> t_f16);
ifn!("llvm.log2.f32", fn(t_f32) -> t_f32);
ifn!("llvm.log2.f64", fn(t_f64) -> t_f64);
ifn!("llvm.log2.f128", fn(t_f128) -> t_f128);
ifn!("llvm.fma.f16", fn(t_f16, t_f16, t_f16) -> t_f16);
ifn!("llvm.fma.f32", fn(t_f32, t_f32, t_f32) -> t_f32);
ifn!("llvm.fma.f64", fn(t_f64, t_f64, t_f64) -> t_f64);
ifn!("llvm.fma.f128", fn(t_f128, t_f128, t_f128) -> t_f128);
ifn!("llvm.fmuladd.f16", fn(t_f16, t_f16, t_f16) -> t_f16);
ifn!("llvm.fmuladd.f32", fn(t_f32, t_f32, t_f32) -> t_f32);
ifn!("llvm.fmuladd.f64", fn(t_f64, t_f64, t_f64) -> t_f64);
ifn!("llvm.fmuladd.f128", fn(t_f128, t_f128, t_f128) -> t_f128);
ifn!("llvm.fabs.f16", fn(t_f16) -> t_f16);
ifn!("llvm.fabs.f32", fn(t_f32) -> t_f32);
ifn!("llvm.fabs.f64", fn(t_f64) -> t_f64);
ifn!("llvm.fabs.f128", fn(t_f128) -> t_f128);
ifn!("llvm.minnum.f16", fn(t_f16, t_f16) -> t_f16);
ifn!("llvm.minnum.f32", fn(t_f32, t_f32) -> t_f32);
ifn!("llvm.minnum.f64", fn(t_f64, t_f64) -> t_f64);
ifn!("llvm.minnum.f128", fn(t_f128, t_f128) -> t_f128);
ifn!("llvm.minimum.f16", fn(t_f16, t_f16) -> t_f16);
ifn!("llvm.minimum.f32", fn(t_f32, t_f32) -> t_f32);
ifn!("llvm.minimum.f64", fn(t_f64, t_f64) -> t_f64);
// There are issues on x86_64 and aarch64 with the f128 variant.
// - https://github.com/llvm/llvm-project/issues/139380
// - https://github.com/llvm/llvm-project/issues/139381
// ifn!("llvm.minimum.f128", fn(t_f128, t_f128) -> t_f128);
ifn!("llvm.maxnum.f16", fn(t_f16, t_f16) -> t_f16);
ifn!("llvm.maxnum.f32", fn(t_f32, t_f32) -> t_f32);
ifn!("llvm.maxnum.f64", fn(t_f64, t_f64) -> t_f64);
ifn!("llvm.maxnum.f128", fn(t_f128, t_f128) -> t_f128);
ifn!("llvm.maximum.f16", fn(t_f16, t_f16) -> t_f16);
ifn!("llvm.maximum.f32", fn(t_f32, t_f32) -> t_f32);
ifn!("llvm.maximum.f64", fn(t_f64, t_f64) -> t_f64);
// There are issues on x86_64 and aarch64 with the f128 variant.
// - https://github.com/llvm/llvm-project/issues/139380
// - https://github.com/llvm/llvm-project/issues/139381
// ifn!("llvm.maximum.f128", fn(t_f128, t_f128) -> t_f128);
ifn!("llvm.floor.f16", fn(t_f16) -> t_f16);
ifn!("llvm.floor.f32", fn(t_f32) -> t_f32);
ifn!("llvm.floor.f64", fn(t_f64) -> t_f64);
ifn!("llvm.floor.f128", fn(t_f128) -> t_f128);
ifn!("llvm.ceil.f16", fn(t_f16) -> t_f16);
ifn!("llvm.ceil.f32", fn(t_f32) -> t_f32);
ifn!("llvm.ceil.f64", fn(t_f64) -> t_f64);
ifn!("llvm.ceil.f128", fn(t_f128) -> t_f128);
ifn!("llvm.trunc.f16", fn(t_f16) -> t_f16);
ifn!("llvm.trunc.f32", fn(t_f32) -> t_f32);
ifn!("llvm.trunc.f64", fn(t_f64) -> t_f64);
ifn!("llvm.trunc.f128", fn(t_f128) -> t_f128);
ifn!("llvm.copysign.f16", fn(t_f16, t_f16) -> t_f16);
ifn!("llvm.copysign.f32", fn(t_f32, t_f32) -> t_f32);
ifn!("llvm.copysign.f64", fn(t_f64, t_f64) -> t_f64);
ifn!("llvm.copysign.f128", fn(t_f128, t_f128) -> t_f128);
ifn!("llvm.round.f16", fn(t_f16) -> t_f16);
ifn!("llvm.round.f32", fn(t_f32) -> t_f32);
ifn!("llvm.round.f64", fn(t_f64) -> t_f64);
ifn!("llvm.round.f128", fn(t_f128) -> t_f128);
ifn!("llvm.roundeven.f16", fn(t_f16) -> t_f16);
ifn!("llvm.roundeven.f32", fn(t_f32) -> t_f32);
ifn!("llvm.roundeven.f64", fn(t_f64) -> t_f64);
ifn!("llvm.roundeven.f128", fn(t_f128) -> t_f128);
ifn!("llvm.rint.f16", fn(t_f16) -> t_f16);
ifn!("llvm.rint.f32", fn(t_f32) -> t_f32);
ifn!("llvm.rint.f64", fn(t_f64) -> t_f64);
ifn!("llvm.rint.f128", fn(t_f128) -> t_f128);
ifn!("llvm.nearbyint.f16", fn(t_f16) -> t_f16);
ifn!("llvm.nearbyint.f32", fn(t_f32) -> t_f32);
ifn!("llvm.nearbyint.f64", fn(t_f64) -> t_f64);
ifn!("llvm.nearbyint.f128", fn(t_f128) -> t_f128);
ifn!("llvm.ctpop.i8", fn(t_i8) -> t_i8);
ifn!("llvm.ctpop.i16", fn(t_i16) -> t_i16);
ifn!("llvm.ctpop.i32", fn(t_i32) -> t_i32);
ifn!("llvm.ctpop.i64", fn(t_i64) -> t_i64);
ifn!("llvm.ctpop.i128", fn(t_i128) -> t_i128);
ifn!("llvm.ctlz.i8", fn(t_i8, i1) -> t_i8);
ifn!("llvm.ctlz.i16", fn(t_i16, i1) -> t_i16);
ifn!("llvm.ctlz.i32", fn(t_i32, i1) -> t_i32);
ifn!("llvm.ctlz.i64", fn(t_i64, i1) -> t_i64);
ifn!("llvm.ctlz.i128", fn(t_i128, i1) -> t_i128);
ifn!("llvm.cttz.i8", fn(t_i8, i1) -> t_i8);
ifn!("llvm.cttz.i16", fn(t_i16, i1) -> t_i16);
ifn!("llvm.cttz.i32", fn(t_i32, i1) -> t_i32);
ifn!("llvm.cttz.i64", fn(t_i64, i1) -> t_i64);
ifn!("llvm.cttz.i128", fn(t_i128, i1) -> t_i128);
ifn!("llvm.bswap.i16", fn(t_i16) -> t_i16);
ifn!("llvm.bswap.i32", fn(t_i32) -> t_i32);
ifn!("llvm.bswap.i64", fn(t_i64) -> t_i64);
ifn!("llvm.bswap.i128", fn(t_i128) -> t_i128);
ifn!("llvm.bitreverse.i8", fn(t_i8) -> t_i8);
ifn!("llvm.bitreverse.i16", fn(t_i16) -> t_i16);
ifn!("llvm.bitreverse.i32", fn(t_i32) -> t_i32);
ifn!("llvm.bitreverse.i64", fn(t_i64) -> t_i64);
ifn!("llvm.bitreverse.i128", fn(t_i128) -> t_i128);
ifn!("llvm.fshl.i8", fn(t_i8, t_i8, t_i8) -> t_i8);
ifn!("llvm.fshl.i16", fn(t_i16, t_i16, t_i16) -> t_i16);
ifn!("llvm.fshl.i32", fn(t_i32, t_i32, t_i32) -> t_i32);
ifn!("llvm.fshl.i64", fn(t_i64, t_i64, t_i64) -> t_i64);
ifn!("llvm.fshl.i128", fn(t_i128, t_i128, t_i128) -> t_i128);
ifn!("llvm.fshr.i8", fn(t_i8, t_i8, t_i8) -> t_i8);
ifn!("llvm.fshr.i16", fn(t_i16, t_i16, t_i16) -> t_i16);
ifn!("llvm.fshr.i32", fn(t_i32, t_i32, t_i32) -> t_i32);
ifn!("llvm.fshr.i64", fn(t_i64, t_i64, t_i64) -> t_i64);
ifn!("llvm.fshr.i128", fn(t_i128, t_i128, t_i128) -> t_i128);
ifn!("llvm.sadd.with.overflow.i8", fn(t_i8, t_i8) -> mk_struct! {t_i8, i1});
ifn!("llvm.sadd.with.overflow.i16", fn(t_i16, t_i16) -> mk_struct! {t_i16, i1});
ifn!("llvm.sadd.with.overflow.i32", fn(t_i32, t_i32) -> mk_struct! {t_i32, i1});
ifn!("llvm.sadd.with.overflow.i64", fn(t_i64, t_i64) -> mk_struct! {t_i64, i1});
ifn!("llvm.sadd.with.overflow.i128", fn(t_i128, t_i128) -> mk_struct! {t_i128, i1});
ifn!("llvm.uadd.with.overflow.i8", fn(t_i8, t_i8) -> mk_struct! {t_i8, i1});
ifn!("llvm.uadd.with.overflow.i16", fn(t_i16, t_i16) -> mk_struct! {t_i16, i1});
ifn!("llvm.uadd.with.overflow.i32", fn(t_i32, t_i32) -> mk_struct! {t_i32, i1});
ifn!("llvm.uadd.with.overflow.i64", fn(t_i64, t_i64) -> mk_struct! {t_i64, i1});
ifn!("llvm.uadd.with.overflow.i128", fn(t_i128, t_i128) -> mk_struct! {t_i128, i1});
ifn!("llvm.ssub.with.overflow.i8", fn(t_i8, t_i8) -> mk_struct! {t_i8, i1});
ifn!("llvm.ssub.with.overflow.i16", fn(t_i16, t_i16) -> mk_struct! {t_i16, i1});
ifn!("llvm.ssub.with.overflow.i32", fn(t_i32, t_i32) -> mk_struct! {t_i32, i1});
ifn!("llvm.ssub.with.overflow.i64", fn(t_i64, t_i64) -> mk_struct! {t_i64, i1});
ifn!("llvm.ssub.with.overflow.i128", fn(t_i128, t_i128) -> mk_struct! {t_i128, i1});
ifn!("llvm.usub.with.overflow.i8", fn(t_i8, t_i8) -> mk_struct! {t_i8, i1});
ifn!("llvm.usub.with.overflow.i16", fn(t_i16, t_i16) -> mk_struct! {t_i16, i1});
ifn!("llvm.usub.with.overflow.i32", fn(t_i32, t_i32) -> mk_struct! {t_i32, i1});
ifn!("llvm.usub.with.overflow.i64", fn(t_i64, t_i64) -> mk_struct! {t_i64, i1});
ifn!("llvm.usub.with.overflow.i128", fn(t_i128, t_i128) -> mk_struct! {t_i128, i1});
ifn!("llvm.smul.with.overflow.i8", fn(t_i8, t_i8) -> mk_struct! {t_i8, i1});
ifn!("llvm.smul.with.overflow.i16", fn(t_i16, t_i16) -> mk_struct! {t_i16, i1});
ifn!("llvm.smul.with.overflow.i32", fn(t_i32, t_i32) -> mk_struct! {t_i32, i1});
ifn!("llvm.smul.with.overflow.i64", fn(t_i64, t_i64) -> mk_struct! {t_i64, i1});
ifn!("llvm.smul.with.overflow.i128", fn(t_i128, t_i128) -> mk_struct! {t_i128, i1});
ifn!("llvm.umul.with.overflow.i8", fn(t_i8, t_i8) -> mk_struct! {t_i8, i1});
ifn!("llvm.umul.with.overflow.i16", fn(t_i16, t_i16) -> mk_struct! {t_i16, i1});
ifn!("llvm.umul.with.overflow.i32", fn(t_i32, t_i32) -> mk_struct! {t_i32, i1});
ifn!("llvm.umul.with.overflow.i64", fn(t_i64, t_i64) -> mk_struct! {t_i64, i1});
ifn!("llvm.umul.with.overflow.i128", fn(t_i128, t_i128) -> mk_struct! {t_i128, i1});
ifn!("llvm.sadd.sat.i8", fn(t_i8, t_i8) -> t_i8);
ifn!("llvm.sadd.sat.i16", fn(t_i16, t_i16) -> t_i16);
ifn!("llvm.sadd.sat.i32", fn(t_i32, t_i32) -> t_i32);
ifn!("llvm.sadd.sat.i64", fn(t_i64, t_i64) -> t_i64);
ifn!("llvm.sadd.sat.i128", fn(t_i128, t_i128) -> t_i128);
ifn!("llvm.uadd.sat.i8", fn(t_i8, t_i8) -> t_i8);
ifn!("llvm.uadd.sat.i16", fn(t_i16, t_i16) -> t_i16);
ifn!("llvm.uadd.sat.i32", fn(t_i32, t_i32) -> t_i32);
ifn!("llvm.uadd.sat.i64", fn(t_i64, t_i64) -> t_i64);
ifn!("llvm.uadd.sat.i128", fn(t_i128, t_i128) -> t_i128);
ifn!("llvm.ssub.sat.i8", fn(t_i8, t_i8) -> t_i8);
ifn!("llvm.ssub.sat.i16", fn(t_i16, t_i16) -> t_i16);
ifn!("llvm.ssub.sat.i32", fn(t_i32, t_i32) -> t_i32);
ifn!("llvm.ssub.sat.i64", fn(t_i64, t_i64) -> t_i64);
ifn!("llvm.ssub.sat.i128", fn(t_i128, t_i128) -> t_i128);
ifn!("llvm.usub.sat.i8", fn(t_i8, t_i8) -> t_i8);
ifn!("llvm.usub.sat.i16", fn(t_i16, t_i16) -> t_i16);
ifn!("llvm.usub.sat.i32", fn(t_i32, t_i32) -> t_i32);
ifn!("llvm.usub.sat.i64", fn(t_i64, t_i64) -> t_i64);
ifn!("llvm.usub.sat.i128", fn(t_i128, t_i128) -> t_i128);
ifn!("llvm.scmp.i8.i8", fn(t_i8, t_i8) -> t_i8);
ifn!("llvm.scmp.i8.i16", fn(t_i16, t_i16) -> t_i8);
ifn!("llvm.scmp.i8.i32", fn(t_i32, t_i32) -> t_i8);
ifn!("llvm.scmp.i8.i64", fn(t_i64, t_i64) -> t_i8);
ifn!("llvm.scmp.i8.i128", fn(t_i128, t_i128) -> t_i8);
ifn!("llvm.ucmp.i8.i8", fn(t_i8, t_i8) -> t_i8);
ifn!("llvm.ucmp.i8.i16", fn(t_i16, t_i16) -> t_i8);
ifn!("llvm.ucmp.i8.i32", fn(t_i32, t_i32) -> t_i8);
ifn!("llvm.ucmp.i8.i64", fn(t_i64, t_i64) -> t_i8);
ifn!("llvm.ucmp.i8.i128", fn(t_i128, t_i128) -> t_i8);
ifn!("llvm.lifetime.start.p0i8", fn(t_i64, ptr) -> void);
ifn!("llvm.lifetime.end.p0i8", fn(t_i64, ptr) -> void);
// FIXME: This is an infinitesimally small portion of the types you can
// pass to this intrinsic, if we can ever lazily register intrinsics we
// should register these when they're used, that way any type can be
// passed.
ifn!("llvm.is.constant.i1", fn(i1) -> i1);
ifn!("llvm.is.constant.i8", fn(t_i8) -> i1);
ifn!("llvm.is.constant.i16", fn(t_i16) -> i1);
ifn!("llvm.is.constant.i32", fn(t_i32) -> i1);
ifn!("llvm.is.constant.i64", fn(t_i64) -> i1);
ifn!("llvm.is.constant.i128", fn(t_i128) -> i1);
ifn!("llvm.is.constant.isize", fn(t_isize) -> i1);
ifn!("llvm.is.constant.f16", fn(t_f16) -> i1);
ifn!("llvm.is.constant.f32", fn(t_f32) -> i1);
ifn!("llvm.is.constant.f64", fn(t_f64) -> i1);
ifn!("llvm.is.constant.f128", fn(t_f128) -> i1);
ifn!("llvm.is.constant.ptr", fn(ptr) -> i1);
ifn!("llvm.expect.i1", fn(i1, i1) -> i1);
ifn!("llvm.eh.typeid.for", fn(ptr) -> t_i32);
ifn!("llvm.eh.typeid.for", fn(0) -> t_i32);
ifn!("llvm.localescape", fn(...) -> void);
ifn!("llvm.localrecover", fn(ptr, ptr, t_i32) -> ptr);
ifn!("llvm.x86.seh.recoverfp", fn(ptr, ptr) -> ptr);
ifn!("llvm.assume", fn(i1) -> void);
ifn!("llvm.prefetch", fn(ptr, t_i32, t_i32, t_i32) -> void);
ifn!("llvm.prefetch", fn(0, t_i32, t_i32, t_i32) -> void);
// This isn't an "LLVM intrinsic", but LLVM's optimization passes
// recognize it like one (including turning it into `bcmp` sometimes)
// and we use it to implement intrinsics like `raw_eq` and `compare_bytes`
match self.sess().target.arch.as_ref() {
"avr" | "msp430" => ifn!("memcmp", fn(ptr, ptr, t_isize) -> t_i16),
_ => ifn!("memcmp", fn(ptr, ptr, t_isize) -> t_i32),
if base_name == "memcmp" {
let fn_ty = self.type_func(&[ptr, ptr, t_isize], self.type_int());
let f = self.declare_cfn("memcmp", llvm::UnnamedAddr::No, fn_ty);
self.intrinsics.borrow_mut().insert(("memcmp", SmallVec::new()), (fn_ty, f));
return (fn_ty, f);
}
// variadic intrinsics
ifn!("llvm.va_start", fn(ptr) -> void);
ifn!("llvm.va_end", fn(ptr) -> void);
ifn!("llvm.va_copy", fn(ptr, ptr) -> void);
ifn!("llvm.va_start", fn(0) -> void);
ifn!("llvm.va_end", fn(0) -> void);
ifn!("llvm.va_copy", fn(0, 0) -> void);
if self.sess().instrument_coverage() {
ifn!("llvm.instrprof.increment", fn(ptr, t_i64, t_i32, t_i32) -> void);
@ -1238,14 +1027,19 @@ impl<'ll> CodegenCx<'ll, '_> {
ifn!("llvm.type.checked.load", fn(ptr, t_i32, t_metadata) -> mk_struct! {ptr, i1});
if self.sess().opts.debuginfo != DebugInfo::None {
ifn!("llvm.dbg.declare", fn(t_metadata, t_metadata) -> void);
ifn!("llvm.dbg.value", fn(t_metadata, t_i64, t_metadata) -> void);
ifn!("llvm.dbg.declare", fn(t_metadata, t_metadata, t_metadata) -> void);
ifn!("llvm.dbg.value", fn(t_metadata, t_metadata, t_metadata) -> void);
}
ifn!("llvm.ptrmask", fn(ptr, t_isize) -> ptr);
ifn!("llvm.ptrmask", fn(0, 1) -> 0);
ifn!("llvm.threadlocal.address", fn(ptr) -> ptr);
None
ifn!("llvm.masked.load", fn(1, t_i32, same_width_vector(0, i1), 0) -> 0);
ifn!("llvm.masked.store", fn(0, 1, t_i32, same_width_vector(0, i1)) -> void);
ifn!("llvm.masked.gather", fn(1, t_i32, same_width_vector(0, i1), 0) -> 0);
ifn!("llvm.masked.scatter", fn(0, 1, t_i32, same_width_vector(0, i1)) -> void);
bug!("Unknown intrinsic: `{base_name}`")
}
pub(crate) fn eh_catch_typeinfo(&self) -> &'ll Value {

View file

@ -15,7 +15,7 @@ use rustc_middle::ty::{self, GenericArgsRef, Ty};
use rustc_middle::{bug, span_bug};
use rustc_span::{Span, Symbol, sym};
use rustc_symbol_mangling::mangle_internal_symbol;
use rustc_target::spec::{HasTargetSpec, PanicStrategy};
use rustc_target::spec::PanicStrategy;
use tracing::debug;
use crate::abi::FnAbiLlvmExt;
@ -27,137 +27,142 @@ use crate::type_of::LayoutLlvmExt;
use crate::va_arg::emit_va_arg;
use crate::value::Value;
fn get_simple_intrinsic<'ll>(
cx: &CodegenCx<'ll, '_>,
fn call_simple_intrinsic<'ll, 'tcx>(
bx: &mut Builder<'_, 'll, 'tcx>,
name: Symbol,
) -> Option<(&'ll Type, &'ll Value)> {
let llvm_name = match name {
sym::sqrtf16 => "llvm.sqrt.f16",
sym::sqrtf32 => "llvm.sqrt.f32",
sym::sqrtf64 => "llvm.sqrt.f64",
sym::sqrtf128 => "llvm.sqrt.f128",
args: &[OperandRef<'tcx, &'ll Value>],
) -> Option<&'ll Value> {
let (base_name, type_params): (&'static str, &[&'ll Type]) = match name {
sym::sqrtf16 => ("llvm.sqrt", &[bx.type_f16()]),
sym::sqrtf32 => ("llvm.sqrt", &[bx.type_f32()]),
sym::sqrtf64 => ("llvm.sqrt", &[bx.type_f64()]),
sym::sqrtf128 => ("llvm.sqrt", &[bx.type_f128()]),
sym::powif16 => "llvm.powi.f16.i32",
sym::powif32 => "llvm.powi.f32.i32",
sym::powif64 => "llvm.powi.f64.i32",
sym::powif128 => "llvm.powi.f128.i32",
sym::powif16 => ("llvm.powi", &[bx.type_f16(), bx.type_i32()]),
sym::powif32 => ("llvm.powi", &[bx.type_f32(), bx.type_i32()]),
sym::powif64 => ("llvm.powi", &[bx.type_f64(), bx.type_i32()]),
sym::powif128 => ("llvm.powi", &[bx.type_f128(), bx.type_i32()]),
sym::sinf16 => "llvm.sin.f16",
sym::sinf32 => "llvm.sin.f32",
sym::sinf64 => "llvm.sin.f64",
sym::sinf128 => "llvm.sin.f128",
sym::sinf16 => ("llvm.sin", &[bx.type_f16()]),
sym::sinf32 => ("llvm.sin", &[bx.type_f32()]),
sym::sinf64 => ("llvm.sin", &[bx.type_f64()]),
sym::sinf128 => ("llvm.sin", &[bx.type_f128()]),
sym::cosf16 => "llvm.cos.f16",
sym::cosf32 => "llvm.cos.f32",
sym::cosf64 => "llvm.cos.f64",
sym::cosf128 => "llvm.cos.f128",
sym::cosf16 => ("llvm.cos", &[bx.type_f16()]),
sym::cosf32 => ("llvm.cos", &[bx.type_f32()]),
sym::cosf64 => ("llvm.cos", &[bx.type_f64()]),
sym::cosf128 => ("llvm.cos", &[bx.type_f128()]),
sym::powf16 => "llvm.pow.f16",
sym::powf32 => "llvm.pow.f32",
sym::powf64 => "llvm.pow.f64",
sym::powf128 => "llvm.pow.f128",
sym::powf16 => ("llvm.pow", &[bx.type_f16()]),
sym::powf32 => ("llvm.pow", &[bx.type_f32()]),
sym::powf64 => ("llvm.pow", &[bx.type_f64()]),
sym::powf128 => ("llvm.pow", &[bx.type_f128()]),
sym::expf16 => "llvm.exp.f16",
sym::expf32 => "llvm.exp.f32",
sym::expf64 => "llvm.exp.f64",
sym::expf128 => "llvm.exp.f128",
sym::expf16 => ("llvm.exp", &[bx.type_f16()]),
sym::expf32 => ("llvm.exp", &[bx.type_f32()]),
sym::expf64 => ("llvm.exp", &[bx.type_f64()]),
sym::expf128 => ("llvm.exp", &[bx.type_f128()]),
sym::exp2f16 => "llvm.exp2.f16",
sym::exp2f32 => "llvm.exp2.f32",
sym::exp2f64 => "llvm.exp2.f64",
sym::exp2f128 => "llvm.exp2.f128",
sym::exp2f16 => ("llvm.exp2", &[bx.type_f16()]),
sym::exp2f32 => ("llvm.exp2", &[bx.type_f32()]),
sym::exp2f64 => ("llvm.exp2", &[bx.type_f64()]),
sym::exp2f128 => ("llvm.exp2", &[bx.type_f128()]),
sym::logf16 => "llvm.log.f16",
sym::logf32 => "llvm.log.f32",
sym::logf64 => "llvm.log.f64",
sym::logf128 => "llvm.log.f128",
sym::logf16 => ("llvm.log", &[bx.type_f16()]),
sym::logf32 => ("llvm.log", &[bx.type_f32()]),
sym::logf64 => ("llvm.log", &[bx.type_f64()]),
sym::logf128 => ("llvm.log", &[bx.type_f128()]),
sym::log10f16 => "llvm.log10.f16",
sym::log10f32 => "llvm.log10.f32",
sym::log10f64 => "llvm.log10.f64",
sym::log10f128 => "llvm.log10.f128",
sym::log10f16 => ("llvm.log10", &[bx.type_f16()]),
sym::log10f32 => ("llvm.log10", &[bx.type_f32()]),
sym::log10f64 => ("llvm.log10", &[bx.type_f64()]),
sym::log10f128 => ("llvm.log10", &[bx.type_f128()]),
sym::log2f16 => "llvm.log2.f16",
sym::log2f32 => "llvm.log2.f32",
sym::log2f64 => "llvm.log2.f64",
sym::log2f128 => "llvm.log2.f128",
sym::log2f16 => ("llvm.log2", &[bx.type_f16()]),
sym::log2f32 => ("llvm.log2", &[bx.type_f32()]),
sym::log2f64 => ("llvm.log2", &[bx.type_f64()]),
sym::log2f128 => ("llvm.log2", &[bx.type_f128()]),
sym::fmaf16 => "llvm.fma.f16",
sym::fmaf32 => "llvm.fma.f32",
sym::fmaf64 => "llvm.fma.f64",
sym::fmaf128 => "llvm.fma.f128",
sym::fmaf16 => ("llvm.fma", &[bx.type_f16()]),
sym::fmaf32 => ("llvm.fma", &[bx.type_f32()]),
sym::fmaf64 => ("llvm.fma", &[bx.type_f64()]),
sym::fmaf128 => ("llvm.fma", &[bx.type_f128()]),
sym::fmuladdf16 => "llvm.fmuladd.f16",
sym::fmuladdf32 => "llvm.fmuladd.f32",
sym::fmuladdf64 => "llvm.fmuladd.f64",
sym::fmuladdf128 => "llvm.fmuladd.f128",
sym::fmuladdf16 => ("llvm.fmuladd", &[bx.type_f16()]),
sym::fmuladdf32 => ("llvm.fmuladd", &[bx.type_f32()]),
sym::fmuladdf64 => ("llvm.fmuladd", &[bx.type_f64()]),
sym::fmuladdf128 => ("llvm.fmuladd", &[bx.type_f128()]),
sym::fabsf16 => "llvm.fabs.f16",
sym::fabsf32 => "llvm.fabs.f32",
sym::fabsf64 => "llvm.fabs.f64",
sym::fabsf128 => "llvm.fabs.f128",
sym::fabsf16 => ("llvm.fabs", &[bx.type_f16()]),
sym::fabsf32 => ("llvm.fabs", &[bx.type_f32()]),
sym::fabsf64 => ("llvm.fabs", &[bx.type_f64()]),
sym::fabsf128 => ("llvm.fabs", &[bx.type_f128()]),
sym::minnumf16 => "llvm.minnum.f16",
sym::minnumf32 => "llvm.minnum.f32",
sym::minnumf64 => "llvm.minnum.f64",
sym::minnumf128 => "llvm.minnum.f128",
sym::minnumf16 => ("llvm.minnum", &[bx.type_f16()]),
sym::minnumf32 => ("llvm.minnum", &[bx.type_f32()]),
sym::minnumf64 => ("llvm.minnum", &[bx.type_f64()]),
sym::minnumf128 => ("llvm.minnum", &[bx.type_f128()]),
sym::minimumf16 => "llvm.minimum.f16",
sym::minimumf32 => "llvm.minimum.f32",
sym::minimumf64 => "llvm.minimum.f64",
sym::minimumf16 => ("llvm.minimum", &[bx.type_f16()]),
sym::minimumf32 => ("llvm.minimum", &[bx.type_f32()]),
sym::minimumf64 => ("llvm.minimum", &[bx.type_f64()]),
// There are issues on x86_64 and aarch64 with the f128 variant,
// let's instead use the instrinsic fallback body.
// sym::minimumf128 => "llvm.minimum.f128",
sym::maxnumf16 => "llvm.maxnum.f16",
sym::maxnumf32 => "llvm.maxnum.f32",
sym::maxnumf64 => "llvm.maxnum.f64",
sym::maxnumf128 => "llvm.maxnum.f128",
// sym::minimumf128 => ("llvm.minimum", &[cx.type_f128()]),
sym::maxnumf16 => ("llvm.maxnum", &[bx.type_f16()]),
sym::maxnumf32 => ("llvm.maxnum", &[bx.type_f32()]),
sym::maxnumf64 => ("llvm.maxnum", &[bx.type_f64()]),
sym::maxnumf128 => ("llvm.maxnum", &[bx.type_f128()]),
sym::maximumf16 => "llvm.maximum.f16",
sym::maximumf32 => "llvm.maximum.f32",
sym::maximumf64 => "llvm.maximum.f64",
sym::maximumf16 => ("llvm.maximum", &[bx.type_f16()]),
sym::maximumf32 => ("llvm.maximum", &[bx.type_f32()]),
sym::maximumf64 => ("llvm.maximum", &[bx.type_f64()]),
// There are issues on x86_64 and aarch64 with the f128 variant,
// let's instead use the instrinsic fallback body.
// sym::maximumf128 => "llvm.maximum.f128",
sym::copysignf16 => "llvm.copysign.f16",
sym::copysignf32 => "llvm.copysign.f32",
sym::copysignf64 => "llvm.copysign.f64",
sym::copysignf128 => "llvm.copysign.f128",
// sym::maximumf128 => ("llvm.maximum", &[cx.type_f128()]),
sym::copysignf16 => ("llvm.copysign", &[bx.type_f16()]),
sym::copysignf32 => ("llvm.copysign", &[bx.type_f32()]),
sym::copysignf64 => ("llvm.copysign", &[bx.type_f64()]),
sym::copysignf128 => ("llvm.copysign", &[bx.type_f128()]),
sym::floorf16 => "llvm.floor.f16",
sym::floorf32 => "llvm.floor.f32",
sym::floorf64 => "llvm.floor.f64",
sym::floorf128 => "llvm.floor.f128",
sym::floorf16 => ("llvm.floor", &[bx.type_f16()]),
sym::floorf32 => ("llvm.floor", &[bx.type_f32()]),
sym::floorf64 => ("llvm.floor", &[bx.type_f64()]),
sym::floorf128 => ("llvm.floor", &[bx.type_f128()]),
sym::ceilf16 => "llvm.ceil.f16",
sym::ceilf32 => "llvm.ceil.f32",
sym::ceilf64 => "llvm.ceil.f64",
sym::ceilf128 => "llvm.ceil.f128",
sym::ceilf16 => ("llvm.ceil", &[bx.type_f16()]),
sym::ceilf32 => ("llvm.ceil", &[bx.type_f32()]),
sym::ceilf64 => ("llvm.ceil", &[bx.type_f64()]),
sym::ceilf128 => ("llvm.ceil", &[bx.type_f128()]),
sym::truncf16 => "llvm.trunc.f16",
sym::truncf32 => "llvm.trunc.f32",
sym::truncf64 => "llvm.trunc.f64",
sym::truncf128 => "llvm.trunc.f128",
sym::truncf16 => ("llvm.trunc", &[bx.type_f16()]),
sym::truncf32 => ("llvm.trunc", &[bx.type_f32()]),
sym::truncf64 => ("llvm.trunc", &[bx.type_f64()]),
sym::truncf128 => ("llvm.trunc", &[bx.type_f128()]),
// We could use any of `rint`, `nearbyint`, or `roundeven`
// for this -- they are all identical in semantics when
// assuming the default FP environment.
// `rint` is what we used for $forever.
sym::round_ties_even_f16 => "llvm.rint.f16",
sym::round_ties_even_f32 => "llvm.rint.f32",
sym::round_ties_even_f64 => "llvm.rint.f64",
sym::round_ties_even_f128 => "llvm.rint.f128",
sym::round_ties_even_f16 => ("llvm.rint", &[bx.type_f16()]),
sym::round_ties_even_f32 => ("llvm.rint", &[bx.type_f32()]),
sym::round_ties_even_f64 => ("llvm.rint", &[bx.type_f64()]),
sym::round_ties_even_f128 => ("llvm.rint", &[bx.type_f128()]),
sym::roundf16 => "llvm.round.f16",
sym::roundf32 => "llvm.round.f32",
sym::roundf64 => "llvm.round.f64",
sym::roundf128 => "llvm.round.f128",
sym::roundf16 => ("llvm.round", &[bx.type_f16()]),
sym::roundf32 => ("llvm.round", &[bx.type_f32()]),
sym::roundf64 => ("llvm.round", &[bx.type_f64()]),
sym::roundf128 => ("llvm.round", &[bx.type_f128()]),
sym::ptr_mask => "llvm.ptrmask",
sym::ptr_mask => ("llvm.ptrmask", &[bx.type_ptr(), bx.type_isize()]),
_ => return None,
};
Some(cx.get_intrinsic(llvm_name))
Some(bx.call_intrinsic(
base_name,
type_params,
&args.iter().map(|arg| arg.immediate()).collect::<Vec<_>>(),
))
}
impl<'ll, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> {
@ -173,36 +178,16 @@ impl<'ll, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> {
let name = tcx.item_name(instance.def_id());
let fn_args = instance.args;
let simple = get_simple_intrinsic(self, name);
let simple = call_simple_intrinsic(self, name, args);
let llval = match name {
_ if simple.is_some() => {
let (simple_ty, simple_fn) = simple.unwrap();
self.call(
simple_ty,
None,
None,
simple_fn,
&args.iter().map(|arg| arg.immediate()).collect::<Vec<_>>(),
None,
Some(instance),
)
}
_ if simple.is_some() => simple.unwrap(),
sym::is_val_statically_known => {
let intrinsic_type = args[0].layout.immediate_llvm_type(self.cx);
let kind = self.type_kind(intrinsic_type);
let intrinsic_name = match kind {
TypeKind::Pointer | TypeKind::Integer => {
Some(format!("llvm.is.constant.{intrinsic_type:?}"))
}
// LLVM float types' intrinsic names differ from their type names.
TypeKind::Half => Some(format!("llvm.is.constant.f16")),
TypeKind::Float => Some(format!("llvm.is.constant.f32")),
TypeKind::Double => Some(format!("llvm.is.constant.f64")),
TypeKind::FP128 => Some(format!("llvm.is.constant.f128")),
_ => None,
};
if let Some(intrinsic_name) = intrinsic_name {
self.call_intrinsic(&intrinsic_name, &[args[0].immediate()])
if let OperandValue::Immediate(imm) = args[0].val {
self.call_intrinsic(
"llvm.is.constant",
&[args[0].layout.immediate_llvm_type(self.cx)],
&[imm],
)
} else {
self.const_bool(false)
}
@ -246,10 +231,12 @@ impl<'ll, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> {
);
return Ok(());
}
sym::breakpoint => self.call_intrinsic("llvm.debugtrap", &[]),
sym::va_copy => {
self.call_intrinsic("llvm.va_copy", &[args[0].immediate(), args[1].immediate()])
}
sym::breakpoint => self.call_intrinsic("llvm.debugtrap", &[], &[]),
sym::va_copy => self.call_intrinsic(
"llvm.va_copy",
&[self.type_ptr()],
&[args[0].immediate(), args[1].immediate()],
),
sym::va_arg => {
match result.layout.backend_repr {
BackendRepr::Scalar(scalar) => {
@ -324,6 +311,7 @@ impl<'ll, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> {
};
self.call_intrinsic(
"llvm.prefetch",
&[self.type_ptr()],
&[
args[0].immediate(),
self.const_i32(rw),
@ -385,11 +373,13 @@ impl<'ll, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> {
}
let (size, signed) = ty.int_size_and_signed(self.tcx);
let width = size.bits();
let llty = self.type_ix(width);
match name {
sym::ctlz | sym::cttz => {
let y = self.const_bool(false);
let ret = self.call_intrinsic(
&format!("llvm.{name}.i{width}"),
&format!("llvm.{name}"),
&[llty],
&[args[0].immediate(), y],
);
@ -397,62 +387,54 @@ impl<'ll, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> {
}
sym::ctlz_nonzero => {
let y = self.const_bool(true);
let llvm_name = &format!("llvm.ctlz.i{width}");
let ret = self.call_intrinsic(llvm_name, &[args[0].immediate(), y]);
let ret =
self.call_intrinsic("llvm.ctlz", &[llty], &[args[0].immediate(), y]);
self.intcast(ret, result.layout.llvm_type(self), false)
}
sym::cttz_nonzero => {
let y = self.const_bool(true);
let llvm_name = &format!("llvm.cttz.i{width}");
let ret = self.call_intrinsic(llvm_name, &[args[0].immediate(), y]);
let ret =
self.call_intrinsic("llvm.cttz", &[llty], &[args[0].immediate(), y]);
self.intcast(ret, result.layout.llvm_type(self), false)
}
sym::ctpop => {
let ret = self.call_intrinsic(
&format!("llvm.ctpop.i{width}"),
&[args[0].immediate()],
);
let ret =
self.call_intrinsic("llvm.ctpop", &[llty], &[args[0].immediate()]);
self.intcast(ret, result.layout.llvm_type(self), false)
}
sym::bswap => {
if width == 8 {
args[0].immediate() // byte swap a u8/i8 is just a no-op
} else {
self.call_intrinsic(
&format!("llvm.bswap.i{width}"),
&[args[0].immediate()],
)
self.call_intrinsic("llvm.bswap", &[llty], &[args[0].immediate()])
}
}
sym::bitreverse => self.call_intrinsic(
&format!("llvm.bitreverse.i{width}"),
&[args[0].immediate()],
),
sym::bitreverse => {
self.call_intrinsic("llvm.bitreverse", &[llty], &[args[0].immediate()])
}
sym::rotate_left | sym::rotate_right => {
let is_left = name == sym::rotate_left;
let val = args[0].immediate();
let raw_shift = args[1].immediate();
// rotate = funnel shift with first two args the same
let llvm_name =
&format!("llvm.fsh{}.i{}", if is_left { 'l' } else { 'r' }, width);
let llvm_name = format!("llvm.fsh{}", if is_left { 'l' } else { 'r' });
// llvm expects shift to be the same type as the values, but rust
// always uses `u32`.
let raw_shift = self.intcast(raw_shift, self.val_ty(val), false);
self.call_intrinsic(llvm_name, &[val, val, raw_shift])
self.call_intrinsic(&llvm_name, &[llty], &[val, val, raw_shift])
}
sym::saturating_add | sym::saturating_sub => {
let is_add = name == sym::saturating_add;
let lhs = args[0].immediate();
let rhs = args[1].immediate();
let llvm_name = &format!(
"llvm.{}{}.sat.i{}",
let llvm_name = format!(
"llvm.{}{}.sat",
if signed { 's' } else { 'u' },
if is_add { "add" } else { "sub" },
width
);
self.call_intrinsic(llvm_name, &[lhs, rhs])
self.call_intrinsic(&llvm_name, &[llty], &[lhs, rhs])
}
_ => bug!(),
}
@ -484,11 +466,8 @@ impl<'ll, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> {
self.icmp(IntPredicate::IntEQ, a_val, b_val)
} else {
let n = self.const_usize(layout.size().bytes());
let cmp = self.call_intrinsic("memcmp", &[a, b, n]);
match self.cx.sess().target.arch.as_ref() {
"avr" | "msp430" => self.icmp(IntPredicate::IntEQ, cmp, self.const_i16(0)),
_ => self.icmp(IntPredicate::IntEQ, cmp, self.const_i32(0)),
}
let cmp = self.call_intrinsic("memcmp", &[], &[a, b, n]);
self.icmp(IntPredicate::IntEQ, cmp, self.const_int(self.type_int(), 0))
}
}
@ -496,6 +475,7 @@ impl<'ll, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> {
// Here we assume that the `memcmp` provided by the target is a NOP for size 0.
let cmp = self.call_intrinsic(
"memcmp",
&[],
&[args[0].immediate(), args[1].immediate(), args[2].immediate()],
);
// Some targets have `memcmp` returning `i16`, but the intrinsic is always `i32`.
@ -619,18 +599,22 @@ impl<'ll, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> {
}
fn abort(&mut self) {
self.call_intrinsic("llvm.trap", &[]);
self.call_intrinsic("llvm.trap", &[], &[]);
}
fn assume(&mut self, val: Self::Value) {
if self.cx.sess().opts.optimize != rustc_session::config::OptLevel::No {
self.call_intrinsic("llvm.assume", &[val]);
self.call_intrinsic("llvm.assume", &[], &[val]);
}
}
fn expect(&mut self, cond: Self::Value, expected: bool) -> Self::Value {
if self.cx.sess().opts.optimize != rustc_session::config::OptLevel::No {
self.call_intrinsic("llvm.expect.i1", &[cond, self.const_bool(expected)])
self.call_intrinsic(
"llvm.expect",
&[self.type_i1()],
&[cond, self.const_bool(expected)],
)
} else {
cond
}
@ -644,17 +628,20 @@ impl<'ll, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> {
) -> Self::Value {
let typeid = self.get_metadata_value(typeid);
let vtable_byte_offset = self.const_i32(vtable_byte_offset as i32);
let type_checked_load =
self.call_intrinsic("llvm.type.checked.load", &[llvtable, vtable_byte_offset, typeid]);
let type_checked_load = self.call_intrinsic(
"llvm.type.checked.load",
&[],
&[llvtable, vtable_byte_offset, typeid],
);
self.extract_value(type_checked_load, 0)
}
fn va_start(&mut self, va_list: &'ll Value) -> &'ll Value {
self.call_intrinsic("llvm.va_start", &[va_list])
self.call_intrinsic("llvm.va_start", &[self.type_ptr()], &[va_list])
}
fn va_end(&mut self, va_list: &'ll Value) -> &'ll Value {
self.call_intrinsic("llvm.va_end", &[va_list])
self.call_intrinsic("llvm.va_end", &[self.type_ptr()], &[va_list])
}
}
@ -893,8 +880,8 @@ fn codegen_wasm_try<'ll, 'tcx>(
let null = bx.const_null(bx.type_ptr());
let funclet = bx.catch_pad(cs, &[null]);
let ptr = bx.call_intrinsic("llvm.wasm.get.exception", &[funclet.cleanuppad()]);
let _sel = bx.call_intrinsic("llvm.wasm.get.ehselector", &[funclet.cleanuppad()]);
let ptr = bx.call_intrinsic("llvm.wasm.get.exception", &[], &[funclet.cleanuppad()]);
let _sel = bx.call_intrinsic("llvm.wasm.get.ehselector", &[], &[funclet.cleanuppad()]);
let catch_ty = bx.type_func(&[bx.type_ptr(), bx.type_ptr()], bx.type_void());
bx.call(catch_ty, None, None, catch_func, &[data, ptr], Some(&funclet), None);
@ -1031,7 +1018,7 @@ fn codegen_emcc_try<'ll, 'tcx>(
let selector = bx.extract_value(vals, 1);
// Check if the typeid we got is the one for a Rust panic.
let rust_typeid = bx.call_intrinsic("llvm.eh.typeid.for", &[tydesc]);
let rust_typeid = bx.call_intrinsic("llvm.eh.typeid.for", &[bx.type_ptr()], &[tydesc]);
let is_rust_panic = bx.icmp(IntPredicate::IntEQ, selector, rust_typeid);
let is_rust_panic = bx.zext(is_rust_panic, bx.type_bool());
@ -1522,56 +1509,37 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
}};
}
let (elem_ty_str, elem_ty) = if let ty::Float(f) = in_elem.kind() {
let elem_ty = bx.cx.type_float_from_ty(*f);
match f.bit_width() {
16 => ("f16", elem_ty),
32 => ("f32", elem_ty),
64 => ("f64", elem_ty),
128 => ("f128", elem_ty),
_ => return_error!(InvalidMonomorphization::FloatingPointVector {
span,
name,
f_ty: *f,
in_ty,
}),
}
let elem_ty = if let ty::Float(f) = in_elem.kind() {
bx.cx.type_float_from_ty(*f)
} else {
return_error!(InvalidMonomorphization::FloatingPointType { span, name, in_ty });
};
let vec_ty = bx.type_vector(elem_ty, in_len);
let (intr_name, fn_ty) = match name {
sym::simd_ceil => ("ceil", bx.type_func(&[vec_ty], vec_ty)),
sym::simd_fabs => ("fabs", bx.type_func(&[vec_ty], vec_ty)),
sym::simd_fcos => ("cos", bx.type_func(&[vec_ty], vec_ty)),
sym::simd_fexp2 => ("exp2", bx.type_func(&[vec_ty], vec_ty)),
sym::simd_fexp => ("exp", bx.type_func(&[vec_ty], vec_ty)),
sym::simd_flog10 => ("log10", bx.type_func(&[vec_ty], vec_ty)),
sym::simd_flog2 => ("log2", bx.type_func(&[vec_ty], vec_ty)),
sym::simd_flog => ("log", bx.type_func(&[vec_ty], vec_ty)),
sym::simd_floor => ("floor", bx.type_func(&[vec_ty], vec_ty)),
sym::simd_fma => ("fma", bx.type_func(&[vec_ty, vec_ty, vec_ty], vec_ty)),
sym::simd_relaxed_fma => ("fmuladd", bx.type_func(&[vec_ty, vec_ty, vec_ty], vec_ty)),
sym::simd_fsin => ("sin", bx.type_func(&[vec_ty], vec_ty)),
sym::simd_fsqrt => ("sqrt", bx.type_func(&[vec_ty], vec_ty)),
sym::simd_round => ("round", bx.type_func(&[vec_ty], vec_ty)),
sym::simd_trunc => ("trunc", bx.type_func(&[vec_ty], vec_ty)),
let intr_name = match name {
sym::simd_ceil => "llvm.ceil",
sym::simd_fabs => "llvm.fabs",
sym::simd_fcos => "llvm.cos",
sym::simd_fexp2 => "llvm.exp2",
sym::simd_fexp => "llvm.exp",
sym::simd_flog10 => "llvm.log10",
sym::simd_flog2 => "llvm.log2",
sym::simd_flog => "llvm.log",
sym::simd_floor => "llvm.floor",
sym::simd_fma => "llvm.fma",
sym::simd_relaxed_fma => "llvm.fmuladd",
sym::simd_fsin => "llvm.sin",
sym::simd_fsqrt => "llvm.sqrt",
sym::simd_round => "llvm.round",
sym::simd_trunc => "llvm.trunc",
_ => return_error!(InvalidMonomorphization::UnrecognizedIntrinsic { span, name }),
};
let llvm_name = &format!("llvm.{intr_name}.v{in_len}{elem_ty_str}");
let f = bx.declare_cfn(llvm_name, llvm::UnnamedAddr::No, fn_ty);
let c = bx.call(
fn_ty,
None,
None,
f,
Ok(bx.call_intrinsic(
intr_name,
&[vec_ty],
&args.iter().map(|arg| arg.immediate()).collect::<Vec<_>>(),
None,
None,
);
Ok(c)
))
}
if std::matches!(
@ -1595,29 +1563,6 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
return simd_simple_float_intrinsic(name, in_elem, in_ty, in_len, bx, span, args);
}
// FIXME: use:
// https://github.com/llvm-mirror/llvm/blob/master/include/llvm/IR/Function.h#L182
// https://github.com/llvm-mirror/llvm/blob/master/include/llvm/IR/Intrinsics.h#L81
fn llvm_vector_str(bx: &Builder<'_, '_, '_>, elem_ty: Ty<'_>, vec_len: u64) -> String {
match *elem_ty.kind() {
ty::Int(v) => format!(
"v{}i{}",
vec_len,
// Normalize to prevent crash if v: IntTy::Isize
v.normalize(bx.target_spec().pointer_width).bit_width().unwrap()
),
ty::Uint(v) => format!(
"v{}i{}",
vec_len,
// Normalize to prevent crash if v: UIntTy::Usize
v.normalize(bx.target_spec().pointer_width).bit_width().unwrap()
),
ty::Float(v) => format!("v{}f{}", vec_len, v.bit_width()),
ty::RawPtr(_, _) => format!("v{}p0", vec_len),
_ => unreachable!(),
}
}
fn llvm_vector_ty<'ll>(cx: &CodegenCx<'ll, '_>, elem_ty: Ty<'_>, vec_len: u64) -> &'ll Type {
let elem_ty = match *elem_ty.kind() {
ty::Int(v) => cx.type_int_from_ty(v),
@ -1698,38 +1643,22 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
);
// Alignment of T, must be a constant integer value:
let alignment_ty = bx.type_i32();
let alignment = bx.const_i32(bx.align_of(in_elem).bytes() as i32);
// Truncate the mask vector to a vector of i1s:
let mask = vector_mask_to_bitmask(bx, args[2].immediate(), mask_elem_bitwidth, in_len);
let mask_ty = bx.type_vector(bx.type_i1(), in_len);
// Type of the vector of pointers:
let llvm_pointer_vec_ty = llvm_vector_ty(bx, element_ty1, in_len);
let llvm_pointer_vec_str = llvm_vector_str(bx, element_ty1, in_len);
// Type of the vector of elements:
let llvm_elem_vec_ty = llvm_vector_ty(bx, element_ty0, in_len);
let llvm_elem_vec_str = llvm_vector_str(bx, element_ty0, in_len);
let llvm_intrinsic =
format!("llvm.masked.gather.{llvm_elem_vec_str}.{llvm_pointer_vec_str}");
let fn_ty = bx.type_func(
&[llvm_pointer_vec_ty, alignment_ty, mask_ty, llvm_elem_vec_ty],
llvm_elem_vec_ty,
);
let f = bx.declare_cfn(&llvm_intrinsic, llvm::UnnamedAddr::No, fn_ty);
let v = bx.call(
fn_ty,
None,
None,
f,
return Ok(bx.call_intrinsic(
"llvm.masked.gather",
&[llvm_elem_vec_ty, llvm_pointer_vec_ty],
&[args[1].immediate(), alignment, mask, args[0].immediate()],
None,
None,
);
return Ok(v);
));
}
if name == sym::simd_masked_load {
@ -1795,32 +1724,20 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
);
let mask = vector_mask_to_bitmask(bx, args[0].immediate(), m_elem_bitwidth, mask_len);
let mask_ty = bx.type_vector(bx.type_i1(), mask_len);
// Alignment of T, must be a constant integer value:
let alignment_ty = bx.type_i32();
let alignment = bx.const_i32(bx.align_of(values_elem).bytes() as i32);
let llvm_pointer = bx.type_ptr();
// Type of the vector of elements:
let llvm_elem_vec_ty = llvm_vector_ty(bx, values_elem, values_len);
let llvm_elem_vec_str = llvm_vector_str(bx, values_elem, values_len);
let llvm_intrinsic = format!("llvm.masked.load.{llvm_elem_vec_str}.p0");
let fn_ty = bx
.type_func(&[llvm_pointer, alignment_ty, mask_ty, llvm_elem_vec_ty], llvm_elem_vec_ty);
let f = bx.declare_cfn(&llvm_intrinsic, llvm::UnnamedAddr::No, fn_ty);
let v = bx.call(
fn_ty,
None,
None,
f,
return Ok(bx.call_intrinsic(
"llvm.masked.load",
&[llvm_elem_vec_ty, llvm_pointer],
&[args[1].immediate(), alignment, mask, args[2].immediate()],
None,
None,
);
return Ok(v);
));
}
if name == sym::simd_masked_store {
@ -1880,33 +1797,20 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
);
let mask = vector_mask_to_bitmask(bx, args[0].immediate(), m_elem_bitwidth, mask_len);
let mask_ty = bx.type_vector(bx.type_i1(), mask_len);
// Alignment of T, must be a constant integer value:
let alignment_ty = bx.type_i32();
let alignment = bx.const_i32(bx.align_of(values_elem).bytes() as i32);
let ret_t = bx.type_void();
let llvm_pointer = bx.type_ptr();
// Type of the vector of elements:
let llvm_elem_vec_ty = llvm_vector_ty(bx, values_elem, values_len);
let llvm_elem_vec_str = llvm_vector_str(bx, values_elem, values_len);
let llvm_intrinsic = format!("llvm.masked.store.{llvm_elem_vec_str}.p0");
let fn_ty = bx.type_func(&[llvm_elem_vec_ty, llvm_pointer, alignment_ty, mask_ty], ret_t);
let f = bx.declare_cfn(&llvm_intrinsic, llvm::UnnamedAddr::No, fn_ty);
let v = bx.call(
fn_ty,
None,
None,
f,
return Ok(bx.call_intrinsic(
"llvm.masked.store",
&[llvm_elem_vec_ty, llvm_pointer],
&[args[2].immediate(), args[1].immediate(), alignment, mask],
None,
None,
);
return Ok(v);
));
}
if name == sym::simd_scatter {
@ -1971,38 +1875,22 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
);
// Alignment of T, must be a constant integer value:
let alignment_ty = bx.type_i32();
let alignment = bx.const_i32(bx.align_of(in_elem).bytes() as i32);
// Truncate the mask vector to a vector of i1s:
let mask = vector_mask_to_bitmask(bx, args[2].immediate(), mask_elem_bitwidth, in_len);
let mask_ty = bx.type_vector(bx.type_i1(), in_len);
let ret_t = bx.type_void();
// Type of the vector of pointers:
let llvm_pointer_vec_ty = llvm_vector_ty(bx, element_ty1, in_len);
let llvm_pointer_vec_str = llvm_vector_str(bx, element_ty1, in_len);
// Type of the vector of elements:
let llvm_elem_vec_ty = llvm_vector_ty(bx, element_ty0, in_len);
let llvm_elem_vec_str = llvm_vector_str(bx, element_ty0, in_len);
let llvm_intrinsic =
format!("llvm.masked.scatter.{llvm_elem_vec_str}.{llvm_pointer_vec_str}");
let fn_ty =
bx.type_func(&[llvm_elem_vec_ty, llvm_pointer_vec_ty, alignment_ty, mask_ty], ret_t);
let f = bx.declare_cfn(&llvm_intrinsic, llvm::UnnamedAddr::No, fn_ty);
let v = bx.call(
fn_ty,
None,
None,
f,
return Ok(bx.call_intrinsic(
"llvm.masked.scatter",
&[llvm_elem_vec_ty, llvm_pointer_vec_ty],
&[args[0].immediate(), args[1].immediate(), alignment, mask],
None,
None,
);
return Ok(v);
));
}
macro_rules! arith_red {
@ -2431,40 +2319,31 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
},
in_len as u64,
);
let intrinsic_name = match name {
sym::simd_bswap => "bswap",
sym::simd_bitreverse => "bitreverse",
sym::simd_ctlz => "ctlz",
sym::simd_ctpop => "ctpop",
sym::simd_cttz => "cttz",
let llvm_intrinsic = match name {
sym::simd_bswap => "llvm.bswap",
sym::simd_bitreverse => "llvm.bitreverse",
sym::simd_ctlz => "llvm.ctlz",
sym::simd_ctpop => "llvm.ctpop",
sym::simd_cttz => "llvm.cttz",
_ => unreachable!(),
};
let int_size = in_elem.int_size_and_signed(bx.tcx()).0.bits();
let llvm_intrinsic = &format!("llvm.{}.v{}i{}", intrinsic_name, in_len, int_size,);
return match name {
// byte swap is no-op for i8/u8
sym::simd_bswap if int_size == 8 => Ok(args[0].immediate()),
sym::simd_ctlz | sym::simd_cttz => {
// for the (int, i1 immediate) pair, the second arg adds `(0, true) => poison`
let fn_ty = bx.type_func(&[vec_ty, bx.type_i1()], vec_ty);
let dont_poison_on_zero = bx.const_int(bx.type_i1(), 0);
let f = bx.declare_cfn(llvm_intrinsic, llvm::UnnamedAddr::No, fn_ty);
Ok(bx.call(
fn_ty,
None,
None,
f,
Ok(bx.call_intrinsic(
llvm_intrinsic,
&[vec_ty],
&[args[0].immediate(), dont_poison_on_zero],
None,
None,
))
}
sym::simd_bswap | sym::simd_bitreverse | sym::simd_ctpop => {
// simple unary argument cases
let fn_ty = bx.type_func(&[vec_ty], vec_ty);
let f = bx.declare_cfn(llvm_intrinsic, llvm::UnnamedAddr::No, fn_ty);
Ok(bx.call(fn_ty, None, None, f, &[args[0].immediate()], None, None))
Ok(bx.call_intrinsic(llvm_intrinsic, &[vec_ty], &[args[0].immediate()]))
}
_ => unreachable!(),
};
@ -2495,10 +2374,9 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
let lhs = args[0].immediate();
let rhs = args[1].immediate();
let is_add = name == sym::simd_saturating_add;
let ptr_bits = bx.tcx().data_layout.pointer_size.bits() as _;
let (signed, elem_width, elem_ty) = match *in_elem.kind() {
ty::Int(i) => (true, i.bit_width().unwrap_or(ptr_bits), bx.cx.type_int_from_ty(i)),
ty::Uint(i) => (false, i.bit_width().unwrap_or(ptr_bits), bx.cx.type_uint_from_ty(i)),
let (signed, elem_ty) = match *in_elem.kind() {
ty::Int(i) => (true, bx.cx.type_int_from_ty(i)),
ty::Uint(i) => (false, bx.cx.type_uint_from_ty(i)),
_ => {
return_error!(InvalidMonomorphization::ExpectedVectorElementType {
span,
@ -2508,19 +2386,14 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
});
}
};
let llvm_intrinsic = &format!(
"llvm.{}{}.sat.v{}i{}",
let llvm_intrinsic = format!(
"llvm.{}{}.sat",
if signed { 's' } else { 'u' },
if is_add { "add" } else { "sub" },
in_len,
elem_width
);
let vec_ty = bx.cx.type_vector(elem_ty, in_len as u64);
let fn_ty = bx.type_func(&[vec_ty, vec_ty], vec_ty);
let f = bx.declare_cfn(llvm_intrinsic, llvm::UnnamedAddr::No, fn_ty);
let v = bx.call(fn_ty, None, None, f, &[lhs, rhs], None, None);
return Ok(v);
return Ok(bx.call_intrinsic(&llvm_intrinsic, &[vec_ty], &[lhs, rhs]));
}
span_bug!(span, "unknown SIMD intrinsic");

View file

@ -15,6 +15,7 @@
use std::fmt::Debug;
use std::marker::PhantomData;
use std::num::NonZero;
use std::ptr;
use bitflags::bitflags;
@ -1195,6 +1196,17 @@ unsafe extern "C" {
// Operations on functions
pub(crate) fn LLVMSetFunctionCallConv(Fn: &Value, CC: c_uint);
// Operations on LLVM intrinsics
pub(crate) fn LLVMLookupIntrinsicID(Name: *const c_char, NameLen: size_t) -> c_uint;
pub(crate) fn LLVMIntrinsicIsOverloaded(ID: NonZero<c_uint>) -> Bool;
pub(crate) fn LLVMIntrinsicCopyOverloadedName2<'a>(
Mod: &'a Module,
ID: NonZero<c_uint>,
ParamTypes: *const &'a Type,
ParamCount: size_t,
NameLength: *mut size_t,
) -> *mut c_char;
// Operations on parameters
pub(crate) fn LLVMIsAArgument(Val: &Value) -> Option<&Value>;
pub(crate) safe fn LLVMCountParams(Fn: &Value) -> c_uint;

View file

@ -1,9 +1,10 @@
#![allow(non_snake_case)]
use std::ffi::{CStr, CString};
use std::ptr;
use std::num::NonZero;
use std::str::FromStr;
use std::string::FromUtf8Error;
use std::{ptr, slice};
use libc::c_uint;
use rustc_abi::{Align, Size, WrappingRange};
@ -327,6 +328,48 @@ pub(crate) fn get_value_name(value: &Value) -> &[u8] {
}
}
/// A validated LLVM intrinsic ID, obtained from `LLVMLookupIntrinsicID`.
///
/// Wrapping the raw `c_uint` keeps "this is a real intrinsic" as a type-level
/// invariant: `lookup` only constructs this for non-zero IDs.
#[derive(Debug, Copy, Clone)]
pub(crate) struct Intrinsic {
    // Non-zero by construction: `lookup` maps a zero ID (no such intrinsic)
    // to `None` instead of building an `Intrinsic`.
    id: NonZero<c_uint>,
}
impl Intrinsic {
    /// Looks up the LLVM intrinsic ID for `name` (e.g. `b"llvm.ctpop"`).
    ///
    /// Returns `None` when LLVM does not know an intrinsic by that name
    /// (`LLVMLookupIntrinsicID` reports this by returning 0).
    pub(crate) fn lookup(name: &[u8]) -> Option<Self> {
        let id = unsafe { LLVMLookupIntrinsicID(name.as_c_char_ptr(), name.len()) };
        NonZero::new(id).map(|id| Self { id })
    }

    /// Whether this intrinsic is overloaded, i.e. it needs concrete type
    /// parameters before a mangled name can be formed for it.
    pub(crate) fn is_overloaded(self) -> bool {
        unsafe { LLVMIntrinsicIsOverloaded(self.id) == True }
    }

    /// Computes the mangled name of this overloaded intrinsic for the given
    /// type parameters (e.g. `llvm.ctpop` specialized to a vector type).
    pub(crate) fn overloaded_name<'ll>(
        self,
        llmod: &'ll Module,
        type_params: &[&'ll Type],
    ) -> String {
        let mut len = 0;
        let ptr = unsafe {
            LLVMIntrinsicCopyOverloadedName2(
                llmod,
                self.id,
                type_params.as_ptr(),
                type_params.len(),
                &mut len,
            )
        };
        // SAFETY: on return, `ptr` points to a heap-allocated buffer of `len`
        // bytes owned by us. We only *read* it, so a shared slice suffices;
        // the previous `from_raw_parts_mut` created an unnecessary `&mut [u8]`.
        let slice = unsafe { slice::from_raw_parts(ptr.cast(), len) };
        let copied = str::from_utf8(slice).expect("Non-UTF8 intrinsic name").to_string();
        // The buffer was allocated with `malloc` on the C side, so it must be
        // released with `free` (not a Rust allocator).
        unsafe {
            libc::free(ptr.cast());
        }
        copied
    }
}
/// Safe wrapper for `LLVMSetValueName2` from a byte slice
pub(crate) fn set_value_name(value: &Value, name: &[u8]) {
unsafe {

View file

@ -1,4 +1,5 @@
use std::borrow::Borrow;
use std::hash::{Hash, Hasher};
use std::{fmt, ptr};
use libc::{c_char, c_uint};
@ -25,6 +26,14 @@ impl PartialEq for Type {
}
}
impl Eq for Type {}
impl Hash for Type {
fn hash<H: Hasher>(&self, state: &mut H) {
ptr::hash(self, state);
}
}
impl fmt::Debug for Type {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.write_str(

View file

@ -23,7 +23,7 @@ use crate::common::{AtomicRmwBinOp, IntPredicate, RealPredicate, Synchronization
use crate::mir::operand::{OperandRef, OperandValue};
use crate::mir::place::{PlaceRef, PlaceValue};
#[derive(Copy, Clone, Debug)]
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum OverflowOp {
Add,
Sub,
@ -215,7 +215,7 @@ pub trait BuilderMethods<'a, 'tcx>:
fn checked_binop(
&mut self,
oop: OverflowOp,
ty: Ty<'_>,
ty: Ty<'tcx>,
lhs: Self::Value,
rhs: Self::Value,
) -> (Self::Value, Self::Value);

View file

@ -263,6 +263,8 @@ declare_features! (
/// Allows unnamed fields of struct and union type
(removed, unnamed_fields, "1.83.0", Some(49804), Some("feature needs redesign"), 131045),
(removed, unsafe_no_drop_flag, "1.0.0", None, None),
/// Allows unsized rvalues at arguments and parameters.
(removed, unsized_locals, "CURRENT_RUSTC_VERSION", Some(48055), Some("removed due to implementation concerns; see https://github.com/rust-lang/rust/issues/111942")),
(removed, unsized_tuple_coercion, "1.87.0", Some(42877),
Some("The feature restricts possible layouts for tuples, and this restriction is not worth it."), 137728),
/// Allows `union` fields that don't implement `Copy` as long as they don't have any drop glue.

View file

@ -667,8 +667,6 @@ declare_features! (
(incomplete, unsized_const_params, "1.82.0", Some(95174)),
/// Allows unsized fn parameters.
(internal, unsized_fn_params, "1.49.0", Some(48055)),
/// Allows unsized rvalues at arguments and parameters.
(incomplete, unsized_locals, "1.30.0", Some(48055)),
/// Allows using the `#[used(linker)]` (or `#[used(compiler)]`) attribute.
(unstable, used_with_arg, "1.60.0", Some(93798)),
/// Allows use of attributes in `where` clauses.

View file

@ -599,7 +599,7 @@ hir_analysis_value_of_associated_struct_already_specified =
.label = re-bound here
.previous_bound_label = `{$item_name}` bound here first
hir_analysis_variadic_function_compatible_convention = C-variadic function must have a compatible calling convention, like {$conventions}
hir_analysis_variadic_function_compatible_convention = C-variadic functions with the {$convention} calling convention are not supported
.label = C-variadic function must have a compatible calling convention
hir_analysis_variances_of = {$variances}

View file

@ -10,6 +10,7 @@ use std::mem;
use rustc_data_structures::fx::FxHashMap;
use rustc_hir as hir;
use rustc_hir::def::{CtorKind, DefKind, Res};
use rustc_hir::def_id::DefId;
use rustc_hir::intravisit::{self, Visitor};
use rustc_hir::{Arm, Block, Expr, LetStmt, Pat, PatKind, Stmt};
@ -752,13 +753,19 @@ fn resolve_local<'tcx>(
record_rvalue_scope_if_borrow_expr(visitor, arm.body, blk_id);
}
}
hir::ExprKind::Call(..) | hir::ExprKind::MethodCall(..) => {
// FIXME(@dingxiangfei2009): choose call arguments here
// for candidacy for extended parameter rule application
}
hir::ExprKind::Index(..) => {
// FIXME(@dingxiangfei2009): select the indices
// as candidate for rvalue scope rules
hir::ExprKind::Call(func, args) => {
// Recurse into tuple constructors, such as `Some(&temp())`.
//
// That way, there is no difference between `Some(..)` and `Some { 0: .. }`,
// even though the former is syntactically a function call.
if let hir::ExprKind::Path(path) = &func.kind
&& let hir::QPath::Resolved(None, path) = path
&& let Res::SelfCtor(_) | Res::Def(DefKind::Ctor(_, CtorKind::Fn), _) = path.res
{
for arg in args {
record_rvalue_scope_if_borrow_expr(visitor, arg, blk_id);
}
}
}
_ => {}
}

View file

@ -276,8 +276,7 @@ fn create_generic_args<'tcx>(
tcx.impl_trait_header(parent).unwrap().trait_ref.instantiate_identity().args;
let trait_args = ty::GenericArgs::identity_for_item(tcx, sig_id);
let method_args =
tcx.mk_args_from_iter(trait_args.iter().skip(callee_generics.parent_count));
let method_args = tcx.mk_args(&trait_args[callee_generics.parent_count..]);
let method_args = build_generic_args(tcx, sig_id, def_id, method_args);
tcx.mk_args_from_iter(parent_args.iter().chain(method_args))

View file

@ -633,7 +633,7 @@ pub(crate) struct VariadicFunctionCompatibleConvention<'a> {
#[primary_span]
#[label]
pub span: Span,
pub conventions: &'a str,
pub convention: &'a str,
}
#[derive(Diagnostic)]

View file

@ -115,12 +115,6 @@ fn require_c_abi_if_c_variadic(
abi: ExternAbi,
span: Span,
) {
const CONVENTIONS_UNSTABLE: &str =
"`C`, `cdecl`, `system`, `aapcs`, `win64`, `sysv64` or `efiapi`";
const CONVENTIONS_STABLE: &str = "`C` or `cdecl`";
const UNSTABLE_EXPLAIN: &str =
"using calling conventions other than `C` or `cdecl` for varargs functions is unstable";
// ABIs which can stably use varargs
if !decl.c_variadic || matches!(abi, ExternAbi::C { .. } | ExternAbi::Cdecl { .. }) {
return;
@ -140,20 +134,18 @@ fn require_c_abi_if_c_variadic(
// Looks like we need to pick an error to emit.
// Is there any feature which we could have enabled to make this work?
let unstable_explain =
format!("C-variadic functions with the {abi} calling convention are unstable");
match abi {
ExternAbi::System { .. } => {
feature_err(&tcx.sess, sym::extern_system_varargs, span, UNSTABLE_EXPLAIN)
feature_err(&tcx.sess, sym::extern_system_varargs, span, unstable_explain)
}
abi if abi.supports_varargs() => {
feature_err(&tcx.sess, sym::extended_varargs_abi_support, span, UNSTABLE_EXPLAIN)
feature_err(&tcx.sess, sym::extended_varargs_abi_support, span, unstable_explain)
}
_ => tcx.dcx().create_err(errors::VariadicFunctionCompatibleConvention {
span,
conventions: if tcx.sess.opts.unstable_features.is_nightly_build() {
CONVENTIONS_UNSTABLE
} else {
CONVENTIONS_STABLE
},
convention: &format!("{abi}"),
}),
}
.emit();

View file

@ -1662,9 +1662,7 @@ impl<'tcx, 'exprs, E: AsCoercionSite> CoerceMany<'tcx, 'exprs, E> {
blk_id,
expression,
);
if !fcx.tcx.features().unsized_locals() {
unsized_return = self.is_return_ty_definitely_unsized(fcx);
}
unsized_return = self.is_return_ty_definitely_unsized(fcx);
}
ObligationCauseCode::ReturnValue(return_expr_id) => {
err = self.report_return_mismatched_types(
@ -1676,9 +1674,7 @@ impl<'tcx, 'exprs, E: AsCoercionSite> CoerceMany<'tcx, 'exprs, E> {
return_expr_id,
expression,
);
if !fcx.tcx.features().unsized_locals() {
unsized_return = self.is_return_ty_definitely_unsized(fcx);
}
unsized_return = self.is_return_ty_definitely_unsized(fcx);
}
ObligationCauseCode::MatchExpressionArm(box MatchExpressionArmCause {
arm_span,

View file

@ -809,9 +809,8 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
);
}
}
// Here we want to prevent struct constructors from returning unsized types.
// There were two cases this happened: fn pointer coercion in stable
// and usual function call in presence of unsized_locals.
// Here we want to prevent struct constructors from returning unsized types,
// which can happen with fn pointer coercion on stable.
// Also, as we just want to check sizedness, instead of introducing
// placeholder lifetimes with probing, we just replace higher lifetimes
// with fresh vars.

View file

@ -202,7 +202,7 @@ impl<'a, 'tcx> Visitor<'tcx> for GatherLocalsVisitor<'a, 'tcx> {
),
);
}
} else if !self.fcx.tcx.features().unsized_locals() {
} else {
self.fcx.require_type_is_sized(
var_ty,
p.span,

View file

@ -724,7 +724,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
{
let def_path = tcx.def_path_str(adt_def.did());
err.span_suggestion(
ty.span.to(item_ident.span),
sugg_span,
format!("to construct a value of type `{}`, use the explicit path", def_path),
def_path,
Applicability::MachineApplicable,

View file

@ -492,7 +492,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
let final_upvar_tys = self.final_upvar_tys(closure_def_id);
debug!(?closure_hir_id, ?args, ?final_upvar_tys);
if self.tcx.features().unsized_locals() || self.tcx.features().unsized_fn_params() {
if self.tcx.features().unsized_fn_params() {
for capture in
self.typeck_results.borrow().closure_min_captures_flattened(closure_def_id)
{

View file

@ -84,19 +84,45 @@ impl<'tcx> LateLintPass<'tcx> for LifetimeSyntax {
_: rustc_span::Span,
_: rustc_span::def_id::LocalDefId,
) {
let mut input_map = Default::default();
let mut output_map = Default::default();
for input in fd.inputs {
LifetimeInfoCollector::collect(input, &mut input_map);
}
if let hir::FnRetTy::Return(output) = fd.output {
LifetimeInfoCollector::collect(output, &mut output_map);
}
report_mismatches(cx, &input_map, &output_map);
check_fn_like(cx, fd);
}
#[instrument(skip_all)]
fn check_trait_item(&mut self, cx: &LateContext<'tcx>, ti: &'tcx hir::TraitItem<'tcx>) {
    // Only methods carry a function signature worth checking; the match
    // stays exhaustive so new trait-item kinds must be handled explicitly.
    match ti.kind {
        hir::TraitItemKind::Fn(fn_sig, _) => check_fn_like(cx, fn_sig.decl),
        hir::TraitItemKind::Const(..) | hir::TraitItemKind::Type(..) => {}
    }
}
#[instrument(skip_all)]
fn check_foreign_item(
    &mut self,
    cx: &LateContext<'tcx>,
    fi: &'tcx rustc_hir::ForeignItem<'tcx>,
) {
    // Foreign statics and foreign types have no signature to inspect;
    // kept exhaustive so a new foreign-item kind forces a decision here.
    match fi.kind {
        hir::ForeignItemKind::Fn(fn_sig, ..) => check_fn_like(cx, fn_sig.decl),
        hir::ForeignItemKind::Static(..) | hir::ForeignItemKind::Type => {}
    }
}
}
/// Collects lifetime-syntax information from a function declaration's
/// parameter types and return type, then reports any mismatches between
/// how lifetimes are written on the two sides.
fn check_fn_like<'tcx>(cx: &LateContext<'tcx>, fd: &'tcx hir::FnDecl<'tcx>) {
    let mut param_lifetimes = Default::default();
    fd.inputs
        .iter()
        .for_each(|param_ty| LifetimeInfoCollector::collect(param_ty, &mut param_lifetimes));

    let mut ret_lifetimes = Default::default();
    if let hir::FnRetTy::Return(ret_ty) = fd.output {
        LifetimeInfoCollector::collect(ret_ty, &mut ret_lifetimes);
    }

    report_mismatches(cx, &param_lifetimes, &ret_lifetimes);
}
#[instrument(skip_all)]

View file

@ -1133,13 +1133,6 @@ pub type AssertMessage<'tcx> = AssertKind<Operand<'tcx>>;
/// Each local naturally corresponds to the place `Place { local, projection: [] }`. This place has
/// the address of the local's allocation and the type of the local.
///
/// **Needs clarification:** Unsized locals seem to present a bit of an issue. Their allocation
/// can't actually be created on `StorageLive`, because it's unclear how big to make the allocation.
/// Furthermore, MIR produces assignments to unsized locals, although that is not permitted under
/// `#![feature(unsized_locals)]` in Rust. Besides just putting "unsized locals are special and
/// different" in a bunch of places, I (JakobDegen) don't know how to incorporate this behavior into
/// the current MIR semantics in a clean way - possibly this needs some design work first.
///
/// For places that are not locals, ie they have a non-empty list of projections, we define the
/// values as a function of the parent place, that is the place with its last [`ProjectionElem`]
/// stripped. The way this is computed of course depends on the kind of that last projection

View file

@ -4,7 +4,7 @@
pub mod tls;
use std::assert_matches::{assert_matches, debug_assert_matches};
use std::assert_matches::debug_assert_matches;
use std::borrow::Borrow;
use std::cmp::Ordering;
use std::env::VarError;
@ -283,9 +283,9 @@ impl<'tcx> Interner for TyCtxt<'tcx> {
def_id: DefId,
args: ty::GenericArgsRef<'tcx>,
) -> (ty::TraitRef<'tcx>, &'tcx [ty::GenericArg<'tcx>]) {
assert_matches!(self.def_kind(def_id), DefKind::AssocTy | DefKind::AssocConst);
debug_assert_matches!(self.def_kind(def_id), DefKind::AssocTy | DefKind::AssocConst);
let trait_def_id = self.parent(def_id);
assert_matches!(self.def_kind(trait_def_id), DefKind::Trait);
debug_assert_matches!(self.def_kind(trait_def_id), DefKind::Trait);
let trait_generics = self.generics_of(trait_def_id);
(
ty::TraitRef::new_from_args(self, trait_def_id, args.truncate_to(self, trait_generics)),

View file

@ -588,7 +588,7 @@ impl<'tcx> GenericArgs<'tcx> {
}
pub fn truncate_to(&self, tcx: TyCtxt<'tcx>, generics: &ty::Generics) -> GenericArgsRef<'tcx> {
tcx.mk_args_from_iter(self.iter().take(generics.count()))
tcx.mk_args(&self[..generics.count()])
}
pub fn print_as_list(&self) -> String {

View file

@ -79,7 +79,7 @@ pub enum InstanceKind<'tcx> {
Intrinsic(DefId),
/// `<T as Trait>::method` where `method` receives unsizeable `self: Self` (part of the
/// `unsized_locals` feature).
/// `unsized_fn_params` feature).
///
/// The generated shim will take `Self` via `*mut Self` - conceptually this is `&owned Self` -
/// and dereference the argument to call the original function.

View file

@ -55,9 +55,9 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
/// local variable of unsized type. For example, consider this program:
///
/// ```
/// #![feature(unsized_locals, unsized_fn_params)]
/// #![feature(unsized_fn_params)]
/// # use core::fmt::Debug;
/// fn foo(p: dyn Debug) { dbg!(p); }
/// fn foo(_p: dyn Debug) { /* ... */ }
///
/// fn bar(box_p: Box<dyn Debug>) { foo(*box_p); }
/// ```
@ -84,7 +84,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
/// will actually provide a pointer to the interior of the box, and not move the `dyn Debug`
/// value to the stack.
///
/// See #68304 for more details.
/// See <https://github.com/rust-lang/rust/issues/68304> for more details.
pub(crate) fn as_local_call_operand(
&mut self,
block: BasicBlock,

View file

@ -1417,9 +1417,9 @@ fn check_field_tys_sized<'tcx>(
coroutine_layout: &CoroutineLayout<'tcx>,
def_id: LocalDefId,
) {
// No need to check if unsized_locals/unsized_fn_params is disabled,
// No need to check if unsized_fn_params is disabled,
// since we will error during typeck.
if !tcx.features().unsized_locals() && !tcx.features().unsized_fn_params() {
if !tcx.features().unsized_fn_params() {
return;
}

View file

@ -240,8 +240,6 @@ struct VnState<'body, 'tcx> {
next_opaque: usize,
/// Cache the deref values.
derefs: Vec<VnIndex>,
/// Cache the value of the `unsized_locals` features, to avoid fetching it repeatedly in a loop.
feature_unsized_locals: bool,
ssa: &'body SsaLocals,
dominators: Dominators<BasicBlock>,
reused_locals: DenseBitSet<Local>,
@ -273,7 +271,6 @@ impl<'body, 'tcx> VnState<'body, 'tcx> {
evaluated: IndexVec::with_capacity(num_values),
next_opaque: 1,
derefs: Vec::new(),
feature_unsized_locals: tcx.features().unsized_locals(),
ssa,
dominators,
reused_locals: DenseBitSet::new_empty(local_decls.len()),
@ -329,13 +326,7 @@ impl<'body, 'tcx> VnState<'body, 'tcx> {
fn assign(&mut self, local: Local, value: VnIndex) {
debug_assert!(self.ssa.is_ssa(local));
self.locals[local] = Some(value);
// Only register the value if its type is `Sized`, as we will emit copies of it.
let is_sized = !self.feature_unsized_locals
|| self.local_decls[local].ty.is_sized(self.tcx, self.typing_env());
if is_sized {
self.rev_locals[value].push(local);
}
self.rev_locals[value].push(local);
}
fn insert_constant(&mut self, value: Const<'tcx>) -> VnIndex {

View file

@ -1,5 +1,3 @@
use std::sync::Arc;
use rustc_ast as ast;
use rustc_data_structures::stable_hasher::{HashStable, HashingControls, StableHasher};
use rustc_hir::def_id::{DefId, LocalDefId};
@ -7,7 +5,9 @@ use rustc_hir::definitions::DefPathHash;
use rustc_session::Session;
use rustc_session::cstore::Untracked;
use rustc_span::source_map::SourceMap;
use rustc_span::{BytePos, CachingSourceMapView, DUMMY_SP, SourceFile, Span, SpanData, Symbol};
use rustc_span::{
BytePos, CachingSourceMapView, DUMMY_SP, Span, SpanData, StableSourceFileId, Symbol,
};
use crate::ich;
@ -118,7 +118,7 @@ impl<'a> rustc_span::HashStableContext for StableHashingContext<'a> {
fn span_data_to_lines_and_cols(
&mut self,
span: &SpanData,
) -> Option<(Arc<SourceFile>, usize, BytePos, usize, BytePos)> {
) -> Option<(StableSourceFileId, usize, BytePos, usize, BytePos)> {
self.source_map().span_data_to_lines_and_cols(span)
}

View file

@ -2,7 +2,7 @@ use std::ops::Range;
use std::sync::Arc;
use crate::source_map::SourceMap;
use crate::{BytePos, Pos, RelativeBytePos, SourceFile, SpanData};
use crate::{BytePos, Pos, RelativeBytePos, SourceFile, SpanData, StableSourceFileId};
#[derive(Clone)]
struct CacheEntry {
@ -114,7 +114,7 @@ impl<'sm> CachingSourceMapView<'sm> {
pub fn span_data_to_lines_and_cols(
&mut self,
span_data: &SpanData,
) -> Option<(Arc<SourceFile>, usize, BytePos, usize, BytePos)> {
) -> Option<(StableSourceFileId, usize, BytePos, usize, BytePos)> {
self.time_stamp += 1;
// Check if lo and hi are in the cached lines.
@ -132,7 +132,7 @@ impl<'sm> CachingSourceMapView<'sm> {
}
(
Arc::clone(&lo.file),
lo.file.stable_id,
lo.line_number,
span_data.lo - lo.line.start,
hi.line_number,
@ -226,7 +226,7 @@ impl<'sm> CachingSourceMapView<'sm> {
assert_eq!(lo.file_index, hi.file_index);
Some((
Arc::clone(&lo.file),
lo.file.stable_id,
lo.line_number,
span_data.lo - lo.line.start,
hi.line_number,

View file

@ -2600,7 +2600,7 @@ pub trait HashStableContext {
fn span_data_to_lines_and_cols(
&mut self,
span: &SpanData,
) -> Option<(Arc<SourceFile>, usize, BytePos, usize, BytePos)>;
) -> Option<(StableSourceFileId, usize, BytePos, usize, BytePos)>;
fn hashing_controls(&self) -> HashingControls;
}
@ -2657,7 +2657,7 @@ where
};
Hash::hash(&TAG_VALID_SPAN, hasher);
Hash::hash(&file.stable_id, hasher);
Hash::hash(&file, hasher);
// Hash both the length and the end location (line/column) of a span. If we
// hash only the length, for example, then two otherwise equal spans with

View file

@ -2995,9 +2995,6 @@ impl<'a, 'tcx> TypeErrCtxt<'a, 'tcx> {
if local {
err.note("all local variables must have a statically known size");
}
if !tcx.features().unsized_locals() {
err.help("unsized locals are gated as an unstable feature");
}
}
ObligationCauseCode::SizedArgumentType(hir_id) => {
let mut ty = None;

View file

@ -414,8 +414,8 @@ fn virtual_call_violations_for_method<'tcx>(
let receiver_ty = tcx.liberate_late_bound_regions(method.def_id, sig.input(0));
// Until `unsized_locals` is fully implemented, `self: Self` can't be dispatched on.
// However, this is already considered dyn compatible. We allow it as a special case here.
// `self: Self` can't be dispatched on.
// However, this is considered dyn compatible. We allow it as a special case here.
// FIXME(mikeyhew) get rid of this `if` statement once `receiver_is_dispatchable` allows
// `Receiver: Unsize<Receiver[Self => dyn Trait]>`.
if receiver_ty != tcx.types.self_param {

View file

@ -237,9 +237,6 @@ impl<I: Interner, const INSTANTIATE_LHS_WITH_INFER: bool, const INSTANTIATE_RHS_
}
pub fn types_may_unify(self, lhs: I::Ty, rhs: I::Ty) -> bool {
if lhs == rhs {
return true;
}
self.types_may_unify_inner(lhs, rhs, Self::STARTING_DEPTH)
}
@ -268,6 +265,10 @@ impl<I: Interner, const INSTANTIATE_LHS_WITH_INFER: bool, const INSTANTIATE_RHS_
}
fn types_may_unify_inner(self, lhs: I::Ty, rhs: I::Ty, depth: usize) -> bool {
if lhs == rhs {
return true;
}
match rhs.kind() {
// Start by checking whether the `rhs` type may unify with
// pretty much everything. Just return `true` in that case.

View file

@ -1220,11 +1220,11 @@ impl<T, A: Allocator + Clone> BTreeSet<T, A> {
/// assert_eq!(high.into_iter().collect::<Vec<_>>(), [4, 5, 6, 7]);
/// ```
#[unstable(feature = "btree_extract_if", issue = "70530")]
pub fn extract_if<'a, F, R>(&'a mut self, range: R, pred: F) -> ExtractIf<'a, T, R, F, A>
pub fn extract_if<F, R>(&mut self, range: R, pred: F) -> ExtractIf<'_, T, R, F, A>
where
T: Ord,
R: RangeBounds<T>,
F: 'a + FnMut(&T) -> bool,
F: FnMut(&T) -> bool,
{
let (inner, alloc) = self.map.extract_if_inner(range);
ExtractIf { pred, inner, alloc }
@ -1585,11 +1585,11 @@ where
}
#[unstable(feature = "btree_extract_if", issue = "70530")]
impl<'a, T, R, F, A: Allocator + Clone> Iterator for ExtractIf<'_, T, R, F, A>
impl<T, R, F, A: Allocator + Clone> Iterator for ExtractIf<'_, T, R, F, A>
where
T: PartialOrd,
R: RangeBounds<T>,
F: 'a + FnMut(&T) -> bool,
F: FnMut(&T) -> bool,
{
type Item = T;

View file

@ -1,13 +0,0 @@
[workspace]
# As part of the release process, we delete `libm/Cargo.toml`. Since
# this is only run in CI, we shouldn't need to worry about it.
allow_dirty = true
publish_allow_dirty = true
[[package]]
name = "compiler_builtins"
semver_check = false
changelog_include = ["libm"] # libm is included as part of builtins
[[package]]
name = "libm"

View file

@ -1,8 +1,8 @@
[workspace]
resolver = "2"
members = [
"builtins-shim",
"builtins-test",
"compiler-builtins",
"crates/josh-sync",
"crates/libm-macros",
"crates/musl-math-sys",
@ -14,8 +14,8 @@ members = [
]
default-members = [
"builtins-shim",
"builtins-test",
"compiler-builtins",
"crates/libm-macros",
"libm",
"libm-test",
@ -26,6 +26,10 @@ exclude = [
# and `mangled-names` disabled, which is the opposite of what is needed for
# other tests, so it makes sense to keep it out of the workspace.
"builtins-test-intrinsics",
# We test via the `builtins-shim` crate, so exclude the `compiler-builtins`
# that has a dependency on `core`. See `builtins-shim/Cargo.toml` for more
# details.
"compiler-builtins",
]
[profile.release]

View file

@ -0,0 +1,63 @@
# NOTE: Must be kept in sync with `../compiler-builtins/Cargo.toml`.
#
# The manifest at `../compiler-builtins` is what actually gets used in the
# rust-lang/rust tree; however, we can't build it out of tree because it
# depends on `core` by path, and even optional Cargo dependencies need to be
# available at build time. So, we work around this by having this "shim"
# manifest that is identical except for the `core` dependency and forwards
# to the same sources, which acts as the `compiler-builtins` Cargo entrypoint
# for out-of-tree testing.
[package]
name = "compiler_builtins"
version = "0.1.160"
authors = ["Jorge Aparicio <japaricious@gmail.com>"]
description = "Compiler intrinsics used by the Rust compiler."
repository = "https://github.com/rust-lang/compiler-builtins"
license = "MIT AND Apache-2.0 WITH LLVM-exception AND (MIT OR Apache-2.0)"
edition = "2024"
publish = false
links = "compiler-rt"
build = "../compiler-builtins/build.rs"
[lib]
path = "../compiler-builtins/src/lib.rs"
bench = false
doctest = false
test = false
[build-dependencies]
cc = { optional = true, version = "1.2" }
[features]
default = ["compiler-builtins"]
# Enable compilation of C code in compiler-rt, filling in some more optimized
# implementations and also filling in unimplemented intrinsics
c = ["dep:cc"]
# Workaround for the Cranelift codegen backend. Disables any implementations
# which use inline assembly and fall back to pure Rust versions (if available).
no-asm = []
# Workaround for codegen backends which haven't yet implemented `f16` and
# `f128` support. Disables any intrinsics which use those types.
no-f16-f128 = []
# Flag this library as the unstable compiler-builtins lib
compiler-builtins = []
# Generate memory-related intrinsics like memcpy
mem = []
# Mangle all names so this can be linked in with other versions or other
# compiler-rt implementations. Also used for testing
mangled-names = []
# Only used in the compiler's build system
rustc-dep-of-std = ["compiler-builtins"]
# This makes certain traits and function specializations public that
# are not normally public but are required by the `builtins-test`
unstable-public-internals = []

View file

@ -6,7 +6,7 @@ publish = false
license = "MIT OR Apache-2.0"
[dependencies]
compiler_builtins = { path = "../compiler-builtins", features = ["compiler-builtins"] }
compiler_builtins = { path = "../builtins-shim", features = ["compiler-builtins"] }
panic-handler = { path = "../crates/panic-handler" }
[features]

View file

@ -17,7 +17,7 @@ rustc_apfloat = "0.2.2"
iai-callgrind = { version = "0.14.1", optional = true }
[dependencies.compiler_builtins]
path = "../compiler-builtins"
path = "../builtins-shim"
default-features = false
features = ["unstable-public-internals"]

View file

@ -1,4 +1,5 @@
#![feature(decl_macro)] // so we can use pub(super)
#![feature(macro_metavar_expr_concat)]
#![cfg(all(target_arch = "aarch64", target_os = "linux", not(feature = "no-asm")))]
/// Translate a byte size to a Rust type.
@ -87,7 +88,7 @@ test_op!(add, |left, right| left.wrapping_add(right));
test_op!(clr, |left, right| left & !right);
test_op!(xor, std::ops::BitXor::bitxor);
test_op!(or, std::ops::BitOr::bitor);
use compiler_builtins::{foreach_bytes, foreach_ordering};
compiler_builtins::foreach_cas!(cas::test);
compiler_builtins::foreach_cas16!(test_cas16);
compiler_builtins::foreach_swp!(swap::test);

View file

@ -57,7 +57,7 @@ function run_icount_benchmarks() {
# Disregard regressions after merge
echo "Benchmarks completed with regressions; ignoring (not in a PR)"
else
./ci/ci-util.py handle-banch-regressions "$PR_NUMBER"
./ci/ci-util.py handle-bench-regressions "$PR_NUMBER"
fi
}

View file

@ -1,14 +1,18 @@
# NOTE: Must be kept in sync with `../builtins-shim/Cargo.toml`.
#
# This manifest is actually used in-tree by rust-lang/rust,
# `../builtins-shim/Cargo.toml` is used by out-of-tree testing. See the other
# manifest for further details.
[package]
authors = ["Jorge Aparicio <japaricious@gmail.com>"]
name = "compiler_builtins"
version = "0.1.160"
license = "MIT AND Apache-2.0 WITH LLVM-exception AND (MIT OR Apache-2.0)"
readme = "README.md"
repository = "https://github.com/rust-lang/compiler-builtins"
homepage = "https://github.com/rust-lang/compiler-builtins"
documentation = "https://docs.rs/compiler_builtins"
edition = "2024"
authors = ["Jorge Aparicio <japaricious@gmail.com>"]
description = "Compiler intrinsics used by the Rust compiler."
repository = "https://github.com/rust-lang/compiler-builtins"
license = "MIT AND Apache-2.0 WITH LLVM-exception AND (MIT OR Apache-2.0)"
edition = "2024"
publish = false
links = "compiler-rt"
[lib]
@ -53,7 +57,3 @@ rustc-dep-of-std = ["compiler-builtins", "dep:core"]
# This makes certain traits and function specializations public that
# are not normally public but are required by the `builtins-test`
unstable-public-internals = []
[lints.rust]
# The cygwin config can be dropped after our benchmark toolchain is bumped
unexpected_cfgs = { level = "warn", check-cfg = ['cfg(bootstrap)', 'cfg(target_os, values("cygwin"))'] }

View file

@ -1,9 +1,6 @@
mod configure;
use std::collections::BTreeMap;
use std::env;
use std::path::PathBuf;
use std::sync::atomic::Ordering;
use configure::{Target, configure_aliases, configure_f16_f128};
@ -86,10 +83,6 @@ fn main() {
{
println!("cargo:rustc-cfg=kernel_user_helpers")
}
if llvm_target[0].starts_with("aarch64") {
generate_aarch64_outlined_atomics();
}
}
/// Run configuration for `libm` since it is included directly.
@ -132,61 +125,6 @@ fn configure_libm(target: &Target) {
println!("cargo:rustc-cfg=feature=\"unstable-intrinsics\"");
}
fn aarch64_symbol(ordering: Ordering) -> &'static str {
match ordering {
Ordering::Relaxed => "relax",
Ordering::Acquire => "acq",
Ordering::Release => "rel",
Ordering::AcqRel => "acq_rel",
_ => panic!("unknown symbol for {ordering:?}"),
}
}
/// The `concat_idents` macro is extremely annoying and doesn't allow us to define new items.
/// Define them from the build script instead.
/// Note that the majority of the code is still defined in `aarch64.rs` through inline macros.
fn generate_aarch64_outlined_atomics() {
use std::fmt::Write;
// #[macro_export] so that we can use this in tests
let gen_macro =
|name| format!("#[macro_export] macro_rules! foreach_{name} {{ ($macro:path) => {{\n");
// Generate different macros for add/clr/eor/set so that we can test them separately.
let sym_names = ["cas", "ldadd", "ldclr", "ldeor", "ldset", "swp"];
let mut macros = BTreeMap::new();
for sym in sym_names {
macros.insert(sym, gen_macro(sym));
}
// Only CAS supports 16 bytes, and it has a different implementation that uses a different macro.
let mut cas16 = gen_macro("cas16");
for ordering in [
Ordering::Relaxed,
Ordering::Acquire,
Ordering::Release,
Ordering::AcqRel,
] {
let sym_ordering = aarch64_symbol(ordering);
for size in [1, 2, 4, 8] {
for (sym, macro_) in &mut macros {
let name = format!("__aarch64_{sym}{size}_{sym_ordering}");
writeln!(macro_, "$macro!( {ordering:?}, {size}, {name} );").unwrap();
}
}
let name = format!("__aarch64_cas16_{sym_ordering}");
writeln!(cas16, "$macro!( {ordering:?}, {name} );").unwrap();
}
let mut buf = String::new();
for macro_def in macros.values().chain(std::iter::once(&cas16)) {
buf += macro_def;
buf += "}; }\n";
}
let out_dir = PathBuf::from(std::env::var("OUT_DIR").unwrap());
std::fs::write(out_dir.join("outlined_atomics.rs"), buf).unwrap();
}
/// Emit directives for features we expect to support that aren't in `Cargo.toml`.
///
/// These are mostly cfg elements emitted by this `build.rs`.

View file

@ -262,8 +262,78 @@ macro_rules! or {
};
}
// See `generate_aarch64_outlined_atomics` in build.rs.
include!(concat!(env!("OUT_DIR"), "/outlined_atomics.rs"));
// These macros expand a caller-supplied macro once per (ordering, size)
// combination, generating the aarch64 outlined-atomics helper names
// (`__aarch64_<op><size>_<ordering>`) via `${concat(...)}`.

/// Expands `$macro` once per memory ordering (relaxed / acquire / release /
/// acq-rel), appending the ordering suffix to `$name`. The three-argument
/// form additionally forwards an operand size (`$bytes`).
#[macro_export]
macro_rules! foreach_ordering {
    ($macro:path, $bytes:tt, $name:ident) => {
        $macro!( Relaxed, $bytes, ${concat($name, _relax)} );
        $macro!( Acquire, $bytes, ${concat($name, _acq)} );
        $macro!( Release, $bytes, ${concat($name, _rel)} );
        $macro!( AcqRel, $bytes, ${concat($name, _acq_rel)} );
    };
    ($macro:path, $name:ident) => {
        $macro!( Relaxed, ${concat($name, _relax)} );
        $macro!( Acquire, ${concat($name, _acq)} );
        $macro!( Release, ${concat($name, _rel)} );
        $macro!( AcqRel, ${concat($name, _acq_rel)} );
    };
}

/// Expands `$macro` for every supported operand size (1, 2, 4, 8 bytes),
/// crossed with every ordering via `foreach_ordering!`.
#[macro_export]
macro_rules! foreach_bytes {
    ($macro:path, $name:ident) => {
        foreach_ordering!( $macro, 1, ${concat(__aarch64_, $name, "1")} );
        foreach_ordering!( $macro, 2, ${concat(__aarch64_, $name, "2")} );
        foreach_ordering!( $macro, 4, ${concat(__aarch64_, $name, "4")} );
        foreach_ordering!( $macro, 8, ${concat(__aarch64_, $name, "8")} );
    };
}

/// Generate different macros for cas/swp/add/clr/eor/set so that we can test them separately.
#[macro_export]
macro_rules! foreach_cas {
    ($macro:path) => {
        foreach_bytes!($macro, cas);
    };
}

/// Only CAS supports 16 bytes, and it has a different implementation that uses a different macro.
#[macro_export]
macro_rules! foreach_cas16 {
    ($macro:path) => {
        foreach_ordering!($macro, __aarch64_cas16);
    };
}

/// Per-ordering, per-size expansion for the swap (`swp`) helpers.
#[macro_export]
macro_rules! foreach_swp {
    ($macro:path) => {
        foreach_bytes!($macro, swp);
    };
}

/// Per-ordering, per-size expansion for the atomic-add (`ldadd`) helpers.
#[macro_export]
macro_rules! foreach_ldadd {
    ($macro:path) => {
        foreach_bytes!($macro, ldadd);
    };
}

/// Per-ordering, per-size expansion for the bit-clear (`ldclr`) helpers.
#[macro_export]
macro_rules! foreach_ldclr {
    ($macro:path) => {
        foreach_bytes!($macro, ldclr);
    };
}

/// Per-ordering, per-size expansion for the exclusive-or (`ldeor`) helpers.
#[macro_export]
macro_rules! foreach_ldeor {
    ($macro:path) => {
        foreach_bytes!($macro, ldeor);
    };
}

/// Per-ordering, per-size expansion for the bit-set (`ldset`) helpers.
#[macro_export]
macro_rules! foreach_ldset {
    ($macro:path) => {
        foreach_bytes!($macro, ldset);
    };
}
foreach_cas!(compare_and_swap);
foreach_cas16!(compare_and_swap_i128);
foreach_swp!(swap);

View file

@ -8,6 +8,7 @@
#![feature(linkage)]
#![feature(naked_functions)]
#![feature(repr_simd)]
#![feature(macro_metavar_expr_concat)]
#![feature(rustc_attrs)]
#![cfg_attr(f16_enabled, feature(f16))]
#![cfg_attr(f128_enabled, feature(f128))]

View file

@ -1,14 +1,12 @@
[package]
authors = ["Jorge Aparicio <jorge@japaric.io>"]
categories = ["no-std"]
description = "libm in pure Rust"
documentation = "https://docs.rs/libm"
keywords = ["libm", "math"]
license = "MIT"
name = "libm"
readme = "README.md"
repository = "https://github.com/rust-lang/compiler-builtins"
version = "0.2.15"
authors = ["Jorge Aparicio <jorge@japaric.io>"]
description = "libm in pure Rust"
categories = ["no-std"]
keywords = ["libm", "math"]
repository = "https://github.com/rust-lang/compiler-builtins"
license = "MIT"
edition = "2021"
rust-version = "1.63"

View file

@ -82,22 +82,77 @@ mod tests {
fn fmin_spec_test<F: Float>(f: impl Fn(F, F) -> F) {
let cases = [
(F::ZERO, F::ZERO, F::ZERO),
(F::ONE, F::ONE, F::ONE),
(F::ZERO, F::ONE, F::ZERO),
(F::ONE, F::ZERO, F::ZERO),
(F::ZERO, F::NEG_ONE, F::NEG_ONE),
(F::NEG_ONE, F::ZERO, F::NEG_ONE),
(F::INFINITY, F::ZERO, F::ZERO),
(F::NEG_INFINITY, F::ZERO, F::NEG_INFINITY),
(F::NAN, F::ZERO, F::ZERO),
(F::ZERO, F::INFINITY, F::ZERO),
(F::ZERO, F::NEG_INFINITY, F::NEG_INFINITY),
(F::ZERO, F::NAN, F::ZERO),
(F::ZERO, F::NEG_NAN, F::ZERO),
(F::NEG_ZERO, F::NEG_ZERO, F::NEG_ZERO),
(F::NEG_ZERO, F::ONE, F::NEG_ZERO),
(F::NEG_ZERO, F::NEG_ONE, F::NEG_ONE),
(F::NEG_ZERO, F::INFINITY, F::NEG_ZERO),
(F::NEG_ZERO, F::NEG_INFINITY, F::NEG_INFINITY),
(F::NEG_ZERO, F::NAN, F::NEG_ZERO),
(F::NEG_ZERO, F::NEG_NAN, F::NEG_ZERO),
(F::ONE, F::ZERO, F::ZERO),
(F::ONE, F::NEG_ZERO, F::NEG_ZERO),
(F::ONE, F::ONE, F::ONE),
(F::ONE, F::NEG_ONE, F::NEG_ONE),
(F::ONE, F::INFINITY, F::ONE),
(F::ONE, F::NEG_INFINITY, F::NEG_INFINITY),
(F::ONE, F::NAN, F::ONE),
(F::ONE, F::NEG_NAN, F::ONE),
(F::NEG_ONE, F::ZERO, F::NEG_ONE),
(F::NEG_ONE, F::NEG_ZERO, F::NEG_ONE),
(F::NEG_ONE, F::ONE, F::NEG_ONE),
(F::NEG_ONE, F::NEG_ONE, F::NEG_ONE),
(F::NEG_ONE, F::INFINITY, F::NEG_ONE),
(F::NEG_ONE, F::NEG_INFINITY, F::NEG_INFINITY),
(F::NEG_ONE, F::NAN, F::NEG_ONE),
(F::NEG_ONE, F::NEG_NAN, F::NEG_ONE),
(F::INFINITY, F::ZERO, F::ZERO),
(F::INFINITY, F::NEG_ZERO, F::NEG_ZERO),
(F::INFINITY, F::ONE, F::ONE),
(F::INFINITY, F::NEG_ONE, F::NEG_ONE),
(F::INFINITY, F::INFINITY, F::INFINITY),
(F::INFINITY, F::NEG_INFINITY, F::NEG_INFINITY),
(F::INFINITY, F::NAN, F::INFINITY),
(F::INFINITY, F::NEG_NAN, F::INFINITY),
(F::NEG_INFINITY, F::ZERO, F::NEG_INFINITY),
(F::NEG_INFINITY, F::NEG_ZERO, F::NEG_INFINITY),
(F::NEG_INFINITY, F::ONE, F::NEG_INFINITY),
(F::NEG_INFINITY, F::NEG_ONE, F::NEG_INFINITY),
(F::NEG_INFINITY, F::INFINITY, F::NEG_INFINITY),
(F::NEG_INFINITY, F::NEG_INFINITY, F::NEG_INFINITY),
(F::NEG_INFINITY, F::NAN, F::NEG_INFINITY),
(F::NEG_INFINITY, F::NEG_NAN, F::NEG_INFINITY),
(F::NAN, F::ZERO, F::ZERO),
(F::NAN, F::NEG_ZERO, F::NEG_ZERO),
(F::NAN, F::ONE, F::ONE),
(F::NAN, F::NEG_ONE, F::NEG_ONE),
(F::NAN, F::INFINITY, F::INFINITY),
(F::NAN, F::NEG_INFINITY, F::NEG_INFINITY),
(F::NAN, F::NAN, F::NAN),
(F::NEG_NAN, F::ZERO, F::ZERO),
(F::NEG_NAN, F::NEG_ZERO, F::NEG_ZERO),
(F::NEG_NAN, F::ONE, F::ONE),
(F::NEG_NAN, F::NEG_ONE, F::NEG_ONE),
(F::NEG_NAN, F::INFINITY, F::INFINITY),
(F::NEG_NAN, F::NEG_INFINITY, F::NEG_INFINITY),
];
for (x, y, res) in cases {
let val = f(x, y);
assert_biteq!(val, res, "fmin({}, {})", Hexf(x), Hexf(y));
}
// Ordering between zeros and NaNs does not matter
assert_eq!(f(F::ZERO, F::NEG_ZERO), F::ZERO);
assert_eq!(f(F::NEG_ZERO, F::ZERO), F::ZERO);
assert!(f(F::NAN, F::NEG_NAN).is_nan());
assert!(f(F::NEG_NAN, F::NAN).is_nan());
assert!(f(F::NEG_NAN, F::NEG_NAN).is_nan());
}
#[test]
@ -125,22 +180,77 @@ mod tests {
fn fmax_spec_test<F: Float>(f: impl Fn(F, F) -> F) {
let cases = [
(F::ZERO, F::ZERO, F::ZERO),
(F::ONE, F::ONE, F::ONE),
(F::ZERO, F::ONE, F::ONE),
(F::ONE, F::ZERO, F::ONE),
(F::ZERO, F::NEG_ONE, F::ZERO),
(F::NEG_ONE, F::ZERO, F::ZERO),
(F::INFINITY, F::ZERO, F::INFINITY),
(F::NEG_INFINITY, F::ZERO, F::ZERO),
(F::NAN, F::ZERO, F::ZERO),
(F::ZERO, F::INFINITY, F::INFINITY),
(F::ZERO, F::NEG_INFINITY, F::ZERO),
(F::ZERO, F::NAN, F::ZERO),
(F::ZERO, F::NEG_NAN, F::ZERO),
(F::NEG_ZERO, F::NEG_ZERO, F::NEG_ZERO),
(F::NEG_ZERO, F::ONE, F::ONE),
(F::NEG_ZERO, F::NEG_ONE, F::NEG_ZERO),
(F::NEG_ZERO, F::INFINITY, F::INFINITY),
(F::NEG_ZERO, F::NEG_INFINITY, F::NEG_ZERO),
(F::NEG_ZERO, F::NAN, F::NEG_ZERO),
(F::NEG_ZERO, F::NEG_NAN, F::NEG_ZERO),
(F::ONE, F::ZERO, F::ONE),
(F::ONE, F::NEG_ZERO, F::ONE),
(F::ONE, F::ONE, F::ONE),
(F::ONE, F::NEG_ONE, F::ONE),
(F::ONE, F::INFINITY, F::INFINITY),
(F::ONE, F::NEG_INFINITY, F::ONE),
(F::ONE, F::NAN, F::ONE),
(F::ONE, F::NEG_NAN, F::ONE),
(F::NEG_ONE, F::ZERO, F::ZERO),
(F::NEG_ONE, F::NEG_ZERO, F::NEG_ZERO),
(F::NEG_ONE, F::ONE, F::ONE),
(F::NEG_ONE, F::NEG_ONE, F::NEG_ONE),
(F::NEG_ONE, F::INFINITY, F::INFINITY),
(F::NEG_ONE, F::NEG_INFINITY, F::NEG_ONE),
(F::NEG_ONE, F::NAN, F::NEG_ONE),
(F::NEG_ONE, F::NEG_NAN, F::NEG_ONE),
(F::INFINITY, F::ZERO, F::INFINITY),
(F::INFINITY, F::NEG_ZERO, F::INFINITY),
(F::INFINITY, F::ONE, F::INFINITY),
(F::INFINITY, F::NEG_ONE, F::INFINITY),
(F::INFINITY, F::INFINITY, F::INFINITY),
(F::INFINITY, F::NEG_INFINITY, F::INFINITY),
(F::INFINITY, F::NAN, F::INFINITY),
(F::INFINITY, F::NEG_NAN, F::INFINITY),
(F::NEG_INFINITY, F::ZERO, F::ZERO),
(F::NEG_INFINITY, F::NEG_ZERO, F::NEG_ZERO),
(F::NEG_INFINITY, F::ONE, F::ONE),
(F::NEG_INFINITY, F::NEG_ONE, F::NEG_ONE),
(F::NEG_INFINITY, F::INFINITY, F::INFINITY),
(F::NEG_INFINITY, F::NEG_INFINITY, F::NEG_INFINITY),
(F::NEG_INFINITY, F::NAN, F::NEG_INFINITY),
(F::NEG_INFINITY, F::NEG_NAN, F::NEG_INFINITY),
(F::NAN, F::ZERO, F::ZERO),
(F::NAN, F::NEG_ZERO, F::NEG_ZERO),
(F::NAN, F::ONE, F::ONE),
(F::NAN, F::NEG_ONE, F::NEG_ONE),
(F::NAN, F::INFINITY, F::INFINITY),
(F::NAN, F::NEG_INFINITY, F::NEG_INFINITY),
(F::NAN, F::NAN, F::NAN),
(F::NEG_NAN, F::ZERO, F::ZERO),
(F::NEG_NAN, F::NEG_ZERO, F::NEG_ZERO),
(F::NEG_NAN, F::ONE, F::ONE),
(F::NEG_NAN, F::NEG_ONE, F::NEG_ONE),
(F::NEG_NAN, F::INFINITY, F::INFINITY),
(F::NEG_NAN, F::NEG_INFINITY, F::NEG_INFINITY),
];
for (x, y, res) in cases {
let val = f(x, y);
assert_biteq!(val, res, "fmax({}, {})", Hexf(x), Hexf(y));
}
// Ordering between zeros and NaNs does not matter
assert_eq!(f(F::ZERO, F::NEG_ZERO), F::ZERO);
assert_eq!(f(F::NEG_ZERO, F::ZERO), F::ZERO);
assert!(f(F::NAN, F::NEG_NAN).is_nan());
assert!(f(F::NEG_NAN, F::NAN).is_nan());
assert!(f(F::NEG_NAN, F::NEG_NAN).is_nan());
}
#[test]

View file

@ -74,24 +74,77 @@ mod tests {
fn fminimum_spec_test<F: Float>(f: impl Fn(F, F) -> F) {
let cases = [
(F::ZERO, F::ZERO, F::ZERO),
(F::ONE, F::ONE, F::ONE),
(F::ZERO, F::ONE, F::ZERO),
(F::ONE, F::ZERO, F::ZERO),
(F::ZERO, F::NEG_ONE, F::NEG_ONE),
(F::NEG_ONE, F::ZERO, F::NEG_ONE),
(F::INFINITY, F::ZERO, F::ZERO),
(F::NEG_INFINITY, F::ZERO, F::NEG_INFINITY),
(F::NAN, F::ZERO, F::NAN),
(F::ZERO, F::NAN, F::NAN),
(F::NAN, F::NAN, F::NAN),
(F::ZERO, F::NEG_ZERO, F::NEG_ZERO),
(F::ZERO, F::ONE, F::ZERO),
(F::ZERO, F::NEG_ONE, F::NEG_ONE),
(F::ZERO, F::INFINITY, F::ZERO),
(F::ZERO, F::NEG_INFINITY, F::NEG_INFINITY),
(F::ZERO, F::NAN, F::NAN),
(F::NEG_ZERO, F::ZERO, F::NEG_ZERO),
(F::NEG_ZERO, F::NEG_ZERO, F::NEG_ZERO),
(F::NEG_ZERO, F::ONE, F::NEG_ZERO),
(F::NEG_ZERO, F::NEG_ONE, F::NEG_ONE),
(F::NEG_ZERO, F::INFINITY, F::NEG_ZERO),
(F::NEG_ZERO, F::NEG_INFINITY, F::NEG_INFINITY),
(F::NEG_ZERO, F::NAN, F::NAN),
(F::ONE, F::ZERO, F::ZERO),
(F::ONE, F::NEG_ZERO, F::NEG_ZERO),
(F::ONE, F::ONE, F::ONE),
(F::ONE, F::NEG_ONE, F::NEG_ONE),
(F::ONE, F::INFINITY, F::ONE),
(F::ONE, F::NEG_INFINITY, F::NEG_INFINITY),
(F::ONE, F::NAN, F::NAN),
(F::NEG_ONE, F::ZERO, F::NEG_ONE),
(F::NEG_ONE, F::NEG_ZERO, F::NEG_ONE),
(F::NEG_ONE, F::ONE, F::NEG_ONE),
(F::NEG_ONE, F::NEG_ONE, F::NEG_ONE),
(F::NEG_ONE, F::INFINITY, F::NEG_ONE),
(F::NEG_ONE, F::NEG_INFINITY, F::NEG_INFINITY),
(F::NEG_ONE, F::NAN, F::NAN),
(F::INFINITY, F::ZERO, F::ZERO),
(F::INFINITY, F::NEG_ZERO, F::NEG_ZERO),
(F::INFINITY, F::ONE, F::ONE),
(F::INFINITY, F::NEG_ONE, F::NEG_ONE),
(F::INFINITY, F::INFINITY, F::INFINITY),
(F::INFINITY, F::NEG_INFINITY, F::NEG_INFINITY),
(F::INFINITY, F::NAN, F::NAN),
(F::NEG_INFINITY, F::ZERO, F::NEG_INFINITY),
(F::NEG_INFINITY, F::NEG_ZERO, F::NEG_INFINITY),
(F::NEG_INFINITY, F::ONE, F::NEG_INFINITY),
(F::NEG_INFINITY, F::NEG_ONE, F::NEG_INFINITY),
(F::NEG_INFINITY, F::INFINITY, F::NEG_INFINITY),
(F::NEG_INFINITY, F::NEG_INFINITY, F::NEG_INFINITY),
(F::NEG_INFINITY, F::NAN, F::NAN),
(F::NAN, F::ZERO, F::NAN),
(F::NAN, F::NEG_ZERO, F::NAN),
(F::NAN, F::ONE, F::NAN),
(F::NAN, F::NEG_ONE, F::NAN),
(F::NAN, F::INFINITY, F::NAN),
(F::NAN, F::NEG_INFINITY, F::NAN),
(F::NAN, F::NAN, F::NAN),
];
for (x, y, res) in cases {
let val = f(x, y);
assert_biteq!(val, res, "fminimum({}, {})", Hexf(x), Hexf(y));
}
// Ordering between NaNs does not matter
assert!(f(F::NAN, F::NEG_NAN).is_nan());
assert!(f(F::NEG_NAN, F::NAN).is_nan());
assert!(f(F::ZERO, F::NEG_NAN).is_nan());
assert!(f(F::NEG_ZERO, F::NEG_NAN).is_nan());
assert!(f(F::ONE, F::NEG_NAN).is_nan());
assert!(f(F::NEG_ONE, F::NEG_NAN).is_nan());
assert!(f(F::INFINITY, F::NEG_NAN).is_nan());
assert!(f(F::NEG_INFINITY, F::NEG_NAN).is_nan());
assert!(f(F::NEG_NAN, F::ZERO).is_nan());
assert!(f(F::NEG_NAN, F::NEG_ZERO).is_nan());
assert!(f(F::NEG_NAN, F::ONE).is_nan());
assert!(f(F::NEG_NAN, F::NEG_ONE).is_nan());
assert!(f(F::NEG_NAN, F::INFINITY).is_nan());
assert!(f(F::NEG_NAN, F::NEG_INFINITY).is_nan());
assert!(f(F::NEG_NAN, F::NEG_NAN).is_nan());
}
#[test]
@ -119,24 +172,77 @@ mod tests {
fn fmaximum_spec_test<F: Float>(f: impl Fn(F, F) -> F) {
let cases = [
(F::ZERO, F::ZERO, F::ZERO),
(F::ONE, F::ONE, F::ONE),
(F::ZERO, F::ONE, F::ONE),
(F::ONE, F::ZERO, F::ONE),
(F::ZERO, F::NEG_ONE, F::ZERO),
(F::NEG_ONE, F::ZERO, F::ZERO),
(F::INFINITY, F::ZERO, F::INFINITY),
(F::NEG_INFINITY, F::ZERO, F::ZERO),
(F::NAN, F::ZERO, F::NAN),
(F::ZERO, F::NAN, F::NAN),
(F::NAN, F::NAN, F::NAN),
(F::ZERO, F::NEG_ZERO, F::ZERO),
(F::ZERO, F::ONE, F::ONE),
(F::ZERO, F::NEG_ONE, F::ZERO),
(F::ZERO, F::INFINITY, F::INFINITY),
(F::ZERO, F::NEG_INFINITY, F::ZERO),
(F::ZERO, F::NAN, F::NAN),
(F::NEG_ZERO, F::ZERO, F::ZERO),
(F::NEG_ZERO, F::NEG_ZERO, F::NEG_ZERO),
(F::NEG_ZERO, F::ONE, F::ONE),
(F::NEG_ZERO, F::NEG_ONE, F::NEG_ZERO),
(F::NEG_ZERO, F::INFINITY, F::INFINITY),
(F::NEG_ZERO, F::NEG_INFINITY, F::NEG_ZERO),
(F::NEG_ZERO, F::NAN, F::NAN),
(F::ONE, F::ZERO, F::ONE),
(F::ONE, F::NEG_ZERO, F::ONE),
(F::ONE, F::ONE, F::ONE),
(F::ONE, F::NEG_ONE, F::ONE),
(F::ONE, F::INFINITY, F::INFINITY),
(F::ONE, F::NEG_INFINITY, F::ONE),
(F::ONE, F::NAN, F::NAN),
(F::NEG_ONE, F::ZERO, F::ZERO),
(F::NEG_ONE, F::NEG_ZERO, F::NEG_ZERO),
(F::NEG_ONE, F::ONE, F::ONE),
(F::NEG_ONE, F::NEG_ONE, F::NEG_ONE),
(F::NEG_ONE, F::INFINITY, F::INFINITY),
(F::NEG_ONE, F::NEG_INFINITY, F::NEG_ONE),
(F::NEG_ONE, F::NAN, F::NAN),
(F::INFINITY, F::ZERO, F::INFINITY),
(F::INFINITY, F::NEG_ZERO, F::INFINITY),
(F::INFINITY, F::ONE, F::INFINITY),
(F::INFINITY, F::NEG_ONE, F::INFINITY),
(F::INFINITY, F::INFINITY, F::INFINITY),
(F::INFINITY, F::NEG_INFINITY, F::INFINITY),
(F::INFINITY, F::NAN, F::NAN),
(F::NEG_INFINITY, F::ZERO, F::ZERO),
(F::NEG_INFINITY, F::NEG_ZERO, F::NEG_ZERO),
(F::NEG_INFINITY, F::ONE, F::ONE),
(F::NEG_INFINITY, F::NEG_ONE, F::NEG_ONE),
(F::NEG_INFINITY, F::INFINITY, F::INFINITY),
(F::NEG_INFINITY, F::NEG_INFINITY, F::NEG_INFINITY),
(F::NEG_INFINITY, F::NAN, F::NAN),
(F::NAN, F::ZERO, F::NAN),
(F::NAN, F::NEG_ZERO, F::NAN),
(F::NAN, F::ONE, F::NAN),
(F::NAN, F::NEG_ONE, F::NAN),
(F::NAN, F::INFINITY, F::NAN),
(F::NAN, F::NEG_INFINITY, F::NAN),
(F::NAN, F::NAN, F::NAN),
];
for (x, y, res) in cases {
let val = f(x, y);
assert_biteq!(val, res, "fmaximum({}, {})", Hexf(x), Hexf(y));
}
// Ordering between NaNs does not matter
assert!(f(F::NAN, F::NEG_NAN).is_nan());
assert!(f(F::NEG_NAN, F::NAN).is_nan());
assert!(f(F::ZERO, F::NEG_NAN).is_nan());
assert!(f(F::NEG_ZERO, F::NEG_NAN).is_nan());
assert!(f(F::ONE, F::NEG_NAN).is_nan());
assert!(f(F::NEG_ONE, F::NEG_NAN).is_nan());
assert!(f(F::INFINITY, F::NEG_NAN).is_nan());
assert!(f(F::NEG_INFINITY, F::NEG_NAN).is_nan());
assert!(f(F::NEG_NAN, F::ZERO).is_nan());
assert!(f(F::NEG_NAN, F::NEG_ZERO).is_nan());
assert!(f(F::NEG_NAN, F::ONE).is_nan());
assert!(f(F::NEG_NAN, F::NEG_ONE).is_nan());
assert!(f(F::NEG_NAN, F::INFINITY).is_nan());
assert!(f(F::NEG_NAN, F::NEG_INFINITY).is_nan());
assert!(f(F::NEG_NAN, F::NEG_NAN).is_nan());
}
#[test]

View file

@ -74,24 +74,77 @@ mod tests {
fn fminimum_num_spec_test<F: Float>(f: impl Fn(F, F) -> F) {
let cases = [
(F::ZERO, F::ZERO, F::ZERO),
(F::ONE, F::ONE, F::ONE),
(F::ZERO, F::ONE, F::ZERO),
(F::ONE, F::ZERO, F::ZERO),
(F::ZERO, F::NEG_ONE, F::NEG_ONE),
(F::NEG_ONE, F::ZERO, F::NEG_ONE),
(F::INFINITY, F::ZERO, F::ZERO),
(F::NEG_INFINITY, F::ZERO, F::NEG_INFINITY),
(F::NAN, F::ZERO, F::ZERO),
(F::ZERO, F::NAN, F::ZERO),
(F::NAN, F::NAN, F::NAN),
(F::ZERO, F::NEG_ZERO, F::NEG_ZERO),
(F::ZERO, F::ONE, F::ZERO),
(F::ZERO, F::NEG_ONE, F::NEG_ONE),
(F::ZERO, F::INFINITY, F::ZERO),
(F::ZERO, F::NEG_INFINITY, F::NEG_INFINITY),
(F::ZERO, F::NAN, F::ZERO),
(F::ZERO, F::NEG_NAN, F::ZERO),
(F::NEG_ZERO, F::ZERO, F::NEG_ZERO),
(F::NEG_ZERO, F::NEG_ZERO, F::NEG_ZERO),
(F::NEG_ZERO, F::ONE, F::NEG_ZERO),
(F::NEG_ZERO, F::NEG_ONE, F::NEG_ONE),
(F::NEG_ZERO, F::INFINITY, F::NEG_ZERO),
(F::NEG_ZERO, F::NEG_INFINITY, F::NEG_INFINITY),
(F::NEG_ZERO, F::NAN, F::NEG_ZERO),
(F::NEG_ZERO, F::NEG_NAN, F::NEG_ZERO),
(F::ONE, F::ZERO, F::ZERO),
(F::ONE, F::NEG_ZERO, F::NEG_ZERO),
(F::ONE, F::ONE, F::ONE),
(F::ONE, F::NEG_ONE, F::NEG_ONE),
(F::ONE, F::INFINITY, F::ONE),
(F::ONE, F::NEG_INFINITY, F::NEG_INFINITY),
(F::ONE, F::NAN, F::ONE),
(F::ONE, F::NEG_NAN, F::ONE),
(F::NEG_ONE, F::ZERO, F::NEG_ONE),
(F::NEG_ONE, F::NEG_ZERO, F::NEG_ONE),
(F::NEG_ONE, F::ONE, F::NEG_ONE),
(F::NEG_ONE, F::NEG_ONE, F::NEG_ONE),
(F::NEG_ONE, F::INFINITY, F::NEG_ONE),
(F::NEG_ONE, F::NEG_INFINITY, F::NEG_INFINITY),
(F::NEG_ONE, F::NAN, F::NEG_ONE),
(F::NEG_ONE, F::NEG_NAN, F::NEG_ONE),
(F::INFINITY, F::ZERO, F::ZERO),
(F::INFINITY, F::NEG_ZERO, F::NEG_ZERO),
(F::INFINITY, F::ONE, F::ONE),
(F::INFINITY, F::NEG_ONE, F::NEG_ONE),
(F::INFINITY, F::INFINITY, F::INFINITY),
(F::INFINITY, F::NEG_INFINITY, F::NEG_INFINITY),
(F::INFINITY, F::NAN, F::INFINITY),
(F::INFINITY, F::NEG_NAN, F::INFINITY),
(F::NEG_INFINITY, F::ZERO, F::NEG_INFINITY),
(F::NEG_INFINITY, F::NEG_ZERO, F::NEG_INFINITY),
(F::NEG_INFINITY, F::ONE, F::NEG_INFINITY),
(F::NEG_INFINITY, F::NEG_ONE, F::NEG_INFINITY),
(F::NEG_INFINITY, F::INFINITY, F::NEG_INFINITY),
(F::NEG_INFINITY, F::NEG_INFINITY, F::NEG_INFINITY),
(F::NEG_INFINITY, F::NAN, F::NEG_INFINITY),
(F::NEG_INFINITY, F::NEG_NAN, F::NEG_INFINITY),
(F::NAN, F::ZERO, F::ZERO),
(F::NAN, F::NEG_ZERO, F::NEG_ZERO),
(F::NAN, F::ONE, F::ONE),
(F::NAN, F::NEG_ONE, F::NEG_ONE),
(F::NAN, F::INFINITY, F::INFINITY),
(F::NAN, F::NEG_INFINITY, F::NEG_INFINITY),
(F::NAN, F::NAN, F::NAN),
(F::NEG_NAN, F::ZERO, F::ZERO),
(F::NEG_NAN, F::NEG_ZERO, F::NEG_ZERO),
(F::NEG_NAN, F::ONE, F::ONE),
(F::NEG_NAN, F::NEG_ONE, F::NEG_ONE),
(F::NEG_NAN, F::INFINITY, F::INFINITY),
(F::NEG_NAN, F::NEG_INFINITY, F::NEG_INFINITY),
];
for (x, y, res) in cases {
let val = f(x, y);
assert_biteq!(val, res, "fminimum_num({}, {})", Hexf(x), Hexf(y));
for (x, y, expected) in cases {
let actual = f(x, y);
assert_biteq!(actual, expected, "fminimum_num({}, {})", Hexf(x), Hexf(y));
}
// Ordering between NaNs does not matter
assert!(f(F::NAN, F::NEG_NAN).is_nan());
assert!(f(F::NEG_NAN, F::NAN).is_nan());
assert!(f(F::NEG_NAN, F::NEG_NAN).is_nan());
}
#[test]
@ -119,24 +172,77 @@ mod tests {
fn fmaximum_num_spec_test<F: Float>(f: impl Fn(F, F) -> F) {
let cases = [
(F::ZERO, F::ZERO, F::ZERO),
(F::ONE, F::ONE, F::ONE),
(F::ZERO, F::ONE, F::ONE),
(F::ONE, F::ZERO, F::ONE),
(F::ZERO, F::NEG_ONE, F::ZERO),
(F::NEG_ONE, F::ZERO, F::ZERO),
(F::INFINITY, F::ZERO, F::INFINITY),
(F::NEG_INFINITY, F::ZERO, F::ZERO),
(F::NAN, F::ZERO, F::ZERO),
(F::ZERO, F::NAN, F::ZERO),
(F::NAN, F::NAN, F::NAN),
(F::ZERO, F::NEG_ZERO, F::ZERO),
(F::ZERO, F::ONE, F::ONE),
(F::ZERO, F::NEG_ONE, F::ZERO),
(F::ZERO, F::INFINITY, F::INFINITY),
(F::ZERO, F::NEG_INFINITY, F::ZERO),
(F::ZERO, F::NAN, F::ZERO),
(F::ZERO, F::NEG_NAN, F::ZERO),
(F::NEG_ZERO, F::ZERO, F::ZERO),
(F::NEG_ZERO, F::NEG_ZERO, F::NEG_ZERO),
(F::NEG_ZERO, F::ONE, F::ONE),
(F::NEG_ZERO, F::NEG_ONE, F::NEG_ZERO),
(F::NEG_ZERO, F::INFINITY, F::INFINITY),
(F::NEG_ZERO, F::NEG_INFINITY, F::NEG_ZERO),
(F::NEG_ZERO, F::NAN, F::NEG_ZERO),
(F::NEG_ZERO, F::NEG_NAN, F::NEG_ZERO),
(F::ONE, F::ZERO, F::ONE),
(F::ONE, F::NEG_ZERO, F::ONE),
(F::ONE, F::ONE, F::ONE),
(F::ONE, F::NEG_ONE, F::ONE),
(F::ONE, F::INFINITY, F::INFINITY),
(F::ONE, F::NEG_INFINITY, F::ONE),
(F::ONE, F::NAN, F::ONE),
(F::ONE, F::NEG_NAN, F::ONE),
(F::NEG_ONE, F::ZERO, F::ZERO),
(F::NEG_ONE, F::NEG_ZERO, F::NEG_ZERO),
(F::NEG_ONE, F::ONE, F::ONE),
(F::NEG_ONE, F::NEG_ONE, F::NEG_ONE),
(F::NEG_ONE, F::INFINITY, F::INFINITY),
(F::NEG_ONE, F::NEG_INFINITY, F::NEG_ONE),
(F::NEG_ONE, F::NAN, F::NEG_ONE),
(F::NEG_ONE, F::NEG_NAN, F::NEG_ONE),
(F::INFINITY, F::ZERO, F::INFINITY),
(F::INFINITY, F::NEG_ZERO, F::INFINITY),
(F::INFINITY, F::ONE, F::INFINITY),
(F::INFINITY, F::NEG_ONE, F::INFINITY),
(F::INFINITY, F::INFINITY, F::INFINITY),
(F::INFINITY, F::NEG_INFINITY, F::INFINITY),
(F::INFINITY, F::NAN, F::INFINITY),
(F::INFINITY, F::NEG_NAN, F::INFINITY),
(F::NEG_INFINITY, F::ZERO, F::ZERO),
(F::NEG_INFINITY, F::NEG_ZERO, F::NEG_ZERO),
(F::NEG_INFINITY, F::ONE, F::ONE),
(F::NEG_INFINITY, F::NEG_ONE, F::NEG_ONE),
(F::NEG_INFINITY, F::INFINITY, F::INFINITY),
(F::NEG_INFINITY, F::NEG_INFINITY, F::NEG_INFINITY),
(F::NEG_INFINITY, F::NAN, F::NEG_INFINITY),
(F::NEG_INFINITY, F::NEG_NAN, F::NEG_INFINITY),
(F::NAN, F::ZERO, F::ZERO),
(F::NAN, F::NEG_ZERO, F::NEG_ZERO),
(F::NAN, F::ONE, F::ONE),
(F::NAN, F::NEG_ONE, F::NEG_ONE),
(F::NAN, F::INFINITY, F::INFINITY),
(F::NAN, F::NEG_INFINITY, F::NEG_INFINITY),
(F::NAN, F::NAN, F::NAN),
(F::NEG_NAN, F::ZERO, F::ZERO),
(F::NEG_NAN, F::NEG_ZERO, F::NEG_ZERO),
(F::NEG_NAN, F::ONE, F::ONE),
(F::NEG_NAN, F::NEG_ONE, F::NEG_ONE),
(F::NEG_NAN, F::INFINITY, F::INFINITY),
(F::NEG_NAN, F::NEG_INFINITY, F::NEG_INFINITY),
];
for (x, y, res) in cases {
let val = f(x, y);
assert_biteq!(val, res, "fmaximum_num({}, {})", Hexf(x), Hexf(y));
for (x, y, expected) in cases {
let actual = f(x, y);
assert_biteq!(actual, expected, "fmaximum_num({}, {})", Hexf(x), Hexf(y));
}
// Ordering between NaNs does not matter
assert!(f(F::NAN, F::NEG_NAN).is_nan());
assert!(f(F::NEG_NAN, F::NAN).is_nan());
assert!(f(F::NEG_NAN, F::NEG_NAN).is_nan());
}
#[test]

View file

@ -19,6 +19,5 @@ use crate::support::Float;
#[inline]
pub fn fmax<F: Float>(x: F, y: F) -> F {
let res = if x.is_nan() || x < y { y } else { x };
// Canonicalize
res * F::ONE
res.canonicalize()
}

View file

@ -4,8 +4,8 @@
//! Per the spec, returns the canonicalized result of:
//! - `x` if `x > y`
//! - `y` if `y > x`
//! - +0.0 if x and y are zero with opposite signs
//! - qNaN if either operation is NaN
//! - Logic following +0.0 > -0.0
//!
//! Excluded from our implementation is sNaN handling.
@ -23,6 +23,5 @@ pub fn fmaximum<F: Float>(x: F, y: F) -> F {
y
};
// Canonicalize
res * F::ONE
res.canonicalize()
}

View file

@ -4,10 +4,10 @@
//! Per the spec, returns:
//! - `x` if `x > y`
//! - `y` if `y > x`
//! - Non-NaN if one operand is NaN
//! - Logic following +0.0 > -0.0
//! - +0.0 if x and y are zero with opposite signs
//! - Either `x` or `y` if `x == y` and the signs are the same
//! - qNaN if either operand is a NaN
//! - Non-NaN if one operand is NaN
//! - qNaN if both operands are NaNx
//!
//! Excluded from our implementation is sNaN handling.
@ -15,12 +15,15 @@ use crate::support::Float;
#[inline]
pub fn fmaximum_num<F: Float>(x: F, y: F) -> F {
let res = if x.is_nan() || x < y || (x.biteq(F::NEG_ZERO) && y.is_sign_positive()) {
y
} else {
let res = if x > y || y.is_nan() {
x
} else if y > x || x.is_nan() {
y
} else if x.is_sign_positive() {
x
} else {
y
};
// Canonicalize
res * F::ONE
res.canonicalize()
}

View file

@ -19,6 +19,5 @@ use crate::support::Float;
#[inline]
pub fn fmin<F: Float>(x: F, y: F) -> F {
let res = if y.is_nan() || x < y { x } else { y };
// Canonicalize
res * F::ONE
res.canonicalize()
}

View file

@ -4,8 +4,8 @@
//! Per the spec, returns the canonicalized result of:
//! - `x` if `x < y`
//! - `y` if `y < x`
//! - -0.0 if x and y are zero with opposite signs
//! - qNaN if either operation is NaN
//! - Logic following +0.0 > -0.0
//!
//! Excluded from our implementation is sNaN handling.
@ -23,6 +23,5 @@ pub fn fminimum<F: Float>(x: F, y: F) -> F {
y
};
// Canonicalize
res * F::ONE
res.canonicalize()
}

View file

@ -4,10 +4,10 @@
//! Per the spec, returns:
//! - `x` if `x < y`
//! - `y` if `y < x`
//! - Non-NaN if one operand is NaN
//! - Logic following +0.0 > -0.0
//! - -0.0 if x and y are zero with opposite signs
//! - Either `x` or `y` if `x == y` and the signs are the same
//! - qNaN if either operand is a NaN
//! - Non-NaN if one operand is NaN
//! - qNaN if both operands are NaNx
//!
//! Excluded from our implementation is sNaN handling.
@ -15,12 +15,15 @@ use crate::support::Float;
#[inline]
pub fn fminimum_num<F: Float>(x: F, y: F) -> F {
let res = if y.is_nan() || x < y || (x.biteq(F::NEG_ZERO) && y.is_sign_positive()) {
x
} else {
let res = if x > y || x.is_nan() {
y
} else if y > x || y.is_nan() {
x
} else if x.is_sign_positive() {
y
} else {
x
};
// Canonicalize
res * F::ONE
res.canonicalize()
}

View file

@ -190,6 +190,15 @@ pub trait Float:
Self::ONE.copysign(self)
}
}
/// Make a best-effort attempt to canonicalize the number. Note that this is allowed
/// to be a nop and does not always quiet sNaNs.
fn canonicalize(self) -> Self {
// FIXME: LLVM often removes this. We should determine whether we can remove the operation,
// or switch to something based on `llvm.canonicalize` (which has crashes,
// <https://github.com/llvm/llvm-project/issues/32650>).
self * Self::ONE
}
}
/// Access the associated `Int` type from a float (helper to avoid ambiguous associated types).

View file

@ -143,10 +143,12 @@ macro_rules! assert_biteq {
let bits = $crate::support::Int::leading_zeros(l.to_bits() - l.to_bits());
assert!(
$crate::support::Float::biteq(l, r),
"{}\nl: {l:?} ({lb:#0width$x})\nr: {r:?} ({rb:#0width$x})",
"{}\nl: {l:?} ({lb:#0width$x} {lh})\nr: {r:?} ({rb:#0width$x} {rh})",
format_args!($($tt)*),
lb = l.to_bits(),
lh = $crate::support::Hexf(l),
rb = r.to_bits(),
rh = $crate::support::Hexf(r),
width = ((bits / 4) + 2) as usize,
);

View file

@ -1 +1 @@
df8102fe5f24f28a918660b0cd918d7331c3896e
d087f112b7d1323446c7b39a8b616aee7fa56b3d

View file

@ -149,8 +149,32 @@ pub const fn forget<T>(t: T) {
/// Like [`forget`], but also accepts unsized values.
///
/// This function is just a shim intended to be removed when the `unsized_locals` feature gets
/// stabilized.
/// While Rust does not permit unsized locals since its removal in [#111942] it is
/// still possible to call functions with unsized values from a function argument
/// or in-place construction.
///
/// ```rust
/// #![feature(unsized_fn_params, forget_unsized)]
/// #![allow(internal_features)]
///
/// use std::mem::forget_unsized;
///
/// pub fn in_place() {
/// forget_unsized(*Box::<str>::from("str"));
/// }
///
/// pub fn param(x: str) {
/// forget_unsized(x);
/// }
/// ```
///
/// This works because the compiler will alter these functions to pass the parameter
/// by reference instead. This trick is necessary to support `Box<dyn FnOnce()>: FnOnce()`.
/// See [#68304] and [#71170] for more information.
///
/// [#111942]: https://github.com/rust-lang/rust/issues/111942
/// [#68304]: https://github.com/rust-lang/rust/issues/68304
/// [#71170]: https://github.com/rust-lang/rust/pull/71170
#[inline]
#[unstable(feature = "forget_unsized", issue = "none")]
pub fn forget_unsized<T: ?Sized>(t: T) {

View file

@ -5,6 +5,8 @@ use core::ops::{Add, Div, Mul, Sub};
use std::f128::consts;
use std::num::FpCategory as Fp;
use super::{assert_approx_eq, assert_biteq};
// Note these tolerances make sense around zero, but not for more extreme exponents.
/// Default tolerances. Works for values that should be near precise but not exact. Roughly
@ -53,34 +55,6 @@ fn test_num_f128() {
// FIXME(f16_f128,miri): many of these have to be disabled since miri does not yet support
// the intrinsics.
#[test]
#[cfg(any(miri, target_has_reliable_f128_math))]
fn test_min_nan() {
assert_biteq!(f128::NAN.min(2.0), 2.0);
assert_biteq!(2.0f128.min(f128::NAN), 2.0);
}
#[test]
#[cfg(any(miri, target_has_reliable_f128_math))]
fn test_max_nan() {
assert_biteq!(f128::NAN.max(2.0), 2.0);
assert_biteq!(2.0f128.max(f128::NAN), 2.0);
}
#[test]
#[cfg(any(miri, target_has_reliable_f128_math))]
fn test_minimum() {
assert!(f128::NAN.minimum(2.0).is_nan());
assert!(2.0f128.minimum(f128::NAN).is_nan());
}
#[test]
#[cfg(any(miri, target_has_reliable_f128_math))]
fn test_maximum() {
assert!(f128::NAN.maximum(2.0).is_nan());
assert!(2.0f128.maximum(f128::NAN).is_nan());
}
#[test]
fn test_nan() {
let nan: f128 = f128::NAN;
@ -232,98 +206,6 @@ fn test_classify() {
assert_eq!(1e-4932f128.classify(), Fp::Subnormal);
}
#[test]
#[cfg(target_has_reliable_f128_math)]
fn test_floor() {
assert_biteq!(1.0f128.floor(), 1.0f128);
assert_biteq!(1.3f128.floor(), 1.0f128);
assert_biteq!(1.5f128.floor(), 1.0f128);
assert_biteq!(1.7f128.floor(), 1.0f128);
assert_biteq!(0.0f128.floor(), 0.0f128);
assert_biteq!((-0.0f128).floor(), -0.0f128);
assert_biteq!((-1.0f128).floor(), -1.0f128);
assert_biteq!((-1.3f128).floor(), -2.0f128);
assert_biteq!((-1.5f128).floor(), -2.0f128);
assert_biteq!((-1.7f128).floor(), -2.0f128);
}
#[test]
#[cfg(any(miri, target_has_reliable_f128_math))]
fn test_ceil() {
assert_biteq!(1.0f128.ceil(), 1.0f128);
assert_biteq!(1.3f128.ceil(), 2.0f128);
assert_biteq!(1.5f128.ceil(), 2.0f128);
assert_biteq!(1.7f128.ceil(), 2.0f128);
assert_biteq!(0.0f128.ceil(), 0.0f128);
assert_biteq!((-0.0f128).ceil(), -0.0f128);
assert_biteq!((-1.0f128).ceil(), -1.0f128);
assert_biteq!((-1.3f128).ceil(), -1.0f128);
assert_biteq!((-1.5f128).ceil(), -1.0f128);
assert_biteq!((-1.7f128).ceil(), -1.0f128);
}
#[test]
#[cfg(any(miri, target_has_reliable_f128_math))]
fn test_round() {
assert_biteq!(2.5f128.round(), 3.0f128);
assert_biteq!(1.0f128.round(), 1.0f128);
assert_biteq!(1.3f128.round(), 1.0f128);
assert_biteq!(1.5f128.round(), 2.0f128);
assert_biteq!(1.7f128.round(), 2.0f128);
assert_biteq!(0.0f128.round(), 0.0f128);
assert_biteq!((-0.0f128).round(), -0.0f128);
assert_biteq!((-1.0f128).round(), -1.0f128);
assert_biteq!((-1.3f128).round(), -1.0f128);
assert_biteq!((-1.5f128).round(), -2.0f128);
assert_biteq!((-1.7f128).round(), -2.0f128);
}
#[test]
#[cfg(any(miri, target_has_reliable_f128_math))]
fn test_round_ties_even() {
assert_biteq!(2.5f128.round_ties_even(), 2.0f128);
assert_biteq!(1.0f128.round_ties_even(), 1.0f128);
assert_biteq!(1.3f128.round_ties_even(), 1.0f128);
assert_biteq!(1.5f128.round_ties_even(), 2.0f128);
assert_biteq!(1.7f128.round_ties_even(), 2.0f128);
assert_biteq!(0.0f128.round_ties_even(), 0.0f128);
assert_biteq!((-0.0f128).round_ties_even(), -0.0f128);
assert_biteq!((-1.0f128).round_ties_even(), -1.0f128);
assert_biteq!((-1.3f128).round_ties_even(), -1.0f128);
assert_biteq!((-1.5f128).round_ties_even(), -2.0f128);
assert_biteq!((-1.7f128).round_ties_even(), -2.0f128);
}
#[test]
#[cfg(any(miri, target_has_reliable_f128_math))]
fn test_trunc() {
assert_biteq!(1.0f128.trunc(), 1.0f128);
assert_biteq!(1.3f128.trunc(), 1.0f128);
assert_biteq!(1.5f128.trunc(), 1.0f128);
assert_biteq!(1.7f128.trunc(), 1.0f128);
assert_biteq!(0.0f128.trunc(), 0.0f128);
assert_biteq!((-0.0f128).trunc(), -0.0f128);
assert_biteq!((-1.0f128).trunc(), -1.0f128);
assert_biteq!((-1.3f128).trunc(), -1.0f128);
assert_biteq!((-1.5f128).trunc(), -1.0f128);
assert_biteq!((-1.7f128).trunc(), -1.0f128);
}
#[test]
#[cfg(any(miri, target_has_reliable_f128_math))]
fn test_fract() {
assert_biteq!(1.0f128.fract(), 0.0f128);
assert_biteq!(1.3f128.fract(), 0.300000000000000000000000000000000039f128);
assert_biteq!(1.5f128.fract(), 0.5f128);
assert_biteq!(1.7f128.fract(), 0.7f128);
assert_biteq!(0.0f128.fract(), 0.0f128);
assert_biteq!((-0.0f128).fract(), 0.0f128);
assert_biteq!((-1.0f128).fract(), 0.0f128);
assert_biteq!((-1.3f128).fract(), -0.300000000000000000000000000000000039f128);
assert_biteq!((-1.5f128).fract(), -0.5f128);
assert_biteq!((-1.7f128).fract(), -0.699999999999999999999999999999999961f128);
}
#[test]
#[cfg(any(miri, target_has_reliable_f128_math))]
fn test_abs() {

View file

@ -4,6 +4,8 @@
use std::f16::consts;
use std::num::FpCategory as Fp;
use super::{assert_approx_eq, assert_biteq};
/// Tolerance for results on the order of 10.0e-2
#[allow(unused)]
const TOL_N2: f16 = 0.0001;
@ -49,34 +51,6 @@ fn test_num_f16() {
// FIXME(f16_f128,miri): many of these have to be disabled since miri does not yet support
// the intrinsics.
#[test]
#[cfg(any(miri, target_has_reliable_f16_math))]
fn test_min_nan() {
assert_biteq!(f16::NAN.min(2.0), 2.0);
assert_biteq!(2.0f16.min(f16::NAN), 2.0);
}
#[test]
#[cfg(any(miri, target_has_reliable_f16_math))]
fn test_max_nan() {
assert_biteq!(f16::NAN.max(2.0), 2.0);
assert_biteq!(2.0f16.max(f16::NAN), 2.0);
}
#[test]
#[cfg(any(miri, target_has_reliable_f16_math))]
fn test_minimum() {
assert!(f16::NAN.minimum(2.0).is_nan());
assert!(2.0f16.minimum(f16::NAN).is_nan());
}
#[test]
#[cfg(any(miri, target_has_reliable_f16_math))]
fn test_maximum() {
assert!(f16::NAN.maximum(2.0).is_nan());
assert!(2.0f16.maximum(f16::NAN).is_nan());
}
#[test]
fn test_nan() {
let nan: f16 = f16::NAN;
@ -228,98 +202,6 @@ fn test_classify() {
assert_eq!(1e-5f16.classify(), Fp::Subnormal);
}
#[test]
#[cfg(any(miri, target_has_reliable_f16_math))]
fn test_floor() {
assert_biteq!(1.0f16.floor(), 1.0f16);
assert_biteq!(1.3f16.floor(), 1.0f16);
assert_biteq!(1.5f16.floor(), 1.0f16);
assert_biteq!(1.7f16.floor(), 1.0f16);
assert_biteq!(0.0f16.floor(), 0.0f16);
assert_biteq!((-0.0f16).floor(), -0.0f16);
assert_biteq!((-1.0f16).floor(), -1.0f16);
assert_biteq!((-1.3f16).floor(), -2.0f16);
assert_biteq!((-1.5f16).floor(), -2.0f16);
assert_biteq!((-1.7f16).floor(), -2.0f16);
}
#[test]
#[cfg(any(miri, target_has_reliable_f16_math))]
fn test_ceil() {
assert_biteq!(1.0f16.ceil(), 1.0f16);
assert_biteq!(1.3f16.ceil(), 2.0f16);
assert_biteq!(1.5f16.ceil(), 2.0f16);
assert_biteq!(1.7f16.ceil(), 2.0f16);
assert_biteq!(0.0f16.ceil(), 0.0f16);
assert_biteq!((-0.0f16).ceil(), -0.0f16);
assert_biteq!((-1.0f16).ceil(), -1.0f16);
assert_biteq!((-1.3f16).ceil(), -1.0f16);
assert_biteq!((-1.5f16).ceil(), -1.0f16);
assert_biteq!((-1.7f16).ceil(), -1.0f16);
}
#[test]
#[cfg(any(miri, target_has_reliable_f16_math))]
fn test_round() {
assert_biteq!(2.5f16.round(), 3.0f16);
assert_biteq!(1.0f16.round(), 1.0f16);
assert_biteq!(1.3f16.round(), 1.0f16);
assert_biteq!(1.5f16.round(), 2.0f16);
assert_biteq!(1.7f16.round(), 2.0f16);
assert_biteq!(0.0f16.round(), 0.0f16);
assert_biteq!((-0.0f16).round(), -0.0f16);
assert_biteq!((-1.0f16).round(), -1.0f16);
assert_biteq!((-1.3f16).round(), -1.0f16);
assert_biteq!((-1.5f16).round(), -2.0f16);
assert_biteq!((-1.7f16).round(), -2.0f16);
}
#[test]
#[cfg(any(miri, target_has_reliable_f16_math))]
fn test_round_ties_even() {
assert_biteq!(2.5f16.round_ties_even(), 2.0f16);
assert_biteq!(1.0f16.round_ties_even(), 1.0f16);
assert_biteq!(1.3f16.round_ties_even(), 1.0f16);
assert_biteq!(1.5f16.round_ties_even(), 2.0f16);
assert_biteq!(1.7f16.round_ties_even(), 2.0f16);
assert_biteq!(0.0f16.round_ties_even(), 0.0f16);
assert_biteq!((-0.0f16).round_ties_even(), -0.0f16);
assert_biteq!((-1.0f16).round_ties_even(), -1.0f16);
assert_biteq!((-1.3f16).round_ties_even(), -1.0f16);
assert_biteq!((-1.5f16).round_ties_even(), -2.0f16);
assert_biteq!((-1.7f16).round_ties_even(), -2.0f16);
}
#[test]
#[cfg(any(miri, target_has_reliable_f16_math))]
fn test_trunc() {
assert_biteq!(1.0f16.trunc(), 1.0f16);
assert_biteq!(1.3f16.trunc(), 1.0f16);
assert_biteq!(1.5f16.trunc(), 1.0f16);
assert_biteq!(1.7f16.trunc(), 1.0f16);
assert_biteq!(0.0f16.trunc(), 0.0f16);
assert_biteq!((-0.0f16).trunc(), -0.0f16);
assert_biteq!((-1.0f16).trunc(), -1.0f16);
assert_biteq!((-1.3f16).trunc(), -1.0f16);
assert_biteq!((-1.5f16).trunc(), -1.0f16);
assert_biteq!((-1.7f16).trunc(), -1.0f16);
}
#[test]
#[cfg(any(miri, target_has_reliable_f16_math))]
fn test_fract() {
assert_biteq!(1.0f16.fract(), 0.0f16);
assert_biteq!(1.3f16.fract(), 0.2998f16);
assert_biteq!(1.5f16.fract(), 0.5f16);
assert_biteq!(1.7f16.fract(), 0.7f16);
assert_biteq!(0.0f16.fract(), 0.0f16);
assert_biteq!((-0.0f16).fract(), 0.0f16);
assert_biteq!((-1.0f16).fract(), 0.0f16);
assert_biteq!((-1.3f16).fract(), -0.2998f16);
assert_biteq!((-1.5f16).fract(), -0.5f16);
assert_biteq!((-1.7f16).fract(), -0.7f16);
}
#[test]
#[cfg(any(miri, target_has_reliable_f16_math))]
fn test_abs() {

View file

@ -2,6 +2,8 @@ use core::f32;
use core::f32::consts;
use core::num::FpCategory as Fp;
use super::{assert_approx_eq, assert_biteq};
/// Smallest number
const TINY_BITS: u32 = 0x1;
@ -33,30 +35,6 @@ fn test_num_f32() {
super::test_num(10f32, 2f32);
}
#[test]
fn test_min_nan() {
assert_biteq!(f32::NAN.min(2.0), 2.0);
assert_biteq!(2.0f32.min(f32::NAN), 2.0);
}
#[test]
fn test_max_nan() {
assert_biteq!(f32::NAN.max(2.0), 2.0);
assert_biteq!(2.0f32.max(f32::NAN), 2.0);
}
#[test]
fn test_minimum() {
assert!(f32::NAN.minimum(2.0).is_nan());
assert!(2.0f32.minimum(f32::NAN).is_nan());
}
#[test]
fn test_maximum() {
assert!(f32::NAN.maximum(2.0).is_nan());
assert!(2.0f32.maximum(f32::NAN).is_nan());
}
#[test]
fn test_nan() {
let nan: f32 = f32::NAN;
@ -208,92 +186,6 @@ fn test_classify() {
assert_eq!(1e-38f32.classify(), Fp::Subnormal);
}
#[test]
fn test_floor() {
assert_biteq!(f32::math::floor(1.0f32), 1.0f32);
assert_biteq!(f32::math::floor(1.3f32), 1.0f32);
assert_biteq!(f32::math::floor(1.5f32), 1.0f32);
assert_biteq!(f32::math::floor(1.7f32), 1.0f32);
assert_biteq!(f32::math::floor(0.0f32), 0.0f32);
assert_biteq!(f32::math::floor(-0.0f32), -0.0f32);
assert_biteq!(f32::math::floor(-1.0f32), -1.0f32);
assert_biteq!(f32::math::floor(-1.3f32), -2.0f32);
assert_biteq!(f32::math::floor(-1.5f32), -2.0f32);
assert_biteq!(f32::math::floor(-1.7f32), -2.0f32);
}
#[test]
fn test_ceil() {
assert_biteq!(f32::math::ceil(1.0f32), 1.0f32);
assert_biteq!(f32::math::ceil(1.3f32), 2.0f32);
assert_biteq!(f32::math::ceil(1.5f32), 2.0f32);
assert_biteq!(f32::math::ceil(1.7f32), 2.0f32);
assert_biteq!(f32::math::ceil(0.0f32), 0.0f32);
assert_biteq!(f32::math::ceil(-0.0f32), -0.0f32);
assert_biteq!(f32::math::ceil(-1.0f32), -1.0f32);
assert_biteq!(f32::math::ceil(-1.3f32), -1.0f32);
assert_biteq!(f32::math::ceil(-1.5f32), -1.0f32);
assert_biteq!(f32::math::ceil(-1.7f32), -1.0f32);
}
#[test]
fn test_round() {
assert_biteq!(f32::math::round(2.5f32), 3.0f32);
assert_biteq!(f32::math::round(1.0f32), 1.0f32);
assert_biteq!(f32::math::round(1.3f32), 1.0f32);
assert_biteq!(f32::math::round(1.5f32), 2.0f32);
assert_biteq!(f32::math::round(1.7f32), 2.0f32);
assert_biteq!(f32::math::round(0.0f32), 0.0f32);
assert_biteq!(f32::math::round(-0.0f32), -0.0f32);
assert_biteq!(f32::math::round(-1.0f32), -1.0f32);
assert_biteq!(f32::math::round(-1.3f32), -1.0f32);
assert_biteq!(f32::math::round(-1.5f32), -2.0f32);
assert_biteq!(f32::math::round(-1.7f32), -2.0f32);
}
#[test]
fn test_round_ties_even() {
assert_biteq!(f32::math::round_ties_even(2.5f32), 2.0f32);
assert_biteq!(f32::math::round_ties_even(1.0f32), 1.0f32);
assert_biteq!(f32::math::round_ties_even(1.3f32), 1.0f32);
assert_biteq!(f32::math::round_ties_even(1.5f32), 2.0f32);
assert_biteq!(f32::math::round_ties_even(1.7f32), 2.0f32);
assert_biteq!(f32::math::round_ties_even(0.0f32), 0.0f32);
assert_biteq!(f32::math::round_ties_even(-0.0f32), -0.0f32);
assert_biteq!(f32::math::round_ties_even(-1.0f32), -1.0f32);
assert_biteq!(f32::math::round_ties_even(-1.3f32), -1.0f32);
assert_biteq!(f32::math::round_ties_even(-1.5f32), -2.0f32);
assert_biteq!(f32::math::round_ties_even(-1.7f32), -2.0f32);
}
#[test]
fn test_trunc() {
assert_biteq!(f32::math::trunc(1.0f32), 1.0f32);
assert_biteq!(f32::math::trunc(1.3f32), 1.0f32);
assert_biteq!(f32::math::trunc(1.5f32), 1.0f32);
assert_biteq!(f32::math::trunc(1.7f32), 1.0f32);
assert_biteq!(f32::math::trunc(0.0f32), 0.0f32);
assert_biteq!(f32::math::trunc(-0.0f32), -0.0f32);
assert_biteq!(f32::math::trunc(-1.0f32), -1.0f32);
assert_biteq!(f32::math::trunc(-1.3f32), -1.0f32);
assert_biteq!(f32::math::trunc(-1.5f32), -1.0f32);
assert_biteq!(f32::math::trunc(-1.7f32), -1.0f32);
}
#[test]
fn test_fract() {
assert_biteq!(f32::math::fract(1.0f32), 0.0f32);
assert_biteq!(f32::math::fract(1.3f32), 0.29999995f32);
assert_biteq!(f32::math::fract(1.5f32), 0.5f32);
assert_biteq!(f32::math::fract(1.7f32), 0.70000005f32);
assert_biteq!(f32::math::fract(0.0f32), 0.0f32);
assert_biteq!(f32::math::fract(-0.0f32), 0.0f32);
assert_biteq!(f32::math::fract(-1.0f32), 0.0f32);
assert_biteq!(f32::math::fract(-1.3f32), -0.29999995f32);
assert_biteq!(f32::math::fract(-1.5f32), -0.5f32);
assert_biteq!(f32::math::fract(-1.7f32), -0.70000005f32);
}
#[test]
fn test_abs() {
assert_biteq!(f32::INFINITY.abs(), f32::INFINITY);

View file

@ -2,6 +2,8 @@ use core::f64;
use core::f64::consts;
use core::num::FpCategory as Fp;
use super::{assert_approx_eq, assert_biteq};
/// Smallest number
const TINY_BITS: u64 = 0x1;
@ -28,18 +30,6 @@ fn test_num_f64() {
super::test_num(10f64, 2f64);
}
#[test]
fn test_min_nan() {
assert_biteq!(f64::NAN.min(2.0), 2.0);
assert_biteq!(2.0f64.min(f64::NAN), 2.0);
}
#[test]
fn test_max_nan() {
assert_biteq!(f64::NAN.max(2.0), 2.0);
assert_biteq!(2.0f64.max(f64::NAN), 2.0);
}
#[test]
fn test_nan() {
let nan: f64 = f64::NAN;
@ -190,92 +180,6 @@ fn test_classify() {
assert_eq!(1e-308f64.classify(), Fp::Subnormal);
}
#[test]
fn test_floor() {
assert_biteq!(f64::math::floor(1.0f64), 1.0f64);
assert_biteq!(f64::math::floor(1.3f64), 1.0f64);
assert_biteq!(f64::math::floor(1.5f64), 1.0f64);
assert_biteq!(f64::math::floor(1.7f64), 1.0f64);
assert_biteq!(f64::math::floor(0.0f64), 0.0f64);
assert_biteq!(f64::math::floor(-0.0f64), -0.0f64);
assert_biteq!(f64::math::floor(-1.0f64), -1.0f64);
assert_biteq!(f64::math::floor(-1.3f64), -2.0f64);
assert_biteq!(f64::math::floor(-1.5f64), -2.0f64);
assert_biteq!(f64::math::floor(-1.7f64), -2.0f64);
}
#[test]
fn test_ceil() {
assert_biteq!(f64::math::ceil(1.0f64), 1.0f64);
assert_biteq!(f64::math::ceil(1.3f64), 2.0f64);
assert_biteq!(f64::math::ceil(1.5f64), 2.0f64);
assert_biteq!(f64::math::ceil(1.7f64), 2.0f64);
assert_biteq!(f64::math::ceil(0.0f64), 0.0f64);
assert_biteq!(f64::math::ceil(-0.0f64), -0.0f64);
assert_biteq!(f64::math::ceil(-1.0f64), -1.0f64);
assert_biteq!(f64::math::ceil(-1.3f64), -1.0f64);
assert_biteq!(f64::math::ceil(-1.5f64), -1.0f64);
assert_biteq!(f64::math::ceil(-1.7f64), -1.0f64);
}
#[test]
fn test_round() {
assert_biteq!(f64::math::round(2.5f64), 3.0f64);
assert_biteq!(f64::math::round(1.0f64), 1.0f64);
assert_biteq!(f64::math::round(1.3f64), 1.0f64);
assert_biteq!(f64::math::round(1.5f64), 2.0f64);
assert_biteq!(f64::math::round(1.7f64), 2.0f64);
assert_biteq!(f64::math::round(0.0f64), 0.0f64);
assert_biteq!(f64::math::round(-0.0f64), -0.0f64);
assert_biteq!(f64::math::round(-1.0f64), -1.0f64);
assert_biteq!(f64::math::round(-1.3f64), -1.0f64);
assert_biteq!(f64::math::round(-1.5f64), -2.0f64);
assert_biteq!(f64::math::round(-1.7f64), -2.0f64);
}
#[test]
fn test_round_ties_even() {
assert_biteq!(f64::math::round_ties_even(2.5f64), 2.0f64);
assert_biteq!(f64::math::round_ties_even(1.0f64), 1.0f64);
assert_biteq!(f64::math::round_ties_even(1.3f64), 1.0f64);
assert_biteq!(f64::math::round_ties_even(1.5f64), 2.0f64);
assert_biteq!(f64::math::round_ties_even(1.7f64), 2.0f64);
assert_biteq!(f64::math::round_ties_even(0.0f64), 0.0f64);
assert_biteq!(f64::math::round_ties_even(-0.0f64), -0.0f64);
assert_biteq!(f64::math::round_ties_even(-1.0f64), -1.0f64);
assert_biteq!(f64::math::round_ties_even(-1.3f64), -1.0f64);
assert_biteq!(f64::math::round_ties_even(-1.5f64), -2.0f64);
assert_biteq!(f64::math::round_ties_even(-1.7f64), -2.0f64);
}
#[test]
fn test_trunc() {
assert_biteq!(f64::math::trunc(1.0f64), 1.0f64);
assert_biteq!(f64::math::trunc(1.3f64), 1.0f64);
assert_biteq!(f64::math::trunc(1.5f64), 1.0f64);
assert_biteq!(f64::math::trunc(1.7f64), 1.0f64);
assert_biteq!(f64::math::trunc(0.0f64), 0.0f64);
assert_biteq!(f64::math::trunc(-0.0f64), -0.0f64);
assert_biteq!(f64::math::trunc(-1.0f64), -1.0f64);
assert_biteq!(f64::math::trunc(-1.3f64), -1.0f64);
assert_biteq!(f64::math::trunc(-1.5f64), -1.0f64);
assert_biteq!(f64::math::trunc(-1.7f64), -1.0f64);
}
#[test]
fn test_fract() {
assert_biteq!(f64::math::fract(1.0f64), 0.0f64);
assert_biteq!(f64::math::fract(1.3f64), 0.30000000000000004f64);
assert_biteq!(f64::math::fract(1.5f64), 0.5f64);
assert_biteq!(f64::math::fract(1.7f64), 0.7f64);
assert_biteq!(f64::math::fract(0.0f64), 0.0f64);
assert_biteq!(f64::math::fract(-0.0f64), 0.0f64);
assert_biteq!(f64::math::fract(-1.0f64), 0.0f64);
assert_biteq!(f64::math::fract(-1.3f64), -0.30000000000000004f64);
assert_biteq!(f64::math::fract(-1.5f64), -0.5f64);
assert_biteq!(f64::math::fract(-1.7f64), -0.69999999999999996f64);
}
#[test]
fn test_abs() {
assert_biteq!(f64::INFINITY.abs(), f64::INFINITY);

View file

@ -1,9 +1,34 @@
use std::fmt;
use std::ops::{Add, Div, Mul, Rem, Sub};
/// Verify that floats are within a tolerance of each other, 1.0e-6 by default.
macro_rules! assert_approx_eq {
($a:expr, $b:expr) => {{ assert_approx_eq!($a, $b, 1.0e-6) }};
/// Set the default tolerance for float comparison based on the type.
trait Approx {
const LIM: Self;
}
impl Approx for f16 {
const LIM: Self = 1e-3;
}
impl Approx for f32 {
const LIM: Self = 1e-6;
}
impl Approx for f64 {
const LIM: Self = 1e-6;
}
impl Approx for f128 {
const LIM: Self = 1e-9;
}
/// Determine the tolerance for values of the argument type.
const fn lim_for_ty<T: Approx + Copy>(_x: T) -> T {
T::LIM
}
// We have runtime ("rt") and const versions of these macros.
/// Verify that floats are within a tolerance of each other.
macro_rules! assert_approx_eq_rt {
($a:expr, $b:expr) => {{ assert_approx_eq_rt!($a, $b, $crate::floats::lim_for_ty($a)) }};
($a:expr, $b:expr, $lim:expr) => {{
let (a, b) = (&$a, &$b);
let diff = (*a - *b).abs();
@ -14,10 +39,18 @@ macro_rules! assert_approx_eq {
);
}};
}
macro_rules! assert_approx_eq_const {
($a:expr, $b:expr) => {{ assert_approx_eq_const!($a, $b, $crate::floats::lim_for_ty($a)) }};
($a:expr, $b:expr, $lim:expr) => {{
let (a, b) = (&$a, &$b);
let diff = (*a - *b).abs();
assert!(diff <= $lim);
}};
}
/// Verify that floats have the same bitwise representation. Used to avoid the default `0.0 == -0.0`
/// behavior, as well as to ensure exact NaN bitpatterns.
macro_rules! assert_biteq {
macro_rules! assert_biteq_rt {
(@inner $left:expr, $right:expr, $msg_sep:literal, $($tt:tt)*) => {{
let l = $left;
let r = $right;
@ -41,31 +74,49 @@ macro_rules! assert_biteq {
if !l.is_nan() && !r.is_nan() {
// Also check that standard equality holds, since most tests use `assert_biteq` rather
// than `assert_eq`.
assert_eq!(l, r)
assert_eq!(l, r);
}
}};
($left:expr, $right:expr , $($tt:tt)*) => {
assert_biteq!(@inner $left, $right, "\n", $($tt)*)
assert_biteq_rt!(@inner $left, $right, "\n", $($tt)*)
};
($left:expr, $right:expr $(,)?) => {
assert_biteq!(@inner $left, $right, "", "")
assert_biteq_rt!(@inner $left, $right, "", "")
};
}
macro_rules! assert_biteq_const {
(@inner $left:expr, $right:expr, $msg_sep:literal, $($tt:tt)*) => {{
let l = $left;
let r = $right;
// Hack to coerce left and right to the same type
let mut _eq_ty = l;
_eq_ty = r;
assert!(l.to_bits() == r.to_bits());
if !l.is_nan() && !r.is_nan() {
// Also check that standard equality holds, since most tests use `assert_biteq` rather
// than `assert_eq`.
assert!(l == r);
}
}};
($left:expr, $right:expr , $($tt:tt)*) => {
assert_biteq_const!(@inner $left, $right, "\n", $($tt)*)
};
($left:expr, $right:expr $(,)?) => {
assert_biteq_const!(@inner $left, $right, "", "")
};
}
mod const_asserts {
// Shadow some assert implementations that would otherwise not compile in a const-context.
// Every macro added here also needs to be added in the `float_test!` macro below.
macro_rules! assert_eq {
($left:expr, $right:expr $(,)?) => {
std::assert!($left == $right)
};
($left:expr, $right:expr, $($arg:tt)+) => {
std::assert!($left == $right, $($arg)+)
};
}
// Use the runtime version by default.
// This way, they can be shadowed by the const versions.
pub(crate) use {assert_approx_eq_rt as assert_approx_eq, assert_biteq_rt as assert_biteq};
pub(crate) use assert_eq;
}
// Also make the const version available for re-exports.
#[rustfmt::skip]
pub(crate) use assert_biteq_const;
pub(crate) use assert_approx_eq_const;
/// Generate float tests for all our float types, for compile-time and run-time behavior.
///
@ -84,6 +135,7 @@ mod const_asserts {
/// /* write tests here, using `Float` as the type */
/// }
/// }
/// ```
macro_rules! float_test {
(
name: $name:ident,
@ -101,6 +153,8 @@ macro_rules! float_test {
test<$fty:ident> $test:block
) => {
mod $name {
use super::*;
#[test]
$( $( #[$f16_meta] )+ )?
fn test_f16() {
@ -131,7 +185,14 @@ macro_rules! float_test {
$( $( #[$const_meta] )+ )?
mod const_ {
use $crate::floats::const_asserts::assert_eq;
#[allow(unused)]
use super::Approx;
// Shadow the runtime versions of the macro with const-compatible versions.
#[allow(unused)]
use $crate::floats::{
assert_approx_eq_const as assert_approx_eq,
assert_biteq_const as assert_biteq,
};
#[test]
$( $( #[$f16_const_meta] )+ )?
@ -196,29 +257,25 @@ float_test! {
f128: #[cfg(any(miri, target_has_reliable_f128_math))],
},
test<Float> {
assert_eq!((0.0 as Float).min(0.0), 0.0);
assert!((0.0 as Float).min(0.0).is_sign_positive());
assert_eq!((-0.0 as Float).min(-0.0), -0.0);
assert!((-0.0 as Float).min(-0.0).is_sign_negative());
assert_eq!((9.0 as Float).min(9.0), 9.0);
assert_eq!((-9.0 as Float).min(0.0), -9.0);
assert_eq!((0.0 as Float).min(9.0), 0.0);
assert!((0.0 as Float).min(9.0).is_sign_positive());
assert_eq!((-0.0 as Float).min(9.0), -0.0);
assert!((-0.0 as Float).min(9.0).is_sign_negative());
assert_eq!((-0.0 as Float).min(-9.0), -9.0);
assert_eq!(Float::INFINITY.min(9.0), 9.0);
assert_eq!((9.0 as Float).min(Float::INFINITY), 9.0);
assert_eq!(Float::INFINITY.min(-9.0), -9.0);
assert_eq!((-9.0 as Float).min(Float::INFINITY), -9.0);
assert_eq!(Float::NEG_INFINITY.min(9.0), Float::NEG_INFINITY);
assert_eq!((9.0 as Float).min(Float::NEG_INFINITY), Float::NEG_INFINITY);
assert_eq!(Float::NEG_INFINITY.min(-9.0), Float::NEG_INFINITY);
assert_eq!((-9.0 as Float).min(Float::NEG_INFINITY), Float::NEG_INFINITY);
assert_eq!(Float::NAN.min(9.0), 9.0);
assert_eq!(Float::NAN.min(-9.0), -9.0);
assert_eq!((9.0 as Float).min(Float::NAN), 9.0);
assert_eq!((-9.0 as Float).min(Float::NAN), -9.0);
assert_biteq!((0.0 as Float).min(0.0), 0.0);
assert_biteq!((-0.0 as Float).min(-0.0), -0.0);
assert_biteq!((9.0 as Float).min(9.0), 9.0);
assert_biteq!((-9.0 as Float).min(0.0), -9.0);
assert_biteq!((0.0 as Float).min(9.0), 0.0);
assert_biteq!((-0.0 as Float).min(9.0), -0.0);
assert_biteq!((-0.0 as Float).min(-9.0), -9.0);
assert_biteq!(Float::INFINITY.min(9.0), 9.0);
assert_biteq!((9.0 as Float).min(Float::INFINITY), 9.0);
assert_biteq!(Float::INFINITY.min(-9.0), -9.0);
assert_biteq!((-9.0 as Float).min(Float::INFINITY), -9.0);
assert_biteq!(Float::NEG_INFINITY.min(9.0), Float::NEG_INFINITY);
assert_biteq!((9.0 as Float).min(Float::NEG_INFINITY), Float::NEG_INFINITY);
assert_biteq!(Float::NEG_INFINITY.min(-9.0), Float::NEG_INFINITY);
assert_biteq!((-9.0 as Float).min(Float::NEG_INFINITY), Float::NEG_INFINITY);
assert_biteq!(Float::NAN.min(9.0), 9.0);
assert_biteq!(Float::NAN.min(-9.0), -9.0);
assert_biteq!((9.0 as Float).min(Float::NAN), 9.0);
assert_biteq!((-9.0 as Float).min(Float::NAN), -9.0);
assert!(Float::NAN.min(Float::NAN).is_nan());
}
}
@ -230,32 +287,26 @@ float_test! {
f128: #[cfg(any(miri, target_has_reliable_f128_math))],
},
test<Float> {
assert_eq!((0.0 as Float).max(0.0), 0.0);
assert!((0.0 as Float).max(0.0).is_sign_positive());
assert_eq!((-0.0 as Float).max(-0.0), -0.0);
assert!((-0.0 as Float).max(-0.0).is_sign_negative());
assert_eq!((9.0 as Float).max(9.0), 9.0);
assert_eq!((-9.0 as Float).max(0.0), 0.0);
assert!((-9.0 as Float).max(0.0).is_sign_positive());
assert_eq!((-9.0 as Float).max(-0.0), -0.0);
assert!((-9.0 as Float).max(-0.0).is_sign_negative());
assert_eq!((0.0 as Float).max(9.0), 9.0);
assert_eq!((0.0 as Float).max(-9.0), 0.0);
assert!((0.0 as Float).max(-9.0).is_sign_positive());
assert_eq!((-0.0 as Float).max(-9.0), -0.0);
assert!((-0.0 as Float).max(-9.0).is_sign_negative());
assert_eq!(Float::INFINITY.max(9.0), Float::INFINITY);
assert_eq!((9.0 as Float).max(Float::INFINITY), Float::INFINITY);
assert_eq!(Float::INFINITY.max(-9.0), Float::INFINITY);
assert_eq!((-9.0 as Float).max(Float::INFINITY), Float::INFINITY);
assert_eq!(Float::NEG_INFINITY.max(9.0), 9.0);
assert_eq!((9.0 as Float).max(Float::NEG_INFINITY), 9.0);
assert_eq!(Float::NEG_INFINITY.max(-9.0), -9.0);
assert_eq!((-9.0 as Float).max(Float::NEG_INFINITY), -9.0);
assert_eq!(Float::NAN.max(9.0), 9.0);
assert_eq!(Float::NAN.max(-9.0), -9.0);
assert_eq!((9.0 as Float).max(Float::NAN), 9.0);
assert_eq!((-9.0 as Float).max(Float::NAN), -9.0);
assert_biteq!((0.0 as Float).max(0.0), 0.0);
assert_biteq!((-0.0 as Float).max(-0.0), -0.0);
assert_biteq!((9.0 as Float).max(9.0), 9.0);
assert_biteq!((-9.0 as Float).max(0.0), 0.0);
assert_biteq!((-9.0 as Float).max(-0.0), -0.0);
assert_biteq!((0.0 as Float).max(9.0), 9.0);
assert_biteq!((0.0 as Float).max(-9.0), 0.0);
assert_biteq!((-0.0 as Float).max(-9.0), -0.0);
assert_biteq!(Float::INFINITY.max(9.0), Float::INFINITY);
assert_biteq!((9.0 as Float).max(Float::INFINITY), Float::INFINITY);
assert_biteq!(Float::INFINITY.max(-9.0), Float::INFINITY);
assert_biteq!((-9.0 as Float).max(Float::INFINITY), Float::INFINITY);
assert_biteq!(Float::NEG_INFINITY.max(9.0), 9.0);
assert_biteq!((9.0 as Float).max(Float::NEG_INFINITY), 9.0);
assert_biteq!(Float::NEG_INFINITY.max(-9.0), -9.0);
assert_biteq!((-9.0 as Float).max(Float::NEG_INFINITY), -9.0);
assert_biteq!(Float::NAN.max(9.0), 9.0);
assert_biteq!(Float::NAN.max(-9.0), -9.0);
assert_biteq!((9.0 as Float).max(Float::NAN), 9.0);
assert_biteq!((-9.0 as Float).max(Float::NAN), -9.0);
assert!(Float::NAN.max(Float::NAN).is_nan());
}
}
@ -267,27 +318,22 @@ float_test! {
f128: #[cfg(any(miri, target_has_reliable_f128_math))],
},
test<Float> {
assert_eq!((0.0 as Float).minimum(0.0), 0.0);
assert!((0.0 as Float).minimum(0.0).is_sign_positive());
assert_eq!((-0.0 as Float).minimum(0.0), -0.0);
assert!((-0.0 as Float).minimum(0.0).is_sign_negative());
assert_eq!((-0.0 as Float).minimum(-0.0), -0.0);
assert!((-0.0 as Float).minimum(-0.0).is_sign_negative());
assert_eq!((9.0 as Float).minimum(9.0), 9.0);
assert_eq!((-9.0 as Float).minimum(0.0), -9.0);
assert_eq!((0.0 as Float).minimum(9.0), 0.0);
assert!((0.0 as Float).minimum(9.0).is_sign_positive());
assert_eq!((-0.0 as Float).minimum(9.0), -0.0);
assert!((-0.0 as Float).minimum(9.0).is_sign_negative());
assert_eq!((-0.0 as Float).minimum(-9.0), -9.0);
assert_eq!(Float::INFINITY.minimum(9.0), 9.0);
assert_eq!((9.0 as Float).minimum(Float::INFINITY), 9.0);
assert_eq!(Float::INFINITY.minimum(-9.0), -9.0);
assert_eq!((-9.0 as Float).minimum(Float::INFINITY), -9.0);
assert_eq!(Float::NEG_INFINITY.minimum(9.0), Float::NEG_INFINITY);
assert_eq!((9.0 as Float).minimum(Float::NEG_INFINITY), Float::NEG_INFINITY);
assert_eq!(Float::NEG_INFINITY.minimum(-9.0), Float::NEG_INFINITY);
assert_eq!((-9.0 as Float).minimum(Float::NEG_INFINITY), Float::NEG_INFINITY);
assert_biteq!((0.0 as Float).minimum(0.0), 0.0);
assert_biteq!((-0.0 as Float).minimum(0.0), -0.0);
assert_biteq!((-0.0 as Float).minimum(-0.0), -0.0);
assert_biteq!((9.0 as Float).minimum(9.0), 9.0);
assert_biteq!((-9.0 as Float).minimum(0.0), -9.0);
assert_biteq!((0.0 as Float).minimum(9.0), 0.0);
assert_biteq!((-0.0 as Float).minimum(9.0), -0.0);
assert_biteq!((-0.0 as Float).minimum(-9.0), -9.0);
assert_biteq!(Float::INFINITY.minimum(9.0), 9.0);
assert_biteq!((9.0 as Float).minimum(Float::INFINITY), 9.0);
assert_biteq!(Float::INFINITY.minimum(-9.0), -9.0);
assert_biteq!((-9.0 as Float).minimum(Float::INFINITY), -9.0);
assert_biteq!(Float::NEG_INFINITY.minimum(9.0), Float::NEG_INFINITY);
assert_biteq!((9.0 as Float).minimum(Float::NEG_INFINITY), Float::NEG_INFINITY);
assert_biteq!(Float::NEG_INFINITY.minimum(-9.0), Float::NEG_INFINITY);
assert_biteq!((-9.0 as Float).minimum(Float::NEG_INFINITY), Float::NEG_INFINITY);
assert!(Float::NAN.minimum(9.0).is_nan());
assert!(Float::NAN.minimum(-9.0).is_nan());
assert!((9.0 as Float).minimum(Float::NAN).is_nan());
@ -303,30 +349,23 @@ float_test! {
f128: #[cfg(any(miri, target_has_reliable_f128_math))],
},
test<Float> {
assert_eq!((0.0 as Float).maximum(0.0), 0.0);
assert!((0.0 as Float).maximum(0.0).is_sign_positive());
assert_eq!((-0.0 as Float).maximum(0.0), 0.0);
assert!((-0.0 as Float).maximum(0.0).is_sign_positive());
assert_eq!((-0.0 as Float).maximum(-0.0), -0.0);
assert!((-0.0 as Float).maximum(-0.0).is_sign_negative());
assert_eq!((9.0 as Float).maximum(9.0), 9.0);
assert_eq!((-9.0 as Float).maximum(0.0), 0.0);
assert!((-9.0 as Float).maximum(0.0).is_sign_positive());
assert_eq!((-9.0 as Float).maximum(-0.0), -0.0);
assert!((-9.0 as Float).maximum(-0.0).is_sign_negative());
assert_eq!((0.0 as Float).maximum(9.0), 9.0);
assert_eq!((0.0 as Float).maximum(-9.0), 0.0);
assert!((0.0 as Float).maximum(-9.0).is_sign_positive());
assert_eq!((-0.0 as Float).maximum(-9.0), -0.0);
assert!((-0.0 as Float).maximum(-9.0).is_sign_negative());
assert_eq!(Float::INFINITY.maximum(9.0), Float::INFINITY);
assert_eq!((9.0 as Float).maximum(Float::INFINITY), Float::INFINITY);
assert_eq!(Float::INFINITY.maximum(-9.0), Float::INFINITY);
assert_eq!((-9.0 as Float).maximum(Float::INFINITY), Float::INFINITY);
assert_eq!(Float::NEG_INFINITY.maximum(9.0), 9.0);
assert_eq!((9.0 as Float).maximum(Float::NEG_INFINITY), 9.0);
assert_eq!(Float::NEG_INFINITY.maximum(-9.0), -9.0);
assert_eq!((-9.0 as Float).maximum(Float::NEG_INFINITY), -9.0);
assert_biteq!((0.0 as Float).maximum(0.0), 0.0);
assert_biteq!((-0.0 as Float).maximum(0.0), 0.0);
assert_biteq!((-0.0 as Float).maximum(-0.0), -0.0);
assert_biteq!((9.0 as Float).maximum(9.0), 9.0);
assert_biteq!((-9.0 as Float).maximum(0.0), 0.0);
assert_biteq!((-9.0 as Float).maximum(-0.0), -0.0);
assert_biteq!((0.0 as Float).maximum(9.0), 9.0);
assert_biteq!((0.0 as Float).maximum(-9.0), 0.0);
assert_biteq!((-0.0 as Float).maximum(-9.0), -0.0);
assert_biteq!(Float::INFINITY.maximum(9.0), Float::INFINITY);
assert_biteq!((9.0 as Float).maximum(Float::INFINITY), Float::INFINITY);
assert_biteq!(Float::INFINITY.maximum(-9.0), Float::INFINITY);
assert_biteq!((-9.0 as Float).maximum(Float::INFINITY), Float::INFINITY);
assert_biteq!(Float::NEG_INFINITY.maximum(9.0), 9.0);
assert_biteq!((9.0 as Float).maximum(Float::NEG_INFINITY), 9.0);
assert_biteq!(Float::NEG_INFINITY.maximum(-9.0), -9.0);
assert_biteq!((-9.0 as Float).maximum(Float::NEG_INFINITY), -9.0);
assert!(Float::NAN.maximum(9.0).is_nan());
assert!(Float::NAN.maximum(-9.0).is_nan());
assert!((9.0 as Float).maximum(Float::NAN).is_nan());
@ -342,41 +381,43 @@ float_test! {
f128: #[cfg(any(miri, target_has_reliable_f128_math))],
},
test<Float> {
assert_eq!((0.5 as Float).midpoint(0.5), 0.5);
assert_eq!((0.5 as Float).midpoint(2.5), 1.5);
assert_eq!((3.0 as Float).midpoint(4.0), 3.5);
assert_eq!((-3.0 as Float).midpoint(4.0), 0.5);
assert_eq!((3.0 as Float).midpoint(-4.0), -0.5);
assert_eq!((-3.0 as Float).midpoint(-4.0), -3.5);
assert_eq!((0.0 as Float).midpoint(0.0), 0.0);
assert_eq!((-0.0 as Float).midpoint(-0.0), -0.0);
assert_eq!((-5.0 as Float).midpoint(5.0), 0.0);
assert_eq!(Float::MAX.midpoint(Float::MIN), 0.0);
assert_eq!(Float::MIN.midpoint(Float::MAX), -0.0);
assert_eq!(Float::MAX.midpoint(Float::MIN_POSITIVE), Float::MAX / 2.);
assert_eq!((-Float::MAX).midpoint(Float::MIN_POSITIVE), -Float::MAX / 2.);
assert_eq!(Float::MAX.midpoint(-Float::MIN_POSITIVE), Float::MAX / 2.);
assert_eq!((-Float::MAX).midpoint(-Float::MIN_POSITIVE), -Float::MAX / 2.);
assert_eq!((Float::MIN_POSITIVE).midpoint(Float::MAX), Float::MAX / 2.);
assert_eq!((Float::MIN_POSITIVE).midpoint(-Float::MAX), -Float::MAX / 2.);
assert_eq!((-Float::MIN_POSITIVE).midpoint(Float::MAX), Float::MAX / 2.);
assert_eq!((-Float::MIN_POSITIVE).midpoint(-Float::MAX), -Float::MAX / 2.);
assert_eq!(Float::MAX.midpoint(Float::MAX), Float::MAX);
assert_eq!(
assert_biteq!((0.5 as Float).midpoint(0.5), 0.5);
assert_biteq!((0.5 as Float).midpoint(2.5), 1.5);
assert_biteq!((3.0 as Float).midpoint(4.0), 3.5);
assert_biteq!((-3.0 as Float).midpoint(4.0), 0.5);
assert_biteq!((3.0 as Float).midpoint(-4.0), -0.5);
assert_biteq!((-3.0 as Float).midpoint(-4.0), -3.5);
assert_biteq!((0.0 as Float).midpoint(0.0), 0.0);
assert_biteq!((-0.0 as Float).midpoint(-0.0), -0.0);
assert_biteq!((-5.0 as Float).midpoint(5.0), 0.0);
assert_biteq!(Float::MAX.midpoint(Float::MIN), 0.0);
assert_biteq!(Float::MIN.midpoint(Float::MAX), 0.0);
assert_biteq!(Float::MAX.midpoint(Float::MIN_POSITIVE), Float::MAX / 2.);
assert_biteq!((-Float::MAX).midpoint(Float::MIN_POSITIVE), -Float::MAX / 2.);
assert_biteq!(Float::MAX.midpoint(-Float::MIN_POSITIVE), Float::MAX / 2.);
assert_biteq!((-Float::MAX).midpoint(-Float::MIN_POSITIVE), -Float::MAX / 2.);
assert_biteq!((Float::MIN_POSITIVE).midpoint(Float::MAX), Float::MAX / 2.);
assert_biteq!((Float::MIN_POSITIVE).midpoint(-Float::MAX), -Float::MAX / 2.);
assert_biteq!((-Float::MIN_POSITIVE).midpoint(Float::MAX), Float::MAX / 2.);
assert_biteq!((-Float::MIN_POSITIVE).midpoint(-Float::MAX), -Float::MAX / 2.);
assert_biteq!(Float::MAX.midpoint(Float::MAX), Float::MAX);
assert_biteq!(
(Float::MIN_POSITIVE).midpoint(Float::MIN_POSITIVE),
Float::MIN_POSITIVE
);
assert_eq!(
assert_biteq!(
(-Float::MIN_POSITIVE).midpoint(-Float::MIN_POSITIVE),
-Float::MIN_POSITIVE
);
assert_eq!(Float::MAX.midpoint(5.0), Float::MAX / 2.0 + 2.5);
assert_eq!(Float::MAX.midpoint(-5.0), Float::MAX / 2.0 - 2.5);
assert_eq!(Float::INFINITY.midpoint(Float::INFINITY), Float::INFINITY);
assert_eq!(
assert_biteq!(Float::MAX.midpoint(5.0), Float::MAX / 2.0 + 2.5);
assert_biteq!(Float::MAX.midpoint(-5.0), Float::MAX / 2.0 - 2.5);
assert_biteq!(Float::INFINITY.midpoint(Float::INFINITY), Float::INFINITY);
assert_biteq!(
Float::NEG_INFINITY.midpoint(Float::NEG_INFINITY),
Float::NEG_INFINITY
);
assert!(Float::NEG_INFINITY.midpoint(Float::INFINITY).is_nan());
assert!(Float::INFINITY.midpoint(Float::NEG_INFINITY).is_nan());
assert!(Float::NAN.midpoint(1.0).is_nan());
assert!((1.0 as Float).midpoint(Float::NAN).is_nan());
assert!(Float::NAN.midpoint(Float::NAN).is_nan());
@ -410,7 +451,7 @@ float_test! {
let naive = (large + small) / 2.0;
let midpoint = large.midpoint(small);
assert_eq!(naive, midpoint);
assert_biteq!(naive, midpoint);
}
}
}
@ -423,10 +464,10 @@ float_test! {
f128: #[cfg(any(miri, target_has_reliable_f128_math))],
},
test<Float> {
assert_eq!((-1.0 as Float).abs(), 1.0);
assert_eq!((1.0 as Float).abs(), 1.0);
assert_eq!(Float::NEG_INFINITY.abs(), Float::INFINITY);
assert_eq!(Float::INFINITY.abs(), Float::INFINITY);
assert_biteq!((-1.0 as Float).abs(), 1.0);
assert_biteq!((1.0 as Float).abs(), 1.0);
assert_biteq!(Float::NEG_INFINITY.abs(), Float::INFINITY);
assert_biteq!(Float::INFINITY.abs(), Float::INFINITY);
}
}
@ -437,10 +478,10 @@ float_test! {
f128: #[cfg(any(miri, target_has_reliable_f128_math))],
},
test<Float> {
assert_eq!((1.0 as Float).copysign(-2.0), -1.0);
assert_eq!((-1.0 as Float).copysign(2.0), 1.0);
assert_eq!(Float::INFINITY.copysign(-0.0), Float::NEG_INFINITY);
assert_eq!(Float::NEG_INFINITY.copysign(0.0), Float::INFINITY);
assert_biteq!((1.0 as Float).copysign(-2.0), -1.0);
assert_biteq!((-1.0 as Float).copysign(2.0), 1.0);
assert_biteq!(Float::INFINITY.copysign(-0.0), Float::NEG_INFINITY);
assert_biteq!(Float::NEG_INFINITY.copysign(0.0), Float::INFINITY);
}
}
@ -453,7 +494,7 @@ float_test! {
},
test<Float> {
assert!(Float::INFINITY.rem_euclid(42.0 as Float).is_nan());
assert_eq!((42.0 as Float).rem_euclid(Float::INFINITY), (42.0 as Float));
assert_biteq!((42.0 as Float).rem_euclid(Float::INFINITY), 42.0 as Float);
assert!((42.0 as Float).rem_euclid(Float::NAN).is_nan());
assert!(Float::INFINITY.rem_euclid(Float::INFINITY).is_nan());
assert!(Float::INFINITY.rem_euclid(Float::NAN).is_nan());
@ -469,7 +510,7 @@ float_test! {
f128: #[cfg(any(miri, target_has_reliable_f128_math))],
},
test<Float> {
assert_eq!((42.0 as Float).div_euclid(Float::INFINITY), 0.0);
assert_biteq!((42.0 as Float).div_euclid(Float::INFINITY), 0.0);
assert!((42.0 as Float).div_euclid(Float::NAN).is_nan());
assert!(Float::INFINITY.div_euclid(Float::INFINITY).is_nan());
assert!(Float::INFINITY.div_euclid(Float::NAN).is_nan());
@ -484,20 +525,25 @@ float_test! {
f128: #[cfg(any(miri, target_has_reliable_f128_math))],
},
test<Float> {
assert_eq!((0.0 as Float).floor(), 0.0);
assert!((0.0 as Float).floor().is_sign_positive());
assert_eq!((-0.0 as Float).floor(), -0.0);
assert!((-0.0 as Float).floor().is_sign_negative());
assert_eq!((0.5 as Float).floor(), 0.0);
assert_eq!((-0.5 as Float).floor(), -1.0);
assert_eq!((1.5 as Float).floor(), 1.0);
assert_eq!(Float::MAX.floor(), Float::MAX);
assert_eq!(Float::MIN.floor(), Float::MIN);
assert_eq!(Float::MIN_POSITIVE.floor(), 0.0);
assert_eq!((-Float::MIN_POSITIVE).floor(), -1.0);
assert_biteq!((1.0 as Float).floor(), 1.0);
assert_biteq!((1.3 as Float).floor(), 1.0);
assert_biteq!((1.5 as Float).floor(), 1.0);
assert_biteq!((1.7 as Float).floor(), 1.0);
assert_biteq!((0.5 as Float).floor(), 0.0);
assert_biteq!((0.0 as Float).floor(), 0.0);
assert_biteq!((-0.0 as Float).floor(), -0.0);
assert_biteq!((-0.5 as Float).floor(), -1.0);
assert_biteq!((-1.0 as Float).floor(), -1.0);
assert_biteq!((-1.3 as Float).floor(), -2.0);
assert_biteq!((-1.5 as Float).floor(), -2.0);
assert_biteq!((-1.7 as Float).floor(), -2.0);
assert_biteq!(Float::MAX.floor(), Float::MAX);
assert_biteq!(Float::MIN.floor(), Float::MIN);
assert_biteq!(Float::MIN_POSITIVE.floor(), 0.0);
assert_biteq!((-Float::MIN_POSITIVE).floor(), -1.0);
assert!(Float::NAN.floor().is_nan());
assert_eq!(Float::INFINITY.floor(), Float::INFINITY);
assert_eq!(Float::NEG_INFINITY.floor(), Float::NEG_INFINITY);
assert_biteq!(Float::INFINITY.floor(), Float::INFINITY);
assert_biteq!(Float::NEG_INFINITY.floor(), Float::NEG_INFINITY);
}
}
@ -508,19 +554,25 @@ float_test! {
f128: #[cfg(any(miri, target_has_reliable_f128_math))],
},
test<Float> {
assert_eq!((0.0 as Float).ceil(), 0.0);
assert!((0.0 as Float).ceil().is_sign_positive());
assert_eq!((-0.0 as Float).ceil(), 0.0);
assert!((-0.0 as Float).ceil().is_sign_negative());
assert_eq!((0.5 as Float).ceil(), 1.0);
assert_eq!((-0.5 as Float).ceil(), 0.0);
assert_eq!(Float::MAX.ceil(), Float::MAX);
assert_eq!(Float::MIN.ceil(), Float::MIN);
assert_eq!(Float::MIN_POSITIVE.ceil(), 1.0);
assert_eq!((-Float::MIN_POSITIVE).ceil(), 0.0);
assert_biteq!((1.0 as Float).ceil(), 1.0);
assert_biteq!((1.3 as Float).ceil(), 2.0);
assert_biteq!((1.5 as Float).ceil(), 2.0);
assert_biteq!((1.7 as Float).ceil(), 2.0);
assert_biteq!((0.5 as Float).ceil(), 1.0);
assert_biteq!((0.0 as Float).ceil(), 0.0);
assert_biteq!((-0.0 as Float).ceil(), -0.0);
assert_biteq!((-0.5 as Float).ceil(), -0.0);
assert_biteq!((-1.0 as Float).ceil(), -1.0);
assert_biteq!((-1.3 as Float).ceil(), -1.0);
assert_biteq!((-1.5 as Float).ceil(), -1.0);
assert_biteq!((-1.7 as Float).ceil(), -1.0);
assert_biteq!(Float::MAX.ceil(), Float::MAX);
assert_biteq!(Float::MIN.ceil(), Float::MIN);
assert_biteq!(Float::MIN_POSITIVE.ceil(), 1.0);
assert_biteq!((-Float::MIN_POSITIVE).ceil(), -0.0);
assert!(Float::NAN.ceil().is_nan());
assert_eq!(Float::INFINITY.ceil(), Float::INFINITY);
assert_eq!(Float::NEG_INFINITY.ceil(), Float::NEG_INFINITY);
assert_biteq!(Float::INFINITY.ceil(), Float::INFINITY);
assert_biteq!(Float::NEG_INFINITY.ceil(), Float::NEG_INFINITY);
}
}
@ -531,19 +583,26 @@ float_test! {
f128: #[cfg(any(miri, target_has_reliable_f128_math))],
},
test<Float> {
assert_eq!((0.0 as Float).round(), 0.0);
assert!((0.0 as Float).round().is_sign_positive());
assert_eq!((-0.0 as Float).round(), -0.0);
assert!((-0.0 as Float).round().is_sign_negative());
assert_eq!((0.5 as Float).round(), 1.0);
assert_eq!((-0.5 as Float).round(), -1.0);
assert_eq!(Float::MAX.round(), Float::MAX);
assert_eq!(Float::MIN.round(), Float::MIN);
assert_eq!(Float::MIN_POSITIVE.round(), 0.0);
assert_eq!((-Float::MIN_POSITIVE).round(), 0.0);
assert_biteq!((2.5 as Float).round(), 3.0);
assert_biteq!((1.0 as Float).round(), 1.0);
assert_biteq!((1.3 as Float).round(), 1.0);
assert_biteq!((1.5 as Float).round(), 2.0);
assert_biteq!((1.7 as Float).round(), 2.0);
assert_biteq!((0.5 as Float).round(), 1.0);
assert_biteq!((0.0 as Float).round(), 0.0);
assert_biteq!((-0.0 as Float).round(), -0.0);
assert_biteq!((-0.5 as Float).round(), -1.0);
assert_biteq!((-1.0 as Float).round(), -1.0);
assert_biteq!((-1.3 as Float).round(), -1.0);
assert_biteq!((-1.5 as Float).round(), -2.0);
assert_biteq!((-1.7 as Float).round(), -2.0);
assert_biteq!(Float::MAX.round(), Float::MAX);
assert_biteq!(Float::MIN.round(), Float::MIN);
assert_biteq!(Float::MIN_POSITIVE.round(), 0.0);
assert_biteq!((-Float::MIN_POSITIVE).round(), -0.0);
assert!(Float::NAN.round().is_nan());
assert_eq!(Float::INFINITY.round(), Float::INFINITY);
assert_eq!(Float::NEG_INFINITY.round(), Float::NEG_INFINITY);
assert_biteq!(Float::INFINITY.round(), Float::INFINITY);
assert_biteq!(Float::NEG_INFINITY.round(), Float::NEG_INFINITY);
}
}
@ -554,21 +613,26 @@ float_test! {
f128: #[cfg(any(miri, target_has_reliable_f128_math))],
},
test<Float> {
assert_eq!((0.0 as Float).round_ties_even(), 0.0);
assert!((0.0 as Float).round_ties_even().is_sign_positive());
assert_eq!((-0.0 as Float).round_ties_even(), -0.0);
assert!((-0.0 as Float).round_ties_even().is_sign_negative());
assert_eq!((0.5 as Float).round_ties_even(), 0.0);
assert!((0.5 as Float).round_ties_even().is_sign_positive());
assert_eq!((-0.5 as Float).round_ties_even(), -0.0);
assert!((-0.5 as Float).round_ties_even().is_sign_negative());
assert_eq!(Float::MAX.round_ties_even(), Float::MAX);
assert_eq!(Float::MIN.round_ties_even(), Float::MIN);
assert_eq!(Float::MIN_POSITIVE.round_ties_even(), 0.0);
assert_eq!((-Float::MIN_POSITIVE).round_ties_even(), 0.0);
assert_biteq!((2.5 as Float).round_ties_even(), 2.0);
assert_biteq!((1.0 as Float).round_ties_even(), 1.0);
assert_biteq!((1.3 as Float).round_ties_even(), 1.0);
assert_biteq!((1.5 as Float).round_ties_even(), 2.0);
assert_biteq!((1.7 as Float).round_ties_even(), 2.0);
assert_biteq!((0.5 as Float).round_ties_even(), 0.0);
assert_biteq!((0.0 as Float).round_ties_even(), 0.0);
assert_biteq!((-0.0 as Float).round_ties_even(), -0.0);
assert_biteq!((-0.5 as Float).round_ties_even(), -0.0);
assert_biteq!((-1.0 as Float).round_ties_even(), -1.0);
assert_biteq!((-1.3 as Float).round_ties_even(), -1.0);
assert_biteq!((-1.5 as Float).round_ties_even(), -2.0);
assert_biteq!((-1.7 as Float).round_ties_even(), -2.0);
assert_biteq!(Float::MAX.round_ties_even(), Float::MAX);
assert_biteq!(Float::MIN.round_ties_even(), Float::MIN);
assert_biteq!(Float::MIN_POSITIVE.round_ties_even(), 0.0);
assert_biteq!((-Float::MIN_POSITIVE).round_ties_even(), -0.0);
assert!(Float::NAN.round_ties_even().is_nan());
assert_eq!(Float::INFINITY.round_ties_even(), Float::INFINITY);
assert_eq!(Float::NEG_INFINITY.round_ties_even(), Float::NEG_INFINITY);
assert_biteq!(Float::INFINITY.round_ties_even(), Float::INFINITY);
assert_biteq!(Float::NEG_INFINITY.round_ties_even(), Float::NEG_INFINITY);
}
}
@ -579,21 +643,25 @@ float_test! {
f128: #[cfg(any(miri, target_has_reliable_f128_math))],
},
test<Float> {
assert_eq!((0.0 as Float).trunc(), 0.0);
assert!((0.0 as Float).trunc().is_sign_positive());
assert_eq!((-0.0 as Float).trunc(), -0.0);
assert!((-0.0 as Float).trunc().is_sign_negative());
assert_eq!((0.5 as Float).trunc(), 0.0);
assert!((0.5 as Float).trunc().is_sign_positive());
assert_eq!((-0.5 as Float).trunc(), -0.0);
assert!((-0.5 as Float).trunc().is_sign_negative());
assert_eq!(Float::MAX.trunc(), Float::MAX);
assert_eq!(Float::MIN.trunc(), Float::MIN);
assert_eq!(Float::MIN_POSITIVE.trunc(), 0.0);
assert_eq!((-Float::MIN_POSITIVE).trunc(), 0.0);
assert_biteq!((1.0 as Float).trunc(), 1.0);
assert_biteq!((1.3 as Float).trunc(), 1.0);
assert_biteq!((1.5 as Float).trunc(), 1.0);
assert_biteq!((1.7 as Float).trunc(), 1.0);
assert_biteq!((0.5 as Float).trunc(), 0.0);
assert_biteq!((0.0 as Float).trunc(), 0.0);
assert_biteq!((-0.0 as Float).trunc(), -0.0);
assert_biteq!((-0.5 as Float).trunc(), -0.0);
assert_biteq!((-1.0 as Float).trunc(), -1.0);
assert_biteq!((-1.3 as Float).trunc(), -1.0);
assert_biteq!((-1.5 as Float).trunc(), -1.0);
assert_biteq!((-1.7 as Float).trunc(), -1.0);
assert_biteq!(Float::MAX.trunc(), Float::MAX);
assert_biteq!(Float::MIN.trunc(), Float::MIN);
assert_biteq!(Float::MIN_POSITIVE.trunc(), 0.0);
assert_biteq!((-Float::MIN_POSITIVE).trunc(), -0.0);
assert!(Float::NAN.trunc().is_nan());
assert_eq!(Float::INFINITY.trunc(), Float::INFINITY);
assert_eq!(Float::NEG_INFINITY.trunc(), Float::NEG_INFINITY);
assert_biteq!(Float::INFINITY.trunc(), Float::INFINITY);
assert_biteq!(Float::NEG_INFINITY.trunc(), Float::NEG_INFINITY);
}
}
@ -604,19 +672,23 @@ float_test! {
f128: #[cfg(any(miri, target_has_reliable_f128_math))],
},
test<Float> {
assert_eq!((0.0 as Float).fract(), 0.0);
assert!((0.0 as Float).fract().is_sign_positive());
assert_eq!((-0.0 as Float).fract(), 0.0);
assert!((-0.0 as Float).fract().is_sign_positive());
assert_eq!((0.5 as Float).fract(), 0.5);
assert!((0.5 as Float).fract().is_sign_positive());
assert_eq!((-0.5 as Float).fract(), -0.5);
assert!((-0.5 as Float).fract().is_sign_negative());
assert_eq!(Float::MAX.fract(), 0.0);
assert_eq!(Float::MIN.fract(), 0.0);
assert_eq!(Float::MIN_POSITIVE.fract(), Float::MIN_POSITIVE);
assert_biteq!((1.0 as Float).fract(), 0.0);
assert_approx_eq!((1.3 as Float).fract(), 0.3); // rounding differs between float types
assert_biteq!((1.5 as Float).fract(), 0.5);
assert_approx_eq!((1.7 as Float).fract(), 0.7);
assert_biteq!((0.5 as Float).fract(), 0.5);
assert_biteq!((0.0 as Float).fract(), 0.0);
assert_biteq!((-0.0 as Float).fract(), 0.0);
assert_biteq!((-0.5 as Float).fract(), -0.5);
assert_biteq!((-1.0 as Float).fract(), 0.0);
assert_approx_eq!((-1.3 as Float).fract(), -0.3); // rounding differs between float types
assert_biteq!((-1.5 as Float).fract(), -0.5);
assert_approx_eq!((-1.7 as Float).fract(), -0.7);
assert_biteq!(Float::MAX.fract(), 0.0);
assert_biteq!(Float::MIN.fract(), 0.0);
assert_biteq!(Float::MIN_POSITIVE.fract(), Float::MIN_POSITIVE);
assert!(Float::MIN_POSITIVE.fract().is_sign_positive());
assert_eq!((-Float::MIN_POSITIVE).fract(), -Float::MIN_POSITIVE);
assert_biteq!((-Float::MIN_POSITIVE).fract(), -Float::MIN_POSITIVE);
assert!((-Float::MIN_POSITIVE).fract().is_sign_negative());
assert!(Float::NAN.fract().is_nan());
assert!(Float::INFINITY.fract().is_nan());

View file

@ -653,7 +653,6 @@ fn thin_box() {
// if `{size,align}_of_for_meta<T: ?Sized>(T::Metadata)` are added.
// * Constructing a `ThinBox` without consuming and deallocating a `Box`
// requires either the unstable `Unsize` marker trait,
// or the unstable `unsized_locals` language feature,
// or taking `&dyn T` and restricting to `T: Copy`.
use std::alloc::*;

View file

@ -1,3 +1,4 @@
use crate::bstr::ByteStr;
use crate::ffi::OsStr;
#[cfg(any(doc, target_os = "android", target_os = "linux"))]
use crate::os::net::linux_ext;
@ -61,7 +62,7 @@ pub(super) fn sockaddr_un(path: &Path) -> io::Result<(libc::sockaddr_un, libc::s
enum AddressKind<'a> {
Unnamed,
Pathname(&'a Path),
Abstract(&'a [u8]),
Abstract(&'a ByteStr),
}
/// An address associated with a Unix socket.
@ -245,7 +246,7 @@ impl SocketAddr {
{
AddressKind::Unnamed
} else if self.addr.sun_path[0] == 0 {
AddressKind::Abstract(&path[1..len])
AddressKind::Abstract(ByteStr::from_bytes(&path[1..len]))
} else {
AddressKind::Pathname(OsStr::from_bytes(&path[..len - 1]).as_ref())
}
@ -260,7 +261,7 @@ impl Sealed for SocketAddr {}
#[stable(feature = "unix_socket_abstract", since = "1.70.0")]
impl linux_ext::addr::SocketAddrExt for SocketAddr {
fn as_abstract_name(&self) -> Option<&[u8]> {
if let AddressKind::Abstract(name) = self.address() { Some(name) } else { None }
if let AddressKind::Abstract(name) = self.address() { Some(name.as_bytes()) } else { None }
}
fn from_abstract_name<N>(name: N) -> crate::io::Result<Self>
@ -295,7 +296,7 @@ impl fmt::Debug for SocketAddr {
fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
match self.address() {
AddressKind::Unnamed => write!(fmt, "(unnamed)"),
AddressKind::Abstract(name) => write!(fmt, "\"{}\" (abstract)", name.escape_ascii()),
AddressKind::Abstract(name) => write!(fmt, "{name:?} (abstract)"),
AddressKind::Pathname(path) => write!(fmt, "{path:?} (pathname)"),
}
}

View file

@ -411,6 +411,15 @@ fn test_unix_datagram_timeout_zero_duration() {
assert_eq!(err.kind(), ErrorKind::InvalidInput);
}
// Regression test for the `Debug` output of an abstract-namespace socket
// address: the name is now rendered via `ByteStr`'s `Debug` impl (a quoted
// byte-string with non-printable/non-ASCII bytes escaped), followed by
// " (abstract)". Abstract socket addresses exist only on Linux/Android.
#[cfg(any(target_os = "android", target_os = "linux"))]
#[test]
fn abstract_socket_addr_debug() {
    assert_eq!(
        r#""\0hello world\x01\x02\x03\x04\x05\x06\x07\x08\t\n\x11\x12\r\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f \x7f\x80\x81\xfe\xff" (abstract)"#,
        format!("{:?}", SocketAddr::from_abstract_name(b"\0hello world\x01\x02\x03\x04\x05\x06\x07\x08\t\n\x11\x12\r\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f \x7f\x80\x81\xfe\xff").unwrap()),
    );
}
#[test]
fn abstract_namespace_not_allowed_connect() {
assert!(UnixStream::connect("\0asdf").is_err());

View file

@ -156,8 +156,8 @@ a new unstable feature:
[`incomplete_features` lint]: https://doc.rust-lang.org/rustc/lints/listing/warn-by-default.html#incomplete-features
```rust ignore
/// Allows unsized rvalues at arguments and parameters.
(incomplete, unsized_locals, "CURRENT_RUSTC_VERSION", Some(48055), None),
/// Allows deref patterns.
(incomplete, deref_patterns, "CURRENT_RUSTC_VERSION", Some(87121), None),
```
To avoid [semantic merge conflicts], please use `CURRENT_RUSTC_VERSION` instead of `1.70` or

View file

@ -581,7 +581,9 @@ For this rust code:
```rust
/// ```
/// #![allow(dead_code)]
/// let x = 12;
/// Ok(())
/// ```
pub trait Trait {}
```
@ -590,10 +592,10 @@ The generated output (formatted) will look like this:
```json
{
"format_version": 1,
"format_version": 2,
"doctests": [
{
"file": "foo.rs",
"file": "src/lib.rs",
"line": 1,
"doctest_attributes": {
"original": "",
@ -609,9 +611,17 @@ The generated output (formatted) will look like this:
"added_css_classes": [],
"unknown": []
},
"original_code": "let x = 12;",
"doctest_code": "#![allow(unused)]\nfn main() {\nlet x = 12;\n}",
"name": "foo.rs - Trait (line 1)"
"original_code": "#![allow(dead_code)]\nlet x = 12;\nOk(())",
"doctest_code": {
"crate_level": "#![allow(unused)]\n#![allow(dead_code)]\n\n",
"code": "let x = 12;\nOk(())",
"wrapper": {
"before": "fn main() { fn _inner() -> core::result::Result<(), impl core::fmt::Debug> {\n",
"after": "\n} _inner().unwrap() }",
"returns_result": true
}
},
"name": "src/lib.rs - (line 1)"
}
]
}
@ -624,6 +634,10 @@ The generated output (formatted) will look like this:
* `doctest_attributes` contains computed information about the attributes used on the doctests. For more information about doctest attributes, take a look [here](write-documentation/documentation-tests.html#attributes).
* `original_code` is the code as written in the source code before rustdoc modifies it.
* `doctest_code` is the code modified by rustdoc that will be run. If there is a fatal syntax error, this field will not be present.
* `crate_level` is the crate level code (like attributes or `extern crate`) that will be added at the top-level of the generated doctest.
* `code` is the "naked" doctest, without any of the `crate_level` or `wrapper` content.
* `wrapper` contains extra code that will be added before and after `code`.
* `returns_result` is a boolean. If `true`, it means that the doctest returns a `Result` type.
* `name` is the name generated by rustdoc which represents this doctest.
### html

View file

@ -1,175 +0,0 @@
# `unsized_locals`
The tracking issue for this feature is: [#48055]
[#48055]: https://github.com/rust-lang/rust/issues/48055
------------------------
This implements [RFC1909]. When turned on, you can have unsized arguments and locals:
[RFC1909]: https://github.com/rust-lang/rfcs/blob/master/text/1909-unsized-rvalues.md
```rust
#![allow(incomplete_features)]
#![feature(unsized_locals, unsized_fn_params)]
use std::any::Any;
fn main() {
let x: Box<dyn Any> = Box::new(42);
let x: dyn Any = *x;
// ^ unsized local variable
// ^^ unsized temporary
foo(x);
}
fn foo(_: dyn Any) {}
// ^^^^^^ unsized argument
```
The RFC still forbids the following unsized expressions:
```rust,compile_fail
#![feature(unsized_locals)]
use std::any::Any;
struct MyStruct<T: ?Sized> {
content: T,
}
struct MyTupleStruct<T: ?Sized>(T);
fn answer() -> Box<dyn Any> {
Box::new(42)
}
fn main() {
// You CANNOT have unsized statics.
static X: dyn Any = *answer(); // ERROR
const Y: dyn Any = *answer(); // ERROR
// You CANNOT have struct initialized unsized.
MyStruct { content: *answer() }; // ERROR
MyTupleStruct(*answer()); // ERROR
(42, *answer()); // ERROR
// You CANNOT have unsized return types.
fn my_function() -> dyn Any { *answer() } // ERROR
// You CAN have unsized local variables...
let mut x: dyn Any = *answer(); // OK
// ...but you CANNOT reassign to them.
x = *answer(); // ERROR
// You CANNOT even initialize them separately.
let y: dyn Any; // OK
y = *answer(); // ERROR
// Not mentioned in the RFC, but by-move captured variables are also Sized.
let x: dyn Any = *answer();
(move || { // ERROR
let y = x;
})();
// You CAN create a closure with unsized arguments,
// but you CANNOT call it.
// This is an implementation detail and may be changed in the future.
let f = |x: dyn Any| {};
f(*answer()); // ERROR
}
```
## By-value trait objects
With this feature, you can have by-value `self` arguments without `Self: Sized` bounds.
```rust
#![feature(unsized_fn_params)]
trait Foo {
fn foo(self) {}
}
impl<T: ?Sized> Foo for T {}
fn main() {
let slice: Box<[i32]> = Box::new([1, 2, 3]);
<[i32] as Foo>::foo(*slice);
}
```
And `Foo` will also be object-safe.
```rust
#![feature(unsized_fn_params)]
trait Foo {
fn foo(self) {}
}
impl<T: ?Sized> Foo for T {}
fn main () {
let slice: Box<dyn Foo> = Box::new([1, 2, 3]);
// doesn't compile yet
<dyn Foo as Foo>::foo(*slice);
}
```
One of the objectives of this feature is to allow `Box<dyn FnOnce>`.
## Variable length arrays
The RFC also describes an extension to the array literal syntax: `[e; dyn n]`. In the syntax, `n` isn't necessarily a constant expression. The array is dynamically allocated on the stack and has the type of `[T]`, instead of `[T; n]`.
```rust,ignore (not-yet-implemented)
#![feature(unsized_locals)]
fn mergesort<T: Ord>(a: &mut [T]) {
let mut tmp = [T; dyn a.len()];
// ...
}
fn main() {
let mut a = [3, 1, 5, 6];
mergesort(&mut a);
assert_eq!(a, [1, 3, 5, 6]);
}
```
VLAs are not implemented yet. The syntax isn't final, either. We may need an alternative syntax for Rust 2015 because, in Rust 2015, expressions like `[e; dyn(1)]` would be ambiguous. One possible alternative proposed in the RFC is `[e; n]`: if `n` captures one or more local variables, then it is considered as `[e; dyn n]`.
## Advisory on stack usage
It's advised not to casually use the `#![feature(unsized_locals)]` feature. Typical use-cases are:
- When you need by-value trait objects.
- When you really need a fast allocation of small temporary arrays.
Another pitfall is repetitive allocation and temporaries. Currently the compiler simply extends the stack frame every time it encounters an unsized assignment. So for example, the code
```rust
#![feature(unsized_locals)]
fn main() {
let x: Box<[i32]> = Box::new([1, 2, 3, 4, 5]);
let _x = {{{{{{{{{{*x}}}}}}}}}};
}
```
and the code
```rust
#![feature(unsized_locals)]
fn main() {
for _ in 0..10 {
let x: Box<[i32]> = Box::new([1, 2, 3, 4, 5]);
let _x = *x;
}
}
```
will unnecessarily extend the stack frame.

View file

@ -1053,14 +1053,14 @@ fn doctest_run_fn(
let report_unused_externs = |uext| {
unused_externs.lock().unwrap().push(uext);
};
let (full_test_code, full_test_line_offset) = doctest.generate_unique_doctest(
let (wrapped, full_test_line_offset) = doctest.generate_unique_doctest(
&scraped_test.text,
scraped_test.langstr.test_harness,
&global_opts,
Some(&global_opts.crate_name),
);
let runnable_test = RunnableDocTest {
full_test_code,
full_test_code: wrapped.to_string(),
full_test_line_offset,
test_opts,
global_opts,

View file

@ -3,8 +3,10 @@
//! This module contains the logic to extract doctests and output a JSON containing this
//! information.
use rustc_span::edition::Edition;
use serde::Serialize;
use super::make::DocTestWrapResult;
use super::{BuildDocTestBuilder, ScrapedDocTest};
use crate::config::Options as RustdocOptions;
use crate::html::markdown;
@ -14,7 +16,7 @@ use crate::html::markdown;
/// This integer is incremented with every breaking change to the API,
/// and is returned along with the JSON blob into the `format_version` root field.
/// Consuming code should assert that this value matches the format version(s) that it supports.
const FORMAT_VERSION: u32 = 1;
const FORMAT_VERSION: u32 = 2;
#[derive(Serialize)]
pub(crate) struct ExtractedDocTests {
@ -34,7 +36,16 @@ impl ExtractedDocTests {
options: &RustdocOptions,
) {
let edition = scraped_test.edition(options);
self.add_test_with_edition(scraped_test, opts, edition)
}
/// This method is used by unit tests to not have to provide a `RustdocOptions`.
pub(crate) fn add_test_with_edition(
&mut self,
scraped_test: ScrapedDocTest,
opts: &super::GlobalTestOptions,
edition: Edition,
) {
let ScrapedDocTest { filename, line, langstr, text, name, global_crate_attrs, .. } =
scraped_test;
@ -44,8 +55,7 @@ impl ExtractedDocTests {
.edition(edition)
.lang_str(&langstr)
.build(None);
let (full_test_code, size) = doctest.generate_unique_doctest(
let (wrapped, _size) = doctest.generate_unique_doctest(
&text,
langstr.test_harness,
opts,
@ -55,11 +65,46 @@ impl ExtractedDocTests {
file: filename.prefer_remapped_unconditionaly().to_string(),
line,
doctest_attributes: langstr.into(),
doctest_code: if size != 0 { Some(full_test_code) } else { None },
doctest_code: match wrapped {
DocTestWrapResult::Valid { crate_level_code, wrapper, code } => Some(DocTest {
crate_level: crate_level_code,
code,
wrapper: wrapper.map(
|super::make::WrapperInfo { before, after, returns_result, .. }| {
WrapperInfo { before, after, returns_result }
},
),
}),
DocTestWrapResult::SyntaxError { .. } => None,
},
original_code: text,
name,
});
}
#[cfg(test)]
pub(crate) fn doctests(&self) -> &[ExtractedDocTest] {
&self.doctests
}
}
/// Serialized form of the code rustdoc wraps around a doctest's body.
/// Mirrors `make::WrapperInfo`, minus its internal-only fields.
#[derive(Serialize)]
pub(crate) struct WrapperInfo {
    /// Code inserted before the doctest code (e.g. the generated `fn main() {` prologue).
    before: String,
    /// Code appended after the doctest code (e.g. the wrapper's closing braces).
    after: String,
    /// Whether the doctest returns a `Result` type.
    returns_result: bool,
}
/// Serialized representation of a single processed doctest, split into the
/// pieces rustdoc assembles to produce the runnable test source.
#[derive(Serialize)]
pub(crate) struct DocTest {
    /// Crate-level code (attributes, `extern crate` items) added at the
    /// top level of the generated doctest.
    crate_level: String,
    /// The doctest code itself, without the `crate_level` or wrapper content.
    code: String,
    /// This field can be `None` if one of the following conditions is true:
    ///
    /// * The doctest's codeblock has the `test_harness` attribute.
    /// * The doctest has a `main` function.
    /// * The doctest has the `![no_std]` attribute.
    pub(crate) wrapper: Option<WrapperInfo>,
}
#[derive(Serialize)]
@ -69,7 +114,7 @@ pub(crate) struct ExtractedDocTest {
doctest_attributes: LangString,
original_code: String,
/// `None` if the code syntax is invalid.
doctest_code: Option<String>,
pub(crate) doctest_code: Option<DocTest>,
name: String,
}

View file

@ -196,6 +196,80 @@ pub(crate) struct DocTestBuilder {
pub(crate) can_be_merged: bool,
}
/// Contains needed information for doctest to be correctly generated with expected "wrapping".
pub(crate) struct WrapperInfo {
    /// Code emitted before the doctest body (e.g. the generated `fn main() {` prologue).
    pub(crate) before: String,
    /// Code emitted after the doctest body (e.g. the wrapper's closing braces).
    pub(crate) after: String,
    /// Whether the doctest returns a `Result` (detected by the body ending with `(())`).
    pub(crate) returns_result: bool,
    /// Whether each line of the doctest body gets four extra spaces of indentation
    /// when the full test source is rendered.
    insert_indent_space: bool,
}
impl WrapperInfo {
    /// Number of bytes contributed by the wrapper code itself (prologue plus
    /// epilogue); used to pre-size the rendered doctest buffer.
    fn len(&self) -> usize {
        let Self { before, after, .. } = self;
        before.len() + after.len()
    }
}
/// Contains doctest information. Can be converted into code with the `to_string()` method.
pub(crate) enum DocTestWrapResult {
    /// The doctest parsed successfully and was split into its components.
    Valid {
        /// Crate-level code (attributes, `extern crate` items) that precedes both
        /// the wrapper and the doctest code in the rendered output.
        crate_level_code: String,
        /// This field can be `None` if one of the following conditions is true:
        ///
        /// * The doctest's codeblock has the `test_harness` attribute.
        /// * The doctest has a `main` function.
        /// * The doctest has the `![no_std]` attribute.
        wrapper: Option<WrapperInfo>,
        /// Contains the doctest processed code without the wrappers (which are stored in the
        /// `wrapper` field).
        code: String,
    },
    /// Contains the original source code.
    /// Used when the doctest's AST failed to parse; the raw text is kept so the
    /// compiler reports the error on the unmodified code.
    SyntaxError(String),
}
impl std::string::ToString for DocTestWrapResult {
    /// Renders the complete doctest source: crate-level code, then the wrapper
    /// prologue (if any), the (possibly re-indented) doctest body, and the
    /// wrapper epilogue.
    fn to_string(&self) -> String {
        match self {
            // A syntax error keeps the original source untouched so the
            // compiler error points at the unmodified code.
            Self::SyntaxError(s) => s.clone(),
            Self::Valid { crate_level_code, wrapper, code } => {
                // Pre-compute the final length so `prog` is allocated once.
                let mut prog_len = code.len() + crate_level_code.len();
                if let Some(wrapper) = wrapper {
                    prog_len += wrapper.len();
                    if wrapper.insert_indent_space {
                        // Each body line gains a 4-space indent below.
                        prog_len += code.lines().count() * 4;
                    }
                }
                let mut prog = String::with_capacity(prog_len);
                prog.push_str(crate_level_code);
                if let Some(wrapper) = wrapper {
                    prog.push_str(&wrapper.before);

                    // add extra 4 spaces for each line to offset the code block
                    if wrapper.insert_indent_space {
                        // FIX: the indent must be four spaces to match the
                        // `* 4` capacity computation and the comment above;
                        // the format string had collapsed to a single space.
                        write!(
                            prog,
                            "{}",
                            fmt::from_fn(|f| code
                                .lines()
                                .map(|line| fmt::from_fn(move |f| write!(f, "    {line}")))
                                .joined("\n", f))
                        )
                        .unwrap();
                    } else {
                        prog.push_str(code);
                    }
                    prog.push_str(&wrapper.after);
                } else {
                    // No wrapper: the processed code directly follows the
                    // crate-level code.
                    prog.push_str(code);
                }
                prog
            }
        }
    }
}
impl DocTestBuilder {
fn invalid(
global_crate_attrs: Vec<String>,
@ -228,50 +302,49 @@ impl DocTestBuilder {
dont_insert_main: bool,
opts: &GlobalTestOptions,
crate_name: Option<&str>,
) -> (String, usize) {
) -> (DocTestWrapResult, usize) {
if self.invalid_ast {
// If the AST failed to compile, no need to go generate a complete doctest, the error
// will be better this way.
debug!("invalid AST:\n{test_code}");
return (test_code.to_string(), 0);
return (DocTestWrapResult::SyntaxError(test_code.to_string()), 0);
}
let mut line_offset = 0;
let mut prog = String::new();
let everything_else = self.everything_else.trim();
let mut crate_level_code = String::new();
let processed_code = self.everything_else.trim();
if self.global_crate_attrs.is_empty() {
// If there aren't any attributes supplied by #![doc(test(attr(...)))], then allow some
// lints that are commonly triggered in doctests. The crate-level test attributes are
// commonly used to make tests fail in case they trigger warnings, so having this there in
// that case may cause some tests to pass when they shouldn't have.
prog.push_str("#![allow(unused)]\n");
crate_level_code.push_str("#![allow(unused)]\n");
line_offset += 1;
}
// Next, any attributes that came from #![doc(test(attr(...)))].
for attr in &self.global_crate_attrs {
prog.push_str(&format!("#![{attr}]\n"));
crate_level_code.push_str(&format!("#![{attr}]\n"));
line_offset += 1;
}
// Now push any outer attributes from the example, assuming they
// are intended to be crate attributes.
if !self.crate_attrs.is_empty() {
prog.push_str(&self.crate_attrs);
crate_level_code.push_str(&self.crate_attrs);
if !self.crate_attrs.ends_with('\n') {
prog.push('\n');
crate_level_code.push('\n');
}
}
if !self.maybe_crate_attrs.is_empty() {
prog.push_str(&self.maybe_crate_attrs);
crate_level_code.push_str(&self.maybe_crate_attrs);
if !self.maybe_crate_attrs.ends_with('\n') {
prog.push('\n');
crate_level_code.push('\n');
}
}
if !self.crates.is_empty() {
prog.push_str(&self.crates);
crate_level_code.push_str(&self.crates);
if !self.crates.ends_with('\n') {
prog.push('\n');
crate_level_code.push('\n');
}
}
@ -289,17 +362,20 @@ impl DocTestBuilder {
{
// rustdoc implicitly inserts an `extern crate` item for the own crate
// which may be unused, so we need to allow the lint.
prog.push_str("#[allow(unused_extern_crates)]\n");
crate_level_code.push_str("#[allow(unused_extern_crates)]\n");
prog.push_str(&format!("extern crate r#{crate_name};\n"));
crate_level_code.push_str(&format!("extern crate r#{crate_name};\n"));
line_offset += 1;
}
// FIXME: This code cannot yet handle no_std test cases yet
if dont_insert_main || self.has_main_fn || prog.contains("![no_std]") {
prog.push_str(everything_else);
let wrapper = if dont_insert_main
|| self.has_main_fn
|| crate_level_code.contains("![no_std]")
{
None
} else {
let returns_result = everything_else.ends_with("(())");
let returns_result = processed_code.ends_with("(())");
// Give each doctest main function a unique name.
// This is for example needed for the tooling around `-C instrument-coverage`.
let inner_fn_name = if let Some(ref test_id) = self.test_id {
@ -333,28 +409,22 @@ impl DocTestBuilder {
// /// ``` <- end of the inner main
line_offset += 1;
prog.push_str(&main_pre);
Some(WrapperInfo {
before: main_pre,
after: main_post,
returns_result,
insert_indent_space: opts.insert_indent_space,
})
};
// add extra 4 spaces for each line to offset the code block
if opts.insert_indent_space {
write!(
prog,
"{}",
fmt::from_fn(|f| everything_else
.lines()
.map(|line| fmt::from_fn(move |f| write!(f, " {line}")))
.joined("\n", f))
)
.unwrap();
} else {
prog.push_str(everything_else);
};
prog.push_str(&main_post);
}
debug!("final doctest:\n{prog}");
(prog, line_offset)
(
DocTestWrapResult::Valid {
code: processed_code.to_string(),
wrapper,
crate_level_code,
},
line_offset,
)
}
}

View file

@ -1,6 +1,11 @@
use std::path::PathBuf;
use super::{BuildDocTestBuilder, GlobalTestOptions};
use rustc_span::edition::Edition;
use rustc_span::{DUMMY_SP, FileName};
use super::extracted::ExtractedDocTests;
use super::{BuildDocTestBuilder, GlobalTestOptions, ScrapedDocTest};
use crate::html::markdown::LangString;
fn make_test(
test_code: &str,
@ -19,9 +24,9 @@ fn make_test(
builder = builder.test_id(test_id.to_string());
}
let doctest = builder.build(None);
let (code, line_offset) =
let (wrapped, line_offset) =
doctest.generate_unique_doctest(test_code, dont_insert_main, opts, crate_name);
(code, line_offset)
(wrapped.to_string(), line_offset)
}
/// Default [`GlobalTestOptions`] for these unit tests.
@ -461,3 +466,51 @@ pub mod outer_module {
let (output, len) = make_test(input, None, false, &opts, Vec::new(), None);
assert_eq!((output, len), (expected, 2));
}
/// Builds an `ExtractedDocTests` containing the single doctest `code`,
/// extracted with default global options and the 2018 edition.
fn get_extracted_doctests(code: &str) -> ExtractedDocTests {
    let global_opts = default_global_opts("");
    let scraped = ScrapedDocTest::new(
        FileName::Custom(String::new()),
        0,
        Vec::new(),
        LangString::default(),
        code.to_string(),
        DUMMY_SP,
        Vec::new(),
    );
    let mut extracted = ExtractedDocTests::new();
    extracted.add_test_with_edition(scraped, &global_opts, Edition::Edition2018);
    extracted
}
// Test that `extracted::DocTest::wrapper` is `None` if the doctest has a `main` function.
#[test]
fn test_extracted_doctest_wrapper_field() {
    let extracted = get_extracted_doctests("fn main() {}");
    let doctests = extracted.doctests();
    assert_eq!(doctests.len(), 1);
    let code = doctests[0].doctest_code.as_ref().unwrap();
    assert!(code.wrapper.is_none());
}
// Test that `ExtractedDocTest::doctest_code` is `None` if the doctest has syntax error.
#[test]
fn test_extracted_doctest_doctest_code_field() {
    let extracted = get_extracted_doctests("let x +=");
    let doctests = extracted.doctests();
    assert_eq!(doctests.len(), 1);
    assert!(doctests[0].doctest_code.is_none());
}
// Test that `extracted::DocTest::wrapper` is `Some` if the doctest needs wrapping.
#[test]
fn test_extracted_doctest_wrapper_field_with_info() {
    let extracted = get_extracted_doctests("let x = 12;");
    let doctests = extracted.doctests();
    assert_eq!(doctests.len(), 1);
    let code = doctests[0].doctest_code.as_ref().unwrap();
    assert!(code.wrapper.is_some());
}

View file

@ -56,7 +56,7 @@ pub(crate) trait FormatRenderer<'tcx>: Sized {
fn restore_module_data(&mut self, info: Self::ModuleData);
/// Renders a single non-module item. This means no recursive sub-item rendering is required.
fn item(&mut self, item: clean::Item) -> Result<(), Error>;
fn item(&mut self, item: &clean::Item) -> Result<(), Error>;
/// Renders a module (should not handle recursing into children).
fn mod_item_in(&mut self, item: &clean::Item) -> Result<(), Error>;
@ -67,14 +67,14 @@ pub(crate) trait FormatRenderer<'tcx>: Sized {
}
/// Post processing hook for cleanup and dumping output to files.
fn after_krate(&mut self) -> Result<(), Error>;
fn after_krate(self) -> Result<(), Error>;
fn cache(&self) -> &Cache;
}
fn run_format_inner<'tcx, T: FormatRenderer<'tcx>>(
cx: &mut T,
item: clean::Item,
item: &clean::Item,
prof: &SelfProfilerRef,
) -> Result<(), Error> {
if item.is_mod() && T::RUN_ON_MODULE {
@ -84,12 +84,12 @@ fn run_format_inner<'tcx, T: FormatRenderer<'tcx>>(
prof.generic_activity_with_arg("render_mod_item", item.name.unwrap().to_string());
cx.mod_item_in(&item)?;
let (clean::StrippedItem(box clean::ModuleItem(module)) | clean::ModuleItem(module)) =
item.inner.kind
let (clean::StrippedItem(box clean::ModuleItem(ref module))
| clean::ModuleItem(ref module)) = item.inner.kind
else {
unreachable!()
};
for it in module.items {
for it in module.items.iter() {
let info = cx.save_module_data();
run_format_inner(cx, it, prof)?;
cx.restore_module_data(info);
@ -101,7 +101,7 @@ fn run_format_inner<'tcx, T: FormatRenderer<'tcx>>(
} else if let Some(item_name) = item.name
&& !item.is_extern_crate()
{
prof.generic_activity_with_arg("render_item", item_name.as_str()).run(|| cx.item(item))?;
prof.generic_activity_with_arg("render_item", item_name.as_str()).run(|| cx.item(&item))?;
}
Ok(())
}
@ -125,7 +125,7 @@ pub(crate) fn run_format<'tcx, T: FormatRenderer<'tcx>>(
}
// Render the crate documentation
run_format_inner(&mut format_renderer, krate.module, prof)?;
run_format_inner(&mut format_renderer, &krate.module, prof)?;
prof.verbose_generic_activity_with_arg("renderer_after_krate", T::descr())
.run(|| format_renderer.after_krate())

View file

@ -307,7 +307,8 @@ impl<'a, I: Iterator<Item = Event<'a>>> Iterator for CodeBlocks<'_, 'a, I> {
builder = builder.crate_name(krate);
}
let doctest = builder.build(None);
let (test, _) = doctest.generate_unique_doctest(&test, false, &opts, krate);
let (wrapped, _) = doctest.generate_unique_doctest(&test, false, &opts, krate);
let test = wrapped.to_string();
let channel = if test.contains("#![feature(") { "&amp;version=nightly" } else { "" };
let test_escaped = small_url_encode(test);

View file

@ -609,7 +609,7 @@ impl<'tcx> FormatRenderer<'tcx> for Context<'tcx> {
self.info = info;
}
fn after_krate(&mut self) -> Result<(), Error> {
fn after_krate(mut self) -> Result<(), Error> {
let crate_name = self.tcx().crate_name(LOCAL_CRATE);
let final_file = self.dst.join(crate_name.as_str()).join("all.html");
let settings_file = self.dst.join("settings.html");
@ -830,7 +830,7 @@ impl<'tcx> FormatRenderer<'tcx> for Context<'tcx> {
Ok(())
}
fn item(&mut self, item: clean::Item) -> Result<(), Error> {
fn item(&mut self, item: &clean::Item) -> Result<(), Error> {
// Stripped modules survive the rustdoc passes (i.e., `strip-private`)
// if they contain impls for public types. These modules can also
// contain items such as publicly re-exported structures.

View file

@ -13,6 +13,7 @@ use rustc_metadata::rendered_const;
use rustc_middle::{bug, ty};
use rustc_span::{Pos, Symbol, kw};
use rustdoc_json_types::*;
use thin_vec::ThinVec;
use crate::clean::{self, ItemId};
use crate::formats::FormatRenderer;
@ -21,7 +22,7 @@ use crate::json::JsonRenderer;
use crate::passes::collect_intra_doc_links::UrlFragment;
impl JsonRenderer<'_> {
pub(super) fn convert_item(&self, item: clean::Item) -> Option<Item> {
pub(super) fn convert_item(&self, item: &clean::Item) -> Option<Item> {
let deprecation = item.deprecation(self.tcx);
let links = self
.cache
@ -107,49 +108,54 @@ impl JsonRenderer<'_> {
}
}
fn ids(&self, items: impl IntoIterator<Item = clean::Item>) -> Vec<Id> {
fn ids(&self, items: &[clean::Item]) -> Vec<Id> {
items
.into_iter()
.filter(|x| !x.is_stripped() && !x.is_keyword())
.iter()
.filter(|i| !i.is_stripped() && !i.is_keyword())
.map(|i| self.id_from_item(&i))
.collect()
}
fn ids_keeping_stripped(
&self,
items: impl IntoIterator<Item = clean::Item>,
) -> Vec<Option<Id>> {
fn ids_keeping_stripped(&self, items: &[clean::Item]) -> Vec<Option<Id>> {
items
.into_iter()
.iter()
.map(|i| (!i.is_stripped() && !i.is_keyword()).then(|| self.id_from_item(&i)))
.collect()
}
}
pub(crate) trait FromClean<T> {
fn from_clean(f: T, renderer: &JsonRenderer<'_>) -> Self;
fn from_clean(f: &T, renderer: &JsonRenderer<'_>) -> Self;
}
pub(crate) trait IntoJson<T> {
fn into_json(self, renderer: &JsonRenderer<'_>) -> T;
fn into_json(&self, renderer: &JsonRenderer<'_>) -> T;
}
impl<T, U> IntoJson<U> for T
where
U: FromClean<T>,
{
fn into_json(self, renderer: &JsonRenderer<'_>) -> U {
fn into_json(&self, renderer: &JsonRenderer<'_>) -> U {
U::from_clean(self, renderer)
}
}
impl<I, T, U> FromClean<I> for Vec<U>
impl<T, U> FromClean<Vec<T>> for Vec<U>
where
I: IntoIterator<Item = T>,
U: FromClean<T>,
{
fn from_clean(f: I, renderer: &JsonRenderer<'_>) -> Vec<U> {
f.into_iter().map(|x| x.into_json(renderer)).collect()
fn from_clean(items: &Vec<T>, renderer: &JsonRenderer<'_>) -> Vec<U> {
items.iter().map(|i| i.into_json(renderer)).collect()
}
}
impl<T, U> FromClean<ThinVec<T>> for Vec<U>
where
U: FromClean<T>,
{
fn from_clean(items: &ThinVec<T>, renderer: &JsonRenderer<'_>) -> Vec<U> {
items.iter().map(|i| i.into_json(renderer)).collect()
}
}
@ -165,7 +171,7 @@ pub(crate) fn from_deprecation(deprecation: attrs::Deprecation) -> Deprecation {
}
impl FromClean<clean::GenericArgs> for GenericArgs {
fn from_clean(args: clean::GenericArgs, renderer: &JsonRenderer<'_>) -> Self {
fn from_clean(args: &clean::GenericArgs, renderer: &JsonRenderer<'_>) -> Self {
use clean::GenericArgs::*;
match args {
AngleBracketed { args, constraints } => GenericArgs::AngleBracketed {
@ -174,7 +180,7 @@ impl FromClean<clean::GenericArgs> for GenericArgs {
},
Parenthesized { inputs, output } => GenericArgs::Parenthesized {
inputs: inputs.into_json(renderer),
output: output.map(|a| (*a).into_json(renderer)),
output: output.as_ref().map(|a| a.as_ref().into_json(renderer)),
},
ReturnTypeNotation => GenericArgs::ReturnTypeNotation,
}
@ -182,7 +188,7 @@ impl FromClean<clean::GenericArgs> for GenericArgs {
}
impl FromClean<clean::GenericArg> for GenericArg {
fn from_clean(arg: clean::GenericArg, renderer: &JsonRenderer<'_>) -> Self {
fn from_clean(arg: &clean::GenericArg, renderer: &JsonRenderer<'_>) -> Self {
use clean::GenericArg::*;
match arg {
Lifetime(l) => GenericArg::Lifetime(convert_lifetime(l)),
@ -195,7 +201,7 @@ impl FromClean<clean::GenericArg> for GenericArg {
impl FromClean<clean::Constant> for Constant {
// FIXME(generic_const_items): Add support for generic const items.
fn from_clean(constant: clean::Constant, renderer: &JsonRenderer<'_>) -> Self {
fn from_clean(constant: &clean::Constant, renderer: &JsonRenderer<'_>) -> Self {
let tcx = renderer.tcx;
let expr = constant.expr(tcx);
let value = constant.value(tcx);
@ -206,7 +212,7 @@ impl FromClean<clean::Constant> for Constant {
impl FromClean<clean::ConstantKind> for Constant {
// FIXME(generic_const_items): Add support for generic const items.
fn from_clean(constant: clean::ConstantKind, renderer: &JsonRenderer<'_>) -> Self {
fn from_clean(constant: &clean::ConstantKind, renderer: &JsonRenderer<'_>) -> Self {
let tcx = renderer.tcx;
let expr = constant.expr(tcx);
let value = constant.value(tcx);
@ -216,7 +222,7 @@ impl FromClean<clean::ConstantKind> for Constant {
}
impl FromClean<clean::AssocItemConstraint> for AssocItemConstraint {
fn from_clean(constraint: clean::AssocItemConstraint, renderer: &JsonRenderer<'_>) -> Self {
fn from_clean(constraint: &clean::AssocItemConstraint, renderer: &JsonRenderer<'_>) -> Self {
AssocItemConstraint {
name: constraint.assoc.name.to_string(),
args: constraint.assoc.args.into_json(renderer),
@ -226,7 +232,7 @@ impl FromClean<clean::AssocItemConstraint> for AssocItemConstraint {
}
impl FromClean<clean::AssocItemConstraintKind> for AssocItemConstraintKind {
fn from_clean(kind: clean::AssocItemConstraintKind, renderer: &JsonRenderer<'_>) -> Self {
fn from_clean(kind: &clean::AssocItemConstraintKind, renderer: &JsonRenderer<'_>) -> Self {
use clean::AssocItemConstraintKind::*;
match kind {
Equality { term } => AssocItemConstraintKind::Equality(term.into_json(renderer)),
@ -235,15 +241,15 @@ impl FromClean<clean::AssocItemConstraintKind> for AssocItemConstraintKind {
}
}
fn from_clean_item(item: clean::Item, renderer: &JsonRenderer<'_>) -> ItemEnum {
fn from_clean_item(item: &clean::Item, renderer: &JsonRenderer<'_>) -> ItemEnum {
use clean::ItemKind::*;
let name = item.name;
let is_crate = item.is_crate();
let header = item.fn_header(renderer.tcx);
match item.inner.kind {
match &item.inner.kind {
ModuleItem(m) => {
ItemEnum::Module(Module { is_crate, items: renderer.ids(m.items), is_stripped: false })
ItemEnum::Module(Module { is_crate, items: renderer.ids(&m.items), is_stripped: false })
}
ImportItem(i) => ItemEnum::Use(i.into_json(renderer)),
StructItem(s) => ItemEnum::Struct(s.into_json(renderer)),
@ -251,27 +257,27 @@ fn from_clean_item(item: clean::Item, renderer: &JsonRenderer<'_>) -> ItemEnum {
StructFieldItem(f) => ItemEnum::StructField(f.into_json(renderer)),
EnumItem(e) => ItemEnum::Enum(e.into_json(renderer)),
VariantItem(v) => ItemEnum::Variant(v.into_json(renderer)),
FunctionItem(f) => ItemEnum::Function(from_function(*f, true, header.unwrap(), renderer)),
FunctionItem(f) => ItemEnum::Function(from_function(f, true, header.unwrap(), renderer)),
ForeignFunctionItem(f, _) => {
ItemEnum::Function(from_function(*f, false, header.unwrap(), renderer))
ItemEnum::Function(from_function(f, false, header.unwrap(), renderer))
}
TraitItem(t) => ItemEnum::Trait((*t).into_json(renderer)),
TraitItem(t) => ItemEnum::Trait(t.as_ref().into_json(renderer)),
TraitAliasItem(t) => ItemEnum::TraitAlias(t.into_json(renderer)),
MethodItem(m, _) => ItemEnum::Function(from_function(*m, true, header.unwrap(), renderer)),
MethodItem(m, _) => ItemEnum::Function(from_function(m, true, header.unwrap(), renderer)),
RequiredMethodItem(m) => {
ItemEnum::Function(from_function(*m, false, header.unwrap(), renderer))
ItemEnum::Function(from_function(m, false, header.unwrap(), renderer))
}
ImplItem(i) => ItemEnum::Impl((*i).into_json(renderer)),
StaticItem(s) => ItemEnum::Static(convert_static(s, rustc_hir::Safety::Safe, renderer)),
ImplItem(i) => ItemEnum::Impl(i.as_ref().into_json(renderer)),
StaticItem(s) => ItemEnum::Static(convert_static(s, &rustc_hir::Safety::Safe, renderer)),
ForeignStaticItem(s, safety) => ItemEnum::Static(convert_static(s, safety, renderer)),
ForeignTypeItem => ItemEnum::ExternType,
TypeAliasItem(t) => ItemEnum::TypeAlias(t.into_json(renderer)),
TypeAliasItem(t) => ItemEnum::TypeAlias(t.as_ref().into_json(renderer)),
// FIXME(generic_const_items): Add support for generic free consts
ConstantItem(ci) => ItemEnum::Constant {
type_: ci.type_.into_json(renderer),
const_: ci.kind.into_json(renderer),
},
MacroItem(m) => ItemEnum::Macro(m.source),
MacroItem(m) => ItemEnum::Macro(m.source.clone()),
ProcMacroItem(m) => ItemEnum::ProcMacro(m.into_json(renderer)),
PrimitiveItem(p) => {
ItemEnum::Primitive(Primitive {
@ -281,7 +287,7 @@ fn from_clean_item(item: clean::Item, renderer: &JsonRenderer<'_>) -> ItemEnum {
}
// FIXME(generic_const_items): Add support for generic associated consts.
RequiredAssocConstItem(_generics, ty) => {
ItemEnum::AssocConst { type_: (*ty).into_json(renderer), value: None }
ItemEnum::AssocConst { type_: ty.as_ref().into_json(renderer), value: None }
}
// FIXME(generic_const_items): Add support for generic associated consts.
ProvidedAssocConstItem(ci) | ImplAssocConstItem(ci) => ItemEnum::AssocConst {
@ -296,22 +302,22 @@ fn from_clean_item(item: clean::Item, renderer: &JsonRenderer<'_>) -> ItemEnum {
AssocTypeItem(t, b) => ItemEnum::AssocType {
generics: t.generics.into_json(renderer),
bounds: b.into_json(renderer),
type_: Some(t.item_type.unwrap_or(t.type_).into_json(renderer)),
type_: Some(t.item_type.as_ref().unwrap_or(&t.type_).into_json(renderer)),
},
// `convert_item` early returns `None` for stripped items and keywords.
KeywordItem => unreachable!(),
StrippedItem(inner) => {
match *inner {
match inner.as_ref() {
ModuleItem(m) => ItemEnum::Module(Module {
is_crate,
items: renderer.ids(m.items),
items: renderer.ids(&m.items),
is_stripped: true,
}),
// `convert_item` early returns `None` for stripped items we're not including
_ => unreachable!(),
}
}
ExternCrateItem { ref src } => ItemEnum::ExternCrate {
ExternCrateItem { src } => ItemEnum::ExternCrate {
name: name.as_ref().unwrap().to_string(),
rename: src.map(|x| x.to_string()),
},
@ -319,17 +325,17 @@ fn from_clean_item(item: clean::Item, renderer: &JsonRenderer<'_>) -> ItemEnum {
}
impl FromClean<clean::Struct> for Struct {
fn from_clean(struct_: clean::Struct, renderer: &JsonRenderer<'_>) -> Self {
fn from_clean(struct_: &clean::Struct, renderer: &JsonRenderer<'_>) -> Self {
let has_stripped_fields = struct_.has_stripped_entries();
let clean::Struct { ctor_kind, generics, fields } = struct_;
let kind = match ctor_kind {
Some(CtorKind::Fn) => StructKind::Tuple(renderer.ids_keeping_stripped(fields)),
Some(CtorKind::Fn) => StructKind::Tuple(renderer.ids_keeping_stripped(&fields)),
Some(CtorKind::Const) => {
assert!(fields.is_empty());
StructKind::Unit
}
None => StructKind::Plain { fields: renderer.ids(fields), has_stripped_fields },
None => StructKind::Plain { fields: renderer.ids(&fields), has_stripped_fields },
};
Struct {
@ -341,13 +347,13 @@ impl FromClean<clean::Struct> for Struct {
}
impl FromClean<clean::Union> for Union {
fn from_clean(union_: clean::Union, renderer: &JsonRenderer<'_>) -> Self {
fn from_clean(union_: &clean::Union, renderer: &JsonRenderer<'_>) -> Self {
let has_stripped_fields = union_.has_stripped_entries();
let clean::Union { generics, fields } = union_;
Union {
generics: generics.into_json(renderer),
has_stripped_fields,
fields: renderer.ids(fields),
fields: renderer.ids(&fields),
impls: Vec::new(), // Added in JsonRenderer::item
}
}
@ -377,12 +383,12 @@ fn convert_abi(a: ExternAbi) -> Abi {
}
}
fn convert_lifetime(l: clean::Lifetime) -> String {
fn convert_lifetime(l: &clean::Lifetime) -> String {
l.0.to_string()
}
impl FromClean<clean::Generics> for Generics {
fn from_clean(generics: clean::Generics, renderer: &JsonRenderer<'_>) -> Self {
fn from_clean(generics: &clean::Generics, renderer: &JsonRenderer<'_>) -> Self {
Generics {
params: generics.params.into_json(renderer),
where_predicates: generics.where_predicates.into_json(renderer),
@ -391,7 +397,7 @@ impl FromClean<clean::Generics> for Generics {
}
impl FromClean<clean::GenericParamDef> for GenericParamDef {
fn from_clean(generic_param: clean::GenericParamDef, renderer: &JsonRenderer<'_>) -> Self {
fn from_clean(generic_param: &clean::GenericParamDef, renderer: &JsonRenderer<'_>) -> Self {
GenericParamDef {
name: generic_param.name.to_string(),
kind: generic_param.kind.into_json(renderer),
@ -400,7 +406,7 @@ impl FromClean<clean::GenericParamDef> for GenericParamDef {
}
impl FromClean<clean::GenericParamDefKind> for GenericParamDefKind {
fn from_clean(kind: clean::GenericParamDefKind, renderer: &JsonRenderer<'_>) -> Self {
fn from_clean(kind: &clean::GenericParamDefKind, renderer: &JsonRenderer<'_>) -> Self {
use clean::GenericParamDefKind::*;
match kind {
Lifetime { outlives } => GenericParamDefKind::Lifetime {
@ -408,29 +414,29 @@ impl FromClean<clean::GenericParamDefKind> for GenericParamDefKind {
},
Type { bounds, default, synthetic } => GenericParamDefKind::Type {
bounds: bounds.into_json(renderer),
default: default.map(|x| (*x).into_json(renderer)),
is_synthetic: synthetic,
default: default.as_ref().map(|x| x.as_ref().into_json(renderer)),
is_synthetic: *synthetic,
},
Const { ty, default, synthetic: _ } => GenericParamDefKind::Const {
type_: (*ty).into_json(renderer),
default: default.map(|x| *x),
type_: ty.as_ref().into_json(renderer),
default: default.as_ref().map(|x| x.as_ref().clone()),
},
}
}
}
impl FromClean<clean::WherePredicate> for WherePredicate {
fn from_clean(predicate: clean::WherePredicate, renderer: &JsonRenderer<'_>) -> Self {
fn from_clean(predicate: &clean::WherePredicate, renderer: &JsonRenderer<'_>) -> Self {
use clean::WherePredicate::*;
match predicate {
BoundPredicate { ty, bounds, bound_params } => WherePredicate::BoundPredicate {
type_: ty.into_json(renderer),
bounds: bounds.into_json(renderer),
generic_params: bound_params
.into_iter()
.iter()
.map(|x| {
let name = x.name.to_string();
let kind = match x.kind {
let kind = match &x.kind {
clean::GenericParamDefKind::Lifetime { outlives } => {
GenericParamDefKind::Lifetime {
outlives: outlives.iter().map(|lt| lt.0.to_string()).collect(),
@ -442,14 +448,16 @@ impl FromClean<clean::WherePredicate> for WherePredicate {
.into_iter()
.map(|bound| bound.into_json(renderer))
.collect(),
default: default.map(|ty| (*ty).into_json(renderer)),
is_synthetic: synthetic,
default: default
.as_ref()
.map(|ty| ty.as_ref().into_json(renderer)),
is_synthetic: *synthetic,
}
}
clean::GenericParamDefKind::Const { ty, default, synthetic: _ } => {
GenericParamDefKind::Const {
type_: (*ty).into_json(renderer),
default: default.map(|d| *d),
type_: ty.as_ref().into_json(renderer),
default: default.as_ref().map(|d| d.as_ref().clone()),
}
}
};
@ -462,7 +470,7 @@ impl FromClean<clean::WherePredicate> for WherePredicate {
outlives: bounds
.iter()
.map(|bound| match bound {
clean::GenericBound::Outlives(lt) => convert_lifetime(*lt),
clean::GenericBound::Outlives(lt) => convert_lifetime(lt),
_ => bug!("found non-outlives-bound on lifetime predicate"),
})
.collect(),
@ -479,7 +487,7 @@ impl FromClean<clean::WherePredicate> for WherePredicate {
}
impl FromClean<clean::GenericBound> for GenericBound {
fn from_clean(bound: clean::GenericBound, renderer: &JsonRenderer<'_>) -> Self {
fn from_clean(bound: &clean::GenericBound, renderer: &JsonRenderer<'_>) -> Self {
use clean::GenericBound::*;
match bound {
TraitBound(clean::PolyTrait { trait_, generic_params }, modifier) => {
@ -494,7 +502,7 @@ impl FromClean<clean::GenericBound> for GenericBound {
args.iter()
.map(|arg| match arg {
clean::PreciseCapturingArg::Lifetime(lt) => {
PreciseCapturingArg::Lifetime(convert_lifetime(*lt))
PreciseCapturingArg::Lifetime(convert_lifetime(lt))
}
clean::PreciseCapturingArg::Param(param) => {
PreciseCapturingArg::Param(param.to_string())
@ -507,7 +515,7 @@ impl FromClean<clean::GenericBound> for GenericBound {
}
pub(crate) fn from_trait_bound_modifier(
modifiers: rustc_hir::TraitBoundModifiers,
modifiers: &rustc_hir::TraitBoundModifiers,
) -> TraitBoundModifier {
use rustc_hir as hir;
let hir::TraitBoundModifiers { constness, polarity } = modifiers;
@ -523,7 +531,7 @@ pub(crate) fn from_trait_bound_modifier(
}
impl FromClean<clean::Type> for Type {
fn from_clean(ty: clean::Type, renderer: &JsonRenderer<'_>) -> Self {
fn from_clean(ty: &clean::Type, renderer: &JsonRenderer<'_>) -> Self {
use clean::Type::{
Array, BareFunction, BorrowedRef, Generic, ImplTrait, Infer, Primitive, QPath,
RawPointer, SelfTy, Slice, Tuple, UnsafeBinder,
@ -532,35 +540,35 @@ impl FromClean<clean::Type> for Type {
match ty {
clean::Type::Path { path } => Type::ResolvedPath(path.into_json(renderer)),
clean::Type::DynTrait(bounds, lt) => Type::DynTrait(DynTrait {
lifetime: lt.map(convert_lifetime),
lifetime: lt.as_ref().map(convert_lifetime),
traits: bounds.into_json(renderer),
}),
Generic(s) => Type::Generic(s.to_string()),
// FIXME: add dedicated variant to json Type?
SelfTy => Type::Generic("Self".to_owned()),
Primitive(p) => Type::Primitive(p.as_sym().to_string()),
BareFunction(f) => Type::FunctionPointer(Box::new((*f).into_json(renderer))),
BareFunction(f) => Type::FunctionPointer(Box::new(f.as_ref().into_json(renderer))),
Tuple(t) => Type::Tuple(t.into_json(renderer)),
Slice(t) => Type::Slice(Box::new((*t).into_json(renderer))),
Slice(t) => Type::Slice(Box::new(t.as_ref().into_json(renderer))),
Array(t, s) => {
Type::Array { type_: Box::new((*t).into_json(renderer)), len: s.to_string() }
Type::Array { type_: Box::new(t.as_ref().into_json(renderer)), len: s.to_string() }
}
clean::Type::Pat(t, p) => Type::Pat {
type_: Box::new((*t).into_json(renderer)),
type_: Box::new(t.as_ref().into_json(renderer)),
__pat_unstable_do_not_use: p.to_string(),
},
ImplTrait(g) => Type::ImplTrait(g.into_json(renderer)),
Infer => Type::Infer,
RawPointer(mutability, type_) => Type::RawPointer {
is_mutable: mutability == ast::Mutability::Mut,
type_: Box::new((*type_).into_json(renderer)),
is_mutable: *mutability == ast::Mutability::Mut,
type_: Box::new(type_.as_ref().into_json(renderer)),
},
BorrowedRef { lifetime, mutability, type_ } => Type::BorrowedRef {
lifetime: lifetime.map(convert_lifetime),
is_mutable: mutability == ast::Mutability::Mut,
type_: Box::new((*type_).into_json(renderer)),
lifetime: lifetime.as_ref().map(convert_lifetime),
is_mutable: *mutability == ast::Mutability::Mut,
type_: Box::new(type_.as_ref().into_json(renderer)),
},
QPath(qpath) => (*qpath).into_json(renderer),
QPath(qpath) => qpath.as_ref().into_json(renderer),
// FIXME(unsafe_binder): Implement rustdoc-json.
UnsafeBinder(_) => todo!(),
}
@ -568,30 +576,30 @@ impl FromClean<clean::Type> for Type {
}
impl FromClean<clean::Path> for Path {
fn from_clean(path: clean::Path, renderer: &JsonRenderer<'_>) -> Path {
fn from_clean(path: &clean::Path, renderer: &JsonRenderer<'_>) -> Path {
Path {
path: path.whole_name(),
id: renderer.id_from_item_default(path.def_id().into()),
args: path.segments.last().map(|args| Box::new(args.clone().args.into_json(renderer))),
args: path.segments.last().map(|args| Box::new(args.args.into_json(renderer))),
}
}
}
impl FromClean<clean::QPathData> for Type {
fn from_clean(qpath: clean::QPathData, renderer: &JsonRenderer<'_>) -> Self {
fn from_clean(qpath: &clean::QPathData, renderer: &JsonRenderer<'_>) -> Self {
let clean::QPathData { assoc, self_type, should_fully_qualify: _, trait_ } = qpath;
Self::QualifiedPath {
name: assoc.name.to_string(),
args: Box::new(assoc.args.into_json(renderer)),
self_type: Box::new(self_type.into_json(renderer)),
trait_: trait_.map(|trait_| trait_.into_json(renderer)),
trait_: trait_.as_ref().map(|trait_| trait_.into_json(renderer)),
}
}
}
impl FromClean<clean::Term> for Term {
fn from_clean(term: clean::Term, renderer: &JsonRenderer<'_>) -> Term {
fn from_clean(term: &clean::Term, renderer: &JsonRenderer<'_>) -> Term {
match term {
clean::Term::Type(ty) => Term::Type(ty.into_json(renderer)),
clean::Term::Constant(c) => Term::Constant(c.into_json(renderer)),
@ -600,14 +608,14 @@ impl FromClean<clean::Term> for Term {
}
impl FromClean<clean::BareFunctionDecl> for FunctionPointer {
fn from_clean(bare_decl: clean::BareFunctionDecl, renderer: &JsonRenderer<'_>) -> Self {
fn from_clean(bare_decl: &clean::BareFunctionDecl, renderer: &JsonRenderer<'_>) -> Self {
let clean::BareFunctionDecl { safety, generic_params, decl, abi } = bare_decl;
FunctionPointer {
header: FunctionHeader {
is_unsafe: safety.is_unsafe(),
is_const: false,
is_async: false,
abi: convert_abi(abi),
abi: convert_abi(*abi),
},
generic_params: generic_params.into_json(renderer),
sig: decl.into_json(renderer),
@ -616,7 +624,7 @@ impl FromClean<clean::BareFunctionDecl> for FunctionPointer {
}
impl FromClean<clean::FnDecl> for FunctionSignature {
fn from_clean(decl: clean::FnDecl, renderer: &JsonRenderer<'_>) -> Self {
fn from_clean(decl: &clean::FnDecl, renderer: &JsonRenderer<'_>) -> Self {
let clean::FnDecl { inputs, output, c_variadic } = decl;
FunctionSignature {
inputs: inputs
@ -629,13 +637,13 @@ impl FromClean<clean::FnDecl> for FunctionSignature {
})
.collect(),
output: if output.is_unit() { None } else { Some(output.into_json(renderer)) },
is_c_variadic: c_variadic,
is_c_variadic: *c_variadic,
}
}
}
impl FromClean<clean::Trait> for Trait {
fn from_clean(trait_: clean::Trait, renderer: &JsonRenderer<'_>) -> Self {
fn from_clean(trait_: &clean::Trait, renderer: &JsonRenderer<'_>) -> Self {
let tcx = renderer.tcx;
let is_auto = trait_.is_auto(tcx);
let is_unsafe = trait_.safety(tcx).is_unsafe();
@ -645,7 +653,7 @@ impl FromClean<clean::Trait> for Trait {
is_auto,
is_unsafe,
is_dyn_compatible,
items: renderer.ids(items),
items: renderer.ids(&items),
generics: generics.into_json(renderer),
bounds: bounds.into_json(renderer),
implementations: Vec::new(), // Added in JsonRenderer::item
@ -655,7 +663,7 @@ impl FromClean<clean::Trait> for Trait {
impl FromClean<clean::PolyTrait> for PolyTrait {
fn from_clean(
clean::PolyTrait { trait_, generic_params }: clean::PolyTrait,
clean::PolyTrait { trait_, generic_params }: &clean::PolyTrait,
renderer: &JsonRenderer<'_>,
) -> Self {
PolyTrait {
@ -666,14 +674,14 @@ impl FromClean<clean::PolyTrait> for PolyTrait {
}
impl FromClean<clean::Impl> for Impl {
fn from_clean(impl_: clean::Impl, renderer: &JsonRenderer<'_>) -> Self {
fn from_clean(impl_: &clean::Impl, renderer: &JsonRenderer<'_>) -> Self {
let provided_trait_methods = impl_.provided_trait_methods(renderer.tcx);
let clean::Impl { safety, generics, trait_, for_, items, polarity, kind } = impl_;
// FIXME: use something like ImplKind in JSON?
let (is_synthetic, blanket_impl) = match kind {
clean::ImplKind::Normal | clean::ImplKind::FakeVariadic => (false, None),
clean::ImplKind::Auto => (true, None),
clean::ImplKind::Blanket(ty) => (false, Some(*ty)),
clean::ImplKind::Blanket(ty) => (false, Some(ty)),
};
let is_negative = match polarity {
ty::ImplPolarity::Positive | ty::ImplPolarity::Reservation => false,
@ -686,18 +694,18 @@ impl FromClean<clean::Impl> for Impl {
.into_iter()
.map(|x| x.to_string())
.collect(),
trait_: trait_.map(|path| path.into_json(renderer)),
trait_: trait_.as_ref().map(|path| path.into_json(renderer)),
for_: for_.into_json(renderer),
items: renderer.ids(items),
items: renderer.ids(&items),
is_negative,
is_synthetic,
blanket_impl: blanket_impl.map(|x| x.into_json(renderer)),
blanket_impl: blanket_impl.map(|x| x.as_ref().into_json(renderer)),
}
}
}
pub(crate) fn from_function(
clean::Function { decl, generics }: clean::Function,
clean::Function { decl, generics }: &clean::Function,
has_body: bool,
header: rustc_hir::FnHeader,
renderer: &JsonRenderer<'_>,
@ -711,30 +719,30 @@ pub(crate) fn from_function(
}
impl FromClean<clean::Enum> for Enum {
fn from_clean(enum_: clean::Enum, renderer: &JsonRenderer<'_>) -> Self {
fn from_clean(enum_: &clean::Enum, renderer: &JsonRenderer<'_>) -> Self {
let has_stripped_variants = enum_.has_stripped_entries();
let clean::Enum { variants, generics } = enum_;
Enum {
generics: generics.into_json(renderer),
has_stripped_variants,
variants: renderer.ids(variants),
variants: renderer.ids(&variants.as_slice().raw),
impls: Vec::new(), // Added in JsonRenderer::item
}
}
}
impl FromClean<clean::Variant> for Variant {
fn from_clean(variant: clean::Variant, renderer: &JsonRenderer<'_>) -> Self {
fn from_clean(variant: &clean::Variant, renderer: &JsonRenderer<'_>) -> Self {
use clean::VariantKind::*;
let discriminant = variant.discriminant.map(|d| d.into_json(renderer));
let discriminant = variant.discriminant.as_ref().map(|d| d.into_json(renderer));
let kind = match variant.kind {
let kind = match &variant.kind {
CLike => VariantKind::Plain,
Tuple(fields) => VariantKind::Tuple(renderer.ids_keeping_stripped(fields)),
Tuple(fields) => VariantKind::Tuple(renderer.ids_keeping_stripped(&fields)),
Struct(s) => VariantKind::Struct {
has_stripped_fields: s.has_stripped_entries(),
fields: renderer.ids(s.fields),
fields: renderer.ids(&s.fields),
},
};
@ -743,7 +751,7 @@ impl FromClean<clean::Variant> for Variant {
}
impl FromClean<clean::Discriminant> for Discriminant {
fn from_clean(disr: clean::Discriminant, renderer: &JsonRenderer<'_>) -> Self {
fn from_clean(disr: &clean::Discriminant, renderer: &JsonRenderer<'_>) -> Self {
let tcx = renderer.tcx;
Discriminant {
// expr is only none if going through the inlining path, which gets
@ -756,7 +764,7 @@ impl FromClean<clean::Discriminant> for Discriminant {
}
impl FromClean<clean::Import> for Use {
fn from_clean(import: clean::Import, renderer: &JsonRenderer<'_>) -> Self {
fn from_clean(import: &clean::Import, renderer: &JsonRenderer<'_>) -> Self {
use clean::ImportKind::*;
let (name, is_glob) = match import.kind {
Simple(s) => (s.to_string(), false),
@ -775,7 +783,7 @@ impl FromClean<clean::Import> for Use {
}
impl FromClean<clean::ProcMacro> for ProcMacro {
fn from_clean(mac: clean::ProcMacro, _renderer: &JsonRenderer<'_>) -> Self {
fn from_clean(mac: &clean::ProcMacro, _renderer: &JsonRenderer<'_>) -> Self {
ProcMacro {
kind: from_macro_kind(mac.kind),
helpers: mac.helpers.iter().map(|x| x.to_string()).collect(),
@ -792,21 +800,21 @@ pub(crate) fn from_macro_kind(kind: rustc_span::hygiene::MacroKind) -> MacroKind
}
}
impl FromClean<Box<clean::TypeAlias>> for TypeAlias {
fn from_clean(type_alias: Box<clean::TypeAlias>, renderer: &JsonRenderer<'_>) -> Self {
let clean::TypeAlias { type_, generics, item_type: _, inner_type: _ } = *type_alias;
impl FromClean<clean::TypeAlias> for TypeAlias {
fn from_clean(type_alias: &clean::TypeAlias, renderer: &JsonRenderer<'_>) -> Self {
let clean::TypeAlias { type_, generics, item_type: _, inner_type: _ } = type_alias;
TypeAlias { type_: type_.into_json(renderer), generics: generics.into_json(renderer) }
}
}
fn convert_static(
stat: clean::Static,
safety: rustc_hir::Safety,
stat: &clean::Static,
safety: &rustc_hir::Safety,
renderer: &JsonRenderer<'_>,
) -> Static {
let tcx = renderer.tcx;
Static {
type_: (*stat.type_).into_json(renderer),
type_: stat.type_.as_ref().into_json(renderer),
is_mutable: stat.mutability == ast::Mutability::Mut,
is_unsafe: safety.is_unsafe(),
expr: stat
@ -817,7 +825,7 @@ fn convert_static(
}
impl FromClean<clean::TraitAlias> for TraitAlias {
fn from_clean(alias: clean::TraitAlias, renderer: &JsonRenderer<'_>) -> Self {
fn from_clean(alias: &clean::TraitAlias, renderer: &JsonRenderer<'_>) -> Self {
TraitAlias {
generics: alias.generics.into_json(renderer),
params: alias.bounds.into_json(renderer),
@ -826,7 +834,7 @@ impl FromClean<clean::TraitAlias> for TraitAlias {
}
impl FromClean<ItemType> for ItemKind {
fn from_clean(kind: ItemType, _renderer: &JsonRenderer<'_>) -> Self {
fn from_clean(kind: &ItemType, _renderer: &JsonRenderer<'_>) -> Self {
use ItemType::*;
match kind {
Module => ItemKind::Module,

View file

@ -37,19 +37,18 @@ use crate::formats::cache::Cache;
use crate::json::conversions::IntoJson;
use crate::{clean, try_err};
#[derive(Clone)]
pub(crate) struct JsonRenderer<'tcx> {
tcx: TyCtxt<'tcx>,
/// A mapping of IDs that contains all local items for this crate which gets output as a top
/// level field of the JSON blob.
index: Rc<RefCell<FxHashMap<types::Id, types::Item>>>,
index: FxHashMap<types::Id, types::Item>,
/// The directory where the JSON blob should be written to.
///
/// If this is `None`, the blob will be printed to `stdout` instead.
out_dir: Option<PathBuf>,
cache: Rc<Cache>,
imported_items: DefIdSet,
id_interner: Rc<RefCell<ids::IdInterner>>,
id_interner: RefCell<ids::IdInterner>,
}
impl<'tcx> JsonRenderer<'tcx> {
@ -66,7 +65,7 @@ impl<'tcx> JsonRenderer<'tcx> {
.iter()
.map(|i| {
let item = &i.impl_item;
self.item(item.clone()).unwrap();
self.item(item).unwrap();
self.id_from_item(item)
})
.collect()
@ -97,7 +96,7 @@ impl<'tcx> JsonRenderer<'tcx> {
}
if item.item_id.is_local() || is_primitive_impl {
self.item(item.clone()).unwrap();
self.item(item).unwrap();
Some(self.id_from_item(item))
} else {
None
@ -198,7 +197,7 @@ impl<'tcx> FormatRenderer<'tcx> for JsonRenderer<'tcx> {
Ok((
JsonRenderer {
tcx,
index: Rc::new(RefCell::new(FxHashMap::default())),
index: FxHashMap::default(),
out_dir: if options.output_to_stdout { None } else { Some(options.output) },
cache: Rc::new(cache),
imported_items,
@ -218,7 +217,9 @@ impl<'tcx> FormatRenderer<'tcx> for JsonRenderer<'tcx> {
/// Inserts an item into the index. This should be used rather than directly calling insert on
/// the hashmap because certain items (traits and types) need to have their mappings for trait
/// implementations filled out before they're inserted.
fn item(&mut self, item: clean::Item) -> Result<(), Error> {
fn item(&mut self, item: &clean::Item) -> Result<(), Error> {
use std::collections::hash_map::Entry;
let item_type = item.type_();
let item_name = item.name;
trace!("rendering {item_type} {item_name:?}");
@ -226,11 +227,11 @@ impl<'tcx> FormatRenderer<'tcx> for JsonRenderer<'tcx> {
// Flatten items that recursively store other items. We include orphaned items from
// stripped modules and etc that are otherwise reachable.
if let ItemKind::StrippedItem(inner) = &item.kind {
inner.inner_items().for_each(|i| self.item(i.clone()).unwrap());
inner.inner_items().for_each(|i| self.item(i).unwrap());
}
// Flatten items that recursively store other items
item.kind.inner_items().for_each(|i| self.item(i.clone()).unwrap());
item.kind.inner_items().for_each(|i| self.item(i).unwrap());
let item_id = item.item_id;
if let Some(mut new_item) = self.convert_item(item) {
@ -273,18 +274,25 @@ impl<'tcx> FormatRenderer<'tcx> for JsonRenderer<'tcx> {
| types::ItemEnum::Macro(_)
| types::ItemEnum::ProcMacro(_) => false,
};
let removed = self.index.borrow_mut().insert(new_item.id, new_item.clone());
// FIXME(adotinthevoid): Currently, the index is duplicated. This is a sanity check
// to make sure the items are unique. The main place this happens is when an item, is
// reexported in more than one place. See `rustdoc-json/reexport/in_root_and_mod`
if let Some(old_item) = removed {
// In case of generic implementations (like `impl<T> Trait for T {}`), all the
// inner items will be duplicated so we can ignore if they are slightly different.
if !can_be_ignored {
assert_eq!(old_item, new_item);
match self.index.entry(new_item.id) {
Entry::Vacant(entry) => {
entry.insert(new_item);
}
Entry::Occupied(mut entry) => {
// In case of generic implementations (like `impl<T> Trait for T {}`), all the
// inner items will be duplicated so we can ignore if they are slightly
// different.
let old_item = entry.get_mut();
if !can_be_ignored {
assert_eq!(*old_item, new_item);
}
trace!("replaced {old_item:?}\nwith {new_item:?}");
*old_item = new_item;
}
trace!("replaced {old_item:?}\nwith {new_item:?}");
}
}
@ -296,11 +304,13 @@ impl<'tcx> FormatRenderer<'tcx> for JsonRenderer<'tcx> {
unreachable!("RUN_ON_MODULE = false, should never call mod_item_in")
}
fn after_krate(&mut self) -> Result<(), Error> {
fn after_krate(mut self) -> Result<(), Error> {
debug!("Done with crate");
let e = ExternalCrate { crate_num: LOCAL_CRATE };
let index = (*self.index).clone().into_inner();
// We've finished using the index, and don't want to clone it, because it is big.
let index = std::mem::take(&mut self.index);
// Note that tcx.rust_target_features is inappropriate here because rustdoc tries to run for
// multiple targets: https://github.com/rust-lang/rust/pull/137632
@ -325,7 +335,7 @@ impl<'tcx> FormatRenderer<'tcx> for JsonRenderer<'tcx> {
types::ItemSummary {
crate_id: k.krate.as_u32(),
path: path.iter().map(|s| s.to_string()).collect(),
kind: kind.into_json(self),
kind: kind.into_json(&self),
},
)
})

View file

@ -1,8 +1,7 @@
//@ normalize-stderr-test: "\b10000(08|16|32)\b" -> "100$$PTR"
//@ normalize-stderr-test: "\b2500(060|120)\b" -> "250$$PTR"
#![allow(unused, incomplete_features)]
#![allow(unused)]
#![warn(clippy::large_stack_frames)]
#![feature(unsized_locals)]
use std::hint::black_box;
@ -11,11 +10,6 @@ fn generic<T: Default>() {
black_box(&x);
}
fn unsized_local() {
let x: dyn std::fmt::Display = *(Box::new(1) as Box<dyn std::fmt::Display>);
black_box(&x);
}
struct ArrayDefault<const N: usize>([u8; N]);
impl<const N: usize> Default for ArrayDefault<N> {

View file

@ -1,5 +1,5 @@
error: this function may allocate 250$PTR bytes on the stack
--> tests/ui/large_stack_frames.rs:27:4
--> tests/ui/large_stack_frames.rs:21:4
|
LL | fn many_small_arrays() {
| ^^^^^^^^^^^^^^^^^
@ -13,7 +13,7 @@ LL | let x5 = [0u8; 500_000];
= help: to override `-D warnings` add `#[allow(clippy::large_stack_frames)]`
error: this function may allocate 1000000 bytes on the stack
--> tests/ui/large_stack_frames.rs:38:4
--> tests/ui/large_stack_frames.rs:32:4
|
LL | fn large_return_value() -> ArrayDefault<1_000_000> {
| ^^^^^^^^^^^^^^^^^^ ----------------------- this is the largest part, at 1000000 bytes for type `ArrayDefault<1000000>`
@ -21,7 +21,7 @@ LL | fn large_return_value() -> ArrayDefault<1_000_000> {
= note: 1000000 bytes is larger than Clippy's configured `stack-size-threshold` of 512000
error: this function may allocate 100$PTR bytes on the stack
--> tests/ui/large_stack_frames.rs:44:4
--> tests/ui/large_stack_frames.rs:38:4
|
LL | fn large_fn_arg(x: ArrayDefault<1_000_000>) {
| ^^^^^^^^^^^^ - `x` is the largest part, at 1000000 bytes for type `ArrayDefault<1000000>`
@ -29,7 +29,7 @@ LL | fn large_fn_arg(x: ArrayDefault<1_000_000>) {
= note: 100$PTR bytes is larger than Clippy's configured `stack-size-threshold` of 512000
error: this function may allocate 100$PTR bytes on the stack
--> tests/ui/large_stack_frames.rs:51:13
--> tests/ui/large_stack_frames.rs:45:13
|
LL | let f = || black_box(&[0u8; 1_000_000]);
| ^^^^^^^^^^^^^^----------------^

View file

@ -1 +1 @@
64033a4ee541c3e9c178fd593e979c74bb798cdc
0cbc0764380630780a275c437260e4d4d5f28c92

View file

@ -1,23 +0,0 @@
#![feature(unsized_locals)]
#![allow(incomplete_features)]
fn main() {
pub trait Foo {
fn foo(self) -> String;
}
struct A;
impl Foo for A {
fn foo(self) -> String {
format!("hello")
}
}
let x = *(Box::new(A) as Box<dyn Foo>); //~ERROR: unsized locals are not supported
assert_eq!(x.foo(), format!("hello"));
// I'm not sure whether we want this to work
let x = Box::new(A) as Box<dyn Foo>;
assert_eq!(x.foo(), format!("hello"));
}

View file

@ -1,14 +0,0 @@
error: unsupported operation: unsized locals are not supported
--> tests/fail/unsized-local.rs:LL:CC
|
LL | let x = *(Box::new(A) as Box<dyn Foo>);
| ^ unsupported operation occurred here
|
= help: this is likely not a bug in the program; it indicates that the program performed an operation that Miri does not support
= note: BACKTRACE:
= note: inside `main` at tests/fail/unsized-local.rs:LL:CC
note: some details are omitted, run with `MIRIFLAGS=-Zmiri-backtrace=full` for a verbose backtrace
error: aborting due to 1 previous error

View file

@ -754,7 +754,6 @@ ui/consts/issue-46553.rs
ui/consts/issue-47789.rs
ui/consts/issue-50439.rs
ui/consts/issue-52023-array-size-pointer-cast.rs
ui/consts/issue-54224.rs
ui/consts/issue-54348.rs
ui/consts/issue-54387.rs
ui/consts/issue-54582.rs

View file

@ -10,8 +10,8 @@
//@ compile-flags: -C opt-level=2 -Z merge-functions=disabled
#![crate_type = "lib"]
#![allow(incomplete_features)]
#![feature(unsized_locals, unsized_fn_params)]
#![allow(internal_features)]
#![feature(unsized_fn_params)]
// CHECK-LABEL: emptyfn:
#[no_mangle]
@ -357,27 +357,3 @@ pub fn unsized_fn_param(s: [u8], l: bool, f: fn([u8])) {
// none-NOT: __security_check_cookie
// missing-NOT: __security_check_cookie
}
// CHECK-LABEL: unsized_local
#[no_mangle]
pub fn unsized_local(s: &[u8], l: bool, f: fn(&mut [u8])) {
let n = if l { 1 } else { 2 };
let mut a: [u8] = *Box::<[u8]>::from(&s[0..n]); // slice-copy with Box::from
f(&mut a);
// This function allocates a slice as a local variable in its stack
// frame. Since the size is not a compile-time constant, an array
// alloca is required, and the function is protected by both the
// `strong` and `basic` heuristic.
// We should have a __security_check_cookie call in `all`, `strong` and `basic` modes but
// LLVM does not support generating stack protectors in functions with funclet
// based EH personalities.
// https://github.com/llvm/llvm-project/blob/37fd3c96b917096d8a550038f6e61cdf0fc4174f/llvm/lib/CodeGen/StackProtector.cpp#L103C1-L109C4
// all-NOT: __security_check_cookie
// strong-NOT: __security_check_cookie
// basic-NOT: __security_check_cookie
// none-NOT: __security_check_cookie
// missing-NOT: __security_check_cookie
}

View file

@ -10,8 +10,7 @@
//@ compile-flags: -C opt-level=2 -Z merge-functions=disabled
#![crate_type = "lib"]
#![allow(incomplete_features)]
#![feature(unsized_locals, unsized_fn_params)]
#![feature(unsized_fn_params)]
// CHECK-LABEL: emptyfn:
#[no_mangle]
@ -365,27 +364,3 @@ pub fn unsized_fn_param(s: [u8], l: bool, f: fn([u8])) {
// none-NOT: __security_check_cookie
// missing-NOT: __security_check_cookie
}
// CHECK-LABEL: unsized_local
#[no_mangle]
pub fn unsized_local(s: &[u8], l: bool, f: fn(&mut [u8])) {
let n = if l { 1 } else { 2 };
let mut a: [u8] = *Box::<[u8]>::from(&s[0..n]); // slice-copy with Box::from
f(&mut a);
// This function allocates a slice as a local variable in its stack
// frame. Since the size is not a compile-time constant, an array
// alloca is required, and the function is protected by both the
// `strong` and `basic` heuristic.
// We should have a __security_check_cookie call in `all`, `strong` and `basic` modes but
// LLVM does not support generating stack protectors in functions with funclet
// based EH personalities.
// https://github.com/llvm/llvm-project/blob/37fd3c96b917096d8a550038f6e61cdf0fc4174f/llvm/lib/CodeGen/StackProtector.cpp#L103C1-L109C4
// all-NOT: __security_check_cookie
// strong-NOT: __security_check_cookie
// basic-NOT: __security_check_cookie
// none-NOT: __security_check_cookie
// missing-NOT: __security_check_cookie
}

View file

@ -16,8 +16,8 @@
// See comments on https://github.com/rust-lang/rust/issues/114903.
#![crate_type = "lib"]
#![allow(incomplete_features)]
#![feature(unsized_locals, unsized_fn_params)]
#![allow(internal_features)]
#![feature(unsized_fn_params)]
// CHECK-LABEL: emptyfn{{:|\[}}
#[no_mangle]
@ -343,22 +343,3 @@ pub fn unsized_fn_param(s: [u8], l: bool, f: fn([u8])) {
// none-NOT: __stack_chk_fail
// missing-NOT: __stack_chk_fail
}
// CHECK-LABEL: unsized_local{{:|\[}}
#[no_mangle]
pub fn unsized_local(s: &[u8], l: bool, f: fn(&mut [u8])) {
let n = if l { 1 } else { 2 };
let mut a: [u8] = *Box::<[u8]>::from(&s[0..n]); // slice-copy with Box::from
f(&mut a);
// This function allocates a slice as a local variable in its stack
// frame. Since the size is not a compile-time constant, an array
// alloca is required, and the function is protected by both the
// `strong` and `basic` heuristic.
// all: __stack_chk_fail
// strong: __stack_chk_fail
// basic: __stack_chk_fail
// none-NOT: __stack_chk_fail
// missing-NOT: __stack_chk_fail
}

View file

@ -1,8 +1,8 @@
//@ compile-flags: -Copt-level=3
#![crate_type = "lib"]
#![allow(incomplete_features)]
#![feature(unsized_locals, unsized_fn_params)]
#![allow(internal_features)]
#![feature(unsized_fn_params)]
use std::cell::Cell;
use std::hint;

View file

@ -6,8 +6,6 @@
// CHECK: @vtable.2 = {{.*}}, !type ![[TYPE2:[0-9]+]], !vcall_visibility ![[VCALL_VIS2:[0-9]+]]
#![crate_type = "lib"]
#![allow(incomplete_features)]
#![feature(unsized_locals)]
use std::rc::Rc;

View file

@ -1,16 +0,0 @@
//@ known-bug: #79409
#![feature(extern_types)]
#![feature(unsized_locals)]
extern {
type Device;
}
unsafe fn make_device() -> Box<Device> {
Box::from_raw(0 as *mut _)
}
fn main() {
let d: Device = unsafe { *make_device() };
}

View file

@ -0,0 +1,11 @@
// Test to ensure that it generates expected output for `--output-format=doctest` command-line
// flag.
//@ compile-flags:-Z unstable-options --output-format=doctest
//@ normalize-stdout: "tests/rustdoc-ui" -> "$$DIR"
//@ check-pass
//! ```
//! let x = 12;
//! Ok(())
//! ```

View file

@ -0,0 +1 @@
{"format_version":2,"doctests":[{"file":"$DIR/extract-doctests-result.rs","line":8,"doctest_attributes":{"original":"","should_panic":false,"no_run":false,"ignore":"None","rust":true,"test_harness":false,"compile_fail":false,"standalone_crate":false,"error_codes":[],"edition":null,"added_css_classes":[],"unknown":[]},"original_code":"let x = 12;\nOk(())","doctest_code":{"crate_level":"#![allow(unused)]\n","code":"let x = 12;\nOk(())","wrapper":{"before":"fn main() { fn _inner() -> core::result::Result<(), impl core::fmt::Debug> {\n","after":"\n} _inner().unwrap() }","returns_result":true}},"name":"$DIR/extract-doctests-result.rs - (line 8)"}]}

View file

@ -1 +1 @@
{"format_version":1,"doctests":[{"file":"$DIR/extract-doctests.rs","line":8,"doctest_attributes":{"original":"ignore (checking attributes)","should_panic":false,"no_run":false,"ignore":"All","rust":true,"test_harness":false,"compile_fail":false,"standalone_crate":false,"error_codes":[],"edition":null,"added_css_classes":[],"unknown":[]},"original_code":"let x = 12;\nlet y = 14;","doctest_code":"#![allow(unused)]\nfn main() {\nlet x = 12;\nlet y = 14;\n}","name":"$DIR/extract-doctests.rs - (line 8)"},{"file":"$DIR/extract-doctests.rs","line":13,"doctest_attributes":{"original":"edition2018,compile_fail","should_panic":false,"no_run":true,"ignore":"None","rust":true,"test_harness":false,"compile_fail":true,"standalone_crate":false,"error_codes":[],"edition":"2018","added_css_classes":[],"unknown":[]},"original_code":"let","doctest_code":null,"name":"$DIR/extract-doctests.rs - (line 13)"}]}
{"format_version":2,"doctests":[{"file":"$DIR/extract-doctests.rs","line":8,"doctest_attributes":{"original":"ignore (checking attributes)","should_panic":false,"no_run":false,"ignore":"All","rust":true,"test_harness":false,"compile_fail":false,"standalone_crate":false,"error_codes":[],"edition":null,"added_css_classes":[],"unknown":[]},"original_code":"let x = 12;\nlet y = 14;","doctest_code":{"crate_level":"#![allow(unused)]\n","code":"let x = 12;\nlet y = 14;","wrapper":{"before":"fn main() {\n","after":"\n}","returns_result":false}},"name":"$DIR/extract-doctests.rs - (line 8)"},{"file":"$DIR/extract-doctests.rs","line":13,"doctest_attributes":{"original":"edition2018,compile_fail","should_panic":false,"no_run":true,"ignore":"None","rust":true,"test_harness":false,"compile_fail":true,"standalone_crate":false,"error_codes":[],"edition":"2018","added_css_classes":[],"unknown":[]},"original_code":"let","doctest_code":null,"name":"$DIR/extract-doctests.rs - (line 13)"}]}

View file

@ -0,0 +1,22 @@
// issue: <https://github.com/rust-lang/rust/issues/142473>
//
//@ run-rustfix
#![allow(unused)]
struct T();
trait Trait {
type Assoc;
fn f();
}
impl Trait for () {
type Assoc = T;
fn f() {
T();
//~^ ERROR no associated item named `Assoc` found for unit type `()` in the current scope
}
}
fn main() {}

View file

@ -0,0 +1,22 @@
// issue: <https://github.com/rust-lang/rust/issues/142473>
//
//@ run-rustfix
#![allow(unused)]
struct T();
trait Trait {
type Assoc;
fn f();
}
impl Trait for () {
type Assoc = T;
fn f() {
<Self>::Assoc();
//~^ ERROR no associated item named `Assoc` found for unit type `()` in the current scope
}
}
fn main() {}

Some files were not shown because too many files have changed in this diff Show more