Merge ref 'f957826bff' from rust-lang/rust

Pull recent changes from https://github.com/rust-lang/rust via Josh.

Upstream ref: f957826bff
Filtered ref: 60bcfdb370a3bae71714fc99a88aa9f2d2892733
Upstream diff: 848e6746fe...f957826bff

This merge was created using https://github.com/rust-lang/josh-sync.
This commit is contained in:
The Miri Cronjob Bot 2025-09-29 05:01:37 +00:00
commit 14d02ab2db
192 changed files with 3358 additions and 1241 deletions

View file

@ -31,8 +31,8 @@ bootstrapping, the compiler architecture, source code representation, and more.
## [Getting help](https://rustc-dev-guide.rust-lang.org/getting-started.html#asking-questions)
There are many ways you can get help when you're stuck. Rust has many platforms for this:
[internals], [rust-zulip], and [rust-discord]. It is recommended to ask for help on
There are many ways you can get help when you're stuck. Rust has two platforms for this:
[internals] and [rust-zulip]. It is recommended to ask for help on
the [rust-zulip], but either platform is a great way to seek help and even
find a mentor! You can learn more about asking questions and getting help in the
[Asking Questions](https://rustc-dev-guide.rust-lang.org/getting-started.html#asking-questions) chapter of the [rustc-dev-guide].
@ -47,5 +47,4 @@ refer to [this section][contributing-bug-reports] and [open an issue][issue temp
[contributing-bug-reports]: https://rustc-dev-guide.rust-lang.org/contributing.html#bug-reports
[issue template]: https://github.com/rust-lang/rust/issues/new/choose
[internals]: https://internals.rust-lang.org
[rust-discord]: http://discord.gg/rust-lang
[rust-zulip]: https://rust-lang.zulipchat.com

View file

@ -334,8 +334,10 @@ dependencies = [
"anyhow",
"build_helper",
"curl",
"hex",
"indexmap",
"serde",
"sha2",
"toml 0.8.23",
]
@ -5239,9 +5241,9 @@ dependencies = [
[[package]]
name = "stringdex"
version = "0.0.1-alpha9"
version = "0.0.1-alpha10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7081029913fd7d591c0112182aba8c98ae886b4f12edb208130496cd17dc3c15"
checksum = "0fa846a7d509d1828a4f90962dc09810e161abcada7fc6a921e92c168d0811d7"
dependencies = [
"stacker",
]

View file

@ -768,8 +768,7 @@
# make this default to false.
#rust.lld = false in all cases, except on `x86_64-unknown-linux-gnu` as described above, where it is true
# Indicates whether LLD will be used to link Rust crates during bootstrap on
# supported platforms.
# Indicates if we should override the linker used to link Rust crates during bootstrap to be LLD.
# If set to `true` or `"external"`, a global `lld` binary that has to be in $PATH
# will be used.
# If set to `"self-contained"`, rust-lld from the snapshot compiler will be used.
@ -777,7 +776,7 @@
# On MSVC, LLD will not be used if we're cross linking.
#
# Explicitly setting the linker for a target will override this option when targeting MSVC.
#rust.use-lld = false
#rust.bootstrap-override-lld = false
# Indicates whether some LLVM tools, like llvm-objdump, will be made available in the
# sysroot.
@ -950,7 +949,7 @@
# Linker to be used to bootstrap Rust code. Note that the
# default value is platform specific, and if not specified it may also depend on
# what platform is crossing to what platform.
# Setting this will override the `use-lld` option for Rust code when targeting MSVC.
# Setting this will override the `bootstrap-override-lld` option for Rust code when targeting MSVC.
#linker = "cc" (path)
# Should rustc and the standard library be built with split debuginfo? Default

View file

@ -6,6 +6,7 @@
use std::fmt::{self, Display, Formatter};
use std::str::FromStr;
use crate::expand::typetree::TypeTree;
use crate::expand::{Decodable, Encodable, HashStable_Generic};
use crate::{Ty, TyKind};
@ -84,6 +85,8 @@ pub struct AutoDiffItem {
/// The name of the function being generated
pub target: String,
pub attrs: AutoDiffAttrs,
pub inputs: Vec<TypeTree>,
pub output: TypeTree,
}
#[derive(Clone, Eq, PartialEq, Encodable, Decodable, Debug, HashStable_Generic)]
@ -275,14 +278,22 @@ impl AutoDiffAttrs {
!matches!(self.mode, DiffMode::Error | DiffMode::Source)
}
pub fn into_item(self, source: String, target: String) -> AutoDiffItem {
AutoDiffItem { source, target, attrs: self }
pub fn into_item(
self,
source: String,
target: String,
inputs: Vec<TypeTree>,
output: TypeTree,
) -> AutoDiffItem {
AutoDiffItem { source, target, inputs, output, attrs: self }
}
}
impl fmt::Display for AutoDiffItem {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "Differentiating {} -> {}", self.source, self.target)?;
write!(f, " with attributes: {:?}", self.attrs)
write!(f, " with attributes: {:?}", self.attrs)?;
write!(f, " with inputs: {:?}", self.inputs)?;
write!(f, " with output: {:?}", self.output)
}
}

View file

@ -31,6 +31,7 @@ pub enum Kind {
Half,
Float,
Double,
F128,
Unknown,
}

View file

@ -1383,6 +1383,7 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
_src_align: Align,
size: RValue<'gcc>,
flags: MemFlags,
_tt: Option<rustc_ast::expand::typetree::FncTree>, // Autodiff TypeTrees are LLVM-only, ignored in GCC backend
) {
assert!(!flags.contains(MemFlags::NONTEMPORAL), "non-temporal memcpy not supported");
let size = self.intcast(size, self.type_size_t(), false);

View file

@ -770,6 +770,7 @@ impl<'gcc, 'tcx> ArgAbiExt<'gcc, 'tcx> for ArgAbi<'tcx, Ty<'tcx>> {
scratch_align,
bx.const_usize(self.layout.size.bytes()),
MemFlags::empty(),
None,
);
bx.lifetime_end(scratch, scratch_size);

View file

@ -246,6 +246,7 @@ impl<'ll, 'tcx> ArgAbiExt<'ll, 'tcx> for ArgAbi<'tcx, Ty<'tcx>> {
scratch_align,
bx.const_usize(copy_bytes),
MemFlags::empty(),
None,
);
bx.lifetime_end(llscratch, scratch_size);
}

View file

@ -563,6 +563,8 @@ fn enable_autodiff_settings(ad: &[config::AutoDiff]) {
config::AutoDiff::Enable => {}
// We handle this below
config::AutoDiff::NoPostopt => {}
// Disables TypeTree generation
config::AutoDiff::NoTT => {}
}
}
// This helps with handling enums for now.

View file

@ -2,6 +2,7 @@ use std::borrow::{Borrow, Cow};
use std::ops::Deref;
use std::{iter, ptr};
use rustc_ast::expand::typetree::FncTree;
pub(crate) mod autodiff;
pub(crate) mod gpu_offload;
@ -1107,11 +1108,12 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
src_align: Align,
size: &'ll Value,
flags: MemFlags,
tt: Option<FncTree>,
) {
assert!(!flags.contains(MemFlags::NONTEMPORAL), "non-temporal memcpy not supported");
let size = self.intcast(size, self.type_isize(), false);
let is_volatile = flags.contains(MemFlags::VOLATILE);
unsafe {
let memcpy = unsafe {
llvm::LLVMRustBuildMemCpy(
self.llbuilder,
dst,
@ -1120,7 +1122,16 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
src_align.bytes() as c_uint,
size,
is_volatile,
);
)
};
// TypeTree metadata for memcpy is especially important: when Enzyme encounters
// a memcpy during autodiff, it needs to know the structure of the data being
// copied to properly track derivatives. For example, copying an array of floats
// vs. copying a struct with mixed types requires different derivative handling.
// The TypeTree tells Enzyme exactly what memory layout to expect.
if let Some(tt) = tt {
crate::typetree::add_tt(self.cx().llmod, self.cx().llcx, memcpy, tt);
}
}

View file

@ -1,6 +1,7 @@
use std::ptr;
use rustc_ast::expand::autodiff_attrs::{AutoDiffAttrs, DiffActivity, DiffMode};
use rustc_ast::expand::typetree::FncTree;
use rustc_codegen_ssa::common::TypeKind;
use rustc_codegen_ssa::traits::{BaseTypeCodegenMethods, BuilderMethods};
use rustc_middle::ty::{Instance, PseudoCanonicalInput, TyCtxt, TypingEnv};
@ -294,6 +295,7 @@ pub(crate) fn generate_enzyme_call<'ll, 'tcx>(
fn_args: &[&'ll Value],
attrs: AutoDiffAttrs,
dest: PlaceRef<'tcx, &'ll Value>,
fnc_tree: FncTree,
) {
// We have to pick the name depending on whether we want forward or reverse mode autodiff.
let mut ad_name: String = match attrs.mode {
@ -370,6 +372,10 @@ pub(crate) fn generate_enzyme_call<'ll, 'tcx>(
fn_args,
);
if !fnc_tree.args.is_empty() || !fnc_tree.ret.0.is_empty() {
crate::typetree::add_tt(cx.llmod, cx.llcx, fn_to_diff, fnc_tree);
}
let call = builder.call(enzyme_ty, None, None, ad_fn, &args, None, None);
builder.store_to_place(call, dest.val);

View file

@ -117,7 +117,7 @@ fn build_fixed_size_array_di_node<'ll, 'tcx>(
.try_to_target_usize(cx.tcx)
.expect("expected monomorphic const in codegen") as c_longlong;
let subrange = unsafe { llvm::LLVMRustDIBuilderGetOrCreateSubrange(DIB(cx), 0, upper_bound) };
let subrange = unsafe { llvm::LLVMDIBuilderGetOrCreateSubrange(DIB(cx), 0, upper_bound) };
let subscripts = &[subrange];
let di_node = unsafe {

View file

@ -52,15 +52,6 @@ mod utils;
use self::create_scope_map::compute_mir_scopes;
pub(crate) use self::metadata::build_global_var_di_node;
// FIXME(Zalathar): These `DW_TAG_*` constants are fake values that were
// removed from LLVM in 2015, and are only used by our own `RustWrapper.cpp`
// to decide which C++ API to call. Instead, we should just have two separate
// FFI functions and choose the correct one on the Rust side.
#[allow(non_upper_case_globals)]
const DW_TAG_auto_variable: c_uint = 0x100;
#[allow(non_upper_case_globals)]
const DW_TAG_arg_variable: c_uint = 0x101;
/// A context object for maintaining all state needed by the debuginfo module.
pub(crate) struct CodegenUnitDebugContext<'ll, 'tcx> {
llmod: &'ll llvm::Module,
@ -174,35 +165,38 @@ impl<'ll> DebugInfoBuilderMethods for Builder<'_, 'll, '_> {
if direct_offset.bytes() > 0 {
addr_ops.push(DW_OP_plus_uconst);
addr_ops.push(direct_offset.bytes() as u64);
addr_ops.push(direct_offset.bytes());
}
for &offset in indirect_offsets {
addr_ops.push(DW_OP_deref);
if offset.bytes() > 0 {
addr_ops.push(DW_OP_plus_uconst);
addr_ops.push(offset.bytes() as u64);
addr_ops.push(offset.bytes());
}
}
if let Some(fragment) = fragment {
// `DW_OP_LLVM_fragment` takes as arguments the fragment's
// offset and size, both of them in bits.
addr_ops.push(DW_OP_LLVM_fragment);
addr_ops.push(fragment.start.bits() as u64);
addr_ops.push((fragment.end - fragment.start).bits() as u64);
addr_ops.push(fragment.start.bits());
addr_ops.push((fragment.end - fragment.start).bits());
}
let di_builder = DIB(self.cx());
let addr_expr = unsafe {
llvm::LLVMDIBuilderCreateExpression(di_builder, addr_ops.as_ptr(), addr_ops.len())
};
unsafe {
// FIXME(eddyb) replace `llvm.dbg.declare` with `llvm.dbg.addr`.
llvm::LLVMRustDIBuilderInsertDeclareAtEnd(
DIB(self.cx()),
llvm::LLVMDIBuilderInsertDeclareRecordAtEnd(
di_builder,
variable_alloca,
dbg_var,
addr_ops.as_ptr(),
addr_ops.len() as c_uint,
addr_expr,
dbg_loc,
self.llbb(),
);
}
)
};
}
fn set_dbg_loc(&mut self, dbg_loc: &'ll DILocation) {
@ -630,28 +624,39 @@ impl<'ll, 'tcx> DebugInfoCodegenMethods<'tcx> for CodegenCx<'ll, 'tcx> {
let type_metadata = spanned_type_di_node(self, variable_type, span);
let (argument_index, dwarf_tag) = match variable_kind {
ArgumentVariable(index) => (index as c_uint, DW_TAG_arg_variable),
LocalVariable => (0, DW_TAG_auto_variable),
};
let align = self.align_of(variable_type);
let name = variable_name.as_str();
unsafe {
llvm::LLVMRustDIBuilderCreateVariable(
DIB(self),
dwarf_tag,
scope_metadata,
name.as_c_char_ptr(),
name.len(),
file_metadata,
loc.line,
type_metadata,
true,
DIFlags::FlagZero,
argument_index,
align.bits() as u32,
)
match variable_kind {
ArgumentVariable(arg_index) => unsafe {
llvm::LLVMDIBuilderCreateParameterVariable(
DIB(self),
scope_metadata,
name.as_ptr(),
name.len(),
arg_index as c_uint,
file_metadata,
loc.line,
type_metadata,
llvm::Bool::TRUE, // (preserve descriptor during optimizations)
DIFlags::FlagZero,
)
},
LocalVariable => unsafe {
llvm::LLVMDIBuilderCreateAutoVariable(
DIB(self),
scope_metadata,
name.as_ptr(),
name.len(),
file_metadata,
loc.line,
type_metadata,
llvm::Bool::TRUE, // (preserve descriptor during optimizations)
DIFlags::FlagZero,
align.bits() as u32,
)
},
}
}
}

View file

@ -28,7 +28,7 @@ pub(crate) fn create_DIArray<'ll>(
builder: &DIBuilder<'ll>,
arr: &[Option<&'ll DIDescriptor>],
) -> &'ll DIArray {
unsafe { llvm::LLVMRustDIBuilderGetOrCreateArray(builder, arr.as_ptr(), arr.len() as u32) }
unsafe { llvm::LLVMDIBuilderGetOrCreateArray(builder, arr.as_ptr(), arr.len()) }
}
#[inline]

View file

@ -1212,6 +1212,9 @@ fn codegen_autodiff<'ll, 'tcx>(
&mut diff_attrs.input_activity,
);
let fnc_tree =
rustc_middle::ty::fnc_typetrees(tcx, fn_source.ty(tcx, TypingEnv::fully_monomorphized()));
// Build body
generate_enzyme_call(
bx,
@ -1222,6 +1225,7 @@ fn codegen_autodiff<'ll, 'tcx>(
&val_arr,
diff_attrs.clone(),
result,
fnc_tree,
);
}

View file

@ -68,6 +68,7 @@ mod llvm_util;
mod mono_item;
mod type_;
mod type_of;
mod typetree;
mod va_arg;
mod value;

View file

@ -3,9 +3,36 @@
use libc::{c_char, c_uint};
use super::MetadataKindId;
use super::ffi::{AttributeKind, BasicBlock, Metadata, Module, Type, Value};
use super::ffi::{AttributeKind, BasicBlock, Context, Metadata, Module, Type, Value};
use crate::llvm::{Bool, Builder};
// TypeTree types
/// Raw owning pointer to an Enzyme-side type tree (an opaque C++ object).
pub(crate) type CTypeTreeRef = *mut EnzymeTypeTree;

/// Opaque FFI stand-in for Enzyme's C++ `TypeTree`; never constructed on the
/// Rust side (zero-sized field forbids instantiation, pointers only).
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub(crate) struct EnzymeTypeTree {
    _unused: [u8; 0],
}

/// Mirror of Enzyme's `CConcreteType` enum; the discriminants must stay in
/// sync with the C API. NOTE(review): values 7 and 8 are skipped here —
/// presumably reserved by upstream Enzyme, confirm against its headers.
#[repr(u32)]
#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
#[allow(non_camel_case_types)]
pub(crate) enum CConcreteType {
    DT_Anything = 0,
    DT_Integer = 1,
    DT_Pointer = 2,
    DT_Half = 3,
    DT_Float = 4,
    DT_Double = 5,
    DT_Unknown = 6,
    DT_FP128 = 9,
}

/// Owned wrapper around `CTypeTreeRef`; the matching `Drop` impl in this file
/// releases the Enzyme allocation via `EnzymeFreeTypeTree`.
pub(crate) struct TypeTree {
    pub(crate) inner: CTypeTreeRef,
}
#[link(name = "llvm-wrapper", kind = "static")]
unsafe extern "C" {
// Enzyme
@ -68,10 +95,40 @@ pub(crate) mod Enzyme_AD {
use libc::c_void;
use super::{CConcreteType, CTypeTreeRef, Context};
unsafe extern "C" {
pub(crate) fn EnzymeSetCLBool(arg1: *mut ::std::os::raw::c_void, arg2: u8);
pub(crate) fn EnzymeSetCLString(arg1: *mut ::std::os::raw::c_void, arg2: *const c_char);
}
// TypeTree functions
// Bindings to Enzyme's C TypeTree API. All returned `CTypeTreeRef`s are owned
// by the caller and must be released with `EnzymeFreeTypeTree` (the safe
// `TypeTree` wrapper does this in its `Drop` impl).
unsafe extern "C" {
    /// Allocates a fresh, empty type tree.
    pub(crate) fn EnzymeNewTypeTree() -> CTypeTreeRef;
    /// Allocates a single-node tree of concrete type `arg1` in context `ctx`.
    pub(crate) fn EnzymeNewTypeTreeCT(arg1: CConcreteType, ctx: &Context) -> CTypeTreeRef;
    /// Allocates a new tree from an existing one (used by `Clone`).
    pub(crate) fn EnzymeNewTypeTreeTR(arg1: CTypeTreeRef) -> CTypeTreeRef;
    /// Frees a tree previously returned by one of the constructors above.
    pub(crate) fn EnzymeFreeTypeTree(CTT: CTypeTreeRef);
    /// Merges `arg2` into `arg1` in place; the boolean result is unused here.
    pub(crate) fn EnzymeMergeTypeTree(arg1: CTypeTreeRef, arg2: CTypeTreeRef) -> bool;
    pub(crate) fn EnzymeTypeTreeOnlyEq(arg1: CTypeTreeRef, pos: i64);
    pub(crate) fn EnzymeTypeTreeData0Eq(arg1: CTypeTreeRef);
    // Note: the `Indicies` spelling matches the upstream Enzyme C symbol name
    // and must not be "fixed" on this side.
    pub(crate) fn EnzymeTypeTreeShiftIndiciesEq(
        arg1: CTypeTreeRef,
        data_layout: *const c_char,
        offset: i64,
        max_size: i64,
        add_offset: u64,
    );
    /// Inserts concrete type `ct` at the index path `indices[..len]`.
    pub(crate) fn EnzymeTypeTreeInsertEq(
        CTT: CTypeTreeRef,
        indices: *const i64,
        len: usize,
        ct: CConcreteType,
        ctx: &Context,
    );
    /// Renders the tree as a C string owned by Enzyme; release it with
    /// `EnzymeTypeTreeToStringFree`.
    pub(crate) fn EnzymeTypeTreeToString(arg1: CTypeTreeRef) -> *const c_char;
    pub(crate) fn EnzymeTypeTreeToStringFree(arg1: *const c_char);
}
unsafe extern "C" {
static mut EnzymePrintPerf: c_void;
static mut EnzymePrintActivity: c_void;
@ -141,6 +198,67 @@ pub(crate) use self::Fallback_AD::*;
pub(crate) mod Fallback_AD {
#![allow(unused_variables)]
use libc::c_char;
use super::{CConcreteType, CTypeTreeRef, Context};
// TypeTree function fallbacks: compiled when the `llvm_enzyme` feature is
// disabled. They exist only to keep callers compiling; actually reaching one
// at runtime is a bug (autodiff should not be invoked without Enzyme), hence
// `unimplemented!()` rather than a silent no-op.
pub(crate) unsafe fn EnzymeNewTypeTree() -> CTypeTreeRef {
    unimplemented!()
}

pub(crate) unsafe fn EnzymeNewTypeTreeCT(arg1: CConcreteType, ctx: &Context) -> CTypeTreeRef {
    unimplemented!()
}

pub(crate) unsafe fn EnzymeNewTypeTreeTR(arg1: CTypeTreeRef) -> CTypeTreeRef {
    unimplemented!()
}

pub(crate) unsafe fn EnzymeFreeTypeTree(CTT: CTypeTreeRef) {
    unimplemented!()
}

pub(crate) unsafe fn EnzymeMergeTypeTree(arg1: CTypeTreeRef, arg2: CTypeTreeRef) -> bool {
    unimplemented!()
}

pub(crate) unsafe fn EnzymeTypeTreeOnlyEq(arg1: CTypeTreeRef, pos: i64) {
    unimplemented!()
}

pub(crate) unsafe fn EnzymeTypeTreeData0Eq(arg1: CTypeTreeRef) {
    unimplemented!()
}

// Signature (including the upstream `Indicies` spelling) mirrors the real
// FFI declaration so both cfg branches stay interchangeable.
pub(crate) unsafe fn EnzymeTypeTreeShiftIndiciesEq(
    arg1: CTypeTreeRef,
    data_layout: *const c_char,
    offset: i64,
    max_size: i64,
    add_offset: u64,
) {
    unimplemented!()
}

pub(crate) unsafe fn EnzymeTypeTreeInsertEq(
    CTT: CTypeTreeRef,
    indices: *const i64,
    len: usize,
    ct: CConcreteType,
    ctx: &Context,
) {
    unimplemented!()
}

pub(crate) unsafe fn EnzymeTypeTreeToString(arg1: CTypeTreeRef) -> *const c_char {
    unimplemented!()
}

pub(crate) unsafe fn EnzymeTypeTreeToStringFree(arg1: *const c_char) {
    unimplemented!()
}
pub(crate) fn set_inline(val: bool) {
unimplemented!()
}
@ -169,3 +287,89 @@ pub(crate) mod Fallback_AD {
unimplemented!()
}
}
impl TypeTree {
    /// Allocates an empty Enzyme type tree. The handle is owned by `self`
    /// and released in `Drop`.
    pub(crate) fn new() -> TypeTree {
        let inner = unsafe { EnzymeNewTypeTree() };
        TypeTree { inner }
    }

    /// Builds a single-node tree of concrete type `t` in LLVM context `ctx`.
    pub(crate) fn from_type(t: CConcreteType, ctx: &Context) -> TypeTree {
        let inner = unsafe { EnzymeNewTypeTreeCT(t, ctx) };
        TypeTree { inner }
    }

    /// Merges `other` into `self` in place and returns `self`.
    /// NOTE(review): `EnzymeMergeTypeTree`'s boolean result is ignored here —
    /// presumably it only reports whether anything changed; confirm upstream.
    pub(crate) fn merge(self, other: Self) -> Self {
        unsafe {
            EnzymeMergeTypeTree(self.inner, other.inner);
        }
        // Explicit drop: `other`'s Drop impl frees its Enzyme-side allocation
        // now that its contents have been merged.
        drop(other);
        self
    }

    /// Shifts the tree's indices according to the target `layout` string,
    /// starting at `offset`; parameters are forwarded verbatim to Enzyme.
    #[must_use]
    pub(crate) fn shift(
        self,
        layout: &str,
        offset: isize,
        max_size: isize,
        add_offset: usize,
    ) -> Self {
        // CString conversion cannot fail unless `layout` contains an interior
        // NUL, which a data-layout string never does.
        let layout = std::ffi::CString::new(layout).unwrap();
        unsafe {
            EnzymeTypeTreeShiftIndiciesEq(
                self.inner,
                layout.as_ptr(),
                offset as i64,
                max_size as i64,
                add_offset as u64,
            );
        }
        self
    }

    /// Inserts concrete type `ct` at the index path `indices`.
    pub(crate) fn insert(&mut self, indices: &[i64], ct: CConcreteType, ctx: &Context) {
        unsafe {
            EnzymeTypeTreeInsertEq(self.inner, indices.as_ptr(), indices.len(), ct, ctx);
        }
    }
}
impl Clone for TypeTree {
    /// Produces an independent handle via `EnzymeNewTypeTreeTR` (presumably a
    /// deep copy on the Enzyme side), so each `TypeTree` can be freed
    /// separately in `Drop`.
    fn clone(&self) -> Self {
        Self { inner: unsafe { EnzymeNewTypeTreeTR(self.inner) } }
    }
}
impl std::fmt::Display for TypeTree {
    /// Prints Enzyme's textual rendering of the tree.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // Enzyme returns a C string that it owns; it must be released with
        // `EnzymeTypeTreeToStringFree` once copied into the formatter.
        let ptr = unsafe { EnzymeTypeTreeToString(self.inner) };
        let cstr = unsafe { std::ffi::CStr::from_ptr(ptr) };
        match cstr.to_str() {
            Ok(x) => write!(f, "{}", x)?,
            // Non-UTF-8 output is reported rather than panicking, so Display
            // stays usable in debug/log paths.
            Err(err) => write!(f, "could not parse: {}", err)?,
        }
        // delete C string pointer
        unsafe {
            EnzymeTypeTreeToStringFree(ptr);
        }
        Ok(())
    }
}
impl std::fmt::Debug for TypeTree {
    /// Debug output is identical to Display: both show Enzyme's string form.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        std::fmt::Display::fmt(self, f)
    }
}
impl Drop for TypeTree {
    fn drop(&mut self) {
        // SAFETY(review): `inner` is assumed to be a live handle uniquely
        // owned by this wrapper (Clone allocates a fresh handle, so no
        // double-free) — confirm `EnzymeNewTypeTreeTR` copies, not aliases.
        unsafe { EnzymeFreeTypeTree(self.inner) }
    }
}

View file

@ -25,8 +25,8 @@ use rustc_target::spec::SymbolVisibility;
use super::RustString;
use super::debuginfo::{
DIArray, DIBuilder, DIDerivedType, DIDescriptor, DIEnumerator, DIFile, DIFlags,
DIGlobalVariableExpression, DILocation, DISPFlags, DIScope, DISubprogram, DISubrange,
DITemplateTypeParameter, DIType, DIVariable, DebugEmissionKind, DebugNameTableKind,
DIGlobalVariableExpression, DILocation, DISPFlags, DIScope, DISubprogram,
DITemplateTypeParameter, DIType, DebugEmissionKind, DebugNameTableKind,
};
use crate::llvm;
@ -807,6 +807,8 @@ unsafe extern "C" {
pub(crate) type Metadata;
pub(crate) type BasicBlock;
pub(crate) type Comdat;
/// `&'ll DbgRecord` represents `LLVMDbgRecordRef`.
pub(crate) type DbgRecord;
}
#[repr(C)]
pub(crate) struct Builder<'a>(InvariantOpaque<'a>);
@ -891,7 +893,6 @@ pub(crate) mod debuginfo {
pub(crate) type DIVariable = DIDescriptor;
pub(crate) type DIGlobalVariableExpression = DIDescriptor;
pub(crate) type DIArray = DIDescriptor;
pub(crate) type DISubrange = DIDescriptor;
pub(crate) type DIEnumerator = DIDescriptor;
pub(crate) type DITemplateTypeParameter = DIDescriptor;
@ -1992,6 +1993,59 @@ unsafe extern "C" {
Scope: Option<&'ll Metadata>,
AlignInBits: u32, // (optional; default is 0)
) -> &'ll Metadata;
pub(crate) fn LLVMDIBuilderGetOrCreateSubrange<'ll>(
Builder: &DIBuilder<'ll>,
LowerBound: i64,
Count: i64,
) -> &'ll Metadata;
pub(crate) fn LLVMDIBuilderGetOrCreateArray<'ll>(
Builder: &DIBuilder<'ll>,
Data: *const Option<&'ll Metadata>,
NumElements: size_t,
) -> &'ll Metadata;
pub(crate) fn LLVMDIBuilderCreateExpression<'ll>(
Builder: &DIBuilder<'ll>,
Addr: *const u64,
Length: size_t,
) -> &'ll Metadata;
pub(crate) fn LLVMDIBuilderInsertDeclareRecordAtEnd<'ll>(
Builder: &DIBuilder<'ll>,
Storage: &'ll Value,
VarInfo: &'ll Metadata,
Expr: &'ll Metadata,
DebugLoc: &'ll Metadata,
Block: &'ll BasicBlock,
) -> &'ll DbgRecord;
pub(crate) fn LLVMDIBuilderCreateAutoVariable<'ll>(
Builder: &DIBuilder<'ll>,
Scope: &'ll Metadata,
Name: *const c_uchar, // See "PTR_LEN_STR".
NameLen: size_t,
File: &'ll Metadata,
LineNo: c_uint,
Ty: &'ll Metadata,
AlwaysPreserve: llvm::Bool, // "If true, this descriptor will survive optimizations."
Flags: DIFlags,
AlignInBits: u32,
) -> &'ll Metadata;
pub(crate) fn LLVMDIBuilderCreateParameterVariable<'ll>(
Builder: &DIBuilder<'ll>,
Scope: &'ll Metadata,
Name: *const c_uchar, // See "PTR_LEN_STR".
NameLen: size_t,
ArgNo: c_uint,
File: &'ll Metadata,
LineNo: c_uint,
Ty: &'ll Metadata,
AlwaysPreserve: llvm::Bool, // "If true, this descriptor will survive optimizations."
Flags: DIFlags,
) -> &'ll Metadata;
}
#[link(name = "llvm-wrapper", kind = "static")]
@ -2358,43 +2412,6 @@ unsafe extern "C" {
AlignInBits: u32,
) -> &'a DIGlobalVariableExpression;
pub(crate) fn LLVMRustDIBuilderCreateVariable<'a>(
Builder: &DIBuilder<'a>,
Tag: c_uint,
Scope: &'a DIDescriptor,
Name: *const c_char,
NameLen: size_t,
File: &'a DIFile,
LineNo: c_uint,
Ty: &'a DIType,
AlwaysPreserve: bool,
Flags: DIFlags,
ArgNo: c_uint,
AlignInBits: u32,
) -> &'a DIVariable;
pub(crate) fn LLVMRustDIBuilderGetOrCreateSubrange<'a>(
Builder: &DIBuilder<'a>,
Lo: i64,
Count: i64,
) -> &'a DISubrange;
pub(crate) fn LLVMRustDIBuilderGetOrCreateArray<'a>(
Builder: &DIBuilder<'a>,
Ptr: *const Option<&'a DIDescriptor>,
Count: c_uint,
) -> &'a DIArray;
pub(crate) fn LLVMRustDIBuilderInsertDeclareAtEnd<'a>(
Builder: &DIBuilder<'a>,
Val: &'a Value,
VarInfo: &'a DIVariable,
AddrOps: *const u64,
AddrOpsCount: c_uint,
DL: &'a DILocation,
InsertAtEnd: &'a BasicBlock,
);
pub(crate) fn LLVMRustDIBuilderCreateEnumerator<'a>(
Builder: &DIBuilder<'a>,
Name: *const c_char,

View file

@ -0,0 +1,122 @@
use rustc_ast::expand::typetree::FncTree;
#[cfg(feature = "llvm_enzyme")]
use {
crate::attributes,
rustc_ast::expand::typetree::TypeTree as RustTypeTree,
std::ffi::{CString, c_char, c_uint},
};
use crate::llvm::{self, Value};
/// Converts a rustc [`RustTypeTree`] into an Enzyme-owned `llvm::TypeTree`.
///
/// `_data_layout` is currently unused; callers still thread the module's
/// data-layout string through (presumably for future offset computations —
/// TODO confirm before removing the parameter).
#[cfg(feature = "llvm_enzyme")]
fn to_enzyme_typetree(
    rust_typetree: RustTypeTree,
    _data_layout: &str,
    llcx: &llvm::Context,
) -> llvm::TypeTree {
    let mut enzyme_tt = llvm::TypeTree::new();
    // Start at the root with an empty index path.
    process_typetree_recursive(&mut enzyme_tt, &rust_typetree, &[], llcx);
    enzyme_tt
}
/// Recursively lowers a rustc [`RustTypeTree`] into `enzyme_tt`, inserting
/// one node per `(index path, concrete type)` pair.
///
/// `parent_indices` is the index path of the enclosing node; each child
/// extends it with its own byte offset (`-1` is Enzyme's "every offset"
/// marker — TODO confirm against Enzyme's TypeTree docs).
#[cfg(feature = "llvm_enzyme")]
fn process_typetree_recursive(
    enzyme_tt: &mut llvm::TypeTree,
    rust_typetree: &RustTypeTree,
    parent_indices: &[i64],
    llcx: &llvm::Context,
) {
    for rust_type in &rust_typetree.0 {
        // Map rustc's kind onto Enzyme's concrete-type enum.
        let concrete_type = match rust_type.kind {
            rustc_ast::expand::typetree::Kind::Anything => llvm::CConcreteType::DT_Anything,
            rustc_ast::expand::typetree::Kind::Integer => llvm::CConcreteType::DT_Integer,
            rustc_ast::expand::typetree::Kind::Pointer => llvm::CConcreteType::DT_Pointer,
            rustc_ast::expand::typetree::Kind::Half => llvm::CConcreteType::DT_Half,
            rustc_ast::expand::typetree::Kind::Float => llvm::CConcreteType::DT_Float,
            rustc_ast::expand::typetree::Kind::Double => llvm::CConcreteType::DT_Double,
            rustc_ast::expand::typetree::Kind::F128 => llvm::CConcreteType::DT_FP128,
            rustc_ast::expand::typetree::Kind::Unknown => llvm::CConcreteType::DT_Unknown,
        };

        // Extend the index path with this node's offset. An earlier version
        // special-cased `offset == -1` at the root, but that branch pushed
        // exactly the same value as the generic cast, so a single push is
        // equivalent.
        let mut indices = parent_indices.to_vec();
        indices.push(rust_type.offset as i64);

        enzyme_tt.insert(&indices, concrete_type, llcx);

        // Only pointers carry a nested tree describing the pointee's layout.
        if rust_type.kind == rustc_ast::expand::typetree::Kind::Pointer
            && !rust_type.child.0.is_empty()
        {
            process_typetree_recursive(enzyme_tt, &rust_type.child, &indices, llcx);
        }
    }
}
/// Attaches `enzyme_type` string attributes, derived from `tt`, to every
/// argument and to the return value of `fn_def`, so Enzyme's type analysis
/// knows the memory layout of each parameter during autodiff.
#[cfg(feature = "llvm_enzyme")]
pub(crate) fn add_tt<'ll>(
    llmod: &'ll llvm::Module,
    llcx: &'ll llvm::Context,
    fn_def: &'ll Value,
    tt: FncTree,
) {
    let inputs = tt.args;
    let ret_tt: RustTypeTree = tt.ret;
    // Fetch the module's data-layout string (owned by LLVM, borrowed here);
    // it is threaded into `to_enzyme_typetree`, which currently ignores it.
    let llvm_data_layout: *const c_char = unsafe { llvm::LLVMGetDataLayoutStr(&*llmod) };
    let llvm_data_layout =
        std::str::from_utf8(unsafe { std::ffi::CStr::from_ptr(llvm_data_layout) }.to_bytes())
            .expect("got a non-UTF8 data-layout from LLVM");
    let attr_name = "enzyme_type";
    let c_attr_name = CString::new(attr_name).unwrap();
    for (i, input) in inputs.iter().enumerate() {
        unsafe {
            // Render the per-argument tree to Enzyme's textual form and attach
            // it as a string attribute on argument `i`.
            let enzyme_tt = to_enzyme_typetree(input.clone(), llvm_data_layout, llcx);
            let c_str = llvm::EnzymeTypeTreeToString(enzyme_tt.inner);
            let c_str = std::ffi::CStr::from_ptr(c_str);
            let attr = llvm::LLVMCreateStringAttribute(
                llcx,
                c_attr_name.as_ptr(),
                c_attr_name.as_bytes().len() as c_uint,
                c_str.as_ptr(),
                c_str.to_bytes().len() as c_uint,
            );
            attributes::apply_to_llfn(fn_def, llvm::AttributePlace::Argument(i as u32), &[attr]);
            // The rendered string is owned by Enzyme; release it once LLVM
            // has copied it into the attribute.
            llvm::EnzymeTypeTreeToStringFree(c_str.as_ptr());
        }
    }
    unsafe {
        // Same procedure for the return-value type tree.
        let enzyme_tt = to_enzyme_typetree(ret_tt, llvm_data_layout, llcx);
        let c_str = llvm::EnzymeTypeTreeToString(enzyme_tt.inner);
        let c_str = std::ffi::CStr::from_ptr(c_str);
        let ret_attr = llvm::LLVMCreateStringAttribute(
            llcx,
            c_attr_name.as_ptr(),
            c_attr_name.as_bytes().len() as c_uint,
            c_str.as_ptr(),
            c_str.to_bytes().len() as c_uint,
        );
        attributes::apply_to_llfn(fn_def, llvm::AttributePlace::ReturnValue, &[ret_attr]);
        llvm::EnzymeTypeTreeToStringFree(c_str.as_ptr());
    }
}
/// Build without Enzyme: this stub only keeps call sites compiling.
/// Reaching it at runtime should be impossible (presumably autodiff codegen
/// is gated on `llvm_enzyme` — confirm at the call sites), hence
/// `unimplemented!()` rather than a silent no-op.
#[cfg(not(feature = "llvm_enzyme"))]
pub(crate) fn add_tt<'ll>(
    _llmod: &'ll llvm::Module,
    _llcx: &'ll llvm::Context,
    _fn_def: &'ll Value,
    _tt: FncTree,
) {
    unimplemented!()
}

View file

@ -738,6 +738,7 @@ fn copy_to_temporary_if_more_aligned<'ll, 'tcx>(
src_align,
bx.const_u32(layout.layout.size().bytes() as u32),
MemFlags::empty(),
None,
);
tmp
} else {

View file

@ -1626,6 +1626,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
align,
bx.const_usize(copy_bytes),
MemFlags::empty(),
None,
);
// ...and then load it with the ABI type.
llval = load_cast(bx, cast, llscratch, scratch_align);

View file

@ -30,7 +30,7 @@ fn copy_intrinsic<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
if allow_overlap {
bx.memmove(dst, align, src, align, size, flags);
} else {
bx.memcpy(dst, align, src, align, size, flags);
bx.memcpy(dst, align, src, align, size, flags, None);
}
}

View file

@ -90,7 +90,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
let align = pointee_layout.align;
let dst = dst_val.immediate();
let src = src_val.immediate();
bx.memcpy(dst, align, src, align, bytes, crate::MemFlags::empty());
bx.memcpy(dst, align, src, align, bytes, crate::MemFlags::empty(), None);
}
mir::StatementKind::FakeRead(..)
| mir::StatementKind::Retag { .. }

View file

@ -451,6 +451,7 @@ pub trait BuilderMethods<'a, 'tcx>:
src_align: Align,
size: Self::Value,
flags: MemFlags,
tt: Option<rustc_ast::expand::typetree::FncTree>,
);
fn memmove(
&mut self,
@ -507,7 +508,7 @@ pub trait BuilderMethods<'a, 'tcx>:
temp.val.store_with_flags(self, dst.with_type(layout), flags);
} else if !layout.is_zst() {
let bytes = self.const_usize(layout.size.bytes());
self.memcpy(dst.llval, dst.align, src.llval, src.align, bytes, flags);
self.memcpy(dst.llval, dst.align, src.llval, src.align, bytes, flags, None);
}
}

View file

@ -550,7 +550,7 @@ declare_features! (
/// Allows fused `loop`/`match` for direct intraprocedural jumps.
(incomplete, loop_match, "1.90.0", Some(132306)),
/// Allow `macro_rules!` attribute rules
(unstable, macro_attr, "1.91.0", Some(83527)),
(unstable, macro_attr, "1.91.0", Some(143547)),
/// Allow `macro_rules!` derive rules
(unstable, macro_derive, "1.91.0", Some(143549)),
/// Give access to additional metadata about declarative macro meta-variables.

View file

@ -440,6 +440,7 @@ language_item_table! {
// Reborrowing related lang-items
Reborrow, sym::reborrow, reborrow, Target::Trait, GenericRequirement::Exact(0);
CoerceShared, sym::coerce_shared, coerce_shared, Target::Trait, GenericRequirement::Exact(0);
}
/// The requirement imposed on the generics of a lang item

View file

@ -765,7 +765,7 @@ fn test_unstable_options_tracking_hash() {
tracked!(allow_features, Some(vec![String::from("lang_items")]));
tracked!(always_encode_mir, true);
tracked!(assume_incomplete_release, true);
tracked!(autodiff, vec![AutoDiff::Enable]);
tracked!(autodiff, vec![AutoDiff::Enable, AutoDiff::NoTT]);
tracked!(binary_dep_depinfo, true);
tracked!(box_noalias, false);
tracked!(

View file

@ -569,25 +569,43 @@ extern "C" LLVMRustResult LLVMRustOptimize(
}
std::optional<PGOOptions> PGOOpt;
#if LLVM_VERSION_LT(22, 0)
auto FS = vfs::getRealFileSystem();
#endif
if (PGOGenPath) {
assert(!PGOUsePath && !PGOSampleUsePath);
PGOOpt = PGOOptions(
#if LLVM_VERSION_GE(22, 0)
PGOGenPath, "", "", "", PGOOptions::IRInstr, PGOOptions::NoCSAction,
#else
PGOGenPath, "", "", "", FS, PGOOptions::IRInstr, PGOOptions::NoCSAction,
#endif
PGOOptions::ColdFuncOpt::Default, DebugInfoForProfiling);
} else if (PGOUsePath) {
assert(!PGOSampleUsePath);
PGOOpt = PGOOptions(
#if LLVM_VERSION_GE(22, 0)
PGOUsePath, "", "", "", PGOOptions::IRUse, PGOOptions::NoCSAction,
#else
PGOUsePath, "", "", "", FS, PGOOptions::IRUse, PGOOptions::NoCSAction,
#endif
PGOOptions::ColdFuncOpt::Default, DebugInfoForProfiling);
} else if (PGOSampleUsePath) {
PGOOpt =
#if LLVM_VERSION_GE(22, 0)
PGOOptions(PGOSampleUsePath, "", "", "", PGOOptions::SampleUse,
#else
PGOOptions(PGOSampleUsePath, "", "", "", FS, PGOOptions::SampleUse,
#endif
PGOOptions::NoCSAction, PGOOptions::ColdFuncOpt::Default,
DebugInfoForProfiling);
} else if (DebugInfoForProfiling) {
PGOOpt = PGOOptions(
#if LLVM_VERSION_GE(22, 0)
"", "", "", "", PGOOptions::NoAction, PGOOptions::NoCSAction,
#else
"", "", "", "", FS, PGOOptions::NoAction, PGOOptions::NoCSAction,
#endif
PGOOptions::ColdFuncOpt::Default, DebugInfoForProfiling);
}

View file

@ -990,14 +990,6 @@ extern "C" void LLVMRustGlobalAddMetadata(LLVMValueRef Global, unsigned Kind,
unwrap<GlobalObject>(Global)->addMetadata(Kind, *unwrap<MDNode>(MD));
}
extern "C" LLVMDIBuilderRef LLVMRustDIBuilderCreate(LLVMModuleRef M) {
return wrap(new DIBuilder(*unwrap(M)));
}
extern "C" void LLVMRustDIBuilderDispose(LLVMDIBuilderRef Builder) {
delete unwrap(Builder);
}
extern "C" LLVMMetadataRef LLVMRustDIBuilderCreateCompileUnit(
LLVMDIBuilderRef Builder, unsigned Lang, LLVMMetadataRef FileRef,
const char *Producer, size_t ProducerLen, bool isOptimized,
@ -1129,51 +1121,6 @@ extern "C" LLVMMetadataRef LLVMRustDIBuilderCreateStaticVariable(
return wrap(VarExpr);
}
extern "C" LLVMMetadataRef LLVMRustDIBuilderCreateVariable(
LLVMDIBuilderRef Builder, unsigned Tag, LLVMMetadataRef Scope,
const char *Name, size_t NameLen, LLVMMetadataRef File, unsigned LineNo,
LLVMMetadataRef Ty, bool AlwaysPreserve, LLVMDIFlags Flags, unsigned ArgNo,
uint32_t AlignInBits) {
if (Tag == 0x100) { // DW_TAG_auto_variable
return wrap(unwrap(Builder)->createAutoVariable(
unwrapDI<DIDescriptor>(Scope), StringRef(Name, NameLen),
unwrapDI<DIFile>(File), LineNo, unwrapDI<DIType>(Ty), AlwaysPreserve,
fromRust(Flags), AlignInBits));
} else {
return wrap(unwrap(Builder)->createParameterVariable(
unwrapDI<DIDescriptor>(Scope), StringRef(Name, NameLen), ArgNo,
unwrapDI<DIFile>(File), LineNo, unwrapDI<DIType>(Ty), AlwaysPreserve,
fromRust(Flags)));
}
}
extern "C" LLVMMetadataRef
LLVMRustDIBuilderGetOrCreateSubrange(LLVMDIBuilderRef Builder, int64_t Lo,
int64_t Count) {
return wrap(unwrap(Builder)->getOrCreateSubrange(Lo, Count));
}
extern "C" LLVMMetadataRef
LLVMRustDIBuilderGetOrCreateArray(LLVMDIBuilderRef Builder,
LLVMMetadataRef *Ptr, unsigned Count) {
Metadata **DataValue = unwrap(Ptr);
return wrap(unwrap(Builder)
->getOrCreateArray(ArrayRef<Metadata *>(DataValue, Count))
.get());
}
extern "C" void
LLVMRustDIBuilderInsertDeclareAtEnd(LLVMDIBuilderRef Builder, LLVMValueRef V,
LLVMMetadataRef VarInfo, uint64_t *AddrOps,
unsigned AddrOpsCount, LLVMMetadataRef DL,
LLVMBasicBlockRef InsertAtEnd) {
unwrap(Builder)->insertDeclare(
unwrap(V), unwrap<DILocalVariable>(VarInfo),
unwrap(Builder)->createExpression(
llvm::ArrayRef<uint64_t>(AddrOps, AddrOpsCount)),
DebugLoc(cast<MDNode>(unwrap(DL))), unwrap(InsertAtEnd));
}
extern "C" LLVMMetadataRef
LLVMRustDIBuilderCreateEnumerator(LLVMDIBuilderRef Builder, const char *Name,
size_t NameLen, const uint64_t Value[2],
@ -1865,3 +1812,15 @@ extern "C" void LLVMRustSetNoSanitizeHWAddress(LLVMValueRef Global) {
MD.NoHWAddress = true;
GV.setSanitizerMetadata(MD);
}
// Reports Enzyme's maximum type-analysis recursion depth to the Rust side.
#ifdef ENZYME
extern "C" {
// Command-line option defined inside Enzyme itself.
extern llvm::cl::opt<unsigned> EnzymeMaxTypeDepth;
}
extern "C" size_t LLVMRustEnzymeGetMaxTypeDepth() { return EnzymeMaxTypeDepth; }
#else
extern "C" size_t LLVMRustEnzymeGetMaxTypeDepth() {
  // NOTE(review): this fallback appears to mirror the Rust-side
  // MAX_TYPETREE_DEPTH constant (also 6) — keep the two in sync.
  return 6; // Default fallback depth
}
#endif

View file

@ -1555,7 +1555,7 @@ impl<'a> CrateMetadataRef<'a> {
}
#[inline]
fn def_path_hash_to_def_index(self, hash: DefPathHash) -> DefIndex {
fn def_path_hash_to_def_index(self, hash: DefPathHash) -> Option<DefIndex> {
self.def_path_hash_map.def_path_hash_to_def_index(&hash)
}

View file

@ -691,8 +691,8 @@ fn provide_cstore_hooks(providers: &mut Providers) {
.get(&stable_crate_id)
.unwrap_or_else(|| bug!("uninterned StableCrateId: {stable_crate_id:?}"));
assert_ne!(cnum, LOCAL_CRATE);
let def_index = cstore.get_crate_data(cnum).def_path_hash_to_def_index(hash);
DefId { krate: cnum, index: def_index }
let def_index = cstore.get_crate_data(cnum).def_path_hash_to_def_index(hash)?;
Some(DefId { krate: cnum, index: def_index })
};
providers.hooks.expn_hash_to_expn_id = |tcx, cnum, index_guess, hash| {

View file

@ -12,11 +12,12 @@ pub(crate) enum DefPathHashMapRef<'tcx> {
impl DefPathHashMapRef<'_> {
#[inline]
pub(crate) fn def_path_hash_to_def_index(&self, def_path_hash: &DefPathHash) -> DefIndex {
pub(crate) fn def_path_hash_to_def_index(
&self,
def_path_hash: &DefPathHash,
) -> Option<DefIndex> {
match *self {
DefPathHashMapRef::OwnedFromMetadata(ref map) => {
map.get(&def_path_hash.local_hash()).unwrap()
}
DefPathHashMapRef::OwnedFromMetadata(ref map) => map.get(&def_path_hash.local_hash()),
DefPathHashMapRef::BorrowedFromTcx(_) => {
panic!("DefPathHashMap::BorrowedFromTcx variant only exists for serialization")
}

View file

@ -37,7 +37,6 @@ pub(crate) struct OpaqueHiddenTypeMismatch<'tcx> {
pub sub: TypeMismatchReason,
}
// FIXME(autodiff): I should get used somewhere
#[derive(Diagnostic)]
#[diag(middle_unsupported_union)]
pub struct UnsupportedUnion {

View file

@ -77,7 +77,7 @@ declare_hooks! {
/// session, if it still exists. This is used during incremental compilation to
/// turn a deserialized `DefPathHash` into its current `DefId`.
/// Will fetch a DefId from a DefPathHash for a foreign crate.
hook def_path_hash_to_def_id_extern(hash: DefPathHash, stable_crate_id: StableCrateId) -> DefId;
hook def_path_hash_to_def_id_extern(hash: DefPathHash, stable_crate_id: StableCrateId) -> Option<DefId>;
/// Returns `true` if we should codegen an instance in the local crate, or returns `false` if we
/// can just link to the upstream crate and therefore don't need a mono item.

View file

@ -2012,7 +2012,7 @@ impl<'tcx> TyCtxt<'tcx> {
if stable_crate_id == self.stable_crate_id(LOCAL_CRATE) {
Some(self.untracked.definitions.read().local_def_path_hash_to_def_id(hash)?.to_def_id())
} else {
Some(self.def_path_hash_to_def_id_extern(hash, stable_crate_id))
self.def_path_hash_to_def_id_extern(hash, stable_crate_id)
}
}

View file

@ -25,6 +25,7 @@ pub use generic_args::{GenericArgKind, TermKind, *};
pub use generics::*;
pub use intrinsic::IntrinsicDef;
use rustc_abi::{Align, FieldIdx, Integer, IntegerType, ReprFlags, ReprOptions, VariantIdx};
use rustc_ast::expand::typetree::{FncTree, Kind, Type, TypeTree};
use rustc_ast::node_id::NodeMap;
pub use rustc_ast_ir::{Movability, Mutability, try_visit};
use rustc_data_structures::fx::{FxHashMap, FxHashSet, FxIndexMap, FxIndexSet};
@ -62,7 +63,7 @@ pub use rustc_type_ir::solve::SizedTraitKind;
pub use rustc_type_ir::*;
#[allow(hidden_glob_reexports, unused_imports)]
use rustc_type_ir::{InferCtxtLike, Interner};
use tracing::{debug, instrument};
use tracing::{debug, instrument, trace};
pub use vtable::*;
use {rustc_ast as ast, rustc_hir as hir};
@ -2216,3 +2217,225 @@ pub struct DestructuredConst<'tcx> {
pub variant: Option<VariantIdx>,
pub fields: &'tcx [ty::Const<'tcx>],
}
/// Generate TypeTree information for autodiff.
/// This function creates TypeTree metadata that describes the memory layout
/// of function parameters and return types for Enzyme autodiff.
pub fn fnc_typetrees<'tcx>(tcx: TyCtxt<'tcx>, fn_ty: Ty<'tcx>) -> FncTree {
// Check if TypeTrees are disabled via NoTT flag
if tcx.sess.opts.unstable_opts.autodiff.contains(&rustc_session::config::AutoDiff::NoTT) {
return FncTree { args: vec![], ret: TypeTree::new() };
}
// Check if this is actually a function type
if !fn_ty.is_fn() {
return FncTree { args: vec![], ret: TypeTree::new() };
}
// Get the function signature
let fn_sig = fn_ty.fn_sig(tcx);
let sig = tcx.instantiate_bound_regions_with_erased(fn_sig);
// Create TypeTrees for each input parameter
let mut args = vec![];
for ty in sig.inputs().iter() {
let type_tree = typetree_from_ty(tcx, *ty);
args.push(type_tree);
}
// Create TypeTree for return type
let ret = typetree_from_ty(tcx, sig.output());
FncTree { args, ret }
}
/// Build the TypeTree describing the memory layout of a single type.
///
/// Entry point for per-type analysis: starts the recursion at depth zero
/// with an empty cycle-detection path.
pub fn typetree_from_ty<'tcx>(tcx: TyCtxt<'tcx>, ty: Ty<'tcx>) -> TypeTree {
    typetree_from_ty_inner(tcx, ty, 0, &mut Vec::new())
}
/// Maximum recursion depth for TypeTree generation to prevent stack overflow
/// from pathological deeply nested types. Combined with cycle detection.
const MAX_TYPETREE_DEPTH: usize = 6;

/// Internal recursive function for TypeTree generation with cycle detection and depth limiting.
///
/// Returns an empty `TypeTree` when either guard trips: the recursion depth
/// reaches [`MAX_TYPETREE_DEPTH`], or `ty` already occurs on the current
/// recursion path (a type cycle, e.g. via a self-referential pointer).
fn typetree_from_ty_inner<'tcx>(
    tcx: TyCtxt<'tcx>,
    ty: Ty<'tcx>,
    depth: usize,
    visited: &mut Vec<Ty<'tcx>>,
) -> TypeTree {
    if depth >= MAX_TYPETREE_DEPTH {
        trace!("typetree depth limit {} reached for type: {}", MAX_TYPETREE_DEPTH, ty);
        return TypeTree::new();
    }
    // `visited` holds only the types on the *current* recursion path
    // (push before descending, pop after), so sibling occurrences of the
    // same type elsewhere in the structure are still expanded.
    if visited.contains(&ty) {
        return TypeTree::new();
    }
    visited.push(ty);
    let result = typetree_from_ty_impl(tcx, ty, depth, visited);
    visited.pop();
    result
}
/// Dispatch TypeTree construction for `ty`.
///
/// Thin wrapper around `typetree_from_ty_impl_inner`: a type reached at
/// this level is not (yet) known to be the pointee of a reference, so the
/// reference-target flag starts out `false`.
fn typetree_from_ty_impl<'tcx>(
    tcx: TyCtxt<'tcx>,
    ty: Ty<'tcx>,
    depth: usize,
    seen: &mut Vec<Ty<'tcx>>,
) -> TypeTree {
    typetree_from_ty_impl_inner(tcx, ty, depth, seen, /* is_reference_target */ false)
}
/// Internal implementation with context about whether this is for a reference target.
///
/// Walks the structure of `ty` and emits `Type` entries carrying byte
/// offsets, sizes and scalar kinds. As used here, offset `-1` marks an
/// entry that is not pinned to a specific field position (pointer entries,
/// repeated array elements, directly-used scalars), while a non-negative
/// offset places the entry at that byte within an aggregate. Anything not
/// modelled below yields an empty `TypeTree`.
fn typetree_from_ty_impl_inner<'tcx>(
    tcx: TyCtxt<'tcx>,
    ty: Ty<'tcx>,
    depth: usize,
    visited: &mut Vec<Ty<'tcx>>,
    is_reference_target: bool,
) -> TypeTree {
    // Scalars: a single leaf entry with the concrete float kind, or
    // `Kind::Integer` for ints / char / bool.
    if ty.is_scalar() {
        let (kind, size) = if ty.is_integral() || ty.is_char() || ty.is_bool() {
            (Kind::Integer, ty.primitive_size(tcx).bytes_usize())
        } else if ty.is_floating_point() {
            match ty {
                x if x == tcx.types.f16 => (Kind::Half, 2),
                x if x == tcx.types.f32 => (Kind::Float, 4),
                x if x == tcx.types.f64 => (Kind::Double, 8),
                x if x == tcx.types.f128 => (Kind::F128, 16),
                // Unreachable for the float types above; recorded as a
                // zero-sized integer just in case.
                _ => (Kind::Integer, 0),
            }
        } else {
            // Scalar that is neither integral nor floating point.
            (Kind::Integer, 0)
        };
        // Use offset 0 for scalars that are direct targets of references (like &f64)
        // Use offset -1 for scalars used directly (like function return types)
        let offset = if is_reference_target && !ty.is_array() { 0 } else { -1 };
        return TypeTree(vec![Type { offset, size, kind, child: TypeTree::new() }]);
    }
    // Pointer-like types (&T, *T, Box<T>): one pointer entry whose child
    // tree describes the pointee (recursed with is_reference_target=true).
    if ty.is_ref() || ty.is_raw_ptr() || ty.is_box() {
        let inner_ty = if let Some(inner) = ty.builtin_deref(true) {
            inner
        } else {
            return TypeTree::new();
        };
        let child = typetree_from_ty_impl_inner(tcx, inner_ty, depth + 1, visited, true);
        return TypeTree(vec![Type {
            offset: -1,
            size: tcx.data_layout.pointer_size().bytes_usize(),
            kind: Kind::Pointer,
            child,
        }]);
    }
    // Arrays: every element shares one layout, so the element's entries are
    // re-emitted with offset -1 rather than once per index.
    if ty.is_array() {
        if let ty::Array(element_ty, len_const) = ty.kind() {
            let len = len_const.try_to_target_usize(tcx).unwrap_or(0);
            if len == 0 {
                return TypeTree::new();
            }
            let element_tree =
                typetree_from_ty_impl_inner(tcx, *element_ty, depth + 1, visited, false);
            let mut types = Vec::new();
            for elem_type in &element_tree.0 {
                types.push(Type {
                    offset: -1,
                    size: elem_type.size,
                    kind: elem_type.kind,
                    child: elem_type.child.clone(),
                });
            }
            return TypeTree(types);
        }
    }
    // Slices: described by their element type alone (length is dynamic).
    if ty.is_slice() {
        if let ty::Slice(element_ty) = ty.kind() {
            let element_tree =
                typetree_from_ty_impl_inner(tcx, *element_ty, depth + 1, visited, false);
            return element_tree;
        }
    }
    // Tuples: concatenate the element trees, shifting each element's
    // entries by the running byte offset.
    if let ty::Tuple(tuple_types) = ty.kind() {
        if tuple_types.is_empty() {
            return TypeTree::new();
        }
        let mut types = Vec::new();
        let mut current_offset = 0;
        for tuple_ty in tuple_types.iter() {
            let element_tree =
                typetree_from_ty_impl_inner(tcx, tuple_ty, depth + 1, visited, false);
            // NOTE(review): the running offset is accumulated from each
            // element's own layout size, which assumes declaration-order
            // placement without padding — confirm against the tuple's
            // actual field offsets.
            let element_layout = tcx
                .layout_of(ty::TypingEnv::fully_monomorphized().as_query_input(tuple_ty))
                .ok()
                .map(|layout| layout.size.bytes_usize())
                .unwrap_or(0);
            for elem_type in &element_tree.0 {
                types.push(Type {
                    offset: if elem_type.offset == -1 {
                        current_offset as isize
                    } else {
                        current_offset as isize + elem_type.offset
                    },
                    size: elem_type.size,
                    kind: elem_type.kind,
                    child: elem_type.child.clone(),
                });
            }
            current_offset += element_layout;
        }
        return TypeTree(types);
    }
    // Structs: like tuples, but each field's byte offset comes from the
    // computed layout, so padding and field reordering are respected.
    if let ty::Adt(adt_def, args) = ty.kind() {
        if adt_def.is_struct() {
            let struct_layout =
                tcx.layout_of(ty::TypingEnv::fully_monomorphized().as_query_input(ty));
            if let Ok(layout) = struct_layout {
                let mut types = Vec::new();
                for (field_idx, field_def) in adt_def.all_fields().enumerate() {
                    let field_ty = field_def.ty(tcx, args);
                    let field_tree =
                        typetree_from_ty_impl_inner(tcx, field_ty, depth + 1, visited, false);
                    let field_offset = layout.fields.offset(field_idx).bytes_usize();
                    for elem_type in &field_tree.0 {
                        types.push(Type {
                            offset: if elem_type.offset == -1 {
                                field_offset as isize
                            } else {
                                field_offset as isize + elem_type.offset
                            },
                            size: elem_type.size,
                            kind: elem_type.kind,
                            child: elem_type.child.clone(),
                        });
                    }
                }
                return TypeTree(types);
            }
        }
    }
    // Enums, unions, closures, trait objects, etc. are not modelled yet.
    TypeTree::new()
}

View file

@ -554,6 +554,21 @@ impl<'a, 'tcx> Visitor<'a, 'tcx> for UnsafetyVisitor<'a, 'tcx> {
visit::walk_expr(self, &self.thir[arg]);
return;
}
// Secondly, we allow raw borrows of union field accesses. Peel
// any of those off, and recurse normally on the LHS, which should
// reject any unsafe operations within.
let mut peeled = arg;
while let ExprKind::Scope { value: arg, .. } = self.thir[peeled].kind
&& let ExprKind::Field { lhs, name: _, variant_index: _ } = self.thir[arg].kind
&& let ty::Adt(def, _) = &self.thir[lhs].ty.kind()
&& def.is_union()
{
peeled = lhs;
}
visit::walk_expr(self, &self.thir[peeled]);
// And return so we don't recurse directly onto the union field access(es).
return;
}
ExprKind::Deref { arg } => {
if let ExprKind::StaticRef { def_id, .. } | ExprKind::ThreadLocalRef(def_id) =

View file

@ -1275,13 +1275,13 @@ fn report_non_exhaustive_match<'p, 'tcx>(
if ty.is_ptr_sized_integral() {
if ty.inner() == cx.tcx.types.usize {
err.note(format!(
"`{ty}` does not have a fixed maximum value, so half-open ranges are \
necessary to match exhaustively",
"`{ty}::MAX` is not treated as exhaustive, \
so half-open ranges are necessary to match exhaustively",
));
} else if ty.inner() == cx.tcx.types.isize {
err.note(format!(
"`{ty}` does not have fixed minimum and maximum values, so half-open \
ranges are necessary to match exhaustively",
"`{ty}::MIN` and `{ty}::MAX` are not treated as exhaustive, \
so half-open ranges are necessary to match exhaustively",
));
}
} else if ty.inner() == cx.tcx.types.str_ {

View file

@ -74,20 +74,28 @@ where
}
}
fn is_initial_provisional_result(
cx: Self::Cx,
kind: PathKind,
input: CanonicalInput<I>,
result: QueryResult<I>,
) -> bool {
Self::initial_provisional_result(cx, kind, input) == result
fn is_initial_provisional_result(result: QueryResult<I>) -> Option<PathKind> {
match result {
Ok(response) => {
if has_no_inference_or_external_constraints(response) {
if response.value.certainty == Certainty::Yes {
return Some(PathKind::Coinductive);
} else if response.value.certainty == Certainty::overflow(false) {
return Some(PathKind::Unknown);
}
}
None
}
Err(NoSolution) => Some(PathKind::Inductive),
}
}
fn on_stack_overflow(cx: I, input: CanonicalInput<I>) -> QueryResult<I> {
fn stack_overflow_result(cx: I, input: CanonicalInput<I>) -> QueryResult<I> {
response_no_constraints(cx, input, Certainty::overflow(true))
}
fn on_fixpoint_overflow(cx: I, input: CanonicalInput<I>) -> QueryResult<I> {
fn fixpoint_overflow_result(cx: I, input: CanonicalInput<I>) -> QueryResult<I> {
response_no_constraints(cx, input, Certainty::overflow(false))
}

View file

@ -258,6 +258,8 @@ pub enum AutoDiff {
LooseTypes,
/// Runs Enzyme's aggressive inlining
Inline,
/// Disable Type Tree
NoTT,
}
/// Settings for `-Z instrument-xray` flag.

View file

@ -792,7 +792,7 @@ mod desc {
pub(crate) const parse_list: &str = "a space-separated list of strings";
pub(crate) const parse_list_with_polarity: &str =
"a comma-separated list of strings, with elements beginning with + or -";
pub(crate) const parse_autodiff: &str = "a comma separated list of settings: `Enable`, `PrintSteps`, `PrintTA`, `PrintTAFn`, `PrintAA`, `PrintPerf`, `PrintModBefore`, `PrintModAfter`, `PrintModFinal`, `PrintPasses`, `NoPostopt`, `LooseTypes`, `Inline`";
pub(crate) const parse_autodiff: &str = "a comma separated list of settings: `Enable`, `PrintSteps`, `PrintTA`, `PrintTAFn`, `PrintAA`, `PrintPerf`, `PrintModBefore`, `PrintModAfter`, `PrintModFinal`, `PrintPasses`, `NoPostopt`, `LooseTypes`, `Inline`, `NoTT`";
pub(crate) const parse_offload: &str = "a comma separated list of settings: `Enable`";
pub(crate) const parse_comma_list: &str = "a comma-separated list of strings";
pub(crate) const parse_opt_comma_list: &str = parse_comma_list;
@ -1481,6 +1481,7 @@ pub mod parse {
"PrintPasses" => AutoDiff::PrintPasses,
"LooseTypes" => AutoDiff::LooseTypes,
"Inline" => AutoDiff::Inline,
"NoTT" => AutoDiff::NoTT,
_ => {
// FIXME(ZuseZ4): print an error saying which value is not recognized
return false;

View file

@ -81,8 +81,8 @@ cfg_select! {
// use `loadu`, which supports unaligned loading.
let chunk = unsafe { _mm_loadu_si128(chunk.as_ptr() as *const __m128i) };
// For character in the chunk, see if its byte value is < 0, which
// indicates that it's part of a UTF-8 char.
// For each character in the chunk, see if its byte value is < 0,
// which indicates that it's part of a UTF-8 char.
let multibyte_test = _mm_cmplt_epi8(chunk, _mm_set1_epi8(0));
// Create a bit mask from the comparison results.
let multibyte_mask = _mm_movemask_epi8(multibyte_test);
@ -132,8 +132,111 @@ cfg_select! {
}
}
}
target_arch = "loongarch64" => {
/// loongarch64 entry point: uses the vectorized LSX routine when the CPU
/// supports it at runtime, otherwise scans the whole source with the
/// generic byte-by-byte implementation.
fn analyze_source_file_dispatch(
    src: &str,
    lines: &mut Vec<RelativeBytePos>,
    multi_byte_chars: &mut Vec<MultiByteChar>,
) {
    use std::arch::is_loongarch_feature_detected;

    if is_loongarch_feature_detected!("lsx") {
        // SAFETY: the `lsx` target feature was just detected at runtime,
        // so calling the `#[target_feature(enable = "lsx")]` function is sound.
        unsafe {
            analyze_source_file_lsx(src, lines, multi_byte_chars);
        }
    } else {
        analyze_source_file_generic(
            src,
            src.len(),
            RelativeBytePos::from_u32(0),
            lines,
            multi_byte_chars,
        );
    }
}
/// Checks 16 byte chunks of text at a time. If the chunk contains
/// something other than printable ASCII characters and newlines, the
/// function falls back to the generic implementation. Otherwise it uses
/// LSX intrinsics to quickly find all newlines.
#[target_feature(enable = "lsx")]
unsafe fn analyze_source_file_lsx(
    src: &str,
    lines: &mut Vec<RelativeBytePos>,
    multi_byte_chars: &mut Vec<MultiByteChar>,
) {
    use std::arch::loongarch64::*;

    const CHUNK_SIZE: usize = 16;

    let (chunks, tail) = src.as_bytes().as_chunks::<CHUNK_SIZE>();

    // This variable keeps track of where we should start decoding a
    // chunk. If a multi-byte character spans across chunk boundaries,
    // we need to skip that part in the next chunk because we already
    // handled it.
    let mut intra_chunk_offset = 0;

    for (chunk_index, chunk) in chunks.iter().enumerate() {
        // All LSX memory instructions support unaligned access, so using
        // vld is fine.
        let chunk = unsafe { lsx_vld::<0>(chunk.as_ptr() as *const i8) };

        // For each character in the chunk, see if its byte value is < 0,
        // which indicates that it's part of a UTF-8 char.
        let multibyte_mask = lsx_vmskltz_b(chunk);
        // Create a bit mask from the comparison results.
        let multibyte_mask = lsx_vpickve2gr_w::<0>(multibyte_mask);

        // If the bit mask is all zero, we only have ASCII chars here:
        if multibyte_mask == 0 {
            assert!(intra_chunk_offset == 0);

            // Check for newlines in the chunk. `vseqi` sets matching bytes
            // to all-ones, whose sign bit is set, so the same sign-bit
            // gather used above yields the newline bit mask.
            let newlines_test = lsx_vseqi_b::<{ b'\n' as i32 }>(chunk);
            let newlines_mask = lsx_vmskltz_b(newlines_test);
            let mut newlines_mask = lsx_vpickve2gr_w::<0>(newlines_mask);

            // The +1 makes each recorded position point just past the `\n`,
            // i.e. at the first byte of the following line.
            let output_offset = RelativeBytePos::from_usize(chunk_index * CHUNK_SIZE + 1);

            while newlines_mask != 0 {
                let index = newlines_mask.trailing_zeros();

                lines.push(RelativeBytePos(index) + output_offset);

                // Clear the bit, so we can find the next one.
                newlines_mask &= newlines_mask - 1;
            }
        } else {
            // The slow path.
            // There are multibyte chars in here, fallback to generic decoding.
            let scan_start = chunk_index * CHUNK_SIZE + intra_chunk_offset;
            intra_chunk_offset = analyze_source_file_generic(
                &src[scan_start..],
                CHUNK_SIZE - intra_chunk_offset,
                RelativeBytePos::from_usize(scan_start),
                lines,
                multi_byte_chars,
            );
        }
    }

    // There might still be a tail left to analyze
    let tail_start = src.len() - tail.len() + intra_chunk_offset;
    if tail_start < src.len() {
        analyze_source_file_generic(
            &src[tail_start..],
            src.len() - tail_start,
            RelativeBytePos::from_usize(tail_start),
            lines,
            multi_byte_chars,
        );
    }
}
}
_ => {
// The target (or compiler version) does not support SSE2 ...
// The target (or compiler version) does not support vector instructions
// our specialized implementations need (x86 SSE2, loongarch64 LSX)...
fn analyze_source_file_dispatch(
src: &str,
lines: &mut Vec<RelativeBytePos>,

View file

@ -17,6 +17,7 @@
// tidy-alphabetical-start
#![allow(internal_features)]
#![cfg_attr(target_arch = "loongarch64", feature(stdarch_loongarch))]
#![doc(html_root_url = "https://doc.rust-lang.org/nightly/nightly-rustc/")]
#![doc(rust_logo)]
#![feature(array_windows)]

View file

@ -679,6 +679,7 @@ symbols! {
cmpxchg16b_target_feature,
cmse_nonsecure_entry,
coerce_pointee_validated,
coerce_shared,
coerce_unsized,
cold,
cold_path,

View file

@ -86,14 +86,12 @@ pub trait Delegate: Sized {
kind: PathKind,
input: <Self::Cx as Cx>::Input,
) -> <Self::Cx as Cx>::Result;
fn is_initial_provisional_result(
fn is_initial_provisional_result(result: <Self::Cx as Cx>::Result) -> Option<PathKind>;
fn stack_overflow_result(
cx: Self::Cx,
kind: PathKind,
input: <Self::Cx as Cx>::Input,
result: <Self::Cx as Cx>::Result,
) -> bool;
fn on_stack_overflow(cx: Self::Cx, input: <Self::Cx as Cx>::Input) -> <Self::Cx as Cx>::Result;
fn on_fixpoint_overflow(
) -> <Self::Cx as Cx>::Result;
fn fixpoint_overflow_result(
cx: Self::Cx,
input: <Self::Cx as Cx>::Input,
) -> <Self::Cx as Cx>::Result;
@ -215,6 +213,27 @@ impl HeadUsages {
let HeadUsages { inductive, unknown, coinductive, forced_ambiguity } = self;
inductive == 0 && unknown == 0 && coinductive == 0 && forced_ambiguity == 0
}
fn is_single(self, path_kind: PathKind) -> bool {
match path_kind {
PathKind::Inductive => matches!(
self,
HeadUsages { inductive: _, unknown: 0, coinductive: 0, forced_ambiguity: 0 },
),
PathKind::Unknown => matches!(
self,
HeadUsages { inductive: 0, unknown: _, coinductive: 0, forced_ambiguity: 0 },
),
PathKind::Coinductive => matches!(
self,
HeadUsages { inductive: 0, unknown: 0, coinductive: _, forced_ambiguity: 0 },
),
PathKind::ForcedAmbiguity => matches!(
self,
HeadUsages { inductive: 0, unknown: 0, coinductive: 0, forced_ambiguity: _ },
),
}
}
}
#[derive(Debug, Default)]
@ -869,7 +888,7 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
}
debug!("encountered stack overflow");
D::on_stack_overflow(cx, input)
D::stack_overflow_result(cx, input)
}
/// When reevaluating a goal with a changed provisional result, all provisional cache entry
@ -888,7 +907,29 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
!entries.is_empty()
});
}
}
/// We need to rebase provisional cache entries when popping one of their cycle
/// heads from the stack. This may not necessarily mean that we've actually
/// reached a fixpoint for that cycle head, which impacts the way we rebase
/// provisional cache entries.
enum RebaseReason {
    /// The popped cycle head had no cycle usages of its own.
    NoCycleUsages,
    /// The final result of the popped cycle head is ambiguous.
    Ambiguity,
    /// Evaluating the popped cycle head hit the fixpoint step limit.
    Overflow,
    /// We've actually reached a fixpoint.
    ///
    /// This can happen in the first evaluation step for the cycle head,
    /// in which case the used provisional result depends on the cycle
    /// `PathKind`. We store this path kind to check whether the provisional
    /// cache entry we're rebasing relied on the same cycles.
    ///
    /// In later iterations cycles always return `stack_entry.provisional_result`
    /// so we no longer depend on the `PathKind`. We store `None` in that case.
    ReachedFixpoint(Option<PathKind>),
}
impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D, X> {
/// A necessary optimization to handle complex solver cycles. A provisional cache entry
/// relies on a set of cycle heads and the path towards these heads. When popping a cycle
/// head from the stack after we've finished computing it, we can't be sure that the
@ -908,8 +949,9 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
/// to me.
fn rebase_provisional_cache_entries(
&mut self,
cx: X,
stack_entry: &StackEntry<X>,
mut mutate_result: impl FnMut(X::Input, X::Result) -> X::Result,
rebase_reason: RebaseReason,
) {
let popped_head_index = self.stack.next_index();
#[allow(rustc::potential_query_instability)]
@ -927,6 +969,10 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
return true;
};
let Some(new_highest_head_index) = heads.opt_highest_cycle_head_index() else {
return false;
};
// We're rebasing an entry `e` over a head `p`. This head
// has a number of own heads `h` it depends on.
//
@ -977,22 +1023,37 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
let eph = ep.extend_with_paths(ph);
heads.insert(head_index, eph, head.usages);
}
}
let Some(head_index) = heads.opt_highest_cycle_head_index() else {
return false;
};
// The provisional cache entry does depend on the provisional result
// of the popped cycle head. We need to mutate the result of our
// provisional cache entry in case we did not reach a fixpoint.
match rebase_reason {
// If the cycle head does not actually depend on itself, then
// the provisional result used by the provisional cache entry
// is not actually equal to the final provisional result. We
// need to discard the provisional cache entry in this case.
RebaseReason::NoCycleUsages => return false,
RebaseReason::Ambiguity => {
*result = D::propagate_ambiguity(cx, input, *result);
}
RebaseReason::Overflow => *result = D::fixpoint_overflow_result(cx, input),
RebaseReason::ReachedFixpoint(None) => {}
RebaseReason::ReachedFixpoint(Some(path_kind)) => {
if !popped_head.usages.is_single(path_kind) {
return false;
}
}
};
}
// We now care about the path from the next highest cycle head to the
// provisional cache entry.
*path_from_head = path_from_head.extend(Self::cycle_path_kind(
&self.stack,
stack_entry.step_kind_from_parent,
head_index,
new_highest_head_index,
));
// Mutate the result of the provisional cache entry in case we did
// not reach a fixpoint.
*result = mutate_result(input, *result);
true
});
!entries.is_empty()
@ -1209,33 +1270,19 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
/// Whether we've reached a fixpoint when evaluating a cycle head.
fn reached_fixpoint(
&mut self,
cx: X,
stack_entry: &StackEntry<X>,
usages: HeadUsages,
result: X::Result,
) -> bool {
) -> Result<Option<PathKind>, ()> {
let provisional_result = stack_entry.provisional_result;
if usages.is_empty() {
true
} else if let Some(provisional_result) = provisional_result {
provisional_result == result
if let Some(provisional_result) = provisional_result {
if provisional_result == result { Ok(None) } else { Err(()) }
} else if let Some(path_kind) = D::is_initial_provisional_result(result)
.filter(|&path_kind| usages.is_single(path_kind))
{
Ok(Some(path_kind))
} else {
let check = |k| D::is_initial_provisional_result(cx, k, stack_entry.input, result);
match usages {
HeadUsages { inductive: _, unknown: 0, coinductive: 0, forced_ambiguity: 0 } => {
check(PathKind::Inductive)
}
HeadUsages { inductive: 0, unknown: _, coinductive: 0, forced_ambiguity: 0 } => {
check(PathKind::Unknown)
}
HeadUsages { inductive: 0, unknown: 0, coinductive: _, forced_ambiguity: 0 } => {
check(PathKind::Coinductive)
}
HeadUsages { inductive: 0, unknown: 0, coinductive: 0, forced_ambiguity: _ } => {
check(PathKind::ForcedAmbiguity)
}
_ => false,
}
Err(())
}
}
@ -1280,8 +1327,19 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
// is equal to the provisional result of the previous iteration, or because
// this was only the head of either coinductive or inductive cycles, and the
// final result is equal to the initial response for that case.
if self.reached_fixpoint(cx, &stack_entry, usages, result) {
self.rebase_provisional_cache_entries(&stack_entry, |_, result| result);
if let Ok(fixpoint) = self.reached_fixpoint(&stack_entry, usages, result) {
self.rebase_provisional_cache_entries(
cx,
&stack_entry,
RebaseReason::ReachedFixpoint(fixpoint),
);
return EvaluationResult::finalize(stack_entry, encountered_overflow, result);
} else if usages.is_empty() {
self.rebase_provisional_cache_entries(
cx,
&stack_entry,
RebaseReason::NoCycleUsages,
);
return EvaluationResult::finalize(stack_entry, encountered_overflow, result);
}
@ -1298,9 +1356,7 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
// we also taint all provisional cache entries which depend on the
// current goal.
if D::is_ambiguous_result(result) {
self.rebase_provisional_cache_entries(&stack_entry, |input, _| {
D::propagate_ambiguity(cx, input, result)
});
self.rebase_provisional_cache_entries(cx, &stack_entry, RebaseReason::Ambiguity);
return EvaluationResult::finalize(stack_entry, encountered_overflow, result);
};
@ -1309,10 +1365,8 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
i += 1;
if i >= D::FIXPOINT_STEP_LIMIT {
debug!("canonical cycle overflow");
let result = D::on_fixpoint_overflow(cx, input);
self.rebase_provisional_cache_entries(&stack_entry, |input, _| {
D::on_fixpoint_overflow(cx, input)
});
let result = D::fixpoint_overflow_result(cx, input);
self.rebase_provisional_cache_entries(cx, &stack_entry, RebaseReason::Overflow);
return EvaluationResult::finalize(stack_entry, encountered_overflow, result);
}

View file

@ -103,7 +103,6 @@ pub struct VecDeque<
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: Clone, A: Allocator + Clone> Clone for VecDeque<T, A> {
#[track_caller]
fn clone(&self) -> Self {
let mut deq = Self::with_capacity_in(self.len(), self.allocator().clone());
deq.extend(self.iter().cloned());
@ -114,7 +113,6 @@ impl<T: Clone, A: Allocator + Clone> Clone for VecDeque<T, A> {
///
/// This method is preferred over simply assigning `source.clone()` to `self`,
/// as it avoids reallocation if possible.
#[track_caller]
fn clone_from(&mut self, source: &Self) {
self.clear();
self.extend(source.iter().cloned());
@ -577,7 +575,6 @@ impl<T> VecDeque<T> {
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
#[must_use]
#[track_caller]
pub fn with_capacity(capacity: usize) -> VecDeque<T> {
Self::with_capacity_in(capacity, Global)
}
@ -633,7 +630,6 @@ impl<T, A: Allocator> VecDeque<T, A> {
/// let deque: VecDeque<u32> = VecDeque::with_capacity(10);
/// ```
#[unstable(feature = "allocator_api", issue = "32838")]
#[track_caller]
pub fn with_capacity_in(capacity: usize, alloc: A) -> VecDeque<T, A> {
VecDeque { head: 0, len: 0, buf: RawVec::with_capacity_in(capacity, alloc) }
}
@ -799,7 +795,6 @@ impl<T, A: Allocator> VecDeque<T, A> {
///
/// [`reserve`]: VecDeque::reserve
#[stable(feature = "rust1", since = "1.0.0")]
#[track_caller]
pub fn reserve_exact(&mut self, additional: usize) {
let new_cap = self.len.checked_add(additional).expect("capacity overflow");
let old_cap = self.capacity();
@ -830,7 +825,6 @@ impl<T, A: Allocator> VecDeque<T, A> {
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[cfg_attr(not(test), rustc_diagnostic_item = "vecdeque_reserve")]
#[track_caller]
pub fn reserve(&mut self, additional: usize) {
let new_cap = self.len.checked_add(additional).expect("capacity overflow");
let old_cap = self.capacity();
@ -962,7 +956,6 @@ impl<T, A: Allocator> VecDeque<T, A> {
/// assert!(buf.capacity() >= 4);
/// ```
#[stable(feature = "deque_extras_15", since = "1.5.0")]
#[track_caller]
pub fn shrink_to_fit(&mut self) {
self.shrink_to(0);
}
@ -988,7 +981,6 @@ impl<T, A: Allocator> VecDeque<T, A> {
/// assert!(buf.capacity() >= 4);
/// ```
#[stable(feature = "shrink_to", since = "1.56.0")]
#[track_caller]
pub fn shrink_to(&mut self, min_capacity: usize) {
let target_cap = min_capacity.max(self.len);
@ -1891,7 +1883,6 @@ impl<T, A: Allocator> VecDeque<T, A> {
/// assert_eq!(d.front(), Some(&2));
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[track_caller]
pub fn push_front(&mut self, value: T) {
let _ = self.push_front_mut(value);
}
@ -1910,7 +1901,6 @@ impl<T, A: Allocator> VecDeque<T, A> {
/// assert_eq!(d.front(), Some(&7));
/// ```
#[unstable(feature = "push_mut", issue = "135974")]
#[track_caller]
#[must_use = "if you don't need a reference to the value, use `VecDeque::push_front` instead"]
pub fn push_front_mut(&mut self, value: T) -> &mut T {
if self.is_full() {
@ -1937,7 +1927,6 @@ impl<T, A: Allocator> VecDeque<T, A> {
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_confusables("push", "put", "append")]
#[track_caller]
pub fn push_back(&mut self, value: T) {
let _ = self.push_back_mut(value);
}
@ -1956,7 +1945,6 @@ impl<T, A: Allocator> VecDeque<T, A> {
/// assert_eq!(d.back(), Some(&10));
/// ```
#[unstable(feature = "push_mut", issue = "135974")]
#[track_caller]
#[must_use = "if you don't need a reference to the value, use `VecDeque::push_back` instead"]
pub fn push_back_mut(&mut self, value: T) -> &mut T {
if self.is_full() {
@ -2071,7 +2059,6 @@ impl<T, A: Allocator> VecDeque<T, A> {
/// assert_eq!(vec_deque, &['a', 'd', 'b', 'c', 'e']);
/// ```
#[stable(feature = "deque_extras_15", since = "1.5.0")]
#[track_caller]
pub fn insert(&mut self, index: usize, value: T) {
let _ = self.insert_mut(index, value);
}
@ -2099,7 +2086,6 @@ impl<T, A: Allocator> VecDeque<T, A> {
/// assert_eq!(vec_deque, &[1, 12, 2, 3]);
/// ```
#[unstable(feature = "push_mut", issue = "135974")]
#[track_caller]
#[must_use = "if you don't need a reference to the value, use `VecDeque::insert` instead"]
pub fn insert_mut(&mut self, index: usize, value: T) -> &mut T {
assert!(index <= self.len(), "index out of bounds");
@ -2205,7 +2191,6 @@ impl<T, A: Allocator> VecDeque<T, A> {
#[inline]
#[must_use = "use `.truncate()` if you don't need the other half"]
#[stable(feature = "split_off", since = "1.4.0")]
#[track_caller]
pub fn split_off(&mut self, at: usize) -> Self
where
A: Clone,
@ -2272,7 +2257,6 @@ impl<T, A: Allocator> VecDeque<T, A> {
/// ```
#[inline]
#[stable(feature = "append", since = "1.4.0")]
#[track_caller]
pub fn append(&mut self, other: &mut Self) {
if T::IS_ZST {
self.len = self.len.checked_add(other.len).expect("capacity overflow");
@ -2395,7 +2379,6 @@ impl<T, A: Allocator> VecDeque<T, A> {
// be called in cold paths.
// This may panic or abort
#[inline(never)]
#[track_caller]
fn grow(&mut self) {
// Extend or possibly remove this assertion when valid use-cases for growing the
// buffer without it being full emerge
@ -2434,7 +2417,6 @@ impl<T, A: Allocator> VecDeque<T, A> {
/// assert_eq!(buf, [5, 10, 101, 102, 103]);
/// ```
#[stable(feature = "vec_resize_with", since = "1.33.0")]
#[track_caller]
pub fn resize_with(&mut self, new_len: usize, generator: impl FnMut() -> T) {
let len = self.len;
@ -2981,7 +2963,6 @@ impl<T: Clone, A: Allocator> VecDeque<T, A> {
/// assert_eq!(buf, [5, 10, 20, 20, 20]);
/// ```
#[stable(feature = "deque_extras", since = "1.16.0")]
#[track_caller]
pub fn resize(&mut self, new_len: usize, value: T) {
if new_len > self.len() {
let extra = new_len - self.len();
@ -3101,7 +3082,6 @@ impl<T, A: Allocator> IndexMut<usize> for VecDeque<T, A> {
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> FromIterator<T> for VecDeque<T> {
#[track_caller]
fn from_iter<I: IntoIterator<Item = T>>(iter: I) -> VecDeque<T> {
SpecFromIter::spec_from_iter(iter.into_iter())
}
@ -3141,19 +3121,16 @@ impl<'a, T, A: Allocator> IntoIterator for &'a mut VecDeque<T, A> {
#[stable(feature = "rust1", since = "1.0.0")]
impl<T, A: Allocator> Extend<T> for VecDeque<T, A> {
#[track_caller]
fn extend<I: IntoIterator<Item = T>>(&mut self, iter: I) {
<Self as SpecExtend<T, I::IntoIter>>::spec_extend(self, iter.into_iter());
}
#[inline]
#[track_caller]
fn extend_one(&mut self, elem: T) {
self.push_back(elem);
}
#[inline]
#[track_caller]
fn extend_reserve(&mut self, additional: usize) {
self.reserve(additional);
}
@ -3169,19 +3146,16 @@ impl<T, A: Allocator> Extend<T> for VecDeque<T, A> {
#[stable(feature = "extend_ref", since = "1.2.0")]
impl<'a, T: 'a + Copy, A: Allocator> Extend<&'a T> for VecDeque<T, A> {
#[track_caller]
fn extend<I: IntoIterator<Item = &'a T>>(&mut self, iter: I) {
self.spec_extend(iter.into_iter());
}
#[inline]
#[track_caller]
fn extend_one(&mut self, &elem: &'a T) {
self.push_back(elem);
}
#[inline]
#[track_caller]
fn extend_reserve(&mut self, additional: usize) {
self.reserve(additional);
}
@ -3279,7 +3253,6 @@ impl<T, const N: usize> From<[T; N]> for VecDeque<T> {
/// let deq2: VecDeque<_> = [1, 2, 3, 4].into();
/// assert_eq!(deq1, deq2);
/// ```
#[track_caller]
fn from(arr: [T; N]) -> Self {
let mut deq = VecDeque::with_capacity(N);
let arr = ManuallyDrop::new(arr);

View file

@ -8,7 +8,6 @@ use crate::vec;
// Specialization trait used for VecDeque::extend
pub(super) trait SpecExtend<T, I> {
#[track_caller]
fn spec_extend(&mut self, iter: I);
}
@ -16,7 +15,6 @@ impl<T, I, A: Allocator> SpecExtend<T, I> for VecDeque<T, A>
where
I: Iterator<Item = T>,
{
#[track_caller]
default fn spec_extend(&mut self, mut iter: I) {
// This function should be the moral equivalent of:
//
@ -47,7 +45,6 @@ impl<T, I, A: Allocator> SpecExtend<T, I> for VecDeque<T, A>
where
I: TrustedLen<Item = T>,
{
#[track_caller]
default fn spec_extend(&mut self, iter: I) {
// This is the case for a TrustedLen iterator.
let (low, high) = iter.size_hint();
@ -81,7 +78,6 @@ where
#[cfg(not(test))]
impl<T, A: Allocator> SpecExtend<T, vec::IntoIter<T>> for VecDeque<T, A> {
#[track_caller]
fn spec_extend(&mut self, mut iterator: vec::IntoIter<T>) {
let slice = iterator.as_slice();
self.reserve(slice.len());
@ -99,7 +95,6 @@ where
I: Iterator<Item = &'a T>,
T: Copy,
{
#[track_caller]
default fn spec_extend(&mut self, iterator: I) {
self.spec_extend(iterator.copied())
}
@ -109,7 +104,6 @@ impl<'a, T: 'a, A: Allocator> SpecExtend<&'a T, slice::Iter<'a, T>> for VecDeque
where
T: Copy,
{
#[track_caller]
fn spec_extend(&mut self, iterator: slice::Iter<'a, T>) {
let slice = iterator.as_slice();
self.reserve(slice.len());

View file

@ -9,7 +9,6 @@ impl<T, I> SpecFromIter<T, I> for VecDeque<T>
where
I: Iterator<Item = T>,
{
#[track_caller]
default fn spec_from_iter(iterator: I) -> Self {
// Since converting is O(1) now, just re-use the `Vec` logic for
// anything where we can't do something extra-special for `VecDeque`,

View file

@ -24,7 +24,6 @@ mod tests;
// only one location which panics rather than a bunch throughout the module.
#[cfg(not(no_global_oom_handling))]
#[cfg_attr(not(panic = "immediate-abort"), inline(never))]
#[track_caller]
fn capacity_overflow() -> ! {
panic!("capacity overflow");
}
@ -123,7 +122,6 @@ impl<T> RawVec<T, Global> {
#[cfg(not(any(no_global_oom_handling, test)))]
#[must_use]
#[inline]
#[track_caller]
pub(crate) fn with_capacity(capacity: usize) -> Self {
Self { inner: RawVecInner::with_capacity(capacity, T::LAYOUT), _marker: PhantomData }
}
@ -132,7 +130,6 @@ impl<T> RawVec<T, Global> {
#[cfg(not(any(no_global_oom_handling, test)))]
#[must_use]
#[inline]
#[track_caller]
pub(crate) fn with_capacity_zeroed(capacity: usize) -> Self {
Self {
inner: RawVecInner::with_capacity_zeroed_in(capacity, Global, T::LAYOUT),
@ -145,7 +142,6 @@ impl RawVecInner<Global> {
#[cfg(not(any(no_global_oom_handling, test)))]
#[must_use]
#[inline]
#[track_caller]
fn with_capacity(capacity: usize, elem_layout: Layout) -> Self {
match Self::try_allocate_in(capacity, AllocInit::Uninitialized, Global, elem_layout) {
Ok(res) => res,
@ -186,7 +182,6 @@ impl<T, A: Allocator> RawVec<T, A> {
/// allocator for the returned `RawVec`.
#[cfg(not(no_global_oom_handling))]
#[inline]
#[track_caller]
pub(crate) fn with_capacity_in(capacity: usize, alloc: A) -> Self {
Self {
inner: RawVecInner::with_capacity_in(capacity, alloc, T::LAYOUT),
@ -208,7 +203,6 @@ impl<T, A: Allocator> RawVec<T, A> {
/// of allocator for the returned `RawVec`.
#[cfg(not(no_global_oom_handling))]
#[inline]
#[track_caller]
pub(crate) fn with_capacity_zeroed_in(capacity: usize, alloc: A) -> Self {
Self {
inner: RawVecInner::with_capacity_zeroed_in(capacity, alloc, T::LAYOUT),
@ -328,7 +322,6 @@ impl<T, A: Allocator> RawVec<T, A> {
/// Aborts on OOM.
#[cfg(not(no_global_oom_handling))]
#[inline]
#[track_caller]
pub(crate) fn reserve(&mut self, len: usize, additional: usize) {
// SAFETY: All calls on self.inner pass T::LAYOUT as the elem_layout
unsafe { self.inner.reserve(len, additional, T::LAYOUT) }
@ -338,7 +331,6 @@ impl<T, A: Allocator> RawVec<T, A> {
/// caller to ensure `len == self.capacity()`.
#[cfg(not(no_global_oom_handling))]
#[inline(never)]
#[track_caller]
pub(crate) fn grow_one(&mut self) {
// SAFETY: All calls on self.inner pass T::LAYOUT as the elem_layout
unsafe { self.inner.grow_one(T::LAYOUT) }
@ -372,7 +364,6 @@ impl<T, A: Allocator> RawVec<T, A> {
///
/// Aborts on OOM.
#[cfg(not(no_global_oom_handling))]
#[track_caller]
pub(crate) fn reserve_exact(&mut self, len: usize, additional: usize) {
// SAFETY: All calls on self.inner pass T::LAYOUT as the elem_layout
unsafe { self.inner.reserve_exact(len, additional, T::LAYOUT) }
@ -399,7 +390,6 @@ impl<T, A: Allocator> RawVec<T, A> {
///
/// Aborts on OOM.
#[cfg(not(no_global_oom_handling))]
#[track_caller]
#[inline]
pub(crate) fn shrink_to_fit(&mut self, cap: usize) {
// SAFETY: All calls on self.inner pass T::LAYOUT as the elem_layout
@ -425,7 +415,6 @@ impl<A: Allocator> RawVecInner<A> {
#[cfg(not(no_global_oom_handling))]
#[inline]
#[track_caller]
fn with_capacity_in(capacity: usize, alloc: A, elem_layout: Layout) -> Self {
match Self::try_allocate_in(capacity, AllocInit::Uninitialized, alloc, elem_layout) {
Ok(this) => {
@ -450,7 +439,6 @@ impl<A: Allocator> RawVecInner<A> {
#[cfg(not(no_global_oom_handling))]
#[inline]
#[track_caller]
fn with_capacity_zeroed_in(capacity: usize, alloc: A, elem_layout: Layout) -> Self {
match Self::try_allocate_in(capacity, AllocInit::Zeroed, alloc, elem_layout) {
Ok(res) => res,
@ -553,7 +541,6 @@ impl<A: Allocator> RawVecInner<A> {
/// - `elem_layout`'s size must be a multiple of its alignment
#[cfg(not(no_global_oom_handling))]
#[inline]
#[track_caller]
unsafe fn reserve(&mut self, len: usize, additional: usize, elem_layout: Layout) {
// Callers expect this function to be very cheap when there is already sufficient capacity.
// Therefore, we move all the resizing and error-handling logic from grow_amortized and
@ -585,7 +572,6 @@ impl<A: Allocator> RawVecInner<A> {
/// - `elem_layout`'s size must be a multiple of its alignment
#[cfg(not(no_global_oom_handling))]
#[inline]
#[track_caller]
unsafe fn grow_one(&mut self, elem_layout: Layout) {
// SAFETY: Precondition passed to caller
if let Err(err) = unsafe { self.grow_amortized(self.cap.as_inner(), 1, elem_layout) } {
@ -621,7 +607,6 @@ impl<A: Allocator> RawVecInner<A> {
/// initially construct `self`
/// - `elem_layout`'s size must be a multiple of its alignment
#[cfg(not(no_global_oom_handling))]
#[track_caller]
unsafe fn reserve_exact(&mut self, len: usize, additional: usize, elem_layout: Layout) {
// SAFETY: Precondition passed to caller
if let Err(err) = unsafe { self.try_reserve_exact(len, additional, elem_layout) } {
@ -659,7 +644,6 @@ impl<A: Allocator> RawVecInner<A> {
/// - `cap` must be less than or equal to `self.capacity(elem_layout.size())`
#[cfg(not(no_global_oom_handling))]
#[inline]
#[track_caller]
unsafe fn shrink_to_fit(&mut self, cap: usize, elem_layout: Layout) {
if let Err(err) = unsafe { self.shrink(cap, elem_layout) } {
handle_error(err);
@ -872,7 +856,6 @@ where
#[cfg(not(no_global_oom_handling))]
#[cold]
#[optimize(size)]
#[track_caller]
fn handle_error(e: TryReserveError) -> ! {
match e.kind() {
CapacityOverflow => capacity_overflow(),

View file

@ -1105,7 +1105,6 @@ impl String {
/// ```
#[cfg(not(no_global_oom_handling))]
#[inline]
#[track_caller]
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_confusables("append", "push")]
#[rustc_diagnostic_item = "string_push_str"]
@ -1208,7 +1207,6 @@ impl String {
/// ```
#[cfg(not(no_global_oom_handling))]
#[inline]
#[track_caller]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn reserve(&mut self, additional: usize) {
self.vec.reserve(additional)
@ -1260,7 +1258,6 @@ impl String {
#[cfg(not(no_global_oom_handling))]
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
#[track_caller]
pub fn reserve_exact(&mut self, additional: usize) {
self.vec.reserve_exact(additional)
}
@ -1356,7 +1353,6 @@ impl String {
/// ```
#[cfg(not(no_global_oom_handling))]
#[inline]
#[track_caller]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn shrink_to_fit(&mut self) {
self.vec.shrink_to_fit()
@ -1384,7 +1380,6 @@ impl String {
/// ```
#[cfg(not(no_global_oom_handling))]
#[inline]
#[track_caller]
#[stable(feature = "shrink_to", since = "1.56.0")]
pub fn shrink_to(&mut self, min_capacity: usize) {
self.vec.shrink_to(min_capacity)
@ -1406,7 +1401,6 @@ impl String {
#[cfg(not(no_global_oom_handling))]
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
#[track_caller]
pub fn push(&mut self, ch: char) {
let len = self.len();
let ch_len = ch.len_utf8();
@ -2115,7 +2109,6 @@ impl String {
#[stable(feature = "box_str", since = "1.4.0")]
#[must_use = "`self` will be dropped if the result is not used"]
#[inline]
#[track_caller]
pub fn into_boxed_str(self) -> Box<str> {
let slice = self.vec.into_boxed_slice();
unsafe { from_boxed_utf8_unchecked(slice) }
@ -2293,7 +2286,6 @@ impl Error for FromUtf16Error {}
#[cfg(not(no_global_oom_handling))]
#[stable(feature = "rust1", since = "1.0.0")]
impl Clone for String {
#[track_caller]
fn clone(&self) -> Self {
String { vec: self.vec.clone() }
}
@ -2302,7 +2294,6 @@ impl Clone for String {
///
/// This method is preferred over simply assigning `source.clone()` to `self`,
/// as it avoids reallocation if possible.
#[track_caller]
fn clone_from(&mut self, source: &Self) {
self.vec.clone_from(&source.vec);
}
@ -2477,13 +2468,11 @@ impl<'a> Extend<Cow<'a, str>> for String {
#[unstable(feature = "ascii_char", issue = "110998")]
impl Extend<core::ascii::Char> for String {
#[inline]
#[track_caller]
fn extend<I: IntoIterator<Item = core::ascii::Char>>(&mut self, iter: I) {
self.vec.extend(iter.into_iter().map(|c| c.to_u8()));
}
#[inline]
#[track_caller]
fn extend_one(&mut self, c: core::ascii::Char) {
self.vec.push(c.to_u8());
}
@ -2493,13 +2482,11 @@ impl Extend<core::ascii::Char> for String {
#[unstable(feature = "ascii_char", issue = "110998")]
impl<'a> Extend<&'a core::ascii::Char> for String {
#[inline]
#[track_caller]
fn extend<I: IntoIterator<Item = &'a core::ascii::Char>>(&mut self, iter: I) {
self.extend(iter.into_iter().cloned());
}
#[inline]
#[track_caller]
fn extend_one(&mut self, c: &'a core::ascii::Char) {
self.vec.push(c.to_u8());
}

View file

@ -58,7 +58,6 @@ impl<'a, T> FromIterator<T> for Cow<'a, [T]>
where
T: Clone,
{
#[track_caller]
fn from_iter<I: IntoIterator<Item = T>>(it: I) -> Cow<'a, [T]> {
Cow::Owned(FromIterator::from_iter(it))
}

View file

@ -229,7 +229,6 @@ where
I: Iterator<Item = T> + InPlaceCollect,
<I as SourceIter>::Source: AsVecIntoIter,
{
#[track_caller]
default fn from_iter(iterator: I) -> Self {
// Select the implementation in const eval to avoid codegen of the dead branch to improve compile times.
let fun: fn(I) -> Vec<T> = const {
@ -247,7 +246,6 @@ where
}
}
#[track_caller]
fn from_iter_in_place<I, T>(mut iterator: I) -> Vec<T>
where
I: Iterator<Item = T> + InPlaceCollect,

View file

@ -515,7 +515,6 @@ impl<T> Vec<T> {
#[stable(feature = "rust1", since = "1.0.0")]
#[must_use]
#[rustc_diagnostic_item = "vec_with_capacity"]
#[track_caller]
pub fn with_capacity(capacity: usize) -> Self {
Self::with_capacity_in(capacity, Global)
}
@ -926,7 +925,6 @@ impl<T, A: Allocator> Vec<T, A> {
#[cfg(not(no_global_oom_handling))]
#[inline]
#[unstable(feature = "allocator_api", issue = "32838")]
#[track_caller]
pub fn with_capacity_in(capacity: usize, alloc: A) -> Self {
Vec { buf: RawVec::with_capacity_in(capacity, alloc), len: 0 }
}
@ -1335,7 +1333,6 @@ impl<T, A: Allocator> Vec<T, A> {
/// ```
#[cfg(not(no_global_oom_handling))]
#[stable(feature = "rust1", since = "1.0.0")]
#[track_caller]
#[rustc_diagnostic_item = "vec_reserve"]
pub fn reserve(&mut self, additional: usize) {
self.buf.reserve(self.len, additional);
@ -1367,7 +1364,6 @@ impl<T, A: Allocator> Vec<T, A> {
/// ```
#[cfg(not(no_global_oom_handling))]
#[stable(feature = "rust1", since = "1.0.0")]
#[track_caller]
pub fn reserve_exact(&mut self, additional: usize) {
self.buf.reserve_exact(self.len, additional);
}
@ -1471,7 +1467,6 @@ impl<T, A: Allocator> Vec<T, A> {
/// ```
#[cfg(not(no_global_oom_handling))]
#[stable(feature = "rust1", since = "1.0.0")]
#[track_caller]
#[inline]
pub fn shrink_to_fit(&mut self) {
// The capacity is never less than the length, and there's nothing to do when
@ -1502,7 +1497,6 @@ impl<T, A: Allocator> Vec<T, A> {
/// ```
#[cfg(not(no_global_oom_handling))]
#[stable(feature = "shrink_to", since = "1.56.0")]
#[track_caller]
pub fn shrink_to(&mut self, min_capacity: usize) {
if self.capacity() > min_capacity {
self.buf.shrink_to_fit(cmp::max(self.len, min_capacity));
@ -1536,7 +1530,6 @@ impl<T, A: Allocator> Vec<T, A> {
/// ```
#[cfg(not(no_global_oom_handling))]
#[stable(feature = "rust1", since = "1.0.0")]
#[track_caller]
pub fn into_boxed_slice(mut self) -> Box<[T], A> {
unsafe {
self.shrink_to_fit();
@ -2021,7 +2014,6 @@ impl<T, A: Allocator> Vec<T, A> {
pub fn swap_remove(&mut self, index: usize) -> T {
#[cold]
#[cfg_attr(not(panic = "immediate-abort"), inline(never))]
#[track_caller]
#[optimize(size)]
fn assert_failed(index: usize, len: usize) -> ! {
panic!("swap_remove index (is {index}) should be < len (is {len})");
@ -2568,7 +2560,6 @@ impl<T, A: Allocator> Vec<T, A> {
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_confusables("push_back", "put", "append")]
#[track_caller]
pub fn push(&mut self, value: T) {
let _ = self.push_mut(value);
}
@ -2645,7 +2636,6 @@ impl<T, A: Allocator> Vec<T, A> {
#[cfg(not(no_global_oom_handling))]
#[inline]
#[unstable(feature = "push_mut", issue = "135974")]
#[track_caller]
#[must_use = "if you don't need a reference to the value, use `Vec::push` instead"]
pub fn push_mut(&mut self, value: T) -> &mut T {
// Inform codegen that the length does not change across grow_one().
@ -2793,7 +2783,6 @@ impl<T, A: Allocator> Vec<T, A> {
#[cfg(not(no_global_oom_handling))]
#[inline]
#[stable(feature = "append", since = "1.4.0")]
#[track_caller]
pub fn append(&mut self, other: &mut Self) {
unsafe {
self.append_elements(other.as_slice() as _);
@ -2804,7 +2793,6 @@ impl<T, A: Allocator> Vec<T, A> {
/// Appends elements to `self` from other buffer.
#[cfg(not(no_global_oom_handling))]
#[inline]
#[track_caller]
unsafe fn append_elements(&mut self, other: *const [T]) {
let count = other.len();
self.reserve(count);
@ -3039,7 +3027,6 @@ impl<T, A: Allocator> Vec<T, A> {
/// ```
#[cfg(not(no_global_oom_handling))]
#[stable(feature = "vec_resize_with", since = "1.33.0")]
#[track_caller]
pub fn resize_with<F>(&mut self, new_len: usize, f: F)
where
F: FnMut() -> T,
@ -3304,7 +3291,6 @@ impl<T: Clone, A: Allocator> Vec<T, A> {
/// ```
#[cfg(not(no_global_oom_handling))]
#[stable(feature = "vec_resize", since = "1.5.0")]
#[track_caller]
pub fn resize(&mut self, new_len: usize, value: T) {
let len = self.len();
@ -3335,7 +3321,6 @@ impl<T: Clone, A: Allocator> Vec<T, A> {
/// [`extend`]: Vec::extend
#[cfg(not(no_global_oom_handling))]
#[stable(feature = "vec_extend_from_slice", since = "1.6.0")]
#[track_caller]
pub fn extend_from_slice(&mut self, other: &[T]) {
self.spec_extend(other.iter())
}
@ -3366,7 +3351,6 @@ impl<T: Clone, A: Allocator> Vec<T, A> {
/// ```
#[cfg(not(no_global_oom_handling))]
#[stable(feature = "vec_extend_from_within", since = "1.53.0")]
#[track_caller]
pub fn extend_from_within<R>(&mut self, src: R)
where
R: RangeBounds<usize>,
@ -3427,7 +3411,6 @@ impl<T, A: Allocator, const N: usize> Vec<[T; N], A> {
impl<T: Clone, A: Allocator> Vec<T, A> {
#[cfg(not(no_global_oom_handling))]
#[track_caller]
/// Extend the vector by `n` clones of value.
fn extend_with(&mut self, n: usize, value: T) {
self.reserve(n);
@ -3488,7 +3471,6 @@ impl<T: PartialEq, A: Allocator> Vec<T, A> {
#[cfg(not(no_global_oom_handling))]
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_diagnostic_item = "vec_from_elem"]
#[track_caller]
pub fn from_elem<T: Clone>(elem: T, n: usize) -> Vec<T> {
<T as SpecFromElem>::from_elem(elem, n, Global)
}
@ -3496,7 +3478,6 @@ pub fn from_elem<T: Clone>(elem: T, n: usize) -> Vec<T> {
#[doc(hidden)]
#[cfg(not(no_global_oom_handling))]
#[unstable(feature = "allocator_api", issue = "32838")]
#[track_caller]
pub fn from_elem_in<T: Clone, A: Allocator>(elem: T, n: usize, alloc: A) -> Vec<T, A> {
<T as SpecFromElem>::from_elem(elem, n, alloc)
}
@ -3587,7 +3568,6 @@ unsafe impl<T, A: Allocator> ops::DerefPure for Vec<T, A> {}
#[cfg(not(no_global_oom_handling))]
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: Clone, A: Allocator + Clone> Clone for Vec<T, A> {
#[track_caller]
fn clone(&self) -> Self {
let alloc = self.allocator().clone();
<[T]>::to_vec_in(&**self, alloc)
@ -3615,7 +3595,6 @@ impl<T: Clone, A: Allocator + Clone> Clone for Vec<T, A> {
/// // And no reallocation occurred
/// assert_eq!(yp, y.as_ptr());
/// ```
#[track_caller]
fn clone_from(&mut self, source: &Self) {
crate::slice::SpecCloneIntoVec::clone_into(source.as_slice(), self);
}
@ -3706,7 +3685,6 @@ impl<T, I: SliceIndex<[T]>, A: Allocator> IndexMut<I> for Vec<T, A> {
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> FromIterator<T> for Vec<T> {
#[inline]
#[track_caller]
fn from_iter<I: IntoIterator<Item = T>>(iter: I) -> Vec<T> {
<Self as SpecFromIter<T, I::IntoIter>>::from_iter(iter.into_iter())
}
@ -3775,19 +3753,16 @@ impl<'a, T, A: Allocator> IntoIterator for &'a mut Vec<T, A> {
#[stable(feature = "rust1", since = "1.0.0")]
impl<T, A: Allocator> Extend<T> for Vec<T, A> {
#[inline]
#[track_caller]
fn extend<I: IntoIterator<Item = T>>(&mut self, iter: I) {
<Self as SpecExtend<T, I::IntoIter>>::spec_extend(self, iter.into_iter())
}
#[inline]
#[track_caller]
fn extend_one(&mut self, item: T) {
self.push(item);
}
#[inline]
#[track_caller]
fn extend_reserve(&mut self, additional: usize) {
self.reserve(additional);
}
@ -3807,7 +3782,6 @@ impl<T, A: Allocator> Vec<T, A> {
// leaf method to which various SpecFrom/SpecExtend implementations delegate when
// they have no further optimizations to apply
#[cfg(not(no_global_oom_handling))]
#[track_caller]
fn extend_desugared<I: Iterator<Item = T>>(&mut self, mut iterator: I) {
// This is the case for a general iterator.
//
@ -3835,7 +3809,6 @@ impl<T, A: Allocator> Vec<T, A> {
// specific extend for `TrustedLen` iterators, called both by the specializations
// and internal places where resolving specialization makes compilation slower
#[cfg(not(no_global_oom_handling))]
#[track_caller]
fn extend_trusted(&mut self, iterator: impl iter::TrustedLen<Item = T>) {
let (low, high) = iterator.size_hint();
if let Some(additional) = high {
@ -4013,19 +3986,16 @@ impl<T, A: Allocator> Vec<T, A> {
#[cfg(not(no_global_oom_handling))]
#[stable(feature = "extend_ref", since = "1.2.0")]
impl<'a, T: Copy + 'a, A: Allocator> Extend<&'a T> for Vec<T, A> {
#[track_caller]
fn extend<I: IntoIterator<Item = &'a T>>(&mut self, iter: I) {
self.spec_extend(iter.into_iter())
}
#[inline]
#[track_caller]
fn extend_one(&mut self, &item: &'a T) {
self.push(item);
}
#[inline]
#[track_caller]
fn extend_reserve(&mut self, additional: usize) {
self.reserve(additional);
}
@ -4136,7 +4106,6 @@ impl<T: Clone> From<&[T]> for Vec<T> {
/// ```
/// assert_eq!(Vec::from(&[1, 2, 3][..]), vec![1, 2, 3]);
/// ```
#[track_caller]
fn from(s: &[T]) -> Vec<T> {
s.to_vec()
}
@ -4152,7 +4121,6 @@ impl<T: Clone> From<&mut [T]> for Vec<T> {
/// ```
/// assert_eq!(Vec::from(&mut [1, 2, 3][..]), vec![1, 2, 3]);
/// ```
#[track_caller]
fn from(s: &mut [T]) -> Vec<T> {
s.to_vec()
}
@ -4168,7 +4136,6 @@ impl<T: Clone, const N: usize> From<&[T; N]> for Vec<T> {
/// ```
/// assert_eq!(Vec::from(&[1, 2, 3]), vec![1, 2, 3]);
/// ```
#[track_caller]
fn from(s: &[T; N]) -> Vec<T> {
Self::from(s.as_slice())
}
@ -4184,7 +4151,6 @@ impl<T: Clone, const N: usize> From<&mut [T; N]> for Vec<T> {
/// ```
/// assert_eq!(Vec::from(&mut [1, 2, 3]), vec![1, 2, 3]);
/// ```
#[track_caller]
fn from(s: &mut [T; N]) -> Vec<T> {
Self::from(s.as_mut_slice())
}
@ -4200,7 +4166,6 @@ impl<T, const N: usize> From<[T; N]> for Vec<T> {
/// ```
/// assert_eq!(Vec::from([1, 2, 3]), vec![1, 2, 3]);
/// ```
#[track_caller]
fn from(s: [T; N]) -> Vec<T> {
<[T]>::into_vec(Box::new(s))
}
@ -4225,7 +4190,6 @@ where
/// let b: Cow<'_, [i32]> = Cow::Borrowed(&[1, 2, 3]);
/// assert_eq!(Vec::from(o), Vec::from(b));
/// ```
#[track_caller]
fn from(s: Cow<'a, [T]>) -> Vec<T> {
s.into_owned()
}
@ -4272,7 +4236,6 @@ impl<T, A: Allocator> From<Vec<T, A>> for Box<[T], A> {
///
/// assert_eq!(Box::from(vec), vec![1, 2, 3].into_boxed_slice());
/// ```
#[track_caller]
fn from(v: Vec<T, A>) -> Self {
v.into_boxed_slice()
}
@ -4288,7 +4251,6 @@ impl From<&str> for Vec<u8> {
/// ```
/// assert_eq!(Vec::from("123"), vec![b'1', b'2', b'3']);
/// ```
#[track_caller]
fn from(s: &str) -> Vec<u8> {
From::from(s.as_bytes())
}

View file

@ -6,7 +6,6 @@ use crate::alloc::Allocator;
// Specialization trait used for Vec::extend
pub(super) trait SpecExtend<T, I> {
#[track_caller]
fn spec_extend(&mut self, iter: I);
}
@ -14,7 +13,6 @@ impl<T, I, A: Allocator> SpecExtend<T, I> for Vec<T, A>
where
I: Iterator<Item = T>,
{
#[track_caller]
default fn spec_extend(&mut self, iter: I) {
self.extend_desugared(iter)
}
@ -24,14 +22,12 @@ impl<T, I, A: Allocator> SpecExtend<T, I> for Vec<T, A>
where
I: TrustedLen<Item = T>,
{
#[track_caller]
default fn spec_extend(&mut self, iterator: I) {
self.extend_trusted(iterator)
}
}
impl<T, A: Allocator> SpecExtend<T, IntoIter<T>> for Vec<T, A> {
#[track_caller]
fn spec_extend(&mut self, mut iterator: IntoIter<T>) {
unsafe {
self.append_elements(iterator.as_slice() as _);
@ -45,7 +41,6 @@ where
I: Iterator<Item = &'a T>,
T: Clone,
{
#[track_caller]
default fn spec_extend(&mut self, iterator: I) {
self.spec_extend(iterator.cloned())
}
@ -55,7 +50,6 @@ impl<'a, T: 'a, A: Allocator> SpecExtend<&'a T, slice::Iter<'a, T>> for Vec<T, A
where
T: Copy,
{
#[track_caller]
fn spec_extend(&mut self, iterator: slice::Iter<'a, T>) {
let slice = iterator.as_slice();
unsafe { self.append_elements(slice) };

View file

@ -10,7 +10,6 @@ pub(super) trait SpecFromElem: Sized {
}
impl<T: Clone> SpecFromElem for T {
#[track_caller]
default fn from_elem<A: Allocator>(elem: Self, n: usize, alloc: A) -> Vec<Self, A> {
let mut v = Vec::with_capacity_in(n, alloc);
v.extend_with(n, elem);
@ -20,7 +19,6 @@ impl<T: Clone> SpecFromElem for T {
impl<T: Clone + IsZero> SpecFromElem for T {
#[inline]
#[track_caller]
default fn from_elem<A: Allocator>(elem: T, n: usize, alloc: A) -> Vec<T, A> {
if elem.is_zero() {
return Vec { buf: RawVec::with_capacity_zeroed_in(n, alloc), len: n };
@ -33,7 +31,6 @@ impl<T: Clone + IsZero> SpecFromElem for T {
impl SpecFromElem for i8 {
#[inline]
#[track_caller]
fn from_elem<A: Allocator>(elem: i8, n: usize, alloc: A) -> Vec<i8, A> {
if elem == 0 {
return Vec { buf: RawVec::with_capacity_zeroed_in(n, alloc), len: n };
@ -49,7 +46,6 @@ impl SpecFromElem for i8 {
impl SpecFromElem for u8 {
#[inline]
#[track_caller]
fn from_elem<A: Allocator>(elem: u8, n: usize, alloc: A) -> Vec<u8, A> {
if elem == 0 {
return Vec { buf: RawVec::with_capacity_zeroed_in(n, alloc), len: n };

View file

@ -29,14 +29,12 @@ impl<T, I> SpecFromIter<T, I> for Vec<T>
where
I: Iterator<Item = T>,
{
#[track_caller]
default fn from_iter(iterator: I) -> Self {
SpecFromIterNested::from_iter(iterator)
}
}
impl<T> SpecFromIter<T, IntoIter<T>> for Vec<T> {
#[track_caller]
fn from_iter(iterator: IntoIter<T>) -> Self {
// A common case is passing a vector into a function which immediately
// re-collects into a vector. We can short circuit this if the IntoIter

View file

@ -15,7 +15,6 @@ impl<T, I> SpecFromIterNested<T, I> for Vec<T>
where
I: Iterator<Item = T>,
{
#[track_caller]
default fn from_iter(mut iterator: I) -> Self {
// Unroll the first iteration, as the vector is going to be
// expanded on this iteration in every case when the iterable is not
@ -48,7 +47,6 @@ impl<T, I> SpecFromIterNested<T, I> for Vec<T>
where
I: TrustedLen<Item = T>,
{
#[track_caller]
fn from_iter(iterator: I) -> Self {
let mut vector = match iterator.size_hint() {
(_, Some(upper)) => Vec::with_capacity(upper),

View file

@ -52,7 +52,6 @@ impl<I: Iterator, A: Allocator> ExactSizeIterator for Splice<'_, I, A> {}
#[stable(feature = "vec_splice", since = "1.21.0")]
impl<I: Iterator, A: Allocator> Drop for Splice<'_, I, A> {
#[track_caller]
fn drop(&mut self) {
self.drain.by_ref().for_each(drop);
// At this point draining is done and the only remaining tasks are splicing
@ -124,7 +123,6 @@ impl<T, A: Allocator> Drain<'_, T, A> {
}
/// Makes room for inserting more elements before the tail.
#[track_caller]
unsafe fn move_tail(&mut self, additional: usize) {
let vec = unsafe { self.vec.as_mut() };
let len = self.tail_start + self.tail_len;

View file

@ -56,7 +56,7 @@ pub use self::primitives::{c_ptrdiff_t, c_size_t, c_ssize_t};
// be UB.
#[doc = include_str!("c_void.md")]
#[lang = "c_void"]
#[cfg_attr(not(doc), repr(u8))] // An implementation detail we don't want to show up in rustdoc
#[repr(u8)]
#[stable(feature = "core_c_void", since = "1.30.0")]
pub enum c_void {
#[unstable(

View file

@ -25,7 +25,7 @@ crate::cfg_select! {
///
/// [AArch64 Procedure Call Standard]:
/// http://infocenter.arm.com/help/topic/com.arm.doc.ihi0055b/IHI0055B_aapcs64.pdf
#[cfg_attr(not(doc), repr(C))] // work around https://github.com/rust-lang/rust/issues/66401
#[repr(C)]
#[derive(Debug)]
#[lang = "va_list"]
pub struct VaListImpl<'f> {
@ -39,7 +39,7 @@ crate::cfg_select! {
}
all(target_arch = "powerpc", not(target_os = "uefi"), not(windows)) => {
/// PowerPC ABI implementation of a `va_list`.
#[cfg_attr(not(doc), repr(C))] // work around https://github.com/rust-lang/rust/issues/66401
#[repr(C)]
#[derive(Debug)]
#[lang = "va_list"]
pub struct VaListImpl<'f> {
@ -53,7 +53,7 @@ crate::cfg_select! {
}
target_arch = "s390x" => {
/// s390x ABI implementation of a `va_list`.
#[cfg_attr(not(doc), repr(C))] // work around https://github.com/rust-lang/rust/issues/66401
#[repr(C)]
#[derive(Debug)]
#[lang = "va_list"]
pub struct VaListImpl<'f> {
@ -66,7 +66,7 @@ crate::cfg_select! {
}
all(target_arch = "x86_64", not(target_os = "uefi"), not(windows)) => {
/// x86_64 ABI implementation of a `va_list`.
#[cfg_attr(not(doc), repr(C))] // work around https://github.com/rust-lang/rust/issues/66401
#[repr(C)]
#[derive(Debug)]
#[lang = "va_list"]
pub struct VaListImpl<'f> {

View file

@ -386,8 +386,8 @@ impl FormattingOptions {
/// used. The alternate forms are:
/// - [`Debug`] : pretty-print the [`Debug`] formatting (adds linebreaks and indentation)
/// - [`LowerHex`] as well as [`UpperHex`] - precedes the argument with a `0x`
/// - [`Octal`] - precedes the argument with a `0b`
/// - [`Binary`] - precedes the argument with a `0o`
/// - [`Octal`] - precedes the argument with a `0o`
/// - [`Binary`] - precedes the argument with a `0b`
#[unstable(feature = "formatting_options", issue = "118117")]
pub const fn alternate(&mut self, alternate: bool) -> &mut Self {
if alternate {

View file

@ -1341,11 +1341,3 @@ pub macro CoercePointee($item:item) {
pub trait CoercePointeeValidated {
/* compiler built-in */
}
/// Allows value to be reborrowed as exclusive, creating a copy of the value
/// that disables the source for reads and writes for the lifetime of the copy.
#[lang = "reborrow"]
#[unstable(feature = "reborrow", issue = "145612")]
pub trait Reborrow {
// Empty.
}

View file

@ -33,12 +33,12 @@ pub mod consts {
/// The golden ratio (φ)
#[unstable(feature = "f128", issue = "116909")]
// Also, #[unstable(feature = "more_float_constants", issue = "103883")]
// Also, #[unstable(feature = "more_float_constants", issue = "146939")]
pub const PHI: f128 = 1.61803398874989484820458683436563811772030917980576286213545_f128;
/// The Euler-Mascheroni constant (γ)
#[unstable(feature = "f128", issue = "116909")]
// Also, #[unstable(feature = "more_float_constants", issue = "103883")]
// Also, #[unstable(feature = "more_float_constants", issue = "146939")]
pub const EGAMMA: f128 = 0.577215664901532860606512090082402431042159335939923598805767_f128;
/// π/2
@ -67,14 +67,14 @@ pub mod consts {
/// 1/sqrt(π)
#[unstable(feature = "f128", issue = "116909")]
// Also, #[unstable(feature = "more_float_constants", issue = "103883")]
// Also, #[unstable(feature = "more_float_constants", issue = "146939")]
pub const FRAC_1_SQRT_PI: f128 =
0.564189583547756286948079451560772585844050629328998856844086_f128;
/// 1/sqrt(2π)
#[doc(alias = "FRAC_1_SQRT_TAU")]
#[unstable(feature = "f128", issue = "116909")]
// Also, #[unstable(feature = "more_float_constants", issue = "103883")]
// Also, #[unstable(feature = "more_float_constants", issue = "146939")]
pub const FRAC_1_SQRT_2PI: f128 =
0.398942280401432677939946059934381868475858631164934657665926_f128;
@ -98,12 +98,12 @@ pub mod consts {
/// sqrt(3)
#[unstable(feature = "f128", issue = "116909")]
// Also, #[unstable(feature = "more_float_constants", issue = "103883")]
// Also, #[unstable(feature = "more_float_constants", issue = "146939")]
pub const SQRT_3: f128 = 1.73205080756887729352744634150587236694280525381038062805581_f128;
/// 1/sqrt(3)
#[unstable(feature = "f128", issue = "116909")]
// Also, #[unstable(feature = "more_float_constants", issue = "103883")]
// Also, #[unstable(feature = "more_float_constants", issue = "146939")]
pub const FRAC_1_SQRT_3: f128 =
0.577350269189625764509148780501957455647601751270126876018602_f128;

View file

@ -35,12 +35,12 @@ pub mod consts {
/// The golden ratio (φ)
#[unstable(feature = "f16", issue = "116909")]
// Also, #[unstable(feature = "more_float_constants", issue = "103883")]
// Also, #[unstable(feature = "more_float_constants", issue = "146939")]
pub const PHI: f16 = 1.618033988749894848204586834365638118_f16;
/// The Euler-Mascheroni constant (γ)
#[unstable(feature = "f16", issue = "116909")]
// Also, #[unstable(feature = "more_float_constants", issue = "103883")]
// Also, #[unstable(feature = "more_float_constants", issue = "146939")]
pub const EGAMMA: f16 = 0.577215664901532860606512090082402431_f16;
/// π/2
@ -69,13 +69,13 @@ pub mod consts {
/// 1/sqrt(π)
#[unstable(feature = "f16", issue = "116909")]
// Also, #[unstable(feature = "more_float_constants", issue = "103883")]
// Also, #[unstable(feature = "more_float_constants", issue = "146939")]
pub const FRAC_1_SQRT_PI: f16 = 0.564189583547756286948079451560772586_f16;
/// 1/sqrt(2π)
#[doc(alias = "FRAC_1_SQRT_TAU")]
#[unstable(feature = "f16", issue = "116909")]
// Also, #[unstable(feature = "more_float_constants", issue = "103883")]
// Also, #[unstable(feature = "more_float_constants", issue = "146939")]
pub const FRAC_1_SQRT_2PI: f16 = 0.398942280401432677939946059934381868_f16;
/// 2/π
@ -96,12 +96,12 @@ pub mod consts {
/// sqrt(3)
#[unstable(feature = "f16", issue = "116909")]
// Also, #[unstable(feature = "more_float_constants", issue = "103883")]
// Also, #[unstable(feature = "more_float_constants", issue = "146939")]
pub const SQRT_3: f16 = 1.732050807568877293527446341505872367_f16;
/// 1/sqrt(3)
#[unstable(feature = "f16", issue = "116909")]
// Also, #[unstable(feature = "more_float_constants", issue = "103883")]
// Also, #[unstable(feature = "more_float_constants", issue = "146939")]
pub const FRAC_1_SQRT_3: f16 = 0.577350269189625764509148780501957456_f16;
/// Euler's number (e)

View file

@ -291,11 +291,11 @@ pub mod consts {
pub const TAU: f32 = 6.28318530717958647692528676655900577_f32;
/// The golden ratio (φ)
#[unstable(feature = "more_float_constants", issue = "103883")]
#[unstable(feature = "more_float_constants", issue = "146939")]
pub const PHI: f32 = 1.618033988749894848204586834365638118_f32;
/// The Euler-Mascheroni constant (γ)
#[unstable(feature = "more_float_constants", issue = "103883")]
#[unstable(feature = "more_float_constants", issue = "146939")]
pub const EGAMMA: f32 = 0.577215664901532860606512090082402431_f32;
/// π/2
@ -323,12 +323,12 @@ pub mod consts {
pub const FRAC_1_PI: f32 = 0.318309886183790671537767526745028724_f32;
/// 1/sqrt(π)
#[unstable(feature = "more_float_constants", issue = "103883")]
#[unstable(feature = "more_float_constants", issue = "146939")]
pub const FRAC_1_SQRT_PI: f32 = 0.564189583547756286948079451560772586_f32;
/// 1/sqrt(2π)
#[doc(alias = "FRAC_1_SQRT_TAU")]
#[unstable(feature = "more_float_constants", issue = "103883")]
#[unstable(feature = "more_float_constants", issue = "146939")]
pub const FRAC_1_SQRT_2PI: f32 = 0.398942280401432677939946059934381868_f32;
/// 2/π
@ -348,11 +348,11 @@ pub mod consts {
pub const FRAC_1_SQRT_2: f32 = 0.707106781186547524400844362104849039_f32;
/// sqrt(3)
#[unstable(feature = "more_float_constants", issue = "103883")]
#[unstable(feature = "more_float_constants", issue = "146939")]
pub const SQRT_3: f32 = 1.732050807568877293527446341505872367_f32;
/// 1/sqrt(3)
#[unstable(feature = "more_float_constants", issue = "103883")]
#[unstable(feature = "more_float_constants", issue = "146939")]
pub const FRAC_1_SQRT_3: f32 = 0.577350269189625764509148780501957456_f32;
/// Euler's number (e)

View file

@ -291,11 +291,11 @@ pub mod consts {
pub const TAU: f64 = 6.28318530717958647692528676655900577_f64;
/// The golden ratio (φ)
#[unstable(feature = "more_float_constants", issue = "103883")]
#[unstable(feature = "more_float_constants", issue = "146939")]
pub const PHI: f64 = 1.618033988749894848204586834365638118_f64;
/// The Euler-Mascheroni constant (γ)
#[unstable(feature = "more_float_constants", issue = "103883")]
#[unstable(feature = "more_float_constants", issue = "146939")]
pub const EGAMMA: f64 = 0.577215664901532860606512090082402431_f64;
/// π/2
@ -323,12 +323,12 @@ pub mod consts {
pub const FRAC_1_PI: f64 = 0.318309886183790671537767526745028724_f64;
/// 1/sqrt(π)
#[unstable(feature = "more_float_constants", issue = "103883")]
#[unstable(feature = "more_float_constants", issue = "146939")]
pub const FRAC_1_SQRT_PI: f64 = 0.564189583547756286948079451560772586_f64;
/// 1/sqrt(2π)
#[doc(alias = "FRAC_1_SQRT_TAU")]
#[unstable(feature = "more_float_constants", issue = "103883")]
#[unstable(feature = "more_float_constants", issue = "146939")]
pub const FRAC_1_SQRT_2PI: f64 = 0.398942280401432677939946059934381868_f64;
/// 2/π
@ -348,11 +348,11 @@ pub mod consts {
pub const FRAC_1_SQRT_2: f64 = 0.707106781186547524400844362104849039_f64;
/// sqrt(3)
#[unstable(feature = "more_float_constants", issue = "103883")]
#[unstable(feature = "more_float_constants", issue = "146939")]
pub const SQRT_3: f64 = 1.732050807568877293527446341505872367_f64;
/// 1/sqrt(3)
#[unstable(feature = "more_float_constants", issue = "103883")]
#[unstable(feature = "more_float_constants", issue = "146939")]
pub const FRAC_1_SQRT_3: f64 = 0.577350269189625764509148780501957456_f64;
/// Euler's number (e)

View file

@ -149,6 +149,7 @@ mod function;
mod index;
mod index_range;
mod range;
mod reborrow;
mod try_trait;
mod unsize;
@ -189,6 +190,8 @@ pub use self::range::{Bound, RangeBounds, RangeInclusive, RangeToInclusive};
pub use self::range::{OneSidedRange, OneSidedRangeBound};
#[stable(feature = "rust1", since = "1.0.0")]
pub use self::range::{Range, RangeFrom, RangeFull, RangeTo};
#[unstable(feature = "reborrow", issue = "145612")]
pub use self::reborrow::{CoerceShared, Reborrow};
#[unstable(feature = "try_trait_v2_residual", issue = "91285")]
pub use self::try_trait::Residual;
#[unstable(feature = "try_trait_v2_yeet", issue = "96374")]

View file

@ -0,0 +1,16 @@
/// Allows a value to be reborrowed as exclusive, creating a copy of the value
/// that disables the source for reads and writes for the lifetime of the copy.
#[lang = "reborrow"]
#[unstable(feature = "reborrow", issue = "145612")]
pub trait Reborrow {
// Empty.
}
/// Allows a reborrowable value to be reborrowed as shared, creating a copy
/// that disables the source for writes for the lifetime of the copy.
#[lang = "coerce_shared"]
#[unstable(feature = "reborrow", issue = "145612")]
pub trait CoerceShared: Reborrow {
/// The type of this value when reborrowed as shared.
type Target: Copy;
}

View file

@ -6,7 +6,7 @@
use crate::fmt;
/// Equivalent to Objective-Cs `struct objc_class` type.
#[cfg_attr(not(doc), repr(u8))] // An implementation detail we don't want to show up in rustdoc
#[repr(u8)]
pub enum objc_class {
#[unstable(
feature = "objc_class_variant",
@ -31,7 +31,7 @@ impl fmt::Debug for objc_class {
}
/// Equivalent to Objective-Cs `struct objc_selector` type.
#[cfg_attr(not(doc), repr(u8))] // An implementation detail we don't want to show up in rustdoc
#[repr(u8)]
pub enum objc_selector {
#[unstable(
feature = "objc_selector_variant",

View file

@ -1,28 +1,32 @@
//! Defines [`Exclusive`].
use core::cmp::Ordering;
use core::fmt;
use core::future::Future;
use core::marker::Tuple;
use core::hash::{Hash, Hasher};
use core::marker::{StructuralPartialEq, Tuple};
use core::ops::{Coroutine, CoroutineState};
use core::pin::Pin;
use core::task::{Context, Poll};
/// `Exclusive` provides only _mutable_ access, also referred to as _exclusive_
/// access to the underlying value. It provides no _immutable_, or _shared_
/// access to the underlying value.
/// `Exclusive` provides _mutable_ access, also referred to as _exclusive_
/// access to the underlying value. However, it only permits _immutable_, or _shared_
/// access to the underlying value when that value is [`Sync`].
///
/// While this may seem not very useful, it allows `Exclusive` to _unconditionally_
/// implement [`Sync`]. Indeed, the safety requirements of `Sync` state that for `Exclusive`
/// implement `Sync`. Indeed, the safety requirements of `Sync` state that for `Exclusive`
/// to be `Sync`, it must be sound to _share_ across threads, that is, it must be sound
/// for `&Exclusive` to cross thread boundaries. By design, a `&Exclusive` has no API
/// whatsoever, making it useless, thus harmless, thus memory safe.
/// for `&Exclusive` to cross thread boundaries. By design, a `&Exclusive<T>` for non-`Sync` T
/// has no API whatsoever, making it useless, thus harmless, thus memory safe.
///
/// Certain constructs like [`Future`]s can only be used with _exclusive_ access,
/// and are often `Send` but not `Sync`, so `Exclusive` can be used as hint to the
/// Rust compiler that something is `Sync` in practice.
///
/// ## Examples
/// Using a non-`Sync` future prevents the wrapping struct from being `Sync`
///
/// Using a non-`Sync` future prevents the wrapping struct from being `Sync`:
///
/// ```compile_fail
/// use core::cell::Cell;
///
@ -43,7 +47,8 @@ use core::task::{Context, Poll};
/// ```
///
/// `Exclusive` ensures the struct is `Sync` without stripping the future of its
/// functionality.
/// functionality:
///
/// ```
/// #![feature(exclusive_wrapper)]
/// use core::cell::Cell;
@ -66,6 +71,7 @@ use core::task::{Context, Poll};
/// ```
///
/// ## Parallels with a mutex
///
/// In some sense, `Exclusive` can be thought of as a _compile-time_ version of
/// a mutex, as the borrow-checker guarantees that only one `&mut` can exist
/// for any value. This is a parallel with the fact that
@ -75,7 +81,7 @@ use core::task::{Context, Poll};
#[doc(alias = "SyncWrapper")]
#[doc(alias = "SyncCell")]
#[doc(alias = "Unique")]
// `Exclusive` can't have `PartialOrd`, `Clone`, etc. impls as they would
// `Exclusive` can't have derived `PartialOrd`, `Clone`, etc. impls as they would
// use `&` access to the inner value, violating the `Sync` impl's safety
// requirements.
#[derive(Default)]
@ -195,6 +201,17 @@ where
}
}
#[unstable(feature = "exclusive_wrapper", issue = "98407")]
impl<F, Args> Fn<Args> for Exclusive<F>
where
F: Sync + Fn<Args>,
Args: Tuple,
{
extern "rust-call" fn call(&self, args: Args) -> Self::Output {
self.as_ref().call(args)
}
}
#[unstable(feature = "exclusive_wrapper", issue = "98407")]
impl<T> Future for Exclusive<T>
where
@ -221,3 +238,80 @@ where
G::resume(self.get_pin_mut(), arg)
}
}
#[unstable(feature = "exclusive_wrapper", issue = "98407")]
impl<T> AsRef<T> for Exclusive<T>
where
T: Sync + ?Sized,
{
#[inline]
fn as_ref(&self) -> &T {
&self.inner
}
}
#[unstable(feature = "exclusive_wrapper", issue = "98407")]
impl<T> Clone for Exclusive<T>
where
T: Sync + Clone,
{
#[inline]
fn clone(&self) -> Self {
Self { inner: self.inner.clone() }
}
}
#[unstable(feature = "exclusive_wrapper", issue = "98407")]
impl<T> Copy for Exclusive<T> where T: Sync + Copy {}
#[unstable(feature = "exclusive_wrapper", issue = "98407")]
impl<T, U> PartialEq<Exclusive<U>> for Exclusive<T>
where
T: Sync + PartialEq<U> + ?Sized,
U: Sync + ?Sized,
{
#[inline]
fn eq(&self, other: &Exclusive<U>) -> bool {
self.inner == other.inner
}
}
#[unstable(feature = "exclusive_wrapper", issue = "98407")]
impl<T> StructuralPartialEq for Exclusive<T> where T: Sync + StructuralPartialEq + ?Sized {}
#[unstable(feature = "exclusive_wrapper", issue = "98407")]
impl<T> Eq for Exclusive<T> where T: Sync + Eq + ?Sized {}
#[unstable(feature = "exclusive_wrapper", issue = "98407")]
impl<T> Hash for Exclusive<T>
where
T: Sync + Hash + ?Sized,
{
#[inline]
fn hash<H: Hasher>(&self, state: &mut H) {
Hash::hash(&self.inner, state)
}
}
#[unstable(feature = "exclusive_wrapper", issue = "98407")]
impl<T, U> PartialOrd<Exclusive<U>> for Exclusive<T>
where
T: Sync + PartialOrd<U> + ?Sized,
U: Sync + ?Sized,
{
#[inline]
fn partial_cmp(&self, other: &Exclusive<U>) -> Option<Ordering> {
self.inner.partial_cmp(&other.inner)
}
}
#[unstable(feature = "exclusive_wrapper", issue = "98407")]
impl<T> Ord for Exclusive<T>
where
T: Sync + Ord + ?Sized,
{
#[inline]
fn cmp(&self, other: &Self) -> Ordering {
self.inner.cmp(&other.inner)
}
}

View file

@ -3234,7 +3234,7 @@ fn inlined_slow_read_byte<R: Read>(reader: &mut R) -> Option<Result<u8>> {
}
}
// Used by `BufReader::spec_read_byte`, for which the `inline(ever)` is
// Used by `BufReader::spec_read_byte`, for which the `inline(never)` is
// important.
#[inline(never)]
fn uninlined_slow_read_byte<R: Read>(reader: &mut R) -> Option<Result<u8>> {

View file

@ -94,7 +94,7 @@
//! pull-requests for your suggested changes.
//!
//! Contributions are appreciated! If you see a part of the docs that can be
//! improved, submit a PR, or chat with us first on [Discord][rust-discord]
//! improved, submit a PR, or chat with us first on [Zulip][rust-zulip]
//! #docs.
//!
//! # A Tour of The Rust Standard Library
@ -212,7 +212,7 @@
//! [multithreading]: thread
//! [other]: #what-is-in-the-standard-library-documentation
//! [primitive types]: ../book/ch03-02-data-types.html
//! [rust-discord]: https://discord.gg/rust-lang
//! [rust-zulip]: https://rust-lang.zulipchat.com/
//! [array]: prim@array
//! [slice]: prim@slice

View file

@ -4,6 +4,8 @@
use crate::sealed::Sealed;
use crate::sys_common::AsInner;
#[cfg(target_os = "linux")]
use crate::time::Duration;
use crate::{io, net};
/// Os-specific extensions for [`TcpStream`]
@ -59,11 +61,13 @@ pub trait TcpStreamExt: Sealed {
/// A socket listener will be awakened solely when data arrives.
///
/// The `accept` argument set the delay in seconds until the
    /// The `accept` argument sets the maximum delay until the
/// data is available to read, reducing the number of short lived
/// connections without data to process.
    /// Contrary to other platforms' `SO_ACCEPTFILTER` feature equivalent, there is
/// no necessity to set it after the `listen` call.
    /// Note that the delay is expressed as a Duration from the user's perspective;
    /// the call rounds it down to the nearest second expressible as a `c_int`.
///
/// See [`man 7 tcp`](https://man7.org/linux/man-pages/man7/tcp.7.html)
///
@ -73,16 +77,17 @@ pub trait TcpStreamExt: Sealed {
/// #![feature(tcp_deferaccept)]
/// use std::net::TcpStream;
/// use std::os::linux::net::TcpStreamExt;
/// use std::time::Duration;
///
/// let stream = TcpStream::connect("127.0.0.1:8080")
/// .expect("Couldn't connect to the server...");
/// stream.set_deferaccept(1).expect("set_deferaccept call failed");
/// stream.set_deferaccept(Duration::from_secs(1u64)).expect("set_deferaccept call failed");
/// ```
#[unstable(feature = "tcp_deferaccept", issue = "119639")]
#[cfg(target_os = "linux")]
fn set_deferaccept(&self, accept: u32) -> io::Result<()>;
fn set_deferaccept(&self, accept: Duration) -> io::Result<()>;
/// Gets the accept delay value (in seconds) of the `TCP_DEFER_ACCEPT` option.
/// Gets the accept delay value of the `TCP_DEFER_ACCEPT` option.
///
/// For more information about this option, see [`TcpStreamExt::set_deferaccept`].
///
@ -92,15 +97,16 @@ pub trait TcpStreamExt: Sealed {
/// #![feature(tcp_deferaccept)]
/// use std::net::TcpStream;
/// use std::os::linux::net::TcpStreamExt;
/// use std::time::Duration;
///
/// let stream = TcpStream::connect("127.0.0.1:8080")
/// .expect("Couldn't connect to the server...");
/// stream.set_deferaccept(1).expect("set_deferaccept call failed");
/// assert_eq!(stream.deferaccept().unwrap_or(0), 1);
/// stream.set_deferaccept(Duration::from_secs(1u64)).expect("set_deferaccept call failed");
/// assert_eq!(stream.deferaccept().unwrap(), Duration::from_secs(1u64));
/// ```
#[unstable(feature = "tcp_deferaccept", issue = "119639")]
#[cfg(target_os = "linux")]
fn deferaccept(&self) -> io::Result<u32>;
fn deferaccept(&self) -> io::Result<Duration>;
}
#[stable(feature = "tcp_quickack", since = "1.89.0")]
@ -117,12 +123,12 @@ impl TcpStreamExt for net::TcpStream {
}
#[cfg(target_os = "linux")]
fn set_deferaccept(&self, accept: u32) -> io::Result<()> {
fn set_deferaccept(&self, accept: Duration) -> io::Result<()> {
self.as_inner().as_inner().set_deferaccept(accept)
}
#[cfg(target_os = "linux")]
fn deferaccept(&self) -> io::Result<u32> {
fn deferaccept(&self) -> io::Result<Duration> {
self.as_inner().as_inner().deferaccept()
}
}

View file

@ -32,6 +32,7 @@ fn deferaccept() {
use crate::net::test::next_test_ip4;
use crate::net::{TcpListener, TcpStream};
use crate::os::net::linux_ext::tcp::TcpStreamExt;
use crate::time::Duration;
macro_rules! t {
($e:expr) => {
@ -43,10 +44,12 @@ fn deferaccept() {
}
let addr = next_test_ip4();
let one = Duration::from_secs(1u64);
let zero = Duration::from_secs(0u64);
let _listener = t!(TcpListener::bind(&addr));
let stream = t!(TcpStream::connect(&("localhost", addr.port())));
stream.set_deferaccept(1).expect("set_deferaccept failed");
assert_eq!(stream.deferaccept().unwrap(), 1);
stream.set_deferaccept(0).expect("set_deferaccept failed");
assert_eq!(stream.deferaccept().unwrap(), 0);
stream.set_deferaccept(one).expect("set_deferaccept failed");
assert_eq!(stream.deferaccept().unwrap(), one);
stream.set_deferaccept(zero).expect("set_deferaccept failed");
assert_eq!(stream.deferaccept().unwrap(), zero);
}

View file

@ -485,14 +485,15 @@ impl Socket {
// bionic libc makes no use of this flag
#[cfg(target_os = "linux")]
pub fn set_deferaccept(&self, accept: u32) -> io::Result<()> {
setsockopt(self, libc::IPPROTO_TCP, libc::TCP_DEFER_ACCEPT, accept as c_int)
pub fn set_deferaccept(&self, accept: Duration) -> io::Result<()> {
let val = cmp::min(accept.as_secs(), c_int::MAX as u64) as c_int;
setsockopt(self, libc::IPPROTO_TCP, libc::TCP_DEFER_ACCEPT, val)
}
#[cfg(target_os = "linux")]
pub fn deferaccept(&self) -> io::Result<u32> {
pub fn deferaccept(&self) -> io::Result<Duration> {
let raw: c_int = getsockopt(self, libc::IPPROTO_TCP, libc::TCP_DEFER_ACCEPT)?;
Ok(raw as u32)
Ok(Duration::from_secs(raw as _))
}
#[cfg(any(target_os = "freebsd", target_os = "netbsd"))]

View file

@ -16,7 +16,7 @@ use crate::{fmt, io, iter, mem, ptr, slice, str};
const TMPBUF_SZ: usize = 128;
const PATH_SEPARATOR: u8 = if cfg!(target_os = "redox") { b';' } else { b':' };
const PATH_SEPARATOR: u8 = b':';
unsafe extern "C" {
#[cfg(not(any(target_os = "dragonfly", target_os = "vxworks", target_os = "rtems")))]

View file

@ -1221,7 +1221,7 @@ pub fn rustc_cargo(
// us a faster startup time. However GNU ld < 2.40 will error if we try to link a shared object
// with direct references to protected symbols, so for now we only use protected symbols if
// linking with LLD is enabled.
if builder.build.config.lld_mode.is_used() {
if builder.build.config.bootstrap_override_lld.is_used() {
cargo.rustflag("-Zdefault-visibility=protected");
}
@ -1258,7 +1258,7 @@ pub fn rustc_cargo(
// is already on by default in MSVC optimized builds, which is interpreted as --icf=all:
// https://github.com/llvm/llvm-project/blob/3329cec2f79185bafd678f310fafadba2a8c76d2/lld/COFF/Driver.cpp#L1746
// https://github.com/rust-lang/rust/blob/f22819bcce4abaff7d1246a56eec493418f9f4ee/compiler/rustc_codegen_ssa/src/back/linker.rs#L827
if builder.config.lld_mode.is_used() && !build_compiler.host.is_msvc() {
if builder.config.bootstrap_override_lld.is_used() && !build_compiler.host.is_msvc() {
cargo.rustflag("-Clink-args=-Wl,--icf=all");
}

View file

@ -41,7 +41,7 @@ use crate::core::config::toml::gcc::Gcc;
use crate::core::config::toml::install::Install;
use crate::core::config::toml::llvm::Llvm;
use crate::core::config::toml::rust::{
LldMode, Rust, RustOptimize, check_incompatible_options_for_ci_rustc,
BootstrapOverrideLld, Rust, RustOptimize, check_incompatible_options_for_ci_rustc,
default_lld_opt_in_targets, parse_codegen_backends,
};
use crate::core::config::toml::target::Target;
@ -174,7 +174,7 @@ pub struct Config {
pub llvm_from_ci: bool,
pub llvm_build_config: HashMap<String, String>,
pub lld_mode: LldMode,
pub bootstrap_override_lld: BootstrapOverrideLld,
pub lld_enabled: bool,
pub llvm_tools_enabled: bool,
pub llvm_bitcode_linker_enabled: bool,
@ -567,7 +567,8 @@ impl Config {
frame_pointers: rust_frame_pointers,
stack_protector: rust_stack_protector,
strip: rust_strip,
lld_mode: rust_lld_mode,
bootstrap_override_lld: rust_bootstrap_override_lld,
bootstrap_override_lld_legacy: rust_bootstrap_override_lld_legacy,
std_features: rust_std_features,
break_on_ice: rust_break_on_ice,
} = toml.rust.unwrap_or_default();
@ -615,6 +616,15 @@ impl Config {
let Gcc { download_ci_gcc: gcc_download_ci_gcc } = toml.gcc.unwrap_or_default();
if rust_bootstrap_override_lld.is_some() && rust_bootstrap_override_lld_legacy.is_some() {
panic!(
"Cannot use both `rust.use-lld` and `rust.bootstrap-override-lld`. Please use only `rust.bootstrap-override-lld`"
);
}
let bootstrap_override_lld =
rust_bootstrap_override_lld.or(rust_bootstrap_override_lld_legacy).unwrap_or_default();
if rust_optimize.as_ref().is_some_and(|v| matches!(v, RustOptimize::Bool(false))) {
eprintln!(
"WARNING: setting `optimize` to `false` is known to cause errors and \
@ -960,7 +970,7 @@ impl Config {
let initial_rustfmt = build_rustfmt.or_else(|| maybe_download_rustfmt(&dwn_ctx, &out));
if matches!(rust_lld_mode.unwrap_or_default(), LldMode::SelfContained)
if matches!(bootstrap_override_lld, BootstrapOverrideLld::SelfContained)
&& !lld_enabled
&& flags_stage.unwrap_or(0) > 0
{
@ -1172,6 +1182,7 @@ impl Config {
backtrace_on_ice: rust_backtrace_on_ice.unwrap_or(false),
bindir: install_bindir.map(PathBuf::from).unwrap_or("bin".into()),
bootstrap_cache_path: build_bootstrap_cache_path,
bootstrap_override_lld,
bypass_bootstrap_lock: flags_bypass_bootstrap_lock,
cargo_info,
cargo_native_static: build_cargo_native_static.unwrap_or(false),
@ -1238,7 +1249,6 @@ impl Config {
libdir: install_libdir.map(PathBuf::from),
library_docs_private_items: build_library_docs_private_items.unwrap_or(false),
lld_enabled,
lld_mode: rust_lld_mode.unwrap_or_default(),
lldb: build_lldb.map(PathBuf::from),
llvm_allow_old_toolchain: llvm_allow_old_toolchain.unwrap_or(false),
llvm_assertions,

View file

@ -37,7 +37,7 @@ use serde_derive::Deserialize;
pub use target_selection::TargetSelection;
pub use toml::BUILDER_CONFIG_FILENAME;
pub use toml::change_id::ChangeId;
pub use toml::rust::LldMode;
pub use toml::rust::BootstrapOverrideLld;
pub use toml::target::Target;
use crate::Display;

View file

@ -17,7 +17,9 @@ use crate::core::build_steps::clippy::{LintConfig, get_clippy_rules_in_order};
use crate::core::build_steps::llvm::LLVM_INVALIDATION_PATHS;
use crate::core::build_steps::{llvm, test};
use crate::core::config::toml::TomlConfig;
use crate::core::config::{CompilerBuiltins, LldMode, StringOrBool, Target, TargetSelection};
use crate::core::config::{
BootstrapOverrideLld, CompilerBuiltins, StringOrBool, Target, TargetSelection,
};
use crate::utils::tests::TestCtx;
use crate::utils::tests::git::git_test;
@ -222,11 +224,33 @@ fn verify_file_integrity() {
#[test]
fn rust_lld() {
assert!(matches!(parse("").lld_mode, LldMode::Unused));
assert!(matches!(parse("rust.use-lld = \"self-contained\"").lld_mode, LldMode::SelfContained));
assert!(matches!(parse("rust.use-lld = \"external\"").lld_mode, LldMode::External));
assert!(matches!(parse("rust.use-lld = true").lld_mode, LldMode::External));
assert!(matches!(parse("rust.use-lld = false").lld_mode, LldMode::Unused));
assert!(matches!(parse("").bootstrap_override_lld, BootstrapOverrideLld::None));
assert!(matches!(
parse("rust.bootstrap-override-lld = \"self-contained\"").bootstrap_override_lld,
BootstrapOverrideLld::SelfContained
));
assert!(matches!(
parse("rust.bootstrap-override-lld = \"external\"").bootstrap_override_lld,
BootstrapOverrideLld::External
));
assert!(matches!(
parse("rust.bootstrap-override-lld = true").bootstrap_override_lld,
BootstrapOverrideLld::External
));
assert!(matches!(
parse("rust.bootstrap-override-lld = false").bootstrap_override_lld,
BootstrapOverrideLld::None
));
// Also check the legacy options
assert!(matches!(
parse("rust.use-lld = true").bootstrap_override_lld,
BootstrapOverrideLld::External
));
assert!(matches!(
parse("rust.use-lld = false").bootstrap_override_lld,
BootstrapOverrideLld::None
));
}
#[test]

View file

@ -45,7 +45,9 @@ define_config! {
codegen_backends: Option<Vec<String>> = "codegen-backends",
llvm_bitcode_linker: Option<bool> = "llvm-bitcode-linker",
lld: Option<bool> = "lld",
lld_mode: Option<LldMode> = "use-lld",
bootstrap_override_lld: Option<BootstrapOverrideLld> = "bootstrap-override-lld",
// FIXME: Remove this option in Spring 2026
bootstrap_override_lld_legacy: Option<BootstrapOverrideLld> = "use-lld",
llvm_tools: Option<bool> = "llvm-tools",
deny_warnings: Option<bool> = "deny-warnings",
backtrace_on_ice: Option<bool> = "backtrace-on-ice",
@ -70,22 +72,33 @@ define_config! {
}
}
/// LLD in bootstrap works like this:
/// - Self-contained lld: use `rust-lld` from the compiler's sysroot
/// Determines whether we should override the linker used for linking Rust code
/// built during the bootstrapping process to be LLD.
///
/// The primary use-case for this is to make local (re)builds of Rust code faster
/// when using bootstrap.
///
/// This does not affect the *behavior* of the built/distributed compiler when invoked
/// outside of bootstrap.
/// It might affect its performance/binary size though, as that can depend on the
/// linker that links rustc.
///
/// There are two ways of overriding the linker to be LLD:
/// - Self-contained LLD: use `rust-lld` from the compiler's sysroot
/// - External: use an external `lld` binary
///
/// It is configured depending on the target:
/// 1) Everything except MSVC
/// - Self-contained: `-Clinker-flavor=gnu-lld-cc -Clink-self-contained=+linker`
/// - External: `-Clinker-flavor=gnu-lld-cc`
/// - Self-contained: `-Clinker-features=+lld -Clink-self-contained=+linker`
/// - External: `-Clinker-features=+lld`
/// 2) MSVC
/// - Self-contained: `-Clinker=<path to rust-lld>`
/// - External: `-Clinker=lld`
#[derive(Copy, Clone, Default, Debug, PartialEq)]
pub enum LldMode {
/// Do not use LLD
pub enum BootstrapOverrideLld {
    /// Do not override the linker to be LLD
#[default]
Unused,
None,
/// Use `rust-lld` from the compiler's sysroot
SelfContained,
/// Use an externally provided `lld` binary.
@ -94,16 +107,16 @@ pub enum LldMode {
External,
}
impl LldMode {
impl BootstrapOverrideLld {
pub fn is_used(&self) -> bool {
match self {
LldMode::SelfContained | LldMode::External => true,
LldMode::Unused => false,
BootstrapOverrideLld::SelfContained | BootstrapOverrideLld::External => true,
BootstrapOverrideLld::None => false,
}
}
}
impl<'de> Deserialize<'de> for LldMode {
impl<'de> Deserialize<'de> for BootstrapOverrideLld {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
@ -111,7 +124,7 @@ impl<'de> Deserialize<'de> for LldMode {
struct LldModeVisitor;
impl serde::de::Visitor<'_> for LldModeVisitor {
type Value = LldMode;
type Value = BootstrapOverrideLld;
fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
formatter.write_str("one of true, 'self-contained' or 'external'")
@ -121,7 +134,7 @@ impl<'de> Deserialize<'de> for LldMode {
where
E: serde::de::Error,
{
Ok(if v { LldMode::External } else { LldMode::Unused })
Ok(if v { BootstrapOverrideLld::External } else { BootstrapOverrideLld::None })
}
fn visit_str<E>(self, v: &str) -> Result<Self::Value, E>
@ -129,8 +142,8 @@ impl<'de> Deserialize<'de> for LldMode {
E: serde::de::Error,
{
match v {
"external" => Ok(LldMode::External),
"self-contained" => Ok(LldMode::SelfContained),
"external" => Ok(BootstrapOverrideLld::External),
"self-contained" => Ok(BootstrapOverrideLld::SelfContained),
_ => Err(E::custom(format!("unknown mode {v}"))),
}
}
@ -311,7 +324,6 @@ pub fn check_incompatible_options_for_ci_rustc(
lto,
stack_protector,
strip,
lld_mode,
jemalloc,
rpath,
channel,
@ -359,6 +371,8 @@ pub fn check_incompatible_options_for_ci_rustc(
frame_pointers: _,
break_on_ice: _,
parallel_frontend_threads: _,
bootstrap_override_lld: _,
bootstrap_override_lld_legacy: _,
} = ci_rust_config;
// There are two kinds of checks for CI rustc incompatible options:
@ -374,7 +388,6 @@ pub fn check_incompatible_options_for_ci_rustc(
err!(current_rust_config.debuginfo_level_rustc, debuginfo_level_rustc, "rust");
err!(current_rust_config.rpath, rpath, "rust");
err!(current_rust_config.strip, strip, "rust");
err!(current_rust_config.lld_mode, lld_mode, "rust");
err!(current_rust_config.llvm_tools, llvm_tools, "rust");
err!(current_rust_config.llvm_bitcode_linker, llvm_bitcode_linker, "rust");
err!(current_rust_config.jemalloc, jemalloc, "rust");

View file

@ -506,7 +506,7 @@ pub(crate) fn maybe_download_rustfmt<'a>(
return Some(PathBuf::new());
}
let VersionMetadata { date, version } = dwn_ctx.stage0_metadata.rustfmt.as_ref()?;
let VersionMetadata { date, version, .. } = dwn_ctx.stage0_metadata.rustfmt.as_ref()?;
let channel = format!("{version}-{date}");
let host = dwn_ctx.host_target;

View file

@ -35,7 +35,7 @@ use utils::exec::ExecutionContext;
use crate::core::builder;
use crate::core::builder::Kind;
use crate::core::config::{DryRun, LldMode, LlvmLibunwind, TargetSelection, flags};
use crate::core::config::{BootstrapOverrideLld, DryRun, LlvmLibunwind, TargetSelection, flags};
use crate::utils::exec::{BootstrapCommand, command};
use crate::utils::helpers::{self, dir_is_empty, exe, libdir, set_file_times, split_debuginfo};
@ -1358,14 +1358,14 @@ impl Build {
&& !target.is_msvc()
{
Some(self.cc(target))
} else if self.config.lld_mode.is_used()
} else if self.config.bootstrap_override_lld.is_used()
&& self.is_lld_direct_linker(target)
&& self.host_target == target
{
match self.config.lld_mode {
LldMode::SelfContained => Some(self.initial_lld.clone()),
LldMode::External => Some("lld".into()),
LldMode::Unused => None,
match self.config.bootstrap_override_lld {
BootstrapOverrideLld::SelfContained => Some(self.initial_lld.clone()),
BootstrapOverrideLld::External => Some("lld".into()),
BootstrapOverrideLld::None => None,
}
} else {
None

View file

@ -556,4 +556,9 @@ pub const CONFIG_CHANGE_HISTORY: &[ChangeInfo] = &[
severity: ChangeSeverity::Info,
summary: "New option `build.windows-rc` that will override which resource compiler on Windows will be used to compile Rust.",
},
ChangeInfo {
change_id: 99999,
severity: ChangeSeverity::Warning,
summary: "The `rust.use-lld` option has been renamed to `rust.bootstrap-override-lld`. Note that it only serves for overriding the linker used when building Rust code in bootstrap to be LLD.",
},
];

View file

@ -12,7 +12,7 @@ use std::{env, fs, io, panic, str};
use object::read::archive::ArchiveFile;
use crate::LldMode;
use crate::BootstrapOverrideLld;
use crate::core::builder::Builder;
use crate::core::config::{Config, TargetSelection};
use crate::utils::exec::{BootstrapCommand, command};
@ -357,15 +357,19 @@ pub fn get_clang_cl_resource_dir(builder: &Builder<'_>, clang_cl_path: &str) ->
/// Returns a flag that configures LLD to use only a single thread.
/// If we use an external LLD, we need to find out which version is it to know which flag should we
/// pass to it (LLD older than version 10 had a different flag).
fn lld_flag_no_threads(builder: &Builder<'_>, lld_mode: LldMode, is_windows: bool) -> &'static str {
fn lld_flag_no_threads(
builder: &Builder<'_>,
bootstrap_override_lld: BootstrapOverrideLld,
is_windows: bool,
) -> &'static str {
static LLD_NO_THREADS: OnceLock<(&'static str, &'static str)> = OnceLock::new();
let new_flags = ("/threads:1", "--threads=1");
let old_flags = ("/no-threads", "--no-threads");
let (windows_flag, other_flag) = LLD_NO_THREADS.get_or_init(|| {
let newer_version = match lld_mode {
LldMode::External => {
let newer_version = match bootstrap_override_lld {
BootstrapOverrideLld::External => {
let mut cmd = command("lld");
cmd.arg("-flavor").arg("ld").arg("--version");
let out = cmd.run_capture_stdout(builder).stdout();
@ -422,24 +426,28 @@ pub fn linker_flags(
lld_threads: LldThreads,
) -> Vec<String> {
let mut args = vec![];
if !builder.is_lld_direct_linker(target) && builder.config.lld_mode.is_used() {
match builder.config.lld_mode {
LldMode::External => {
if !builder.is_lld_direct_linker(target) && builder.config.bootstrap_override_lld.is_used() {
match builder.config.bootstrap_override_lld {
BootstrapOverrideLld::External => {
args.push("-Clinker-features=+lld".to_string());
args.push("-Zunstable-options".to_string());
}
LldMode::SelfContained => {
BootstrapOverrideLld::SelfContained => {
args.push("-Clinker-features=+lld".to_string());
args.push("-Clink-self-contained=+linker".to_string());
args.push("-Zunstable-options".to_string());
}
LldMode::Unused => unreachable!(),
BootstrapOverrideLld::None => unreachable!(),
};
if matches!(lld_threads, LldThreads::No) {
args.push(format!(
"-Clink-arg=-Wl,{}",
lld_flag_no_threads(builder, builder.config.lld_mode, target.is_windows())
lld_flag_no_threads(
builder,
builder.config.bootstrap_override_lld,
target.is_windows()
)
));
}
}

View file

@ -10,6 +10,8 @@ pub struct Stage0 {
#[derive(Default, Clone)]
pub struct VersionMetadata {
pub channel_manifest_hash: String,
pub git_commit_hash: String,
pub date: String,
pub version: String,
}
@ -50,9 +52,21 @@ pub fn parse_stage0_file() -> Stage0 {
"git_merge_commit_email" => stage0.config.git_merge_commit_email = value.to_owned(),
"nightly_branch" => stage0.config.nightly_branch = value.to_owned(),
"compiler_channel_manifest_hash" => {
stage0.compiler.channel_manifest_hash = value.to_owned()
}
"compiler_git_commit_hash" => stage0.compiler.git_commit_hash = value.to_owned(),
"compiler_date" => stage0.compiler.date = value.to_owned(),
"compiler_version" => stage0.compiler.version = value.to_owned(),
"rustfmt_channel_manifest_hash" => {
stage0.rustfmt.get_or_insert(VersionMetadata::default()).channel_manifest_hash =
value.to_owned();
}
"rustfmt_git_commit_hash" => {
stage0.rustfmt.get_or_insert(VersionMetadata::default()).git_commit_hash =
value.to_owned();
}
"rustfmt_date" => {
stage0.rustfmt.get_or_insert(VersionMetadata::default()).date = value.to_owned();
}

View file

@ -91,7 +91,7 @@ ENV RUST_CONFIGURE_ARGS \
--set llvm.ninja=false \
--set rust.debug-assertions=false \
--set rust.jemalloc \
--set rust.use-lld=true \
--set rust.bootstrap-override-lld=true \
--set rust.lto=thin \
--set rust.codegen-units=1

View file

@ -92,7 +92,7 @@ ENV RUST_CONFIGURE_ARGS \
--set llvm.ninja=false \
--set llvm.libzstd=true \
--set rust.jemalloc \
--set rust.use-lld=true \
--set rust.bootstrap-override-lld=true \
--set rust.lto=thin \
--set rust.codegen-units=1

View file

@ -431,9 +431,6 @@ auto:
MACOSX_DEPLOYMENT_TARGET: 10.12
MACOSX_STD_DEPLOYMENT_TARGET: 10.12
SELECT_XCODE: /Applications/Xcode_15.2.app
NO_LLVM_ASSERTIONS: 1
NO_DEBUG_ASSERTIONS: 1
NO_OVERFLOW_CHECKS: 1
DIST_REQUIRE_ALL_TOOLS: 1
CODEGEN_BACKENDS: llvm,cranelift
<<: *job-macos
@ -449,9 +446,6 @@ auto:
MACOSX_DEPLOYMENT_TARGET: 10.12
MACOSX_STD_DEPLOYMENT_TARGET: 10.12
SELECT_XCODE: /Applications/Xcode_15.2.app
NO_LLVM_ASSERTIONS: 1
NO_DEBUG_ASSERTIONS: 1
NO_OVERFLOW_CHECKS: 1
<<: *job-macos
- name: dist-aarch64-apple
@ -471,9 +465,6 @@ auto:
# supports the hardware.
MACOSX_DEPLOYMENT_TARGET: 11.0
MACOSX_STD_DEPLOYMENT_TARGET: 11.0
NO_LLVM_ASSERTIONS: 1
NO_DEBUG_ASSERTIONS: 1
NO_OVERFLOW_CHECKS: 1
DIST_REQUIRE_ALL_TOOLS: 1
CODEGEN_BACKENDS: llvm,cranelift
<<: *job-macos
@ -493,9 +484,6 @@ auto:
# supports the hardware, so only need to test it there.
MACOSX_DEPLOYMENT_TARGET: 11.0
MACOSX_STD_DEPLOYMENT_TARGET: 11.0
NO_LLVM_ASSERTIONS: 1
NO_DEBUG_ASSERTIONS: 1
NO_OVERFLOW_CHECKS: 1
<<: *job-macos
######################

View file

@ -121,6 +121,7 @@
- [\*-unknown-hermit](platform-support/hermit.md)
- [\*-unknown-freebsd](platform-support/freebsd.md)
- [\*-unknown-managarm-mlibc](platform-support/managarm.md)
- [\*-unknown-motor](platform-support/motor.md)
- [\*-unknown-netbsd\*](platform-support/netbsd.md)
- [\*-unknown-openbsd](platform-support/openbsd.md)
- [\*-unknown-redox](platform-support/redox.md)

View file

@ -431,7 +431,7 @@ target | std | host | notes
`x86_64-unknown-l4re-uclibc` | ? | |
[`x86_64-unknown-linux-none`](platform-support/x86_64-unknown-linux-none.md) | * | | 64-bit Linux with no libc
[`x86_64-unknown-managarm-mlibc`](platform-support/managarm.md) | ? | | x86_64 Managarm
[`x86_64-unknown-motor`[(platform-support/motor.md) | ? | | x86_64 Motor OS
[`x86_64-unknown-motor`](platform-support/motor.md) | ? | | x86_64 Motor OS
[`x86_64-unknown-openbsd`](platform-support/openbsd.md) | ✓ | ✓ | 64-bit OpenBSD
[`x86_64-unknown-trusty`](platform-support/trusty.md) | ✓ | |
`x86_64-uwp-windows-gnu` | ✓ | |

View file

@ -652,6 +652,21 @@ class MSVCEnumSyntheticProvider:
return name
def StructSummaryProvider(valobj: SBValue, _dict: LLDBOpaque) -> str:
output = []
for i in range(valobj.GetNumChildren()):
child: SBValue = valobj.GetChildAtIndex(i)
summary = child.summary
if summary is None:
summary = child.value
if summary is None:
summary = StructSummaryProvider(child, _dict)
summary = child.GetName() + ":" + summary
output.append(summary)
return "{" + ", ".join(output) + "}"
def MSVCEnumSummaryProvider(valobj: SBValue, _dict: LLDBOpaque) -> str:
enum_synth = MSVCEnumSyntheticProvider(valobj.GetNonSyntheticValue(), _dict)
variant_names: SBType = valobj.target.FindFirstType(
@ -695,16 +710,7 @@ def MSVCEnumSummaryProvider(valobj: SBValue, _dict: LLDBOpaque) -> str:
return name + TupleSummaryProvider(enum_synth.value, _dict)
else:
# enum variant is a regular struct
var_list = (
str(enum_synth.value.GetNonSyntheticValue()).split("= ", 1)[1].splitlines()
)
vars = [x.strip() for x in var_list if x not in ("{", "}")]
if vars[0][0] == "(":
vars[0] = vars[0][1:]
if vars[-1][-1] == ")":
vars[-1] = vars[-1][:-1]
return f"{name}{{{', '.join(vars)}}}"
return name + StructSummaryProvider(enum_synth.value, _dict)
class TupleSyntheticProvider:

View file

@ -26,7 +26,10 @@ ROOT_DIR="$(git rev-parse --show-toplevel)"
echo "Running pre-push script $ROOT_DIR/x test tidy"
cd "$ROOT_DIR"
./x test tidy --set build.locked-deps=true
# The env var is necessary for printing diffs in py (fmt/lint) and cpp.
TIDY_PRINT_DIFF=1 ./x test tidy \
--set build.locked-deps=true \
--extra-checks auto:py,auto:cpp,auto:js
if [ $? -ne 0 ]; then
echo "You may use \`git push --no-verify\` to skip this check."
exit 1

View file

@ -12,7 +12,7 @@ path = "lib.rs"
arrayvec = { version = "0.7", default-features = false }
askama = { version = "0.14", default-features = false, features = ["alloc", "config", "derive"] }
base64 = "0.21.7"
indexmap = "2"
indexmap = { version = "2", features = ["serde"] }
itertools = "0.12"
minifier = { version = "0.3.5", default-features = false }
pulldown-cmark-escape = { version = "0.11.0", features = ["simd"] }
@ -21,7 +21,7 @@ rustdoc-json-types = { path = "../rustdoc-json-types" }
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
smallvec = "1.8.1"
stringdex = { version = "0.0.1-alpha9" }
stringdex = { version = "0.0.1-alpha10" }
tempfile = "3"
threadpool = "1.8.1"
tracing = "0.1"

View file

@ -404,11 +404,15 @@ pub(crate) fn run_tests(
std::mem::drop(temp_dir.take());
times.display_times();
});
}
if nb_errors != 0 {
// We ensure temp dir destructor is called.
std::mem::drop(temp_dir);
} else {
// If the first condition branch exited successfully, `test_main_with_exit_callback` will
// not exit the process. So to prevent displaying the times twice, we put it behind an
// `else` condition.
times.display_times();
}
// We ensure temp dir destructor is called.
std::mem::drop(temp_dir);
if nb_errors != 0 {
std::process::exit(test::ERROR_EXIT_CODE);
}
}

View file

@ -37,10 +37,6 @@ use crate::html::escape::{Escape, EscapeBodyText};
use crate::html::render::Context;
use crate::passes::collect_intra_doc_links::UrlFragment;
pub(crate) fn write_str(s: &mut String, f: fmt::Arguments<'_>) {
s.write_fmt(f).unwrap();
}
pub(crate) fn print_generic_bounds(
bounds: &[clean::GenericBound],
cx: &Context<'_>,

View file

@ -48,6 +48,7 @@ use std::path::PathBuf;
use std::{fs, str};
use askama::Template;
use indexmap::IndexMap;
use itertools::Either;
use rustc_ast::join_path_syms;
use rustc_data_structures::fx::{FxHashSet, FxIndexMap, FxIndexSet};
@ -60,8 +61,6 @@ use rustc_middle::ty::print::PrintTraitRefExt;
use rustc_middle::ty::{self, TyCtxt};
use rustc_span::symbol::{Symbol, sym};
use rustc_span::{BytePos, DUMMY_SP, FileName, RealFileName};
use serde::ser::SerializeMap;
use serde::{Serialize, Serializer};
use tracing::{debug, info};
pub(crate) use self::context::*;
@ -77,7 +76,6 @@ use crate::html::escape::Escape;
use crate::html::format::{
Ending, HrefError, PrintWithSpace, href, print_abi_with_space, print_constness_with_space,
print_default_space, print_generic_bounds, print_where_clause, visibility_print_with_space,
write_str,
};
use crate::html::markdown::{
HeadingOffset, IdMap, Markdown, MarkdownItemInfo, MarkdownSummaryLine,
@ -1477,12 +1475,10 @@ fn render_assoc_items_inner(
)
}
};
let mut impls_buf = String::new();
for i in &non_trait {
write_str(
&mut impls_buf,
format_args!(
"{}",
let impls_buf = fmt::from_fn(|f| {
non_trait
.iter()
.map(|i| {
render_impl(
cx,
i,
@ -1498,9 +1494,11 @@ fn render_assoc_items_inner(
toggle_open_by_default: true,
},
)
),
);
}
})
.joined("", f)
})
.to_string();
if !impls_buf.is_empty() {
write!(
w,
@ -1652,91 +1650,85 @@ fn notable_traits_button(ty: &clean::Type, cx: &Context<'_>) -> Option<impl fmt:
}
fn notable_traits_decl(ty: &clean::Type, cx: &Context<'_>) -> (String, String) {
let mut out = String::new();
let did = ty.def_id(cx.cache()).expect("notable_traits_button already checked this");
let impls = cx.cache().impls.get(&did).expect("notable_traits_button already checked this");
for i in impls {
let impl_ = i.inner_impl();
if impl_.polarity != ty::ImplPolarity::Positive {
continue;
}
if !ty.is_doc_subtype_of(&impl_.for_, cx.cache()) {
// Two different types might have the same did,
// without actually being the same.
continue;
}
if let Some(trait_) = &impl_.trait_ {
let trait_did = trait_.def_id();
if cx.cache().traits.get(&trait_did).is_some_and(|t| t.is_notable_trait(cx.tcx())) {
if out.is_empty() {
write_str(
&mut out,
format_args!(
"<h3>Notable traits for <code>{}</code></h3>\
<pre><code>",
impl_.for_.print(cx)
),
);
let out = fmt::from_fn(|f| {
let mut notable_impls = impls
.iter()
.map(|impl_| impl_.inner_impl())
.filter(|impl_| impl_.polarity == ty::ImplPolarity::Positive)
.filter(|impl_| {
// Two different types might have the same did, without actually being the same.
ty.is_doc_subtype_of(&impl_.for_, cx.cache())
})
.filter_map(|impl_| {
if let Some(trait_) = &impl_.trait_
&& let trait_did = trait_.def_id()
&& let Some(trait_) = cx.cache().traits.get(&trait_did)
&& trait_.is_notable_trait(cx.tcx())
{
Some((impl_, trait_did))
} else {
None
}
})
.peekable();
write_str(
&mut out,
format_args!("<div class=\"where\">{}</div>", impl_.print(false, cx)),
);
for it in &impl_.items {
if let clean::AssocTypeItem(ref tydef, ref _bounds) = it.kind {
let empty_set = FxIndexSet::default();
let src_link = AssocItemLink::GotoSource(trait_did.into(), &empty_set);
write_str(
&mut out,
format_args!(
"<div class=\"where\"> {};</div>",
assoc_type(
it,
&tydef.generics,
&[], // intentionally leaving out bounds
Some(&tydef.type_),
src_link,
0,
cx,
)
),
);
}
}
let has_notable_impl = if let Some((impl_, _)) = notable_impls.peek() {
write!(
f,
"<h3>Notable traits for <code>{}</code></h3>\
<pre><code>",
impl_.for_.print(cx)
)?;
true
} else {
false
};
for (impl_, trait_did) in notable_impls {
write!(f, "<div class=\"where\">{}</div>", impl_.print(false, cx))?;
for it in &impl_.items {
let clean::AssocTypeItem(tydef, ..) = &it.kind else {
continue;
};
let empty_set = FxIndexSet::default();
let src_link = AssocItemLink::GotoSource(trait_did.into(), &empty_set);
write!(
f,
"<div class=\"where\"> {};</div>",
assoc_type(
it,
&tydef.generics,
&[], // intentionally leaving out bounds
Some(&tydef.type_),
src_link,
0,
cx,
)
)?;
}
}
}
if out.is_empty() {
out.push_str("</code></pre>");
}
if !has_notable_impl {
f.write_str("</code></pre>")?;
}
Ok(())
})
.to_string();
(format!("{:#}", ty.print(cx)), out)
}
fn notable_traits_json<'a>(tys: impl Iterator<Item = &'a clean::Type>, cx: &Context<'_>) -> String {
let mut mp: Vec<(String, String)> = tys.map(|ty| notable_traits_decl(ty, cx)).collect();
mp.sort_by(|(name1, _html1), (name2, _html2)| name1.cmp(name2));
struct NotableTraitsMap(Vec<(String, String)>);
impl Serialize for NotableTraitsMap {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
let mut map = serializer.serialize_map(Some(self.0.len()))?;
for item in &self.0 {
map.serialize_entry(&item.0, &item.1)?;
}
map.end()
}
}
serde_json::to_string(&NotableTraitsMap(mp))
.expect("serialize (string, string) -> json object cannot fail")
let mut mp = tys.map(|ty| notable_traits_decl(ty, cx)).collect::<IndexMap<_, _>>();
mp.sort_unstable_keys();
serde_json::to_string(&mp).expect("serialize (string, string) -> json object cannot fail")
}
#[derive(Clone, Copy, Debug)]
@ -1810,27 +1802,19 @@ fn render_impl(
document_item_info(cx, it, Some(parent))
.render_into(&mut info_buffer)
.unwrap();
write_str(
&mut doc_buffer,
format_args!("{}", document_full(item, cx, HeadingOffset::H5)),
);
doc_buffer = document_full(item, cx, HeadingOffset::H5).to_string();
short_documented = false;
} else {
// In case the item isn't documented,
// provide short documentation from the trait.
write_str(
&mut doc_buffer,
format_args!(
"{}",
document_short(
it,
cx,
link,
parent,
rendering_params.show_def_docs,
)
),
);
doc_buffer = document_short(
it,
cx,
link,
parent,
rendering_params.show_def_docs,
)
.to_string();
}
}
} else {
@ -1838,21 +1822,14 @@ fn render_impl(
.render_into(&mut info_buffer)
.unwrap();
if rendering_params.show_def_docs {
write_str(
&mut doc_buffer,
format_args!("{}", document_full(item, cx, HeadingOffset::H5)),
);
doc_buffer = document_full(item, cx, HeadingOffset::H5).to_string();
short_documented = false;
}
}
} else {
write_str(
&mut doc_buffer,
format_args!(
"{}",
document_short(item, cx, link, parent, rendering_params.show_def_docs)
),
);
doc_buffer =
document_short(item, cx, link, parent, rendering_params.show_def_docs)
.to_string();
}
}
let mut w = if short_documented && trait_.is_some() {

View file

@ -241,6 +241,34 @@ impl SerializedSearchIndex {
self.alias_pointers.push(alias_pointer);
index
}
/// Add potential search result to the database and return the row ID.
///
/// The returned ID can be used to attach more data to the search result.
fn add_entry(&mut self, name: Symbol, entry_data: EntryData, desc: String) -> usize {
let fqp = if let Some(module_path_index) = entry_data.module_path {
let mut fqp = self.path_data[module_path_index].as_ref().unwrap().module_path.clone();
fqp.push(Symbol::intern(&self.names[module_path_index]));
fqp.push(name);
fqp
} else {
vec![name]
};
// If a path with the same name already exists, but no entry does,
// we can fill in the entry without having to allocate a new row ID.
//
// Because paths and entries both share the same index, using the same
// ID saves space by making the tree smaller.
if let Some(&other_path) = self.crate_paths_index.get(&(entry_data.ty, fqp))
&& self.entry_data[other_path].is_none()
&& self.descs[other_path].is_empty()
{
self.entry_data[other_path] = Some(entry_data);
self.descs[other_path] = desc;
other_path
} else {
self.push(name.as_str().to_string(), None, Some(entry_data), desc, None, None, None)
}
}
fn push_path(&mut self, name: String, path_data: PathData) -> usize {
self.push(name, Some(path_data), None, String::new(), None, None, None)
}
@ -1516,10 +1544,9 @@ pub(crate) fn build_index(
.as_ref()
.map(|path| serialized_index.get_id_by_module_path(path));
let new_entry_id = serialized_index.push(
item.name.as_str().to_string(),
None,
Some(EntryData {
let new_entry_id = serialized_index.add_entry(
item.name,
EntryData {
ty: item.ty,
parent: item.parent_idx,
module_path,
@ -1538,11 +1565,8 @@ pub(crate) fn build_index(
None
},
krate: crate_idx,
}),
},
item.desc.to_string(),
None, // filled in after all the types have been indexed
None,
None,
);
// Aliases

View file

@ -1108,22 +1108,39 @@ function loadDatabase(hooks) {
const id2 = id1 + ((nodeid[4] << 8) | nodeid[5]);
leaves = RoaringBitmap.makeSingleton(id1)
.union(RoaringBitmap.makeSingleton(id2));
} else if (!isWhole && (nodeid[0] & 0xf0) === 0x80) {
const id1 = ((nodeid[0] & 0x0f) << 16) | (nodeid[1] << 8) | nodeid[2];
const id2 = id1 + ((nodeid[3] << 4) | ((nodeid[4] >> 4) & 0x0f));
const id3 = id2 + (((nodeid[4] & 0x0f) << 8) | nodeid[5]);
leaves = RoaringBitmap.makeSingleton(id1)
.union(RoaringBitmap.makeSingleton(id2))
.union(RoaringBitmap.makeSingleton(id3));
} else {
leaves = RoaringBitmap.makeSingleton(
(nodeid[2] << 24) | (nodeid[3] << 16) |
(nodeid[4] << 8) | nodeid[5],
);
}
const data = (nodeid[0] & 0x20) !== 0 ?
Uint8Array.of(((nodeid[0] & 0x0f) << 4) | (nodeid[1] >> 4)) :
EMPTY_UINT8;
newPromise = Promise.resolve(new PrefixSearchTree(
EMPTY_SEARCH_TREE_BRANCHES,
EMPTY_SEARCH_TREE_BRANCHES,
data,
isWhole ? leaves : EMPTY_BITMAP,
isWhole ? EMPTY_BITMAP : leaves,
));
if (isWhole) {
const data = (nodeid[0] & 0x20) !== 0 ?
Uint8Array.of(((nodeid[0] & 0x0f) << 4) | (nodeid[1] >> 4)) :
EMPTY_UINT8;
newPromise = Promise.resolve(new PrefixSearchTree(
EMPTY_SEARCH_TREE_BRANCHES,
EMPTY_SEARCH_TREE_BRANCHES,
data,
leaves,
EMPTY_BITMAP,
));
} else {
const data = (nodeid[0] & 0xf0) === 0x80 ? 0 : (
((nodeid[0] & 0x0f) << 4) | (nodeid[1] >> 4));
newPromise = Promise.resolve(new SuffixSearchTree(
EMPTY_SEARCH_TREE_BRANCHES,
data,
leaves,
));
}
} else {
const hashHex = makeHexFromUint8Array(nodeid);
newPromise = new Promise((resolve, reject) => {
@ -2748,6 +2765,7 @@ function loadDatabase(hooks) {
// because that's the canonical, hashed version of the data
let compression_tag = input[i];
const is_pure_suffixes_only_node = (compression_tag & 0x01) !== 0;
let no_leaves_flag;
if (compression_tag > 1) {
// compressed node
const is_long_compressed = (compression_tag & 0x04) !== 0;
@ -2759,7 +2777,8 @@ function loadDatabase(hooks) {
compression_tag |= input[i] << 16;
i += 1;
}
let dlen = input[i];
let dlen = input[i] & 0x7F;
no_leaves_flag = input[i] & 0x80;
i += 1;
if (is_data_compressed) {
data = data_history[data_history.length - dlen - 1];
@ -2786,10 +2805,15 @@ function loadDatabase(hooks) {
let whole;
let suffix;
if (is_pure_suffixes_only_node) {
suffix = input[i] === 0 ?
EMPTY_BITMAP1 :
new RoaringBitmap(input, i);
i += suffix.consumed_len_bytes;
if (no_leaves_flag) {
whole = EMPTY_BITMAP;
suffix = EMPTY_BITMAP;
} else {
suffix = input[i] === 0 ?
EMPTY_BITMAP1 :
new RoaringBitmap(input, i);
i += suffix.consumed_len_bytes;
}
tree = new SuffixSearchTree(
branches,
dlen,
@ -2807,7 +2831,7 @@ function loadDatabase(hooks) {
let ci = 0;
canonical[ci] = 1;
ci += 1;
canonical[ci] = dlen;
canonical[ci] = dlen | no_leaves_flag;
ci += 1;
canonical[ci] = input[coffset]; // suffix child count
ci += 1;
@ -2821,10 +2845,9 @@ function loadDatabase(hooks) {
}
siphashOfBytes(canonical.subarray(0, clen), 0, 0, 0, 0, hash);
} else {
if (input[i] === 0xff) {
if (no_leaves_flag) {
whole = EMPTY_BITMAP;
suffix = EMPTY_BITMAP1;
i += 1;
suffix = EMPTY_BITMAP;
} else {
whole = input[i] === 0 ?
EMPTY_BITMAP1 :
@ -2856,7 +2879,7 @@ function loadDatabase(hooks) {
let ci = 0;
canonical[ci] = 0;
ci += 1;
canonical[ci] = dlen;
canonical[ci] = dlen | no_leaves_flag;
ci += 1;
canonical.set(data, ci);
ci += data.length;
@ -2880,9 +2903,11 @@ function loadDatabase(hooks) {
}
hash[2] &= 0x7f;
} else {
i += 1;
// uncompressed node
const dlen = input [i + 1];
i += 2;
const dlen = input[i] & 0x7F;
no_leaves_flag = input[i] & 0x80;
i += 1;
if (dlen === 0 || is_pure_suffixes_only_node) {
data = EMPTY_UINT8;
} else {
@ -2897,16 +2922,15 @@ function loadDatabase(hooks) {
i += branches_consumed_len_bytes;
let whole;
let suffix;
if (is_pure_suffixes_only_node) {
if (no_leaves_flag) {
whole = EMPTY_BITMAP;
suffix = EMPTY_BITMAP;
} else if (is_pure_suffixes_only_node) {
whole = EMPTY_BITMAP;
suffix = input[i] === 0 ?
EMPTY_BITMAP1 :
new RoaringBitmap(input, i);
i += suffix.consumed_len_bytes;
} else if (input[i] === 0xff) {
whole = EMPTY_BITMAP;
suffix = EMPTY_BITMAP;
i += 1;
} else {
whole = input[i] === 0 ?
EMPTY_BITMAP1 :

1022
src/stage0

File diff suppressed because it is too large Load diff

Some files were not shown because too many files have changed in this diff Show more