Merge from rustc

This commit is contained in:
The Miri Cronjob Bot 2025-06-03 05:01:53 +00:00
commit 08cad5a609
231 changed files with 4340 additions and 2057 deletions

View file

@ -653,7 +653,7 @@ Torsten Weber <TorstenWeber12@gmail.com> <torstenweber12@gmail.com>
Trevor Gross <tmgross@umich.edu> <t.gross35@gmail.com>
Trevor Gross <tmgross@umich.edu> <tgross@intrepidcs.com>
Trevor Spiteri <tspiteri@ieee.org> <trevor.spiteri@um.edu.mt>
Tshepang Mbambo <tshepang@gmail.com>
Tshepang Mbambo <hopsi@tuta.io> <tshepang@gmail.com>
Ty Overby <ty@pre-alpha.com>
Tyler Mandry <tmandry@gmail.com> <tmandry@google.com>
Tyler Ruckinger <t.ruckinger@gmail.com>

View file

@ -177,56 +177,26 @@ version = "0.7.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7c02d123df017efcdfbd739ef81735b36c5ba83ec3c59c80a9d7ecc718f92e50"
[[package]]
name = "askama"
version = "0.13.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5d4744ed2eef2645831b441d8f5459689ade2ab27c854488fbab1fbe94fce1a7"
dependencies = [
"askama_derive 0.13.1",
"itoa",
"percent-encoding",
"serde",
"serde_json",
]
[[package]]
name = "askama"
version = "0.14.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f75363874b771be265f4ffe307ca705ef6f3baa19011c149da8674a87f1b75c4"
dependencies = [
"askama_derive 0.14.0",
"askama_derive",
"itoa",
"percent-encoding",
"serde",
"serde_json",
]
[[package]]
name = "askama_derive"
version = "0.13.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d661e0f57be36a5c14c48f78d09011e67e0cb618f269cca9f2fd8d15b68c46ac"
dependencies = [
"askama_parser 0.13.0",
"basic-toml",
"memchr",
"proc-macro2",
"quote",
"rustc-hash 2.1.1",
"serde",
"serde_derive",
"syn 2.0.101",
]
[[package]]
name = "askama_derive"
version = "0.14.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "129397200fe83088e8a68407a8e2b1f826cf0086b21ccdb866a722c8bcd3a94f"
dependencies = [
"askama_parser 0.14.0",
"askama_parser",
"basic-toml",
"memchr",
"proc-macro2",
@ -237,18 +207,6 @@ dependencies = [
"syn 2.0.101",
]
[[package]]
name = "askama_parser"
version = "0.13.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cf315ce6524c857bb129ff794935cf6d42c82a6cff60526fe2a63593de4d0d4f"
dependencies = [
"memchr",
"serde",
"serde_derive",
"winnow 0.7.10",
]
[[package]]
name = "askama_parser"
version = "0.14.0"
@ -582,7 +540,7 @@ name = "clippy"
version = "0.1.89"
dependencies = [
"anstream",
"askama 0.13.1",
"askama",
"cargo_metadata 0.18.1",
"clippy_config",
"clippy_lints",
@ -1432,7 +1390,7 @@ name = "generate-copyright"
version = "0.1.0"
dependencies = [
"anyhow",
"askama 0.14.0",
"askama",
"cargo_metadata 0.18.1",
"serde",
"serde_json",
@ -4676,7 +4634,7 @@ name = "rustdoc"
version = "0.0.0"
dependencies = [
"arrayvec",
"askama 0.14.0",
"askama",
"base64",
"expect-test",
"indexmap",

View file

@ -1,3 +1,4 @@
use std::cell::OnceCell;
use std::collections::VecDeque;
use std::rc::Rc;
@ -197,8 +198,8 @@ pub struct RegionInferenceContext<'tcx> {
/// Reverse of the SCC constraint graph -- i.e., an edge `A -> B` exists if
/// `B: A`. This is used to compute the universal regions that are required
/// to outlive a given SCC. Computed lazily.
rev_scc_graph: Option<ReverseSccGraph>,
/// to outlive a given SCC.
rev_scc_graph: OnceCell<ReverseSccGraph>,
/// The "R0 member of [R1..Rn]" constraints, indexed by SCC.
member_constraints: Rc<MemberConstraintSet<'tcx, ConstraintSccIndex>>,
@ -502,7 +503,7 @@ impl<'tcx> RegionInferenceContext<'tcx> {
constraint_graph,
constraint_sccs,
scc_annotations,
rev_scc_graph: None,
rev_scc_graph: OnceCell::new(),
member_constraints,
member_constraints_applied: Vec::new(),
universe_causes,
@ -809,9 +810,6 @@ impl<'tcx> RegionInferenceContext<'tcx> {
member_constraint_index: NllMemberConstraintIndex,
choice_regions: &[ty::RegionVid],
) {
// Lazily compute the reverse graph, we'll need it later.
self.compute_reverse_scc_graph();
// Create a mutable vector of the options. We'll try to winnow
// them down.
let mut choice_regions: Vec<ty::RegionVid> = choice_regions.to_vec();
@ -849,7 +847,7 @@ impl<'tcx> RegionInferenceContext<'tcx> {
// R0`). Therefore, we need only keep an option `O` if `UB: O`
// for all UB.
let universal_region_relations = &self.universal_region_relations;
for ub in self.rev_scc_graph.as_ref().unwrap().upper_bounds(scc) {
for ub in self.reverse_scc_graph().upper_bounds(scc) {
debug!(?ub);
choice_regions.retain(|&o_r| universal_region_relations.outlives(ub, o_r));
}

View file

@ -215,9 +215,7 @@ impl<'tcx> RegionInferenceContext<'tcx> {
// FIXME: We could probably compute the LUB if there is one.
let scc = self.constraint_sccs.scc(vid);
let upper_bounds: Vec<_> = self
.rev_scc_graph
.as_ref()
.unwrap()
.reverse_scc_graph()
.upper_bounds(scc)
.filter_map(|vid| self.definitions[vid].external_name)
.filter(|r| !r.is_static())

View file

@ -59,13 +59,10 @@ impl ReverseSccGraph {
}
impl RegionInferenceContext<'_> {
/// Compute the reverse SCC-based constraint graph (lazily).
pub(super) fn compute_reverse_scc_graph(&mut self) {
if self.rev_scc_graph.is_some() {
return;
}
self.rev_scc_graph =
Some(ReverseSccGraph::compute(&self.constraint_sccs, self.universal_regions()));
/// Return the reverse graph of the region SCCs, initialising it if needed.
pub(super) fn reverse_scc_graph(&self) -> &ReverseSccGraph {
self.rev_scc_graph.get_or_init(|| {
ReverseSccGraph::compute(&self.constraint_sccs, self.universal_regions())
})
}
}

View file

@ -23,7 +23,7 @@ use rustc_codegen_ssa::traits::{
use rustc_middle::bug;
#[cfg(feature = "master")]
use rustc_middle::ty::layout::FnAbiOf;
use rustc_middle::ty::layout::{HasTypingEnv, LayoutOf};
use rustc_middle::ty::layout::LayoutOf;
use rustc_middle::ty::{self, Instance, Ty};
use rustc_span::{Span, Symbol, sym};
use rustc_target::callconv::{ArgAbi, PassMode};
@ -205,21 +205,10 @@ impl<'a, 'gcc, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tc
span: Span,
) -> Result<(), Instance<'tcx>> {
let tcx = self.tcx;
let callee_ty = instance.ty(tcx, self.typing_env());
let (def_id, fn_args) = match *callee_ty.kind() {
ty::FnDef(def_id, fn_args) => (def_id, fn_args),
_ => bug!("expected fn item type, found {}", callee_ty),
};
let sig = callee_ty.fn_sig(tcx);
let sig = tcx.normalize_erasing_late_bound_regions(self.typing_env(), sig);
let arg_tys = sig.inputs();
let ret_ty = sig.output();
let name = tcx.item_name(def_id);
let name = tcx.item_name(instance.def_id());
let name_str = name.as_str();
let llret_ty = self.layout_of(ret_ty).gcc_type(self);
let fn_args = instance.args;
let simple = get_simple_intrinsic(self, name);
let simple_func = get_simple_function(self, name);
@ -320,8 +309,7 @@ impl<'a, 'gcc, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tc
| sym::rotate_right
| sym::saturating_add
| sym::saturating_sub => {
let ty = arg_tys[0];
match int_type_width_signed(ty, self) {
match int_type_width_signed(args[0].layout.ty, self) {
Some((width, signed)) => match name {
sym::ctlz | sym::cttz => {
let func = self.current_func.borrow().expect("func");
@ -400,7 +388,7 @@ impl<'a, 'gcc, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tc
tcx.dcx().emit_err(InvalidMonomorphization::BasicIntegerType {
span,
name,
ty,
ty: args[0].layout.ty,
});
return Ok(());
}
@ -492,7 +480,14 @@ impl<'a, 'gcc, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tc
}
_ if name_str.starts_with("simd_") => {
match generic_simd_intrinsic(self, name, callee_ty, args, ret_ty, llret_ty, span) {
match generic_simd_intrinsic(
self,
name,
args,
result.layout.ty,
result.layout.gcc_type(self),
span,
) {
Ok(value) => value,
Err(()) => return Ok(()),
}
@ -503,13 +498,10 @@ impl<'a, 'gcc, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tc
};
if result.layout.ty.is_bool() {
OperandRef::from_immediate_or_packed_pair(self, value, result.layout)
.val
.store(self, result);
let val = self.from_immediate(value);
self.store_to_place(val, result.val);
} else if !result.layout.ty.is_unit() {
let ptr_llty = self.type_ptr_to(result.layout.gcc_type(self));
let ptr = self.pointercast(result.val.llval, ptr_llty);
self.store(value, ptr, result.val.align);
self.store_to_place(value, result.val);
}
Ok(())
}

View file

@ -28,7 +28,6 @@ use crate::context::CodegenCx;
pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(
bx: &mut Builder<'a, 'gcc, 'tcx>,
name: Symbol,
callee_ty: Ty<'tcx>,
args: &[OperandRef<'tcx, RValue<'gcc>>],
ret_ty: Ty<'tcx>,
llret_ty: Type<'gcc>,
@ -54,24 +53,17 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(
};
}
let tcx = bx.tcx();
let sig = tcx.normalize_erasing_late_bound_regions(
ty::TypingEnv::fully_monomorphized(),
callee_ty.fn_sig(tcx),
);
let arg_tys = sig.inputs();
if name == sym::simd_select_bitmask {
require_simd!(
arg_tys[1],
InvalidMonomorphization::SimdArgument { span, name, ty: arg_tys[1] }
args[1].layout.ty,
InvalidMonomorphization::SimdArgument { span, name, ty: args[1].layout.ty }
);
let (len, _) = arg_tys[1].simd_size_and_type(bx.tcx());
let (len, _) = args[1].layout.ty.simd_size_and_type(bx.tcx());
let expected_int_bits = (len.max(8) - 1).next_power_of_two();
let expected_bytes = len / 8 + ((len % 8 > 0) as u64);
let mask_ty = arg_tys[0];
let mask_ty = args[0].layout.ty;
let mut mask = match *mask_ty.kind() {
ty::Int(i) if i.bit_width() == Some(expected_int_bits) => args[0].immediate(),
ty::Uint(i) if i.bit_width() == Some(expected_int_bits) => args[0].immediate(),
@ -121,8 +113,11 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(
}
// every intrinsic below takes a SIMD vector as its first argument
require_simd!(arg_tys[0], InvalidMonomorphization::SimdInput { span, name, ty: arg_tys[0] });
let in_ty = arg_tys[0];
require_simd!(
args[0].layout.ty,
InvalidMonomorphization::SimdInput { span, name, ty: args[0].layout.ty }
);
let in_ty = args[0].layout.ty;
let comparison = match name {
sym::simd_eq => Some(BinOp::Eq),
@ -134,7 +129,7 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(
_ => None,
};
let (in_len, in_elem) = arg_tys[0].simd_size_and_type(bx.tcx());
let (in_len, in_elem) = args[0].layout.ty.simd_size_and_type(bx.tcx());
if let Some(cmp_op) = comparison {
require_simd!(ret_ty, InvalidMonomorphization::SimdReturn { span, name, ty: ret_ty });
@ -401,13 +396,13 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(
#[cfg(feature = "master")]
if name == sym::simd_insert || name == sym::simd_insert_dyn {
require!(
in_elem == arg_tys[2],
in_elem == args[2].layout.ty,
InvalidMonomorphization::InsertedType {
span,
name,
in_elem,
in_ty,
out_ty: arg_tys[2]
out_ty: args[2].layout.ty
}
);
@ -439,10 +434,10 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(
let m_elem_ty = in_elem;
let m_len = in_len;
require_simd!(
arg_tys[1],
InvalidMonomorphization::SimdArgument { span, name, ty: arg_tys[1] }
args[1].layout.ty,
InvalidMonomorphization::SimdArgument { span, name, ty: args[1].layout.ty }
);
let (v_len, _) = arg_tys[1].simd_size_and_type(bx.tcx());
let (v_len, _) = args[1].layout.ty.simd_size_and_type(bx.tcx());
require!(
m_len == v_len,
InvalidMonomorphization::MismatchedLengths { span, name, m_len, v_len }
@ -911,18 +906,18 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(
// All types must be simd vector types
require_simd!(in_ty, InvalidMonomorphization::SimdFirst { span, name, ty: in_ty });
require_simd!(
arg_tys[1],
InvalidMonomorphization::SimdSecond { span, name, ty: arg_tys[1] }
args[1].layout.ty,
InvalidMonomorphization::SimdSecond { span, name, ty: args[1].layout.ty }
);
require_simd!(
arg_tys[2],
InvalidMonomorphization::SimdThird { span, name, ty: arg_tys[2] }
args[2].layout.ty,
InvalidMonomorphization::SimdThird { span, name, ty: args[2].layout.ty }
);
require_simd!(ret_ty, InvalidMonomorphization::SimdReturn { span, name, ty: ret_ty });
// Of the same length:
let (out_len, _) = arg_tys[1].simd_size_and_type(bx.tcx());
let (out_len2, _) = arg_tys[2].simd_size_and_type(bx.tcx());
let (out_len, _) = args[1].layout.ty.simd_size_and_type(bx.tcx());
let (out_len2, _) = args[2].layout.ty.simd_size_and_type(bx.tcx());
require!(
in_len == out_len,
InvalidMonomorphization::SecondArgumentLength {
@ -930,7 +925,7 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(
name,
in_len,
in_ty,
arg_ty: arg_tys[1],
arg_ty: args[1].layout.ty,
out_len
}
);
@ -941,7 +936,7 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(
name,
in_len,
in_ty,
arg_ty: arg_tys[2],
arg_ty: args[2].layout.ty,
out_len: out_len2
}
);
@ -970,8 +965,8 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(
// The second argument must be a simd vector with an element type that's a pointer
// to the element type of the first argument
let (_, element_ty0) = arg_tys[0].simd_size_and_type(bx.tcx());
let (_, element_ty1) = arg_tys[1].simd_size_and_type(bx.tcx());
let (_, element_ty0) = args[0].layout.ty.simd_size_and_type(bx.tcx());
let (_, element_ty1) = args[1].layout.ty.simd_size_and_type(bx.tcx());
let (pointer_count, underlying_ty) = match *element_ty1.kind() {
ty::RawPtr(p_ty, _) if p_ty == in_elem => {
(ptr_count(element_ty1), non_ptr(element_ty1))
@ -983,7 +978,7 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(
span,
name,
expected_element: element_ty1,
second_arg: arg_tys[1],
second_arg: args[1].layout.ty,
in_elem,
in_ty,
mutability: ExpectedPointerMutability::Not,
@ -998,7 +993,7 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(
// The element type of the third argument must be an integer type of any width:
// TODO: also support unsigned integers.
let (_, element_ty2) = arg_tys[2].simd_size_and_type(bx.tcx());
let (_, element_ty2) = args[2].layout.ty.simd_size_and_type(bx.tcx());
match *element_ty2.kind() {
ty::Int(_) => (),
_ => {
@ -1030,17 +1025,17 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(
// All types must be simd vector types
require_simd!(in_ty, InvalidMonomorphization::SimdFirst { span, name, ty: in_ty });
require_simd!(
arg_tys[1],
InvalidMonomorphization::SimdSecond { span, name, ty: arg_tys[1] }
args[1].layout.ty,
InvalidMonomorphization::SimdSecond { span, name, ty: args[1].layout.ty }
);
require_simd!(
arg_tys[2],
InvalidMonomorphization::SimdThird { span, name, ty: arg_tys[2] }
args[2].layout.ty,
InvalidMonomorphization::SimdThird { span, name, ty: args[2].layout.ty }
);
// Of the same length:
let (element_len1, _) = arg_tys[1].simd_size_and_type(bx.tcx());
let (element_len2, _) = arg_tys[2].simd_size_and_type(bx.tcx());
let (element_len1, _) = args[1].layout.ty.simd_size_and_type(bx.tcx());
let (element_len2, _) = args[2].layout.ty.simd_size_and_type(bx.tcx());
require!(
in_len == element_len1,
InvalidMonomorphization::SecondArgumentLength {
@ -1048,7 +1043,7 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(
name,
in_len,
in_ty,
arg_ty: arg_tys[1],
arg_ty: args[1].layout.ty,
out_len: element_len1
}
);
@ -1059,7 +1054,7 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(
name,
in_len,
in_ty,
arg_ty: arg_tys[2],
arg_ty: args[2].layout.ty,
out_len: element_len2
}
);
@ -1082,9 +1077,9 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(
// The second argument must be a simd vector with an element type that's a pointer
// to the element type of the first argument
let (_, element_ty0) = arg_tys[0].simd_size_and_type(bx.tcx());
let (_, element_ty1) = arg_tys[1].simd_size_and_type(bx.tcx());
let (_, element_ty2) = arg_tys[2].simd_size_and_type(bx.tcx());
let (_, element_ty0) = args[0].layout.ty.simd_size_and_type(bx.tcx());
let (_, element_ty1) = args[1].layout.ty.simd_size_and_type(bx.tcx());
let (_, element_ty2) = args[2].layout.ty.simd_size_and_type(bx.tcx());
let (pointer_count, underlying_ty) = match *element_ty1.kind() {
ty::RawPtr(p_ty, mutbl) if p_ty == in_elem && mutbl == hir::Mutability::Mut => {
(ptr_count(element_ty1), non_ptr(element_ty1))
@ -1096,7 +1091,7 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(
span,
name,
expected_element: element_ty1,
second_arg: arg_tys[1],
second_arg: args[1].layout.ty,
in_elem,
in_ty,
mutability: ExpectedPointerMutability::Mut,
@ -1194,8 +1189,8 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(
return_error!(InvalidMonomorphization::ExpectedVectorElementType {
span,
name,
expected_element: arg_tys[0].simd_size_and_type(bx.tcx()).1,
vector_type: arg_tys[0],
expected_element: args[0].layout.ty.simd_size_and_type(bx.tcx()).1,
vector_type: args[0].layout.ty,
});
}
};

View file

@ -169,19 +169,9 @@ impl<'ll, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> {
span: Span,
) -> Result<(), ty::Instance<'tcx>> {
let tcx = self.tcx;
let callee_ty = instance.ty(tcx, self.typing_env());
let ty::FnDef(def_id, fn_args) = *callee_ty.kind() else {
bug!("expected fn item type, found {}", callee_ty);
};
let sig = callee_ty.fn_sig(tcx);
let sig = tcx.normalize_erasing_late_bound_regions(self.typing_env(), sig);
let arg_tys = sig.inputs();
let ret_ty = sig.output();
let name = tcx.item_name(def_id);
let llret_ty = self.layout_of(ret_ty).llvm_type(self);
let name = tcx.item_name(instance.def_id());
let fn_args = instance.args;
let simple = get_simple_intrinsic(self, name);
let llval = match name {
@ -265,22 +255,22 @@ impl<'ll, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> {
BackendRepr::Scalar(scalar) => {
match scalar.primitive() {
Primitive::Int(..) => {
if self.cx().size_of(ret_ty).bytes() < 4 {
if self.cx().size_of(result.layout.ty).bytes() < 4 {
// `va_arg` should not be called on an integer type
// less than 4 bytes in length. If it is, promote
// the integer to an `i32` and truncate the result
// back to the smaller type.
let promoted_result = emit_va_arg(self, args[0], tcx.types.i32);
self.trunc(promoted_result, llret_ty)
self.trunc(promoted_result, result.layout.llvm_type(self))
} else {
emit_va_arg(self, args[0], ret_ty)
emit_va_arg(self, args[0], result.layout.ty)
}
}
Primitive::Float(Float::F16) => {
bug!("the va_arg intrinsic does not work with `f16`")
}
Primitive::Float(Float::F64) | Primitive::Pointer(_) => {
emit_va_arg(self, args[0], ret_ty)
emit_va_arg(self, args[0], result.layout.ty)
}
// `va_arg` should never be used with the return type f32.
Primitive::Float(Float::F32) => {
@ -384,7 +374,7 @@ impl<'ll, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> {
| sym::rotate_right
| sym::saturating_add
| sym::saturating_sub => {
let ty = arg_tys[0];
let ty = args[0].layout.ty;
if !ty.is_integral() {
tcx.dcx().emit_err(InvalidMonomorphization::BasicIntegerType {
span,
@ -403,26 +393,26 @@ impl<'ll, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> {
&[args[0].immediate(), y],
);
self.intcast(ret, llret_ty, false)
self.intcast(ret, result.layout.llvm_type(self), false)
}
sym::ctlz_nonzero => {
let y = self.const_bool(true);
let llvm_name = &format!("llvm.ctlz.i{width}");
let ret = self.call_intrinsic(llvm_name, &[args[0].immediate(), y]);
self.intcast(ret, llret_ty, false)
self.intcast(ret, result.layout.llvm_type(self), false)
}
sym::cttz_nonzero => {
let y = self.const_bool(true);
let llvm_name = &format!("llvm.cttz.i{width}");
let ret = self.call_intrinsic(llvm_name, &[args[0].immediate(), y]);
self.intcast(ret, llret_ty, false)
self.intcast(ret, result.layout.llvm_type(self), false)
}
sym::ctpop => {
let ret = self.call_intrinsic(
&format!("llvm.ctpop.i{width}"),
&[args[0].immediate()],
);
self.intcast(ret, llret_ty, false)
self.intcast(ret, result.layout.llvm_type(self), false)
}
sym::bswap => {
if width == 8 {
@ -554,16 +544,16 @@ impl<'ll, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> {
// Unpack non-power-of-2 #[repr(packed, simd)] arguments.
// This gives them the expected layout of a regular #[repr(simd)] vector.
let mut loaded_args = Vec::new();
for (ty, arg) in arg_tys.iter().zip(args) {
for arg in args {
loaded_args.push(
// #[repr(packed, simd)] vectors are passed like arrays (as references,
// with reduced alignment and no padding) rather than as immediates.
// We can use a vector load to fix the layout and turn the argument
// into an immediate.
if ty.is_simd()
if arg.layout.ty.is_simd()
&& let OperandValue::Ref(place) = arg.val
{
let (size, elem_ty) = ty.simd_size_and_type(self.tcx());
let (size, elem_ty) = arg.layout.ty.simd_size_and_type(self.tcx());
let elem_ll_ty = match elem_ty.kind() {
ty::Float(f) => self.type_float_from_ty(*f),
ty::Int(i) => self.type_int_from_ty(*i),
@ -580,10 +570,10 @@ impl<'ll, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> {
);
}
let llret_ty = if ret_ty.is_simd()
&& let BackendRepr::Memory { .. } = self.layout_of(ret_ty).layout.backend_repr
let llret_ty = if result.layout.ty.is_simd()
&& let BackendRepr::Memory { .. } = result.layout.backend_repr
{
let (size, elem_ty) = ret_ty.simd_size_and_type(self.tcx());
let (size, elem_ty) = result.layout.ty.simd_size_and_type(self.tcx());
let elem_ll_ty = match elem_ty.kind() {
ty::Float(f) => self.type_float_from_ty(*f),
ty::Int(i) => self.type_int_from_ty(*i),
@ -593,16 +583,15 @@ impl<'ll, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> {
};
self.type_vector(elem_ll_ty, size)
} else {
llret_ty
result.layout.llvm_type(self)
};
match generic_simd_intrinsic(
self,
name,
callee_ty,
fn_args,
&loaded_args,
ret_ty,
result.layout.ty,
llret_ty,
span,
) {
@ -621,9 +610,8 @@ impl<'ll, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> {
};
if result.layout.ty.is_bool() {
OperandRef::from_immediate_or_packed_pair(self, llval, result.layout)
.val
.store(self, result);
let val = self.from_immediate(llval);
self.store_to_place(val, result.val);
} else if !result.layout.ty.is_unit() {
self.store_to_place(llval, result.val);
}
@ -1151,7 +1139,6 @@ fn get_rust_try_fn<'a, 'll, 'tcx>(
fn generic_simd_intrinsic<'ll, 'tcx>(
bx: &mut Builder<'_, 'll, 'tcx>,
name: Symbol,
callee_ty: Ty<'tcx>,
fn_args: GenericArgsRef<'tcx>,
args: &[OperandRef<'tcx, &'ll Value>],
ret_ty: Ty<'tcx>,
@ -1222,26 +1209,22 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
bx.trunc(i_xn_msb, bx.type_vector(bx.type_i1(), in_len))
}
let tcx = bx.tcx();
let sig = tcx.normalize_erasing_late_bound_regions(bx.typing_env(), callee_ty.fn_sig(tcx));
let arg_tys = sig.inputs();
// Sanity-check: all vector arguments must be immediates.
if cfg!(debug_assertions) {
for (ty, arg) in arg_tys.iter().zip(args) {
if ty.is_simd() {
for arg in args {
if arg.layout.ty.is_simd() {
assert_matches!(arg.val, OperandValue::Immediate(_));
}
}
}
if name == sym::simd_select_bitmask {
let (len, _) = require_simd!(arg_tys[1], SimdArgument);
let (len, _) = require_simd!(args[1].layout.ty, SimdArgument);
let expected_int_bits = len.max(8).next_power_of_two();
let expected_bytes = len.div_ceil(8);
let mask_ty = arg_tys[0];
let mask_ty = args[0].layout.ty;
let mask = match mask_ty.kind() {
ty::Int(i) if i.bit_width() == Some(expected_int_bits) => args[0].immediate(),
ty::Uint(i) if i.bit_width() == Some(expected_int_bits) => args[0].immediate(),
@ -1275,8 +1258,8 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
}
// every intrinsic below takes a SIMD vector as its first argument
let (in_len, in_elem) = require_simd!(arg_tys[0], SimdInput);
let in_ty = arg_tys[0];
let (in_len, in_elem) = require_simd!(args[0].layout.ty, SimdInput);
let in_ty = args[0].layout.ty;
let comparison = match name {
sym::simd_eq => Some(BinOp::Eq),
@ -1407,13 +1390,13 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
if name == sym::simd_insert || name == sym::simd_insert_dyn {
require!(
in_elem == arg_tys[2],
in_elem == args[2].layout.ty,
InvalidMonomorphization::InsertedType {
span,
name,
in_elem,
in_ty,
out_ty: arg_tys[2]
out_ty: args[2].layout.ty
}
);
@ -1464,7 +1447,7 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
if name == sym::simd_select {
let m_elem_ty = in_elem;
let m_len = in_len;
let (v_len, _) = require_simd!(arg_tys[1], SimdArgument);
let (v_len, _) = require_simd!(args[1].layout.ty, SimdArgument);
require!(
m_len == v_len,
InvalidMonomorphization::MismatchedLengths { span, name, m_len, v_len }
@ -1665,9 +1648,9 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
// The second argument must be a simd vector with an element type that's a pointer
// to the element type of the first argument
let (_, element_ty0) = require_simd!(in_ty, SimdFirst);
let (out_len, element_ty1) = require_simd!(arg_tys[1], SimdSecond);
let (out_len, element_ty1) = require_simd!(args[1].layout.ty, SimdSecond);
// The element type of the third argument must be a signed integer type of any width:
let (out_len2, element_ty2) = require_simd!(arg_tys[2], SimdThird);
let (out_len2, element_ty2) = require_simd!(args[2].layout.ty, SimdThird);
require_simd!(ret_ty, SimdReturn);
// Of the same length:
@ -1678,7 +1661,7 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
name,
in_len,
in_ty,
arg_ty: arg_tys[1],
arg_ty: args[1].layout.ty,
out_len
}
);
@ -1689,7 +1672,7 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
name,
in_len,
in_ty,
arg_ty: arg_tys[2],
arg_ty: args[2].layout.ty,
out_len: out_len2
}
);
@ -1709,7 +1692,7 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
span,
name,
expected_element: element_ty1,
second_arg: arg_tys[1],
second_arg: args[1].layout.ty,
in_elem,
in_ty,
mutability: ExpectedPointerMutability::Not,
@ -1770,10 +1753,10 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
let (mask_len, mask_elem) = (in_len, in_elem);
// The second argument must be a pointer matching the element type
let pointer_ty = arg_tys[1];
let pointer_ty = args[1].layout.ty;
// The last argument is a passthrough vector providing values for disabled lanes
let values_ty = arg_tys[2];
let values_ty = args[2].layout.ty;
let (values_len, values_elem) = require_simd!(values_ty, SimdThird);
require_simd!(ret_ty, SimdReturn);
@ -1861,10 +1844,10 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
let (mask_len, mask_elem) = (in_len, in_elem);
// The second argument must be a pointer matching the element type
let pointer_ty = arg_tys[1];
let pointer_ty = args[1].layout.ty;
// The last argument specifies the values to store to memory
let values_ty = arg_tys[2];
let values_ty = args[2].layout.ty;
let (values_len, values_elem) = require_simd!(values_ty, SimdThird);
// Of the same length:
@ -1944,8 +1927,8 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
// The second argument must be a simd vector with an element type that's a pointer
// to the element type of the first argument
let (_, element_ty0) = require_simd!(in_ty, SimdFirst);
let (element_len1, element_ty1) = require_simd!(arg_tys[1], SimdSecond);
let (element_len2, element_ty2) = require_simd!(arg_tys[2], SimdThird);
let (element_len1, element_ty1) = require_simd!(args[1].layout.ty, SimdSecond);
let (element_len2, element_ty2) = require_simd!(args[2].layout.ty, SimdThird);
// Of the same length:
require!(
@ -1955,7 +1938,7 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
name,
in_len,
in_ty,
arg_ty: arg_tys[1],
arg_ty: args[1].layout.ty,
out_len: element_len1
}
);
@ -1966,7 +1949,7 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
name,
in_len,
in_ty,
arg_ty: arg_tys[2],
arg_ty: args[2].layout.ty,
out_len: element_len2
}
);
@ -1981,7 +1964,7 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
span,
name,
expected_element: element_ty1,
second_arg: arg_tys[1],
second_arg: args[1].layout.ty,
in_elem,
in_ty,
mutability: ExpectedPointerMutability::Mut,
@ -2503,7 +2486,7 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
let ptrs = args[0].immediate();
// The second argument must be a ptr-sized integer.
// (We don't care about the signedness, this is wrapping anyway.)
let (_offsets_len, offsets_elem) = arg_tys[1].simd_size_and_type(bx.tcx());
let (_offsets_len, offsets_elem) = args[1].layout.ty.simd_size_and_type(bx.tcx());
if !matches!(offsets_elem.kind(), ty::Int(ty::IntTy::Isize) | ty::Uint(ty::UintTy::Usize)) {
span_bug!(
span,
@ -2527,8 +2510,8 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
return_error!(InvalidMonomorphization::ExpectedVectorElementType {
span,
name,
expected_element: arg_tys[0].simd_size_and_type(bx.tcx()).1,
vector_type: arg_tys[0]
expected_element: args[0].layout.ty.simd_size_and_type(bx.tcx()).1,
vector_type: args[0].layout.ty
});
}
};

View file

@ -40,6 +40,7 @@ fn emit_direct_ptr_va_arg<'ll, 'tcx>(
align: Align,
slot_size: Align,
allow_higher_align: bool,
force_right_adjust: bool,
) -> (&'ll Value, Align) {
let va_list_ty = bx.type_ptr();
let va_list_addr = list.immediate();
@ -57,7 +58,10 @@ fn emit_direct_ptr_va_arg<'ll, 'tcx>(
let next = bx.inbounds_ptradd(addr, full_direct_size);
bx.store(next, va_list_addr, bx.tcx().data_layout.pointer_align.abi);
if size.bytes() < slot_size.bytes() && bx.tcx().sess.target.endian == Endian::Big {
if size.bytes() < slot_size.bytes()
&& bx.tcx().sess.target.endian == Endian::Big
&& force_right_adjust
{
let adjusted_size = bx.cx().const_i32((slot_size.bytes() - size.bytes()) as i32);
let adjusted = bx.inbounds_ptradd(addr, adjusted_size);
(adjusted, addr_align)
@ -81,6 +85,11 @@ enum AllowHigherAlign {
Yes,
}
enum ForceRightAdjust {
No,
Yes,
}
fn emit_ptr_va_arg<'ll, 'tcx>(
bx: &mut Builder<'_, 'll, 'tcx>,
list: OperandRef<'tcx, &'ll Value>,
@ -88,9 +97,11 @@ fn emit_ptr_va_arg<'ll, 'tcx>(
pass_mode: PassMode,
slot_size: SlotSize,
allow_higher_align: AllowHigherAlign,
force_right_adjust: ForceRightAdjust,
) -> &'ll Value {
let indirect = matches!(pass_mode, PassMode::Indirect);
let allow_higher_align = matches!(allow_higher_align, AllowHigherAlign::Yes);
let force_right_adjust = matches!(force_right_adjust, ForceRightAdjust::Yes);
let slot_size = Align::from_bytes(slot_size as u64).unwrap();
let layout = bx.cx.layout_of(target_ty);
@ -103,8 +114,15 @@ fn emit_ptr_va_arg<'ll, 'tcx>(
} else {
(layout.llvm_type(bx.cx), layout.size, layout.align)
};
let (addr, addr_align) =
emit_direct_ptr_va_arg(bx, list, size, align.abi, slot_size, allow_higher_align);
let (addr, addr_align) = emit_direct_ptr_va_arg(
bx,
list,
size,
align.abi,
slot_size,
allow_higher_align,
force_right_adjust,
);
if indirect {
let tmp_ret = bx.load(llty, addr, addr_align);
bx.load(bx.cx.layout_of(target_ty).llvm_type(bx.cx), tmp_ret, align.abi)
@ -208,6 +226,7 @@ fn emit_aapcs_va_arg<'ll, 'tcx>(
PassMode::Direct,
SlotSize::Bytes8,
AllowHigherAlign::Yes,
ForceRightAdjust::No,
);
bx.br(end);
@ -218,6 +237,150 @@ fn emit_aapcs_va_arg<'ll, 'tcx>(
val
}
/// Codegen for reading one `va_arg` on 32-bit PowerPC (SVR4-style va_list).
///
/// Emits the register-save-area vs. overflow-area selection logic: arguments are
/// first taken from saved registers (tracked by the `gpr`/`fpr` counters in the
/// va_list), and fall back to the stack overflow area once those run out.
fn emit_powerpc_va_arg<'ll, 'tcx>(
    bx: &mut Builder<'_, 'll, 'tcx>,
    list: OperandRef<'tcx, &'ll Value>,
    target_ty: Ty<'tcx>,
) -> &'ll Value {
    let dl = bx.cx.data_layout();

    // The va_list layout this function indexes into. The byte offsets computed
    // below (0 for gpr, 1 for fpr, 1+1+2 for overflow_arg_area, 1+1+2+4 for
    // reg_save_area) come directly from this struct:
    // struct __va_list_tag {
    //     unsigned char gpr;
    //     unsigned char fpr;
    //     unsigned short reserved;
    //     void *overflow_arg_area;
    //     void *reg_save_area;
    // };
    let va_list_addr = list.immediate();

    // Peel off any newtype wrappers.
    let layout = {
        let mut layout = bx.cx.layout_of(target_ty);

        while let Some((_, inner)) = layout.non_1zst_field(bx.cx) {
            layout = inner;
        }

        layout
    };

    // Rust does not currently support any powerpc softfloat targets.
    let target = &bx.cx.tcx.sess.target;
    let is_soft_float_abi = target.abi == "softfloat";
    assert!(!is_soft_float_abi);

    // All instances of VaArgSafe are passed directly.
    let is_indirect = false;

    // Classify the (scalar) argument: 64-bit integer, any integer/pointer, or
    // 64-bit float. This drives which counter, register bank, and register size
    // are used below.
    let (is_i64, is_int, is_f64) = match layout.layout.backend_repr() {
        BackendRepr::Scalar(scalar) => match scalar.primitive() {
            rustc_abi::Primitive::Int(integer, _) => (integer.size().bits() == 64, true, false),
            rustc_abi::Primitive::Float(float) => (false, false, float.size().bits() == 64),
            rustc_abi::Primitive::Pointer(_) => (false, true, false),
        },
        _ => unreachable!("all instances of VaArgSafe are represented as scalars"),
    };

    // Integer-class arguments consume GPRs (byte 0 of the va_list), float-class
    // arguments consume FPRs (byte 1).
    let num_regs_addr = if is_int || is_soft_float_abi {
        va_list_addr // gpr
    } else {
        bx.inbounds_ptradd(va_list_addr, bx.const_usize(1)) // fpr
    };

    let mut num_regs = bx.load(bx.type_i8(), num_regs_addr, dl.i8_align.abi);

    // "Align" the register count when the type is passed as `i64`: such a value
    // occupies a register pair, so round the count up to the next even number.
    if is_i64 || (is_f64 && is_soft_float_abi) {
        num_regs = bx.add(num_regs, bx.const_u8(1));
        num_regs = bx.and(num_regs, bx.const_u8(0b1111_1110));
    }

    // At most 8 registers of each class hold variadic arguments; past that the
    // argument lives in the overflow area.
    let max_regs = 8u8;
    let use_regs = bx.icmp(IntPredicate::IntULT, num_regs, bx.const_u8(max_regs));

    let in_reg = bx.append_sibling_block("va_arg.in_reg");
    let in_mem = bx.append_sibling_block("va_arg.in_mem");
    let end = bx.append_sibling_block("va_arg.end");

    bx.cond_br(use_regs, in_reg, in_mem);

    // Case 1: the argument is still in the register save area.
    let reg_addr = {
        bx.switch_to_block(in_reg);

        // reg_save_area lives at offset 1+1+2+4 (gpr + fpr + reserved +
        // overflow_arg_area) — see the struct layout above.
        let reg_safe_area_ptr = bx.inbounds_ptradd(va_list_addr, bx.cx.const_usize(1 + 1 + 2 + 4));
        let mut reg_addr = bx.load(bx.type_ptr(), reg_safe_area_ptr, dl.pointer_align.abi);

        // Floating-point registers start after the general-purpose registers
        // (8 GPRs * 4 bytes = 32 bytes into the save area).
        if !is_int && !is_soft_float_abi {
            reg_addr = bx.inbounds_ptradd(reg_addr, bx.cx.const_usize(32))
        }

        // Get the address of the saved value by scaling the number of
        // registers we've used by the register size (4 bytes for a GPR,
        // 8 bytes for an FPR).
        let reg_size = if is_int || is_soft_float_abi { 4 } else { 8 };
        let reg_offset = bx.mul(num_regs, bx.cx().const_u8(reg_size));
        let reg_addr = bx.inbounds_ptradd(reg_addr, reg_offset);

        // Increase the used-register count (by 2 for a register pair).
        let reg_incr = if is_i64 || (is_f64 && is_soft_float_abi) { 2 } else { 1 };
        let new_num_regs = bx.add(num_regs, bx.cx.const_u8(reg_incr));
        bx.store(new_num_regs, num_regs_addr, dl.i8_align.abi);

        bx.br(end);

        reg_addr
    };

    // Case 2: registers are exhausted — read from the overflow (stack) area.
    let mem_addr = {
        bx.switch_to_block(in_mem);

        // Mark all registers as used so later va_arg reads also go to memory.
        bx.store(bx.const_u8(max_regs), num_regs_addr, dl.i8_align.abi);

        // Everything in the overflow area is rounded up to a size of at least 4.
        let overflow_area_align = Align::from_bytes(4).unwrap();

        let size = if !is_indirect {
            layout.layout.size.align_to(overflow_area_align)
        } else {
            dl.pointer_size
        };

        // overflow_arg_area lives at offset 1+1+2 (gpr + fpr + reserved).
        let overflow_area_ptr = bx.inbounds_ptradd(va_list_addr, bx.cx.const_usize(1 + 1 + 2));
        let mut overflow_area = bx.load(bx.type_ptr(), overflow_area_ptr, dl.pointer_align.abi);

        // Round up address of argument to alignment.
        if layout.layout.align.abi > overflow_area_align {
            overflow_area = round_pointer_up_to_alignment(
                bx,
                overflow_area,
                layout.layout.align.abi,
                bx.type_ptr(),
            );
        }

        let mem_addr = overflow_area;

        // Increase the overflow area: bump the pointer past this argument.
        overflow_area = bx.inbounds_ptradd(overflow_area, bx.const_usize(size.bytes()));
        bx.store(overflow_area, overflow_area_ptr, dl.pointer_align.abi);

        bx.br(end);

        mem_addr
    };

    // Return the appropriate result: merge the two paths with a phi.
    bx.switch_to_block(end);
    let val_addr = bx.phi(bx.type_ptr(), &[reg_addr, mem_addr], &[in_reg, in_mem]);
    let val_type = layout.llvm_type(bx);
    // `is_indirect` is currently always false (see above); the extra load would
    // dereference a pointer to the actual value if indirect passing were used.
    let val_addr = if is_indirect {
        bx.load(bx.cx.type_ptr(), val_addr, dl.pointer_align.abi)
    } else {
        val_addr
    };
    bx.load(val_type, val_addr, layout.align.abi)
}
fn emit_s390x_va_arg<'ll, 'tcx>(
bx: &mut Builder<'_, 'll, 'tcx>,
list: OperandRef<'tcx, &'ll Value>,
@ -728,6 +891,7 @@ pub(super) fn emit_va_arg<'ll, 'tcx>(
PassMode::Direct,
SlotSize::Bytes4,
if target.is_like_windows { AllowHigherAlign::No } else { AllowHigherAlign::Yes },
ForceRightAdjust::No,
),
"aarch64" | "arm64ec" if target.is_like_windows || target.is_like_darwin => {
emit_ptr_va_arg(
@ -737,10 +901,24 @@ pub(super) fn emit_va_arg<'ll, 'tcx>(
PassMode::Direct,
SlotSize::Bytes8,
if target.is_like_windows { AllowHigherAlign::No } else { AllowHigherAlign::Yes },
ForceRightAdjust::No,
)
}
"aarch64" => emit_aapcs_va_arg(bx, addr, target_ty),
"s390x" => emit_s390x_va_arg(bx, addr, target_ty),
"powerpc" => emit_powerpc_va_arg(bx, addr, target_ty),
"powerpc64" | "powerpc64le" => emit_ptr_va_arg(
bx,
addr,
target_ty,
PassMode::Direct,
SlotSize::Bytes8,
AllowHigherAlign::Yes,
match &*target.arch {
"powerpc64" => ForceRightAdjust::Yes,
_ => ForceRightAdjust::No,
},
),
// Windows x86_64
"x86_64" if target.is_like_windows => {
let target_ty_size = bx.cx.size_of(target_ty).bytes();
@ -755,6 +933,7 @@ pub(super) fn emit_va_arg<'ll, 'tcx>(
},
SlotSize::Bytes8,
AllowHigherAlign::No,
ForceRightAdjust::No,
)
}
// This includes `target.is_like_darwin`, which on x86_64 targets is like sysv64.

View file

@ -3,7 +3,7 @@ mod raw_dylib;
use std::collections::BTreeSet;
use std::ffi::OsString;
use std::fs::{File, OpenOptions, read};
use std::io::{BufWriter, Write};
use std::io::{BufReader, BufWriter, Write};
use std::ops::{ControlFlow, Deref};
use std::path::{Path, PathBuf};
use std::process::{Output, Stdio};
@ -184,6 +184,12 @@ pub fn link_binary(
);
}
if sess.target.binary_format == BinaryFormat::Elf {
if let Err(err) = warn_if_linked_with_gold(sess, &out_filename) {
info!(?err, "Error while checking if gold was the linker");
}
}
if output.is_stdout() {
if output.is_tty() {
sess.dcx().emit_err(errors::BinaryOutputToTty {
@ -3375,3 +3381,54 @@ fn add_lld_args(
}
}
}
// gold has been deprecated with binutils 2.44
// and is known to behave incorrectly around Rust programs.
// There have been reports of being unable to bootstrap with gold:
// https://github.com/rust-lang/rust/issues/139425
// Additionally, gold miscompiles SHF_GNU_RETAIN sections, which are
// emitted with `#[used(linker)]`.
//
// Detection works by looking for the `.note.gnu.gold-version` ELF note that
// gold embeds in its output; if found, a deprecation warning is emitted.
fn warn_if_linked_with_gold(sess: &Session, path: &Path) -> Result<(), Box<dyn std::error::Error>> {
    use object::read::elf::{FileHeader, SectionHeader};
    use object::read::{ReadCache, ReadRef, Result};
    use object::{Endianness, elf};

    // Returns true iff the ELF image carries a `.note.gnu.gold-version`
    // section containing an NT_GNU_GOLD_VERSION note.
    fn elf_has_gold_version_note<'a>(
        elf: &impl FileHeader,
        data: impl ReadRef<'a>,
    ) -> Result<bool> {
        let endian = elf.endian()?;
        let Some((_, section)) =
            elf.sections(endian, data)?.section_by_name(endian, b".note.gnu.gold-version")
        else {
            return Ok(false);
        };
        let Some(mut notes) = section.notes(endian, data)? else {
            return Ok(false);
        };
        Ok(notes
            .any(|note| note.is_ok_and(|note| note.n_type(endian) == elf::NT_GNU_GOLD_VERSION)))
    }

    // Read lazily through a cache; only the headers and the note section are
    // actually fetched from disk.
    let cache = ReadCache::new(BufReader::new(File::open(path)?));
    let linked_with_gold = match sess.target.pointer_width {
        64 => elf_has_gold_version_note(elf::FileHeader64::<Endianness>::parse(&cache)?, &cache)?,
        32 => elf_has_gold_version_note(elf::FileHeader32::<Endianness>::parse(&cache)?, &cache)?,
        // Unknown pointer width: nothing we can check.
        _ => return Ok(()),
    };

    if linked_with_gold {
        let mut diag =
            sess.dcx().struct_warn("the gold linker is deprecated and has known bugs with Rust");
        diag.help("consider using LLD or ld from GNU binutils instead");
        diag.emit();
    }

    Ok(())
}

View file

@ -1,7 +1,7 @@
use rustc_abi::WrappingRange;
use rustc_middle::bug;
use rustc_middle::mir::SourceInfo;
use rustc_middle::ty::{self, Ty, TyCtxt};
use rustc_middle::{bug, span_bug};
use rustc_session::config::OptLevel;
use rustc_span::sym;
@ -60,18 +60,10 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
source_info: SourceInfo,
) -> Result<(), ty::Instance<'tcx>> {
let span = source_info.span;
let callee_ty = instance.ty(bx.tcx(), bx.typing_env());
let ty::FnDef(def_id, fn_args) = *callee_ty.kind() else {
span_bug!(span, "expected fn item type, found {}", callee_ty);
};
let sig = callee_ty.fn_sig(bx.tcx());
let sig = bx.tcx().normalize_erasing_late_bound_regions(bx.typing_env(), sig);
let arg_tys = sig.inputs();
let ret_ty = sig.output();
let name = bx.tcx().item_name(def_id);
let name = bx.tcx().item_name(instance.def_id());
let name_str = name.as_str();
let fn_args = instance.args;
// If we're swapping something that's *not* an `OperandValue::Ref`,
// then we can do it directly and avoid the alloca.
@ -97,13 +89,10 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
}
}
let llret_ty = bx.backend_type(bx.layout_of(ret_ty));
let ret_llval = |bx: &mut Bx, llval| {
if result.layout.ty.is_bool() {
OperandRef::from_immediate_or_packed_pair(bx, llval, result.layout)
.val
.store(bx, result);
let val = bx.from_immediate(llval);
bx.store_to_place(val, result.val);
} else if !result.layout.ty.is_unit() {
bx.store_to_place(llval, result.val);
}
@ -143,7 +132,11 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
sym::vtable_align => ty::COMMON_VTABLE_ENTRIES_ALIGN,
_ => bug!(),
};
let value = meth::VirtualIndex::from_index(idx).get_usize(bx, vtable, callee_ty);
let value = meth::VirtualIndex::from_index(idx).get_usize(
bx,
vtable,
instance.ty(bx.tcx(), bx.typing_env()),
);
match name {
// Size is always <= isize::MAX.
sym::vtable_size => {
@ -164,7 +157,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
| sym::type_name
| sym::variant_count => {
let value = bx.tcx().const_eval_instance(bx.typing_env(), instance, span).unwrap();
OperandRef::from_const(bx, value, ret_ty).immediate_or_packed_pair(bx)
OperandRef::from_const(bx, value, result.layout.ty).immediate_or_packed_pair(bx)
}
sym::arith_offset => {
let ty = fn_args.type_at(0);
@ -248,7 +241,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
bx.or_disjoint(a, b)
}
sym::exact_div => {
let ty = arg_tys[0];
let ty = args[0].layout.ty;
match int_type_width_signed(ty, bx.tcx()) {
Some((_width, signed)) => {
if signed {
@ -268,7 +261,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
}
}
sym::fadd_fast | sym::fsub_fast | sym::fmul_fast | sym::fdiv_fast | sym::frem_fast => {
match float_type_width(arg_tys[0]) {
match float_type_width(args[0].layout.ty) {
Some(_width) => match name {
sym::fadd_fast => bx.fadd_fast(args[0].immediate(), args[1].immediate()),
sym::fsub_fast => bx.fsub_fast(args[0].immediate(), args[1].immediate()),
@ -281,7 +274,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
bx.tcx().dcx().emit_err(InvalidMonomorphization::BasicFloatType {
span,
name,
ty: arg_tys[0],
ty: args[0].layout.ty,
});
return Ok(());
}
@ -291,7 +284,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
| sym::fsub_algebraic
| sym::fmul_algebraic
| sym::fdiv_algebraic
| sym::frem_algebraic => match float_type_width(arg_tys[0]) {
| sym::frem_algebraic => match float_type_width(args[0].layout.ty) {
Some(_width) => match name {
sym::fadd_algebraic => {
bx.fadd_algebraic(args[0].immediate(), args[1].immediate())
@ -314,31 +307,32 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
bx.tcx().dcx().emit_err(InvalidMonomorphization::BasicFloatType {
span,
name,
ty: arg_tys[0],
ty: args[0].layout.ty,
});
return Ok(());
}
},
sym::float_to_int_unchecked => {
if float_type_width(arg_tys[0]).is_none() {
if float_type_width(args[0].layout.ty).is_none() {
bx.tcx().dcx().emit_err(InvalidMonomorphization::FloatToIntUnchecked {
span,
ty: arg_tys[0],
ty: args[0].layout.ty,
});
return Ok(());
}
let Some((_width, signed)) = int_type_width_signed(ret_ty, bx.tcx()) else {
let Some((_width, signed)) = int_type_width_signed(result.layout.ty, bx.tcx())
else {
bx.tcx().dcx().emit_err(InvalidMonomorphization::FloatToIntUnchecked {
span,
ty: ret_ty,
ty: result.layout.ty,
});
return Ok(());
};
if signed {
bx.fptosi(args[0].immediate(), llret_ty)
bx.fptosi(args[0].immediate(), bx.backend_type(result.layout))
} else {
bx.fptoui(args[0].immediate(), llret_ty)
bx.fptoui(args[0].immediate(), bx.backend_type(result.layout))
}
}

View file

@ -4,7 +4,18 @@
//! green/native threading. This is just a bare-bones enough solution for
//! librustdoc, it is not production quality at all.
cfg_select! {
// cfg(bootstrap)
// Forwards its input to `cfg_match!` when building with the bootstrap
// compiler (which predates the rename) and to `cfg_select!` otherwise.
// Remove once bootstrap ships `cfg_select!`.
macro_rules! cfg_select_dispatch {
    ($($tokens:tt)*) => {
        #[cfg(bootstrap)]
        cfg_match! { $($tokens)* }
        #[cfg(not(bootstrap))]
        cfg_select! { $($tokens)* }
    };
}
cfg_select_dispatch! {
target_os = "linux" => {
mod linux;
use linux as imp;

View file

@ -10,6 +10,8 @@
#![allow(internal_features)]
#![allow(rustc::default_hash_types)]
#![allow(rustc::potential_query_instability)]
#![cfg_attr(bootstrap, feature(cfg_match))]
#![cfg_attr(not(bootstrap), feature(cfg_select))]
#![deny(unsafe_op_in_unsafe_fn)]
#![doc(html_root_url = "https://doc.rust-lang.org/nightly/nightly-rustc/")]
#![doc(rust_logo)]
@ -19,7 +21,6 @@
#![feature(ascii_char_variants)]
#![feature(assert_matches)]
#![feature(auto_traits)]
#![feature(cfg_select)]
#![feature(core_intrinsics)]
#![feature(dropck_eyepatch)]
#![feature(extend_one)]

View file

@ -859,8 +859,19 @@ fn get_thread_id() -> u32 {
std::thread::current().id().as_u64().get() as u32
}
// cfg(bootstrap)
// Forwards its input to `cfg_match!` when building with the bootstrap
// compiler (which predates the rename) and to `cfg_select!` otherwise.
// Remove once bootstrap ships `cfg_select!`.
macro_rules! cfg_select_dispatch {
    ($($tokens:tt)*) => {
        #[cfg(bootstrap)]
        cfg_match! { $($tokens)* }
        #[cfg(not(bootstrap))]
        cfg_select! { $($tokens)* }
    };
}
// Memory reporting
cfg_select! {
cfg_select_dispatch! {
windows => {
pub fn get_resident_set_size() -> Option<usize> {
use windows::{

View file

@ -1012,10 +1012,6 @@ fn run_required_analyses(tcx: TyCtxt<'_>) {
{
tcx.ensure_ok().mir_drops_elaborated_and_const_checked(def_id);
}
});
});
sess.time("coroutine_obligations", || {
tcx.par_hir_body_owners(|def_id| {
if tcx.is_coroutine(def_id.to_def_id()) {
tcx.ensure_ok().mir_coroutine_witnesses(def_id);
let _ = tcx.ensure_ok().check_coroutine_obligations(

View file

@ -715,6 +715,7 @@ fn test_unstable_options_tracking_hash() {
untracked!(no_analysis, true);
untracked!(no_leak_check, true);
untracked!(no_parallel_backend, true);
untracked!(no_steal_thir, true);
untracked!(parse_crate_root_only, true);
// `pre_link_arg` is omitted because it just forwards to `pre_link_args`.
untracked!(pre_link_args, vec![String::from("abc"), String::from("def")]);

View file

@ -21,6 +21,7 @@ use rustc_hir::def_id::{CrateNum, LOCAL_CRATE, LocalDefId, StableCrateId};
use rustc_hir::definitions::Definitions;
use rustc_index::IndexVec;
use rustc_middle::bug;
use rustc_middle::ty::data_structures::IndexSet;
use rustc_middle::ty::{TyCtxt, TyCtxtFeed};
use rustc_proc_macro::bridge::client::ProcMacro;
use rustc_session::config::{
@ -281,7 +282,7 @@ impl CStore {
.filter_map(|(cnum, data)| data.as_deref_mut().map(|data| (cnum, data)))
}
fn push_dependencies_in_postorder(&self, deps: &mut Vec<CrateNum>, cnum: CrateNum) {
fn push_dependencies_in_postorder(&self, deps: &mut IndexSet<CrateNum>, cnum: CrateNum) {
if !deps.contains(&cnum) {
let data = self.get_crate_data(cnum);
for dep in data.dependencies() {
@ -290,12 +291,12 @@ impl CStore {
}
}
deps.push(cnum);
deps.insert(cnum);
}
}
pub(crate) fn crate_dependencies_in_postorder(&self, cnum: CrateNum) -> Vec<CrateNum> {
let mut deps = Vec::new();
pub(crate) fn crate_dependencies_in_postorder(&self, cnum: CrateNum) -> IndexSet<CrateNum> {
let mut deps = IndexSet::default();
if cnum == LOCAL_CRATE {
for (cnum, _) in self.iter_crate_data() {
self.push_dependencies_in_postorder(&mut deps, cnum);
@ -306,10 +307,11 @@ impl CStore {
deps
}
fn crate_dependencies_in_reverse_postorder(&self, cnum: CrateNum) -> Vec<CrateNum> {
let mut deps = self.crate_dependencies_in_postorder(cnum);
deps.reverse();
deps
fn crate_dependencies_in_reverse_postorder(
&self,
cnum: CrateNum,
) -> impl Iterator<Item = CrateNum> {
self.crate_dependencies_in_postorder(cnum).into_iter().rev()
}
pub(crate) fn injected_panic_runtime(&self) -> Option<CrateNum> {

View file

@ -549,8 +549,9 @@ pub(in crate::rmeta) fn provide(providers: &mut Providers) {
has_global_allocator: |tcx, LocalCrate| CStore::from_tcx(tcx).has_global_allocator(),
has_alloc_error_handler: |tcx, LocalCrate| CStore::from_tcx(tcx).has_alloc_error_handler(),
postorder_cnums: |tcx, ()| {
tcx.arena
.alloc_slice(&CStore::from_tcx(tcx).crate_dependencies_in_postorder(LOCAL_CRATE))
tcx.arena.alloc_from_iter(
CStore::from_tcx(tcx).crate_dependencies_in_postorder(LOCAL_CRATE).into_iter(),
)
},
crates: |tcx, ()| {
// The list of loaded crates is now frozen in query cache,

View file

@ -535,7 +535,8 @@ rustc_queries! {
separate_provide_extern
}
/// Fetch the THIR for a given body.
/// Fetch the THIR for a given body. The THIR body gets stolen by unsafety checking unless
/// `-Zno-steal-thir` is on.
query thir_body(key: LocalDefId) -> Result<(&'tcx Steal<thir::Thir<'tcx>>, thir::ExprId), ErrorGuaranteed> {
// Perf tests revealed that hashing THIR is inefficient (see #85729).
no_hash

View file

@ -1882,10 +1882,8 @@ impl<'tcx> Ty<'tcx> {
// Needs normalization or revealing to determine, so no is the safe answer.
ty::Alias(..) => false,
ty::Param(..) | ty::Placeholder(..) | ty::Infer(..) | ty::Error(..) => false,
ty::Bound(..) => {
bug!("`is_trivially_pure_clone_copy` applied to unexpected type: {:?}", self);
ty::Param(..) | ty::Placeholder(..) | ty::Bound(..) | ty::Infer(..) | ty::Error(..) => {
false
}
}
}

View file

@ -201,9 +201,14 @@ impl<'tcx> UnsafetyVisitor<'_, 'tcx> {
/// Handle closures/coroutines/inline-consts, which are unsafe-checked with their parent body.
fn visit_inner_body(&mut self, def: LocalDefId) {
if let Ok((inner_thir, expr)) = self.tcx.thir_body(def) {
// Runs all other queries that depend on THIR.
// Run all other queries that depend on THIR.
self.tcx.ensure_done().mir_built(def);
let inner_thir = &inner_thir.steal();
let inner_thir = if self.tcx.sess.opts.unstable_opts.no_steal_thir {
&inner_thir.borrow()
} else {
// We have no other use for the THIR, so steal it to reduce memory usage.
&inner_thir.steal()
};
let hir_context = self.tcx.local_def_id_to_hir_id(def);
let safety_context = mem::replace(&mut self.safety_context, SafetyContext::Safe);
let mut inner_visitor = UnsafetyVisitor {
@ -1157,7 +1162,12 @@ pub(crate) fn check_unsafety(tcx: TyCtxt<'_>, def: LocalDefId) {
let Ok((thir, expr)) = tcx.thir_body(def) else { return };
// Runs all other queries that depend on THIR.
tcx.ensure_done().mir_built(def);
let thir = &thir.steal();
let thir = if tcx.sess.opts.unstable_opts.no_steal_thir {
&thir.borrow()
} else {
// We have no other use for the THIR, so steal it to reduce memory usage.
&thir.steal()
};
let hir_id = tcx.local_def_id_to_hir_id(def);
let safety_context = tcx.hir_fn_sig_by_hir_id(hir_id).map_or(SafetyContext::Safe, |fn_sig| {

View file

@ -3,8 +3,6 @@ use std::ops::Deref;
use rustc_type_ir::solve::{Certainty, Goal, NoSolution};
use rustc_type_ir::{self as ty, InferCtxtLike, Interner, TypeFoldable};
use crate::solve::HasChanged;
pub trait SolverDelegate: Deref<Target = Self::Infcx> + Sized {
type Infcx: InferCtxtLike<Interner = Self::Interner>;
type Interner: Interner;
@ -23,7 +21,7 @@ pub trait SolverDelegate: Deref<Target = Self::Infcx> + Sized {
&self,
goal: Goal<Self::Interner, <Self::Interner as Interner>::Predicate>,
span: <Self::Interner as Interner>::Span,
) -> Option<HasChanged>;
) -> Option<Certainty>;
fn fresh_var_for_kind_with_span(
&self,

View file

@ -671,10 +671,13 @@ where
// If this loop did not result in any progress, what's our final certainty.
let mut unchanged_certainty = Some(Certainty::Yes);
for (source, goal, stalled_on) in mem::take(&mut self.nested_goals) {
if let Some(has_changed) = self.delegate.compute_goal_fast_path(goal, self.origin_span)
{
if matches!(has_changed, HasChanged::Yes) {
unchanged_certainty = None;
if let Some(certainty) = self.delegate.compute_goal_fast_path(goal, self.origin_span) {
match certainty {
Certainty::Yes => {}
Certainty::Maybe(_) => {
self.nested_goals.push((source, goal, None));
unchanged_certainty = unchanged_certainty.map(|c| c.and(certainty));
}
}
continue;
}

View file

@ -2312,7 +2312,9 @@ impl<'tcx> CheckAttrVisitor<'tcx> {
}
fn check_macro_use(&self, hir_id: HirId, attr: &Attribute, target: Target) {
let name = attr.name().unwrap();
let Some(name) = attr.name() else {
return;
};
match target {
Target::ExternCrate | Target::Mod => {}
_ => {

View file

@ -426,10 +426,16 @@ impl<'v> hir_visit::Visitor<'v> for StatCollector<'v> {
hir_visit::walk_fn(self, fk, fd, b, id)
}
fn visit_use(&mut self, p: &'v hir::UsePath<'v>, hir_id: HirId) {
fn visit_use(&mut self, p: &'v hir::UsePath<'v>, _hir_id: HirId) {
// This is `visit_use`, but the type is `Path` so record it that way.
self.record("Path", None, p);
hir_visit::walk_use(self, p, hir_id)
// Don't call `hir_visit::walk_use(self, p, hir_id)`: it calls
// `visit_path` up to three times, once for each namespace result in
// `p.res`, by building temporary `Path`s that are not part of the real
// HIR, which causes `p` to be double- or triple-counted. Instead just
// walk the path internals (i.e. the segments) directly.
let hir::Path { span: _, res: _, segments } = *p;
ast_visit::walk_list!(self, visit_path_segment, segments);
}
fn visit_trait_item(&mut self, ti: &'v hir::TraitItem<'v>) {

View file

@ -193,6 +193,16 @@ impl<'a, 'ra, 'tcx> UnusedImportCheckVisitor<'a, 'ra, 'tcx> {
continue;
}
let module = self
.r
.get_nearest_non_block_module(self.r.local_def_id(extern_crate.id).to_def_id());
if module.no_implicit_prelude {
// If the module has `no_implicit_prelude`, then we don't suggest
// replacing the extern crate with a use, as it would not be
// inserted into the prelude. The user wrote the `extern crate` style deliberately.
continue;
}
let vis_span = extern_crate
.vis_span
.find_ancestor_inside(extern_crate.span)

View file

@ -415,7 +415,7 @@ pub(crate) enum AliasPossibility {
}
#[derive(Copy, Clone, Debug)]
pub(crate) enum PathSource<'a> {
pub(crate) enum PathSource<'a, 'c> {
/// Type paths `Path`.
Type,
/// Trait paths in bounds or impls.
@ -429,7 +429,10 @@ pub(crate) enum PathSource<'a> {
/// Paths in tuple struct patterns `Path(..)`.
TupleStruct(Span, &'a [Span]),
/// `m::A::B` in `<T as m::A>::B::C`.
TraitItem(Namespace),
///
/// Second field holds the "cause" of this one, i.e. the context within
/// which the trait item is resolved. Used for diagnostics.
TraitItem(Namespace, &'c PathSource<'a, 'c>),
/// Paths in delegation item
Delegation,
/// An arg in a `use<'a, N>` precise-capturing bound.
@ -440,7 +443,7 @@ pub(crate) enum PathSource<'a> {
DefineOpaques,
}
impl<'a> PathSource<'a> {
impl<'a> PathSource<'a, '_> {
fn namespace(self) -> Namespace {
match self {
PathSource::Type
@ -452,7 +455,7 @@ impl<'a> PathSource<'a> {
| PathSource::TupleStruct(..)
| PathSource::Delegation
| PathSource::ReturnTypeNotation => ValueNS,
PathSource::TraitItem(ns) => ns,
PathSource::TraitItem(ns, _) => ns,
PathSource::PreciseCapturingArg(ns) => ns,
}
}
@ -480,8 +483,9 @@ impl<'a> PathSource<'a> {
PathSource::Trait(_) => "trait",
PathSource::Pat => "unit struct, unit variant or constant",
PathSource::Struct => "struct, variant or union type",
PathSource::TupleStruct(..) => "tuple struct or tuple variant",
PathSource::TraitItem(ns) => match ns {
PathSource::TraitItem(ValueNS, PathSource::TupleStruct(..))
| PathSource::TupleStruct(..) => "tuple struct or tuple variant",
PathSource::TraitItem(ns, _) => match ns {
TypeNS => "associated type",
ValueNS => "method or associated constant",
MacroNS => bug!("associated macro"),
@ -585,7 +589,7 @@ impl<'a> PathSource<'a> {
) | Res::SelfTyParam { .. }
| Res::SelfTyAlias { .. }
),
PathSource::TraitItem(ns) => match res {
PathSource::TraitItem(ns, _) => match res {
Res::Def(DefKind::AssocConst | DefKind::AssocFn, _) if ns == ValueNS => true,
Res::Def(DefKind::AssocTy, _) if ns == TypeNS => true,
_ => false,
@ -2007,7 +2011,7 @@ impl<'a, 'ast, 'ra: 'ast, 'tcx> LateResolutionVisitor<'a, 'ast, 'ra, 'tcx> {
&mut self,
partial_res: PartialRes,
path: &[Segment],
source: PathSource<'_>,
source: PathSource<'_, '_>,
path_span: Span,
) {
let proj_start = path.len() - partial_res.unresolved_segments();
@ -4206,7 +4210,7 @@ impl<'a, 'ast, 'ra: 'ast, 'tcx> LateResolutionVisitor<'a, 'ast, 'ra, 'tcx> {
id: NodeId,
qself: &Option<P<QSelf>>,
path: &Path,
source: PathSource<'ast>,
source: PathSource<'ast, '_>,
) {
self.smart_resolve_path_fragment(
qself,
@ -4223,7 +4227,7 @@ impl<'a, 'ast, 'ra: 'ast, 'tcx> LateResolutionVisitor<'a, 'ast, 'ra, 'tcx> {
&mut self,
qself: &Option<P<QSelf>>,
path: &[Segment],
source: PathSource<'ast>,
source: PathSource<'ast, '_>,
finalize: Finalize,
record_partial_res: RecordPartialRes,
parent_qself: Option<&QSelf>,
@ -4404,6 +4408,7 @@ impl<'a, 'ast, 'ra: 'ast, 'tcx> LateResolutionVisitor<'a, 'ast, 'ra, 'tcx> {
path_span,
source.defer_to_typeck(),
finalize,
source,
) {
Ok(Some(partial_res)) if let Some(res) = partial_res.full_res() => {
// if we also have an associated type that matches the ident, stash a suggestion
@ -4526,12 +4531,13 @@ impl<'a, 'ast, 'ra: 'ast, 'tcx> LateResolutionVisitor<'a, 'ast, 'ra, 'tcx> {
span: Span,
defer_to_typeck: bool,
finalize: Finalize,
source: PathSource<'ast, '_>,
) -> Result<Option<PartialRes>, Spanned<ResolutionError<'ra>>> {
let mut fin_res = None;
for (i, &ns) in [primary_ns, TypeNS, ValueNS].iter().enumerate() {
if i == 0 || ns != primary_ns {
match self.resolve_qpath(qself, path, ns, finalize)? {
match self.resolve_qpath(qself, path, ns, finalize, source)? {
Some(partial_res)
if partial_res.unresolved_segments() == 0 || defer_to_typeck =>
{
@ -4568,6 +4574,7 @@ impl<'a, 'ast, 'ra: 'ast, 'tcx> LateResolutionVisitor<'a, 'ast, 'ra, 'tcx> {
path: &[Segment],
ns: Namespace,
finalize: Finalize,
source: PathSource<'ast, '_>,
) -> Result<Option<PartialRes>, Spanned<ResolutionError<'ra>>> {
debug!(
"resolve_qpath(qself={:?}, path={:?}, ns={:?}, finalize={:?})",
@ -4615,7 +4622,7 @@ impl<'a, 'ast, 'ra: 'ast, 'tcx> LateResolutionVisitor<'a, 'ast, 'ra, 'tcx> {
let partial_res = self.smart_resolve_path_fragment(
&None,
&path[..=qself.position],
PathSource::TraitItem(ns),
PathSource::TraitItem(ns, &source),
Finalize::with_root_span(finalize.node_id, finalize.path_span, qself.path_span),
RecordPartialRes::No,
Some(&qself),

View file

@ -175,7 +175,7 @@ impl<'ast, 'ra: 'ast, 'tcx> LateResolutionVisitor<'_, 'ast, 'ra, 'tcx> {
&mut self,
path: &[Segment],
span: Span,
source: PathSource<'_>,
source: PathSource<'_, '_>,
res: Option<Res>,
) -> BaseError {
// Make the base error.
@ -421,7 +421,7 @@ impl<'ast, 'ra: 'ast, 'tcx> LateResolutionVisitor<'_, 'ast, 'ra, 'tcx> {
path: &[Segment],
following_seg: Option<&Segment>,
span: Span,
source: PathSource<'_>,
source: PathSource<'_, '_>,
res: Option<Res>,
qself: Option<&QSelf>,
) -> (Diag<'tcx>, Vec<ImportSuggestion>) {
@ -539,12 +539,12 @@ impl<'ast, 'ra: 'ast, 'tcx> LateResolutionVisitor<'_, 'ast, 'ra, 'tcx> {
path: &[Segment],
following_seg: Option<&Segment>,
span: Span,
source: PathSource<'_>,
source: PathSource<'_, '_>,
res: Option<Res>,
qself: Option<&QSelf>,
) {
if let Some(Res::Def(DefKind::AssocFn, _)) = res
&& let PathSource::TraitItem(TypeNS) = source
&& let PathSource::TraitItem(TypeNS, _) = source
&& let None = following_seg
&& let Some(qself) = qself
&& let TyKind::Path(None, ty_path) = &qself.ty.kind
@ -650,7 +650,7 @@ impl<'ast, 'ra: 'ast, 'tcx> LateResolutionVisitor<'_, 'ast, 'ra, 'tcx> {
fn try_lookup_name_relaxed(
&mut self,
err: &mut Diag<'_>,
source: PathSource<'_>,
source: PathSource<'_, '_>,
path: &[Segment],
following_seg: Option<&Segment>,
span: Span,
@ -940,7 +940,7 @@ impl<'ast, 'ra: 'ast, 'tcx> LateResolutionVisitor<'_, 'ast, 'ra, 'tcx> {
fn suggest_trait_and_bounds(
&mut self,
err: &mut Diag<'_>,
source: PathSource<'_>,
source: PathSource<'_, '_>,
res: Option<Res>,
span: Span,
base_error: &BaseError,
@ -1017,7 +1017,7 @@ impl<'ast, 'ra: 'ast, 'tcx> LateResolutionVisitor<'_, 'ast, 'ra, 'tcx> {
fn suggest_typo(
&mut self,
err: &mut Diag<'_>,
source: PathSource<'_>,
source: PathSource<'_, '_>,
path: &[Segment],
following_seg: Option<&Segment>,
span: Span,
@ -1063,7 +1063,7 @@ impl<'ast, 'ra: 'ast, 'tcx> LateResolutionVisitor<'_, 'ast, 'ra, 'tcx> {
fn suggest_shadowed(
&mut self,
err: &mut Diag<'_>,
source: PathSource<'_>,
source: PathSource<'_, '_>,
path: &[Segment],
following_seg: Option<&Segment>,
span: Span,
@ -1096,7 +1096,7 @@ impl<'ast, 'ra: 'ast, 'tcx> LateResolutionVisitor<'_, 'ast, 'ra, 'tcx> {
fn err_code_special_cases(
&mut self,
err: &mut Diag<'_>,
source: PathSource<'_>,
source: PathSource<'_, '_>,
path: &[Segment],
span: Span,
) {
@ -1141,7 +1141,7 @@ impl<'ast, 'ra: 'ast, 'tcx> LateResolutionVisitor<'_, 'ast, 'ra, 'tcx> {
fn suggest_self_ty(
&mut self,
err: &mut Diag<'_>,
source: PathSource<'_>,
source: PathSource<'_, '_>,
path: &[Segment],
span: Span,
) -> bool {
@ -1164,7 +1164,7 @@ impl<'ast, 'ra: 'ast, 'tcx> LateResolutionVisitor<'_, 'ast, 'ra, 'tcx> {
fn suggest_self_value(
&mut self,
err: &mut Diag<'_>,
source: PathSource<'_>,
source: PathSource<'_, '_>,
path: &[Segment],
span: Span,
) -> bool {
@ -1332,7 +1332,7 @@ impl<'ast, 'ra: 'ast, 'tcx> LateResolutionVisitor<'_, 'ast, 'ra, 'tcx> {
fn suggest_swapping_misplaced_self_ty_and_trait(
&mut self,
err: &mut Diag<'_>,
source: PathSource<'_>,
source: PathSource<'_, '_>,
res: Option<Res>,
span: Span,
) {
@ -1361,7 +1361,7 @@ impl<'ast, 'ra: 'ast, 'tcx> LateResolutionVisitor<'_, 'ast, 'ra, 'tcx> {
&mut self,
err: &mut Diag<'_>,
res: Option<Res>,
source: PathSource<'_>,
source: PathSource<'_, '_>,
) {
let PathSource::TupleStruct(_, _) = source else { return };
let Some(Res::Def(DefKind::Fn, _)) = res else { return };
@ -1373,7 +1373,7 @@ impl<'ast, 'ra: 'ast, 'tcx> LateResolutionVisitor<'_, 'ast, 'ra, 'tcx> {
&mut self,
err: &mut Diag<'_>,
res: Option<Res>,
source: PathSource<'_>,
source: PathSource<'_, '_>,
span: Span,
) {
let PathSource::Trait(_) = source else { return };
@ -1422,7 +1422,7 @@ impl<'ast, 'ra: 'ast, 'tcx> LateResolutionVisitor<'_, 'ast, 'ra, 'tcx> {
fn suggest_pattern_match_with_let(
&mut self,
err: &mut Diag<'_>,
source: PathSource<'_>,
source: PathSource<'_, '_>,
span: Span,
) -> bool {
if let PathSource::Expr(_) = source
@ -1448,10 +1448,10 @@ impl<'ast, 'ra: 'ast, 'tcx> LateResolutionVisitor<'_, 'ast, 'ra, 'tcx> {
fn get_single_associated_item(
&mut self,
path: &[Segment],
source: &PathSource<'_>,
source: &PathSource<'_, '_>,
filter_fn: &impl Fn(Res) -> bool,
) -> Option<TypoSuggestion> {
if let crate::PathSource::TraitItem(_) = source {
if let crate::PathSource::TraitItem(_, _) = source {
let mod_path = &path[..path.len() - 1];
if let PathResult::Module(ModuleOrUniformRoot::Module(module)) =
self.resolve_path(mod_path, None, None)
@ -1556,7 +1556,7 @@ impl<'ast, 'ra: 'ast, 'tcx> LateResolutionVisitor<'_, 'ast, 'ra, 'tcx> {
/// Check if the source is a call expression and the first argument is `self`. If true,
/// return the span of the whole call and the span for all arguments except the first one (`self`).
fn call_has_self_arg(&self, source: PathSource<'_>) -> Option<(Span, Option<Span>)> {
fn call_has_self_arg(&self, source: PathSource<'_, '_>) -> Option<(Span, Option<Span>)> {
let mut has_self_arg = None;
if let PathSource::Expr(Some(parent)) = source
&& let ExprKind::Call(_, args) = &parent.kind
@ -1614,7 +1614,7 @@ impl<'ast, 'ra: 'ast, 'tcx> LateResolutionVisitor<'_, 'ast, 'ra, 'tcx> {
&mut self,
err: &mut Diag<'_>,
span: Span,
source: PathSource<'_>,
source: PathSource<'_, '_>,
path: &[Segment],
res: Res,
path_str: &str,
@ -1666,7 +1666,7 @@ impl<'ast, 'ra: 'ast, 'tcx> LateResolutionVisitor<'_, 'ast, 'ra, 'tcx> {
}
};
let find_span = |source: &PathSource<'_>, err: &mut Diag<'_>| {
let find_span = |source: &PathSource<'_, '_>, err: &mut Diag<'_>| {
match source {
PathSource::Expr(Some(Expr { span, kind: ExprKind::Call(_, _), .. }))
| PathSource::TupleStruct(span, _) => {
@ -2050,8 +2050,86 @@ impl<'ast, 'ra: 'ast, 'tcx> LateResolutionVisitor<'_, 'ast, 'ra, 'tcx> {
err.span_label(span, fallback_label.to_string());
err.note("can't use `Self` as a constructor, you must use the implemented struct");
}
(Res::Def(DefKind::TyAlias | DefKind::AssocTy, _), _) if ns == ValueNS => {
(
Res::Def(DefKind::TyAlias | DefKind::AssocTy, _),
PathSource::TraitItem(ValueNS, PathSource::TupleStruct(whole, args)),
) => {
err.note("can't use a type alias as tuple pattern");
let mut suggestion = Vec::new();
if let &&[first, ..] = args
&& let &&[.., last] = args
{
suggestion.extend([
// "0: " has to be included here so that the fix is machine applicable.
//
// If this would only add " { " and then the code below add "0: ",
// rustfix would crash, because end of this suggestion is the same as start
// of the suggestion below. Thus, we have to merge these...
(span.between(first), " { 0: ".to_owned()),
(last.between(whole.shrink_to_hi()), " }".to_owned()),
]);
suggestion.extend(
args.iter()
.enumerate()
.skip(1) // See above
.map(|(index, &arg)| (arg.shrink_to_lo(), format!("{index}: "))),
)
} else {
suggestion.push((span.between(whole.shrink_to_hi()), " {}".to_owned()));
}
err.multipart_suggestion(
"use struct pattern instead",
suggestion,
Applicability::MachineApplicable,
);
}
(
Res::Def(DefKind::TyAlias | DefKind::AssocTy, _),
PathSource::TraitItem(
ValueNS,
PathSource::Expr(Some(ast::Expr {
span: whole,
kind: ast::ExprKind::Call(_, args),
..
})),
),
) => {
err.note("can't use a type alias as a constructor");
let mut suggestion = Vec::new();
if let [first, ..] = &**args
&& let [.., last] = &**args
{
suggestion.extend([
// "0: " has to be included here so that the fix is machine applicable.
//
// If this would only add " { " and then the code below add "0: ",
// rustfix would crash, because end of this suggestion is the same as start
// of the suggestion below. Thus, we have to merge these...
(span.between(first.span), " { 0: ".to_owned()),
(last.span.between(whole.shrink_to_hi()), " }".to_owned()),
]);
suggestion.extend(
args.iter()
.enumerate()
.skip(1) // See above
.map(|(index, arg)| (arg.span.shrink_to_lo(), format!("{index}: "))),
)
} else {
suggestion.push((span.between(whole.shrink_to_hi()), " {}".to_owned()));
}
err.multipart_suggestion(
"use struct expression instead",
suggestion,
Applicability::MachineApplicable,
);
}
_ => return false,
}
@ -2621,7 +2699,7 @@ impl<'ast, 'ra: 'ast, 'tcx> LateResolutionVisitor<'_, 'ast, 'ra, 'tcx> {
fn suggest_using_enum_variant(
&mut self,
err: &mut Diag<'_>,
source: PathSource<'_>,
source: PathSource<'_, '_>,
def_id: DefId,
span: Span,
) {
@ -2799,7 +2877,7 @@ impl<'ast, 'ra: 'ast, 'tcx> LateResolutionVisitor<'_, 'ast, 'ra, 'tcx> {
pub(crate) fn suggest_adding_generic_parameter(
&self,
path: &[Segment],
source: PathSource<'_>,
source: PathSource<'_, '_>,
) -> Option<(Span, &'static str, String, Applicability)> {
let (ident, span) = match path {
[segment]

View file

@ -12,10 +12,14 @@ use rustc_data_structures::fx::FxIndexMap;
use rustc_data_structures::unord::UnordSet;
use rustc_middle::ty::TyCtxt;
use rustc_span::def_id::DefId;
use rustc_span::source_map::SourceMap;
use rustc_span::{DUMMY_SP, InnerSpan, Span, Symbol, sym};
use thin_vec::ThinVec;
use tracing::{debug, trace};
#[cfg(test)]
mod tests;
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
pub enum DocFragmentKind {
/// A doc fragment created from a `///` or `//!` doc comment.
@ -531,10 +535,20 @@ pub fn source_span_for_markdown_range(
markdown: &str,
md_range: &Range<usize>,
fragments: &[DocFragment],
) -> Option<Span> {
let map = tcx.sess.source_map();
source_span_for_markdown_range_inner(map, markdown, md_range, fragments)
}
// inner function used for unit testing
pub fn source_span_for_markdown_range_inner(
map: &SourceMap,
markdown: &str,
md_range: &Range<usize>,
fragments: &[DocFragment],
) -> Option<Span> {
use rustc_span::BytePos;
let map = tcx.sess.source_map();
if let &[fragment] = &fragments
&& fragment.kind == DocFragmentKind::RawDoc
&& let Ok(snippet) = map.span_to_snippet(fragment.span)
@ -570,7 +584,13 @@ pub fn source_span_for_markdown_range(
{
// If there is either a match in a previous fragment, or
// multiple matches in this fragment, there is ambiguity.
if match_data.is_none() && !snippet[match_start + 1..].contains(pat) {
// the snippet cannot be zero-sized, because it matches
// the pattern, which is checked to not be zero sized.
if match_data.is_none()
&& !snippet.as_bytes()[match_start + 1..]
.windows(pat.len())
.any(|s| s == pat.as_bytes())
{
match_data = Some((i, match_start));
} else {
            // Heuristic produced ambiguity, return nothing.

View file

@ -0,0 +1,50 @@
use std::path::PathBuf;
use rustc_span::source_map::{FilePathMapping, SourceMap};
use rustc_span::symbol::sym;
use rustc_span::{BytePos, Span};
use super::{DocFragment, DocFragmentKind, source_span_for_markdown_range_inner};
#[test]
fn single_backtick() {
let sm = SourceMap::new(FilePathMapping::empty());
sm.new_source_file(PathBuf::from("foo.rs").into(), r#"#[doc = "`"] fn foo() {}"#.to_string());
let span = source_span_for_markdown_range_inner(
&sm,
"`",
&(0..1),
&[DocFragment {
span: Span::with_root_ctxt(BytePos(8), BytePos(11)),
item_id: None,
kind: DocFragmentKind::RawDoc,
doc: sym::empty, // unused placeholder
indent: 0,
}],
)
.unwrap();
assert_eq!(span.lo(), BytePos(9));
assert_eq!(span.hi(), BytePos(10));
}
#[test]
fn utf8() {
// regression test for https://github.com/rust-lang/rust/issues/141665
let sm = SourceMap::new(FilePathMapping::empty());
sm.new_source_file(PathBuf::from("foo.rs").into(), r#"#[doc = "⚠"] fn foo() {}"#.to_string());
let span = source_span_for_markdown_range_inner(
&sm,
"",
&(0..3),
&[DocFragment {
span: Span::with_root_ctxt(BytePos(8), BytePos(14)),
item_id: None,
kind: DocFragmentKind::RawDoc,
doc: sym::empty, // unused placeholder
indent: 0,
}],
)
.unwrap();
assert_eq!(span.lo(), BytePos(9));
assert_eq!(span.hi(), BytePos(12));
}

View file

@ -172,6 +172,11 @@ fn current_dll_path() -> Result<PathBuf, String> {
Ok(OsString::from_wide(&filename).into())
}
#[cfg(target_os = "wasi")]
fn current_dll_path() -> Result<PathBuf, String> {
Err("current_dll_path is not supported on WASI".to_string())
}
pub fn sysroot_candidates() -> SmallVec<[PathBuf; 2]> {
let target = crate::config::host_tuple();
let mut sysroot_candidates: SmallVec<[PathBuf; 2]> = smallvec![get_or_default_sysroot()];

View file

@ -2366,6 +2366,8 @@ options! {
"run LLVM in non-parallel mode (while keeping codegen-units and ThinLTO)"),
no_profiler_runtime: bool = (false, parse_no_value, [TRACKED],
"prevent automatic injection of the profiler_builtins crate"),
no_steal_thir: bool = (false, parse_bool, [UNTRACKED],
"don't steal the THIR when we're done with it; useful for rustc drivers (default: no)"),
no_trait_vptr: bool = (false, parse_no_value, [TRACKED],
"disable generation of trait vptr in vtable for upcasting"),
no_unique_section_names: bool = (false, parse_bool, [TRACKED],

View file

@ -29,7 +29,18 @@ pub(crate) fn analyze_source_file(src: &str) -> (Vec<RelativeBytePos>, Vec<Multi
(lines, multi_byte_chars)
}
cfg_select! {
// cfg(bootstrap)
macro_rules! cfg_select_dispatch {
($($tokens:tt)*) => {
#[cfg(bootstrap)]
cfg_match! { $($tokens)* }
#[cfg(not(bootstrap))]
cfg_select! { $($tokens)* }
};
}
cfg_select_dispatch! {
any(target_arch = "x86", target_arch = "x86_64") => {
fn analyze_source_file_dispatch(
src: &str,

View file

@ -17,10 +17,11 @@
// tidy-alphabetical-start
#![allow(internal_features)]
#![cfg_attr(bootstrap, feature(cfg_match))]
#![cfg_attr(not(bootstrap), feature(cfg_select))]
#![doc(html_root_url = "https://doc.rust-lang.org/nightly/nightly-rustc/")]
#![doc(rust_logo)]
#![feature(array_windows)]
#![feature(cfg_select)]
#![feature(core_io_borrowed_buf)]
#![feature(hash_set_entry)]
#![feature(if_let_guard)]

View file

@ -937,8 +937,10 @@ symbols! {
external_doc,
f,
f128,
f128_epsilon,
f128_nan,
f16,
f16_epsilon,
f16_nan,
f16c_target_feature,
f32,

View file

@ -11,8 +11,9 @@ use rustc_infer::infer::{InferCtxt, RegionVariableOrigin, SubregionOrigin, TyCtx
use rustc_infer::traits::solve::Goal;
use rustc_middle::traits::query::NoSolution;
use rustc_middle::traits::solve::Certainty;
use rustc_middle::ty::{self, Ty, TyCtxt, TypeFoldable, TypeVisitableExt as _, TypingMode};
use rustc_next_trait_solver::solve::HasChanged;
use rustc_middle::ty::{
self, Ty, TyCtxt, TypeFlags, TypeFoldable, TypeVisitableExt as _, TypingMode,
};
use rustc_span::{DUMMY_SP, ErrorGuaranteed, Span};
use crate::traits::{EvaluateConstErr, ObligationCause, specialization_graph};
@ -61,11 +62,41 @@ impl<'tcx> rustc_next_trait_solver::delegate::SolverDelegate for SolverDelegate<
&self,
goal: Goal<'tcx, ty::Predicate<'tcx>>,
span: Span,
) -> Option<HasChanged> {
) -> Option<Certainty> {
if let Some(trait_pred) = goal.predicate.as_trait_clause() {
if trait_pred.polarity() == ty::PredicatePolarity::Positive {
match self.0.tcx.as_lang_item(trait_pred.def_id()) {
Some(LangItem::Sized)
if self
.resolve_vars_if_possible(trait_pred.self_ty().skip_binder())
.is_trivially_sized(self.0.tcx) =>
{
return Some(Certainty::Yes);
}
Some(LangItem::Copy | LangItem::Clone) => {
let self_ty =
self.resolve_vars_if_possible(trait_pred.self_ty().skip_binder());
// Unlike `Sized` traits, which always prefer the built-in impl,
// `Copy`/`Clone` may be shadowed by a param-env candidate which
// could force a lifetime error or guide inference. While that's
// not generally desirable, it is observable, so for now let's
// ignore this fast path for types that have regions or infer.
if !self_ty
.has_type_flags(TypeFlags::HAS_FREE_REGIONS | TypeFlags::HAS_INFER)
&& self_ty.is_trivially_pure_clone_copy()
{
return Some(Certainty::Yes);
}
}
_ => {}
}
}
}
let pred = goal.predicate.kind();
match pred.no_bound_vars()? {
ty::PredicateKind::DynCompatible(def_id) if self.0.tcx.is_dyn_compatible(def_id) => {
Some(HasChanged::No)
Some(Certainty::Yes)
}
ty::PredicateKind::Clause(ty::ClauseKind::RegionOutlives(outlives)) => {
self.0.sub_regions(
@ -73,7 +104,7 @@ impl<'tcx> rustc_next_trait_solver::delegate::SolverDelegate for SolverDelegate<
outlives.1,
outlives.0,
);
Some(HasChanged::No)
Some(Certainty::Yes)
}
ty::PredicateKind::Clause(ty::ClauseKind::TypeOutlives(outlives)) => {
self.0.register_type_outlives_constraint(
@ -82,22 +113,7 @@ impl<'tcx> rustc_next_trait_solver::delegate::SolverDelegate for SolverDelegate<
&ObligationCause::dummy_with_span(span),
);
Some(HasChanged::No)
}
ty::PredicateKind::Clause(ty::ClauseKind::Trait(trait_pred)) => {
match self.0.tcx.as_lang_item(trait_pred.def_id()) {
Some(LangItem::Sized)
if trait_pred.self_ty().is_trivially_sized(self.0.tcx) =>
{
Some(HasChanged::No)
}
Some(LangItem::Copy | LangItem::Clone)
if trait_pred.self_ty().is_trivially_pure_clone_copy() =>
{
Some(HasChanged::No)
}
_ => None,
}
Some(Certainty::Yes)
}
_ => None,
}

View file

@ -195,10 +195,15 @@ where
let goal = obligation.as_goal();
let delegate = <&SolverDelegate<'tcx>>::from(infcx);
if let Some(fast_path_has_changed) =
if let Some(certainty) =
delegate.compute_goal_fast_path(goal, obligation.cause.span)
{
any_changed |= matches!(fast_path_has_changed, HasChanged::Yes);
match certainty {
Certainty::Yes => {}
Certainty::Maybe(_) => {
self.obligations.register(obligation, None);
}
}
continue;
}

View file

@ -207,7 +207,7 @@ impl CStr {
/// * `ptr` must be [valid] for reads of bytes up to and including the nul terminator.
/// This means in particular:
///
/// * The entire memory range of this `CStr` must be contained within a single allocated object!
/// * The entire memory range of this `CStr` must be contained within a single allocation!
/// * `ptr` must be non-null even for a zero-length cstr.
///
/// * The memory referenced by the returned `CStr` must not be mutated for

View file

@ -1723,7 +1723,7 @@ pub const fn needs_drop<T: ?Sized>() -> bool;
/// # Safety
///
/// If the computed offset is non-zero, then both the starting and resulting pointer must be
/// either in bounds or at the end of an allocated object. If either pointer is out
/// either in bounds or at the end of an allocation. If either pointer is out
/// of bounds or arithmetic overflow occurs then this operation is undefined behavior.
///
/// The stabilized version of this intrinsic is [`pointer::offset`].

View file

@ -200,7 +200,7 @@ pub trait Unsize<T: ?Sized> {
///
/// Constants are only allowed as patterns if (a) their type implements
/// `PartialEq`, and (b) interpreting the value of the constant as a pattern
/// is equialent to calling `PartialEq`. This ensures that constants used as
/// is equivalent to calling `PartialEq`. This ensures that constants used as
/// patterns cannot expose implementation details in an unexpected way or
/// cause semver hazards.
///

View file

@ -171,6 +171,7 @@ impl f128 {
/// [Machine epsilon]: https://en.wikipedia.org/wiki/Machine_epsilon
/// [`MANTISSA_DIGITS`]: f128::MANTISSA_DIGITS
#[unstable(feature = "f128", issue = "116909")]
#[rustc_diagnostic_item = "f128_epsilon"]
pub const EPSILON: f128 = 1.92592994438723585305597794258492732e-34_f128;
/// Smallest finite `f128` value.

View file

@ -168,6 +168,7 @@ impl f16 {
/// [Machine epsilon]: https://en.wikipedia.org/wiki/Machine_epsilon
/// [`MANTISSA_DIGITS`]: f16::MANTISSA_DIGITS
#[unstable(feature = "f16", issue = "116909")]
#[rustc_diagnostic_item = "f16_epsilon"]
pub const EPSILON: f16 = 9.7656e-4_f16;
/// Smallest finite `f16` value.

View file

@ -1623,7 +1623,7 @@ mod prim_usize {}
/// * if `size_of_val(t) > 0`, then `t` is dereferenceable for `size_of_val(t)` many bytes
///
/// If `t` points at address `a`, being "dereferenceable" for N bytes means that the memory range
/// `[a, a + N)` is all contained within a single [allocated object].
/// `[a, a + N)` is all contained within a single [allocation].
///
/// For instance, this means that unsafe code in a safe function may assume these invariants are
/// ensured of arguments passed by the caller, and it may assume that these invariants are ensured
@ -1639,7 +1639,7 @@ mod prim_usize {}
/// may be unsound or become unsound in future versions of Rust depending on how this question is
/// decided.
///
/// [allocated object]: ptr#allocated-object
/// [allocation]: ptr#allocation
#[stable(feature = "rust1", since = "1.0.0")]
mod prim_ref {}

View file

@ -482,17 +482,17 @@ impl<T: ?Sized> *const T {
///
/// This operation itself is always safe, but using the resulting pointer is not.
///
/// The resulting pointer "remembers" the [allocated object] that `self` points to
/// The resulting pointer "remembers" the [allocation] that `self` points to
/// (this is called "[Provenance](ptr/index.html#provenance)").
/// The pointer must not be used to read or write other allocated objects.
/// The pointer must not be used to read or write other allocations.
///
/// In other words, `let z = x.wrapping_offset((y as isize) - (x as isize))` does *not* make `z`
/// the same as `y` even if we assume `T` has size `1` and there is no overflow: `z` is still
/// attached to the object `x` is attached to, and dereferencing it is Undefined Behavior unless
/// `x` and `y` point into the same allocated object.
/// `x` and `y` point into the same allocation.
///
/// Compared to [`offset`], this method basically delays the requirement of staying within the
/// same allocated object: [`offset`] is immediate Undefined Behavior when crossing object
/// same allocation: [`offset`] is immediate Undefined Behavior when crossing object
/// boundaries; `wrapping_offset` produces a pointer but still leads to Undefined Behavior if a
/// pointer is dereferenced when it is out-of-bounds of the object it is attached to. [`offset`]
/// can be optimized better and is thus preferable in performance-sensitive code.
@ -500,10 +500,10 @@ impl<T: ?Sized> *const T {
/// The delayed check only considers the value of the pointer that was dereferenced, not the
/// intermediate values used during the computation of the final result. For example,
/// `x.wrapping_offset(o).wrapping_offset(o.wrapping_neg())` is always the same as `x`. In other
/// words, leaving the allocated object and then re-entering it later is permitted.
/// words, leaving the allocation and then re-entering it later is permitted.
///
/// [`offset`]: #method.offset
/// [allocated object]: crate::ptr#allocated-object
/// [allocation]: crate::ptr#allocation
///
/// # Examples
///
@ -616,7 +616,7 @@ impl<T: ?Sized> *const T {
/// * `self` and `origin` must either
///
/// * point to the same address, or
/// * both be [derived from][crate::ptr#provenance] a pointer to the same [allocated object], and the memory range between
/// * both be [derived from][crate::ptr#provenance] a pointer to the same [allocation], and the memory range between
/// the two pointers must be in bounds of that object. (See below for an example.)
///
/// * The distance between the pointers, in bytes, must be an exact multiple
@ -624,10 +624,10 @@ impl<T: ?Sized> *const T {
///
/// As a consequence, the absolute distance between the pointers, in bytes, computed on
/// mathematical integers (without "wrapping around"), cannot overflow an `isize`. This is
/// implied by the in-bounds requirement, and the fact that no allocated object can be larger
/// implied by the in-bounds requirement, and the fact that no allocation can be larger
/// than `isize::MAX` bytes.
///
/// The requirement for pointers to be derived from the same allocated object is primarily
/// The requirement for pointers to be derived from the same allocation is primarily
/// needed for `const`-compatibility: the distance between pointers into *different* allocated
/// objects is not known at compile-time. However, the requirement also exists at
/// runtime and may be exploited by optimizations. If you wish to compute the difference between
@ -636,7 +636,7 @@ impl<T: ?Sized> *const T {
// FIXME: recommend `addr()` instead of `as usize` once that is stable.
///
/// [`add`]: #method.add
/// [allocated object]: crate::ptr#allocated-object
/// [allocation]: crate::ptr#allocation
///
/// # Panics
///
@ -969,12 +969,12 @@ impl<T: ?Sized> *const T {
/// "wrapping around"), must fit in an `isize`.
///
/// * If the computed offset is non-zero, then `self` must be [derived from][crate::ptr#provenance] a pointer to some
/// [allocated object], and the entire memory range between `self` and the result must be in
/// bounds of that allocated object. In particular, this range must not "wrap around" the edge
/// [allocation], and the entire memory range between `self` and the result must be in
/// bounds of that allocation. In particular, this range must not "wrap around" the edge
/// of the address space.
///
/// Allocated objects can never be larger than `isize::MAX` bytes, so if the computed offset
/// stays in bounds of the allocated object, it is guaranteed to satisfy the first requirement.
/// Allocations can never be larger than `isize::MAX` bytes, so if the computed offset
/// stays in bounds of the allocation, it is guaranteed to satisfy the first requirement.
/// This implies, for instance, that `vec.as_ptr().add(vec.len())` (for `vec: Vec<T>`) is always
/// safe.
///
@ -983,7 +983,7 @@ impl<T: ?Sized> *const T {
/// enables more aggressive compiler optimizations.
///
/// [`wrapping_sub`]: #method.wrapping_sub
/// [allocated object]: crate::ptr#allocated-object
/// [allocation]: crate::ptr#allocation
///
/// # Examples
///
@ -1073,16 +1073,16 @@ impl<T: ?Sized> *const T {
///
/// This operation itself is always safe, but using the resulting pointer is not.
///
/// The resulting pointer "remembers" the [allocated object] that `self` points to; it must not
/// be used to read or write other allocated objects.
/// The resulting pointer "remembers" the [allocation] that `self` points to; it must not
/// be used to read or write other allocations.
///
/// In other words, `let z = x.wrapping_add((y as usize) - (x as usize))` does *not* make `z`
/// the same as `y` even if we assume `T` has size `1` and there is no overflow: `z` is still
/// attached to the object `x` is attached to, and dereferencing it is Undefined Behavior unless
/// `x` and `y` point into the same allocated object.
/// `x` and `y` point into the same allocation.
///
/// Compared to [`add`], this method basically delays the requirement of staying within the
/// same allocated object: [`add`] is immediate Undefined Behavior when crossing object
/// same allocation: [`add`] is immediate Undefined Behavior when crossing object
/// boundaries; `wrapping_add` produces a pointer but still leads to Undefined Behavior if a
/// pointer is dereferenced when it is out-of-bounds of the object it is attached to. [`add`]
/// can be optimized better and is thus preferable in performance-sensitive code.
@ -1090,10 +1090,10 @@ impl<T: ?Sized> *const T {
/// The delayed check only considers the value of the pointer that was dereferenced, not the
/// intermediate values used during the computation of the final result. For example,
/// `x.wrapping_add(o).wrapping_sub(o)` is always the same as `x`. In other words, leaving the
/// allocated object and then re-entering it later is permitted.
/// allocation and then re-entering it later is permitted.
///
/// [`add`]: #method.add
/// [allocated object]: crate::ptr#allocated-object
/// [allocation]: crate::ptr#allocation
///
/// # Examples
///
@ -1152,16 +1152,16 @@ impl<T: ?Sized> *const T {
///
/// This operation itself is always safe, but using the resulting pointer is not.
///
/// The resulting pointer "remembers" the [allocated object] that `self` points to; it must not
/// be used to read or write other allocated objects.
/// The resulting pointer "remembers" the [allocation] that `self` points to; it must not
/// be used to read or write other allocations.
///
/// In other words, `let z = x.wrapping_sub((x as usize) - (y as usize))` does *not* make `z`
/// the same as `y` even if we assume `T` has size `1` and there is no overflow: `z` is still
/// attached to the object `x` is attached to, and dereferencing it is Undefined Behavior unless
/// `x` and `y` point into the same allocated object.
/// `x` and `y` point into the same allocation.
///
/// Compared to [`sub`], this method basically delays the requirement of staying within the
/// same allocated object: [`sub`] is immediate Undefined Behavior when crossing object
/// same allocation: [`sub`] is immediate Undefined Behavior when crossing object
/// boundaries; `wrapping_sub` produces a pointer but still leads to Undefined Behavior if a
/// pointer is dereferenced when it is out-of-bounds of the object it is attached to. [`sub`]
/// can be optimized better and is thus preferable in performance-sensitive code.
@ -1169,10 +1169,10 @@ impl<T: ?Sized> *const T {
/// The delayed check only considers the value of the pointer that was dereferenced, not the
/// intermediate values used during the computation of the final result. For example,
/// `x.wrapping_add(o).wrapping_sub(o)` is always the same as `x`. In other words, leaving the
/// allocated object and then re-entering it later is permitted.
/// allocation and then re-entering it later is permitted.
///
/// [`sub`]: #method.sub
/// [allocated object]: crate::ptr#allocated-object
/// [allocation]: crate::ptr#allocation
///
/// # Examples
///
@ -1564,8 +1564,8 @@ impl<T> *const [T] {
/// * The pointer must be [valid] for reads for `ptr.len() * size_of::<T>()` many bytes,
/// and it must be properly aligned. This means in particular:
///
/// * The entire memory range of this slice must be contained within a single [allocated object]!
/// Slices can never span across multiple allocated objects.
/// * The entire memory range of this slice must be contained within a single [allocation]!
/// Slices can never span across multiple allocations.
///
/// * The pointer must be aligned even for zero-length slices. One
/// reason for this is that enum layout optimizations may rely on references
@ -1586,7 +1586,7 @@ impl<T> *const [T] {
/// See also [`slice::from_raw_parts`][].
///
/// [valid]: crate::ptr#safety
/// [allocated object]: crate::ptr#allocated-object
/// [allocation]: crate::ptr#allocation
///
/// # Panics during const evaluation
///

View file

@ -15,12 +15,12 @@ If any of the following conditions are violated, the result is Undefined Behavio
"wrapping around"), must fit in an `isize`.
* If the computed offset is non-zero, then `self` must be [derived from][crate::ptr#provenance] a pointer to some
[allocated object], and the entire memory range between `self` and the result must be in
bounds of that allocated object. In particular, this range must not "wrap around" the edge
[allocation], and the entire memory range between `self` and the result must be in
bounds of that allocation. In particular, this range must not "wrap around" the edge
of the address space.
Allocated objects can never be larger than `isize::MAX` bytes, so if the computed offset
stays in bounds of the allocated object, it is guaranteed to satisfy the first requirement.
stays in bounds of the allocation, it is guaranteed to satisfy the first requirement.
This implies, for instance, that `vec.as_ptr().add(vec.len())` (for `vec: Vec<T>`) is always
safe.
@ -29,4 +29,4 @@ difficult to satisfy. The only advantage of this method is that it
enables more aggressive compiler optimizations.
[`wrapping_add`]: #method.wrapping_add
[allocated object]: crate::ptr#allocated-object
[allocation]: crate::ptr#allocation

View file

@ -11,13 +11,13 @@ If any of the following conditions are violated, the result is Undefined Behavio
"wrapping around"), must fit in an `isize`.
* If the computed offset is non-zero, then `self` must be [derived from][crate::ptr#provenance] a pointer to some
[allocated object], and the entire memory range between `self` and the result must be in
bounds of that allocated object. In particular, this range must not "wrap around" the edge
[allocation], and the entire memory range between `self` and the result must be in
bounds of that allocation. In particular, this range must not "wrap around" the edge
of the address space. Note that "range" here refers to a half-open range as usual in Rust,
i.e., `self..result` for non-negative offsets and `result..self` for negative offsets.
Allocated objects can never be larger than `isize::MAX` bytes, so if the computed offset
stays in bounds of the allocated object, it is guaranteed to satisfy the first requirement.
stays in bounds of the allocation, it is guaranteed to satisfy the first requirement.
This implies, for instance, that `vec.as_ptr().add(vec.len())` (for `vec: Vec<T>`) is always
safe.
@ -26,4 +26,4 @@ difficult to satisfy. The only advantage of this method is that it
enables more aggressive compiler optimizations.
[`wrapping_offset`]: #method.wrapping_offset
[allocated object]: crate::ptr#allocated-object
[allocation]: crate::ptr#allocation

View file

@ -19,10 +19,10 @@
//! pointer. The following points are only concerned with non-zero-sized accesses.
//! * A [null] pointer is *never* valid.
//! * For a pointer to be valid, it is necessary, but not always sufficient, that the pointer be
//! *dereferenceable*. The [provenance] of the pointer is used to determine which [allocated
//! object] it is derived from; a pointer is dereferenceable if the memory range of the given size
//! starting at the pointer is entirely contained within the bounds of that allocated object. Note
//! that in Rust, every (stack-allocated) variable is considered a separate allocated object.
//! *dereferenceable*. The [provenance] of the pointer is used to determine which [allocation]
//! it is derived from; a pointer is dereferenceable if the memory range of the given size
//! starting at the pointer is entirely contained within the bounds of that allocation. Note
//! that in Rust, every (stack-allocated) variable is considered a separate allocation.
//! * All accesses performed by functions in this module are *non-atomic* in the sense
//! of [atomic operations] used to synchronize between threads. This means it is
//! undefined behavior to perform two concurrent accesses to the same location from different
@ -30,7 +30,7 @@
//! includes [`read_volatile`] and [`write_volatile`]: Volatile accesses cannot
//! be used for inter-thread synchronization.
//! * The result of casting a reference to a pointer is valid for as long as the
//! underlying object is live and no reference (just raw pointers) is used to
//! underlying allocation is live and no reference (just raw pointers) is used to
//! access the same memory. That is, reference and pointer accesses cannot be
//! interleaved.
//!
@ -95,24 +95,26 @@
//!
//! [valid value]: ../../reference/behavior-considered-undefined.html#invalid-values
//!
//! ## Allocated object
//! ## Allocation
//!
//! An *allocated object* is a subset of program memory which is addressable
//! <a id="allocated-object"></a> <!-- keep old URLs working -->
//!
//! An *allocation* is a subset of program memory which is addressable
//! from Rust, and within which pointer arithmetic is possible. Examples of
//! allocated objects include heap allocations, stack-allocated variables,
//! allocations include heap allocations, stack-allocated variables,
//! statics, and consts. The safety preconditions of some Rust operations -
//! such as `offset` and field projections (`expr.field`) - are defined in
//! terms of the allocated objects on which they operate.
//! terms of the allocations on which they operate.
//!
//! An allocated object has a base address, a size, and a set of memory
//! addresses. It is possible for an allocated object to have zero size, but
//! such an allocated object will still have a base address. The base address
//! of an allocated object is not necessarily unique. While it is currently the
//! case that an allocated object always has a set of memory addresses which is
//! An allocation has a base address, a size, and a set of memory
//! addresses. It is possible for an allocation to have zero size, but
//! such an allocation will still have a base address. The base address
//! of an allocation is not necessarily unique. While it is currently the
//! case that an allocation always has a set of memory addresses which is
//! fully contiguous (i.e., has no "holes"), there is no guarantee that this
//! will not change in the future.
//!
//! For any allocated object with `base` address, `size`, and a set of
//! For any allocation with `base` address, `size`, and a set of
//! `addresses`, the following are guaranteed:
//! - For all addresses `a` in `addresses`, `a` is in the range `base .. (base +
//! size)` (note that this requires `a < base + size`, not `a <= base + size`)
@ -122,11 +124,11 @@
//! - `size <= isize::MAX`
//!
//! As a consequence of these guarantees, given any address `a` within the set
//! of addresses of an allocated object:
//! of addresses of an allocation:
//! - It is guaranteed that `a - base` does not overflow `isize`
//! - It is guaranteed that `a - base` is non-negative
//! - It is guaranteed that, given `o = a - base` (i.e., the offset of `a` within
//! the allocated object), `base + o` will not wrap around the address space (in
//! the allocation), `base + o` will not wrap around the address space (in
//! other words, will not overflow `usize`)
//!
//! [`null()`]: null
@ -138,8 +140,8 @@
//! and the freed memory gets reallocated before your read/write (in fact this is the
//! worst-case scenario, UAFs would be much less concerning if this didn't happen!).
//! As another example, consider that [`wrapping_offset`] is documented to "remember"
//! the allocated object that the original pointer points to, even if it is offset far
//! outside the memory range occupied by that allocated object.
//! the allocation that the original pointer points to, even if it is offset far
//! outside the memory range occupied by that allocation.
//! To rationalize claims like this, pointers need to somehow be *more* than just their addresses:
//! they must have **provenance**.
//!
@ -159,12 +161,12 @@
//! writes. Note that this can interact with the other components, e.g. a pointer might permit
//! mutation only for a subset of addresses, or only for a subset of its maximal timespan.
//!
//! When an [allocated object] is created, it has a unique Original Pointer. For alloc
//! When an [allocation] is created, it has a unique Original Pointer. For alloc
//! APIs this is literally the pointer the call returns, and for local variables and statics,
//! this is the name of the variable/static. (This is mildly overloading the term "pointer"
//! for the sake of brevity/exposition.)
//!
//! The Original Pointer for an allocated object has provenance that constrains the *spatial*
//! The Original Pointer for an allocation has provenance that constrains the *spatial*
//! permissions of this pointer to the memory range of the allocation, and the *temporal*
//! permissions to the lifetime of the allocation. Provenance is implicitly inherited by all
//! pointers transitively derived from the Original Pointer through operations like [`offset`],
@ -192,10 +194,10 @@
//! provenance since they access an empty range of memory.
//!
//! * It is undefined behavior to [`offset`] a pointer across a memory range that is not contained
//! in the allocated object it is derived from, or to [`offset_from`] two pointers not derived
//! from the same allocated object. Provenance is used to say what exactly "derived from" even
//! in the allocation it is derived from, or to [`offset_from`] two pointers not derived
//! from the same allocation. Provenance is used to say what exactly "derived from" even
//! means: the lineage of a pointer is traced back to the Original Pointer it descends from, and
//! that identifies the relevant allocated object. In particular, it's always UB to offset a
//! that identifies the relevant allocation. In particular, it's always UB to offset a
//! pointer derived from something that is now deallocated, except if the offset is 0.
//!
//! But it *is* still sound to:
@ -216,7 +218,7 @@
//! * Compare arbitrary pointers by address. Pointer comparison ignores provenance and addresses
//! *are* just integers, so there is always a coherent answer, even if the pointers are dangling
//! or from different provenances. Note that if you get "lucky" and notice that a pointer at the
//! end of one allocated object is the "same" address as the start of another allocated object,
//! end of one allocation is the "same" address as the start of another allocation,
//! anything you do with that fact is *probably* going to be gibberish. The scope of that
//! gibberish is kept under control by the fact that the two pointers *still* aren't allowed to
//! access the other's allocation (bytes), because they still have different provenance.
@ -369,7 +371,7 @@
//! integer-to-pointer casts.
//!
//! [aliasing]: ../../nomicon/aliasing.html
//! [allocated object]: #allocated-object
//! [allocation]: #allocation
//! [provenance]: #provenance
//! [book]: ../../book/ch19-01-unsafe-rust.html#dereferencing-a-raw-pointer
//! [ub]: ../../reference/behavior-considered-undefined.html
@ -1289,7 +1291,7 @@ pub const unsafe fn swap<T>(x: *mut T, y: *mut T) {
// SAFETY: the caller must guarantee that `x` and `y` are
// valid for writes and properly aligned. `tmp` cannot be
// overlapping either `x` or `y` because `tmp` was just allocated
// on the stack as a separate allocated object.
// on the stack as a separate allocation.
unsafe {
copy_nonoverlapping(x, tmp.as_mut_ptr(), 1);
copy(y, x, 1); // `x` and `y` may overlap
@ -1409,7 +1411,7 @@ pub const unsafe fn swap_nonoverlapping<T>(x: *mut T, y: *mut T, count: usize) {
// Going though a slice here helps codegen know the size fits in `isize`
let slice = slice_from_raw_parts_mut(x, count);
// SAFETY: This is all readable from the pointer, meaning it's one
// allocated object, and thus cannot be more than isize::MAX bytes.
// allocation, and thus cannot be more than isize::MAX bytes.
let bytes = unsafe { mem::size_of_val_raw::<[T]>(slice) };
if let Some(bytes) = NonZero::new(bytes) {
// SAFETY: These are the same ranges, just expressed in a different
@ -1563,7 +1565,7 @@ pub const unsafe fn replace<T>(dst: *mut T, src: T) -> T {
// SAFETY: the caller must guarantee that `dst` is valid to be
// cast to a mutable reference (valid for writes, aligned, initialized),
// and cannot overlap `src` since `dst` must point to a distinct
// allocated object.
// allocation.
unsafe {
ub_checks::assert_unsafe_precondition!(
check_language_ub,
@ -1810,7 +1812,7 @@ pub const unsafe fn read_unaligned<T>(src: *const T) -> T {
let mut tmp = MaybeUninit::<T>::uninit();
// SAFETY: the caller must guarantee that `src` is valid for reads.
// `src` cannot overlap `tmp` because `tmp` was just allocated on
// the stack as a separate allocated object.
// the stack as a separate allocation.
//
// Also, since we just wrote a valid value into `tmp`, it is guaranteed
// to be properly initialized.

View file

@ -448,7 +448,7 @@ impl<T: ?Sized> *mut T {
// SAFETY: the caller must uphold the safety contract for `offset`.
// The obtained pointer is valid for writes since the caller must
// guarantee that it points to the same allocated object as `self`.
// guarantee that it points to the same allocation as `self`.
unsafe { intrinsics::offset(self, count) }
}
@ -481,17 +481,17 @@ impl<T: ?Sized> *mut T {
///
/// This operation itself is always safe, but using the resulting pointer is not.
///
/// The resulting pointer "remembers" the [allocated object] that `self` points to
/// The resulting pointer "remembers" the [allocation] that `self` points to
/// (this is called "[Provenance](ptr/index.html#provenance)").
/// The pointer must not be used to read or write other allocated objects.
/// The pointer must not be used to read or write other allocations.
///
/// In other words, `let z = x.wrapping_offset((y as isize) - (x as isize))` does *not* make `z`
/// the same as `y` even if we assume `T` has size `1` and there is no overflow: `z` is still
/// attached to the object `x` is attached to, and dereferencing it is Undefined Behavior unless
/// `x` and `y` point into the same allocated object.
/// `x` and `y` point into the same allocation.
///
/// Compared to [`offset`], this method basically delays the requirement of staying within the
/// same allocated object: [`offset`] is immediate Undefined Behavior when crossing object
/// same allocation: [`offset`] is immediate Undefined Behavior when crossing object
/// boundaries; `wrapping_offset` produces a pointer but still leads to Undefined Behavior if a
/// pointer is dereferenced when it is out-of-bounds of the object it is attached to. [`offset`]
/// can be optimized better and is thus preferable in performance-sensitive code.
@ -499,10 +499,10 @@ impl<T: ?Sized> *mut T {
/// The delayed check only considers the value of the pointer that was dereferenced, not the
/// intermediate values used during the computation of the final result. For example,
/// `x.wrapping_offset(o).wrapping_offset(o.wrapping_neg())` is always the same as `x`. In other
/// words, leaving the allocated object and then re-entering it later is permitted.
/// words, leaving the allocation and then re-entering it later is permitted.
///
/// [`offset`]: #method.offset
/// [allocated object]: crate::ptr#allocated-object
/// [allocation]: crate::ptr#allocation
///
/// # Examples
///
@ -791,7 +791,7 @@ impl<T: ?Sized> *mut T {
/// * `self` and `origin` must either
///
/// * point to the same address, or
/// * both be [derived from][crate::ptr#provenance] a pointer to the same [allocated object], and the memory range between
/// * both be [derived from][crate::ptr#provenance] a pointer to the same [allocation], and the memory range between
/// the two pointers must be in bounds of that object. (See below for an example.)
///
/// * The distance between the pointers, in bytes, must be an exact multiple
@ -799,10 +799,10 @@ impl<T: ?Sized> *mut T {
///
/// As a consequence, the absolute distance between the pointers, in bytes, computed on
/// mathematical integers (without "wrapping around"), cannot overflow an `isize`. This is
/// implied by the in-bounds requirement, and the fact that no allocated object can be larger
/// implied by the in-bounds requirement, and the fact that no allocation can be larger
/// than `isize::MAX` bytes.
///
/// The requirement for pointers to be derived from the same allocated object is primarily
/// The requirement for pointers to be derived from the same allocation is primarily
/// needed for `const`-compatibility: the distance between pointers into *different* allocated
/// objects is not known at compile-time. However, the requirement also exists at
/// runtime and may be exploited by optimizations. If you wish to compute the difference between
@ -811,7 +811,7 @@ impl<T: ?Sized> *mut T {
// FIXME: recommend `addr()` instead of `as usize` once that is stable.
///
/// [`add`]: #method.add
/// [allocated object]: crate::ptr#allocated-object
/// [allocation]: crate::ptr#allocation
///
/// # Panics
///
@ -1061,12 +1061,12 @@ impl<T: ?Sized> *mut T {
/// "wrapping around"), must fit in an `isize`.
///
/// * If the computed offset is non-zero, then `self` must be [derived from][crate::ptr#provenance] a pointer to some
/// [allocated object], and the entire memory range between `self` and the result must be in
/// bounds of that allocated object. In particular, this range must not "wrap around" the edge
/// [allocation], and the entire memory range between `self` and the result must be in
/// bounds of that allocation. In particular, this range must not "wrap around" the edge
/// of the address space.
///
/// Allocated objects can never be larger than `isize::MAX` bytes, so if the computed offset
/// stays in bounds of the allocated object, it is guaranteed to satisfy the first requirement.
/// Allocations can never be larger than `isize::MAX` bytes, so if the computed offset
/// stays in bounds of the allocation, it is guaranteed to satisfy the first requirement.
/// This implies, for instance, that `vec.as_ptr().add(vec.len())` (for `vec: Vec<T>`) is always
/// safe.
///
@ -1075,7 +1075,7 @@ impl<T: ?Sized> *mut T {
/// enables more aggressive compiler optimizations.
///
/// [`wrapping_sub`]: #method.wrapping_sub
/// [allocated object]: crate::ptr#allocated-object
/// [allocation]: crate::ptr#allocation
///
/// # Examples
///
@ -1165,16 +1165,16 @@ impl<T: ?Sized> *mut T {
///
/// This operation itself is always safe, but using the resulting pointer is not.
///
/// The resulting pointer "remembers" the [allocated object] that `self` points to; it must not
/// be used to read or write other allocated objects.
/// The resulting pointer "remembers" the [allocation] that `self` points to; it must not
/// be used to read or write other allocations.
///
/// In other words, `let z = x.wrapping_add((y as usize) - (x as usize))` does *not* make `z`
/// the same as `y` even if we assume `T` has size `1` and there is no overflow: `z` is still
/// attached to the object `x` is attached to, and dereferencing it is Undefined Behavior unless
/// `x` and `y` point into the same allocated object.
/// `x` and `y` point into the same allocation.
///
/// Compared to [`add`], this method basically delays the requirement of staying within the
/// same allocated object: [`add`] is immediate Undefined Behavior when crossing object
/// same allocation: [`add`] is immediate Undefined Behavior when crossing object
/// boundaries; `wrapping_add` produces a pointer but still leads to Undefined Behavior if a
/// pointer is dereferenced when it is out-of-bounds of the object it is attached to. [`add`]
/// can be optimized better and is thus preferable in performance-sensitive code.
@ -1182,10 +1182,10 @@ impl<T: ?Sized> *mut T {
/// The delayed check only considers the value of the pointer that was dereferenced, not the
/// intermediate values used during the computation of the final result. For example,
/// `x.wrapping_add(o).wrapping_sub(o)` is always the same as `x`. In other words, leaving the
/// allocated object and then re-entering it later is permitted.
/// allocation and then re-entering it later is permitted.
///
/// [`add`]: #method.add
/// [allocated object]: crate::ptr#allocated-object
/// [allocation]: crate::ptr#allocation
///
/// # Examples
///
@ -1241,16 +1241,16 @@ impl<T: ?Sized> *mut T {
///
/// This operation itself is always safe, but using the resulting pointer is not.
///
/// The resulting pointer "remembers" the [allocated object] that `self` points to; it must not
/// be used to read or write other allocated objects.
/// The resulting pointer "remembers" the [allocation] that `self` points to; it must not
/// be used to read or write other allocations.
///
/// In other words, `let z = x.wrapping_sub((x as usize) - (y as usize))` does *not* make `z`
/// the same as `y` even if we assume `T` has size `1` and there is no overflow: `z` is still
/// attached to the object `x` is attached to, and dereferencing it is Undefined Behavior unless
/// `x` and `y` point into the same allocated object.
/// `x` and `y` point into the same allocation.
///
/// Compared to [`sub`], this method basically delays the requirement of staying within the
/// same allocated object: [`sub`] is immediate Undefined Behavior when crossing object
/// same allocation: [`sub`] is immediate Undefined Behavior when crossing object
/// boundaries; `wrapping_sub` produces a pointer but still leads to Undefined Behavior if a
/// pointer is dereferenced when it is out-of-bounds of the object it is attached to. [`sub`]
/// can be optimized better and is thus preferable in performance-sensitive code.
@ -1258,10 +1258,10 @@ impl<T: ?Sized> *mut T {
/// The delayed check only considers the value of the pointer that was dereferenced, not the
/// intermediate values used during the computation of the final result. For example,
/// `x.wrapping_add(o).wrapping_sub(o)` is always the same as `x`. In other words, leaving the
/// allocated object and then re-entering it later is permitted.
/// allocation and then re-entering it later is permitted.
///
/// [`sub`]: #method.sub
/// [allocated object]: crate::ptr#allocated-object
/// [allocation]: crate::ptr#allocation
///
/// # Examples
///
@ -1770,7 +1770,7 @@ impl<T> *mut [T] {
///
/// # Safety
///
/// `mid` must be [in-bounds] of the underlying [allocated object].
/// `mid` must be [in-bounds] of the underlying [allocation].
/// Which means `self` must be dereferenceable and span a single allocation
/// that is at least `mid * size_of::<T>()` bytes long. Not upholding these
/// requirements is *[undefined behavior]* even if the resulting pointers are not used.
@ -1781,7 +1781,7 @@ impl<T> *mut [T] {
///
/// [`split_at_mut_unchecked`]: #method.split_at_mut_unchecked
/// [in-bounds]: #method.add
/// [allocated object]: crate::ptr#allocated-object
/// [allocation]: crate::ptr#allocation
/// [undefined behavior]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
///
/// # Examples
@ -1816,13 +1816,14 @@ impl<T> *mut [T] {
///
/// # Safety
///
/// `mid` must be [in-bounds] of the underlying [allocated object].
/// `mid` must be [in-bounds] of the underlying [allocation].
/// Which means `self` must be dereferenceable and span a single allocation
/// that is at least `mid * size_of::<T>()` bytes long. Not upholding these
/// requirements is *[undefined behavior]* even if the resulting pointers are not used.
///
/// [in-bounds]: #method.add
/// [out-of-bounds index]: #method.add
/// [allocation]: crate::ptr#allocation
/// [undefined behavior]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
///
/// # Examples
@ -1922,8 +1923,8 @@ impl<T> *mut [T] {
/// * The pointer must be [valid] for reads for `ptr.len() * size_of::<T>()` many bytes,
/// and it must be properly aligned. This means in particular:
///
/// * The entire memory range of this slice must be contained within a single [allocated object]!
/// Slices can never span across multiple allocated objects.
/// * The entire memory range of this slice must be contained within a single [allocation]!
/// Slices can never span across multiple allocations.
///
/// * The pointer must be aligned even for zero-length slices. One
/// reason for this is that enum layout optimizations may rely on references
@ -1944,7 +1945,7 @@ impl<T> *mut [T] {
/// See also [`slice::from_raw_parts`][].
///
/// [valid]: crate::ptr#safety
/// [allocated object]: crate::ptr#allocated-object
/// [allocation]: crate::ptr#allocation
///
/// # Panics during const evaluation
///
@ -1980,8 +1981,8 @@ impl<T> *mut [T] {
/// * The pointer must be [valid] for reads and writes for `ptr.len() * size_of::<T>()`
/// many bytes, and it must be properly aligned. This means in particular:
///
/// * The entire memory range of this slice must be contained within a single [allocated object]!
/// Slices can never span across multiple allocated objects.
/// * The entire memory range of this slice must be contained within a single [allocation]!
/// Slices can never span across multiple allocations.
///
/// * The pointer must be aligned even for zero-length slices. One
/// reason for this is that enum layout optimizations may rely on references
@ -2002,7 +2003,7 @@ impl<T> *mut [T] {
/// See also [`slice::from_raw_parts_mut`][].
///
/// [valid]: crate::ptr#safety
/// [allocated object]: crate::ptr#allocated-object
/// [allocation]: crate::ptr#allocation
///
/// # Panics during const evaluation
///

View file

@ -530,16 +530,16 @@ impl<T: ?Sized> NonNull<T> {
/// * The computed offset, `count * size_of::<T>()` bytes, must not overflow `isize`.
///
/// * If the computed offset is non-zero, then `self` must be derived from a pointer to some
/// [allocated object], and the entire memory range between `self` and the result must be in
/// bounds of that allocated object. In particular, this range must not "wrap around" the edge
/// [allocation], and the entire memory range between `self` and the result must be in
/// bounds of that allocation. In particular, this range must not "wrap around" the edge
/// of the address space.
///
/// Allocated objects can never be larger than `isize::MAX` bytes, so if the computed offset
/// stays in bounds of the allocated object, it is guaranteed to satisfy the first requirement.
/// Allocations can never be larger than `isize::MAX` bytes, so if the computed offset
/// stays in bounds of the allocation, it is guaranteed to satisfy the first requirement.
/// This implies, for instance, that `vec.as_ptr().add(vec.len())` (for `vec: Vec<T>`) is always
/// safe.
///
/// [allocated object]: crate::ptr#allocated-object
/// [allocation]: crate::ptr#allocation
///
/// # Examples
///
@ -606,16 +606,16 @@ impl<T: ?Sized> NonNull<T> {
/// * The computed offset, `count * size_of::<T>()` bytes, must not overflow `isize`.
///
/// * If the computed offset is non-zero, then `self` must be derived from a pointer to some
/// [allocated object], and the entire memory range between `self` and the result must be in
/// bounds of that allocated object. In particular, this range must not "wrap around" the edge
/// [allocation], and the entire memory range between `self` and the result must be in
/// bounds of that allocation. In particular, this range must not "wrap around" the edge
/// of the address space.
///
/// Allocated objects can never be larger than `isize::MAX` bytes, so if the computed offset
/// stays in bounds of the allocated object, it is guaranteed to satisfy the first requirement.
/// Allocations can never be larger than `isize::MAX` bytes, so if the computed offset
/// stays in bounds of the allocation, it is guaranteed to satisfy the first requirement.
/// This implies, for instance, that `vec.as_ptr().add(vec.len())` (for `vec: Vec<T>`) is always
/// safe.
///
/// [allocated object]: crate::ptr#allocated-object
/// [allocation]: crate::ptr#allocation
///
/// # Examples
///
@ -683,16 +683,16 @@ impl<T: ?Sized> NonNull<T> {
/// * The computed offset, `count * size_of::<T>()` bytes, must not overflow `isize`.
///
/// * If the computed offset is non-zero, then `self` must be derived from a pointer to some
/// [allocated object], and the entire memory range between `self` and the result must be in
/// bounds of that allocated object. In particular, this range must not "wrap around" the edge
/// [allocation], and the entire memory range between `self` and the result must be in
/// bounds of that allocation. In particular, this range must not "wrap around" the edge
/// of the address space.
///
/// Allocated objects can never be larger than `isize::MAX` bytes, so if the computed offset
/// stays in bounds of the allocated object, it is guaranteed to satisfy the first requirement.
/// Allocations can never be larger than `isize::MAX` bytes, so if the computed offset
/// stays in bounds of the allocation, it is guaranteed to satisfy the first requirement.
/// This implies, for instance, that `vec.as_ptr().add(vec.len())` (for `vec: Vec<T>`) is always
/// safe.
///
/// [allocated object]: crate::ptr#allocated-object
/// [allocation]: crate::ptr#allocation
///
/// # Examples
///
@ -775,7 +775,7 @@ impl<T: ?Sized> NonNull<T> {
/// * `self` and `origin` must either
///
/// * point to the same address, or
/// * both be *derived from* a pointer to the same [allocated object], and the memory range between
/// * both be *derived from* a pointer to the same [allocation], and the memory range between
/// the two pointers must be in bounds of that object. (See below for an example.)
///
/// * The distance between the pointers, in bytes, must be an exact multiple
@ -783,10 +783,10 @@ impl<T: ?Sized> NonNull<T> {
///
/// As a consequence, the absolute distance between the pointers, in bytes, computed on
/// mathematical integers (without "wrapping around"), cannot overflow an `isize`. This is
/// implied by the in-bounds requirement, and the fact that no allocated object can be larger
/// implied by the in-bounds requirement, and the fact that no allocation can be larger
/// than `isize::MAX` bytes.
///
/// The requirement for pointers to be derived from the same allocated object is primarily
/// The requirement for pointers to be derived from the same allocation is primarily
/// needed for `const`-compatibility: the distance between pointers into *different* allocated
/// objects is not known at compile-time. However, the requirement also exists at
/// runtime and may be exploited by optimizations. If you wish to compute the difference between
@ -795,7 +795,7 @@ impl<T: ?Sized> NonNull<T> {
// FIXME: recommend `addr()` instead of `as usize` once that is stable.
///
/// [`add`]: #method.add
/// [allocated object]: crate::ptr#allocated-object
/// [allocation]: crate::ptr#allocation
///
/// # Panics
///
@ -1475,8 +1475,8 @@ impl<T> NonNull<[T]> {
/// * The pointer must be [valid] for reads for `ptr.len() * size_of::<T>()` many bytes,
/// and it must be properly aligned. This means in particular:
///
/// * The entire memory range of this slice must be contained within a single allocated object!
/// Slices can never span across multiple allocated objects.
/// * The entire memory range of this slice must be contained within a single allocation!
/// Slices can never span across multiple allocations.
///
/// * The pointer must be aligned even for zero-length slices. One
/// reason for this is that enum layout optimizations may rely on references
@ -1520,8 +1520,8 @@ impl<T> NonNull<[T]> {
/// * The pointer must be [valid] for reads and writes for `ptr.len() * size_of::<T>()`
/// many bytes, and it must be properly aligned. This means in particular:
///
/// * The entire memory range of this slice must be contained within a single allocated object!
/// Slices can never span across multiple allocated objects.
/// * The entire memory range of this slice must be contained within a single allocation!
/// Slices can never span across multiple allocations.
///
/// * The pointer must be aligned even for zero-length slices. One
/// reason for this is that enum layout optimizations may rely on references

View file

@ -14,8 +14,8 @@ use crate::{array, ptr, ub_checks};
/// * `data` must be non-null, [valid] for reads for `len * size_of::<T>()` many bytes,
/// and it must be properly aligned. This means in particular:
///
/// * The entire memory range of this slice must be contained within a single allocated object!
/// Slices can never span across multiple allocated objects. See [below](#incorrect-usage)
/// * The entire memory range of this slice must be contained within a single allocation!
/// Slices can never span across multiple allocations. See [below](#incorrect-usage)
/// for an example incorrectly not taking this into account.
/// * `data` must be non-null and aligned even for zero-length slices or slices of ZSTs. One
/// reason for this is that enum layout optimizations may rely on references
@ -65,14 +65,14 @@ use crate::{array, ptr, ub_checks};
/// assert_eq!(fst_end, snd_start, "Slices must be contiguous!");
/// unsafe {
/// // The assertion above ensures `fst` and `snd` are contiguous, but they might
/// // still be contained within _different allocated objects_, in which case
/// // still be contained within _different allocations_, in which case
/// // creating this slice is undefined behavior.
/// slice::from_raw_parts(fst.as_ptr(), fst.len() + snd.len())
/// }
/// }
///
/// fn main() {
/// // `a` and `b` are different allocated objects...
/// // `a` and `b` are different allocations...
/// let a = 42;
/// let b = 27;
/// // ... which may nevertheless be laid out contiguously in memory: | a | b |
@ -150,8 +150,8 @@ pub const unsafe fn from_raw_parts<'a, T>(data: *const T, len: usize) -> &'a [T]
/// * `data` must be non-null, [valid] for both reads and writes for `len * size_of::<T>()` many bytes,
/// and it must be properly aligned. This means in particular:
///
/// * The entire memory range of this slice must be contained within a single allocated object!
/// Slices can never span across multiple allocated objects.
/// * The entire memory range of this slice must be contained within a single allocation!
/// Slices can never span across multiple allocations.
/// * `data` must be non-null and aligned even for zero-length slices or slices of ZSTs. One
/// reason for this is that enum layout optimizations may rely on references
/// (including slices of any length) being aligned and non-null to distinguish
@ -228,8 +228,8 @@ pub const fn from_mut<T>(s: &mut T) -> &mut [T] {
/// the last element, such that the offset from the end to the start pointer is
/// the length of the slice.
///
/// * The entire memory range of this slice must be contained within a single allocated object!
/// Slices can never span across multiple allocated objects.
/// * The entire memory range of this slice must be contained within a single allocation!
/// Slices can never span across multiple allocations.
///
/// * The range must contain `N` consecutive properly initialized values of type `T`.
///
@ -298,8 +298,8 @@ pub const unsafe fn from_ptr_range<'a, T>(range: Range<*const T>) -> &'a [T] {
/// the last element, such that the offset from the end to the start pointer is
/// the length of the slice.
///
/// * The entire memory range of this slice must be contained within a single allocated object!
/// Slices can never span across multiple allocated objects.
/// * The entire memory range of this slice must be contained within a single allocation!
/// Slices can never span across multiple allocations.
///
/// * The range must contain `N` consecutive properly initialized values of type `T`.
///

View file

@ -1,17 +1,18 @@
# These defaults are meant for contributors to the standard library and documentation.
[build]
# When building the standard library, you almost never want to build the compiler itself.
build-stage = 0
test-stage = 0
bench-stage = 0
build-stage = 1
test-stage = 1
bench-stage = 1
[rust]
# This greatly increases the speed of rebuilds, especially when there are only minor changes. However, it makes the initial build slightly slower.
incremental = true
# Make the compiler and standard library faster to build, at the expense of a ~20% runtime slowdown.
lto = "off"
# Download rustc by default for library profile if compiler-affecting
# directories are not modified. For CI this is disabled.
# When building the standard library, you almost never want to build the compiler itself.
#
# If compiler-affecting directories are not modified, use precompiled rustc to speed up
# library development by skipping compiler builds.
download-rustc = "if-unchanged"
[llvm]

View file

@ -120,14 +120,12 @@ fn main() {
};
cmd.args(&args).env(dylib_path_var(), env::join_paths(&dylib_path).unwrap());
if let Some(crate_name) = crate_name {
if let Some(target) = env::var_os("RUSTC_TIME") {
if target == "all"
|| target.into_string().unwrap().split(',').any(|c| c.trim() == crate_name)
{
cmd.arg("-Ztime-passes");
}
}
if let Some(crate_name) = crate_name
&& let Some(target) = env::var_os("RUSTC_TIME")
&& (target == "all"
|| target.into_string().unwrap().split(',').any(|c| c.trim() == crate_name))
{
cmd.arg("-Ztime-passes");
}
// Print backtrace in case of ICE
@ -242,10 +240,10 @@ fn main() {
}
}
if env::var_os("RUSTC_BOLT_LINK_FLAGS").is_some() {
if let Some("rustc_driver") = crate_name {
cmd.arg("-Clink-args=-Wl,-q");
}
if env::var_os("RUSTC_BOLT_LINK_FLAGS").is_some()
&& let Some("rustc_driver") = crate_name
{
cmd.arg("-Clink-args=-Wl,-q");
}
let is_test = args.iter().any(|a| a == "--test");
@ -282,25 +280,24 @@ fn main() {
(child, status)
};
if env::var_os("RUSTC_PRINT_STEP_TIMINGS").is_some()
|| env::var_os("RUSTC_PRINT_STEP_RUSAGE").is_some()
if (env::var_os("RUSTC_PRINT_STEP_TIMINGS").is_some()
|| env::var_os("RUSTC_PRINT_STEP_RUSAGE").is_some())
&& let Some(crate_name) = crate_name
{
if let Some(crate_name) = crate_name {
let dur = start.elapsed();
// If the user requested resource usage data, then
// include that in addition to the timing output.
let rusage_data =
env::var_os("RUSTC_PRINT_STEP_RUSAGE").and_then(|_| format_rusage_data(child));
eprintln!(
"[RUSTC-TIMING] {} test:{} {}.{:03}{}{}",
crate_name,
is_test,
dur.as_secs(),
dur.subsec_millis(),
if rusage_data.is_some() { " " } else { "" },
rusage_data.unwrap_or_default(),
);
}
let dur = start.elapsed();
// If the user requested resource usage data, then
// include that in addition to the timing output.
let rusage_data =
env::var_os("RUSTC_PRINT_STEP_RUSAGE").and_then(|_| format_rusage_data(child));
eprintln!(
"[RUSTC-TIMING] {} test:{} {}.{:03}{}{}",
crate_name,
is_test,
dur.as_secs(),
dur.subsec_millis(),
if rusage_data.is_some() { " " } else { "" },
rusage_data.unwrap_or_default(),
);
}
if status.success() {

View file

@ -1,5 +1,6 @@
//! Implementation of compiling the compiler and standard library, in "check"-based modes.
use crate::core::build_steps::compile;
use crate::core::build_steps::compile::{
add_to_sysroot, run_cargo, rustc_cargo, rustc_cargo_env, std_cargo, std_crates_for_run_make,
};
@ -45,10 +46,12 @@ impl Step for Std {
const DEFAULT: bool = true;
fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> {
let stage = run.builder.top_stage;
run.crate_or_deps("sysroot")
.crate_or_deps("coretests")
.crate_or_deps("alloctests")
.path("library")
.default_condition(stage != 0)
}
fn make_run(run: RunConfig<'_>) {
@ -57,11 +60,24 @@ impl Step for Std {
}
fn run(self, builder: &Builder<'_>) {
if !builder.download_rustc() && builder.config.skip_std_check_if_no_download_rustc {
eprintln!(
"WARNING: `--skip-std-check-if-no-download-rustc` flag was passed and `rust.download-rustc` is not available. Skipping."
);
return;
}
builder.require_submodule("library/stdarch", None);
let target = self.target;
let compiler = builder.compiler(builder.top_stage, builder.config.build);
if builder.top_stage == 0 {
// Reuse the stage0 libstd
builder.ensure(compile::Std::new(compiler, target));
return;
}
let mut cargo = builder::Cargo::new(
builder,
compiler,

View file

@ -207,16 +207,18 @@ impl Step for Rustc {
let compiler = builder.compiler(builder.top_stage, builder.config.build);
let target = self.target;
if compiler.stage != 0 {
// If we're not in stage 0, then we won't have a std from the beta
// compiler around. That means we need to make sure there's one in
// the sysroot for the compiler to find. Otherwise, we're going to
// fail when building crates that need to generate code (e.g., build
// scripts and their dependencies).
builder.ensure(compile::Std::new(compiler, compiler.host));
builder.ensure(compile::Std::new(compiler, target));
} else {
builder.ensure(check::Std::new(target).build_kind(Some(Kind::Check)));
if !builder.download_rustc() {
if compiler.stage != 0 {
// If we're not in stage 0, then we won't have a std from the beta
// compiler around. That means we need to make sure there's one in
// the sysroot for the compiler to find. Otherwise, we're going to
// fail when building crates that need to generate code (e.g., build
// scripts and their dependencies).
builder.ensure(compile::Std::new(compiler, compiler.host));
builder.ensure(compile::Std::new(compiler, target));
} else {
builder.ensure(check::Std::new(target).build_kind(Some(Kind::Check)));
}
}
let mut cargo = builder::Cargo::new(
@ -286,7 +288,9 @@ macro_rules! lint_any {
let compiler = builder.compiler(builder.top_stage, builder.config.build);
let target = self.target;
builder.ensure(check::Rustc::new(target, builder).build_kind(Some(Kind::Check)));
if !builder.download_rustc() {
builder.ensure(check::Rustc::new(target, builder).build_kind(Some(Kind::Check)));
};
let cargo = prepare_tool_cargo(
builder,

View file

@ -147,14 +147,27 @@ impl Step for Std {
)]
fn run(self, builder: &Builder<'_>) {
let target = self.target;
let compiler = self.compiler;
// We already have std ready to be used for stage 0.
if self.compiler.stage == 0 {
let compiler = self.compiler;
builder.ensure(StdLink::from_std(self, compiler));
return;
}
let compiler = if builder.download_rustc() && self.force_recompile {
// When there are changes in the library tree with CI-rustc, we want to build
// the stageN library and that requires using stageN-1 compiler.
builder.compiler(self.compiler.stage.saturating_sub(1), builder.config.build)
} else {
self.compiler
};
// When using `download-rustc`, we already have artifacts for the host available. Don't
// recompile them.
if builder.download_rustc() && builder.config.is_host_target(target)
// NOTE: the beta compiler may generate different artifacts than the downloaded compiler, so
// its artifacts can't be reused.
&& compiler.stage != 0
if builder.download_rustc()
&& builder.config.is_host_target(target)
&& !self.force_recompile
{
let sysroot = builder.ensure(Sysroot { compiler, force_recompile: false });
@ -189,7 +202,13 @@ impl Step for Std {
let compiler_to_use = builder.compiler_for(compiler.stage, compiler.host, target);
trace!(?compiler_to_use);
if compiler_to_use != compiler {
if compiler_to_use != compiler
// Never uplift std unless we have compiled stage 1; if stage 1 is compiled,
// uplift it from there.
//
// FIXME: improve `fn compiler_for` to avoid adding stage condition here.
&& compiler.stage > 1
{
trace!(?compiler_to_use, ?compiler, "compiler != compiler_to_use, uplifting library");
builder.ensure(Std::new(compiler_to_use, target));
@ -222,27 +241,6 @@ impl Step for Std {
target_deps.extend(self.copy_extra_objects(builder, &compiler, target));
// The LLD wrappers and `rust-lld` are self-contained linking components that can be
// necessary to link the stdlib on some targets. We'll also need to copy these binaries to
// the `stage0-sysroot` to ensure the linker is found when bootstrapping on such a target.
if compiler.stage == 0 && builder.config.is_host_target(compiler.host) {
trace!(
"(build == host) copying linking components to `stage0-sysroot` for bootstrapping"
);
// We want to copy the host `bin` folder within the `rustlib` folder in the sysroot.
let src_sysroot_bin = builder
.rustc_snapshot_sysroot()
.join("lib")
.join("rustlib")
.join(compiler.host)
.join("bin");
if src_sysroot_bin.exists() {
let target_sysroot_bin = builder.sysroot_target_bindir(compiler, target);
t!(fs::create_dir_all(&target_sysroot_bin));
builder.cp_link_r(&src_sysroot_bin, &target_sysroot_bin);
}
}
// We build a sysroot for mir-opt tests using the same trick that Miri does: A check build
// with -Zalways-encode-mir. This frees us from the need to have a target linker, and the
// fact that this is a check build integrates nicely with run_cargo.
@ -628,18 +626,18 @@ pub fn std_cargo(builder: &Builder<'_>, target: TargetSelection, stage: u32, car
// Help the libc crate compile by assisting it in finding various
// sysroot native libraries.
if target.contains("musl") {
if let Some(p) = builder.musl_libdir(target) {
let root = format!("native={}", p.to_str().unwrap());
cargo.rustflag("-L").rustflag(&root);
}
if target.contains("musl")
&& let Some(p) = builder.musl_libdir(target)
{
let root = format!("native={}", p.to_str().unwrap());
cargo.rustflag("-L").rustflag(&root);
}
if target.contains("-wasi") {
if let Some(dir) = builder.wasi_libdir(target) {
let root = format!("native={}", dir.to_str().unwrap());
cargo.rustflag("-L").rustflag(&root);
}
if target.contains("-wasi")
&& let Some(dir) = builder.wasi_libdir(target)
{
let root = format!("native={}", dir.to_str().unwrap());
cargo.rustflag("-L").rustflag(&root);
}
}
@ -737,7 +735,7 @@ impl Step for StdLink {
let target = self.target;
// NOTE: intentionally does *not* check `target == builder.build` to avoid having to add the same check in `test::Crate`.
let (libdir, hostdir) = if self.force_recompile && builder.download_rustc() {
let (libdir, hostdir) = if !self.force_recompile && builder.download_rustc() {
// NOTE: copies part of `sysroot_libdir` to avoid having to add a new `force_recompile` argument there too
let lib = builder.sysroot_libdir_relative(self.compiler);
let sysroot = builder.ensure(crate::core::build_steps::compile::Sysroot {
@ -753,23 +751,16 @@ impl Step for StdLink {
(libdir, hostdir)
};
add_to_sysroot(
builder,
&libdir,
&hostdir,
&build_stamp::libstd_stamp(builder, compiler, target),
);
let is_downloaded_beta_stage0 = builder
.build
.config
.initial_rustc
.starts_with(builder.out.join(compiler.host).join("stage0/bin"));
// Special case for stage0, to make `rustup toolchain link` and `x dist --stage 0`
// work for stage0-sysroot. We only do this if the stage0 compiler comes from beta,
// and is not set to a custom path.
if compiler.stage == 0
&& builder
.build
.config
.initial_rustc
.starts_with(builder.out.join(compiler.host).join("stage0/bin"))
{
if compiler.stage == 0 && is_downloaded_beta_stage0 {
// Copy bin files from stage0/bin to stage0-sysroot/bin
let sysroot = builder.out.join(compiler.host).join("stage0-sysroot");
@ -779,21 +770,9 @@ impl Step for StdLink {
t!(fs::create_dir_all(&sysroot_bin_dir));
builder.cp_link_r(&stage0_bin_dir, &sysroot_bin_dir);
// Copy all files from stage0/lib to stage0-sysroot/lib
let stage0_lib_dir = builder.out.join(host).join("stage0/lib");
if let Ok(files) = fs::read_dir(stage0_lib_dir) {
for file in files {
let file = t!(file);
let path = file.path();
if path.is_file() {
builder.copy_link(
&path,
&sysroot.join("lib").join(path.file_name().unwrap()),
FileType::Regular,
);
}
}
}
t!(fs::create_dir_all(sysroot.join("lib")));
builder.cp_link_r(&stage0_lib_dir, &sysroot.join("lib"));
// Copy codegen-backends from stage0
let sysroot_codegen_backends = builder.sysroot_codegen_backends(compiler);
@ -807,6 +786,30 @@ impl Step for StdLink {
if stage0_codegen_backends.exists() {
builder.cp_link_r(&stage0_codegen_backends, &sysroot_codegen_backends);
}
} else if compiler.stage == 0 {
let sysroot = builder.out.join(compiler.host.triple).join("stage0-sysroot");
if builder.local_rebuild {
// On local rebuilds this path might be a symlink to the project root,
// which can be read-only (e.g., on CI). So remove it before copying
// the stage0 lib.
let _ = fs::remove_dir_all(sysroot.join("lib/rustlib/src/rust"));
}
builder.cp_link_r(&builder.initial_sysroot.join("lib"), &sysroot.join("lib"));
} else {
if builder.download_rustc() {
// Ensure there are no CI-rustc std artifacts.
let _ = fs::remove_dir_all(&libdir);
let _ = fs::remove_dir_all(&hostdir);
}
add_to_sysroot(
builder,
&libdir,
&hostdir,
&build_stamp::libstd_stamp(builder, compiler, target),
);
}
}
}
@ -1029,7 +1032,7 @@ impl Step for Rustc {
let compiler = self.compiler;
let target = self.target;
// NOTE: the ABI of the beta compiler is different from the ABI of the downloaded compiler,
// NOTE: the ABI of the stage0 compiler is different from the ABI of the downloaded compiler,
// so its artifacts can't be reused.
if builder.download_rustc() && compiler.stage != 0 {
trace!(stage = compiler.stage, "`download_rustc` requested");
@ -1388,12 +1391,13 @@ fn rustc_llvm_env(builder: &Builder<'_>, cargo: &mut Cargo, target: TargetSelect
// found. This is to avoid the linker errors about undefined references to
// `__llvm_profile_instrument_memop` when linking `rustc_driver`.
let mut llvm_linker_flags = String::new();
if builder.config.llvm_profile_generate && target.is_msvc() {
if let Some(ref clang_cl_path) = builder.config.llvm_clang_cl {
// Add clang's runtime library directory to the search path
let clang_rt_dir = get_clang_cl_resource_dir(builder, clang_cl_path);
llvm_linker_flags.push_str(&format!("-L{}", clang_rt_dir.display()));
}
if builder.config.llvm_profile_generate
&& target.is_msvc()
&& let Some(ref clang_cl_path) = builder.config.llvm_clang_cl
{
// Add clang's runtime library directory to the search path
let clang_rt_dir = get_clang_cl_resource_dir(builder, clang_cl_path);
llvm_linker_flags.push_str(&format!("-L{}", clang_rt_dir.display()));
}
// The config can also specify its own llvm linker flags.
@ -1785,9 +1789,9 @@ impl Step for Sysroot {
t!(fs::create_dir_all(&sysroot));
// In some cases(see https://github.com/rust-lang/rust/issues/109314), when the stage0
// compiler relies on more recent version of LLVM than the beta compiler, it may not
        // compiler relies on a more recent version of LLVM than the stage0 compiler, it may not
// be able to locate the correct LLVM in the sysroot. This situation typically occurs
// when we upgrade LLVM version while the beta compiler continues to use an older version.
// when we upgrade LLVM version while the stage0 compiler continues to use an older version.
//
// Make sure to add the correct version of LLVM into the stage0 sysroot.
if compiler.stage == 0 {

View file

@ -2276,11 +2276,12 @@ impl Step for LlvmTools {
let target = self.target;
// Run only if a custom llvm-config is not used
if let Some(config) = builder.config.target_config.get(&target) {
if !builder.config.llvm_from_ci && config.llvm_config.is_some() {
builder.info(&format!("Skipping LlvmTools ({target}): external LLVM"));
return None;
}
if let Some(config) = builder.config.target_config.get(&target)
&& !builder.config.llvm_from_ci
&& config.llvm_config.is_some()
{
builder.info(&format!("Skipping LlvmTools ({target}): external LLVM"));
return None;
}
if !builder.config.dry_run() {
@ -2398,11 +2399,11 @@ impl Step for RustDev {
let target = self.target;
/* run only if llvm-config isn't used */
if let Some(config) = builder.config.target_config.get(&target) {
if let Some(ref _s) = config.llvm_config {
builder.info(&format!("Skipping RustDev ({target}): external LLVM"));
return None;
}
if let Some(config) = builder.config.target_config.get(&target)
&& let Some(ref _s) = config.llvm_config
{
builder.info(&format!("Skipping RustDev ({target}): external LLVM"));
return None;
}
if !builder.config.dry_run() {

View file

@ -318,10 +318,10 @@ pub fn format(build: &Builder<'_>, check: bool, all: bool, paths: &[PathBuf]) {
// `into_path` produces an absolute path. Try to strip `cwd` to get a shorter
// relative path.
let mut path = entry.clone().into_path();
if let Ok(cwd) = cwd {
if let Ok(path2) = path.strip_prefix(cwd) {
path = path2.to_path_buf();
}
if let Ok(cwd) = cwd
&& let Ok(path2) = path.strip_prefix(cwd)
{
path = path2.to_path_buf();
}
path.display().to_string()
});

View file

@ -107,18 +107,18 @@ pub fn prebuilt_llvm_config(
// If we're using a custom LLVM bail out here, but we can only use a
// custom LLVM for the build triple.
if let Some(config) = builder.config.target_config.get(&target) {
if let Some(ref s) = config.llvm_config {
check_llvm_version(builder, s);
let llvm_config = s.to_path_buf();
let mut llvm_cmake_dir = llvm_config.clone();
llvm_cmake_dir.pop();
llvm_cmake_dir.pop();
llvm_cmake_dir.push("lib");
llvm_cmake_dir.push("cmake");
llvm_cmake_dir.push("llvm");
return LlvmBuildStatus::AlreadyBuilt(LlvmResult { llvm_config, llvm_cmake_dir });
}
if let Some(config) = builder.config.target_config.get(&target)
&& let Some(ref s) = config.llvm_config
{
check_llvm_version(builder, s);
let llvm_config = s.to_path_buf();
let mut llvm_cmake_dir = llvm_config.clone();
llvm_cmake_dir.pop();
llvm_cmake_dir.pop();
llvm_cmake_dir.push("lib");
llvm_cmake_dir.push("cmake");
llvm_cmake_dir.push("llvm");
return LlvmBuildStatus::AlreadyBuilt(LlvmResult { llvm_config, llvm_cmake_dir });
}
if handle_submodule_when_needed {
@ -468,10 +468,10 @@ impl Step for Llvm {
cfg.define("LLVM_ENABLE_RUNTIMES", enabled_llvm_runtimes.join(";"));
}
if let Some(num_linkers) = builder.config.llvm_link_jobs {
if num_linkers > 0 {
cfg.define("LLVM_PARALLEL_LINK_JOBS", num_linkers.to_string());
}
if let Some(num_linkers) = builder.config.llvm_link_jobs
&& num_linkers > 0
{
cfg.define("LLVM_PARALLEL_LINK_JOBS", num_linkers.to_string());
}
// https://llvm.org/docs/HowToCrossCompileLLVM.html
@ -597,10 +597,10 @@ fn check_llvm_version(builder: &Builder<'_>, llvm_config: &Path) {
let version = get_llvm_version(builder, llvm_config);
let mut parts = version.split('.').take(2).filter_map(|s| s.parse::<u32>().ok());
if let (Some(major), Some(_minor)) = (parts.next(), parts.next()) {
if major >= 19 {
return;
}
if let (Some(major), Some(_minor)) = (parts.next(), parts.next())
&& major >= 19
{
return;
}
panic!("\n\nbad LLVM version: {version}, need >=19\n\n")
}
@ -730,11 +730,9 @@ fn configure_cmake(
// If ccache is configured we inform the build a little differently how
// to invoke ccache while also invoking our compilers.
if use_compiler_launcher {
if let Some(ref ccache) = builder.config.ccache {
cfg.define("CMAKE_C_COMPILER_LAUNCHER", ccache)
.define("CMAKE_CXX_COMPILER_LAUNCHER", ccache);
}
if use_compiler_launcher && let Some(ref ccache) = builder.config.ccache {
cfg.define("CMAKE_C_COMPILER_LAUNCHER", ccache)
.define("CMAKE_CXX_COMPILER_LAUNCHER", ccache);
}
cfg.define("CMAKE_C_COMPILER", sanitize_cc(&cc))
.define("CMAKE_CXX_COMPILER", sanitize_cc(&cxx))
@ -792,20 +790,20 @@ fn configure_cmake(
cxxflags.push(format!(" --target={target}"));
}
cfg.define("CMAKE_CXX_FLAGS", cxxflags);
if let Some(ar) = builder.ar(target) {
if ar.is_absolute() {
// LLVM build breaks if `CMAKE_AR` is a relative path, for some reason it
// tries to resolve this path in the LLVM build directory.
cfg.define("CMAKE_AR", sanitize_cc(&ar));
}
if let Some(ar) = builder.ar(target)
&& ar.is_absolute()
{
// LLVM build breaks if `CMAKE_AR` is a relative path, for some reason it
// tries to resolve this path in the LLVM build directory.
cfg.define("CMAKE_AR", sanitize_cc(&ar));
}
if let Some(ranlib) = builder.ranlib(target) {
if ranlib.is_absolute() {
// LLVM build breaks if `CMAKE_RANLIB` is a relative path, for some reason it
// tries to resolve this path in the LLVM build directory.
cfg.define("CMAKE_RANLIB", sanitize_cc(&ranlib));
}
if let Some(ranlib) = builder.ranlib(target)
&& ranlib.is_absolute()
{
// LLVM build breaks if `CMAKE_RANLIB` is a relative path, for some reason it
// tries to resolve this path in the LLVM build directory.
cfg.define("CMAKE_RANLIB", sanitize_cc(&ranlib));
}
if let Some(ref flags) = builder.config.llvm_ldflags {
@ -1038,13 +1036,14 @@ impl Step for Lld {
// when doing PGO on CI, cmake or clang-cl don't automatically link clang's
// profiler runtime in. In that case, we need to manually ask cmake to do it, to avoid
// linking errors, much like LLVM's cmake setup does in that situation.
if builder.config.llvm_profile_generate && target.is_msvc() {
if let Some(clang_cl_path) = builder.config.llvm_clang_cl.as_ref() {
// Find clang's runtime library directory and push that as a search path to the
// cmake linker flags.
let clang_rt_dir = get_clang_cl_resource_dir(builder, clang_cl_path);
ldflags.push_all(format!("/libpath:{}", clang_rt_dir.display()));
}
if builder.config.llvm_profile_generate
&& target.is_msvc()
&& let Some(clang_cl_path) = builder.config.llvm_clang_cl.as_ref()
{
// Find clang's runtime library directory and push that as a search path to the
// cmake linker flags.
let clang_rt_dir = get_clang_cl_resource_dir(builder, clang_cl_path);
ldflags.push_all(format!("/libpath:{}", clang_rt_dir.display()));
}
// LLD is built as an LLVM tool, but is distributed outside of the `llvm-tools` component,

View file

@ -154,10 +154,10 @@ Consider setting `rust.debuginfo-level = 1` in `bootstrap.toml`."#);
let compiler = builder.compiler(builder.top_stage, builder.config.build);
builder.ensure(Std::new(compiler, builder.config.build));
if let Some(opts) = args.cmd.shared_opts() {
if opts.profiles.contains(&Profile::Doc) {
builder.ensure(Rustdoc { compiler });
}
if let Some(opts) = args.cmd.shared_opts()
&& opts.profiles.contains(&Profile::Doc)
{
builder.ensure(Rustdoc { compiler });
}
let sysroot = builder.ensure(Sysroot::new(compiler));

View file

@ -5,7 +5,6 @@
use std::path::PathBuf;
use crate::Mode;
use crate::core::build_steps::dist::distdir;
use crate::core::build_steps::test;
use crate::core::build_steps::tool::{self, SourceType, Tool};
@ -14,6 +13,7 @@ use crate::core::builder::{Builder, Kind, RunConfig, ShouldRun, Step};
use crate::core::config::TargetSelection;
use crate::core::config::flags::get_completion;
use crate::utils::exec::command;
use crate::{Mode, t};
#[derive(Debug, PartialOrd, Ord, Clone, Hash, PartialEq, Eq)]
pub struct BuildManifest;
@ -253,6 +253,7 @@ impl Step for GenerateCopyright {
cmd.env("SRC_DIR", &builder.src);
cmd.env("VENDOR_DIR", &vendored_sources);
cmd.env("CARGO", &builder.initial_cargo);
cmd.env("CARGO_HOME", t!(home::cargo_home()));
// it is important that generate-copyright runs from the root of the
// source tree, because it uses relative paths
cmd.current_dir(&builder.src);

View file

@ -241,10 +241,10 @@ impl Step for Link {
if run.builder.config.dry_run() {
return;
}
if let [cmd] = &run.paths[..] {
if cmd.assert_single_path().path.as_path().as_os_str() == "link" {
run.builder.ensure(Link);
}
if let [cmd] = &run.paths[..]
&& cmd.assert_single_path().path.as_path().as_os_str() == "link"
{
run.builder.ensure(Link);
}
}
fn run(self, builder: &Builder<'_>) -> Self::Output {
@ -457,10 +457,10 @@ impl Step for Hook {
}
fn make_run(run: RunConfig<'_>) {
if let [cmd] = &run.paths[..] {
if cmd.assert_single_path().path.as_path().as_os_str() == "hook" {
run.builder.ensure(Hook);
}
if let [cmd] = &run.paths[..]
&& cmd.assert_single_path().path.as_path().as_os_str() == "hook"
{
run.builder.ensure(Hook);
}
}
@ -672,10 +672,10 @@ impl Step for Editor {
if run.builder.config.dry_run() {
return;
}
if let [cmd] = &run.paths[..] {
if cmd.assert_single_path().path.as_path().as_os_str() == "editor" {
run.builder.ensure(Editor);
}
if let [cmd] = &run.paths[..]
&& cmd.assert_single_path().path.as_path().as_os_str() == "editor"
{
run.builder.ensure(Editor);
}
}

View file

@ -1576,7 +1576,7 @@ impl Step for Compiletest {
if builder.top_stage == 0 && env::var("COMPILETEST_FORCE_STAGE0").is_err() {
eprintln!("\
ERROR: `--stage 0` runs compiletest on the beta compiler, not your local changes, and will almost always cause tests to fail
ERROR: `--stage 0` runs compiletest on the stage0 (precompiled) compiler, not your local changes, and will almost always cause tests to fail
HELP: to test the compiler, use `--stage 1` instead
HELP: to test the standard library, use `--stage 0 library/std` instead
NOTE: if you're sure you want to do this, please open an issue as to why. In the meantime, you can override this with `COMPILETEST_FORCE_STAGE0=1`."
@ -1604,9 +1604,9 @@ NOTE: if you're sure you want to do this, please open an issue as to why. In the
// NOTE: Only stage 1 is special cased because we need the rustc_private artifacts to match the
// running compiler in stage 2 when plugins run.
let (stage, stage_id) = if suite == "ui-fulldeps" && compiler.stage == 1 {
// At stage 0 (stage - 1) we are using the beta compiler. Using `self.target` can lead
// finding an incorrect compiler path on cross-targets, as the stage 0 beta compiler is
// always equal to `build.build` in the configuration.
            // At stage 0 (stage - 1) we are using the stage0 compiler. Using `self.target` can lead
            // to finding an incorrect compiler path on cross-targets, as the stage 0 is always equal to
// `build.build` in the configuration.
let build = builder.build.build;
compiler = builder.compiler(compiler.stage - 1, build);
let test_stage = compiler.stage + 1;
@ -1692,7 +1692,7 @@ NOTE: if you're sure you want to do this, please open an issue as to why. In the
}
if mode == "rustdoc-json" {
// Use the beta compiler for jsondocck
// Use the stage0 compiler for jsondocck
let json_compiler = compiler.with_stage(0);
cmd.arg("--jsondocck-path")
.arg(builder.ensure(tool::JsonDocCk { compiler: json_compiler, target }).tool_path);
@ -2417,10 +2417,10 @@ impl Step for ErrorIndex {
}
fn markdown_test(builder: &Builder<'_>, compiler: Compiler, markdown: &Path) -> bool {
if let Ok(contents) = fs::read_to_string(markdown) {
if !contents.contains("```") {
return true;
}
if let Ok(contents) = fs::read_to_string(markdown)
&& !contents.contains("```")
{
return true;
}
builder.verbose(|| println!("doc tests for: {}", markdown.display()));

View file

@ -329,9 +329,9 @@ pub(crate) fn get_tool_rustc_compiler(
return target_compiler;
}
if builder.download_rustc() && target_compiler.stage > 0 {
// We already have the stage N compiler, we don't need to cut the stage.
return builder.compiler(target_compiler.stage, builder.config.build);
if builder.download_rustc() && target_compiler.stage == 1 {
// We shouldn't drop to stage0 compiler when using CI rustc.
return builder.compiler(1, builder.config.build);
}
// Similar to `compile::Assemble`, build with the previous stage's compiler. Otherwise
@ -1197,9 +1197,9 @@ fn run_tool_build_step(
artifact_kind: ToolArtifactKind::Binary,
});
// FIXME: This should just be an if-let-chain, but those are unstable.
if let Some(add_bins_to_sysroot) =
add_bins_to_sysroot.filter(|bins| !bins.is_empty() && target_compiler.stage > 0)
if let Some(add_bins_to_sysroot) = add_bins_to_sysroot
&& !add_bins_to_sysroot.is_empty()
&& target_compiler.stage > 0
{
let bindir = builder.sysroot(target_compiler).join("bin");
t!(fs::create_dir_all(&bindir));

View file

@ -988,15 +988,15 @@ impl Builder<'_> {
// requirement, but the `-L` library path is not propagated across
// separate Cargo projects. We can add LLVM's library path to the
// rustc args as a workaround.
if mode == Mode::ToolRustc || mode == Mode::Codegen {
if let Some(llvm_config) = self.llvm_config(target) {
let llvm_libdir =
command(llvm_config).arg("--libdir").run_capture_stdout(self).stdout();
if target.is_msvc() {
rustflags.arg(&format!("-Clink-arg=-LIBPATH:{llvm_libdir}"));
} else {
rustflags.arg(&format!("-Clink-arg=-L{llvm_libdir}"));
}
if (mode == Mode::ToolRustc || mode == Mode::Codegen)
&& let Some(llvm_config) = self.llvm_config(target)
{
let llvm_libdir =
command(llvm_config).arg("--libdir").run_capture_stdout(self).stdout();
if target.is_msvc() {
rustflags.arg(&format!("-Clink-arg=-LIBPATH:{llvm_libdir}"));
} else {
rustflags.arg(&format!("-Clink-arg=-L{llvm_libdir}"));
}
}
@ -1004,7 +1004,12 @@ impl Builder<'_> {
// efficient initial-exec TLS model. This doesn't work with `dlopen`,
// so we can't use it by default in general, but we can use it for tools
// and our own internal libraries.
if !mode.must_support_dlopen() && !target.triple.starts_with("powerpc-") {
//
// Cygwin only supports emutls.
if !mode.must_support_dlopen()
&& !target.triple.starts_with("powerpc-")
&& !target.triple.contains("cygwin")
{
cargo.env("RUSTC_TLS_MODEL_INITIAL_EXEC", "1");
}
@ -1226,12 +1231,11 @@ impl Builder<'_> {
_ => None,
};
if let Some(limit) = limit {
if stage == 0
|| self.config.default_codegen_backend(target).unwrap_or_default() == "llvm"
{
rustflags.arg(&format!("-Cllvm-args=-import-instr-limit={limit}"));
}
if let Some(limit) = limit
&& (stage == 0
|| self.config.default_codegen_backend(target).unwrap_or_default() == "llvm")
{
rustflags.arg(&format!("-Cllvm-args=-import-instr-limit={limit}"));
}
}

View file

@ -237,7 +237,7 @@ fn alias_and_path_for_library() {
);
assert_eq!(
first(cache.all::<doc::Std>()),
&[doc_std!(TEST_TRIPLE_1 => TEST_TRIPLE_1, stage = 0)]
&[doc_std!(TEST_TRIPLE_1 => TEST_TRIPLE_1, stage = 1)]
);
}
@ -254,19 +254,6 @@ fn ci_rustc_if_unchanged_invalidate_on_compiler_changes() {
});
}
#[test]
fn ci_rustc_if_unchanged_invalidate_on_library_changes_in_ci() {
git_test(|ctx| {
prepare_rustc_checkout(ctx);
ctx.create_upstream_merge(&["compiler/bar"]);
// This change should invalidate download-ci-rustc
ctx.create_nonupstream_merge(&["library/foo"]);
let config = parse_config_download_rustc_at(ctx.get_path(), "if-unchanged", true);
assert_eq!(config.download_rustc_commit, None);
});
}
#[test]
fn ci_rustc_if_unchanged_do_not_invalidate_on_library_changes_outside_ci() {
git_test(|ctx| {
@ -433,14 +420,14 @@ mod defaults {
assert_eq!(first(cache.all::<doc::ErrorIndex>()), &[doc::ErrorIndex { target: a },]);
assert_eq!(
first(cache.all::<tool::ErrorIndex>()),
&[tool::ErrorIndex { compiler: Compiler::new(0, a) }]
&[tool::ErrorIndex { compiler: Compiler::new(1, a) }]
);
// docs should be built with the beta compiler, not with the stage0 artifacts.
// docs should be built with the stage0 compiler, not with the stage0 artifacts.
// recall that rustdoc is off-by-one: `stage` is the compiler rustdoc is _linked_ to,
// not the one it was built by.
assert_eq!(
first(cache.all::<tool::Rustdoc>()),
&[tool::Rustdoc { compiler: Compiler::new(0, a) },]
&[tool::Rustdoc { compiler: Compiler::new(1, a) },]
);
}
}

View file

@ -45,6 +45,7 @@ use crate::utils::helpers::{self, exe, output, t};
/// final output/compiler, which can be significantly affected by changes made to the bootstrap sources.
#[rustfmt::skip] // We don't want rustfmt to oneline this list
pub(crate) const RUSTC_IF_UNCHANGED_ALLOWED_PATHS: &[&str] = &[
":!library",
":!src/tools",
":!src/librustdoc",
":!src/rustdoc-json-types",
@ -422,6 +423,11 @@ pub struct Config {
/// Cache for determining path modifications
pub path_modification_cache: Arc<Mutex<HashMap<Vec<&'static str>, PathFreshness>>>,
/// Skip checking the standard library if `rust.download-rustc` isn't available.
/// This is mostly for RA as building the stage1 compiler to check the library tree
/// on each code change might be too much for some computers.
pub skip_std_check_if_no_download_rustc: bool,
}
#[derive(Debug, Default, Clone, Copy, PartialEq, Eq)]
@ -1506,6 +1512,7 @@ impl Config {
config.enable_bolt_settings = flags.enable_bolt_settings;
config.bypass_bootstrap_lock = flags.bypass_bootstrap_lock;
config.is_running_on_ci = flags.ci.unwrap_or(CiEnv::is_ci());
config.skip_std_check_if_no_download_rustc = flags.skip_std_check_if_no_download_rustc;
// Infer the rest of the configuration.
@ -1699,20 +1706,20 @@ impl Config {
};
// We want to be able to set string values without quotes,
// like in `configure.py`. Try adding quotes around the right hand side
if let Some((key, value)) = option.split_once('=') {
if !value.contains('"') {
match get_table(&format!(r#"{key}="{value}""#)) {
Ok(v) => {
override_toml.merge(
None,
&mut Default::default(),
v,
ReplaceOpt::ErrorOnDuplicate,
);
continue;
}
Err(e) => err = e,
if let Some((key, value)) = option.split_once('=')
&& !value.contains('"')
{
match get_table(&format!(r#"{key}="{value}""#)) {
Ok(v) => {
override_toml.merge(
None,
&mut Default::default(),
v,
ReplaceOpt::ErrorOnDuplicate,
);
continue;
}
Err(e) => err = e,
}
}
eprintln!("failed to parse override `{option}`: `{err}");
@ -2056,16 +2063,15 @@ impl Config {
|| (matches!(debug_toml, Some(true))
&& !matches!(rustc_debug_assertions_toml, Some(false)));
if debug_assertions_requested {
if let Some(ref opt) = download_rustc {
if opt.is_string_or_true() {
eprintln!(
"WARN: currently no CI rustc builds have rustc debug assertions \
if debug_assertions_requested
&& let Some(ref opt) = download_rustc
&& opt.is_string_or_true()
{
eprintln!(
"WARN: currently no CI rustc builds have rustc debug assertions \
enabled. Please either set `rust.debug-assertions` to `false` if you \
want to use download CI rustc or set `rust.download-rustc` to `false`."
);
}
}
);
}
config.download_rustc_commit = config.download_ci_rustc_commit(
@ -2176,19 +2182,17 @@ impl Config {
// We need to override `rust.channel` if it's manually specified when using the CI rustc.
// This is because if the compiler uses a different channel than the one specified in bootstrap.toml,
// tests may fail due to using a different channel than the one used by the compiler during tests.
if let Some(commit) = &config.download_rustc_commit {
if is_user_configured_rust_channel {
println!(
"WARNING: `rust.download-rustc` is enabled. The `rust.channel` option will be overridden by the CI rustc's channel."
);
if let Some(commit) = &config.download_rustc_commit
&& is_user_configured_rust_channel
{
println!(
"WARNING: `rust.download-rustc` is enabled. The `rust.channel` option will be overridden by the CI rustc's channel."
);
let channel = config
.read_file_by_commit(Path::new("src/ci/channel"), commit)
.trim()
.to_owned();
let channel =
config.read_file_by_commit(Path::new("src/ci/channel"), commit).trim().to_owned();
config.channel = channel;
}
config.channel = channel;
}
if let Some(llvm) = toml.llvm {
@ -2533,10 +2537,12 @@ impl Config {
|| bench_stage.is_some();
// See https://github.com/rust-lang/compiler-team/issues/326
config.stage = match config.cmd {
Subcommand::Check { .. } => flags.stage.or(check_stage).unwrap_or(0),
Subcommand::Check { .. } | Subcommand::Clippy { .. } | Subcommand::Fix => {
flags.stage.or(check_stage).unwrap_or(1)
}
// `download-rustc` only has a speed-up for stage2 builds. Default to stage2 unless explicitly overridden.
Subcommand::Doc { .. } => {
flags.stage.or(doc_stage).unwrap_or(if download_rustc { 2 } else { 0 })
flags.stage.or(doc_stage).unwrap_or(if download_rustc { 2 } else { 1 })
}
Subcommand::Build => {
flags.stage.or(build_stage).unwrap_or(if download_rustc { 2 } else { 1 })
@ -2551,8 +2557,6 @@ impl Config {
// These are all bootstrap tools, which don't depend on the compiler.
// The stage we pass shouldn't matter, but use 0 just in case.
Subcommand::Clean { .. }
| Subcommand::Clippy { .. }
| Subcommand::Fix
| Subcommand::Run { .. }
| Subcommand::Setup { .. }
| Subcommand::Format { .. }
@ -2698,10 +2702,10 @@ impl Config {
let bindir = &self.bindir;
if bindir.is_absolute() {
// Try to make it relative to the prefix.
if let Some(prefix) = &self.prefix {
if let Ok(stripped) = bindir.strip_prefix(prefix) {
return stripped;
}
if let Some(prefix) = &self.prefix
&& let Ok(stripped) = bindir.strip_prefix(prefix)
{
return stripped;
}
}
bindir
@ -3150,24 +3154,10 @@ impl Config {
}
};
// RUSTC_IF_UNCHANGED_ALLOWED_PATHS
let mut allowed_paths = RUSTC_IF_UNCHANGED_ALLOWED_PATHS.to_vec();
// In CI, disable ci-rustc if there are changes in the library tree. But for non-CI, allow
// these changes to speed up the build process for library developers. This provides consistent
// functionality for library developers between `download-rustc=true` and `download-rustc="if-unchanged"`
// options.
//
// If you update "library" logic here, update `builder::tests::ci_rustc_if_unchanged_logic` test
// logic accordingly.
if !self.is_running_on_ci {
allowed_paths.push(":!library");
}
let commit = if self.rust_info.is_managed_git_subrepository() {
// Look for a version to compare to based on the current commit.
// Only commits merged by bors will have CI artifacts.
let freshness = self.check_path_modifications(&allowed_paths);
let freshness = self.check_path_modifications(RUSTC_IF_UNCHANGED_ALLOWED_PATHS);
self.verbose(|| {
eprintln!("rustc freshness: {freshness:?}");
});
@ -3493,19 +3483,19 @@ fn check_incompatible_options_for_ci_rustc(
// We always build the in-tree compiler on cross targets, so we only care
// about the host target here.
let host_str = host.to_string();
if let Some(current_cfg) = current_config_toml.target.as_ref().and_then(|c| c.get(&host_str)) {
if current_cfg.profiler.is_some() {
let ci_target_toml = ci_config_toml.target.as_ref().and_then(|c| c.get(&host_str));
let ci_cfg = ci_target_toml.ok_or(format!(
"Target specific config for '{host_str}' is not present for CI-rustc"
))?;
if let Some(current_cfg) = current_config_toml.target.as_ref().and_then(|c| c.get(&host_str))
&& current_cfg.profiler.is_some()
{
let ci_target_toml = ci_config_toml.target.as_ref().and_then(|c| c.get(&host_str));
let ci_cfg = ci_target_toml.ok_or(format!(
"Target specific config for '{host_str}' is not present for CI-rustc"
))?;
let profiler = &ci_cfg.profiler;
err!(current_cfg.profiler, profiler, "build");
let profiler = &ci_cfg.profiler;
err!(current_cfg.profiler, profiler, "build");
let optimized_compiler_builtins = &ci_cfg.optimized_compiler_builtins;
err!(current_cfg.optimized_compiler_builtins, optimized_compiler_builtins, "build");
}
let optimized_compiler_builtins = &ci_cfg.optimized_compiler_builtins;
err!(current_cfg.optimized_compiler_builtins, optimized_compiler_builtins, "build");
}
let (Some(current_rust_config), Some(ci_rust_config)) =

View file

@ -182,6 +182,11 @@ pub struct Flags {
/// Make bootstrap to behave as it's running on the CI environment or not.
#[arg(global = true, long, value_name = "bool")]
pub ci: Option<bool>,
/// Skip checking the standard library if `rust.download-rustc` isn't available.
/// This is mostly for RA as building the stage1 compiler to check the library tree
/// on each code change might be too much for some computers.
#[arg(global = true, long)]
pub skip_std_check_if_no_download_rustc: bool,
}
impl Flags {

View file

@ -666,7 +666,7 @@ impl Config {
}
};
// For the beta compiler, put special effort into ensuring the checksums are valid.
// For the stage0 compiler, put special effort into ensuring the checksums are valid.
let checksum = if should_verify {
let error = format!(
"src/stage0 doesn't contain a checksum for {url}. \
@ -709,10 +709,10 @@ download-rustc = false
";
}
self.download_file(&format!("{base_url}/{url}"), &tarball, help_on_error);
if let Some(sha256) = checksum {
if !self.verify(&tarball, sha256) {
panic!("failed to verify {}", tarball.display());
}
if let Some(sha256) = checksum
&& !self.verify(&tarball, sha256)
{
panic!("failed to verify {}", tarball.display());
}
self.unpack(&tarball, &bin_root, prefix);

View file

@ -1451,23 +1451,23 @@ Executed at: {executed_at}"#,
// Look for Wasmtime, and for its default options be sure to disable
// its caching system since we're executing quite a lot of tests and
// ideally shouldn't pollute the cache too much.
if let Some(path) = finder.maybe_have("wasmtime") {
if let Ok(mut path) = path.into_os_string().into_string() {
path.push_str(" run -C cache=n --dir .");
// Make sure that tests have access to RUSTC_BOOTSTRAP. This (for example) is
// required for libtest to work on beta/stable channels.
//
// NB: with Wasmtime 20 this can change to `-S inherit-env` to
// inherit the entire environment rather than just this single
// environment variable.
path.push_str(" --env RUSTC_BOOTSTRAP");
if let Some(path) = finder.maybe_have("wasmtime")
&& let Ok(mut path) = path.into_os_string().into_string()
{
path.push_str(" run -C cache=n --dir .");
// Make sure that tests have access to RUSTC_BOOTSTRAP. This (for example) is
// required for libtest to work on beta/stable channels.
//
// NB: with Wasmtime 20 this can change to `-S inherit-env` to
// inherit the entire environment rather than just this single
// environment variable.
path.push_str(" --env RUSTC_BOOTSTRAP");
if target.contains("wasip2") {
path.push_str(" --wasi inherit-network --wasi allow-ip-name-lookup");
}
return Some(path);
if target.contains("wasip2") {
path.push_str(" --wasi inherit-network --wasi allow-ip-name-lookup");
}
return Some(path);
}
None
@ -1637,12 +1637,12 @@ Executed at: {executed_at}"#,
/// sha, version, etc.
fn rust_version(&self) -> String {
let mut version = self.rust_info().version(self, &self.version);
if let Some(ref s) = self.config.description {
if !s.is_empty() {
version.push_str(" (");
version.push_str(s);
version.push(')');
}
if let Some(ref s) = self.config.description
&& !s.is_empty()
{
version.push_str(" (");
version.push_str(s);
version.push(')');
}
version
}
@ -1760,14 +1760,14 @@ Executed at: {executed_at}"#,
pub fn copy_link(&self, src: &Path, dst: &Path, file_type: FileType) {
self.copy_link_internal(src, dst, false);
if file_type.could_have_split_debuginfo() {
if let Some(dbg_file) = split_debuginfo(src) {
self.copy_link_internal(
&dbg_file,
&dst.with_extension(dbg_file.extension().unwrap()),
false,
);
}
if file_type.could_have_split_debuginfo()
&& let Some(dbg_file) = split_debuginfo(src)
{
self.copy_link_internal(
&dbg_file,
&dst.with_extension(dbg_file.extension().unwrap()),
false,
);
}
}
@ -1779,13 +1779,14 @@ Executed at: {executed_at}"#,
if src == dst {
return;
}
if let Err(e) = fs::remove_file(dst) {
if cfg!(windows) && e.kind() != io::ErrorKind::NotFound {
// workaround for https://github.com/rust-lang/rust/issues/127126
// if removing the file fails, attempt to rename it instead.
let now = t!(SystemTime::now().duration_since(SystemTime::UNIX_EPOCH));
let _ = fs::rename(dst, format!("{}-{}", dst.display(), now.as_nanos()));
}
if let Err(e) = fs::remove_file(dst)
&& cfg!(windows)
&& e.kind() != io::ErrorKind::NotFound
{
// workaround for https://github.com/rust-lang/rust/issues/127126
// if removing the file fails, attempt to rename it instead.
let now = t!(SystemTime::now().duration_since(SystemTime::UNIX_EPOCH));
let _ = fs::rename(dst, format!("{}-{}", dst.display(), now.as_nanos()));
}
let metadata = t!(src.symlink_metadata(), format!("src = {}", src.display()));
let mut src = src.to_path_buf();
@ -1894,10 +1895,10 @@ Executed at: {executed_at}"#,
chmod(&dst, file_type.perms());
// If this file can have debuginfo, look for split debuginfo and install it too.
if file_type.could_have_split_debuginfo() {
if let Some(dbg_file) = split_debuginfo(src) {
self.install(&dbg_file, dstdir, FileType::Regular);
}
if file_type.could_have_split_debuginfo()
&& let Some(dbg_file) = split_debuginfo(src)
{
self.install(&dbg_file, dstdir, FileType::Regular);
}
}

View file

@ -46,10 +46,10 @@ pub fn find_recent_config_change_ids(current_id: usize) -> &'static [ChangeInfo]
// an empty list (it may be due to switching from a recent branch to an
// older one); otherwise, return the full list (assuming the user provided
// the incorrect change-id by accident).
if let Some(config) = CONFIG_CHANGE_HISTORY.iter().max_by_key(|config| config.change_id) {
if current_id > config.change_id {
return &[];
}
if let Some(config) = CONFIG_CHANGE_HISTORY.iter().max_by_key(|config| config.change_id)
&& current_id > config.change_id
{
return &[];
}
CONFIG_CHANGE_HISTORY
@ -411,4 +411,14 @@ pub const CONFIG_CHANGE_HISTORY: &[ChangeInfo] = &[
severity: ChangeSeverity::Info,
summary: "`./x run` now supports running in-tree `rustfmt`, e.g., `./x run rustfmt -- --check /path/to/file.rs`.",
},
ChangeInfo {
change_id: 119899,
severity: ChangeSeverity::Warning,
summary: "Stage0 library no longer matches the in-tree library, which means stage1 compiler now uses the beta library.",
},
ChangeInfo {
change_id: 141970,
severity: ChangeSeverity::Info,
summary: "Added new bootstrap flag `--skip-std-check-if-no-download-rustc` that skips std checks when download-rustc is unavailable. Mainly intended for developers to reduce RA overhead.",
},
];

View file

@ -13,7 +13,7 @@ use crate::utils::load_env_var;
#[derive(serde::Deserialize, Debug, Clone)]
#[serde(deny_unknown_fields)]
pub struct Job {
/// Name of the job, e.g. mingw-check
/// Name of the job, e.g. mingw-check-1
pub name: String,
/// GitHub runner on which the job should be executed
pub os: String,

View file

@ -40,7 +40,7 @@ try-job: dist-i686-msvc"#,
fn pr_jobs() {
let stdout = get_matrix("pull_request", "commit", "refs/heads/pr/1234");
insta::assert_snapshot!(stdout, @r#"
jobs=[{"name":"mingw-check","full_name":"PR - mingw-check","os":"ubuntu-24.04","env":{"PR_CI_JOB":1},"free_disk":true},{"name":"mingw-check-tidy","full_name":"PR - mingw-check-tidy","os":"ubuntu-24.04","env":{"PR_CI_JOB":1},"continue_on_error":true,"free_disk":true,"doc_url":"https://foo.bar"}]
jobs=[{"name":"mingw-check-1","full_name":"PR - mingw-check-1","os":"ubuntu-24.04","env":{"PR_CI_JOB":1},"free_disk":true},{"name":"mingw-check-2","full_name":"PR - mingw-check-2","os":"ubuntu-24.04","env":{"PR_CI_JOB":1},"free_disk":true},{"name":"mingw-check-tidy","full_name":"PR - mingw-check-tidy","os":"ubuntu-24.04","env":{"PR_CI_JOB":1},"continue_on_error":true,"free_disk":true,"doc_url":"https://foo.bar"}]
run_type=pr
"#);
}
@ -51,6 +51,8 @@ fn get_matrix(event_name: &str, commit_msg: &str, branch_ref: &str) -> String {
.env("GITHUB_EVENT_NAME", event_name)
.env("COMMIT_MESSAGE", commit_msg)
.env("GITHUB_REF", branch_ref)
.env("GITHUB_RUN_ID", "123")
.env("GITHUB_RUN_ATTEMPT", "1")
.stdout(Stdio::piped())
.output()
.expect("Failed to execute command");

View file

@ -64,7 +64,9 @@ envs:
# These jobs automatically inherit envs.pr, to avoid repeating
# it in each job definition.
pr:
- name: mingw-check
- name: mingw-check-1
<<: *job-linux-4c
- name: mingw-check-2
<<: *job-linux-4c
- name: mingw-check-tidy
continue_on_error: true

View file

@ -34,31 +34,23 @@ RUN npm install es-check@6.1.1 eslint@8.6.0 typescript@5.7.3 -g
COPY scripts/sccache.sh /scripts/
RUN sh /scripts/sccache.sh
COPY host-x86_64/mingw-check/reuse-requirements.txt /tmp/
COPY host-x86_64/mingw-check-1/reuse-requirements.txt /tmp/
RUN pip3 install --no-deps --no-cache-dir --require-hashes -r /tmp/reuse-requirements.txt
COPY host-x86_64/mingw-check/check-default-config-profiles.sh /scripts/
COPY host-x86_64/mingw-check/validate-toolstate.sh /scripts/
COPY host-x86_64/mingw-check/validate-error-codes.sh /scripts/
COPY host-x86_64/mingw-check-1/check-default-config-profiles.sh /scripts/
COPY host-x86_64/mingw-check-1/validate-toolstate.sh /scripts/
COPY host-x86_64/mingw-check-1/validate-error-codes.sh /scripts/
# Check library crates on all tier 1 targets.
# We disable optimized compiler built-ins because that requires a C toolchain for the target.
# We also skip the x86_64-unknown-linux-gnu target as it is well-tested by other jobs.
ENV SCRIPT \
python3 ../x.py check --stage 0 --set build.optimized-compiler-builtins=false core alloc std --target=aarch64-unknown-linux-gnu,i686-pc-windows-msvc,i686-unknown-linux-gnu,x86_64-apple-darwin,x86_64-pc-windows-gnu,x86_64-pc-windows-msvc && \
/scripts/check-default-config-profiles.sh && \
python3 ../x.py check compiletest --set build.compiletest-use-stage0-libtest=true && \
python3 ../x.py check --target=x86_64-pc-windows-gnu --host=x86_64-pc-windows-gnu && \
python3 ../x.py clippy ci && \
python3 ../x.py build --stage 0 src/tools/build-manifest && \
python3 ../x.py test --stage 0 src/tools/compiletest && \
python3 ../x.py test --stage 0 core alloc std test proc_macro && \
# Build both public and internal documentation.
RUSTDOCFLAGS=\"--document-private-items --document-hidden-items\" python3 ../x.py doc --stage 0 library && \
mkdir -p /checkout/obj/staging/doc && \
cp -r build/x86_64-unknown-linux-gnu/doc /checkout/obj/staging && \
RUSTDOCFLAGS=\"--document-private-items --document-hidden-items\" python3 ../x.py doc --stage 0 compiler && \
RUSTDOCFLAGS=\"--document-private-items --document-hidden-items\" python3 ../x.py doc --stage 0 library/test && \
python3 ../x.py check compiletest --set build.compiletest-use-stage0-libtest=true && \
python3 ../x.py check --stage 1 --target=i686-pc-windows-gnu --host=i686-pc-windows-gnu && \
python3 ../x.py check --stage 1 --set build.optimized-compiler-builtins=false core alloc std --target=aarch64-unknown-linux-gnu,i686-pc-windows-msvc,i686-unknown-linux-gnu,x86_64-apple-darwin,x86_64-pc-windows-gnu,x86_64-pc-windows-msvc && \
/scripts/validate-toolstate.sh && \
/scripts/validate-error-codes.sh && \
reuse --include-submodules lint && \

View file

@ -0,0 +1,37 @@
FROM ubuntu:22.04
ARG DEBIAN_FRONTEND=noninteractive
RUN apt-get update && apt-get install -y --no-install-recommends \
g++ \
make \
ninja-build \
file \
curl \
ca-certificates \
python3 \
python3-pip \
python3-pkg-resources \
git \
cmake \
sudo \
gdb \
xz-utils \
libssl-dev \
pkg-config \
mingw-w64 \
&& rm -rf /var/lib/apt/lists/*
ENV RUST_CONFIGURE_ARGS="--set rust.validate-mir-opts=3"
COPY scripts/sccache.sh /scripts/
RUN sh /scripts/sccache.sh
ENV SCRIPT \
python3 ../x.py clippy ci && \
python3 ../x.py test --stage 1 core alloc std test proc_macro && \
# Build both public and internal documentation.
RUSTDOCFLAGS=\"--document-private-items --document-hidden-items\" python3 ../x.py doc --stage 0 compiler && \
RUSTDOCFLAGS=\"--document-private-items --document-hidden-items\" python3 ../x.py doc --stage 1 library && \
mkdir -p /checkout/obj/staging/doc && \
cp -r build/x86_64-unknown-linux-gnu/doc /checkout/obj/staging && \
RUSTDOCFLAGS=\"--document-private-items --document-hidden-items\" python3 ../x.py doc --stage 1 library/test

View file

@ -34,12 +34,12 @@ COPY host-x86_64/mingw-check-tidy/eslint.version /tmp/
COPY scripts/sccache.sh /scripts/
RUN sh /scripts/sccache.sh
COPY host-x86_64/mingw-check/reuse-requirements.txt /tmp/
COPY host-x86_64/mingw-check-1/reuse-requirements.txt /tmp/
RUN pip3 install --no-deps --no-cache-dir --require-hashes -r /tmp/reuse-requirements.txt \
&& pip3 install virtualenv
COPY host-x86_64/mingw-check/validate-toolstate.sh /scripts/
COPY host-x86_64/mingw-check/validate-error-codes.sh /scripts/
COPY host-x86_64/mingw-check-1/validate-toolstate.sh /scripts/
COPY host-x86_64/mingw-check-1/validate-error-codes.sh /scripts/
# NOTE: intentionally uses python2 for x.py so we can test it still works.
# validate-toolstate only runs in our CI, so it's ok for it to only support python3.

View file

@ -29,5 +29,5 @@ RUN echo "optimize = false" >> /config/nopt-std-config.toml
ENV RUST_CONFIGURE_ARGS --build=x86_64-unknown-linux-gnu \
--disable-optimize-tests \
--set rust.test-compare-mode
ENV SCRIPT python3 ../x.py test --stage 0 --config /config/nopt-std-config.toml library/std \
ENV SCRIPT python3 ../x.py test --stage 1 --config /config/nopt-std-config.toml library/std \
&& python3 ../x.py --stage 2 test

View file

@ -111,7 +111,9 @@ envs:
# These jobs automatically inherit envs.pr, to avoid repeating
# it in each job definition.
pr:
- name: mingw-check
- name: mingw-check-1
<<: *job-linux-4c
- name: mingw-check-2
<<: *job-linux-4c
- name: mingw-check-tidy
continue_on_error: true
@ -125,7 +127,7 @@ pr:
env:
IMAGE: aarch64-gnu-llvm-19
DOCKER_SCRIPT: stage_2_test_set1.sh
<<: *job-aarch64-linux-8c
<<: *job-aarch64-linux
- name: aarch64-gnu-llvm-19-2
env:
IMAGE: aarch64-gnu-llvm-19
@ -281,11 +283,14 @@ auto:
env:
IMAGE: i686-gnu-nopt
DOCKER_SCRIPT: >-
python3 ../x.py test --stage 0 --config /config/nopt-std-config.toml library/std &&
python3 ../x.py test --stage 1 --config /config/nopt-std-config.toml library/std &&
/scripts/stage_2_test_set2.sh
<<: *job-linux-4c
- name: mingw-check
- name: mingw-check-1
<<: *job-linux-4c
- name: mingw-check-2
<<: *job-linux-4c
- name: test-various

View file

@ -3,3 +3,4 @@ Jynn Nelson <github@jyn.dev> <joshua@yottadb.com>
Jynn Nelson <github@jyn.dev> <jyn.nelson@redjack.com>
Jynn Nelson <github@jyn.dev> <jnelson@cloudflare.com>
Jynn Nelson <github@jyn.dev>
Tshepang Mbambo <hopsi@tuta.io> <tshepang@gmail.com>

View file

@ -1 +1 @@
e42bbfe1f7c26f8760a99c4b1f27d33aba1040bb
99e7c15e81385b38a8186b51edc4577d5d7b5bdd

View file

@ -46,3 +46,4 @@ These are videos where various experts explain different parts of the compiler:
## Code Generation
- [January 2019: Cranelift](https://www.youtube.com/watch?v=9OIA7DTFQWU)
- [December 2024: LLVM Developers' Meeting - Rust ❤️ LLVM](https://www.youtube.com/watch?v=Kqz-umsAnk8)

View file

@ -76,7 +76,7 @@ borrow.
[`AutoBorrow`]: https://doc.rust-lang.org/nightly/nightly-rustc/rustc_middle/ty/adjustment/enum.AutoBorrow.html
[converted]: https://doc.rust-lang.org/nightly/nightly-rustc/rustc_mir_build/thir/cx/expr/trait.ToBorrowKind.html#method.to_borrow_kind
[`BorrowKind`]: https://doc.rust-lang.org/nightly/nightly-rustc/rustc_middle/mir/enum.BorrowKind.html
[`GatherBorrows`]: https://doc.rust-lang.org/nightly/nightly-rustc/rustc_middle/mir/visit/trait.Visitor.html#method.visit_local
[`GatherBorrows`]: https://doc.rust-lang.org/nightly/nightly-rustc/rustc_borrowck/borrow_set/struct.GatherBorrows.html
[`BorrowData`]: https://doc.rust-lang.org/nightly/nightly-rustc/rustc_borrowck/borrow_set/struct.BorrowData.html
## Checking two-phase borrows

View file

@ -45,13 +45,13 @@ compiler.
```mermaid
graph TD
s0c["stage0 compiler (1.63)"]:::downloaded -->|A| s0l("stage0 std (1.64)"):::with-s0c;
s0c["stage0 compiler (1.86.0-beta.1)"]:::downloaded -->|A| s0l("stage0 std (1.86.0-beta.1)"):::downloaded;
s0c & s0l --- stepb[ ]:::empty;
stepb -->|B| s0ca["stage0 compiler artifacts (1.64)"]:::with-s0c;
s0ca -->|copy| s1c["stage1 compiler (1.64)"]:::with-s0c;
s1c -->|C| s1l("stage1 std (1.64)"):::with-s1c;
stepb -->|B| s0ca["stage0 compiler artifacts (1.87.0-dev)"]:::with-s0c;
s0ca -->|copy| s1c["stage1 compiler (1.87.0-dev)"]:::with-s0c;
s1c -->|C| s1l("stage1 std (1.87.0-dev)"):::with-s1c;
s1c & s1l --- stepd[ ]:::empty;
stepd -->|D| s1ca["stage1 compiler artifacts (1.64)"]:::with-s1c;
stepd -->|D| s1ca["stage1 compiler artifacts (1.87.0-dev)"]:::with-s1c;
s1ca -->|copy| s2c["stage2 compiler"]:::with-s1c;
classDef empty width:0px,height:0px;
@ -62,19 +62,21 @@ graph TD
### Stage 0: the pre-compiled compiler
The stage0 compiler is usually the current _beta_ `rustc` compiler and its
The stage0 compiler is by default the very recent _beta_ `rustc` compiler and its
associated dynamic libraries, which `./x.py` will download for you. (You can
also configure `./x.py` to use something else.)
also configure `./x.py` to change stage0 to something else.)
The stage0 compiler is then used only to compile [`src/bootstrap`],
[`library/std`], and [`compiler/rustc`]. When assembling the libraries and
binaries that will become the stage1 `rustc` compiler, the freshly compiled
`std` and `rustc` are used. There are two concepts at play here: a compiler
(with its set of dependencies) and its 'target' or 'object' libraries (`std` and
`rustc`). Both are staged, but in a staggered manner.
The precompiled stage0 compiler is then used only to compile [`src/bootstrap`] and [`compiler/rustc`]
with precompiled stage0 std.
Note that to build the stage1 compiler we use the precompiled stage0 compiler and std.
Therefore, to use a compiler with a std that is freshly built from the tree, you need to
build the stage2 compiler.
There are two concepts at play here: a compiler (with its set of dependencies) and its
'target' or 'object' libraries (`std` and `rustc`). Both are staged, but in a staggered manner.
[`compiler/rustc`]: https://github.com/rust-lang/rust/tree/master/compiler/rustc
[`library/std`]: https://github.com/rust-lang/rust/tree/master/library/std
[`src/bootstrap`]: https://github.com/rust-lang/rust/tree/master/src/bootstrap
### Stage 1: from current code, by an earlier compiler
@ -84,16 +86,14 @@ The rustc source code is then compiled with the `stage0` compiler to produce the
### Stage 2: the truly current compiler
We then rebuild our `stage1` compiler with itself to produce the `stage2`
We then rebuild the compiler using `stage1` compiler with in-tree std to produce the `stage2`
compiler.
In theory, the `stage1` compiler is functionally identical to the `stage2`
compiler, but in practice there are subtle differences. In particular, the
`stage1` compiler itself was built by `stage0` and hence not by the source in
your working directory. This means that the ABI generated by the `stage0`
compiler may not match the ABI that would have been made by the `stage1`
compiler, which can cause problems for dynamic libraries, tests, and tools using
`rustc_private`.
The `stage1` compiler itself was built by precompiled `stage0` compiler and std
and hence not by the source in your working directory. This means that the ABI
generated by the `stage0` compiler may not match the ABI that would have been made
by the `stage1` compiler, which can cause problems for dynamic libraries, tests
and tools using `rustc_private`.
Note that the `proc_macro` crate avoids this issue with a `C` FFI layer called
`proc_macro::bridge`, allowing it to be used with `stage1`.
@ -101,9 +101,10 @@ Note that the `proc_macro` crate avoids this issue with a `C` FFI layer called
The `stage2` compiler is the one distributed with `rustup` and all other install
methods. However, it takes a very long time to build because one must first
build the new compiler with an older compiler and then use that to build the new
compiler with itself. For development, you usually only want the `stage1`
compiler, which you can build with `./x build library`. See [Building the
compiler](../how-to-build-and-run.html#building-the-compiler).
compiler with itself.
For development, you usually only want to use `--stage 1` flag to build things.
See [Building the compiler](../how-to-build-and-run.html#building-the-compiler).
### Stage 3: the same-result test
@ -114,10 +115,11 @@ something has broken.
### Building the stages
The script [`./x`] tries to be helpful and pick the stage you most likely meant
for each subcommand. These defaults are as follows:
for each subcommand. Here are some `x` commands with their default stages:
- `check`: `--stage 0`
- `doc`: `--stage 0`
- `check`: `--stage 1`
- `clippy`: `--stage 1`
- `doc`: `--stage 1`
- `build`: `--stage 1`
- `test`: `--stage 1`
- `dist`: `--stage 2`
@ -191,8 +193,8 @@ include, but are not limited to:
without building `rustc` from source ('build with `stage0`, then test the
artifacts'). If you're working on the standard library, this is normally the
test command you want.
- `./x build --stage 0` means to build with the beta `rustc`.
- `./x doc --stage 0` means to document using the beta `rustdoc`.
- `./x build --stage 0` means to build with the stage0 `rustc`.
- `./x doc --stage 0` means to document using the stage0 `rustdoc`.
#### Examples of what *not* to do
@ -208,9 +210,6 @@ include, but are not limited to:
### Building vs. running
Note that `build --stage N compiler/rustc` **does not** build the stage N
compiler: instead it builds the stage N+1 compiler _using_ the stage N compiler.
In short, _stage 0 uses the `stage0` compiler to create `stage0` artifacts which
will later be uplifted to be the stage1 compiler_.
@ -268,23 +267,6 @@ However, when cross-compiling, `stage1` `std` will only run on the host. So the
(See in the table how `stage2` only builds non-host `std` targets).
### Why does only libstd use `cfg(bootstrap)`?
For docs on `cfg(bootstrap)` itself, see [Complications of
Bootstrapping](#complications-of-bootstrapping).
The `rustc` generated by the `stage0` compiler is linked to the freshly-built
`std`, which means that for the most part only `std` needs to be `cfg`-gated, so
that `rustc` can use features added to `std` immediately after their addition,
without need for them to get into the downloaded `beta` compiler.
Note this is different from any other Rust program: `stage1` `rustc` is built by
the _beta_ compiler, but using the _master_ version of `libstd`!
The only time `rustc` uses `cfg(bootstrap)` is when it adds internal lints that
use diagnostic items, or when it uses unstable library features that were
recently changed.
### What is a 'sysroot'?
When you build a project with `cargo`, the build artifacts for dependencies are
@ -459,7 +441,6 @@ compiler itself uses to run. These aren't actually used by artifacts the new
compiler generates. This step also copies the `rustc` and `rustdoc` binaries we
generated into `build/$HOST/stage/bin`.
The `stage1/bin/rustc` is a fully functional compiler, but it doesn't yet have
any libraries to link built binaries or libraries to. The next 3 steps will
provide those libraries for it; they are mostly equivalent to constructing the
`stage1/bin` compiler so we don't go through them individually here.
The `stage1/bin/rustc` is a fully functional compiler built with stage0 (precompiled) compiler and std.
To use a compiler built entirely from source with the in-tree compiler and std, you need to build the
stage2 compiler, which is compiled using the stage1 (in-tree) compiler and std.

View file

@ -217,7 +217,6 @@ probably the best "go to" command for building a local compiler:
This may *look* like it only builds the standard library, but that is not the case.
What this command does is the following:
- Build `std` using the stage0 compiler
- Build `rustc` using the stage0 compiler
- This produces the stage1 compiler
- Build `std` using the stage1 compiler
@ -241,8 +240,7 @@ build. The **full** `rustc` build (what you get with `./x build
--stage 2 compiler/rustc`) has quite a few more steps:
- Build `rustc` with the stage1 compiler.
- The resulting compiler here is called the "stage2" compiler.
- Build `std` with stage2 compiler.
- The resulting compiler here is called the "stage2" compiler, which uses stage1 std from the previous command.
- Build `librustdoc` and a bunch of other things with the stage2 compiler.
You almost never need to do this.
@ -250,14 +248,14 @@ You almost never need to do this.
### Build specific components
If you are working on the standard library, you probably don't need to build
the compiler unless you are planning to use a recently added nightly feature.
Instead, you can just build using the bootstrap compiler.
every other default component. Instead, you can build a specific component by
providing its name, like this:
```bash
./x build --stage 0 library
./x build --stage 1 library
```
If you choose the `library` profile when running `x setup`, you can omit `--stage 0` (it's the
If you choose the `library` profile when running `x setup`, you can omit `--stage 1` (it's the
default).
## Creating a rustup toolchain
@ -271,7 +269,6 @@ you will likely need to build at some point; for example, if you want
to run the entire test suite).
```bash
rustup toolchain link stage0 build/host/stage0-sysroot # beta compiler + stage0 std
rustup toolchain link stage1 build/host/stage1
rustup toolchain link stage2 build/host/stage2
```

View file

@ -85,7 +85,7 @@ Look for existing targets to use as examples.
After adding your target to the `rustc_target` crate you may want to add
`core`, `std`, ... with support for your new target. In that case you will
probably need access to some `target_*` cfg. Unfortunately when building with
stage0 (the beta compiler), you'll get an error that the target cfg is
stage0 (a precompiled compiler), you'll get an error that the target cfg is
unexpected because stage0 doesn't know about the new target specification and
we pass `--check-cfg` in order to tell it to check.

View file

@ -59,6 +59,14 @@ always overrides the inner ones.
## Configuring `rust-analyzer` for `rustc`
### Checking the "library" tree
Checking the "library" tree requires a stage1 compiler, which can be a heavy process on some computers.
For this reason, bootstrap has a flag called `--skip-std-check-if-no-download-rustc` that skips checking the
"library" tree if `rust.download-rustc` isn't available. If you want to avoid putting a heavy load on your computer
with `rust-analyzer`, you can add the `--skip-std-check-if-no-download-rustc` flag to your `./x check` command in
the `rust-analyzer` configuration.
### Project-local rust-analyzer setup
`rust-analyzer` can help you check and format your code whenever you save a
@ -91,7 +99,7 @@ for two reasons:
additional rebuilds in some cases.
To avoid these problems:
- Add `--build-dir=build-rust-analyzer` to all of the custom `x` commands in
- Add `--build-dir=build/rust-analyzer` to all of the custom `x` commands in
your editor's rust-analyzer configuration.
(Feel free to choose a different directory name if desired.)
- Modify the `rust-analyzer.rustfmt.overrideCommand` setting so that it points
@ -100,10 +108,7 @@ To avoid these problems:
copy of `rust-analyzer-proc-macro-srv` in that other build directory.
Using separate build directories for command-line builds and rust-analyzer
requires extra disk space, and also means that running `./x clean` on the
command-line will not clean out the separate build directory. To clean the
separate build directory, run `./x clean --build-dir=build-rust-analyzer`
instead.
requires extra disk space.
### Visual Studio Code
@ -137,7 +142,7 @@ Task] instead:
### Neovim
For Neovim users there are several options for configuring for rustc. The
For Neovim users, there are a few options. The
easiest way is by using [neoconf.nvim](https://github.com/folke/neoconf.nvim/),
which allows for project-local configuration files with the native LSP. The
steps for how to use it are below. Note that they require rust-analyzer to
@ -310,51 +315,15 @@ lets you use `cargo fmt`.
[the section on vscode]: suggested.md#configuring-rust-analyzer-for-rustc
[the section on rustup]: how-to-build-and-run.md?highlight=rustup#creating-a-rustup-toolchain
## Faster builds with `--keep-stage`.
## Faster Builds with CI-rustc
Sometimes just checking whether the compiler builds is not enough. A common
example is that you need to add a `debug!` statement to inspect the value of
some state or better understand the problem. In that case, you don't really need
a full build. By bypassing bootstrap's cache invalidation, you can often get
these builds to complete very fast (e.g., around 30 seconds). The only catch is
this requires a bit of fudging and may produce compilers that don't work (but
that is easily detected and fixed).
The sequence of commands you want is as follows:
- Initial build: `./x build library`
- As [documented previously], this will build a functional stage1 compiler as
part of running all stage0 commands (which include building a `std`
compatible with the stage1 compiler) as well as the first few steps of the
"stage 1 actions" up to "stage1 (sysroot stage1) builds std".
- Subsequent builds: `./x build library --keep-stage 1`
- Note that we added the `--keep-stage 1` flag here
[documented previously]: ./how-to-build-and-run.md#building-the-compiler
As mentioned, the effect of `--keep-stage 1` is that we just _assume_ that the
old standard library can be re-used. If you are editing the compiler, this is
almost always true: you haven't changed the standard library, after all. But
sometimes, it's not true: for example, if you are editing the "metadata" part of
the compiler, which controls how the compiler encodes types and other states
into the `rlib` files, or if you are editing things that wind up in the metadata
(such as the definition of the MIR).
**The TL;DR is that you might get weird behavior from a compile when using
`--keep-stage 1`** -- for example, strange [ICEs](../appendix/glossary.html#ice)
or other panics. In that case, you should simply remove the `--keep-stage 1`
from the command and rebuild. That ought to fix the problem.
You can also use `--keep-stage 1` when running tests. Something like this:
- Initial test run: `./x test tests/ui`
- Subsequent test run: `./x test tests/ui --keep-stage 1`
### Iterating the standard library with `--keep-stage`
If you are making changes to the standard library, you can use `./x build
--keep-stage 0 library` to iteratively rebuild the standard library without
rebuilding the compiler.
If you are not working on the compiler, you often don't need to build the compiler tree.
For example, you can skip building the compiler and only build the `library` tree or the
tools under `src/tools`. To achieve that, you have to enable this by setting the `download-rustc`
option in your configuration. This tells bootstrap to use the latest nightly compiler for `stage > 0`
steps, meaning it will have two precompiled compilers: stage0 compiler and `download-rustc` compiler
for `stage > 0` steps. This way, it will never need to build the in-tree compiler. As a result, your
build time will be significantly reduced by not building the in-tree compiler.
## Using incremental compilation

View file

@ -866,19 +866,17 @@ struct](https://doc.rust-lang.org/nightly/nightly-rustc/rustc_errors/json/struct
(and sub-structs) for the JSON serialization. Don't confuse this with
[`errors::Diag`](https://doc.rust-lang.org/nightly/nightly-rustc/rustc_errors/struct.Diag.html)!
## `#[rustc_on_unimplemented(...)]`
## `#[rustc_on_unimplemented]`
The `#[rustc_on_unimplemented]` attribute allows trait definitions to add specialized
notes to error messages when an implementation was expected but not found.
You can refer to the trait's generic arguments by name and to the resolved type using `Self`.
For example:
This attribute allows trait definitions to modify error messages when an implementation was
expected but not found. The string literals in the attribute are format strings and can be
formatted with named parameters. See the Formatting
section below for what parameters are permitted.
```rust,ignore
#![feature(rustc_attrs)]
#[rustc_on_unimplemented="an iterator over elements of type `{A}` \
cannot be built from a collection of type `{Self}`"]
#[rustc_on_unimplemented(message = "an iterator over \
elements of type `{A}` cannot be built from a \
collection of type `{Self}`")]
trait MyIterator<A> {
fn next(&mut self) -> A;
}
@ -895,32 +893,26 @@ fn main() {
When the user compiles this, they will see the following:
```txt
error[E0277]: the trait bound `&[{integer}]: MyIterator<char>` is not satisfied
--> <anon>:14:5
error[E0277]: an iterator over elements of type `char` cannot be built from a collection of type `&[{integer}]`
--> src/main.rs:13:19
|
14 | iterate_chars(&[1, 2, 3][..]);
| ^^^^^^^^^^^^^ an iterator over elements of type `char` cannot be built from a collection of type `&[{integer}]`
13 | iterate_chars(&[1, 2, 3][..]);
| ------------- ^^^^^^^^^^^^^^ the trait `MyIterator<char>` is not implemented for `&[{integer}]`
| |
| required by a bound introduced by this call
|
= help: the trait `MyIterator<char>` is not implemented for `&[{integer}]`
= note: required by `iterate_chars`
note: required by a bound in `iterate_chars`
```
`rustc_on_unimplemented` also supports advanced filtering for better targeting
of messages, as well as modifying specific parts of the error message. You
target the text of:
You can modify the contents of:
- the main error message (`message`)
- the label (`label`)
- an extra note (`note`)
- the note(s) (`note`)
For example, the following attribute
```rust,ignore
#[rustc_on_unimplemented(
message="message",
label="label",
note="note"
)]
#[rustc_on_unimplemented(message = "message", label = "label", note = "note")]
trait MyIterator<A> {
fn next(&mut self) -> A;
}
@ -930,45 +922,61 @@ Would generate the following output:
```text
error[E0277]: message
--> <anon>:14:5
--> <file>:10:19
|
14 | iterate_chars(&[1, 2, 3][..]);
| ^^^^^^^^^^^^^ label
10 | iterate_chars(&[1, 2, 3][..]);
| ------------- ^^^^^^^^^^^^^^ label
| |
| required by a bound introduced by this call
|
= note: note
= help: the trait `MyIterator<char>` is not implemented for `&[{integer}]`
= note: required by `iterate_chars`
= note: note
note: required by a bound in `iterate_chars`
```
To allow more targeted error messages, it is possible to filter the
application of these fields based on a variety of attributes when using
`on`:
The functionality discussed so far is also available with
[`#[diagnostic::on_unimplemented]`](https://doc.rust-lang.org/nightly/reference/attributes/diagnostics.html#the-diagnosticon_unimplemented-attribute).
If you can, you should use that instead.
### Filtering
To allow more targeted error messages, it is possible to filter the
application of these fields with `on`.
You can filter on the following boolean flags:
- `crate_local`: whether the code causing the trait bound to not be
fulfilled is part of the user's crate. This is used to avoid suggesting
code changes that would require modifying a dependency.
- Any of the generic arguments that can be substituted in the text can be
referred by name as well for filtering, like `Rhs="i32"`, except for
`Self`.
- `_Self`: to filter only on a particular calculated trait resolution, like
`Self="std::iter::Iterator<char>"`. This is needed because `Self` is a
keyword which cannot appear in attributes.
- `direct`: user-specified rather than derived obligation.
- `from_desugaring`: usable both as boolean (whether the flag is present)
or matching against a particular desugaring. The desugaring is identified
with its variant name in the `DesugaringKind` enum.
- `direct`: whether this is a user-specified rather than derived obligation.
- `from_desugaring`: whether we are in some kind of desugaring, like `?`
or a `try` block for example. This flag can also be matched on, see below.
For example, the `Iterator` trait can be annotated in the following way:
You can match on the following names and values, using `name = "value"`:
- `cause`: Match against one variant of the `ObligationCauseCode`
enum. Only `"MainFunctionType"` is supported.
- `from_desugaring`: Match against a particular variant of the `DesugaringKind`
enum. The desugaring is identified by its variant name, for example
`"QuestionMark"` for `?` desugaring or `"TryBlock"` for `try` blocks.
- `Self` and any generic arguments of the trait, like `Self = "alloc::string::String"`
or `Rhs="i32"`.
The compiler can provide several values to match on, for example:
- the `self_ty`, pretty printed with and without type arguments resolved.
- `"{integral}"`, if self_ty is an integral of which the type is known.
- `"[]"`, `"[{ty}]"`, `"[{ty}; _]"`, `"[{ty}; $N]"` when applicable.
- references to said slices and arrays.
- `"fn"`, `"unsafe fn"` or `"#[target_feature] fn"` when self is a function.
- `"{integer}"` and `"{float}"` if the type is a number but we haven't inferred it yet.
- combinations of the above, like `"[{integral}; _]"`.
For example, the `Iterator` trait can be filtered in the following way:
```rust,ignore
#[rustc_on_unimplemented(
on(
_Self="&str",
note="call `.chars()` or `.as_bytes()` on `{Self}`"
),
message="`{Self}` is not an iterator",
label="`{Self}` is not an iterator",
note="maybe try calling `.iter()` or a similar method"
on(Self = "&str", note = "call `.chars()` or `.as_bytes()` on `{Self}`"),
message = "`{Self}` is not an iterator",
label = "`{Self}` is not an iterator",
note = "maybe try calling `.iter()` or a similar method"
)]
pub trait Iterator {}
```
@ -997,15 +1005,47 @@ error[E0277]: `&str` is not an iterator
= note: required by `std::iter::IntoIterator::into_iter`
```
If you need to filter on multiple attributes, you can use `all`, `any` or
`not` in the following way:
The `on` filter accepts `all`, `any` and `not` predicates similar to the `cfg` attribute:
```rust,ignore
#[rustc_on_unimplemented(
on(
all(_Self="&str", T="std::string::String"),
note="you can coerce a `{T}` into a `{Self}` by writing `&*variable`"
)
)]
pub trait From<T>: Sized { /* ... */ }
#[rustc_on_unimplemented(on(
all(Self = "&str", T = "alloc::string::String"),
note = "you can coerce a `{T}` into a `{Self}` by writing `&*variable`"
))]
pub trait From<T>: Sized {
/* ... */
}
```
### Formatting
The string literals are format strings that accept parameters wrapped in braces;
positional and listed parameters and format specifiers are not accepted.
The following parameter names are valid:
- `Self` and all generic parameters of the trait.
- `This`: the name of the trait the attribute is on, without generics.
- `Trait`: the name of the "sugared" trait. See `TraitRefPrintSugared`.
- `ItemContext`: the kind of `hir::Node` we're in, things like `"an async block"`,
`"a function"`, `"an async function"`, etc.
Something like:
```rust,ignore
#![feature(rustc_attrs)]
#[rustc_on_unimplemented(message = "Self = `{Self}`, \
T = `{T}`, this = `{This}`, trait = `{Trait}`, \
context = `{ItemContext}`")]
pub trait From<T>: Sized {
fn from(x: T) -> Self;
}
fn main() {
let x: i8 = From::from(42_i32);
}
```
Will format the message into
```text
"Self = `i8`, T = `i32`, this = `From`, trait = `From<i32>`, context = `a function`"
```

View file

@ -174,7 +174,8 @@ As mentioned previously, the distinction between early and late bound parameters
- When naming a function (early)
- When calling a function (late)
There currently is no syntax for explicitly specifying generic arguments for late bound parameters as part of the call step, only specifying generic arguments when naming a function. The syntax `foo::<'static>();`, despite being part of a function call, behaves as `(foo::<'static>)();` and instantiates the early bound generic parameters on the function item type.
There is currently no syntax for explicitly specifying generic arguments for late bound parameters during the call step; generic arguments can only be specified for early bound parameters when naming a function.
The syntax `foo::<'static>();`, despite being part of a function call, behaves as `(foo::<'static>)();` and instantiates the early bound generic parameters on the function item type.
See the following example:
```rust

View file

@ -73,21 +73,32 @@ To build a corpus, you may want to use:
- The rustc/rust-analyzer/clippy test suites (or even source code) --- though avoid
tests that are already known to cause failures, which often begin with comments
like `// failure-status: 101` or `// known-bug: #NNN`.
- The already-fixed ICEs in [Glacier][glacier] --- though avoid the unfixed
ones in `ices/`!
like `//@ failure-status: 101` or `//@ known-bug: #NNN`.
- The already-fixed ICEs in the archived [Glacier][glacier] repository --- though
avoid the unfixed ones in `ices/`!
[glacier]: https://github.com/rust-lang/glacier
## Extra credit
Here are a few things you can do to help the Rust project after filing an ICE.
- [Bisect][bisect] the bug to figure out when it was introduced
- [Bisect][bisect] the bug to figure out when it was introduced.
If you find the regressing PR / commit, you can mark the issue with the label
`S-has-bisection`. If not, consider applying `E-needs-bisection` instead.
- Fix "distractions": problems with the test case that don't contribute to
triggering the ICE, such as syntax errors or borrow-checking errors
- Minimize the test case (see below)
- Add the minimal test case to [Glacier][glacier]
- Minimize the test case (see below). If successful, you can label the
issue with `S-has-mcve`. Otherwise, you can apply `E-needs-mcve`.
- Add the minimal test case to the rust-lang/rust repo as a [crashes test].
While you're at it, consider including other "untracked" crashes in your PR.
Please don't forget to mark your issue with `S-bug-has-test` afterwards.
See also [applying and removing labels][labeling].
[bisect]: https://rust-lang.github.io/cargo-bisect-rustc/
[crashes test]: tests/compiletest.html#crashes-tests
[labeling]: https://forge.rust-lang.org/release/issue-triaging.html#applying-and-removing-labels
## Minimization
@ -143,7 +154,6 @@ ICEs that require debug assertions to reproduce should be tagged
- [tree-splicer][tree-splicer] generates new source files by combining existing
ones while maintaining correct syntax
[glacier]: https://github.com/rust-lang/glacier
[fuzz-rustc]: https://github.com/dwrensha/fuzz-rustc
[icemaker]: https://github.com/matthiaskrgr/icemaker/
[tree-splicer]: https://github.com/langston-barrett/tree-splicer/

View file

@ -89,7 +89,7 @@ filtering the search to areas you're interested in. For example:
Not all important or beginner work has issue labels.
See below for how to find work that isn't labelled.
[help-wanted-search]: https://github.com/issues?q=is%3Aopen+is%3Aissue+org%3Arust-lang+no%3Aassignee+label%3AE-easy%2C%22good+first+issue%22%2Cgood-first-issue%2CE-medium%2CEasy%2CE-help-wanted%2CE-mentor+-label%3AS-blocked+-linked:pr+
[help-wanted-search]: https://github.com/issues?q=is%3Aopen+is%3Aissue+org%3Arust-lang+no%3Aassignee+label%3AE-easy%2C%22good+first+issue%22%2Cgood-first-issue%2CE-medium%2CEasy%2CE-help-wanted%2CE-mentor+-label%3AS-blocked+-linked%3Apr+
[Triage]: ./contributing.md#issue-triage
### Recurring work
@ -98,8 +98,6 @@ Some work is too large to be done by a single person. In this case, it's common
issues" to co-ordinate the work between contributors. Here are some example tracking issues where
it's easy to pick up work without a large time commitment:
- [Rustdoc Askama Migration](https://github.com/rust-lang/rust/issues/108868)
- [Diagnostic Translation](https://github.com/rust-lang/rust/issues/100717)
- [Move UI tests to subdirectories](https://github.com/rust-lang/rust/issues/73494)
If you find more recurring work, please feel free to add it here!

View file

@ -166,7 +166,10 @@ In this example:
When interfacing with the type system it will often be the case that it's necessary to request a type be normalized. There are a number of different entry points to the underlying normalization logic and each entry point should only be used in specific parts of the compiler.
An additional complication is that the compiler is currently undergoing a transition from the old trait solver to the new trait solver. As part of this transition our approach to normalization in the compiler has changed somewhat significantly, resulting in some normalization entry points being "old solver only" slated for removal in the long-term once the new solver has stabilized.
<!-- date-check: May 2025 -->
An additional complication is that the compiler is currently undergoing a transition from the old trait solver to the new trait solver.
As part of this transition our approach to normalization in the compiler has changed somewhat significantly, resulting in some normalization entry points being "old solver only" slated for removal in the long-term once the new solver has stabilized.
The transition can be tracked via the [WG-trait-system-refactor](https://github.com/rust-lang/rust/labels/WG-trait-system-refactor) label in Github.
Here is a rough overview of the different entry points to normalization in the compiler:
- `infcx.at.structurally_normalize`
@ -306,4 +309,4 @@ Const aliases differ from type aliases a bit here; well formedness of const alia
[^5]: Const aliases certainly wouldn't be *less* sound than type aliases if we stopped doing this
[const_evaluatable]: https://doc.rust-lang.org/nightly/nightly-rustc/rustc_middle/ty/type.ClauseKind.html#variant.ConstEvaluatable
[const_evaluatable]: https://doc.rust-lang.org/nightly/nightly-rustc/rustc_middle/ty/type.ClauseKind.html#variant.ConstEvaluatable

View file

@ -13,13 +13,16 @@ it can work across functions and function bodies.
To help explain how it works, let's consider an example.
```rust
#![feature(type_alias_impl_trait)]
mod m {
pub type Seq<T> = impl IntoIterator<Item = T>;
#[define_opaque(Seq)]
pub fn produce_singleton<T>(t: T) -> Seq<T> {
vec![t]
}
#[define_opaque(Seq)]
pub fn produce_doubleton<T>(t: T, u: T) -> Seq<T> {
vec![t, u]
}

View file

@ -356,7 +356,7 @@ trait Foo {
Failing because a down-stream impl could theoretically provide an
implementation for `RPITIT` without providing an implementation of
`foo`:
`bar`:
```text
error[E0308]: mismatched types

View file

@ -281,10 +281,10 @@ using `XPath` notation to get a precise look at the output. The full
description of all the commands available to `rustdoc` tests (e.g. [`@has`] and
[`@matches`]) is in [`htmldocck.py`].
To use multiple crates in a `rustdoc` test, add `// aux-build:filename.rs`
To use multiple crates in a `rustdoc` test, add `//@ aux-build:filename.rs`
to the top of the test file. `filename.rs` should be placed in an `auxiliary`
directory relative to the test file with the comment. If you need to build
docs for the auxiliary file, use `// build-aux-docs`.
docs for the auxiliary file, use `//@ build-aux-docs`.
In addition, there are separate tests for the search index and `rustdoc`'s
ability to query it. The files in `tests/rustdoc-js` each contain a

View file

@ -93,13 +93,13 @@ does is call the `main()` that's in this crate's `lib.rs`, though.)
interactivity. For information on how to write this form of test,
see [`tests/rustdoc-gui/README.md`][rustdoc-gui-readme]
as well as [the description of the `.goml` format][goml-script]
* Additionally, JavaScript type annotations are written using [TypeScript-flavored JSDoc]
comments and an external d.ts file. The code itself is plain, valid JavaScript; we only
use tsc as a linter.
* The tests on the structure of rustdoc HTML output are located in `tests/rustdoc`,
* Tests on the structure of rustdoc HTML output are located in `tests/rustdoc`,
where they're handled by the test runner of bootstrap and
the supplementary script `src/etc/htmldocck.py`.
[These tests have several extra directives available to them](./rustdoc-internals/rustdoc-test-suite.md).
* Additionally, JavaScript type annotations are written using [TypeScript-flavored JSDoc]
comments and an external d.ts file. The code itself is plain, valid JavaScript; we only
use tsc as a linter.
[TypeScript-flavored JSDoc]: https://www.typescriptlang.org/docs/handbook/jsdoc-supported-types.html
[rustdoc-gui-readme]: https://github.com/rust-lang/rust/blob/master/tests/rustdoc-gui/README.md
@ -116,6 +116,28 @@ Certain browser features that require secure origins, like `localStorage` and
Service Workers, don't work reliably. We can still use such features but we
should make sure pages are still usable without them.
Rustdoc [does not type-check function bodies][platform-specific docs].
This works by [overriding the built-in queries for typeck][override queries],
by [silencing name resolution errors], and by [not resolving opaque types].
This comes with several caveats: in particular, rustdoc *cannot* run any parts of the compiler that
require type-checking bodies; for example it cannot generate `.rlib` files or run most lints.
We want to move away from this model eventually, but we need some alternative for
[the people using it][async-std]; see [various][zulip stop accepting broken code]
[previous][rustdoc meeting 2024-07-08] [zulip][compiler meeting 2023-01-26] [discussion][notriddle rfc].
For examples of code that breaks if this hack is removed, see
[`tests/rustdoc-ui/error-in-impl-trait`].
[platform-specific docs]: https://doc.rust-lang.org/rustdoc/advanced-features.html#interactions-between-platform-specific-docs
[override queries]: https://github.com/rust-lang/rust/blob/52bf0cf795dfecc8b929ebb1c1e2545c3f41d4c9/src/librustdoc/core.rs#L299-L323
[silencing name resolution errors]: https://github.com/rust-lang/rust/blob/52bf0cf795dfecc8b929ebb1c1e2545c3f41d4c9/compiler/rustc_resolve/src/late.rs#L4517
[not resolving opaque types]: https://github.com/rust-lang/rust/blob/52bf0cf795dfecc8b929ebb1c1e2545c3f41d4c9/compiler/rustc_hir_analysis/src/check/check.rs#L188-L194
[async-std]: https://github.com/rust-lang/rust/issues/75100
[rustdoc meeting 2024-07-08]: https://rust-lang.zulipchat.com/#narrow/channel/393423-t-rustdoc.2Fmeetings/topic/meeting.202024-07-08/near/449969836
[compiler meeting 2023-01-26]: https://rust-lang.zulipchat.com/#narrow/channel/238009-t-compiler.2Fmeetings/topic/.5Bweekly.5D.202023-01-26/near/323755789
[zulip stop accepting broken code]: https://rust-lang.zulipchat.com/#narrow/stream/266220-rustdoc/topic/stop.20accepting.20broken.20code
[notriddle rfc]: https://rust-lang.zulipchat.com/#narrow/channel/266220-t-rustdoc/topic/Pre-RFC.3A.20stop.20accepting.20broken.20code
[`tests/rustdoc-ui/error-in-impl-trait`]: https://github.com/rust-lang/rust/tree/163cb4ea3f0ae3bc7921cc259a08a7bf92e73ee6/tests/rustdoc-ui/error-in-impl-trait
## Multiple runs, same output directory
Rustdoc can be run multiple times for varying inputs, with its output set to the

Some files were not shown because too many files have changed in this diff Show more