Auto merge of #3069 - rust-lang:rustup-2023-09-21, r=RalfJung

Automatic sync from rustc
Commit d2db689e1e by bors, 2023-09-21 08:01:45 +00:00
262 changed files with 9500 additions and 5735 deletions

.gitmodules

@ -33,7 +33,7 @@
[submodule "src/llvm-project"]
path = src/llvm-project
url = https://github.com/rust-lang/llvm-project.git
branch = rustc/17.0-2023-07-29
branch = rustc/17.0-2023-09-19
shallow = true
[submodule "src/doc/embedded-book"]
path = src/doc/embedded-book

Cargo.lock

@ -627,13 +627,13 @@ checksum = "acbf1af155f9b9ef647e42cdc158db4b64a1b61f743629225fde6f3e0be2a7c7"
[[package]]
name = "colored"
version = "2.0.0"
version = "2.0.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b3616f750b84d8f0de8a58bda93e08e2a81ad3f523089b05f1dffecab48c6cbd"
checksum = "2674ec482fbc38012cf31e6c42ba0177b431a0cb6f15fe40efa5aab1bda516f6"
dependencies = [
"atty",
"is-terminal",
"lazy_static",
"winapi",
"windows-sys 0.48.0",
]
[[package]]
@ -644,9 +644,9 @@ checksum = "55b672471b4e9f9e95499ea597ff64941a309b2cdbffcc46f2cc5e2d971fd335"
[[package]]
name = "compiler_builtins"
version = "0.1.100"
version = "0.1.101"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d6c0f24437059853f0fa64afc51f338f93647a3de4cf3358ba1bb4171a199775"
checksum = "01a6d58e9c3408138099a396a98fd0d0e6cfb25d723594d2ae48b5004513fd5b"
dependencies = [
"cc",
"rustc-std-workspace-core",
@ -1169,19 +1169,6 @@ dependencies = [
"termcolor",
]
[[package]]
name = "env_logger"
version = "0.9.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a12e6657c4c97ebab115a42dcee77225f7f482cdd841cf7088c657a42e9e00e7"
dependencies = [
"atty",
"humantime 2.1.0",
"log",
"regex",
"termcolor",
]
[[package]]
name = "env_logger"
version = "0.10.0"
@ -2142,9 +2129,9 @@ checksum = "db13adb97ab515a3691f56e4dbab09283d0b86cb45abd991d8634a9d6f501760"
[[package]]
name = "libc"
version = "0.2.147"
version = "0.2.148"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b4668fb0ea861c1df094127ac5f1da3409a82116a4ba74fca2e58ef927159bb3"
checksum = "9cdc71e17332e86d2e1d38c1f99edcb6288ee11b815fb1a4b049eaa2114d369b"
dependencies = [
"rustc-std-workspace-core",
]
@ -2454,7 +2441,7 @@ version = "0.1.0"
dependencies = [
"colored",
"ctrlc",
"env_logger 0.9.3",
"env_logger 0.10.0",
"getrandom",
"lazy_static",
"libc",
@ -5591,11 +5578,11 @@ dependencies = [
[[package]]
name = "tracing-tree"
version = "0.2.3"
version = "0.2.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4f9742d8df709837409dbb22aa25dd7769c260406f20ff48a2320b80a4a6aed0"
checksum = "92d6b63348fad3ae0439b8bebf8d38fb5bda0b115d7a8a7e6f165f12790c58c3"
dependencies = [
"atty",
"is-terminal",
"nu-ansi-term",
"tracing-core",
"tracing-log",
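
Several of these bumps replace the unmaintained atty crate with is-terminal (and winapi with windows-sys). For context, a minimal sketch of the equivalent check via std's `IsTerminal` trait (stable since Rust 1.70), which the `is-terminal` crate mirrors:

use std::io::{stdout, IsTerminal};

fn main() {
    // Replaces the old `atty::is(atty::Stream::Stdout)` pattern.
    if stdout().is_terminal() {
        println!("stdout is a terminal");
    }
}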

RELEASES.md

@ -1,3 +1,13 @@
Version 1.72.1 (2023-09-19)
===========================
- [Adjust codegen change to improve LLVM codegen](https://github.com/rust-lang/rust/pull/115236)
- [rustdoc: Fix self ty params in objects with lifetimes](https://github.com/rust-lang/rust/pull/115276)
- [Fix regression in compile times](https://github.com/rust-lang/rust/pull/114948)
- Resolve some ICE regressions in the compiler:
- [#115215](https://github.com/rust-lang/rust/pull/115215)
- [#115559](https://github.com/rust-lang/rust/pull/115559)
Version 1.72.0 (2023-08-24)
==========================
@ -103,6 +113,16 @@ Compatibility Notes
to a registry.
[#12291](https://github.com/rust-lang/cargo/pull/12291)
Version 1.71.1 (2023-08-03)
===========================
- [Fix CVE-2023-38497: Cargo did not respect the umask when extracting dependencies](https://github.com/rust-lang/cargo/security/advisories/GHSA-j3xp-wfr4-hx87)
- [Fix bash completion for users of Rustup](https://github.com/rust-lang/rust/pull/113579)
- [Do not show `suspicious_double_ref_op` lint when calling `borrow()`](https://github.com/rust-lang/rust/pull/112517)
- [Fix ICE: substitute types before checking inlining compatibility](https://github.com/rust-lang/rust/pull/113802)
- [Fix ICE: don't use `can_eq` in `derive(..)` suggestion for missing method](https://github.com/rust-lang/rust/pull/111516)
- [Fix building Rust 1.71.0 from the source tarball](https://github.com/rust-lang/rust/issues/113678)
Version 1.71.0 (2023-07-13)
==========================


@ -1431,12 +1431,6 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
),
ImplTraitContext::Universal => {
let span = t.span;
self.create_def(
self.current_hir_id_owner.def_id,
*def_node_id,
DefPathData::ImplTrait,
span,
);
// HACK: pprust breaks strings with newlines when the type
// gets too long. We don't want these to show up in compiler
@ -1447,6 +1441,12 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
span,
);
self.create_def(
self.current_hir_id_owner.def_id,
*def_node_id,
DefPathData::TypeNs(ident.name),
span,
);
let (param, bounds, path) = self.lower_universal_param_and_bounds(
*def_node_id,
span,


@ -2,7 +2,8 @@
use rustc_data_structures::fx::{FxHashMap, FxHashSet};
use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrFlags;
use rustc_middle::mir::interpret::{read_target_uint, AllocId, ConstValue, GlobalAlloc, Scalar};
use rustc_middle::mir::interpret::{read_target_uint, AllocId, GlobalAlloc, Scalar};
use rustc_middle::mir::ConstValue;
use cranelift_module::*;
@ -183,15 +184,11 @@ pub(crate) fn codegen_const_value<'tcx>(
.offset_i64(fx, i64::try_from(offset.bytes()).unwrap()),
layout,
),
ConstValue::Slice { data, start, end } => {
ConstValue::Slice { data, meta } => {
let alloc_id = fx.tcx.reserve_and_set_memory_alloc(data);
let ptr = pointer_for_allocation(fx, alloc_id)
.offset_i64(fx, i64::try_from(start).unwrap())
.get_addr(fx);
let len = fx
.bcx
.ins()
.iconst(fx.pointer_type, i64::try_from(end.checked_sub(start).unwrap()).unwrap());
let ptr = pointer_for_allocation(fx, alloc_id).get_addr(fx);
// FIXME: the `try_from` here can actually fail, e.g. for very long ZST slices.
let len = fx.bcx.ins().iconst(fx.pointer_type, i64::try_from(meta).unwrap());
CValue::by_val_pair(ptr, len, layout)
}
}


@ -1,7 +1,7 @@
#![allow(non_camel_case_types)]
use rustc_hir::LangItem;
use rustc_middle::mir::interpret::ConstValue;
use rustc_middle::mir;
use rustc_middle::ty::{self, layout::TyAndLayout, Ty, TyCtxt};
use rustc_span::Span;
@ -194,10 +194,10 @@ pub fn shift_mask_val<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
pub fn asm_const_to_str<'tcx>(
tcx: TyCtxt<'tcx>,
sp: Span,
const_value: ConstValue<'tcx>,
const_value: mir::ConstValue<'tcx>,
ty_and_layout: TyAndLayout<'tcx>,
) -> String {
let ConstValue::Scalar(scalar) = const_value else {
let mir::ConstValue::Scalar(scalar) = const_value else {
span_bug!(sp, "expected Scalar for promoted asm const, but got {:#?}", const_value)
};
let value = scalar.assert_bits(ty_and_layout.size);


@ -2,7 +2,7 @@ use crate::errors;
use crate::mir::operand::OperandRef;
use crate::traits::*;
use rustc_middle::mir;
use rustc_middle::mir::interpret::{ConstValue, ErrorHandled};
use rustc_middle::mir::interpret::ErrorHandled;
use rustc_middle::ty::layout::HasTyCtxt;
use rustc_middle::ty::{self, Ty};
use rustc_target::abi::Abi;
@ -20,7 +20,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
OperandRef::from_const(bx, val, ty)
}
pub fn eval_mir_constant(&self, constant: &mir::Constant<'tcx>) -> ConstValue<'tcx> {
pub fn eval_mir_constant(&self, constant: &mir::Constant<'tcx>) -> mir::ConstValue<'tcx> {
self.monomorphize(constant.literal)
.eval(self.cx.tcx(), ty::ParamEnv::reveal_all(), Some(constant.span))
.expect("erroneous constant not captured by required_consts")


@ -6,8 +6,8 @@ use crate::glue;
use crate::traits::*;
use crate::MemFlags;
use rustc_middle::mir;
use rustc_middle::mir::interpret::{alloc_range, ConstValue, Pointer, Scalar};
use rustc_middle::mir::interpret::{alloc_range, Pointer, Scalar};
use rustc_middle::mir::{self, ConstValue};
use rustc_middle::ty::layout::{LayoutOf, TyAndLayout};
use rustc_middle::ty::Ty;
use rustc_target::abi::{self, Abi, Align, Size};
@ -86,7 +86,7 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> {
pub fn from_const<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
bx: &mut Bx,
val: ConstValue<'tcx>,
val: mir::ConstValue<'tcx>,
ty: Ty<'tcx>,
) -> Self {
let layout = bx.layout_of(ty);
@ -100,15 +100,12 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> {
OperandValue::Immediate(llval)
}
ConstValue::ZeroSized => return OperandRef::zero_sized(layout),
ConstValue::Slice { data, start, end } => {
ConstValue::Slice { data, meta } => {
let Abi::ScalarPair(a_scalar, _) = layout.abi else {
bug!("from_const: invalid ScalarPair layout: {:#?}", layout);
};
let a = Scalar::from_pointer(
Pointer::new(
bx.tcx().reserve_and_set_memory_alloc(data),
Size::from_bytes(start),
),
Pointer::new(bx.tcx().reserve_and_set_memory_alloc(data), Size::ZERO),
&bx.tcx(),
);
let a_llval = bx.scalar_to_backend(
@ -116,7 +113,7 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> {
a_scalar,
bx.scalar_pair_element_backend_type(layout, 0, true),
);
let b_llval = bx.const_usize((end - start) as u64);
let b_llval = bx.const_usize(meta);
OperandValue::Pair(a_llval, b_llval)
}
ConstValue::Indirect { alloc_id, offset } => {


@ -4,9 +4,9 @@ use crate::errors::ConstEvalError;
use either::{Left, Right};
use rustc_hir::def::DefKind;
use rustc_middle::mir;
use rustc_middle::mir::interpret::{ErrorHandled, InterpErrorInfo};
use rustc_middle::mir::pretty::write_allocation_bytes;
use rustc_middle::mir::{self, ConstAlloc, ConstValue};
use rustc_middle::traits::Reveal;
use rustc_middle::ty::layout::LayoutOf;
use rustc_middle::ty::print::with_no_trimmed_paths;
@ -18,9 +18,8 @@ use super::{CanAccessStatics, CompileTimeEvalContext, CompileTimeInterpreter};
use crate::errors;
use crate::interpret::eval_nullary_intrinsic;
use crate::interpret::{
intern_const_alloc_recursive, ConstAlloc, ConstValue, CtfeValidationMode, GlobalId, Immediate,
InternKind, InterpCx, InterpError, InterpResult, MPlaceTy, MemoryKind, OpTy, RefTracking,
StackPopCleanup,
intern_const_alloc_recursive, CtfeValidationMode, GlobalId, Immediate, InternKind, InterpCx,
InterpError, InterpResult, MPlaceTy, MemoryKind, OpTy, RefTracking, StackPopCleanup,
};
// Returns a pointer to where the result lives
@ -152,19 +151,26 @@ pub(super) fn op_to_const<'tcx>(
Immediate::Scalar(x) => ConstValue::Scalar(x),
Immediate::ScalarPair(a, b) => {
debug!("ScalarPair(a: {:?}, b: {:?})", a, b);
// FIXME: assert that this has an appropriate type.
// Currently we actually get here for non-[u8] slices during valtree construction!
let msg = "`op_to_const` on an immediate scalar pair must only be used on slice references to actually allocated memory";
// This codepath solely exists for `valtree_to_const_value` to not need to generate
// a `ConstValue::Indirect` for wide references, so it is tightly restricted to just
// that case.
let pointee_ty = imm.layout.ty.builtin_deref(false).unwrap().ty; // `false` = no raw ptrs
debug_assert!(
matches!(
ecx.tcx.struct_tail_without_normalization(pointee_ty).kind(),
ty::Str | ty::Slice(..),
),
"`ConstValue::Slice` is for slice-tailed types only, but got {}",
imm.layout.ty,
);
let msg = "`op_to_const` on an immediate scalar pair must only be used on slice references to the beginning of an actual allocation";
// We know `offset` is relative to the allocation, so we can use `into_parts`.
// We use `ConstValue::Slice` so that we don't have to generate an allocation for
// `ConstValue::Indirect` here.
let (alloc_id, offset) = a.to_pointer(ecx).expect(msg).into_parts();
let alloc_id = alloc_id.expect(msg);
let data = ecx.tcx.global_alloc(alloc_id).unwrap_memory();
let start = offset.bytes_usize();
let len = b.to_target_usize(ecx).expect(msg);
let len: usize = len.try_into().unwrap();
ConstValue::Slice { data, start, end: start + len }
assert!(offset == abi::Size::ZERO, "{}", msg);
let meta = b.to_target_usize(ecx).expect(msg);
ConstValue::Slice { data, meta }
}
Immediate::Uninit => bug!("`Uninit` is not a valid value for {}", op.layout.ty),
},


@ -1,7 +1,7 @@
// Not in interpret to make sure we do not use private implementation details
use crate::errors::MaxNumNodesInConstErr;
use crate::interpret::{intern_const_alloc_recursive, ConstValue, InternKind, InterpCx, Scalar};
use crate::interpret::{intern_const_alloc_recursive, InternKind, InterpCx, Scalar};
use rustc_middle::mir;
use rustc_middle::mir::interpret::{EvalToValTreeResult, GlobalId};
use rustc_middle::ty::{self, Ty, TyCtxt};
@ -22,7 +22,7 @@ pub(crate) use valtrees::{const_to_valtree_inner, valtree_to_const_value};
pub(crate) fn const_caller_location(
tcx: TyCtxt<'_>,
(file, line, col): (Symbol, u32, u32),
) -> ConstValue<'_> {
) -> mir::ConstValue<'_> {
trace!("const_caller_location: {}:{}:{}", file, line, col);
let mut ecx = mk_eval_cx(tcx, DUMMY_SP, ty::ParamEnv::reveal_all(), CanAccessStatics::No);
@ -30,7 +30,7 @@ pub(crate) fn const_caller_location(
if intern_const_alloc_recursive(&mut ecx, InternKind::Constant, &loc_place).is_err() {
bug!("intern_const_alloc_recursive should not error in this case")
}
ConstValue::Scalar(Scalar::from_maybe_pointer(loc_place.ptr(), &tcx))
mir::ConstValue::Scalar(Scalar::from_maybe_pointer(loc_place.ptr(), &tcx))
}
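
For context, `const_caller_location` materializes the constant behind `#[track_caller]`; a minimal sketch of the surface API involved (std only, nothing compiler-internal):

use std::panic::Location;

#[track_caller]
fn where_am_i() -> &'static Location<'static> {
    // The interpreter constructs this `Location` value as a constant.
    Location::caller()
}

fn main() {
    let loc = where_am_i();
    println!("{}:{}:{}", loc.file(), loc.line(), loc.column());
}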
// We forbid type-level constants that contain more than `VALTREE_MAX_NODES` nodes.
@ -87,7 +87,7 @@ pub(crate) fn eval_to_valtree<'tcx>(
#[instrument(skip(tcx), level = "debug")]
pub(crate) fn try_destructure_mir_constant_for_diagnostics<'tcx>(
tcx: TyCtxt<'tcx>,
val: ConstValue<'tcx>,
val: mir::ConstValue<'tcx>,
ty: Ty<'tcx>,
) -> Option<mir::DestructuredConstant<'tcx>> {
let param_env = ty::ParamEnv::reveal_all();


@ -4,9 +4,10 @@ use super::{ValTreeCreationError, ValTreeCreationResult, VALTREE_MAX_NODES};
use crate::const_eval::CanAccessStatics;
use crate::interpret::MPlaceTy;
use crate::interpret::{
intern_const_alloc_recursive, ConstValue, ImmTy, Immediate, InternKind, MemPlaceMeta,
MemoryKind, PlaceTy, Projectable, Scalar,
intern_const_alloc_recursive, ImmTy, Immediate, InternKind, MemPlaceMeta, MemoryKind, PlaceTy,
Projectable, Scalar,
};
use rustc_middle::mir;
use rustc_middle::ty::layout::{LayoutCx, LayoutOf, TyAndLayout};
use rustc_middle::ty::{self, ScalarInt, Ty, TyCtxt};
use rustc_span::source_map::DUMMY_SP;
@ -206,7 +207,7 @@ pub fn valtree_to_const_value<'tcx>(
tcx: TyCtxt<'tcx>,
param_env_ty: ty::ParamEnvAnd<'tcx, Ty<'tcx>>,
valtree: ty::ValTree<'tcx>,
) -> ConstValue<'tcx> {
) -> mir::ConstValue<'tcx> {
// Basic idea: We directly construct `Scalar` values from trivial `ValTree`s
// (those for constants with type bool, int, uint, float or char).
// For all other types we create an `MPlace` and fill that by walking
@ -219,10 +220,10 @@ pub fn valtree_to_const_value<'tcx>(
match ty.kind() {
ty::FnDef(..) => {
assert!(valtree.unwrap_branch().is_empty());
ConstValue::ZeroSized
mir::ConstValue::ZeroSized
}
ty::Bool | ty::Int(_) | ty::Uint(_) | ty::Float(_) | ty::Char => match valtree {
ty::ValTree::Leaf(scalar_int) => ConstValue::Scalar(Scalar::Int(scalar_int)),
ty::ValTree::Leaf(scalar_int) => mir::ConstValue::Scalar(Scalar::Int(scalar_int)),
ty::ValTree::Branch(_) => bug!(
"ValTrees for Bool, Int, Uint, Float or Char should have the form ValTree::Leaf"
),
@ -237,7 +238,7 @@ pub fn valtree_to_const_value<'tcx>(
let layout = tcx.layout_of(param_env_ty).unwrap();
if layout.is_zst() {
// Fast path to avoid some allocations.
return ConstValue::ZeroSized;
return mir::ConstValue::ZeroSized;
}
if layout.abi.is_scalar()
&& (matches!(ty.kind(), ty::Tuple(_))


@ -351,7 +351,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
match (&src_pointee_ty.kind(), &dest_pointee_ty.kind()) {
(&ty::Array(_, length), &ty::Slice(_)) => {
let ptr = self.read_scalar(src)?;
let ptr = self.read_pointer(src)?;
// u64 cast is from usize to u64, which is always good
let val = Immediate::new_slice(
ptr,
@ -367,6 +367,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
return self.write_immediate(*val, dest);
}
let (old_data, old_vptr) = val.to_scalar_pair();
let old_data = old_data.to_pointer(self)?;
let old_vptr = old_vptr.to_pointer(self)?;
let (ty, old_trait) = self.get_ptr_vtable(old_vptr)?;
if old_trait != data_a.principal() {
@ -378,7 +379,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
(_, &ty::Dynamic(data, _, ty::Dyn)) => {
// Initial cast from sized to dyn trait
let vtable = self.get_vtable_ptr(src_pointee_ty, data.principal())?;
let ptr = self.read_scalar(src)?;
let ptr = self.read_pointer(src)?;
let val = Immediate::new_dyn_trait(ptr, vtable, &*self.tcx);
self.write_immediate(val, dest)
}
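
For context, the two coercions this hunk implements, written in surface Rust (array-to-slice attaches a length as metadata, sized-to-dyn attaches a vtable pointer):

use std::fmt::Debug;

fn main() {
    let arr: &[u8; 3] = &[1, 2, 3];
    let slice: &[u8] = arr;      // Array -> Slice: metadata is the length
    let obj: &dyn Debug = &42u8; // sized -> dyn Trait: metadata is the vtable
    println!("{} {:?}", slice.len(), obj);
}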


@ -5,10 +5,8 @@
use rustc_hir::def_id::DefId;
use rustc_middle::mir::{
self,
interpret::{
Allocation, ConstAllocation, ConstValue, GlobalId, InterpResult, PointerArithmetic, Scalar,
},
BinOp, NonDivergingIntrinsic,
interpret::{Allocation, ConstAllocation, GlobalId, InterpResult, PointerArithmetic, Scalar},
BinOp, ConstValue, NonDivergingIntrinsic,
};
use rustc_middle::ty;
use rustc_middle::ty::layout::{LayoutOf as _, ValidityRequirement};
@ -64,7 +62,7 @@ pub(crate) fn eval_nullary_intrinsic<'tcx>(
sym::type_name => {
ensure_monomorphic_enough(tcx, tp_ty)?;
let alloc = alloc_type_name(tcx, tp_ty);
ConstValue::Slice { data: alloc, start: 0, end: alloc.inner().len() }
ConstValue::Slice { data: alloc, meta: alloc.inner().size().bytes() }
}
sym::needs_drop => {
ensure_monomorphic_enough(tcx, tp_ty)?;
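
The `type_name` arm above backs `std::any::type_name`; the returned `&'static str` is now encoded as a `ConstValue::Slice` whose `meta` is the allocation size, rather than a `start..end` range. Surface usage for reference:

fn main() {
    // The &'static str printed here is the constant the intrinsic produces.
    println!("{}", std::any::type_name::<Vec<u8>>());
}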


@ -13,9 +13,8 @@ use rustc_middle::{mir, ty};
use rustc_target::abi::{self, Abi, Align, HasDataLayout, Size};
use super::{
alloc_range, from_known_layout, mir_assign_valid_types, AllocId, ConstValue, Frame, InterpCx,
InterpResult, MPlaceTy, Machine, MemPlace, MemPlaceMeta, PlaceTy, Pointer, Projectable,
Provenance, Scalar,
alloc_range, from_known_layout, mir_assign_valid_types, AllocId, Frame, InterpCx, InterpResult,
MPlaceTy, Machine, MemPlace, MemPlaceMeta, PlaceTy, Pointer, Projectable, Provenance, Scalar,
};
/// An `Immediate` represents a single immediate self-contained Rust value.
@ -44,24 +43,30 @@ impl<Prov: Provenance> From<Scalar<Prov>> for Immediate<Prov> {
}
impl<Prov: Provenance> Immediate<Prov> {
pub fn from_pointer(p: Pointer<Prov>, cx: &impl HasDataLayout) -> Self {
Immediate::Scalar(Scalar::from_pointer(p, cx))
pub fn from_pointer(ptr: Pointer<Prov>, cx: &impl HasDataLayout) -> Self {
Immediate::Scalar(Scalar::from_pointer(ptr, cx))
}
pub fn from_maybe_pointer(p: Pointer<Option<Prov>>, cx: &impl HasDataLayout) -> Self {
Immediate::Scalar(Scalar::from_maybe_pointer(p, cx))
pub fn from_maybe_pointer(ptr: Pointer<Option<Prov>>, cx: &impl HasDataLayout) -> Self {
Immediate::Scalar(Scalar::from_maybe_pointer(ptr, cx))
}
pub fn new_slice(val: Scalar<Prov>, len: u64, cx: &impl HasDataLayout) -> Self {
Immediate::ScalarPair(val, Scalar::from_target_usize(len, cx))
pub fn new_slice(ptr: Pointer<Option<Prov>>, len: u64, cx: &impl HasDataLayout) -> Self {
Immediate::ScalarPair(
Scalar::from_maybe_pointer(ptr, cx),
Scalar::from_target_usize(len, cx),
)
}
pub fn new_dyn_trait(
val: Scalar<Prov>,
val: Pointer<Option<Prov>>,
vtable: Pointer<Option<Prov>>,
cx: &impl HasDataLayout,
) -> Self {
Immediate::ScalarPair(val, Scalar::from_maybe_pointer(vtable, cx))
Immediate::ScalarPair(
Scalar::from_maybe_pointer(val, cx),
Scalar::from_maybe_pointer(vtable, cx),
)
}
#[inline]
@ -702,7 +707,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
pub(crate) fn const_val_to_op(
&self,
val_val: ConstValue<'tcx>,
val_val: mir::ConstValue<'tcx>,
ty: Ty<'tcx>,
layout: Option<TyAndLayout<'tcx>>,
) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
@ -715,24 +720,21 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
};
let layout = from_known_layout(self.tcx, self.param_env, layout, || self.layout_of(ty))?;
let op = match val_val {
ConstValue::Indirect { alloc_id, offset } => {
mir::ConstValue::Indirect { alloc_id, offset } => {
// We rely on mutability being set correctly in that allocation to prevent writes
// where none should happen.
let ptr = self.global_base_pointer(Pointer::new(alloc_id, offset))?;
Operand::Indirect(MemPlace::from_ptr(ptr.into()))
}
ConstValue::Scalar(x) => Operand::Immediate(adjust_scalar(x)?.into()),
ConstValue::ZeroSized => Operand::Immediate(Immediate::Uninit),
ConstValue::Slice { data, start, end } => {
mir::ConstValue::Scalar(x) => Operand::Immediate(adjust_scalar(x)?.into()),
mir::ConstValue::ZeroSized => Operand::Immediate(Immediate::Uninit),
mir::ConstValue::Slice { data, meta } => {
// We rely on mutability being set correctly in `data` to prevent writes
// where none should happen.
let ptr = Pointer::new(
self.tcx.reserve_and_set_memory_alloc(data),
Size::from_bytes(start), // offset: `start`
);
let ptr = Pointer::new(self.tcx.reserve_and_set_memory_alloc(data), Size::ZERO);
Operand::Immediate(Immediate::new_slice(
Scalar::from_pointer(self.global_base_pointer(ptr)?, &*self.tcx),
u64::try_from(end.checked_sub(start).unwrap()).unwrap(), // len: `end - start`
self.global_base_pointer(ptr)?.into(),
meta,
self,
))
}


@ -9,16 +9,15 @@ use either::{Either, Left, Right};
use rustc_ast::Mutability;
use rustc_index::IndexSlice;
use rustc_middle::mir;
use rustc_middle::mir::interpret::PointerArithmetic;
use rustc_middle::ty;
use rustc_middle::ty::layout::{LayoutOf, TyAndLayout};
use rustc_middle::ty::Ty;
use rustc_target::abi::{Abi, Align, FieldIdx, HasDataLayout, Size, FIRST_VARIANT};
use super::{
alloc_range, mir_assign_valid_types, AllocId, AllocRef, AllocRefMut, CheckInAllocMsg,
ConstAlloc, ImmTy, Immediate, InterpCx, InterpResult, Machine, MemoryKind, OpTy, Operand,
Pointer, Projectable, Provenance, Readable, Scalar,
alloc_range, mir_assign_valid_types, AllocId, AllocRef, AllocRefMut, CheckInAllocMsg, ImmTy,
Immediate, InterpCx, InterpResult, Machine, MemoryKind, OpTy, Operand, Pointer,
PointerArithmetic, Projectable, Provenance, Readable, Scalar,
};
#[derive(Copy, Clone, Hash, PartialEq, Eq, Debug)]
@ -1037,7 +1036,7 @@ where
pub fn raw_const_to_mplace(
&self,
raw: ConstAlloc<'tcx>,
raw: mir::ConstAlloc<'tcx>,
) -> InterpResult<'tcx, MPlaceTy<'tcx, M::Provenance>> {
// This must be an allocation in `tcx`
let _ = self.tcx.global_alloc(raw.alloc_id);


@ -47,6 +47,8 @@ extern crate cfg_if;
#[macro_use]
extern crate rustc_macros;
use std::fmt;
pub use rustc_index::static_assert_size;
#[inline(never)]
@ -126,6 +128,23 @@ impl<F: FnOnce()> Drop for OnDrop<F> {
}
}
/// Turns a closure that takes an `&mut Formatter` into something that can be display-formatted.
pub fn make_display(f: impl Fn(&mut fmt::Formatter<'_>) -> fmt::Result) -> impl fmt::Display {
struct Printer<F> {
f: F,
}
impl<F> fmt::Display for Printer<F>
where
F: Fn(&mut fmt::Formatter<'_>) -> fmt::Result,
{
fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
(self.f)(fmt)
}
}
Printer { f }
}
// See comments in src/librustc_middle/lib.rs
#[doc(hidden)]
pub fn __noop_fix_for_27438() {}
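
A hypothetical usage sketch for the new `make_display` helper above (assuming it is in scope; the closure formats lazily, with no intermediate `String`):

use std::fmt;

fn demo() {
    let n = 42;
    let d = make_display(move |f: &mut fmt::Formatter<'_>| write!(f, "n = {n}"));
    println!("{d}"); // prints: n = 42
}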


@ -7,7 +7,7 @@
#![doc(html_root_url = "https://doc.rust-lang.org/nightly/nightly-rustc/")]
#![feature(lazy_cell)]
#![feature(decl_macro)]
#![feature(ice_to_disk)]
#![feature(panic_update_hook)]
#![feature(let_chains)]
#![recursion_limit = "256"]
#![allow(rustc::potential_query_instability)]
@ -50,9 +50,9 @@ use std::collections::BTreeMap;
use std::env;
use std::ffi::OsString;
use std::fmt::Write as _;
use std::fs;
use std::fs::{self, File};
use std::io::{self, IsTerminal, Read, Write};
use std::panic::{self, catch_unwind};
use std::panic::{self, catch_unwind, PanicInfo};
use std::path::PathBuf;
use std::process::{self, Command, Stdio};
use std::str;
@ -1326,31 +1326,59 @@ pub fn install_ice_hook(bug_report_url: &'static str, extra_info: fn(&Handler))
std::env::set_var("RUST_BACKTRACE", "full");
}
panic::set_hook(Box::new(move |info| {
// If the error was caused by a broken pipe then this is not a bug.
// Write the error and return immediately. See #98700.
#[cfg(windows)]
if let Some(msg) = info.payload().downcast_ref::<String>() {
if msg.starts_with("failed printing to stdout: ") && msg.ends_with("(os error 232)") {
// the error code is already going to be reported when the panic unwinds up the stack
let handler = EarlyErrorHandler::new(ErrorOutputType::default());
let _ = handler.early_error_no_abort(msg.clone());
return;
panic::update_hook(Box::new(
move |default_hook: &(dyn Fn(&PanicInfo<'_>) + Send + Sync + 'static),
info: &PanicInfo<'_>| {
// If the error was caused by a broken pipe then this is not a bug.
// Write the error and return immediately. See #98700.
#[cfg(windows)]
if let Some(msg) = info.payload().downcast_ref::<String>() {
if msg.starts_with("failed printing to stdout: ") && msg.ends_with("(os error 232)")
{
// the error code is already going to be reported when the panic unwinds up the stack
let handler = EarlyErrorHandler::new(ErrorOutputType::default());
let _ = handler.early_error_no_abort(msg.clone());
return;
}
};
// Invoke the default handler, which prints the actual panic message and optionally a backtrace
// Don't do this for delayed bugs, which already emit their own more useful backtrace.
if !info.payload().is::<rustc_errors::DelayedBugPanic>() {
default_hook(info);
// Separate the output with an empty line
eprintln!();
if let Some(ice_path) = ice_path()
&& let Ok(mut out) =
File::options().create(true).append(true).open(&ice_path)
{
// The current implementation always returns `Some`.
let location = info.location().unwrap();
let msg = match info.payload().downcast_ref::<&'static str>() {
Some(s) => *s,
None => match info.payload().downcast_ref::<String>() {
Some(s) => &s[..],
None => "Box<dyn Any>",
},
};
let thread = std::thread::current();
let name = thread.name().unwrap_or("<unnamed>");
let _ = write!(
&mut out,
"thread '{name}' panicked at {location}:\n\
{msg}\n\
stack backtrace:\n\
{:#}",
std::backtrace::Backtrace::force_capture()
);
}
}
};
// Invoke the default handler, which prints the actual panic message and optionally a backtrace
// Don't do this for delayed bugs, which already emit their own more useful backtrace.
if !info.payload().is::<rustc_errors::DelayedBugPanic>() {
std::panic_hook_with_disk_dump(info, ice_path().as_deref());
// Separate the output with an empty line
eprintln!();
}
// Print the ICE message
report_ice(info, bug_report_url, extra_info);
}));
// Print the ICE message
report_ice(info, bug_report_url, extra_info);
},
));
}
/// Prints the ICE message, including query stack, but without backtrace.
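
The hook installation above switches from `panic::set_hook` to the nightly `panic::update_hook`, which wraps the previously installed hook instead of discarding it. A minimal standalone sketch of the pattern:

#![feature(panic_update_hook)]

fn main() {
    // Wrap, rather than replace, the existing hook so the default
    // message and backtrace printing are preserved.
    std::panic::update_hook(|default_hook, info| {
        eprintln!("note: about to run the default panic output");
        default_hook(info);
    });
    panic!("boom");
}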


@ -278,7 +278,8 @@ pub enum DefPathData {
Ctor,
/// A constant expression (see `{ast,hir}::AnonConst`).
AnonConst,
/// An `impl Trait` type node.
/// An existential `impl Trait` type node.
/// Argument-position `impl Trait` has a `TypeNs` with its pretty-printed name.
ImplTrait,
/// `impl Trait` generated associated type node.
ImplTraitAssocTy,
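
As a sketch of what the doc comment describes: argument-position `impl Trait` desugars to an anonymous generic parameter, whose def path segment is now a `TypeNs` named by the pretty-printed type rather than `ImplTrait`:

// `fn takes(x: impl Iterator<Item = u8>)` lowers roughly to
// `fn takes<P>(x: P) where P: Iterator<Item = u8>`, and the synthesized
// parameter is now named "impl Iterator<Item = u8>" in def paths.
fn takes(x: impl Iterator<Item = u8>) -> usize {
    x.count()
}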


@ -21,12 +21,16 @@ hir_analysis_auto_deref_reached_recursion_limit = reached the recursion limit wh
.label = deref recursion limit reached
.help = consider increasing the recursion limit by adding a `#![recursion_limit = "{$suggested_limit}"]` attribute to your crate (`{$crate_name}`)
hir_analysis_cannot_capture_late_bound_const_in_anon_const =
cannot capture late-bound const parameter in a constant
hir_analysis_cannot_capture_late_bound_const =
cannot capture late-bound const parameter in {$what}
.label = parameter defined here
hir_analysis_cannot_capture_late_bound_ty_in_anon_const =
cannot capture late-bound type parameter in a constant
hir_analysis_cannot_capture_late_bound_lifetime =
cannot capture late-bound lifetime in {$what}
.label = lifetime defined here
hir_analysis_cannot_capture_late_bound_ty =
cannot capture late-bound type parameter in {$what}
.label = parameter defined here
hir_analysis_cast_thin_pointer_to_fat_pointer = cannot cast thin pointer `{$expr_ty}` to fat pointer `{$cast_ty}`


@ -268,9 +268,7 @@ impl<'o, 'tcx> dyn AstConv<'tcx> + 'o {
// (*) -- not late-bound, won't change
}
Some(rbv::ResolvedArg::Error(_)) => {
bug!("only ty/ct should resolve as ResolvedArg::Error")
}
Some(rbv::ResolvedArg::Error(guar)) => ty::Region::new_error(tcx, guar),
None => {
self.re_infer(def, lifetime.ident.span).unwrap_or_else(|| {


@ -18,7 +18,7 @@ use rustc_middle::ty::util::ExplicitSelf;
use rustc_middle::ty::{
self, GenericArgs, Ty, TypeFoldable, TypeFolder, TypeSuperFoldable, TypeVisitableExt,
};
use rustc_middle::ty::{GenericParamDefKind, ToPredicate, TyCtxt};
use rustc_middle::ty::{GenericParamDefKind, TyCtxt};
use rustc_span::{Span, DUMMY_SP};
use rustc_trait_selection::traits::error_reporting::TypeErrCtxtExt;
use rustc_trait_selection::traits::outlives_bounds::InferCtxtExt as _;
@ -2196,16 +2196,16 @@ pub(super) fn check_type_bounds<'tcx>(
//
// impl<T> X for T where T: X { type Y = <T as X>::Y; }
}
_ => predicates.push(
_ => predicates.push(ty::Clause::from_projection_clause(
tcx,
ty::Binder::bind_with_vars(
ty::ProjectionPredicate {
projection_ty: tcx.mk_alias_ty(trait_ty.def_id, rebased_args),
term: normalize_impl_ty.into(),
},
bound_vars,
)
.to_predicate(tcx),
),
),
)),
};
ty::ParamEnv::new(tcx.mk_clauses(&predicates), Reveal::UserFacing)
};


@ -158,13 +158,14 @@ enum Scope<'a> {
s: ScopeRef<'a>,
},
/// Disallows capturing non-lifetime binders from parent scopes.
/// Disallows capturing late-bound vars from parent scopes.
///
/// This is necessary for something like `for<T> [(); { /* references T */ }]:`,
/// since we don't do something more correct like replacing any captured
/// late-bound vars with early-bound params in the const's own generics.
AnonConstBoundary {
LateBoundary {
s: ScopeRef<'a>,
what: &'static str,
},
Root {
@ -216,7 +217,9 @@ impl<'a> fmt::Debug for TruncatedScopeDebug<'a> {
.field("s", &"..")
.finish(),
Scope::TraitRefBoundary { s: _ } => f.debug_struct("TraitRefBoundary").finish(),
Scope::AnonConstBoundary { s: _ } => f.debug_struct("AnonConstBoundary").finish(),
Scope::LateBoundary { s: _, what } => {
f.debug_struct("LateBoundary").field("what", what).finish()
}
Scope::Root { opt_parent_item } => {
f.debug_struct("Root").field("opt_parent_item", &opt_parent_item).finish()
}
@ -318,7 +321,7 @@ impl<'a, 'tcx> BoundVarContext<'a, 'tcx> {
break (vec![], BinderScopeType::Normal);
}
Scope::ObjectLifetimeDefault { s, .. } | Scope::AnonConstBoundary { s } => {
Scope::ObjectLifetimeDefault { s, .. } | Scope::LateBoundary { s, .. } => {
scope = s;
}
@ -697,9 +700,12 @@ impl<'a, 'tcx> Visitor<'tcx> for BoundVarContext<'a, 'tcx> {
}) => {
intravisit::walk_ty(self, ty);
// Elided lifetimes are not allowed in non-return
// position impl Trait
let scope = Scope::TraitRefBoundary { s: self.scope };
// Elided lifetimes and late-bound lifetimes (from the parent)
// are not allowed in non-return position impl Trait
let scope = Scope::LateBoundary {
s: &Scope::TraitRefBoundary { s: self.scope },
what: "type alias impl trait",
};
self.with(scope, |this| intravisit::walk_item(this, opaque_ty));
return;
@ -979,7 +985,7 @@ impl<'a, 'tcx> Visitor<'tcx> for BoundVarContext<'a, 'tcx> {
}
fn visit_anon_const(&mut self, c: &'tcx hir::AnonConst) {
self.with(Scope::AnonConstBoundary { s: self.scope }, |this| {
self.with(Scope::LateBoundary { s: self.scope, what: "constant" }, |this| {
intravisit::walk_anon_const(this, c);
});
}
@ -1174,6 +1180,7 @@ impl<'a, 'tcx> BoundVarContext<'a, 'tcx> {
let mut late_depth = 0;
let mut scope = self.scope;
let mut outermost_body = None;
let mut crossed_late_boundary = None;
let result = loop {
match *scope {
Scope::Body { id, s } => {
@ -1258,8 +1265,12 @@ impl<'a, 'tcx> BoundVarContext<'a, 'tcx> {
Scope::ObjectLifetimeDefault { s, .. }
| Scope::Supertrait { s, .. }
| Scope::TraitRefBoundary { s, .. }
| Scope::AnonConstBoundary { s } => {
| Scope::TraitRefBoundary { s, .. } => {
scope = s;
}
Scope::LateBoundary { s, what } => {
crossed_late_boundary = Some(what);
scope = s;
}
}
@ -1268,6 +1279,22 @@ impl<'a, 'tcx> BoundVarContext<'a, 'tcx> {
if let Some(mut def) = result {
if let ResolvedArg::EarlyBound(..) = def {
// Do not free early-bound regions, only late-bound ones.
} else if let ResolvedArg::LateBound(_, _, param_def_id) = def
&& let Some(what) = crossed_late_boundary
{
let use_span = lifetime_ref.ident.span;
let def_span = self.tcx.def_span(param_def_id);
let guar = match self.tcx.def_kind(param_def_id) {
DefKind::LifetimeParam => {
self.tcx.sess.emit_err(errors::CannotCaptureLateBound::Lifetime {
use_span,
def_span,
what,
})
}
_ => unreachable!(),
};
def = ResolvedArg::Error(guar);
} else if let Some(body_id) = outermost_body {
let fn_id = self.tcx.hir().body_owner(body_id);
match self.tcx.hir().get(fn_id) {
@ -1322,7 +1349,7 @@ impl<'a, 'tcx> BoundVarContext<'a, 'tcx> {
| Scope::ObjectLifetimeDefault { s, .. }
| Scope::Supertrait { s, .. }
| Scope::TraitRefBoundary { s, .. }
| Scope::AnonConstBoundary { s } => {
| Scope::LateBoundary { s, .. } => {
scope = s;
}
}
@ -1341,7 +1368,7 @@ impl<'a, 'tcx> BoundVarContext<'a, 'tcx> {
// search.
let mut late_depth = 0;
let mut scope = self.scope;
let mut crossed_anon_const = false;
let mut crossed_late_boundary = None;
let result = loop {
match *scope {
@ -1376,28 +1403,32 @@ impl<'a, 'tcx> BoundVarContext<'a, 'tcx> {
scope = s;
}
Scope::AnonConstBoundary { s } => {
crossed_anon_const = true;
Scope::LateBoundary { s, what } => {
crossed_late_boundary = Some(what);
scope = s;
}
}
};
if let Some(def) = result {
if let ResolvedArg::LateBound(..) = def && crossed_anon_const {
if let ResolvedArg::LateBound(..) = def
&& let Some(what) = crossed_late_boundary
{
let use_span = self.tcx.hir().span(hir_id);
let def_span = self.tcx.def_span(param_def_id);
let guar = match self.tcx.def_kind(param_def_id) {
DefKind::ConstParam => {
self.tcx.sess.emit_err(errors::CannotCaptureLateBoundInAnonConst::Const {
self.tcx.sess.emit_err(errors::CannotCaptureLateBound::Const {
use_span,
def_span,
what,
})
}
DefKind::TyParam => {
self.tcx.sess.emit_err(errors::CannotCaptureLateBoundInAnonConst::Type {
self.tcx.sess.emit_err(errors::CannotCaptureLateBound::Type {
use_span,
def_span,
what,
})
}
_ => unreachable!(),
@ -1446,7 +1477,7 @@ impl<'a, 'tcx> BoundVarContext<'a, 'tcx> {
| Scope::ObjectLifetimeDefault { s, .. }
| Scope::Supertrait { s, .. }
| Scope::TraitRefBoundary { s, .. }
| Scope::AnonConstBoundary { s } => {
| Scope::LateBoundary { s, .. } => {
scope = s;
}
}
@ -1526,7 +1557,7 @@ impl<'a, 'tcx> BoundVarContext<'a, 'tcx> {
| Scope::ObjectLifetimeDefault { s, .. }
| Scope::Supertrait { s, .. }
| Scope::TraitRefBoundary { s, .. }
| Scope::AnonConstBoundary { s } => {
| Scope::LateBoundary { s, .. } => {
scope = s;
}
}
@ -1831,7 +1862,7 @@ impl<'a, 'tcx> BoundVarContext<'a, 'tcx> {
Scope::Supertrait { s, .. }
| Scope::TraitRefBoundary { s, .. }
| Scope::AnonConstBoundary { s } => {
| Scope::LateBoundary { s, .. } => {
scope = s;
}
}
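
An illustration of the pattern the new `LateBoundary` scope rejects; this is a hypothetical reproduction using `non_lifetime_binders`, where the anonymous constant would have to capture the late-bound `T`:

#![feature(non_lifetime_binders, generic_const_exprs)]

// error: cannot capture late-bound type parameter in a constant
fn f() where for<T> [u8; std::mem::size_of::<T>()]: Sized {}

fn main() {}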


@ -430,20 +430,30 @@ pub(crate) struct VariadicFunctionCompatibleConvention<'a> {
}
#[derive(Diagnostic)]
pub(crate) enum CannotCaptureLateBoundInAnonConst {
#[diag(hir_analysis_cannot_capture_late_bound_ty_in_anon_const)]
pub(crate) enum CannotCaptureLateBound {
#[diag(hir_analysis_cannot_capture_late_bound_ty)]
Type {
#[primary_span]
use_span: Span,
#[label]
def_span: Span,
what: &'static str,
},
#[diag(hir_analysis_cannot_capture_late_bound_const_in_anon_const)]
#[diag(hir_analysis_cannot_capture_late_bound_const)]
Const {
#[primary_span]
use_span: Span,
#[label]
def_span: Span,
what: &'static str,
},
#[diag(hir_analysis_cannot_capture_late_bound_lifetime)]
Lifetime {
#[primary_span]
use_span: Span,
#[label]
def_span: Span,
what: &'static str,
},
}
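
For reference, a sketch of an emission site: each field of the derived variant fills the matching Fluent placeholder, so `what` substitutes into `{$what}` in the messages added earlier in this diff (this mirrors the call site in resolve_bound_vars above):

// Renders as: "cannot capture late-bound lifetime in type alias impl trait"
tcx.sess.emit_err(errors::CannotCaptureLateBound::Lifetime {
    use_span,
    def_span,
    what: "type alias impl trait",
});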


@ -33,27 +33,17 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
};
let generics = self.tcx.generics_of(def_id);
let predicate_args = match unsubstituted_pred.kind().skip_binder() {
ty::ClauseKind::Trait(pred) => pred.trait_ref.args.to_vec(),
ty::ClauseKind::Projection(pred) => pred.projection_ty.args.to_vec(),
ty::ClauseKind::ConstArgHasType(arg, ty) => {
vec![ty.into(), arg.into()]
}
ty::ClauseKind::ConstEvaluatable(e) => vec![e.into()],
_ => return false,
};
let (predicate_args, predicate_self_type_to_point_at) =
match unsubstituted_pred.kind().skip_binder() {
ty::ClauseKind::Trait(pred) => {
(pred.trait_ref.args.to_vec(), Some(pred.self_ty().into()))
}
ty::ClauseKind::Projection(pred) => (pred.projection_ty.args.to_vec(), None),
ty::ClauseKind::ConstArgHasType(arg, ty) => (vec![ty.into(), arg.into()], None),
ty::ClauseKind::ConstEvaluatable(e) => (vec![e.into()], None),
_ => return false,
};
let direct_param = if let ty::ClauseKind::Trait(pred) = unsubstituted_pred.kind().skip_binder()
&& let ty = pred.trait_ref.self_ty()
&& let ty::Param(_param) = ty.kind()
&& let Some(arg) = predicate_args.get(0)
&& let ty::GenericArgKind::Type(arg_ty) = arg.unpack()
&& arg_ty == ty
{
Some(*arg)
} else {
None
};
let find_param_matching = |matches: &dyn Fn(ty::ParamTerm) -> bool| {
predicate_args.iter().find_map(|arg| {
arg.walk().find_map(|arg| {
@ -112,18 +102,21 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
let qpath =
if let hir::ExprKind::Path(qpath) = expr.kind { Some(qpath) } else { None };
(Some(*expr), qpath)
(Some(&expr.kind), qpath)
}
hir::Node::Ty(hir::Ty { kind: hir::TyKind::Path(qpath), .. }) => (None, Some(*qpath)),
_ => return false,
};
if let Some(qpath) = qpath {
if let Some(param) = direct_param {
if self.point_at_path_if_possible(error, def_id, param, &qpath) {
return true;
}
// Prefer pointing at the turbofished arg that corresponds to the
// self type of the failing predicate over anything else.
if let Some(param) = predicate_self_type_to_point_at
&& self.point_at_path_if_possible(error, def_id, param, &qpath)
{
return true;
}
if let hir::Node::Expr(hir::Expr {
kind: hir::ExprKind::Call(callee, args),
hir_id: call_hir_id,
@ -166,11 +159,16 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
}
}
match expr.map(|e| e.kind) {
match expr {
Some(hir::ExprKind::MethodCall(segment, receiver, args, ..)) => {
if let Some(param) = direct_param
if let Some(param) = predicate_self_type_to_point_at
&& self.point_at_generic_if_possible(error, def_id, param, segment)
{
// HACK: This is not correct, since `predicate_self_type_to_point_at` might
// not actually correspond to the receiver of the method call. But we
// re-adjust the cause code here in order to prefer pointing at one of
// the method's turbofish segments but still use `FunctionArgumentObligation`
// elsewhere. Hopefully this doesn't break something.
error.obligation.cause.map_code(|parent_code| {
ObligationCauseCode::FunctionArgumentObligation {
arg_hir_id: receiver.hir_id,
@ -180,6 +178,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
});
return true;
}
for param in [param_to_point_at, fallback_param_to_point_at, self_param_to_point_at]
.into_iter()
.flatten()
@ -237,7 +236,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
}
for param in [
direct_param,
predicate_self_type_to_point_at,
param_to_point_at,
fallback_param_to_point_at,
self_param_to_point_at,


@ -325,13 +325,16 @@ impl<'a, 'tcx> AstConv<'tcx> for FnCtxt<'a, 'tcx> {
fn record_ty(&self, hir_id: hir::HirId, ty: Ty<'tcx>, span: Span) {
// FIXME: normalization and escaping regions
let ty = if !ty.has_escaping_bound_vars() {
if let ty::Alias(
ty::AliasKind::Projection | ty::AliasKind::Weak,
ty::AliasTy { args, def_id, .. },
) = ty.kind()
// NOTE: These obligations are 100% redundant and are implied by
// WF obligations that are registered elsewhere, but they have a
// better cause code assigned to them in `add_required_obligations_for_hir`.
// This means that they should shadow obligations with worse spans.
if let ty::Alias(ty::Projection | ty::Weak, ty::AliasTy { args, def_id, .. }) =
ty.kind()
{
self.add_required_obligations_for_hir(span, *def_id, args, hir_id);
}
self.normalize(span, ty)
} else {
ty


@ -5,7 +5,6 @@ use rustc_hir::def::{CtorKind, CtorOf};
use rustc_index::Idx;
use rustc_middle::ty::{ParameterizedOverTcx, UnusedGenericParams};
use rustc_serialize::opaque::FileEncoder;
use rustc_serialize::Encoder as _;
use rustc_span::hygiene::MacroKind;
use std::marker::PhantomData;
use std::num::NonZeroUsize;
@ -468,7 +467,10 @@ impl<I: Idx, const N: usize, T: FixedSizeEncoding<ByteArray = [u8; N]>> TableBui
let width = self.width;
for block in &self.blocks {
buf.emit_raw_bytes(&block[..width]);
buf.write_with(|dest| {
*dest = *block;
width
});
}
LazyTable::from_position_and_encoded_size(


@ -0,0 +1,583 @@
use std::fmt::{self, Debug, Display, Formatter};
use rustc_hir;
use rustc_hir::def_id::{DefId, LocalDefId};
use rustc_hir::{self as hir};
use rustc_span::Span;
use rustc_target::abi::{HasDataLayout, Size};
use crate::mir::interpret::{
alloc_range, AllocId, ConstAllocation, ErrorHandled, GlobalAlloc, Scalar,
};
use crate::mir::{pretty_print_const_value, Promoted};
use crate::ty::{self, print::pretty_print_const, List, Ty, TyCtxt};
use crate::ty::{GenericArgs, GenericArgsRef};
use crate::ty::{ScalarInt, UserTypeAnnotationIndex};
///////////////////////////////////////////////////////////////////////////
/// Evaluated Constants
/// Represents the result of const evaluation via the `eval_to_allocation` query.
/// Not to be confused with `ConstAllocation`, which directly refers to the underlying data!
/// Here we indirect via an `AllocId`.
#[derive(Copy, Clone, HashStable, TyEncodable, TyDecodable, Debug, Hash, Eq, PartialEq)]
pub struct ConstAlloc<'tcx> {
/// The value lives here, at offset 0, and that allocation definitely is an `AllocKind::Memory`
/// (so you can use `AllocMap::unwrap_memory`).
pub alloc_id: AllocId,
pub ty: Ty<'tcx>,
}
/// Represents a constant value in Rust. `Scalar` and `Slice` are optimizations for
/// array length computations, enum discriminants and the pattern matching logic.
#[derive(Copy, Clone, Debug, Eq, PartialEq, TyEncodable, TyDecodable, Hash)]
#[derive(HashStable, Lift)]
pub enum ConstValue<'tcx> {
/// Used for types with `layout::abi::Scalar` ABI.
///
/// Not using the enum `Value` to encode that this must not be `Uninit`.
Scalar(Scalar),
/// Only for ZSTs.
ZeroSized,
/// Used for references to unsized types with slice tail.
///
/// This is worth an optimized representation since Rust has literals of type `&str` and
/// `&[u8]`. Not having to indirect those through an `AllocId` (or two, if we used `Indirect`)
/// has shown measurable performance improvements on stress tests. We then reuse this
/// optimization for slice-tail types more generally during valtree-to-constval conversion.
Slice {
/// The allocation storing the slice contents.
/// This always points to the beginning of the allocation.
data: ConstAllocation<'tcx>,
/// The metadata field of the reference.
/// This is a "target usize", so we use `u64` as in the interpreter.
meta: u64,
},
/// A value not representable by the other variants; needs to be stored in-memory.
///
/// Must *not* be used for scalars or ZST, but having `&str` or other slices in this variant is fine.
Indirect {
/// The backing memory of the value. May contain more memory than needed for just the value
/// if this points into some other larger ConstValue.
///
/// We use an `AllocId` here instead of a `ConstAllocation<'tcx>` to make sure that when a
/// raw constant (which is basically just an `AllocId`) is turned into a `ConstValue` and
/// back, we can preserve the original `AllocId`.
alloc_id: AllocId,
/// Offset into `alloc`
offset: Size,
},
}
#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
static_assert_size!(ConstValue<'_>, 24);
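
A plain-Rust analogy for the reworked `Slice` variant: a wide pointer is (data pointer, metadata); the constant form now always points at the start of its backing allocation, and `meta` carries the length:

fn wide_parts(s: &str) -> (*const u8, usize) {
    // `as_ptr` corresponds to the start of `data`; `len` plays the role of `meta`.
    (s.as_ptr(), s.len())
}

fn main() {
    let (ptr, meta) = wide_parts("hi");
    assert_eq!(meta, 2);
    assert!(!ptr.is_null());
}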
impl<'tcx> ConstValue<'tcx> {
#[inline]
pub fn try_to_scalar(&self) -> Option<Scalar<AllocId>> {
match *self {
ConstValue::Indirect { .. } | ConstValue::Slice { .. } | ConstValue::ZeroSized => None,
ConstValue::Scalar(val) => Some(val),
}
}
pub fn try_to_scalar_int(&self) -> Option<ScalarInt> {
self.try_to_scalar()?.try_to_int().ok()
}
pub fn try_to_bits(&self, size: Size) -> Option<u128> {
self.try_to_scalar_int()?.to_bits(size).ok()
}
pub fn try_to_bool(&self) -> Option<bool> {
self.try_to_scalar_int()?.try_into().ok()
}
pub fn try_to_target_usize(&self, tcx: TyCtxt<'tcx>) -> Option<u64> {
self.try_to_scalar_int()?.try_to_target_usize(tcx).ok()
}
pub fn try_to_bits_for_ty(
&self,
tcx: TyCtxt<'tcx>,
param_env: ty::ParamEnv<'tcx>,
ty: Ty<'tcx>,
) -> Option<u128> {
let size = tcx.layout_of(param_env.with_reveal_all_normalized(tcx).and(ty)).ok()?.size;
self.try_to_bits(size)
}
pub fn from_bool(b: bool) -> Self {
ConstValue::Scalar(Scalar::from_bool(b))
}
pub fn from_u64(i: u64) -> Self {
ConstValue::Scalar(Scalar::from_u64(i))
}
pub fn from_u128(i: u128) -> Self {
ConstValue::Scalar(Scalar::from_u128(i))
}
pub fn from_target_usize(i: u64, cx: &impl HasDataLayout) -> Self {
ConstValue::Scalar(Scalar::from_target_usize(i, cx))
}
/// Must only be called on constants of type `&str` or `&[u8]`!
pub fn try_get_slice_bytes_for_diagnostics(&self, tcx: TyCtxt<'tcx>) -> Option<&'tcx [u8]> {
let (data, start, end) = match self {
ConstValue::Scalar(_) | ConstValue::ZeroSized => {
bug!("`try_get_slice_bytes` on non-slice constant")
}
&ConstValue::Slice { data, meta } => (data, 0, meta),
&ConstValue::Indirect { alloc_id, offset } => {
// The reference itself is stored behind an indirection.
// Load the reference, and then load the actual slice contents.
let a = tcx.global_alloc(alloc_id).unwrap_memory().inner();
let ptr_size = tcx.data_layout.pointer_size;
if a.size() < offset + 2 * ptr_size {
// (partially) dangling reference
return None;
}
// Read the wide pointer components.
let ptr = a
.read_scalar(
&tcx,
alloc_range(offset, ptr_size),
/* read_provenance */ true,
)
.ok()?;
let ptr = ptr.to_pointer(&tcx).ok()?;
let len = a
.read_scalar(
&tcx,
alloc_range(offset + ptr_size, ptr_size),
/* read_provenance */ false,
)
.ok()?;
let len = len.to_target_usize(&tcx).ok()?;
if len == 0 {
return Some(&[]);
}
// Non-empty slice, must have memory. We know this is a relative pointer.
let (inner_alloc_id, offset) = ptr.into_parts();
let data = tcx.global_alloc(inner_alloc_id?).unwrap_memory();
(data, offset.bytes(), offset.bytes() + len)
}
};
// This is for diagnostics only, so we are okay to use `inspect_with_uninit_and_ptr_outside_interpreter`.
let start = start.try_into().unwrap();
let end = end.try_into().unwrap();
Some(data.inner().inspect_with_uninit_and_ptr_outside_interpreter(start..end))
}
}
///////////////////////////////////////////////////////////////////////////
/// Constants
///
/// Two constants are equal if they are the same constant. Note that
/// this does not necessarily mean that they are `==` in Rust. In
/// particular, one must be wary of `NaN`!
#[derive(Clone, Copy, PartialEq, TyEncodable, TyDecodable, Hash, HashStable)]
#[derive(TypeFoldable, TypeVisitable)]
pub struct Constant<'tcx> {
pub span: Span,
/// Optional user-given type: for something like
/// `collect::<Vec<_>>`, this would be present and would
/// indicate that `Vec<_>` was explicitly specified.
///
/// Needed for NLL to impose user-given type constraints.
pub user_ty: Option<UserTypeAnnotationIndex>,
pub literal: ConstantKind<'tcx>,
}
#[derive(Clone, Copy, PartialEq, Eq, TyEncodable, TyDecodable, Hash, HashStable, Debug)]
#[derive(TypeFoldable, TypeVisitable)]
pub enum ConstantKind<'tcx> {
/// This constant came from the type system.
///
/// Any way of turning `ty::Const` into `ConstValue` should go through `valtree_to_const_val`;
/// this ensures that we consistently produce "clean" values without data in the padding or
/// anything like that.
Ty(ty::Const<'tcx>),
/// An unevaluated mir constant which is not part of the type system.
///
/// Note that `Ty(ty::ConstKind::Unevaluated)` and this variant are *not* identical! `Ty` will
/// always flow through a valtree, so all data not captured in the valtree is lost. This variant
/// directly uses the evaluated result of the given constant, including e.g. data stored in
/// padding.
Unevaluated(UnevaluatedConst<'tcx>, Ty<'tcx>),
/// This constant cannot go back into the type system, as it represents
/// something the type system cannot handle (e.g. pointers).
Val(ConstValue<'tcx>, Ty<'tcx>),
}
impl<'tcx> Constant<'tcx> {
pub fn check_static_ptr(&self, tcx: TyCtxt<'_>) -> Option<DefId> {
match self.literal.try_to_scalar() {
Some(Scalar::Ptr(ptr, _size)) => match tcx.global_alloc(ptr.provenance) {
GlobalAlloc::Static(def_id) => {
assert!(!tcx.is_thread_local_static(def_id));
Some(def_id)
}
_ => None,
},
_ => None,
}
}
#[inline]
pub fn ty(&self) -> Ty<'tcx> {
self.literal.ty()
}
}
impl<'tcx> ConstantKind<'tcx> {
#[inline(always)]
pub fn ty(&self) -> Ty<'tcx> {
match self {
ConstantKind::Ty(c) => c.ty(),
ConstantKind::Val(_, ty) | ConstantKind::Unevaluated(_, ty) => *ty,
}
}
#[inline]
pub fn try_to_scalar(self) -> Option<Scalar> {
match self {
ConstantKind::Ty(c) => match c.kind() {
ty::ConstKind::Value(valtree) => match valtree {
ty::ValTree::Leaf(scalar_int) => Some(Scalar::Int(scalar_int)),
ty::ValTree::Branch(_) => None,
},
_ => None,
},
ConstantKind::Val(val, _) => val.try_to_scalar(),
ConstantKind::Unevaluated(..) => None,
}
}
#[inline]
pub fn try_to_scalar_int(self) -> Option<ScalarInt> {
self.try_to_scalar()?.try_to_int().ok()
}
#[inline]
pub fn try_to_bits(self, size: Size) -> Option<u128> {
self.try_to_scalar_int()?.to_bits(size).ok()
}
#[inline]
pub fn try_to_bool(self) -> Option<bool> {
self.try_to_scalar_int()?.try_into().ok()
}
#[inline]
pub fn eval(
self,
tcx: TyCtxt<'tcx>,
param_env: ty::ParamEnv<'tcx>,
span: Option<Span>,
) -> Result<ConstValue<'tcx>, ErrorHandled> {
match self {
ConstantKind::Ty(c) => {
// We want to consistently have a "clean" value for type system constants (i.e., no
// data hidden in the padding), so we always go through a valtree here.
let val = c.eval(tcx, param_env, span)?;
Ok(tcx.valtree_to_const_val((self.ty(), val)))
}
ConstantKind::Unevaluated(uneval, _) => {
// FIXME: We might want to have a `try_eval`-like function on `Unevaluated`
tcx.const_eval_resolve(param_env, uneval, span)
}
ConstantKind::Val(val, _) => Ok(val),
}
}
/// Normalizes the constant to a value or an error if possible.
#[inline]
pub fn normalize(self, tcx: TyCtxt<'tcx>, param_env: ty::ParamEnv<'tcx>) -> Self {
match self.eval(tcx, param_env, None) {
Ok(val) => Self::Val(val, self.ty()),
Err(ErrorHandled::Reported(guar, _span)) => {
Self::Ty(ty::Const::new_error(tcx, guar.into(), self.ty()))
}
Err(ErrorHandled::TooGeneric(_span)) => self,
}
}
#[inline]
pub fn try_eval_scalar(
self,
tcx: TyCtxt<'tcx>,
param_env: ty::ParamEnv<'tcx>,
) -> Option<Scalar> {
self.eval(tcx, param_env, None).ok()?.try_to_scalar()
}
#[inline]
pub fn try_eval_scalar_int(
self,
tcx: TyCtxt<'tcx>,
param_env: ty::ParamEnv<'tcx>,
) -> Option<ScalarInt> {
self.try_eval_scalar(tcx, param_env)?.try_to_int().ok()
}
#[inline]
pub fn try_eval_bits(
&self,
tcx: TyCtxt<'tcx>,
param_env: ty::ParamEnv<'tcx>,
ty: Ty<'tcx>,
) -> Option<u128> {
let int = self.try_eval_scalar_int(tcx, param_env)?;
assert_eq!(self.ty(), ty);
let size = tcx.layout_of(param_env.with_reveal_all_normalized(tcx).and(ty)).ok()?.size;
int.to_bits(size).ok()
}
/// Panics if the value cannot be evaluated or doesn't contain a valid integer of the given type.
#[inline]
pub fn eval_bits(self, tcx: TyCtxt<'tcx>, param_env: ty::ParamEnv<'tcx>, ty: Ty<'tcx>) -> u128 {
self.try_eval_bits(tcx, param_env, ty)
.unwrap_or_else(|| bug!("expected bits of {:#?}, got {:#?}", ty, self))
}
#[inline]
pub fn try_eval_target_usize(
self,
tcx: TyCtxt<'tcx>,
param_env: ty::ParamEnv<'tcx>,
) -> Option<u64> {
self.try_eval_scalar_int(tcx, param_env)?.try_to_target_usize(tcx).ok()
}
#[inline]
/// Panics if the value cannot be evaluated or doesn't contain a valid `usize`.
pub fn eval_target_usize(self, tcx: TyCtxt<'tcx>, param_env: ty::ParamEnv<'tcx>) -> u64 {
self.try_eval_target_usize(tcx, param_env)
.unwrap_or_else(|| bug!("expected usize, got {:#?}", self))
}
#[inline]
pub fn try_eval_bool(self, tcx: TyCtxt<'tcx>, param_env: ty::ParamEnv<'tcx>) -> Option<bool> {
self.try_eval_scalar_int(tcx, param_env)?.try_into().ok()
}
#[inline]
pub fn from_value(val: ConstValue<'tcx>, ty: Ty<'tcx>) -> Self {
Self::Val(val, ty)
}
pub fn from_bits(
tcx: TyCtxt<'tcx>,
bits: u128,
param_env_ty: ty::ParamEnvAnd<'tcx, Ty<'tcx>>,
) -> Self {
let size = tcx
.layout_of(param_env_ty)
.unwrap_or_else(|e| {
bug!("could not compute layout for {:?}: {:?}", param_env_ty.value, e)
})
.size;
let cv = ConstValue::Scalar(Scalar::from_uint(bits, size));
Self::Val(cv, param_env_ty.value)
}
#[inline]
pub fn from_bool(tcx: TyCtxt<'tcx>, v: bool) -> Self {
let cv = ConstValue::from_bool(v);
Self::Val(cv, tcx.types.bool)
}
#[inline]
pub fn zero_sized(ty: Ty<'tcx>) -> Self {
let cv = ConstValue::ZeroSized;
Self::Val(cv, ty)
}
pub fn from_usize(tcx: TyCtxt<'tcx>, n: u64) -> Self {
let ty = tcx.types.usize;
Self::from_bits(tcx, n as u128, ty::ParamEnv::empty().and(ty))
}
#[inline]
pub fn from_scalar(_tcx: TyCtxt<'tcx>, s: Scalar, ty: Ty<'tcx>) -> Self {
let val = ConstValue::Scalar(s);
Self::Val(val, ty)
}
/// Literals are converted to `ConstantKind::Val`, const generic parameters are eagerly
/// converted to a constant, everything else becomes `Unevaluated`.
#[instrument(skip(tcx), level = "debug", ret)]
pub fn from_anon_const(
tcx: TyCtxt<'tcx>,
def: LocalDefId,
param_env: ty::ParamEnv<'tcx>,
) -> Self {
let body_id = match tcx.hir().get_by_def_id(def) {
hir::Node::AnonConst(ac) => ac.body,
_ => {
span_bug!(tcx.def_span(def), "from_anon_const can only process anonymous constants")
}
};
let expr = &tcx.hir().body(body_id).value;
debug!(?expr);
// Unwrap a block, so that e.g. `{ P }` is recognised as a parameter. Const arguments
// currently have to be wrapped in curly brackets, so it's necessary to special-case.
let expr = match &expr.kind {
hir::ExprKind::Block(block, _) if block.stmts.is_empty() && block.expr.is_some() => {
block.expr.as_ref().unwrap()
}
_ => expr,
};
debug!("expr.kind: {:?}", expr.kind);
let ty = tcx.type_of(def).instantiate_identity();
debug!(?ty);
// FIXME(const_generics): We currently have to special case parameters because `min_const_generics`
// does not provide the parent's generics to anonymous constants. We still allow generic const
// parameters by themselves however, e.g. `N`. These constants would cause an ICE if we were to
// ever try to substitute the generic parameters in their bodies.
//
// While this doesn't happen as these constants are always used as `ty::ConstKind::Param`, it does
// cause issues if we were to remove that special-case and try to evaluate the constant instead.
use hir::{def::DefKind::ConstParam, def::Res, ExprKind, Path, QPath};
match expr.kind {
ExprKind::Path(QPath::Resolved(_, &Path { res: Res::Def(ConstParam, def_id), .. })) => {
// Find the name and index of the const parameter by indexing the generics of
// the parent item and construct a `ParamConst`.
let item_def_id = tcx.parent(def_id);
let generics = tcx.generics_of(item_def_id);
let index = generics.param_def_id_to_index[&def_id];
let name = tcx.item_name(def_id);
let ty_const = ty::Const::new_param(tcx, ty::ParamConst::new(index, name), ty);
debug!(?ty_const);
return Self::Ty(ty_const);
}
_ => {}
}
let hir_id = tcx.hir().local_def_id_to_hir_id(def);
let parent_args = if let Some(parent_hir_id) = tcx.hir().opt_parent_id(hir_id)
&& let Some(parent_did) = parent_hir_id.as_owner()
{
GenericArgs::identity_for_item(tcx, parent_did)
} else {
List::empty()
};
debug!(?parent_args);
let did = def.to_def_id();
let child_args = GenericArgs::identity_for_item(tcx, did);
let args = tcx.mk_args_from_iter(parent_args.into_iter().chain(child_args.into_iter()));
debug!(?args);
let span = tcx.def_span(def);
let uneval = UnevaluatedConst::new(did, args);
debug!(?span, ?param_env);
match tcx.const_eval_resolve(param_env, uneval, Some(span)) {
Ok(val) => {
debug!("evaluated const value");
Self::Val(val, ty)
}
Err(_) => {
debug!("error encountered during evaluation");
// Error was handled in `const_eval_resolve`. Here we just create a
// new unevaluated const and error hard later in codegen
Self::Unevaluated(
UnevaluatedConst {
def: did,
args: GenericArgs::identity_for_item(tcx, did),
promoted: None,
},
ty,
)
}
}
}
pub fn from_ty_const(c: ty::Const<'tcx>, tcx: TyCtxt<'tcx>) -> Self {
match c.kind() {
ty::ConstKind::Value(valtree) => {
// Make sure that if `c` is normalized, then the return value is normalized.
let const_val = tcx.valtree_to_const_val((c.ty(), valtree));
Self::Val(const_val, c.ty())
}
_ => Self::Ty(c),
}
}
}
/// An unevaluated (potentially generic) constant used in MIR.
#[derive(Copy, Clone, Debug, Eq, PartialEq, PartialOrd, Ord, TyEncodable, TyDecodable)]
#[derive(Hash, HashStable, TypeFoldable, TypeVisitable)]
pub struct UnevaluatedConst<'tcx> {
pub def: DefId,
pub args: GenericArgsRef<'tcx>,
pub promoted: Option<Promoted>,
}
impl<'tcx> UnevaluatedConst<'tcx> {
#[inline]
pub fn shrink(self) -> ty::UnevaluatedConst<'tcx> {
assert_eq!(self.promoted, None);
ty::UnevaluatedConst { def: self.def, args: self.args }
}
}
impl<'tcx> UnevaluatedConst<'tcx> {
#[inline]
pub fn new(def: DefId, args: GenericArgsRef<'tcx>) -> UnevaluatedConst<'tcx> {
UnevaluatedConst { def, args, promoted: Default::default() }
}
#[inline]
pub fn from_instance(instance: ty::Instance<'tcx>) -> Self {
UnevaluatedConst::new(instance.def_id(), instance.args)
}
}
impl<'tcx> Debug for Constant<'tcx> {
fn fmt(&self, fmt: &mut Formatter<'_>) -> fmt::Result {
write!(fmt, "{self}")
}
}
impl<'tcx> Display for Constant<'tcx> {
fn fmt(&self, fmt: &mut Formatter<'_>) -> fmt::Result {
match self.ty().kind() {
ty::FnDef(..) => {}
_ => write!(fmt, "const ")?,
}
Display::fmt(&self.literal, fmt)
}
}
impl<'tcx> Display for ConstantKind<'tcx> {
fn fmt(&self, fmt: &mut Formatter<'_>) -> fmt::Result {
match *self {
ConstantKind::Ty(c) => pretty_print_const(c, fmt, true),
ConstantKind::Val(val, ty) => pretty_print_const_value(val, ty, fmt),
// FIXME(valtrees): Correctly print mir constants.
ConstantKind::Unevaluated(..) => {
fmt.write_str("_")?;
Ok(())
}
}
}
}

View file

@ -1,7 +1,7 @@
use super::{AllocId, AllocRange, ConstAlloc, Pointer, Scalar};
use super::{AllocId, AllocRange, Pointer, Scalar};
use crate::error;
use crate::mir::interpret::ConstValue;
use crate::mir::{ConstAlloc, ConstValue};
use crate::query::TyCtxtAt;
use crate::ty::{layout, tls, Ty, TyCtxt, ValTree};

View file

@ -149,7 +149,7 @@ pub use self::error::{
UnsupportedOpInfo, ValidationErrorInfo, ValidationErrorKind,
};
pub use self::value::{ConstAlloc, ConstValue, Scalar};
pub use self::value::Scalar;
pub use self::allocation::{
alloc_range, AllocBytes, AllocError, AllocRange, AllocResult, Allocation, ConstAllocation,

View file

@ -9,163 +9,9 @@ use rustc_apfloat::{
use rustc_macros::HashStable;
use rustc_target::abi::{HasDataLayout, Size};
use crate::{
mir::interpret::alloc_range,
ty::{ParamEnv, ScalarInt, Ty, TyCtxt},
};
use crate::ty::ScalarInt;
use super::{
AllocId, ConstAllocation, InterpResult, Pointer, PointerArithmetic, Provenance,
ScalarSizeMismatch,
};
/// Represents the result of const evaluation via the `eval_to_allocation` query.
#[derive(Copy, Clone, HashStable, TyEncodable, TyDecodable, Debug, Hash, Eq, PartialEq)]
pub struct ConstAlloc<'tcx> {
/// The value lives here, at offset 0, and that allocation definitely is an `AllocKind::Memory`
/// (so you can use `AllocMap::unwrap_memory`).
pub alloc_id: AllocId,
pub ty: Ty<'tcx>,
}
/// Represents a constant value in Rust. `Scalar` and `Slice` are optimizations for
/// array length computations, enum discriminants and the pattern matching logic.
#[derive(Copy, Clone, Debug, Eq, PartialEq, TyEncodable, TyDecodable, Hash)]
#[derive(HashStable, Lift)]
pub enum ConstValue<'tcx> {
/// Used for types with `layout::abi::Scalar` ABI.
///
/// Not using the enum `Value` to encode that this must not be `Uninit`.
Scalar(Scalar),
/// Only for ZSTs.
ZeroSized,
/// Used for `&[u8]` and `&str`.
///
/// This is worth an optimized representation since Rust has literals of these types.
/// Not having to indirect those through an `AllocId` (or two, if we used `Indirect`) has shown
/// measurable performance improvements on stress tests.
Slice { data: ConstAllocation<'tcx>, start: usize, end: usize },
/// A value not representable by the other variants; needs to be stored in-memory.
///
/// Must *not* be used for scalars or ZSTs, but having `&str` or other slices in this variant is fine.
Indirect {
/// The backing memory of the value. May contain more memory than needed for just the value
/// if this points into some other larger ConstValue.
///
/// We use an `AllocId` here instead of a `ConstAllocation<'tcx>` to make sure that when a
/// raw constant (which is basically just an `AllocId`) is turned into a `ConstValue` and
/// back, we can preserve the original `AllocId`.
alloc_id: AllocId,
/// Offset into `alloc`
offset: Size,
},
}
#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
static_assert_size!(ConstValue<'_>, 32);
impl<'tcx> ConstValue<'tcx> {
#[inline]
pub fn try_to_scalar(&self) -> Option<Scalar<AllocId>> {
match *self {
ConstValue::Indirect { .. } | ConstValue::Slice { .. } | ConstValue::ZeroSized => None,
ConstValue::Scalar(val) => Some(val),
}
}
pub fn try_to_scalar_int(&self) -> Option<ScalarInt> {
self.try_to_scalar()?.try_to_int().ok()
}
pub fn try_to_bits(&self, size: Size) -> Option<u128> {
self.try_to_scalar_int()?.to_bits(size).ok()
}
pub fn try_to_bool(&self) -> Option<bool> {
self.try_to_scalar_int()?.try_into().ok()
}
pub fn try_to_target_usize(&self, tcx: TyCtxt<'tcx>) -> Option<u64> {
self.try_to_scalar_int()?.try_to_target_usize(tcx).ok()
}
pub fn try_to_bits_for_ty(
&self,
tcx: TyCtxt<'tcx>,
param_env: ParamEnv<'tcx>,
ty: Ty<'tcx>,
) -> Option<u128> {
let size = tcx.layout_of(param_env.with_reveal_all_normalized(tcx).and(ty)).ok()?.size;
self.try_to_bits(size)
}
pub fn from_bool(b: bool) -> Self {
ConstValue::Scalar(Scalar::from_bool(b))
}
pub fn from_u64(i: u64) -> Self {
ConstValue::Scalar(Scalar::from_u64(i))
}
pub fn from_u128(i: u128) -> Self {
ConstValue::Scalar(Scalar::from_u128(i))
}
pub fn from_target_usize(i: u64, cx: &impl HasDataLayout) -> Self {
ConstValue::Scalar(Scalar::from_target_usize(i, cx))
}
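// A minimal usage sketch of these constructors (illustrative values):
// `ConstValue::from_bool(true)` and `ConstValue::from_u64(42)` both build the
// `Scalar` variant, while the value of `()` would be `ConstValue::ZeroSized`.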
/// Must only be called on constants of type `&str` or `&[u8]`!
pub fn try_get_slice_bytes_for_diagnostics(&self, tcx: TyCtxt<'tcx>) -> Option<&'tcx [u8]> {
let (data, start, end) = match self {
ConstValue::Scalar(_) | ConstValue::ZeroSized => {
bug!("`try_get_slice_bytes` on non-slice constant")
}
&ConstValue::Slice { data, start, end } => (data, start, end),
&ConstValue::Indirect { alloc_id, offset } => {
// The reference itself is stored behind an indirection.
// Load the reference, and then load the actual slice contents.
let a = tcx.global_alloc(alloc_id).unwrap_memory().inner();
let ptr_size = tcx.data_layout.pointer_size;
if a.size() < offset + 2 * ptr_size {
// (partially) dangling reference
return None;
}
// Read the wide pointer components.
let ptr = a
.read_scalar(
&tcx,
alloc_range(offset, ptr_size),
/* read_provenance */ true,
)
.ok()?;
let ptr = ptr.to_pointer(&tcx).ok()?;
let len = a
.read_scalar(
&tcx,
alloc_range(offset + ptr_size, ptr_size),
/* read_provenance */ false,
)
.ok()?;
let len = len.to_target_usize(&tcx).ok()?;
let len: usize = len.try_into().ok()?;
if len == 0 {
return Some(&[]);
}
// Non-empty slice, must have memory. We know this is a relative pointer.
let (inner_alloc_id, offset) = ptr.into_parts();
let data = tcx.global_alloc(inner_alloc_id?).unwrap_memory();
(data, offset.bytes_usize(), offset.bytes_usize() + len)
}
};
// This is for diagnostics only, so we are okay to use `inspect_with_uninit_and_ptr_outside_interpreter`.
Some(data.inner().inspect_with_uninit_and_ptr_outside_interpreter(start..end))
}
}
use super::{AllocId, InterpResult, Pointer, PointerArithmetic, Provenance, ScalarSizeMismatch};
/// A `Scalar` represents an immediate, primitive value existing outside of a
/// `memory::Allocation`. It is in many ways like a small chunk of an `Allocation`, up to 16 bytes in
@ -327,6 +173,16 @@ impl<Prov> Scalar<Prov> {
.unwrap_or_else(|| bug!("Signed value {:#x} does not fit in {} bits", i, size.bits()))
}
#[inline]
pub fn from_i8(i: i8) -> Self {
Self::from_int(i, Size::from_bits(8))
}
#[inline]
pub fn from_i16(i: i16) -> Self {
Self::from_int(i, Size::from_bits(16))
}
#[inline]
pub fn from_i32(i: i32) -> Self {
Self::from_int(i, Size::from_bits(32))
@ -554,15 +410,19 @@ impl<'tcx, Prov: Provenance> Scalar<Prov> {
Ok(i64::try_from(b).unwrap())
}
#[inline]
pub fn to_float<F: Float>(self) -> InterpResult<'tcx, F> {
// Going through `to_uint` to check size and truncation.
Ok(F::from_bits(self.to_uint(Size::from_bits(F::BITS))?))
}
#[inline]
pub fn to_f32(self) -> InterpResult<'tcx, Single> {
// Going through `u32` to check size and truncation.
Ok(Single::from_bits(self.to_u32()?.into()))
self.to_float()
}
#[inline]
pub fn to_f64(self) -> InterpResult<'tcx, Double> {
// Going through `u64` to check size and truncation.
Ok(Double::from_bits(self.to_u64()?.into()))
self.to_float()
}
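// Design note: `to_f32` and `to_f64` now both delegate to the generic
// `to_float`, so the size check and truncation handling live in one place
// for every float width.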
}

File diff suppressed because it is too large

File diff suppressed because it is too large

View file

@ -1,6 +1,5 @@
//! Values computed by queries that use MIR.
use crate::mir::interpret::ConstValue;
use crate::ty::{self, OpaqueHiddenType, Ty, TyCtxt};
use rustc_data_structures::fx::FxIndexMap;
use rustc_data_structures::unord::UnordSet;
@ -16,7 +15,7 @@ use smallvec::SmallVec;
use std::cell::Cell;
use std::fmt::{self, Debug};
use super::SourceInfo;
use super::{ConstValue, SourceInfo};
#[derive(Copy, Clone, PartialEq, TyEncodable, TyDecodable, HashStable, Debug)]
pub enum UnsafetyViolationKind {

View file

@ -0,0 +1,441 @@
/// Functionality for statements, operands, places, and things that appear in them.
use super::*;
///////////////////////////////////////////////////////////////////////////
// Statements
/// A statement in a basic block, including information about its source code.
#[derive(Clone, TyEncodable, TyDecodable, HashStable, TypeFoldable, TypeVisitable)]
pub struct Statement<'tcx> {
pub source_info: SourceInfo,
pub kind: StatementKind<'tcx>,
}
impl Statement<'_> {
/// Changes a statement to a nop. This is both faster than deleting instructions and avoids
/// invalidating statement indices in `Location`s.
pub fn make_nop(&mut self) {
self.kind = StatementKind::Nop
}
/// Changes a statement to a nop and returns the original statement.
#[must_use = "If you don't need the statement, use `make_nop` instead"]
pub fn replace_nop(&mut self) -> Self {
Statement {
source_info: self.source_info,
kind: mem::replace(&mut self.kind, StatementKind::Nop),
}
}
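// Sketch of typical use in a MIR pass (with hypothetical `body` and `loc`):
// `body.basic_blocks_mut()[loc.block].statements[loc.statement_index].make_nop()`
// removes the statement's effect while keeping later `Location`s valid.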
}
impl<'tcx> StatementKind<'tcx> {
pub fn as_assign_mut(&mut self) -> Option<&mut (Place<'tcx>, Rvalue<'tcx>)> {
match self {
StatementKind::Assign(x) => Some(x),
_ => None,
}
}
pub fn as_assign(&self) -> Option<&(Place<'tcx>, Rvalue<'tcx>)> {
match self {
StatementKind::Assign(x) => Some(x),
_ => None,
}
}
}
///////////////////////////////////////////////////////////////////////////
// Places
impl<V, T> ProjectionElem<V, T> {
/// Returns `true` if the target of this projection may refer to a different region of memory
/// than the base.
fn is_indirect(&self) -> bool {
match self {
Self::Deref => true,
Self::Field(_, _)
| Self::Index(_)
| Self::OpaqueCast(_)
| Self::ConstantIndex { .. }
| Self::Subslice { .. }
| Self::Downcast(_, _) => false,
}
}
/// Returns `true` if the target of this projection always refers to the same memory region
/// whatever the state of the program.
pub fn is_stable_offset(&self) -> bool {
match self {
Self::Deref | Self::Index(_) => false,
Self::Field(_, _)
| Self::OpaqueCast(_)
| Self::ConstantIndex { .. }
| Self::Subslice { .. }
| Self::Downcast(_, _) => true,
}
}
/// Returns `true` if this is a `Downcast` projection with the given `VariantIdx`.
pub fn is_downcast_to(&self, v: VariantIdx) -> bool {
matches!(*self, Self::Downcast(_, x) if x == v)
}
/// Returns `true` if this is a `Field` projection with the given index.
pub fn is_field_to(&self, f: FieldIdx) -> bool {
matches!(*self, Self::Field(x, _) if x == f)
}
/// Returns `true` if this is accepted inside `VarDebugInfoContents::Place`.
pub fn can_use_in_debuginfo(&self) -> bool {
match self {
Self::ConstantIndex { from_end: false, .. }
| Self::Deref
| Self::Downcast(_, _)
| Self::Field(_, _) => true,
Self::ConstantIndex { from_end: true, .. }
| Self::Index(_)
| Self::OpaqueCast(_)
| Self::Subslice { .. } => false,
}
}
}
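// Example: the place `(*_1).f[_2]` carries the projections
// `[Deref, Field(f), Index(_2)]`; only `Deref` is indirect, and both `Deref`
// and `Index(_2)` fail `is_stable_offset` because their targets depend on the
// program state.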
/// Alias for projections as they appear in `UserTypeProjection`, where we
/// need neither the `V` parameter for `Index` nor the `T` for `Field`.
pub type ProjectionKind = ProjectionElem<(), ()>;
#[derive(Clone, Copy, PartialEq, Eq, Hash)]
pub struct PlaceRef<'tcx> {
pub local: Local,
pub projection: &'tcx [PlaceElem<'tcx>],
}
// Once we stop implementing `Ord` for `DefId`,
// this impl will be unnecessary. Until then, we'll
// leave this impl in place to prevent re-adding a
// dependency on the `Ord` impl for `DefId`
impl<'tcx> !PartialOrd for PlaceRef<'tcx> {}
impl<'tcx> Place<'tcx> {
// FIXME change this to a const fn by also making List::empty a const fn.
pub fn return_place() -> Place<'tcx> {
Place { local: RETURN_PLACE, projection: List::empty() }
}
/// Returns `true` if this `Place` contains a `Deref` projection.
///
/// If `Place::is_indirect` returns false, the caller knows that the `Place` refers to the
/// same region of memory as its base.
pub fn is_indirect(&self) -> bool {
self.projection.iter().any(|elem| elem.is_indirect())
}
/// Returns `true` if this `Place`'s first projection is `Deref`.
///
/// This is useful because for MIR phases `AnalysisPhase::PostCleanup` and later,
/// `Deref` projections can only occur as the first projection. In that case this method
/// is equivalent to `is_indirect`, but faster.
pub fn is_indirect_first_projection(&self) -> bool {
self.as_ref().is_indirect_first_projection()
}
/// Finds the innermost `Local` from this `Place`, *if* it is either a local itself or
/// a single deref of a local.
#[inline(always)]
pub fn local_or_deref_local(&self) -> Option<Local> {
self.as_ref().local_or_deref_local()
}
/// If this place represents a local variable like `_X` with no
/// projections, return `Some(_X)`.
#[inline(always)]
pub fn as_local(&self) -> Option<Local> {
self.as_ref().as_local()
}
#[inline]
pub fn as_ref(&self) -> PlaceRef<'tcx> {
PlaceRef { local: self.local, projection: &self.projection }
}
/// Iterate over the projections in evaluation order, i.e., the first element is the base with
/// its projection and then subsequently more projections are added.
/// As a concrete example, given the place a.b.c, this would yield:
/// - (a, .b)
/// - (a.b, .c)
///
/// Given a place without projections, the iterator is empty.
#[inline]
pub fn iter_projections(
self,
) -> impl Iterator<Item = (PlaceRef<'tcx>, PlaceElem<'tcx>)> + DoubleEndedIterator {
self.as_ref().iter_projections()
}
/// Generates a new place by appending `more_projections` to the existing ones
/// and interning the result.
pub fn project_deeper(self, more_projections: &[PlaceElem<'tcx>], tcx: TyCtxt<'tcx>) -> Self {
if more_projections.is_empty() {
return self;
}
self.as_ref().project_deeper(more_projections, tcx)
}
}
impl From<Local> for Place<'_> {
#[inline]
fn from(local: Local) -> Self {
Place { local, projection: List::empty() }
}
}
impl<'tcx> PlaceRef<'tcx> {
/// Finds the innermost `Local` from this `Place`, *if* it is either a local itself or
/// a single deref of a local.
pub fn local_or_deref_local(&self) -> Option<Local> {
match *self {
PlaceRef { local, projection: [] }
| PlaceRef { local, projection: [ProjectionElem::Deref] } => Some(local),
_ => None,
}
}
/// Returns `true` if this `Place` contains a `Deref` projection.
///
/// If `Place::is_indirect` returns false, the caller knows that the `Place` refers to the
/// same region of memory as its base.
pub fn is_indirect(&self) -> bool {
self.projection.iter().any(|elem| elem.is_indirect())
}
/// Returns `true` if this `Place`'s first projection is `Deref`.
///
/// This is useful because for MIR phases `AnalysisPhase::PostCleanup` and later,
/// `Deref` projections can only occur as the first projection. In that case this method
/// is equivalent to `is_indirect`, but faster.
pub fn is_indirect_first_projection(&self) -> bool {
// To make sure this is not accidentally used in the wrong MIR phase
debug_assert!(
self.projection.is_empty() || !self.projection[1..].contains(&PlaceElem::Deref)
);
self.projection.first() == Some(&PlaceElem::Deref)
}
/// If this place represents a local variable like `_X` with no
/// projections, return `Some(_X)`.
#[inline]
pub fn as_local(&self) -> Option<Local> {
match *self {
PlaceRef { local, projection: [] } => Some(local),
_ => None,
}
}
#[inline]
pub fn last_projection(&self) -> Option<(PlaceRef<'tcx>, PlaceElem<'tcx>)> {
if let &[ref proj_base @ .., elem] = self.projection {
Some((PlaceRef { local: self.local, projection: proj_base }, elem))
} else {
None
}
}
/// Iterate over the projections in evaluation order, i.e., the first element is the base with
/// its projection and then subsequently more projections are added.
/// As a concrete example, given the place a.b.c, this would yield:
/// - (a, .b)
/// - (a.b, .c)
///
/// Given a place without projections, the iterator is empty.
#[inline]
pub fn iter_projections(
self,
) -> impl Iterator<Item = (PlaceRef<'tcx>, PlaceElem<'tcx>)> + DoubleEndedIterator {
self.projection.iter().enumerate().map(move |(i, proj)| {
let base = PlaceRef { local: self.local, projection: &self.projection[..i] };
(base, *proj)
})
}
/// Generates a new place by appending `more_projections` to the existing ones
/// and interning the result.
pub fn project_deeper(
self,
more_projections: &[PlaceElem<'tcx>],
tcx: TyCtxt<'tcx>,
) -> Place<'tcx> {
let mut v: Vec<PlaceElem<'tcx>>;
let new_projections = if self.projection.is_empty() {
more_projections
} else {
v = Vec::with_capacity(self.projection.len() + more_projections.len());
v.extend(self.projection);
v.extend(more_projections);
&v
};
Place { local: self.local, projection: tcx.mk_place_elems(new_projections) }
}
}
impl From<Local> for PlaceRef<'_> {
#[inline]
fn from(local: Local) -> Self {
PlaceRef { local, projection: &[] }
}
}
///////////////////////////////////////////////////////////////////////////
// Operands
impl<'tcx> Operand<'tcx> {
/// Convenience helper to make a constant that refers to the fn
/// with the given `DefId` and args. Since this is used to synthesize
/// MIR, assumes `user_ty` is None.
pub fn function_handle(
tcx: TyCtxt<'tcx>,
def_id: DefId,
args: impl IntoIterator<Item = GenericArg<'tcx>>,
span: Span,
) -> Self {
let ty = Ty::new_fn_def(tcx, def_id, args);
Operand::Constant(Box::new(Constant {
span,
user_ty: None,
literal: ConstantKind::Val(ConstValue::ZeroSized, ty),
}))
}
pub fn is_move(&self) -> bool {
matches!(self, Operand::Move(..))
}
/// Convenience helper to make a literal-like constant from a given scalar value.
/// Since this is used to synthesize MIR, assumes `user_ty` is None.
pub fn const_from_scalar(
tcx: TyCtxt<'tcx>,
ty: Ty<'tcx>,
val: Scalar,
span: Span,
) -> Operand<'tcx> {
debug_assert!({
let param_env_and_ty = ty::ParamEnv::empty().and(ty);
let type_size = tcx
.layout_of(param_env_and_ty)
.unwrap_or_else(|e| panic!("could not compute layout for {ty:?}: {e:?}"))
.size;
let scalar_size = match val {
Scalar::Int(int) => int.size(),
_ => panic!("Invalid scalar type {val:?}"),
};
scalar_size == type_size
});
Operand::Constant(Box::new(Constant {
span,
user_ty: None,
literal: ConstantKind::Val(ConstValue::Scalar(val), ty),
}))
}
pub fn to_copy(&self) -> Self {
match *self {
Operand::Copy(_) | Operand::Constant(_) => self.clone(),
Operand::Move(place) => Operand::Copy(place),
}
}
/// Returns the `Place` that is the target of this `Operand`, or `None` if this `Operand` is a
/// constant.
pub fn place(&self) -> Option<Place<'tcx>> {
match self {
Operand::Copy(place) | Operand::Move(place) => Some(*place),
Operand::Constant(_) => None,
}
}
/// Returns the `Constant` that is the target of this `Operand`, or `None` if this `Operand` is a
/// place.
pub fn constant(&self) -> Option<&Constant<'tcx>> {
match self {
Operand::Constant(x) => Some(&**x),
Operand::Copy(_) | Operand::Move(_) => None,
}
}
/// Gets the `ty::FnDef` from an operand if it's a constant function item.
///
/// While this is unlikely in general, it's the normal case of what you'll
/// find as the `func` in a [`TerminatorKind::Call`].
pub fn const_fn_def(&self) -> Option<(DefId, GenericArgsRef<'tcx>)> {
let const_ty = self.constant()?.literal.ty();
if let ty::FnDef(def_id, args) = *const_ty.kind() { Some((def_id, args)) } else { None }
}
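// Typical use (sketch): for a direct call terminator
// `TerminatorKind::Call { func, .. }`, `func.const_fn_def()` yields
// `Some((def_id, args))`, since `func` is usually a zero-sized `FnDef`
// constant rather than a runtime function pointer.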
}
///////////////////////////////////////////////////////////////////////////
/// Rvalues
impl<'tcx> Rvalue<'tcx> {
/// Returns true if the rvalue can be safely removed when the result is unused.
#[inline]
pub fn is_safe_to_remove(&self) -> bool {
match self {
// Pointer-to-int casts may have side effects due to exposing the provenance.
// While the model is undecided, we should be conservative. See
// <https://www.ralfj.de/blog/2022/04/11/provenance-exposed.html>
Rvalue::Cast(CastKind::PointerExposeAddress, _, _) => false,
Rvalue::Use(_)
| Rvalue::CopyForDeref(_)
| Rvalue::Repeat(_, _)
| Rvalue::Ref(_, _, _)
| Rvalue::ThreadLocalRef(_)
| Rvalue::AddressOf(_, _)
| Rvalue::Len(_)
| Rvalue::Cast(
CastKind::IntToInt
| CastKind::FloatToInt
| CastKind::FloatToFloat
| CastKind::IntToFloat
| CastKind::FnPtrToPtr
| CastKind::PtrToPtr
| CastKind::PointerCoercion(_)
| CastKind::PointerFromExposedAddress
| CastKind::DynStar
| CastKind::Transmute,
_,
_,
)
| Rvalue::BinaryOp(_, _)
| Rvalue::CheckedBinaryOp(_, _)
| Rvalue::NullaryOp(_, _)
| Rvalue::UnaryOp(_, _)
| Rvalue::Discriminant(_)
| Rvalue::Aggregate(_, _)
| Rvalue::ShallowInitBox(_, _) => true,
}
}
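// Example: `_2 = _1 as usize (PointerExposeAddress)` is not safe to remove
// even if `_2` is never read, because the cast itself exposes `_1`'s
// provenance.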
}
impl BorrowKind {
pub fn mutability(&self) -> Mutability {
match *self {
BorrowKind::Shared | BorrowKind::Shallow => Mutability::Not,
BorrowKind::Mut { .. } => Mutability::Mut,
}
}
pub fn allows_two_phase_borrow(&self) -> bool {
match *self {
BorrowKind::Shared
| BorrowKind::Shallow
| BorrowKind::Mut { kind: MutBorrowKind::Default | MutBorrowKind::ClosureCapture } => {
false
}
BorrowKind::Mut { kind: MutBorrowKind::TwoPhaseBorrow } => true,
}
}
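// Example: two-phase borrows are what let `v.push(v.len())` compile; the
// `&mut v` taken for `push` starts out as a reservation, so the shared borrow
// needed by `v.len()` is still permitted before the call actually begins.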
}

View file

@ -3,7 +3,7 @@
//! This is in a dedicated file so that changes to this file can be reviewed more carefully.
//! The intention is that this file only contains datatype declarations, no code.
use super::{BasicBlock, Constant, Local, SwitchTargets, UserTypeProjection};
use super::{BasicBlock, Constant, Local, UserTypeProjection};
use crate::mir::coverage::{CodeRegion, CoverageKind};
use crate::traits::Reveal;
@ -24,6 +24,7 @@ use rustc_span::def_id::LocalDefId;
use rustc_span::symbol::Symbol;
use rustc_span::Span;
use rustc_target::asm::InlineAsmRegOrRegClass;
use smallvec::SmallVec;
/// Represents the "flavors" of MIR.
///
@ -828,6 +829,27 @@ impl TerminatorKind<'_> {
}
}
#[derive(Debug, Clone, TyEncodable, TyDecodable, Hash, HashStable, PartialEq)]
pub struct SwitchTargets {
/// Possible values. The locations to branch to in each case
/// are found in the corresponding indices from the `targets` vector.
pub(super) values: SmallVec<[u128; 1]>,
/// Possible branch sites. The last element of this vector is used
/// for the otherwise branch, so targets.len() == values.len() + 1
/// should hold.
//
// This invariant is quite non-obvious and also could be improved.
// One way to enforce this invariant would be to have something like this instead:
//
// branches: Vec<(ConstInt, BasicBlock)>,
// otherwise: Option<BasicBlock> // exhaustive if None
//
// However we've decided to keep this as-is until we find a case
// where some other approach is strictly better than this one.
pub(super) targets: SmallVec<[BasicBlock; 2]>,
}
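// Example: `switchInt(_1) -> [0: bb1, 7: bb2, otherwise: bb3]` is stored as
// `values: [0, 7]` and `targets: [bb1, bb2, bb3]`, satisfying
// `targets.len() == values.len() + 1`.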
/// Action to be taken when a stack unwind happens.
#[derive(Copy, Clone, Debug, PartialEq, Eq, TyEncodable, TyDecodable, Hash, HashStable)]
#[derive(TypeFoldable, TypeVisitable)]

View file

@ -1,39 +1,16 @@
/// Functionality for terminators and helper types that appear in terminators.
use rustc_hir::LangItem;
use smallvec::SmallVec;
use super::{BasicBlock, InlineAsmOperand, Operand, SourceInfo, TerminatorKind, UnwindAction};
use rustc_ast::InlineAsmTemplatePiece;
pub use rustc_ast::Mutability;
use rustc_macros::HashStable;
use std::borrow::Cow;
use std::fmt::{self, Debug, Formatter, Write};
use std::iter;
use std::slice;
pub use super::query::*;
use super::*;
#[derive(Debug, Clone, TyEncodable, TyDecodable, Hash, HashStable, PartialEq)]
pub struct SwitchTargets {
/// Possible values. The locations to branch to in each case
/// are found in the corresponding indices from the `targets` vector.
values: SmallVec<[u128; 1]>,
/// Possible branch sites. The last element of this vector is used
/// for the otherwise branch, so targets.len() == values.len() + 1
/// should hold.
//
// This invariant is quite non-obvious and also could be improved.
// One way to enforce this invariant would be to have something like this instead:
//
// branches: Vec<(ConstInt, BasicBlock)>,
// otherwise: Option<BasicBlock> // exhaustive if None
//
// However we've decided to keep this as-is until we find a case
// where some other approach is strictly better than this one.
targets: SmallVec<[BasicBlock; 2]>,
}
impl SwitchTargets {
/// Creates switch targets from an iterator of values and target blocks.
///
@ -135,6 +112,168 @@ impl UnwindTerminateReason {
}
}
impl<O> AssertKind<O> {
/// Returns true if this is an overflow-checking assertion controlled by -C overflow-checks.
pub fn is_optional_overflow_check(&self) -> bool {
use AssertKind::*;
use BinOp::*;
matches!(self, OverflowNeg(..) | Overflow(Add | Sub | Mul | Shl | Shr, ..))
}
/// Get the message that is printed at runtime when this assertion fails.
///
/// The caller is expected to handle `BoundsCheck` and `MisalignedPointerDereference` by
/// invoking the appropriate lang item (panic_bounds_check/panic_misaligned_pointer_dereference)
/// instead of printing a static message.
pub fn description(&self) -> &'static str {
use AssertKind::*;
match self {
Overflow(BinOp::Add, _, _) => "attempt to add with overflow",
Overflow(BinOp::Sub, _, _) => "attempt to subtract with overflow",
Overflow(BinOp::Mul, _, _) => "attempt to multiply with overflow",
Overflow(BinOp::Div, _, _) => "attempt to divide with overflow",
Overflow(BinOp::Rem, _, _) => "attempt to calculate the remainder with overflow",
OverflowNeg(_) => "attempt to negate with overflow",
Overflow(BinOp::Shr, _, _) => "attempt to shift right with overflow",
Overflow(BinOp::Shl, _, _) => "attempt to shift left with overflow",
Overflow(op, _, _) => bug!("{:?} cannot overflow", op),
DivisionByZero(_) => "attempt to divide by zero",
RemainderByZero(_) => "attempt to calculate the remainder with a divisor of zero",
ResumedAfterReturn(GeneratorKind::Gen) => "generator resumed after completion",
ResumedAfterReturn(GeneratorKind::Async(_)) => "`async fn` resumed after completion",
ResumedAfterPanic(GeneratorKind::Gen) => "generator resumed after panicking",
ResumedAfterPanic(GeneratorKind::Async(_)) => "`async fn` resumed after panicking",
BoundsCheck { .. } | MisalignedPointerDereference { .. } => {
bug!("Unexpected AssertKind")
}
}
}
/// Format the message arguments for the `assert(cond, msg..)` terminator in MIR printing.
///
/// Needs to be kept in sync with the run-time behavior (which is defined by
/// `AssertKind::description` and the lang items mentioned in its docs).
/// Note that we deliberately show more details here than we do at runtime, such as the actual
/// numbers that overflowed -- it is much easier to do so here than at runtime.
pub fn fmt_assert_args<W: fmt::Write>(&self, f: &mut W) -> fmt::Result
where
O: Debug,
{
use AssertKind::*;
match self {
BoundsCheck { ref len, ref index } => write!(
f,
"\"index out of bounds: the length is {{}} but the index is {{}}\", {len:?}, {index:?}"
),
OverflowNeg(op) => {
write!(f, "\"attempt to negate `{{}}`, which would overflow\", {op:?}")
}
DivisionByZero(op) => write!(f, "\"attempt to divide `{{}}` by zero\", {op:?}"),
RemainderByZero(op) => write!(
f,
"\"attempt to calculate the remainder of `{{}}` with a divisor of zero\", {op:?}"
),
Overflow(BinOp::Add, l, r) => write!(
f,
"\"attempt to compute `{{}} + {{}}`, which would overflow\", {l:?}, {r:?}"
),
Overflow(BinOp::Sub, l, r) => write!(
f,
"\"attempt to compute `{{}} - {{}}`, which would overflow\", {l:?}, {r:?}"
),
Overflow(BinOp::Mul, l, r) => write!(
f,
"\"attempt to compute `{{}} * {{}}`, which would overflow\", {l:?}, {r:?}"
),
Overflow(BinOp::Div, l, r) => write!(
f,
"\"attempt to compute `{{}} / {{}}`, which would overflow\", {l:?}, {r:?}"
),
Overflow(BinOp::Rem, l, r) => write!(
f,
"\"attempt to compute the remainder of `{{}} % {{}}`, which would overflow\", {l:?}, {r:?}"
),
Overflow(BinOp::Shr, _, r) => {
write!(f, "\"attempt to shift right by `{{}}`, which would overflow\", {r:?}")
}
Overflow(BinOp::Shl, _, r) => {
write!(f, "\"attempt to shift left by `{{}}`, which would overflow\", {r:?}")
}
MisalignedPointerDereference { required, found } => {
write!(
f,
"\"misaligned pointer dereference: address must be a multiple of {{}} but is {{}}\", {required:?}, {found:?}"
)
}
_ => write!(f, "\"{}\"", self.description()),
}
}
/// Format the diagnostic message for use in a lint (e.g. when the assertion fails during const-eval).
///
/// Needs to be kept in sync with the run-time behavior (which is defined by
/// `AssertKind::description` and the lang items mentioned in its docs).
/// Note that we deliberately show more details here than we do at runtime, such as the actual
/// numbers that overflowed -- it is much easier to do so here than at runtime.
pub fn diagnostic_message(&self) -> DiagnosticMessage {
use crate::fluent_generated::*;
use AssertKind::*;
match self {
BoundsCheck { .. } => middle_bounds_check,
Overflow(BinOp::Shl, _, _) => middle_assert_shl_overflow,
Overflow(BinOp::Shr, _, _) => middle_assert_shr_overflow,
Overflow(_, _, _) => middle_assert_op_overflow,
OverflowNeg(_) => middle_assert_overflow_neg,
DivisionByZero(_) => middle_assert_divide_by_zero,
RemainderByZero(_) => middle_assert_remainder_by_zero,
ResumedAfterReturn(GeneratorKind::Async(_)) => middle_assert_async_resume_after_return,
ResumedAfterReturn(GeneratorKind::Gen) => middle_assert_generator_resume_after_return,
ResumedAfterPanic(GeneratorKind::Async(_)) => middle_assert_async_resume_after_panic,
ResumedAfterPanic(GeneratorKind::Gen) => middle_assert_generator_resume_after_panic,
MisalignedPointerDereference { .. } => middle_assert_misaligned_ptr_deref,
}
}
pub fn add_args(self, adder: &mut dyn FnMut(Cow<'static, str>, DiagnosticArgValue<'static>))
where
O: fmt::Debug,
{
use AssertKind::*;
macro_rules! add {
($name: expr, $value: expr) => {
adder($name.into(), $value.into_diagnostic_arg());
};
}
match self {
BoundsCheck { len, index } => {
add!("len", format!("{len:?}"));
add!("index", format!("{index:?}"));
}
Overflow(BinOp::Shl | BinOp::Shr, _, val)
| DivisionByZero(val)
| RemainderByZero(val)
| OverflowNeg(val) => {
add!("val", format!("{val:#?}"));
}
Overflow(binop, left, right) => {
add!("op", binop.to_hir_binop().as_str());
add!("left", format!("{left:#?}"));
add!("right", format!("{right:#?}"));
}
ResumedAfterReturn(_) | ResumedAfterPanic(_) => {}
MisalignedPointerDereference { required, found } => {
add!("required", format!("{required:#?}"));
add!("found", format!("{found:#?}"));
}
}
}
}
#[derive(Clone, Debug, TyEncodable, TyDecodable, HashStable, TypeFoldable, TypeVisitable)]
pub struct Terminator<'tcx> {
pub source_info: SourceInfo,
@ -299,187 +438,6 @@ impl<'tcx> TerminatorKind<'tcx> {
}
}
impl<'tcx> Debug for TerminatorKind<'tcx> {
fn fmt(&self, fmt: &mut Formatter<'_>) -> fmt::Result {
self.fmt_head(fmt)?;
let successor_count = self.successors().count();
let labels = self.fmt_successor_labels();
assert_eq!(successor_count, labels.len());
// `Cleanup` is already included in successors
let show_unwind = !matches!(self.unwind(), None | Some(UnwindAction::Cleanup(_)));
let fmt_unwind = |fmt: &mut Formatter<'_>| -> fmt::Result {
write!(fmt, "unwind ")?;
match self.unwind() {
// Not needed or included in successors
None | Some(UnwindAction::Cleanup(_)) => unreachable!(),
Some(UnwindAction::Continue) => write!(fmt, "continue"),
Some(UnwindAction::Unreachable) => write!(fmt, "unreachable"),
Some(UnwindAction::Terminate(reason)) => {
write!(fmt, "terminate({})", reason.as_short_str())
}
}
};
match (successor_count, show_unwind) {
(0, false) => Ok(()),
(0, true) => {
write!(fmt, " -> ")?;
fmt_unwind(fmt)
}
(1, false) => write!(fmt, " -> {:?}", self.successors().next().unwrap()),
_ => {
write!(fmt, " -> [")?;
for (i, target) in self.successors().enumerate() {
if i > 0 {
write!(fmt, ", ")?;
}
write!(fmt, "{}: {:?}", labels[i], target)?;
}
if show_unwind {
write!(fmt, ", ")?;
fmt_unwind(fmt)?;
}
write!(fmt, "]")
}
}
}
}
impl<'tcx> TerminatorKind<'tcx> {
/// Writes the "head" part of the terminator; that is, its name and the data it uses to pick the
/// successor basic block, if any. The only information not included is the list of possible
/// successors, which may be rendered differently between the text and the graphviz format.
pub fn fmt_head<W: Write>(&self, fmt: &mut W) -> fmt::Result {
use self::TerminatorKind::*;
match self {
Goto { .. } => write!(fmt, "goto"),
SwitchInt { discr, .. } => write!(fmt, "switchInt({discr:?})"),
Return => write!(fmt, "return"),
GeneratorDrop => write!(fmt, "generator_drop"),
UnwindResume => write!(fmt, "resume"),
UnwindTerminate(reason) => {
write!(fmt, "abort({})", reason.as_short_str())
}
Yield { value, resume_arg, .. } => write!(fmt, "{resume_arg:?} = yield({value:?})"),
Unreachable => write!(fmt, "unreachable"),
Drop { place, .. } => write!(fmt, "drop({place:?})"),
Call { func, args, destination, .. } => {
write!(fmt, "{destination:?} = ")?;
write!(fmt, "{func:?}(")?;
for (index, arg) in args.iter().enumerate() {
if index > 0 {
write!(fmt, ", ")?;
}
write!(fmt, "{arg:?}")?;
}
write!(fmt, ")")
}
Assert { cond, expected, msg, .. } => {
write!(fmt, "assert(")?;
if !expected {
write!(fmt, "!")?;
}
write!(fmt, "{cond:?}, ")?;
msg.fmt_assert_args(fmt)?;
write!(fmt, ")")
}
FalseEdge { .. } => write!(fmt, "falseEdge"),
FalseUnwind { .. } => write!(fmt, "falseUnwind"),
InlineAsm { template, ref operands, options, .. } => {
write!(fmt, "asm!(\"{}\"", InlineAsmTemplatePiece::to_string(template))?;
for op in operands {
write!(fmt, ", ")?;
let print_late = |&late| if late { "late" } else { "" };
match op {
InlineAsmOperand::In { reg, value } => {
write!(fmt, "in({reg}) {value:?}")?;
}
InlineAsmOperand::Out { reg, late, place: Some(place) } => {
write!(fmt, "{}out({}) {:?}", print_late(late), reg, place)?;
}
InlineAsmOperand::Out { reg, late, place: None } => {
write!(fmt, "{}out({}) _", print_late(late), reg)?;
}
InlineAsmOperand::InOut {
reg,
late,
in_value,
out_place: Some(out_place),
} => {
write!(
fmt,
"in{}out({}) {:?} => {:?}",
print_late(late),
reg,
in_value,
out_place
)?;
}
InlineAsmOperand::InOut { reg, late, in_value, out_place: None } => {
write!(fmt, "in{}out({}) {:?} => _", print_late(late), reg, in_value)?;
}
InlineAsmOperand::Const { value } => {
write!(fmt, "const {value:?}")?;
}
InlineAsmOperand::SymFn { value } => {
write!(fmt, "sym_fn {value:?}")?;
}
InlineAsmOperand::SymStatic { def_id } => {
write!(fmt, "sym_static {def_id:?}")?;
}
}
}
write!(fmt, ", options({options:?}))")
}
}
}
/// Returns the list of labels for the edges to the successor basic blocks.
pub fn fmt_successor_labels(&self) -> Vec<Cow<'static, str>> {
use self::TerminatorKind::*;
match *self {
Return | UnwindResume | UnwindTerminate(_) | Unreachable | GeneratorDrop => vec![],
Goto { .. } => vec!["".into()],
SwitchInt { ref targets, .. } => targets
.values
.iter()
.map(|&u| Cow::Owned(u.to_string()))
.chain(iter::once("otherwise".into()))
.collect(),
Call { target: Some(_), unwind: UnwindAction::Cleanup(_), .. } => {
vec!["return".into(), "unwind".into()]
}
Call { target: Some(_), unwind: _, .. } => vec!["return".into()],
Call { target: None, unwind: UnwindAction::Cleanup(_), .. } => vec!["unwind".into()],
Call { target: None, unwind: _, .. } => vec![],
Yield { drop: Some(_), .. } => vec!["resume".into(), "drop".into()],
Yield { drop: None, .. } => vec!["resume".into()],
Drop { unwind: UnwindAction::Cleanup(_), .. } => vec!["return".into(), "unwind".into()],
Drop { unwind: _, .. } => vec!["return".into()],
Assert { unwind: UnwindAction::Cleanup(_), .. } => {
vec!["success".into(), "unwind".into()]
}
Assert { unwind: _, .. } => vec!["success".into()],
FalseEdge { .. } => vec!["real".into(), "imaginary".into()],
FalseUnwind { unwind: UnwindAction::Cleanup(_), .. } => {
vec!["real".into(), "unwind".into()]
}
FalseUnwind { unwind: _, .. } => vec!["real".into()],
InlineAsm { destination: Some(_), unwind: UnwindAction::Cleanup(_), .. } => {
vec!["return".into(), "unwind".into()]
}
InlineAsm { destination: Some(_), unwind: _, .. } => {
vec!["return".into()]
}
InlineAsm { destination: None, unwind: UnwindAction::Cleanup(_), .. } => {
vec!["unwind".into()]
}
InlineAsm { destination: None, unwind: _, .. } => vec![],
}
}
}
#[derive(Copy, Clone, Debug)]
pub enum TerminatorEdges<'mir, 'tcx> {
/// For terminators that have no successor, like `return`.

View file

@ -121,16 +121,12 @@ impl EraseType for Result<mir::ConstantKind<'_>, mir::interpret::LitToConstError
[u8; size_of::<Result<mir::ConstantKind<'static>, mir::interpret::LitToConstError>>()];
}
impl EraseType for Result<mir::interpret::ConstAlloc<'_>, mir::interpret::ErrorHandled> {
type Result = [u8; size_of::<
Result<mir::interpret::ConstAlloc<'static>, mir::interpret::ErrorHandled>,
>()];
impl EraseType for Result<mir::ConstAlloc<'_>, mir::interpret::ErrorHandled> {
type Result = [u8; size_of::<Result<mir::ConstAlloc<'static>, mir::interpret::ErrorHandled>>()];
}
impl EraseType for Result<mir::interpret::ConstValue<'_>, mir::interpret::ErrorHandled> {
type Result = [u8; size_of::<
Result<mir::interpret::ConstValue<'static>, mir::interpret::ErrorHandled>,
>()];
impl EraseType for Result<mir::ConstValue<'_>, mir::interpret::ErrorHandled> {
type Result = [u8; size_of::<Result<mir::ConstValue<'static>, mir::interpret::ErrorHandled>>()];
}
impl EraseType for Result<Option<ty::ValTree<'_>>, mir::interpret::ErrorHandled> {
@ -317,8 +313,8 @@ tcx_lifetime! {
rustc_middle::middle::exported_symbols::ExportedSymbol,
rustc_middle::mir::ConstantKind,
rustc_middle::mir::DestructuredConstant,
rustc_middle::mir::interpret::ConstAlloc,
rustc_middle::mir::interpret::ConstValue,
rustc_middle::mir::ConstAlloc,
rustc_middle::mir::ConstValue,
rustc_middle::mir::interpret::GlobalId,
rustc_middle::mir::interpret::LitToConstInput,
rustc_middle::traits::query::MethodAutoderefStepsResult,

View file

@ -2,7 +2,6 @@
use crate::infer::canonical::Canonical;
use crate::mir;
use crate::mir::interpret::ConstValue;
use crate::traits;
use crate::ty::fast_reject::SimplifiedType;
use crate::ty::layout::{TyAndLayout, ValidityRequirement};
@ -369,7 +368,7 @@ impl<'tcx> Key for (ty::Const<'tcx>, FieldIdx) {
}
}
impl<'tcx> Key for (ConstValue<'tcx>, Ty<'tcx>) {
impl<'tcx> Key for (mir::ConstValue<'tcx>, Ty<'tcx>) {
type CacheSelector = DefaultCacheSelector<Self>;
fn default_span(&self, _: TyCtxt<'_>) -> Span {
@ -377,7 +376,7 @@ impl<'tcx> Key for (ConstValue<'tcx>, Ty<'tcx>) {
}
}
impl<'tcx> Key for mir::interpret::ConstAlloc<'tcx> {
impl<'tcx> Key for mir::ConstAlloc<'tcx> {
type CacheSelector = DefaultCacheSelector<Self>;
fn default_span(&self, _: TyCtxt<'_>) -> Span {

View file

@ -21,7 +21,7 @@ use crate::middle::stability::{self, DeprecationEntry};
use crate::mir;
use crate::mir::interpret::GlobalId;
use crate::mir::interpret::{
ConstValue, EvalToAllocationRawResult, EvalToConstValueResult, EvalToValTreeResult,
EvalToAllocationRawResult, EvalToConstValueResult, EvalToValTreeResult,
};
use crate::mir::interpret::{LitToConstError, LitToConstInput};
use crate::mir::mono::CodegenUnit;
@ -1091,7 +1091,7 @@ rustc_queries! {
}
/// Converts a type-level constant value into a `ConstValue`
query valtree_to_const_val(key: (Ty<'tcx>, ty::ValTree<'tcx>)) -> ConstValue<'tcx> {
query valtree_to_const_val(key: (Ty<'tcx>, ty::ValTree<'tcx>)) -> mir::ConstValue<'tcx> {
desc { "converting type-level constant value to mir constant value"}
}
@ -1104,14 +1104,14 @@ rustc_queries! {
/// Tries to destructure an `mir::ConstantKind` ADT or array into its variant index
/// and its field values. This should only be used for pretty printing.
query try_destructure_mir_constant_for_diagnostics(
key: (ConstValue<'tcx>, Ty<'tcx>)
key: (mir::ConstValue<'tcx>, Ty<'tcx>)
) -> Option<mir::DestructuredConstant<'tcx>> {
desc { "destructuring MIR constant"}
no_hash
eval_always
}
query const_caller_location(key: (rustc_span::Symbol, u32, u32)) -> ConstValue<'tcx> {
query const_caller_location(key: (rustc_span::Symbol, u32, u32)) -> mir::ConstValue<'tcx> {
desc { "getting a &core::panic::Location referring to a span" }
}

View file

@ -1,30 +1,70 @@
//! Data structure used to inspect trait solver behavior.
//!
//! During trait solving we optionally build "proof trees", the root of
//! which is a [GoalEvaluation] with [GoalEvaluationKind::Root]. These
//! trees are used to improve the debug experience and are also used by
//! the compiler itself to provide necessary context for error messages.
//!
//! Because each nested goal in the solver gets [canonicalized] separately
//! and we discard inference progress via "probes", we cannot mechanically
//! use proof trees without somehow "lifting up" data local to the current
//! `InferCtxt`. Any data used mechanically is therefore canonicalized and
//! stored as [CanonicalState]. As printing canonicalized data worsens the
//! debugging dumps, we do not simply canonicalize everything.
//!
//! This means proof trees contain inference variables and placeholders
//! local to a different `InferCtxt` which must not be used with the
//! current one.
//!
//! [canonicalized]: https://rustc-dev-guide.rust-lang.org/solve/canonicalization.html
use super::{
CandidateSource, CanonicalInput, Certainty, Goal, IsNormalizesToHack, NoSolution, QueryInput,
QueryResult,
CandidateSource, Canonical, CanonicalInput, Certainty, Goal, IsNormalizesToHack, NoSolution,
QueryInput, QueryResult,
};
use crate::ty;
use crate::{infer::canonical::CanonicalVarValues, ty};
use format::ProofTreeFormatter;
use std::fmt::{Debug, Write};
mod format;
/// Some `data` together with information about how they relate to the input
/// of the canonical query.
///
/// This is only ever used as [CanonicalState]. Any type information in proof
/// trees used mechanically has to be canonicalized as we otherwise leak
/// inference variables from a nested `InferCtxt`.
#[derive(Debug, Clone, Copy, Eq, PartialEq, TypeFoldable, TypeVisitable)]
pub struct State<'tcx, T> {
pub var_values: CanonicalVarValues<'tcx>,
pub data: T,
}
pub type CanonicalState<'tcx, T> = Canonical<'tcx, State<'tcx, T>>;
#[derive(Debug, Eq, PartialEq)]
pub enum CacheHit {
Provisional,
Global,
}
/// When evaluating the root goals we also store the
/// original values for the `CanonicalVarValues` of the
/// canonicalized goal. We use this to map any [CanonicalState]
/// from the local `InferCtxt` of the solver query to
/// the `InferCtxt` of the caller.
#[derive(Eq, PartialEq)]
pub enum GoalEvaluationKind {
Root,
pub enum GoalEvaluationKind<'tcx> {
Root { orig_values: Vec<ty::GenericArg<'tcx>> },
Nested { is_normalizes_to_hack: IsNormalizesToHack },
}
#[derive(Eq, PartialEq)]
pub struct GoalEvaluation<'tcx> {
pub uncanonicalized_goal: Goal<'tcx, ty::Predicate<'tcx>>,
pub kind: GoalEvaluationKind,
pub kind: GoalEvaluationKind<'tcx>,
pub evaluation: CanonicalGoalEvaluation<'tcx>,
/// The nested goals from instantiating the query response.
pub returned_goals: Vec<Goal<'tcx, ty::Predicate<'tcx>>>,
}
@ -66,6 +106,7 @@ pub struct GoalEvaluationStep<'tcx> {
/// of a goal.
#[derive(Eq, PartialEq)]
pub struct Probe<'tcx> {
/// What happened inside of this probe in chronological order.
pub steps: Vec<ProbeStep<'tcx>>,
pub kind: ProbeKind<'tcx>,
}
@ -78,12 +119,21 @@ impl Debug for Probe<'_> {
#[derive(Eq, PartialEq)]
pub enum ProbeStep<'tcx> {
AddGoal(Goal<'tcx, ty::Predicate<'tcx>>),
/// We added a goal to the `EvalCtxt` which will get proven
/// the next time `EvalCtxt::try_evaluate_added_goals` is called.
AddGoal(CanonicalState<'tcx, Goal<'tcx, ty::Predicate<'tcx>>>),
/// The inside of a `EvalCtxt::try_evaluate_added_goals` call.
EvaluateGoals(AddedGoalsEvaluation<'tcx>),
/// A call to `probe` while proving the current goal. This is
/// used whenever there are multiple candidates to prove the
/// current goal.
NestedProbe(Probe<'tcx>),
}
#[derive(Debug, PartialEq, Eq)]
/// What kind of probe we're in. In case the probe represents a candidate, or
/// the final result of the current goal - via [ProbeKind::Root] - we also
/// store the [QueryResult].
#[derive(Debug, PartialEq, Eq, Clone, Copy)]
pub enum ProbeKind<'tcx> {
/// The root inference context while proving a goal.
Root { result: QueryResult<'tcx> },

View file

@ -41,7 +41,7 @@ impl<'a, 'b> ProofTreeFormatter<'a, 'b> {
pub(super) fn format_goal_evaluation(&mut self, eval: &GoalEvaluation<'_>) -> std::fmt::Result {
let goal_text = match eval.kind {
GoalEvaluationKind::Root => "ROOT GOAL",
GoalEvaluationKind::Root { orig_values: _ } => "ROOT GOAL",
GoalEvaluationKind::Nested { is_normalizes_to_hack } => match is_normalizes_to_hack {
IsNormalizesToHack::No => "GOAL",
IsNormalizesToHack::Yes => "NORMALIZES-TO HACK GOAL",

View file

@ -1118,6 +1118,10 @@ where
fn is_unit(this: TyAndLayout<'tcx>) -> bool {
matches!(this.ty.kind(), ty::Tuple(list) if list.len() == 0)
}
fn is_transparent(this: TyAndLayout<'tcx>) -> bool {
matches!(this.ty.kind(), ty::Adt(def, _) if def.repr().transparent())
}
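// Example: `#[repr(transparent)] struct Wrapper(u32);` makes `is_transparent`
// return true for `Wrapper`'s layout, whereas a struct with the default repr
// does not.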
}
/// Calculates whether a function's ABI can unwind or not.

View file

@ -566,6 +566,11 @@ impl rustc_errors::IntoDiagnosticArg for Clause<'_> {
pub struct Clause<'tcx>(Interned<'tcx, WithCachedTypeInfo<ty::Binder<'tcx, PredicateKind<'tcx>>>>);
impl<'tcx> Clause<'tcx> {
pub fn from_projection_clause(tcx: TyCtxt<'tcx>, pred: PolyProjectionPredicate<'tcx>) -> Self {
let pred: Predicate<'tcx> = pred.to_predicate(tcx);
pred.expect_clause()
}
pub fn as_predicate(self) -> Predicate<'tcx> {
Predicate(self.0)
}
@ -1253,14 +1258,6 @@ impl<'tcx> ToPredicate<'tcx, Clause<'tcx>> for TraitRef<'tcx> {
}
}
impl<'tcx> ToPredicate<'tcx, Clause<'tcx>> for TraitPredicate<'tcx> {
#[inline(always)]
fn to_predicate(self, tcx: TyCtxt<'tcx>) -> Clause<'tcx> {
let p: Predicate<'tcx> = self.to_predicate(tcx);
p.expect_clause()
}
}
impl<'tcx> ToPredicate<'tcx> for Binder<'tcx, TraitRef<'tcx>> {
#[inline(always)]
fn to_predicate(self, tcx: TyCtxt<'tcx>) -> Predicate<'tcx> {
@ -1287,18 +1284,6 @@ impl<'tcx> ToPredicate<'tcx, PolyTraitPredicate<'tcx>> for Binder<'tcx, TraitRef
}
}
impl<'tcx> ToPredicate<'tcx, PolyTraitPredicate<'tcx>> for TraitRef<'tcx> {
fn to_predicate(self, tcx: TyCtxt<'tcx>) -> PolyTraitPredicate<'tcx> {
ty::Binder::dummy(self).to_predicate(tcx)
}
}
impl<'tcx> ToPredicate<'tcx, PolyTraitPredicate<'tcx>> for TraitPredicate<'tcx> {
fn to_predicate(self, _tcx: TyCtxt<'tcx>) -> PolyTraitPredicate<'tcx> {
ty::Binder::dummy(self)
}
}
impl<'tcx> ToPredicate<'tcx> for PolyTraitPredicate<'tcx> {
fn to_predicate(self, tcx: TyCtxt<'tcx>) -> Predicate<'tcx> {
self.map_bound(|p| PredicateKind::Clause(ClauseKind::Trait(p))).to_predicate(tcx)
@ -1312,12 +1297,6 @@ impl<'tcx> ToPredicate<'tcx, Clause<'tcx>> for PolyTraitPredicate<'tcx> {
}
}
impl<'tcx> ToPredicate<'tcx> for OutlivesPredicate<ty::Region<'tcx>, ty::Region<'tcx>> {
fn to_predicate(self, tcx: TyCtxt<'tcx>) -> Predicate<'tcx> {
ty::Binder::dummy(PredicateKind::Clause(ClauseKind::RegionOutlives(self))).to_predicate(tcx)
}
}
impl<'tcx> ToPredicate<'tcx> for PolyRegionOutlivesPredicate<'tcx> {
fn to_predicate(self, tcx: TyCtxt<'tcx>) -> Predicate<'tcx> {
self.map_bound(|p| PredicateKind::Clause(ClauseKind::RegionOutlives(p))).to_predicate(tcx)
@ -1330,12 +1309,6 @@ impl<'tcx> ToPredicate<'tcx> for OutlivesPredicate<Ty<'tcx>, ty::Region<'tcx>> {
}
}
impl<'tcx> ToPredicate<'tcx> for PolyTypeOutlivesPredicate<'tcx> {
fn to_predicate(self, tcx: TyCtxt<'tcx>) -> Predicate<'tcx> {
self.map_bound(|p| PredicateKind::Clause(ClauseKind::TypeOutlives(p))).to_predicate(tcx)
}
}
impl<'tcx> ToPredicate<'tcx> for ProjectionPredicate<'tcx> {
fn to_predicate(self, tcx: TyCtxt<'tcx>) -> Predicate<'tcx> {
ty::Binder::dummy(PredicateKind::Clause(ClauseKind::Projection(self))).to_predicate(tcx)
@ -1355,13 +1328,6 @@ impl<'tcx> ToPredicate<'tcx, Clause<'tcx>> for ProjectionPredicate<'tcx> {
}
}
impl<'tcx> ToPredicate<'tcx, Clause<'tcx>> for PolyProjectionPredicate<'tcx> {
fn to_predicate(self, tcx: TyCtxt<'tcx>) -> Clause<'tcx> {
let p: Predicate<'tcx> = self.to_predicate(tcx);
p.expect_clause()
}
}
impl<'tcx> ToPredicate<'tcx> for TraitPredicate<'tcx> {
fn to_predicate(self, tcx: TyCtxt<'tcx>) -> Predicate<'tcx> {
PredicateKind::Clause(ClauseKind::Trait(self)).to_predicate(tcx)
@ -2214,10 +2180,6 @@ impl<'tcx> TyCtxt<'tcx> {
// The name of a constructor is that of its parent.
rustc_hir::definitions::DefPathData::Ctor => self
.opt_item_name(DefId { krate: def_id.krate, index: def_key.parent.unwrap() }),
// The name of opaque types only exists in HIR.
rustc_hir::definitions::DefPathData::ImplTrait
if let Some(def_id) = def_id.as_local() =>
self.hir().opt_name(self.hir().local_def_id_to_hir_id(def_id)),
_ => def_key.get_opt_name(),
}
}

View file

@ -1713,6 +1713,21 @@ pub trait PrettyPrinter<'tcx>:
}
}
pub(crate) fn pretty_print_const<'tcx>(
c: ty::Const<'tcx>,
fmt: &mut fmt::Formatter<'_>,
print_types: bool,
) -> fmt::Result {
ty::tls::with(|tcx| {
let literal = tcx.lift(c).unwrap();
let mut cx = FmtPrinter::new(tcx, Namespace::ValueNS);
cx.print_alloc_ids = true;
let cx = cx.pretty_print_const(literal, print_types)?;
fmt.write_str(&cx.into_buffer())?;
Ok(())
})
}
// HACK(eddyb) boxed to avoid moving around a large struct by-value.
pub struct FmtPrinter<'a, 'tcx>(Box<FmtPrinterData<'a, 'tcx>>);

View file

@ -457,6 +457,7 @@ TrivialLiftImpls! {
(),
bool,
usize,
u64,
}
// For some things about which the type library does not know, or does not

View file

@ -725,7 +725,7 @@ impl<'tcx> PolyExistentialPredicate<'tcx> {
self.rebind(tr).with_self_ty(tcx, self_ty).to_predicate(tcx)
}
ExistentialPredicate::Projection(p) => {
self.rebind(p.with_self_ty(tcx, self_ty)).to_predicate(tcx)
ty::Clause::from_projection_clause(tcx, self.rebind(p.with_self_ty(tcx, self_ty)))
}
ExistentialPredicate::AutoTrait(did) => {
let generics = tcx.generics_of(did);
@ -2945,6 +2945,33 @@ impl<'tcx> Ty<'tcx> {
_ => false,
}
}
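/// Returns `true` if this type is "rigid": its outermost kind is fully known
/// and cannot change through inference or normalization (i.e. it is not
/// `Infer`, `Alias`, `Param`, `Bound`, `Placeholder`, or `Error`).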
pub fn is_known_rigid(self) -> bool {
match self.kind() {
Bool
| Char
| Int(_)
| Uint(_)
| Float(_)
| Adt(_, _)
| Foreign(_)
| Str
| Array(_, _)
| Slice(_)
| RawPtr(_)
| Ref(_, _, _)
| FnDef(_, _)
| FnPtr(_)
| Dynamic(_, _, _)
| Closure(_, _)
| Generator(_, _, _)
| GeneratorWitness(_)
| GeneratorWitnessMIR(_, _)
| Never
| Tuple(_) => true,
Error(_) | Infer(_) | Alias(_, _) | Param(_) | Bound(_, _) | Placeholder(_) => false,
}
}
}
/// Extra information about why we ended up with a particular variance.

View file

@ -1,4 +1,4 @@
use rustc_middle::mir::interpret::{ConstValue, Scalar};
use rustc_middle::mir::interpret::Scalar;
use rustc_middle::mir::tcx::PlaceTy;
use rustc_middle::ty::cast::mir_cast_kind;
use rustc_middle::{mir::*, thir::*, ty};

View file

@ -3,9 +3,7 @@
use crate::build::{parse_float_into_constval, Builder};
use rustc_ast as ast;
use rustc_middle::mir;
use rustc_middle::mir::interpret::{
Allocation, ConstValue, LitToConstError, LitToConstInput, Scalar,
};
use rustc_middle::mir::interpret::{Allocation, LitToConstError, LitToConstInput, Scalar};
use rustc_middle::mir::*;
use rustc_middle::thir::*;
use rustc_middle::ty::{
@ -133,14 +131,14 @@ fn lit_to_mir_constant<'tcx>(
let s = s.as_str();
let allocation = Allocation::from_bytes_byte_aligned_immutable(s.as_bytes());
let allocation = tcx.mk_const_alloc(allocation);
ConstValue::Slice { data: allocation, start: 0, end: s.len() }
ConstValue::Slice { data: allocation, meta: allocation.inner().size().bytes() }
}
(ast::LitKind::ByteStr(data, _), ty::Ref(_, inner_ty, _))
if matches!(inner_ty.kind(), ty::Slice(_)) =>
{
let allocation = Allocation::from_bytes_byte_aligned_immutable(data as &[u8]);
let allocation = tcx.mk_const_alloc(allocation);
ConstValue::Slice { data: allocation, start: 0, end: data.len() }
ConstValue::Slice { data: allocation, meta: allocation.inner().size().bytes() }
}
(ast::LitKind::ByteStr(data, _), ty::Ref(_, inner_ty, _)) if inner_ty.is_array() => {
let id = tcx.allocate_bytes(data);
@ -150,7 +148,7 @@ fn lit_to_mir_constant<'tcx>(
{
let allocation = Allocation::from_bytes_byte_aligned_immutable(data as &[u8]);
let allocation = tcx.mk_const_alloc(allocation);
ConstValue::Slice { data: allocation, start: 0, end: data.len() }
ConstValue::Slice { data: allocation, meta: allocation.inner().size().bytes() }
}
(ast::LitKind::Byte(n), ty::Uint(ty::UintTy::U8)) => {
ConstValue::Scalar(Scalar::from_uint(*n, Size::from_bytes(1)))

View file

@ -15,7 +15,6 @@ use rustc_index::{Idx, IndexSlice, IndexVec};
use rustc_infer::infer::{InferCtxt, TyCtxtInferExt};
use rustc_middle::hir::place::PlaceBase as HirPlaceBase;
use rustc_middle::middle::region;
use rustc_middle::mir::interpret::ConstValue;
use rustc_middle::mir::interpret::Scalar;
use rustc_middle::mir::*;
use rustc_middle::thir::{

View file

@ -18,7 +18,7 @@ use rustc_hir::pat_util::EnumerateAndAdjustIterator;
use rustc_hir::RangeEnd;
use rustc_index::Idx;
use rustc_middle::mir::interpret::{
ConstValue, ErrorHandled, GlobalId, LitToConstError, LitToConstInput, Scalar,
ErrorHandled, GlobalId, LitToConstError, LitToConstInput, Scalar,
};
use rustc_middle::mir::{self, ConstantKind, UserTypeProjection};
use rustc_middle::mir::{BorrowKind, Mutability};
@ -855,8 +855,8 @@ pub(crate) fn compare_const_vals<'tcx>(
ty::Float(_) | ty::Int(_) => {} // require special handling, see below
_ => match (a, b) {
(
mir::ConstantKind::Val(ConstValue::Scalar(Scalar::Int(a)), _a_ty),
mir::ConstantKind::Val(ConstValue::Scalar(Scalar::Int(b)), _b_ty),
mir::ConstantKind::Val(mir::ConstValue::Scalar(Scalar::Int(a)), _a_ty),
mir::ConstantKind::Val(mir::ConstValue::Scalar(Scalar::Int(b)), _b_ty),
) => return Some(a.cmp(&b)),
(mir::ConstantKind::Ty(a), mir::ConstantKind::Ty(b)) => {
return Some(a.kind().cmp(&b.kind()));

View file

@ -4,7 +4,7 @@ use rustc_hir::lang_items::LangItem;
use rustc_index::IndexVec;
use rustc_middle::mir::*;
use rustc_middle::mir::{
interpret::{ConstValue, Scalar},
interpret::Scalar,
visit::{PlaceContext, Visitor},
};
use rustc_middle::ty::{Ty, TyCtxt, TypeAndMut};

View file

@ -22,8 +22,8 @@ use rustc_target::spec::abi::Abi as CallAbi;
use crate::dataflow_const_prop::Patch;
use crate::MirPass;
use rustc_const_eval::interpret::{
self, compile_time_machine, AllocId, ConstAllocation, ConstValue, FnArg, Frame, ImmTy,
Immediate, InterpCx, InterpResult, MemoryKind, OpTy, PlaceTy, Pointer, Scalar, StackPopCleanup,
self, compile_time_machine, AllocId, ConstAllocation, FnArg, Frame, ImmTy, Immediate, InterpCx,
InterpResult, MemoryKind, OpTy, PlaceTy, Pointer, Scalar, StackPopCleanup,
};
/// The maximum number of bytes that we'll allocate space for a local or the return value.

View file

@ -1,10 +1,8 @@
use super::Error;
use super::debug;
use super::graph;
use super::spans;
use debug::{DebugCounters, NESTED_INDENT};
use graph::{BasicCoverageBlock, BcbBranch, CoverageGraph, TraverseCoverageGraphWithLoops};
use spans::CoverageSpan;
@ -16,6 +14,8 @@ use rustc_middle::mir::coverage::*;
use std::fmt::{self, Debug};
const NESTED_INDENT: &str = " ";
/// The coverage counter or counter expression associated with a particular
/// BCB node or BCB edge.
#[derive(Clone)]
@ -75,8 +75,6 @@ pub(super) struct CoverageCounters {
/// BCB/edge, but are needed as operands to more complex expressions.
/// These are always [`BcbCounter::Expression`].
pub(super) intermediate_expressions: Vec<BcbCounter>,
pub debug_counters: DebugCounters,
}
impl CoverageCounters {
@ -91,17 +89,9 @@ impl CoverageCounters {
bcb_edge_counters: FxHashMap::default(),
bcb_has_incoming_edge_counters: BitSet::new_empty(num_bcbs),
intermediate_expressions: Vec::new(),
debug_counters: DebugCounters::new(),
}
}
/// Activate the `DebugCounters` data structures, to provide additional debug formatting
/// features when formatting [`BcbCounter`] (counter) values.
pub fn enable_debug(&mut self) {
self.debug_counters.enable();
}
/// Makes [`BcbCounter`] `Counter`s and `Expressions` for the `BasicCoverageBlock`s directly or
/// indirectly associated with `CoverageSpans`, and accumulates additional `Expression`s
/// representing intermediate values.
@ -113,44 +103,18 @@ impl CoverageCounters {
MakeBcbCounters::new(self, basic_coverage_blocks).make_bcb_counters(coverage_spans)
}
fn make_counter<F>(&mut self, debug_block_label_fn: F) -> BcbCounter
where
F: Fn() -> Option<String>,
{
let counter = BcbCounter::Counter { id: self.next_counter() };
if self.debug_counters.is_enabled() {
self.debug_counters.add_counter(&counter, (debug_block_label_fn)());
}
counter
fn make_counter(&mut self) -> BcbCounter {
let id = self.next_counter();
BcbCounter::Counter { id }
}
fn make_expression<F>(
&mut self,
lhs: Operand,
op: Op,
rhs: Operand,
debug_block_label_fn: F,
) -> BcbCounter
where
F: Fn() -> Option<String>,
{
fn make_expression(&mut self, lhs: Operand, op: Op, rhs: Operand) -> BcbCounter {
let id = self.next_expression();
let expression = BcbCounter::Expression { id, lhs, op, rhs };
if self.debug_counters.is_enabled() {
self.debug_counters.add_counter(&expression, (debug_block_label_fn)());
}
expression
BcbCounter::Expression { id, lhs, op, rhs }
}
pub fn make_identity_counter(&mut self, counter_operand: Operand) -> BcbCounter {
let some_debug_block_label = if self.debug_counters.is_enabled() {
self.debug_counters.some_block_label(counter_operand).cloned()
} else {
None
};
self.make_expression(counter_operand, Op::Add, Operand::Zero, || {
some_debug_block_label.clone()
})
self.make_expression(counter_operand, Op::Add, Operand::Zero)
}
/// Counter IDs start from one and go up.
@ -367,12 +331,8 @@ impl<'a> MakeBcbCounters<'a> {
branch_counter_operand,
Op::Add,
sumup_counter_operand,
|| None,
);
debug!(
" [new intermediate expression: {}]",
self.format_counter(&intermediate_expression)
);
debug!(" [new intermediate expression: {:?}]", intermediate_expression);
let intermediate_expression_operand = intermediate_expression.as_operand();
self.coverage_counters.intermediate_expressions.push(intermediate_expression);
some_sumup_counter_operand.replace(intermediate_expression_operand);
@ -394,9 +354,8 @@ impl<'a> MakeBcbCounters<'a> {
branching_counter_operand,
Op::Subtract,
sumup_counter_operand,
|| Some(format!("{expression_branch:?}")),
);
debug!("{:?} gets an expression: {}", expression_branch, self.format_counter(&expression));
debug!("{:?} gets an expression: {:?}", expression_branch, expression);
let bcb = expression_branch.target_bcb;
if expression_branch.is_only_path_to_target() {
self.coverage_counters.set_bcb_counter(bcb, expression)?;
@ -418,10 +377,10 @@ impl<'a> MakeBcbCounters<'a> {
// If the BCB already has a counter, return it.
if let Some(counter_kind) = &self.coverage_counters.bcb_counters[bcb] {
debug!(
"{}{:?} already has a counter: {}",
"{}{:?} already has a counter: {:?}",
NESTED_INDENT.repeat(debug_indent_level),
bcb,
self.format_counter(counter_kind),
counter_kind,
);
return Ok(counter_kind.as_operand());
}
@ -431,22 +390,22 @@ impl<'a> MakeBcbCounters<'a> {
// program results in a tight infinite loop, but it should still compile.
let one_path_to_target = self.bcb_has_one_path_to_target(bcb);
if one_path_to_target || self.bcb_predecessors(bcb).contains(&bcb) {
let counter_kind = self.coverage_counters.make_counter(|| Some(format!("{bcb:?}")));
let counter_kind = self.coverage_counters.make_counter();
if one_path_to_target {
debug!(
"{}{:?} gets a new counter: {}",
"{}{:?} gets a new counter: {:?}",
NESTED_INDENT.repeat(debug_indent_level),
bcb,
self.format_counter(&counter_kind),
counter_kind,
);
} else {
debug!(
"{}{:?} has itself as its own predecessor. It can't be part of its own \
Expression sum, so it will get its own new counter: {}. (Note, the compiled \
Expression sum, so it will get its own new counter: {:?}. (Note, the compiled \
code will generate an infinite loop.)",
NESTED_INDENT.repeat(debug_indent_level),
bcb,
self.format_counter(&counter_kind),
counter_kind,
);
}
return self.coverage_counters.set_bcb_counter(bcb, counter_kind);
@ -481,12 +440,11 @@ impl<'a> MakeBcbCounters<'a> {
sumup_edge_counter_operand,
Op::Add,
edge_counter_operand,
|| None,
);
debug!(
"{}new intermediate expression: {}",
"{}new intermediate expression: {:?}",
NESTED_INDENT.repeat(debug_indent_level),
self.format_counter(&intermediate_expression)
intermediate_expression
);
let intermediate_expression_operand = intermediate_expression.as_operand();
self.coverage_counters.intermediate_expressions.push(intermediate_expression);
@ -497,13 +455,12 @@ impl<'a> MakeBcbCounters<'a> {
first_edge_counter_operand,
Op::Add,
some_sumup_edge_counter_operand.unwrap(),
|| Some(format!("{bcb:?}")),
);
debug!(
"{}{:?} gets a new counter (sum of predecessor counters): {}",
"{}{:?} gets a new counter (sum of predecessor counters): {:?}",
NESTED_INDENT.repeat(debug_indent_level),
bcb,
self.format_counter(&counter_kind)
counter_kind
);
self.coverage_counters.set_bcb_counter(bcb, counter_kind)
}
@ -534,24 +491,23 @@ impl<'a> MakeBcbCounters<'a> {
self.coverage_counters.bcb_edge_counters.get(&(from_bcb, to_bcb))
{
debug!(
"{}Edge {:?}->{:?} already has a counter: {}",
"{}Edge {:?}->{:?} already has a counter: {:?}",
NESTED_INDENT.repeat(debug_indent_level),
from_bcb,
to_bcb,
self.format_counter(counter_kind)
counter_kind
);
return Ok(counter_kind.as_operand());
}
// Make a new counter to count this edge.
let counter_kind =
self.coverage_counters.make_counter(|| Some(format!("{from_bcb:?}->{to_bcb:?}")));
let counter_kind = self.coverage_counters.make_counter();
debug!(
"{}Edge {:?}->{:?} gets a new counter: {}",
"{}Edge {:?}->{:?} gets a new counter: {:?}",
NESTED_INDENT.repeat(debug_indent_level),
from_bcb,
to_bcb,
self.format_counter(&counter_kind)
counter_kind
);
self.coverage_counters.set_bcb_edge_counter(from_bcb, to_bcb, counter_kind)
}
@ -710,9 +666,4 @@ impl<'a> MakeBcbCounters<'a> {
fn bcb_dominates(&self, dom: BasicCoverageBlock, node: BasicCoverageBlock) -> bool {
self.basic_coverage_blocks.dominates(dom, node)
}
#[inline]
fn format_counter(&self, counter_kind: &BcbCounter) -> String {
self.coverage_counters.debug_counters.format_counter(counter_kind)
}
}
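The net effect of the counters change: `make_counter` and `make_expression` no longer take label closures, and derived `{:?}` formatting replaces the bespoke `format_counter`. A minimal, self-contained sketch of that API shape, using standalone stand-in types rather than rustc's actual definitions:

```rust
// Stand-in types; rustc's real BcbCounter uses interned IDs and operands.
#[derive(Clone, Copy, Debug)]
enum Op { Add, Subtract }

#[derive(Clone, Copy, Debug)]
enum Operand { Zero, Counter(u32), Expression(u32) }

#[derive(Clone, Debug)]
enum BcbCounter {
    Counter { id: u32 },
    Expression { id: u32, lhs: Operand, op: Op, rhs: Operand },
}

#[derive(Default)]
struct Counters { next_counter: u32, next_expression: u32 }

impl Counters {
    // No debug_block_label_fn parameter: counters are created unconditionally
    // and labeled only via their derived Debug output.
    fn make_counter(&mut self) -> BcbCounter {
        let id = self.next_counter;
        self.next_counter += 1;
        BcbCounter::Counter { id }
    }

    fn make_expression(&mut self, lhs: Operand, op: Op, rhs: Operand) -> BcbCounter {
        let id = self.next_expression;
        self.next_expression += 1;
        BcbCounter::Expression { id, lhs, op, rhs }
    }
}

fn main() {
    let mut c = Counters::default();
    let a = c.make_counter();
    let e = c.make_expression(Operand::Counter(0), Op::Subtract, Operand::Zero);
    // `{:?}` now plays the role the label closures used to play in debug! calls.
    println!("{a:?} / {e:?}");
}
```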

View file

@ -1,797 +0,0 @@
//! The `InstrumentCoverage` MIR pass implementation includes debugging tools and options
//! to help developers understand and/or improve the analysis and instrumentation of a MIR.
//!
//! To enable coverage, include the rustc command line option:
//!
//! * `-C instrument-coverage`
//!
//! MIR Dump Files, with additional `CoverageGraph` graphviz and `CoverageSpan` spanview
//! ------------------------------------------------------------------------------------
//!
//! Additional debugging options include:
//!
//! * `-Z dump-mir=InstrumentCoverage` - Generate `.mir` files showing the state of the MIR,
//! before and after the `InstrumentCoverage` pass, for each compiled function.
//!
//! * `-Z dump-mir-graphviz` - If `-Z dump-mir` is also enabled for the current MIR node path,
//! each MIR dump is accompanied by a before-and-after graphical view of the MIR, in Graphviz
//! `.dot` file format (which can be visually rendered as a graph using any of a number of free
//! Graphviz viewers and IDE extensions).
//!
//! For the `InstrumentCoverage` pass, this option also enables generation of an additional
//! Graphviz `.dot` file for each function, rendering the `CoverageGraph`: the control flow
//! graph (CFG) of `BasicCoverageBlocks` (BCBs), as nodes, internally labeled to show the
//! `CoverageSpan`-based MIR elements each BCB represents (`BasicBlock`s, `Statement`s and
//! `Terminator`s), assigned coverage counters and/or expressions, and edge counters, as needed.
//!
//! (Note: the additional option `-Z graphviz-dark-mode` can be added to change the rendered
//! output from its default black-on-white background to a dark color theme, if desired.)
//!
//! * `-Z dump-mir-spanview` - If `-Z dump-mir` is also enabled for the current MIR node path,
//! each MIR dump is accompanied by a before-and-after `.html` document showing the function's
//! original source code, highlighted by its MIR spans, at the `statement`-level (by default),
//! `terminator` only, or encompassing span for the `Terminator` plus all `Statement`s, in each
//! `block` (`BasicBlock`).
//!
//! For the `InstrumentCoverage` pass, this option also enables generation of an additional
//! spanview `.html` file for each function, showing the aggregated `CoverageSpan`s that will
//! require counters (or counter expressions) for accurate coverage analysis.
//!
//! Debug Logging
//! -------------
//!
//! The `InstrumentCoverage` pass includes debug logging messages at various phases and decision
//! points, which can be enabled via environment variable:
//!
//! ```shell
//! RUSTC_LOG=rustc_mir_transform::coverage=debug
//! ```
//!
//! Other module paths with coverage-related debug logs may also be of interest, particularly for
//! debugging the coverage map data, injected as global variables in the LLVM IR (during rustc's
//! code generation pass). For example:
//!
//! ```shell
//! RUSTC_LOG=rustc_mir_transform::coverage,rustc_codegen_llvm::coverageinfo=debug
//! ```
//!
//! Coverage Debug Options
//! ---------------------------------
//!
//! Additional debugging options can be enabled using the environment variable:
//!
//! ```shell
//! RUSTC_COVERAGE_DEBUG_OPTIONS=<options>
//! ```
//!
//! These options are comma-separated, and specified in the format `option-name=value`. For example:
//!
//! ```shell
//! $ RUSTC_COVERAGE_DEBUG_OPTIONS=counter-format=id+operation,allow-unused-expressions=yes cargo build
//! ```
//!
//! Coverage debug options include:
//!
//! * `allow-unused-expressions=yes` or `no` (default: `no`)
//!
//! The `InstrumentCoverage` algorithms _should_ only create and assign expressions to a
//! `BasicCoverageBlock`, or an incoming edge, if that expression is either (a) required to
//! count a `CoverageSpan`, or (b) a dependency of some other required counter expression.
//!
//! If an expression is generated that does not map to a `CoverageSpan` or dependency, this
//! probably indicates there was a bug in the algorithm that creates and assigns counters
//! and expressions.
//!
//! When this kind of bug is encountered, the rustc compiler will panic by default. Setting:
//! `allow-unused-expressions=yes` will log a warning message instead of panicking (effectively
//! ignoring the unused expressions), which may be helpful when debugging the root cause of
//! the problem.
//!
//! * `counter-format=<choices>`, where `<choices>` can be any plus-separated combination of `id`,
//! `block`, and/or `operation` (default: `block+operation`)
//!
//! This option affects both the `CoverageGraph` (graphviz `.dot` files) and debug logging, when
//! generating labels for counters and expressions.
//!
//! Depending on the values and combinations, counters can be labeled by:
//!
//! * `id` - counter or expression ID (ascending counter IDs, starting at 1, or descending
//! expression IDs, starting at `u32::MAX`)
//! * `block` - the `BasicCoverageBlock` label (for example, `bcb0`) or edge label (for
//! example `bcb0->bcb1`), for counters or expressions assigned to count a
//! `BasicCoverageBlock` or edge. Intermediate expressions (not directly associated with
//! a BCB or edge) will be labeled by their expression ID, unless `operation` is also
//! specified.
//! * `operation` - applied to expressions only, labels include the left-hand-side counter
//! or expression label (lhs operand), the operator (`+` or `-`), and the right-hand-side
//! counter or expression (rhs operand). Expression operand labels are generated
//! recursively, generating labels with nested operations, enclosed in parentheses
//! (for example: `bcb2 + (bcb0 - bcb1)`).
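The `operation` format described above renders expression operands recursively. A toy formatter (hypothetical names, illustrative only, not the deleted module's actual code) that reproduces the `bcb2 + (bcb0 - bcb1)` example:

```rust
// Illustrative only: recursive label rendering as described above.
enum Cov {
    Block(u32),                     // a BCB counter, labeled `bcbN`
    Expr(Box<Cov>, char, Box<Cov>), // lhs op rhs, op in {'+', '-'}
}

fn label(c: &Cov) -> String {
    match c {
        Cov::Block(n) => format!("bcb{n}"),
        Cov::Expr(lhs, op, rhs) => {
            // Nested expressions on the right are enclosed in parentheses.
            let rhs_label = match **rhs {
                Cov::Expr(..) => format!("({})", label(rhs)),
                _ => label(rhs),
            };
            format!("{} {op} {rhs_label}", label(lhs))
        }
    }
}

fn main() {
    let e = Cov::Expr(
        Box::new(Cov::Block(2)),
        '+',
        Box::new(Cov::Expr(Box::new(Cov::Block(0)), '-', Box::new(Cov::Block(1)))),
    );
    assert_eq!(label(&e), "bcb2 + (bcb0 - bcb1)");
}
```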
use std::iter;
use std::ops::Deref;
use std::sync::OnceLock;
use itertools::Itertools;
use rustc_data_structures::fx::{FxHashMap, FxHashSet};
use rustc_middle::mir::coverage::*;
use rustc_middle::mir::create_dump_file;
use rustc_middle::mir::generic_graphviz::GraphvizWriter;
use rustc_middle::mir::spanview::{self, SpanViewable};
use rustc_middle::mir::{self, BasicBlock};
use rustc_middle::ty::TyCtxt;
use rustc_span::Span;
use super::counters::{BcbCounter, CoverageCounters};
use super::graph::{BasicCoverageBlock, BasicCoverageBlockData, CoverageGraph};
use super::spans::CoverageSpan;
pub const NESTED_INDENT: &str = " ";
const RUSTC_COVERAGE_DEBUG_OPTIONS: &str = "RUSTC_COVERAGE_DEBUG_OPTIONS";
pub(super) fn debug_options<'a>() -> &'a DebugOptions {
static DEBUG_OPTIONS: OnceLock<DebugOptions> = OnceLock::new();
&DEBUG_OPTIONS.get_or_init(DebugOptions::from_env)
}
/// Parses and maintains coverage-specific debug options captured from the environment variable
/// "RUSTC_COVERAGE_DEBUG_OPTIONS", if set.
#[derive(Debug, Clone)]
pub(super) struct DebugOptions {
pub allow_unused_expressions: bool,
counter_format: ExpressionFormat,
}
impl DebugOptions {
fn from_env() -> Self {
let mut allow_unused_expressions = true;
let mut counter_format = ExpressionFormat::default();
if let Ok(env_debug_options) = std::env::var(RUSTC_COVERAGE_DEBUG_OPTIONS) {
for setting_str in env_debug_options.replace(' ', "").replace('-', "_").split(',') {
let (option, value) = match setting_str.split_once('=') {
None => (setting_str, None),
Some((k, v)) => (k, Some(v)),
};
match option {
"allow_unused_expressions" => {
allow_unused_expressions = bool_option_val(option, value);
debug!(
"{} env option `allow_unused_expressions` is set to {}",
RUSTC_COVERAGE_DEBUG_OPTIONS, allow_unused_expressions
);
}
"counter_format" => {
match value {
None => {
bug!(
"`{}` option in environment variable {} requires one or more \
plus-separated choices (a non-empty subset of \
`id+block+operation`)",
option,
RUSTC_COVERAGE_DEBUG_OPTIONS
);
}
Some(val) => {
counter_format = counter_format_option_val(val);
debug!(
"{} env option `counter_format` is set to {:?}",
RUSTC_COVERAGE_DEBUG_OPTIONS, counter_format
);
}
};
}
_ => bug!(
"Unsupported setting `{}` in environment variable {}",
option,
RUSTC_COVERAGE_DEBUG_OPTIONS
),
};
}
}
Self { allow_unused_expressions, counter_format }
}
}
fn bool_option_val(option: &str, some_strval: Option<&str>) -> bool {
if let Some(val) = some_strval {
if ["yes", "y", "on", "true"].contains(&val) {
true
} else if ["no", "n", "off", "false"].contains(&val) {
false
} else {
bug!(
"Unsupported value `{}` for option `{}` in environment variable {}",
val,
option,
RUSTC_COVERAGE_DEBUG_OPTIONS
)
}
} else {
true
}
}
fn counter_format_option_val(strval: &str) -> ExpressionFormat {
let mut counter_format = ExpressionFormat { id: false, block: false, operation: false };
let components = strval.splitn(3, '+');
for component in components {
match component {
"id" => counter_format.id = true,
"block" => counter_format.block = true,
"operation" => counter_format.operation = true,
_ => bug!(
"Unsupported counter_format choice `{}` in environment variable {}",
component,
RUSTC_COVERAGE_DEBUG_OPTIONS
),
}
}
counter_format
}
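A runnable replica of this parser (standalone, with a hypothetical `Fmt` struct standing in for `ExpressionFormat`, and a plain `split` where the original used `splitn(3, '+')`):

```rust
#[derive(Debug, Default, PartialEq)]
struct Fmt { id: bool, block: bool, operation: bool }

fn parse_counter_format(strval: &str) -> Fmt {
    let mut fmt = Fmt::default();
    for component in strval.split('+') {
        match component {
            "id" => fmt.id = true,
            "block" => fmt.block = true,
            "operation" => fmt.operation = true,
            other => panic!("unsupported counter_format choice `{other}`"),
        }
    }
    fmt
}

fn main() {
    // `counter-format=id+operation` enables exactly those two flags.
    assert_eq!(
        parse_counter_format("id+operation"),
        Fmt { id: true, block: false, operation: true },
    );
}
```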
#[derive(Debug, Clone)]
struct ExpressionFormat {
id: bool,
block: bool,
operation: bool,
}
impl Default for ExpressionFormat {
fn default() -> Self {
Self { id: false, block: true, operation: true }
}
}
/// If enabled, this struct maintains a map from `BcbCounter` IDs (as `Operand`) to
/// the `BcbCounter` data and optional label (normally, the counter's associated
/// `BasicCoverageBlock` format string, if any).
///
/// Use `format_counter` to convert one of these `BcbCounter` counters to a debug output string,
/// as directed by the `DebugOptions`. This allows the format of counter labels in logs and dump
/// files (including the `CoverageGraph` graphviz file) to be changed at runtime, via environment
/// variable.
///
/// `DebugCounters` supports a recursive rendering of `Expression` counters, so they can be
/// presented as nested expressions such as `(bcb3 - (bcb0 + bcb1))`.
pub(super) struct DebugCounters {
state: Option<DebugCountersState>,
}
#[derive(Default)]
struct DebugCountersState {
counters: FxHashMap<Operand, DebugCounter>,
}
impl DebugCounters {
pub fn new() -> Self {
Self { state: None }
}
pub fn enable(&mut self) {
debug_assert!(!self.is_enabled());
self.state = Some(DebugCountersState::default());
}
pub fn is_enabled(&self) -> bool {
self.state.is_some()
}
pub fn add_counter(&mut self, counter_kind: &BcbCounter, some_block_label: Option<String>) {
let Some(state) = &mut self.state else { return };
let id = counter_kind.as_operand();
state
.counters
.try_insert(id, DebugCounter::new(counter_kind.clone(), some_block_label))
.expect("attempt to add the same counter_kind to DebugCounters more than once");
}
pub fn some_block_label(&self, operand: Operand) -> Option<&String> {
let Some(state) = &self.state else { return None };
state.counters.get(&operand)?.some_block_label.as_ref()
}
pub fn format_counter(&self, counter_kind: &BcbCounter) -> String {
match *counter_kind {
BcbCounter::Counter { .. } => {
format!("Counter({})", self.format_counter_kind(counter_kind))
}
BcbCounter::Expression { .. } => {
format!("Expression({})", self.format_counter_kind(counter_kind))
}
}
}
fn format_counter_kind(&self, counter_kind: &BcbCounter) -> String {
let counter_format = &debug_options().counter_format;
if let BcbCounter::Expression { id, lhs, op, rhs } = *counter_kind {
if counter_format.operation {
return format!(
"{}{} {} {}",
if counter_format.id || !self.is_enabled() {
format!("#{} = ", id.index())
} else {
String::new()
},
self.format_operand(lhs),
match op {
Op::Add => "+",
Op::Subtract => "-",
},
self.format_operand(rhs),
);
}
}
let id = counter_kind.as_operand();
if let Some(state) = &self.state && (counter_format.block || !counter_format.id) {
if let Some(DebugCounter { some_block_label: Some(block_label), .. }) =
state.counters.get(&id)
{
return if counter_format.id {
format!("{}#{:?}", block_label, id)
} else {
block_label.to_string()
};
}
}
format!("#{:?}", id)
}
fn format_operand(&self, operand: Operand) -> String {
if matches!(operand, Operand::Zero) {
return String::from("0");
}
if let Some(state) = &self.state {
if let Some(DebugCounter { counter_kind, some_block_label }) =
state.counters.get(&operand)
{
if let BcbCounter::Expression { .. } = counter_kind {
if let Some(label) = some_block_label && debug_options().counter_format.block {
return format!(
"{}:({})",
label,
self.format_counter_kind(counter_kind)
);
}
return format!("({})", self.format_counter_kind(counter_kind));
}
return self.format_counter_kind(counter_kind);
}
}
format!("#{:?}", operand)
}
}
/// A non-public support class to `DebugCounters`.
#[derive(Debug)]
struct DebugCounter {
counter_kind: BcbCounter,
some_block_label: Option<String>,
}
impl DebugCounter {
fn new(counter_kind: BcbCounter, some_block_label: Option<String>) -> Self {
Self { counter_kind, some_block_label }
}
}
/// If enabled, this data structure captures additional debugging information used when generating
/// a Graphviz (.dot file) representation of the `CoverageGraph`, for debugging purposes.
pub(super) struct GraphvizData {
state: Option<GraphvizDataState>,
}
#[derive(Default)]
struct GraphvizDataState {
bcb_to_coverage_spans_with_counters:
FxHashMap<BasicCoverageBlock, Vec<(CoverageSpan, BcbCounter)>>,
bcb_to_dependency_counters: FxHashMap<BasicCoverageBlock, Vec<BcbCounter>>,
edge_to_counter: FxHashMap<(BasicCoverageBlock, BasicBlock), BcbCounter>,
}
impl GraphvizData {
pub fn new() -> Self {
Self { state: None }
}
pub fn enable(&mut self) {
debug_assert!(!self.is_enabled());
self.state = Some(GraphvizDataState::default());
}
pub fn is_enabled(&self) -> bool {
self.state.is_some()
}
pub fn add_bcb_coverage_span_with_counter(
&mut self,
bcb: BasicCoverageBlock,
coverage_span: &CoverageSpan,
counter_kind: &BcbCounter,
) {
let Some(state) = &mut self.state else { return };
state
.bcb_to_coverage_spans_with_counters
.entry(bcb)
.or_insert_with(Vec::new)
.push((coverage_span.clone(), counter_kind.clone()));
}
pub fn get_bcb_coverage_spans_with_counters(
&self,
bcb: BasicCoverageBlock,
) -> Option<&[(CoverageSpan, BcbCounter)]> {
let Some(state) = &self.state else { return None };
state.bcb_to_coverage_spans_with_counters.get(&bcb).map(Deref::deref)
}
pub fn add_bcb_dependency_counter(
&mut self,
bcb: BasicCoverageBlock,
counter_kind: &BcbCounter,
) {
let Some(state) = &mut self.state else { return };
state
.bcb_to_dependency_counters
.entry(bcb)
.or_insert_with(Vec::new)
.push(counter_kind.clone());
}
pub fn get_bcb_dependency_counters(&self, bcb: BasicCoverageBlock) -> Option<&[BcbCounter]> {
let Some(state) = &self.state else { return None };
state.bcb_to_dependency_counters.get(&bcb).map(Deref::deref)
}
pub fn set_edge_counter(
&mut self,
from_bcb: BasicCoverageBlock,
to_bb: BasicBlock,
counter_kind: &BcbCounter,
) {
let Some(state) = &mut self.state else { return };
state
.edge_to_counter
.try_insert((from_bcb, to_bb), counter_kind.clone())
.expect("invalid attempt to insert more than one edge counter for the same edge");
}
pub fn get_edge_counter(
&self,
from_bcb: BasicCoverageBlock,
to_bb: BasicBlock,
) -> Option<&BcbCounter> {
let Some(state) = &self.state else { return None };
state.edge_to_counter.get(&(from_bcb, to_bb))
}
}
/// If enabled, this struct captures additional data used to track whether expressions were used,
/// directly or indirectly, to compute the coverage counts for all `CoverageSpan`s, and any that are
/// _not_ used are retained in the `unused_expressions` Vec, to be included in debug output (logs
/// and/or a `CoverageGraph` graphviz output).
pub(super) struct UsedExpressions {
state: Option<UsedExpressionsState>,
}
#[derive(Default)]
struct UsedExpressionsState {
used_expression_operands: FxHashSet<Operand>,
unused_expressions: Vec<(BcbCounter, Option<BasicCoverageBlock>, BasicCoverageBlock)>,
}
impl UsedExpressions {
pub fn new() -> Self {
Self { state: None }
}
pub fn enable(&mut self) {
debug_assert!(!self.is_enabled());
self.state = Some(UsedExpressionsState::default())
}
pub fn is_enabled(&self) -> bool {
self.state.is_some()
}
pub fn add_expression_operands(&mut self, expression: &BcbCounter) {
let Some(state) = &mut self.state else { return };
if let BcbCounter::Expression { lhs, rhs, .. } = *expression {
state.used_expression_operands.insert(lhs);
state.used_expression_operands.insert(rhs);
}
}
pub fn expression_is_used(&self, expression: &BcbCounter) -> bool {
let Some(state) = &self.state else { return false };
state.used_expression_operands.contains(&expression.as_operand())
}
pub fn add_unused_expression_if_not_found(
&mut self,
expression: &BcbCounter,
edge_from_bcb: Option<BasicCoverageBlock>,
target_bcb: BasicCoverageBlock,
) {
let Some(state) = &mut self.state else { return };
if !state.used_expression_operands.contains(&expression.as_operand()) {
state.unused_expressions.push((expression.clone(), edge_from_bcb, target_bcb));
}
}
/// Return the list of unused counters (if any), each as a tuple of the counter (`BcbCounter`),
/// the optional `from_bcb` (if it was an edge counter), and the `target_bcb`.
pub fn get_unused_expressions(
&self,
) -> Vec<(BcbCounter, Option<BasicCoverageBlock>, BasicCoverageBlock)> {
let Some(state) = &self.state else { return Vec::new() };
state.unused_expressions.clone()
}
/// If enabled, validate that every BCB or edge counter not directly associated with a coverage
/// span is at least indirectly associated (it is a dependency of a BCB counter that _is_
/// associated with a coverage span).
pub fn validate(
&mut self,
bcb_counters_without_direct_coverage_spans: &[(
Option<BasicCoverageBlock>,
BasicCoverageBlock,
BcbCounter,
)],
) {
if !self.is_enabled() {
return;
}
let mut not_validated = bcb_counters_without_direct_coverage_spans
.iter()
.map(|(_, _, counter_kind)| counter_kind)
.collect::<Vec<_>>();
let mut validating_count = 0;
while not_validated.len() != validating_count {
let to_validate = not_validated.split_off(0);
validating_count = to_validate.len();
for counter_kind in to_validate {
if self.expression_is_used(counter_kind) {
self.add_expression_operands(counter_kind);
} else {
not_validated.push(counter_kind);
}
}
}
}
pub fn alert_on_unused_expressions(&self, debug_counters: &DebugCounters) {
let Some(state) = &self.state else { return };
for (counter_kind, edge_from_bcb, target_bcb) in &state.unused_expressions {
let unused_counter_message = if let Some(from_bcb) = edge_from_bcb.as_ref() {
format!(
"non-coverage edge counter found without a dependent expression, in \
{:?}->{:?}; counter={}",
from_bcb,
target_bcb,
debug_counters.format_counter(&counter_kind),
)
} else {
format!(
"non-coverage counter found without a dependent expression, in {:?}; \
counter={}",
target_bcb,
debug_counters.format_counter(&counter_kind),
)
};
if debug_options().allow_unused_expressions {
debug!("WARNING: {}", unused_counter_message);
} else {
bug!("{}", unused_counter_message);
}
}
}
}
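The `validate` fixpoint above is the interesting part of this deleted machinery: it repeatedly sweeps the counters that lack a direct coverage span, accepting any whose own operand is already known to be used and feeding that counter's operands back into the used set, until a sweep makes no progress. A self-contained model of that loop, with `u32` operand IDs standing in for `Operand`:

```rust
use std::collections::HashSet;

// (own_operand_id, lhs_operand_id, rhs_operand_id) of an expression counter.
type Expr = (u32, u32, u32);

fn find_unused(used: &mut HashSet<u32>, mut pending: Vec<Expr>) -> Vec<Expr> {
    loop {
        let before = pending.len();
        pending.retain(|&(id, lhs, rhs)| {
            if used.contains(&id) {
                used.insert(lhs);
                used.insert(rhs);
                false // validated; drop it from the pending list
            } else {
                true
            }
        });
        if pending.len() == before {
            return pending; // fixpoint: whatever is left is truly unused
        }
    }
}

fn main() {
    // Operand 11 is referenced by a span-associated expression; 12 is only
    // reachable through 11 (so it needs a second sweep); 99 is unreachable.
    let mut used = HashSet::from([11]);
    let unused = find_unused(&mut used, vec![(12, 1, 2), (11, 12, 0), (99, 3, 4)]);
    assert_eq!(unused, vec![(99, 3, 4)]);
}
```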
/// Generates the MIR pass `CoverageSpan`-specific spanview dump file.
pub(super) fn dump_coverage_spanview<'tcx>(
tcx: TyCtxt<'tcx>,
mir_body: &mir::Body<'tcx>,
basic_coverage_blocks: &CoverageGraph,
pass_name: &str,
body_span: Span,
coverage_spans: &[CoverageSpan],
) {
let mir_source = mir_body.source;
let def_id = mir_source.def_id();
let span_viewables = span_viewables(tcx, mir_body, basic_coverage_blocks, &coverage_spans);
let mut file = create_dump_file(tcx, "html", false, pass_name, &0i32, mir_body)
.expect("Unexpected error creating MIR spanview HTML file");
let crate_name = tcx.crate_name(def_id.krate);
let item_name = tcx.def_path(def_id).to_filename_friendly_no_crate();
let title = format!("{crate_name}.{item_name} - Coverage Spans");
spanview::write_document(tcx, body_span, span_viewables, &title, &mut file)
.expect("Unexpected IO error dumping coverage spans as HTML");
}
/// Converts the computed `BasicCoverageBlockData`s into `SpanViewable`s.
fn span_viewables<'tcx>(
tcx: TyCtxt<'tcx>,
mir_body: &mir::Body<'tcx>,
basic_coverage_blocks: &CoverageGraph,
coverage_spans: &[CoverageSpan],
) -> Vec<SpanViewable> {
let mut span_viewables = Vec::new();
for coverage_span in coverage_spans {
let tooltip = coverage_span.format_coverage_statements(tcx, mir_body);
let CoverageSpan { span, bcb, .. } = coverage_span;
let bcb_data = &basic_coverage_blocks[*bcb];
let id = bcb_data.id();
let leader_bb = bcb_data.leader_bb();
span_viewables.push(SpanViewable { bb: leader_bb, span: *span, id, tooltip });
}
span_viewables
}
/// Generates the MIR pass coverage-specific graphviz dump file.
pub(super) fn dump_coverage_graphviz<'tcx>(
tcx: TyCtxt<'tcx>,
mir_body: &mir::Body<'tcx>,
pass_name: &str,
basic_coverage_blocks: &CoverageGraph,
coverage_counters: &CoverageCounters,
graphviz_data: &GraphvizData,
intermediate_expressions: &[BcbCounter],
debug_used_expressions: &UsedExpressions,
) {
let debug_counters = &coverage_counters.debug_counters;
let mir_source = mir_body.source;
let def_id = mir_source.def_id();
let node_content = |bcb| {
bcb_to_string_sections(
tcx,
mir_body,
coverage_counters,
bcb,
&basic_coverage_blocks[bcb],
graphviz_data.get_bcb_coverage_spans_with_counters(bcb),
graphviz_data.get_bcb_dependency_counters(bcb),
// intermediate_expressions are injected into the mir::START_BLOCK, so
// include them in the first BCB.
if bcb.index() == 0 { Some(&intermediate_expressions) } else { None },
)
};
let edge_labels = |from_bcb| {
let from_bcb_data = &basic_coverage_blocks[from_bcb];
let from_terminator = from_bcb_data.terminator(mir_body);
let mut edge_labels = from_terminator.kind.fmt_successor_labels();
edge_labels.retain(|label| label != "unreachable");
let edge_counters = from_terminator
.successors()
.map(|successor_bb| graphviz_data.get_edge_counter(from_bcb, successor_bb));
iter::zip(&edge_labels, edge_counters)
.map(|(label, some_counter)| {
if let Some(counter) = some_counter {
format!("{}\n{}", label, debug_counters.format_counter(counter))
} else {
label.to_string()
}
})
.collect::<Vec<_>>()
};
let graphviz_name = format!("Cov_{}_{}", def_id.krate.index(), def_id.index.index());
let mut graphviz_writer =
GraphvizWriter::new(basic_coverage_blocks, &graphviz_name, node_content, edge_labels);
let unused_expressions = debug_used_expressions.get_unused_expressions();
if unused_expressions.len() > 0 {
graphviz_writer.set_graph_label(&format!(
"Unused expressions:\n {}",
unused_expressions
.as_slice()
.iter()
.map(|(counter_kind, edge_from_bcb, target_bcb)| {
if let Some(from_bcb) = edge_from_bcb.as_ref() {
format!(
"{:?}->{:?}: {}",
from_bcb,
target_bcb,
debug_counters.format_counter(&counter_kind),
)
} else {
format!(
"{:?}: {}",
target_bcb,
debug_counters.format_counter(&counter_kind),
)
}
})
.join("\n ")
));
}
let mut file = create_dump_file(tcx, "dot", false, pass_name, &0i32, mir_body)
.expect("Unexpected error creating BasicCoverageBlock graphviz DOT file");
graphviz_writer
.write_graphviz(tcx, &mut file)
.expect("Unexpected error writing BasicCoverageBlock graphviz DOT file");
}
fn bcb_to_string_sections<'tcx>(
tcx: TyCtxt<'tcx>,
mir_body: &mir::Body<'tcx>,
coverage_counters: &CoverageCounters,
bcb: BasicCoverageBlock,
bcb_data: &BasicCoverageBlockData,
some_coverage_spans_with_counters: Option<&[(CoverageSpan, BcbCounter)]>,
some_dependency_counters: Option<&[BcbCounter]>,
some_intermediate_expressions: Option<&[BcbCounter]>,
) -> Vec<String> {
let debug_counters = &coverage_counters.debug_counters;
let len = bcb_data.basic_blocks.len();
let mut sections = Vec::new();
if let Some(collect_intermediate_expressions) = some_intermediate_expressions {
sections.push(
collect_intermediate_expressions
.iter()
.map(|expression| {
format!("Intermediate {}", debug_counters.format_counter(expression))
})
.join("\n"),
);
}
if let Some(coverage_spans_with_counters) = some_coverage_spans_with_counters {
sections.push(
coverage_spans_with_counters
.iter()
.map(|(covspan, counter)| {
format!(
"{} at {}",
debug_counters.format_counter(counter),
covspan.format(tcx, mir_body)
)
})
.join("\n"),
);
}
if let Some(dependency_counters) = some_dependency_counters {
sections.push(format!(
"Non-coverage counters:\n {}",
dependency_counters
.iter()
.map(|counter| debug_counters.format_counter(counter))
.join(" \n"),
));
}
if let Some(counter_kind) = coverage_counters.bcb_counter(bcb) {
sections.push(format!("{counter_kind:?}"));
}
let non_term_blocks = bcb_data.basic_blocks[0..len - 1]
.iter()
.map(|&bb| format!("{:?}: {}", bb, mir_body[bb].terminator().kind.name()))
.collect::<Vec<_>>();
if non_term_blocks.len() > 0 {
sections.push(non_term_blocks.join("\n"));
}
sections.push(format!(
"{:?}: {}",
bcb_data.basic_blocks.last().unwrap(),
bcb_data.terminator(mir_body).kind.name(),
));
sections
}

View file

@ -1,4 +1,3 @@
use itertools::Itertools;
use rustc_data_structures::graph::dominators::{self, Dominators};
use rustc_data_structures::graph::{self, GraphSuccessors, WithNumNodes, WithStartNode};
use rustc_index::bit_set::BitSet;
@ -8,8 +7,6 @@ use rustc_middle::mir::{self, BasicBlock, BasicBlockData, Terminator, Terminator
use std::cmp::Ordering;
use std::ops::{Index, IndexMut};
const ID_SEPARATOR: &str = ",";
/// A coverage-specific simplification of the MIR control flow graph (CFG). The `CoverageGraph`s
/// nodes are `BasicCoverageBlock`s, which encompass one or more MIR `BasicBlock`s.
#[derive(Debug)]
@ -324,10 +321,6 @@ impl BasicCoverageBlockData {
pub fn terminator<'a, 'tcx>(&self, mir_body: &'a mir::Body<'tcx>) -> &'a Terminator<'tcx> {
&mir_body[self.last_bb()].terminator()
}
pub fn id(&self) -> String {
format!("@{}", self.basic_blocks.iter().map(|bb| bb.index().to_string()).join(ID_SEPARATOR))
}
}
/// Represents a successor from a branching BasicCoverageBlock (such as the arms of a `SwitchInt`)

View file

@ -1,7 +1,6 @@
pub mod query;
mod counters;
mod debug;
mod graph;
mod spans;
@ -20,7 +19,6 @@ use rustc_index::IndexVec;
use rustc_middle::hir;
use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrFlags;
use rustc_middle::mir::coverage::*;
use rustc_middle::mir::dump_enabled;
use rustc_middle::mir::{
self, BasicBlock, BasicBlockData, Coverage, SourceInfo, Statement, StatementKind, Terminator,
TerminatorKind,
@ -94,13 +92,12 @@ impl<'tcx> MirPass<'tcx> for InstrumentCoverage {
}
trace!("InstrumentCoverage starting for {:?}", mir_source.def_id());
Instrumentor::new(&self.name(), tcx, mir_body).inject_counters();
Instrumentor::new(tcx, mir_body).inject_counters();
trace!("InstrumentCoverage done for {:?}", mir_source.def_id());
}
}
struct Instrumentor<'a, 'tcx> {
pass_name: &'a str,
tcx: TyCtxt<'tcx>,
mir_body: &'a mut mir::Body<'tcx>,
source_file: Lrc<SourceFile>,
@ -112,7 +109,7 @@ struct Instrumentor<'a, 'tcx> {
}
impl<'a, 'tcx> Instrumentor<'a, 'tcx> {
fn new(pass_name: &'a str, tcx: TyCtxt<'tcx>, mir_body: &'a mut mir::Body<'tcx>) -> Self {
fn new(tcx: TyCtxt<'tcx>, mir_body: &'a mut mir::Body<'tcx>) -> Self {
let source_map = tcx.sess.source_map();
let def_id = mir_body.source.def_id();
let (some_fn_sig, hir_body) = fn_sig_and_body(tcx, def_id);
@ -141,7 +138,6 @@ impl<'a, 'tcx> Instrumentor<'a, 'tcx> {
let coverage_counters = CoverageCounters::new(&basic_coverage_blocks);
Self {
pass_name,
tcx,
mir_body,
source_file,
@ -154,28 +150,9 @@ impl<'a, 'tcx> Instrumentor<'a, 'tcx> {
}
fn inject_counters(&'a mut self) {
let tcx = self.tcx;
let mir_source = self.mir_body.source;
let def_id = mir_source.def_id();
let fn_sig_span = self.fn_sig_span;
let body_span = self.body_span;
let mut graphviz_data = debug::GraphvizData::new();
let mut debug_used_expressions = debug::UsedExpressions::new();
let dump_mir = dump_enabled(tcx, self.pass_name, def_id);
let dump_graphviz = dump_mir && tcx.sess.opts.unstable_opts.dump_mir_graphviz;
let dump_spanview = dump_mir && tcx.sess.opts.unstable_opts.dump_mir_spanview.is_some();
if dump_graphviz {
graphviz_data.enable();
self.coverage_counters.enable_debug();
}
if dump_graphviz || level_enabled!(tracing::Level::DEBUG) {
debug_used_expressions.enable();
}
////////////////////////////////////////////////////
// Compute `CoverageSpan`s from the `CoverageGraph`.
let coverage_spans = CoverageSpans::generate_coverage_spans(
@ -185,17 +162,6 @@ impl<'a, 'tcx> Instrumentor<'a, 'tcx> {
&self.basic_coverage_blocks,
);
if dump_spanview {
debug::dump_coverage_spanview(
tcx,
self.mir_body,
&self.basic_coverage_blocks,
self.pass_name,
body_span,
&coverage_spans,
);
}
////////////////////////////////////////////////////
// Create an optimized mix of `Counter`s and `Expression`s for the `CoverageGraph`. Ensure
// every `CoverageSpan` has a `Counter` or `Expression` assigned to its `BasicCoverageBlock`
@ -209,14 +175,6 @@ impl<'a, 'tcx> Instrumentor<'a, 'tcx> {
.make_bcb_counters(&mut self.basic_coverage_blocks, &coverage_spans);
if let Ok(()) = result {
// If debugging, add any intermediate expressions (which are not associated with any
// BCB) to the `debug_used_expressions` map.
if debug_used_expressions.is_enabled() {
for intermediate_expression in &self.coverage_counters.intermediate_expressions {
debug_used_expressions.add_expression_operands(intermediate_expression);
}
}
////////////////////////////////////////////////////
// Remove the counter or edge counter from each `CoverageSpan`'s associated
// `BasicCoverageBlock`, and inject a `Coverage` statement into the MIR.
@ -227,11 +185,7 @@ impl<'a, 'tcx> Instrumentor<'a, 'tcx> {
// These `CoverageSpan`-associated counters are removed from their associated
// `BasicCoverageBlock`s so that the only remaining counters in the `CoverageGraph`
// are indirect counters (to be injected next, without associated code regions).
self.inject_coverage_span_counters(
coverage_spans,
&mut graphviz_data,
&mut debug_used_expressions,
);
self.inject_coverage_span_counters(coverage_spans);
////////////////////////////////////////////////////
// For any remaining `BasicCoverageBlock` counters (that were not associated with
@ -239,37 +193,17 @@ impl<'a, 'tcx> Instrumentor<'a, 'tcx> {
// to ensure `BasicCoverageBlock` counters that other `Expression`s may depend on
// are in fact counted, even though they don't directly contribute to counting
// their own independent code region's coverage.
self.inject_indirect_counters(&mut graphviz_data, &mut debug_used_expressions);
self.inject_indirect_counters();
// Intermediate expressions will be injected as the final step, after generating
// debug output, if any.
////////////////////////////////////////////////////
};
if graphviz_data.is_enabled() {
// Even if there was an error, a partial CoverageGraph can still generate a useful
// graphviz output.
debug::dump_coverage_graphviz(
tcx,
self.mir_body,
self.pass_name,
&self.basic_coverage_blocks,
&self.coverage_counters,
&graphviz_data,
&self.coverage_counters.intermediate_expressions,
&debug_used_expressions,
);
}
if let Err(e) = result {
bug!("Error processing: {:?}: {:?}", self.mir_body.source.def_id(), e.message)
};
// Depending on current `debug_options()`, `alert_on_unused_expressions()` could panic, so
// this check is performed as late as possible, to allow other debug output (logs and dump
// files), which might be helpful in analyzing unused expressions, to still be generated.
debug_used_expressions.alert_on_unused_expressions(&self.coverage_counters.debug_counters);
////////////////////////////////////////////////////
// Finally, inject the intermediate expressions collected along the way.
for intermediate_expression in &self.coverage_counters.intermediate_expressions {
@ -285,15 +219,7 @@ impl<'a, 'tcx> Instrumentor<'a, 'tcx> {
/// `bcb` to its `Counter`, when injected. Subsequent `CoverageSpan`s for a BCB that already has
/// a `Counter` will inject an `Expression` instead, and compute its value by adding `ZERO` to
/// the BCB `Counter` value.
///
/// If debugging, add every BCB `Expression` associated with a `CoverageSpan`s to the
/// `used_expression_operands` map.
fn inject_coverage_span_counters(
&mut self,
coverage_spans: Vec<CoverageSpan>,
graphviz_data: &mut debug::GraphvizData,
debug_used_expressions: &mut debug::UsedExpressions,
) {
fn inject_coverage_span_counters(&mut self, coverage_spans: Vec<CoverageSpan>) {
let tcx = self.tcx;
let source_map = tcx.sess.source_map();
let body_span = self.body_span;
@ -307,12 +233,10 @@ impl<'a, 'tcx> Instrumentor<'a, 'tcx> {
self.coverage_counters.make_identity_counter(counter_operand)
} else if let Some(counter_kind) = self.coverage_counters.take_bcb_counter(bcb) {
bcb_counters[bcb] = Some(counter_kind.as_operand());
debug_used_expressions.add_expression_operands(&counter_kind);
counter_kind
} else {
bug!("Every BasicCoverageBlock should have a Counter or Expression");
};
graphviz_data.add_bcb_coverage_span_with_counter(bcb, &covspan, &counter_kind);
let code_region = make_code_region(source_map, file_name, span, body_span);
@ -333,11 +257,7 @@ impl<'a, 'tcx> Instrumentor<'a, 'tcx> {
/// associated with a `CoverageSpan`, should only exist if the counter is an `Expression`
/// dependency (one of the expression operands). Collect them, and inject the additional
/// counters into the MIR, without a reportable coverage span.
fn inject_indirect_counters(
&mut self,
graphviz_data: &mut debug::GraphvizData,
debug_used_expressions: &mut debug::UsedExpressions,
) {
fn inject_indirect_counters(&mut self) {
let mut bcb_counters_without_direct_coverage_spans = Vec::new();
for (target_bcb, counter_kind) in self.coverage_counters.drain_bcb_counters() {
bcb_counters_without_direct_coverage_spans.push((None, target_bcb, counter_kind));
@ -352,19 +272,8 @@ impl<'a, 'tcx> Instrumentor<'a, 'tcx> {
));
}
// If debug is enabled, validate that every BCB or edge counter not directly associated
// with a coverage span is at least indirectly associated (it is a dependency of a BCB
// counter that _is_ associated with a coverage span).
debug_used_expressions.validate(&bcb_counters_without_direct_coverage_spans);
for (edge_from_bcb, target_bcb, counter_kind) in bcb_counters_without_direct_coverage_spans
{
debug_used_expressions.add_unused_expression_if_not_found(
&counter_kind,
edge_from_bcb,
target_bcb,
);
match counter_kind {
BcbCounter::Counter { .. } => {
let inject_to_bb = if let Some(from_bcb) = edge_from_bcb {
@ -375,26 +284,17 @@ impl<'a, 'tcx> Instrumentor<'a, 'tcx> {
let to_bb = self.bcb_leader_bb(target_bcb);
let new_bb = inject_edge_counter_basic_block(self.mir_body, from_bb, to_bb);
graphviz_data.set_edge_counter(from_bcb, new_bb, &counter_kind);
debug!(
"Edge {:?} (last {:?}) -> {:?} (leader {:?}) requires a new MIR \
BasicBlock {:?}, for unclaimed edge counter {}",
edge_from_bcb,
from_bb,
target_bcb,
to_bb,
new_bb,
self.format_counter(&counter_kind),
BasicBlock {:?}, for unclaimed edge counter {:?}",
edge_from_bcb, from_bb, target_bcb, to_bb, new_bb, counter_kind,
);
new_bb
} else {
let target_bb = self.bcb_last_bb(target_bcb);
graphviz_data.add_bcb_dependency_counter(target_bcb, &counter_kind);
debug!(
"{:?} ({:?}) gets a new Coverage statement for unclaimed counter {}",
target_bcb,
target_bb,
self.format_counter(&counter_kind),
"{:?} ({:?}) gets a new Coverage statement for unclaimed counter {:?}",
target_bcb, target_bb, counter_kind,
);
target_bb
};
@ -429,11 +329,6 @@ impl<'a, 'tcx> Instrumentor<'a, 'tcx> {
&self.basic_coverage_blocks[bcb]
}
#[inline]
fn format_counter(&self, counter_kind: &BcbCounter) -> String {
self.coverage_counters.debug_counters.format_counter(counter_kind)
}
fn make_mir_coverage_kind(&self, counter_kind: &BcbCounter) -> CoverageKind {
match *counter_kind {
BcbCounter::Counter { id } => {

View file

@ -1,13 +1,10 @@
use super::graph::{BasicCoverageBlock, BasicCoverageBlockData, CoverageGraph, START_BCB};
use itertools::Itertools;
use rustc_data_structures::graph::WithNumNodes;
use rustc_middle::mir::spanview::source_range_no_file;
use rustc_middle::mir::{
self, AggregateKind, BasicBlock, FakeReadCause, Rvalue, Statement, StatementKind, Terminator,
TerminatorKind,
};
use rustc_middle::ty::TyCtxt;
use rustc_span::source_map::original_sp;
use rustc_span::{BytePos, ExpnKind, MacroKind, Span, Symbol};
@ -20,31 +17,6 @@ pub(super) enum CoverageStatement {
}
impl CoverageStatement {
pub fn format<'tcx>(&self, tcx: TyCtxt<'tcx>, mir_body: &mir::Body<'tcx>) -> String {
match *self {
Self::Statement(bb, span, stmt_index) => {
let stmt = &mir_body[bb].statements[stmt_index];
format!(
"{}: @{}[{}]: {:?}",
source_range_no_file(tcx, span),
bb.index(),
stmt_index,
stmt
)
}
Self::Terminator(bb, span) => {
let term = mir_body[bb].terminator();
format!(
"{}: @{}.{}: {:?}",
source_range_no_file(tcx, span),
bb.index(),
term.kind.name(),
term.kind
)
}
}
}
pub fn span(&self) -> Span {
match self {
Self::Statement(_, span, _) | Self::Terminator(_, span) => *span,
@ -150,27 +122,6 @@ impl CoverageSpan {
self.bcb == other.bcb
}
pub fn format<'tcx>(&self, tcx: TyCtxt<'tcx>, mir_body: &mir::Body<'tcx>) -> String {
format!(
"{}\n {}",
source_range_no_file(tcx, self.span),
self.format_coverage_statements(tcx, mir_body).replace('\n', "\n "),
)
}
pub fn format_coverage_statements<'tcx>(
&self,
tcx: TyCtxt<'tcx>,
mir_body: &mir::Body<'tcx>,
) -> String {
let mut sorted_coverage_statements = self.coverage_statements.clone();
sorted_coverage_statements.sort_unstable_by_key(|covstmt| match *covstmt {
CoverageStatement::Statement(bb, _, index) => (bb, index),
CoverageStatement::Terminator(bb, _) => (bb, usize::MAX),
});
sorted_coverage_statements.iter().map(|covstmt| covstmt.format(tcx, mir_body)).join("\n")
}
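The deleted `format_coverage_statements` relied on a simple sort key: statements order by `(block, index)`, and a block's terminator sorts after all of its statements via a `usize::MAX` sentinel. A standalone sketch of just that ordering trick:

```rust
#[derive(Debug, PartialEq)]
enum CovStmt {
    Statement(usize, usize), // (basic block, statement index)
    Terminator(usize),       // (basic block)
}

fn sort_cov_stmts(stmts: &mut [CovStmt]) {
    stmts.sort_unstable_by_key(|s| match *s {
        CovStmt::Statement(bb, idx) => (bb, idx),
        // usize::MAX sorts the terminator after every statement in its block.
        CovStmt::Terminator(bb) => (bb, usize::MAX),
    });
}

fn main() {
    let mut v = vec![
        CovStmt::Terminator(0),
        CovStmt::Statement(1, 0),
        CovStmt::Statement(0, 2),
    ];
    sort_cov_stmts(&mut v);
    assert_eq!(
        v,
        vec![
            CovStmt::Statement(0, 2),
            CovStmt::Terminator(0),
            CovStmt::Statement(1, 0),
        ]
    );
}
```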
/// If the span is part of a macro, returns the macro name symbol.
pub fn current_macro(&self) -> Option<Symbol> {
self.current_macro_or_none

View file

@ -6,7 +6,7 @@ use rustc_const_eval::const_eval::CheckAlignment;
use rustc_const_eval::interpret::{ImmTy, Immediate, InterpCx, OpTy, Projectable};
use rustc_data_structures::fx::FxHashMap;
use rustc_hir::def::DefKind;
use rustc_middle::mir::interpret::{AllocId, ConstAllocation, ConstValue, InterpResult, Scalar};
use rustc_middle::mir::interpret::{AllocId, ConstAllocation, InterpResult, Scalar};
use rustc_middle::mir::visit::{MutVisitor, PlaceContext, Visitor};
use rustc_middle::mir::*;
use rustc_middle::ty::layout::TyAndLayout;

View file

@ -153,7 +153,7 @@ impl EnumSizeOpt {
span,
user_ty: None,
literal: ConstantKind::Val(
interpret::ConstValue::Indirect { alloc_id, offset: Size::ZERO },
ConstValue::Indirect { alloc_id, offset: Size::ZERO },
tmp_ty,
),
};

View file

@ -1,7 +1,6 @@
//! Removes operations on ZST places, and convert ZST operands to constants.
use crate::MirPass;
use rustc_middle::mir::interpret::ConstValue;
use rustc_middle::mir::visit::*;
use rustc_middle::mir::*;
use rustc_middle::ty::{self, Ty, TyCtxt};

View file

@ -170,8 +170,7 @@ use rustc_hir as hir;
use rustc_hir::def::DefKind;
use rustc_hir::def_id::{DefId, DefIdMap, LocalDefId};
use rustc_hir::lang_items::LangItem;
use rustc_middle::mir::interpret::{AllocId, ConstValue};
use rustc_middle::mir::interpret::{ErrorHandled, GlobalAlloc, Scalar};
use rustc_middle::mir::interpret::{AllocId, ErrorHandled, GlobalAlloc, Scalar};
use rustc_middle::mir::mono::{InstantiationMode, MonoItem};
use rustc_middle::mir::visit::Visitor as MirVisitor;
use rustc_middle::mir::{self, Local, Location};
@ -1442,13 +1441,15 @@ fn collect_used_items<'tcx>(
#[instrument(skip(tcx, output), level = "debug")]
fn collect_const_value<'tcx>(
tcx: TyCtxt<'tcx>,
value: ConstValue<'tcx>,
value: mir::ConstValue<'tcx>,
output: &mut MonoItems<'tcx>,
) {
match value {
ConstValue::Scalar(Scalar::Ptr(ptr, _size)) => collect_alloc(tcx, ptr.provenance, output),
ConstValue::Indirect { alloc_id, .. } => collect_alloc(tcx, alloc_id, output),
ConstValue::Slice { data, start: _, end: _ } => {
mir::ConstValue::Scalar(Scalar::Ptr(ptr, _size)) => {
collect_alloc(tcx, ptr.provenance, output)
}
mir::ConstValue::Indirect { alloc_id, .. } => collect_alloc(tcx, alloc_id, output),
mir::ConstValue::Slice { data, meta: _ } => {
for &id in data.inner().provenance().ptrs().values() {
collect_alloc(tcx, id, output);
}

View file

@ -404,7 +404,7 @@ struct NodeInfo<K: DepKind> {
impl<K: DepKind> Encodable<FileEncoder> for NodeInfo<K> {
fn encode(&self, e: &mut FileEncoder) {
let header = SerializedNodeHeader::new(self);
e.emit_raw_bytes(&header.bytes);
e.write_array(header.bytes);
if header.len().is_none() {
e.emit_usize(self.edges.len());
@ -412,8 +412,10 @@ impl<K: DepKind> Encodable<FileEncoder> for NodeInfo<K> {
let bytes_per_index = header.bytes_per_index();
for node_index in self.edges.iter() {
let bytes = node_index.as_u32().to_le_bytes();
e.emit_raw_bytes(&bytes[..bytes_per_index]);
e.write_with(|dest| {
*dest = node_index.as_u32().to_le_bytes();
bytes_per_index
});
}
}
}
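The loop above writes only the low `bytes_per_index` bytes of each little-endian edge index, with the width taken once per node from the header. A self-contained model of that truncation (a `Vec<u8>` standing in for the encoder):

```rust
// All indices in one node share the same width, so the dropped high bytes
// are always zero for the indices actually being encoded.
fn write_edge_index(out: &mut Vec<u8>, index: u32, bytes_per_index: usize) {
    let bytes = index.to_le_bytes();
    out.extend_from_slice(&bytes[..bytes_per_index]);
}

fn main() {
    let mut out = Vec::new();
    // Every index here fits in two bytes, so two bytes per index suffice.
    for index in [3u32, 517, 65_000] {
        write_edge_index(&mut out, index, 2);
    }
    assert_eq!(out, [3, 0, 5, 2, 0xe8, 0xfd]);
}
```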

View file

@ -991,9 +991,9 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> {
if !is_prelude
&& let Some(max_vis) = max_vis.get()
&& !max_vis.is_at_least(import.expect_vis(), self.tcx)
{
self.lint_buffer.buffer_lint(UNUSED_IMPORTS, id, import.span, fluent::resolve_glob_import_doesnt_reexport);
}
{
self.lint_buffer.buffer_lint(UNUSED_IMPORTS, id, import.span, fluent::resolve_glob_import_doesnt_reexport);
}
return None;
}
_ => unreachable!(),

View file

@ -15,23 +15,20 @@ pub const fn largest_max_leb128_len() -> usize {
macro_rules! impl_write_unsigned_leb128 {
($fn_name:ident, $int_ty:ty) => {
#[inline]
pub fn $fn_name(
out: &mut [::std::mem::MaybeUninit<u8>; max_leb128_len::<$int_ty>()],
mut value: $int_ty,
) -> &[u8] {
pub fn $fn_name(out: &mut [u8; max_leb128_len::<$int_ty>()], mut value: $int_ty) -> usize {
let mut i = 0;
loop {
if value < 0x80 {
unsafe {
*out.get_unchecked_mut(i).as_mut_ptr() = value as u8;
*out.get_unchecked_mut(i) = value as u8;
}
i += 1;
break;
} else {
unsafe {
*out.get_unchecked_mut(i).as_mut_ptr() = ((value & 0x7f) | 0x80) as u8;
*out.get_unchecked_mut(i) = ((value & 0x7f) | 0x80) as u8;
}
value >>= 7;
@ -39,7 +36,7 @@ macro_rules! impl_write_unsigned_leb128 {
}
}
unsafe { ::std::mem::MaybeUninit::slice_assume_init_ref(&out.get_unchecked(..i)) }
i
}
};
}
@ -87,10 +84,7 @@ impl_read_unsigned_leb128!(read_usize_leb128, usize);
macro_rules! impl_write_signed_leb128 {
($fn_name:ident, $int_ty:ty) => {
#[inline]
pub fn $fn_name(
out: &mut [::std::mem::MaybeUninit<u8>; max_leb128_len::<$int_ty>()],
mut value: $int_ty,
) -> &[u8] {
pub fn $fn_name(out: &mut [u8; max_leb128_len::<$int_ty>()], mut value: $int_ty) -> usize {
let mut i = 0;
loop {
@ -104,7 +98,7 @@ macro_rules! impl_write_signed_leb128 {
}
unsafe {
*out.get_unchecked_mut(i).as_mut_ptr() = byte;
*out.get_unchecked_mut(i) = byte;
}
i += 1;
@ -114,7 +108,7 @@ macro_rules! impl_write_signed_leb128 {
}
}
unsafe { ::std::mem::MaybeUninit::slice_assume_init_ref(&out.get_unchecked(..i)) }
i
}
};
}
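The new convention in this file: the writer takes a plain, fully initialized `[u8; max_leb128_len::<T>()]` and returns a byte count, instead of filling `MaybeUninit` storage and returning a slice into it. A self-contained version for `u32`, with safe indexing in place of `get_unchecked_mut`:

```rust
// ceil(bits / 7): the most bytes a LEB128-encoded value of this width needs.
const fn max_leb128_len_u32() -> usize {
    (u32::BITS as usize + 6) / 7
}

fn write_u32_leb128(out: &mut [u8; max_leb128_len_u32()], mut value: u32) -> usize {
    let mut i = 0;
    loop {
        if value < 0x80 {
            out[i] = value as u8; // final byte: continuation bit clear
            i += 1;
            break;
        } else {
            out[i] = ((value & 0x7f) | 0x80) as u8; // continuation bit set
            value >>= 7;
            i += 1;
        }
    }
    i
}

fn main() {
    let mut buf = [0u8; max_leb128_len_u32()];
    let n = write_u32_leb128(&mut buf, 300);
    // 300 = 0b10_0101100: low 7 bits plus the continuation bit, then 0b10.
    assert_eq!(&buf[..n], &[0xac, 0x02]);
}
```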

View file

@ -17,6 +17,9 @@ Core encoding and decoding interfaces.
#![feature(new_uninit)]
#![feature(allocator_api)]
#![feature(ptr_sub_ptr)]
#![feature(slice_first_last_chunk)]
#![feature(inline_const)]
#![feature(const_option)]
#![cfg_attr(test, feature(test))]
#![allow(rustc::internal)]
#![deny(rustc::untranslatable_diagnostic)]

View file

@ -3,10 +3,8 @@ use crate::serialize::{Decodable, Decoder, Encodable, Encoder};
use std::fs::File;
use std::io::{self, Write};
use std::marker::PhantomData;
use std::mem::MaybeUninit;
use std::ops::Range;
use std::path::Path;
use std::ptr;
// -----------------------------------------------------------------------------
// Encoder
@ -24,10 +22,12 @@ const BUF_SIZE: usize = 8192;
/// size of the buffer, rather than the full length of the encoded data, and
/// because it doesn't need to reallocate memory along the way.
pub struct FileEncoder {
/// The input buffer. For adequate performance, we need more control over
/// buffering than `BufWriter` offers. If `BufWriter` ever offers a raw
/// buffer access API, we can use it, and remove `buf` and `buffered`.
buf: Box<[MaybeUninit<u8>]>,
// The input buffer. For adequate performance, we need to be able to write
// directly to the unwritten region of the buffer, without calling copy_from_slice.
// Note that our buffer is always initialized so that we can do that direct access
// without unsafe code. Users of this type write many more than BUF_SIZE bytes, so the
// initialization is approximately free.
buf: Box<[u8; BUF_SIZE]>,
buffered: usize,
flushed: usize,
file: File,
@ -38,15 +38,11 @@ pub struct FileEncoder {
impl FileEncoder {
pub fn new<P: AsRef<Path>>(path: P) -> io::Result<Self> {
// Create the file for reading and writing, because some encoders do both
// (e.g. the metadata encoder when -Zmeta-stats is enabled)
let file = File::options().read(true).write(true).create(true).truncate(true).open(path)?;
Ok(FileEncoder {
buf: Box::new_uninit_slice(BUF_SIZE),
buf: vec![0u8; BUF_SIZE].into_boxed_slice().try_into().unwrap(),
buffered: 0,
flushed: 0,
file,
file: File::create(path)?,
res: Ok(()),
})
}
@ -54,94 +50,19 @@ impl FileEncoder {
#[inline]
pub fn position(&self) -> usize {
// Tracking position this way instead of having a `self.position` field
// means that we don't have to update the position on every write call.
// means that we only need to update `self.buffered` on a write call,
// as opposed to updating `self.position` and `self.buffered`.
self.flushed + self.buffered
}
#[cold]
#[inline(never)]
pub fn flush(&mut self) {
// This is basically a copy of `BufWriter::flush`. If `BufWriter` ever
// offers a raw buffer access API, we can use it, and remove this.
/// Helper struct to ensure the buffer is updated after all the writes
/// are complete. It tracks the number of written bytes and drains them
/// all from the front of the buffer when dropped.
struct BufGuard<'a> {
buffer: &'a mut [u8],
encoder_buffered: &'a mut usize,
encoder_flushed: &'a mut usize,
flushed: usize,
}
impl<'a> BufGuard<'a> {
fn new(
buffer: &'a mut [u8],
encoder_buffered: &'a mut usize,
encoder_flushed: &'a mut usize,
) -> Self {
assert_eq!(buffer.len(), *encoder_buffered);
Self { buffer, encoder_buffered, encoder_flushed, flushed: 0 }
}
/// The unwritten part of the buffer
fn remaining(&self) -> &[u8] {
&self.buffer[self.flushed..]
}
/// Flag some bytes as removed from the front of the buffer
fn consume(&mut self, amt: usize) {
self.flushed += amt;
}
/// true if all of the bytes have been written
fn done(&self) -> bool {
self.flushed >= *self.encoder_buffered
}
}
impl Drop for BufGuard<'_> {
fn drop(&mut self) {
if self.flushed > 0 {
if self.done() {
*self.encoder_flushed += *self.encoder_buffered;
*self.encoder_buffered = 0;
} else {
self.buffer.copy_within(self.flushed.., 0);
*self.encoder_flushed += self.flushed;
*self.encoder_buffered -= self.flushed;
}
}
}
}
// If we've already had an error, do nothing. It'll get reported after
// `finish` is called.
if self.res.is_err() {
return;
}
let mut guard = BufGuard::new(
unsafe { MaybeUninit::slice_assume_init_mut(&mut self.buf[..self.buffered]) },
&mut self.buffered,
&mut self.flushed,
);
while !guard.done() {
match self.file.write(guard.remaining()) {
Ok(0) => {
self.res = Err(io::Error::new(
io::ErrorKind::WriteZero,
"failed to write the buffered data",
));
return;
}
Ok(n) => guard.consume(n),
Err(ref e) if e.kind() == io::ErrorKind::Interrupted => {}
Err(e) => {
self.res = Err(e);
return;
}
}
if self.res.is_ok() {
self.res = self.file.write_all(&self.buf[..self.buffered]);
}
self.flushed += self.buffered;
self.buffered = 0;
}
pub fn file(&self) -> &File {
@ -149,91 +70,89 @@ impl FileEncoder {
}
#[inline]
fn write_one(&mut self, value: u8) {
let mut buffered = self.buffered;
fn buffer_empty(&mut self) -> &mut [u8] {
// SAFETY: self.buffered is inbounds as an invariant of the type
unsafe { self.buf.get_unchecked_mut(self.buffered..) }
}
if std::intrinsics::unlikely(buffered + 1 > BUF_SIZE) {
self.flush();
buffered = 0;
#[cold]
#[inline(never)]
fn write_all_cold_path(&mut self, buf: &[u8]) {
self.flush();
if let Some(dest) = self.buf.get_mut(..buf.len()) {
dest.copy_from_slice(buf);
self.buffered += buf.len();
} else {
if self.res.is_ok() {
self.res = self.file.write_all(buf);
}
self.flushed += buf.len();
}
// SAFETY: The above check and `flush` ensures that there is enough
// room to write the input to the buffer.
unsafe {
*MaybeUninit::slice_as_mut_ptr(&mut self.buf).add(buffered) = value;
}
self.buffered = buffered + 1;
}
#[inline]
fn write_all(&mut self, buf: &[u8]) {
let buf_len = buf.len();
if std::intrinsics::likely(buf_len <= BUF_SIZE) {
let mut buffered = self.buffered;
if std::intrinsics::unlikely(buffered + buf_len > BUF_SIZE) {
self.flush();
buffered = 0;
}
// SAFETY: The above check and `flush` ensures that there is enough
// room to write the input to the buffer.
unsafe {
let src = buf.as_ptr();
let dst = MaybeUninit::slice_as_mut_ptr(&mut self.buf).add(buffered);
ptr::copy_nonoverlapping(src, dst, buf_len);
}
self.buffered = buffered + buf_len;
if let Some(dest) = self.buffer_empty().get_mut(..buf.len()) {
dest.copy_from_slice(buf);
self.buffered += buf.len();
} else {
self.write_all_unbuffered(buf);
self.write_all_cold_path(buf);
}
}
fn write_all_unbuffered(&mut self, mut buf: &[u8]) {
// If we've already had an error, do nothing. It'll get reported after
// `finish` is called.
if self.res.is_err() {
return;
}
if self.buffered > 0 {
/// Write up to `N` bytes to this encoder.
///
/// This function can be used to avoid the overhead of calling memcpy for writes that
/// have runtime-variable length, but are small and have a small fixed upper bound.
///
/// This can be used to do in-place encoding as is done for leb128 (without this function
/// we would need to write to a temporary buffer then memcpy into the encoder), and it can
/// also be used to implement the varint scheme we use for rmeta and dep graph encoding,
/// where we only want to encode the first few bytes of an integer. Copying in the whole
/// integer then only advancing the encoder state for the few bytes we care about is more
/// efficient than calling [`FileEncoder::write_all`], because variable-size copies are
/// always lowered to `memcpy`, which has overhead and contains a lot of logic we can bypass
/// with this function. Note that common architectures support fixed-size writes up to 8 bytes
/// with one instruction, so while this does in some sense do wasted work, we come out ahead.
#[inline]
pub fn write_with<const N: usize>(&mut self, visitor: impl FnOnce(&mut [u8; N]) -> usize) {
let flush_threshold = const { BUF_SIZE.checked_sub(N).unwrap() };
if std::intrinsics::unlikely(self.buffered > flush_threshold) {
self.flush();
}
// This is basically a copy of `Write::write_all` but also updates our
// `self.flushed`. It's necessary because `Write::write_all` does not
// return the number of bytes written when an error is encountered, and
// without that, we cannot accurately update `self.flushed` on error.
while !buf.is_empty() {
match self.file.write(buf) {
Ok(0) => {
self.res = Err(io::Error::new(
io::ErrorKind::WriteZero,
"failed to write whole buffer",
));
return;
}
Ok(n) => {
buf = &buf[n..];
self.flushed += n;
}
Err(ref e) if e.kind() == io::ErrorKind::Interrupted => {}
Err(e) => {
self.res = Err(e);
return;
}
}
// SAFETY: The check above ensures N <= self.buffer_empty().len(): if the
// buffered data exceeded the flush threshold, flush emptied the buffer.
// We produce a post-mono error if N > BUF_SIZE.
let buf = unsafe { self.buffer_empty().first_chunk_mut::<N>().unwrap_unchecked() };
let written = visitor(buf);
// We have to ensure that an errant visitor cannot cause self.buffered to exceed BUF_SIZE.
if written > N {
Self::panic_invalid_write::<N>(written);
}
self.buffered += written;
}
#[cold]
#[inline(never)]
fn panic_invalid_write<const N: usize>(written: usize) {
panic!("FileEncoder::write_with::<{N}> cannot be used to write {written} bytes");
}
/// Helper for calls where [`FileEncoder::write_with`] always writes the whole array.
#[inline]
pub fn write_array<const N: usize>(&mut self, buf: [u8; N]) {
self.write_with(|dest| {
*dest = buf;
N
})
}
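// A hedged usage sketch (not part of this commit): `encode_u32_low2` is a
// hypothetical caller that copies a whole 4-byte integer into the buffer via
// `write_with`, but only advances the encoder by the two low bytes it cares
// about — the fixed-size-copy-then-partial-advance scheme described in the
// doc comment above.
fn encode_u32_low2(enc: &mut FileEncoder, v: u32) {
    enc.write_with(|dest: &mut [u8; 4]| {
        // Fixed-size 4-byte copy; lowers to a single store, not a memcpy.
        *dest = v.to_le_bytes();
        // Only the two low-order bytes are meaningful in this made-up format.
        2
    });
}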
pub fn finish(mut self) -> Result<usize, io::Error> {
self.flush();
let res = std::mem::replace(&mut self.res, Ok(()));
res.map(|()| self.position())
match std::mem::replace(&mut self.res, Ok(())) {
Ok(()) => Ok(self.position()),
Err(e) => Err(e),
}
}
}
@ -241,7 +160,7 @@ impl Drop for FileEncoder {
fn drop(&mut self) {
// Likely to be a no-op, because `finish` should have been called and
// it also flushes. But do it just in case.
let _result = self.flush();
self.flush();
}
}
@ -249,26 +168,7 @@ macro_rules! write_leb128 {
($this_fn:ident, $int_ty:ty, $write_leb_fn:ident) => {
#[inline]
fn $this_fn(&mut self, v: $int_ty) {
const MAX_ENCODED_LEN: usize = $crate::leb128::max_leb128_len::<$int_ty>();
let mut buffered = self.buffered;
// This can't overflow because BUF_SIZE and MAX_ENCODED_LEN are both
// quite small.
if std::intrinsics::unlikely(buffered + MAX_ENCODED_LEN > BUF_SIZE) {
self.flush();
buffered = 0;
}
// SAFETY: The above check and flush ensure that there is enough
// room to write the encoded value to the buffer.
let buf = unsafe {
&mut *(self.buf.as_mut_ptr().add(buffered)
as *mut [MaybeUninit<u8>; MAX_ENCODED_LEN])
};
let encoded = leb128::$write_leb_fn(buf, v);
self.buffered = buffered + encoded.len();
self.write_with(|buf| leb128::$write_leb_fn(buf, v))
}
};
}
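// A minimal sketch (an assumption, not the exact `leb128` module code) of the
// shape a `$write_leb_fn` has after this change: it fills a fixed-size buffer
// and returns the length of the encoded prefix, which is exactly the
// `FnOnce(&mut [u8; N]) -> usize` contract that `write_with` expects.
fn write_u32_leb128_sketch(buf: &mut [u8; 5], mut value: u32) -> usize {
    let mut n = 0;
    loop {
        let byte = (value & 0x7f) as u8;
        value >>= 7;
        if value == 0 {
            buf[n] = byte;
            return n + 1;
        }
        // Set the continuation bit: more groups of 7 bits follow.
        buf[n] = byte | 0x80;
        n += 1;
    }
}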
@ -281,12 +181,12 @@ impl Encoder for FileEncoder {
#[inline]
fn emit_u16(&mut self, v: u16) {
self.write_all(&v.to_le_bytes());
self.write_array(v.to_le_bytes());
}
#[inline]
fn emit_u8(&mut self, v: u8) {
self.write_one(v);
self.write_array([v]);
}
write_leb128!(emit_isize, isize, write_isize_leb128);
@ -296,7 +196,7 @@ impl Encoder for FileEncoder {
#[inline]
fn emit_i16(&mut self, v: i16) {
self.write_all(&v.to_le_bytes());
self.write_array(v.to_le_bytes());
}
#[inline]
@ -495,7 +395,7 @@ impl Encodable<FileEncoder> for IntEncodedWithFixedSize {
#[inline]
fn encode(&self, e: &mut FileEncoder) {
let _start_pos = e.position();
e.emit_raw_bytes(&self.0.to_le_bytes());
e.write_array(self.0.to_le_bytes());
let _end_pos = e.position();
debug_assert_eq!((_end_pos - _start_pos), IntEncodedWithFixedSize::ENCODED_SIZE);
}

View file

@ -1,8 +1,4 @@
#![feature(maybe_uninit_slice)]
#![feature(maybe_uninit_uninit_array)]
use rustc_serialize::leb128::*;
use std::mem::MaybeUninit;
use rustc_serialize::Decoder;
macro_rules! impl_test_unsigned_leb128 {
@ -24,9 +20,10 @@ macro_rules! impl_test_unsigned_leb128 {
let mut stream = Vec::new();
let mut buf = Default::default();
for &x in &values {
let mut buf = MaybeUninit::uninit_array();
stream.extend($write_fn_name(&mut buf, x));
let n = $write_fn_name(&mut buf, x);
stream.extend(&buf[..n]);
}
let mut decoder = rustc_serialize::opaque::MemDecoder::new(&stream, 0);
@ -70,9 +67,10 @@ macro_rules! impl_test_signed_leb128 {
let mut stream = Vec::new();
let mut buf = Default::default();
for &x in &values {
let mut buf = MaybeUninit::uninit_array();
stream.extend($write_fn_name(&mut buf, x));
let n = $write_fn_name(&mut buf, x);
stream.extend(&buf[..n]);
}
let mut decoder = rustc_serialize::opaque::MemDecoder::new(&stream, 0);

View file

@ -1478,15 +1478,12 @@ options! {
dump_mir_exclude_pass_number: bool = (false, parse_bool, [UNTRACKED],
"exclude the pass number when dumping MIR (used in tests) (default: no)"),
dump_mir_graphviz: bool = (false, parse_bool, [UNTRACKED],
"in addition to `.mir` files, create graphviz `.dot` files (and with \
`-Z instrument-coverage`, also create a `.dot` file for the MIR-derived \
coverage graph) (default: no)"),
"in addition to `.mir` files, create graphviz `.dot` files (default: no)"),
dump_mir_spanview: Option<MirSpanview> = (None, parse_mir_spanview, [UNTRACKED],
"in addition to `.mir` files, create `.html` files to view spans for \
all `statement`s (including terminators), only `terminator` spans, or \
computed `block` spans (one span encompassing a block's terminator and \
all statements). If `-Z instrument-coverage` is also enabled, create \
an additional `.html` file showing the computed coverage spans."),
all statements)."),
dump_mono_stats: SwitchWithOptPath = (SwitchWithOptPath::Disabled,
parse_switch_with_opt_path, [UNTRACKED],
"output statistics about monomorphization collection"),

View file

@ -1,4 +1,7 @@
use rustc_middle::mir::interpret::{alloc_range, AllocRange, ConstValue, Pointer};
use rustc_middle::mir::{
interpret::{alloc_range, AllocRange, Pointer},
ConstValue,
};
use crate::{
rustc_smir::{Stable, Tables},
@ -44,14 +47,12 @@ pub fn new_allocation<'tcx>(
tables.tcx.layout_of(rustc_middle::ty::ParamEnv::empty().and(ty)).unwrap().align;
new_empty_allocation(align.abi)
}
ConstValue::Slice { data, start, end } => {
ConstValue::Slice { data, meta } => {
let alloc_id = tables.tcx.reserve_and_set_memory_alloc(data);
let ptr = Pointer::new(alloc_id, rustc_target::abi::Size::from_bytes(start));
let ptr = Pointer::new(alloc_id, rustc_target::abi::Size::ZERO);
let scalar_ptr = rustc_middle::mir::interpret::Scalar::from_pointer(ptr, &tables.tcx);
let scalar_len = rustc_middle::mir::interpret::Scalar::from_target_usize(
(end - start) as u64,
&tables.tcx,
);
let scalar_meta =
rustc_middle::mir::interpret::Scalar::from_target_usize(meta, &tables.tcx);
let layout =
tables.tcx.layout_of(rustc_middle::ty::ParamEnv::reveal_all().and(ty)).unwrap();
let mut allocation =
@ -66,8 +67,8 @@ pub fn new_allocation<'tcx>(
allocation
.write_scalar(
&tables.tcx,
alloc_range(tables.tcx.data_layout.pointer_size, scalar_len.size()),
scalar_len,
alloc_range(tables.tcx.data_layout.pointer_size, scalar_meta.size()),
scalar_meta,
)
.unwrap();
allocation.stable(tables)

View file

@ -127,10 +127,39 @@ impl FileLoader for RealFileLoader {
let mut bytes = Lrc::new_uninit_slice(len as usize);
let mut buf = BorrowedBuf::from(Lrc::get_mut(&mut bytes).unwrap());
file.read_buf_exact(buf.unfilled())?;
match file.read_buf_exact(buf.unfilled()) {
Ok(()) => {}
Err(e) if e.kind() == io::ErrorKind::UnexpectedEof => {
drop(bytes);
return fs::read(path).map(Vec::into);
}
Err(e) => return Err(e),
}
// SAFETY: If the read_buf_exact call returns Ok(()), then we have
// read len bytes and initialized the buffer.
Ok(unsafe { bytes.assume_init() })
let bytes = unsafe { bytes.assume_init() };
// At this point, we've read all the bytes that filesystem metadata reported exist.
// But we are not guaranteed to be at the end of the file, because we did not attempt to do
// a read with a non-zero-sized buffer and get Ok(0).
// So we do a small read into a fixed-size buffer. If the read returns no bytes then we're
// already done, and we just return the Lrc we built above.
// If the read returns bytes however, we just fall back to reading into a Vec then turning
// that into an Lrc, losing our nice peak memory behavior. This fallback code path should
// be rarely exercised.
let mut probe = [0u8; 32];
let n = loop {
match file.read(&mut probe) {
Ok(0) => return Ok(bytes),
Err(e) if e.kind() == io::ErrorKind::Interrupted => continue,
Err(e) => return Err(e),
Ok(n) => break n,
}
};
let mut bytes: Vec<u8> = bytes.iter().copied().chain(probe[..n].iter().copied()).collect();
file.read_to_end(&mut bytes)?;
Ok(bytes.into())
}
}
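// A self-contained, std-only sketch (simplified from the method above) of the
// probe-read strategy: trust fs::metadata for the initial allocation, then
// verify with a small extra read and fall back to plain Vec reading if the
// reported length was wrong in either direction.
use std::fs::File;
use std::io::{self, Read};
use std::path::Path;

fn read_despite_lying_stat(path: &Path) -> io::Result<Vec<u8>> {
    let mut file = File::open(path)?;
    let len = file.metadata()?.len() as usize;
    let mut bytes = vec![0u8; len];
    // stat over-reported: the file is shorter than claimed, so start over.
    if let Err(e) = file.read_exact(&mut bytes) {
        return if e.kind() == io::ErrorKind::UnexpectedEof {
            std::fs::read(path)
        } else {
            Err(e)
        };
    }
    // Probe for extra bytes; Ok(0) means we really were at end-of-file.
    let mut probe = [0u8; 32];
    let n = loop {
        match file.read(&mut probe) {
            Ok(n) => break n,
            Err(e) if e.kind() == io::ErrorKind::Interrupted => continue,
            Err(e) => return Err(e),
        }
    };
    if n > 0 {
        // stat under-reported: append the probe and whatever else remains.
        bytes.extend_from_slice(&probe[..n]);
        file.read_to_end(&mut bytes)?;
    }
    Ok(bytes)
}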

View file

@ -567,3 +567,30 @@ fn test_next_point() {
assert_eq!(span.hi().0, 6);
assert!(sm.span_to_snippet(span).is_err());
}
#[cfg(target_os = "linux")]
#[test]
fn read_binary_file_handles_lying_stat() {
// read_binary_file tries to read the contents of a file into an Lrc<[u8]> while
// never having two copies of the data in memory at once. This is an optimization
// to support include_bytes! with large files. But since Rust allocators are
// sensitive to alignment, our implementation can't be bootstrapped off calling
// std::fs::read. So we test that we have the same behavior even on files where
// fs::metadata lies.
// stat always says that /proc/self/cmdline is length 0, but it isn't.
let cmdline = Path::new("/proc/self/cmdline");
let len = std::fs::metadata(cmdline).unwrap().len() as usize;
let real = std::fs::read(cmdline).unwrap();
assert!(len < real.len());
let bin = RealFileLoader.read_binary_file(cmdline).unwrap();
assert_eq!(&real[..], &bin[..]);
// stat always says that /sys/devices/system/cpu/kernel_max is the size of a block.
let kernel_max = Path::new("/sys/devices/system/cpu/kernel_max");
let len = std::fs::metadata(kernel_max).unwrap().len() as usize;
let real = std::fs::read(kernel_max).unwrap();
assert!(len > real.len());
let bin = RealFileLoader.read_binary_file(kernel_max).unwrap();
assert_eq!(&real[..], &bin[..]);
}

View file

@ -83,6 +83,17 @@ where
}
FieldsShape::Union(_) => {
if !arg_layout.is_zst() {
if arg_layout.is_transparent() {
let non_1zst_elem = arg_layout.non_1zst_field(cx).expect("not exactly one non-1-ZST field in non-ZST repr(transparent) union").1;
return should_use_fp_conv_helper(
cx,
&non_1zst_elem,
xlen,
flen,
field1_kind,
field2_kind,
);
}
return Err(CannotUseFpConv);
}
}
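// Hypothetical crate-level illustration of the case handled above (the same
// hunk appears again below for a second target backend): a non-ZST
// repr(transparent) union — unstable `transparent_unions` feature — has
// exactly one non-1-ZST field, and classification is forwarded to that field,
// so this union can still be passed in a floating-point register.
#![feature(transparent_unions)]

#[repr(transparent)]
union TransparentF64 {
    value: f64,
}

extern "C" fn frob(x: TransparentF64) -> f64 {
    // SAFETY: the union's only field is always initialized by construction.
    unsafe { x.value }
}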

View file

@ -89,6 +89,17 @@ where
}
FieldsShape::Union(_) => {
if !arg_layout.is_zst() {
if arg_layout.is_transparent() {
let non_1zst_elem = arg_layout.non_1zst_field(cx).expect("not exactly one non-1-ZST field in non-ZST repr(transparent) union").1;
return should_use_fp_conv_helper(
cx,
&non_1zst_elem,
xlen,
flen,
field1_kind,
field2_kind,
);
}
return Err(CannotUseFpConv);
}
}

View file

@ -66,6 +66,7 @@ pub trait TyAbiInterface<'a, C>: Sized + std::fmt::Debug {
fn is_never(this: TyAndLayout<'a, Self>) -> bool;
fn is_tuple(this: TyAndLayout<'a, Self>) -> bool;
fn is_unit(this: TyAndLayout<'a, Self>) -> bool;
fn is_transparent(this: TyAndLayout<'a, Self>) -> bool;
}
impl<'a, Ty> TyAndLayout<'a, Ty> {
@ -136,6 +137,13 @@ impl<'a, Ty> TyAndLayout<'a, Ty> {
Ty::is_unit(self)
}
pub fn is_transparent<C>(self) -> bool
where
Ty: TyAbiInterface<'a, C>,
{
Ty::is_transparent(self)
}
pub fn offset_of_subfield<C>(self, cx: &C, indices: impl Iterator<Item = usize>) -> Size
where
Ty: TyAbiInterface<'a, C>,

View file

@ -344,7 +344,8 @@ impl<'a, 'tcx> EvalCtxt<'a, 'tcx> {
goal: Goal<'tcx, ty::Predicate<'tcx>>,
) -> Result<(bool, Certainty, Vec<Goal<'tcx, ty::Predicate<'tcx>>>), NoSolution> {
let (orig_values, canonical_goal) = self.canonicalize_goal(goal);
let mut goal_evaluation = self.inspect.new_goal_evaluation(goal, goal_evaluation_kind);
let mut goal_evaluation =
self.inspect.new_goal_evaluation(goal, &orig_values, goal_evaluation_kind);
let encountered_overflow = self.search_graph.encountered_overflow();
let canonical_response = EvalCtxt::evaluate_canonical_goal(
self.tcx(),
@ -568,7 +569,7 @@ impl<'a, 'tcx> EvalCtxt<'a, 'tcx> {
GoalEvaluationKind::Nested { is_normalizes_to_hack: IsNormalizesToHack::Yes },
unconstrained_goal,
)?;
self.add_goals(instantiate_goals);
self.nested_goals.goals.extend(instantiate_goals);
// Finally, equate the goal's RHS with the unconstrained var.
// We put the nested goals from this into goals instead of
@ -605,7 +606,7 @@ impl<'a, 'tcx> EvalCtxt<'a, 'tcx> {
GoalEvaluationKind::Nested { is_normalizes_to_hack: IsNormalizesToHack::No },
goal,
)?;
self.add_goals(instantiate_goals);
self.nested_goals.goals.extend(instantiate_goals);
if has_changed {
unchanged_certainty = None;
}
@ -613,7 +614,7 @@ impl<'a, 'tcx> EvalCtxt<'a, 'tcx> {
match certainty {
Certainty::Yes => {}
Certainty::Maybe(_) => {
self.add_goal(goal);
self.nested_goals.goals.push(goal);
unchanged_certainty = unchanged_certainty.map(|c| c.unify_with(certainty));
}
}

View file

@ -10,17 +10,21 @@
//! [c]: https://rustc-dev-guide.rust-lang.org/solve/canonicalization.html
use super::{CanonicalInput, Certainty, EvalCtxt, Goal};
use crate::solve::canonicalize::{CanonicalizeMode, Canonicalizer};
use crate::solve::{response_no_constraints_raw, CanonicalResponse, QueryResult, Response};
use crate::solve::{
inspect, response_no_constraints_raw, CanonicalResponse, QueryResult, Response,
};
use rustc_data_structures::fx::FxHashSet;
use rustc_index::IndexVec;
use rustc_infer::infer::canonical::query_response::make_query_region_constraints;
use rustc_infer::infer::canonical::CanonicalVarValues;
use rustc_infer::infer::canonical::{CanonicalExt, QueryRegionConstraints};
use rustc_infer::infer::InferCtxt;
use rustc_infer::infer::{DefineOpaqueTypes, InferCtxt, InferOk};
use rustc_middle::infer::canonical::Canonical;
use rustc_middle::traits::query::NoSolution;
use rustc_middle::traits::solve::{
ExternalConstraintsData, MaybeCause, PredefinedOpaquesData, QueryInput,
};
use rustc_middle::traits::ObligationCause;
use rustc_middle::ty::{
self, BoundVar, GenericArgKind, Ty, TyCtxt, TypeFoldable, TypeFolder, TypeSuperFoldable,
TypeVisitableExt,
@ -29,6 +33,22 @@ use rustc_span::DUMMY_SP;
use std::iter;
use std::ops::Deref;
trait ResponseT<'tcx> {
fn var_values(&self) -> CanonicalVarValues<'tcx>;
}
impl<'tcx> ResponseT<'tcx> for Response<'tcx> {
fn var_values(&self) -> CanonicalVarValues<'tcx> {
self.var_values
}
}
impl<'tcx, T> ResponseT<'tcx> for inspect::State<'tcx, T> {
fn var_values(&self) -> CanonicalVarValues<'tcx> {
self.var_values
}
}
impl<'tcx> EvalCtxt<'_, 'tcx> {
/// Canonicalizes the goal remembering the original values
/// for each bound variable.
@ -188,12 +208,14 @@ impl<'tcx> EvalCtxt<'_, 'tcx> {
original_values: Vec<ty::GenericArg<'tcx>>,
response: CanonicalResponse<'tcx>,
) -> Result<(Certainty, Vec<Goal<'tcx, ty::Predicate<'tcx>>>), NoSolution> {
let substitution = self.compute_query_response_substitution(&original_values, &response);
let substitution =
Self::compute_query_response_substitution(self.infcx, &original_values, &response);
let Response { var_values, external_constraints, certainty } =
response.substitute(self.tcx(), &substitution);
let nested_goals = self.unify_query_var_values(param_env, &original_values, var_values)?;
let nested_goals =
Self::unify_query_var_values(self.infcx, param_env, &original_values, var_values)?;
let ExternalConstraintsData { region_constraints, opaque_types } =
external_constraints.deref();
@ -206,21 +228,21 @@ impl<'tcx> EvalCtxt<'_, 'tcx> {
/// This returns the substitutions to instantiate the bound variables of
/// the canonical response. This depends on the `original_values` for the
/// bound variables.
fn compute_query_response_substitution(
&self,
fn compute_query_response_substitution<T: ResponseT<'tcx>>(
infcx: &InferCtxt<'tcx>,
original_values: &[ty::GenericArg<'tcx>],
response: &CanonicalResponse<'tcx>,
response: &Canonical<'tcx, T>,
) -> CanonicalVarValues<'tcx> {
// FIXME: Longterm canonical queries should deal with all placeholders
// created inside of the query directly instead of returning them to the
// caller.
let prev_universe = self.infcx.universe();
let prev_universe = infcx.universe();
let universes_created_in_query = response.max_universe.index();
for _ in 0..universes_created_in_query {
self.infcx.create_next_universe();
infcx.create_next_universe();
}
let var_values = response.value.var_values;
let var_values = response.value.var_values();
assert_eq!(original_values.len(), var_values.len());
// If the query did not make progress with constraining inference variables,
@ -254,13 +276,13 @@ impl<'tcx> EvalCtxt<'_, 'tcx> {
}
}
let var_values = self.tcx().mk_args_from_iter(response.variables.iter().enumerate().map(
let var_values = infcx.tcx.mk_args_from_iter(response.variables.iter().enumerate().map(
|(index, info)| {
if info.universe() != ty::UniverseIndex::ROOT {
// A variable from inside a binder of the query. While ideally these shouldn't
// exist at all (see the FIXME at the start of this method), we have to deal with
// them for now.
self.infcx.instantiate_canonical_var(DUMMY_SP, info, |idx| {
infcx.instantiate_canonical_var(DUMMY_SP, info, |idx| {
ty::UniverseIndex::from(prev_universe.index() + idx.index())
})
} else if info.is_existential() {
@ -274,7 +296,7 @@ impl<'tcx> EvalCtxt<'_, 'tcx> {
if let Some(v) = opt_values[BoundVar::from_usize(index)] {
v
} else {
self.infcx.instantiate_canonical_var(DUMMY_SP, info, |_| prev_universe)
infcx.instantiate_canonical_var(DUMMY_SP, info, |_| prev_universe)
}
} else {
// For placeholders which were already part of the input, we simply map this
@ -287,9 +309,9 @@ impl<'tcx> EvalCtxt<'_, 'tcx> {
CanonicalVarValues { var_values }
}
#[instrument(level = "debug", skip(self, param_env), ret)]
#[instrument(level = "debug", skip(infcx, param_env), ret)]
fn unify_query_var_values(
&self,
infcx: &InferCtxt<'tcx>,
param_env: ty::ParamEnv<'tcx>,
original_values: &[ty::GenericArg<'tcx>],
var_values: CanonicalVarValues<'tcx>,
@ -298,7 +320,18 @@ impl<'tcx> EvalCtxt<'_, 'tcx> {
let mut nested_goals = vec![];
for (&orig, response) in iter::zip(original_values, var_values.var_values) {
nested_goals.extend(self.eq_and_get_goals(param_env, orig, response)?);
nested_goals.extend(
infcx
.at(&ObligationCause::dummy(), param_env)
.eq(DefineOpaqueTypes::No, orig, response)
.map(|InferOk { value: (), obligations }| {
obligations.into_iter().map(|o| Goal::from(o))
})
.map_err(|e| {
debug!(?e, "failed to equate");
NoSolution
})?,
);
}
Ok(nested_goals)
@ -403,3 +436,35 @@ impl<'tcx> TypeFolder<TyCtxt<'tcx>> for EagerResolver<'_, 'tcx> {
}
}
}
impl<'tcx> inspect::ProofTreeBuilder<'tcx> {
pub fn make_canonical_state<T: TypeFoldable<TyCtxt<'tcx>>>(
ecx: &EvalCtxt<'_, 'tcx>,
data: T,
) -> inspect::CanonicalState<'tcx, T> {
let state = inspect::State { var_values: ecx.var_values, data };
let state = state.fold_with(&mut EagerResolver { infcx: ecx.infcx });
Canonicalizer::canonicalize(
ecx.infcx,
CanonicalizeMode::Response { max_input_universe: ecx.max_input_universe },
&mut vec![],
state,
)
}
pub fn instantiate_canonical_state<T: TypeFoldable<TyCtxt<'tcx>>>(
infcx: &InferCtxt<'tcx>,
param_env: ty::ParamEnv<'tcx>,
original_values: &[ty::GenericArg<'tcx>],
state: inspect::CanonicalState<'tcx, T>,
) -> Result<(Vec<Goal<'tcx, ty::Predicate<'tcx>>>, T), NoSolution> {
let substitution =
EvalCtxt::compute_query_response_substitution(infcx, original_values, &state);
let inspect::State { var_values, data } = state.substitute(infcx.tcx, &substitution);
let nested_goals =
EvalCtxt::unify_query_var_values(infcx, param_env, original_values, var_values)?;
Ok((nested_goals, data))
}
}

View file

@ -0,0 +1,235 @@
/// An infrastructure to mechanically analyse proof trees.
///
/// It is unavoidable that this representation is somewhat
/// lossy as it should hide quite a few semantically relevant things,
/// e.g. canonicalization and the order of nested goals.
///
/// @lcnr: However, a lot of the weirdness here is not strictly necessary
/// and could be improved in the future. This is mostly good enough for
/// coherence right now and was annoying to implement, so I am leaving it
/// as is until we start using it for something else.
use std::ops::ControlFlow;
use rustc_infer::infer::InferCtxt;
use rustc_middle::traits::query::NoSolution;
use rustc_middle::traits::solve::{inspect, QueryResult};
use rustc_middle::traits::solve::{Certainty, Goal};
use rustc_middle::ty;
use crate::solve::inspect::ProofTreeBuilder;
use crate::solve::{GenerateProofTree, InferCtxtEvalExt, UseGlobalCache};
pub struct InspectGoal<'a, 'tcx> {
infcx: &'a InferCtxt<'tcx>,
depth: usize,
orig_values: &'a [ty::GenericArg<'tcx>],
goal: Goal<'tcx, ty::Predicate<'tcx>>,
evaluation: &'a inspect::GoalEvaluation<'tcx>,
}
pub struct InspectCandidate<'a, 'tcx> {
goal: &'a InspectGoal<'a, 'tcx>,
kind: inspect::ProbeKind<'tcx>,
nested_goals: Vec<inspect::CanonicalState<'tcx, Goal<'tcx, ty::Predicate<'tcx>>>>,
result: QueryResult<'tcx>,
}
impl<'a, 'tcx> InspectCandidate<'a, 'tcx> {
pub fn infcx(&self) -> &'a InferCtxt<'tcx> {
self.goal.infcx
}
pub fn kind(&self) -> inspect::ProbeKind<'tcx> {
self.kind
}
pub fn result(&self) -> Result<Certainty, NoSolution> {
self.result.map(|c| c.value.certainty)
}
/// Visit the nested goals of this candidate.
///
/// FIXME(@lcnr): we have to slightly adapt this API
/// to also use it to compute the most relevant goal
/// for fulfillment errors. Will do that once we actually
/// need it.
pub fn visit_nested<V: ProofTreeVisitor<'tcx>>(
&self,
visitor: &mut V,
) -> ControlFlow<V::BreakTy> {
// HACK: An arbitrary cutoff to avoid dealing with overflow and cycles.
if self.goal.depth <= 10 {
let infcx = self.goal.infcx;
infcx.probe(|_| {
let mut instantiated_goals = vec![];
for goal in &self.nested_goals {
let goal = match ProofTreeBuilder::instantiate_canonical_state(
infcx,
self.goal.goal.param_env,
self.goal.orig_values,
*goal,
) {
Ok((_goals, goal)) => goal,
Err(NoSolution) => {
warn!(
"unexpected failure when instantiating {:?}: {:?}",
goal, self.nested_goals
);
return ControlFlow::Continue(());
}
};
instantiated_goals.push(goal);
}
for &goal in &instantiated_goals {
let (_, proof_tree) =
infcx.evaluate_root_goal(goal, GenerateProofTree::Yes(UseGlobalCache::No));
let proof_tree = proof_tree.unwrap();
visitor.visit_goal(&InspectGoal::new(
infcx,
self.goal.depth + 1,
&proof_tree,
))?;
}
ControlFlow::Continue(())
})?;
}
ControlFlow::Continue(())
}
}
impl<'a, 'tcx> InspectGoal<'a, 'tcx> {
pub fn infcx(&self) -> &'a InferCtxt<'tcx> {
self.infcx
}
pub fn goal(&self) -> Goal<'tcx, ty::Predicate<'tcx>> {
self.goal
}
pub fn result(&self) -> Result<Certainty, NoSolution> {
self.evaluation.evaluation.result.map(|c| c.value.certainty)
}
fn candidates_recur(
&'a self,
candidates: &mut Vec<InspectCandidate<'a, 'tcx>>,
nested_goals: &mut Vec<inspect::CanonicalState<'tcx, Goal<'tcx, ty::Predicate<'tcx>>>>,
probe: &inspect::Probe<'tcx>,
) {
for step in &probe.steps {
match step {
&inspect::ProbeStep::AddGoal(goal) => nested_goals.push(goal),
inspect::ProbeStep::EvaluateGoals(_) => (),
inspect::ProbeStep::NestedProbe(ref probe) => {
// Nested probes have to prove goals added in their parent
// but do not leak them, so we truncate the added goals
// afterwards.
let num_goals = nested_goals.len();
self.candidates_recur(candidates, nested_goals, probe);
nested_goals.truncate(num_goals);
}
}
}
match probe.kind {
inspect::ProbeKind::NormalizedSelfTyAssembly
| inspect::ProbeKind::UnsizeAssembly
| inspect::ProbeKind::UpcastProjectionCompatibility => (),
// We add a candidate for the root evaluation if there
// is only one way to prove a given goal, e.g. for `WellFormed`.
//
// FIXME: This is currently wrong if we don't even try any
// candidates, e.g. for a trait goal, as in this case `candidates` is
// actually supposed to be empty.
inspect::ProbeKind::Root { result } => {
if candidates.is_empty() {
candidates.push(InspectCandidate {
goal: self,
kind: probe.kind,
nested_goals: nested_goals.clone(),
result,
});
}
}
inspect::ProbeKind::MiscCandidate { name: _, result }
| inspect::ProbeKind::TraitCandidate { source: _, result } => {
candidates.push(InspectCandidate {
goal: self,
kind: probe.kind,
nested_goals: nested_goals.clone(),
result,
});
}
}
}
pub fn candidates(&'a self) -> Vec<InspectCandidate<'a, 'tcx>> {
let mut candidates = vec![];
let last_eval_step = match self.evaluation.evaluation.kind {
inspect::CanonicalGoalEvaluationKind::Overflow
| inspect::CanonicalGoalEvaluationKind::CacheHit(_) => {
warn!("unexpected root evaluation: {:?}", self.evaluation);
return vec![];
}
inspect::CanonicalGoalEvaluationKind::Uncached { ref revisions } => {
if let Some(last) = revisions.last() {
last
} else {
return vec![];
}
}
};
let mut nested_goals = vec![];
self.candidates_recur(&mut candidates, &mut nested_goals, &last_eval_step.evaluation);
candidates
}
fn new(
infcx: &'a InferCtxt<'tcx>,
depth: usize,
root: &'a inspect::GoalEvaluation<'tcx>,
) -> Self {
match root.kind {
inspect::GoalEvaluationKind::Root { ref orig_values } => InspectGoal {
infcx,
depth,
orig_values,
goal: infcx.resolve_vars_if_possible(root.uncanonicalized_goal),
evaluation: root,
},
inspect::GoalEvaluationKind::Nested { .. } => unreachable!(),
}
}
}
/// The public API to interact with proof trees.
pub trait ProofTreeVisitor<'tcx> {
type BreakTy;
fn visit_goal(&mut self, goal: &InspectGoal<'_, 'tcx>) -> ControlFlow<Self::BreakTy>;
}
pub trait ProofTreeInferCtxtExt<'tcx> {
fn visit_proof_tree<V: ProofTreeVisitor<'tcx>>(
&self,
goal: Goal<'tcx, ty::Predicate<'tcx>>,
visitor: &mut V,
) -> ControlFlow<V::BreakTy>;
}
impl<'tcx> ProofTreeInferCtxtExt<'tcx> for InferCtxt<'tcx> {
fn visit_proof_tree<V: ProofTreeVisitor<'tcx>>(
&self,
goal: Goal<'tcx, ty::Predicate<'tcx>>,
visitor: &mut V,
) -> ControlFlow<V::BreakTy> {
let (_, proof_tree) =
self.evaluate_root_goal(goal, GenerateProofTree::Yes(UseGlobalCache::No));
let proof_tree = proof_tree.unwrap();
visitor.visit_goal(&InspectGoal::new(self, 0, &proof_tree))
}
}
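// A hedged usage sketch for the visitor API above: count ambiguous goals in a
// proof tree. This mirrors the shape of `AmbiguityCausesVisitor` in the
// coherence changes later in this diff; constructing the root `goal` and the
// `InferCtxt` is elided.
struct CountAmbiguous {
    count: usize,
}

impl<'tcx> ProofTreeVisitor<'tcx> for CountAmbiguous {
    type BreakTy = !;

    fn visit_goal(&mut self, goal: &InspectGoal<'_, 'tcx>) -> ControlFlow<Self::BreakTy> {
        if matches!(goal.result(), Ok(Certainty::Maybe(_))) {
            self.count += 1;
        }
        // Recurse through each candidate's nested goals.
        for candidate in goal.candidates() {
            candidate.visit_nested(self)?;
        }
        ControlFlow::Continue(())
    }
}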

View file

@ -1,153 +1,53 @@
//! Building proof trees incrementally during trait solving.
//!
//! This code is *a bit* of a mess and can hopefully be
//! mostly ignored. For a general overview of how it works,
//! see the comment on [ProofTreeBuilder].
use rustc_middle::traits::query::NoSolution;
use rustc_middle::traits::solve::inspect::{self, CacheHit, ProbeKind};
use rustc_middle::traits::solve::{
CanonicalInput, Certainty, Goal, IsNormalizesToHack, QueryInput, QueryResult,
};
use rustc_middle::ty::{self, TyCtxt};
use rustc_session::config::DumpSolverProofTree;
use super::eval_ctxt::UseGlobalCache;
use super::{GenerateProofTree, GoalEvaluationKind};
use crate::solve::eval_ctxt::UseGlobalCache;
use crate::solve::{self, inspect, EvalCtxt, GenerateProofTree};
#[derive(Eq, PartialEq, Debug)]
pub struct WipGoalEvaluation<'tcx> {
pub uncanonicalized_goal: Goal<'tcx, ty::Predicate<'tcx>>,
pub kind: WipGoalEvaluationKind,
pub evaluation: Option<WipCanonicalGoalEvaluation<'tcx>>,
pub returned_goals: Vec<Goal<'tcx, ty::Predicate<'tcx>>>,
/// The core data structure when building proof trees.
///
/// In case the current evaluation does not generate a proof
/// tree, `state` is simply `None` and we avoid any work.
///
/// The possible states of the solver are represented via
/// variants of [DebugSolver]. For any nested computation we call
/// `ProofTreeBuilder::new_nested_computation_kind` which
/// creates a new `ProofTreeBuilder` to temporarily replace the
/// current one. Once that nested computation is done,
/// `ProofTreeBuilder::nested_computation_kind` is called
/// to add the finished nested evaluation to the parent.
///
/// We provide additional information to the current state
/// by calling methods such as `ProofTreeBuilder::probe_kind`.
///
/// The actual structure closely mirrors the finished proof
/// trees. At the end of trait solving `ProofTreeBuilder::finalize`
/// is called to recursively convert the whole structure to a
/// finished proof tree.
pub(in crate::solve) struct ProofTreeBuilder<'tcx> {
state: Option<Box<BuilderData<'tcx>>>,
}
impl<'tcx> WipGoalEvaluation<'tcx> {
pub fn finalize(self) -> inspect::GoalEvaluation<'tcx> {
inspect::GoalEvaluation {
uncanonicalized_goal: self.uncanonicalized_goal,
kind: match self.kind {
WipGoalEvaluationKind::Root => inspect::GoalEvaluationKind::Root,
WipGoalEvaluationKind::Nested { is_normalizes_to_hack } => {
inspect::GoalEvaluationKind::Nested { is_normalizes_to_hack }
}
},
evaluation: self.evaluation.unwrap().finalize(),
returned_goals: self.returned_goals,
}
}
}
#[derive(Eq, PartialEq, Debug)]
pub enum WipGoalEvaluationKind {
Root,
Nested { is_normalizes_to_hack: IsNormalizesToHack },
}
#[derive(Eq, PartialEq, Debug)]
pub enum WipCanonicalGoalEvaluationKind {
Overflow,
CacheHit(CacheHit),
}
#[derive(Eq, PartialEq, Debug)]
pub struct WipCanonicalGoalEvaluation<'tcx> {
pub goal: CanonicalInput<'tcx>,
pub kind: Option<WipCanonicalGoalEvaluationKind>,
pub revisions: Vec<WipGoalEvaluationStep<'tcx>>,
pub result: Option<QueryResult<'tcx>>,
}
impl<'tcx> WipCanonicalGoalEvaluation<'tcx> {
pub fn finalize(self) -> inspect::CanonicalGoalEvaluation<'tcx> {
let kind = match self.kind {
Some(WipCanonicalGoalEvaluationKind::Overflow) => {
inspect::CanonicalGoalEvaluationKind::Overflow
}
Some(WipCanonicalGoalEvaluationKind::CacheHit(hit)) => {
inspect::CanonicalGoalEvaluationKind::CacheHit(hit)
}
None => inspect::CanonicalGoalEvaluationKind::Uncached {
revisions: self
.revisions
.into_iter()
.map(WipGoalEvaluationStep::finalize)
.collect(),
},
};
inspect::CanonicalGoalEvaluation { goal: self.goal, kind, result: self.result.unwrap() }
}
}
#[derive(Eq, PartialEq, Debug)]
pub struct WipAddedGoalsEvaluation<'tcx> {
pub evaluations: Vec<Vec<WipGoalEvaluation<'tcx>>>,
pub result: Option<Result<Certainty, NoSolution>>,
}
impl<'tcx> WipAddedGoalsEvaluation<'tcx> {
pub fn finalize(self) -> inspect::AddedGoalsEvaluation<'tcx> {
inspect::AddedGoalsEvaluation {
evaluations: self
.evaluations
.into_iter()
.map(|evaluations| {
evaluations.into_iter().map(WipGoalEvaluation::finalize).collect()
})
.collect(),
result: self.result.unwrap(),
}
}
}
#[derive(Eq, PartialEq, Debug)]
pub struct WipGoalEvaluationStep<'tcx> {
pub instantiated_goal: QueryInput<'tcx, ty::Predicate<'tcx>>,
pub evaluation: WipProbe<'tcx>,
}
impl<'tcx> WipGoalEvaluationStep<'tcx> {
pub fn finalize(self) -> inspect::GoalEvaluationStep<'tcx> {
let evaluation = self.evaluation.finalize();
match evaluation.kind {
ProbeKind::Root { .. } => (),
_ => unreachable!("unexpected root evaluation: {evaluation:?}"),
}
inspect::GoalEvaluationStep { instantiated_goal: self.instantiated_goal, evaluation }
}
}
#[derive(Eq, PartialEq, Debug)]
pub struct WipProbe<'tcx> {
pub steps: Vec<WipProbeStep<'tcx>>,
pub kind: Option<ProbeKind<'tcx>>,
}
impl<'tcx> WipProbe<'tcx> {
pub fn finalize(self) -> inspect::Probe<'tcx> {
inspect::Probe {
steps: self.steps.into_iter().map(WipProbeStep::finalize).collect(),
kind: self.kind.unwrap(),
}
}
}
#[derive(Eq, PartialEq, Debug)]
pub enum WipProbeStep<'tcx> {
AddGoal(Goal<'tcx, ty::Predicate<'tcx>>),
EvaluateGoals(WipAddedGoalsEvaluation<'tcx>),
NestedProbe(WipProbe<'tcx>),
}
impl<'tcx> WipProbeStep<'tcx> {
pub fn finalize(self) -> inspect::ProbeStep<'tcx> {
match self {
WipProbeStep::AddGoal(goal) => inspect::ProbeStep::AddGoal(goal),
WipProbeStep::EvaluateGoals(eval) => inspect::ProbeStep::EvaluateGoals(eval.finalize()),
WipProbeStep::NestedProbe(probe) => inspect::ProbeStep::NestedProbe(probe.finalize()),
}
}
struct BuilderData<'tcx> {
tree: DebugSolver<'tcx>,
use_global_cache: UseGlobalCache,
}
/// The current state of the proof tree builder. At most places
/// in the code, only one or two variants are actually possible.
///
/// We simply ICE in case that assumption is broken.
#[derive(Debug)]
pub enum DebugSolver<'tcx> {
enum DebugSolver<'tcx> {
Root,
GoalEvaluation(WipGoalEvaluation<'tcx>),
CanonicalGoalEvaluation(WipCanonicalGoalEvaluation<'tcx>),
@ -186,13 +86,143 @@ impl<'tcx> From<WipProbe<'tcx>> for DebugSolver<'tcx> {
}
}
pub struct ProofTreeBuilder<'tcx> {
state: Option<Box<BuilderData<'tcx>>>,
#[derive(Eq, PartialEq, Debug)]
struct WipGoalEvaluation<'tcx> {
pub uncanonicalized_goal: Goal<'tcx, ty::Predicate<'tcx>>,
pub kind: WipGoalEvaluationKind<'tcx>,
pub evaluation: Option<WipCanonicalGoalEvaluation<'tcx>>,
pub returned_goals: Vec<Goal<'tcx, ty::Predicate<'tcx>>>,
}
struct BuilderData<'tcx> {
tree: DebugSolver<'tcx>,
use_global_cache: UseGlobalCache,
impl<'tcx> WipGoalEvaluation<'tcx> {
fn finalize(self) -> inspect::GoalEvaluation<'tcx> {
inspect::GoalEvaluation {
uncanonicalized_goal: self.uncanonicalized_goal,
kind: match self.kind {
WipGoalEvaluationKind::Root { orig_values } => {
inspect::GoalEvaluationKind::Root { orig_values }
}
WipGoalEvaluationKind::Nested { is_normalizes_to_hack } => {
inspect::GoalEvaluationKind::Nested { is_normalizes_to_hack }
}
},
evaluation: self.evaluation.unwrap().finalize(),
returned_goals: self.returned_goals,
}
}
}
#[derive(Eq, PartialEq, Debug)]
pub(in crate::solve) enum WipGoalEvaluationKind<'tcx> {
Root { orig_values: Vec<ty::GenericArg<'tcx>> },
Nested { is_normalizes_to_hack: IsNormalizesToHack },
}
#[derive(Eq, PartialEq, Debug)]
pub(in crate::solve) enum WipCanonicalGoalEvaluationKind {
Overflow,
CacheHit(inspect::CacheHit),
}
#[derive(Eq, PartialEq, Debug)]
struct WipCanonicalGoalEvaluation<'tcx> {
goal: CanonicalInput<'tcx>,
kind: Option<WipCanonicalGoalEvaluationKind>,
revisions: Vec<WipGoalEvaluationStep<'tcx>>,
result: Option<QueryResult<'tcx>>,
}
impl<'tcx> WipCanonicalGoalEvaluation<'tcx> {
fn finalize(self) -> inspect::CanonicalGoalEvaluation<'tcx> {
let kind = match self.kind {
Some(WipCanonicalGoalEvaluationKind::Overflow) => {
inspect::CanonicalGoalEvaluationKind::Overflow
}
Some(WipCanonicalGoalEvaluationKind::CacheHit(hit)) => {
inspect::CanonicalGoalEvaluationKind::CacheHit(hit)
}
None => inspect::CanonicalGoalEvaluationKind::Uncached {
revisions: self
.revisions
.into_iter()
.map(WipGoalEvaluationStep::finalize)
.collect(),
},
};
inspect::CanonicalGoalEvaluation { goal: self.goal, kind, result: self.result.unwrap() }
}
}
#[derive(Eq, PartialEq, Debug)]
struct WipAddedGoalsEvaluation<'tcx> {
evaluations: Vec<Vec<WipGoalEvaluation<'tcx>>>,
result: Option<Result<Certainty, NoSolution>>,
}
impl<'tcx> WipAddedGoalsEvaluation<'tcx> {
fn finalize(self) -> inspect::AddedGoalsEvaluation<'tcx> {
inspect::AddedGoalsEvaluation {
evaluations: self
.evaluations
.into_iter()
.map(|evaluations| {
evaluations.into_iter().map(WipGoalEvaluation::finalize).collect()
})
.collect(),
result: self.result.unwrap(),
}
}
}
#[derive(Eq, PartialEq, Debug)]
struct WipGoalEvaluationStep<'tcx> {
instantiated_goal: QueryInput<'tcx, ty::Predicate<'tcx>>,
evaluation: WipProbe<'tcx>,
}
impl<'tcx> WipGoalEvaluationStep<'tcx> {
fn finalize(self) -> inspect::GoalEvaluationStep<'tcx> {
let evaluation = self.evaluation.finalize();
match evaluation.kind {
inspect::ProbeKind::Root { .. } => (),
_ => unreachable!("unexpected root evaluation: {evaluation:?}"),
}
inspect::GoalEvaluationStep { instantiated_goal: self.instantiated_goal, evaluation }
}
}
#[derive(Eq, PartialEq, Debug)]
struct WipProbe<'tcx> {
pub steps: Vec<WipProbeStep<'tcx>>,
pub kind: Option<inspect::ProbeKind<'tcx>>,
}
impl<'tcx> WipProbe<'tcx> {
fn finalize(self) -> inspect::Probe<'tcx> {
inspect::Probe {
steps: self.steps.into_iter().map(WipProbeStep::finalize).collect(),
kind: self.kind.unwrap(),
}
}
}
#[derive(Eq, PartialEq, Debug)]
enum WipProbeStep<'tcx> {
AddGoal(inspect::CanonicalState<'tcx, Goal<'tcx, ty::Predicate<'tcx>>>),
EvaluateGoals(WipAddedGoalsEvaluation<'tcx>),
NestedProbe(WipProbe<'tcx>),
}
impl<'tcx> WipProbeStep<'tcx> {
fn finalize(self) -> inspect::ProbeStep<'tcx> {
match self {
WipProbeStep::AddGoal(goal) => inspect::ProbeStep::AddGoal(goal),
WipProbeStep::EvaluateGoals(eval) => inspect::ProbeStep::EvaluateGoals(eval.finalize()),
WipProbeStep::NestedProbe(probe) => inspect::ProbeStep::NestedProbe(probe.finalize()),
}
}
}
impl<'tcx> ProofTreeBuilder<'tcx> {
@ -273,16 +303,19 @@ impl<'tcx> ProofTreeBuilder<'tcx> {
self.state.is_none()
}
pub(super) fn new_goal_evaluation(
pub(in crate::solve) fn new_goal_evaluation(
&mut self,
goal: Goal<'tcx, ty::Predicate<'tcx>>,
kind: GoalEvaluationKind,
orig_values: &[ty::GenericArg<'tcx>],
kind: solve::GoalEvaluationKind,
) -> ProofTreeBuilder<'tcx> {
self.nested(|| WipGoalEvaluation {
uncanonicalized_goal: goal,
kind: match kind {
GoalEvaluationKind::Root => WipGoalEvaluationKind::Root,
GoalEvaluationKind::Nested { is_normalizes_to_hack } => {
solve::GoalEvaluationKind::Root => {
WipGoalEvaluationKind::Root { orig_values: orig_values.to_vec() }
}
solve::GoalEvaluationKind::Nested { is_normalizes_to_hack } => {
WipGoalEvaluationKind::Nested { is_normalizes_to_hack }
}
},
@ -379,7 +412,7 @@ impl<'tcx> ProofTreeBuilder<'tcx> {
self.nested(|| WipProbe { steps: vec![], kind: None })
}
pub fn probe_kind(&mut self, probe_kind: ProbeKind<'tcx>) {
pub fn probe_kind(&mut self, probe_kind: inspect::ProbeKind<'tcx>) {
if let Some(this) = self.as_mut() {
match this {
DebugSolver::Probe(this) => {
@ -390,18 +423,22 @@ impl<'tcx> ProofTreeBuilder<'tcx> {
}
}
pub fn add_goal(&mut self, goal: Goal<'tcx, ty::Predicate<'tcx>>) {
if let Some(this) = self.as_mut() {
match this {
DebugSolver::GoalEvaluationStep(WipGoalEvaluationStep {
evaluation: WipProbe { steps, .. },
..
})
| DebugSolver::Probe(WipProbe { steps, .. }) => {
steps.push(WipProbeStep::AddGoal(goal))
}
_ => unreachable!(),
}
pub fn add_goal(ecx: &mut EvalCtxt<'_, 'tcx>, goal: Goal<'tcx, ty::Predicate<'tcx>>) {
// Can't use `if let Some(this) = ecx.inspect.as_mut()` here because
// we have to immutably use the `EvalCtxt` for `make_canonical_state`.
if ecx.inspect.is_noop() {
return;
}
let goal = Self::make_canonical_state(ecx, goal);
match ecx.inspect.as_mut().unwrap() {
DebugSolver::GoalEvaluationStep(WipGoalEvaluationStep {
evaluation: WipProbe { steps, .. },
..
})
| DebugSolver::Probe(WipProbe { steps, .. }) => steps.push(WipProbeStep::AddGoal(goal)),
s => unreachable!("tried to add {goal:?} to {s:?}"),
}
}
@ -471,7 +508,10 @@ impl<'tcx> ProofTreeBuilder<'tcx> {
}
DebugSolver::GoalEvaluationStep(evaluation_step) => {
assert_eq!(
evaluation_step.evaluation.kind.replace(ProbeKind::Root { result }),
evaluation_step
.evaluation
.kind
.replace(inspect::ProbeKind::Root { result }),
None
);
}

View file

@ -0,0 +1,7 @@
pub use rustc_middle::traits::solve::inspect::*;
mod build;
pub(in crate::solve) use build::*;
mod analyse;
pub use analyse::*;

View file

@ -235,7 +235,7 @@ impl<'tcx> EvalCtxt<'_, 'tcx> {
#[instrument(level = "debug", skip(self))]
fn add_goal(&mut self, goal: Goal<'tcx, ty::Predicate<'tcx>>) {
self.inspect.add_goal(goal);
inspect::ProofTreeBuilder::add_goal(self, goal);
self.nested_goals.goals.push(goal);
}

View file

@ -346,13 +346,15 @@ impl<'tcx> assembly::GoalKind<'tcx> for ProjectionPredicate<'tcx> {
ty::TraitRef::from_lang_item(tcx, LangItem::Sized, DUMMY_SP, [output])
});
let pred = tupled_inputs_and_output
.map_bound(|(inputs, output)| ty::ProjectionPredicate {
let pred = ty::Clause::from_projection_clause(
tcx,
tupled_inputs_and_output.map_bound(|(inputs, output)| ty::ProjectionPredicate {
projection_ty: tcx
.mk_alias_ty(goal.predicate.def_id(), [goal.predicate.self_ty(), inputs]),
term: output.into(),
})
.to_predicate(tcx);
}),
);
// A built-in `Fn` impl only holds if the output is sized.
// (FIXME: technically we only need to check this if the type is a fn ptr...)
Self::consider_implied_clause(ecx, goal, pred, [goal.with(tcx, output_is_sized_pred)])

View file

@ -6,9 +6,15 @@
use crate::infer::outlives::env::OutlivesEnvironment;
use crate::infer::InferOk;
use crate::solve::inspect;
use crate::solve::inspect::{InspectGoal, ProofTreeInferCtxtExt, ProofTreeVisitor};
use crate::traits::engine::TraitEngineExt;
use crate::traits::outlives_bounds::InferCtxtExt as _;
use crate::traits::query::evaluate_obligation::InferCtxtExt;
use crate::traits::select::{IntercrateAmbiguityCause, TreatInductiveCycleAs};
use crate::traits::structural_normalize::StructurallyNormalizeExt;
use crate::traits::util::impl_subject_and_oblig;
use crate::traits::NormalizeExt;
use crate::traits::SkipLeakCheck;
use crate::traits::{
self, Obligation, ObligationCause, ObligationCtxt, PredicateObligation, PredicateObligations,
@ -18,10 +24,13 @@ use rustc_data_structures::fx::FxIndexSet;
use rustc_errors::Diagnostic;
use rustc_hir::def_id::{DefId, CRATE_DEF_ID, LOCAL_CRATE};
use rustc_infer::infer::{DefineOpaqueTypes, InferCtxt, TyCtxtInferExt};
use rustc_infer::traits::util;
use rustc_infer::traits::{util, TraitEngine};
use rustc_middle::traits::query::NoSolution;
use rustc_middle::traits::solve::{Certainty, Goal};
use rustc_middle::traits::specialization_graph::OverlapMode;
use rustc_middle::traits::DefiningAnchor;
use rustc_middle::ty::fast_reject::{DeepRejectCtxt, TreatParams};
use rustc_middle::ty::print::with_no_trimmed_paths;
use rustc_middle::ty::visit::{TypeVisitable, TypeVisitableExt};
use rustc_middle::ty::{self, Ty, TyCtxt, TypeVisitor};
use rustc_session::lint::builtin::COINDUCTIVE_OVERLAP_IN_COHERENCE;
@ -31,9 +40,6 @@ use std::fmt::Debug;
use std::iter;
use std::ops::ControlFlow;
use super::query::evaluate_obligation::InferCtxtExt;
use super::NormalizeExt;
/// Whether we do the orphan check relative to this crate or
/// to some remote crate.
#[derive(Copy, Clone, Debug)]
@ -205,19 +211,19 @@ fn overlap<'tcx>(
// Equate the headers to find their intersection (the general type, with infer vars,
// that may apply both impls).
let equate_obligations = equate_impl_headers(selcx.infcx, &impl1_header, &impl2_header)?;
let mut obligations = equate_impl_headers(selcx.infcx, &impl1_header, &impl2_header)?;
debug!("overlap: unification check succeeded");
obligations.extend(
[&impl1_header.predicates, &impl2_header.predicates].into_iter().flatten().map(
|&predicate| Obligation::new(infcx.tcx, ObligationCause::dummy(), param_env, predicate),
),
);
if overlap_mode.use_implicit_negative() {
for mode in [TreatInductiveCycleAs::Ambig, TreatInductiveCycleAs::Recur] {
if let Some(failing_obligation) = selcx.with_treat_inductive_cycle_as(mode, |selcx| {
impl_intersection_has_impossible_obligation(
selcx,
param_env,
&impl1_header,
&impl2_header,
&equate_obligations,
)
impl_intersection_has_impossible_obligation(selcx, &obligations)
}) {
if matches!(mode, TreatInductiveCycleAs::Recur) {
let first_local_impl = impl1_header
@ -281,7 +287,14 @@ fn overlap<'tcx>(
return None;
}
let intercrate_ambiguity_causes = selcx.take_intercrate_ambiguity_causes();
let intercrate_ambiguity_causes = if !overlap_mode.use_implicit_negative() {
Default::default()
} else if infcx.next_trait_solver() {
compute_intercrate_ambiguity_causes(&infcx, &obligations)
} else {
selcx.take_intercrate_ambiguity_causes()
};
debug!("overlap: intercrate_ambiguity_causes={:#?}", intercrate_ambiguity_causes);
let involves_placeholder = infcx
.inner
@ -335,34 +348,24 @@ fn equate_impl_headers<'tcx>(
/// of the two impls above to be empty.
///
/// Importantly, this works even if there isn't a `impl !Error for MyLocalType`.
fn impl_intersection_has_impossible_obligation<'cx, 'tcx>(
fn impl_intersection_has_impossible_obligation<'a, 'cx, 'tcx>(
selcx: &mut SelectionContext<'cx, 'tcx>,
param_env: ty::ParamEnv<'tcx>,
impl1_header: &ty::ImplHeader<'tcx>,
impl2_header: &ty::ImplHeader<'tcx>,
obligations: &PredicateObligations<'tcx>,
) -> Option<PredicateObligation<'tcx>> {
obligations: &'a [PredicateObligation<'tcx>],
) -> Option<&'a PredicateObligation<'tcx>> {
let infcx = selcx.infcx;
[&impl1_header.predicates, &impl2_header.predicates]
.into_iter()
.flatten()
.map(|&predicate| {
Obligation::new(infcx.tcx, ObligationCause::dummy(), param_env, predicate)
})
.chain(obligations.into_iter().cloned())
.find(|obligation: &PredicateObligation<'tcx>| {
if infcx.next_trait_solver() {
infcx.evaluate_obligation(obligation).map_or(false, |result| !result.may_apply())
} else {
// We use `evaluate_root_obligation` to correctly track intercrate
// ambiguity clauses. We cannot use this in the new solver.
selcx.evaluate_root_obligation(obligation).map_or(
false, // Overflow has occurred, and treat the obligation as possibly holding.
|result| !result.may_apply(),
)
}
})
obligations.iter().find(|obligation| {
if infcx.next_trait_solver() {
infcx.evaluate_obligation(obligation).map_or(false, |result| !result.may_apply())
} else {
// We use `evaluate_root_obligation` to correctly track intercrate
// ambiguity clauses. We cannot use this in the new solver.
selcx.evaluate_root_obligation(obligation).map_or(
false, // Overflow has occurred, and treat the obligation as possibly holding.
|result| !result.may_apply(),
)
}
})
}
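// Hypothetical stand-alone re-creation of the `MyLocalType`/`Error` example
// from the doc comment above: these two impls are accepted because their
// intersection would require `MyLocalType: Error`, an obligation the check
// above finds impossible, even with no explicit `impl !Error for MyLocalType`.
use std::error::Error;

trait Describe {}
impl<T: Error> Describe for T {}

struct MyLocalType;
impl Describe for MyLocalType {}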
/// Check if both impls can be satisfied by a common type by considering whether
@ -882,3 +885,144 @@ where
ControlFlow::Continue(())
}
}
/// Compute the `intercrate_ambiguity_causes` for the new solver using
/// "proof trees".
///
/// This is a bit scuffed, but since it is only used to improve
/// diagnostics it seems to be good enough, at least when looking at
/// UI tests. We can always improve it once there are test cases where
/// it currently falls short.
fn compute_intercrate_ambiguity_causes<'tcx>(
infcx: &InferCtxt<'tcx>,
obligations: &[PredicateObligation<'tcx>],
) -> FxIndexSet<IntercrateAmbiguityCause> {
let mut causes: FxIndexSet<IntercrateAmbiguityCause> = Default::default();
for obligation in obligations {
search_ambiguity_causes(infcx, obligation.clone().into(), &mut causes);
}
causes
}
struct AmbiguityCausesVisitor<'a> {
causes: &'a mut FxIndexSet<IntercrateAmbiguityCause>,
}
impl<'a, 'tcx> ProofTreeVisitor<'tcx> for AmbiguityCausesVisitor<'a> {
type BreakTy = !;
fn visit_goal(&mut self, goal: &InspectGoal<'_, 'tcx>) -> ControlFlow<Self::BreakTy> {
let infcx = goal.infcx();
for cand in goal.candidates() {
cand.visit_nested(self)?;
}
// When searching for intercrate ambiguity causes, we only need to look
// at ambiguous goals, as for others the coherence unknowable candidate
// was irrelevant.
match goal.result() {
Ok(Certainty::Maybe(_)) => {}
Ok(Certainty::Yes) | Err(NoSolution) => return ControlFlow::Continue(()),
}
let Goal { param_env, predicate } = goal.goal();
// For bound predicates we simply call `infcx.replace_bound_vars_with_placeholders`
// and then prove the resulting predicate as a nested goal.
let trait_ref = match predicate.kind().no_bound_vars() {
Some(ty::PredicateKind::Clause(ty::ClauseKind::Trait(tr))) => tr.trait_ref,
Some(ty::PredicateKind::Clause(ty::ClauseKind::Projection(proj))) => {
proj.projection_ty.trait_ref(infcx.tcx)
}
_ => return ControlFlow::Continue(()),
};
let mut ambiguity_cause = None;
for cand in goal.candidates() {
// FIXME: boiiii, using string comparisons here sure is scuffed.
if let inspect::ProbeKind::MiscCandidate { name: "coherence unknowable", result: _ } =
cand.kind()
{
let lazily_normalize_ty = |ty: Ty<'tcx>| {
let mut fulfill_cx = <dyn TraitEngine<'tcx>>::new(infcx);
if matches!(ty.kind(), ty::Alias(..)) {
// FIXME(-Ztrait-solver=next-coherence): we currently don't
// normalize opaque types here, resulting in diverging behavior
// for TAITs.
match infcx
.at(&ObligationCause::dummy(), param_env)
.structurally_normalize(ty, &mut *fulfill_cx)
{
Ok(ty) => Ok(ty),
Err(_errs) => Err(()),
}
} else {
Ok(ty)
}
};
infcx.probe(|_| {
match trait_ref_is_knowable(infcx.tcx, trait_ref, lazily_normalize_ty) {
Err(()) => {}
Ok(Ok(())) => warn!("expected an unknowable trait ref: {trait_ref:?}"),
Ok(Err(conflict)) => {
if !trait_ref.references_error() {
let self_ty = trait_ref.self_ty();
let (trait_desc, self_desc) = with_no_trimmed_paths!({
let trait_desc = trait_ref.print_only_trait_path().to_string();
let self_desc = self_ty
.has_concrete_skeleton()
.then(|| self_ty.to_string());
(trait_desc, self_desc)
});
ambiguity_cause = Some(match conflict {
Conflict::Upstream => {
IntercrateAmbiguityCause::UpstreamCrateUpdate {
trait_desc,
self_desc,
}
}
Conflict::Downstream => {
IntercrateAmbiguityCause::DownstreamCrate {
trait_desc,
self_desc,
}
}
});
}
}
}
})
} else {
match cand.result() {
// We only add an ambiguity cause if the goal would otherwise
// result in an error.
//
// FIXME: While this matches the behavior of the
// old solver, it is not the only way in which the unknowable
// candidates *weaken* coherence, they can also force otherwise
// successful normalization to be ambiguous.
Ok(Certainty::Maybe(_) | Certainty::Yes) => {
ambiguity_cause = None;
break;
}
Err(NoSolution) => continue,
}
}
}
if let Some(ambiguity_cause) = ambiguity_cause {
self.causes.insert(ambiguity_cause);
}
ControlFlow::Continue(())
}
}
fn search_ambiguity_causes<'tcx>(
infcx: &InferCtxt<'tcx>,
goal: Goal<'tcx, ty::Predicate<'tcx>>,
causes: &mut FxIndexSet<IntercrateAmbiguityCause>,
) {
infcx.visit_proof_tree(goal, &mut AmbiguityCausesVisitor { causes });
}

View file

@ -986,6 +986,8 @@ impl<'tcx> TypeErrCtxtExt<'tcx> for TypeErrCtxt<'_, 'tcx> {
}
}
self.explain_hrtb_projection(&mut err, trait_predicate, obligation.param_env, &obligation.cause);
// Return early if the trait is Debug or Display and the invocation
// originates within a standard library macro, because the output
// is otherwise overwhelming and unhelpful (see #85844 for an

View file

@ -406,6 +406,14 @@ pub trait TypeErrCtxtExt<'tcx> {
candidate_impls: &[ImplCandidate<'tcx>],
span: Span,
);
fn explain_hrtb_projection(
&self,
diag: &mut Diagnostic,
pred: ty::PolyTraitPredicate<'tcx>,
param_env: ty::ParamEnv<'tcx>,
cause: &ObligationCause<'tcx>,
);
}
fn predicate_constraint(generics: &hir::Generics<'_>, pred: ty::Predicate<'_>) -> (Span, String) {
@ -4027,6 +4035,71 @@ impl<'tcx> TypeErrCtxtExt<'tcx> for TypeErrCtxt<'_, 'tcx> {
}
}
}
fn explain_hrtb_projection(
&self,
diag: &mut Diagnostic,
pred: ty::PolyTraitPredicate<'tcx>,
param_env: ty::ParamEnv<'tcx>,
cause: &ObligationCause<'tcx>,
) {
if pred.skip_binder().has_escaping_bound_vars() && pred.skip_binder().has_non_region_infer()
{
self.probe(|_| {
let ocx = ObligationCtxt::new(self);
let pred = self.instantiate_binder_with_placeholders(pred);
let pred = ocx.normalize(&ObligationCause::dummy(), param_env, pred);
ocx.register_obligation(Obligation::new(
self.tcx,
ObligationCause::dummy(),
param_env,
pred,
));
if !ocx.select_where_possible().is_empty() {
// encountered errors.
return;
}
if let ObligationCauseCode::FunctionArgumentObligation {
call_hir_id,
arg_hir_id,
parent_code: _,
} = cause.code()
{
let arg_span = self.tcx.hir().span(*arg_hir_id);
let mut sp: MultiSpan = arg_span.into();
sp.push_span_label(
arg_span,
"the trait solver is unable to infer the \
generic types that should be inferred from this argument",
);
sp.push_span_label(
self.tcx.hir().span(*call_hir_id),
"add turbofish arguments to this call to \
specify the types manually, even if it's redundant",
);
diag.span_note(
sp,
"this is a known limitation of the trait solver that \
will be lifted in the future",
);
} else {
let mut sp: MultiSpan = cause.span.into();
sp.push_span_label(
cause.span,
"try adding turbofish arguments to this expression to \
specify the types manually, even if it's redundant",
);
diag.span_note(
sp,
"this is a known limitation of the trait solver that \
will be lifted in the future",
);
}
});
}
}
}
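// Hypothetical user-level example of the suggestion constructed above: when
// the solver cannot infer the generic types from an expression, a (possibly
// redundant-looking) turbofish specifies them manually.
fn demo() {
    let strings = ["1", "2", "3"];
    // Without `::<Vec<i32>>` the collection type would be ambiguous here.
    let nums = strings.iter().map(|s| s.parse::<i32>().unwrap()).collect::<Vec<i32>>();
    assert_eq!(nums, vec![1, 2, 3]);
}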
/// Add a hint to add a missing borrow or remove an unnecessary one.

View file

@ -1644,7 +1644,7 @@ fn assemble_candidates_from_object_ty<'cx, 'tcx>(
let env_predicates = data
.projection_bounds()
.filter(|bound| bound.item_def_id() == obligation.predicate.def_id)
.map(|p| p.with_self_ty(tcx, object_ty).to_predicate(tcx));
.map(|p| ty::Clause::from_projection_clause(tcx, p.with_self_ty(tcx, object_ty)));
assemble_candidates_from_predicates(
selcx,

View file

@ -22,9 +22,14 @@ impl<'tcx> StructurallyNormalizeExt<'tcx> for At<'_, 'tcx> {
assert!(!ty.is_ty_var(), "should have resolved vars before calling");
if self.infcx.next_trait_solver() {
while let ty::Alias(ty::Projection | ty::Inherent | ty::Weak, projection_ty) =
*ty.kind()
{
// FIXME(-Ztrait-solver=next): correctly handle
// overflow here.
for _ in 0..256 {
let ty::Alias(ty::Projection | ty::Inherent | ty::Weak, projection_ty) = *ty.kind()
else {
break;
};
let new_infer_ty = self.infcx.next_ty_var(TypeVariableOrigin {
kind: TypeVariableOriginKind::NormalizeProjectionType,
span: self.cause.span,
@ -49,6 +54,7 @@ impl<'tcx> StructurallyNormalizeExt<'tcx> for At<'_, 'tcx> {
break;
}
}
Ok(ty)
} else {
Ok(self.normalize(ty).into_value_registering_obligations(self.infcx, fulfill_cx))

View file

@ -3,10 +3,13 @@ use rustc_infer::infer::TyCtxtInferExt;
use rustc_middle::query::Providers;
use rustc_middle::ty::{ParamEnvAnd, TyCtxt};
use rustc_trait_selection::infer::InferCtxtBuilderExt;
use rustc_trait_selection::traits::error_reporting::TypeErrCtxtExt;
use rustc_trait_selection::traits::query::{
normalize::NormalizationResult, CanonicalProjectionGoal, NoSolution,
};
use rustc_trait_selection::traits::{self, ObligationCause, SelectionContext};
use rustc_trait_selection::traits::{
self, FulfillmentErrorCode, ObligationCause, SelectionContext,
};
use std::sync::atomic::Ordering;
pub(crate) fn provide(p: &mut Providers) {
@ -40,6 +43,27 @@ fn normalize_projection_ty<'tcx>(
&mut obligations,
);
ocx.register_obligations(obligations);
// #112047: With projections and opaques, we are able to create opaques that
// are recursive (given some substitution of the opaque's type variables).
// In that case, we may only realize a cycle error when calling
// `normalize_erasing_regions` in mono.
if !ocx.infcx.next_trait_solver() {
let errors = ocx.select_where_possible();
if !errors.is_empty() {
// Rustdoc may attempt to normalize type alias types which are not
// well-formed. Rustdoc also normalizes types that are just not
// well-formed, since we don't do as much HIR analysis (checking
// that impl vars are constrained by the signature, for example).
if !tcx.sess.opts.actually_rustdoc {
for error in &errors {
if let FulfillmentErrorCode::CodeCycle(cycle) = &error.code {
ocx.infcx.err_ctxt().report_overflow_obligation_cycle(cycle);
}
}
}
return Err(NoSolution);
}
}
// FIXME(associated_const_equality): All users of normalize_projection_ty expected
// a type, but there is the possibility it could've been a const now. Maybe change
// it to a Term later?

View file

@ -141,11 +141,34 @@ fn resolve_associated_item<'tcx>(
false
}
};
if !eligible {
return Ok(None);
}
// HACK: We may have overlapping `dyn Trait` built-in impls and
// user-provided blanket impls. Detect that case here, and return
// ambiguity.
//
// This should not affect totally monomorphized contexts, only
// resolve calls that happen polymorphically, such as the mir-inliner
// and const-prop (and also some lints).
let self_ty = rcvr_args.type_at(0);
if !self_ty.is_known_rigid() {
let predicates = tcx
.predicates_of(impl_data.impl_def_id)
.instantiate(tcx, impl_data.args)
.predicates;
let sized_def_id = tcx.lang_items().sized_trait();
// If we find a `Self: Sized` bound on the item, then we know
// that `dyn Trait` can certainly never apply here.
if !predicates.into_iter().filter_map(ty::Clause::as_trait_clause).any(|clause| {
Some(clause.def_id()) == sized_def_id
&& clause.skip_binder().self_ty() == self_ty
}) {
return Ok(None);
}
}
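// Hypothetical illustration of the `Self: Sized` check above: this bound
// makes `by_value` uncallable on `dyn Widget`, so resolution through the
// built-in `dyn Trait` impl can never reach it and the ambiguity does not
// arise.
trait Widget {
    fn by_value(self) -> u32
    where
        Self: Sized;
}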
// Any final impl is required to define all associated items.
if !leaf_def.item.defaultness(tcx).has_value() {
let guard = tcx.sess.delay_span_bug(

View file

@ -4,7 +4,7 @@ use rustc_hir::def::DefKind;
use rustc_index::bit_set::BitSet;
use rustc_middle::query::Providers;
use rustc_middle::ty::{
self, EarlyBinder, ToPredicate, Ty, TyCtxt, TypeSuperVisitable, TypeVisitable, TypeVisitor,
self, EarlyBinder, Ty, TyCtxt, TypeSuperVisitable, TypeVisitable, TypeVisitor,
};
use rustc_span::def_id::{DefId, LocalDefId, CRATE_DEF_ID};
use rustc_span::DUMMY_SP;
@ -220,13 +220,10 @@ impl<'tcx> TypeVisitor<TyCtxt<'tcx>> for ImplTraitInTraitFinder<'_, 'tcx> {
// strategy, then just reinterpret the associated type like an opaque :^)
let default_ty = self.tcx.type_of(shifted_alias_ty.def_id).instantiate(self.tcx, shifted_alias_ty.args);
self.predicates.push(
ty::Binder::bind_with_vars(
ty::ProjectionPredicate { projection_ty: shifted_alias_ty, term: default_ty.into() },
self.bound_vars,
)
.to_predicate(self.tcx),
);
self.predicates.push(ty::Clause::from_projection_clause(self.tcx, ty::Binder::bind_with_vars(
ty::ProjectionPredicate { projection_ty: shifted_alias_ty, term: default_ty.into() },
self.bound_vars,
)));
// We walk the *un-shifted* alias ty, because we're tracking the de bruijn
// binder depth, and if we were to walk `shifted_alias_ty` instead, we'd

View file

@ -214,6 +214,8 @@ impl CStr {
/// * The memory referenced by the returned `CStr` must not be mutated for
/// the duration of lifetime `'a`.
///
/// * The nul terminator must be within `isize::MAX` bytes of `ptr`.
///
/// > **Note**: This operation is intended to be a 0-cost cast but it is
/// > currently implemented with an up-front calculation of the length of
/// > the string. This is not guaranteed to always be the case.
@ -259,42 +261,16 @@ impl CStr {
#[rustc_const_unstable(feature = "const_cstr_from_ptr", issue = "113219")]
pub const unsafe fn from_ptr<'a>(ptr: *const c_char) -> &'a CStr {
// SAFETY: The caller has provided a pointer that points to a valid C
// string with a NUL terminator of size less than `isize::MAX`, whose
// content remain valid and doesn't change for the lifetime of the
// returned `CStr`.
//
// Thus computing the length is fine (a NUL byte exists), the call to
// from_raw_parts is safe because we know the length is at most `isize::MAX`, meaning
// the call to `from_bytes_with_nul_unchecked` is correct.
// string with a NUL terminator less than `isize::MAX` from `ptr`.
let len = unsafe { const_strlen(ptr) };
// SAFETY: The caller has provided a valid pointer with length less than
// `isize::MAX`, so `from_raw_parts` is safe. The content remains valid
// and doesn't change for the lifetime of the returned `CStr`. This
// means the call to `from_bytes_with_nul_unchecked` is correct.
//
// The cast from c_char to u8 is ok because a c_char is always one byte.
unsafe {
const fn strlen_ct(s: *const c_char) -> usize {
let mut len = 0;
// SAFETY: Outer caller has provided a pointer to a valid C string.
while unsafe { *s.add(len) } != 0 {
len += 1;
}
len
}
// `inline` is necessary for codegen to see strlen.
#[inline]
fn strlen_rt(s: *const c_char) -> usize {
extern "C" {
/// Provided by libc or compiler_builtins.
fn strlen(s: *const c_char) -> usize;
}
// SAFETY: Outer caller has provided a pointer to a valid C string.
unsafe { strlen(s) }
}
let len = intrinsics::const_eval_select((ptr,), strlen_ct, strlen_rt);
Self::from_bytes_with_nul_unchecked(slice::from_raw_parts(ptr.cast(), len + 1))
}
unsafe { Self::from_bytes_with_nul_unchecked(slice::from_raw_parts(ptr.cast(), len + 1)) }
}
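
Illustrative sketch, not part of the diff: a usage example for `from_ptr` that satisfies the safety contract above, using only stable APIs.

use std::ffi::CStr;
use std::os::raw::c_char;

fn main() {
    let bytes = b"hello\0";
    // SAFETY: `bytes` is nul-terminated, outlives the `CStr`, is not
    // mutated, and its nul is well within `isize::MAX` bytes of the start.
    let cstr = unsafe { CStr::from_ptr(bytes.as_ptr() as *const c_char) };
    assert_eq!(cstr.to_bytes(), b"hello");
}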
/// Creates a C string wrapper from a byte slice with any number of nuls.
@ -516,6 +492,34 @@ impl CStr {
self.inner.as_ptr()
}
/// Returns the length of `self`. Like C's `strlen`, this does not include the nul terminator.
///
/// > **Note**: This method is currently implemented as a constant-time
/// > cast, but it is planned to alter its definition in the future to
/// > perform the length calculation whenever this method is called.
///
/// # Examples
///
/// ```
/// #![feature(cstr_count_bytes)]
///
/// use std::ffi::CStr;
///
/// let cstr = CStr::from_bytes_with_nul(b"foo\0").unwrap();
/// assert_eq!(cstr.count_bytes(), 3);
///
/// let cstr = CStr::from_bytes_with_nul(b"\0").unwrap();
/// assert_eq!(cstr.count_bytes(), 0);
/// ```
#[inline]
#[must_use]
#[doc(alias("len", "strlen"))]
#[unstable(feature = "cstr_count_bytes", issue = "114441")]
#[rustc_const_unstable(feature = "const_cstr_from_ptr", issue = "113219")]
pub const fn count_bytes(&self) -> usize {
self.inner.len() - 1
}
/// Returns `true` if `self.to_bytes()` has a length of 0.
///
/// # Examples
@ -682,3 +686,37 @@ impl AsRef<CStr> for CStr {
self
}
}
/// Calculate the length of a nul-terminated string. Defers to C's `strlen` when possible.
///
/// # Safety
///
/// The pointer must point to a valid buffer that contains a NUL terminator. The NUL must be
/// located within `isize::MAX` bytes of `ptr`.
#[inline]
const unsafe fn const_strlen(ptr: *const c_char) -> usize {
const fn strlen_ct(s: *const c_char) -> usize {
let mut len = 0;
// SAFETY: Outer caller has provided a pointer to a valid C string.
while unsafe { *s.add(len) } != 0 {
len += 1;
}
len
}
#[inline]
fn strlen_rt(s: *const c_char) -> usize {
extern "C" {
/// Provided by libc or compiler_builtins.
fn strlen(s: *const c_char) -> usize;
}
// SAFETY: Outer caller has provided a pointer to a valid C string.
unsafe { strlen(s) }
}
// SAFETY: the two functions always provide equivalent functionality
unsafe { intrinsics::const_eval_select((ptr,), strlen_ct, strlen_rt) }
}

View file

@ -957,6 +957,7 @@ impl f32 {
} else if self == other {
if self.is_sign_negative() && other.is_sign_positive() { self } else { other }
} else {
// At least one input is NaN. Use `+` to perform NaN propagation and quieting.
self + other
}
}
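
Illustrative sketch, not part of the diff: the NaN-propagation property the new comment relies on. Addition with at least one NaN operand yields a (quiet) NaN; the same holds for the identical `f64` branch below.

fn main() {
    assert!((f32::NAN + 1.0).is_nan());
    assert!((1.0 + f32::NAN).is_nan());
}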

View file

@ -968,6 +968,7 @@ impl f64 {
} else if self == other {
if self.is_sign_negative() && other.is_sign_positive() { self } else { other }
} else {
// At least one input is NaN. Use `+` to perform NaN propagation and quieting.
self + other
}
}

View file

@ -99,7 +99,7 @@ pub macro unreachable_2021 {
/// use.
#[unstable(feature = "std_internals", issue = "none")]
#[doc(hidden)]
pub unsafe trait BoxMeUp {
pub unsafe trait PanicPayload {
/// Take full ownership of the contents.
/// The return type is actually `Box<dyn Any + Send>`, but we cannot use `Box` in core.
///
@ -107,7 +107,7 @@ pub unsafe trait BoxMeUp {
/// Calling this method twice, or calling `get` after calling this method, is an error.
///
/// The argument is borrowed because the panic runtime (`__rust_start_panic`) only
/// gets a borrowed `dyn BoxMeUp`.
/// gets a borrowed `dyn PanicPayload`.
fn take_box(&mut self) -> *mut (dyn Any + Send);
/// Just borrow the contents.
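
Illustrative sketch, not part of the diff: the raw-pointer hand-off that `take_box` documents. The payload is morally a `Box<dyn Any + Send>`, passed as a raw pointer because `core` cannot name `Box`.

use std::any::Any;

fn main() {
    let payload: Box<dyn Any + Send> = Box::new("boom");
    // What a `take_box` implementation hands out:
    let raw: *mut (dyn Any + Send) = Box::into_raw(payload);
    // What the panic runtime reconstructs on the other side:
    let back = unsafe { Box::from_raw(raw) };
    assert_eq!(*back.downcast::<&str>().unwrap(), "boom");
}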

View file

@ -1,6 +1,6 @@
use alloc::string::String;
use core::mem::transmute;
use core::panic::BoxMeUp;
use core::panic::PanicPayload;
use core::ptr::copy_nonoverlapping;
const ANDROID_SET_ABORT_MESSAGE: &[u8] = b"android_set_abort_message\0";
@ -15,7 +15,7 @@ type SetAbortMessageType = unsafe extern "C" fn(*const libc::c_char) -> ();
//
// Weakly resolve the symbol for android_set_abort_message. This function is only available
// for API >= 21.
pub(crate) unsafe fn android_set_abort_message(payload: &mut dyn BoxMeUp) {
pub(crate) unsafe fn android_set_abort_message(payload: &mut dyn PanicPayload) {
let func_addr =
libc::dlsym(libc::RTLD_DEFAULT, ANDROID_SET_ABORT_MESSAGE.as_ptr() as *const libc::c_char)
as usize;
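
Illustrative sketch, not part of the diff: the dlsym-based weak-resolution pattern used here, with the real symbol name from the diff. Assumes the `libc` crate and a unix-like target; `try_set_abort_message` is a hypothetical helper name.

use std::ffi::CStr;
use std::mem::transmute;

type SetAbortMessage = unsafe extern "C" fn(*const libc::c_char);

unsafe fn try_set_abort_message(msg: &CStr) {
    // `dlsym` returns null if the symbol is absent (Android API < 21).
    let addr = libc::dlsym(
        libc::RTLD_DEFAULT,
        b"android_set_abort_message\0".as_ptr() as *const libc::c_char,
    );
    if !addr.is_null() {
        let func: SetAbortMessage = transmute(addr);
        func(msg.as_ptr());
    }
}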

View file

@ -20,7 +20,7 @@
mod android;
use core::any::Any;
use core::panic::BoxMeUp;
use core::panic::PanicPayload;
#[rustc_std_internal_symbol]
#[allow(improper_ctypes_definitions)]
@ -30,7 +30,7 @@ pub unsafe extern "C" fn __rust_panic_cleanup(_: *mut u8) -> *mut (dyn Any + Sen
// "Leak" the payload and shim to the relevant abort on the platform in question.
#[rustc_std_internal_symbol]
pub unsafe fn __rust_start_panic(_payload: &mut dyn BoxMeUp) -> u32 {
pub unsafe fn __rust_start_panic(_payload: &mut dyn PanicPayload) -> u32 {
// Android has the ability to attach a message as part of the abort.
#[cfg(target_os = "android")]
android::android_set_abort_message(_payload);
@ -43,7 +43,8 @@ pub unsafe fn __rust_start_panic(_payload: &mut dyn BoxMeUp) -> u32 {
libc::abort();
}
} else if #[cfg(any(target_os = "hermit",
all(target_vendor = "fortanix", target_env = "sgx")
all(target_vendor = "fortanix", target_env = "sgx"),
target_os = "xous"
))] {
unsafe fn abort() -> ! {
// call std::sys::abort_internal

View file

@ -29,7 +29,7 @@
use alloc::boxed::Box;
use core::any::Any;
use core::panic::BoxMeUp;
use core::panic::PanicPayload;
cfg_if::cfg_if! {
if #[cfg(target_os = "emscripten")] {
@ -99,7 +99,7 @@ pub unsafe extern "C" fn __rust_panic_cleanup(payload: *mut u8) -> *mut (dyn Any
// Entry point for raising an exception, just delegates to the platform-specific
// implementation.
#[rustc_std_internal_symbol]
pub unsafe fn __rust_start_panic(payload: &mut dyn BoxMeUp) -> u32 {
pub unsafe fn __rust_start_panic(payload: &mut dyn PanicPayload) -> u32 {
let payload = Box::from_raw(payload.take_box());
imp::panic(payload)

View file

@ -17,7 +17,7 @@ cfg-if = { version = "1.0", features = ['rustc-dep-of-std'] }
panic_unwind = { path = "../panic_unwind", optional = true }
panic_abort = { path = "../panic_abort" }
core = { path = "../core", public = true }
libc = { version = "0.2.146", default-features = false, features = ['rustc-dep-of-std'], public = true }
libc = { version = "0.2.148", default-features = false, features = ['rustc-dep-of-std'], public = true }
compiler_builtins = { version = "0.1.100" }
profiler_builtins = { path = "../profiler_builtins", optional = true }
unwind = { path = "../unwind" }
@ -36,8 +36,8 @@ object = { version = "0.32.0", default-features = false, optional = true, featur
rand = { version = "0.8.5", default-features = false, features = ["alloc"] }
rand_xorshift = "0.3.0"
[target.'cfg(any(all(target_family = "wasm", target_os = "unknown"), all(target_vendor = "fortanix", target_env = "sgx")))'.dependencies]
dlmalloc = { version = "0.2.3", features = ['rustc-dep-of-std'] }
[target.'cfg(any(all(target_family = "wasm", target_os = "unknown"), target_os = "xous", all(target_vendor = "fortanix", target_env = "sgx")))'.dependencies]
dlmalloc = { version = "0.2.4", features = ['rustc-dep-of-std'] }
[target.x86_64-fortanix-unknown-sgx.dependencies]
fortanix-sgx-abi = { version = "0.5.0", features = ['rustc-dep-of-std'], public = true }

View file

@ -37,6 +37,7 @@ fn main() {
|| target.contains("nintendo-3ds")
|| target.contains("vita")
|| target.contains("nto")
|| target.contains("xous")
// See src/bootstrap/synthetic_targets.rs
|| env::var("RUSTC_BOOTSTRAP_SYNTHETIC_TARGET").is_ok()
{

View file

@ -8,7 +8,7 @@
#![stable(feature = "rust1", since = "1.0.0")]
#![deny(unsafe_op_in_unsafe_fn)]
#[cfg(all(test, not(any(target_os = "emscripten", target_env = "sgx"))))]
#[cfg(all(test, not(any(target_os = "emscripten", target_env = "sgx", target_os = "xous"))))]
mod tests;
use crate::ffi::OsString;

View file

@ -511,6 +511,7 @@ impl Error {
/// let eof_error = Error::from(ErrorKind::UnexpectedEof);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline(never)]
pub fn new<E>(kind: ErrorKind, error: E) -> Error
where
E: Into<Box<dyn error::Error + Send + Sync>>,
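
Illustrative sketch, not part of the diff: the public API is unchanged; the diff only adds `#[inline(never)]`, presumably to keep this allocating constructor out of callers' hot paths.

use std::io::{Error, ErrorKind};

fn main() {
    let err = Error::new(ErrorKind::UnexpectedEof, "stream ended early");
    assert_eq!(err.kind(), ErrorKind::UnexpectedEof);
}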

Some files were not shown because too many files have changed in this diff.