Auto merge of #148753 - matthiaskrgr:rollup-48jzbqw, r=matthiaskrgr

Rollup of 10 pull requests

Successful merges:

 - rust-lang/rust#148683 (Remove `#[const_trait]`)
 - rust-lang/rust#148687 (std: use a non-poisoning `RwLock` for the panic hook)
 - rust-lang/rust#148709 (fix: disable self-contained linker when bootstrap-override-lld is set)
 - rust-lang/rust#148716 (mgca: Finish implementation of `#[type_const]`)
 - rust-lang/rust#148722 (Add Crystal Durham to .mailmap)
 - rust-lang/rust#148723 (bootstrap: Render doctest timing reports as text, not JSON)
 - rust-lang/rust#148724 (tidy: Don't bypass stderr output capture in unit tests)
 - rust-lang/rust#148734 (miri subtree update)
 - rust-lang/rust#148736 (Fix typo in unstable-book link)
 - rust-lang/rust#148744 (Add myself(chenyukang) to the review rotation)

r? `@ghost`
`@rustbot` modify labels: rollup
This commit is contained in:
bors 2025-11-09 16:44:31 +00:00
commit 86b95ebc24
304 changed files with 2364 additions and 1970 deletions

View file

@ -139,7 +139,6 @@ Christian Poveda <git@pvdrz.com> <31802960+christianpoveda@users.noreply.github.
Christian Poveda <git@pvdrz.com> <christianpoveda@uhura.edef.eu>
Christian Vallentin <vallentinsource@gmail.com>
Christoffer Buchholz <chris@chrisbuchholz.me>
Christopher Durham <cad97@cad97.com>
Clark Gaebel <cg.wowus.cg@gmail.com> <cgaebel@mozilla.com>
Clement Miao <clementmiao@gmail.com>
Clément Renault <renault.cle@gmail.com>
@ -148,6 +147,7 @@ Clinton Ryan <clint.ryan3@gmail.com>
Taylor Cramer <cramertaylorj@gmail.com> <cramertj@google.com>
ember arlynx <ember@lunar.town> <corey@octayn.net>
Crazycolorz5 <Crazycolorz5@gmail.com>
Crystal Durham <cad97@cad97.com>
csmoe <35686186+csmoe@users.noreply.github.com>
Cyryl Płotnicki <cyplo@cyplo.net>
Damien Schoof <damien.schoof@gmail.com>

View file

@ -290,7 +290,7 @@ ast_passes_trait_fn_const =
*[false] {""}
}
.make_impl_const_sugg = ... and declare the impl to be const instead
.make_trait_const_sugg = ... and declare the trait to be a `#[const_trait]` instead
.make_trait_const_sugg = ... and declare the trait to be const instead
ast_passes_trait_object_single_bound = only a single explicit lifetime bound is permitted

View file

@ -48,7 +48,7 @@ enum SelfSemantic {
}
enum TraitOrTraitImpl {
Trait { span: Span, constness: Const },
Trait { vis: Span, constness: Const },
TraitImpl { constness: Const, polarity: ImplPolarity, trait_ref_span: Span },
}
@ -109,10 +109,10 @@ impl<'a> AstValidator<'a> {
self.outer_trait_or_trait_impl = old;
}
fn with_in_trait(&mut self, span: Span, constness: Const, f: impl FnOnce(&mut Self)) {
fn with_in_trait(&mut self, vis: Span, constness: Const, f: impl FnOnce(&mut Self)) {
let old = mem::replace(
&mut self.outer_trait_or_trait_impl,
Some(TraitOrTraitImpl::Trait { span, constness }),
Some(TraitOrTraitImpl::Trait { vis, constness }),
);
f(self);
self.outer_trait_or_trait_impl = old;
@ -265,10 +265,12 @@ impl<'a> AstValidator<'a> {
None
};
let map = self.sess.source_map();
let make_trait_const_sugg = if const_trait_impl
&& let TraitOrTraitImpl::Trait { span, constness: ast::Const::No } = parent
&& let &TraitOrTraitImpl::Trait { vis, constness: ast::Const::No } = parent
{
Some(span.shrink_to_lo())
Some(map.span_extend_while_whitespace(vis).shrink_to_hi())
} else {
None
};
@ -279,7 +281,7 @@ impl<'a> AstValidator<'a> {
in_impl: matches!(parent, TraitOrTraitImpl::TraitImpl { .. }),
const_context_label: parent_constness,
remove_const_sugg: (
self.sess.source_map().span_extend_while_whitespace(span),
map.span_extend_while_whitespace(span),
match parent_constness {
Some(_) => rustc_errors::Applicability::MachineApplicable,
None => rustc_errors::Applicability::MaybeIncorrect,
@ -1165,13 +1167,6 @@ impl<'a> Visitor<'a> for AstValidator<'a> {
..
}) => {
self.visit_attrs_vis_ident(&item.attrs, &item.vis, ident);
// FIXME(const_trait_impl) remove this
let alt_const_trait_span =
attr::find_by_name(&item.attrs, sym::const_trait).map(|attr| attr.span);
let constness = match (*constness, alt_const_trait_span) {
(Const::Yes(span), _) | (Const::No, Some(span)) => Const::Yes(span),
(Const::No, None) => Const::No,
};
if *is_auto == IsAuto::Yes {
// Auto traits cannot have generics, super traits nor contain items.
self.deny_generic_params(generics, ident.span);
@ -1188,7 +1183,7 @@ impl<'a> Visitor<'a> for AstValidator<'a> {
this.visit_generics(generics);
walk_list!(this, visit_param_bound, bounds, BoundKind::SuperTraits)
});
self.with_in_trait(item.span, constness, |this| {
self.with_in_trait(item.vis.span, *constness, |this| {
walk_list!(this, visit_assoc_item, items, AssocCtxt::Trait);
});
}

View file

@ -56,7 +56,7 @@ pub(crate) struct TraitFnConst {
pub make_impl_const_sugg: Option<Span>,
#[suggestion(
ast_passes_make_trait_const_sugg,
code = "#[const_trait]\n",
code = "const ",
applicability = "maybe-incorrect"
)]
pub make_trait_const_sugg: Option<Span>,

View file

@ -66,7 +66,8 @@ pub(crate) struct TypeConstParser;
impl<S: Stage> NoArgsAttributeParser<S> for TypeConstParser {
const PATH: &[Symbol] = &[sym::type_const];
const ON_DUPLICATE: OnDuplicate<S> = OnDuplicate::Error;
const ALLOWED_TARGETS: AllowedTargets = AllowedTargets::AllowList(&[Allow(Target::AssocConst)]);
const ALLOWED_TARGETS: AllowedTargets =
AllowedTargets::AllowList(&[Allow(Target::Const), Allow(Target::AssocConst)]);
const CREATE: fn(Span) -> AttributeKind = AttributeKind::TypeConst;
}
@ -101,17 +102,6 @@ impl<S: Stage> NoArgsAttributeParser<S> for DoNotImplementViaObjectParser {
const CREATE: fn(Span) -> AttributeKind = AttributeKind::DoNotImplementViaObject;
}
// FIXME(const_trait_impl): remove this
// Const traits
pub(crate) struct ConstTraitParser;
impl<S: Stage> NoArgsAttributeParser<S> for ConstTraitParser {
const PATH: &[Symbol] = &[sym::const_trait];
const ON_DUPLICATE: OnDuplicate<S> = OnDuplicate::Warn;
const ALLOWED_TARGETS: AllowedTargets = AllowedTargets::AllowList(&[Allow(Target::Trait)]);
const CREATE: fn(Span) -> AttributeKind = AttributeKind::ConstTrait;
}
// Specialization
pub(crate) struct SpecializationTraitParser;

View file

@ -64,7 +64,7 @@ use crate::attributes::stability::{
};
use crate::attributes::test_attrs::{IgnoreParser, ShouldPanicParser};
use crate::attributes::traits::{
AllowIncoherentImplParser, CoinductiveParser, ConstTraitParser, DenyExplicitImplParser,
AllowIncoherentImplParser, CoinductiveParser, DenyExplicitImplParser,
DoNotImplementViaObjectParser, FundamentalParser, MarkerParser, ParenSugarParser,
PointeeParser, SkipDuringMethodDispatchParser, SpecializationTraitParser, TypeConstParser,
UnsafeSpecializationMarkerParser,
@ -218,7 +218,6 @@ attribute_parsers!(
Single<WithoutArgs<ColdParser>>,
Single<WithoutArgs<ConstContinueParser>>,
Single<WithoutArgs<ConstStabilityIndirectParser>>,
Single<WithoutArgs<ConstTraitParser>>,
Single<WithoutArgs<CoroutineParser>>,
Single<WithoutArgs<DenyExplicitImplParser>>,
Single<WithoutArgs<DoNotImplementViaObjectParser>>,

View file

@ -381,23 +381,21 @@ fn build_error_for_const_call<'tcx>(
`{trait_name}` is not const",
),
);
if parent.is_local() && ccx.tcx.sess.is_nightly_build() {
if let Some(parent) = parent.as_local()
&& ccx.tcx.sess.is_nightly_build()
{
if !ccx.tcx.features().const_trait_impl() {
err.help(
"add `#![feature(const_trait_impl)]` to the crate attributes to \
enable `#[const_trait]`",
enable const traits",
);
}
let indentation = ccx
.tcx
.sess
.source_map()
.indentation_before(trait_span)
.unwrap_or_default();
let span = ccx.tcx.hir_expect_item(parent).vis_span;
let span = ccx.tcx.sess.source_map().span_extend_while_whitespace(span);
err.span_suggestion_verbose(
trait_span.shrink_to_lo(),
span.shrink_to_hi(),
format!("consider making trait `{trait_name}` const"),
format!("#[const_trait]\n{indentation}"),
"const ".to_owned(),
Applicability::MaybeIncorrect,
);
} else if !ccx.tcx.sess.is_nightly_build() {

View file

@ -846,14 +846,6 @@ pub static BUILTIN_ATTRIBUTES: &[BuiltinAttribute] = &[
EncodeCrossCrate::No, experimental!(register_tool),
),
// RFC 2632
// FIXME(const_trait_impl) remove this
gated!(
const_trait, Normal, template!(Word), WarnFollowing, EncodeCrossCrate::No, const_trait_impl,
"`const_trait` is a temporary placeholder for marking a trait that is suitable for `const` \
`impls` and all default bodies as `const`, which may be removed or renamed in the \
future."
),
// lang-team MCP 147
gated!(
deprecated_safe, Normal, template!(List: &[r#"since = "version", note = "...""#]), ErrorFollowing,

View file

@ -499,9 +499,6 @@ pub enum AttributeKind {
/// Represents `#[rustc_const_stable_indirect]`.
ConstStabilityIndirect,
/// Represents `#[const_trait]`.
ConstTrait(Span),
/// Represents `#[coroutine]`.
Coroutine(Span),

View file

@ -32,7 +32,6 @@ impl AttributeKind {
ConstContinue(..) => No,
ConstStability { .. } => Yes,
ConstStabilityIndirect => No,
ConstTrait(..) => No,
Coroutine(..) => No,
Coverage(..) => No,
CrateName { .. } => No,

View file

@ -3065,7 +3065,7 @@ macro_rules! expect_methods_self_kind {
$(
#[track_caller]
pub fn $name(&self) -> $ret_ty {
let $pat = &self.kind else { expect_failed(stringify!($ident), self) };
let $pat = &self.kind else { expect_failed(stringify!($name), self) };
$ret_val
}
)*
@ -3077,7 +3077,7 @@ macro_rules! expect_methods_self {
$(
#[track_caller]
pub fn $name(&self) -> $ret_ty {
let $pat = self else { expect_failed(stringify!($ident), self) };
let $pat = self else { expect_failed(stringify!($name), self) };
$ret_val
}
)*
@ -4790,6 +4790,11 @@ impl<'hir> Node<'hir> {
ForeignItemKind::Static(ty, ..) => Some(ty),
_ => None,
},
Node::GenericParam(param) => match param.kind {
GenericParamKind::Lifetime { .. } => None,
GenericParamKind::Type { default, .. } => default,
GenericParamKind::Const { ty, .. } => Some(ty),
},
_ => None,
}
}

View file

@ -757,22 +757,18 @@ pub(crate) fn check_item_type(tcx: TyCtxt<'_>, def_id: LocalDefId) -> Result<(),
}
match tcx.def_kind(def_id) {
def_kind @ (DefKind::Static { .. } | DefKind::Const) => {
DefKind::Static { .. } => {
tcx.ensure_ok().generics_of(def_id);
tcx.ensure_ok().type_of(def_id);
tcx.ensure_ok().predicates_of(def_id);
match def_kind {
DefKind::Static { .. } => {
check_static_inhabited(tcx, def_id);
check_static_linkage(tcx, def_id);
let ty = tcx.type_of(def_id).instantiate_identity();
res = res.and(wfcheck::check_static_item(
tcx, def_id, ty, /* should_check_for_sync */ true,
));
}
DefKind::Const => res = res.and(wfcheck::check_const_item(tcx, def_id)),
_ => unreachable!(),
}
check_static_inhabited(tcx, def_id);
check_static_linkage(tcx, def_id);
let ty = tcx.type_of(def_id).instantiate_identity();
res = res.and(wfcheck::check_static_item(
tcx, def_id, ty, /* should_check_for_sync */ true,
));
// Only `Node::Item` and `Node::ForeignItem` still have HIR based
// checks. Returning early here does not miss any checks and
// avoids this query from having a direct dependency edge on the HIR
@ -900,6 +896,39 @@ pub(crate) fn check_item_type(tcx: TyCtxt<'_>, def_id: LocalDefId) -> Result<(),
// avoids this query from having a direct dependency edge on the HIR
return res;
}
DefKind::Const => {
tcx.ensure_ok().generics_of(def_id);
tcx.ensure_ok().type_of(def_id);
tcx.ensure_ok().predicates_of(def_id);
res = res.and(enter_wf_checking_ctxt(tcx, def_id, |wfcx| {
let ty = tcx.type_of(def_id).instantiate_identity();
let ty_span = tcx.ty_span(def_id);
let ty = wfcx.deeply_normalize(ty_span, Some(WellFormedLoc::Ty(def_id)), ty);
wfcx.register_wf_obligation(ty_span, Some(WellFormedLoc::Ty(def_id)), ty.into());
wfcx.register_bound(
traits::ObligationCause::new(
ty_span,
def_id,
ObligationCauseCode::SizedConstOrStatic,
),
tcx.param_env(def_id),
ty,
tcx.require_lang_item(LangItem::Sized, ty_span),
);
check_where_clauses(wfcx, def_id);
if find_attr!(tcx.get_all_attrs(def_id), AttributeKind::TypeConst(_)) {
wfcheck::check_type_const(wfcx, def_id, ty, true)?;
}
Ok(())
}));
// Only `Node::Item` and `Node::ForeignItem` still have HIR based
// checks. Returning early here does not miss any checks and
// avoids this query from having a direct dependency edge on the HIR
return res;
}
DefKind::TyAlias => {
tcx.ensure_ok().generics_of(def_id);
tcx.ensure_ok().type_of(def_id);
@ -920,6 +949,11 @@ pub(crate) fn check_item_type(tcx: TyCtxt<'_>, def_id: LocalDefId) -> Result<(),
}));
check_variances_for_type_defn(tcx, def_id);
}
// Only `Node::Item` and `Node::ForeignItem` still have HIR based
// checks. Returning early here does not miss any checks and
// avoids this query from having a direct dependency edge on the HIR
return res;
}
DefKind::ForeignMod => {
let it = tcx.hir_expect_item(def_id);

View file

@ -6,9 +6,10 @@ use hir::def_id::{DefId, DefIdMap, LocalDefId};
use rustc_data_structures::fx::{FxIndexMap, FxIndexSet};
use rustc_errors::codes::*;
use rustc_errors::{Applicability, ErrorGuaranteed, MultiSpan, pluralize, struct_span_code_err};
use rustc_hir::attrs::AttributeKind;
use rustc_hir::def::{DefKind, Res};
use rustc_hir::intravisit::VisitorExt;
use rustc_hir::{self as hir, AmbigArg, GenericParamKind, ImplItemKind, intravisit};
use rustc_hir::{self as hir, AmbigArg, GenericParamKind, ImplItemKind, find_attr, intravisit};
use rustc_infer::infer::{self, BoundRegionConversionTime, InferCtxt, TyCtxtInferExt};
use rustc_infer::traits::util;
use rustc_middle::ty::error::{ExpectedFound, TypeError};
@ -1984,12 +1985,46 @@ fn compare_impl_const<'tcx>(
trait_const_item: ty::AssocItem,
impl_trait_ref: ty::TraitRef<'tcx>,
) -> Result<(), ErrorGuaranteed> {
compare_type_const(tcx, impl_const_item, trait_const_item)?;
compare_number_of_generics(tcx, impl_const_item, trait_const_item, false)?;
compare_generic_param_kinds(tcx, impl_const_item, trait_const_item, false)?;
check_region_bounds_on_impl_item(tcx, impl_const_item, trait_const_item, false)?;
compare_const_predicate_entailment(tcx, impl_const_item, trait_const_item, impl_trait_ref)
}
fn compare_type_const<'tcx>(
tcx: TyCtxt<'tcx>,
impl_const_item: ty::AssocItem,
trait_const_item: ty::AssocItem,
) -> Result<(), ErrorGuaranteed> {
let impl_is_type_const =
find_attr!(tcx.get_all_attrs(impl_const_item.def_id), AttributeKind::TypeConst(_));
let trait_type_const_span = find_attr!(
tcx.get_all_attrs(trait_const_item.def_id),
AttributeKind::TypeConst(sp) => *sp
);
if let Some(trait_type_const_span) = trait_type_const_span
&& !impl_is_type_const
{
return Err(tcx
.dcx()
.struct_span_err(
tcx.def_span(impl_const_item.def_id),
"implementation of `#[type_const]` const must be marked with `#[type_const]`",
)
.with_span_note(
MultiSpan::from_spans(vec![
tcx.def_span(trait_const_item.def_id),
trait_type_const_span,
]),
"trait declaration of const is marked with `#[type_const]`",
)
.emit());
}
Ok(())
}
/// The equivalent of [compare_method_predicate_entailment], but for associated constants
/// instead of associated functions.
// FIXME(generic_const_items): If possible extract the common parts of `compare_{type,const}_predicate_entailment`.

View file

@ -6,10 +6,11 @@ use rustc_abi::ExternAbi;
use rustc_data_structures::fx::{FxHashSet, FxIndexMap, FxIndexSet};
use rustc_errors::codes::*;
use rustc_errors::{Applicability, ErrorGuaranteed, pluralize, struct_span_code_err};
use rustc_hir::attrs::AttributeKind;
use rustc_hir::def::{DefKind, Res};
use rustc_hir::def_id::{DefId, LocalDefId};
use rustc_hir::lang_items::LangItem;
use rustc_hir::{AmbigArg, ItemKind};
use rustc_hir::{AmbigArg, ItemKind, find_attr};
use rustc_infer::infer::outlives::env::OutlivesEnvironment;
use rustc_infer::infer::{self, InferCtxt, SubregionOrigin, TyCtxtInferExt};
use rustc_lint_defs::builtin::SUPERTRAIT_ITEM_SHADOWING_DEFINITION;
@ -925,11 +926,11 @@ fn check_param_wf(tcx: TyCtxt<'_>, param: &ty::GenericParamDef) -> Result<(), Er
#[instrument(level = "debug", skip(tcx))]
pub(crate) fn check_associated_item(
tcx: TyCtxt<'_>,
item_id: LocalDefId,
def_id: LocalDefId,
) -> Result<(), ErrorGuaranteed> {
let loc = Some(WellFormedLoc::Ty(item_id));
enter_wf_checking_ctxt(tcx, item_id, |wfcx| {
let item = tcx.associated_item(item_id);
let loc = Some(WellFormedLoc::Ty(def_id));
enter_wf_checking_ctxt(tcx, def_id, |wfcx| {
let item = tcx.associated_item(def_id);
// Avoid bogus "type annotations needed `Foo: Bar`" errors on `impl Bar for Foo` in case
// other `Foo` impls are incoherent.
@ -942,27 +943,36 @@ pub(crate) fn check_associated_item(
}
};
let span = tcx.def_span(item_id);
let span = tcx.def_span(def_id);
match item.kind {
ty::AssocKind::Const { .. } => {
let ty = tcx.type_of(item.def_id).instantiate_identity();
let ty = wfcx.deeply_normalize(span, Some(WellFormedLoc::Ty(item_id)), ty);
let ty = tcx.type_of(def_id).instantiate_identity();
let ty = wfcx.deeply_normalize(span, Some(WellFormedLoc::Ty(def_id)), ty);
wfcx.register_wf_obligation(span, loc, ty.into());
check_sized_if_body(
wfcx,
item.def_id.expect_local(),
ty,
Some(span),
ObligationCauseCode::SizedConstOrStatic,
);
let has_value = item.defaultness(tcx).has_value();
if find_attr!(tcx.get_all_attrs(def_id), AttributeKind::TypeConst(_)) {
check_type_const(wfcx, def_id, ty, has_value)?;
}
if has_value {
let code = ObligationCauseCode::SizedConstOrStatic;
wfcx.register_bound(
ObligationCause::new(span, def_id, code),
wfcx.param_env,
ty,
tcx.require_lang_item(LangItem::Sized, span),
);
}
Ok(())
}
ty::AssocKind::Fn { .. } => {
let sig = tcx.fn_sig(item.def_id).instantiate_identity();
let sig = tcx.fn_sig(def_id).instantiate_identity();
let hir_sig =
tcx.hir_node_by_def_id(item_id).fn_sig().expect("bad signature for method");
check_fn_or_method(wfcx, sig, hir_sig.decl, item_id);
tcx.hir_node_by_def_id(def_id).fn_sig().expect("bad signature for method");
check_fn_or_method(wfcx, sig, hir_sig.decl, def_id);
check_method_receiver(wfcx, hir_sig, item, self_ty)
}
ty::AssocKind::Type { .. } => {
@ -970,8 +980,8 @@ pub(crate) fn check_associated_item(
check_associated_type_bounds(wfcx, item, span)
}
if item.defaultness(tcx).has_value() {
let ty = tcx.type_of(item.def_id).instantiate_identity();
let ty = wfcx.deeply_normalize(span, Some(WellFormedLoc::Ty(item_id)), ty);
let ty = tcx.type_of(def_id).instantiate_identity();
let ty = wfcx.deeply_normalize(span, Some(WellFormedLoc::Ty(def_id)), ty);
wfcx.register_wf_obligation(span, loc, ty.into());
}
Ok(())
@ -1222,28 +1232,36 @@ pub(crate) fn check_static_item<'tcx>(
})
}
pub(crate) fn check_const_item(tcx: TyCtxt<'_>, def_id: LocalDefId) -> Result<(), ErrorGuaranteed> {
enter_wf_checking_ctxt(tcx, def_id, |wfcx| {
let ty = tcx.type_of(def_id).instantiate_identity();
let ty_span = tcx.ty_span(def_id);
let ty = wfcx.deeply_normalize(ty_span, Some(WellFormedLoc::Ty(def_id)), ty);
#[instrument(level = "debug", skip(wfcx))]
pub(super) fn check_type_const<'tcx>(
wfcx: &WfCheckingCtxt<'_, 'tcx>,
def_id: LocalDefId,
item_ty: Ty<'tcx>,
has_value: bool,
) -> Result<(), ErrorGuaranteed> {
let tcx = wfcx.tcx();
let span = tcx.def_span(def_id);
wfcx.register_wf_obligation(ty_span, Some(WellFormedLoc::Ty(def_id)), ty.into());
wfcx.register_bound(
traits::ObligationCause::new(
ty_span,
wfcx.body_def_id,
ObligationCauseCode::SizedConstOrStatic,
),
wfcx.register_bound(
ObligationCause::new(span, def_id, ObligationCauseCode::ConstParam(item_ty)),
wfcx.param_env,
item_ty,
tcx.require_lang_item(LangItem::ConstParamTy, span),
);
if has_value {
let raw_ct = tcx.const_of_item(def_id).instantiate_identity();
let norm_ct = wfcx.deeply_normalize(span, Some(WellFormedLoc::Ty(def_id)), raw_ct);
wfcx.register_wf_obligation(span, Some(WellFormedLoc::Ty(def_id)), norm_ct.into());
wfcx.register_obligation(Obligation::new(
tcx,
ObligationCause::new(span, def_id, ObligationCauseCode::WellFormed(None)),
wfcx.param_env,
ty,
tcx.require_lang_item(LangItem::Sized, ty_span),
);
check_where_clauses(wfcx, def_id);
Ok(())
})
ty::PredicateKind::Clause(ty::ClauseKind::ConstArgHasType(norm_ct, item_ty)),
));
}
Ok(())
}
#[instrument(level = "debug", skip(tcx, impl_))]
@ -1583,33 +1601,16 @@ fn check_fn_or_method<'tcx>(
}
// If the function has a body, additionally require that the return type is sized.
check_sized_if_body(
wfcx,
def_id,
sig.output(),
match hir_decl.output {
hir::FnRetTy::Return(ty) => Some(ty.span),
hir::FnRetTy::DefaultReturn(_) => None,
},
ObligationCauseCode::SizedReturnType,
);
}
fn check_sized_if_body<'tcx>(
wfcx: &WfCheckingCtxt<'_, 'tcx>,
def_id: LocalDefId,
ty: Ty<'tcx>,
maybe_span: Option<Span>,
code: ObligationCauseCode<'tcx>,
) {
let tcx = wfcx.tcx();
if let Some(body) = tcx.hir_maybe_body_owned_by(def_id) {
let span = maybe_span.unwrap_or(body.value.span);
let span = match hir_decl.output {
hir::FnRetTy::Return(ty) => ty.span,
hir::FnRetTy::DefaultReturn(_) => body.value.span,
};
wfcx.register_bound(
ObligationCause::new(span, def_id, code),
ObligationCause::new(span, def_id, ObligationCauseCode::SizedReturnType),
wfcx.param_env,
ty,
sig.output(),
tcx.require_lang_item(LangItem::Sized, span),
);
}

View file

@ -891,15 +891,6 @@ fn trait_def(tcx: TyCtxt<'_>, def_id: LocalDefId) -> ty::TraitDef {
};
let attrs = tcx.get_all_attrs(def_id);
// Only regular traits can be const.
// FIXME(const_trait_impl): remove this
let constness = if constness == hir::Constness::Const
|| !is_alias && find_attr!(attrs, AttributeKind::ConstTrait(_))
{
hir::Constness::Const
} else {
hir::Constness::NotConst
};
let paren_sugar = find_attr!(attrs, AttributeKind::ParenSugar(_));
if paren_sugar && !tcx.features().unboxed_closures() {
@ -1382,22 +1373,27 @@ fn check_impl_constness(
}
let trait_name = tcx.item_name(trait_def_id).to_string();
let (local_trait_span, suggestion_pre) =
match (trait_def_id.is_local(), tcx.sess.is_nightly_build()) {
(true, true) => (
Some(tcx.def_span(trait_def_id).shrink_to_lo()),
let (suggestion, suggestion_pre) = match (trait_def_id.as_local(), tcx.sess.is_nightly_build())
{
(Some(trait_def_id), true) => {
let span = tcx.hir_expect_item(trait_def_id).vis_span;
let span = tcx.sess.source_map().span_extend_while_whitespace(span);
(
Some(span.shrink_to_hi()),
if tcx.features().const_trait_impl() {
""
} else {
"enable `#![feature(const_trait_impl)]` in your crate and "
},
),
(false, _) | (_, false) => (None, ""),
};
)
}
(None, _) | (_, false) => (None, ""),
};
tcx.dcx().emit_err(errors::ConstImplForNonConstTrait {
trait_ref_span: hir_trait_ref.path.span,
trait_name,
local_trait_span,
suggestion,
suggestion_pre,
marking: (),
adding: (),
@ -1615,8 +1611,12 @@ fn const_of_item<'tcx>(
};
let ct_arg = match ct_rhs {
hir::ConstItemRhs::TypeConst(ct_arg) => ct_arg,
hir::ConstItemRhs::Body(body_id) => {
bug!("cannot call const_of_item on a non-type_const {body_id:?}")
hir::ConstItemRhs::Body(_) => {
let e = tcx.dcx().span_delayed_bug(
tcx.def_span(def_id),
"cannot call const_of_item on a non-type_const",
);
return ty::EarlyBinder::bind(Const::new_error(tcx, e));
}
};
let icx = ItemCtxt::new(tcx, def_id);

View file

@ -500,13 +500,8 @@ pub(crate) struct ConstImplForNonConstTrait {
#[label]
pub trait_ref_span: Span,
pub trait_name: String,
#[suggestion(
applicability = "machine-applicable",
// FIXME(const_trait_impl) fix this suggestion
code = "#[const_trait] ",
style = "verbose"
)]
pub local_trait_span: Option<Span>,
#[suggestion(applicability = "machine-applicable", code = "const ", style = "verbose")]
pub suggestion: Option<Span>,
pub suggestion_pre: &'static str,
#[note]
pub marking: (),
@ -523,14 +518,9 @@ pub(crate) struct ConstBoundForNonConstTrait {
pub modifier: &'static str,
#[note]
pub def_span: Option<Span>,
pub suggestion_pre: &'static str,
#[suggestion(
applicability = "machine-applicable",
// FIXME(const_trait_impl) fix this suggestion
code = "#[const_trait] ",
style = "verbose"
)]
#[suggestion(applicability = "machine-applicable", code = "const ", style = "verbose")]
pub suggestion: Option<Span>,
pub suggestion_pre: &'static str,
pub trait_name: String,
}

View file

@ -881,28 +881,33 @@ impl<'tcx> dyn HirTyLowerer<'tcx> + '_ {
}
if let hir::BoundConstness::Always(span) | hir::BoundConstness::Maybe(span) = constness
&& !self.tcx().is_const_trait(trait_def_id)
&& !tcx.is_const_trait(trait_def_id)
{
let (def_span, suggestion, suggestion_pre) =
match (trait_def_id.is_local(), self.tcx().sess.is_nightly_build()) {
(true, true) => (
None,
Some(tcx.def_span(trait_def_id).shrink_to_lo()),
if self.tcx().features().const_trait_impl() {
""
} else {
"enable `#![feature(const_trait_impl)]` in your crate and "
},
),
(false, _) | (_, false) => (Some(tcx.def_span(trait_def_id)), None, ""),
match (trait_def_id.as_local(), tcx.sess.is_nightly_build()) {
(Some(trait_def_id), true) => {
let span = tcx.hir_expect_item(trait_def_id).vis_span;
let span = tcx.sess.source_map().span_extend_while_whitespace(span);
(
None,
Some(span.shrink_to_hi()),
if self.tcx().features().const_trait_impl() {
""
} else {
"enable `#![feature(const_trait_impl)]` in your crate and "
},
)
}
(None, _) | (_, false) => (Some(tcx.def_span(trait_def_id)), None, ""),
};
self.dcx().emit_err(crate::errors::ConstBoundForNonConstTrait {
span,
modifier: constness.as_str(),
def_span,
trait_name: self.tcx().def_path_str(trait_def_id),
suggestion_pre,
trait_name: tcx.def_path_str(trait_def_id),
suggestion,
suggestion_pre,
});
} else {
match predicate_filter {

View file

@ -150,9 +150,6 @@ impl<'tcx> CheckAttrVisitor<'tcx> {
Attribute::Parsed(AttributeKind::ProcMacroDerive { .. }) => {
self.check_proc_macro(hir_id, target, ProcMacroKind::Derive)
}
&Attribute::Parsed(AttributeKind::TypeConst(attr_span)) => {
self.check_type_const(hir_id, attr_span, target)
}
Attribute::Parsed(
AttributeKind::Stability {
span: attr_span,
@ -235,7 +232,6 @@ impl<'tcx> CheckAttrVisitor<'tcx> {
| AttributeKind::Marker(..)
| AttributeKind::SkipDuringMethodDispatch { .. }
| AttributeKind::Coinductive(..)
| AttributeKind::ConstTrait(..)
| AttributeKind::DenyExplicitImpl(..)
| AttributeKind::DoNotImplementViaObject(..)
| AttributeKind::SpecializationTrait(..)
@ -243,6 +239,7 @@ impl<'tcx> CheckAttrVisitor<'tcx> {
| AttributeKind::ParenSugar(..)
| AttributeKind::AllowIncoherentImpl(..)
| AttributeKind::Confusables { .. }
| AttributeKind::TypeConst{..}
// `#[doc]` is actually a lot more than just doc comments, so is checked below
| AttributeKind::DocComment {..}
// handled below this loop and elsewhere
@ -2115,16 +2112,6 @@ impl<'tcx> CheckAttrVisitor<'tcx> {
}
}
fn check_type_const(&self, _hir_id: HirId, attr_span: Span, target: Target) {
if matches!(target, Target::AssocConst | Target::Const) {
return;
} else {
self.dcx()
.struct_span_err(attr_span, "`#[type_const]` must only be applied to const items")
.emit();
}
}
fn check_rustc_pub_transparent(&self, attr_span: Span, span: Span, attrs: &[Attribute]) {
if !find_attr!(attrs, AttributeKind::Repr { reprs, .. } => reprs.iter().any(|(r, _)| r == &ReprAttr::ReprTransparent))
.unwrap_or(false)

View file

@ -750,7 +750,6 @@ symbols! {
const_raw_ptr_to_usize_cast,
const_refs_to_cell,
const_refs_to_static,
const_trait,
const_trait_bound_opt_out,
const_trait_impl,
const_try,

View file

@ -1286,12 +1286,8 @@ impl<'a, 'tcx> TypeErrCtxt<'a, 'tcx> {
ty: Ty<'tcx>,
obligation: &PredicateObligation<'tcx>,
) -> Diag<'a> {
let param = obligation.cause.body_id;
let hir::GenericParamKind::Const { ty: &hir::Ty { span, .. }, .. } =
self.tcx.hir_node_by_def_id(param).expect_generic_param().kind
else {
bug!()
};
let def_id = obligation.cause.body_id;
let span = self.tcx.ty_span(def_id);
let mut file = None;
let ty_str = self.tcx.short_string(ty, &mut file);

View file

@ -132,9 +132,8 @@ where
#[rustc_const_unstable(feature = "const_cmp", issue = "143800")]
impl<T: [const] Eq, const N: usize> const Eq for [T; N] {}
#[const_trait]
#[rustc_const_unstable(feature = "const_cmp", issue = "143800")]
trait SpecArrayEq<Other, const N: usize>: Sized {
const trait SpecArrayEq<Other, const N: usize>: Sized {
fn spec_eq(a: &[Self; N], b: &[Other; N]) -> bool;
fn spec_ne(a: &[Self; N], b: &[Other; N]) -> bool;
}

View file

@ -17,8 +17,7 @@ use crate::num::NonZero;
/// - Neither `Self` nor `Rhs` have provenance, so integer comparisons are correct.
/// - `<Self as PartialEq<Rhs>>::{eq,ne}` are equivalent to comparing the bytes.
#[rustc_specialization_trait]
#[const_trait] // FIXME(const_trait_impl): Migrate to `const unsafe trait` once #146122 is fixed.
pub(crate) unsafe trait BytewiseEq<Rhs = Self>:
pub(crate) const unsafe trait BytewiseEq<Rhs = Self>:
[const] PartialEq<Rhs> + Sized
{
}

View file

@ -816,9 +816,8 @@ impl<T: Clone> Bound<&T> {
/// by range syntax like `..`, `a..`, `..b`, `..=c`, `d..e`, or `f..=g`.
#[stable(feature = "collections_range", since = "1.28.0")]
#[rustc_diagnostic_item = "RangeBounds"]
#[const_trait]
#[rustc_const_unstable(feature = "const_range", issue = "none")]
pub trait RangeBounds<T: ?Sized> {
pub const trait RangeBounds<T: ?Sized> {
/// Start index bound.
///
/// Returns the start value as a `Bound`.
@ -954,9 +953,8 @@ pub trait RangeBounds<T: ?Sized> {
/// `IntoBounds` is implemented by Rusts built-in range types, produced
/// by range syntax like `..`, `a..`, `..b`, `..=c`, `d..e`, or `f..=g`.
#[unstable(feature = "range_into_bounds", issue = "136903")]
#[const_trait]
#[rustc_const_unstable(feature = "const_range", issue = "none")]
pub trait IntoBounds<T>: [const] RangeBounds<T> {
pub const trait IntoBounds<T>: [const] RangeBounds<T> {
/// Convert this range into the start and end bounds.
/// Returns `(start_bound, end_bound)`.
///
@ -1319,9 +1317,8 @@ pub enum OneSidedRangeBound {
/// Types that implement `OneSidedRange<T>` must return `Bound::Unbounded`
/// from one of `RangeBounds::start_bound` or `RangeBounds::end_bound`.
#[unstable(feature = "one_sided_range", issue = "69780")]
#[const_trait]
#[rustc_const_unstable(feature = "const_range", issue = "none")]
pub trait OneSidedRange<T>: RangeBounds<T> {
pub const trait OneSidedRange<T>: RangeBounds<T> {
/// An internal-only helper function for `split_off` and
/// `split_off_mut` that returns the bound of the one-sided range.
fn bound(self) -> (OneSidedRangeBound, T);

View file

@ -155,18 +155,16 @@ where
}
#[doc(hidden)]
#[const_trait]
#[rustc_const_unstable(feature = "const_cmp", issue = "143800")]
// intermediate trait for specialization of slice's PartialOrd
trait SlicePartialOrd: Sized {
const trait SlicePartialOrd: Sized {
fn partial_compare(left: &[Self], right: &[Self]) -> Option<Ordering>;
}
#[doc(hidden)]
#[const_trait]
#[rustc_const_unstable(feature = "const_cmp", issue = "143800")]
// intermediate trait for specialization of slice's PartialOrd chaining methods
trait SliceChain: Sized {
const trait SliceChain: Sized {
fn chaining_lt(left: &[Self], right: &[Self]) -> ControlFlow<bool>;
fn chaining_le(left: &[Self], right: &[Self]) -> ControlFlow<bool>;
fn chaining_gt(left: &[Self], right: &[Self]) -> ControlFlow<bool>;
@ -244,9 +242,8 @@ impl<A: [const] AlwaysApplicableOrd> const SlicePartialOrd for A {
}
#[rustc_specialization_trait]
#[const_trait]
#[rustc_const_unstable(feature = "const_cmp", issue = "143800")]
trait AlwaysApplicableOrd: [const] SliceOrd + [const] Ord {}
const trait AlwaysApplicableOrd: [const] SliceOrd + [const] Ord {}
macro_rules! always_applicable_ord {
($([$($p:tt)*] $t:ty,)*) => {
@ -265,10 +262,9 @@ always_applicable_ord! {
}
#[doc(hidden)]
#[const_trait]
#[rustc_const_unstable(feature = "const_cmp", issue = "143800")]
// intermediate trait for specialization of slice's Ord
trait SliceOrd: Sized {
const trait SliceOrd: Sized {
fn compare(left: &[Self], right: &[Self]) -> Ordering;
}
@ -292,8 +288,7 @@ impl<A: Ord> SliceOrd for A {
/// * For every `x` and `y` of this type, `Ord(x, y)` must return the same
/// value as `Ord::cmp(transmute::<_, u8>(x), transmute::<_, u8>(y))`.
#[rustc_specialization_trait]
#[const_trait]
unsafe trait UnsignedBytewiseOrd: [const] Ord {}
const unsafe trait UnsignedBytewiseOrd: [const] Ord {}
#[rustc_const_unstable(feature = "const_cmp", issue = "143800")]
unsafe impl const UnsignedBytewiseOrd for bool {}

View file

@ -159,9 +159,8 @@ mod private_slice_index {
message = "the type `{T}` cannot be indexed by `{Self}`",
label = "slice indices are of type `usize` or ranges of `usize`"
)]
#[const_trait] // FIXME(const_trait_impl): Migrate to `const unsafe trait` once #146122 is fixed.
#[rustc_const_unstable(feature = "const_index", issue = "143775")]
pub unsafe trait SliceIndex<T: ?Sized>: private_slice_index::Sealed {
pub const unsafe trait SliceIndex<T: ?Sized>: private_slice_index::Sealed {
/// The output type returned by methods.
#[stable(feature = "slice_get_slice", since = "1.28.0")]
type Output: ?Sized;

View file

@ -22,7 +22,7 @@ use crate::io::try_set_output_capture;
use crate::mem::{self, ManuallyDrop};
use crate::panic::{BacktraceStyle, PanicHookInfo};
use crate::sync::atomic::{Atomic, AtomicBool, Ordering};
use crate::sync::{PoisonError, RwLock};
use crate::sync::nonpoison::RwLock;
use crate::sys::backtrace;
use crate::sys::stdio::panic_output;
use crate::{fmt, intrinsics, process, thread};
@ -144,13 +144,9 @@ pub fn set_hook(hook: Box<dyn Fn(&PanicHookInfo<'_>) + 'static + Sync + Send>) {
panic!("cannot modify the panic hook from a panicking thread");
}
let new = Hook::Custom(hook);
let mut hook = HOOK.write().unwrap_or_else(PoisonError::into_inner);
let old = mem::replace(&mut *hook, new);
drop(hook);
// Only drop the old hook after releasing the lock to avoid deadlocking
// if its destructor panics.
drop(old);
// Drop the old hook after changing the hook to avoid deadlocking if its
// destructor panics.
drop(HOOK.replace(Hook::Custom(hook)));
}
/// Unregisters the current panic hook and returns it, registering the default hook
@ -188,11 +184,7 @@ pub fn take_hook() -> Box<dyn Fn(&PanicHookInfo<'_>) + 'static + Sync + Send> {
panic!("cannot modify the panic hook from a panicking thread");
}
let mut hook = HOOK.write().unwrap_or_else(PoisonError::into_inner);
let old_hook = mem::take(&mut *hook);
drop(hook);
old_hook.into_box()
HOOK.replace(Hook::Default).into_box()
}
/// Atomic combination of [`take_hook`] and [`set_hook`]. Use this to replace the panic handler with
@ -238,7 +230,7 @@ where
panic!("cannot modify the panic hook from a panicking thread");
}
let mut hook = HOOK.write().unwrap_or_else(PoisonError::into_inner);
let mut hook = HOOK.write();
let prev = mem::take(&mut *hook).into_box();
*hook = Hook::Custom(Box::new(move |info| hook_fn(&prev, info)));
}
@ -822,7 +814,7 @@ fn panic_with_hook(
crate::process::abort();
}
match *HOOK.read().unwrap_or_else(PoisonError::into_inner) {
match *HOOK.read() {
// Some platforms (like wasm) know that printing to stderr won't ever actually
// print anything, and if that's the case we can skip the default
// hook. Since string formatting happens lazily when calling `payload`

View file

@ -430,6 +430,7 @@ pub fn linker_flags(
match builder.config.bootstrap_override_lld {
BootstrapOverrideLld::External => {
args.push("-Clinker-features=+lld".to_string());
args.push("-Clink-self-contained=-linker".to_string());
args.push("-Zunstable-options".to_string());
}
BootstrapOverrideLld::SelfContained => {

View file

@ -306,6 +306,14 @@ impl<'a> Renderer<'a> {
);
}
fn render_report(&self, report: &Report) {
let &Report { total_time, compilation_time } = report;
// Should match `write_merged_doctest_times` in `library/test/src/formatters/pretty.rs`.
println!(
"all doctests ran in {total_time:.2}s; merged doctests compilation took {compilation_time:.2}s"
);
}
fn render_message(&mut self, message: Message) {
match message {
Message::Suite(SuiteMessage::Started { test_count }) => {
@ -323,6 +331,9 @@ impl<'a> Renderer<'a> {
Message::Suite(SuiteMessage::Failed(outcome)) => {
self.render_suite_outcome(Outcome::Failed, &outcome);
}
Message::Report(report) => {
self.render_report(&report);
}
Message::Bench(outcome) => {
// The formatting for benchmarks doesn't replicate 1:1 the formatting libtest
// outputs, mostly because libtest's formatting is broken in terse mode, which is
@ -435,6 +446,7 @@ enum Message {
Suite(SuiteMessage),
Test(TestMessage),
Bench(BenchOutcome),
Report(Report),
}
#[derive(serde_derive::Deserialize)]
@ -481,3 +493,10 @@ struct TestOutcome {
stdout: Option<String>,
message: Option<String>,
}
/// Emitted when running doctests.
#[derive(serde_derive::Deserialize)]
struct Report {
total_time: f64,
compilation_time: f64,
}

View file

@ -993,7 +993,7 @@ Sanitizers produce symbolized stacktraces when llvm-symbolizer binary is in `PAT
[clang-kcfi]: https://clang.llvm.org/docs/ControlFlowIntegrity.html#fsanitize-kcfi
[clang-lsan]: https://clang.llvm.org/docs/LeakSanitizer.html
[clang-msan]: https://clang.llvm.org/docs/MemorySanitizer.html
[clan-rtsan]: https://clang.llvm.org/docs/RealtimeSanitizer.html
[clang-rtsan]: https://clang.llvm.org/docs/RealtimeSanitizer.html
[clang-safestack]: https://clang.llvm.org/docs/SafeStack.html
[clang-scs]: https://clang.llvm.org/docs/ShadowCallStack.html
[clang-tsan]: https://clang.llvm.org/docs/ThreadSanitizer.html

View file

@ -52,18 +52,6 @@ fn show_version() {
println!();
}
fn forward_patched_extern_arg(args: &mut impl Iterator<Item = String>, cmd: &mut Command) {
cmd.arg("--extern"); // always forward flag, but adjust filename:
let path = args.next().expect("`--extern` should be followed by a filename");
if let Some(lib) = path.strip_suffix(".rlib") {
// If this is an rlib, make it an rmeta.
cmd.arg(format!("{lib}.rmeta"));
} else {
// Some other extern file (e.g. a `.so`). Forward unchanged.
cmd.arg(path);
}
}
pub fn phase_cargo_miri(mut args: impl Iterator<Item = String>) {
// Require a subcommand before any flags.
// We cannot know which of those flags take arguments and which do not,
@ -276,7 +264,7 @@ pub enum RustcPhase {
Rustdoc,
}
pub fn phase_rustc(mut args: impl Iterator<Item = String>, phase: RustcPhase) {
pub fn phase_rustc(args: impl Iterator<Item = String>, phase: RustcPhase) {
/// Determines if we are being invoked (as rustc) to build a crate for
/// the "target" architecture, in contrast to the "host" architecture.
/// Host crates are for build scripts and proc macros and still need to
@ -444,7 +432,6 @@ pub fn phase_rustc(mut args: impl Iterator<Item = String>, phase: RustcPhase) {
}
let mut cmd = miri();
let mut emit_link_hack = false;
// Arguments are treated very differently depending on whether this crate is
// for interpretation by Miri, or for use by a build script / proc macro.
if target_crate {
@ -455,7 +442,7 @@ pub fn phase_rustc(mut args: impl Iterator<Item = String>, phase: RustcPhase) {
}
// Forward arguments, but patched.
let emit_flag = "--emit";
// This hack helps bootstrap run standard library tests in Miri. The issue is as follows:
// when running `cargo miri test` on libcore, cargo builds a local copy of core and makes it
// a dependency of the integration test crate. This copy duplicates all the lang items, so
@ -471,30 +458,7 @@ pub fn phase_rustc(mut args: impl Iterator<Item = String>, phase: RustcPhase) {
let replace_librs = env::var_os("MIRI_REPLACE_LIBRS_IF_NOT_TEST").is_some()
&& !runnable_crate
&& phase == RustcPhase::Build;
while let Some(arg) = args.next() {
// Patch `--emit`: remove "link" from "--emit" to make this a check-only build.
if let Some(val) = arg.strip_prefix(emit_flag) {
// Patch this argument. First, extract its value.
let val =
val.strip_prefix('=').expect("`cargo` should pass `--emit=X` as one argument");
let mut val: Vec<_> = val.split(',').collect();
// Now make sure "link" is not in there, but "metadata" is.
if let Some(i) = val.iter().position(|&s| s == "link") {
emit_link_hack = true;
val.remove(i);
if !val.contains(&"metadata") {
val.push("metadata");
}
}
cmd.arg(format!("{emit_flag}={}", val.join(",")));
continue;
}
// Patch `--extern` filenames, since Cargo sometimes passes stub `.rlib` files:
// https://github.com/rust-lang/miri/issues/1705
if arg == "--extern" {
forward_patched_extern_arg(&mut args, &mut cmd);
continue;
}
for arg in args {
// If the REPLACE_LIBRS hack is enabled and we are building a `lib.rs` file, and a
// `lib.miri.rs` file exists, then build that instead.
if replace_librs {
@ -543,17 +507,6 @@ pub fn phase_rustc(mut args: impl Iterator<Item = String>, phase: RustcPhase) {
eprintln!("[cargo-miri rustc] target_crate={target_crate} runnable_crate={runnable_crate}");
}
// Create a stub .rlib file if "link" was requested by cargo.
// This is necessary to prevent cargo from doing rebuilds all the time.
if emit_link_hack {
for filename in out_filenames() {
if verbose > 0 {
eprintln!("[cargo-miri rustc] creating fake lib file at `{}`", filename.display());
}
File::create(filename).expect("failed to create fake lib file");
}
}
debug_cmd("[cargo-miri rustc]", verbose, &cmd);
exec(cmd);
}
@ -624,17 +577,11 @@ pub fn phase_runner(mut binary_args: impl Iterator<Item = String>, phase: Runner
cmd.arg("--sysroot").arg(env::var_os("MIRI_SYSROOT").unwrap());
}
// Forward rustc arguments.
// We need to patch "--extern" filenames because we forced a check-only
// build without cargo knowing about that: replace `.rlib` suffix by
// `.rmeta`.
// We also need to remove `--error-format` as cargo specifies that to be JSON,
// We need to remove `--error-format` as cargo specifies that to be JSON,
// but when we run here, cargo does not interpret the JSON any more. `--json`
// then also needs to be dropped.
let mut args = info.args.iter();
while let Some(arg) = args.next() {
if arg == "--extern" {
forward_patched_extern_arg(&mut (&mut args).cloned(), &mut cmd);
} else if let Some(suffix) = arg.strip_prefix("--error-format") {
for arg in &info.args {
if let Some(suffix) = arg.strip_prefix("--error-format") {
assert!(suffix.starts_with('='));
// Drop this argument.
} else if let Some(suffix) = arg.strip_prefix("--json") {
@ -668,7 +615,7 @@ pub fn phase_runner(mut binary_args: impl Iterator<Item = String>, phase: Runner
}
}
pub fn phase_rustdoc(mut args: impl Iterator<Item = String>) {
pub fn phase_rustdoc(args: impl Iterator<Item = String>) {
let verbose = env::var("MIRI_VERBOSE")
.map_or(0, |verbose| verbose.parse().expect("verbosity flag must be an integer"));
@ -676,15 +623,7 @@ pub fn phase_rustdoc(mut args: impl Iterator<Item = String>) {
// of the old value into MIRI_ORIG_RUSTDOC. So that's what we have to invoke now.
let rustdoc = env::var("MIRI_ORIG_RUSTDOC").unwrap_or("rustdoc".to_string());
let mut cmd = Command::new(rustdoc);
while let Some(arg) = args.next() {
if arg == "--extern" {
// Patch --extern arguments to use *.rmeta files, since phase_cargo_rustc only creates stub *.rlib files.
forward_patched_extern_arg(&mut args, &mut cmd);
} else {
cmd.arg(arg);
}
}
cmd.args(args);
// Doctests of `proc-macro` crates (and their dependencies) are always built for the host,
// so we are not able to run them in Miri.

View file

@ -160,7 +160,7 @@ pub fn setup(
// Do the build.
let status = SysrootBuilder::new(&sysroot_dir, target)
.build_mode(BuildMode::Check)
.build_mode(BuildMode::Build) // not a real build, since we use dummy codegen
.rustc_version(rustc_version.clone())
.sysroot_config(sysroot_config)
.rustflags(rustflags)

View file

@ -57,7 +57,9 @@ impl MiriEnv {
.arg("--")
.args(&["miri", "setup", "--print-sysroot"])
.args(target_flag);
cmd.set_quiet(quiet);
if quiet {
cmd = cmd.arg("--quiet");
}
let output = cmd.read()?;
self.sh.set_var("MIRI_SYSROOT", &output);
Ok(output.into())
@ -112,8 +114,8 @@ impl Command {
Command::Check { features, flags } => Self::check(features, flags),
Command::Test { bless, target, coverage, features, flags } =>
Self::test(bless, target, coverage, features, flags),
Command::Run { dep, verbose, target, edition, features, flags } =>
Self::run(dep, verbose, target, edition, features, flags),
Command::Run { dep, quiet, target, edition, features, flags } =>
Self::run(dep, quiet, target, edition, features, flags),
Command::Doc { features, flags } => Self::doc(features, flags),
Command::Fmt { flags } => Self::fmt(flags),
Command::Clippy { features, flags } => Self::clippy(features, flags),
@ -458,7 +460,7 @@ impl Command {
fn run(
dep: bool,
verbose: bool,
quiet: bool,
target: Option<String>,
edition: Option<String>,
features: Vec<String>,
@ -468,7 +470,7 @@ impl Command {
// Preparation: get a sysroot, and get the miri binary.
let miri_sysroot =
e.build_miri_sysroot(/* quiet */ !verbose, target.as_deref(), &features)?;
e.build_miri_sysroot(/* quiet */ quiet, target.as_deref(), &features)?;
let miri_bin = e
.build_get_binary(".", &features)
.context("failed to get filename of miri executable")?;
@ -492,7 +494,7 @@ impl Command {
// Compute flags.
let miri_flags = e.sh.var("MIRIFLAGS").unwrap_or_default();
let miri_flags = flagsplit(&miri_flags);
let quiet_flag = if verbose { None } else { Some("--quiet") };
let quiet_flag = if quiet { Some("--quiet") } else { None };
// Run Miri.
// The basic command that executes the Miri driver.
@ -506,7 +508,7 @@ impl Command {
} else {
cmd!(e.sh, "{miri_bin}")
};
cmd.set_quiet(!verbose);
cmd.set_quiet(quiet);
// Add Miri flags
let mut cmd = cmd.args(&miri_flags).args(&early_flags).args(&flags);
// For `--dep` we also need to set the target in the env var.

View file

@ -78,9 +78,9 @@ pub enum Command {
/// Build the program with the dependencies declared in `tests/deps/Cargo.toml`.
#[arg(long)]
dep: bool,
/// Show build progress.
/// Hide build progress.
#[arg(long, short)]
verbose: bool,
quiet: bool,
/// The cross-interpretation target.
#[arg(long)]
target: Option<String>,

View file

@ -1 +1 @@
5f9dd05862d2e4bceb3be1031b6c936e35671501
ceb7df7e6f17c92c7d49f7e4f02df0e68bc9b38b

View file

@ -16,7 +16,6 @@ extern crate rustc_hir;
extern crate rustc_hir_analysis;
extern crate rustc_interface;
extern crate rustc_log;
extern crate rustc_metadata;
extern crate rustc_middle;
extern crate rustc_session;
extern crate rustc_span;
@ -26,10 +25,8 @@ mod log;
use std::env;
use std::num::NonZero;
use std::ops::Range;
use std::path::PathBuf;
use std::rc::Rc;
use std::str::FromStr;
use std::sync::Arc;
use std::sync::atomic::{AtomicI32, AtomicU32, Ordering};
use miri::{
@ -51,10 +48,8 @@ use rustc_middle::middle::exported_symbols::{
use rustc_middle::query::LocalCrate;
use rustc_middle::traits::{ObligationCause, ObligationCauseCode};
use rustc_middle::ty::{self, Ty, TyCtxt};
use rustc_middle::util::Providers;
use rustc_session::EarlyDiagCtxt;
use rustc_session::config::{CrateType, ErrorOutputType, OptLevel};
use rustc_session::search_paths::PathKind;
use rustc_span::def_id::DefId;
use crate::log::setup::{deinit_loggers, init_early_loggers, init_late_loggers};
@ -126,21 +121,6 @@ fn entry_fn(tcx: TyCtxt<'_>) -> (DefId, MiriEntryFnType) {
}
impl rustc_driver::Callbacks for MiriCompilerCalls {
fn config(&mut self, config: &mut Config) {
config.override_queries = Some(|_, providers| {
providers.extern_queries.used_crate_source = |tcx, cnum| {
let mut providers = Providers::default();
rustc_metadata::provide(&mut providers);
let mut crate_source = (providers.extern_queries.used_crate_source)(tcx, cnum);
// HACK: rustc will emit "crate ... required to be available in rlib format, but
// was not found in this form" errors once we use `tcx.dependency_formats()` if
// there's no rlib provided, so setting a dummy path here to workaround those errors.
Arc::make_mut(&mut crate_source).rlib = Some((PathBuf::new(), PathKind::All));
crate_source
};
});
}
fn after_analysis<'tcx>(
&mut self,
_: &rustc_interface::interface::Compiler,
@ -253,12 +233,26 @@ impl rustc_driver::Callbacks for MiriBeRustCompilerCalls {
#[allow(rustc::potential_query_instability)] // rustc_codegen_ssa (where this code is copied from) also allows this lint
fn config(&mut self, config: &mut Config) {
if config.opts.prints.is_empty() && self.target_crate {
#[allow(rustc::bad_opt_access)] // tcx does not exist yet
{
let any_crate_types = !config.opts.crate_types.is_empty();
// Avoid warnings about unsupported crate types.
config
.opts
.crate_types
.retain(|&c| c == CrateType::Executable || c == CrateType::Rlib);
if any_crate_types {
// Assert that we didn't remove all crate types if any crate type was passed on
// the cli. Otherwise we might silently change what kind of crate we are building.
assert!(!config.opts.crate_types.is_empty());
}
}
// Queries overridden here affect the data stored in `rmeta` files of dependencies,
// which will be used later in non-`MIRI_BE_RUSTC` mode.
config.override_queries = Some(|_, local_providers| {
// `exported_non_generic_symbols` and `reachable_non_generics` provided by rustc always returns
// an empty result if `tcx.sess.opts.output_types.should_codegen()` is false.
// In addition we need to add #[used] symbols to exported_symbols for `lookup_link_section`.
// We need to add #[used] symbols to exported_symbols for `lookup_link_section`.
// FIXME handle this somehow in rustc itself to avoid this hack.
local_providers.exported_non_generic_symbols = |tcx, LocalCrate| {
let reachable_set = tcx.with_stable_hashing_context(|hcx| {
tcx.reachable_set(()).to_sorted(&hcx, true)

View file

@ -11,6 +11,22 @@ use crate::*;
pub mod stacked_borrows;
pub mod tree_borrows;
/// Indicates which kind of access is being performed.
#[derive(Copy, Clone, Hash, PartialEq, Eq, Debug)]
pub enum AccessKind {
Read,
Write,
}
impl fmt::Display for AccessKind {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
AccessKind::Read => write!(f, "read access"),
AccessKind::Write => write!(f, "write access"),
}
}
}
/// Tracking pointer provenance
#[derive(Copy, Clone, Hash, PartialEq, Eq, PartialOrd, Ord)]
pub struct BorTag(NonZero<u64>);
@ -115,15 +131,6 @@ impl VisitProvenance for GlobalStateInner {
/// We need interior mutable access to the global state.
pub type GlobalState = RefCell<GlobalStateInner>;
impl fmt::Display for AccessKind {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
AccessKind::Read => write!(f, "read access"),
AccessKind::Write => write!(f, "write access"),
}
}
}
/// Policy on whether to recurse into fields to retag
#[derive(Copy, Clone, Debug)]
pub enum RetagFields {

View file

@ -5,7 +5,7 @@ use rustc_data_structures::fx::FxHashSet;
use rustc_span::{Span, SpanData};
use smallvec::SmallVec;
use crate::borrow_tracker::{GlobalStateInner, ProtectorKind};
use crate::borrow_tracker::{AccessKind, GlobalStateInner, ProtectorKind};
use crate::*;
/// Error reporting

View file

@ -21,7 +21,7 @@ pub use self::stack::Stack;
use crate::borrow_tracker::stacked_borrows::diagnostics::{
AllocHistory, DiagnosticCx, DiagnosticCxBuilder,
};
use crate::borrow_tracker::{GlobalStateInner, ProtectorKind};
use crate::borrow_tracker::{AccessKind, GlobalStateInner, ProtectorKind};
use crate::concurrency::data_race::{NaReadType, NaWriteType};
use crate::*;

View file

@ -4,10 +4,10 @@ use std::ops::Range;
use rustc_data_structures::fx::FxHashMap;
use rustc_span::{Span, SpanData};
use crate::borrow_tracker::ProtectorKind;
use crate::borrow_tracker::tree_borrows::perms::{PermTransition, Permission};
use crate::borrow_tracker::tree_borrows::tree::LocationState;
use crate::borrow_tracker::tree_borrows::unimap::UniIndex;
use crate::borrow_tracker::{AccessKind, ProtectorKind};
use crate::*;
/// Cause of an access: either a real access or one

View file

@ -5,7 +5,7 @@ use rustc_middle::ty::{self, Ty};
use self::foreign_access_skipping::IdempotentForeignAccess;
use self::tree::LocationState;
use crate::borrow_tracker::{GlobalState, GlobalStateInner, ProtectorKind};
use crate::borrow_tracker::{AccessKind, GlobalState, GlobalStateInner, ProtectorKind};
use crate::concurrency::data_race::NaReadType;
use crate::*;

View file

@ -1,7 +1,7 @@
use std::cmp::{Ordering, PartialOrd};
use std::fmt;
use crate::AccessKind;
use crate::borrow_tracker::AccessKind;
use crate::borrow_tracker::tree_borrows::diagnostics::TransitionError;
use crate::borrow_tracker::tree_borrows::tree::AccessRelatedness;

View file

@ -25,7 +25,7 @@ use crate::borrow_tracker::tree_borrows::diagnostics::{
use crate::borrow_tracker::tree_borrows::foreign_access_skipping::IdempotentForeignAccess;
use crate::borrow_tracker::tree_borrows::perms::PermTransition;
use crate::borrow_tracker::tree_borrows::unimap::{UniIndex, UniKeyMap, UniValMap};
use crate::borrow_tracker::{GlobalState, ProtectorKind};
use crate::borrow_tracker::{AccessKind, GlobalState, ProtectorKind};
use crate::*;
mod tests;

View file

@ -57,6 +57,10 @@ impl InitOnceRef {
pub fn begin(&self) {
self.0.borrow_mut().begin();
}
pub fn queue_is_empty(&self) -> bool {
self.0.borrow().waiters.is_empty()
}
}
impl VisitProvenance for InitOnceRef {

View file

@ -1,3 +1,4 @@
use std::any::Any;
use std::cell::RefCell;
use std::collections::VecDeque;
use std::collections::hash_map::Entry;
@ -5,6 +6,7 @@ use std::default::Default;
use std::ops::Not;
use std::rc::Rc;
use std::time::Duration;
use std::{fmt, iter};
use rustc_abi::Size;
use rustc_data_structures::fx::FxHashMap;
@ -12,6 +14,52 @@ use rustc_data_structures::fx::FxHashMap;
use super::vector_clock::VClock;
use crate::*;
/// Indicates which kind of access is being performed.
#[derive(Copy, Clone, Hash, PartialEq, Eq, Debug)]
pub enum AccessKind {
Read,
Write,
Dealloc,
}
impl fmt::Display for AccessKind {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
AccessKind::Read => write!(f, "read"),
AccessKind::Write => write!(f, "write"),
AccessKind::Dealloc => write!(f, "deallocation"),
}
}
}
/// A trait for the synchronization metadata that can be attached to a memory location.
pub trait SyncObj: Any {
/// Determines whether reads/writes to this object's location are currently permitted.
fn on_access<'tcx>(&self, _access_kind: AccessKind) -> InterpResult<'tcx> {
interp_ok(())
}
/// Determines whether this object's metadata shall be deleted when a write to its
/// location occurs.
fn delete_on_write(&self) -> bool {
false
}
}
impl dyn SyncObj {
#[inline(always)]
pub fn downcast_ref<T: Any>(&self) -> Option<&T> {
let x: &dyn Any = self;
x.downcast_ref()
}
}
impl fmt::Debug for dyn SyncObj {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("SyncObj").finish_non_exhaustive()
}
}
/// The mutex state.
#[derive(Default, Debug)]
struct Mutex {
@ -37,6 +85,10 @@ impl MutexRef {
pub fn owner(&self) -> Option<ThreadId> {
self.0.borrow().owner
}
pub fn queue_is_empty(&self) -> bool {
self.0.borrow().queue.is_empty()
}
}
impl VisitProvenance for MutexRef {
@ -113,6 +165,11 @@ impl RwLockRef {
pub fn is_write_locked(&self) -> bool {
self.0.borrow().is_write_locked()
}
pub fn queue_is_empty(&self) -> bool {
let inner = self.0.borrow();
inner.reader_queue.is_empty() && inner.writer_queue.is_empty()
}
}
impl VisitProvenance for RwLockRef {
@ -140,8 +197,8 @@ impl CondvarRef {
Self(Default::default())
}
pub fn is_awaited(&self) -> bool {
!self.0.borrow().waiters.is_empty()
pub fn queue_is_empty(&self) -> bool {
self.0.borrow().waiters.is_empty()
}
}
@ -214,104 +271,21 @@ pub(super) trait EvalContextExtPriv<'tcx>: crate::MiriInterpCxExt<'tcx> {
impl<'tcx> AllocExtra<'tcx> {
fn get_sync<T: 'static>(&self, offset: Size) -> Option<&T> {
self.sync.get(&offset).and_then(|s| s.downcast_ref::<T>())
self.sync_objs.get(&offset).and_then(|s| s.downcast_ref::<T>())
}
}
/// We designate an `init`` field in all primitives.
/// If `init` is set to this, we consider the primitive initialized.
pub const LAZY_INIT_COOKIE: u32 = 0xcafe_affe;
// Public interface to synchronization primitives. Please note that in most
// Public interface to synchronization objects. Please note that in most
// cases, the function calls are infallible and it is the client's (shim
// implementation's) responsibility to detect and deal with erroneous
// situations.
impl<'tcx> EvalContextExt<'tcx> for crate::MiriInterpCx<'tcx> {}
pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
/// Helper for lazily initialized `alloc_extra.sync` data:
/// this forces an immediate init.
/// Return a reference to the data in the machine state.
fn lazy_sync_init<'a, T: 'static>(
&'a mut self,
primitive: &MPlaceTy<'tcx>,
init_offset: Size,
data: T,
) -> InterpResult<'tcx, &'a T>
where
'tcx: 'a,
{
let this = self.eval_context_mut();
let (alloc, offset, _) = this.ptr_get_alloc_id(primitive.ptr(), 0)?;
let (alloc_extra, _machine) = this.get_alloc_extra_mut(alloc)?;
alloc_extra.sync.insert(offset, Box::new(data));
// Mark this as "initialized".
let init_field = primitive.offset(init_offset, this.machine.layouts.u32, this)?;
this.write_scalar_atomic(
Scalar::from_u32(LAZY_INIT_COOKIE),
&init_field,
AtomicWriteOrd::Relaxed,
)?;
interp_ok(this.get_alloc_extra(alloc)?.get_sync::<T>(offset).unwrap())
}
/// Helper for lazily initialized `alloc_extra.sync` data:
/// Checks if the primitive is initialized:
/// - If yes, fetches the data from `alloc_extra.sync`, or calls `missing_data` if that fails
/// and stores that in `alloc_extra.sync`.
/// - Otherwise, calls `new_data` to initialize the primitive.
///
/// Return a reference to the data in the machine state.
fn lazy_sync_get_data<'a, T: 'static>(
&'a mut self,
primitive: &MPlaceTy<'tcx>,
init_offset: Size,
missing_data: impl FnOnce() -> InterpResult<'tcx, T>,
new_data: impl FnOnce(&mut MiriInterpCx<'tcx>) -> InterpResult<'tcx, T>,
) -> InterpResult<'tcx, &'a T>
where
'tcx: 'a,
{
let this = self.eval_context_mut();
// Check if this is already initialized. Needs to be atomic because we can race with another
// thread initializing. Needs to be an RMW operation to ensure we read the *latest* value.
// So we just try to replace MUTEX_INIT_COOKIE with itself.
let init_cookie = Scalar::from_u32(LAZY_INIT_COOKIE);
let init_field = primitive.offset(init_offset, this.machine.layouts.u32, this)?;
let (_init, success) = this
.atomic_compare_exchange_scalar(
&init_field,
&ImmTy::from_scalar(init_cookie, this.machine.layouts.u32),
init_cookie,
AtomicRwOrd::Relaxed,
AtomicReadOrd::Relaxed,
/* can_fail_spuriously */ false,
)?
.to_scalar_pair();
if success.to_bool()? {
// If it is initialized, it must be found in the "sync primitive" table,
// or else it has been moved illegally.
let (alloc, offset, _) = this.ptr_get_alloc_id(primitive.ptr(), 0)?;
let (alloc_extra, _machine) = this.get_alloc_extra_mut(alloc)?;
// Due to borrow checker reasons, we have to do the lookup twice.
if alloc_extra.get_sync::<T>(offset).is_none() {
let data = missing_data()?;
alloc_extra.sync.insert(offset, Box::new(data));
}
interp_ok(alloc_extra.get_sync::<T>(offset).unwrap())
} else {
let data = new_data(this)?;
this.lazy_sync_init(primitive, init_offset, data)
}
}
/// Get the synchronization primitive associated with the given pointer,
/// Get the synchronization object associated with the given pointer,
/// or initialize a new one.
///
/// Return `None` if this pointer does not point to at least 1 byte of mutable memory.
fn get_sync_or_init<'a, T: 'static>(
fn get_sync_or_init<'a, T: SyncObj>(
&'a mut self,
ptr: Pointer,
new: impl FnOnce(&'a mut MiriMachine<'tcx>) -> T,
@ -332,11 +306,108 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
// Due to borrow checker reasons, we have to do the lookup twice.
if alloc_extra.get_sync::<T>(offset).is_none() {
let new = new(machine);
alloc_extra.sync.insert(offset, Box::new(new));
alloc_extra.sync_objs.insert(offset, Box::new(new));
}
Some(alloc_extra.get_sync::<T>(offset).unwrap())
}
/// Helper for "immovable" synchronization objects: the expected protocol for these objects is
/// that they use a static initializer of `uninit_val`, and we set them to `init_val` upon
/// initialization. At that point we also register a synchronization object, which is expected
/// to have `delete_on_write() == true`. So in the future, if we still see the object, we know
/// the location must still contain `init_val`. If the object is copied somewhere, that will
/// show up as a non-`init_val` value without a synchronization object, which we can then use to
/// error.
///
/// `new_meta_obj` gets invoked when there is not yet an initialization object.
/// It has to ensure that the in-memory representation indeed matches `uninit_val`.
///
/// The point of storing an `init_val` is so that if this memory gets copied somewhere else,
/// it does not look like the static initializer (i.e., `uninit_val`) any more. For some
/// objects we could just entirely forbid reading their bytes to ensure they don't get copied,
/// but that does not work for objects without a destructor (Windows `InitOnce`, macOS
/// `os_unfair_lock`).
fn get_immovable_sync_with_static_init<'a, T: SyncObj>(
&'a mut self,
obj: &MPlaceTy<'tcx>,
init_offset: Size,
uninit_val: u8,
init_val: u8,
new_meta_obj: impl FnOnce(&mut MiriInterpCx<'tcx>) -> InterpResult<'tcx, T>,
) -> InterpResult<'tcx, &'a T>
where
'tcx: 'a,
{
assert!(init_val != uninit_val);
let this = self.eval_context_mut();
this.check_ptr_access(obj.ptr(), obj.layout.size, CheckInAllocMsg::Dereferenceable)?;
assert!(init_offset < obj.layout.size); // ensure our 1-byte flag fits
let init_field = obj.offset(init_offset, this.machine.layouts.u8, this)?;
let (alloc, offset, _) = this.ptr_get_alloc_id(init_field.ptr(), 0)?;
let (alloc_extra, _machine) = this.get_alloc_extra_mut(alloc)?;
// Due to borrow checker reasons, we have to do the lookup twice.
if alloc_extra.get_sync::<T>(offset).is_some() {
let (alloc_extra, _machine) = this.get_alloc_extra_mut(alloc).unwrap();
return interp_ok(alloc_extra.get_sync::<T>(offset).unwrap());
}
// There's no sync object there yet. Create one, and try a CAS for uninit_val to init_val.
let meta_obj = new_meta_obj(this)?;
let (old_init, success) = this
.atomic_compare_exchange_scalar(
&init_field,
&ImmTy::from_scalar(Scalar::from_u8(uninit_val), this.machine.layouts.u8),
Scalar::from_u8(init_val),
AtomicRwOrd::Relaxed,
AtomicReadOrd::Relaxed,
/* can_fail_spuriously */ false,
)?
.to_scalar_pair();
if !success.to_bool()? {
// This can happen for the macOS lock if it is already marked as initialized.
assert_eq!(
old_init.to_u8()?,
init_val,
"`new_meta_obj` should have ensured that this CAS succeeds"
);
}
let (alloc_extra, _machine) = this.get_alloc_extra_mut(alloc).unwrap();
assert!(meta_obj.delete_on_write());
alloc_extra.sync_objs.insert(offset, Box::new(meta_obj));
interp_ok(alloc_extra.get_sync::<T>(offset).unwrap())
}
/// Explicitly initializes an object that would usually be implicitly initialized with
/// `get_immovable_sync_with_static_init`.
fn init_immovable_sync<'a, T: SyncObj>(
&'a mut self,
obj: &MPlaceTy<'tcx>,
init_offset: Size,
init_val: u8,
new_meta_obj: T,
) -> InterpResult<'tcx, Option<&'a T>>
where
'tcx: 'a,
{
let this = self.eval_context_mut();
this.check_ptr_access(obj.ptr(), obj.layout.size, CheckInAllocMsg::Dereferenceable)?;
assert!(init_offset < obj.layout.size); // ensure our 1-byte flag fits
let init_field = obj.offset(init_offset, this.machine.layouts.u8, this)?;
// Zero the entire object, and then store `init_val` directly.
this.write_bytes_ptr(obj.ptr(), iter::repeat_n(0, obj.layout.size.bytes_usize()))?;
this.write_scalar(Scalar::from_u8(init_val), &init_field)?;
// Create meta-level initialization object.
let (alloc, offset, _) = this.ptr_get_alloc_id(init_field.ptr(), 0)?;
let (alloc_extra, _machine) = this.get_alloc_extra_mut(alloc).unwrap();
assert!(new_meta_obj.delete_on_write());
alloc_extra.sync_objs.insert(offset, Box::new(new_meta_obj));
interp_ok(Some(alloc_extra.get_sync::<T>(offset).unwrap()))
}
/// Lock by setting the mutex owner and increasing the lock count.
fn mutex_lock(&mut self, mutex_ref: &MutexRef) -> InterpResult<'tcx> {
let this = self.eval_context_mut();

View file

@ -128,7 +128,7 @@ pub enum NonHaltingDiagnostic {
PoppedPointerTag(Item, String),
TrackingAlloc(AllocId, Size, Align),
FreedAlloc(AllocId),
AccessedAlloc(AllocId, AllocRange, AccessKind),
AccessedAlloc(AllocId, AllocRange, borrow_tracker::AccessKind),
RejectedIsolatedOp(String),
ProgressReport {
block_count: u64, // how many basic blocks have been run so far

View file

@ -22,13 +22,6 @@ use rustc_symbol_mangling::mangle_internal_symbol;
use crate::*;
/// Indicates which kind of access is being performed.
#[derive(Copy, Clone, Hash, PartialEq, Eq, Debug)]
pub enum AccessKind {
Read,
Write,
}
/// Gets an instance for a path.
///
/// A `None` namespace indicates we are looking for a module.

View file

@ -139,7 +139,7 @@ pub use crate::diagnostics::{
EvalContextExt as _, NonHaltingDiagnostic, TerminationInfo, report_error,
};
pub use crate::eval::{MiriConfig, MiriEntryFnType, create_ecx, eval_entry};
pub use crate::helpers::{AccessKind, EvalContextExt as _, ToU64 as _, ToUsize as _};
pub use crate::helpers::{EvalContextExt as _, ToU64 as _, ToUsize as _};
pub use crate::intrinsics::EvalContextExt as _;
pub use crate::machine::{
AlignmentCheck, AllocExtra, BacktraceStyle, DynMachineCallback, FloatRoundingErrorMode,
@ -165,6 +165,7 @@ pub use crate::shims::unwind::{CatchUnwindData, EvalContextExt as _};
/// Also disable the MIR pass that inserts an alignment check on every pointer dereference. Miri
/// does that too, and with a better error message.
pub const MIRI_DEFAULT_ARGS: &[&str] = &[
"-Zcodegen-backend=dummy",
"--cfg=miri",
"-Zalways-encode-mir",
"-Zextra-const-ub-checks",

View file

@ -1,9 +1,9 @@
//! Global machine state as well as implementation of the interpreter engine
//! `Machine` trait.
use std::any::Any;
use std::borrow::Cow;
use std::cell::{Cell, RefCell};
use std::collections::BTreeMap;
use std::path::Path;
use std::rc::Rc;
use std::{fmt, process};
@ -36,6 +36,7 @@ use rustc_target::spec::Arch;
use crate::alloc_addresses::EvalContextExt;
use crate::concurrency::cpu_affinity::{self, CpuAffinityMask};
use crate::concurrency::data_race::{self, NaReadType, NaWriteType};
use crate::concurrency::sync::SyncObj;
use crate::concurrency::{
AllocDataRaceHandler, GenmcCtx, GenmcEvalContextExt as _, GlobalDataRaceHandler, weak_memory,
};
@ -399,11 +400,11 @@ pub struct AllocExtra<'tcx> {
/// if this allocation is leakable. The backtrace is not
/// pruned yet; that should be done before printing it.
pub backtrace: Option<Vec<FrameInfo<'tcx>>>,
/// Synchronization primitives like to attach extra data to particular addresses. We store that
/// Synchronization objects like to attach extra data to particular addresses. We store that
/// inside the relevant allocation, to ensure that everything is removed when the allocation is
/// freed.
/// This maps offsets to synchronization-primitive-specific data.
pub sync: FxHashMap<Size, Box<dyn Any>>,
pub sync_objs: BTreeMap<Size, Box<dyn SyncObj>>,
}
// We need a `Clone` impl because the machine passes `Allocation` through `Cow`...
@ -416,7 +417,7 @@ impl<'tcx> Clone for AllocExtra<'tcx> {
impl VisitProvenance for AllocExtra<'_> {
fn visit_provenance(&self, visit: &mut VisitWith<'_>) {
let AllocExtra { borrow_tracker, data_race, backtrace: _, sync: _ } = self;
let AllocExtra { borrow_tracker, data_race, backtrace: _, sync_objs: _ } = self;
borrow_tracker.visit_provenance(visit);
data_race.visit_provenance(visit);
@ -991,7 +992,12 @@ impl<'tcx> MiriMachine<'tcx> {
.insert(id, (ecx.machine.current_user_relevant_span(), None));
}
interp_ok(AllocExtra { borrow_tracker, data_race, backtrace, sync: FxHashMap::default() })
interp_ok(AllocExtra {
borrow_tracker,
data_race,
backtrace,
sync_objs: BTreeMap::default(),
})
}
}
@ -1516,7 +1522,7 @@ impl<'tcx> Machine<'tcx> for MiriMachine<'tcx> {
machine.emit_diagnostic(NonHaltingDiagnostic::AccessedAlloc(
alloc_id,
range,
AccessKind::Read,
borrow_tracker::AccessKind::Read,
));
}
// The order of checks is deliberate, to prefer reporting a data race over a borrow tracker error.
@ -1536,6 +1542,11 @@ impl<'tcx> Machine<'tcx> for MiriMachine<'tcx> {
if let Some(borrow_tracker) = &alloc_extra.borrow_tracker {
borrow_tracker.before_memory_read(alloc_id, prov_extra, range, machine)?;
}
// Check if there are any sync objects that would like to prevent reading this memory.
for (_offset, obj) in alloc_extra.sync_objs.range(range.start..range.end()) {
obj.on_access(concurrency::sync::AccessKind::Read)?;
}
interp_ok(())
}
@ -1552,7 +1563,7 @@ impl<'tcx> Machine<'tcx> for MiriMachine<'tcx> {
machine.emit_diagnostic(NonHaltingDiagnostic::AccessedAlloc(
alloc_id,
range,
AccessKind::Write,
borrow_tracker::AccessKind::Write,
));
}
match &machine.data_race {
@ -1576,6 +1587,20 @@ impl<'tcx> Machine<'tcx> for MiriMachine<'tcx> {
if let Some(borrow_tracker) = &mut alloc_extra.borrow_tracker {
borrow_tracker.before_memory_write(alloc_id, prov_extra, range, machine)?;
}
// Delete sync objects that don't like writes.
// Most of the time, we can just skip this.
if !alloc_extra.sync_objs.is_empty() {
let mut to_delete = vec![];
for (offset, obj) in alloc_extra.sync_objs.range(range.start..range.end()) {
obj.on_access(concurrency::sync::AccessKind::Write)?;
if obj.delete_on_write() {
to_delete.push(*offset);
}
}
for offset in to_delete {
alloc_extra.sync_objs.remove(&offset);
}
}
interp_ok(())
}
@ -1612,6 +1637,11 @@ impl<'tcx> Machine<'tcx> for MiriMachine<'tcx> {
if let Some(borrow_tracker) = &mut alloc_extra.borrow_tracker {
borrow_tracker.before_memory_deallocation(alloc_id, prove_extra, size, machine)?;
}
// Check if there are any sync objects that would like to prevent freeing this memory.
for obj in alloc_extra.sync_objs.values() {
obj.on_access(concurrency::sync::AccessKind::Dealloc)?;
}
if let Some((_, deallocated_at)) = machine.allocation_spans.borrow_mut().get_mut(&alloc_id)
{
*deallocated_at = Some(machine.current_user_relevant_span());

View file

@ -800,10 +800,7 @@ trait EvalContextExtPriv<'tcx>: crate::MiriInterpCxExt<'tcx> {
// Target-specific shims
name if name.starts_with("llvm.x86.")
&& matches!(
this.tcx.sess.target.arch,
Arch::X86 | Arch::X86_64
) =>
&& matches!(this.tcx.sess.target.arch, Arch::X86 | Arch::X86_64) =>
{
return shims::x86::EvalContextExt::emulate_x86_intrinsic(
this, link_name, abi, args, dest,

View file

@ -4,13 +4,15 @@ use core::time::Duration;
use rustc_abi::FieldIdx;
use crate::concurrency::sync::FutexRef;
use crate::concurrency::sync::{FutexRef, SyncObj};
use crate::*;
/// FreeBSD futex: the interpreter-side wait-queue state is held in `FutexRef`.
pub struct FreeBsdFutex {
futex: FutexRef,
}

// Empty impl: keeps the `SyncObj` defaults for this futex.
// NOTE(review): assumed the trait's default methods impose no access restrictions —
// confirm against the `SyncObj` trait definition.
impl SyncObj for FreeBsdFutex {}
/// Extended variant of the `timespec` struct.
pub struct UmtxTime {
timeout: Duration,

View file

@ -1,4 +1,4 @@
use crate::concurrency::sync::FutexRef;
use crate::concurrency::sync::{FutexRef, SyncObj};
use crate::shims::sig::check_min_vararg_count;
use crate::*;
@ -6,6 +6,8 @@ struct LinuxFutex {
futex: FutexRef,
}
// Empty impl: the Linux futex keeps its wait-queue state in the interpreter-side
// `FutexRef` and relies on the `SyncObj` defaults.
// NOTE(review): assumed the defaults impose no access restrictions — confirm against
// the `SyncObj` trait definition.
impl SyncObj for LinuxFutex {}
/// Implementation of the SYS_futex syscall.
/// `args` is the arguments *including* the syscall number.
pub fn futex<'tcx>(

View file

@ -13,15 +13,32 @@
use std::cell::Cell;
use std::time::Duration;
use rustc_abi::Size;
use rustc_abi::{Endian, FieldIdx, Size};
use crate::concurrency::sync::FutexRef;
use crate::concurrency::sync::{AccessKind, FutexRef, SyncObj};
use crate::*;
#[derive(Clone)]
enum MacOsUnfairLock {
Poisoned,
Active { mutex_ref: MutexRef },
PermanentlyLocked,
}
impl SyncObj for MacOsUnfairLock {
    /// Forbid touching the lock's memory while threads are queued on the active mutex.
    fn on_access<'tcx>(&self, access_kind: AccessKind) -> InterpResult<'tcx> {
        // Only the `Active` state carries a queue; every other state has nothing to check.
        let queue_nonempty = match self {
            MacOsUnfairLock::Active { mutex_ref } => !mutex_ref.queue_is_empty(),
            _ => false,
        };
        if queue_nonempty {
            throw_ub_format!(
                "{access_kind} of `os_unfair_lock` is forbidden while the queue is non-empty"
            );
        }
        interp_ok(())
    }

    /// Overwriting the lock memory discards this interpreter-side state.
    fn delete_on_write(&self) -> bool {
        true
    }
}
pub enum MacOsFutexTimeout<'a, 'tcx> {
@ -44,6 +61,8 @@ struct MacOsFutex {
shared: Cell<bool>,
}
// Empty impl: the macOS futex relies on the `SyncObj` defaults.
// NOTE(review): assumed the defaults impose no access restrictions — confirm against
// the `SyncObj` trait definition.
impl SyncObj for MacOsFutex {}
impl<'tcx> EvalContextExtPriv<'tcx> for crate::MiriInterpCx<'tcx> {}
trait EvalContextExtPriv<'tcx>: crate::MiriInterpCxExt<'tcx> {
fn os_unfair_lock_get_data<'a>(
@ -53,22 +72,35 @@ trait EvalContextExtPriv<'tcx>: crate::MiriInterpCxExt<'tcx> {
where
'tcx: 'a,
{
// `os_unfair_lock_s` wraps a single `u32` field. We use the first byte to store the "init"
// flag. Due to macOS always being little endian, that's the least significant byte.
let this = self.eval_context_mut();
assert!(this.tcx.data_layout.endian == Endian::Little);
let lock = this.deref_pointer_as(lock_ptr, this.libc_ty_layout("os_unfair_lock_s"))?;
this.lazy_sync_get_data(
this.get_immovable_sync_with_static_init(
&lock,
Size::ZERO, // offset for init tracking
|| {
// If we get here, due to how we reset things to zero in `os_unfair_lock_unlock`,
// this means the lock was moved while locked. This can happen with a `std` lock,
// but then any future attempt to unlock will just deadlock. In practice, terrible
// things can probably happen if you swap two locked locks, since they'd wake up
// from the wrong queue... we just won't catch all UB of this library API then (we
// would need to store some unique identifer in-memory for this, instead of a static
// LAZY_INIT_COOKIE). This can't be hit via `std::sync::Mutex`.
interp_ok(MacOsUnfairLock::Poisoned)
/* uninit_val */ 0,
/* init_val */ 1,
|this| {
let field = this.project_field(&lock, FieldIdx::from_u32(0))?;
let val = this.read_scalar(&field)?.to_u32()?;
if val == 0 {
interp_ok(MacOsUnfairLock::Active { mutex_ref: MutexRef::new() })
} else if val == 1 {
// This is a lock that got copied while it is initialized. We de-initialize
// locks when they get released, so it got copied while locked. Unfortunately
// that is something `std` needs to support (the guard could have been leaked).
// On the plus side, we know nobody was queued for the lock while it got copied;
// that would have been rejected by our `on_access`. So we behave like a
// futex-based lock would in this case: any attempt to acquire the lock will
// just wait forever, since there's nobody to wake us up.
interp_ok(MacOsUnfairLock::PermanentlyLocked)
} else {
throw_ub_format!("`os_unfair_lock` was not properly initialized at this location, or it got overwritten");
}
},
|_| interp_ok(MacOsUnfairLock::Active { mutex_ref: MutexRef::new() }),
)
}
}
@ -332,7 +364,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
let this = self.eval_context_mut();
let MacOsUnfairLock::Active { mutex_ref } = this.os_unfair_lock_get_data(lock_op)? else {
// The lock is poisoned, who knows who owns it... we'll pretend: someone else.
// A perma-locked lock is definitely not held by us.
throw_machine_stop!(TerminationInfo::Abort(
"attempted to unlock an os_unfair_lock not owned by the current thread".to_owned()
));
@ -361,7 +393,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
let this = self.eval_context_mut();
let MacOsUnfairLock::Active { mutex_ref } = this.os_unfair_lock_get_data(lock_op)? else {
// The lock is poisoned, who knows who owns it... we'll pretend: someone else.
// A perma-locked lock is definitely not held by us.
throw_machine_stop!(TerminationInfo::Abort(
"called os_unfair_lock_assert_owner on an os_unfair_lock not owned by the current thread".to_owned()
));
@ -383,7 +415,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
let this = self.eval_context_mut();
let MacOsUnfairLock::Active { mutex_ref } = this.os_unfair_lock_get_data(lock_op)? else {
// The lock is poisoned, who knows who owns it... we'll pretend: someone else.
// A perma-locked lock is definitely not held by us.
return interp_ok(());
};
let mutex_ref = mutex_ref.clone();

View file

@ -1,13 +1,11 @@
use rustc_abi::Size;
use crate::concurrency::sync::LAZY_INIT_COOKIE;
use crate::concurrency::sync::{AccessKind, SyncObj};
use crate::*;
/// Do a bytewise comparison of the two places, using relaxed atomic reads. This is used to check if
/// Do a bytewise comparison of the two places. This is used to check if
/// a synchronization primitive matches its static initializer value.
///
/// The reads happen in chunks of 4, so all racing accesses must also use that access size.
fn bytewise_equal_atomic_relaxed<'tcx>(
fn bytewise_equal<'tcx>(
ecx: &MiriInterpCx<'tcx>,
left: &MPlaceTy<'tcx>,
right: &MPlaceTy<'tcx>,
@ -15,25 +13,16 @@ fn bytewise_equal_atomic_relaxed<'tcx>(
let size = left.layout.size;
assert_eq!(size, right.layout.size);
// We do this in chunks of 4, so that we are okay to race with (sufficiently aligned)
// 4-byte atomic accesses.
assert!(size.bytes().is_multiple_of(4));
for i in 0..(size.bytes() / 4) {
let offset = Size::from_bytes(i.strict_mul(4));
let load = |place: &MPlaceTy<'tcx>| {
let byte = place.offset(offset, ecx.machine.layouts.u32, ecx)?;
ecx.read_scalar_atomic(&byte, AtomicReadOrd::Relaxed)?.to_u32()
};
let left = load(left)?;
let right = load(right)?;
if left != right {
return interp_ok(false);
}
}
let left_bytes = ecx.read_bytes_ptr_strip_provenance(left.ptr(), size)?;
let right_bytes = ecx.read_bytes_ptr_strip_provenance(right.ptr(), size)?;
interp_ok(true)
interp_ok(left_bytes == right_bytes)
}
// The in-memory marker values we use to indicate whether objects have been initialized.
const PTHREAD_UNINIT: u8 = 0;
const PTHREAD_INIT: u8 = 1;
// # pthread_mutexattr_t
// We store some data directly inside the type, ignoring the platform layout:
// - kind: i32
@ -103,7 +92,7 @@ fn mutexattr_translate_kind<'tcx>(
// # pthread_mutex_t
// We store some data directly inside the type, ignoring the platform layout:
// - init: u32
// - init: u8
/// The mutex kind.
#[derive(Debug, Clone, Copy)]
@ -120,6 +109,21 @@ struct PthreadMutex {
kind: MutexKind,
}
impl SyncObj for PthreadMutex {
    /// Reject any memory access to the mutex while threads are still queued on it.
    fn on_access<'tcx>(&self, access_kind: AccessKind) -> InterpResult<'tcx> {
        if self.mutex_ref.queue_is_empty() {
            // Nobody is waiting, so the access is harmless.
            return interp_ok(());
        }
        throw_ub_format!(
            "{access_kind} of `pthread_mutex_t` is forbidden while the queue is non-empty"
        )
    }

    /// Overwriting the mutex memory discards this interpreter-side state.
    fn delete_on_write(&self) -> bool {
        true
    }
}
/// To ensure an initialized mutex that was moved somewhere else can be distinguished from
/// a statically initialized mutex that is used the first time, we pick some offset within
/// `pthread_mutex_t` and use it as an "initialized" flag.
@ -138,11 +142,11 @@ fn mutex_init_offset<'tcx>(ecx: &MiriInterpCx<'tcx>) -> InterpResult<'tcx, Size>
let check_static_initializer = |name| {
let static_initializer = ecx.eval_path(&["libc", name]);
let init_field =
static_initializer.offset(offset, ecx.machine.layouts.u32, ecx).unwrap();
let init = ecx.read_scalar(&init_field).unwrap().to_u32().unwrap();
assert_ne!(
init, LAZY_INIT_COOKIE,
"{name} is incompatible with our initialization cookie"
static_initializer.offset(offset, ecx.machine.layouts.u8, ecx).unwrap();
let init = ecx.read_scalar(&init_field).unwrap().to_u8().unwrap();
assert_eq!(
init, PTHREAD_UNINIT,
"{name} is incompatible with our initialization logic"
);
};
@ -172,7 +176,7 @@ fn mutex_create<'tcx>(
) -> InterpResult<'tcx, PthreadMutex> {
let mutex = ecx.deref_pointer_as(mutex_ptr, ecx.libc_ty_layout("pthread_mutex_t"))?;
let data = PthreadMutex { mutex_ref: MutexRef::new(), kind };
ecx.lazy_sync_init(&mutex, mutex_init_offset(ecx)?, data.clone())?;
ecx.init_immovable_sync(&mutex, mutex_init_offset(ecx)?, PTHREAD_INIT, data.clone())?;
interp_ok(data)
}
@ -186,10 +190,11 @@ where
'tcx: 'a,
{
let mutex = ecx.deref_pointer_as(mutex_ptr, ecx.libc_ty_layout("pthread_mutex_t"))?;
ecx.lazy_sync_get_data(
ecx.get_immovable_sync_with_static_init(
&mutex,
mutex_init_offset(ecx)?,
|| throw_ub_format!("`pthread_mutex_t` can't be moved after first use"),
PTHREAD_UNINIT,
PTHREAD_INIT,
|ecx| {
let kind = mutex_kind_from_static_initializer(ecx, &mutex)?;
interp_ok(PthreadMutex { mutex_ref: MutexRef::new(), kind })
@ -203,8 +208,7 @@ fn mutex_kind_from_static_initializer<'tcx>(
mutex: &MPlaceTy<'tcx>,
) -> InterpResult<'tcx, MutexKind> {
// All the static initializers recognized here *must* be checked in `mutex_init_offset`!
let is_initializer =
|name| bytewise_equal_atomic_relaxed(ecx, mutex, &ecx.eval_path(&["libc", name]));
let is_initializer = |name| bytewise_equal(ecx, mutex, &ecx.eval_path(&["libc", name]));
// PTHREAD_MUTEX_INITIALIZER is recognized on all targets.
if is_initializer("PTHREAD_MUTEX_INITIALIZER")? {
@ -220,18 +224,35 @@ fn mutex_kind_from_static_initializer<'tcx>(
},
_ => {}
}
throw_unsup_format!("unsupported static initializer used for `pthread_mutex_t`");
throw_ub_format!(
"`pthread_mutex_t` was not properly initialized at this location, or it got overwritten"
);
}
// # pthread_rwlock_t
// We store some data directly inside the type, ignoring the platform layout:
// - init: u32
// - init: u8
/// Interpreter-side data attached to a guest `pthread_rwlock_t`.
#[derive(Debug, Clone)]
struct PthreadRwLock {
// Shared handle to the interpreter's rwlock object.
rwlock_ref: RwLockRef,
}
impl SyncObj for PthreadRwLock {
    /// Reject any memory access to the rwlock while threads are still queued on it.
    fn on_access<'tcx>(&self, access_kind: AccessKind) -> InterpResult<'tcx> {
        if self.rwlock_ref.queue_is_empty() {
            // Nobody is waiting, so the access is harmless.
            return interp_ok(());
        }
        throw_ub_format!(
            "{access_kind} of `pthread_rwlock_t` is forbidden while the queue is non-empty"
        )
    }

    /// Overwriting the rwlock memory discards this interpreter-side state.
    fn delete_on_write(&self) -> bool {
        true
    }
}
fn rwlock_init_offset<'tcx>(ecx: &MiriInterpCx<'tcx>) -> InterpResult<'tcx, Size> {
let offset = match &*ecx.tcx.sess.target.os {
"linux" | "illumos" | "solaris" | "freebsd" | "android" => 0,
@ -245,11 +266,11 @@ fn rwlock_init_offset<'tcx>(ecx: &MiriInterpCx<'tcx>) -> InterpResult<'tcx, Size
// the `init` field must start out not equal to LAZY_INIT_COOKIE.
if !ecx.machine.pthread_rwlock_sanity.replace(true) {
let static_initializer = ecx.eval_path(&["libc", "PTHREAD_RWLOCK_INITIALIZER"]);
let init_field = static_initializer.offset(offset, ecx.machine.layouts.u32, ecx).unwrap();
let init = ecx.read_scalar(&init_field).unwrap().to_u32().unwrap();
assert_ne!(
init, LAZY_INIT_COOKIE,
"PTHREAD_RWLOCK_INITIALIZER is incompatible with our initialization cookie"
let init_field = static_initializer.offset(offset, ecx.machine.layouts.u8, ecx).unwrap();
let init = ecx.read_scalar(&init_field).unwrap().to_u8().unwrap();
assert_eq!(
init, PTHREAD_UNINIT,
"PTHREAD_RWLOCK_INITIALIZER is incompatible with our initialization logic"
);
}
@ -264,17 +285,20 @@ where
'tcx: 'a,
{
let rwlock = ecx.deref_pointer_as(rwlock_ptr, ecx.libc_ty_layout("pthread_rwlock_t"))?;
ecx.lazy_sync_get_data(
ecx.get_immovable_sync_with_static_init(
&rwlock,
rwlock_init_offset(ecx)?,
|| throw_ub_format!("`pthread_rwlock_t` can't be moved after first use"),
PTHREAD_UNINIT,
PTHREAD_INIT,
|ecx| {
if !bytewise_equal_atomic_relaxed(
if !bytewise_equal(
ecx,
&rwlock,
&ecx.eval_path(&["libc", "PTHREAD_RWLOCK_INITIALIZER"]),
)? {
throw_unsup_format!("unsupported static initializer used for `pthread_rwlock_t`");
throw_ub_format!(
"`pthread_rwlock_t` was not properly initialized at this location, or it got overwritten"
);
}
interp_ok(PthreadRwLock { rwlock_ref: RwLockRef::new() })
},
@ -322,7 +346,7 @@ fn condattr_set_clock_id<'tcx>(
// # pthread_cond_t
// We store some data directly inside the type, ignoring the platform layout:
// - init: u32
// - init: u8
fn cond_init_offset<'tcx>(ecx: &MiriInterpCx<'tcx>) -> InterpResult<'tcx, Size> {
let offset = match &*ecx.tcx.sess.target.os {
@ -337,11 +361,11 @@ fn cond_init_offset<'tcx>(ecx: &MiriInterpCx<'tcx>) -> InterpResult<'tcx, Size>
// the `init` field must start out not equal to LAZY_INIT_COOKIE.
if !ecx.machine.pthread_condvar_sanity.replace(true) {
let static_initializer = ecx.eval_path(&["libc", "PTHREAD_COND_INITIALIZER"]);
let init_field = static_initializer.offset(offset, ecx.machine.layouts.u32, ecx).unwrap();
let init = ecx.read_scalar(&init_field).unwrap().to_u32().unwrap();
assert_ne!(
init, LAZY_INIT_COOKIE,
"PTHREAD_COND_INITIALIZER is incompatible with our initialization cookie"
let init_field = static_initializer.offset(offset, ecx.machine.layouts.u8, ecx).unwrap();
let init = ecx.read_scalar(&init_field).unwrap().to_u8().unwrap();
assert_eq!(
init, PTHREAD_UNINIT,
"PTHREAD_COND_INITIALIZER is incompatible with our initialization logic"
);
}
@ -354,6 +378,21 @@ struct PthreadCondvar {
clock: TimeoutClock,
}
impl SyncObj for PthreadCondvar {
    /// Reject any memory access to the condvar while threads are still queued on it.
    fn on_access<'tcx>(&self, access_kind: AccessKind) -> InterpResult<'tcx> {
        if self.condvar_ref.queue_is_empty() {
            // Nobody is waiting, so the access is harmless.
            return interp_ok(());
        }
        throw_ub_format!(
            "{access_kind} of `pthread_cond_t` is forbidden while the queue is non-empty"
        )
    }

    /// Overwriting the condvar memory discards this interpreter-side state.
    fn delete_on_write(&self) -> bool {
        true
    }
}
fn cond_create<'tcx>(
ecx: &mut MiriInterpCx<'tcx>,
cond_ptr: &OpTy<'tcx>,
@ -361,7 +400,7 @@ fn cond_create<'tcx>(
) -> InterpResult<'tcx, PthreadCondvar> {
let cond = ecx.deref_pointer_as(cond_ptr, ecx.libc_ty_layout("pthread_cond_t"))?;
let data = PthreadCondvar { condvar_ref: CondvarRef::new(), clock };
ecx.lazy_sync_init(&cond, cond_init_offset(ecx)?, data.clone())?;
ecx.init_immovable_sync(&cond, cond_init_offset(ecx)?, PTHREAD_INIT, data.clone())?;
interp_ok(data)
}
@ -373,17 +412,20 @@ where
'tcx: 'a,
{
let cond = ecx.deref_pointer_as(cond_ptr, ecx.libc_ty_layout("pthread_cond_t"))?;
ecx.lazy_sync_get_data(
ecx.get_immovable_sync_with_static_init(
&cond,
cond_init_offset(ecx)?,
|| throw_ub_format!("`pthread_cond_t` can't be moved after first use"),
PTHREAD_UNINIT,
PTHREAD_INIT,
|ecx| {
if !bytewise_equal_atomic_relaxed(
if !bytewise_equal(
ecx,
&cond,
&ecx.eval_path(&["libc", "PTHREAD_COND_INITIALIZER"]),
)? {
throw_unsup_format!("unsupported static initializer used for `pthread_cond_t`");
throw_ub_format!(
"`pthread_cond_t` was not properly initialized at this location, or it got overwritten"
);
}
// This used the static initializer. The clock there is always CLOCK_REALTIME.
interp_ok(PthreadCondvar {
@ -575,11 +617,11 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
throw_ub_format!("destroyed a locked mutex");
}
// This write also deletes the interpreter state for this mutex.
// This might lead to false positives, see comment in pthread_mutexattr_destroy
this.write_uninit(
&this.deref_pointer_as(mutex_op, this.libc_ty_layout("pthread_mutex_t"))?,
)?;
// FIXME: delete interpreter state associated with this mutex.
let mutex_place =
this.deref_pointer_as(mutex_op, this.libc_ty_layout("pthread_mutex_t"))?;
this.write_uninit(&mutex_place)?;
interp_ok(())
}
@ -693,11 +735,11 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
throw_ub_format!("destroyed a locked rwlock");
}
// This write also deletes the interpreter state for this rwlock.
// This might lead to false positives, see comment in pthread_mutexattr_destroy
this.write_uninit(
&this.deref_pointer_as(rwlock_op, this.libc_ty_layout("pthread_rwlock_t"))?,
)?;
// FIXME: delete interpreter state associated with this rwlock.
let rwlock_place =
this.deref_pointer_as(rwlock_op, this.libc_ty_layout("pthread_rwlock_t"))?;
this.write_uninit(&rwlock_place)?;
interp_ok(())
}
@ -885,13 +927,14 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
// Reading the field also has the side-effect that we detect double-`destroy`
// since we make the field uninit below.
let condvar = &cond_get_data(this, cond_op)?.condvar_ref;
if condvar.is_awaited() {
if !condvar.queue_is_empty() {
throw_ub_format!("destroying an awaited conditional variable");
}
// This write also deletes the interpreter state for this mutex.
// This might lead to false positives, see comment in pthread_mutexattr_destroy
this.write_uninit(&this.deref_pointer_as(cond_op, this.libc_ty_layout("pthread_cond_t"))?)?;
// FIXME: delete interpreter state associated with this condvar.
let cond_place = this.deref_pointer_as(cond_op, this.libc_ty_layout("pthread_cond_t"))?;
this.write_uninit(&cond_place)?;
interp_ok(())
}

View file

@ -1,9 +1,9 @@
use std::time::Duration;
use rustc_abi::Size;
use rustc_abi::{FieldIdx, Size};
use crate::concurrency::init_once::{EvalContextExt as _, InitOnceStatus};
use crate::concurrency::sync::FutexRef;
use crate::concurrency::sync::{AccessKind, FutexRef, SyncObj};
use crate::*;
#[derive(Clone)]
@ -11,14 +11,31 @@ struct WindowsInitOnce {
init_once: InitOnceRef,
}
impl SyncObj for WindowsInitOnce {
    /// Reject any memory access to the `INIT_ONCE` while threads are still queued on it.
    fn on_access<'tcx>(&self, access_kind: AccessKind) -> InterpResult<'tcx> {
        if self.init_once.queue_is_empty() {
            // Nobody is waiting, so the access is harmless.
            return interp_ok(());
        }
        throw_ub_format!(
            "{access_kind} of `INIT_ONCE` is forbidden while the queue is non-empty"
        )
    }

    /// Overwriting the `INIT_ONCE` memory discards this interpreter-side state.
    fn delete_on_write(&self) -> bool {
        true
    }
}
/// Windows futex: the interpreter-side wait-queue state is held in `FutexRef`.
struct WindowsFutex {
futex: FutexRef,
}

// Empty impl: keeps the `SyncObj` defaults for this futex.
// NOTE(review): assumed the defaults impose no access restrictions — confirm against
// the `SyncObj` trait definition.
impl SyncObj for WindowsFutex {}
impl<'tcx> EvalContextExtPriv<'tcx> for crate::MiriInterpCx<'tcx> {}
trait EvalContextExtPriv<'tcx>: crate::MiriInterpCxExt<'tcx> {
// Windows sync primitives are pointer sized.
// We only use the first 4 bytes for the id.
// We only use the first byte for the "init" flag.
fn init_once_get_data<'a>(
&'a mut self,
@ -33,13 +50,19 @@ trait EvalContextExtPriv<'tcx>: crate::MiriInterpCxExt<'tcx> {
this.deref_pointer_as(init_once_ptr, this.windows_ty_layout("INIT_ONCE"))?;
let init_offset = Size::ZERO;
this.lazy_sync_get_data(
this.get_immovable_sync_with_static_init(
&init_once,
init_offset,
|| throw_ub_format!("`INIT_ONCE` can't be moved after first use"),
|_| {
// TODO: check that this is still all-zero.
interp_ok(WindowsInitOnce { init_once: InitOnceRef::new() })
/* uninit_val */ 0,
/* init_val */ 1,
|this| {
let ptr_field = this.project_field(&init_once, FieldIdx::from_u32(0))?;
let val = this.read_target_usize(&ptr_field)?;
if val == 0 {
interp_ok(WindowsInitOnce { init_once: InitOnceRef::new() })
} else {
throw_ub_format!("`INIT_ONCE` was not properly initialized at this location, or it got overwritten");
}
},
)
}

View file

@ -1,14 +1,12 @@
use rustc_abi::CanonAbi;
use rustc_apfloat::ieee::{Double, Single};
use rustc_middle::mir;
use rustc_middle::ty::Ty;
use rustc_span::Symbol;
use rustc_target::callconv::FnAbi;
use super::{
FloatBinOp, FloatUnaryOp, bin_op_simd_float_all, conditional_dot_product, convert_float_to_int,
horizontal_bin_op, mask_load, mask_store, round_all, test_bits_masked, test_high_bits_masked,
unary_op_ps,
mask_load, mask_store, round_all, test_bits_masked, test_high_bits_masked, unary_op_ps,
};
use crate::*;
@ -93,21 +91,6 @@ pub(super) trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
conditional_dot_product(this, left, right, imm, dest)?;
}
// Used to implement the _mm256_h{add,sub}_p{s,d} functions.
// Horizontally add/subtract adjacent floating point values
// in `left` and `right`.
"hadd.ps.256" | "hadd.pd.256" | "hsub.ps.256" | "hsub.pd.256" => {
let [left, right] =
this.check_shim_sig_lenient(abi, CanonAbi::C, link_name, args)?;
let which = match unprefixed_name {
"hadd.ps.256" | "hadd.pd.256" => mir::BinOp::Add,
"hsub.ps.256" | "hsub.pd.256" => mir::BinOp::Sub,
_ => unreachable!(),
};
horizontal_bin_op(this, which, /*saturating*/ false, left, right, dest)?;
}
// Used to implement the _mm256_cmp_ps function.
// Performs a comparison operation on each component of `left`
// and `right`. For each component, returns 0 if false or u32::MAX
@ -251,40 +234,31 @@ pub(super) trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
// Unaligned copy, which is what we want.
this.mem_copy(src_ptr, dest.ptr(), dest.layout.size, /*nonoverlapping*/ true)?;
}
// Used to implement the _mm256_testz_si256, _mm256_testc_si256 and
// _mm256_testnzc_si256 functions.
// Tests `op & mask == 0`, `op & mask == mask` or
// `op & mask != 0 && op & mask != mask`
"ptestz.256" | "ptestc.256" | "ptestnzc.256" => {
// Used to implement the _mm256_testnzc_si256 function.
// Tests `op & mask != 0 && op & mask != mask`
"ptestnzc.256" => {
let [op, mask] = this.check_shim_sig_lenient(abi, CanonAbi::C, link_name, args)?;
let (all_zero, masked_set) = test_bits_masked(this, op, mask)?;
let res = match unprefixed_name {
"ptestz.256" => all_zero,
"ptestc.256" => masked_set,
"ptestnzc.256" => !all_zero && !masked_set,
_ => unreachable!(),
};
let res = !all_zero && !masked_set;
this.write_scalar(Scalar::from_i32(res.into()), dest)?;
}
// Used to implement the _mm256_testz_pd, _mm256_testc_pd, _mm256_testnzc_pd
// _mm_testz_pd, _mm_testc_pd, _mm_testnzc_pd, _mm256_testz_ps,
// _mm256_testc_ps, _mm256_testnzc_ps, _mm_testz_ps, _mm_testc_ps and
// _mm_testnzc_pd, _mm256_testz_ps, _mm256_testc_ps, _mm256_testnzc_ps and
// _mm_testnzc_ps functions.
// Calculates two booleans:
// `direct`, which is true when the highest bit of each element of `op & mask` is zero.
// `negated`, which is true when the highest bit of each element of `!op & mask` is zero.
// Return `direct` (testz), `negated` (testc) or `!direct & !negated` (testnzc)
"vtestz.pd.256" | "vtestc.pd.256" | "vtestnzc.pd.256" | "vtestz.pd" | "vtestc.pd"
| "vtestnzc.pd" | "vtestz.ps.256" | "vtestc.ps.256" | "vtestnzc.ps.256"
| "vtestz.ps" | "vtestc.ps" | "vtestnzc.ps" => {
"vtestz.pd.256" | "vtestc.pd.256" | "vtestnzc.pd.256" | "vtestnzc.pd"
| "vtestz.ps.256" | "vtestc.ps.256" | "vtestnzc.ps.256" | "vtestnzc.ps" => {
let [op, mask] = this.check_shim_sig_lenient(abi, CanonAbi::C, link_name, args)?;
let (direct, negated) = test_high_bits_masked(this, op, mask)?;
let res = match unprefixed_name {
"vtestz.pd.256" | "vtestz.pd" | "vtestz.ps.256" | "vtestz.ps" => direct,
"vtestc.pd.256" | "vtestc.pd" | "vtestc.ps.256" | "vtestc.ps" => negated,
"vtestz.pd.256" | "vtestz.ps.256" => direct,
"vtestc.pd.256" | "vtestc.ps.256" => negated,
"vtestnzc.pd.256" | "vtestnzc.pd" | "vtestnzc.ps.256" | "vtestnzc.ps" =>
!direct && !negated,
_ => unreachable!(),

View file

@ -5,8 +5,8 @@ use rustc_span::Symbol;
use rustc_target::callconv::FnAbi;
use super::{
ShiftOp, horizontal_bin_op, int_abs, mask_load, mask_store, mpsadbw, packssdw, packsswb,
packusdw, packuswb, pmulhrsw, psign, shift_simd_by_scalar, shift_simd_by_simd,
ShiftOp, horizontal_bin_op, mask_load, mask_store, mpsadbw, packssdw, packsswb, packusdw,
packuswb, pmulhrsw, psign, shift_simd_by_scalar, shift_simd_by_simd,
};
use crate::*;
@ -25,29 +25,20 @@ pub(super) trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
let unprefixed_name = link_name.as_str().strip_prefix("llvm.x86.avx2.").unwrap();
match unprefixed_name {
// Used to implement the _mm256_abs_epi{8,16,32} functions.
// Calculates the absolute value of packed 8/16/32-bit integers.
"pabs.b" | "pabs.w" | "pabs.d" => {
let [op] = this.check_shim_sig_lenient(abi, CanonAbi::C, link_name, args)?;
int_abs(this, op, dest)?;
}
// Used to implement the _mm256_h{add,adds,sub}_epi{16,32} functions.
// Horizontally add / add with saturation / subtract adjacent 16/32-bit
// Used to implement the _mm256_h{adds,subs}_epi16 functions.
// Horizontally add / subtract with saturation adjacent 16-bit
// integer values in `left` and `right`.
"phadd.w" | "phadd.sw" | "phadd.d" | "phsub.w" | "phsub.sw" | "phsub.d" => {
"phadd.sw" | "phsub.sw" => {
let [left, right] =
this.check_shim_sig_lenient(abi, CanonAbi::C, link_name, args)?;
let (which, saturating) = match unprefixed_name {
"phadd.w" | "phadd.d" => (mir::BinOp::Add, false),
"phadd.sw" => (mir::BinOp::Add, true),
"phsub.w" | "phsub.d" => (mir::BinOp::Sub, false),
"phsub.sw" => (mir::BinOp::Sub, true),
let which = match unprefixed_name {
"phadd.sw" => mir::BinOp::Add,
"phsub.sw" => mir::BinOp::Sub,
_ => unreachable!(),
};
horizontal_bin_op(this, which, saturating, left, right, dest)?;
horizontal_bin_op(this, which, /*saturating*/ true, left, right, dest)?;
}
// Used to implement `_mm{,_mask}_{i32,i64}gather_{epi32,epi64,pd,ps}` functions
// Gathers elements from `slice` using `offsets * scale` as indices.
@ -110,42 +101,6 @@ pub(super) trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
this.write_scalar(Scalar::from_int(0, dest.layout.size), &dest)?;
}
}
// Used to implement the _mm256_madd_epi16 function.
// Multiplies packed signed 16-bit integers in `left` and `right`, producing
// intermediate signed 32-bit integers. Horizontally add adjacent pairs of
// intermediate 32-bit integers, and pack the results in `dest`.
"pmadd.wd" => {
let [left, right] =
this.check_shim_sig_lenient(abi, CanonAbi::C, link_name, args)?;
let (left, left_len) = this.project_to_simd(left)?;
let (right, right_len) = this.project_to_simd(right)?;
let (dest, dest_len) = this.project_to_simd(dest)?;
assert_eq!(left_len, right_len);
assert_eq!(dest_len.strict_mul(2), left_len);
for i in 0..dest_len {
let j1 = i.strict_mul(2);
let left1 = this.read_scalar(&this.project_index(&left, j1)?)?.to_i16()?;
let right1 = this.read_scalar(&this.project_index(&right, j1)?)?.to_i16()?;
let j2 = j1.strict_add(1);
let left2 = this.read_scalar(&this.project_index(&left, j2)?)?.to_i16()?;
let right2 = this.read_scalar(&this.project_index(&right, j2)?)?.to_i16()?;
let dest = this.project_index(&dest, i)?;
// Multiplications are i16*i16->i32, which will not overflow.
let mul1 = i32::from(left1).strict_mul(right1.into());
let mul2 = i32::from(left2).strict_mul(right2.into());
// However, this addition can overflow in the most extreme case
// (-0x8000)*(-0x8000)+(-0x8000)*(-0x8000) = 0x80000000
let res = mul1.wrapping_add(mul2);
this.write_scalar(Scalar::from_i32(res), &dest)?;
}
}
// Used to implement the _mm256_maddubs_epi16 function.
// Multiplies packed 8-bit unsigned integers from `left` and packed
// signed 8-bit integers from `right` into 16-bit signed integers. Then,
@ -285,39 +240,6 @@ pub(super) trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
this.copy_op(&left, &dest)?;
}
}
// Used to implement the _mm256_permute2x128_si256 function.
// Shuffles 128-bit blocks of `a` and `b` using `imm` as pattern.
"vperm2i128" => {
let [left, right, imm] =
this.check_shim_sig_lenient(abi, CanonAbi::C, link_name, args)?;
assert_eq!(left.layout.size.bits(), 256);
assert_eq!(right.layout.size.bits(), 256);
assert_eq!(dest.layout.size.bits(), 256);
// Transmute to `[i128; 2]`
let array_layout =
this.layout_of(Ty::new_array(this.tcx.tcx, this.tcx.types.i128, 2))?;
let left = left.transmute(array_layout, this)?;
let right = right.transmute(array_layout, this)?;
let dest = dest.transmute(array_layout, this)?;
let imm = this.read_scalar(imm)?.to_u8()?;
for i in 0..2 {
let dest = this.project_index(&dest, i)?;
let src = match (imm >> i.strict_mul(4)) & 0b11 {
0 => this.project_index(&left, 0)?,
1 => this.project_index(&left, 1)?,
2 => this.project_index(&right, 0)?,
3 => this.project_index(&right, 1)?,
_ => unreachable!(),
};
this.copy_op(&src, &dest)?;
}
}
// Used to implement the _mm256_sad_epu8 function.
// Compute the absolute differences of packed unsigned 8-bit integers
// in `left` and `right`, then horizontally sum each consecutive 8

View file

@ -42,9 +42,7 @@ pub(super) trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
// https://www.intel.com/content/www/us/en/docs/cpp-compiler/developer-guide-reference/2021-8/addcarry-u32-addcarry-u64.html
// https://www.intel.com/content/www/us/en/docs/cpp-compiler/developer-guide-reference/2021-8/subborrow-u32-subborrow-u64.html
"addcarry.32" | "addcarry.64" | "subborrow.32" | "subborrow.64" => {
if unprefixed_name.ends_with("64")
&& this.tcx.sess.target.arch != Arch::X86_64
{
if unprefixed_name.ends_with("64") && this.tcx.sess.target.arch != Arch::X86_64 {
return interp_ok(EmulateItemResult::NotSupported);
}
@ -61,28 +59,6 @@ pub(super) trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
this.write_immediate(*sum, &this.project_field(dest, FieldIdx::ONE)?)?;
}
// Used to implement the `_addcarryx_u{32, 64}` functions. They are semantically identical with the `_addcarry_u{32, 64}` functions,
// except for a slightly different type signature and the requirement for the "adx" target feature.
// https://www.intel.com/content/www/us/en/docs/cpp-compiler/developer-guide-reference/2021-8/addcarryx-u32-addcarryx-u64.html
"addcarryx.u32" | "addcarryx.u64" => {
this.expect_target_feature_for_intrinsic(link_name, "adx")?;
let is_u64 = unprefixed_name.ends_with("64");
if is_u64 && this.tcx.sess.target.arch != Arch::X86_64 {
return interp_ok(EmulateItemResult::NotSupported);
}
let [c_in, a, b, out] =
this.check_shim_sig_lenient(abi, CanonAbi::C, link_name, args)?;
let out = this.deref_pointer_as(
out,
if is_u64 { this.machine.layouts.u64 } else { this.machine.layouts.u32 },
)?;
let (sum, c_out) = carrying_add(this, c_in, a, b, mir::BinOp::AddWithOverflow)?;
this.write_scalar(c_out, dest)?;
this.write_immediate(*sum, &out)?;
}
// Used to implement the `_mm_pause` function.
// The intrinsic is used to hint the processor that the code is in a spin-loop.
// It is compiled down to a `pause` instruction. When SSE2 is not available,
@ -721,36 +697,6 @@ fn convert_float_to_int<'tcx>(
interp_ok(())
}
/// Calculates absolute value of integers in `op` and stores the result in `dest`.
///
/// In case of overflow (when the operand is the minimum value), the operation
/// will wrap around.
fn int_abs<'tcx>(
ecx: &mut crate::MiriInterpCx<'tcx>,
op: &OpTy<'tcx>,
dest: &MPlaceTy<'tcx>,
) -> InterpResult<'tcx, ()> {
let (op, op_len) = ecx.project_to_simd(op)?;
let (dest, dest_len) = ecx.project_to_simd(dest)?;
assert_eq!(op_len, dest_len);
let zero = ImmTy::from_int(0, op.layout.field(ecx, 0));
for i in 0..dest_len {
let op = ecx.read_immediate(&ecx.project_index(&op, i)?)?;
let dest = ecx.project_index(&dest, i)?;
let lt_zero = ecx.binary_op(mir::BinOp::Lt, &op, &zero)?;
let res =
if lt_zero.to_scalar().to_bool()? { ecx.unary_op(mir::UnOp::Neg, &op)? } else { op };
ecx.write_immediate(*res, &dest)?;
}
interp_ok(())
}
/// Splits `op` (which must be a SIMD vector) into 128-bit chunks.
///
/// Returns a tuple where:

View file

@ -180,29 +180,6 @@ pub(super) trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
this.write_immediate(*res, dest)?;
}
// Used to implement the _mm_cvtsi32_ss and _mm_cvtsi64_ss functions.
// Converts `right` from i32/i64 to f32. Returns a SIMD vector with
// the result in the first component and the remaining components
// are copied from `left`.
// https://www.felixcloutier.com/x86/cvtsi2ss
"cvtsi2ss" | "cvtsi642ss" => {
let [left, right] =
this.check_shim_sig_lenient(abi, CanonAbi::C, link_name, args)?;
let (left, left_len) = this.project_to_simd(left)?;
let (dest, dest_len) = this.project_to_simd(dest)?;
assert_eq!(dest_len, left_len);
let right = this.read_immediate(right)?;
let dest0 = this.project_index(&dest, 0)?;
let res0 = this.int_to_int_or_float(&right, dest0.layout)?;
this.write_immediate(*res0, &dest0)?;
for i in 1..dest_len {
this.copy_op(&this.project_index(&left, i)?, &this.project_index(&dest, i)?)?;
}
}
_ => return interp_ok(EmulateItemResult::NotSupported),
}
interp_ok(EmulateItemResult::NeedsReturn)

View file

@ -36,42 +36,6 @@ pub(super) trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
// Intrinsincs sufixed with "epiX" or "epuX" operate with X-bit signed or unsigned
// vectors.
match unprefixed_name {
// Used to implement the _mm_madd_epi16 function.
// Multiplies packed signed 16-bit integers in `left` and `right`, producing
// intermediate signed 32-bit integers. Horizontally add adjacent pairs of
// intermediate 32-bit integers, and pack the results in `dest`.
"pmadd.wd" => {
let [left, right] =
this.check_shim_sig_lenient(abi, CanonAbi::C, link_name, args)?;
let (left, left_len) = this.project_to_simd(left)?;
let (right, right_len) = this.project_to_simd(right)?;
let (dest, dest_len) = this.project_to_simd(dest)?;
assert_eq!(left_len, right_len);
assert_eq!(dest_len.strict_mul(2), left_len);
for i in 0..dest_len {
let j1 = i.strict_mul(2);
let left1 = this.read_scalar(&this.project_index(&left, j1)?)?.to_i16()?;
let right1 = this.read_scalar(&this.project_index(&right, j1)?)?.to_i16()?;
let j2 = j1.strict_add(1);
let left2 = this.read_scalar(&this.project_index(&left, j2)?)?.to_i16()?;
let right2 = this.read_scalar(&this.project_index(&right, j2)?)?.to_i16()?;
let dest = this.project_index(&dest, i)?;
// Multiplications are i16*i16->i32, which will not overflow.
let mul1 = i32::from(left1).strict_mul(right1.into());
let mul2 = i32::from(left2).strict_mul(right2.into());
// However, this addition can overflow in the most extreme case
// (-0x8000)*(-0x8000)+(-0x8000)*(-0x8000) = 0x80000000
let res = mul1.wrapping_add(mul2);
this.write_scalar(Scalar::from_i32(res), &dest)?;
}
}
// Used to implement the _mm_sad_epu8 function.
// Computes the absolute differences of packed unsigned 8-bit integers in `a`
// and `b`, then horizontally sum each consecutive 8 differences to produce
@ -320,10 +284,10 @@ pub(super) trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
this.write_immediate(*res, dest)?;
}
// Used to implement the _mm_cvtsd_ss and _mm_cvtss_sd functions.
// Converts the first f64/f32 from `right` to f32/f64 and copies
// the remaining elements from `left`
"cvtsd2ss" | "cvtss2sd" => {
// Used to implement the _mm_cvtsd_ss function.
// Converts the first f64 from `right` to f32 and copies the remaining
// elements from `left`
"cvtsd2ss" => {
let [left, right] =
this.check_shim_sig_lenient(abi, CanonAbi::C, link_name, args)?;
@ -336,8 +300,6 @@ pub(super) trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
// Convert first element of `right`
let right0 = this.read_immediate(&this.project_index(&right, 0)?)?;
let dest0 = this.project_index(&dest, 0)?;
// `float_to_float_or_int` here will convert from f64 to f32 (cvtsd2ss) or
// from f32 to f64 (cvtss2sd).
let res0 = this.float_to_float_or_int(&right0, dest0.layout)?;
this.write_immediate(*res0, &dest0)?;

View file

@ -1,10 +1,8 @@
use rustc_abi::CanonAbi;
use rustc_middle::mir;
use rustc_middle::ty::Ty;
use rustc_span::Symbol;
use rustc_target::callconv::FnAbi;
use super::horizontal_bin_op;
use crate::*;
impl<'tcx> EvalContextExt<'tcx> for crate::MiriInterpCx<'tcx> {}
@ -22,21 +20,6 @@ pub(super) trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
let unprefixed_name = link_name.as_str().strip_prefix("llvm.x86.sse3.").unwrap();
match unprefixed_name {
// Used to implement the _mm_h{add,sub}_p{s,d} functions.
// Horizontally add/subtract adjacent floating point values
// in `left` and `right`.
"hadd.ps" | "hadd.pd" | "hsub.ps" | "hsub.pd" => {
let [left, right] =
this.check_shim_sig_lenient(abi, CanonAbi::C, link_name, args)?;
let which = match unprefixed_name {
"hadd.ps" | "hadd.pd" => mir::BinOp::Add,
"hsub.ps" | "hsub.pd" => mir::BinOp::Sub,
_ => unreachable!(),
};
horizontal_bin_op(this, which, /*saturating*/ false, left, right, dest)?;
}
// Used to implement the _mm_lddqu_si128 function.
// Reads a 128-bit vector from an unaligned pointer. This intrinsic
// is expected to perform better than a regular unaligned read when

View file

@ -157,20 +157,13 @@ pub(super) trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
mpsadbw(this, left, right, imm, dest)?;
}
// Used to implement the _mm_testz_si128, _mm_testc_si128
// and _mm_testnzc_si128 functions.
// Tests `(op & mask) == 0`, `(op & mask) == mask` or
// `(op & mask) != 0 && (op & mask) != mask`
"ptestz" | "ptestc" | "ptestnzc" => {
// Used to implement the _mm_testnzc_si128 function.
// Tests `(op & mask) != 0 && (op & mask) != mask`
"ptestnzc" => {
let [op, mask] = this.check_shim_sig_lenient(abi, CanonAbi::C, link_name, args)?;
let (all_zero, masked_set) = test_bits_masked(this, op, mask)?;
let res = match unprefixed_name {
"ptestz" => all_zero,
"ptestc" => masked_set,
"ptestnzc" => !all_zero && !masked_set,
_ => unreachable!(),
};
let res = !all_zero && !masked_set;
this.write_scalar(Scalar::from_i32(res.into()), dest)?;
}

View file

@ -4,7 +4,7 @@ use rustc_middle::ty::Ty;
use rustc_span::Symbol;
use rustc_target::callconv::FnAbi;
use super::{horizontal_bin_op, int_abs, pmulhrsw, psign};
use super::{horizontal_bin_op, pmulhrsw, psign};
use crate::*;
impl<'tcx> EvalContextExt<'tcx> for crate::MiriInterpCx<'tcx> {}
@ -22,13 +22,6 @@ pub(super) trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
let unprefixed_name = link_name.as_str().strip_prefix("llvm.x86.ssse3.").unwrap();
match unprefixed_name {
// Used to implement the _mm_abs_epi{8,16,32} functions.
// Calculates the absolute value of packed 8/16/32-bit integers.
"pabs.b.128" | "pabs.w.128" | "pabs.d.128" => {
let [op] = this.check_shim_sig_lenient(abi, CanonAbi::C, link_name, args)?;
int_abs(this, op, dest)?;
}
// Used to implement the _mm_shuffle_epi8 intrinsic.
// Shuffles bytes from `left` using `right` as pattern.
// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_shuffle_epi8
@ -58,23 +51,20 @@ pub(super) trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
this.write_scalar(res, &dest)?;
}
}
// Used to implement the _mm_h{add,adds,sub}_epi{16,32} functions.
// Horizontally add / add with saturation / subtract adjacent 16/32-bit
// Used to implement the _mm_h{adds,subs}_epi16 functions.
// Horizontally add / subtract with saturation adjacent 16-bit
// integer values in `left` and `right`.
"phadd.w.128" | "phadd.sw.128" | "phadd.d.128" | "phsub.w.128" | "phsub.sw.128"
| "phsub.d.128" => {
"phadd.sw.128" | "phsub.sw.128" => {
let [left, right] =
this.check_shim_sig_lenient(abi, CanonAbi::C, link_name, args)?;
let (which, saturating) = match unprefixed_name {
"phadd.w.128" | "phadd.d.128" => (mir::BinOp::Add, false),
"phadd.sw.128" => (mir::BinOp::Add, true),
"phsub.w.128" | "phsub.d.128" => (mir::BinOp::Sub, false),
"phsub.sw.128" => (mir::BinOp::Sub, true),
let which = match unprefixed_name {
"phadd.sw.128" => mir::BinOp::Add,
"phsub.sw.128" => mir::BinOp::Sub,
_ => unreachable!(),
};
horizontal_bin_op(this, which, saturating, left, right, dest)?;
horizontal_bin_op(this, which, /*saturating*/ true, left, right, dest)?;
}
// Used to implement the _mm_maddubs_epi16 function.
// Multiplies packed 8-bit unsigned integers from `left` and packed

View file

@ -27,7 +27,6 @@ dependencies = [
"autocfg",
"byteorder 0.5.3",
"byteorder 1.5.0",
"cdylib",
"exported_symbol",
"eyre",
"issue_1567",
@ -38,13 +37,6 @@ dependencies = [
"proc_macro_crate",
]
[[package]]
name = "cdylib"
version = "0.1.0"
dependencies = [
"byteorder 1.5.0",
]
[[package]]
name = "exported_symbol"
version = "0.1.0"

View file

@ -10,7 +10,6 @@ edition = "2024"
[dependencies]
byteorder = "1.0"
cdylib = { path = "cdylib" }
exported_symbol = { path = "exported-symbol" }
proc_macro_crate = { path = "proc-macro-crate" }
issue_1567 = { path = "issue-1567" }

View file

@ -1,12 +0,0 @@
[package]
name = "cdylib"
version = "0.1.0"
authors = ["Miri Team"]
edition = "2018"
[lib]
# cargo-miri used to handle `cdylib` crate-type specially (https://github.com/rust-lang/miri/pull/1577).
crate-type = ["cdylib"]
[dependencies]
byteorder = "1.0" # to test dependencies of sub-crates

View file

@ -1,6 +0,0 @@
use byteorder::{BigEndian, ByteOrder};
#[no_mangle]
extern "C" fn use_the_dependency() {
let _n = <BigEndian as ByteOrder>::read_u64(&[1, 2, 3, 4, 5, 6, 7, 8]);
}

View file

@ -1 +1 @@
subcrate,issue_1567,exported_symbol_dep,test_local_crate_detection,cargo_miri_test,cdylib,exported_symbol,issue_1691,issue_1705,issue_rust_86261,proc_macro_crate
subcrate,issue_1567,exported_symbol_dep,test_local_crate_detection,cargo_miri_test,exported_symbol,issue_1691,issue_1705,issue_rust_86261,proc_macro_crate

View file

@ -0,0 +1,29 @@
//@only-target: darwin
#![feature(sync_unsafe_cell)]
use std::cell::SyncUnsafeCell;
use std::sync::atomic::*;
use std::thread;
fn main() {
let lock = SyncUnsafeCell::new(libc::OS_UNFAIR_LOCK_INIT);
thread::scope(|s| {
// First thread: grabs the lock.
s.spawn(|| {
unsafe { libc::os_unfair_lock_lock(lock.get()) };
thread::yield_now();
unreachable!();
});
// Second thread: queues for the lock.
s.spawn(|| {
unsafe { libc::os_unfair_lock_lock(lock.get()) };
unreachable!();
});
// Third thread: tries to read the lock while second thread is queued.
s.spawn(|| {
let atomic_ref = unsafe { &*lock.get().cast::<AtomicU32>() };
let _val = atomic_ref.load(Ordering::Relaxed); //~ERROR: read of `os_unfair_lock` is forbidden while the queue is non-empty
});
});
}

View file

@ -0,0 +1,13 @@
error: Undefined Behavior: read of `os_unfair_lock` is forbidden while the queue is non-empty
--> tests/fail-dep/concurrency/apple_os_unfair_lock_move_with_queue.rs:LL:CC
|
LL | let _val = atomic_ref.load(Ordering::Relaxed);
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Undefined Behavior occurred here
|
= help: this indicates a bug in the program: it performed an invalid operation, and caused Undefined Behavior
= help: see https://doc.rust-lang.org/nightly/reference/behavior-considered-undefined.html for further information
note: some details are omitted, run with `MIRIFLAGS=-Zmiri-backtrace=full` for a verbose backtrace
error: aborting due to 1 previous error

View file

@ -1,6 +1,6 @@
//@ignore-target: windows # No pthreads on Windows
//@ normalize-stderr-test: "(\n)ALLOC \(.*\) \{\n(.*\n)*\}(\n)" -> "${1}ALLOC DUMP${3}"
//@ normalize-stderr-test: "\[0x[0-9a-z]..0x[0-9a-z]\]" -> "[0xX..0xY]"
//@ normalize-stderr-test: "\[0x[0-9a-z]+..0x[0-9a-z]+\]" -> "[0xX..0xY]"
/// Test that destroying a pthread_cond twice fails, even without a check for number validity

View file

@ -1,4 +1,4 @@
error: Undefined Behavior: `pthread_cond_t` can't be moved after first use
error: Undefined Behavior: `pthread_cond_t` was not properly initialized at this location, or it got overwritten
--> tests/fail-dep/concurrency/libc_pthread_cond_move.rs:LL:CC
|
LL | libc::pthread_cond_destroy(cond2.as_mut_ptr());

View file

@ -18,7 +18,7 @@ fn check() {
// move pthread_cond_t
let mut cond2 = cond;
libc::pthread_cond_destroy(cond2.as_mut_ptr()); //~[init] ERROR: can't be moved after first use
libc::pthread_cond_destroy(cond2.as_mut_ptr()); //~[init] ERROR: not properly initialized
}
}
@ -32,6 +32,6 @@ fn check() {
// move pthread_cond_t
let mut cond2 = cond;
libc::pthread_cond_destroy(&mut cond2 as *mut _); //~[static_initializer] ERROR: can't be moved after first use
libc::pthread_cond_destroy(&mut cond2 as *mut _); //~[static_initializer] ERROR: not properly initialized
}
}

View file

@ -1,4 +1,4 @@
error: Undefined Behavior: `pthread_cond_t` can't be moved after first use
error: Undefined Behavior: `pthread_cond_t` was not properly initialized at this location, or it got overwritten
--> tests/fail-dep/concurrency/libc_pthread_cond_move.rs:LL:CC
|
LL | libc::pthread_cond_destroy(&mut cond2 as *mut _);

View file

@ -1,6 +1,6 @@
//@ignore-target: windows # No pthreads on Windows
//@ normalize-stderr-test: "(\n)ALLOC \(.*\) \{\n(.*\n)*\}(\n)" -> "${1}ALLOC DUMP${3}"
//@ normalize-stderr-test: "\[0x[0-9a-z]..0x[0-9a-z]\]" -> "[0xX..0xY]"
//@ normalize-stderr-test: "\[0x[0-9a-z]+..0x[0-9a-z]+\]" -> "[0xX..0xY]"
/// Test that destroying a pthread_mutex twice fails, even without a check for number validity

View file

@ -0,0 +1,48 @@
//@ignore-target: windows # No pthreads on Windows
//@compile-flags: -Zmiri-deterministic-concurrency
//@error-in-other-file: deallocation of `pthread_mutex_t` is forbidden while the queue is non-empty
use std::cell::UnsafeCell;
use std::sync::atomic::*;
use std::thread;
struct Mutex(UnsafeCell<libc::pthread_mutex_t>);
impl Mutex {
fn get(&self) -> *mut libc::pthread_mutex_t {
self.0.get()
}
}
unsafe impl Send for Mutex {}
unsafe impl Sync for Mutex {}
fn main() {
let m = Box::new(Mutex(UnsafeCell::new(libc::PTHREAD_MUTEX_INITIALIZER)));
let initialized = AtomicBool::new(false);
thread::scope(|s| {
// First thread: initializes the lock, and then grabs it.
s.spawn(|| {
// Initialize (so the third thread can happens-after the write that occurs here).
assert_eq!(unsafe { libc::pthread_mutex_lock(m.get()) }, 0);
assert_eq!(unsafe { libc::pthread_mutex_unlock(m.get()) }, 0);
initialized.store(true, Ordering::Release);
// Grab and hold.
assert_eq!(unsafe { libc::pthread_mutex_lock(m.get()) }, 0);
thread::yield_now();
unreachable!();
});
// Second thread: queues for the lock.
s.spawn(|| {
assert_eq!(unsafe { libc::pthread_mutex_lock(m.get()) }, 0);
unreachable!();
});
// Third thread: tries to free the lock while second thread is queued.
s.spawn(|| {
// Ensure we happen-after the initialization write.
assert!(initialized.load(Ordering::Acquire));
// Now drop it.
drop(unsafe { Box::from_raw(m.get().cast::<Mutex>()) });
});
});
unreachable!();
}

View file

@ -0,0 +1,22 @@
error: Undefined Behavior: deallocation of `pthread_mutex_t` is forbidden while the queue is non-empty
--> RUSTLIB/alloc/src/boxed.rs:LL:CC
|
LL | self.1.deallocate(From::from(ptr.cast()), layout);
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Undefined Behavior occurred here
|
= help: this indicates a bug in the program: it performed an invalid operation, and caused Undefined Behavior
= help: see https://doc.rust-lang.org/nightly/reference/behavior-considered-undefined.html for further information
= note: BACKTRACE on thread `unnamed-ID`:
= note: inside `<std::boxed::Box<Mutex> as std::ops::Drop>::drop` at RUSTLIB/alloc/src/boxed.rs:LL:CC
= note: inside `std::ptr::drop_in_place::<std::boxed::Box<Mutex>> - shim(Some(std::boxed::Box<Mutex>))` at RUSTLIB/core/src/ptr/mod.rs:LL:CC
= note: inside `std::mem::drop::<std::boxed::Box<Mutex>>` at RUSTLIB/core/src/mem/mod.rs:LL:CC
note: inside closure
--> tests/fail-dep/concurrency/libc_pthread_mutex_free_while_queued.rs:LL:CC
|
LL | drop(unsafe { Box::from_raw(m.get().cast::<Mutex>()) });
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
note: some details are omitted, run with `MIRIFLAGS=-Zmiri-backtrace=full` for a verbose backtrace
error: aborting due to 1 previous error

View file

@ -1,4 +1,4 @@
error: Undefined Behavior: `pthread_mutex_t` can't be moved after first use
error: Undefined Behavior: `pthread_mutex_t` was not properly initialized at this location, or it got overwritten
--> tests/fail-dep/concurrency/libc_pthread_mutex_move.rs:LL:CC
|
LL | libc::pthread_mutex_lock(&mut m2 as *mut _);

View file

@ -12,7 +12,7 @@ fn check() {
assert_eq!(libc::pthread_mutex_init(&mut m as *mut _, std::ptr::null()), 0);
let mut m2 = m; // move the mutex
libc::pthread_mutex_lock(&mut m2 as *mut _); //~[init] ERROR: can't be moved after first use
libc::pthread_mutex_lock(&mut m2 as *mut _); //~[init] ERROR: not properly initialized
}
}
@ -23,6 +23,6 @@ fn check() {
libc::pthread_mutex_lock(&mut m as *mut _);
let mut m2 = m; // move the mutex
libc::pthread_mutex_unlock(&mut m2 as *mut _); //~[static_initializer] ERROR: can't be moved after first use
libc::pthread_mutex_unlock(&mut m2 as *mut _); //~[static_initializer] ERROR: not properly initialized
}
}

View file

@ -1,4 +1,4 @@
error: Undefined Behavior: `pthread_mutex_t` can't be moved after first use
error: Undefined Behavior: `pthread_mutex_t` was not properly initialized at this location, or it got overwritten
--> tests/fail-dep/concurrency/libc_pthread_mutex_move.rs:LL:CC
|
LL | libc::pthread_mutex_unlock(&mut m2 as *mut _);

View file

@ -0,0 +1,14 @@
//@ignore-target: windows # No pthreads on Windows
fn main() {
unsafe {
let mut m: libc::pthread_mutex_t = libc::PTHREAD_MUTEX_INITIALIZER;
libc::pthread_mutex_lock(&mut m as *mut _);
// Overwrite the mutex with itself. This de-initializes it.
let copy = m;
m = copy;
libc::pthread_mutex_unlock(&mut m as *mut _); //~ERROR: not properly initialized
}
}

View file

@ -0,0 +1,13 @@
error: Undefined Behavior: `pthread_mutex_t` was not properly initialized at this location, or it got overwritten
--> tests/fail-dep/concurrency/libc_pthread_mutex_overwrite.rs:LL:CC
|
LL | libc::pthread_mutex_unlock(&mut m as *mut _);
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Undefined Behavior occurred here
|
= help: this indicates a bug in the program: it performed an invalid operation, and caused Undefined Behavior
= help: see https://doc.rust-lang.org/nightly/reference/behavior-considered-undefined.html for further information
note: some details are omitted, run with `MIRIFLAGS=-Zmiri-backtrace=full` for a verbose backtrace
error: aborting due to 1 previous error

View file

@ -0,0 +1,41 @@
//@ignore-target: windows # No pthreads on Windows
//@compile-flags: -Zmiri-fixed-schedule
use std::cell::UnsafeCell;
use std::sync::atomic::*;
use std::thread;
struct Mutex(UnsafeCell<libc::pthread_mutex_t>);
impl Mutex {
fn get(&self) -> *mut libc::pthread_mutex_t {
self.0.get()
}
}
unsafe impl Send for Mutex {}
unsafe impl Sync for Mutex {}
// The offset to the "sensitive" part of the mutex (that Miri attaches the metadata to).
const OFFSET: usize = if cfg!(target_os = "macos") { 4 } else { 0 };
fn main() {
let m = Mutex(UnsafeCell::new(libc::PTHREAD_MUTEX_INITIALIZER));
thread::scope(|s| {
// First thread: grabs the lock.
s.spawn(|| {
assert_eq!(unsafe { libc::pthread_mutex_lock(m.get()) }, 0);
thread::yield_now();
unreachable!();
});
// Second thread: queues for the lock.
s.spawn(|| {
assert_eq!(unsafe { libc::pthread_mutex_lock(m.get()) }, 0);
unreachable!();
});
// Third thread: tries to read the lock while second thread is queued.
s.spawn(|| {
let atomic_ref = unsafe { &*m.get().byte_add(OFFSET).cast::<AtomicU32>() };
let _val = atomic_ref.load(Ordering::Relaxed); //~ERROR: read of `pthread_mutex_t` is forbidden while the queue is non-empty
});
});
}

View file

@ -0,0 +1,13 @@
error: Undefined Behavior: read of `pthread_mutex_t` is forbidden while the queue is non-empty
--> tests/fail-dep/concurrency/libc_pthread_mutex_read_while_queued.rs:LL:CC
|
LL | ... let _val = atomic_ref.load(Ordering::Relaxed);
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Undefined Behavior occurred here
|
= help: this indicates a bug in the program: it performed an invalid operation, and caused Undefined Behavior
= help: see https://doc.rust-lang.org/nightly/reference/behavior-considered-undefined.html for further information
note: some details are omitted, run with `MIRIFLAGS=-Zmiri-backtrace=full` for a verbose backtrace
error: aborting due to 1 previous error

View file

@ -0,0 +1,41 @@
//@ignore-target: windows # No pthreads on Windows
//@compile-flags: -Zmiri-fixed-schedule
use std::cell::UnsafeCell;
use std::sync::atomic::*;
use std::thread;
struct Mutex(UnsafeCell<libc::pthread_mutex_t>);
impl Mutex {
fn get(&self) -> *mut libc::pthread_mutex_t {
self.0.get()
}
}
unsafe impl Send for Mutex {}
unsafe impl Sync for Mutex {}
// The offset to the "sensitive" part of the mutex (that Miri attaches the metadata to).
const OFFSET: usize = if cfg!(target_os = "macos") { 4 } else { 0 };
fn main() {
let m = Mutex(UnsafeCell::new(libc::PTHREAD_MUTEX_INITIALIZER));
thread::scope(|s| {
// First thread: grabs the lock.
s.spawn(|| {
assert_eq!(unsafe { libc::pthread_mutex_lock(m.get()) }, 0);
thread::yield_now();
unreachable!();
});
// Second thread: queues for the lock.
s.spawn(|| {
assert_eq!(unsafe { libc::pthread_mutex_lock(m.get()) }, 0);
unreachable!();
});
// Third thread: tries to overwrite the lock while second thread is queued.
s.spawn(|| {
let atomic_ref = unsafe { &*m.get().byte_add(OFFSET).cast::<AtomicU32>() };
atomic_ref.store(0, Ordering::Relaxed); //~ERROR: write of `pthread_mutex_t` is forbidden while the queue is non-empty
});
});
}

View file

@ -0,0 +1,13 @@
error: Undefined Behavior: write of `pthread_mutex_t` is forbidden while the queue is non-empty
--> tests/fail-dep/concurrency/libc_pthread_mutex_write_while_queued.rs:LL:CC
|
LL | atomic_ref.store(0, Ordering::Relaxed);
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Undefined Behavior occurred here
|
= help: this indicates a bug in the program: it performed an invalid operation, and caused Undefined Behavior
= help: see https://doc.rust-lang.org/nightly/reference/behavior-considered-undefined.html for further information
note: some details are omitted, run with `MIRIFLAGS=-Zmiri-backtrace=full` for a verbose backtrace
error: aborting due to 1 previous error

View file

@ -1,6 +1,6 @@
//@ignore-target: windows # No pthreads on Windows
//@ normalize-stderr-test: "(\n)ALLOC \(.*\) \{\n(.*\n)*\}(\n)" -> "${1}ALLOC DUMP${3}"
//@ normalize-stderr-test: "\[0x[0-9a-z]..0x[0-9a-z]\]" -> "[0xX..0xY]"
//@ normalize-stderr-test: "\[0x[0-9a-z]+..0x[0-9a-z]+\]" -> "[0xX..0xY]"
/// Test that destroying a pthread_rwlock twice fails, even without a check for number validity

View file

@ -9,6 +9,6 @@ fn main() {
// Move rwlock
let mut rw2 = rw;
libc::pthread_rwlock_unlock(&mut rw2 as *mut _); //~ ERROR: can't be moved after first use
libc::pthread_rwlock_unlock(&mut rw2 as *mut _); //~ ERROR: not properly initialized
}
}

View file

@ -1,4 +1,4 @@
error: Undefined Behavior: `pthread_rwlock_t` can't be moved after first use
error: Undefined Behavior: `pthread_rwlock_t` was not properly initialized at this location, or it got overwritten
--> tests/fail-dep/concurrency/libx_pthread_rwlock_moved.rs:LL:CC
|
LL | libc::pthread_rwlock_unlock(&mut rw2 as *mut _);

View file

@ -6,7 +6,7 @@ use std::simd::*;
fn main() {
unsafe {
let buf = [0u32; 5];
//~v ERROR: accessing memory with alignment
//~v ERROR: accessing memory with alignment
simd_masked_load::<_, _, _, { SimdAlign::Element }>(
i32x4::splat(-1),
// This is not i32-aligned

View file

@ -14,12 +14,10 @@ fn main() {
libc::os_unfair_lock_assert_not_owner(lock.get());
}
// `os_unfair_lock`s can be moved and leaked.
// In the real implementation, even moving it while locked is possible
// (and "forks" the lock, i.e. old and new location have independent wait queues).
// We only test the somewhat sane case of moving while unlocked that `std` plans to rely on.
// `os_unfair_lock`s can be moved, and even acquired again then.
let lock = lock;
let locked = unsafe { libc::os_unfair_lock_trylock(lock.get()) };
assert!(locked);
let _lock = lock;
assert!(unsafe { libc::os_unfair_lock_trylock(lock.get()) });
// We can even move it while locked, but then we cannot acquire it any more.
let lock = lock;
assert!(!unsafe { libc::os_unfair_lock_trylock(lock.get()) });
}

View file

@ -8,18 +8,21 @@ use std::mem::MaybeUninit;
use std::{mem, ptr, thread};
fn main() {
test_mutex();
test_mutex_libc_init_recursive();
test_mutex_libc_init_normal();
test_mutex_libc_init_errorcheck();
test_rwlock_libc_static_initializer();
#[cfg(target_os = "linux")]
test_mutex_libc_static_initializer_recursive();
#[cfg(target_os = "linux")]
test_mutex_libc_static_initializer_errorcheck();
check_mutex();
check_rwlock_write();
check_rwlock_read_no_deadlock();
check_cond();
check_condattr();
test_cond();
test_condattr();
test_rwlock();
test_rwlock_write();
test_rwlock_read_no_deadlock();
}
// We want to only use pthread APIs here for easier testing.
@ -107,8 +110,7 @@ fn test_mutex_libc_init_errorcheck() {
}
}
// Only linux provides PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP,
// libc for macOS just has the default PTHREAD_MUTEX_INITIALIZER.
// Only linux provides PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP.
#[cfg(target_os = "linux")]
fn test_mutex_libc_static_initializer_recursive() {
let mutex = std::cell::UnsafeCell::new(libc::PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP);
@ -126,6 +128,22 @@ fn test_mutex_libc_static_initializer_recursive() {
}
}
// Only linux provides PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP.
#[cfg(target_os = "linux")]
fn test_mutex_libc_static_initializer_errorcheck() {
let mutex = std::cell::UnsafeCell::new(libc::PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP);
unsafe {
assert_eq!(libc::pthread_mutex_lock(mutex.get()), 0);
assert_eq!(libc::pthread_mutex_trylock(mutex.get()), libc::EBUSY);
assert_eq!(libc::pthread_mutex_lock(mutex.get()), libc::EDEADLK);
assert_eq!(libc::pthread_mutex_unlock(mutex.get()), 0);
assert_eq!(libc::pthread_mutex_trylock(mutex.get()), 0);
assert_eq!(libc::pthread_mutex_unlock(mutex.get()), 0);
assert_eq!(libc::pthread_mutex_unlock(mutex.get()), libc::EPERM);
assert_eq!(libc::pthread_mutex_destroy(mutex.get()), 0);
}
}
struct SendPtr<T> {
ptr: *mut T,
}
@ -137,7 +155,7 @@ impl<T> Clone for SendPtr<T> {
}
}
fn check_mutex() {
fn test_mutex() {
let bomb = AbortOnDrop;
// Specifically *not* using `Arc` to make sure there is no synchronization apart from the mutex.
unsafe {
@ -168,7 +186,7 @@ fn check_mutex() {
bomb.defuse();
}
fn check_rwlock_write() {
fn test_rwlock_write() {
let bomb = AbortOnDrop;
unsafe {
let data = SyncUnsafeCell::new((libc::PTHREAD_RWLOCK_INITIALIZER, 0));
@ -209,7 +227,7 @@ fn check_rwlock_write() {
bomb.defuse();
}
fn check_rwlock_read_no_deadlock() {
fn test_rwlock_read_no_deadlock() {
let bomb = AbortOnDrop;
unsafe {
let l1 = SyncUnsafeCell::new(libc::PTHREAD_RWLOCK_INITIALIZER);
@ -237,12 +255,11 @@ fn check_rwlock_read_no_deadlock() {
bomb.defuse();
}
fn check_cond() {
fn test_cond() {
let bomb = AbortOnDrop;
unsafe {
let mut cond: MaybeUninit<libc::pthread_cond_t> = MaybeUninit::uninit();
assert_eq!(libc::pthread_cond_init(cond.as_mut_ptr(), ptr::null()), 0);
let cond = SendPtr { ptr: cond.as_mut_ptr() };
let mut cond: libc::pthread_cond_t = libc::PTHREAD_COND_INITIALIZER;
let cond = SendPtr { ptr: &mut cond };
let mut mutex: libc::pthread_mutex_t = libc::PTHREAD_MUTEX_INITIALIZER;
let mutex = SendPtr { ptr: &mut mutex };
@ -286,7 +303,7 @@ fn check_cond() {
bomb.defuse();
}
fn check_condattr() {
fn test_condattr() {
unsafe {
// Just smoke-testing that these functions can be called.
let mut attr: MaybeUninit<libc::pthread_condattr_t> = MaybeUninit::uninit();
@ -311,9 +328,7 @@ fn check_condattr() {
}
}
// std::sync::RwLock does not even use pthread_rwlock any more.
// Do some smoke testing of the API surface.
fn test_rwlock_libc_static_initializer() {
fn test_rwlock() {
let rw = std::cell::UnsafeCell::new(libc::PTHREAD_RWLOCK_INITIALIZER);
unsafe {
assert_eq!(libc::pthread_rwlock_rdlock(rw.get()), 0);

View file

@ -20,7 +20,7 @@ contributing_url = "https://github.com/rust-lang/miri/blob/master/CONTRIBUTING.m
[assign.custom_welcome_messages]
welcome-message = "(unused)"
welcome-message-no-reviewer = """
Thank you for contributing to Miri!
Thank you for contributing to Miri! A reviewer will take a look at your PR, typically within a week or two.
Please remember to not force-push to the PR branch except when you need to rebase due to a conflict or when the reviewer asks you for it.
"""

View file

@ -1,9 +1,10 @@
use std::collections::HashSet;
use std::fmt::{Display, Formatter};
use std::io;
use std::path::{Path, PathBuf};
use std::sync::{Arc, Mutex};
use termcolor::{Color, WriteColor};
use termcolor::Color;
#[derive(Clone, Default)]
/// CLI flags used by tidy.
@ -245,30 +246,63 @@ pub const COLOR_WARNING: Color = Color::Yellow;
/// Output a message to stderr.
/// The message can be optionally scoped to a certain check, and it can also have a certain color.
pub fn output_message(msg: &str, id: Option<&CheckId>, color: Option<Color>) {
use std::io::Write;
use termcolor::{ColorChoice, ColorSpec};
use termcolor::{ColorChoice, ColorSpec, StandardStream};
let stderr: &mut dyn termcolor::WriteColor = if cfg!(test) {
&mut StderrForUnitTests
} else {
&mut termcolor::StandardStream::stderr(ColorChoice::Auto)
};
let mut stderr = StandardStream::stderr(ColorChoice::Auto);
if let Some(color) = &color {
stderr.set_color(ColorSpec::new().set_fg(Some(*color))).unwrap();
}
match id {
Some(id) => {
write!(&mut stderr, "tidy [{}", id.name).unwrap();
write!(stderr, "tidy [{}", id.name).unwrap();
if let Some(path) = &id.path {
write!(&mut stderr, " ({})", path.display()).unwrap();
write!(stderr, " ({})", path.display()).unwrap();
}
write!(&mut stderr, "]").unwrap();
write!(stderr, "]").unwrap();
}
None => {
write!(&mut stderr, "tidy").unwrap();
write!(stderr, "tidy").unwrap();
}
}
if color.is_some() {
stderr.set_color(&ColorSpec::new()).unwrap();
}
writeln!(&mut stderr, ": {msg}").unwrap();
writeln!(stderr, ": {msg}").unwrap();
}
/// An implementation of `io::Write` and `termcolor::WriteColor` that writes
/// to stderr via `eprint!`, so that the output can be properly captured when
/// running tidy's unit tests.
struct StderrForUnitTests;

impl io::Write for StderrForUnitTests {
    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
        // Route the bytes through `eprint!` (lossily decoded as UTF-8) so the
        // test harness's stderr capture sees them.
        let text = String::from_utf8_lossy(buf);
        eprint!("{text}");
        // Report the entire buffer as consumed.
        Ok(buf.len())
    }

    fn flush(&mut self) -> io::Result<()> {
        // `eprint!` writes through immediately; there is nothing to flush.
        Ok(())
    }
}
impl termcolor::WriteColor for StderrForUnitTests {
    // Advertise no color support so callers avoid emitting escape sequences,
    // keeping captured test output as plain text.
    fn supports_color(&self) -> bool {
        false
    }
    // Color changes are accepted but ignored.
    fn set_color(&mut self, _spec: &termcolor::ColorSpec) -> io::Result<()> {
        Ok(())
    }
    fn reset(&mut self) -> io::Result<()> {
        Ok(())
    }
}

View file

@ -1,11 +0,0 @@
//@ known-bug: #140729
#![feature(min_generic_const_args)]
const C: usize = 0;
pub struct A<const M: usize> {}
impl A<C> {
fn fun1() {}
}
impl A {
fn fun1() {}
}

View file

@ -1,10 +0,0 @@
//@ known-bug: #140860
#![feature(min_generic_const_args)]
#![feature(unsized_const_params)]
#![feature(with_negative_coherence, negative_impls)]
trait a < const b : &'static str> {} trait c {} struct d< e >(e);
impl<e> c for e where e: a<""> {}
impl<e> c for d<e> {}
impl<e> !a<f> for e {}
const f : &str = "";
fn main() {}

View file

@ -38,8 +38,8 @@ LL | trait Bar: [const] Foo {}
|
help: enable `#![feature(const_trait_impl)]` in your crate and mark `Foo` as `const` to allow it to have `const` implementations
|
LL | #[const_trait] trait Foo {
| ++++++++++++++
LL | const trait Foo {
| +++++
error: `[const]` can only be applied to `const` traits
--> const-super-trait.rs:9:17
@ -49,8 +49,8 @@ LL | const fn foo<T: [const] Bar>(x: &T) {
|
help: enable `#![feature(const_trait_impl)]` in your crate and mark `Bar` as `const` to allow it to have `const` implementations
|
LL | #[const_trait] trait Bar: [const] Foo {}
| ++++++++++++++
LL | const trait Bar: [const] Foo {}
| +++++
error[E0015]: cannot call non-const method `<T as Foo>::a` in constant functions
--> const-super-trait.rs:10:7
@ -65,13 +65,12 @@ LL | trait Foo {
| ^^^^^^^^^ this trait is not const
LL | fn a(&self);
| ------------ this method is not const
= help: add `#![feature(const_trait_impl)]` to the crate attributes to enable `#[const_trait]`
= help: add `#![feature(const_trait_impl)]` to the crate attributes to enable const traits
= note: calls in constant functions are limited to constant functions, tuple structs and tuple variants
help: consider making trait `Foo` const
|
LL + #[const_trait]
LL | trait Foo {
|
LL | const trait Foo {
| +++++
error: aborting due to 6 previous errors

View file

@ -18,8 +18,8 @@ LL | trait Bar: [const] Foo {}
|
help: mark `Foo` as `const` to allow it to have `const` implementations
|
LL | #[const_trait] trait Foo {
| ++++++++++++++
LL | const trait Foo {
| +++++
error: `[const]` can only be applied to `const` traits
--> const-super-trait.rs:9:17
@ -29,8 +29,8 @@ LL | const fn foo<T: [const] Bar>(x: &T) {
|
help: mark `Bar` as `const` to allow it to have `const` implementations
|
LL | #[const_trait] trait Bar: [const] Foo {}
| ++++++++++++++
LL | const trait Bar: [const] Foo {}
| +++++
error[E0015]: cannot call non-const method `<T as Foo>::a` in constant functions
--> const-super-trait.rs:10:7
@ -48,9 +48,8 @@ LL | fn a(&self);
= note: calls in constant functions are limited to constant functions, tuple structs and tuple variants
help: consider making trait `Foo` const
|
LL + #[const_trait]
LL | trait Foo {
|
LL | const trait Foo {
| +++++
error: aborting due to 4 previous errors

View file

@ -3,8 +3,7 @@
#![crate_name = "foo"]
#![feature(const_trait_impl)]
#[const_trait]
pub trait Tr {
pub const trait Tr {
fn f();
}

View file

@ -1,5 +1,4 @@
// check that we don't render `#[const_trait]` methods as `const` - even for
// const `trait`s and `impl`s.
// check that we don't render assoc fns as `const` - even for const `trait`s and `impl`s.
#![crate_name = "foo"]
#![feature(const_trait_impl)]
@ -8,8 +7,7 @@
//@ !has - '//*[@id="tymethod.required"]' 'const'
//@ has - '//*[@id="method.defaulted"]' 'fn defaulted()'
//@ !has - '//*[@id="method.defaulted"]' 'const'
#[const_trait]
pub trait Tr {
pub const trait Tr {
fn required();
fn defaulted() {}
}

View file

@ -19,8 +19,7 @@ pub struct S<T>(T);
//@ has - '//pre[@class="rust item-decl"]/code/a[@class="trait"]' 'Fn'
//@ !has - '//pre[@class="rust item-decl"]/code/span[@class="where"]' '[const]'
//@ has - '//pre[@class="rust item-decl"]/code/span[@class="where"]' ': Fn'
#[const_trait]
pub trait Tr<T> {
pub const trait Tr<T> {
//@ !has - '//section[@id="method.a"]/h4[@class="code-header"]' '[const]'
//@ has - '//section[@id="method.a"]/h4[@class="code-header"]/a[@class="trait"]' 'Fn'
//@ !has - '//section[@id="method.a"]/h4[@class="code-header"]/span[@class="where"]' '[const]'

Some files were not shown because too many files have changed in this diff Show more